diff --git a/lib/Db/ExifFields.php b/lib/Db/ExifFields.php
index e5629f5dd..4c06ad22d 100644
--- a/lib/Db/ExifFields.php
+++ b/lib/Db/ExifFields.php
@@ -11,11 +11,32 @@ class ExifFields
      * This is mostly only used for the metadata view.
      */
    public const EXIF_FIELDS_LIST = [
-        // Date/Time
+        // Original date fields
+        'SubSecDateTimeOriginal' => true,
         'DateTimeOriginal' => true,
+        'SonyDateTime' => true,
+
+        // Create date fields
+        'SubSecCreateDate' => true,
+        'CreationDate' => true,
+        'CreationDateValue' => true,
         'CreateDate' => true,
+        'TrackCreateDate' => true,
+        'MediaCreateDate' => true,
+        'FileCreateDate' => true,
+
+        // Modify date fields
+        'SubSecModifyDate' => true,
+        'ModifyDate' => true,
+        'TrackModifyDate' => true,
+        'MediaModifyDate' => true,
+        'FileModifyDate' => true,
+
+        // Timezone offsets
         'OffsetTimeOriginal' => true,
         'OffsetTime' => true,
+        'TimeZone' => true,
+        'OffsetTimeDigitized' => true,

         // Generated date fields
         'DateTimeEpoch' => true,
diff --git a/lib/Db/TimelineWrite.php b/lib/Db/TimelineWrite.php
index b7f224281..cf5fb8ec1 100644
--- a/lib/Db/TimelineWrite.php
+++ b/lib/Db/TimelineWrite.php
@@ -89,8 +89,14 @@ public function processFile(
         // Get exif data
         $exif = Exif::getExifFromFile($file);

+        // The -n argument changes how exiftool renders some date strings,
+        // so fetch a separate numeric-only array just for the GPS coordinates
+        $exifNumeric = Exif::getExifFromFile($file, ['-n']);
+        $exif['GPSLatitude'] = $exifNumeric['GPSLatitude'] ?? null;
+        $exif['GPSLongitude'] = $exifNumeric['GPSLongitude'] ?? null;
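+        // Illustration (made-up values): without -n, exiftool returns GPS
+        // coordinates formatted like `12 deg 34' 56.78" N`, while with -n it
+        // returns a plain signed decimal such as 12.582439, which is the form
+        // readCoord() expects.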
+
         // Check if EXIF is blank, which is probably wrong
-        if (0 === \count($exif)) {
+        if (0 === \count($exif) || 0 === \count($exifNumeric)) {
             throw new \Exception('No EXIF data could be read');
         }
@@ -115,27 +121,31 @@ public function processFile(
         // Video parameters
         $videoDuration = round((float) ($isvideo ? ($exif['Duration'] ?? $exif['TrackDuration'] ?? 0) : 0));

-        // Process location data
-        // This also modifies the exif array in-place to set the LocationTZID
+        // Process location data to set the LocationTZID
         // and drop the GPS data if it is not valid
-        [$lat, $lon, $mapCluster] = $this->processExifLocation($fileId, $exif, $prevRow);
+        [$lat, $lon, $mapCluster] = $this->processExifLocation($fileId, $exif, $exifNumeric, $prevRow);

         // Get date parameters (after setting timezone offset)
         $dateTaken = Exif::getDateTaken($file, $exif);
-
-        // Store the acutal epoch with the EXIF data
+
+        // Store the actual epoch with the EXIF data
         $epoch = $exif['DateTimeEpoch'] = $dateTaken->getTimestamp();

-        // Store the date taken in the database as UTC (local date) only
-        // Basically, assume everything happens in Greenwich
-        $dateLocalUtc = Exif::forgetTimezone($dateTaken)->getTimestamp();
-        $dateTakenStr = gmdate('Y-m-d H:i:s', $dateLocalUtc);
-
-        // We need to use the local time in UTC for the dayId
-        // This way two photos in different timezones on the same date locally
-        // end up in the same dayId group
-        $dayId = floor($dateLocalUtc / 86400);
-
+        // Extract and store the timezone offset from the $dateTaken object
+        // This preserves the timezone that Exif::getDateTaken() determined
+        $exif['OffsetTimeOriginal'] = $dateTaken->format('P'); // e.g., "-05:00"
+
+        // Store the date taken string in the database as UTC only
+        $dateTakenUtc = clone $dateTaken;
+        $dateTakenUtc->setTimezone(new \DateTimeZone('UTC'));
+        $dateTakenStr = $dateTakenUtc->format('Y-m-d H:i:s');
+
+        // Anchor midnight of the local date taken in UTC, without any time
+        // shift, so the floor division below groups by local calendar date
+        $midnight = \DateTime::createFromFormat('Y-m-d H:i:s', $dateTaken->format('Y-m-d') . ' 00:00:00', new \DateTimeZone('UTC'));
+
+        // Gives a day group based on the timezone the photo was taken in
+        $dayId = (int) floor($midnight->getTimestamp() / 86400);
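+        // Worked example (illustrative): a photo taken 2023-03-05 23:30 at
+        // -08:00 and another taken 2023-03-05 01:00 at +09:00 share the local
+        // date 2023-03-05, so both floor to the same dayId even though their
+        // UTC instants fall on different days.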
+
         // Get size of image
         [$w, $h] = Exif::getDimensions($exif);
diff --git a/lib/Db/TimelineWritePlaces.php b/lib/Db/TimelineWritePlaces.php
index bace0fa9d..e174bc26f 100644
--- a/lib/Db/TimelineWritePlaces.php
+++ b/lib/Db/TimelineWritePlaces.php
@@ -95,14 +95,15 @@ public function updatePlacesData(int $fileId, ?float $lat, ?float $lon): array
      *
      * @param int $fileId The file ID
      * @param array $exif The exif data (will change)
+     * @param array $exifNumeric The exif numeric data
      * @param ?array $prevRow The previous row of data
      *
      * @return array Update values
      */
-    protected function processExifLocation(int $fileId, array &$exif, ?array $prevRow): array
+    protected function processExifLocation(int $fileId, array &$exif, array &$exifNumeric, ?array $prevRow): array
     {
         // Store location data
-        [$lat, $lon] = self::readCoord($exif);
+        [$lat, $lon] = self::readCoord($exifNumeric);
         $oldLat = $prevRow ? (float) $prevRow['lat'] : null;
         $oldLon = $prevRow ? (float) $prevRow['lon'] : null;
         $mapCluster = $prevRow ? (int) $prevRow['mapcluster'] : -1;
@@ -128,7 +129,7 @@ protected function processExifLocation(int $fileId, array &$exif, ?array $prevRo
         $mapCluster = $mapCluster <= 0 ? null : $mapCluster;

         // Set tzid from location if not present
-        $this->setTzidFromLocation($exif, $osmIds);
+        $this->setTzidFromLocation($exif, $exifNumeric, $osmIds, $lat, $lon);

         // Return update values
         return [$lat, $lon, $mapCluster, $osmIds];
@@ -138,43 +139,124 @@ protected function processExifLocation(int $fileId, array &$exif, ?array $prevRo
      * Set timezone offset from location if not present.
      *
      * @param array $exif The exif data
+     * @param array $exifNumeric The exif numeric data
      * @param array $osmIds The list of osm_id of the places
+     * @param ?float $lat The latitude
+     * @param ?float $lon The longitude
      */
-    private function setTzidFromLocation(array &$exif, array $osmIds): void
+    private function setTzidFromLocation(array &$exif, array &$exifNumeric, array $osmIds, ?float $lat, ?float $lon): void
     {
         // Make sure we have some places
-        if (empty($osmIds)) {
-            return;
+        if (!empty($osmIds)) {
+
+            // Get timezone offset from places
+            $query = $this->connection->getQueryBuilder();
+            $query->select('name')
+                ->from('memories_planet')
+                ->where($query->expr()->in('osm_id', $query->createNamedParameter($osmIds, IQueryBuilder::PARAM_INT_ARRAY)))
+                ->andWhere($query->expr()->eq('admin_level', $query->expr()->literal(-7, IQueryBuilder::PARAM_INT)))
+            ;
+
+            // Get name of timezone
+            $tzName = $query->executeQuery()->fetchOne();
+            if ($tzName !== false && $tzName !== '') {
+                $exif['LocationTZID'] = $tzName;
+                return;
+            }
+            // No usable value; fall through to the fallbacks below
         }

-        // Get timezone offset from places
-        $query = $this->connection->getQueryBuilder();
-        $query->select('name')
-            ->from('memories_planet')
-            ->where($query->expr()->in('osm_id', $query->createNamedParameter($osmIds, IQueryBuilder::PARAM_INT_ARRAY)))
-            ->andWhere($query->expr()->eq('admin_level', $query->expr()->literal(-7, IQueryBuilder::PARAM_INT)))
-        ;
-
-        // Get name of timezone
-        $tzName = $query->executeQuery()->fetchOne();
-        if ($tzName) {
-            $exif['LocationTZID'] = $tzName;
+        // Timezone precheck: skips the slow Python timezone lookup in most
+        // cases when getTimezoneFromPython would otherwise be called. It may
+        // still run needlessly when the offset only appears inside a date
+        // string, i.e. exiftool found one in a field we don't check here.
+        $hasTimezone = false;
+        try {
+            $tzStr = $exif['OffsetTimeOriginal']
+                ?? $exif['OffsetTime']
+                ?? $exif['OffsetTimeDigitized']
+                ?? $exif['TimeZone']
+                ?? throw new \Exception();
+
+            /** @psalm-suppress ArgumentTypeCoercion */
+            new \DateTimeZone((string) $tzStr);
+            $hasTimezone = true;
+        } catch (\Exception|\ValueError) {
+            $hasTimezone = false;
+        }
+
+        // Fall back to Python timezonefinder when neither the planet DB nor
+        // the dedicated EXIF fields yielded a timezone
+        if ($lat !== null && $lon !== null && !$hasTimezone) {
+            $tzName = $this->getTimezoneFromPython($lat, $lon);
+            if ($tzName !== null) {
+                $exif['LocationTZID'] = $tzName;
+            }
+        }
+    }
+
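+    // For reference, a minimal sketch of what python/findtimezone.py is
+    // assumed to look like; the actual script is not shown in this diff, so
+    // treat this as illustrative only:
+    //
+    //     import sys
+    //     from timezonefinder import TimezoneFinder
+    //
+    //     lat, lon = float(sys.argv[1]), float(sys.argv[2])
+    //     tz = TimezoneFinder().timezone_at(lat=lat, lng=lon)
+    //     print(tz if tz else "Error: timezone not found")
+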
+    /**
+     * Get timezone using Python timezonefinder as a fallback.
+     *
+     * @param ?float $lat The latitude
+     * @param ?float $lon The longitude
+     *
+     * @return ?string The timezone name or null if not found
+     */
+    private function getTimezoneFromPython(?float $lat, ?float $lon): ?string
+    {
+        // Validate coordinates
+        if (null === $lat || null === $lon) {
+            return null;
+        }
+
+        try {
+            // Get timezone using Python timezonefinder
+            $scriptPath = \dirname(__DIR__, 2) . '/python/findtimezone.py';
+            // Capture stderr too, so "not found" errors from the shell can be
+            // detected below
+            $command = sprintf('python3 %s %f %f 2>&1', escapeshellarg($scriptPath), $lat, $lon);
+            $output = shell_exec($command);
+            $trimmedOutput = trim((string) $output);
+
+            // Retry with python command instead of python3 if not found
+            if (strpos($trimmedOutput, 'not found') !== false) {
+                $command = sprintf('python %s %f %f 2>&1', escapeshellarg($scriptPath), $lat, $lon);
+                $output = shell_exec($command);
+                $trimmedOutput = trim((string) $output);
+            }
+
+            // Check if output contains error messages
+            if (strpos($trimmedOutput, 'Error:') !== false || strpos($trimmedOutput, 'not found') !== false) {
+                $this->logger->warning("Python timezone script failed: {$trimmedOutput}", ['app' => 'memories']);
+
+                return null;
+            }
+
+            // Return the trimmed output, if any
+            return $trimmedOutput !== '' ? $trimmedOutput : null;
+        } catch (\Exception $e) {
+            $this->logger->error("Error calling Python timezone script: {$e->getMessage()}", ['app' => 'memories']);
+
+            return null;
+        }
+    }

     /**
      * Read coordinates from array and round to 6 decimal places.
      *
-     * Modifies the EXIF array to remove invalid coordinates.
+     * Modifies the EXIF numeric array to remove invalid coordinates.
      *
      * @return (null|float)[]
     *
      * @psalm-return list{float|null, float|null}
      */
-    private static function readCoord(array &$exif): array
+    private static function readCoord(array &$exifNumeric): array
     {
-        $lat = \array_key_exists(LAT_KEY, $exif) ? round((float) $exif[LAT_KEY], 6) : null;
-        $lon = \array_key_exists(LON_KEY, $exif) ? round((float) $exif[LON_KEY], 6) : null;
+        $lat = \array_key_exists(LAT_KEY, $exifNumeric) ? round((float) $exifNumeric[LAT_KEY], 6) : null;
+        $lon = \array_key_exists(LON_KEY, $exifNumeric) ? round((float) $exifNumeric[LON_KEY], 6) : null;

         // Make sure we have valid coordinates
         if (null === $lat || null === $lon
@@ -184,11 +266,11 @@ private static function readCoord(array &$exif): array
         }

         // Remove invalid coordinates
-        if (null === $lat && \array_key_exists(LAT_KEY, $exif)) {
-            unset($exif[LAT_KEY]);
+        if (null === $lat && \array_key_exists(LAT_KEY, $exifNumeric)) {
+            unset($exifNumeric[LAT_KEY]);
         }
-        if (null === $lon && \array_key_exists(LON_KEY, $exif)) {
-            unset($exif[LON_KEY]);
+        if (null === $lon && \array_key_exists(LON_KEY, $exifNumeric)) {
+            unset($exifNumeric[LON_KEY]);
         }

         return [$lat, $lon];
diff --git a/lib/Exif.php b/lib/Exif.php
index de600bb95..8ff7116e4 100644
--- a/lib/Exif.php
+++ b/lib/Exif.php
@@ -15,7 +15,32 @@ class Exif
 {
     private const FORBIDDEN_EDIT_MIMES = ['image/bmp', 'image/x-dcraw', 'video/MP2T']; // also update const.ts
     private const EXIFTOOL_TIMEOUT = 30000;
-    private const EXIFTOOL_ARGS = ['-api', 'QuickTimeUTC=1', '-api', 'LargeFileSupport=1', '-n', '-json'];
+    private const EXIFTOOL_ARGS = ['-api', 'LargeFileSupport=1', '-a', '-json'];
+
+    // Fields to search for dates in
+    // Should also be set in ExifFields.php if you want them to show up in the metadata view
+    private const DATE_FIELDS = [
+        // Original date fields
+        'SubSecDateTimeOriginal',
+        'DateTimeOriginal',
+        'SonyDateTime',
+
+        // Create date fields
+        'SubSecCreateDate',
+        'CreationDate',
+        'CreationDateValue',
+        'CreateDate',
+        'TrackCreateDate',
+        'MediaCreateDate',
+        'FileCreateDate',
+
+        // Modify date fields
+        'SubSecModifyDate',
+        'ModifyDate',
+        'TrackModifyDate',
+        'MediaModifyDate',
+        'FileModifyDate',
+    ];

     /** Opened instance of exiftool when running in command mode */
     /** @var null|resource */
@@ -87,7 +112,7 @@ public static function ensureStaticExiftoolProc(): void
      *
      * @return array
      */
-    public static function getExifFromFile(File $file): array
+    public static function getExifFromFile(File $file, array $extraArgs = []): array
     {
         try {
             $path = $file->getStorage()->getLocalFile($file->getInternalPath());
@@ -106,23 +131,14 @@ public static function getExifFromFile(File $file): array
             throw new \Exception("File is not readable: {$path}");
         }

-        $exif = self::getExifFromLocalPath($path);
+        // Get exif data
+        $exif = self::getExifFromLocalPath($path, $extraArgs);

         // We need to remove blacklisted fields to prevent leaking info
-        unset($exif['SourceFile'], $exif['FileName'], $exif['ExifToolVersion'], $exif['Directory'], $exif['FileSize'], $exif['FileModifyDate'], $exif['FileAccessDate'], $exif['FileInodeChangeDate'], $exif['FilePermissions'], $exif['ThumbnailImage']);
+        unset($exif['SourceFile'], $exif['FileName'], $exif['ExifToolVersion'], $exif['Directory'], $exif['FileSize'], $exif['FileAccessDate'], $exif['FileInodeChangeDate'], $exif['FilePermissions'], $exif['ThumbnailImage']);

         // Ignore zero dates
-        $dateFields = [
-            'DateTimeOriginal',
-            'SubSecDateTimeOriginal',
-            'CreateDate',
-            'ModifyDate',
-            'TrackCreateDate',
-            'TrackModifyDate',
-            'MediaCreateDate',
-            'MediaModifyDate',
-        ];
-        foreach ($dateFields as $field) {
+        foreach (self::DATE_FIELDS as $field) {
             if (\array_key_exists($field, $exif) && \is_string($exif[$field]) && str_starts_with($exif[$field], '0000:00:00')) {
                 unset($exif[$field]);
             }
@@ -136,15 +152,15 @@ public static function getExifFromFile(File $file): array
      *
      * @return array
      */
-    public static function getExifFromLocalPath(string $path): array
+    public static function getExifFromLocalPath(string $path, array $extraArgs = []): array
     {
         if (null !== self::$staticProc) {
             self::ensureStaticExiftoolProc();

-            return self::getExifFromLocalPathWithStaticProc($path);
+            return self::getExifFromLocalPathWithStaticProc($path, $extraArgs);
         }

-        return self::getExifFromLocalPathWithSeparateProc($path);
+        return self::getExifFromLocalPathWithSeparateProc($path, $extraArgs);
     }

     /**
@@ -154,86 +170,184 @@ public static function getExifFromLocalPath(string $path): array
      */
     public static function parseExifDate(array $exif): \DateTime
     {
-        // Get date from exif
-        $exifDate = $exif['DateTimeOriginal'] ?? $exif['CreateDate'] ?? null;
-
-        // For videos, prefer CreateDate for timezone (QuickTimeUTC=1)
-        if (preg_match('/^video\/\w+/', (string) ($exif['MIMEType'] ?? null))) {
-            $exifDate = $exif['CreateDate'] ?? $exifDate;
+        // Collect all candidate date strings.
+        // Don't prioritize fields blindly, since different cameras favor
+        // different fields; instead, pick the oldest valid date found in exif.
+        $candidates = [];
+        foreach (self::DATE_FIELDS as $field) {
+            if (isset($exif[$field]) && \is_string($exif[$field]) && $exif[$field] !== '') {
+                $val = (string) $exif[$field];
+                if (!str_starts_with($val, '0000:00:00')) {
+                    $candidates[$field] = $val;
+                }
+            }
         }

-        // Check if we have a date
-        if (null === $exifDate || empty($exifDate) || !\is_string($exifDate)) {
+        // Check if we have any candidates
+        if (empty($candidates)) {
             throw new \Exception('No date found in exif');
         }

-        // Get timezone from exif
-        try {
-            $tzStr = $exif['OffsetTimeOriginal']
-                ?? $exif['OffsetTime']
-                ?? $exif['LocationTZID']
-                ?? throw new \Exception();
-
-            /** @psalm-suppress ArgumentTypeCoercion */
-            $exifTz = new \DateTimeZone((string) $tzStr);
-        } catch (\Exception) {
-            $exifTz = null;
-        }
-
-        // Force UTC if no timezone found
-        $parseTz = $exifTz ?? new \DateTimeZone('UTC');
-
-        // https://github.com/pulsejet/memories/pull/397
-        // https://github.com/pulsejet/memories/issues/485
-
+        // List of accepted parsing formats to try, in priority order.
+        // By not running exiftool with -n, we let it surface precise
+        // subseconds and timezone info for us when available.
+        // Prioritize formats with a timezone and more precision first.
         $formats = [
-            'Y:m:d H:i', // 2023:03:05 18:58
-            'Y:m:d H:iO', // 2023:03:05 18:58+05:00
-            'Y:m:d H:i:s', // 2023:03:05 18:58:17
-            'Y:m:d H:i:sO', // 2023:03:05 10:58:17+05:00
-            'Y:m:d H:i:s.u', // 2023:03:05 10:58:17.000
-            'Y:m:d H:i:s.uO', // 2023:03:05 10:58:17.000Z
+            'Y:m:d H:i:s.uP', // 2023:03:05 10:58:17.000+05:00
+            'Y:m:d H:i:s.uO', // 2023:03:05 10:58:17.000+0500
+            'Y:m:d H:i:sP', // 2023:03:05 10:58:17+05:00
+            'Y:m:d H:i:sO', // 2023:03:05 10:58:17+0500
+            'Y:m:d H:iP', // 2023:03:05 18:58+05:00
+            'Y:m:d H:iO', // 2023:03:05 18:58+0500
+            'Y:m:d H:i:s.u', // 2023:03:05 10:58:17.000
+            'Y:m:d H:i:s', // 2023:03:05 18:58:17
+            'Y:m:d H:i', // 2023:03:05 18:58
         ];

-        /** @var \DateTime $dt */
+        // Loop through candidates, compare them, and keep the oldest valid date
+        $exifDate = null;
         $parsedDate = null;
+        $oldestTimestamp = null;
-
-        foreach ($formats as $format) {
-            if ($parsedDate = \DateTime::createFromFormat($format, $exifDate, $parseTz)) {
-                break;
+        foreach ($candidates as $field => $val) {
+            $parse = null;
+            $matchedFormat = null;
+
+            // Replace a trailing Z (Zulu) with +00:00 so the formats using 'P'
+            // parse it correctly.
+            if (str_ends_with($val, 'Z')) {
+                $val = preg_replace('/Z$/', '+00:00', $val);
             }
-        }
-
-        // If we couldn't parse the date, throw an error
-        if (!$parsedDate) {
-            throw new \Exception("Invalid date: {$exifDate}");
-        }

-        // Epoch timestamp
-        $timestamp = $parsedDate->getTimestamp();
+            // If a timezone exists in a dedicated exif field, get it
+            $exifTz = null;
+            try {
+                $tzStr = $exif['OffsetTimeOriginal']
+                    ?? $exif['OffsetTime']
+                    ?? $exif['OffsetTimeDigitized']
+                    ?? $exif['TimeZone']
+                    ?? $exif['LocationTZID']
+                    ?? throw new \Exception();
+
+                /** @psalm-suppress ArgumentTypeCoercion */
+                $exifTz = new \DateTimeZone((string) $tzStr);
+            } catch (\Exception|\ValueError) {
+                $exifTz = null;
+            }

-        // Filter out dates before 1800 A.D.
-        if ($timestamp < -5364662400) { // 1800 A.D.
-            throw new \Exception("Date too old: {$exifDate}");
-        }
+            // Try to get a valid date with timezone from each candidate using
+            // the accepted formats. matchedFormat is set after any success in
+            // this loop so we know which format succeeded.
+            foreach ($formats as $format) {
+                // If the format contains a timezone offset, parse directly
+                // without touching the value
+                if (strpos($format, 'O') !== false || strpos($format, 'P') !== false) {
+                    $parse = \DateTime::createFromFormat($format, $val);
+                    if ($parse instanceof \DateTime) {
+                        // On success, save the date string's timezone for use
+                        // with formats lacking one, in case no dedicated EXIF
+                        // timezone field exists. This can happen when exiftool
+                        // finds an offset in a field we didn't code for.
+                        if ($exifTz === null) {
+                            // Only trust timezones from original-date fields, since
+                            // modification timestamps may break the oldest-date logic
+                            if (\in_array($field, ['SubSecDateTimeOriginal', 'SubSecCreateDate', 'CreationDate', 'CreationDateValue'], true)) {
+                                $exifTz = $parse->getTimezone();
+                            }
+                        }
+                        // Stop trying other formats on success
+                        $matchedFormat = $format;
+                        break;
+                    }
+                } else {
+                    // The format lacks a timezone offset, so try to correct the
+                    // value with the offset from the dedicated EXIF fields.
+                    // Dates stored in UTC need to be shifted to the local
+                    // timezone; dates already in local time need the timezone
+                    // appended without shifting the clock time.
+
+                    // After examining many samples from different cameras, it
+                    // looks like modern PHOTOS usually save all dates in local
+                    // time, so we append the EXIF timezone without shifting the
+                    // clock time. When modern VIDEOS have dates lacking an
+                    // offset, those dates are usually UTC thanks to QuickTime,
+                    // so we fully shift the clock time into the EXIF timezone.
+
+                    // This should cover the vast majority of cases and only fail when
+                    // 1. A photo's oldest date was saved in UTC without timezone info
+                    // 2. A video's oldest date was saved in local time without timezone info
+                    // 3. The camera simply saved wrong dates
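+
+                    // Illustration (made-up values): with an EXIF offset of
+                    // +05:00, a video date "2023:03:05 10:58:17" is read as
+                    // UTC and becomes 2023-03-05 15:58:17+05:00, while a photo
+                    // with the same string keeps its clock time and becomes
+                    // 2023-03-05 10:58:17+05:00.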
- throw new \Exception("Date too old: {$exifDate}"); - } + // Try to get a valid date with timezone from each candidate using accepted formats + foreach ($formats as $format) { + + // If format contains timezone offset parse directly without messing with it + // Set matchedFormat after any success in this loop so we have the format that succeeded + if (strpos($format, 'O') !== false || strpos($format, 'P') !== false) { + $parse = \DateTime::createFromFormat($format, $val); + if ($parse instanceof \DateTime) { + // On success save date string timezone for use on formats lacking timezone if dedicated EXIF timezone doesn't exist + // This can happen if exiftool is able to find a timezone offset in a field that we didn't code for + if ($exifTz === null) { + // Only use string timezones we are sure are original ones because modification date timestamps may break oldest date logic + if ($field == 'SubSecDateTimeOriginal' || $field == 'SubSecCreateDate' || $field == 'CreationDate' || $field == 'CreationDateValue'){ + $exifTz = $parse->getTimezone(); + } + } + // Stop trying other formats on success + $matchedFormat = $format; + break; + } + } else { + // If format lacks timezone offset try to correct it with offset from dedicated EXIF fields + // Dates that are UTC need to be shifted to the local timezone and dates that aren't need to have the timezone appended without shifting the clock time + + // After examining many samples from different cameras it looks like modern PHOTOS usually have all their dates saved in local time + // For these we will append the EXIF timezone without shifting the clock time + // But when modern VIDEOS have dates that lack a timezone offset these dates are usually in UTC thanks to QuickTime + // So for these we will fully shift the clock time to the EXIF timezone while setting it + + // This handling should cover the vast majority of cases correctly and should only fail when + // 1. The camera saved a photo with it's oldest date saved in UTC without timezone info + // 2. The camera saved a video with it's oldest date in local time without timezone info + // 3. The camera just saved dates completely wrong + if ($exifTz instanceof \DateTimeZone && preg_match('/^video\/\w+/i', (string) ($exif['MIMEType'] ?? 
+
+            // Timestamps compare accurately across timezones, so we use them
+            // to find the oldest date among the candidates
+            if ($parse instanceof \DateTime) {
+                // Epoch timestamp
+                $timestamp = $parse->getTimestamp();
+
+                // Filter out January 1, 1904 12:00:00 AM UTC, which exiftool
+                // returns when QuickTimeUTC is set and the date is
+                // 0000:00:00 00:00:00. Also filter out dates before 1800 A.D.
+                if (-2082844800 !== $timestamp && $timestamp > -5364662400) {
+                    // A more precise datetime always looks newer than a less
+                    // precise one, even when both describe the same general
+                    // timestamp; prefer the more precise one so no accuracy is
+                    // lost. Determine the precision level from the matched format.
+                    $precision = 0;
+                    if ($matchedFormat) {
+                        if (strpos($matchedFormat, 'u') !== false) {
+                            $precision = 3;
+                        } elseif (strpos($matchedFormat, 's') !== false) {
+                            $precision = 2;
+                        } else {
+                            $precision = 1;
+                        }
+                    }
+
+                    // Drop seconds and subseconds just for the comparison, then
+                    // subtract the precision level in seconds to give more
+                    // precise candidates the right priority
+                    $ot = (new \DateTime($parse->format('Y-m-d H:iO')))->modify("-{$precision} seconds")->getTimestamp();
+
+                    // While looping through candidates, keep the oldest
+                    // datetime with the highest precision
+                    if ($oldestTimestamp === null || $ot < $oldestTimestamp) {
+                        $oldestTimestamp = $ot;
+                        $exifDate = $val;
+                        $parsedDate = $parse;
+                    }
+                }
+            }
         }

-        // Force the timezone to be the same as parseTz
-        if ($exifTz) {
-            $parsedDate->setTimezone($exifTz);
+        // Check if we have a date
+        if ($exifDate === null || !$parsedDate instanceof \DateTime) {
+            throw new \Exception('No parsable date found in exif');
         }

         return $parsedDate;
     }

     /**
-     * Get the date taken from either the file or exif data if available.
+     * Get the date taken and timezone from either the file or exif data if available.
      *
      * @param array $exif
      */
@@ -241,33 +355,22 @@ public static function getDateTaken(File $file, array $exif): \DateTime
     {
         try {
             return self::parseExifDate($exif);
-        } catch (\Exception) {
-        } catch (\ValueError) {
+        } catch (\Exception $e) {
+            error_log("parseExifDate failed: " . $e->getMessage());
+        } catch (\ValueError $e) {
+            error_log("parseExifDate ValueError: " . 
$e->getMessage()); } - // Fall back to modification time - $dt = new \DateTime('@'.$file->getMtime()); - - // Set timezone to system timezone - $tz = SystemConfig::get('default_timezone') ?: getenv('TZ') ?: date_default_timezone_get(); - + // Fallback to FileModifyDate in UTC to remain consistent with parseExifDate fallback behavior try { - $dt->setTimezone(new \DateTimeZone($tz)); - } catch (\Exception) { - throw new \Error("FATAL: system timezone is invalid (TZ): {$tz}"); + $dt = new \DateTime('@'.$file->getMtime()); + } catch (\Throwable $e) { + throw new \Error("FATAL: could not read file modification time: " . $e->getMessage()); } - + return $dt; } - /** - * Convert time to local date in UTC. - */ - public static function forgetTimezone(\DateTime $date): \DateTime - { - return new \DateTime($date->format('Y-m-d H:i:s'), new \DateTimeZone('UTC')); - } - /** * Get image dimensions from Exif data. * @@ -444,7 +547,7 @@ private static function initializeStaticExiftoolProc(): void stream_set_blocking(self::$staticPipes[1], false); } - private static function getExifFromLocalPathWithStaticProc(string $path): array + private static function getExifFromLocalPathWithStaticProc(string $path, array $extraArgs = []): array { // This function should not be called if there is no static process if (!self::$staticPipes) { @@ -452,7 +555,9 @@ private static function getExifFromLocalPathWithStaticProc(string $path): array } // Create arguments for exiftool - $args = implode("\n", self::EXIFTOOL_ARGS); + // Merge base args with extra args + $allArgs = array_merge(self::EXIFTOOL_ARGS, $extraArgs); + $args = implode("\n", $allArgs); fwrite(self::$staticPipes[0], "{$path}\n{$args}\n-execute\n"); fflush(self::$staticPipes[0]); diff --git a/python/DATA_LICENSE b/python/DATA_LICENSE new file mode 100644 index 000000000..2da947a75 --- /dev/null +++ b/python/DATA_LICENSE @@ -0,0 +1,538 @@ +Open Database License (ODbL) v1.0 +Disclaimer +Open Data Commons is not a law firm and does not provide legal services of any kind. + +Open Data Commons has no formal relationship with you. Your receipt of this document does not create any kind of agent-client relationship. Please seek the advice of a suitably qualified legal professional licensed to practice in your jurisdiction before using this document. + +No warranties and disclaimer of any damages. This information is provided ‘as is‘, and this site makes no warranties on the information provided. Any damages resulting from its use are disclaimed. + +Plain language summary +A plain language summary of the Open Database License is available. + +Alternative formats: +Plain Text + +ODC Open Database License (ODbL) +Preamble +The Open Database License (ODbL) is a license agreement intended to +allow users to freely share, modify, and use this Database while +maintaining this same freedom for others. Many databases are covered by +copyright, and therefore this document licenses these rights. Some +jurisdictions, mainly in the European Union, have specific rights that +cover databases, and so the ODbL addresses these rights, too. Finally, +the ODbL is also an agreement in contract for users of this Database to +act in certain ways in return for accessing this Database. + +Databases can contain a wide variety of types of content (images, +audiovisual material, and sounds all in the same database, for example), +and so the ODbL only governs the rights over the Database, and not the +contents of the Database individually. 
Licensors should use the ODbL +together with another license for the contents, if the contents have a +single set of rights that uniformly covers all of the contents. If the +contents have multiple sets of different rights, Licensors should +describe what rights govern what contents together in the individual +record or in some other way that clarifies what rights apply. + +Sometimes the contents of a database, or the database itself, can be +covered by other rights not addressed here (such as private contracts, +trade mark over the name, or privacy rights / data protection rights +over information in the contents), and so you are advised that you may +have to consult other documents or clear other rights before doing +activities not covered by this License. + +The Licensor (as defined below) + +and + +You (as defined below) + +agree as follows: + +1.0 Definitions of Capitalised Words +“Collective Database” – Means this Database in unmodified form as part +of a collection of independent databases in themselves that together are +assembled into a collective whole. A work that constitutes a Collective +Database will not be considered a Derivative Database. + +“Convey” – As a verb, means Using the Database, a Derivative Database, +or the Database as part of a Collective Database in any way that enables +a Person to make or receive copies of the Database or a Derivative +Database. Conveying does not include interaction with a user through a +computer network, or creating and Using a Produced Work, where no +transfer of a copy of the Database or a Derivative Database occurs. +“Contents” – The contents of this Database, which includes the +information, independent works, or other material collected into the +Database. For example, the contents of the Database could be factual +data or works such as images, audiovisual material, text, or sounds. + +“Database” – A collection of material (the Contents) arranged in a +systematic or methodical way and individually accessible by electronic +or other means offered under the terms of this License. + +“Database Directive” – Means Directive 96/9/EC of the European +Parliament and of the Council of 11 March 1996 on the legal protection +of databases, as amended or succeeded. + +“Database Right” – Means rights resulting from the Chapter III (“sui +generis”) rights in the Database Directive (as amended and as transposed +by member states), which includes the Extraction and Re-utilisation of +the whole or a Substantial part of the Contents, as well as any similar +rights available in the relevant jurisdiction under Section 10.4. + +“Derivative Database” – Means a database based upon the Database, and +includes any translation, adaptation, arrangement, modification, or any +other alteration of the Database or of a Substantial part of the +Contents. This includes, but is not limited to, Extracting or +Re-utilising the whole or a Substantial part of the Contents in a new +Database. + +“Extraction” – Means the permanent or temporary transfer of all or a +Substantial part of the Contents to another medium by any means or in +any form. + +“License” – Means this license agreement and is both a license of rights +such as copyright and Database Rights and an agreement in contract. + +“Licensor” – Means the Person that offers the Database under the terms +of this License. + +“Person” – Means a natural or legal person or a body of persons +corporate or incorporate. 
+ +“Produced Work” – a work (such as an image, audiovisual material, text, +or sounds) resulting from using the whole or a Substantial part of the +Contents (via a search or other query) from this Database, a Derivative +Database, or this Database as part of a Collective Database. + +“Publicly” – means to Persons other than You or under Your control by +either more than 50% ownership or by the power to direct their +activities (such as contracting with an independent consultant). + +“Re-utilisation” – means any form of making available to the public all +or a Substantial part of the Contents by the distribution of copies, by +renting, by online or other forms of transmission. + +“Substantial” – Means substantial in terms of quantity or quality or a +combination of both. The repeated and systematic Extraction or +Re-utilisation of insubstantial parts of the Contents may amount to the +Extraction or Re-utilisation of a Substantial part of the Contents. + +“Use” – As a verb, means doing any act that is restricted by copyright +or Database Rights whether in the original medium or any other; and +includes without limitation distributing, copying, publicly performing, +publicly displaying, and preparing derivative works of the Database, as +well as modifying the Database as may be technically necessary to use it +in a different mode or format. + +“You” – Means a Person exercising rights under this License who has not +previously violated the terms of this License with respect to the +Database, or who has received express permission from the Licensor to +exercise rights under this License despite a previous violation. + +Words in the singular include the plural and vice versa. + +2.0 What this License covers +2.1. Legal effect of this document. This License is: + + a. A license of applicable copyright and neighbouring rights; + + b. A license of the Database Right; and + + c. An agreement in contract between You and the Licensor. + +2.2 Legal rights covered. This License covers the legal rights in the +Database, including: + + a. Copyright. Any copyright or neighbouring rights in the Database. +The copyright licensed includes any individual elements of the +Database, but does not cover the copyright over the Contents +independent of this Database. See Section 2.4 for details. Copyright +law varies between jurisdictions, but is likely to cover: the Database +model or schema, which is the structure, arrangement, and organisation +of the Database, and can also include the Database tables and table +indexes; the data entry and output sheets; and the Field names of +Contents stored in the Database; + + b. Database Rights. Database Rights only extend to the Extraction and +Re-utilisation of the whole or a Substantial part of the Contents. +Database Rights can apply even when there is no copyright over the + Database. Database Rights can also apply when the Contents are removed +from the Database and are selected and arranged in a way that would +not infringe any applicable copyright; and + + c. Contract. This is an agreement between You and the Licensor for +access to the Database. In return you agree to certain conditions of +use on this access as outlined in this License. + +2.3 Rights not covered. + + a. This License does not apply to computer programs used in the making +or operation of the Database; + + b. This License does not cover any patents over the Contents or the +Database; and + + c. This License does not cover any trademarks associated with the + Database. 
+ +2.4 Relationship to Contents in the Database. The individual items of +the Contents contained in this Database may be covered by other rights, +including copyright, patent, data protection, privacy, or personality +rights, and this License does not cover any rights (other than Database +Rights or in contract) in individual Contents contained in the Database. +For example, if used on a Database of images (the Contents), this +License would not apply to copyright over individual images, which could +have their own separate licenses, or one single license covering all of +the rights over the images. + +3.0 Rights granted +3.1 Subject to the terms and conditions of this License, the Licensor +grants to You a worldwide, royalty-free, non-exclusive, terminable (but +only under Section 9) license to Use the Database for the duration of +any applicable copyright and Database Rights. These rights explicitly +include commercial use, and do not exclude any field of endeavour. To +the extent possible in the relevant jurisdiction, these rights may be +exercised in all media and formats whether now known or created in the +future. + +The rights granted cover, for example: + + a. Extraction and Re-utilisation of the whole or a Substantial part of +the Contents; + + b. Creation of Derivative Databases; + + c. Creation of Collective Databases; + + d. Creation of temporary or permanent reproductions by any means and +in any form, in whole or in part, including of any Derivative +Databases or as a part of Collective Databases; and + + e. Distribution, communication, display, lending, making available, or +performance to the public by any means and in any form, in whole or in +part, including of any Derivative Database or as a part of Collective + Databases. + +3.2 Compulsory license schemes. For the avoidance of doubt: + + a. Non-waivable compulsory license schemes. In those jurisdictions in +which the right to collect royalties through any statutory or +compulsory licensing scheme cannot be waived, the Licensor reserves +the exclusive right to collect such royalties for any exercise by You +of the rights granted under this License; + + b. Waivable compulsory license schemes. In those jurisdictions in +which the right to collect royalties through any statutory or +compulsory licensing scheme can be waived, the Licensor waives the +exclusive right to collect such royalties for any exercise by You of +the rights granted under this License; and, + + c. Voluntary license schemes. The Licensor waives the right to collect +royalties, whether individually or, in the event that the Licensor is +a member of a collecting society that administers voluntary licensing +schemes, via that society, from any exercise by You of the rights +granted under this License. + +3.3 The right to release the Database under different terms, or to stop +distributing or making available the Database, is reserved. Note that +this Database may be multiple-licensed, and so You may have the choice +of using alternative licenses for this Database. Subject to Section +10.4, all other rights not expressly granted by Licensor are reserved. + +4.0 Conditions of Use +4.1 The rights granted in Section 3 above are expressly made subject to +Your complying with the following conditions of use. These are important +conditions of this License, and if You fail to follow them, You will be +in material breach of its terms. + +4.2 Notices. 
If You Publicly Convey this Database, any Derivative +Database, or the Database as part of a Collective Database, then You +must: + + a. Do so only under the terms of this License or another license +permitted under Section 4.4; + + b. Include a copy of this License (or, as applicable, a license +permitted under Section 4.4) or its Uniform Resource Identifier (URI) +with the Database or Derivative Database, including both in the +Database or Derivative Database and in any relevant documentation; and + + c. Keep intact any copyright or Database Right notices and notices +that refer to this License. + + d. If it is not possible to put the required notices in a particular +file due to its structure, then You must include the notices in a +location (such as a relevant directory) where users would be likely to +look for it. + +4.3 Notice for using output (Contents). Creating and Using a Produced +Work does not require the notice in Section 4.2. However, if you +Publicly Use a Produced Work, You must include a notice associated with +the Produced Work reasonably calculated to make any Person that uses, +views, accesses, interacts with, or is otherwise exposed to the Produced +Work aware that Content was obtained from the Database, Derivative +Database, or the Database as part of a Collective Database, and that it +is available under this License. + + a. Example notice. The following text will satisfy notice under +Section 4.3: + + Contains information from DATABASE NAME, which is made available + here under the Open Database License (ODbL). +DATABASE NAME should be replaced with the name of the Database and a +hyperlink to the URI of the Database. “Open Database License” should +contain a hyperlink to the URI of the text of this License. If +hyperlinks are not possible, You should include the plain text of the +required URI’s with the above notice. + +4.4 Share alike. + + a. Any Derivative Database that You Publicly Use must be only under +the terms of: + + i. This License; + + ii. A later version of this License similar in spirit to this +License; or + + iii. A compatible license. + +If You license the Derivative Database under one of the licenses +mentioned in (iii), You must comply with the terms of that license. + + b. For the avoidance of doubt, Extraction or Re-utilisation of the +whole or a Substantial part of the Contents into a new database is a +Derivative Database and must comply with Section 4.4. + + c. Derivative Databases and Produced Works. A Derivative Database is +Publicly Used and so must comply with Section 4.4. if a Produced Work +created from the Derivative Database is Publicly Used. + + d. Share Alike and additional Contents. For the avoidance of doubt, +You must not add Contents to Derivative Databases under Section 4.4 a +that are incompatible with the rights granted under this License. + + e. Compatible licenses. Licensors may authorise a proxy to determine +compatible licenses under Section 4.4 a iii. If they do so, the +authorised proxy’s public statement of acceptance of a compatible +license grants You permission to use the compatible license. + +4.5 Limits of Share Alike. The requirements of Section 4.4 do not apply +in the following: + + a. For the avoidance of doubt, You are not required to license +Collective Databases under this License if You incorporate this +Database or a Derivative Database in the collection, but this License +still applies to this Database or a Derivative Database as a part of +the Collective Database; + + b. 
Using this Database, a Derivative Database, or this Database as +part of a Collective Database to create a Produced Work does not +create a Derivative Database for purposes of Section 4.4; and + + c. Use of a Derivative Database internally within an organisation is +not to the public and therefore does not fall under the requirements +of Section 4.4. + +4.6 Access to Derivative Databases. If You Publicly Use a Derivative +Database or a Produced Work from a Derivative Database, You must also +offer to recipients of the Derivative Database or Produced Work a copy +in a machine readable form of: + + a. The entire Derivative Database; or + + b. A file containing all of the alterations made to the Database or +the method of making the alterations to the Database (such as an +algorithm), including any additional Contents, that make up all the +differences between the Database and the Derivative Database. + +The Derivative Database (under a.) or alteration file (under b.) must be +available at no more than a reasonable production cost for physical +distributions and free of charge if distributed over the internet. + +4.7 Technological measures and additional terms + + a. This License does not allow You to impose (except subject to +Section 4.7 b.) any terms or any technological measures on the +Database, a Derivative Database, or the whole or a Substantial part of +the Contents that alter or restrict the terms of this License, or any +rights granted under it, or have the effect or intent of restricting +the ability of any person to exercise those rights. + + b. Parallel distribution. You may impose terms or technological +measures on the Database, a Derivative Database, or the whole or a +Substantial part of the Contents (a “Restricted Database”) in +contravention of Section 4.74 a. only if You also make a copy of the +Database or a Derivative Database available to the recipient of the +Restricted Database: + + i. That is available without additional fee; + + ii. That is available in a medium that does not alter or restrict +the terms of this License, or any rights granted under it, or have +the effect or intent of restricting the ability of any person to +exercise those rights (an “Unrestricted Database”); and + + iii. The Unrestricted Database is at least as accessible to the +recipient as a practical matter as the Restricted Database. + + c. For the avoidance of doubt, You may place this Database or a +Derivative Database in an authenticated environment, behind a +password, or within a similar access control scheme provided that You +do not alter or restrict the terms of this License or any rights +granted under it or have the effect or intent of restricting the +ability of any person to exercise those rights. + +4.8 Licensing of others. You may not sublicense the Database. Each time +You communicate the Database, the whole or Substantial part of the +Contents, or any Derivative Database to anyone else in any way, the +Licensor offers to the recipient a license to the Database on the same +terms and conditions as this License. You are not responsible for +enforcing compliance by third parties with this License, but You may +enforce any rights that You have over a Derivative Database. You are +solely responsible for any modifications of a Derivative Database made +by You or another Person at Your direction. You may not impose any +further restrictions on the exercise of the rights granted or affirmed +under this License. + +5.0 Moral rights +5.1 Moral rights. 
This section covers moral rights, including any rights +to be identified as the author of the Database or to object to treatment +that would otherwise prejudice the author’s honour and reputation, or +any other derogatory treatment: + + a. For jurisdictions allowing waiver of moral rights, Licensor waives +all moral rights that Licensor may have in the Database to the fullest +extent possible by the law of the relevant jurisdiction under Section + 10.4; + + b. If waiver of moral rights under Section 5.1 a in the relevant +jurisdiction is not possible, Licensor agrees not to assert any moral +rights over the Database and waives all claims in moral rights to the +fullest extent possible by the law of the relevant jurisdiction under +Section 10.4; and + + c. For jurisdictions not allowing waiver or an agreement not to assert +moral rights under Section 5.1 a and b, the author may retain their +moral rights over certain aspects of the Database. + +Please note that some jurisdictions do not allow for the waiver of moral +rights, and so moral rights may still subsist over the Database in some +jurisdictions. + +6.0 Fair dealing, Database exceptions, and other rights not affected +6.1 This License does not affect any rights that You or anyone else may +independently have under any applicable law to make any use of this +Database, including without limitation: + + a. Exceptions to the Database Right including: Extraction of Contents +from non-electronic Databases for private purposes, Extraction for +purposes of illustration for teaching or scientific research, and +Extraction or Re-utilisation for public security or an administrative +or judicial procedure. + + b. Fair dealing, fair use, or any other legally recognised limitation +or exception to infringement of copyright or other applicable laws. + +6.2 This License does not affect any rights of lawful users to Extract +and Re-utilise insubstantial parts of the Contents, evaluated +quantitatively or qualitatively, for any purposes whatsoever, including +creating a Derivative Database (subject to other rights over the +Contents, see Section 2.4). The repeated and systematic Extraction or +Re-utilisation of insubstantial parts of the Contents may however amount +to the Extraction or Re-utilisation of a Substantial part of the +Contents. + +7.0 Warranties and Disclaimer +7.1 The Database is licensed by the Licensor “as is” and without any +warranty of any kind, either express, implied, or arising by statute, +custom, course of dealing, or trade usage. Licensor specifically +disclaims any and all implied warranties or conditions of title, +non-infringement, accuracy or completeness, the presence or absence of +errors, fitness for a particular purpose, merchantability, or otherwise. +Some jurisdictions do not allow the exclusion of implied warranties, so +this exclusion may not apply to You. + +8.0 Limitation of liability +8.1 Subject to any liability that may not be excluded or limited by law, +the Licensor is not liable for, and expressly excludes, all liability +for loss or damage however and whenever caused to anyone by any use +under this License, whether by You or by anyone else, and whether caused +by any fault on the part of the Licensor or not. This exclusion of +liability includes, but is not limited to, any special, incidental, +consequential, punitive, or exemplary damages such as loss of revenue, +data, anticipated profits, and lost business. 
This exclusion applies +even if the Licensor has been advised of the possibility of such +damages. + +8.2 If liability may not be excluded by law, it is limited to actual and +direct financial loss to the extent it is caused by proved negligence on +the part of the Licensor. + +9.0 Termination of Your rights under this License +9.1 Any breach by You of the terms and conditions of this License +automatically terminates this License with immediate effect and without +notice to You. For the avoidance of doubt, Persons who have received the +Database, the whole or a Substantial part of the Contents, Derivative +Databases, or the Database as part of a Collective Database from You +under this License will not have their licenses terminated provided +their use is in full compliance with this License or a license granted +under Section 4.8 of this License. Sections 1, 2, 7, 8, 9 and 10 will +survive any termination of this License. + +9.2 If You are not in breach of the terms of this License, the Licensor +will not terminate Your rights under it. + +9.3 Unless terminated under Section 9.1, this License is granted to You +for the duration of applicable rights in the Database. + +9.4 Reinstatement of rights. If you cease any breach of the terms and +conditions of this License, then your full rights under this License +will be reinstated: + + a. Provisionally and subject to permanent termination until the 60th +day after cessation of breach; + + b. Permanently on the 60th day after cessation of breach unless +otherwise reasonably notified by the Licensor; or + + c. Permanently if reasonably notified by the Licensor of the +violation, this is the first time You have received notice of +violation of this License from the Licensor, and You cure the +violation prior to 30 days after your receipt of the notice. + +Persons subject to permanent termination of rights are not eligible to +be a recipient and receive a license under Section 4.8. + +9.5 Notwithstanding the above, Licensor reserves the right to release +the Database under different license terms or to stop distributing or +making available the Database. Releasing the Database under different +license terms or stopping the distribution of the Database will not +withdraw this License (or any other license that has been, or is +required to be, granted under the terms of this License), and this +License will continue in full force and effect unless terminated as +stated above. + +10.0 General +10.1 If any provision of this License is held to be invalid or +unenforceable, that must not affect the validity or enforceability of +the remainder of the terms and conditions of this License and each +remaining provision of this License shall be valid and enforced to the +fullest extent permitted by law. + +10.2 This License is the entire agreement between the parties with +respect to the rights granted here over the Database. It replaces any +earlier understandings, agreements or representations with respect to +the Database. + +10.3 If You are in breach of the terms of this License, You will not be +entitled to rely on the terms of this License or to complain of any +breach by the Licensor. + +10.4 Choice of law. This License takes effect in and will be governed by +the laws of the relevant jurisdiction in which the License terms are +sought to be enforced. 
If the standard suite of rights granted under +applicable copyright law and Database Rights in the relevant +jurisdiction includes additional rights not granted under this License, +these additional rights are granted in this License in order to meet the +terms of this License. diff --git a/python/_cffi_backend.cpython-312-x86_64-linux-gnu.so b/python/_cffi_backend.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..156ee4318 Binary files /dev/null and b/python/_cffi_backend.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/bin/f2py b/python/bin/f2py new file mode 100644 index 000000000..c4f7341a2 --- /dev/null +++ b/python/bin/f2py @@ -0,0 +1,8 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from numpy.f2py.f2py2e import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/python/bin/numpy-config b/python/bin/numpy-config new file mode 100644 index 000000000..a69d715fd --- /dev/null +++ b/python/bin/numpy-config @@ -0,0 +1,8 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from numpy._configtool import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/python/bin/timezonefinder b/python/bin/timezonefinder new file mode 100644 index 000000000..dade4fb21 --- /dev/null +++ b/python/bin/timezonefinder @@ -0,0 +1,8 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +import re +import sys +from timezonefinder.command_line import main +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/python/cffi-2.0.0.dist-info/INSTALLER b/python/cffi-2.0.0.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/python/cffi-2.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/python/cffi-2.0.0.dist-info/METADATA b/python/cffi-2.0.0.dist-info/METADATA new file mode 100644 index 000000000..67508e56a --- /dev/null +++ b/python/cffi-2.0.0.dist-info/METADATA @@ -0,0 +1,68 @@ +Metadata-Version: 2.4 +Name: cffi +Version: 2.0.0 +Summary: Foreign Function Interface for Python calling C code. 
+Author: Armin Rigo, Maciej Fijalkowski +Maintainer: Matt Davis, Matt Clay, Matti Picus +License-Expression: MIT +Project-URL: Documentation, https://cffi.readthedocs.io/ +Project-URL: Changelog, https://cffi.readthedocs.io/en/latest/whatsnew.html +Project-URL: Downloads, https://github.com/python-cffi/cffi/releases +Project-URL: Contact, https://groups.google.com/forum/#!forum/python-cffi +Project-URL: Source Code, https://github.com/python-cffi/cffi +Project-URL: Issue Tracker, https://github.com/python-cffi/cffi/issues +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Free Threading :: 2 - Beta +Classifier: Programming Language :: Python :: Implementation :: CPython +Requires-Python: >=3.9 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS +Requires-Dist: pycparser; implementation_name != "PyPy" +Dynamic: license-file + +[![GitHub Actions Status](https://github.com/python-cffi/cffi/actions/workflows/ci.yaml/badge.svg?branch=main)](https://github.com/python-cffi/cffi/actions/workflows/ci.yaml?query=branch%3Amain++) +[![PyPI version](https://img.shields.io/pypi/v/cffi.svg)](https://pypi.org/project/cffi) +[![Read the Docs](https://img.shields.io/badge/docs-latest-blue.svg)][Documentation] + + +CFFI +==== + +Foreign Function Interface for Python calling C code. + +Please see the [Documentation] or uncompiled in the `doc/` subdirectory. + +Download +-------- + +[Download page](https://github.com/python-cffi/cffi/releases) + +Source Code +----------- + +Source code is publicly available on +[GitHub](https://github.com/python-cffi/cffi). + +Contact +------- + +[Mailing list](https://groups.google.com/forum/#!forum/python-cffi) + +Testing/development tips +------------------------ + +After `git clone` or `wget && tar`, we will get a directory called `cffi` or `cffi-x.x.x`. we call it `repo-directory`. To run tests under CPython, run the following in the `repo-directory`: + + pip install pytest + pip install -e . 
# editable install of CFFI for local development + pytest src/c/ testing/ + +[Documentation]: http://cffi.readthedocs.org/ diff --git a/python/cffi-2.0.0.dist-info/RECORD b/python/cffi-2.0.0.dist-info/RECORD new file mode 100644 index 000000000..148b18a2a --- /dev/null +++ b/python/cffi-2.0.0.dist-info/RECORD @@ -0,0 +1,49 @@ +_cffi_backend.cpython-312-x86_64-linux-gnu.so,sha256=AGLtw5fn9u4Cmwk3BbGlsXG7VZEvQekABMyEGuRZmcE,348808 +cffi-2.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +cffi-2.0.0.dist-info/METADATA,sha256=uYzn40F68Im8EtXHNBLZs7FoPM-OxzyYbDWsjJvhujk,2559 +cffi-2.0.0.dist-info/RECORD,, +cffi-2.0.0.dist-info/WHEEL,sha256=aSgG0F4rGPZtV0iTEIfy6dtHq6g67Lze3uLfk0vWn88,151 +cffi-2.0.0.dist-info/entry_points.txt,sha256=y6jTxnyeuLnL-XJcDv8uML3n6wyYiGRg8MTp_QGJ9Ho,75 +cffi-2.0.0.dist-info/licenses/AUTHORS,sha256=KmemC7-zN1nWfWRf8TG45ta8TK_CMtdR_Kw-2k0xTMg,208 +cffi-2.0.0.dist-info/licenses/LICENSE,sha256=W6JN3FcGf5JJrdZEw6_EGl1tw34jQz73Wdld83Cwr2M,1123 +cffi-2.0.0.dist-info/top_level.txt,sha256=rE7WR3rZfNKxWI9-jn6hsHCAl7MDkB-FmuQbxWjFehQ,19 +cffi/__init__.py,sha256=-ksBQ7MfDzVvbBlV_ftYBWAmEqfA86ljIzMxzaZeAlI,511 +cffi/__pycache__/__init__.cpython-312.pyc,, +cffi/__pycache__/_imp_emulation.cpython-312.pyc,, +cffi/__pycache__/_shimmed_dist_utils.cpython-312.pyc,, +cffi/__pycache__/api.cpython-312.pyc,, +cffi/__pycache__/backend_ctypes.cpython-312.pyc,, +cffi/__pycache__/cffi_opcode.cpython-312.pyc,, +cffi/__pycache__/commontypes.cpython-312.pyc,, +cffi/__pycache__/cparser.cpython-312.pyc,, +cffi/__pycache__/error.cpython-312.pyc,, +cffi/__pycache__/ffiplatform.cpython-312.pyc,, +cffi/__pycache__/lock.cpython-312.pyc,, +cffi/__pycache__/model.cpython-312.pyc,, +cffi/__pycache__/pkgconfig.cpython-312.pyc,, +cffi/__pycache__/recompiler.cpython-312.pyc,, +cffi/__pycache__/setuptools_ext.cpython-312.pyc,, +cffi/__pycache__/vengine_cpy.cpython-312.pyc,, +cffi/__pycache__/vengine_gen.cpython-312.pyc,, +cffi/__pycache__/verifier.cpython-312.pyc,, +cffi/_cffi_errors.h,sha256=zQXt7uR_m8gUW-fI2hJg0KoSkJFwXv8RGUkEDZ177dQ,3908 +cffi/_cffi_include.h,sha256=Exhmgm9qzHWzWivjfTe0D7Xp4rPUkVxdNuwGhMTMzbw,15055 +cffi/_embedding.h,sha256=Ai33FHblE7XSpHOCp8kPcWwN5_9BV14OvN0JVa6ITpw,18786 +cffi/_imp_emulation.py,sha256=RxREG8zAbI2RPGBww90u_5fi8sWdahpdipOoPzkp7C0,2960 +cffi/_shimmed_dist_utils.py,sha256=Bjj2wm8yZbvFvWEx5AEfmqaqZyZFhYfoyLLQHkXZuao,2230 +cffi/api.py,sha256=alBv6hZQkjpmZplBphdaRn2lPO9-CORs_M7ixabvZWI,42169 +cffi/backend_ctypes.py,sha256=h5ZIzLc6BFVXnGyc9xPqZWUS7qGy7yFSDqXe68Sa8z4,42454 +cffi/cffi_opcode.py,sha256=JDV5l0R0_OadBX_uE7xPPTYtMdmpp8I9UYd6av7aiDU,5731 +cffi/commontypes.py,sha256=7N6zPtCFlvxXMWhHV08psUjdYIK2XgsN3yo5dgua_v4,2805 +cffi/cparser.py,sha256=QUTfmlL-aO-MYR8bFGlvAUHc36OQr7XYLe0WLkGFjRo,44790 +cffi/error.py,sha256=v6xTiS4U0kvDcy4h_BDRo5v39ZQuj-IMRYLv5ETddZs,877 +cffi/ffiplatform.py,sha256=avxFjdikYGJoEtmJO7ewVmwG_VEVl6EZ_WaNhZYCqv4,3584 +cffi/lock.py,sha256=l9TTdwMIMpi6jDkJGnQgE9cvTIR7CAntIJr8EGHt3pY,747 +cffi/model.py,sha256=W30UFQZE73jL5Mx5N81YT77us2W2iJjTm0XYfnwz1cg,21797 +cffi/parse_c_type.h,sha256=OdwQfwM9ktq6vlCB43exFQmxDBtj2MBNdK8LYl15tjw,5976 +cffi/pkgconfig.py,sha256=LP1w7vmWvmKwyqLaU1Z243FOWGNQMrgMUZrvgFuOlco,4374 +cffi/recompiler.py,sha256=78J6lMEEOygXNmjN9-fOFFO3j7eW-iFxSrxfvQb54bY,65509 +cffi/setuptools_ext.py,sha256=0rCwBJ1W7FHWtiMKfNXsSST88V8UXrui5oeXFlDNLG8,9411 +cffi/vengine_cpy.py,sha256=oyQKD23kpE0aChUKA8Jg0e723foPiYzLYEdb-J0MiNs,43881 +cffi/vengine_gen.py,sha256=DUlEIrDiVin1Pnhn1sfoamnS5NLqfJcOdhRoeSNeJRg,26939 
+cffi/verifier.py,sha256=oX8jpaohg2Qm3aHcznidAdvrVm5N4sQYG0a3Eo5mIl4,11182 diff --git a/python/cffi-2.0.0.dist-info/WHEEL b/python/cffi-2.0.0.dist-info/WHEEL new file mode 100644 index 000000000..e21e9f2f8 --- /dev/null +++ b/python/cffi-2.0.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: false +Tag: cp312-cp312-manylinux_2_17_x86_64 +Tag: cp312-cp312-manylinux2014_x86_64 + diff --git a/python/cffi-2.0.0.dist-info/entry_points.txt b/python/cffi-2.0.0.dist-info/entry_points.txt new file mode 100644 index 000000000..4b0274f23 --- /dev/null +++ b/python/cffi-2.0.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[distutils.setup_keywords] +cffi_modules = cffi.setuptools_ext:cffi_modules diff --git a/python/cffi-2.0.0.dist-info/licenses/AUTHORS b/python/cffi-2.0.0.dist-info/licenses/AUTHORS new file mode 100644 index 000000000..370a25d31 --- /dev/null +++ b/python/cffi-2.0.0.dist-info/licenses/AUTHORS @@ -0,0 +1,8 @@ +This package has been mostly done by Armin Rigo with help from +Maciej Fijałkowski. The idea is heavily based (although not directly +copied) from LuaJIT ffi by Mike Pall. + + +Other contributors: + + Google Inc. diff --git a/python/cffi-2.0.0.dist-info/licenses/LICENSE b/python/cffi-2.0.0.dist-info/licenses/LICENSE new file mode 100644 index 000000000..0a1dbfb01 --- /dev/null +++ b/python/cffi-2.0.0.dist-info/licenses/LICENSE @@ -0,0 +1,23 @@ + +Except when otherwise stated (look for LICENSE files in directories or +information at the beginning of each file) all software and +documentation is licensed as follows: + + MIT No Attribution + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the + Software is furnished to do so. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. + diff --git a/python/cffi-2.0.0.dist-info/top_level.txt b/python/cffi-2.0.0.dist-info/top_level.txt new file mode 100644 index 000000000..f64577957 --- /dev/null +++ b/python/cffi-2.0.0.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_cffi_backend +cffi diff --git a/python/cffi/__init__.py b/python/cffi/__init__.py new file mode 100644 index 000000000..c99ec3d48 --- /dev/null +++ b/python/cffi/__init__.py @@ -0,0 +1,14 @@ +__all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError', + 'FFIError'] + +from .api import FFI +from .error import CDefError, FFIError, VerificationError, VerificationMissing +from .error import PkgConfigError + +__version__ = "2.0.0" +__version_info__ = (2, 0, 0) + +# The verifier module file names are based on the CRC32 of a string that +# contains the following version number. It may be older than __version__ +# if nothing is clearly incompatible. 
+__version_verifier_modules__ = "0.8.6" diff --git a/python/cffi/__pycache__/__init__.cpython-312.pyc b/python/cffi/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..384829f15 Binary files /dev/null and b/python/cffi/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/_imp_emulation.cpython-312.pyc b/python/cffi/__pycache__/_imp_emulation.cpython-312.pyc new file mode 100644 index 000000000..bae2eb68d Binary files /dev/null and b/python/cffi/__pycache__/_imp_emulation.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/_shimmed_dist_utils.cpython-312.pyc b/python/cffi/__pycache__/_shimmed_dist_utils.cpython-312.pyc new file mode 100644 index 000000000..a22e809e6 Binary files /dev/null and b/python/cffi/__pycache__/_shimmed_dist_utils.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/api.cpython-312.pyc b/python/cffi/__pycache__/api.cpython-312.pyc new file mode 100644 index 000000000..f5b4db561 Binary files /dev/null and b/python/cffi/__pycache__/api.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/backend_ctypes.cpython-312.pyc b/python/cffi/__pycache__/backend_ctypes.cpython-312.pyc new file mode 100644 index 000000000..9018296ae Binary files /dev/null and b/python/cffi/__pycache__/backend_ctypes.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/cffi_opcode.cpython-312.pyc b/python/cffi/__pycache__/cffi_opcode.cpython-312.pyc new file mode 100644 index 000000000..9b5d47730 Binary files /dev/null and b/python/cffi/__pycache__/cffi_opcode.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/commontypes.cpython-312.pyc b/python/cffi/__pycache__/commontypes.cpython-312.pyc new file mode 100644 index 000000000..ab53d1593 Binary files /dev/null and b/python/cffi/__pycache__/commontypes.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/cparser.cpython-312.pyc b/python/cffi/__pycache__/cparser.cpython-312.pyc new file mode 100644 index 000000000..be33afc69 Binary files /dev/null and b/python/cffi/__pycache__/cparser.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/error.cpython-312.pyc b/python/cffi/__pycache__/error.cpython-312.pyc new file mode 100644 index 000000000..93d97c0f9 Binary files /dev/null and b/python/cffi/__pycache__/error.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/ffiplatform.cpython-312.pyc b/python/cffi/__pycache__/ffiplatform.cpython-312.pyc new file mode 100644 index 000000000..da6b2f672 Binary files /dev/null and b/python/cffi/__pycache__/ffiplatform.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/lock.cpython-312.pyc b/python/cffi/__pycache__/lock.cpython-312.pyc new file mode 100644 index 000000000..aa3349f6c Binary files /dev/null and b/python/cffi/__pycache__/lock.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/model.cpython-312.pyc b/python/cffi/__pycache__/model.cpython-312.pyc new file mode 100644 index 000000000..57db0b1ce Binary files /dev/null and b/python/cffi/__pycache__/model.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/pkgconfig.cpython-312.pyc b/python/cffi/__pycache__/pkgconfig.cpython-312.pyc new file mode 100644 index 000000000..dc20bd435 Binary files /dev/null and b/python/cffi/__pycache__/pkgconfig.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/recompiler.cpython-312.pyc b/python/cffi/__pycache__/recompiler.cpython-312.pyc new file mode 100644 index 000000000..f73845f57 Binary files /dev/null and b/python/cffi/__pycache__/recompiler.cpython-312.pyc differ diff --git 
a/python/cffi/__pycache__/setuptools_ext.cpython-312.pyc b/python/cffi/__pycache__/setuptools_ext.cpython-312.pyc new file mode 100644 index 000000000..fd474b04f Binary files /dev/null and b/python/cffi/__pycache__/setuptools_ext.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/vengine_cpy.cpython-312.pyc b/python/cffi/__pycache__/vengine_cpy.cpython-312.pyc new file mode 100644 index 000000000..762b60a8f Binary files /dev/null and b/python/cffi/__pycache__/vengine_cpy.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/vengine_gen.cpython-312.pyc b/python/cffi/__pycache__/vengine_gen.cpython-312.pyc new file mode 100644 index 000000000..d8bb6cb61 Binary files /dev/null and b/python/cffi/__pycache__/vengine_gen.cpython-312.pyc differ diff --git a/python/cffi/__pycache__/verifier.cpython-312.pyc b/python/cffi/__pycache__/verifier.cpython-312.pyc new file mode 100644 index 000000000..08d40d79e Binary files /dev/null and b/python/cffi/__pycache__/verifier.cpython-312.pyc differ diff --git a/python/cffi/_cffi_errors.h b/python/cffi/_cffi_errors.h new file mode 100644 index 000000000..158e05903 --- /dev/null +++ b/python/cffi/_cffi_errors.h @@ -0,0 +1,149 @@ +#ifndef CFFI_MESSAGEBOX +# ifdef _MSC_VER +# define CFFI_MESSAGEBOX 1 +# else +# define CFFI_MESSAGEBOX 0 +# endif +#endif + + +#if CFFI_MESSAGEBOX +/* Windows only: logic to take the Python-CFFI embedding logic + initialization errors and display them in a background thread + with MessageBox. The idea is that if the whole program closes + as a result of this problem, then likely it is already a console + program and you can read the stderr output in the console too. + If it is not a console program, then it will likely show its own + dialog to complain, or generally not abruptly close, and for this + case the background thread should stay alive. 
+*/ +static void *volatile _cffi_bootstrap_text; + +static PyObject *_cffi_start_error_capture(void) +{ + PyObject *result = NULL; + PyObject *x, *m, *bi; + + if (InterlockedCompareExchangePointer(&_cffi_bootstrap_text, + (void *)1, NULL) != NULL) + return (PyObject *)1; + + m = PyImport_AddModule("_cffi_error_capture"); + if (m == NULL) + goto error; + + result = PyModule_GetDict(m); + if (result == NULL) + goto error; + +#if PY_MAJOR_VERSION >= 3 + bi = PyImport_ImportModule("builtins"); +#else + bi = PyImport_ImportModule("__builtin__"); +#endif + if (bi == NULL) + goto error; + PyDict_SetItemString(result, "__builtins__", bi); + Py_DECREF(bi); + + x = PyRun_String( + "import sys\n" + "class FileLike:\n" + " def write(self, x):\n" + " try:\n" + " of.write(x)\n" + " except: pass\n" + " self.buf += x\n" + " def flush(self):\n" + " pass\n" + "fl = FileLike()\n" + "fl.buf = ''\n" + "of = sys.stderr\n" + "sys.stderr = fl\n" + "def done():\n" + " sys.stderr = of\n" + " return fl.buf\n", /* make sure the returned value stays alive */ + Py_file_input, + result, result); + Py_XDECREF(x); + + error: + if (PyErr_Occurred()) + { + PyErr_WriteUnraisable(Py_None); + PyErr_Clear(); + } + return result; +} + +#pragma comment(lib, "user32.lib") + +static DWORD WINAPI _cffi_bootstrap_dialog(LPVOID ignored) +{ + Sleep(666); /* may be interrupted if the whole process is closing */ +#if PY_MAJOR_VERSION >= 3 + MessageBoxW(NULL, (wchar_t *)_cffi_bootstrap_text, + L"Python-CFFI error", + MB_OK | MB_ICONERROR); +#else + MessageBoxA(NULL, (char *)_cffi_bootstrap_text, + "Python-CFFI error", + MB_OK | MB_ICONERROR); +#endif + _cffi_bootstrap_text = NULL; + return 0; +} + +static void _cffi_stop_error_capture(PyObject *ecap) +{ + PyObject *s; + void *text; + + if (ecap == (PyObject *)1) + return; + + if (ecap == NULL) + goto error; + + s = PyRun_String("done()", Py_eval_input, ecap, ecap); + if (s == NULL) + goto error; + + /* Show a dialog box, but in a background thread, and + never show multiple dialog boxes at once. */ +#if PY_MAJOR_VERSION >= 3 + text = PyUnicode_AsWideCharString(s, NULL); +#else + text = PyString_AsString(s); +#endif + + _cffi_bootstrap_text = text; + + if (text != NULL) + { + HANDLE h; + h = CreateThread(NULL, 0, _cffi_bootstrap_dialog, + NULL, 0, NULL); + if (h != NULL) + CloseHandle(h); + } + /* decref the string, but it should stay alive as 'fl.buf' + in the small module above. It will really be freed only if + we later get another similar error. So it's a leak of at + most one copy of the small module. That's fine for this + situation which is usually a "fatal error" anyway. */ + Py_DECREF(s); + PyErr_Clear(); + return; + + error: + _cffi_bootstrap_text = NULL; + PyErr_Clear(); +} + +#else + +static PyObject *_cffi_start_error_capture(void) { return NULL; } +static void _cffi_stop_error_capture(PyObject *ecap) { } + +#endif diff --git a/python/cffi/_cffi_include.h b/python/cffi/_cffi_include.h new file mode 100644 index 000000000..908a1d734 --- /dev/null +++ b/python/cffi/_cffi_include.h @@ -0,0 +1,389 @@ +#define _CFFI_ + +/* We try to define Py_LIMITED_API before including Python.h. + + Mess: we can only define it if Py_DEBUG, Py_TRACE_REFS and + Py_REF_DEBUG are not defined. This is a best-effort approximation: + we can learn about Py_DEBUG from pyconfig.h, but it is unclear if + the same works for the other two macros. Py_DEBUG implies them, + but not the other way around. 
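
(For orientation: the Py_LIMITED_API handling discussed in this header is triggered by the C source that `ffi.set_source()` emits. A minimal out-of-line, API-mode build script that produces such a source might look roughly like the sketch below; the module name `_example` is illustrative, not taken from this diff.)

    # build_example.py -- minimal API-mode build sketch (illustrative names)
    from cffi import FFI

    ffibuilder = FFI()
    ffibuilder.cdef("double sqrt(double x);")               # declarations to bind
    ffibuilder.set_source("_example", "#include <math.h>")  # emits a C source that
                                                            # uses _cffi_include.h

    if __name__ == "__main__":
        ffibuilder.compile(verbose=True)  # compiles the extension module
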
+
+   The implementation is messy (issue #350): on Windows, with _MSC_VER,
+   we have to define Py_LIMITED_API even before including pyconfig.h.
+   In that case, we guess what pyconfig.h will do to the macros above,
+   and check our guess after the #include.
+
+   Note that on Windows, with CPython 3.x, you need >= 3.5 and virtualenv
+   version >= 16.0.0.  With older versions of either, you don't get a
+   copy of PYTHON3.DLL in the virtualenv.  We can't check the version of
+   CPython *before* we even include pyconfig.h.  ffi.set_source() puts
+   a ``#define _CFFI_NO_LIMITED_API'' at the start of this file if it is
+   running on Windows < 3.5, as an attempt at fixing it, but that's
+   arguably wrong because it may not be the target version of Python.
+   Still better than nothing I guess.  As another workaround, you can
+   remove the definition of Py_LIMITED_API here.
+
+   See also 'py_limited_api' in cffi/setuptools_ext.py.
+*/
+#if !defined(_CFFI_USE_EMBEDDING) && !defined(Py_LIMITED_API)
+# ifdef _MSC_VER
+#  if !defined(_DEBUG) && !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API)
+#   define Py_LIMITED_API
+#  endif
+#  include <pyconfig.h>
+  /* sanity-check: Py_LIMITED_API will cause crashes if any of these
+     are also defined.  Normally, the Python file PC/pyconfig.h does not
+     cause any of these to be defined, with the exception that _DEBUG
+     causes Py_DEBUG.  Double-check that. */
+#  ifdef Py_LIMITED_API
+#   if defined(Py_DEBUG)
+#    error "pyconfig.h unexpectedly defines Py_DEBUG, but Py_LIMITED_API is set"
+#   endif
+#   if defined(Py_TRACE_REFS)
+#    error "pyconfig.h unexpectedly defines Py_TRACE_REFS, but Py_LIMITED_API is set"
+#   endif
+#   if defined(Py_REF_DEBUG)
+#    error "pyconfig.h unexpectedly defines Py_REF_DEBUG, but Py_LIMITED_API is set"
+#   endif
+#  endif
+# else
+#  include <pyconfig.h>
+#  if !defined(Py_DEBUG) && !defined(Py_TRACE_REFS) && !defined(Py_REF_DEBUG) && !defined(_CFFI_NO_LIMITED_API)
+#   define Py_LIMITED_API
+#  endif
+# endif
+#endif
+
+#include <Python.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+#include <stddef.h>
+#include "parse_c_type.h"
+
+/* this block of #ifs should be kept exactly identical between
+   c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py
+   and cffi/_cffi_include.h */
+#if defined(_MSC_VER)
+# include <malloc.h>   /* for alloca() */
+# if _MSC_VER < 1600   /* MSVC < 2010 */
+   typedef __int8 int8_t;
+   typedef __int16 int16_t;
+   typedef __int32 int32_t;
+   typedef __int64 int64_t;
+   typedef unsigned __int8 uint8_t;
+   typedef unsigned __int16 uint16_t;
+   typedef unsigned __int32 uint32_t;
+   typedef unsigned __int64 uint64_t;
+   typedef __int8 int_least8_t;
+   typedef __int16 int_least16_t;
+   typedef __int32 int_least32_t;
+   typedef __int64 int_least64_t;
+   typedef unsigned __int8 uint_least8_t;
+   typedef unsigned __int16 uint_least16_t;
+   typedef unsigned __int32 uint_least32_t;
+   typedef unsigned __int64 uint_least64_t;
+   typedef __int8 int_fast8_t;
+   typedef __int16 int_fast16_t;
+   typedef __int32 int_fast32_t;
+   typedef __int64 int_fast64_t;
+   typedef unsigned __int8 uint_fast8_t;
+   typedef unsigned __int16 uint_fast16_t;
+   typedef unsigned __int32 uint_fast32_t;
+   typedef unsigned __int64 uint_fast64_t;
+   typedef __int64 intmax_t;
+   typedef unsigned __int64 uintmax_t;
+# else
+#  include <stdint.h>
+# endif
+# if _MSC_VER < 1800   /* MSVC < 2013 */
+#  ifndef __cplusplus
+   typedef unsigned char _Bool;
+#  endif
+# endif
+# define _cffi_float_complex_t _Fcomplex    /* include <complex.h> for it */
+# define _cffi_double_complex_t _Dcomplex   /* include <complex.h> for it */
+#else
+# include <stdint.h>
+# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux)
+#  include <alloca.h>
+# endif
+# define _cffi_float_complex_t float _Complex
+# define _cffi_double_complex_t double _Complex
+#endif
+
+#ifdef __GNUC__
+# define _CFFI_UNUSED_FN __attribute__((unused))
+#else
+# define _CFFI_UNUSED_FN /* nothing */
+#endif
+
+#ifdef __cplusplus
+# ifndef _Bool
+   typedef bool _Bool;   /* semi-hackish: C++ has no _Bool; bool is builtin */
+# endif
+#endif
+
+/********** CPython-specific section **********/
+#ifndef PYPY_VERSION
+
+
+#if PY_MAJOR_VERSION >= 3
+# define PyInt_FromLong PyLong_FromLong
+#endif
+
+#define _cffi_from_c_double PyFloat_FromDouble
+#define _cffi_from_c_float PyFloat_FromDouble
+#define _cffi_from_c_long PyInt_FromLong
+#define _cffi_from_c_ulong PyLong_FromUnsignedLong
+#define _cffi_from_c_longlong PyLong_FromLongLong
+#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
+#define _cffi_from_c__Bool PyBool_FromLong
+
+#define _cffi_to_c_double PyFloat_AsDouble
+#define _cffi_to_c_float PyFloat_AsDouble
+
+#define _cffi_from_c_int(x, type)                                        \
+    (((type)-1) > 0 ? /* unsigned */                                     \
+        (sizeof(type) < sizeof(long) ?                                   \
+            PyInt_FromLong((long)x) :                                    \
+         sizeof(type) == sizeof(long) ?                                  \
+            PyLong_FromUnsignedLong((unsigned long)x) :                  \
+            PyLong_FromUnsignedLongLong((unsigned long long)x)) :        \
+        (sizeof(type) <= sizeof(long) ?                                  \
+            PyInt_FromLong((long)x) :                                    \
+            PyLong_FromLongLong((long long)x)))
+
+#define _cffi_to_c_int(o, type)                                          \
+    ((type)(                                                             \
+     sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o)        \
+                                         : (type)_cffi_to_c_i8(o)) :     \
+     sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o)       \
+                                         : (type)_cffi_to_c_i16(o)) :    \
+     sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o)       \
+                                         : (type)_cffi_to_c_i32(o)) :    \
+     sizeof(type) == 8 ? (((type)-1) > 0 ? 
(type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ + (Py_FatalError("unsupported size for type " #type), (type)0))) + +#define _cffi_to_c_i8 \ + ((int(*)(PyObject *))_cffi_exports[1]) +#define _cffi_to_c_u8 \ + ((int(*)(PyObject *))_cffi_exports[2]) +#define _cffi_to_c_i16 \ + ((int(*)(PyObject *))_cffi_exports[3]) +#define _cffi_to_c_u16 \ + ((int(*)(PyObject *))_cffi_exports[4]) +#define _cffi_to_c_i32 \ + ((int(*)(PyObject *))_cffi_exports[5]) +#define _cffi_to_c_u32 \ + ((unsigned int(*)(PyObject *))_cffi_exports[6]) +#define _cffi_to_c_i64 \ + ((long long(*)(PyObject *))_cffi_exports[7]) +#define _cffi_to_c_u64 \ + ((unsigned long long(*)(PyObject *))_cffi_exports[8]) +#define _cffi_to_c_char \ + ((int(*)(PyObject *))_cffi_exports[9]) +#define _cffi_from_c_pointer \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[10]) +#define _cffi_to_c_pointer \ + ((char *(*)(PyObject *, struct _cffi_ctypedescr *))_cffi_exports[11]) +#define _cffi_get_struct_layout \ + not used any more +#define _cffi_restore_errno \ + ((void(*)(void))_cffi_exports[13]) +#define _cffi_save_errno \ + ((void(*)(void))_cffi_exports[14]) +#define _cffi_from_c_char \ + ((PyObject *(*)(char))_cffi_exports[15]) +#define _cffi_from_c_deref \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[16]) +#define _cffi_to_c \ + ((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[17]) +#define _cffi_from_c_struct \ + ((PyObject *(*)(char *, struct _cffi_ctypedescr *))_cffi_exports[18]) +#define _cffi_to_c_wchar_t \ + ((_cffi_wchar_t(*)(PyObject *))_cffi_exports[19]) +#define _cffi_from_c_wchar_t \ + ((PyObject *(*)(_cffi_wchar_t))_cffi_exports[20]) +#define _cffi_to_c_long_double \ + ((long double(*)(PyObject *))_cffi_exports[21]) +#define _cffi_to_c__Bool \ + ((_Bool(*)(PyObject *))_cffi_exports[22]) +#define _cffi_prepare_pointer_call_argument \ + ((Py_ssize_t(*)(struct _cffi_ctypedescr *, \ + PyObject *, char **))_cffi_exports[23]) +#define _cffi_convert_array_from_object \ + ((int(*)(char *, struct _cffi_ctypedescr *, PyObject *))_cffi_exports[24]) +#define _CFFI_CPIDX 25 +#define _cffi_call_python \ + ((void(*)(struct _cffi_externpy_s *, char *))_cffi_exports[_CFFI_CPIDX]) +#define _cffi_to_c_wchar3216_t \ + ((int(*)(PyObject *))_cffi_exports[26]) +#define _cffi_from_c_wchar3216_t \ + ((PyObject *(*)(int))_cffi_exports[27]) +#define _CFFI_NUM_EXPORTS 28 + +struct _cffi_ctypedescr; + +static void *_cffi_exports[_CFFI_NUM_EXPORTS]; + +#define _cffi_type(index) ( \ + assert((((uintptr_t)_cffi_types[index]) & 1) == 0), \ + (struct _cffi_ctypedescr *)_cffi_types[index]) + +static PyObject *_cffi_init(const char *module_name, Py_ssize_t version, + const struct _cffi_type_context_s *ctx) +{ + PyObject *module, *o_arg, *new_module; + void *raw[] = { + (void *)module_name, + (void *)version, + (void *)_cffi_exports, + (void *)ctx, + }; + + module = PyImport_ImportModule("_cffi_backend"); + if (module == NULL) + goto failure; + + o_arg = PyLong_FromVoidPtr((void *)raw); + if (o_arg == NULL) + goto failure; + + new_module = PyObject_CallMethod( + module, (char *)"_init_cffi_1_0_external_module", (char *)"O", o_arg); + + Py_DECREF(o_arg); + Py_DECREF(module); + return new_module; + + failure: + Py_XDECREF(module); + return NULL; +} + + +#ifdef HAVE_WCHAR_H +typedef wchar_t _cffi_wchar_t; +#else +typedef uint16_t _cffi_wchar_t; /* same random pick as _cffi_backend.c */ +#endif + +_CFFI_UNUSED_FN static uint16_t _cffi_to_c_char16_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 2) 
+ return (uint16_t)_cffi_to_c_wchar_t(o); + else + return (uint16_t)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char16_t(uint16_t x) +{ + if (sizeof(_cffi_wchar_t) == 2) + return _cffi_from_c_wchar_t((_cffi_wchar_t)x); + else + return _cffi_from_c_wchar3216_t((int)x); +} + +_CFFI_UNUSED_FN static int _cffi_to_c_char32_t(PyObject *o) +{ + if (sizeof(_cffi_wchar_t) == 4) + return (int)_cffi_to_c_wchar_t(o); + else + return (int)_cffi_to_c_wchar3216_t(o); +} + +_CFFI_UNUSED_FN static PyObject *_cffi_from_c_char32_t(unsigned int x) +{ + if (sizeof(_cffi_wchar_t) == 4) + return _cffi_from_c_wchar_t((_cffi_wchar_t)x); + else + return _cffi_from_c_wchar3216_t((int)x); +} + +union _cffi_union_alignment_u { + unsigned char m_char; + unsigned short m_short; + unsigned int m_int; + unsigned long m_long; + unsigned long long m_longlong; + float m_float; + double m_double; + long double m_longdouble; +}; + +struct _cffi_freeme_s { + struct _cffi_freeme_s *next; + union _cffi_union_alignment_u alignment; +}; + +_CFFI_UNUSED_FN static int +_cffi_convert_array_argument(struct _cffi_ctypedescr *ctptr, PyObject *arg, + char **output_data, Py_ssize_t datasize, + struct _cffi_freeme_s **freeme) +{ + char *p; + if (datasize < 0) + return -1; + + p = *output_data; + if (p == NULL) { + struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc( + offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize); + if (fp == NULL) + return -1; + fp->next = *freeme; + *freeme = fp; + p = *output_data = (char *)&fp->alignment; + } + memset((void *)p, 0, (size_t)datasize); + return _cffi_convert_array_from_object(p, ctptr, arg); +} + +_CFFI_UNUSED_FN static void +_cffi_free_array_arguments(struct _cffi_freeme_s *freeme) +{ + do { + void *p = (void *)freeme; + freeme = freeme->next; + PyObject_Free(p); + } while (freeme != NULL); +} + +/********** end CPython-specific section **********/ +#else +_CFFI_UNUSED_FN +static void (*_cffi_call_python_org)(struct _cffi_externpy_s *, char *); +# define _cffi_call_python _cffi_call_python_org +#endif + + +#define _cffi_array_len(array) (sizeof(array) / sizeof((array)[0])) + +#define _cffi_prim_int(size, sign) \ + ((size) == 1 ? ((sign) ? _CFFI_PRIM_INT8 : _CFFI_PRIM_UINT8) : \ + (size) == 2 ? ((sign) ? _CFFI_PRIM_INT16 : _CFFI_PRIM_UINT16) : \ + (size) == 4 ? ((sign) ? _CFFI_PRIM_INT32 : _CFFI_PRIM_UINT32) : \ + (size) == 8 ? ((sign) ? _CFFI_PRIM_INT64 : _CFFI_PRIM_UINT64) : \ + _CFFI__UNKNOWN_PRIM) + +#define _cffi_prim_float(size) \ + ((size) == sizeof(float) ? _CFFI_PRIM_FLOAT : \ + (size) == sizeof(double) ? _CFFI_PRIM_DOUBLE : \ + (size) == sizeof(long double) ? 
+     _CFFI__UNKNOWN_LONG_DOUBLE :                       \
+     _CFFI__UNKNOWN_FLOAT_PRIM)
+
+#define _cffi_check_int(got, got_nonpos, expected)      \
+    ((got_nonpos) == (expected <= 0) &&                 \
+     (got) == (unsigned long long)expected)
+
+#ifdef MS_WIN32
+# define _cffi_stdcall __stdcall
+#else
+# define _cffi_stdcall /* nothing */
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/python/cffi/_embedding.h b/python/cffi/_embedding.h
new file mode 100644
index 000000000..64c04f67c
--- /dev/null
+++ b/python/cffi/_embedding.h
@@ -0,0 +1,550 @@
+
+/***** Support code for embedding *****/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#if defined(_WIN32)
+# define CFFI_DLLEXPORT __declspec(dllexport)
+#elif defined(__GNUC__)
+# define CFFI_DLLEXPORT __attribute__((visibility("default")))
+#else
+# define CFFI_DLLEXPORT /* nothing */
+#endif
+
+
+/* There are two global variables of type _cffi_call_python_fnptr:
+
+   * _cffi_call_python, which we declare just below, is the one called
+     by ``extern "Python"`` implementations.
+
+   * _cffi_call_python_org, which on CPython is actually part of the
+     _cffi_exports[] array, is the function pointer copied from
+     _cffi_backend.  If _cffi_start_python() fails, then this is set
+     to NULL; otherwise, it should never be NULL.
+
+   After initialization is complete, both are equal.  However, the
+   first one remains equal to &_cffi_start_and_call_python until the
+   very end of initialization, when we are (or should be) sure that
+   concurrent threads also see a completely initialized world, and
+   only then is it changed.
+*/
+#undef _cffi_call_python
+typedef void (*_cffi_call_python_fnptr)(struct _cffi_externpy_s *, char *);
+static void _cffi_start_and_call_python(struct _cffi_externpy_s *, char *);
+static _cffi_call_python_fnptr _cffi_call_python = &_cffi_start_and_call_python;
+
+
+#ifndef _MSC_VER
+   /* --- Assuming a GCC not infinitely old --- */
+# define cffi_compare_and_swap(l,o,n) __sync_bool_compare_and_swap(l,o,n)
+# define cffi_write_barrier() __sync_synchronize()
+# if !defined(__amd64__) && !defined(__x86_64__) &&     \
+     !defined(__i386__) && !defined(__i386)
+#  define cffi_read_barrier() __sync_synchronize()
+# else
+#  define cffi_read_barrier() (void)0
+# endif
+#else
+   /* --- Windows threads version --- */
+# include <windows.h>
+# define cffi_compare_and_swap(l,o,n)                   \
+    (InterlockedCompareExchangePointer(l,n,o) == (o))
+# define cffi_write_barrier() InterlockedCompareExchange(&_cffi_dummy,0,0)
+# define cffi_read_barrier() (void)0
+static volatile LONG _cffi_dummy;
+#endif
+
+#ifdef WITH_THREAD
+# ifndef _MSC_VER
+#  include <pthread.h>
+   static pthread_mutex_t _cffi_embed_startup_lock;
+# else
+   static CRITICAL_SECTION _cffi_embed_startup_lock;
+# endif
+  static char _cffi_embed_startup_lock_ready = 0;
+#endif
+
+static void _cffi_acquire_reentrant_mutex(void)
+{
+    static void *volatile lock = NULL;
+
+    while (!cffi_compare_and_swap(&lock, NULL, (void *)1)) {
+        /* should ideally do a spin loop instruction here, but
+           hard to do it portably and doesn't really matter I
+           think: pthread_mutex_init() should be very fast, and
+           this is only run at start-up anyway. */
+    }
*/ + } + +#ifdef WITH_THREAD + if (!_cffi_embed_startup_lock_ready) { +# ifndef _MSC_VER + pthread_mutexattr_t attr; + pthread_mutexattr_init(&attr); + pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); + pthread_mutex_init(&_cffi_embed_startup_lock, &attr); +# else + InitializeCriticalSection(&_cffi_embed_startup_lock); +# endif + _cffi_embed_startup_lock_ready = 1; + } +#endif + + while (!cffi_compare_and_swap(&lock, (void *)1, NULL)) + ; + +#ifndef _MSC_VER + pthread_mutex_lock(&_cffi_embed_startup_lock); +#else + EnterCriticalSection(&_cffi_embed_startup_lock); +#endif +} + +static void _cffi_release_reentrant_mutex(void) +{ +#ifndef _MSC_VER + pthread_mutex_unlock(&_cffi_embed_startup_lock); +#else + LeaveCriticalSection(&_cffi_embed_startup_lock); +#endif +} + + +/********** CPython-specific section **********/ +#ifndef PYPY_VERSION + +#include "_cffi_errors.h" + + +#define _cffi_call_python_org _cffi_exports[_CFFI_CPIDX] + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(void); /* forward */ + +static void _cffi_py_initialize(void) +{ + /* XXX use initsigs=0, which "skips initialization registration of + signal handlers, which might be useful when Python is + embedded" according to the Python docs. But review and think + if it should be a user-controllable setting. + + XXX we should also give a way to write errors to a buffer + instead of to stderr. + + XXX if importing 'site' fails, CPython (any version) calls + exit(). Should we try to work around this behavior here? + */ + Py_InitializeEx(0); +} + +static int _cffi_initialize_python(void) +{ + /* This initializes Python, imports _cffi_backend, and then the + present .dll/.so is set up as a CPython C extension module. + */ + int result; + PyGILState_STATE state; + PyObject *pycode=NULL, *global_dict=NULL, *x; + PyObject *builtins; + + state = PyGILState_Ensure(); + + /* Call the initxxx() function from the present module. It will + create and initialize us as a CPython extension module, instead + of letting the startup Python code do it---it might reimport + the same .dll/.so and get maybe confused on some platforms. + It might also have troubles locating the .dll/.so again for all + I know. + */ + (void)_CFFI_PYTHON_STARTUP_FUNC(); + if (PyErr_Occurred()) + goto error; + + /* Now run the Python code provided to ffi.embedding_init_code(). + */ + pycode = Py_CompileString(_CFFI_PYTHON_STARTUP_CODE, + "", + Py_file_input); + if (pycode == NULL) + goto error; + global_dict = PyDict_New(); + if (global_dict == NULL) + goto error; + builtins = PyEval_GetBuiltins(); + if (builtins == NULL) + goto error; + if (PyDict_SetItemString(global_dict, "__builtins__", builtins) < 0) + goto error; + x = PyEval_EvalCode( +#if PY_MAJOR_VERSION < 3 + (PyCodeObject *) +#endif + pycode, global_dict, global_dict); + if (x == NULL) + goto error; + Py_DECREF(x); + + /* Done! Now if we've been called from + _cffi_start_and_call_python() in an ``extern "Python"``, we can + only hope that the Python code did correctly set up the + corresponding @ffi.def_extern() function. Otherwise, the + general logic of ``extern "Python"`` functions (inside the + _cffi_backend module) will find that the reference is still + missing and print an error. + */ + result = 0; + done: + Py_XDECREF(pycode); + Py_XDECREF(global_dict); + PyGILState_Release(state); + return result; + + error:; + { + /* Print as much information as potentially useful. 
+ Debugging load-time failures with embedding is not fun + */ + PyObject *ecap; + PyObject *exception, *v, *tb, *f, *modules, *mod; + PyErr_Fetch(&exception, &v, &tb); + ecap = _cffi_start_error_capture(); + f = PySys_GetObject((char *)"stderr"); + if (f != NULL && f != Py_None) { + PyFile_WriteString( + "Failed to initialize the Python-CFFI embedding logic:\n\n", f); + } + + if (exception != NULL) { + PyErr_NormalizeException(&exception, &v, &tb); + PyErr_Display(exception, v, tb); + } + Py_XDECREF(exception); + Py_XDECREF(v); + Py_XDECREF(tb); + + if (f != NULL && f != Py_None) { + PyFile_WriteString("\nFrom: " _CFFI_MODULE_NAME + "\ncompiled with cffi version: 2.0.0" + "\n_cffi_backend module: ", f); + modules = PyImport_GetModuleDict(); + mod = PyDict_GetItemString(modules, "_cffi_backend"); + if (mod == NULL) { + PyFile_WriteString("not loaded", f); + } + else { + v = PyObject_GetAttrString(mod, "__file__"); + PyFile_WriteObject(v, f, 0); + Py_XDECREF(v); + } + PyFile_WriteString("\nsys.path: ", f); + PyFile_WriteObject(PySys_GetObject((char *)"path"), f, 0); + PyFile_WriteString("\n\n", f); + } + _cffi_stop_error_capture(ecap); + } + result = -1; + goto done; +} + +#if PY_VERSION_HEX < 0x03080000 +PyAPI_DATA(char *) _PyParser_TokenNames[]; /* from CPython */ +#endif + +static int _cffi_carefully_make_gil(void) +{ + /* This does the basic initialization of Python. It can be called + completely concurrently from unrelated threads. It assumes + that we don't hold the GIL before (if it exists), and we don't + hold it afterwards. + + (What it really does used to be completely different in Python 2 + and Python 3, with the Python 2 solution avoiding the spin-lock + around the Py_InitializeEx() call. However, after recent changes + to CPython 2.7 (issue #358) it no longer works. So we use the + Python 3 solution everywhere.) + + This initializes Python by calling Py_InitializeEx(). + Important: this must not be called concurrently at all. + So we use a global variable as a simple spin lock. This global + variable must be from 'libpythonX.Y.so', not from this + cffi-based extension module, because it must be shared from + different cffi-based extension modules. + + In Python < 3.8, we choose + _PyParser_TokenNames[0] as a completely arbitrary pointer value + that is never written to. The default is to point to the + string "ENDMARKER". We change it temporarily to point to the + next character in that string. (Yes, I know it's REALLY + obscure.) + + In Python >= 3.8, this string array is no longer writable, so + instead we pick PyCapsuleType.tp_version_tag. We can't change + Python < 3.8 because someone might use a mixture of cffi + embedded modules, some of which were compiled before this file + changed. + + In Python >= 3.12, this stopped working because that particular + tp_version_tag gets modified during interpreter startup. It's + arguably a bad idea before 3.12 too, but again we can't change + that because someone might use a mixture of cffi embedded + modules, and no-one reported a bug so far. In Python >= 3.12 + we go instead for PyCapsuleType.tp_as_buffer, which is supposed + to always be NULL. We write to it temporarily a pointer to + a struct full of NULLs, which is semantically the same. 
+ */ + +#ifdef WITH_THREAD +# if PY_VERSION_HEX < 0x03080000 + char *volatile *lock = (char *volatile *)_PyParser_TokenNames; + char *old_value, *locked_value; + + while (1) { /* spin loop */ + old_value = *lock; + locked_value = old_value + 1; + if (old_value[0] == 'E') { + assert(old_value[1] == 'N'); + if (cffi_compare_and_swap(lock, old_value, locked_value)) + break; + } + else { + assert(old_value[0] == 'N'); + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +# else +# if PY_VERSION_HEX < 0x030C0000 + int volatile *lock = (int volatile *)&PyCapsule_Type.tp_version_tag; + int old_value, locked_value = -42; + assert(!(PyCapsule_Type.tp_flags & Py_TPFLAGS_HAVE_VERSION_TAG)); +# else + static struct ebp_s { PyBufferProcs buf; int mark; } empty_buffer_procs; + empty_buffer_procs.mark = -42; + PyBufferProcs *volatile *lock = (PyBufferProcs *volatile *) + &PyCapsule_Type.tp_as_buffer; + PyBufferProcs *old_value, *locked_value = &empty_buffer_procs.buf; +# endif + + while (1) { /* spin loop */ + old_value = *lock; + if (old_value == 0) { + if (cffi_compare_and_swap(lock, old_value, locked_value)) + break; + } + else { +# if PY_VERSION_HEX < 0x030C0000 + assert(old_value == locked_value); +# else + /* The pointer should point to a possibly different + empty_buffer_procs from another C extension module */ + assert(((struct ebp_s *)old_value)->mark == -42); +# endif + /* should ideally do a spin loop instruction here, but + hard to do it portably and doesn't really matter I + think: PyEval_InitThreads() should be very fast, and + this is only run at start-up anyway. */ + } + } +# endif +#endif + + /* call Py_InitializeEx() */ + if (!Py_IsInitialized()) { + _cffi_py_initialize(); +#if PY_VERSION_HEX < 0x03070000 + PyEval_InitThreads(); +#endif + PyEval_SaveThread(); /* release the GIL */ + /* the returned tstate must be the one that has been stored into the + autoTLSkey by _PyGILState_Init() called from Py_Initialize(). */ + } + else { +#if PY_VERSION_HEX < 0x03070000 + /* PyEval_InitThreads() is always a no-op from CPython 3.7 */ + PyGILState_STATE state = PyGILState_Ensure(); + PyEval_InitThreads(); + PyGILState_Release(state); +#endif + } + +#ifdef WITH_THREAD + /* release the lock */ + while (!cffi_compare_and_swap(lock, locked_value, old_value)) + ; +#endif + + return 0; +} + +/********** end CPython-specific section **********/ + + +#else + + +/********** PyPy-specific section **********/ + +PyMODINIT_FUNC _CFFI_PYTHON_STARTUP_FUNC(const void *[]); /* forward */ + +static struct _cffi_pypy_init_s { + const char *name; + void *func; /* function pointer */ + const char *code; +} _cffi_pypy_init = { + _CFFI_MODULE_NAME, + _CFFI_PYTHON_STARTUP_FUNC, + _CFFI_PYTHON_STARTUP_CODE, +}; + +extern int pypy_carefully_make_gil(const char *); +extern int pypy_init_embedded_cffi_module(int, struct _cffi_pypy_init_s *); + +static int _cffi_carefully_make_gil(void) +{ + return pypy_carefully_make_gil(_CFFI_MODULE_NAME); +} + +static int _cffi_initialize_python(void) +{ + return pypy_init_embedded_cffi_module(0xB011, &_cffi_pypy_init); +} + +/********** end PyPy-specific section **********/ + + +#endif + + +#ifdef __GNUC__ +__attribute__((noinline)) +#endif +static _cffi_call_python_fnptr _cffi_start_python(void) +{ + /* Delicate logic to initialize Python. This function can be + called multiple times concurrently, e.g. 
when the process calls + its first ``extern "Python"`` functions in multiple threads at + once. It can also be called recursively, in which case we must + ignore it. We also have to consider what occurs if several + different cffi-based extensions reach this code in parallel + threads---it is a different copy of the code, then, and we + can't have any shared global variable unless it comes from + 'libpythonX.Y.so'. + + Idea: + + * _cffi_carefully_make_gil(): "carefully" call + PyEval_InitThreads() (possibly with Py_InitializeEx() first). + + * then we use a (local) custom lock to make sure that a call to this + cffi-based extension will wait if another call to the *same* + extension is running the initialization in another thread. + It is reentrant, so that a recursive call will not block, but + only one from a different thread. + + * then we grab the GIL and (Python 2) we call Py_InitializeEx(). + At this point, concurrent calls to Py_InitializeEx() are not + possible: we have the GIL. + + * do the rest of the specific initialization, which may + temporarily release the GIL but not the custom lock. + Only release the custom lock when we are done. + */ + static char called = 0; + + if (_cffi_carefully_make_gil() != 0) + return NULL; + + _cffi_acquire_reentrant_mutex(); + + /* Here the GIL exists, but we don't have it. We're only protected + from concurrency by the reentrant mutex. */ + + /* This file only initializes the embedded module once, the first + time this is called, even if there are subinterpreters. */ + if (!called) { + called = 1; /* invoke _cffi_initialize_python() only once, + but don't set '_cffi_call_python' right now, + otherwise concurrent threads won't call + this function at all (we need them to wait) */ + if (_cffi_initialize_python() == 0) { + /* now initialization is finished. Switch to the fast-path. */ + + /* We would like nobody to see the new value of + '_cffi_call_python' without also seeing the rest of the + data initialized. However, this is not possible. But + the new value of '_cffi_call_python' is the function + 'cffi_call_python()' from _cffi_backend. So: */ + cffi_write_barrier(); + /* ^^^ we put a write barrier here, and a corresponding + read barrier at the start of cffi_call_python(). This + ensures that after that read barrier, we see everything + done here before the write barrier. + */ + + assert(_cffi_call_python_org != NULL); + _cffi_call_python = (_cffi_call_python_fnptr)_cffi_call_python_org; + } + else { + /* initialization failed. Reset this to NULL, even if it was + already set to some other value. Future calls to + _cffi_start_python() are still forced to occur, and will + always return NULL from now on. */ + _cffi_call_python_org = NULL; + } + } + + _cffi_release_reentrant_mutex(); + + return (_cffi_call_python_fnptr)_cffi_call_python_org; +} + +static +void _cffi_start_and_call_python(struct _cffi_externpy_s *externpy, char *args) +{ + _cffi_call_python_fnptr fnptr; + int current_err = errno; +#ifdef _MSC_VER + int current_lasterr = GetLastError(); +#endif + fnptr = _cffi_start_python(); + if (fnptr == NULL) { + fprintf(stderr, "function %s() called, but initialization code " + "failed. Returning 0.\n", externpy->name); + memset(args, 0, externpy->size_of_result); + } +#ifdef _MSC_VER + SetLastError(current_lasterr); +#endif + errno = current_err; + + if (fnptr != NULL) + fnptr(externpy, args); +} + + +/* The cffi_start_python() function makes sure Python is initialized + and our cffi module is set up. 
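
(For context: this startup path is normally exercised from a Python-side build script using the documented embedding API. A minimal sketch follows; the names `my_plugin` and `do_stuff` are illustrative, not taken from this diff.)

    # embedding_build.py -- sketch of the documented embedding workflow
    import cffi

    ffibuilder = cffi.FFI()
    ffibuilder.embedding_api("int do_stuff(int, int);")  # C-visible API
    ffibuilder.set_source("my_plugin", "")
    ffibuilder.embedding_init_code("""
        from my_plugin import ffi

        @ffi.def_extern()
        def do_stuff(x, y):
            # the first call from C reaches this via _cffi_start_and_call_python()
            return x + y
    """)
    ffibuilder.compile(target="libmy_plugin.*", verbose=True)
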
It can be called manually from the + user C code. The same effect is obtained automatically from any + dll-exported ``extern "Python"`` function. This function returns + -1 if initialization failed, 0 if all is OK. */ +_CFFI_UNUSED_FN +static int cffi_start_python(void) +{ + if (_cffi_call_python == &_cffi_start_and_call_python) { + if (_cffi_start_python() == NULL) + return -1; + } + cffi_read_barrier(); + return 0; +} + +#undef cffi_compare_and_swap +#undef cffi_write_barrier +#undef cffi_read_barrier + +#ifdef __cplusplus +} +#endif diff --git a/python/cffi/_imp_emulation.py b/python/cffi/_imp_emulation.py new file mode 100644 index 000000000..136abdddf --- /dev/null +++ b/python/cffi/_imp_emulation.py @@ -0,0 +1,83 @@ + +try: + # this works on Python < 3.12 + from imp import * + +except ImportError: + # this is a limited emulation for Python >= 3.12. + # Note that this is used only for tests or for the old ffi.verify(). + # This is copied from the source code of Python 3.11. + + from _imp import (acquire_lock, release_lock, + is_builtin, is_frozen) + + from importlib._bootstrap import _load + + from importlib import machinery + import os + import sys + import tokenize + + SEARCH_ERROR = 0 + PY_SOURCE = 1 + PY_COMPILED = 2 + C_EXTENSION = 3 + PY_RESOURCE = 4 + PKG_DIRECTORY = 5 + C_BUILTIN = 6 + PY_FROZEN = 7 + PY_CODERESOURCE = 8 + IMP_HOOK = 9 + + def get_suffixes(): + extensions = [(s, 'rb', C_EXTENSION) + for s in machinery.EXTENSION_SUFFIXES] + source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES] + bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES] + return extensions + source + bytecode + + def find_module(name, path=None): + if not isinstance(name, str): + raise TypeError("'name' must be a str, not {}".format(type(name))) + elif not isinstance(path, (type(None), list)): + # Backwards-compatibility + raise RuntimeError("'path' must be None or a list, " + "not {}".format(type(path))) + + if path is None: + if is_builtin(name): + return None, None, ('', '', C_BUILTIN) + elif is_frozen(name): + return None, None, ('', '', PY_FROZEN) + else: + path = sys.path + + for entry in path: + package_directory = os.path.join(entry, name) + for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]: + package_file_name = '__init__' + suffix + file_path = os.path.join(package_directory, package_file_name) + if os.path.isfile(file_path): + return None, package_directory, ('', '', PKG_DIRECTORY) + for suffix, mode, type_ in get_suffixes(): + file_name = name + suffix + file_path = os.path.join(entry, file_name) + if os.path.isfile(file_path): + break + else: + continue + break # Break out of outer loop when breaking out of inner loop. 
+ else: + raise ImportError(name, name=name) + + encoding = None + if 'b' not in mode: + with open(file_path, 'rb') as file: + encoding = tokenize.detect_encoding(file.readline)[0] + file = open(file_path, mode, encoding=encoding) + return file, file_path, (suffix, mode, type_) + + def load_dynamic(name, path, file=None): + loader = machinery.ExtensionFileLoader(name, path) + spec = machinery.ModuleSpec(name=name, loader=loader, origin=path) + return _load(spec) diff --git a/python/cffi/_shimmed_dist_utils.py b/python/cffi/_shimmed_dist_utils.py new file mode 100644 index 000000000..c3d231281 --- /dev/null +++ b/python/cffi/_shimmed_dist_utils.py @@ -0,0 +1,45 @@ +""" +Temporary shim module to indirect the bits of distutils we need from setuptools/distutils while providing useful +error messages beyond `No module named 'distutils' on Python >= 3.12, or when setuptools' vendored distutils is broken. + +This is a compromise to avoid a hard-dep on setuptools for Python >= 3.12, since many users don't need runtime compilation support from CFFI. +""" +import sys + +try: + # import setuptools first; this is the most robust way to ensure its embedded distutils is available + # (the .pth shim should usually work, but this is even more robust) + import setuptools +except Exception as ex: + if sys.version_info >= (3, 12): + # Python 3.12 has no built-in distutils to fall back on, so any import problem is fatal + raise Exception("This CFFI feature requires setuptools on Python >= 3.12. The setuptools module is missing or non-functional.") from ex + + # silently ignore on older Pythons (support fallback to stdlib distutils where available) +else: + del setuptools + +try: + # bring in just the bits of distutils we need, whether they really came from setuptools or stdlib-embedded distutils + from distutils import log, sysconfig + from distutils.ccompiler import CCompiler + from distutils.command.build_ext import build_ext + from distutils.core import Distribution, Extension + from distutils.dir_util import mkpath + from distutils.errors import DistutilsSetupError, CompileError, LinkError + from distutils.log import set_threshold, set_verbosity + + if sys.platform == 'win32': + try: + # FUTURE: msvc9compiler module was removed in setuptools 74; consider removing, as it's only used by an ancient patch in `recompiler` + from distutils.msvc9compiler import MSVCCompiler + except ImportError: + MSVCCompiler = None +except Exception as ex: + if sys.version_info >= (3, 12): + raise Exception("This CFFI feature requires setuptools on Python >= 3.12. Please install the setuptools package.") from ex + + # anything older, just let the underlying distutils import error fly + raise Exception("This CFFI feature requires distutils. Please install the distutils or setuptools package.") from ex + +del sys diff --git a/python/cffi/api.py b/python/cffi/api.py new file mode 100644 index 000000000..5a474f3da --- /dev/null +++ b/python/cffi/api.py @@ -0,0 +1,967 @@ +import sys, types +from .lock import allocate_lock +from .error import CDefError +from . import model + +try: + callable +except NameError: + # Python 3.1 + from collections import Callable + callable = lambda x: isinstance(x, Callable) + +try: + basestring +except NameError: + # Python 3.x + basestring = str + +_unspecified = object() + + + +class FFI(object): + r''' + The main top-level class that you instantiate once, or once per module. 
+ + Example usage: + + ffi = FFI() + ffi.cdef(""" + int printf(const char *, ...); + """) + + C = ffi.dlopen(None) # standard library + -or- + C = ffi.verify() # use a C compiler: verify the decl above is right + + C.printf("hello, %s!\n", ffi.new("char[]", "world")) + ''' + + def __init__(self, backend=None): + """Create an FFI instance. The 'backend' argument is used to + select a non-default backend, mostly for tests. + """ + if backend is None: + # You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with + # _cffi_backend.so compiled. + import _cffi_backend as backend + from . import __version__ + if backend.__version__ != __version__: + # bad version! Try to be as explicit as possible. + if hasattr(backend, '__file__'): + # CPython + raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % ( + __version__, __file__, + backend.__version__, backend.__file__)) + else: + # PyPy + raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % ( + __version__, __file__, backend.__version__)) + # (If you insist you can also try to pass the option + # 'backend=backend_ctypes.CTypesBackend()', but don't + # rely on it! It's probably not going to work well.) + + from . import cparser + self._backend = backend + self._lock = allocate_lock() + self._parser = cparser.Parser() + self._cached_btypes = {} + self._parsed_types = types.ModuleType('parsed_types').__dict__ + self._new_types = types.ModuleType('new_types').__dict__ + self._function_caches = [] + self._libraries = [] + self._cdefsources = [] + self._included_ffis = [] + self._windows_unicode = None + self._init_once_cache = {} + self._cdef_version = None + self._embedding = None + self._typecache = model.get_typecache(backend) + if hasattr(backend, 'set_ffi'): + backend.set_ffi(self) + for name in list(backend.__dict__): + if name.startswith('RTLD_'): + setattr(self, name, getattr(backend, name)) + # + with self._lock: + self.BVoidP = self._get_cached_btype(model.voidp_type) + self.BCharA = self._get_cached_btype(model.char_array_type) + if isinstance(backend, types.ModuleType): + # _cffi_backend: attach these constants to the class + if not hasattr(FFI, 'NULL'): + FFI.NULL = self.cast(self.BVoidP, 0) + FFI.CData, FFI.CType = backend._get_types() + else: + # ctypes backend: attach these constants to the instance + self.NULL = self.cast(self.BVoidP, 0) + self.CData, self.CType = backend._get_types() + self.buffer = backend.buffer + + def cdef(self, csource, override=False, packed=False, pack=None): + """Parse the given C source. This registers all declared functions, + types, and global variables. The functions and global variables can + then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. + The types can be used in 'ffi.new()' and other functions. + If 'packed' is specified as True, all structs declared inside this + cdef are packed, i.e. laid out without any field alignment at all. + Alternatively, 'pack' can be a small integer, and requests for + alignment greater than that are ignored (pack=1 is equivalent to + packed=True). 
+ """ + self._cdef(csource, override=override, packed=packed, pack=pack) + + def embedding_api(self, csource, packed=False, pack=None): + self._cdef(csource, packed=packed, pack=pack, dllexport=True) + if self._embedding is None: + self._embedding = '' + + def _cdef(self, csource, override=False, **options): + if not isinstance(csource, str): # unicode, on Python 2 + if not isinstance(csource, basestring): + raise TypeError("cdef() argument must be a string") + csource = csource.encode('ascii') + with self._lock: + self._cdef_version = object() + self._parser.parse(csource, override=override, **options) + self._cdefsources.append(csource) + if override: + for cache in self._function_caches: + cache.clear() + finishlist = self._parser._recomplete + if finishlist: + self._parser._recomplete = [] + for tp in finishlist: + tp.finish_backend_type(self, finishlist) + + def dlopen(self, name, flags=0): + """Load and return a dynamic library identified by 'name'. + The standard C library can be loaded by passing None. + Note that functions and types declared by 'ffi.cdef()' are not + linked to a particular library, just like C headers; in the + library we only look for the actual (untyped) symbols. + """ + if not (isinstance(name, basestring) or + name is None or + isinstance(name, self.CData)): + raise TypeError("dlopen(name): name must be a file name, None, " + "or an already-opened 'void *' handle") + with self._lock: + lib, function_cache = _make_ffi_library(self, name, flags) + self._function_caches.append(function_cache) + self._libraries.append(lib) + return lib + + def dlclose(self, lib): + """Close a library obtained with ffi.dlopen(). After this call, + access to functions or variables from the library will fail + (possibly with a segmentation fault). + """ + type(lib).__cffi_close__(lib) + + def _typeof_locked(self, cdecl): + # call me with the lock! + key = cdecl + if key in self._parsed_types: + return self._parsed_types[key] + # + if not isinstance(cdecl, str): # unicode, on Python 2 + cdecl = cdecl.encode('ascii') + # + type = self._parser.parse_type(cdecl) + really_a_function_type = type.is_raw_function + if really_a_function_type: + type = type.as_function_pointer() + btype = self._get_cached_btype(type) + result = btype, really_a_function_type + self._parsed_types[key] = result + return result + + def _typeof(self, cdecl, consider_function_as_funcptr=False): + # string -> ctype object + try: + result = self._parsed_types[cdecl] + except KeyError: + with self._lock: + result = self._typeof_locked(cdecl) + # + btype, really_a_function_type = result + if really_a_function_type and not consider_function_as_funcptr: + raise CDefError("the type %r is a function type, not a " + "pointer-to-function type" % (cdecl,)) + return btype + + def typeof(self, cdecl): + """Parse the C type given as a string and return the + corresponding object. + It can also be used on 'cdata' instance to get its C type. + """ + if isinstance(cdecl, basestring): + return self._typeof(cdecl) + if isinstance(cdecl, self.CData): + return self._backend.typeof(cdecl) + if isinstance(cdecl, types.BuiltinFunctionType): + res = _builtin_function_type(cdecl) + if res is not None: + return res + if (isinstance(cdecl, types.FunctionType) + and hasattr(cdecl, '_cffi_base_type')): + with self._lock: + return self._get_cached_btype(cdecl._cffi_base_type) + raise TypeError(type(cdecl)) + + def sizeof(self, cdecl): + """Return the size in bytes of the argument. It can be a + string naming a C type, or a 'cdata' instance. 
+ """ + if isinstance(cdecl, basestring): + BType = self._typeof(cdecl) + return self._backend.sizeof(BType) + else: + return self._backend.sizeof(cdecl) + + def alignof(self, cdecl): + """Return the natural alignment size in bytes of the C type + given as a string. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.alignof(cdecl) + + def offsetof(self, cdecl, *fields_or_indexes): + """Return the offset of the named field inside the given + structure or array, which must be given as a C type name. + You can give several field names in case of nested structures. + You can also give numeric values which correspond to array + items, in case of an array type. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._typeoffsetof(cdecl, *fields_or_indexes)[1] + + def new(self, cdecl, init=None): + """Allocate an instance according to the specified C type and + return a pointer to it. The specified C type must be either a + pointer or an array: ``new('X *')`` allocates an X and returns + a pointer to it, whereas ``new('X[n]')`` allocates an array of + n X'es and returns an array referencing it (which works + mostly like a pointer, like in C). You can also use + ``new('X[]', n)`` to allocate an array of a non-constant + length n. + + The memory is initialized following the rules of declaring a + global variable in C: by default it is zero-initialized, but + an explicit initializer can be given which can be used to + fill all or part of the memory. + + When the returned object goes out of scope, the memory + is freed. In other words the returned object has + ownership of the value of type 'cdecl' that it points to. This + means that the raw data can be used as long as this object is + kept alive, but must not be used for a longer time. Be careful + about that when copying the pointer to the memory somewhere + else, e.g. into another structure. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.newp(cdecl, init) + + def new_allocator(self, alloc=None, free=None, + should_clear_after_alloc=True): + """Return a new allocator, i.e. a function that behaves like ffi.new() + but uses the provided low-level 'alloc' and 'free' functions. + + 'alloc' is called with the size as argument. If it returns NULL, a + MemoryError is raised. 'free' is called with the result of 'alloc' + as argument. Both can be either Python function or directly C + functions. If 'free' is None, then no free function is called. + If both 'alloc' and 'free' are None, the default is used. + + If 'should_clear_after_alloc' is set to False, then the memory + returned by 'alloc' is assumed to be already cleared (or you are + fine with garbage); otherwise CFFI will clear it. + """ + compiled_ffi = self._backend.FFI() + allocator = compiled_ffi.new_allocator(alloc, free, + should_clear_after_alloc) + def allocate(cdecl, init=None): + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return allocator(cdecl, init) + return allocate + + def cast(self, cdecl, source): + """Similar to a C cast: returns an instance of the named C + type initialized with the given 'source'. The source is + casted between integers or pointers of any type. + """ + if isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.cast(cdecl, source) + + def string(self, cdata, maxlen=-1): + """Return a Python string (or unicode string) from the 'cdata'. 
+ If 'cdata' is a pointer or array of characters or bytes, returns + the null-terminated string. The returned string extends until + the first null character, or at most 'maxlen' characters. If + 'cdata' is an array then 'maxlen' defaults to its length. + + If 'cdata' is a pointer or array of wchar_t, returns a unicode + string following the same rules. + + If 'cdata' is a single character or byte or a wchar_t, returns + it as a string or unicode string. + + If 'cdata' is an enum, returns the value of the enumerator as a + string, or 'NUMBER' if the value is out of range. + """ + return self._backend.string(cdata, maxlen) + + def unpack(self, cdata, length): + """Unpack an array of C data of the given length, + returning a Python string/unicode/list. + + If 'cdata' is a pointer to 'char', returns a byte string. + It does not stop at the first null. This is equivalent to: + ffi.buffer(cdata, length)[:] + + If 'cdata' is a pointer to 'wchar_t', returns a unicode string. + 'length' is measured in wchar_t's; it is not the size in bytes. + + If 'cdata' is a pointer to anything else, returns a list of + 'length' items. This is a faster equivalent to: + [cdata[i] for i in range(length)] + """ + return self._backend.unpack(cdata, length) + + #def buffer(self, cdata, size=-1): + # """Return a read-write buffer object that references the raw C data + # pointed to by the given 'cdata'. The 'cdata' must be a pointer or + # an array. Can be passed to functions expecting a buffer, or directly + # manipulated with: + # + # buf[:] get a copy of it in a regular string, or + # buf[idx] as a single character + # buf[:] = ... + # buf[idx] = ... change the content + # """ + # note that 'buffer' is a type, set on this instance by __init__ + + def from_buffer(self, cdecl, python_buffer=_unspecified, + require_writable=False): + """Return a cdata of the given type pointing to the data of the + given Python object, which must support the buffer interface. + Note that this is not meant to be used on the built-in types + str or unicode (you can build 'char[]' arrays explicitly) + but only on objects containing large quantities of raw data + in some other format, like 'array.array' or numpy arrays. + + The first argument is optional and default to 'char[]'. + """ + if python_buffer is _unspecified: + cdecl, python_buffer = self.BCharA, cdecl + elif isinstance(cdecl, basestring): + cdecl = self._typeof(cdecl) + return self._backend.from_buffer(cdecl, python_buffer, + require_writable) + + def memmove(self, dest, src, n): + """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. + + Like the C function memmove(), the memory areas may overlap; + apart from that it behaves like the C function memcpy(). + + 'src' can be any cdata ptr or array, or any Python buffer object. + 'dest' can be any cdata ptr or array, or a writable Python buffer + object. The size to copy, 'n', is always measured in bytes. + + Unlike other methods, this one supports all Python buffer including + byte strings and bytearrays---but it still does not support + non-contiguous buffers. + """ + return self._backend.memmove(dest, src, n) + + def callback(self, cdecl, python_callable=None, error=None, onerror=None): + """Return a callback object or a decorator making such a + callback object. 'cdecl' must name a C function pointer type. + The callback invokes the specified 'python_callable' (which may + be provided either directly or via a decorator). 
Important: the
+        callback object must be manually kept alive for as long as the
+        callback may be invoked from the C level.
+        """
+        def callback_decorator_wrap(python_callable):
+            if not callable(python_callable):
+                raise TypeError("the 'python_callable' argument "
+                                "is not callable")
+            return self._backend.callback(cdecl, python_callable,
+                                          error, onerror)
+        if isinstance(cdecl, basestring):
+            cdecl = self._typeof(cdecl, consider_function_as_funcptr=True)
+        if python_callable is None:
+            return callback_decorator_wrap    # decorator mode
+        else:
+            return callback_decorator_wrap(python_callable)  # direct mode
+
+    def getctype(self, cdecl, replace_with=''):
+        """Return a string giving the C type 'cdecl', which may be itself
+        a string or a <ctype> object.  If 'replace_with' is given, it gives
+        extra text to append (or insert for more complicated C types), like
+        a variable name, or '*' to get actually the C type 'pointer-to-cdecl'.
+        """
+        if isinstance(cdecl, basestring):
+            cdecl = self._typeof(cdecl)
+        replace_with = replace_with.strip()
+        if (replace_with.startswith('*')
+                and '&[' in self._backend.getcname(cdecl, '&')):
+            replace_with = '(%s)' % replace_with
+        elif replace_with and not replace_with[0] in '[(':
+            replace_with = ' ' + replace_with
+        return self._backend.getcname(cdecl, replace_with)
+
+    def gc(self, cdata, destructor, size=0):
+        """Return a new cdata object that points to the same
+        data.  Later, when this new cdata object is garbage-collected,
+        'destructor(old_cdata_object)' will be called.
+
+        The optional 'size' gives an estimate of the size, used to
+        trigger the garbage collection more eagerly.  So far only used
+        on PyPy.  It tells the GC that the returned object keeps alive
+        roughly 'size' bytes of external memory.
+        """
+        return self._backend.gcp(cdata, destructor, size)
+
+    def _get_cached_btype(self, type):
+        assert self._lock.acquire(False) is False
+        # call me with the lock!
+        try:
+            BType = self._cached_btypes[type]
+        except KeyError:
+            finishlist = []
+            BType = type.get_cached_btype(self, finishlist)
+            for type in finishlist:
+                type.finish_backend_type(self, finishlist)
+        return BType
+
+    def verify(self, source='', tmpdir=None, **kwargs):
+        """Verify that the current ffi signatures compile on this
+        machine, and return a dynamic library object.  The dynamic
+        library can be used to call functions and access global
+        variables declared in this 'ffi'.  The library is compiled
+        by the C compiler: it gives you C-level API compatibility
+        (including calling macros).  This is unlike 'ffi.dlopen()',
+        which requires binary compatibility in the signatures.
+        """
+        from .verifier import Verifier, _caller_dir_pycache
+        #
+        # If set_unicode(True) was called, insert the UNICODE and
+        # _UNICODE macro declarations
+        if self._windows_unicode:
+            self._apply_windows_unicode(kwargs)
+        #
+        # Set the tmpdir here, and not in Verifier.__init__: it picks
+        # up the caller's directory, which we want to be the caller of
+        # ffi.verify(), as opposed to the caller of Verifier().
+        tmpdir = tmpdir or _caller_dir_pycache()
+        #
+        # Make a Verifier() and use it to load the library.
+        self.verifier = Verifier(self, source, tmpdir, **kwargs)
+        lib = self.verifier.load_library()
+        #
+        # Save the loaded library for keep-alive purposes, even
+        # if the caller doesn't keep it alive itself (it should).
+        self._libraries.append(lib)
+        return lib
+
+    def _get_errno(self):
+        return self._backend.get_errno()
+    def _set_errno(self, errno):
+        self._backend.set_errno(errno)
+    errno = property(_get_errno, _set_errno, None,
+                     "the value of 'errno' from/to the C calls")
+
+    def getwinerror(self, code=-1):
+        return self._backend.getwinerror(code)
+
+    def _pointer_to(self, ctype):
+        with self._lock:
+            return model.pointer_cache(self, ctype)
+
+    def addressof(self, cdata, *fields_or_indexes):
+        """Return the address of a <cdata 'struct-or-union'>.
+        If 'fields_or_indexes' are given, returns the address of that
+        field or array item in the structure or array, recursively in
+        case of nested structures.
+        """
+        try:
+            ctype = self._backend.typeof(cdata)
+        except TypeError:
+            if '__addressof__' in type(cdata).__dict__:
+                return type(cdata).__addressof__(cdata, *fields_or_indexes)
+            raise
+        if fields_or_indexes:
+            ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes)
+        else:
+            if ctype.kind == "pointer":
+                raise TypeError("addressof(pointer)")
+            offset = 0
+        ctypeptr = self._pointer_to(ctype)
+        return self._backend.rawaddressof(ctypeptr, cdata, offset)
+
+    def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes):
+        ctype, offset = self._backend.typeoffsetof(ctype, field_or_index)
+        for field1 in fields_or_indexes:
+            ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1)
+            offset += offset1
+        return ctype, offset
+
+    def include(self, ffi_to_include):
+        """Includes the typedefs, structs, unions and enums defined
+        in another FFI instance.  Usage is similar to a #include in C,
+        where a part of the program might include types defined in
+        another part for its own usage.  Note that the include()
+        method has no effect on functions, constants and global
+        variables, which must anyway be accessed directly from the
+        lib object returned by the original FFI instance.
+        """
+        if not isinstance(ffi_to_include, FFI):
+            raise TypeError("ffi.include() expects an argument that is also of"
+                            " type cffi.FFI, not %r" % (
+                                type(ffi_to_include).__name__,))
+        if ffi_to_include is self:
+            raise ValueError("self.include(self)")
+        with ffi_to_include._lock:
+            with self._lock:
+                self._parser.include(ffi_to_include._parser)
+                self._cdefsources.append('[')
+                self._cdefsources.extend(ffi_to_include._cdefsources)
+                self._cdefsources.append(']')
+                self._included_ffis.append(ffi_to_include)
+
+    def new_handle(self, x):
+        return self._backend.newp_handle(self.BVoidP, x)
+
+    def from_handle(self, x):
+        return self._backend.from_handle(x)
+
+    def release(self, x):
+        self._backend.release(x)
+
+    def set_unicode(self, enabled_flag):
+        """Windows: if 'enabled_flag' is True, enable the UNICODE and
+        _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR
+        to be (pointers to) wchar_t.  If 'enabled_flag' is False,
+        declare these types to be (pointers to) plain 8-bit characters.
+        This is mostly for backward compatibility; you usually want True.
+ """ + if self._windows_unicode is not None: + raise ValueError("set_unicode() can only be called once") + enabled_flag = bool(enabled_flag) + if enabled_flag: + self.cdef("typedef wchar_t TBYTE;" + "typedef wchar_t TCHAR;" + "typedef const wchar_t *LPCTSTR;" + "typedef const wchar_t *PCTSTR;" + "typedef wchar_t *LPTSTR;" + "typedef wchar_t *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + else: + self.cdef("typedef char TBYTE;" + "typedef char TCHAR;" + "typedef const char *LPCTSTR;" + "typedef const char *PCTSTR;" + "typedef char *LPTSTR;" + "typedef char *PTSTR;" + "typedef TBYTE *PTBYTE;" + "typedef TCHAR *PTCHAR;") + self._windows_unicode = enabled_flag + + def _apply_windows_unicode(self, kwds): + defmacros = kwds.get('define_macros', ()) + if not isinstance(defmacros, (list, tuple)): + raise TypeError("'define_macros' must be a list or tuple") + defmacros = list(defmacros) + [('UNICODE', '1'), + ('_UNICODE', '1')] + kwds['define_macros'] = defmacros + + def _apply_embedding_fix(self, kwds): + # must include an argument like "-lpython2.7" for the compiler + def ensure(key, value): + lst = kwds.setdefault(key, []) + if value not in lst: + lst.append(value) + # + if '__pypy__' in sys.builtin_module_names: + import os + if sys.platform == "win32": + # we need 'libpypy-c.lib'. Current distributions of + # pypy (>= 4.1) contain it as 'libs/python27.lib'. + pythonlib = "python{0[0]}{0[1]}".format(sys.version_info) + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'libs')) + else: + # we need 'libpypy-c.{so,dylib}', which should be by + # default located in 'sys.prefix/bin' for installed + # systems. + if sys.version_info < (3,): + pythonlib = "pypy-c" + else: + pythonlib = "pypy3-c" + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'bin')) + # On uninstalled pypy's, the libpypy-c is typically found in + # .../pypy/goal/. + if hasattr(sys, 'prefix'): + ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal')) + else: + if sys.platform == "win32": + template = "python%d%d" + if hasattr(sys, 'gettotalrefcount'): + template += '_d' + else: + try: + import sysconfig + except ImportError: # 2.6 + from cffi._shimmed_dist_utils import sysconfig + template = "python%d.%d" + if sysconfig.get_config_var('DEBUG_EXT'): + template += sysconfig.get_config_var('DEBUG_EXT') + pythonlib = (template % + (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) + if hasattr(sys, 'abiflags'): + pythonlib += sys.abiflags + ensure('libraries', pythonlib) + if sys.platform == "win32": + ensure('extra_link_args', '/MANIFEST') + + def set_source(self, module_name, source, source_extension='.c', **kwds): + import os + if hasattr(self, '_assigned_source'): + raise ValueError("set_source() cannot be called several times " + "per ffi object") + if not isinstance(module_name, basestring): + raise TypeError("'module_name' must be a string") + if os.sep in module_name or (os.altsep and os.altsep in module_name): + raise ValueError("'module_name' must not contain '/': use a dotted " + "name to make a 'package.module' location") + self._assigned_source = (str(module_name), source, + source_extension, kwds) + + def set_source_pkgconfig(self, module_name, pkgconfig_libs, source, + source_extension='.c', **kwds): + from . 
import pkgconfig
+        if not isinstance(pkgconfig_libs, list):
+            raise TypeError("the pkgconfig_libs argument must be a list "
+                            "of package names")
+        kwds2 = pkgconfig.flags_from_pkgconfig(pkgconfig_libs)
+        pkgconfig.merge_flags(kwds, kwds2)
+        self.set_source(module_name, source, source_extension, **kwds)
+
+    def distutils_extension(self, tmpdir='build', verbose=True):
+        from cffi._shimmed_dist_utils import mkpath
+        from .recompiler import recompile
+        #
+        if not hasattr(self, '_assigned_source'):
+            if hasattr(self, 'verifier'):     # fallback, 'tmpdir' ignored
+                return self.verifier.get_extension()
+            raise ValueError("set_source() must be called before"
+                             " distutils_extension()")
+        module_name, source, source_extension, kwds = self._assigned_source
+        if source is None:
+            raise TypeError("distutils_extension() is only for C extension "
+                            "modules, not for dlopen()-style pure Python "
+                            "modules")
+        mkpath(tmpdir)
+        ext, updated = recompile(self, module_name,
+                                 source, tmpdir=tmpdir, extradir=tmpdir,
+                                 source_extension=source_extension,
+                                 call_c_compiler=False, **kwds)
+        if verbose:
+            if updated:
+                sys.stderr.write("regenerated: %r\n" % (ext.sources[0],))
+            else:
+                sys.stderr.write("not modified: %r\n" % (ext.sources[0],))
+        return ext
+
+    def emit_c_code(self, filename):
+        from .recompiler import recompile
+        #
+        if not hasattr(self, '_assigned_source'):
+            raise ValueError("set_source() must be called before emit_c_code()")
+        module_name, source, source_extension, kwds = self._assigned_source
+        if source is None:
+            raise TypeError("emit_c_code() is only for C extension modules, "
+                            "not for dlopen()-style pure Python modules")
+        recompile(self, module_name, source,
+                  c_file=filename, call_c_compiler=False,
+                  uses_ffiplatform=False, **kwds)
+
+    def emit_python_code(self, filename):
+        from .recompiler import recompile
+        #
+        if not hasattr(self, '_assigned_source'):
+            raise ValueError("set_source() must be called before "
+                             "emit_python_code()")
+        module_name, source, source_extension, kwds = self._assigned_source
+        if source is not None:
+            raise TypeError("emit_python_code() is only for dlopen()-style "
+                            "pure Python modules, not for C extension modules")
+        recompile(self, module_name, source,
+                  c_file=filename, call_c_compiler=False,
+                  uses_ffiplatform=False, **kwds)
+
+    def compile(self, tmpdir='.', verbose=0, target=None, debug=None):
+        """The 'target' argument gives the final file name of the
+        compiled DLL.  Use '*' to force distutils' choice, suitable for
+        regular CPython C API modules.  Use a file name ending in '.*'
+        to ask for the system's default extension for dynamic libraries
+        (.so/.dll/.dylib).
+
+        The default is '*' when building a non-embedded C API extension,
+        and (module_name + '.*') when building an embedded library.
+        """
+        from .recompiler import recompile
+        #
+        if not hasattr(self, '_assigned_source'):
+            raise ValueError("set_source() must be called before compile()")
+        module_name, source, source_extension, kwds = self._assigned_source
+        return recompile(self, module_name, source, tmpdir=tmpdir,
+                         target=target, source_extension=source_extension,
+                         compiler_verbose=verbose, debug=debug, **kwds)
+
+    def init_once(self, func, tag):
+        # Read _init_once_cache[tag], which is either (False, lock) if
+        # we're calling the function now in some thread, or (True, result).
+        # Don't call setdefault() in most cases, to avoid allocating and
+        # immediately freeing a lock; but still use setdefault() to avoid
+        # races.
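The set_source()/compile() pair added above is cffi's out-of-line API mode. As a reference for reviewers, a minimal build script using this API might look like the sketch below; the module name "_example" and the cos() declaration are illustrative choices, not part of this patch:

    import cffi

    ffibuilder = cffi.FFI()
    ffibuilder.cdef("double cos(double x);")           # declare the C API
    ffibuilder.set_source("_example", "#include <math.h>")

    if __name__ == "__main__":
        # Generates _example.c and invokes the C compiler on it.
        ffibuilder.compile(verbose=True)

    # After building:  from _example import ffi, lib;  lib.cos(1.0)
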
+ try: + x = self._init_once_cache[tag] + except KeyError: + x = self._init_once_cache.setdefault(tag, (False, allocate_lock())) + # Common case: we got (True, result), so we return the result. + if x[0]: + return x[1] + # Else, it's a lock. Acquire it to serialize the following tests. + with x[1]: + # Read again from _init_once_cache the current status. + x = self._init_once_cache[tag] + if x[0]: + return x[1] + # Call the function and store the result back. + result = func() + self._init_once_cache[tag] = (True, result) + return result + + def embedding_init_code(self, pysource): + if self._embedding: + raise ValueError("embedding_init_code() can only be called once") + # fix 'pysource' before it gets dumped into the C file: + # - remove empty lines at the beginning, so it starts at "line 1" + # - dedent, if all non-empty lines are indented + # - check for SyntaxErrors + import re + match = re.match(r'\s*\n', pysource) + if match: + pysource = pysource[match.end():] + lines = pysource.splitlines() or [''] + prefix = re.match(r'\s*', lines[0]).group() + for i in range(1, len(lines)): + line = lines[i] + if line.rstrip(): + while not line.startswith(prefix): + prefix = prefix[:-1] + i = len(prefix) + lines = [line[i:]+'\n' for line in lines] + pysource = ''.join(lines) + # + compile(pysource, "cffi_init", "exec") + # + self._embedding = pysource + + def def_extern(self, *args, **kwds): + raise ValueError("ffi.def_extern() is only available on API-mode FFI " + "objects") + + def list_types(self): + """Returns the user type names known to this FFI instance. + This returns a tuple containing three lists of names: + (typedef_names, names_of_structs, names_of_unions) + """ + typedefs = [] + structs = [] + unions = [] + for key in self._parser._declarations: + if key.startswith('typedef '): + typedefs.append(key[8:]) + elif key.startswith('struct '): + structs.append(key[7:]) + elif key.startswith('union '): + unions.append(key[6:]) + typedefs.sort() + structs.sort() + unions.sort() + return (typedefs, structs, unions) + + +def _load_backend_lib(backend, name, flags): + import os + if not isinstance(name, basestring): + if sys.platform != "win32" or name is not None: + return backend.load_library(name, flags) + name = "c" # Windows: load_library(None) fails, but this works + # on Python 2 (backward compatibility hack only) + first_error = None + if '.' in name or '/' in name or os.sep in name: + try: + return backend.load_library(name, flags) + except OSError as e: + first_error = e + import ctypes.util + path = ctypes.util.find_library(name) + if path is None: + if name == "c" and sys.platform == "win32" and sys.version_info >= (3,): + raise OSError("dlopen(None) cannot work on Windows for Python 3 " + "(see http://bugs.python.org/issue23606)") + msg = ("ctypes.util.find_library() did not manage " + "to locate a library called %r" % (name,)) + if first_error is not None: + msg = "%s. 
Additionally, %s" % (first_error, msg) + raise OSError(msg) + return backend.load_library(path, flags) + +def _make_ffi_library(ffi, libname, flags): + backend = ffi._backend + backendlib = _load_backend_lib(backend, libname, flags) + # + def accessor_function(name): + key = 'function ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + value = backendlib.load_function(BType, name) + library.__dict__[name] = value + # + def accessor_variable(name): + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + read_variable = backendlib.read_variable + write_variable = backendlib.write_variable + setattr(FFILibrary, name, property( + lambda self: read_variable(BType, name), + lambda self, value: write_variable(BType, name, value))) + # + def addressof_var(name): + try: + return addr_variables[name] + except KeyError: + with ffi._lock: + if name not in addr_variables: + key = 'variable ' + name + tp, _ = ffi._parser._declarations[key] + BType = ffi._get_cached_btype(tp) + if BType.kind != 'array': + BType = model.pointer_cache(ffi, BType) + p = backendlib.load_function(BType, name) + addr_variables[name] = p + return addr_variables[name] + # + def accessor_constant(name): + raise NotImplementedError("non-integer constant '%s' cannot be " + "accessed from a dlopen() library" % (name,)) + # + def accessor_int_constant(name): + library.__dict__[name] = ffi._parser._int_constants[name] + # + accessors = {} + accessors_version = [False] + addr_variables = {} + # + def update_accessors(): + if accessors_version[0] is ffi._cdef_version: + return + # + for key, (tp, _) in ffi._parser._declarations.items(): + if not isinstance(tp, model.EnumType): + tag, name = key.split(' ', 1) + if tag == 'function': + accessors[name] = accessor_function + elif tag == 'variable': + accessors[name] = accessor_variable + elif tag == 'constant': + accessors[name] = accessor_constant + else: + for i, enumname in enumerate(tp.enumerators): + def accessor_enum(name, tp=tp, i=i): + tp.check_not_partial() + library.__dict__[name] = tp.enumvalues[i] + accessors[enumname] = accessor_enum + for name in ffi._parser._int_constants: + accessors.setdefault(name, accessor_int_constant) + accessors_version[0] = ffi._cdef_version + # + def make_accessor(name): + with ffi._lock: + if name in library.__dict__ or name in FFILibrary.__dict__: + return # added by another thread while waiting for the lock + if name not in accessors: + update_accessors() + if name not in accessors: + raise AttributeError(name) + accessors[name](name) + # + class FFILibrary(object): + def __getattr__(self, name): + make_accessor(name) + return getattr(self, name) + def __setattr__(self, name, value): + try: + property = getattr(self.__class__, name) + except AttributeError: + make_accessor(name) + setattr(self, name, value) + else: + property.__set__(self, value) + def __dir__(self): + with ffi._lock: + update_accessors() + return accessors.keys() + def __addressof__(self, name): + if name in library.__dict__: + return library.__dict__[name] + if name in FFILibrary.__dict__: + return addressof_var(name) + make_accessor(name) + if name in library.__dict__: + return library.__dict__[name] + if name in FFILibrary.__dict__: + return addressof_var(name) + raise AttributeError("cffi library has no function or " + "global variable named '%s'" % (name,)) + def __cffi_close__(self): + backendlib.close_lib() + self.__dict__.clear() + # + if isinstance(libname, basestring): + try: + if not 
isinstance(libname, str):    # unicode, on Python 2
+                libname = libname.encode('utf-8')
+            FFILibrary.__name__ = 'FFILibrary_%s' % libname
+        except UnicodeError:
+            pass
+    library = FFILibrary()
+    return library, library.__dict__
+
+def _builtin_function_type(func):
+    # a hack to make at least ffi.typeof(builtin_function) work,
+    # if the builtin function was obtained by 'vengine_cpy'.
+    import sys
+    try:
+        module = sys.modules[func.__module__]
+        ffi = module._cffi_original_ffi
+        types_of_builtin_funcs = module._cffi_types_of_builtin_funcs
+        tp = types_of_builtin_funcs[func]
+    except (KeyError, AttributeError, TypeError):
+        return None
+    else:
+        with ffi._lock:
+            return ffi._get_cached_btype(tp)
diff --git a/python/cffi/backend_ctypes.py b/python/cffi/backend_ctypes.py
new file mode 100644
index 000000000..e7956a79c
--- /dev/null
+++ b/python/cffi/backend_ctypes.py
@@ -0,0 +1,1121 @@
+import ctypes, ctypes.util, operator, sys
+from . import model
+
+if sys.version_info < (3,):
+    bytechr = chr
+else:
+    unicode = str
+    long = int
+    xrange = range
+    bytechr = lambda num: bytes([num])
+
+class CTypesType(type):
+    pass
+
+class CTypesData(object):
+    __metaclass__ = CTypesType
+    __slots__ = ['__weakref__']
+    __name__ = '<cdata>'
+
+    def __init__(self, *args):
+        raise TypeError("cannot instantiate %r" % (self.__class__,))
+
+    @classmethod
+    def _newp(cls, init):
+        raise TypeError("expected a pointer or array ctype, got '%s'"
+                        % (cls._get_c_name(),))
+
+    @staticmethod
+    def _to_ctypes(value):
+        raise TypeError
+
+    @classmethod
+    def _arg_to_ctypes(cls, *value):
+        try:
+            ctype = cls._ctype
+        except AttributeError:
+            raise TypeError("cannot create an instance of %r" % (cls,))
+        if value:
+            res = cls._to_ctypes(*value)
+            if not isinstance(res, ctype):
+                res = cls._ctype(res)
+        else:
+            res = cls._ctype()
+        return res
+
+    @classmethod
+    def _create_ctype_obj(cls, init):
+        if init is None:
+            return cls._arg_to_ctypes()
+        else:
+            return cls._arg_to_ctypes(init)
+
+    @staticmethod
+    def _from_ctypes(ctypes_value):
+        raise TypeError
+
+    @classmethod
+    def _get_c_name(cls, replace_with=''):
+        return cls._reftypename.replace(' &', replace_with)
+
+    @classmethod
+    def _fix_class(cls):
+        cls.__name__ = 'CData<%s>' % (cls._get_c_name(),)
+        cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),)
+        cls.__module__ = 'ffi'
+
+    def _get_own_repr(self):
+        raise NotImplementedError
+
+    def _addr_repr(self, address):
+        if address == 0:
+            return 'NULL'
+        else:
+            if address < 0:
+                address += 1 << (8*ctypes.sizeof(ctypes.c_void_p))
+            return '0x%x' % address
+
+    def __repr__(self, c_name=None):
+        own = self._get_own_repr()
+        return '<cdata %r %s>' % (c_name or self._get_c_name(), own)
+
+    def _convert_to_address(self, BClass):
+        if BClass is None:
+            raise TypeError("cannot convert %r to an address" % (
+                self._get_c_name(),))
+        else:
+            raise TypeError("cannot convert %r to %r" % (
+                self._get_c_name(), BClass._get_c_name()))
+
+    @classmethod
+    def _get_size(cls):
+        return ctypes.sizeof(cls._ctype)
+
+    def _get_size_of_instance(self):
+        return ctypes.sizeof(self._ctype)
+
+    @classmethod
+    def _cast_from(cls, source):
+        raise TypeError("cannot cast to %r" % (cls._get_c_name(),))
+
+    def _cast_to_integer(self):
+        return self._convert_to_address(None)
+
+    @classmethod
+    def _alignment(cls):
+        return ctypes.alignment(cls._ctype)
+
+    def __iter__(self):
+        raise TypeError("cdata %r does not support iteration" % (
+            self._get_c_name()),)
+
+    def _make_cmp(name):
+        cmpfunc = getattr(operator, name)
+        def cmp(self, other):
+            v_is_ptr = not isinstance(self,
CTypesGenericPrimitive) + w_is_ptr = (isinstance(other, CTypesData) and + not isinstance(other, CTypesGenericPrimitive)) + if v_is_ptr and w_is_ptr: + return cmpfunc(self._convert_to_address(None), + other._convert_to_address(None)) + elif v_is_ptr or w_is_ptr: + return NotImplemented + else: + if isinstance(self, CTypesGenericPrimitive): + self = self._value + if isinstance(other, CTypesGenericPrimitive): + other = other._value + return cmpfunc(self, other) + cmp.func_name = name + return cmp + + __eq__ = _make_cmp('__eq__') + __ne__ = _make_cmp('__ne__') + __lt__ = _make_cmp('__lt__') + __le__ = _make_cmp('__le__') + __gt__ = _make_cmp('__gt__') + __ge__ = _make_cmp('__ge__') + + def __hash__(self): + return hash(self._convert_to_address(None)) + + def _to_string(self, maxlen): + raise TypeError("string(): %r" % (self,)) + + +class CTypesGenericPrimitive(CTypesData): + __slots__ = [] + + def __hash__(self): + return hash(self._value) + + def _get_own_repr(self): + return repr(self._from_ctypes(self._value)) + + +class CTypesGenericArray(CTypesData): + __slots__ = [] + + @classmethod + def _newp(cls, init): + return cls(init) + + def __iter__(self): + for i in xrange(len(self)): + yield self[i] + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + +class CTypesGenericPtr(CTypesData): + __slots__ = ['_address', '_as_ctype_ptr'] + _automatic_casts = False + kind = "pointer" + + @classmethod + def _newp(cls, init): + return cls(init) + + @classmethod + def _cast_from(cls, source): + if source is None: + address = 0 + elif isinstance(source, CTypesData): + address = source._cast_to_integer() + elif isinstance(source, (int, long)): + address = source + else: + raise TypeError("bad type for cast to %r: %r" % + (cls, type(source).__name__)) + return cls._new_pointer_at(address) + + @classmethod + def _new_pointer_at(cls, address): + self = cls.__new__(cls) + self._address = address + self._as_ctype_ptr = ctypes.cast(address, cls._ctype) + return self + + def _get_own_repr(self): + try: + return self._addr_repr(self._address) + except AttributeError: + return '???' 
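CTypesGenericPtr above models every pointer as a raw integer address: _cast_from() accepts None, integers, and other cdata objects, and _cast_to_integer() simply returns the stored address. As a sketch of the user-visible behaviour (shown here through the regular FFI front-end; the default C backend behaves the same way):

    import cffi

    ffi = cffi.FFI()

    p = ffi.cast("int *", 0)       # integer source -> raw address (NULL here)
    print(bool(p))                 # False: __bool__ tests the address

    buf = ffi.new("int[1]", [42])
    q = ffi.cast("void *", buf)    # cdata source -> _cast_to_integer()
    r = ffi.cast("int *", q)
    print(r[0])                    # 42: same address, reinterpreted
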
+ + def _cast_to_integer(self): + return self._address + + def __nonzero__(self): + return bool(self._address) + __bool__ = __nonzero__ + + @classmethod + def _to_ctypes(cls, value): + if not isinstance(value, CTypesData): + raise TypeError("unexpected %s object" % type(value).__name__) + address = value._convert_to_address(cls) + return ctypes.cast(address, cls._ctype) + + @classmethod + def _from_ctypes(cls, ctypes_ptr): + address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0 + return cls._new_pointer_at(address) + + @classmethod + def _initialize(cls, ctypes_ptr, value): + if value: + ctypes_ptr.contents = cls._to_ctypes(value).contents + + def _convert_to_address(self, BClass): + if (BClass in (self.__class__, None) or BClass._automatic_casts + or self._automatic_casts): + return self._address + else: + return CTypesData._convert_to_address(self, BClass) + + +class CTypesBaseStructOrUnion(CTypesData): + __slots__ = ['_blob'] + + @classmethod + def _create_ctype_obj(cls, init): + # may be overridden + raise TypeError("cannot instantiate opaque type %s" % (cls,)) + + def _get_own_repr(self): + return self._addr_repr(ctypes.addressof(self._blob)) + + @classmethod + def _offsetof(cls, fieldname): + return getattr(cls._ctype, fieldname).offset + + def _convert_to_address(self, BClass): + if getattr(BClass, '_BItem', None) is self.__class__: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @classmethod + def _from_ctypes(cls, ctypes_struct_or_union): + self = cls.__new__(cls) + self._blob = ctypes_struct_or_union + return self + + @classmethod + def _to_ctypes(cls, value): + return value._blob + + def __repr__(self, c_name=None): + return CTypesData.__repr__(self, c_name or self._get_c_name(' &')) + + +class CTypesBackend(object): + + PRIMITIVE_TYPES = { + 'char': ctypes.c_char, + 'short': ctypes.c_short, + 'int': ctypes.c_int, + 'long': ctypes.c_long, + 'long long': ctypes.c_longlong, + 'signed char': ctypes.c_byte, + 'unsigned char': ctypes.c_ubyte, + 'unsigned short': ctypes.c_ushort, + 'unsigned int': ctypes.c_uint, + 'unsigned long': ctypes.c_ulong, + 'unsigned long long': ctypes.c_ulonglong, + 'float': ctypes.c_float, + 'double': ctypes.c_double, + '_Bool': ctypes.c_bool, + } + + for _name in ['unsigned long long', 'unsigned long', + 'unsigned int', 'unsigned short', 'unsigned char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name] + + for _name in ['long long', 'long', 'int', 'short', 'signed char']: + _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) + PRIMITIVE_TYPES['int%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_void_p): + PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name] + PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name] + if _size == ctypes.sizeof(ctypes.c_size_t): + PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name] + + + def __init__(self): + self.RTLD_LAZY = 0 # not supported anyway by ctypes + self.RTLD_NOW = 0 + self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL + self.RTLD_LOCAL = ctypes.RTLD_LOCAL + + def set_ffi(self, ffi): + self.ffi = ffi + + def _get_types(self): + return CTypesData, CTypesType + + def load_library(self, path, flags=0): + cdll = ctypes.CDLL(path, flags) + return CTypesLibrary(self, cdll) + + def 
new_void_type(self): + class CTypesVoid(CTypesData): + __slots__ = [] + _reftypename = 'void &' + @staticmethod + def _from_ctypes(novalue): + return None + @staticmethod + def _to_ctypes(novalue): + if novalue is not None: + raise TypeError("None expected, got %s object" % + (type(novalue).__name__,)) + return None + CTypesVoid._fix_class() + return CTypesVoid + + def new_primitive_type(self, name): + if name == 'wchar_t': + raise NotImplementedError(name) + ctype = self.PRIMITIVE_TYPES[name] + if name == 'char': + kind = 'char' + elif name in ('float', 'double'): + kind = 'float' + else: + if name in ('signed char', 'unsigned char'): + kind = 'byte' + elif name == '_Bool': + kind = 'bool' + else: + kind = 'int' + is_signed = (ctype(-1).value == -1) + # + def _cast_source_to_int(source): + if isinstance(source, (int, long, float)): + source = int(source) + elif isinstance(source, CTypesData): + source = source._cast_to_integer() + elif isinstance(source, bytes): + source = ord(source) + elif source is None: + source = 0 + else: + raise TypeError("bad type for cast to %r: %r" % + (CTypesPrimitive, type(source).__name__)) + return source + # + kind1 = kind + class CTypesPrimitive(CTypesGenericPrimitive): + __slots__ = ['_value'] + _ctype = ctype + _reftypename = '%s &' % name + kind = kind1 + + def __init__(self, value): + self._value = value + + @staticmethod + def _create_ctype_obj(init): + if init is None: + return ctype() + return ctype(CTypesPrimitive._to_ctypes(init)) + + if kind == 'int' or kind == 'byte': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = ctype(source).value # cast within range + return cls(source) + def __int__(self): + return self._value + + if kind == 'bool': + @classmethod + def _cast_from(cls, source): + if not isinstance(source, (int, long, float)): + source = _cast_source_to_int(source) + return cls(bool(source)) + def __int__(self): + return int(self._value) + + if kind == 'char': + @classmethod + def _cast_from(cls, source): + source = _cast_source_to_int(source) + source = bytechr(source & 0xFF) + return cls(source) + def __int__(self): + return ord(self._value) + + if kind == 'float': + @classmethod + def _cast_from(cls, source): + if isinstance(source, float): + pass + elif isinstance(source, CTypesGenericPrimitive): + if hasattr(source, '__float__'): + source = float(source) + else: + source = int(source) + else: + source = _cast_source_to_int(source) + source = ctype(source).value # fix precision + return cls(source) + def __int__(self): + return int(self._value) + def __float__(self): + return self._value + + _cast_to_integer = __int__ + + if kind == 'int' or kind == 'byte' or kind == 'bool': + @staticmethod + def _to_ctypes(x): + if not isinstance(x, (int, long)): + if isinstance(x, CTypesData): + x = int(x) + else: + raise TypeError("integer expected, got %s" % + type(x).__name__) + if ctype(x).value != x: + if not is_signed and x < 0: + raise OverflowError("%s: negative integer" % name) + else: + raise OverflowError("%s: integer out of bounds" + % name) + return x + + if kind == 'char': + @staticmethod + def _to_ctypes(x): + if isinstance(x, bytes) and len(x) == 1: + return x + if isinstance(x, CTypesPrimitive): # > + return x._value + raise TypeError("character expected, got %s" % + type(x).__name__) + def __nonzero__(self): + return ord(self._value) != 0 + else: + def __nonzero__(self): + return self._value != 0 + __bool__ = __nonzero__ + + if kind == 'float': + @staticmethod + def _to_ctypes(x): + if 
not isinstance(x, (int, long, float, CTypesData)): + raise TypeError("float expected, got %s" % + type(x).__name__) + return ctype(x).value + + @staticmethod + def _from_ctypes(value): + return getattr(value, 'value', value) + + @staticmethod + def _initialize(blob, init): + blob.value = CTypesPrimitive._to_ctypes(init) + + if kind == 'char': + def _to_string(self, maxlen): + return self._value + if kind == 'byte': + def _to_string(self, maxlen): + return chr(self._value & 0xff) + # + CTypesPrimitive._fix_class() + return CTypesPrimitive + + def new_pointer_type(self, BItem): + getbtype = self.ffi._get_cached_btype + if BItem is getbtype(model.PrimitiveType('char')): + kind = 'charp' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'bytep' + elif BItem is getbtype(model.void_type): + kind = 'voidp' + else: + kind = 'generic' + # + class CTypesPtr(CTypesGenericPtr): + __slots__ = ['_own'] + if kind == 'charp': + __slots__ += ['__as_strbuf'] + _BItem = BItem + if hasattr(BItem, '_ctype'): + _ctype = ctypes.POINTER(BItem._ctype) + _bitem_size = ctypes.sizeof(BItem._ctype) + else: + _ctype = ctypes.c_void_p + if issubclass(BItem, CTypesGenericArray): + _reftypename = BItem._get_c_name('(* &)') + else: + _reftypename = BItem._get_c_name(' * &') + + def __init__(self, init): + ctypeobj = BItem._create_ctype_obj(init) + if kind == 'charp': + self.__as_strbuf = ctypes.create_string_buffer( + ctypeobj.value + b'\x00') + self._as_ctype_ptr = ctypes.cast( + self.__as_strbuf, self._ctype) + else: + self._as_ctype_ptr = ctypes.pointer(ctypeobj) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own = True + + def __add__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address + + other * self._bitem_size) + else: + return NotImplemented + + def __sub__(self, other): + if isinstance(other, (int, long)): + return self._new_pointer_at(self._address - + other * self._bitem_size) + elif type(self) is type(other): + return (self._address - other._address) // self._bitem_size + else: + return NotImplemented + + def __getitem__(self, index): + if getattr(self, '_own', False) and index != 0: + raise IndexError + return BItem._from_ctypes(self._as_ctype_ptr[index]) + + def __setitem__(self, index, value): + self._as_ctype_ptr[index] = BItem._to_ctypes(value) + + if kind == 'charp' or kind == 'voidp': + @classmethod + def _arg_to_ctypes(cls, *value): + if value and isinstance(value[0], bytes): + return ctypes.c_char_p(value[0]) + else: + return super(CTypesPtr, cls)._arg_to_ctypes(*value) + + if kind == 'charp' or kind == 'bytep': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = sys.maxsize + p = ctypes.cast(self._as_ctype_ptr, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % ( + ctypes.sizeof(self._as_ctype_ptr.contents),) + return super(CTypesPtr, self)._get_own_repr() + # + if (BItem is self.ffi._get_cached_btype(model.void_type) or + BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))): + CTypesPtr._automatic_casts = True + # + CTypesPtr._fix_class() + return CTypesPtr + + def new_array_type(self, CTypesPtr, length): + if length is None: + brackets = ' &[]' + else: + brackets = ' &[%d]' % length + BItem = CTypesPtr._BItem + getbtype = self.ffi._get_cached_btype + if 
BItem is getbtype(model.PrimitiveType('char')): + kind = 'char' + elif BItem in (getbtype(model.PrimitiveType('signed char')), + getbtype(model.PrimitiveType('unsigned char'))): + kind = 'byte' + else: + kind = 'generic' + # + class CTypesArray(CTypesGenericArray): + __slots__ = ['_blob', '_own'] + if length is not None: + _ctype = BItem._ctype * length + else: + __slots__.append('_ctype') + _reftypename = BItem._get_c_name(brackets) + _declared_length = length + _CTPtr = CTypesPtr + + def __init__(self, init): + if length is None: + if isinstance(init, (int, long)): + len1 = init + init = None + elif kind == 'char' and isinstance(init, bytes): + len1 = len(init) + 1 # extra null + else: + init = tuple(init) + len1 = len(init) + self._ctype = BItem._ctype * len1 + self._blob = self._ctype() + self._own = True + if init is not None: + self._initialize(self._blob, init) + + @staticmethod + def _initialize(blob, init): + if isinstance(init, bytes): + init = [init[i:i+1] for i in range(len(init))] + else: + if isinstance(init, CTypesGenericArray): + if (len(init) != len(blob) or + not isinstance(init, CTypesArray)): + raise TypeError("length/type mismatch: %s" % (init,)) + init = tuple(init) + if len(init) > len(blob): + raise IndexError("too many initializers") + addr = ctypes.cast(blob, ctypes.c_void_p).value + PTR = ctypes.POINTER(BItem._ctype) + itemsize = ctypes.sizeof(BItem._ctype) + for i, value in enumerate(init): + p = ctypes.cast(addr + i * itemsize, PTR) + BItem._initialize(p.contents, value) + + def __len__(self): + return len(self._blob) + + def __getitem__(self, index): + if not (0 <= index < len(self._blob)): + raise IndexError + return BItem._from_ctypes(self._blob[index]) + + def __setitem__(self, index, value): + if not (0 <= index < len(self._blob)): + raise IndexError + self._blob[index] = BItem._to_ctypes(value) + + if kind == 'char' or kind == 'byte': + def _to_string(self, maxlen): + if maxlen < 0: + maxlen = len(self._blob) + p = ctypes.cast(self._blob, + ctypes.POINTER(ctypes.c_char)) + n = 0 + while n < maxlen and p[n] != b'\x00': + n += 1 + return b''.join([p[i] for i in range(n)]) + + def _get_own_repr(self): + if getattr(self, '_own', False): + return 'owning %d bytes' % (ctypes.sizeof(self._blob),) + return super(CTypesArray, self)._get_own_repr() + + def _convert_to_address(self, BClass): + if BClass in (CTypesPtr, None) or BClass._automatic_casts: + return ctypes.addressof(self._blob) + else: + return CTypesData._convert_to_address(self, BClass) + + @staticmethod + def _from_ctypes(ctypes_array): + self = CTypesArray.__new__(CTypesArray) + self._blob = ctypes_array + return self + + @staticmethod + def _arg_to_ctypes(value): + return CTypesPtr._arg_to_ctypes(value) + + def __add__(self, other): + if isinstance(other, (int, long)): + return CTypesPtr._new_pointer_at( + ctypes.addressof(self._blob) + + other * ctypes.sizeof(BItem._ctype)) + else: + return NotImplemented + + @classmethod + def _cast_from(cls, source): + raise NotImplementedError("casting to %r" % ( + cls._get_c_name(),)) + # + CTypesArray._fix_class() + return CTypesArray + + def _new_struct_or_union(self, kind, name, base_ctypes_class): + # + class struct_or_union(base_ctypes_class): + pass + struct_or_union.__name__ = '%s_%s' % (kind, name) + kind1 = kind + # + class CTypesStructOrUnion(CTypesBaseStructOrUnion): + __slots__ = ['_blob'] + _ctype = struct_or_union + _reftypename = '%s &' % (name,) + _kind = kind = kind1 + # + CTypesStructOrUnion._fix_class() + return CTypesStructOrUnion + + def 
new_struct_type(self, name): + return self._new_struct_or_union('struct', name, ctypes.Structure) + + def new_union_type(self, name): + return self._new_struct_or_union('union', name, ctypes.Union) + + def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, + totalsize=-1, totalalignment=-1, sflags=0, + pack=0): + if totalsize >= 0 or totalalignment >= 0: + raise NotImplementedError("the ctypes backend of CFFI does not support " + "structures completed by verify(); please " + "compile and install the _cffi_backend module.") + struct_or_union = CTypesStructOrUnion._ctype + fnames = [fname for (fname, BField, bitsize) in fields] + btypes = [BField for (fname, BField, bitsize) in fields] + bitfields = [bitsize for (fname, BField, bitsize) in fields] + # + bfield_types = {} + cfields = [] + for (fname, BField, bitsize) in fields: + if bitsize < 0: + cfields.append((fname, BField._ctype)) + bfield_types[fname] = BField + else: + cfields.append((fname, BField._ctype, bitsize)) + bfield_types[fname] = Ellipsis + if sflags & 8: + struct_or_union._pack_ = 1 + elif pack: + struct_or_union._pack_ = pack + struct_or_union._fields_ = cfields + CTypesStructOrUnion._bfield_types = bfield_types + # + @staticmethod + def _create_ctype_obj(init): + result = struct_or_union() + if init is not None: + initialize(result, init) + return result + CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj + # + def initialize(blob, init): + if is_union: + if len(init) > 1: + raise ValueError("union initializer: %d items given, but " + "only one supported (use a dict if needed)" + % (len(init),)) + if not isinstance(init, dict): + if isinstance(init, (bytes, unicode)): + raise TypeError("union initializer: got a str") + init = tuple(init) + if len(init) > len(fnames): + raise ValueError("too many values for %s initializer" % + CTypesStructOrUnion._get_c_name()) + init = dict(zip(fnames, init)) + addr = ctypes.addressof(blob) + for fname, value in init.items(): + BField, bitsize = name2fieldtype[fname] + assert bitsize < 0, \ + "not implemented: initializer with bit fields" + offset = CTypesStructOrUnion._offsetof(fname) + PTR = ctypes.POINTER(BField._ctype) + p = ctypes.cast(addr + offset, PTR) + BField._initialize(p.contents, value) + is_union = CTypesStructOrUnion._kind == 'union' + name2fieldtype = dict(zip(fnames, zip(btypes, bitfields))) + # + for fname, BField, bitsize in fields: + if fname == '': + raise NotImplementedError("nested anonymous structs/unions") + if hasattr(CTypesStructOrUnion, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + if bitsize < 0: + def getter(self, fname=fname, BField=BField, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BField._from_ctypes(p.contents) + def setter(self, value, fname=fname, BField=BField): + setattr(self._blob, fname, BField._to_ctypes(value)) + # + if issubclass(BField, CTypesGenericArray): + setter = None + if BField._declared_length == 0: + def getter(self, fname=fname, BFieldPtr=BField._CTPtr, + offset=CTypesStructOrUnion._offsetof(fname), + PTR=ctypes.POINTER(BField._ctype)): + addr = ctypes.addressof(self._blob) + p = ctypes.cast(addr + offset, PTR) + return BFieldPtr._from_ctypes(p) + # + else: + def getter(self, fname=fname, BField=BField): + return BField._from_ctypes(getattr(self._blob, fname)) + def setter(self, value, fname=fname, BField=BField): + # xxx obscure workaround + 
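+                    # (The "xxx obscure workaround" above: ctypes silently
+                    # truncates an out-of-range bitfield store, so the code
+                    # below writes the value, reads it back, and restores
+                    # the old value and raises OverflowError if the
+                    # round-trip changed it.)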
value = BField._to_ctypes(value) + oldvalue = getattr(self._blob, fname) + setattr(self._blob, fname, value) + if value != getattr(self._blob, fname): + setattr(self._blob, fname, oldvalue) + raise OverflowError("value too large for bitfield") + setattr(CTypesStructOrUnion, fname, property(getter, setter)) + # + CTypesPtr = self.ffi._get_cached_btype(model.PointerType(tp)) + for fname in fnames: + if hasattr(CTypesPtr, fname): + raise ValueError("the field name %r conflicts in " + "the ctypes backend" % fname) + def getter(self, fname=fname): + return getattr(self[0], fname) + def setter(self, value, fname=fname): + setattr(self[0], fname, value) + setattr(CTypesPtr, fname, property(getter, setter)) + + def new_function_type(self, BArgs, BResult, has_varargs): + nameargs = [BArg._get_c_name() for BArg in BArgs] + if has_varargs: + nameargs.append('...') + nameargs = ', '.join(nameargs) + # + class CTypesFunctionPtr(CTypesGenericPtr): + __slots__ = ['_own_callback', '_name'] + _ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None), + *[BArg._ctype for BArg in BArgs], + use_errno=True) + _reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,)) + + def __init__(self, init, error=None): + # create a callback to the Python callable init() + import traceback + assert not has_varargs, "varargs not supported for callbacks" + if getattr(BResult, '_ctype', None) is not None: + error = BResult._from_ctypes( + BResult._create_ctype_obj(error)) + else: + error = None + def callback(*args): + args2 = [] + for arg, BArg in zip(args, BArgs): + args2.append(BArg._from_ctypes(arg)) + try: + res2 = init(*args2) + res2 = BResult._to_ctypes(res2) + except: + traceback.print_exc() + res2 = error + if issubclass(BResult, CTypesGenericPtr): + if res2: + res2 = ctypes.cast(res2, ctypes.c_void_p).value + # .value: http://bugs.python.org/issue1574593 + else: + res2 = None + #print repr(res2) + return res2 + if issubclass(BResult, CTypesGenericPtr): + # The only pointers callbacks can return are void*s: + # http://bugs.python.org/issue5710 + callback_ctype = ctypes.CFUNCTYPE( + ctypes.c_void_p, + *[BArg._ctype for BArg in BArgs], + use_errno=True) + else: + callback_ctype = CTypesFunctionPtr._ctype + self._as_ctype_ptr = callback_ctype(callback) + self._address = ctypes.cast(self._as_ctype_ptr, + ctypes.c_void_p).value + self._own_callback = init + + @staticmethod + def _initialize(ctypes_ptr, value): + if value: + raise NotImplementedError("ctypes backend: not supported: " + "initializers for function pointers") + + def __repr__(self): + c_name = getattr(self, '_name', None) + if c_name: + i = self._reftypename.index('(* &)') + if self._reftypename[i-1] not in ' )*': + c_name = ' ' + c_name + c_name = self._reftypename.replace('(* &)', c_name) + return CTypesData.__repr__(self, c_name) + + def _get_own_repr(self): + if getattr(self, '_own_callback', None) is not None: + return 'calling %r' % (self._own_callback,) + return super(CTypesFunctionPtr, self)._get_own_repr() + + def __call__(self, *args): + if has_varargs: + assert len(args) >= len(BArgs) + extraargs = args[len(BArgs):] + args = args[:len(BArgs)] + else: + assert len(args) == len(BArgs) + ctypes_args = [] + for arg, BArg in zip(args, BArgs): + ctypes_args.append(BArg._arg_to_ctypes(arg)) + if has_varargs: + for i, arg in enumerate(extraargs): + if arg is None: + ctypes_args.append(ctypes.c_void_p(0)) # NULL + continue + if not isinstance(arg, CTypesData): + raise TypeError( + "argument %d passed in the variadic part " + "needs to be a cdata object 
(got %s)" % + (1 + len(BArgs) + i, type(arg).__name__)) + ctypes_args.append(arg._arg_to_ctypes(arg)) + result = self._as_ctype_ptr(*ctypes_args) + return BResult._from_ctypes(result) + # + CTypesFunctionPtr._fix_class() + return CTypesFunctionPtr + + def new_enum_type(self, name, enumerators, enumvalues, CTypesInt): + assert isinstance(name, str) + reverse_mapping = dict(zip(reversed(enumvalues), + reversed(enumerators))) + # + class CTypesEnum(CTypesInt): + __slots__ = [] + _reftypename = '%s &' % name + + def _get_own_repr(self): + value = self._value + try: + return '%d: %s' % (value, reverse_mapping[value]) + except KeyError: + return str(value) + + def _to_string(self, maxlen): + value = self._value + try: + return reverse_mapping[value] + except KeyError: + return str(value) + # + CTypesEnum._fix_class() + return CTypesEnum + + def get_errno(self): + return ctypes.get_errno() + + def set_errno(self, value): + ctypes.set_errno(value) + + def string(self, b, maxlen=-1): + return b._to_string(maxlen) + + def buffer(self, bptr, size=-1): + raise NotImplementedError("buffer() with ctypes backend") + + def sizeof(self, cdata_or_BType): + if isinstance(cdata_or_BType, CTypesData): + return cdata_or_BType._get_size_of_instance() + else: + assert issubclass(cdata_or_BType, CTypesData) + return cdata_or_BType._get_size() + + def alignof(self, BType): + assert issubclass(BType, CTypesData) + return BType._alignment() + + def newp(self, BType, source): + if not issubclass(BType, CTypesData): + raise TypeError + return BType._newp(source) + + def cast(self, BType, source): + return BType._cast_from(source) + + def callback(self, BType, source, error, onerror): + assert onerror is None # XXX not implemented + return BType(source, error) + + _weakref_cache_ref = None + + def gcp(self, cdata, destructor, size=0): + if self._weakref_cache_ref is None: + import weakref + class MyRef(weakref.ref): + def __eq__(self, other): + myref = self() + return self is other or ( + myref is not None and myref is other()) + def __ne__(self, other): + return not (self == other) + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self()) + return self._hash + self._weakref_cache_ref = {}, MyRef + weak_cache, MyRef = self._weakref_cache_ref + + if destructor is None: + try: + del weak_cache[MyRef(cdata)] + except KeyError: + raise TypeError("Can remove destructor only on a object " + "previously returned by ffi.gc()") + return None + + def remove(k): + cdata, destructor = weak_cache.pop(k, (None, None)) + if destructor is not None: + destructor(cdata) + + new_cdata = self.cast(self.typeof(cdata), cdata) + assert new_cdata is not cdata + weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) + return new_cdata + + typeof = type + + def getcname(self, BType, replace_with): + return BType._get_c_name(replace_with) + + def typeoffsetof(self, BType, fieldname, num=0): + if isinstance(fieldname, str): + if num == 0 and issubclass(BType, CTypesGenericPtr): + BType = BType._BItem + if not issubclass(BType, CTypesBaseStructOrUnion): + raise TypeError("expected a struct or union ctype") + BField = BType._bfield_types[fieldname] + if BField is Ellipsis: + raise TypeError("not supported for bitfields") + return (BField, BType._offsetof(fieldname)) + elif isinstance(fieldname, (int, long)): + if issubclass(BType, CTypesGenericArray): + BType = BType._CTPtr + if not issubclass(BType, CTypesGenericPtr): + raise TypeError("expected an array or ptr ctype") + BItem = BType._BItem + offset 
= BItem._get_size() * fieldname
+            if offset > sys.maxsize:
+                raise OverflowError
+            return (BItem, offset)
+        else:
+            raise TypeError(type(fieldname))
+
+    def rawaddressof(self, BTypePtr, cdata, offset=None):
+        if isinstance(cdata, CTypesBaseStructOrUnion):
+            ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata))
+        elif isinstance(cdata, CTypesGenericPtr):
+            if offset is None or not issubclass(type(cdata)._BItem,
+                                                CTypesBaseStructOrUnion):
+                raise TypeError("unexpected cdata type")
+            ptr = type(cdata)._to_ctypes(cdata)
+        elif isinstance(cdata, CTypesGenericArray):
+            ptr = type(cdata)._to_ctypes(cdata)
+        else:
+            raise TypeError("expected a <cdata 'struct-or-union'>")
+        if offset:
+            ptr = ctypes.cast(
+                ctypes.c_void_p(
+                    ctypes.cast(ptr, ctypes.c_void_p).value + offset),
+                type(ptr))
+        return BTypePtr._from_ctypes(ptr)
+
+
+class CTypesLibrary(object):
+
+    def __init__(self, backend, cdll):
+        self.backend = backend
+        self.cdll = cdll
+
+    def load_function(self, BType, name):
+        c_func = getattr(self.cdll, name)
+        funcobj = BType._from_ctypes(c_func)
+        funcobj._name = name
+        return funcobj
+
+    def read_variable(self, BType, name):
+        try:
+            ctypes_obj = BType._ctype.in_dll(self.cdll, name)
+        except AttributeError as e:
+            raise NotImplementedError(e)
+        return BType._from_ctypes(ctypes_obj)
+
+    def write_variable(self, BType, name, value):
+        new_ctypes_obj = BType._to_ctypes(value)
+        ctypes_obj = BType._ctype.in_dll(self.cdll, name)
+        ctypes.memmove(ctypes.addressof(ctypes_obj),
+                       ctypes.addressof(new_ctypes_obj),
+                       ctypes.sizeof(BType._ctype))
diff --git a/python/cffi/cffi_opcode.py b/python/cffi/cffi_opcode.py
new file mode 100644
index 000000000..6421df621
--- /dev/null
+++ b/python/cffi/cffi_opcode.py
@@ -0,0 +1,187 @@
+from .error import VerificationError
+
+class CffiOp(object):
+    def __init__(self, op, arg):
+        self.op = op
+        self.arg = arg
+
+    def as_c_expr(self):
+        if self.op is None:
+            assert isinstance(self.arg, str)
+            return '(_cffi_opcode_t)(%s)' % (self.arg,)
+        classname = CLASS_NAME[self.op]
+        return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg)
+
+    def as_python_bytes(self):
+        if self.op is None and self.arg.isdigit():
+            value = int(self.arg)     # non-negative: '-' not in self.arg
+            if value >= 2**31:
+                raise OverflowError("cannot emit %r: limited to 2**31-1"
+                                    % (self.arg,))
+            return format_four_bytes(value)
+        if isinstance(self.arg, str):
+            raise VerificationError("cannot emit to Python: %r" % (self.arg,))
+        return format_four_bytes((self.arg << 8) | self.op)
+
+    def __str__(self):
+        classname = CLASS_NAME.get(self.op, self.op)
+        return '(%s %s)' % (classname, self.arg)
+
+def format_four_bytes(num):
+    return '\\x%02X\\x%02X\\x%02X\\x%02X' % (
+        (num >> 24) & 0xFF,
+        (num >> 16) & 0xFF,
+        (num >>  8) & 0xFF,
+        (num      ) & 0xFF)
+
+OP_PRIMITIVE       = 1
+OP_POINTER         = 3
+OP_ARRAY           = 5
+OP_OPEN_ARRAY      = 7
+OP_STRUCT_UNION    = 9
+OP_ENUM            = 11
+OP_FUNCTION        = 13
+OP_FUNCTION_END    = 15
+OP_NOOP            = 17
+OP_BITFIELD        = 19
+OP_TYPENAME        = 21
+OP_CPYTHON_BLTN_V  = 23   # varargs
+OP_CPYTHON_BLTN_N  = 25   # noargs
+OP_CPYTHON_BLTN_O  = 27   # O (i.e.
a single arg) +OP_CONSTANT = 29 +OP_CONSTANT_INT = 31 +OP_GLOBAL_VAR = 33 +OP_DLOPEN_FUNC = 35 +OP_DLOPEN_CONST = 37 +OP_GLOBAL_VAR_F = 39 +OP_EXTERN_PYTHON = 41 + +PRIM_VOID = 0 +PRIM_BOOL = 1 +PRIM_CHAR = 2 +PRIM_SCHAR = 3 +PRIM_UCHAR = 4 +PRIM_SHORT = 5 +PRIM_USHORT = 6 +PRIM_INT = 7 +PRIM_UINT = 8 +PRIM_LONG = 9 +PRIM_ULONG = 10 +PRIM_LONGLONG = 11 +PRIM_ULONGLONG = 12 +PRIM_FLOAT = 13 +PRIM_DOUBLE = 14 +PRIM_LONGDOUBLE = 15 + +PRIM_WCHAR = 16 +PRIM_INT8 = 17 +PRIM_UINT8 = 18 +PRIM_INT16 = 19 +PRIM_UINT16 = 20 +PRIM_INT32 = 21 +PRIM_UINT32 = 22 +PRIM_INT64 = 23 +PRIM_UINT64 = 24 +PRIM_INTPTR = 25 +PRIM_UINTPTR = 26 +PRIM_PTRDIFF = 27 +PRIM_SIZE = 28 +PRIM_SSIZE = 29 +PRIM_INT_LEAST8 = 30 +PRIM_UINT_LEAST8 = 31 +PRIM_INT_LEAST16 = 32 +PRIM_UINT_LEAST16 = 33 +PRIM_INT_LEAST32 = 34 +PRIM_UINT_LEAST32 = 35 +PRIM_INT_LEAST64 = 36 +PRIM_UINT_LEAST64 = 37 +PRIM_INT_FAST8 = 38 +PRIM_UINT_FAST8 = 39 +PRIM_INT_FAST16 = 40 +PRIM_UINT_FAST16 = 41 +PRIM_INT_FAST32 = 42 +PRIM_UINT_FAST32 = 43 +PRIM_INT_FAST64 = 44 +PRIM_UINT_FAST64 = 45 +PRIM_INTMAX = 46 +PRIM_UINTMAX = 47 +PRIM_FLOATCOMPLEX = 48 +PRIM_DOUBLECOMPLEX = 49 +PRIM_CHAR16 = 50 +PRIM_CHAR32 = 51 + +_NUM_PRIM = 52 +_UNKNOWN_PRIM = -1 +_UNKNOWN_FLOAT_PRIM = -2 +_UNKNOWN_LONG_DOUBLE = -3 + +_IO_FILE_STRUCT = -1 + +PRIMITIVE_TO_INDEX = { + 'char': PRIM_CHAR, + 'short': PRIM_SHORT, + 'int': PRIM_INT, + 'long': PRIM_LONG, + 'long long': PRIM_LONGLONG, + 'signed char': PRIM_SCHAR, + 'unsigned char': PRIM_UCHAR, + 'unsigned short': PRIM_USHORT, + 'unsigned int': PRIM_UINT, + 'unsigned long': PRIM_ULONG, + 'unsigned long long': PRIM_ULONGLONG, + 'float': PRIM_FLOAT, + 'double': PRIM_DOUBLE, + 'long double': PRIM_LONGDOUBLE, + '_cffi_float_complex_t': PRIM_FLOATCOMPLEX, + '_cffi_double_complex_t': PRIM_DOUBLECOMPLEX, + '_Bool': PRIM_BOOL, + 'wchar_t': PRIM_WCHAR, + 'char16_t': PRIM_CHAR16, + 'char32_t': PRIM_CHAR32, + 'int8_t': PRIM_INT8, + 'uint8_t': PRIM_UINT8, + 'int16_t': PRIM_INT16, + 'uint16_t': PRIM_UINT16, + 'int32_t': PRIM_INT32, + 'uint32_t': PRIM_UINT32, + 'int64_t': PRIM_INT64, + 'uint64_t': PRIM_UINT64, + 'intptr_t': PRIM_INTPTR, + 'uintptr_t': PRIM_UINTPTR, + 'ptrdiff_t': PRIM_PTRDIFF, + 'size_t': PRIM_SIZE, + 'ssize_t': PRIM_SSIZE, + 'int_least8_t': PRIM_INT_LEAST8, + 'uint_least8_t': PRIM_UINT_LEAST8, + 'int_least16_t': PRIM_INT_LEAST16, + 'uint_least16_t': PRIM_UINT_LEAST16, + 'int_least32_t': PRIM_INT_LEAST32, + 'uint_least32_t': PRIM_UINT_LEAST32, + 'int_least64_t': PRIM_INT_LEAST64, + 'uint_least64_t': PRIM_UINT_LEAST64, + 'int_fast8_t': PRIM_INT_FAST8, + 'uint_fast8_t': PRIM_UINT_FAST8, + 'int_fast16_t': PRIM_INT_FAST16, + 'uint_fast16_t': PRIM_UINT_FAST16, + 'int_fast32_t': PRIM_INT_FAST32, + 'uint_fast32_t': PRIM_UINT_FAST32, + 'int_fast64_t': PRIM_INT_FAST64, + 'uint_fast64_t': PRIM_UINT_FAST64, + 'intmax_t': PRIM_INTMAX, + 'uintmax_t': PRIM_UINTMAX, + } + +F_UNION = 0x01 +F_CHECK_FIELDS = 0x02 +F_PACKED = 0x04 +F_EXTERNAL = 0x08 +F_OPAQUE = 0x10 + +G_FLAGS = dict([('_CFFI_' + _key, globals()[_key]) + for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED', + 'F_EXTERNAL', 'F_OPAQUE']]) + +CLASS_NAME = {} +for _name, _value in list(globals().items()): + if _name.startswith('OP_') and isinstance(_value, int): + CLASS_NAME[_value] = _name[3:] diff --git a/python/cffi/commontypes.py b/python/cffi/commontypes.py new file mode 100644 index 000000000..d4dae3517 --- /dev/null +++ b/python/cffi/commontypes.py @@ -0,0 +1,82 @@ +import sys +from . 
import model +from .error import FFIError + + +COMMON_TYPES = {} + +try: + # fetch "bool" and all simple Windows types + from _cffi_backend import _get_common_types + _get_common_types(COMMON_TYPES) +except ImportError: + pass + +COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE') +COMMON_TYPES['bool'] = '_Bool' # in case we got ImportError above +COMMON_TYPES['float _Complex'] = '_cffi_float_complex_t' +COMMON_TYPES['double _Complex'] = '_cffi_double_complex_t' + +for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES: + if _type.endswith('_t'): + COMMON_TYPES[_type] = _type +del _type + +_CACHE = {} + +def resolve_common_type(parser, commontype): + try: + return _CACHE[commontype] + except KeyError: + cdecl = COMMON_TYPES.get(commontype, commontype) + if not isinstance(cdecl, str): + result, quals = cdecl, 0 # cdecl is already a BaseType + elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES: + result, quals = model.PrimitiveType(cdecl), 0 + elif cdecl == 'set-unicode-needed': + raise FFIError("The Windows type %r is only available after " + "you call ffi.set_unicode()" % (commontype,)) + else: + if commontype == cdecl: + raise FFIError( + "Unsupported type: %r. Please look at " + "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations " + "and file an issue if you think this type should really " + "be supported." % (commontype,)) + result, quals = parser.parse_type_and_quals(cdecl) # recursive + + assert isinstance(result, model.BaseTypeByIdentity) + _CACHE[commontype] = result, quals + return result, quals + + +# ____________________________________________________________ +# extra types for Windows (most of them are in commontypes.c) + + +def win_common_types(): + return { + "UNICODE_STRING": model.StructType( + "_UNICODE_STRING", + ["Length", + "MaximumLength", + "Buffer"], + [model.PrimitiveType("unsigned short"), + model.PrimitiveType("unsigned short"), + model.PointerType(model.PrimitiveType("wchar_t"))], + [-1, -1, -1]), + "PUNICODE_STRING": "UNICODE_STRING *", + "PCUNICODE_STRING": "const UNICODE_STRING *", + + "TBYTE": "set-unicode-needed", + "TCHAR": "set-unicode-needed", + "LPCTSTR": "set-unicode-needed", + "PCTSTR": "set-unicode-needed", + "LPTSTR": "set-unicode-needed", + "PTSTR": "set-unicode-needed", + "PTBYTE": "set-unicode-needed", + "PTCHAR": "set-unicode-needed", + } + +if sys.platform == 'win32': + COMMON_TYPES.update(win_common_types()) diff --git a/python/cffi/cparser.py b/python/cffi/cparser.py new file mode 100644 index 000000000..dd590d874 --- /dev/null +++ b/python/cffi/cparser.py @@ -0,0 +1,1015 @@ +from . import model +from .commontypes import COMMON_TYPES, resolve_common_type +from .error import FFIError, CDefError +try: + from . import _pycparser as pycparser +except ImportError: + import pycparser +import weakref, re, sys + +try: + if sys.version_info < (3,): + import thread as _thread + else: + import _thread + lock = _thread.allocate_lock() +except ImportError: + lock = None + +def _workaround_for_static_import_finders(): + # Issue #392: packaging tools like cx_Freeze can not find these + # because pycparser uses exec dynamic import. This is an obscure + # workaround. This function is never called. 
+    import pycparser.yacctab
+    import pycparser.lextab
+
+CDEF_SOURCE_STRING = "<cdef source string>"
+_r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$",
+                        re.DOTALL | re.MULTILINE)
+_r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)"
+                       r"\b((?:[^\n\\]|\\.)*?)$",
+                       re.DOTALL | re.MULTILINE)
+_r_line_directive = re.compile(r"^[ \t]*#[ \t]*(?:line|\d+)\b.*$", re.MULTILINE)
+_r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}")
+_r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$")
+_r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]")
+_r_words = re.compile(r"\w+|\S")
+_parser_cache = None
+_r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE)
+_r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b")
+_r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b")
+_r_cdecl = re.compile(r"\b__cdecl\b")
+_r_extern_python = re.compile(r'\bextern\s*"'
+                              r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.')
+_r_star_const_space = re.compile(       # matches "* const "
+    r"[*]\s*((const|volatile|restrict)\b\s*)+")
+_r_int_dotdotdot = re.compile(r"(\b(int|long|short|signed|unsigned|char)\s*)+"
+                              r"\.\.\.")
+_r_float_dotdotdot = re.compile(r"\b(double|float)\s*\.\.\.")
+
+def _get_parser():
+    global _parser_cache
+    if _parser_cache is None:
+        _parser_cache = pycparser.CParser()
+    return _parser_cache
+
+def _workaround_for_old_pycparser(csource):
+    # Workaround for a pycparser issue (fixed between pycparser 2.10 and
+    # 2.14): "char*const***" gives us a wrong syntax tree, the same as
+    # for "char***(*const)". This means we can't tell the difference
+    # afterwards. But "char(*const(***))" gives us the right syntax
+    # tree. The issue only occurs if there are several stars in
+    # sequence with no parenthesis in between, just possibly qualifiers.
+    # Attempt to fix it by adding some parentheses in the source: each
+    # time we see "* const" or "* const *", we add an opening
+    # parenthesis before each star---the hard part is figuring out where
+    # to close them.
+    parts = []
+    while True:
+        match = _r_star_const_space.search(csource)
+        if not match:
+            break
+        #print repr(''.join(parts)+csource), '=>',
+        parts.append(csource[:match.start()])
+        parts.append('('); closing = ')'
+        parts.append(match.group())   # e.g.
"* const " + endpos = match.end() + if csource.startswith('*', endpos): + parts.append('('); closing += ')' + level = 0 + i = endpos + while i < len(csource): + c = csource[i] + if c == '(': + level += 1 + elif c == ')': + if level == 0: + break + level -= 1 + elif c in ',;=': + if level == 0: + break + i += 1 + csource = csource[endpos:i] + closing + csource[i:] + #print repr(''.join(parts)+csource) + parts.append(csource) + return ''.join(parts) + +def _preprocess_extern_python(csource): + # input: `extern "Python" int foo(int);` or + # `extern "Python" { int foo(int); }` + # output: + # void __cffi_extern_python_start; + # int foo(int); + # void __cffi_extern_python_stop; + # + # input: `extern "Python+C" int foo(int);` + # output: + # void __cffi_extern_python_plus_c_start; + # int foo(int); + # void __cffi_extern_python_stop; + parts = [] + while True: + match = _r_extern_python.search(csource) + if not match: + break + endpos = match.end() - 1 + #print + #print ''.join(parts)+csource + #print '=>' + parts.append(csource[:match.start()]) + if 'C' in match.group(1): + parts.append('void __cffi_extern_python_plus_c_start; ') + else: + parts.append('void __cffi_extern_python_start; ') + if csource[endpos] == '{': + # grouping variant + closing = csource.find('}', endpos) + if closing < 0: + raise CDefError("'extern \"Python\" {': no '}' found") + if csource.find('{', endpos + 1, closing) >= 0: + raise NotImplementedError("cannot use { } inside a block " + "'extern \"Python\" { ... }'") + parts.append(csource[endpos+1:closing]) + csource = csource[closing+1:] + else: + # non-grouping variant + semicolon = csource.find(';', endpos) + if semicolon < 0: + raise CDefError("'extern \"Python\": no ';' found") + parts.append(csource[endpos:semicolon+1]) + csource = csource[semicolon+1:] + parts.append(' void __cffi_extern_python_stop;') + #print ''.join(parts)+csource + #print + parts.append(csource) + return ''.join(parts) + +def _warn_for_string_literal(csource): + if '"' not in csource: + return + for line in csource.splitlines(): + if '"' in line and not line.lstrip().startswith('#'): + import warnings + warnings.warn("String literal found in cdef() or type source. " + "String literals are ignored here, but you should " + "remove them anyway because some character sequences " + "confuse pre-parsing.") + break + +def _warn_for_non_extern_non_static_global_variable(decl): + if not decl.storage: + import warnings + warnings.warn("Global variable '%s' in cdef(): for consistency " + "with C it should have a storage class specifier " + "(usually 'extern')" % (decl.name,)) + +def _remove_line_directives(csource): + # _r_line_directive matches whole lines, without the final \n, if they + # start with '#line' with some spacing allowed, or '#NUMBER'. This + # function stores them away and replaces them with exactly the string + # '#line@N', where N is the index in the list 'line_directives'. 
+    line_directives = []
+    def replace(m):
+        i = len(line_directives)
+        line_directives.append(m.group())
+        return '#line@%d' % i
+    csource = _r_line_directive.sub(replace, csource)
+    return csource, line_directives
+
+def _put_back_line_directives(csource, line_directives):
+    def replace(m):
+        s = m.group()
+        if not s.startswith('#line@'):
+            raise AssertionError("unexpected #line directive "
+                                 "(should have been processed and removed)")
+        return line_directives[int(s[6:])]
+    return _r_line_directive.sub(replace, csource)
+
+def _preprocess(csource):
+    # First, remove the lines of the form '#line N "filename"' because
+    # the "filename" part could confuse the rest
+    csource, line_directives = _remove_line_directives(csource)
+    # Remove comments. NOTE: this only works because the cdef() section
+    # should not contain any string literals (except in line directives)!
+    def replace_keeping_newlines(m):
+        return ' ' + m.group().count('\n') * '\n'
+    csource = _r_comment.sub(replace_keeping_newlines, csource)
+    # Remove the "#define FOO x" lines
+    macros = {}
+    for match in _r_define.finditer(csource):
+        macroname, macrovalue = match.groups()
+        macrovalue = macrovalue.replace('\\\n', '').strip()
+        macros[macroname] = macrovalue
+    csource = _r_define.sub('', csource)
+    #
+    if pycparser.__version__ < '2.14':
+        csource = _workaround_for_old_pycparser(csource)
+    #
+    # BIG HACK: replace WINAPI or __stdcall with "volatile const".
+    # It doesn't make sense for the return type of a function to be
+    # "volatile volatile const", so we abuse it to detect __stdcall...
+    # Hack number 2 is that "int(volatile *fptr)();" is not valid C
+    # syntax, so we place the "volatile" before the opening parenthesis.
+    csource = _r_stdcall2.sub(' volatile volatile const(', csource)
+    csource = _r_stdcall1.sub(' volatile volatile const ', csource)
+    csource = _r_cdecl.sub(' ', csource)
+    #
+    # Replace `extern "Python"` with start/end markers
+    csource = _preprocess_extern_python(csource)
+    #
+    # Now there should not be any string literal left; warn if we get one
+    _warn_for_string_literal(csource)
+    #
+    # Replace "[...]" with "[__dotdotdotarray__]"
+    csource = _r_partial_array.sub('[__dotdotdotarray__]', csource)
+    #
+    # Replace "...}" with "__dotdotdotNUM__}". This construction should
+    # occur only at the end of enums; at the end of structs we have "...;}"
+    # and at the end of vararg functions "...);". Also replace "=...[,}]"
+    # with ",__dotdotdotNUM__[,}]": this occurs in the enums too, when
+    # giving an unknown value.
+    matches = list(_r_partial_enum.finditer(csource))
+    for number, match in enumerate(reversed(matches)):
+        p = match.start()
+        if csource[p] == '=':
+            p2 = csource.find('...', p, match.end())
+            assert p2 > p
+            csource = '%s,__dotdotdot%d__ %s' % (csource[:p], number,
+                                                 csource[p2+3:])
+        else:
+            assert csource[p:p+3] == '...'
+            csource = '%s __dotdotdot%d__ %s' % (csource[:p], number,
+                                                 csource[p+3:])
+    # Replace "int ..." or "unsigned long int..." with "__dotdotdotint__"
+    csource = _r_int_dotdotdot.sub(' __dotdotdotint__ ', csource)
+    # Replace "float ..." or "double..." with "__dotdotdotfloat__"
+    csource = _r_float_dotdotdot.sub(' __dotdotdotfloat__ ', csource)
+    # Replace all remaining "..." with the same name, "__dotdotdot__",
+    # which is declared with a typedef for the purpose of C parsing.
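+    # Illustrative sketch (editorial, not part of the original file): at
+    # this point 'enum e { A, ... };' has already become
+    # 'enum e { A, __dotdotdot0__ };' (modulo spacing); the replace()
+    # below then rewrites any '...' still left, e.g. in
+    # 'struct s { int x; ...; };' or 'int f(int, ...);', into the
+    # identifier '__dotdotdot__', which a later typedef makes parseable.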
+    csource = csource.replace('...', ' __dotdotdot__ ')
+    # Finally, put back the line directives
+    csource = _put_back_line_directives(csource, line_directives)
+    return csource, macros
+
+def _common_type_names(csource):
+    # Look in the source for what looks like usages of types from the
+    # list of common types. A "usage" is approximated here as the
+    # appearance of the word, minus a "definition" of the type, which
+    # is the last word in a "typedef" statement. Approximative only
+    # but should be fine for all the common types.
+    look_for_words = set(COMMON_TYPES)
+    look_for_words.add(';')
+    look_for_words.add(',')
+    look_for_words.add('(')
+    look_for_words.add(')')
+    look_for_words.add('typedef')
+    words_used = set()
+    is_typedef = False
+    paren = 0
+    previous_word = ''
+    for word in _r_words.findall(csource):
+        if word in look_for_words:
+            if word == ';':
+                if is_typedef:
+                    words_used.discard(previous_word)
+                    look_for_words.discard(previous_word)
+                    is_typedef = False
+            elif word == 'typedef':
+                is_typedef = True
+                paren = 0
+            elif word == '(':
+                paren += 1
+            elif word == ')':
+                paren -= 1
+            elif word == ',':
+                if is_typedef and paren == 0:
+                    words_used.discard(previous_word)
+                    look_for_words.discard(previous_word)
+            else:   # word in COMMON_TYPES
+                words_used.add(word)
+        previous_word = word
+    return words_used
+
+
+class Parser(object):
+
+    def __init__(self):
+        self._declarations = {}
+        self._included_declarations = set()
+        self._anonymous_counter = 0
+        self._structnode2type = weakref.WeakKeyDictionary()
+        self._options = {}
+        self._int_constants = {}
+        self._recomplete = []
+        self._uses_new_feature = None
+
+    def _parse(self, csource):
+        csource, macros = _preprocess(csource)
+        # XXX: for more efficiency we would need to poke into the
+        # internals of CParser... the following registers the
+        # typedefs, because their presence or absence influences the
+        # parsing itself (but what they are typedef'ed to plays no role)
+        ctn = _common_type_names(csource)
+        typenames = []
+        for name in sorted(self._declarations):
+            if name.startswith('typedef '):
+                name = name[8:]
+                typenames.append(name)
+                ctn.discard(name)
+        typenames += sorted(ctn)
+        #
+        csourcelines = []
+        csourcelines.append('# 1 "<cdef automatic initialization code>"')
+        for typename in typenames:
+            csourcelines.append('typedef int %s;' % typename)
+        csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,'
+                            ' __dotdotdot__;')
+        # this forces pycparser to consider the following in the file
+        # called <cdef source string> from line 1
+        csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,))
+        csourcelines.append(csource)
+        csourcelines.append('')   # see test_missing_newline_bug
+        fullcsource = '\n'.join(csourcelines)
+        if lock is not None:
+            lock.acquire()     # pycparser is not thread-safe...
+        try:
+            ast = _get_parser().parse(fullcsource)
+        except pycparser.c_parser.ParseError as e:
+            self.convert_pycparser_error(e, csource)
+        finally:
+            if lock is not None:
+                lock.release()
+        # csource will be used to find buggy source text
+        return ast, macros, csource
+
+    def _convert_pycparser_error(self, e, csource):
+        # xxx look for "<cdef source string>:NUM:" at the start of str(e)
+        # and interpret that as a line number. This will not work if
+        # the user gives explicit ``# NUM "FILE"`` directives.
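+        # Illustrative sketch (editorial, not part of the original file):
+        # pycparser reports errors like '<cdef source string>:3: before: ;',
+        # and because _parse() resets the file name and line number with
+        # '# 1 "..."' directives, that 3 indexes directly into
+        # csource.splitlines() below.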
+ line = None + msg = str(e) + match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg) + if match: + linenum = int(match.group(1), 10) + csourcelines = csource.splitlines() + if 1 <= linenum <= len(csourcelines): + line = csourcelines[linenum-1] + return line + + def convert_pycparser_error(self, e, csource): + line = self._convert_pycparser_error(e, csource) + + msg = str(e) + if line: + msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) + else: + msg = 'parse error\n%s' % (msg,) + raise CDefError(msg) + + def parse(self, csource, override=False, packed=False, pack=None, + dllexport=False): + if packed: + if packed != True: + raise ValueError("'packed' should be False or True; use " + "'pack' to give another value") + if pack: + raise ValueError("cannot give both 'pack' and 'packed'") + pack = 1 + elif pack: + if pack & (pack - 1): + raise ValueError("'pack' must be a power of two, not %r" % + (pack,)) + else: + pack = 0 + prev_options = self._options + try: + self._options = {'override': override, + 'packed': pack, + 'dllexport': dllexport} + self._internal_parse(csource) + finally: + self._options = prev_options + + def _internal_parse(self, csource): + ast, macros, csource = self._parse(csource) + # add the macros + self._process_macros(macros) + # find the first "__dotdotdot__" and use that as a separator + # between the repeated typedefs and the real csource + iterator = iter(ast.ext) + for decl in iterator: + if decl.name == '__dotdotdot__': + break + else: + assert 0 + current_decl = None + # + try: + self._inside_extern_python = '__cffi_extern_python_stop' + for decl in iterator: + current_decl = decl + if isinstance(decl, pycparser.c_ast.Decl): + self._parse_decl(decl) + elif isinstance(decl, pycparser.c_ast.Typedef): + if not decl.name: + raise CDefError("typedef does not declare any name", + decl) + quals = 0 + if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and + decl.type.type.names[-1].startswith('__dotdotdot')): + realtype = self._get_unknown_type(decl) + elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and + isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and + isinstance(decl.type.type.type, + pycparser.c_ast.IdentifierType) and + decl.type.type.type.names[-1].startswith('__dotdotdot')): + realtype = self._get_unknown_ptr_type(decl) + else: + realtype, quals = self._get_type_and_quals( + decl.type, name=decl.name, partial_length_ok=True, + typedef_example="*(%s *)0" % (decl.name,)) + self._declare('typedef ' + decl.name, realtype, quals=quals) + elif decl.__class__.__name__ == 'Pragma': + # skip pragma, only in pycparser 2.15 + import warnings + warnings.warn( + "#pragma in cdef() are entirely ignored. " + "They should be removed for now, otherwise your " + "code might behave differently in a future version " + "of CFFI if #pragma support gets added. 
Note that " + "'#pragma pack' needs to be replaced with the " + "'packed' keyword argument to cdef().") + else: + raise CDefError("unexpected <%s>: this construct is valid " + "C but not valid in cdef()" % + decl.__class__.__name__, decl) + except CDefError as e: + if len(e.args) == 1: + e.args = e.args + (current_decl,) + raise + except FFIError as e: + msg = self._convert_pycparser_error(e, csource) + if msg: + e.args = (e.args[0] + "\n *** Err: %s" % msg,) + raise + + def _add_constants(self, key, val): + if key in self._int_constants: + if self._int_constants[key] == val: + return # ignore identical double declarations + raise FFIError( + "multiple declarations of constant: %s" % (key,)) + self._int_constants[key] = val + + def _add_integer_constant(self, name, int_str): + int_str = int_str.lower().rstrip("ul") + neg = int_str.startswith('-') + if neg: + int_str = int_str[1:] + # "010" is not valid oct in py3 + if (int_str.startswith("0") and int_str != '0' + and not int_str.startswith("0x")): + int_str = "0o" + int_str[1:] + pyvalue = int(int_str, 0) + if neg: + pyvalue = -pyvalue + self._add_constants(name, pyvalue) + self._declare('macro ' + name, pyvalue) + + def _process_macros(self, macros): + for key, value in macros.items(): + value = value.strip() + if _r_int_literal.match(value): + self._add_integer_constant(key, value) + elif value == '...': + self._declare('macro ' + key, value) + else: + raise CDefError( + 'only supports one of the following syntax:\n' + ' #define %s ... (literally dot-dot-dot)\n' + ' #define %s NUMBER (with NUMBER an integer' + ' constant, decimal/hex/octal)\n' + 'got:\n' + ' #define %s %s' + % (key, key, key, value)) + + def _declare_function(self, tp, quals, decl): + tp = self._get_type_pointer(tp, quals) + if self._options.get('dllexport'): + tag = 'dllexport_python ' + elif self._inside_extern_python == '__cffi_extern_python_start': + tag = 'extern_python ' + elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': + tag = 'extern_python_plus_c ' + else: + tag = 'function ' + self._declare(tag + decl.name, tp) + + def _parse_decl(self, decl): + node = decl.type + if isinstance(node, pycparser.c_ast.FuncDecl): + tp, quals = self._get_type_and_quals(node, name=decl.name) + assert isinstance(tp, model.RawFunctionType) + self._declare_function(tp, quals, decl) + else: + if isinstance(node, pycparser.c_ast.Struct): + self._get_struct_union_enum_type('struct', node) + elif isinstance(node, pycparser.c_ast.Union): + self._get_struct_union_enum_type('union', node) + elif isinstance(node, pycparser.c_ast.Enum): + self._get_struct_union_enum_type('enum', node) + elif not decl.name: + raise CDefError("construct does not declare any variable", + decl) + # + if decl.name: + tp, quals = self._get_type_and_quals(node, + partial_length_ok=True) + if tp.is_raw_function: + self._declare_function(tp, quals, decl) + elif (tp.is_integer_type() and + hasattr(decl, 'init') and + hasattr(decl.init, 'value') and + _r_int_literal.match(decl.init.value)): + self._add_integer_constant(decl.name, decl.init.value) + elif (tp.is_integer_type() and + isinstance(decl.init, pycparser.c_ast.UnaryOp) and + decl.init.op == '-' and + hasattr(decl.init.expr, 'value') and + _r_int_literal.match(decl.init.expr.value)): + self._add_integer_constant(decl.name, + '-' + decl.init.expr.value) + elif (tp is model.void_type and + decl.name.startswith('__cffi_extern_python_')): + # hack: `extern "Python"` in the C source is replaced + # with "void __cffi_extern_python_start;" and + # 
"void __cffi_extern_python_stop;" + self._inside_extern_python = decl.name + else: + if self._inside_extern_python !='__cffi_extern_python_stop': + raise CDefError( + "cannot declare constants or " + "variables with 'extern \"Python\"'") + if (quals & model.Q_CONST) and not tp.is_array_type: + self._declare('constant ' + decl.name, tp, quals=quals) + else: + _warn_for_non_extern_non_static_global_variable(decl) + self._declare('variable ' + decl.name, tp, quals=quals) + + def parse_type(self, cdecl): + return self.parse_type_and_quals(cdecl)[0] + + def parse_type_and_quals(self, cdecl): + ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] + assert not macros + exprnode = ast.ext[-1].type.args.params[0] + if isinstance(exprnode, pycparser.c_ast.ID): + raise CDefError("unknown identifier '%s'" % (exprnode.name,)) + return self._get_type_and_quals(exprnode.type) + + def _declare(self, name, obj, included=False, quals=0): + if name in self._declarations: + prevobj, prevquals = self._declarations[name] + if prevobj is obj and prevquals == quals: + return + if not self._options.get('override'): + raise FFIError( + "multiple declarations of %s (for interactive usage, " + "try cdef(xx, override=True))" % (name,)) + assert '__dotdotdot__' not in name.split() + self._declarations[name] = (obj, quals) + if included: + self._included_declarations.add(obj) + + def _extract_quals(self, type): + quals = 0 + if isinstance(type, (pycparser.c_ast.TypeDecl, + pycparser.c_ast.PtrDecl)): + if 'const' in type.quals: + quals |= model.Q_CONST + if 'volatile' in type.quals: + quals |= model.Q_VOLATILE + if 'restrict' in type.quals: + quals |= model.Q_RESTRICT + return quals + + def _get_type_pointer(self, type, quals, declname=None): + if isinstance(type, model.RawFunctionType): + return type.as_function_pointer() + if (isinstance(type, model.StructOrUnionOrEnum) and + type.name.startswith('$') and type.name[1:].isdigit() and + type.forcename is None and declname is not None): + return model.NamedPointerType(type, declname, quals) + return model.PointerType(type, quals) + + def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False, + typedef_example=None): + # first, dereference typedefs, if we have it already parsed, we're good + if (isinstance(typenode, pycparser.c_ast.TypeDecl) and + isinstance(typenode.type, pycparser.c_ast.IdentifierType) and + len(typenode.type.names) == 1 and + ('typedef ' + typenode.type.names[0]) in self._declarations): + tp, quals = self._declarations['typedef ' + typenode.type.names[0]] + quals |= self._extract_quals(typenode) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.ArrayDecl): + # array type + if typenode.dim is None: + length = None + else: + length = self._parse_constant( + typenode.dim, partial_length_ok=partial_length_ok) + # a hack: in 'typedef int foo_t[...][...];', don't use '...' as + # the length but use directly the C expression that would be + # generated by recompiler.py. 
This lets the typedef be used in + # many more places within recompiler.py + if typedef_example is not None: + if length == '...': + length = '_cffi_array_len(%s)' % (typedef_example,) + typedef_example = "*" + typedef_example + # + tp, quals = self._get_type_and_quals(typenode.type, + partial_length_ok=partial_length_ok, + typedef_example=typedef_example) + return model.ArrayType(tp, length), quals + # + if isinstance(typenode, pycparser.c_ast.PtrDecl): + # pointer type + itemtype, itemquals = self._get_type_and_quals(typenode.type) + tp = self._get_type_pointer(itemtype, itemquals, declname=name) + quals = self._extract_quals(typenode) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.TypeDecl): + quals = self._extract_quals(typenode) + type = typenode.type + if isinstance(type, pycparser.c_ast.IdentifierType): + # assume a primitive type. get it from .names, but reduce + # synonyms to a single chosen combination + names = list(type.names) + if names != ['signed', 'char']: # keep this unmodified + prefixes = {} + while names: + name = names[0] + if name in ('short', 'long', 'signed', 'unsigned'): + prefixes[name] = prefixes.get(name, 0) + 1 + del names[0] + else: + break + # ignore the 'signed' prefix below, and reorder the others + newnames = [] + for prefix in ('unsigned', 'short', 'long'): + for i in range(prefixes.get(prefix, 0)): + newnames.append(prefix) + if not names: + names = ['int'] # implicitly + if names == ['int']: # but kill it if 'short' or 'long' + if 'short' in prefixes or 'long' in prefixes: + names = [] + names = newnames + names + ident = ' '.join(names) + if ident == 'void': + return model.void_type, quals + if ident == '__dotdotdot__': + raise FFIError(':%d: bad usage of "..."' % + typenode.coord.line) + tp0, quals0 = resolve_common_type(self, ident) + return tp0, (quals | quals0) + # + if isinstance(type, pycparser.c_ast.Struct): + # 'struct foobar' + tp = self._get_struct_union_enum_type('struct', type, name) + return tp, quals + # + if isinstance(type, pycparser.c_ast.Union): + # 'union foobar' + tp = self._get_struct_union_enum_type('union', type, name) + return tp, quals + # + if isinstance(type, pycparser.c_ast.Enum): + # 'enum foobar' + tp = self._get_struct_union_enum_type('enum', type, name) + return tp, quals + # + if isinstance(typenode, pycparser.c_ast.FuncDecl): + # a function type + return self._parse_function_type(typenode, name), 0 + # + # nested anonymous structs or unions end up here + if isinstance(typenode, pycparser.c_ast.Struct): + return self._get_struct_union_enum_type('struct', typenode, name, + nested=True), 0 + if isinstance(typenode, pycparser.c_ast.Union): + return self._get_struct_union_enum_type('union', typenode, name, + nested=True), 0 + # + raise FFIError(":%d: bad or unsupported type declaration" % + typenode.coord.line) + + def _parse_function_type(self, typenode, funcname=None): + params = list(getattr(typenode.args, 'params', [])) + for i, arg in enumerate(params): + if not hasattr(arg, 'type'): + raise CDefError("%s arg %d: unknown type '%s'" + " (if you meant to use the old C syntax of giving" + " untyped arguments, it is not supported)" + % (funcname or 'in expression', i + 1, + getattr(arg, 'name', '?'))) + ellipsis = ( + len(params) > 0 and + isinstance(params[-1].type, pycparser.c_ast.TypeDecl) and + isinstance(params[-1].type.type, + pycparser.c_ast.IdentifierType) and + params[-1].type.type.names == ['__dotdotdot__']) + if ellipsis: + params.pop() + if not params: + raise CDefError( + "%s: a function with 
only '(...)' as argument"
+                " is not correct C" % (funcname or 'in expression'))
+        args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type))
+                for argdeclnode in params]
+        if not ellipsis and args == [model.void_type]:
+            args = []
+        result, quals = self._get_type_and_quals(typenode.type)
+        # the 'quals' on the result type are ignored. HACK: we abuse them
+        # to detect __stdcall functions: we textually replace "__stdcall"
+        # with "volatile volatile const" above.
+        abi = None
+        if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway
+            if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']:
+                abi = '__stdcall'
+        return model.RawFunctionType(tuple(args), result, ellipsis, abi)
+
+    def _as_func_arg(self, type, quals):
+        if isinstance(type, model.ArrayType):
+            return model.PointerType(type.item, quals)
+        elif isinstance(type, model.RawFunctionType):
+            return type.as_function_pointer()
+        else:
+            return type
+
+    def _get_struct_union_enum_type(self, kind, type, name=None, nested=False):
+        # First, a level of caching on the exact 'type' node of the AST.
+        # This is obscure, but needed because pycparser "unrolls" declarations
+        # such as "typedef struct { } foo_t, *foo_p" and we end up with
+        # an AST that is not a tree, but a DAG, with the "type" node of the
+        # two branches foo_t and foo_p of the trees being the same node.
+        # It's a bit silly but detecting "DAG-ness" in the AST tree seems
+        # to be the only way to distinguish this case from two independent
+        # structs. See test_struct_with_two_usages.
+        try:
+            return self._structnode2type[type]
+        except KeyError:
+            pass
+        #
+        # Note that this must handle parsing "struct foo" any number of
+        # times and always return the same StructType object. Additionally,
+        # one of these times (not necessarily the first), the fields of
+        # the struct can be specified with "struct foo { ...fields... }".
+        # If no name is given, then we have to create a new anonymous struct
+        # with no caching; in this case, the fields are either specified
+        # right now or never.
+        #
+        force_name = name
+        name = type.name
+        #
+        # get the type or create it if needed
+        if name is None:
+            # 'force_name' is used to guess a more readable name for
+            # anonymous structs, for the common case "typedef struct { } foo".
+            if force_name is not None:
+                explicit_name = '$%s' % force_name
+            else:
+                self._anonymous_counter += 1
+                explicit_name = '$%d' % self._anonymous_counter
+            tp = None
+        else:
+            explicit_name = name
+            key = '%s %s' % (kind, name)
+            tp, _ = self._declarations.get(key, (None, None))
+        #
+        if tp is None:
+            if kind == 'struct':
+                tp = model.StructType(explicit_name, None, None, None)
+            elif kind == 'union':
+                tp = model.UnionType(explicit_name, None, None, None)
+            elif kind == 'enum':
+                if explicit_name == '__dotdotdot__':
+                    raise CDefError("Enums cannot be declared with ...")
+                tp = self._build_enum_type(explicit_name, type.values)
+            else:
+                raise AssertionError("kind = %r" % (kind,))
+            if name is not None:
+                self._declare(key, tp)
+        else:
+            if kind == 'enum' and type.values is not None:
+                raise NotImplementedError(
+                    "enum %s: the '{}' declaration should appear on the first "
+                    "time the enum is mentioned, not later" % explicit_name)
+        if not tp.forcename:
+            tp.force_the_name(force_name)
+        if tp.forcename and '$' in tp.name:
+            self._declare('anonymous %s' % tp.forcename, tp)
+        #
+        self._structnode2type[type] = tp
+        #
+        # enums: done here
+        if kind == 'enum':
+            return tp
+        #
+        # is there a 'type.decls'?
If yes, then this is the place in the + # C sources that declare the fields. If no, then just return the + # existing type, possibly still incomplete. + if type.decls is None: + return tp + # + if tp.fldnames is not None: + raise CDefError("duplicate declaration of struct %s" % name) + fldnames = [] + fldtypes = [] + fldbitsize = [] + fldquals = [] + for decl in type.decls: + if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and + ''.join(decl.type.names) == '__dotdotdot__'): + # XXX pycparser is inconsistent: 'names' should be a list + # of strings, but is sometimes just one string. Use + # str.join() as a way to cope with both. + self._make_partial(tp, nested) + continue + if decl.bitsize is None: + bitsize = -1 + else: + bitsize = self._parse_constant(decl.bitsize) + self._partial_length = False + type, fqual = self._get_type_and_quals(decl.type, + partial_length_ok=True) + if self._partial_length: + self._make_partial(tp, nested) + if isinstance(type, model.StructType) and type.partial: + self._make_partial(tp, nested) + fldnames.append(decl.name or '') + fldtypes.append(type) + fldbitsize.append(bitsize) + fldquals.append(fqual) + tp.fldnames = tuple(fldnames) + tp.fldtypes = tuple(fldtypes) + tp.fldbitsize = tuple(fldbitsize) + tp.fldquals = tuple(fldquals) + if fldbitsize != [-1] * len(fldbitsize): + if isinstance(tp, model.StructType) and tp.partial: + raise NotImplementedError("%s: using both bitfields and '...;'" + % (tp,)) + tp.packed = self._options.get('packed') + if tp.completed: # must be re-completed: it is not opaque any more + tp.completed = 0 + self._recomplete.append(tp) + return tp + + def _make_partial(self, tp, nested): + if not isinstance(tp, model.StructOrUnion): + raise CDefError("%s cannot be partial" % (tp,)) + if not tp.has_c_name() and not nested: + raise NotImplementedError("%s is partial but has no C name" %(tp,)) + tp.partial = True + + def _parse_constant(self, exprnode, partial_length_ok=False): + # for now, limited to expressions that are an immediate number + # or positive/negative number + if isinstance(exprnode, pycparser.c_ast.Constant): + s = exprnode.value + if '0' <= s[0] <= '9': + s = s.rstrip('uUlL') + try: + if s.startswith('0'): + return int(s, 8) + else: + return int(s, 10) + except ValueError: + if len(s) > 1: + if s.lower()[0:2] == '0x': + return int(s, 16) + elif s.lower()[0:2] == '0b': + return int(s, 2) + raise CDefError("invalid constant %r" % (s,)) + elif s[0] == "'" and s[-1] == "'" and ( + len(s) == 3 or (len(s) == 4 and s[1] == "\\")): + return ord(s[-2]) + else: + raise CDefError("invalid constant %r" % (s,)) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '+'): + return self._parse_constant(exprnode.expr) + # + if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and + exprnode.op == '-'): + return -self._parse_constant(exprnode.expr) + # load previously defined int constant + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name in self._int_constants): + return self._int_constants[exprnode.name] + # + if (isinstance(exprnode, pycparser.c_ast.ID) and + exprnode.name == '__dotdotdotarray__'): + if partial_length_ok: + self._partial_length = True + return '...' 
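+            # Illustrative sketch (editorial, not part of the original
+            # file): for 'int a[10];' the ArrayDecl dimension reaches this
+            # function as a Constant node and 10 is returned; 'int a[...];'
+            # arrives instead as the ID '__dotdotdotarray__' handled just
+            # above, which is only accepted where a partial length is legal.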
+ raise FFIError(":%d: unsupported '[...]' here, cannot derive " + "the actual array length in this context" + % exprnode.coord.line) + # + if isinstance(exprnode, pycparser.c_ast.BinaryOp): + left = self._parse_constant(exprnode.left) + right = self._parse_constant(exprnode.right) + if exprnode.op == '+': + return left + right + elif exprnode.op == '-': + return left - right + elif exprnode.op == '*': + return left * right + elif exprnode.op == '/': + return self._c_div(left, right) + elif exprnode.op == '%': + return left - self._c_div(left, right) * right + elif exprnode.op == '<<': + return left << right + elif exprnode.op == '>>': + return left >> right + elif exprnode.op == '&': + return left & right + elif exprnode.op == '|': + return left | right + elif exprnode.op == '^': + return left ^ right + # + raise FFIError(":%d: unsupported expression: expected a " + "simple numeric constant" % exprnode.coord.line) + + def _c_div(self, a, b): + result = a // b + if ((a < 0) ^ (b < 0)) and (a % b) != 0: + result += 1 + return result + + def _build_enum_type(self, explicit_name, decls): + if decls is not None: + partial = False + enumerators = [] + enumvalues = [] + nextenumvalue = 0 + for enum in decls.enumerators: + if _r_enum_dotdotdot.match(enum.name): + partial = True + continue + if enum.value is not None: + nextenumvalue = self._parse_constant(enum.value) + enumerators.append(enum.name) + enumvalues.append(nextenumvalue) + self._add_constants(enum.name, nextenumvalue) + nextenumvalue += 1 + enumerators = tuple(enumerators) + enumvalues = tuple(enumvalues) + tp = model.EnumType(explicit_name, enumerators, enumvalues) + tp.partial = partial + else: # opaque enum + tp = model.EnumType(explicit_name, (), ()) + return tp + + def include(self, other): + for name, (tp, quals) in other._declarations.items(): + if name.startswith('anonymous $enum_$'): + continue # fix for test_anonymous_enum_include + kind = name.split(' ', 1)[0] + if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): + self._declare(name, tp, included=True, quals=quals) + for k, v in other._int_constants.items(): + self._add_constants(k, v) + + def _get_unknown_type(self, decl): + typenames = decl.type.type.names + if typenames == ['__dotdotdot__']: + return model.unknown_type(decl.name) + + if typenames == ['__dotdotdotint__']: + if self._uses_new_feature is None: + self._uses_new_feature = "'typedef int... %s'" % decl.name + return model.UnknownIntegerType(decl.name) + + if typenames == ['__dotdotdotfloat__']: + # note: not for 'long double' so far + if self._uses_new_feature is None: + self._uses_new_feature = "'typedef float... %s'" % decl.name + return model.UnknownFloatType(decl.name) + + raise FFIError(':%d: unsupported usage of "..." in typedef' + % decl.coord.line) + + def _get_unknown_ptr_type(self, decl): + if decl.type.type.type.names == ['__dotdotdot__']: + return model.unknown_ptr_type(decl.name) + raise FFIError(':%d: unsupported usage of "..." 
in typedef'
+                       % decl.coord.line)
diff --git a/python/cffi/error.py b/python/cffi/error.py
new file mode 100644
index 000000000..0a27247c3
--- /dev/null
+++ b/python/cffi/error.py
@@ -0,0 +1,31 @@
+
+class FFIError(Exception):
+    __module__ = 'cffi'
+
+class CDefError(Exception):
+    __module__ = 'cffi'
+    def __str__(self):
+        try:
+            current_decl = self.args[1]
+            filename = current_decl.coord.file
+            linenum = current_decl.coord.line
+            prefix = '%s:%d: ' % (filename, linenum)
+        except (AttributeError, TypeError, IndexError):
+            prefix = ''
+        return '%s%s' % (prefix, self.args[0])
+
+class VerificationError(Exception):
+    """ An error raised when verification fails
+    """
+    __module__ = 'cffi'
+
+class VerificationMissing(Exception):
+    """ An error raised when incomplete structures are passed into
+    cdef, but no verification has been done
+    """
+    __module__ = 'cffi'
+
+class PkgConfigError(Exception):
+    """ An error raised for missing modules in pkg-config
+    """
+    __module__ = 'cffi'
diff --git a/python/cffi/ffiplatform.py b/python/cffi/ffiplatform.py
new file mode 100644
index 000000000..adca28f1a
--- /dev/null
+++ b/python/cffi/ffiplatform.py
@@ -0,0 +1,113 @@
+import sys, os
+from .error import VerificationError
+
+
+LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs',
+                      'extra_objects', 'depends']
+
+def get_extension(srcfilename, modname, sources=(), **kwds):
+    from cffi._shimmed_dist_utils import Extension
+    allsources = [srcfilename]
+    for src in sources:
+        allsources.append(os.path.normpath(src))
+    return Extension(name=modname, sources=allsources, **kwds)
+
+def compile(tmpdir, ext, compiler_verbose=0, debug=None):
+    """Compile a C extension module using distutils."""
+
+    saved_environ = os.environ.copy()
+    try:
+        outputfilename = _build(tmpdir, ext, compiler_verbose, debug)
+        outputfilename = os.path.abspath(outputfilename)
+    finally:
+        # workaround for a distutils bug where some env vars can
+        # become longer and longer every time it is used
+        for key, value in saved_environ.items():
+            if os.environ.get(key) != value:
+                os.environ[key] = value
+    return outputfilename
+
+def _build(tmpdir, ext, compiler_verbose=0, debug=None):
+    # XXX compact but horrible :-(
+    from cffi._shimmed_dist_utils import Distribution, CompileError, LinkError, set_threshold, set_verbosity
+
+    dist = Distribution({'ext_modules': [ext]})
+    dist.parse_config_files()
+    options = dist.get_option_dict('build_ext')
+    if debug is None:
+        debug = sys.flags.debug
+    options['debug'] = ('ffiplatform', debug)
+    options['force'] = ('ffiplatform', True)
+    options['build_lib'] = ('ffiplatform', tmpdir)
+    options['build_temp'] = ('ffiplatform', tmpdir)
+    #
+    try:
+        old_level = set_threshold(0) or 0
+        try:
+            set_verbosity(compiler_verbose)
+            dist.run_command('build_ext')
+            cmd_obj = dist.get_command_obj('build_ext')
+            [soname] = cmd_obj.get_outputs()
+        finally:
+            set_threshold(old_level)
+    except (CompileError, LinkError) as e:
+        raise VerificationError('%s: %s' % (e.__class__.__name__, e))
+    #
+    return soname
+
+try:
+    from os.path import samefile
+except ImportError:
+    def samefile(f1, f2):
+        return os.path.abspath(f1) == os.path.abspath(f2)
+
+def maybe_relative_path(path):
+    if not os.path.isabs(path):
+        return path      # already relative
+    dir = path
+    names = []
+    while True:
+        prevdir = dir
+        dir, name = os.path.split(prevdir)
+        if dir == prevdir or not dir:
+            return path     # failed to make it relative
+        names.append(name)
+        try:
+            if samefile(dir, os.curdir):
+                names.reverse()
+                return os.path.join(*names)
+        except
OSError: + pass + +# ____________________________________________________________ + +try: + int_or_long = (int, long) + import cStringIO +except NameError: + int_or_long = int # Python 3 + import io as cStringIO + +def _flatten(x, f): + if isinstance(x, str): + f.write('%ds%s' % (len(x), x)) + elif isinstance(x, dict): + keys = sorted(x.keys()) + f.write('%dd' % len(keys)) + for key in keys: + _flatten(key, f) + _flatten(x[key], f) + elif isinstance(x, (list, tuple)): + f.write('%dl' % len(x)) + for value in x: + _flatten(value, f) + elif isinstance(x, int_or_long): + f.write('%di' % (x,)) + else: + raise TypeError( + "the keywords to verify() contains unsupported object %r" % (x,)) + +def flatten(x): + f = cStringIO.StringIO() + _flatten(x, f) + return f.getvalue() diff --git a/python/cffi/lock.py b/python/cffi/lock.py new file mode 100644 index 000000000..db91b7158 --- /dev/null +++ b/python/cffi/lock.py @@ -0,0 +1,30 @@ +import sys + +if sys.version_info < (3,): + try: + from thread import allocate_lock + except ImportError: + from dummy_thread import allocate_lock +else: + try: + from _thread import allocate_lock + except ImportError: + from _dummy_thread import allocate_lock + + +##import sys +##l1 = allocate_lock + +##class allocate_lock(object): +## def __init__(self): +## self._real = l1() +## def __enter__(self): +## for i in range(4, 0, -1): +## print sys._getframe(i).f_code +## print +## return self._real.__enter__() +## def __exit__(self, *args): +## return self._real.__exit__(*args) +## def acquire(self, f): +## assert f is False +## return self._real.acquire(f) diff --git a/python/cffi/model.py b/python/cffi/model.py new file mode 100644 index 000000000..e5f4cae3e --- /dev/null +++ b/python/cffi/model.py @@ -0,0 +1,618 @@ +import types +import weakref + +from .lock import allocate_lock +from .error import CDefError, VerificationError, VerificationMissing + +# type qualifiers +Q_CONST = 0x01 +Q_RESTRICT = 0x02 +Q_VOLATILE = 0x04 + +def qualify(quals, replace_with): + if quals & Q_CONST: + replace_with = ' const ' + replace_with.lstrip() + if quals & Q_VOLATILE: + replace_with = ' volatile ' + replace_with.lstrip() + if quals & Q_RESTRICT: + # It seems that __restrict is supported by gcc and msvc. + # If you hit some different compiler, add a #define in + # _cffi_include.h for it (and in its copies, documented there) + replace_with = ' __restrict ' + replace_with.lstrip() + return replace_with + + +class BaseTypeByIdentity(object): + is_array_type = False + is_raw_function = False + + def get_c_name(self, replace_with='', context='a C file', quals=0): + result = self.c_name_with_marker + assert result.count('&') == 1 + # some logic duplication with ffi.getctype()... 
:-( + replace_with = replace_with.strip() + if replace_with: + if replace_with.startswith('*') and '&[' in result: + replace_with = '(%s)' % replace_with + elif not replace_with[0] in '[(': + replace_with = ' ' + replace_with + replace_with = qualify(quals, replace_with) + result = result.replace('&', replace_with) + if '$' in result: + raise VerificationError( + "cannot generate '%s' in %s: unknown type name" + % (self._get_c_name(), context)) + return result + + def _get_c_name(self): + return self.c_name_with_marker.replace('&', '') + + def has_c_name(self): + return '$' not in self._get_c_name() + + def is_integer_type(self): + return False + + def get_cached_btype(self, ffi, finishlist, can_delay=False): + try: + BType = ffi._cached_btypes[self] + except KeyError: + BType = self.build_backend_type(ffi, finishlist) + BType2 = ffi._cached_btypes.setdefault(self, BType) + assert BType2 is BType + return BType + + def __repr__(self): + return '<%s>' % (self._get_c_name(),) + + def _get_items(self): + return [(name, getattr(self, name)) for name in self._attrs_] + + +class BaseType(BaseTypeByIdentity): + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self._get_items() == other._get_items()) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash((self.__class__, tuple(self._get_items()))) + + +class VoidType(BaseType): + _attrs_ = () + + def __init__(self): + self.c_name_with_marker = 'void&' + + def build_backend_type(self, ffi, finishlist): + return global_cache(self, ffi, 'new_void_type') + +void_type = VoidType() + + +class BasePrimitiveType(BaseType): + def is_complex_type(self): + return False + + +class PrimitiveType(BasePrimitiveType): + _attrs_ = ('name',) + + ALL_PRIMITIVE_TYPES = { + 'char': 'c', + 'short': 'i', + 'int': 'i', + 'long': 'i', + 'long long': 'i', + 'signed char': 'i', + 'unsigned char': 'i', + 'unsigned short': 'i', + 'unsigned int': 'i', + 'unsigned long': 'i', + 'unsigned long long': 'i', + 'float': 'f', + 'double': 'f', + 'long double': 'f', + '_cffi_float_complex_t': 'j', + '_cffi_double_complex_t': 'j', + '_Bool': 'i', + # the following types are not primitive in the C sense + 'wchar_t': 'c', + 'char16_t': 'c', + 'char32_t': 'c', + 'int8_t': 'i', + 'uint8_t': 'i', + 'int16_t': 'i', + 'uint16_t': 'i', + 'int32_t': 'i', + 'uint32_t': 'i', + 'int64_t': 'i', + 'uint64_t': 'i', + 'int_least8_t': 'i', + 'uint_least8_t': 'i', + 'int_least16_t': 'i', + 'uint_least16_t': 'i', + 'int_least32_t': 'i', + 'uint_least32_t': 'i', + 'int_least64_t': 'i', + 'uint_least64_t': 'i', + 'int_fast8_t': 'i', + 'uint_fast8_t': 'i', + 'int_fast16_t': 'i', + 'uint_fast16_t': 'i', + 'int_fast32_t': 'i', + 'uint_fast32_t': 'i', + 'int_fast64_t': 'i', + 'uint_fast64_t': 'i', + 'intptr_t': 'i', + 'uintptr_t': 'i', + 'intmax_t': 'i', + 'uintmax_t': 'i', + 'ptrdiff_t': 'i', + 'size_t': 'i', + 'ssize_t': 'i', + } + + def __init__(self, name): + assert name in self.ALL_PRIMITIVE_TYPES + self.name = name + self.c_name_with_marker = name + '&' + + def is_char_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' + def is_integer_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' + def is_float_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' + def is_complex_type(self): + return self.ALL_PRIMITIVE_TYPES[self.name] == 'j' + + def build_backend_type(self, ffi, finishlist): + return global_cache(self, ffi, 'new_primitive_type', self.name) + + +class UnknownIntegerType(BasePrimitiveType): + 
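+    # Illustrative note (editorial, not part of the original file): this
+    # models 'typedef int... foo_t;' in a cdef(), e.g.
+    #     ffi.cdef("typedef int... mode_t;")
+    # parses to UnknownIntegerType('mode_t'); only the out-of-line
+    # compilation step can discover the real size and signedness, hence
+    # the NotImplementedError below.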
_attrs_ = ('name',) + + def __init__(self, name): + self.name = name + self.c_name_with_marker = name + '&' + + def is_integer_type(self): + return True + + def build_backend_type(self, ffi, finishlist): + raise NotImplementedError("integer type '%s' can only be used after " + "compilation" % self.name) + +class UnknownFloatType(BasePrimitiveType): + _attrs_ = ('name', ) + + def __init__(self, name): + self.name = name + self.c_name_with_marker = name + '&' + + def build_backend_type(self, ffi, finishlist): + raise NotImplementedError("float type '%s' can only be used after " + "compilation" % self.name) + + +class BaseFunctionType(BaseType): + _attrs_ = ('args', 'result', 'ellipsis', 'abi') + + def __init__(self, args, result, ellipsis, abi=None): + self.args = args + self.result = result + self.ellipsis = ellipsis + self.abi = abi + # + reprargs = [arg._get_c_name() for arg in self.args] + if self.ellipsis: + reprargs.append('...') + reprargs = reprargs or ['void'] + replace_with = self._base_pattern % (', '.join(reprargs),) + if abi is not None: + replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] + self.c_name_with_marker = ( + self.result.c_name_with_marker.replace('&', replace_with)) + + +class RawFunctionType(BaseFunctionType): + # Corresponds to a C type like 'int(int)', which is the C type of + # a function, but not a pointer-to-function. The backend has no + # notion of such a type; it's used temporarily by parsing. + _base_pattern = '(&)(%s)' + is_raw_function = True + + def build_backend_type(self, ffi, finishlist): + raise CDefError("cannot render the type %r: it is a function " + "type, not a pointer-to-function type" % (self,)) + + def as_function_pointer(self): + return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) + + +class FunctionPtrType(BaseFunctionType): + _base_pattern = '(*&)(%s)' + + def build_backend_type(self, ffi, finishlist): + result = self.result.get_cached_btype(ffi, finishlist) + args = [] + for tp in self.args: + args.append(tp.get_cached_btype(ffi, finishlist)) + abi_args = () + if self.abi == "__stdcall": + if not self.ellipsis: # __stdcall ignored for variadic funcs + try: + abi_args = (ffi._backend.FFI_STDCALL,) + except AttributeError: + pass + return global_cache(self, ffi, 'new_function_type', + tuple(args), result, self.ellipsis, *abi_args) + + def as_raw_function(self): + return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) + + +class PointerType(BaseType): + _attrs_ = ('totype', 'quals') + + def __init__(self, totype, quals=0): + self.totype = totype + self.quals = quals + extra = " *&" + if totype.is_array_type: + extra = "(%s)" % (extra.lstrip(),) + extra = qualify(quals, extra) + self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) + + def build_backend_type(self, ffi, finishlist): + BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True) + return global_cache(self, ffi, 'new_pointer_type', BItem) + +voidp_type = PointerType(void_type) + +def ConstPointerType(totype): + return PointerType(totype, Q_CONST) + +const_voidp_type = ConstPointerType(void_type) + + +class NamedPointerType(PointerType): + _attrs_ = ('totype', 'name') + + def __init__(self, totype, name, quals=0): + PointerType.__init__(self, totype, quals) + self.name = name + self.c_name_with_marker = name + '&' + + +class ArrayType(BaseType): + _attrs_ = ('item', 'length') + is_array_type = True + + def __init__(self, item, length): + self.item = item + self.length = length + # + if length is None: + 
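+            # Illustrative note (editorial, not part of the original file):
+            # the '&' marker stays left of the brackets, so for an item of
+            # type 'int' (marker 'int&') the three cases below give
+            # 'int&[]', 'int&[/*...*/]' and 'int&[%s]'; get_c_name('x') on
+            # the first yields 'int x[]'.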
brackets = '&[]' + elif length == '...': + brackets = '&[/*...*/]' + else: + brackets = '&[%s]' % length + self.c_name_with_marker = ( + self.item.c_name_with_marker.replace('&', brackets)) + + def length_is_unknown(self): + return isinstance(self.length, str) + + def resolve_length(self, newlength): + return ArrayType(self.item, newlength) + + def build_backend_type(self, ffi, finishlist): + if self.length_is_unknown(): + raise CDefError("cannot render the type %r: unknown length" % + (self,)) + self.item.get_cached_btype(ffi, finishlist) # force the item BType + BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) + return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) + +char_array_type = ArrayType(PrimitiveType('char'), None) + + +class StructOrUnionOrEnum(BaseTypeByIdentity): + _attrs_ = ('name',) + forcename = None + + def build_c_name_with_marker(self): + name = self.forcename or '%s %s' % (self.kind, self.name) + self.c_name_with_marker = name + '&' + + def force_the_name(self, forcename): + self.forcename = forcename + self.build_c_name_with_marker() + + def get_official_name(self): + assert self.c_name_with_marker.endswith('&') + return self.c_name_with_marker[:-1] + + +class StructOrUnion(StructOrUnionOrEnum): + fixedlayout = None + completed = 0 + partial = False + packed = 0 + + def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): + self.name = name + self.fldnames = fldnames + self.fldtypes = fldtypes + self.fldbitsize = fldbitsize + self.fldquals = fldquals + self.build_c_name_with_marker() + + def anonymous_struct_fields(self): + if self.fldtypes is not None: + for name, type in zip(self.fldnames, self.fldtypes): + if name == '' and isinstance(type, StructOrUnion): + yield type + + def enumfields(self, expand_anonymous_struct_union=True): + fldquals = self.fldquals + if fldquals is None: + fldquals = (0,) * len(self.fldnames) + for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, + self.fldbitsize, fldquals): + if (name == '' and isinstance(type, StructOrUnion) + and expand_anonymous_struct_union): + # nested anonymous struct/union + for result in type.enumfields(): + yield result + else: + yield (name, type, bitsize, quals) + + def force_flatten(self): + # force the struct or union to have a declaration that lists + # directly all fields returned by enumfields(), flattening + # nested anonymous structs/unions. 
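+        # Illustrative sketch (editorial, not part of the original file):
+        # for 'struct s { struct { int a; }; int b; };' this rewrites
+        # fldnames from ('', 'b') to ('a', 'b'), lifting the members of
+        # the anonymous inner struct to the top level via enumfields().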
+ names = [] + types = [] + bitsizes = [] + fldquals = [] + for name, type, bitsize, quals in self.enumfields(): + names.append(name) + types.append(type) + bitsizes.append(bitsize) + fldquals.append(quals) + self.fldnames = tuple(names) + self.fldtypes = tuple(types) + self.fldbitsize = tuple(bitsizes) + self.fldquals = tuple(fldquals) + + def get_cached_btype(self, ffi, finishlist, can_delay=False): + BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, + can_delay) + if not can_delay: + self.finish_backend_type(ffi, finishlist) + return BType + + def finish_backend_type(self, ffi, finishlist): + if self.completed: + if self.completed != 2: + raise NotImplementedError("recursive structure declaration " + "for '%s'" % (self.name,)) + return + BType = ffi._cached_btypes[self] + # + self.completed = 1 + # + if self.fldtypes is None: + pass # not completing it: it's an opaque struct + # + elif self.fixedlayout is None: + fldtypes = [tp.get_cached_btype(ffi, finishlist) + for tp in self.fldtypes] + lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) + extra_flags = () + if self.packed: + if self.packed == 1: + extra_flags = (8,) # SF_PACKED + else: + extra_flags = (0, self.packed) + ffi._backend.complete_struct_or_union(BType, lst, self, + -1, -1, *extra_flags) + # + else: + fldtypes = [] + fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout + for i in range(len(self.fldnames)): + fsize = fieldsize[i] + ftype = self.fldtypes[i] + # + if isinstance(ftype, ArrayType) and ftype.length_is_unknown(): + # fix the length to match the total size + BItemType = ftype.item.get_cached_btype(ffi, finishlist) + nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) + if nrest != 0: + self._verification_error( + "field '%s.%s' has a bogus size?" 
% ( + self.name, self.fldnames[i] or '{}')) + ftype = ftype.resolve_length(nlen) + self.fldtypes = (self.fldtypes[:i] + (ftype,) + + self.fldtypes[i+1:]) + # + BFieldType = ftype.get_cached_btype(ffi, finishlist) + if isinstance(ftype, ArrayType) and ftype.length is None: + assert fsize == 0 + else: + bitemsize = ffi.sizeof(BFieldType) + if bitemsize != fsize: + self._verification_error( + "field '%s.%s' is declared as %d bytes, but is " + "really %d bytes" % (self.name, + self.fldnames[i] or '{}', + bitemsize, fsize)) + fldtypes.append(BFieldType) + # + lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) + ffi._backend.complete_struct_or_union(BType, lst, self, + totalsize, totalalignment) + self.completed = 2 + + def _verification_error(self, msg): + raise VerificationError(msg) + + def check_not_partial(self): + if self.partial and self.fixedlayout is None: + raise VerificationMissing(self._get_c_name()) + + def build_backend_type(self, ffi, finishlist): + self.check_not_partial() + finishlist.append(self) + # + return global_cache(self, ffi, 'new_%s_type' % self.kind, + self.get_official_name(), key=self) + + +class StructType(StructOrUnion): + kind = 'struct' + + +class UnionType(StructOrUnion): + kind = 'union' + + +class EnumType(StructOrUnionOrEnum): + kind = 'enum' + partial = False + partial_resolved = False + + def __init__(self, name, enumerators, enumvalues, baseinttype=None): + self.name = name + self.enumerators = enumerators + self.enumvalues = enumvalues + self.baseinttype = baseinttype + self.build_c_name_with_marker() + + def force_the_name(self, forcename): + StructOrUnionOrEnum.force_the_name(self, forcename) + if self.forcename is None: + name = self.get_official_name() + self.forcename = '$' + name.replace(' ', '_') + + def check_not_partial(self): + if self.partial and not self.partial_resolved: + raise VerificationMissing(self._get_c_name()) + + def build_backend_type(self, ffi, finishlist): + self.check_not_partial() + base_btype = self.build_baseinttype(ffi, finishlist) + return global_cache(self, ffi, 'new_enum_type', + self.get_official_name(), + self.enumerators, self.enumvalues, + base_btype, key=self) + + def build_baseinttype(self, ffi, finishlist): + if self.baseinttype is not None: + return self.baseinttype.get_cached_btype(ffi, finishlist) + # + if self.enumvalues: + smallest_value = min(self.enumvalues) + largest_value = max(self.enumvalues) + else: + import warnings + try: + # XXX! The goal is to ensure that the warnings.warn() + # will not suppress the warning. We want to get it + # several times if we reach this point several times. 
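+                # Editorial note (assumption about CPython's warning
+                # machinery): warnings.warn() records each warning in the
+                # module-level __warningregistry__ and skips repeats by
+                # default, so clearing the registry re-arms the warning
+                # for the next call.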
+ __warningregistry__.clear() + except NameError: + pass + warnings.warn("%r has no values explicitly defined; " + "guessing that it is equivalent to 'unsigned int'" + % self._get_c_name()) + smallest_value = largest_value = 0 + if smallest_value < 0: # needs a signed type + sign = 1 + candidate1 = PrimitiveType("int") + candidate2 = PrimitiveType("long") + else: + sign = 0 + candidate1 = PrimitiveType("unsigned int") + candidate2 = PrimitiveType("unsigned long") + btype1 = candidate1.get_cached_btype(ffi, finishlist) + btype2 = candidate2.get_cached_btype(ffi, finishlist) + size1 = ffi.sizeof(btype1) + size2 = ffi.sizeof(btype2) + if (smallest_value >= ((-1) << (8*size1-1)) and + largest_value < (1 << (8*size1-sign))): + return btype1 + if (smallest_value >= ((-1) << (8*size2-1)) and + largest_value < (1 << (8*size2-sign))): + return btype2 + raise CDefError("%s values don't all fit into either 'long' " + "or 'unsigned long'" % self._get_c_name()) + +def unknown_type(name, structname=None): + if structname is None: + structname = '$%s' % name + tp = StructType(structname, None, None, None) + tp.force_the_name(name) + tp.origin = "unknown_type" + return tp + +def unknown_ptr_type(name, structname=None): + if structname is None: + structname = '$$%s' % name + tp = StructType(structname, None, None, None) + return NamedPointerType(tp, name) + + +global_lock = allocate_lock() +_typecache_cffi_backend = weakref.WeakValueDictionary() + +def get_typecache(backend): + # returns _typecache_cffi_backend if backend is the _cffi_backend + # module, or type(backend).__typecache if backend is an instance of + # CTypesBackend (or some FakeBackend class during tests) + if isinstance(backend, types.ModuleType): + return _typecache_cffi_backend + with global_lock: + if not hasattr(type(backend), '__typecache'): + type(backend).__typecache = weakref.WeakValueDictionary() + return type(backend).__typecache + +def global_cache(srctype, ffi, funcname, *args, **kwds): + key = kwds.pop('key', (funcname, args)) + assert not kwds + try: + return ffi._typecache[key] + except KeyError: + pass + try: + res = getattr(ffi._backend, funcname)(*args) + except NotImplementedError as e: + raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) + # note that setdefault() on WeakValueDictionary is not atomic + # and contains a rare bug (http://bugs.python.org/issue19542); + # we have to use a lock and do it ourselves + cache = ffi._typecache + with global_lock: + res1 = cache.get(key) + if res1 is None: + cache[key] = res + return res + else: + return res1 + +def pointer_cache(ffi, BType): + return global_cache('?', ffi, 'new_pointer_type', BType) + +def attach_exception_info(e, name): + if e.args and type(e.args[0]) is str: + e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:] diff --git a/python/cffi/parse_c_type.h b/python/cffi/parse_c_type.h new file mode 100644 index 000000000..84e4ef856 --- /dev/null +++ b/python/cffi/parse_c_type.h @@ -0,0 +1,181 @@ + +/* This part is from file 'cffi/parse_c_type.h'. It is copied at the + beginning of C sources generated by CFFI's ffi.set_source(). 
*/ + +typedef void *_cffi_opcode_t; + +#define _CFFI_OP(opcode, arg) (_cffi_opcode_t)(opcode | (((uintptr_t)(arg)) << 8)) +#define _CFFI_GETOP(cffi_opcode) ((unsigned char)(uintptr_t)cffi_opcode) +#define _CFFI_GETARG(cffi_opcode) (((intptr_t)cffi_opcode) >> 8) + +#define _CFFI_OP_PRIMITIVE 1 +#define _CFFI_OP_POINTER 3 +#define _CFFI_OP_ARRAY 5 +#define _CFFI_OP_OPEN_ARRAY 7 +#define _CFFI_OP_STRUCT_UNION 9 +#define _CFFI_OP_ENUM 11 +#define _CFFI_OP_FUNCTION 13 +#define _CFFI_OP_FUNCTION_END 15 +#define _CFFI_OP_NOOP 17 +#define _CFFI_OP_BITFIELD 19 +#define _CFFI_OP_TYPENAME 21 +#define _CFFI_OP_CPYTHON_BLTN_V 23 // varargs +#define _CFFI_OP_CPYTHON_BLTN_N 25 // noargs +#define _CFFI_OP_CPYTHON_BLTN_O 27 // O (i.e. a single arg) +#define _CFFI_OP_CONSTANT 29 +#define _CFFI_OP_CONSTANT_INT 31 +#define _CFFI_OP_GLOBAL_VAR 33 +#define _CFFI_OP_DLOPEN_FUNC 35 +#define _CFFI_OP_DLOPEN_CONST 37 +#define _CFFI_OP_GLOBAL_VAR_F 39 +#define _CFFI_OP_EXTERN_PYTHON 41 + +#define _CFFI_PRIM_VOID 0 +#define _CFFI_PRIM_BOOL 1 +#define _CFFI_PRIM_CHAR 2 +#define _CFFI_PRIM_SCHAR 3 +#define _CFFI_PRIM_UCHAR 4 +#define _CFFI_PRIM_SHORT 5 +#define _CFFI_PRIM_USHORT 6 +#define _CFFI_PRIM_INT 7 +#define _CFFI_PRIM_UINT 8 +#define _CFFI_PRIM_LONG 9 +#define _CFFI_PRIM_ULONG 10 +#define _CFFI_PRIM_LONGLONG 11 +#define _CFFI_PRIM_ULONGLONG 12 +#define _CFFI_PRIM_FLOAT 13 +#define _CFFI_PRIM_DOUBLE 14 +#define _CFFI_PRIM_LONGDOUBLE 15 + +#define _CFFI_PRIM_WCHAR 16 +#define _CFFI_PRIM_INT8 17 +#define _CFFI_PRIM_UINT8 18 +#define _CFFI_PRIM_INT16 19 +#define _CFFI_PRIM_UINT16 20 +#define _CFFI_PRIM_INT32 21 +#define _CFFI_PRIM_UINT32 22 +#define _CFFI_PRIM_INT64 23 +#define _CFFI_PRIM_UINT64 24 +#define _CFFI_PRIM_INTPTR 25 +#define _CFFI_PRIM_UINTPTR 26 +#define _CFFI_PRIM_PTRDIFF 27 +#define _CFFI_PRIM_SIZE 28 +#define _CFFI_PRIM_SSIZE 29 +#define _CFFI_PRIM_INT_LEAST8 30 +#define _CFFI_PRIM_UINT_LEAST8 31 +#define _CFFI_PRIM_INT_LEAST16 32 +#define _CFFI_PRIM_UINT_LEAST16 33 +#define _CFFI_PRIM_INT_LEAST32 34 +#define _CFFI_PRIM_UINT_LEAST32 35 +#define _CFFI_PRIM_INT_LEAST64 36 +#define _CFFI_PRIM_UINT_LEAST64 37 +#define _CFFI_PRIM_INT_FAST8 38 +#define _CFFI_PRIM_UINT_FAST8 39 +#define _CFFI_PRIM_INT_FAST16 40 +#define _CFFI_PRIM_UINT_FAST16 41 +#define _CFFI_PRIM_INT_FAST32 42 +#define _CFFI_PRIM_UINT_FAST32 43 +#define _CFFI_PRIM_INT_FAST64 44 +#define _CFFI_PRIM_UINT_FAST64 45 +#define _CFFI_PRIM_INTMAX 46 +#define _CFFI_PRIM_UINTMAX 47 +#define _CFFI_PRIM_FLOATCOMPLEX 48 +#define _CFFI_PRIM_DOUBLECOMPLEX 49 +#define _CFFI_PRIM_CHAR16 50 +#define _CFFI_PRIM_CHAR32 51 + +#define _CFFI__NUM_PRIM 52 +#define _CFFI__UNKNOWN_PRIM (-1) +#define _CFFI__UNKNOWN_FLOAT_PRIM (-2) +#define _CFFI__UNKNOWN_LONG_DOUBLE (-3) + +#define _CFFI__IO_FILE_STRUCT (-1) + + +struct _cffi_global_s { + const char *name; + void *address; + _cffi_opcode_t type_op; + void *size_or_direct_fn; // OP_GLOBAL_VAR: size, or 0 if unknown + // OP_CPYTHON_BLTN_*: addr of direct function +}; + +struct _cffi_getconst_s { + unsigned long long value; + const struct _cffi_type_context_s *ctx; + int gindex; +}; + +struct _cffi_struct_union_s { + const char *name; + int type_index; // -> _cffi_types, on a OP_STRUCT_UNION + int flags; // _CFFI_F_* flags below + size_t size; + int alignment; + int first_field_index; // -> _cffi_fields array + int num_fields; +}; +#define _CFFI_F_UNION 0x01 // is a union, not a struct +#define _CFFI_F_CHECK_FIELDS 0x02 // complain if fields are not in the + // "standard layout" or if some are missing +#define 
_CFFI_F_PACKED 0x04 // for CHECK_FIELDS, assume a packed struct
+#define _CFFI_F_EXTERNAL 0x08 // in some other ffi.include()
+#define _CFFI_F_OPAQUE 0x10 // opaque
+
+struct _cffi_field_s {
+  const char *name;
+  size_t field_offset;
+  size_t field_size;
+  _cffi_opcode_t field_type_op;
+};
+
+struct _cffi_enum_s {
+  const char *name;
+  int type_index;   // -> _cffi_types, on a OP_ENUM
+  int type_prim;    // _CFFI_PRIM_xxx
+  const char *enumerators;   // comma-delimited string
+};
+
+struct _cffi_typename_s {
+  const char *name;
+  int type_index;   /* if opaque, points to a possibly artificial
+                       OP_STRUCT which is itself opaque */
+};
+
+struct _cffi_type_context_s {
+  _cffi_opcode_t *types;
+  const struct _cffi_global_s *globals;
+  const struct _cffi_field_s *fields;
+  const struct _cffi_struct_union_s *struct_unions;
+  const struct _cffi_enum_s *enums;
+  const struct _cffi_typename_s *typenames;
+  int num_globals;
+  int num_struct_unions;
+  int num_enums;
+  int num_typenames;
+  const char *const *includes;
+  int num_types;
+  int flags;   /* future extension */
+};
+
+struct _cffi_parse_info_s {
+  const struct _cffi_type_context_s *ctx;
+  _cffi_opcode_t *output;
+  unsigned int output_size;
+  size_t error_location;
+  const char *error_message;
+};
+
+struct _cffi_externpy_s {
+  const char *name;
+  size_t size_of_result;
+  void *reserved1, *reserved2;
+};
+
+#ifdef _CFFI_INTERNAL
+static int parse_c_type(struct _cffi_parse_info_s *info, const char *input);
+static int search_in_globals(const struct _cffi_type_context_s *ctx,
+                             const char *search, size_t search_len);
+static int search_in_struct_unions(const struct _cffi_type_context_s *ctx,
+                                   const char *search, size_t search_len);
+#endif
diff --git a/python/cffi/pkgconfig.py b/python/cffi/pkgconfig.py
new file mode 100644
index 000000000..5c93f15a6
--- /dev/null
+++ b/python/cffi/pkgconfig.py
@@ -0,0 +1,121 @@
+# pkg-config, https://www.freedesktop.org/wiki/Software/pkg-config/ integration for cffi
+import sys, os, subprocess
+
+from .error import PkgConfigError
+
+
+def merge_flags(cfg1, cfg2):
+    """Merge values from cffi config flags cfg2 into cfg1
+
+    Example:
+        merge_flags({"libraries": ["one"]}, {"libraries": ["two"]})
+        {"libraries": ["one", "two"]}
+    """
+    for key, value in cfg2.items():
+        if key not in cfg1:
+            cfg1[key] = value
+        else:
+            if not isinstance(cfg1[key], list):
+                raise TypeError("cfg1[%r] should be a list of strings" % (key,))
+            if not isinstance(value, list):
+                raise TypeError("cfg2[%r] should be a list of strings" % (key,))
+            cfg1[key].extend(value)
+    return cfg1
+
+
+def call(libname, flag, encoding=sys.getfilesystemencoding()):
+    """Calls pkg-config and returns the output if found
+    """
+    a = ["pkg-config", "--print-errors"]
+    a.append(flag)
+    a.append(libname)
+    try:
+        pc = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    except EnvironmentError as e:
+        raise PkgConfigError("cannot run pkg-config: %s" % (str(e).strip(),))
+
+    bout, berr = pc.communicate()
+    if pc.returncode != 0:
+        try:
+            berr = berr.decode(encoding)
+        except Exception:
+            pass
+        raise PkgConfigError(berr.strip())
+
+    if sys.version_info >= (3,) and not isinstance(bout, str):   # Python 3.x
+        try:
+            bout = bout.decode(encoding)
+        except UnicodeDecodeError:
+            raise PkgConfigError("pkg-config %s %s returned bytes that cannot "
+                                 "be decoded with encoding %r:\n%r" %
+                                 (flag, libname, encoding, bout))
+
+    if os.altsep != '\\' and '\\' in bout:
+        raise PkgConfigError("pkg-config %s %s returned an unsupported "
+                             "backslash-escaped output:\n%r" %
+                             (flag, libname, bout))
+    return bout
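+# Example (hypothetical "libfoo"; the actual strings depend on the .pc files
+# installed on the build machine):
+#
+#     call("libfoo", "--cflags")    # -> e.g. "-I/usr/include/foo -DFOO=1\n"
+#     call("libfoo", "--libs")      # -> e.g. "-L/opt/foo/lib -lfoo\n"
+#     merge_flags({"libraries": ["foo"]},
+#                 {"libraries": ["m"]})   # -> {"libraries": ["foo", "m"]}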
+
+
+def flags_from_pkgconfig(libs):
+    r"""Return compiler line flags for FFI.set_source based on pkg-config output
+
+    Usage
+        ...
+        ffibuilder.set_source("_foo", pkgconfig = ["libfoo", "libbar >= 1.8.3"])
+
+    If pkg-config is installed on the build machine, then the arguments
+    include_dirs, library_dirs, libraries, define_macros, extra_compile_args
+    and extra_link_args are extended with the output of pkg-config for libfoo
+    and libbar.
+
+    Raises PkgConfigError in case the pkg-config call fails.
+    """
+
+    def get_include_dirs(string):
+        return [x[2:] for x in string.split() if x.startswith("-I")]
+
+    def get_library_dirs(string):
+        return [x[2:] for x in string.split() if x.startswith("-L")]
+
+    def get_libraries(string):
+        return [x[2:] for x in string.split() if x.startswith("-l")]
+
+    # convert -Dfoo=bar to list of tuples [("foo", "bar")] expected by distutils
+    def get_macros(string):
+        def _macro(x):
+            x = x[2:]    # drop "-D"
+            if '=' in x:
+                return tuple(x.split("=", 1))  # "-Dfoo=bar" => ("foo", "bar")
+            else:
+                return (x, None)               # "-Dfoo" => ("foo", None)
+        return [_macro(x) for x in string.split() if x.startswith("-D")]
+
+    def get_other_cflags(string):
+        return [x for x in string.split() if not x.startswith("-I") and
+                not x.startswith("-D")]
+
+    def get_other_libs(string):
+        return [x for x in string.split() if not x.startswith("-L") and
+                not x.startswith("-l")]
+
+    # return kwargs for given libname
+    def kwargs(libname):
+        fse = sys.getfilesystemencoding()
+        all_cflags = call(libname, "--cflags")
+        all_libs = call(libname, "--libs")
+        return {
+            "include_dirs": get_include_dirs(all_cflags),
+            "library_dirs": get_library_dirs(all_libs),
+            "libraries": get_libraries(all_libs),
+            "define_macros": get_macros(all_cflags),
+            "extra_compile_args": get_other_cflags(all_cflags),
+            "extra_link_args": get_other_libs(all_libs),
+        }
+
+    # merge all arguments together
+    ret = {}
+    for libname in libs:
+        lib_flags = kwargs(libname)
+        merge_flags(ret, lib_flags)
+    return ret
diff --git a/python/cffi/recompiler.py b/python/cffi/recompiler.py
new file mode 100644
index 000000000..7734a3486
--- /dev/null
+++ b/python/cffi/recompiler.py
@@ -0,0 +1,1598 @@
+import io, os, sys, sysconfig
+from . 
import ffiplatform, model +from .error import VerificationError +from .cffi_opcode import * + +VERSION_BASE = 0x2601 +VERSION_EMBEDDED = 0x2701 +VERSION_CHAR16CHAR32 = 0x2801 + +USE_LIMITED_API = ((sys.platform != 'win32' or sys.version_info < (3, 0) or + sys.version_info >= (3, 5)) and + not sysconfig.get_config_var("Py_GIL_DISABLED")) # free-threaded doesn't yet support limited API + +class GlobalExpr: + def __init__(self, name, address, type_op, size=0, check_value=0): + self.name = name + self.address = address + self.type_op = type_op + self.size = size + self.check_value = check_value + + def as_c_expr(self): + return ' { "%s", (void *)%s, %s, (void *)%s },' % ( + self.name, self.address, self.type_op.as_c_expr(), self.size) + + def as_python_expr(self): + return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name, + self.check_value) + +class FieldExpr: + def __init__(self, name, field_offset, field_size, fbitsize, field_type_op): + self.name = name + self.field_offset = field_offset + self.field_size = field_size + self.fbitsize = fbitsize + self.field_type_op = field_type_op + + def as_c_expr(self): + spaces = " " * len(self.name) + return (' { "%s", %s,\n' % (self.name, self.field_offset) + + ' %s %s,\n' % (spaces, self.field_size) + + ' %s %s },' % (spaces, self.field_type_op.as_c_expr())) + + def as_python_expr(self): + raise NotImplementedError + + def as_field_python_expr(self): + if self.field_type_op.op == OP_NOOP: + size_expr = '' + elif self.field_type_op.op == OP_BITFIELD: + size_expr = format_four_bytes(self.fbitsize) + else: + raise NotImplementedError + return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(), + size_expr, + self.name) + +class StructUnionExpr: + def __init__(self, name, type_index, flags, size, alignment, comment, + first_field_index, c_fields): + self.name = name + self.type_index = type_index + self.flags = flags + self.size = size + self.alignment = alignment + self.comment = comment + self.first_field_index = first_field_index + self.c_fields = c_fields + + def as_c_expr(self): + return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags) + + '\n %s, %s, ' % (self.size, self.alignment) + + '%d, %d ' % (self.first_field_index, len(self.c_fields)) + + ('/* %s */ ' % self.comment if self.comment else '') + + '},') + + def as_python_expr(self): + flags = eval(self.flags, G_FLAGS) + fields_expr = [c_field.as_field_python_expr() + for c_field in self.c_fields] + return "(b'%s%s%s',%s)" % ( + format_four_bytes(self.type_index), + format_four_bytes(flags), + self.name, + ','.join(fields_expr)) + +class EnumExpr: + def __init__(self, name, type_index, size, signed, allenums): + self.name = name + self.type_index = type_index + self.size = size + self.signed = signed + self.allenums = allenums + + def as_c_expr(self): + return (' { "%s", %d, _cffi_prim_int(%s, %s),\n' + ' "%s" },' % (self.name, self.type_index, + self.size, self.signed, self.allenums)) + + def as_python_expr(self): + prim_index = { + (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8, + (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16, + (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32, + (8, 0): PRIM_UINT64, (8, 1): PRIM_INT64, + }[self.size, self.signed] + return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index), + format_four_bytes(prim_index), + self.name, self.allenums) + +class TypenameExpr: + def __init__(self, name, type_index): + self.name = name + self.type_index = type_index + + def as_c_expr(self): + return ' { "%s", %d },' % (self.name, self.type_index) + + def as_python_expr(self): 
+ return "b'%s%s'" % (format_four_bytes(self.type_index), self.name) + + +# ____________________________________________________________ + + +class Recompiler: + _num_externpy = 0 + + def __init__(self, ffi, module_name, target_is_python=False): + self.ffi = ffi + self.module_name = module_name + self.target_is_python = target_is_python + self._version = VERSION_BASE + + def needs_version(self, ver): + self._version = max(self._version, ver) + + def collect_type_table(self): + self._typesdict = {} + self._generate("collecttype") + # + all_decls = sorted(self._typesdict, key=str) + # + # prepare all FUNCTION bytecode sequences first + self.cffi_types = [] + for tp in all_decls: + if tp.is_raw_function: + assert self._typesdict[tp] is None + self._typesdict[tp] = len(self.cffi_types) + self.cffi_types.append(tp) # placeholder + for tp1 in tp.args: + assert isinstance(tp1, (model.VoidType, + model.BasePrimitiveType, + model.PointerType, + model.StructOrUnionOrEnum, + model.FunctionPtrType)) + if self._typesdict[tp1] is None: + self._typesdict[tp1] = len(self.cffi_types) + self.cffi_types.append(tp1) # placeholder + self.cffi_types.append('END') # placeholder + # + # prepare all OTHER bytecode sequences + for tp in all_decls: + if not tp.is_raw_function and self._typesdict[tp] is None: + self._typesdict[tp] = len(self.cffi_types) + self.cffi_types.append(tp) # placeholder + if tp.is_array_type and tp.length is not None: + self.cffi_types.append('LEN') # placeholder + assert None not in self._typesdict.values() + # + # collect all structs and unions and enums + self._struct_unions = {} + self._enums = {} + for tp in all_decls: + if isinstance(tp, model.StructOrUnion): + self._struct_unions[tp] = None + elif isinstance(tp, model.EnumType): + self._enums[tp] = None + for i, tp in enumerate(sorted(self._struct_unions, + key=lambda tp: tp.name)): + self._struct_unions[tp] = i + for i, tp in enumerate(sorted(self._enums, + key=lambda tp: tp.name)): + self._enums[tp] = i + # + # emit all bytecode sequences now + for tp in all_decls: + method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__) + method(tp, self._typesdict[tp]) + # + # consistency check + for op in self.cffi_types: + assert isinstance(op, CffiOp) + self.cffi_types = tuple(self.cffi_types) # don't change any more + + def _enum_fields(self, tp): + # When producing C, expand all anonymous struct/union fields. + # That's necessary to have C code checking the offsets of the + # individual fields contained in them. When producing Python, + # don't do it and instead write it like it is, with the + # corresponding fields having an empty name. Empty names are + # recognized at runtime when we import the generated Python + # file. 
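+        # E.g. for "struct s { union { int a; float b; }; int c; };",
+        # the C path enumerates 'a', 'b' and 'c' individually, while the
+        # Python path yields 'c' plus one field named '' whose type is
+        # the anonymous union itself.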
+ expand_anonymous_struct_union = not self.target_is_python + return tp.enumfields(expand_anonymous_struct_union) + + def _do_collect_type(self, tp): + if not isinstance(tp, model.BaseTypeByIdentity): + if isinstance(tp, tuple): + for x in tp: + self._do_collect_type(x) + return + if tp not in self._typesdict: + self._typesdict[tp] = None + if isinstance(tp, model.FunctionPtrType): + self._do_collect_type(tp.as_raw_function()) + elif isinstance(tp, model.StructOrUnion): + if tp.fldtypes is not None and ( + tp not in self.ffi._parser._included_declarations): + for name1, tp1, _, _ in self._enum_fields(tp): + self._do_collect_type(self._field_type(tp, name1, tp1)) + else: + for _, x in tp._get_items(): + self._do_collect_type(x) + + def _generate(self, step_name): + lst = self.ffi._parser._declarations.items() + for name, (tp, quals) in sorted(lst): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_cpy_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in recompile(): %r" % name) + try: + self._current_quals = quals + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + # ---------- + + ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"] + + def collect_step_tables(self): + # collect the declarations for '_cffi_globals', '_cffi_typenames', etc. + self._lsts = {} + for step_name in self.ALL_STEPS: + self._lsts[step_name] = [] + self._seen_struct_unions = set() + self._generate("ctx") + self._add_missing_struct_unions() + # + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + if step_name != "field": + lst.sort(key=lambda entry: entry.name) + self._lsts[step_name] = tuple(lst) # don't change any more + # + # check for a possible internal inconsistency: _cffi_struct_unions + # should have been generated with exactly self._struct_unions + lst = self._lsts["struct_union"] + for tp, i in self._struct_unions.items(): + assert i < len(lst) + assert lst[i].name == tp.name + assert len(lst) == len(self._struct_unions) + # same with enums + lst = self._lsts["enum"] + for tp, i in self._enums.items(): + assert i < len(lst) + assert lst[i].name == tp.name + assert len(lst) == len(self._enums) + + # ---------- + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def write_source_to_f(self, f, preamble): + if self.target_is_python: + assert preamble is None + self.write_py_source_to_f(f) + else: + assert preamble is not None + self.write_c_source_to_f(f, preamble) + + def _rel_readlines(self, filename): + g = open(os.path.join(os.path.dirname(__file__), filename), 'r') + lines = g.readlines() + g.close() + return lines + + def write_c_source_to_f(self, f, preamble): + self._f = f + prnt = self._prnt + if self.ffi._embedding is not None: + prnt('#define _CFFI_USE_EMBEDDING') + if not USE_LIMITED_API: + prnt('#define _CFFI_NO_LIMITED_API') + # + # first the '#include' (actually done by inlining the file's content) + lines = self._rel_readlines('_cffi_include.h') + i = lines.index('#include "parse_c_type.h"\n') + lines[i:i+1] = self._rel_readlines('parse_c_type.h') + prnt(''.join(lines)) + # + # if we have ffi._embedding != None, we give it here as a macro + # and include an extra file + base_module_name = self.module_name.split('.')[-1] + if self.ffi._embedding is not None: + prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) + prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {') + 
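+            # the startup code is emitted as a comma-separated array of
+            # byte values rather than as one string literal, because MSVC
+            # caps the size of string literals (see
+            # _print_string_literal_in_array below)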
self._print_string_literal_in_array(self.ffi._embedding) + prnt('0 };') + prnt('#ifdef PYPY_VERSION') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( + base_module_name,)) + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( + base_module_name,)) + prnt('#else') + prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( + base_module_name,)) + prnt('#endif') + lines = self._rel_readlines('_embedding.h') + i = lines.index('#include "_cffi_errors.h"\n') + lines[i:i+1] = self._rel_readlines('_cffi_errors.h') + prnt(''.join(lines)) + self.needs_version(VERSION_EMBEDDED) + # + # then paste the C source given by the user, verbatim. + prnt('/************************************************************/') + prnt() + prnt(preamble) + prnt() + prnt('/************************************************************/') + prnt() + # + # the declaration of '_cffi_types' + prnt('static void *_cffi_types[] = {') + typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) + for i, op in enumerate(self.cffi_types): + comment = '' + if i in typeindex2type: + comment = ' // ' + typeindex2type[i]._get_c_name() + prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment)) + if not self.cffi_types: + prnt(' 0') + prnt('};') + prnt() + # + # call generate_cpy_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. + self._seen_constants = set() + self._generate("decl") + # + # the declaration of '_cffi_globals' and '_cffi_typenames' + nums = {} + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + nums[step_name] = len(lst) + if nums[step_name] > 0: + prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % ( + step_name, step_name)) + for entry in lst: + prnt(entry.as_c_expr()) + prnt('};') + prnt() + # + # the declaration of '_cffi_includes' + if self.ffi._included_ffis: + prnt('static const char * const _cffi_includes[] = {') + for ffi_to_include in self.ffi._included_ffis: + try: + included_module_name, included_source = ( + ffi_to_include._assigned_source[:2]) + except AttributeError: + raise VerificationError( + "ffi object %r includes %r, but the latter has not " + "been prepared with set_source()" % ( + self.ffi, ffi_to_include,)) + if included_source is None: + raise VerificationError( + "not implemented yet: ffi.include() of a Python-based " + "ffi inside a C-based ffi") + prnt(' "%s",' % (included_module_name,)) + prnt(' NULL') + prnt('};') + prnt() + # + # the declaration of '_cffi_type_context' + prnt('static const struct _cffi_type_context_s _cffi_type_context = {') + prnt(' _cffi_types,') + for step_name in self.ALL_STEPS: + if nums[step_name] > 0: + prnt(' _cffi_%ss,' % step_name) + else: + prnt(' NULL, /* no %ss */' % step_name) + for step_name in self.ALL_STEPS: + if step_name != "field": + prnt(' %d, /* num_%ss */' % (nums[step_name], step_name)) + if self.ffi._included_ffis: + prnt(' _cffi_includes,') + else: + prnt(' NULL, /* no includes */') + prnt(' %d, /* num_types */' % (len(self.cffi_types),)) + flags = 0 + if self._num_externpy > 0 or self.ffi._embedding is not None: + flags |= 1 # set to mean that we use extern "Python" + prnt(' %d, /* flags */' % flags) + prnt('};') + prnt() + # + # the init function + prnt('#ifdef __GNUC__') + prnt('# pragma GCC visibility push(default) /* for -fvisibility= */') + prnt('#endif') + prnt() + prnt('#ifdef PYPY_VERSION') + prnt('PyMODINIT_FUNC') + prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) + prnt('{') + if flags & 
1: + prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') + prnt(' _cffi_call_python_org = ' + '(void(*)(struct _cffi_externpy_s *, char *))p[1];') + prnt(' }') + prnt(' p[0] = (const void *)0x%x;' % self._version) + prnt(' p[1] = &_cffi_type_context;') + prnt('#if PY_MAJOR_VERSION >= 3') + prnt(' return NULL;') + prnt('#endif') + prnt('}') + # on Windows, distutils insists on putting init_cffi_xyz in + # 'export_symbols', so instead of fighting it, just give up and + # give it one + prnt('# ifdef _MSC_VER') + prnt(' PyMODINIT_FUNC') + prnt('# if PY_MAJOR_VERSION >= 3') + prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,)) + prnt('# else') + prnt(' init%s(void) { }' % (base_module_name,)) + prnt('# endif') + prnt('# endif') + prnt('#elif PY_MAJOR_VERSION >= 3') + prnt('PyMODINIT_FUNC') + prnt('PyInit_%s(void)' % (base_module_name,)) + prnt('{') + prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) + prnt('}') + prnt('#else') + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % (base_module_name,)) + prnt('{') + prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( + self.module_name, self._version)) + prnt('}') + prnt('#endif') + prnt() + prnt('#ifdef __GNUC__') + prnt('# pragma GCC visibility pop') + prnt('#endif') + self._version = None + + def _to_py(self, x): + if isinstance(x, str): + return "b'%s'" % (x,) + if isinstance(x, (list, tuple)): + rep = [self._to_py(item) for item in x] + if len(rep) == 1: + rep.append('') + return "(%s)" % (','.join(rep),) + return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp. + + def write_py_source_to_f(self, f): + self._f = f + prnt = self._prnt + # + # header + prnt("# auto-generated file") + prnt("import _cffi_backend") + # + # the 'import' of the included ffis + num_includes = len(self.ffi._included_ffis or ()) + for i in range(num_includes): + ffi_to_include = self.ffi._included_ffis[i] + try: + included_module_name, included_source = ( + ffi_to_include._assigned_source[:2]) + except AttributeError: + raise VerificationError( + "ffi object %r includes %r, but the latter has not " + "been prepared with set_source()" % ( + self.ffi, ffi_to_include,)) + if included_source is not None: + raise VerificationError( + "not implemented yet: ffi.include() of a C-based " + "ffi inside a Python-based ffi") + prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) + prnt() + prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) + prnt(" _version = 0x%x," % (self._version,)) + self._version = None + # + # the '_types' keyword argument + self.cffi_types = tuple(self.cffi_types) # don't change any more + types_lst = [op.as_python_bytes() for op in self.cffi_types] + prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),)) + typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) + # + # the keyword arguments from ALL_STEPS + for step_name in self.ALL_STEPS: + lst = self._lsts[step_name] + if len(lst) > 0 and step_name != "field": + prnt(' _%ss = %s,' % (step_name, self._to_py(lst))) + # + # the '_includes' keyword argument + if num_includes > 0: + prnt(' _includes = (%s,),' % ( + ', '.join(['_ffi%d' % i for i in range(num_includes)]),)) + # + # the footer + prnt(')') + + # ---------- + + def _gettypenum(self, type): + # a KeyError here is a bug. please report it! 
:-)
+        return self._typesdict[type]
+
+    def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
+        extraarg = ''
+        if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type():
+            if tp.is_integer_type() and tp.name != '_Bool':
+                converter = '_cffi_to_c_int'
+                extraarg = ', %s' % tp.name
+            elif isinstance(tp, model.UnknownFloatType):
+                # don't check with is_float_type(): it may be a 'long
+                # double' here, and _cffi_to_c_double would lose precision
+                converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),)
+            else:
+                cname = tp.get_c_name('')
+                converter = '(%s)_cffi_to_c_%s' % (cname,
+                                                   tp.name.replace(' ', '_'))
+                if cname in ('char16_t', 'char32_t'):
+                    self.needs_version(VERSION_CHAR16CHAR32)
+            errvalue = '-1'
+        #
+        elif isinstance(tp, model.PointerType):
+            self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
+                                                    tovar, errcode)
+            return
+        #
+        elif (isinstance(tp, model.StructOrUnionOrEnum) or
+              isinstance(tp, model.BasePrimitiveType)):
+            # a struct (not a struct pointer) as a function argument;
+            # or, a complex (the same code works)
+            self._prnt('  if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
+                       % (tovar, self._gettypenum(tp), fromvar))
+            self._prnt('    %s;' % errcode)
+            return
+        #
+        elif isinstance(tp, model.FunctionPtrType):
+            converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
+            extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
+            errvalue = 'NULL'
+        #
+        else:
+            raise NotImplementedError(tp)
+        #
+        self._prnt('  %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
+        self._prnt('  if (%s == (%s)%s && PyErr_Occurred())' % (
+            tovar, tp.get_c_name(''), errvalue))
+        self._prnt('    %s;' % errcode)
+
+    def _extra_local_variables(self, tp, localvars, freelines):
+        if isinstance(tp, model.PointerType):
+            localvars.add('Py_ssize_t datasize')
+            localvars.add('struct _cffi_freeme_s *large_args_free = NULL')
+            freelines.add('if (large_args_free != NULL)'
+                          ' _cffi_free_array_arguments(large_args_free);')
+
+    def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
+        self._prnt('  datasize = _cffi_prepare_pointer_call_argument(')
+        self._prnt('      _cffi_type(%d), %s, (char **)&%s);' % (
+            self._gettypenum(tp), fromvar, tovar))
+        self._prnt('  if (datasize != 0) {')
+        self._prnt('    %s = ((size_t)datasize) <= 640 ? 
' + '(%s)alloca((size_t)datasize) : NULL;' % ( + tovar, tp.get_c_name(''))) + self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' + '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) + self._prnt(' datasize, &large_args_free) < 0)') + self._prnt(' %s;' % errcode) + self._prnt(' }') + + def _convert_expr_from_c(self, tp, var, context): + if isinstance(tp, model.BasePrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif isinstance(tp, model.UnknownFloatType): + return '_cffi_from_c_double(%s)' % (var,) + elif tp.name != 'long double' and not tp.is_complex_type(): + cname = tp.name.replace(' ', '_') + if cname in ('char16_t', 'char32_t'): + self.needs_version(VERSION_CHAR16CHAR32) + return '_cffi_from_c_%s(%s)' % (cname, var) + else: + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.ArrayType): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) + elif isinstance(tp, model.StructOrUnion): + if tp.fldnames is None: + raise TypeError("'%s' is used as %s, but is opaque" % ( + tp._get_c_name(), context)) + return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.EnumType): + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + else: + raise NotImplementedError(tp) + + # ---------- + # typedefs + + def _typedef_type(self, tp, name): + return self._global_type(tp, "(*(%s *)0)" % (name,)) + + def _generate_cpy_typedef_collecttype(self, tp, name): + self._do_collect_type(self._typedef_type(tp, name)) + + def _generate_cpy_typedef_decl(self, tp, name): + pass + + def _typedef_ctx(self, tp, name): + type_index = self._typesdict[tp] + self._lsts["typename"].append(TypenameExpr(name, type_index)) + + def _generate_cpy_typedef_ctx(self, tp, name): + tp = self._typedef_type(tp, name) + self._typedef_ctx(tp, name) + if getattr(tp, "origin", None) == "unknown_type": + self._struct_ctx(tp, tp.name, approxname=None) + elif isinstance(tp, model.NamedPointerType): + self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name, + named_ptr=tp) + + # ---------- + # function declarations + + def _generate_cpy_function_collecttype(self, tp, name): + self._do_collect_type(tp.as_raw_function()) + if tp.ellipsis and not self.target_is_python: + self._do_collect_type(tp) + + def _generate_cpy_function_decl(self, tp, name): + assert not self.target_is_python + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no CPython wrapper) + self._generate_cpy_constant_decl(tp, name) + return + prnt = self._prnt + numargs = len(tp.args) + if numargs == 0: + argname = 'noarg' + elif numargs == 1: + argname = 'arg0' + else: + argname = 'args' + # + # ------------------------------ + # the 'd' version of the function, only for addressof(lib, 'func') + arguments = [] + call_arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arguments.append(type.get_c_name(' x%d' % i, context)) + call_arguments.append('x%d' % i) + repr_arguments = ', 
'.join(arguments) + repr_arguments = repr_arguments or 'void' + if tp.abi: + abi = tp.abi + ' ' + else: + abi = '' + name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) + prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) + prnt('{') + call_arguments = ', '.join(call_arguments) + result_code = 'return ' + if isinstance(tp.result, model.VoidType): + result_code = '' + prnt(' %s%s(%s);' % (result_code, name, call_arguments)) + prnt('}') + # + prnt('#ifndef PYPY_VERSION') # ------------------------------ + # + prnt('static PyObject *') + prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) + prnt('{') + # + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + arg = type.get_c_name(' x%d' % i, context) + prnt(' %s;' % arg) + # + localvars = set() + freelines = set() + for type in tp.args: + self._extra_local_variables(type, localvars, freelines) + for decl in sorted(localvars): + prnt(' %s;' % (decl,)) + # + if not isinstance(tp.result, model.VoidType): + result_code = 'result = ' + context = 'result of %s' % name + result_decl = ' %s;' % tp.result.get_c_name(' result', context) + prnt(result_decl) + prnt(' PyObject *pyresult;') + else: + result_decl = None + result_code = '' + # + if len(tp.args) > 1: + rng = range(len(tp.args)) + for i in rng: + prnt(' PyObject *arg%d;' % i) + prnt() + prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % ( + name, len(rng), len(rng), + ', '.join(['&arg%d' % i for i in rng]))) + prnt(' return NULL;') + prnt() + # + for i, type in enumerate(tp.args): + self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, + 'return NULL') + prnt() + # + prnt(' Py_BEGIN_ALLOW_THREADS') + prnt(' _cffi_restore_errno();') + call_arguments = ['x%d' % i for i in range(len(tp.args))] + call_arguments = ', '.join(call_arguments) + prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) + prnt(' _cffi_save_errno();') + prnt(' Py_END_ALLOW_THREADS') + prnt() + # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') + if result_code: + prnt(' pyresult = %s;' % + self._convert_expr_from_c(tp.result, 'result', 'result type')) + for freeline in freelines: + prnt(' ' + freeline) + prnt(' return pyresult;') + else: + for freeline in freelines: + prnt(' ' + freeline) + prnt(' Py_INCREF(Py_None);') + prnt(' return Py_None;') + prnt('}') + # + prnt('#else') # ------------------------------ + # + # the PyPy version: need to replace struct/union arguments with + # pointers, and if the result is a struct/union, insert a first + # arg that is a pointer to the result. We also do that for + # complex args and return type. 
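+        # E.g. a function declared as 'struct pt f(int)' is wrapped below
+        # as (roughly) 'void _cffi_f_f(struct pt *result, int x0)'; the
+        # struct result travels through the inserted first argument.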
+ def need_indirection(type): + return (isinstance(type, model.StructOrUnion) or + (isinstance(type, model.PrimitiveType) and + type.is_complex_type())) + difference = False + arguments = [] + call_arguments = [] + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + indirection = '' + if need_indirection(type): + indirection = '*' + difference = True + arg = type.get_c_name(' %sx%d' % (indirection, i), context) + arguments.append(arg) + call_arguments.append('%sx%d' % (indirection, i)) + tp_result = tp.result + if need_indirection(tp_result): + context = 'result of %s' % name + arg = tp_result.get_c_name(' *result', context) + arguments.insert(0, arg) + tp_result = model.void_type + result_decl = None + result_code = '*result = ' + difference = True + if difference: + repr_arguments = ', '.join(arguments) + repr_arguments = repr_arguments or 'void' + name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, + repr_arguments) + prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) + prnt('{') + if result_decl: + prnt(result_decl) + call_arguments = ', '.join(call_arguments) + prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) + if result_decl: + prnt(' return result;') + prnt('}') + else: + prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name)) + # + prnt('#endif') # ------------------------------ + prnt() + + def _generate_cpy_function_ctx(self, tp, name): + if tp.ellipsis and not self.target_is_python: + self._generate_cpy_constant_ctx(tp, name) + return + type_index = self._typesdict[tp.as_raw_function()] + numargs = len(tp.args) + if self.target_is_python: + meth_kind = OP_DLOPEN_FUNC + elif numargs == 0: + meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS' + elif numargs == 1: + meth_kind = OP_CPYTHON_BLTN_O # 'METH_O' + else: + meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS' + self._lsts["global"].append( + GlobalExpr(name, '_cffi_f_%s' % name, + CffiOp(meth_kind, type_index), + size='_cffi_d_%s' % name)) + + # ---------- + # named structs or unions + + def _field_type(self, tp_struct, field_name, tp_field): + if isinstance(tp_field, model.ArrayType): + actual_length = tp_field.length + if actual_length == '...': + ptr_struct_name = tp_struct.get_c_name('*') + actual_length = '_cffi_array_len(((%s)0)->%s)' % ( + ptr_struct_name, field_name) + tp_item = self._field_type(tp_struct, '%s[0]' % field_name, + tp_field.item) + tp_field = model.ArrayType(tp_item, actual_length) + return tp_field + + def _struct_collecttype(self, tp): + self._do_collect_type(tp) + if self.target_is_python: + # also requires nested anon struct/unions in ABI mode, recursively + for fldtype in tp.anonymous_struct_fields(): + self._struct_collecttype(fldtype) + + def _struct_decl(self, tp, cname, approxname): + if tp.fldtypes is None: + return + prnt = self._prnt + checkfuncname = '_cffi_checkfld_%s' % (approxname,) + prnt('_CFFI_UNUSED_FN') + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in self._enum_fields(tp): + try: + if ftype.is_integer_type() or fbitsize >= 0: + # accept all integers, but complain on float or double + if fname != '': + prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is " + "an integer */" % (fname, cname, fname)) + continue + # only accept exactly the type declared, except that '[]' + # is interpreted as a '*' and so will match any array length. + # (It would also match '*', but that's harder to detect...) 
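+                # e.g. a cdef field 'int a[];' is checked below as
+                # '{ int *tmp = &p->a[0]; (void)tmp; }', which compiles
+                # against any actual array length in the real header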
+ while (isinstance(ftype, model.ArrayType) + and (ftype.length is None or ftype.length == '...')): + ftype = ftype.item + fname = fname + '[0]' + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname)) + prnt() + + def _struct_ctx(self, tp, cname, approxname, named_ptr=None): + type_index = self._typesdict[tp] + reason_for_not_expanding = None + flags = [] + if isinstance(tp, model.UnionType): + flags.append("_CFFI_F_UNION") + if tp.fldtypes is None: + flags.append("_CFFI_F_OPAQUE") + reason_for_not_expanding = "opaque" + if (tp not in self.ffi._parser._included_declarations and + (named_ptr is None or + named_ptr not in self.ffi._parser._included_declarations)): + if tp.fldtypes is None: + pass # opaque + elif tp.partial or any(tp.anonymous_struct_fields()): + pass # field layout obtained silently from the C compiler + else: + flags.append("_CFFI_F_CHECK_FIELDS") + if tp.packed: + if tp.packed > 1: + raise NotImplementedError( + "%r is declared with 'pack=%r'; only 0 or 1 are " + "supported in API mode (try to use \"...;\", which " + "does not require a 'pack' declaration)" % + (tp, tp.packed)) + flags.append("_CFFI_F_PACKED") + else: + flags.append("_CFFI_F_EXTERNAL") + reason_for_not_expanding = "external" + flags = '|'.join(flags) or '0' + c_fields = [] + if reason_for_not_expanding is None: + enumfields = list(self._enum_fields(tp)) + for fldname, fldtype, fbitsize, fqual in enumfields: + fldtype = self._field_type(tp, fldname, fldtype) + self._check_not_opaque(fldtype, + "field '%s.%s'" % (tp.name, fldname)) + # cname is None for _add_missing_struct_unions() only + op = OP_NOOP + if fbitsize >= 0: + op = OP_BITFIELD + size = '%d /* bits */' % fbitsize + elif cname is None or ( + isinstance(fldtype, model.ArrayType) and + fldtype.length is None): + size = '(size_t)-1' + else: + size = 'sizeof(((%s)0)->%s)' % ( + tp.get_c_name('*') if named_ptr is None + else named_ptr.name, + fldname) + if cname is None or fbitsize >= 0: + offset = '(size_t)-1' + elif named_ptr is not None: + offset = '(size_t)(((char *)&((%s)4096)->%s) - (char *)4096)' % ( + named_ptr.name, fldname) + else: + offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname) + c_fields.append( + FieldExpr(fldname, offset, size, fbitsize, + CffiOp(op, self._typesdict[fldtype]))) + first_field_index = len(self._lsts["field"]) + self._lsts["field"].extend(c_fields) + # + if cname is None: # unknown name, for _add_missing_struct_unions + size = '(size_t)-2' + align = -2 + comment = "unnamed" + else: + if named_ptr is not None: + size = 'sizeof(*(%s)0)' % (named_ptr.name,) + align = '-1 /* unknown alignment */' + else: + size = 'sizeof(%s)' % (cname,) + align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,) + comment = None + else: + size = '(size_t)-1' + align = -1 + first_field_index = -1 + comment = reason_for_not_expanding + self._lsts["struct_union"].append( + StructUnionExpr(tp.name, type_index, flags, size, align, comment, + first_field_index, c_fields)) + self._seen_struct_unions.add(tp) + + def _check_not_opaque(self, tp, location): + while isinstance(tp, model.ArrayType): + tp = tp.item + if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None: + raise TypeError( + "%s is of an opaque type (not declared in cdef())" % location) + + def _add_missing_struct_unions(self): + # not very 
nice, but some struct declarations might be missing + # because they don't have any known C name. Check that they are + # not partial (we can't complete or verify them!) and emit them + # anonymously. + lst = list(self._struct_unions.items()) + lst.sort(key=lambda tp_order: tp_order[1]) + for tp, order in lst: + if tp not in self._seen_struct_unions: + if tp.partial: + raise NotImplementedError("internal inconsistency: %r is " + "partial but was not seen at " + "this point" % (tp,)) + if tp.name.startswith('$') and tp.name[1:].isdigit(): + approxname = tp.name[1:] + elif tp.name == '_IO_FILE' and tp.forcename == 'FILE': + approxname = 'FILE' + self._typedef_ctx(tp, 'FILE') + else: + raise NotImplementedError("internal inconsistency: %r" % + (tp,)) + self._struct_ctx(tp, None, approxname) + + def _generate_cpy_struct_collecttype(self, tp, name): + self._struct_collecttype(tp) + _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype + + def _struct_names(self, tp): + cname = tp.get_c_name('') + if ' ' in cname: + return cname, cname.replace(' ', '_') + else: + return cname, '_' + cname + + def _generate_cpy_struct_decl(self, tp, name): + self._struct_decl(tp, *self._struct_names(tp)) + _generate_cpy_union_decl = _generate_cpy_struct_decl + + def _generate_cpy_struct_ctx(self, tp, name): + self._struct_ctx(tp, *self._struct_names(tp)) + _generate_cpy_union_ctx = _generate_cpy_struct_ctx + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. + + def _generate_cpy_anonymous_collecttype(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_collecttype(tp, name) + else: + self._struct_collecttype(tp) + + def _generate_cpy_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_decl(tp) + else: + self._struct_decl(tp, name, 'typedef_' + name) + + def _generate_cpy_anonymous_ctx(self, tp, name): + if isinstance(tp, model.EnumType): + self._enum_ctx(tp, name) + else: + self._struct_ctx(tp, name, 'typedef_' + name) + + # ---------- + # constants, declared with "static const ..." 
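+    # For an integer constant FOO, _generate_cpy_const() below emits
+    # roughly:
+    #     static int _cffi_const_FOO(unsigned long long *o) { ... }
+    # where the return value carries "is nonpositive" in bit 0 and, if a
+    # check_value was given, "value mismatch" in bit 1; non-integer
+    # constants are instead written through a 'char *' out-buffer.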
+ + def _generate_cpy_const(self, is_int, name, tp=None, category='const', + check_value=None): + if (category, name) in self._seen_constants: + raise VerificationError( + "duplicate declaration of %s '%s'" % (category, name)) + self._seen_constants.add((category, name)) + # + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + if is_int: + prnt('static int %s(unsigned long long *o)' % funcname) + prnt('{') + prnt(' int n = (%s) <= 0;' % (name,)) + prnt(' *o = (unsigned long long)((%s) | 0);' + ' /* check that %s is an integer */' % (name, name)) + if check_value is not None: + if check_value > 0: + check_value = '%dU' % (check_value,) + prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,)) + prnt(' n |= 2;') + prnt(' return n;') + prnt('}') + else: + assert check_value is None + prnt('static void %s(char *o)' % funcname) + prnt('{') + prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name)) + prnt('}') + prnt() + + def _generate_cpy_constant_collecttype(self, tp, name): + is_int = tp.is_integer_type() + if not is_int or self.target_is_python: + self._do_collect_type(tp) + + def _generate_cpy_constant_decl(self, tp, name): + is_int = tp.is_integer_type() + self._generate_cpy_const(is_int, name, tp) + + def _generate_cpy_constant_ctx(self, tp, name): + if not self.target_is_python and tp.is_integer_type(): + type_op = CffiOp(OP_CONSTANT_INT, -1) + else: + if self.target_is_python: + const_kind = OP_DLOPEN_CONST + else: + const_kind = OP_CONSTANT + type_index = self._typesdict[tp] + type_op = CffiOp(const_kind, type_index) + self._lsts["global"].append( + GlobalExpr(name, '_cffi_const_%s' % name, type_op)) + + # ---------- + # enums + + def _generate_cpy_enum_collecttype(self, tp, name): + self._do_collect_type(tp) + + def _generate_cpy_enum_decl(self, tp, name=None): + for enumerator in tp.enumerators: + self._generate_cpy_const(True, enumerator) + + def _enum_ctx(self, tp, cname): + type_index = self._typesdict[tp] + type_op = CffiOp(OP_ENUM, -1) + if self.target_is_python: + tp.check_not_partial() + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._lsts["global"].append( + GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, + check_value=enumvalue)) + # + if cname is not None and '$' not in cname and not self.target_is_python: + size = "sizeof(%s)" % cname + signed = "((%s)-1) <= 0" % cname + else: + basetp = tp.build_baseinttype(self.ffi, []) + size = self.ffi.sizeof(basetp) + signed = int(int(self.ffi.cast(basetp, -1)) < 0) + allenums = ",".join(tp.enumerators) + self._lsts["enum"].append( + EnumExpr(tp.name, type_index, size, signed, allenums)) + + def _generate_cpy_enum_ctx(self, tp, name): + self._enum_ctx(tp, tp._get_c_name()) + + # ---------- + # macros: for now only for integers + + def _generate_cpy_macro_collecttype(self, tp, name): + pass + + def _generate_cpy_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) + + def _generate_cpy_macro_ctx(self, tp, name): + if tp == '...': + if self.target_is_python: + raise VerificationError( + "cannot use the syntax '...' in '#define %s ...' 
when "
+                    "using the ABI mode" % (name,))
+            check_value = None
+        else:
+            check_value = tp     # an integer
+        type_op = CffiOp(OP_CONSTANT_INT, -1)
+        self._lsts["global"].append(
+            GlobalExpr(name, '_cffi_const_%s' % name, type_op,
+                       check_value=check_value))
+
+    # ----------
+    # global variables
+
+    def _global_type(self, tp, global_name):
+        if isinstance(tp, model.ArrayType):
+            actual_length = tp.length
+            if actual_length == '...':
+                actual_length = '_cffi_array_len(%s)' % (global_name,)
+            tp_item = self._global_type(tp.item, '%s[0]' % global_name)
+            tp = model.ArrayType(tp_item, actual_length)
+        return tp
+
+    def _generate_cpy_variable_collecttype(self, tp, name):
+        self._do_collect_type(self._global_type(tp, name))
+
+    def _generate_cpy_variable_decl(self, tp, name):
+        prnt = self._prnt
+        tp = self._global_type(tp, name)
+        if isinstance(tp, model.ArrayType) and tp.length is None:
+            tp = tp.item
+            ampersand = ''
+        else:
+            ampersand = '&'
+        # This code assumes that casts from "tp *" to "void *" are
+        # no-ops, i.e. a function that returns a "tp *" can be called
+        # as if it returned a "void *".  This should be generally true
+        # on any modern machine.  The only exception to that rule (on
+        # uncommon architectures, and as far as I can tell) might be
+        # if 'tp' were a function type, but that is not possible here.
+        # (If 'tp' is a function _pointer_ type, then casts from "fn_t
+        # **" to "void *" are again no-ops, as far as I can tell.)
+        decl = '*_cffi_var_%s(void)' % (name,)
+        prnt('static ' + tp.get_c_name(decl, quals=self._current_quals))
+        prnt('{')
+        prnt('  return %s(%s);' % (ampersand, name))
+        prnt('}')
+        prnt()
+
+    def _generate_cpy_variable_ctx(self, tp, name):
+        tp = self._global_type(tp, name)
+        type_index = self._typesdict[tp]
+        if self.target_is_python:
+            op = OP_GLOBAL_VAR
+        else:
+            op = OP_GLOBAL_VAR_F
+        self._lsts["global"].append(
+            GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index)))
+
+    # ----------
+    # extern "Python"
+
+    def _generate_cpy_extern_python_collecttype(self, tp, name):
+        assert isinstance(tp, model.FunctionPtrType)
+        self._do_collect_type(tp)
+    _generate_cpy_dllexport_python_collecttype = \
+        _generate_cpy_extern_python_plus_c_collecttype = \
+        _generate_cpy_extern_python_collecttype
+
+    def _extern_python_decl(self, tp, name, tag_and_space):
+        prnt = self._prnt
+        if isinstance(tp.result, model.VoidType):
+            size_of_result = '0'
+        else:
+            context = 'result of %s' % name
+            size_of_result = '(int)sizeof(%s)' % (
+                tp.result.get_c_name('', context),)
+        prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name)
+        prnt('  { "%s.%s", %s, 0, 0 };' % (
+            self.module_name, name, size_of_result))
+        prnt()
+        #
+        arguments = []
+        context = 'argument of %s' % name
+        for i, type in enumerate(tp.args):
+            arg = type.get_c_name(' a%d' % i, context)
+            arguments.append(arg)
+        #
+        repr_arguments = ', '.join(arguments)
+        repr_arguments = repr_arguments or 'void'
+        name_and_arguments = '%s(%s)' % (name, repr_arguments)
+        if tp.abi == "__stdcall":
+            name_and_arguments = '_cffi_stdcall ' + name_and_arguments
+        #
+        def may_need_128_bits(tp):
+            return (isinstance(tp, model.PrimitiveType) and
+                    tp.name == 'long double')
+        #
+        size_of_a = max(len(tp.args)*8, 8)
+        if may_need_128_bits(tp.result):
+            size_of_a = max(size_of_a, 16)
+        if isinstance(tp.result, model.StructOrUnion):
+            size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( + tp.result.get_c_name(''), size_of_a, + tp.result.get_c_name(''), size_of_a) + prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) + prnt('{') + prnt(' char a[%s];' % size_of_a) + prnt(' char *p = a;') + for i, type in enumerate(tp.args): + arg = 'a%d' % i + if (isinstance(type, model.StructOrUnion) or + may_need_128_bits(type)): + arg = '&' + arg + type = model.PointerType(type) + prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) + prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) + if not isinstance(tp.result, model.VoidType): + prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) + prnt('}') + prnt() + self._num_externpy += 1 + + def _generate_cpy_extern_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'static ') + + def _generate_cpy_dllexport_python_decl(self, tp, name): + self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') + + def _generate_cpy_extern_python_plus_c_decl(self, tp, name): + self._extern_python_decl(tp, name, '') + + def _generate_cpy_extern_python_ctx(self, tp, name): + if self.target_is_python: + raise VerificationError( + "cannot use 'extern \"Python\"' in the ABI mode") + if tp.ellipsis: + raise NotImplementedError("a vararg function is extern \"Python\"") + type_index = self._typesdict[tp] + type_op = CffiOp(OP_EXTERN_PYTHON, type_index) + self._lsts["global"].append( + GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) + + _generate_cpy_dllexport_python_ctx = \ + _generate_cpy_extern_python_plus_c_ctx = \ + _generate_cpy_extern_python_ctx + + def _print_string_literal_in_array(self, s): + prnt = self._prnt + prnt('// # NB. this is not a string because of a size limit in MSVC') + if not isinstance(s, bytes): # unicode + s = s.encode('utf-8') # -> bytes + else: + s.decode('utf-8') # got bytes, check for valid utf-8 + try: + s.decode('ascii') + except UnicodeDecodeError: + s = b'# -*- encoding: utf8 -*-\n' + s + for line in s.splitlines(True): + comment = line + if type('//') is bytes: # python2 + line = map(ord, line) # make a list of integers + else: # python3 + # type(line) is bytes, which enumerates like a list of integers + comment = ascii(comment)[1:-1] + prnt(('// ' + comment).rstrip()) + printed_line = '' + for c in line: + if len(printed_line) >= 76: + prnt(printed_line) + printed_line = '' + printed_line += '%d,' % (c,) + prnt(printed_line) + + # ---------- + # emitting the opcodes for individual types + + def _emit_bytecode_VoidType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID) + + def _emit_bytecode_PrimitiveType(self, tp, index): + prim_index = PRIMITIVE_TO_INDEX[tp.name] + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index) + + def _emit_bytecode_UnknownIntegerType(self, tp, index): + s = ('_cffi_prim_int(sizeof(%s), (\n' + ' ((%s)-1) | 0 /* check that %s is an integer type */\n' + ' ) <= 0)' % (tp.name, tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + + def _emit_bytecode_UnknownFloatType(self, tp, index): + s = ('_cffi_prim_float(sizeof(%s) *\n' + ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' + ' )' % (tp.name, tp.name)) + self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) + + def _emit_bytecode_RawFunctionType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) + index += 1 + for tp1 in tp.args: + realindex = self._typesdict[tp1] + if index != realindex: + if isinstance(tp1, model.PrimitiveType): + self._emit_bytecode_PrimitiveType(tp1, index) + 
else: + self.cffi_types[index] = CffiOp(OP_NOOP, realindex) + index += 1 + flags = int(tp.ellipsis) + if tp.abi is not None: + if tp.abi == '__stdcall': + flags |= 2 + else: + raise NotImplementedError("abi=%r" % (tp.abi,)) + self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) + + def _emit_bytecode_PointerType(self, tp, index): + self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) + + _emit_bytecode_ConstPointerType = _emit_bytecode_PointerType + _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType + + def _emit_bytecode_FunctionPtrType(self, tp, index): + raw = tp.as_raw_function() + self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw]) + + def _emit_bytecode_ArrayType(self, tp, index): + item_index = self._typesdict[tp.item] + if tp.length is None: + self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index) + elif tp.length == '...': + raise VerificationError( + "type %s badly placed: the '...' array length can only be " + "used on global arrays or on fields of structures" % ( + str(tp).replace('/*...*/', '...'),)) + else: + assert self.cffi_types[index + 1] == 'LEN' + self.cffi_types[index] = CffiOp(OP_ARRAY, item_index) + self.cffi_types[index + 1] = CffiOp(None, str(tp.length)) + + def _emit_bytecode_StructType(self, tp, index): + struct_index = self._struct_unions[tp] + self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index) + _emit_bytecode_UnionType = _emit_bytecode_StructType + + def _emit_bytecode_EnumType(self, tp, index): + enum_index = self._enums[tp] + self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) + + +if sys.version_info >= (3,): + NativeIO = io.StringIO +else: + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) + +def _is_file_like(maybefile): + # compare to xml.etree.ElementTree._get_writer + return hasattr(maybefile, 'write') + +def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): + if verbose: + print("generating %s" % (target_file,)) + recompiler = Recompiler(ffi, module_name, + target_is_python=(preamble is None)) + recompiler.collect_type_table() + recompiler.collect_step_tables() + if _is_file_like(target_file): + recompiler.write_source_to_f(target_file, preamble) + return True + f = NativeIO() + recompiler.write_source_to_f(f, preamble) + output = f.getvalue() + try: + with open(target_file, 'r') as f1: + if f1.read(len(output) + 1) != output: + raise IOError + if verbose: + print("(already up-to-date)") + return False # already up-to-date + except IOError: + tmp_file = '%s.~%d' % (target_file, os.getpid()) + with open(tmp_file, 'w') as f1: + f1.write(output) + try: + os.rename(tmp_file, target_file) + except OSError: + os.unlink(target_file) + os.rename(tmp_file, target_file) + return True + +def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): + assert preamble is not None + return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, + verbose) + +def make_py_source(ffi, module_name, target_py_file, verbose=False): + return _make_c_or_py_source(ffi, module_name, None, target_py_file, + verbose) + +def _modname_to_file(outputdir, modname, extension): + parts = modname.split('.') + try: + os.makedirs(os.path.join(outputdir, *parts[:-1])) + except OSError: + pass + parts[-1] += extension + return os.path.join(outputdir, *parts), parts + + +# Aaargh. Distutils is not tested at all for the purpose of compiling +# DLLs that are not extension modules. 
Here are some hacks to work +# around that, in the _patch_for_*() functions... + +def _patch_meth(patchlist, cls, name, new_meth): + old = getattr(cls, name) + patchlist.append((cls, name, old)) + setattr(cls, name, new_meth) + return old + +def _unpatch_meths(patchlist): + for cls, name, old_meth in reversed(patchlist): + setattr(cls, name, old_meth) + +def _patch_for_embedding(patchlist): + if sys.platform == 'win32': + # we must not remove the manifest when building for embedding! + # FUTURE: this module was removed in setuptools 74; this is likely dead code and should be removed, + # since the toolchain it supports (VS2005-2008) is also long dead. + from cffi._shimmed_dist_utils import MSVCCompiler + if MSVCCompiler is not None: + _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', + lambda self, manifest_file: manifest_file) + + if sys.platform == 'darwin': + # we must not make a '-bundle', but a '-dynamiclib' instead + from cffi._shimmed_dist_utils import CCompiler + def my_link_shared_object(self, *args, **kwds): + if '-bundle' in self.linker_so: + self.linker_so = list(self.linker_so) + i = self.linker_so.index('-bundle') + self.linker_so[i] = '-dynamiclib' + return old_link_shared_object(self, *args, **kwds) + old_link_shared_object = _patch_meth(patchlist, CCompiler, + 'link_shared_object', + my_link_shared_object) + +def _patch_for_target(patchlist, target): + from cffi._shimmed_dist_utils import build_ext + # if 'target' is different from '*', we need to patch some internal + # method to just return this 'target' value, instead of having it + # built from module_name + if target.endswith('.*'): + target = target[:-2] + if sys.platform == 'win32': + target += '.dll' + elif sys.platform == 'darwin': + target += '.dylib' + else: + target += '.so' + _patch_meth(patchlist, build_ext, 'get_ext_filename', + lambda self, ext_name: target) + + +def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, + c_file=None, source_extension='.c', extradir=None, + compiler_verbose=1, target=None, debug=None, + uses_ffiplatform=True, **kwds): + if not isinstance(module_name, str): + module_name = module_name.encode('ascii') + if ffi._windows_unicode: + ffi._apply_windows_unicode(kwds) + if preamble is not None: + if call_c_compiler and _is_file_like(c_file): + raise TypeError("Writing to file-like objects is not supported " + "with call_c_compiler=True") + embedding = (ffi._embedding is not None) + if embedding: + ffi._apply_embedding_fix(kwds) + if c_file is None: + c_file, parts = _modname_to_file(tmpdir, module_name, + source_extension) + if extradir: + parts = [extradir] + parts + ext_c_file = os.path.join(*parts) + else: + ext_c_file = c_file + # + if target is None: + if embedding: + target = '%s.*' % module_name + else: + target = '*' + # + if uses_ffiplatform: + ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) + else: + ext = None + updated = make_c_source(ffi, module_name, preamble, c_file, + verbose=compiler_verbose) + if call_c_compiler: + patchlist = [] + cwd = os.getcwd() + try: + if embedding: + _patch_for_embedding(patchlist) + if target != '*': + _patch_for_target(patchlist, target) + if compiler_verbose: + if tmpdir == '.': + msg = 'the current directory is' + else: + msg = 'setting the current directory to' + print('%s %r' % (msg, os.path.abspath(tmpdir))) + os.chdir(tmpdir) + outputfilename = ffiplatform.compile('.', ext, + compiler_verbose, debug) + finally: + os.chdir(cwd) + _unpatch_meths(patchlist) + return outputfilename + else: + 
return ext, updated + else: + if c_file is None: + c_file, _ = _modname_to_file(tmpdir, module_name, '.py') + updated = make_py_source(ffi, module_name, c_file, + verbose=compiler_verbose) + if call_c_compiler: + return c_file + else: + return None, updated + diff --git a/python/cffi/setuptools_ext.py b/python/cffi/setuptools_ext.py new file mode 100644 index 000000000..5cdd246fb --- /dev/null +++ b/python/cffi/setuptools_ext.py @@ -0,0 +1,229 @@ +import os +import sys +import sysconfig + +try: + basestring +except NameError: + # Python 3.x + basestring = str + +def error(msg): + from cffi._shimmed_dist_utils import DistutilsSetupError + raise DistutilsSetupError(msg) + + +def execfile(filename, glob): + # We use execfile() (here rewritten for Python 3) instead of + # __import__() to load the build script. The problem with + # a normal import is that in some packages, the intermediate + # __init__.py files may already try to import the file that + # we are generating. + with open(filename) as f: + src = f.read() + src += '\n' # Python 2.6 compatibility + code = compile(src, filename, 'exec') + exec(code, glob, glob) + + +def add_cffi_module(dist, mod_spec): + from cffi.api import FFI + + if not isinstance(mod_spec, basestring): + error("argument to 'cffi_modules=...' must be a str or a list of str," + " not %r" % (type(mod_spec).__name__,)) + mod_spec = str(mod_spec) + try: + build_file_name, ffi_var_name = mod_spec.split(':') + except ValueError: + error("%r must be of the form 'path/build.py:ffi_variable'" % + (mod_spec,)) + if not os.path.exists(build_file_name): + ext = '' + rewritten = build_file_name.replace('.', '/') + '.py' + if os.path.exists(rewritten): + ext = ' (rewrite cffi_modules to [%r])' % ( + rewritten + ':' + ffi_var_name,) + error("%r does not name an existing file%s" % (build_file_name, ext)) + + mod_vars = {'__name__': '__cffi__', '__file__': build_file_name} + execfile(build_file_name, mod_vars) + + try: + ffi = mod_vars[ffi_var_name] + except KeyError: + error("%r: object %r not found in module" % (mod_spec, + ffi_var_name)) + if not isinstance(ffi, FFI): + ffi = ffi() # maybe it's a function instead of directly an ffi + if not isinstance(ffi, FFI): + error("%r is not an FFI instance (got %r)" % (mod_spec, + type(ffi).__name__)) + if not hasattr(ffi, '_assigned_source'): + error("%r: the set_source() method was not called" % (mod_spec,)) + module_name, source, source_extension, kwds = ffi._assigned_source + if ffi._windows_unicode: + kwds = kwds.copy() + ffi._apply_windows_unicode(kwds) + + if source is None: + _add_py_module(dist, ffi, module_name) + else: + _add_c_module(dist, ffi, module_name, source, source_extension, kwds) + +def _set_py_limited_api(Extension, kwds): + """ + Add py_limited_api to kwds if setuptools >= 26 is in use. + Do not alter the setting if it already exists. + Setuptools takes care of ignoring the flag on Python 2 and PyPy. + + CPython itself should ignore the flag in a debugging version + (by not listing .abi3.so in the extensions it supports), but + it doesn't so far, creating troubles. That's why we check + for "not hasattr(sys, 'gettotalrefcount')" (the 2.7 compatible equivalent + of 'd' not in sys.abiflags). (http://bugs.python.org/issue28401) + + On Windows, with CPython <= 3.4, it's better not to use py_limited_api + because virtualenv *still* doesn't copy PYTHON3.DLL on these versions. + Recently (2020) we started shipping only >= 3.5 wheels, though. So + we'll give it another try and set py_limited_api on Windows >= 3.5. 
+ """ + from cffi._shimmed_dist_utils import log + from cffi import recompiler + + if ('py_limited_api' not in kwds and not hasattr(sys, 'gettotalrefcount') + and recompiler.USE_LIMITED_API): + import setuptools + try: + setuptools_major_version = int(setuptools.__version__.partition('.')[0]) + if setuptools_major_version >= 26: + kwds['py_limited_api'] = True + except ValueError: # certain development versions of setuptools + # If we don't know the version number of setuptools, we + # try to set 'py_limited_api' anyway. At worst, we get a + # warning. + kwds['py_limited_api'] = True + + if sysconfig.get_config_var("Py_GIL_DISABLED"): + if kwds.get('py_limited_api'): + log.info("Ignoring py_limited_api=True for free-threaded build.") + + kwds['py_limited_api'] = False + + if kwds.get('py_limited_api') is False: + # avoid setting Py_LIMITED_API if py_limited_api=False + # which _cffi_include.h does unless _CFFI_NO_LIMITED_API is defined + kwds.setdefault("define_macros", []).append(("_CFFI_NO_LIMITED_API", None)) + return kwds + +def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): + # We are a setuptools extension. Need this build_ext for py_limited_api. + from setuptools.command.build_ext import build_ext + from cffi._shimmed_dist_utils import Extension, log, mkpath + from cffi import recompiler + + allsources = ['$PLACEHOLDER'] + allsources.extend(kwds.pop('sources', [])) + kwds = _set_py_limited_api(Extension, kwds) + ext = Extension(name=module_name, sources=allsources, **kwds) + + def make_mod(tmpdir, pre_run=None): + c_file = os.path.join(tmpdir, module_name + source_extension) + log.info("generating cffi module %r" % c_file) + mkpath(tmpdir) + # a setuptools-only, API-only hook: called with the "ext" and "ffi" + # arguments just before we turn the ffi into C code. To use it, + # subclass the 'distutils.command.build_ext.build_ext' class and + # add a method 'def pre_run(self, ext, ffi)'. + if pre_run is not None: + pre_run(ext, ffi) + updated = recompiler.make_c_source(ffi, module_name, source, c_file) + if not updated: + log.info("already up-to-date") + return c_file + + if dist.ext_modules is None: + dist.ext_modules = [] + dist.ext_modules.append(ext) + + base_class = dist.cmdclass.get('build_ext', build_ext) + class build_ext_make_mod(base_class): + def run(self): + if ext.sources[0] == '$PLACEHOLDER': + pre_run = getattr(self, 'pre_run', None) + ext.sources[0] = make_mod(self.build_temp, pre_run) + base_class.run(self) + dist.cmdclass['build_ext'] = build_ext_make_mod + # NB. multiple runs here will create multiple 'build_ext_make_mod' + # classes. Even in this case the 'build_ext' command should be + # run once; but just in case, the logic above does nothing if + # called again. 
+ + +def _add_py_module(dist, ffi, module_name): + from setuptools.command.build_py import build_py + from setuptools.command.build_ext import build_ext + from cffi._shimmed_dist_utils import log, mkpath + from cffi import recompiler + + def generate_mod(py_file): + log.info("generating cffi module %r" % py_file) + mkpath(os.path.dirname(py_file)) + updated = recompiler.make_py_source(ffi, module_name, py_file) + if not updated: + log.info("already up-to-date") + + base_class = dist.cmdclass.get('build_py', build_py) + class build_py_make_mod(base_class): + def run(self): + base_class.run(self) + module_path = module_name.split('.') + module_path[-1] += '.py' + generate_mod(os.path.join(self.build_lib, *module_path)) + def get_source_files(self): + # This is called from 'setup.py sdist' only. Exclude + # the generate .py module in this case. + saved_py_modules = self.py_modules + try: + if saved_py_modules: + self.py_modules = [m for m in saved_py_modules + if m != module_name] + return base_class.get_source_files(self) + finally: + self.py_modules = saved_py_modules + dist.cmdclass['build_py'] = build_py_make_mod + + # distutils and setuptools have no notion I could find of a + # generated python module. If we don't add module_name to + # dist.py_modules, then things mostly work but there are some + # combination of options (--root and --record) that will miss + # the module. So we add it here, which gives a few apparently + # harmless warnings about not finding the file outside the + # build directory. + # Then we need to hack more in get_source_files(); see above. + if dist.py_modules is None: + dist.py_modules = [] + dist.py_modules.append(module_name) + + # the following is only for "build_ext -i" + base_class_2 = dist.cmdclass.get('build_ext', build_ext) + class build_ext_make_mod(base_class_2): + def run(self): + base_class_2.run(self) + if self.inplace: + # from get_ext_fullpath() in distutils/command/build_ext.py + module_path = module_name.split('.') + package = '.'.join(module_path[:-1]) + build_py = self.get_finalized_command('build_py') + package_dir = build_py.get_package_dir(package) + file_name = module_path[-1] + '.py' + generate_mod(os.path.join(package_dir, file_name)) + dist.cmdclass['build_ext'] = build_ext_make_mod + +def cffi_modules(dist, attr, value): + assert attr == 'cffi_modules' + if isinstance(value, basestring): + value = [value] + + for cffi_module in value: + add_cffi_module(dist, cffi_module) diff --git a/python/cffi/vengine_cpy.py b/python/cffi/vengine_cpy.py new file mode 100644 index 000000000..02e6a471d --- /dev/null +++ b/python/cffi/vengine_cpy.py @@ -0,0 +1,1087 @@ +# +# DEPRECATED: implementation for ffi.verify() +# +import sys +from . import model +from .error import VerificationError +from . import _imp_emulation as imp + + +class VCPythonEngine(object): + _class_key = 'x' + _gen_python_module = True + + def __init__(self, verifier): + self.verifier = verifier + self.ffi = verifier.ffi + self._struct_pending_verification = {} + self._types_of_builtin_functions = {} + + def patch_extension_kwds(self, kwds): + pass + + def find_module(self, module_name, path, so_suffixes): + try: + f, filename, descr = imp.find_module(module_name, path) + except ImportError: + return None + if f is not None: + f.close() + # Note that after a setuptools installation, there are both .py + # and .so files with the same basename. The code here relies on + # imp.find_module() locating the .so in priority. 
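+ # descr is the (suffix, mode, type) triple returned by imp.find_module(), + # so a suffix outside so_suffixes (e.g. '.py') means no compiled module.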
+ if descr[0] not in so_suffixes: + return None + return filename + + def collect_types(self): + self._typesdict = {} + self._generate("collecttype") + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def _gettypenum(self, type): + # a KeyError here is a bug. please report it! :-) + return self._typesdict[type] + + def _do_collect_type(self, tp): + if ((not isinstance(tp, model.PrimitiveType) + or tp.name == 'long double') + and tp not in self._typesdict): + num = len(self._typesdict) + self._typesdict[tp] = num + + def write_source_to_f(self): + self.collect_types() + # + # The new module will have a _cffi_setup() function that receives + # objects from the ffi world, and that calls some setup code in + # the module. This setup code is split in several independent + # functions, e.g. one per constant. The functions are "chained" + # by ending in a tail call to each other. + # + # This is further split in two chained lists, depending on if we + # can do it at import-time or if we must wait for _cffi_setup() to + # provide us with the objects. This is needed because we + # need the values of the enum constants in order to build the + # that we may have to pass to _cffi_setup(). + # + # The following two 'chained_list_constants' items contains + # the head of these two chained lists, as a string that gives the + # call to do, if any. + self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] + # + prnt = self._prnt + # first paste some standard set of lines that are mostly '#define' + prnt(cffimod_header) + prnt() + # then paste the C source given by the user, verbatim. + prnt(self.verifier.preamble) + prnt() + # + # call generate_cpy_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. + self._generate("decl") + # + # implement the function _cffi_setup_custom() as calling the + # head of the chained list. + self._generate_setup_custom() + prnt() + # + # produce the method table, including the entries for the + # generated Python->C function wrappers, which are done + # by generate_cpy_function_method(). + prnt('static PyMethodDef _cffi_methods[] = {') + self._generate("method") + prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') + prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') + prnt('};') + prnt() + # + # standard init. 
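+ # Both entry points are emitted: PyInit_<modname>() for Python 3 (via + # PyModuleDef) and init<modname>() for Python 2 (via Py_InitModule); + # the generated #if PY_MAJOR_VERSION >= 3 guard picks one at C compile time.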
+ modname = self.verifier.get_module_name() + constants = self._chained_list_constants[False] + prnt('#if PY_MAJOR_VERSION >= 3') + prnt() + prnt('static struct PyModuleDef _cffi_module_def = {') + prnt(' PyModuleDef_HEAD_INIT,') + prnt(' "%s",' % modname) + prnt(' NULL,') + prnt(' -1,') + prnt(' _cffi_methods,') + prnt(' NULL, NULL, NULL, NULL') + prnt('};') + prnt() + prnt('PyMODINIT_FUNC') + prnt('PyInit_%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = PyModule_Create(&_cffi_module_def);') + prnt(' if (lib == NULL)') + prnt(' return NULL;') + prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) + prnt(' Py_DECREF(lib);') + prnt(' return NULL;') + prnt(' }') + prnt('#if Py_GIL_DISABLED') + prnt(' PyUnstable_Module_SetGIL(lib, Py_MOD_GIL_NOT_USED);') + prnt('#endif') + prnt(' return lib;') + prnt('}') + prnt() + prnt('#else') + prnt() + prnt('PyMODINIT_FUNC') + prnt('init%s(void)' % modname) + prnt('{') + prnt(' PyObject *lib;') + prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname) + prnt(' if (lib == NULL)') + prnt(' return;') + prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) + prnt(' return;') + prnt(' return;') + prnt('}') + prnt() + prnt('#endif') + + def load_library(self, flags=None): + # XXX review all usages of 'self' here! + # import it as a new extension module + imp.acquire_lock() + try: + if hasattr(sys, "getdlopenflags"): + previous_flags = sys.getdlopenflags() + try: + if hasattr(sys, "setdlopenflags") and flags is not None: + sys.setdlopenflags(flags) + module = imp.load_dynamic(self.verifier.get_module_name(), + self.verifier.modulefilename) + except ImportError as e: + error = "importing %r: %s" % (self.verifier.modulefilename, e) + raise VerificationError(error) + finally: + if hasattr(sys, "setdlopenflags"): + sys.setdlopenflags(previous_flags) + finally: + imp.release_lock() + # + # call loading_cpy_struct() to get the struct layout inferred by + # the C compiler + self._load(module, 'loading') + # + # the C code will need the <ctype> objects. Collect them in + # order in a list. + revmapping = dict([(value, key) + for (key, value) in self._typesdict.items()]) + lst = [revmapping[i] for i in range(len(revmapping))] + lst = list(map(self.ffi._get_cached_btype, lst)) + # + # build the FFILibrary class and instance and call _cffi_setup(). + # this will set up some fields like '_cffi_types', and only then + # it will invoke the chained list of functions that will really + # build (notably) the constant objects, as <cdata> if they are + # pointers, and store them as attributes on the 'library' object. + class FFILibrary(object): + _cffi_python_module = module + _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + list(self.__dict__) + library = FFILibrary() + if module._cffi_setup(lst, VerificationError, library): + import warnings + warnings.warn("reimporting %r might overwrite older definitions" + % (self.verifier.get_module_name())) + # + # finally, call the loaded_cpy_xxx() functions. This will perform + # the final adjustments, like copying the Python->C wrapper + # functions from the module to the 'library' object, and setting + # up the FFILibrary class with properties for the global C variables.
+ self._load(module, 'loaded', library=library) + module._cffi_original_ffi = self.ffi + module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions + return library + + def _get_declarations(self): + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst + + def _generate(self, step_name): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_cpy_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in verify(): %r" % name) + try: + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _load(self, module, step_name, **kwds): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + method = getattr(self, '_%s_cpy_%s' % (step_name, kind)) + try: + method(tp, realname, module, **kwds) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _generate_nothing(self, tp, name): + pass + + def _loaded_noop(self, tp, name, module, **kwds): + pass + + # ---------- + + def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): + extraarg = '' + if isinstance(tp, model.PrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + converter = '_cffi_to_c_int' + extraarg = ', %s' % tp.name + elif tp.is_complex_type(): + raise VerificationError( + "not implemented in verify(): complex types") + else: + converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), + tp.name.replace(' ', '_')) + errvalue = '-1' + # + elif isinstance(tp, model.PointerType): + self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, + tovar, errcode) + return + # + elif isinstance(tp, (model.StructOrUnion, model.EnumType)): + # a struct (not a struct pointer) as a function argument + self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' + % (tovar, self._gettypenum(tp), fromvar)) + self._prnt(' %s;' % errcode) + return + # + elif isinstance(tp, model.FunctionPtrType): + converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') + extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) + errvalue = 'NULL' + # + else: + raise NotImplementedError(tp) + # + self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) + self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( + tovar, tp.get_c_name(''), errvalue)) + self._prnt(' %s;' % errcode) + + def _extra_local_variables(self, tp, localvars, freelines): + if isinstance(tp, model.PointerType): + localvars.add('Py_ssize_t datasize') + localvars.add('struct _cffi_freeme_s *large_args_free = NULL') + freelines.add('if (large_args_free != NULL)' + ' _cffi_free_array_arguments(large_args_free);') + + def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): + self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') + self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( + self._gettypenum(tp), fromvar, tovar)) + self._prnt(' if (datasize != 0) {') + self._prnt(' %s = ((size_t)datasize) <= 640 ? 
' + 'alloca((size_t)datasize) : NULL;' % (tovar,)) + self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' + '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) + self._prnt(' datasize, &large_args_free) < 0)') + self._prnt(' %s;' % errcode) + self._prnt(' }') + + def _convert_expr_from_c(self, tp, var, context): + if isinstance(tp, model.PrimitiveType): + if tp.is_integer_type() and tp.name != '_Bool': + return '_cffi_from_c_int(%s, %s)' % (var, tp.name) + elif tp.name != 'long double': + return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) + else: + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.ArrayType): + return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( + var, self._gettypenum(model.PointerType(tp.item))) + elif isinstance(tp, model.StructOrUnion): + if tp.fldnames is None: + raise TypeError("'%s' is used as %s, but is opaque" % ( + tp._get_c_name(), context)) + return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + elif isinstance(tp, model.EnumType): + return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( + var, self._gettypenum(tp)) + else: + raise NotImplementedError(tp) + + # ---------- + # typedefs: generates no code so far + + _generate_cpy_typedef_collecttype = _generate_nothing + _generate_cpy_typedef_decl = _generate_nothing + _generate_cpy_typedef_method = _generate_nothing + _loading_cpy_typedef = _loaded_noop + _loaded_cpy_typedef = _loaded_noop + + # ---------- + # function declarations + + def _generate_cpy_function_collecttype(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + self._do_collect_type(tp) + else: + # don't call _do_collect_type(tp) in this common case, + # otherwise test_autofilled_struct_as_argument fails + for type in tp.args: + self._do_collect_type(type) + self._do_collect_type(tp.result) + + def _generate_cpy_function_decl(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no CPython wrapper) + self._generate_cpy_const(False, name, tp) + return + prnt = self._prnt + numargs = len(tp.args) + if numargs == 0: + argname = 'noarg' + elif numargs == 1: + argname = 'arg0' + else: + argname = 'args' + prnt('static PyObject *') + prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) + prnt('{') + # + context = 'argument of %s' % name + for i, type in enumerate(tp.args): + prnt(' %s;' % type.get_c_name(' x%d' % i, context)) + # + localvars = set() + freelines = set() + for type in tp.args: + self._extra_local_variables(type, localvars, freelines) + for decl in sorted(localvars): + prnt(' %s;' % (decl,)) + # + if not isinstance(tp.result, model.VoidType): + result_code = 'result = ' + context = 'result of %s' % name + prnt(' %s;' % tp.result.get_c_name(' result', context)) + prnt(' PyObject *pyresult;') + else: + result_code = '' + # + if len(tp.args) > 1: + rng = range(len(tp.args)) + for i in rng: + prnt(' PyObject *arg%d;' % i) + prnt() + prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % ( + 'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng]))) + prnt(' return NULL;') + prnt() + # + for i, type 
in enumerate(tp.args): + self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, + 'return NULL') + prnt() + # + prnt(' Py_BEGIN_ALLOW_THREADS') + prnt(' _cffi_restore_errno();') + prnt(' { %s%s(%s); }' % ( + result_code, name, + ', '.join(['x%d' % i for i in range(len(tp.args))]))) + prnt(' _cffi_save_errno();') + prnt(' Py_END_ALLOW_THREADS') + prnt() + # + prnt(' (void)self; /* unused */') + if numargs == 0: + prnt(' (void)noarg; /* unused */') + if result_code: + prnt(' pyresult = %s;' % + self._convert_expr_from_c(tp.result, 'result', 'result type')) + for freeline in freelines: + prnt(' ' + freeline) + prnt(' return pyresult;') + else: + for freeline in freelines: + prnt(' ' + freeline) + prnt(' Py_INCREF(Py_None);') + prnt(' return Py_None;') + prnt('}') + prnt() + + def _generate_cpy_function_method(self, tp, name): + if tp.ellipsis: + return + numargs = len(tp.args) + if numargs == 0: + meth = 'METH_NOARGS' + elif numargs == 1: + meth = 'METH_O' + else: + meth = 'METH_VARARGS' + self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) + + _loading_cpy_function = _loaded_noop + + def _loaded_cpy_function(self, tp, name, module, library): + if tp.ellipsis: + return + func = getattr(module, name) + setattr(library, name, func) + self._types_of_builtin_functions[func] = tp + + # ---------- + # named structs + + _generate_cpy_struct_collecttype = _generate_nothing + def _generate_cpy_struct_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'struct', name) + def _generate_cpy_struct_method(self, tp, name): + self._generate_struct_or_union_method(tp, 'struct', name) + def _loading_cpy_struct(self, tp, name, module): + self._loading_struct_or_union(tp, 'struct', name, module) + def _loaded_cpy_struct(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + _generate_cpy_union_collecttype = _generate_nothing + def _generate_cpy_union_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'union', name) + def _generate_cpy_union_method(self, tp, name): + self._generate_struct_or_union_method(tp, 'union', name) + def _loading_cpy_union(self, tp, name, module): + self._loading_struct_or_union(tp, 'union', name, module) + def _loaded_cpy_union(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_struct_or_union_decl(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + checkfuncname = '_cffi_check_%s_%s' % (prefix, name) + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + cname = ('%s %s' % (prefix, name)).strip() + # + prnt = self._prnt + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if (isinstance(ftype, model.PrimitiveType) + and ftype.is_integer_type()) or fbitsize >= 0: + # accept all integers, but complain on float or double + prnt(' (void)((p->%s) << 1);' % fname) + else: + # only accept exactly the type declared. 
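+ # The check assigns &p->field to a pointer of the declared type, so any + # mismatch with the real field type shows up as a compile-time diagnostic.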
+ try: + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + prnt('static PyObject *') + prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,)) + prnt('{') + prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) + prnt(' static Py_ssize_t nums[] = {') + prnt(' sizeof(%s),' % cname) + prnt(' offsetof(struct _cffi_aligncheck, y),') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + prnt(' offsetof(%s, %s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + prnt(' -1') + prnt(' };') + prnt(' (void)self; /* unused */') + prnt(' (void)noarg; /* unused */') + prnt(' return _cffi_get_struct_layout(nums);') + prnt(' /* the next line is not executed, but compiled */') + prnt(' %s(0);' % (checkfuncname,)) + prnt('}') + prnt() + + def _generate_struct_or_union_method(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, + layoutfuncname)) + + def _loading_struct_or_union(self, tp, prefix, name, module): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + # + function = getattr(module, layoutfuncname) + layout = function() + if isinstance(tp, model.StructOrUnion) and tp.partial: + # use the function()'s sizes and offsets to guide the + # layout of the struct + totalsize = layout[0] + totalalignment = layout[1] + fieldofs = layout[2::2] + fieldsize = layout[3::2] + tp.force_flatten() + assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) + tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment + else: + cname = ('%s %s' % (prefix, name)).strip() + self._struct_pending_verification[tp] = layout, cname + + def _loaded_struct_or_union(self, tp): + if tp.fldnames is None: + return # nothing to do with opaque structs + self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered + + if tp in self._struct_pending_verification: + # check that the layout sizes and offsets match the real ones + def check(realvalue, expectedvalue, msg): + if realvalue != expectedvalue: + raise VerificationError( + "%s (we have %d, but C compiler says %d)" + % (msg, expectedvalue, realvalue)) + ffi = self.ffi + BStruct = ffi._get_cached_btype(tp) + layout, cname = self._struct_pending_verification.pop(tp) + check(layout[0], ffi.sizeof(BStruct), "wrong total size") + check(layout[1], ffi.alignof(BStruct), "wrong total alignment") + i = 2 + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + check(layout[i], ffi.offsetof(BStruct, fname), + "wrong offset for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) + i += 2 + assert i == len(layout) + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. 
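+ # (e.g. "typedef struct { int x, y; } point_t;" declares an anonymous + # struct that is reachable only through the typedef name 'point_t')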
+ + _generate_cpy_anonymous_collecttype = _generate_nothing + + def _generate_cpy_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_cpy_enum_decl(tp, name, '') + else: + self._generate_struct_or_union_decl(tp, '', name) + + def _generate_cpy_anonymous_method(self, tp, name): + if not isinstance(tp, model.EnumType): + self._generate_struct_or_union_method(tp, '', name) + + def _loading_cpy_anonymous(self, tp, name, module): + if isinstance(tp, model.EnumType): + self._loading_cpy_enum(tp, name, module) + else: + self._loading_struct_or_union(tp, '', name, module) + + def _loaded_cpy_anonymous(self, tp, name, module, **kwds): + if isinstance(tp, model.EnumType): + self._loaded_cpy_enum(tp, name, module, **kwds) + else: + self._loaded_struct_or_union(tp) + + # ---------- + # constants, likely declared with '#define' + + def _generate_cpy_const(self, is_int, name, tp=None, category='const', + vartp=None, delayed=True, size_too=False, + check_value=None): + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + prnt('static int %s(PyObject *lib)' % funcname) + prnt('{') + prnt(' PyObject *o;') + prnt(' int res;') + if not is_int: + prnt(' %s;' % (vartp or tp).get_c_name(' i', name)) + else: + assert category == 'const' + # + if check_value is not None: + self._check_int_constant_value(name, check_value) + # + if not is_int: + if category == 'var': + realexpr = '&' + name + else: + realexpr = name + prnt(' i = (%s);' % (realexpr,)) + prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i', + 'variable type'),)) + assert delayed + else: + prnt(' o = _cffi_from_c_int_const(%s);' % name) + prnt(' if (o == NULL)') + prnt(' return -1;') + if size_too: + prnt(' {') + prnt(' PyObject *o1 = o;') + prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' + % (name,)) + prnt(' Py_DECREF(o1);') + prnt(' if (o == NULL)') + prnt(' return -1;') + prnt(' }') + prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) + prnt(' Py_DECREF(o);') + prnt(' if (res < 0)') + prnt(' return -1;') + prnt(' return %s;' % self._chained_list_constants[delayed]) + self._chained_list_constants[delayed] = funcname + '(lib)' + prnt('}') + prnt() + + def _generate_cpy_constant_collecttype(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + if not is_int: + self._do_collect_type(tp) + + def _generate_cpy_constant_decl(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + self._generate_cpy_const(is_int, name, tp) + + _generate_cpy_constant_method = _generate_nothing + _loading_cpy_constant = _loaded_noop + _loaded_cpy_constant = _loaded_noop + + # ---------- + # enums + + def _check_int_constant_value(self, name, value, err_prefix=''): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % + name) + prnt(' PyErr_Format(_cffi_VerificationError,') + prnt(' "%s%s has the real value %s, not %s",') + prnt(' "%s", "%s", buf, "%d");' % ( + err_prefix, name, value)) + prnt(' return -1;') + prnt(' }') + + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + + def 
_generate_cpy_enum_decl(self, tp, name, prefix='enum'): + if tp.partial: + for enumerator in tp.enumerators: + self._generate_cpy_const(True, enumerator, delayed=False) + return + # + funcname = self._enum_funcname(prefix, name) + prnt = self._prnt + prnt('static int %s(PyObject *lib)' % funcname) + prnt('{') + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._check_int_constant_value(enumerator, enumvalue, + "enum %s: " % name) + prnt(' return %s;' % self._chained_list_constants[True]) + self._chained_list_constants[True] = funcname + '(lib)' + prnt('}') + prnt() + + _generate_cpy_enum_collecttype = _generate_nothing + _generate_cpy_enum_method = _generate_nothing + + def _loading_cpy_enum(self, tp, name, module): + if tp.partial: + enumvalues = [getattr(module, enumerator) + for enumerator in tp.enumerators] + tp.enumvalues = tuple(enumvalues) + tp.partial_resolved = True + + def _loaded_cpy_enum(self, tp, name, module, library): + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + setattr(library, enumerator, enumvalue) + + # ---------- + # macros: for now only for integers + + def _generate_cpy_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_cpy_const(True, name, check_value=check_value) + + _generate_cpy_macro_collecttype = _generate_nothing + _generate_cpy_macro_method = _generate_nothing + _loading_cpy_macro = _loaded_noop + _loaded_cpy_macro = _loaded_noop + + # ---------- + # global variables + + def _generate_cpy_variable_collecttype(self, tp, name): + if isinstance(tp, model.ArrayType): + tp_ptr = model.PointerType(tp.item) + else: + tp_ptr = model.PointerType(tp) + self._do_collect_type(tp_ptr) + + def _generate_cpy_variable_decl(self, tp, name): + if isinstance(tp, model.ArrayType): + tp_ptr = model.PointerType(tp.item) + self._generate_cpy_const(False, name, tp, vartp=tp_ptr, + size_too = tp.length_is_unknown()) + else: + tp_ptr = model.PointerType(tp) + self._generate_cpy_const(False, name, tp_ptr, category='var') + + _generate_cpy_variable_method = _generate_nothing + _loading_cpy_variable = _loaded_noop + + def _loaded_cpy_variable(self, tp, name, module, library): + value = getattr(library, name) + if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the + # sense that "a=..." is forbidden + if tp.length_is_unknown(): + assert isinstance(value, tuple) + (value, size) = value + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + # 'value' is a <cdata 'type *'> which we have to replace with + # a <cdata 'type[N]'> if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + return + # remove ptr=<cdata 'int *'> from the library instance, and replace + # it by a property on the class, which reads/writes into ptr[0].
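+ # (e.g. a global "int myvar;" becomes a property: reading lib.myvar + # returns ptr[0], and "lib.myvar = 42" stores into ptr[0], so Python + # always sees the live value of the C variable)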
+ ptr = value + delattr(library, name) + def getter(library): + return ptr[0] + def setter(library, value): + ptr[0] = value + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) + + # ---------- + + def _generate_setup_custom(self): + prnt = self._prnt + prnt('static int _cffi_setup_custom(PyObject *lib)') + prnt('{') + prnt(' return %s;' % self._chained_list_constants[True]) + prnt('}') + +cffimod_header = r''' +#include <Python.h> +#include <stddef.h> + +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ +#if defined(_MSC_VER) +# include <malloc.h> /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; +# else +# include <stdint.h> +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif +# endif +# define _cffi_float_complex_t _Fcomplex /* include <complex.h> for it */ +# define _cffi_double_complex_t _Dcomplex /* include <complex.h> for it */ +#else +# include <stdint.h> +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) +# include <alloca.h> +# endif +# define _cffi_float_complex_t float _Complex +# define _cffi_double_complex_t double _Complex +#endif + +#if PY_MAJOR_VERSION < 3 +# undef PyCapsule_CheckExact +# undef PyCapsule_GetPointer +# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule)) +# define PyCapsule_GetPointer(capsule, name) \ + (PyCObject_AsVoidPtr(capsule)) +#endif + +#if PY_MAJOR_VERSION >= 3 +# define PyInt_FromLong PyLong_FromLong +#endif + +#define _cffi_from_c_double PyFloat_FromDouble +#define _cffi_from_c_float PyFloat_FromDouble +#define _cffi_from_c_long PyInt_FromLong +#define _cffi_from_c_ulong PyLong_FromUnsignedLong +#define _cffi_from_c_longlong PyLong_FromLongLong +#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong +#define _cffi_from_c__Bool PyBool_FromLong + +#define _cffi_to_c_double PyFloat_AsDouble +#define _cffi_to_c_float PyFloat_AsDouble + +#define _cffi_from_c_int_const(x) \ + (((x) > 0) ? \ + ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ + ((long long)(x) >= (long long)LONG_MIN) ? \ + PyInt_FromLong((long)(x)) : \ + PyLong_FromLongLong((long long)(x))) + +#define _cffi_from_c_int(x, type) \ + (((type)-1) > 0 ? /* unsigned */ \ + (sizeof(type) < sizeof(long) ? \ + PyInt_FromLong((long)x) : \ + sizeof(type) == sizeof(long) ? \ + PyLong_FromUnsignedLong((unsigned long)x) : \ + PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ + (sizeof(type) <= sizeof(long) ? 
\ + PyInt_FromLong((long)x) : \ + PyLong_FromLongLong((long long)x))) + +#define _cffi_to_c_int(o, type) \ + ((type)( \ + sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ + : (type)_cffi_to_c_i8(o)) : \ + sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ + : (type)_cffi_to_c_i16(o)) : \ + sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \ + : (type)_cffi_to_c_i32(o)) : \ + sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ + : (type)_cffi_to_c_i64(o)) : \ + (Py_FatalError("unsupported size for type " #type), (type)0))) + +#define _cffi_to_c_i8 \ + ((int(*)(PyObject *))_cffi_exports[1]) +#define _cffi_to_c_u8 \ + ((int(*)(PyObject *))_cffi_exports[2]) +#define _cffi_to_c_i16 \ + ((int(*)(PyObject *))_cffi_exports[3]) +#define _cffi_to_c_u16 \ + ((int(*)(PyObject *))_cffi_exports[4]) +#define _cffi_to_c_i32 \ + ((int(*)(PyObject *))_cffi_exports[5]) +#define _cffi_to_c_u32 \ + ((unsigned int(*)(PyObject *))_cffi_exports[6]) +#define _cffi_to_c_i64 \ + ((long long(*)(PyObject *))_cffi_exports[7]) +#define _cffi_to_c_u64 \ + ((unsigned long long(*)(PyObject *))_cffi_exports[8]) +#define _cffi_to_c_char \ + ((int(*)(PyObject *))_cffi_exports[9]) +#define _cffi_from_c_pointer \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10]) +#define _cffi_to_c_pointer \ + ((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11]) +#define _cffi_get_struct_layout \ + ((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12]) +#define _cffi_restore_errno \ + ((void(*)(void))_cffi_exports[13]) +#define _cffi_save_errno \ + ((void(*)(void))_cffi_exports[14]) +#define _cffi_from_c_char \ + ((PyObject *(*)(char))_cffi_exports[15]) +#define _cffi_from_c_deref \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16]) +#define _cffi_to_c \ + ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17]) +#define _cffi_from_c_struct \ + ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18]) +#define _cffi_to_c_wchar_t \ + ((wchar_t(*)(PyObject *))_cffi_exports[19]) +#define _cffi_from_c_wchar_t \ + ((PyObject *(*)(wchar_t))_cffi_exports[20]) +#define _cffi_to_c_long_double \ + ((long double(*)(PyObject *))_cffi_exports[21]) +#define _cffi_to_c__Bool \ + ((_Bool(*)(PyObject *))_cffi_exports[22]) +#define _cffi_prepare_pointer_call_argument \ + ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) +#define _cffi_convert_array_from_object \ + ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) +#define _CFFI_NUM_EXPORTS 25 + +typedef struct _ctypedescr CTypeDescrObject; + +static void *_cffi_exports[_CFFI_NUM_EXPORTS]; +static PyObject *_cffi_types, *_cffi_VerificationError; + +static int _cffi_setup_custom(PyObject *lib); /* forward */ + +static PyObject *_cffi_setup(PyObject *self, PyObject *args) +{ + PyObject *library; + int was_alive = (_cffi_types != NULL); + (void)self; /* unused */ + if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, + &library)) + return NULL; + Py_INCREF(_cffi_types); + Py_INCREF(_cffi_VerificationError); + if (_cffi_setup_custom(library) < 0) + return NULL; + return PyBool_FromLong(was_alive); +} + +union _cffi_union_alignment_u { + unsigned char m_char; + unsigned short m_short; + unsigned int m_int; + unsigned long m_long; + unsigned long long m_longlong; + float m_float; + double m_double; + long double m_longdouble; +}; + +struct _cffi_freeme_s { + struct _cffi_freeme_s *next; + union _cffi_union_alignment_u alignment; +}; + +#ifdef __GNUC__ + 
__attribute__((unused)) +#endif +static int _cffi_convert_array_argument(CTypeDescrObject *ctptr, PyObject *arg, + char **output_data, Py_ssize_t datasize, + struct _cffi_freeme_s **freeme) +{ + char *p; + if (datasize < 0) + return -1; + + p = *output_data; + if (p == NULL) { + struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc( + offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize); + if (fp == NULL) + return -1; + fp->next = *freeme; + *freeme = fp; + p = *output_data = (char *)&fp->alignment; + } + memset((void *)p, 0, (size_t)datasize); + return _cffi_convert_array_from_object(p, ctptr, arg); +} + +#ifdef __GNUC__ + __attribute__((unused)) +#endif +static void _cffi_free_array_arguments(struct _cffi_freeme_s *freeme) +{ + do { + void *p = (void *)freeme; + freeme = freeme->next; + PyObject_Free(p); + } while (freeme != NULL); +} + +static int _cffi_init(void) +{ + PyObject *module, *c_api_object = NULL; + + module = PyImport_ImportModule("_cffi_backend"); + if (module == NULL) + goto failure; + + c_api_object = PyObject_GetAttrString(module, "_C_API"); + if (c_api_object == NULL) + goto failure; + if (!PyCapsule_CheckExact(c_api_object)) { + PyErr_SetNone(PyExc_ImportError); + goto failure; + } + memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), + _CFFI_NUM_EXPORTS * sizeof(void *)); + + Py_DECREF(module); + Py_DECREF(c_api_object); + return 0; + + failure: + Py_XDECREF(module); + Py_XDECREF(c_api_object); + return -1; +} + +#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) + +/**********/ +''' diff --git a/python/cffi/vengine_gen.py b/python/cffi/vengine_gen.py new file mode 100644 index 000000000..bffc82122 --- /dev/null +++ b/python/cffi/vengine_gen.py @@ -0,0 +1,679 @@ +# +# DEPRECATED: implementation for ffi.verify() +# +import sys, os +import types + +from . import model +from .error import VerificationError + + +class VGenericEngine(object): + _class_key = 'g' + _gen_python_module = False + + def __init__(self, verifier): + self.verifier = verifier + self.ffi = verifier.ffi + self.export_symbols = [] + self._struct_pending_verification = {} + + def patch_extension_kwds(self, kwds): + # add 'export_symbols' to the dictionary. Note that we add the + # list before filling it. When we fill it, it will thus also show + # up in kwds['export_symbols']. + kwds.setdefault('export_symbols', self.export_symbols) + + def find_module(self, module_name, path, so_suffixes): + for so_suffix in so_suffixes: + basename = module_name + so_suffix + if path is None: + path = sys.path + for dirname in path: + filename = os.path.join(dirname, basename) + if os.path.isfile(filename): + return filename + + def collect_types(self): + pass # not needed in the generic engine + + def _prnt(self, what=''): + self._f.write(what + '\n') + + def write_source_to_f(self): + prnt = self._prnt + # first paste some standard set of lines that are mostly '#include' + prnt(cffimod_header) + # then paste the C source given by the user, verbatim. + prnt(self.verifier.preamble) + # + # call generate_gen_xxx_decl(), for every xxx found from + # ffi._parser._declarations. This generates all the functions. 
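+ # (unlike the CPython engine above, these are plain exported C functions + # with no CPython API; load_library() resolves them later through + # module.load_function())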
+ self._generate('decl') + # + # on Windows, distutils insists on putting init_cffi_xyz in + # 'export_symbols', so instead of fighting it, just give up and + # give it one + if sys.platform == 'win32': + if sys.version_info >= (3,): + prefix = 'PyInit_' + else: + prefix = 'init' + modname = self.verifier.get_module_name() + prnt("void %s%s(void) { }\n" % (prefix, modname)) + + def load_library(self, flags=0): + # import it with the CFFI backend + backend = self.ffi._backend + # needs to make a path that contains '/', on Posix + filename = os.path.join(os.curdir, self.verifier.modulefilename) + module = backend.load_library(filename, flags) + # + # call loading_gen_struct() to get the struct layout inferred by + # the C compiler + self._load(module, 'loading') + + # build the FFILibrary class and instance, this is a module subclass + # because modules are expected to have usually-constant-attributes and + # in PyPy this means the JIT is able to treat attributes as constant, + # which we want. + class FFILibrary(types.ModuleType): + _cffi_generic_module = module + _cffi_ffi = self.ffi + _cffi_dir = [] + def __dir__(self): + return FFILibrary._cffi_dir + library = FFILibrary("") + # + # finally, call the loaded_gen_xxx() functions. This will set + # up the 'library' object. + self._load(module, 'loaded', library=library) + return library + + def _get_declarations(self): + lst = [(key, tp) for (key, (tp, qual)) in + self.ffi._parser._declarations.items()] + lst.sort() + return lst + + def _generate(self, step_name): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + try: + method = getattr(self, '_generate_gen_%s_%s' % (kind, + step_name)) + except AttributeError: + raise VerificationError( + "not implemented in verify(): %r" % name) + try: + method(tp, realname) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _load(self, module, step_name, **kwds): + for name, tp in self._get_declarations(): + kind, realname = name.split(' ', 1) + method = getattr(self, '_%s_gen_%s' % (step_name, kind)) + try: + method(tp, realname, module, **kwds) + except Exception as e: + model.attach_exception_info(e, name) + raise + + def _generate_nothing(self, tp, name): + pass + + def _loaded_noop(self, tp, name, module, **kwds): + pass + + # ---------- + # typedefs: generates no code so far + + _generate_gen_typedef_decl = _generate_nothing + _loading_gen_typedef = _loaded_noop + _loaded_gen_typedef = _loaded_noop + + # ---------- + # function declarations + + def _generate_gen_function_decl(self, tp, name): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + # cannot support vararg functions better than this: check for its + # exact type (including the fixed arguments), and build it as a + # constant function pointer (no _cffi_f_%s wrapper) + self._generate_gen_const(False, name, tp) + return + prnt = self._prnt + numargs = len(tp.args) + argnames = [] + for i, type in enumerate(tp.args): + indirection = '' + if isinstance(type, model.StructOrUnion): + indirection = '*' + argnames.append('%sx%d' % (indirection, i)) + context = 'argument of %s' % name + arglist = [type.get_c_name(' %s' % arg, context) + for type, arg in zip(tp.args, argnames)] + tpresult = tp.result + if isinstance(tpresult, model.StructOrUnion): + arglist.insert(0, tpresult.get_c_name(' *r', context)) + tpresult = model.void_type + arglist = ', '.join(arglist) or 'void' + wrappername = '_cffi_f_%s' % name + self.export_symbols.append(wrappername) + if tp.abi: + abi = 
tp.abi + ' ' + else: + abi = '' + funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) + context = 'result of %s' % name + prnt(tpresult.get_c_name(funcdecl, context)) + prnt('{') + # + if isinstance(tp.result, model.StructOrUnion): + result_code = '*r = ' + elif not isinstance(tp.result, model.VoidType): + result_code = 'return ' + else: + result_code = '' + prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames))) + prnt('}') + prnt() + + _loading_gen_function = _loaded_noop + + def _loaded_gen_function(self, tp, name, module, library): + assert isinstance(tp, model.FunctionPtrType) + if tp.ellipsis: + newfunction = self._load_constant(False, tp, name, module) + else: + indirections = [] + base_tp = tp + if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args) + or isinstance(tp.result, model.StructOrUnion)): + indirect_args = [] + for i, typ in enumerate(tp.args): + if isinstance(typ, model.StructOrUnion): + typ = model.PointerType(typ) + indirections.append((i, typ)) + indirect_args.append(typ) + indirect_result = tp.result + if isinstance(indirect_result, model.StructOrUnion): + if indirect_result.fldtypes is None: + raise TypeError("'%s' is used as result type, " + "but is opaque" % ( + indirect_result._get_c_name(),)) + indirect_result = model.PointerType(indirect_result) + indirect_args.insert(0, indirect_result) + indirections.insert(0, ("result", indirect_result)) + indirect_result = model.void_type + tp = model.FunctionPtrType(tuple(indirect_args), + indirect_result, tp.ellipsis) + BFunc = self.ffi._get_cached_btype(tp) + wrappername = '_cffi_f_%s' % name + newfunction = module.load_function(BFunc, wrappername) + for i, typ in indirections: + newfunction = self._make_struct_wrapper(newfunction, i, typ, + base_tp) + setattr(library, name, newfunction) + type(library)._cffi_dir.append(name) + + def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): + backend = self.ffi._backend + BType = self.ffi._get_cached_btype(tp) + if i == "result": + ffi = self.ffi + def newfunc(*args): + res = ffi.new(BType) + oldfunc(res, *args) + return res[0] + else: + def newfunc(*args): + args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] + return oldfunc(*args) + newfunc._cffi_base_type = base_tp + return newfunc + + # ---------- + # named structs + + def _generate_gen_struct_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'struct', name) + + def _loading_gen_struct(self, tp, name, module): + self._loading_struct_or_union(tp, 'struct', name, module) + + def _loaded_gen_struct(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_gen_union_decl(self, tp, name): + assert name == tp.name + self._generate_struct_or_union_decl(tp, 'union', name) + + def _loading_gen_union(self, tp, name, module): + self._loading_struct_or_union(tp, 'union', name, module) + + def _loaded_gen_union(self, tp, name, module, **kwds): + self._loaded_struct_or_union(tp) + + def _generate_struct_or_union_decl(self, tp, prefix, name): + if tp.fldnames is None: + return # nothing to do with opaque structs + checkfuncname = '_cffi_check_%s_%s' % (prefix, name) + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + cname = ('%s %s' % (prefix, name)).strip() + # + prnt = self._prnt + prnt('static void %s(%s *p)' % (checkfuncname, cname)) + prnt('{') + prnt(' /* only to generate compile-time warnings or errors */') + prnt(' (void)p;') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if (isinstance(ftype, model.PrimitiveType) + 
and ftype.is_integer_type()) or fbitsize >= 0: + # accept all integers, but complain on float or double + prnt(' (void)((p->%s) << 1);' % fname) + else: + # only accept exactly the type declared. + try: + prnt(' { %s = &p->%s; (void)tmp; }' % ( + ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), + fname)) + except VerificationError as e: + prnt(' /* %s */' % str(e)) # cannot verify it, ignore + prnt('}') + self.export_symbols.append(layoutfuncname) + prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) + prnt('{') + prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) + prnt(' static intptr_t nums[] = {') + prnt(' sizeof(%s),' % cname) + prnt(' offsetof(struct _cffi_aligncheck, y),') + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + prnt(' offsetof(%s, %s),' % (cname, fname)) + if isinstance(ftype, model.ArrayType) and ftype.length is None: + prnt(' 0, /* %s */' % ftype._get_c_name()) + else: + prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) + prnt(' -1') + prnt(' };') + prnt(' return nums[i];') + prnt(' /* the next line is not executed, but compiled */') + prnt(' %s(0);' % (checkfuncname,)) + prnt('}') + prnt() + + def _loading_struct_or_union(self, tp, prefix, name, module): + if tp.fldnames is None: + return # nothing to do with opaque structs + layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) + # + BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] + function = module.load_function(BFunc, layoutfuncname) + layout = [] + num = 0 + while True: + x = function(num) + if x < 0: break + layout.append(x) + num += 1 + if isinstance(tp, model.StructOrUnion) and tp.partial: + # use the function()'s sizes and offsets to guide the + # layout of the struct + totalsize = layout[0] + totalalignment = layout[1] + fieldofs = layout[2::2] + fieldsize = layout[3::2] + tp.force_flatten() + assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) + tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment + else: + cname = ('%s %s' % (prefix, name)).strip() + self._struct_pending_verification[tp] = layout, cname + + def _loaded_struct_or_union(self, tp): + if tp.fldnames is None: + return # nothing to do with opaque structs + self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered + + if tp in self._struct_pending_verification: + # check that the layout sizes and offsets match the real ones + def check(realvalue, expectedvalue, msg): + if realvalue != expectedvalue: + raise VerificationError( + "%s (we have %d, but C compiler says %d)" + % (msg, expectedvalue, realvalue)) + ffi = self.ffi + BStruct = ffi._get_cached_btype(tp) + layout, cname = self._struct_pending_verification.pop(tp) + check(layout[0], ffi.sizeof(BStruct), "wrong total size") + check(layout[1], ffi.alignof(BStruct), "wrong total alignment") + i = 2 + for fname, ftype, fbitsize, fqual in tp.enumfields(): + if fbitsize >= 0: + continue # xxx ignore fbitsize for now + check(layout[i], ffi.offsetof(BStruct, fname), + "wrong offset for field %r" % (fname,)) + if layout[i+1] != 0: + BField = ffi._get_cached_btype(ftype) + check(layout[i+1], ffi.sizeof(BField), + "wrong size for field %r" % (fname,)) + i += 2 + assert i == len(layout) + + # ---------- + # 'anonymous' declarations. These are produced for anonymous structs + # or unions; the 'name' is obtained by a typedef. 
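Aside, not part of the diff: a minimal sketch of how the flat intptr_t array produced by a generated _cffi_layout_* function (shown above) is decoded on the Python side, mirroring _loading_struct_or_union. The struct and every number below are hypothetical, assuming a typical 64-bit ABI.

    # Hypothetical C side: struct pt { int32_t a; double b; };
    # The generated layout function returns one value per index and -1
    # as the end marker; the loader collects values until it sees -1.
    layout = [
        16, 8,  # sizeof(struct pt), offsetof(struct _cffi_aligncheck, y)
        0, 4,   # offsetof(struct pt, a), sizeof(a)
        8, 8,   # offsetof(struct pt, b), sizeof(b)
    ]
    totalsize, totalalignment = layout[0], layout[1]
    fieldofs = layout[2::2]   # [0, 8]
    fieldsize = layout[3::2]  # [4, 8]
    assert len(fieldofs) == len(fieldsize)
    # For a partial struct these values become tp.fixedlayout; otherwise
    # they are checked later against the layout the ffi itself computes.
    print(totalsize, totalalignment, list(zip(fieldofs, fieldsize)))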
+ + def _generate_gen_anonymous_decl(self, tp, name): + if isinstance(tp, model.EnumType): + self._generate_gen_enum_decl(tp, name, '') + else: + self._generate_struct_or_union_decl(tp, '', name) + + def _loading_gen_anonymous(self, tp, name, module): + if isinstance(tp, model.EnumType): + self._loading_gen_enum(tp, name, module, '') + else: + self._loading_struct_or_union(tp, '', name, module) + + def _loaded_gen_anonymous(self, tp, name, module, **kwds): + if isinstance(tp, model.EnumType): + self._loaded_gen_enum(tp, name, module, **kwds) + else: + self._loaded_struct_or_union(tp) + + # ---------- + # constants, likely declared with '#define' + + def _generate_gen_const(self, is_int, name, tp=None, category='const', + check_value=None): + prnt = self._prnt + funcname = '_cffi_%s_%s' % (category, name) + self.export_symbols.append(funcname) + if check_value is not None: + assert is_int + assert category == 'const' + prnt('int %s(char *out_error)' % funcname) + prnt('{') + self._check_int_constant_value(name, check_value) + prnt(' return 0;') + prnt('}') + elif is_int: + assert category == 'const' + prnt('int %s(long long *out_value)' % funcname) + prnt('{') + prnt(' *out_value = (long long)(%s);' % (name,)) + prnt(' return (%s) <= 0;' % (name,)) + prnt('}') + else: + assert tp is not None + assert check_value is None + if category == 'var': + ampersand = '&' + else: + ampersand = '' + extra = '' + if category == 'const' and isinstance(tp, model.StructOrUnion): + extra = 'const *' + ampersand = '&' + prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name)) + prnt('{') + prnt(' return (%s%s);' % (ampersand, name)) + prnt('}') + prnt() + + def _generate_gen_constant_decl(self, tp, name): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + self._generate_gen_const(is_int, name, tp) + + _loading_gen_constant = _loaded_noop + + def _load_constant(self, is_int, tp, name, module, check_value=None): + funcname = '_cffi_const_%s' % name + if check_value is not None: + assert is_int + self._load_known_int_constant(module, funcname) + value = check_value + elif is_int: + BType = self.ffi._typeof_locked("long long*")[0] + BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType) + negative = function(p) + value = int(p[0]) + if value < 0 and not negative: + BLongLong = self.ffi._typeof_locked("long long")[0] + value += (1 << (8*self.ffi.sizeof(BLongLong))) + else: + assert check_value is None + fntypeextra = '(*)(void)' + if isinstance(tp, model.StructOrUnion): + fntypeextra = '*' + fntypeextra + BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0] + function = module.load_function(BFunc, funcname) + value = function() + if isinstance(tp, model.StructOrUnion): + value = value[0] + return value + + def _loaded_gen_constant(self, tp, name, module, library): + is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() + value = self._load_constant(is_int, tp, name, module) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + + # ---------- + # enums + + def _check_int_constant_value(self, name, value): + prnt = self._prnt + if value <= 0: + prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( + name, name, value)) + else: + prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( + name, name, value)) + prnt(' char buf[64];') + prnt(' if ((%s) <= 0)' % name) + prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) + prnt(' else') + prnt(' sprintf(buf, "%%lu", 
(unsigned long)(%s));' % + name) + prnt(' sprintf(out_error, "%s has the real value %s, not %s",') + prnt(' "%s", buf, "%d");' % (name[:100], value)) + prnt(' return -1;') + prnt(' }') + + def _load_known_int_constant(self, module, funcname): + BType = self.ffi._typeof_locked("char[]")[0] + BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] + function = module.load_function(BFunc, funcname) + p = self.ffi.new(BType, 256) + if function(p) < 0: + error = self.ffi.string(p) + if sys.version_info >= (3,): + error = str(error, 'utf-8') + raise VerificationError(error) + + def _enum_funcname(self, prefix, name): + # "$enum_$1" => "___D_enum____D_1" + name = name.replace('$', '___D_') + return '_cffi_e_%s_%s' % (prefix, name) + + def _generate_gen_enum_decl(self, tp, name, prefix='enum'): + if tp.partial: + for enumerator in tp.enumerators: + self._generate_gen_const(True, enumerator) + return + # + funcname = self._enum_funcname(prefix, name) + self.export_symbols.append(funcname) + prnt = self._prnt + prnt('int %s(char *out_error)' % funcname) + prnt('{') + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + self._check_int_constant_value(enumerator, enumvalue) + prnt(' return 0;') + prnt('}') + prnt() + + def _loading_gen_enum(self, tp, name, module, prefix='enum'): + if tp.partial: + enumvalues = [self._load_constant(True, tp, enumerator, module) + for enumerator in tp.enumerators] + tp.enumvalues = tuple(enumvalues) + tp.partial_resolved = True + else: + funcname = self._enum_funcname(prefix, name) + self._load_known_int_constant(module, funcname) + + def _loaded_gen_enum(self, tp, name, module, library): + for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): + setattr(library, enumerator, enumvalue) + type(library)._cffi_dir.append(enumerator) + + # ---------- + # macros: for now only for integers + + def _generate_gen_macro_decl(self, tp, name): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + self._generate_gen_const(True, name, check_value=check_value) + + _loading_gen_macro = _loaded_noop + + def _loaded_gen_macro(self, tp, name, module, library): + if tp == '...': + check_value = None + else: + check_value = tp # an integer + value = self._load_constant(True, tp, name, module, + check_value=check_value) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + + # ---------- + # global variables + + def _generate_gen_variable_decl(self, tp, name): + if isinstance(tp, model.ArrayType): + if tp.length_is_unknown(): + prnt = self._prnt + funcname = '_cffi_sizeof_%s' % (name,) + self.export_symbols.append(funcname) + prnt("size_t %s(void)" % funcname) + prnt("{") + prnt(" return sizeof(%s);" % (name,)) + prnt("}") + tp_ptr = model.PointerType(tp.item) + self._generate_gen_const(False, name, tp_ptr) + else: + tp_ptr = model.PointerType(tp) + self._generate_gen_const(False, name, tp_ptr, category='var') + + _loading_gen_variable = _loaded_noop + + def _loaded_gen_variable(self, tp, name, module, library): + if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the + # sense that "a=..." 
is forbidden + if tp.length_is_unknown(): + funcname = '_cffi_sizeof_%s' % (name,) + BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] + function = module.load_function(BFunc, funcname) + size = function() + BItemType = self.ffi._get_cached_btype(tp.item) + length, rest = divmod(size, self.ffi.sizeof(BItemType)) + if rest != 0: + raise VerificationError( + "bad size: %r does not seem to be an array of %s" % + (name, tp.item)) + tp = tp.resolve_length(length) + tp_ptr = model.PointerType(tp.item) + value = self._load_constant(False, tp_ptr, name, module) + # 'value' is a <cdata 'type *'> which we have to replace with + # a <cdata 'type[N]'> if the N is actually known + if tp.length is not None: + BArray = self.ffi._get_cached_btype(tp) + value = self.ffi.cast(BArray, value) + setattr(library, name, value) + type(library)._cffi_dir.append(name) + return + # remove ptr= from the library instance, and replace + # it by a property on the class, which reads/writes into ptr[0]. + funcname = '_cffi_var_%s' % name + BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] + function = module.load_function(BFunc, funcname) + ptr = function() + def getter(library): + return ptr[0] + def setter(library, value): + ptr[0] = value + setattr(type(library), name, property(getter, setter)) + type(library)._cffi_dir.append(name) + +cffimod_header = r''' +#include <stdio.h> +#include <stddef.h> +#include <stdarg.h> +#include <errno.h> +#include <sys/types.h> /* XXX for ssize_t on some platforms */ + +/* this block of #ifs should be kept exactly identical between + c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py + and cffi/_cffi_include.h */ +#if defined(_MSC_VER) +# include <malloc.h> /* for alloca() */ +# if _MSC_VER < 1600 /* MSVC < 2010 */ + typedef __int8 int8_t; + typedef __int16 int16_t; + typedef __int32 int32_t; + typedef __int64 int64_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + typedef unsigned __int64 uint64_t; + typedef __int8 int_least8_t; + typedef __int16 int_least16_t; + typedef __int32 int_least32_t; + typedef __int64 int_least64_t; + typedef unsigned __int8 uint_least8_t; + typedef unsigned __int16 uint_least16_t; + typedef unsigned __int32 uint_least32_t; + typedef unsigned __int64 uint_least64_t; + typedef __int8 int_fast8_t; + typedef __int16 int_fast16_t; + typedef __int32 int_fast32_t; + typedef __int64 int_fast64_t; + typedef unsigned __int8 uint_fast8_t; + typedef unsigned __int16 uint_fast16_t; + typedef unsigned __int32 uint_fast32_t; + typedef unsigned __int64 uint_fast64_t; + typedef __int64 intmax_t; + typedef unsigned __int64 uintmax_t; +# else +# include <stdint.h> +# endif +# if _MSC_VER < 1800 /* MSVC < 2013 */ +# ifndef __cplusplus + typedef unsigned char _Bool; +# endif +# endif +# define _cffi_float_complex_t _Fcomplex /* include <complex.h> for it */ +# define _cffi_double_complex_t _Dcomplex /* include <complex.h> for it */ +#else +# include <stdint.h> +# if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) +# include <alloca.h> +# endif +# define _cffi_float_complex_t float _Complex +# define _cffi_double_complex_t double _Complex +#endif +''' diff --git a/python/cffi/verifier.py b/python/cffi/verifier.py new file mode 100644 index 000000000..e392a2b7f --- /dev/null +++ b/python/cffi/verifier.py @@ -0,0 +1,306 @@ +# +# DEPRECATED: implementation for ffi.verify() +# +import sys, os, binascii, shutil, io +from . import __version_verifier_modules__ +from . 
import ffiplatform +from .error import VerificationError + +if sys.version_info >= (3, 3): + import importlib.machinery + def _extension_suffixes(): + return importlib.machinery.EXTENSION_SUFFIXES[:] +else: + import imp + def _extension_suffixes(): + return [suffix for suffix, _, type in imp.get_suffixes() + if type == imp.C_EXTENSION] + + +if sys.version_info >= (3,): + NativeIO = io.StringIO +else: + class NativeIO(io.BytesIO): + def write(self, s): + if isinstance(s, unicode): + s = s.encode('ascii') + super(NativeIO, self).write(s) + + +class Verifier(object): + + def __init__(self, ffi, preamble, tmpdir=None, modulename=None, + ext_package=None, tag='', force_generic_engine=False, + source_extension='.c', flags=None, relative_to=None, **kwds): + if ffi._parser._uses_new_feature: + raise VerificationError( + "feature not supported with ffi.verify(), but only " + "with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,)) + self.ffi = ffi + self.preamble = preamble + if not modulename: + flattened_kwds = ffiplatform.flatten(kwds) + vengine_class = _locate_engine_class(ffi, force_generic_engine) + self._vengine = vengine_class(self) + self._vengine.patch_extension_kwds(kwds) + self.flags = flags + self.kwds = self.make_relative_to(kwds, relative_to) + # + if modulename: + if tag: + raise TypeError("can't specify both 'modulename' and 'tag'") + else: + key = '\x00'.join(['%d.%d' % sys.version_info[:2], + __version_verifier_modules__, + preamble, flattened_kwds] + + ffi._cdefsources) + if sys.version_info >= (3,): + key = key.encode('utf-8') + k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) + k1 = k1.lstrip('0x').rstrip('L') + k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) + k2 = k2.lstrip('0').rstrip('L') + modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, + k1, k2) + suffix = _get_so_suffixes()[0] + self.tmpdir = tmpdir or _caller_dir_pycache() + self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) + self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) + self.ext_package = ext_package + self._has_source = False + self._has_module = False + + def write_source(self, file=None): + """Write the C source code. It is produced in 'self.sourcefilename', + which can be tweaked beforehand.""" + with self.ffi._lock: + if self._has_source and file is None: + raise VerificationError( + "source code already written") + self._write_source(file) + + def compile_module(self): + """Write the C source code (if not done already) and compile it. + This produces a dynamic link library in 'self.modulefilename'.""" + with self.ffi._lock: + if self._has_module: + raise VerificationError("module already compiled") + if not self._has_source: + self._write_source() + self._compile_module() + + def load_library(self): + """Get a C module from this Verifier instance. + Returns an instance of a FFILibrary class that behaves like the + objects returned by ffi.dlopen(), but that delegates all + operations to the C module. If necessary, the C code is written + and compiled first. 
+ """ + with self.ffi._lock: + if not self._has_module: + self._locate_module() + if not self._has_module: + if not self._has_source: + self._write_source() + self._compile_module() + return self._load_library() + + def get_module_name(self): + basename = os.path.basename(self.modulefilename) + # kill both the .so extension and the other .'s, as introduced + # by Python 3: 'basename.cpython-33m.so' + basename = basename.split('.', 1)[0] + # and the _d added in Python 2 debug builds --- but try to be + # conservative and not kill a legitimate _d + if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'): + basename = basename[:-2] + return basename + + def get_extension(self): + if not self._has_source: + with self.ffi._lock: + if not self._has_source: + self._write_source() + sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) + modname = self.get_module_name() + return ffiplatform.get_extension(sourcename, modname, **self.kwds) + + def generates_python_module(self): + return self._vengine._gen_python_module + + def make_relative_to(self, kwds, relative_to): + if relative_to and os.path.dirname(relative_to): + dirname = os.path.dirname(relative_to) + kwds = kwds.copy() + for key in ffiplatform.LIST_OF_FILE_NAMES: + if key in kwds: + lst = kwds[key] + if not isinstance(lst, (list, tuple)): + raise TypeError("keyword '%s' should be a list or tuple" + % (key,)) + lst = [os.path.join(dirname, fn) for fn in lst] + kwds[key] = lst + return kwds + + # ---------- + + def _locate_module(self): + if not os.path.isfile(self.modulefilename): + if self.ext_package: + try: + pkg = __import__(self.ext_package, None, None, ['__doc__']) + except ImportError: + return # cannot import the package itself, give up + # (e.g. it might be called differently before installation) + path = pkg.__path__ + else: + path = None + filename = self._vengine.find_module(self.get_module_name(), path, + _get_so_suffixes()) + if filename is None: + return + self.modulefilename = filename + self._vengine.collect_types() + self._has_module = True + + def _write_source_to(self, file): + self._vengine._f = file + try: + self._vengine.write_source_to_f() + finally: + del self._vengine._f + + def _write_source(self, file=None): + if file is not None: + self._write_source_to(file) + else: + # Write our source file to an in memory file. 
+ f = NativeIO() + self._write_source_to(f) + source_data = f.getvalue() + + # Determine if this matches the current file + if os.path.exists(self.sourcefilename): + with open(self.sourcefilename, "r") as fp: + needs_written = not (fp.read() == source_data) + else: + needs_written = True + + # Actually write the file out if it doesn't match + if needs_written: + _ensure_dir(self.sourcefilename) + with open(self.sourcefilename, "w") as fp: + fp.write(source_data) + + # Set this flag + self._has_source = True + + def _compile_module(self): + # compile this C source + tmpdir = os.path.dirname(self.sourcefilename) + outputfilename = ffiplatform.compile(tmpdir, self.get_extension()) + try: + same = ffiplatform.samefile(outputfilename, self.modulefilename) + except OSError: + same = False + if not same: + _ensure_dir(self.modulefilename) + shutil.move(outputfilename, self.modulefilename) + self._has_module = True + + def _load_library(self): + assert self._has_module + if self.flags is not None: + return self._vengine.load_library(self.flags) + else: + return self._vengine.load_library() + +# ____________________________________________________________ + +_FORCE_GENERIC_ENGINE = False # for tests + +def _locate_engine_class(ffi, force_generic_engine): + if _FORCE_GENERIC_ENGINE: + force_generic_engine = True + if not force_generic_engine: + if '__pypy__' in sys.builtin_module_names: + force_generic_engine = True + else: + try: + import _cffi_backend + except ImportError: + _cffi_backend = '?' + if ffi._backend is not _cffi_backend: + force_generic_engine = True + if force_generic_engine: + from . import vengine_gen + return vengine_gen.VGenericEngine + else: + from . import vengine_cpy + return vengine_cpy.VCPythonEngine + +# ____________________________________________________________ + +_TMPDIR = None + +def _caller_dir_pycache(): + if _TMPDIR: + return _TMPDIR + result = os.environ.get('CFFI_TMPDIR') + if result: + return result + filename = sys._getframe(2).f_code.co_filename + return os.path.abspath(os.path.join(os.path.dirname(filename), + '__pycache__')) + +def set_tmpdir(dirname): + """Set the temporary directory to use instead of __pycache__.""" + global _TMPDIR + _TMPDIR = dirname + +def cleanup_tmpdir(tmpdir=None, keep_so=False): + """Clean up the temporary directory by removing all files in it + called `_cffi_*.{c,so}` as well as the `build` subdirectory.""" + tmpdir = tmpdir or _caller_dir_pycache() + try: + filelist = os.listdir(tmpdir) + except OSError: + return + if keep_so: + suffix = '.c' # only remove .c files + else: + suffix = _get_so_suffixes()[0].lower() + for fn in filelist: + if fn.lower().startswith('_cffi_') and ( + fn.lower().endswith(suffix) or fn.lower().endswith('.c')): + try: + os.unlink(os.path.join(tmpdir, fn)) + except OSError: + pass + clean_dir = [os.path.join(tmpdir, 'build')] + for dir in clean_dir: + try: + for fn in os.listdir(dir): + fn = os.path.join(dir, fn) + if os.path.isdir(fn): + clean_dir.append(fn) + else: + os.unlink(fn) + except OSError: + pass + +def _get_so_suffixes(): + suffixes = _extension_suffixes() + if not suffixes: + # bah, no C_EXTENSION available. 
Occurs on pypy without cpyext + if sys.platform == 'win32': + suffixes = [".pyd"] + else: + suffixes = [".so"] + + return suffixes + +def _ensure_dir(filename): + dirname = os.path.dirname(filename) + if dirname and not os.path.isdir(dirname): + os.makedirs(dirname) diff --git a/python/findtimezone.py b/python/findtimezone.py new file mode 100644 index 000000000..e2a158e48 --- /dev/null +++ b/python/findtimezone.py @@ -0,0 +1,32 @@ +import sys +from timezonefinder import TimezoneFinder + +def get_timezone(lat, lon): + """ + Get timezone string from latitude and longitude coordinates. + + Args: + lat: Latitude coordinate (float) + lon: Longitude coordinate (float) + + Returns: + Timezone string or empty string if not found + """ + try: + tf = TimezoneFinder(in_memory=True) + timezone = tf.timezone_at(lat=float(lat), lng=float(lon)) + return timezone if timezone else "" + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + return "" + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Usage: python findtimezone.py <latitude> <longitude>", file=sys.stderr) + sys.exit(1) + + lat = sys.argv[1] + lon = sys.argv[2] + + timezone = get_timezone(lat, lon) + print(timezone) \ No newline at end of file diff --git a/python/flatbuffers-25.9.23.dist-info/INSTALLER b/python/flatbuffers-25.9.23.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/python/flatbuffers-25.9.23.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/python/flatbuffers-25.9.23.dist-info/LICENSE b/python/flatbuffers-25.9.23.dist-info/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/python/flatbuffers-25.9.23.dist-info/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
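Aside, not part of the diff: a short usage sketch for the findtimezone.py helper added earlier in this patch. The coordinates are hypothetical (roughly central Berlin), and the timezonefinder package must be importable for the lookup to succeed; get_timezone() returns an empty string on failure.

    # As a library call:
    from findtimezone import get_timezone
    print(get_timezone(52.52, 13.405))  # e.g. "Europe/Berlin", or "" on failure

    # Or from the shell, matching the __main__ block:
    #   python findtimezone.py 52.52 13.405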
diff --git a/python/flatbuffers-25.9.23.dist-info/METADATA b/python/flatbuffers-25.9.23.dist-info/METADATA new file mode 100644 index 000000000..519d950aa --- /dev/null +++ b/python/flatbuffers-25.9.23.dist-info/METADATA @@ -0,0 +1,20 @@ +Metadata-Version: 2.1 +Name: flatbuffers +Version: 25.9.23 +Summary: The FlatBuffers serialization format for Python +Home-page: https://google.github.io/flatbuffers/ +Author: Derek Bailey +Author-email: derekbailey@google.com +License: Apache 2.0 +Project-URL: Documentation, https://google.github.io/flatbuffers/ +Project-URL: Source, https://github.com/google/flatbuffers +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +License-File: ../LICENSE + +Python runtime library for use with the `Flatbuffers <https://google.github.io/flatbuffers/>`_ serialization format. diff --git a/python/flatbuffers-25.9.23.dist-info/RECORD b/python/flatbuffers-25.9.23.dist-info/RECORD new file mode 100644 index 000000000..c1f4854ed --- /dev/null +++ b/python/flatbuffers-25.9.23.dist-info/RECORD @@ -0,0 +1,26 @@ +flatbuffers-25.9.23.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +flatbuffers-25.9.23.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +flatbuffers-25.9.23.dist-info/METADATA,sha256=tTKSAMim3fxiII0atPOplikAqxp8vZwSsKE-vUlqFcE,875 +flatbuffers-25.9.23.dist-info/RECORD,, +flatbuffers-25.9.23.dist-info/WHEEL,sha256=Kh9pAotZVRFj97E15yTA4iADqXdQfIVTHcNaZTjxeGM,110 +flatbuffers-25.9.23.dist-info/top_level.txt,sha256=UXVWLA8ys6HeqTz6rfKesocUq6ln-ZL8mhZC_cq5BEc,12 +flatbuffers/__init__.py,sha256=vJZrqZOOTKdBNMa_iTKUA6WJG_c_NzKGpFXOe1Igtiw,751 +flatbuffers/__pycache__/__init__.cpython-312.pyc,, +flatbuffers/__pycache__/_version.cpython-312.pyc,, +flatbuffers/__pycache__/builder.cpython-312.pyc,, +flatbuffers/__pycache__/compat.cpython-312.pyc,, +flatbuffers/__pycache__/encode.cpython-312.pyc,, +flatbuffers/__pycache__/flexbuffers.cpython-312.pyc,, +flatbuffers/__pycache__/number_types.cpython-312.pyc,, +flatbuffers/__pycache__/packer.cpython-312.pyc,, +flatbuffers/__pycache__/table.cpython-312.pyc,, +flatbuffers/__pycache__/util.cpython-312.pyc,, +flatbuffers/_version.py,sha256=GVL6M_yJfoAklDfbfTYFV72LDbIU-YgRXL4d1yX3EVw,695 +flatbuffers/builder.py,sha256=uusDhSDKpnLLz6KR4vflC7T74VNwQew9QRkRuxGZTDg,25048 +flatbuffers/compat.py,sha256=ihBSpWDCSL-vgLSyZtcu8LX3ZI3wz9LhtqItY2GQZgg,2373 +flatbuffers/encode.py,sha256=2Or3mgWRAkJiWg-GgYasDU4zIHpQU3W06fmIhwbz5uM,1550 +flatbuffers/flexbuffers.py,sha256=yF8Wr4Lo8WJb-pj9NNaIYxLwzlHHyTroM0iO8fyDwbU,44454 +flatbuffers/number_types.py,sha256=ijO0QcJiuxlQegoBOed0v9m0DdzTZHWxpTBZUqzsWHA,3762 +flatbuffers/packer.py,sha256=LNWym8YgFRqHjcPeGpYY3inCGWH6XnbkQKtAPtFEVas,1164 +flatbuffers/table.py,sha256=ciYTmq_CzAuYpb3KAVnl75M84ieChfbyKne-dFHzwwU,4818 +flatbuffers/util.py,sha256=mRVQ1VoHp0MJMNtRTUGVzALwN4T_C-U14tMbj99py2A,1608 diff --git a/python/flatbuffers-25.9.23.dist-info/WHEEL b/python/flatbuffers-25.9.23.dist-info/WHEEL new file mode 100644 index 000000000..0c3c990c2 --- /dev/null +++ b/python/flatbuffers-25.9.23.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.45.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git 
a/python/flatbuffers-25.9.23.dist-info/top_level.txt b/python/flatbuffers-25.9.23.dist-info/top_level.txt new file mode 100644 index 000000000..adf11d699 --- /dev/null +++ b/python/flatbuffers-25.9.23.dist-info/top_level.txt @@ -0,0 +1 @@ +flatbuffers diff --git a/python/flatbuffers/__init__.py b/python/flatbuffers/__init__.py new file mode 100644 index 000000000..55ef9377c --- /dev/null +++ b/python/flatbuffers/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import util +from ._version import __version__ +from .builder import Builder +from .compat import range_func as compat_range +from .table import Table diff --git a/python/flatbuffers/__pycache__/__init__.cpython-312.pyc b/python/flatbuffers/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..159f7dc76 Binary files /dev/null and b/python/flatbuffers/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/_version.cpython-312.pyc b/python/flatbuffers/__pycache__/_version.cpython-312.pyc new file mode 100644 index 000000000..955585781 Binary files /dev/null and b/python/flatbuffers/__pycache__/_version.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/builder.cpython-312.pyc b/python/flatbuffers/__pycache__/builder.cpython-312.pyc new file mode 100644 index 000000000..3472d9579 Binary files /dev/null and b/python/flatbuffers/__pycache__/builder.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/compat.cpython-312.pyc b/python/flatbuffers/__pycache__/compat.cpython-312.pyc new file mode 100644 index 000000000..83a25d5e3 Binary files /dev/null and b/python/flatbuffers/__pycache__/compat.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/encode.cpython-312.pyc b/python/flatbuffers/__pycache__/encode.cpython-312.pyc new file mode 100644 index 000000000..e0122a7ae Binary files /dev/null and b/python/flatbuffers/__pycache__/encode.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/flexbuffers.cpython-312.pyc b/python/flatbuffers/__pycache__/flexbuffers.cpython-312.pyc new file mode 100644 index 000000000..b91d31557 Binary files /dev/null and b/python/flatbuffers/__pycache__/flexbuffers.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/number_types.cpython-312.pyc b/python/flatbuffers/__pycache__/number_types.cpython-312.pyc new file mode 100644 index 000000000..63427a92c Binary files /dev/null and b/python/flatbuffers/__pycache__/number_types.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/packer.cpython-312.pyc b/python/flatbuffers/__pycache__/packer.cpython-312.pyc new file mode 100644 index 000000000..422ba7e85 Binary files /dev/null and b/python/flatbuffers/__pycache__/packer.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/table.cpython-312.pyc b/python/flatbuffers/__pycache__/table.cpython-312.pyc new file mode 100644 index 000000000..60688d234 Binary files /dev/null and 
b/python/flatbuffers/__pycache__/table.cpython-312.pyc differ diff --git a/python/flatbuffers/__pycache__/util.cpython-312.pyc b/python/flatbuffers/__pycache__/util.cpython-312.pyc new file mode 100644 index 000000000..2c2b676bf Binary files /dev/null and b/python/flatbuffers/__pycache__/util.cpython-312.pyc differ diff --git a/python/flatbuffers/_version.py b/python/flatbuffers/_version.py new file mode 100644 index 000000000..368e6d080 --- /dev/null +++ b/python/flatbuffers/_version.py @@ -0,0 +1,17 @@ +# Copyright 2019 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder, to be updated during the release process +# by the setup.py +__version__ = "25.9.23" diff --git a/python/flatbuffers/builder.py b/python/flatbuffers/builder.py new file mode 100644 index 000000000..71d0eba75 --- /dev/null +++ b/python/flatbuffers/builder.py @@ -0,0 +1,870 @@ +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +from . import compat +from . import encode +from . import number_types as N +from . import packer +from .compat import memoryview_type +from .compat import NumpyRequiredForThisFeature, import_numpy +from .compat import range_func +from .number_types import (SOffsetTFlags, UOffsetTFlags, VOffsetTFlags) + +np = import_numpy() +## @file +## @addtogroup flatbuffers_python_api +## @{ + + +## @cond FLATBUFFERS_INTERNAL +class OffsetArithmeticError(RuntimeError): + """Error caused by an Offset arithmetic error. + + Probably caused by bad writing of fields. This is considered an unreachable + situation in normal circumstances. + """ + + pass + + +class IsNotNestedError(RuntimeError): + """Error caused by using a Builder to write Object data when not inside + + an Object. + """ + + pass + + +class IsNestedError(RuntimeError): + """Error caused by using a Builder to begin an Object when an Object is + + already being built. + """ + + pass + + +class StructIsNotInlineError(RuntimeError): + """Error caused by using a Builder to write a Struct at a location that + + is not the current Offset. + """ + + pass + + +class BuilderSizeError(RuntimeError): + """Error caused by causing a Builder to exceed the hardcoded limit of 2 + + gigabytes. 
+ """ + + pass + + +class BuilderNotFinishedError(RuntimeError): + """Error caused by not calling `Finish` before calling `Output`.""" + + pass + + +class EndVectorLengthMismatched(RuntimeError): + """The number of elements passed to EndVector does not match the number + + specified in StartVector. + """ + + pass + + +# VtableMetadataFields is the count of metadata fields in each vtable. +VtableMetadataFields = 2 +## @endcond + + +class Builder(object): + """A Builder is used to construct one or more FlatBuffers. + + Typically, Builder objects will be used from code generated by the `flatc` + compiler. + + A Builder constructs byte buffers in a last-first manner for simplicity and + performance during reading. + + Internally, a Builder is a state machine for creating FlatBuffer objects. + + It holds the following internal state: + - Bytes: an array of bytes. + - current_vtable: a list of integers. + - vtables: a hash of vtable entries. + + Attributes: + Bytes: The internal `bytearray` for the Builder. + finished: A boolean determining if the Builder has been finalized. + """ + + ## @cond FLATBUFFERS_INTENRAL + __slots__ = ( + "Bytes", + "current_vtable", + "head", + "minalign", + "objectEnd", + "vtables", + "nested", + "forceDefaults", + "finished", + "vectorNumElems", + "sharedStrings", + ) + + """Maximum buffer size constant, in bytes. + + Builder will never allow it's buffer grow over this size. + Currently equals 2Gb. + """ + MAX_BUFFER_SIZE = 2**31 + ## @endcond + + def __init__(self, initialSize=1024): + """Initializes a Builder of size `initial_size`. + + The internal buffer is grown as needed. + """ + + if not (0 <= initialSize <= Builder.MAX_BUFFER_SIZE): + msg = "flatbuffers: Cannot create Builder larger than 2 gigabytes." + raise BuilderSizeError(msg) + + self.Bytes = bytearray(initialSize) + ## @cond FLATBUFFERS_INTERNAL + self.current_vtable = None + self.head = UOffsetTFlags.py_type(initialSize) + self.minalign = 1 + self.objectEnd = None + self.vtables = {} + self.nested = False + self.forceDefaults = False + self.sharedStrings = {} + ## @endcond + self.finished = False + + def Clear(self) -> None: + ## @cond FLATBUFFERS_INTERNAL + self.current_vtable = None + self.head = UOffsetTFlags.py_type(len(self.Bytes)) + self.minalign = 1 + self.objectEnd = None + self.vtables = {} + self.nested = False + self.forceDefaults = False + self.sharedStrings = {} + self.vectorNumElems = None + ## @endcond + self.finished = False + + def Output(self): + """Return the portion of the buffer that has been used for writing data. + + This is the typical way to access the FlatBuffer data inside the + builder. If you try to access `Builder.Bytes` directly, you would need + to manually index it with `Head()`, since the buffer is constructed + backwards. + + It raises BuilderNotFinishedError if the buffer has not been finished + with `Finish`. + """ + + if not self.finished: + raise BuilderNotFinishedError() + + return self.Bytes[self.Head() :] + + ## @cond FLATBUFFERS_INTERNAL + def StartObject(self, numfields): + """StartObject initializes bookkeeping for writing a new object.""" + + self.assertNotNested() + + # use 32-bit offsets so that arithmetic doesn't overflow. + self.current_vtable = [0 for _ in range_func(numfields)] + self.objectEnd = self.Offset() + self.nested = True + + def WriteVtable(self): + """WriteVtable serializes the vtable for the current object, if needed. + + Before writing out the vtable, this checks pre-existing vtables for + equality to this one. 
If an equal vtable is found, point the object to + the existing vtable and return. + + Because vtable values are sensitive to alignment of object data, not + all logically-equal vtables will be deduplicated. + + A vtable has the following format: + <VOffsetT: size of the vtable in bytes, including this one> + <VOffsetT: size of the object in bytes, including the vtable offset> + <VOffsetT: offset for a field> * N, where N is the number of fields + in the schema for this type. Includes deprecated fields. + Thus, a vtable is made of 2 + N elements, each VOffsetT bytes wide. + + An object has the following format: + <SOffsetT: offset to this object's vtable (may be negative)> + <byte: data>+ + """ + + # Prepend a zero scalar to the object. Later in this function we'll + # write an offset here that points to the object's vtable: + self.PrependSOffsetTRelative(0) + + objectOffset = self.Offset() + + vtKey = [] + trim = True + for elem in reversed(self.current_vtable): + if elem == 0: + if trim: + continue + else: + elem = objectOffset - elem + trim = False + + vtKey.append(elem) + + vtKey = tuple(vtKey) + vt2Offset = self.vtables.get(vtKey) + if vt2Offset is None: + # Did not find a vtable, so write this one to the buffer. + + # Write out the current vtable in reverse , because + # serialization occurs in last-first order: + i = len(self.current_vtable) - 1 + trailing = 0 + trim = True + while i >= 0: + off = 0 + elem = self.current_vtable[i] + i -= 1 + + if elem == 0: + if trim: + trailing += 1 + continue + else: + # Forward reference to field; + # use 32bit number to ensure no overflow: + off = objectOffset - elem + trim = False + + self.PrependVOffsetT(off) + + # The two metadata fields are written last. + + # First, store the object bytesize: + objectSize = UOffsetTFlags.py_type(objectOffset - self.objectEnd) + self.PrependVOffsetT(VOffsetTFlags.py_type(objectSize)) + + # Second, store the vtable bytesize: + vBytes = len(self.current_vtable) - trailing + VtableMetadataFields + vBytes *= N.VOffsetTFlags.bytewidth + self.PrependVOffsetT(VOffsetTFlags.py_type(vBytes)) + + # Next, write the offset to the new vtable in the + # already-allocated SOffsetT at the beginning of this object: + objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset) + encode.Write( + packer.soffset, + self.Bytes, + objectStart, + SOffsetTFlags.py_type(self.Offset() - objectOffset), + ) + + # Finally, store this vtable in memory for future + # deduplication: + self.vtables[vtKey] = self.Offset() + else: + # Found a duplicate vtable. + objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset) + self.head = UOffsetTFlags.py_type(objectStart) + + # Write the offset to the found vtable in the + # already-allocated SOffsetT at the beginning of this object: + encode.Write( + packer.soffset, + self.Bytes, + self.Head(), + SOffsetTFlags.py_type(vt2Offset - objectOffset), + ) + + self.current_vtable = None + return objectOffset + + def EndObject(self): + """EndObject writes data necessary to finish object construction.""" + self.assertNested() + self.nested = False + return self.WriteVtable() + + def growByteBuffer(self): + """Doubles the size of the byteslice, and copies the old data towards + + the end of the new buffer (since we build the buffer backwards). + """ + if len(self.Bytes) == Builder.MAX_BUFFER_SIZE: + msg = "flatbuffers: cannot grow buffer beyond 2 gigabytes" + raise BuilderSizeError(msg) + + newSize = min(len(self.Bytes) * 2, Builder.MAX_BUFFER_SIZE) + if newSize == 0: + newSize = 1 + bytes2 = bytearray(newSize) + bytes2[newSize - len(self.Bytes) :] = self.Bytes + self.Bytes = bytes2 + + ## @endcond + + def Head(self): + """Get the start of useful data in the underlying byte buffer. 
+ + Note: unlike other functions, this value is interpreted as from the + left. + """ + ## @cond FLATBUFFERS_INTERNAL + return self.head + ## @endcond + + ## @cond FLATBUFFERS_INTERNAL + def Offset(self): + """Offset relative to the end of the buffer.""" + return UOffsetTFlags.py_type(len(self.Bytes) - self.Head()) + + def Pad(self, n): + """Pad places zeros at the current offset.""" + for i in range_func(n): + self.Place(0, N.Uint8Flags) + + def Prep(self, size, additionalBytes): + """Prep prepares to write an element of `size` after `additional_bytes` + + have been written, e.g. if you write a string, you need to align + such the int length field is aligned to SizeInt32, and the string + data follows it directly. + If all you need to do is align, `additionalBytes` will be 0. + """ + + # Track the biggest thing we've ever aligned to. + if size > self.minalign: + self.minalign = size + + # Find the amount of alignment needed such that `size` is properly + # aligned after `additionalBytes`: + alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1 + alignSize &= size - 1 + + # Reallocate the buffer if needed: + while self.Head() < alignSize + size + additionalBytes: + oldBufSize = len(self.Bytes) + self.growByteBuffer() + updated_head = self.head + len(self.Bytes) - oldBufSize + self.head = UOffsetTFlags.py_type(updated_head) + self.Pad(alignSize) + + def PrependSOffsetTRelative(self, off): + """PrependSOffsetTRelative prepends an SOffsetT, relative to where it + + will be written. + """ + + # Ensure alignment is already done: + self.Prep(N.SOffsetTFlags.bytewidth, 0) + if not (off <= self.Offset()): + msg = "flatbuffers: Offset arithmetic error." + raise OffsetArithmeticError(msg) + off2 = self.Offset() - off + N.SOffsetTFlags.bytewidth + self.PlaceSOffsetT(off2) + + ## @endcond + + def PrependUOffsetTRelative(self, off): + """Prepends an unsigned offset into vector data, relative to where it + + will be written. + """ + + # Ensure alignment is already done: + self.Prep(N.UOffsetTFlags.bytewidth, 0) + if not (off <= self.Offset()): + msg = "flatbuffers: Offset arithmetic error." + raise OffsetArithmeticError(msg) + off2 = self.Offset() - off + N.UOffsetTFlags.bytewidth + self.PlaceUOffsetT(off2) + + ## @cond FLATBUFFERS_INTERNAL + def StartVector(self, elemSize, numElems, alignment): + """StartVector initializes bookkeeping for writing a new vector. + + A vector has the following format: + - <UOffsetT: number of elements in this vector> + - <T: data>+, where T is the type of elements of this vector. + """ + + self.assertNotNested() + self.nested = True + self.vectorNumElems = numElems + self.Prep(N.Uint32Flags.bytewidth, elemSize * numElems) + self.Prep(alignment, elemSize * numElems) # In case alignment > int. + return self.Offset() + + ## @endcond + + def EndVector(self, numElems=None): + """EndVector writes data necessary to finish vector construction.""" + + self.assertNested() + ## @cond FLATBUFFERS_INTERNAL + self.nested = False + ## @endcond + + if numElems: + warnings.warn("numElems is deprecated.", DeprecationWarning, stacklevel=2) + if numElems != self.vectorNumElems: + raise EndVectorLengthMismatched() + + # we already made space for this, so write without PrependUint32 + self.PlaceUOffsetT(self.vectorNumElems) + self.vectorNumElems = None + return self.Offset() + + def CreateSharedString(self, s, encoding="utf-8", errors="strict"): + """CreateSharedString checks if the string is already written to the buffer + + before calling CreateString. 
+ """ + + if s in self.sharedStrings: + return self.sharedStrings[s] + + off = self.CreateString(s, encoding, errors) + self.sharedStrings[s] = off + + return off + + def CreateString(self, s, encoding="utf-8", errors="strict"): + """CreateString writes a null-terminated byte string as a vector.""" + + self.assertNotNested() + ## @cond FLATBUFFERS_INTERNAL + self.nested = True + ## @endcond + + if isinstance(s, compat.string_types): + x = s.encode(encoding, errors) + elif isinstance(s, compat.binary_types): + x = s + else: + raise TypeError("non-string passed to CreateString") + + self.Prep(N.UOffsetTFlags.bytewidth, (len(x) + 1) * N.Uint8Flags.bytewidth) + self.Place(0, N.Uint8Flags) + + l = UOffsetTFlags.py_type(len(s)) + ## @cond FLATBUFFERS_INTERNAL + self.head = UOffsetTFlags.py_type(self.Head() - l) + ## @endcond + self.Bytes[self.Head() : self.Head() + l] = x + + self.vectorNumElems = len(x) + return self.EndVector() + + def CreateByteVector(self, x): + """CreateString writes a byte vector.""" + + self.assertNotNested() + ## @cond FLATBUFFERS_INTERNAL + self.nested = True + ## @endcond + + if not isinstance(x, compat.binary_types): + raise TypeError("non-byte vector passed to CreateByteVector") + + self.Prep(N.UOffsetTFlags.bytewidth, len(x) * N.Uint8Flags.bytewidth) + + l = UOffsetTFlags.py_type(len(x)) + ## @cond FLATBUFFERS_INTERNAL + self.head = UOffsetTFlags.py_type(self.Head() - l) + ## @endcond + self.Bytes[self.Head() : self.Head() + l] = x + + self.vectorNumElems = len(x) + return self.EndVector() + + def CreateNumpyVector(self, x): + """CreateNumpyVector writes a numpy array into the buffer.""" + + if np is None: + # Numpy is required for this feature + raise NumpyRequiredForThisFeature("Numpy was not found.") + + if not isinstance(x, np.ndarray): + raise TypeError("non-numpy-ndarray passed to CreateNumpyVector") + + if x.dtype.kind not in ["b", "i", "u", "f"]: + raise TypeError("numpy-ndarray holds elements of unsupported datatype") + + if x.ndim > 1: + raise TypeError("multidimensional-ndarray passed to CreateNumpyVector") + + self.StartVector(x.itemsize, x.size, x.dtype.alignment) + + # Ensure little endian byte ordering + if x.dtype.str[0] == "<": + x_lend = x + else: + x_lend = x.byteswap(inplace=False) + + # Calculate total length + l = UOffsetTFlags.py_type(x_lend.itemsize * x_lend.size) + ## @cond FLATBUFFERS_INTERNAL + self.head = UOffsetTFlags.py_type(self.Head() - l) + ## @endcond + + # tobytes ensures c_contiguous ordering + self.Bytes[self.Head() : self.Head() + l] = x_lend.tobytes(order="C") + + self.vectorNumElems = x.size + return self.EndVector() + + ## @cond FLATBUFFERS_INTERNAL + def assertNested(self): + """Check that we are in the process of building an object.""" + + if not self.nested: + raise IsNotNestedError() + + def assertNotNested(self): + """Check that no other objects are being built while making this object. + + If not, raise an exception. + """ + + if self.nested: + raise IsNestedError() + + def assertStructIsInline(self, obj): + """Structs are always stored inline, so need to be created right + + where they are used. You'll get this error if you created it + elsewhere. + """ + + N.enforce_number(obj, N.UOffsetTFlags) + if obj != self.Offset(): + msg = ( + "flatbuffers: Tried to write a Struct at an Offset that " + "is different from the current Offset of the Builder." + ) + raise StructIsNotInlineError(msg) + + def Slot(self, slotnum): + """Slot sets the vtable key `voffset` to the current location in the + + buffer. 
+ """ + self.assertNested() + self.current_vtable[slotnum] = self.Offset() + + ## @endcond + + def __Finish(self, rootTable, sizePrefix, file_identifier=None): + """Finish finalizes a buffer, pointing to the given `rootTable`.""" + N.enforce_number(rootTable, N.UOffsetTFlags) + + prepSize = N.UOffsetTFlags.bytewidth + if file_identifier is not None: + prepSize += N.Int32Flags.bytewidth + if sizePrefix: + prepSize += N.Int32Flags.bytewidth + self.Prep(self.minalign, prepSize) + + if file_identifier is not None: + self.Prep(N.UOffsetTFlags.bytewidth, encode.FILE_IDENTIFIER_LENGTH) + + # Convert bytes object file_identifier to an array of 4 8-bit integers, + # and use big-endian to enforce size compliance. + # https://docs.python.org/2/library/struct.html#format-characters + file_identifier = N.struct.unpack(">BBBB", file_identifier) + for i in range(encode.FILE_IDENTIFIER_LENGTH - 1, -1, -1): + # Place the bytes of the file_identifer in reverse order: + self.Place(file_identifier[i], N.Uint8Flags) + + self.PrependUOffsetTRelative(rootTable) + if sizePrefix: + size = len(self.Bytes) - self.Head() + N.enforce_number(size, N.Int32Flags) + self.PrependInt32(size) + self.finished = True + return self.Head() + + def Finish(self, rootTable, file_identifier=None): + """Finish finalizes a buffer, pointing to the given `rootTable`.""" + return self.__Finish(rootTable, False, file_identifier=file_identifier) + + def FinishSizePrefixed(self, rootTable, file_identifier=None): + """Finish finalizes a buffer, pointing to the given `rootTable`, + + with the size prefixed. + """ + return self.__Finish(rootTable, True, file_identifier=file_identifier) + + ## @cond FLATBUFFERS_INTERNAL + def Prepend(self, flags, off): + self.Prep(flags.bytewidth, 0) + self.Place(off, flags) + + def PrependSlot(self, flags, o, x, d): + if x is not None: + N.enforce_number(x, flags) + if d is not None: + N.enforce_number(d, flags) + if x != d or (self.forceDefaults and d is not None): + self.Prepend(flags, x) + self.Slot(o) + + def PrependBoolSlot(self, *args): + self.PrependSlot(N.BoolFlags, *args) + + def PrependByteSlot(self, *args): + self.PrependSlot(N.Uint8Flags, *args) + + def PrependUint8Slot(self, *args): + self.PrependSlot(N.Uint8Flags, *args) + + def PrependUint16Slot(self, *args): + self.PrependSlot(N.Uint16Flags, *args) + + def PrependUint32Slot(self, *args): + self.PrependSlot(N.Uint32Flags, *args) + + def PrependUint64Slot(self, *args): + self.PrependSlot(N.Uint64Flags, *args) + + def PrependInt8Slot(self, *args): + self.PrependSlot(N.Int8Flags, *args) + + def PrependInt16Slot(self, *args): + self.PrependSlot(N.Int16Flags, *args) + + def PrependInt32Slot(self, *args): + self.PrependSlot(N.Int32Flags, *args) + + def PrependInt64Slot(self, *args): + self.PrependSlot(N.Int64Flags, *args) + + def PrependFloat32Slot(self, *args): + self.PrependSlot(N.Float32Flags, *args) + + def PrependFloat64Slot(self, *args): + self.PrependSlot(N.Float64Flags, *args) + + def PrependUOffsetTRelativeSlot(self, o, x, d): + """PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at + + vtable slot `o`. If value `x` equals default `d`, then the slot will + be set to zero and no other data will be written. + """ + + if x != d or self.forceDefaults: + self.PrependUOffsetTRelative(x) + self.Slot(o) + + def PrependStructSlot(self, v, x, d): + """PrependStructSlot prepends a struct onto the object at vtable slot `o`. + + Structs are stored inline, so nothing additional is being added. In + generated code, `d` is always 0. 
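+
+        For example (sketch; `CreateVec3` stands in for a generated struct
+        helper):
+
+            pos = CreateVec3(builder, x, y, z)
+            builder.PrependStructSlot(0, pos, 0)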
+ """ + + N.enforce_number(d, N.UOffsetTFlags) + if x != d: + self.assertStructIsInline(x) + self.Slot(v) + + ## @endcond + + def PrependBool(self, x): + """Prepend a `bool` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.BoolFlags, x) + + def PrependByte(self, x): + """Prepend a `byte` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Uint8Flags, x) + + def PrependUint8(self, x): + """Prepend an `uint8` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Uint8Flags, x) + + def PrependUint16(self, x): + """Prepend an `uint16` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Uint16Flags, x) + + def PrependUint32(self, x): + """Prepend an `uint32` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Uint32Flags, x) + + def PrependUint64(self, x): + """Prepend an `uint64` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Uint64Flags, x) + + def PrependInt8(self, x): + """Prepend an `int8` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Int8Flags, x) + + def PrependInt16(self, x): + """Prepend an `int16` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Int16Flags, x) + + def PrependInt32(self, x): + """Prepend an `int32` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Int32Flags, x) + + def PrependInt64(self, x): + """Prepend an `int64` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Int64Flags, x) + + def PrependFloat32(self, x): + """Prepend a `float32` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Float32Flags, x) + + def PrependFloat64(self, x): + """Prepend a `float64` to the Builder buffer. + + Note: aligns and checks for space. + """ + self.Prepend(N.Float64Flags, x) + + def ForceDefaults(self, forceDefaults): + """In order to save space, fields that are set to their default value + + don't get serialized into the buffer. Forcing defaults provides a + way to manually disable this optimization. When set to `True`, will + always serialize default values. + """ + self.forceDefaults = forceDefaults + + ############################################################## + + ## @cond FLATBUFFERS_INTERNAL + def PrependVOffsetT(self, x): + self.Prepend(N.VOffsetTFlags, x) + + def Place(self, x, flags): + """Place prepends a value specified by `flags` to the Builder, + + without checking for available space. + """ + + N.enforce_number(x, flags) + self.head = self.head - flags.bytewidth + encode.Write(flags.packer_type, self.Bytes, self.Head(), x) + + def PlaceVOffsetT(self, x): + """PlaceVOffsetT prepends a VOffsetT to the Builder, without checking + + for space. + """ + N.enforce_number(x, N.VOffsetTFlags) + self.head = self.head - N.VOffsetTFlags.bytewidth + encode.Write(packer.voffset, self.Bytes, self.Head(), x) + + def PlaceSOffsetT(self, x): + """PlaceSOffsetT prepends a SOffsetT to the Builder, without checking + + for space. + """ + N.enforce_number(x, N.SOffsetTFlags) + self.head = self.head - N.SOffsetTFlags.bytewidth + encode.Write(packer.soffset, self.Bytes, self.Head(), x) + + def PlaceUOffsetT(self, x): + """PlaceUOffsetT prepends a UOffsetT to the Builder, without checking + + for space. 
+ """ + N.enforce_number(x, N.UOffsetTFlags) + self.head = self.head - N.UOffsetTFlags.bytewidth + encode.Write(packer.uoffset, self.Bytes, self.Head(), x) + + ## @endcond + + +## @cond FLATBUFFERS_INTERNAL +def vtableEqual(a, objectStart, b): + """vtableEqual compares an unwritten vtable to a written vtable.""" + + N.enforce_number(objectStart, N.UOffsetTFlags) + + if len(a) * N.VOffsetTFlags.bytewidth != len(b): + return False + + for i, elem in enumerate(a): + x = encode.Get(packer.voffset, b, i * N.VOffsetTFlags.bytewidth) + + # Skip vtable entries that indicate a default value. + if x == 0 and elem == 0: + pass + else: + y = objectStart - elem + if x != y: + return False + return True + + +## @endcond +## @} diff --git a/python/flatbuffers/compat.py b/python/flatbuffers/compat.py new file mode 100644 index 000000000..5668ad70f --- /dev/null +++ b/python/flatbuffers/compat.py @@ -0,0 +1,91 @@ +# Copyright 2016 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A tiny version of `six` to help with backwards compability. + +Also includes compatibility helpers for numpy. +""" + +import sys + +PY2 = sys.version_info[0] == 2 +PY26 = sys.version_info[0:2] == (2, 6) +PY27 = sys.version_info[0:2] == (2, 7) +PY275 = sys.version_info[0:3] >= (2, 7, 5) +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + import importlib.machinery + + string_types = (str,) + binary_types = (bytes, bytearray) + range_func = range + memoryview_type = memoryview + struct_bool_decl = "?" 
+else:
+    import imp
+
+    string_types = (unicode,)
+    if PY26 or PY27:
+        binary_types = (str, bytearray)
+    else:
+        binary_types = (str,)
+    range_func = xrange
+    if PY26 or (PY27 and not PY275):
+        memoryview_type = buffer
+        struct_bool_decl = "<b"
+    else:
+        memoryview_type = memoryview
+        struct_bool_decl = "?"
+
+
+# Helper functions to facilitate making numpy optional instead of required
+
+
+def import_numpy():
+    """
+    Returns the numpy module if it exists on the system,
+    otherwise returns None.
+    """
+    if PY3:
+        numpy_exists = (
+            importlib.machinery.PathFinder.find_spec("numpy") is not None
+        )
+    else:
+        try:
+            imp.find_module("numpy")
+            numpy_exists = True
+        except ImportError:
+            numpy_exists = False
+
+    if numpy_exists:
+        # We do this outside of the try/except block in case numpy exists
+        # but cannot be imported for other reasons.
+        import numpy as np
+    else:
+        np = None
+
+    return np
+
+
+class NumpyRequiredForThisFeature(RuntimeError):
+    """
+    Error raised in places where numpy is required, but numpy has not
+    been loaded.
+    """
+    pass
diff --git a/python/flatbuffers/flexbuffers.py b/python/flatbuffers/flexbuffers.py
new file mode 100644
--- /dev/null
+++ b/python/flatbuffers/flexbuffers.py
+# Copyright 2020 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of FlexBuffers binary format.
+
+For more info check https://google.github.io/flatbuffers/flexbuffers.html and
+corresponding C++ implementation at
+https://github.com/google/flatbuffers/blob/master/include/flatbuffers/flexbuffers.h
+"""
+
+# pylint: disable=invalid-name
+
+import array
+import contextlib
+import enum
+import struct
+
+__all__ = ('Type', 'GetRoot', 'Dumps', 'Loads')
+
+
+class BitWidth(enum.IntEnum):
+  """Supported bit widths of value types.
+
+  These are used in the lower 2 bits of a type field to determine the size of
+  the elements (and or size field) of the item pointed to (e.g. vector).
+  """
+
+  W8 = 0   # 2^0 = 1 byte
+  W16 = 1  # 2^1 = 2 bytes
+  W32 = 2  # 2^2 = 4 bytes
+  W64 = 3  # 2^3 = 8 bytes
+
+  @staticmethod
+  def U(value):
+    """Returns the minimum `BitWidth` to encode unsigned integer value."""
+    assert value >= 0
+
+    if value < (1 << 8):
+      return BitWidth.W8
+    elif value < (1 << 16):
+      return BitWidth.W16
+    elif value < (1 << 32):
+      return BitWidth.W32
+    elif value < (1 << 64):
+      return BitWidth.W64
+    else:
+      raise ValueError('value is too big to encode: %s' % value)
+
+  @staticmethod
+  def I(value):
+    """Returns the minimum `BitWidth` to encode signed integer value."""
+    # -2^(n-1) <= value < 2^(n-1)
+    # -2^n <= 2 * value < 2^n
+    # 2 * value < 2^n, when value >= 0 or 2 * (-value) <= 2^n, when value < 0
+    # 2 * value < 2^n, when value >= 0 or 2 * (-value) - 1 < 2^n, when value < 0
+    #
+    # if value >= 0:
+    #   return BitWidth.U(2 * value)
+    # else:
+    #   return BitWidth.U(2 * (-value) - 1)  # ~x = -x - 1
+    value *= 2
+    return BitWidth.U(value if value >= 0 else ~value)
+
+  @staticmethod
+  def F(value):
+    """Returns the `BitWidth` to encode floating point value."""
+    if struct.unpack('<f', struct.pack('<f', value))[0] == value:
+      return BitWidth.W32
+    return BitWidth.W64
+
+  @staticmethod
+  def B(byte_width):
+    return {
+        1: BitWidth.W8,
+        2: BitWidth.W16,
+        4: BitWidth.W32,
+        8: BitWidth.W64,
+    }[byte_width]
+
+
+I = {1: 'b', 2: 'h', 4: 'i', 8: 'q'}
+U = {1: 'B', 2: 'H', 4: 'I', 8: 'Q'}
+F = {4: 'f', 8: 'd'}
+
+
+def _Unpack(fmt, buf):
+  return struct.unpack('<%s' % fmt[len(buf)], buf)[0]
+
+
+def _UnpackVector(fmt, buf, length):
+  byte_width = len(buf) // length
+  return struct.unpack('<%d%s' % (length, fmt[byte_width]), buf)
+
+
+def _Pack(fmt, value, byte_width):
+  return struct.pack('<%s' % fmt[byte_width], value)
+
+
+def _PackVector(fmt, values, byte_width):
+  return struct.pack('<%d%s' % (len(values), fmt[byte_width]), *values)
+
+
+def _Mutate(fmt, buf, value, byte_width, value_bit_width):
+  if (1 << value_bit_width) <= byte_width:
+    buf[:byte_width] = _Pack(fmt, value, byte_width)
+    return True
+  return False
+
+
+def _PaddingBytes(buf_size, scalar_size):
+  # ((buf_size + (scalar_size - 1)) // scalar_size) * scalar_size - buf_size
+  return -buf_size & (scalar_size - 1)
+
+
+def _ShiftSlice(s, offset, length):
+  start = offset + (s.start or 0)
+  stop = offset + (s.stop or length)
+  return slice(start, stop, s.step)
+
+
+# https://en.cppreference.com/w/cpp/algorithm/lower_bound
+def _LowerBound(values, value, pred):
+  """Implementation of C++ std::lower_bound() algorithm."""
+  first, last = 0, len(values)
+  count = last - first
+  while count > 0:
+    i = first
+    step = count // 2
+    i += step
+    if pred(values[i], value):
+      i += 1
+      first = i
+      count -= step + 1
+    else:
+      count = step
+  return first
+
+
+# https://en.cppreference.com/w/cpp/algorithm/binary_search
+def _BinarySearch(values, value, pred=lambda x, y: x < y):
+  """Implementation of C++ std::binary_search() algorithm."""
+  index = _LowerBound(values, value, pred)
+  if index != len(values) and not pred(value, values[index]):
+    return index
+  return -1
+
+
+class Type(enum.IntEnum):
+  """Supported types of encoded data.
+
+  These are used as the upper 6 bits of a type field to indicate the actual
+  type.
+  """
+
+  NULL = 0
+  INT = 1
+  UINT = 2
+  FLOAT = 3
+  # Types above stored inline, types below store an offset.
+  KEY = 4
+  STRING = 5
+  INDIRECT_INT = 6
+  INDIRECT_UINT = 7
+  INDIRECT_FLOAT = 8
+  MAP = 9
+  VECTOR = 10  # Untyped.
+
+  VECTOR_INT = 11  # Typed any size (stores no type table).
+  VECTOR_UINT = 12
+  VECTOR_FLOAT = 13
+  VECTOR_KEY = 14
+  # DEPRECATED, use VECTOR or VECTOR_KEY instead.
+  # Read test.cpp/FlexBuffersDeprecatedTest() for details on why.
+  VECTOR_STRING_DEPRECATED = 15
+
+  VECTOR_INT2 = 16  # Typed tuple (no type table, no size field).
+  VECTOR_UINT2 = 17
+  VECTOR_FLOAT2 = 18
+  VECTOR_INT3 = 19  # Typed triple (no type table, no size field).
+  VECTOR_UINT3 = 20
+  VECTOR_FLOAT3 = 21
+  VECTOR_INT4 = 22  # Typed quad (no type table, no size field).
+  VECTOR_UINT4 = 23
+  VECTOR_FLOAT4 = 24
+
+  BLOB = 25
+  BOOL = 26
+  VECTOR_BOOL = 36  # To do the same type of conversion of type to vector type
+
+  @staticmethod
+  def Pack(type_, bit_width):
+    return (int(type_) << 2) | bit_width
+
+  @staticmethod
+  def Unpack(packed_type):
+    return 1 << (packed_type & 0b11), Type(packed_type >> 2)
+
+  @staticmethod
+  def IsInline(type_):
+    return type_ <= Type.FLOAT or type_ == Type.BOOL
+
+  @staticmethod
+  def IsTypedVector(type_):
+    return (
+        Type.VECTOR_INT <= type_ <= Type.VECTOR_STRING_DEPRECATED
+        or type_ == Type.VECTOR_BOOL
+    )
+
+  @staticmethod
+  def IsTypedVectorElementType(type_):
+    return Type.INT <= type_ <= Type.STRING or type_ == Type.BOOL
+
+  @staticmethod
+  def ToTypedVectorElementType(type_):
+    if not Type.IsTypedVector(type_):
+      raise ValueError('must be typed vector type')
+
+    return Type(type_ - Type.VECTOR_INT + Type.INT)
+
+  @staticmethod
+  def IsFixedTypedVector(type_):
+    return Type.VECTOR_INT2 <= type_ <= Type.VECTOR_FLOAT4
+
+  @staticmethod
+  def IsFixedTypedVectorElementType(type_):
+    return Type.INT <= type_ <= Type.FLOAT
+
+  @staticmethod
+  def ToFixedTypedVectorElementType(type_):
+    if not Type.IsFixedTypedVector(type_):
+      raise ValueError('must be fixed typed vector type')
+
+    # 3 types each, starting from length 2.
+    fixed_type = type_ - Type.VECTOR_INT2
+    return Type(fixed_type % 3 + Type.INT), fixed_type // 3 + 2
+
+  @staticmethod
+  def ToTypedVector(element_type, fixed_len=0):
+    """Converts element type to corresponding vector type.
+
+    Args:
+      element_type: vector element type
+      fixed_len: number of elements: 0 for typed vector; 2, 3, or 4 for fixed
+        typed vector.
+
+    Returns:
+      Typed vector type or fixed typed vector type.
+    """
+    if fixed_len == 0:
+      if not Type.IsTypedVectorElementType(element_type):
+        raise ValueError('must be typed vector element type')
+    else:
+      if not Type.IsFixedTypedVectorElementType(element_type):
+        raise ValueError('must be fixed typed vector element type')
+
+    offset = element_type - Type.INT
+    if fixed_len == 0:
+      return Type(offset + Type.VECTOR_INT)  # TypedVector
+    elif fixed_len == 2:
+      return Type(offset + Type.VECTOR_INT2)  # FixedTypedVector
+    elif fixed_len == 3:
+      return Type(offset + Type.VECTOR_INT3)  # FixedTypedVector
+    elif fixed_len == 4:
+      return Type(offset + Type.VECTOR_INT4)  # FixedTypedVector
+    else:
+      raise ValueError('unsupported fixed_len: %s' % fixed_len)
+
+
+class Buf:
+  """Class to access underlying buffer object starting from the given offset."""
+
+  def __init__(self, buf, offset):
+    self._buf = buf
+    self._offset = offset if offset >= 0 else len(buf) + offset
+    self._length = len(buf) - self._offset
+
+  def __getitem__(self, key):
+    if isinstance(key, slice):
+      return self._buf[_ShiftSlice(key, self._offset, self._length)]
+    elif isinstance(key, int):
+      return self._buf[self._offset + key]
+    else:
+      raise TypeError('invalid key type')
+
+  def __setitem__(self, key, value):
+    if isinstance(key, slice):
+      self._buf[_ShiftSlice(key, self._offset, self._length)] = value
+    elif isinstance(key, int):
+      self._buf[self._offset + key] = value
+    else:
+      raise TypeError('invalid key type')
+
+  def __repr__(self):
+    return 'buf[%d:]' % self._offset
+
+  def Find(self, sub):
+    """Returns the lowest index where the sub subsequence is found."""
+    return self._buf[self._offset :].find(sub)
+
+  def Slice(self, offset):
+    """Returns new `Buf` which starts from the given offset."""
+    return Buf(self._buf, self._offset + offset)
+
+  def Indirect(self, offset, byte_width):
+    
"""Return new `Buf` based on the encoded offset (indirect encoding).""" + return self.Slice(offset - _Unpack(U, self[offset : offset + byte_width])) + + +class Object: + """Base class for all non-trivial data accessors.""" + + __slots__ = '_buf', '_byte_width' + + def __init__(self, buf, byte_width): + self._buf = buf + self._byte_width = byte_width + + @property + def ByteWidth(self): + return self._byte_width + + +class Sized(Object): + """Base class for all data accessors which need to read encoded size.""" + + __slots__ = ('_size',) + + def __init__(self, buf, byte_width, size=0): + super().__init__(buf, byte_width) + if size == 0: + self._size = _Unpack(U, self.SizeBytes) + else: + self._size = size + + @property + def SizeBytes(self): + return self._buf[-self._byte_width : 0] + + def __len__(self): + return self._size + + +class Blob(Sized): + """Data accessor for the encoded blob bytes.""" + + __slots__ = () + + @property + def Bytes(self): + return self._buf[0 : len(self)] + + def __repr__(self): + return 'Blob(%s, size=%d)' % (self._buf, len(self)) + + +class String(Sized): + """Data accessor for the encoded string bytes.""" + + __slots__ = () + + @property + def Bytes(self): + return self._buf[0 : len(self)] + + def Mutate(self, value): + """Mutates underlying string bytes in place. + + Args: + value: New string to replace the existing one. New string must have less + or equal UTF-8-encoded bytes than the existing one to successfully + mutate underlying byte buffer. + + Returns: + Whether the value was mutated or not. + """ + encoded = value.encode('utf-8') + n = len(encoded) + if n <= len(self): + self._buf[-self._byte_width : 0] = _Pack(U, n, self._byte_width) + self._buf[0:n] = encoded + self._buf[n : len(self)] = bytearray(len(self) - n) + return True + return False + + def __str__(self): + return self.Bytes.decode('utf-8') + + def __repr__(self): + return 'String(%s, size=%d)' % (self._buf, len(self)) + + +class Key(Object): + """Data accessor for the encoded key bytes.""" + + __slots__ = () + + def __init__(self, buf, byte_width): + assert byte_width == 1 + super().__init__(buf, byte_width) + + @property + def Bytes(self): + return self._buf[0 : len(self)] + + def __len__(self): + return self._buf.Find(0) + + def __str__(self): + return self.Bytes.decode('ascii') + + def __repr__(self): + return 'Key(%s, size=%d)' % (self._buf, len(self)) + + +class Vector(Sized): + """Data accessor for the encoded vector bytes.""" + + __slots__ = () + + def __getitem__(self, index): + if index < 0 or index >= len(self): + raise IndexError( + 'vector index %s is out of [0, %d) range' % (index, len(self)) + ) + + packed_type = self._buf[len(self) * self._byte_width + index] + buf = self._buf.Slice(index * self._byte_width) + return Ref.PackedType(buf, self._byte_width, packed_type) + + @property + def Value(self): + """Returns the underlying encoded data as a list object.""" + return [e.Value for e in self] + + def __repr__(self): + return 'Vector(%s, byte_width=%d, size=%d)' % ( + self._buf, + self._byte_width, + self._size, + ) + + +class TypedVector(Sized): + """Data accessor for the encoded typed vector or fixed typed vector bytes.""" + + __slots__ = '_element_type', '_size' + + def __init__(self, buf, byte_width, element_type, size=0): + super().__init__(buf, byte_width, size) + + if element_type == Type.STRING: + # These can't be accessed as strings, since we don't know the bit-width + # of the size field, see the declaration of + # FBT_VECTOR_STRING_DEPRECATED above for details. 
+ # We change the type here to be keys, which are a subtype of strings, + # and will ignore the size field. This will truncate strings with + # embedded nulls. + element_type = Type.KEY + + self._element_type = element_type + + @property + def Bytes(self): + return self._buf[: self._byte_width * len(self)] + + @property + def ElementType(self): + return self._element_type + + def __getitem__(self, index): + if index < 0 or index >= len(self): + raise IndexError( + 'vector index %s is out of [0, %d) range' % (index, len(self)) + ) + + buf = self._buf.Slice(index * self._byte_width) + return Ref(buf, self._byte_width, 1, self._element_type) + + @property + def Value(self): + """Returns underlying data as list object.""" + if not self: + return [] + + if self._element_type is Type.BOOL: + return [bool(e) for e in _UnpackVector(U, self.Bytes, len(self))] + elif self._element_type is Type.INT: + return list(_UnpackVector(I, self.Bytes, len(self))) + elif self._element_type is Type.UINT: + return list(_UnpackVector(U, self.Bytes, len(self))) + elif self._element_type is Type.FLOAT: + return list(_UnpackVector(F, self.Bytes, len(self))) + elif self._element_type is Type.KEY: + return [e.AsKey for e in self] + elif self._element_type is Type.STRING: + return [e.AsString for e in self] + else: + raise TypeError('unsupported element_type: %s' % self._element_type) + + def __repr__(self): + return 'TypedVector(%s, byte_width=%d, element_type=%s, size=%d)' % ( + self._buf, + self._byte_width, + self._element_type, + self._size, + ) + + +class Map(Vector): + """Data accessor for the encoded map bytes.""" + + @staticmethod + def CompareKeys(a, b): + if isinstance(a, Ref): + a = a.AsKeyBytes + if isinstance(b, Ref): + b = b.AsKeyBytes + return a < b + + def __getitem__(self, key): + if isinstance(key, int): + return super().__getitem__(key) + + index = _BinarySearch(self.Keys, key.encode('ascii'), self.CompareKeys) + if index != -1: + return super().__getitem__(index) + + raise KeyError(key) + + @property + def Keys(self): + byte_width = _Unpack( + U, self._buf[-2 * self._byte_width : -self._byte_width] + ) + buf = self._buf.Indirect(-3 * self._byte_width, self._byte_width) + return TypedVector(buf, byte_width, Type.KEY) + + @property + def Values(self): + return Vector(self._buf, self._byte_width) + + @property + def Value(self): + return {k.Value: v.Value for k, v in zip(self.Keys, self.Values)} + + def __repr__(self): + return 'Map(%s, size=%d)' % (self._buf, len(self)) + + +class Ref: + """Data accessor for the encoded data bytes.""" + + __slots__ = '_buf', '_parent_width', '_byte_width', '_type' + + @staticmethod + def PackedType(buf, parent_width, packed_type): + byte_width, type_ = Type.Unpack(packed_type) + return Ref(buf, parent_width, byte_width, type_) + + def __init__(self, buf, parent_width, byte_width, type_): + self._buf = buf + self._parent_width = parent_width + self._byte_width = byte_width + self._type = type_ + + def __repr__(self): + return 'Ref(%s, parent_width=%d, byte_width=%d, type_=%s)' % ( + self._buf, + self._parent_width, + self._byte_width, + self._type, + ) + + @property + def _Bytes(self): + return self._buf[: self._parent_width] + + def _ConvertError(self, target_type): + raise TypeError('cannot convert %s to %s' % (self._type, target_type)) + + def _Indirect(self): + return self._buf.Indirect(0, self._parent_width) + + @property + def IsNull(self): + return self._type is Type.NULL + + @property + def IsBool(self): + return self._type is Type.BOOL + + @property + def 
AsBool(self):
+    if self._type is Type.BOOL:
+      return bool(_Unpack(U, self._Bytes))
+    else:
+      return self.AsInt != 0
+
+  def MutateBool(self, value):
+    """Mutates underlying boolean value bytes in place.
+
+    Args:
+      value: New boolean value.
+
+    Returns:
+      Whether the value was mutated or not.
+    """
+    return self.IsBool and _Mutate(
+        U, self._buf, value, self._parent_width, BitWidth.W8
+    )
+
+  @property
+  def IsNumeric(self):
+    return self.IsInt or self.IsFloat
+
+  @property
+  def IsInt(self):
+    return self._type in (
+        Type.INT,
+        Type.INDIRECT_INT,
+        Type.UINT,
+        Type.INDIRECT_UINT,
+    )
+
+  @property
+  def AsInt(self):
+    """Returns current reference as integer value."""
+    if self.IsNull:
+      return 0
+    elif self.IsBool:
+      return int(self.AsBool)
+    elif self._type is Type.INT:
+      return _Unpack(I, self._Bytes)
+    elif self._type is Type.INDIRECT_INT:
+      return _Unpack(I, self._Indirect()[: self._byte_width])
+    elif self._type is Type.UINT:
+      return _Unpack(U, self._Bytes)
+    elif self._type is Type.INDIRECT_UINT:
+      return _Unpack(U, self._Indirect()[: self._byte_width])
+    elif self.IsString:
+      return len(self.AsString)
+    elif self.IsKey:
+      return len(self.AsKey)
+    elif self.IsBlob:
+      return len(self.AsBlob)
+    elif self.IsVector:
+      return len(self.AsVector)
+    elif self.IsTypedVector:
+      return len(self.AsTypedVector)
+    elif self.IsFixedTypedVector:
+      return len(self.AsFixedTypedVector)
+    else:
+      raise self._ConvertError(Type.INT)
+
+  def MutateInt(self, value):
+    """Mutates underlying integer value bytes in place.
+
+    Args:
+      value: New integer value. It must fit to the byte size of the existing
+        encoded value.
+
+    Returns:
+      Whether the value was mutated or not.
+    """
+    if self._type is Type.INT:
+      return _Mutate(I, self._buf, value, self._parent_width, BitWidth.I(value))
+    elif self._type is Type.INDIRECT_INT:
+      return _Mutate(
+          I, self._Indirect(), value, self._byte_width, BitWidth.I(value)
+      )
+    elif self._type is Type.UINT:
+      return _Mutate(U, self._buf, value, self._parent_width, BitWidth.U(value))
+    elif self._type is Type.INDIRECT_UINT:
+      return _Mutate(
+          U, self._Indirect(), value, self._byte_width, BitWidth.U(value)
+      )
+    else:
+      return False
+
+  @property
+  def IsFloat(self):
+    return self._type in (Type.FLOAT, Type.INDIRECT_FLOAT)
+
+  @property
+  def AsFloat(self):
+    """Returns current reference as floating point value."""
+    if self.IsNull:
+      return 0.0
+    elif self.IsBool:
+      return float(self.AsBool)
+    elif self.IsInt:
+      return float(self.AsInt)
+    elif self._type is Type.FLOAT:
+      return _Unpack(F, self._Bytes)
+    elif self._type is Type.INDIRECT_FLOAT:
+      return _Unpack(F, self._Indirect()[: self._byte_width])
+    elif self.IsString:
+      return float(self.AsString)
+    elif self.IsVector:
+      return float(len(self.AsVector))
+    elif self.IsTypedVector:
+      return float(len(self.AsTypedVector))
+    elif self.IsFixedTypedVector:
+      return float(len(self.AsFixedTypedVector))
+    else:
+      raise self._ConvertError(Type.FLOAT)
+
+  def MutateFloat(self, value):
+    """Mutates underlying floating point value bytes in place.
+
+    Args:
+      value: New float value. It must fit to the byte size of the existing
+        encoded value.
+
+    Returns:
+      Whether the value was mutated or not.
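+
+    Note: the value is repacked at the width of the existing encoding, so
+    mutating a 4-byte slot keeps only single precision.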
+    """
+    if self._type is Type.FLOAT:
+      return _Mutate(
+          F,
+          self._buf,
+          value,
+          self._parent_width,
+          BitWidth.B(self._parent_width),
+      )
+    elif self._type is Type.INDIRECT_FLOAT:
+      return _Mutate(
+          F,
+          self._Indirect(),
+          value,
+          self._byte_width,
+          BitWidth.B(self._byte_width),
+      )
+    else:
+      return False
+
+  @property
+  def IsKey(self):
+    return self._type is Type.KEY
+
+  @property
+  def AsKeyBytes(self):
+    if self.IsKey:
+      return Key(self._Indirect(), self._byte_width).Bytes
+    else:
+      raise self._ConvertError(Type.KEY)
+
+  @property
+  def AsKey(self):
+    if self.IsKey:
+      return str(Key(self._Indirect(), self._byte_width))
+    else:
+      raise self._ConvertError(Type.KEY)
+
+  @property
+  def IsString(self):
+    return self._type is Type.STRING
+
+  @property
+  def AsStringBytes(self):
+    if self.IsString:
+      return String(self._Indirect(), self._byte_width).Bytes
+    elif self.IsKey:
+      return self.AsKeyBytes
+    else:
+      raise self._ConvertError(Type.STRING)
+
+  @property
+  def AsString(self):
+    if self.IsString:
+      return str(String(self._Indirect(), self._byte_width))
+    elif self.IsKey:
+      return self.AsKey
+    else:
+      raise self._ConvertError(Type.STRING)
+
+  def MutateString(self, value):
+    return String(self._Indirect(), self._byte_width).Mutate(value)
+
+  @property
+  def IsBlob(self):
+    return self._type is Type.BLOB
+
+  @property
+  def AsBlob(self):
+    if self.IsBlob:
+      return Blob(self._Indirect(), self._byte_width).Bytes
+    else:
+      raise self._ConvertError(Type.BLOB)
+
+  @property
+  def IsAnyVector(self):
+    return self.IsVector or self.IsTypedVector or self.IsFixedTypedVector
+
+  @property
+  def IsVector(self):
+    return self._type in (Type.VECTOR, Type.MAP)
+
+  @property
+  def AsVector(self):
+    if self.IsVector:
+      return Vector(self._Indirect(), self._byte_width)
+    else:
+      raise self._ConvertError(Type.VECTOR)
+
+  @property
+  def IsTypedVector(self):
+    return Type.IsTypedVector(self._type)
+
+  @property
+  def AsTypedVector(self):
+    if self.IsTypedVector:
+      return TypedVector(
+          self._Indirect(),
+          self._byte_width,
+          Type.ToTypedVectorElementType(self._type),
+      )
+    else:
+      raise self._ConvertError('TYPED_VECTOR')
+
+  @property
+  def IsFixedTypedVector(self):
+    return Type.IsFixedTypedVector(self._type)
+
+  @property
+  def AsFixedTypedVector(self):
+    if self.IsFixedTypedVector:
+      element_type, size = Type.ToFixedTypedVectorElementType(self._type)
+      return TypedVector(self._Indirect(), self._byte_width, element_type, size)
+    else:
+      raise self._ConvertError('FIXED_TYPED_VECTOR')
+
+  @property
+  def IsMap(self):
+    return self._type is Type.MAP
+
+  @property
+  def AsMap(self):
+    if self.IsMap:
+      return Map(self._Indirect(), self._byte_width)
+    else:
+      raise self._ConvertError(Type.MAP)
+
+  @property
+  def Value(self):
+    """Converts current reference to value of corresponding type.
+
+    This is equivalent to calling `AsInt` for integer values, `AsFloat` for
+    floating point values, etc.
+
+    Returns:
+      Value of corresponding type.
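+
+    For example (sketch): GetRoot(Dumps({'a': [1, 2]})).Value evaluates
+    back to {'a': [1, 2]}.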
+ """ + if self.IsNull: + return None + elif self.IsBool: + return self.AsBool + elif self.IsInt: + return self.AsInt + elif self.IsFloat: + return self.AsFloat + elif self.IsString: + return self.AsString + elif self.IsKey: + return self.AsKey + elif self.IsBlob: + return self.AsBlob + elif self.IsMap: + return self.AsMap.Value + elif self.IsVector: + return self.AsVector.Value + elif self.IsTypedVector: + return self.AsTypedVector.Value + elif self.IsFixedTypedVector: + return self.AsFixedTypedVector.Value + else: + raise TypeError('cannot convert %r to value' % self) + + +def _IsIterable(obj): + try: + iter(obj) + return True + except TypeError: + return False + + +class Value: + """Class to represent given value during the encoding process.""" + + @staticmethod + def Null(): + return Value(0, Type.NULL, BitWidth.W8) + + @staticmethod + def Bool(value): + return Value(value, Type.BOOL, BitWidth.W8) + + @staticmethod + def Int(value, bit_width): + return Value(value, Type.INT, bit_width) + + @staticmethod + def UInt(value, bit_width): + return Value(value, Type.UINT, bit_width) + + @staticmethod + def Float(value, bit_width): + return Value(value, Type.FLOAT, bit_width) + + @staticmethod + def Key(offset): + return Value(offset, Type.KEY, BitWidth.W8) + + def __init__(self, value, type_, min_bit_width): + self._value = value + self._type = type_ + + # For scalars: of itself, for vector: of its elements, for string: length. + self._min_bit_width = min_bit_width + + @property + def Value(self): + return self._value + + @property + def Type(self): + return self._type + + @property + def MinBitWidth(self): + return self._min_bit_width + + def StoredPackedType(self, parent_bit_width=BitWidth.W8): + return Type.Pack(self._type, self.StoredWidth(parent_bit_width)) + + # We have an absolute offset, but want to store a relative offset + # elem_index elements beyond the current buffer end. Since whether + # the relative offset fits in a certain byte_width depends on + # the size of the elements before it (and their alignment), we have + # to test for each size in turn. 
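+  # For example (illustrative numbers): an offset value written at buffer
+  # position 2, referenced from a vector starting at buf_size = 10, gives
+  # offset_loc = 10 for byte_width = 1 and a relative offset of 8, which
+  # fits in W8. Had the target been more than 255 bytes back, the loop in
+  # ElemWidth below would retry with byte_width = 2, and so on.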
+ def ElemWidth(self, buf_size, elem_index=0): + if Type.IsInline(self._type): + return self._min_bit_width + for byte_width in 1, 2, 4, 8: + offset_loc = ( + buf_size + + _PaddingBytes(buf_size, byte_width) + + elem_index * byte_width + ) + bit_width = BitWidth.U(offset_loc - self._value) + if byte_width == (1 << bit_width): + return bit_width + raise ValueError('relative offset is too big') + + def StoredWidth(self, parent_bit_width=BitWidth.W8): + if Type.IsInline(self._type): + return max(self._min_bit_width, parent_bit_width) + return self._min_bit_width + + def __repr__(self): + return 'Value(%s, %s, %s)' % (self._value, self._type, self._min_bit_width) + + def __str__(self): + return str(self._value) + + +def InMap(func): + def wrapper(self, *args, **kwargs): + if isinstance(args[0], str): + self.Key(args[0]) + func(self, *args[1:], **kwargs) + else: + func(self, *args, **kwargs) + + return wrapper + + +def InMapForString(func): + def wrapper(self, *args): + if len(args) == 1: + func(self, args[0]) + elif len(args) == 2: + self.Key(args[0]) + func(self, args[1]) + else: + raise ValueError('invalid number of arguments') + + return wrapper + + +class Pool: + """Collection of (data, offset) pairs sorted by data for quick access.""" + + def __init__(self): + self._pool = [] # sorted list of (data, offset) tuples + + def FindOrInsert(self, data, offset): + do = data, offset + index = _BinarySearch(self._pool, do, lambda a, b: a[0] < b[0]) + if index != -1: + _, offset = self._pool[index] + return offset + self._pool.insert(index, do) + return None + + def Clear(self): + self._pool = [] + + @property + def Elements(self): + return [data for data, _ in self._pool] + + +class Builder: + """Helper class to encode structural data into flexbuffers format.""" + + def __init__( + self, + share_strings=False, + share_keys=True, + force_min_bit_width=BitWidth.W8, + ): + self._share_strings = share_strings + self._share_keys = share_keys + self._force_min_bit_width = force_min_bit_width + + self._string_pool = Pool() + self._key_pool = Pool() + + self._finished = False + self._buf = bytearray() + self._stack = [] + + def __len__(self): + return len(self._buf) + + @property + def StringPool(self): + return self._string_pool + + @property + def KeyPool(self): + return self._key_pool + + def Clear(self): + self._string_pool.Clear() + self._key_pool.Clear() + self._finished = False + self._buf = bytearray() + self._stack = [] + + def Finish(self): + """Finishes encoding process and returns underlying buffer.""" + if self._finished: + raise RuntimeError('builder has been already finished') + + # If you hit this exception, you likely have objects that were never + # included in a parent. You need to have exactly one root to finish a + # buffer. Check your Start/End calls are matched, and all objects are inside + # some other object. 
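+    # A minimal end-to-end sketch (hypothetical usage):
+    #
+    #   fbb = Builder()
+    #   fbb.Add({'answer': 42})
+    #   buf = fbb.Finish()  # writes root value, root type, root byte width
+    #   assert Loads(buf) == {'answer': 42}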
+    if len(self._stack) != 1:
+      raise RuntimeError('internal stack size must be one')
+
+    value = self._stack[0]
+    byte_width = self._Align(value.ElemWidth(len(self._buf)))
+    self._WriteAny(value, byte_width=byte_width)  # Root value
+    self._Write(U, value.StoredPackedType(), byte_width=1)  # Root type
+    self._Write(U, byte_width, byte_width=1)  # Root size
+
+    self._finished = True
+    return self._buf
+
+  def _ReadKey(self, offset):
+    key = self._buf[offset:]
+    return key[: key.find(0)]
+
+  def _Align(self, alignment):
+    byte_width = 1 << alignment
+    self._buf.extend(b'\x00' * _PaddingBytes(len(self._buf), byte_width))
+    return byte_width
+
+  def _Write(self, fmt, value, byte_width):
+    self._buf.extend(_Pack(fmt, value, byte_width))
+
+  def _WriteVector(self, fmt, values, byte_width):
+    self._buf.extend(_PackVector(fmt, values, byte_width))
+
+  def _WriteOffset(self, offset, byte_width):
+    relative_offset = len(self._buf) - offset
+    assert byte_width == 8 or relative_offset < (1 << (8 * byte_width))
+    self._Write(U, relative_offset, byte_width)
+
+  def _WriteAny(self, value, byte_width):
+    fmt = {
+        Type.NULL: U,
+        Type.BOOL: U,
+        Type.INT: I,
+        Type.UINT: U,
+        Type.FLOAT: F,
+    }.get(value.Type)
+    if fmt:
+      self._Write(fmt, value.Value, byte_width)
+    else:
+      self._WriteOffset(value.Value, byte_width)
+
+  def _WriteBlob(self, data, append_zero, type_):
+    bit_width = BitWidth.U(len(data))
+    byte_width = self._Align(bit_width)
+    self._Write(U, len(data), byte_width)
+    loc = len(self._buf)
+    self._buf.extend(data)
+    if append_zero:
+      self._buf.append(0)
+    self._stack.append(Value(loc, type_, bit_width))
+    return loc
+
+  def _WriteScalarVector(self, element_type, byte_width, elements, fixed):
+    """Writes scalar vector elements to the underlying buffer."""
+    bit_width = BitWidth.B(byte_width)
+    # If you get this exception, you're trying to write a vector with a size
+    # field that is bigger than the scalars you're trying to write (e.g. a
+    # byte vector > 255 elements). For such types, write a "blob" instead.
+    if BitWidth.U(len(elements)) > bit_width:
+      raise ValueError('too many elements for the given byte_width')
+
+    self._Align(bit_width)
+    if not fixed:
+      self._Write(U, len(elements), byte_width)
+
+    loc = len(self._buf)
+
+    fmt = {Type.INT: I, Type.UINT: U, Type.FLOAT: F}.get(element_type)
+    if not fmt:
+      raise TypeError('unsupported element_type')
+    self._WriteVector(fmt, elements, byte_width)
+
+    type_ = Type.ToTypedVector(element_type, len(elements) if fixed else 0)
+    self._stack.append(Value(loc, type_, bit_width))
+    return loc
+
+  def _CreateVector(self, elements, typed, fixed, keys=None):
+    """Writes vector elements to the underlying buffer."""
+    length = len(elements)
+
+    if fixed and not typed:
+      raise ValueError('fixed vector must be typed')
+
+    # Figure out the smallest bit width we can store this vector with.
+    bit_width = max(self._force_min_bit_width, BitWidth.U(length))
+    prefix_elems = 1  # Vector size
+    if keys:
+      bit_width = max(bit_width, keys.ElemWidth(len(self._buf)))
+      prefix_elems += 2  # Offset to the keys vector and its byte width.
+
+    vector_type = Type.KEY
+    # Check bit widths and types for all elements.
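+    # Offsets have to reach back to data written earlier, so every element
+    # is measured at the position it would occupy after the prefix fields
+    # and may bump bit_width up before anything is written.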
+ for i, e in enumerate(elements): + bit_width = max(bit_width, e.ElemWidth(len(self._buf), prefix_elems + i)) + + if typed: + if i == 0: + vector_type = e.Type + else: + if vector_type != e.Type: + raise RuntimeError('typed vector elements must be of the same type') + + if fixed and not Type.IsFixedTypedVectorElementType(vector_type): + raise RuntimeError('must be fixed typed vector element type') + + byte_width = self._Align(bit_width) + # Write vector. First the keys width/offset if available, and size. + if keys: + self._WriteOffset(keys.Value, byte_width) + self._Write(U, 1 << keys.MinBitWidth, byte_width) + + if not fixed: + self._Write(U, length, byte_width) + + # Then the actual data. + loc = len(self._buf) + for e in elements: + self._WriteAny(e, byte_width) + + # Then the types. + if not typed: + for e in elements: + self._buf.append(e.StoredPackedType(bit_width)) + + if keys: + type_ = Type.MAP + else: + if typed: + type_ = Type.ToTypedVector(vector_type, length if fixed else 0) + else: + type_ = Type.VECTOR + + return Value(loc, type_, bit_width) + + def _PushIndirect(self, value, type_, bit_width): + byte_width = self._Align(bit_width) + loc = len(self._buf) + fmt = {Type.INDIRECT_INT: I, Type.INDIRECT_UINT: U, Type.INDIRECT_FLOAT: F}[ + type_ + ] + self._Write(fmt, value, byte_width) + self._stack.append(Value(loc, type_, bit_width)) + + @InMapForString + def String(self, value): + """Encodes string value.""" + reset_to = len(self._buf) + encoded = value.encode('utf-8') + loc = self._WriteBlob(encoded, append_zero=True, type_=Type.STRING) + if self._share_strings: + prev_loc = self._string_pool.FindOrInsert(encoded, loc) + if prev_loc is not None: + del self._buf[reset_to:] + self._stack[-1]._value = loc = prev_loc # pylint: disable=protected-access + + return loc + + @InMap + def Blob(self, value): + """Encodes binary blob value. + + Args: + value: A byte/bytearray value to encode + + Returns: + Offset of the encoded value in underlying the byte buffer. + """ + return self._WriteBlob(value, append_zero=False, type_=Type.BLOB) + + def Key(self, value): + """Encodes key value. + + Args: + value: A byte/bytearray/str value to encode. Byte object must not contain + zero bytes. String object must be convertible to ASCII. + + Returns: + Offset of the encoded value in the underlying byte buffer. + """ + if isinstance(value, (bytes, bytearray)): + encoded = value + else: + encoded = value.encode('ascii') + + if 0 in encoded: + raise ValueError('key contains zero byte') + + loc = len(self._buf) + self._buf.extend(encoded) + self._buf.append(0) + if self._share_keys: + prev_loc = self._key_pool.FindOrInsert(encoded, loc) + if prev_loc is not None: + del self._buf[loc:] + loc = prev_loc + + self._stack.append(Value.Key(loc)) + return loc + + def Null(self, key=None): + """Encodes None value.""" + if key: + self.Key(key) + self._stack.append(Value.Null()) + + @InMap + def Bool(self, value): + """Encodes boolean value. + + Args: + value: A boolean value. + """ + self._stack.append(Value.Bool(value)) + + @InMap + def Int(self, value, byte_width=0): + """Encodes signed integer value. + + Args: + value: A signed integer value. + byte_width: Number of bytes to use: 1, 2, 4, or 8. + """ + bit_width = BitWidth.I(value) if byte_width == 0 else BitWidth.B(byte_width) + self._stack.append(Value.Int(value, bit_width)) + + @InMap + def IndirectInt(self, value, byte_width=0): + """Encodes signed integer value indirectly. + + Args: + value: A signed integer value. 
+ byte_width: Number of bytes to use: 1, 2, 4, or 8. + """ + bit_width = BitWidth.I(value) if byte_width == 0 else BitWidth.B(byte_width) + self._PushIndirect(value, Type.INDIRECT_INT, bit_width) + + @InMap + def UInt(self, value, byte_width=0): + """Encodes unsigned integer value. + + Args: + value: An unsigned integer value. + byte_width: Number of bytes to use: 1, 2, 4, or 8. + """ + bit_width = BitWidth.U(value) if byte_width == 0 else BitWidth.B(byte_width) + self._stack.append(Value.UInt(value, bit_width)) + + @InMap + def IndirectUInt(self, value, byte_width=0): + """Encodes unsigned integer value indirectly. + + Args: + value: An unsigned integer value. + byte_width: Number of bytes to use: 1, 2, 4, or 8. + """ + bit_width = BitWidth.U(value) if byte_width == 0 else BitWidth.B(byte_width) + self._PushIndirect(value, Type.INDIRECT_UINT, bit_width) + + @InMap + def Float(self, value, byte_width=0): + """Encodes floating point value. + + Args: + value: A floating point value. + byte_width: Number of bytes to use: 4 or 8. + """ + bit_width = BitWidth.F(value) if byte_width == 0 else BitWidth.B(byte_width) + self._stack.append(Value.Float(value, bit_width)) + + @InMap + def IndirectFloat(self, value, byte_width=0): + """Encodes floating point value indirectly. + + Args: + value: A floating point value. + byte_width: Number of bytes to use: 4 or 8. + """ + bit_width = BitWidth.F(value) if byte_width == 0 else BitWidth.B(byte_width) + self._PushIndirect(value, Type.INDIRECT_FLOAT, bit_width) + + def _StartVector(self): + """Starts vector construction.""" + return len(self._stack) + + def _EndVector(self, start, typed, fixed): + """Finishes vector construction by encodung its elements.""" + vec = self._CreateVector(self._stack[start:], typed, fixed) + del self._stack[start:] + self._stack.append(vec) + return vec.Value + + @contextlib.contextmanager + def Vector(self, key=None): + if key: + self.Key(key) + + try: + start = self._StartVector() + yield self + finally: + self._EndVector(start, typed=False, fixed=False) + + @InMap + def VectorFromElements(self, elements): + """Encodes sequence of any elements as a vector. + + Args: + elements: sequence of elements, they may have different types. + """ + with self.Vector(): + for e in elements: + self.Add(e) + + @contextlib.contextmanager + def TypedVector(self, key=None): + if key: + self.Key(key) + + try: + start = self._StartVector() + yield self + finally: + self._EndVector(start, typed=True, fixed=False) + + @InMap + def TypedVectorFromElements(self, elements, element_type=None): + """Encodes sequence of elements of the same type as typed vector. + + Args: + elements: Sequence of elements, they must be of the same type. + element_type: Suggested element type. Setting it to None means determining + correct value automatically based on the given elements. 
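+
+        Example (sketch):
+
+            fbb.TypedVectorFromElements([1, 2, 3])  # typed vector of INT
+            fbb.TypedVectorFromElements(array.array('f', [0.5, 1.5]))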
+ """ + if isinstance(elements, array.array): + if elements.typecode == 'f': + self._WriteScalarVector(Type.FLOAT, 4, elements, fixed=False) + elif elements.typecode == 'd': + self._WriteScalarVector(Type.FLOAT, 8, elements, fixed=False) + elif elements.typecode in ('b', 'h', 'i', 'l', 'q'): + self._WriteScalarVector( + Type.INT, elements.itemsize, elements, fixed=False + ) + elif elements.typecode in ('B', 'H', 'I', 'L', 'Q'): + self._WriteScalarVector( + Type.UINT, elements.itemsize, elements, fixed=False + ) + else: + raise ValueError('unsupported array typecode: %s' % elements.typecode) + else: + add = self.Add if element_type is None else self.Adder(element_type) + with self.TypedVector(): + for e in elements: + add(e) + + @InMap + def FixedTypedVectorFromElements( + self, elements, element_type=None, byte_width=0 + ): + """Encodes sequence of elements of the same type as fixed typed vector. + + Args: + elements: Sequence of elements, they must be of the same type. Allowed + types are `Type.INT`, `Type.UINT`, `Type.FLOAT`. Allowed number of + elements are 2, 3, or 4. + element_type: Suggested element type. Setting it to None means determining + correct value automatically based on the given elements. + byte_width: Number of bytes to use per element. For `Type.INT` and + `Type.UINT`: 1, 2, 4, or 8. For `Type.FLOAT`: 4 or 8. Setting it to 0 + means determining correct value automatically based on the given + elements. + """ + if not 2 <= len(elements) <= 4: + raise ValueError('only 2, 3, or 4 elements are supported') + + types = {type(e) for e in elements} + if len(types) != 1: + raise TypeError('all elements must be of the same type') + + (type_,) = types + + if element_type is None: + element_type = {int: Type.INT, float: Type.FLOAT}.get(type_) + if not element_type: + raise TypeError('unsupported element_type: %s' % type_) + + if byte_width == 0: + width = { + Type.UINT: BitWidth.U, + Type.INT: BitWidth.I, + Type.FLOAT: BitWidth.F, + }[element_type] + byte_width = 1 << max(width(e) for e in elements) + + self._WriteScalarVector(element_type, byte_width, elements, fixed=True) + + def _StartMap(self): + """Starts map construction.""" + return len(self._stack) + + def _EndMap(self, start): + """Finishes map construction by encodung its elements.""" + # Interleaved keys and values on the stack. + stack = self._stack[start:] + + if len(stack) % 2 != 0: + raise RuntimeError('must be even number of keys and values') + + for key in stack[::2]: + if key.Type is not Type.KEY: + raise RuntimeError('all map keys must be of %s type' % Type.KEY) + + pairs = zip(stack[::2], stack[1::2]) # [(key, value), ...] 
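+    # Keys must end up sorted by their encoded bytes so that
+    # Map.__getitem__ can binary-search the keys vector at read time.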
+ pairs = sorted(pairs, key=lambda pair: self._ReadKey(pair[0].Value)) + + del self._stack[start:] + for pair in pairs: + self._stack.extend(pair) + + keys = self._CreateVector(self._stack[start::2], typed=True, fixed=False) + values = self._CreateVector( + self._stack[start + 1 :: 2], typed=False, fixed=False, keys=keys + ) + + del self._stack[start:] + self._stack.append(values) + return values.Value + + @contextlib.contextmanager + def Map(self, key=None): + if key: + self.Key(key) + + try: + start = self._StartMap() + yield self + finally: + self._EndMap(start) + + def MapFromElements(self, elements): + start = self._StartMap() + for k, v in elements.items(): + self.Key(k) + self.Add(v) + self._EndMap(start) + + def Adder(self, type_): + return { + Type.BOOL: self.Bool, + Type.INT: self.Int, + Type.INDIRECT_INT: self.IndirectInt, + Type.UINT: self.UInt, + Type.INDIRECT_UINT: self.IndirectUInt, + Type.FLOAT: self.Float, + Type.INDIRECT_FLOAT: self.IndirectFloat, + Type.KEY: self.Key, + Type.BLOB: self.Blob, + Type.STRING: self.String, + }[type_] + + @InMapForString + def Add(self, value): + """Encodes value of any supported type.""" + if value is None: + self.Null() + elif isinstance(value, bool): + self.Bool(value) + elif isinstance(value, int): + self.Int(value) + elif isinstance(value, float): + self.Float(value) + elif isinstance(value, str): + self.String(value) + elif isinstance(value, (bytes, bytearray)): + self.Blob(value) + elif isinstance(value, dict): + with self.Map(): + for k, v in value.items(): + self.Key(k) + self.Add(v) + elif isinstance(value, array.array): + self.TypedVectorFromElements(value) + elif _IsIterable(value): + self.VectorFromElements(value) + else: + raise TypeError('unsupported python type: %s' % type(value)) + + @property + def LastValue(self): + return self._stack[-1] + + @InMap + def ReuseValue(self, value): + self._stack.append(value) + + +def GetRoot(buf): + """Returns root `Ref` object for the given buffer.""" + if len(buf) < 3: + raise ValueError('buffer is too small') + byte_width = buf[-1] + return Ref.PackedType( + Buf(buf, -(2 + byte_width)), byte_width, packed_type=buf[-2] + ) + + +def Dumps(obj): + """Returns bytearray with the encoded python object.""" + fbb = Builder() + fbb.Add(obj) + return fbb.Finish() + + +def Loads(buf): + """Returns python object decoded from the buffer.""" + return GetRoot(buf).Value diff --git a/python/flatbuffers/number_types.py b/python/flatbuffers/number_types.py new file mode 100644 index 000000000..e47f66f12 --- /dev/null +++ b/python/flatbuffers/number_types.py @@ -0,0 +1,182 @@ +# Copyright 2014 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import struct + +from . 
import packer +from .compat import NumpyRequiredForThisFeature, import_numpy + +np = import_numpy() + +# For reference, see: +# https://docs.python.org/2/library/ctypes.html#ctypes-fundamental-data-types-2 + +# These classes could be collections.namedtuple instances, but those are new +# in 2.6 and we want to work towards 2.5 compatability. + + +class BoolFlags(object): + bytewidth = 1 + min_val = False + max_val = True + py_type = bool + name = "bool" + packer_type = packer.boolean + + +class Uint8Flags(object): + bytewidth = 1 + min_val = 0 + max_val = (2**8) - 1 + py_type = int + name = "uint8" + packer_type = packer.uint8 + + +class Uint16Flags(object): + bytewidth = 2 + min_val = 0 + max_val = (2**16) - 1 + py_type = int + name = "uint16" + packer_type = packer.uint16 + + +class Uint32Flags(object): + bytewidth = 4 + min_val = 0 + max_val = (2**32) - 1 + py_type = int + name = "uint32" + packer_type = packer.uint32 + + +class Uint64Flags(object): + bytewidth = 8 + min_val = 0 + max_val = (2**64) - 1 + py_type = int + name = "uint64" + packer_type = packer.uint64 + + +class Int8Flags(object): + bytewidth = 1 + min_val = -(2**7) + max_val = (2**7) - 1 + py_type = int + name = "int8" + packer_type = packer.int8 + + +class Int16Flags(object): + bytewidth = 2 + min_val = -(2**15) + max_val = (2**15) - 1 + py_type = int + name = "int16" + packer_type = packer.int16 + + +class Int32Flags(object): + bytewidth = 4 + min_val = -(2**31) + max_val = (2**31) - 1 + py_type = int + name = "int32" + packer_type = packer.int32 + + +class Int64Flags(object): + bytewidth = 8 + min_val = -(2**63) + max_val = (2**63) - 1 + py_type = int + name = "int64" + packer_type = packer.int64 + + +class Float32Flags(object): + bytewidth = 4 + min_val = None + max_val = None + py_type = float + name = "float32" + packer_type = packer.float32 + + +class Float64Flags(object): + bytewidth = 8 + min_val = None + max_val = None + py_type = float + name = "float64" + packer_type = packer.float64 + + +class SOffsetTFlags(Int32Flags): + pass + + +class UOffsetTFlags(Uint32Flags): + pass + + +class VOffsetTFlags(Uint16Flags): + pass + + +def valid_number(n, flags): + if flags.min_val is None and flags.max_val is None: + return True + return flags.min_val <= n <= flags.max_val + + +def enforce_number(n, flags): + if flags.min_val is None and flags.max_val is None: + return + if not flags.min_val <= n <= flags.max_val: + raise TypeError("bad number %s for type %s" % (str(n), flags.name)) + + +def float32_to_uint32(n): + packed = struct.pack("<1f", n) + (converted,) = struct.unpack("<1L", packed) + return converted + + +def uint32_to_float32(n): + packed = struct.pack("<1L", n) + (unpacked,) = struct.unpack("<1f", packed) + return unpacked + + +def float64_to_uint64(n): + packed = struct.pack("<1d", n) + (converted,) = struct.unpack("<1Q", packed) + return converted + + +def uint64_to_float64(n): + packed = struct.pack("<1Q", n) + (unpacked,) = struct.unpack("<1d", packed) + return unpacked + + +def to_numpy_type(number_type): + if np is not None: + return np.dtype(number_type.name).newbyteorder("<") + else: + raise NumpyRequiredForThisFeature("Numpy was not found.") diff --git a/python/flatbuffers/packer.py b/python/flatbuffers/packer.py new file mode 100644 index 000000000..0296e52b3 --- /dev/null +++ b/python/flatbuffers/packer.py @@ -0,0 +1,41 @@ +# Copyright 2016 Google Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provide pre-compiled struct packers for encoding and decoding. + +See: https://docs.python.org/2/library/struct.html#format-characters +""" + +import struct +from . import compat + + +boolean = struct.Struct(compat.struct_bool_decl) + +uint8 = struct.Struct(" +Maintainer-Email: AJ Friend +License: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: C +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: POSIX :: Linux +Classifier: Operating System :: Microsoft :: Windows +Classifier: Topic :: Scientific/Engineering :: GIS +Project-URL: Homepage, https://github.com/uber/h3-py +Project-URL: Documentation, https://uber.github.io/h3-py/ +Project-URL: Bug Tracker, https://github.com/uber/h3-py/issues +Project-URL: Discussions, https://github.com/uber/h3-py/discussions +Project-URL: Changelog, https://uber.github.io/h3-py/_changelog.html +Requires-Python: >=3.8 +Provides-Extra: numpy +Requires-Dist: numpy; extra == "numpy" +Provides-Extra: test +Requires-Dist: pytest; extra == "test" +Requires-Dist: pytest-cov; extra == "test" +Requires-Dist: ruff; extra == "test" +Requires-Dist: numpy; extra == "test" +Provides-Extra: all +Requires-Dist: h3[test]; extra == "all" +Requires-Dist: jupyter-book<2; extra == "all" +Requires-Dist: sphinx>=7.3.3; extra == "all" +Requires-Dist: jupyterlab; extra == "all" +Requires-Dist: jupyterlab-geojson; extra == "all" +Requires-Dist: geopandas; extra == "all" +Requires-Dist: geodatasets; extra == "all" +Requires-Dist: matplotlib; extra == "all" +Requires-Dist: contextily; extra == "all" +Requires-Dist: cartopy; extra == "all" +Requires-Dist: geoviews; extra == "all" +Description-Content-Type: text/markdown + +H3 Logo + +# **h3-py**: Uber's H3 Hexagonal Hierarchical Geospatial Indexing System in Python + +[![PyPI version](https://badge.fury.io/py/h3.svg)](https://badge.fury.io/py/h3) +[![PyPI downloads](https://img.shields.io/pypi/dm/h3.svg)](https://pypistats.org/packages/h3) +[![conda](https://img.shields.io/conda/vn/conda-forge/h3-py.svg)](https://anaconda.org/conda-forge/h3-py) 
+[![version](https://img.shields.io/badge/h3-v4.4.1-blue.svg)](https://github.com/uber/h3/releases/tag/v4.4.1) +[![version](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/uber/h3-py/blob/master/LICENSE) + +[![Tests](https://github.com/uber/h3-py/workflows/tests/badge.svg)](https://github.com/uber/h3-py/actions) +[![Coverage 100%](https://img.shields.io/badge/coverage-100%25-green.svg)](https://github.com/uber/h3-py/blob/master/.github/workflows/lint_and_coverage.yml#L31) + + +Python bindings for the [H3 core library](https://h3geo.org/). + +- Documentation: [uber.github.io/h3-py](https://uber.github.io/h3-py) +- GitHub repo: [github.com/uber/h3-py](https://github.com/uber/h3-py) + +## Installation + +From [PyPI](https://pypi.org/project/h3/): + +```console +pip install h3 +``` + +From [conda](https://github.com/conda-forge/h3-py-feedstock): + +```console +conda config --add channels conda-forge +conda install h3-py +``` + + +## Usage + +```python +>>> import h3 +>>> lat, lng = 37.769377, -122.388903 +>>> resolution = 9 +>>> h3.latlng_to_cell(lat, lng, resolution) +'89283082e73ffff' +``` + + +## APIs + +[api_comparison]: https://uber.github.io/h3-py/api_comparison +[api_quick]: https://uber.github.io/h3-py/api_quick + +We provide [multiple APIs][api_comparison] in `h3-py`. + +- All APIs have the same set of functions; + see the [API reference][api_quick]. +- The APIs differ only in their input/output formats; + see the [API comparison page][api_comparison]. + + +## Example gallery + +Browse [a collection of example notebooks](https://github.com/uber/h3-py-notebooks), +and if you have examples or visualizations of your own, please feel free +to contribute! + +[walkthrough]: https://nbviewer.jupyter.org/github/uber/h3-py-notebooks/blob/master/notebooks/usage.ipynb + +We also have an introductory [walkthrough of the API][walkthrough]. + + +## Versioning + + + +`h3-py` wraps the [H3 core library](https://github.com/uber/h3), +which is written in C. +The C and Python projects each employ +[semantic versioning](https://semver.org/), +where versions take the form `X.Y.Z`. + +The `h3-py` version string is guaranteed to match the C library string +in both *major* and *minor* numbers (`X.Y`), but may differ on the +*patch* (`Z`) number. +This convention provides users with information on breaking changes and +feature additions, while providing downstream bindings (like this one!) +with the versioning freedom to fix bugs. + +Use `h3.versions()` to see the version numbers for both +`h3-py` and the C library. 
For example, + +```python +>>> import h3 +>>> h3.versions() +{'c': '4.1.0', 'python': '4.1.1'} +``` diff --git a/python/h3-4.4.1.dist-info/RECORD b/python/h3-4.4.1.dist-info/RECORD new file mode 100644 index 000000000..7d37d83cb --- /dev/null +++ b/python/h3-4.4.1.dist-info/RECORD @@ -0,0 +1,64 @@ +h3-4.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +h3-4.4.1.dist-info/METADATA,sha256=ATVMOPAsvFcr92Ri2lvkmFWVXVCfOaG2xBmS_UjqHGM,18461 +h3-4.4.1.dist-info/RECORD,, +h3-4.4.1.dist-info/WHEEL,sha256=POTktF6yzl_XC41kalB3YizQEERCxgY4yHR5kUvp0U8,195 +h3-4.4.1.dist-info/licenses/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358 +h3/CMakeLists.txt,sha256=8zQ0011t8bmsJw_GiKOFbkTZnquBh6ESCMZyLrm9u_U,22 +h3/__init__.py,sha256=81gxWFGdRcNCG8DCGNbrtToHcN6fWDqruJcCTXhlpO4,676 +h3/__pycache__/__init__.cpython-312.pyc,, +h3/__pycache__/_h3shape.cpython-312.pyc,, +h3/__pycache__/_version.cpython-312.pyc,, +h3/_cy/CMakeLists.txt,sha256=-h6Dk_V8VYydmDht9zCOv7wbSOl_G4MtlajIrj2D8hE,1528 +h3/_cy/__init__.py,sha256=9NbeEjytaOV_ooX_p-gWmiAd2GZGCSKq9UbZ3HG_9eM,2702 +h3/_cy/__pycache__/__init__.cpython-312.pyc,, +h3/_cy/cells.cpython-312-x86_64-linux-gnu.so,sha256=2AVsBl5jf9FnmS-yvF-dza_GfX--r3FTG3yd4NQt8aY,370960 +h3/_cy/cells.pxd,sha256=Lq7HHO-5_fwctjksq8vzQ6SVjnT1S8uz3gRa9h_3Vck,1492 +h3/_cy/cells.pyx,sha256=CVtowYXpJnX42YmXffPHsZ-wN0CVAw0VL20N73Mcbdk,10263 +h3/_cy/edges.cpython-312-x86_64-linux-gnu.so,sha256=wANJwZGI3CkO6sC-lOxqJ3imQhQH--Uanqj9f-HcQx8,292208 +h3/_cy/edges.pxd,sha256=_zUOkV8aeWhk_xjicSWBDHNOp3vMEeg8o2Da06iYn38,561 +h3/_cy/edges.pyx,sha256=1ECoK-OP3_BplwLZqwqNwItMLNIja6kVMzMeo7maHz8,2487 +h3/_cy/error_system.cpython-312-x86_64-linux-gnu.so,sha256=etni584ESPUPREZmtFdDOMsDnwY6SyelrWZLzUtKwqY,104208 +h3/_cy/error_system.pxd,sha256=hhHh2IFfEQs0tWGk7Dy8eYX9q92MZWuFyFvc3vgOfPU,186 +h3/_cy/error_system.pyx,sha256=Vu2iRR3iSeNjWVhGaMbeEXQQnBOLFWCU0WahFXh7X9Y,7863 +h3/_cy/h3api.h,sha256=hCtCu2HtDzzfFthdjHSR-1kZSZoJzqEW8foGRSIAn4I,27825 +h3/_cy/h3lib.pxd,sha256=clbWU8D0EhtxLNF_zvyKi37bF09VhD5dEvivJqyMHOk,7300 +h3/_cy/latlng.cpython-312-x86_64-linux-gnu.so,sha256=NaGNOxZZnW73JPgq4x_V2qLSwWQZz2xsizKjp2dCtDE,361048 +h3/_cy/latlng.pxd,sha256=WjOjY4jH_2lK6hnaVXCusqLg2DvvZ4rvAJyXeCQd2uk,266 +h3/_cy/latlng.pyx,sha256=FW8L4LEr6l7qhsRL28mFakVLHnfqSCKQDQ6Ued-nLks,8370 +h3/_cy/memory.cpython-312-x86_64-linux-gnu.so,sha256=EHSVOPd7PQRXujCOKGZgW6ky22Q_aIcyRGM0udRA6Rk,217488 +h3/_cy/memory.pxd,sha256=jXGN68hdcPUyA_yFCesZxCksHwmHMO4Ah-I9ZwYBvXs,236 +h3/_cy/memory.pyx,sha256=KZo65H3rXn290iH05ZNkyt78mKEGIaVjzmgYdOon_PU,7103 +h3/_cy/to_multipoly.cpython-312-x86_64-linux-gnu.so,sha256=uZDy4tiE3lQf2cEJRo6Qr8RgYeL7uCWZlNY62MXJxfo,287856 +h3/_cy/to_multipoly.pyx,sha256=bpaP3rGl5mtWTp1aH0ksL-gMi_tobSxjnZvxX6B2R4o,1369 +h3/_cy/util.cpython-312-x86_64-linux-gnu.so,sha256=046nqhFwz8PqI0Wu6gX25Y-xAvw7jkjGo4Yuh-mTtDk,157392 +h3/_cy/util.pxd,sha256=xUjm1N9iOV2rGSwBeWNSCbvzqBNvmTzykAURj-aaBOU,372 +h3/_cy/util.pyx,sha256=LiCAuRFgVwi01HTd5FnglwiGUYPXcIjurFm9u6sU6Dc,2562 +h3/_cy/vertex.cpython-312-x86_64-linux-gnu.so,sha256=ZXo6XjLbLEyCB89fmrWw3VQcsx0OsAF1cRaWgdQ3-OM,283824 +h3/_cy/vertex.pxd,sha256=lnRECjEFpohFr7iB6d2UrTN-s37UzXnSpaP_UV5866I,229 +h3/_cy/vertex.pyx,sha256=_26ur2q3CZzb0BGQ85Lvbjq9dRWscvjAsKlgmCAqVHc,910 +h3/_h3shape.py,sha256=hhsBJyuy5MqDhG_BixzbCTOfQgaO6Gf2olFEYd6iLKE,8559 +h3/_version.py,sha256=Tjq_BBPmWroGgfnrf7u9TpAWss9iA5PnWSv8kkZkJY8,88 +h3/api/__init__.py,sha256=X6P438-1N_2jWlFKNZ8q_jUop7ICRhwwa4I0RuxtQIQ,114 +h3/api/__pycache__/__init__.cpython-312.pyc,, 
+h3/api/basic_int/__init__.py,sha256=VE2NBYUUmTcx3tinyXpZPd2p9dE4N3V4Qg6Cr7k_X1I,28858 +h3/api/basic_int/__pycache__/__init__.cpython-312.pyc,, +h3/api/basic_int/__pycache__/_convert.cpython-312.pyc,, +h3/api/basic_int/_convert.py,sha256=GmVFFTT-mMHJ_Newx-6aF8TAtKBTrXglMx5RNV9w0Nk,187 +h3/api/basic_str/__init__.py,sha256=VE2NBYUUmTcx3tinyXpZPd2p9dE4N3V4Qg6Cr7k_X1I,28858 +h3/api/basic_str/__pycache__/__init__.cpython-312.pyc,, +h3/api/basic_str/__pycache__/_convert.cpython-312.pyc,, +h3/api/basic_str/_convert.py,sha256=9B7uHdbUJhDb0GUEdSg-dUQpNCmvPviK6_DX99YS5UE,256 +h3/api/memview_int/__init__.py,sha256=VE2NBYUUmTcx3tinyXpZPd2p9dE4N3V4Qg6Cr7k_X1I,28858 +h3/api/memview_int/__pycache__/__init__.cpython-312.pyc,, +h3/api/memview_int/__pycache__/_convert.cpython-312.pyc,, +h3/api/memview_int/_convert.py,sha256=ef5Omw0GJsUlR8r66O1p8f9IEEhCeTdDjlvN2A0urjA,105 +h3/api/numpy_int/__init__.py,sha256=VE2NBYUUmTcx3tinyXpZPd2p9dE4N3V4Qg6Cr7k_X1I,28858 +h3/api/numpy_int/__pycache__/__init__.cpython-312.pyc,, +h3/api/numpy_int/__pycache__/_convert.cpython-312.pyc,, +h3/api/numpy_int/_convert.py,sha256=dhZLocPjucl6Uu872k1tAcuWXBRT29oUG8qFMoleulE,288 +include/h3/h3api.h,sha256=hCtCu2HtDzzfFthdjHSR-1kZSZoJzqEW8foGRSIAn4I,27825 +lib64/cmake/h3/h3Config.cmake,sha256=QwISc49jy-ZH9Ba1YCQnick54sapi_pZDEJjDJyMrb4,959 +lib64/cmake/h3/h3ConfigVersion.cmake,sha256=qL3-vOniX-s38C9sRHCf-Dy4FUs6km6nGpLZwDbAV34,2762 +lib64/cmake/h3/h3Targets-release.cmake,sha256=pnJffyu5LDg0GeSMTTwBoVLamlDKo8XrjrCEyN7aP0w,797 +lib64/cmake/h3/h3Targets.cmake,sha256=Q3aejXEM4BFb-onC111Auk4lyl3cwmHzf4JIyc4Gny8,4217 +lib64/libh3.a,sha256=1zboO-LUyxmYhN4RXDquGLb_4qi3TAq-r1XL4cu4KVc,174168 diff --git a/python/h3-4.4.1.dist-info/WHEEL b/python/h3-4.4.1.dist-info/WHEEL new file mode 100644 index 000000000..492940aee --- /dev/null +++ b/python/h3-4.4.1.dist-info/WHEEL @@ -0,0 +1,7 @@ +Wheel-Version: 1.0 +Generator: scikit-build-core 0.11.6 +Root-Is-Purelib: false +Tag: cp312-cp312-manylinux_2_17_x86_64 +Tag: cp312-cp312-manylinux2014_x86_64 +Tag: cp312-cp312-manylinux_2_28_x86_64 + diff --git a/python/h3-4.4.1.dist-info/licenses/LICENSE b/python/h3-4.4.1.dist-info/licenses/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/python/h3-4.4.1.dist-info/licenses/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/python/h3/CMakeLists.txt b/python/h3/CMakeLists.txt new file mode 100644 index 000000000..9d48e6671 --- /dev/null +++ b/python/h3/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(_cy) diff --git a/python/h3/__init__.py b/python/h3/__init__.py new file mode 100644 index 000000000..1e4f4b8d3 --- /dev/null +++ b/python/h3/__init__.py @@ -0,0 +1,33 @@ +# flake8: noqa + +from .api.basic_str import * +from ._version import __version__ + +from ._cy import ( + UnknownH3ErrorCode, + H3BaseException, + + H3GridNavigationError, + H3MemoryError, + H3ValueError, + + H3FailedError, + H3DomainError, + H3LatLngDomainError, + H3ResDomainError, + H3CellInvalidError, + H3DirEdgeInvalidError, + H3UndirEdgeInvalidError, + H3VertexInvalidError, + H3PentagonError, + H3DuplicateInputError, + H3NotNeighborsError, + H3ResMismatchError, + H3MemoryAllocError, + H3MemoryBoundsError, + H3OptionInvalidError, + H3IndexInvalidError, + H3BaseCellDomainError, + H3DigitDomainError, + H3DeletedDigitError, +) diff --git a/python/h3/__pycache__/__init__.cpython-312.pyc b/python/h3/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..f37f07ba6 Binary files /dev/null and b/python/h3/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/h3/__pycache__/_h3shape.cpython-312.pyc b/python/h3/__pycache__/_h3shape.cpython-312.pyc new file mode 100644 index 000000000..f20a59ff2 Binary files /dev/null and b/python/h3/__pycache__/_h3shape.cpython-312.pyc differ diff --git a/python/h3/__pycache__/_version.cpython-312.pyc b/python/h3/__pycache__/_version.cpython-312.pyc new file mode 100644 index 000000000..f1a073f91 Binary files /dev/null and b/python/h3/__pycache__/_version.cpython-312.pyc differ diff --git a/python/h3/_cy/CMakeLists.txt b/python/h3/_cy/CMakeLists.txt new file mode 100644 index 000000000..2cb2b6f3f --- /dev/null +++ b/python/h3/_cy/CMakeLists.txt @@ -0,0 +1,53 @@ +list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}) + +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) + +macro(add_cython_file filename) + add_custom_command( + OUTPUT "${filename}.c" + COMMENT + "Making ${CMAKE_CURRENT_BINARY_DIR}/${filename}.c from ${CMAKE_CURRENT_SOURCE_DIR}/${filename}.pyx" + COMMAND Python::Interpreter -m cython + "${CMAKE_CURRENT_SOURCE_DIR}/${filename}.pyx" --output-file "${filename}.c" -I ${CMAKE_CURRENT_SOURCE_DIR} + DEPENDS "${filename}.pyx" + VERBATIM) + + python_add_library(${filename} MODULE "${filename}.c" WITH_SOABI) + + set_property(TARGET ${filename} PROPERTY C_STANDARD 99) + target_link_libraries(${filename} PRIVATE h3) + install(TARGETS ${filename} LIBRARY DESTINATION ${SKBUILD_PROJECT_NAME}/_cy) +endmacro() + +# GLOB pattern is recommended against +# https://cmake.org/cmake/help/v3.14/command/file.html?highlight=file#filesystem +add_cython_file(cells) +add_cython_file(edges) +add_cython_file(error_system) +add_cython_file(latlng) +add_cython_file(memory) +add_cython_file(vertex) + +add_cython_file(to_multipoly) +add_cython_file(util) + +# Include pyx and pxd files in distribution for use by Cython API +install( + FILES + cells.pxd + cells.pyx + edges.pxd + edges.pyx + error_system.pyx + h3lib.pxd + latlng.pxd + latlng.pyx + memory.pxd + memory.pyx + util.pxd + util.pyx + vertex.pxd + vertex.pyx + DESTINATION + ${SKBUILD_PROJECT_NAME}/_cy +) diff --git a/python/h3/_cy/__init__.py b/python/h3/_cy/__init__.py new file mode 100644 index 000000000..70fa087c9 --- /dev/null +++ b/python/h3/_cy/__init__.py @@ -0,0 +1,122 @@ +# flake8: noqa + +""" +This module should serve as the 
interface between the C/Cython code and +the Python code. That is, it is an internal API. +This module should import all the Cython functions we +intend to expose to be used in pure Python code, and each of the H3-py +APIs should *only* reference functions and symbols listed here. + +These functions should handle input validation, guard against the +possibility of segfaults, raise appropriate errors, and handle memory +management. The API wrapping code around this should focus on the cosmetic +function interface and input conversion (string to int, for instance). +""" + +from .cells import ( + is_valid_index, + is_valid_cell, + is_pentagon, + get_base_cell_number, + get_resolution, + get_index_digit, + construct_cell, + cell_to_parent, + grid_distance, + grid_disk, + grid_ring, + cell_to_children_size, + cell_to_children, + cell_to_child_pos, + child_pos_to_cell, + compact_cells, + uncompact_cells, + get_num_cells, + average_hexagon_area, + cell_area, + grid_path_cells, + is_res_class_iii, + get_pentagons, + get_res0_cells, + cell_to_center_child, + get_icosahedron_faces, + cell_to_local_ij, + local_ij_to_cell, +) + +from .edges import ( + are_neighbor_cells, + cells_to_directed_edge, + is_valid_directed_edge, + get_directed_edge_origin, + get_directed_edge_destination, + directed_edge_to_cells, + origin_to_directed_edges, + average_hexagon_edge_length, + edge_length, +) + +from .latlng import ( + latlng_to_cell, + cell_to_latlng, + polygon_to_cells, + polygons_to_cells, + polygon_to_cells_experimental, + polygons_to_cells_experimental, + cell_to_boundary, + directed_edge_to_boundary, + great_circle_distance, +) + +from .vertex import ( + cell_to_vertex, + cell_to_vertexes, + vertex_to_latlng, + is_valid_vertex, +) + +from .to_multipoly import ( + cells_to_multi_polygon +) + +from .util import ( + c_version, + str_to_int, + int_to_str, +) + +from .memory import ( + iter_to_mv, +) + +from .error_system import ( + UnknownH3ErrorCode, + H3BaseException, + + H3GridNavigationError, + H3MemoryError, + H3ValueError, + + H3FailedError, + H3DomainError, + H3LatLngDomainError, + H3ResDomainError, + H3CellInvalidError, + H3DirEdgeInvalidError, + H3UndirEdgeInvalidError, + H3VertexInvalidError, + H3PentagonError, + H3DuplicateInputError, + H3NotNeighborsError, + H3ResMismatchError, + H3MemoryAllocError, + H3MemoryBoundsError, + H3OptionInvalidError, + H3IndexInvalidError, + H3BaseCellDomainError, + H3DigitDomainError, + H3DeletedDigitError, + + get_H3_ERROR_END, + error_code_to_exception, +) diff --git a/python/h3/_cy/__pycache__/__init__.cpython-312.pyc b/python/h3/_cy/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..65cbf065e Binary files /dev/null and b/python/h3/_cy/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/h3/_cy/cells.cpython-312-x86_64-linux-gnu.so b/python/h3/_cy/cells.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..c6d33f6dc Binary files /dev/null and b/python/h3/_cy/cells.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/h3/_cy/cells.pxd b/python/h3/_cy/cells.pxd new file mode 100644 index 000000000..1bc31b84b --- /dev/null +++ b/python/h3/_cy/cells.pxd @@ -0,0 +1,30 @@ +from .h3lib cimport bool, int64_t, H3int + +cpdef bool is_valid_index(H3int h) +cpdef bool is_valid_cell(H3int h) +cpdef bool is_pentagon(H3int h) +cpdef int get_base_cell_number(H3int h) except -1 +cpdef int get_resolution(H3int h) except -1 +cpdef int get_index_digit(H3int h, int res) except -1 +cpdef H3int construct_cell(int baseCellNumber, 
const int[:] digits) except 0 +cpdef int grid_distance(H3int h1, H3int h2) except -1 +cpdef H3int[:] grid_disk(H3int h, int k) +cpdef H3int[:] grid_ring(H3int h, int k) +cpdef H3int cell_to_parent(H3int h, res=*) except 0 +cpdef int64_t cell_to_children_size(H3int h, res=*) except -1 +cpdef H3int[:] cell_to_children(H3int h, res=*) +cpdef H3int cell_to_center_child(H3int h, res=*) except 0 +cpdef int64_t cell_to_child_pos(H3int child, int parent_res) except -1 +cpdef H3int child_pos_to_cell(H3int parent, int child_res, int64_t child_pos) except 0 +cpdef H3int[:] compact_cells(const H3int[:] hu) +cpdef H3int[:] uncompact_cells(const H3int[:] hc, int res) +cpdef int64_t get_num_cells(int resolution) except -1 +cpdef double average_hexagon_area(int resolution, unit=*) except -1 +cpdef double cell_area(H3int h, unit=*) except -1 +cpdef H3int[:] grid_path_cells(H3int start, H3int end) +cpdef bool is_res_class_iii(H3int h) +cpdef H3int[:] get_pentagons(int res) +cpdef H3int[:] get_res0_cells() +cpdef get_icosahedron_faces(H3int h) +cpdef (int, int) cell_to_local_ij(H3int origin, H3int h) except * +cpdef H3int local_ij_to_cell(H3int origin, int i, int j) except 0 diff --git a/python/h3/_cy/cells.pyx b/python/h3/_cy/cells.pyx new file mode 100644 index 000000000..78d256130 --- /dev/null +++ b/python/h3/_cy/cells.pyx @@ -0,0 +1,455 @@ +cimport h3lib +from .h3lib cimport bool, int64_t, H3int, H3ErrorCodes + +from .util cimport ( + check_cell, + check_index, + check_distance, +) + +from .error_system cimport ( + H3Error, + check_for_error, + check_for_error_msg, +) + +from .memory cimport ( + H3MemoryManager, + int_mv, +) + +# todo: add notes about Cython exception handling + +cpdef bool is_valid_index(H3int h): + """Validates an H3 index (cell, vertex, or directed edge). + + Returns + ------- + boolean + """ + return h3lib.isValidIndex(h) == 1 + +# bool is a python type, so we don't need the except clause +cpdef bool is_valid_cell(H3int h): + """Validates an H3 cell (hexagon or pentagon) + + Returns + ------- + boolean + """ + return h3lib.isValidCell(h) == 1 + + +cpdef bool is_pentagon(H3int h): + return h3lib.isPentagon(h) == 1 + + +cpdef int get_base_cell_number(H3int h) except -1: + check_cell(h) + + return h3lib.getBaseCellNumber(h) + + +cpdef int get_resolution(H3int h) except -1: + """Returns the resolution of an H3 Index + 0--15 + """ + check_cell(h) + + return h3lib.getResolution(h) + +cpdef int get_index_digit(H3int h, int res) except -1: + cdef: + int digit + + check_index(h) + + check_for_error( + h3lib.getIndexDigit(h, res, &digit) + ) + + return digit + +cpdef H3int construct_cell(int base_cell_number, const int[:] digits) except 0: + cdef: + H3int out + int res = len(digits) + H3Error err + + if res > 0: + err = h3lib.constructCell(res, base_cell_number, &digits[0], &out) + else: + err = h3lib.constructCell(res, base_cell_number, NULL, &out) + + check_for_error(err) + + return out + + +cpdef int grid_distance(H3int h1, H3int h2) except -1: + """ Compute the grid distance between two cells + """ + cdef: + int64_t distance + + check_cell(h1) + check_cell(h2) + + check_for_error( + h3lib.gridDistance(h1, h2, &distance) + ) + + return distance + +cpdef H3int[:] grid_disk(H3int h, int k): + """ Return cells at grid distance `<= k` from `h`. 
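+
+    The output contains at most `3*k*(k+1) + 1` cells, the upper
+    bound computed by `maxGridDiskSize` below.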
+ """ + cdef: + int64_t n + + check_cell(h) + check_distance(k) + + check_for_error( + h3lib.maxGridDiskSize(k, &n) + ) + + hmm = H3MemoryManager(n) + check_for_error( + h3lib.gridDisk(h, k, hmm.ptr) + ) + mv = hmm.to_mv() + + return mv + + +cpdef H3int[:] grid_ring(H3int h, int k): + """ Return cells at grid distance `== k` from `h`. + Collection is "hollow" for k >= 1. + """ + check_cell(h) + check_distance(k) + + n = 6*k if k > 0 else 1 + hmm = H3MemoryManager(n) + check_for_error( + h3lib.gridRing(h, k, hmm.ptr) + ) + mv = hmm.to_mv() + + return mv + + +cpdef H3int cell_to_parent(H3int h, res=None) except 0: + cdef: + H3int parent + + check_cell(h) + if res is None: + res = get_resolution(h) - 1 + + err = h3lib.cellToParent(h, res, &parent) + if err: + msg = 'Invalid parent resolution {} for cell {}.' + msg = msg.format(res, hex(h)) + check_for_error_msg(err, msg) + + return parent + + +cpdef int64_t cell_to_children_size(H3int h, res=None) except -1: + cdef: + int64_t n + + check_cell(h) + if res is None: + res = get_resolution(h) + 1 + + err = h3lib.cellToChildrenSize(h, res, &n) + if err: + msg = 'Invalid child resolution {} for cell {}.' + msg = msg.format(res, hex(h)) + check_for_error_msg(err, msg) + + return n + + +cpdef H3int[:] cell_to_children(H3int h, res=None): + check_cell(h) + if res is None: + res = get_resolution(h) + 1 + + n = cell_to_children_size(h, res) + + hmm = H3MemoryManager(n) + check_for_error( + h3lib.cellToChildren(h, res, hmm.ptr) + ) + mv = hmm.to_mv() + + return mv + + + +cpdef H3int cell_to_center_child(H3int h, res=None) except 0: + cdef: + H3int child + + check_cell(h) + if res is None: + res = get_resolution(h) + 1 + + err = h3lib.cellToCenterChild(h, res, &child) + if err: + msg = 'Invalid child resolution {} for cell {}.' + msg = msg.format(res, hex(h)) + check_for_error_msg(err, msg) + + return child + + +cpdef int64_t cell_to_child_pos(H3int child, int parent_res) except -1: + cdef: + int64_t child_pos + + check_cell(child) + err = h3lib.cellToChildPos(child, parent_res, &child_pos) + if err: + msg = "Couldn't find child pos of cell {} at res {}." + msg = msg.format(hex(child), parent_res) + check_for_error_msg(err, msg) + + return child_pos + + +cpdef H3int child_pos_to_cell(H3int parent, int child_res, int64_t child_pos) except 0: + cdef: + H3int child + + check_cell(parent) + err = h3lib.childPosToCell(child_pos, parent, child_res, &child) + if err: + msg = "Couldn't find child with pos {} at res {} from parent {}." + msg = msg.format(child_pos, child_res, hex(parent)) + check_for_error_msg(err, msg) + + return child + + +cpdef H3int[:] compact_cells(const H3int[:] hu): + # todo: fix this with my own Cython object "wrapper" class? + # everything has a .ptr interface? + # todo: the Clib can handle 0-len arrays because it **avoids** + # dereferencing the pointer, but Cython's syntax of + # `&hu[0]` **requires** a dereference. For Cython, checking for array + # length of zero and returning early seems like the easiest solution. + # note: open to better ideas! + + if len(hu) == 0: + return H3MemoryManager(0).to_mv() + + for h in hu: ## todo: should we have an array version? would that be faster? 
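+        # validate every input cell up front so the C call below
+        # never sees an invalid index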
+ check_cell(h) + + cdef size_t n = len(hu) + hmm = H3MemoryManager(n) + check_for_error( + h3lib.compactCells(&hu[0], hmm.ptr, n) + ) + mv = hmm.to_mv() + + return mv + + +# todo: https://stackoverflow.com/questions/50684977/cython-exception-type-for-a-function-returning-a-typed-memoryview +# apparently, memoryviews are python objects, so we don't need to do the except clause +cpdef H3int[:] uncompact_cells(const H3int[:] hc, int res): + # todo: the Clib can handle 0-len arrays because it **avoids** + # dereferencing the pointer, but Cython's syntax of + # `&hc[0]` **requires** a dereference. For Cython, checking for array + # length of zero and returning early seems like the easiest solution. + # note: open to better ideas! + cdef: + int64_t n + + + if len(hc) == 0: + return H3MemoryManager(0).to_mv() + + for h in hc: + check_cell(h) + + check_for_error( + h3lib.uncompactCellsSize(&hc[0], len(hc), res, &n) + ) + + hmm = H3MemoryManager(n) + check_for_error( + h3lib.uncompactCells( + &hc[0], # todo: symmetry here with the wrapper object might be nice. hc.ptr / hc.n + len(hc), + hmm.ptr, + hmm.n, + res + ) + ) + + mv = hmm.to_mv() + + return mv + + +cpdef int64_t get_num_cells(int resolution) except -1: + cdef: + int64_t num_cells + + check_for_error( + h3lib.getNumCells(resolution, &num_cells) + ) + + return num_cells + + +cpdef double average_hexagon_area(int resolution, unit='km^2') except -1: + cdef: + double area + + check_for_error( + h3lib.getHexagonAreaAvgKm2(resolution, &area) + ) + + # todo: multiple units + convert = { + 'km^2': 1.0, + 'm^2': 1000*1000.0 + } + + try: + area *= convert[unit] + except: + raise ValueError('Unknown unit: {}'.format(unit)) + + return area + + +cpdef double cell_area(H3int h, unit='km^2') except -1: + cdef: + double area + + if unit == 'rads^2': + err = h3lib.cellAreaRads2(h, &area) + elif unit == 'km^2': + err = h3lib.cellAreaKm2(h, &area) + elif unit == 'm^2': + err = h3lib.cellAreaM2(h, &area) + else: + raise ValueError('Unknown unit: {}'.format(unit)) + + check_for_error(err) + + return area + + +cdef _could_not_find_line(err, start, end): + msg = "Couldn't find line between cells {} and {}" + msg = msg.format(hex(start), hex(end)) + + check_for_error_msg(err, msg) + +cpdef H3int[:] grid_path_cells(H3int start, H3int end): + cdef: + int64_t n + + # todo: can we segfault here with invalid inputs? + # Can we trust the c library to validate the start/end cells? + # probably applies to all size/work pairs of functions... + err = h3lib.gridPathCellsSize(start, end, &n) + + _could_not_find_line(err, start, end) + + hmm = H3MemoryManager(n) + err = h3lib.gridPathCells(start, end, hmm.ptr) + + _could_not_find_line(err, start, end) + + # todo: probably here too? + mv = hmm.to_mv() + + return mv + +cpdef bool is_res_class_iii(H3int h): + return h3lib.isResClassIII(h) == 1 + + +cpdef H3int[:] get_pentagons(int res): + n = h3lib.pentagonCount() + + hmm = H3MemoryManager(n) + check_for_error( + h3lib.getPentagons(res, hmm.ptr) + ) + mv = hmm.to_mv() + + return mv + + +cpdef H3int[:] get_res0_cells(): + n = h3lib.res0CellCount() + + hmm = H3MemoryManager(n) + check_for_error( + h3lib.getRes0Cells(hmm.ptr) + ) + mv = hmm.to_mv() + + return mv + +# oh, this is returning a set?? +# todo: convert to int[:]? +cpdef get_icosahedron_faces(H3int h): + cdef: + int n + int[:] faces ## todo: weird, this needs to be specified to avoid errors. cython bug? 
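+    # maxFaceCount gives an upper bound; unused slots in the faces
+    # buffer come back negative and are filtered out below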
+ + check_for_error( + h3lib.maxFaceCount(h, &n) + ) + + faces = int_mv(n) + check_for_error( + h3lib.getIcosahedronFaces(h, &faces[0]) + ) + + # todo: wait? do faces start from 0 or 1? + # we could do this check/processing in the int_mv object + out = [f for f in faces if f >= 0] + + return out + + +cpdef (int, int) cell_to_local_ij(H3int origin, H3int h) except *: + cdef: + h3lib.CoordIJ c + + err = h3lib.cellToLocalIj(origin, h, 0, &c) + if err: + msg = "Couldn't find local (i,j) between cells {} and {}." + msg = msg.format(hex(origin), hex(h)) + check_for_error_msg(err, msg) + + return c.i, c.j + +cpdef H3int local_ij_to_cell(H3int origin, int i, int j) except 0: + cdef: + h3lib.CoordIJ c + H3int out + + c.i, c.j = i, j + + err = h3lib.localIjToCell(origin, &c, 0, &out) + if err: + msg = "Couldn't find cell at local ({},{}) from cell {}." + msg = msg.format(i, j, hex(origin)) + check_for_error_msg(err, msg) + + return out diff --git a/python/h3/_cy/edges.cpython-312-x86_64-linux-gnu.so b/python/h3/_cy/edges.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..d1f4cd25f Binary files /dev/null and b/python/h3/_cy/edges.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/h3/_cy/edges.pxd b/python/h3/_cy/edges.pxd new file mode 100644 index 000000000..b7fcf8499 --- /dev/null +++ b/python/h3/_cy/edges.pxd @@ -0,0 +1,11 @@ +from .h3lib cimport bool, H3int + +cpdef bool are_neighbor_cells(H3int h1, H3int h2) +cpdef H3int cells_to_directed_edge(H3int origin, H3int destination) except * +cpdef bool is_valid_directed_edge(H3int e) +cpdef H3int get_directed_edge_origin(H3int e) except 1 +cpdef H3int get_directed_edge_destination(H3int e) except 1 +cpdef (H3int, H3int) directed_edge_to_cells(H3int e) except * +cpdef H3int[:] origin_to_directed_edges(H3int origin) +cpdef double average_hexagon_edge_length(int resolution, unit=*) except -1 +cpdef double edge_length(H3int e, unit=*) except -1 diff --git a/python/h3/_cy/edges.pyx b/python/h3/_cy/edges.pyx new file mode 100644 index 000000000..bd0aa7c00 --- /dev/null +++ b/python/h3/_cy/edges.pyx @@ -0,0 +1,114 @@ +cimport h3lib +from .h3lib cimport bool, H3int + +from .error_system cimport check_for_error + +from .memory cimport H3MemoryManager + +# todo: make bint +cpdef bool are_neighbor_cells(H3int h1, H3int h2): + cdef: + int out + + err = h3lib.areNeighborCells(h1, h2, &out) + + # note: we are intentionally not raising an error here, and just + # returning false. + # todo: is this choice consistent across the Python and C libs? 
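+    # e.g., an invalid cell argument results in False rather than an exception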
+ if err: + return False + + return out == 1 + + +cpdef H3int cells_to_directed_edge(H3int origin, H3int destination) except *: + cdef: + int neighbor_out + H3int out + + check_for_error( + h3lib.cellsToDirectedEdge(origin, destination, &out) + ) + + return out + + +cpdef bool is_valid_directed_edge(H3int e): + return h3lib.isValidDirectedEdge(e) == 1 + +cpdef H3int get_directed_edge_origin(H3int e) except 1: + cdef: + H3int out + + check_for_error( + h3lib.getDirectedEdgeOrigin(e, &out) + ) + + return out + +cpdef H3int get_directed_edge_destination(H3int e) except 1: + cdef: + H3int out + + check_for_error( + h3lib.getDirectedEdgeDestination(e, &out) + ) + + return out + +cpdef (H3int, H3int) directed_edge_to_cells(H3int e) except *: + # todo: use directed_edge_to_cells in h3lib + return get_directed_edge_origin(e), get_directed_edge_destination(e) + +cpdef H3int[:] origin_to_directed_edges(H3int origin): + """ Returns the 6 (or 5 for pentagons) directed edges + for the given origin cell + """ + + hmm = H3MemoryManager(6) + check_for_error( + h3lib.originToDirectedEdges(origin, hmm.ptr) + ) + mv = hmm.to_mv() + + return mv + + +cpdef double average_hexagon_edge_length(int resolution, unit='km') except -1: + cdef: + double length + + check_for_error( + h3lib.getHexagonEdgeLengthAvgKm(resolution, &length) + ) + + # todo: multiple units + convert = { + 'km': 1.0, + 'm': 1000.0 + } + + try: + length *= convert[unit] + except: + raise ValueError('Unknown unit: {}'.format(unit)) + + return length + + +cpdef double edge_length(H3int e, unit='km') except -1: + cdef: + double length + + if unit == 'rads': + err = h3lib.edgeLengthRads(e, &length) + elif unit == 'km': + err = h3lib.edgeLengthKm(e, &length) + elif unit == 'm': + err = h3lib.edgeLengthM(e, &length) + else: + raise ValueError('Unknown unit: {}'.format(unit)) + + check_for_error(err) + + return length diff --git a/python/h3/_cy/error_system.cpython-312-x86_64-linux-gnu.so b/python/h3/_cy/error_system.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..5a5d9fde2 Binary files /dev/null and b/python/h3/_cy/error_system.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/h3/_cy/error_system.pxd b/python/h3/_cy/error_system.pxd new file mode 100644 index 000000000..4a1e3b450 --- /dev/null +++ b/python/h3/_cy/error_system.pxd @@ -0,0 +1,6 @@ +from .h3lib cimport H3Error + +cpdef error_code_to_exception(H3Error err) +cdef check_for_error(H3Error err) +cdef check_for_error_msg(H3Error err, str msg) +cpdef H3Error get_H3_ERROR_END() diff --git a/python/h3/_cy/error_system.pyx b/python/h3/_cy/error_system.pyx new file mode 100644 index 000000000..fc18f4672 --- /dev/null +++ b/python/h3/_cy/error_system.pyx @@ -0,0 +1,260 @@ +""" +Exceptions from the h3-py library have three possible sources: + +- the Python code +- the Cython code +- the underlying H3 C library code + +The Python and Cython `h3-py` code will only raise standard Python +built-in exceptions; **no custom** exception classes will be used. + +Conversely, many functions in the H3 C library return a `uint32_t` +error code (aliased as type `H3Error`). +When these errors happen (and `h3-py` can't recover from them internally), +they are passed up to the Python/Cython code, where their +`uint32_t` error values are converted to **custom** Python exception types. +These custom exception classes all inherit from `H3BaseException`. + +There is a 1-1 correspondence between the concrete subclasses of +`H3BaseException` and the H3 C library `H3ErrorCodes` values. 
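+(For example, the C error code `E_CELL_INVALID` maps to the Python
+exception `H3CellInvalidError`.)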
+The correspondence is intentional, so that the user can refer to the +H3 C library documentation on these errors. + +The (`uint32_t` <-> Exception) correspondence should be clear from +the names of each error/exception, but the explicit mapping is given by +a dictionary in the code below. + +Note that some "abstract" subclasses of `H3BaseException` are also included to +group the exceptions by type. (We say "abstract" because Python has no easy +way to make true abstract exception classes.) + +These "abstract" exceptions will never be raised directly by `h3-py`, but they +allow the user to catch general groups of errors. +Note that `h3-py` will only ever directly raise +the "concrete" exception classes. + +Summarizing, all exceptions originating from the C library inherit from +`H3BaseException`, which has both "abstract" and "concrete" subclasses. + +**Abstract classes**: + +- H3BaseException +- H3ValueError +- H3MemoryError +- H3GridNavigationError + +**Concrete classes**: + +- H3FailedError +- H3DomainError +- H3LatLngDomainError +- H3ResDomainError +- H3CellInvalidError +- H3DirEdgeInvalidError +- H3UndirEdgeInvalidError +- H3VertexInvalidError +- H3PentagonError +- H3DuplicateInputError +- H3NotNeighborsError +- H3ResMismatchError +- H3MemoryAllocError +- H3MemoryBoundsError +- H3OptionInvalidError +- H3IndexInvalidError +- H3BaseCellDomainError +- H3DigitDomainError +- H3DeletedDigitError + + +# TODO: add tests verifying that concrete exception classes have the right error codes associated with them +""" + +from contextlib import contextmanager + +from .h3lib cimport ( + H3Error, + + # H3ErrorCodes enum values + E_SUCCESS, + E_FAILED, + E_DOMAIN, + E_LATLNG_DOMAIN, + E_RES_DOMAIN, + E_CELL_INVALID, + E_DIR_EDGE_INVALID, + E_UNDIR_EDGE_INVALID, + E_VERTEX_INVALID, + E_PENTAGON, + E_DUPLICATE_INPUT, + E_NOT_NEIGHBORS, + E_RES_MISMATCH, + E_MEMORY_ALLOC, + E_MEMORY_BOUNDS, + E_OPTION_INVALID, + E_INDEX_INVALID, + E_BASE_CELL_DOMAIN, + E_DIGIT_DOMAIN, + E_DELETED_DIGIT, + H3_ERROR_END # sentinel value +) + +@contextmanager +def _the_error(obj): + """ + Syntactic maple syrup for grouping exception definitions. + The associated `with` statement ends up as a not-half-bad + approximation to a valid sentence fragment. + + This provides sort of a "pretend scope", in that it allows for + block indentation which helps to visually indicate the "scope" + of the `... as e` statement. Just note that Python doesn't treat the + `with` block as a "true" separate scope. + + Note that this doesn't actually do anything context-manager-y, outside + of the variable assignment and block indentation. + """ + yield obj + + +# +# Base exception for C library error codes +# +class H3BaseException(Exception): + """ Base H3 exception class. + + Concrete subclasses of this class correspond to specific + error codes from the C library. + + Base/abstract subclasses will have `h3_error_code = None`, while + concrete subclasses will have `h3_error_code` equal to their associated + C library error code. + """ + h3_error_code = None + + +# +# A few "abstract" exceptions; organizational. +# +with _the_error(H3BaseException) as e: + class H3ValueError(e, ValueError): ... + class H3MemoryError(e, MemoryError): ... + class H3GridNavigationError(e, RuntimeError): ... + + +# +# Concrete exceptions +# +class UnknownH3ErrorCode(H3BaseException): + """ + Indicates that the h3-py Python bindings have received an + unrecognized error code from the C library. + + This should never happen. Please report if you get this error. 
+ + Note that this exception is *outside* of the + H3BaseException class hierarchy. + """ + pass + +with _the_error(H3BaseException) as e: + class H3FailedError(e): ... + +with _the_error(H3GridNavigationError) as e: + class H3PentagonError(e): ... + +with _the_error(H3MemoryError) as e: + class H3MemoryAllocError(e): ... + class H3MemoryBoundsError(e): ... + +with _the_error(H3ValueError) as e: + class H3DomainError(e): ... + class H3LatLngDomainError(e): ... + class H3ResDomainError(e): ... + class H3CellInvalidError(e): ... + class H3DirEdgeInvalidError(e): ... + class H3UndirEdgeInvalidError(e): ... + class H3VertexInvalidError(e): ... + class H3DuplicateInputError(e): ... + class H3NotNeighborsError(e): ... + class H3ResMismatchError(e): ... + class H3OptionInvalidError(e): ... + class H3IndexInvalidError(e): ... + class H3BaseCellDomainError(e): ... + class H3DigitDomainError(e): ... + class H3DeletedDigitError(e): ... + + +""" +This defines a mapping between uint32_t error codes and concrete Python +exception classes. +Note that we intentionally omit E_SUCCESS, as it isn't an actual error. +""" +error_mapping = { + E_FAILED: H3FailedError, + E_DOMAIN: H3DomainError, + E_LATLNG_DOMAIN: H3LatLngDomainError, + E_RES_DOMAIN: H3ResDomainError, + E_CELL_INVALID: H3CellInvalidError, + E_DIR_EDGE_INVALID: H3DirEdgeInvalidError, + E_UNDIR_EDGE_INVALID: H3UndirEdgeInvalidError, + E_VERTEX_INVALID: H3VertexInvalidError, + E_PENTAGON: H3PentagonError, + E_DUPLICATE_INPUT: H3DuplicateInputError, + E_NOT_NEIGHBORS: H3NotNeighborsError, + E_RES_MISMATCH: H3ResMismatchError, + E_MEMORY_ALLOC: H3MemoryAllocError, + E_MEMORY_BOUNDS: H3MemoryBoundsError, + E_OPTION_INVALID: H3OptionInvalidError, + E_INDEX_INVALID: H3IndexInvalidError, + E_BASE_CELL_DOMAIN: H3BaseCellDomainError, + E_DIGIT_DOMAIN: H3DigitDomainError, + E_DELETED_DIGIT: H3DeletedDigitError, +} + +# Go back and modify the class definitions so that each concrete exception +# stores its associated error code. +for code, ex in error_mapping.items(): + ex.h3_error_code = code + + +# +# Helper functions +# + +# TODO: Move the helpers to util? +# TODO: Unclear how/where to expose these functions. cdef/cpdef? + +cpdef error_code_to_exception(H3Error err): + """ + Return Python exception corresponding to integer error code + given via the H3ErrorCodes enum in `h3api.h.in` in the C library. + """ + if err == E_SUCCESS: + return None + elif err in error_mapping: + return error_mapping[err] + else: + return UnknownH3ErrorCode(err) + +cdef check_for_error(H3Error err): + ex = error_code_to_exception(err) + if ex: + raise ex + +cpdef H3Error get_H3_ERROR_END(): + """ + Return integer H3_ERROR_END from the H3ErrorCodes enum + in `h3api.h.in` in the C library, which is one greater than + the last valid error code. + """ + return H3_ERROR_END + +# todo: There's no easy way to do `*args` in `cdef` functions, but I'm also +# not sure this even needs to be a Cython `cdef` function at all, or that +# any of the other helper functions need to be in Cython. +# todo: Revisit after we've played with this a bit. +# todo: also: maybe the extra messages aren't that much more helpful... +cdef check_for_error_msg(H3Error err, str msg): + ex = error_code_to_exception(err) + if ex: + raise ex(msg) diff --git a/python/h3/_cy/h3api.h b/python/h3/_cy/h3api.h new file mode 100644 index 000000000..dc2505b04 --- /dev/null +++ b/python/h3/_cy/h3api.h @@ -0,0 +1,851 @@ +/* + * Copyright 2016-2021 Uber Technologies, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/** @file h3api.h
+ * @brief Primary H3 core library entry points.
+ *
+ * This file defines the public API of the H3 library. Incompatible changes to
+ * these functions require the library's major version be increased.
+ */
+
+#ifndef H3API_H
+#define H3API_H
+
+/*
+ * Preprocessor code to support renaming (prefixing) the public API.
+ * All public functions should be wrapped in H3_EXPORT so they can be
+ * renamed.
+ */
+#ifdef H3_PREFIX
+#define XTJOIN(a, b) a##b
+#define TJOIN(a, b) XTJOIN(a, b)
+
+/* export joins the user provided prefix with our exported function name */
+#define H3_EXPORT(name) TJOIN(H3_PREFIX, name)
+#else
+#define H3_EXPORT(name) name
+#endif
+
+/* Windows DLL requires attributes indicating what to export */
+#if _WIN32 && BUILD_SHARED_LIBS
+#if BUILDING_H3
+#define DECLSPEC __declspec(dllexport)
+#else
+#define DECLSPEC __declspec(dllimport)
+#endif
+#else
+#define DECLSPEC
+#endif
+
+/* For uint64_t */
+#include <stdint.h>
+/* For size_t */
+#include <stddef.h>
+
+/*
+ * H3 is compiled as C, not C++ code. `extern "C"` is needed for C++ code
+ * to be able to use the library.
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @brief Identifier for an object (cell, edge, etc) in the H3 system.
+ *
+ * The H3Index fits within a 64-bit unsigned integer.
+ */
+typedef uint64_t H3Index;
+
+/**
+ * Invalid index used to indicate an error from latLngToCell and related
+ * functions or missing data in arrays of H3 indices. Analogous to NaN in
+ * floating point.
+ */ +#define H3_NULL 0 + +/** @brief Result code (success or specific error) from an H3 operation */ +typedef uint32_t H3Error; + +typedef enum { + E_SUCCESS = 0, // Success (no error) + E_FAILED = 1, // The operation failed but a more specific error is not + // available + E_DOMAIN = 2, // Argument was outside the acceptable range (when a more + // specific error code is not available) + E_LATLNG_DOMAIN = 3, // Latitude or longitude arguments were outside the + // acceptable range + E_RES_DOMAIN = 4, // Resolution argument was outside the acceptable range + E_CELL_INVALID = 5, // `H3Index` cell argument was not valid + E_DIR_EDGE_INVALID = 6, // `H3Index` directed edge argument was not valid + E_UNDIR_EDGE_INVALID = + 7, // `H3Index` undirected edge argument was not valid + E_VERTEX_INVALID = 8, // `H3Index` vertex argument was not valid + E_PENTAGON = 9, // Pentagon distortion was encountered which the algorithm + // could not handle + E_DUPLICATE_INPUT = 10, // Duplicate input was encountered in the arguments + // and the algorithm could not handle it + E_NOT_NEIGHBORS = 11, // `H3Index` cell arguments were not neighbors + E_RES_MISMATCH = + 12, // `H3Index` cell arguments had incompatible resolutions + E_MEMORY_ALLOC = 13, // Necessary memory allocation failed + E_MEMORY_BOUNDS = 14, // Bounds of provided memory were not large enough + + E_OPTION_INVALID = 15, // Mode or flags argument was not valid + E_INDEX_INVALID = 16, // `H3Index` argument was not valid + E_BASE_CELL_DOMAIN = + 17, // Base cell number was outside of acceptable range + E_DIGIT_DOMAIN = 18, // Child digits invalid + E_DELETED_DIGIT = 19, // Deleted subsequence indicates invalid index + + // Sentinel value; not a real error. One past the last valid code. + H3_ERROR_END +} H3ErrorCodes; + +/** @defgroup describeH3Error describeH3Error + * Functions for describeH3Error + * @{ + */ +/** @brief converts the provided H3Error value into a description string */ +DECLSPEC const char *H3_EXPORT(describeH3Error)(H3Error err); +/** @} */ + +/* library version numbers generated from VERSION file */ +// clang-format off +#define H3_VERSION_MAJOR 4 +#define H3_VERSION_MINOR 4 +#define H3_VERSION_PATCH 1 +// clang-format on + +/** Maximum number of cell boundary vertices; worst case is pentagon: + * 5 original verts + 5 edge crossings + */ +#define MAX_CELL_BNDRY_VERTS 10 + +/** @struct LatLng + @brief latitude/longitude in radians +*/ +typedef struct { + double lat; ///< latitude in radians + double lng; ///< longitude in radians +} LatLng; + +/** @struct CellBoundary + @brief cell boundary in latitude/longitude +*/ +typedef struct { + int numVerts; ///< number of vertices + LatLng verts[MAX_CELL_BNDRY_VERTS]; ///< vertices in ccw order +} CellBoundary; + +/** @struct GeoLoop + * @brief similar to CellBoundary, but requires more alloc work + */ +typedef struct { + int numVerts; + LatLng *verts; +} GeoLoop; + +/** @struct GeoPolygon + * @brief Simplified core of GeoJSON Polygon coordinates definition + */ +typedef struct { + GeoLoop geoloop; ///< exterior boundary of the polygon + int numHoles; ///< number of elements in the array pointed to by holes + GeoLoop *holes; ///< interior boundaries (holes) in the polygon +} GeoPolygon; + +/** @struct GeoMultiPolygon + * @brief Simplified core of GeoJSON MultiPolygon coordinates definition + */ +typedef struct { + int numPolygons; + GeoPolygon *polygons; +} GeoMultiPolygon; + +/** + * Values representing polyfill containment modes, to be used in + * the `flags` bit field for 
`polygonToCellsExperimental`. + */ +typedef enum { + CONTAINMENT_CENTER = 0, ///< Cell center is contained in the shape + CONTAINMENT_FULL = 1, ///< Cell is fully contained in the shape + CONTAINMENT_OVERLAPPING = 2, ///< Cell overlaps the shape at any point + CONTAINMENT_OVERLAPPING_BBOX = 3, ///< Cell bounding box overlaps shape + CONTAINMENT_INVALID = 4 ///< This mode is invalid and should not be used +} ContainmentMode; + +/** @struct LinkedLatLng + * @brief A coordinate node in a linked geo structure, part of a linked list + */ +typedef struct LinkedLatLng LinkedLatLng; +struct LinkedLatLng { + LatLng vertex; + LinkedLatLng *next; +}; + +/** @struct LinkedGeoLoop + * @brief A loop node in a linked geo structure, part of a linked list + */ +typedef struct LinkedGeoLoop LinkedGeoLoop; +struct LinkedGeoLoop { + LinkedLatLng *first; + LinkedLatLng *last; + LinkedGeoLoop *next; +}; + +/** @struct LinkedGeoPolygon + * @brief A polygon node in a linked geo structure, part of a linked list. + */ +typedef struct LinkedGeoPolygon LinkedGeoPolygon; +struct LinkedGeoPolygon { + LinkedGeoLoop *first; + LinkedGeoLoop *last; + LinkedGeoPolygon *next; +}; + +/** @struct CoordIJ + * @brief IJ hexagon coordinates + * + * Each axis is spaced 120 degrees apart. + */ +typedef struct { + int i; ///< i component + int j; ///< j component +} CoordIJ; + +/** @defgroup latLngToCell latLngToCell + * Functions for latLngToCell + * @{ + */ +/** @brief find the H3 index of the resolution res cell containing the lat/lng + */ +DECLSPEC H3Error H3_EXPORT(latLngToCell)(const LatLng *g, int res, + H3Index *out); +/** @} */ + +/** @defgroup cellToLatLng cellToLatLng + * Functions for cellToLatLng + * @{ + */ +/** @brief find the lat/lng center point g of the cell h3 */ +DECLSPEC H3Error H3_EXPORT(cellToLatLng)(H3Index h3, LatLng *g); +/** @} */ + +/** @defgroup cellToBoundary cellToBoundary + * Functions for cellToBoundary + * @{ + */ +/** @brief give the cell boundary in lat/lng coordinates for the cell h3 */ +DECLSPEC H3Error H3_EXPORT(cellToBoundary)(H3Index h3, CellBoundary *gp); +/** @} */ + +/** @defgroup gridDisk gridDisk + * Functions for gridDisk + * @{ + */ +/** @brief maximum number of hexagons in k-ring */ +DECLSPEC H3Error H3_EXPORT(maxGridDiskSize)(int k, int64_t *out); + +/** @brief hexagons neighbors in all directions, assuming no pentagons */ +DECLSPEC H3Error H3_EXPORT(gridDiskUnsafe)(H3Index origin, int k, H3Index *out); +/** @} */ + +/** @brief hexagons neighbors in all directions, assuming no pentagons, + * reporting distance from origin */ +DECLSPEC H3Error H3_EXPORT(gridDiskDistancesUnsafe)(H3Index origin, int k, + H3Index *out, + int *distances); + +/** @brief hexagons neighbors in all directions reporting distance from origin + */ +DECLSPEC H3Error H3_EXPORT(gridDiskDistancesSafe)(H3Index origin, int k, + H3Index *out, int *distances); + +/** @brief collection of hex rings sorted by ring for all given hexagons */ +DECLSPEC H3Error H3_EXPORT(gridDisksUnsafe)(H3Index *h3Set, int length, int k, + H3Index *out); + +/** @brief hexagon neighbors in all directions */ +DECLSPEC H3Error H3_EXPORT(gridDisk)(H3Index origin, int k, H3Index *out); +/** @} */ + +/** @defgroup gridDiskDistances gridDiskDistances + * Functions for gridDiskDistances + * @{ + */ +/** @brief hexagon neighbors in all directions, reporting distance from origin + */ +DECLSPEC H3Error H3_EXPORT(gridDiskDistances)(H3Index origin, int k, + H3Index *out, int *distances); +/** @} */ + +/** @defgroup gridRing gridRing + * Functions for 
gridRing + * @{ + */ +/** @brief maximum number of hexagons in hollow k-ring */ +DECLSPEC H3Error H3_EXPORT(maxGridRingSize)(int k, int64_t *out); + +/** @brief hollow hexagon ring k distance from origin */ +DECLSPEC H3Error H3_EXPORT(gridRingUnsafe)(H3Index origin, int k, H3Index *out); + +/** @brief hollow hexagon ring k distance from origin */ +DECLSPEC H3Error H3_EXPORT(gridRing)(H3Index origin, int k, H3Index *out); +/** @} */ + +/** @defgroup polygonToCells polygonToCells + * Functions for polygonToCells + * @{ + */ +/** @brief maximum number of cells that could be in the polygon */ +DECLSPEC H3Error H3_EXPORT(maxPolygonToCellsSize)(const GeoPolygon *geoPolygon, + int res, uint32_t flags, + int64_t *out); + +/** @brief cells within the given polygon */ +DECLSPEC H3Error H3_EXPORT(polygonToCells)(const GeoPolygon *geoPolygon, + int res, uint32_t flags, + H3Index *out); +/** @} */ + +/** @defgroup polygonToCellsExperimental polygonToCellsExperimental + * Functions for polygonToCellsExperimental. + * This is an experimental-only API and is subject to change in minor versions. + * @{ + */ +/** @brief maximum number of cells that could be in the polygon */ +DECLSPEC H3Error H3_EXPORT(maxPolygonToCellsSizeExperimental)( + const GeoPolygon *polygon, int res, uint32_t flags, int64_t *out); + +/** @brief cells within the given polygon */ +DECLSPEC H3Error H3_EXPORT(polygonToCellsExperimental)( + const GeoPolygon *polygon, int res, uint32_t flags, int64_t size, + H3Index *out); +/** @} */ + +/** @defgroup cellsToMultiPolygon cellsToMultiPolygon + * Functions for cellsToMultiPolygon (currently a binding-only concept) + * @{ + */ +/** @brief Create a LinkedGeoPolygon from a set of contiguous hexagons */ +DECLSPEC H3Error H3_EXPORT(cellsToLinkedMultiPolygon)(const H3Index *h3Set, + const int numHexes, + LinkedGeoPolygon *out); + +/** @brief Free all memory created for a LinkedGeoPolygon */ +DECLSPEC void H3_EXPORT(destroyLinkedMultiPolygon)(LinkedGeoPolygon *polygon); +/** @} */ + +/** @defgroup degsToRads degsToRads + * Functions for degsToRads + * @{ + */ +/** @brief converts degrees to radians */ +DECLSPEC double H3_EXPORT(degsToRads)(double degrees); +/** @} */ + +/** @defgroup radsToDegs radsToDegs + * Functions for radsToDegs + * @{ + */ +/** @brief converts radians to degrees */ +DECLSPEC double H3_EXPORT(radsToDegs)(double radians); +/** @} */ + +/** @defgroup greatCircleDistance greatCircleDistance + * Functions for distance + * @{ + */ +/** @brief "great circle distance" between pairs of LatLng points in radians*/ +DECLSPEC double H3_EXPORT(greatCircleDistanceRads)(const LatLng *a, + const LatLng *b); + +/** @brief "great circle distance" between pairs of LatLng points in + * kilometers*/ +DECLSPEC double H3_EXPORT(greatCircleDistanceKm)(const LatLng *a, + const LatLng *b); + +/** @brief "great circle distance" between pairs of LatLng points in meters*/ +DECLSPEC double H3_EXPORT(greatCircleDistanceM)(const LatLng *a, + const LatLng *b); +/** @} */ + +/** @defgroup getHexagonAreaAvg getHexagonAreaAvg + * Functions for getHexagonAreaAvg + * @{ + */ +/** @brief average hexagon area in square kilometers (excludes pentagons) */ +DECLSPEC H3Error H3_EXPORT(getHexagonAreaAvgKm2)(int res, double *out); + +/** @brief average hexagon area in square meters (excludes pentagons) */ +DECLSPEC H3Error H3_EXPORT(getHexagonAreaAvgM2)(int res, double *out); +/** @} */ + +/** @defgroup cellArea cellArea + * Functions for cellArea + * @{ + */ +/** @brief exact area for a specific cell (hexagon or 
pentagon) in radians^2 */ +DECLSPEC H3Error H3_EXPORT(cellAreaRads2)(H3Index h, double *out); + +/** @brief exact area for a specific cell (hexagon or pentagon) in kilometers^2 + */ +DECLSPEC H3Error H3_EXPORT(cellAreaKm2)(H3Index h, double *out); + +/** @brief exact area for a specific cell (hexagon or pentagon) in meters^2 */ +DECLSPEC H3Error H3_EXPORT(cellAreaM2)(H3Index h, double *out); +/** @} */ + +/** @defgroup getHexagonEdgeLengthAvg getHexagonEdgeLengthAvg + * Functions for getHexagonEdgeLengthAvg + * @{ + */ +/** @brief average hexagon edge length in kilometers (excludes pentagons) */ +DECLSPEC H3Error H3_EXPORT(getHexagonEdgeLengthAvgKm)(int res, double *out); + +/** @brief average hexagon edge length in meters (excludes pentagons) */ +DECLSPEC H3Error H3_EXPORT(getHexagonEdgeLengthAvgM)(int res, double *out); +/** @} */ + +/** @defgroup edgeLength edgeLength + * Functions for edgeLength + * @{ + */ +/** @brief exact length for a specific directed edge in radians*/ +DECLSPEC H3Error H3_EXPORT(edgeLengthRads)(H3Index edge, double *length); + +/** @brief exact length for a specific directed edge in kilometers*/ +DECLSPEC H3Error H3_EXPORT(edgeLengthKm)(H3Index edge, double *length); + +/** @brief exact length for a specific directed edge in meters*/ +DECLSPEC H3Error H3_EXPORT(edgeLengthM)(H3Index edge, double *length); +/** @} */ + +/** @defgroup getNumCells getNumCells + * Functions for getNumCells + * @{ + */ +/** @brief number of cells (hexagons and pentagons) for a given resolution + * + * It works out to be `2 + 120*7^r` for resolution `r`. + * + * # Mathematical notes + * + * Let h(n) be the number of children n levels below + * a single *hexagon*. + * + * Then h(n) = 7^n. + * + * Let p(n) be the number of children n levels below + * a single *pentagon*. + * + * Then p(0) = 1, and p(1) = 6, since each pentagon + * has 5 hexagonal immediate children and 1 pentagonal + * immediate child. + * + * In general, we have the recurrence relation + * + * p(n) = 5*h(n-1) + p(n-1) + * = 5*7^(n-1) + p(n-1). + * + * Working through the recurrence, we get that + * + * p(n) = 1 + 5*\sum_{k=1}^n 7^{k-1} + * = 1 + 5*(7^n - 1)/6, + * + * using the closed form for a geometric series. + * + * Using the closed forms for h(n) and p(n), we can + * get a closed form for the total number of cells + * at resolution r: + * + * c(r) = 12*p(r) + 110*h(r) + * = 2 + 120*7^r. + * + * + * @param res H3 cell resolution + * + * @return number of cells at resolution `res` + */ +DECLSPEC H3Error H3_EXPORT(getNumCells)(int res, int64_t *out); +/** @} */ + +/** @defgroup getRes0Cells getRes0Cells + * Functions for getRes0Cells + * @{ + */ +/** @brief returns the number of resolution 0 cells (hexagons and pentagons) */ +DECLSPEC int H3_EXPORT(res0CellCount)(void); + +/** @brief provides all base cells in H3Index format*/ +DECLSPEC H3Error H3_EXPORT(getRes0Cells)(H3Index *out); +/** @} */ + +/** @defgroup getPentagons getPentagons + * Functions for getPentagons + * @{ + */ +/** @brief returns the number of pentagons per resolution */ +DECLSPEC int H3_EXPORT(pentagonCount)(void); + +/** @brief generates all pentagons at the specified resolution */ +DECLSPEC H3Error H3_EXPORT(getPentagons)(int res, H3Index *out); +/** @} */ + +/** @defgroup getResolution getResolution + * Functions for getResolution + * @{ + */ +/** @brief returns the resolution of the provided H3 index + * Works on both cells and directed edges. 
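+ * For example, a cell and a directed edge originating from it report
+ * the same resolution.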
*/ +DECLSPEC int H3_EXPORT(getResolution)(H3Index h); +/** @} */ + +/** @defgroup getBaseCellNumber getBaseCellNumber + * Functions for getBaseCellNumber + * @{ + */ +/** @brief returns the base cell "number" (0 to 121) of the provided H3 cell + * + * Note: Technically works on H3 edges, but will return base cell of the + * origin cell. */ +DECLSPEC int H3_EXPORT(getBaseCellNumber)(H3Index h); +/** @} */ + +/** @defgroup getIndexDigit getIndexDigit + * Functions for getIndexDigit + * @{ + */ +/** @brief returns the indexing digit of the provided H3 cell at a given + * resolution + * + * Indexing digits are 1-indexed beginning with the digit for resolution 1. */ +DECLSPEC H3Error H3_EXPORT(getIndexDigit)(H3Index h, int res, int *out); +/** @} */ + +/** @defgroup constructCell constructCell + * Functions for constructCell + * @{ + */ +/** @brief create a cell from its components + * Only allows for constructing valid H3 cells. + **/ +DECLSPEC H3Error H3_EXPORT(constructCell)(int res, int baseCellNumber, + const int *digits, H3Index *out); +/** @} */ + +/** @defgroup stringToH3 stringToH3 + * Functions for stringToH3 + * @{ + */ +/** @brief converts the canonical string format to H3Index format */ +DECLSPEC H3Error H3_EXPORT(stringToH3)(const char *str, H3Index *out); +/** @} */ + +/** @defgroup h3ToString h3ToString + * Functions for h3ToString + * @{ + */ +/** @brief converts an H3Index to a canonical string */ +DECLSPEC H3Error H3_EXPORT(h3ToString)(H3Index h, char *str, size_t sz); +/** @} */ + +/** @defgroup isValidCell isValidCell + * Functions for isValidCell + * @{ + */ +/** @brief confirms if an H3Index is a valid cell (hexagon or pentagon) + * In particular, returns 0 (False) for H3 directed edges or invalid data + */ +DECLSPEC int H3_EXPORT(isValidCell)(H3Index h); +/** @} */ + +/** @defgroup isValidIndex isValidIndex + * Functions for isValidIndex + * @{ + */ +/** @brief confirms if an H3Index is valid for any mode (cell, directed edge, or + * vertex) Returns 1 if the H3 index is valid for any supported type, 0 + * otherwise + */ +DECLSPEC int H3_EXPORT(isValidIndex)(H3Index h); +/** @} */ + +/** @defgroup cellToParent cellToParent + * Functions for cellToParent + * @{ + */ +/** @brief returns the parent (or grandparent, etc) cell of the given cell + */ +DECLSPEC H3Error H3_EXPORT(cellToParent)(H3Index h, int parentRes, + H3Index *parent); +/** @} */ + +/** @defgroup cellToChildren cellToChildren + * Functions for cellToChildren + * @{ + */ +/** @brief determines the exact number of children (or grandchildren, etc) + * that would be returned for the given cell */ +DECLSPEC H3Error H3_EXPORT(cellToChildrenSize)(H3Index h, int childRes, + int64_t *out); + +/** @brief provides the children (or grandchildren, etc) of the given cell */ +DECLSPEC H3Error H3_EXPORT(cellToChildren)(H3Index h, int childRes, + H3Index *children); +/** @} */ + +/** @defgroup cellToCenterChild cellToCenterChild + * Functions for cellToCenterChild + * @{ + */ +/** @brief returns the center child of the given cell at the specified + * resolution */ +DECLSPEC H3Error H3_EXPORT(cellToCenterChild)(H3Index h, int childRes, + H3Index *child); +/** @} */ + +/** @defgroup cellToChildPos cellToChildPos + * Functions for cellToChildPos + * @{ + */ +/** @brief Returns the position of the cell within an ordered list of all + * children of the cell's parent at the specified resolution */ +DECLSPEC H3Error H3_EXPORT(cellToChildPos)(H3Index child, int parentRes, + int64_t *out); +/** @} */ + +/** @defgroup childPosToCell 
childPosToCell + * Functions for childPosToCell + * @{ + */ +/** @brief Returns the child cell at a given position within an ordered list of + * all children at the specified resolution */ +DECLSPEC H3Error H3_EXPORT(childPosToCell)(int64_t childPos, H3Index parent, + int childRes, H3Index *child); +/** @} */ + +/** @defgroup compactCells compactCells + * Functions for compactCells + * @{ + */ +/** @brief compacts the given set of hexagons as best as possible */ +DECLSPEC H3Error H3_EXPORT(compactCells)(const H3Index *h3Set, + H3Index *compactedSet, + const int64_t numHexes); +/** @} */ + +/** @defgroup uncompactCells uncompactCells + * Functions for uncompactCells + * @{ + */ +/** @brief determines the exact number of hexagons that will be uncompacted + * from the compacted set */ +DECLSPEC H3Error H3_EXPORT(uncompactCellsSize)(const H3Index *compactedSet, + const int64_t numCompacted, + const int res, int64_t *out); + +/** @brief uncompacts the compacted hexagon set */ +DECLSPEC H3Error H3_EXPORT(uncompactCells)(const H3Index *compactedSet, + const int64_t numCompacted, + H3Index *outSet, + const int64_t numOut, const int res); +/** @} */ + +/** @defgroup isResClassIII isResClassIII + * Functions for isResClassIII + * @{ + */ +/** @brief determines if a hexagon is Class III (or Class II) */ +DECLSPEC int H3_EXPORT(isResClassIII)(H3Index h); +/** @} */ + +/** @defgroup isPentagon isPentagon + * Functions for isPentagon + * @{ + */ +/** @brief determines if an H3 cell is a pentagon */ +DECLSPEC int H3_EXPORT(isPentagon)(H3Index h); +/** @} */ + +/** @defgroup getIcosahedronFaces getIcosahedronFaces + * Functions for getIcosahedronFaces + * @{ + */ +/** @brief Max number of icosahedron faces intersected by an index */ +DECLSPEC H3Error H3_EXPORT(maxFaceCount)(H3Index h3, int *out); + +/** @brief Find all icosahedron faces intersected by a given H3 index */ +DECLSPEC H3Error H3_EXPORT(getIcosahedronFaces)(H3Index h3, int *out); +/** @} */ + +/** @defgroup areNeighborCells areNeighborCells + * Functions for areNeighborCells + * @{ + */ +/** @brief returns whether or not the provided hexagons border */ +DECLSPEC H3Error H3_EXPORT(areNeighborCells)(H3Index origin, + H3Index destination, int *out); +/** @} */ + +/** @defgroup cellsToDirectedEdge cellsToDirectedEdge + * Functions for cellsToDirectedEdge + * @{ + */ +/** @brief returns the directed edge H3Index for the specified origin and + * destination */ +DECLSPEC H3Error H3_EXPORT(cellsToDirectedEdge)(H3Index origin, + H3Index destination, + H3Index *out); +/** @} */ + +/** @defgroup isValidDirectedEdge isValidDirectedEdge + * Functions for isValidDirectedEdge + * @{ + */ +/** @brief returns whether the H3Index is a valid directed edge */ +DECLSPEC int H3_EXPORT(isValidDirectedEdge)(H3Index edge); +/** @} */ + +/** @defgroup getDirectedEdgeOrigin \ + * getDirectedEdgeOrigin + * Functions for getDirectedEdgeOrigin + * @{ + */ +/** @brief Returns the origin hexagon H3Index from the directed edge + * H3Index */ +DECLSPEC H3Error H3_EXPORT(getDirectedEdgeOrigin)(H3Index edge, H3Index *out); +/** @} */ + +/** @defgroup getDirectedEdgeDestination \ + * getDirectedEdgeDestination + * Functions for getDirectedEdgeDestination + * @{ + */ +/** @brief Returns the destination hexagon H3Index from the directed edge + * H3Index */ +DECLSPEC H3Error H3_EXPORT(getDirectedEdgeDestination)(H3Index edge, + H3Index *out); +/** @} */ + +/** @defgroup directedEdgeToCells \ + * directedEdgeToCells + * Functions for directedEdgeToCells + * @{ + */ +/** @brief 
Returns the origin and destination hexagons from the directed
+ * edge H3Index */
+DECLSPEC H3Error H3_EXPORT(directedEdgeToCells)(H3Index edge,
+                                                H3Index *originDestination);
+/** @} */
+
+/** @defgroup originToDirectedEdges \
+ * originToDirectedEdges
+ * Functions for originToDirectedEdges
+ * @{
+ */
+/** @brief Returns the 6 (or 5 for pentagons) edges associated with the H3Index
+ */
+DECLSPEC H3Error H3_EXPORT(originToDirectedEdges)(H3Index origin,
+                                                  H3Index *edges);
+/** @} */
+
+/** @defgroup directedEdgeToBoundary directedEdgeToBoundary
+ * Functions for directedEdgeToBoundary
+ * @{
+ */
+/** @brief Returns the CellBoundary containing the coordinates of the edge */
+DECLSPEC H3Error H3_EXPORT(directedEdgeToBoundary)(H3Index edge,
+                                                   CellBoundary *gb);
+/** @} */
+
+/** @defgroup cellToVertex cellToVertex
+ * Functions for cellToVertex
+ * @{
+ */
+/** @brief Returns a single vertex for a given cell, as an H3 index */
+DECLSPEC H3Error H3_EXPORT(cellToVertex)(H3Index origin, int vertexNum,
+                                         H3Index *out);
+/** @} */
+
+/** @defgroup cellToVertexes cellToVertexes
+ * Functions for cellToVertexes
+ * @{
+ */
+/** @brief Returns all vertexes for a given cell, as H3 indexes */
+DECLSPEC H3Error H3_EXPORT(cellToVertexes)(H3Index origin, H3Index *vertexes);
+/** @} */
+
+/** @defgroup vertexToLatLng vertexToLatLng
+ * Functions for vertexToLatLng
+ * @{
+ */
+/** @brief Returns the latitude/longitude coordinates of the given vertex */
+DECLSPEC H3Error H3_EXPORT(vertexToLatLng)(H3Index vertex, LatLng *point);
+/** @} */
+
+/** @defgroup isValidVertex isValidVertex
+ * Functions for isValidVertex
+ * @{
+ */
+/** @brief Whether the input is a valid H3 vertex */
+DECLSPEC int H3_EXPORT(isValidVertex)(H3Index vertex);
+/** @} */
+
+/** @defgroup gridDistance gridDistance
+ * Functions for gridDistance
+ * @{
+ */
+/** @brief Returns grid distance between two indexes */
+DECLSPEC H3Error H3_EXPORT(gridDistance)(H3Index origin, H3Index h3,
+                                         int64_t *distance);
+/** @} */
+
+/** @defgroup gridPathCells gridPathCells
+ * Functions for gridPathCells
+ * @{
+ */
+/** @brief Number of indexes in a line connecting two indexes */
+DECLSPEC H3Error H3_EXPORT(gridPathCellsSize)(H3Index start, H3Index end,
+                                              int64_t *size);
+
+/** @brief Line of h3 indexes connecting two indexes */
+DECLSPEC H3Error H3_EXPORT(gridPathCells)(H3Index start, H3Index end,
+                                          H3Index *out);
+/** @} */
+
+/** @defgroup cellToLocalIj cellToLocalIj
+ * Functions for cellToLocalIj
+ * @{
+ */
+/** @brief Returns two dimensional coordinates for the given index */
+DECLSPEC H3Error H3_EXPORT(cellToLocalIj)(H3Index origin, H3Index h3,
+                                          uint32_t mode, CoordIJ *out);
+/** @} */
+
+/** @defgroup localIjToCell localIjToCell
+ * Functions for localIjToCell
+ * @{
+ */
+/** @brief Returns index for the given two dimensional coordinates */
+DECLSPEC H3Error H3_EXPORT(localIjToCell)(H3Index origin, const CoordIJ *ij,
+                                          uint32_t mode, H3Index *out);
+/** @} */
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
diff --git a/python/h3/_cy/h3lib.pxd b/python/h3/_cy/h3lib.pxd
new file mode 100644
index 000000000..8cf74a7e2
--- /dev/null
+++ b/python/h3/_cy/h3lib.pxd
@@ -0,0 +1,201 @@
+# cython: c_string_type=unicode, c_string_encoding=utf8
+from cpython cimport bool
+from libc.stdint cimport uint32_t, uint64_t, int64_t
+
+ctypedef object H3str
+
+cdef extern from 'h3api.h':
+    cdef int H3_VERSION_MAJOR
+    cdef int H3_VERSION_MINOR
+    cdef int H3_VERSION_PATCH
+
+    ctypedef uint64_t H3int 'H3Index'
+
+    ctypedef uint32_t H3Error
+    ctypedef enum
H3ErrorCodes: + E_SUCCESS = 0 + E_FAILED = 1 + E_DOMAIN = 2 + E_LATLNG_DOMAIN = 3 + E_RES_DOMAIN = 4 + E_CELL_INVALID = 5 + E_DIR_EDGE_INVALID = 6 + E_UNDIR_EDGE_INVALID = 7 + E_VERTEX_INVALID = 8 + E_PENTAGON = 9 + E_DUPLICATE_INPUT = 10 + E_NOT_NEIGHBORS = 11 + E_RES_MISMATCH = 12 + E_MEMORY_ALLOC = 13 + E_MEMORY_BOUNDS = 14 + E_OPTION_INVALID = 15 + E_INDEX_INVALID = 16 + E_BASE_CELL_DOMAIN = 17 + E_DIGIT_DOMAIN = 18 + E_DELETED_DIGIT = 19 + H3_ERROR_END # sentinel value + + ctypedef struct LatLng: + double lat # in radians + double lng # in radians + + ctypedef struct CellBoundary: + int num_verts 'numVerts' + LatLng verts[10] # MAX_CELL_BNDRY_VERTS + + ctypedef struct CoordIJ: + int i + int j + + ctypedef struct LinkedLatLng: + LatLng data 'vertex' + LinkedLatLng *next + + # renaming these for clarity + ctypedef struct LinkedGeoLoop: + LinkedLatLng *data 'first' + LinkedLatLng *_data_last 'last' # not needed in Cython bindings + LinkedGeoLoop *next + + ctypedef struct LinkedGeoPolygon: + LinkedGeoLoop *data 'first' + LinkedGeoLoop *_data_last 'last' # not needed in Cython bindings + LinkedGeoPolygon *next + + ctypedef struct GeoLoop: + int numVerts + LatLng *verts + + ctypedef struct GeoPolygon: + GeoLoop geoloop + int numHoles + GeoLoop *holes + + int isValidCell(H3int h) nogil + int isPentagon(H3int h) nogil + int isResClassIII(H3int h) nogil + int isValidDirectedEdge(H3int edge) nogil + int isValidVertex(H3int v) nogil + int isValidIndex(H3int h) nogil + + double degsToRads(double degrees) nogil + double radsToDegs(double radians) nogil + + int getResolution(H3int h) nogil + int getBaseCellNumber(H3int h) nogil + H3Error getIndexDigit(H3int h, int res, int *out) nogil + H3Error constructCell(int res, int baseCellNumber, const int *digits, H3int *out) nogil + + H3Error latLngToCell(const LatLng *g, int res, H3int *out) nogil + H3Error cellToLatLng(H3int h, LatLng *) nogil + H3Error gridDistance(H3int h1, H3int h2, int64_t *distance) nogil + + H3Error cellToVertex(H3int cell, int vertexNum, H3int *out) nogil + H3Error cellToVertexes(H3int cell, H3int *vertexes) nogil + H3Error vertexToLatLng(H3int vertex, LatLng *coord) nogil + + H3Error maxGridDiskSize(int k, int64_t *out) nogil # num/out/N? + H3Error gridDisk(H3int h, int k, H3int *out) nogil + + H3Error cellToParent( H3int h, int parentRes, H3int *parent) nogil + H3Error cellToCenterChild(H3int h, int childRes, H3int *child) nogil + H3Error cellToChildPos(H3int child, int parentRes, int64_t *out) nogil + H3Error childPosToCell(int64_t childPos, H3int parent, int childRes, H3int *child) nogil + + H3Error cellToChildrenSize(H3int h, int childRes, int64_t *num) nogil # num/out/N? 
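+    # note: cellToChildrenSize reports the buffer length that the
+    # cellToChildren call below expects its `children` array to provide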
+ H3Error cellToChildren( H3int h, int childRes, H3int *children) nogil + + H3Error compactCells( + const H3int *cells_u, + H3int *cells_c, + const int num_u + ) nogil + H3Error uncompactCellsSize( + const H3int *cells_c, + const int64_t num_c, + const int res, + int64_t *num_u + ) nogil + H3Error uncompactCells( + const H3int *cells_c, + const int num_c, + H3int *cells_u, + const int num_u, + const int res + ) nogil + + H3Error getNumCells(int res, int64_t *out) nogil + int pentagonCount() nogil + int res0CellCount() nogil + H3Error getPentagons(int res, H3int *out) nogil + H3Error getRes0Cells(H3int *out) nogil + + H3Error gridPathCellsSize(H3int start, H3int end, int64_t *size) nogil + H3Error gridPathCells(H3int start, H3int end, H3int *out) nogil + + H3Error getHexagonAreaAvgKm2(int res, double *out) nogil + H3Error getHexagonAreaAvgM2(int res, double *out) nogil + + H3Error cellAreaRads2(H3int h, double *out) nogil + H3Error cellAreaKm2(H3int h, double *out) nogil + H3Error cellAreaM2(H3int h, double *out) nogil + + H3Error maxFaceCount(H3int h, int *out) nogil + H3Error getIcosahedronFaces(H3int h3, int *out) nogil + + H3Error cellToLocalIj(H3int origin, H3int h3, uint32_t mode, CoordIJ *out) nogil + H3Error localIjToCell(H3int origin, const CoordIJ *ij, uint32_t mode, H3int *out) nogil + + H3Error gridDiskDistances(H3int origin, int k, H3int *out, int *distances) nogil + H3Error gridRing(H3int origin, int k, H3int *out) nogil + H3Error gridRingUnsafe(H3int origin, int k, H3int *out) nogil + + H3Error areNeighborCells(H3int origin, H3int destination, int *out) nogil + H3Error cellsToDirectedEdge(H3int origin, H3int destination, H3int *out) nogil + H3Error getDirectedEdgeOrigin(H3int edge, H3int *out) nogil + H3Error getDirectedEdgeDestination(H3int edge, H3int *out) nogil + H3Error originToDirectedEdges(H3int origin, H3int *edges) nogil + # todo: directedEdgeToCells + + H3Error getHexagonEdgeLengthAvgKm(int res, double *out) nogil + H3Error getHexagonEdgeLengthAvgM(int res, double *out) nogil + + H3Error edgeLengthRads(H3int edge, double *out) nogil + H3Error edgeLengthKm(H3int edge, double *out) nogil + H3Error edgeLengthM(H3int edge, double *out) nogil + + H3Error cellToBoundary(H3int h3, CellBoundary *gp) nogil + H3Error directedEdgeToBoundary(H3int edge, CellBoundary *gb) nogil + + double greatCircleDistanceRads(const LatLng *a, const LatLng *b) nogil + double greatCircleDistanceKm(const LatLng *a, const LatLng *b) nogil + double greatCircleDistanceM(const LatLng *a, const LatLng *b) nogil + + H3Error cellsToLinkedMultiPolygon(const H3int *h3Set, const int numCells, LinkedGeoPolygon *out) + void destroyLinkedMultiPolygon(LinkedGeoPolygon *polygon) + + H3Error maxPolygonToCellsSize(const GeoPolygon *geoPolygon, int res, uint32_t flags, uint64_t *count) + H3Error polygonToCells(const GeoPolygon *geoPolygon, int res, uint32_t flags, H3int *out) + + H3Error maxPolygonToCellsSizeExperimental(const GeoPolygon *geoPolygon, int res, uint32_t flags, uint64_t *count) + H3Error polygonToCellsExperimental(const GeoPolygon *geoPolygon, int res, uint32_t flags, uint64_t sz, H3int *out) + + # ctypedef struct GeoMultiPolygon: + # int numPolygons + # GeoPolygon *polygons + + # int hexRange(H3int origin, int k, H3int *out) + + # int hexRangeDistances(H3int origin, int k, H3int *out, int *distances) + + # int hexRanges(H3int *h3Set, int length, int k, H3int *out) + + # void h3SetToLinkedGeo(const H3int *h3Set, const int numCells, LinkedGeoPolygon *out) + + # void 
destroyLinkedPolygon(LinkedGeoPolygon *polygon)
+
+    # H3int stringToH3(const char *str)
+
+    # void h3ToString(H3int h, char *str, size_t sz)
+
+    # void getH3intesFromUnidirectionalEdge(H3int edge, H3int *originDestination)
diff --git a/python/h3/_cy/latlng.cpython-312-x86_64-linux-gnu.so b/python/h3/_cy/latlng.cpython-312-x86_64-linux-gnu.so
new file mode 100644
index 000000000..add14d39d
Binary files /dev/null and b/python/h3/_cy/latlng.cpython-312-x86_64-linux-gnu.so differ
diff --git a/python/h3/_cy/latlng.pxd b/python/h3/_cy/latlng.pxd
new file mode 100644
index 000000000..40dfc3490
--- /dev/null
+++ b/python/h3/_cy/latlng.pxd
@@ -0,0 +1,7 @@
+from .h3lib cimport H3int
+
+cpdef H3int latlng_to_cell(double lat, double lng, int res) except 1
+cpdef (double, double) cell_to_latlng(H3int h) except *
+cpdef double great_circle_distance(
+    double lat1, double lng1,
+    double lat2, double lng2, unit=*) except -1
diff --git a/python/h3/_cy/latlng.pyx b/python/h3/_cy/latlng.pyx
new file mode 100644
index 000000000..d83a9d3d8
--- /dev/null
+++ b/python/h3/_cy/latlng.pyx
@@ -0,0 +1,325 @@
+from libc.stdint cimport uint64_t
+
+cimport h3lib
+from h3lib cimport bool, H3int
+
+from .util cimport (
+    check_cell,
+    check_edge,
+    check_res,
+    deg2coord,
+    coord2deg
+)
+
+from .error_system cimport check_for_error
+
+from .memory cimport H3MemoryManager
+
+# TODO: We might be OK with taking the GIL for the functions in this module
+from libc.stdlib cimport (
+    # malloc as h3_malloc,  # not used
+    calloc as h3_calloc,
+    realloc as h3_realloc,
+    free as h3_free,
+)
+
+
+cpdef H3int latlng_to_cell(double lat, double lng, int res) except 1:
+    cdef:
+        h3lib.LatLng c
+        H3int out
+
+    c = deg2coord(lat, lng)
+
+    check_for_error(
+        h3lib.latLngToCell(&c, res, &out)
+    )
+
+    return out
+
+
+cpdef (double, double) cell_to_latlng(H3int h) except *:
+    """Map an H3 cell into its centroid geo-coordinate (lat/lng)"""
+    cdef:
+        h3lib.LatLng c
+
+    check_cell(h)
+    # todo: think about: if you give this an invalid cell, should it still return a lat/lng?
+    # idea: safe and unsafe APIs?
+
+    check_for_error(
+        h3lib.cellToLatLng(h, &c)
+    )
+
+    return coord2deg(c)
+
+
+cdef h3lib.GeoLoop make_geoloop(latlngs) except *:
+    """
+    The returned `GeoLoop` must be freed with a call to `free_geoloop`.
+
+    Parameters
+    ----------
+    latlngs : list or tuple
+        GeoLoop: A sequence of >= 3 (lat, lng) pairs where the last
+        element may or may not be same as the first (to form a closed loop).
+        The order of the pairs may be either clockwise or counterclockwise.
+    """
+    cdef:
+        h3lib.GeoLoop gl
+
+    gl.numVerts = len(latlngs)
+
+    # todo: need for memory management
+    # can automatically free?
+    gl.verts = <h3lib.LatLng *> h3_calloc(gl.numVerts, sizeof(h3lib.LatLng))
+
+    for i, (lat, lng) in enumerate(latlngs):
+        gl.verts[i] = deg2coord(lat, lng)
+
+    return gl
+
+
+cdef free_geoloop(h3lib.GeoLoop* gl):
+    h3_free(gl.verts)
+    gl.verts = NULL
+
+
+cdef class GeoPolygon:
+    cdef:
+        h3lib.GeoPolygon gp
+
+    def __cinit__(self, outer, holes=None):
+        """
+
+        Parameters
+        ----------
+        outer : list or tuple
+            GeoLoop
+            A GeoLoop is a sequence of >= 3 (lat, lng) pairs where the last
+            element may or may not be same as the first (to form a closed loop).
+            The order of the pairs may be either clockwise or counterclockwise.
+        holes : list or tuple
+            A sequence of GeoLoops
+        """
+        if holes is None:
+            holes = []
+
+        self.gp.geoloop = make_geoloop(outer)
+        self.gp.numHoles = len(holes)
+        self.gp.holes = NULL
+
+        if len(holes) > 0:
+            self.gp.holes = <h3lib.GeoLoop *> h3_calloc(len(holes), sizeof(h3lib.GeoLoop))
+            for i, hole in enumerate(holes):
+                self.gp.holes[i] = make_geoloop(hole)
+
+
+    def __dealloc__(self):
+        free_geoloop(&self.gp.geoloop)
+
+        for i in range(self.gp.numHoles):
+            free_geoloop(&self.gp.holes[i])
+
+        h3_free(self.gp.holes)
+
+
+def polygon_to_cells(outer, int res, holes=None):
+    """ Get the set of cells whose center is contained in a polygon.
+
+    The polygon is defined similarly to the GeoJSON standard, with an exterior
+    `outer` ring of lat/lng points, and a list of `holes`, each of which are also
+    rings of lat/lng points.
+
+    Each ring may be in clockwise or counter-clockwise order
+    (right-hand rule or not), and may or may not be a closed loop (where the last
+    element is equal to the first).
+    The GeoJSON spec requires the right-hand rule and a closed loop, but
+    this function relaxes those constraints.
+
+    Unlike the GeoJSON standard, the elements of the lat/lng pairs of each
+    ring are in lat/lng order, instead of lng/lat order.
+
+    We'll handle translation to different formats in the Python code,
+    rather than the Cython code.
+
+    Parameters
+    ----------
+    outer : list or tuple
+        A ring given by a sequence of lat/lng pairs.
+    res : int
+        The resolution of the output hexagons
+    holes : list or tuple
+        A collection of rings, each given by a sequence of lat/lng pairs.
+        These describe any holes in the polygon.
+    """
+    cdef:
+        uint64_t n
+
+    check_res(res)
+
+    if not outer:
+        return H3MemoryManager(0).to_mv()
+
+    gp = GeoPolygon(outer, holes=holes)
+
+    check_for_error(
+        h3lib.maxPolygonToCellsSize(&gp.gp, res, 0, &n)
+    )
+
+    hmm = H3MemoryManager(n)
+    check_for_error(
+        h3lib.polygonToCells(&gp.gp, res, 0, hmm.ptr)
+    )
+    mv = hmm.to_mv()
+
+    return mv
+
+
+def polygons_to_cells(polygons, int res):
+    mvs = [
+        polygon_to_cells(outer=poly.outer, res=res, holes=poly.holes)
+        for poly in polygons
+    ]
+
+    n = sum(map(len, mvs))
+    hmm = H3MemoryManager(n)
+
+    # probably super inefficient, but it is working!
+    # todo: move this to C
+    k = 0
+    for mv in mvs:
+        for v in mv:
+            hmm.ptr[k] = v
+            k += 1
+
+    return hmm.to_mv()
+
+
+def polygon_to_cells_experimental(outer, int res, int flag, holes=None):
+    """ Get the set of cells contained in a polygon, under the containment
+    mode given by `flag`.
+
+    The polygon is defined similarly to the GeoJSON standard, with an exterior
+    `outer` ring of lat/lng points, and a list of `holes`, each of which are also
+    rings of lat/lng points.
+
+    Each ring may be in clockwise or counter-clockwise order
+    (right-hand rule or not), and may or may not be a closed loop (where the last
+    element is equal to the first).
+    The GeoJSON spec requires the right-hand rule and a closed loop, but
+    this function relaxes those constraints.
+
+    Unlike the GeoJSON standard, the elements of the lat/lng pairs of each
+    ring are in lat/lng order, instead of lng/lat order.
+
+    We'll handle translation to different formats in the Python code,
+    rather than the Cython code.
+
+    Parameters
+    ----------
+    outer : list or tuple
+        A ring given by a sequence of lat/lng pairs.
+    res : int
+        The resolution of the output hexagons
+    flag : int
+        Polygon-to-cells flag, e.g. the containment mode.
+    holes : list or tuple
+        A collection of rings, each given by a sequence of lat/lng pairs.
+        These describe any holes in the polygon.
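+
+    Examples
+    --------
+    Illustrative usage (the exact cells returned depend on `res`), using
+    containment mode ``CONTAINMENT_CENTER`` (``flag=0`` in the C enum):
+
+    >>> outer = [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34)]
+    >>> mv = polygon_to_cells_experimental(outer, res=7, flag=0)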
+ """ + cdef: + uint64_t n + + check_res(res) + + if not outer: + return H3MemoryManager(0).to_mv() + + gp = GeoPolygon(outer, holes=holes) + + check_for_error( + h3lib.maxPolygonToCellsSizeExperimental(&gp.gp, res, flag, &n) + ) + + hmm = H3MemoryManager(n) + check_for_error( + h3lib.polygonToCellsExperimental(&gp.gp, res, flag, n, hmm.ptr) + ) + mv = hmm.to_mv() + + return mv + + +def polygons_to_cells_experimental(polygons, int res, int flag): + mvs = [ + polygon_to_cells_experimental(outer=poly.outer, res=res, holes=poly.holes, flag=flag) + for poly in polygons + ] + + n = sum(map(len, mvs)) + hmm = H3MemoryManager(n) + + # probably super inefficient, but it is working! + # tood: move this to C + k = 0 + for mv in mvs: + for v in mv: + hmm.ptr[k] = v + k += 1 + + return hmm.to_mv() + + +def cell_to_boundary(H3int h): + """Compose an array of geo-coordinates that outlines a hexagonal cell""" + cdef: + h3lib.CellBoundary gb + + check_cell(h) + + h3lib.cellToBoundary(h, &gb) + + verts = tuple( + coord2deg(gb.verts[i]) + for i in range(gb.num_verts) + ) + + return verts + + +def directed_edge_to_boundary(H3int edge): + """ Returns the CellBoundary containing the coordinates of the edge + """ + cdef: + h3lib.CellBoundary gb + + check_edge(edge) + + h3lib.directedEdgeToBoundary(edge, &gb) + + # todo: move this verts transform into the CellBoundary object + verts = tuple( + coord2deg(gb.verts[i]) + for i in range(gb.num_verts) + ) + + return verts + + +cpdef double great_circle_distance( + double lat1, double lng1, + double lat2, double lng2, unit='km') except -1: + + a = deg2coord(lat1, lng1) + b = deg2coord(lat2, lng2) + + if unit == 'rads': + d = h3lib.greatCircleDistanceRads(&a, &b) + elif unit == 'km': + d = h3lib.greatCircleDistanceKm(&a, &b) + elif unit == 'm': + d = h3lib.greatCircleDistanceM(&a, &b) + else: + raise ValueError('Unknown unit: {}'.format(unit)) + + return d diff --git a/python/h3/_cy/memory.cpython-312-x86_64-linux-gnu.so b/python/h3/_cy/memory.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..1156b6458 Binary files /dev/null and b/python/h3/_cy/memory.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/h3/_cy/memory.pxd b/python/h3/_cy/memory.pxd new file mode 100644 index 000000000..5e259e256 --- /dev/null +++ b/python/h3/_cy/memory.pxd @@ -0,0 +1,12 @@ +from .h3lib cimport H3int + +cdef class H3MemoryManager: + cdef: + size_t n + H3int* ptr + + cdef H3int[:] to_mv(self) + cdef H3int[:] to_mv_keep_zeros(self) + +cdef int[:] int_mv(size_t n) +cpdef H3int[:] iter_to_mv(cells) diff --git a/python/h3/_cy/memory.pyx b/python/h3/_cy/memory.pyx new file mode 100644 index 000000000..0852105fe --- /dev/null +++ b/python/h3/_cy/memory.pyx @@ -0,0 +1,248 @@ +from cython.view cimport array +from .h3lib cimport H3int + +""" +### Memory allocation options + +We have a few options for the memory allocation functions. +There's a trade-off between using the Python allocators which let Python +track memory usage and offers some optimizations vs the system +allocators, which do not need to acquire the GIL. +""" + +""" +System allocation functions. These do not acquire the GIL. +""" +from libc.stdlib cimport ( + # malloc as h3_malloc, # not used + calloc as h3_calloc, + realloc as h3_realloc, + free as h3_free, +) + + +""" +PyMem_Raw* functions should just be wrappers around system allocators +also given in libc.stdlib. These functions do not acquire the GIL. 
+
+Note that these do not have a calloc function until py 3.5 and Cython 3.0,
+so we would need to zero-out memory manually.
+
+https://python.readthedocs.io/en/stable/c-api/memory.html#raw-memory-interface
+"""
+# from cpython.mem cimport (
+#     PyMem_RawMalloc as h3_malloc,
+#     # PyMem_RawCalloc as h3_calloc,  # only in Python >=3.5 (and Cython >=3.0?)
+#     PyMem_RawRealloc as h3_realloc,
+#     PyMem_RawFree as h3_free,
+# )
+
+
+"""
+These functions use the Python allocator (instead of the system allocator),
+which offers some optimizations for Python, and allows Python to track
+memory usage. However, these functions must acquire the GIL.
+
+Note that these do not have a calloc function until py 3.5 and Cython 3.0,
+so we would need to zero-out memory manually.
+
+https://cython.readthedocs.io/en/stable/src/tutorial/memory_allocation.html
+https://python.readthedocs.io/en/stable/c-api/memory.html#memory-interface
+"""
+# from cpython.mem cimport (
+#     PyMem_Malloc as h3_malloc,
+#     # PyMem_Calloc as h3_calloc,  # only in Python >=3.5 (and Cython >=3.0?)
+#     PyMem_Realloc as h3_realloc,
+#     PyMem_Free as h3_free,
+# )
+
+
+cdef size_t move_nonzeros(H3int* a, size_t n):
+    """ Move nonzero elements to front of array `a` of length `n`.
+    Return the number of nonzero elements.
+
+    Loop invariant: Everything *before* `i` or *after* `j` is "done".
+    Move `i` and `j` inwards until they are equal, then exit.
+    You can move `i` forward until there's a zero in front of it.
+    You can move `j` backward until there's a nonzero to the left of it.
+    Anything to the right of `j` is "junk" that can be reallocated.
+
+    | a | b | 0 | c | d | ... |
+              ^           ^
+              i           j
+
+
+    | a | b | d | c | d | ... |
+              ^       ^
+              i       j
+    """
+    cdef:
+        size_t i = 0
+        size_t j = n
+
+    while i < j:
+        if a[j-1] == 0:
+            j -= 1
+            continue
+
+        if a[i] != 0:
+            i += 1
+            continue
+
+        # if we're here, we know:
+        # a[i] == 0
+        # a[j-1] != 0
+        # i < j
+        # so we can swap! (actually, move a[j-1] -> a[i])
+        a[i] = a[j-1]
+        j -= 1
+
+    return i
+
+
+cdef H3int[:] empty_memory_view():
+    # todo: get rid of this?
+    # there's gotta be a better way to do this...
+    # create an empty cython.view.array?
+    cdef:
+        H3int a[1]
+
+    return (<H3int[:]> a)[:0]
+
+
+cdef _remove_zeros(H3MemoryManager x):
+    x.n = move_nonzeros(x.ptr, x.n)
+
+    if x.n == 0:
+        h3_free(x.ptr)
+        x.ptr = NULL
+    else:
+        x.ptr = <H3int*> h3_realloc(x.ptr, x.n*sizeof(H3int))
+        if not x.ptr:
+            raise MemoryError()
+
+
+cdef H3int[:] _copy_to_mv(const H3int* ptr, size_t n):
+    cdef:
+        array arr
+
+    arr = <H3int[:n]> ptr
+    arr.callback_free_data = h3_free
+
+    return arr
+
+
+cdef H3int[:] _create_mv(H3MemoryManager x):
+    if x.n == 0:
+        h3_free(x.ptr)
+        x.ptr = NULL
+        mv = empty_memory_view()
+    else:
+        mv = _copy_to_mv(x.ptr, x.n)
+
+    # responsibility for the memory moves from this object to the array/memoryview
+    x.ptr = NULL
+    x.n = 0
+
+    return mv
+
+
+"""
+TODO: The not None declaration for the argument automatically rejects None values as input, which would otherwise be allowed. The reason why None is allowed by default is that it is conveniently used for return arguments:
+https://cython.readthedocs.io/en/latest/src/userguide/memoryviews.html#syntax
+
+TODO: potential optimization: https://cython.readthedocs.io/en/latest/src/userguide/memoryviews.html#performance-disabling-initialization-checks
+
+## future improvements:
+
+- abolish any appearance of &thing[0]. (i.e., identical interfaces)
+- can I make the interface for all these memory views identical?
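+
+For reference, the usage pattern this module is built around (a sketch;
+`some_h3lib_fn` is a stand-in for any h3lib call that fills an array):
+
+    hmm = H3MemoryManager(n)              # calloc an array of n H3 indexes
+    check_for_error(some_h3lib_fn(..., hmm.ptr))
+    mv = hmm.to_mv()                      # drop zeros; memoryview now owns the memory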
+""" + +cdef class H3MemoryManager: + """ + Cython object in charge of allocating and freeing memory for arrays + of H3 indexes. + + Initially allocates memory and provides access through `self.ptr` and + `self.n`. + + The `to_mv()` function removes responsibility for the allocated memory + from this object to a memory view object. A memory view object automatically + deallocates its memory during garbage collection. + + If the H3MemoryManager is garbage collected before running `to_mv()`, + it will deallocate its memory itself. + + This pattern is useful for a few reasons: + + - provide convenient access to the raw memory pointer and length for passing + to h3lib functions + - remove zeroes from the array output (some h3lib functions may return + results with zeros/H3NULL values) + - cython and python array types have weird interfaces; memoryviews are + much cleaner + + If we find a better way to do these then this class may no longer be + necessary. + + TODO: consider a context manager pattern + """ + def __cinit__(self, size_t n): + self.n = n + self.ptr = h3_calloc(self.n, sizeof(H3int)) + + if not self.ptr: + raise MemoryError() + + cdef H3int[:] to_mv_keep_zeros(self): + # todo: this could be a private method + return _create_mv(self) + + cdef H3int[:] to_mv(self): + _remove_zeros(self) + return _create_mv(self) + + def __dealloc__(self): + # If the memory has been handed off to a memoryview, this pointer + # should be NULL, and deallocing on NULL is fine. + # If the pointer is *not* NULL, then this means the MemoryManager + # has is still responsible for the memory (it hasn't given the memory away to another object). + h3_free(self.ptr) + + +""" +todo: combine with the H3MemoryManager using fused types? +https://cython.readthedocs.io/en/stable/src/userguide/fusedtypes.html +""" +cdef int[:] int_mv(size_t n): + cdef: + array arr + + if n == 0: + raise MemoryError() + else: + ptr = h3_calloc(n, sizeof(int)) + if ptr is NULL: + raise MemoryError() + + arr = ptr + arr.callback_free_data = h3_free + + return arr + + +cpdef H3int[:] iter_to_mv(cells): + """ cells needs to be an iterable that knows its size... + or should we have it match the np.fromiter function, which infers if not available? + """ + cdef: + H3int[:] mv + + n = len(cells) + mv = H3MemoryManager(n).to_mv_keep_zeros() + + for i,h in enumerate(cells): + mv[i] = h + + return mv diff --git a/python/h3/_cy/to_multipoly.cpython-312-x86_64-linux-gnu.so b/python/h3/_cy/to_multipoly.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..c83e22341 Binary files /dev/null and b/python/h3/_cy/to_multipoly.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/h3/_cy/to_multipoly.pyx b/python/h3/_cy/to_multipoly.pyx new file mode 100644 index 000000000..db29baa5c --- /dev/null +++ b/python/h3/_cy/to_multipoly.pyx @@ -0,0 +1,60 @@ +cimport h3lib +from h3lib cimport H3int +from .util cimport check_cell, coord2deg + + +# todo: it's driving me crazy that these three functions are all essentially the same linked list walker... +# grumble: no way to do iterators in with cdef functions! +cdef walk_polys(const h3lib.LinkedGeoPolygon* L): + out = [] + while L: + out += [walk_loops(L.data)] + L = L.next + + return out + + +cdef walk_loops(const h3lib.LinkedGeoLoop* L): + out = [] + while L: + out += [walk_coords(L.data)] + L = L.next + + return out + + +cdef walk_coords(const h3lib.LinkedLatLng* L): + out = [] + while L: + out += [coord2deg(L.data)] + L = L.next + + return out + +# todo: tuples instead of lists? 
+def _to_multi_polygon(const H3int[:] cells): + cdef: + h3lib.LinkedGeoPolygon polygon + + for h in cells: + check_cell(h) + + h3lib.cellsToLinkedMultiPolygon(&cells[0], len(cells), &polygon) + + out = walk_polys(&polygon) + + # we're still responsible for cleaning up the passed in `polygon`, + # but not a problem here, since it is stack allocated + h3lib.destroyLinkedMultiPolygon(&polygon) + + return out + + +def cells_to_multi_polygon(const H3int[:] cells): + # todo: gotta be a more elegant way to handle these... + if len(cells) == 0: + return [] + + multipoly = _to_multi_polygon(cells) + + return multipoly diff --git a/python/h3/_cy/util.cpython-312-x86_64-linux-gnu.so b/python/h3/_cy/util.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..151dd9b74 Binary files /dev/null and b/python/h3/_cy/util.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/h3/_cy/util.pxd b/python/h3/_cy/util.pxd new file mode 100644 index 000000000..b584d3aad --- /dev/null +++ b/python/h3/_cy/util.pxd @@ -0,0 +1,14 @@ +from .h3lib cimport H3int, H3str, LatLng + +cdef LatLng deg2coord(double lat, double lng) nogil +cdef (double, double) coord2deg(LatLng c) nogil + +cpdef H3int str_to_int(H3str h) except? 0 +cpdef H3str int_to_str(H3int x) + +cdef check_cell(H3int h) +cdef check_edge(H3int e) +cdef check_vertex(H3int v) +cdef check_index(H3int h) +cdef check_res(int res) +cdef check_distance(int k) diff --git a/python/h3/_cy/util.pyx b/python/h3/_cy/util.pyx new file mode 100644 index 000000000..9b070f9c1 --- /dev/null +++ b/python/h3/_cy/util.pyx @@ -0,0 +1,101 @@ +from .h3lib cimport ( + H3int, + H3str, + isValidCell, + isValidDirectedEdge, + isValidVertex, + isValidIndex, +) + +cimport h3lib + +from .error_system import ( + H3ResDomainError, + H3DomainError, + H3DirEdgeInvalidError, + H3CellInvalidError, + H3VertexInvalidError, + H3IndexInvalidError, +) + +cdef h3lib.LatLng deg2coord(double lat, double lng) nogil: + cdef: + h3lib.LatLng c + + c.lat = h3lib.degsToRads(lat) + c.lng = h3lib.degsToRads(lng) + + return c + + +cdef (double, double) coord2deg(h3lib.LatLng c) nogil: + return ( + h3lib.radsToDegs(c.lat), + h3lib.radsToDegs(c.lng) + ) + + +cpdef basestring c_version(): + v = ( + h3lib.H3_VERSION_MAJOR, + h3lib.H3_VERSION_MINOR, + h3lib.H3_VERSION_PATCH, + ) + + return '{}.{}.{}'.format(*v) + + +cpdef H3int str_to_int(H3str h) except? 0: + return int(h, 16) + + +cpdef H3str int_to_str(H3int x): + """ Convert H3 integer to hex string representation + + Need to be careful in Python 2 because `hex(x)` may return a string + with a trailing `L` character (denoting a "large" integer). + The formatting approach below avoids this. + + Also need to be careful about unicode/str differences. + """ + return '{:x}'.format(x) + + +cdef check_cell(H3int h): + """ Check if valid H3 "cell" (hexagon or pentagon). + + Does not check if a valid H3 edge, for example. + + Since this function is used by multiple interfaces (int or str), + we want the error message to be informative to the user + in either case. + + We use the builtin `hex` function instead of `int_to_str` to + prepend `0x` to indicate that this **integer** representation + is incorrect, but in a format that is easily compared to + `str` inputs. 
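+
+    For example, the invalid input `0x123` is reported as:
+    "Integer is not a valid H3 cell: 0x123".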
+ """ + if isValidCell(h) == 0: + raise H3CellInvalidError('Integer is not a valid H3 cell: {}'.format(hex(h))) + +cdef check_edge(H3int e): + if isValidDirectedEdge(e) == 0: + raise H3DirEdgeInvalidError('Integer is not a valid H3 edge: {}'.format(hex(e))) + +cdef check_vertex(H3int v): + if isValidVertex(v) == 0: + raise H3VertexInvalidError('Integer is not a valid H3 vertex: {}'.format(hex(v))) + +cdef check_index(H3int h): + if isValidIndex(h) == 0: + raise H3IndexInvalidError('Integer is not a valid H3 index: {}'.format(hex(h))) + +cdef check_res(int res): + if (res < 0) or (res > 15): + raise H3ResDomainError(res) + +cdef check_distance(int k): + if k < 0: + raise H3DomainError( + 'Grid distances must be nonnegative. Received: {}'.format(k) + ) diff --git a/python/h3/_cy/vertex.cpython-312-x86_64-linux-gnu.so b/python/h3/_cy/vertex.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..85bcca3c0 Binary files /dev/null and b/python/h3/_cy/vertex.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/h3/_cy/vertex.pxd b/python/h3/_cy/vertex.pxd new file mode 100644 index 000000000..98d890ce6 --- /dev/null +++ b/python/h3/_cy/vertex.pxd @@ -0,0 +1,6 @@ +from .h3lib cimport bool, H3int + +cpdef H3int cell_to_vertex(H3int h, int vertex_num) except 1 +cpdef H3int[:] cell_to_vertexes(H3int h) +cpdef (double, double) vertex_to_latlng(H3int v) except * +cpdef bool is_valid_vertex(H3int v) diff --git a/python/h3/_cy/vertex.pyx b/python/h3/_cy/vertex.pyx new file mode 100644 index 000000000..26a674af5 --- /dev/null +++ b/python/h3/_cy/vertex.pyx @@ -0,0 +1,54 @@ +cimport h3lib +from h3lib cimport bool, H3int + +from .util cimport ( + check_cell, + check_vertex, + coord2deg +) + +from .error_system cimport check_for_error + +from .memory cimport H3MemoryManager + + +cpdef H3int cell_to_vertex(H3int h, int vertex_num) except 1: + cdef: + H3int out + + check_cell(h) + + check_for_error( + h3lib.cellToVertex(h, vertex_num, &out) + ) + + return out + +cpdef H3int[:] cell_to_vertexes(H3int h): + cdef: + H3int out + + check_cell(h) + + hmm = H3MemoryManager(6) + check_for_error( + h3lib.cellToVertexes(h, hmm.ptr) + ) + mv = hmm.to_mv() + + return mv + +cpdef (double, double) vertex_to_latlng(H3int v) except *: + cdef: + h3lib.LatLng c + + check_vertex(v) + + check_for_error( + h3lib.vertexToLatLng(v, &c) + ) + + return coord2deg(c) + +cpdef bool is_valid_vertex(H3int v): + return h3lib.isValidVertex(v) == 1 diff --git a/python/h3/_h3shape.py b/python/h3/_h3shape.py new file mode 100644 index 000000000..5c3629512 --- /dev/null +++ b/python/h3/_h3shape.py @@ -0,0 +1,338 @@ +from abc import ABCMeta, abstractmethod + + +class H3Shape(metaclass=ABCMeta): + """ + Abstract parent class of ``LatLngPoly`` and ``LatLngMultiPoly``. + """ + @property + @abstractmethod + def __geo_interface__(self): + """ https://github.com/pytest-dev/pytest-cov/issues/428 """ + + +class LatLngPoly(H3Shape): + """ + Container for loops of lat/lng points describing a polygon, possibly with holes. + + Attributes + ---------- + outer : list[tuple[float, float]] + List of lat/lng points describing the outer loop of the polygon + + holes : list[list[tuple[float, float]]] + List of loops of lat/lng points describing the holes of the polygon + + Examples + -------- + + A polygon with a single outer ring consisting of 4 points, having no holes: + + >>> LatLngPoly( + ... [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), (37.82, -122.54)], + ... 
)
+    <LatLngPoly: [4]>
+
+    The same polygon, but with one hole consisting of 3 points:
+
+    >>> LatLngPoly(
+    ...     [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), (37.82, -122.54)],
+    ...     [(37.76, -122.51), (37.76, -122.44), (37.81, -122.51)],
+    ... )
+    <LatLngPoly: [4/(3,)]>
+
+    The same as above, but with one additional hole, made up of 5 points:
+
+    >>> LatLngPoly(
+    ...     [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), (37.82, -122.54)],
+    ...     [(37.76, -122.51), (37.76, -122.44), (37.81, -122.51)],
+    ...     [(37.71, -122.43), (37.71, -122.37), (37.73, -122.37), (37.75, -122.41),
+    ...      (37.73, -122.43)],
+    ... )
+    <LatLngPoly: [4/(3, 5)]>
+    """
+    def __init__(self, outer, *holes):
+        loops = [outer] + list(holes)
+        for loop in loops:
+            if len(loop) in (1, 2):
+                raise ValueError('Non-empty LatLngPoly loops need at least 3 points.')
+
+            point_dimensions = set(map(len, loop))
+            # empty set is possible for empty polygons, so we check if a subset
+            if not (point_dimensions <= {2}):
+                raise ValueError('LatLngPoly only accepts 2D points: lat/lng.')
+
+        self.outer = tuple(_open_ring(outer))
+        self.holes = tuple(
+            _open_ring(hole)
+            for hole in holes
+        )
+
+    def __repr__(self):
+        return '<LatLngPoly: {}>'.format(self.loopcode)
+
+    def __len__(self):
+        """
+        Should this be the number of points in the outer loop,
+        the number of holes (or +1 for the outer loop)?
+        """
+        raise NotImplementedError('No clear definition of length for LatLngPoly.')
+
+    @property
+    def loopcode(self):
+        """ Short code for describing the length of the outer loop and each hole
+
+        Example: ``[382/(18, 6, 6)]`` indicates an outer loop of 382 points,
+        along with 3 holes with 18, 6, and 6 points, respectively.
+
+        Example: ``[15]`` indicates an outer loop of 15 points and no holes.
+        """
+        outer = len(self.outer)
+        holes = tuple(map(len, self.holes))
+
+        outer = str(outer)
+
+        if holes:
+            out = outer + '/' + str(holes)
+        else:
+            out = outer
+
+        return '[' + out + ']'
+
+    @property
+    def __geo_interface__(self):
+        ll2 = _polygon_to_LL2(self)
+        gj_dict = _LL2_to_geojson_dict(ll2)
+
+        return gj_dict
+
+
+class LatLngMultiPoly(H3Shape):
+    """
+    Container for multiple ``LatLngPoly`` polygons.
+
+    Attributes
+    ----------
+    polys : list[LatLngPoly]
+        List of ``LatLngPoly`` polygons making up the multi-polygon
+    """
+    def __init__(self, *polys):
+        self.polys = tuple(polys)
+
+        for p in self.polys:
+            if not isinstance(p, LatLngPoly):
+                raise ValueError('LatLngMultiPoly requires each input to be a LatLngPoly object, instead got: ' + str(p))  # noqa
+
+    def __repr__(self):
+        out = [p.loopcode for p in self.polys]
+        out = ', '.join(out)
+        out = '<LatLngMultiPoly: {}>'.format(out)
+        return out
+
+    def __iter__(self):
+        return iter(self.polys)
+
+    def __len__(self):
+        """ Give the number of polygons in this multi-polygon.
+        """
+
+        """
+        TODO: Pandas series or dataframe representation changes depending
+        on if __len__ is defined.
+
+        I'd prefer the one that states `LatLngMultiPoly`.
+        It seems like Pandas is assuming an iterable is best-described
+        by its elements when choosing the representation.
+
+        when __len__ *IS NOT* defined, a Series of these objects is
+        displayed as tuples of the contained LatLngPoly reprs, rather
+        than as LatLngMultiPoly objects.
+        """
+        return len(self.polys)
+
+    def __getitem__(self, index):
+        return self.polys[index]
+
+    @property
+    def __geo_interface__(self):
+        ll3 = _mpoly_to_LL3(self)
+        gj_dict = _LL3_to_geojson_dict(ll3)
+
+        return gj_dict
+
+
+"""
+Helpers for cells_to_geojson and geojson_to_cells.
+
+Dealing with GeoJSON Polygons and MultiPolygons can be confusing because
+there are so many nested lists.
To help keep track, we use the following +symbols to denote different levels of nesting. + +LL0: lat/lng or lng/lat pair +LL1: list of LL0s +LL2: list of LL1s (i.e., a polygon with holes) +LL3: list of LL2s (i.e., several polygons with holes) + + +## TODO + +- Allow user to specify "container" in `cells_to_geojson`. + - That is, they may want a MultiPolygon even if the output fits in a Polygon + - 'auto', Polygon, MultiPolygon, FeatureCollection, GeometryCollection, ... +""" + + +def _mpoly_to_LL3(mpoly): + ll3 = tuple( + _polygon_to_LL2(poly) + for poly in mpoly + ) + + return ll3 + + +def _LL3_to_mpoly(ll3): + polys = [ + _LL2_to_polygon(ll2) + for ll2 in ll3 + ] + + mpoly = LatLngMultiPoly(*polys) + + return mpoly + + +def _polygon_to_LL2(poly): + ll2 = [poly.outer] + list(poly.holes) + ll2 = tuple( + _close_ring(_swap_latlng(ll1)) + for ll1 in ll2 + ) + + return ll2 + + +def _remove_z(ll1): + ll1 = [(a, b) for a, b, *z in ll1] + return ll1 + + +def _LL2_to_polygon(ll2): + ll2 = [ + _remove_z(ll1) + for ll1 in ll2 + ] + + ll2 = [ + _swap_latlng(ll1) + for ll1 in ll2 + ] + h3poly = LatLngPoly(*ll2) + + return h3poly + + +def _LL2_to_geojson_dict(ll2): + gj_dict = { + 'type': 'Polygon', + 'coordinates': ll2, + } + + return gj_dict + + +def _LL3_to_geojson_dict(ll3): + gj_dict = { + 'type': 'MultiPolygon', + 'coordinates': ll3, + } + + return gj_dict + + +def _swap_latlng(ll1): + ll1 = tuple( + (b, a) for a, b in ll1 + ) + return ll1 + + +def _close_ring(ll1): + """ + Idempotent + """ + if ll1 and (ll1[0] != ll1[-1]): + ll1 = tuple(ll1) + (ll1[0],) + + return ll1 + + +def _open_ring(ll1): + """ + Idempotent + """ + if ll1 and (ll1[0] == ll1[-1]): + ll1 = ll1[:-1] + + return ll1 + + +def geo_to_h3shape(geo): + """ + Translate from ``__geo_interface__`` to H3Shape. + + ``geo`` either implements ``__geo_interface__`` or is a dict matching the format + + Returns + ------- + H3Shape + """ + + # geo can be dict, a __geo_interface__, a string, LatLngPoly or LatLngMultiPoly + if isinstance(geo, H3Shape): + return geo + + if hasattr(geo, '__geo_interface__'): + # get dict + geo = geo.__geo_interface__ + + assert isinstance(geo, dict) # todo: remove + + t = geo['type'] + coord = geo['coordinates'] + + if t == 'Polygon': + ll2 = coord + shape = _LL2_to_polygon(ll2) + elif t == 'MultiPolygon': + ll3 = coord + shape = _LL3_to_mpoly(ll3) + else: + raise ValueError('Unrecognized type: ' + str(t)) + + return shape + + +def h3shape_to_geo(h3shape): + """ + Translate from an ``H3Shape`` to a ``__geo_interface__`` dict. + + ``h3shape`` should be either ``LatLngPoly`` or ``LatLngMultiPoly`` + + Returns + ------- + dict + """ + return h3shape.__geo_interface__ diff --git a/python/h3/_version.py b/python/h3/_version.py new file mode 100644 index 000000000..712565a5a --- /dev/null +++ b/python/h3/_version.py @@ -0,0 +1,3 @@ +from importlib import metadata + +__version__ = metadata.version(__package__ or __name__) diff --git a/python/h3/api/__init__.py b/python/h3/api/__init__.py new file mode 100644 index 000000000..435b13a15 --- /dev/null +++ b/python/h3/api/__init__.py @@ -0,0 +1,6 @@ +# flake8: noqa + +from . import basic_int +from . import basic_str +from . import memview_int +from . 
import numpy_int diff --git a/python/h3/api/__pycache__/__init__.cpython-312.pyc b/python/h3/api/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..5c767a19c Binary files /dev/null and b/python/h3/api/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/h3/api/basic_int/__init__.py b/python/h3/api/basic_int/__init__.py new file mode 100644 index 000000000..47f1c7eca --- /dev/null +++ b/python/h3/api/basic_int/__init__.py @@ -0,0 +1,1369 @@ +# This file is **symlinked** across the APIs to ensure they are exactly the same. +from typing import Literal +from array import array + +from ... import _cy +from ..._h3shape import ( + H3Shape, + LatLngPoly, + LatLngMultiPoly, + geo_to_h3shape, + h3shape_to_geo, +) + +from ._convert import ( + _in_scalar, + _out_scalar, + _in_collection, + _out_collection, +) + + +def versions(): + """ + Version numbers for the Python (wrapper) and C (wrapped) libraries. + + Versions are output as strings of the form ``'X.Y.Z'``. + C and Python should match on ``X`` (major) and ``Y`` (minor), + but may differ on ``Z`` (patch). + + Returns + ------- + dict like ``{'c': 'X.Y.Z', 'python': 'A.B.C'}`` + """ + from ..._version import __version__ + + v = { + 'c': _cy.c_version(), + 'python': __version__, + } + + return v + + +def str_to_int(h): + """ + Converts a hexadecimal string to an H3 64-bit integer index. + + Parameters + ---------- + h : str + Hexadecimal string like ``'89754e64993ffff'`` + + Returns + ------- + int + Unsigned 64-bit integer + """ + return _cy.str_to_int(h) + + +def int_to_str(x): + """ + Converts an H3 64-bit integer index to a hexadecimal string. + + Parameters + ---------- + x : int + Unsigned 64-bit integer + + Returns + ------- + str + Hexadecimal string like ``'89754e64993ffff'`` + """ + return _cy.int_to_str(x) + + +def get_num_cells(res): + """ + Return the total number of *cells* (hexagons and pentagons) + for the given resolution. + + Returns + ------- + int + """ + return _cy.get_num_cells(res) + + +def average_hexagon_area(res, unit='km^2'): + """ + Return the average area of an H3 *hexagon* + for the given resolution. + + This average *excludes* pentagons. + + Parameters + ---------- + res : int + H3 resolution + unit: str + Unit for area result (``'km^2'``, ``'m^2'``, or ``'rads^2'``) + + Returns + ------- + float + """ + return _cy.average_hexagon_area(res, unit) + + +def average_hexagon_edge_length(res, unit='km'): + """ + Return the average *hexagon* edge length + for the given resolution. + + This average *excludes* pentagons. + + Parameters + ---------- + res : int + H3 resolution + unit: str + Unit for length result (``'km'``, ``'m'``, or ``'rads'``) + + Returns + ------- + float + """ + return _cy.average_hexagon_edge_length(res, unit) + + +def is_valid_index(h): + """Validates *any* H3 index (cell, vertex, or directed edge). + + Returns + ------- + bool + """ + try: + h = _in_scalar(h) + return _cy.is_valid_index(h) + except (ValueError, TypeError): + return False + + +def is_valid_cell(h): + """ + Validates an H3 cell (hexagon or pentagon). + + Returns + ------- + bool + """ + try: + h = _in_scalar(h) + return _cy.is_valid_cell(h) + except (ValueError, TypeError): + return False + + +def is_valid_directed_edge(edge): + """ + Validates an H3 unidirectional edge. 
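+
+    For example, a minimal sketch (any directed edge produced by the API
+    should validate; the coordinates below are arbitrary):
+
+    >>> a = h3.latlng_to_cell(37.77, -122.42, 9)
+    >>> e = h3.origin_to_directed_edges(a)[0]
+    >>> h3.is_valid_directed_edge(e)
+    True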
+ + Returns + ------- + bool + """ + try: + e = _in_scalar(edge) + return _cy.is_valid_directed_edge(e) + except (ValueError, TypeError): + return False + + +def latlng_to_cell(lat, lng, res): + """ + Return the cell containing the (lat, lng) point + for a given resolution. + + Returns + ------- + H3Cell + + """ + return _out_scalar(_cy.latlng_to_cell(lat, lng, res)) + + +def cell_to_latlng(h): + """ + Return the center point of an H3 cell as a lat/lng pair. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + lat : float + Latitude + lng : float + Longitude + """ + h = _in_scalar(h) + return _cy.cell_to_latlng(h) + + +def get_resolution(h): + """ + Return the resolution of an H3 cell. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + int + """ + # todo: could also work for edges + h = _in_scalar(h) + return _cy.get_resolution(h) + + +def cell_to_parent(h, res=None): + """ + Get the parent of a cell. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the parent + If ``None``, then ``res = resolution(h) - 1`` + + Returns + ------- + H3Cell + """ + h = _in_scalar(h) + p = _cy.cell_to_parent(h, res) + p = _out_scalar(p) + + return p + + +def grid_distance(h1, h2): + """ + Compute the grid distance between two cells. + + The grid distance is defined as the length of the shortest + path between the cells in the graph formed by connecting + adjacent cells. + + This function will raise an exception if the + cells are too far apart to compute the distance. + + Parameters + ---------- + h1 : H3Cell + h2 : H3Cell + + Returns + ------- + int + """ + h1 = _in_scalar(h1) + h2 = _in_scalar(h2) + + d = _cy.grid_distance(h1, h2) + + return d + + +def cell_to_boundary(h): + """ + Return tuple of lat/lng pairs describing the cell boundary. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + tuple of (lat, lng) tuples + """ + h = _in_scalar(h) + return _cy.cell_to_boundary(h) + + +def grid_disk(h, k=1): + """ + Return unordered collection of cells with grid distance ``<= k`` from ``h``. + That is, the "filled-in" disk. + + Parameters + ---------- + h : H3Cell + k : int + Size of disk. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h = _in_scalar(h) + mv = _cy.grid_disk(h, k) + + return _out_collection(mv) + + +def grid_ring(h, k=1): + """ + Return unordered collection of cells with grid distance ``== k`` from ``h``. + That is, the "hollow" ring. + + Parameters + ---------- + h : H3Cell + k : int + Size of ring. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h = _in_scalar(h) + mv = _cy.grid_ring(h, k) + + return _out_collection(mv) + + +def cell_to_children_size(h, res=None): + """ + Number of children at resolution ``res`` of given cell. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the children. + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + int + Count of children + """ + h = _in_scalar(h) + return _cy.cell_to_children_size(h, res) + + +def cell_to_children(h, res=None): + """ + Children of a cell as an unordered collection. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the children. 
+ If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h = _in_scalar(h) + mv = _cy.cell_to_children(h, res) + + return _out_collection(mv) + + +def cell_to_child_pos(child, res_parent): + """ + Child position index of given cell, with respect to its parent at ``res_parent``. + + The reverse operation can be done with ``child_pos_to_cell``. + + Parameters + ---------- + child : H3Cell + res_parent : int + + Returns + ------- + int + Integer index of the child with respect to parent cell. + """ + child = _in_scalar(child) + return _cy.cell_to_child_pos(child, res_parent) + + +def child_pos_to_cell(parent, res_child, child_pos): + """ + Get child H3 cell from a parent cell, child resolution, and child position index. + + The reverse operation can be done with ``cell_to_child_pos``. + + Parameters + ---------- + parent : H3Cell + res_child : int + Child cell resolution + child_pos : int + Integer position of child cell, releative to parent. + + + Returns + ------- + H3Cell + """ + parent = _in_scalar(parent) + child = _cy.child_pos_to_cell(parent, res_child, child_pos) + child = _out_scalar(child) + + return child + + +# todo: nogil for expensive C operation? +def compact_cells(cells): + """ + Compact a collection of H3 cells by combining + smaller cells into larger cells, if all child cells + are present. Input cells must all share the same resolution. + + Parameters + ---------- + cells : iterable of H3 Cells + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + hu = _in_collection(cells) + hc = _cy.compact_cells(hu) + + return _out_collection(hc) + + +def uncompact_cells(cells, res): + """ + Reverse the ``compact_cells`` operation. + + Return a collection of H3 cells, all of resolution ``res``. + + Parameters + ---------- + cells : iterable of H3Cell + res : int + Resolution of desired output cells. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + # TODO: add test to make sure an error is returned when input contains cell + # smaller than output res. + + hc = _in_collection(cells) + hu = _cy.uncompact_cells(hc, res) + + return _out_collection(hu) + + +def polygon_to_cells(h3shape, res): + """ + Alias for ``h3shape_to_cells``. + """ + return h3shape_to_cells(h3shape, res) + + +def h3shape_to_cells(h3shape, res): + """ + Return the collection of H3 cells at a given resolution whose center points + are contained within an ``LatLngPoly`` or ``LatLngMultiPoly``. + + Parameters + ---------- + h3shape : ``H3Shape`` + res : int + Resolution of the output cells + + Returns + ------- + list of H3Cell + + Examples + -------- + + >>> poly = LatLngPoly( + ... [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), + ... (37.82, -122.54)], + ... ) + >>> h3.h3shape_to_cells(poly, 6) + ['862830807ffffff', + '862830827ffffff', + '86283082fffffff', + '862830877ffffff', + '862830947ffffff', + '862830957ffffff', + '86283095fffffff'] + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + + # todo: not sure if i want this dispatch logic here. maybe in the objects? 
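+    # Dispatch on the concrete shape type: a single LatLngPoly takes the
+    # one-polygon Cython path, while a LatLngMultiPoly is handled by the
+    # multi-polygon path. The bare-H3Shape branch below exists only to
+    # reject unknown subclasses explicitly.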
+ if isinstance(h3shape, LatLngPoly): + poly = h3shape + mv = _cy.polygon_to_cells(poly.outer, res=res, holes=poly.holes) + elif isinstance(h3shape, LatLngMultiPoly): + mpoly = h3shape + mv = _cy.polygons_to_cells(mpoly.polys, res=res) + elif isinstance(h3shape, H3Shape): + raise ValueError('Unrecognized H3Shape: ' + str(h3shape)) + else: + raise ValueError('Unrecognized type: ' + str(type(h3shape))) + + return _out_collection(mv) + + +def polygon_to_cells_experimental( + h3shape: H3Shape, + res: int, + contain: Literal['center', 'full', 'overlap', 'bbox_overlap'] = 'center', +): + """ + Alias for ``h3shape_to_cells_experimental``. + """ + return h3shape_to_cells_experimental(h3shape, res, contain) + + +def h3shape_to_cells_experimental( + h3shape: H3Shape, + res: int, + contain: Literal['center', 'full', 'overlap', 'bbox_overlap'] = 'center', +): + """ + Experimental function similar to ``h3shape_to_cells``, but with support for + multiple cell containment modes. + + Using ``contain='center'`` should give identical behavior as + ``h3shape_to_cells``. + + Note that this function is **experimental** and has no API stability gaurantees + across versions, so it may change in the future. + + + Parameters + ---------- + h3shape : ``H3Shape`` + res : int + Resolution of the output cells + contain : {'center', 'full', 'overlap', 'bbox_overlap'}, optional + Specifies the containment condition. + - 'center': Cell center is contained in shape + - 'full': Cell is fully contained in shape + - 'overlap': Cell is partially contained in shape + - 'bbox_overlap': Cell bounding box is partially contained in shape + + Default is 'center'. + + Returns + ------- + list of H3Cell + + Examples + -------- + + >>> poly = LatLngPoly( + ... [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), + ... (37.82, -122.54)], + ... ) + >>> h3.h3shape_to_cells_experimental(poly, 6, 'center') + ['862830807ffffff', + '862830827ffffff', + '86283082fffffff', + '862830877ffffff', + '862830947ffffff', + '862830957ffffff', + '86283095fffffff'] + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + + contain_modes = { + 'center': 0, + 'full': 1, + 'overlap': 2, + 'bbox_overlap': 3, + } + + flag = contain_modes[contain] + + # todo: not sure if i want this dispatch logic here. maybe in the objects? + if isinstance(h3shape, LatLngPoly): + poly = h3shape + mv = _cy.polygon_to_cells_experimental( + poly.outer, + res = res, + holes = poly.holes, + flag = flag, + ) + elif isinstance(h3shape, LatLngMultiPoly): + mpoly = h3shape + mv = _cy.polygons_to_cells_experimental(mpoly.polys, res=res, flag=flag) + elif isinstance(h3shape, H3Shape): + raise ValueError('Unrecognized H3Shape: ' + str(h3shape)) + else: + raise ValueError('Unrecognized type: ' + str(type(h3shape))) + + return _out_collection(mv) + + +def cells_to_h3shape(cells, *, tight=True): + """ + Return an ``H3Shape`` describing the area covered by a collection of H3 cells. + Will return ``LatLngPoly`` or ``LatLngMultiPoly``. + + Parameters + ---------- + cells : iterable of H3 cells + tight : bool + If True, return ``LatLngPoly`` if possible. + If False, always return ``LatLngMultiPoly``. 
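+        (Note: with multiple disjoint polygons, ``tight=True`` still returns
+        a ``LatLngMultiPoly``; only a single-polygon result is unwrapped.)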
+ + Returns + ------- + LatLngPoly | LatLngMultiPoly + + Examples + -------- + + >>> cells = ['8428309ffffffff', '842830dffffffff'] + >>> h3.cells_to_h3shape(cells, tight=True) + + >>> h3.cells_to_h3shape(cells, tight=False) + + """ + cells = _in_collection(cells) + mpoly = _cy.cells_to_multi_polygon(cells) + + polys = [LatLngPoly(*poly) for poly in mpoly] + out = LatLngMultiPoly(*polys) + + if tight and len(out) == 1: + out = out[0] + + return out + + +def geo_to_cells(geo, res): + """Convert from ``__geo_interface__`` to cells. + + Parameters + ---------- + geo : an object implementing ``__geo_interface__`` or a dictionary in that format. + Both ``LatLngPoly`` and ``LatLngMultiPoly`` implement the interface. + res : int + Resolution of desired output cells. + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h3shape = geo_to_h3shape(geo) + return h3shape_to_cells(h3shape, res) + + +def cells_to_geo(cells, tight=True): + """ + Convert from cells to a ``__geo_interface__`` dict. + + Parameters + ---------- + cells : iterable of H3 Cells + tight : bool + When ``True``, returns an ``LatLngPoly`` when possible. + When ``False``, always returns an ``LatLngMultiPoly``. + + Returns + ------- + dict + in `__geo_interface__` format + """ + h3shape = cells_to_h3shape(cells, tight=tight) + return h3shape_to_geo(h3shape) + + +def is_pentagon(h): + """ + Identify if an H3 cell is a pentagon. + + Parameters + ---------- + h : H3Index + + Returns + ------- + bool + ``True`` if input is a valid H3 cell which is a pentagon. + + Notes + ----- + A pentagon should *also* pass ``is_valid_cell()``. + Will return ``False`` for valid H3Edge. + """ + return _cy.is_pentagon(_in_scalar(h)) + + +def get_base_cell_number(h): + """ + Return the base cell *number* (``0`` to ``121``) of the given cell. + + The base cell *number* and the H3Index are two different representations + of the same cell: the parent cell of resolution ``0``. + + The base cell *number* is encoded within the corresponding + H3Index. + + todo: could work with edges + + Parameters + ---------- + h : H3Cell + + Returns + ------- + int + + Examples + -------- + >>> h = construct_cell(57, 2, 1, 4) + >>> h + '83728cfffffffff' + >> get_base_cell_number(h) + 57 + """ + return _cy.get_base_cell_number(_in_scalar(h)) + + +def get_index_digit(h, res): + """ + Get the index digit of a cell at the given resolution. + + Parameters + ---------- + h : H3Cell + Cell whose index digit will be returned. + res : int + Resolution (``>= 1``) at which to read the digit. + + Returns + ------- + int + The index digit at the requested resolution. + + Examples + -------- + >>> h = construct_cell(7, 2, 1, 4) + >>> h + '830e8cfffffffff' + >>> get_index_digit(h, 1) + 2 + >>> get_index_digit(h, 2) + 1 + >>> get_index_digit(h, 3) + 4 + """ + return _cy.get_index_digit(_in_scalar(h), res) + + +def construct_cell(base_cell_number, *digits, res=None): + """ + Construct cell from base cell and digits. + + Parameters + ---------- + base_cell_number : int + Base cell *number* (``0`` to ``121``). + *digits : int + Sequence of index digits (``0`` to ``6``). + Length of digits will be the resulting resolution of the output cell. + res : int, optional + Resolution of the constructed cell. If provided, it must equal + ``len(digits)``; otherwise it is inferred from the number of digits. + + Returns + ------- + H3Cell + The constructed cell. 
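+
+    Raises
+    ------
+    ValueError
+        If ``res`` is given and does not equal ``len(digits)``.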
+ + Examples + -------- + >>> construct_cell(7, 2, 1, 4) # resolution 3 cell + '830e8cfffffffff' + + >>> construct_cell(15, 0, 0, 5, 3) # resolution 4 cell + '841e057ffffffff' + + >>> construct_cell(15, 0, 0, 5, 3, res=4) + '841e057ffffffff' + """ + if (res is not None) and (len(digits) != res): + raise ValueError('Resolution must match number of digits.') + + digits = array('i', digits) + o = _cy.construct_cell(base_cell_number, digits) + return _out_scalar(o) + + +def deconstruct_cell(h): + """ + Deconstruct cell into base cell and digits. + + Parameters + ---------- + h : H3Cell + Cell to deconstruct. + + Returns + ------- + list of int + [base_cell_number, digit1, digit2, ..., digitN] + + Examples + -------- + >>> h = construct_cell(7, 2, 1, 4) # resolution 3 cell + >>> h + '830e8cfffffffff' + >>> deconstruct_cell(h) + (7, 2, 1, 4) + + >>> h = construct_cell(15, 0, 0, 5, 3) # resolution 4 cell + >>> h + '841e057ffffffff' + >>> deconstruct_cell(h) + (15, 0, 0, 5, 3) + >>> construct_cell(*deconstruct_cell(h), 0) == cell_to_center_child(h) + """ + res = get_resolution(h) + bc = get_base_cell_number(h) + digits = [get_index_digit(h, r + 1) for r in range(res)] + + return [bc, *digits] + + +def are_neighbor_cells(h1, h2): + """ + Returns ``True`` if ``h1`` and ``h2`` are neighboring cells. + + Parameters + ---------- + h1 : H3Cell + h2 : H3Cell + + Returns + ------- + bool + """ + h1 = _in_scalar(h1) + h2 = _in_scalar(h2) + + return _cy.are_neighbor_cells(h1, h2) + + +def cells_to_directed_edge(origin, destination): + """ + Create an H3 Index denoting a unidirectional edge. + + The edge is constructed from neighboring cells ``origin`` and + ``destination``. + + Parameters + ---------- + origin : H3Cell + destination : H3Cell + + Raises + ------ + ValueError + When cells are not adjacent. + + Returns + ------- + H3Edge + """ + o = _in_scalar(origin) + d = _in_scalar(destination) + e = _cy.cells_to_directed_edge(o, d) + e = _out_scalar(e) + + return e + + +def get_directed_edge_origin(e): + """ + Origin cell from an H3 directed edge. + + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + """ + e = _in_scalar(e) + o = _cy.get_directed_edge_origin(e) + o = _out_scalar(o) + + return o + + +def get_directed_edge_destination(e): + """ + Destination cell from an H3 directed edge. + + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + """ + e = _in_scalar(e) + d = _cy.get_directed_edge_destination(e) + d = _out_scalar(d) + + return d + + +def directed_edge_to_cells(e): + """ + Return (origin, destination) tuple from H3 directed edge + + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + Origin cell of edge + H3Cell + Destination cell of edge + """ + e = _in_scalar(e) + o, d = _cy.directed_edge_to_cells(e) + o, d = _out_scalar(o), _out_scalar(d) + + return o, d + + +def origin_to_directed_edges(origin): + """ + Return all directed edges starting from ``origin`` cell. + + Parameters + ---------- + origin : H3Cell + + Returns + ------- + unordered collection of H3Edge + """ + mv = _cy.origin_to_directed_edges(_in_scalar(origin)) + + return _out_collection(mv) + + +def directed_edge_to_boundary(edge): + """ + Returns points representing the edge (line of points + describing the boundary between two cells). 
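+
+    A minimal sketch (the coordinates depend on the edge chosen):
+
+    >>> a = h3.latlng_to_cell(37.77, -122.42, 9)
+    >>> pts = h3.directed_edge_to_boundary(h3.origin_to_directed_edges(a)[0])
+    >>> all(len(p) == 2 for p in pts)   # each point is a (lat, lng) pair
+    True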
+ + Parameters + ---------- + edge : H3Edge + + Returns + ------- + tuple of (lat, lng) tuples + """ + return _cy.directed_edge_to_boundary(_in_scalar(edge)) + + +def grid_path_cells(start, end): + """ + Returns the ordered collection of cells denoting a + minimum-length non-unique path between cells. + + Parameters + ---------- + start : H3Cell + end : H3Cell + + Returns + ------- + ordered collection of H3Cell + Starting with ``start``, and ending with ``end``. + """ + mv = _cy.grid_path_cells(_in_scalar(start), _in_scalar(end)) + + return _out_collection(mv) + + +def is_res_class_III(h): + """ + Determine if cell has orientation "Class II" or "Class III". + + The orientation of pentagons/hexagons on the icosahedron can be one + of two types: "Class II" or "Class III". + + All cells within a resolution have the same type, and the type + alternates between resolutions. + + "Class II" cells have resolutions: 0,2,4,6,8,10,12,14 + "Class III" cells have resolutions: 1,3,5,7,9,11,13,15 + + Parameters + ---------- + h : H3Cell + + Returns + ------- + bool + ``True`` if ``h`` is "Class III". + ``False`` if ``h`` is "Class II". + + References + ---------- + 1. https://uber.github.io/h3/#/documentation/core-library/coordinate-systems + """ + return _cy.is_res_class_iii(_in_scalar(h)) + + +def get_pentagons(res): + """ + Return all pentagons at a given resolution. + + Parameters + ---------- + res : int + Resolution of the pentagons + + Returns + ------- + unordered collection of H3Cell + """ + mv = _cy.get_pentagons(res) + + return _out_collection(mv) + + +def get_res0_cells(): + """ + Return all cells at resolution 0. + + Parameters + ---------- + None + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + mv = _cy.get_res0_cells() + + return _out_collection(mv) + + +def cell_to_center_child(h, res=None): + """ + Get the center child of a cell at some finer resolution. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the child cell + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + H3Cell + """ + h = _in_scalar(h) + p = _cy.cell_to_center_child(h, res) + p = _out_scalar(p) + + return p + + +def get_icosahedron_faces(h): + """ + Return icosahedron faces intersecting a given H3 cell. + + There are twenty possible faces, ranging from 0--19. + + Note: Every interface returns a Python ``set`` of ``int``. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + Python ``set`` of ``int`` + """ + h = _in_scalar(h) + faces = _cy.get_icosahedron_faces(h) + + return faces + + +def cell_to_local_ij(origin, h): + """ + Return local (i,j) coordinates of cell ``h`` in relation to ``origin`` cell + + + Parameters + ---------- + origin : H3Cell + Origin/central cell for defining i,j coordinates. + h: H3Cell + Destination cell whose i,j coordinates we'd like, based off + of the origin cell. + + + Returns + ------- + Tuple (i, j) of integer local coordinates of cell ``h`` + + + Notes + ----- + + The ``origin`` cell does not define (0, 0) for the IJ coordinate space. + (0, 0) refers to the center of the base cell containing origin at the + resolution of `origin`. + Subtracting the IJ coordinates of ``origin`` from every cell would get + you the property of (0, 0) being the ``origin``. + + This is done so we don't need to keep recomputing the coordinates of + ``origin`` if not needed. 
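+
+    A round-trip sketch with ``local_ij_to_cell``:
+
+    >>> a = h3.latlng_to_cell(37.77, -122.42, 9)
+    >>> i, j = h3.cell_to_local_ij(a, a)
+    >>> h3.local_ij_to_cell(a, i, j) == a
+    True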
+ """ + origin = _in_scalar(origin) + h = _in_scalar(h) + + i, j = _cy.cell_to_local_ij(origin, h) + + return i, j + + +def local_ij_to_cell(origin, i, j): + """ + Return cell at local (i,j) position relative to the ``origin`` cell. + + Parameters + ---------- + origin : H3Cell + Origin/central cell for defining i,j coordinates. + i, j: int + Integer coordinates with respect to ``origin`` cell. + + + Returns + ------- + H3Cell at local (i,j) position relative to the ``origin`` cell + + + Notes + ----- + + The ``origin`` cell does not define (0, 0) for the IJ coordinate space. + (0, 0) refers to the center of the base cell containing origin at the + resolution of ``origin``. + Subtracting the IJ coordinates of ``origin`` from every cell would get + you the property of (0, 0) being the ``origin``. + + This is done so we don't need to keep recomputing the coordinates of + ``origin`` if not needed. + """ + origin = _in_scalar(origin) + + h = _cy.local_ij_to_cell(origin, i, j) + h = _out_scalar(h) + + return h + + +def cell_area(h, unit='km^2'): + """ + Compute the spherical surface area of a specific H3 cell. + + Parameters + ---------- + h : H3Cell + unit: str + Unit for area result (``'km^2'``, ``'m^2'``, or ``'rads^2'``) + + + Returns + ------- + The area of the H3 cell in the given units + + + Notes + ----- + This function breaks the cell into spherical triangles, and computes + their spherical area. + The function uses the spherical distance calculation given by + ``great_circle_distance()``. + """ + h = _in_scalar(h) + + return _cy.cell_area(h, unit=unit) + + +def edge_length(e, unit='km'): + """ + Compute the spherical length of a specific H3 edge. + + Parameters + ---------- + h : H3Cell + unit: str + Unit for length result (``'km'``, ``'m'``, or ``'rads'``) + + + Returns + ------- + The length of the edge in the given units + + + Notes + ----- + This function uses the spherical distance calculation given by + ``great_circle_distance()``. + """ + e = _in_scalar(e) + + return _cy.edge_length(e, unit=unit) + + +def great_circle_distance(latlng1, latlng2, unit='km'): + """ + Compute the spherical distance between two (lat, lng) points. + AKA: great circle distance or "haversine" distance. + + todo: overload to allow two cell inputs? + + Parameters + ---------- + latlng1 : tuple + (lat, lng) tuple in degrees + latlng2 : tuple + (lat, lng) tuple in degrees + unit: str + Unit for distance result (``'km'``, ``'m'``, or ``'rads'``) + + Returns + ------- + The spherical distance between the points in the given units + """ + lat1, lng1 = latlng1 + lat2, lng2 = latlng2 + return _cy.great_circle_distance( + lat1, lng1, + lat2, lng2, + unit = unit + ) + + +def cell_to_vertex(h, vertex_num): + """ + Return a (specified) vertex of an H3 cell. + + Parameters + ---------- + h : H3Cell + vertex_num : int + Vertex number (0-5) + + Returns + ------- + The vertex + """ + h = _in_scalar(h) + h = _cy.cell_to_vertex(h, vertex_num) + return _out_scalar(h) + + +def cell_to_vertexes(h): + """ + Return a list of vertexes of an H3 cell. + The list will be of length 5 for pentagons and 6 for hexagons. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + A list of vertexes + """ + h = _in_scalar(h) + mv = _cy.cell_to_vertexes(h) + return _out_collection(mv) + + +def vertex_to_latlng(v): + """ + Return latitude and longitude of a vertex. 
+ + Returns + ------- + lat : float + Latitude + lng : float + Longitude + """ + v = _in_scalar(v) + return _cy.vertex_to_latlng(v) + + +def is_valid_vertex(v): + """ + Validates an H3 vertex. + + Returns + ------- + bool + """ + try: + v = _in_scalar(v) + return _cy.is_valid_vertex(v) + except (ValueError, TypeError): + return False diff --git a/python/h3/api/basic_int/__pycache__/__init__.cpython-312.pyc b/python/h3/api/basic_int/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..2ddd754b6 Binary files /dev/null and b/python/h3/api/basic_int/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/h3/api/basic_int/__pycache__/_convert.cpython-312.pyc b/python/h3/api/basic_int/__pycache__/_convert.cpython-312.pyc new file mode 100644 index 000000000..dec4a456c Binary files /dev/null and b/python/h3/api/basic_int/__pycache__/_convert.cpython-312.pyc differ diff --git a/python/h3/api/basic_int/_convert.py b/python/h3/api/basic_int/_convert.py new file mode 100644 index 000000000..51f472838 --- /dev/null +++ b/python/h3/api/basic_int/_convert.py @@ -0,0 +1,17 @@ +from ... import _cy + + +def _in_scalar(x): + return x + + +_out_scalar = _in_scalar + + +def _in_collection(cells): + it = list(cells) + + return _cy.iter_to_mv(it) + + +_out_collection = list diff --git a/python/h3/api/basic_str/__init__.py b/python/h3/api/basic_str/__init__.py new file mode 100644 index 000000000..47f1c7eca --- /dev/null +++ b/python/h3/api/basic_str/__init__.py @@ -0,0 +1,1369 @@ +# This file is **symlinked** across the APIs to ensure they are exactly the same. +from typing import Literal +from array import array + +from ... import _cy +from ..._h3shape import ( + H3Shape, + LatLngPoly, + LatLngMultiPoly, + geo_to_h3shape, + h3shape_to_geo, +) + +from ._convert import ( + _in_scalar, + _out_scalar, + _in_collection, + _out_collection, +) + + +def versions(): + """ + Version numbers for the Python (wrapper) and C (wrapped) libraries. + + Versions are output as strings of the form ``'X.Y.Z'``. + C and Python should match on ``X`` (major) and ``Y`` (minor), + but may differ on ``Z`` (patch). + + Returns + ------- + dict like ``{'c': 'X.Y.Z', 'python': 'A.B.C'}`` + """ + from ..._version import __version__ + + v = { + 'c': _cy.c_version(), + 'python': __version__, + } + + return v + + +def str_to_int(h): + """ + Converts a hexadecimal string to an H3 64-bit integer index. + + Parameters + ---------- + h : str + Hexadecimal string like ``'89754e64993ffff'`` + + Returns + ------- + int + Unsigned 64-bit integer + """ + return _cy.str_to_int(h) + + +def int_to_str(x): + """ + Converts an H3 64-bit integer index to a hexadecimal string. + + Parameters + ---------- + x : int + Unsigned 64-bit integer + + Returns + ------- + str + Hexadecimal string like ``'89754e64993ffff'`` + """ + return _cy.int_to_str(x) + + +def get_num_cells(res): + """ + Return the total number of *cells* (hexagons and pentagons) + for the given resolution. + + Returns + ------- + int + """ + return _cy.get_num_cells(res) + + +def average_hexagon_area(res, unit='km^2'): + """ + Return the average area of an H3 *hexagon* + for the given resolution. + + This average *excludes* pentagons. 
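+
+    For example (the value shown is approximate):
+
+    >>> round(h3.average_hexagon_area(9), 1)
+    0.1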
+ + Parameters + ---------- + res : int + H3 resolution + unit: str + Unit for area result (``'km^2'``, ``'m^2'``, or ``'rads^2'``) + + Returns + ------- + float + """ + return _cy.average_hexagon_area(res, unit) + + +def average_hexagon_edge_length(res, unit='km'): + """ + Return the average *hexagon* edge length + for the given resolution. + + This average *excludes* pentagons. + + Parameters + ---------- + res : int + H3 resolution + unit: str + Unit for length result (``'km'``, ``'m'``, or ``'rads'``) + + Returns + ------- + float + """ + return _cy.average_hexagon_edge_length(res, unit) + + +def is_valid_index(h): + """Validates *any* H3 index (cell, vertex, or directed edge). + + Returns + ------- + bool + """ + try: + h = _in_scalar(h) + return _cy.is_valid_index(h) + except (ValueError, TypeError): + return False + + +def is_valid_cell(h): + """ + Validates an H3 cell (hexagon or pentagon). + + Returns + ------- + bool + """ + try: + h = _in_scalar(h) + return _cy.is_valid_cell(h) + except (ValueError, TypeError): + return False + + +def is_valid_directed_edge(edge): + """ + Validates an H3 unidirectional edge. + + Returns + ------- + bool + """ + try: + e = _in_scalar(edge) + return _cy.is_valid_directed_edge(e) + except (ValueError, TypeError): + return False + + +def latlng_to_cell(lat, lng, res): + """ + Return the cell containing the (lat, lng) point + for a given resolution. + + Returns + ------- + H3Cell + + """ + return _out_scalar(_cy.latlng_to_cell(lat, lng, res)) + + +def cell_to_latlng(h): + """ + Return the center point of an H3 cell as a lat/lng pair. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + lat : float + Latitude + lng : float + Longitude + """ + h = _in_scalar(h) + return _cy.cell_to_latlng(h) + + +def get_resolution(h): + """ + Return the resolution of an H3 cell. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + int + """ + # todo: could also work for edges + h = _in_scalar(h) + return _cy.get_resolution(h) + + +def cell_to_parent(h, res=None): + """ + Get the parent of a cell. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the parent + If ``None``, then ``res = resolution(h) - 1`` + + Returns + ------- + H3Cell + """ + h = _in_scalar(h) + p = _cy.cell_to_parent(h, res) + p = _out_scalar(p) + + return p + + +def grid_distance(h1, h2): + """ + Compute the grid distance between two cells. + + The grid distance is defined as the length of the shortest + path between the cells in the graph formed by connecting + adjacent cells. + + This function will raise an exception if the + cells are too far apart to compute the distance. + + Parameters + ---------- + h1 : H3Cell + h2 : H3Cell + + Returns + ------- + int + """ + h1 = _in_scalar(h1) + h2 = _in_scalar(h2) + + d = _cy.grid_distance(h1, h2) + + return d + + +def cell_to_boundary(h): + """ + Return tuple of lat/lng pairs describing the cell boundary. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + tuple of (lat, lng) tuples + """ + h = _in_scalar(h) + return _cy.cell_to_boundary(h) + + +def grid_disk(h, k=1): + """ + Return unordered collection of cells with grid distance ``<= k`` from ``h``. + That is, the "filled-in" disk. + + Parameters + ---------- + h : H3Cell + k : int + Size of disk. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. 
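+
+    A size check as a sketch: around a hexagon (not a pentagon), a k-disk
+    contains ``1 + 3*k*(k + 1)`` cells.
+
+    >>> h = h3.latlng_to_cell(37.77, -122.42, 9)
+    >>> len(h3.grid_disk(h, 2))
+    19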
+ """ + h = _in_scalar(h) + mv = _cy.grid_disk(h, k) + + return _out_collection(mv) + + +def grid_ring(h, k=1): + """ + Return unordered collection of cells with grid distance ``== k`` from ``h``. + That is, the "hollow" ring. + + Parameters + ---------- + h : H3Cell + k : int + Size of ring. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h = _in_scalar(h) + mv = _cy.grid_ring(h, k) + + return _out_collection(mv) + + +def cell_to_children_size(h, res=None): + """ + Number of children at resolution ``res`` of given cell. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the children. + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + int + Count of children + """ + h = _in_scalar(h) + return _cy.cell_to_children_size(h, res) + + +def cell_to_children(h, res=None): + """ + Children of a cell as an unordered collection. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the children. + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h = _in_scalar(h) + mv = _cy.cell_to_children(h, res) + + return _out_collection(mv) + + +def cell_to_child_pos(child, res_parent): + """ + Child position index of given cell, with respect to its parent at ``res_parent``. + + The reverse operation can be done with ``child_pos_to_cell``. + + Parameters + ---------- + child : H3Cell + res_parent : int + + Returns + ------- + int + Integer index of the child with respect to parent cell. + """ + child = _in_scalar(child) + return _cy.cell_to_child_pos(child, res_parent) + + +def child_pos_to_cell(parent, res_child, child_pos): + """ + Get child H3 cell from a parent cell, child resolution, and child position index. + + The reverse operation can be done with ``cell_to_child_pos``. + + Parameters + ---------- + parent : H3Cell + res_child : int + Child cell resolution + child_pos : int + Integer position of child cell, releative to parent. + + + Returns + ------- + H3Cell + """ + parent = _in_scalar(parent) + child = _cy.child_pos_to_cell(parent, res_child, child_pos) + child = _out_scalar(child) + + return child + + +# todo: nogil for expensive C operation? +def compact_cells(cells): + """ + Compact a collection of H3 cells by combining + smaller cells into larger cells, if all child cells + are present. Input cells must all share the same resolution. + + Parameters + ---------- + cells : iterable of H3 Cells + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + hu = _in_collection(cells) + hc = _cy.compact_cells(hu) + + return _out_collection(hc) + + +def uncompact_cells(cells, res): + """ + Reverse the ``compact_cells`` operation. + + Return a collection of H3 cells, all of resolution ``res``. + + Parameters + ---------- + cells : iterable of H3Cell + res : int + Resolution of desired output cells. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + # TODO: add test to make sure an error is returned when input contains cell + # smaller than output res. 
+ + hc = _in_collection(cells) + hu = _cy.uncompact_cells(hc, res) + + return _out_collection(hu) + + +def polygon_to_cells(h3shape, res): + """ + Alias for ``h3shape_to_cells``. + """ + return h3shape_to_cells(h3shape, res) + + +def h3shape_to_cells(h3shape, res): + """ + Return the collection of H3 cells at a given resolution whose center points + are contained within an ``LatLngPoly`` or ``LatLngMultiPoly``. + + Parameters + ---------- + h3shape : ``H3Shape`` + res : int + Resolution of the output cells + + Returns + ------- + list of H3Cell + + Examples + -------- + + >>> poly = LatLngPoly( + ... [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), + ... (37.82, -122.54)], + ... ) + >>> h3.h3shape_to_cells(poly, 6) + ['862830807ffffff', + '862830827ffffff', + '86283082fffffff', + '862830877ffffff', + '862830947ffffff', + '862830957ffffff', + '86283095fffffff'] + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + + # todo: not sure if i want this dispatch logic here. maybe in the objects? + if isinstance(h3shape, LatLngPoly): + poly = h3shape + mv = _cy.polygon_to_cells(poly.outer, res=res, holes=poly.holes) + elif isinstance(h3shape, LatLngMultiPoly): + mpoly = h3shape + mv = _cy.polygons_to_cells(mpoly.polys, res=res) + elif isinstance(h3shape, H3Shape): + raise ValueError('Unrecognized H3Shape: ' + str(h3shape)) + else: + raise ValueError('Unrecognized type: ' + str(type(h3shape))) + + return _out_collection(mv) + + +def polygon_to_cells_experimental( + h3shape: H3Shape, + res: int, + contain: Literal['center', 'full', 'overlap', 'bbox_overlap'] = 'center', +): + """ + Alias for ``h3shape_to_cells_experimental``. + """ + return h3shape_to_cells_experimental(h3shape, res, contain) + + +def h3shape_to_cells_experimental( + h3shape: H3Shape, + res: int, + contain: Literal['center', 'full', 'overlap', 'bbox_overlap'] = 'center', +): + """ + Experimental function similar to ``h3shape_to_cells``, but with support for + multiple cell containment modes. + + Using ``contain='center'`` should give identical behavior as + ``h3shape_to_cells``. + + Note that this function is **experimental** and has no API stability gaurantees + across versions, so it may change in the future. + + + Parameters + ---------- + h3shape : ``H3Shape`` + res : int + Resolution of the output cells + contain : {'center', 'full', 'overlap', 'bbox_overlap'}, optional + Specifies the containment condition. + - 'center': Cell center is contained in shape + - 'full': Cell is fully contained in shape + - 'overlap': Cell is partially contained in shape + - 'bbox_overlap': Cell bounding box is partially contained in shape + + Default is 'center'. + + Returns + ------- + list of H3Cell + + Examples + -------- + + >>> poly = LatLngPoly( + ... [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), + ... (37.82, -122.54)], + ... ) + >>> h3.h3shape_to_cells_experimental(poly, 6, 'center') + ['862830807ffffff', + '862830827ffffff', + '86283082fffffff', + '862830877ffffff', + '862830947ffffff', + '862830957ffffff', + '86283095fffffff'] + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + + contain_modes = { + 'center': 0, + 'full': 1, + 'overlap': 2, + 'bbox_overlap': 3, + } + + flag = contain_modes[contain] + + # todo: not sure if i want this dispatch logic here. maybe in the objects? 
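+    # Same dispatch as in h3shape_to_cells, except the containment mode is
+    # forwarded to Cython as the integer ``flag`` computed above.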
+ if isinstance(h3shape, LatLngPoly): + poly = h3shape + mv = _cy.polygon_to_cells_experimental( + poly.outer, + res = res, + holes = poly.holes, + flag = flag, + ) + elif isinstance(h3shape, LatLngMultiPoly): + mpoly = h3shape + mv = _cy.polygons_to_cells_experimental(mpoly.polys, res=res, flag=flag) + elif isinstance(h3shape, H3Shape): + raise ValueError('Unrecognized H3Shape: ' + str(h3shape)) + else: + raise ValueError('Unrecognized type: ' + str(type(h3shape))) + + return _out_collection(mv) + + +def cells_to_h3shape(cells, *, tight=True): + """ + Return an ``H3Shape`` describing the area covered by a collection of H3 cells. + Will return ``LatLngPoly`` or ``LatLngMultiPoly``. + + Parameters + ---------- + cells : iterable of H3 cells + tight : bool + If True, return ``LatLngPoly`` if possible. + If False, always return ``LatLngMultiPoly``. + + Returns + ------- + LatLngPoly | LatLngMultiPoly + + Examples + -------- + + >>> cells = ['8428309ffffffff', '842830dffffffff'] + >>> h3.cells_to_h3shape(cells, tight=True) + + >>> h3.cells_to_h3shape(cells, tight=False) + + """ + cells = _in_collection(cells) + mpoly = _cy.cells_to_multi_polygon(cells) + + polys = [LatLngPoly(*poly) for poly in mpoly] + out = LatLngMultiPoly(*polys) + + if tight and len(out) == 1: + out = out[0] + + return out + + +def geo_to_cells(geo, res): + """Convert from ``__geo_interface__`` to cells. + + Parameters + ---------- + geo : an object implementing ``__geo_interface__`` or a dictionary in that format. + Both ``LatLngPoly`` and ``LatLngMultiPoly`` implement the interface. + res : int + Resolution of desired output cells. + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h3shape = geo_to_h3shape(geo) + return h3shape_to_cells(h3shape, res) + + +def cells_to_geo(cells, tight=True): + """ + Convert from cells to a ``__geo_interface__`` dict. + + Parameters + ---------- + cells : iterable of H3 Cells + tight : bool + When ``True``, returns an ``LatLngPoly`` when possible. + When ``False``, always returns an ``LatLngMultiPoly``. + + Returns + ------- + dict + in `__geo_interface__` format + """ + h3shape = cells_to_h3shape(cells, tight=tight) + return h3shape_to_geo(h3shape) + + +def is_pentagon(h): + """ + Identify if an H3 cell is a pentagon. + + Parameters + ---------- + h : H3Index + + Returns + ------- + bool + ``True`` if input is a valid H3 cell which is a pentagon. + + Notes + ----- + A pentagon should *also* pass ``is_valid_cell()``. + Will return ``False`` for valid H3Edge. + """ + return _cy.is_pentagon(_in_scalar(h)) + + +def get_base_cell_number(h): + """ + Return the base cell *number* (``0`` to ``121``) of the given cell. + + The base cell *number* and the H3Index are two different representations + of the same cell: the parent cell of resolution ``0``. + + The base cell *number* is encoded within the corresponding + H3Index. + + todo: could work with edges + + Parameters + ---------- + h : H3Cell + + Returns + ------- + int + + Examples + -------- + >>> h = construct_cell(57, 2, 1, 4) + >>> h + '83728cfffffffff' + >> get_base_cell_number(h) + 57 + """ + return _cy.get_base_cell_number(_in_scalar(h)) + + +def get_index_digit(h, res): + """ + Get the index digit of a cell at the given resolution. + + Parameters + ---------- + h : H3Cell + Cell whose index digit will be returned. + res : int + Resolution (``>= 1``) at which to read the digit. + + Returns + ------- + int + The index digit at the requested resolution. 
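+
+    See Also
+    --------
+    deconstruct_cell : Reads the base cell number and all index digits at once.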
+ + Examples + -------- + >>> h = construct_cell(7, 2, 1, 4) + >>> h + '830e8cfffffffff' + >>> get_index_digit(h, 1) + 2 + >>> get_index_digit(h, 2) + 1 + >>> get_index_digit(h, 3) + 4 + """ + return _cy.get_index_digit(_in_scalar(h), res) + + +def construct_cell(base_cell_number, *digits, res=None): + """ + Construct cell from base cell and digits. + + Parameters + ---------- + base_cell_number : int + Base cell *number* (``0`` to ``121``). + *digits : int + Sequence of index digits (``0`` to ``6``). + Length of digits will be the resulting resolution of the output cell. + res : int, optional + Resolution of the constructed cell. If provided, it must equal + ``len(digits)``; otherwise it is inferred from the number of digits. + + Returns + ------- + H3Cell + The constructed cell. + + Examples + -------- + >>> construct_cell(7, 2, 1, 4) # resolution 3 cell + '830e8cfffffffff' + + >>> construct_cell(15, 0, 0, 5, 3) # resolution 4 cell + '841e057ffffffff' + + >>> construct_cell(15, 0, 0, 5, 3, res=4) + '841e057ffffffff' + """ + if (res is not None) and (len(digits) != res): + raise ValueError('Resolution must match number of digits.') + + digits = array('i', digits) + o = _cy.construct_cell(base_cell_number, digits) + return _out_scalar(o) + + +def deconstruct_cell(h): + """ + Deconstruct cell into base cell and digits. + + Parameters + ---------- + h : H3Cell + Cell to deconstruct. + + Returns + ------- + list of int + [base_cell_number, digit1, digit2, ..., digitN] + + Examples + -------- + >>> h = construct_cell(7, 2, 1, 4) # resolution 3 cell + >>> h + '830e8cfffffffff' + >>> deconstruct_cell(h) + (7, 2, 1, 4) + + >>> h = construct_cell(15, 0, 0, 5, 3) # resolution 4 cell + >>> h + '841e057ffffffff' + >>> deconstruct_cell(h) + (15, 0, 0, 5, 3) + >>> construct_cell(*deconstruct_cell(h), 0) == cell_to_center_child(h) + """ + res = get_resolution(h) + bc = get_base_cell_number(h) + digits = [get_index_digit(h, r + 1) for r in range(res)] + + return [bc, *digits] + + +def are_neighbor_cells(h1, h2): + """ + Returns ``True`` if ``h1`` and ``h2`` are neighboring cells. + + Parameters + ---------- + h1 : H3Cell + h2 : H3Cell + + Returns + ------- + bool + """ + h1 = _in_scalar(h1) + h2 = _in_scalar(h2) + + return _cy.are_neighbor_cells(h1, h2) + + +def cells_to_directed_edge(origin, destination): + """ + Create an H3 Index denoting a unidirectional edge. + + The edge is constructed from neighboring cells ``origin`` and + ``destination``. + + Parameters + ---------- + origin : H3Cell + destination : H3Cell + + Raises + ------ + ValueError + When cells are not adjacent. + + Returns + ------- + H3Edge + """ + o = _in_scalar(origin) + d = _in_scalar(destination) + e = _cy.cells_to_directed_edge(o, d) + e = _out_scalar(e) + + return e + + +def get_directed_edge_origin(e): + """ + Origin cell from an H3 directed edge. + + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + """ + e = _in_scalar(e) + o = _cy.get_directed_edge_origin(e) + o = _out_scalar(o) + + return o + + +def get_directed_edge_destination(e): + """ + Destination cell from an H3 directed edge. 
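+
+    A minimal sketch (``a`` and ``b`` stand for any two neighboring cells):
+
+    >>> e = h3.cells_to_directed_edge(a, b)
+    >>> h3.get_directed_edge_destination(e) == b
+    True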
+ + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + """ + e = _in_scalar(e) + d = _cy.get_directed_edge_destination(e) + d = _out_scalar(d) + + return d + + +def directed_edge_to_cells(e): + """ + Return (origin, destination) tuple from H3 directed edge + + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + Origin cell of edge + H3Cell + Destination cell of edge + """ + e = _in_scalar(e) + o, d = _cy.directed_edge_to_cells(e) + o, d = _out_scalar(o), _out_scalar(d) + + return o, d + + +def origin_to_directed_edges(origin): + """ + Return all directed edges starting from ``origin`` cell. + + Parameters + ---------- + origin : H3Cell + + Returns + ------- + unordered collection of H3Edge + """ + mv = _cy.origin_to_directed_edges(_in_scalar(origin)) + + return _out_collection(mv) + + +def directed_edge_to_boundary(edge): + """ + Returns points representing the edge (line of points + describing the boundary between two cells). + + Parameters + ---------- + edge : H3Edge + + Returns + ------- + tuple of (lat, lng) tuples + """ + return _cy.directed_edge_to_boundary(_in_scalar(edge)) + + +def grid_path_cells(start, end): + """ + Returns the ordered collection of cells denoting a + minimum-length non-unique path between cells. + + Parameters + ---------- + start : H3Cell + end : H3Cell + + Returns + ------- + ordered collection of H3Cell + Starting with ``start``, and ending with ``end``. + """ + mv = _cy.grid_path_cells(_in_scalar(start), _in_scalar(end)) + + return _out_collection(mv) + + +def is_res_class_III(h): + """ + Determine if cell has orientation "Class II" or "Class III". + + The orientation of pentagons/hexagons on the icosahedron can be one + of two types: "Class II" or "Class III". + + All cells within a resolution have the same type, and the type + alternates between resolutions. + + "Class II" cells have resolutions: 0,2,4,6,8,10,12,14 + "Class III" cells have resolutions: 1,3,5,7,9,11,13,15 + + Parameters + ---------- + h : H3Cell + + Returns + ------- + bool + ``True`` if ``h`` is "Class III". + ``False`` if ``h`` is "Class II". + + References + ---------- + 1. https://uber.github.io/h3/#/documentation/core-library/coordinate-systems + """ + return _cy.is_res_class_iii(_in_scalar(h)) + + +def get_pentagons(res): + """ + Return all pentagons at a given resolution. + + Parameters + ---------- + res : int + Resolution of the pentagons + + Returns + ------- + unordered collection of H3Cell + """ + mv = _cy.get_pentagons(res) + + return _out_collection(mv) + + +def get_res0_cells(): + """ + Return all cells at resolution 0. + + Parameters + ---------- + None + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + mv = _cy.get_res0_cells() + + return _out_collection(mv) + + +def cell_to_center_child(h, res=None): + """ + Get the center child of a cell at some finer resolution. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the child cell + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + H3Cell + """ + h = _in_scalar(h) + p = _cy.cell_to_center_child(h, res) + p = _out_scalar(p) + + return p + + +def get_icosahedron_faces(h): + """ + Return icosahedron faces intersecting a given H3 cell. + + There are twenty possible faces, ranging from 0--19. + + Note: Every interface returns a Python ``set`` of ``int``. 
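+
+    For example (sketch):
+
+    >>> faces = h3.get_icosahedron_faces(h3.latlng_to_cell(37.77, -122.42, 9))
+    >>> faces <= set(range(20))
+    True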
+ + Parameters + ---------- + h : H3Cell + + Returns + ------- + Python ``set`` of ``int`` + """ + h = _in_scalar(h) + faces = _cy.get_icosahedron_faces(h) + + return faces + + +def cell_to_local_ij(origin, h): + """ + Return local (i,j) coordinates of cell ``h`` in relation to ``origin`` cell + + + Parameters + ---------- + origin : H3Cell + Origin/central cell for defining i,j coordinates. + h: H3Cell + Destination cell whose i,j coordinates we'd like, based off + of the origin cell. + + + Returns + ------- + Tuple (i, j) of integer local coordinates of cell ``h`` + + + Notes + ----- + + The ``origin`` cell does not define (0, 0) for the IJ coordinate space. + (0, 0) refers to the center of the base cell containing origin at the + resolution of `origin`. + Subtracting the IJ coordinates of ``origin`` from every cell would get + you the property of (0, 0) being the ``origin``. + + This is done so we don't need to keep recomputing the coordinates of + ``origin`` if not needed. + """ + origin = _in_scalar(origin) + h = _in_scalar(h) + + i, j = _cy.cell_to_local_ij(origin, h) + + return i, j + + +def local_ij_to_cell(origin, i, j): + """ + Return cell at local (i,j) position relative to the ``origin`` cell. + + Parameters + ---------- + origin : H3Cell + Origin/central cell for defining i,j coordinates. + i, j: int + Integer coordinates with respect to ``origin`` cell. + + + Returns + ------- + H3Cell at local (i,j) position relative to the ``origin`` cell + + + Notes + ----- + + The ``origin`` cell does not define (0, 0) for the IJ coordinate space. + (0, 0) refers to the center of the base cell containing origin at the + resolution of ``origin``. + Subtracting the IJ coordinates of ``origin`` from every cell would get + you the property of (0, 0) being the ``origin``. + + This is done so we don't need to keep recomputing the coordinates of + ``origin`` if not needed. + """ + origin = _in_scalar(origin) + + h = _cy.local_ij_to_cell(origin, i, j) + h = _out_scalar(h) + + return h + + +def cell_area(h, unit='km^2'): + """ + Compute the spherical surface area of a specific H3 cell. + + Parameters + ---------- + h : H3Cell + unit: str + Unit for area result (``'km^2'``, ``'m^2'``, or ``'rads^2'``) + + + Returns + ------- + The area of the H3 cell in the given units + + + Notes + ----- + This function breaks the cell into spherical triangles, and computes + their spherical area. + The function uses the spherical distance calculation given by + ``great_circle_distance()``. + """ + h = _in_scalar(h) + + return _cy.cell_area(h, unit=unit) + + +def edge_length(e, unit='km'): + """ + Compute the spherical length of a specific H3 edge. + + Parameters + ---------- + h : H3Cell + unit: str + Unit for length result (``'km'``, ``'m'``, or ``'rads'``) + + + Returns + ------- + The length of the edge in the given units + + + Notes + ----- + This function uses the spherical distance calculation given by + ``great_circle_distance()``. + """ + e = _in_scalar(e) + + return _cy.edge_length(e, unit=unit) + + +def great_circle_distance(latlng1, latlng2, unit='km'): + """ + Compute the spherical distance between two (lat, lng) points. + AKA: great circle distance or "haversine" distance. + + todo: overload to allow two cell inputs? 
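+
+    A rough sketch (the two points are on the order of 68 km apart):
+
+    >>> d = h3.great_circle_distance((37.77, -122.42), (37.33, -121.89))
+    >>> 60 < d < 75
+    True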
+ + Parameters + ---------- + latlng1 : tuple + (lat, lng) tuple in degrees + latlng2 : tuple + (lat, lng) tuple in degrees + unit: str + Unit for distance result (``'km'``, ``'m'``, or ``'rads'``) + + Returns + ------- + The spherical distance between the points in the given units + """ + lat1, lng1 = latlng1 + lat2, lng2 = latlng2 + return _cy.great_circle_distance( + lat1, lng1, + lat2, lng2, + unit = unit + ) + + +def cell_to_vertex(h, vertex_num): + """ + Return a (specified) vertex of an H3 cell. + + Parameters + ---------- + h : H3Cell + vertex_num : int + Vertex number (0-5) + + Returns + ------- + The vertex + """ + h = _in_scalar(h) + h = _cy.cell_to_vertex(h, vertex_num) + return _out_scalar(h) + + +def cell_to_vertexes(h): + """ + Return a list of vertexes of an H3 cell. + The list will be of length 5 for pentagons and 6 for hexagons. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + A list of vertexes + """ + h = _in_scalar(h) + mv = _cy.cell_to_vertexes(h) + return _out_collection(mv) + + +def vertex_to_latlng(v): + """ + Return latitude and longitude of a vertex. + + Returns + ------- + lat : float + Latitude + lng : float + Longitude + """ + v = _in_scalar(v) + return _cy.vertex_to_latlng(v) + + +def is_valid_vertex(v): + """ + Validates an H3 vertex. + + Returns + ------- + bool + """ + try: + v = _in_scalar(v) + return _cy.is_valid_vertex(v) + except (ValueError, TypeError): + return False diff --git a/python/h3/api/basic_str/__pycache__/__init__.cpython-312.pyc b/python/h3/api/basic_str/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..65f377f2a Binary files /dev/null and b/python/h3/api/basic_str/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/h3/api/basic_str/__pycache__/_convert.cpython-312.pyc b/python/h3/api/basic_str/__pycache__/_convert.cpython-312.pyc new file mode 100644 index 000000000..afb433281 Binary files /dev/null and b/python/h3/api/basic_str/__pycache__/_convert.cpython-312.pyc differ diff --git a/python/h3/api/basic_str/_convert.py b/python/h3/api/basic_str/_convert.py new file mode 100644 index 000000000..a6c0ba0da --- /dev/null +++ b/python/h3/api/basic_str/_convert.py @@ -0,0 +1,14 @@ +from ... import _cy + +_in_scalar = _cy.str_to_int +_out_scalar = _cy.int_to_str + + +def _in_collection(cells): + it = [_cy.str_to_int(h) for h in cells] + + return _cy.iter_to_mv(it) + + +def _out_collection(mv): + return list(_cy.int_to_str(h) for h in mv) diff --git a/python/h3/api/memview_int/__init__.py b/python/h3/api/memview_int/__init__.py new file mode 100644 index 000000000..47f1c7eca --- /dev/null +++ b/python/h3/api/memview_int/__init__.py @@ -0,0 +1,1369 @@ +# This file is **symlinked** across the APIs to ensure they are exactly the same. +from typing import Literal +from array import array + +from ... import _cy +from ..._h3shape import ( + H3Shape, + LatLngPoly, + LatLngMultiPoly, + geo_to_h3shape, + h3shape_to_geo, +) + +from ._convert import ( + _in_scalar, + _out_scalar, + _in_collection, + _out_collection, +) + + +def versions(): + """ + Version numbers for the Python (wrapper) and C (wrapped) libraries. + + Versions are output as strings of the form ``'X.Y.Z'``. + C and Python should match on ``X`` (major) and ``Y`` (minor), + but may differ on ``Z`` (patch). 
+ + Returns + ------- + dict like ``{'c': 'X.Y.Z', 'python': 'A.B.C'}`` + """ + from ..._version import __version__ + + v = { + 'c': _cy.c_version(), + 'python': __version__, + } + + return v + + +def str_to_int(h): + """ + Converts a hexadecimal string to an H3 64-bit integer index. + + Parameters + ---------- + h : str + Hexadecimal string like ``'89754e64993ffff'`` + + Returns + ------- + int + Unsigned 64-bit integer + """ + return _cy.str_to_int(h) + + +def int_to_str(x): + """ + Converts an H3 64-bit integer index to a hexadecimal string. + + Parameters + ---------- + x : int + Unsigned 64-bit integer + + Returns + ------- + str + Hexadecimal string like ``'89754e64993ffff'`` + """ + return _cy.int_to_str(x) + + +def get_num_cells(res): + """ + Return the total number of *cells* (hexagons and pentagons) + for the given resolution. + + Returns + ------- + int + """ + return _cy.get_num_cells(res) + + +def average_hexagon_area(res, unit='km^2'): + """ + Return the average area of an H3 *hexagon* + for the given resolution. + + This average *excludes* pentagons. + + Parameters + ---------- + res : int + H3 resolution + unit: str + Unit for area result (``'km^2'``, ``'m^2'``, or ``'rads^2'``) + + Returns + ------- + float + """ + return _cy.average_hexagon_area(res, unit) + + +def average_hexagon_edge_length(res, unit='km'): + """ + Return the average *hexagon* edge length + for the given resolution. + + This average *excludes* pentagons. + + Parameters + ---------- + res : int + H3 resolution + unit: str + Unit for length result (``'km'``, ``'m'``, or ``'rads'``) + + Returns + ------- + float + """ + return _cy.average_hexagon_edge_length(res, unit) + + +def is_valid_index(h): + """Validates *any* H3 index (cell, vertex, or directed edge). + + Returns + ------- + bool + """ + try: + h = _in_scalar(h) + return _cy.is_valid_index(h) + except (ValueError, TypeError): + return False + + +def is_valid_cell(h): + """ + Validates an H3 cell (hexagon or pentagon). + + Returns + ------- + bool + """ + try: + h = _in_scalar(h) + return _cy.is_valid_cell(h) + except (ValueError, TypeError): + return False + + +def is_valid_directed_edge(edge): + """ + Validates an H3 unidirectional edge. + + Returns + ------- + bool + """ + try: + e = _in_scalar(edge) + return _cy.is_valid_directed_edge(e) + except (ValueError, TypeError): + return False + + +def latlng_to_cell(lat, lng, res): + """ + Return the cell containing the (lat, lng) point + for a given resolution. + + Returns + ------- + H3Cell + + """ + return _out_scalar(_cy.latlng_to_cell(lat, lng, res)) + + +def cell_to_latlng(h): + """ + Return the center point of an H3 cell as a lat/lng pair. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + lat : float + Latitude + lng : float + Longitude + """ + h = _in_scalar(h) + return _cy.cell_to_latlng(h) + + +def get_resolution(h): + """ + Return the resolution of an H3 cell. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + int + """ + # todo: could also work for edges + h = _in_scalar(h) + return _cy.get_resolution(h) + + +def cell_to_parent(h, res=None): + """ + Get the parent of a cell. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the parent + If ``None``, then ``res = resolution(h) - 1`` + + Returns + ------- + H3Cell + """ + h = _in_scalar(h) + p = _cy.cell_to_parent(h, res) + p = _out_scalar(p) + + return p + + +def grid_distance(h1, h2): + """ + Compute the grid distance between two cells. 
+
+    The grid distance is defined as the length of the shortest
+    path between the cells in the graph formed by connecting
+    adjacent cells.
+
+    This function will raise an exception if the
+    cells are too far apart to compute the distance.
+
+    Parameters
+    ----------
+    h1 : H3Cell
+    h2 : H3Cell
+
+    Returns
+    -------
+    int
+    """
+    h1 = _in_scalar(h1)
+    h2 = _in_scalar(h2)
+
+    d = _cy.grid_distance(h1, h2)
+
+    return d
+
+
+def cell_to_boundary(h):
+    """
+    Return tuple of lat/lng pairs describing the cell boundary.
+
+    Parameters
+    ----------
+    h : H3Cell
+
+    Returns
+    -------
+    tuple of (lat, lng) tuples
+    """
+    h = _in_scalar(h)
+    return _cy.cell_to_boundary(h)
+
+
+def grid_disk(h, k=1):
+    """
+    Return unordered collection of cells with grid distance ``<= k`` from ``h``.
+    That is, the "filled-in" disk.
+
+    Parameters
+    ----------
+    h : H3Cell
+    k : int
+        Size of disk.
+
+    Returns
+    -------
+    unordered collection of H3Cell
+
+    Notes
+    -----
+    There is currently no guaranteed order of the output cells.
+    """
+    h = _in_scalar(h)
+    mv = _cy.grid_disk(h, k)
+
+    return _out_collection(mv)
+
+
+def grid_ring(h, k=1):
+    """
+    Return unordered collection of cells with grid distance ``== k`` from ``h``.
+    That is, the "hollow" ring.
+
+    Parameters
+    ----------
+    h : H3Cell
+    k : int
+        Size of ring.
+
+    Returns
+    -------
+    unordered collection of H3Cell
+
+    Notes
+    -----
+    There is currently no guaranteed order of the output cells.
+    """
+    h = _in_scalar(h)
+    mv = _cy.grid_ring(h, k)
+
+    return _out_collection(mv)
+
+
+def cell_to_children_size(h, res=None):
+    """
+    Number of children at resolution ``res`` of given cell.
+
+    Parameters
+    ----------
+    h : H3Cell
+    res : int or None, optional
+        The resolution for the children.
+        If ``None``, then ``res = resolution(h) + 1``
+
+    Returns
+    -------
+    int
+        Count of children
+    """
+    h = _in_scalar(h)
+    return _cy.cell_to_children_size(h, res)
+
+
+def cell_to_children(h, res=None):
+    """
+    Children of a cell as an unordered collection.
+
+    Parameters
+    ----------
+    h : H3Cell
+    res : int or None, optional
+        The resolution for the children.
+        If ``None``, then ``res = resolution(h) + 1``
+
+    Returns
+    -------
+    unordered collection of H3Cell
+
+    Notes
+    -----
+    There is currently no guaranteed order of the output cells.
+    """
+    h = _in_scalar(h)
+    mv = _cy.cell_to_children(h, res)
+
+    return _out_collection(mv)
+
+
+def cell_to_child_pos(child, res_parent):
+    """
+    Child position index of given cell, with respect to its parent at ``res_parent``.
+
+    The reverse operation can be done with ``child_pos_to_cell``.
+
+    Parameters
+    ----------
+    child : H3Cell
+    res_parent : int
+
+    Returns
+    -------
+    int
+        Integer index of the child with respect to parent cell.
+    """
+    child = _in_scalar(child)
+    return _cy.cell_to_child_pos(child, res_parent)
+
+
+def child_pos_to_cell(parent, res_child, child_pos):
+    """
+    Get child H3 cell from a parent cell, child resolution, and child position index.
+
+    The reverse operation can be done with ``cell_to_child_pos``.
+
+    Parameters
+    ----------
+    parent : H3Cell
+    res_child : int
+        Child cell resolution
+    child_pos : int
+        Integer position of child cell, relative to parent.
+
+
+    Returns
+    -------
+    H3Cell
+    """
+    parent = _in_scalar(parent)
+    child = _cy.child_pos_to_cell(parent, res_child, child_pos)
+    child = _out_scalar(child)
+
+    return child
+
+
+# todo: nogil for expensive C operation?
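+
+# Illustrative sketch (not library code): compacting the full set of children
+# of any valid cell `h` and uncompacting back to the child resolution should
+# reproduce the original set. All names below are functions from this module.
+#
+#   res_child = get_resolution(h) + 1
+#   children = cell_to_children(h, res_child)
+#   roundtrip = uncompact_cells(compact_cells(children), res_child)
+#   assert set(roundtrip) == set(children)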
+def compact_cells(cells):
+    """
+    Compact a collection of H3 cells by combining
+    smaller cells into larger cells, if all child cells
+    are present. Input cells must all share the same resolution.
+
+    Parameters
+    ----------
+    cells : iterable of H3 Cells
+
+    Returns
+    -------
+    unordered collection of H3Cell
+
+    Notes
+    -----
+    There is currently no guaranteed order of the output cells.
+    """
+    hu = _in_collection(cells)
+    hc = _cy.compact_cells(hu)
+
+    return _out_collection(hc)
+
+
+def uncompact_cells(cells, res):
+    """
+    Reverse the ``compact_cells`` operation.
+
+    Return a collection of H3 cells, all of resolution ``res``.
+
+    Parameters
+    ----------
+    cells : iterable of H3Cell
+    res : int
+        Resolution of desired output cells.
+
+    Returns
+    -------
+    unordered collection of H3Cell
+
+    Notes
+    -----
+    There is currently no guaranteed order of the output cells.
+    """
+    # TODO: add test to make sure an error is returned when input contains cell
+    # smaller than output res.
+
+    hc = _in_collection(cells)
+    hu = _cy.uncompact_cells(hc, res)
+
+    return _out_collection(hu)
+
+
+def polygon_to_cells(h3shape, res):
+    """
+    Alias for ``h3shape_to_cells``.
+    """
+    return h3shape_to_cells(h3shape, res)
+
+
+def h3shape_to_cells(h3shape, res):
+    """
+    Return the collection of H3 cells at a given resolution whose center points
+    are contained within an ``LatLngPoly`` or ``LatLngMultiPoly``.
+
+    Parameters
+    ----------
+    h3shape : ``H3Shape``
+    res : int
+        Resolution of the output cells
+
+    Returns
+    -------
+    list of H3Cell
+
+    Examples
+    --------
+
+    >>> poly = LatLngPoly(
+    ...     [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34),
+    ...      (37.82, -122.54)],
+    ... )
+    >>> h3.h3shape_to_cells(poly, 6)
+    ['862830807ffffff',
+     '862830827ffffff',
+     '86283082fffffff',
+     '862830877ffffff',
+     '862830947ffffff',
+     '862830957ffffff',
+     '86283095fffffff']
+
+    Notes
+    -----
+    There is currently no guaranteed order of the output cells.
+    """
+
+    # todo: not sure if i want this dispatch logic here. maybe in the objects?
+    if isinstance(h3shape, LatLngPoly):
+        poly = h3shape
+        mv = _cy.polygon_to_cells(poly.outer, res=res, holes=poly.holes)
+    elif isinstance(h3shape, LatLngMultiPoly):
+        mpoly = h3shape
+        mv = _cy.polygons_to_cells(mpoly.polys, res=res)
+    elif isinstance(h3shape, H3Shape):
+        raise ValueError('Unrecognized H3Shape: ' + str(h3shape))
+    else:
+        raise ValueError('Unrecognized type: ' + str(type(h3shape)))
+
+    return _out_collection(mv)
+
+
+def polygon_to_cells_experimental(
+    h3shape: H3Shape,
+    res: int,
+    contain: Literal['center', 'full', 'overlap', 'bbox_overlap'] = 'center',
+):
+    """
+    Alias for ``h3shape_to_cells_experimental``.
+    """
+    return h3shape_to_cells_experimental(h3shape, res, contain)
+
+
+def h3shape_to_cells_experimental(
+    h3shape: H3Shape,
+    res: int,
+    contain: Literal['center', 'full', 'overlap', 'bbox_overlap'] = 'center',
+):
+    """
+    Experimental function similar to ``h3shape_to_cells``, but with support for
+    multiple cell containment modes.
+
+    Using ``contain='center'`` should give identical behavior to
+    ``h3shape_to_cells``.
+
+    Note that this function is **experimental** and has no API stability guarantees
+    across versions, so it may change in the future.
+
+
+    Parameters
+    ----------
+    h3shape : ``H3Shape``
+    res : int
+        Resolution of the output cells
+    contain : {'center', 'full', 'overlap', 'bbox_overlap'}, optional
+        Specifies the containment condition.
+ - 'center': Cell center is contained in shape + - 'full': Cell is fully contained in shape + - 'overlap': Cell is partially contained in shape + - 'bbox_overlap': Cell bounding box is partially contained in shape + + Default is 'center'. + + Returns + ------- + list of H3Cell + + Examples + -------- + + >>> poly = LatLngPoly( + ... [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), + ... (37.82, -122.54)], + ... ) + >>> h3.h3shape_to_cells_experimental(poly, 6, 'center') + ['862830807ffffff', + '862830827ffffff', + '86283082fffffff', + '862830877ffffff', + '862830947ffffff', + '862830957ffffff', + '86283095fffffff'] + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + + contain_modes = { + 'center': 0, + 'full': 1, + 'overlap': 2, + 'bbox_overlap': 3, + } + + flag = contain_modes[contain] + + # todo: not sure if i want this dispatch logic here. maybe in the objects? + if isinstance(h3shape, LatLngPoly): + poly = h3shape + mv = _cy.polygon_to_cells_experimental( + poly.outer, + res = res, + holes = poly.holes, + flag = flag, + ) + elif isinstance(h3shape, LatLngMultiPoly): + mpoly = h3shape + mv = _cy.polygons_to_cells_experimental(mpoly.polys, res=res, flag=flag) + elif isinstance(h3shape, H3Shape): + raise ValueError('Unrecognized H3Shape: ' + str(h3shape)) + else: + raise ValueError('Unrecognized type: ' + str(type(h3shape))) + + return _out_collection(mv) + + +def cells_to_h3shape(cells, *, tight=True): + """ + Return an ``H3Shape`` describing the area covered by a collection of H3 cells. + Will return ``LatLngPoly`` or ``LatLngMultiPoly``. + + Parameters + ---------- + cells : iterable of H3 cells + tight : bool + If True, return ``LatLngPoly`` if possible. + If False, always return ``LatLngMultiPoly``. + + Returns + ------- + LatLngPoly | LatLngMultiPoly + + Examples + -------- + + >>> cells = ['8428309ffffffff', '842830dffffffff'] + >>> h3.cells_to_h3shape(cells, tight=True) + + >>> h3.cells_to_h3shape(cells, tight=False) + + """ + cells = _in_collection(cells) + mpoly = _cy.cells_to_multi_polygon(cells) + + polys = [LatLngPoly(*poly) for poly in mpoly] + out = LatLngMultiPoly(*polys) + + if tight and len(out) == 1: + out = out[0] + + return out + + +def geo_to_cells(geo, res): + """Convert from ``__geo_interface__`` to cells. + + Parameters + ---------- + geo : an object implementing ``__geo_interface__`` or a dictionary in that format. + Both ``LatLngPoly`` and ``LatLngMultiPoly`` implement the interface. + res : int + Resolution of desired output cells. + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h3shape = geo_to_h3shape(geo) + return h3shape_to_cells(h3shape, res) + + +def cells_to_geo(cells, tight=True): + """ + Convert from cells to a ``__geo_interface__`` dict. + + Parameters + ---------- + cells : iterable of H3 Cells + tight : bool + When ``True``, returns an ``LatLngPoly`` when possible. + When ``False``, always returns an ``LatLngMultiPoly``. + + Returns + ------- + dict + in `__geo_interface__` format + """ + h3shape = cells_to_h3shape(cells, tight=tight) + return h3shape_to_geo(h3shape) + + +def is_pentagon(h): + """ + Identify if an H3 cell is a pentagon. + + Parameters + ---------- + h : H3Index + + Returns + ------- + bool + ``True`` if input is a valid H3 cell which is a pentagon. + + Notes + ----- + A pentagon should *also* pass ``is_valid_cell()``. + Will return ``False`` for valid H3Edge. 
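+
+    Examples
+    --------
+    Illustrative sketch: every cell returned by ``get_pentagons()``
+    (defined later in this module) should satisfy this.
+
+    >>> all(is_pentagon(h) for h in get_pentagons(5))
+    True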
+ """ + return _cy.is_pentagon(_in_scalar(h)) + + +def get_base_cell_number(h): + """ + Return the base cell *number* (``0`` to ``121``) of the given cell. + + The base cell *number* and the H3Index are two different representations + of the same cell: the parent cell of resolution ``0``. + + The base cell *number* is encoded within the corresponding + H3Index. + + todo: could work with edges + + Parameters + ---------- + h : H3Cell + + Returns + ------- + int + + Examples + -------- + >>> h = construct_cell(57, 2, 1, 4) + >>> h + '83728cfffffffff' + >> get_base_cell_number(h) + 57 + """ + return _cy.get_base_cell_number(_in_scalar(h)) + + +def get_index_digit(h, res): + """ + Get the index digit of a cell at the given resolution. + + Parameters + ---------- + h : H3Cell + Cell whose index digit will be returned. + res : int + Resolution (``>= 1``) at which to read the digit. + + Returns + ------- + int + The index digit at the requested resolution. + + Examples + -------- + >>> h = construct_cell(7, 2, 1, 4) + >>> h + '830e8cfffffffff' + >>> get_index_digit(h, 1) + 2 + >>> get_index_digit(h, 2) + 1 + >>> get_index_digit(h, 3) + 4 + """ + return _cy.get_index_digit(_in_scalar(h), res) + + +def construct_cell(base_cell_number, *digits, res=None): + """ + Construct cell from base cell and digits. + + Parameters + ---------- + base_cell_number : int + Base cell *number* (``0`` to ``121``). + *digits : int + Sequence of index digits (``0`` to ``6``). + Length of digits will be the resulting resolution of the output cell. + res : int, optional + Resolution of the constructed cell. If provided, it must equal + ``len(digits)``; otherwise it is inferred from the number of digits. + + Returns + ------- + H3Cell + The constructed cell. + + Examples + -------- + >>> construct_cell(7, 2, 1, 4) # resolution 3 cell + '830e8cfffffffff' + + >>> construct_cell(15, 0, 0, 5, 3) # resolution 4 cell + '841e057ffffffff' + + >>> construct_cell(15, 0, 0, 5, 3, res=4) + '841e057ffffffff' + """ + if (res is not None) and (len(digits) != res): + raise ValueError('Resolution must match number of digits.') + + digits = array('i', digits) + o = _cy.construct_cell(base_cell_number, digits) + return _out_scalar(o) + + +def deconstruct_cell(h): + """ + Deconstruct cell into base cell and digits. + + Parameters + ---------- + h : H3Cell + Cell to deconstruct. + + Returns + ------- + list of int + [base_cell_number, digit1, digit2, ..., digitN] + + Examples + -------- + >>> h = construct_cell(7, 2, 1, 4) # resolution 3 cell + >>> h + '830e8cfffffffff' + >>> deconstruct_cell(h) + (7, 2, 1, 4) + + >>> h = construct_cell(15, 0, 0, 5, 3) # resolution 4 cell + >>> h + '841e057ffffffff' + >>> deconstruct_cell(h) + (15, 0, 0, 5, 3) + >>> construct_cell(*deconstruct_cell(h), 0) == cell_to_center_child(h) + """ + res = get_resolution(h) + bc = get_base_cell_number(h) + digits = [get_index_digit(h, r + 1) for r in range(res)] + + return [bc, *digits] + + +def are_neighbor_cells(h1, h2): + """ + Returns ``True`` if ``h1`` and ``h2`` are neighboring cells. + + Parameters + ---------- + h1 : H3Cell + h2 : H3Cell + + Returns + ------- + bool + """ + h1 = _in_scalar(h1) + h2 = _in_scalar(h2) + + return _cy.are_neighbor_cells(h1, h2) + + +def cells_to_directed_edge(origin, destination): + """ + Create an H3 Index denoting a unidirectional edge. + + The edge is constructed from neighboring cells ``origin`` and + ``destination``. 
+ + Parameters + ---------- + origin : H3Cell + destination : H3Cell + + Raises + ------ + ValueError + When cells are not adjacent. + + Returns + ------- + H3Edge + """ + o = _in_scalar(origin) + d = _in_scalar(destination) + e = _cy.cells_to_directed_edge(o, d) + e = _out_scalar(e) + + return e + + +def get_directed_edge_origin(e): + """ + Origin cell from an H3 directed edge. + + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + """ + e = _in_scalar(e) + o = _cy.get_directed_edge_origin(e) + o = _out_scalar(o) + + return o + + +def get_directed_edge_destination(e): + """ + Destination cell from an H3 directed edge. + + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + """ + e = _in_scalar(e) + d = _cy.get_directed_edge_destination(e) + d = _out_scalar(d) + + return d + + +def directed_edge_to_cells(e): + """ + Return (origin, destination) tuple from H3 directed edge + + Parameters + ---------- + e : H3Edge + + Returns + ------- + H3Cell + Origin cell of edge + H3Cell + Destination cell of edge + """ + e = _in_scalar(e) + o, d = _cy.directed_edge_to_cells(e) + o, d = _out_scalar(o), _out_scalar(d) + + return o, d + + +def origin_to_directed_edges(origin): + """ + Return all directed edges starting from ``origin`` cell. + + Parameters + ---------- + origin : H3Cell + + Returns + ------- + unordered collection of H3Edge + """ + mv = _cy.origin_to_directed_edges(_in_scalar(origin)) + + return _out_collection(mv) + + +def directed_edge_to_boundary(edge): + """ + Returns points representing the edge (line of points + describing the boundary between two cells). + + Parameters + ---------- + edge : H3Edge + + Returns + ------- + tuple of (lat, lng) tuples + """ + return _cy.directed_edge_to_boundary(_in_scalar(edge)) + + +def grid_path_cells(start, end): + """ + Returns the ordered collection of cells denoting a + minimum-length non-unique path between cells. + + Parameters + ---------- + start : H3Cell + end : H3Cell + + Returns + ------- + ordered collection of H3Cell + Starting with ``start``, and ending with ``end``. + """ + mv = _cy.grid_path_cells(_in_scalar(start), _in_scalar(end)) + + return _out_collection(mv) + + +def is_res_class_III(h): + """ + Determine if cell has orientation "Class II" or "Class III". + + The orientation of pentagons/hexagons on the icosahedron can be one + of two types: "Class II" or "Class III". + + All cells within a resolution have the same type, and the type + alternates between resolutions. + + "Class II" cells have resolutions: 0,2,4,6,8,10,12,14 + "Class III" cells have resolutions: 1,3,5,7,9,11,13,15 + + Parameters + ---------- + h : H3Cell + + Returns + ------- + bool + ``True`` if ``h`` is "Class III". + ``False`` if ``h`` is "Class II". + + References + ---------- + 1. https://uber.github.io/h3/#/documentation/core-library/coordinate-systems + """ + return _cy.is_res_class_iii(_in_scalar(h)) + + +def get_pentagons(res): + """ + Return all pentagons at a given resolution. + + Parameters + ---------- + res : int + Resolution of the pentagons + + Returns + ------- + unordered collection of H3Cell + """ + mv = _cy.get_pentagons(res) + + return _out_collection(mv) + + +def get_res0_cells(): + """ + Return all cells at resolution 0. + + Parameters + ---------- + None + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. 
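+
+    Examples
+    --------
+    Illustrative count check; H3 defines 122 base cells:
+
+    >>> len(get_res0_cells())
+    122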
+ """ + mv = _cy.get_res0_cells() + + return _out_collection(mv) + + +def cell_to_center_child(h, res=None): + """ + Get the center child of a cell at some finer resolution. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the child cell + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + H3Cell + """ + h = _in_scalar(h) + p = _cy.cell_to_center_child(h, res) + p = _out_scalar(p) + + return p + + +def get_icosahedron_faces(h): + """ + Return icosahedron faces intersecting a given H3 cell. + + There are twenty possible faces, ranging from 0--19. + + Note: Every interface returns a Python ``set`` of ``int``. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + Python ``set`` of ``int`` + """ + h = _in_scalar(h) + faces = _cy.get_icosahedron_faces(h) + + return faces + + +def cell_to_local_ij(origin, h): + """ + Return local (i,j) coordinates of cell ``h`` in relation to ``origin`` cell + + + Parameters + ---------- + origin : H3Cell + Origin/central cell for defining i,j coordinates. + h: H3Cell + Destination cell whose i,j coordinates we'd like, based off + of the origin cell. + + + Returns + ------- + Tuple (i, j) of integer local coordinates of cell ``h`` + + + Notes + ----- + + The ``origin`` cell does not define (0, 0) for the IJ coordinate space. + (0, 0) refers to the center of the base cell containing origin at the + resolution of `origin`. + Subtracting the IJ coordinates of ``origin`` from every cell would get + you the property of (0, 0) being the ``origin``. + + This is done so we don't need to keep recomputing the coordinates of + ``origin`` if not needed. + """ + origin = _in_scalar(origin) + h = _in_scalar(h) + + i, j = _cy.cell_to_local_ij(origin, h) + + return i, j + + +def local_ij_to_cell(origin, i, j): + """ + Return cell at local (i,j) position relative to the ``origin`` cell. + + Parameters + ---------- + origin : H3Cell + Origin/central cell for defining i,j coordinates. + i, j: int + Integer coordinates with respect to ``origin`` cell. + + + Returns + ------- + H3Cell at local (i,j) position relative to the ``origin`` cell + + + Notes + ----- + + The ``origin`` cell does not define (0, 0) for the IJ coordinate space. + (0, 0) refers to the center of the base cell containing origin at the + resolution of ``origin``. + Subtracting the IJ coordinates of ``origin`` from every cell would get + you the property of (0, 0) being the ``origin``. + + This is done so we don't need to keep recomputing the coordinates of + ``origin`` if not needed. + """ + origin = _in_scalar(origin) + + h = _cy.local_ij_to_cell(origin, i, j) + h = _out_scalar(h) + + return h + + +def cell_area(h, unit='km^2'): + """ + Compute the spherical surface area of a specific H3 cell. + + Parameters + ---------- + h : H3Cell + unit: str + Unit for area result (``'km^2'``, ``'m^2'``, or ``'rads^2'``) + + + Returns + ------- + The area of the H3 cell in the given units + + + Notes + ----- + This function breaks the cell into spherical triangles, and computes + their spherical area. + The function uses the spherical distance calculation given by + ``great_circle_distance()``. + """ + h = _in_scalar(h) + + return _cy.cell_area(h, unit=unit) + + +def edge_length(e, unit='km'): + """ + Compute the spherical length of a specific H3 edge. 
+
+    Parameters
+    ----------
+    e : H3Edge
+    unit: str
+        Unit for length result (``'km'``, ``'m'``, or ``'rads'``)
+
+
+    Returns
+    -------
+    The length of the edge in the given units
+
+
+    Notes
+    -----
+    This function uses the spherical distance calculation given by
+    ``great_circle_distance()``.
+    """
+    e = _in_scalar(e)
+
+    return _cy.edge_length(e, unit=unit)
+
+
+def great_circle_distance(latlng1, latlng2, unit='km'):
+    """
+    Compute the spherical distance between two (lat, lng) points.
+    AKA: great circle distance or "haversine" distance.
+
+    todo: overload to allow two cell inputs?
+
+    Parameters
+    ----------
+    latlng1 : tuple
+        (lat, lng) tuple in degrees
+    latlng2 : tuple
+        (lat, lng) tuple in degrees
+    unit: str
+        Unit for distance result (``'km'``, ``'m'``, or ``'rads'``)
+
+    Returns
+    -------
+    The spherical distance between the points in the given units
+    """
+    lat1, lng1 = latlng1
+    lat2, lng2 = latlng2
+    return _cy.great_circle_distance(
+        lat1, lng1,
+        lat2, lng2,
+        unit = unit
+    )
+
+
+def cell_to_vertex(h, vertex_num):
+    """
+    Return a (specified) vertex of an H3 cell.
+
+    Parameters
+    ----------
+    h : H3Cell
+    vertex_num : int
+        Vertex number (0-5)
+
+    Returns
+    -------
+    The vertex
+    """
+    h = _in_scalar(h)
+    h = _cy.cell_to_vertex(h, vertex_num)
+    return _out_scalar(h)
+
+
+def cell_to_vertexes(h):
+    """
+    Return a list of vertexes of an H3 cell.
+    The list will be of length 5 for pentagons and 6 for hexagons.
+
+    Parameters
+    ----------
+    h : H3Cell
+
+    Returns
+    -------
+    A list of vertexes
+    """
+    h = _in_scalar(h)
+    mv = _cy.cell_to_vertexes(h)
+    return _out_collection(mv)
+
+
+def vertex_to_latlng(v):
+    """
+    Return latitude and longitude of a vertex.
+
+    Returns
+    -------
+    lat : float
+        Latitude
+    lng : float
+        Longitude
+    """
+    v = _in_scalar(v)
+    return _cy.vertex_to_latlng(v)
+
+
+def is_valid_vertex(v):
+    """
+    Validates an H3 vertex.
+
+    Returns
+    -------
+    bool
+    """
+    try:
+        v = _in_scalar(v)
+        return _cy.is_valid_vertex(v)
+    except (ValueError, TypeError):
+        return False
diff --git a/python/h3/api/memview_int/__pycache__/__init__.cpython-312.pyc b/python/h3/api/memview_int/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..88f21afe5
Binary files /dev/null and b/python/h3/api/memview_int/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/h3/api/memview_int/__pycache__/_convert.cpython-312.pyc b/python/h3/api/memview_int/__pycache__/_convert.cpython-312.pyc
new file mode 100644
index 000000000..4bcdde7a6
Binary files /dev/null and b/python/h3/api/memview_int/__pycache__/_convert.cpython-312.pyc differ
diff --git a/python/h3/api/memview_int/_convert.py b/python/h3/api/memview_int/_convert.py
new file mode 100644
index 000000000..2f054fd23
--- /dev/null
+++ b/python/h3/api/memview_int/_convert.py
@@ -0,0 +1,8 @@
+def _id(x):
+    return x
+
+
+_in_scalar = _id
+_out_scalar = _id
+_in_collection = _id
+_out_collection = _id
diff --git a/python/h3/api/numpy_int/__init__.py b/python/h3/api/numpy_int/__init__.py
new file mode 100644
index 000000000..47f1c7eca
--- /dev/null
+++ b/python/h3/api/numpy_int/__init__.py
@@ -0,0 +1,1369 @@
+# This file is **symlinked** across the APIs to ensure they are exactly the same.
+from typing import Literal
+from array import array
+
+from ...
import _cy +from ..._h3shape import ( + H3Shape, + LatLngPoly, + LatLngMultiPoly, + geo_to_h3shape, + h3shape_to_geo, +) + +from ._convert import ( + _in_scalar, + _out_scalar, + _in_collection, + _out_collection, +) + + +def versions(): + """ + Version numbers for the Python (wrapper) and C (wrapped) libraries. + + Versions are output as strings of the form ``'X.Y.Z'``. + C and Python should match on ``X`` (major) and ``Y`` (minor), + but may differ on ``Z`` (patch). + + Returns + ------- + dict like ``{'c': 'X.Y.Z', 'python': 'A.B.C'}`` + """ + from ..._version import __version__ + + v = { + 'c': _cy.c_version(), + 'python': __version__, + } + + return v + + +def str_to_int(h): + """ + Converts a hexadecimal string to an H3 64-bit integer index. + + Parameters + ---------- + h : str + Hexadecimal string like ``'89754e64993ffff'`` + + Returns + ------- + int + Unsigned 64-bit integer + """ + return _cy.str_to_int(h) + + +def int_to_str(x): + """ + Converts an H3 64-bit integer index to a hexadecimal string. + + Parameters + ---------- + x : int + Unsigned 64-bit integer + + Returns + ------- + str + Hexadecimal string like ``'89754e64993ffff'`` + """ + return _cy.int_to_str(x) + + +def get_num_cells(res): + """ + Return the total number of *cells* (hexagons and pentagons) + for the given resolution. + + Returns + ------- + int + """ + return _cy.get_num_cells(res) + + +def average_hexagon_area(res, unit='km^2'): + """ + Return the average area of an H3 *hexagon* + for the given resolution. + + This average *excludes* pentagons. + + Parameters + ---------- + res : int + H3 resolution + unit: str + Unit for area result (``'km^2'``, ``'m^2'``, or ``'rads^2'``) + + Returns + ------- + float + """ + return _cy.average_hexagon_area(res, unit) + + +def average_hexagon_edge_length(res, unit='km'): + """ + Return the average *hexagon* edge length + for the given resolution. + + This average *excludes* pentagons. + + Parameters + ---------- + res : int + H3 resolution + unit: str + Unit for length result (``'km'``, ``'m'``, or ``'rads'``) + + Returns + ------- + float + """ + return _cy.average_hexagon_edge_length(res, unit) + + +def is_valid_index(h): + """Validates *any* H3 index (cell, vertex, or directed edge). + + Returns + ------- + bool + """ + try: + h = _in_scalar(h) + return _cy.is_valid_index(h) + except (ValueError, TypeError): + return False + + +def is_valid_cell(h): + """ + Validates an H3 cell (hexagon or pentagon). + + Returns + ------- + bool + """ + try: + h = _in_scalar(h) + return _cy.is_valid_cell(h) + except (ValueError, TypeError): + return False + + +def is_valid_directed_edge(edge): + """ + Validates an H3 unidirectional edge. + + Returns + ------- + bool + """ + try: + e = _in_scalar(edge) + return _cy.is_valid_directed_edge(e) + except (ValueError, TypeError): + return False + + +def latlng_to_cell(lat, lng, res): + """ + Return the cell containing the (lat, lng) point + for a given resolution. + + Returns + ------- + H3Cell + + """ + return _out_scalar(_cy.latlng_to_cell(lat, lng, res)) + + +def cell_to_latlng(h): + """ + Return the center point of an H3 cell as a lat/lng pair. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + lat : float + Latitude + lng : float + Longitude + """ + h = _in_scalar(h) + return _cy.cell_to_latlng(h) + + +def get_resolution(h): + """ + Return the resolution of an H3 cell. 
+ + Parameters + ---------- + h : H3Cell + + Returns + ------- + int + """ + # todo: could also work for edges + h = _in_scalar(h) + return _cy.get_resolution(h) + + +def cell_to_parent(h, res=None): + """ + Get the parent of a cell. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the parent + If ``None``, then ``res = resolution(h) - 1`` + + Returns + ------- + H3Cell + """ + h = _in_scalar(h) + p = _cy.cell_to_parent(h, res) + p = _out_scalar(p) + + return p + + +def grid_distance(h1, h2): + """ + Compute the grid distance between two cells. + + The grid distance is defined as the length of the shortest + path between the cells in the graph formed by connecting + adjacent cells. + + This function will raise an exception if the + cells are too far apart to compute the distance. + + Parameters + ---------- + h1 : H3Cell + h2 : H3Cell + + Returns + ------- + int + """ + h1 = _in_scalar(h1) + h2 = _in_scalar(h2) + + d = _cy.grid_distance(h1, h2) + + return d + + +def cell_to_boundary(h): + """ + Return tuple of lat/lng pairs describing the cell boundary. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + tuple of (lat, lng) tuples + """ + h = _in_scalar(h) + return _cy.cell_to_boundary(h) + + +def grid_disk(h, k=1): + """ + Return unordered collection of cells with grid distance ``<= k`` from ``h``. + That is, the "filled-in" disk. + + Parameters + ---------- + h : H3Cell + k : int + Size of disk. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h = _in_scalar(h) + mv = _cy.grid_disk(h, k) + + return _out_collection(mv) + + +def grid_ring(h, k=1): + """ + Return unordered collection of cells with grid distance ``== k`` from ``h``. + That is, the "hollow" ring. + + Parameters + ---------- + h : H3Cell + k : int + Size of ring. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h = _in_scalar(h) + mv = _cy.grid_ring(h, k) + + return _out_collection(mv) + + +def cell_to_children_size(h, res=None): + """ + Number of children at resolution ``res`` of given cell. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the children. + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + int + Count of children + """ + h = _in_scalar(h) + return _cy.cell_to_children_size(h, res) + + +def cell_to_children(h, res=None): + """ + Children of a cell as an unordered collection. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the children. + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + h = _in_scalar(h) + mv = _cy.cell_to_children(h, res) + + return _out_collection(mv) + + +def cell_to_child_pos(child, res_parent): + """ + Child position index of given cell, with respect to its parent at ``res_parent``. + + The reverse operation can be done with ``child_pos_to_cell``. + + Parameters + ---------- + child : H3Cell + res_parent : int + + Returns + ------- + int + Integer index of the child with respect to parent cell. 
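+
+    Examples
+    --------
+    Illustrative round-trip with ``child_pos_to_cell``, assuming ``child``
+    is any valid cell and ``res_parent`` a coarser resolution:
+
+    >>> parent = cell_to_parent(child, res_parent)
+    >>> pos = cell_to_child_pos(child, res_parent)
+    >>> child_pos_to_cell(parent, get_resolution(child), pos) == child
+    True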
+ """ + child = _in_scalar(child) + return _cy.cell_to_child_pos(child, res_parent) + + +def child_pos_to_cell(parent, res_child, child_pos): + """ + Get child H3 cell from a parent cell, child resolution, and child position index. + + The reverse operation can be done with ``cell_to_child_pos``. + + Parameters + ---------- + parent : H3Cell + res_child : int + Child cell resolution + child_pos : int + Integer position of child cell, releative to parent. + + + Returns + ------- + H3Cell + """ + parent = _in_scalar(parent) + child = _cy.child_pos_to_cell(parent, res_child, child_pos) + child = _out_scalar(child) + + return child + + +# todo: nogil for expensive C operation? +def compact_cells(cells): + """ + Compact a collection of H3 cells by combining + smaller cells into larger cells, if all child cells + are present. Input cells must all share the same resolution. + + Parameters + ---------- + cells : iterable of H3 Cells + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + hu = _in_collection(cells) + hc = _cy.compact_cells(hu) + + return _out_collection(hc) + + +def uncompact_cells(cells, res): + """ + Reverse the ``compact_cells`` operation. + + Return a collection of H3 cells, all of resolution ``res``. + + Parameters + ---------- + cells : iterable of H3Cell + res : int + Resolution of desired output cells. + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + # TODO: add test to make sure an error is returned when input contains cell + # smaller than output res. + + hc = _in_collection(cells) + hu = _cy.uncompact_cells(hc, res) + + return _out_collection(hu) + + +def polygon_to_cells(h3shape, res): + """ + Alias for ``h3shape_to_cells``. + """ + return h3shape_to_cells(h3shape, res) + + +def h3shape_to_cells(h3shape, res): + """ + Return the collection of H3 cells at a given resolution whose center points + are contained within an ``LatLngPoly`` or ``LatLngMultiPoly``. + + Parameters + ---------- + h3shape : ``H3Shape`` + res : int + Resolution of the output cells + + Returns + ------- + list of H3Cell + + Examples + -------- + + >>> poly = LatLngPoly( + ... [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), + ... (37.82, -122.54)], + ... ) + >>> h3.h3shape_to_cells(poly, 6) + ['862830807ffffff', + '862830827ffffff', + '86283082fffffff', + '862830877ffffff', + '862830947ffffff', + '862830957ffffff', + '86283095fffffff'] + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + + # todo: not sure if i want this dispatch logic here. maybe in the objects? + if isinstance(h3shape, LatLngPoly): + poly = h3shape + mv = _cy.polygon_to_cells(poly.outer, res=res, holes=poly.holes) + elif isinstance(h3shape, LatLngMultiPoly): + mpoly = h3shape + mv = _cy.polygons_to_cells(mpoly.polys, res=res) + elif isinstance(h3shape, H3Shape): + raise ValueError('Unrecognized H3Shape: ' + str(h3shape)) + else: + raise ValueError('Unrecognized type: ' + str(type(h3shape))) + + return _out_collection(mv) + + +def polygon_to_cells_experimental( + h3shape: H3Shape, + res: int, + contain: Literal['center', 'full', 'overlap', 'bbox_overlap'] = 'center', +): + """ + Alias for ``h3shape_to_cells_experimental``. 
+ """ + return h3shape_to_cells_experimental(h3shape, res, contain) + + +def h3shape_to_cells_experimental( + h3shape: H3Shape, + res: int, + contain: Literal['center', 'full', 'overlap', 'bbox_overlap'] = 'center', +): + """ + Experimental function similar to ``h3shape_to_cells``, but with support for + multiple cell containment modes. + + Using ``contain='center'`` should give identical behavior as + ``h3shape_to_cells``. + + Note that this function is **experimental** and has no API stability gaurantees + across versions, so it may change in the future. + + + Parameters + ---------- + h3shape : ``H3Shape`` + res : int + Resolution of the output cells + contain : {'center', 'full', 'overlap', 'bbox_overlap'}, optional + Specifies the containment condition. + - 'center': Cell center is contained in shape + - 'full': Cell is fully contained in shape + - 'overlap': Cell is partially contained in shape + - 'bbox_overlap': Cell bounding box is partially contained in shape + + Default is 'center'. + + Returns + ------- + list of H3Cell + + Examples + -------- + + >>> poly = LatLngPoly( + ... [(37.68, -122.54), (37.68, -122.34), (37.82, -122.34), + ... (37.82, -122.54)], + ... ) + >>> h3.h3shape_to_cells_experimental(poly, 6, 'center') + ['862830807ffffff', + '862830827ffffff', + '86283082fffffff', + '862830877ffffff', + '862830947ffffff', + '862830957ffffff', + '86283095fffffff'] + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + + contain_modes = { + 'center': 0, + 'full': 1, + 'overlap': 2, + 'bbox_overlap': 3, + } + + flag = contain_modes[contain] + + # todo: not sure if i want this dispatch logic here. maybe in the objects? + if isinstance(h3shape, LatLngPoly): + poly = h3shape + mv = _cy.polygon_to_cells_experimental( + poly.outer, + res = res, + holes = poly.holes, + flag = flag, + ) + elif isinstance(h3shape, LatLngMultiPoly): + mpoly = h3shape + mv = _cy.polygons_to_cells_experimental(mpoly.polys, res=res, flag=flag) + elif isinstance(h3shape, H3Shape): + raise ValueError('Unrecognized H3Shape: ' + str(h3shape)) + else: + raise ValueError('Unrecognized type: ' + str(type(h3shape))) + + return _out_collection(mv) + + +def cells_to_h3shape(cells, *, tight=True): + """ + Return an ``H3Shape`` describing the area covered by a collection of H3 cells. + Will return ``LatLngPoly`` or ``LatLngMultiPoly``. + + Parameters + ---------- + cells : iterable of H3 cells + tight : bool + If True, return ``LatLngPoly`` if possible. + If False, always return ``LatLngMultiPoly``. + + Returns + ------- + LatLngPoly | LatLngMultiPoly + + Examples + -------- + + >>> cells = ['8428309ffffffff', '842830dffffffff'] + >>> h3.cells_to_h3shape(cells, tight=True) + + >>> h3.cells_to_h3shape(cells, tight=False) + + """ + cells = _in_collection(cells) + mpoly = _cy.cells_to_multi_polygon(cells) + + polys = [LatLngPoly(*poly) for poly in mpoly] + out = LatLngMultiPoly(*polys) + + if tight and len(out) == 1: + out = out[0] + + return out + + +def geo_to_cells(geo, res): + """Convert from ``__geo_interface__`` to cells. + + Parameters + ---------- + geo : an object implementing ``__geo_interface__`` or a dictionary in that format. + Both ``LatLngPoly`` and ``LatLngMultiPoly`` implement the interface. + res : int + Resolution of desired output cells. + + Notes + ----- + There is currently no guaranteed order of the output cells. 
+ """ + h3shape = geo_to_h3shape(geo) + return h3shape_to_cells(h3shape, res) + + +def cells_to_geo(cells, tight=True): + """ + Convert from cells to a ``__geo_interface__`` dict. + + Parameters + ---------- + cells : iterable of H3 Cells + tight : bool + When ``True``, returns an ``LatLngPoly`` when possible. + When ``False``, always returns an ``LatLngMultiPoly``. + + Returns + ------- + dict + in `__geo_interface__` format + """ + h3shape = cells_to_h3shape(cells, tight=tight) + return h3shape_to_geo(h3shape) + + +def is_pentagon(h): + """ + Identify if an H3 cell is a pentagon. + + Parameters + ---------- + h : H3Index + + Returns + ------- + bool + ``True`` if input is a valid H3 cell which is a pentagon. + + Notes + ----- + A pentagon should *also* pass ``is_valid_cell()``. + Will return ``False`` for valid H3Edge. + """ + return _cy.is_pentagon(_in_scalar(h)) + + +def get_base_cell_number(h): + """ + Return the base cell *number* (``0`` to ``121``) of the given cell. + + The base cell *number* and the H3Index are two different representations + of the same cell: the parent cell of resolution ``0``. + + The base cell *number* is encoded within the corresponding + H3Index. + + todo: could work with edges + + Parameters + ---------- + h : H3Cell + + Returns + ------- + int + + Examples + -------- + >>> h = construct_cell(57, 2, 1, 4) + >>> h + '83728cfffffffff' + >> get_base_cell_number(h) + 57 + """ + return _cy.get_base_cell_number(_in_scalar(h)) + + +def get_index_digit(h, res): + """ + Get the index digit of a cell at the given resolution. + + Parameters + ---------- + h : H3Cell + Cell whose index digit will be returned. + res : int + Resolution (``>= 1``) at which to read the digit. + + Returns + ------- + int + The index digit at the requested resolution. + + Examples + -------- + >>> h = construct_cell(7, 2, 1, 4) + >>> h + '830e8cfffffffff' + >>> get_index_digit(h, 1) + 2 + >>> get_index_digit(h, 2) + 1 + >>> get_index_digit(h, 3) + 4 + """ + return _cy.get_index_digit(_in_scalar(h), res) + + +def construct_cell(base_cell_number, *digits, res=None): + """ + Construct cell from base cell and digits. + + Parameters + ---------- + base_cell_number : int + Base cell *number* (``0`` to ``121``). + *digits : int + Sequence of index digits (``0`` to ``6``). + Length of digits will be the resulting resolution of the output cell. + res : int, optional + Resolution of the constructed cell. If provided, it must equal + ``len(digits)``; otherwise it is inferred from the number of digits. + + Returns + ------- + H3Cell + The constructed cell. + + Examples + -------- + >>> construct_cell(7, 2, 1, 4) # resolution 3 cell + '830e8cfffffffff' + + >>> construct_cell(15, 0, 0, 5, 3) # resolution 4 cell + '841e057ffffffff' + + >>> construct_cell(15, 0, 0, 5, 3, res=4) + '841e057ffffffff' + """ + if (res is not None) and (len(digits) != res): + raise ValueError('Resolution must match number of digits.') + + digits = array('i', digits) + o = _cy.construct_cell(base_cell_number, digits) + return _out_scalar(o) + + +def deconstruct_cell(h): + """ + Deconstruct cell into base cell and digits. + + Parameters + ---------- + h : H3Cell + Cell to deconstruct. 
+
+    Returns
+    -------
+    list of int
+        [base_cell_number, digit1, digit2, ..., digitN]
+
+    Examples
+    --------
+    >>> h = construct_cell(7, 2, 1, 4) # resolution 3 cell
+    >>> h
+    '830e8cfffffffff'
+    >>> deconstruct_cell(h)
+    [7, 2, 1, 4]
+
+    >>> h = construct_cell(15, 0, 0, 5, 3) # resolution 4 cell
+    >>> h
+    '841e057ffffffff'
+    >>> deconstruct_cell(h)
+    [15, 0, 0, 5, 3]
+    >>> construct_cell(*deconstruct_cell(h), 0) == cell_to_center_child(h)
+    True
+    """
+    res = get_resolution(h)
+    bc = get_base_cell_number(h)
+    digits = [get_index_digit(h, r + 1) for r in range(res)]
+
+    return [bc, *digits]
+
+
+def are_neighbor_cells(h1, h2):
+    """
+    Returns ``True`` if ``h1`` and ``h2`` are neighboring cells.
+
+    Parameters
+    ----------
+    h1 : H3Cell
+    h2 : H3Cell
+
+    Returns
+    -------
+    bool
+    """
+    h1 = _in_scalar(h1)
+    h2 = _in_scalar(h2)
+
+    return _cy.are_neighbor_cells(h1, h2)
+
+
+def cells_to_directed_edge(origin, destination):
+    """
+    Create an H3 Index denoting a unidirectional edge.
+
+    The edge is constructed from neighboring cells ``origin`` and
+    ``destination``.
+
+    Parameters
+    ----------
+    origin : H3Cell
+    destination : H3Cell
+
+    Raises
+    ------
+    ValueError
+        When cells are not adjacent.
+
+    Returns
+    -------
+    H3Edge
+    """
+    o = _in_scalar(origin)
+    d = _in_scalar(destination)
+    e = _cy.cells_to_directed_edge(o, d)
+    e = _out_scalar(e)
+
+    return e
+
+
+def get_directed_edge_origin(e):
+    """
+    Origin cell from an H3 directed edge.
+
+    Parameters
+    ----------
+    e : H3Edge
+
+    Returns
+    -------
+    H3Cell
+    """
+    e = _in_scalar(e)
+    o = _cy.get_directed_edge_origin(e)
+    o = _out_scalar(o)
+
+    return o
+
+
+def get_directed_edge_destination(e):
+    """
+    Destination cell from an H3 directed edge.
+
+    Parameters
+    ----------
+    e : H3Edge
+
+    Returns
+    -------
+    H3Cell
+    """
+    e = _in_scalar(e)
+    d = _cy.get_directed_edge_destination(e)
+    d = _out_scalar(d)
+
+    return d
+
+
+def directed_edge_to_cells(e):
+    """
+    Return (origin, destination) tuple from H3 directed edge
+
+    Parameters
+    ----------
+    e : H3Edge
+
+    Returns
+    -------
+    H3Cell
+        Origin cell of edge
+    H3Cell
+        Destination cell of edge
+    """
+    e = _in_scalar(e)
+    o, d = _cy.directed_edge_to_cells(e)
+    o, d = _out_scalar(o), _out_scalar(d)
+
+    return o, d
+
+
+def origin_to_directed_edges(origin):
+    """
+    Return all directed edges starting from ``origin`` cell.
+
+    Parameters
+    ----------
+    origin : H3Cell
+
+    Returns
+    -------
+    unordered collection of H3Edge
+    """
+    mv = _cy.origin_to_directed_edges(_in_scalar(origin))
+
+    return _out_collection(mv)
+
+
+def directed_edge_to_boundary(edge):
+    """
+    Returns points representing the edge (line of points
+    describing the boundary between two cells).
+
+    Parameters
+    ----------
+    edge : H3Edge
+
+    Returns
+    -------
+    tuple of (lat, lng) tuples
+    """
+    return _cy.directed_edge_to_boundary(_in_scalar(edge))
+
+
+def grid_path_cells(start, end):
+    """
+    Returns the ordered collection of cells denoting a
+    minimum-length non-unique path between cells.
+
+    Parameters
+    ----------
+    start : H3Cell
+    end : H3Cell
+
+    Returns
+    -------
+    ordered collection of H3Cell
+        Starting with ``start``, and ending with ``end``.
+    """
+    mv = _cy.grid_path_cells(_in_scalar(start), _in_scalar(end))
+
+    return _out_collection(mv)
+
+
+def is_res_class_III(h):
+    """
+    Determine if cell has orientation "Class II" or "Class III".
+
+    The orientation of pentagons/hexagons on the icosahedron can be one
+    of two types: "Class II" or "Class III".
+ + All cells within a resolution have the same type, and the type + alternates between resolutions. + + "Class II" cells have resolutions: 0,2,4,6,8,10,12,14 + "Class III" cells have resolutions: 1,3,5,7,9,11,13,15 + + Parameters + ---------- + h : H3Cell + + Returns + ------- + bool + ``True`` if ``h`` is "Class III". + ``False`` if ``h`` is "Class II". + + References + ---------- + 1. https://uber.github.io/h3/#/documentation/core-library/coordinate-systems + """ + return _cy.is_res_class_iii(_in_scalar(h)) + + +def get_pentagons(res): + """ + Return all pentagons at a given resolution. + + Parameters + ---------- + res : int + Resolution of the pentagons + + Returns + ------- + unordered collection of H3Cell + """ + mv = _cy.get_pentagons(res) + + return _out_collection(mv) + + +def get_res0_cells(): + """ + Return all cells at resolution 0. + + Parameters + ---------- + None + + Returns + ------- + unordered collection of H3Cell + + Notes + ----- + There is currently no guaranteed order of the output cells. + """ + mv = _cy.get_res0_cells() + + return _out_collection(mv) + + +def cell_to_center_child(h, res=None): + """ + Get the center child of a cell at some finer resolution. + + Parameters + ---------- + h : H3Cell + res : int or None, optional + The resolution for the child cell + If ``None``, then ``res = resolution(h) + 1`` + + Returns + ------- + H3Cell + """ + h = _in_scalar(h) + p = _cy.cell_to_center_child(h, res) + p = _out_scalar(p) + + return p + + +def get_icosahedron_faces(h): + """ + Return icosahedron faces intersecting a given H3 cell. + + There are twenty possible faces, ranging from 0--19. + + Note: Every interface returns a Python ``set`` of ``int``. + + Parameters + ---------- + h : H3Cell + + Returns + ------- + Python ``set`` of ``int`` + """ + h = _in_scalar(h) + faces = _cy.get_icosahedron_faces(h) + + return faces + + +def cell_to_local_ij(origin, h): + """ + Return local (i,j) coordinates of cell ``h`` in relation to ``origin`` cell + + + Parameters + ---------- + origin : H3Cell + Origin/central cell for defining i,j coordinates. + h: H3Cell + Destination cell whose i,j coordinates we'd like, based off + of the origin cell. + + + Returns + ------- + Tuple (i, j) of integer local coordinates of cell ``h`` + + + Notes + ----- + + The ``origin`` cell does not define (0, 0) for the IJ coordinate space. + (0, 0) refers to the center of the base cell containing origin at the + resolution of `origin`. + Subtracting the IJ coordinates of ``origin`` from every cell would get + you the property of (0, 0) being the ``origin``. + + This is done so we don't need to keep recomputing the coordinates of + ``origin`` if not needed. + """ + origin = _in_scalar(origin) + h = _in_scalar(h) + + i, j = _cy.cell_to_local_ij(origin, h) + + return i, j + + +def local_ij_to_cell(origin, i, j): + """ + Return cell at local (i,j) position relative to the ``origin`` cell. + + Parameters + ---------- + origin : H3Cell + Origin/central cell for defining i,j coordinates. + i, j: int + Integer coordinates with respect to ``origin`` cell. + + + Returns + ------- + H3Cell at local (i,j) position relative to the ``origin`` cell + + + Notes + ----- + + The ``origin`` cell does not define (0, 0) for the IJ coordinate space. + (0, 0) refers to the center of the base cell containing origin at the + resolution of ``origin``. + Subtracting the IJ coordinates of ``origin`` from every cell would get + you the property of (0, 0) being the ``origin``. 
+
+    This is done so we don't need to keep recomputing the coordinates of
+    ``origin`` if not needed.
+    """
+    origin = _in_scalar(origin)
+
+    h = _cy.local_ij_to_cell(origin, i, j)
+    h = _out_scalar(h)
+
+    return h
+
+
+def cell_area(h, unit='km^2'):
+    """
+    Compute the spherical surface area of a specific H3 cell.
+
+    Parameters
+    ----------
+    h : H3Cell
+    unit: str
+        Unit for area result (``'km^2'``, ``'m^2'``, or ``'rads^2'``)
+
+
+    Returns
+    -------
+    The area of the H3 cell in the given units
+
+
+    Notes
+    -----
+    This function breaks the cell into spherical triangles, and computes
+    their spherical area.
+    The function uses the spherical distance calculation given by
+    ``great_circle_distance()``.
+    """
+    h = _in_scalar(h)
+
+    return _cy.cell_area(h, unit=unit)
+
+
+def edge_length(e, unit='km'):
+    """
+    Compute the spherical length of a specific H3 edge.
+
+    Parameters
+    ----------
+    e : H3Edge
+    unit: str
+        Unit for length result (``'km'``, ``'m'``, or ``'rads'``)
+
+
+    Returns
+    -------
+    The length of the edge in the given units
+
+
+    Notes
+    -----
+    This function uses the spherical distance calculation given by
+    ``great_circle_distance()``.
+    """
+    e = _in_scalar(e)
+
+    return _cy.edge_length(e, unit=unit)
+
+
+def great_circle_distance(latlng1, latlng2, unit='km'):
+    """
+    Compute the spherical distance between two (lat, lng) points.
+    AKA: great circle distance or "haversine" distance.
+
+    todo: overload to allow two cell inputs?
+
+    Parameters
+    ----------
+    latlng1 : tuple
+        (lat, lng) tuple in degrees
+    latlng2 : tuple
+        (lat, lng) tuple in degrees
+    unit: str
+        Unit for distance result (``'km'``, ``'m'``, or ``'rads'``)
+
+    Returns
+    -------
+    The spherical distance between the points in the given units
+    """
+    lat1, lng1 = latlng1
+    lat2, lng2 = latlng2
+    return _cy.great_circle_distance(
+        lat1, lng1,
+        lat2, lng2,
+        unit = unit
+    )
+
+
+def cell_to_vertex(h, vertex_num):
+    """
+    Return a (specified) vertex of an H3 cell.
+
+    Parameters
+    ----------
+    h : H3Cell
+    vertex_num : int
+        Vertex number (0-5)
+
+    Returns
+    -------
+    The vertex
+    """
+    h = _in_scalar(h)
+    h = _cy.cell_to_vertex(h, vertex_num)
+    return _out_scalar(h)
+
+
+def cell_to_vertexes(h):
+    """
+    Return a list of vertexes of an H3 cell.
+    The list will be of length 5 for pentagons and 6 for hexagons.
+
+    Parameters
+    ----------
+    h : H3Cell
+
+    Returns
+    -------
+    A list of vertexes
+    """
+    h = _in_scalar(h)
+    mv = _cy.cell_to_vertexes(h)
+    return _out_collection(mv)
+
+
+def vertex_to_latlng(v):
+    """
+    Return latitude and longitude of a vertex.
+
+    Returns
+    -------
+    lat : float
+        Latitude
+    lng : float
+        Longitude
+    """
+    v = _in_scalar(v)
+    return _cy.vertex_to_latlng(v)
+
+
+def is_valid_vertex(v):
+    """
+    Validates an H3 vertex.
+ + Returns + ------- + bool + """ + try: + v = _in_scalar(v) + return _cy.is_valid_vertex(v) + except (ValueError, TypeError): + return False diff --git a/python/h3/api/numpy_int/__pycache__/__init__.cpython-312.pyc b/python/h3/api/numpy_int/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..09e99de2e Binary files /dev/null and b/python/h3/api/numpy_int/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/h3/api/numpy_int/__pycache__/_convert.cpython-312.pyc b/python/h3/api/numpy_int/__pycache__/_convert.cpython-312.pyc new file mode 100644 index 000000000..1f92eb3a6 Binary files /dev/null and b/python/h3/api/numpy_int/__pycache__/_convert.cpython-312.pyc differ diff --git a/python/h3/api/numpy_int/_convert.py b/python/h3/api/numpy_int/_convert.py new file mode 100644 index 000000000..6b293cc9c --- /dev/null +++ b/python/h3/api/numpy_int/_convert.py @@ -0,0 +1,15 @@ +def _in_scalar(x): + return x + + +_out_scalar = _in_scalar + + +def _in_collection(x): + import numpy as np + # array is copied only if dtype does not match + # `list`s should work, but not `set`s of integers + return np.asarray(x, dtype='uint64') + + +_out_collection = _in_collection diff --git a/python/include/h3/h3api.h b/python/include/h3/h3api.h new file mode 100644 index 000000000..dc2505b04 --- /dev/null +++ b/python/include/h3/h3api.h @@ -0,0 +1,851 @@ +/* + * Copyright 2016-2021 Uber Technologies, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** @file h3api.h + * @brief Primary H3 core library entry points. + * + * This file defines the public API of the H3 library. Incompatible changes to + * these functions require the library's major version be increased. + */ + +#ifndef H3API_H +#define H3API_H + +/* + * Preprocessor code to support renaming (prefixing) the public API. + * All public functions should be wrapped in H3_EXPORT so they can be + * renamed. + */ +#ifdef H3_PREFIX +#define XTJOIN(a, b) a##b +#define TJOIN(a, b) XTJOIN(a, b) + +/* export joins the user provided prefix with our exported function name */ +#define H3_EXPORT(name) TJOIN(H3_PREFIX, name) +#else +#define H3_EXPORT(name) name +#endif + +/* Windows DLL requires attributes indicating what to export */ +#if _WIN32 && BUILD_SHARED_LIBS +#if BUILDING_H3 +#define DECLSPEC __declspec(dllexport) +#else +#define DECLSPEC __declspec(dllimport) +#endif +#else +#define DECLSPEC +#endif + +/* For uint64_t */ +#include <stdint.h> +/* For size_t */ +#include <stddef.h> + +/* + * H3 is compiled as C, not C++ code. `extern "C"` is needed for C++ code + * to be able to use the library. + */ +#ifdef __cplusplus +extern "C" { +#endif + +/** @brief Identifier for an object (cell, edge, etc) in the H3 system. + * + * The H3Index fits within a 64-bit unsigned integer. + */ +typedef uint64_t H3Index; + +/** + * Invalid index used to indicate an error from latLngToCell and related + * functions or missing data in arrays of H3 indices. Analogous to NaN in + * floating point.
+ */ +#define H3_NULL 0 + +/** @brief Result code (success or specific error) from an H3 operation */ +typedef uint32_t H3Error; + +typedef enum { + E_SUCCESS = 0, // Success (no error) + E_FAILED = 1, // The operation failed but a more specific error is not + // available + E_DOMAIN = 2, // Argument was outside the acceptable range (when a more + // specific error code is not available) + E_LATLNG_DOMAIN = 3, // Latitude or longitude arguments were outside the + // acceptable range + E_RES_DOMAIN = 4, // Resolution argument was outside the acceptable range + E_CELL_INVALID = 5, // `H3Index` cell argument was not valid + E_DIR_EDGE_INVALID = 6, // `H3Index` directed edge argument was not valid + E_UNDIR_EDGE_INVALID = + 7, // `H3Index` undirected edge argument was not valid + E_VERTEX_INVALID = 8, // `H3Index` vertex argument was not valid + E_PENTAGON = 9, // Pentagon distortion was encountered which the algorithm + // could not handle + E_DUPLICATE_INPUT = 10, // Duplicate input was encountered in the arguments + // and the algorithm could not handle it + E_NOT_NEIGHBORS = 11, // `H3Index` cell arguments were not neighbors + E_RES_MISMATCH = + 12, // `H3Index` cell arguments had incompatible resolutions + E_MEMORY_ALLOC = 13, // Necessary memory allocation failed + E_MEMORY_BOUNDS = 14, // Bounds of provided memory were not large enough + + E_OPTION_INVALID = 15, // Mode or flags argument was not valid + E_INDEX_INVALID = 16, // `H3Index` argument was not valid + E_BASE_CELL_DOMAIN = + 17, // Base cell number was outside of acceptable range + E_DIGIT_DOMAIN = 18, // Child digits invalid + E_DELETED_DIGIT = 19, // Deleted subsequence indicates invalid index + + // Sentinel value; not a real error. One past the last valid code. + H3_ERROR_END +} H3ErrorCodes; + +/** @defgroup describeH3Error describeH3Error + * Functions for describeH3Error + * @{ + */ +/** @brief converts the provided H3Error value into a description string */ +DECLSPEC const char *H3_EXPORT(describeH3Error)(H3Error err); +/** @} */ + +/* library version numbers generated from VERSION file */ +// clang-format off +#define H3_VERSION_MAJOR 4 +#define H3_VERSION_MINOR 4 +#define H3_VERSION_PATCH 1 +// clang-format on + +/** Maximum number of cell boundary vertices; worst case is pentagon: + * 5 original verts + 5 edge crossings + */ +#define MAX_CELL_BNDRY_VERTS 10 + +/** @struct LatLng + @brief latitude/longitude in radians +*/ +typedef struct { + double lat; ///< latitude in radians + double lng; ///< longitude in radians +} LatLng; + +/** @struct CellBoundary + @brief cell boundary in latitude/longitude +*/ +typedef struct { + int numVerts; ///< number of vertices + LatLng verts[MAX_CELL_BNDRY_VERTS]; ///< vertices in ccw order +} CellBoundary; + +/** @struct GeoLoop + * @brief similar to CellBoundary, but requires more alloc work + */ +typedef struct { + int numVerts; + LatLng *verts; +} GeoLoop; + +/** @struct GeoPolygon + * @brief Simplified core of GeoJSON Polygon coordinates definition + */ +typedef struct { + GeoLoop geoloop; ///< exterior boundary of the polygon + int numHoles; ///< number of elements in the array pointed to by holes + GeoLoop *holes; ///< interior boundaries (holes) in the polygon +} GeoPolygon; + +/** @struct GeoMultiPolygon + * @brief Simplified core of GeoJSON MultiPolygon coordinates definition + */ +typedef struct { + int numPolygons; + GeoPolygon *polygons; +} GeoMultiPolygon; + +/** + * Values representing polyfill containment modes, to be used in + * the `flags` bit field for 
`polygonToCellsExperimental`. + */ +typedef enum { + CONTAINMENT_CENTER = 0, ///< Cell center is contained in the shape + CONTAINMENT_FULL = 1, ///< Cell is fully contained in the shape + CONTAINMENT_OVERLAPPING = 2, ///< Cell overlaps the shape at any point + CONTAINMENT_OVERLAPPING_BBOX = 3, ///< Cell bounding box overlaps shape + CONTAINMENT_INVALID = 4 ///< This mode is invalid and should not be used +} ContainmentMode; + +/** @struct LinkedLatLng + * @brief A coordinate node in a linked geo structure, part of a linked list + */ +typedef struct LinkedLatLng LinkedLatLng; +struct LinkedLatLng { + LatLng vertex; + LinkedLatLng *next; +}; + +/** @struct LinkedGeoLoop + * @brief A loop node in a linked geo structure, part of a linked list + */ +typedef struct LinkedGeoLoop LinkedGeoLoop; +struct LinkedGeoLoop { + LinkedLatLng *first; + LinkedLatLng *last; + LinkedGeoLoop *next; +}; + +/** @struct LinkedGeoPolygon + * @brief A polygon node in a linked geo structure, part of a linked list. + */ +typedef struct LinkedGeoPolygon LinkedGeoPolygon; +struct LinkedGeoPolygon { + LinkedGeoLoop *first; + LinkedGeoLoop *last; + LinkedGeoPolygon *next; +}; + +/** @struct CoordIJ + * @brief IJ hexagon coordinates + * + * Each axis is spaced 120 degrees apart. + */ +typedef struct { + int i; ///< i component + int j; ///< j component +} CoordIJ; + +/** @defgroup latLngToCell latLngToCell + * Functions for latLngToCell + * @{ + */ +/** @brief find the H3 index of the resolution res cell containing the lat/lng + */ +DECLSPEC H3Error H3_EXPORT(latLngToCell)(const LatLng *g, int res, + H3Index *out); +/** @} */ + +/** @defgroup cellToLatLng cellToLatLng + * Functions for cellToLatLng + * @{ + */ +/** @brief find the lat/lng center point g of the cell h3 */ +DECLSPEC H3Error H3_EXPORT(cellToLatLng)(H3Index h3, LatLng *g); +/** @} */ + +/** @defgroup cellToBoundary cellToBoundary + * Functions for cellToBoundary + * @{ + */ +/** @brief give the cell boundary in lat/lng coordinates for the cell h3 */ +DECLSPEC H3Error H3_EXPORT(cellToBoundary)(H3Index h3, CellBoundary *gp); +/** @} */ + +/** @defgroup gridDisk gridDisk + * Functions for gridDisk + * @{ + */ +/** @brief maximum number of hexagons in k-ring */ +DECLSPEC H3Error H3_EXPORT(maxGridDiskSize)(int k, int64_t *out); + +/** @brief hexagons neighbors in all directions, assuming no pentagons */ +DECLSPEC H3Error H3_EXPORT(gridDiskUnsafe)(H3Index origin, int k, H3Index *out); +/** @} */ + +/** @brief hexagons neighbors in all directions, assuming no pentagons, + * reporting distance from origin */ +DECLSPEC H3Error H3_EXPORT(gridDiskDistancesUnsafe)(H3Index origin, int k, + H3Index *out, + int *distances); + +/** @brief hexagons neighbors in all directions reporting distance from origin + */ +DECLSPEC H3Error H3_EXPORT(gridDiskDistancesSafe)(H3Index origin, int k, + H3Index *out, int *distances); + +/** @brief collection of hex rings sorted by ring for all given hexagons */ +DECLSPEC H3Error H3_EXPORT(gridDisksUnsafe)(H3Index *h3Set, int length, int k, + H3Index *out); + +/** @brief hexagon neighbors in all directions */ +DECLSPEC H3Error H3_EXPORT(gridDisk)(H3Index origin, int k, H3Index *out); +/** @} */ + +/** @defgroup gridDiskDistances gridDiskDistances + * Functions for gridDiskDistances + * @{ + */ +/** @brief hexagon neighbors in all directions, reporting distance from origin + */ +DECLSPEC H3Error H3_EXPORT(gridDiskDistances)(H3Index origin, int k, + H3Index *out, int *distances); +/** @} */ + +/** @defgroup gridRing gridRing + * Functions for 
gridRing + * @{ + */ +/** @brief maximum number of hexagons in hollow k-ring */ +DECLSPEC H3Error H3_EXPORT(maxGridRingSize)(int k, int64_t *out); + +/** @brief hollow hexagon ring k distance from origin */ +DECLSPEC H3Error H3_EXPORT(gridRingUnsafe)(H3Index origin, int k, H3Index *out); + +/** @brief hollow hexagon ring k distance from origin */ +DECLSPEC H3Error H3_EXPORT(gridRing)(H3Index origin, int k, H3Index *out); +/** @} */ + +/** @defgroup polygonToCells polygonToCells + * Functions for polygonToCells + * @{ + */ +/** @brief maximum number of cells that could be in the polygon */ +DECLSPEC H3Error H3_EXPORT(maxPolygonToCellsSize)(const GeoPolygon *geoPolygon, + int res, uint32_t flags, + int64_t *out); + +/** @brief cells within the given polygon */ +DECLSPEC H3Error H3_EXPORT(polygonToCells)(const GeoPolygon *geoPolygon, + int res, uint32_t flags, + H3Index *out); +/** @} */ + +/** @defgroup polygonToCellsExperimental polygonToCellsExperimental + * Functions for polygonToCellsExperimental. + * This is an experimental-only API and is subject to change in minor versions. + * @{ + */ +/** @brief maximum number of cells that could be in the polygon */ +DECLSPEC H3Error H3_EXPORT(maxPolygonToCellsSizeExperimental)( + const GeoPolygon *polygon, int res, uint32_t flags, int64_t *out); + +/** @brief cells within the given polygon */ +DECLSPEC H3Error H3_EXPORT(polygonToCellsExperimental)( + const GeoPolygon *polygon, int res, uint32_t flags, int64_t size, + H3Index *out); +/** @} */ + +/** @defgroup cellsToMultiPolygon cellsToMultiPolygon + * Functions for cellsToMultiPolygon (currently a binding-only concept) + * @{ + */ +/** @brief Create a LinkedGeoPolygon from a set of contiguous hexagons */ +DECLSPEC H3Error H3_EXPORT(cellsToLinkedMultiPolygon)(const H3Index *h3Set, + const int numHexes, + LinkedGeoPolygon *out); + +/** @brief Free all memory created for a LinkedGeoPolygon */ +DECLSPEC void H3_EXPORT(destroyLinkedMultiPolygon)(LinkedGeoPolygon *polygon); +/** @} */ + +/** @defgroup degsToRads degsToRads + * Functions for degsToRads + * @{ + */ +/** @brief converts degrees to radians */ +DECLSPEC double H3_EXPORT(degsToRads)(double degrees); +/** @} */ + +/** @defgroup radsToDegs radsToDegs + * Functions for radsToDegs + * @{ + */ +/** @brief converts radians to degrees */ +DECLSPEC double H3_EXPORT(radsToDegs)(double radians); +/** @} */ + +/** @defgroup greatCircleDistance greatCircleDistance + * Functions for distance + * @{ + */ +/** @brief "great circle distance" between pairs of LatLng points in radians*/ +DECLSPEC double H3_EXPORT(greatCircleDistanceRads)(const LatLng *a, + const LatLng *b); + +/** @brief "great circle distance" between pairs of LatLng points in + * kilometers*/ +DECLSPEC double H3_EXPORT(greatCircleDistanceKm)(const LatLng *a, + const LatLng *b); + +/** @brief "great circle distance" between pairs of LatLng points in meters*/ +DECLSPEC double H3_EXPORT(greatCircleDistanceM)(const LatLng *a, + const LatLng *b); +/** @} */ + +/** @defgroup getHexagonAreaAvg getHexagonAreaAvg + * Functions for getHexagonAreaAvg + * @{ + */ +/** @brief average hexagon area in square kilometers (excludes pentagons) */ +DECLSPEC H3Error H3_EXPORT(getHexagonAreaAvgKm2)(int res, double *out); + +/** @brief average hexagon area in square meters (excludes pentagons) */ +DECLSPEC H3Error H3_EXPORT(getHexagonAreaAvgM2)(int res, double *out); +/** @} */ + +/** @defgroup cellArea cellArea + * Functions for cellArea + * @{ + */ +/** @brief exact area for a specific cell (hexagon or 
pentagon) in radians^2 */ +DECLSPEC H3Error H3_EXPORT(cellAreaRads2)(H3Index h, double *out); + +/** @brief exact area for a specific cell (hexagon or pentagon) in kilometers^2 + */ +DECLSPEC H3Error H3_EXPORT(cellAreaKm2)(H3Index h, double *out); + +/** @brief exact area for a specific cell (hexagon or pentagon) in meters^2 */ +DECLSPEC H3Error H3_EXPORT(cellAreaM2)(H3Index h, double *out); +/** @} */ + +/** @defgroup getHexagonEdgeLengthAvg getHexagonEdgeLengthAvg + * Functions for getHexagonEdgeLengthAvg + * @{ + */ +/** @brief average hexagon edge length in kilometers (excludes pentagons) */ +DECLSPEC H3Error H3_EXPORT(getHexagonEdgeLengthAvgKm)(int res, double *out); + +/** @brief average hexagon edge length in meters (excludes pentagons) */ +DECLSPEC H3Error H3_EXPORT(getHexagonEdgeLengthAvgM)(int res, double *out); +/** @} */ + +/** @defgroup edgeLength edgeLength + * Functions for edgeLength + * @{ + */ +/** @brief exact length for a specific directed edge in radians*/ +DECLSPEC H3Error H3_EXPORT(edgeLengthRads)(H3Index edge, double *length); + +/** @brief exact length for a specific directed edge in kilometers*/ +DECLSPEC H3Error H3_EXPORT(edgeLengthKm)(H3Index edge, double *length); + +/** @brief exact length for a specific directed edge in meters*/ +DECLSPEC H3Error H3_EXPORT(edgeLengthM)(H3Index edge, double *length); +/** @} */ + +/** @defgroup getNumCells getNumCells + * Functions for getNumCells + * @{ + */ +/** @brief number of cells (hexagons and pentagons) for a given resolution + * + * It works out to be `2 + 120*7^r` for resolution `r`. + * + * # Mathematical notes + * + * Let h(n) be the number of children n levels below + * a single *hexagon*. + * + * Then h(n) = 7^n. + * + * Let p(n) be the number of children n levels below + * a single *pentagon*. + * + * Then p(0) = 1, and p(1) = 6, since each pentagon + * has 5 hexagonal immediate children and 1 pentagonal + * immediate child. + * + * In general, we have the recurrence relation + * + * p(n) = 5*h(n-1) + p(n-1) + * = 5*7^(n-1) + p(n-1). + * + * Working through the recurrence, we get that + * + * p(n) = 1 + 5*\sum_{k=1}^n 7^{k-1} + * = 1 + 5*(7^n - 1)/6, + * + * using the closed form for a geometric series. + * + * Using the closed forms for h(n) and p(n), we can + * get a closed form for the total number of cells + * at resolution r: + * + * c(r) = 12*p(r) + 110*h(r) + * = 2 + 120*7^r. + * + * + * @param res H3 cell resolution + * + * @return number of cells at resolution `res` + */ +DECLSPEC H3Error H3_EXPORT(getNumCells)(int res, int64_t *out); +/** @} */ + +/** @defgroup getRes0Cells getRes0Cells + * Functions for getRes0Cells + * @{ + */ +/** @brief returns the number of resolution 0 cells (hexagons and pentagons) */ +DECLSPEC int H3_EXPORT(res0CellCount)(void); + +/** @brief provides all base cells in H3Index format*/ +DECLSPEC H3Error H3_EXPORT(getRes0Cells)(H3Index *out); +/** @} */ + +/** @defgroup getPentagons getPentagons + * Functions for getPentagons + * @{ + */ +/** @brief returns the number of pentagons per resolution */ +DECLSPEC int H3_EXPORT(pentagonCount)(void); + +/** @brief generates all pentagons at the specified resolution */ +DECLSPEC H3Error H3_EXPORT(getPentagons)(int res, H3Index *out); +/** @} */ + +/** @defgroup getResolution getResolution + * Functions for getResolution + * @{ + */ +/** @brief returns the resolution of the provided H3 index + * Works on both cells and directed edges. 
*/ +DECLSPEC int H3_EXPORT(getResolution)(H3Index h); +/** @} */ + +/** @defgroup getBaseCellNumber getBaseCellNumber + * Functions for getBaseCellNumber + * @{ + */ +/** @brief returns the base cell "number" (0 to 121) of the provided H3 cell + * + * Note: Technically works on H3 edges, but will return base cell of the + * origin cell. */ +DECLSPEC int H3_EXPORT(getBaseCellNumber)(H3Index h); +/** @} */ + +/** @defgroup getIndexDigit getIndexDigit + * Functions for getIndexDigit + * @{ + */ +/** @brief returns the indexing digit of the provided H3 cell at a given + * resolution + * + * Indexing digits are 1-indexed beginning with the digit for resolution 1. */ +DECLSPEC H3Error H3_EXPORT(getIndexDigit)(H3Index h, int res, int *out); +/** @} */ + +/** @defgroup constructCell constructCell + * Functions for constructCell + * @{ + */ +/** @brief create a cell from its components + * Only allows for constructing valid H3 cells. + **/ +DECLSPEC H3Error H3_EXPORT(constructCell)(int res, int baseCellNumber, + const int *digits, H3Index *out); +/** @} */ + +/** @defgroup stringToH3 stringToH3 + * Functions for stringToH3 + * @{ + */ +/** @brief converts the canonical string format to H3Index format */ +DECLSPEC H3Error H3_EXPORT(stringToH3)(const char *str, H3Index *out); +/** @} */ + +/** @defgroup h3ToString h3ToString + * Functions for h3ToString + * @{ + */ +/** @brief converts an H3Index to a canonical string */ +DECLSPEC H3Error H3_EXPORT(h3ToString)(H3Index h, char *str, size_t sz); +/** @} */ + +/** @defgroup isValidCell isValidCell + * Functions for isValidCell + * @{ + */ +/** @brief confirms if an H3Index is a valid cell (hexagon or pentagon) + * In particular, returns 0 (False) for H3 directed edges or invalid data + */ +DECLSPEC int H3_EXPORT(isValidCell)(H3Index h); +/** @} */ + +/** @defgroup isValidIndex isValidIndex + * Functions for isValidIndex + * @{ + */ +/** @brief confirms if an H3Index is valid for any mode (cell, directed edge, or + * vertex) Returns 1 if the H3 index is valid for any supported type, 0 + * otherwise + */ +DECLSPEC int H3_EXPORT(isValidIndex)(H3Index h); +/** @} */ + +/** @defgroup cellToParent cellToParent + * Functions for cellToParent + * @{ + */ +/** @brief returns the parent (or grandparent, etc) cell of the given cell + */ +DECLSPEC H3Error H3_EXPORT(cellToParent)(H3Index h, int parentRes, + H3Index *parent); +/** @} */ + +/** @defgroup cellToChildren cellToChildren + * Functions for cellToChildren + * @{ + */ +/** @brief determines the exact number of children (or grandchildren, etc) + * that would be returned for the given cell */ +DECLSPEC H3Error H3_EXPORT(cellToChildrenSize)(H3Index h, int childRes, + int64_t *out); + +/** @brief provides the children (or grandchildren, etc) of the given cell */ +DECLSPEC H3Error H3_EXPORT(cellToChildren)(H3Index h, int childRes, + H3Index *children); +/** @} */ + +/** @defgroup cellToCenterChild cellToCenterChild + * Functions for cellToCenterChild + * @{ + */ +/** @brief returns the center child of the given cell at the specified + * resolution */ +DECLSPEC H3Error H3_EXPORT(cellToCenterChild)(H3Index h, int childRes, + H3Index *child); +/** @} */ + +/** @defgroup cellToChildPos cellToChildPos + * Functions for cellToChildPos + * @{ + */ +/** @brief Returns the position of the cell within an ordered list of all + * children of the cell's parent at the specified resolution */ +DECLSPEC H3Error H3_EXPORT(cellToChildPos)(H3Index child, int parentRes, + int64_t *out); +/** @} */ + +/** @defgroup childPosToCell 
childPosToCell + * Functions for childPosToCell + * @{ + */ +/** @brief Returns the child cell at a given position within an ordered list of + * all children at the specified resolution */ +DECLSPEC H3Error H3_EXPORT(childPosToCell)(int64_t childPos, H3Index parent, + int childRes, H3Index *child); +/** @} */ + +/** @defgroup compactCells compactCells + * Functions for compactCells + * @{ + */ +/** @brief compacts the given set of hexagons as best as possible */ +DECLSPEC H3Error H3_EXPORT(compactCells)(const H3Index *h3Set, + H3Index *compactedSet, + const int64_t numHexes); +/** @} */ + +/** @defgroup uncompactCells uncompactCells + * Functions for uncompactCells + * @{ + */ +/** @brief determines the exact number of hexagons that will be uncompacted + * from the compacted set */ +DECLSPEC H3Error H3_EXPORT(uncompactCellsSize)(const H3Index *compactedSet, + const int64_t numCompacted, + const int res, int64_t *out); + +/** @brief uncompacts the compacted hexagon set */ +DECLSPEC H3Error H3_EXPORT(uncompactCells)(const H3Index *compactedSet, + const int64_t numCompacted, + H3Index *outSet, + const int64_t numOut, const int res); +/** @} */ + +/** @defgroup isResClassIII isResClassIII + * Functions for isResClassIII + * @{ + */ +/** @brief determines if a hexagon is Class III (or Class II) */ +DECLSPEC int H3_EXPORT(isResClassIII)(H3Index h); +/** @} */ + +/** @defgroup isPentagon isPentagon + * Functions for isPentagon + * @{ + */ +/** @brief determines if an H3 cell is a pentagon */ +DECLSPEC int H3_EXPORT(isPentagon)(H3Index h); +/** @} */ + +/** @defgroup getIcosahedronFaces getIcosahedronFaces + * Functions for getIcosahedronFaces + * @{ + */ +/** @brief Max number of icosahedron faces intersected by an index */ +DECLSPEC H3Error H3_EXPORT(maxFaceCount)(H3Index h3, int *out); + +/** @brief Find all icosahedron faces intersected by a given H3 index */ +DECLSPEC H3Error H3_EXPORT(getIcosahedronFaces)(H3Index h3, int *out); +/** @} */ + +/** @defgroup areNeighborCells areNeighborCells + * Functions for areNeighborCells + * @{ + */ +/** @brief returns whether or not the provided hexagons border */ +DECLSPEC H3Error H3_EXPORT(areNeighborCells)(H3Index origin, + H3Index destination, int *out); +/** @} */ + +/** @defgroup cellsToDirectedEdge cellsToDirectedEdge + * Functions for cellsToDirectedEdge + * @{ + */ +/** @brief returns the directed edge H3Index for the specified origin and + * destination */ +DECLSPEC H3Error H3_EXPORT(cellsToDirectedEdge)(H3Index origin, + H3Index destination, + H3Index *out); +/** @} */ + +/** @defgroup isValidDirectedEdge isValidDirectedEdge + * Functions for isValidDirectedEdge + * @{ + */ +/** @brief returns whether the H3Index is a valid directed edge */ +DECLSPEC int H3_EXPORT(isValidDirectedEdge)(H3Index edge); +/** @} */ + +/** @defgroup getDirectedEdgeOrigin \ + * getDirectedEdgeOrigin + * Functions for getDirectedEdgeOrigin + * @{ + */ +/** @brief Returns the origin hexagon H3Index from the directed edge + * H3Index */ +DECLSPEC H3Error H3_EXPORT(getDirectedEdgeOrigin)(H3Index edge, H3Index *out); +/** @} */ + +/** @defgroup getDirectedEdgeDestination \ + * getDirectedEdgeDestination + * Functions for getDirectedEdgeDestination + * @{ + */ +/** @brief Returns the destination hexagon H3Index from the directed edge + * H3Index */ +DECLSPEC H3Error H3_EXPORT(getDirectedEdgeDestination)(H3Index edge, + H3Index *out); +/** @} */ + +/** @defgroup directedEdgeToCells \ + * directedEdgeToCells + * Functions for directedEdgeToCells + * @{ + */ +/** @brief 
Returns the origin and destination hexagons from the directed + * edge H3Index */ +DECLSPEC H3Error H3_EXPORT(directedEdgeToCells)(H3Index edge, + H3Index *originDestination); +/** @} */ + +/** @defgroup originToDirectedEdges \ + * originToDirectedEdges + * Functions for originToDirectedEdges + * @{ + */ +/** @brief Returns the 6 (or 5 for pentagons) edges associated with the H3Index + */ +DECLSPEC H3Error H3_EXPORT(originToDirectedEdges)(H3Index origin, + H3Index *edges); +/** @} */ + +/** @defgroup directedEdgeToBoundary directedEdgeToBoundary + * Functions for directedEdgeToBoundary + * @{ + */ +/** @brief Returns the CellBoundary containing the coordinates of the edge */ +DECLSPEC H3Error H3_EXPORT(directedEdgeToBoundary)(H3Index edge, + CellBoundary *gb); +/** @} */ + +/** @defgroup cellToVertex cellToVertex + * Functions for cellToVertex + * @{ + */ +/** @brief Returns a single vertex for a given cell, as an H3 index */ +DECLSPEC H3Error H3_EXPORT(cellToVertex)(H3Index origin, int vertexNum, + H3Index *out); +/** @} */ + +/** @defgroup cellToVertexes cellToVertexes + * Functions for cellToVertexes + * @{ + */ +/** @brief Returns all vertexes for a given cell, as H3 indexes */ +DECLSPEC H3Error H3_EXPORT(cellToVertexes)(H3Index origin, H3Index *vertexes); +/** @} */ + +/** @defgroup vertexToLatLng vertexToLatLng + * Functions for vertexToLatLng + * @{ + */ +/** @brief Returns a single vertex for a given cell, as an H3 index */ +DECLSPEC H3Error H3_EXPORT(vertexToLatLng)(H3Index vertex, LatLng *point); +/** @} */ + +/** @defgroup isValidVertex isValidVertex + * Functions for isValidVertex + * @{ + */ +/** @brief Whether the input is a valid H3 vertex */ +DECLSPEC int H3_EXPORT(isValidVertex)(H3Index vertex); +/** @} */ + +/** @defgroup gridDistance gridDistance + * Functions for gridDistance + * @{ + */ +/** @brief Returns grid distance between two indexes */ +DECLSPEC H3Error H3_EXPORT(gridDistance)(H3Index origin, H3Index h3, + int64_t *distance); +/** @} */ + +/** @defgroup gridPathCells gridPathCells + * Functions for gridPathCells + * @{ + */ +/** @brief Number of indexes in a line connecting two indexes */ +DECLSPEC H3Error H3_EXPORT(gridPathCellsSize)(H3Index start, H3Index end, + int64_t *size); + +/** @brief Line of h3 indexes connecting two indexes */ +DECLSPEC H3Error H3_EXPORT(gridPathCells)(H3Index start, H3Index end, + H3Index *out); +/** @} */ + +/** @defgroup cellToLocalIj cellToLocalIj + * Functions for cellToLocalIj + * @{ + */ +/** @brief Returns two dimensional coordinates for the given index */ +DECLSPEC H3Error H3_EXPORT(cellToLocalIj)(H3Index origin, H3Index h3, + uint32_t mode, CoordIJ *out); +/** @} */ + +/** @defgroup localIjToCell localIjToCell + * Functions for localIjToCell + * @{ + */ +/** @brief Returns index for the given two dimensional coordinates */ +DECLSPEC H3Error H3_EXPORT(localIjToCell)(H3Index origin, const CoordIJ *ij, + uint32_t mode, H3Index *out); +/** @} */ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif diff --git a/python/lib64/cmake/h3/h3Config.cmake b/python/lib64/cmake/h3/h3Config.cmake new file mode 100644 index 000000000..0aed3d6a8 --- /dev/null +++ b/python/lib64/cmake/h3/h3Config.cmake @@ -0,0 +1,28 @@ + +####### Expanded from @PACKAGE_INIT@ by configure_package_config_file() ####### +####### Any changes to this file will be overwritten by the next CMake run #### +####### The input file was Config.cmake.in ######## + +get_filename_component(PACKAGE_PREFIX_DIR "${CMAKE_CURRENT_LIST_DIR}/../../../" ABSOLUTE) + 
+macro(set_and_check _var _file) + set(${_var} "${_file}") + if(NOT EXISTS "${_file}") + message(FATAL_ERROR "File or directory ${_file} referenced by variable ${_var} does not exist !") + endif() +endmacro() + +macro(check_required_components _NAME) + foreach(comp ${${_NAME}_FIND_COMPONENTS}) + if(NOT ${_NAME}_${comp}_FOUND) + if(${_NAME}_FIND_REQUIRED_${comp}) + set(${_NAME}_FOUND FALSE) + endif() + endif() + endforeach() +endmacro() + +#################################################################################### + +include("${CMAKE_CURRENT_LIST_DIR}/h3Targets.cmake") +check_required_components("h3") diff --git a/python/lib64/cmake/h3/h3ConfigVersion.cmake b/python/lib64/cmake/h3/h3ConfigVersion.cmake new file mode 100644 index 000000000..a45ed4e65 --- /dev/null +++ b/python/lib64/cmake/h3/h3ConfigVersion.cmake @@ -0,0 +1,65 @@ +# This is a basic version file for the Config-mode of find_package(). +# It is used by write_basic_package_version_file() as input file for configure_file() +# to create a version-file which can be installed along a config.cmake file. +# +# The created file sets PACKAGE_VERSION_EXACT if the current version string and +# the requested version string are exactly the same and it sets +# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version, +# but only if the requested major version is the same as the current one. +# The variable CVF_VERSION must be set before calling configure_file(). + + +set(PACKAGE_VERSION "4.4.1") + +if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + + if("4.4.1" MATCHES "^([0-9]+)\\.") + set(CVF_VERSION_MAJOR "${CMAKE_MATCH_1}") + if(NOT CVF_VERSION_MAJOR VERSION_EQUAL 0) + string(REGEX REPLACE "^0+" "" CVF_VERSION_MAJOR "${CVF_VERSION_MAJOR}") + endif() + else() + set(CVF_VERSION_MAJOR "4.4.1") + endif() + + if(PACKAGE_FIND_VERSION_RANGE) + # both endpoints of the range must have the expected major version + math (EXPR CVF_VERSION_MAJOR_NEXT "${CVF_VERSION_MAJOR} + 1") + if (NOT PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + OR ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX_MAJOR STREQUAL CVF_VERSION_MAJOR) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX VERSION_LESS_EQUAL CVF_VERSION_MAJOR_NEXT))) + set(PACKAGE_VERSION_COMPATIBLE FALSE) + elseif(PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + AND ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_LESS_EQUAL PACKAGE_FIND_VERSION_MAX) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION_MAX))) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + else() + if(PACKAGE_FIND_VERSION_MAJOR STREQUAL CVF_VERSION_MAJOR) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + + if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + endif() + endif() +endif() + + +# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: +if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "8" STREQUAL "") + return() +endif() + +# check that the installed version has the same 32/64bit-ness as the one which is currently searching: +if(NOT CMAKE_SIZEOF_VOID_P STREQUAL "8") + math(EXPR installedBits "8 * 8") + set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") + set(PACKAGE_VERSION_UNSUITABLE TRUE) +endif() 
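Review note: as a sanity check on the Python bindings documented above, here is a minimal, untested usage sketch. It assumes the package imports as h3 and that latlng_to_cell and get_num_cells exist elsewhere in the v4 API surface (neither is added by this diff); cell_to_local_ij, local_ij_to_cell, cell_area, and great_circle_distance are taken from the docstrings in this diff, and the closed form c(r) = 2 + 120*7^r comes from the getNumCells notes in h3api.h.

    import h3  # assumed import name; not established by this diff

    res = 9
    origin = h3.latlng_to_cell(37.7749, -122.4194, res)  # assumed v4 helper
    other = h3.latlng_to_cell(37.7790, -122.4312, res)

    # Local IJ round trip. Per the docstring notes, (0, 0) is *not* the
    # origin cell; it is the center of the base cell containing `origin`.
    i, j = h3.cell_to_local_ij(origin, other)
    assert h3.local_ij_to_cell(origin, i, j) == other

    # Spherical measurements, in the units listed in the docstrings.
    area_m2 = h3.cell_area(origin, unit='m^2')
    dist_km = h3.great_circle_distance((37.7749, -122.4194),
                                       (37.7790, -122.4312), unit='km')

    # Verify the closed form derived in h3api.h: c(r) = 2 + 120*7^r.
    assert all(h3.get_num_cells(r) == 2 + 120 * 7**r for r in range(16))

The round trip holds because local_ij_to_cell inverts cell_to_local_ij for cells near enough to origin that no pentagon distortion intervenes; for distant cells the call can fail instead, as the E_PENTAGON and E_FAILED codes in the C header suggest.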
diff --git a/python/lib64/cmake/h3/h3Targets-release.cmake b/python/lib64/cmake/h3/h3Targets-release.cmake new file mode 100644 index 000000000..f7e32e3f6 --- /dev/null +++ b/python/lib64/cmake/h3/h3Targets-release.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "Release". +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "h3::h3" for configuration "Release" +set_property(TARGET h3::h3 APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(h3::h3 PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "C" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libh3.a" + ) + +list(APPEND _cmake_import_check_targets h3::h3 ) +list(APPEND _cmake_import_check_files_for_h3::h3 "${_IMPORT_PREFIX}/lib64/libh3.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/python/lib64/cmake/h3/h3Targets.cmake b/python/lib64/cmake/h3/h3Targets.cmake new file mode 100644 index 000000000..37ffab466 --- /dev/null +++ b/python/lib64/cmake/h3/h3Targets.cmake @@ -0,0 +1,109 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) + message(FATAL_ERROR "CMake >= 2.8.12 required") +endif() +if(CMAKE_VERSION VERSION_LESS "2.8.12") + message(FATAL_ERROR "CMake >= 2.8.12 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.8.12...3.31) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. +set(_cmake_targets_defined "") +set(_cmake_targets_not_defined "") +set(_cmake_expected_targets "") +foreach(_cmake_expected_target IN ITEMS h3::h3) + list(APPEND _cmake_expected_targets "${_cmake_expected_target}") + if(TARGET "${_cmake_expected_target}") + list(APPEND _cmake_targets_defined "${_cmake_expected_target}") + else() + list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") + endif() +endforeach() +unset(_cmake_expected_target) +if(_cmake_targets_defined STREQUAL _cmake_expected_targets) + unset(_cmake_targets_defined) + unset(_cmake_targets_not_defined) + unset(_cmake_expected_targets) + unset(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT _cmake_targets_defined STREQUAL "") + string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") + string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") +endif() +unset(_cmake_targets_defined) +unset(_cmake_targets_not_defined) +unset(_cmake_expected_targets) + + +# Compute the installation prefix relative to this file. 
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target h3::h3 +add_library(h3::h3 STATIC IMPORTED) + +set_target_properties(h3::h3 PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "H3_PREFIX=" + INTERFACE_COMPILE_FEATURES "c_std_99" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "/usr/lib64/libm.so" +) + +# Load information for each installed configuration. +file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/h3Targets-*.cmake") +foreach(_cmake_config_file IN LISTS _cmake_config_files) + include("${_cmake_config_file}") +endforeach() +unset(_cmake_config_file) +unset(_cmake_config_files) + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(_cmake_target IN LISTS _cmake_import_check_targets) + if(CMAKE_VERSION VERSION_LESS "3.28" + OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target} + OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}") + foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") + if(NOT EXISTS "${_cmake_file}") + message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file + \"${_cmake_file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + endif() + unset(_cmake_file) + unset("_cmake_import_check_files_for_${_cmake_target}") +endforeach() +unset(_cmake_target) +unset(_cmake_import_check_targets) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/python/lib64/libh3.a b/python/lib64/libh3.a new file mode 100644 index 000000000..aeb7539e6 Binary files /dev/null and b/python/lib64/libh3.a differ diff --git a/python/numpy-2.3.5.dist-info/INSTALLER b/python/numpy-2.3.5.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/python/numpy-2.3.5.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/python/numpy-2.3.5.dist-info/LICENSE.txt b/python/numpy-2.3.5.dist-info/LICENSE.txt new file mode 100644 index 000000000..284458b0b --- /dev/null +++ b/python/numpy-2.3.5.dist-info/LICENSE.txt @@ -0,0 +1,971 @@ +Copyright (c) 2005-2025, NumPy Developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. 
+ + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +---- + +The NumPy repository and source distributions bundle several libraries that are +compatibly licensed. We list these here. + +Name: lapack-lite +Files: numpy/linalg/lapack_lite/* +License: BSD-3-Clause + For details, see numpy/linalg/lapack_lite/LICENSE.txt + +Name: dragon4 +Files: numpy/_core/src/multiarray/dragon4.c +License: MIT + For license text, see numpy/_core/src/multiarray/dragon4.c + +Name: libdivide +Files: numpy/_core/include/numpy/libdivide/* +License: Zlib + For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt + + +Note that the following files are vendored in the repository and sdist but not +installed in built numpy packages: + +Name: Meson +Files: vendored-meson/meson/* +License: Apache 2.0 + For license text, see vendored-meson/meson/COPYING + +Name: spin +Files: .spin/cmds.py +License: BSD-3 + For license text, see .spin/LICENSE + +Name: tempita +Files: numpy/_build_utils/tempita/* +License: MIT + For details, see numpy/_build_utils/tempita/LICENCE.txt + +---- + +This binary distribution of NumPy also bundles the following software: + + +Name: OpenBLAS +Files: numpy.libs/libscipy_openblas*.so +Description: bundled as a dynamically linked library +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause + Copyright (c) 2011-2014, The OpenBLAS Project + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: LAPACK +Files: numpy.libs/libscipy_openblas*.so +Description: bundled in OpenBLAS +Availability: https://github.com/OpenMathLib/OpenBLAS/ +License: BSD-3-Clause-Open-MPI + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +Name: GCC runtime library +Files: numpy.libs/libgfortran*.so +Description: dynamically linked to files compiled with gcc +Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran +License: GPL-3.0-or-later WITH GCC-exception-3.1 + Copyright (C) 2002-2017 Free Software Foundation, Inc. + + Libgfortran is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3, or (at your option) + any later version. 
+ + Libgfortran is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + Under Section 7 of GPL version 3, you are granted additional + permissions described in the GCC Runtime Library Exception, version + 3.1, as published by the Free Software Foundation. + + You should have received a copy of the GNU General Public License and + a copy of the GCC Runtime Library Exception along with this program; + see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + <http://www.gnu.org/licenses/>. + +---- + +Full text of license texts referred to above follows (that they are +listed below does not necessarily imply the conditions apply to the +present binary release): + +---- + +GCC RUNTIME LIBRARY EXCEPTION + +Version 3.1, 31 March 2009 + +Copyright (C) 2009 Free Software Foundation, Inc. + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +This GCC Runtime Library Exception ("Exception") is an additional +permission under section 7 of the GNU General Public License, version +3 ("GPLv3"). It applies to a given file (the "Runtime Library") that +bears a notice placed by the copyright holder of the file stating that +the file is governed by GPLv3 along with this Exception. + +When you use GCC to compile a program, GCC may combine portions of +certain GCC header files and runtime libraries with the compiled +program. The purpose of this Exception is to allow compilation of +non-GPL (including proprietary) programs to use, in this way, the +header files and runtime libraries covered by this Exception. + +0. Definitions. + +A file is an "Independent Module" if it either requires the Runtime +Library for execution after a Compilation Process, or makes use of an +interface provided by the Runtime Library, but is not otherwise based +on the Runtime Library. + +"GCC" means a version of the GNU Compiler Collection, with or without +modifications, governed by version 3 (or a specified later version) of +the GNU General Public License (GPL) with the option of using any +subsequent versions published by the FSF. + +"GPL-compatible Software" is software whose conditions of propagation, +modification and use would permit combination with GCC in accord with +the license of GCC. + +"Target Code" refers to output from any compiler for a real or virtual +target processor architecture, in executable form or suitable for +input to an assembler, loader, linker and/or execution +phase. Notwithstanding that, Target Code does not include data in any +format that is used as a compiler intermediate representation, or used +for producing a compiler intermediate representation. + +The "Compilation Process" transforms code entirely represented in +non-intermediate languages designed for human-written code, and/or in +Java Virtual Machine byte code, into Target Code. Thus, for example, +use of source code generators and preprocessors need not be considered +part of the Compilation Process, since the Compilation Process can be +understood as starting with the output of the generators or +preprocessors. + +A Compilation Process is "Eligible" if it is done using GCC, alone or +with other GPL-compatible software, or if it is done without using any +work based on GCC.
For example, using non-GPL-compatible Software to +optimize any GCC intermediate representations would not qualify as an +Eligible Compilation Process. + +1. Grant of Additional Permission. + +You have permission to propagate a work of Target Code formed by +combining the Runtime Library with Independent Modules, even if such +propagation would otherwise violate the terms of GPLv3, provided that +all Target Code was generated by Eligible Compilation Processes. You +may then convey such a combination under terms of your choice, +consistent with the licensing of the Independent Modules. + +2. No Weakening of GCC Copyleft. + +The availability of this Exception does not imply any general +presumption that third-party software is unaffected by the copyleft +requirements of the license of GCC. + +---- + + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. 
The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+
+Name: libquadmath
+Files: numpy.libs/libquadmath*.so
+Description: dynamically linked to files compiled with gcc
+Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+License: LGPL-2.1-or-later
+
+ GCC Quad-Precision Math Library
+ Copyright (C) 2010-2019 Free Software Foundation, Inc.
+ Written by Francois-Xavier Coudert <fxcoudert@acm.org>
+
+ This file is part of the libquadmath library.
+ Libquadmath is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ Libquadmath is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
diff --git a/python/numpy-2.3.5.dist-info/METADATA b/python/numpy-2.3.5.dist-info/METADATA
new file mode 100644
index 000000000..f9e7a39ee
--- /dev/null
+++ b/python/numpy-2.3.5.dist-info/METADATA
@@ -0,0 +1,1093 @@
+Metadata-Version: 2.1
+Name: numpy
+Version: 2.3.5
+Summary: Fundamental package for array computing in Python
+Author: Travis E. Oliphant et al.
+Maintainer-Email: NumPy Developers <numpy-discussion@python.org>
+License: Copyright (c) 2005-2025, NumPy Developers.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the NumPy Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ ----
+
+ The NumPy repository and source distributions bundle several libraries that are
+ compatibly licensed. We list these here.
+
+ Name: lapack-lite
+ Files: numpy/linalg/lapack_lite/*
+ License: BSD-3-Clause
+ For details, see numpy/linalg/lapack_lite/LICENSE.txt
+
+ Name: dragon4
+ Files: numpy/_core/src/multiarray/dragon4.c
+ License: MIT
+ For license text, see numpy/_core/src/multiarray/dragon4.c
+
+ Name: libdivide
+ Files: numpy/_core/include/numpy/libdivide/*
+ License: Zlib
+ For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt
+
+
+ Note that the following files are vendored in the repository and sdist but not
+ installed in built numpy packages:
+
+ Name: Meson
+ Files: vendored-meson/meson/*
+ License: Apache 2.0
+ For license text, see vendored-meson/meson/COPYING
+
+ Name: spin
+ Files: .spin/cmds.py
+ License: BSD-3
+ For license text, see .spin/LICENSE
+
+ Name: tempita
+ Files: numpy/_build_utils/tempita/*
+ License: MIT
+ For details, see numpy/_build_utils/tempita/LICENCE.txt
+
+ ----
+
+ This binary distribution of NumPy also bundles the following software:
+
+
+ Name: OpenBLAS
+ Files: numpy.libs/libscipy_openblas*.so
+ Description: bundled as a dynamically linked library
+ Availability: https://github.com/OpenMathLib/OpenBLAS/
+ License: BSD-3-Clause
+ Copyright (c) 2011-2014, The OpenBLAS Project
+ All rights reserved.
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + 3. Neither the name of the OpenBLAS project nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + Name: LAPACK + Files: numpy.libs/libscipy_openblas*.so + Description: bundled in OpenBLAS + Availability: https://github.com/OpenMathLib/OpenBLAS/ + License: BSD-3-Clause-Open-MPI + Copyright (c) 1992-2013 The University of Tennessee and The University + of Tennessee Research Foundation. All rights + reserved. + Copyright (c) 2000-2013 The University of California Berkeley. All + rights reserved. + Copyright (c) 2006-2013 The University of Colorado Denver. All rights + reserved. + + $COPYRIGHT$ + + Additional copyrights may follow + + $HEADER$ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer listed + in this license in the documentation and/or other materials + provided with the distribution. + + - Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + The copyright holders provide no reassurances that the source code + provided does not infringe any patent, copyright, or any other + intellectual property rights of third parties. The copyright holders + disclaim any liability to any recipient for claims brought against + recipient by any third party for infringement of that parties + intellectual property rights. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+ Name: GCC runtime library
+ Files: numpy.libs/libgfortran*.so
+ Description: dynamically linked to files compiled with gcc
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
+ License: GPL-3.0-or-later WITH GCC-exception-3.1
+ Copyright (C) 2002-2017 Free Software Foundation, Inc.
+
+ Libgfortran is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ Libgfortran is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>.
+
+ ----
+
+ Full text of license texts referred to above follows (that they are
+ listed below does not necessarily imply the conditions apply to the
+ present binary release):
+
+ ----
+
+ GCC RUNTIME LIBRARY EXCEPTION
+
+ Version 3.1, 31 March 2009
+
+ Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
+
+ Everyone is permitted to copy and distribute verbatim copies of this
+ license document, but changing it is not allowed.
+
+ This GCC Runtime Library Exception ("Exception") is an additional
+ permission under section 7 of the GNU General Public License, version
+ 3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
+ bears a notice placed by the copyright holder of the file stating that
+ the file is governed by GPLv3 along with this Exception.
+
+ When you use GCC to compile a program, GCC may combine portions of
+ certain GCC header files and runtime libraries with the compiled
+ program. The purpose of this Exception is to allow compilation of
+ non-GPL (including proprietary) programs to use, in this way, the
+ header files and runtime libraries covered by this Exception.
+
+ 0. Definitions.
+
+ A file is an "Independent Module" if it either requires the Runtime
+ Library for execution after a Compilation Process, or makes use of an
+ interface provided by the Runtime Library, but is not otherwise based
+ on the Runtime Library.
+
+ "GCC" means a version of the GNU Compiler Collection, with or without
+ modifications, governed by version 3 (or a specified later version) of
+ the GNU General Public License (GPL) with the option of using any
+ subsequent versions published by the FSF.
+
+ "GPL-compatible Software" is software whose conditions of propagation,
+ modification and use would permit combination with GCC in accord with
+ the license of GCC.
+
+ "Target Code" refers to output from any compiler for a real or virtual
+ target processor architecture, in executable form or suitable for
+ input to an assembler, loader, linker and/or execution
+ phase. Notwithstanding that, Target Code does not include data in any
+ format that is used as a compiler intermediate representation, or used
+ for producing a compiler intermediate representation.
+
+ The "Compilation Process" transforms code entirely represented in
+ non-intermediate languages designed for human-written code, and/or in
+ Java Virtual Machine byte code, into Target Code. Thus, for example,
+ use of source code generators and preprocessors need not be considered
+ part of the Compilation Process, since the Compilation Process can be
+ understood as starting with the output of the generators or
+ preprocessors.
+
+ A Compilation Process is "Eligible" if it is done using GCC, alone or
+ with other GPL-compatible software, or if it is done without using any
+ work based on GCC. For example, using non-GPL-compatible Software to
+ optimize any GCC intermediate representations would not qualify as an
+ Eligible Compilation Process.
+
+ 1. Grant of Additional Permission.
+
+ You have permission to propagate a work of Target Code formed by
+ combining the Runtime Library with Independent Modules, even if such
+ propagation would otherwise violate the terms of GPLv3, provided that
+ all Target Code was generated by Eligible Compilation Processes. You
+ may then convey such a combination under terms of your choice,
+ consistent with the licensing of the Independent Modules.
+
+ 2. No Weakening of GCC Copyleft.
+
+ The availability of this Exception does not imply any general
+ presumption that third-party software is unaffected by the copyleft
+ requirements of the license of GCC.
+
+ ----
+
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+ software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ the GNU General Public License is intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users. We, the Free Software Foundation, use the
+ GNU General Public License for most of our software; it applies also to
+ any other work released this way by its authors. You can apply it to
+ your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+ these rights or asking you to surrender the rights. Therefore, you have
+ certain responsibilities if you distribute copies of the software, or if
+ you modify it: responsibilities to respect the freedom of others.
+ + For example, if you distribute copies of such a program, whether + gratis or for a fee, you must pass on to the recipients the same + freedoms that you received. You must make sure that they, too, receive + or can get the source code. And you must show them these terms so they + know their rights. + + Developers that use the GNU GPL protect your rights with two steps: + (1) assert copyright on the software, and (2) offer you this License + giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains + that there is no warranty for this free software. For both users' and + authors' sake, the GPL requires that modified versions be marked as + changed, so that their problems will not be attributed erroneously to + authors of previous versions. + + Some devices are designed to deny users access to install or run + modified versions of the software inside them, although the manufacturer + can do so. This is fundamentally incompatible with the aim of + protecting users' freedom to change the software. The systematic + pattern of such abuse occurs in the area of products for individuals to + use, which is precisely where it is most unacceptable. Therefore, we + have designed this version of the GPL to prohibit the practice for those + products. If such problems arise substantially in other domains, we + stand ready to extend this provision to those domains in future versions + of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. + States should not allow patents to restrict development and use of + software on general-purpose computers, but in those that do, we wish to + avoid the special danger that patents applied to a free program could + make it effectively proprietary. To prevent this, the GPL assures that + patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and + modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of + works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this + License. Each licensee is addressed as "you". "Licensees" and + "recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work + in a fashion requiring copyright permission, other than the making of an + exact copy. The resulting work is called a "modified version" of the + earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based + on the Program. + + To "propagate" a work means to do anything with it that, without + permission, would make you directly or secondarily liable for + infringement under applicable copyright law, except executing it on a + computer or modifying a private copy. Propagation includes copying, + distribution (with or without modification), making available to the + public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other + parties to make or receive copies. Mere interaction with a user through + a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" + to the extent that it includes a convenient and prominently visible + feature that (1) displays an appropriate copyright notice, and (2) + tells the user that there is no warranty for the work (except to the + extent that warranties are provided), that licensees may convey the + work under this License, and how to view a copy of this License. If + the interface presents a list of user commands or options, such as a + menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work + for making modifications to it. "Object code" means any non-source + form of a work. + + A "Standard Interface" means an interface that either is an official + standard defined by a recognized standards body, or, in the case of + interfaces specified for a particular programming language, one that + is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other + than the work as a whole, that (a) is included in the normal form of + packaging a Major Component, but which is not part of that Major + Component, and (b) serves only to enable use of the work with that + Major Component, or to implement a Standard Interface for which an + implementation is available to the public in source code form. A + "Major Component", in this context, means a major essential component + (kernel, window system, and so on) of the specific operating system + (if any) on which the executable work runs, or a compiler used to + produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all + the source code needed to generate, install, and (for an executable + work) run the object code and to modify the work, including scripts to + control those activities. However, it does not include the work's + System Libraries, or general-purpose tools or generally available free + programs which are used unmodified in performing those activities but + which are not part of the work. For example, Corresponding Source + includes interface definition files associated with source files for + the work, and the source code for shared libraries and dynamically + linked subprograms that the work is specifically designed to require, + such as by intimate data communication or control flow between those + subprograms and other parts of the work. + + The Corresponding Source need not include anything that users + can regenerate automatically from other parts of the Corresponding + Source. + + The Corresponding Source for a work in source code form is that + same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of + copyright on the Program, and are irrevocable provided the stated + conditions are met. This License explicitly affirms your unlimited + permission to run the unmodified Program. The output from running a + covered work is covered by this License only if the output, given its + content, constitutes a covered work. This License acknowledges your + rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not + convey, without conditions so long as your license otherwise remains + in force. 
You may convey covered works to others for the sole purpose + of having them make modifications exclusively for you, or provide you + with facilities for running those works, provided that you comply with + the terms of this License in conveying all material for which you do + not control copyright. Those thus making or running the covered works + for you must do so exclusively on your behalf, under your direction + and control, on terms that prohibit them from making any copies of + your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under + the conditions stated below. Sublicensing is not allowed; section 10 + makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological + measure under any applicable law fulfilling obligations under article + 11 of the WIPO copyright treaty adopted on 20 December 1996, or + similar laws prohibiting or restricting circumvention of such + measures. + + When you convey a covered work, you waive any legal power to forbid + circumvention of technological measures to the extent such circumvention + is effected by exercising rights under this License with respect to + the covered work, and you disclaim any intention to limit operation or + modification of the work as a means of enforcing, against the work's + users, your or third parties' legal rights to forbid circumvention of + technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you + receive it, in any medium, provided that you conspicuously and + appropriately publish on each copy an appropriate copyright notice; + keep intact all notices stating that this License and any + non-permissive terms added in accord with section 7 apply to the code; + keep intact all notices of the absence of any warranty; and give all + recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, + and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to + produce it from the Program, in the form of source code under the + terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent + works, which are not by their nature extensions of the covered work, + and which are not combined with it such as to form a larger program, + in or on a volume of a storage or distribution medium, is called an + "aggregate" if the compilation and its resulting copyright are not + used to limit the access or legal rights of the compilation's users + beyond what the individual works permit. Inclusion of a covered work + in an aggregate does not cause this License to apply to the other + parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms + of sections 4 and 5, provided that you also convey the + machine-readable Corresponding Source under the terms of this License, + in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded + from the Corresponding Source as a System Library, need not be + included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any + tangible personal property which is normally used for personal, family, + or household purposes, or (2) anything designed or sold for incorporation + into a dwelling. 
In determining whether a product is a consumer product, + doubtful cases shall be resolved in favor of coverage. For a particular + product received by a particular user, "normally used" refers to a + typical or common use of that class of product, regardless of the status + of the particular user or of the way in which the particular user + actually uses, or expects or is expected to use, the product. A product + is a consumer product regardless of whether the product has substantial + commercial, industrial or non-consumer uses, unless such uses represent + the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, + procedures, authorization keys, or other information required to install + and execute modified versions of a covered work in that User Product from + a modified version of its Corresponding Source. The information must + suffice to ensure that the continued functioning of the modified object + code is in no case prevented or interfered with solely because + modification has been made. + + If you convey an object code work under this section in, or with, or + specifically for use in, a User Product, and the conveying occurs as + part of a transaction in which the right of possession and use of the + User Product is transferred to the recipient in perpetuity or for a + fixed term (regardless of how the transaction is characterized), the + Corresponding Source conveyed under this section must be accompanied + by the Installation Information. But this requirement does not apply + if neither you nor any third party retains the ability to install + modified object code on the User Product (for example, the work has + been installed in ROM). + + The requirement to provide Installation Information does not include a + requirement to continue to provide support service, warranty, or updates + for a work that has been modified or installed by the recipient, or for + the User Product in which it has been modified or installed. Access to a + network may be denied when the modification itself materially and + adversely affects the operation of the network or violates the rules and + protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, + in accord with this section must be in a format that is publicly + documented (and with an implementation available to the public in + source code form), and must require no special password or key for + unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this + License by making exceptions from one or more of its conditions. + Additional permissions that are applicable to the entire Program shall + be treated as though they were included in this License, to the extent + that they are valid under applicable law. If additional permissions + apply only to part of the Program, that part may be used separately + under those permissions, but the entire Program remains governed by + this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option + remove any additional permissions from that copy, or from any part of + it. (Additional permissions may be written to require their own + removal in certain cases when you modify the work.) You may place + additional permissions on material, added by you to a covered work, + for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you + add to a covered work, you may (if authorized by the copyright holders of + that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further + restrictions" within the meaning of section 10. If the Program as you + received it, or any part of it, contains a notice stating that it is + governed by this License along with a term that is a further + restriction, you may remove that term. If a license document contains + a further restriction but permits relicensing or conveying under this + License, you may add to a covered work material governed by the terms + of that license document, provided that the further restriction does + not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you + must place, in the relevant source files, a statement of the + additional terms that apply to those files, or a notice indicating + where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the + form of a separately written license, or stated as exceptions; + the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly + provided under this License. Any attempt otherwise to propagate or + modify it is void, and will automatically terminate your rights under + this License (including any patent licenses granted under the third + paragraph of section 11). + + However, if you cease all violation of this License, then your + license from a particular copyright holder is reinstated (a) + provisionally, unless and until the copyright holder explicitly and + finally terminates your license, and (b) permanently, if the copyright + holder fails to notify you of the violation by some reasonable means + prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is + reinstated permanently if the copyright holder notifies you of the + violation by some reasonable means, this is the first time you have + received notice of violation of this License (for any work) from that + copyright holder, and you cure the violation prior to 30 days after + your receipt of the notice. + + Termination of your rights under this section does not terminate the + licenses of parties who have received copies or rights from you under + this License. 
If your rights have been terminated and not permanently + reinstated, you do not qualify to receive new licenses for the same + material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or + run a copy of the Program. Ancillary propagation of a covered work + occurring solely as a consequence of using peer-to-peer transmission + to receive a copy likewise does not require acceptance. However, + nothing other than this License grants you permission to propagate or + modify any covered work. These actions infringe copyright if you do + not accept this License. Therefore, by modifying or propagating a + covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically + receives a license from the original licensors, to run, modify and + propagate that work, subject to this License. You are not responsible + for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an + organization, or substantially all assets of one, or subdividing an + organization, or merging organizations. If propagation of a covered + work results from an entity transaction, each party to that + transaction who receives a copy of the work also receives whatever + licenses to the work the party's predecessor in interest had or could + give under the previous paragraph, plus a right to possession of the + Corresponding Source of the work from the predecessor in interest, if + the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the + rights granted or affirmed under this License. For example, you may + not impose a license fee, royalty, or other charge for exercise of + rights granted under this License, and you may not initiate litigation + (including a cross-claim or counterclaim in a lawsuit) alleging that + any patent claim is infringed by making, using, selling, offering for + sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this + License of the Program or a work on which the Program is based. The + work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims + owned or controlled by the contributor, whether already acquired or + hereafter acquired, that would be infringed by some manner, permitted + by this License, of making, using, or selling its contributor version, + but do not include claims that would be infringed only as a + consequence of further modification of the contributor version. For + purposes of this definition, "control" includes the right to grant + patent sublicenses in a manner consistent with the requirements of + this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free + patent license under the contributor's essential patent claims, to + make, use, sell, offer for sale, import and otherwise run, modify and + propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express + agreement or commitment, however denominated, not to enforce a patent + (such as an express permission to practice a patent or covenant not to + sue for patent infringement). 
To "grant" such a patent license to a + party means to make such an agreement or commitment not to enforce a + patent against the party. + + If you convey a covered work, knowingly relying on a patent license, + and the Corresponding Source of the work is not available for anyone + to copy, free of charge and under the terms of this License, through a + publicly available network server or other readily accessible means, + then you must either (1) cause the Corresponding Source to be so + available, or (2) arrange to deprive yourself of the benefit of the + patent license for this particular work, or (3) arrange, in a manner + consistent with the requirements of this License, to extend the patent + license to downstream recipients. "Knowingly relying" means you have + actual knowledge that, but for the patent license, your conveying the + covered work in a country, or your recipient's use of the covered work + in a country, would infringe one or more identifiable patents in that + country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or + arrangement, you convey, or propagate by procuring conveyance of, a + covered work, and grant a patent license to some of the parties + receiving the covered work authorizing them to use, propagate, modify + or convey a specific copy of the covered work, then the patent license + you grant is automatically extended to all recipients of the covered + work and works based on it. + + A patent license is "discriminatory" if it does not include within + the scope of its coverage, prohibits the exercise of, or is + conditioned on the non-exercise of one or more of the rights that are + specifically granted under this License. You may not convey a covered + work if you are a party to an arrangement with a third party that is + in the business of distributing software, under which you make payment + to the third party based on the extent of your activity of conveying + the work, and under which the third party grants, to any of the + parties who would receive the covered work from you, a discriminatory + patent license (a) in connection with copies of the covered work + conveyed by you (or copies made from those copies), or (b) primarily + for and in connection with specific products or compilations that + contain the covered work, unless you entered into that arrangement, + or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting + any implied license or other defenses to infringement that may + otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or + otherwise) that contradict the conditions of this License, they do not + excuse you from the conditions of this License. If you cannot convey a + covered work so as to satisfy simultaneously your obligations under this + License and any other pertinent obligations, then as a consequence you may + not convey it at all. For example, if you agree to terms that obligate you + to collect a royalty for further conveying from those to whom you convey + the Program, the only way you could satisfy both those terms and this + License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have + permission to link or combine any covered work with a work licensed + under version 3 of the GNU Affero General Public License into a single + combined work, and to convey the resulting work. The terms of this + License will continue to apply to the part which is the covered work, + but the special requirements of the GNU Affero General Public License, + section 13, concerning interaction through a network will apply to the + combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of + the GNU General Public License from time to time. Such new versions will + be similar in spirit to the present version, but may differ in detail to + address new problems or concerns. + + Each version is given a distinguishing version number. If the + Program specifies that a certain numbered version of the GNU General + Public License "or any later version" applies to it, you have the + option of following the terms and conditions either of that numbered + version or of any later version published by the Free Software + Foundation. If the Program does not specify a version number of the + GNU General Public License, you may choose any version ever published + by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future + versions of the GNU General Public License can be used, that proxy's + public statement of acceptance of a version permanently authorizes you + to choose that version for the Program. + + Later license versions may give you additional or different + permissions. However, no additional obligations are imposed on any + author or copyright holder as a result of your choosing to follow a + later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY + APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT + HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY + OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM + IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF + ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING + WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS + THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY + GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE + USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF + DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD + PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), + EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF + SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided + above cannot be given local legal effect according to their terms, + reviewing courts shall apply local law that most closely approximates + an absolute waiver of all civil liability in connection with the + Program, unless a warranty or assumption of liability accompanies a + copy of the Program in return for a fee. 
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+   If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+   To do so, attach the following notices to the program.  It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+     <one line to give the program's name and a brief idea of what it does.>
+     Copyright (C) <year>  <name of author>
+
+     This program is free software: you can redistribute it and/or modify
+     it under the terms of the GNU General Public License as published by
+     the Free Software Foundation, either version 3 of the License, or
+     (at your option) any later version.
+
+     This program is distributed in the hope that it will be useful,
+     but WITHOUT ANY WARRANTY; without even the implied warranty of
+     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+     GNU General Public License for more details.
+
+     You should have received a copy of the GNU General Public License
+     along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+   Also add information on how to contact you by electronic and paper mail.
+
+   If the program does terminal interaction, make it output a short
+ notice like this when it starts in an interactive mode:
+
+     <program>  Copyright (C) <year>  <name of author>
+     This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+     This is free software, and you are welcome to redistribute it
+     under certain conditions; type `show c' for details.
+
+   The hypothetical commands `show w' and `show c' should show the appropriate
+ parts of the General Public License.  Of course, your program's commands
+ might be different; for a GUI interface, you would use an "about box".
+
+   You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU GPL, see
+ <https://www.gnu.org/licenses/>.
+
+   The GNU General Public License does not permit incorporating your program
+ into proprietary programs.  If your program is a subroutine library, you
+ may consider it more useful to permit linking proprietary applications with
+ the library.  If this is what you want to do, use the GNU Lesser General
+ Public License instead of this License.  But first, please read
+ <https://www.gnu.org/licenses/why-not-lgpl.html>.
+
+ Name: libquadmath
+ Files: numpy.libs/libquadmath*.so
+ Description: dynamically linked to files compiled with gcc
+ Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
+ License: LGPL-2.1-or-later
+
+   GCC Quad-Precision Math Library
+   Copyright (C) 2010-2019 Free Software Foundation, Inc.
+   Written by Francois-Xavier Coudert  <fxcoudert@gcc.gnu.org>
+
+   This file is part of the libquadmath library.
+   Libquadmath is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   Libquadmath is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+ https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html + +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Science/Research +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: C +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Topic :: Software Development +Classifier: Topic :: Scientific/Engineering +Classifier: Typing :: Typed +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS +Project-URL: homepage, https://numpy.org +Project-URL: documentation, https://numpy.org/doc/ +Project-URL: source, https://github.com/numpy/numpy +Project-URL: download, https://pypi.org/project/numpy/#files +Project-URL: tracker, https://github.com/numpy/numpy/issues +Project-URL: release notes, https://numpy.org/doc/stable/release +Requires-Python: >=3.11 +Description-Content-Type: text/markdown + +
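The `Requires-Python`, `Classifier`, and `Project-URL` fields above are standard core-metadata headers; once the wheel is installed, the same values can be read back at runtime. A minimal sketch using only the standard library (nothing here is part of the wheel itself; the field names are simply the ones shown above):

    # Sketch: query an installed distribution's core metadata via the stdlib.
    from importlib.metadata import metadata

    md = metadata("numpy")                 # email.Message-like mapping
    print(md["Requires-Python"])           # e.g. ">=3.11"
    print(md["Description-Content-Type"])  # "text/markdown"
    for classifier in md.get_all("Classifier"):
        print(classifier)                  # the trove classifiers listed above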

+ + +[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)]( +https://numfocus.org) +[![PyPI Downloads](https://img.shields.io/pypi/dm/numpy.svg?label=PyPI%20downloads)]( +https://pypi.org/project/numpy/) +[![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/numpy.svg?label=Conda%20downloads)]( +https://anaconda.org/conda-forge/numpy) +[![Stack Overflow](https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg)]( +https://stackoverflow.com/questions/tagged/numpy) +[![Nature Paper](https://img.shields.io/badge/DOI-10.1038%2Fs41586--020--2649--2-blue)]( +https://doi.org/10.1038/s41586-020-2649-2) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/numpy/numpy/badge)](https://securityscorecards.dev/viewer/?uri=github.com/numpy/numpy) +[![Typing](https://img.shields.io/pypi/types/numpy)](https://pypi.org/project/numpy/) + + +NumPy is the fundamental package for scientific computing with Python. + +- **Website:** https://numpy.org +- **Documentation:** https://numpy.org/doc +- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion +- **Source code:** https://github.com/numpy/numpy +- **Contributing:** https://numpy.org/devdocs/dev/index.html +- **Bug reports:** https://github.com/numpy/numpy/issues +- **Report a security vulnerability:** https://tidelift.com/docs/security + +It provides: + +- a powerful N-dimensional array object +- sophisticated (broadcasting) functions +- tools for integrating C/C++ and Fortran code +- useful linear algebra, Fourier transform, and random number capabilities + +Testing: + +NumPy requires `pytest` and `hypothesis`. Tests can then be run after installation with: + + python -c "import numpy, sys; sys.exit(numpy.test() is False)" + +Code of Conduct +---------------------- + +NumPy is a community-driven open source project developed by a diverse group of +[contributors](https://numpy.org/teams/). The NumPy leadership has made a strong +commitment to creating an open, inclusive, and positive community. Please read the +[NumPy Code of Conduct](https://numpy.org/code-of-conduct/) for guidance on how to interact +with others in a way that makes our community thrive. + +Call for Contributions +---------------------- + +The NumPy project welcomes your expertise and enthusiasm! + +Small improvements or fixes are always appreciated. If you are considering larger contributions +to the source code, please contact us through the [mailing +list](https://mail.python.org/mailman/listinfo/numpy-discussion) first. + +Writing code isn’t the only way to contribute to NumPy. You can also: +- review pull requests +- help us stay on top of new and old issues +- develop tutorials, presentations, and other educational materials +- maintain and improve [our website](https://github.com/numpy/numpy.org) +- develop graphic design for our brand assets and promotional materials +- translate website content +- help with outreach and onboard new contributors +- write grant proposals and help with other fundraising efforts + +For more information about the ways you can contribute to NumPy, visit [our website](https://numpy.org/contribute/). +If you’re unsure where to start or how your skills fit in, reach out! You can +ask on the mailing list or here, on GitHub, by opening a new issue or leaving a +comment on a relevant issue that is already open. 
+ +Our preferred channels of communication are all public, but if you’d like to +speak to us in private first, contact our community coordinators at +numpy-team@googlegroups.com or on Slack (write numpy-team@googlegroups.com for +an invitation). + +We also have a biweekly community call, details of which are announced on the +mailing list. You are very welcome to join. + +If you are new to contributing to open source, [this +guide](https://opensource.guide/how-to-contribute/) helps explain why, what, +and how to successfully get involved. diff --git a/python/numpy-2.3.5.dist-info/RECORD b/python/numpy-2.3.5.dist-info/RECORD new file mode 100644 index 000000000..b1b5a82c1 --- /dev/null +++ b/python/numpy-2.3.5.dist-info/RECORD @@ -0,0 +1,1312 @@ +../../bin/f2py,sha256=OrHyBlcMFecJHT1QercQjr-jd3-UYsOjdOfi_wMRKxE,216 +../../bin/numpy-config,sha256=6lh0ufaLtJ1iT0EJrAy9Tv-WNv3b8yZt7NMlbRyhdqY,216 +numpy-2.3.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +numpy-2.3.5.dist-info/LICENSE.txt,sha256=IEajEw5QsRwBZZs6DZY-auC3Q2_46Jy8_Z6HvGES1ZU,47768 +numpy-2.3.5.dist-info/METADATA,sha256=YALyl2XlvBo75KG5LtVrZJaKD-y6c-f4HNU20MQPT6M,62117 +numpy-2.3.5.dist-info/RECORD,, +numpy-2.3.5.dist-info/WHEEL,sha256=rJCplGeGFjRoUvxJWhVONRgyGYGZoOkhyej0s3KyjoI,138 +numpy-2.3.5.dist-info/entry_points.txt,sha256=7Cb63gyL2sIRpsHdADpl6xaIW5JTlUI-k_yqEVr0BSw,220 +numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0,sha256=xgkASOzMdjUiwS7wFvgdprYnyzoET1XPBHmoOcQcCYA,2833617 +numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0,sha256=btUTf0Enga14Y0OftUNhP2ILQ8MrYykqACkkYWL1u8Y,250985 +numpy.libs/libscipy_openblas64_-fdde5778.so,sha256=7iA5zxOkXh6J8E0M6pWFZX2T_6pCVZThk1FhAT_fThI,25034001 +numpy/__config__.py,sha256=UMYLMNSpnR94Kc4wn5yQJjX0iXLFgw_b_OJcAESt6J4,5281 +numpy/__config__.pyi,sha256=7nE-kUNs2lWPIpofTastbf2PCMgCka7FCiK5jrFkDYE,2367 +numpy/__init__.cython-30.pxd,sha256=qT7d9_TWkj4UsfpY1uaBUmcYflptcjZfDGZsYJth8rU,47123 +numpy/__init__.pxd,sha256=BFYYkcQUcrl0Ee8ReoQiA0wgtxsWeIGovC8jYeEw5qg,43758 +numpy/__init__.py,sha256=k5JKxLeTMolH39nrk1XlTszf0mqSyN1SGIrtLlLH6zg,25919 +numpy/__init__.pyi,sha256=8sXDZSZvtxBASYKvGhDI6P1fc0YaRbnLIsywIwdmfxs,242020 +numpy/__pycache__/__config__.cpython-312.pyc,, +numpy/__pycache__/__init__.cpython-312.pyc,, +numpy/__pycache__/_array_api_info.cpython-312.pyc,, +numpy/__pycache__/_configtool.cpython-312.pyc,, +numpy/__pycache__/_distributor_init.cpython-312.pyc,, +numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc,, +numpy/__pycache__/_globals.cpython-312.pyc,, +numpy/__pycache__/_pytesttester.cpython-312.pyc,, +numpy/__pycache__/conftest.cpython-312.pyc,, +numpy/__pycache__/dtypes.cpython-312.pyc,, +numpy/__pycache__/exceptions.cpython-312.pyc,, +numpy/__pycache__/matlib.cpython-312.pyc,, +numpy/__pycache__/version.cpython-312.pyc,, +numpy/_array_api_info.py,sha256=NzJSuf8vutjGSqiqahq3jRI3SxMX4X1cva4J6dFv4EU,10354 +numpy/_array_api_info.pyi,sha256=QP_tYDbjtTOPtJECk3ehRXOQ24QM8TZjAfWX8XAsZCM,4864 +numpy/_configtool.py,sha256=EFRJ3pazTxYhE9op-ocWyKTLZrrpFhfmmS_tWrq8Cxo,1007 +numpy/_configtool.pyi,sha256=d4f22QGwpb1ZtDk-1Sn72ftvo4incC5E2JAikmjzfJI,24 +numpy/_core/__init__.py,sha256=yJ0iy1fXk9ogCFnflCWzBBLwlKSS-xlQWCpWCozaT6c,5542 +numpy/_core/__init__.pyi,sha256=Mj2I4BtqBVNUZVs5o1T58Z7wSaWjfhX0nCl-a0ULjgA,86 +numpy/_core/__pycache__/__init__.cpython-312.pyc,, +numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc,, +numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc,, +numpy/_core/__pycache__/_asarray.cpython-312.pyc,, 
+numpy/_core/__pycache__/_dtype.cpython-312.pyc,, +numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc,, +numpy/_core/__pycache__/_exceptions.cpython-312.pyc,, +numpy/_core/__pycache__/_internal.cpython-312.pyc,, +numpy/_core/__pycache__/_machar.cpython-312.pyc,, +numpy/_core/__pycache__/_methods.cpython-312.pyc,, +numpy/_core/__pycache__/_string_helpers.cpython-312.pyc,, +numpy/_core/__pycache__/_type_aliases.cpython-312.pyc,, +numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc,, +numpy/_core/__pycache__/arrayprint.cpython-312.pyc,, +numpy/_core/__pycache__/cversions.cpython-312.pyc,, +numpy/_core/__pycache__/defchararray.cpython-312.pyc,, +numpy/_core/__pycache__/einsumfunc.cpython-312.pyc,, +numpy/_core/__pycache__/fromnumeric.cpython-312.pyc,, +numpy/_core/__pycache__/function_base.cpython-312.pyc,, +numpy/_core/__pycache__/getlimits.cpython-312.pyc,, +numpy/_core/__pycache__/memmap.cpython-312.pyc,, +numpy/_core/__pycache__/multiarray.cpython-312.pyc,, +numpy/_core/__pycache__/numeric.cpython-312.pyc,, +numpy/_core/__pycache__/numerictypes.cpython-312.pyc,, +numpy/_core/__pycache__/overrides.cpython-312.pyc,, +numpy/_core/__pycache__/printoptions.cpython-312.pyc,, +numpy/_core/__pycache__/records.cpython-312.pyc,, +numpy/_core/__pycache__/shape_base.cpython-312.pyc,, +numpy/_core/__pycache__/strings.cpython-312.pyc,, +numpy/_core/__pycache__/umath.cpython-312.pyc,, +numpy/_core/_add_newdocs.py,sha256=ySKuP_4sVPNLHp1ojgTMhSRWi3d18CcBFHFHkD8Xf-U,208893 +numpy/_core/_add_newdocs.pyi,sha256=r__d_-GHkfjzuZ0qyjDztsKgdc1eIyeN-cBoYVgMBuo,168 +numpy/_core/_add_newdocs_scalars.py,sha256=Z5WcIAXy2Vs8kWLCzgyvxWVH0CAl-O64YFK3ttbU7yc,12600 +numpy/_core/_add_newdocs_scalars.pyi,sha256=ZnIk0TgL0szrv6SPCH-4dF469Q_92UvV5_ek47Oj7HM,573 +numpy/_core/_asarray.py,sha256=fCNHLaaCP-5Ia-RR_bIrHxWY3xklcmvlZiGhJIDiKLM,3911 +numpy/_core/_asarray.pyi,sha256=QHyb8DM_9U0otRugoNIyKjtvTVS3dZLn6DSxGi_ZU4U,1073 +numpy/_core/_dtype.py,sha256=cM6JnjoHLURWCHgN8VmQyjeiiDjcwhB5L_fPMOe1uuM,10547 +numpy/_core/_dtype.pyi,sha256=turm6RyVVEGKm6antqWWnyA0bnS2AuMwmKeFj-9mYHA,1851 +numpy/_core/_dtype_ctypes.py,sha256=KPPlakDsPkuThSOr5qFwW0jJ9VnjbvW4EWhObCHYGIE,3726 +numpy/_core/_dtype_ctypes.pyi,sha256=VwEZFViCPuHlCURv2jpJp9sbHh2hYUpzC_FRZNNGMMw,3682 +numpy/_core/_exceptions.py,sha256=X8Eg1hq1uU8L9wiOwFo2jRq6S0vnjCdgYFHj3hAW9Co,5159 +numpy/_core/_exceptions.pyi,sha256=ESXpijoEK0HrPy0dQYtjO62-Krd0419WLlrDROqwTyU,1900 +numpy/_core/_internal.py,sha256=YZ6nMGVOvfTD1nzk2XqRdz8k05WVnYGiljb1TnHvMq8,28981 +numpy/_core/_internal.pyi,sha256=2V2rXMQocZZHw8z_9HSrUi3LNGxaxA1nm0B0fcofjU8,2654 +numpy/_core/_machar.py,sha256=YUX24XYbxXJ79KrWar27FlDYKfeodr_RCkE7w0bETqs,11569 +numpy/_core/_machar.pyi,sha256=ESXpijoEK0HrPy0dQYtjO62-Krd0419WLlrDROqwTyU,1900 +numpy/_core/_methods.py,sha256=4qiUUES5wnOFeXnPavtqqMVhZ09ZZeSKlwqdPw2eKSI,9430 +numpy/_core/_methods.pyi,sha256=5HzEt2Z0-vxQfS1QJKDlTvNyLXcinNsja-xQiehMGbw,526 +numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so,sha256=-uOqys8wnCdsxMjL3BNzURd2PxxfZSy0i7l-zJwbndg,141888 +numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so,sha256=upZGeuSUurr3w0WLCcwDxCAMLhABCGDYDkUv-4sByM0,10808937 +numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so,sha256=LaAKYVcg0fwPuQL89z8q1-GrpQykaaNK4kJuXRArmDs,16800 +numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so,sha256=L-0ufvXmFouELpWO_1IJOyZp7m1EiznGwGSZC3lGRVA,59592 +numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so,sha256=NGUI17JKGWykF1UY3TsJruMrbH-YCfePwDFQRDbA0_c,2882368 
+numpy/_core/_simd.pyi,sha256=2z2sFPgXr3KRzHltbt31HVrhkXM0VwXFp1lUjxaRMAM,669 +numpy/_core/_string_helpers.py,sha256=6Smgoi6oD2CunjwBSr9BZ20HkCnvW6nTPblTOU3pWng,2845 +numpy/_core/_string_helpers.pyi,sha256=xLlLKJHutEYzyKnTG2k7clcWvVUTvD319SjnKmDXuac,358 +numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so,sha256=U__bzTJORJ4S1Ax393Y1ui1vpwNmQKJzNAIkICwK-hk,16936 +numpy/_core/_type_aliases.py,sha256=msFHBkZ2s1wKQyuguK_cF6NBS0_3AOww7j3oh26mo3Q,3489 +numpy/_core/_type_aliases.pyi,sha256=Tn1Ex4bAGQa1HuMx0Vn-tEBl3HDF_uesTzmiSrz81kQ,2388 +numpy/_core/_ufunc_config.py,sha256=9j6R12YmNbaT5-y5rCAOPBbgH4bYGYlnDgFq-vZ5nDs,15130 +numpy/_core/_ufunc_config.pyi,sha256=OuMlO8SLVrBQAGdtULHYs1owM6yQUWu14WK71OQBMpo,1890 +numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so,sha256=7Q7bbK-i26WJdhCDw-22TnLzD9gWwyqa93FEClt5M7A,50312 +numpy/_core/arrayprint.py,sha256=AAAvkrI0U6Pa_wZOnpuVZBpdsCCjpYpcWF8sA_SPYbg,65278 +numpy/_core/arrayprint.pyi,sha256=ogMYnp2ipEfagADzRaRK9ySGAfH_oabGNJegiA6LicY,6971 +numpy/_core/cversions.py,sha256=H_iNIpx9-hY1cQNxqjT2d_5SXZhJbMo_caq4_q6LB7I,347 +numpy/_core/defchararray.py,sha256=1tSvLWEeac20DodpDBxapJKwwczpJG1lVy2qjScIVXg,38007 +numpy/_core/defchararray.pyi,sha256=tF8nsglE-SD4Lbx4Jnmrt4AW-BZYu5FwGw7qhQTqF9w,27984 +numpy/_core/einsumfunc.py,sha256=heFeCiEKji-qfVk8zAZ1b5bKm-MUMLzCETMQ7yyHBhc,52820 +numpy/_core/einsumfunc.pyi,sha256=b10CKdAeLEryabwRMdiW1cKdNyqWLa5kMV7O2_X8g3A,4893 +numpy/_core/fromnumeric.py,sha256=s0f6WfkIRVwFZMlDrdYb3EjyF9vMGr0bms0Pc-VcOAM,143882 +numpy/_core/fromnumeric.pyi,sha256=VoUF-d31OuZYaRIi-duoYAABOADe4KjbBhFFx3Hd_Mc,42034 +numpy/_core/function_base.py,sha256=QT1pbll_8rf_3ZsGtLQoAeQ1OSqCqeAGtMTzPAE1I_w,19683 +numpy/_core/function_base.pyi,sha256=A9BlWQeiX08iIwDQJ6W1FUhy2qrRPVenXtHiEnPkt0k,7064 +numpy/_core/getlimits.py,sha256=32Qe7tlBFdyiDvdSjG1cp2a0NJ0rSMxeDRij3agiPrg,26101 +numpy/_core/getlimits.pyi,sha256=q30hQ3wDenmxoZUSoSOqyVrZZVGlsixXCHe6QUthbp8,61 +numpy/_core/include/numpy/__multiarray_api.c,sha256=ndBF5wbdd7F8_zWvR52MDO0Qm15_PrCCBlSk4dky4F8,12698 +numpy/_core/include/numpy/__multiarray_api.h,sha256=JguOheIpSWnm9uvlrfdZZJNOf0P3IXEqQc13wmxaeCE,61743 +numpy/_core/include/numpy/__ufunc_api.c,sha256=Fg7WlH4Ow6jETKRArVL_QF11ABKYz1VpOve56_U3E0w,1755 +numpy/_core/include/numpy/__ufunc_api.h,sha256=J5h9KHdntM27XQdq1PwHwI7V2v-sOx6AIbgCwP8mg9M,13175 +numpy/_core/include/numpy/_neighborhood_iterator_imp.h,sha256=s-Hw_l5WRwKtYvsiIghF0bg-mA_CgWnzFFOYVFJ-q4k,1857 +numpy/_core/include/numpy/_numpyconfig.h,sha256=lfgEF_31SixqOweZEHjn19bN5ng62MSwuVWEXS1_p_U,926 +numpy/_core/include/numpy/_public_dtype_api_table.h,sha256=n6_Kb98SyvsR_X7stiNA6VuGp_c5W1e4fMVcJdO0wis,4574 +numpy/_core/include/numpy/arrayobject.h,sha256=mU5vpcQ95PH1j3bp8KYhJOFHB-GxwRjSUsR7nxlTSRk,204 +numpy/_core/include/numpy/arrayscalars.h,sha256=LlyrZIa_5td11BfqfMCv1hYbiG6__zxxGv1MRj8uIVo,4243 +numpy/_core/include/numpy/dtype_api.h,sha256=Gn37RzObmcTsL6YUYY9aG22Ct8F-r4ZaC53NPFqaIso,19238 +numpy/_core/include/numpy/halffloat.h,sha256=TRZfXgipa-dFppX2uNgkrjrPli-1BfJtadWjAembJ4s,1959 +numpy/_core/include/numpy/ndarrayobject.h,sha256=MnykWmchyS05ler_ZyhFIr_0j6c0IcndEi3X3n0ZWDk,12057 +numpy/_core/include/numpy/ndarraytypes.h,sha256=kS9uirBf_ewXdIgsmRQETk3aQXeSPjLPCa6hlX5By-0,65810 +numpy/_core/include/numpy/npy_2_compat.h,sha256=wdjB7_-AtW3op67Xbj3EVH6apSF7cRG6h3c5hBz-YMs,8546 +numpy/_core/include/numpy/npy_2_complexcompat.h,sha256=eE9dV_Iq3jEfGGJFH_pQjJnvC6eQ12WgOB7cZMmHByE,857 +numpy/_core/include/numpy/npy_3kcompat.h,sha256=grN6W1n7benj3F2pSAOpl_s6vn1Y50QfAP-DaleD7cA,9648 
+numpy/_core/include/numpy/npy_common.h,sha256=-05bavbk44KUjy5Q-qnM5YzU32VJRv0N8ozfCI_SKcE,32586 +numpy/_core/include/numpy/npy_cpu.h,sha256=Vw8mVPm1fGmLdeLV3RoBZnBMMXA8cghgwRdWhlkDLi4,4225 +numpy/_core/include/numpy/npy_endian.h,sha256=vvK7ZlOt0vgqTVrIyviWzoxQz70S-BvflS4Z_k6X5XE,2834 +numpy/_core/include/numpy/npy_math.h,sha256=aeSFs60QbWPy1gIPyHDPrYExifm5mbDAcjP_mLk_PF0,18858 +numpy/_core/include/numpy/npy_no_deprecated_api.h,sha256=0yZrJcQEJ6MCHJInQk5TP9_qZ4t7EfBuoLOJ34IlJd4,678 +numpy/_core/include/numpy/npy_os.h,sha256=hlQsg_7-RkvS3s8OM8KXy99xxyJbCm-W1AYVcdnO1cw,1256 +numpy/_core/include/numpy/numpyconfig.h,sha256=FGuDPIr0gTFYgUzhVMXqq5BIQL-WqgmXfp003cUwpWE,7333 +numpy/_core/include/numpy/random/LICENSE.txt,sha256=-8U59H0M-DvGE3gID7hz1cFGMBJsrL_nVANcOSbapew,1018 +numpy/_core/include/numpy/random/bitgen.h,sha256=49AwKOR552r-NkhuSOF1usb_URiMSRMvD22JF5pKIng,488 +numpy/_core/include/numpy/random/distributions.h,sha256=W5tOyETd0m1W0GdaZ5dJP8fKlBtsTpG23V2Zlmrlqpg,9861 +numpy/_core/include/numpy/random/libdivide.h,sha256=ew9MNhPQd1LsCZiWiFmj9IZ7yOnA3HKOXffDeR9X1jw,80138 +numpy/_core/include/numpy/ufuncobject.h,sha256=BengvqXqiy4ipzz23KQi1Kldy9ybYUs4Sp5yA73VgiU,11780 +numpy/_core/include/numpy/utils.h,sha256=wMNomSH3Dfj0q78PrjLVtFtN-FPo7UJ4o0ifCUO-6Es,1185 +numpy/_core/lib/libnpymath.a,sha256=oXeSGrMy3L_zDbnj58as1hihfFFftHWb73ah3KPeCT4,54312 +numpy/_core/lib/npy-pkg-config/mlib.ini,sha256=_LsWV1eStNqwhdiYPa2538GL46dnfVwT4MrI1zbsoFw,147 +numpy/_core/lib/npy-pkg-config/npymath.ini,sha256=0iMzarBfkkZ_EXO95_kz-SHZRcNIEwIeOjE_esVBkRQ,361 +numpy/_core/lib/pkgconfig/numpy.pc,sha256=E7i8S4T6If44pIIwAj3c3DY6nmQfU8SR7zSfHgOGlXc,191 +numpy/_core/memmap.py,sha256=yIsQ6n9kpZulggRJJFkTbjVwnB4leoyizvUpc2iU4n8,12651 +numpy/_core/memmap.pyi,sha256=_LKjb_PuhcQwpqc2lFaL379DYzQ9PtuKdlVV3jXOYEM,47 +numpy/_core/multiarray.py,sha256=zwHBdyOoxiBRcOhG2QB_xBAYm-p8ARSpQbye9EzrrBo,58155 +numpy/_core/multiarray.pyi,sha256=Uy5Unmczfk7Pyz8Ohgh_5g4ASY7aZ0ZYpmhhmPnG6OA,32150 +numpy/_core/numeric.py,sha256=_DcnvXu6oaHXSi9Q-BV9yGzfx7tc9iCx69r9MnJDm5g,82322 +numpy/_core/numeric.pyi,sha256=ZSWTBi2kdP7BPG3KMGJWJIlqM9BLKFmgq_xgK_GnDUo,19042 +numpy/_core/numerictypes.py,sha256=15JLBX0m_MQgaiH_yBBI5glv0vXueU0arnS56RXfUxk,15967 +numpy/_core/numerictypes.pyi,sha256=tTXE4RCX_1OLbADevOifanAbOOJsROxISzKVMCRLSFk,3521 +numpy/_core/overrides.py,sha256=MtgzOBavG7wzQYCA7O7ArdCJVV72STIb_cvkWBuDLJE,7241 +numpy/_core/overrides.pyi,sha256=2lHte4EbOTDQvknjVfO71RgiLXnOpGQky5j2meS09JU,1713 +numpy/_core/printoptions.py,sha256=NFpvy5bnjbvqnKeqQt0veEExpAAYAVNoiGXH3pglWAc,1056 +numpy/_core/printoptions.pyi,sha256=eNiliCnDuZBxla6X9kwZ-7YiCn-UtMbT-U_qTnw8l9w,594 +numpy/_core/records.py,sha256=hoXCDswM6hbytiGdYGkhRISzQjnqImXcIdGlNuOUDX4,36767 +numpy/_core/records.pyi,sha256=tob9AxABbCXsO--gWXX-pD5Bo50NgCXKOt4JstVESjY,8935 +numpy/_core/shape_base.py,sha256=7yDPrIXTmmBnZMUStHXsq1iJNiGmIxEAcepxQ9o-JVQ,32738 +numpy/_core/shape_base.pyi,sha256=Qgfi1izbvKgRWAojCMXw3HsONgvsryFCsDhAvNI1dZE,4753 +numpy/_core/strings.py,sha256=OVfimWSljTxfV-abwPI_ivOQtnXryHha5pKNBktXsSg,50838 +numpy/_core/strings.pyi,sha256=Fyjq70ZP70BzV3Ov490dxX5EOv76sgnxA7qVBxeXuRU,13502 +numpy/_core/tests/__pycache__/_locales.cpython-312.pyc,, +numpy/_core/tests/__pycache__/_natype.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test__exceptions.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_abc.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_api.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_argparse.cpython-312.pyc,, 
+numpy/_core/tests/__pycache__/test_array_api_info.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_array_coercion.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_array_interface.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arraymethod.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arrayobject.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_arrayprint.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_casting_floatingpoint_errors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_casting_unittests.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_conversion_utils.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cpu_dispatcher.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cpu_features.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_custom_dtypes.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_cython.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_datetime.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_defchararray.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_deprecations.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_dlpack.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_dtype.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_einsum.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_errstate.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_extint128.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_function_base.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_getlimits.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_half.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_hashtable.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_indexerrors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_indexing.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_item_selection.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_limited_api.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_longdouble.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_machar.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_mem_overlap.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_mem_policy.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_memmap.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_multiarray.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_multithreading.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_nditer.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_nep50_promotions.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_numeric.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_numerictypes.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_overrides.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_print.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_protocols.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_records.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_regression.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalar_ctors.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalar_methods.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarbuffer.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarinherit.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarmath.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_scalarprint.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_shape_base.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_simd.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_simd_module.cpython-312.pyc,, 
+numpy/_core/tests/__pycache__/test_stringdtype.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_strings.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_ufunc.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_umath_complex.cpython-312.pyc,, +numpy/_core/tests/__pycache__/test_unicode.cpython-312.pyc,, +numpy/_core/tests/_locales.py,sha256=lvHqUJVMsrE7Jh3N_KpO5fGBZgID-l3Zr4-_RrH1ZNM,2176 +numpy/_core/tests/_natype.py,sha256=YCAkuhvWuMjTjt-C0VjA8zzui-KoioNwOmAYnvf6KR0,6525 +numpy/_core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 +numpy/_core/tests/data/generate_umath_validation_data.cpp,sha256=BQakB5o8Mq60zex5ovVO0IatNa7xbF8JvXmtk6373So,5842 +numpy/_core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 +numpy/_core/tests/data/umath-validation-set-README.txt,sha256=pxWwOaGGahaRd-AlAidDfocLyrAiDp0whf5hC7hYwqM,967 +numpy/_core/tests/data/umath-validation-set-arccos.csv,sha256=yBlz8r6RnnAYhdlobzGGo2FKY-DoSTQaP26y8138a3I,61365 +numpy/_core/tests/data/umath-validation-set-arccosh.csv,sha256=0GXe7XG1Z3jXAcK-OlEot_Df3MetDQSlbm3MJ__iMQk,61365 +numpy/_core/tests/data/umath-validation-set-arcsin.csv,sha256=w_Sv2NDn-mLZSAqb56JT2g4bqBzxYAihedWxHuf82uU,61339 +numpy/_core/tests/data/umath-validation-set-arcsinh.csv,sha256=DZrMYoZZZyM1DDyXNUxSlzx6bOgajnRSLWAzxcPck8k,60289 +numpy/_core/tests/data/umath-validation-set-arctan.csv,sha256=0aosXZ-9DYTop0lj4bfcBNwYVvjZdW13hbMRTRRTmV0,60305 +numpy/_core/tests/data/umath-validation-set-arctanh.csv,sha256=HEK9ePx1OkKrXIKkMUV0IxrmsDqIlgKddiI-LvF2J20,61339 +numpy/_core/tests/data/umath-validation-set-cbrt.csv,sha256=v855MTZih-fZp_GuEDst2qaIsxU4a7vlAbeIJy2xKpc,60846 +numpy/_core/tests/data/umath-validation-set-cos.csv,sha256=0PNnDqKkokZ7ERVDgbes8KNZc-ISJrZUlVZc5LkW18E,59122 +numpy/_core/tests/data/umath-validation-set-cosh.csv,sha256=JKC4nKr3wTzA_XNSiQvVUq9zkYy4djvtu2-j4ZZ_7Oc,60869 +numpy/_core/tests/data/umath-validation-set-exp.csv,sha256=rUAWIbvyeKh9rPfp2n0Zq7AKq_nvHpgbgzLjAllhsek,17491 +numpy/_core/tests/data/umath-validation-set-exp2.csv,sha256=djosT-3fTpiN_f_2WOumgMuuKgC_XhpVO-QsUFwI6uU,58624 +numpy/_core/tests/data/umath-validation-set-expm1.csv,sha256=K7jL6N4KQGX71fj5hvYkzcMXk7MmQes8FwrNfyrPpgU,60299 +numpy/_core/tests/data/umath-validation-set-log.csv,sha256=ynzbVbKxFzxWFwxHnxX7Fpm-va09oI3oK1_lTe19g4w,11692 +numpy/_core/tests/data/umath-validation-set-log10.csv,sha256=NOBD-rOWI_FPG4Vmbzu3JtX9UA838f2AaDFA-waiqGA,68922 +numpy/_core/tests/data/umath-validation-set-log1p.csv,sha256=tdbYWPqWIz8BEbIyklynh_tpQJzo970Edd4ek6DsPb8,60303 +numpy/_core/tests/data/umath-validation-set-log2.csv,sha256=39EUD0vFMbwyoXoOhgCmid6NeEAQU7Ff7QFjPsVObIE,68917 +numpy/_core/tests/data/umath-validation-set-sin.csv,sha256=8PUjnQ_YfmxFb42XJrvpvmkeSpEOlEXSmNvIK4VgfAM,58611 +numpy/_core/tests/data/umath-validation-set-sinh.csv,sha256=XOsBUuPcMjiO_pevMalpmd0iRv2gmnh9u7bV9ZLLg8I,60293 +numpy/_core/tests/data/umath-validation-set-tan.csv,sha256=Hv2WUMIscfvQJ5Y5BipuHk4oE4VY6QKbQp_kNRdCqYQ,60299 +numpy/_core/tests/data/umath-validation-set-tanh.csv,sha256=iolZF_MOyWRgYSa-SsD4df5mnyFK18zrICI740SWoTc,60299 +numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc,, +numpy/_core/tests/examples/cython/checks.pyx,sha256=nw6o0nlj3SfNQP3McS10zVH9UCZiITBdAi5yO4gm9Qo,10774 +numpy/_core/tests/examples/cython/meson.build,sha256=uuXVPKemNVMQ5MiEDqS4BXhwGHa96JHjS50WxZuJS_8,1268 
+numpy/_core/tests/examples/cython/setup.py,sha256=JM6UnDql7LsAnRo6p9G-nRz3dfnoy9fHF6YVKy1OzdA,859 +numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc,, +numpy/_core/tests/examples/limited_api/limited_api1.c,sha256=htSR9ER3S8AJqv4EZMsrxQ-SufTIlXNpuFI6MXQs87w,346 +numpy/_core/tests/examples/limited_api/limited_api2.pyx,sha256=1q4I59pdkCmMhLcYngN_XwQnPoLmDEo1uTGnhrLRjDc,203 +numpy/_core/tests/examples/limited_api/limited_api_latest.c,sha256=ltBLbrl1g9XxD2wvN_-g3NhIizc8mxnh2Z6wCyXo-8E,452 +numpy/_core/tests/examples/limited_api/meson.build,sha256=YM5RwW_waFymlWSHFhCCOHO6KCknooN0jCiqScL0i5M,1627 +numpy/_core/tests/examples/limited_api/setup.py,sha256=Y6tgsOF58qe7eG2QmRQHG2wacZWfpbJLT8u-5OamjqA,437 +numpy/_core/tests/test__exceptions.py,sha256=luMT6vPIdf6LuwFNGyT-xLMZaKZEYYOFzFpMaesojoE,2922 +numpy/_core/tests/test_abc.py,sha256=9y2SsJdkPeV0oW6dsROPZOcQ72_mXie1uU2yPN93wzo,2221 +numpy/_core/tests/test_api.py,sha256=WDiG1oUtTChL0e7sCmoCvo0NnFLl7koAQCeJ5d24VQA,24209 +numpy/_core/tests/test_argparse.py,sha256=pfFfRr0grfOt-6Y7D8q9yPmz8Fcx4UbUxLpe96Tk9Xg,2870 +numpy/_core/tests/test_array_api_info.py,sha256=PZ2EzS9pq4nLZRAvvUSOb2Ke5p7pb4u4P4HKLRZjstw,3063 +numpy/_core/tests/test_array_coercion.py,sha256=PJ3s7psngDM084R2x7luAHVkHoa31TDiH1FiZpUWSfs,34897 +numpy/_core/tests/test_array_interface.py,sha256=l39VuV4nCdIeV1RUvMtjjPohAgIvJP-V3GQ5MaPrVK8,7843 +numpy/_core/tests/test_arraymethod.py,sha256=my4I9YjpVGLwN1GMbuoEhBZEJN0PuH6R2wtvGHcfoWI,3223 +numpy/_core/tests/test_arrayobject.py,sha256=aVv2eGjunCMEDFgmFujxMpk4xb-zo1MQrFcwQLfblx0,2596 +numpy/_core/tests/test_arrayprint.py,sha256=6UmL93wltbIDKdhF_WcdPRH5mztX0wyzuBy6PYW3R_o,50738 +numpy/_core/tests/test_casting_floatingpoint_errors.py,sha256=cER1YCNEwq67uAPX0QhkJonb5oA4Ws1_t0Z2AWJjYJg,5076 +numpy/_core/tests/test_casting_unittests.py,sha256=HH849h4ox1dejLB4aFX2B9tSGf0WhVvPZBPJT4yTOAA,34336 +numpy/_core/tests/test_conversion_utils.py,sha256=HAIdSRUit1lhSQEn-UVPTwyNxKjP9bSr8NGeHXnp6ew,6362 +numpy/_core/tests/test_cpu_dispatcher.py,sha256=26vob-nCPkjtxf9lRlQvwoTR92lqquyDGPgE5DIoii8,1570 +numpy/_core/tests/test_cpu_features.py,sha256=lS9iIWWznKZgR8-G4ABZqznMTJGC343-FBaCG9ZHXmQ,15703 +numpy/_core/tests/test_custom_dtypes.py,sha256=LZCbBeoyCcluhz_drg5neyiAsoTaK-6DjB4l3LaNnTw,11766 +numpy/_core/tests/test_cython.py,sha256=hLdTcd5wbzMXOx_OyQEzNyFWm-rIcWto7LpCl1SNdIU,10186 +numpy/_core/tests/test_datetime.py,sha256=gbArTFwyvmbQSkvTwa7oCv6UXDuvYV3_AbFEvK4ImOo,122685 +numpy/_core/tests/test_defchararray.py,sha256=hmMd5Wv5PjTEIuBXq_DopSqJsnp-qJ8ub5BBGRKIUEw,30629 +numpy/_core/tests/test_deprecations.py,sha256=CayfNUVMMj4BYTIFdYR4xvL2Sy2CTLN7VTABe0HIlxg,17101 +numpy/_core/tests/test_dlpack.py,sha256=Lfi3Xd2umxJ4W8fJht5epHlYWwTKx7MB47i7dcOIpq8,5830 +numpy/_core/tests/test_dtype.py,sha256=e1ZLn0xj8FrlxK3FeHOOsoQ-xV17-FMM7mh7VpuuVhs,78797 +numpy/_core/tests/test_einsum.py,sha256=Sixz-ZogKZmnFz3t49voD6AsCxmxUl_c_DHxT9rdscE,56277 +numpy/_core/tests/test_errstate.py,sha256=czhSWJJ8mdDpkh76pAxU2-d4ebMyopyk2D_CC-2lzI0,4627 +numpy/_core/tests/test_extint128.py,sha256=F6TAH3PlGON3CNz-B4hunClNUTQYQ2R8CkvaX2Zqeo4,5625 +numpy/_core/tests/test_function_base.py,sha256=x6rHdbqXtHj07Oml_5DslnG6y8jm0XfW4RdV0Q_lHHA,17651 +numpy/_core/tests/test_getlimits.py,sha256=CAHTLA8QIYVXTLWCGAISUZaAJ-xd_cBnSdYaOGuLWn8,6976 +numpy/_core/tests/test_half.py,sha256=QSKuHAfa8NWvl0A51-XcV0UOIvk-ooLy6pndq90hr6k,24425 +numpy/_core/tests/test_hashtable.py,sha256=m9-IRALLhU5liPuAk4v-ZQTVQ4s5XtLhL6xRXf5QTOE,1147 
+numpy/_core/tests/test_indexerrors.py,sha256=mU2MJbdpbrcvxLZqZR293So4ZJxMH4apAjqXufRyOis,4726 +numpy/_core/tests/test_indexing.py,sha256=lU0jP4UvEe2_MUiAhy4_GD1zvpdIwUrHviu0MJhW_wQ,55421 +numpy/_core/tests/test_item_selection.py,sha256=AoPUe3llYwKjv3dO1PW1qSml4SWrAAL3fNqpwKAku6w,6631 +numpy/_core/tests/test_limited_api.py,sha256=75nz_t-jBdjKim6j-WW7WsD2rPnJ_KQ-zrRUiP3nVic,3463 +numpy/_core/tests/test_longdouble.py,sha256=FjuntHkYe158dwWr7eYe_mlqkj7sQ9lQXKZ93CKF0Pc,12391 +numpy/_core/tests/test_machar.py,sha256=Aw8icmrolAGmbIuXhUIYd4YvqIRR1I8GkcSx0J2c6yM,1067 +numpy/_core/tests/test_mem_overlap.py,sha256=IGpRF2GnkLQxEiIizsVT0eWUtlgCcJQ4w0-BEjSpT_8,29219 +numpy/_core/tests/test_mem_policy.py,sha256=pL6kBK8fgtRDTfMubFGGWnliTPWnS64uZ9l1H5qI8hk,16794 +numpy/_core/tests/test_memmap.py,sha256=LtghbNqt9AOmAalIyZF3lepthcKircyNfb2-5_Tkj1c,8186 +numpy/_core/tests/test_multiarray.py,sha256=91PCdEbUgXx_IdLdVlVAyC78aBV_kizA6k5I11F73MI,400997 +numpy/_core/tests/test_multithreading.py,sha256=VkvO2311ch8a_EeF7RTmhAQWvtHXuTZhqLVZZH1ovKI,8601 +numpy/_core/tests/test_nditer.py,sha256=7y1wdYzpGdwEbHRc5xppx8FZ45cKxNrm3JKzUPvkhrE,136568 +numpy/_core/tests/test_nep50_promotions.py,sha256=i6KpABBWFB5PWCdEv8kIjNQd7ryAPINS5m_Tnu7sDj4,10068 +numpy/_core/tests/test_numeric.py,sha256=aM2TfTaSVE2fz0Z3nN72XoxSDvZzAdatwWpLYWGBBws,159748 +numpy/_core/tests/test_numerictypes.py,sha256=PIUObIk8qTZKHwqwbc3ib6nTD4-8iCA4VTxqUg9Jg1s,24144 +numpy/_core/tests/test_overrides.py,sha256=0sDSmDWIr88GuCj0gOxdE3l0X_T5Hb5Wj2zfJDkOtvU,27518 +numpy/_core/tests/test_print.py,sha256=_cuM-DIpljOkzErb2ggIgs9HvOYrtpRppaECF6xAo0c,6787 +numpy/_core/tests/test_protocols.py,sha256=pbfumoRNnPhDP6PAPNIgLHUPPlmCdamCo4akkO8afjo,1173 +numpy/_core/tests/test_records.py,sha256=PAMHzIPp2WWDm4JHFQ-cjPBWf4BDuQumIYo7UX-zElk,20547 +numpy/_core/tests/test_regression.py,sha256=fJJnesLRUyPziCbYVM9LfLSS3qAMUz1-mzddhV9Br-U,95565 +numpy/_core/tests/test_scalar_ctors.py,sha256=I3akKp6WdwsTGic8pYQC_c6AxPXPEXStywWOF0n_ivU,6724 +numpy/_core/tests/test_scalar_methods.py,sha256=tx1RoZ03QsWblqg3Dv_JkaBFUOOILKZIqaEsFEs4tfE,9117 +numpy/_core/tests/test_scalarbuffer.py,sha256=2mZblaScwhN8mdlQvUULAKt273B2ia-mjtNmL_2UxfQ,5638 +numpy/_core/tests/test_scalarinherit.py,sha256=OIvSjrltdNSSP2c5HvDQ6pza3aKfmfgtixu1Zbahpcg,2587 +numpy/_core/tests/test_scalarmath.py,sha256=gBHBZ5SQMru1A57FUEaIMk19GFdVLTRXiO9vVh4XVVc,46583 +numpy/_core/tests/test_scalarprint.py,sha256=NS-FQDWICDcuDF5gxTQuG1Td1-EiOXIXufI-dwvKwxU,19705 +numpy/_core/tests/test_shape_base.py,sha256=mRSruY7S84ula25ZoOvbcRg_ea_3C3338e1tmdmv1Uk,31536 +numpy/_core/tests/test_simd.py,sha256=u8xSZ6HNLJ9-siYNIuyd0RA7FbD1BLEmnV5TGUrt1FU,48823 +numpy/_core/tests/test_simd_module.py,sha256=JjXH4Yq-0K-R8FHqVDinNaqY_grb1fQFFyVTHGQ0pBg,3904 +numpy/_core/tests/test_stringdtype.py,sha256=LImhDevH5NtTQNcdx23T2NwWZfHxmeBuMl-sjQXfctA,57052 +numpy/_core/tests/test_strings.py,sha256=EZq1GJjc94vbJKyGxAsdfECWeq-2CJGS8MjJTv3x08Y,59420 +numpy/_core/tests/test_ufunc.py,sha256=yO1DbSTyonZWsz8HoXV0E4YN5Xlg-aIHi6xn2gTi928,136356 +numpy/_core/tests/test_umath.py,sha256=piPN7xvcHI-0rcv0Go7YOBjDWRbmG02s896P7a9W4m8,194156 +numpy/_core/tests/test_umath_accuracy.py,sha256=QCFAeiPN6rEO8fwDwJun4J1pCKm0bPsQK6-1pTYCMIY,5478 +numpy/_core/tests/test_umath_complex.py,sha256=LZMd-divBHQQ7dS34obwvmStXa8aNez45VIVTwPg_jM,23627 +numpy/_core/tests/test_unicode.py,sha256=qrQ7UC0yndXFYI7MiJu8y_I5jCK2lxOQcehE289MElk,12967 +numpy/_core/umath.py,sha256=t_SQIHR7dkMF-VRp8dKyroOEd90oqNlzmgGwaH28qW8,2130 
+numpy/typing/tests/data/pass/random.py,sha256=UJF6epKYGfGq9QlrR9YuA7EK_mI8AQ2osdA4Uhsh1ms,61824 +numpy/typing/tests/data/pass/recfunctions.py,sha256=GwDirrHsL3upfIsAEZakPt95-RLY7BpXqU_KXxi4HhQ,5003 +numpy/typing/tests/data/pass/scalars.py,sha256=pzV3Y20dd6xB9NRsJ0YSdkcvI5XcD8cEWtEo1KTL1SU,3724 +numpy/typing/tests/data/pass/shape.py,sha256=L2iugxTnbm8kmBpaJVYpURKJEAnI7TH2KtuYeqNR9co,445 +numpy/typing/tests/data/pass/simple.py,sha256=lPj620zkTA8Sg893eu2mGuj-Xq2BGZ_1dcmfsVDkz8g,2751 +numpy/typing/tests/data/pass/simple_py3.py,sha256=HuLrc5aphThQkLjU2_19KgGFaXwKOfSzXe0p2xMm8ZI,96 +numpy/typing/tests/data/pass/ufunc_config.py,sha256=uzXOhCl9N4LPV9hV2Iqg_skgkKMbBPBF0GXPU9EMeuE,1205 +numpy/typing/tests/data/pass/ufunclike.py,sha256=U4Aay11VALvm22bWEX0eDWuN5qxJlg_hH5IpOL62M3I,1125 +numpy/typing/tests/data/pass/ufuncs.py,sha256=1Rem_geEm4qyD3XaRA1NAPKwr3YjRq68zbIlC_Xhi9M,422 +numpy/typing/tests/data/pass/warnings_and_errors.py,sha256=ETLZkDTGpZspvwjVYAZlnA1gH4PJ4bSY5PkWyxTjusU,161 +numpy/typing/tests/data/reveal/arithmetic.pyi,sha256=PQtbiDs4NYye_ycCJF4B625-j3iDfgD5GXIp_W0OIM4,26800 +numpy/typing/tests/data/reveal/array_api_info.pyi,sha256=oWKW0yGS9xKcLZnH2QeeixMBcI74dNIcwZr0bwGmDVM,3017 +numpy/typing/tests/data/reveal/array_constructors.pyi,sha256=OkW6r-NkRUDawuZAFpM30Jf_2QGxFPKXPxIjNZWa7k0,12854 +numpy/typing/tests/data/reveal/arraypad.pyi,sha256=Dg5ss1cDS_QiNT4YEheHXMa2beM4qBTUb1mq-REkh6A,653 +numpy/typing/tests/data/reveal/arrayprint.pyi,sha256=iUHzZaUrYFGC9QBCxhiEAIJODeqGwG7VCv875il-9gY,777 +numpy/typing/tests/data/reveal/arraysetops.pyi,sha256=Hhe49rLgj0P8SXElncNvLeCv1OqdI-iryB_673w7vL4,4411 +numpy/typing/tests/data/reveal/arrayterator.pyi,sha256=QPRyZzHFmti4HlrJ315dgzBjaet8LqM9il-8uc9e2P8,1039 +numpy/typing/tests/data/reveal/bitwise_ops.pyi,sha256=tlyf8qGUwuuavvkDDu1oXr5SSPNcE137fFT1jveexg4,4660 +numpy/typing/tests/data/reveal/char.pyi,sha256=9QbiMbkKycnZl4f4eKBoF_rAxIUIv3vBcOQyksHJCug,11470 +numpy/typing/tests/data/reveal/chararray.pyi,sha256=4oqRNZt7jIdfbNVgcsWPDVVFrrEYhqjAExaNzPya_lY,5199 +numpy/typing/tests/data/reveal/comparisons.pyi,sha256=mXRfm3ZUsk8YbSPg9ugPSWLGRwzUVy4BEVN7q4K56tc,7195 +numpy/typing/tests/data/reveal/constants.pyi,sha256=AazwlvF--Te1dt35f8lkDLNuo3jQXqmGvddDQ37jAE0,333 +numpy/typing/tests/data/reveal/ctypeslib.pyi,sha256=U9ZO5GnGHxVyv-OWRYWHSXctH7LGHPWDdyNVl_saQEQ,4134 +numpy/typing/tests/data/reveal/datasource.pyi,sha256=B9nCoOPE4fJvBIeInAgUCg5pIsr8IYOu_iToqt6n-Nc,583 +numpy/typing/tests/data/reveal/dtype.pyi,sha256=IdxNE3NIE0YKpVw4yI9lS-wWPmeFyfGCW2V0oyor4zk,5080 +numpy/typing/tests/data/reveal/einsumfunc.pyi,sha256=qPYk5W3lardDdgsQIGyu356iIGDnb0P38UKQDXWQlrk,1926 +numpy/typing/tests/data/reveal/emath.pyi,sha256=fcf0-GftYRByfJFuZC-MvzHlQU4A-f9-kPnxzQt48E0,2125 +numpy/typing/tests/data/reveal/fft.pyi,sha256=uZOJ0ljmmnejfPEwMsfUGDb52NOuTh7Npl7ONwx-Y2k,1601 +numpy/typing/tests/data/reveal/flatiter.pyi,sha256=ZxgdgbRWYXlyxlPOXJzZSHvALqGsK3aV4lf9RePghdA,1347 +numpy/typing/tests/data/reveal/fromnumeric.pyi,sha256=xweKmm6uKVgJF4-AwtM6hGEI_YHosu-8jXnd8yjSfJ4,15066 +numpy/typing/tests/data/reveal/getlimits.pyi,sha256=mH0kk94VBu-O5ZzA1nki80jttDK_EBGOsLQOZo3Rq18,1547 +numpy/typing/tests/data/reveal/histograms.pyi,sha256=Mr7P7JYMWF9jM6w5othyzh8CN3ygd2A-WRoB4jImnzk,1257 +numpy/typing/tests/data/reveal/index_tricks.pyi,sha256=4dvG8RXY5ktKXo1uC_pfPHXBDd7tatTbjCs8xr8M2os,3241 +numpy/typing/tests/data/reveal/lib_function_base.pyi,sha256=LMCyduuUjX1E7ruBI-B_cEJQ_rUt9ZO21ck22_OLa_c,10112 
+numpy/typing/tests/data/reveal/lib_polynomial.pyi,sha256=CrG0zxbY-HddD7D93q5Cow6c_3mx3nVb1ZCcAq5mC4U,5660 +numpy/typing/tests/data/reveal/lib_utils.pyi,sha256=oQCay2NF8pYHD5jNgRZKNjn8uJW4TJqUPIlytOwDSi0,436 +numpy/typing/tests/data/reveal/lib_version.pyi,sha256=y4ZJSLEeS273Zd6fqaE2XNdczTS0-cwIJ2Yn_4Otm44,572 +numpy/typing/tests/data/reveal/linalg.pyi,sha256=UAa92Iwqtj4_5rLC9S-KNVKKE72f4N0Jde6fWHhKHmM,5905 +numpy/typing/tests/data/reveal/ma.pyi,sha256=5FCR2aqUpKOtoQcazro_5C-NE2MrywouDrMHirVyHF0,16223 +numpy/typing/tests/data/reveal/matrix.pyi,sha256=ntknd4qkGbaBMMzPlkTeahyg_H8_TDBJQDbd36a_QfY,3040 +numpy/typing/tests/data/reveal/memmap.pyi,sha256=OCcEhR5mvvXk4UhF6lRqmkxU2NcAqJ4nqAuBpcroQ1g,719 +numpy/typing/tests/data/reveal/mod.pyi,sha256=-hF5jJQYbicLsWPTn0KnwvRN4yb1YFWyCwM-mLD1rqE,7196 +numpy/typing/tests/data/reveal/modules.pyi,sha256=_Gvxgql5KbJFL1Mj5gFAphzyGC44AkuNZLnYkv-3LRA,1858 +numpy/typing/tests/data/reveal/multiarray.pyi,sha256=oz81sV4JUBbd6memodStUpT11TARzqRXWUs4H0cU-YA,7779 +numpy/typing/tests/data/reveal/nbit_base_example.pyi,sha256=9OqWKUGRGCIt-mywzDmZExTOsM7l3JGw0YAPB9rs_8k,687 +numpy/typing/tests/data/reveal/ndarray_assignability.pyi,sha256=KOl5ActvtUx6h1oTQT3c0EiU5eCDbMD1okQVfxpc4j0,2668 +numpy/typing/tests/data/reveal/ndarray_conversion.pyi,sha256=SAI9kxMNl66L8n7kO3jn7-EL_3Ygn46behqD_dVa5Hw,3309 +numpy/typing/tests/data/reveal/ndarray_misc.pyi,sha256=8jwi9O-iGcojU0xSF_GUYMFRpkRdol5hQza0hkziNXc,8663 +numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi,sha256=z8SRTWdl6fSj_ENNF-M5jZnujUl1180WaFMAanXqCVw,1394 +numpy/typing/tests/data/reveal/nditer.pyi,sha256=yih7UE0OynR7GuVCGgwhzjTjARwOXikDe6Dr4ymRC2g,1898 +numpy/typing/tests/data/reveal/nested_sequence.pyi,sha256=Z2vwweUjoqxR0zUUldOUXsg6mkDfDP1BMyFV2hje5Z8,612 +numpy/typing/tests/data/reveal/npyio.pyi,sha256=p6jJFmcwXuQhshYC70zhg_itI1kLiDu9saUCNwYpFNo,3493 +numpy/typing/tests/data/reveal/numeric.pyi,sha256=0hvPN803QJoO38lYY68of7M-1KGXqdgHy9RdqcHwO-M,5869 +numpy/typing/tests/data/reveal/numerictypes.pyi,sha256=8XB1qex01lga6r0hXaNh6X6MCBjPZM2DYpSVBgBicIg,568 +numpy/typing/tests/data/reveal/polynomial_polybase.pyi,sha256=V7ulOvXuAcduWTD_7Jg1yPCLvROq8E-10GobfNlKXD8,7925 +numpy/typing/tests/data/reveal/polynomial_polyutils.pyi,sha256=I_4waxJEeUsp5pjnbBN55kqZ2kycK8akD_XvhsgsCGY,10642 +numpy/typing/tests/data/reveal/polynomial_series.pyi,sha256=YowKiIaDd2Je0PjEmXDINUXe4il0r4KDkpzDbYpwG38,6853 +numpy/typing/tests/data/reveal/random.pyi,sha256=xXJobSp5nVBelmrBO_OTvV8XQnbnZjbAyJfrRwlJshg,104296 +numpy/typing/tests/data/reveal/rec.pyi,sha256=E8lxkOQ4qSwwX20Y4d438s5g-kTnNARsZc4f-Y8OhZo,3378 +numpy/typing/tests/data/reveal/scalars.pyi,sha256=5s5Xm1HoA6bwwqK4gfEWqoNk45dAQvxAZLZc2zUhe3A,6378 +numpy/typing/tests/data/reveal/shape.pyi,sha256=ZT6e5LW4nU90tA-Av5NLiyoaPW9NIX_XkWJ-LOOzh84,262 +numpy/typing/tests/data/reveal/shape_base.pyi,sha256=xbnt0jps1djVxVMn4Lj8bxGl-mGvbhqSKFVWYcFApLg,2006 +numpy/typing/tests/data/reveal/stride_tricks.pyi,sha256=Cm9P_F7promu0zGZmo957SOFCZ6Np8wSv5ecR_hB668,1315 +numpy/typing/tests/data/reveal/strings.pyi,sha256=WvSd8xHIdxQdah3Q0ZJUva79jfVngB3UD9yb6awDW8w,9547 +numpy/typing/tests/data/reveal/testing.pyi,sha256=vP3uEWEdFHrfv_Q4OaJ0Oo5gUqUxkkIRVjvJMsqiHs8,8443 +numpy/typing/tests/data/reveal/twodim_base.pyi,sha256=TiBbWXI0xRCgk0bE-Bd4ZryWaLeJIQ5I-6KBjIVoMuE,4237 +numpy/typing/tests/data/reveal/type_check.pyi,sha256=W7rJUEf_iwI0D1FIVjhCEfzIjw_T04qcBYFxuPwnXAo,2392 +numpy/typing/tests/data/reveal/ufunc_config.pyi,sha256=XoD9fxaMVCGgyMncWKIJssFBO0SmndHsDs0hDXS04A8,1162 
+numpy/typing/tests/data/reveal/ufunclike.pyi,sha256=0jwIYSgXn0usVGkzyZz0ttO5tSYfWMYu_U2ByqrzuRQ,1183 +numpy/typing/tests/data/reveal/ufuncs.pyi,sha256=2IYvfPlLCuqgoyNKzbcv3mr-Dva2cyUSWtBWuM77sDk,4789 +numpy/typing/tests/data/reveal/warnings_and_errors.pyi,sha256=5qqRFzPOon1GhU_i5CHDxQLPKVcO2EMhbc851V8Gusc,449 +numpy/typing/tests/test_isfile.py,sha256=yaRIX3JLmwY1cgD-xxKvJjMVVBRmv9QNSXx9kQSoVAc,878 +numpy/typing/tests/test_runtime.py,sha256=YHS0Hgv1v3cip7C14UcsJWLGI37m18MqXrwLmb88Ctc,2919 +numpy/typing/tests/test_typing.py,sha256=VERPf6NJ6gRLoKk0ki-s1wvDS4E--InjNUaj63_Q-00,6289 +numpy/version.py,sha256=5ay-GmZtpin1gcQKLAnVQinHqpGPOh1WfbrAgpyAdHI,293 +numpy/version.pyi,sha256=x3oCrDqM_gQhitdDgfgMhJ-UPabIXk5etqBq8HUwUok,358 diff --git a/python/numpy-2.3.5.dist-info/WHEEL b/python/numpy-2.3.5.dist-info/WHEEL new file mode 100644 index 000000000..227a38cf0 --- /dev/null +++ b/python/numpy-2.3.5.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: meson +Root-Is-Purelib: false +Tag: cp312-cp312-manylinux_2_27_x86_64 +Tag: cp312-cp312-manylinux_2_28_x86_64 + diff --git a/python/numpy-2.3.5.dist-info/entry_points.txt b/python/numpy-2.3.5.dist-info/entry_points.txt new file mode 100644 index 000000000..48c4f6435 --- /dev/null +++ b/python/numpy-2.3.5.dist-info/entry_points.txt @@ -0,0 +1,13 @@ +[pkg_config] +numpy = numpy._core.lib.pkgconfig + +[array_api] +numpy = numpy + +[pyinstaller40] +hook-dirs = numpy:_pyinstaller_hooks_dir + +[console_scripts] +f2py = numpy.f2py.f2py2e:main +numpy-config = numpy._configtool:main + diff --git a/python/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 b/python/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 new file mode 100644 index 000000000..f00c303d0 Binary files /dev/null and b/python/numpy.libs/libgfortran-040039e1-0352e75f.so.5.0.0 differ diff --git a/python/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 b/python/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 new file mode 100644 index 000000000..b6063a05d Binary files /dev/null and b/python/numpy.libs/libquadmath-96973f99-934c22de.so.0.0.0 differ diff --git a/python/numpy.libs/libscipy_openblas64_-fdde5778.so b/python/numpy.libs/libscipy_openblas64_-fdde5778.so new file mode 100644 index 000000000..3b0a786df Binary files /dev/null and b/python/numpy.libs/libscipy_openblas64_-fdde5778.so differ diff --git a/python/numpy/__config__.py b/python/numpy/__config__.py new file mode 100644 index 000000000..eda56c55d --- /dev/null +++ b/python/numpy/__config__.py @@ -0,0 +1,170 @@ +# This file is generated by numpy's build process +# It contains system_info results at the time of building this package. 
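The dist-info hunks above (RECORD, WHEEL, entry_points.txt) complete the packaging metadata for the vendored cp312 manylinux wheel, and the hunks that follow add the generated runtime sources. As a quick sanity check of the vendored package, a minimal sketch, assuming the tree is importable as `numpy` (the exact import path depends on how python/ is wired into the project):

    # Hypothetical smoke test for the vendored wheel; the import path is an assumption.
    import numpy as np

    np.show_config()               # pretty-prints CONFIG (YAML output if pyyaml is installed)
    cfg = np.show_config("dicts")  # returns the CONFIG dict instead of printing
    assert cfg["Build Dependencies"]["blas"]["name"] == "scipy-openblas"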
+from enum import Enum +from numpy._core._multiarray_umath import ( + __cpu_features__, + __cpu_baseline__, + __cpu_dispatch__, +) + +__all__ = ["show_config"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return {k: _cleanup(v) for k, v in d.items() if v and _cleanup(v)} + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "14.2.1", + "commands": r"cc", + "args": r"", + "linker args": r"", + }, + "cython": { + "name": "cython", + "linker": r"cython", + "version": "3.2.1", + "commands": r"cython", + "args": r"", + "linker args": r"", + }, + "c++": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "14.2.1", + "commands": r"c++", + "args": r"", + "linker args": r"", + }, + }, + "Machine Information": { + "host": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "linux", + }, + "build": { + "cpu": "x86_64", + "family": "x86_64", + "endian": "little", + "system": "linux", + }, + "cross-compiled": bool("False".lower().replace("false", "")), + }, + "Build Dependencies": { + "blas": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.30", + "detection method": "pkgconfig", + "include directory": r"/opt/_internal/cpython-3.12.12/lib/python3.12/site-packages/scipy_openblas64/include", + "lib directory": r"/opt/_internal/cpython-3.12.12/lib/python3.12/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64", + "pc file directory": r"/project/.openblas", + }, + "lapack": { + "name": "scipy-openblas", + "found": bool("True".lower().replace("false", "")), + "version": "0.3.30", + "detection method": "pkgconfig", + "include directory": r"/opt/_internal/cpython-3.12.12/lib/python3.12/site-packages/scipy_openblas64/include", + "lib directory": r"/opt/_internal/cpython-3.12.12/lib/python3.12/site-packages/scipy_openblas64/lib", + "openblas configuration": r"OpenBLAS 0.3.30 USE64BITINT DYNAMIC_ARCH NO_AFFINITY Haswell MAX_THREADS=64", + "pc file directory": r"/project/.openblas", + }, + }, + "Python Information": { + "path": r"/tmp/build-env-aw1mwcap/bin/python", + "version": "3.12", + }, + "SIMD Extensions": { + "baseline": __cpu_baseline__, + "found": [ + feature for feature in __cpu_dispatch__ if __cpu_features__[feature] + ], + "not found": [ + feature for feature in __cpu_dispatch__ if not __cpu_features__[feature] + ], + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which NumPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + See Also + -------- + get_include : Returns the directory containing NumPy C + header files. + + Notes + ----- + 1. 
The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) + + +def show_config(mode=DisplayModes.stdout.value): + return show(mode) + + +show_config.__doc__ = show.__doc__ +show_config.__module__ = "numpy" diff --git a/python/numpy/__config__.pyi b/python/numpy/__config__.pyi new file mode 100644 index 000000000..b59bdcd25 --- /dev/null +++ b/python/numpy/__config__.pyi @@ -0,0 +1,102 @@ +from enum import Enum +from types import ModuleType +from typing import Final, NotRequired, TypedDict, overload, type_check_only +from typing import Literal as L + +_CompilerConfigDictValue = TypedDict( + "_CompilerConfigDictValue", + { + "name": str, + "linker": str, + "version": str, + "commands": str, + "args": str, + "linker args": str, + }, +) +_CompilerConfigDict = TypedDict( + "_CompilerConfigDict", + { + "c": _CompilerConfigDictValue, + "cython": _CompilerConfigDictValue, + "c++": _CompilerConfigDictValue, + }, +) +_MachineInformationDict = TypedDict( + "_MachineInformationDict", + { + "host": _MachineInformationDictValue, + "build": _MachineInformationDictValue, + "cross-compiled": NotRequired[L[True]], + }, +) + +@type_check_only +class _MachineInformationDictValue(TypedDict): + cpu: str + family: str + endian: L["little", "big"] + system: str + +_BuildDependenciesDictValue = TypedDict( + "_BuildDependenciesDictValue", + { + "name": str, + "found": NotRequired[L[True]], + "version": str, + "include directory": str, + "lib directory": str, + "openblas configuration": str, + "pc file directory": str, + }, +) + +class _BuildDependenciesDict(TypedDict): + blas: _BuildDependenciesDictValue + lapack: _BuildDependenciesDictValue + +class _PythonInformationDict(TypedDict): + path: str + version: str + +_SIMDExtensionsDict = TypedDict( + "_SIMDExtensionsDict", + { + "baseline": list[str], + "found": list[str], + "not found": list[str], + }, +) + +_ConfigDict = TypedDict( + "_ConfigDict", + { + "Compilers": _CompilerConfigDict, + "Machine Information": _MachineInformationDict, + "Build Dependencies": _BuildDependenciesDict, + "Python Information": _PythonInformationDict, + "SIMD Extensions": _SIMDExtensionsDict, + }, +) + +### + +__all__ = ["show_config"] + +CONFIG: Final[_ConfigDict] = ... + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + +def _check_pyyaml() -> ModuleType: ... + +@overload +def show(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show(mode: L["dicts"]) -> _ConfigDict: ... + +@overload +def show_config(mode: L["stdout"] = "stdout") -> None: ... +@overload +def show_config(mode: L["dicts"]) -> _ConfigDict: ... diff --git a/python/numpy/__init__.cython-30.pxd b/python/numpy/__init__.cython-30.pxd new file mode 100644 index 000000000..86c91cf61 --- /dev/null +++ b/python/numpy/__init__.cython-30.pxd @@ -0,0 +1,1241 @@ +# NumPy static imports for Cython >= 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. This is done automatically by Cython 3.0+ if a call +# is not detected inside of the module. 
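One note on the __config__.pyi stub above: it encodes the mode-dependent return type of show()/show_config() with @overload pairs, so a static checker resolves the "stdout" call to None and the "dicts" call to the CONFIG TypedDict. A hedged illustration, assuming the top-level numpy stubs re-export these overloads:

    # Static-typing view only; runtime behaviour is unchanged.
    import numpy as np

    cfg = np.show_config("dicts")  # checker infers the config TypedDict
    ret = np.show_config()         # checker infers None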
+# +# Author: Dag Sverre Seljebotn +# + +from cpython.ref cimport Py_INCREF +from cpython.object cimport PyObject, PyTypeObject, PyObject_TypeCheck +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + """ + + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VSTRING + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + + NPY_INTP + NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! 
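The NPY_TYPES enum above is the C-side counterpart of the type numbers NumPy exposes at runtime; NPY_DEFAULT_INT in particular is resolved at run time, as its comment notes, so portable code compares via dtype.num rather than hard-coding ordinals. A small Python-side illustration:

    # dtype.num carries the runtime type number matching NPY_TYPES (not a stable ABI).
    import numpy as np

    assert np.dtype("float64").num == np.dtype(np.double).num
    print(np.dtype(bool).num, np.dtype("int64").num, np.dtype("float64").num)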
+ + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + cdef int type_num + + @property + cdef inline npy_intp itemsize(self) noexcept nogil: + return PyDataType_ELSIZE(self) + + @property + cdef inline npy_intp alignment(self) noexcept nogil: + return PyDataType_ALIGNMENT(self) + + # Use fields/names with care as they may be NULL. You must check + # for this using PyDataType_HASFIELDS. + @property + cdef inline object fields(self): + return PyDataType_FIELDS(self) + + @property + cdef inline tuple names(self): + return PyDataType_NAMES(self) + + # Use PyDataType_HASSUBARRAY to test whether this field is + # valid (the pointer can be NULL). Most users should access + # this field via the inline helper method PyDataType_SHAPE. 
+ @property + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: + return PyDataType_SUBARRAY(self) + + @property + cdef inline npy_uint64 flags(self) noexcept nogil: + """The data types flags.""" + return PyDataType_FLAGS(self) + + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + + @property + cdef inline int numiter(self) noexcept nogil: + """The number of arrays that need to be broadcast to the same shape.""" + return PyArray_MultiIter_NUMITER(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """The total broadcasted size.""" + return PyArray_MultiIter_SIZE(self) + + @property + cdef inline npy_intp index(self) noexcept nogil: + """The current (1-d) index into the broadcasted result.""" + return PyArray_MultiIter_INDEX(self) + + @property + cdef inline int nd(self) noexcept nogil: + """The number of dimensions in the broadcasted result.""" + return PyArray_MultiIter_NDIM(self) + + @property + cdef inline npy_intp* dimensions(self) noexcept nogil: + """The shape of the broadcasted result.""" + return PyArray_MultiIter_DIMS(self) + + @property + cdef inline void** iters(self) noexcept nogil: + """An array of iterator objects that holds the iterators for the arrays to be broadcast together. + On return, the iterators are adjusted for broadcasting.""" + return PyArray_MultiIter_ITERS(self) + + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + # NOTE: no field declarations since direct access is deprecated since NumPy 1.7 + # Instead, we use properties that map to the corresponding C-API functions. + + @property + cdef inline PyObject* base(self) noexcept nogil: + """Returns a borrowed reference to the object owning the data/memory. + """ + return PyArray_BASE(self) + + @property + cdef inline dtype descr(self): + """Returns an owned reference to the dtype of the array. + """ + return PyArray_DESCR(self) + + @property + cdef inline int ndim(self) noexcept nogil: + """Returns the number of dimensions in the array. + """ + return PyArray_NDIM(self) + + @property + cdef inline npy_intp *shape(self) noexcept nogil: + """Returns a pointer to the dimensions/shape of the array. + The number of elements matches the number of dimensions of the array (ndim). + Can return NULL for 0-dimensional arrays. + """ + return PyArray_DIMS(self) + + @property + cdef inline npy_intp *strides(self) noexcept nogil: + """Returns a pointer to the strides of the array. + The number of elements matches the number of dimensions of the array (ndim). + """ + return PyArray_STRIDES(self) + + @property + cdef inline npy_intp size(self) noexcept nogil: + """Returns the total size (in number of elements) of the array. + """ + return PyArray_SIZE(self) + + @property + cdef inline char* data(self) noexcept nogil: + """The pointer to the data buffer as a char*. + This is provided for legacy reasons to avoid direct struct field access. + For new code that needs this access, you probably want to cast the result + of `PyArray_DATA()` instead, which returns a 'void*'. + """ + return PyArray_BYTES(self) + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. 
+ # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # 
works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* 
PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter 
(object, chunk) except 0 + int PyArray_AxisConverter (object, int *) except 0 + int PyArray_BoolConverter (object, npy_bool *) except 0 + int PyArray_ByteorderConverter (object, char *) except 0 + int PyArray_OrderConverter (object, NPY_ORDER *) except 0 + unsigned char PyArray_EquivTypes (dtype, dtype) # clears errors + #object PyArray_Zeros (int, npy_intp *, dtype, int) + #object PyArray_Empty (int, npy_intp *, dtype, int) + object PyArray_Where (object, object, object) + object PyArray_Arange (double, double, double, int) + #object PyArray_ArangeObj (object, object, object, dtype) + int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0 + object PyArray_LexSort (object, int) + object PyArray_Round (ndarray, int, ndarray) + unsigned char PyArray_EquivTypenums (int, int) + int PyArray_RegisterDataType (dtype) except -1 + int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1 + int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1 + #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) + object PyArray_IntTupleFromIntp (int, npy_intp *) + int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0 + #int PyArray_OutputConverter (object, ndarray*) except 0 + object PyArray_BroadcastToShape (object, npy_intp *, int) + #int PyArray_DescrAlignConverter (object, dtype*) except 0 + #int PyArray_DescrAlignConverter2 (object, dtype*) except 0 + int PyArray_SearchsideConverter (object, void *) except 0 + object PyArray_CheckAxis (ndarray, int *, int) + npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) + int PyArray_SetBaseObject(ndarray, base) except -1 # NOTE: steals a reference to base! Use "set_array_base()" instead. + + # The memory handler functions require the NumPy 1.22 API + # and may require defining NPY_TARGET_VERSION + ctypedef struct PyDataMemAllocator: + void *ctx + void* (*malloc) (void *ctx, size_t size) + void* (*calloc) (void *ctx, size_t nelem, size_t elsize) + void* (*realloc) (void *ctx, void *ptr, size_t new_size) + void (*free) (void *ctx, void *ptr, size_t size) + + ctypedef struct PyDataMem_Handler: + char* name + npy_uint8 version + PyDataMemAllocator allocator + + object PyDataMem_SetHandler(object handler) + object PyDataMem_GetHandler() + + # additional datetime related functions are defined below + + +# Typedefs that matches the runtime dtype objects in +# the numpy module. + +# The ones that are commented out needs an IFDEF function +# in Cython to enable them only on the right systems. 
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +ctypedef npy_longlong longlong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + + ctypedef struct npy_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. 
+ # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). 
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + # deprecated + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base) except *: + Py_INCREF(base) # important to do this before stealing the reference below! 
+ PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj) noexcept: + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object. 
+ """ + return (obj).obmeta.base + + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int 
NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. + NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) + +# NpyString API +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_string_allocator: + pass + + ctypedef struct npy_packed_static_string: + pass + + ctypedef struct npy_static_string: + size_t size + const char *buf + + ctypedef struct PyArray_StringDTypeObject: + PyArray_Descr base + PyObject *na_object + char coerce + char has_nan_na + char has_string_na + char array_owned + npy_static_string default_string + npy_static_string na_name + npy_string_allocator *allocator + +cdef extern from "numpy/arrayobject.h": + npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) + void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) + void NpyString_release_allocator(npy_string_allocator *allocator) + void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[]) + int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string) + int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string) + int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char 
*buf, size_t size) diff --git a/python/numpy/__init__.pxd b/python/numpy/__init__.pxd new file mode 100644 index 000000000..eb0764126 --- /dev/null +++ b/python/numpy/__init__.pxd @@ -0,0 +1,1154 @@ +# NumPy static imports for Cython < 3.0 +# +# If any of the PyArray_* functions are called, import_array must be +# called first. +# +# Author: Dag Sverre Seljebotn +# + +DEF _buffer_format_string_len = 255 + +cimport cpython.buffer as pybuf +from cpython.ref cimport Py_INCREF +from cpython.mem cimport PyObject_Malloc, PyObject_Free +from cpython.object cimport PyObject, PyTypeObject +from cpython.buffer cimport PyObject_GetBuffer +from cpython.type cimport type +cimport libc.stdio as stdio + + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. + # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.pxd" */ + """ + + +cdef extern from "Python.h": + ctypedef int Py_intptr_t + bint PyObject_TypeCheck(object obj, PyTypeObject* type) + +cdef extern from "numpy/arrayobject.h": + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp + + ctypedef unsigned char npy_bool + + ctypedef signed char npy_byte + ctypedef signed short npy_short + ctypedef signed int npy_int + ctypedef signed long npy_long + ctypedef signed long long npy_longlong + + ctypedef unsigned char npy_ubyte + ctypedef unsigned short npy_ushort + ctypedef unsigned int npy_uint + ctypedef unsigned long npy_ulong + ctypedef unsigned long long npy_ulonglong + + ctypedef float npy_float + ctypedef double npy_double + ctypedef long double npy_longdouble + + ctypedef signed char npy_int8 + ctypedef signed short npy_int16 + ctypedef signed int npy_int32 + ctypedef signed long long npy_int64 + + ctypedef unsigned char npy_uint8 + ctypedef unsigned short npy_uint16 + ctypedef unsigned int npy_uint32 + ctypedef unsigned long long npy_uint64 + + ctypedef float npy_float32 + ctypedef double npy_float64 + ctypedef long double npy_float80 + ctypedef long double npy_float96 + ctypedef long double npy_float128 + + ctypedef struct npy_cfloat: + pass + + ctypedef struct npy_cdouble: + pass + + ctypedef struct npy_clongdouble: + pass + + ctypedef struct npy_complex64: + pass + + ctypedef struct npy_complex128: + pass + + ctypedef struct npy_complex160: + pass + + ctypedef struct npy_complex192: + pass + + ctypedef struct npy_complex256: + pass + + ctypedef struct PyArray_Dims: + npy_intp *ptr + int len + + + cdef enum NPY_TYPES: + NPY_BOOL + NPY_BYTE + NPY_UBYTE + NPY_SHORT + NPY_USHORT + NPY_INT + NPY_UINT + NPY_LONG + NPY_ULONG + NPY_LONGLONG + NPY_ULONGLONG + NPY_FLOAT + NPY_DOUBLE + NPY_LONGDOUBLE + NPY_CFLOAT + NPY_CDOUBLE + NPY_CLONGDOUBLE + NPY_OBJECT + NPY_STRING + NPY_UNICODE + NPY_VSTRING + NPY_VOID + NPY_DATETIME + NPY_TIMEDELTA + NPY_NTYPES_LEGACY + NPY_NOTYPE + + NPY_INT8 + NPY_INT16 + NPY_INT32 + NPY_INT64 + NPY_UINT8 + NPY_UINT16 + NPY_UINT32 + NPY_UINT64 + NPY_FLOAT16 + NPY_FLOAT32 + NPY_FLOAT64 + NPY_FLOAT80 + NPY_FLOAT96 + NPY_FLOAT128 + NPY_COMPLEX64 + NPY_COMPLEX128 + NPY_COMPLEX160 + NPY_COMPLEX192 + NPY_COMPLEX256 + + NPY_INTP + NPY_UINTP + NPY_DEFAULT_INT # Not a compile time constant (normally)! 
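+
+    # Illustrative aside (not part of the upstream header): these type
+    # numbers are what `dtype.num` exposes at the Python level, so a quick
+    # sanity check is possible from plain Python, e.g.:
+    #
+    #   import numpy as np
+    #   assert np.dtype(np.float64).num == np.dtype('f8').num
+    #   assert np.dtype(np.int8).num != np.dtype(np.uint8).num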
+ + ctypedef enum NPY_ORDER: + NPY_ANYORDER + NPY_CORDER + NPY_FORTRANORDER + NPY_KEEPORDER + + ctypedef enum NPY_CASTING: + NPY_NO_CASTING + NPY_EQUIV_CASTING + NPY_SAFE_CASTING + NPY_SAME_KIND_CASTING + NPY_UNSAFE_CASTING + + ctypedef enum NPY_CLIPMODE: + NPY_CLIP + NPY_WRAP + NPY_RAISE + + ctypedef enum NPY_SCALARKIND: + NPY_NOSCALAR, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR + + ctypedef enum NPY_SORTKIND: + NPY_QUICKSORT + NPY_HEAPSORT + NPY_MERGESORT + + ctypedef enum NPY_SEARCHSIDE: + NPY_SEARCHLEFT + NPY_SEARCHRIGHT + + enum: + NPY_ARRAY_C_CONTIGUOUS + NPY_ARRAY_F_CONTIGUOUS + NPY_ARRAY_OWNDATA + NPY_ARRAY_FORCECAST + NPY_ARRAY_ENSURECOPY + NPY_ARRAY_ENSUREARRAY + NPY_ARRAY_ELEMENTSTRIDES + NPY_ARRAY_ALIGNED + NPY_ARRAY_NOTSWAPPED + NPY_ARRAY_WRITEABLE + NPY_ARRAY_WRITEBACKIFCOPY + + NPY_ARRAY_BEHAVED + NPY_ARRAY_BEHAVED_NS + NPY_ARRAY_CARRAY + NPY_ARRAY_CARRAY_RO + NPY_ARRAY_FARRAY + NPY_ARRAY_FARRAY_RO + NPY_ARRAY_DEFAULT + + NPY_ARRAY_IN_ARRAY + NPY_ARRAY_OUT_ARRAY + NPY_ARRAY_INOUT_ARRAY + NPY_ARRAY_IN_FARRAY + NPY_ARRAY_OUT_FARRAY + NPY_ARRAY_INOUT_FARRAY + + NPY_ARRAY_UPDATE_ALL + + cdef enum: + NPY_MAXDIMS # 64 on NumPy 2.x and 32 on NumPy 1.x + NPY_RAVEL_AXIS # Used for functions like PyArray_Mean + + ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *) + + ctypedef struct PyArray_ArrayDescr: + # shape is a tuple, but Cython doesn't support "tuple shape" + # inside a non-PyObject declaration, so we have to declare it + # as just a PyObject*. + PyObject* shape + + ctypedef struct PyArray_Descr: + pass + + ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. + cdef PyTypeObject* typeobj + cdef char kind + cdef char type + # Numpy sometimes mutates this without warning (e.g. it'll + # sometimes change "|" to "<" in shared dtype objects on + # little-endian machines). If this matters to you, use + # PyArray_IsNativeByteOrder(dtype.byteorder) instead of + # directly accessing this field. + cdef char byteorder + # Flags are not directly accessible on Cython <3. Use PyDataType_FLAGS. + # cdef char flags + cdef int type_num + # itemsize/elsize, alignment, fields, names, and subarray must + # use the `PyDataType_*` accessor macros. With Cython 3 you can + # still use getter attributes `dtype.itemsize` + + ctypedef class numpy.flatiter [object PyArrayIterObject, check_size ignore]: + # Use through macros + pass + + ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: + cdef int numiter + cdef npy_intp size, index + cdef int nd + cdef npy_intp *dimensions + cdef void **iters + + ctypedef struct PyArrayObject: + # For use in situations where ndarray can't replace PyArrayObject*, + # like PyArrayObject**. + pass + + ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]: + cdef __cythonbufferdefaults__ = {"mode": "strided"} + + cdef: + # Only taking a few of the most commonly used and stable fields. + # One should use PyArray_* macros instead to access the C fields. + char *data + int ndim "nd" + npy_intp *shape "dimensions" + npy_intp *strides + dtype descr # deprecated since NumPy 1.7 ! + PyObject* base # NOT PUBLIC, DO NOT USE ! + + + int _import_array() except -1 + # A second definition so _import_array isn't marked as used when we use it here. 
+ # Do not use - subject to change any time. + int __pyx_import_array "_import_array"() except -1 + + # + # Macros from ndarrayobject.h + # + bint PyArray_CHKFLAGS(ndarray m, int flags) nogil + bint PyArray_IS_C_CONTIGUOUS(ndarray arr) nogil + bint PyArray_IS_F_CONTIGUOUS(ndarray arr) nogil + bint PyArray_ISCONTIGUOUS(ndarray m) nogil + bint PyArray_ISWRITEABLE(ndarray m) nogil + bint PyArray_ISALIGNED(ndarray m) nogil + + int PyArray_NDIM(ndarray) nogil + bint PyArray_ISONESEGMENT(ndarray) nogil + bint PyArray_ISFORTRAN(ndarray) nogil + int PyArray_FORTRANIF(ndarray) nogil + + void* PyArray_DATA(ndarray) nogil + char* PyArray_BYTES(ndarray) nogil + + npy_intp* PyArray_DIMS(ndarray) nogil + npy_intp* PyArray_STRIDES(ndarray) nogil + npy_intp PyArray_DIM(ndarray, size_t) nogil + npy_intp PyArray_STRIDE(ndarray, size_t) nogil + + PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! + PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. + int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + npy_intp PyArray_ITEMSIZE(ndarray) nogil + int PyArray_TYPE(ndarray arr) nogil + + object PyArray_GETITEM(ndarray arr, void *itemptr) + int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) except -1 + + bint PyTypeNum_ISBOOL(int) nogil + bint PyTypeNum_ISUNSIGNED(int) nogil + bint PyTypeNum_ISSIGNED(int) nogil + bint PyTypeNum_ISINTEGER(int) nogil + bint PyTypeNum_ISFLOAT(int) nogil + bint PyTypeNum_ISNUMBER(int) nogil + bint PyTypeNum_ISSTRING(int) nogil + bint PyTypeNum_ISCOMPLEX(int) nogil + bint PyTypeNum_ISFLEXIBLE(int) nogil + bint PyTypeNum_ISUSERDEF(int) nogil + bint PyTypeNum_ISEXTENDED(int) nogil + bint PyTypeNum_ISOBJECT(int) nogil + + npy_intp PyDataType_ELSIZE(dtype) nogil + npy_intp PyDataType_ALIGNMENT(dtype) nogil + PyObject* PyDataType_METADATA(dtype) nogil + PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil + PyObject* PyDataType_NAMES(dtype) nogil + PyObject* PyDataType_FIELDS(dtype) nogil + + bint PyDataType_ISBOOL(dtype) nogil + bint PyDataType_ISUNSIGNED(dtype) nogil + bint PyDataType_ISSIGNED(dtype) nogil + bint PyDataType_ISINTEGER(dtype) nogil + bint PyDataType_ISFLOAT(dtype) nogil + bint PyDataType_ISNUMBER(dtype) nogil + bint PyDataType_ISSTRING(dtype) nogil + bint PyDataType_ISCOMPLEX(dtype) nogil + bint PyDataType_ISFLEXIBLE(dtype) nogil + bint PyDataType_ISUSERDEF(dtype) nogil + bint PyDataType_ISEXTENDED(dtype) nogil + bint PyDataType_ISOBJECT(dtype) nogil + bint PyDataType_HASFIELDS(dtype) nogil + bint PyDataType_HASSUBARRAY(dtype) nogil + npy_uint64 PyDataType_FLAGS(dtype) nogil + + bint PyArray_ISBOOL(ndarray) nogil + bint PyArray_ISUNSIGNED(ndarray) nogil + bint PyArray_ISSIGNED(ndarray) nogil + bint PyArray_ISINTEGER(ndarray) nogil + bint PyArray_ISFLOAT(ndarray) nogil + bint PyArray_ISNUMBER(ndarray) nogil + bint PyArray_ISSTRING(ndarray) nogil + bint PyArray_ISCOMPLEX(ndarray) nogil + bint PyArray_ISFLEXIBLE(ndarray) nogil + bint PyArray_ISUSERDEF(ndarray) nogil + bint PyArray_ISEXTENDED(ndarray) nogil + bint PyArray_ISOBJECT(ndarray) nogil + bint PyArray_HASFIELDS(ndarray) nogil + + bint PyArray_ISVARIABLE(ndarray) nogil + + bint PyArray_SAFEALIGNEDCOPY(ndarray) nogil + bint PyArray_ISNBO(char) nogil # works on ndarray.byteorder + bint PyArray_IsNativeByteOrder(char) nogil # 
works on ndarray.byteorder + bint PyArray_ISNOTSWAPPED(ndarray) nogil + bint PyArray_ISBYTESWAPPED(ndarray) nogil + + bint PyArray_FLAGSWAP(ndarray, int) nogil + + bint PyArray_ISCARRAY(ndarray) nogil + bint PyArray_ISCARRAY_RO(ndarray) nogil + bint PyArray_ISFARRAY(ndarray) nogil + bint PyArray_ISFARRAY_RO(ndarray) nogil + bint PyArray_ISBEHAVED(ndarray) nogil + bint PyArray_ISBEHAVED_RO(ndarray) nogil + + + bint PyDataType_ISNOTSWAPPED(dtype) nogil + bint PyDataType_ISBYTESWAPPED(dtype) nogil + + bint PyArray_DescrCheck(object) + + bint PyArray_Check(object) + bint PyArray_CheckExact(object) + + # Cannot be supported due to out arg: + # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) + # bint PyArray_HasArrayInterface(op, out) + + + bint PyArray_IsZeroDim(object) + # Cannot be supported due to ## ## in macro: + # bint PyArray_IsScalar(object, verbatim work) + bint PyArray_CheckScalar(object) + bint PyArray_IsPythonNumber(object) + bint PyArray_IsPythonScalar(object) + bint PyArray_IsAnyScalar(object) + bint PyArray_CheckAnyScalar(object) + + ndarray PyArray_GETCONTIGUOUS(ndarray) + bint PyArray_SAMESHAPE(ndarray, ndarray) nogil + npy_intp PyArray_SIZE(ndarray) nogil + npy_intp PyArray_NBYTES(ndarray) nogil + + object PyArray_FROM_O(object) + object PyArray_FROM_OF(object m, int flags) + object PyArray_FROM_OT(object m, int type) + object PyArray_FROM_OTF(object m, int type, int flags) + object PyArray_FROMANY(object m, int type, int min, int max, int flags) + object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) + object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) + void PyArray_FILLWBYTE(ndarray, int val) + object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) + unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) + bint PyArray_EquivByteorders(int b1, int b2) nogil + object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) + object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data) + #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) + object PyArray_ToScalar(void* data, ndarray arr) + + void* PyArray_GETPTR1(ndarray m, npy_intp i) nogil + void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) nogil + void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) nogil + void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l) nogil + + # Cannot be supported due to out arg + # void PyArray_DESCR_REPLACE(descr) + + + object PyArray_Copy(ndarray) + object PyArray_FromObject(object op, int type, int min_depth, int max_depth) + object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth) + object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth) + + object PyArray_Cast(ndarray mp, int type_num) + object PyArray_Take(ndarray ap, object items, int axis) + object PyArray_Put(ndarray ap, object items, object values) + + void PyArray_ITER_RESET(flatiter it) nogil + void PyArray_ITER_NEXT(flatiter it) nogil + void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil + void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil + void* PyArray_ITER_DATA(flatiter it) nogil + bint PyArray_ITER_NOTDONE(flatiter it) nogil + + void PyArray_MultiIter_RESET(broadcast multi) nogil + void PyArray_MultiIter_NEXT(broadcast multi) nogil + void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil + void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil + void* 
PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil + void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil + bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil + + # Functions from __multiarray_api.h + + # Functions taking dtype and returning object/ndarray are disabled + # for now as they steal dtype references. I'm conservative and disable + # more than is probably needed until it can be checked further. + int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... + int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... + dtype PyArray_DescrFromType (int) + object PyArray_TypeObjectFromType (int) + char * PyArray_Zero (ndarray) + char * PyArray_One (ndarray) + #object PyArray_CastToType (ndarray, dtype, int) + int PyArray_CanCastSafely (int, int) # writes errors + npy_bool PyArray_CanCastTo (dtype, dtype) # writes errors + int PyArray_ObjectType (object, int) except 0 + dtype PyArray_DescrFromObject (object, dtype) + #ndarray* PyArray_ConvertToCommonType (object, int *) + dtype PyArray_DescrFromScalar (object) + dtype PyArray_DescrFromTypeObject (object) + npy_intp PyArray_Size (object) + #object PyArray_Scalar (void *, dtype, object) + #object PyArray_FromScalar (object, dtype) + void PyArray_ScalarAsCtype (object, void *) + #int PyArray_CastScalarToCtype (object, void *, dtype) + #int PyArray_CastScalarDirect (object, dtype, void *, int) + #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) + #object PyArray_FromAny (object, dtype, int, int, int, object) + object PyArray_EnsureArray (object) + object PyArray_EnsureAnyArray (object) + #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) + #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) + #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) + #object PyArray_FromIter (object, dtype, npy_intp) + object PyArray_Return (ndarray) + #object PyArray_GetField (ndarray, dtype, int) + #int PyArray_SetField (ndarray, dtype, int, object) except -1 + object PyArray_Byteswap (ndarray, npy_bool) + object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) + int PyArray_CopyInto (ndarray, ndarray) except -1 + int PyArray_CopyAnyInto (ndarray, ndarray) except -1 + int PyArray_CopyObject (ndarray, object) except -1 + object PyArray_NewCopy (ndarray, NPY_ORDER) + object PyArray_ToList (ndarray) + object PyArray_ToString (ndarray, NPY_ORDER) + int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) except -1 + int PyArray_Dump (object, object, int) except -1 + object PyArray_Dumps (object, int) + int PyArray_ValidType (int) # Cannot error + void PyArray_UpdateFlags (ndarray, int) + object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object) + #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object) + #dtype PyArray_DescrNew (dtype) + dtype PyArray_DescrNewFromType (int) + double PyArray_GetPriority (object, double) # clears errors as of 1.25 + object PyArray_IterNew (object) + object PyArray_MultiIterNew (int, ...) + + int PyArray_PyIntAsInt (object) except? 
-1 + npy_intp PyArray_PyIntAsIntp (object) + int PyArray_Broadcast (broadcast) except -1 + int PyArray_FillWithScalar (ndarray, object) except -1 + npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) + dtype PyArray_DescrNewByteorder (dtype, char) + object PyArray_IterAllButAxis (object, int *) + #object PyArray_CheckFromAny (object, dtype, int, int, int, object) + #object PyArray_FromArray (ndarray, dtype, int) + object PyArray_FromInterface (object) + object PyArray_FromStructInterface (object) + #object PyArray_FromArrayAttr (object, dtype, object) + #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) + int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) + npy_bool PyArray_CanCastScalar (type, type) + int PyArray_RemoveSmallest (broadcast) except -1 + int PyArray_ElementStrides (object) + void PyArray_Item_INCREF (char *, dtype) except * + void PyArray_Item_XDECREF (char *, dtype) except * + object PyArray_Transpose (ndarray, PyArray_Dims *) + object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) + object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) + object PyArray_PutMask (ndarray, object, object) + object PyArray_Repeat (ndarray, object, int) + object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) + int PyArray_Sort (ndarray, int, NPY_SORTKIND) except -1 + object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) + object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *) + object PyArray_ArgMax (ndarray, int, ndarray) + object PyArray_ArgMin (ndarray, int, ndarray) + object PyArray_Reshape (ndarray, object) + object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) + object PyArray_Squeeze (ndarray) + #object PyArray_View (ndarray, dtype, type) + object PyArray_SwapAxes (ndarray, int, int) + object PyArray_Max (ndarray, int, ndarray) + object PyArray_Min (ndarray, int, ndarray) + object PyArray_Ptp (ndarray, int, ndarray) + object PyArray_Mean (ndarray, int, int, ndarray) + object PyArray_Trace (ndarray, int, int, int, int, ndarray) + object PyArray_Diagonal (ndarray, int, int, int) + object PyArray_Clip (ndarray, object, object, ndarray) + object PyArray_Conjugate (ndarray, ndarray) + object PyArray_Nonzero (ndarray) + object PyArray_Std (ndarray, int, int, ndarray, int) + object PyArray_Sum (ndarray, int, int, ndarray) + object PyArray_CumSum (ndarray, int, int, ndarray) + object PyArray_Prod (ndarray, int, int, ndarray) + object PyArray_CumProd (ndarray, int, int, ndarray) + object PyArray_All (ndarray, int, ndarray) + object PyArray_Any (ndarray, int, ndarray) + object PyArray_Compress (ndarray, object, int, ndarray) + object PyArray_Flatten (ndarray, NPY_ORDER) + object PyArray_Ravel (ndarray, NPY_ORDER) + npy_intp PyArray_MultiplyList (npy_intp *, int) + int PyArray_MultiplyIntList (int *, int) + void * PyArray_GetPtr (ndarray, npy_intp*) + int PyArray_CompareLists (npy_intp *, npy_intp *, int) + #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) + int PyArray_Free (object, void *) + #int PyArray_Converter (object, object*) + int PyArray_IntpFromSequence (object, npy_intp *, int) except -1 + object PyArray_Concatenate (object, int) + object PyArray_InnerProduct (object, object) + object PyArray_MatrixProduct (object, object) + object PyArray_Correlate (object, object, int) + #int PyArray_DescrConverter (object, dtype*) except 0 + #int PyArray_DescrConverter2 (object, dtype*) except 0 + int PyArray_IntpConverter (object, PyArray_Dims *) except 0 + #int PyArray_BufferConverter 
(object, chunk) except 0
+    int PyArray_AxisConverter (object, int *) except 0
+    int PyArray_BoolConverter (object, npy_bool *) except 0
+    int PyArray_ByteorderConverter (object, char *) except 0
+    int PyArray_OrderConverter (object, NPY_ORDER *) except 0
+    unsigned char PyArray_EquivTypes (dtype, dtype)  # clears errors
+    #object PyArray_Zeros (int, npy_intp *, dtype, int)
+    #object PyArray_Empty (int, npy_intp *, dtype, int)
+    object PyArray_Where (object, object, object)
+    object PyArray_Arange (double, double, double, int)
+    #object PyArray_ArangeObj (object, object, object, dtype)
+    int PyArray_SortkindConverter (object, NPY_SORTKIND *) except 0
+    object PyArray_LexSort (object, int)
+    object PyArray_Round (ndarray, int, ndarray)
+    unsigned char PyArray_EquivTypenums (int, int)
+    int PyArray_RegisterDataType (dtype) except -1
+    int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) except -1
+    int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) except -1
+    #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+    object PyArray_IntTupleFromIntp (int, npy_intp *)
+    int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) except 0
+    #int PyArray_OutputConverter (object, ndarray*) except 0
+    object PyArray_BroadcastToShape (object, npy_intp *, int)
+    #int PyArray_DescrAlignConverter (object, dtype*) except 0
+    #int PyArray_DescrAlignConverter2 (object, dtype*) except 0
+    int PyArray_SearchsideConverter (object, void *) except 0
+    object PyArray_CheckAxis (ndarray, int *, int)
+    npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+    int PyArray_SetBaseObject(ndarray, base) except -1  # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+    # The memory handler functions require the NumPy 1.22 API
+    # and may require defining NPY_TARGET_VERSION
+    ctypedef struct PyDataMemAllocator:
+        void *ctx
+        void* (*malloc) (void *ctx, size_t size)
+        void* (*calloc) (void *ctx, size_t nelem, size_t elsize)
+        void* (*realloc) (void *ctx, void *ptr, size_t new_size)
+        void (*free) (void *ctx, void *ptr, size_t size)
+
+    ctypedef struct PyDataMem_Handler:
+        char* name
+        npy_uint8 version
+        PyDataMemAllocator allocator
+
+    object PyDataMem_SetHandler(object handler)
+    object PyDataMem_GetHandler()
+
+    # additional datetime related functions are defined below
+
+
+# Typedefs that match the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out need an IFDEF function
+# in Cython to enable them only on the right systems.
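+#
+# As an illustrative sketch (not part of the upstream file): the typedefs
+# just below are what allow user code that does `cimport numpy as cnp` to
+# declare typed buffers, e.g.
+#
+#   def total(cnp.ndarray[cnp.int64_t, ndim=1] a):
+#       cdef Py_ssize_t i
+#       cdef cnp.int64_t s = 0
+#       for i in range(a.shape[0]):
+#           s += a[i]
+#       return s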
+ +ctypedef npy_int8 int8_t +ctypedef npy_int16 int16_t +ctypedef npy_int32 int32_t +ctypedef npy_int64 int64_t + +ctypedef npy_uint8 uint8_t +ctypedef npy_uint16 uint16_t +ctypedef npy_uint32 uint32_t +ctypedef npy_uint64 uint64_t + +ctypedef npy_float32 float32_t +ctypedef npy_float64 float64_t +#ctypedef npy_float80 float80_t +#ctypedef npy_float128 float128_t + +ctypedef float complex complex64_t +ctypedef double complex complex128_t + +ctypedef npy_longlong longlong_t +ctypedef npy_ulonglong ulonglong_t + +ctypedef npy_intp intp_t +ctypedef npy_uintp uintp_t + +ctypedef npy_double float_t +ctypedef npy_double double_t +ctypedef npy_longdouble longdouble_t + +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t + +cdef inline object PyArray_MultiIterNew1(a): + return PyArray_MultiIterNew(1, a) + +cdef inline object PyArray_MultiIterNew2(a, b): + return PyArray_MultiIterNew(2, a, b) + +cdef inline object PyArray_MultiIterNew3(a, b, c): + return PyArray_MultiIterNew(3, a, b, c) + +cdef inline object PyArray_MultiIterNew4(a, b, c, d): + return PyArray_MultiIterNew(4, a, b, c, d) + +cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + return PyArray_MultiIterNew(5, a, b, c, d, e) + +cdef inline tuple PyDataType_SHAPE(dtype d): + if PyDataType_HASSUBARRAY(d): + return d.subarray.shape + else: + return () + + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + ctypedef int64_t npy_timedelta + ctypedef int64_t npy_datetime + +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct PyArray_DatetimeMetaData: + NPY_DATETIMEUNIT base + int64_t num + + ctypedef struct npy_datetimestruct: + int64_t year + int32_t month, day, hour, min, sec, us, ps, as + + # Iterator API added in v1.6 + # + # These don't match the definition in the C API because Cython can't wrap + # function pointers that return functions. 
+ # https://github.com/cython/cython/issues/6720 + ctypedef int (*NpyIter_IterNextFunc "NpyIter_IterNextFunc *")(NpyIter* it) noexcept nogil + ctypedef void (*NpyIter_GetMultiIndexFunc "NpyIter_GetMultiIndexFunc *")(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayscalars.h": + + # abstract types + ctypedef class numpy.generic [object PyObject]: + pass + ctypedef class numpy.number [object PyObject]: + pass + ctypedef class numpy.integer [object PyObject]: + pass + ctypedef class numpy.signedinteger [object PyObject]: + pass + ctypedef class numpy.unsignedinteger [object PyObject]: + pass + ctypedef class numpy.inexact [object PyObject]: + pass + ctypedef class numpy.floating [object PyObject]: + pass + ctypedef class numpy.complexfloating [object PyObject]: + pass + ctypedef class numpy.flexible [object PyObject]: + pass + ctypedef class numpy.character [object PyObject]: + pass + + ctypedef struct PyDatetimeScalarObject: + # PyObject_HEAD + npy_datetime obval + PyArray_DatetimeMetaData obmeta + + ctypedef struct PyTimedeltaScalarObject: + # PyObject_HEAD + npy_timedelta obval + PyArray_DatetimeMetaData obmeta + + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + NPY_FR_fs + NPY_FR_as + NPY_FR_GENERIC + + +cdef extern from "numpy/arrayobject.h": + # These are part of the C-API defined in `__multiarray_api.h` + + # NumPy internal definitions in datetime_strings.c: + int get_datetime_iso_8601_strlen "NpyDatetime_GetDatetimeISO8601StrLen" ( + int local, NPY_DATETIMEUNIT base) + int make_iso_8601_datetime "NpyDatetime_MakeISO8601Datetime" ( + npy_datetimestruct *dts, char *outstr, npy_intp outlen, + int local, int utc, NPY_DATETIMEUNIT base, int tzoffset, + NPY_CASTING casting) except -1 + + # NumPy internal definition in datetime.c: + # May return 1 to indicate that object does not appear to be a datetime + # (returns 0 on success). 
+ int convert_pydatetime_to_datetimestruct "NpyDatetime_ConvertPyDateTimeToDatetimeStruct" ( + PyObject *obj, npy_datetimestruct *out, + NPY_DATETIMEUNIT *out_bestunit, int apply_tzinfo) except -1 + int convert_datetime64_to_datetimestruct "NpyDatetime_ConvertDatetime64ToDatetimeStruct" ( + PyArray_DatetimeMetaData *meta, npy_datetime dt, + npy_datetimestruct *out) except -1 + int convert_datetimestruct_to_datetime64 "NpyDatetime_ConvertDatetimeStructToDatetime64"( + PyArray_DatetimeMetaData *meta, const npy_datetimestruct *dts, + npy_datetime *out) except -1 + + +# +# ufunc API +# + +cdef extern from "numpy/ufuncobject.h": + + ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *) + + ctypedef class numpy.ufunc [object PyUFuncObject, check_size ignore]: + cdef: + int nin, nout, nargs + int identity + PyUFuncGenericFunction *functions + void **data + int ntypes + int check_return + char *name + char *types + char *doc + void *ptr + PyObject *obj + PyObject *userloops + + cdef enum: + PyUFunc_Zero + PyUFunc_One + PyUFunc_None + # deprecated + UFUNC_FPE_DIVIDEBYZERO + UFUNC_FPE_OVERFLOW + UFUNC_FPE_UNDERFLOW + UFUNC_FPE_INVALID + # use these instead + NPY_FPE_DIVIDEBYZERO + NPY_FPE_OVERFLOW + NPY_FPE_UNDERFLOW + NPY_FPE_INVALID + + object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, + void **, char *, int, int, int, int, char *, char *, int) + int PyUFunc_RegisterLoopForType(ufunc, int, + PyUFuncGenericFunction, int *, void *) except -1 + void PyUFunc_f_f_As_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_d_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_f_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_g_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F_As_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_F_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_D_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_G_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_ff_f \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_dd_d \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_gg_g \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_DD_D \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_FF_F \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_GG_G \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_O_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_OO_O_method \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_On_Om \ + (char **, npy_intp *, npy_intp *, void *) + void PyUFunc_clearfperr() + int PyUFunc_getfperr() + int PyUFunc_ReplaceLoopBySignature \ + (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) + object PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void **, char *, int, int, int, + int, char *, char *, int, char *) + + int _import_umath() except -1 + +cdef inline void set_array_base(ndarray arr, object base): + Py_INCREF(base) # important to do this before stealing the reference below! 
+ PyArray_SetBaseObject(arr, base) + +cdef inline object get_array_base(ndarray arr): + base = PyArray_BASE(arr) + if base is NULL: + return None + return base + +# Versions of the import_* functions which are more suitable for +# Cython code. +cdef inline int import_array() except -1: + try: + __pyx_import_array() + except Exception: + raise ImportError("numpy._core.multiarray failed to import") + +cdef inline int import_umath() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + +cdef inline int import_ufunc() except -1: + try: + _import_umath() + except Exception: + raise ImportError("numpy._core.umath failed to import") + + +cdef inline bint is_timedelta64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.timedelta64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj): + """ + Cython equivalent of `isinstance(obj, np.datetime64)` + + Parameters + ---------- + obj : object + + Returns + ------- + bool + """ + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + + +cdef inline npy_datetime get_datetime64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy datetime64 object + + Note that to interpret this as a datetime, the corresponding unit is + also needed. That can be found using `get_datetime64_unit`. + """ + return (obj).obval + + +cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: + """ + returns the int64 value underlying scalar numpy timedelta64 object + """ + return (obj).obval + + +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: + """ + returns the unit part of the dtype for a numpy datetime64 object. 
+ """ + return (obj).obmeta.base + + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE + # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int 
NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL
+    int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL
+    npy_intp NpyIter_GetIterSize(NpyIter* it) nogil
+    npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil
+    void NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart,
+                                   npy_intp* iend) nogil
+    int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL
+    npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil
+    npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil
+    npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil
+    npy_bool NpyIter_HasIndex(NpyIter* it) nogil
+    npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil
+    npy_bool NpyIter_IsBuffered(NpyIter* it) nogil
+    npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil
+    npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil
+    int NpyIter_GetNDim(NpyIter* it) nogil
+    int NpyIter_GetNOp(NpyIter* it) nogil
+    npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL
+    int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil
+    PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it)
+    PyArrayObject** NpyIter_GetOperandArray(NpyIter* it)
+    ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i)
+    void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags)
+    void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags)
+    int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize,
+                                        npy_intp* outstrides) except NPY_FAIL
+    npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil
+    # functions for iterating an NpyIter object
+    #
+    # These don't match the definition in the C API because Cython can't wrap
+    # function pointers that return functions. The string-renamed ctypedefs
+    # above already carry the trailing `*` in their C names, so no extra `*`
+    # is written on the return types here.
+    NpyIter_IterNextFunc NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL
+    NpyIter_GetMultiIndexFunc NpyIter_GetGetMultiIndex(NpyIter* it,
+                                                       char** errmsg) except NULL
+    char** NpyIter_GetDataPtrArray(NpyIter* it) nogil
+    char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil
+    npy_intp* NpyIter_GetIndexPtr(NpyIter* it)
+    npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil
+    npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil
+    void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil
+    npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil
+    void NpyIter_DebugPrint(NpyIter* it)
+
+# NpyString API
+cdef extern from "numpy/ndarraytypes.h":
+    ctypedef struct npy_string_allocator:
+        pass
+
+    ctypedef struct npy_packed_static_string:
+        pass
+
+    ctypedef struct npy_static_string:
+        size_t size
+        const char *buf
+
+    ctypedef struct PyArray_StringDTypeObject:
+        PyArray_Descr base
+        PyObject *na_object
+        char coerce
+        char has_nan_na
+        char has_string_na
+        char array_owned
+        npy_static_string default_string
+        npy_static_string na_name
+        npy_string_allocator *allocator
+
+cdef extern from "numpy/arrayobject.h":
+    npy_string_allocator *NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr)
+    void NpyString_acquire_allocators(size_t n_descriptors, PyArray_Descr *const descrs[], npy_string_allocator *allocators[])
+    void NpyString_release_allocator(npy_string_allocator *allocator)
+    void NpyString_release_allocators(size_t length, npy_string_allocator *allocators[])
+    int NpyString_load(npy_string_allocator *allocator, const npy_packed_static_string *packed_string, npy_static_string *unpacked_string)
+    int NpyString_pack_null(npy_string_allocator *allocator, npy_packed_static_string *packed_string)
+    int NpyString_pack(npy_string_allocator *allocator, npy_packed_static_string *packed_string, const char *buf, size_t size)
diff --git a/python/numpy/__init__.py b/python/numpy/__init__.py
new file mode 100644
index 000000000..097fe9f41
--- /dev/null
+++ b/python/numpy/__init__.py
@@ -0,0 +1,945 @@
+"""
+NumPy
+=====
+
+Provides
+  1. An array object of arbitrary homogeneous items
+  2. Fast mathematical operations over arrays
+  3. Linear Algebra, Fourier Transforms, Random Number Generation
+
+How to use the documentation
+----------------------------
+Documentation is available in two forms: docstrings provided
+with the code, and a loose standing reference guide, available from
+`the NumPy homepage <https://numpy.org>`_.
+
+We recommend exploring the docstrings using
+`IPython <https://ipython.org>`_, an advanced Python shell with
+TAB-completion and introspection capabilities.  See below for further
+instructions.
+
+The docstring examples assume that `numpy` has been imported as ``np``::
+
+  >>> import numpy as np
+
+Code snippets are indicated by three greater-than signs::
+
+  >>> x = 42
+  >>> x = x + 1
+
+Use the built-in ``help`` function to view a function's docstring::
+
+  >>> help(np.sort)
+  ... # doctest: +SKIP
+
+For some objects, ``np.info(obj)`` may provide additional help.  This is
+particularly true if you see the line "Help on ufunc object:" at the top
+of the help() page.  Ufuncs are implemented in C, not Python, for speed.
+The native Python help() does not know how to view their help, but our
+np.info() function does.
+
+Available subpackages
+---------------------
+lib
+    Basic functions used by several sub-packages.
+random
+    Core Random Tools
+linalg
+    Core Linear Algebra Tools
+fft
+    Core FFT routines
+polynomial
+    Polynomial tools
+testing
+    NumPy testing tools
+distutils
+    Enhancements to distutils with support for
+    Fortran compilers and more (for Python <= 3.11)
+
+Utilities
+---------
+test
+    Run numpy unittests
+show_config
+    Show numpy build configuration
+__version__
+    NumPy version string
+
+Viewing documentation using IPython
+-----------------------------------
+
+Start IPython and import `numpy` usually under the alias ``np``: `import
+numpy as np`.  Then, directly paste or use the ``%cpaste`` magic to paste
+examples into the shell.  To see which functions are available in `numpy`,
+type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
+``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
+down the list.  To view the docstring for a function, use
+``np.cos?`` (to view the docstring) and ``np.cos??`` (to view
+the source code).
+
+Copies vs. in-place operation
+-----------------------------
+Most of the functions in `numpy` return a copy of the array argument
+(e.g., `np.sort`).  In-place versions of these functions are often
+available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
+Exceptions to this rule are documented.
+
+"""
+import os
+import sys
+import warnings
+
+# If a version with git hash was stored, use that instead
+from . import version
+from ._expired_attrs_2_0 import __expired_attributes__
+from ._globals import _CopyMode, _NoValue
+from .version import __version__
+
+# We first need to detect if we're being called as part of the numpy setup
+# procedure itself in a reliable manner.
+try:
+    __NUMPY_SETUP__  # noqa: B018
+except NameError:
+    __NUMPY_SETUP__ = False
+
+if __NUMPY_SETUP__:
+    sys.stderr.write('Running from numpy source directory.\n')
+else:
+    # Allow distributors to run custom init code before importing numpy._core
+    from .
import _distributor_init + + try: + from numpy.__config__ import show_config + except ImportError as e: + msg = """Error importing numpy: you should not try to import numpy from + its source directory; please exit the numpy source tree, and relaunch + your python interpreter from there.""" + raise ImportError(msg) from e + + from . import _core + from ._core import ( + False_, + ScalarType, + True_, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + any, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_str, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + asin, + asinh, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + base_repr, + binary_repr, + bitwise_and, + bitwise_count, + bitwise_invert, + bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + block, + bool, + bool_, + broadcast, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + can_cast, + cbrt, + cdouble, + ceil, + character, + choose, + clip, + clongdouble, + complex64, + complex128, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copysign, + copyto, + correlate, + cos, + cosh, + count_nonzero, + cross, + csingle, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + diagonal, + divide, + divmod, + dot, + double, + dtype, + e, + einsum, + einsum_path, + empty, + empty_like, + equal, + errstate, + euler_gamma, + exp, + exp2, + expm1, + fabs, + finfo, + flatiter, + flatnonzero, + flexible, + float16, + float32, + float64, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromstring, + full, + full_like, + gcd, + generic, + geomspace, + get_printoptions, + getbufsize, + geterr, + geterrcall, + greater, + greater_equal, + half, + heaviside, + hstack, + hypot, + identity, + iinfo, + indices, + inexact, + inf, + inner, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + invert, + is_busday, + isclose, + isdtype, + isfinite, + isfortran, + isinf, + isnan, + isnat, + isscalar, + issubdtype, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + linspace, + little_endian, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + matmul, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + memmap, + min, + min_scalar_type, + minimum, + mod, + modf, + moveaxis, + multiply, + nan, + ndarray, + ndim, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ones, + ones_like, + outer, + partition, + permute_dims, + pi, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + putmask, + rad2deg, + radians, + ravel, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + round, + sctypeDict, + searchsorted, + set_printoptions, + setbufsize, + seterr, + seterrcall, + shape, + shares_memory, + short, + sign, + 
signbit, + signedinteger, + sin, + single, + sinh, + size, + sort, + spacing, + sqrt, + square, + squeeze, + stack, + std, + str_, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + tensordot, + timedelta64, + trace, + transpose, + true_divide, + trunc, + typecodes, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + unstack, + ushort, + var, + vdot, + vecdot, + vecmat, + void, + vstack, + where, + zeros, + zeros_like, + ) + + # NOTE: It's still under discussion whether these aliases + # should be removed. + for ta in ["float96", "float128", "complex192", "complex256"]: + try: + globals()[ta] = getattr(_core, ta) + except AttributeError: + pass + del ta + + from . import lib + from . import matrixlib as _mat + from .lib import scimath as emath + from .lib._arraypad_impl import pad + from .lib._arraysetops_impl import ( + ediff1d, + in1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + ) + from .lib._function_base_impl import ( + angle, + append, + asarray_chkfinite, + average, + bartlett, + bincount, + blackman, + copy, + corrcoef, + cov, + delete, + diff, + digitize, + extract, + flip, + gradient, + hamming, + hanning, + i0, + insert, + interp, + iterable, + kaiser, + median, + meshgrid, + percentile, + piecewise, + place, + quantile, + rot90, + select, + sinc, + sort_complex, + trapezoid, + trapz, + trim_zeros, + unwrap, + vectorize, + ) + from .lib._histograms_impl import histogram, histogram_bin_edges, histogramdd + from .lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + ravel_multi_index, + s_, + unravel_index, + ) + from .lib._nanfunctions_impl import ( + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ) + from .lib._npyio_impl import ( + fromregex, + genfromtxt, + load, + loadtxt, + packbits, + save, + savetxt, + savez, + savez_compressed, + unpackbits, + ) + from .lib._polynomial_impl import ( + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polysub, + polyval, + roots, + ) + from .lib._shape_base_impl import ( + apply_along_axis, + apply_over_axes, + array_split, + column_stack, + dsplit, + dstack, + expand_dims, + hsplit, + kron, + put_along_axis, + row_stack, + split, + take_along_axis, + tile, + vsplit, + ) + from .lib._stride_tricks_impl import ( + broadcast_arrays, + broadcast_shapes, + broadcast_to, + ) + from .lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + tri, + tril, + tril_indices, + tril_indices_from, + triu, + triu_indices, + triu_indices_from, + vander, + ) + from .lib._type_check_impl import ( + common_type, + imag, + iscomplex, + iscomplexobj, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real, + real_if_close, + typename, + ) + from .lib._ufunclike_impl import fix, isneginf, isposinf + from .lib._utils_impl import get_include, info, show_runtime + from .matrixlib import asmatrix, bmat, matrix + + # public submodules are imported lazily, therefore are accessible from + # __getattr__. Note that `distutils` (deprecated) and `array_api` + # (experimental label) are not added here, because `from numpy import *` + # must not raise any warnings - that's too disruptive. 
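+    #
+    # An illustrative note (not from the upstream file): the lazy imports
+    # rely on the module-level __getattr__ defined further below (PEP 562),
+    # so the first attribute access performs the real import, e.g.:
+    #
+    #   import numpy as np       # numpy.fft is not imported yet
+    #   np.fft.rfft([0.0, 1.0])  # first access triggers `import numpy.fft`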
+    __numpy_submodules__ = {
+        "linalg", "fft", "dtypes", "random", "polynomial", "ma",
+        "exceptions", "lib", "ctypeslib", "testing", "typing",
+        "f2py", "test", "rec", "char", "core", "strings",
+    }
+
+    # We build warning messages for former attributes
+    _msg = (
+        "module 'numpy' has no attribute '{n}'.\n"
+        "`np.{n}` was a deprecated alias for the builtin `{n}`. "
+        "To avoid this error in existing code, use `{n}` by itself. "
+        "Doing this will not modify any behavior and is safe. {extended_msg}\n"
+        "The alias was originally deprecated in NumPy 1.20; for more "
+        "details and guidance see the original release note at:\n"
+        "    https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations")
+
+    _specific_msg = (
+        "If you specifically wanted the numpy scalar type, use `np.{}` here.")
+
+    _int_extended_msg = (
+        "When replacing `np.{}`, you may wish to use e.g. `np.int64` "
+        "or `np.int32` to specify the precision. If you wish to review "
+        "your current use, check the release note link for "
+        "additional information.")
+
+    _type_info = [
+        ("object", ""),  # The NumPy scalar only exists by name.
+        ("float", _specific_msg.format("float64")),
+        ("complex", _specific_msg.format("complex128")),
+        ("str", _specific_msg.format("str_")),
+        ("int", _int_extended_msg.format("int"))]
+
+    __former_attrs__ = {
+        n: _msg.format(n=n, extended_msg=extended_msg)
+        for n, extended_msg in _type_info
+    }
+
+    # Some of these could be defined right away, but most were aliases to
+    # the Python objects and only removed in NumPy 1.24. Defining them should
+    # probably wait for NumPy 1.26 or 2.0.
+    # When defined, these should possibly not be added to `__all__` to avoid
+    # import with `from numpy import *`.
+    __future_scalars__ = {"str", "bytes", "object"}
+
+    __array_api_version__ = "2024.12"
+
+    from ._array_api_info import __array_namespace_info__
+
+    # now that the numpy core module is imported, we can initialize limits
+    _core.getlimits._register_known_types()
+
+    __all__ = list(
+        __numpy_submodules__ |
+        set(_core.__all__) |
+        set(_mat.__all__) |
+        set(lib._histograms_impl.__all__) |
+        set(lib._nanfunctions_impl.__all__) |
+        set(lib._function_base_impl.__all__) |
+        set(lib._twodim_base_impl.__all__) |
+        set(lib._shape_base_impl.__all__) |
+        set(lib._type_check_impl.__all__) |
+        set(lib._arraysetops_impl.__all__) |
+        set(lib._ufunclike_impl.__all__) |
+        set(lib._arraypad_impl.__all__) |
+        set(lib._utils_impl.__all__) |
+        set(lib._stride_tricks_impl.__all__) |
+        set(lib._polynomial_impl.__all__) |
+        set(lib._npyio_impl.__all__) |
+        set(lib._index_tricks_impl.__all__) |
+        {"emath", "show_config", "__version__", "__array_namespace_info__"}
+    )
+
+    # Filter out harmless Cython warnings
+    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+    warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
+
+    def __getattr__(attr):
+        # Warn for expired attributes
+        import warnings
+
+        if attr == "linalg":
+            import numpy.linalg as linalg
+            return linalg
+        elif attr == "fft":
+            import numpy.fft as fft
+            return fft
+        elif attr == "dtypes":
+            import numpy.dtypes as dtypes
+            return dtypes
+        elif attr == "random":
+            import numpy.random as random
+            return random
+        elif attr == "polynomial":
+            import numpy.polynomial as polynomial
+            return polynomial
+        elif attr == "ma":
+            import numpy.ma as ma
+            return ma
+        elif attr == "ctypeslib":
+            import numpy.ctypeslib as ctypeslib
+            return ctypeslib
+        elif attr == "exceptions":
+            import numpy.exceptions as exceptions
+            return exceptions
+        elif attr == "testing":
+            import numpy.testing as testing
+            return testing
+        elif attr == "matlib":
+            import numpy.matlib as matlib
+            return matlib
+        elif attr == "f2py":
+            import numpy.f2py as f2py
+            return f2py
+        elif attr == "typing":
+            import numpy.typing as typing
+            return typing
+        elif attr == "rec":
+            import numpy.rec as rec
+            return rec
+        elif attr == "char":
+            import numpy.char as char
+            return char
+        elif attr == "array_api":
+            raise AttributeError("`numpy.array_api` is not available from "
+                                 "numpy 2.0 onwards", name=None)
+        elif attr == "core":
+            import numpy.core as core
+            return core
+        elif attr == "strings":
+            import numpy.strings as strings
+            return strings
+        elif attr == "distutils":
+            if 'distutils' in __numpy_submodules__:
+                import numpy.distutils as distutils
+                return distutils
+            else:
+                raise AttributeError("`numpy.distutils` is not available from "
+                                     "Python 3.12 onwards", name=None)
+
+        if attr in __future_scalars__:
+            # Also give a FutureWarning for scalars that will change; execution
+            # still falls through to the AttributeError below
+            warnings.warn(
+                f"In the future `np.{attr}` will be defined as the "
+                "corresponding NumPy scalar.", FutureWarning, stacklevel=2)
+
+        if attr in __former_attrs__:
+            raise AttributeError(__former_attrs__[attr], name=None)
+
+        if attr in __expired_attributes__:
+            raise AttributeError(
+                f"`np.{attr}` was removed in the NumPy 2.0 release. "
+                f"{__expired_attributes__[attr]}",
+                name=None
+            )
+
+        if attr == "chararray":
+            warnings.warn(
+                "`np.chararray` is deprecated and will be removed from "
+                "the main namespace in the future. Use an array with a string "
+                "or bytes dtype instead.", DeprecationWarning, stacklevel=2)
+            import numpy.char as char
+            return char.chararray
+
+        raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
+
+    def __dir__():
+        public_symbols = (
+            globals().keys() | __numpy_submodules__
+        )
+        public_symbols -= {
+            "matrixlib", "matlib", "tests", "conftest", "version",
+            "distutils", "array_api"
+        }
+        return list(public_symbols)
+
+    # Pytest testing
+    from numpy._pytesttester import PytestTester
+    test = PytestTester(__name__)
+    del PytestTester
+
+    def _sanity_check():
+        """
+        Quick sanity checks for common bugs caused by the environment.
+        There are some cases, e.g. with a wrong BLAS ABI, that cause wrong
+        results under specific runtime conditions that are not necessarily
+        achieved during test suite runs, and it is useful to catch those early.
+
+        See https://github.com/numpy/numpy/issues/8577 and other
+        similar bug reports.
+
+        """
+        try:
+            x = ones(2, dtype=float32)
+            if not abs(x.dot(x) - float32(2.0)) < 1e-5:
+                raise AssertionError
+        except AssertionError:
+            msg = ("The current NumPy installation ({!r}) fails to "
+                   "pass simple sanity checks. This can be caused, for example, "
+                   "by an incorrect BLAS library being linked in, or by mixing "
+                   "package managers (pip, conda, apt, ...). Search closed "
+                   "numpy issues for similar problems.")
+            raise RuntimeError(msg.format(__file__)) from None
+
+    _sanity_check()
+    del _sanity_check
+
+    def _mac_os_check():
+        """
+        Quick sanity check for macOS, looking for Accelerate build bugs.
+        Tests that numpy polyfit (which calls the LAPACK dgelsd routine) works.
+        """
+        try:
+            c = array([3., 2., 1.])
+            x = linspace(0, 2, 5)
+            y = polyval(c, x)
+            _ = polyfit(x, y, 2, cov=True)
+        except ValueError:
+            pass
+
+    if sys.platform == "darwin":
+        from . import exceptions
+        with warnings.catch_warnings(record=True) as w:
+            _mac_os_check()
+            # Raise a RuntimeError if the test failed:
+            # check for the warning and report its message
+            if len(w) > 0:
+                for _wn in w:
+                    if _wn.category is exceptions.RankWarning:
+                        # Ignore other warnings, they may not be relevant (see gh-25433)
+                        error_message = (
+                            f"{_wn.category.__name__}: {_wn.message}"
+                        )
+                        msg = (
+                            "Polyfit sanity test emitted a warning, most likely due "
+                            "to using a buggy Accelerate backend."
+                            "\nIf you compiled yourself, more information is available at:"  # noqa: E501
+                            "\nhttps://numpy.org/devdocs/building/index.html"
+                            "\nOtherwise report this to the vendor "
+                            f"that provided NumPy.\n\n{error_message}\n")
+                        raise RuntimeError(msg)
+                del _wn
+            del w
+    del _mac_os_check
+
+    def blas_fpe_check():
+        # Check if BLAS adds spurious FPEs, mostly seen on Apple M4 (arm64)
+        # machines with Accelerate.
+        with errstate(all='raise'):
+            x = ones((20, 20))
+            try:
+                x @ x
+            except FloatingPointError:
+                res = _core._multiarray_umath._blas_supports_fpe(False)
+                if res:  # res was not modified (hardcoded to True for now)
+                    warnings.warn(
+                        "Spurious warnings given by blas but suppression not "
+                        "set up on this platform. Please open a NumPy issue.",
+                        UserWarning, stacklevel=2)
+
+    blas_fpe_check()
+    del blas_fpe_check
+
+    def hugepage_setup():
+        """
+        We usually use madvise hugepages support, but on some old kernels it
+        is slow and thus better avoided. Specifically, kernel version 4.6
+        included a fix that probably resolved this:
+        https://github.com/torvalds/linux/commit/7cf91a98e607c2f935dbcc177d70011e95b8faff
+        """
+        use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
+        if sys.platform == "linux" and use_hugepage is None:
+            # If there is an issue with parsing the kernel version,
+            # set use_hugepage to 0. Usage of LooseVersion would handle
+            # the kernel version parsing better, but it is avoided since it
+            # would increase the import time.
+            # See: #16679 for related discussion.
+            try:
+                use_hugepage = 1
+                kernel_version = os.uname().release.split(".")[:2]
+                kernel_version = tuple(int(v) for v in kernel_version)
+                if kernel_version < (4, 6):
+                    use_hugepage = 0
+            except ValueError:
+                use_hugepage = 0
+        elif use_hugepage is None:
+            # This is not Linux, so it should not matter, just enable anyway
+            use_hugepage = 1
+        else:
+            use_hugepage = int(use_hugepage)
+        return use_hugepage
+
+    # Note that this will currently only make a difference on Linux
+    _core.multiarray._set_madvise_hugepage(hugepage_setup())
+    del hugepage_setup
+
+    # Give a warning if NumPy is reloaded or imported in a sub-interpreter.
+    # We do this from Python, since the C module may not be reloaded, and
+    # it is tidier to organize it here.
+ _core.multiarray._multiarray_umath._reload_guard() + + # TODO: Remove the environment variable entirely now that it is "weak" + if (os.environ.get("NPY_PROMOTION_STATE", "weak") != "weak"): + warnings.warn( + "NPY_PROMOTION_STATE was a temporary feature for NumPy 2.0 " + "transition and is ignored after NumPy 2.2.", + UserWarning, stacklevel=2) + + # Tell PyInstaller where to find hook-numpy.py + def _pyinstaller_hooks_dir(): + from pathlib import Path + return [str(Path(__file__).with_name("_pyinstaller").resolve())] + + +# Remove symbols imported for internal use +del os, sys, warnings diff --git a/python/numpy/__init__.pyi b/python/numpy/__init__.pyi new file mode 100644 index 000000000..093c8e0f0 --- /dev/null +++ b/python/numpy/__init__.pyi @@ -0,0 +1,6147 @@ +# ruff: noqa: I001 +import builtins +import sys +import mmap +import ctypes as ct +import array as _array +import datetime as dt +from abc import abstractmethod +from types import EllipsisType, ModuleType, TracebackType, MappingProxyType, GenericAlias +from decimal import Decimal +from fractions import Fraction +from uuid import UUID + +import numpy as np +from numpy.__config__ import show as show_config +from numpy._pytesttester import PytestTester +from numpy._core._internal import _ctypes + +from numpy._typing import ( + # Arrays + ArrayLike, + NDArray, + _SupportsArray, + _NestedSequence, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeUInt_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeFloat64_co, + _ArrayLikeFloat_co, + _ArrayLikeComplex128_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeBytes_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _ArrayLikeDT64_co, + # DTypes + DTypeLike, + _DTypeLike, + _DTypeLikeVoid, + _VoidDTypeLike, + # Shapes + _AnyShape, + _Shape, + _ShapeLike, + # Scalars + _CharLike_co, + _IntLike_co, + _FloatLike_co, + _TD64Like_co, + _NumberLike_co, + _ScalarLike_co, + # `number` precision + NBitBase, + # NOTE: Do not remove the extended precision bit-types even if seemingly unused; + # they're used by the mypy plugin + _128Bit, + _96Bit, + _64Bit, + _32Bit, + _16Bit, + _8Bit, + _NBitByte, + _NBitShort, + _NBitIntC, + _NBitIntP, + _NBitLong, + _NBitLongLong, + _NBitHalf, + _NBitSingle, + _NBitDouble, + _NBitLongDouble, + # Character codes + _BoolCodes, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _Float16Codes, + _Float32Codes, + _Float64Codes, + _Complex64Codes, + _Complex128Codes, + _ByteCodes, + _ShortCodes, + _IntCCodes, + _IntPCodes, + _LongCodes, + _LongLongCodes, + _UByteCodes, + _UShortCodes, + _UIntCCodes, + _UIntPCodes, + _ULongCodes, + _ULongLongCodes, + _HalfCodes, + _SingleCodes, + _DoubleCodes, + _LongDoubleCodes, + _CSingleCodes, + _CDoubleCodes, + _CLongDoubleCodes, + _DT64Codes, + _TD64Codes, + _StrCodes, + _BytesCodes, + _VoidCodes, + _ObjectCodes, + _StringCodes, + _UnsignedIntegerCodes, + _SignedIntegerCodes, + _IntegerCodes, + _FloatingCodes, + _ComplexFloatingCodes, + _InexactCodes, + _NumberCodes, + _CharacterCodes, + _FlexibleCodes, + _GenericCodes, + # Ufuncs + _UFunc_Nin1_Nout1, + _UFunc_Nin2_Nout1, + _UFunc_Nin1_Nout2, + _UFunc_Nin2_Nout2, + _GUFunc_Nin2_Nout1, +) + +# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform +from numpy._typing._extended_precision import ( + float96, + float128, + complex192, + complex256, +) + +from numpy._array_api_info import __array_namespace_info__ + +from 
collections.abc import ( + Callable, + Iterable, + Iterator, + Mapping, + Sequence, +) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer as _SupportsBuffer +else: + _SupportsBuffer: TypeAlias = ( + bytes + | bytearray + | memoryview + | _array.array[Any] + | mmap.mmap + | NDArray[Any] + | generic + ) + +from typing import ( + Any, + ClassVar, + Final, + Generic, + Literal as L, + LiteralString, + Never, + NoReturn, + Protocol, + Self, + SupportsComplex, + SupportsFloat, + SupportsInt, + SupportsIndex, + TypeAlias, + TypedDict, + final, + overload, + type_check_only, +) + +# NOTE: `typing_extensions` and `_typeshed` are always available in `.pyi` stubs, even +# if not available at runtime. This is because the `typeshed` stubs for the standard +# library include `typing_extensions` stubs: +# https://github.com/python/typeshed/blob/main/stdlib/typing_extensions.pyi +from _typeshed import Incomplete, StrOrBytesPath, SupportsFlush, SupportsLenAndGetItem, SupportsWrite +from typing_extensions import CapsuleType, TypeVar, deprecated, override + +from numpy import ( + char, + core, + ctypeslib, + dtypes, + exceptions, + f2py, + fft, + lib, + linalg, + ma, + polynomial, + random, + rec, + strings, + testing, + typing, +) + +# available through `__getattr__`, but not in `__all__` or `__dir__` +from numpy import ( + __config__ as __config__, + matlib as matlib, + matrixlib as matrixlib, + version as version, +) +if sys.version_info < (3, 12): + from numpy import distutils as distutils + +from numpy._core.records import ( + record, + recarray, +) + +from numpy._core.function_base import ( + linspace, + logspace, + geomspace, +) + +from numpy._core.fromnumeric import ( + take, + reshape, + choose, + repeat, + put, + swapaxes, + transpose, + matrix_transpose, + partition, + argpartition, + sort, + argsort, + argmax, + argmin, + searchsorted, + resize, + squeeze, + diagonal, + trace, + ravel, + nonzero, + shape, + compress, + clip, + sum, + all, + any, + cumsum, + cumulative_sum, + ptp, + max, + min, + amax, + amin, + prod, + cumprod, + cumulative_prod, + ndim, + size, + around, + round, + mean, + std, + var, +) + +from numpy._core._asarray import ( + require, +) + +from numpy._core._type_aliases import ( + sctypeDict, +) + +from numpy._core._ufunc_config import ( + seterr, + geterr, + setbufsize, + getbufsize, + seterrcall, + geterrcall, + errstate, +) + +from numpy._core.arrayprint import ( + set_printoptions, + get_printoptions, + array2string, + format_float_scientific, + format_float_positional, + array_repr, + array_str, + printoptions, +) + +from numpy._core.einsumfunc import ( + einsum, + einsum_path, +) + +from numpy._core.multiarray import ( + array, + empty_like, + empty, + zeros, + concatenate, + inner, + where, + lexsort, + can_cast, + min_scalar_type, + result_type, + dot, + vdot, + bincount, + copyto, + putmask, + packbits, + unpackbits, + shares_memory, + may_share_memory, + asarray, + asanyarray, + ascontiguousarray, + asfortranarray, + arange, + busday_count, + busday_offset, + datetime_as_string, + datetime_data, + frombuffer, + fromfile, + fromiter, + is_busday, + promote_types, + fromstring, + frompyfunc, + nested_iters, + flagsobj, +) + +from numpy._core.numeric import ( + zeros_like, + ones, + ones_like, + full, + full_like, + count_nonzero, + isfortran, + argwhere, + flatnonzero, + correlate, + convolve, + outer, + tensordot, + roll, + rollaxis, + moveaxis, + cross, + indices, + fromfunction, + isscalar, + binary_repr, + base_repr, + identity, + allclose, + 
isclose, + array_equal, + array_equiv, + astype, +) + +from numpy._core.numerictypes import ( + isdtype, + issubdtype, + ScalarType, + typecodes, +) + +from numpy._core.shape_base import ( + atleast_1d, + atleast_2d, + atleast_3d, + block, + hstack, + stack, + vstack, + unstack, +) + +from ._expired_attrs_2_0 import __expired_attributes__ as __expired_attributes__ +from ._globals import _CopyMode as _CopyMode +from ._globals import _NoValue as _NoValue, _NoValueType + +from numpy.lib import ( + scimath as emath, +) + +from numpy.lib._arraypad_impl import ( + pad, +) + +from numpy.lib._arraysetops_impl import ( + ediff1d, + in1d, + intersect1d, + isin, + setdiff1d, + setxor1d, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, +) + +from numpy.lib._function_base_impl import ( + select, + piecewise, + trim_zeros, + copy, + iterable, + percentile, + diff, + gradient, + angle, + unwrap, + sort_complex, + flip, + rot90, + extract, + place, + asarray_chkfinite, + average, + digitize, + cov, + corrcoef, + median, + sinc, + hamming, + hanning, + bartlett, + blackman, + kaiser, + trapezoid, + trapz, + i0, + meshgrid, + delete, + insert, + append, + interp, + quantile, +) + +from numpy.lib._histograms_impl import ( + histogram_bin_edges, + histogram, + histogramdd, +) + +from numpy.lib._index_tricks_impl import ( + ndenumerate, + ndindex, + ravel_multi_index, + unravel_index, + mgrid, + ogrid, + r_, + c_, + s_, + index_exp, + ix_, + fill_diagonal, + diag_indices, + diag_indices_from, +) + +from numpy.lib._nanfunctions_impl import ( + nansum, + nanmax, + nanmin, + nanargmax, + nanargmin, + nanmean, + nanmedian, + nanpercentile, + nanvar, + nanstd, + nanprod, + nancumsum, + nancumprod, + nanquantile, +) + +from numpy.lib._npyio_impl import ( + savetxt, + loadtxt, + genfromtxt, + load, + save, + savez, + savez_compressed, + fromregex, +) + +from numpy.lib._polynomial_impl import ( + poly, + roots, + polyint, + polyder, + polyadd, + polysub, + polymul, + polydiv, + polyval, + polyfit, +) + +from numpy.lib._shape_base_impl import ( + column_stack, + row_stack, + dstack, + array_split, + split, + hsplit, + vsplit, + dsplit, + apply_over_axes, + expand_dims, + apply_along_axis, + kron, + tile, + take_along_axis, + put_along_axis, +) + +from numpy.lib._stride_tricks_impl import ( + broadcast_to, + broadcast_arrays, + broadcast_shapes, +) + +from numpy.lib._twodim_base_impl import ( + diag, + diagflat, + eye, + fliplr, + flipud, + tri, + triu, + tril, + vander, + histogram2d, + mask_indices, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, +) + +from numpy.lib._type_check_impl import ( + mintypecode, + real, + imag, + iscomplex, + isreal, + iscomplexobj, + isrealobj, + nan_to_num, + real_if_close, + typename, + common_type, +) + +from numpy.lib._ufunclike_impl import ( + fix, + isposinf, + isneginf, +) + +from numpy.lib._utils_impl import ( + get_include, + info, + show_runtime, +) + +from numpy.matrixlib import ( + asmatrix, + bmat, +) + +__all__ = [ # noqa: RUF022 + # __numpy_submodules__ + "char", "core", "ctypeslib", "dtypes", "exceptions", "f2py", "fft", "lib", "linalg", + "ma", "polynomial", "random", "rec", "strings", "test", "testing", "typing", + + # _core.__all__ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "bitwise_invert", + "bitwise_left_shift", "bitwise_right_shift", "concat", "pow", "permute_dims", + "memmap", "sctypeDict", "record", "recarray", + + # _core.numeric.__all__ + "newaxis", "ndarray", "flatiter", 
"nditer", "nested_iters", "ufunc", "arange", + "array", "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "zeros", + "count_nonzero", "empty", "broadcast", "dtype", "fromstring", "fromfile", + "frombuffer", "from_dlpack", "where", "argwhere", "copyto", "concatenate", + "lexsort", "astype", "can_cast", "promote_types", "min_scalar_type", "result_type", + "isfortran", "empty_like", "zeros_like", "ones_like", "correlate", "convolve", + "inner", "dot", "outer", "vdot", "roll", "rollaxis", "moveaxis", "cross", + "tensordot", "little_endian", "fromiter", "array_equal", "array_equiv", "indices", + "fromfunction", "isclose", "isscalar", "binary_repr", "base_repr", "ones", + "identity", "allclose", "putmask", "flatnonzero", "inf", "nan", "False_", "True_", + "bitwise_not", "full", "full_like", "matmul", "vecdot", "vecmat", + "shares_memory", "may_share_memory", + "all", "amax", "amin", "any", "argmax", "argmin", "argpartition", "argsort", + "around", "choose", "clip", "compress", "cumprod", "cumsum", "cumulative_prod", + "cumulative_sum", "diagonal", "mean", "max", "min", "matrix_transpose", "ndim", + "nonzero", "partition", "prod", "ptp", "put", "ravel", "repeat", "reshape", + "resize", "round", "searchsorted", "shape", "size", "sort", "squeeze", "std", "sum", + "swapaxes", "take", "trace", "transpose", "var", + "absolute", "add", "arccos", "arccosh", "arcsin", "arcsinh", "arctan", "arctan2", + "arctanh", "bitwise_and", "bitwise_or", "bitwise_xor", "cbrt", "ceil", "conj", + "conjugate", "copysign", "cos", "cosh", "bitwise_count", "deg2rad", "degrees", + "divide", "divmod", "e", "equal", "euler_gamma", "exp", "exp2", "expm1", "fabs", + "floor", "floor_divide", "float_power", "fmax", "fmin", "fmod", "frexp", + "frompyfunc", "gcd", "greater", "greater_equal", "heaviside", "hypot", "invert", + "isfinite", "isinf", "isnan", "isnat", "lcm", "ldexp", "left_shift", "less", + "less_equal", "log", "log10", "log1p", "log2", "logaddexp", "logaddexp2", + "logical_and", "logical_not", "logical_or", "logical_xor", "matvec", "maximum", "minimum", + "mod", "modf", "multiply", "negative", "nextafter", "not_equal", "pi", "positive", + "power", "rad2deg", "radians", "reciprocal", "remainder", "right_shift", "rint", + "sign", "signbit", "sin", "sinh", "spacing", "sqrt", "square", "subtract", "tan", + "tanh", "true_divide", "trunc", "ScalarType", "typecodes", "issubdtype", + "datetime_data", "datetime_as_string", "busday_offset", "busday_count", "is_busday", + "busdaycalendar", "isdtype", + "complexfloating", "character", "unsignedinteger", "inexact", "generic", "floating", + "integer", "signedinteger", "number", "flexible", "bool", "float16", "float32", + "float64", "longdouble", "complex64", "complex128", "clongdouble", + "bytes_", "str_", "void", "object_", "datetime64", "timedelta64", "int8", "byte", + "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", + "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", + "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", + "float96", "float128", "complex192", "complex256", + "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", + "printoptions", "format_float_positional", "format_float_scientific", "require", + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate", + # _core.function_base.__all__ + "logspace", "linspace", "geomspace", + # _core.getlimits.__all__ + "finfo", "iinfo", + # _core.shape_base.__all__ + 
"atleast_1d", "atleast_2d", "atleast_3d", "block", "hstack", "stack", "unstack", + "vstack", + # _core.einsumfunc.__all__ + "einsum", "einsum_path", + # matrixlib.__all__ + "matrix", "bmat", "asmatrix", + # lib._histograms_impl.__all__ + "histogram", "histogramdd", "histogram_bin_edges", + # lib._nanfunctions_impl.__all__ + "nansum", "nanmax", "nanmin", "nanargmax", "nanargmin", "nanmean", "nanmedian", + "nanpercentile", "nanvar", "nanstd", "nanprod", "nancumsum", "nancumprod", + "nanquantile", + # lib._function_base_impl.__all__ + "select", "piecewise", "trim_zeros", "copy", "iterable", "percentile", "diff", + "gradient", "angle", "unwrap", "sort_complex", "flip", "rot90", "extract", "place", + "vectorize", "asarray_chkfinite", "average", "bincount", "digitize", "cov", + "corrcoef", "median", "sinc", "hamming", "hanning", "bartlett", "blackman", + "kaiser", "trapezoid", "trapz", "i0", "meshgrid", "delete", "insert", "append", + "interp", "quantile", + # lib._twodim_base_impl.__all__ + "diag", "diagflat", "eye", "fliplr", "flipud", "tri", "triu", "tril", "vander", + "histogram2d", "mask_indices", "tril_indices", "tril_indices_from", "triu_indices", + "triu_indices_from", + # lib._shape_base_impl.__all__ + "column_stack", "dstack", "array_split", "split", "hsplit", "vsplit", "dsplit", + "apply_over_axes", "expand_dims", "apply_along_axis", "kron", "tile", + "take_along_axis", "put_along_axis", "row_stack", + # lib._type_check_impl.__all__ + "iscomplexobj", "isrealobj", "imag", "iscomplex", "isreal", "nan_to_num", "real", + "real_if_close", "typename", "mintypecode", "common_type", + # lib._arraysetops_impl.__all__ + "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", "union1d", + "unique", "unique_all", "unique_counts", "unique_inverse", "unique_values", + # lib._ufunclike_impl.__all__ + "fix", "isneginf", "isposinf", + # lib._arraypad_impl.__all__ + "pad", + # lib._utils_impl.__all__ + "get_include", "info", "show_runtime", + # lib._stride_tricks_impl.__all__ + "broadcast_to", "broadcast_arrays", "broadcast_shapes", + # lib._polynomial_impl.__all__ + "poly", "roots", "polyint", "polyder", "polyadd", "polysub", "polymul", "polydiv", + "polyval", "poly1d", "polyfit", + # lib._npyio_impl.__all__ + "savetxt", "loadtxt", "genfromtxt", "load", "save", "savez", "savez_compressed", + "packbits", "unpackbits", "fromregex", + # lib._index_tricks_impl.__all__ + "ravel_multi_index", "unravel_index", "mgrid", "ogrid", "r_", "c_", "s_", + "index_exp", "ix_", "ndenumerate", "ndindex", "fill_diagonal", "diag_indices", + "diag_indices_from", + + # __init__.__all__ + "emath", "show_config", "__version__", "__array_namespace_info__", +] # fmt: skip + +### Constrained types (for internal use only) +# Only use these for functions; never as generic type parameter. 
+
+_AnyStr = TypeVar("_AnyStr", LiteralString, str, bytes)
+_AnyShapeT = TypeVar(
+    "_AnyShapeT",
+    tuple[()],  # 0-d
+    tuple[int],  # 1-d
+    tuple[int, int],  # 2-d
+    tuple[int, int, int],  # 3-d
+    tuple[int, int, int, int],  # 4-d
+    tuple[int, int, int, int, int],  # 5-d
+    tuple[int, int, int, int, int, int],  # 6-d
+    tuple[int, int, int, int, int, int, int],  # 7-d
+    tuple[int, int, int, int, int, int, int, int],  # 8-d
+    tuple[int, ...],  # N-d
+)
+_AnyTD64Item = TypeVar("_AnyTD64Item", dt.timedelta, int, None, dt.timedelta | int | None)
+_AnyDT64Arg = TypeVar("_AnyDT64Arg", dt.datetime, dt.date, None)
+_AnyDT64Item = TypeVar("_AnyDT64Item", dt.datetime, dt.date, int, None, dt.date | int | None)
+_AnyDate = TypeVar("_AnyDate", dt.date, dt.datetime)
+_AnyDateOrTime = TypeVar("_AnyDateOrTime", dt.date, dt.datetime, dt.timedelta)
+
+### Type parameters (for internal use only)
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_RealT_co = TypeVar("_RealT_co", covariant=True)
+_ImagT_co = TypeVar("_ImagT_co", covariant=True)
+
+_DTypeT = TypeVar("_DTypeT", bound=dtype)
+_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True)
+_FlexDTypeT = TypeVar("_FlexDTypeT", bound=dtype[flexible])
+
+_ArrayT = TypeVar("_ArrayT", bound=ndarray)
+_ArrayT_co = TypeVar("_ArrayT_co", bound=ndarray, default=ndarray, covariant=True)
+_IntegralArrayT = TypeVar("_IntegralArrayT", bound=NDArray[integer | np.bool | object_])
+_RealArrayT = TypeVar("_RealArrayT", bound=NDArray[floating | integer | timedelta64 | np.bool | object_])
+_NumericArrayT = TypeVar("_NumericArrayT", bound=NDArray[number | timedelta64 | object_])
+
+_ShapeT = TypeVar("_ShapeT", bound=_Shape)
+_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)
+_1DShapeT = TypeVar("_1DShapeT", bound=_1D)
+_2DShapeT_co = TypeVar("_2DShapeT_co", bound=_2D, default=_2D, covariant=True)
+_1NShapeT = TypeVar("_1NShapeT", bound=tuple[L[1], *tuple[L[1], ...]])  # (1,) | (1, 1) | (1, 1, 1) | ...
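An aside on the constrained TypeVars above: unlike a `bound=` type variable, a constrained one must be solved to exactly one of the listed forms, which is how `_AnyShapeT` can carry the concrete rank of a shape through a signature. A minimal sketch, using only the standard `typing` module (the `_DemoShapeT` name is illustrative, not from the stubs):

    from typing import TypeVar, reveal_type  # typing.reveal_type: Python 3.11+

    # Constrained: must resolve to exactly tuple[int] or tuple[int, int],
    # never to a wider supertype such as tuple[int, ...].
    _DemoShapeT = TypeVar("_DemoShapeT", tuple[int], tuple[int, int])

    def identity_shape(shape: _DemoShapeT) -> _DemoShapeT:
        # The checker threads the exact tuple form through unchanged.
        return shape

    reveal_type(identity_shape((2,)))    # checker: tuple[int]      (rank kept)
    reveal_type(identity_shape((2, 3)))  # checker: tuple[int, int] (rank kept)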
+ +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, default=Any, covariant=True) +_NumberT = TypeVar("_NumberT", bound=number) +_InexactT = TypeVar("_InexactT", bound=inexact) +_RealNumberT = TypeVar("_RealNumberT", bound=floating | integer) +_FloatingT_co = TypeVar("_FloatingT_co", bound=floating, default=floating, covariant=True) +_IntegerT = TypeVar("_IntegerT", bound=integer) +_IntegerT_co = TypeVar("_IntegerT_co", bound=integer, default=integer, covariant=True) +_NonObjectScalarT = TypeVar("_NonObjectScalarT", bound=np.bool | number | flexible | datetime64 | timedelta64) + +_NBit = TypeVar("_NBit", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit1 = TypeVar("_NBit1", bound=NBitBase, default=Any) # pyright: ignore[reportDeprecated] +_NBit2 = TypeVar("_NBit2", bound=NBitBase, default=_NBit1) # pyright: ignore[reportDeprecated] + +_ItemT_co = TypeVar("_ItemT_co", default=Any, covariant=True) +_BoolItemT = TypeVar("_BoolItemT", bound=builtins.bool) +_BoolItemT_co = TypeVar("_BoolItemT_co", bound=builtins.bool, default=builtins.bool, covariant=True) +_NumberItemT_co = TypeVar("_NumberItemT_co", bound=complex, default=int | float | complex, covariant=True) +_InexactItemT_co = TypeVar("_InexactItemT_co", bound=complex, default=float | complex, covariant=True) +_FlexibleItemT_co = TypeVar( + "_FlexibleItemT_co", + bound=_CharLike_co | tuple[Any, ...], + default=_CharLike_co | tuple[Any, ...], + covariant=True, +) +_CharacterItemT_co = TypeVar("_CharacterItemT_co", bound=_CharLike_co, default=_CharLike_co, covariant=True) +_TD64ItemT_co = TypeVar("_TD64ItemT_co", bound=dt.timedelta | int | None, default=dt.timedelta | int | None, covariant=True) +_DT64ItemT_co = TypeVar("_DT64ItemT_co", bound=dt.date | int | None, default=dt.date | int | None, covariant=True) +_TD64UnitT = TypeVar("_TD64UnitT", bound=_TD64Unit, default=_TD64Unit) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[integer | np.bool]) + +### Type Aliases (for internal use only) + +_Falsy: TypeAlias = L[False, 0] | np.bool[L[False]] +_Truthy: TypeAlias = L[True, 1] | np.bool[L[True]] + +_1D: TypeAlias = tuple[int] +_2D: TypeAlias = tuple[int, int] +_2Tuple: TypeAlias = tuple[_T, _T] + +_ArrayUInt_co: TypeAlias = NDArray[unsignedinteger | np.bool] +_ArrayInt_co: TypeAlias = NDArray[integer | np.bool] +_ArrayFloat64_co: TypeAlias = NDArray[floating[_64Bit] | float32 | float16 | integer | np.bool] +_ArrayFloat_co: TypeAlias = NDArray[floating | integer | np.bool] +_ArrayComplex128_co: TypeAlias = NDArray[number[_64Bit] | number[_32Bit] | float16 | integer | np.bool] +_ArrayComplex_co: TypeAlias = NDArray[inexact | integer | np.bool] +_ArrayNumber_co: TypeAlias = NDArray[number | np.bool] +_ArrayTD64_co: TypeAlias = NDArray[timedelta64 | integer | np.bool] + +_Float64_co: TypeAlias = float | floating[_64Bit] | float32 | float16 | integer | np.bool +_Complex64_co: TypeAlias = number[_32Bit] | number[_16Bit] | number[_8Bit] | builtins.bool | np.bool +_Complex128_co: TypeAlias = complex | number[_64Bit] | _Complex64_co + +_ToIndex: TypeAlias = SupportsIndex | slice | EllipsisType | _ArrayLikeInt_co | None +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] 
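The `*_co` aliases defined above encode coercibility: each union lists every scalar kind that can be safely cast to the kind being named (e.g. `_ArrayFloat_co` admits floating, integer, and boolean arrays). A hedged sketch of how such an alias is consumed; the `_DemoFloatCo` alias and `to_float64` helper are illustrative, not part of the stubs:

    import numpy as np
    from numpy.typing import NDArray

    # Cut-down analogue of _ArrayFloat_co: any array whose dtype casts
    # safely to a floating kind.
    _DemoFloatCo = NDArray[np.floating] | NDArray[np.integer] | NDArray[np.bool_]

    def to_float64(a: _DemoFloatCo) -> NDArray[np.float64]:
        return a.astype(np.float64)  # a safe cast for all admitted dtypes

    to_float64(np.arange(3))             # accepted: int64 -> float64 is safe
    to_float64(np.array([True, False]))  # accepted: bool  -> float64 is safe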
+ +_UnsignedIntegerCType: TypeAlias = type[ + ct.c_uint8 | ct.c_uint16 | ct.c_uint32 | ct.c_uint64 + | ct.c_ushort | ct.c_uint | ct.c_ulong | ct.c_ulonglong + | ct.c_size_t | ct.c_void_p +] # fmt: skip +_SignedIntegerCType: TypeAlias = type[ + ct.c_int8 | ct.c_int16 | ct.c_int32 | ct.c_int64 + | ct.c_short | ct.c_int | ct.c_long | ct.c_longlong + | ct.c_ssize_t +] # fmt: skip +_FloatingCType: TypeAlias = type[ct.c_float | ct.c_double | ct.c_longdouble] +_IntegerCType: TypeAlias = _UnsignedIntegerCType | _SignedIntegerCType +_NumberCType: TypeAlias = _IntegerCType +_GenericCType: TypeAlias = _NumberCType | type[ct.c_bool | ct.c_char | ct.py_object[Any]] + +# some commonly used builtin types that are known to result in a +# `dtype[object_]`, when their *type* is passed to the `dtype` constructor +# NOTE: `builtins.object` should not be included here +_BuiltinObjectLike: TypeAlias = ( + slice | Decimal | Fraction | UUID + | dt.date | dt.time | dt.timedelta | dt.tzinfo + | tuple[Any, ...] | list[Any] | set[Any] | frozenset[Any] | dict[Any, Any] +) # fmt: skip + +# Introduce an alias for `dtype` to avoid naming conflicts. +_dtype: TypeAlias = dtype[_ScalarT] + +_ByteOrderChar: TypeAlias = L["<", ">", "=", "|"] +# can be anything, is case-insensitive, and only the first character matters +_ByteOrder: TypeAlias = L[ + "S", # swap the current order (default) + "<", "L", "little", # little-endian + ">", "B", "big", # big endian + "=", "N", "native", # native order + "|", "I", # ignore +] # fmt: skip +_DTypeKind: TypeAlias = L[ + "b", # boolean + "i", # signed integer + "u", # unsigned integer + "f", # floating-point + "c", # complex floating-point + "m", # timedelta64 + "M", # datetime64 + "O", # python object + "S", # byte-string (fixed-width) + "U", # unicode-string (fixed-width) + "V", # void + "T", # unicode-string (variable-width) +] +_DTypeChar: TypeAlias = L[ + "?", # bool + "b", # byte + "B", # ubyte + "h", # short + "H", # ushort + "i", # intc + "I", # uintc + "l", # long + "L", # ulong + "q", # longlong + "Q", # ulonglong + "e", # half + "f", # single + "d", # double + "g", # longdouble + "F", # csingle + "D", # cdouble + "G", # clongdouble + "O", # object + "S", # bytes_ (S0) + "a", # bytes_ (deprecated) + "U", # str_ + "V", # void + "M", # datetime64 + "m", # timedelta64 + "c", # bytes_ (S1) + "T", # StringDType +] +_DTypeNum: TypeAlias = L[ + 0, # bool + 1, # byte + 2, # ubyte + 3, # short + 4, # ushort + 5, # intc + 6, # uintc + 7, # long + 8, # ulong + 9, # longlong + 10, # ulonglong + 23, # half + 11, # single + 12, # double + 13, # longdouble + 14, # csingle + 15, # cdouble + 16, # clongdouble + 17, # object + 18, # bytes_ + 19, # str_ + 20, # void + 21, # datetime64 + 22, # timedelta64 + 25, # no type + 256, # user-defined + 2056, # StringDType +] +_DTypeBuiltinKind: TypeAlias = L[0, 1, 2] + +_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12", "2024.12"] + +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"] + +_OrderKACF: TypeAlias = L["K", "A", "C", "F"] | None +_OrderACF: TypeAlias = L["A", "C", "F"] | None +_OrderCF: TypeAlias = L["C", "F"] | None + +_ModeKind: TypeAlias = L["raise", "wrap", "clip"] +_PartitionKind: TypeAlias = L["introselect"] +# in practice, only the first case-insensitive character is considered (so e.g. +# "QuantumSort3000" will be interpreted as quicksort). 
+_SortKind: TypeAlias = L[
+    "Q", "quick", "quicksort",
+    "M", "merge", "mergesort",
+    "H", "heap", "heapsort",
+    "S", "stable", "stablesort",
+]
+_SortSide: TypeAlias = L["left", "right"]
+
+_ConvertibleToInt: TypeAlias = SupportsInt | SupportsIndex | _CharLike_co
+_ConvertibleToFloat: TypeAlias = SupportsFloat | SupportsIndex | _CharLike_co
+_ConvertibleToComplex: TypeAlias = SupportsComplex | SupportsFloat | SupportsIndex | _CharLike_co
+_ConvertibleToTD64: TypeAlias = dt.timedelta | int | _CharLike_co | character | number | timedelta64 | np.bool | None
+_ConvertibleToDT64: TypeAlias = dt.date | int | _CharLike_co | character | number | datetime64 | np.bool | None
+
+_NDIterFlagsKind: TypeAlias = L[
+    "buffered",
+    "c_index",
+    "copy_if_overlap",
+    "common_dtype",
+    "delay_bufalloc",
+    "external_loop",
+    "f_index",
+    "grow_inner", "growinner",
+    "multi_index",
+    "ranged",
+    "refs_ok",
+    "reduce_ok",
+    "zerosize_ok",
+]
+_NDIterFlagsOp: TypeAlias = L[
+    "aligned",
+    "allocate",
+    "arraymask",
+    "copy",
+    "config",
+    "nbo",
+    "no_subtype",
+    "no_broadcast",
+    "overlap_assume_elementwise",
+    "readonly",
+    "readwrite",
+    "updateifcopy",
+    "virtual",
+    "writeonly",
+    "writemasked"
+]
+
+_MemMapModeKind: TypeAlias = L[
+    "readonly", "r",
+    "copyonwrite", "c",
+    "readwrite", "r+",
+    "write", "w+",
+]
+
+_DT64Date: TypeAlias = _HasDateAttributes | L["TODAY", "today", b"TODAY", b"today"]
+_DT64Now: TypeAlias = L["NOW", "now", b"NOW", b"now"]
+_NaTValue: TypeAlias = L["NAT", "NaT", "nat", b"NAT", b"NaT", b"nat"]
+
+_MonthUnit: TypeAlias = L["Y", "M", b"Y", b"M"]
+_DayUnit: TypeAlias = L["W", "D", b"W", b"D"]
+_DateUnit: TypeAlias = L[_MonthUnit, _DayUnit]
+_NativeTimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "μs", b"h", b"m", b"s", b"ms", b"us"]
+_IntTimeUnit: TypeAlias = L["ns", "ps", "fs", "as", b"ns", b"ps", b"fs", b"as"]
+_TimeUnit: TypeAlias = L[_NativeTimeUnit, _IntTimeUnit]
+_NativeTD64Unit: TypeAlias = L[_DayUnit, _NativeTimeUnit]
+_IntTD64Unit: TypeAlias = L[_MonthUnit, _IntTimeUnit]
+_TD64Unit: TypeAlias = L[_DateUnit, _TimeUnit]
+_TimeUnitSpec: TypeAlias = _TD64UnitT | tuple[_TD64UnitT, SupportsIndex]
+
+### TypedDicts (for internal use only)
+
+@type_check_only
+class _FormerAttrsDict(TypedDict):
+    object: LiteralString
+    float: LiteralString
+    complex: LiteralString
+    str: LiteralString
+    int: LiteralString
+
+### Protocols (for internal use only)
+
+@final
+@type_check_only
+class _SupportsLT(Protocol):
+    def __lt__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsLE(Protocol):
+    def __le__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsGT(Protocol):
+    def __gt__(self, other: Any, /) -> Any: ...
+
+@final
+@type_check_only
+class _SupportsGE(Protocol):
+    def __ge__(self, other: Any, /) -> Any: ...
+
+@type_check_only
+class _SupportsFileMethods(SupportsFlush, Protocol):
+    # Protocol for representing file-like objects accepted by `ndarray.tofile` and `fromfile`
+    def fileno(self) -> SupportsIndex: ...
+    def tell(self) -> SupportsIndex: ...
+    def seek(self, offset: int, whence: int, /) -> object: ...
+
+@type_check_only
+class _SupportsFileMethodsRW(SupportsWrite[bytes], _SupportsFileMethods, Protocol): ...
+
+@type_check_only
+class _SupportsItem(Protocol[_T_co]):
+    def item(self, /) -> _T_co: ...
+
+@type_check_only
+class _SupportsDLPack(Protocol[_T_contra]):
+    def __dlpack__(self, /, *, stream: _T_contra | None = None) -> CapsuleType: ...
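These `@type_check_only` protocols are structural: any object with the required members satisfies them, with no inheritance needed. Note the use of `Any` (rather than `object`) in the comparison protocols, which keeps classes with narrower `__lt__` signatures compatible. A small sketch under those assumptions (`Version` is a made-up class, not from the stubs):

    from typing import Any, Protocol

    class _DemoSupportsLT(Protocol):  # mirrors _SupportsLT above
        def __lt__(self, other: Any, /) -> Any: ...

    class Version:
        def __init__(self, n: int) -> None:
            self.n = n
        def __lt__(self, other: "Version") -> bool:  # narrower than Any, still matches
            return self.n < other.n

    def smallest(a: _DemoSupportsLT, b: _DemoSupportsLT) -> Any:
        return a if a < b else b  # Version satisfies the protocol structurally

    assert smallest(Version(1), Version(2)).n == 1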
+
+@type_check_only
+class _HasDType(Protocol[_T_co]):
+    @property
+    def dtype(self, /) -> _T_co: ...
+
+@type_check_only
+class _HasRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def real(self, /) -> _RealT_co: ...
+    @property
+    def imag(self, /) -> _ImagT_co: ...
+
+@type_check_only
+class _HasTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def type(self, /) -> type[_HasRealAndImag[_RealT_co, _ImagT_co]]: ...
+
+@type_check_only
+class _HasDTypeWithRealAndImag(Protocol[_RealT_co, _ImagT_co]):
+    @property
+    def dtype(self, /) -> _HasTypeWithRealAndImag[_RealT_co, _ImagT_co]: ...
+
+@type_check_only
+class _HasDateAttributes(Protocol):
+    # The `datetime64` constructor requires an object with the three attributes below,
+    # and thus supports datetime duck typing
+    @property
+    def day(self) -> int: ...
+    @property
+    def month(self) -> int: ...
+    @property
+    def year(self) -> int: ...
+
+### Mixins (for internal use only)
+
+@type_check_only
+class _RealMixin:
+    @property
+    def real(self) -> Self: ...
+    @property
+    def imag(self) -> Self: ...
+
+@type_check_only
+class _RoundMixin:
+    @overload
+    def __round__(self, /, ndigits: None = None) -> int: ...
+    @overload
+    def __round__(self, /, ndigits: SupportsIndex) -> Self: ...
+
+@type_check_only
+class _IntegralMixin(_RealMixin):
+    @property
+    def numerator(self) -> Self: ...
+    @property
+    def denominator(self) -> L[1]: ...
+
+    def is_integer(self, /) -> L[True]: ...
+
+### Public API
+
+__version__: Final[LiteralString] = ...
+
+e: Final[float] = ...
+euler_gamma: Final[float] = ...
+pi: Final[float] = ...
+inf: Final[float] = ...
+nan: Final[float] = ...
+little_endian: Final[builtins.bool] = ...
+False_: Final[np.bool[L[False]]] = ...
+True_: Final[np.bool[L[True]]] = ...
+newaxis: Final[None] = None
+
+# not in __all__
+__NUMPY_SETUP__: Final[L[False]] = False
+__numpy_submodules__: Final[set[LiteralString]] = ...
+__former_attrs__: Final[_FormerAttrsDict] = ...
+__future_scalars__: Final[set[L["bytes", "str", "object"]]] = ...
+__array_api_version__: Final[L["2024.12"]] = "2024.12"
+test: Final[PytestTester] = ...
+
+@type_check_only
+class _DTypeMeta(type):
+    @property
+    def type(cls, /) -> type[generic] | None: ...
+    @property
+    def _abstract(cls, /) -> bool: ...
+    @property
+    def _is_numeric(cls, /) -> bool: ...
+    @property
+    def _parametric(cls, /) -> bool: ...
+    @property
+    def _legacy(cls, /) -> bool: ...
+
+@final
+class dtype(Generic[_ScalarT_co], metaclass=_DTypeMeta):
+    names: tuple[builtins.str, ...] | None
+    def __hash__(self) -> int: ...
+
+    # `None` results in the default dtype
+    @overload
+    def __new__(
+        cls,
+        dtype: type[float64] | None,
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[builtins.str, Any] = ...
+    ) -> dtype[float64]: ...
+
+    # Overload for `dtype` instances, scalar types, and instances that have a
+    # `dtype: dtype[_ScalarT]` attribute
+    @overload
+    def __new__(
+        cls,
+        dtype: _DTypeLike[_ScalarT],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[builtins.str, Any] = ...,
+    ) -> dtype[_ScalarT]: ...
+
+    # Builtin types
+    #
+    # NOTE: Typecheckers act as if `bool <: int <: float <: complex <: object`,
+    # even though at runtime `int`, `float`, and `complex` aren't subtypes.
+    # This makes it impossible to express e.g. "a float that isn't an int",
+    # since type checkers treat `_: float` like `_: float | int`.
+    #
+    # For more details, see:
+    # - https://github.com/numpy/numpy/issues/27032#issuecomment-2278958251
+    # - https://typing.readthedocs.io/en/latest/spec/special-types.html#special-cases-for-float-and-complex
+    @overload
+    def __new__(
+        cls,
+        dtype: type[builtins.bool | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[np.bool]: ...
+    # NOTE: `_: type[int]` also accepts `type[int | bool]`
+    @overload
+    def __new__(
+        cls,
+        dtype: type[int | int_ | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[int_ | np.bool]: ...
+    # NOTE: `_: type[float]` also accepts `type[float | int | bool]`
+    # NOTE: `float64` inherits from `float` at runtime, but this isn't
+    # reflected in these stubs. So an explicit `float64` is required here.
+    @overload
+    def __new__(
+        cls,
+        dtype: type[float | float64 | int_ | np.bool] | None,
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[float64 | int_ | np.bool]: ...
+    # NOTE: `_: type[complex]` also accepts `type[complex | float | int | bool]`
+    @overload
+    def __new__(
+        cls,
+        dtype: type[complex | complex128 | float64 | int_ | np.bool],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[complex128 | float64 | int_ | np.bool]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[bytes],  # also includes `type[bytes_]`
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[bytes_]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[str],  # also includes `type[str_]`
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[str_]: ...
+    # NOTE: These `memoryview` overloads assume PEP 688, which requires mypy to
+    # be run with the (undocumented) `--disable-memoryview-promotion` flag.
+    # This will be the default in a future mypy release; see:
+    # https://github.com/python/mypy/issues/15313
+    # Pyright / Pylance requires setting `disableBytesTypePromotions=true`,
+    # which is the default in strict mode
+    @overload
+    def __new__(
+        cls,
+        dtype: type[memoryview | void],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[void]: ...
+    # NOTE: `_: type[object]` would also accept e.g. `type[object | complex]`,
+    # and is therefore not included here
+    @overload
+    def __new__(
+        cls,
+        dtype: type[_BuiltinObjectLike | object_],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[object_]: ...
+
+    # Unions of builtins.
+    @overload
+    def __new__(
+        cls,
+        dtype: type[bytes | str],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[character]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[bytes | str | memoryview],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[flexible]: ...
+    @overload
+    def __new__(
+        cls,
+        dtype: type[complex | bytes | str | memoryview | _BuiltinObjectLike],
+        align: builtins.bool = ...,
+        copy: builtins.bool = ...,
+        metadata: dict[str, Any] = ...,
+    ) -> dtype[np.bool | int_ | float64 | complex128 | flexible | object_]: ...
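As a runtime cross-check of what the builtin-type overloads above encode (standard NumPy semantics, shown here only for illustration):

    import numpy as np

    assert np.dtype(bool) == np.bool_          # type[bool]    -> dtype[np.bool]
    assert np.dtype(int) == np.int_            # type[int]     -> dtype[int_]
    assert np.dtype(float) == np.float64       # type[float]   -> dtype[float64]
    assert np.dtype(complex) == np.complex128  # type[complex] -> dtype[complex128]
    assert np.dtype(None) == np.float64        # None          -> the default dtype
    assert np.dtype(object).kind == "O"        # builtin object-likes -> object_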
+ + # `unsignedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _UInt8Codes | type[ct.c_uint8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint8]: ... + @overload + def __new__(cls, dtype: _UInt16Codes | type[ct.c_uint16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint16]: ... + @overload + def __new__(cls, dtype: _UInt32Codes | type[ct.c_uint32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint32]: ... + @overload + def __new__(cls, dtype: _UInt64Codes | type[ct.c_uint64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uint64]: ... + @overload + def __new__(cls, dtype: _UByteCodes | type[ct.c_ubyte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ubyte]: ... + @overload + def __new__(cls, dtype: _UShortCodes | type[ct.c_ushort], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ushort]: ... + @overload + def __new__(cls, dtype: _UIntCCodes | type[ct.c_uint], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintc]: ... + # NOTE: We're assuming here that `uint_ptr_t == size_t`, + # an assumption that does not hold in rare cases (same for `ssize_t`) + @overload + def __new__(cls, dtype: _UIntPCodes | type[ct.c_void_p] | type[ct.c_size_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[uintp]: ... + @overload + def __new__(cls, dtype: _ULongCodes | type[ct.c_ulong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulong]: ... + @overload + def __new__(cls, dtype: _ULongLongCodes | type[ct.c_ulonglong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[ulonglong]: ... + + # `signedinteger` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Int8Codes | type[ct.c_int8], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int8]: ... + @overload + def __new__(cls, dtype: _Int16Codes | type[ct.c_int16], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int16]: ... + @overload + def __new__(cls, dtype: _Int32Codes | type[ct.c_int32], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int32]: ... + @overload + def __new__(cls, dtype: _Int64Codes | type[ct.c_int64], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[int64]: ... + @overload + def __new__(cls, dtype: _ByteCodes | type[ct.c_byte], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[byte]: ... + @overload + def __new__(cls, dtype: _ShortCodes | type[ct.c_short], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[short]: ... + @overload + def __new__(cls, dtype: _IntCCodes | type[ct.c_int], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intc]: ... 
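A similar spot-check for the ctypes-based overloads; note the stubs' own caveat that `size_t`/`ssize_t` are assumed to match `uintp`/`intp`, which holds on typical platforms but not universally:

    import ctypes as ct
    import numpy as np

    assert np.dtype(ct.c_uint8) == np.uint8     # fixed-width ctypes map directly
    assert np.dtype(ct.c_double) == np.float64  # c_double is NumPy's double
    assert np.dtype(ct.c_ssize_t) == np.intp    # the platform assumption noted above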
+ @overload + def __new__(cls, dtype: _IntPCodes | type[ct.c_ssize_t], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[intp]: ... + @overload + def __new__(cls, dtype: _LongCodes | type[ct.c_long], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[long]: ... + @overload + def __new__(cls, dtype: _LongLongCodes | type[ct.c_longlong], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longlong]: ... + + # `floating` string-based representations and ctypes + @overload + def __new__(cls, dtype: _Float16Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float16]: ... + @overload + def __new__(cls, dtype: _Float32Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float32]: ... + @overload + def __new__(cls, dtype: _Float64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[float64]: ... + @overload + def __new__(cls, dtype: _HalfCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[half]: ... + @overload + def __new__(cls, dtype: _SingleCodes | type[ct.c_float], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[single]: ... + @overload + def __new__(cls, dtype: _DoubleCodes | type[ct.c_double], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[double]: ... + @overload + def __new__(cls, dtype: _LongDoubleCodes | type[ct.c_longdouble], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[longdouble]: ... + + # `complexfloating` string-based representations + @overload + def __new__(cls, dtype: _Complex64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex64]: ... + @overload + def __new__(cls, dtype: _Complex128Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[complex128]: ... + @overload + def __new__(cls, dtype: _CSingleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[csingle]: ... + @overload + def __new__(cls, dtype: _CDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[cdouble]: ... + @overload + def __new__(cls, dtype: _CLongDoubleCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[clongdouble]: ... + + # Miscellaneous string-based representations and ctypes + @overload + def __new__(cls, dtype: _BoolCodes | type[ct.c_bool], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[np.bool]: ... + @overload + def __new__(cls, dtype: _TD64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[timedelta64]: ... + @overload + def __new__(cls, dtype: _DT64Codes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[datetime64]: ... + @overload + def __new__(cls, dtype: _StrCodes, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) 
-> dtype[str_]: ... + @overload + def __new__(cls, dtype: _BytesCodes | type[ct.c_char], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[bytes_]: ... + @overload + def __new__(cls, dtype: _VoidCodes | _VoidDTypeLike, align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[void]: ... + @overload + def __new__(cls, dtype: _ObjectCodes | type[ct.py_object[Any]], align: builtins.bool = ..., copy: builtins.bool = ..., metadata: dict[builtins.str, Any] = ...) -> dtype[object_]: ... + + # `StringDType` requires special treatment because it has no scalar type + @overload + def __new__( + cls, + dtype: dtypes.StringDType | _StringCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ... + ) -> dtypes.StringDType: ... + + # Combined char-codes and ctypes, analogous to the scalar-type hierarchy + @overload + def __new__( + cls, + dtype: _UnsignedIntegerCodes | _UnsignedIntegerCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[unsignedinteger]: ... + @overload + def __new__( + cls, + dtype: _SignedIntegerCodes | _SignedIntegerCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[signedinteger]: ... + @overload + def __new__( + cls, + dtype: _IntegerCodes | _IntegerCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[integer]: ... + @overload + def __new__( + cls, + dtype: _FloatingCodes | _FloatingCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[floating]: ... + @overload + def __new__( + cls, + dtype: _ComplexFloatingCodes, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[complexfloating]: ... + @overload + def __new__( + cls, + dtype: _InexactCodes | _FloatingCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[inexact]: ... + @overload + def __new__( + cls, + dtype: _NumberCodes | _NumberCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[number]: ... + @overload + def __new__( + cls, + dtype: _CharacterCodes | type[ct.c_char], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[character]: ... + @overload + def __new__( + cls, + dtype: _FlexibleCodes | type[ct.c_char], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[flexible]: ... + @overload + def __new__( + cls, + dtype: _GenericCodes | _GenericCType, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[generic]: ... + + # Handle strings that can't be expressed as literals; i.e. "S1", "S2", ... + @overload + def __new__( + cls, + dtype: builtins.str, + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype: ... + + # Catch-all overload for object-likes + # NOTE: `object_ | Any` is *not* equivalent to `Any` -- it describes some + # (static) type `T` s.t. 
`object_ <: T <: builtins.object` (`<:` denotes + # the subtyping relation, the (gradual) typing analogue of `issubclass()`). + # https://typing.readthedocs.io/en/latest/spec/concepts.html#union-types + @overload + def __new__( + cls, + dtype: type[object], + align: builtins.bool = ..., + copy: builtins.bool = ..., + metadata: dict[builtins.str, Any] = ..., + ) -> dtype[object_ | Any]: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + @overload + def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... + @overload + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype: ... + + # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes + @overload + def __mul__(self: _DTypeT, value: L[1], /) -> _DTypeT: ... + @overload + def __mul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + @overload + def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... + + # NOTE: `__rmul__` seems to be broken when used in combination with + # literals as of mypy 0.902. Set the return-type to `dtype` for + # now for non-flexible dtypes. + @overload + def __rmul__(self: _FlexDTypeT, value: SupportsIndex, /) -> _FlexDTypeT: ... + @overload + def __rmul__(self, value: SupportsIndex, /) -> dtype: ... + + def __gt__(self, other: DTypeLike, /) -> builtins.bool: ... + def __ge__(self, other: DTypeLike, /) -> builtins.bool: ... + def __lt__(self, other: DTypeLike, /) -> builtins.bool: ... + def __le__(self, other: DTypeLike, /) -> builtins.bool: ... + + # Explicitly defined `__eq__` and `__ne__` to get around mypy's + # `strict_equality` option; even though their signatures are + # identical to their `object`-based counterpart + def __eq__(self, other: Any, /) -> builtins.bool: ... + def __ne__(self, other: Any, /) -> builtins.bool: ... + + @property + def alignment(self) -> int: ... + @property + def base(self) -> dtype: ... + @property + def byteorder(self) -> _ByteOrderChar: ... + @property + def char(self) -> _DTypeChar: ... + @property + def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ... + @property + def fields(self,) -> MappingProxyType[LiteralString, tuple[dtype, int] | tuple[dtype, int, Any]] | None: ... + @property + def flags(self) -> int: ... + @property + def hasobject(self) -> builtins.bool: ... + @property + def isbuiltin(self) -> _DTypeBuiltinKind: ... + @property + def isnative(self) -> builtins.bool: ... + @property + def isalignedstruct(self) -> builtins.bool: ... + @property + def itemsize(self) -> int: ... + @property + def kind(self) -> _DTypeKind: ... + @property + def metadata(self) -> MappingProxyType[builtins.str, Any] | None: ... + @property + def name(self) -> LiteralString: ... + @property + def num(self) -> _DTypeNum: ... + @property + def shape(self) -> _AnyShape: ... + @property + def ndim(self) -> int: ... + @property + def subdtype(self) -> tuple[dtype, _AnyShape] | None: ... + def newbyteorder(self, new_order: _ByteOrder = ..., /) -> Self: ... + @property + def str(self) -> LiteralString: ... + @property + def type(self) -> type[_ScalarT_co]: ... + +@final +class flatiter(Generic[_ArrayT_co]): + __hash__: ClassVar[None] + @property + def base(self) -> _ArrayT_co: ... + @property + def coords(self) -> _Shape: ... + @property + def index(self) -> int: ... + def copy(self) -> _ArrayT_co: ... + def __iter__(self) -> Self: ... + def __next__(self: flatiter[NDArray[_ScalarT]]) -> _ScalarT: ... 
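Per `__next__` above, iterating a `flatiter` yields NumPy scalars of the array's dtype regardless of the array's rank, and integer indexing does the same:

    import numpy as np

    a = np.arange(6, dtype=np.float32).reshape(2, 3)
    it = a.flat              # flatiter over a 2-d float32 array
    first = next(it)         # a float32 scalar, as __next__ promises
    assert isinstance(first, np.float32)
    assert it[4] == np.float32(4.0)  # scalar from the int-index __getitem__ overload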
+ def __len__(self) -> int: ... + @overload + def __getitem__( + self: flatiter[NDArray[_ScalarT]], + key: int | integer | tuple[int | integer], + ) -> _ScalarT: ... + @overload + def __getitem__( + self, + key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], + ) -> _ArrayT_co: ... + # TODO: `__setitem__` operates via `unsafe` casting rules, and can + # thus accept any type accepted by the relevant underlying `np.generic` + # constructor. + # This means that `value` must in reality be a supertype of `npt.ArrayLike`. + def __setitem__( + self, + key: _ArrayLikeInt | slice | EllipsisType | tuple[_ArrayLikeInt | slice | EllipsisType], + value: Any, + ) -> None: ... + @overload + def __array__(self: flatiter[ndarray[_1DShapeT, _DTypeT]], dtype: None = ..., /) -> ndarray[_1DShapeT, _DTypeT]: ... + @overload + def __array__(self: flatiter[ndarray[_1DShapeT, Any]], dtype: _DTypeT, /) -> ndarray[_1DShapeT, _DTypeT]: ... + @overload + def __array__(self: flatiter[ndarray[Any, _DTypeT]], dtype: None = ..., /) -> ndarray[_AnyShape, _DTypeT]: ... + @overload + def __array__(self, dtype: _DTypeT, /) -> ndarray[_AnyShape, _DTypeT]: ... + +@type_check_only +class _ArrayOrScalarCommon: + @property + def real(self, /) -> Any: ... + @property + def imag(self, /) -> Any: ... + @property + def T(self) -> Self: ... + @property + def mT(self) -> Self: ... + @property + def data(self) -> memoryview: ... + @property + def flags(self) -> flagsobj: ... + @property + def itemsize(self) -> int: ... + @property + def nbytes(self) -> int: ... + @property + def device(self) -> L["cpu"]: ... + + def __bool__(self, /) -> builtins.bool: ... + def __int__(self, /) -> int: ... + def __float__(self, /) -> float: ... + def __copy__(self) -> Self: ... + def __deepcopy__(self, memo: dict[int, Any] | None, /) -> Self: ... + + # TODO: How to deal with the non-commutative nature of `==` and `!=`? + # xref numpy/numpy#17368 + def __eq__(self, other: Any, /) -> Any: ... + def __ne__(self, other: Any, /) -> Any: ... + + def copy(self, order: _OrderKACF = ...) -> Self: ... + def dump(self, file: StrOrBytesPath | SupportsWrite[bytes]) -> None: ... + def dumps(self) -> bytes: ... + def tobytes(self, order: _OrderKACF = ...) -> bytes: ... + def tofile(self, fid: StrOrBytesPath | _SupportsFileMethods, sep: str = ..., format: str = ...) -> None: ... + # generics and 0d arrays return builtin scalars + def tolist(self) -> Any: ... + def to_device(self, device: L["cpu"], /, *, stream: int | Any | None = ...) -> Self: ... + + @property + def __array_interface__(self) -> dict[str, Any]: ... + @property + def __array_priority__(self) -> float: ... + @property + def __array_struct__(self) -> CapsuleType: ... # builtins.PyCapsule + def __array_namespace__(self, /, *, api_version: _ArrayAPIVersion | None = None) -> ModuleType: ... + def __setstate__(self, state: tuple[ + SupportsIndex, # version + _ShapeLike, # Shape + _DTypeT_co, # DType + np.bool, # F-continuous + bytes | list[Any], # Data + ], /) -> None: ... + + def conj(self) -> Self: ... + def conjugate(self) -> Self: ... + + def argsort( + self, + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: builtins.bool | None = ..., + ) -> NDArray[Any]: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmax(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... 
+ @overload # axis=index, out=None (default) + def argmax(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + @overload # axis=index, out=ndarray + def argmax(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + @overload + def argmax(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + + @overload # axis=None (default), out=None (default), keepdims=False (default) + def argmin(self, /, axis: None = None, out: None = None, *, keepdims: L[False] = False) -> intp: ... + @overload # axis=index, out=None (default) + def argmin(self, /, axis: SupportsIndex, out: None = None, *, keepdims: builtins.bool = False) -> Any: ... + @overload # axis=index, out=ndarray + def argmin(self, /, axis: SupportsIndex | None, out: _BoolOrIntArrayT, *, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + @overload + def argmin(self, /, axis: SupportsIndex | None = None, *, out: _BoolOrIntArrayT, keepdims: builtins.bool = False) -> _BoolOrIntArrayT: ... + + @overload # out=None (default) + def round(self, /, decimals: SupportsIndex = 0, out: None = None) -> Self: ... + @overload # out=ndarray + def round(self, /, decimals: SupportsIndex, out: _ArrayT) -> _ArrayT: ... + @overload + def round(self, /, decimals: SupportsIndex = 0, *, out: _ArrayT) -> _ArrayT: ... + + @overload # out=None (default) + def choose(self, /, choices: ArrayLike, out: None = None, mode: _ModeKind = "raise") -> NDArray[Any]: ... + @overload # out=ndarray + def choose(self, /, choices: ArrayLike, out: _ArrayT, mode: _ModeKind = "raise") -> _ArrayT: ... + + # TODO: Annotate kwargs with an unpacked `TypedDict` + @overload # out: None (default) + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload + def clip(self, /, min: None, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload + def clip(self, /, min: None = None, *, max: ArrayLike, out: None = None, **kwargs: Any) -> NDArray[Any]: ... + @overload # out: ndarray + def clip(self, /, min: ArrayLike, max: ArrayLike | None, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: ArrayLike, max: ArrayLike | None = None, *, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: None, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + @overload + def clip(self, /, min: None = None, *, max: ArrayLike, out: _ArrayT, **kwargs: Any) -> _ArrayT: ... + + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, out: None = None) -> NDArray[Any]: ... + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None, out: _ArrayT) -> _ArrayT: ... + @overload + def compress(self, /, condition: _ArrayLikeInt_co, axis: SupportsIndex | None = None, *, out: _ArrayT) -> _ArrayT: ... + + @overload # out: None (default) + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumprod(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumprod(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... 
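# --- Annotation (not part of the patch): the recurring `out=` overload
# pattern above (argmax/argmin/round/choose/clip/compress/cumprod). With
# `out=None` a fresh result is allocated (an `intp` scalar for the default
# `argmax`); with `out=` the result *is* that array, which is what the
# `-> _BoolOrIntArrayT` / `-> _ArrayT` return types encode.
import numpy as np

x = np.array([[3.0, 1.0], [2.0, 4.0]])
assert isinstance(x.argmax(), np.intp)   # axis=None, out=None -> intp
buf = np.empty(2, dtype=np.intp)
res = x.argmax(axis=1, out=buf)          # out given -> the out array itself
assert res is buf
# --- end annotation ---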
+ + @overload # out: None (default) + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, out: None = None) -> NDArray[Any]: ... + @overload # out: ndarray + def cumsum(self, /, axis: SupportsIndex | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def cumsum(self, /, axis: SupportsIndex | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... + + @overload + def max( + self, + /, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def max( + self, + /, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def max( + self, + /, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def min( + self, + /, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def min( + self, + /, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def min( + self, + /, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 0, + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 0, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def sum( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 0, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 1, + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 1, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def prod( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + initial: _NumberLike_co = 1, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def mean( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + ) -> Any: ... + @overload + def mean( + self, + /, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... 
+ @overload + def mean( + self, + /, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + keepdims: builtins.bool = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> Any: ... + @overload + def std( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> _ArrayT: ... + @overload + def std( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool = False, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> _ArrayT: ... + + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + out: None = None, + ddof: float = 0, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> Any: ... + @overload + def var( + self, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool = False, + *, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> _ArrayT: ... + @overload + def var( + self, + axis: _ShapeLike | None = None, + dtype: DTypeLike | None = None, + *, + out: _ArrayT, + ddof: float = 0, + keepdims: builtins.bool = False, + where: _ArrayLikeBool_co = True, + mean: _ArrayLikeNumber_co = ..., + correction: float = ..., + ) -> _ArrayT: ... + +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + @property + def base(self) -> NDArray[Any] | None: ... + @property + def ndim(self) -> int: ... + @property + def size(self) -> int: ... + @property + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @real.setter + def real(self, value: ArrayLike, /) -> None: ... + @property + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ... + @imag.setter + def imag(self, value: ArrayLike, /) -> None: ... + + def __new__( + cls, + shape: _ShapeLike, + dtype: DTypeLike = ..., + buffer: _SupportsBuffer | None = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike | None = ..., + order: _OrderKACF = ..., + ) -> Self: ... + + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + @overload + def __array__(self, dtype: None = None, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, dtype: _DTypeT, /, *, copy: builtins.bool | None = None) -> ndarray[_ShapeT_co, _DTypeT]: ... + + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... 
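# --- Annotation (not part of the patch): per the `std`/`var` signatures
# earlier in this hunk, both take keyword-only `mean` (a precomputed mean,
# shaped as if `keepdims=True`) and `correction` (the array-API spelling of
# `ddof`). A sketch, assuming a NumPy recent enough to ship these keywords:
import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
s1 = x.std(ddof=1)
s2 = x.std(mean=x.mean(keepdims=True), correction=1)
assert np.isclose(s1, s2)
# --- end annotation ---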
+ + def __array_function__( + self, + func: Callable[..., Any], + types: Iterable[type], + args: Iterable[Any], + kwargs: Mapping[str, Any], + ) -> Any: ... + + # NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__` + # is a pseudo-abstract method the type has been narrowed down in order to + # grant subclasses a bit more flexibility + def __array_finalize__(self, obj: NDArray[Any] | None, /) -> None: ... + + def __array_wrap__( + self, + array: ndarray[_ShapeT, _DTypeT], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: builtins.bool = ..., + /, + ) -> ndarray[_ShapeT, _DTypeT]: ... + + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: SupportsIndex | tuple[SupportsIndex, ...], /) -> Any: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self: NDArray[void], key: str, /) -> ndarray[_ShapeT_co, np.dtype]: ... + @overload + def __getitem__(self: NDArray[void], key: list[str], /) -> ndarray[_ShapeT_co, _dtype[void]]: ... + + @overload # flexible | object_ | bool + def __setitem__( + self: ndarray[Any, dtype[flexible | object_ | np.bool] | dtypes.StringDType], + key: _ToIndices, + value: object, + /, + ) -> None: ... + @overload # integer + def __setitem__( + self: NDArray[integer], + key: _ToIndices, + value: _ConvertibleToInt | _NestedSequence[_ConvertibleToInt] | _ArrayLikeInt_co, + /, + ) -> None: ... + @overload # floating + def __setitem__( + self: NDArray[floating], + key: _ToIndices, + value: _ConvertibleToFloat | _NestedSequence[_ConvertibleToFloat | None] | _ArrayLikeFloat_co | None, + /, + ) -> None: ... + @overload # complexfloating + def __setitem__( + self: NDArray[complexfloating], + key: _ToIndices, + value: _ConvertibleToComplex | _NestedSequence[_ConvertibleToComplex | None] | _ArrayLikeNumber_co | None, + /, + ) -> None: ... + @overload # timedelta64 + def __setitem__( + self: NDArray[timedelta64], + key: _ToIndices, + value: _ConvertibleToTD64 | _NestedSequence[_ConvertibleToTD64], + /, + ) -> None: ... + @overload # datetime64 + def __setitem__( + self: NDArray[datetime64], + key: _ToIndices, + value: _ConvertibleToDT64 | _NestedSequence[_ConvertibleToDT64], + /, + ) -> None: ... + @overload # void + def __setitem__(self: NDArray[void], key: str | list[str], value: object, /) -> None: ... + @overload # catch-all + def __setitem__(self, key: _ToIndices, value: ArrayLike, /) -> None: ... + + @property + def ctypes(self) -> _ctypes[int]: ... + @property + def shape(self) -> _ShapeT_co: ... + @shape.setter + def shape(self, value: _ShapeLike) -> None: ... + @property + def strides(self) -> _Shape: ... + @strides.setter + def strides(self, value: _ShapeLike) -> None: ... + def byteswap(self, inplace: builtins.bool = ...) -> Self: ... + def fill(self, value: Any, /) -> None: ... + @property + def flat(self) -> flatiter[Self]: ... + + @overload # use the same output type as that of the underlying `generic` + def item(self: NDArray[generic[_T]], i0: SupportsIndex | tuple[SupportsIndex, ...] = ..., /, *args: SupportsIndex) -> _T: ... + @overload # special casing for `StringDType`, which has no scalar type + def item( + self: ndarray[Any, dtypes.StringDType], + arg0: SupportsIndex | tuple[SupportsIndex, ...] = ..., + /, + *args: SupportsIndex, + ) -> str: ... 
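# --- Annotation (not part of the patch): `item()` above is typed through
# `generic[_T]`, so each scalar dtype carries the Python builtin that
# `.item()` unwraps to at runtime.
import numpy as np

a = np.array([[1, 2], [3, 4]], dtype=np.int32)
v = a.item(1, 0)              # element at (1, 0) as a plain Python int
assert v == 3 and type(v) is int
# --- end annotation ---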
+ + @overload # this first overload prevents mypy from over-eagerly selecting `tuple[()]` in case of `_AnyShape` + def tolist(self: ndarray[tuple[Never], dtype[generic[_T]]], /) -> Any: ... + @overload + def tolist(self: ndarray[tuple[()], dtype[generic[_T]]], /) -> _T: ... + @overload + def tolist(self: ndarray[tuple[int], dtype[generic[_T]]], /) -> list[_T]: ... + @overload + def tolist(self: ndarray[tuple[int, int], dtype[generic[_T]]], /) -> list[list[_T]]: ... + @overload + def tolist(self: ndarray[tuple[int, int, int], dtype[generic[_T]]], /) -> list[list[list[_T]]]: ... + @overload + def tolist(self, /) -> Any: ... + + @overload + def resize(self, new_shape: _ShapeLike, /, *, refcheck: builtins.bool = ...) -> None: ... + @overload + def resize(self, /, *new_shape: SupportsIndex, refcheck: builtins.bool = ...) -> None: ... + + def setflags(self, write: builtins.bool = ..., align: builtins.bool = ..., uic: builtins.bool = ...) -> None: ... + + def squeeze( + self, + axis: SupportsIndex | tuple[SupportsIndex, ...] | None = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + @overload + def transpose(self, axes: _ShapeLike | None, /) -> Self: ... + @overload + def transpose(self, *axes: SupportsIndex) -> Self: ... + + @overload + def all( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> np.bool: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> np.bool | NDArray[np.bool]: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def all( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: L[False, 0] = False, + *, + where: _ArrayLikeBool_co = True + ) -> np.bool: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> np.bool | NDArray[np.bool]: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: SupportsIndex = False, + *, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + @overload + def any( + self, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: SupportsIndex = False, + where: _ArrayLikeBool_co = True, + ) -> _ArrayT: ... + + # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> None: ... + @overload + def partition( + self: NDArray[void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload + def argpartition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> NDArray[intp]: ... 
+ @overload + def argpartition( + self: NDArray[void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> NDArray[intp]: ... + + # + def diagonal( + self, + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + # 1D + 1D returns a scalar; + # all other with at least 1 non-0D array return an ndarray. + @overload + def dot(self, b: _ScalarLike_co, out: None = ...) -> NDArray[Any]: ... + @overload + def dot(self, b: ArrayLike, out: None = ...) -> Any: ... # type: ignore[misc] + @overload + def dot(self, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... + + # `nonzero()` is deprecated for 0d arrays/generics + def nonzero(self) -> tuple[NDArray[intp], ...]: ... + + # `put` is technically available to `generic`, + # but is pointless as `generic`s are immutable + def put(self, /, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + + @overload + def searchsorted( # type: ignore[misc] + self, # >= 1D array + v: _ScalarLike_co, # 0D array-like + side: _SortSide = ..., + sorter: _ArrayLikeInt_co | None = ..., + ) -> intp: ... + @overload + def searchsorted( + self, # >= 1D array + v: ArrayLike, + side: _SortSide = ..., + sorter: _ArrayLikeInt_co | None = ..., + ) -> NDArray[intp]: ... + + def sort( + self, + axis: SupportsIndex = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: builtins.bool | None = ..., + ) -> None: ... + + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> Any: ... + @overload + def trace( + self, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _ArrayT = ..., + ) -> _ArrayT: ... + + @overload + def take( # type: ignore[misc] + self: NDArray[_ScalarT], + indices: _IntLike_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> _ScalarT: ... + @overload + def take( # type: ignore[misc] + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: _ArrayT = ..., + mode: _ModeKind = ..., + ) -> _ArrayT: ... + + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: None = None, + ) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, + ) -> ndarray[_AnyShape, _DTypeT_co]: ... + + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... + + # NOTE: reshape also accepts negative integers, so we can't use integer literals + @overload # (None) + def reshape(self, shape: None, /, *, order: _OrderACF = "C", copy: builtins.bool | None = None) -> Self: ... + @overload # (empty_sequence) + def reshape( # type: ignore[overload-overlap] # mypy false positive + self, + shape: Sequence[Never], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[()], _DTypeT_co]: ... + @overload # (() | (int) | (int, int) | ....) 
+ # up to 8-d
+ def reshape(
+ self,
+ shape: _AnyShapeT,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[_AnyShapeT, _DTypeT_co]: ...
+ @overload # (index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[int], _DTypeT_co]: ...
+ @overload # (index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[int, int], _DTypeT_co]: ...
+ @overload # (index, index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ size3: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[int, int, int], _DTypeT_co]: ...
+ @overload # (index, index, index, index)
+ def reshape(
+ self,
+ size1: SupportsIndex,
+ size2: SupportsIndex,
+ size3: SupportsIndex,
+ size4: SupportsIndex,
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[tuple[int, int, int, int], _DTypeT_co]: ...
+ @overload # (int, *(index, ...))
+ def reshape(
+ self,
+ size0: SupportsIndex,
+ /,
+ *shape: SupportsIndex,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+ @overload # (sequence[index])
+ def reshape(
+ self,
+ shape: Sequence[SupportsIndex],
+ /,
+ *,
+ order: _OrderACF = "C",
+ copy: builtins.bool | None = None,
+ ) -> ndarray[_AnyShape, _DTypeT_co]: ...
+
+ @overload
+ def astype(
+ self,
+ dtype: _DTypeLike[_ScalarT],
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: builtins.bool = ...,
+ copy: builtins.bool | _CopyMode = ...,
+ ) -> ndarray[_ShapeT_co, dtype[_ScalarT]]: ...
+ @overload
+ def astype(
+ self,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+ casting: _CastingKind = ...,
+ subok: builtins.bool = ...,
+ copy: builtins.bool | _CopyMode = ...,
+ ) -> ndarray[_ShapeT_co, dtype]: ...
+
+ #
+ @overload # ()
+ def view(self, /) -> Self: ...
+ @overload # (dtype: T)
+ def view(self, /, dtype: _DTypeT | _HasDType[_DTypeT]) -> ndarray[_ShapeT_co, _DTypeT]: ...
+ @overload # (dtype: dtype[T])
+ def view(self, /, dtype: _DTypeLike[_ScalarT]) -> NDArray[_ScalarT]: ...
+ @overload # (type: T)
+ def view(self, /, *, type: type[_ArrayT]) -> _ArrayT: ...
+ @overload # (_: T)
+ def view(self, /, dtype: type[_ArrayT]) -> _ArrayT: ...
+ @overload # (dtype: ?)
+ def view(self, /, dtype: DTypeLike) -> ndarray[_ShapeT_co, dtype]: ...
+ @overload # (dtype: ?, type: type[T])
+ def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ...
+
+ def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ...
+ @overload
+ def getfield(self, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ...
+ @overload
+ def getfield(self, dtype: DTypeLike, offset: SupportsIndex = 0) -> NDArray[Any]: ...
+
+ def __index__(self: NDArray[integer], /) -> int: ...
+ def __complex__(self: NDArray[number | np.bool | object_], /) -> complex: ...
+
+ def __len__(self) -> int: ...
+ def __contains__(self, value: object, /) -> builtins.bool: ...
+
+ # NOTE: This weird `Never` tuple works around a strange mypy issue where it assigns
+ # `tuple[int]` to `tuple[Never]` or `tuple[int, int]` to `tuple[Never, Never]`.
+ # This way the bug only occurs for 9-D arrays, which are probably not very common.
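# --- Annotation (not part of the patch): per the `astype`/`view` overloads
# above, `astype` keeps the static shape type and swaps the dtype (copying
# data), while `view` reinterprets the same buffer, so the result shares
# memory with its base. Names below are illustrative.
import numpy as np

a = np.arange(4, dtype=np.int64)
b = a.astype(np.float32)      # -> ndarray[_ShapeT_co, dtype[float32]]
c = a.view(np.uint64)         # same bytes, different dtype, no copy
assert b.dtype == np.float32 and c.base is a
# --- end annotation ---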
+ @overload + def __iter__(self: ndarray[tuple[Never, Never, Never, Never, Never, Never, Never, Never, Never]], /) -> Iterator[Any]: ... + @overload # == 1-d & dtype[T \ object_] + def __iter__(self: ndarray[tuple[int], dtype[_NonObjectScalarT]], /) -> Iterator[_NonObjectScalarT]: ... + @overload # >= 2-d + def __iter__(self: ndarray[tuple[int, int, *tuple[int, ...]], dtype[_ScalarT]], /) -> Iterator[NDArray[_ScalarT]]: ... + @overload # ?-d + def __iter__(self, /) -> Iterator[Any]: ... + + # + @overload + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __lt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __lt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __lt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __le__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __le__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __le__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __gt__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __gt__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __gt__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... + + # + @overload + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[np.bool]: ... + @overload + def __ge__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], other: _ArrayLikeStr_co | _ArrayLikeString_co, / + ) -> NDArray[np.bool]: ... + @overload + def __ge__(self: NDArray[object_], other: object, /) -> NDArray[np.bool]: ... + @overload + def __ge__(self, other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... 
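# --- Annotation (not part of the patch): every `__lt__`/`__le__`/`__gt__`/
# `__ge__` overload above funnels to `NDArray[np.bool]`: comparisons
# broadcast elementwise and return a boolean mask, never a single bool.
import numpy as np

x = np.array([1, 5, 3])
mask = x < 4                  # -> NDArray[np.bool]
assert mask.dtype == np.bool_ and mask.tolist() == [True, False, True]
# --- end annotation ---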
+ + # Unary ops + + # TODO: Uncomment once https://github.com/python/mypy/issues/14070 is fixed + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex64DType], /) -> ndarray[_ShapeT, dtypes.Float32DType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.Complex128DType], /) -> ndarray[_ShapeT, dtypes.Float64DType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtypes.CLongDoubleDType], /) -> ndarray[_ShapeT, dtypes.LongDoubleDType]: ... + # @overload + # def __abs__(self: ndarray[_ShapeT, dtype[complex128]], /) -> ndarray[_ShapeT, dtype[float64]]: ... + @overload + def __abs__(self: ndarray[_ShapeT, dtype[complexfloating[_NBit]]], /) -> ndarray[_ShapeT, dtype[floating[_NBit]]]: ... + @overload + def __abs__(self: _RealArrayT, /) -> _RealArrayT: ... + + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + def __neg__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + def __pos__(self: _NumericArrayT, /) -> _NumericArrayT: ... # noqa: PYI019 + + # Binary ops + + # TODO: Support the "1d @ 1d -> scalar" case + @overload + def __matmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __matmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __matmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __matmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + @overload + def __matmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __matmul__ + def __rmatmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmatmul__(self: NDArray[floating[_64Bit]], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmatmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... 
+ @overload
+ def __rmatmul__(self: NDArray[complexfloating[_64Bit]], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ...
+ @overload
+ def __rmatmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ...
+ @overload
+ def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ...
+ @overload
+ def __rmatmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ...
+ @overload
+ def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __mod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+ @overload
+ def __mod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __mod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+ @overload
+ def __mod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+ @overload
+ def __mod__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload # signature equivalent to __mod__
+ def __rmod__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ...
+ @overload
+ def __rmod__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ...
+ @overload
+ def __rmod__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ...
+ @overload
+ def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ...
+ @overload
+ def __rmod__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ...
+ @overload
+ def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ...
+ @overload
+ def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+ @overload
+ def __divmod__(self: NDArray[_RealNumberT], rhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
+ @overload
+ def __divmod__(self: NDArray[_RealNumberT], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: NDArray[np.bool], rhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: NDArray[np.bool], rhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: NDArray[float64], rhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ...
+ @overload
+ def __divmod__(self: _ArrayFloat64_co, rhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ...
+ @overload
+ def __divmod__(self: _ArrayUInt_co, rhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: _ArrayInt_co, rhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __divmod__(self: _ArrayFloat_co, rhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ...
+ @overload
+ def __divmod__(self: NDArray[timedelta64], rhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
+ @overload # signature equivalent to __divmod__
+ def __rdivmod__(self: NDArray[_RealNumberT], lhs: int | np.bool, /) -> _2Tuple[ndarray[_ShapeT_co, dtype[_RealNumberT]]]: ...
+ @overload
+ def __rdivmod__(self: NDArray[_RealNumberT], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: NDArray[np.bool], lhs: _ArrayLike[_RealNumberT], /) -> _2Tuple[NDArray[_RealNumberT]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: NDArray[float64], lhs: _ArrayLikeFloat64_co, /) -> _2Tuple[NDArray[float64]]: ...
+ @overload
+ def __rdivmod__(self: _ArrayFloat64_co, lhs: _ArrayLike[floating[_64Bit]], /) -> _2Tuple[NDArray[float64]]: ...
+ @overload
+ def __rdivmod__(self: _ArrayUInt_co, lhs: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: _ArrayInt_co, lhs: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger]]: ... # type: ignore[overload-overlap]
+ @overload
+ def __rdivmod__(self: _ArrayFloat_co, lhs: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating]]: ...
+ @overload
+ def __rdivmod__(self: NDArray[timedelta64], lhs: _ArrayLike[timedelta64], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ...
+
+ @overload
+ def __add__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ...
+ @overload + def __add__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __add__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __add__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __add__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + @overload + def __add__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __add__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __add__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload + def __add__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __add__ + def __radd__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __radd__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __radd__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __radd__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __radd__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
+ @overload + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... + @overload + def __radd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> NDArray[bytes_]: ... + @overload + def __radd__(self: NDArray[str_], other: _ArrayLikeStr_co, /) -> NDArray[str_]: ... + @overload + def __radd__( + self: ndarray[Any, dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[tuple[Any, ...], dtypes.StringDType]: ... + @overload + def __radd__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __sub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __sub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __sub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __sub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __sub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __sub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... 
+ @overload + def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... + @overload + def __sub__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rsub__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rsub__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __rsub__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rsub__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rsub__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rsub__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... # type: ignore[overload-overlap] + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... + @overload + def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... + @overload + def __rsub__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __mul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __mul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __mul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __mul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __mul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
+ @overload + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __mul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __mul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __mul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __mul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload # signature equivalent to __mul__ + def __rmul__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rmul__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rmul__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rmul__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rmul__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... # type: ignore[overload-overlap] + @overload + def __rmul__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rmul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __rmul__(self: _ArrayFloat_co, other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rmul__( + self: ndarray[Any, dtype[character] | dtypes.StringDType], + other: _ArrayLikeInt, + /, + ) -> ndarray[tuple[Any, ...], _DTypeT_co]: ... + @overload + def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... 
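# --- Annotation (not part of the patch): two special cases encoded in the
# `__mul__`/`__rmul__` overloads above: timedelta64 arrays scale by real
# factors (staying timedelta64), and character/string arrays repeat under
# integer multiplication. A sketch, assuming a NumPy new enough to ship the
# string-multiply overloads added here.
import numpy as np

t = np.array([2, 4], dtype="timedelta64[s]")
assert (t * 1.5).dtype == np.dtype("timedelta64[s]")

s = np.array(["ab"], dtype=np.str_)
assert (s * 3)[0] == "ababab"
# --- end annotation ---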
+ + @overload + def __truediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __truediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __truediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __truediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __truediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... + @overload + def __truediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... + @overload + def __truediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... + @overload + def __truediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __truediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rtruediv__(self: _ArrayInt_co | NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: _ArrayFloat64_co, other: _ArrayLikeInt_co | _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + @overload + def __rtruediv__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + @overload + def __rtruediv__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLike[floating], /) -> NDArray[floating]: ... + @overload + def __rtruediv__(self: NDArray[complexfloating], other: _ArrayLikeNumber_co, /) -> NDArray[complexfloating]: ... + @overload + def __rtruediv__(self: _ArrayNumber_co, other: _ArrayLike[complexfloating], /) -> NDArray[complexfloating]: ... + @overload + def __rtruediv__(self: NDArray[inexact], other: _ArrayLikeNumber_co, /) -> NDArray[inexact]: ... + @overload + def __rtruediv__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + @overload + def __rtruediv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[float64]: ... + @overload + def __rtruediv__(self: NDArray[integer | floating], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __floordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... 
+ @overload + def __floordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __floordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... + @overload + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... + @overload + def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rfloordiv__(self: NDArray[_RealNumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_RealNumberT]]: ... + @overload + def __rfloordiv__(self: NDArray[_RealNumberT], other: _ArrayLikeBool_co, /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLike[_RealNumberT], /) -> NDArray[_RealNumberT]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + @overload + def __rfloordiv__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + @overload + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __rfloordiv__(self: NDArray[timedelta64], other: _ArrayLike[timedelta64], /) -> NDArray[int64]: ... + @overload + def __rfloordiv__(self: NDArray[floating | integer], other: _ArrayLike[timedelta64], /) -> NDArray[timedelta64]: ... + @overload + def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... 
# type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... + @overload + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... + @overload + def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... + @overload + def __pow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... + @overload + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + @overload + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... + @overload + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... + @overload + def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + @overload + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + @overload + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... + @overload + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... + @overload + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... + @overload + def __rpow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... + @overload + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... 
# type: ignore[overload-overlap] + @overload + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... + @overload + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... + @overload + def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... + @overload + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... + + @overload + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[misc] + @overload + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... + @overload + def __and__(self: NDArray[object_], other: Any, /) -> Any: ... + @overload + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + + @overload + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] + @overload + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... 
# type: ignore[misc]
+    @overload
+    def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+    @overload
+    def __rand__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    @overload
+    def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    @overload
+    def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[misc]
+    @overload
+    def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+    @overload
+    def __xor__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    @overload
+    def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    @overload
+    def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[misc]
+    @overload
+    def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+    @overload
+    def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    @overload
+    def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    @overload
+    def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[misc]
+    @overload
+    def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+    @overload
+    def __or__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    @overload
+    def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ...  # type: ignore[misc]
+    @overload
+    def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ...  # type: ignore[misc]
+    @overload
+    def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ...
+    @overload
+    def __ror__(self: NDArray[object_], other: Any, /) -> Any: ...
+    @overload
+    def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ...
+
+    # `np.generic` does not support inplace operations
+
+    # NOTE: Inplace ops generally use "same_kind" casting w.r.t. the left
+    # operand. The exception to this rule is unsigned integers, which also
+    # accept a signed integer for the right operand, as long as it is a 0-d
+    # object and its value is >= 0.
+    # NOTE: Due to a mypy bug, overloading on e.g. `self: NDArray[SCT_floating]` won't
+    # work, as this will lead to false negatives when using these inplace ops.
+    @overload
+    def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+    @overload
+    def __iadd__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+    @overload
+    def __iadd__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+    @overload
+    def __iadd__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+    @overload
+    def __iadd__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ...
+ @overload + def __iadd__(self: NDArray[bytes_], other: _ArrayLikeBytes_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: ndarray[Any, dtype[str_] | dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + @overload + def __isub__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__(self: NDArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + @overload + def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: ndarray[Any, dtype[integer | character] | dtypes.StringDType], other: _ArrayLikeInt_co, / + ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + @overload + def __ipow__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + @overload + def __itruediv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__imod__` + @overload + def __ifloordiv__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__(self: NDArray[floating | timedelta64], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__ifloordiv__` + @overload + def __imod__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imod__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imod__( + self: NDArray[timedelta64], + other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], + /, + ) -> ndarray[_ShapeT_co, _DTypeT_co]: ... 
+ @overload + def __imod__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__irshift__` + @overload + def __ilshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ilshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__ilshift__` + @overload + def __irshift__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __irshift__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__ixor__` and `__ior__` + @overload + def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iand__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iand__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__iand__` and `__ior__` + @overload + def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ixor__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ixor__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # keep in sync with `__iand__` and `__ixor__` + @overload + def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ior__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ior__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + @overload + def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imatmul__(self: NDArray[integer], other: _ArrayLikeInt_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imatmul__(self: NDArray[floating], other: _ArrayLikeFloat_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imatmul__(self: NDArray[complexfloating], other: _ArrayLikeComplex_co, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imatmul__(self: NDArray[object_], other: Any, /) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + + # + def __dlpack__( + self: NDArray[number], + /, + *, + stream: int | Any | None = None, + max_version: tuple[int, int] | None = None, + dl_device: tuple[int, int] | None = None, + copy: builtins.bool | None = None, + ) -> CapsuleType: ... + def __dlpack_device__(self, /) -> tuple[L[1], L[0]]: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + +# NOTE: while `np.generic` is not technically an instance of `ABCMeta`, +# the `@abstractmethod` decorator is herein used to (forcefully) deny +# the creation of `np.generic` instances. +# The `# type: ignore` comments are necessary to silence mypy errors regarding +# the missing `ABCMeta` metaclass. +# See https://github.com/numpy/numpy-stubs/pull/80 for more details. +class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): + @abstractmethod + def __new__(cls, /, *args: Any, **kwargs: Any) -> Self: ... + def __hash__(self) -> int: ... + @overload + def __array__(self, dtype: None = None, /) -> ndarray[tuple[()], dtype[Self]]: ... 
+ @overload + def __array__(self, dtype: _DTypeT, /) -> ndarray[tuple[()], _DTypeT]: ... + if sys.version_info >= (3, 12): + def __buffer__(self, flags: int, /) -> memoryview: ... + + @property + def base(self) -> None: ... + @property + def ndim(self) -> L[0]: ... + @property + def size(self) -> L[1]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def strides(self) -> tuple[()]: ... + @property + def flat(self) -> flatiter[ndarray[tuple[int], dtype[Self]]]: ... + + @overload + def item(self, /) -> _ItemT_co: ... + @overload + def item(self, arg0: L[0, -1] | tuple[L[0, -1]] | tuple[()] = ..., /) -> _ItemT_co: ... + def tolist(self, /) -> _ItemT_co: ... + + def byteswap(self, inplace: L[False] = ...) -> Self: ... + + @overload + def astype( + self, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> _ScalarT: ... + @overload + def astype( + self, + dtype: DTypeLike, + order: _OrderKACF = ..., + casting: _CastingKind = ..., + subok: builtins.bool = ..., + copy: builtins.bool | _CopyMode = ..., + ) -> Any: ... + + # NOTE: `view` will perform a 0D->scalar cast, + # thus the array `type` is irrelevant to the output type + @overload + def view(self, type: type[NDArray[Any]] = ...) -> Self: ... + @overload + def view( + self, + dtype: _DTypeLike[_ScalarT], + type: type[NDArray[Any]] = ..., + ) -> _ScalarT: ... + @overload + def view( + self, + dtype: DTypeLike, + type: type[NDArray[Any]] = ..., + ) -> Any: ... + + @overload + def getfield( + self, + dtype: _DTypeLike[_ScalarT], + offset: SupportsIndex = ... + ) -> _ScalarT: ... + @overload + def getfield( + self, + dtype: DTypeLike, + offset: SupportsIndex = ... + ) -> Any: ... + + @overload + def take( # type: ignore[misc] + self, + indices: _IntLike_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> Self: ... + @overload + def take( # type: ignore[misc] + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., + ) -> NDArray[Self]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: _ArrayT = ..., + mode: _ModeKind = ..., + ) -> _ArrayT: ... + + def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... + def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... + + @overload # (() | []) + def reshape( + self, + shape: tuple[()] | list[Never], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self: ... + @overload # ((1, *(1, ...))@_ShapeT) + def reshape( + self, + shape: _1NShapeT, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[_1NShapeT, dtype[Self]]: ... + @overload # (Sequence[index, ...]) # not recommended + def reshape( + self, + shape: Sequence[SupportsIndex], + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> Self | ndarray[tuple[L[1], ...], dtype[Self]]: ... + @overload # _(index) + def reshape( + self, + size1: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1]], dtype[Self]]: ... 
+ @overload # _(index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index) + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + /, + *, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1]], dtype[Self]]: ... + @overload # _(index, index, index, index, index, *index) # ndim >= 5 + def reshape( + self, + size1: SupportsIndex, + size2: SupportsIndex, + size3: SupportsIndex, + size4: SupportsIndex, + size5: SupportsIndex, + /, + *sizes6_: SupportsIndex, + order: _OrderACF = "C", + copy: builtins.bool | None = None, + ) -> ndarray[tuple[L[1], L[1], L[1], L[1], L[1], *tuple[L[1], ...]], dtype[Self]]: ... + + def squeeze(self, axis: L[0] | tuple[()] | None = ...) -> Self: ... + def transpose(self, axes: tuple[()] | None = ..., /) -> Self: ... + + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + @overload + def all( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + out: None = None, + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True + ) -> np.bool: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + *, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + @overload + def any( + self, + /, + axis: L[0, -1] | tuple[()] | None = None, + *, + out: ndarray[tuple[()], dtype[_ScalarT]], + keepdims: SupportsIndex = False, + where: builtins.bool | np.bool | ndarray[tuple[()], dtype[np.bool]] = True, + ) -> _ScalarT: ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _dtype[Self]: ... + +class number(generic[_NumberItemT_co], Generic[_NBit, _NumberItemT_co]): + @abstractmethod # `SupportsIndex | str | bytes` equivs `_ConvertibleToInt & _ConvertibleToFloat` + def __new__(cls, value: SupportsIndex | str | bytes = 0, /) -> Self: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... + + def __neg__(self) -> Self: ... + def __pos__(self) -> Self: ... + def __abs__(self) -> Self: ... + + def __add__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __radd__(self, other: _NumberLike_co, /) -> Incomplete: ... 
+ def __sub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rsub__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __mul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rmul__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __pow__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rpow__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __truediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + def __rtruediv__(self, other: _NumberLike_co, /) -> Incomplete: ... + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ... + @overload + def __lt__(self, other: _SupportsGT, /) -> bool_: ... + + @overload + def __le__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ... + @overload + def __le__(self, other: _SupportsGE, /) -> bool_: ... + + @overload + def __gt__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ... + @overload + def __gt__(self, other: _SupportsLT, /) -> bool_: ... + + @overload + def __ge__(self, other: _NumberLike_co, /) -> bool_: ... + @overload + def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ... + @overload + def __ge__(self, other: _SupportsLE, /) -> bool_: ... + +class bool(generic[_BoolItemT_co], Generic[_BoolItemT_co]): + @property + def itemsize(self) -> L[1]: ... + @property + def nbytes(self) -> L[1]: ... + @property + def real(self) -> Self: ... + @property + def imag(self) -> np.bool[L[False]]: ... + + @overload # mypy bug workaround: https://github.com/numpy/numpy/issues/29245 + def __new__(cls, value: Never, /) -> np.bool[builtins.bool]: ... + @overload + def __new__(cls, value: _Falsy = ..., /) -> np.bool[L[False]]: ... + @overload + def __new__(cls, value: _Truthy, /) -> np.bool[L[True]]: ... + @overload + def __new__(cls, value: object, /) -> np.bool[builtins.bool]: ... + + def __bool__(self, /) -> _BoolItemT_co: ... + + @overload + def __int__(self: np.bool[L[False]], /) -> L[0]: ... + @overload + def __int__(self: np.bool[L[True]], /) -> L[1]: ... + @overload + def __int__(self, /) -> L[0, 1]: ... + + def __abs__(self) -> Self: ... + + @overload + def __invert__(self: np.bool[L[False]], /) -> np.bool[L[True]]: ... + @overload + def __invert__(self: np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __invert__(self, /) -> np.bool: ... + + @overload + def __add__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __add__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __add__(self, other: int, /) -> int_: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + + @overload + def __radd__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __radd__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __radd__(self, other: int, /) -> int_: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + + @overload + def __sub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __sub__(self, other: int, /) -> int_: ... 
+ @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + + @overload + def __rsub__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rsub__(self, other: int, /) -> int_: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + + @overload + def __mul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __mul__(self, other: builtins.bool | bool_, /) -> bool_: ... + @overload + def __mul__(self, other: int, /) -> int_: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + + @overload + def __rmul__(self, other: _NumberT, /) -> _NumberT: ... + @overload + def __rmul__(self, other: builtins.bool, /) -> bool_: ... + @overload + def __rmul__(self, other: int, /) -> int_: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + + @overload + def __pow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __pow__(self, other: builtins.bool | bool_, mod: None = None, /) -> int8: ... + @overload + def __pow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __rpow__(self, other: _NumberT, mod: None = None, /) -> _NumberT: ... + @overload + def __rpow__(self, other: builtins.bool, mod: None = None, /) -> int8: ... + @overload + def __rpow__(self, other: int, mod: None = None, /) -> int_: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + + @overload + def __truediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __truediv__(self, other: float | integer | bool_, /) -> float64: ... + @overload + def __truediv__(self, other: complex, /) -> complex128: ... + + @overload + def __rtruediv__(self, other: _InexactT, /) -> _InexactT: ... + @overload + def __rtruediv__(self, other: float | integer, /) -> float64: ... + @overload + def __rtruediv__(self, other: complex, /) -> complex128: ... + + @overload + def __floordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __floordiv__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __floordiv__(self, other: int, /) -> int_: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + + @overload + def __rfloordiv__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rfloordiv__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rfloordiv__(self, other: int, /) -> int_: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + + # keep in sync with __floordiv__ + @overload + def __mod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __mod__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __mod__(self, other: int, /) -> int_: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + + # keep in sync with __rfloordiv__ + @overload + def __rmod__(self, other: _RealNumberT, /) -> _RealNumberT: ... + @overload + def __rmod__(self, other: builtins.bool, /) -> int8: ... 
+ @overload + def __rmod__(self, other: int, /) -> int_: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __divmod__(self, other: builtins.bool | bool_, /) -> _2Tuple[int8]: ... + @overload + def __divmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, other: _RealNumberT, /) -> _2Tuple[_RealNumberT]: ... + @overload + def __rdivmod__(self, other: builtins.bool, /) -> _2Tuple[int8]: ... + @overload + def __rdivmod__(self, other: int, /) -> _2Tuple[int_]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + + @overload + def __lshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __lshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __lshift__(self, other: int, /) -> int_: ... + + @overload + def __rlshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rlshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rlshift__(self, other: int, /) -> int_: ... + + # keep in sync with __lshift__ + @overload + def __rshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rshift__(self, other: builtins.bool | bool_, /) -> int8: ... + @overload + def __rshift__(self, other: int, /) -> int_: ... + + # keep in sync with __rlshift__ + @overload + def __rrshift__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __rrshift__(self, other: builtins.bool, /) -> int8: ... + @overload + def __rrshift__(self, other: int, /) -> int_: ... + + @overload + def __and__(self: np.bool[L[False]], other: builtins.bool | np.bool, /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[False] | np.bool[L[False]], /) -> np.bool[L[False]]: ... + @overload + def __and__(self, other: L[True] | np.bool[L[True]], /) -> Self: ... + @overload + def __and__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __and__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __and__(self, other: int, /) -> np.bool | intp: ... + __rand__ = __and__ + + @overload + def __xor__(self: np.bool[L[False]], other: _BoolItemT | np.bool[_BoolItemT], /) -> np.bool[_BoolItemT]: ... + @overload + def __xor__(self: np.bool[L[True]], other: L[True] | np.bool[L[True]], /) -> np.bool[L[False]]: ... + @overload + def __xor__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __xor__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __xor__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __xor__(self, other: int, /) -> np.bool | intp: ... + __rxor__ = __xor__ + + @overload + def __or__(self: np.bool[L[True]], other: builtins.bool | np.bool, /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: L[False] | np.bool[L[False]], /) -> Self: ... + @overload + def __or__(self, other: L[True] | np.bool[L[True]], /) -> np.bool[L[True]]: ... + @overload + def __or__(self, other: builtins.bool | np.bool, /) -> np.bool: ... + @overload + def __or__(self, other: _IntegerT, /) -> _IntegerT: ... + @overload + def __or__(self, other: int, /) -> np.bool | intp: ... + __ror__ = __or__ + + @overload + def __lt__(self, other: _NumberLike_co, /) -> bool_: ... 
+    @overload
+    def __lt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
+    @overload
+    def __le__(self, other: _NumberLike_co, /) -> bool_: ...
+    @overload
+    def __le__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self, other: _SupportsGE, /) -> bool_: ...
+
+    @overload
+    def __gt__(self, other: _NumberLike_co, /) -> bool_: ...
+    @overload
+    def __gt__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self, other: _SupportsLT, /) -> bool_: ...
+
+    @overload
+    def __ge__(self, other: _NumberLike_co, /) -> bool_: ...
+    @overload
+    def __ge__(self, other: _ArrayLikeNumber_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
+    @overload
+    def __ge__(self, other: _SupportsLE, /) -> bool_: ...
+
+# NOTE: This should _not_ be `Final` or a `TypeAlias`
+bool_ = bool
+
+# NOTE: The `object_` constructor returns the passed object, so instances with type
+# `object_` cannot exist (at runtime).
+# NOTE: Because mypy has some long-standing bugs related to `__new__`, `object_` can't
+# be made generic.
+@final
+class object_(_RealMixin, generic):
+    @overload
+    def __new__(cls, nothing_to_see_here: None = None, /) -> None: ...  # type: ignore[misc]
+    @overload
+    def __new__(cls, stringy: _AnyStr, /) -> _AnyStr: ...  # type: ignore[misc]
+    @overload
+    def __new__(cls, array: ndarray[_ShapeT, Any], /) -> ndarray[_ShapeT, dtype[Self]]: ...  # type: ignore[misc]
+    @overload
+    def __new__(cls, sequence: SupportsLenAndGetItem[object], /) -> NDArray[Self]: ...  # type: ignore[misc]
+    @overload
+    def __new__(cls, value: _T, /) -> _T: ...  # type: ignore[misc]
+    @overload  # catch-all
+    def __new__(cls, value: Any = ..., /) -> object | NDArray[Self]: ...  # type: ignore[misc]
+
+    def __hash__(self, /) -> int: ...
+    def __abs__(self, /) -> object_: ...  # this affects NDArray[object_].__abs__
+    def __call__(self, /, *args: object, **kwargs: object) -> Any: ...
+
+    if sys.version_info >= (3, 12):
+        def __release_buffer__(self, buffer: memoryview, /) -> None: ...
+
+class integer(_IntegralMixin, _RoundMixin, number[_NBit, int]):
+    @abstractmethod
+    def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ...
+
+    # NOTE: `bit_count` and `__index__` are technically defined in the concrete subtypes
+    def bit_count(self, /) -> int: ...
+    def __index__(self, /) -> int: ...
+    def __invert__(self, /) -> Self: ...
+
+    @override  # type: ignore[override]
+    @overload
+    def __truediv__(self, other: float | integer, /) -> float64: ...
+    @overload
+    def __truediv__(self, other: complex, /) -> complex128: ...
+
+    @override  # type: ignore[override]
+    @overload
+    def __rtruediv__(self, other: float | integer, /) -> float64: ...
+    @overload
+    def __rtruediv__(self, other: complex, /) -> complex128: ...
+
+    def __floordiv__(self, value: _IntLike_co, /) -> integer: ...
+    def __rfloordiv__(self, value: _IntLike_co, /) -> integer: ...
+    def __mod__(self, value: _IntLike_co, /) -> integer: ...
+    def __rmod__(self, value: _IntLike_co, /) -> integer: ...
+    def __divmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ...
+    def __rdivmod__(self, value: _IntLike_co, /) -> _2Tuple[integer]: ...
+
+    # Ensure that objects annotated as `integer` support bit-wise operations
+    def __lshift__(self, other: _IntLike_co, /) -> integer: ...
+    def __rlshift__(self, other: _IntLike_co, /) -> integer: ...
+ def __rshift__(self, other: _IntLike_co, /) -> integer: ... + def __rrshift__(self, other: _IntLike_co, /) -> integer: ... + def __and__(self, other: _IntLike_co, /) -> integer: ... + def __rand__(self, other: _IntLike_co, /) -> integer: ... + def __or__(self, other: _IntLike_co, /) -> integer: ... + def __ror__(self, other: _IntLike_co, /) -> integer: ... + def __xor__(self, other: _IntLike_co, /) -> integer: ... + def __rxor__(self, other: _IntLike_co, /) -> integer: ... + +class signedinteger(integer[_NBit]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... + @overload + def __mul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... 
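Reviewer note: the `__pow__` ladder that just closed follows the same scheme as the other `signedinteger` arithmetic ops: `int | int8 | bool_ | Self` preserves `Self`, Python `float`/`complex` promote to `float64`/`complex128`, and any other `signedinteger` falls back to the width-erased `signedinteger`. An illustrative check, not part of the patch:

import numpy as np
from typing import reveal_type

x = np.int32(7)

reveal_type(x ** 2)            # bare Python int keeps Self: int32
reveal_type(x ** 2.0)          # Python float promotes: float64
reveal_type(x ** 2j)           # Python complex promotes: complex128
reveal_type(x ** np.int64(2))  # mixed widths: signedinteger (width not pinned)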
+ + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | int8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: signedinteger, mod: None = None, /) -> signedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: signedinteger, /) -> signedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: signedinteger, /) -> _2Tuple[signedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: integer, /) -> signedinteger: ... 
+ + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: integer, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: integer, /) -> signedinteger: ... + +int8 = signedinteger[_8Bit] +int16 = signedinteger[_16Bit] +int32 = signedinteger[_32Bit] +int64 = signedinteger[_64Bit] + +byte = signedinteger[_NBitByte] +short = signedinteger[_NBitShort] +intc = signedinteger[_NBitIntC] +intp = signedinteger[_NBitIntP] +int_ = intp +long = signedinteger[_NBitLong] +longlong = signedinteger[_NBitLongLong] + +class unsignedinteger(integer[_NBit1]): + def __new__(cls, value: _ConvertibleToInt = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: float, /) -> float64: ... + @overload + def __add__(self, other: complex, /) -> complex128: ... + @overload + def __add__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __add__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: float, /) -> float64: ... + @overload + def __radd__(self, other: complex, /) -> complex128: ... + @overload + def __radd__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __radd__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: float, /) -> float64: ... + @overload + def __sub__(self, other: complex, /) -> complex128: ... + @overload + def __sub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __sub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: float, /) -> float64: ... + @overload + def __rsub__(self, other: complex, /) -> complex128: ... + @overload + def __rsub__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rsub__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: float, /) -> float64: ... + @overload + def __mul__(self, other: complex, /) -> complex128: ... 
+ @overload + def __mul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: float, /) -> float64: ... + @overload + def __rmul__(self, other: complex, /) -> complex128: ... + @overload + def __rmul__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmul__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | uint8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __pow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __pow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | uint8 | bool_, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> float64: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complex128: ... + @overload + def __rpow__(self, other: unsignedinteger, mod: None = None, /) -> unsignedinteger: ... + @overload + def __rpow__(self, other: integer, mod: None = None, /) -> Incomplete: ... + + # modular division ops + + @override # type: ignore[override] + @overload + def __floordiv__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: float, /) -> float64: ... + @overload + def __floordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __floordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rfloordiv__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: float, /) -> float64: ... + @overload + def __rfloordiv__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rfloordiv__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __mod__(self, other: int | uint8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: float, /) -> float64: ... + @overload + def __mod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __mod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __rmod__(self, other: int | uint8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: float, /) -> float64: ... + @overload + def __rmod__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rmod__(self, other: integer, /) -> Incomplete: ... + + @override # type: ignore[override] + @overload + def __divmod__(self, other: int | uint8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __divmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __divmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + @override # type: ignore[override] + @overload + def __rdivmod__(self, other: int | uint8 | bool_, /) -> _2Tuple[Self]: ... 
+ @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[float64]: ... + @overload + def __rdivmod__(self, other: unsignedinteger, /) -> _2Tuple[unsignedinteger]: ... + @overload + def __rdivmod__(self, other: integer, /) -> _2Tuple[Incomplete]: ... + + # bitwise ops + + @override # type: ignore[override] + @overload + def __lshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __lshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __lshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rlshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rlshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rlshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rshift__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __rshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rrshift__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rrshift__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rrshift__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __and__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __and__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __and__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rand__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rand__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rand__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __xor__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __xor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __xor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __rxor__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __rxor__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __rxor__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __or__(self, other: int | int8 | bool_ | Self, /) -> Self: ... + @overload + def __or__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __or__(self, other: signedinteger, /) -> signedinteger: ... + + @override # type: ignore[override] + @overload + def __ror__(self, other: int | int8 | bool_, /) -> Self: ... + @overload + def __ror__(self, other: unsignedinteger, /) -> unsignedinteger: ... + @overload + def __ror__(self, other: signedinteger, /) -> signedinteger: ... 
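Reviewer note: `unsignedinteger` keeps unsigned-only operands unsigned but lets mixed signedness fall through to `signedinteger`, matching runtime promotion. A small runtime cross-check (illustrative; assumes NEP 50 promotion semantics as in NumPy 2.x):

import numpy as np

u = np.uint8(0b0011)

print(type(u & np.uint8(0b0101)))  # unsigned & unsigned -> numpy.uint8
print(type(u & np.int16(-1)))      # mixed signedness promotes -> numpy.int16
print(type(u | 1))                 # a bare Python int is "weak": stays numpy.uint8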
+ +uint8: TypeAlias = unsignedinteger[_8Bit] +uint16: TypeAlias = unsignedinteger[_16Bit] +uint32: TypeAlias = unsignedinteger[_32Bit] +uint64: TypeAlias = unsignedinteger[_64Bit] + +ubyte: TypeAlias = unsignedinteger[_NBitByte] +ushort: TypeAlias = unsignedinteger[_NBitShort] +uintc: TypeAlias = unsignedinteger[_NBitIntC] +uintp: TypeAlias = unsignedinteger[_NBitIntP] +uint: TypeAlias = uintp +ulong: TypeAlias = unsignedinteger[_NBitLong] +ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] + +class inexact(number[_NBit, _InexactItemT_co], Generic[_NBit, _InexactItemT_co]): + @abstractmethod + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + +class floating(_RealMixin, _RoundMixin, inexact[_NBit1, float]): + def __new__(cls, value: _ConvertibleToFloat | None = 0, /) -> Self: ... + + # arithmetic ops + + @override # type: ignore[override] + @overload + def __add__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __add__(self, other: integer | floating, /) -> floating: ... + @overload + def __add__(self, other: float, /) -> Self: ... + @overload + def __add__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __radd__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __radd__(self, other: integer | floating, /) -> floating: ... + @overload + def __radd__(self, other: float, /) -> Self: ... + @overload + def __radd__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __sub__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __sub__(self, other: integer | floating, /) -> floating: ... + @overload + def __sub__(self, other: float, /) -> Self: ... + @overload + def __sub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rsub__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rsub__(self, other: integer | floating, /) -> floating: ... + @overload + def __rsub__(self, other: float, /) -> Self: ... + @overload + def __rsub__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __mul__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mul__(self, other: integer | floating, /) -> floating: ... + @overload + def __mul__(self, other: float, /) -> Self: ... + @overload + def __mul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rmul__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmul__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmul__(self, other: float, /) -> Self: ... + @overload + def __rmul__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __pow__(self, other: int | float16 | uint8 | int8 | bool_ | Self, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __pow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __pow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rpow__(self, other: int | float16 | uint8 | int8 | bool_, mod: None = None, /) -> Self: ... 
+ @overload + def __rpow__(self, other: integer | floating, mod: None = None, /) -> floating: ... + @overload + def __rpow__(self, other: float, mod: None = None, /) -> Self: ... + @overload + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __truediv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __truediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __truediv__(self, other: float, /) -> Self: ... + @overload + def __truediv__(self, other: complex, /) -> complexfloating: ... + + @override # type: ignore[override] + @overload + def __rtruediv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rtruediv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rtruediv__(self, other: float, /) -> Self: ... + @overload + def __rtruediv__(self, other: complex, /) -> complexfloating: ... + + # modular division ops + + @overload + def __floordiv__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __floordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __floordiv__(self, other: float, /) -> Self: ... + + @overload + def __rfloordiv__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rfloordiv__(self, other: integer | floating, /) -> floating: ... + @overload + def __rfloordiv__(self, other: float, /) -> Self: ... + + @overload + def __mod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> Self: ... + @overload + def __mod__(self, other: integer | floating, /) -> floating: ... + @overload + def __mod__(self, other: float, /) -> Self: ... + + @overload + def __rmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> Self: ... + @overload + def __rmod__(self, other: integer | floating, /) -> floating: ... + @overload + def __rmod__(self, other: float, /) -> Self: ... + + @overload + def __divmod__(self, other: int | float16 | uint8 | int8 | bool_ | Self, /) -> _2Tuple[Self]: ... + @overload + def __divmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __divmod__(self, other: float, /) -> _2Tuple[Self]: ... + + @overload + def __rdivmod__(self, other: int | float16 | uint8 | int8 | bool_, /) -> _2Tuple[Self]: ... + @overload + def __rdivmod__(self, other: integer | floating, /) -> _2Tuple[floating]: ... + @overload + def __rdivmod__(self, other: float, /) -> _2Tuple[Self]: ... + + # NOTE: `is_integer` and `as_integer_ratio` are technically defined in the concrete subtypes + def is_integer(self, /) -> builtins.bool: ... + def as_integer_ratio(self, /) -> tuple[int, int]: ... + +float16: TypeAlias = floating[_16Bit] +float32: TypeAlias = floating[_32Bit] + +# either a C `double`, `float`, or `longdouble` +class float64(floating[_64Bit], float): # type: ignore[misc] + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + # overrides for `floating` and `builtins.float` compatibility (`_RealMixin` doesn't work) + @property + def real(self) -> Self: ... + @property + def imag(self) -> Self: ... + def conjugate(self) -> Self: ... + def __getformat__(self, typestr: L["double", "float"], /) -> str: ... + def __getnewargs__(self, /) -> tuple[float]: ... + + # float64-specific operator overrides + @overload + def __add__(self, other: _Float64_co, /) -> float64: ... 
+ @overload + def __add__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __add__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __radd__(self, other: _Float64_co, /) -> float64: ... + @overload + def __radd__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __radd__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __radd__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __sub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __sub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __sub__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rsub__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rsub__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rsub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rsub__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __mul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __mul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __mul__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rmul__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rmul__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rmul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rmul__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __truediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __truediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __truediv__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rtruediv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __rtruediv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __rtruediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __rtruediv__(self, other: complex, /) -> float64 | complex128: ... + + @overload + def __floordiv__(self, other: _Float64_co, /) -> float64: ... + @overload + def __floordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + @overload + def __floordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + @overload + def __floordiv__(self, other: complex, /) -> float64 | complex128: ... + @overload + def __rfloordiv__(self, other: _Float64_co, /) -> float64: ... 
+    @overload
+    def __rfloordiv__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ...
+    @overload
+    def __rfloordiv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+    @overload
+    def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ...
+
+    @overload
+    def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ...
+    @overload
+    def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ...
+    @overload
+    def __pow__(
+        self, other: complexfloating[_NBit1, _NBit2], mod: None = None, /
+    ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+    @overload
+    def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ...
+    @overload
+    def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ...
+    @overload
+    def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ...
+    @overload
+    def __rpow__(
+        self, other: complexfloating[_NBit1, _NBit2], mod: None = None, /
+    ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ...
+    @overload
+    def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ...
+
+    def __mod__(self, other: _Float64_co, /) -> float64: ...  # type: ignore[override]
+    def __rmod__(self, other: _Float64_co, /) -> float64: ...  # type: ignore[override]
+
+    def __divmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ...  # type: ignore[override]
+    def __rdivmod__(self, other: _Float64_co, /) -> _2Tuple[float64]: ...  # type: ignore[override]
+
+half: TypeAlias = floating[_NBitHalf]
+single: TypeAlias = floating[_NBitSingle]
+double: TypeAlias = floating[_NBitDouble]
+longdouble: TypeAlias = floating[_NBitLongDouble]
+
+# The main reason for `complexfloating` having two typevars is cosmetic.
+# It is used to clarify why `complex128`'s precision is `_64Bit`, the latter
+# describing the two 64-bit floats representing its real and imaginary components.
+
+class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]):
+    @overload
+    def __new__(
+        cls,
+        real: complex | SupportsComplex | SupportsFloat | SupportsIndex = 0,
+        imag: complex | SupportsFloat | SupportsIndex = 0,
+        /,
+    ) -> Self: ...
+    @overload
+    def __new__(cls, real: _ConvertibleToComplex | None = 0, /) -> Self: ...
+
+    @property
+    def real(self) -> floating[_NBit1]: ...
+    @property
+    def imag(self) -> floating[_NBit2]: ...
+
+    # NOTE: `__complex__` is technically defined in the concrete subtypes
+    def __complex__(self, /) -> complex: ...
+    def __abs__(self, /) -> floating[_NBit1 | _NBit2]: ...  # type: ignore[override]
+
+    @overload
+    def __add__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __add__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __add__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __radd__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __radd__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __radd__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload
+    def __sub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __sub__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __sub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __rsub__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rsub__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __rsub__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload
+    def __mul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __mul__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __mul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __rmul__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rmul__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __rmul__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload
+    def __truediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __truediv__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __truediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __rtruediv__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rtruediv__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+    @overload
+    def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __pow__(
+        self, other: complex | float64 | complex128, mod: None = None, /
+    ) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __pow__(
+        self, other: number[_NBit], mod: None = None, /
+    ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+    @overload
+    def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ...
+    @overload
+    def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ...
+    @overload
+    def __rpow__(
+        self, other: number[_NBit], mod: None = None, /
+    ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ...
+
+complex64: TypeAlias = complexfloating[_32Bit, _32Bit]
+
+class complex128(complexfloating[_64Bit, _64Bit], complex):
+    @property
+    def itemsize(self) -> L[16]: ...
+    @property
+    def nbytes(self) -> L[16]: ...
+
+    # overrides for `complexfloating` and `builtins.complex` compatibility
+    @property
+    def real(self) -> float64: ...
+    @property
+    def imag(self) -> float64: ...
+    def conjugate(self) -> Self: ...
+    def __abs__(self) -> float64: ...  # type: ignore[override]
+    def __getnewargs__(self, /) -> tuple[float, float]: ...
+
+    # complex128-specific operator overrides
+    @overload
+    def __add__(self, other: _Complex128_co, /) -> complex128: ...
+ @overload + def __add__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __radd__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __sub__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __sub__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rsub__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __mul__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __mul__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rmul__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __truediv__(self, other: _Complex128_co, /) -> complex128: ... + @overload + def __truediv__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... + + @overload + def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + @overload + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... + +csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] +cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] +clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] + +class timedelta64(_IntegralMixin, generic[_TD64ItemT_co], Generic[_TD64ItemT_co]): + @property + def itemsize(self) -> L[8]: ... + @property + def nbytes(self) -> L[8]: ... + + @overload + def __new__(cls, value: _TD64ItemT_co | timedelta64[_TD64ItemT_co], /) -> Self: ... + @overload + def __new__(cls, /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: _NaTValue | None, format: _TimeUnitSpec, /) -> timedelta64[None]: ... + @overload + def __new__(cls, value: L[0], format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[L[0]]: ... + @overload + def __new__(cls, value: _IntLike_co, format: _TimeUnitSpec[_IntTD64Unit] = ..., /) -> timedelta64[int]: ... + @overload + def __new__(cls, value: dt.timedelta, format: _TimeUnitSpec[_IntTimeUnit], /) -> timedelta64[int]: ... + @overload + def __new__( + cls, + value: dt.timedelta | _IntLike_co, + format: _TimeUnitSpec[_NativeTD64Unit] = ..., + /, + ) -> timedelta64[dt.timedelta]: ... + @overload + def __new__(cls, value: _ConvertibleToTD64, format: _TimeUnitSpec = ..., /) -> Self: ... + + # inherited at runtime from `signedinteger` + def __class_getitem__(cls, type_arg: type | object, /) -> GenericAlias: ... + + # NOTE: Only a limited number of units support conversion + # to builtin scalar types: `Y`, `M`, `ns`, `ps`, `fs`, `as` + def __int__(self: timedelta64[int], /) -> int: ... + def __float__(self: timedelta64[int], /) -> float: ... + + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __abs__(self, /) -> Self: ... + + @overload + def __add__(self: timedelta64[None], x: _TD64Like_co, /) -> timedelta64[None]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ... + @overload + def __add__(self: timedelta64[int], x: timedelta64, /) -> timedelta64[int | None]: ... + @overload + def __add__(self: timedelta64[dt.timedelta], x: _AnyDateOrTime, /) -> _AnyDateOrTime: ... 
+ @overload + def __add__(self: timedelta64[_AnyTD64Item], x: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __add__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + __radd__ = __add__ + + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: int | np.integer | np.bool, /) -> timedelta64[_AnyTD64Item]: ... + @overload + def __mul__(self: timedelta64[_AnyTD64Item], x: float | np.floating, /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mul__(self, x: float | np.floating | np.integer | np.bool, /) -> timedelta64: ... + __rmul__ = __mul__ + + @overload + def __mod__(self, x: timedelta64[L[0] | None], /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __mod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __mod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __mod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __mod__(self, x: timedelta64, /) -> timedelta64: ... + + # the L[0] makes __mod__ non-commutative, which the first two overloads reflect + @overload + def __rmod__(self, x: timedelta64[None], /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> timedelta64[None]: ... + @overload + def __rmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> timedelta64[int | None]: ... + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item | None]: ... + @overload + def __rmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> dt.timedelta: ... + @overload + def __rmod__(self, x: timedelta64[int], /) -> timedelta64[int | None]: ... + @overload + def __rmod__(self, x: timedelta64, /) -> timedelta64: ... + + # keep in sync with __mod__ + @overload + def __divmod__(self, x: timedelta64[L[0] | None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __divmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... + @overload + def __divmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ... + @overload + def __divmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __divmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ... + + # keep in sync with __rmod__ + @overload + def __rdivmod__(self, x: timedelta64[None], /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[L[0] | None], x: timedelta64, /) -> tuple[int64, timedelta64[None]]: ... + @overload + def __rdivmod__(self: timedelta64[int], x: timedelta64[int | dt.timedelta], /) -> tuple[int64, timedelta64[int | None]]: ... + @overload + def __rdivmod__(self: timedelta64[dt.timedelta], x: timedelta64[_AnyTD64Item], /) -> tuple[int64, timedelta64[_AnyTD64Item | None]]: ... 
+    @overload
+    def __rdivmod__(self: timedelta64[dt.timedelta], x: dt.timedelta, /) -> tuple[int, dt.timedelta]: ...
+    @overload
+    def __rdivmod__(self, x: timedelta64[int], /) -> tuple[int64, timedelta64[int | None]]: ...
+    @overload
+    def __rdivmod__(self, x: timedelta64, /) -> tuple[int64, timedelta64]: ...
+
+    @overload
+    def __sub__(self: timedelta64[None], b: _TD64Like_co, /) -> timedelta64[None]: ...
+    @overload
+    def __sub__(self: timedelta64[int], b: timedelta64[int | dt.timedelta], /) -> timedelta64[int]: ...
+    @overload
+    def __sub__(self: timedelta64[int], b: timedelta64, /) -> timedelta64[int | None]: ...
+    @overload
+    def __sub__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> dt.timedelta: ...
+    @overload
+    def __sub__(self: timedelta64[_AnyTD64Item], b: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ...
+    @overload
+    def __sub__(self, b: timedelta64[None], /) -> timedelta64[None]: ...
+
+    @overload
+    def __rsub__(self: timedelta64[None], a: _TD64Like_co, /) -> timedelta64[None]: ...
+    @overload
+    def __rsub__(self: timedelta64[dt.timedelta], a: _AnyDateOrTime, /) -> _AnyDateOrTime: ...
+    @overload
+    def __rsub__(self: timedelta64[dt.timedelta], a: timedelta64[_AnyTD64Item], /) -> timedelta64[_AnyTD64Item]: ...
+    @overload
+    def __rsub__(self: timedelta64[_AnyTD64Item], a: timedelta64[_AnyTD64Item] | _IntLike_co, /) -> timedelta64[_AnyTD64Item]: ...
+    @overload
+    def __rsub__(self, a: timedelta64[None], /) -> timedelta64[None]: ...
+    @overload
+    def __rsub__(self, a: datetime64[None], /) -> datetime64[None]: ...
+
+    @overload
+    def __truediv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> float: ...
+    @overload
+    def __truediv__(self, b: timedelta64, /) -> float64: ...
+    @overload
+    def __truediv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ...
+    @overload
+    def __truediv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+    @overload
+    def __truediv__(self, b: float | floating | integer, /) -> timedelta64: ...
+    @overload
+    def __rtruediv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> float: ...
+    @overload
+    def __rtruediv__(self, a: timedelta64, /) -> float64: ...
+
+    @overload
+    def __floordiv__(self: timedelta64[dt.timedelta], b: dt.timedelta, /) -> int: ...
+    @overload
+    def __floordiv__(self, b: timedelta64, /) -> int64: ...
+    @overload
+    def __floordiv__(self: timedelta64[_AnyTD64Item], b: int | integer, /) -> timedelta64[_AnyTD64Item]: ...
+    @overload
+    def __floordiv__(self: timedelta64[_AnyTD64Item], b: float | floating, /) -> timedelta64[_AnyTD64Item | None]: ...
+    @overload
+    def __rfloordiv__(self: timedelta64[dt.timedelta], a: dt.timedelta, /) -> int: ...
+    @overload
+    def __rfloordiv__(self, a: timedelta64, /) -> int64: ...
+
+    # comparison ops
+
+    @overload
+    def __lt__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __lt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
+    @overload
+    def __le__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __le__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self, other: _SupportsGE, /) -> bool_: ...
+
+    @overload
+    def __gt__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __gt__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self, other: _SupportsLT, /) -> bool_: ...
+
+    @overload
+    def __ge__(self, other: _TD64Like_co, /) -> bool_: ...
+    @overload
+    def __ge__(self, other: _ArrayLikeTD64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
+    @overload
+    def __ge__(self, other: _SupportsLE, /) -> bool_: ...
+
+class datetime64(_RealMixin, generic[_DT64ItemT_co], Generic[_DT64ItemT_co]):
+    @property
+    def itemsize(self) -> L[8]: ...
+    @property
+    def nbytes(self) -> L[8]: ...
+
+    @overload
+    def __new__(cls, value: datetime64[_DT64ItemT_co], /) -> Self: ...
+    @overload
+    def __new__(cls, value: _AnyDT64Arg, /) -> datetime64[_AnyDT64Arg]: ...
+    @overload
+    def __new__(cls, value: _NaTValue | None = ..., format: _TimeUnitSpec = ..., /) -> datetime64[None]: ...
+    @overload
+    def __new__(cls, value: _DT64Now, format: _TimeUnitSpec[_NativeTimeUnit] = ..., /) -> datetime64[dt.datetime]: ...
+    @overload
+    def __new__(cls, value: _DT64Date, format: _TimeUnitSpec[_DateUnit] = ..., /) -> datetime64[dt.date]: ...
+    @overload
+    def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_IntTimeUnit], /) -> datetime64[int]: ...
+    @overload
+    def __new__(
+        cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_NativeTimeUnit], /
+    ) -> datetime64[dt.datetime]: ...
+    @overload
+    def __new__(cls, value: int | bytes | str | dt.date, format: _TimeUnitSpec[_DateUnit], /) -> datetime64[dt.date]: ...
+    @overload
+    def __new__(cls, value: bytes | str | dt.date | None, format: _TimeUnitSpec = ..., /) -> Self: ...
+
+    @overload
+    def __add__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+    @overload
+    def __add__(self: datetime64[None], x: _TD64Like_co, /) -> datetime64[None]: ...
+    @overload
+    def __add__(self: datetime64[int], x: timedelta64[int | dt.timedelta], /) -> datetime64[int]: ...
+    @overload
+    def __add__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ...
+    @overload
+    def __add__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ...
+    @overload
+    def __add__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[int]: ...
+    @overload
+    def __add__(self, x: datetime64[None], /) -> datetime64[None]: ...
+    @overload
+    def __add__(self, x: _TD64Like_co, /) -> datetime64: ...
+    __radd__ = __add__
+
+    @overload
+    def __sub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+    @overload
+    def __sub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ...
+    @overload
+    def __sub__(self: datetime64[None], x: timedelta64, /) -> datetime64[None]: ...
+    @overload
+    def __sub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ...
+    @overload
+    def __sub__(self: datetime64[int], x: timedelta64, /) -> datetime64[int]: ...
+    @overload
+    def __sub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ...
+    @overload
+    def __sub__(self: datetime64[dt.datetime], x: timedelta64[int], /) -> datetime64[int]: ...
+    @overload
+    def __sub__(self: datetime64[dt.datetime], x: timedelta64[dt.timedelta], /) -> datetime64[dt.datetime]: ...
+    @overload
+    def __sub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ...
+    @overload
+    def __sub__(self: datetime64[dt.date], x: timedelta64[int], /) -> datetime64[dt.date | int]: ...
+    @overload
+    def __sub__(self: datetime64[dt.date], x: timedelta64[dt.timedelta], /) -> datetime64[dt.date]: ...
+    @overload
+    def __sub__(self: datetime64[dt.date], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ...
+    @overload
+    def __sub__(self, x: timedelta64[None], /) -> datetime64[None]: ...
+    @overload
+    def __sub__(self, x: datetime64[None], /) -> timedelta64[None]: ...
+    @overload
+    def __sub__(self, x: _TD64Like_co, /) -> datetime64: ...
+    @overload
+    def __sub__(self, x: datetime64, /) -> timedelta64: ...
+
+    @overload
+    def __rsub__(self: datetime64[_AnyDT64Item], x: int | integer | np.bool, /) -> datetime64[_AnyDT64Item]: ...
+    @overload
+    def __rsub__(self: datetime64[_AnyDate], x: _AnyDate, /) -> dt.timedelta: ...
+    @overload
+    def __rsub__(self: datetime64[None], x: datetime64, /) -> timedelta64[None]: ...
+    @overload
+    def __rsub__(self: datetime64[int], x: datetime64, /) -> timedelta64[int]: ...
+    @overload
+    def __rsub__(self: datetime64[dt.datetime], x: datetime64[int], /) -> timedelta64[int]: ...
+    @overload
+    def __rsub__(self: datetime64[dt.datetime], x: datetime64[dt.date], /) -> timedelta64[dt.timedelta]: ...
+    @overload
+    def __rsub__(self, x: datetime64[None], /) -> timedelta64[None]: ...
+    @overload
+    def __rsub__(self, x: datetime64, /) -> timedelta64: ...
+
+    @overload
+    def __lt__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __lt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGT], /) -> NDArray[bool_]: ...
+    @overload
+    def __lt__(self, other: _SupportsGT, /) -> bool_: ...
+
+    @overload
+    def __le__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __le__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsGE], /) -> NDArray[bool_]: ...
+    @overload
+    def __le__(self, other: _SupportsGE, /) -> bool_: ...
+
+    @overload
+    def __gt__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __gt__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLT], /) -> NDArray[bool_]: ...
+    @overload
+    def __gt__(self, other: _SupportsLT, /) -> bool_: ...
+
+    @overload
+    def __ge__(self, other: datetime64, /) -> bool_: ...
+    @overload
+    def __ge__(self, other: _ArrayLikeDT64_co | _NestedSequence[_SupportsLE], /) -> NDArray[bool_]: ...
+    @overload
+    def __ge__(self, other: _SupportsLE, /) -> bool_: ...
+
+class flexible(_RealMixin, generic[_FlexibleItemT_co], Generic[_FlexibleItemT_co]): ...  # type: ignore[misc]
+
+class void(flexible[bytes | tuple[Any, ...]]):
+    @overload
+    def __new__(cls, value: _IntLike_co | bytes, /, dtype: None = None) -> Self: ...
+    @overload
+    def __new__(cls, value: Any, /, dtype: _DTypeLikeVoid) -> Self: ...
+
+    @overload
+    def __getitem__(self, key: str | SupportsIndex, /) -> Any: ...
+    @overload
+    def __getitem__(self, key: list[str], /) -> void: ...
+    def __setitem__(self, key: str | list[str] | SupportsIndex, value: ArrayLike, /) -> None: ...
+
+    def setfield(self, val: ArrayLike, dtype: DTypeLike, offset: int = ...) -> None: ...
+
+class character(flexible[_CharacterItemT_co], Generic[_CharacterItemT_co]):
+    @abstractmethod
+    def __new__(cls, value: object = ..., /) -> Self: ...
+
+# NOTE: Most `np.bytes_` / `np.str_` methods return their builtin `bytes` / `str` counterpart
+
+class bytes_(character[bytes], bytes):
+    @overload
+    def __new__(cls, o: object = ..., /) -> Self: ...
+    @overload
+    def __new__(cls, s: str, /, encoding: str, errors: str = ...) -> Self: ...
+
+    #
+    def __bytes__(self, /) -> bytes: ...
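+
+# Illustrative sketch (not part of the stubs): per the NOTE above, methods
+# inherited from the builtins keep returning the builtin type rather than the
+# NumPy subclass:
+#
+#     >>> import numpy as np
+#     >>> type(np.bytes_(b"abc").upper())
+#     <class 'bytes'>
+#     >>> type(np.str_("abc").upper())
+#     <class 'str'>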
+
+class str_(character[str], str):
+    @overload
+    def __new__(cls, value: object = ..., /) -> Self: ...
+    @overload
+    def __new__(cls, value: bytes, /, encoding: str = ..., errors: str = ...) -> Self: ...
+
+# See `numpy._typing._ufunc` for more concrete nin-/nout-specific stubs
+@final
+class ufunc:
+    @property
+    def __name__(self) -> LiteralString: ...
+    @property
+    def __qualname__(self) -> LiteralString: ...
+    @property
+    def __doc__(self) -> str: ...
+    @property
+    def nin(self) -> int: ...
+    @property
+    def nout(self) -> int: ...
+    @property
+    def nargs(self) -> int: ...
+    @property
+    def ntypes(self) -> int: ...
+    @property
+    def types(self) -> list[LiteralString]: ...
+    # Broad return type because it has to encompass things like
+    #
+    #     >>> np.logical_and.identity is True
+    #     True
+    #     >>> np.add.identity is 0
+    #     True
+    #     >>> np.sin.identity is None
+    #     True
+    #
+    # and any user-defined ufuncs.
+    @property
+    def identity(self) -> Any: ...
+    # This is None for ufuncs and a string for gufuncs.
+    @property
+    def signature(self) -> LiteralString | None: ...
+
+    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
+    # The next four methods will always exist, but they will just
+    # raise a ValueError for ufuncs that don't accept two input
+    # arguments and return one output argument. Because of that we
+    # can't type them very precisely.
+    def reduce(self, /, *args: Any, **kwargs: Any) -> Any: ...
+    def accumulate(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
+    def reduceat(self, /, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
+    def outer(self, *args: Any, **kwargs: Any) -> Any: ...
+    # Similarly, `at` won't be defined for ufuncs that return multiple
+    # outputs, so we can't type it very precisely.
+    def at(self, /, *args: Any, **kwargs: Any) -> None: ...
+
+    #
+    def resolve_dtypes(
+        self,
+        /,
+        dtypes: tuple[dtype | type | None, ...],
+        *,
+        signature: tuple[dtype | None, ...] | None = None,
+        casting: _CastingKind | None = None,
+        reduction: builtins.bool = False,
+    ) -> tuple[dtype, ...]: ...
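+
+# Illustrative sketch (not part of the stubs): the three parameters spelled out
+# in the declarations below can be inspected on any ufunc instance; the values
+# here come straight from the annotations (e.g. `np.add` is typed with
+# `ntypes=L[22]` and `identity=L[0]`):
+#
+#     >>> import numpy as np
+#     >>> np.add.__name__, np.add.ntypes, np.add.identity
+#     ('add', 22, 0)
+#     >>> np.matmul.signature     # gufuncs also carry a signature string
+#     '(n?,k),(k,m?)->(n?,m?)'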
+
+# Parameters: `__name__`, `ntypes` and `identity`
+absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None]
+add: _UFunc_Nin2_Nout1[L['add'], L[22], L[0]]
+arccos: _UFunc_Nin1_Nout1[L['arccos'], L[8], None]
+arccosh: _UFunc_Nin1_Nout1[L['arccosh'], L[8], None]
+arcsin: _UFunc_Nin1_Nout1[L['arcsin'], L[8], None]
+arcsinh: _UFunc_Nin1_Nout1[L['arcsinh'], L[8], None]
+arctan2: _UFunc_Nin2_Nout1[L['arctan2'], L[5], None]
+arctan: _UFunc_Nin1_Nout1[L['arctan'], L[8], None]
+arctanh: _UFunc_Nin1_Nout1[L['arctanh'], L[8], None]
+bitwise_and: _UFunc_Nin2_Nout1[L['bitwise_and'], L[12], L[-1]]
+bitwise_count: _UFunc_Nin1_Nout1[L['bitwise_count'], L[11], None]
+bitwise_not: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
+bitwise_or: _UFunc_Nin2_Nout1[L['bitwise_or'], L[12], L[0]]
+bitwise_xor: _UFunc_Nin2_Nout1[L['bitwise_xor'], L[12], L[0]]
+cbrt: _UFunc_Nin1_Nout1[L['cbrt'], L[5], None]
+ceil: _UFunc_Nin1_Nout1[L['ceil'], L[7], None]
+conj: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
+conjugate: _UFunc_Nin1_Nout1[L['conjugate'], L[18], None]
+copysign: _UFunc_Nin2_Nout1[L['copysign'], L[4], None]
+cos: _UFunc_Nin1_Nout1[L['cos'], L[9], None]
+cosh: _UFunc_Nin1_Nout1[L['cosh'], L[8], None]
+deg2rad: _UFunc_Nin1_Nout1[L['deg2rad'], L[5], None]
+degrees: _UFunc_Nin1_Nout1[L['degrees'], L[5], None]
+divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
+divmod: _UFunc_Nin2_Nout2[L['divmod'], L[15], None]
+equal: _UFunc_Nin2_Nout1[L['equal'], L[23], None]
+exp2: _UFunc_Nin1_Nout1[L['exp2'], L[8], None]
+exp: _UFunc_Nin1_Nout1[L['exp'], L[10], None]
+expm1: _UFunc_Nin1_Nout1[L['expm1'], L[8], None]
+fabs: _UFunc_Nin1_Nout1[L['fabs'], L[5], None]
+float_power: _UFunc_Nin2_Nout1[L['float_power'], L[4], None]
+floor: _UFunc_Nin1_Nout1[L['floor'], L[7], None]
+floor_divide: _UFunc_Nin2_Nout1[L['floor_divide'], L[21], None]
+fmax: _UFunc_Nin2_Nout1[L['fmax'], L[21], None]
+fmin: _UFunc_Nin2_Nout1[L['fmin'], L[21], None]
+fmod: _UFunc_Nin2_Nout1[L['fmod'], L[15], None]
+frexp: _UFunc_Nin1_Nout2[L['frexp'], L[4], None]
+gcd: _UFunc_Nin2_Nout1[L['gcd'], L[11], L[0]]
+greater: _UFunc_Nin2_Nout1[L['greater'], L[23], None]
+greater_equal: _UFunc_Nin2_Nout1[L['greater_equal'], L[23], None]
+heaviside: _UFunc_Nin2_Nout1[L['heaviside'], L[4], None]
+hypot: _UFunc_Nin2_Nout1[L['hypot'], L[5], L[0]]
+invert: _UFunc_Nin1_Nout1[L['invert'], L[12], None]
+isfinite: _UFunc_Nin1_Nout1[L['isfinite'], L[20], None]
+isinf: _UFunc_Nin1_Nout1[L['isinf'], L[20], None]
+isnan: _UFunc_Nin1_Nout1[L['isnan'], L[20], None]
+isnat: _UFunc_Nin1_Nout1[L['isnat'], L[2], None]
+lcm: _UFunc_Nin2_Nout1[L['lcm'], L[11], None]
+ldexp: _UFunc_Nin2_Nout1[L['ldexp'], L[8], None]
+left_shift: _UFunc_Nin2_Nout1[L['left_shift'], L[11], None]
+less: _UFunc_Nin2_Nout1[L['less'], L[23], None]
+less_equal: _UFunc_Nin2_Nout1[L['less_equal'], L[23], None]
+log10: _UFunc_Nin1_Nout1[L['log10'], L[8], None]
+log1p: _UFunc_Nin1_Nout1[L['log1p'], L[8], None]
+log2: _UFunc_Nin1_Nout1[L['log2'], L[8], None]
+log: _UFunc_Nin1_Nout1[L['log'], L[10], None]
+logaddexp2: _UFunc_Nin2_Nout1[L['logaddexp2'], L[4], float]
+logaddexp: _UFunc_Nin2_Nout1[L['logaddexp'], L[4], float]
+logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]]
+logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None]
+logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]]
+logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]]
+matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]]
+matvec: _GUFunc_Nin2_Nout1[L['matvec'], L[19], None, L["(m,n),(n)->(m)"]]
+maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None]
+minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None]
+mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
+modf: _UFunc_Nin1_Nout2[L['modf'], L[4], None]
+multiply: _UFunc_Nin2_Nout1[L['multiply'], L[23], L[1]]
+negative: _UFunc_Nin1_Nout1[L['negative'], L[19], None]
+nextafter: _UFunc_Nin2_Nout1[L['nextafter'], L[4], None]
+not_equal: _UFunc_Nin2_Nout1[L['not_equal'], L[23], None]
+positive: _UFunc_Nin1_Nout1[L['positive'], L[19], None]
+power: _UFunc_Nin2_Nout1[L['power'], L[18], None]
+rad2deg: _UFunc_Nin1_Nout1[L['rad2deg'], L[5], None]
+radians: _UFunc_Nin1_Nout1[L['radians'], L[5], None]
+reciprocal: _UFunc_Nin1_Nout1[L['reciprocal'], L[18], None]
+remainder: _UFunc_Nin2_Nout1[L['remainder'], L[16], None]
+right_shift: _UFunc_Nin2_Nout1[L['right_shift'], L[11], None]
+rint: _UFunc_Nin1_Nout1[L['rint'], L[10], None]
+sign: _UFunc_Nin1_Nout1[L['sign'], L[19], None]
+signbit: _UFunc_Nin1_Nout1[L['signbit'], L[4], None]
+sin: _UFunc_Nin1_Nout1[L['sin'], L[9], None]
+sinh: _UFunc_Nin1_Nout1[L['sinh'], L[8], None]
+spacing: _UFunc_Nin1_Nout1[L['spacing'], L[4], None]
+sqrt: _UFunc_Nin1_Nout1[L['sqrt'], L[10], None]
+square: _UFunc_Nin1_Nout1[L['square'], L[18], None]
+subtract: _UFunc_Nin2_Nout1[L['subtract'], L[21], None]
+tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None]
+tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None]
+true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None]
+trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None]
+vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]]
+vecmat: _GUFunc_Nin2_Nout1[L['vecmat'], L[19], None, L["(n),(n,m)->(m)"]]
+
+abs = absolute
+acos = arccos
+acosh = arccosh
+asin = arcsin
+asinh = arcsinh
+atan = arctan
+atanh = arctanh
+atan2 = arctan2
+concat = concatenate
+bitwise_left_shift = left_shift
+bitwise_invert = invert
+bitwise_right_shift = right_shift
+permute_dims = transpose
+pow = power
+
+# TODO: The type of each `__next__` and `iters` return-type depends
+# on the length and dtype of `args`; we can't describe this behavior yet
+# as we lack variadics (PEP 646).
+@final
+class broadcast:
+    def __new__(cls, *args: ArrayLike) -> broadcast: ...
+    @property
+    def index(self) -> int: ...
+    @property
+    def iters(self) -> tuple[flatiter[Any], ...]: ...
+    @property
+    def nd(self) -> int: ...
+    @property
+    def ndim(self) -> int: ...
+    @property
+    def numiter(self) -> int: ...
+    @property
+    def shape(self) -> _AnyShape: ...
+    @property
+    def size(self) -> int: ...
+    def __next__(self) -> tuple[Any, ...]: ...
+    def __iter__(self) -> Self: ...
+    def reset(self) -> None: ...
+
+@final
+class busdaycalendar:
+    def __new__(
+        cls,
+        weekmask: ArrayLike = ...,
+        holidays: ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
+    ) -> busdaycalendar: ...
+    @property
+    def weekmask(self) -> NDArray[np.bool]: ...
+    @property
+    def holidays(self) -> NDArray[datetime64]: ...
+
+class finfo(Generic[_FloatingT_co]):
+    dtype: Final[dtype[_FloatingT_co]]
+    bits: Final[int]
+    eps: Final[_FloatingT_co]
+    epsneg: Final[_FloatingT_co]
+    iexp: Final[int]
+    machep: Final[int]
+    max: Final[_FloatingT_co]
+    maxexp: Final[int]
+    min: Final[_FloatingT_co]
+    minexp: Final[int]
+    negep: Final[int]
+    nexp: Final[int]
+    nmant: Final[int]
+    precision: Final[int]
+    resolution: Final[_FloatingT_co]
+    smallest_subnormal: Final[_FloatingT_co]
+    @property
+    def smallest_normal(self) -> _FloatingT_co: ...
+    @property
+    def tiny(self) -> _FloatingT_co: ...
+ @overload + def __new__(cls, dtype: inexact[_NBit1] | _DTypeLike[inexact[_NBit1]]) -> finfo[floating[_NBit1]]: ... + @overload + def __new__(cls, dtype: complex | type[complex]) -> finfo[float64]: ... + @overload + def __new__(cls, dtype: str) -> finfo[floating]: ... + +class iinfo(Generic[_IntegerT_co]): + dtype: Final[dtype[_IntegerT_co]] + kind: Final[LiteralString] + bits: Final[int] + key: Final[LiteralString] + @property + def min(self) -> int: ... + @property + def max(self) -> int: ... + + @overload + def __new__( + cls, dtype: _IntegerT_co | _DTypeLike[_IntegerT_co] + ) -> iinfo[_IntegerT_co]: ... + @overload + def __new__(cls, dtype: int | type[int]) -> iinfo[int_]: ... + @overload + def __new__(cls, dtype: str) -> iinfo[Any]: ... + +@final +class nditer: + def __new__( + cls, + op: ArrayLike | Sequence[ArrayLike | None], + flags: Sequence[_NDIterFlagsKind] | None = ..., + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + op_axes: Sequence[Sequence[SupportsIndex]] | None = ..., + itershape: _ShapeLike | None = ..., + buffersize: SupportsIndex = ..., + ) -> nditer: ... + def __enter__(self) -> nditer: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: ... + def __iter__(self) -> nditer: ... + def __next__(self) -> tuple[NDArray[Any], ...]: ... + def __len__(self) -> int: ... + def __copy__(self) -> nditer: ... + @overload + def __getitem__(self, index: SupportsIndex) -> NDArray[Any]: ... + @overload + def __getitem__(self, index: slice) -> tuple[NDArray[Any], ...]: ... + def __setitem__(self, index: slice | SupportsIndex, value: ArrayLike) -> None: ... + def close(self) -> None: ... + def copy(self) -> nditer: ... + def debug_print(self) -> None: ... + def enable_external_loop(self) -> None: ... + def iternext(self) -> builtins.bool: ... + def remove_axis(self, i: SupportsIndex, /) -> None: ... + def remove_multi_index(self) -> None: ... + def reset(self) -> None: ... + @property + def dtypes(self) -> tuple[dtype, ...]: ... + @property + def finished(self) -> builtins.bool: ... + @property + def has_delayed_bufalloc(self) -> builtins.bool: ... + @property + def has_index(self) -> builtins.bool: ... + @property + def has_multi_index(self) -> builtins.bool: ... + @property + def index(self) -> int: ... + @property + def iterationneedsapi(self) -> builtins.bool: ... + @property + def iterindex(self) -> int: ... + @property + def iterrange(self) -> tuple[int, ...]: ... + @property + def itersize(self) -> int: ... + @property + def itviews(self) -> tuple[NDArray[Any], ...]: ... + @property + def multi_index(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def nop(self) -> int: ... + @property + def operands(self) -> tuple[NDArray[Any], ...]: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def value(self) -> tuple[NDArray[Any], ...]: ... + +class memmap(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] + filename: str | None + offset: int + mode: str + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: type[uint8] = ..., + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: int | tuple[int, ...] | None = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[uint8]]: ... 
+ @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: _DTypeLike[_ScalarT], + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: int | tuple[int, ...] | None = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype[_ScalarT]]: ... + @overload + def __new__( + subtype, + filename: StrOrBytesPath | _SupportsFileMethodsRW, + dtype: DTypeLike, + mode: _MemMapModeKind = ..., + offset: int = ..., + shape: int | tuple[int, ...] | None = ..., + order: _OrderKACF = ..., + ) -> memmap[Any, dtype]: ... + def __array_finalize__(self, obj: object) -> None: ... + def __array_wrap__( + self, + array: memmap[_ShapeT_co, _DTypeT_co], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: builtins.bool = ..., + ) -> Any: ... + def flush(self) -> None: ... + +# TODO: Add a mypy plugin for managing functions whose output type is dependent +# on the literal value of some sort of signature (e.g. `einsum` and `vectorize`) +class vectorize: + pyfunc: Callable[..., Any] + cache: builtins.bool + signature: LiteralString | None + otypes: LiteralString | None + excluded: set[int | str] + __doc__: str | None + def __init__( + self, + /, + pyfunc: Callable[..., Any] | _NoValueType = ..., # = _NoValue + otypes: str | Iterable[DTypeLike] | None = None, + doc: str | None = None, + excluded: Iterable[int | str] | None = None, + cache: builtins.bool = False, + signature: str | None = None, + ) -> None: ... + def __call__(self, *args: Any, **kwargs: Any) -> Any: ... + +class poly1d: + @property + def variable(self) -> LiteralString: ... + @property + def order(self) -> int: ... + @property + def o(self) -> int: ... + @property + def roots(self) -> NDArray[Any]: ... + @property + def r(self) -> NDArray[Any]: ... + + @property + def coeffs(self) -> NDArray[Any]: ... + @coeffs.setter + def coeffs(self, value: NDArray[Any]) -> None: ... + + @property + def c(self) -> NDArray[Any]: ... + @c.setter + def c(self, value: NDArray[Any]) -> None: ... + + @property + def coef(self) -> NDArray[Any]: ... + @coef.setter + def coef(self, value: NDArray[Any]) -> None: ... + + @property + def coefficients(self) -> NDArray[Any]: ... + @coefficients.setter + def coefficients(self, value: NDArray[Any]) -> None: ... + + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + + @overload + def __array__(self, /, t: None = None, copy: builtins.bool | None = None) -> ndarray[tuple[int], dtype]: ... + @overload + def __array__(self, /, t: _DTypeT, copy: builtins.bool | None = None) -> ndarray[tuple[int], _DTypeT]: ... + + @overload + def __call__(self, val: _ScalarLike_co) -> Any: ... + @overload + def __call__(self, val: poly1d) -> poly1d: ... + @overload + def __call__(self, val: ArrayLike) -> NDArray[Any]: ... + + def __init__( + self, + c_or_r: ArrayLike, + r: builtins.bool = ..., + variable: str | None = ..., + ) -> None: ... + def __len__(self) -> int: ... + def __neg__(self) -> poly1d: ... + def __pos__(self) -> poly1d: ... + def __mul__(self, other: ArrayLike, /) -> poly1d: ... + def __rmul__(self, other: ArrayLike, /) -> poly1d: ... + def __add__(self, other: ArrayLike, /) -> poly1d: ... + def __radd__(self, other: ArrayLike, /) -> poly1d: ... + def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike, /) -> poly1d: ... + def __rsub__(self, other: ArrayLike, /) -> poly1d: ... + def __truediv__(self, other: ArrayLike, /) -> poly1d: ... 
+ def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... + def __getitem__(self, val: int, /) -> Any: ... + def __setitem__(self, key: int, val: Any, /) -> None: ... + def __iter__(self) -> Iterator[Any]: ... + def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... + def integ( + self, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ..., + ) -> poly1d: ... + +class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): + __array_priority__: ClassVar[float] = 10.0 # pyright: ignore[reportIncompatibleMethodOverride] + + def __new__( + subtype, # pyright: ignore[reportSelfClsParameterName] + data: ArrayLike, + dtype: DTypeLike = ..., + copy: builtins.bool = ..., + ) -> matrix[_2D, Incomplete]: ... + def __array_finalize__(self, obj: object) -> None: ... + + @overload # type: ignore[override] + def __getitem__( + self, key: SupportsIndex | _ArrayLikeInt_co | tuple[SupportsIndex | _ArrayLikeInt_co, ...], / + ) -> Incomplete: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> matrix[_2D, _DTypeT_co]: ... + @overload + def __getitem__(self: matrix[Any, dtype[void]], key: str, /) -> matrix[_2D, dtype]: ... + @overload + def __getitem__(self: matrix[Any, dtype[void]], key: list[str], /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __imul__(self, other: ArrayLike, /) -> Self: ... + + # + def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Incomplete]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ipow__(self, other: ArrayLike, /) -> Self: ... # type: ignore[misc, override] + + # keep in sync with `prod` and `mean` + @overload # type: ignore[override] + def sum(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def sum(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... + @overload + def sum(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def sum(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `mean` + @overload # type: ignore[override] + def prod(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... + @overload + def prod(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... + @overload + def prod(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def prod(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `sum` and `prod` + @overload # type: ignore[override] + def mean(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None) -> Incomplete: ... 
+ @overload + def mean(self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None) -> matrix[_2D, Incomplete]: ... + @overload + def mean(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def mean(self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `var` + @overload # type: ignore[override] + def std(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def std( + self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 + ) -> matrix[_2D, Incomplete]: ... + @overload + def std(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def std( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `std` + @overload # type: ignore[override] + def var(self, axis: None = None, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0) -> Incomplete: ... + @overload + def var( + self, axis: _ShapeLike, dtype: DTypeLike | None = None, out: None = None, ddof: float = 0 + ) -> matrix[_2D, Incomplete]: ... + @overload + def var(self, axis: _ShapeLike | None, dtype: DTypeLike | None, out: _ArrayT, ddof: float = 0) -> _ArrayT: ... + @overload + def var( # pyright: ignore[reportIncompatibleMethodOverride] + self, axis: _ShapeLike | None = None, dtype: DTypeLike | None = None, *, out: _ArrayT, ddof: float = 0 + ) -> _ArrayT: ... + + # keep in sync with `all` + @overload # type: ignore[override] + def any(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def any(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... + @overload + def any(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def any(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `any` + @overload # type: ignore[override] + def all(self, axis: None = None, out: None = None) -> np.bool: ... + @overload + def all(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[np.bool]]: ... + @overload + def all(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def all(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `min` and `ptp` + @overload # type: ignore[override] + def max(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def max(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def max(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def max(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `ptp` + @overload # type: ignore[override] + def min(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def min(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def min(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... 
+ @overload + def min(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `max` and `min` + @overload + def ptp(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> _ScalarT: ... + @overload + def ptp(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, _DTypeT_co]: ... + @overload + def ptp(self, axis: _ShapeLike | None, out: _ArrayT) -> _ArrayT: ... + @overload + def ptp(self, axis: _ShapeLike | None = None, *, out: _ArrayT) -> _ArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmin` + @overload # type: ignore[override] + def argmax(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ... + @overload + def argmax(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ... + @overload + def argmax(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + @overload + def argmax(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # keep in sync with `argmax` + @overload # type: ignore[override] + def argmin(self: NDArray[_ScalarT], axis: None = None, out: None = None) -> intp: ... + @overload + def argmin(self, axis: _ShapeLike, out: None = None) -> matrix[_2D, dtype[intp]]: ... + @overload + def argmin(self, axis: _ShapeLike | None, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... + @overload + def argmin(self, axis: _ShapeLike | None = None, *, out: _BoolOrIntArrayT) -> _BoolOrIntArrayT: ... # pyright: ignore[reportIncompatibleMethodOverride] + + #the second overload handles the (rare) case that the matrix is not 2-d + @overload + def tolist(self: matrix[_2D, dtype[generic[_T]]]) -> list[list[_T]]: ... # pyright: ignore[reportIncompatibleMethodOverride] + @overload + def tolist(self) -> Incomplete: ... # pyright: ignore[reportIncompatibleMethodOverride] + + # these three methods will at least return a `2-d` array of shape (1, n) + def squeeze(self, axis: _ShapeLike | None = None) -> matrix[_2D, _DTypeT_co]: ... + def ravel(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def flatten(self, /, order: _OrderKACF = "C") -> matrix[_2D, _DTypeT_co]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # matrix.T is inherited from _ScalarOrArrayCommon + def getT(self) -> Self: ... + @property + def I(self) -> matrix[_2D, Incomplete]: ... # noqa: E743 + def getI(self) -> matrix[_2D, Incomplete]: ... + @property + def A(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... + def getA(self) -> ndarray[_2DShapeT_co, _DTypeT_co]: ... + @property + def A1(self) -> ndarray[_AnyShape, _DTypeT_co]: ... + def getA1(self) -> ndarray[_AnyShape, _DTypeT_co]: ... + @property + def H(self) -> matrix[_2D, _DTypeT_co]: ... + def getH(self) -> matrix[_2D, _DTypeT_co]: ... + +def from_dlpack( + x: _SupportsDLPack[None], + /, + *, + device: L["cpu"] | None = None, + copy: builtins.bool | None = None, +) -> NDArray[number | np.bool]: ... 
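The reduction overloads in the matrix stub above encode matrix's departure from ndarray: reducing without an axis collapses to a scalar, while any explicit axis keeps a 2-D matrix. A minimal runtime sketch of what these annotations promise (illustrative only, not part of the diff; assumes a NumPy build where the legacy np.matrix class is still available):

import numpy as np

m = np.matrix([[1, 2], [3, 4]])

# No axis: the reduction collapses the whole matrix to a scalar.
assert m.sum() == 10

# An explicit axis keeps a 2-D matrix, here of shape (1, 2).
col_sums = m.sum(axis=0)
assert isinstance(col_sums, np.matrix) and col_sums.shape == (1, 2)

# squeeze/ravel/flatten also stay 2-D for matrix, matching the
# matrix[_2D, _DTypeT_co] return types above (ndarray.ravel would be 1-D).
assert m.ravel().shape == (1, 4)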
diff --git a/python/numpy/__pycache__/__config__.cpython-312.pyc b/python/numpy/__pycache__/__config__.cpython-312.pyc new file mode 100644 index 000000000..9a14d20ce Binary files /dev/null and b/python/numpy/__pycache__/__config__.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/__init__.cpython-312.pyc b/python/numpy/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..5b55a4318 Binary files /dev/null and b/python/numpy/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/_array_api_info.cpython-312.pyc b/python/numpy/__pycache__/_array_api_info.cpython-312.pyc new file mode 100644 index 000000000..36eeb8e4a Binary files /dev/null and b/python/numpy/__pycache__/_array_api_info.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/_configtool.cpython-312.pyc b/python/numpy/__pycache__/_configtool.cpython-312.pyc new file mode 100644 index 000000000..be8669576 Binary files /dev/null and b/python/numpy/__pycache__/_configtool.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/_distributor_init.cpython-312.pyc b/python/numpy/__pycache__/_distributor_init.cpython-312.pyc new file mode 100644 index 000000000..c815b6349 Binary files /dev/null and b/python/numpy/__pycache__/_distributor_init.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc b/python/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc new file mode 100644 index 000000000..70889b0d7 Binary files /dev/null and b/python/numpy/__pycache__/_expired_attrs_2_0.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/_globals.cpython-312.pyc b/python/numpy/__pycache__/_globals.cpython-312.pyc new file mode 100644 index 000000000..dbae131e8 Binary files /dev/null and b/python/numpy/__pycache__/_globals.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/_pytesttester.cpython-312.pyc b/python/numpy/__pycache__/_pytesttester.cpython-312.pyc new file mode 100644 index 000000000..f6248756a Binary files /dev/null and b/python/numpy/__pycache__/_pytesttester.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/conftest.cpython-312.pyc b/python/numpy/__pycache__/conftest.cpython-312.pyc new file mode 100644 index 000000000..822493856 Binary files /dev/null and b/python/numpy/__pycache__/conftest.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/dtypes.cpython-312.pyc b/python/numpy/__pycache__/dtypes.cpython-312.pyc new file mode 100644 index 000000000..69dccaecd Binary files /dev/null and b/python/numpy/__pycache__/dtypes.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/exceptions.cpython-312.pyc b/python/numpy/__pycache__/exceptions.cpython-312.pyc new file mode 100644 index 000000000..fb0bf6fec Binary files /dev/null and b/python/numpy/__pycache__/exceptions.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/matlib.cpython-312.pyc b/python/numpy/__pycache__/matlib.cpython-312.pyc new file mode 100644 index 000000000..063da2cdf Binary files /dev/null and b/python/numpy/__pycache__/matlib.cpython-312.pyc differ diff --git a/python/numpy/__pycache__/version.cpython-312.pyc b/python/numpy/__pycache__/version.cpython-312.pyc new file mode 100644 index 000000000..5488626f1 Binary files /dev/null and b/python/numpy/__pycache__/version.cpython-312.pyc differ diff --git a/python/numpy/_array_api_info.py b/python/numpy/_array_api_info.py new file mode 100644 index 000000000..067e38798 --- /dev/null +++ b/python/numpy/_array_api_info.py @@ -0,0 +1,346 @@ +""" +Array API 
Inspection namespace + +This is the namespace for inspection functions as defined by the array API +standard. See +https://data-apis.org/array-api/latest/API_specification/inspection.html for +more details. + +""" +from numpy._core import ( + bool, + complex64, + complex128, + dtype, + float32, + float64, + int8, + int16, + int32, + int64, + intp, + uint8, + uint16, + uint32, + uint64, +) + + +class __array_namespace_info__: + """ + Get the array API inspection namespace for NumPy. + + The array API inspection namespace defines the following functions: + + - capabilities() + - default_device() + - default_dtypes() + - dtypes() + - devices() + + See + https://data-apis.org/array-api/latest/API_specification/inspection.html + for more details. + + Returns + ------- + info : ModuleType + The array API inspection namespace for NumPy. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + + __module__ = 'numpy' + + def capabilities(self): + """ + Return a dictionary of array API library capabilities. + + The resulting dictionary has the following keys: + + - **"boolean indexing"**: boolean indicating whether an array library + supports boolean indexing. Always ``True`` for NumPy. + + - **"data-dependent shapes"**: boolean indicating whether an array + library supports data-dependent output shapes. Always ``True`` for + NumPy. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html + for more details. + + See Also + -------- + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Returns + ------- + capabilities : dict + A dictionary of array API library capabilities. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.capabilities() + {'boolean indexing': True, + 'data-dependent shapes': True, + 'max dimensions': 64} + + """ + return { + "boolean indexing": True, + "data-dependent shapes": True, + "max dimensions": 64, + } + + def default_device(self): + """ + The default device used for new NumPy arrays. + + For NumPy, this always returns ``'cpu'``. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Returns + ------- + device : str + The default device used for new NumPy arrays. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_device() + 'cpu' + + """ + return "cpu" + + def default_dtypes(self, *, device=None): + """ + The default data types used for new NumPy arrays. + + For NumPy, this always returns the following dictionary: + + - **"real floating"**: ``numpy.float64`` + - **"complex floating"**: ``numpy.complex128`` + - **"integral"**: ``numpy.intp`` + - **"indexing"**: ``numpy.intp`` + + Parameters + ---------- + device : str, optional + The device to get the default data types for. For NumPy, only + ``'cpu'`` is allowed. + + Returns + ------- + dtypes : dict + A dictionary describing the default data types used for new NumPy + arrays. 
+ + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + return { + "real floating": dtype(float64), + "complex floating": dtype(complex128), + "integral": dtype(intp), + "indexing": dtype(intp), + } + + def dtypes(self, *, device=None, kind=None): + """ + The array API data types supported by NumPy. + + Note that this function only returns data types that are defined by + the array API. + + Parameters + ---------- + device : str, optional + The device to get the data types for. For NumPy, only ``'cpu'`` is + allowed. + kind : str or tuple of str, optional + The kind of data types to return. If ``None``, all data types are + returned. If a string, only data types of that kind are returned. + If a tuple, a dictionary containing the union of the given kinds + is returned. The following kinds are supported: + + - ``'bool'``: boolean data types (i.e., ``bool``). + - ``'signed integer'``: signed integer data types (i.e., ``int8``, + ``int16``, ``int32``, ``int64``). + - ``'unsigned integer'``: unsigned integer data types (i.e., + ``uint8``, ``uint16``, ``uint32``, ``uint64``). + - ``'integral'``: integer data types. Shorthand for ``('signed + integer', 'unsigned integer')``. + - ``'real floating'``: real-valued floating-point data types + (i.e., ``float32``, ``float64``). + - ``'complex floating'``: complex floating-point data types (i.e., + ``complex64``, ``complex128``). + - ``'numeric'``: numeric data types. Shorthand for ``('integral', + 'real floating', 'complex floating')``. + + Returns + ------- + dtypes : dict + A dictionary mapping the names of data types to the corresponding + NumPy data types. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.dtypes(kind='signed integer') + {'int8': numpy.int8, + 'int16': numpy.int16, + 'int32': numpy.int32, + 'int64': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. 
Only "cpu" is allowed, but received:'
+                f' {device}'
+            )
+        if kind is None:
+            return {
+                "bool": dtype(bool),
+                "int8": dtype(int8),
+                "int16": dtype(int16),
+                "int32": dtype(int32),
+                "int64": dtype(int64),
+                "uint8": dtype(uint8),
+                "uint16": dtype(uint16),
+                "uint32": dtype(uint32),
+                "uint64": dtype(uint64),
+                "float32": dtype(float32),
+                "float64": dtype(float64),
+                "complex64": dtype(complex64),
+                "complex128": dtype(complex128),
+            }
+        if kind == "bool":
+            return {"bool": dtype(bool)}
+        if kind == "signed integer":
+            return {
+                "int8": dtype(int8),
+                "int16": dtype(int16),
+                "int32": dtype(int32),
+                "int64": dtype(int64),
+            }
+        if kind == "unsigned integer":
+            return {
+                "uint8": dtype(uint8),
+                "uint16": dtype(uint16),
+                "uint32": dtype(uint32),
+                "uint64": dtype(uint64),
+            }
+        if kind == "integral":
+            return {
+                "int8": dtype(int8),
+                "int16": dtype(int16),
+                "int32": dtype(int32),
+                "int64": dtype(int64),
+                "uint8": dtype(uint8),
+                "uint16": dtype(uint16),
+                "uint32": dtype(uint32),
+                "uint64": dtype(uint64),
+            }
+        if kind == "real floating":
+            return {
+                "float32": dtype(float32),
+                "float64": dtype(float64),
+            }
+        if kind == "complex floating":
+            return {
+                "complex64": dtype(complex64),
+                "complex128": dtype(complex128),
+            }
+        if kind == "numeric":
+            return {
+                "int8": dtype(int8),
+                "int16": dtype(int16),
+                "int32": dtype(int32),
+                "int64": dtype(int64),
+                "uint8": dtype(uint8),
+                "uint16": dtype(uint16),
+                "uint32": dtype(uint32),
+                "uint64": dtype(uint64),
+                "float32": dtype(float32),
+                "float64": dtype(float64),
+                "complex64": dtype(complex64),
+                "complex128": dtype(complex128),
+            }
+        if isinstance(kind, tuple):
+            res = {}
+            for k in kind:
+                res.update(self.dtypes(kind=k))
+            return res
+        raise ValueError(f"unsupported kind: {kind!r}")
+
+    def devices(self):
+        """
+        The devices supported by NumPy.
+
+        For NumPy, this always returns ``['cpu']``.
+
+        Returns
+        -------
+        devices : list of str
+            The devices supported by NumPy.
+ + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.devices() + ['cpu'] + + """ + return ["cpu"] diff --git a/python/numpy/_array_api_info.pyi b/python/numpy/_array_api_info.pyi new file mode 100644 index 000000000..ee9f8a566 --- /dev/null +++ b/python/numpy/_array_api_info.pyi @@ -0,0 +1,207 @@ +from typing import ( + ClassVar, + Literal, + Never, + TypeAlias, + TypedDict, + TypeVar, + final, + overload, + type_check_only, +) + +import numpy as np + +_Device: TypeAlias = Literal["cpu"] +_DeviceLike: TypeAlias = _Device | None + +_Capabilities = TypedDict( + "_Capabilities", + { + "boolean indexing": Literal[True], + "data-dependent shapes": Literal[True], + }, +) + +_DefaultDTypes = TypedDict( + "_DefaultDTypes", + { + "real floating": np.dtype[np.float64], + "complex floating": np.dtype[np.complex128], + "integral": np.dtype[np.intp], + "indexing": np.dtype[np.intp], + }, +) + +_KindBool: TypeAlias = Literal["bool"] +_KindInt: TypeAlias = Literal["signed integer"] +_KindUInt: TypeAlias = Literal["unsigned integer"] +_KindInteger: TypeAlias = Literal["integral"] +_KindFloat: TypeAlias = Literal["real floating"] +_KindComplex: TypeAlias = Literal["complex floating"] +_KindNumber: TypeAlias = Literal["numeric"] +_Kind: TypeAlias = ( + _KindBool + | _KindInt + | _KindUInt + | _KindInteger + | _KindFloat + | _KindComplex + | _KindNumber +) + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_Permute1: TypeAlias = _T1 | tuple[_T1] +_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] +_Permute3: TypeAlias = ( + tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2] + | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] + | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] +) + +@type_check_only +class _DTypesBool(TypedDict): + bool: np.dtype[np.bool] + +@type_check_only +class _DTypesInt(TypedDict): + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + +@type_check_only +class _DTypesUInt(TypedDict): + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + +@type_check_only +class _DTypesInteger(_DTypesInt, _DTypesUInt): ... + +@type_check_only +class _DTypesFloat(TypedDict): + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + +@type_check_only +class _DTypesComplex(TypedDict): + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +@type_check_only +class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): ... + +@type_check_only +class _DTypes(_DTypesBool, _DTypesNumber): ... + +@type_check_only +class _DTypesUnion(TypedDict, total=False): + bool: np.dtype[np.bool] + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +_EmptyDict: TypeAlias = dict[Never, Never] + +@final +class __array_namespace_info__: + __module__: ClassVar[Literal['numpy']] + + def capabilities(self) -> _Capabilities: ... + def default_device(self) -> _Device: ... 
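The stub methods above mirror the _array_api_info.py implementation added earlier in this diff. A minimal usage sketch of the inspection namespace (illustrative only, not part of the stub file; assumes NumPy >= 2.0, where np.__array_namespace_info__ exists):

import numpy as np

info = np.__array_namespace_info__()

# Capability and device queries are static for NumPy.
assert info.capabilities()["boolean indexing"] is True
assert info.default_device() == "cpu"
assert info.devices() == ["cpu"]

# default_dtypes() maps array API roles to concrete dtype instances.
assert info.default_dtypes()["real floating"] == np.dtype(np.float64)

# dtypes(kind=...) filters by kind; a tuple of kinds returns the union.
assert set(info.dtypes(kind="real floating")) == {"float32", "float64"}
assert set(info.dtypes(kind=("bool", "real floating"))) == {
    "bool", "float32", "float64",
}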
+ def default_dtypes( + self, + *, + device: _DeviceLike = ..., + ) -> _DefaultDTypes: ... + def devices(self) -> list[_Device]: ... + + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: None = ..., + ) -> _DTypes: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindBool], + ) -> _DTypesBool: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindInt], + ) -> _DTypesInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindUInt], + ) -> _DTypesUInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindFloat], + ) -> _DTypesFloat: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindComplex], + ) -> _DTypesComplex: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: ( + _Permute1[_KindInteger] + | _Permute2[_KindInt, _KindUInt] + ), + ) -> _DTypesInteger: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: ( + _Permute1[_KindNumber] + | _Permute3[_KindInteger, _KindFloat, _KindComplex] + ), + ) -> _DTypesNumber: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: tuple[()], + ) -> _EmptyDict: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: tuple[_Kind, ...], + ) -> _DTypesUnion: ... diff --git a/python/numpy/_configtool.py b/python/numpy/_configtool.py new file mode 100644 index 000000000..db7831c33 --- /dev/null +++ b/python/numpy/_configtool.py @@ -0,0 +1,39 @@ +import argparse +import sys +from pathlib import Path + +from .lib._utils_impl import get_include +from .version import __version__ + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument( + "--version", + action="version", + version=__version__, + help="Print the version and exit.", + ) + parser.add_argument( + "--cflags", + action="store_true", + help="Compile flag needed when using the NumPy headers.", + ) + parser.add_argument( + "--pkgconfigdir", + action="store_true", + help=("Print the pkgconfig directory in which `numpy.pc` is stored " + "(useful for setting $PKG_CONFIG_PATH)."), + ) + args = parser.parse_args() + if not sys.argv[1:]: + parser.print_help() + if args.cflags: + print("-I" + get_include()) + if args.pkgconfigdir: + _path = Path(get_include()) / '..' / 'lib' / 'pkgconfig' + print(_path.resolve()) + + +if __name__ == "__main__": + main() diff --git a/python/numpy/_configtool.pyi b/python/numpy/_configtool.pyi new file mode 100644 index 000000000..7e7363e79 --- /dev/null +++ b/python/numpy/_configtool.pyi @@ -0,0 +1 @@ +def main() -> None: ... diff --git a/python/numpy/_core/__init__.py b/python/numpy/_core/__init__.py new file mode 100644 index 000000000..d0da7e0ad --- /dev/null +++ b/python/numpy/_core/__init__.py @@ -0,0 +1,186 @@ +""" +Contains the core of NumPy: ndarray, ufuncs, dtypes, etc. + +Please note that this module is private. All functions and objects +are available in the main ``numpy`` namespace - use that instead. + +""" + +import os + +from numpy.version import version as __version__ + +# disables OpenBLAS affinity setting of the main thread that limits +# python threads or processes to one core +env_added = [] +for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']: + if envkey not in os.environ: + os.environ[envkey] = '1' + env_added.append(envkey) + +try: + from . 
import multiarray +except ImportError as exc: + import sys + msg = """ + +IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE! + +Importing the numpy C-extensions failed. This error can happen for +many reasons, often due to issues with your setup or how NumPy was +installed. + +We have compiled some common reasons and troubleshooting tips at: + + https://numpy.org/devdocs/user/troubleshooting-importerror.html + +Please note and check the following: + + * The Python version is: Python%d.%d from "%s" + * The NumPy version is: "%s" + +and make sure that they are the versions you expect. +Please carefully study the documentation linked above for further help. + +Original error was: %s +""" % (sys.version_info[0], sys.version_info[1], sys.executable, + __version__, exc) + raise ImportError(msg) from exc +finally: + for envkey in env_added: + del os.environ[envkey] +del envkey +del env_added +del os + +from . import umath + +# Check that multiarray,umath are pure python modules wrapping +# _multiarray_umath and not either of the old c-extension modules +if not (hasattr(multiarray, '_multiarray_umath') and + hasattr(umath, '_multiarray_umath')): + import sys + path = sys.modules['numpy'].__path__ + msg = ("Something is wrong with the numpy installation. " + "While importing we detected an older version of " + "numpy in {}. One method of fixing this is to repeatedly uninstall " + "numpy until none is found, then reinstall this version.") + raise ImportError(msg.format(path)) + +from . import numerictypes as nt +from .numerictypes import sctypeDict, sctypes + +multiarray.set_typeDict(nt.sctypeDict) +from . import ( + _machar, + einsumfunc, + fromnumeric, + function_base, + getlimits, + numeric, + shape_base, +) +from .einsumfunc import * +from .fromnumeric import * +from .function_base import * +from .getlimits import * + +# Note: module name memmap is overwritten by a class with same name +from .memmap import * +from .numeric import * +from .records import recarray, record +from .shape_base import * + +del nt + +# do this after everything else, to minimize the chance of this misleadingly +# appearing in an import-time traceback +# add these for module-freeze analysis (like PyInstaller) +from . import ( + _add_newdocs, + _add_newdocs_scalars, + _dtype, + _dtype_ctypes, + _internal, + _methods, +) +from .numeric import absolute as abs + +acos = numeric.arccos +acosh = numeric.arccosh +asin = numeric.arcsin +asinh = numeric.arcsinh +atan = numeric.arctan +atanh = numeric.arctanh +atan2 = numeric.arctan2 +concat = numeric.concatenate +bitwise_left_shift = numeric.left_shift +bitwise_invert = numeric.invert +bitwise_right_shift = numeric.right_shift +permute_dims = numeric.transpose +pow = numeric.power + +__all__ = [ + "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", + "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat", + "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray" +] +__all__ += numeric.__all__ +__all__ += function_base.__all__ +__all__ += getlimits.__all__ +__all__ += shape_base.__all__ +__all__ += einsumfunc.__all__ + + +def _ufunc_reduce(func): + # Report the `__name__`. pickle will try to find the module. Note that + # pickle supports for this `__name__` to be a `__qualname__`. It may + # make sense to add a `__qualname__` to ufuncs, to allow this more + # explicitly (Numba has ufuncs as attributes). 
+ # See also: https://github.com/dask/distributed/issues/3450 + return func.__name__ + + +def _DType_reconstruct(scalar_type): + # This is a work-around to pickle type(np.dtype(np.float64)), etc. + # and it should eventually be replaced with a better solution, e.g. when + # DTypes become HeapTypes. + return type(dtype(scalar_type)) + + +def _DType_reduce(DType): + # As types/classes, most DTypes can simply be pickled by their name: + if not DType._legacy or DType.__module__ == "numpy.dtypes": + return DType.__name__ + + # However, user defined legacy dtypes (like rational) do not end up in + # `numpy.dtypes` as module and do not have a public class at all. + # For these, we pickle them by reconstructing them from the scalar type: + scalar_type = DType.type + return _DType_reconstruct, (scalar_type,) + + +def __getattr__(name): + # Deprecated 2022-11-22, NumPy 1.25. + if name == "MachAr": + import warnings + warnings.warn( + "The `np._core.MachAr` is considered private API (NumPy 1.24)", + DeprecationWarning, stacklevel=2, + ) + return _machar.MachAr + raise AttributeError(f"Module {__name__!r} has no attribute {name!r}") + + +import copyreg + +copyreg.pickle(ufunc, _ufunc_reduce) +copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct) + +# Unclutter namespace (must keep _*_reconstruct for unpickling) +del copyreg, _ufunc_reduce, _DType_reduce + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/python/numpy/_core/__init__.pyi b/python/numpy/_core/__init__.pyi new file mode 100644 index 000000000..40d9c411b --- /dev/null +++ b/python/numpy/_core/__init__.pyi @@ -0,0 +1,2 @@ +# NOTE: The `np._core` namespace is deliberately kept empty due to it +# being private diff --git a/python/numpy/_core/__pycache__/__init__.cpython-312.pyc b/python/numpy/_core/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..eeb2989a8 Binary files /dev/null and b/python/numpy/_core/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc b/python/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc new file mode 100644 index 000000000..02da488b7 Binary files /dev/null and b/python/numpy/_core/__pycache__/_add_newdocs.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc b/python/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc new file mode 100644 index 000000000..c7c673832 Binary files /dev/null and b/python/numpy/_core/__pycache__/_add_newdocs_scalars.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_asarray.cpython-312.pyc b/python/numpy/_core/__pycache__/_asarray.cpython-312.pyc new file mode 100644 index 000000000..8093c1feb Binary files /dev/null and b/python/numpy/_core/__pycache__/_asarray.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_dtype.cpython-312.pyc b/python/numpy/_core/__pycache__/_dtype.cpython-312.pyc new file mode 100644 index 000000000..5c58c70a7 Binary files /dev/null and b/python/numpy/_core/__pycache__/_dtype.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc b/python/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc new file mode 100644 index 000000000..eb71a6485 Binary files /dev/null and b/python/numpy/_core/__pycache__/_dtype_ctypes.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_exceptions.cpython-312.pyc b/python/numpy/_core/__pycache__/_exceptions.cpython-312.pyc new 
file mode 100644 index 000000000..27f87bd91 Binary files /dev/null and b/python/numpy/_core/__pycache__/_exceptions.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_internal.cpython-312.pyc b/python/numpy/_core/__pycache__/_internal.cpython-312.pyc new file mode 100644 index 000000000..bf01c9e3c Binary files /dev/null and b/python/numpy/_core/__pycache__/_internal.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_machar.cpython-312.pyc b/python/numpy/_core/__pycache__/_machar.cpython-312.pyc new file mode 100644 index 000000000..a9b638e8f Binary files /dev/null and b/python/numpy/_core/__pycache__/_machar.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_methods.cpython-312.pyc b/python/numpy/_core/__pycache__/_methods.cpython-312.pyc new file mode 100644 index 000000000..5e4082003 Binary files /dev/null and b/python/numpy/_core/__pycache__/_methods.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc b/python/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc new file mode 100644 index 000000000..f463a965a Binary files /dev/null and b/python/numpy/_core/__pycache__/_string_helpers.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc b/python/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc new file mode 100644 index 000000000..a1253b4b7 Binary files /dev/null and b/python/numpy/_core/__pycache__/_type_aliases.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc b/python/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc new file mode 100644 index 000000000..39b7d6b73 Binary files /dev/null and b/python/numpy/_core/__pycache__/_ufunc_config.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/arrayprint.cpython-312.pyc b/python/numpy/_core/__pycache__/arrayprint.cpython-312.pyc new file mode 100644 index 000000000..d14a09b8c Binary files /dev/null and b/python/numpy/_core/__pycache__/arrayprint.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/cversions.cpython-312.pyc b/python/numpy/_core/__pycache__/cversions.cpython-312.pyc new file mode 100644 index 000000000..11d217977 Binary files /dev/null and b/python/numpy/_core/__pycache__/cversions.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/defchararray.cpython-312.pyc b/python/numpy/_core/__pycache__/defchararray.cpython-312.pyc new file mode 100644 index 000000000..d33710fbd Binary files /dev/null and b/python/numpy/_core/__pycache__/defchararray.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc b/python/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc new file mode 100644 index 000000000..a29d2b9f1 Binary files /dev/null and b/python/numpy/_core/__pycache__/einsumfunc.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc b/python/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc new file mode 100644 index 000000000..30b1c05c2 Binary files /dev/null and b/python/numpy/_core/__pycache__/fromnumeric.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/function_base.cpython-312.pyc b/python/numpy/_core/__pycache__/function_base.cpython-312.pyc new file mode 100644 index 000000000..db2030c7e Binary files /dev/null and b/python/numpy/_core/__pycache__/function_base.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/getlimits.cpython-312.pyc 
b/python/numpy/_core/__pycache__/getlimits.cpython-312.pyc new file mode 100644 index 000000000..5b19a95da Binary files /dev/null and b/python/numpy/_core/__pycache__/getlimits.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/memmap.cpython-312.pyc b/python/numpy/_core/__pycache__/memmap.cpython-312.pyc new file mode 100644 index 000000000..d68e43ca5 Binary files /dev/null and b/python/numpy/_core/__pycache__/memmap.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/multiarray.cpython-312.pyc b/python/numpy/_core/__pycache__/multiarray.cpython-312.pyc new file mode 100644 index 000000000..77b3354dc Binary files /dev/null and b/python/numpy/_core/__pycache__/multiarray.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/numeric.cpython-312.pyc b/python/numpy/_core/__pycache__/numeric.cpython-312.pyc new file mode 100644 index 000000000..061cb3fca Binary files /dev/null and b/python/numpy/_core/__pycache__/numeric.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/numerictypes.cpython-312.pyc b/python/numpy/_core/__pycache__/numerictypes.cpython-312.pyc new file mode 100644 index 000000000..54ffa4f6a Binary files /dev/null and b/python/numpy/_core/__pycache__/numerictypes.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/overrides.cpython-312.pyc b/python/numpy/_core/__pycache__/overrides.cpython-312.pyc new file mode 100644 index 000000000..2931870d1 Binary files /dev/null and b/python/numpy/_core/__pycache__/overrides.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/printoptions.cpython-312.pyc b/python/numpy/_core/__pycache__/printoptions.cpython-312.pyc new file mode 100644 index 000000000..c9e02d9f2 Binary files /dev/null and b/python/numpy/_core/__pycache__/printoptions.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/records.cpython-312.pyc b/python/numpy/_core/__pycache__/records.cpython-312.pyc new file mode 100644 index 000000000..bb772f59a Binary files /dev/null and b/python/numpy/_core/__pycache__/records.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/shape_base.cpython-312.pyc b/python/numpy/_core/__pycache__/shape_base.cpython-312.pyc new file mode 100644 index 000000000..b62d94f59 Binary files /dev/null and b/python/numpy/_core/__pycache__/shape_base.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/strings.cpython-312.pyc b/python/numpy/_core/__pycache__/strings.cpython-312.pyc new file mode 100644 index 000000000..f7e644f14 Binary files /dev/null and b/python/numpy/_core/__pycache__/strings.cpython-312.pyc differ diff --git a/python/numpy/_core/__pycache__/umath.cpython-312.pyc b/python/numpy/_core/__pycache__/umath.cpython-312.pyc new file mode 100644 index 000000000..fb774ec9a Binary files /dev/null and b/python/numpy/_core/__pycache__/umath.cpython-312.pyc differ diff --git a/python/numpy/_core/_add_newdocs.py b/python/numpy/_core/_add_newdocs.py new file mode 100644 index 000000000..8f5de4b7b --- /dev/null +++ b/python/numpy/_core/_add_newdocs.py @@ -0,0 +1,6967 @@ +""" +This is only meant to add docs to objects defined in C-extension modules. +The purpose is to allow easier editing of the docstrings without +requiring a re-compile. + +NOTE: Many of the methods of ndarray have corresponding functions. + If you update these docstrings, please keep also the ones in + _core/fromnumeric.py, matrixlib/defmatrix.py up-to-date. 
+
+"""
+
+from numpy._core.function_base import add_newdoc
+from numpy._core.overrides import get_array_function_like_doc  # noqa: F401
+
+###############################################################################
+#
+# flatiter
+#
+# flatiter needs a toplevel description
+#
+###############################################################################
+
+add_newdoc('numpy._core', 'flatiter',
+    """
+    Flat iterator object to iterate over arrays.
+
+    A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
+    It allows iterating over the array as if it were a 1-D array,
+    either in a for-loop or by calling its `next` method.
+
+    Iteration is done in row-major, C-style order (the last
+    index varying the fastest). The iterator can also be indexed using
+    basic slicing or advanced indexing.
+
+    See Also
+    --------
+    ndarray.flat : Return a flat iterator over an array.
+    ndarray.flatten : Returns a flattened copy of an array.
+
+    Notes
+    -----
+    A `flatiter` iterator can not be constructed directly from Python code
+    by calling the `flatiter` constructor.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> type(fl)
+    <class 'numpy.flatiter'>
+    >>> for item in fl:
+    ...     print(item)
+    ...
+    0
+    1
+    2
+    3
+    4
+    5
+
+    >>> fl[2:4]
+    array([2, 3])
+
+    """)
+
+# flatiter attributes
+
+add_newdoc('numpy._core', 'flatiter', ('base',
+    """
+    A reference to the array that is iterated over.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(5)
+    >>> fl = x.flat
+    >>> fl.base is x
+    True
+
+    """))
+
+
+add_newdoc('numpy._core', 'flatiter', ('coords',
+    """
+    An N-dimensional tuple of current coordinates.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.coords
+    (0, 0)
+    >>> next(fl)
+    0
+    >>> fl.coords
+    (0, 1)
+
+    """))
+
+
+add_newdoc('numpy._core', 'flatiter', ('index',
+    """
+    Current flat index into the array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> fl = x.flat
+    >>> fl.index
+    0
+    >>> next(fl)
+    0
+    >>> fl.index
+    1
+
+    """))
+
+# flatiter functions
+
+add_newdoc('numpy._core', 'flatiter', ('__array__',
+    """
+    __array__(type=None)
+
+    Get array from iterator.
+
+    """))
+
+
+add_newdoc('numpy._core', 'flatiter', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator as a 1-D array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(6).reshape(2, 3)
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> fl = x.flat
+    >>> fl.copy()
+    array([0, 1, 2, 3, 4, 5])
+
+    """))
+
+
+###############################################################################
+#
+# nditer
+#
+###############################################################################
+
+add_newdoc('numpy._core', 'nditer',
+    """
+    nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K',
+        casting='safe', op_axes=None, itershape=None, buffersize=0)
+
+    Efficient multi-dimensional iterator object to iterate over arrays.
+    To get started using this object, see the
+    :ref:`introductory guide to array iteration <arrays.nditer>`.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+
+    flags : sequence of str, optional
+        Flags to control the behavior of the iterator.
+
+        * ``buffered`` enables buffering when required.
+        * ``c_index`` causes a C-order index to be tracked.
+        * ``f_index`` causes a Fortran-order index to be tracked.
+ * ``multi_index`` causes a multi-index, or a tuple of indices + with one per iteration dimension, to be tracked. + * ``common_dtype`` causes all the operands to be converted to + a common data type, with copying or buffering as necessary. + * ``copy_if_overlap`` causes the iterator to determine if read + operands have overlap with write operands, and make temporary + copies as necessary to avoid overlap. False positives (needless + copying) are possible in some cases. + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to + be initialized before their values are copied into the buffers. + * ``external_loop`` causes the ``values`` given to be + one-dimensional arrays with multiple values instead of + zero-dimensional arrays. + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range + of the iterindex values. + * ``refs_ok`` enables iteration of reference types, such as + object arrays. + * ``reduce_ok`` enables iteration of ``readwrite`` operands + which are broadcasted, also known as reduction operands. + * ``zerosize_ok`` allows `itersize` to be zero. + op_flags : list of list of str, optional + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. + * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use + for selecting elements when writing to operands with the + 'writemasked' flag set. The iterator does not enforce this, + but when writing from a buffer back to the array, it only + copies those elements indicated by this mask. + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are + accessed only in the iterator order, to allow less conservative + copying when ``copy_if_overlap`` is present. + op_dtypes : dtype or tuple of dtype(s), optional + The required data type(s) of the operands. If copying or buffering + is enabled, the data will be converted to/from their original types. + order : {'C', 'F', 'A', 'K'}, optional + Controls the iteration order. 'C' means C order, 'F' means + Fortran order, 'A' means 'F' order if all the arrays are Fortran + contiguous, 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. This also + affects the element memory order of ``allocate`` operands, as they + are allocated to be compatible with iteration order. + Default is 'K'. 
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur when making a copy
+        or buffering. Setting this to 'unsafe' is not recommended,
+        as it can adversely affect accumulations.
+
+        * 'no' means the data types should not be cast at all.
+        * 'equiv' means only byte-order changes are allowed.
+        * 'safe' means only casts which can preserve values are allowed.
+        * 'same_kind' means only safe casts or casts within a kind,
+          like float64 to float32, are allowed.
+        * 'unsafe' means any data conversions may be done.
+    op_axes : list of list of ints, optional
+        If provided, is a list of ints or None for each operand.
+        The list of axes for an operand is a mapping from the dimensions
+        of the iterator to the dimensions of the operand. A value of
+        -1 can be placed for entries, causing that dimension to be
+        treated as `newaxis`.
+    itershape : tuple of ints, optional
+        The desired shape of the iterator. This allows ``allocate`` operands
+        with a dimension mapped by op_axes not corresponding to a dimension
+        of a different operand to get a value not equal to 1 for that
+        dimension.
+    buffersize : int, optional
+        When buffering is enabled, controls the size of the temporary
+        buffers. Set to 0 for the default value.
+
+    Attributes
+    ----------
+    dtypes : tuple of dtype(s)
+        The data types of the values provided in `value`. This may be
+        different from the operand data types if buffering is enabled.
+        Valid only before the iterator is closed.
+    finished : bool
+        Whether the iteration over the operands is finished or not.
+    has_delayed_bufalloc : bool
+        If True, the iterator was created with the ``delay_bufalloc`` flag,
+        and no reset() function was called on it yet.
+    has_index : bool
+        If True, the iterator was created with either the ``c_index`` or
+        the ``f_index`` flag, and the property `index` can be used to
+        retrieve it.
+    has_multi_index : bool
+        If True, the iterator was created with the ``multi_index`` flag,
+        and the property `multi_index` can be used to retrieve it.
+    index
+        When the ``c_index`` or ``f_index`` flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and ``has_index`` is False.
+    iterationneedsapi : bool
+        Whether iteration requires access to the Python API, for example
+        if one of the operands is an object array.
+    iterindex : int
+        An index which matches the order of iteration.
+    itersize : int
+        Size of the iterator.
+    itviews
+        Structured view(s) of `operands` in memory, matching the reordered
+        and optimized iterator access pattern. Valid only before the iterator
+        is closed.
+    multi_index
+        When the ``multi_index`` flag was used, this property
+        provides access to the index. Raises a ValueError if accessed
+        and ``has_multi_index`` is False.
+    ndim : int
+        The dimensions of the iterator.
+    nop : int
+        The number of iterator operands.
+    operands : tuple of operand(s)
+        The array(s) to be iterated over. Valid only before the iterator is
+        closed.
+    shape : tuple of ints
+        Shape tuple, the shape of the iterator.
+    value
+        Value of ``operands`` at current iteration. Normally, this is a
+        tuple of array scalars, but if the flag ``external_loop`` is used,
+        it is a tuple of one dimensional arrays.
+
+    Notes
+    -----
+    `nditer` supersedes `flatiter`. The iterator implementation behind
+    `nditer` is also exposed by the NumPy C API.
+ + The Python exposure supplies two iteration interfaces, one which follows + the Python iterator protocol, and another which mirrors the C-style + do-while pattern. The native Python approach is better in most cases, but + if you need the coordinates or index of an iterator, use the C-style pattern. + + Examples + -------- + Here is how we might write an ``iter_add`` function, using the + Python iterator protocol: + + >>> import numpy as np + + >>> def iter_add_py(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... for (a, b, c) in it: + ... addop(a, b, out=c) + ... return it.operands[2] + + Here is the same function, but following the C-style pattern: + + >>> def iter_add(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... while not it.finished: + ... addop(it[0], it[1], out=it[2]) + ... it.iternext() + ... return it.operands[2] + + Here is an example outer product function: + + >>> def outer_it(x, y, out=None): + ... mulop = np.multiply + ... it = np.nditer([x, y, out], ['external_loop'], + ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], + ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, + ... [-1] * x.ndim + list(range(y.ndim)), + ... None]) + ... with it: + ... for (a, b, c) in it: + ... mulop(a, b, out=c) + ... return it.operands[2] + + >>> a = np.arange(2)+1 + >>> b = np.arange(3)+1 + >>> outer_it(a,b) + array([[1, 2, 3], + [2, 4, 6]]) + + Here is an example function which operates like a "lambda" ufunc: + + >>> def luf(lamdaexpr, *args, **kwargs): + ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' + ... nargs = len(args) + ... op = (kwargs.get('out',None),) + args + ... it = np.nditer(op, ['buffered','external_loop'], + ... [['writeonly','allocate','no_broadcast']] + + ... [['readonly','nbo','aligned']]*nargs, + ... order=kwargs.get('order','K'), + ... casting=kwargs.get('casting','safe'), + ... buffersize=kwargs.get('buffersize',0)) + ... while not it.finished: + ... it[0] = lamdaexpr(*it[1:]) + ... it.iternext() + ... return it.operands[0] + + >>> a = np.arange(5) + >>> b = np.ones(5) + >>> luf(lambda i,j:i*i + j/2, a, b) + array([ 0.5, 1.5, 4.5, 9.5, 16.5]) + + If operand flags ``"writeonly"`` or ``"readwrite"`` are used the + operands may be views into the original data with the + `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a + context manager or the `nditer.close` method must be called before + using the result. The temporary data will be written back to the + original data when the :meth:`~object.__exit__` function is called + but not before: + + >>> a = np.arange(6, dtype='i4')[::-2] + >>> with np.nditer(a, [], + ... [['writeonly', 'updateifcopy']], + ... casting='unsafe', + ... op_dtypes=[np.dtype('f4')]) as i: + ... x = i.operands[0] + ... x[:] = [-1, -2, -3] + ... # a still unchanged here + >>> a, x + (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) + + It is important to note that once the iterator is exited, dangling + references (like `x` in the example) may or may not share data with + the original data `a`. If writeback semantics were active, i.e. if + `x.base.flags.writebackifcopy` is `True`, then exiting the iterator + will sever the connection between `x` and `a`, writing to `x` will + no longer write to `a`. 
If writeback semantics are not active, then
+    `x.data` will still point at some part of `a.data`, and writing to
+    one will affect the other.
+
+    Context management and the `close` method appeared in version 1.15.0.
+
+    """)
+
+# nditer methods
+
+add_newdoc('numpy._core', 'nditer', ('copy',
+    """
+    copy()
+
+    Get a copy of the iterator in its current state.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(10)
+    >>> y = x + 1
+    >>> it = np.nditer([x, y])
+    >>> next(it)
+    (array(0), array(1))
+    >>> it2 = it.copy()
+    >>> next(it2)
+    (array(1), array(2))
+
+    """))
+
+add_newdoc('numpy._core', 'nditer', ('operands',
+    """
+    operands[`Slice`]
+
+    The array(s) to be iterated over. Valid only before the iterator is closed.
+    """))
+
+add_newdoc('numpy._core', 'nditer', ('debug_print',
+    """
+    debug_print()
+
+    Print the current state of the `nditer` instance and debug info to stdout.
+
+    """))
+
+add_newdoc('numpy._core', 'nditer', ('enable_external_loop',
+    """
+    enable_external_loop()
+
+    When the "external_loop" was not used during construction, but
+    is desired, this modifies the iterator to behave as if the flag
+    was specified.
+
+    """))
+
+add_newdoc('numpy._core', 'nditer', ('iternext',
+    """
+    iternext()
+
+    Check whether iterations are left, and perform a single internal iteration
+    without returning the result. Used in the C-style do-while pattern.
+    For an example, see `nditer`.
+
+    Returns
+    -------
+    iternext : bool
+        Whether or not there are iterations left.
+
+    """))
+
+add_newdoc('numpy._core', 'nditer', ('remove_axis',
+    """
+    remove_axis(i, /)
+
+    Removes axis `i` from the iterator. Requires that the flag "multi_index"
+    be enabled.
+
+    """))
+
+add_newdoc('numpy._core', 'nditer', ('remove_multi_index',
+    """
+    remove_multi_index()
+
+    When the "multi_index" flag was specified, this removes it, allowing
+    the internal iteration structure to be optimized further.
+
+    """))
+
+add_newdoc('numpy._core', 'nditer', ('reset',
+    """
+    reset()
+
+    Reset the iterator to its initial state.
+
+    """))
+
+add_newdoc('numpy._core', 'nested_iters',
+    """
+    nested_iters(op, axes, flags=None, op_flags=None, op_dtypes=None, \
+                 order="K", casting="safe", buffersize=0)
+
+    Create nditers for use in nested loops
+
+    Create a tuple of `nditer` objects which iterate in nested loops over
+    different axes of the op argument. The first iterator is used in the
+    outermost loop, the last in the innermost loop. Advancing one will change
+    the subsequent iterators to point at its new element.
+
+    Parameters
+    ----------
+    op : ndarray or sequence of array_like
+        The array(s) to iterate over.
+
+    axes : list of list of int
+        Each item is used as an "op_axes" argument to an nditer
+
+    flags, op_flags, op_dtypes, order, casting, buffersize (optional)
+        See `nditer` parameters of the same name
+
+    Returns
+    -------
+    iters : tuple of nditer
+        An nditer for each item in `axes`, outermost first
+
+    See Also
+    --------
+    nditer
+
+    Examples
+    --------
+
+    Basic usage. Note how y is the "flattened" version of
+    [a[:, 0, :], a[:, 1, :], a[:, 2, :]] since we specified
+    the first iter's axes as [1]
+
+    >>> import numpy as np
+    >>> a = np.arange(12).reshape(2, 3, 2)
+    >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
+    >>> for x in i:
+    ...      print(i.multi_index)
+    ...      for y in j:
+    ...
print('', j.multi_index, y) + (0,) + (0, 0) 0 + (0, 1) 1 + (1, 0) 6 + (1, 1) 7 + (1,) + (0, 0) 2 + (0, 1) 3 + (1, 0) 8 + (1, 1) 9 + (2,) + (0, 0) 4 + (0, 1) 5 + (1, 0) 10 + (1, 1) 11 + + """) + +add_newdoc('numpy._core', 'nditer', ('close', + """ + close() + + Resolve all writeback semantics in writeable operands. + + See Also + -------- + + :ref:`nditer-context-manager` + + """)) + + +############################################################################### +# +# broadcast +# +############################################################################### + +add_newdoc('numpy._core', 'broadcast', + """ + Produce an object that mimics broadcasting. + + Parameters + ---------- + in1, in2, ... : array_like + Input parameters. + + Returns + ------- + b : broadcast object + Broadcast the input parameters against one another, and + return an object that encapsulates the result. + Amongst others, it has ``shape`` and ``nd`` properties, and + may be used as an iterator. + + See Also + -------- + broadcast_arrays + broadcast_to + broadcast_shapes + + Examples + -------- + + Manually adding two vectors, using broadcasting: + + >>> import numpy as np + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + + >>> out = np.empty(b.shape) + >>> out.flat = [u+v for (u,v) in b] + >>> out + array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) + + Compare against built-in broadcasting: + + >>> x + y + array([[5, 6, 7], + [6, 7, 8], + [7, 8, 9]]) + + """) + +# attributes + +add_newdoc('numpy._core', 'broadcast', ('index', + """ + current index in broadcasted result + + Examples + -------- + + >>> import numpy as np + >>> x = np.array([[1], [2], [3]]) + >>> y = np.array([4, 5, 6]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (1, 5), (1, 6)) + >>> b.index + 3 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('iters', + """ + tuple of iterators along ``self``'s "components." + + Returns a tuple of `numpy.flatiter` objects, one for each "component" + of ``self``. + + See Also + -------- + numpy.flatiter + + Examples + -------- + + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> row, col = b.iters + >>> next(row), next(col) + (1, 4) + + """)) + +add_newdoc('numpy._core', 'broadcast', ('ndim', + """ + Number of dimensions of broadcasted result. Alias for `nd`. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.ndim + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('nd', + """ + Number of dimensions of broadcasted result. For code intended for NumPy + 1.12.0 and later the more consistent `ndim` is preferred. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.nd + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('numiter', + """ + Number of iterators possessed by the broadcasted result. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.numiter + 2 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('shape', + """ + Shape of broadcasted result. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.shape + (3, 3) + + """)) + +add_newdoc('numpy._core', 'broadcast', ('size', + """ + Total size of broadcasted result. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.size + 9 + + """)) + +add_newdoc('numpy._core', 'broadcast', ('reset', + """ + reset() + + Reset the broadcasted result's iterator(s). + + Parameters + ---------- + None + + Returns + ------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> y = np.array([[4], [5], [6]]) + >>> b = np.broadcast(x, y) + >>> b.index + 0 + >>> next(b), next(b), next(b) + ((1, 4), (2, 4), (3, 4)) + >>> b.index + 3 + >>> b.reset() + >>> b.index + 0 + + """)) + +############################################################################### +# +# numpy functions +# +############################################################################### + +add_newdoc('numpy._core.multiarray', 'array', + """ + array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, + like=None) + + Create an array. + + Parameters + ---------- + object : array_like + An array, any object exposing the array interface, an object whose + ``__array__`` method returns an array, or any (nested) sequence. + If object is a scalar, a 0-dimensional array containing object is + returned. + dtype : data-type, optional + The desired data-type for the array. If not given, NumPy will try to use + a default ``dtype`` that can represent the values (by applying promotion + rules when necessary.) + copy : bool, optional + If ``True`` (default), then the array data is copied. If ``None``, + a copy will only be made if ``__array__`` returns a copy, if obj is + a nested sequence, or if a copy is needed to satisfy any of the other + requirements (``dtype``, ``order``, etc.). Note that any copy of + the data is shallow, i.e., for arrays with object dtype, the new + array will point to the same objects. See Examples for `ndarray.copy`. + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``True``. + order : {'K', 'A', 'C', 'F'}, optional + Specify the memory layout of the array. If object is not an array, the + newly created array will be in C order (row major) unless 'F' is + specified, in which case it will be in Fortran order (column major). + If object is an array the following holds. + + ===== ========= =================================================== + order no copy copy=True + ===== ========= =================================================== + 'K' unchanged F & C order preserved, otherwise most similar order + 'A' unchanged F order if input is F and not C, otherwise C order + 'C' C order C order + 'F' F order F order + ===== ========= =================================================== + + When ``copy=None`` and a copy is made for other reasons, the result is + the same as if ``copy=True``, with some exceptions for 'A', see the + Notes section. The default order is 'K'. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + ndmin : int, optional + Specifies the minimum number of dimensions that the resulting + array should have. Ones will be prepended to the shape as + needed to meet this requirement. + ${ARRAY_FUNCTION_LIKE} + + .. 
versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        An array object satisfying the specified requirements.
+
+    See Also
+    --------
+    empty_like : Return an empty array with shape and type of input.
+    ones_like : Return an array of ones with shape and type of input.
+    zeros_like : Return an array of zeros with shape and type of input.
+    full_like : Return a new array with shape of input filled with value.
+    empty : Return a new uninitialized array.
+    ones : Return a new array setting values to one.
+    zeros : Return a new array setting values to zero.
+    full : Return a new array of given shape filled with value.
+    copy : Return an array copy of the given object.
+
+    Notes
+    -----
+    When order is 'A' and ``object`` is an array in neither 'C' nor 'F' order,
+    and a copy is forced by a change in dtype, then the order of the result is
+    not necessarily 'C' as expected. This is likely a bug.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.array([1, 2, 3])
+    array([1, 2, 3])
+
+    Upcasting:
+
+    >>> np.array([1, 2, 3.0])
+    array([ 1.,  2.,  3.])
+
+    More than one dimension:
+
+    >>> np.array([[1, 2], [3, 4]])
+    array([[1, 2],
+           [3, 4]])
+
+    Minimum dimensions 2:
+
+    >>> np.array([1, 2, 3], ndmin=2)
+    array([[1, 2, 3]])
+
+    Type provided:
+
+    >>> np.array([1, 2, 3], dtype=complex)
+    array([ 1.+0.j,  2.+0.j,  3.+0.j])
+
+    Data-type consisting of more than one element:
+
+    >>> x = np.array([(1,2),(3,4)], dtype=[('a','<i4'), ('b','<i4')])
+    >>> x['a']
+    array([1, 3], dtype=int32)
+
+    Creating an array from sub-classes:
+
+    >>> np.array(np.asmatrix('1 2; 3 4'))
+    array([[1, 2],
+           [3, 4]])
+
+    >>> np.array(np.asmatrix('1 2; 3 4'), subok=True)
+    matrix([[1, 2],
+            [3, 4]])
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'asarray',
+    """
+    asarray(a, dtype=None, order=None, *, device=None, copy=None, like=None)
+
+    Convert the input to an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Memory layout. 'A' and 'K' depend on the order of input array a.
+        'C' row-major (C-style),
+        'F' column-major (Fortran-style) memory representation.
+        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise
+        'K' (keep) preserve input order
+        Defaults to 'K'.
+    device : str, optional
+        The device on which to place the created array. Default: ``None``.
+        For Array-API interoperability only, so must be ``"cpu"`` if passed.
+
+        .. versionadded:: 2.0.0
+    copy : bool, optional
+        If ``True``, then the object is copied. If ``None`` then the object is
+        copied only if needed, i.e. if ``__array__`` returns a copy, if obj
+        is a nested sequence, or if a copy is needed to satisfy any of
+        the other requirements (``dtype``, ``order``, etc.).
+        For ``False`` it raises a ``ValueError`` if a copy cannot be avoided.
+        Default: ``None``.
+
+        .. versionadded:: 2.0.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of ``a``. No copy is performed if the input
+        is already an ndarray with matching dtype and order. If ``a`` is a
+        subclass of ndarray, a base class ndarray is returned.
+
+    See Also
+    --------
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. + + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> import numpy as np + >>> np.asarray(a) + array([1, 2]) + + Existing arrays are not copied: + + >>> a = np.array([1, 2]) + >>> np.asarray(a) is a + True + + If `dtype` is set, array is copied only if dtype does not match: + + >>> a = np.array([1, 2], dtype=np.float32) + >>> np.shares_memory(np.asarray(a, dtype=np.float32), a) + True + >>> np.shares_memory(np.asarray(a, dtype=np.float64), a) + False + + Contrary to `asanyarray`, ndarray subclasses are not passed through: + + >>> issubclass(np.recarray, np.ndarray) + True + >>> a = np.array([(1., 2), (3., 4)], dtype='f4,i4').view(np.recarray) + >>> np.asarray(a) is a + False + >>> np.asanyarray(a) is a + True + + """) + +add_newdoc('numpy._core.multiarray', 'asanyarray', + """ + asanyarray(a, dtype=None, order=None, *, device=None, copy=None, like=None) + + Convert the input to an ndarray, but pass ndarray subclasses through. + + Parameters + ---------- + a : array_like + Input data, in any form that can be converted to an array. This + includes scalars, lists, lists of tuples, tuples, tuples of tuples, + tuples of lists, and ndarrays. + dtype : data-type, optional + By default, the data-type is inferred from the input data. + order : {'C', 'F', 'A', 'K'}, optional + Memory layout. 'A' and 'K' depend on the order of input array a. + 'C' row-major (C-style), + 'F' column-major (Fortran-style) memory representation. + 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise + 'K' (keep) preserve input order + Defaults to 'C'. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 + + copy : bool, optional + If ``True``, then the object is copied. If ``None`` then the object is + copied only if needed, i.e. if ``__array__`` returns a copy, if obj + is a nested sequence, or if a copy is needed to satisfy any of + the other requirements (``dtype``, ``order``, etc.). + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``None``. + + .. versionadded:: 2.1.0 + + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray or an ndarray subclass + Array interpretation of `a`. If `a` is an ndarray or a subclass + of ndarray, it is returned as-is and no copy is performed. + + See Also + -------- + asarray : Similar function which always returns ndarrays. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + asarray_chkfinite : Similar function which checks input for NaNs and + Infs. + fromiter : Create an array from an iterator. + fromfunction : Construct an array by executing a function on grid + positions. 
+ + Examples + -------- + Convert a list into an array: + + >>> a = [1, 2] + >>> import numpy as np + >>> np.asanyarray(a) + array([1, 2]) + + Instances of `ndarray` subclasses are passed through as-is: + + >>> a = np.array([(1., 2), (3., 4)], dtype='f4,i4').view(np.recarray) + >>> np.asanyarray(a) is a + True + + """) + +add_newdoc('numpy._core.multiarray', 'ascontiguousarray', + """ + ascontiguousarray(a, dtype=None, *, like=None) + + Return a contiguous array (ndim >= 1) in memory (C order). + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + Data-type of returned array. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Contiguous array of same shape and content as `a`, with type `dtype` + if specified. + + See Also + -------- + asfortranarray : Convert input to an ndarray with column-major + memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + Starting with a Fortran-contiguous array: + + >>> import numpy as np + >>> x = np.ones((2, 3), order='F') + >>> x.flags['F_CONTIGUOUS'] + True + + Calling ``ascontiguousarray`` makes a C-contiguous copy: + + >>> y = np.ascontiguousarray(x) + >>> y.flags['C_CONTIGUOUS'] + True + >>> np.may_share_memory(x, y) + False + + Now, starting with a C-contiguous array: + + >>> x = np.ones((2, 3), order='C') + >>> x.flags['C_CONTIGUOUS'] + True + + Then, calling ``ascontiguousarray`` returns the same object: + + >>> y = np.ascontiguousarray(x) + >>> x is y + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """) + +add_newdoc('numpy._core.multiarray', 'asfortranarray', + """ + asfortranarray(a, dtype=None, *, like=None) + + Return an array (ndim >= 1) laid out in Fortran order in memory. + + Parameters + ---------- + a : array_like + Input array. + dtype : str or dtype object, optional + By default, the data-type is inferred from the input data. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + The input `a` in Fortran, or column-major, order. + + See Also + -------- + ascontiguousarray : Convert input to a contiguous (C order) array. + asanyarray : Convert input to an ndarray with either row or + column-major memory order. + require : Return an ndarray that satisfies requirements. + ndarray.flags : Information about the memory layout of the array. + + Examples + -------- + Starting with a C-contiguous array: + + >>> import numpy as np + >>> x = np.ones((2, 3), order='C') + >>> x.flags['C_CONTIGUOUS'] + True + + Calling ``asfortranarray`` makes a Fortran-contiguous copy: + + >>> y = np.asfortranarray(x) + >>> y.flags['F_CONTIGUOUS'] + True + >>> np.may_share_memory(x, y) + False + + Now, starting with a Fortran-contiguous array: + + >>> x = np.ones((2, 3), order='F') + >>> x.flags['F_CONTIGUOUS'] + True + + Then, calling ``asfortranarray`` returns the same object: + + >>> y = np.asfortranarray(x) + >>> x is y + True + + Note: This function returns an array with at least one-dimension (1-d) + so it will not preserve 0-d arrays. + + """) + +add_newdoc('numpy._core.multiarray', 'empty', + """ + empty(shape, dtype=float, order='C', *, device=None, like=None) + + Return a new array of given shape and type, without initializing entries. 
+ + Parameters + ---------- + shape : int or tuple of int + Shape of the empty array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + Desired output data-type for the array, e.g, `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: 'C' + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data of the given shape, dtype, and + order. Object arrays will be initialized to None. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. + + Notes + ----- + Unlike other array creation functions (e.g. `zeros`, `ones`, `full`), + `empty` does not initialize the values of the array, and may therefore be + marginally faster. However, the values stored in the newly allocated array + are arbitrary. For reproducible behavior, be sure to set each element of + the array before reading. + + Examples + -------- + >>> import numpy as np + >>> np.empty([2, 2]) + array([[ -9.74499359e+001, 6.69583040e-309], + [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized + + >>> np.empty([2, 2], dtype=int) + array([[-1073741821, -1067949133], + [ 496041986, 19249760]]) #uninitialized + + """) + +add_newdoc('numpy._core.multiarray', 'scalar', + """ + scalar(dtype, obj) + + Return a new scalar array of the given type initialized with obj. + + This function is meant mainly for pickle support. `dtype` must be a + valid data-type descriptor. If `dtype` corresponds to an object + descriptor, then `obj` can be any object, otherwise `obj` must be a + string. If `obj` is not given, it will be interpreted as None for object + type and as zeros for all other types. + + """) + +add_newdoc('numpy._core.multiarray', 'zeros', + """ + zeros(shape, dtype=float, order='C', *, like=None) + + Return a new array of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or tuple of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: 'C' + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of zeros with the given shape, dtype, and order. + + See Also + -------- + zeros_like : Return an array of zeros with shape and type of input. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + full : Return a new array of given shape filled with value. 
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.zeros(5)
+    array([ 0.,  0.,  0.,  0.,  0.])
+
+    >>> np.zeros((5,), dtype=int)
+    array([0, 0, 0, 0, 0])
+
+    >>> np.zeros((2, 1))
+    array([[ 0.],
+           [ 0.]])
+
+    >>> s = (2,2)
+    >>> np.zeros(s)
+    array([[ 0.,  0.],
+           [ 0.,  0.]])
+
+    >>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
+    array([(0, 0), (0, 0)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'fromstring',
+    """
+    fromstring(string, dtype=float, count=-1, *, sep, like=None)
+
+    A new 1-D array initialized from text data in a string.
+
+    Parameters
+    ----------
+    string : str
+        A string containing the data.
+    dtype : data-type, optional
+        The data type of the array; default: float. For binary input data,
+        the data must be in exactly this format. Most builtin numeric types are
+        supported and extension types may be supported.
+    count : int, optional
+        Read this number of `dtype` elements from the data. If this is
+        negative (the default), the count will be determined from the
+        length of the data.
+    sep : str, optional
+        The string separating numbers in the data; extra whitespace between
+        elements is also ignored.
+
+        .. deprecated:: 1.14
+            Passing ``sep=''``, the default, is deprecated since it will
+            trigger the deprecated binary mode of this function. This mode
+            interprets `string` as binary bytes, rather than ASCII text with
+            decimal numbers, an operation which is better spelt
+            ``frombuffer(string, dtype, count)``. If `string` contains unicode
+            text, the binary mode of `fromstring` will first encode it into
+            bytes using utf-8, which will not produce sane results.
+
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    arr : ndarray
+        The constructed array.
+
+    Raises
+    ------
+    ValueError
+        If the string is not the correct size to satisfy the requested
+        `dtype` and `count`.
+
+    See Also
+    --------
+    frombuffer, fromfile, fromiter
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.fromstring('1 2', dtype=int, sep=' ')
+    array([1, 2])
+    >>> np.fromstring('1, 2', dtype=int, sep=',')
+    array([1, 2])
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'compare_chararrays',
+    """
+    compare_chararrays(a1, a2, cmp, rstrip)
+
+    Performs element-wise comparison of two string arrays using the
+    comparison operator specified by `cmp`.
+
+    Parameters
+    ----------
+    a1, a2 : array_like
+        Arrays to be compared.
+    cmp : {"<", "<=", "==", ">=", ">", "!="}
+        Type of comparison.
+    rstrip : Boolean
+        If True, the spaces at the end of strings are removed before the comparison.
+
+    Returns
+    -------
+    out : ndarray
+        The output array of boolean type with the same shape as `a1` and `a2`.
+
+    Raises
+    ------
+    ValueError
+        If `cmp` is not valid.
+    TypeError
+        If at least one of `a1` or `a2` is a non-string array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["a", "b", "cde"])
+    >>> b = np.array(["a", "a", "dec"])
+    >>> np.char.compare_chararrays(a, b, ">", True)
+    array([False,  True, False])
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'fromiter',
+    """
+    fromiter(iter, dtype, count=-1, *, like=None)
+
+    Create a new 1-dimensional array from an iterable object.
+
+    Parameters
+    ----------
+    iter : iterable object
+        An iterable object providing data for the array.
+    dtype : data-type
+        The data-type of the returned array.
+
+        .. versionchanged:: 1.23
+            Object and subarray dtypes are now supported (note that the final
+            result is not 1-D for a subarray dtype).
+
+    count : int, optional
+        The number of items to read from *iterable*. The default is -1,
+        which means all data is read.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        The output array.
+
+    Notes
+    -----
+    Specify `count` to improve performance. It allows ``fromiter`` to
+    pre-allocate the output array, instead of resizing it on demand.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> iterable = (x*x for x in range(5))
+    >>> np.fromiter(iterable, float)
+    array([  0.,   1.,   4.,   9.,  16.])
+
+    A carefully constructed subarray dtype will lead to higher dimensional
+    results:
+
+    >>> iterable = ((x+1, x+2) for x in range(5))
+    >>> np.fromiter(iterable, dtype=np.dtype((int, 2)))
+    array([[1, 2],
+           [2, 3],
+           [3, 4],
+           [4, 5],
+           [5, 6]])
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'fromfile',
+    """
+    fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
+
+    Construct an array from data in a text or binary file.
+
+    A highly efficient way of reading binary data with a known data-type,
+    as well as parsing simply formatted text files. Data written using the
+    `tofile` method can be read using this function.
+
+    Parameters
+    ----------
+    file : file or str or Path
+        Open file object or filename.
+    dtype : data-type
+        Data type of the returned array.
+        For binary files, it is used to determine the size and byte-order
+        of the items in the file.
+        Most builtin numeric types are supported and extension types may be supported.
+    count : int
+        Number of items to read. ``-1`` means all items (i.e., the complete
+        file).
+    sep : str
+        Separator between items if file is a text file.
+        Empty ("") separator means the file should be treated as binary.
+        Spaces (" ") in the separator match zero or more whitespace characters.
+        A separator consisting only of spaces must match at least one
+        whitespace.
+    offset : int
+        The offset (in bytes) from the file's current position. Defaults to 0.
+        Only permitted for binary files.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    See also
+    --------
+    load, save
+    ndarray.tofile
+    loadtxt : More flexible way of loading data from a text file.
+
+    Notes
+    -----
+    Do not rely on the combination of `tofile` and `fromfile` for
+    data storage, as the binary files generated are not platform
+    independent. In particular, no byte-order or data-type information is
+    saved. Data can be stored in the platform independent ``.npy`` format
+    using `save` and `load` instead.
+
+    Examples
+    --------
+    Construct an ndarray:
+
+    >>> import numpy as np
+    >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
+    ...                ('temp', float)])
+    >>> x = np.zeros((1,), dtype=dt)
+    >>> x['time']['min'] = 10; x['temp'] = 98.25
+    >>> x
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    Save the raw data to disk:
+
+    >>> import tempfile
+    >>> fname = tempfile.mkstemp()[1]
+    >>> x.tofile(fname)
+
+    Read the raw data from disk:
+
+    >>> np.fromfile(fname, dtype=dt)
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    The recommended way to store and load data:
+
+    >>> np.save(fname, x)
+    >>> np.load(fname + '.npy')
+    array([((10, 0), 98.25)],
+          dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'frombuffer',
+    """
+    frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
+
+    Interpret a buffer as a 1-dimensional array.
+
+    Parameters
+    ----------
+    buffer : buffer_like
+        An object that exposes the buffer interface.
+    dtype : data-type, optional
+        Data-type of the returned array; default: float.
+    count : int, optional
+        Number of items to read. ``-1`` means all data in the buffer.
+    offset : int, optional
+        Start reading the buffer from this offset (in bytes); default: 0.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+
+    See also
+    --------
+    ndarray.tobytes
+        Inverse of this operation, construct Python bytes from the raw data
+        bytes in the array.
+
+    Notes
+    -----
+    If the buffer has data that is not in machine byte-order, this should
+    be specified as part of the data-type, e.g.::
+
+      >>> dt = np.dtype(int)
+      >>> dt = dt.newbyteorder('>')
+      >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
+
+    The data of the resulting array will not be byteswapped, but will be
+    interpreted correctly.
+
+    This function creates a view into the original object. This should be safe
+    in general, but it may make sense to copy the result when the original
+    object is mutable or untrusted.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> s = b'hello world'
+    >>> np.frombuffer(s, dtype='S1', count=5, offset=6)
+    array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
+
+    >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
+    array([1, 2], dtype=uint8)
+    >>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
+    array([1, 2, 3], dtype=uint8)
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'from_dlpack',
+    """
+    from_dlpack(x, /, *, device=None, copy=None)
+
+    Create a NumPy array from an object implementing the ``__dlpack__``
+    protocol. Generally, the returned NumPy array is a view of the input
+    object. See [1]_ and [2]_ for more details.
+
+    Parameters
+    ----------
+    x : object
+        A Python object that implements the ``__dlpack__`` and
+        ``__dlpack_device__`` methods.
+    device : device, optional
+        Device on which to place the created array. Default: ``None``.
+        Must be ``"cpu"`` if passed which may allow importing an array
+        that is not already CPU available.
+    copy : bool, optional
+        Boolean indicating whether or not to copy the input. If ``True``,
+        the copy will be made. If ``False``, the function will never copy,
+        and will raise ``BufferError`` in case a copy is deemed necessary.
+        Passing it requests a copy from the exporter who may or may not
+        implement the capability.
+        If ``None``, the function will reuse the existing memory buffer if
+        possible and copy otherwise. Default: ``None``.
+
+    Returns
+    -------
+    out : ndarray
+
+    References
+    ----------
+    ..
[1] Array API documentation, + https://data-apis.org/array-api/latest/design_topics/data_interchange.html#syntax-for-data-interchange-with-dlpack + + .. [2] Python specification for DLPack, + https://dmlc.github.io/dlpack/latest/python_spec.html + + Examples + -------- + >>> import torch # doctest: +SKIP + >>> x = torch.arange(10) # doctest: +SKIP + >>> # create a view of the torch tensor "x" in NumPy + >>> y = np.from_dlpack(x) # doctest: +SKIP + """) + +add_newdoc('numpy._core.multiarray', 'correlate', + """cross_correlate(a,v, mode=0)""") + +add_newdoc('numpy._core.multiarray', 'arange', + """ + arange([start,] stop[, step,], dtype=None, *, device=None, like=None) + + Return evenly spaced values within a given interval. + + ``arange`` can be called with a varying number of positional arguments: + + * ``arange(stop)``: Values are generated within the half-open interval + ``[0, stop)`` (in other words, the interval including `start` but + excluding `stop`). + * ``arange(start, stop)``: Values are generated within the half-open + interval ``[start, stop)``. + * ``arange(start, stop, step)`` Values are generated within the half-open + interval ``[start, stop)``, with spacing between values given by + ``step``. + + For integer arguments the function is roughly equivalent to the Python + built-in :py:class:`range`, but returns an ndarray rather than a ``range`` + instance. + + When using a non-integer step, such as 0.1, it is often better to use + `numpy.linspace`. + + See the Warning sections below for more information. + + Parameters + ---------- + start : integer or real, optional + Start of interval. The interval includes this value. The default + start value is 0. + stop : integer or real + End of interval. The interval does not include this value, except + in some cases where `step` is not an integer and floating point + round-off affects the length of `out`. + step : integer or real, optional + Spacing between values. For any output `out`, this is the distance + between two adjacent values, ``out[i+1] - out[i]``. The default + step size is 1. If `step` is specified as a position argument, + `start` must also be given. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, infer the data + type from the other input arguments. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + arange : ndarray + Array of evenly spaced values. + + For floating point arguments, the length of the result is + ``ceil((stop - start)/step)``. Because of floating point overflow, + this rule may result in the last element of `out` being greater + than `stop`. + + Warnings + -------- + The length of the output might not be numerically stable. + + Another stability issue is due to the internal implementation of + `numpy.arange`. + The actual step value used to populate the array is + ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss + can occur here, due to casting or due to using floating points when + `start` is much larger than `step`. This can lead to unexpected + behaviour. For example:: + + >>> np.arange(0, 5, 0.5, dtype=int) + array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) + >>> np.arange(-3, 3, 0.5, dtype=int) + array([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + In such cases, the use of `numpy.linspace` should be preferred. 
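+
+    For instance, a minimal sketch of the preferred alternative (assuming the
+    goal is 11 evenly spaced samples from 0 to 5, endpoints included)::
+
+      >>> np.linspace(0, 5, 11)
+      array([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5, 5. ])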
+
+    The built-in :py:class:`range` generates :std:doc:`Python built-in integers
+    that have arbitrary size <python:c-api/long>`, while `numpy.arange`
+    produces `numpy.int32` or `numpy.int64` numbers. This may result in
+    incorrect results for large integer values::
+
+      >>> power = 40
+      >>> modulo = 10000
+      >>> x1 = [(n ** power) % modulo for n in range(8)]
+      >>> x2 = [(n ** power) % modulo for n in np.arange(8)]
+      >>> print(x1)
+      [0, 1, 7776, 8801, 6176, 625, 6576, 4001]  # correct
+      >>> print(x2)
+      [0, 1, 7776, 7185, 0, 5969, 4816, 3361]  # incorrect
+
+    See Also
+    --------
+    numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
+    numpy.ogrid : Arrays of evenly spaced numbers in N-dimensions.
+    numpy.mgrid : Grid-shaped arrays of evenly spaced numbers in N-dimensions.
+    :ref:`how-to-partition`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.arange(3)
+    array([0, 1, 2])
+    >>> np.arange(3.0)
+    array([ 0.,  1.,  2.])
+    >>> np.arange(3,7)
+    array([3, 4, 5, 6])
+    >>> np.arange(3,7,2)
+    array([3, 5])
+
+    """)
+
+add_newdoc('numpy._core.multiarray', '_get_ndarray_c_version',
+    """_get_ndarray_c_version()
+
+    Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
+
+    """)
+
+add_newdoc('numpy._core.multiarray', '_reconstruct',
+    """_reconstruct(subtype, shape, dtype)
+
+    Construct an empty array. Used by Pickles.
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'promote_types',
+    """
+    promote_types(type1, type2)
+
+    Returns the data type with the smallest size and smallest scalar
+    kind to which both ``type1`` and ``type2`` may be safely cast.
+    The returned data type is always considered "canonical", this mainly
+    means that the promoted dtype will always be in native byte order.
+
+    This function is symmetric, but rarely associative.
+
+    Parameters
+    ----------
+    type1 : dtype or dtype specifier
+        First data type.
+    type2 : dtype or dtype specifier
+        Second data type.
+
+    Returns
+    -------
+    out : dtype
+        The promoted data type.
+
+    Notes
+    -----
+    Please see `numpy.result_type` for additional information about promotion.
+
+    Starting in NumPy 1.9, the promote_types function returns a valid string
+    length when given an integer or float dtype as one argument and a string
+    dtype as another argument. Previously it always returned the input string
+    dtype, even if it wasn't long enough to store the max integer/float value
+    converted to a string.
+
+    .. versionchanged:: 1.23.0
+
+        NumPy now supports promotion for more structured dtypes. It will now
+        remove unnecessary padding from a structure dtype and promote included
+        fields individually.
+
+    See Also
+    --------
+    result_type, dtype, can_cast
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.promote_types('f4', 'f8')
+    dtype('float64')
+
+    >>> np.promote_types('i8', 'f4')
+    dtype('float64')
+
+    >>> np.promote_types('>i8', '<i8')
+    dtype('int64')
+
+    >>> np.promote_types('i4', 'S8')
+    dtype('S11')
+
+    An example of a non-associative case:
+
+    >>> p = np.promote_types
+    >>> p('S', p('i1', 'u1'))
+    dtype('S6')
+    >>> p(p('S', 'i1'), 'u1')
+    dtype('S4')
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'c_einsum',
+    """
+    c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
+           casting='safe')
+
+    *This documentation shadows that of the native python implementation
+    of the `einsum` function, except all references and examples related
+    to the `optimize` argument (v 0.12.0) have been removed.*
+
+    Evaluates the Einstein summation convention on the operands.
+ + Using the Einstein summation convention, many common multi-dimensional, + linear algebraic array operations can be represented in a simple fashion. + In *implicit* mode `einsum` computes these values. + + In *explicit* mode, `einsum` provides further flexibility to compute + other array operations that might not be considered classical Einstein + summation operations, by disabling, or forcing summation over specified + subscript labels. + + See the notes and examples for clarification. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation as comma separated list of + subscript labels. An implicit (classical Einstein summation) + calculation is performed unless the explicit indicator '->' is + included as well as subscript labels of the precise output form. + operands : list of array_like + These are the arrays for the operation. + out : ndarray, optional + If provided, the calculation is done into this array. + dtype : {data-type, None}, optional + If provided, forces the calculation to use the data type specified. + Note that you may have to also give a more liberal `casting` + parameter to allow the conversions. Default is None. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the output. 'C' means it should + be C contiguous. 'F' means it should be Fortran contiguous, + 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. + 'K' means it should be as close to the layout of the inputs as + is possible, including arbitrarily permuted axes. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Setting this to + 'unsafe' is not recommended, as it can adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Default is 'safe'. + optimize : {False, True, 'greedy', 'optimal'}, optional + Controls if intermediate optimization should occur. No optimization + will occur if False and True will default to the 'greedy' algorithm. + Also accepts an explicit contraction list from the ``np.einsum_path`` + function. See ``np.einsum_path`` for more details. Defaults to False. + + Returns + ------- + output : ndarray + The calculation based on the Einstein summation convention. + + See Also + -------- + einsum_path, dot, inner, outer, tensordot, linalg.multi_dot + + Notes + ----- + The Einstein summation convention can be used to compute + many multi-dimensional, linear algebraic array operations. `einsum` + provides a succinct way of representing these. + + A non-exhaustive list of these operations, + which can be computed by `einsum`, is shown below along with examples: + + * Trace of an array, :py:func:`numpy.trace`. + * Return a diagonal, :py:func:`numpy.diag`. + * Array axis summations, :py:func:`numpy.sum`. + * Transpositions and permutations, :py:func:`numpy.transpose`. + * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. + * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. + * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. + * Tensor contractions, :py:func:`numpy.tensordot`. 
+    * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
+
+    The subscripts string is a comma-separated list of subscript labels,
+    where each label refers to a dimension of the corresponding operand.
+    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+    appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+    view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+    describes traditional matrix multiplication and is equivalent to
+    :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+    operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+    to :py:func:`np.trace(a) <numpy.trace>`.
+
+    In *implicit mode*, the chosen subscripts are important
+    since the axes of the output are reordered alphabetically. This
+    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+    ``np.einsum('ji', a)`` takes its transpose. Additionally,
+    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+    multiplication since subscript 'h' precedes subscript 'i'.
+
+    In *explicit mode* the output can be directly controlled by
+    specifying output subscript labels. This requires the
+    identifier '->' as well as the list of output subscript labels.
+    This feature increases the flexibility of the function since
+    summing can be disabled or forced when required. The call
+    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) <numpy.sum>`
+    if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)``
+    is like :py:func:`np.diag(a) <numpy.diag>` if ``a`` is a square 2-D array.
+    The difference is that `einsum` does not allow broadcasting by default.
+    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+    order of the output subscript labels and therefore returns matrix
+    multiplication, unlike the example above in implicit mode.
+
+    To enable and control broadcasting, use an ellipsis. Default
+    NumPy-style broadcasting is done by adding an ellipsis
+    to the left of each term, like ``np.einsum('...ii->...i', a)``.
+    ``np.einsum('...i->...', a)`` is like
+    :py:func:`np.sum(a, axis=-1) <numpy.sum>` for array ``a`` of any shape.
+    To take the trace along the first and last axes,
+    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
+    product with the left-most indices instead of rightmost, one can do
+    ``np.einsum('ij...,jk...->ik...', a, b)``.
+
+    When there is only one operand, no axes are summed, and no output
+    parameter is provided, a view into the operand is returned instead
+    of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
+    produces a view (changed in version 1.10.0).
+
+    `einsum` also provides an alternative way to provide the subscripts
+    and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+    If the output shape is not provided in this format `einsum` will be
+    calculated in implicit mode, otherwise it will be performed explicitly.
+    The examples below have corresponding `einsum` calls with the two
+    parameter methods.
+
+    Views returned from einsum are now writeable whenever the input array
+    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
+    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+    of a 2D array.
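+
+    As a compact sketch of the two modes (the Examples section below covers
+    these and many more cases in detail)::
+
+      >>> a = np.arange(6).reshape(2, 3)
+      >>> np.einsum('ij', a).shape        # implicit mode: identity view
+      (2, 3)
+      >>> np.einsum('ij->ji', a).shape    # explicit mode: transpose
+      (3, 2)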
+ + Examples + -------- + >>> import numpy as np + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[ 4400., 4730.], + [ 4532., 4874.], + [ 4664., 5018.], + [ 4796., 5162.], + [ 4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + """) + + +############################################################################## +# +# Documentation for ndarray attributes and methods +# 
+############################################################################## + + +############################################################################## +# +# ndarray object +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', + """ + ndarray(shape, dtype=float, buffer=None, offset=0, + strides=None, order=None) + + An array object represents a multidimensional, homogeneous array + of fixed-size items. An associated data-type object describes the + format of each element in the array (its byte-order, how many bytes it + occupies in memory, whether it is an integer, a floating point number, + or something else, etc.) + + Arrays should be constructed using `array`, `zeros` or `empty` (refer + to the See Also section below). The parameters given here refer to + a low-level method (`ndarray(...)`) for instantiating an array. + + For more information, refer to the `numpy` module and examine the + methods and attributes of an array. + + Parameters + ---------- + (for the __new__ method; see Notes below) + + shape : tuple of ints + Shape of created array. + dtype : data-type, optional + Any object that can be interpreted as a numpy data type. + buffer : object exposing buffer interface, optional + Used to fill the array with data. + offset : int, optional + Offset of array data in buffer. + strides : tuple of ints, optional + Strides of data in memory. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Attributes + ---------- + T : ndarray + Transpose of the array. + data : buffer + The array's elements, in memory. + dtype : dtype object + Describes the format of the elements in the array. + flags : dict + Dictionary containing information related to memory use, e.g., + 'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc. + flat : numpy.flatiter object + Flattened version of the array as an iterator. The iterator + allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for + assignment examples; TODO). + imag : ndarray + Imaginary part of the array. + real : ndarray + Real part of the array. + size : int + Number of elements in the array. + itemsize : int + The memory use of each array element in bytes. + nbytes : int + The total number of bytes required to store the array data, + i.e., ``itemsize * size``. + ndim : int + The array's number of dimensions. + shape : tuple of ints + Shape of the array. + strides : tuple of ints + The step-size required to move from one element to the next in + memory. For example, a contiguous ``(3, 4)`` array of type + ``int16`` in C-order has strides ``(8, 2)``. This implies that + to move from element to element in memory requires jumps of 2 bytes. + To move from row-to-row, one needs to jump 8 bytes at a time + (``2 * 4``). + ctypes : ctypes object + Class containing properties of the array needed for interaction + with ctypes. + base : ndarray + If the array is a view into another array, that array is its `base` + (unless that array is also a view). The `base` array is where the + array data is actually stored. + + See Also + -------- + array : Construct an array. + zeros : Create an array, each element of which is zero. + empty : Create an array, but leave its allocated memory unchanged (i.e., + it contains "garbage"). + dtype : Create a data-type. + numpy.typing.NDArray : An ndarray alias :term:`generic ` + w.r.t. its `dtype.type `. + + Notes + ----- + There are two modes of creating an array using ``__new__``: + + 1. 
If `buffer` is None, then only `shape`, `dtype`, and `order` + are used. + 2. If `buffer` is an object exposing the buffer interface, then + all keywords are interpreted. + + No ``__init__`` method is needed because the array is fully initialized + after the ``__new__`` method. + + Examples + -------- + These examples illustrate the low-level `ndarray` constructor. Refer + to the `See Also` section above for easier ways of constructing an + ndarray. + + First mode, `buffer` is None: + + >>> import numpy as np + >>> np.ndarray(shape=(2,2), dtype=float, order='F') + array([[0.0e+000, 0.0e+000], # random + [ nan, 2.5e-323]]) + + Second mode: + + >>> np.ndarray((2,), buffer=np.array([1,2,3]), + ... offset=np.int_().itemsize, + ... dtype=int) # offset = 1*itemsize, i.e. skip first element + array([2, 3]) + + """) + + +############################################################################## +# +# ndarray attributes +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_interface__', + """Array protocol: Python side.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_priority__', + """Array priority.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_struct__', + """Array protocol: C-struct side.""")) + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', + """ + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) + + DLPack Protocol: Part of the Array API. + + """)) + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', + """ + a.__dlpack_device__() + + DLPack Protocol: Part of the Array API. + + """)) + +add_newdoc('numpy._core.multiarray', 'ndarray', ('base', + """ + Base object if memory is from some other object. + + Examples + -------- + The base of an array that owns its memory is None: + + >>> import numpy as np + >>> x = np.array([1,2,3,4]) + >>> x.base is None + True + + Slicing creates a view, whose memory is shared with x: + + >>> y = x[2:] + >>> y.base is x + True + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('ctypes', + """ + An object to simplify the interaction of the array with the ctypes + module. + + This attribute creates an object that makes it easier to use arrays + when calling shared libraries with the ctypes module. The returned + object has, among others, data, shape, and strides attributes (see + Notes below) which themselves return ctypes objects that can be used + as arguments to a shared library. + + Parameters + ---------- + None + + Returns + ------- + c : Python object + Possessing attributes data, shape, strides, etc. + + See Also + -------- + numpy.ctypeslib + + Notes + ----- + Below are the public attributes of this object which were documented + in "Guide to NumPy" (we have omitted undocumented public attributes, + as well as documented private attributes): + + .. autoattribute:: numpy._core._internal._ctypes.data + :noindex: + + .. autoattribute:: numpy._core._internal._ctypes.shape + :noindex: + + .. autoattribute:: numpy._core._internal._ctypes.strides + :noindex: + + .. automethod:: numpy._core._internal._ctypes.data_as + :noindex: + + .. automethod:: numpy._core._internal._ctypes.shape_as + :noindex: + + .. 
automethod:: numpy._core._internal._ctypes.strides_as + :noindex: + + If the ctypes module is not available, then the ctypes attribute + of array objects still returns something useful, but ctypes objects + are not returned and errors may be raised instead. In particular, + the object will still have the ``as_parameter`` attribute which will + return an integer equal to the data attribute. + + Examples + -------- + >>> import numpy as np + >>> import ctypes + >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32) + >>> x + array([[0, 1], + [2, 3]], dtype=int32) + >>> x.ctypes.data + 31962608 # may vary + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)) + <__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents + c_uint(0) + >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents + c_ulong(4294967296) + >>> x.ctypes.shape + # may vary + >>> x.ctypes.strides + # may vary + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('data', + """Python buffer object pointing to the start of the array's data.""")) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dtype', + """ + Data-type of the array's elements. + + .. warning:: + + Setting ``arr.dtype`` is discouraged and may be deprecated in the + future. Setting will replace the ``dtype`` without modifying the + memory (see also `ndarray.view` and `ndarray.astype`). + + Parameters + ---------- + None + + Returns + ------- + d : numpy dtype object + + See Also + -------- + ndarray.astype : Cast the values contained in the array to a new data-type. + ndarray.view : Create a view of the same data but a different data-type. + numpy.dtype + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(4).reshape((2, 2)) + >>> x + array([[0, 1], + [2, 3]]) + >>> x.dtype + dtype('int64') # may vary (OS, bitness) + >>> isinstance(x.dtype, np.dtype) + True + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('imag', + """ + The imaginary part of the array. + + Examples + -------- + >>> import numpy as np + >>> x = np.sqrt([1+0j, 0+1j]) + >>> x.imag + array([ 0. , 0.70710678]) + >>> x.imag.dtype + dtype('float64') + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('itemsize', + """ + Length of one array element in bytes. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1,2,3], dtype=np.float64) + >>> x.itemsize + 8 + >>> x = np.array([1,2,3], dtype=np.complex128) + >>> x.itemsize + 16 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('flags', + """ + Information about the memory layout of the array. + + Attributes + ---------- + C_CONTIGUOUS (C) + The data is in a single, C-style contiguous segment. + F_CONTIGUOUS (F) + The data is in a single, Fortran-style contiguous segment. + OWNDATA (O) + The array owns the memory it uses or borrows it from another object. + WRITEABLE (W) + The data area can be written to. Setting this to False locks + the data, making it read-only. A view (slice, etc.) inherits WRITEABLE + from its base array at creation time, but a view of a writeable + array may be subsequently locked while the base array remains writeable. + (The opposite is not true, in that a view of a locked array may not + be made writeable. However, currently, locking a base object does not + lock any views that already reference it, so under that circumstance it + is possible to alter the contents of a locked array via a previously + created writeable view onto it.) 
Attempting to change a non-writeable + array raises a RuntimeError exception. + ALIGNED (A) + The data and all elements are aligned appropriately for the hardware. + WRITEBACKIFCOPY (X) + This array is a copy of some other array. The C-API function + PyArray_ResolveWritebackIfCopy must be called before deallocating + to the base array will be updated with the contents of this array. + FNC + F_CONTIGUOUS and not C_CONTIGUOUS. + FORC + F_CONTIGUOUS or C_CONTIGUOUS (one-segment test). + BEHAVED (B) + ALIGNED and WRITEABLE. + CARRAY (CA) + BEHAVED and C_CONTIGUOUS. + FARRAY (FA) + BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS. + + Notes + ----- + The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``), + or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag + names are only supported in dictionary access. + + Only the WRITEBACKIFCOPY, WRITEABLE, and ALIGNED flags can be + changed by the user, via direct assignment to the attribute or dictionary + entry, or by calling `ndarray.setflags`. + + The array flags cannot be set arbitrarily: + + - WRITEBACKIFCOPY can only be set ``False``. + - ALIGNED can only be set ``True`` if the data is truly aligned. + - WRITEABLE can only be set ``True`` if the array owns its own memory + or the ultimate owner of the memory exposes a writeable buffer + interface or is a string. + + Arrays can be both C-style and Fortran-style contiguous simultaneously. + This is clear for 1-dimensional arrays, but can also be true for higher + dimensional arrays. + + Even for contiguous arrays a stride for a given dimension + ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1`` + or the array has no elements. + It does *not* generally hold that ``self.strides[-1] == self.itemsize`` + for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for + Fortran-style contiguous arrays is true. + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('flat', + """ + A 1-D iterator over the array. + + This is a `numpy.flatiter` instance, which acts similarly to, but is not + a subclass of, Python's built-in iterator object. + + See Also + -------- + flatten : Return a copy of the array collapsed into one dimension. + + flatiter + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(1, 7).reshape(2, 3) + >>> x + array([[1, 2, 3], + [4, 5, 6]]) + >>> x.flat[3] + 4 + >>> x.T + array([[1, 4], + [2, 5], + [3, 6]]) + >>> x.T.flat[3] + 5 + >>> type(x.flat) + + + An assignment example: + + >>> x.flat = 3; x + array([[3, 3, 3], + [3, 3, 3]]) + >>> x.flat[[1,4]] = 1; x + array([[3, 1, 3], + [3, 1, 3]]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('nbytes', + """ + Total bytes consumed by the elements of the array. + + Notes + ----- + Does not include memory consumed by non-element attributes of the + array object. + + See Also + -------- + sys.getsizeof + Memory consumed by the object itself without parents in case view. + This does include memory consumed by non-element attributes. + + Examples + -------- + >>> import numpy as np + >>> x = np.zeros((3,5,2), dtype=np.complex128) + >>> x.nbytes + 480 + >>> np.prod(x.shape) * x.itemsize + 480 + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('ndim', + """ + Number of array dimensions. 
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([1, 2, 3])
+    >>> x.ndim
+    1
+    >>> y = np.zeros((2, 3, 4))
+    >>> y.ndim
+    3
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('real',
+    """
+    The real part of the array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.sqrt([1+0j, 0+1j])
+    >>> x.real
+    array([ 1.        ,  0.70710678])
+    >>> x.real.dtype
+    dtype('float64')
+
+    See Also
+    --------
+    numpy.real : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('shape',
+    """
+    Tuple of array dimensions.
+
+    The shape property is usually used to get the current shape of an array,
+    but may also be used to reshape the array in-place by assigning a tuple of
+    array dimensions to it. As with `numpy.reshape`, one of the new shape
+    dimensions can be -1, in which case its value is inferred from the size of
+    the array and the remaining dimensions. Reshaping an array in-place will
+    fail if a copy is required.
+
+    .. warning::
+
+        Setting ``arr.shape`` is discouraged and may be deprecated in the
+        future. Using `ndarray.reshape` is the preferred approach.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([1, 2, 3, 4])
+    >>> x.shape
+    (4,)
+    >>> y = np.zeros((2, 3, 4))
+    >>> y.shape
+    (2, 3, 4)
+    >>> y.shape = (3, 8)
+    >>> y
+    array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
+           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]])
+    >>> y.shape = (3, 6)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: cannot reshape array of size 24 into shape (3,6)
+    >>> np.zeros((4,2))[::2].shape = (-1,)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    AttributeError: Incompatible shape for in-place modification. Use
+    `.reshape()` to make a copy with the desired shape.
+
+    See Also
+    --------
+    numpy.shape : Equivalent getter function.
+    numpy.reshape : Function similar to setting ``shape``.
+    ndarray.reshape : Method similar to setting ``shape``.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('size',
+    """
+    Number of elements in the array.
+
+    Equal to ``np.prod(a.shape)``, i.e., the product of the array's
+    dimensions.
+
+    Notes
+    -----
+    `a.size` returns a standard arbitrary precision Python integer. This
+    may not be the case with other methods of obtaining the same value
+    (like the suggested ``np.prod(a.shape)``, which returns an instance
+    of ``np.int_``), and may be relevant if the value is used further in
+    calculations that may overflow a fixed size integer type.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
+    >>> x.size
+    30
+    >>> np.prod(x.shape)
+    30
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('strides',
+    """
+    Tuple of bytes to step in each dimension when traversing an array.
+
+    The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
+    is::
+
+        offset = sum(np.array(i) * a.strides)
+
+    A more detailed explanation of strides can be found in
+    :ref:`arrays.ndarray`.
+
+    .. warning::
+
+        Setting ``arr.strides`` is discouraged and may be deprecated in the
+        future. `numpy.lib.stride_tricks.as_strided` should be preferred
+        to create a new view of the same data in a safer way.
+
+    Notes
+    -----
+    Imagine an array of 32-bit integers (each 4 bytes)::
+
+        x = np.array([[0, 1, 2, 3, 4],
+                      [5, 6, 7, 8, 9]], dtype=np.int32)
+
+    This array is stored in memory as 40 bytes, one after the other
+    (known as a contiguous block of memory).
The strides of an array tell + us how many bytes we have to skip in memory to move to the next position + along a certain axis. For example, we have to skip 4 bytes (1 value) to + move to the next column, but 20 bytes (5 values) to get to the same + position in the next row. As such, the strides for the array `x` will be + ``(20, 4)``. + + See Also + -------- + numpy.lib.stride_tricks.as_strided + + Examples + -------- + >>> import numpy as np + >>> y = np.reshape(np.arange(2 * 3 * 4, dtype=np.int32), (2, 3, 4)) + >>> y + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]], dtype=np.int32) + >>> y.strides + (48, 16, 4) + >>> y[1, 1, 1] + np.int32(17) + >>> offset = sum(y.strides * np.array((1, 1, 1))) + >>> offset // y.itemsize + np.int64(17) + + >>> x = np.reshape(np.arange(5*6*7*8, dtype=np.int32), (5, 6, 7, 8)) + >>> x = x.transpose(2, 3, 1, 0) + >>> x.strides + (32, 4, 224, 1344) + >>> i = np.array([3, 5, 2, 2], dtype=np.int32) + >>> offset = sum(i * x.strides) + >>> x[3, 5, 2, 2] + np.int32(813) + >>> offset // x.itemsize + np.int64(813) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('T', + """ + View of the transposed array. + + Same as ``self.transpose()``. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.T + array([[1, 3], + [2, 4]]) + + >>> a = np.array([1, 2, 3, 4]) + >>> a + array([1, 2, 3, 4]) + >>> a.T + array([1, 2, 3, 4]) + + See Also + -------- + transpose + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('mT', + """ + View of the matrix transposed array. + + The matrix transpose is the transpose of the last two dimensions, even + if the array is of higher dimension. + + .. versionadded:: 2.0 + + Raises + ------ + ValueError + If the array is of dimension less than 2. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> a + array([[1, 2], + [3, 4]]) + >>> a.mT + array([[1, 3], + [2, 4]]) + + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> a + array([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> a.mT + array([[[0, 2], + [1, 3]], + + [[4, 6], + [5, 7]]]) + + """)) +############################################################################## +# +# ndarray methods +# +############################################################################## + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', + """ + a.__array__([dtype], *, copy=None) + + For ``dtype`` parameter it returns a new reference to self if + ``dtype`` is not given or it matches array's data type. + A new array of provided data type is returned if ``dtype`` + is different from the current data type of the array. + For ``copy`` parameter it returns a new reference to self if + ``copy=False`` or ``copy=None`` and copying isn't enforced by ``dtype`` + parameter. The method returns a new array for ``copy=True``, regardless of + ``dtype`` parameter. + + A more detailed explanation of the ``__array__`` interface + can be found in :ref:`dunder_array.interface`. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_finalize__', + """ + a.__array_finalize__(obj, /) + + Present so subclasses can call super. Does nothing. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('__array_wrap__', + """ + a.__array_wrap__(array[, context], /) + + Returns a view of `array` with the same type as self. 
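+
+    A minimal sketch of the effect (assuming a plain ``ndarray`` subclass;
+    ufuncs use this hook to re-wrap their output arrays)::
+
+      >>> import numpy as np
+      >>> class MyArray(np.ndarray):
+      ...     pass
+      >>> a = np.arange(3).view(MyArray)
+      >>> type(np.add(a, 1)) is MyArray
+      True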
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__copy__',
+    """
+    a.__copy__()
+
+    Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
+
+    Equivalent to ``a.copy(order='K')``.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__class_getitem__',
+    """
+    a.__class_getitem__(item, /)
+
+    Return a parametrized wrapper around the `~numpy.ndarray` type.
+
+    .. versionadded:: 1.22
+
+    Returns
+    -------
+    alias : types.GenericAlias
+        A parametrized `~numpy.ndarray` type.
+
+    Examples
+    --------
+    >>> from typing import Any
+    >>> import numpy as np
+
+    >>> np.ndarray[Any, np.dtype[np.uint8]]
+    numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]]
+
+    See Also
+    --------
+    :pep:`585` : Type hinting generics in standard collections.
+    numpy.typing.NDArray : An ndarray alias :term:`generic <generic type>`
+                           w.r.t. its `dtype.type <numpy.dtype.type>`.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__deepcopy__',
+    """
+    a.__deepcopy__(memo, /)
+
+    Used if :func:`copy.deepcopy` is called on an array.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__reduce__',
+    """
+    a.__reduce__()
+
+    For pickling.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('__setstate__',
+    """
+    a.__setstate__(state, /)
+
+    For unpickling.
+
+    The `state` argument must be a sequence that contains the following
+    elements:
+
+    Parameters
+    ----------
+    version : int
+        optional pickle version. If omitted defaults to 0.
+    shape : tuple
+    dtype : data-type
+    isFortran : bool
+    rawdata : string or list
+        a binary string with the data (or a list if 'a' is an object array)
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('all',
+    """
+    a.all(axis=None, out=None, keepdims=False, *, where=True)
+
+    Returns True if all elements evaluate to True.
+
+    Refer to `numpy.all` for full documentation.
+
+    See Also
+    --------
+    numpy.all : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('any',
+    """
+    a.any(axis=None, out=None, keepdims=False, *, where=True)
+
+    Returns True if any of the elements of `a` evaluate to True.
+
+    Refer to `numpy.any` for full documentation.
+
+    See Also
+    --------
+    numpy.any : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argmax',
+    """
+    a.argmax(axis=None, out=None, *, keepdims=False)
+
+    Return indices of the maximum values along the given axis.
+
+    Refer to `numpy.argmax` for full documentation.
+
+    See Also
+    --------
+    numpy.argmax : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argmin',
+    """
+    a.argmin(axis=None, out=None, *, keepdims=False)
+
+    Return indices of the minimum values along the given axis.
+
+    Refer to `numpy.argmin` for detailed documentation.
+
+    See Also
+    --------
+    numpy.argmin : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argsort',
+    """
+    a.argsort(axis=-1, kind=None, order=None)
+
+    Returns the indices that would sort this array.
+
+    Refer to `numpy.argsort` for full documentation.
+
+    See Also
+    --------
+    numpy.argsort : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('argpartition',
+    """
+    a.argpartition(kth, axis=-1, kind='introselect', order=None)
+
+    Returns the indices that would partition this array.
+
+    Refer to `numpy.argpartition` for full documentation.
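+
+    For instance (a small, arbitrary array chosen only for illustration),
+    the index returned for ``kth=1`` points at the value that would land
+    in position 1 after a full sort:
+
+    >>> import numpy as np
+    >>> x = np.array([3, 4, 2, 1])
+    >>> int(x[x.argpartition(1)][1])
+    2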
+ + See Also + -------- + numpy.argpartition : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('astype', + """ + a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True) + + Copy of the array, cast to a specified type. + + Parameters + ---------- + dtype : str or dtype + Typecode or data-type to which the array is cast. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout order of the result. + 'C' means C order, 'F' means Fortran order, 'A' + means 'F' order if all the arrays are Fortran contiguous, + 'C' order otherwise, and 'K' means as close to the + order the array elements appear in memory as possible. + Default is 'K'. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'unsafe' + for backwards compatibility. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + subok : bool, optional + If True, then sub-classes will be passed-through (default), otherwise + the returned array will be forced to be a base-class array. + copy : bool, optional + By default, astype always returns a newly allocated array. If this + is set to false, and the `dtype`, `order`, and `subok` + requirements are satisfied, the input array is returned instead + of a copy. + + Returns + ------- + arr_t : ndarray + Unless `copy` is False and the other conditions for returning the input + array are satisfied (see description for `copy` input parameter), `arr_t` + is a new array of the same shape as the input array, with dtype, order + given by `dtype`, `order`. + + Raises + ------ + ComplexWarning + When casting from complex to float or int. To avoid this, + one should use ``a.real.astype(t)``. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 2.5]) + >>> x + array([1. , 2. , 2.5]) + + >>> x.astype(int) + array([1, 2, 2]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('byteswap', + """ + a.byteswap(inplace=False) + + Swap the bytes of the array elements + + Toggle between low-endian and big-endian data representation by + returning a byteswapped array, optionally swapped in-place. + Arrays of byte-strings are not swapped. The real and imaginary + parts of a complex number are swapped individually. + + Parameters + ---------- + inplace : bool, optional + If ``True``, swap bytes in-place, default is ``False``. + + Returns + ------- + out : ndarray + The byteswapped array. If `inplace` is ``True``, this is + a view to self. 
+ + Examples + -------- + >>> import numpy as np + >>> A = np.array([1, 256, 8755], dtype=np.int16) + >>> list(map(hex, A)) + ['0x1', '0x100', '0x2233'] + >>> A.byteswap(inplace=True) + array([ 256, 1, 13090], dtype=int16) + >>> list(map(hex, A)) + ['0x100', '0x1', '0x3322'] + + Arrays of byte-strings are not swapped + + >>> A = np.array([b'ceg', b'fac']) + >>> A.byteswap() + array([b'ceg', b'fac'], dtype='|S3') + + ``A.view(A.dtype.newbyteorder()).byteswap()`` produces an array with + the same values but different representation in memory + + >>> A = np.array([1, 2, 3],dtype=np.int64) + >>> A.view(np.uint8) + array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0], dtype=uint8) + >>> A.view(A.dtype.newbyteorder()).byteswap(inplace=True) + array([1, 2, 3], dtype='>i8') + >>> A.view(np.uint8) + array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, + 0, 3], dtype=uint8) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('choose', + """ + a.choose(choices, out=None, mode='raise') + + Use an index array to construct a new array from a set of choices. + + Refer to `numpy.choose` for full documentation. + + See Also + -------- + numpy.choose : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('clip', + """ + a.clip(min=None, max=None, out=None, **kwargs) + + Return an array whose values are limited to ``[min, max]``. + One of max or min must be given. + + Refer to `numpy.clip` for full documentation. + + See Also + -------- + numpy.clip : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('compress', + """ + a.compress(condition, axis=None, out=None) + + Return selected slices of this array along given axis. + + Refer to `numpy.compress` for full documentation. + + See Also + -------- + numpy.compress : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('conj', + """ + a.conj() + + Complex-conjugate all elements. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('conjugate', + """ + a.conjugate() + + Return the complex conjugate, element-wise. + + Refer to `numpy.conjugate` for full documentation. + + See Also + -------- + numpy.conjugate : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('copy', + """ + a.copy(order='C') + + Return a copy of the array. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :func:`numpy.copy` are very + similar but have different default values for their order= + arguments, and this function always passes sub-classes through.) + + See also + -------- + numpy.copy : Similar function with different default behavior + numpy.copyto + + Notes + ----- + This function is the preferred method for creating an array copy. The + function :func:`numpy.copy` is similar, but it defaults to using order 'K', + and will not pass sub-classes through by default. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3],[4,5,6]], order='F') + + >>> y = x.copy() + + >>> x.fill(0) + + >>> x + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y + array([[1, 2, 3], + [4, 5, 6]]) + + >>> y.flags['C_CONTIGUOUS'] + True + + For arrays containing Python objects (e.g. dtype=object), + the copy is a shallow one. The new array will contain the + same object which may lead to surprises if that object can + be modified (is mutable): + + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> b = a.copy() + >>> b[2][0] = 10 + >>> a + array([1, 'm', list([10, 3, 4])], dtype=object) + + To ensure all elements within an ``object`` array are copied, + use `copy.deepcopy`: + + >>> import copy + >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) + >>> c = copy.deepcopy(a) + >>> c[2][0] = 10 + >>> c + array([1, 'm', list([10, 3, 4])], dtype=object) + >>> a + array([1, 'm', list([2, 3, 4])], dtype=object) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('cumprod', + """ + a.cumprod(axis=None, dtype=None, out=None) + + Return the cumulative product of the elements along the given axis. + + Refer to `numpy.cumprod` for full documentation. + + See Also + -------- + numpy.cumprod : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('cumsum', + """ + a.cumsum(axis=None, dtype=None, out=None) + + Return the cumulative sum of the elements along the given axis. + + Refer to `numpy.cumsum` for full documentation. + + See Also + -------- + numpy.cumsum : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('diagonal', + """ + a.diagonal(offset=0, axis1=0, axis2=1) + + Return specified diagonals. In NumPy 1.9 the returned array is a + read-only view instead of a copy as in previous NumPy versions. In + a future version the read-only restriction will be removed. + + Refer to :func:`numpy.diagonal` for full documentation. + + See Also + -------- + numpy.diagonal : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dot')) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dump', + """ + a.dump(file) + + Dump a pickle of the array to the specified file. + The array can be read back with pickle.load or numpy.load. + + Parameters + ---------- + file : str or Path + A string naming the dump file. + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('dumps', + """ + a.dumps() + + Returns the pickle of the array as a string. + pickle.loads will convert the string back to an array. + + Parameters + ---------- + None + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('fill', + """ + a.fill(value) + + Fill the array with a scalar value. + + Parameters + ---------- + value : scalar + All elements of `a` will be assigned this value. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2]) + >>> a.fill(0) + >>> a + array([0, 0]) + >>> a = np.empty(2) + >>> a.fill(1) + >>> a + array([1., 1.]) + + Fill expects a scalar value and always behaves the same as assigning + to a single array element. The following is a rare example where this + distinction is important: + + >>> a = np.array([None, None], dtype=object) + >>> a[0] = np.array(3) + >>> a + array([array(3), None], dtype=object) + >>> a.fill(np.array(3)) + >>> a + array([array(3), array(3)], dtype=object) + + Where other forms of assignments will unpack the array being assigned: + + >>> a[...] 
= np.array(3) + >>> a + array([3, 3], dtype=object) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('flatten', + """ + a.flatten(order='C') + + Return a copy of the array collapsed into one dimension. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. + 'F' means to flatten in column-major (Fortran- + style) order. 'A' means to flatten in column-major + order if `a` is Fortran *contiguous* in memory, + row-major order otherwise. 'K' means to flatten + `a` in the order the elements occur in memory. + The default is 'C'. + + Returns + ------- + y : ndarray + A copy of the input array, flattened to one dimension. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2], [3,4]]) + >>> a.flatten() + array([1, 2, 3, 4]) + >>> a.flatten('F') + array([1, 3, 2, 4]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('getfield', + """ + a.getfield(dtype, offset=0) + + Returns a field of the given array as a certain type. + + A field is a view of the array data with a given data-type. The values in + the view are determined by the given type and the offset into the current + array in bytes. The offset needs to be such that the view dtype fits in the + array dtype; for example an array of dtype complex128 has 16-byte elements. + If taking a view with a 32-bit integer (4 bytes), the offset needs to be + between 0 and 12 bytes. + + Parameters + ---------- + dtype : str or dtype + The data type of the view. The dtype size of the view can not be larger + than that of the array itself. + offset : int + Number of bytes to skip before beginning the element view. + + Examples + -------- + >>> import numpy as np + >>> x = np.diag([1.+1.j]*2) + >>> x[1, 1] = 2 + 4.j + >>> x + array([[1.+1.j, 0.+0.j], + [0.+0.j, 2.+4.j]]) + >>> x.getfield(np.float64) + array([[1., 0.], + [0., 2.]]) + + By choosing an offset of 8 bytes we can select the complex part of the + array for our view: + + >>> x.getfield(np.float64, offset=8) + array([[1., 0.], + [0., 4.]]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('item', + """ + a.item(*args) + + Copy an element of an array to a standard Python scalar and return it. + + Parameters + ---------- + \\*args : Arguments (variable number and type) + + * none: in this case, the method only works for arrays + with one element (`a.size == 1`), which element is + copied into a standard Python scalar object and returned. + + * int_type: this argument is interpreted as a flat index into + the array, specifying which element to copy and return. + + * tuple of int_types: functions as does a single int_type argument, + except that the argument is interpreted as an nd-index into the + array. + + Returns + ------- + z : Standard Python scalar object + A copy of the specified element of the array as a suitable + Python scalar + + Notes + ----- + When the data type of `a` is longdouble or clongdouble, item() returns + a scalar array object because there is no available Python scalar that + would not lose information. Void arrays return a buffer object for item(), + unless fields are defined, in which case a tuple is returned. + + `item` is very similar to a[args], except, instead of an array scalar, + a standard Python scalar is returned. 
This can be useful for speeding up
+    access to elements of the array and doing arithmetic on elements of the
+    array using Python's optimized math.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.random.seed(123)
+    >>> x = np.random.randint(9, size=(3, 3))
+    >>> x
+    array([[2, 2, 6],
+           [1, 3, 6],
+           [1, 0, 1]])
+    >>> x.item(3)
+    1
+    >>> x.item(7)
+    0
+    >>> x.item((0, 1))
+    2
+    >>> x.item((2, 2))
+    1
+
+    For an array with object dtype, elements are returned as-is.
+
+    >>> a = np.array([np.int64(1)], dtype=object)
+    >>> a.item()  # return np.int64
+    np.int64(1)
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('max',
+    """
+    a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Return the maximum along a given axis.
+
+    Refer to `numpy.amax` for full documentation.
+
+    See Also
+    --------
+    numpy.amax : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('mean',
+    """
+    a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
+
+    Returns the average of the array elements along given axis.
+
+    Refer to `numpy.mean` for full documentation.
+
+    See Also
+    --------
+    numpy.mean : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('min',
+    """
+    a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Return the minimum along a given axis.
+
+    Refer to `numpy.amin` for full documentation.
+
+    See Also
+    --------
+    numpy.amin : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('nonzero',
+    """
+    a.nonzero()
+
+    Return the indices of the elements that are non-zero.
+
+    Refer to `numpy.nonzero` for full documentation.
+
+    See Also
+    --------
+    numpy.nonzero : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('prod',
+    """
+    a.prod(axis=None, dtype=None, out=None, keepdims=False,
+           initial=1, where=True)
+
+    Return the product of the array elements over the given axis
+
+    Refer to `numpy.prod` for full documentation.
+
+    See Also
+    --------
+    numpy.prod : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('put',
+    """
+    a.put(indices, values, mode='raise')
+
+    Set ``a.flat[n] = values[n]`` for all `n` in indices.
+
+    Refer to `numpy.put` for full documentation.
+
+    See Also
+    --------
+    numpy.put : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('ravel',
+    """
+    a.ravel([order])
+
+    Return a flattened array.
+
+    Refer to `numpy.ravel` for full documentation.
+
+    See Also
+    --------
+    numpy.ravel : equivalent function
+
+    ndarray.flat : a flat iterator on the array.
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('repeat',
+    """
+    a.repeat(repeats, axis=None)
+
+    Repeat elements of an array.
+
+    Refer to `numpy.repeat` for full documentation.
+
+    See Also
+    --------
+    numpy.repeat : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape',
+    """
+    a.reshape(shape, /, *, order='C', copy=None)
+
+    Returns an array containing the same data with a new shape.
+
+    Refer to `numpy.reshape` for full documentation.
+
+    See Also
+    --------
+    numpy.reshape : equivalent function
+
+    Notes
+    -----
+    Unlike the free function `numpy.reshape`, this method on `ndarray` allows
+    the elements of the shape parameter to be passed in as separate arguments.
+    For example, ``a.reshape(10, 11)`` is equivalent to
+    ``a.reshape((10, 11))``.
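+
+    A quick illustration of the separate-arguments form described above
+    (values chosen arbitrarily):
+
+    >>> import numpy as np
+    >>> a = np.arange(6)
+    >>> a.reshape(2, 3)
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> a.reshape((2, 3))
+    array([[0, 1, 2],
+           [3, 4, 5]])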
+ + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('resize', + """ + a.resize(new_shape, refcheck=True) + + Change shape and size of array in-place. + + Parameters + ---------- + new_shape : tuple of ints, or `n` ints + Shape of resized array. + refcheck : bool, optional + If False, reference count will not be checked. Default is True. + + Returns + ------- + None + + Raises + ------ + ValueError + If `a` does not own its own data or references or views to it exist, + and the data memory must be changed. + PyPy only: will always raise if the data memory must be changed, since + there is no reliable way to determine if references or views to it + exist. + + SystemError + If the `order` keyword argument is specified. This behaviour is a + bug in NumPy. + + See Also + -------- + resize : Return a new array with the specified shape. + + Notes + ----- + This reallocates space for the data area if necessary. + + Only contiguous arrays (data elements consecutive in memory) can be + resized. + + The purpose of the reference count check is to make sure you + do not use this array as a buffer for another Python object and then + reallocate the memory. However, reference counts can increase in + other ways so if you are sure that you have not shared the memory + for this array with another Python object, then you may safely set + `refcheck` to False. + + Examples + -------- + Shrinking an array: array is flattened (in the order that the data are + stored in memory), resized, and reshaped: + + >>> import numpy as np + + >>> a = np.array([[0, 1], [2, 3]], order='C') + >>> a.resize((2, 1)) + >>> a + array([[0], + [1]]) + + >>> a = np.array([[0, 1], [2, 3]], order='F') + >>> a.resize((2, 1)) + >>> a + array([[0], + [2]]) + + Enlarging an array: as above, but missing entries are filled with zeros: + + >>> b = np.array([[0, 1], [2, 3]]) + >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple + >>> b + array([[0, 1, 2], + [3, 0, 0]]) + + Referencing an array prevents resizing... + + >>> c = a + >>> a.resize((1, 1)) + Traceback (most recent call last): + ... + ValueError: cannot resize an array that references or is referenced ... + + Unless `refcheck` is False: + + >>> a.resize((1, 1), refcheck=False) + >>> a + array([[0]]) + >>> c + array([[0]]) + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('round', + """ + a.round(decimals=0, out=None) + + Return `a` with each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. + + See Also + -------- + numpy.around : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('searchsorted', + """ + a.searchsorted(v, side='left', sorter=None) + + Find indices where elements of v should be inserted in a to maintain order. + + For full documentation, see `numpy.searchsorted` + + See Also + -------- + numpy.searchsorted : equivalent function + + """)) + + +add_newdoc('numpy._core.multiarray', 'ndarray', ('setfield', + """ + a.setfield(val, dtype, offset=0) + + Put a value into a specified place in a field defined by a data-type. + + Place `val` into `a`'s field defined by `dtype` and beginning `offset` + bytes into the field. + + Parameters + ---------- + val : object + Value to be placed in field. + dtype : dtype object + Data-type of the field in which to place `val`. + offset : int, optional + The number of bytes into the field at which to place `val`. 
+
+    Returns
+    -------
+    None
+
+    See Also
+    --------
+    getfield
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.eye(3)
+    >>> x.getfield(np.float64)
+    array([[1., 0., 0.],
+           [0., 1., 0.],
+           [0., 0., 1.]])
+    >>> x.setfield(3, np.int32)
+    >>> x.getfield(np.int32)
+    array([[3, 3, 3],
+           [3, 3, 3],
+           [3, 3, 3]], dtype=int32)
+    >>> x
+    array([[1.0e+000, 1.5e-323, 1.5e-323],
+           [1.5e-323, 1.0e+000, 1.5e-323],
+           [1.5e-323, 1.5e-323, 1.0e+000]])
+    >>> x.setfield(np.eye(3), np.int32)
+    >>> x
+    array([[1., 0., 0.],
+           [0., 1., 0.],
+           [0., 0., 1.]])
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('setflags',
+    """
+    a.setflags(write=None, align=None, uic=None)
+
+    Set array flags WRITEABLE, ALIGNED, WRITEBACKIFCOPY,
+    respectively.
+
+    These Boolean-valued flags affect how numpy interprets the memory
+    area used by `a` (see Notes below). The ALIGNED flag can only
+    be set to True if the data is actually aligned according to the type.
+    The WRITEBACKIFCOPY flag can never be set
+    to True. The flag WRITEABLE can only be set to True if the array owns its
+    own memory, or the ultimate owner of the memory exposes a writeable buffer
+    interface, or is a string. (The exception for string is made so that
+    unpickling can be done without copying memory.)
+
+    Parameters
+    ----------
+    write : bool, optional
+        Describes whether or not `a` can be written to.
+    align : bool, optional
+        Describes whether or not `a` is aligned properly for its type.
+    uic : bool, optional
+        Describes whether or not `a` is a copy of another "base" array.
+
+    Notes
+    -----
+    Array flags provide information about how the memory area used
+    for the array is to be interpreted. There are 7 Boolean flags
+    in use, only three of which can be changed by the user:
+    WRITEBACKIFCOPY, WRITEABLE, and ALIGNED.
+
+    WRITEABLE (W) the data area can be written to;
+
+    ALIGNED (A) the data and strides are aligned appropriately for the hardware
+    (as determined by the compiler);
+
+    WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
+    by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
+    called, the base array will be updated with the contents of this array.
+
+    All flags can be accessed using the single (upper case) letter as well
+    as the full name.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> y = np.array([[3, 1, 7],
+    ...               [2, 0, 0],
+    ...               [8, 5, 9]])
+    >>> y
+    array([[3, 1, 7],
+           [2, 0, 0],
+           [8, 5, 9]])
+    >>> y.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : True
+      WRITEABLE : True
+      ALIGNED : True
+      WRITEBACKIFCOPY : False
+    >>> y.setflags(write=0, align=0)
+    >>> y.flags
+      C_CONTIGUOUS : True
+      F_CONTIGUOUS : False
+      OWNDATA : True
+      WRITEABLE : False
+      ALIGNED : False
+      WRITEBACKIFCOPY : False
+    >>> y.setflags(uic=1)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: cannot set WRITEBACKIFCOPY flag to True
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('sort',
+    """
+    a.sort(axis=-1, kind=None, order=None)
+
+    Sort an array in-place. Refer to `numpy.sort` for full documentation.
+
+    Parameters
+    ----------
+    axis : int, optional
+        Axis along which to sort. Default is -1, which means sort along the
+        last axis.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+        and 'mergesort' use timsort under the covers and, in general, the
+        actual implementation will vary with datatype. The 'mergesort' option
+        is retained for backwards compatibility.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+
+    See Also
+    --------
+    numpy.sort : Return a sorted copy of an array.
+    numpy.argsort : Indirect sort.
+    numpy.lexsort : Indirect stable sort on multiple keys.
+    numpy.searchsorted : Find elements in sorted array.
+    numpy.partition : Partial sort.
+
+    Notes
+    -----
+    See `numpy.sort` for notes on the different sorting algorithms.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1,4], [3,1]])
+    >>> a.sort(axis=1)
+    >>> a
+    array([[1, 4],
+           [1, 3]])
+    >>> a.sort(axis=0)
+    >>> a
+    array([[1, 3],
+           [1, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
+    >>> a.sort(order='y')
+    >>> a
+    array([(b'c', 1), (b'a', 2)],
+          dtype=[('x', 'S1'), ('y', '<i8')])
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('partition',
+    """
+    a.partition(kth, axis=-1, kind='introselect', order=None)
+
+    Partially sorts the elements in the array in such a way that the value
+    of the element in k-th position is in the position it would be in a
+    sorted array. All elements smaller than the k-th element are moved
+    before this element and all equal or greater are moved behind it. The
+    ordering of the elements in the two partitions is undefined.
+
+    Refer to `numpy.partition` for full documentation.
+
+    See Also
+    --------
+    numpy.partition : equivalent function
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([3, 4, 2, 1])
+    >>> a.partition(3)
+    >>> a
+    array([2, 1, 3, 4]) # may vary
+
+    >>> a.partition((1, 3))
+    >>> a
+    array([1, 2, 3, 4])
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('squeeze',
+    """
+    a.squeeze(axis=None)
+
+    Remove axes of length one from `a`.
+
+    Refer to `numpy.squeeze` for full documentation.
+
+    See Also
+    --------
+    numpy.squeeze : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('std',
+    """
+    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+    Returns the standard deviation of the array elements along given axis.
+
+    Refer to `numpy.std` for full documentation.
+
+    See Also
+    --------
+    numpy.std : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('sum',
+    """
+    a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
+
+    Return the sum of the array elements over the given axis.
+
+    Refer to `numpy.sum` for full documentation.
+
+    See Also
+    --------
+    numpy.sum : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('swapaxes',
+    """
+    a.swapaxes(axis1, axis2)
+
+    Return a view of the array with `axis1` and `axis2` interchanged.
+
+    Refer to `numpy.swapaxes` for full documentation.
+
+    See Also
+    --------
+    numpy.swapaxes : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('take',
+    """
+    a.take(indices, axis=None, out=None, mode='raise')
+
+    Return an array formed from the elements of `a` at the given indices.
+
+    Refer to `numpy.take` for full documentation.
+
+    See Also
+    --------
+    numpy.take : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tofile',
+    """
+    a.tofile(fid, sep="", format="%s")
+
+    Write array to a file as text or binary (default).
+
+    Data is always written in 'C' order, independent of the order of `a`.
+    The data produced by this method can be recovered using the function
+    fromfile().
+
+    Parameters
+    ----------
+    fid : file or str or Path
+        An open file object, or a string containing a filename.
+    sep : str
+        Separator between array items for text output.
+        If "" (empty), a binary file is written, equivalent to
+        ``file.write(a.tobytes())``.
+    format : str
+        Format string for text file output.
+        Each entry in the array is formatted to text by first converting
+        it to the closest Python type, and then using "format" % item.
+
+    Notes
+    -----
+    This is a convenience function for quick storage of array data.
+    Information on endianness and precision is lost, so this method is not a
+    good choice for files intended to archive data or transport data between
+    machines with different endianness. Some of these problems can be overcome
+    by outputting the data as text files, at the expense of speed and file
+    size.
+
+    When fid is a file object, array contents are directly written to the
+    file, bypassing the file object's ``write`` method. As a result, tofile
+    cannot be used with file objects supporting compression (e.g., GzipFile)
+    or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tolist',
+    """
+    a.tolist()
+
+    Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
+
+    Return a copy of the array data as a (nested) Python list.
+    Data items are converted to the nearest compatible builtin Python type, via
+    the `~numpy.ndarray.item` function.
+
+    If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
+    not be a list at all, but a simple Python scalar.
+
+    Parameters
+    ----------
+    none
+
+    Returns
+    -------
+    y : object, or list of object, or list of list of object, or ...
+        The possibly nested list of array elements.
+
+    Notes
+    -----
+    The array may be recreated via ``a = np.array(a.tolist())``, although this
+    may sometimes lose precision.
+
+    Examples
+    --------
+    For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+    except that ``tolist`` changes numpy scalars to Python scalars:
+
+    >>> import numpy as np
+    >>> a = np.uint32([1, 2])
+    >>> a_list = list(a)
+    >>> a_list
+    [np.uint32(1), np.uint32(2)]
+    >>> type(a_list[0])
+    <class 'numpy.uint32'>
+    >>> a_tolist = a.tolist()
+    >>> a_tolist
+    [1, 2]
+    >>> type(a_tolist[0])
+    <class 'int'>
+
+    Additionally, for a 2D array, ``tolist`` applies recursively:
+
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> list(a)
+    [array([1, 2]), array([3, 4])]
+    >>> a.tolist()
+    [[1, 2], [3, 4]]
+
+    The base case for this recursion is a 0D array:
+
+    >>> a = np.array(1)
+    >>> list(a)
+    Traceback (most recent call last):
+      ...
+    TypeError: iteration over a 0-d array
+    >>> a.tolist()
+    1
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('tobytes', """
+    a.tobytes(order='C')
+
+    Construct Python bytes containing the raw data bytes in the array.
+
+    Constructs Python bytes showing a copy of the raw contents of
+    data memory. The bytes object is produced in C-order by default.
+    This behavior is controlled by the ``order`` parameter.
+
+    Parameters
+    ----------
+    order : {'C', 'F', 'A'}, optional
+        Controls the memory layout of the bytes object. 'C' means C-order,
+        'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
+        Fortran contiguous, 'C' otherwise. Default is 'C'.
+
+    Returns
+    -------
+    s : bytes
+        Python bytes exhibiting a copy of `a`'s raw data.
+
+    See also
+    --------
+    frombuffer
+        Inverse of this operation, construct a 1-dimensional array from Python
+        bytes.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
+    >>> x.tobytes()
+    b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
+    >>> x.tobytes('C') == x.tobytes()
+    True
+    >>> x.tobytes('F')
+    b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('trace',
+    """
+    a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
+
+    Return the sum along diagonals of the array.
+
+    Refer to `numpy.trace` for full documentation.
+
+    See Also
+    --------
+    numpy.trace : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('transpose',
+    """
+    a.transpose(*axes)
+
+    Returns a view of the array with axes transposed.
+
+    Refer to `numpy.transpose` for full documentation.
+
+    Parameters
+    ----------
+    axes : None, tuple of ints, or `n` ints
+
+        * None or no argument: reverses the order of the axes.
+
+        * tuple of ints: `i` in the `j`-th place in the tuple means that the
+          array's `i`-th axis becomes the transposed array's `j`-th axis.
+
+        * `n` ints: same as an n-tuple of the same ints (this form is
+          intended simply as a "convenience" alternative to the tuple form).
+
+    Returns
+    -------
+    p : ndarray
+        View of the array with its axes suitably permuted.
+
+    See Also
+    --------
+    transpose : Equivalent function.
+    ndarray.T : Array property returning the array transposed.
+    ndarray.reshape : Give a new shape to an array without changing its data.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> a
+    array([[1, 2],
+           [3, 4]])
+    >>> a.transpose()
+    array([[1, 3],
+           [2, 4]])
+    >>> a.transpose((1, 0))
+    array([[1, 3],
+           [2, 4]])
+    >>> a.transpose(1, 0)
+    array([[1, 3],
+           [2, 4]])
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> a
+    array([1, 2, 3, 4])
+    >>> a.transpose()
+    array([1, 2, 3, 4])
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('var',
+    """
+    a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
+
+    Returns the variance of the array elements, along given axis.
+
+    Refer to `numpy.var` for full documentation.
+
+    See Also
+    --------
+    numpy.var : equivalent function
+
+    """))
+
+
+add_newdoc('numpy._core.multiarray', 'ndarray', ('view',
+    """
+    a.view([dtype][, type])
+
+    New view of array with the same data.
+
+    .. note::
+        Passing None for ``dtype`` is different from omitting the parameter,
+        since the former invokes ``dtype(None)`` which is an alias for
+        ``dtype('float64')``.
+
+    Parameters
+    ----------
+    dtype : data-type or ndarray sub-class, optional
+        Data-type descriptor of the returned view, e.g., float32 or int16.
+        Omitting it results in the view having the same data-type as `a`.
+        This argument can also be specified as an ndarray sub-class, which
+        then specifies the type of the returned object (this is equivalent to
+        setting the ``type`` parameter).
+    type : Python type, optional
+        Type of the returned view, e.g., ndarray or matrix. Again, omission
+        of the parameter results in type preservation.
+
+    Notes
+    -----
+    ``a.view()`` is used two different ways:
+
+    ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
+    of the array's memory with a different data-type. This can cause a
+    reinterpretation of the bytes of memory.
+
+    ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
+    returns an instance of `ndarray_subclass` that looks at the same array
+    (same shape, dtype, etc.) This does not cause a reinterpretation of the
+    memory.
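+
+    A compact illustration of these two call styles (tiny arrays, chosen
+    only for brevity):
+
+    >>> import numpy as np
+    >>> np.arange(2, dtype=np.int16).view(np.uint16)  # reinterpret the bytes
+    array([0, 1], dtype=uint16)
+    >>> type(np.zeros(2, dtype=[('a', 'i4')]).view(np.recarray))  # change the type only
+    <class 'numpy.recarray'>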
+
+    For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
+    bytes per entry than the previous dtype (for example, converting a regular
+    array to a structured array), then the last axis of ``a`` must be
+    contiguous. This axis will be resized in the result.
+
+    .. versionchanged:: 1.23.0
+        Only the last axis needs to be contiguous. Previously, the entire array
+        had to be C-contiguous.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([(-1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
+
+    Viewing array data using a different type and dtype:
+
+    >>> nonneg = np.dtype([("a", np.uint8), ("b", np.uint8)])
+    >>> y = x.view(dtype=nonneg, type=np.recarray)
+    >>> x["a"]
+    array([-1], dtype=int8)
+    >>> y.a
+    array([255], dtype=uint8)
+
+    Creating a view on a structured array so it can be used in calculations
+
+    >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+    >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+    >>> xv
+    array([[1, 2],
+           [3, 4]], dtype=int8)
+    >>> xv.mean(0)
+    array([2., 3.])
+
+    Making changes to the view changes the underlying array
+
+    >>> xv[0,1] = 20
+    >>> x
+    array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
+
+    Using a view to convert an array to a recarray:
+
+    >>> z = x.view(np.recarray)
+    >>> z.a
+    array([1, 3], dtype=int8)
+
+    Views share data:
+
+    >>> x[0] = (9, 10)
+    >>> z[0]
+    np.record((9, 10), dtype=[('a', 'i1'), ('b', 'i1')])
+
+    Views that change the dtype size (bytes per entry) should normally be
+    avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
+
+    >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16)
+    >>> y = x[:, ::2]
+    >>> y
+    array([[1, 3],
+           [4, 6]], dtype=int16)
+    >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
+    Traceback (most recent call last):
+      ...
+    ValueError: To change to a dtype of a different size, the last axis must be contiguous
+    >>> z = y.copy()
+    >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
+    array([[(1, 3)],
+           [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+    However, views that change dtype are totally fine for arrays with a
+    contiguous last axis, even if the rest of the axes are not C-contiguous:
+
+    >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4)
+    >>> x.transpose(1, 0, 2).view(np.int16)
+    array([[[ 256,  770],
+            [3340, 3854]],
+
+           [[1284, 1798],
+            [4368, 4882]],
+
+           [[2312, 2826],
+            [5396, 5910]]], dtype=int16)
+
+    """))
+
+
+##############################################################################
+#
+# umath functions
+#
+##############################################################################
+
+add_newdoc('numpy._core.umath', 'frompyfunc',
+    """
+    frompyfunc(func, /, nin, nout, *[, identity])
+
+    Takes an arbitrary Python function and returns a NumPy ufunc.
+
+    Can be used, for example, to add broadcasting to a built-in Python
+    function (see Examples section).
+
+    Parameters
+    ----------
+    func : Python function object
+        An arbitrary Python function.
+    nin : int
+        The number of input arguments.
+    nout : int
+        The number of objects returned by `func`.
+    identity : object, optional
+        The value to use for the `~numpy.ufunc.identity` attribute of the resulting
+        object. If specified, this is equivalent to setting the underlying
+        C ``identity`` field to ``PyUFunc_IdentityValue``.
+        If omitted, the identity is set to ``PyUFunc_None``. Note that this is
+        _not_ equivalent to setting the identity to ``None``, which implies the
+        operation is reorderable.
+
+    Returns
+    -------
+    out : ufunc
+        Returns a NumPy universal function (``ufunc``) object.
+
+    See Also
+    --------
+    vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
+
+    Notes
+    -----
+    The returned ufunc always returns PyObject arrays.
+
+    Examples
+    --------
+    Use frompyfunc to add broadcasting to the Python function ``oct``:
+
+    >>> import numpy as np
+    >>> oct_array = np.frompyfunc(oct, 1, 1)
+    >>> oct_array(np.array((10, 30, 100)))
+    array(['0o12', '0o36', '0o144'], dtype=object)
+    >>> np.array((oct(10), oct(30), oct(100))) # for comparison
+    array(['0o12', '0o36', '0o144'], dtype='<U5')
+
+    """)
+
+add_newdoc('numpy._core.umath', '_add_newdoc_ufunc',
+    """
+    add_ufunc_docstring(ufunc, new_docstring)
+
+    Replace the docstring for a ufunc with new_docstring.
+    This method will only work if the current docstring for
+    the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
+
+    Parameters
+    ----------
+    ufunc : numpy.ufunc
+        A ufunc whose current doc is NULL.
+    new_docstring : string
+        The new docstring for the ufunc.
+
+    Notes
+    -----
+    This method allocates memory for new_docstring on
+    the heap. Technically this creates a memory leak, since this
+    memory will not be reclaimed until the end of the program
+    even if the ufunc itself is removed. However this will only
+    be a problem if the user is repeatedly creating ufuncs with
+    no documentation, adding documentation via add_newdoc_ufunc,
+    and then throwing away the ufunc.
+    """)
+
+add_newdoc('numpy._core.multiarray', 'get_handler_name',
+    """
+    get_handler_name(a: ndarray) -> str,None
+
+    Return the name of the memory handler used by `a`. If not provided, return
+    the name of the memory handler that will be used to allocate data for the
+    next `ndarray` in this context. May return None if `a` does not own its
+    memory, in which case you can traverse ``a.base`` for a memory handler.
+    """)
+
+add_newdoc('numpy._core.multiarray', 'get_handler_version',
+    """
+    get_handler_version(a: ndarray) -> int,None
+
+    Return the version of the memory handler used by `a`. If not provided,
+    return the version of the memory handler that will be used to allocate data
+    for the next `ndarray` in this context. May return None if `a` does not own
+    its memory, in which case you can traverse ``a.base`` for a memory handler.
+    """)
+
+add_newdoc('numpy._core._multiarray_umath', '_array_converter',
+    """
+    _array_converter(*array_likes)
+
+    Helper to convert one or more objects to arrays. Integrates machinery
+    to deal with the ``result_type`` and ``__array_wrap__``.
+
+    The reason for this is that e.g. ``result_type`` needs to convert to arrays
+    to find the ``dtype``. But converting to an array before calling
+    ``result_type`` would incorrectly "forget" whether it was a Python int,
+    float, or complex.
+    """)
+
+add_newdoc(
+    'numpy._core._multiarray_umath', '_array_converter', ('scalar_input',
+    """
+    A tuple which indicates for each input whether it was a scalar that
+    was coerced to a 0-D array (and was not already an array or something
+    converted via a protocol like ``__array__()``).
+    """))
+
+add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('as_arrays',
+    """
+    as_arrays(/, subok=True, pyscalars="convert_if_no_array")
+
+    Return the inputs as arrays or scalars.
+
+    Parameters
+    ----------
+    subok : True or False, optional
+        Whether array subclasses are preserved.
+    pyscalars : {"convert", "preserve", "convert_if_no_array"}, optional
+        To allow NEP 50 weak promotion later, it may be desirable to preserve
+        Python scalars. As default, these are preserved unless all inputs
+        are Python scalars. "convert" enforces an array return.
+ """)) + +add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('result_type', + """result_type(/, extra_dtype=None, ensure_inexact=False) + + Find the ``result_type`` just as ``np.result_type`` would, but taking + into account that the original inputs (before converting to an array) may + have been Python scalars with weak promotion. + + Parameters + ---------- + extra_dtype : dtype instance or class + An additional DType or dtype instance to promote (e.g. could be used + to ensure the result precision is at least float32). + ensure_inexact : True or False + When ``True``, ensures a floating point (or complex) result replacing + the ``arr * 1.`` or ``result_type(..., 0.0)`` pattern. + """)) + +add_newdoc('numpy._core._multiarray_umath', '_array_converter', ('wrap', + """ + wrap(arr, /, to_scalar=None) + + Call ``__array_wrap__`` on ``arr`` if ``arr`` is not the same subclass + as the input the ``__array_wrap__`` method was retrieved from. + + Parameters + ---------- + arr : ndarray + The object to be wrapped. Normally an ndarray or subclass, + although for backward compatibility NumPy scalars are also accepted + (these will be converted to a NumPy array before being passed on to + the ``__array_wrap__`` method). + to_scalar : {True, False, None}, optional + When ``True`` will convert a 0-d array to a scalar via ``result[()]`` + (with a fast-path for non-subclasses). If ``False`` the result should + be an array-like (as ``__array_wrap__`` is free to return a non-array). + By default (``None``), a scalar is returned if all inputs were scalar. + """)) + + +add_newdoc('numpy._core.multiarray', '_get_madvise_hugepage', + """ + _get_madvise_hugepage() -> bool + + Get use of ``madvise (2)`` MADV_HUGEPAGE support when + allocating the array data. Returns the currently set value. + See `global_state` for more information. + """) + +add_newdoc('numpy._core.multiarray', '_set_madvise_hugepage', + """ + _set_madvise_hugepage(enabled: bool) -> bool + + Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when + allocating the array data. Returns the previously set value. + See `global_state` for more information. + """) + + +############################################################################## +# +# Documentation for ufunc attributes and methods +# +############################################################################## + + +############################################################################## +# +# ufunc object +# +############################################################################## + +add_newdoc('numpy._core', 'ufunc', + """ + Functions that operate element by element on whole arrays. + + To see the documentation for a specific ufunc, use `info`. For + example, ``np.info(np.sin)``. Because ufuncs are written in C + (for speed) and linked into Python with NumPy's ufunc facility, + Python's help() function finds this page whenever help() is called + on a ufunc. + + A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`. + + **Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)`` + + Apply `op` to the arguments `*x` elementwise, broadcasting the arguments. + + The broadcasting rules are: + + * Dimensions of length 1 may be prepended to either array. + * Arrays may be repeated along dimensions of length 1. + + Parameters + ---------- + *x : array_like + Input arrays. + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location(s) into which the result(s) are stored. 
+ If not provided or None, new array(s) are created by the ufunc. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional, + or a tuple with length equal to the number of outputs (where None + can be used for allocation by the ufunc). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + + where : array_like, optional + This condition is broadcast over the input. At locations where the + condition is True, the `out` array will be set to the ufunc result. + Elsewhere, the `out` array will retain its original value. + Note that if an uninitialized `out` array is created via the default + ``out=None``, locations within it where the condition is False will + remain uninitialized. + **kwargs + For other keyword-only arguments, see the :ref:`ufunc docs `. + + Returns + ------- + r : ndarray or tuple of ndarray + `r` will have the shape that the arrays in `x` broadcast to; if `out` is + provided, it will be returned. If not, `r` will be allocated and + may contain uninitialized values. If the function has more than one + output, then the result will be a tuple of arrays. + + """) + + +############################################################################## +# +# ufunc attributes +# +############################################################################## + +add_newdoc('numpy._core', 'ufunc', ('identity', + """ + The identity value. + + Data attribute containing the identity element for the ufunc, + if it has one. If it does not, the attribute value is None. + + Examples + -------- + >>> import numpy as np + >>> np.add.identity + 0 + >>> np.multiply.identity + 1 + >>> print(np.power.identity) + None + >>> print(np.exp.identity) + None + """)) + +add_newdoc('numpy._core', 'ufunc', ('nargs', + """ + The number of arguments. + + Data attribute containing the number of arguments the ufunc takes, including + optional ones. + + Notes + ----- + Typically this value will be one more than what you might expect + because all ufuncs take the optional "out" argument. + + Examples + -------- + >>> import numpy as np + >>> np.add.nargs + 3 + >>> np.multiply.nargs + 3 + >>> np.power.nargs + 3 + >>> np.exp.nargs + 2 + """)) + +add_newdoc('numpy._core', 'ufunc', ('nin', + """ + The number of inputs. + + Data attribute containing the number of arguments the ufunc treats as input. + + Examples + -------- + >>> import numpy as np + >>> np.add.nin + 2 + >>> np.multiply.nin + 2 + >>> np.power.nin + 2 + >>> np.exp.nin + 1 + """)) + +add_newdoc('numpy._core', 'ufunc', ('nout', + """ + The number of outputs. + + Data attribute containing the number of arguments the ufunc treats as output. + + Notes + ----- + Since all ufuncs can take output arguments, this will always be at least 1. + + Examples + -------- + >>> import numpy as np + >>> np.add.nout + 1 + >>> np.multiply.nout + 1 + >>> np.power.nout + 1 + >>> np.exp.nout + 1 + + """)) + +add_newdoc('numpy._core', 'ufunc', ('ntypes', + """ + The number of types. + + The number of numerical NumPy types - of which there are 18 total - on which + the ufunc can operate. + + See Also + -------- + numpy.ufunc.types + + Examples + -------- + >>> import numpy as np + >>> np.add.ntypes + 22 + >>> np.multiply.ntypes + 23 + >>> np.power.ntypes + 21 + >>> np.exp.ntypes + 10 + >>> np.remainder.ntypes + 16 + + """)) + +add_newdoc('numpy._core', 'ufunc', ('types', + """ + Returns a list with types grouped input->output. 
+
+    Data attribute listing the data-type "Domain-Range" groupings the ufunc
+    can deliver. The data-types are given using the character codes.
+
+    See Also
+    --------
+    numpy.ufunc.ntypes
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.add.types
+    ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', ...
+
+    >>> np.power.types
+    ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ...
+
+    >>> np.exp.types
+    ['e->e', 'f->f', 'd->d', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
+
+    >>> np.remainder.types
+    ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', ...
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('signature',
+    """
+    Definition of the core elements a generalized ufunc operates on.
+
+    The signature determines how the dimensions of each input/output array
+    are split into core and loop dimensions:
+
+    1. Each dimension in the signature is matched to a dimension of the
+       corresponding passed-in array, starting from the end of the shape tuple.
+    2. Core dimensions assigned to the same label in the signature must have
+       exactly matching sizes, no broadcasting is performed.
+    3. The core dimensions are removed from all inputs and the remaining
+       dimensions are broadcast together, defining the loop dimensions.
+
+    Notes
+    -----
+    Generalized ufuncs are used internally in many linalg functions, and in
+    the testing suite; the examples below are taken from these.
+    For ufuncs that operate on scalars, the signature is None, which is
+    equivalent to '()' for every argument.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.linalg._umath_linalg.det.signature
+    '(m,m)->()'
+    >>> np.matmul.signature
+    '(n?,k),(k,m?)->(n?,m?)'
+    >>> np.add.signature is None
+    True  # equivalent to '(),()->()'
+    """))
+
+##############################################################################
+#
+# ufunc methods
+#
+##############################################################################
+
+add_newdoc('numpy._core', 'ufunc', ('reduce',
+    """
+    reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
+
+    Reduces `array`'s dimension by one, by applying ufunc along one axis.
+
+    Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
+    :math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
+    the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
+    ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
+    For a one-dimensional array, reduce produces results equivalent to:
+    ::
+
+        r = op.identity  # op = ufunc
+        for i in range(len(A)):
+            r = op(r, A[i])
+        return r
+
+    For example, add.reduce() is equivalent to sum().
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a reduction is performed.
+        The default (`axis` = 0) is to perform a reduction over the first
+        dimension of the input array. `axis` may be negative, in
+        which case it counts from the last to the first axis.
+
+        If this is None, a reduction is performed over all the axes.
+        If this is a tuple of ints, a reduction is performed on multiple
+        axes, instead of a single axis or all the axes as before.
+
+        For operations which are either not commutative or not associative,
+        doing a reduction over multiple axes is not well-defined. The
+        ufuncs do not currently raise an exception in this case, but will
+        likely do so in the future.
+ dtype : data-type code, optional + The data type used to perform the operation. Defaults to that of + ``out`` if given, and the data type of ``array`` otherwise (though + upcast to conserve precision for some cases, such as + ``numpy.add.reduce`` for integer or boolean input). + out : ndarray, None, ..., or tuple of ndarray and None, optional + Location into which the result is stored. + If not provided or None, a freshly-allocated array is returned. + If passed as a keyword argument, can be Ellipses (``out=...``) to + ensure an array is returned even if the result is 0-dimensional + (which is useful especially for object dtype), or a 1-element tuple + (latter for consistency with ``ufunc.__call__``). + + .. versionadded:: 2.3 + Support for ``out=...`` was added. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `array`. + initial : scalar, optional + The value with which to start the reduction. + If the ufunc has no identity or the dtype is object, this defaults + to None - otherwise it defaults to ufunc.identity. + If ``None`` is given, the first element of the reduction is used, + and an error is thrown if the reduction is empty. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `array`, and selects elements to include in the reduction. Note + that for ufuncs like ``minimum`` that do not have an identity + defined, one has to pass in also ``initial``. + + Returns + ------- + r : ndarray + The reduced array. If `out` was supplied, `r` is a reference to it. + + Examples + -------- + >>> import numpy as np + >>> np.multiply.reduce([2,3,5]) + 30 + + A multi-dimensional array example: + + >>> X = np.arange(8).reshape((2,2,2)) + >>> X + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.add.reduce(X, 0) + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X) # confirm: default axis value is 0 + array([[ 4, 6], + [ 8, 10]]) + >>> np.add.reduce(X, 1) + array([[ 2, 4], + [10, 12]]) + >>> np.add.reduce(X, 2) + array([[ 1, 5], + [ 9, 13]]) + + You can use the ``initial`` keyword argument to initialize the reduction + with a different value, and ``where`` to select specific elements to include: + + >>> np.add.reduce([10], initial=5) + 15 + >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) + array([14., 14.]) + >>> a = np.array([10., np.nan, 10]) + >>> np.add.reduce(a, where=~np.isnan(a)) + 20.0 + + Allows reductions of empty arrays where they would normally fail, i.e. + for ufuncs without an identity. + + >>> np.minimum.reduce([], initial=np.inf) + inf + >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False]) + array([ 1., 10.]) + >>> np.minimum.reduce([]) + Traceback (most recent call last): + ... + ValueError: zero-size array to reduction operation minimum which has no identity + """)) + +add_newdoc('numpy._core', 'ufunc', ('accumulate', + """ + accumulate(array, axis=0, dtype=None, out=None) + + Accumulate the result of applying the operator to all elements. + + For a one-dimensional array, accumulate produces results equivalent to:: + + r = np.empty(len(A)) + t = op.identity # op = the ufunc being applied to A's elements + for i in range(len(A)): + t = op(t, A[i]) + r[i] = t + return r + + For example, add.accumulate() is equivalent to np.cumsum(). 
+
+    For a multi-dimensional array, accumulate is applied along only one
+    axis (axis zero by default; see Examples below) so repeated use is
+    necessary if one wants to accumulate over multiple axes.
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    axis : int, optional
+        The axis along which to apply the accumulation; default is zero.
+    dtype : data-type code, optional
+        The data-type used to represent the intermediate results. Defaults
+        to the data-type of the output array if such is provided, or the
+        data-type of the input array if no output array is provided.
+    out : ndarray, None, or tuple of ndarray and None, optional
+        Location into which the result is stored.
+        If not provided or None, a freshly-allocated array is returned.
+        For consistency with ``ufunc.__call__``, if passed as a keyword
+        argument, can be Ellipsis (``out=...``, which has the same effect
+        as None since an array is always returned), or a 1-element tuple.
+
+    Returns
+    -------
+    r : ndarray
+        The accumulated values. If `out` was supplied, `r` is a reference to
+        `out`.
+
+    Examples
+    --------
+    1-D array examples:
+
+    >>> import numpy as np
+    >>> np.add.accumulate([2, 3, 5])
+    array([ 2,  5, 10])
+    >>> np.multiply.accumulate([2, 3, 5])
+    array([ 2,  6, 30])
+
+    2-D array examples:
+
+    >>> I = np.eye(2)
+    >>> I
+    array([[1., 0.],
+           [0., 1.]])
+
+    Accumulate along axis 0 (rows), down columns:
+
+    >>> np.add.accumulate(I, 0)
+    array([[1., 0.],
+           [1., 1.]])
+    >>> np.add.accumulate(I) # no axis specified = axis zero
+    array([[1., 0.],
+           [1., 1.]])
+
+    Accumulate along axis 1 (columns), through rows:
+
+    >>> np.add.accumulate(I, 1)
+    array([[1., 1.],
+           [0., 1.]])
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('reduceat',
+    """
+    reduceat(array, indices, axis=0, dtype=None, out=None)
+
+    Performs a (local) reduce with specified slices over a single axis.
+
+    For i in ``range(len(indices))``, `reduceat` computes
+    ``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
+    generalized "row" parallel to `axis` in the final result (i.e., in a
+    2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
+    `axis = 1`, it becomes the i-th column). There are three exceptions to this:
+
+    * when ``i = len(indices) - 1`` (so for the last index),
+      ``indices[i+1] = array.shape[axis]``.
+    * if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
+      simply ``array[indices[i]]``.
+    * if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
+
+    The shape of the output depends on the size of `indices`, and may be
+    larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
+
+    Parameters
+    ----------
+    array : array_like
+        The array to act on.
+    indices : array_like
+        Paired indices, comma separated (not colon), specifying slices to
+        reduce.
+    axis : int, optional
+        The axis along which to apply the reduceat.
+    dtype : data-type code, optional
+        The data type used to perform the operation. Defaults to that of
+        ``out`` if given, and the data type of ``array`` otherwise (though
+        upcast to conserve precision for some cases, such as
+        ``numpy.add.reduce`` for integer or boolean input).
+    out : ndarray, None, or tuple of ndarray and None, optional
+        Location into which the result is stored.
+        If not provided or None, a freshly-allocated array is returned.
+        For consistency with ``ufunc.__call__``, if passed as a keyword
+        argument, can be Ellipsis (``out=...``, which has the same effect
+        as None since an array is always returned), or a 1-element tuple.
+
+    Returns
+    -------
+    r : ndarray
+        The reduced values. If `out` was supplied, `r` is a reference to
+        `out`.
+
+    Notes
+    -----
+    A descriptive example:
+
+    If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
+    ``ufunc.reduceat(array, indices)[::2]`` where `indices` is
+    ``range(len(array) - 1)`` with a zero placed
+    in every other element:
+    ``indices = zeros(2 * len(array) - 1)``,
+    ``indices[1::2] = range(1, len(array))``.
+
+    Don't be fooled by this method's name: `reduceat(array)` is not
+    necessarily smaller than `array`.
+
+    Examples
+    --------
+    To take the running sum of four successive values:
+
+    >>> import numpy as np
+    >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
+    array([ 6, 10, 14, 18])
+
+    A 2-D example:
+
+    >>> x = np.linspace(0, 15, 16).reshape(4,4)
+    >>> x
+    array([[ 0.,  1.,  2.,  3.],
+           [ 4.,  5.,  6.,  7.],
+           [ 8.,  9., 10., 11.],
+           [12., 13., 14., 15.]])
+
+    ::
+
+     # reduce such that the result has the following five rows:
+     # [row1 + row2 + row3]
+     # [row4]
+     # [row2]
+     # [row3]
+     # [row1 + row2 + row3 + row4]
+
+    >>> np.add.reduceat(x, [0, 3, 1, 2, 0])
+    array([[12., 15., 18., 21.],
+           [12., 13., 14., 15.],
+           [ 4.,  5.,  6.,  7.],
+           [ 8.,  9., 10., 11.],
+           [24., 28., 32., 36.]])
+
+    ::
+
+     # reduce such that result has the following two columns:
+     # [col1 * col2 * col3, col4]
+
+    >>> np.multiply.reduceat(x, [0, 3], 1)
+    array([[   0.,    3.],
+           [ 120.,    7.],
+           [ 720.,   11.],
+           [2184.,   15.]])
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('outer',
+    r"""
+    outer(A, B, /, **kwargs)
+
+    Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
+
+    Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
+    ``op.outer(A, B)`` is an array of dimension M + N such that:
+
+    .. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
+       op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
+
+    For `A` and `B` one-dimensional, this is equivalent to::
+
+      r = empty((len(A), len(B)))
+      for i in range(len(A)):
+          for j in range(len(B)):
+              r[i,j] = op(A[i], B[j])  # op = ufunc in question
+
+    Parameters
+    ----------
+    A : array_like
+        First array
+    B : array_like
+        Second array
+    kwargs : any
+        Arguments to pass on to the ufunc. Typically `dtype` or `out`.
+        See `ufunc` for a comprehensive overview of all available arguments.
+
+    Returns
+    -------
+    r : ndarray
+        Output array
+
+    See Also
+    --------
+    numpy.outer : A less powerful version of ``np.multiply.outer``
+                  that `ravel`\ s all inputs to 1D. This exists
+                  primarily for compatibility with old code.
+
+    tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
+                ``np.multiply.outer(a, b)`` behave the same for all
+                dimensions of a and b.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.multiply.outer([1, 2, 3], [4, 5, 6])
+    array([[ 4,  5,  6],
+           [ 8, 10, 12],
+           [12, 15, 18]])
+
+    A multi-dimensional example:
+
+    >>> A = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> A.shape
+    (2, 3)
+    >>> B = np.array([[1, 2, 3, 4]])
+    >>> B.shape
+    (1, 4)
+    >>> C = np.multiply.outer(A, B)
+    >>> C.shape; C
+    (2, 3, 1, 4)
+    array([[[[ 1,  2,  3,  4]],
+            [[ 2,  4,  6,  8]],
+            [[ 3,  6,  9, 12]]],
+           [[[ 4,  8, 12, 16]],
+            [[ 5, 10, 15, 20]],
+            [[ 6, 12, 18, 24]]]])
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('at',
+    """
+    at(a, indices, b=None, /)
+
+    Performs unbuffered in place operation on operand 'a' for elements
+    specified by 'indices'. For addition ufunc, this method is equivalent to
+    ``a[indices] += b``, except that results are accumulated for elements that
+    are indexed more than once. For example, ``a[[0,0]] += 1`` will only
+    increment the first element once because of buffering, whereas
+    ``add.at(a, [0,0], 1)`` will increment the first element twice.
+
+    Parameters
+    ----------
+    a : array_like
+        The array to perform in place operation on.
+    indices : array_like or tuple
+        Array like index object or slice object for indexing into first
+        operand. If first operand has multiple dimensions, indices can be a
+        tuple of array like index objects or slice objects.
+    b : array_like
+        Second operand for ufuncs requiring two operands. Operand must be
+        broadcastable over first operand after indexing or slicing.
+
+    Examples
+    --------
+    Set items 0 and 1 to their negative values:
+
+    >>> import numpy as np
+    >>> a = np.array([1, 2, 3, 4])
+    >>> np.negative.at(a, [0, 1])
+    >>> a
+    array([-1, -2,  3,  4])
+
+    Increment items 0 and 1, and increment item 2 twice:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> np.add.at(a, [0, 1, 2, 2], 1)
+    >>> a
+    array([2, 3, 5, 4])
+
+    Add items 0 and 1 in first array to second array,
+    and store results in first array:
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> b = np.array([1, 2])
+    >>> np.add.at(a, [0, 1], b)
+    >>> a
+    array([2, 4, 3, 4])
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('resolve_dtypes',
+    """
+    resolve_dtypes(dtypes, *, signature=None, casting=None, reduction=False)
+
+    Find the dtypes NumPy will use for the operation. Both input and
+    output dtypes are returned and may differ from those provided.
+
+    .. note::
+
+        This function always applies NEP 50 rules since it is not provided
+        any actual values. The Python types ``int``, ``float``, and
+        ``complex`` thus behave weakly and should be passed for "untyped"
+        Python input.
+
+    Parameters
+    ----------
+    dtypes : tuple of dtypes, None, or literal int, float, complex
+        The input dtypes for each operand. Output operands can be
+        None, indicating that the dtype must be found.
+    signature : tuple of DTypes or None, optional
+        If given, enforces exact DType (classes) of the specific operand.
+        The ufunc ``dtype`` argument is equivalent to passing a tuple with
+        only output dtypes set.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        The casting mode when casting is necessary. This is identical to
+        the ufunc call casting modes.
+    reduction : bool
+        If True, the resolution assumes a reduce operation is happening
+        which slightly changes the promotion and type resolution rules.
+        `dtypes` is usually something like ``(None, np.dtype("i2"), None)``
+        for reductions (first input is also the output).
+
+        .. note::
+
+            The default casting mode is "same_kind", however, as of
+            NumPy 1.24, NumPy uses "unsafe" for reductions.
+
+    Returns
+    -------
+    dtypes : tuple of dtypes
+        The dtypes which NumPy would use for the calculation. Note that
+        dtypes may not match the passed-in ones (in which case casting is
+        necessary).
+
+
+    Examples
+    --------
+    This API requires passing dtypes, define them for convenience:
+
+    >>> import numpy as np
+    >>> int32 = np.dtype("int32")
+    >>> float32 = np.dtype("float32")
+
+    The typical ufunc call does not pass an output dtype. `numpy.add` has two
+    inputs and one output, so leave the output as ``None`` (not provided):
+
+    >>> np.add.resolve_dtypes((int32, float32, None))
+    (dtype('float64'), dtype('float64'), dtype('float64'))
+
+    The loop found uses "float64" for all operands (including the output), the
+    first input would be cast.
+
+    ``resolve_dtypes`` supports "weak" handling for Python scalars by passing
+    ``int``, ``float``, or ``complex``:
+
+    >>> np.add.resolve_dtypes((float32, float, None))
+    (dtype('float32'), dtype('float32'), dtype('float32'))
+
+    Where the Python ``float`` behaves similar to a Python value ``0.0``
+    in a ufunc call. (See :ref:`NEP 50 <NEP50>` for details.)
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('_resolve_dtypes_and_context',
+    """
+    _resolve_dtypes_and_context(dtypes, *, signature=None, casting=None, reduction=False)
+
+    See `numpy.ufunc.resolve_dtypes` for parameter information. This
+    function is considered *unstable*. You may use it, but the returned
+    information is NumPy version specific and expected to change.
+    Large API/ABI changes are not expected, but a new NumPy version is
+    expected to require updating code using this functionality.
+
+    This function is designed to be used in conjunction with
+    `numpy.ufunc._get_strided_loop`. The calls are split to mirror the C API
+    and allow future improvements.
+
+    Returns
+    -------
+    dtypes : tuple of dtypes
+    call_info :
+        PyCapsule with all necessary information to get access to low level
+        C calls. See `numpy.ufunc._get_strided_loop` for more information.
+
+    """))
+
+add_newdoc('numpy._core', 'ufunc', ('_get_strided_loop',
+    """
+    _get_strided_loop(call_info, /, *, fixed_strides=None)
+
+    This function fills in the ``call_info`` capsule to include all
+    information necessary to call the low-level strided loop from NumPy.
+
+    See notes for more information.
+
+    Parameters
+    ----------
+    call_info : PyCapsule
+        The PyCapsule returned by `numpy.ufunc._resolve_dtypes_and_context`.
+    fixed_strides : tuple of int or None, optional
+        A tuple with fixed byte strides of all input arrays. NumPy may use
+        this information to find specialized loops, so any call must follow
+        the given stride. Use ``None`` to indicate that the stride is not
+        known (or not fixed) for all calls.
+
+    Notes
+    -----
+    Together with `numpy.ufunc._resolve_dtypes_and_context` this function
+    gives low-level access to the NumPy ufunc loops.
+    The first function does general preparation and returns the required
+    information. It returns this as a C capsule with the version specific
+    name ``numpy_1.24_ufunc_call_info``.
+    The NumPy 1.24 ufunc call info capsule has the following layout::
+
+        typedef struct {
+            PyArrayMethod_StridedLoop *strided_loop;
+            PyArrayMethod_Context *context;
+            NpyAuxData *auxdata;
+
+            /* Flag information (expected to change) */
+            npy_bool requires_pyapi;  /* GIL is required by loop */
+
+            /* Loop doesn't set FPE flags; if not set check FPE flags */
+            npy_bool no_floatingpoint_errors;
+        } ufunc_call_info;
+
+    Note that the first call only fills in the ``context``. The call to
+    ``_get_strided_loop`` fills in all other data. The main thing to note is
+    that the new-style loops return 0 on success, -1 on failure. They are
+    passed context as new first input and ``auxdata`` as (replaced) last.
+
+    Only the ``strided_loop`` signature is considered guaranteed stable
+    for NumPy bug-fix releases. All other API is tied to the experimental
+    API versioning.
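+
+    A minimal sketch of the two-step call from Python (illustrative only:
+    the resolved loop is meant to be invoked from C, this private API is
+    unstable, and the exact calls below are an assumption based on the
+    signatures documented above):
+
+    >>> import numpy as np
+    >>> int32 = np.dtype("int32")
+    >>> dtypes, call_info = np.add._resolve_dtypes_and_context(
+    ...     (int32, int32, None))
+    >>> np.add._get_strided_loop(call_info, fixed_strides=(4, 4, 4))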
+
+    The reason for the split call is that cast information is required to
+    decide what the fixed-strides will be.
+
+    NumPy ties the lifetime of the ``auxdata`` information to the capsule.
+
+    """))
+
+
+##############################################################################
+#
+# Documentation for dtype attributes and methods
+#
+##############################################################################
+
+##############################################################################
+#
+# dtype object
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'dtype',
+    """
+    dtype(dtype, align=False, copy=False, [metadata])
+
+    Create a data type object.
+
+    A numpy array is homogeneous, and contains elements described by a
+    dtype object. A dtype object can be constructed from different
+    combinations of fundamental numeric types.
+
+    Parameters
+    ----------
+    dtype
+        Object to be converted to a data type object.
+    align : bool, optional
+        Add padding to the fields to match what a C compiler would output
+        for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
+        or a comma-separated string. If a struct dtype is being created,
+        this also sets a sticky alignment flag ``isalignedstruct``.
+    copy : bool, optional
+        Make a new copy of the data-type object. If ``False``, the result
+        may just be a reference to a built-in data-type object.
+    metadata : dict, optional
+        An optional dictionary with dtype metadata.
+
+    See also
+    --------
+    result_type
+
+    Examples
+    --------
+    Using array-scalar type:
+
+    >>> import numpy as np
+    >>> np.dtype(np.int16)
+    dtype('int16')
+
+    Structured type, one field name 'f1', containing int16:
+
+    >>> np.dtype([('f1', np.int16)])
+    dtype([('f1', '<i2')])
+
+    Structured type, one field named 'f1', in itself containing a structured
+    type with one field:
+
+    >>> np.dtype([('f1', [('f1', np.int16)])])
+    dtype([('f1', [('f1', '<i2')])])
+
+    Structured type, two fields: the first field contains an unsigned int, the
+    second an int32:
+
+    >>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
+    dtype([('f1', '<u8'), ('f2', '<i4')])
+
+    Using array-protocol type strings:
+
+    >>> np.dtype([('a','f8'),('b','S10')])
+    dtype([('a', '<f8'), ('b', 'S10')])
+
+    Using comma-separated field formats. The shape is (2,3):
+
+    >>> np.dtype("i4, (2,3)f8")
+    dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
+
+    Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
+    is a flexible type, here of size 10:
+
+    >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
+    dtype([('hello', '<i8', (3,)), ('world', 'V10')])
+
+    Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
+    the offsets in bytes:
+
+    >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
+    dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
+
+    Using dictionaries. Two fields named 'gender' and 'age':
+
+    >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
+    dtype([('gender', 'S1'), ('age', 'u1')])
+
+    Offsets in bytes, here 0 and 25:
+
+    >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
+    dtype([('surname', 'S25'), ('age', 'u1')])
+
+    """)
+
+##############################################################################
+#
+# dtype attributes
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('alignment',
+    """
+    The required alignment (bytes) of this data-type according to the compiler.
+
+    More information is available in the C-API section of the manual.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> x = np.dtype('i4')
+    >>> x.alignment
+    4
+
+    >>> x = np.dtype(float)
+    >>> x.alignment
+    8
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('byteorder',
+    """
+    A character indicating the byte-order of this data-type object.
+
+    One of:
+
+    ===  ==============
+    '='  native
+    '<'  little-endian
+    '>'  big-endian
+    '|'  not applicable
+    ===  ==============
+
+    All built-in data-type objects have byteorder either '=' or '|'.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> dt = np.dtype('i2')
+    >>> dt.byteorder
+    '='
+    >>> # endian is not relevant for 8 bit numbers
+    >>> np.dtype('i1').byteorder
+    '|'
+    >>> # or ASCII strings
+    >>> np.dtype('S2').byteorder
+    '|'
+    >>> # Even if specific code is given, and it is native
+    >>> # '=' is the byteorder
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = '<' if sys_is_le else '>'
+    >>> swapped_code = '>' if sys_is_le else '<'
+    >>> dt = np.dtype(native_code + 'i2')
+    >>> dt.byteorder
+    '='
+    >>> # Swapped code shows up as itself
+    >>> dt = np.dtype(swapped_code + 'i2')
+    >>> dt.byteorder == swapped_code
+    True
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('char',
+    """A unique character code for each of the 21 different built-in types.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> x = np.dtype(float)
+    >>> x.char
+    'd'
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('descr',
+    """
+    `__array_interface__` description of the data-type.
+
+    The format is that required by the 'descr' key in the
+    `__array_interface__` attribute.
+
+    Warning: This attribute exists specifically for `__array_interface__`,
+    and passing it directly to `numpy.dtype` will not accurately reconstruct
+    some dtypes (e.g., scalar and subarray dtypes).
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> x = np.dtype(float)
+    >>> x.descr
+    [('', '<f8')]
+
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> dt.descr
+    [('name', '<U16'), ('grades', '<f8', (2,))]
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('fields',
+    """
+    Dictionary of named fields defined for this data type, or ``None``.
+
+    The dictionary is indexed by keys that are the names of the fields.
+    Each entry in the dictionary has a tuple ``(dtype, offset)`` or, for
+    fields with a title, ``(dtype, offset, title)``, describing the data
+    type of the field and its byte offset within the structure.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+    >>> print(dt.fields)
+    {'name': (dtype('<U16'), 0), 'grades': (dtype(('<f8', (2,))), 16)}
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('flags',
+    """
+    Bit-flags describing how this data type is to be interpreted.
+
+    Bit-masks are in ``numpy._core.multiarray`` as the constants
+    `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
+    `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
+    of these flags is in C-API documentation; they are largely useful
+    for user-defined data-types.
+
+    The following example demonstrates that operations on this particular
+    dtype require the Python C-API.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+    >>> x.flags
+    16
+    >>> np._core.multiarray.NEEDS_PYAPI
+    16
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('hasobject',
+    """
+    Boolean indicating whether this dtype contains any reference-counted
+    objects in any fields or sub-dtypes.
+
+    Recall that what is actually in the ndarray memory representing
+    the Python object is the memory address of that object (a pointer).
+    Special handling may be required, and this attribute is useful for
+    distinguishing data types that may contain arbitrary Python objects
+    from data-types that won't.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('isbuiltin',
+    """
+    Integer indicating how this dtype relates to the built-in dtypes.
+
+    Read-only.
+
+    =  ========================================================================
+    0  if this is a structured array type, with fields
+    1  if this is a dtype compiled into numpy (such as ints, floats etc)
+    2  if the dtype is for a user-defined numpy type
+       A user-defined type uses the numpy C-API machinery to extend
+       numpy to handle a new array type. See
+       :ref:`user.user-defined-data-types` in the NumPy manual.
+ = ======================================================================== + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype('i2') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype('f8') + >>> dt.isbuiltin + 1 + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.isbuiltin + 0 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('isnative', + """ + Boolean indicating whether the byte order of this dtype is native + to the platform. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('isalignedstruct', + """ + Boolean indicating whether the dtype is a struct which maintains + field alignment. This flag is sticky, so when combining multiple + structs together, it is preserved and produces new dtypes which + are also aligned. + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('itemsize', + """ + The element size of this data-type object. + + For 18 of the 21 types this number is fixed by the data-type. + For the flexible data-types, this number can be anything. + + Examples + -------- + + >>> import numpy as np + >>> arr = np.array([[1, 2], [3, 4]]) + >>> arr.dtype + dtype('int64') + >>> arr.itemsize + 8 + + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.itemsize + 80 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('kind', + """ + A character code (one of 'biufcmMOSTUV') identifying the general kind of data. + + = ====================== + b boolean + i signed integer + u unsigned integer + f floating-point + c complex floating-point + m timedelta + M datetime + O object + S (byte-)string + T string (StringDType) + U Unicode + V void + = ====================== + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype('i4') + >>> dt.kind + 'i' + >>> dt = np.dtype('f8') + >>> dt.kind + 'f' + >>> dt = np.dtype([('field1', 'f8')]) + >>> dt.kind + 'V' + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('metadata', + """ + Either ``None`` or a readonly dictionary of metadata (mappingproxy). + + The metadata field can be set using any dictionary at data-type + creation. NumPy currently has no uniform approach to propagating + metadata; although some array operations preserve it, there is no + guarantee that others will. + + .. warning:: + + Although used in certain projects, this feature was long undocumented + and is not well supported. Some aspects of metadata propagation + are expected to change in the future. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(float, metadata={"key": "value"}) + >>> dt.metadata["key"] + 'value' + >>> arr = np.array([1, 2, 3], dtype=dt) + >>> arr.dtype.metadata + mappingproxy({'key': 'value'}) + + Adding arrays with identical datatypes currently preserves the metadata: + + >>> (arr + arr).dtype.metadata + mappingproxy({'key': 'value'}) + + If the arrays have different dtype metadata, the first one wins: + + >>> dt2 = np.dtype(float, metadata={"key2": "value2"}) + >>> arr2 = np.array([3, 2, 1], dtype=dt2) + >>> print((arr + arr2).dtype.metadata) + {'key': 'value'} + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('name', + """ + A bit-width name for this data-type. + + Un-sized flexible data-type objects do not have this attribute. 
+ + Examples + -------- + + >>> import numpy as np + >>> x = np.dtype(float) + >>> x.name + 'float64' + >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) + >>> x.name + 'void640' + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('names', + """ + Ordered list of field names, or ``None`` if there are no fields. + + The names are ordered according to increasing byte offset. This can be + used, for example, to walk through all of the named fields in offset order. + + Examples + -------- + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt.names + ('name', 'grades') + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('num', + """ + A unique number for each of the 21 different built-in types. + + These are roughly ordered from least-to-most precision. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(str) + >>> dt.num + 19 + + >>> dt = np.dtype(float) + >>> dt.num + 12 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('shape', + """ + Shape tuple of the sub-array if this data type describes a sub-array, + and ``()`` otherwise. + + Examples + -------- + + >>> import numpy as np + >>> dt = np.dtype(('i4', 4)) + >>> dt.shape + (4,) + + >>> dt = np.dtype(('i4', (2, 3))) + >>> dt.shape + (2, 3) + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('ndim', + """ + Number of dimensions of the sub-array if this data type describes a + sub-array, and ``0`` otherwise. + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype(float) + >>> x.ndim + 0 + + >>> x = np.dtype((float, 8)) + >>> x.ndim + 1 + + >>> x = np.dtype(('i4', (3, 4))) + >>> x.ndim + 2 + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('str', + """The array-protocol typestring of this data-type object.""")) + +add_newdoc('numpy._core.multiarray', 'dtype', ('subdtype', + """ + Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and + None otherwise. + + The *shape* is the fixed shape of the sub-array described by this + data type, and *item_dtype* the data type of the array. + + If a field whose dtype object has this attribute is retrieved, + then the extra dimensions implied by *shape* are tacked on to + the end of the retrieved array. + + See Also + -------- + dtype.base + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype('8f') + >>> x.subdtype + (dtype('float32'), (8,)) + + >>> x = np.dtype('i2') + >>> x.subdtype + >>> + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('base', + """ + Returns dtype for the base element of the subarrays, + regardless of their dimension or shape. + + See Also + -------- + dtype.subdtype + + Examples + -------- + >>> import numpy as np + >>> x = np.dtype('8f') + >>> x.base + dtype('float32') + + >>> x = np.dtype('i2') + >>> x.base + dtype('int16') + + """)) + +add_newdoc('numpy._core.multiarray', 'dtype', ('type', + """The type object used to instantiate a scalar of this data-type.""")) + +############################################################################## +# +# dtype methods +# +############################################################################## + +add_newdoc('numpy._core.multiarray', 'dtype', ('newbyteorder', + """ + newbyteorder(new_order='S', /) + + Return a new dtype with a different byte order. + + Changes are also made in all fields and sub-arrays of the data type. + + Parameters + ---------- + new_order : string, optional + Byte order to force; a value from the byte order specifications + below. 
The default value ('S') results in swapping the current
+        byte order. `new_order` codes can be any of:
+
+        * 'S' - swap dtype from current to opposite endian
+        * {'<', 'little'} - little endian
+        * {'>', 'big'} - big endian
+        * {'=', 'native'} - native order
+        * {'|', 'I'} - ignore (no change to byte order)
+
+    Returns
+    -------
+    new_dtype : dtype
+        New dtype object with the given change to the byte order.
+
+    Notes
+    -----
+    Changes are also made in all fields and sub-arrays of the data type.
+
+    Examples
+    --------
+    >>> import sys
+    >>> sys_is_le = sys.byteorder == 'little'
+    >>> native_code = '<' if sys_is_le else '>'
+    >>> swapped_code = '>' if sys_is_le else '<'
+    >>> import numpy as np
+    >>> native_dt = np.dtype(native_code+'i2')
+    >>> swapped_dt = np.dtype(swapped_code+'i2')
+    >>> native_dt.newbyteorder('S') == swapped_dt
+    True
+    >>> native_dt.newbyteorder() == swapped_dt
+    True
+    >>> native_dt == swapped_dt.newbyteorder('S')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('=')
+    True
+    >>> native_dt == swapped_dt.newbyteorder('N')
+    True
+    >>> native_dt == native_dt.newbyteorder('|')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('<')
+    True
+    >>> np.dtype('<i2') == native_dt.newbyteorder('L')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('>')
+    True
+    >>> np.dtype('>i2') == native_dt.newbyteorder('B')
+    True
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__class_getitem__',
+    """
+    __class_getitem__(item, /)
+
+    Return a parametrized wrapper around the `~numpy.dtype` type.
+
+    .. versionadded:: 1.22
+
+    Returns
+    -------
+    alias : types.GenericAlias
+        A parametrized `~numpy.dtype` type.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> np.dtype[np.int64]
+    numpy.dtype[numpy.int64]
+
+    See Also
+    --------
+    :pep:`585` : Type hinting generics in standard collections.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__ge__',
+    """
+    __ge__(value, /)
+
+    Return ``self >= value``.
+
+    Equivalent to ``np.can_cast(value, self, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__le__',
+    """
+    __le__(value, /)
+
+    Return ``self <= value``.
+
+    Equivalent to ``np.can_cast(self, value, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__gt__',
+    """
+    __gt__(value, /)
+
+    Return ``self > value``.
+
+    Equivalent to
+    ``self != value and np.can_cast(value, self, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+add_newdoc('numpy._core.multiarray', 'dtype', ('__lt__',
+    """
+    __lt__(value, /)
+
+    Return ``self < value``.
+
+    Equivalent to
+    ``self != value and np.can_cast(self, value, casting="safe")``.
+
+    See Also
+    --------
+    can_cast : Returns True if cast between data types can occur according to
+               the casting rule.
+
+    """))
+
+##############################################################################
+#
+# Datetime-related Methods
+#
+##############################################################################
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar',
+    """
+    busdaycalendar(weekmask='1111100', holidays=None)
+
+    A business day calendar object that efficiently stores information
+    defining valid days for the busday family of functions.
+
+    The default valid days are Monday through Friday ("business days").
+
+    A busdaycalendar object can be specified with any set of weekly
+    valid days, plus an optional set of "holiday" dates that are always
+    invalid.
+
+    Once a busdaycalendar object is created, the weekmask and holidays
+    cannot be modified.
+
+    Parameters
+    ----------
+    weekmask : str or array_like of bool, optional
+        A seven-element array indicating which of Monday through Sunday are
+        valid days. May be specified as a length-seven list or array, like
+        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+        weekdays, optionally separated by white space. Valid abbreviations
+        are: Mon Tue Wed Thu Fri Sat Sun
+    holidays : array_like of datetime64[D], optional
+        An array of dates to consider as invalid dates, no matter which
+        weekday they fall upon. Holiday dates may be specified in any
+        order, and NaT (not-a-time) dates are ignored. This list is
+        saved in a normalized form that is suited for fast calculations
+        of valid days.
+
+    Returns
+    -------
+    out : busdaycalendar
+        A business day calendar object containing the specified
+        weekmask and holidays values.
+
+    See Also
+    --------
+    is_busday : Returns a boolean array indicating valid days.
+    busday_offset : Applies an offset counted in valid days.
+    busday_count : Counts how many valid days are in a half-open date range.
+
+    Attributes
+    ----------
+    weekmask : (copy) seven-element array of bool
+    holidays : (copy) sorted array of datetime64[D]
+
+    Notes
+    -----
+    Once a busdaycalendar object is created, you cannot modify the
+    weekmask or holidays. The attributes return copies of internal data.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> # Some important days in July
+    ... bdd = np.busdaycalendar(
+    ...             holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+    >>> # Default is Monday to Friday weekdays
+    ... bdd.weekmask
+    array([ True,  True,  True,  True,  True, False, False])
+    >>> # Any holidays already on the weekend are removed
+    ... bdd.holidays
+    array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
+    """)
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar', ('weekmask',
+    """A copy of the seven-element boolean mask indicating valid days."""))
+
+add_newdoc('numpy._core.multiarray', 'busdaycalendar', ('holidays',
+    """A copy of the holiday array indicating additional invalid days."""))
+
+add_newdoc('numpy._core.multiarray', 'normalize_axis_index',
+    """
+    normalize_axis_index(axis, ndim, msg_prefix=None)
+
+    Normalizes an axis index, `axis`, such that it is a valid positive index
+    into the shape of an array with `ndim` dimensions. Raises an AxisError
+    with an appropriate message if this is not possible.
+
+    Used internally by all axis-checking logic.
+
+    Parameters
+    ----------
+    axis : int
+        The un-normalized index of the axis. Can be negative.
+    ndim : int
+        The number of dimensions of the array that `axis` should be
+        normalized against.
+    msg_prefix : str
+        A prefix to put before the message, typically the name of the
+        argument.
+
+    Returns
+    -------
+    normalized_axis : int
+        The normalized axis index, such that `0 <= normalized_axis < ndim`
+
+    Raises
+    ------
+    AxisError
+        If the axis index is invalid, when `-ndim <= axis < ndim` is false.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib.array_utils import normalize_axis_index
+    >>> normalize_axis_index(0, ndim=3)
+    0
+    >>> normalize_axis_index(1, ndim=3)
+    1
+    >>> normalize_axis_index(-1, ndim=3)
+    2
+
+    >>> normalize_axis_index(3, ndim=3)
+    Traceback (most recent call last):
+    ...
+    numpy.exceptions.AxisError: axis 3 is out of bounds for array ...
+    >>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
+    Traceback (most recent call last):
+    ...
+    numpy.exceptions.AxisError: axes_arg: axis -4 is out of bounds ...
+    """)
+
+add_newdoc('numpy._core.multiarray', 'datetime_data',
+    """
+    datetime_data(dtype, /)
+
+    Get information about the step size of a date or time type.
+
+    The returned tuple can be passed as the second argument of
+    `numpy.datetime64` and `numpy.timedelta64`.
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype object, which must be a `datetime64` or `timedelta64` type.
+
+    Returns
+    -------
+    unit : str
+        The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this
+        dtype is based.
+    count : int
+        The number of base units in a step.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dt_25s = np.dtype('timedelta64[25s]')
+    >>> np.datetime_data(dt_25s)
+    ('s', 25)
+    >>> np.array(10, dt_25s).astype('timedelta64[s]')
+    array(250, dtype='timedelta64[s]')
+
+    The result can be used to construct a datetime that uses the same units
+    as a timedelta:
+
+    >>> np.datetime64('2010', np.datetime_data(dt_25s))
+    np.datetime64('2010-01-01T00:00:00','25s')
+    """)
+
+
+##############################################################################
+#
+# Documentation for `generic` attributes and methods
+#
+##############################################################################
+
+add_newdoc('numpy._core.numerictypes', 'generic',
+    """
+    Base class for numpy scalar types.
+
+    Class from which most (all?) numpy scalar types are derived. For
+    consistency, exposes the same API as `ndarray`, despite many
+    consequent attributes being either "get-only" or completely irrelevant.
+    Users wanting to define custom scalar types are strongly encouraged to
+    derive from this class.
+
+    """)
+
+# Attributes
+
+def refer_to_array_attribute(attr, method=True):
+    docstring = """
+    Scalar {} identical to the corresponding array attribute.
+
+    Please see `ndarray.{}`.
+ """ + + return attr, docstring.format("method" if method else "attribute", attr) + + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('T', method=False)) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('base', method=False)) + +add_newdoc('numpy._core.numerictypes', 'generic', ('data', + """Pointer to start of data.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('dtype', + """Get array data-descriptor.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('flags', + """The integer value of flags.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('flat', + """A 1-D view of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('imag', + """The imaginary part of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('itemsize', + """The length of one element in bytes.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('ndim', + """The number of array dimensions.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('real', + """The real part of the scalar.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('shape', + """Tuple of array dimensions.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('size', + """The number of elements in the gentype.""")) + +add_newdoc('numpy._core.numerictypes', 'generic', ('strides', + """Tuple of bytes steps in each dimension.""")) + +# Methods + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('all')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('any')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('argmax')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('argmin')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('argsort')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('astype')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('byteswap')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('choose')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('clip')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('compress')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('conjugate')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('copy')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('cumprod')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('cumsum')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('diagonal')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('dump')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('dumps')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('fill')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('flatten')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('getfield')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('item')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('max')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('mean')) + 
+add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('min')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('nonzero')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('prod')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('put')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('ravel')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('repeat')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('reshape')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('resize')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('round')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('searchsorted')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('setfield')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('setflags')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('sort')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('squeeze')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('std')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('sum')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('swapaxes')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('take')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('tofile')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('tolist')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('tostring')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('trace')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('transpose')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('var')) + +add_newdoc('numpy._core.numerictypes', 'generic', + refer_to_array_attribute('view')) + +add_newdoc('numpy._core.numerictypes', 'number', ('__class_getitem__', + """ + __class_getitem__(item, /) + + Return a parametrized wrapper around the `~numpy.number` type. + + .. versionadded:: 1.22 + + Returns + ------- + alias : types.GenericAlias + A parametrized `~numpy.number` type. + + Examples + -------- + >>> from typing import Any + >>> import numpy as np + + >>> np.signedinteger[Any] + numpy.signedinteger[typing.Any] + + See Also + -------- + :pep:`585` : Type hinting generics in standard collections. + + """)) + +############################################################################## +# +# Documentation for scalar type abstract base classes in type hierarchy +# +############################################################################## + + +add_newdoc('numpy._core.numerictypes', 'number', + """ + Abstract base class of all numeric scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'integer', + """ + Abstract base class of all integer scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'signedinteger', + """ + Abstract base class of all signed integer scalar types. + + """) + +add_newdoc('numpy._core.numerictypes', 'unsignedinteger', + """ + Abstract base class of all unsigned integer scalar types. 
+
+    """)
+
+add_newdoc('numpy._core.numerictypes', 'inexact',
+    """
+    Abstract base class of all numeric scalar types with a (potentially)
+    inexact representation of the values in its range, such as
+    floating-point numbers.
+
+    """)
+
+add_newdoc('numpy._core.numerictypes', 'floating',
+    """
+    Abstract base class of all floating-point scalar types.
+
+    """)
+
+add_newdoc('numpy._core.numerictypes', 'complexfloating',
+    """
+    Abstract base class of all complex number scalar types that are made up of
+    floating-point numbers.
+
+    """)
+
+add_newdoc('numpy._core.numerictypes', 'flexible',
+    """
+    Abstract base class of all scalar types without predefined length.
+    The actual size of these types depends on the specific `numpy.dtype`
+    instantiation.
+
+    """)
+
+add_newdoc('numpy._core.numerictypes', 'character',
+    """
+    Abstract base class of all character string scalar types.
+
+    """)
+
+add_newdoc('numpy._core.multiarray', 'StringDType',
+    """
+    StringDType(*, na_object=np._NoValue, coerce=True)
+
+    Create a StringDType instance.
+
+    StringDType can be used to store UTF-8 encoded variable-width strings in
+    a NumPy array.
+
+    Parameters
+    ----------
+    na_object : object, optional
+        Object used to represent missing data. If unset, the array will not
+        use a missing data sentinel.
+    coerce : bool, optional
+        Whether or not items in an array-like passed to an array creation
+        function that are neither ``str`` nor a ``str`` subtype should be
+        coerced to ``str``. Defaults to True. If set to False, creating a
+        StringDType array from an array-like containing entries that are not
+        already strings will raise an error.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+
+    >>> from numpy.dtypes import StringDType
+    >>> np.array(["hello", "world"], dtype=StringDType())
+    array(["hello", "world"], dtype=StringDType())
+
+    >>> arr = np.array(["hello", None, "world"],
+    ...                dtype=StringDType(na_object=None))
+    >>> arr
+    array(["hello", None, "world"], dtype=StringDType(na_object=None))
+    >>> arr[1] is None
+    True
+
+    >>> arr = np.array(["hello", np.nan, "world"],
+    ...                dtype=StringDType(na_object=np.nan))
+    >>> np.isnan(arr)
+    array([False,  True, False])
+
+    >>> np.array([1.2, object(), "hello world"],
+    ...          dtype=StringDType(coerce=False))
+    Traceback (most recent call last):
+    ...
+    ValueError: StringDType only allows string data when string coercion is disabled.
+
+    >>> np.array(["hello", "world"], dtype=StringDType(coerce=True))
+    array(["hello", "world"], dtype=StringDType(coerce=True))
+    """)
diff --git a/python/numpy/_core/_add_newdocs.pyi b/python/numpy/_core/_add_newdocs.pyi
new file mode 100644
index 000000000..b23c3b1ad
--- /dev/null
+++ b/python/numpy/_core/_add_newdocs.pyi
@@ -0,0 +1,3 @@
+from .overrides import get_array_function_like_doc as get_array_function_like_doc
+
+def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ...
diff --git a/python/numpy/_core/_add_newdocs_scalars.py b/python/numpy/_core/_add_newdocs_scalars.py
new file mode 100644
index 000000000..96170d80c
--- /dev/null
+++ b/python/numpy/_core/_add_newdocs_scalars.py
@@ -0,0 +1,390 @@
+"""
+This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
+our sphinx ``conf.py`` during doc builds, where we want to avoid showing
+platform-dependent information.
+""" +import os +import sys + +from numpy._core import dtype +from numpy._core import numerictypes as _numerictypes +from numpy._core.function_base import add_newdoc + +############################################################################## +# +# Documentation for concrete scalar classes +# +############################################################################## + +def numeric_type_aliases(aliases): + def type_aliases_gen(): + for alias, doc in aliases: + try: + alias_type = getattr(_numerictypes, alias) + except AttributeError: + # The set of aliases that actually exist varies between platforms + pass + else: + yield (alias_type, alias, doc) + return list(type_aliases_gen()) + + +possible_aliases = numeric_type_aliases([ + ('int8', '8-bit signed integer (``-128`` to ``127``)'), + ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'), + ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'), + ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'), + ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'), + ('uint8', '8-bit unsigned integer (``0`` to ``255``)'), + ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'), + ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'), + ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'), + ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'), + ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'), + ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'), + ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'), + ('float96', '96-bit extended-precision floating-point number type'), + ('float128', '128-bit extended-precision floating-point number type'), + ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'), + ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'), + ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'), + ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'), + ]) + + +def _get_platform_and_machine(): + try: + system, _, _, _, machine = os.uname() + except AttributeError: + system = sys.platform + if system == 'win32': + machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \ + or os.environ.get('PROCESSOR_ARCHITECTURE', '') + else: + machine = 'unknown' + return system, machine + + +_system, _machine = _get_platform_and_machine() +_doc_alias_string = f":Alias on this platform ({_system} {_machine}):" + + +def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): + # note: `:field: value` is rST syntax which renders as field lists. 
+    o = getattr(_numerictypes, obj)
+
+    character_code = dtype(o).char
+    canonical_name_doc = "" if obj == o.__name__ else \
+        f":Canonical name: `numpy.{obj}`\n    "
+    if fixed_aliases:
+        alias_doc = ''.join(f":Alias: `numpy.{alias}`\n    "
+                            for alias in fixed_aliases)
+    else:
+        alias_doc = ''
+    alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n    "
+                         for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+
+    docstring = f"""
+    {doc.strip()}
+
+    :Character code: ``'{character_code}'``
+    {canonical_name_doc}{alias_doc}
+    """
+
+    add_newdoc('numpy._core.numerictypes', obj, docstring)
+
+
+_bool_docstring = (
+    """
+    Boolean type (True or False), stored as a byte.
+
+    .. warning::
+
+       The :class:`bool` type is not a subclass of the :class:`int_` type
+       (the :class:`bool` is not even a number type). This is different
+       than Python's default implementation of :class:`bool` as a
+       sub-class of :class:`int`.
+    """
+)
+
+add_newdoc_for_scalar_type('bool', [], _bool_docstring)
+
+add_newdoc_for_scalar_type('bool_', [], _bool_docstring)
+
+add_newdoc_for_scalar_type('byte', [],
+    """
+    Signed integer type, compatible with C ``char``.
+    """)
+
+add_newdoc_for_scalar_type('short', [],
+    """
+    Signed integer type, compatible with C ``short``.
+    """)
+
+add_newdoc_for_scalar_type('intc', [],
+    """
+    Signed integer type, compatible with C ``int``.
+    """)
+
+# TODO: These docs probably need an if to highlight the default rather than
+# the C-types (and be correct).
+add_newdoc_for_scalar_type('int_', [],
+    """
+    Default signed integer type, 64bit on 64bit systems and 32bit on 32bit
+    systems.
+    """)
+
+add_newdoc_for_scalar_type('longlong', [],
+    """
+    Signed integer type, compatible with C ``long long``.
+    """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned char``.
+    """)
+
+add_newdoc_for_scalar_type('ushort', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned short``.
+    """)
+
+add_newdoc_for_scalar_type('uintc', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned int``.
+    """)
+
+add_newdoc_for_scalar_type('uint', [],
+    """
+    Unsigned integer type, 64bit on 64bit systems and 32bit on 32bit
+    systems.
+    """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+    """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+    """)
+
+add_newdoc_for_scalar_type('half', [],
+    """
+    Half-precision floating-point number type.
+    """)
+
+add_newdoc_for_scalar_type('single', [],
+    """
+    Single-precision floating-point number type, compatible with C ``float``.
+    """)
+
+add_newdoc_for_scalar_type('double', [],
+    """
+    Double-precision floating-point number type, compatible with Python
+    :class:`float` and C ``double``.
+    """)
+
+add_newdoc_for_scalar_type('longdouble', [],
+    """
+    Extended-precision floating-point number type, compatible with C
+    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+    """)
+
+add_newdoc_for_scalar_type('csingle', [],
+    """
+    Complex number type composed of two single-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('cdouble', [],
+    """
+    Complex number type composed of two double-precision floating-point
+    numbers, compatible with Python :class:`complex`.
+    """)
+
+add_newdoc_for_scalar_type('clongdouble', [],
+    """
+    Complex number type composed of two extended-precision floating-point
+    numbers.
+    """)
+
+add_newdoc_for_scalar_type('object_', [],
+    """
+    Any Python object.
+    """)
+
+add_newdoc_for_scalar_type('str_', [],
+    r"""
+    A unicode string.
+
+    This type strips trailing null codepoints.
+
+    >>> s = np.str_("abc\x00")
+    >>> s
+    'abc'
+
+    Unlike the builtin :class:`str`, this supports the
+    :ref:`python:bufferobjects`, exposing its contents as UCS4:
+
+    >>> m = memoryview(np.str_("abc"))
+    >>> m.format
+    '3w'
+    >>> m.tobytes()
+    b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
+    """)
+
+add_newdoc_for_scalar_type('bytes_', [],
+    r"""
+    A byte string.
+
+    When used in arrays, this type strips trailing null bytes.
+    """)
+
+add_newdoc_for_scalar_type('void', [],
+    r"""
+    np.void(length_or_data, /, dtype=None)
+
+    Create a new structured or unstructured void scalar.
+
+    Parameters
+    ----------
+    length_or_data : int, array-like, bytes-like, object
+        One of multiple meanings (see notes). The length or
+        bytes data of an unstructured void. Or alternatively,
+        the data to be stored in the new scalar when `dtype`
+        is provided.
+        This can be an array-like, in which case an array may
+        be returned.
+    dtype : dtype, optional
+        If provided the dtype of the new scalar. This dtype must
+        be a "void" dtype (i.e. a structured or unstructured void,
+        see also :ref:`defining-structured-types`).
+
+        .. versionadded:: 1.24
+
+    Notes
+    -----
+    For historical reasons and because void scalars can represent both
+    arbitrary byte data and structured dtypes, the void constructor
+    has three calling conventions:
+
+    1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
+       ``\0`` bytes. The 5 can be a Python or NumPy integer.
+    2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
+       The dtype itemsize will match the byte string length, here ``"V10"``.
+    3. When a ``dtype=`` is passed the call is roughly the same as an
+       array creation. However, a void scalar rather than array is returned.
+
+    Please see the examples which show all three different conventions.
+
+    Examples
+    --------
+    >>> np.void(5)
+    np.void(b'\x00\x00\x00\x00\x00')
+    >>> np.void(b'abcd')
+    np.void(b'\x61\x62\x63\x64')
+    >>> np.void((3.2, b'eggs'), dtype="d,S5")
+    np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
+    >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
+    np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
+
+    """)
+
+add_newdoc_for_scalar_type('datetime64', [],
+    """
+    If created from a 64-bit integer, it represents an offset from
+    ``1970-01-01T00:00:00``.
+    If created from a string, the string can be in ISO 8601 date
+    or datetime format.
+
+    When parsing a string to create a datetime object, if the string contains
+    a trailing timezone (a 'Z' or a timezone offset), the timezone will be
+    dropped and a ``UserWarning`` is issued.
+
+    Datetime64 objects should be considered to be UTC and therefore have an
+    offset of +0000.
+
+    >>> np.datetime64(10, 'Y')
+    np.datetime64('1980')
+    >>> np.datetime64('1980', 'Y')
+    np.datetime64('1980')
+    >>> np.datetime64(10, 'D')
+    np.datetime64('1970-01-11')
+
+    See :ref:`arrays.datetime` for more information.
+    """)
+
+add_newdoc_for_scalar_type('timedelta64', [],
+    """
+    A timedelta stored as a 64-bit integer.
+
+    See :ref:`arrays.datetime` for more information.
+    """)
+
+add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
+    """
+    integer.is_integer() -> bool
+
+    Return ``True`` if the number is finite with integral value.
+
+    .. versionadded:: 1.22
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.int64(-2).is_integer()
+    True
+    >>> np.uint32(5).is_integer()
+    True
+    """))
+
+# TODO: work out how to put this on the base class, np.floating
+for float_name in ('half', 'single', 'double', 'longdouble'):
+    add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
+        f"""
+        {float_name}.as_integer_ratio() -> (int, int)
+
+        Return a pair of integers, whose ratio is exactly equal to the original
+        floating point number, and with a positive denominator.
+        Raise `OverflowError` on infinities and a `ValueError` on NaNs.
+
+        >>> np.{float_name}(10.0).as_integer_ratio()
+        (10, 1)
+        >>> np.{float_name}(0.0).as_integer_ratio()
+        (0, 1)
+        >>> np.{float_name}(-.25).as_integer_ratio()
+        (-1, 4)
+        """))
+
+    add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
+        f"""
+        {float_name}.is_integer() -> bool
+
+        Return ``True`` if the floating point number is finite with integral
+        value, and ``False`` otherwise.
+
+        .. versionadded:: 1.22
+
+        Examples
+        --------
+        >>> np.{float_name}(-2.0).is_integer()
+        True
+        >>> np.{float_name}(3.2).is_integer()
+        False
+        """))
+
+for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
+                 'int64', 'uint64'):
+    # Add negative examples for signed cases by checking typecode
+    add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
+        f"""
+        {int_name}.bit_count() -> int
+
+        Computes the number of 1-bits in the absolute value of the input.
+        Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
+
+        Examples
+        --------
+        >>> np.{int_name}(127).bit_count()
+        7""" +
+        (f"""
+        >>> np.{int_name}(-127).bit_count()
+        7
+        """ if dtype(int_name).char.islower() else "")))
diff --git a/python/numpy/_core/_add_newdocs_scalars.pyi b/python/numpy/_core/_add_newdocs_scalars.pyi
new file mode 100644
index 000000000..4a06c9b07
--- /dev/null
+++ b/python/numpy/_core/_add_newdocs_scalars.pyi
@@ -0,0 +1,16 @@
+from collections.abc import Iterable
+from typing import Final
+
+import numpy as np
+
+possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ...
+_system: Final[str] = ...
+_machine: Final[str] = ...
+_doc_alias_string: Final[str] = ...
+_bool_docstring: Final[str] = ...
+int_name: str = ...
+float_name: str = ...
+
+def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ...
+def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ...
+def _get_platform_and_machine() -> tuple[str, str]: ...
diff --git a/python/numpy/_core/_asarray.py b/python/numpy/_core/_asarray.py
new file mode 100644
index 000000000..613c5cf57
--- /dev/null
+++ b/python/numpy/_core/_asarray.py
@@ -0,0 +1,134 @@
+"""
+Functions in the ``as*array`` family that promote array-likes into arrays.
+
+`require` fits this category despite its name not matching this pattern.
+""" +from .multiarray import array, asanyarray +from .overrides import ( + array_function_dispatch, + finalize_array_function_like, + set_module, +) + +__all__ = ["require"] + + +POSSIBLE_FLAGS = { + 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C', + 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F', + 'A': 'A', 'ALIGNED': 'A', + 'W': 'W', 'WRITEABLE': 'W', + 'O': 'O', 'OWNDATA': 'O', + 'E': 'E', 'ENSUREARRAY': 'E' +} + + +@finalize_array_function_like +@set_module('numpy') +def require(a, dtype=None, requirements=None, *, like=None): + """ + Return an ndarray of the provided type that satisfies requirements. + + This function is useful to be sure that an array with the correct flags + is returned for passing to compiled code (perhaps through ctypes). + + Parameters + ---------- + a : array_like + The object to be converted to a type-and-requirement-satisfying array. + dtype : data-type + The required data-type. If None preserve the current dtype. If your + application requires the data to be in native byteorder, include + a byteorder specification as a part of the dtype specification. + requirements : str or sequence of str + The requirements list can be any of the following + + * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array + * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array + * 'ALIGNED' ('A') - ensure a data-type aligned array + * 'WRITEABLE' ('W') - ensure a writable array + * 'OWNDATA' ('O') - ensure an array that owns its own data + * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array with specified requirements and type if given. + + See Also + -------- + asarray : Convert input to an ndarray. + asanyarray : Convert to an ndarray, but pass through ndarray subclasses. + ascontiguousarray : Convert input to a contiguous array. + asfortranarray : Convert input to an ndarray with column-major + memory order. + ndarray.flags : Information about the memory layout of the array. + + Notes + ----- + The returned array will be guaranteed to have the listed requirements + by making a copy if needed. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2,3) + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : False + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F']) + >>> y.flags + C_CONTIGUOUS : False + F_CONTIGUOUS : True + OWNDATA : True + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + if like is not None: + return _require_with_like( + like, + a, + dtype=dtype, + requirements=requirements, + ) + + if not requirements: + return asanyarray(a, dtype=dtype) + + requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements} + + if 'E' in requirements: + requirements.remove('E') + subok = False + else: + subok = True + + order = 'A' + if requirements >= {'C', 'F'}: + raise ValueError('Cannot specify both "C" and "F" order') + elif 'F' in requirements: + order = 'F' + requirements.remove('F') + elif 'C' in requirements: + order = 'C' + requirements.remove('C') + + arr = array(a, dtype=dtype, order=order, copy=None, subok=subok) + + for prop in requirements: + if not arr.flags[prop]: + return arr.copy(order) + return arr + + +_require_with_like = array_function_dispatch()(require) diff --git a/python/numpy/_core/_asarray.pyi b/python/numpy/_core/_asarray.pyi new file mode 100644 index 000000000..a4bee0048 --- /dev/null +++ b/python/numpy/_core/_asarray.pyi @@ -0,0 +1,41 @@ +from collections.abc import Iterable +from typing import Any, Literal, TypeAlias, TypeVar, overload + +from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc + +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +_Requirements: TypeAlias = Literal[ + "C", "C_CONTIGUOUS", "CONTIGUOUS", + "F", "F_CONTIGUOUS", "FORTRAN", + "A", "ALIGNED", + "W", "WRITEABLE", + "O", "OWNDATA" +] +_E: TypeAlias = Literal["E", "ENSUREARRAY"] +_RequirementsWithE: TypeAlias = _Requirements | _E + +@overload +def require( + a: _ArrayT, + dtype: None = ..., + requirements: _Requirements | Iterable[_Requirements] | None = ..., + *, + like: _SupportsArrayFunc = ... +) -> _ArrayT: ... +@overload +def require( + a: object, + dtype: DTypeLike = ..., + requirements: _E | Iterable[_RequirementsWithE] = ..., + *, + like: _SupportsArrayFunc = ... +) -> NDArray[Any]: ... +@overload +def require( + a: object, + dtype: DTypeLike = ..., + requirements: _Requirements | Iterable[_Requirements] | None = ..., + *, + like: _SupportsArrayFunc = ... +) -> NDArray[Any]: ... diff --git a/python/numpy/_core/_dtype.py b/python/numpy/_core/_dtype.py new file mode 100644 index 000000000..6a8a091b2 --- /dev/null +++ b/python/numpy/_core/_dtype.py @@ -0,0 +1,366 @@ +""" +A place for code to be called from the implementation of np.dtype + +String handling is much easier to do correctly in python. 
+""" +import numpy as np + +_kind_to_stem = { + 'u': 'uint', + 'i': 'int', + 'c': 'complex', + 'f': 'float', + 'b': 'bool', + 'V': 'void', + 'O': 'object', + 'M': 'datetime', + 'm': 'timedelta', + 'S': 'bytes', + 'U': 'str', +} + + +def _kind_name(dtype): + try: + return _kind_to_stem[dtype.kind] + except KeyError as e: + raise RuntimeError( + f"internal dtype error, unknown kind {dtype.kind!r}" + ) from None + + +def __str__(dtype): + if dtype.fields is not None: + return _struct_str(dtype, include_align=True) + elif dtype.subdtype: + return _subarray_str(dtype) + elif issubclass(dtype.type, np.flexible) or not dtype.isnative: + return dtype.str + else: + return dtype.name + + +def __repr__(dtype): + arg_str = _construction_repr(dtype, include_align=False) + if dtype.isalignedstruct: + arg_str = arg_str + ", align=True" + return f"dtype({arg_str})" + + +def _unpack_field(dtype, offset, title=None): + """ + Helper function to normalize the items in dtype.fields. + + Call as: + + dtype, offset, title = _unpack_field(*dtype.fields[name]) + """ + return dtype, offset, title + + +def _isunsized(dtype): + # PyDataType_ISUNSIZED + return dtype.itemsize == 0 + + +def _construction_repr(dtype, include_align=False, short=False): + """ + Creates a string repr of the dtype, excluding the 'dtype()' part + surrounding the object. This object may be a string, a list, or + a dict depending on the nature of the dtype. This + is the object passed as the first parameter to the dtype + constructor, and if no additional constructor parameters are + given, will reproduce the exact memory layout. + + Parameters + ---------- + short : bool + If true, this creates a shorter repr using 'kind' and 'itemsize', + instead of the longer type name. + + include_align : bool + If true, this includes the 'align=True' parameter + inside the struct dtype construction dict when needed. Use this flag + if you want a proper repr string without the 'dtype()' part around it. + + If false, this does not preserve the + 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for + struct arrays like the regular repr does, because the 'align' + flag is not part of first dtype constructor parameter. This + mode is intended for a full 'repr', where the 'align=True' is + provided as the second parameter. + """ + if dtype.fields is not None: + return _struct_str(dtype, include_align=include_align) + elif dtype.subdtype: + return _subarray_str(dtype) + else: + return _scalar_str(dtype, short=short) + + +def _scalar_str(dtype, short): + byteorder = _byte_order_str(dtype) + + if dtype.type == np.bool: + if short: + return "'?'" + else: + return "'bool'" + + elif dtype.type == np.object_: + # The object reference may be different sizes on different + # platforms, so it should never include the itemsize here. 
+ return "'O'" + + elif dtype.type == np.bytes_: + if _isunsized(dtype): + return "'S'" + else: + return "'S%d'" % dtype.itemsize + + elif dtype.type == np.str_: + if _isunsized(dtype): + return f"'{byteorder}U'" + else: + return "'%sU%d'" % (byteorder, dtype.itemsize / 4) + + elif dtype.type == str: + return "'T'" + + elif not type(dtype)._legacy: + return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'" + + # unlike the other types, subclasses of void are preserved - but + # historically the repr does not actually reveal the subclass + elif issubclass(dtype.type, np.void): + if _isunsized(dtype): + return "'V'" + else: + return "'V%d'" % dtype.itemsize + + elif dtype.type == np.datetime64: + return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'" + + elif dtype.type == np.timedelta64: + return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'" + + elif dtype.isbuiltin == 2: + return dtype.type.__name__ + + elif np.issubdtype(dtype, np.number): + # Short repr with endianness, like '' """ + # hack to obtain the native and swapped byte order characters + swapped = np.dtype(int).newbyteorder('S') + native = swapped.newbyteorder('S') + + byteorder = dtype.byteorder + if byteorder == '=': + return native.byteorder + if byteorder == 'S': + # TODO: this path can never be reached + return swapped.byteorder + elif byteorder == '|': + return '' + else: + return byteorder + + +def _datetime_metadata_str(dtype): + # TODO: this duplicates the C metastr_to_unicode functionality + unit, count = np.datetime_data(dtype) + if unit == 'generic': + return '' + elif count == 1: + return f'[{unit}]' + else: + return f'[{count}{unit}]' + + +def _struct_dict_str(dtype, includealignedflag): + # unpack the fields dictionary into ls + names = dtype.names + fld_dtypes = [] + offsets = [] + titles = [] + for name in names: + fld_dtype, offset, title = _unpack_field(*dtype.fields[name]) + fld_dtypes.append(fld_dtype) + offsets.append(offset) + titles.append(title) + + # Build up a string to make the dictionary + + if np._core.arrayprint._get_legacy_print_mode() <= 121: + colon = ":" + fieldsep = "," + else: + colon = ": " + fieldsep = ", " + + # First, the names + ret = "{'names'%s[" % colon + ret += fieldsep.join(repr(name) for name in names) + + # Second, the formats + ret += f"], 'formats'{colon}[" + ret += fieldsep.join( + _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes) + + # Third, the offsets + ret += f"], 'offsets'{colon}[" + ret += fieldsep.join("%d" % offset for offset in offsets) + + # Fourth, the titles + if any(title is not None for title in titles): + ret += f"], 'titles'{colon}[" + ret += fieldsep.join(repr(title) for title in titles) + + # Fifth, the itemsize + ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize) + + if (includealignedflag and dtype.isalignedstruct): + # Finally, the aligned flag + ret += ", 'aligned'%sTrue}" % colon + else: + ret += "}" + + return ret + + +def _aligned_offset(offset, alignment): + # round up offset: + return - (-offset // alignment) * alignment + + +def _is_packed(dtype): + """ + Checks whether the structured data type in 'dtype' + has a simple layout, where all the fields are in order, + and follow each other with no alignment padding. + + When this returns true, the dtype can be reconstructed + from a list of the field names and dtypes with no additional + dtype parameters. + + Duplicates the C `is_dtype_struct_simple_unaligned_layout` function. 
+ """ + align = dtype.isalignedstruct + max_alignment = 1 + total_offset = 0 + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + if align: + total_offset = _aligned_offset(total_offset, fld_dtype.alignment) + max_alignment = max(max_alignment, fld_dtype.alignment) + + if fld_offset != total_offset: + return False + total_offset += fld_dtype.itemsize + + if align: + total_offset = _aligned_offset(total_offset, max_alignment) + + return total_offset == dtype.itemsize + + +def _struct_list_str(dtype): + items = [] + for name in dtype.names: + fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) + + item = "(" + if title is not None: + item += f"({title!r}, {name!r}), " + else: + item += f"{name!r}, " + # Special case subarray handling here + if fld_dtype.subdtype is not None: + base, shape = fld_dtype.subdtype + item += f"{_construction_repr(base, short=True)}, {shape}" + else: + item += _construction_repr(fld_dtype, short=True) + + item += ")" + items.append(item) + + return "[" + ", ".join(items) + "]" + + +def _struct_str(dtype, include_align): + # The list str representation can't include the 'align=' flag, + # so if it is requested and the struct has the aligned flag set, + # we must use the dict str instead. + if not (include_align and dtype.isalignedstruct) and _is_packed(dtype): + sub = _struct_list_str(dtype) + + else: + sub = _struct_dict_str(dtype, include_align) + + # If the data type isn't the default, void, show it + if dtype.type != np.void: + return f"({dtype.type.__module__}.{dtype.type.__name__}, {sub})" + else: + return sub + + +def _subarray_str(dtype): + base, shape = dtype.subdtype + return f"({_construction_repr(base, short=True)}, {shape})" + + +def _name_includes_bit_suffix(dtype): + if dtype.type == np.object_: + # pointer size varies by system, best to omit it + return False + elif dtype.type == np.bool: + # implied + return False + elif dtype.type is None: + return True + elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype): + # unspecified + return False + else: + return True + + +def _name_get(dtype): + # provides dtype.name.__get__, documented as returning a "bit name" + + if dtype.isbuiltin == 2: + # user dtypes don't promise to do anything special + return dtype.type.__name__ + + if not type(dtype)._legacy: + name = type(dtype).__name__ + + elif issubclass(dtype.type, np.void): + # historically, void subclasses preserve their name, eg `record64` + name = dtype.type.__name__ + else: + name = _kind_name(dtype) + + # append bit counts + if _name_includes_bit_suffix(dtype): + name += f"{dtype.itemsize * 8}" + + # append metadata to datetimes + if dtype.type in (np.datetime64, np.timedelta64): + name += _datetime_metadata_str(dtype) + + return name diff --git a/python/numpy/_core/_dtype.pyi b/python/numpy/_core/_dtype.pyi new file mode 100644 index 000000000..6cdd77b22 --- /dev/null +++ b/python/numpy/_core/_dtype.pyi @@ -0,0 +1,58 @@ +from typing import Final, TypeAlias, TypedDict, overload, type_check_only +from typing import Literal as L + +from typing_extensions import ReadOnly, TypeVar + +import numpy as np + +### + +_T = TypeVar("_T") + +_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"] + +@type_check_only +class _KindToStemType(TypedDict): + u: ReadOnly[L["uint"]] + i: ReadOnly[L["int"]] + c: ReadOnly[L["complex"]] + f: ReadOnly[L["float"]] + b: ReadOnly[L["bool"]] + V: ReadOnly[L["void"]] + O: ReadOnly[L["object"]] + 
M: ReadOnly[L["datetime"]] + m: ReadOnly[L["timedelta"]] + S: ReadOnly[L["bytes"]] + U: ReadOnly[L["str"]] + +### + +_kind_to_stem: Final[_KindToStemType] = ... + +# +def _kind_name(dtype: np.dtype) -> _Name: ... +def __str__(dtype: np.dtype) -> str: ... +def __repr__(dtype: np.dtype) -> str: ... + +# +def _isunsized(dtype: np.dtype) -> bool: ... +def _is_packed(dtype: np.dtype) -> bool: ... +def _name_includes_bit_suffix(dtype: np.dtype) -> bool: ... + +# +def _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ... +def _scalar_str(dtype: np.dtype, short: bool) -> str: ... +def _byte_order_str(dtype: np.dtype) -> str: ... +def _datetime_metadata_str(dtype: np.dtype) -> str: ... +def _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ... +def _struct_list_str(dtype: np.dtype) -> str: ... +def _struct_str(dtype: np.dtype, include_align: bool) -> str: ... +def _subarray_str(dtype: np.dtype) -> str: ... +def _name_get(dtype: np.dtype) -> str: ... + +# +@overload +def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ... +@overload +def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ... +def _aligned_offset(offset: int, alignment: int) -> int: ... diff --git a/python/numpy/_core/_dtype_ctypes.py b/python/numpy/_core/_dtype_ctypes.py new file mode 100644 index 000000000..4de6df6db --- /dev/null +++ b/python/numpy/_core/_dtype_ctypes.py @@ -0,0 +1,120 @@ +""" +Conversion from ctypes to dtype. + +In an ideal world, we could achieve this through the PEP3118 buffer protocol, +something like:: + + def dtype_from_ctypes_type(t): + # needed to ensure that the shape of `t` is within memoryview.format + class DummyStruct(ctypes.Structure): + _fields_ = [('a', t)] + + # empty to avoid memory allocation + ctype_0 = (DummyStruct * 0)() + mv = memoryview(ctype_0) + + # convert the struct, and slice back out the field + return _dtype_from_pep3118(mv.format)['a'] + +Unfortunately, this fails because: + +* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782) +* PEP3118 cannot represent unions, but both numpy and ctypes can +* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) +""" + +# We delay-import ctypes for distributions that do not include it. +# While this module is not used unless the user passes in ctypes +# members, it is eagerly imported from numpy/_core/__init__.py. +import numpy as np + + +def _from_ctypes_array(t): + return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,))) + + +def _from_ctypes_structure(t): + for item in t._fields_: + if len(item) > 2: + raise TypeError( + "ctypes bitfields have no dtype equivalent") + + if hasattr(t, "_pack_"): + import ctypes + formats = [] + offsets = [] + names = [] + current_offset = 0 + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + # Each type has a default offset, this is platform dependent + # for some types. 
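+            # For example: with _pack_ = 2 and a ctypes.c_int32 field
+            # (alignment 4) at current_offset 3, effective_pack becomes
+            # min(2, 4) == 2, so the round-up below places the field at
+            # offset 4.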
+ effective_pack = min(t._pack_, ctypes.alignment(ftyp)) + current_offset = ( + (current_offset + effective_pack - 1) // effective_pack + ) * effective_pack + offsets.append(current_offset) + current_offset += ctypes.sizeof(ftyp) + + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) + else: + fields = [] + for fname, ftyp in t._fields_: + fields.append((fname, dtype_from_ctypes_type(ftyp))) + + # by default, ctypes structs are aligned + return np.dtype(fields, align=True) + + +def _from_ctypes_scalar(t): + """ + Return the dtype type with endianness included if it's the case + """ + if getattr(t, '__ctype_be__', None) is t: + return np.dtype('>' + t._type_) + elif getattr(t, '__ctype_le__', None) is t: + return np.dtype('<' + t._type_) + else: + return np.dtype(t._type_) + + +def _from_ctypes_union(t): + import ctypes + formats = [] + offsets = [] + names = [] + for fname, ftyp in t._fields_: + names.append(fname) + formats.append(dtype_from_ctypes_type(ftyp)) + offsets.append(0) # Union fields are offset to 0 + + return np.dtype({ + "formats": formats, + "offsets": offsets, + "names": names, + "itemsize": ctypes.sizeof(t)}) + + +def dtype_from_ctypes_type(t): + """ + Construct a dtype object from a ctypes type + """ + import _ctypes + if issubclass(t, _ctypes.Array): + return _from_ctypes_array(t) + elif issubclass(t, _ctypes._Pointer): + raise TypeError("ctypes pointers have no dtype equivalent") + elif issubclass(t, _ctypes.Structure): + return _from_ctypes_structure(t) + elif issubclass(t, _ctypes.Union): + return _from_ctypes_union(t) + elif isinstance(getattr(t, '_type_', None), str): + return _from_ctypes_scalar(t) + else: + raise NotImplementedError( + f"Unknown ctypes type {t.__name__}") diff --git a/python/numpy/_core/_dtype_ctypes.pyi b/python/numpy/_core/_dtype_ctypes.pyi new file mode 100644 index 000000000..69438a2c1 --- /dev/null +++ b/python/numpy/_core/_dtype_ctypes.pyi @@ -0,0 +1,83 @@ +import _ctypes +import ctypes as ct +from typing import Any, overload + +import numpy as np + +# +@overload +def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ... +@overload +def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ... 
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
+@overload
+def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
+
+# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtime, see
+# https://github.com/numpy/numpy/issues/28360
+
+#
+def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ...
+def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ...
+def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ...
+
+# keep in sync with `dtype_from_ctypes_type` (minus the first overload)
+@overload
+def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
+@overload
+def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
diff --git a/python/numpy/_core/_exceptions.py b/python/numpy/_core/_exceptions.py
new file mode 100644
index 000000000..73b07d25e
--- /dev/null
+++ b/python/numpy/_core/_exceptions.py
@@ -0,0 +1,162 @@
+"""
+Various richly-typed exceptions, that also help us deal with string formatting
+in Python where it's easier.
+
+By putting the formatting in `__str__`, we also avoid paying the cost for
+users who silence the exceptions.
+"""
+
+def _unpack_tuple(tup):
+    if len(tup) == 1:
+        return tup[0]
+    else:
+        return tup
+
+
+def _display_as_base(cls):
+    """
+    A decorator that makes an exception class look like its base.
+
+    We use this to hide subclasses that are implementation details - the user
+    should catch the base type, which is what the traceback will show them.
+
+    Classes decorated with this decorator are subject to removal without a
+    deprecation warning.
+ """ + assert issubclass(cls, Exception) + cls.__name__ = cls.__base__.__name__ + return cls + + +class UFuncTypeError(TypeError): + """ Base class for all ufunc exceptions """ + def __init__(self, ufunc): + self.ufunc = ufunc + + +@_display_as_base +class _UFuncNoLoopError(UFuncTypeError): + """ Thrown when a ufunc loop cannot be found """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc) + self.dtypes = tuple(dtypes) + + def __str__(self): + return ( + f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature " + f"matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} " + f"-> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}" + ) + + +@_display_as_base +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + """ Thrown when a binary resolution fails """ + def __init__(self, ufunc, dtypes): + super().__init__(ufunc, dtypes) + assert len(self.dtypes) == 2 + + def __str__(self): + return ( + "ufunc {!r} cannot use operands with types {!r} and {!r}" + ).format( + self.ufunc.__name__, *self.dtypes + ) + + +@_display_as_base +class _UFuncCastingError(UFuncTypeError): + def __init__(self, ufunc, casting, from_, to): + super().__init__(ufunc) + self.casting = casting + self.from_ = from_ + self.to = to + + +@_display_as_base +class _UFuncInputCastingError(_UFuncCastingError): + """ Thrown when a ufunc input cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.in_i = i + + def __str__(self): + # only show the number if more than one input exists + i_str = f"{self.in_i} " if self.ufunc.nin != 1 else "" + return ( + f"Cannot cast ufunc {self.ufunc.__name__!r} input {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" + ) + + +@_display_as_base +class _UFuncOutputCastingError(_UFuncCastingError): + """ Thrown when a ufunc output cannot be casted """ + def __init__(self, ufunc, casting, from_, to, i): + super().__init__(ufunc, casting, from_, to) + self.out_i = i + + def __str__(self): + # only show the number if more than one output exists + i_str = f"{self.out_i} " if self.ufunc.nout != 1 else "" + return ( + f"Cannot cast ufunc {self.ufunc.__name__!r} output {i_str}from " + f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}" + ) + + +@_display_as_base +class _ArrayMemoryError(MemoryError): + """ Thrown when an array cannot be allocated""" + def __init__(self, shape, dtype): + self.shape = shape + self.dtype = dtype + + @property + def _total_size(self): + num_bytes = self.dtype.itemsize + for dim in self.shape: + num_bytes *= dim + return num_bytes + + @staticmethod + def _size_to_string(num_bytes): + """ Convert a number of bytes into a binary size string """ + + # https://en.wikipedia.org/wiki/Binary_prefix + LOG2_STEP = 10 + STEP = 1024 + units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] + + unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP + unit_val = 1 << (unit_i * LOG2_STEP) + n_units = num_bytes / unit_val + del unit_val + + # ensure we pick a unit that is correct after rounding + if round(n_units) == STEP: + unit_i += 1 + n_units /= STEP + + # deal with sizes so large that we don't have units for them + if unit_i >= len(units): + new_unit_i = len(units) - 1 + n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP) + unit_i = new_unit_i + + unit_name = units[unit_i] + # format with a sensible number of digits + if unit_i == 0: + # no decimal point on bytes + return f'{n_units:.0f} {unit_name}' + elif round(n_units) < 1000: + 
+            # 3 significant figures, if none are dropped to the left of the .
+            return f'{n_units:#.3g} {unit_name}'
+        else:
+            # just give all the digits otherwise
+            return f'{n_units:#.0f} {unit_name}'
+
+    def __str__(self):
+        size_str = self._size_to_string(self._total_size)
+        return (f"Unable to allocate {size_str} for an array with shape "
+                f"{self.shape} and data type {self.dtype}")
diff --git a/python/numpy/_core/_exceptions.pyi b/python/numpy/_core/_exceptions.pyi
new file mode 100644
index 000000000..02637a17b
--- /dev/null
+++ b/python/numpy/_core/_exceptions.pyi
@@ -0,0 +1,55 @@
+from collections.abc import Iterable
+from typing import Any, Final, TypeVar, overload
+
+import numpy as np
+from numpy import _CastingKind
+from numpy._utils import set_module as set_module
+
+###
+
+_T = TypeVar("_T")
+_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
+_ExceptionT = TypeVar("_ExceptionT", bound=Exception)
+
+###
+
+class UFuncTypeError(TypeError):
+    ufunc: Final[np.ufunc]
+    def __init__(self, /, ufunc: np.ufunc) -> None: ...
+
+class _UFuncNoLoopError(UFuncTypeError):
+    dtypes: tuple[np.dtype, ...]
+    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
+
+class _UFuncBinaryResolutionError(_UFuncNoLoopError):
+    dtypes: tuple[np.dtype, np.dtype]
+    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
+
+class _UFuncCastingError(UFuncTypeError):
+    casting: Final[_CastingKind]
+    from_: Final[np.dtype]
+    to: Final[np.dtype]
+    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...

+class _UFuncInputCastingError(_UFuncCastingError):
+    in_i: Final[int]
+    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
+
+class _UFuncOutputCastingError(_UFuncCastingError):
+    out_i: Final[int]
+    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
+
+class _ArrayMemoryError(MemoryError):
+    shape: tuple[int, ...]
+    dtype: np.dtype
+    def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
+    @property
+    def _total_size(self) -> int: ...
+    @staticmethod
+    def _size_to_string(num_bytes: int) -> str: ...
+
+@overload
+def _unpack_tuple(tup: tuple[_T]) -> _T: ...
+@overload
+def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
+def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
diff --git a/python/numpy/_core/_internal.py b/python/numpy/_core/_internal.py
new file mode 100644
index 000000000..e00e1b2c1
--- /dev/null
+++ b/python/numpy/_core/_internal.py
@@ -0,0 +1,958 @@
+"""
+A place for internal code
+
+Some things are more easily handled in Python.
+ +""" +import ast +import math +import re +import sys +import warnings + +from numpy import _NoValue +from numpy.exceptions import DTypePromotionError + +from .multiarray import StringDType, array, dtype, promote_types + +try: + import ctypes +except ImportError: + ctypes = None + +IS_PYPY = sys.implementation.name == 'pypy' + +if sys.byteorder == 'little': + _nbo = '<' +else: + _nbo = '>' + +def _makenames_list(adict, align): + allfields = [] + + for fname, obj in adict.items(): + n = len(obj) + if not isinstance(obj, tuple) or n not in (2, 3): + raise ValueError("entry not a 2- or 3- tuple") + if n > 2 and obj[2] == fname: + continue + num = int(obj[1]) + if num < 0: + raise ValueError("invalid offset.") + format = dtype(obj[0], align=align) + if n > 2: + title = obj[2] + else: + title = None + allfields.append((fname, format, num, title)) + # sort by offsets + allfields.sort(key=lambda x: x[2]) + names = [x[0] for x in allfields] + formats = [x[1] for x in allfields] + offsets = [x[2] for x in allfields] + titles = [x[3] for x in allfields] + + return names, formats, offsets, titles + +# Called in PyArray_DescrConverter function when +# a dictionary without "names" and "formats" +# fields is used as a data-type descriptor. +def _usefields(adict, align): + try: + names = adict[-1] + except KeyError: + names = None + if names is None: + names, formats, offsets, titles = _makenames_list(adict, align) + else: + formats = [] + offsets = [] + titles = [] + for name in names: + res = adict[name] + formats.append(res[0]) + offsets.append(res[1]) + if len(res) > 2: + titles.append(res[2]) + else: + titles.append(None) + + return dtype({"names": names, + "formats": formats, + "offsets": offsets, + "titles": titles}, align) + + +# construct an array_protocol descriptor list +# from the fields attribute of a descriptor +# This calls itself recursively but should eventually hit +# a descriptor that has no fields and then return +# a simple typestring + +def _array_descr(descriptor): + fields = descriptor.fields + if fields is None: + subdtype = descriptor.subdtype + if subdtype is None: + if descriptor.metadata is None: + return descriptor.str + else: + new = descriptor.metadata.copy() + if new: + return (descriptor.str, new) + else: + return descriptor.str + else: + return (_array_descr(subdtype[0]), subdtype[1]) + + names = descriptor.names + ordered_fields = [fields[x] + (x,) for x in names] + result = [] + offset = 0 + for field in ordered_fields: + if field[1] > offset: + num = field[1] - offset + result.append(('', f'|V{num}')) + offset += num + elif field[1] < offset: + raise ValueError( + "dtype.descr is not defined for types with overlapping or " + "out-of-order fields") + if len(field) > 3: + name = (field[2], field[3]) + else: + name = field[2] + if field[0].subdtype: + tup = (name, _array_descr(field[0].subdtype[0]), + field[0].subdtype[1]) + else: + tup = (name, _array_descr(field[0])) + offset += field[0].itemsize + result.append(tup) + + if descriptor.itemsize > offset: + num = descriptor.itemsize - offset + result.append(('', f'|V{num}')) + + return result + + +# format_re was originally from numarray by J. Todd Miller + +format_re = re.compile(r'(?P[<>|=]?)' + r'(?P *[(]?[ ,0-9]*[)]? 
+sep_re = re.compile(r'\s*,\s*')
+space_re = re.compile(r'\s+$')
+
+# astr is a string (perhaps comma separated)
+
+_convorder = {'=': _nbo}
+
+def _commastring(astr):
+    startindex = 0
+    result = []
+    islist = False
+    while startindex < len(astr):
+        mo = format_re.match(astr, pos=startindex)
+        try:
+            (order1, repeats, order2, dtype) = mo.groups()
+        except (TypeError, AttributeError):
+            raise ValueError(
+                f'format number {len(result) + 1} of "{astr}" is not recognized'
+            ) from None
+        startindex = mo.end()
+        # Separator or ending padding
+        if startindex < len(astr):
+            if space_re.match(astr, pos=startindex):
+                startindex = len(astr)
+            else:
+                mo = sep_re.match(astr, pos=startindex)
+                if not mo:
+                    raise ValueError(
+                        'format number %d of "%s" is not recognized' %
+                        (len(result) + 1, astr))
+                startindex = mo.end()
+                islist = True
+
+        if order2 == '':
+            order = order1
+        elif order1 == '':
+            order = order2
+        else:
+            order1 = _convorder.get(order1, order1)
+            order2 = _convorder.get(order2, order2)
+            if (order1 != order2):
+                raise ValueError(
+                    f'inconsistent byte-order specification {order1} and {order2}')
+            order = order1
+
+        if order in ('|', '=', _nbo):
+            order = ''
+        dtype = order + dtype
+        if repeats == '':
+            newitem = dtype
+        else:
+            if (repeats[0] == "(" and repeats[-1] == ")"
+                    and repeats[1:-1].strip() != ""
+                    and "," not in repeats):
+                warnings.warn(
+                    'Passing in a parenthesized single number for repeats '
+                    'is deprecated; pass either a single number or indicate '
+                    'a tuple with a comma, like "(2,)".', DeprecationWarning,
+                    stacklevel=2)
+            newitem = (dtype, ast.literal_eval(repeats))
+
+        result.append(newitem)
+
+    return result if islist else result[0]
+
+class dummy_ctype:
+
+    def __init__(self, cls):
+        self._cls = cls
+
+    def __mul__(self, other):
+        return self
+
+    def __call__(self, *other):
+        return self._cls(other)
+
+    def __eq__(self, other):
+        return self._cls == other._cls
+
+    def __ne__(self, other):
+        return self._cls != other._cls
+
+def _getintp_ctype():
+    val = _getintp_ctype.cache
+    if val is not None:
+        return val
+    if ctypes is None:
+        import numpy as np
+        val = dummy_ctype(np.intp)
+    else:
+        char = dtype('n').char
+        if char == 'i':
+            val = ctypes.c_int
+        elif char == 'l':
+            val = ctypes.c_long
+        elif char == 'q':
+            val = ctypes.c_longlong
+        else:
+            val = ctypes.c_long
+    _getintp_ctype.cache = val
+    return val
+
+
+_getintp_ctype.cache = None
+
+# Used for .ctypes attribute of ndarray
+
+class _missing_ctypes:
+    def cast(self, num, obj):
+        return num.value
+
+    class c_void_p:
+        def __init__(self, ptr):
+            self.value = ptr
+
+
+class _ctypes:
+    def __init__(self, array, ptr=None):
+        self._arr = array
+
+        if ctypes:
+            self._ctypes = ctypes
+            self._data = self._ctypes.c_void_p(ptr)
+        else:
+            # fake a pointer-like object that holds onto the reference
+            self._ctypes = _missing_ctypes()
+            self._data = self._ctypes.c_void_p(ptr)
+            self._data._objects = array
+
+        if self._arr.ndim == 0:
+            self._zerod = True
+        else:
+            self._zerod = False
+
+    def data_as(self, obj):
+        """
+        Return the data pointer cast to a particular c-types object.
+        For example, calling ``self._as_parameter_`` is equivalent to
+        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
+        the data as a pointer to a ctypes array of floating-point data:
+        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
+
+        The returned pointer will keep a reference to the array.
+ """ + # _ctypes.cast function causes a circular reference of self._data in + # self._data._objects. Attributes of self._data cannot be released + # until gc.collect is called. Make a copy of the pointer first then + # let it hold the array reference. This is a workaround to circumvent + # the CPython bug https://bugs.python.org/issue12836. + ptr = self._ctypes.cast(self._data, obj) + ptr._arr = self._arr + return ptr + + def shape_as(self, obj): + """ + Return the shape tuple as an array of some other c-types + type. For example: ``self.shape_as(ctypes.c_short)``. + """ + if self._zerod: + return None + return (obj * self._arr.ndim)(*self._arr.shape) + + def strides_as(self, obj): + """ + Return the strides tuple as an array of some other + c-types type. For example: ``self.strides_as(ctypes.c_longlong)``. + """ + if self._zerod: + return None + return (obj * self._arr.ndim)(*self._arr.strides) + + @property + def data(self): + """ + A pointer to the memory area of the array as a Python integer. + This memory area may contain data that is not aligned, or not in + correct byte-order. The memory area may not even be writeable. + The array flags and data-type of this array should be respected + when passing this attribute to arbitrary C-code to avoid trouble + that can include Python crashing. User Beware! The value of this + attribute is exactly the same as: + ``self._array_interface_['data'][0]``. + + Note that unlike ``data_as``, a reference won't be kept to the array: + code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a + pointer to a deallocated array, and should be spelt + ``(a + b).ctypes.data_as(ctypes.c_void_p)`` + """ + return self._data.value + + @property + def shape(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the C-integer corresponding to ``dtype('p')`` on this + platform (see `~numpy.ctypeslib.c_intp`). This base-type could be + `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on + the platform. The ctypes array contains the shape of + the underlying array. + """ + return self.shape_as(_getintp_ctype()) + + @property + def strides(self): + """ + (c_intp*self.ndim): A ctypes array of length self.ndim where + the basetype is the same as for the shape attribute. This ctypes + array contains the strides information from the underlying array. + This strides information is important for showing how many bytes + must be jumped to get to the next element in the array. + """ + return self.strides_as(_getintp_ctype()) + + @property + def _as_parameter_(self): + """ + Overrides the ctypes semi-magic method + + Enables `c_func(some_array.ctypes)` + """ + return self.data_as(ctypes.c_void_p) + + # Numpy 1.21.0, 2021-05-18 + + def get_data(self): + """Deprecated getter for the `_ctypes.data` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_data" is deprecated. Use "data" instead', + DeprecationWarning, stacklevel=2) + return self.data + + def get_shape(self): + """Deprecated getter for the `_ctypes.shape` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_shape" is deprecated. Use "shape" instead', + DeprecationWarning, stacklevel=2) + return self.shape + + def get_strides(self): + """Deprecated getter for the `_ctypes.strides` property. + + .. deprecated:: 1.21 + """ + warnings.warn('"get_strides" is deprecated. 
Use "strides" instead', + DeprecationWarning, stacklevel=2) + return self.strides + + def get_as_parameter(self): + """Deprecated getter for the `_ctypes._as_parameter_` property. + + .. deprecated:: 1.21 + """ + warnings.warn( + '"get_as_parameter" is deprecated. Use "_as_parameter_" instead', + DeprecationWarning, stacklevel=2, + ) + return self._as_parameter_ + + +def _newnames(datatype, order): + """ + Given a datatype and an order object, return a new names tuple, with the + order indicated + """ + oldnames = datatype.names + nameslist = list(oldnames) + if isinstance(order, str): + order = [order] + seen = set() + if isinstance(order, (list, tuple)): + for name in order: + try: + nameslist.remove(name) + except ValueError: + if name in seen: + raise ValueError(f"duplicate field name: {name}") from None + else: + raise ValueError(f"unknown field name: {name}") from None + seen.add(name) + return tuple(list(order) + nameslist) + raise ValueError(f"unsupported order value: {order}") + +def _copy_fields(ary): + """Return copy of structured array with padding between fields removed. + + Parameters + ---------- + ary : ndarray + Structured array from which to remove padding bytes + + Returns + ------- + ary_copy : ndarray + Copy of ary with padding bytes removed + """ + dt = ary.dtype + copy_dtype = {'names': dt.names, + 'formats': [dt.fields[name][0] for name in dt.names]} + return array(ary, dtype=copy_dtype, copy=True) + +def _promote_fields(dt1, dt2): + """ Perform type promotion for two structured dtypes. + + Parameters + ---------- + dt1 : structured dtype + First dtype. + dt2 : structured dtype + Second dtype. + + Returns + ------- + out : dtype + The promoted dtype + + Notes + ----- + If one of the inputs is aligned, the result will be. The titles of + both descriptors must match (point to the same field). + """ + # Both must be structured and have the same names in the same order + if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names: + raise DTypePromotionError( + f"field names `{dt1.names}` and `{dt2.names}` mismatch.") + + # if both are identical, we can (maybe!) just return the same dtype. + identical = dt1 is dt2 + new_fields = [] + for name in dt1.names: + field1 = dt1.fields[name] + field2 = dt2.fields[name] + new_descr = promote_types(field1[0], field2[0]) + identical = identical and new_descr is field1[0] + + # Check that the titles match (if given): + if field1[2:] != field2[2:]: + raise DTypePromotionError( + f"field titles of field '{name}' mismatch") + if len(field1) == 2: + new_fields.append((name, new_descr)) + else: + new_fields.append(((field1[2], name), new_descr)) + + res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct) + + # Might as well preserve identity (and metadata) if the dtype is identical + # and the itemsize, offsets are also unmodified. This could probably be + # sped up, but also probably just be removed entirely. + if identical and res.itemsize == dt1.itemsize: + for name in dt1.names: + if dt1.fields[name][1] != res.fields[name][1]: + return res # the dtype changed. + return dt1 + + return res + + +def _getfield_is_safe(oldtype, newtype, offset): + """ Checks safety of getfield for object arrays. + + As in _view_is_safe, we need to check that memory containing objects is not + reinterpreted as a non-object datatype and vice versa. + + Parameters + ---------- + oldtype : data-type + Data type of the original ndarray. 
+ newtype : data-type + Data type of the field being accessed by ndarray.getfield + offset : int + Offset of the field being accessed by ndarray.getfield + + Raises + ------ + TypeError + If the field access is invalid + + """ + if newtype.hasobject or oldtype.hasobject: + if offset == 0 and newtype == oldtype: + return + if oldtype.names is not None: + for name in oldtype.names: + if (oldtype.fields[name][1] == offset and + oldtype.fields[name][0] == newtype): + return + raise TypeError("Cannot get/set field of an object array") + return + +def _view_is_safe(oldtype, newtype): + """ Checks safety of a view involving object arrays, for example when + doing:: + + np.zeros(10, dtype=oldtype).view(newtype) + + Parameters + ---------- + oldtype : data-type + Data type of original ndarray + newtype : data-type + Data type of the view + + Raises + ------ + TypeError + If the new type is incompatible with the old type. + + """ + + # if the types are equivalent, there is no problem. + # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) + if oldtype == newtype: + return + + if newtype.hasobject or oldtype.hasobject: + raise TypeError("Cannot change data-type for array of references.") + return + + +# Given a string containing a PEP 3118 format specifier, +# construct a NumPy dtype + +_pep3118_native_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'h', + 'H': 'H', + 'i': 'i', + 'I': 'I', + 'l': 'l', + 'L': 'L', + 'q': 'q', + 'Q': 'Q', + 'e': 'e', + 'f': 'f', + 'd': 'd', + 'g': 'g', + 'Zf': 'F', + 'Zd': 'D', + 'Zg': 'G', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) + +_pep3118_standard_map = { + '?': '?', + 'c': 'S1', + 'b': 'b', + 'B': 'B', + 'h': 'i2', + 'H': 'u2', + 'i': 'i4', + 'I': 'u4', + 'l': 'i4', + 'L': 'u4', + 'q': 'i8', + 'Q': 'u8', + 'e': 'f2', + 'f': 'f', + 'd': 'd', + 'Zf': 'F', + 'Zd': 'D', + 's': 'S', + 'w': 'U', + 'O': 'O', + 'x': 'V', # padding +} +_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) + +_pep3118_unsupported_map = { + 'u': 'UCS-2 strings', + '&': 'pointers', + 't': 'bitfields', + 'X': 'function pointers', +} + +class _Stream: + def __init__(self, s): + self.s = s + self.byteorder = '@' + + def advance(self, n): + res = self.s[:n] + self.s = self.s[n:] + return res + + def consume(self, c): + if self.s[:len(c)] == c: + self.advance(len(c)) + return True + return False + + def consume_until(self, c): + if callable(c): + i = 0 + while i < len(self.s) and not c(self.s[i]): + i = i + 1 + return self.advance(i) + else: + i = self.s.index(c) + res = self.advance(i) + self.advance(len(c)) + return res + + @property + def next(self): + return self.s[0] + + def __bool__(self): + return bool(self.s) + + +def _dtype_from_pep3118(spec): + stream = _Stream(spec) + dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) + return dtype + +def __dtype_from_pep3118(stream, is_subdtype): + field_spec = { + 'names': [], + 'formats': [], + 'offsets': [], + 'itemsize': 0 + } + offset = 0 + common_alignment = 1 + is_padding = False + + # Parse spec + while stream: + value = None + + # End of structure, bail out to upper level + if stream.consume('}'): + break + + # Sub-arrays (1) + shape = None + if stream.consume('('): + shape = stream.consume_until(')') + shape = tuple(map(int, shape.split(','))) + + # Byte order + if stream.next in ('@', '=', '<', '>', '^', '!'): + byteorder = stream.advance(1) + if byteorder == '!': + byteorder = '>' + 
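# ('!' means network byte order, i.e. big-endian, hence the '>' above)
+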
stream.byteorder = byteorder + + # Byte order characters also control native vs. standard type sizes + if stream.byteorder in ('@', '^'): + type_map = _pep3118_native_map + type_map_chars = _pep3118_native_typechars + else: + type_map = _pep3118_standard_map + type_map_chars = _pep3118_standard_typechars + + # Item sizes + itemsize_str = stream.consume_until(lambda c: not c.isdigit()) + if itemsize_str: + itemsize = int(itemsize_str) + else: + itemsize = 1 + + # Data types + is_padding = False + + if stream.consume('T{'): + value, align = __dtype_from_pep3118( + stream, is_subdtype=True) + elif stream.next in type_map_chars: + if stream.next == 'Z': + typechar = stream.advance(2) + else: + typechar = stream.advance(1) + + is_padding = (typechar == 'x') + dtypechar = type_map[typechar] + if dtypechar in 'USV': + dtypechar += '%d' % itemsize + itemsize = 1 + numpy_byteorder = {'@': '=', '^': '='}.get( + stream.byteorder, stream.byteorder) + value = dtype(numpy_byteorder + dtypechar) + align = value.alignment + elif stream.next in _pep3118_unsupported_map: + desc = _pep3118_unsupported_map[stream.next] + raise NotImplementedError( + f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})") + else: + raise ValueError( + f"Unknown PEP 3118 data type specifier {stream.s!r}" + ) + + # + # Native alignment may require padding + # + # Here we assume that the presence of a '@' character implicitly + # implies that the start of the array is *already* aligned. + # + extra_offset = 0 + if stream.byteorder == '@': + start_padding = (-offset) % align + intra_padding = (-value.itemsize) % align + + offset += start_padding + + if intra_padding != 0: + if itemsize > 1 or (shape is not None and _prod(shape) > 1): + # Inject internal padding to the end of the sub-item + value = _add_trailing_padding(value, intra_padding) + else: + # We can postpone the injection of internal padding, + # as the item appears at most once + extra_offset += intra_padding + + # Update common alignment + common_alignment = _lcm(align, common_alignment) + + # Convert itemsize to sub-array + if itemsize != 1: + value = dtype((value, (itemsize,))) + + # Sub-arrays (2) + if shape is not None: + value = dtype((value, shape)) + + # Field name + if stream.consume(':'): + name = stream.consume_until(':') + else: + name = None + + if not (is_padding and name is None): + if name is not None and name in field_spec['names']: + raise RuntimeError( + f"Duplicate field name '{name}' in PEP3118 format" + ) + field_spec['names'].append(name) + field_spec['formats'].append(value) + field_spec['offsets'].append(offset) + + offset += value.itemsize + offset += extra_offset + + field_spec['itemsize'] = offset + + # extra final padding for aligned types + if stream.byteorder == '@': + field_spec['itemsize'] += (-offset) % common_alignment + + # Check if this was a simple 1-item type, and unwrap it + if (field_spec['names'] == [None] + and field_spec['offsets'][0] == 0 + and field_spec['itemsize'] == field_spec['formats'][0].itemsize + and not is_subdtype): + ret = field_spec['formats'][0] + else: + _fix_names(field_spec) + ret = dtype(field_spec) + + # Finished + return ret, common_alignment + +def _fix_names(field_spec): + """ Replace names which are None with the next unused f%d name """ + names = field_spec['names'] + for i, name in enumerate(names): + if name is not None: + continue + + j = 0 + while True: + name = f'f{j}' + if name not in names: + break + j = j + 1 + names[i] = name + +def _add_trailing_padding(value, padding): + """Inject 
the specified number of padding bytes at the end of a dtype""" + if value.fields is None: + field_spec = { + 'names': ['f0'], + 'formats': [value], + 'offsets': [0], + 'itemsize': value.itemsize + } + else: + fields = value.fields + names = value.names + field_spec = { + 'names': names, + 'formats': [fields[name][0] for name in names], + 'offsets': [fields[name][1] for name in names], + 'itemsize': value.itemsize + } + + field_spec['itemsize'] += padding + return dtype(field_spec) + +def _prod(a): + p = 1 + for x in a: + p *= x + return p + +def _gcd(a, b): + """Calculate the greatest common divisor of a and b""" + if not (math.isfinite(a) and math.isfinite(b)): + raise ValueError('Can only find greatest common divisor of ' + f'finite arguments, found "{a}" and "{b}"') + while b: + a, b = b, a % b + return a + +def _lcm(a, b): + return a // _gcd(a, b) * b + +def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): + """ Format the error message for when __array_ufunc__ gives up. """ + args_string = ', '.join([f'{arg!r}' for arg in inputs] + + [f'{k}={v!r}' + for k, v in kwargs.items()]) + args = inputs + kwargs.get('out', ()) + types_string = ', '.join(repr(type(arg).__name__) for arg in args) + return ('operand type(s) all returned NotImplemented from ' + f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}' + ) + + +def array_function_errmsg_formatter(public_api, types): + """ Format the error message for when __array_ufunc__ gives up. """ + func_name = f'{public_api.__module__}.{public_api.__name__}' + return (f"no implementation found for '{func_name}' on types that implement " + f'__array_function__: {list(types)}') + + +def _ufunc_doc_signature_formatter(ufunc): + """ + Builds a signature string which resembles PEP 457 + + This is used to construct the first line of the docstring + """ + + # input arguments are simple + if ufunc.nin == 1: + in_args = 'x' + else: + in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin)) + + # output arguments are both keyword or positional + if ufunc.nout == 0: + out_args = ', /, out=()' + elif ufunc.nout == 1: + out_args = ', /, out=None' + else: + out_args = '[, {positional}], / [, out={default}]'.format( + positional=', '.join( + f'out{i + 1}' for i in range(ufunc.nout)), + default=repr((None,) * ufunc.nout) + ) + + # keyword only args depend on whether this is a gufunc + kwargs = ( + ", casting='same_kind'" + ", order='K'" + ", dtype=None" + ", subok=True" + ) + + # NOTE: gufuncs may or may not support the `axis` parameter + if ufunc.signature is None: + kwargs = f", where=True{kwargs}[, signature]" + else: + kwargs += "[, signature, axes, axis]" + + # join all the parts together + return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})' + + +def npy_ctypes_check(cls): + # determine if a class comes from ctypes, in order to work around + # a bug in the buffer protocol for those objects, bpo-10746 + try: + # ctypes class are new-style, so have an __mro__. This probably fails + # for ctypes classes with multiple inheritance. 
+ if IS_PYPY: + # (..., _ctypes.basics._CData, Bufferable, object) + ctype_base = cls.__mro__[-3] + else: + # # (..., _ctypes._CData, object) + ctype_base = cls.__mro__[-2] + # right now, they're part of the _ctypes module + return '_ctypes' in ctype_base.__module__ + except Exception: + return False + +# used to handle the _NoValue default argument for na_object +# in the C implementation of the __reduce__ method for stringdtype +def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue): + if na_object is _NoValue: + return StringDType(coerce=coerce) + return StringDType(coerce=coerce, na_object=na_object) diff --git a/python/numpy/_core/_internal.pyi b/python/numpy/_core/_internal.pyi new file mode 100644 index 000000000..3038297b6 --- /dev/null +++ b/python/numpy/_core/_internal.pyi @@ -0,0 +1,72 @@ +import ctypes as ct +import re +from collections.abc import Callable, Iterable +from typing import Any, Final, Generic, Self, overload + +from typing_extensions import TypeVar, deprecated + +import numpy as np +import numpy.typing as npt +from numpy.ctypeslib import c_intp + +_CastT = TypeVar("_CastT", bound=ct._CanCastTo) +_T_co = TypeVar("_T_co", covariant=True) +_CT = TypeVar("_CT", bound=ct._CData) +_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True) + +### + +IS_PYPY: Final[bool] = ... + +format_re: Final[re.Pattern[str]] = ... +sep_re: Final[re.Pattern[str]] = ... +space_re: Final[re.Pattern[str]] = ... + +### + +# TODO: Let the likes of `shape_as` and `strides_as` return `None` +# for 0D arrays once we've got shape-support + +class _ctypes(Generic[_PT_co]): + @overload + def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ... + @overload + def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ... + + # + @property + def data(self) -> _PT_co: ... + @property + def shape(self) -> ct.Array[c_intp]: ... + @property + def strides(self) -> ct.Array[c_intp]: ... + @property + def _as_parameter_(self) -> ct.c_void_p: ... + + # + def data_as(self, /, obj: type[_CastT]) -> _CastT: ... + def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ... + + # + @deprecated('"get_data" is deprecated. Use "data" instead') + def get_data(self, /) -> _PT_co: ... + @deprecated('"get_shape" is deprecated. Use "shape" instead') + def get_shape(self, /) -> ct.Array[c_intp]: ... + @deprecated('"get_strides" is deprecated. Use "strides" instead') + def get_strides(self, /) -> ct.Array[c_intp]: ... + @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead') + def get_as_parameter(self, /) -> ct.c_void_p: ... + +class dummy_ctype(Generic[_T_co]): + _cls: type[_T_co] + + def __init__(self, /, cls: type[_T_co]) -> None: ... + def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + def __mul__(self, other: object, /) -> Self: ... + def __call__(self, /, *other: object) -> _T_co: ... + +def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ... +def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ... +def npy_ctypes_check(cls: type) -> bool: ... 
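
# A minimal usage sketch of dtype_from_ctypes_type from _dtype_ctypes above
# (illustrative; the Point struct and the little-endian '<i4' result are
# assumptions, not something this patch ships):
#
#     import ctypes
#     import numpy as np
#     from numpy._core._dtype_ctypes import dtype_from_ctypes_type
#
#     class Point(ctypes.Structure):
#         _fields_ = [('x', ctypes.c_int32), ('y', ctypes.c_int32)]
#
#     dt = dtype_from_ctypes_type(Point)
#     # dtype([('x', '<i4'), ('y', '<i4')], align=True) on little-endian
#     arr = np.zeros(3, dtype=dt)
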
diff --git a/python/numpy/_core/_machar.py b/python/numpy/_core/_machar.py new file mode 100644 index 000000000..b49742a15 --- /dev/null +++ b/python/numpy/_core/_machar.py @@ -0,0 +1,355 @@ +""" +Machine arithmetic - determine the parameters of the +floating-point arithmetic system + +Author: Pearu Peterson, September 2003 + +""" +__all__ = ['MachAr'] + +from ._ufunc_config import errstate +from .fromnumeric import any + +# Need to speed this up...especially for longdouble + +# Deprecated 2021-10-20, NumPy 1.22 +class MachAr: + """ + Diagnosing machine parameters. + + Attributes + ---------- + ibeta : int + Radix in which numbers are represented. + it : int + Number of base-`ibeta` digits in the floating point mantissa M. + machep : int + Exponent of the smallest (most negative) power of `ibeta` that, + added to 1.0, gives something different from 1.0 + eps : float + Floating-point number ``beta**machep`` (floating point precision) + negep : int + Exponent of the smallest power of `ibeta` that, subtracted + from 1.0, gives something different from 1.0. + epsneg : float + Floating-point number ``beta**negep``. + iexp : int + Number of bits in the exponent (including its sign and bias). + minexp : int + Smallest (most negative) power of `ibeta` consistent with there + being no leading zeros in the mantissa. + xmin : float + Floating-point number ``beta**minexp`` (the smallest [in + magnitude] positive floating point number with full precision). + maxexp : int + Smallest (positive) power of `ibeta` that causes overflow. + xmax : float + ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude] + usable floating value). + irnd : int + In ``range(6)``, information on what kind of rounding is done + in addition, and on how underflow is handled. + ngrd : int + Number of 'guard digits' used when truncating the product + of two mantissas to fit the representation. + epsilon : float + Same as `eps`. + tiny : float + An alias for `smallest_normal`, kept for backwards compatibility. + huge : float + Same as `xmax`. + precision : float + ``- int(-log10(eps))`` + resolution : float + ``- 10**(-precision)`` + smallest_normal : float + The smallest positive floating point number with 1 as leading bit in + the mantissa following IEEE-754. Same as `xmin`. + smallest_subnormal : float + The smallest positive floating point number with 0 as leading bit in + the mantissa following IEEE-754. + + Parameters + ---------- + float_conv : function, optional + Function that converts an integer or integer array to a float + or float array. Default is `float`. + int_conv : function, optional + Function that converts a float or float array to an integer or + integer array. Default is `int`. + float_to_float : function, optional + Function that converts a float array to float. Default is `float`. + Note that this does not seem to do anything useful in the current + implementation. + float_to_str : function, optional + Function that converts a single float to a string. Default is + ``lambda v:'%24.16e' %v``. + title : str, optional + Title that is printed in the string representation of `MachAr`. + + See Also + -------- + finfo : Machine limits for floating point types. + iinfo : Machine limits for integer types. + + References + ---------- + .. [1] Press, Teukolsky, Vetterling and Flannery, + "Numerical Recipes in C++," 2nd ed, + Cambridge University Press, 2002, p. 31. 
+ + """ + + def __init__(self, float_conv=float, int_conv=int, + float_to_float=float, + float_to_str=lambda v: f'{v:24.16e}', + title='Python floating point number'): + """ + + float_conv - convert integer to float (array) + int_conv - convert float (array) to integer + float_to_float - convert float array to float + float_to_str - convert array float to str + title - description of used floating point numbers + + """ + # We ignore all errors here because we are purposely triggering + # underflow to detect the properties of the running arch. + with errstate(under='ignore'): + self._do_init(float_conv, int_conv, float_to_float, float_to_str, title) + + def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title): + max_iterN = 10000 + msg = "Did not converge after %d tries with %s" + one = float_conv(1) + two = one + one + zero = one - one + + # Do we really need to do this? Aren't they 2 and 2.0? + # Determine ibeta and beta + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + b = one + for _ in range(max_iterN): + b = b + b + temp = a + b + itemp = int_conv(temp - a) + if any(itemp != 0): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + ibeta = itemp + beta = float_conv(ibeta) + + # Determine it and irnd + it = -1 + b = one + for _ in range(max_iterN): + it = it + 1 + b = b * beta + temp = b + one + temp1 = temp - b + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + + betah = beta / two + a = one + for _ in range(max_iterN): + a = a + a + temp = a + one + temp1 = temp - a + if any(temp1 - one != zero): + break + else: + raise RuntimeError(msg % (_, one.dtype)) + temp = a + betah + irnd = 0 + if any(temp - a != zero): + irnd = 1 + tempa = a + beta + temp = tempa + betah + if irnd == 0 and any(temp - tempa != zero): + irnd = 2 + + # Determine negep and epsneg + negep = it + 3 + betain = one / beta + a = one + for i in range(negep): + a = a * betain + b = a + for _ in range(max_iterN): + temp = one - a + if any(temp - one != zero): + break + a = a * beta + negep = negep - 1 + # Prevent infinite loop on PPC with gcc 4.0: + if negep < 0: + raise RuntimeError("could not determine machine tolerance " + "for 'negep', locals() -> %s" % (locals())) + else: + raise RuntimeError(msg % (_, one.dtype)) + negep = -negep + epsneg = a + + # Determine machep and eps + machep = - it - 3 + a = b + + for _ in range(max_iterN): + temp = one + a + if any(temp - one != zero): + break + a = a * beta + machep = machep + 1 + else: + raise RuntimeError(msg % (_, one.dtype)) + eps = a + + # Determine ngrd + ngrd = 0 + temp = one + eps + if irnd == 0 and any(temp * one - one != zero): + ngrd = 1 + + # Determine iexp + i = 0 + k = 1 + z = betain + t = one + eps + nxres = 0 + for _ in range(max_iterN): + y = z + z = y * y + a = z * one # Check here for underflow + temp = z * t + if any(a + a == zero) or any(abs(z) >= y): + break + temp1 = temp * betain + if any(temp1 * beta == z): + break + i = i + 1 + k = k + k + else: + raise RuntimeError(msg % (_, one.dtype)) + if ibeta != 10: + iexp = i + 1 + mx = k + k + else: + iexp = 2 + iz = ibeta + while k >= iz: + iz = iz * ibeta + iexp = iexp + 1 + mx = iz + iz - 1 + + # Determine minexp and xmin + for _ in range(max_iterN): + xmin = y + y = y * betain + a = y * one + temp = y * t + if any((a + a) != zero) and any(abs(y) < xmin): + k = k + 1 + temp1 = temp * betain + if any(temp1 * 
beta == y) and any(temp != y): + nxres = 3 + xmin = y + break + else: + break + else: + raise RuntimeError(msg % (_, one.dtype)) + minexp = -k + + # Determine maxexp, xmax + if mx <= k + k - 3 and ibeta != 10: + mx = mx + mx + iexp = iexp + 1 + maxexp = mx + minexp + irnd = irnd + nxres + if irnd >= 2: + maxexp = maxexp - 2 + i = maxexp + minexp + if ibeta == 2 and not i: + maxexp = maxexp - 1 + if i > 20: + maxexp = maxexp - 1 + if any(a != y): + maxexp = maxexp - 2 + xmax = one - epsneg + if any(xmax * one != xmax): + xmax = one - beta * epsneg + xmax = xmax / (xmin * beta * beta * beta) + i = maxexp + minexp + 3 + for j in range(i): + if ibeta == 2: + xmax = xmax + xmax + else: + xmax = xmax * beta + + smallest_subnormal = abs(xmin / beta ** (it)) + + self.ibeta = ibeta + self.it = it + self.negep = negep + self.epsneg = float_to_float(epsneg) + self._str_epsneg = float_to_str(epsneg) + self.machep = machep + self.eps = float_to_float(eps) + self._str_eps = float_to_str(eps) + self.ngrd = ngrd + self.iexp = iexp + self.minexp = minexp + self.xmin = float_to_float(xmin) + self._str_xmin = float_to_str(xmin) + self.maxexp = maxexp + self.xmax = float_to_float(xmax) + self._str_xmax = float_to_str(xmax) + self.irnd = irnd + + self.title = title + # Commonly used parameters + self.epsilon = self.eps + self.tiny = self.xmin + self.huge = self.xmax + self.smallest_normal = self.xmin + self._str_smallest_normal = float_to_str(self.xmin) + self.smallest_subnormal = float_to_float(smallest_subnormal) + self._str_smallest_subnormal = float_to_str(smallest_subnormal) + + import math + self.precision = int(-math.log10(float_to_float(self.eps))) + ten = two + two + two + two + two + resolution = ten ** (-self.precision) + self.resolution = float_to_float(resolution) + self._str_resolution = float_to_str(resolution) + + def __str__(self): + fmt = ( + 'Machine parameters for %(title)s\n' + '---------------------------------------------------------------------\n' + 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n' + 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n' + 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n' + 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n' + 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n' + 'smallest_normal=%(smallest_normal)s ' + 'smallest_subnormal=%(smallest_subnormal)s\n' + '---------------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + +if __name__ == '__main__': + print(MachAr()) diff --git a/python/numpy/_core/_exceptions.pyi b/python/numpy/_core/_exceptions.pyi new file mode 100644 index 000000000..02637a17b --- /dev/null +++ b/python/numpy/_core/_exceptions.pyi @@ -0,0 +1,55 @@ +from collections.abc import Iterable +from typing import Any, Final, TypeVar, overload + +import numpy as np +from numpy import _CastingKind +from numpy._utils import set_module as set_module + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]]) +_ExceptionT = TypeVar("_ExceptionT", bound=Exception) + +### + +class UFuncTypeError(TypeError): + ufunc: Final[np.ufunc] + def __init__(self, /, ufunc: np.ufunc) -> None: ... + +class _UFuncNoLoopError(UFuncTypeError): + dtypes: tuple[np.dtype, ...] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
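A note on the radix probe in `MachAr._do_init` above: it doubles `a` until the spacing between adjacent floats at `a` exceeds 1 (so `(a + 1) - a` stops returning 1), then doubles `b` until `(a + b) - a` becomes nonzero; that first surviving increment is the radix `ibeta`. A minimal sketch of the same idea, specialized to plain Python floats:

```python
# Sketch of MachAr's ibeta probe using ordinary floats.
a = 1.0
while True:
    a = a + a                  # double until 1.0 falls below the ulp at a
    if (a + 1.0) - a != 1.0:
        break
b = 1.0
while int((a + b) - a) == 0:   # smallest increment that survives the add
    b = b + b
print(int((a + b) - a))        # 2: IEEE-754 binary64 has radix 2
```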
+ +class _UFuncBinaryResolutionError(_UFuncNoLoopError): + dtypes: tuple[np.dtype, np.dtype] + def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ... + +class _UFuncCastingError(UFuncTypeError): + casting: Final[_CastingKind] + from_: Final[np.dtype] + to: Final[np.dtype] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ... + +class _UFuncInputCastingError(_UFuncCastingError): + in_i: Final[int] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... + +class _UFuncOutputCastingError(_UFuncCastingError): + out_i: Final[int] + def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ... + +class _ArrayMemoryError(MemoryError): + shape: tuple[int, ...] + dtype: np.dtype + def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ... + @property + def _total_size(self) -> int: ... + @staticmethod + def _size_to_string(num_bytes: int) -> str: ... + +@overload +def _unpack_tuple(tup: tuple[_T]) -> _T: ... +@overload +def _unpack_tuple(tup: _TupleT) -> _TupleT: ... +def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ... diff --git a/python/numpy/_core/_methods.py b/python/numpy/_core/_methods.py new file mode 100644 index 000000000..21ad79000 --- /dev/null +++ b/python/numpy/_core/_methods.py @@ -0,0 +1,255 @@ +""" +Array methods which are called by both the C-code for the method +and the Python code for the NumPy-namespace function + +""" +import os +import pickle +import warnings +from contextlib import nullcontext + +import numpy as np +from numpy._core import multiarray as mu +from numpy._core import numerictypes as nt +from numpy._core import umath as um +from numpy._core.multiarray import asanyarray +from numpy._globals import _NoValue + +# save those O(100) nanoseconds! 
+bool_dt = mu.dtype("bool") +umr_maximum = um.maximum.reduce +umr_minimum = um.minimum.reduce +umr_sum = um.add.reduce +umr_prod = um.multiply.reduce +umr_bitwise_count = um.bitwise_count +umr_any = um.logical_or.reduce +umr_all = um.logical_and.reduce + +# Complex types to -> (2,)float view for fast-path computation in _var() +_complex_to_float = { + nt.dtype(nt.csingle): nt.dtype(nt.single), + nt.dtype(nt.cdouble): nt.dtype(nt.double), +} +# Special case for windows: ensure double takes precedence +if nt.dtype(nt.longdouble) != nt.dtype(nt.double): + _complex_to_float.update({ + nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble), + }) + +# avoid keyword arguments to speed up parsing, saves about 15%-20% for very +# small reductions +def _amax(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_maximum(a, axis, None, out, keepdims, initial, where) + +def _amin(a, axis=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_minimum(a, axis, None, out, keepdims, initial, where) + +def _sum(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_sum(a, axis, dtype, out, keepdims, initial, where) + +def _prod(a, axis=None, dtype=None, out=None, keepdims=False, + initial=_NoValue, where=True): + return umr_prod(a, axis, dtype, out, keepdims, initial, where) + +def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # By default, return a boolean for any and all + if dtype is None: + dtype = bool_dt + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_any(a, axis, dtype, out, keepdims) + return umr_any(a, axis, dtype, out, keepdims, where=where) + +def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + # By default, return a boolean for any and all + if dtype is None: + dtype = bool_dt + # Parsing keyword arguments is currently fairly slow, so avoid it for now + if where is True: + return umr_all(a, axis, dtype, out, keepdims) + return umr_all(a, axis, dtype, out, keepdims, where=where) + +def _count_reduce_items(arr, axis, keepdims=False, where=True): + # fast-path for the default case + if where is True: + # no boolean mask given, calculate items according to axis + if axis is None: + axis = tuple(range(arr.ndim)) + elif not isinstance(axis, tuple): + axis = (axis,) + items = 1 + for ax in axis: + items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)] + items = nt.intp(items) + else: + # TODO: Optimize case when `where` is broadcast along a non-reduction + # axis and full sum is more excessive than needed. + + # guarded to protect circular imports + from numpy.lib._stride_tricks_impl import broadcast_to + # count True values in (potentially broadcasted) boolean mask + items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None, + keepdims) + return items + +def _clip(a, min=None, max=None, out=None, **kwargs): + if a.dtype.kind in "iu": + # If min/max is a Python integer, deal with out-of-bound values here. + # (This enforces NEP 50 rules as no value based promotion is done.) 
+ if type(min) is int and min <= np.iinfo(a.dtype).min: + min = None + if type(max) is int and max >= np.iinfo(a.dtype).max: + max = None + + if min is None and max is None: + # return identity + return um.positive(a, out=out, **kwargs) + elif min is None: + return um.minimum(a, max, out=out, **kwargs) + elif max is None: + return um.maximum(a, min, out=out, **kwargs) + else: + return um.clip(a, min, max, out=out, **kwargs) + +def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True): + arr = asanyarray(a) + + is_float16_result = False + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + if rcount == 0 if where is True else umr_any(rcount == 0, axis=None): + warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None: + if issubclass(arr.dtype.type, (nt.integer, nt.bool)): + dtype = mu.dtype('f8') + elif issubclass(arr.dtype.type, nt.float16): + dtype = mu.dtype('f4') + is_float16_result = True + + ret = umr_sum(arr, axis, dtype, out, keepdims, where=where) + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + if is_float16_result and out is None: + ret = arr.dtype.type(ret) + elif hasattr(ret, 'dtype'): + if is_float16_result: + ret = arr.dtype.type(ret / rcount) + else: + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True, mean=None): + arr = asanyarray(a) + + rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where) + # Make this warning show up on top. + if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None): + warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, + stacklevel=2) + + # Cast bool, unsigned int, and int to float64 by default + if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)): + dtype = mu.dtype('f8') + + if mean is not None: + arrmean = mean + else: + # Compute the mean. + # Note that if dtype is not of inexact type then arraymean will + # not be either. + arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where) + # The shape of rcount has to match arrmean to not change the shape of + # out in broadcasting. Otherwise, it cannot be stored back to arrmean. + if rcount.ndim == 0: + # fast-path for default case when where is True + div = rcount + else: + # matching rcount to arrmean when where is specified as array + div = rcount.reshape(arrmean.shape) + if isinstance(arrmean, mu.ndarray): + arrmean = um.true_divide(arrmean, div, out=arrmean, + casting='unsafe', subok=False) + elif hasattr(arrmean, "dtype"): + arrmean = arrmean.dtype.type(arrmean / rcount) + else: + arrmean = arrmean / rcount + + # Compute sum of squared deviations from mean + # Note that x may not be inexact and that we need it to be an array, + # not a scalar. 
+ x = asanyarray(arr - arrmean) + + if issubclass(arr.dtype.type, (nt.floating, nt.integer)): + x = um.multiply(x, x, out=x) + # Fast-paths for built-in complex types + elif x.dtype in _complex_to_float: + xv = x.view(dtype=(_complex_to_float[x.dtype], (2,))) + um.multiply(xv, xv, out=xv) + x = um.add(xv[..., 0], xv[..., 1], out=x.real).real + # Most general case; includes handling object arrays containing imaginary + # numbers and complex types with non-native byteorder + else: + x = um.multiply(x, um.conjugate(x), out=x).real + + ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where) + + # Compute degrees of freedom and make sure it is not negative. + rcount = um.maximum(rcount - ddof, 0) + + # divide by degrees of freedom + if isinstance(ret, mu.ndarray): + ret = um.true_divide( + ret, rcount, out=ret, casting='unsafe', subok=False) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(ret / rcount) + else: + ret = ret / rcount + + return ret + +def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, + where=True, mean=None): + ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean) + + if isinstance(ret, mu.ndarray): + ret = um.sqrt(ret, out=ret) + elif hasattr(ret, 'dtype'): + ret = ret.dtype.type(um.sqrt(ret)) + else: + ret = um.sqrt(ret) + + return ret + +def _ptp(a, axis=None, out=None, keepdims=False): + return um.subtract( + umr_maximum(a, axis, None, out, keepdims), + umr_minimum(a, axis, None, None, keepdims), + out + ) + +def _dump(self, file, protocol=2): + if hasattr(file, 'write'): + ctx = nullcontext(file) + else: + ctx = open(os.fspath(file), "wb") + with ctx as f: + pickle.dump(self, f, protocol=protocol) + +def _dumps(self, protocol=2): + return pickle.dumps(self, protocol=protocol) + +def _bitwise_count(a, out=None, *, where=True, casting='same_kind', + order='K', dtype=None, subok=True): + return umr_bitwise_count(a, out, where=where, casting=casting, + order=order, dtype=dtype, subok=subok) diff --git a/python/numpy/_core/_methods.pyi b/python/numpy/_core/_methods.pyi new file mode 100644 index 000000000..3c80683f0 --- /dev/null +++ b/python/numpy/_core/_methods.pyi @@ -0,0 +1,22 @@ +from collections.abc import Callable +from typing import Any, Concatenate, TypeAlias + +import numpy as np + +from . import _exceptions as _exceptions + +### + +_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any] + +### + +bool_dt: np.dtype[np.bool] = ... +umr_maximum: _Reduce2 = ... +umr_minimum: _Reduce2 = ... +umr_sum: _Reduce2 = ... +umr_prod: _Reduce2 = ... +umr_bitwise_count = np.bitwise_count +umr_any: _Reduce2 = ... +umr_all: _Reduce2 = ... +_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ... 
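One detail of `_var` above worth spelling out: for the native complex dtypes in `_complex_to_float`, it avoids `x * conj(x)` by viewing each complex element as two adjacent floats and summing the squared components. A standalone sketch of that view trick:

```python
# Sketch of the _var() fast path: view complex128 as (..., 2) float64 pairs,
# square both components, and add them for |x|**2 with no conjugate multiply.
import numpy as np

x = np.array([1 + 2j, 3 - 4j], dtype=np.complex128)
xv = x.view(np.float64).reshape(x.shape + (2,))  # real/imag side by side
sq = xv[..., 0] ** 2 + xv[..., 1] ** 2           # elementwise |x|**2
print(np.allclose(sq, np.abs(x) ** 2))           # True
```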
diff --git a/python/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so b/python/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..ce78182ec Binary files /dev/null and b/python/numpy/_core/_multiarray_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so b/python/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..32146050b Binary files /dev/null and b/python/numpy/_core/_multiarray_umath.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so b/python/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..bf24fe0de Binary files /dev/null and b/python/numpy/_core/_operand_flag_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so b/python/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..b9a771731 Binary files /dev/null and b/python/numpy/_core/_rational_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so b/python/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..54bbb837f Binary files /dev/null and b/python/numpy/_core/_simd.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/_core/_simd.pyi b/python/numpy/_core/_simd.pyi new file mode 100644 index 000000000..70bb70777 --- /dev/null +++ b/python/numpy/_core/_simd.pyi @@ -0,0 +1,25 @@ +from types import ModuleType +from typing import TypedDict, type_check_only + +# NOTE: these 5 are only defined on systems with an intel processor +SSE42: ModuleType | None = ... +FMA3: ModuleType | None = ... +AVX2: ModuleType | None = ... +AVX512F: ModuleType | None = ... +AVX512_SKX: ModuleType | None = ... + +baseline: ModuleType | None = ... + +@type_check_only +class SimdTargets(TypedDict): + SSE42: ModuleType | None + AVX2: ModuleType | None + FMA3: ModuleType | None + AVX512F: ModuleType | None + AVX512_SKX: ModuleType | None + baseline: ModuleType | None + +targets: SimdTargets = ... + +def clear_floatstatus() -> None: ... +def get_floatstatus() -> int: ... diff --git a/python/numpy/_core/_string_helpers.py b/python/numpy/_core/_string_helpers.py new file mode 100644 index 000000000..87085d411 --- /dev/null +++ b/python/numpy/_core/_string_helpers.py @@ -0,0 +1,100 @@ +""" +String-handling utilities to avoid locale-dependence. + +Used primarily to generate type name aliases. +""" +# "import string" is costly to import! +# Construct the translation tables directly +# "A" = chr(65), "a" = chr(97) +_all_chars = tuple(map(chr, range(256))) +_ascii_upper = _all_chars[65:65 + 26] +_ascii_lower = _all_chars[97:97 + 26] +LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:] +UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:] + + +def english_lower(s): + """ Apply English case rules to convert ASCII strings to all lower case. + + This is an internal utility function to replace calls to str.lower() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale. 
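To make the mechanism concrete: because the translation tables above are fixed 256-entry tuples, only ASCII `A`-`Z`/`a`-`z` are ever remapped, and any code point at 256 or above is left untouched (`str.translate` skips a character when the table lookup raises `LookupError`). A small re-creation in plain Python, independent of the private module:

```python
# Sketch: a fixed 256-entry table remaps only ASCII letters; ord(c) >= 256
# raises IndexError in the lookup, so str.translate leaves the char as-is.
table = tuple(map(chr, range(256)))
lower = table[:65] + tuple(map(chr, range(97, 123))) + table[91:]
print("Turkish-I".translate(lower))  # 'turkish-i'
print("İ".translate(lower))          # 'İ' untouched (ord('İ') == 304)
```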
+ + Parameters + ---------- + s : str + + Returns + ------- + lowered : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_lower + >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' + >>> english_lower('') + '' + """ + lowered = s.translate(LOWER_TABLE) + return lowered + + +def english_upper(s): + """ Apply English case rules to convert ASCII strings to all upper case. + + This is an internal utility function to replace calls to str.upper() such + that we can avoid changing behavior with changing locales. In particular, + Turkish has distinct dotted and dotless variants of the Latin letter "I" in + both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale. + + Parameters + ---------- + s : str + + Returns + ------- + uppered : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_upper + >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') + 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' + >>> english_upper('') + '' + """ + uppered = s.translate(UPPER_TABLE) + return uppered + + +def english_capitalize(s): + """ Apply English case rules to convert the first character of an ASCII + string to upper case. + + This is an internal utility function to replace calls to str.capitalize() + such that we can avoid changing behavior with changing locales. + + Parameters + ---------- + s : str + + Returns + ------- + capitalized : str + + Examples + -------- + >>> from numpy._core.numerictypes import english_capitalize + >>> english_capitalize('int8') + 'Int8' + >>> english_capitalize('Int8') + 'Int8' + >>> english_capitalize('') + '' + """ + if s: + return english_upper(s[0]) + s[1:] + else: + return s diff --git a/python/numpy/_core/_string_helpers.pyi b/python/numpy/_core/_string_helpers.pyi new file mode 100644 index 000000000..6a85832b7 --- /dev/null +++ b/python/numpy/_core/_string_helpers.pyi @@ -0,0 +1,12 @@ +from typing import Final + +_all_chars: Final[tuple[str, ...]] = ... +_ascii_upper: Final[tuple[str, ...]] = ... +_ascii_lower: Final[tuple[str, ...]] = ... + +LOWER_TABLE: Final[tuple[str, ...]] = ... +UPPER_TABLE: Final[tuple[str, ...]] = ... + +def english_lower(s: str) -> str: ... +def english_upper(s: str) -> str: ... +def english_capitalize(s: str) -> str: ... diff --git a/python/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so b/python/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..10747a360 Binary files /dev/null and b/python/numpy/_core/_struct_ufunc_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/_core/_type_aliases.py b/python/numpy/_core/_type_aliases.py new file mode 100644 index 000000000..de6c30953 --- /dev/null +++ b/python/numpy/_core/_type_aliases.py @@ -0,0 +1,119 @@ +""" +Due to compatibility, numpy has a very large number of different naming +conventions for the scalar types (those subclassing from `numpy.generic`). +This file produces a convoluted set of dictionaries mapping names to types, +and sometimes other mappings too. + +.. data:: allTypes + A dictionary of names to types that will be exposed as attributes through + ``np._core.numerictypes.*`` + +.. data:: sctypeDict + Similar to `allTypes`, but maps a broader set of aliases to their types. + +.. data:: sctypes + A dictionary keyed by a "type group" string, providing a list of types + under that group. 
+ +""" + +import numpy._core.multiarray as ma +from numpy._core.multiarray import dtype, typeinfo + +###################################### +# Building `sctypeDict` and `allTypes` +###################################### + +sctypeDict = {} +allTypes = {} +c_names_dict = {} + +_abstract_type_names = { + "generic", "integer", "inexact", "floating", "number", + "flexible", "character", "complexfloating", "unsignedinteger", + "signedinteger" +} + +for _abstract_type_name in _abstract_type_names: + allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name) + +for k, v in typeinfo.items(): + if k.startswith("NPY_") and v not in c_names_dict: + c_names_dict[k[4:]] = v + else: + concrete_type = v.type + allTypes[k] = concrete_type + sctypeDict[k] = concrete_type + +_aliases = { + "double": "float64", + "cdouble": "complex128", + "single": "float32", + "csingle": "complex64", + "half": "float16", + "bool_": "bool", + # Default integer: + "int_": "intp", + "uint": "uintp", +} + +for k, v in _aliases.items(): + sctypeDict[k] = allTypes[v] + allTypes[k] = allTypes[v] + +# extra aliases are added only to `sctypeDict` +# to support dtype name access, such as`np.dtype("float")` +_extra_aliases = { + "float": "float64", + "complex": "complex128", + "object": "object_", + "bytes": "bytes_", + "a": "bytes_", + "int": "int_", + "str": "str_", + "unicode": "str_", +} + +for k, v in _extra_aliases.items(): + sctypeDict[k] = allTypes[v] + +# include extended precision sized aliases +for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]: + longdouble_type: type = allTypes[full_name] + + bits: int = dtype(longdouble_type).itemsize * 8 + base_name: str = "complex" if is_complex else "float" + extended_prec_name: str = f"{base_name}{bits}" + if extended_prec_name not in allTypes: + sctypeDict[extended_prec_name] = longdouble_type + allTypes[extended_prec_name] = longdouble_type + + +#################### +# Building `sctypes` +#################### + +sctypes = {"int": set(), "uint": set(), "float": set(), + "complex": set(), "others": set()} + +for type_info in typeinfo.values(): + if type_info.kind in ["M", "m"]: # exclude timedelta and datetime + continue + + concrete_type = type_info.type + + # find proper group for each concrete type + for type_group, abstract_type in [ + ("int", ma.signedinteger), ("uint", ma.unsignedinteger), + ("float", ma.floating), ("complex", ma.complexfloating), + ("others", ma.generic) + ]: + if issubclass(concrete_type, abstract_type): + sctypes[type_group].add(concrete_type) + break + +# sort sctype groups by bitsize +for sctype_key in sctypes.keys(): + sctype_list = list(sctypes[sctype_key]) + sctype_list.sort(key=lambda x: dtype(x).itemsize) + sctypes[sctype_key] = sctype_list diff --git a/python/numpy/_core/_type_aliases.pyi b/python/numpy/_core/_type_aliases.pyi new file mode 100644 index 000000000..3c9dac7a1 --- /dev/null +++ b/python/numpy/_core/_type_aliases.pyi @@ -0,0 +1,97 @@ +from collections.abc import Collection +from typing import Final, TypeAlias, TypedDict, type_check_only +from typing import Literal as L + +import numpy as np + +__all__ = ( + "_abstract_type_names", + "_aliases", + "_extra_aliases", + "allTypes", + "c_names_dict", + "sctypeDict", + "sctypes", +) + +sctypeDict: Final[dict[str, type[np.generic]]] +allTypes: Final[dict[str, type[np.generic]]] + +@type_check_only +class _CNamesDict(TypedDict): + BOOL: np.dtype[np.bool] + HALF: np.dtype[np.half] + FLOAT: np.dtype[np.single] + DOUBLE: np.dtype[np.double] + LONGDOUBLE: 
np.dtype[np.longdouble] + CFLOAT: np.dtype[np.csingle] + CDOUBLE: np.dtype[np.cdouble] + CLONGDOUBLE: np.dtype[np.clongdouble] + STRING: np.dtype[np.bytes_] + UNICODE: np.dtype[np.str_] + VOID: np.dtype[np.void] + OBJECT: np.dtype[np.object_] + DATETIME: np.dtype[np.datetime64] + TIMEDELTA: np.dtype[np.timedelta64] + BYTE: np.dtype[np.byte] + UBYTE: np.dtype[np.ubyte] + SHORT: np.dtype[np.short] + USHORT: np.dtype[np.ushort] + INT: np.dtype[np.intc] + UINT: np.dtype[np.uintc] + LONG: np.dtype[np.long] + ULONG: np.dtype[np.ulong] + LONGLONG: np.dtype[np.longlong] + ULONGLONG: np.dtype[np.ulonglong] + +c_names_dict: Final[_CNamesDict] + +_AbstractTypeName: TypeAlias = L[ + "generic", + "flexible", + "character", + "number", + "integer", + "inexact", + "unsignedinteger", + "signedinteger", + "floating", + "complexfloating", +] +_abstract_type_names: Final[set[_AbstractTypeName]] + +@type_check_only +class _AliasesType(TypedDict): + double: L["float64"] + cdouble: L["complex128"] + single: L["float32"] + csingle: L["complex64"] + half: L["float16"] + bool_: L["bool"] + int_: L["intp"] + uint: L["uintp"] + +_aliases: Final[_AliasesType] + +@type_check_only +class _ExtraAliasesType(TypedDict): + float: L["float64"] + complex: L["complex128"] + object: L["object_"] + bytes: L["bytes_"] + a: L["bytes_"] + int: L["int_"] + str: L["str_"] + unicode: L["str_"] + +_extra_aliases: Final[_ExtraAliasesType] + +@type_check_only +class _SCTypes(TypedDict): + int: Collection[type[np.signedinteger]] + uint: Collection[type[np.unsignedinteger]] + float: Collection[type[np.floating]] + complex: Collection[type[np.complexfloating]] + others: Collection[type[np.flexible | np.bool | np.object_]] + +sctypes: Final[_SCTypes] diff --git a/python/numpy/_core/_ufunc_config.py b/python/numpy/_core/_ufunc_config.py new file mode 100644 index 000000000..b16147c18 --- /dev/null +++ b/python/numpy/_core/_ufunc_config.py @@ -0,0 +1,491 @@ +""" +Functions for changing global ufunc configuration + +This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and +`_extobj_contextvar` from umath. +""" +import functools + +from numpy._utils import set_module + +from .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj + +__all__ = [ + "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", + "errstate" +] + + +@set_module('numpy') +def seterr(all=None, divide=None, over=None, under=None, invalid=None): + """ + Set how floating-point errors are handled. + + Note that operations on integer scalar types (such as `int16`) are + handled like floating point, and are affected by these settings. + + Parameters + ---------- + all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Set treatment for all types of floating-point errors at once: + + - ignore: Take no action when the exception occurs. + - warn: Print a :exc:`RuntimeWarning` (via the Python `warnings` + module). + - raise: Raise a :exc:`FloatingPointError`. + - call: Call a function specified using the `seterrcall` function. + - print: Print a warning directly to ``stdout``. + - log: Record error in a Log object specified by `seterrcall`. + + The default is not to change the current behavior. + divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for division by zero. + over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point overflow. + under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for floating-point underflow.
+ invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional + Treatment for invalid floating-point operation. + + Returns + ------- + old_settings : dict + Dictionary containing the old settings. + + See also + -------- + seterrcall : Set a callback function for the 'call' mode. + geterr, geterrcall, errstate + + Notes + ----- + The floating-point exceptions are defined in the IEEE 754 standard [1]_: + + - Division by zero: infinite result obtained from finite numbers. + - Overflow: result too large to be expressed. + - Underflow: result so close to zero that some precision + was lost. + - Invalid operation: result is not an expressible number, typically + indicates that a NaN was produced. + + .. [1] https://en.wikipedia.org/wiki/IEEE_754 + + Examples + -------- + >>> import numpy as np + >>> orig_settings = np.seterr(all='ignore') # seterr to known value + >>> np.int16(32000) * np.int16(3) + np.int16(30464) + >>> np.seterr(over='raise') + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + >>> old_settings = np.seterr(all='warn', over='raise') + >>> np.int16(32000) * np.int16(3) + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + FloatingPointError: overflow encountered in scalar multiply + + >>> old_settings = np.seterr(all='print') + >>> np.geterr() + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} + >>> np.int16(32000) * np.int16(3) + np.int16(30464) + >>> np.seterr(**orig_settings) # restore original + {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'} + + """ + + old = _get_extobj_dict() + # The errstate doesn't include call and bufsize, so pop them: + old.pop("call", None) + old.pop("bufsize", None) + + extobj = _make_extobj( + all=all, divide=divide, over=over, under=under, invalid=invalid) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def geterr(): + """ + Get the current way of handling floating-point errors. + + Returns + ------- + res : dict + A dictionary with keys "divide", "over", "under", and "invalid", + whose values are from the strings "ignore", "print", "log", "warn", + "raise", and "call". The keys represent possible floating-point + exceptions, and the values define how these exceptions are handled. + + See Also + -------- + geterrcall, seterr, seterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> import numpy as np + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} + >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP + array([nan, 1., 1.]) + RuntimeWarning: invalid value encountered in divide + + >>> oldsettings = np.seterr(all='warn', invalid='raise') + >>> np.geterr() + {'divide': 'warn', 'over': 'warn', 'under': 'warn', 'invalid': 'raise'} + >>> np.arange(3.) / np.arange(3.) + Traceback (most recent call last): + ... + FloatingPointError: invalid value encountered in divide + >>> oldsettings = np.seterr(**oldsettings) # restore original + + """ + res = _get_extobj_dict() + # The "geterr" doesn't include call and bufsize: + res.pop("call", None) + res.pop("bufsize", None) + return res + + +@set_module('numpy') +def setbufsize(size): + """ + Set the size of the buffer used in ufuncs. + + .. versionchanged:: 2.0 + The scope of setting the buffer is tied to the `numpy.errstate` + context. Exiting a ``with errstate():`` will also restore the bufsize.
+ + Parameters + ---------- + size : int + Size of buffer. + + Returns + ------- + bufsize : int + Previous size of ufunc buffer in bytes. + + Examples + -------- + When exiting a `numpy.errstate` context manager the bufsize is restored: + + >>> import numpy as np + >>> with np.errstate(): + ... np.setbufsize(4096) + ... print(np.getbufsize()) + ... + 8192 + 4096 + >>> np.getbufsize() + 8192 + + """ + if size < 0: + raise ValueError("buffer size must be non-negative") + old = _get_extobj_dict()["bufsize"] + extobj = _make_extobj(bufsize=size) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def getbufsize(): + """ + Return the size of the buffer used in ufuncs. + + Returns + ------- + getbufsize : int + Size of ufunc buffer in bytes. + + Examples + -------- + >>> import numpy as np + >>> np.getbufsize() + 8192 + + """ + return _get_extobj_dict()["bufsize"] + + +@set_module('numpy') +def seterrcall(func): + """ + Set the floating-point error callback function or log object. + + There are two ways to capture floating-point error messages. The first + is to set the error-handler to 'call', using `seterr`. Then, set + the function to call using this function. + + The second is to set the error-handler to 'log', using `seterr`. + Floating-point errors then trigger a call to the 'write' method of + the provided object. + + Parameters + ---------- + func : callable f(err, flag) or object with write method + Function to call upon floating-point errors ('call'-mode) or + object whose 'write' method is used to log such message ('log'-mode). + + The call function takes two arguments. The first is a string describing + the type of error (such as "divide by zero", "overflow", "underflow", + or "invalid value"), and the second is the status flag. The flag is a + byte, whose four least-significant bits indicate the type of error, one + of "divide", "over", "under", "invalid":: + + [0 0 0 0 divide over under invalid] + + In other words, ``flags = divide + 2*over + 4*under + 8*invalid``. + + If an object is provided, its write method should take one argument, + a string. + + Returns + ------- + h : callable, log instance or None + The old error handler. + + See Also + -------- + seterr, geterr, geterrcall + + Examples + -------- + Callback upon error: + + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + ... + + >>> import numpy as np + + >>> orig_handler = np.seterrcall(err_handler) + >>> orig_err = np.seterr(all='call') + + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> np.seterrcall(orig_handler) + <function err_handler at 0x...> + >>> np.seterr(**orig_err) + {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'} + + Log error message: + + >>> class Log: + ... def write(self, msg): + ... print("LOG: %s" % msg) + ... + + >>> log = Log() + >>> saved_handler = np.seterrcall(log) + >>> save_err = np.seterr(all='log') + + >>> np.array([1, 2, 3]) / 0.0 + LOG: Warning: divide by zero encountered in divide + array([inf, inf, inf]) + + >>> np.seterrcall(orig_handler) + <__main__.Log object at 0x...> + >>> np.seterr(**orig_err) + {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'} + + """ + old = _get_extobj_dict()["call"] + extobj = _make_extobj(call=func) + _extobj_contextvar.set(extobj) + return old + + +@set_module('numpy') +def geterrcall(): + """ + Return the current callback function used on floating-point errors.
+ + When the error handling for a floating-point error (one of "divide", + "over", "under", or "invalid") is set to 'call' or 'log', the function + that is called or the log instance that is written to is returned by + `geterrcall`. This function or log instance has been set with + `seterrcall`. + + Returns + ------- + errobj : callable, log instance or None + The current error handler. If no handler was set through `seterrcall`, + ``None`` is returned. + + See Also + -------- + seterrcall, seterr, geterr + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> import numpy as np + >>> np.geterrcall() # we did not yet set a handler, returns None + + >>> orig_settings = np.seterr(all='call') + >>> def err_handler(type, flag): + ... print("Floating point error (%s), with flag %s" % (type, flag)) + >>> old_handler = np.seterrcall(err_handler) + >>> np.array([1, 2, 3]) / 0.0 + Floating point error (divide by zero), with flag 1 + array([inf, inf, inf]) + + >>> cur_handler = np.geterrcall() + >>> cur_handler is err_handler + True + >>> old_settings = np.seterr(**orig_settings) # restore original + >>> old_handler = np.seterrcall(None) # restore original + + """ + return _get_extobj_dict()["call"] + + +class _unspecified: + pass + + +_Unspecified = _unspecified() + + +@set_module('numpy') +class errstate: + """ + errstate(**kwargs) + + Context manager for floating-point error handling. + + Using an instance of `errstate` as a context manager allows statements in + that context to execute with a known error handling behavior. Upon entering + the context the error handling is set with `seterr` and `seterrcall`, and + upon exiting it is reset to what it was before. + + .. versionchanged:: 1.17.0 + `errstate` is also usable as a function decorator, saving + a level of indentation if an entire function is wrapped. + + .. versionchanged:: 2.0 + `errstate` is now fully thread and asyncio safe, but may not be + entered more than once. + It is not safe to decorate async functions using ``errstate``. + + Parameters + ---------- + kwargs : {divide, over, under, invalid} + Keyword arguments. The valid keywords are the possible floating-point + exceptions. Each keyword should have a string value that defines the + treatment for the particular error. Possible values are + {'ignore', 'warn', 'raise', 'call', 'print', 'log'}. + + See Also + -------- + seterr, geterr, seterrcall, geterrcall + + Notes + ----- + For complete documentation of the types of floating-point exceptions and + treatment options, see `seterr`. + + Examples + -------- + >>> import numpy as np + >>> olderr = np.seterr(all='ignore') # Set error handling to known state. + + >>> np.arange(3) / 0. + array([nan, inf, inf]) + >>> with np.errstate(divide='ignore'): + ... np.arange(3) / 0. + array([nan, inf, inf]) + + >>> np.sqrt(-1) + np.float64(nan) + >>> with np.errstate(invalid='raise'): + ... 
np.sqrt(-1) + Traceback (most recent call last): + File "<stdin>", line 2, in <module> + FloatingPointError: invalid value encountered in sqrt + + Outside the context the error handling behavior has not changed: + + >>> np.geterr() + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} + >>> olderr = np.seterr(**olderr) # restore original state + + """ + __slots__ = ( + "_all", + "_call", + "_divide", + "_invalid", + "_over", + "_token", + "_under", + ) + + def __init__(self, *, call=_Unspecified, + all=None, divide=None, over=None, under=None, invalid=None): + self._token = None + self._call = call + self._all = all + self._divide = divide + self._over = over + self._under = under + self._invalid = invalid + + def __enter__(self): + # Note that __call__ duplicates much of this logic + if self._token is not None: + raise TypeError("Cannot enter `np.errstate` twice.") + if self._call is _Unspecified: + extobj = _make_extobj( + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + else: + extobj = _make_extobj( + call=self._call, + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + + self._token = _extobj_contextvar.set(extobj) + + def __exit__(self, *exc_info): + _extobj_contextvar.reset(self._token) + + def __call__(self, func): + # We need to customize `__call__` compared to `ContextDecorator` + # because we must store the token per-thread so cannot store it on + # the instance (we could create a new instance for this). + # This duplicates the code from `__enter__`. + @functools.wraps(func) + def inner(*args, **kwargs): + if self._call is _Unspecified: + extobj = _make_extobj( + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + else: + extobj = _make_extobj( + call=self._call, + all=self._all, divide=self._divide, over=self._over, + under=self._under, invalid=self._invalid) + + _token = _extobj_contextvar.set(extobj) + try: + # Call the original, decorated, function: + return func(*args, **kwargs) + finally: + _extobj_contextvar.reset(_token) + + return inner diff --git a/python/numpy/_core/_ufunc_config.pyi b/python/numpy/_core/_ufunc_config.pyi new file mode 100644 index 000000000..008fb5512 --- /dev/null +++ b/python/numpy/_core/_ufunc_config.pyi @@ -0,0 +1,78 @@ +from collections.abc import Callable +from types import TracebackType +from typing import ( + Any, + Final, + Literal, + TypeAlias, + TypedDict, + TypeVar, + type_check_only, +) + +from _typeshed import SupportsWrite + +__all__ = [ + "seterr", + "geterr", + "setbufsize", + "getbufsize", + "seterrcall", + "geterrcall", + "errstate", +] + +_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"] +_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str] + +_CallableT = TypeVar("_CallableT", bound=Callable[..., object]) + +@type_check_only +class _ErrDict(TypedDict): + divide: _ErrKind + over: _ErrKind + under: _ErrKind + invalid: _ErrKind + +### + +class _unspecified: ... + +_Unspecified: Final[_unspecified] + +class errstate: + __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under" + + def __init__( + self, + /, + *, + call: _ErrCall | _unspecified = ..., # = _Unspecified + all: _ErrKind | None = None, + divide: _ErrKind | None = None, + over: _ErrKind | None = None, + under: _ErrKind | None = None, + invalid: _ErrKind | None = None, + ) -> None: ... + def __call__(self, /, func: _CallableT) -> _CallableT: ...
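Because `errstate.__call__` above builds the extobj and sets/resets the contextvar around every invocation of `inner` (instead of reusing the one-shot `_token` from `__enter__`), a single instance can decorate many functions and be called concurrently. A short usage sketch of the decorator form:

```python
# Sketch: np.errstate as a decorator; state is scoped to each call.
import numpy as np

@np.errstate(invalid="ignore")
def safe_sqrt(x):
    return np.sqrt(x)          # no RuntimeWarning inside the wrapper

print(safe_sqrt(-1.0))         # nan
print(np.geterr()["invalid"])  # 'warn': unchanged outside the wrapper
```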
+ def __enter__(self) -> None: ... + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + /, + ) -> None: ... + +def seterr( + all: _ErrKind | None = ..., + divide: _ErrKind | None = ..., + over: _ErrKind | None = ..., + under: _ErrKind | None = ..., + invalid: _ErrKind | None = ..., +) -> _ErrDict: ... +def geterr() -> _ErrDict: ... +def setbufsize(size: int) -> int: ... +def getbufsize() -> int: ... +def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ... +def geterrcall() -> _ErrCall | None: ... diff --git a/python/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so b/python/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..51212e83a Binary files /dev/null and b/python/numpy/_core/_umath_tests.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/_core/arrayprint.py b/python/numpy/_core/arrayprint.py new file mode 100644 index 000000000..2a6842806 --- /dev/null +++ b/python/numpy/_core/arrayprint.py @@ -0,0 +1,1775 @@ +"""Array printing function + +$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ + +""" +__all__ = ["array2string", "array_str", "array_repr", + "set_printoptions", "get_printoptions", "printoptions", + "format_float_positional", "format_float_scientific"] +__docformat__ = 'restructuredtext' + +# +# Written by Konrad Hinsen +# last revision: 1996-3-13 +# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) +# and by Perry Greenfield 2000-4-1 for numarray +# and by Travis Oliphant 2005-8-22 for numpy + + +# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy +# scalars but for different purposes. scalartypes.c.src has str/reprs for when +# the scalar is printed on its own, while arrayprint.py has strs for when +# scalars are printed inside an ndarray. Only the latter strs are currently +# user-customizable. + +import functools +import numbers +import sys + +try: + from _thread import get_ident +except ImportError: + from _dummy_thread import get_ident + +import contextlib +import operator +import warnings + +import numpy as np + +from . import numerictypes as _nt +from .fromnumeric import any +from .multiarray import ( + array, + datetime_as_string, + datetime_data, + dragon4_positional, + dragon4_scientific, + ndarray, +) +from .numeric import asarray, concatenate, errstate +from .numerictypes import complex128, flexible, float64, int_ +from .overrides import array_function_dispatch, set_module +from .printoptions import format_options +from .umath import absolute, isfinite, isinf, isnat + + +def _make_options_dict(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, infstr=None, + sign=None, formatter=None, floatmode=None, legacy=None, + override_repr=None): + """ + Make a dictionary out of the non-None arguments, plus conversion of + *legacy* and sanity checks. 
+ """ + + options = {k: v for k, v in list(locals().items()) if v is not None} + + if suppress is not None: + options['suppress'] = bool(suppress) + + modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal'] + if floatmode not in modes + [None]: + raise ValueError("floatmode option must be one of " + + ", ".join(f'"{m}"' for m in modes)) + + if sign not in [None, '-', '+', ' ']: + raise ValueError("sign option must be one of ' ', '+', or '-'") + + if legacy is False: + options['legacy'] = sys.maxsize + elif legacy == False: # noqa: E712 + warnings.warn( + f"Passing `legacy={legacy!r}` is deprecated.", + FutureWarning, stacklevel=3 + ) + options['legacy'] = sys.maxsize + elif legacy == '1.13': + options['legacy'] = 113 + elif legacy == '1.21': + options['legacy'] = 121 + elif legacy == '1.25': + options['legacy'] = 125 + elif legacy == '2.1': + options['legacy'] = 201 + elif legacy == '2.2': + options['legacy'] = 202 + elif legacy is None: + pass # OK, do nothing. + else: + warnings.warn( + "legacy printing option can currently only be '1.13', '1.21', " + "'1.25', '2.1', '2.2' or `False`", stacklevel=3) + + if threshold is not None: + # forbid the bad threshold arg suggested by stack overflow, gh-12351 + if not isinstance(threshold, numbers.Number): + raise TypeError("threshold must be numeric") + if np.isnan(threshold): + raise ValueError("threshold must be non-NAN, try " + "sys.maxsize for untruncated representation") + + if precision is not None: + # forbid the bad precision arg as suggested by issue #18254 + try: + options['precision'] = operator.index(precision) + except TypeError as e: + raise TypeError('precision must be an integer') from e + + return options + + +@set_module('numpy') +def set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, + infstr=None, formatter=None, sign=None, floatmode=None, + *, legacy=None, override_repr=None): + """ + Set printing options. + + These options determine the way floating point numbers, arrays and + other NumPy objects are displayed. + + Parameters + ---------- + precision : int or None, optional + Number of digits of precision for floating point output (default 8). + May be None if `floatmode` is not `fixed`, to print as many digits as + necessary to uniquely specify the value. + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr (default 1000). + To always use the full repr without summarization, pass `sys.maxsize`. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension (default 3). + linewidth : int, optional + The number of characters per line for the purpose of inserting + line breaks (default 75). + suppress : bool, optional + If True, always print floating point numbers using fixed point + notation, in which case numbers equal to zero in the current precision + will print as zero. If False, then scientific notation is used when + absolute value of the smallest number is < 1e-4 or the ratio of the + maximum absolute value to the minimum is > 1e3. The default is False. + nanstr : str, optional + String representation of floating point not-a-number (default nan). + infstr : str, optional + String representation of floating point infinity (default inf). + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. 
If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. (default '-') + + .. versionchanged:: 2.0 + The sign parameter can now be an integer type, previously + types were floating-point types. + + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'numpystr' : types `numpy.bytes_` and `numpy.str_` + - 'object' : `np.object_` arrays + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. Can take the following values + (default maxprec_equal): + + * 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + * 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + * 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + * 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. This also + enables 1.21 legacy printing mode (described below). + + If set to the string ``'1.21'`` enables 1.21 legacy printing mode. This + approximates numpy 1.21 print output of complex structured dtypes + by not inserting spaces after commas that separate fields and after + colons. + + If set to ``'1.25'`` approximates printing of 1.25 which mainly means + that numeric scalars are printed without their type information, e.g. + as ``3.0`` rather than ``np.float64(3.0)``. + + If set to ``'2.1'``, shape information is not given when arrays are + summarized (i.e., multiple elements replaced with ``...``). + + If set to ``'2.2'``, the transition to use scientific notation for + printing ``np.float16`` and ``np.float32`` types may happen later or + not at all for larger values. + + If set to `False`, disables legacy mode. + + Unrecognized strings will be ignored with a warning for forward + compatibility. + + .. versionchanged:: 1.22.0 + .. versionchanged:: 2.2 + + override_repr: callable, optional + If set a passed function will be used for generating arrays' repr. + Other options will be ignored. 
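Since `override_repr` short-circuits all other printing options, a sketch of what that means in practice. The parameter description above does not pin down the callable's signature, so this assumes it receives the array and returns the complete repr string; note also that, like `formatter`, it is reset by any subsequent `set_printoptions` call:

```python
# Sketch: override_repr replaces the whole repr; a bare set_printoptions()
# call resets it (formatter and override_repr are always reset).
import numpy as np

np.set_printoptions(override_repr=lambda a: f"<ndarray shape={a.shape}>")
print(repr(np.arange(3)))  # <ndarray shape=(3,)>
np.set_printoptions()
print(repr(np.arange(3)))  # array([0, 1, 2])
```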
+
+    See Also
+    --------
+    get_printoptions, printoptions, array2string
+
+    Notes
+    -----
+    `formatter` is always reset with a call to `set_printoptions`.
+
+    Use `printoptions` as a context manager to set the values temporarily.
+
+    Examples
+    --------
+    Floating point precision can be set:
+
+    >>> import numpy as np
+    >>> np.set_printoptions(precision=4)
+    >>> np.array([1.123456789])
+    array([1.1235])
+
+    Long arrays can be summarised:
+
+    >>> np.set_printoptions(threshold=5)
+    >>> np.arange(10)
+    array([0, 1, 2, ..., 7, 8, 9], shape=(10,))
+
+    Small results can be suppressed:
+
+    >>> eps = np.finfo(float).eps
+    >>> x = np.arange(4.)
+    >>> x**2 - (x + eps)**2
+    array([-4.9304e-32, -4.4409e-16,  0.0000e+00,  0.0000e+00])
+    >>> np.set_printoptions(suppress=True)
+    >>> x**2 - (x + eps)**2
+    array([-0., -0.,  0.,  0.])
+
+    A custom formatter can be used to display array elements as desired:
+
+    >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
+    >>> x = np.arange(3)
+    >>> x
+    array([int: 0, int: -1, int: -2])
+    >>> np.set_printoptions()  # formatter gets reset
+    >>> x
+    array([0, 1, 2])
+
+    To put back the default options, you can use:
+
+    >>> np.set_printoptions(edgeitems=3, infstr='inf',
+    ...                     linewidth=75, nanstr='nan', precision=8,
+    ...                     suppress=False, threshold=1000, formatter=None)
+
+    To temporarily override options, use `printoptions` as a context manager:
+
+    >>> with np.printoptions(precision=2, suppress=True, threshold=5):
+    ...     np.linspace(0, 10, 10)
+    array([ 0.  ,  1.11,  2.22, ...,  7.78,  8.89, 10.  ], shape=(10,))
+
+    """
+    _set_printoptions(precision, threshold, edgeitems, linewidth, suppress,
+                      nanstr, infstr, formatter, sign, floatmode,
+                      legacy=legacy, override_repr=override_repr)
+
+
+def _set_printoptions(precision=None, threshold=None, edgeitems=None,
+                      linewidth=None, suppress=None, nanstr=None,
+                      infstr=None, formatter=None, sign=None, floatmode=None,
+                      *, legacy=None, override_repr=None):
+    new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
+                                 suppress, nanstr, infstr, sign, formatter,
+                                 floatmode, legacy)
+    # formatter and override_repr are always reset
+    new_opt['formatter'] = formatter
+    new_opt['override_repr'] = override_repr
+
+    updated_opt = format_options.get() | new_opt
+
+    if updated_opt['legacy'] == 113:
+        updated_opt['sign'] = '-'
+
+    return format_options.set(updated_opt)
+
+
+@set_module('numpy')
+def get_printoptions():
+    """
+    Return the current print options.
+
+    Returns
+    -------
+    print_opts : dict
+        Dictionary of current print options with keys
+
+        - precision : int
+        - threshold : int
+        - edgeitems : int
+        - linewidth : int
+        - suppress : bool
+        - nanstr : str
+        - infstr : str
+        - sign : str
+        - formatter : dict of callables
+        - floatmode : str
+        - legacy : str or False
+
+    For a full description of these options, see `set_printoptions`.
+ + See Also + -------- + set_printoptions, printoptions + + Examples + -------- + >>> import numpy as np + + >>> np.get_printoptions() + {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} + + >>> np.get_printoptions()['linewidth'] + 75 + >>> np.set_printoptions(linewidth=100) + >>> np.get_printoptions()['linewidth'] + 100 + + """ + opts = format_options.get().copy() + opts['legacy'] = { + 113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1', + 202: '2.2', sys.maxsize: False, + }[opts['legacy']] + return opts + + +def _get_legacy_print_mode(): + """Return the legacy print mode as an int.""" + return format_options.get()['legacy'] + + +@set_module('numpy') +@contextlib.contextmanager +def printoptions(*args, **kwargs): + """Context manager for setting print options. + + Set print options for the scope of the `with` block, and restore the old + options at the end. See `set_printoptions` for the full description of + available options. + + Examples + -------- + >>> import numpy as np + + >>> from numpy.testing import assert_equal + >>> with np.printoptions(precision=2): + ... np.array([2.0]) / 3 + array([0.67]) + + The `as`-clause of the `with`-statement gives the current print options: + + >>> with np.printoptions(precision=2) as opts: + ... assert_equal(opts, np.get_printoptions()) + + See Also + -------- + set_printoptions, get_printoptions + + """ + token = _set_printoptions(*args, **kwargs) + + try: + yield get_printoptions() + finally: + format_options.reset(token) + + +def _leading_trailing(a, edgeitems, index=()): + """ + Keep only the N-D corners (leading and trailing edges) of an array. + + Should be passed a base-class ndarray, since it makes no guarantees about + preserving subclasses. + """ + axis = len(index) + if axis == a.ndim: + return a[index] + + if a.shape[axis] > 2 * edgeitems: + return concatenate(( + _leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]), + _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) + ), axis=axis) + else: + return _leading_trailing(a, edgeitems, index + np.index_exp[:]) + + +def _object_format(o): + """ Object arrays containing lists should be printed unambiguously """ + if type(o) is list: + fmt = 'list({!r})' + else: + fmt = '{!r}' + return fmt.format(o) + +def repr_format(x): + if isinstance(x, (np.str_, np.bytes_)): + return repr(x.item()) + return repr(x) + +def str_format(x): + if isinstance(x, (np.str_, np.bytes_)): + return str(x.item()) + return str(x) + +def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy, + formatter, **kwargs): + # note: extra arguments in kwargs are ignored + + # wrapped in lambdas to avoid taking a code path + # with the wrong type of data + formatdict = { + 'bool': lambda: BoolFormat(data), + 'int': lambda: IntegerFormat(data, sign), + 'float': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longfloat': lambda: FloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'complexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'longcomplexfloat': lambda: ComplexFloatingFormat( + data, precision, floatmode, suppress, sign, legacy=legacy), + 'datetime': lambda: DatetimeFormat(data, legacy=legacy), + 'timedelta': lambda: TimedeltaFormat(data), + 'object': lambda: _object_format, + 'void': lambda: str_format, + 'numpystr': lambda: repr_format} + + # we need to wrap values in `formatter` in a lambda, so that the interface + # is the same as the 
above values. + def indirect(x): + return lambda: x + + if formatter is not None: + fkeys = [k for k in formatter.keys() if formatter[k] is not None] + if 'all' in fkeys: + for key in formatdict.keys(): + formatdict[key] = indirect(formatter['all']) + if 'int_kind' in fkeys: + for key in ['int']: + formatdict[key] = indirect(formatter['int_kind']) + if 'float_kind' in fkeys: + for key in ['float', 'longfloat']: + formatdict[key] = indirect(formatter['float_kind']) + if 'complex_kind' in fkeys: + for key in ['complexfloat', 'longcomplexfloat']: + formatdict[key] = indirect(formatter['complex_kind']) + if 'str_kind' in fkeys: + formatdict['numpystr'] = indirect(formatter['str_kind']) + for key in formatdict.keys(): + if key in fkeys: + formatdict[key] = indirect(formatter[key]) + + return formatdict + +def _get_format_function(data, **options): + """ + find the right formatting function for the dtype_ + """ + dtype_ = data.dtype + dtypeobj = dtype_.type + formatdict = _get_formatdict(data, **options) + if dtypeobj is None: + return formatdict["numpystr"]() + elif issubclass(dtypeobj, _nt.bool): + return formatdict['bool']() + elif issubclass(dtypeobj, _nt.integer): + if issubclass(dtypeobj, _nt.timedelta64): + return formatdict['timedelta']() + else: + return formatdict['int']() + elif issubclass(dtypeobj, _nt.floating): + if issubclass(dtypeobj, _nt.longdouble): + return formatdict['longfloat']() + else: + return formatdict['float']() + elif issubclass(dtypeobj, _nt.complexfloating): + if issubclass(dtypeobj, _nt.clongdouble): + return formatdict['longcomplexfloat']() + else: + return formatdict['complexfloat']() + elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)): + return formatdict['numpystr']() + elif issubclass(dtypeobj, _nt.datetime64): + return formatdict['datetime']() + elif issubclass(dtypeobj, _nt.object_): + return formatdict['object']() + elif issubclass(dtypeobj, _nt.void): + if dtype_.names is not None: + return StructuredVoidFormat.from_data(data, **options) + else: + return formatdict['void']() + else: + return formatdict['numpystr']() + + +def _recursive_guard(fillvalue='...'): + """ + Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs + + Decorates a function such that if it calls itself with the same first + argument, it returns `fillvalue` instead of recursing. + + Largely copied from reprlib.recursive_repr + """ + + def decorating_function(f): + repr_running = set() + + @functools.wraps(f) + def wrapper(self, *args, **kwargs): + key = id(self), get_ident() + if key in repr_running: + return fillvalue + repr_running.add(key) + try: + return f(self, *args, **kwargs) + finally: + repr_running.discard(key) + + return wrapper + + return decorating_function + + +# gracefully handle recursive calls, when object arrays contain themselves +@_recursive_guard() +def _array2string(a, options, separator=' ', prefix=""): + # The formatter __init__s in _get_format_function cannot deal with + # subclasses yet, and we also need to avoid recursion issues in + # _formatArray with subclasses which return 0d arrays in place of scalars + data = asarray(a) + if a.shape == (): + a = data + + if a.size > options['threshold']: + summary_insert = "..." 
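+        # `data` is reduced to its corner entries so the formatter below is
+        # chosen only from the values that will actually be printed;
+        # _formatArray still walks the full array `a` and splices in "..."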
+ data = _leading_trailing(data, options['edgeitems']) + else: + summary_insert = "" + + # find the right formatting function for the array + format_function = _get_format_function(data, **options) + + # skip over "[" + next_line_prefix = " " + # skip over array( + next_line_prefix += " " * len(prefix) + + lst = _formatArray(a, format_function, options['linewidth'], + next_line_prefix, separator, options['edgeitems'], + summary_insert, options['legacy']) + return lst + + +def _array2string_dispatcher( + a, max_line_width=None, precision=None, + suppress_small=None, separator=None, prefix=None, + style=None, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix=None, + *, legacy=None): + return (a,) + + +@array_function_dispatch(_array2string_dispatcher, module='numpy') +def array2string(a, max_line_width=None, precision=None, + suppress_small=None, separator=' ', prefix="", + style=np._NoValue, formatter=None, threshold=None, + edgeitems=None, sign=None, floatmode=None, suffix="", + *, legacy=None): + """ + Return a string representation of an array. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int or None, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + separator : str, optional + Inserted between elements. + prefix : str, optional + suffix : str, optional + The length of the prefix and suffix strings are used to respectively + align and wrap the output. An array is typically printed as:: + + prefix + array2string(a) + suffix + + The output is left-padded by the length of the prefix string, and + wrapping is forced at the column ``max_line_width - len(suffix)``. + It should be noted that the content of prefix and suffix strings are + not included in the output. + style : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.14.0 + formatter : dict of callables, optional + If not None, the keys should indicate the type(s) that the respective + formatting function applies to. Callables should return a string. + Types that are not specified (by their corresponding keys) are handled + by the default formatters. Individual types for which a formatter + can be set are: + + - 'bool' + - 'int' + - 'timedelta' : a `numpy.timedelta64` + - 'datetime' : a `numpy.datetime64` + - 'float' + - 'longfloat' : 128-bit floats + - 'complexfloat' + - 'longcomplexfloat' : composed of two 128-bit floats + - 'void' : type `numpy.void` + - 'numpystr' : types `numpy.bytes_` and `numpy.str_` + + Other keys that can be used to set a group of types at once are: + + - 'all' : sets all types + - 'int_kind' : sets 'int' + - 'float_kind' : sets 'float' and 'longfloat' + - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat' + - 'str_kind' : sets 'numpystr' + threshold : int, optional + Total number of array elements which trigger summarization + rather than full repr. + Defaults to ``numpy.get_printoptions()['threshold']``. + edgeitems : int, optional + Number of array items in summary at beginning and end of + each dimension. 
+ Defaults to ``numpy.get_printoptions()['edgeitems']``. + sign : string, either '-', '+', or ' ', optional + Controls printing of the sign of floating-point types. If '+', always + print the sign of positive values. If ' ', always prints a space + (whitespace character) in the sign position of positive values. If + '-', omit the sign character of positive values. + Defaults to ``numpy.get_printoptions()['sign']``. + + .. versionchanged:: 2.0 + The sign parameter can now be an integer type, previously + types were floating-point types. + + floatmode : str, optional + Controls the interpretation of the `precision` option for + floating-point types. + Defaults to ``numpy.get_printoptions()['floatmode']``. + Can take the following values: + + - 'fixed': Always print exactly `precision` fractional digits, + even if this would print more or fewer digits than + necessary to specify the value uniquely. + - 'unique': Print the minimum number of fractional digits necessary + to represent each value uniquely. Different elements may + have a different number of digits. The value of the + `precision` option is ignored. + - 'maxprec': Print at most `precision` fractional digits, but if + an element can be uniquely represented with fewer digits + only print it with that many. + - 'maxprec_equal': Print at most `precision` fractional digits, + but if every element in the array can be uniquely + represented with an equal number of fewer digits, use that + many digits for all elements. + legacy : string or `False`, optional + If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This + approximates numpy 1.13 print output by including a space in the sign + position of floats and different behavior for 0d arrays. If set to + `False`, disables legacy mode. Unrecognized strings will be ignored + with a warning for forward compatibility. + + Returns + ------- + array_str : str + String representation of the array. + + Raises + ------ + TypeError + if a callable in `formatter` does not return a string. + + See Also + -------- + array_str, array_repr, set_printoptions, get_printoptions + + Notes + ----- + If a formatter is specified for a certain type, the `precision` keyword is + ignored for that type. + + This is a very flexible function; `array_repr` and `array_str` are using + `array2string` internally so keywords with the same name should work + identically in all three functions. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1e-16,1,2,3]) + >>> np.array2string(x, precision=2, separator=',', + ... suppress_small=True) + '[0.,1.,2.,3.]' + + >>> x = np.arange(3.) 
+ >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) + '[0.00 1.00 2.00]' + + >>> x = np.arange(3) + >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) + '[0x0 0x1 0x2]' + + """ + + overrides = _make_options_dict(precision, threshold, edgeitems, + max_line_width, suppress_small, None, None, + sign, formatter, floatmode, legacy) + options = format_options.get().copy() + options.update(overrides) + + if options['legacy'] <= 113: + if style is np._NoValue: + style = repr + + if a.shape == () and a.dtype.names is None: + return style(a.item()) + elif style is not np._NoValue: + # Deprecation 11-9-2017 v1.14 + warnings.warn("'style' argument is deprecated and no longer functional" + " except in 1.13 'legacy' mode", + DeprecationWarning, stacklevel=2) + + if options['legacy'] > 113: + options['linewidth'] -= len(suffix) + + # treat as a null array if any of shape elements == 0 + if a.size == 0: + return "[]" + + return _array2string(a, options, separator, prefix) + + +def _extendLine(s, line, word, line_width, next_line_prefix, legacy): + needs_wrap = len(line) + len(word) > line_width + if legacy > 113: + # don't wrap lines if it won't help + if len(line) <= len(next_line_prefix): + needs_wrap = False + + if needs_wrap: + s += line.rstrip() + "\n" + line = next_line_prefix + line += word + return s, line + + +def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy): + """ + Extends line with nicely formatted (possibly multi-line) string ``word``. + """ + words = word.splitlines() + if len(words) == 1 or legacy <= 113: + return _extendLine(s, line, word, line_width, next_line_prefix, legacy) + + max_word_length = max(len(word) for word in words) + if (len(line) + max_word_length > line_width and + len(line) > len(next_line_prefix)): + s += line.rstrip() + '\n' + line = next_line_prefix + words[0] + indent = next_line_prefix + else: + indent = len(line) * ' ' + line += words[0] + + for word in words[1::]: + s += line.rstrip() + '\n' + line = indent + word + + suffix_length = max_word_length - len(words[-1]) + line += suffix_length * ' ' + + return s, line + +def _formatArray(a, format_function, line_width, next_line_prefix, + separator, edge_items, summary_insert, legacy): + """formatArray is designed for two modes of operation: + + 1. Full output + + 2. Summarized output + + """ + def recurser(index, hanging_indent, curr_width): + """ + By using this local function, we don't need to recurse with all the + arguments. 
Since this function is not created recursively, the cost is + not significant + """ + axis = len(index) + axes_left = a.ndim - axis + + if axes_left == 0: + return format_function(a[index]) + + # when recursing, add a space to align with the [ added, and reduce the + # length of the line by 1 + next_hanging_indent = hanging_indent + ' ' + if legacy <= 113: + next_width = curr_width + else: + next_width = curr_width - len(']') + + a_len = a.shape[axis] + show_summary = summary_insert and 2 * edge_items < a_len + if show_summary: + leading_items = edge_items + trailing_items = edge_items + else: + leading_items = 0 + trailing_items = a_len + + # stringify the array with the hanging indent on the first line too + s = '' + + # last axis (rows) - wrap elements if they would not fit on one line + if axes_left == 1: + # the length up until the beginning of the separator / bracket + if legacy <= 113: + elem_width = curr_width - len(separator.rstrip()) + else: + elem_width = curr_width - max( + len(separator.rstrip()), len(']') + ) + + line = hanging_indent + for i in range(leading_items): + word = recurser(index + (i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if show_summary: + s, line = _extendLine( + s, line, summary_insert, elem_width, hanging_indent, legacy + ) + if legacy <= 113: + line += ", " + else: + line += separator + + for i in range(trailing_items, 1, -1): + word = recurser(index + (-i,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + line += separator + + if legacy <= 113: + # width of the separator is not considered on 1.13 + elem_width = curr_width + word = recurser(index + (-1,), next_hanging_indent, next_width) + s, line = _extendLine_pretty( + s, line, word, elem_width, hanging_indent, legacy) + + s += line + + # other axes - insert newlines between rows + else: + s = '' + line_sep = separator.rstrip() + '\n' * (axes_left - 1) + + for i in range(leading_items): + nested = recurser( + index + (i,), next_hanging_indent, next_width + ) + s += hanging_indent + nested + line_sep + + if show_summary: + if legacy <= 113: + # trailing space, fixed nbr of newlines, + # and fixed separator + s += hanging_indent + summary_insert + ", \n" + else: + s += hanging_indent + summary_insert + line_sep + + for i in range(trailing_items, 1, -1): + nested = recurser(index + (-i,), next_hanging_indent, + next_width) + s += hanging_indent + nested + line_sep + + nested = recurser(index + (-1,), next_hanging_indent, next_width) + s += hanging_indent + nested + + # remove the hanging indent, and wrap in [] + s = '[' + s[len(hanging_indent):] + ']' + return s + + try: + # invoke the recursive part with an initial index and prefix + return recurser(index=(), + hanging_indent=next_line_prefix, + curr_width=line_width) + finally: + # recursive closures have a cyclic reference to themselves, which + # requires gc to collect (gh-10620). 
To avoid this problem, for + # performance and PyPy friendliness, we break the cycle: + recurser = None + +def _none_or_positive_arg(x, name): + if x is None: + return -1 + if x < 0: + raise ValueError(f"{name} must be >= 0") + return x + +class FloatingFormat: + """ Formatter for subtypes of np.floating """ + def __init__(self, data, precision, floatmode, suppress_small, sign=False, + *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + self._legacy = legacy + if self._legacy <= 113: + # when not 0d, legacy does not support '-' + if data.shape != () and sign == '-': + sign = ' ' + + self.floatmode = floatmode + if floatmode == 'unique': + self.precision = None + else: + self.precision = precision + + self.precision = _none_or_positive_arg(self.precision, 'precision') + + self.suppress_small = suppress_small + self.sign = sign + self.exp_format = False + self.large_exponent = False + self.fillFormat(data) + + def fillFormat(self, data): + # only the finite values are used to compute the number of digits + finite_vals = data[isfinite(data)] + + # choose exponential mode based on the non-zero finite values: + abs_non_zero = absolute(finite_vals[finite_vals != 0]) + if len(abs_non_zero) != 0: + max_val = np.max(abs_non_zero) + min_val = np.min(abs_non_zero) + if self._legacy <= 202: + exp_cutoff_max = 1.e8 + else: + # consider data type while deciding the max cutoff for exp format + exp_cutoff_max = 10.**min(8, np.finfo(data.dtype).precision) + with errstate(over='ignore'): # division can overflow + if max_val >= exp_cutoff_max or (not self.suppress_small and + (min_val < 0.0001 or max_val / min_val > 1000.)): + self.exp_format = True + + # do a first pass of printing all the numbers, to determine sizes + if len(finite_vals) == 0: + self.pad_left = 0 + self.pad_right = 0 + self.trim = '.' + self.exp_size = -1 + self.unique = True + self.min_digits = None + elif self.exp_format: + trim, unique = '.', True + if self.floatmode == 'fixed' or self._legacy <= 113: + trim, unique = 'k', False + strs = (dragon4_scientific(x, precision=self.precision, + unique=unique, trim=trim, sign=self.sign == '+') + for x in finite_vals) + frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs)) + int_part, frac_part = zip(*(s.split('.') for s in frac_strs)) + self.exp_size = max(len(s) for s in exp_strs) - 1 + + self.trim = 'k' + self.precision = max(len(s) for s in frac_part) + self.min_digits = self.precision + self.unique = unique + + # for back-compat with np 1.13, use 2 spaces & sign and full prec + if self._legacy <= 113: + self.pad_left = 3 + else: + # this should be only 1 or 2. Can be calculated from sign. 
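+                # e.g. '-1.2e+05' splits into int_part '-1' and frac_part '2';
+                # the widest integer part (sign included) sets the left padding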
+ self.pad_left = max(len(s) for s in int_part) + # pad_right is only needed for nan length calculation + self.pad_right = self.exp_size + 2 + self.precision + else: + trim, unique = '.', True + if self.floatmode == 'fixed': + trim, unique = 'k', False + strs = (dragon4_positional(x, precision=self.precision, + fractional=True, + unique=unique, trim=trim, + sign=self.sign == '+') + for x in finite_vals) + int_part, frac_part = zip(*(s.split('.') for s in strs)) + if self._legacy <= 113: + self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part) + else: + self.pad_left = max(len(s) for s in int_part) + self.pad_right = max(len(s) for s in frac_part) + self.exp_size = -1 + self.unique = unique + + if self.floatmode in ['fixed', 'maxprec_equal']: + self.precision = self.min_digits = self.pad_right + self.trim = 'k' + else: + self.trim = '.' + self.min_digits = 0 + + if self._legacy > 113: + # account for sign = ' ' by adding one to pad_left + if self.sign == ' ' and not any(np.signbit(finite_vals)): + self.pad_left += 1 + + # if there are non-finite values, may need to increase pad_left + if data.size != finite_vals.size: + neginf = self.sign != '-' or any(data[isinf(data)] < 0) + offset = self.pad_right + 1 # +1 for decimal pt + current_options = format_options.get() + self.pad_left = max( + self.pad_left, len(current_options['nanstr']) - offset, + len(current_options['infstr']) + neginf - offset + ) + + def __call__(self, x): + if not np.isfinite(x): + with errstate(invalid='ignore'): + current_options = format_options.get() + if np.isnan(x): + sign = '+' if self.sign == '+' else '' + ret = sign + current_options['nanstr'] + else: # isinf + sign = '-' if x < 0 else '+' if self.sign == '+' else '' + ret = sign + current_options['infstr'] + return ' ' * ( + self.pad_left + self.pad_right + 1 - len(ret) + ) + ret + + if self.exp_format: + return dragon4_scientific(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + exp_digits=self.exp_size) + else: + return dragon4_positional(x, + precision=self.precision, + min_digits=self.min_digits, + unique=self.unique, + fractional=True, + trim=self.trim, + sign=self.sign == '+', + pad_left=self.pad_left, + pad_right=self.pad_right) + + +@set_module('numpy') +def format_float_scientific(x, precision=None, unique=True, trim='k', + sign=False, pad_left=None, exp_digits=None, + min_digits=None): + """ + Format a floating-point scalar as a decimal string in scientific notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed. If `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. 
+ If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + exp_digits : non-negative integer, optional + Pad the exponent with zeros until it contains at least this + many digits. If omitted, the exponent will be at least 2 digits. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. This only has an effect for + `unique=True`. In that case more digits than necessary to uniquely + identify the value may be printed and rounded unbiased. + + .. versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_positional + + Examples + -------- + >>> import numpy as np + >>> np.format_float_scientific(np.float32(np.pi)) + '3.1415927e+00' + >>> s = np.float32(1.23e24) + >>> np.format_float_scientific(s, unique=False, precision=15) + '1.230000071797338e+24' + >>> np.format_float_scientific(s, exp_digits=4) + '1.23e+0024' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_scientific(x, precision=precision, unique=unique, + trim=trim, sign=sign, pad_left=pad_left, + exp_digits=exp_digits, min_digits=min_digits) + + +@set_module('numpy') +def format_float_positional(x, precision=None, unique=True, + fractional=True, trim='k', sign=False, + pad_left=None, pad_right=None, min_digits=None): + """ + Format a floating-point scalar as a decimal string in positional notation. + + Provides control over rounding, trimming and padding. Uses and assumes + IEEE unbiased rounding. Uses the "Dragon4" algorithm. + + Parameters + ---------- + x : python float or numpy floating scalar + Value to format. + precision : non-negative integer or None, optional + Maximum number of digits to print. May be None if `unique` is + `True`, but must be an integer if unique is `False`. + unique : boolean, optional + If `True`, use a digit-generation strategy which gives the shortest + representation which uniquely identifies the floating-point number from + other values of the same type, by judicious rounding. If `precision` + is given fewer digits than necessary can be printed, or if `min_digits` + is given more can be printed, in which cases the last digit is rounded + with unbiased rounding. 
+ If `False`, digits are generated as if printing an infinite-precision + value and stopping after `precision` digits, rounding the remaining + value with unbiased rounding + fractional : boolean, optional + If `True`, the cutoffs of `precision` and `min_digits` refer to the + total number of digits after the decimal point, including leading + zeros. + If `False`, `precision` and `min_digits` refer to the total number of + significant digits, before or after the decimal point, ignoring leading + zeros. + trim : one of 'k', '.', '0', '-', optional + Controls post-processing trimming of trailing digits, as follows: + + * 'k' : keep trailing zeros, keep decimal point (no trimming) + * '.' : trim all trailing zeros, leave decimal point + * '0' : trim all but the zero before the decimal point. Insert the + zero if it is missing. + * '-' : trim trailing zeros and any trailing decimal point + sign : boolean, optional + Whether to show the sign for positive values. + pad_left : non-negative integer, optional + Pad the left side of the string with whitespace until at least that + many characters are to the left of the decimal point. + pad_right : non-negative integer, optional + Pad the right side of the string with whitespace until at least that + many characters are to the right of the decimal point. + min_digits : non-negative integer or None, optional + Minimum number of digits to print. Only has an effect if `unique=True` + in which case additional digits past those necessary to uniquely + identify the value may be printed, rounding the last additional digit. + + .. versionadded:: 1.21.0 + + Returns + ------- + rep : string + The string representation of the floating point value + + See Also + -------- + format_float_scientific + + Examples + -------- + >>> import numpy as np + >>> np.format_float_positional(np.float32(np.pi)) + '3.1415927' + >>> np.format_float_positional(np.float16(np.pi)) + '3.14' + >>> np.format_float_positional(np.float16(0.3)) + '0.3' + >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10) + '0.3000488281' + """ + precision = _none_or_positive_arg(precision, 'precision') + pad_left = _none_or_positive_arg(pad_left, 'pad_left') + pad_right = _none_or_positive_arg(pad_right, 'pad_right') + min_digits = _none_or_positive_arg(min_digits, 'min_digits') + if not fractional and precision == 0: + raise ValueError("precision must be greater than 0 if " + "fractional=False") + if min_digits > 0 and precision > 0 and min_digits > precision: + raise ValueError("min_digits must be less than or equal to precision") + return dragon4_positional(x, precision=precision, unique=unique, + fractional=fractional, trim=trim, + sign=sign, pad_left=pad_left, + pad_right=pad_right, min_digits=min_digits) + +class IntegerFormat: + def __init__(self, data, sign='-'): + if data.size > 0: + data_max = np.max(data) + data_min = np.min(data) + data_max_str_len = len(str(data_max)) + if sign == ' ' and data_min < 0: + sign = '-' + if data_max >= 0 and sign in "+ ": + data_max_str_len += 1 + max_str_len = max(data_max_str_len, + len(str(data_min))) + else: + max_str_len = 0 + self.format = f'{{:{sign}{max_str_len}d}}' + + def __call__(self, x): + return self.format.format(x) + +class BoolFormat: + def __init__(self, data, **kwargs): + # add an extra space so " True" and "False" have the same length and + # array elements align nicely when printed, except in 0d arrays + self.truestr = ' True' if data.shape != () else 'True' + + def __call__(self, x): + return self.truestr if x else 
"False" + + +class ComplexFloatingFormat: + """ Formatter for subtypes of np.complexfloating """ + def __init__(self, x, precision, floatmode, suppress_small, + sign=False, *, legacy=None): + # for backcompatibility, accept bools + if isinstance(sign, bool): + sign = '+' if sign else '-' + + floatmode_real = floatmode_imag = floatmode + if legacy <= 113: + floatmode_real = 'maxprec_equal' + floatmode_imag = 'maxprec' + + self.real_format = FloatingFormat( + x.real, precision, floatmode_real, suppress_small, + sign=sign, legacy=legacy + ) + self.imag_format = FloatingFormat( + x.imag, precision, floatmode_imag, suppress_small, + sign='+', legacy=legacy + ) + + def __call__(self, x): + r = self.real_format(x.real) + i = self.imag_format(x.imag) + + # add the 'j' before the terminal whitespace in i + sp = len(i.rstrip()) + i = i[:sp] + 'j' + i[sp:] + + return r + i + + +class _TimelikeFormat: + def __init__(self, data): + non_nat = data[~isnat(data)] + if len(non_nat) > 0: + # Max str length of non-NaT elements + max_str_len = max(len(self._format_non_nat(np.max(non_nat))), + len(self._format_non_nat(np.min(non_nat)))) + else: + max_str_len = 0 + if len(non_nat) < data.size: + # data contains a NaT + max_str_len = max(max_str_len, 5) + self._format = f'%{max_str_len}s' + self._nat = "'NaT'".rjust(max_str_len) + + def _format_non_nat(self, x): + # override in subclass + raise NotImplementedError + + def __call__(self, x): + if isnat(x): + return self._nat + else: + return self._format % self._format_non_nat(x) + + +class DatetimeFormat(_TimelikeFormat): + def __init__(self, x, unit=None, timezone=None, casting='same_kind', + legacy=False): + # Get the unit from the dtype + if unit is None: + if x.dtype.kind == 'M': + unit = datetime_data(x.dtype)[0] + else: + unit = 's' + + if timezone is None: + timezone = 'naive' + self.timezone = timezone + self.unit = unit + self.casting = casting + self.legacy = legacy + + # must be called after the above are configured + super().__init__(x) + + def __call__(self, x): + if self.legacy <= 113: + return self._format_non_nat(x) + return super().__call__(x) + + def _format_non_nat(self, x): + return "'%s'" % datetime_as_string(x, + unit=self.unit, + timezone=self.timezone, + casting=self.casting) + + +class TimedeltaFormat(_TimelikeFormat): + def _format_non_nat(self, x): + return str(x.astype('i8')) + + +class SubArrayFormat: + def __init__(self, format_function, **options): + self.format_function = format_function + self.threshold = options['threshold'] + self.edge_items = options['edgeitems'] + + def __call__(self, a): + self.summary_insert = "..." if a.size > self.threshold else "" + return self.format_array(a) + + def format_array(self, a): + if np.ndim(a) == 0: + return self.format_function(a) + + if self.summary_insert and a.shape[0] > 2 * self.edge_items: + formatted = ( + [self.format_array(a_) for a_ in a[:self.edge_items]] + + [self.summary_insert] + + [self.format_array(a_) for a_ in a[-self.edge_items:]] + ) + else: + formatted = [self.format_array(a_) for a_ in a] + + return "[" + ", ".join(formatted) + "]" + + +class StructuredVoidFormat: + """ + Formatter for structured np.void objects. + + This does not work on structured alias types like + np.dtype(('i4', 'i2,i2')), as alias scalars lose their field information, + and the implementation relies upon np.void.__getitem__. 
+ """ + def __init__(self, format_functions): + self.format_functions = format_functions + + @classmethod + def from_data(cls, data, **options): + """ + This is a second way to initialize StructuredVoidFormat, + using the raw data as input. Added to avoid changing + the signature of __init__. + """ + format_functions = [] + for field_name in data.dtype.names: + format_function = _get_format_function(data[field_name], **options) + if data.dtype[field_name].shape != (): + format_function = SubArrayFormat(format_function, **options) + format_functions.append(format_function) + return cls(format_functions) + + def __call__(self, x): + str_fields = [ + format_function(field) + for field, format_function in zip(x, self.format_functions) + ] + if len(str_fields) == 1: + return f"({str_fields[0]},)" + else: + return f"({', '.join(str_fields)})" + + +def _void_scalar_to_string(x, is_repr=True): + """ + Implements the repr for structured-void scalars. It is called from the + scalartypes.c.src code, and is placed here because it uses the elementwise + formatters defined above. + """ + options = format_options.get().copy() + + if options["legacy"] <= 125: + return StructuredVoidFormat.from_data(array(x), **options)(x) + + if options.get('formatter') is None: + options['formatter'] = {} + options['formatter'].setdefault('float_kind', str) + val_repr = StructuredVoidFormat.from_data(array(x), **options)(x) + if not is_repr: + return val_repr + cls = type(x) + cls_fqn = cls.__module__.replace("numpy", "np") + "." + cls.__name__ + void_dtype = np.dtype((np.void, x.dtype)) + return f"{cls_fqn}({val_repr}, dtype={void_dtype!s})" + + +_typelessdata = [int_, float64, complex128, _nt.bool] + + +def dtype_is_implied(dtype): + """ + Determine if the given dtype is implied by the representation + of its values. + + Parameters + ---------- + dtype : dtype + Data type + + Returns + ------- + implied : bool + True if the dtype is implied by the representation of its values. + + Examples + -------- + >>> import numpy as np + >>> np._core.arrayprint.dtype_is_implied(int) + True + >>> np.array([1, 2, 3], int) + array([1, 2, 3]) + >>> np._core.arrayprint.dtype_is_implied(np.int8) + False + >>> np.array([1, 2, 3], np.int8) + array([1, 2, 3], dtype=int8) + """ + dtype = np.dtype(dtype) + if format_options.get()['legacy'] <= 113 and dtype.type == np.bool: + return False + + # not just void types can be structured, and names are not part of the repr + if dtype.names is not None: + return False + + # should care about endianness *unless size is 1* (e.g., int8, bool) + if not dtype.isnative: + return False + + return dtype.type in _typelessdata + + +def dtype_short_repr(dtype): + """ + Convert a dtype to a short form which evaluates to the same dtype. + + The intent is roughly that the following holds + + >>> from numpy import * + >>> dt = np.int64([1, 2]).dtype + >>> assert eval(dtype_short_repr(dt)) == dt + """ + if type(dtype).__repr__ != np.dtype.__repr__: + # TODO: Custom repr for user DTypes, logic should likely move. 
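+        # the dtype class defines its own __repr__ (e.g. a user-defined
+        # DType), so defer to that repr instead of abbreviating it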
+        return repr(dtype)
+    if dtype.names is not None:
+        # structured dtypes give a list or tuple repr
+        return str(dtype)
+    elif issubclass(dtype.type, flexible):
+        # handle these separately so they don't give garbage like str256
+        return f"'{str(dtype)}'"
+
+    typename = dtype.name
+    if not dtype.isnative:
+        # deal with cases like dtype('<u2') that are identical to an
+        # established dtype (in this case uint16) except for byte order
+        return f"'{dtype.str}'"
+
+    # quote typenames which can't be represented as python variable names
+    if typename and not (typename[0].isalpha() and typename.isidentifier()):
+        typename = repr(typename)
+    return typename
+
+
+def _array_repr_implementation(
+        arr, max_line_width=None, precision=None, suppress_small=None,
+        array2string=array2string):
+    """Internal version of array_repr() that allows overriding array2string."""
+    current_options = format_options.get()
+    override_repr = current_options["override_repr"]
+    if override_repr is not None:
+        return override_repr(arr)
+
+    if max_line_width is None:
+        max_line_width = current_options['linewidth']
+
+    if type(arr) is not ndarray:
+        class_name = type(arr).__name__
+    else:
+        class_name = "array"
+
+    prefix = class_name + "("
+
+    if (current_options['legacy'] <= 113 and
+            arr.shape == () and not arr.dtype.names):
+        lst = repr(arr.item())
+    elif arr.size > 0 or arr.shape == (0,):
+        lst = array2string(arr, max_line_width, precision, suppress_small,
+                           ', ', prefix, suffix=")")
+    else:
+        # show zero-length shape unless it is (0,)
+        lst = f"[], shape={arr.shape}"
+
+    # Add any extra info
+    extras = []
+    if (arr.size == 0 and arr.shape != (0,)
+        or (current_options['legacy'] > 210
+            and arr.size > current_options['threshold'])):
+        extras.append(f"shape={arr.shape}")
+    if not dtype_is_implied(arr.dtype) or arr.size == 0:
+        extras.append(f"dtype={dtype_short_repr(arr.dtype)}")
+
+    if not extras:
+        return prefix + lst + ")"
+
+    arr_str = prefix + lst + ","
+    extra_str = ", ".join(extras) + ")"
+    # compute whether we should put extras on a new line: Do so if adding the
+    # extras would extend the last line past max_line_width.
+    # Note: This line gives the correct result even when rfind returns -1.
+    last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
+    spacer = " "
+    if current_options['legacy'] <= 113:
+        if issubclass(arr.dtype.type, flexible):
+            spacer = '\n' + ' ' * len(prefix)
+    elif last_line_len + len(extra_str) + 1 > max_line_width:
+        spacer = '\n' + ' ' * len(prefix)
+
+    return arr_str + spacer + extra_str
+
+
+def _array_repr_dispatcher(
+        arr, max_line_width=None, precision=None, suppress_small=None):
+    return (arr,)
+
+
+@array_function_dispatch(_array_repr_dispatcher, module='numpy')
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+    """
+    Return the string representation of an array.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+    max_line_width : int, optional
+        Inserts newlines if text is longer than `max_line_width`.
+        Defaults to ``numpy.get_printoptions()['linewidth']``.
+    precision : int, optional
+        Floating point precision.
+        Defaults to ``numpy.get_printoptions()['precision']``.
+    suppress_small : bool, optional
+        Represent numbers "very close" to zero as zero; default is False.
+        Very close is defined by precision: if the precision is 8, e.g.,
+        numbers smaller (in absolute value) than 5e-9 are represented as
+        zero.
+        Defaults to ``numpy.get_printoptions()['suppress']``.
+
+    Returns
+    -------
+    string : str
+        The string representation of an array.
+
+    See Also
+    --------
+    array_str, array2string, set_printoptions
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.array_repr(np.array([1,2]))
+    'array([1, 2])'
+    >>> np.array_repr(np.ma.array([0.]))
+    'MaskedArray([0.])'
+    >>> np.array_repr(np.array([], np.int32))
+    'array([], dtype=int32)'
+
+    >>> x = np.array([1e-6, 4e-7, 2, 3])
+    >>> np.array_repr(x, precision=6, suppress_small=True)
+    'array([0.000001,  0.      ,  2.      ,  3.      ])'
+
+    """
+    return _array_repr_implementation(
+        arr, max_line_width, precision, suppress_small)
+
+
+@_recursive_guard()
+def _guarded_repr_or_str(v):
+    if isinstance(v, bytes):
+        return repr(v)
+    return str(v)
+
+
+def _array_str_implementation(
+        a, max_line_width=None, precision=None, suppress_small=None,
+        array2string=array2string):
+    """Internal version of array_str() that allows overriding array2string."""
+    if (format_options.get()['legacy'] <= 113 and
+            a.shape == () and not a.dtype.names):
+        return str(a.item())
+
+    # the str of 0d arrays is a special case: It should appear like a scalar,
+    # so floats are not truncated by `precision`, and strings are not wrapped
+    # in quotes. So we return the str of the scalar value.
+ if a.shape == (): + # obtain a scalar and call str on it, avoiding problems for subclasses + # for which indexing with () returns a 0d instead of a scalar by using + # ndarray's getindex. Also guard against recursive 0d object arrays. + return _guarded_repr_or_str(np.ndarray.__getitem__(a, ())) + + return array2string(a, max_line_width, precision, suppress_small, ' ', "") + + +def _array_str_dispatcher( + a, max_line_width=None, precision=None, suppress_small=None): + return (a,) + + +@array_function_dispatch(_array_str_dispatcher, module='numpy') +def array_str(a, max_line_width=None, precision=None, suppress_small=None): + """ + Return a string representation of the data in an array. + + The data in the array is returned as a single string. This function is + similar to `array_repr`, the difference being that `array_repr` also + returns information on the kind of array and its data type. + + Parameters + ---------- + a : ndarray + Input array. + max_line_width : int, optional + Inserts newlines if text is longer than `max_line_width`. + Defaults to ``numpy.get_printoptions()['linewidth']``. + precision : int, optional + Floating point precision. + Defaults to ``numpy.get_printoptions()['precision']``. + suppress_small : bool, optional + Represent numbers "very close" to zero as zero; default is False. + Very close is defined by precision: if the precision is 8, e.g., + numbers smaller (in absolute value) than 5e-9 are represented as + zero. + Defaults to ``numpy.get_printoptions()['suppress']``. + + See Also + -------- + array2string, array_repr, set_printoptions + + Examples + -------- + >>> import numpy as np + >>> np.array_str(np.arange(3)) + '[0 1 2]' + + """ + return _array_str_implementation( + a, max_line_width, precision, suppress_small) + + +# needed if __array_function__ is disabled +_array2string_impl = getattr(array2string, '__wrapped__', array2string) +_default_array_str = functools.partial(_array_str_implementation, + array2string=_array2string_impl) +_default_array_repr = functools.partial(_array_repr_implementation, + array2string=_array2string_impl) diff --git a/python/numpy/_core/arrayprint.pyi b/python/numpy/_core/arrayprint.pyi new file mode 100644 index 000000000..fec03a6f2 --- /dev/null +++ b/python/numpy/_core/arrayprint.pyi @@ -0,0 +1,238 @@ +from collections.abc import Callable + +# Using a private class is by no means ideal, but it is simply a consequence +# of a `contextlib.context` returning an instance of aforementioned class +from contextlib import _GeneratorContextManager +from typing import ( + Any, + Final, + Literal, + SupportsIndex, + TypeAlias, + TypedDict, + overload, + type_check_only, +) + +from typing_extensions import deprecated + +import numpy as np +from numpy._globals import _NoValueType +from numpy._typing import NDArray, _CharLike_co, _FloatLike_co + +__all__ = [ + "array2string", + "array_repr", + "array_str", + "format_float_positional", + "format_float_scientific", + "get_printoptions", + "printoptions", + "set_printoptions", +] + +### + +_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"] +_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False] +_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle] +_Sign: TypeAlias = Literal["-", "+", " "] +_Trim: TypeAlias = Literal["k", ".", "0", "-"] +_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str] + +@type_check_only +class _FormatDict(TypedDict, total=False): + bool: Callable[[np.bool], str] + int: Callable[[np.integer], str] + timedelta: 
Callable[[np.timedelta64], str] + datetime: Callable[[np.datetime64], str] + float: Callable[[np.floating], str] + longfloat: Callable[[np.longdouble], str] + complexfloat: Callable[[np.complexfloating], str] + longcomplexfloat: Callable[[np.clongdouble], str] + void: Callable[[np.void], str] + numpystr: Callable[[_CharLike_co], str] + object: Callable[[object], str] + all: Callable[[object], str] + int_kind: Callable[[np.integer], str] + float_kind: Callable[[np.floating], str] + complex_kind: Callable[[np.complexfloating], str] + str_kind: Callable[[_CharLike_co], str] + +@type_check_only +class _FormatOptions(TypedDict): + precision: int + threshold: int + edgeitems: int + linewidth: int + suppress: bool + nanstr: str + infstr: str + formatter: _FormatDict | None + sign: _Sign + floatmode: _FloatMode + legacy: _Legacy + +### + +__docformat__: Final = "restructuredtext" # undocumented + +def set_printoptions( + precision: SupportsIndex | None = ..., + threshold: int | None = ..., + edgeitems: int | None = ..., + linewidth: int | None = ..., + suppress: bool | None = ..., + nanstr: str | None = ..., + infstr: str | None = ..., + formatter: _FormatDict | None = ..., + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + *, + legacy: _Legacy | None = None, + override_repr: _ReprFunc | None = None, +) -> None: ... +def get_printoptions() -> _FormatOptions: ... + +# public numpy export +@overload # no style +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + style: _NoValueType = ..., + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: _Legacy | None = None, +) -> str: ... +@overload # style= (positional), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: Literal["1.13"], +) -> str: ... +@overload # style= (keyword), legacy="1.13" +def array2string( + a: NDArray[Any], + max_line_width: int | None = None, + precision: SupportsIndex | None = None, + suppress_small: bool | None = None, + separator: str = " ", + prefix: str = "", + *, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + legacy: Literal["1.13"], +) -> str: ... +@overload # style= (positional), legacy!="1.13" +@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode") +def array2string( + a: NDArray[Any], + max_line_width: int | None, + precision: SupportsIndex | None, + suppress_small: bool | None, + separator: str, + prefix: str, + style: _ReprFunc, + formatter: _FormatDict | None = None, + threshold: int | None = None, + edgeitems: int | None = None, + sign: _Sign | None = None, + floatmode: _FloatMode | None = None, + suffix: str = "", + *, + legacy: _LegacyNoStyle | None = None, +) -> str: ... 
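+# Same as the previous overload, but with `style` passed by keyword; both
+# carry @deprecated so type checkers flag explicit use of `style` outside
+# 1.13 legacy mode.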
+@overload  # style= (keyword), legacy!="1.13"
+@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
+def array2string(
+    a: NDArray[Any],
+    max_line_width: int | None = None,
+    precision: SupportsIndex | None = None,
+    suppress_small: bool | None = None,
+    separator: str = " ",
+    prefix: str = "",
+    *,
+    style: _ReprFunc,
+    formatter: _FormatDict | None = None,
+    threshold: int | None = None,
+    edgeitems: int | None = None,
+    sign: _Sign | None = None,
+    floatmode: _FloatMode | None = None,
+    suffix: str = "",
+    legacy: _LegacyNoStyle | None = None,
+) -> str: ...
+
+def format_float_scientific(
+    x: _FloatLike_co,
+    precision: int | None = ...,
+    unique: bool = ...,
+    trim: _Trim = "k",
+    sign: bool = ...,
+    pad_left: int | None = ...,
+    exp_digits: int | None = ...,
+    min_digits: int | None = ...,
+) -> str: ...
+def format_float_positional(
+    x: _FloatLike_co,
+    precision: int | None = ...,
+    unique: bool = ...,
+    fractional: bool = ...,
+    trim: _Trim = "k",
+    sign: bool = ...,
+    pad_left: int | None = ...,
+    pad_right: int | None = ...,
+    min_digits: int | None = ...,
+) -> str: ...
+def array_repr(
+    arr: NDArray[Any],
+    max_line_width: int | None = ...,
+    precision: SupportsIndex | None = ...,
+    suppress_small: bool | None = ...,
+) -> str: ...
+def array_str(
+    a: NDArray[Any],
+    max_line_width: int | None = ...,
+    precision: SupportsIndex | None = ...,
+    suppress_small: bool | None = ...,
+) -> str: ...
+def printoptions(
+    precision: SupportsIndex | None = ...,
+    threshold: int | None = ...,
+    edgeitems: int | None = ...,
+    linewidth: int | None = ...,
+    suppress: bool | None = ...,
+    nanstr: str | None = ...,
+    infstr: str | None = ...,
+    formatter: _FormatDict | None = ...,
+    sign: _Sign | None = None,
+    floatmode: _FloatMode | None = None,
+    *,
+    legacy: _Legacy | None = None,
+    override_repr: _ReprFunc | None = None,
+) -> _GeneratorContextManager[_FormatOptions]: ...
diff --git a/python/numpy/_core/cversions.py b/python/numpy/_core/cversions.py
new file mode 100644
index 000000000..00159c3a8
--- /dev/null
+++ b/python/numpy/_core/cversions.py
@@ -0,0 +1,13 @@
+"""Simple script to compute the api hash of the current API.
+
+The API hash is defined by numpy_api_order and ufunc_api_order.
+
+"""
+from os.path import dirname
+
+from code_generators.genapi import fullapi_hash
+from code_generators.numpy_api import full_api
+
+if __name__ == '__main__':
+    curdir = dirname(__file__)
+    print(fullapi_hash(full_api))
diff --git a/python/numpy/_core/defchararray.py b/python/numpy/_core/defchararray.py
new file mode 100644
index 000000000..bde8921f5
--- /dev/null
+++ b/python/numpy/_core/defchararray.py
@@ -0,0 +1,1427 @@
+"""
+This module contains a set of functions for vectorized string
+operations and methods.
+
+.. note::
+   The `chararray` class exists for backwards compatibility with
+   Numarray; it is not recommended for new development. Starting from numpy
+   1.4, if one needs arrays of strings, it is recommended to use arrays of
+   `dtype` `object_`, `bytes_` or `str_`, and use the free functions
+   in the `numpy.char` module for fast vectorized string operations.
+
+Some methods will only be available if the corresponding string method is
+available in your version of Python.
+
+The preferred alias for `defchararray` is `numpy.char`.
+ +""" +import functools + +import numpy as np +from numpy._core import overrides +from numpy._core.multiarray import compare_chararrays +from numpy._core.strings import ( + _join as join, +) +from numpy._core.strings import ( + _rsplit as rsplit, +) +from numpy._core.strings import ( + _split as split, +) +from numpy._core.strings import ( + _splitlines as splitlines, +) +from numpy._utils import set_module +from numpy.strings import * +from numpy.strings import ( + multiply as strings_multiply, +) +from numpy.strings import ( + partition as strings_partition, +) +from numpy.strings import ( + rpartition as strings_rpartition, +) + +from .numeric import array as narray +from .numeric import asarray as asnarray +from .numeric import ndarray +from .numerictypes import bytes_, character, str_ + +__all__ = [ + 'equal', 'not_equal', 'greater_equal', 'less_equal', + 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', + 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', + 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', + 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition', + 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit', + 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase', + 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal', + 'array', 'asarray', 'compare_chararrays', 'chararray' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy.char') + + +def _binary_op_dispatcher(x1, x2): + return (x1, x2) + + +@array_function_dispatch(_binary_op_dispatcher) +def equal(x1, x2): + """ + Return (x1 == x2) element-wise. + + Unlike `numpy.equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + Examples + -------- + >>> import numpy as np + >>> y = "aa " + >>> x = "aa" + >>> np.char.equal(x, y) + array(True) + + See Also + -------- + not_equal, greater_equal, less_equal, greater, less + """ + return compare_chararrays(x1, x2, '==', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def not_equal(x1, x2): + """ + Return (x1 != x2) element-wise. + + Unlike `numpy.not_equal`, this comparison is performed by first + stripping whitespace characters from the end of the string. This + behavior is provided for backward-compatibility with numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. + + Returns + ------- + out : ndarray + Output array of bools. + + See Also + -------- + equal, greater_equal, less_equal, greater, less + + Examples + -------- + >>> import numpy as np + >>> x1 = np.array(['a', 'b', 'c']) + >>> np.char.not_equal(x1, 'b') + array([ True, False, True]) + + """ + return compare_chararrays(x1, x2, '!=', True) + + +@array_function_dispatch(_binary_op_dispatcher) +def greater_equal(x1, x2): + """ + Return (x1 >= x2) element-wise. + + Unlike `numpy.greater_equal`, this comparison is performed by + first stripping whitespace characters from the end of the string. + This behavior is provided for backward-compatibility with + numarray. + + Parameters + ---------- + x1, x2 : array_like of str or unicode + Input arrays of the same shape. 
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, less_equal, greater, less
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.greater_equal(x1, 'b')
+    array([False,  True,  True])
+
+    """
+    return compare_chararrays(x1, x2, '>=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less_equal(x1, x2):
+    """
+    Return (x1 <= x2) element-wise.
+
+    Unlike `numpy.less_equal`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, greater, less
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.less_equal(x1, 'b')
+    array([ True,  True, False])
+
+    """
+    return compare_chararrays(x1, x2, '<=', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def greater(x1, x2):
+    """
+    Return (x1 > x2) element-wise.
+
+    Unlike `numpy.greater`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, less
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.greater(x1, 'b')
+    array([False, False,  True])
+
+    """
+    return compare_chararrays(x1, x2, '>', True)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
+def less(x1, x2):
+    """
+    Return (x1 < x2) element-wise.
+
+    Unlike `numpy.less`, this comparison is performed by first
+    stripping whitespace characters from the end of the string. This
+    behavior is provided for backward-compatibility with numarray.
+
+    Parameters
+    ----------
+    x1, x2 : array_like of str or unicode
+        Input arrays of the same shape.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools.
+
+    See Also
+    --------
+    equal, not_equal, greater_equal, less_equal, greater
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x1 = np.array(['a', 'b', 'c'])
+    >>> np.char.less(x1, 'b')
+    array([ True, False, False])
+
+    """
+    return compare_chararrays(x1, x2, '<', True)
+
+
+@set_module("numpy.char")
+def multiply(a, i):
+    """
+    Return (a * i), that is string multiple concatenation,
+    element-wise.
+
+    Values in ``i`` of less than 0 are treated as 0 (which yields an
+    empty string).
+
+    Parameters
+    ----------
+    a : array_like, with `np.bytes_` or `np.str_` dtype
+
+    i : array_like, with any integer dtype
+
+    Returns
+    -------
+    out : ndarray
+        Output array of str or unicode, depending on input types
+
+    Notes
+    -----
+    This is a thin wrapper around np.strings.multiply that raises
+    `ValueError` when ``i`` is not an integer. It only
+    exists for backwards-compatibility.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["a", "b", "c"])
+    >>> np.strings.multiply(a, 3)
+    array(['aaa', 'bbb', 'ccc'], dtype='<U3')
+    >>> i = np.array([1, 2, 3])
+    >>> np.strings.multiply(a, i)
+    array(['a', 'bb', 'ccc'], dtype='<U3')
+    >>> np.strings.multiply(np.array(['a']), i)
+    array(['a', 'aa', 'aaa'], dtype='<U3')
+    >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
+    >>> np.strings.multiply(a, 3)
+    array([['aaa', 'bbb', 'ccc'],
+           ['ddd', 'eee', 'fff']], dtype='<U3')
+    >>> np.strings.multiply(a, i)
+    array([['a', 'bb', 'ccc'],
+           ['d', 'ee', 'fff']], dtype='<U3')
+
+    """
+    try:
+        return strings_multiply(a, i)
+    except TypeError:
+        raise ValueError("Can only multiply by integers")
+
+
+@set_module("numpy.char")
+def partition(a, sep):
+    """
+    Partition each element in `a` around `sep`.
+
+    Calls :meth:`str.partition` element-wise.
+
+    For each element in `a`, split the element as the first
+    occurrence of `sep`, and return 3 strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, return 3 strings
+    containing the string itself, followed by two empty strings.
+
+    Parameters
+    ----------
+    a : array-like, with ``bytes_`` or ``str_`` dtype
+        Input array
+    sep : {str, unicode}
+        Separator to split each string element in `a`.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``bytes_`` or ``str_`` dtype, depending on input
+        types. The output array will have an extra dimension with 3
+        elements per input element.
+
+    See Also
+    --------
+    str.partition
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array(["Numpy is nice!"])
+    >>> np.char.partition(x, " ")
+    array([['Numpy', ' ', 'is nice!']], dtype='<U8')
+
+    """
+    return np.stack(strings_partition(a, sep), axis=-1)
+
+
+@set_module("numpy.char")
+def rpartition(a, sep):
+    """
+    Partition (split) each element around the right-most separator.
+
+    Calls :meth:`str.rpartition` element-wise.
+
+    For each element in `a`, split the element as the last
+    occurrence of `sep`, and return 3 strings containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, return 3 strings
+    containing two empty strings, followed by the string itself.
+
+    Parameters
+    ----------
+    a : array-like, with ``bytes_`` or ``str_`` dtype
+        Input array
+    sep : str or unicode
+        Right-most separator to split each element in the array.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``bytes_`` or ``str_`` dtype, depending on input
+        types. The output array will have an extra dimension with 3
+        elements per input element.
+
+    See Also
+    --------
+    str.rpartition
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> np.char.rpartition(a, 'A')
+    array([['aAaAa', 'A', ''],
+           ['  a', 'A', '  '],
+           ['abB', 'A', 'Bba']], dtype='<U5')
+
+    """
+    return np.stack(strings_rpartition(a, sep), axis=-1)
+
+
+@set_module("numpy.char")
+class chararray(ndarray):
+    """
+    chararray(shape, itemsize=1, unicode=False, buffer=None,
+              offset=0, strides=None, order='C')
+
+    Provides a convenient view on arrays of string and unicode values.
+
+    .. note::
+       The `chararray` class exists for backwards compatibility with
+       Numarray; it is not recommended for new development. Starting from
+       numpy 1.4, if one needs arrays of strings, it is recommended to use
+       arrays of `dtype` `object_`, `bytes_` or `str_`, and use the free
+       functions in the `numpy.char` module for fast vectorized string
+       operations.
+
+    Versus a NumPy array of dtype `bytes_` or `str_`, this
+    class adds the following functionality:
+
+    1) values automatically have whitespace removed from the end
+       when indexed
+
+    2) comparison operators automatically remove whitespace from the
+       end when comparing values
+
+    3) vectorized string operations are provided as methods
+       (e.g. `chararray.endswith`) and infix operators
+       (e.g. ``+``, ``*``, ``%``)
+
+    chararrays should be created using `numpy.char.array` or
+    `numpy.char.asarray`, rather than this constructor directly.
+
+    This constructor creates the array, using `buffer` (with `offset`
+    and `strides`) if it is not ``None``. If `buffer` is ``None``, then
+    constructs a new array with `strides` in "C order", unless both
+    ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
+    is in "Fortran order".
+
+    Methods
+    -------
+    astype
+    argsort
+    copy
+    count
+    decode
+    dump
+    dumps
+    encode
+    endswith
+    expandtabs
+    fill
+    find
+    flatten
+    getfield
+    index
+    isalnum
+    isalpha
+    isdecimal
+    isdigit
+    islower
+    isnumeric
+    isspace
+    istitle
+    isupper
+    item
+    join
+    ljust
+    lower
+    lstrip
+    nonzero
+    put
+    ravel
+    repeat
+    replace
+    reshape
+    resize
+    rfind
+    rindex
+    rjust
+    rsplit
+    rstrip
+    searchsorted
+    setfield
+    setflags
+    sort
+    split
+    splitlines
+    squeeze
+    startswith
+    strip
+    swapaxes
+    swapcase
+    take
+    title
+    tofile
+    tolist
+    tostring
+    translate
+    transpose
+    upper
+    view
+    zfill
+
+    Parameters
+    ----------
+    shape : tuple
+        Shape of the array.
+    itemsize : int, optional
+        Length of each array element, in number of characters. Default is 1.
+    unicode : bool, optional
+        Are the array elements of type unicode (True) or string (False).
+        Default is False.
+    buffer : object exposing the buffer interface or str, optional
+        Memory address of the start of the array data. Default is None,
+        in which case a new array is created.
+    offset : int, optional
+        Fixed stride displacement from the beginning of an axis.
+        Default is 0. Needs to be >=0.
+    strides : array_like of ints, optional
+        Strides for the array (see `~numpy.ndarray.strides` for
+        full description). Default is None.
+    order : {'C', 'F'}, optional
+        The order in which the array data is stored in memory: 'C' ->
+        "row major" order (the default), 'F' -> "column major"
+        (Fortran) order.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> charar = np.char.chararray((3, 3))
+    >>> charar[:] = 'a'
+    >>> charar
+    chararray([[b'a', b'a', b'a'],
+               [b'a', b'a', b'a'],
+               [b'a', b'a', b'a']], dtype='|S1')
+
+    >>> charar = np.char.chararray(charar.shape, itemsize=5)
+    >>> charar[:] = 'abc'
+    >>> charar
+    chararray([[b'abc', b'abc', b'abc'],
+               [b'abc', b'abc', b'abc'],
+               [b'abc', b'abc', b'abc']], dtype='|S5')
+
+    """
+    def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
+                offset=0, strides=None, order='C'):
+        if unicode:
+            dtype = str_
+        else:
+            dtype = bytes_
+
+        # force itemsize to be a Python int, since using NumPy integer
+        # types results in itemsize.itemsize being used as the size of
+        # strings in the new array.
+ itemsize = int(itemsize) + + if isinstance(buffer, str): + # unicode objects do not have the buffer interface + filler = buffer + buffer = None + else: + filler = None + + if buffer is None: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + order=order) + else: + self = ndarray.__new__(subtype, shape, (dtype, itemsize), + buffer=buffer, + offset=offset, strides=strides, + order=order) + if filler is not None: + self[...] = filler + + return self + + def __array_wrap__(self, arr, context=None, return_scalar=False): + # When calling a ufunc (and some other functions), we return a + # chararray if the ufunc output is a string-like array, + # or an ndarray otherwise + if arr.dtype.char in "SUbc": + return arr.view(type(self)) + return arr + + def __array_finalize__(self, obj): + # The b is a special case because it is used for reconstructing. + if self.dtype.char not in 'VSUbc': + raise ValueError("Can only create a chararray from string data.") + + def __getitem__(self, obj): + val = ndarray.__getitem__(self, obj) + if isinstance(val, character): + return val.rstrip() + return val + + # IMPLEMENTATION NOTE: Most of the methods of this class are + # direct delegations to the free functions in this module. + # However, those that return an array of strings should instead + # return a chararray, so some extra wrapping is required. + + def __eq__(self, other): + """ + Return (self == other) element-wise. + + See Also + -------- + equal + """ + return equal(self, other) + + def __ne__(self, other): + """ + Return (self != other) element-wise. + + See Also + -------- + not_equal + """ + return not_equal(self, other) + + def __ge__(self, other): + """ + Return (self >= other) element-wise. + + See Also + -------- + greater_equal + """ + return greater_equal(self, other) + + def __le__(self, other): + """ + Return (self <= other) element-wise. + + See Also + -------- + less_equal + """ + return less_equal(self, other) + + def __gt__(self, other): + """ + Return (self > other) element-wise. + + See Also + -------- + greater + """ + return greater(self, other) + + def __lt__(self, other): + """ + Return (self < other) element-wise. + + See Also + -------- + less + """ + return less(self, other) + + def __add__(self, other): + """ + Return (self + other), that is string concatenation, + element-wise for a pair of array_likes of str or unicode. + + See Also + -------- + add + """ + return add(self, other) + + def __radd__(self, other): + """ + Return (other + self), that is string concatenation, + element-wise for a pair of array_likes of `bytes_` or `str_`. + + See Also + -------- + add + """ + return add(other, self) + + def __mul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __rmul__(self, i): + """ + Return (self * i), that is string multiple concatenation, + element-wise. + + See Also + -------- + multiply + """ + return asarray(multiply(self, i)) + + def __mod__(self, i): + """ + Return (self % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of `bytes_` + or `str_`. + + See Also + -------- + mod + """ + return asarray(mod(self, i)) + + def __rmod__(self, other): + return NotImplemented + + def argsort(self, axis=-1, kind=None, order=None): + """ + Return the indices that sort the array lexicographically. 
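The operator plumbing above is easiest to see end-to-end. A minimal, hedged sketch of the numarray-compatible behavior (expected outputs in comments; assumes NumPy 2.x, where `np.char.multiply` rejects non-integer counts as its Notes describe):

```python
import numpy as np

c = np.char.array(['hello ', 'numpy'])  # factory defined later in this module
print(c[0])           # 'hello' -- __getitem__ rstrips the stored value
print(c == 'hello')   # [ True False] -- __eq__ uses the whitespace-stripping equal()
print(c + '!')        # element-wise concatenation via __add__/add
print(c * 2)          # element-wise repetition via __mul__/multiply

try:
    np.char.multiply(c, 1.5)
except ValueError as exc:
    print(exc)        # the backwards-compat wrapper only accepts integer counts
```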
+ + For full documentation see `numpy.argsort`, for which this method is + in fact merely a "thin wrapper." + + Examples + -------- + >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5') + >>> c = c.view(np.char.chararray); c + chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'], + dtype='|S5') + >>> c[c.argsort()] + chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'], + dtype='|S5') + + """ + return self.__array__().argsort(axis, kind, order) + argsort.__doc__ = ndarray.argsort.__doc__ + + def capitalize(self): + """ + Return a copy of `self` with only the first character of each element + capitalized. + + See Also + -------- + char.capitalize + + """ + return asarray(capitalize(self)) + + def center(self, width, fillchar=' '): + """ + Return a copy of `self` with its elements centered in a + string of length `width`. + + See Also + -------- + center + """ + return asarray(center(self, width, fillchar)) + + def count(self, sub, start=0, end=None): + """ + Returns an array with the number of non-overlapping occurrences of + substring `sub` in the range [`start`, `end`]. + + See Also + -------- + char.count + + """ + return count(self, sub, start, end) + + def decode(self, encoding=None, errors=None): + """ + Calls ``bytes.decode`` element-wise. + + See Also + -------- + char.decode + + """ + return decode(self, encoding, errors) + + def encode(self, encoding=None, errors=None): + """ + Calls :meth:`str.encode` element-wise. + + See Also + -------- + char.encode + + """ + return encode(self, encoding, errors) + + def endswith(self, suffix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` ends with `suffix`, otherwise `False`. + + See Also + -------- + char.endswith + + """ + return endswith(self, suffix, start, end) + + def expandtabs(self, tabsize=8): + """ + Return a copy of each string element where all tab characters are + replaced by one or more spaces. + + See Also + -------- + char.expandtabs + + """ + return asarray(expandtabs(self, tabsize)) + + def find(self, sub, start=0, end=None): + """ + For each element, return the lowest index in the string where + substring `sub` is found. + + See Also + -------- + char.find + + """ + return find(self, sub, start, end) + + def index(self, sub, start=0, end=None): + """ + Like `find`, but raises :exc:`ValueError` when the substring is not + found. + + See Also + -------- + char.index + + """ + return index(self, sub, start, end) + + def isalnum(self): + """ + Returns true for each element if all characters in the string + are alphanumeric and there is at least one character, false + otherwise. + + See Also + -------- + char.isalnum + + """ + return isalnum(self) + + def isalpha(self): + """ + Returns true for each element if all characters in the string + are alphabetic and there is at least one character, false + otherwise. + + See Also + -------- + char.isalpha + + """ + return isalpha(self) + + def isdigit(self): + """ + Returns true for each element if all characters in the string are + digits and there is at least one character, false otherwise. + + See Also + -------- + char.isdigit + + """ + return isdigit(self) + + def islower(self): + """ + Returns true for each element if all cased characters in the + string are lowercase and there is at least one cased character, + false otherwise. 
+ + See Also + -------- + char.islower + + """ + return islower(self) + + def isspace(self): + """ + Returns true for each element if there are only whitespace + characters in the string and there is at least one character, + false otherwise. + + See Also + -------- + char.isspace + + """ + return isspace(self) + + def istitle(self): + """ + Returns true for each element if the element is a titlecased + string and there is at least one character, false otherwise. + + See Also + -------- + char.istitle + + """ + return istitle(self) + + def isupper(self): + """ + Returns true for each element if all cased characters in the + string are uppercase and there is at least one character, false + otherwise. + + See Also + -------- + char.isupper + + """ + return isupper(self) + + def join(self, seq): + """ + Return a string which is the concatenation of the strings in the + sequence `seq`. + + See Also + -------- + char.join + + """ + return join(self, seq) + + def ljust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` left-justified in a + string of length `width`. + + See Also + -------- + char.ljust + + """ + return asarray(ljust(self, width, fillchar)) + + def lower(self): + """ + Return an array with the elements of `self` converted to + lowercase. + + See Also + -------- + char.lower + + """ + return asarray(lower(self)) + + def lstrip(self, chars=None): + """ + For each element in `self`, return a copy with the leading characters + removed. + + See Also + -------- + char.lstrip + + """ + return lstrip(self, chars) + + def partition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + partition + """ + return asarray(partition(self, sep)) + + def replace(self, old, new, count=None): + """ + For each element in `self`, return a copy of the string with all + occurrences of substring `old` replaced by `new`. + + See Also + -------- + char.replace + + """ + return replace(self, old, new, count if count is not None else -1) + + def rfind(self, sub, start=0, end=None): + """ + For each element in `self`, return the highest index in the string + where substring `sub` is found, such that `sub` is contained + within [`start`, `end`]. + + See Also + -------- + char.rfind + + """ + return rfind(self, sub, start, end) + + def rindex(self, sub, start=0, end=None): + """ + Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is + not found. + + See Also + -------- + char.rindex + + """ + return rindex(self, sub, start, end) + + def rjust(self, width, fillchar=' '): + """ + Return an array with the elements of `self` + right-justified in a string of length `width`. + + See Also + -------- + char.rjust + + """ + return asarray(rjust(self, width, fillchar)) + + def rpartition(self, sep): + """ + Partition each element in `self` around `sep`. + + See Also + -------- + rpartition + """ + return asarray(rpartition(self, sep)) + + def rsplit(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in + the string, using `sep` as the delimiter string. + + See Also + -------- + char.rsplit + + """ + return rsplit(self, sep, maxsplit) + + def rstrip(self, chars=None): + """ + For each element in `self`, return a copy with the trailing + characters removed. + + See Also + -------- + char.rstrip + + """ + return rstrip(self, chars) + + def split(self, sep=None, maxsplit=None): + """ + For each element in `self`, return a list of the words in the + string, using `sep` as the delimiter string. 
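Since each method above simply forwards to the corresponding free function (re-wrapping string-valued results into a chararray via `asarray`), a short sketch of the delegation (outputs in comments are what current NumPy is expected to print):

```python
import numpy as np

c = np.char.array(['abc', '123', 'a1'])
print(c.isalpha())   # [ True False False] -- predicate results stay plain bool ndarrays
print(c.isdigit())   # [False  True False]
print(c.upper())     # chararray(['ABC', '123', 'A1'], dtype='<U3')
print(c.find('1'))   # [-1  0  1] -- index results are integer ndarrays
```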
+ + See Also + -------- + char.split + + """ + return split(self, sep, maxsplit) + + def splitlines(self, keepends=None): + """ + For each element in `self`, return a list of the lines in the + element, breaking at line boundaries. + + See Also + -------- + char.splitlines + + """ + return splitlines(self, keepends) + + def startswith(self, prefix, start=0, end=None): + """ + Returns a boolean array which is `True` where the string element + in `self` starts with `prefix`, otherwise `False`. + + See Also + -------- + char.startswith + + """ + return startswith(self, prefix, start, end) + + def strip(self, chars=None): + """ + For each element in `self`, return a copy with the leading and + trailing characters removed. + + See Also + -------- + char.strip + + """ + return strip(self, chars) + + def swapcase(self): + """ + For each element in `self`, return a copy of the string with + uppercase characters converted to lowercase and vice versa. + + See Also + -------- + char.swapcase + + """ + return asarray(swapcase(self)) + + def title(self): + """ + For each element in `self`, return a titlecased version of the + string: words start with uppercase characters, all remaining cased + characters are lowercase. + + See Also + -------- + char.title + + """ + return asarray(title(self)) + + def translate(self, table, deletechars=None): + """ + For each element in `self`, return a copy of the string where + all characters occurring in the optional argument + `deletechars` are removed, and the remaining characters have + been mapped through the given translation table. + + See Also + -------- + char.translate + + """ + return asarray(translate(self, table, deletechars)) + + def upper(self): + """ + Return an array with the elements of `self` converted to + uppercase. + + See Also + -------- + char.upper + + """ + return asarray(upper(self)) + + def zfill(self, width): + """ + Return the numeric string left-filled with zeros in a string of + length `width`. + + See Also + -------- + char.zfill + + """ + return asarray(zfill(self, width)) + + def isnumeric(self): + """ + For each element in `self`, return True if there are only + numeric characters in the element. + + See Also + -------- + char.isnumeric + + """ + return isnumeric(self) + + def isdecimal(self): + """ + For each element in `self`, return True if there are only + decimal characters in the element. + + See Also + -------- + char.isdecimal + + """ + return isdecimal(self) + + +@set_module("numpy.char") +def array(obj, itemsize=None, copy=True, unicode=None, order=None): + """ + Create a `~numpy.char.chararray`. + + .. note:: + This class is provided for numarray backward-compatibility. + New code (not concerned with numarray compatibility) should use + arrays of type `bytes_` or `str_` and use the free functions + in :mod:`numpy.char` for fast vectorized string operations instead. + + Versus a NumPy array of dtype `bytes_` or `str_`, this + class adds the following functionality: + + 1) values automatically have whitespace removed from the end + when indexed + + 2) comparison operators automatically remove whitespace from the + end when comparing values + + 3) vectorized string operations are provided as methods + (e.g. `chararray.endswith `) + and infix operators (e.g. ``+, *, %``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. 
If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + copy : bool, optional + If true (default), then the object is copied. Otherwise, a copy + will only be made if ``__array__`` returns a copy, if obj is a + nested sequence, or if a copy is needed to satisfy any of the other + requirements (`itemsize`, unicode, `order`, etc.). + + unicode : bool, optional + When true, the resulting `~numpy.char.chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + None and `obj` is one of the following: + + - a `~numpy.char.chararray`, + - an ndarray of type :class:`str_` or :class:`bytes_` + - a Python :class:`str` or :class:`bytes` object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). If order is 'A', then the returned array may + be in any order (either C-, Fortran-contiguous, or even + discontiguous). + + Examples + -------- + + >>> import numpy as np + >>> char_array = np.char.array(['hello', 'world', 'numpy','array']) + >>> char_array + chararray(['hello', 'world', 'numpy', 'array'], dtype='`) + and infix operators (e.g. ``+``, ``*``, ``%``) + + Parameters + ---------- + obj : array of str or unicode-like + + itemsize : int, optional + `itemsize` is the number of characters per scalar in the + resulting array. If `itemsize` is None, and `obj` is an + object array or a Python list, the `itemsize` will be + automatically determined. If `itemsize` is provided and `obj` + is of type str or unicode, then the `obj` string will be + chunked into `itemsize` pieces. + + unicode : bool, optional + When true, the resulting `~numpy.char.chararray` can contain Unicode + characters, when false only 8-bit characters. If unicode is + None and `obj` is one of the following: + + - a `~numpy.char.chararray`, + - an ndarray of type `str_` or `unicode_` + - a Python str or unicode object, + + then the unicode setting of the output array will be + automatically determined. + + order : {'C', 'F'}, optional + Specify the order of the array. If order is 'C' (default), then the + array will be in C-contiguous order (last-index varies the + fastest). If order is 'F', then the returned array + will be in Fortran-contiguous order (first-index varies the + fastest). + + Examples + -------- + >>> import numpy as np + >>> np.char.asarray(['hello', 'world']) + chararray(['hello', 'world'], dtype=' _CharArray[bytes_]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + itemsize: SupportsIndex | SupportsInt = ..., + unicode: L[True] = ..., + buffer: _SupportsBuffer = ..., + offset: SupportsIndex = ..., + strides: _ShapeLike = ..., + order: _OrderKACF = ..., + ) -> _CharArray[str_]: ... + + def __array_finalize__(self, obj: object) -> None: ... + def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... + def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ... + def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ... 
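A small usage sketch of the `np.char.array` factory documented above (outputs in comments reflect the documented inference rules; exact scalar reprs vary by NumPy version):

```python
import numpy as np

# unicode and itemsize are inferred from the input
a = np.char.array(['hello', 'hi'])
print(a.dtype)   # <U5  (itemsize of 5 characters)

b = np.char.array([b'abc'], unicode=False)
print(b.dtype)   # |S3  (8-bit storage)

# assignment pads to itemsize, but reads come back stripped
a[1] = 'hey  '
print(a[1])      # hey -- trailing whitespace removed on access
```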
+ + @overload + def __eq__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __eq__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __ne__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __ne__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __ge__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __ge__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __le__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __le__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __gt__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __gt__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __lt__( + self: _CharArray[str_], + other: U_co, + ) -> NDArray[np.bool]: ... + @overload + def __lt__( + self: _CharArray[bytes_], + other: S_co, + ) -> NDArray[np.bool]: ... + + @overload + def __add__( + self: _CharArray[str_], + other: U_co, + ) -> _CharArray[str_]: ... + @overload + def __add__( + self: _CharArray[bytes_], + other: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def __radd__( + self: _CharArray[str_], + other: U_co, + ) -> _CharArray[str_]: ... + @overload + def __radd__( + self: _CharArray[bytes_], + other: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def center( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = ..., + ) -> _CharArray[str_]: ... + @overload + def center( + self: _CharArray[bytes_], + width: i_co, + fillchar: S_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def count( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def count( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + def decode( + self: _CharArray[bytes_], + encoding: str | None = ..., + errors: str | None = ..., + ) -> _CharArray[str_]: ... + + def encode( + self: _CharArray[str_], + encoding: str | None = ..., + errors: str | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def endswith( + self: _CharArray[str_], + suffix: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[np.bool]: ... + @overload + def endswith( + self: _CharArray[bytes_], + suffix: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[np.bool]: ... + + def expandtabs( + self, + tabsize: i_co = ..., + ) -> Self: ... + + @overload + def find( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def find( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + @overload + def index( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def index( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + @overload + def join( + self: _CharArray[str_], + seq: U_co, + ) -> _CharArray[str_]: ... + @overload + def join( + self: _CharArray[bytes_], + seq: S_co, + ) -> _CharArray[bytes_]: ... 
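The `decode`/`encode` stubs above encode the str_/bytes_ hand-off; a quick round-trip sketch:

```python
import numpy as np

u = np.char.array(['å', 'ß'])
raw = u.encode('utf-8')                    # str_ elements -> bytes_ elements
print(raw)                                 # [b'\xc3\xa5' b'\xc3\x9f']
print(np.char.decode(raw, 'utf-8') == u)   # [ True  True] -- round-trips
```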
+ + @overload + def ljust( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = ..., + ) -> _CharArray[str_]: ... + @overload + def ljust( + self: _CharArray[bytes_], + width: i_co, + fillchar: S_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def lstrip( + self: _CharArray[str_], + chars: U_co | None = ..., + ) -> _CharArray[str_]: ... + @overload + def lstrip( + self: _CharArray[bytes_], + chars: S_co | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def partition( + self: _CharArray[str_], + sep: U_co, + ) -> _CharArray[str_]: ... + @overload + def partition( + self: _CharArray[bytes_], + sep: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def replace( + self: _CharArray[str_], + old: U_co, + new: U_co, + count: i_co | None = ..., + ) -> _CharArray[str_]: ... + @overload + def replace( + self: _CharArray[bytes_], + old: S_co, + new: S_co, + count: i_co | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def rfind( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def rfind( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + @overload + def rindex( + self: _CharArray[str_], + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + @overload + def rindex( + self: _CharArray[bytes_], + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[int_]: ... + + @overload + def rjust( + self: _CharArray[str_], + width: i_co, + fillchar: U_co = ..., + ) -> _CharArray[str_]: ... + @overload + def rjust( + self: _CharArray[bytes_], + width: i_co, + fillchar: S_co = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def rpartition( + self: _CharArray[str_], + sep: U_co, + ) -> _CharArray[str_]: ... + @overload + def rpartition( + self: _CharArray[bytes_], + sep: S_co, + ) -> _CharArray[bytes_]: ... + + @overload + def rsplit( + self: _CharArray[str_], + sep: U_co | None = ..., + maxsplit: i_co | None = ..., + ) -> NDArray[object_]: ... + @overload + def rsplit( + self: _CharArray[bytes_], + sep: S_co | None = ..., + maxsplit: i_co | None = ..., + ) -> NDArray[object_]: ... + + @overload + def rstrip( + self: _CharArray[str_], + chars: U_co | None = ..., + ) -> _CharArray[str_]: ... + @overload + def rstrip( + self: _CharArray[bytes_], + chars: S_co | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def split( + self: _CharArray[str_], + sep: U_co | None = ..., + maxsplit: i_co | None = ..., + ) -> NDArray[object_]: ... + @overload + def split( + self: _CharArray[bytes_], + sep: S_co | None = ..., + maxsplit: i_co | None = ..., + ) -> NDArray[object_]: ... + + def splitlines(self, keepends: b_co | None = ...) -> NDArray[object_]: ... + + @overload + def startswith( + self: _CharArray[str_], + prefix: U_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[np.bool]: ... + @overload + def startswith( + self: _CharArray[bytes_], + prefix: S_co, + start: i_co = ..., + end: i_co | None = ..., + ) -> NDArray[np.bool]: ... + + @overload + def strip( + self: _CharArray[str_], + chars: U_co | None = ..., + ) -> _CharArray[str_]: ... + @overload + def strip( + self: _CharArray[bytes_], + chars: S_co | None = ..., + ) -> _CharArray[bytes_]: ... + + @overload + def translate( + self: _CharArray[str_], + table: U_co, + deletechars: U_co | None = ..., + ) -> _CharArray[str_]: ... 
+ @overload + def translate( + self: _CharArray[bytes_], + table: S_co, + deletechars: S_co | None = ..., + ) -> _CharArray[bytes_]: ... + + def zfill(self, width: i_co) -> Self: ... + def capitalize(self) -> Self: ... + def title(self) -> Self: ... + def swapcase(self) -> Self: ... + def lower(self) -> Self: ... + def upper(self) -> Self: ... + def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ... + +# Comparison +@overload +def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... +@overload +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... +@overload +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def mod(a: U_co, value: Any) -> NDArray[np.str_]: ... +@overload +def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ... + +@overload +def capitalize(a: U_co) -> NDArray[str_]: ... +@overload +def capitalize(a: S_co) -> NDArray[bytes_]: ... +@overload +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... 
+@overload +def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... + +def decode( + a: S_co, + encoding: str | None = ..., + errors: str | None = ..., +) -> NDArray[str_]: ... +def encode( + a: U_co | T_co, + encoding: str | None = ..., + errors: str | None = ..., +) -> NDArray[bytes_]: ... + +@overload +def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def join(sep: U_co, seq: U_co) -> NDArray[str_]: ... +@overload +def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ... +@overload +def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def lower(a: U_co) -> NDArray[str_]: ... +@overload +def lower(a: S_co) -> NDArray[bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def lstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +@overload +def lstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def partition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def replace( + a: U_co, + old: U_co, + new: U_co, + count: i_co | None = ..., +) -> NDArray[str_]: ... +@overload +def replace( + a: S_co, + old: S_co, + new: S_co, + count: i_co | None = ..., +) -> NDArray[bytes_]: ... +@overload +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = ..., +) -> _StringDTypeArray: ... +@overload +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = ..., +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rjust( + a: U_co, + width: i_co, + fillchar: U_co = ..., +) -> NDArray[str_]: ... +@overload +def rjust( + a: S_co, + width: i_co, + fillchar: S_co = ..., +) -> NDArray[bytes_]: ... +@overload +def rjust( + a: _StringDTypeSupportsArray, + width: i_co, + fillchar: _StringDTypeSupportsArray = ..., +) -> _StringDTypeArray: ... 
+@overload +def rjust( + a: T_co, + width: i_co, + fillchar: T_co = ..., +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ... +@overload +def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rsplit( + a: U_co, + sep: U_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: S_co, + sep: S_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: _StringDTypeSupportsArray, + sep: _StringDTypeSupportsArray | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def rsplit( + a: T_co, + sep: T_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... + +@overload +def rstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +@overload +def rstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +@overload +def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def split( + a: U_co, + sep: U_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def split( + a: S_co, + sep: S_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def split( + a: _StringDTypeSupportsArray, + sep: _StringDTypeSupportsArray | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... +@overload +def split( + a: T_co, + sep: T_co | None = ..., + maxsplit: i_co | None = ..., +) -> NDArray[object_]: ... + +def splitlines(a: UST_co, keepends: b_co | None = ...) -> NDArray[np.object_]: ... + +@overload +def strip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ... +@overload +def strip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ... +@overload +def strip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def swapcase(a: U_co) -> NDArray[str_]: ... +@overload +def swapcase(a: S_co) -> NDArray[bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def title(a: U_co) -> NDArray[str_]: ... +@overload +def title(a: S_co) -> NDArray[bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def translate( + a: U_co, + table: str, + deletechars: str | None = ..., +) -> NDArray[str_]: ... +@overload +def translate( + a: S_co, + table: str, + deletechars: str | None = ..., +) -> NDArray[bytes_]: ... +@overload +def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: str | None = ..., +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: str | None = ..., +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def upper(a: U_co) -> NDArray[str_]: ... +@overload +def upper(a: S_co) -> NDArray[bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... 
+@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def zfill(a: U_co, width: i_co) -> NDArray[str_]: ... +@overload +def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ... +@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... + +# String information +@overload +def count( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def count( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def count( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def endswith( + a: U_co, + suffix: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: S_co, + suffix: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: T_co, + suffix: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... + +@overload +def find( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def find( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def find( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def index( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def index( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def index( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +def isalpha(a: UST_co) -> NDArray[np.bool]: ... +def isalnum(a: UST_co) -> NDArray[np.bool]: ... +def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ... +def isdigit(a: UST_co) -> NDArray[np.bool]: ... +def islower(a: UST_co) -> NDArray[np.bool]: ... +def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ... +def isspace(a: UST_co) -> NDArray[np.bool]: ... +def istitle(a: UST_co) -> NDArray[np.bool]: ... +def isupper(a: UST_co) -> NDArray[np.bool]: ... + +@overload +def rfind( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def rfind( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def rfind( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def rindex( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def rindex( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[int_]: ... +@overload +def rindex( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def startswith( + a: U_co, + prefix: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: S_co, + prefix: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: T_co, + prefix: T_co, + start: i_co = 0, + end: i_co | None = None, +) -> NDArray[np.bool]: ... + +def str_len(A: UST_co) -> NDArray[int_]: ... 
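A hedged sketch of how these stubs surface to a type checker (the annotations mirror the declared return types above; `NDArray` comes from the public `numpy.typing`):

```python
import numpy as np
from numpy.typing import NDArray

names = np.array(['ada', 'grace'])
mask: NDArray[np.bool_] = np.char.startswith(names, 'a')   # stub: NDArray[np.bool]
lengths: NDArray[np.int_] = np.char.str_len(names)         # stub: NDArray[int_]
upper: NDArray[np.str_] = np.char.upper(names)             # str_ input stays str_
print(mask, lengths, upper)
```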
+ +# Overload 1 and 2: str- or bytes-based array-likes +# overload 3 and 4: arbitrary object with unicode=False (-> bytes_) +# overload 5 and 6: arbitrary object with unicode=True (-> str_) +# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_) +@overload +def array( + obj: U_co, + itemsize: int | None = ..., + copy: bool = ..., + unicode: L[True] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def array( + obj: S_co, + itemsize: int | None = ..., + copy: bool = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[False], + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + *, + unicode: L[False], + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def array( + obj: object, + itemsize: int | None, + copy: bool, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + *, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def array( + obj: object, + itemsize: int | None = ..., + copy: bool = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... + +@overload +def asarray( + obj: U_co, + itemsize: int | None = ..., + unicode: L[True] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: S_co, + itemsize: int | None = ..., + unicode: L[False] | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[False], + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = ..., + *, + unicode: L[False], + order: _OrderKACF = ..., +) -> _CharArray[bytes_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = ..., + *, + unicode: L[True], + order: _OrderKACF = ..., +) -> _CharArray[str_]: ... +@overload +def asarray( + obj: object, + itemsize: int | None = ..., + unicode: bool | None = ..., + order: _OrderKACF = ..., +) -> _CharArray[str_] | _CharArray[bytes_]: ... diff --git a/python/numpy/_core/einsumfunc.py b/python/numpy/_core/einsumfunc.py new file mode 100644 index 000000000..8e71e6d4b --- /dev/null +++ b/python/numpy/_core/einsumfunc.py @@ -0,0 +1,1498 @@ +""" +Implementation of optimized einsum. + +""" +import itertools +import operator + +from numpy._core.multiarray import c_einsum +from numpy._core.numeric import asanyarray, tensordot +from numpy._core.overrides import array_function_dispatch + +__all__ = ['einsum', 'einsum_path'] + +# importing string for string.ascii_letters would be too slow +# the first import before caching has been measured to take 800 µs (#23777) +# imports begin with uppercase to mimic ASCII values to avoid sorting issues +einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' +einsum_symbols_set = set(einsum_symbols) + + +def _flop_count(idx_contraction, inner, num_terms, size_dictionary): + """ + Computes the number of FLOPS in the contraction. 
+ + Parameters + ---------- + idx_contraction : iterable + The indices involved in the contraction + inner : bool + Does this contraction require an inner product? + num_terms : int + The number of terms in a contraction + size_dictionary : dict + The size of each of the indices in idx_contraction + + Returns + ------- + flop_count : int + The total number of FLOPS required for the contraction. + + Examples + -------- + + >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) + 30 + + >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) + 60 + + """ + + overall_size = _compute_size_by_dict(idx_contraction, size_dictionary) + op_factor = max(1, num_terms - 1) + if inner: + op_factor += 1 + + return overall_size * op_factor + +def _compute_size_by_dict(indices, idx_dict): + """ + Computes the product of the elements in indices based on the dictionary + idx_dict. + + Parameters + ---------- + indices : iterable + Indices to base the product on. + idx_dict : dictionary + Dictionary of index sizes + + Returns + ------- + ret : int + The resulting product. + + Examples + -------- + >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) + 90 + + """ + ret = 1 + for i in indices: + ret *= idx_dict[i] + return ret + + +def _find_contraction(positions, input_sets, output_set): + """ + Finds the contraction for a given set of input and output sets. + + Parameters + ---------- + positions : iterable + Integer positions of terms used in the contraction. + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + + Returns + ------- + new_result : set + The indices of the resulting contraction + remaining : list + List of sets that have not been contracted, the new set is appended to + the end of this list + idx_removed : set + Indices removed from the entire contraction + idx_contraction : set + The indices used in the current contraction + + Examples + -------- + + # A simple dot product test case + >>> pos = (0, 1) + >>> isets = [set('ab'), set('bc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) + + # A more complex case with additional terms in the contraction + >>> pos = (0, 2) + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set('ac') + >>> _find_contraction(pos, isets, oset) + ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) + """ + + idx_contract = set() + idx_remain = output_set.copy() + remaining = [] + for ind, value in enumerate(input_sets): + if ind in positions: + idx_contract |= value + else: + remaining.append(value) + idx_remain |= value + + new_result = idx_remain & idx_contract + idx_removed = (idx_contract - new_result) + remaining.append(new_result) + + return (new_result, remaining, idx_removed, idx_contract) + + +def _optimal_path(input_sets, output_set, idx_dict, memory_limit): + """ + Computes all possible pair contractions, sieves the results based + on ``memory_limit`` and returns the lowest cost path. This algorithm + scales factorial with respect to the elements in the list ``input_sets``. 
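These private helpers feed the public `np.einsum_path`, which is the easiest way to see the cost model in action (the exact wording of the step report varies by NumPy version):

```python
import numpy as np

a = np.random.rand(8, 16)
b = np.random.rand(16, 4)
c = np.random.rand(4, 32)

# 'optimal' uses the exhaustive search above; 'greedy' the pairwise heuristic below
path, info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='optimal')
print(path)   # e.g. ['einsum_path', (0, 1), (0, 1)]
print(info)   # per-step scaling and FLOP estimates derived from the cost helpers
```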
+ + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The optimal contraction order within the memory limit constraint. + + Examples + -------- + >>> isets = [set('abd'), set('ac'), set('bdc')] + >>> oset = set() + >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} + >>> _optimal_path(isets, oset, idx_sizes, 5000) + [(0, 2), (0, 1)] + """ + + full_results = [(0, [], input_sets)] + for iteration in range(len(input_sets) - 1): + iter_results = [] + + # Compute all unique pairs + for curr in full_results: + cost, positions, remaining = curr + for con in itertools.combinations( + range(len(input_sets) - iteration), 2 + ): + + # Find the contraction + cont = _find_contraction(con, remaining, output_set) + new_result, new_input_sets, idx_removed, idx_contract = cont + + # Sieve the results based on memory_limit + new_size = _compute_size_by_dict(new_result, idx_dict) + if new_size > memory_limit: + continue + + # Build (total_cost, positions, indices_remaining) + total_cost = cost + _flop_count( + idx_contract, idx_removed, len(con), idx_dict + ) + new_pos = positions + [con] + iter_results.append((total_cost, new_pos, new_input_sets)) + + # Update combinatorial list, if we did not find anything return best + # path + remaining contractions + if iter_results: + full_results = iter_results + else: + path = min(full_results, key=lambda x: x[0])[1] + path += [tuple(range(len(input_sets) - iteration))] + return path + + # If we have not found anything return single einsum contraction + if len(full_results) == 0: + return [tuple(range(len(input_sets)))] + + path = min(full_results, key=lambda x: x[0])[1] + return path + +def _parse_possible_contraction( + positions, input_sets, output_set, idx_dict, + memory_limit, path_cost, naive_cost + ): + """Compute the cost (removed size + flops) and resultant indices for + performing the contraction specified by ``positions``. + + Parameters + ---------- + positions : tuple of int + The locations of the proposed tensors to contract. + input_sets : list of sets + The indices found on each tensors. + output_set : set + The output indices of the expression. + idx_dict : dict + Mapping of each index to its size. + memory_limit : int + The total allowed size for an intermediary tensor. + path_cost : int + The contraction cost so far. + naive_cost : int + The cost of the unoptimized expression. + + Returns + ------- + cost : (int, int) + A tuple containing the size of any indices removed, and the flop cost. + positions : tuple of int + The locations of the proposed tensors to contract. + new_input_sets : list of sets + The resulting new list of indices if this proposed contraction + is performed. 
+ + """ + + # Find the contraction + contract = _find_contraction(positions, input_sets, output_set) + idx_result, new_input_sets, idx_removed, idx_contract = contract + + # Sieve the results based on memory_limit + new_size = _compute_size_by_dict(idx_result, idx_dict) + if new_size > memory_limit: + return None + + # Build sort tuple + old_sizes = ( + _compute_size_by_dict(input_sets[p], idx_dict) for p in positions + ) + removed_size = sum(old_sizes) - new_size + + # NB: removed_size used to be just the size of any removed indices i.e.: + # helpers.compute_size_by_dict(idx_removed, idx_dict) + cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict) + sort = (-removed_size, cost) + + # Sieve based on total cost as well + if (path_cost + cost) > naive_cost: + return None + + # Add contraction to possible choices + return [sort, positions, new_input_sets] + + +def _update_other_results(results, best): + """Update the positions and provisional input_sets of ``results`` + based on performing the contraction result ``best``. Remove any + involving the tensors contracted. + + Parameters + ---------- + results : list + List of contraction results produced by + ``_parse_possible_contraction``. + best : list + The best contraction of ``results`` i.e. the one that + will be performed. + + Returns + ------- + mod_results : list + The list of modified results, updated with outcome of + ``best`` contraction. + """ + + best_con = best[1] + bx, by = best_con + mod_results = [] + + for cost, (x, y), con_sets in results: + + # Ignore results involving tensors just contracted + if x in best_con or y in best_con: + continue + + # Update the input_sets + del con_sets[by - int(by > x) - int(by > y)] + del con_sets[bx - int(bx > x) - int(bx > y)] + con_sets.insert(-1, best[2][-1]) + + # Update the position indices + mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by) + mod_results.append((cost, mod_con, con_sets)) + + return mod_results + +def _greedy_path(input_sets, output_set, idx_dict, memory_limit): + """ + Finds the path by contracting the best pair until the input list is + exhausted. The best pair is found by minimizing the tuple + ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing + matrix multiplication or inner product operations, then Hadamard like + operations, and finally outer operations. Outer products are limited by + ``memory_limit``. This algorithm scales cubically with respect to the + number of elements in the list ``input_sets``. + + Parameters + ---------- + input_sets : list + List of sets that represent the lhs side of the einsum subscript + output_set : set + Set that represents the rhs side of the overall einsum subscript + idx_dict : dictionary + Dictionary of index sizes + memory_limit : int + The maximum number of elements in a temporary array + + Returns + ------- + path : list + The greedy contraction order within the memory limit constraint. 
+
+    Examples
+    --------
+    >>> isets = [set('abd'), set('ac'), set('bdc')]
+    >>> oset = set()
+    >>> idx_sizes = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
+    >>> _greedy_path(isets, oset, idx_sizes, 5000)
+    [(0, 2), (0, 1)]
+    """
+
+    # Handle trivial cases that leaked through
+    if len(input_sets) == 1:
+        return [(0,)]
+    elif len(input_sets) == 2:
+        return [(0, 1)]
+
+    # Build up a naive cost
+    contract = _find_contraction(
+        range(len(input_sets)), input_sets, output_set
+    )
+    idx_result, new_input_sets, idx_removed, idx_contract = contract
+    naive_cost = _flop_count(
+        idx_contract, idx_removed, len(input_sets), idx_dict
+    )
+
+    # Initially iterate over all pairs
+    comb_iter = itertools.combinations(range(len(input_sets)), 2)
+    known_contractions = []
+
+    path_cost = 0
+    path = []
+
+    for iteration in range(len(input_sets) - 1):
+
+        # Iterate over all pairs on the first step, only previously
+        # found pairs on subsequent steps
+        for positions in comb_iter:
+
+            # Always initially ignore outer products
+            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
+                continue
+
+            result = _parse_possible_contraction(
+                positions, input_sets, output_set, idx_dict,
+                memory_limit, path_cost, naive_cost
+            )
+            if result is not None:
+                known_contractions.append(result)
+
+        # If we do not have an inner contraction, rescan pairs
+        # including outer products
+        if len(known_contractions) == 0:
+
+            # Then check the outer products
+            for positions in itertools.combinations(
+                range(len(input_sets)), 2
+            ):
+                result = _parse_possible_contraction(
+                    positions, input_sets, output_set, idx_dict,
+                    memory_limit, path_cost, naive_cost
+                )
+                if result is not None:
+                    known_contractions.append(result)
+
+            # If we still did not find any remaining contractions,
+            # default back to einsum-like behavior
+            if len(known_contractions) == 0:
+                path.append(tuple(range(len(input_sets))))
+                break
+
+        # Pick the best candidate by its (-removed_size, cost) sort key
+        best = min(known_contractions, key=lambda x: x[0])
+
+        # Now propagate as many unused contractions as possible
+        # to the next iteration
+        known_contractions = _update_other_results(known_contractions, best)
+
+        # Next iteration only compute contractions with the new tensor
+        # All other contractions have been accounted for
+        input_sets = best[2]
+        new_tensor_pos = len(input_sets) - 1
+        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
+
+        # Update path and total cost
+        path.append(best[1])
+        path_cost += best[0][1]
+
+    return path
+
+
+def _can_dot(inputs, result, idx_removed):
+    """
+    Checks if we can use a BLAS (np.tensordot) call and whether it is
+    beneficial to do so.
+
+    Parameters
+    ----------
+    inputs : list of str
+        Specifies the subscripts for summation.
+    result : str
+        Resulting summation.
+    idx_removed : set
+        Indices that are removed in the summation
+
+    Returns
+    -------
+    type : bool
+        True if BLAS should and can be used, else False
+
+    Notes
+    -----
+    If the operation is BLAS level 1 or 2 and the data is not already
+    aligned, we default back to einsum, as the memory movement needed to
+    copy is more costly than the operation itself.
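+    For example, ``_can_dot(['ijk', 'klm'], 'ijlm', set('k'))`` hits the
+    no-transpose GEMM case: with ``rs = 1``, the last index of the left
+    operand lines up with the first index of the right operand.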
+ + + Examples + -------- + + # Standard GEMM operation + >>> _can_dot(['ij', 'jk'], 'ik', set('j')) + True + + # Can use the standard BLAS, but requires odd data movement + >>> _can_dot(['ijj', 'jk'], 'ik', set('j')) + False + + # DDOT where the memory is not aligned + >>> _can_dot(['ijk', 'ikj'], '', set('ijk')) + False + + """ + + # All `dot` calls remove indices + if len(idx_removed) == 0: + return False + + # BLAS can only handle two operands + if len(inputs) != 2: + return False + + input_left, input_right = inputs + + for c in set(input_left + input_right): + # can't deal with repeated indices on same input or more than 2 total + nl, nr = input_left.count(c), input_right.count(c) + if (nl > 1) or (nr > 1) or (nl + nr > 2): + return False + + # can't do implicit summation or dimension collapse e.g. + # "ab,bc->c" (implicitly sum over 'a') + # "ab,ca->ca" (take diagonal of 'a') + if nl + nr - 1 == int(c in result): + return False + + # Build a few temporaries + set_left = set(input_left) + set_right = set(input_right) + keep_left = set_left - idx_removed + keep_right = set_right - idx_removed + rs = len(idx_removed) + + # At this point we are a DOT, GEMV, or GEMM operation + + # Handle inner products + + # DDOT with aligned data + if input_left == input_right: + return True + + # DDOT without aligned data (better to use einsum) + if set_left == set_right: + return False + + # Handle the 4 possible (aligned) GEMV or GEMM cases + + # GEMM or GEMV no transpose + if input_left[-rs:] == input_right[:rs]: + return True + + # GEMM or GEMV transpose both + if input_left[:rs] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose right + if input_left[-rs:] == input_right[-rs:]: + return True + + # GEMM or GEMV transpose left + if input_left[:rs] == input_right[:rs]: + return True + + # Einsum is faster than GEMV if we have to copy data + if not keep_left or not keep_right: + return False + + # We are a matrix-matrix product, but we need to copy data + return True + + +def _parse_einsum_input(operands): + """ + A reproduction of einsum c side einsum parsing in python. + + Returns + ------- + input_strings : str + Parsed input strings + output_string : str + Parsed output string + operands : list of array_like + The operands to use in the numpy contraction + + Examples + -------- + The operand list is simplified to reduce printing: + + >>> np.random.seed(123) + >>> a = np.random.rand(4, 4) + >>> b = np.random.rand(4, 4, 4) + >>> _parse_einsum_input(('...a,...a->...', a, b)) + ('za,xza', 'xz', [a, b]) # may vary + + >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) + ('za,xza', 'xz', [a, b]) # may vary + """ + + if len(operands) == 0: + raise ValueError("No input operands") + + if isinstance(operands[0], str): + subscripts = operands[0].replace(" ", "") + operands = [asanyarray(v) for v in operands[1:]] + + # Ensure all characters are valid + for s in subscripts: + if s in '.,->': + continue + if s not in einsum_symbols: + raise ValueError(f"Character {s} is not a valid symbol.") + + else: + tmp_operands = list(operands) + operand_list = [] + subscript_list = [] + for p in range(len(operands) // 2): + operand_list.append(tmp_operands.pop(0)) + subscript_list.append(tmp_operands.pop(0)) + + output_list = tmp_operands[-1] if len(tmp_operands) else None + operands = [asanyarray(v) for v in operand_list] + subscripts = "" + last = len(subscript_list) - 1 + for num, sub in enumerate(subscript_list): + for s in sub: + if s is Ellipsis: + subscripts += "..." 
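+                    # (illustrative) in this sublist format a subscript
+                    # like [0, Ellipsis, 1] maps to "a...b" via the
+                    # integer-to-symbol lookup below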
+ else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError( + "For this input type lists must contain " + "either int or Ellipsis" + ) from e + subscripts += einsum_symbols[s] + if num != last: + subscripts += "," + + if output_list is not None: + subscripts += "->" + for s in output_list: + if s is Ellipsis: + subscripts += "..." + else: + try: + s = operator.index(s) + except TypeError as e: + raise TypeError( + "For this input type lists must contain " + "either int or Ellipsis" + ) from e + subscripts += einsum_symbols[s] + # Check for proper "->" + if ("-" in subscripts) or (">" in subscripts): + invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1) + if invalid or (subscripts.count("->") != 1): + raise ValueError("Subscripts can only contain one '->'.") + + # Parse ellipses + if "." in subscripts: + used = subscripts.replace(".", "").replace(",", "").replace("->", "") + unused = list(einsum_symbols_set - set(used)) + ellipse_inds = "".join(unused) + longest = 0 + + if "->" in subscripts: + input_tmp, output_sub = subscripts.split("->") + split_subscripts = input_tmp.split(",") + out_sub = True + else: + split_subscripts = subscripts.split(',') + out_sub = False + + for num, sub in enumerate(split_subscripts): + if "." in sub: + if (sub.count(".") != 3) or (sub.count("...") != 1): + raise ValueError("Invalid Ellipses.") + + # Take into account numerical values + if operands[num].shape == (): + ellipse_count = 0 + else: + ellipse_count = max(operands[num].ndim, 1) + ellipse_count -= (len(sub) - 3) + + if ellipse_count > longest: + longest = ellipse_count + + if ellipse_count < 0: + raise ValueError("Ellipses lengths do not match.") + elif ellipse_count == 0: + split_subscripts[num] = sub.replace('...', '') + else: + rep_inds = ellipse_inds[-ellipse_count:] + split_subscripts[num] = sub.replace('...', rep_inds) + + subscripts = ",".join(split_subscripts) + if longest == 0: + out_ellipse = "" + else: + out_ellipse = ellipse_inds[-longest:] + + if out_sub: + subscripts += "->" + output_sub.replace("...", out_ellipse) + else: + # Special care for outputless ellipses + output_subscript = "" + tmp_subscripts = subscripts.replace(",", "") + for s in sorted(set(tmp_subscripts)): + if s not in (einsum_symbols): + raise ValueError(f"Character {s} is not a valid symbol.") + if tmp_subscripts.count(s) == 1: + output_subscript += s + normal_inds = ''.join(sorted(set(output_subscript) - + set(out_ellipse))) + + subscripts += "->" + out_ellipse + normal_inds + + # Build output string if does not exist + if "->" in subscripts: + input_subscripts, output_subscript = subscripts.split("->") + else: + input_subscripts = subscripts + # Build output subscripts + tmp_subscripts = subscripts.replace(",", "") + output_subscript = "" + for s in sorted(set(tmp_subscripts)): + if s not in einsum_symbols: + raise ValueError(f"Character {s} is not a valid symbol.") + if tmp_subscripts.count(s) == 1: + output_subscript += s + + # Make sure output subscripts are in the input + for char in output_subscript: + if output_subscript.count(char) != 1: + raise ValueError("Output character %s appeared more than once in " + "the output." 
% char) + if char not in input_subscripts: + raise ValueError(f"Output character {char} did not appear in the input") + + # Make sure number operands is equivalent to the number of terms + if len(input_subscripts.split(',')) != len(operands): + raise ValueError("Number of einsum subscripts must be equal to the " + "number of operands.") + + return (input_subscripts, output_subscript, operands) + + +def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None): + # NOTE: technically, we should only dispatch on array-like arguments, not + # subscripts (given as strings). But separating operands into + # arrays/subscripts is a little tricky/slow (given einsum's two supported + # signatures), so as a practical shortcut we dispatch on everything. + # Strings will be ignored for dispatching since they don't define + # __array_function__. + return operands + + +@array_function_dispatch(_einsum_path_dispatcher, module='numpy') +def einsum_path(*operands, optimize='greedy', einsum_call=False): + """ + einsum_path(subscripts, *operands, optimize='greedy') + + Evaluates the lowest cost contraction order for an einsum expression by + considering the creation of intermediate arrays. + + Parameters + ---------- + subscripts : str + Specifies the subscripts for summation. + *operands : list of array_like + These are the arrays for the operation. + optimize : {bool, list, tuple, 'greedy', 'optimal'} + Choose the type of path. If a tuple is provided, the second argument is + assumed to be the maximum intermediate size created. If only a single + argument is provided the largest input or output array size is used + as a maximum intermediate size. + + * if a list is given that starts with ``einsum_path``, uses this as the + contraction path + * if False no optimization is taken + * if True defaults to the 'greedy' algorithm + * 'optimal' An algorithm that combinatorially explores all possible + ways of contracting the listed tensors and chooses the least costly + path. Scales exponentially with the number of terms in the + contraction. + * 'greedy' An algorithm that chooses the best pair contraction + at each step. Effectively, this algorithm searches the largest inner, + Hadamard, and then outer products at each step. Scales cubically with + the number of terms in the contraction. Equivalent to the 'optimal' + path for most contractions. + + Default is 'greedy'. + + Returns + ------- + path : list of tuples + A list representation of the einsum path. + string_repr : str + A printable representation of the einsum path. + + Notes + ----- + The resulting path indicates which terms of the input contraction should be + contracted first, the result of this contraction is then appended to the + end of the contraction list. This list can then be iterated over until all + intermediate contractions are complete. + + See Also + -------- + einsum, linalg.multi_dot + + Examples + -------- + + We can begin with a chain dot example. In this case, it is optimal to + contract the ``b`` and ``c`` tensors first as represented by the first + element of the path ``(1, 2)``. The resulting tensor is added to the end + of the contraction and the remaining contraction ``(0, 1)`` is then + completed. 
+ + >>> np.random.seed(123) + >>> a = np.random.rand(2, 2) + >>> b = np.random.rand(2, 5) + >>> c = np.random.rand(5, 2) + >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy') + >>> print(path_info[0]) + ['einsum_path', (1, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ij,jk,kl->il # may vary + Naive scaling: 4 + Optimized scaling: 3 + Naive FLOP count: 1.600e+02 + Optimized FLOP count: 5.600e+01 + Theoretical speedup: 2.857 + Largest intermediate: 4.000e+00 elements + ------------------------------------------------------------------------- + scaling current remaining + ------------------------------------------------------------------------- + 3 kl,jk->jl ij,jl->il + 3 jl,ij->il il->il + + + A more complex index transformation example. + + >>> I = np.random.rand(10, 10, 10, 10) + >>> C = np.random.rand(10, 10) + >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, + ... optimize='greedy') + + >>> print(path_info[0]) + ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] + >>> print(path_info[1]) + Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary + Naive scaling: 8 + Optimized scaling: 5 + Naive FLOP count: 8.000e+08 + Optimized FLOP count: 8.000e+05 + Theoretical speedup: 1000.000 + Largest intermediate: 1.000e+04 elements + -------------------------------------------------------------------------- + scaling current remaining + -------------------------------------------------------------------------- + 5 abcd,ea->bcde fb,gc,hd,bcde->efgh + 5 bcde,fb->cdef gc,hd,cdef->efgh + 5 cdef,gc->defg hd,defg->efgh + 5 defg,hd->efgh efgh->efgh + """ + + # Figure out what the path really is + path_type = optimize + if path_type is True: + path_type = 'greedy' + if path_type is None: + path_type = False + + explicit_einsum_path = False + memory_limit = None + + # No optimization or a named path algorithm + if (path_type is False) or isinstance(path_type, str): + pass + + # Given an explicit path + elif len(path_type) and (path_type[0] == 'einsum_path'): + explicit_einsum_path = True + + # Path tuple with memory limit + elif ((len(path_type) == 2) and isinstance(path_type[0], str) and + isinstance(path_type[1], (int, float))): + memory_limit = int(path_type[1]) + path_type = path_type[0] + + else: + raise TypeError(f"Did not understand the path: {str(path_type)}") + + # Hidden option, only einsum should call this + einsum_call_arg = einsum_call + + # Python side parsing + input_subscripts, output_subscript, operands = ( + _parse_einsum_input(operands) + ) + + # Build a few useful list and sets + input_list = input_subscripts.split(',') + input_sets = [set(x) for x in input_list] + output_set = set(output_subscript) + indices = set(input_subscripts.replace(',', '')) + + # Get length of each unique dimension and ensure all dimensions are correct + dimension_dict = {} + broadcast_indices = [[] for x in range(len(input_list))] + for tnum, term in enumerate(input_list): + sh = operands[tnum].shape + if len(sh) != len(term): + raise ValueError("Einstein sum subscript %s does not contain the " + "correct number of indices for operand %d." 
+ % (input_subscripts[tnum], tnum)) + for cnum, char in enumerate(term): + dim = sh[cnum] + + # Build out broadcast indices + if dim == 1: + broadcast_indices[tnum].append(char) + + if char in dimension_dict.keys(): + # For broadcasting cases we always want the largest dim size + if dimension_dict[char] == 1: + dimension_dict[char] = dim + elif dim not in (1, dimension_dict[char]): + raise ValueError("Size of label '%s' for operand %d (%d) " + "does not match previous terms (%d)." + % (char, tnum, dimension_dict[char], dim)) + else: + dimension_dict[char] = dim + + # Convert broadcast inds to sets + broadcast_indices = [set(x) for x in broadcast_indices] + + # Compute size of each input array plus the output array + size_list = [_compute_size_by_dict(term, dimension_dict) + for term in input_list + [output_subscript]] + max_size = max(size_list) + + if memory_limit is None: + memory_arg = max_size + else: + memory_arg = memory_limit + + # Compute naive cost + # This isn't quite right, need to look into exactly how einsum does this + inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0 + naive_cost = _flop_count( + indices, inner_product, len(input_list), dimension_dict + ) + + # Compute the path + if explicit_einsum_path: + path = path_type[1:] + elif ( + (path_type is False) + or (len(input_list) in [1, 2]) + or (indices == output_set) + ): + # Nothing to be optimized, leave it to einsum + path = [tuple(range(len(input_list)))] + elif path_type == "greedy": + path = _greedy_path( + input_sets, output_set, dimension_dict, memory_arg + ) + elif path_type == "optimal": + path = _optimal_path( + input_sets, output_set, dimension_dict, memory_arg + ) + else: + raise KeyError("Path name %s not found", path_type) + + cost_list, scale_list, size_list, contraction_list = [], [], [], [] + + # Build contraction tuple (positions, gemm, einsum_str, remaining) + for cnum, contract_inds in enumerate(path): + # Make sure we remove inds from right to left + contract_inds = tuple(sorted(contract_inds, reverse=True)) + + contract = _find_contraction(contract_inds, input_sets, output_set) + out_inds, input_sets, idx_removed, idx_contract = contract + + cost = _flop_count( + idx_contract, idx_removed, len(contract_inds), dimension_dict + ) + cost_list.append(cost) + scale_list.append(len(idx_contract)) + size_list.append(_compute_size_by_dict(out_inds, dimension_dict)) + + bcast = set() + tmp_inputs = [] + for x in contract_inds: + tmp_inputs.append(input_list.pop(x)) + bcast |= broadcast_indices.pop(x) + + new_bcast_inds = bcast - idx_removed + + # If we're broadcasting, nix blas + if not len(idx_removed & bcast): + do_blas = _can_dot(tmp_inputs, out_inds, idx_removed) + else: + do_blas = False + + # Last contraction + if (cnum - len(path)) == -1: + idx_result = output_subscript + else: + sort_result = [(dimension_dict[ind], ind) for ind in out_inds] + idx_result = "".join([x[1] for x in sorted(sort_result)]) + + input_list.append(idx_result) + broadcast_indices.append(new_bcast_inds) + einsum_str = ",".join(tmp_inputs) + "->" + idx_result + + contraction = ( + contract_inds, idx_removed, einsum_str, input_list[:], do_blas + ) + contraction_list.append(contraction) + + opt_cost = sum(cost_list) + 1 + + if len(input_list) != 1: + # Explicit "einsum_path" is usually trusted, but we detect this kind of + # mistake in order to prevent from returning an intermediate value. 
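+        # (illustrative) e.g. a user-supplied ['einsum_path', (0, 1)] with
+        # three operands leaves two entries in input_list and lands here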
+        raise RuntimeError(
+            f"Invalid einsum_path is specified: {len(input_list) - 1} more "
+            "operands have to be contracted.")
+
+    if einsum_call_arg:
+        return (operands, contraction_list)
+
+    # Return the path along with a nice string representation
+    overall_contraction = input_subscripts + "->" + output_subscript
+    header = ("scaling", "current", "remaining")
+
+    speedup = naive_cost / opt_cost
+    max_i = max(size_list)
+
+    path_print = f"  Complete contraction:  {overall_contraction}\n"
+    path_print += f"         Naive scaling:  {len(indices)}\n"
+    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
+    path_print += f"      Naive FLOP count:  {naive_cost:.3e}\n"
+    path_print += f"  Optimized FLOP count:  {opt_cost:.3e}\n"
+    path_print += f"   Theoretical speedup:  {speedup:3.3f}\n"
+    path_print += f"  Largest intermediate:  {max_i:.3e} elements\n"
+    path_print += "-" * 74 + "\n"
+    path_print += "%6s %24s %40s\n" % header
+    path_print += "-" * 74
+
+    for n, contraction in enumerate(contraction_list):
+        inds, idx_rm, einsum_str, remaining, blas = contraction
+        remaining_str = ",".join(remaining) + "->" + output_subscript
+        path_run = (scale_list[n], einsum_str, remaining_str)
+        path_print += "\n%4d    %24s %40s" % path_run
+
+    path = ['einsum_path'] + path
+    return (path, path_print)
+
+
+def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
+    # Arguably we dispatch on more arguments than we really should; see note
+    # in _einsum_path_dispatcher for why.
+    yield from operands
+    yield out
+
+
+# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
+def einsum(*operands, out=None, optimize=False, **kwargs):
+    """
+    einsum(subscripts, *operands, out=None, dtype=None, order='K',
+           casting='safe', optimize=False)
+
+    Evaluates the Einstein summation convention on the operands.
+
+    Using the Einstein summation convention, many common multi-dimensional,
+    linear algebraic array operations can be represented in a simple fashion.
+    In *implicit* mode `einsum` computes these values.
+
+    In *explicit* mode, `einsum` provides further flexibility to compute
+    other array operations that might not be considered classical Einstein
+    summation operations, by disabling, or forcing summation over specified
+    subscript labels.
+
+    See the notes and examples for clarification.
+
+    Parameters
+    ----------
+    subscripts : str
+        Specifies the subscripts for summation as a comma-separated list of
+        subscript labels. An implicit (classical Einstein summation)
+        calculation is performed unless the explicit indicator '->' is
+        included as well as subscript labels of the precise output form.
+    operands : list of array_like
+        These are the arrays for the operation.
+    out : ndarray, optional
+        If provided, the calculation is done into this array.
+    dtype : {data-type, None}, optional
+        If provided, forces the calculation to use the data type specified.
+        Note that you may have to also give a more liberal `casting`
+        parameter to allow the conversions. Default is None.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Controls the memory layout of the output. 'C' means it should
+        be C contiguous. 'F' means it should be Fortran contiguous,
+        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
+        'K' means it should be as close to the layout of the inputs as
+        is possible, including arbitrarily permuted axes.
+        Default is 'K'.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        Controls what kind of data casting may occur.  Setting this to
+        'unsafe' is not recommended, as it can adversely affect
+        accumulations.
+
+        * 'no' means the data types should not be cast at all.
+        * 'equiv' means only byte-order changes are allowed.
+        * 'safe' means only casts which can preserve values are allowed.
+        * 'same_kind' means only safe casts or casts within a kind,
+          like float64 to float32, are allowed.
+        * 'unsafe' means any data conversions may be done.
+
+        Default is 'safe'.
+    optimize : {False, True, 'greedy', 'optimal'}, optional
+        Controls if intermediate optimization should occur. No optimization
+        will occur if False, and True will default to the 'greedy' algorithm.
+        Also accepts an explicit contraction list from the ``np.einsum_path``
+        function. See ``np.einsum_path`` for more details. Defaults to False.
+
+    Returns
+    -------
+    output : ndarray
+        The calculation based on the Einstein summation convention.
+
+    See Also
+    --------
+    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+    einops :
+        A similar verbose interface is provided by the
+        `einops <https://github.com/arogozhnikov/einops>`_ package to cover
+        additional operations: transpose, reshape/flatten, repeat/tile,
+        squeeze/unsqueeze and reductions.
+    opt_einsum :
+        The `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
+        package optimizes contraction order for einsum-like expressions
+        in a backend-agnostic manner.
+
+    Notes
+    -----
+    The Einstein summation convention can be used to compute
+    many multi-dimensional, linear algebraic array operations. `einsum`
+    provides a succinct way of representing these.
+
+    A non-exhaustive list of these operations,
+    which can be computed by `einsum`, is shown below along with examples:
+
+    * Trace of an array, :py:func:`numpy.trace`.
+    * Return a diagonal, :py:func:`numpy.diag`.
+    * Array axis summations, :py:func:`numpy.sum`.
+    * Transpositions and permutations, :py:func:`numpy.transpose`.
+    * Matrix multiplication and dot product, :py:func:`numpy.matmul`
+      :py:func:`numpy.dot`.
+    * Vector inner and outer products, :py:func:`numpy.inner`
+      :py:func:`numpy.outer`.
+    * Broadcasting, element-wise and scalar multiplication,
+      :py:func:`numpy.multiply`.
+    * Tensor contractions, :py:func:`numpy.tensordot`.
+    * Chained array operations, in efficient calculation order,
+      :py:func:`numpy.einsum_path`.
+
+    The subscripts string is a comma-separated list of subscript labels,
+    where each label refers to a dimension of the corresponding operand.
+    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+    appears only once, it is not summed, so ``np.einsum('i', a)``
+    produces a view of ``a`` with no changes. A further example
+    ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication
+    and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`.
+    Repeated subscript labels in one operand take the diagonal.
+    For example, ``np.einsum('ii', a)`` is equivalent to
+    :py:func:`np.trace(a) <numpy.trace>`.
+
+    In *implicit mode*, the chosen subscripts are important
+    since the axes of the output are reordered alphabetically. This
+    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+    ``np.einsum('ji', a)`` takes its transpose. Additionally,
+    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while
+    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+    multiplication since subscript 'h' precedes subscript 'i'.
+
+    In *explicit mode* the output can be directly controlled by
+    specifying output subscript labels. This requires the
+    identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since + summing can be disabled or forced when required. The call + ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) ` + if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)`` + is like :py:func:`np.diag(a) ` if ``a`` is a square 2-D array. + The difference is that `einsum` does not allow broadcasting by default. + Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the + order of the output subscript labels and therefore returns matrix + multiplication, unlike the example above in implicit mode. + + To enable and control broadcasting, use an ellipsis. Default + NumPy-style broadcasting is done by adding an ellipsis + to the left of each term, like ``np.einsum('...ii->...i', a)``. + ``np.einsum('...i->...', a)`` is like + :py:func:`np.sum(a, axis=-1) ` for array ``a`` of any shape. + To take the trace along the first and last axes, + you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix + product with the left-most indices instead of rightmost, one can do + ``np.einsum('ij...,jk...->ik...', a, b)``. + + When there is only one operand, no axes are summed, and no output + parameter is provided, a view into the operand is returned instead + of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` + produces a view (changed in version 1.10.0). + + `einsum` also provides an alternative way to provide the subscripts and + operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. + If the output shape is not provided in this format `einsum` will be + calculated in implicit mode, otherwise it will be performed explicitly. + The examples below have corresponding `einsum` calls with the two + parameter methods. + + Views returned from einsum are now writeable whenever the input array + is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now + have the same effect as :py:func:`np.swapaxes(a, 0, 2) ` + and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal + of a 2D array. + + Added the ``optimize`` argument which will optimize the contraction order + of an einsum expression. For a contraction with three or more operands + this can greatly increase the computational efficiency at the cost of + a larger memory footprint during computation. + + Typically a 'greedy' algorithm is applied which empirical tests have shown + returns the optimal path in the majority of cases. In some cases 'optimal' + will return the superlative path through a more expensive, exhaustive + search. For iterative calculations it may be advisable to calculate + the optimal path once and reuse that path by supplying it as an argument. + An example is given below. + + See :py:func:`numpy.einsum_path` for more details. 
+ + Examples + -------- + >>> a = np.arange(25).reshape(5,5) + >>> b = np.arange(5) + >>> c = np.arange(6).reshape(2,3) + + Trace of a matrix: + + >>> np.einsum('ii', a) + 60 + >>> np.einsum(a, [0,0]) + 60 + >>> np.trace(a) + 60 + + Extract the diagonal (requires explicit form): + + >>> np.einsum('ii->i', a) + array([ 0, 6, 12, 18, 24]) + >>> np.einsum(a, [0,0], [0]) + array([ 0, 6, 12, 18, 24]) + >>> np.diag(a) + array([ 0, 6, 12, 18, 24]) + + Sum over an axis (requires explicit form): + + >>> np.einsum('ij->i', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [0,1], [0]) + array([ 10, 35, 60, 85, 110]) + >>> np.sum(a, axis=1) + array([ 10, 35, 60, 85, 110]) + + For higher dimensional arrays summing a single axis can be done + with ellipsis: + + >>> np.einsum('...j->...', a) + array([ 10, 35, 60, 85, 110]) + >>> np.einsum(a, [Ellipsis,1], [Ellipsis]) + array([ 10, 35, 60, 85, 110]) + + Compute a matrix transpose, or reorder any number of axes: + + >>> np.einsum('ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum('ij->ji', c) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.einsum(c, [1,0]) + array([[0, 3], + [1, 4], + [2, 5]]) + >>> np.transpose(c) + array([[0, 3], + [1, 4], + [2, 5]]) + + Vector inner products: + + >>> np.einsum('i,i', b, b) + 30 + >>> np.einsum(b, [0], b, [0]) + 30 + >>> np.inner(b,b) + 30 + + Matrix vector multiplication: + + >>> np.einsum('ij,j', a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum(a, [0,1], b, [1]) + array([ 30, 80, 130, 180, 230]) + >>> np.dot(a, b) + array([ 30, 80, 130, 180, 230]) + >>> np.einsum('...j,j', a, b) + array([ 30, 80, 130, 180, 230]) + + Broadcasting and scalar multiplication: + + >>> np.einsum('..., ...', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(',ij', 3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.einsum(3, [Ellipsis], c, [Ellipsis]) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + >>> np.multiply(3, c) + array([[ 0, 3, 6], + [ 9, 12, 15]]) + + Vector outer product: + + >>> np.einsum('i,j', np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.einsum(np.arange(2)+1, [0], b, [1]) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + >>> np.outer(np.arange(2)+1, b) + array([[0, 1, 2, 3, 4], + [0, 2, 4, 6, 8]]) + + Tensor contraction: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> np.einsum('ijk,jil->kl', a, b) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + >>> np.tensordot(a,b, axes=([1,0],[0,1])) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + Writeable returned arrays (since version 1.10.0): + + >>> a = np.zeros((3, 3)) + >>> np.einsum('ii->i', a)[:] = 1 + >>> a + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Example of ellipsis use: + + >>> a = np.arange(6).reshape((3,2)) + >>> b = np.arange(12).reshape((4,3)) + >>> np.einsum('ki,jk->ij', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('ki,...k->i...', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + >>> np.einsum('k...,jk', a, b) + array([[10, 28, 46, 64], + [13, 40, 67, 94]]) + + Chained array operations. 
For more complicated contractions, speed ups + might be achieved by repeatedly computing a 'greedy' path or pre-computing + the 'optimal' path and repeatedly applying it, using an `einsum_path` + insertion (since version 1.12.0). Performance improvements can be + particularly significant with larger arrays: + + >>> a = np.ones(64).reshape(2,4,8) + + Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) + + Sub-optimal `einsum` (due to repeated path calculation time): ~330ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, + ... optimize='optimal') + + Greedy `einsum` (faster optimal path approximation): ~160ms + + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') + + Optimal `einsum` (best usage pattern in some use cases): ~110ms + + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, + ... optimize='optimal')[0] + >>> for iteration in range(500): + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) + + """ + # Special handling if out is specified + specified_out = out is not None + + # If no optimization, run pure einsum + if optimize is False: + if specified_out: + kwargs['out'] = out + return c_einsum(*operands, **kwargs) + + # Check the kwargs to avoid a more cryptic error later, without having to + # repeat default values here + valid_einsum_kwargs = ['dtype', 'order', 'casting'] + unknown_kwargs = [k for (k, v) in kwargs.items() if + k not in valid_einsum_kwargs] + if len(unknown_kwargs): + raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}") + + # Build the contraction list and operand + operands, contraction_list = einsum_path(*operands, optimize=optimize, + einsum_call=True) + + # Handle order kwarg for output array, c_einsum allows mixed case + output_order = kwargs.pop('order', 'K') + if output_order.upper() == 'A': + if all(arr.flags.f_contiguous for arr in operands): + output_order = 'F' + else: + output_order = 'C' + + # Start contraction loop + for num, contraction in enumerate(contraction_list): + inds, idx_rm, einsum_str, remaining, blas = contraction + tmp_operands = [operands.pop(x) for x in inds] + + # Do we need to deal with the output? + handle_out = specified_out and ((num + 1) == len(contraction_list)) + + # Call tensordot if still possible + if blas: + # Checks have already been handled + input_str, results_index = einsum_str.split('->') + input_left, input_right = input_str.split(',') + + tensor_result = input_left + input_right + for s in idx_rm: + tensor_result = tensor_result.replace(s, "") + + # Find indices to contract over + left_pos, right_pos = [], [] + for s in sorted(idx_rm): + left_pos.append(input_left.find(s)) + right_pos.append(input_right.find(s)) + + # Contract! 
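+            # (illustrative) for 'ij,jk->ik': idx_rm={'j'}, so left_pos=[1]
+            # and right_pos=[0], contracting the last axis of the left
+            # operand with the first axis of the right operand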
+ new_view = tensordot( + *tmp_operands, axes=(tuple(left_pos), tuple(right_pos)) + ) + + # Build a new view if needed + if (tensor_result != results_index) or handle_out: + if handle_out: + kwargs["out"] = out + new_view = c_einsum( + tensor_result + '->' + results_index, new_view, **kwargs + ) + + # Call einsum + else: + # If out was specified + if handle_out: + kwargs["out"] = out + + # Do the contraction + new_view = c_einsum(einsum_str, *tmp_operands, **kwargs) + + # Append new items and dereference what we can + operands.append(new_view) + del tmp_operands, new_view + + if specified_out: + return out + else: + return asanyarray(operands[0], order=output_order) diff --git a/python/numpy/_core/einsumfunc.pyi b/python/numpy/_core/einsumfunc.pyi new file mode 100644 index 000000000..9653a26dc --- /dev/null +++ b/python/numpy/_core/einsumfunc.pyi @@ -0,0 +1,184 @@ +from collections.abc import Sequence +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +from numpy import _OrderKACF, number +from numpy._typing import ( + NDArray, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _DTypeLikeBool, + _DTypeLikeComplex, + _DTypeLikeComplex_co, + _DTypeLikeFloat, + _DTypeLikeInt, + _DTypeLikeObject, + _DTypeLikeUInt, +) + +__all__ = ["einsum", "einsum_path"] + +_ArrayT = TypeVar( + "_ArrayT", + bound=NDArray[np.bool | number], +) + +_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None +_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"] +_CastingUnsafe: TypeAlias = Literal["unsafe"] + +# TODO: Properly handle the `casting`-based combinatorics +# TODO: We need to evaluate the content `__subscripts` in order +# to identify whether or an array or scalar is returned. At a cursory +# glance this seems like something that can quite easily be done with +# a mypy plugin. +# Something like `is_scalar = bool(__subscripts.partition("->")[-1])` +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeBool_co, + out: None = ..., + dtype: _DTypeLikeBool | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeUInt_co, + out: None = ..., + dtype: _DTypeLikeUInt | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeInt_co, + out: None = ..., + dtype: _DTypeLikeInt | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeFloat_co, + out: None = ..., + dtype: _DTypeLikeFloat | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co, + out: None = ..., + dtype: _DTypeLikeComplex | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... 
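+# NOTE: in the overloads below, `casting: _CastingUnsafe` lifts the pairing
+# between the operand array-like kinds and the `dtype` kind, so `operands`
+# is typed as `Any`.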
+@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + casting: _CastingUnsafe, + dtype: _DTypeLikeComplex_co | None = ..., + out: None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co, + out: _ArrayT, + dtype: _DTypeLikeComplex_co | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayT: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + out: _ArrayT, + casting: _CastingUnsafe, + dtype: _DTypeLikeComplex_co | None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayT: ... + +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeObject_co, + out: None = ..., + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + casting: _CastingUnsafe, + dtype: _DTypeLikeObject | None = ..., + out: None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> Any: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeObject_co, + out: _ArrayT, + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + casting: _CastingSafe = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayT: ... +@overload +def einsum( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: Any, + out: _ArrayT, + casting: _CastingUnsafe, + dtype: _DTypeLikeObject | None = ..., + order: _OrderKACF = ..., + optimize: _OptimizeKind = ..., +) -> _ArrayT: ... + +# NOTE: `einsum_call` is a hidden kwarg unavailable for public use. +# It is therefore excluded from the signatures below. +# NOTE: In practice the list consists of a `str` (first element) +# and a variable number of integer tuples. +def einsum_path( + subscripts: str | _ArrayLikeInt_co, + /, + *operands: _ArrayLikeComplex_co | _DTypeLikeObject, + optimize: _OptimizeKind = "greedy", + einsum_call: Literal[False] = False, +) -> tuple[list[Any], str]: ... diff --git a/python/numpy/_core/fromnumeric.py b/python/numpy/_core/fromnumeric.py new file mode 100644 index 000000000..e20d774d0 --- /dev/null +++ b/python/numpy/_core/fromnumeric.py @@ -0,0 +1,4269 @@ +"""Module containing non-deprecated functions borrowed from Numeric. + +""" +import functools +import types +import warnings + +import numpy as np +from numpy._utils import set_module + +from . import _methods, overrides +from . import multiarray as mu +from . import numerictypes as nt +from . 
import umath as um +from ._multiarray_umath import _array_converter +from .multiarray import asanyarray, asarray, concatenate + +_dt_ = nt.sctype2char + +# functions that are methods +__all__ = [ + 'all', 'amax', 'amin', 'any', 'argmax', + 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', + 'compress', 'cumprod', 'cumsum', 'cumulative_prod', 'cumulative_sum', + 'diagonal', 'mean', 'max', 'min', 'matrix_transpose', + 'ndim', 'nonzero', 'partition', 'prod', 'ptp', 'put', + 'ravel', 'repeat', 'reshape', 'resize', 'round', + 'searchsorted', 'shape', 'size', 'sort', 'squeeze', + 'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var', +] + +_gentype = types.GeneratorType +# save away Python sum +_sum_ = sum + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +# functions that are now methods +def _wrapit(obj, method, *args, **kwds): + conv = _array_converter(obj) + # As this already tried the method, subok is maybe quite reasonable here + # but this follows what was done before. TODO: revisit this. + arr, = conv.as_arrays(subok=False) + result = getattr(arr, method)(*args, **kwds) + + return conv.wrap(result, to_scalar=False) + + +def _wrapfunc(obj, method, *args, **kwds): + bound = getattr(obj, method, None) + if bound is None: + return _wrapit(obj, method, *args, **kwds) + + try: + return bound(*args, **kwds) + except TypeError: + # A TypeError occurs if the object does have such a method in its + # class, but its signature is not identical to that of NumPy's. This + # situation has occurred in the case of a downstream library like + # 'pandas'. + # + # Call _wrapit from within the except clause to ensure a potential + # exception has a traceback chain. + return _wrapit(obj, method, *args, **kwds) + + +def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs): + passkwargs = {k: v for k, v in kwargs.items() + if v is not np._NoValue} + + if type(obj) is not mu.ndarray: + try: + reduction = getattr(obj, method) + except AttributeError: + pass + else: + # This branch is needed for reductions like any which don't + # support a dtype. + if dtype is not None: + return reduction(axis=axis, dtype=dtype, out=out, **passkwargs) + else: + return reduction(axis=axis, out=out, **passkwargs) + + return ufunc.reduce(obj, axis, dtype, out, **passkwargs) + + +def _wrapreduction_any_all(obj, ufunc, method, axis, out, **kwargs): + # Same as above function, but dtype is always bool (but never passed on) + passkwargs = {k: v for k, v in kwargs.items() + if v is not np._NoValue} + + if type(obj) is not mu.ndarray: + try: + reduction = getattr(obj, method) + except AttributeError: + pass + else: + return reduction(axis=axis, out=out, **passkwargs) + + return ufunc.reduce(obj, axis, bool, out, **passkwargs) + + +def _take_dispatcher(a, indices, axis=None, out=None, mode=None): + return (a, out) + + +@array_function_dispatch(_take_dispatcher) +def take(a, indices, axis=None, out=None, mode='raise'): + """ + Take elements from an array along an axis. + + When axis is not None, this function does the same thing as "fancy" + indexing (indexing arrays using arrays); however, it can be easier to use + if you need elements along a given axis. A call such as + ``np.take(arr, indices, axis=3)`` is equivalent to + ``arr[:,:,:,indices,...]``. 
+ + Explained without fancy indexing, this is equivalent to the following use + of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of + indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + Nj = indices.shape + for ii in ndindex(Ni): + for jj in ndindex(Nj): + for kk in ndindex(Nk): + out[ii + jj + kk] = a[ii + (indices[jj],) + kk] + + Parameters + ---------- + a : array_like (Ni..., M, Nk...) + The source array. + indices : array_like (Nj...) + The indices of the values to extract. + Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : ndarray, optional (Ni..., Nj..., Nk...) + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The returned array has the same type as `a`. + + See Also + -------- + compress : Take elements using a boolean mask + ndarray.take : equivalent method + take_along_axis : Take elements by matching the array and the index arrays + + Notes + ----- + By eliminating the inner loop in the description above, and using `s_` to + build simple slice objects, `take` can be expressed in terms of applying + fancy indexing to each 1-d slice:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nj): + out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices] + + For this reason, it is equivalent to (but faster than) the following use + of `apply_along_axis`:: + + out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a) + + Examples + -------- + >>> import numpy as np + >>> a = [4, 3, 5, 7, 6, 8] + >>> indices = [0, 1, 4] + >>> np.take(a, indices) + array([4, 3, 6]) + + In this example if `a` is an ndarray, "fancy" indexing can be used. + + >>> a = np.array(a) + >>> a[indices] + array([4, 3, 6]) + + If `indices` is not one dimensional, the output also has these dimensions. + + >>> np.take(a, [[0, 1], [2, 3]]) + array([[4, 3], + [5, 7]]) + """ + return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) + + +def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, + copy=None): + return (a,) + + +@array_function_dispatch(_reshape_dispatcher) +def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): + """ + Gives a new shape to an array without changing its data. + + Parameters + ---------- + a : array_like + Array to be reshaped. + shape : int or tuple of ints + The new shape should be compatible with the original shape. If + an integer, then the result will be a 1-D array of that length. + One shape dimension can be -1. In this case, the value is + inferred from the length of the array and remaining dimensions. + order : {'C', 'F', 'A'}, optional + Read the elements of ``a`` using this index order, and place the + elements into the reshaped array using this index order. 
'C' + means to read / write the elements using C-like index order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to read / write the + elements using Fortran-like index order, with the first index + changing fastest, and the last index changing slowest. Note that + the 'C' and 'F' options take no account of the memory layout of + the underlying array, and only refer to the order of indexing. + 'A' means to read / write the elements in Fortran-like index + order if ``a`` is Fortran *contiguous* in memory, C-like order + otherwise. + newshape : int or tuple of ints + .. deprecated:: 2.1 + Replaced by ``shape`` argument. Retained for backward + compatibility. + copy : bool, optional + If ``True``, then the array data is copied. If ``None``, a copy will + only be made if it's required by ``order``. For ``False`` it raises + a ``ValueError`` if a copy cannot be avoided. Default: ``None``. + + Returns + ------- + reshaped_array : ndarray + This will be a new view object if possible; otherwise, it will + be a copy. Note there is no guarantee of the *memory layout* (C- or + Fortran- contiguous) of the returned array. + + See Also + -------- + ndarray.reshape : Equivalent method. + + Notes + ----- + It is not always possible to change the shape of an array without copying + the data. + + The ``order`` keyword gives the index ordering both for *fetching* + the values from ``a``, and then *placing* the values into the output + array. For example, let's say you have an array: + + >>> a = np.arange(6).reshape((3, 2)) + >>> a + array([[0, 1], + [2, 3], + [4, 5]]) + + You can think of reshaping as first raveling the array (using the given + index order), then inserting the elements from the raveled array into the + new array using the same kind of index ordering as was used for the + raveling. + + >>> np.reshape(a, (2, 3)) # C-like index ordering + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering + array([[0, 4, 3], + [2, 1, 5]]) + >>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F') + array([[0, 4, 3], + [2, 1, 5]]) + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> np.reshape(a, 6) + array([1, 2, 3, 4, 5, 6]) + >>> np.reshape(a, 6, order='F') + array([1, 4, 2, 5, 3, 6]) + + >>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2 + array([[1, 2], + [3, 4], + [5, 6]]) + """ + if newshape is None and shape is None: + raise TypeError( + "reshape() missing 1 required positional argument: 'shape'") + if newshape is not None: + if shape is not None: + raise TypeError( + "You cannot specify 'newshape' and 'shape' arguments " + "at the same time.") + # Deprecated in NumPy 2.1, 2024-04-18 + warnings.warn( + "`newshape` keyword argument is deprecated, " + "use `shape=...` or pass shape positionally instead. " + "(deprecated in NumPy 2.1)", + DeprecationWarning, + stacklevel=2, + ) + shape = newshape + if copy is not None: + return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) + return _wrapfunc(a, 'reshape', shape, order=order) + + +def _choose_dispatcher(a, choices, out=None, mode=None): + yield a + yield from choices + yield out + + +@array_function_dispatch(_choose_dispatcher) +def choose(a, choices, out=None, mode='raise'): + """ + Construct an array from an index array and a list of arrays to choose from. 
+ + First of all, if confused or uncertain, definitely look at the Examples - + in its full generality, this function is less simple than it might + seem from the following code description:: + + np.choose(a,c) == np.array([c[a[I]][I] for I in np.ndindex(a.shape)]) + + But this omits some subtleties. Here is a fully general summary: + + Given an "index" array (`a`) of integers and a sequence of ``n`` arrays + (`choices`), `a` and each choice array are first broadcast, as necessary, + to arrays of a common shape; calling these *Ba* and *Bchoices[i], i = + 0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape`` + for each ``i``. Then, a new array with shape ``Ba.shape`` is created as + follows: + + * if ``mode='raise'`` (the default), then, first of all, each element of + ``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose + that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)`` + position in ``Ba`` - then the value at the same position in the new array + is the value in ``Bchoices[i]`` at that same position; + + * if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed) + integer; modular arithmetic is used to map integers outside the range + `[0, n-1]` back into that range; and then the new array is constructed + as above; + + * if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed) + integer; negative integers are mapped to 0; values greater than ``n-1`` + are mapped to ``n-1``; and then the new array is constructed as above. + + Parameters + ---------- + a : int array + This array must contain integers in ``[0, n-1]``, where ``n`` is the + number of choices, unless ``mode=wrap`` or ``mode=clip``, in which + cases any integers are permissible. + choices : sequence of arrays + Choice arrays. `a` and all of the choices must be broadcastable to the + same shape. If `choices` is itself an array (not recommended), then + its outermost dimension (i.e., the one corresponding to + ``choices.shape[0]``) is taken as defining the "sequence". + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if ``mode='raise'``; use other modes for better performance. + mode : {'raise' (default), 'wrap', 'clip'}, optional + Specifies how indices outside ``[0, n-1]`` will be treated: + + * 'raise' : an exception is raised + * 'wrap' : value becomes value mod ``n`` + * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1 + + Returns + ------- + merged_array : array + The merged result. + + Raises + ------ + ValueError: shape mismatch + If `a` and each choice array are not all broadcastable to the same + shape. + + See Also + -------- + ndarray.choose : equivalent method + numpy.take_along_axis : Preferable if `choices` is an array + + Notes + ----- + To reduce the chance of misinterpretation, even though the following + "abuse" is nominally supported, `choices` should neither be, nor be + thought of as, a single array, i.e., the outermost sequence-like container + should be either a list or a tuple. + + Examples + -------- + + >>> import numpy as np + >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], + ... [20, 21, 22, 23], [30, 31, 32, 33]] + >>> np.choose([2, 3, 1, 0], choices + ... # the first element of the result will be the first element of the + ... # third (2+1) "array" in choices, namely, 20; the second element + ... # will be the second element of the fourth (3+1) choice array, i.e., + ... 
# 31, etc.
+    ... )
+    array([20, 31, 12,  3])
+    >>> np.choose([2, 4, 1, 0], choices, mode='clip')  # 4 goes to 3 (4-1)
+    array([20, 31, 12,  3])
+    >>> # because there are 4 choice arrays
+    >>> np.choose([2, 4, 1, 0], choices, mode='wrap')  # 4 goes to (4 mod 4)
+    array([20,  1, 12,  3])
+    >>> # i.e., 0
+
+    A couple of examples illustrating how choose broadcasts:
+
+    >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
+    >>> choices = [-10, 10]
+    >>> np.choose(a, choices)
+    array([[ 10, -10,  10],
+           [-10,  10, -10],
+           [ 10, -10,  10]])
+
+    >>> # With thanks to Anne Archibald
+    >>> a = np.array([0, 1]).reshape((2,1,1))
+    >>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
+    >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
+    >>> np.choose(a, (c1, c2))  # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
+    array([[[ 1,  1,  1,  1,  1],
+            [ 2,  2,  2,  2,  2],
+            [ 3,  3,  3,  3,  3]],
+           [[-1, -2, -3, -4, -5],
+            [-1, -2, -3, -4, -5],
+            [-1, -2, -3, -4, -5]]])
+
+    """
+    return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
+
+
+def _repeat_dispatcher(a, repeats, axis=None):
+    return (a,)
+
+
+@array_function_dispatch(_repeat_dispatcher)
+def repeat(a, repeats, axis=None):
+    """
+    Repeat each element of an array after itself.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    repeats : int or array of ints
+        The number of repetitions for each element. `repeats` is broadcast
+        to fit the shape of the given axis.
+    axis : int, optional
+        The axis along which to repeat values. By default, use the
+        flattened input array, and return a flat output array.
+
+    Returns
+    -------
+    repeated_array : ndarray
+        Output array which has the same shape as `a`, except along
+        the given axis.
+
+    See Also
+    --------
+    tile : Tile an array.
+    unique : Find the unique elements of an array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.repeat(3, 4)
+    array([3, 3, 3, 3])
+    >>> x = np.array([[1,2],[3,4]])
+    >>> np.repeat(x, 2)
+    array([1, 1, 2, 2, 3, 3, 4, 4])
+    >>> np.repeat(x, 3, axis=1)
+    array([[1, 1, 1, 2, 2, 2],
+           [3, 3, 3, 4, 4, 4]])
+    >>> np.repeat(x, [1, 2], axis=0)
+    array([[1, 2],
+           [3, 4],
+           [3, 4]])
+
+    """
+    return _wrapfunc(a, 'repeat', repeats, axis=axis)
+
+
+def _put_dispatcher(a, ind, v, mode=None):
+    return (a, ind, v)
+
+
+@array_function_dispatch(_put_dispatcher)
+def put(a, ind, v, mode='raise'):
+    """
+    Replaces specified elements of an array with given values.
+
+    The indexing works on the flattened target array. `put` is roughly
+    equivalent to:
+
+    ::
+
+        a.flat[ind] = v
+
+    Parameters
+    ----------
+    a : ndarray
+        Target array.
+    ind : array_like
+        Target indices, interpreted as integers.
+    v : array_like
+        Values to place in `a` at target indices. If `v` is shorter than
+        `ind` it will be repeated as necessary.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices will behave.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        'clip' mode means that all indices that are too large are replaced
+        by the index that addresses the last element along that axis. Note
+        that this disables indexing with negative numbers. In 'raise' mode,
+        if an exception occurs the target array may still be modified.
+ + See Also + -------- + putmask, place + put_along_axis : Put elements by matching the array and the index arrays + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(5) + >>> np.put(a, [0, 2], [-44, -55]) + >>> a + array([-44, 1, -55, 3, 4]) + + >>> a = np.arange(5) + >>> np.put(a, 22, -5, mode='clip') + >>> a + array([ 0, 1, 2, 3, -5]) + + """ + try: + put = a.put + except AttributeError as e: + raise TypeError(f"argument 1 must be numpy.ndarray, not {type(a)}") from e + + return put(ind, v, mode=mode) + + +def _swapaxes_dispatcher(a, axis1, axis2): + return (a,) + + +@array_function_dispatch(_swapaxes_dispatcher) +def swapaxes(a, axis1, axis2): + """ + Interchange two axes of an array. + + Parameters + ---------- + a : array_like + Input array. + axis1 : int + First axis. + axis2 : int + Second axis. + + Returns + ------- + a_swapped : ndarray + For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is + returned; otherwise a new array is created. For earlier NumPy + versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3]]) + >>> np.swapaxes(x,0,1) + array([[1], + [2], + [3]]) + + >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + + >>> np.swapaxes(x,0,2) + array([[[0, 4], + [2, 6]], + [[1, 5], + [3, 7]]]) + + """ + return _wrapfunc(a, 'swapaxes', axis1, axis2) + + +def _transpose_dispatcher(a, axes=None): + return (a,) + + +@array_function_dispatch(_transpose_dispatcher) +def transpose(a, axes=None): + """ + Returns an array with axes transposed. + + For a 1-D array, this returns an unchanged view of the original array, as a + transposed vector is simply the same vector. + To convert a 1-D array into a 2-D column vector, an additional dimension + must be added, e.g., ``np.atleast_2d(a).T`` achieves this, as does + ``a[:, np.newaxis]``. + For a 2-D array, this is the standard matrix transpose. + For an n-D array, if axes are given, their order indicates how the + axes are permuted (see Examples). If axes are not provided, then + ``transpose(a).shape == a.shape[::-1]``. + + Parameters + ---------- + a : array_like + Input array. + axes : tuple or list of ints, optional + If specified, it must be a tuple or list which contains a permutation + of [0, 1, ..., N-1] where N is the number of axes of `a`. Negative + indices can also be used to specify axes. The i-th axis of the returned + array will correspond to the axis numbered ``axes[i]`` of the input. + If not specified, defaults to ``range(a.ndim)[::-1]``, which reverses + the order of the axes. + + Returns + ------- + p : ndarray + `a` with its axes permuted. A view is returned whenever possible. + + See Also + -------- + ndarray.transpose : Equivalent method. + moveaxis : Move axes of an array to new positions. + argsort : Return the indices that would sort an array. + + Notes + ----- + Use ``transpose(a, argsort(axes))`` to invert the transposition of tensors + when using the `axes` keyword argument. 
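+
+    As a small sketch of that inversion identity (``perm`` is an arbitrary
+    permutation chosen here only for illustration)::
+
+        perm = (1, 2, 0)
+        b = np.transpose(a, perm)
+        np.transpose(b, np.argsort(perm))   # recovers a's original layout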
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> a
+    array([[1, 2],
+           [3, 4]])
+    >>> np.transpose(a)
+    array([[1, 3],
+           [2, 4]])
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> a
+    array([1, 2, 3, 4])
+    >>> np.transpose(a)
+    array([1, 2, 3, 4])
+
+    >>> a = np.ones((1, 2, 3))
+    >>> np.transpose(a, (1, 0, 2)).shape
+    (2, 1, 3)
+
+    >>> a = np.ones((2, 3, 4, 5))
+    >>> np.transpose(a).shape
+    (5, 4, 3, 2)
+
+    >>> a = np.arange(3*4*5).reshape((3, 4, 5))
+    >>> np.transpose(a, (-1, 0, -2)).shape
+    (5, 3, 4)
+
+    """
+    return _wrapfunc(a, 'transpose', axes)
+
+
+def _matrix_transpose_dispatcher(x):
+    return (x,)
+
+@array_function_dispatch(_matrix_transpose_dispatcher)
+def matrix_transpose(x, /):
+    """
+    Transposes a matrix (or a stack of matrices) ``x``.
+
+    This function is Array API compatible.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array having shape (..., M, N) and whose two innermost
+        dimensions form ``MxN`` matrices.
+
+    Returns
+    -------
+    out : ndarray
+        An array containing the transpose for each matrix and having shape
+        (..., N, M).
+
+    See Also
+    --------
+    transpose : Generic transpose method.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.matrix_transpose([[1, 2], [3, 4]])
+    array([[1, 3],
+           [2, 4]])
+
+    >>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
+    array([[[1, 3],
+            [2, 4]],
+           [[5, 7],
+            [6, 8]]])
+
+    """
+    x = asanyarray(x)
+    if x.ndim < 2:
+        raise ValueError(
+            f"Input array must be at least 2-dimensional, but it is {x.ndim}"
+        )
+    return swapaxes(x, -1, -2)
+
+
+def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
+    return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
+def partition(a, kth, axis=-1, kind='introselect', order=None):
+    """
+    Return a partitioned copy of an array.
+
+    Creates a copy of the array and partially sorts it in such a way that
+    the value of the element in k-th position is in the position it would be
+    in a sorted array. In the output array, all elements smaller than the k-th
+    element are located to the left of this element and all equal or greater
+    are located to its right. The ordering of the elements in the two
+    partitions on either side of the k-th element in the output array is
+    undefined.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be sorted.
+    kth : int or sequence of ints
+        Element index to partition by. The k-th element will be in its
+        final sorted position and all smaller elements will be moved
+        before it and all equal or greater elements behind it. The order
+        of all elements in the partitions is undefined. If provided with
+        a sequence of k-th values, it will partition all elements indexed
+        by them into their sorted positions at once.
+
+        .. deprecated:: 1.22.0
+            Passing booleans as index is deprecated.
+    axis : int or None, optional
+        Axis along which to sort. If None, the array is flattened before
+        sorting. The default is -1, which sorts along the last axis.
+    kind : {'introselect'}, optional
+        Selection algorithm. Default is 'introselect'.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument
+        specifies which fields to compare first, second, etc. A single
+        field can be specified as a string. Not all fields need be
+        specified, but unspecified fields will still be used, in the
+        order in which they come up in the dtype, to break ties.
+
+    Returns
+    -------
+    partitioned_array : ndarray
+        Array of the same type and shape as `a`.
+ + See Also + -------- + ndarray.partition : Method to sort an array in-place. + argpartition : Indirect partition. + sort : Full sorting + + Notes + ----- + The various selection algorithms are characterized by their average + speed, worst case performance, work space size, and whether they are + stable. A stable sort keeps items with the same key in the same + relative order. The available algorithms have the following + properties: + + ================= ======= ============= ============ ======= + kind speed worst case work space stable + ================= ======= ============= ============ ======= + 'introselect' 1 O(n) 0 no + ================= ======= ============= ============ ======= + + All the partition algorithms make temporary copies of the data when + partitioning along any but the last axis. Consequently, + partitioning along the last axis is faster and uses less space than + partitioning along any other axis. + + The sort order for complex numbers is lexicographic. If both the + real and imaginary parts are non-nan then the order is determined by + the real parts except when they are equal, in which case the order + is determined by the imaginary parts. + + The sort order of ``np.nan`` is bigger than ``np.inf``. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) + >>> p = np.partition(a, 4) + >>> p + array([0, 1, 2, 1, 2, 5, 2, 3, 3, 6, 7, 7, 7, 7]) # may vary + + ``p[4]`` is 2; all elements in ``p[:4]`` are less than or equal + to ``p[4]``, and all elements in ``p[5:]`` are greater than or + equal to ``p[4]``. The partition is:: + + [0, 1, 2, 1], [2], [5, 2, 3, 3, 6, 7, 7, 7, 7] + + The next example shows the use of multiple values passed to `kth`. + + >>> p2 = np.partition(a, (4, 8)) + >>> p2 + array([0, 1, 2, 1, 2, 3, 3, 2, 5, 6, 7, 7, 7, 7]) + + ``p2[4]`` is 2 and ``p2[8]`` is 5. All elements in ``p2[:4]`` + are less than or equal to ``p2[4]``, all elements in ``p2[5:8]`` + are greater than or equal to ``p2[4]`` and less than or equal to + ``p2[8]``, and all elements in ``p2[9:]`` are greater than or + equal to ``p2[8]``. The partition is:: + + [0, 1, 2, 1], [2], [3, 3, 2], [5], [6, 7, 7, 7, 7] + """ + if axis is None: + # flatten returns (1, N) for np.matrix, so always use the last axis + a = asanyarray(a).flatten() + axis = -1 + else: + a = asanyarray(a).copy(order="K") + a.partition(kth, axis=axis, kind=kind, order=order) + return a + + +def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None): + return (a,) + + +@array_function_dispatch(_argpartition_dispatcher) +def argpartition(a, kth, axis=-1, kind='introselect', order=None): + """ + Perform an indirect partition along the given axis using the + algorithm specified by the `kind` keyword. It returns an array of + indices of the same shape as `a` that index data along the given + axis in partitioned order. + + Parameters + ---------- + a : array_like + Array to sort. + kth : int or sequence of ints + Element index to partition by. The k-th element will be in its + final sorted position and all smaller elements will be moved + before it and all larger elements behind it. The order of all + elements in the partitions is undefined. If provided with a + sequence of k-th it will partition all of them into their sorted + position at once. + + .. deprecated:: 1.22.0 + Passing booleans as index is deprecated. + axis : int or None, optional + Axis along which to sort. The default is -1 (the last axis). If + None, the flattened array is used. 
+ kind : {'introselect'}, optional + Selection algorithm. Default is 'introselect' + order : str or list of str, optional + When `a` is an array with fields defined, this argument + specifies which fields to compare first, second, etc. A single + field can be specified as a string, and not all fields need be + specified, but unspecified fields will still be used, in the + order in which they come up in the dtype, to break ties. + + Returns + ------- + index_array : ndarray, int + Array of indices that partition `a` along the specified axis. + If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`. + More generally, ``np.take_along_axis(a, index_array, axis=axis)`` + always yields the partitioned `a`, irrespective of dimensionality. + + See Also + -------- + partition : Describes partition algorithms used. + ndarray.partition : Inplace partition. + argsort : Full indirect sort. + take_along_axis : Apply ``index_array`` from argpartition + to an array as if by calling partition. + + Notes + ----- + The returned indices are not guaranteed to be sorted according to + the values. Furthermore, the default selection algorithm ``introselect`` + is unstable, and hence the returned indices are not guaranteed + to be the earliest/latest occurrence of the element. + + `argpartition` works for real/complex inputs with nan values, + see `partition` for notes on the enhanced sort order and + different selection algorithms. + + Examples + -------- + One dimensional array: + + >>> import numpy as np + >>> x = np.array([3, 4, 2, 1]) + >>> x[np.argpartition(x, 3)] + array([2, 1, 3, 4]) # may vary + >>> x[np.argpartition(x, (1, 3))] + array([1, 2, 3, 4]) # may vary + + >>> x = [3, 4, 2, 1] + >>> np.array(x)[np.argpartition(x, 3)] + array([2, 1, 3, 4]) # may vary + + Multi-dimensional array: + + >>> x = np.array([[3, 4, 2], [1, 3, 1]]) + >>> index_array = np.argpartition(x, kth=1, axis=-1) + >>> # below is the same as np.partition(x, kth=1) + >>> np.take_along_axis(x, index_array, axis=-1) + array([[2, 3, 4], + [1, 1, 3]]) + + """ + return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order) + + +def _sort_dispatcher(a, axis=None, kind=None, order=None, *, stable=None): + return (a,) + + +@array_function_dispatch(_sort_dispatcher) +def sort(a, axis=-1, kind=None, order=None, *, stable=None): + """ + Return a sorted copy of an array. + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int or None, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + Sorting algorithm. The default is 'quicksort'. Note that both 'stable' + and 'mergesort' use timsort or radix sort under the covers and, + in general, the actual implementation will vary with data type. + The 'mergesort' option is retained for backwards compatibility. + order : str or list of str, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. A single field can + be specified as a string, and not all fields need be specified, + but unspecified fields will still be used, in the order in which + they come up in the dtype, to break ties. + stable : bool, optional + Sort stability. If ``True``, the returned array will maintain + the relative order of ``a`` values which compare as equal. + If ``False`` or ``None``, this is not guaranteed. 
+        Internally, this option selects ``kind='stable'``.
+        Default: ``None``.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    sorted_array : ndarray
+        Array of the same type and shape as `a`.
+
+    See Also
+    --------
+    ndarray.sort : Method to sort an array in-place.
+    argsort : Indirect sort.
+    lexsort : Indirect stable sort on multiple keys.
+    searchsorted : Find elements in a sorted array.
+    partition : Partial sort.
+
+    Notes
+    -----
+    The various sorting algorithms are characterized by their average speed,
+    worst case performance, work space size, and whether they are stable. A
+    stable sort keeps items with the same key in the same relative
+    order. The four algorithms implemented in NumPy have the following
+    properties:
+
+    =========== ======= ============= ============ ========
+       kind      speed   worst case    work space   stable
+    =========== ======= ============= ============ ========
+    'quicksort'    1     O(n^2)            0          no
+    'heapsort'     3     O(n*log(n))       0          no
+    'mergesort'    2     O(n*log(n))      ~n/2        yes
+    'timsort'      2     O(n*log(n))      ~n/2        yes
+    =========== ======= ============= ============ ========
+
+    .. note:: The datatype determines which of 'mergesort' or 'timsort'
+       is actually used, even if 'mergesort' is specified. User selection
+       at a finer scale is not currently available.
+
+    For performance, ``sort`` makes a temporary copy if needed to make the
+    data `contiguous
+    <https://numpy.org/doc/stable/glossary.html#term-contiguous>`_
+    in memory along the sort axis. For even better performance and reduced
+    memory consumption, ensure that the array is already contiguous along
+    the sort axis.
+
+    The sort order for complex numbers is lexicographic. If both the real
+    and imaginary parts are non-nan then the order is determined by the
+    real parts except when they are equal, in which case the order is
+    determined by the imaginary parts.
+
+    Previous to numpy 1.4.0 sorting real and complex arrays containing nan
+    values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+    values are sorted to the end. The extended sort order is:
+
+    * Real: [R, nan]
+    * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+    where R is a non-nan real value. Complex values with the same nan
+    placements are sorted according to the non-nan part if it exists.
+    Non-nan values are sorted as before.
+
+    quicksort has been changed to
+    `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+    When sorting does not make enough progress it switches to
+    `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+    This implementation makes quicksort O(n*log(n)) in the worst case.
+
+    'stable' automatically chooses the best stable sorting algorithm
+    for the data type being sorted.
+    It, along with 'mergesort', is currently mapped to
+    `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+    or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+    depending on the data type.
+    API forward compatibility currently limits the
+    ability to select the implementation and it is hardwired for the
+    different data types.
+
+    Timsort is added for better performance on already or nearly
+    sorted data. On random data timsort is almost identical to
+    mergesort. It is now used for stable sort while quicksort is still the
+    default sort if none is chosen. For timsort details, refer to
+    `CPython listsort.txt
+    <https://github.com/python/cpython/blob/main/Objects/listsort.txt>`_.
+    'mergesort' and 'stable' are mapped to radix sort for integer data
+    types. Radix sort is an O(n) sort instead of O(n log n).
+
+    NaT now sorts to the end of arrays for consistency with NaN.
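+
+    As a small sketch, the ``stable`` keyword described above is shorthand
+    for selecting the stable kind::
+
+        np.sort(a, stable=True)    # same result as np.sort(a, kind='stable')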
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[1,4],[3,1]])
+    >>> np.sort(a)             # sort along the last axis
+    array([[1, 4],
+           [1, 3]])
+    >>> np.sort(a, axis=None)  # sort the flattened array
+    array([1, 1, 3, 4])
+    >>> np.sort(a, axis=0)     # sort along the first axis
+    array([[1, 1],
+           [3, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
+    >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
+    ...           ('Galahad', 1.7, 38)]
+    >>> a = np.array(values, dtype=dtype)  # create a structured array
+    >>> np.sort(a, order='height')  # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+           ('Lancelot', 1.8999999999999999, 38)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i8')])
+
+    Sort by age, then height if ages are equal:
+
+    >>> np.sort(a, order=['age', 'height'])  # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+           ('Arthur', 1.8, 41)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i8')])
+
+    """
+    if axis is None:
+        # flatten returns (1, N) for np.matrix, so always use the last axis
+        a = asanyarray(a).flatten()
+        axis = -1
+    else:
+        a = asanyarray(a).copy(order="K")
+    a.sort(axis=axis, kind=kind, order=order, stable=stable)
+    return a
+
+
+def _argsort_dispatcher(a, axis=None, kind=None, order=None, *, stable=None):
+    return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind=None, order=None, *, stable=None):
+    """
+    Returns the indices that would sort an array.
+
+    Perform an indirect sort along the given axis using the algorithm
+    specified by the `kind` keyword. It returns an array of indices of the
+    same shape as `a` that index data along the given axis in sorted order.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    axis : int or None, optional
+        Axis along which to sort. The default is -1 (the last axis). If
+        None, the flattened array is used.
+    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
+        Sorting algorithm. The default is 'quicksort'. Note that both
+        'stable' and 'mergesort' use timsort under the covers and, in
+        general, the actual implementation will vary with data type. The
+        'mergesort' option is retained for backwards compatibility.
+    order : str or list of str, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc. A single field can
+        be specified as a string, and not all fields need be specified,
+        but unspecified fields will still be used, in the order in which
+        they come up in the dtype, to break ties.
+    stable : bool, optional
+        Sort stability. If ``True``, the returned array will maintain
+        the relative order of ``a`` values which compare as equal.
+        If ``False`` or ``None``, this is not guaranteed. Internally,
+        this option selects ``kind='stable'``. Default: ``None``.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that sort `a` along the specified `axis`.
+        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
+        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+        always yields the sorted `a`, irrespective of dimensionality.
+
+    See Also
+    --------
+    sort : Describes sorting algorithms used.
+    lexsort : Indirect stable sort with multiple keys.
+    ndarray.sort : Inplace sort.
+    argpartition : Indirect partial sort.
+    take_along_axis : Apply ``index_array`` from argsort
+                      to an array as if by calling sort.
+
+    Notes
+    -----
+    See `sort` for notes on the different sorting algorithms.
+
+    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> import numpy as np
+    >>> x = np.array([3, 1, 2])
+    >>> np.argsort(x)
+    array([1, 2, 0])
+
+    Two-dimensional array:
+
+    >>> x = np.array([[0, 3], [2, 2]])
+    >>> x
+    array([[0, 3],
+           [2, 2]])
+
+    >>> ind = np.argsort(x, axis=0)  # sorts along first axis (down)
+    >>> ind
+    array([[0, 1],
+           [1, 0]])
+    >>> np.take_along_axis(x, ind, axis=0)  # same as np.sort(x, axis=0)
+    array([[0, 2],
+           [2, 3]])
+
+    >>> ind = np.argsort(x, axis=1)  # sorts along last axis (across)
+    >>> ind
+    array([[0, 1],
+           [0, 1]])
+    >>> np.take_along_axis(x, ind, axis=1)  # same as np.sort(x, axis=1)
+    array([[0, 3],
+           [2, 2]])
+
+    Indices of the sorted elements of a N-dimensional array:
+
+    >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
+    >>> ind
+    (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
+    >>> x[ind]  # same as np.sort(x, axis=None)
+    array([0, 2, 2, 3])
+
+    Sorting with keys:
+
+    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+    >>> x
+    array([(1, 0), (0, 1)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    >>> np.argsort(x, order=('x','y'))
+    array([1, 0])
+
+    >>> np.argsort(x, order=('y','x'))
+    array([0, 1])
+
+    """
+    return _wrapfunc(
+        a, 'argsort', axis=axis, kind=kind, order=order, stable=stable
+    )
+
+
+def _argmax_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue):
+    return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
+def argmax(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Returns the indices of the maximum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the array.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as ``a.shape``
+        with the dimension along `axis` removed. If `keepdims` is set to True,
+        then the size of `axis` will be 1 with the resulting array having same
+        shape as ``a.shape``.
+
+    See Also
+    --------
+    ndarray.argmax, argmin
+    amax : The maximum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+    take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+                      from argmax to an array as if by calling max.
+ + Notes + ----- + In case of multiple occurrences of the maximum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmax(a) + 5 + >>> np.argmax(a, axis=0) + array([1, 1, 1]) + >>> np.argmax(a, axis=1) + array([2, 2]) + + Indexes of the maximal elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape) + >>> ind + (1, 2) + >>> a[ind] + 15 + + >>> b = np.arange(6) + >>> b[1] = 5 + >>> b + array([0, 5, 2, 3, 4, 5]) + >>> np.argmax(b) # Only the first occurrence is returned. + 1 + + >>> x = np.array([[4,2,3], [1,0,3]]) + >>> index_array = np.argmax(x, axis=-1) + >>> # Same as np.amax(x, axis=-1, keepdims=True) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1) + array([[4], + [3]]) + >>> # Same as np.amax(x, axis=-1) + >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), + ... axis=-1).squeeze(axis=-1) + array([4, 3]) + + Setting `keepdims` to `True`, + + >>> x = np.arange(24).reshape((2, 3, 4)) + >>> res = np.argmax(x, axis=1, keepdims=True) + >>> res.shape + (2, 1, 4) + """ + kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {} + return _wrapfunc(a, 'argmax', axis=axis, out=out, **kwds) + + +def _argmin_dispatcher(a, axis=None, out=None, *, keepdims=np._NoValue): + return (a, out) + + +@array_function_dispatch(_argmin_dispatcher) +def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Returns the indices of the minimum values along an axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + By default, the index is into the flattened array, otherwise + along the specified axis. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray of ints + Array of indices into the array. It has the same shape as `a.shape` + with the dimension along `axis` removed. If `keepdims` is set to True, + then the size of `axis` will be 1 with the resulting array having same + shape as `a.shape`. + + See Also + -------- + ndarray.argmin, argmax + amin : The minimum value along a given axis. + unravel_index : Convert a flat index into an index tuple. + take_along_axis : Apply ``np.expand_dims(index_array, axis)`` + from argmin to an array as if by calling min. + + Notes + ----- + In case of multiple occurrences of the minimum values, the indices + corresponding to the first occurrence are returned. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + 10 + >>> a + array([[10, 11, 12], + [13, 14, 15]]) + >>> np.argmin(a) + 0 + >>> np.argmin(a, axis=0) + array([0, 0, 0]) + >>> np.argmin(a, axis=1) + array([0, 0]) + + Indices of the minimum elements of a N-dimensional array: + + >>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape) + >>> ind + (0, 0) + >>> a[ind] + 10 + + >>> b = np.arange(6) + 10 + >>> b[4] = 10 + >>> b + array([10, 11, 12, 13, 10, 15]) + >>> np.argmin(b) # Only the first occurrence is returned. 
+    0
+
+    >>> x = np.array([[4,2,3], [1,0,3]])
+    >>> index_array = np.argmin(x, axis=-1)
+    >>> # Same as np.amin(x, axis=-1, keepdims=True)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+    array([[2],
+           [0]])
+    >>> # Same as np.amin(x, axis=-1)
+    >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1),
+    ...                    axis=-1).squeeze(axis=-1)
+    array([2, 0])
+
+    Setting `keepdims` to `True`,
+
+    >>> x = np.arange(24).reshape((2, 3, 4))
+    >>> res = np.argmin(x, axis=1, keepdims=True)
+    >>> res.shape
+    (2, 1, 4)
+    """
+    kwds = {'keepdims': keepdims} if keepdims is not np._NoValue else {}
+    return _wrapfunc(a, 'argmin', axis=axis, out=out, **kwds)
+
+
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+    return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
+def searchsorted(a, v, side='left', sorter=None):
+    """
+    Find indices where elements should be inserted to maintain order.
+
+    Find the indices into a sorted array `a` such that, if the
+    corresponding elements in `v` were inserted before the indices, the
+    order of `a` would be preserved.
+
+    Assuming that `a` is sorted:
+
+    ====== ============================
+    `side` returned index `i` satisfies
+    ====== ============================
+    left   ``a[i-1] < v <= a[i]``
+    right  ``a[i-1] <= v < a[i]``
+    ====== ============================
+
+    Parameters
+    ----------
+    a : 1-D array_like
+        Input array. If `sorter` is None, then it must be sorted in
+        ascending order, otherwise `sorter` must be an array of indices
+        that sort it.
+    v : array_like
+        Values to insert into `a`.
+    side : {'left', 'right'}, optional
+        If 'left', the index of the first suitable location found is given.
+        If 'right', return the last such index. If there is no suitable
+        index, return either 0 or N (where N is the length of `a`).
+    sorter : 1-D array_like, optional
+        Optional array of integer indices that sort array `a` into
+        ascending order. They are typically the result of `argsort`.
+
+    Returns
+    -------
+    indices : int or array of ints
+        Array of insertion points with the same shape as `v`,
+        or an integer if `v` is a scalar.
+
+    See Also
+    --------
+    sort : Return a sorted copy of an array.
+    histogram : Produce histogram from 1-D data.
+
+    Notes
+    -----
+    Binary search is used to find the required insertion points.
+
+    As of NumPy 1.4.0 `searchsorted` works with real/complex arrays
+    containing `nan` values. The enhanced sort order is documented in
+    `sort`.
+
+    This function uses the same algorithm as the builtin python
+    `bisect.bisect_left` (``side='left'``) and `bisect.bisect_right`
+    (``side='right'``) functions, which is also vectorized
+    in the `v` argument.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.searchsorted([11,12,13,14,15], 13)
+    2
+    >>> np.searchsorted([11,12,13,14,15], 13, side='right')
+    3
+    >>> np.searchsorted([11,12,13,14,15], [-10, 20, 12, 13])
+    array([0, 5, 1, 2])
+
+    When `sorter` is used, the returned indices refer to the sorted
+    array of `a` and not `a` itself:
+
+    >>> a = np.array([40, 10, 20, 30])
+    >>> sorter = np.argsort(a)
+    >>> sorter
+    array([1, 2, 3, 0])  # Indices that would sort the array 'a'
+    >>> result = np.searchsorted(a, 25, sorter=sorter)
+    >>> result
+    2
+    >>> a[sorter[result]]
+    30  # The element at index 2 of the sorted array is 30.
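+
+    As one more illustrative sketch (``np.insert`` is used here only for
+    demonstration), the returned index keeps the array sorted on insertion:
+
+    >>> a = np.array([11, 12, 13, 14, 15])
+    >>> np.insert(a, np.searchsorted(a, 13), 13)
+    array([11, 12, 13, 13, 14, 15])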
+ """ + return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter) + + +def _resize_dispatcher(a, new_shape): + return (a,) + + +@array_function_dispatch(_resize_dispatcher) +def resize(a, new_shape): + """ + Return a new array with the specified shape. + + If the new array is larger than the original array, then the new + array is filled with repeated copies of `a`. Note that this behavior + is different from a.resize(new_shape) which fills with zeros instead + of repeated copies of `a`. + + Parameters + ---------- + a : array_like + Array to be resized. + + new_shape : int or tuple of int + Shape of resized array. + + Returns + ------- + reshaped_array : ndarray + The new array is formed from the data in the old array, repeated + if necessary to fill out the required number of elements. The + data are repeated iterating over the array in C-order. + + See Also + -------- + numpy.reshape : Reshape an array without changing the total size. + numpy.pad : Enlarge and pad an array. + numpy.repeat : Repeat elements of an array. + ndarray.resize : resize an array in-place. + + Notes + ----- + When the total size of the array does not change `~numpy.reshape` should + be used. In most other cases either indexing (to reduce the size) + or padding (to increase the size) may be a more appropriate solution. + + Warning: This functionality does **not** consider axes separately, + i.e. it does not apply interpolation/extrapolation. + It fills the return array with the required number of elements, iterating + over `a` in C-order, disregarding axes (and cycling back from the start if + the new shape is larger). This functionality is therefore not suitable to + resize images, or data where each axis represents a separate and distinct + entity. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[0,1],[2,3]]) + >>> np.resize(a,(2,3)) + array([[0, 1, 2], + [3, 0, 1]]) + >>> np.resize(a,(1,4)) + array([[0, 1, 2, 3]]) + >>> np.resize(a,(2,4)) + array([[0, 1, 2, 3], + [0, 1, 2, 3]]) + + """ + if isinstance(new_shape, (int, nt.integer)): + new_shape = (new_shape,) + + a = ravel(a) + + new_size = 1 + for dim_length in new_shape: + new_size *= dim_length + if dim_length < 0: + raise ValueError( + 'all elements of `new_shape` must be non-negative' + ) + + if a.size == 0 or new_size == 0: + # First case must zero fill. The second would have repeats == 0. + return np.zeros_like(a, shape=new_shape) + + # ceiling division without negating new_size + repeats = (new_size + a.size - 1) // a.size + a = concatenate((a,) * repeats)[:new_size] + + return reshape(a, new_shape) + + +def _squeeze_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_squeeze_dispatcher) +def squeeze(a, axis=None): + """ + Remove axes of length one from `a`. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Selects a subset of the entries of length one in the + shape. If an axis is selected with shape entry greater than + one, an error is raised. + + Returns + ------- + squeezed : ndarray + The input array, but with all or a subset of the + dimensions of length 1 removed. This is always `a` itself + or a view into `a`. Note that if all axes are squeezed, + the result is a 0d array and not a scalar. 
+ + Raises + ------ + ValueError + If `axis` is not None, and an axis being squeezed is not of length 1 + + See Also + -------- + expand_dims : The inverse operation, adding entries of length one + reshape : Insert, remove, and combine dimensions, and resize existing ones + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[[0], [1], [2]]]) + >>> x.shape + (1, 3, 1) + >>> np.squeeze(x).shape + (3,) + >>> np.squeeze(x, axis=0).shape + (3, 1) + >>> np.squeeze(x, axis=1).shape + Traceback (most recent call last): + ... + ValueError: cannot select an axis to squeeze out which has size + not equal to one + >>> np.squeeze(x, axis=2).shape + (1, 3) + >>> x = np.array([[1234]]) + >>> x.shape + (1, 1) + >>> np.squeeze(x) + array(1234) # 0d array + >>> np.squeeze(x).shape + () + >>> np.squeeze(x)[()] + 1234 + + """ + try: + squeeze = a.squeeze + except AttributeError: + return _wrapit(a, 'squeeze', axis=axis) + if axis is None: + return squeeze() + else: + return squeeze(axis=axis) + + +def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None): + return (a,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(a, offset=0, axis1=0, axis2=1): + """ + Return specified diagonals. + + If `a` is 2-D, returns the diagonal of `a` with the given offset, + i.e., the collection of elements of the form ``a[i, i+offset]``. If + `a` has more than two dimensions, then the axes specified by `axis1` + and `axis2` are used to determine the 2-D sub-array whose diagonal is + returned. The shape of the resulting array can be determined by + removing `axis1` and `axis2` and appending an index to the right equal + to the size of the resulting diagonals. + + In versions of NumPy prior to 1.7, this function always returned a new, + independent array containing a copy of the values in the diagonal. + + In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal, + but depending on this fact is deprecated. Writing to the resulting + array continues to work as it used to, but a FutureWarning is issued. + + Starting in NumPy 1.9 it returns a read-only view on the original array. + Attempting to write to the resulting array will produce an error. + + In some future release, it will return a read/write view and writing to + the returned array will alter your original array. The returned array + will have the same type as the input array. + + If you don't write to the array returned by this function, then you can + just ignore all of the above. + + If you depend on the current behavior, then we suggest copying the + returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead + of just ``np.diagonal(a)``. This will work with both past and future + versions of NumPy. + + Parameters + ---------- + a : array_like + Array from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be positive or + negative. Defaults to main diagonal (0). + axis1 : int, optional + Axis to be used as the first axis of the 2-D sub-arrays from which + the diagonals should be taken. Defaults to first axis (0). + axis2 : int, optional + Axis to be used as the second axis of the 2-D sub-arrays from + which the diagonals should be taken. Defaults to second axis (1). 
+ + Returns + ------- + array_of_diagonals : ndarray + If `a` is 2-D, then a 1-D array containing the diagonal and of the + same type as `a` is returned unless `a` is a `matrix`, in which case + a 1-D array rather than a (2-D) `matrix` is returned in order to + maintain backward compatibility. + + If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2` + are removed, and a new axis inserted at the end corresponding to the + diagonal. + + Raises + ------ + ValueError + If the dimension of `a` is less than 2. + + See Also + -------- + diag : MATLAB work-a-like for 1-D and 2-D arrays. + diagflat : Create diagonal arrays. + trace : Sum along diagonals. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> a.diagonal() + array([0, 3]) + >>> a.diagonal(1) + array([1]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2,2,2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> a.diagonal(0, # Main diagonals of two arrays created by skipping + ... 0, # across the outer(left)-most axis last and + ... 1) # the "middle" (row) axis first. + array([[0, 6], + [1, 7]]) + + The sub-arrays whose main diagonals we just obtained; note that each + corresponds to fixing the right-most (column) axis, and that the + diagonals are "packed" in rows. + + >>> a[:,:,0] # main diagonal is [0 6] + array([[0, 2], + [4, 6]]) + >>> a[:,:,1] # main diagonal is [1 7] + array([[1, 3], + [5, 7]]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.fliplr(a).diagonal() # Horizontal flip + array([2, 4, 6]) + >>> np.flipud(a).diagonal() # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + """ + if isinstance(a, np.matrix): + # Make diagonal of matrix 1-D to preserve backward compatibility. + return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + else: + return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2) + + +def _trace_dispatcher( + a, offset=None, axis1=None, axis2=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_trace_dispatcher) +def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + Return the sum along diagonals of the array. + + If `a` is 2-D, the sum along its diagonal with the given offset + is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. + + If `a` has more than two dimensions, then the axes specified by axis1 and + axis2 are used to determine the 2-D sub-arrays whose traces are returned. + The shape of the resulting array is the same as that of `a` with `axis1` + and `axis2` removed. + + Parameters + ---------- + a : array_like + Input array, from which the diagonals are taken. + offset : int, optional + Offset of the diagonal from the main diagonal. Can be both positive + and negative. Defaults to 0. + axis1, axis2 : int, optional + Axes to be used as the first and second axis of the 2-D sub-arrays + from which the diagonals should be taken. Defaults are the first two + axes of `a`. + dtype : dtype, optional + Determines the data-type of the returned array and of the accumulator + where the elements are summed. If dtype has the value None and `a` is + of integer type of precision less than the default integer + precision, then the default integer precision is used. 
Otherwise, + the precision is the same as that of `a`. + out : ndarray, optional + Array into which the output is placed. Its type is preserved and + it must be of the right shape to hold the output. + + Returns + ------- + sum_along_diagonals : ndarray + If `a` is 2-D, the sum along the diagonal is returned. If `a` has + larger dimensions, then an array of sums along diagonals is returned. + + See Also + -------- + diag, diagonal, diagflat + + Examples + -------- + >>> import numpy as np + >>> np.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2,2,2)) + >>> np.trace(a) + array([6, 8]) + + >>> a = np.arange(24).reshape((2,2,2,3)) + >>> np.trace(a).shape + (2, 3) + + """ + if isinstance(a, np.matrix): + # Get trace of matrix via an array to preserve backward compatibility. + return asarray(a).trace( + offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out + ) + else: + return asanyarray(a).trace( + offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out + ) + + +def _ravel_dispatcher(a, order=None): + return (a,) + + +@array_function_dispatch(_ravel_dispatcher) +def ravel(a, order='C'): + """Return a contiguous flattened array. + + A 1-D array, containing the elements of the input, is returned. A copy is + made only if needed. + + As of NumPy 1.10, the returned array will have the same type as the input + array. (for example, a masked array will be returned for a masked array + input) + + Parameters + ---------- + a : array_like + Input array. The elements in `a` are read in the order specified by + `order`, and packed as a 1-D array. + order : {'C','F', 'A', 'K'}, optional + + The elements of `a` are read using this index order. 'C' means + to index the elements in row-major, C-style order, + with the last axis index changing fastest, back to the first + axis index changing slowest. 'F' means to index the elements + in column-major, Fortran-style order, with the + first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of + the memory layout of the underlying array, and only refer to + the order of axis indexing. 'A' means to read the elements in + Fortran-like index order if `a` is Fortran *contiguous* in + memory, C-like order otherwise. 'K' means to read the + elements in the order they occur in memory, except for + reversing the data when strides are negative. By default, 'C' + index order is used. + + Returns + ------- + y : array_like + y is a contiguous 1-D array of the same subtype as `a`, + with shape ``(a.size,)``. + Note that matrices are special cased for backward compatibility, + if `a` is a matrix, then y is a 1-D ndarray. + + See Also + -------- + ndarray.flat : 1-D iterator over an array. + ndarray.flatten : 1-D array copy of the elements of an array + in row-major order. + ndarray.reshape : Change the shape of an array without changing its data. + + Notes + ----- + In row-major, C-style order, in two dimensions, the row index + varies the slowest, and the column index the quickest. This can + be generalized to multiple dimensions, where row-major order + implies that the index along the first axis varies slowest, and + the index along the last quickest. The opposite holds for + column-major, Fortran-style index ordering. + + When a view is desired in as many cases as possible, ``arr.reshape(-1)`` + may be preferable. However, ``ravel`` supports ``K`` in the optional + ``order`` argument while ``reshape`` does not. + + Examples + -------- + It is equivalent to ``reshape(-1, order=order)``. 
+ + >>> import numpy as np + >>> x = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.ravel(x) + array([1, 2, 3, 4, 5, 6]) + + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) + + >>> np.ravel(x, order='F') + array([1, 4, 2, 5, 3, 6]) + + When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: + + >>> np.ravel(x.T) + array([1, 4, 2, 5, 3, 6]) + >>> np.ravel(x.T, order='A') + array([1, 2, 3, 4, 5, 6]) + + When ``order`` is 'K', it will preserve orderings that are neither 'C' + nor 'F', but won't reverse axes: + + >>> a = np.arange(3)[::-1]; a + array([2, 1, 0]) + >>> a.ravel(order='C') + array([2, 1, 0]) + >>> a.ravel(order='K') + array([2, 1, 0]) + + >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a + array([[[ 0, 2, 4], + [ 1, 3, 5]], + [[ 6, 8, 10], + [ 7, 9, 11]]]) + >>> a.ravel(order='C') + array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11]) + >>> a.ravel(order='K') + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + """ + if isinstance(a, np.matrix): + return asarray(a).ravel(order=order) + else: + return asanyarray(a).ravel(order=order) + + +def _nonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_nonzero_dispatcher) +def nonzero(a): + """ + Return the indices of the elements that are non-zero. + + Returns a tuple of arrays, one for each dimension of `a`, + containing the indices of the non-zero elements in that + dimension. The values in `a` are always tested and returned in + row-major, C-style order. + + To group the indices by element, rather than dimension, use `argwhere`, + which returns a row for each non-zero element. + + .. note:: + + When called on a zero-d array or scalar, ``nonzero(a)`` is treated + as ``nonzero(atleast_1d(a))``. + + .. deprecated:: 1.17.0 + + Use `atleast_1d` explicitly if this behavior is deliberate. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Notes + ----- + While the nonzero values can be obtained with ``a[nonzero(a)]``, it is + recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which + will correctly handle 0-d arrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) + >>> x + array([[3, 0, 0], + [0, 4, 0], + [5, 6, 0]]) + >>> np.nonzero(x) + (array([0, 1, 2, 2]), array([0, 1, 0, 1])) + + >>> x[np.nonzero(x)] + array([3, 4, 5, 6]) + >>> np.transpose(np.nonzero(x)) + array([[0, 0], + [1, 1], + [2, 0], + [2, 1]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, np.nonzero(a > 3) + yields the indices of the `a` where the condition is true. + + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> a > 3 + array([[False, False, False], + [ True, True, True], + [ True, True, True]]) + >>> np.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + Using this result to index `a` is equivalent to using the mask directly: + + >>> a[np.nonzero(a > 3)] + array([4, 5, 6, 7, 8, 9]) + >>> a[a > 3] # prefer this spelling + array([4, 5, 6, 7, 8, 9]) + + ``nonzero`` can also be called as a method of the array. 
+ + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return _wrapfunc(a, 'nonzero') + + +def _shape_dispatcher(a): + return (a,) + + +@array_function_dispatch(_shape_dispatcher) +def shape(a): + """ + Return the shape of an array. + + Parameters + ---------- + a : array_like + Input array. + + Returns + ------- + shape : tuple of ints + The elements of the shape tuple give the lengths of the + corresponding array dimensions. + + See Also + -------- + len : ``len(a)`` is equivalent to ``np.shape(a)[0]`` for N-D arrays with + ``N>=1``. + ndarray.shape : Equivalent array method. + + Examples + -------- + >>> import numpy as np + >>> np.shape(np.eye(3)) + (3, 3) + >>> np.shape([[1, 3]]) + (1, 2) + >>> np.shape([0]) + (1,) + >>> np.shape(0) + () + + >>> a = np.array([(1, 2), (3, 4), (5, 6)], + ... dtype=[('x', 'i4'), ('y', 'i4')]) + >>> np.shape(a) + (3,) + >>> a.shape + (3,) + + """ + try: + result = a.shape + except AttributeError: + result = asarray(a).shape + return result + + +def _compress_dispatcher(condition, a, axis=None, out=None): + return (condition, a, out) + + +@array_function_dispatch(_compress_dispatcher) +def compress(condition, a, axis=None, out=None): + """ + Return selected slices of an array along given axis. + + When working along a given axis, a slice along that axis is returned in + `output` for each index where `condition` evaluates to True. When + working on a 1-D array, `compress` is equivalent to `extract`. + + Parameters + ---------- + condition : 1-D array of bools + Array that selects which entries to return. If len(condition) + is less than the size of `a` along the given axis, then output is + truncated to the length of the condition array. + a : array_like + Array from which to extract a part. + axis : int, optional + Axis along which to take slices. If None (default), work on the + flattened array. + out : ndarray, optional + Output array. Its type is preserved and it must be of the right + shape to hold the output. + + Returns + ------- + compressed_array : ndarray + A copy of `a` without the slices along axis for which `condition` + is false. + + See Also + -------- + take, choose, diag, diagonal, select + ndarray.compress : Equivalent method in ndarray + extract : Equivalent method when working on 1-D arrays + :ref:`ufuncs-output-type` + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4], [5, 6]]) + >>> a + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.compress([0, 1], a, axis=0) + array([[3, 4]]) + >>> np.compress([False, True, True], a, axis=0) + array([[3, 4], + [5, 6]]) + >>> np.compress([False, True], a, axis=1) + array([[2], + [4], + [6]]) + + Working on the flattened array does not return slices along an axis but + selects elements. + + >>> np.compress([False, True], a) + array([2]) + + """ + return _wrapfunc(a, 'compress', condition, axis=axis, out=out) + + +def _clip_dispatcher(a, a_min=None, a_max=None, out=None, *, min=None, + max=None, **kwargs): + return (a, a_min, a_max, out, min, max) + + +@array_function_dispatch(_clip_dispatcher) +def clip(a, a_min=np._NoValue, a_max=np._NoValue, out=None, *, + min=np._NoValue, max=np._NoValue, **kwargs): + """ + Clip (limit) the values in an array. + + Given an interval, values outside the interval are clipped to + the interval edges. For example, if an interval of ``[0, 1]`` + is specified, values smaller than 0 become 0, and values larger + than 1 become 1. + + Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``. 
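+
+    For scalar bounds, that equivalence reads, as a small sketch::
+
+        np.minimum(8, np.maximum(a, 1))   # same values as np.clip(a, 1, 8)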
+ + No check is performed to ensure ``a_min < a_max``. + + Parameters + ---------- + a : array_like + Array containing elements to clip. + a_min, a_max : array_like or None + Minimum and maximum value. If ``None``, clipping is not performed on + the corresponding edge. If both ``a_min`` and ``a_max`` are ``None``, + the elements of the returned array stay the same. Both are broadcasted + against ``a``. + out : ndarray, optional + The results will be placed in this array. It may be the input + array for in-place clipping. `out` must be of the right shape + to hold the output. Its type is preserved. + min, max : array_like or None + Array API compatible alternatives for ``a_min`` and ``a_max`` + arguments. Either ``a_min`` and ``a_max`` or ``min`` and ``max`` + can be passed at the same time. Default: ``None``. + + .. versionadded:: 2.1.0 + **kwargs + For other keyword-only arguments, see the + :ref:`ufunc docs `. + + Returns + ------- + clipped_array : ndarray + An array with the elements of `a`, but where values + < `a_min` are replaced with `a_min`, and those > `a_max` + with `a_max`. + + See Also + -------- + :ref:`ufuncs-output-type` + + Notes + ----- + When `a_min` is greater than `a_max`, `clip` returns an + array in which all values are equal to `a_max`, + as shown in the second example. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, 1, 8) + array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8]) + >>> np.clip(a, 8, 1) + array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) + >>> np.clip(a, 3, 6, out=a) + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a + array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6]) + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8) + array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) + + """ + if a_min is np._NoValue and a_max is np._NoValue: + a_min = None if min is np._NoValue else min + a_max = None if max is np._NoValue else max + elif a_min is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_min'") + elif a_max is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_max'") + elif min is not np._NoValue or max is not np._NoValue: + raise ValueError("Passing `min` or `max` keyword argument when " + "`a_min` and `a_max` are provided is forbidden.") + + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) + + +def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_sum_dispatcher) +def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Sum of array elements over a given axis. + + Parameters + ---------- + a : array_like + Elements to sum. + axis : None or int or tuple of ints, optional + Axis or axes along which a sum is performed. The default, + axis=None, will sum all of the elements of the input array. If + axis is negative it counts from the last to the first axis. If + axis is a tuple of ints, a sum is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + dtype : dtype, optional + The type of the returned array and of the accumulator in which the + elements are summed. The dtype of `a` is used by default unless `a` + has an integer dtype of less precision than the default platform + integer. 
In that case, if `a` is signed then the platform integer + is used while if `a` is unsigned then an unsigned integer of the + same precision as the platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `sum` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + Starting value for the sum. See `~numpy.ufunc.reduce` for details. + where : array_like of bool, optional + Elements to include in the sum. See `~numpy.ufunc.reduce` for details. + + Returns + ------- + sum_along_axis : ndarray + An array with the same shape as `a`, with the specified + axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar + is returned. If an output array is specified, a reference to + `out` is returned. + + See Also + -------- + ndarray.sum : Equivalent method. + add: ``numpy.add.reduce`` equivalent function. + cumsum : Cumulative sum of array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + + mean, average + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + The sum of an empty array is the neutral element 0: + + >>> np.sum([]) + 0.0 + + For floating point numbers the numerical precision of sum (and + ``np.add.reduce``) is in general limited by directly adding each number + individually to the result causing rounding errors in every step. + However, often numpy will use a numerically better approach (partial + pairwise summation) leading to improved precision in many use-cases. + This improved precision is always provided when no ``axis`` is given. + When ``axis`` is given, it will depend on which axis is summed. + Technically, to provide the best speed possible, the improved precision + is only used when the summation is along the fast axis in memory. + Note that the exact precision may vary depending on other parameters. + In contrast to NumPy, Python's ``math.fsum`` function uses a slower but + more precise approach to summation. + Especially when summing a large number of lower precision floating point + numbers, such as ``float32``, numerical errors can become significant. + In such cases it can be advisable to use `dtype="float64"` to use a higher + precision for the output. 
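+
+    As a sketch of that advice, the wider accumulator can be requested
+    explicitly::
+
+        np.sum(x, dtype="float64")   # accumulate float32 data in float64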
+ + Examples + -------- + >>> import numpy as np + >>> np.sum([0.5, 1.5]) + 2.0 + >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) + np.int32(1) + >>> np.sum([[0, 1], [0, 5]]) + 6 + >>> np.sum([[0, 1], [0, 5]], axis=0) + array([0, 6]) + >>> np.sum([[0, 1], [0, 5]], axis=1) + array([1, 5]) + >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1) + array([1., 5.]) + + If the accumulator is too small, overflow occurs: + + >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8) + np.int8(-128) + + You can also start the sum with a value other than zero: + + >>> np.sum([10], initial=5) + 15 + """ + if isinstance(a, _gentype): + # 2018-02-25, 1.15.0 + warnings.warn( + "Calling np.sum(generator) is deprecated, and in the future will " + "give a different result. Use np.sum(np.fromiter(generator)) or " + "the python sum builtin instead.", + DeprecationWarning, stacklevel=2 + ) + + res = _sum_(a) + if out is not None: + out[...] = res + return out + return res + + return _wrapreduction( + a, np.add, 'sum', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where + ) + + +def _any_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=np._NoValue): + return (a, where, out) + + +@array_function_dispatch(_any_dispatcher) +def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether any array element along a given axis evaluates to True. + + Returns single boolean if `axis` is ``None`` + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical OR reduction is performed. + The default (``axis=None``) is to perform a logical OR over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output and its type is preserved + (e.g., if it is of type float, then it will remain so, returning + 1.0 for True and 0.0 for False, regardless of the type of `a`). + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `any` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for any `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + any : bool or ndarray + A new boolean or `ndarray` is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.any : equivalent method + + all : Test whether all elements along a given axis evaluate to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity evaluate + to `True` because these are not equal to zero. + + .. versionchanged:: 2.0 + Before NumPy 2.0, ``any`` did not return booleans for object dtype + input arrays. 
+ This behavior is still available via ``np.logical_or.reduce``. + + Examples + -------- + >>> import numpy as np + >>> np.any([[True, False], [True, True]]) + True + + >>> np.any([[True, False, True ], + ... [False, False, False]], axis=0) + array([ True, False, True]) + + >>> np.any([-1, 0, 5]) + True + + >>> np.any([[np.nan], [np.inf]], axis=1, keepdims=True) + array([[ True], + [ True]]) + + >>> np.any([[True, False], [False, False]], where=[[False], [True]]) + False + + >>> a = np.array([[1, 0, 0], + ... [0, 0, 1], + ... [0, 0, 0]]) + >>> np.any(a, axis=0) + array([ True, False, True]) + >>> np.any(a, axis=1) + array([ True, True, False]) + + >>> o=np.array(False) + >>> z=np.any([-1, 4, 5], out=o) + >>> z, o + (array(True), array(True)) + >>> # Check now that z is a reference to o + >>> z is o + True + >>> id(z), id(o) # identity of z and o # doctest: +SKIP + (191614240, 191614240) + + """ + return _wrapreduction_any_all(a, np.logical_or, 'any', axis, out, + keepdims=keepdims, where=where) + + +def _all_dispatcher(a, axis=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_all_dispatcher) +def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): + """ + Test whether all array elements along a given axis evaluate to True. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : None or int or tuple of ints, optional + Axis or axes along which a logical AND reduction is performed. + The default (``axis=None``) is to perform a logical AND over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. If this + is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : ndarray, optional + Alternate output array in which to place the result. + It must have the same shape as the expected output and its + type is preserved (e.g., if ``dtype(out)`` is float, the result + will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` + for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `all` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + where : array_like of bool, optional + Elements to include in checking for all `True` values. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.20.0 + + Returns + ------- + all : ndarray, bool + A new boolean or array is returned unless `out` is specified, + in which case a reference to `out` is returned. + + See Also + -------- + ndarray.all : equivalent method + + any : Test whether any element along a given axis evaluates to True. + + Notes + ----- + Not a Number (NaN), positive infinity and negative infinity + evaluate to `True` because these are not equal to zero. + + .. versionchanged:: 2.0 + Before NumPy 2.0, ``all`` did not return booleans for object dtype + input arrays. + This behavior is still available via ``np.logical_and.reduce``. 
+ + Examples + -------- + >>> import numpy as np + >>> np.all([[True,False],[True,True]]) + False + + >>> np.all([[True,False],[True,True]], axis=0) + array([ True, False]) + + >>> np.all([-1, 4, 5]) + True + + >>> np.all([1.0, np.nan]) + True + + >>> np.all([[True, True], [False, True]], where=[[True], [False]]) + True + + >>> o=np.array(False) + >>> z=np.all([-1, 4, 5], out=o) + >>> id(z), id(o), z + (28293632, 28293632, array(True)) # may vary + + """ + return _wrapreduction_any_all(a, np.logical_and, 'all', axis, out, + keepdims=keepdims, where=where) + + +def _cumulative_func(x, func, axis, dtype, out, include_initial): + x = np.atleast_1d(x) + x_ndim = x.ndim + if axis is None: + if x_ndim >= 2: + raise ValueError("For arrays which have more than one dimension " + "``axis`` argument is required.") + axis = 0 + + if out is not None and include_initial: + item = [slice(None)] * x_ndim + item[axis] = slice(1, None) + func.accumulate(x, axis=axis, dtype=dtype, out=out[tuple(item)]) + item[axis] = 0 + out[tuple(item)] = func.identity + return out + + res = func.accumulate(x, axis=axis, dtype=dtype, out=out) + if include_initial: + initial_shape = list(x.shape) + initial_shape[axis] = 1 + res = np.concat( + [np.full_like(res, func.identity, shape=initial_shape), res], + axis=axis, + ) + + return res + + +def _cumulative_prod_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_prod_dispatcher) +def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative product of elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumprod`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If ``dtype`` is not specified, it + defaults to the dtype of ``x``, unless ``x`` has an integer dtype + with a precision less than that of the default platform integer. + In that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (ones) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_prod_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> np.cumulative_prod(a) # intermediate results 1, 1*2 + ... 
# total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_prod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of ``b``: + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_prod(b, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of ``b``: + + >>> np.cumulative_prod(b, axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial) + + +def _cumulative_sum_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_sum_dispatcher) +def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative sum of the elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumsum`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If ``dtype`` is not specified, it defaults + to the dtype of ``x``, unless ``x`` has an integer dtype with + a precision less than that of the default platform integer. + In that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (zeros) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_sum_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + See Also + -------- + sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumulative_sum(a)[-1]`` may not be equal to ``sum(a)`` for + floating-point values since ``sum`` may use a pairwise summation routine, + reducing the roundoff-error. See `sum` for more information. 
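+
+    With ``include_initial=True`` the additive identity is prepended along
+    ``axis``, so the output is one element longer than the input (a short
+    sketch of the behavior described above):
+
+    >>> np.cumulative_sum(np.array([1, 2, 3]), include_initial=True)
+    array([0, 1, 3, 6])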
+ + Examples + -------- + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> a + array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_sum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_sum(b,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumulative_sum(b,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumulative_sum(c)[-1]`` may not be equal to ``sum(c)`` + + >>> c = np.array([1, 2e-9, 3e-9] * 1000000) + >>> np.cumulative_sum(c)[-1] + 1000000.0050045159 + >>> c.sum() + 1000000.0050000029 + + """ + return _cumulative_func(x, um.add, axis, dtype, out, include_initial) + + +def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumsum_dispatcher) +def cumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of the elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. + + Returns + ------- + cumsum_along_axis : ndarray. + A new array holding the result is returned unless `out` is + specified, in which case a reference to `out` is returned. The + result has the same size as `a`, and the same shape as `a` if + `axis` is not None or `a` is a 1-d array. + + See Also + -------- + cumulative_sum : Array API compatible alternative for ``cumsum``. + sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point + values since ``sum`` may use a pairwise summation routine, reducing + the roundoff-error. See `sum` for more information. 
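+
+    Unlike ``cumulative_sum``, ``cumsum`` flattens its input when no ``axis``
+    is given; ``cumulative_sum`` instead requires ``axis`` for arrays with
+    more than one dimension. A short sketch:
+
+    >>> b = np.array([[1, 2], [3, 4]])
+    >>> np.cumsum(b)  # flattened cumulative sum
+    array([ 1,  3,  6, 10])
+    >>> np.cumulative_sum(b)  # axis is required for ndim > 1
+    Traceback (most recent call last):
+        ...
+    ValueError: For arrays which have more than one dimension ``axis`` argument is required.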
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3], [4,5,6]]) + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.cumsum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumsum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumsum(b)[-1]`` may not be equal to ``sum(b)`` + + >>> b = np.array([1, 2e-9, 3e-9] * 1000000) + >>> b.cumsum()[-1] + 1000000.0050045159 + >>> b.sum() + 1000000.0050000029 + + """ + return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out) + + +def _ptp_dispatcher(a, axis=None, out=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_ptp_dispatcher) +def ptp(a, axis=None, out=None, keepdims=np._NoValue): + """ + Range of values (maximum - minimum) along an axis. + + The name of the function comes from the acronym for 'peak to peak'. + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `numpy.int8`, `numpy.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + a : array_like + Input values. + axis : None or int or tuple of ints, optional + Axis along which to find the peaks. By default, flatten the + array. `axis` may be negative, in + which case it counts from the last to the first axis. + If this is a tuple of ints, a reduction is performed on multiple + axes, instead of a single axis or all the axes as before. + out : array_like + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type of the output values will be cast if necessary. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `ptp` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + Returns + ------- + ptp : ndarray or scalar + The range of a given array - `scalar` if array is one-dimensional + or a new array holding the result along the given axis + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[4, 9, 2, 10], + ... [6, 9, 7, 12]]) + + >>> np.ptp(x, axis=1) + array([8, 6]) + + >>> np.ptp(x, axis=0) + array([2, 0, 5, 2]) + + >>> np.ptp(x) + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.array([[1, 127], + ... [0, 127], + ... [-1, 127], + ... 
[-2, 127]], dtype=np.int8) + >>> np.ptp(y, axis=1) + array([ 126, 127, -128, -127], dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> np.ptp(y, axis=1).view(np.uint8) + array([126, 127, 128, 129], dtype=uint8) + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + return _methods._ptp(a, axis=axis, out=out, **kwargs) + + +def _max_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + +@array_function_dispatch(_max_dispatcher) +@set_module('numpy') +def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. If this is a tuple of ints, the maximum is selected over + multiple axes, instead of a single axis or all the axes as before. + + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the ``max`` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The minimum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + where : array_like of bool, optional + Elements to compare for the maximum. See `~numpy.ufunc.reduce` + for details. + + Returns + ------- + max : ndarray or scalar + Maximum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is an int, the result is an array of dimension + ``a.ndim - 1``. If `axis` is a tuple, the result is an array of + dimension ``a.ndim - len(axis)``. + + See Also + -------- + amin : + The minimum value of an array along a given axis, propagating any NaNs. + nanmax : + The maximum value of an array along a given axis, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + argmax : + Return the indices of the maximum values. + + nanmin, minimum, fmin + + Notes + ----- + NaN values are propagated, that is if at least one item is NaN, the + corresponding max value will be NaN as well. To ignore NaN values + (MATLAB behavior), please use nanmax. + + Don't use `~numpy.max` for element-wise comparison of 2 arrays; when + ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than + ``max(a, axis=0)``. 
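+
+    A short sketch of the element-wise case mentioned above:
+
+    >>> a = np.array([[1, 5], [4, 2]])
+    >>> np.maximum(a[0], a[1])  # element-wise maximum of the two rows
+    array([4, 5])
+    >>> np.max(a, axis=0)  # same values, but slower for this pattern
+    array([4, 5])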
+ + Examples + -------- + >>> import numpy as np + >>> a = np.arange(4).reshape((2,2)) + >>> a + array([[0, 1], + [2, 3]]) + >>> np.max(a) # Maximum of the flattened array + 3 + >>> np.max(a, axis=0) # Maxima along the first axis + array([2, 3]) + >>> np.max(a, axis=1) # Maxima along the second axis + array([1, 3]) + >>> np.max(a, where=[False, True], initial=-1, axis=0) + array([-1, 3]) + >>> b = np.arange(5, dtype=float) + >>> b[2] = np.nan + >>> np.max(b) + np.float64(nan) + >>> np.max(b, where=~np.isnan(b), initial=-1) + 4.0 + >>> np.nanmax(b) + 4.0 + + You can use an initial value to compute the maximum of an empty slice, or + to initialize it to a different value: + + >>> np.max([[-50], [10]], axis=-1, initial=0) + array([ 0, 10]) + + Notice that the initial value is used as one of the elements for which the + maximum is determined, unlike for the default argument Python's max + function, which is only used for empty iterables. + + >>> np.max([5], initial=6) + 6 + >>> max([5], default=6) + 5 + """ + return _wrapreduction(a, np.maximum, 'max', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +@array_function_dispatch(_max_dispatcher) +def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis. + + `amax` is an alias of `~numpy.max`. + + See Also + -------- + max : alias of this function + ndarray.max : equivalent method + """ + return _wrapreduction(a, np.maximum, 'max', axis, None, out, + keepdims=keepdims, initial=initial, where=where) + + +def _min_dispatcher(a, axis=None, out=None, keepdims=None, initial=None, + where=None): + return (a, out) + + +@array_function_dispatch(_min_dispatcher) +def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the minimum of an array or minimum along an axis. + + Parameters + ---------- + a : array_like + Input data. + axis : None or int or tuple of ints, optional + Axis or axes along which to operate. By default, flattened input is + used. + + If this is a tuple of ints, the minimum is selected over multiple axes, + instead of a single axis or all the axes as before. + out : ndarray, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + See :ref:`ufuncs-output-type` for more details. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the ``min`` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + + initial : scalar, optional + The maximum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + where : array_like of bool, optional + Elements to compare for the minimum. See `~numpy.ufunc.reduce` + for details. + + Returns + ------- + min : ndarray or scalar + Minimum of `a`. If `axis` is None, the result is a scalar value. + If `axis` is an int, the result is an array of dimension + ``a.ndim - 1``. If `axis` is a tuple, the result is an array of + dimension ``a.ndim - len(axis)``. 
+
+    See Also
+    --------
+    amax :
+        The maximum value of an array along a given axis, propagating any NaNs.
+    nanmin :
+        The minimum value of an array along a given axis, ignoring any NaNs.
+    minimum :
+        Element-wise minimum of two arrays, propagating any NaNs.
+    fmin :
+        Element-wise minimum of two arrays, ignoring any NaNs.
+    argmin :
+        Return the indices of the minimum values.
+
+    nanmax, maximum, fmax
+
+    Notes
+    -----
+    NaN values are propagated, that is if at least one item is NaN, the
+    corresponding min value will be NaN as well. To ignore NaN values
+    (MATLAB behavior), please use nanmin.
+
+    Don't use `~numpy.min` for element-wise comparison of 2 arrays; when
+    ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
+    ``min(a, axis=0)``.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.arange(4).reshape((2,2))
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> np.min(a)            # Minimum of the flattened array
+    0
+    >>> np.min(a, axis=0)    # Minima along the first axis
+    array([0, 1])
+    >>> np.min(a, axis=1)    # Minima along the second axis
+    array([0, 2])
+    >>> np.min(a, where=[False, True], initial=10, axis=0)
+    array([10,  1])
+
+    >>> b = np.arange(5, dtype=float)
+    >>> b[2] = np.nan
+    >>> np.min(b)
+    np.float64(nan)
+    >>> np.min(b, where=~np.isnan(b), initial=10)
+    0.0
+    >>> np.nanmin(b)
+    0.0
+
+    >>> np.min([[-50], [10]], axis=-1, initial=0)
+    array([-50,   0])
+
+    Notice that the initial value is used as one of the elements for which the
+    minimum is determined, unlike for the default argument Python's min
+    function, which is only used for empty iterables.
+
+    Notice that this isn't the same as Python's ``default`` argument.
+
+    >>> np.min([6], initial=5)
+    5
+    >>> min([6], default=5)
+    6
+    """
+    return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+@array_function_dispatch(_min_dispatcher)
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+         where=np._NoValue):
+    """
+    Return the minimum of an array or minimum along an axis.
+
+    `amin` is an alias of `~numpy.min`.
+
+    See Also
+    --------
+    min : alias of this function
+    ndarray.min : equivalent method
+    """
+    return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+                          keepdims=keepdims, initial=initial, where=where)
+
+
+def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+                     initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+         initial=np._NoValue, where=np._NoValue):
+    """
+    Return the product of array elements over a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which a product is performed. The default,
+        axis=None, will calculate the product of all the elements in the
+        input array. If axis is negative it counts from the last to the
+        first axis.
+
+        If axis is a tuple of ints, a product is performed on all of the
+        axes specified in the tuple instead of a single axis or all the
+        axes as before.
+    dtype : dtype, optional
+        The type of the returned array, as well as of the accumulator in
+        which the elements are multiplied. The dtype of `a` is used by
+        default unless `a` has an integer dtype of less precision than the
+        default platform integer. In that case, if `a` is signed then the
+        platform integer is used while if `a` is unsigned then an unsigned
+        integer of the same precision as the platform integer is used.
+ out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in the + result as dimensions with size one. With this option, the result + will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `prod` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` + for details. + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` + for details. + + Returns + ------- + product_along_axis : ndarray, see `dtype` parameter above. + An array shaped as `a` but with the specified axis removed. + Returns a reference to `out` if specified. + + See Also + -------- + ndarray.prod : equivalent method + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. That means that, on a 32-bit platform: + + >>> x = np.array([536870910, 536870910, 536870910, 536870910]) + >>> np.prod(x) + 16 # may vary + + The product of an empty array is the neutral element 1: + + >>> np.prod([]) + 1.0 + + Examples + -------- + By default, calculate the product of all elements: + + >>> import numpy as np + >>> np.prod([1.,2.]) + 2.0 + + Even when the input array is two-dimensional: + + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> np.prod(a) + 24.0 + + But we can also specify the axis over which to multiply: + + >>> np.prod(a, axis=1) + array([ 2., 12.]) + >>> np.prod(a, axis=0) + array([3., 8.]) + + Or select specific elements to include: + + >>> np.prod([1., np.nan, 3.], where=[True, False, True]) + 3.0 + + If the type of `x` is unsigned, then the output type is + the unsigned platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.uint8) + >>> np.prod(x).dtype == np.uint + True + + If `x` is of a signed integer type, then the output type + is the default platform integer: + + >>> x = np.array([1, 2, 3], dtype=np.int8) + >>> np.prod(x).dtype == int + True + + You can also start the product with a value other than one: + + >>> np.prod([1, 2], initial=5) + 10 + """ + return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, + keepdims=keepdims, initial=initial, where=where) + + +def _cumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_cumprod_dispatcher) +def cumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of elements along a given axis. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. 
It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + cumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case a reference to out is returned. + + See Also + -------- + cumulative_prod : Array API compatible alternative for ``cumprod``. + :ref:`ufuncs-output-type` + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1,2,3]) + >>> np.cumprod(a) # intermediate results 1, 1*2 + ... # total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumprod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of `a`: + + >>> np.cumprod(a, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of `a`: + + >>> np.cumprod(a,axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out) + + +def _ndim_dispatcher(a): + return (a,) + + +@array_function_dispatch(_ndim_dispatcher) +def ndim(a): + """ + Return the number of dimensions of an array. + + Parameters + ---------- + a : array_like + Input array. If it is not already an ndarray, a conversion is + attempted. + + Returns + ------- + number_of_dimensions : int + The number of dimensions in `a`. Scalars are zero-dimensional. + + See Also + -------- + ndarray.ndim : equivalent method + shape : dimensions of array + ndarray.shape : dimensions of array + + Examples + -------- + >>> import numpy as np + >>> np.ndim([[1,2,3],[4,5,6]]) + 2 + >>> np.ndim(np.array([[1,2,3],[4,5,6]])) + 2 + >>> np.ndim(1) + 0 + + """ + try: + return a.ndim + except AttributeError: + return asarray(a).ndim + + +def _size_dispatcher(a, axis=None): + return (a,) + + +@array_function_dispatch(_size_dispatcher) +def size(a, axis=None): + """ + Return the number of elements along a given axis. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which the elements are counted. By default, give + the total number of elements. + + Returns + ------- + element_count : int + Number of elements along the specified axis. + + See Also + -------- + shape : dimensions of array + ndarray.shape : dimensions of array + ndarray.size : number of elements in array + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1,2,3],[4,5,6]]) + >>> np.size(a) + 6 + >>> np.size(a,1) + 3 + >>> np.size(a,0) + 2 + + """ + if axis is None: + try: + return a.size + except AttributeError: + return asarray(a).size + else: + try: + return a.shape[axis] + except AttributeError: + return asarray(a).shape[axis] + + +def _round_dispatcher(a, decimals=None, out=None): + return (a, out) + + +@array_function_dispatch(_round_dispatcher) +def round(a, decimals=0, out=None): + """ + Evenly round to the given number of decimals. + + Parameters + ---------- + a : array_like + Input data. + decimals : int, optional + Number of decimal places to round to (default: 0). If + decimals is negative, it specifies the number of positions to + the left of the decimal point. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output, but the type of the output + values will be cast if necessary. 
See :ref:`ufuncs-output-type` + for more details. + + Returns + ------- + rounded_array : ndarray + An array of the same type as `a`, containing the rounded values. + Unless `out` was specified, a new array is created. A reference to + the result is returned. + + The real and imaginary parts of complex numbers are rounded + separately. The result of rounding a float is a float. + + See Also + -------- + ndarray.round : equivalent method + around : an alias for this function + ceil, fix, floor, rint, trunc + + + Notes + ----- + For values exactly halfway between rounded decimal values, NumPy + rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, + -0.5 and 0.5 round to 0.0, etc. + + ``np.round`` uses a fast but sometimes inexact algorithm to round + floating-point datatypes. For positive `decimals` it is equivalent to + ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has + error due to the inexact representation of decimal fractions in the IEEE + floating point standard [1]_ and errors introduced when scaling by powers + of ten. For instance, note the extra "1" in the following: + + >>> np.round(56294995342131.5, 3) + 56294995342131.51 + + If your goal is to print such values with a fixed number of decimals, it is + preferable to use numpy's float printing routines to limit the number of + printed decimals: + + >>> np.format_float_positional(56294995342131.5, precision=3) + '56294995342131.5' + + The float printing routines use an accurate but much more computationally + demanding algorithm to compute the number of digits after the decimal + point. + + Alternatively, Python's builtin `round` function uses a more accurate + but slower algorithm for 64-bit floating point values: + + >>> round(56294995342131.5, 3) + 56294995342131.5 + >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997 + (16.06, 16.05) + + + References + ---------- + .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan, + https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF + + Examples + -------- + >>> import numpy as np + >>> np.round([0.37, 1.64]) + array([0., 2.]) + >>> np.round([0.37, 1.64], decimals=1) + array([0.4, 1.6]) + >>> np.round([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value + array([0., 2., 2., 4., 4.]) + >>> np.round([1,2,3,11], decimals=1) # ndarray of ints is returned + array([ 1, 2, 3, 11]) + >>> np.round([1,2,3,11], decimals=-1) + array([ 0, 0, 0, 10]) + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +@array_function_dispatch(_round_dispatcher) +def around(a, decimals=0, out=None): + """ + Round an array to the given number of decimals. + + `around` is an alias of `~numpy.round`. + + See Also + -------- + ndarray.round : equivalent method + round : alias for this function + ceil, fix, floor, rint, trunc + + """ + return _wrapfunc(a, 'round', decimals=decimals, out=out) + + +def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *, + where=None): + return (a, where, out) + + +@array_function_dispatch(_mean_dispatcher) +def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, + where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. 
If `a` is not an
+        array, a conversion is attempted.
+    axis : None or int or tuple of ints, optional
+        Axis or axes along which the means are computed. The default is to
+        compute the mean of the flattened array.
+
+        If this is a tuple of ints, a mean is performed over multiple axes,
+        instead of a single axis or all the axes as before.
+    dtype : data-type, optional
+        Type to use in computing the mean. For integer inputs, the default
+        is `float64`; for floating point inputs, it is the same as the
+        input dtype.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
+        See :ref:`ufuncs-output-type` for more details.
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `mean` method of sub-classes of
+        `ndarray`, however any non-default value will be. If the
+        sub-class' method does not implement `keepdims` any
+        exceptions will be raised.
+
+    where : array_like of bool, optional
+        Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    m : ndarray, see dtype parameter above
+        If `out=None`, returns a new array containing the mean values,
+        otherwise a reference to the output array is returned.
+
+    See Also
+    --------
+    average : Weighted average
+    std, var, nanmean, nanstd, nanvar
+
+    Notes
+    -----
+    The arithmetic mean is the sum of the elements along the axis divided
+    by the number of elements.
+
+    Note that for floating-point input, the mean is computed using the
+    same precision the input has. Depending on the input data, this can
+    cause the results to be inaccurate, especially for `float32` (see
+    example below). Specifying a higher-precision accumulator using the
+    `dtype` keyword can alleviate this issue.
+
+    By default, `float16` results are computed using `float32` intermediates
+    for extra precision.
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.mean(a) + 2.5 + >>> np.mean(a, axis=0) + array([2., 3.]) + >>> np.mean(a, axis=1) + array([1.5, 3.5]) + + In single precision, `mean` can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.mean(a) + np.float32(0.54999924) + + Computing the mean in float64 is more accurate: + + >>> np.mean(a, dtype=np.float64) + 0.55000000074505806 # may vary + + Computing the mean in timedelta64 is available: + + >>> b = np.array([1, 3], dtype="timedelta64[D]") + >>> np.mean(b) + np.timedelta64(2,'D') + + Specifying a where argument: + + >>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]]) + >>> np.mean(a) + 12.0 + >>> np.mean(a, where=[[True], [False], [False]]) + 9.0 + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if type(a) is not mu.ndarray: + try: + mean = a.mean + except AttributeError: + pass + else: + return mean(axis=axis, dtype=dtype, out=out, **kwargs) + + return _methods._mean(a, axis=axis, dtype=dtype, + out=out, **kwargs) + + +def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, correction=None): + return (a, where, out, mean) + + +@array_function_dispatch(_std_dispatcher) +def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + r""" + Compute the standard deviation along the specified axis. + + Returns the standard deviation, a measure of the spread of a distribution, + of the array elements. The standard deviation is computed for the + flattened array by default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of these values. + axis : None or int or tuple of ints, optional + Axis or axes along which the standard deviation is computed. The + default is to compute the standard deviation of the flattened array. + If this is a tuple of ints, a standard deviation is performed over + multiple axes, instead of a single axis or all the axes as before. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it is + the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the calculated + values) will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + By default `ddof` is zero. See Notes for details about use of `ddof`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + If the default value is passed, then `keepdims` will not be + passed through to the `std` method of sub-classes of + `ndarray`, however any non-default value will be. If the + sub-class' method does not implement `keepdims` any + exceptions will be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. 
versionadded:: 1.20.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this std function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard deviation, + otherwise return a reference to the output array. + + See Also + -------- + var, mean, nanmean, nanstd, nanvar + :ref:`ufuncs-output-type` + + Notes + ----- + There are several common variants of the array standard deviation + calculation. Assuming the input `a` is a one-dimensional NumPy array + and ``mean`` is either provided as an argument or computed as + ``a.mean()``, NumPy computes the standard deviation of an array as:: + + N = len(a) + d2 = abs(a - mean)**2 # abs is for complex `a` + var = d2.sum() / (N - ddof) # note use of `ddof` + std = var**0.5 + + Different values of the argument `ddof` are useful in different + contexts. NumPy's default ``ddof=0`` corresponds with the expression: + + .. math:: + + \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N}} + + which is sometimes called the "population standard deviation" in the field + of statistics because it applies the definition of standard deviation to + `a` as if `a` were a complete population of possible observations. + + Many other libraries define the standard deviation of an array + differently, e.g.: + + .. math:: + + \sqrt{\frac{\sum_i{|a_i - \bar{a}|^2 }}{N - 1}} + + In statistics, the resulting quantity is sometimes called the "sample + standard deviation" because if `a` is a random sample from a larger + population, this calculation provides the square root of an unbiased + estimate of the variance of the population. The use of :math:`N-1` in the + denominator is often called "Bessel's correction" because it corrects for + bias (toward lower values) in the variance estimate introduced when the + sample mean of `a` is used in place of the true mean of the population. + The resulting estimate of the standard deviation is still biased, but less + than it would have been without the correction. For this quantity, use + ``ddof=1``. + + Note that, for complex numbers, `std` takes the absolute + value before squaring, so that the result is always real and nonnegative. + + For floating-point input, the standard deviation is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example below). + Specifying a higher-accuracy accumulator using the `dtype` keyword can + alleviate this issue. 
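+
+    A short sketch of the ``ddof`` variants discussed above:
+
+    >>> a = np.array([1., 2., 3., 4.])
+    >>> np.std(a)  # population standard deviation (ddof=0)
+    1.1180339887498949  # may vary
+    >>> np.std(a, ddof=1)  # sample standard deviation (Bessel's correction)
+    1.2909944487358056  # may vary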
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.std(a) + 1.1180339887498949 # may vary + >>> np.std(a, axis=0) + array([1., 1.]) + >>> np.std(a, axis=1) + array([0.5, 0.5]) + + In single precision, std() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.std(a) + np.float32(0.45000005) + + Computing the standard deviation in float64 is more accurate: + + >>> np.std(a, dtype=np.float64) + 0.44999999925494177 # may vary + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.std(a) + 2.614064523559687 # may vary + >>> np.std(a, where=[[True], [True], [False]]) + 2.0 + + Using the mean keyword to save computation time: + + >>> import numpy as np + >>> from timeit import timeit + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> mean = np.mean(a, axis=1, keepdims=True) + >>> + >>> g = globals() + >>> n = 10000 + >>> t1 = timeit("std = np.std(a, axis=1, mean=mean)", globals=g, number=n) + >>> t2 = timeit("std = np.std(a, axis=1)", globals=g, number=n) + >>> print(f'Percentage execution time saved {100*(t2-t1)/t2:.0f}%') + #doctest: +SKIP + Percentage execution time saved 30% + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if mean is not np._NoValue: + kwargs['mean'] = mean + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + if type(a) is not mu.ndarray: + try: + std = a.std + except AttributeError: + pass + else: + return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) + + +def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, correction=None): + return (a, where, out, mean) + + +@array_function_dispatch(_var_dispatcher) +def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, + where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + r""" + Compute the variance along the specified axis. + + Returns the variance of the array elements, a measure of the spread of a + distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which the variance is computed. The default is to + compute the variance of the flattened array. + If this is a tuple of ints, a variance is performed over multiple axes, + instead of a single axis or all the axes as before. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : {int, float}, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of elements. By + default `ddof` is zero. See notes for details about use of `ddof`. 
+
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the input array.
+
+        If the default value is passed, then `keepdims` will not be
+        passed through to the `var` method of sub-classes of
+        `ndarray`, however any non-default value will be. If the
+        sub-class' method does not implement `keepdims` any
+        exceptions will be raised.
+    where : array_like of bool, optional
+        Elements to include in the variance. See `~numpy.ufunc.reduce` for
+        details.
+
+        .. versionadded:: 1.20.0
+
+    mean : array_like, optional
+        Provide the mean to prevent its recalculation. The mean should have
+        a shape as if it was calculated with ``keepdims=True``.
+        The axis for the calculation of the mean should be the same as used in
+        the call to this var function.
+
+        .. versionadded:: 2.0.0
+
+    correction : {int, float}, optional
+        Array API compatible name for the ``ddof`` parameter. Only one of them
+        can be provided at the same time.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    variance : ndarray, see dtype parameter above
+        If ``out=None``, returns a new array containing the variance;
+        otherwise, a reference to the output array is returned.
+
+    See Also
+    --------
+    std, mean, nanmean, nanstd, nanvar
+    :ref:`ufuncs-output-type`
+
+    Notes
+    -----
+    There are several common variants of the array variance calculation.
+    Assuming the input `a` is a one-dimensional NumPy array and ``mean`` is
+    either provided as an argument or computed as ``a.mean()``, NumPy
+    computes the variance of an array as::
+
+        N = len(a)
+        d2 = abs(a - mean)**2  # abs is for complex `a`
+        var = d2.sum() / (N - ddof)  # note use of `ddof`
+
+    Different values of the argument `ddof` are useful in different
+    contexts. NumPy's default ``ddof=0`` corresponds with the expression:
+
+    .. math::
+
+        \frac{\sum_i{|a_i - \bar{a}|^2 }}{N}
+
+    which is sometimes called the "population variance" in the field of
+    statistics because it applies the definition of variance to `a` as if `a`
+    were a complete population of possible observations.
+
+    Many other libraries define the variance of an array differently, e.g.:
+
+    .. math::
+
+        \frac{\sum_i{|a_i - \bar{a}|^2}}{N - 1}
+
+    In statistics, the resulting quantity is sometimes called the "sample
+    variance" because if `a` is a random sample from a larger population,
+    this calculation provides an unbiased estimate of the variance of the
+    population. The use of :math:`N-1` in the denominator is often called
+    "Bessel's correction" because it corrects for bias (toward lower values)
+    in the variance estimate introduced when the sample mean of `a` is used
+    in place of the true mean of the population. For this quantity, use
+    ``ddof=1``.
+
+    Note that for complex numbers, the absolute value is taken before
+    squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the variance is computed using the same
+    precision the input has. Depending on the input data, this can cause
+    the results to be inaccurate, especially for `float32` (see example
+    below). Specifying a higher-accuracy accumulator using the ``dtype``
+    keyword can alleviate this issue.
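+
+    A short sketch of the ``correction`` alias described above; it behaves
+    exactly like ``ddof``:
+
+    >>> a = np.array([1., 2., 3., 4.])
+    >>> np.var(a, ddof=1)
+    1.6666666666666667
+    >>> np.var(a, correction=1)  # Array API spelling of ddof
+    1.6666666666666667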
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.var(a) + 1.25 + >>> np.var(a, axis=0) + array([1., 1.]) + >>> np.var(a, axis=1) + array([0.25, 0.25]) + + In single precision, var() can be inaccurate: + + >>> a = np.zeros((2, 512*512), dtype=np.float32) + >>> a[0, :] = 1.0 + >>> a[1, :] = 0.1 + >>> np.var(a) + np.float32(0.20250003) + + Computing the variance in float64 is more accurate: + + >>> np.var(a, dtype=np.float64) + 0.20249999932944759 # may vary + >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 + 0.2025 + + Specifying a where argument: + + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> np.var(a) + 6.833333333333333 # may vary + >>> np.var(a, where=[[True], [True], [False]]) + 4.0 + + Using the mean keyword to save computation time: + + >>> import numpy as np + >>> from timeit import timeit + >>> + >>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]]) + >>> mean = np.mean(a, axis=1, keepdims=True) + >>> + >>> g = globals() + >>> n = 10000 + >>> t1 = timeit("var = np.var(a, axis=1, mean=mean)", globals=g, number=n) + >>> t2 = timeit("var = np.var(a, axis=1)", globals=g, number=n) + >>> print(f'Percentage execution time saved {100*(t2-t1)/t2:.0f}%') + #doctest: +SKIP + Percentage execution time saved 32% + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if where is not np._NoValue: + kwargs['where'] = where + if mean is not np._NoValue: + kwargs['mean'] = mean + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + if type(a) is not mu.ndarray: + try: + var = a.var + + except AttributeError: + pass + else: + return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs) + + return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs) diff --git a/python/numpy/_core/fromnumeric.pyi b/python/numpy/_core/fromnumeric.pyi new file mode 100644 index 000000000..f0f83093c --- /dev/null +++ b/python/numpy/_core/fromnumeric.pyi @@ -0,0 +1,1750 @@ +# ruff: noqa: ANN401 +from collections.abc import Sequence +from typing import ( + Any, + Literal, + Never, + Protocol, + SupportsIndex, + TypeAlias, + TypeVar, + overload, + type_check_only, +) + +from _typeshed import Incomplete +from typing_extensions import deprecated + +import numpy as np +from numpy import ( + _AnyShapeT, + _CastingKind, + _ModeKind, + _OrderACF, + _OrderKACF, + _PartitionKind, + _SortKind, + _SortSide, + complexfloating, + float16, + floating, + generic, + int64, + int_, + intp, + object_, + timedelta64, + uint64, +) +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _BoolLike_co, + _ComplexLike_co, + _DTypeLike, + _IntLike_co, + _NestedSequence, + _NumberLike_co, + _ScalarLike_co, + _ShapeLike, +) + +__all__ = [ + "all", + "amax", + "amin", + "any", + "argmax", + "argmin", + "argpartition", + "argsort", + "around", + "choose", + "clip", + "compress", + "cumprod", + "cumsum", + "cumulative_prod", + "cumulative_sum", + "diagonal", + "mean", + "max", + "min", + "matrix_transpose", + "ndim", + "nonzero", + "partition", + "prod", + "ptp", + "put", + "ravel", + "repeat", + "reshape", + "resize", + "round", + "searchsorted", + "shape", + "size", + "sort", + 
"squeeze", + "std", + "sum", + "swapaxes", + "take", + "trace", + "transpose", + "var", +] + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True) +_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool]) + +@type_check_only +class _SupportsShape(Protocol[_ShapeT_co]): + # NOTE: it matters that `self` is positional only + @property + def shape(self, /) -> _ShapeT_co: ... + +# a "sequence" that isn't a string, bytes, bytearray, or memoryview +_T = TypeVar("_T") +_PyArray: TypeAlias = list[_T] | tuple[_T, ...] +# `int` also covers `bool` +_PyScalar: TypeAlias = complex | bytes | str + +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _IntLike_co, + axis: None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> _ScalarT: ... +@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> Any: ... +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[_ScalarT]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[Any]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = ..., +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + *, + out: _ArrayT, + mode: _ModeKind = ..., +) -> _ArrayT: ... + +@overload +def reshape( # shape: index + a: _ArrayLike[_ScalarT], + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def reshape( # shape: (int, ...) @ _AnyShapeT + a: _ArrayLike[_ScalarT], + /, + shape: _AnyShapeT, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload # shape: Sequence[index] +def reshape( + a: _ArrayLike[_ScalarT], + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> NDArray[_ScalarT]: ... +@overload # shape: index +def reshape( + a: ArrayLike, + /, + shape: SupportsIndex, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[tuple[int], np.dtype]: ... +@overload +def reshape( # shape: (int, ...) @ _AnyShapeT + a: ArrayLike, + /, + shape: _AnyShapeT, + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> np.ndarray[_AnyShapeT, np.dtype]: ... +@overload # shape: Sequence[index] +def reshape( + a: ArrayLike, + /, + shape: Sequence[SupportsIndex], + order: _OrderACF = "C", + *, + copy: bool | None = None, +) -> NDArray[Any]: ... +@overload +@deprecated( + "`newshape` keyword argument is deprecated, " + "use `shape=...` or pass shape positionally instead. " + "(deprecated in NumPy 2.1)", +) +def reshape( + a: ArrayLike, + /, + shape: None = None, + order: _OrderACF = "C", + *, + newshape: _ShapeLike, + copy: bool | None = None, +) -> NDArray[Any]: ... + +@overload +def choose( + a: _IntLike_co, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., +) -> Any: ... 
+@overload +def choose( + a: _ArrayLikeInt_co, + choices: _ArrayLike[_ScalarT], + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[_ScalarT]: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: None = ..., + mode: _ModeKind = ..., +) -> NDArray[Any]: ... +@overload +def choose( + a: _ArrayLikeInt_co, + choices: ArrayLike, + out: _ArrayT, + mode: _ModeKind = ..., +) -> _ArrayT: ... + +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> NDArray[_ScalarT]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, +) -> NDArray[Any]: ... + +def put( + a: NDArray[Any], + ind: _ArrayLikeInt_co, + v: ArrayLike, + mode: _ModeKind = ..., +) -> None: ... + +@overload +def swapaxes( + a: _ArrayLike[_ScalarT], + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> NDArray[_ScalarT]: ... +@overload +def swapaxes( + a: ArrayLike, + axis1: SupportsIndex, + axis2: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def transpose( + a: _ArrayLike[_ScalarT], + axes: _ShapeLike | None = ... +) -> NDArray[_ScalarT]: ... +@overload +def transpose( + a: ArrayLike, + axes: _ShapeLike | None = ... +) -> NDArray[Any]: ... + +@overload +def matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def matrix_transpose(x: ArrayLike, /) -> NDArray[Any]: ... + +# +@overload +def partition( + a: _ArrayLike[_ScalarT], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, +) -> NDArray[_ScalarT]: ... +@overload +def partition( + a: _ArrayLike[np.void], + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[np.void]: ... +@overload +def partition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[Any]: ... + +# +def argpartition( + a: ArrayLike, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, +) -> NDArray[intp]: ... + +# +@overload +def sort( + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: bool | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def sort( + a: ArrayLike, + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: bool | None = ..., +) -> NDArray[Any]: ... + +def argsort( + a: ArrayLike, + axis: SupportsIndex | None = ..., + kind: _SortKind | None = ..., + order: str | Sequence[str] | None = ..., + *, + stable: bool | None = ..., +) -> NDArray[intp]: ... + +@overload +def argmax( + a: ArrayLike, + axis: None = ..., + out: None = ..., + *, + keepdims: Literal[False] = ..., +) -> intp: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = ..., + out: None = ..., + *, + keepdims: bool = ..., +) -> Any: ... 
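+# NOTE: sketch of the `argmax` overloads above -- with the default
+# `axis=None` the flattened index comes back as an `np.intp` scalar, while an
+# explicit axis yields an index array:
+#
+#     a = np.array([[3, 1], [2, 4]])
+#     np.argmax(a)          # -> 3, index into the flattened array
+#     np.argmax(a, axis=0)  # -> array([0, 1])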
+@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None, + out: _BoolOrIntArrayT, + *, + keepdims: bool = ..., +) -> _BoolOrIntArrayT: ... +@overload +def argmax( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _BoolOrIntArrayT, + keepdims: bool = ..., +) -> _BoolOrIntArrayT: ... + +@overload +def argmin( + a: ArrayLike, + axis: None = ..., + out: None = ..., + *, + keepdims: Literal[False] = ..., +) -> intp: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = ..., + out: None = ..., + *, + keepdims: bool = ..., +) -> Any: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None, + out: _BoolOrIntArrayT, + *, + keepdims: bool = ..., +) -> _BoolOrIntArrayT: ... +@overload +def argmin( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + out: _BoolOrIntArrayT, + keepdims: bool = ..., +) -> _BoolOrIntArrayT: ... + +@overload +def searchsorted( + a: ArrayLike, + v: _ScalarLike_co, + side: _SortSide = ..., + sorter: _ArrayLikeInt_co | None = ..., # 1D int array +) -> intp: ... +@overload +def searchsorted( + a: ArrayLike, + v: ArrayLike, + side: _SortSide = ..., + sorter: _ArrayLikeInt_co | None = ..., # 1D int array +) -> NDArray[intp]: ... + +# +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ... +@overload +def resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ... +@overload +def resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ... +@overload +def resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ... +@overload +def resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ... + +@overload +def squeeze( + a: _ScalarT, + axis: _ShapeLike | None = ..., +) -> _ScalarT: ... +@overload +def squeeze( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def squeeze( + a: ArrayLike, + axis: _ShapeLike | None = ..., +) -> NDArray[Any]: ... + +@overload +def diagonal( + a: _ArrayLike[_ScalarT], + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., # >= 2D array +) -> NDArray[_ScalarT]: ... +@overload +def diagonal( + a: ArrayLike, + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., # >= 2D array +) -> NDArray[Any]: ... + +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> Any: ... +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex, + axis1: SupportsIndex, + axis2: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def trace( + a: ArrayLike, # >= 2D array + offset: SupportsIndex = ..., + axis1: SupportsIndex = ..., + axis2: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... + +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +@overload +def ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ... +@overload +def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ... 
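+# NOTE: `searchsorted` above encodes the same scalar/array split -- a scalar
+# needle returns `np.intp`, an array needle an `intp` array:
+#
+#     np.searchsorted([1, 2, 3, 4], 3)       # -> 2
+#     np.searchsorted([1, 2, 3, 4], [0, 5])  # -> array([0, 4])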
+@overload
+def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np.str_]: ...
+@overload
+def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ...
+@overload
+def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ...
+@overload
+def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ...
+@overload
+def ravel(
+    a: complex | _NestedSequence[complex],
+    order: _OrderKACF = "C",
+) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ...
+@overload
+def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ...
+
+def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ...
+
+# this prevents `Any` from being returned with Pyright
+@overload
+def shape(a: _SupportsShape[Never]) -> _AnyShape: ...
+@overload
+def shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ...
+@overload
+def shape(a: _PyScalar) -> tuple[()]: ...
+# `collections.abc.Sequence` can't be used here, since `bytes` and `str` are
+# subtypes of it, which would make the return types incompatible.
+@overload
+def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ...
+@overload
+def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ...
+# this overload will be skipped by typecheckers that don't support PEP 688
+@overload
+def shape(a: memoryview | bytearray) -> tuple[int]: ...
+@overload
+def shape(a: ArrayLike) -> _AnyShape: ...
+
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: _ArrayLike[_ScalarT],
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: ArrayLike,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+) -> NDArray[Any]: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: ArrayLike,
+    axis: SupportsIndex | None,
+    out: _ArrayT,
+) -> _ArrayT: ...
+@overload
+def compress(
+    condition: _ArrayLikeBool_co,  # 1D bool array
+    a: ArrayLike,
+    axis: SupportsIndex | None = ...,
+    *,
+    out: _ArrayT,
+) -> _ArrayT: ...
+
+@overload
+def clip(
+    a: _ScalarT,
+    a_min: ArrayLike | None,
+    a_max: ArrayLike | None,
+    out: None = ...,
+    *,
+    min: ArrayLike | None = ...,
+    max: ArrayLike | None = ...,
+    dtype: None = ...,
+    where: _ArrayLikeBool_co | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[str | None, ...] = ...,
+    casting: _CastingKind = ...,
+) -> _ScalarT: ...
+@overload
+def clip(
+    a: _ScalarLike_co,
+    a_min: ArrayLike | None,
+    a_max: ArrayLike | None,
+    out: None = ...,
+    *,
+    min: ArrayLike | None = ...,
+    max: ArrayLike | None = ...,
+    dtype: None = ...,
+    where: _ArrayLikeBool_co | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[str | None, ...] = ...,
+    casting: _CastingKind = ...,
+) -> Any: ...
+@overload
+def clip(
+    a: _ArrayLike[_ScalarT],
+    a_min: ArrayLike | None,
+    a_max: ArrayLike | None,
+    out: None = ...,
+    *,
+    min: ArrayLike | None = ...,
+    max: ArrayLike | None = ...,
+    dtype: None = ...,
+    where: _ArrayLikeBool_co | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    signature: str | tuple[str | None, ...] = ...,
+    casting: _CastingKind = ...,
+) -> NDArray[_ScalarT]: ...
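+# NOTE: sketch of the `clip` overloads so far -- a scalar input stays scalar
+# and an array input stays an array of the same scalar type:
+#
+#     np.clip(5, 0, 3)                    # -> 3
+#     np.clip(np.array([1, 5, 9]), 0, 6)  # -> array([1, 5, 6])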
+@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: None = ..., + *, + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: None = ..., + where: _ArrayLikeBool_co | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[str | None, ...] = ..., + casting: _CastingKind = ..., +) -> NDArray[Any]: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: _ArrayT, + *, + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: DTypeLike = ..., + where: _ArrayLikeBool_co | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[str | None, ...] = ..., + casting: _CastingKind = ..., +) -> _ArrayT: ... +@overload +def clip( + a: ArrayLike, + a_min: ArrayLike | None, + a_max: ArrayLike | None, + out: ArrayLike = ..., + *, + min: ArrayLike | None = ..., + max: ArrayLike | None = ..., + dtype: DTypeLike, + where: _ArrayLikeBool_co | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + signature: str | tuple[str | None, ...] = ..., + casting: _CastingKind = ..., +) -> Any: ... + +@overload +def sum( + a: _ArrayLike[_ScalarT], + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def sum( + a: _ArrayLike[_ScalarT], + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def sum( + a: ArrayLike, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def sum( + a: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... + +# keep in sync with `any` +@overload +def all( + a: ArrayLike | None, + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... 
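+# NOTE: the `sum` overloads above track `axis`, `initial` and `where`; an
+# illustrative sketch of the corresponding runtime behaviour:
+#
+#     np.sum([[1, 2], [3, 4]], axis=0)              # -> array([4, 6])
+#     np.sum([1, 2, 3], initial=10)                 # -> 16
+#     np.sum([1, 2, 3], where=[True, False, True])  # -> 4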
+@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def all( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# keep in sync with `all` +@overload +def any( + a: ArrayLike | None, + axis: None = None, + out: None = None, + keepdims: Literal[False, 0] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> np.bool: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + out: None = None, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def any( + a: ArrayLike | None, + axis: int | tuple[int, ...] | None = None, + *, + out: _ArrayT, + keepdims: _BoolLike_co | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... + +# +@overload +def cumsum( + a: _ArrayLike[_ScalarT], + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def cumsum( + a: ArrayLike, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def cumulative_sum( + x: _ArrayLike[_ScalarT], + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = ..., + dtype: _DTypeLike[_ScalarT], + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... 
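+# NOTE: `cumulative_sum` (the array-API spelling of `cumsum`) takes an
+# `include_initial` flag that prepends the additive identity, as typed above:
+#
+#     np.cumulative_sum([1, 2, 3])                        # -> array([1, 3, 6])
+#     np.cumulative_sum([1, 2, 3], include_initial=True)  # -> array([0, 1, 3, 6])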
+@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: _ArrayT, + include_initial: bool = ..., +) -> _ArrayT: ... + +@overload +def ptp( + a: _ArrayLike[_ScalarT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., +) -> _ScalarT: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None = ..., + out: None = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... +@overload +def ptp( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., +) -> _ArrayT: ... + +@overload +def amax( + a: _ArrayLike[_ScalarT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def amax( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... + +@overload +def amin( + a: _ArrayLike[_ScalarT], + axis: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def amin( + a: ArrayLike, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... + +# TODO: `np.prod()``: For object arrays `initial` does not necessarily +# have to be a numerical scalar. +# The only requirement is that it is compatible +# with the `.__mul__()` method(s) of the passed array's elements. + +# Note that the same situation holds for all wrappers around +# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`). +@overload +def prod( + a: _ArrayLikeBool_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> int_: ... +@overload +def prod( + a: _ArrayLikeUInt_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> uint64: ... +@overload +def prod( + a: _ArrayLikeInt_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> int64: ... +@overload +def prod( + a: _ArrayLikeFloat_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> floating: ... 
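+# NOTE: the `prod` overloads above encode the accumulator widening that
+# happens at runtime (the exact default widths are platform dependent):
+#
+#     np.prod(np.array([2, 3], dtype=np.int8)).dtype  # -> int64 on 64-bit builds
+#     np.prod(np.array([True, True])).dtype           # -> int_ (bools promote)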
+@overload +def prod( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> complexfloating: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ScalarT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + out: None = ..., + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> Any: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike | None, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... +@overload +def prod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool = ..., + initial: _NumberLike_co = ..., + where: _ArrayLikeBool_co = ..., +) -> _ArrayT: ... + +@overload +def cumprod( + a: _ArrayLikeBool_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[int_]: ... +@overload +def cumprod( + a: _ArrayLikeUInt_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[uint64]: ... +@overload +def cumprod( + a: _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[int64]: ... +@overload +def cumprod( + a: _ArrayLikeFloat_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[floating]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[complexfloating]: ... +@overload +def cumprod( + a: _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., +) -> NDArray[object_]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None, + dtype: DTypeLike, + out: _ArrayT, +) -> _ArrayT: ... 
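+# NOTE: `cumprod` follows the same dtype ladder; a one-line sketch:
+#
+#     np.cumprod([1, 2, 3, 4])  # -> array([ 1,  2,  6, 24])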
+@overload +def cumprod( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def cumulative_prod( + x: _ArrayLikeBool_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[int_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeUInt_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[uint64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeInt_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[int64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeFloat_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[floating]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[complexfloating]: ... +@overload +def cumulative_prod( + x: _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[object_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: _DTypeLike[_ScalarT], + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: SupportsIndex | None = ..., + dtype: DTypeLike = ..., + out: _ArrayT, + include_initial: bool = ..., +) -> _ArrayT: ... + +def ndim(a: ArrayLike) -> int: ... + +def size(a: ArrayLike, axis: int | None = ...) -> int: ... + +@overload +def around( + a: _BoolLike_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> float16: ... +@overload +def around( + a: _NumberOrObjectT, + decimals: SupportsIndex = ..., + out: None = ..., +) -> _NumberOrObjectT: ... +@overload +def around( + a: _ComplexLike_co | object_, + decimals: SupportsIndex = ..., + out: None = ..., +) -> Any: ... +@overload +def around( + a: _ArrayLikeBool_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[float16]: ... +@overload +def around( + a: _ArrayLike[_NumberOrObjectT], + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[_NumberOrObjectT]: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = ..., + out: None = ..., +) -> NDArray[Any]: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex, + out: _ArrayT, +) -> _ArrayT: ... +@overload +def around( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + decimals: SupportsIndex = ..., + *, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def mean( + a: _ArrayLikeFloat_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> floating: ... 
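+# NOTE: per the overloads above, rounding boolean input goes through
+# `float16`, and `np.mean` of integer input returns a floating scalar:
+#
+#     np.around(np.array([True, False])).dtype  # -> float16
+#     np.mean([1, 2, 3, 4])                     # -> 2.5 (np.float64)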
+@overload +def mean( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> complexfloating: ... +@overload +def mean( + a: _ArrayLike[np.timedelta64], + axis: None = ..., + dtype: None = ..., + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> timedelta64: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: Literal[False] | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None, + keepdims: Literal[True, 1], + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + *, + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + keepdims: bool | _NoValueType = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def mean( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike | None = ..., + out: None = ..., + keepdims: bool | _NoValueType = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., +) -> Incomplete: ... + +@overload +def std( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> floating: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... 
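+# NOTE: like `var`, `std` accepts a precomputed `mean=` (NumPy >= 2.0); the
+# value must have the shape it would get with `keepdims=True`, e.g.:
+#
+#     a = np.array([[1., 2.], [3., 4.]])
+#     m = np.mean(a, axis=1, keepdims=True)
+#     np.std(a, axis=1, mean=m)  # -> array([0.5, 0.5])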
+@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def std( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def var( + a: _ArrayLikeComplex_co, + axis: None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> floating: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: None = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + out: None = ..., + ddof: float = ..., + keepdims: Literal[False] = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ScalarT: ... 
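+# NOTE: tying back to the `var` implementation earlier in this patch --
+# `correction` is an alias for `ddof`, and the two are mutually exclusive:
+#
+#     np.var([1.0, 2.0, 3.0], correction=1)          # -> 1.0 (same as ddof=1)
+#     np.var([1.0, 2.0, 3.0], ddof=1, correction=1)  # raises ValueError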
+@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> Any: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + *, + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def var( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT, + ddof: float = ..., + keepdims: bool = ..., + where: _ArrayLikeBool_co | _NoValueType = ..., + mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ..., + correction: float | _NoValueType = ..., +) -> _ArrayT: ... + +max = amax +min = amin +round = around diff --git a/python/numpy/_core/function_base.py b/python/numpy/_core/function_base.py new file mode 100644 index 000000000..12ab2a7ef --- /dev/null +++ b/python/numpy/_core/function_base.py @@ -0,0 +1,545 @@ +import functools +import operator +import types +import warnings + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.multiarray import add_docstring + +from . import numeric as _nx +from .numeric import asanyarray, nan, ndim, result_type + +__all__ = ['logspace', 'linspace', 'geomspace'] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None, + dtype=None, axis=None, *, device=None): + return (start, stop) + + +@array_function_dispatch(_linspace_dispatcher) +def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, + axis=0, *, device=None): + """ + Return evenly spaced numbers over a specified interval. + + Returns `num` evenly spaced samples, calculated over the + interval [`start`, `stop`]. + + The endpoint of the interval can optionally be excluded. + + .. versionchanged:: 1.20.0 + Values are rounded towards ``-inf`` instead of ``0`` when an + integer ``dtype`` is specified. The old behavior can + still be obtained with ``np.linspace(start, stop, num).astype(int)`` + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The end value of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced samples, so that `stop` is excluded. Note that the step + size changes when `endpoint` is False. + num : int, optional + Number of samples to generate. Default is 50. Must be non-negative. + endpoint : bool, optional + If True, `stop` is the last sample. Otherwise, it is not included. + Default is True. + retstep : bool, optional + If True, return (`samples`, `step`), where `step` is the spacing + between samples. + dtype : dtype, optional + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred dtype will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. 
+ axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + samples : ndarray + There are `num` equally spaced samples in the closed interval + ``[start, stop]`` or the half-open interval ``[start, stop)`` + (depending on whether `endpoint` is True or False). + step : float, optional + Only returned if `retstep` is True + + Size of spacing between samples. + + + See Also + -------- + arange : Similar to `linspace`, but uses a step size (instead of the + number of samples). + geomspace : Similar to `linspace`, but with numbers spaced evenly on a log + scale (a geometric progression). + logspace : Similar to `geomspace`, but with the end points specified as + logarithms. + :ref:`how-to-partition` + + Examples + -------- + >>> import numpy as np + >>> np.linspace(2.0, 3.0, num=5) + array([2. , 2.25, 2.5 , 2.75, 3. ]) + >>> np.linspace(2.0, 3.0, num=5, endpoint=False) + array([2. , 2.2, 2.4, 2.6, 2.8]) + >>> np.linspace(2.0, 3.0, num=5, retstep=True) + (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 8 + >>> y = np.zeros(N) + >>> x1 = np.linspace(0, 10, N, endpoint=True) + >>> x2 = np.linspace(0, 10, N, endpoint=False) + >>> plt.plot(x1, y, 'o') + [] + >>> plt.plot(x2, y + 0.5, 'o') + [] + >>> plt.ylim([-0.5, 1]) + (-0.5, 1) + >>> plt.show() + + """ + num = operator.index(num) + if num < 0: + raise ValueError( + f"Number of samples, {num}, must be non-negative." + ) + div = (num - 1) if endpoint else num + + conv = _array_converter(start, stop) + start, stop = conv.as_arrays() + dt = conv.result_type(ensure_inexact=True) + + if dtype is None: + dtype = dt + integer_dtype = False + else: + integer_dtype = _nx.issubdtype(dtype, _nx.integer) + + # Use `dtype=type(dt)` to enforce a floating point evaluation: + delta = np.subtract(stop, start, dtype=type(dt)) + y = _nx.arange( + 0, num, dtype=dt, device=device + ).reshape((-1,) + (1,) * ndim(delta)) + + # In-place multiplication y *= delta/div is faster, but prevents + # the multiplicant from overriding what class is produced, and thus + # prevents, e.g. use of Quantities, see gh-7142. Hence, we multiply + # in place only for standard scalar types. + if div > 0: + _mult_inplace = _nx.isscalar(delta) + step = delta / div + any_step_zero = ( + step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any()) + if any_step_zero: + # Special handling for denormal numbers, gh-5437 + y /= div + if _mult_inplace: + y *= delta + else: + y = y * delta + elif _mult_inplace: + y *= step + else: + y = y * step + else: + # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0) + # have an undefined step + step = nan + # Multiply with delta to allow possible override of output class. + y = y * delta + + y += start + + if endpoint and num > 1: + y[-1, ...] 
= stop
+
+    if axis != 0:
+        y = _nx.moveaxis(y, 0, axis)
+
+    if integer_dtype:
+        _nx.floor(y, out=y)
+
+    y = conv.wrap(y.astype(dtype, copy=False))
+    if retstep:
+        return y, step
+    else:
+        return y
+
+
+def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
+                         dtype=None, axis=None):
+    return (start, stop, base)
+
+
+@array_function_dispatch(_logspace_dispatcher)
+def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
+             axis=0):
+    """
+    Return numbers spaced evenly on a log scale.
+
+    In linear space, the sequence starts at ``base ** start``
+    (`base` to the power of `start`) and ends with ``base ** stop``
+    (see `endpoint` below).
+
+    .. versionchanged:: 1.25.0
+        Non-scalar ``base`` is now supported
+
+    Parameters
+    ----------
+    start : array_like
+        ``base ** start`` is the starting value of the sequence.
+    stop : array_like
+        ``base ** stop`` is the final value of the sequence, unless `endpoint`
+        is False.  In that case, ``num + 1`` values are spaced over the
+        interval in log-space, of which all but the last (a sequence of
+        length `num`) are returned.
+    num : integer, optional
+        Number of samples to generate.  Default is 50.
+    endpoint : boolean, optional
+        If true, `stop` is the last sample. Otherwise, it is not included.
+        Default is True.
+    base : array_like, optional
+        The base of the log space. The step size between the elements in
+        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
+        Default is 10.0.
+    dtype : dtype
+        The type of the output array.  If `dtype` is not given, the data type
+        is inferred from `start` and `stop`. The inferred type will never be
+        an integer; `float` is chosen even if the arguments would produce an
+        array of integers.
+    axis : int, optional
+        The axis in the result to store the samples.  Relevant only if start,
+        stop, or base are array-like.  By default (0), the samples will be
+        along a new axis inserted at the beginning. Use -1 to get an axis at
+        the end.
+
+    Returns
+    -------
+    samples : ndarray
+        `num` samples, equally spaced on a log scale.
+
+    See Also
+    --------
+    arange : Similar to linspace, with the step size specified instead of the
+             number of samples. Note that, when used with a float endpoint,
+             the endpoint may or may not be included.
+    linspace : Similar to logspace, but with the samples uniformly distributed
+               in linear space, instead of log space.
+    geomspace : Similar to logspace, but with endpoints specified directly.
+    :ref:`how-to-partition`
+
+    Notes
+    -----
+    If base is a scalar, logspace is equivalent to the code
+
+    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
+    ... # doctest: +SKIP
+    >>> power(base, y).astype(dtype)
+    ... # doctest: +SKIP
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.logspace(2.0, 3.0, num=4)
+    array([ 100.        ,  215.443469  ,  464.15888336, 1000.        ])
+    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
+    array([100.        , 177.827941  , 316.22776602, 562.34132519])
+    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
+    array([4.        , 5.0396842 , 6.34960421, 8.        ])
+    >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1)
+    array([[ 4.        ,  5.0396842 ,  6.34960421,  8.        ],
+           [ 9.        , 12.98024613, 18.72075441, 27.
]]) + + Graphical illustration: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> x1 = np.logspace(0.1, 1, N, endpoint=True) + >>> x2 = np.logspace(0.1, 1, N, endpoint=False) + >>> y = np.zeros(N) + >>> plt.plot(x1, y, 'o') + [] + >>> plt.plot(x2, y + 0.5, 'o') + [] + >>> plt.ylim([-0.5, 1]) + (-0.5, 1) + >>> plt.show() + + """ + if not isinstance(base, (float, int)) and np.ndim(base): + # If base is non-scalar, broadcast it with the others, since it + # may influence how axis is interpreted. + ndmax = np.broadcast(start, stop, base).ndim + start, stop, base = ( + np.array(a, copy=None, subok=True, ndmin=ndmax) + for a in (start, stop, base) + ) + base = np.expand_dims(base, axis=axis) + y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis) + if dtype is None: + return _nx.power(base, y) + return _nx.power(base, y).astype(dtype, copy=False) + + +def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None, + axis=None): + return (start, stop) + + +@array_function_dispatch(_geomspace_dispatcher) +def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): + """ + Return numbers spaced evenly on a log scale (a geometric progression). + + This is similar to `logspace`, but with endpoints specified directly. + Each output sample is a constant multiple of the previous. + + Parameters + ---------- + start : array_like + The starting value of the sequence. + stop : array_like + The final value of the sequence, unless `endpoint` is False. + In that case, ``num + 1`` values are spaced over the + interval in log-space, of which all but the last (a sequence of + length `num`) are returned. + num : integer, optional + Number of samples to generate. Default is 50. + endpoint : boolean, optional + If true, `stop` is the last sample. Otherwise, it is not included. + Default is True. + dtype : dtype + The type of the output array. If `dtype` is not given, the data type + is inferred from `start` and `stop`. The inferred dtype will never be + an integer; `float` is chosen even if the arguments would produce an + array of integers. + axis : int, optional + The axis in the result to store the samples. Relevant only if start + or stop are array-like. By default (0), the samples will be along a + new axis inserted at the beginning. Use -1 to get an axis at the end. + + Returns + ------- + samples : ndarray + `num` samples, equally spaced on a log scale. + + See Also + -------- + logspace : Similar to geomspace, but with endpoints specified using log + and base. + linspace : Similar to geomspace, but with arithmetic instead of geometric + progression. + arange : Similar to linspace, with the step size specified instead of the + number of samples. + :ref:`how-to-partition` + + Notes + ----- + If the inputs or dtype are complex, the output will follow a logarithmic + spiral in the complex plane. (There are an infinite number of spirals + passing through two points; the output will follow the shortest such path.) + + Examples + -------- + >>> import numpy as np + >>> np.geomspace(1, 1000, num=4) + array([ 1., 10., 100., 1000.]) + >>> np.geomspace(1, 1000, num=3, endpoint=False) + array([ 1., 10., 100.]) + >>> np.geomspace(1, 1000, num=4, endpoint=False) + array([ 1. 
, 5.62341325, 31.6227766 , 177.827941 ]) + >>> np.geomspace(1, 256, num=9) + array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.]) + + Note that the above may not produce exact integers: + + >>> np.geomspace(1, 256, num=9, dtype=int) + array([ 1, 2, 4, 7, 16, 32, 63, 127, 256]) + >>> np.around(np.geomspace(1, 256, num=9)).astype(int) + array([ 1, 2, 4, 8, 16, 32, 64, 128, 256]) + + Negative, decreasing, and complex inputs are allowed: + + >>> np.geomspace(1000, 1, num=4) + array([1000., 100., 10., 1.]) + >>> np.geomspace(-1000, -1, num=4) + array([-1000., -100., -10., -1.]) + >>> np.geomspace(1j, 1000j, num=4) # Straight line + array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j]) + >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle + array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j, + 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, + 1.00000000e+00+0.00000000e+00j]) + + Graphical illustration of `endpoint` parameter: + + >>> import matplotlib.pyplot as plt + >>> N = 10 + >>> y = np.zeros(N) + >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') + [] + >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') + [] + >>> plt.axis([0.5, 2000, 0, 3]) + [0.5, 2000, 0, 3] + >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') + >>> plt.show() + + """ + start = asanyarray(start) + stop = asanyarray(stop) + if _nx.any(start == 0) or _nx.any(stop == 0): + raise ValueError('Geometric sequence cannot include zero') + + dt = result_type(start, stop, float(num), _nx.zeros((), dtype)) + if dtype is None: + dtype = dt + else: + # complex to dtype('complex128'), for instance + dtype = _nx.dtype(dtype) + + # Promote both arguments to the same dtype in case, for instance, one is + # complex and another is negative and log would produce NaN otherwise. + # Copy since we may change things in-place further down. + start = start.astype(dt, copy=True) + stop = stop.astype(dt, copy=True) + + # Allow negative real values and ensure a consistent result for complex + # (including avoiding negligible real or imaginary parts in output) by + # rotating start to positive real, calculating, then undoing rotation. + out_sign = _nx.sign(start) + start /= out_sign + stop = stop / out_sign + + log_start = _nx.log10(start) + log_stop = _nx.log10(stop) + result = logspace(log_start, log_stop, num=num, + endpoint=endpoint, base=10.0, dtype=dt) + + # Make sure the endpoints match the start and stop arguments. This is + # necessary because np.exp(np.log(x)) is not necessarily equal to x. + if num > 0: + result[0] = start + if num > 1 and endpoint: + result[-1] = stop + + result *= out_sign + + if axis != 0: + result = _nx.moveaxis(result, 0, axis) + + return result.astype(dtype, copy=False) + + +def _needs_add_docstring(obj): + """ + Returns true if the only way to set the docstring of `obj` from python is + via add_docstring. + + This function errs on the side of being overly conservative. + """ + Py_TPFLAGS_HEAPTYPE = 1 << 9 + + if isinstance(obj, (types.FunctionType, types.MethodType, property)): + return False + + if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE: + return False + + return True + + +def _add_docstring(obj, doc, warn_on_python): + if warn_on_python and not _needs_add_docstring(obj): + warnings.warn( + f"add_newdoc was used on a pure-python object {obj}. 
" + "Prefer to attach it directly to the source.", + UserWarning, + stacklevel=3) + try: + add_docstring(obj, doc) + except Exception: + pass + + +def add_newdoc(place, obj, doc, warn_on_python=True): + """ + Add documentation to an existing object, typically one defined in C + + The purpose is to allow easier editing of the docstrings without requiring + a re-compile. This exists primarily for internal use within numpy itself. + + Parameters + ---------- + place : str + The absolute name of the module to import from + obj : str or None + The name of the object to add documentation to, typically a class or + function name. + doc : {str, Tuple[str, str], List[Tuple[str, str]]} + If a string, the documentation to apply to `obj` + + If a tuple, then the first element is interpreted as an attribute + of `obj` and the second as the docstring to apply - + ``(method, docstring)`` + + If a list, then each element of the list should be a tuple of length + two - ``[(method1, docstring1), (method2, docstring2), ...]`` + warn_on_python : bool + If True, the default, emit `UserWarning` if this is used to attach + documentation to a pure-python object. + + Notes + ----- + This routine never raises an error if the docstring can't be written, but + will raise an error if the object being documented does not exist. + + This routine cannot modify read-only docstrings, as appear + in new-style classes or built-in functions. Because this + routine never raises an error the caller must check manually + that the docstrings were changed. + + Since this function grabs the ``char *`` from a c-level str object and puts + it into the ``tp_doc`` slot of the type of `obj`, it violates a number of + C-API best-practices, by: + + - modifying a `PyTypeObject` after calling `PyType_Ready` + - calling `Py_INCREF` on the str and losing the reference, so the str + will never be released + + If possible it should be avoided. + """ + new = getattr(__import__(place, globals(), {}, [obj]), obj) + if isinstance(doc, str): + if "${ARRAY_FUNCTION_LIKE}" in doc: + doc = overrides.get_array_function_like_doc(new, doc) + _add_docstring(new, doc.strip(), warn_on_python) + elif isinstance(doc, tuple): + attr, docstring = doc + _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python) + elif isinstance(doc, list): + for attr, docstring in doc: + _add_docstring( + getattr(new, attr), docstring.strip(), warn_on_python + ) diff --git a/python/numpy/_core/function_base.pyi b/python/numpy/_core/function_base.pyi new file mode 100644 index 000000000..44d1311f5 --- /dev/null +++ b/python/numpy/_core/function_base.pyi @@ -0,0 +1,278 @@ +from typing import Literal as L +from typing import SupportsIndex, TypeAlias, TypeVar, overload + +from _typeshed import Incomplete + +import numpy as np +from numpy._typing import ( + DTypeLike, + NDArray, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _DTypeLike, +) +from numpy._typing._array_like import _DualArrayLike + +__all__ = ["geomspace", "linspace", "logspace"] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] + +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.float64]: ... 
+@overload +def linspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.floating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.complexfloating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + retstep: L[False], + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.float64], np.float64]: ... +@overload +def linspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.floating], np.floating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[_ScalarT], _ScalarT]: ... +@overload +def linspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[Incomplete], Incomplete]: ... + +@overload +def logspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToArrayFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... +@overload +def logspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeFloat_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... 
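+# NOTE: `logspace` and `geomspace` (typed below) describe the same curve,
+# with the endpoints given as exponents vs. directly:
+#
+#     np.logspace(0.0, 3.0, num=4)  # -> array([   1.,   10.,  100., 1000.])
+#     np.geomspace(1, 1000, num=4)  # -> array([   1.,   10.,  100., 1000.])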
+@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + base: _ArrayLikeComplex_co, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def logspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... + +@overload +def geomspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... +@overload +def geomspace( + start: _ArrayLikeFloat_co, + stop: _ArrayLikeFloat_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex, + endpoint: bool, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + dtype: _DTypeLike[_ScalarT], + axis: SupportsIndex = 0, +) -> NDArray[_ScalarT]: ... +@overload +def geomspace( + start: _ArrayLikeComplex_co, + stop: _ArrayLikeComplex_co, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... + +def add_newdoc( + place: str, + obj: str, + doc: str | tuple[str, str] | list[tuple[str, str]], + warn_on_python: bool = True, +) -> None: ... diff --git a/python/numpy/_core/getlimits.py b/python/numpy/_core/getlimits.py new file mode 100644 index 000000000..afa2ccebc --- /dev/null +++ b/python/numpy/_core/getlimits.py @@ -0,0 +1,748 @@ +"""Machine limits for Float32 and Float64 and (long double) if available... + +""" +__all__ = ['finfo', 'iinfo'] + +import types +import warnings + +from numpy._utils import set_module + +from . import numeric +from . 
import numerictypes as ntypes +from ._machar import MachAr +from .numeric import array, inf, nan +from .umath import exp2, isnan, log10, nextafter + + +def _fr0(a): + """fix rank-0 --> rank-1""" + if a.ndim == 0: + a = a.copy() + a.shape = (1,) + return a + + +def _fr1(a): + """fix rank > 0 --> rank-0""" + if a.size == 1: + a = a.copy() + a.shape = () + return a + + +class MachArLike: + """ Object to simulate MachAr instance """ + def __init__(self, ftype, *, eps, epsneg, huge, tiny, + ibeta, smallest_subnormal=None, **kwargs): + self.params = _MACHAR_PARAMS[ftype] + self.ftype = ftype + self.title = self.params['title'] + # Parameter types same as for discovered MachAr object. + if not smallest_subnormal: + self._smallest_subnormal = nextafter( + self.ftype(0), self.ftype(1), dtype=self.ftype) + else: + self._smallest_subnormal = smallest_subnormal + self.epsilon = self.eps = self._float_to_float(eps) + self.epsneg = self._float_to_float(epsneg) + self.xmax = self.huge = self._float_to_float(huge) + self.xmin = self._float_to_float(tiny) + self.smallest_normal = self.tiny = self._float_to_float(tiny) + self.ibeta = self.params['itype'](ibeta) + self.__dict__.update(kwargs) + self.precision = int(-log10(self.eps)) + self.resolution = self._float_to_float( + self._float_conv(10) ** (-self.precision)) + self._str_eps = self._float_to_str(self.eps) + self._str_epsneg = self._float_to_str(self.epsneg) + self._str_xmin = self._float_to_str(self.xmin) + self._str_xmax = self._float_to_str(self.xmax) + self._str_resolution = self._float_to_str(self.resolution) + self._str_smallest_normal = self._float_to_str(self.xmin) + + @property + def smallest_subnormal(self): + """Return the value for the smallest subnormal. + + Returns + ------- + smallest_subnormal : float + value for the smallest subnormal. + + Warns + ----- + UserWarning + If the calculated value for the smallest subnormal is zero. + """ + # Check that the calculated value is not zero, in case it raises a + # warning. + value = self._smallest_subnormal + if self.ftype(0) == value: + warnings.warn( + f'The value of the smallest subnormal for {self.ftype} type is zero.', + UserWarning, stacklevel=2) + + return self._float_to_float(value) + + @property + def _str_smallest_subnormal(self): + """Return the string representation of the smallest subnormal.""" + return self._float_to_str(self.smallest_subnormal) + + def _float_to_float(self, value): + """Converts float to float. + + Parameters + ---------- + value : float + value to be converted. + """ + return _fr1(self._float_conv(value)) + + def _float_conv(self, value): + """Converts float to conv. + + Parameters + ---------- + value : float + value to be converted. + """ + return array([value], self.ftype) + + def _float_to_str(self, value): + """Converts float to str. + + Parameters + ---------- + value : float + value to be converted. 
+ """ + return self.params['fmt'] % array(_fr0(value)[0], self.ftype) + + +_convert_to_float = { + ntypes.csingle: ntypes.single, + ntypes.complex128: ntypes.float64, + ntypes.clongdouble: ntypes.longdouble + } + +# Parameters for creating MachAr / MachAr-like objects +_title_fmt = 'numpy {} precision floating point number' +_MACHAR_PARAMS = { + ntypes.double: { + 'itype': ntypes.int64, + 'fmt': '%24.16e', + 'title': _title_fmt.format('double')}, + ntypes.single: { + 'itype': ntypes.int32, + 'fmt': '%15.7e', + 'title': _title_fmt.format('single')}, + ntypes.longdouble: { + 'itype': ntypes.longlong, + 'fmt': '%s', + 'title': _title_fmt.format('long double')}, + ntypes.half: { + 'itype': ntypes.int16, + 'fmt': '%12.5e', + 'title': _title_fmt.format('half')}} + +# Key to identify the floating point type. Key is result of +# +# ftype = np.longdouble # or float64, float32, etc. +# v = (ftype(-1.0) / ftype(10.0)) +# v.view(v.dtype.newbyteorder('<')).tobytes() +# +# Uses division to work around deficiencies in strtold on some platforms. +# See: +# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure + +_KNOWN_TYPES = {} +def _register_type(machar, bytepat): + _KNOWN_TYPES[bytepat] = machar + + +_float_ma = {} + + +def _register_known_types(): + # Known parameters for float16 + # See docstring of MachAr class for description of parameters. + f16 = ntypes.float16 + float16_ma = MachArLike(f16, + machep=-10, + negep=-11, + minexp=-14, + maxexp=16, + it=10, + iexp=5, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f16(-10)), + epsneg=exp2(f16(-11)), + huge=f16(65504), + tiny=f16(2 ** -14)) + _register_type(float16_ma, b'f\xae') + _float_ma[16] = float16_ma + + # Known parameters for float32 + f32 = ntypes.float32 + float32_ma = MachArLike(f32, + machep=-23, + negep=-24, + minexp=-126, + maxexp=128, + it=23, + iexp=8, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(f32(-23)), + epsneg=exp2(f32(-24)), + huge=f32((1 - 2 ** -24) * 2**128), + tiny=exp2(f32(-126))) + _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') + _float_ma[32] = float32_ma + + # Known parameters for float64 + f64 = ntypes.float64 + epsneg_f64 = 2.0 ** -53.0 + tiny_f64 = 2.0 ** -1022.0 + float64_ma = MachArLike(f64, + machep=-52, + negep=-53, + minexp=-1022, + maxexp=1024, + it=52, + iexp=11, + ibeta=2, + irnd=5, + ngrd=0, + eps=2.0 ** -52.0, + epsneg=epsneg_f64, + huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), + tiny=tiny_f64) + _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') + _float_ma[64] = float64_ma + + # Known parameters for IEEE 754 128-bit binary float + ld = ntypes.longdouble + epsneg_f128 = exp2(ld(-113)) + tiny_f128 = exp2(ld(-16382)) + # Ignore runtime error when this is not f128 + with numeric.errstate(all='ignore'): + huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) + float128_ma = MachArLike(ld, + machep=-112, + negep=-113, + minexp=-16382, + maxexp=16384, + it=112, + iexp=15, + ibeta=2, + irnd=5, + ngrd=0, + eps=exp2(ld(-112)), + epsneg=epsneg_f128, + huge=huge_f128, + tiny=tiny_f128) + # IEEE 754 128-bit binary float + _register_type(float128_ma, + b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') + _float_ma[128] = float128_ma + + # Known parameters for float80 (Intel 80-bit extended precision) + epsneg_f80 = exp2(ld(-64)) + tiny_f80 = exp2(ld(-16382)) + # Ignore runtime error when this is not f80 + with numeric.errstate(all='ignore'): + huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) + float80_ma = MachArLike(ld, + machep=-63, + negep=-64, + 
minexp=-16382,
+                            maxexp=16384,
+                            it=63,
+                            iexp=15,
+                            ibeta=2,
+                            irnd=5,
+                            ngrd=0,
+                            eps=exp2(ld(-63)),
+                            epsneg=epsneg_f80,
+                            huge=huge_f80,
+                            tiny=tiny_f80)
+    # float80, first 10 bytes containing actual storage
+    _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
+    _float_ma[80] = float80_ma
+
+    # Guessed / known parameters for double double; see:
+    # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
+    # These numbers have the same exponent range as float64, but an extended
+    # number of digits in the significand.
+    huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
+    # As the smallest_normal in double double is so hard to calculate, we set
+    # it to NaN.
+    smallest_normal_dd = nan
+    # Leave the same value for the smallest subnormal as double
+    smallest_subnormal_dd = ld(nextafter(0., 1.))
+    float_dd_ma = MachArLike(ld,
+                             machep=-105,
+                             negep=-106,
+                             minexp=-1022,
+                             maxexp=1024,
+                             it=105,
+                             iexp=11,
+                             ibeta=2,
+                             irnd=5,
+                             ngrd=0,
+                             eps=exp2(ld(-105)),
+                             epsneg=exp2(ld(-106)),
+                             huge=huge_dd,
+                             tiny=smallest_normal_dd,
+                             smallest_subnormal=smallest_subnormal_dd)
+    # double double; low, high order (e.g. PPC 64)
+    _register_type(float_dd_ma,
+                   b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+    # double double; high, low order (e.g. PPC 64 le)
+    _register_type(float_dd_ma,
+                   b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
+    _float_ma['dd'] = float_dd_ma
+
+
+def _get_machar(ftype):
+    """ Get MachAr instance or MachAr-like instance
+
+    Get parameters for floating point type, by first trying signatures of
+    various known floating point types, then, if none match, attempting to
+    identify parameters by analysis.
+
+    Parameters
+    ----------
+    ftype : class
+        Numpy floating point type class (e.g. ``np.float64``)
+
+    Returns
+    -------
+    ma_like : instance of :class:`MachAr` or :class:`MachArLike`
+        Object giving floating point parameters for `ftype`.
+
+    Warns
+    -----
+    UserWarning
+        If the binary signature of the float type is not in the dictionary of
+        known float types.
+    """
+    params = _MACHAR_PARAMS.get(ftype)
+    if params is None:
+        raise ValueError(repr(ftype))
+    # Detect known / suspected types
+    # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because strtold
+    # may be deficient
+    key = (ftype(-1.0) / ftype(10.))
+    key = key.view(key.dtype.newbyteorder("<")).tobytes()
+    ma_like = None
+    if ftype == ntypes.longdouble:
+        # Could be 80 bit == 10 byte extended precision, where the last bytes
+        # can be random garbage.
+        # Compare the first 10 bytes to the pattern first, to avoid branching
+        # on the random garbage.
+        ma_like = _KNOWN_TYPES.get(key[:10])
+    if ma_like is None:
+        # see if the full key is known.
+        ma_like = _KNOWN_TYPES.get(key)
+    if ma_like is None and len(key) == 16:
+        # machine limits could be f80 masquerading as np.float128; find all
+        # keys with length 16 and make a new dict, but make the keys only
+        # 10 bytes long (the last bytes can be random garbage)
+        _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}
+        ma_like = _kt.get(key[:10])
+    if ma_like is not None:
+        return ma_like
+    # Fall back to parameter discovery
+    warnings.warn(
+        f'Signature {key} for {ftype} does not match any known type: '
+        'falling back to type probe function.\n'
+        'This warning indicates broken support for the dtype!',
+        UserWarning, stacklevel=2)
+    return _discovered_machar(ftype)
+
+
+def _discovered_machar(ftype):
+    """ Create MachAr instance with found information on float types
+
+    TODO: ideally, MachAr should be retired completely. We currently only
+    ever use it on systems with a broken longdouble (valgrind, WSL).
+    """
+    params = _MACHAR_PARAMS[ftype]
+    return MachAr(lambda v: array([v], ftype),
+                  lambda v: _fr0(v.astype(params['itype']))[0],
+                  lambda v: array(_fr0(v)[0], ftype),
+                  lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
+                  params['title'])
+
+
+@set_module('numpy')
+class finfo:
+    """
+    finfo(dtype)
+
+    Machine limits for floating point types.
+
+    Attributes
+    ----------
+    bits : int
+        The number of bits occupied by the type.
+    dtype : dtype
+        Returns the dtype for which `finfo` returns information. For complex
+        input, the returned dtype is the associated ``float*`` dtype for its
+        real and imaginary components.
+    eps : float
+        The difference between 1.0 and the next smallest representable float
+        larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
+        standard, ``eps = 2**-52``, approximately 2.22e-16.
+    epsneg : float
+        The difference between 1.0 and the next smallest representable float
+        less than 1.0. For example, for 64-bit binary floats in the IEEE-754
+        standard, ``epsneg = 2**-53``, approximately 1.11e-16.
+    iexp : int
+        The number of bits in the exponent portion of the floating point
+        representation.
+    machep : int
+        The exponent that yields `eps`.
+    max : floating point number of the appropriate type
+        The largest representable number.
+    maxexp : int
+        The smallest positive power of the base (2) that causes overflow.
+    min : floating point number of the appropriate type
+        The smallest representable number, typically ``-max``.
+    minexp : int
+        The most negative power of the base (2) consistent with there
+        being no leading 0's in the mantissa.
+    negep : int
+        The exponent that yields `epsneg`.
+    nexp : int
+        The number of bits in the exponent including its sign and bias.
+    nmant : int
+        The number of bits in the mantissa.
+    precision : int
+        The approximate number of decimal digits to which this kind of
+        float is precise.
+    resolution : floating point number of the appropriate type
+        The approximate decimal resolution of this type, i.e.,
+        ``10**-precision``.
+    tiny : float
+        An alias for `smallest_normal`, kept for backwards compatibility.
+    smallest_normal : float
+        The smallest positive floating point number with 1 as leading bit in
+        the mantissa following IEEE-754 (see Notes).
+    smallest_subnormal : float
+        The smallest positive floating point number with 0 as leading bit in
+        the mantissa following IEEE-754.
+
+    Parameters
+    ----------
+    dtype : float, dtype, or instance
+        Kind of floating point or complex floating point
+        data-type about which to get information.
+
+    See Also
+    --------
+    iinfo : The equivalent for integer data types.
+    spacing : The distance between a value and the nearest adjacent number
+    nextafter : The next floating point value after x1 towards x2
+
+    Notes
+    -----
+    For developers of NumPy: do not instantiate this at the module level.
+    The initial calculation of these parameters is expensive and negatively
+    impacts import times. These objects are cached, so calling ``finfo()``
+    repeatedly inside your functions is not a problem.
+
+    Note that ``smallest_normal`` is not actually the smallest positive
+    representable value in a NumPy floating point type. As in the IEEE-754
+    standard [1]_, NumPy floating point types make use of subnormal numbers to
+    fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
+    may have significantly reduced precision [2]_.
+
+    This function can also be used for complex data types. If used,
+    the output will be the same as the corresponding real float type
+    (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)),
+    and the reported values apply to both the real and imaginary components.
+
+    References
+    ----------
+    .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
+           pp.1-70, 2008, https://doi.org/10.1109/IEEESTD.2008.4610935
+    .. [2] Wikipedia, "Denormal Numbers",
+           https://en.wikipedia.org/wiki/Denormal_number
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.finfo(np.float64).dtype
+    dtype('float64')
+    >>> np.finfo(np.complex64).dtype
+    dtype('float32')
+
+    """
+
+    _finfo_cache = {}
+
+    __class_getitem__ = classmethod(types.GenericAlias)
+
+    def __new__(cls, dtype):
+        try:
+            obj = cls._finfo_cache.get(dtype)  # most common path
+            if obj is not None:
+                return obj
+        except TypeError:
+            pass
+
+        if dtype is None:
+            # Deprecated in NumPy 1.25, 2023-01-16
+            warnings.warn(
+                "finfo() dtype cannot be None. This behavior will "
+                "raise an error in the future. (Deprecated in NumPy 1.25)",
+                DeprecationWarning,
+                stacklevel=2
+            )
+
+        try:
+            dtype = numeric.dtype(dtype)
+        except TypeError:
+            # In case a float instance was given
+            dtype = numeric.dtype(type(dtype))
+
+        obj = cls._finfo_cache.get(dtype)
+        if obj is not None:
+            return obj
+        dtypes = [dtype]
+        newdtype = ntypes.obj2sctype(dtype)
+        if newdtype is not dtype:
+            dtypes.append(newdtype)
+            dtype = newdtype
+        if not issubclass(dtype, numeric.inexact):
+            raise ValueError(f"data type {dtype!r} not inexact")
+        obj = cls._finfo_cache.get(dtype)
+        if obj is not None:
+            return obj
+        if not issubclass(dtype, numeric.floating):
+            newdtype = _convert_to_float[dtype]
+            if newdtype is not dtype:
+                # dtype changed, for example from complex128 to float64
+                dtypes.append(newdtype)
+                dtype = newdtype
+
+        obj = cls._finfo_cache.get(dtype, None)
+        if obj is not None:
+            # the original dtype was not in the cache, but the new
+            # dtype is in the cache.
we add the original dtypes to + # the cache and return the result + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + obj = object.__new__(cls)._init(dtype) + for dt in dtypes: + cls._finfo_cache[dt] = obj + return obj + + def _init(self, dtype): + self.dtype = numeric.dtype(dtype) + machar = _get_machar(dtype) + + for word in ['precision', 'iexp', + 'maxexp', 'minexp', 'negep', + 'machep']: + setattr(self, word, getattr(machar, word)) + for word in ['resolution', 'epsneg', 'smallest_subnormal']: + setattr(self, word, getattr(machar, word).flat[0]) + self.bits = self.dtype.itemsize * 8 + self.max = machar.huge.flat[0] + self.min = -self.max + self.eps = machar.eps.flat[0] + self.nexp = machar.iexp + self.nmant = machar.it + self._machar = machar + self._str_tiny = machar._str_xmin.strip() + self._str_max = machar._str_xmax.strip() + self._str_epsneg = machar._str_epsneg.strip() + self._str_eps = machar._str_eps.strip() + self._str_resolution = machar._str_resolution.strip() + self._str_smallest_normal = machar._str_smallest_normal.strip() + self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() + return self + + def __str__(self): + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'precision = %(precision)3s resolution = %(_str_resolution)s\n' + 'machep = %(machep)6s eps = %(_str_eps)s\n' + 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' + 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' + 'maxexp = %(maxexp)6s max = %(_str_max)s\n' + 'nexp = %(nexp)6s min = -max\n' + 'smallest_normal = %(_str_smallest_normal)s ' + 'smallest_subnormal = %(_str_smallest_subnormal)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % self.__dict__ + + def __repr__(self): + c = self.__class__.__name__ + d = self.__dict__.copy() + d['klass'] = c + return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," + " max=%(_str_max)s, dtype=%(dtype)s)") % d) + + @property + def smallest_normal(self): + """Return the value for the smallest normal. + + Returns + ------- + smallest_normal : float + Value for the smallest normal. + + Warns + ----- + UserWarning + If the calculated value for the smallest normal is requested for + double-double. + """ + # This check is necessary because the value for smallest_normal is + # platform dependent for longdouble types. + if isnan(self._machar.smallest_normal.flat[0]): + warnings.warn( + 'The value of smallest normal is undefined for double double', + UserWarning, stacklevel=2) + return self._machar.smallest_normal.flat[0] + + @property + def tiny(self): + """Return the value for tiny, alias of smallest_normal. + + Returns + ------- + tiny : float + Value for the smallest normal, alias of smallest_normal. + + Warns + ----- + UserWarning + If the calculated value for the smallest normal is requested for + double-double. + """ + return self.smallest_normal + + +@set_module('numpy') +class iinfo: + """ + iinfo(type) + + Machine limits for integer types. + + Attributes + ---------- + bits : int + The number of bits occupied by the type. + dtype : dtype + Returns the dtype for which `iinfo` returns information. + min : int + The smallest integer expressible by the type. + max : int + The largest integer expressible by the type. + + Parameters + ---------- + int_type : integer type, dtype, or instance + The kind of integer data type to get information about. + + See Also + -------- + finfo : The equivalent for floating point data types. 
+ + Examples + -------- + With types: + + >>> import numpy as np + >>> ii16 = np.iinfo(np.int16) + >>> ii16.min + -32768 + >>> ii16.max + 32767 + >>> ii32 = np.iinfo(np.int32) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + With instances: + + >>> ii32 = np.iinfo(np.int32(10)) + >>> ii32.min + -2147483648 + >>> ii32.max + 2147483647 + + """ + + _min_vals = {} + _max_vals = {} + + __class_getitem__ = classmethod(types.GenericAlias) + + def __init__(self, int_type): + try: + self.dtype = numeric.dtype(int_type) + except TypeError: + self.dtype = numeric.dtype(type(int_type)) + self.kind = self.dtype.kind + self.bits = self.dtype.itemsize * 8 + self.key = "%s%d" % (self.kind, self.bits) + if self.kind not in 'iu': + raise ValueError(f"Invalid integer data type {self.kind!r}.") + + @property + def min(self): + """Minimum value of given dtype.""" + if self.kind == 'u': + return 0 + else: + try: + val = iinfo._min_vals[self.key] + except KeyError: + val = int(-(1 << (self.bits - 1))) + iinfo._min_vals[self.key] = val + return val + + @property + def max(self): + """Maximum value of given dtype.""" + try: + val = iinfo._max_vals[self.key] + except KeyError: + if self.kind == 'u': + val = int((1 << self.bits) - 1) + else: + val = int((1 << (self.bits - 1)) - 1) + iinfo._max_vals[self.key] = val + return val + + def __str__(self): + """String representation.""" + fmt = ( + 'Machine parameters for %(dtype)s\n' + '---------------------------------------------------------------\n' + 'min = %(min)s\n' + 'max = %(max)s\n' + '---------------------------------------------------------------\n' + ) + return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} + + def __repr__(self): + return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, + self.min, self.max, self.dtype) diff --git a/python/numpy/_core/getlimits.pyi b/python/numpy/_core/getlimits.pyi new file mode 100644 index 000000000..9d79b178f --- /dev/null +++ b/python/numpy/_core/getlimits.pyi @@ -0,0 +1,3 @@ +from numpy import finfo, iinfo + +__all__ = ["finfo", "iinfo"] diff --git a/python/numpy/_core/include/numpy/__multiarray_api.c b/python/numpy/_core/include/numpy/__multiarray_api.c new file mode 100644 index 000000000..8398c627e --- /dev/null +++ b/python/numpy/_core/include/numpy/__multiarray_api.c @@ -0,0 +1,376 @@ + +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyArray_API[] = { + (void *) PyArray_GetNDArrayCVersion, + NULL, + (void *) &PyArray_Type, + (void *) &PyArrayDescr_Type, + NULL, + (void *) &PyArrayIter_Type, + (void *) &PyArrayMultiIter_Type, + (int *) &NPY_NUMUSERTYPES, + (void *) &PyBoolArrType_Type, + (void *) &_PyArrayScalar_BoolValues, + (void *) &PyGenericArrType_Type, + (void *) &PyNumberArrType_Type, + (void *) &PyIntegerArrType_Type, + (void *) &PySignedIntegerArrType_Type, + (void *) &PyUnsignedIntegerArrType_Type, + (void *) &PyInexactArrType_Type, + (void *) &PyFloatingArrType_Type, + (void *) &PyComplexFloatingArrType_Type, + (void *) &PyFlexibleArrType_Type, + (void *) &PyCharacterArrType_Type, + (void *) &PyByteArrType_Type, + (void *) &PyShortArrType_Type, + (void *) &PyIntArrType_Type, + (void *) &PyLongArrType_Type, + (void *) &PyLongLongArrType_Type, + (void *) &PyUByteArrType_Type, + (void *) &PyUShortArrType_Type, + (void *) &PyUIntArrType_Type, + (void *) &PyULongArrType_Type, + (void *) &PyULongLongArrType_Type, + (void *) &PyFloatArrType_Type, + (void *) &PyDoubleArrType_Type, + (void *) &PyLongDoubleArrType_Type, + (void 
*) &PyCFloatArrType_Type, + (void *) &PyCDoubleArrType_Type, + (void *) &PyCLongDoubleArrType_Type, + (void *) &PyObjectArrType_Type, + (void *) &PyStringArrType_Type, + (void *) &PyUnicodeArrType_Type, + (void *) &PyVoidArrType_Type, + NULL, + NULL, + (void *) PyArray_INCREF, + (void *) PyArray_XDECREF, + (void *) PyArray_SetStringFunction, + (void *) PyArray_DescrFromType, + (void *) PyArray_TypeObjectFromType, + (void *) PyArray_Zero, + (void *) PyArray_One, + (void *) PyArray_CastToType, + (void *) PyArray_CopyInto, + (void *) PyArray_CopyAnyInto, + (void *) PyArray_CanCastSafely, + (void *) PyArray_CanCastTo, + (void *) PyArray_ObjectType, + (void *) PyArray_DescrFromObject, + (void *) PyArray_ConvertToCommonType, + (void *) PyArray_DescrFromScalar, + (void *) PyArray_DescrFromTypeObject, + (void *) PyArray_Size, + (void *) PyArray_Scalar, + (void *) PyArray_FromScalar, + (void *) PyArray_ScalarAsCtype, + (void *) PyArray_CastScalarToCtype, + (void *) PyArray_CastScalarDirect, + (void *) PyArray_Pack, + NULL, + NULL, + NULL, + (void *) PyArray_FromAny, + (void *) PyArray_EnsureArray, + (void *) PyArray_EnsureAnyArray, + (void *) PyArray_FromFile, + (void *) PyArray_FromString, + (void *) PyArray_FromBuffer, + (void *) PyArray_FromIter, + (void *) PyArray_Return, + (void *) PyArray_GetField, + (void *) PyArray_SetField, + (void *) PyArray_Byteswap, + (void *) PyArray_Resize, + NULL, + NULL, + NULL, + (void *) PyArray_CopyObject, + (void *) PyArray_NewCopy, + (void *) PyArray_ToList, + (void *) PyArray_ToString, + (void *) PyArray_ToFile, + (void *) PyArray_Dump, + (void *) PyArray_Dumps, + (void *) PyArray_ValidType, + (void *) PyArray_UpdateFlags, + (void *) PyArray_New, + (void *) PyArray_NewFromDescr, + (void *) PyArray_DescrNew, + (void *) PyArray_DescrNewFromType, + (void *) PyArray_GetPriority, + (void *) PyArray_IterNew, + (void *) PyArray_MultiIterNew, + (void *) PyArray_PyIntAsInt, + (void *) PyArray_PyIntAsIntp, + (void *) PyArray_Broadcast, + NULL, + (void *) PyArray_FillWithScalar, + (void *) PyArray_CheckStrides, + (void *) PyArray_DescrNewByteorder, + (void *) PyArray_IterAllButAxis, + (void *) PyArray_CheckFromAny, + (void *) PyArray_FromArray, + (void *) PyArray_FromInterface, + (void *) PyArray_FromStructInterface, + (void *) PyArray_FromArrayAttr, + (void *) PyArray_ScalarKind, + (void *) PyArray_CanCoerceScalar, + NULL, + (void *) PyArray_CanCastScalar, + NULL, + (void *) PyArray_RemoveSmallest, + (void *) PyArray_ElementStrides, + (void *) PyArray_Item_INCREF, + (void *) PyArray_Item_XDECREF, + NULL, + (void *) PyArray_Transpose, + (void *) PyArray_TakeFrom, + (void *) PyArray_PutTo, + (void *) PyArray_PutMask, + (void *) PyArray_Repeat, + (void *) PyArray_Choose, + (void *) PyArray_Sort, + (void *) PyArray_ArgSort, + (void *) PyArray_SearchSorted, + (void *) PyArray_ArgMax, + (void *) PyArray_ArgMin, + (void *) PyArray_Reshape, + (void *) PyArray_Newshape, + (void *) PyArray_Squeeze, + (void *) PyArray_View, + (void *) PyArray_SwapAxes, + (void *) PyArray_Max, + (void *) PyArray_Min, + (void *) PyArray_Ptp, + (void *) PyArray_Mean, + (void *) PyArray_Trace, + (void *) PyArray_Diagonal, + (void *) PyArray_Clip, + (void *) PyArray_Conjugate, + (void *) PyArray_Nonzero, + (void *) PyArray_Std, + (void *) PyArray_Sum, + (void *) PyArray_CumSum, + (void *) PyArray_Prod, + (void *) PyArray_CumProd, + (void *) PyArray_All, + (void *) PyArray_Any, + (void *) PyArray_Compress, + (void *) PyArray_Flatten, + (void *) PyArray_Ravel, + (void *) PyArray_MultiplyList, + (void *) 
PyArray_MultiplyIntList, + (void *) PyArray_GetPtr, + (void *) PyArray_CompareLists, + (void *) PyArray_AsCArray, + NULL, + NULL, + (void *) PyArray_Free, + (void *) PyArray_Converter, + (void *) PyArray_IntpFromSequence, + (void *) PyArray_Concatenate, + (void *) PyArray_InnerProduct, + (void *) PyArray_MatrixProduct, + NULL, + (void *) PyArray_Correlate, + NULL, + (void *) PyArray_DescrConverter, + (void *) PyArray_DescrConverter2, + (void *) PyArray_IntpConverter, + (void *) PyArray_BufferConverter, + (void *) PyArray_AxisConverter, + (void *) PyArray_BoolConverter, + (void *) PyArray_ByteorderConverter, + (void *) PyArray_OrderConverter, + (void *) PyArray_EquivTypes, + (void *) PyArray_Zeros, + (void *) PyArray_Empty, + (void *) PyArray_Where, + (void *) PyArray_Arange, + (void *) PyArray_ArangeObj, + (void *) PyArray_SortkindConverter, + (void *) PyArray_LexSort, + (void *) PyArray_Round, + (void *) PyArray_EquivTypenums, + (void *) PyArray_RegisterDataType, + (void *) PyArray_RegisterCastFunc, + (void *) PyArray_RegisterCanCast, + (void *) PyArray_InitArrFuncs, + (void *) PyArray_IntTupleFromIntp, + NULL, + (void *) PyArray_ClipmodeConverter, + (void *) PyArray_OutputConverter, + (void *) PyArray_BroadcastToShape, + NULL, + NULL, + (void *) PyArray_DescrAlignConverter, + (void *) PyArray_DescrAlignConverter2, + (void *) PyArray_SearchsideConverter, + (void *) PyArray_CheckAxis, + (void *) PyArray_OverflowMultiplyList, + NULL, + (void *) PyArray_MultiIterFromObjects, + (void *) PyArray_GetEndianness, + (void *) PyArray_GetNDArrayCFeatureVersion, + (void *) PyArray_Correlate2, + (void *) PyArray_NeighborhoodIterNew, + (void *) &PyTimeIntegerArrType_Type, + (void *) &PyDatetimeArrType_Type, + (void *) &PyTimedeltaArrType_Type, + (void *) &PyHalfArrType_Type, + (void *) &NpyIter_Type, + NULL, + NULL, + NULL, + NULL, + (void *) NpyIter_GetTransferFlags, + (void *) NpyIter_New, + (void *) NpyIter_MultiNew, + (void *) NpyIter_AdvancedNew, + (void *) NpyIter_Copy, + (void *) NpyIter_Deallocate, + (void *) NpyIter_HasDelayedBufAlloc, + (void *) NpyIter_HasExternalLoop, + (void *) NpyIter_EnableExternalLoop, + (void *) NpyIter_GetInnerStrideArray, + (void *) NpyIter_GetInnerLoopSizePtr, + (void *) NpyIter_Reset, + (void *) NpyIter_ResetBasePointers, + (void *) NpyIter_ResetToIterIndexRange, + (void *) NpyIter_GetNDim, + (void *) NpyIter_GetNOp, + (void *) NpyIter_GetIterNext, + (void *) NpyIter_GetIterSize, + (void *) NpyIter_GetIterIndexRange, + (void *) NpyIter_GetIterIndex, + (void *) NpyIter_GotoIterIndex, + (void *) NpyIter_HasMultiIndex, + (void *) NpyIter_GetShape, + (void *) NpyIter_GetGetMultiIndex, + (void *) NpyIter_GotoMultiIndex, + (void *) NpyIter_RemoveMultiIndex, + (void *) NpyIter_HasIndex, + (void *) NpyIter_IsBuffered, + (void *) NpyIter_IsGrowInner, + (void *) NpyIter_GetBufferSize, + (void *) NpyIter_GetIndexPtr, + (void *) NpyIter_GotoIndex, + (void *) NpyIter_GetDataPtrArray, + (void *) NpyIter_GetDescrArray, + (void *) NpyIter_GetOperandArray, + (void *) NpyIter_GetIterView, + (void *) NpyIter_GetReadFlags, + (void *) NpyIter_GetWriteFlags, + (void *) NpyIter_DebugPrint, + (void *) NpyIter_IterationNeedsAPI, + (void *) NpyIter_GetInnerFixedStrideArray, + (void *) NpyIter_RemoveAxis, + (void *) NpyIter_GetAxisStrideArray, + (void *) NpyIter_RequiresBuffering, + (void *) NpyIter_GetInitialDataPtrArray, + (void *) NpyIter_CreateCompatibleStrides, + (void *) PyArray_CastingConverter, + (void *) PyArray_CountNonzero, + (void *) PyArray_PromoteTypes, + (void *) 
PyArray_MinScalarType, + (void *) PyArray_ResultType, + (void *) PyArray_CanCastArrayTo, + (void *) PyArray_CanCastTypeTo, + (void *) PyArray_EinsteinSum, + (void *) PyArray_NewLikeArray, + NULL, + (void *) PyArray_ConvertClipmodeSequence, + (void *) PyArray_MatrixProduct2, + (void *) NpyIter_IsFirstVisit, + (void *) PyArray_SetBaseObject, + (void *) PyArray_CreateSortedStridePerm, + (void *) PyArray_RemoveAxesInPlace, + (void *) PyArray_DebugPrint, + (void *) PyArray_FailUnlessWriteable, + (void *) PyArray_SetUpdateIfCopyBase, + (void *) PyDataMem_NEW, + (void *) PyDataMem_FREE, + (void *) PyDataMem_RENEW, + NULL, + (NPY_CASTING *) &NPY_DEFAULT_ASSIGN_CASTING, + NULL, + NULL, + NULL, + (void *) PyArray_Partition, + (void *) PyArray_ArgPartition, + (void *) PyArray_SelectkindConverter, + (void *) PyDataMem_NEW_ZEROED, + (void *) PyArray_CheckAnyScalarExact, + NULL, + (void *) PyArray_ResolveWritebackIfCopy, + (void *) PyArray_SetWritebackIfCopyBase, + (void *) PyDataMem_SetHandler, + (void *) PyDataMem_GetHandler, + (PyObject* *) &PyDataMem_DefaultHandler, + (void *) NpyDatetime_ConvertDatetime64ToDatetimeStruct, + (void *) NpyDatetime_ConvertDatetimeStructToDatetime64, + (void *) NpyDatetime_ConvertPyDateTimeToDatetimeStruct, + (void *) NpyDatetime_GetDatetimeISO8601StrLen, + (void *) NpyDatetime_MakeISO8601Datetime, + (void *) NpyDatetime_ParseISO8601Datetime, + (void *) NpyString_load, + (void *) NpyString_pack, + (void *) NpyString_pack_null, + (void *) NpyString_acquire_allocator, + (void *) NpyString_acquire_allocators, + (void *) NpyString_release_allocator, + (void *) NpyString_release_allocators, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + (void *) PyArray_GetDefaultDescr, + (void *) PyArrayInitDTypeMeta_FromSpec, + (void *) PyArray_CommonDType, + (void *) PyArray_PromoteDTypeSequence, + (void *) _PyDataType_GetArrFuncs, + NULL, + NULL, + NULL +}; diff --git a/python/numpy/_core/include/numpy/__multiarray_api.h b/python/numpy/_core/include/numpy/__multiarray_api.h new file mode 100644 index 000000000..763dc85fb --- /dev/null +++ b/python/numpy/_core/include/numpy/__multiarray_api.h @@ -0,0 +1,1628 @@ + +#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) + +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; + +extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCVersion \ + (void); +extern NPY_NO_EXPORT PyTypeObject PyArray_Type; + +extern NPY_NO_EXPORT PyArray_DTypeMeta PyArrayDescr_TypeFull; +#define PyArrayDescr_Type (*(PyTypeObject *)(&PyArrayDescr_TypeFull)) + +extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type; + +extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type; + +extern NPY_NO_EXPORT int NPY_NUMUSERTYPES; + +extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type; + +extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; + +extern NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyNumberArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PySignedIntegerArrType_Type; + +extern 
NPY_NO_EXPORT PyTypeObject PyUnsignedIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyInexactArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyComplexFloatingArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFlexibleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCharacterArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUByteArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUShortArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUIntArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyULongLongArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCFloatArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyCLongDoubleArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyStringArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUnicodeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyVoidArrType_Type; + +NPY_NO_EXPORT int PyArray_INCREF \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_XDECREF \ + (PyArrayObject *); +NPY_NO_EXPORT void PyArray_SetStringFunction \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType \ + (int); +NPY_NO_EXPORT PyObject * PyArray_TypeObjectFromType \ + (int); +NPY_NO_EXPORT char * PyArray_Zero \ + (PyArrayObject *); +NPY_NO_EXPORT char * PyArray_One \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CastToType \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_CopyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CopyAnyInto \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT int PyArray_CanCastSafely \ + (int, int); +NPY_NO_EXPORT npy_bool PyArray_CanCastTo \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_ObjectType \ + (PyObject *, int); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromObject \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT PyArrayObject ** PyArray_ConvertToCommonType \ + (PyObject *, int *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromScalar \ + (PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromTypeObject \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_Size \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Scalar \ + (void *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromScalar \ + (PyObject *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_ScalarAsCtype \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_CastScalarToCtype \ + (PyObject *, void *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_CastScalarDirect \ + (PyObject *, PyArray_Descr *, void *, int); +NPY_NO_EXPORT int PyArray_Pack \ + (PyArray_Descr *, void *, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureArray \ + 
(PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_EnsureAnyArray \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromFile \ + (FILE *, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromString \ + (char *, npy_intp, PyArray_Descr *, npy_intp, char *); +NPY_NO_EXPORT PyObject * PyArray_FromBuffer \ + (PyObject *, PyArray_Descr *, npy_intp, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromIter \ + (PyObject *, PyArray_Descr *, npy_intp); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(1) PyObject * PyArray_Return \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_GetField \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetField \ + (PyArrayObject *, PyArray_Descr *, int, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Byteswap \ + (PyArrayObject *, npy_bool); +NPY_NO_EXPORT PyObject * PyArray_Resize \ + (PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order)); +NPY_NO_EXPORT int PyArray_CopyObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_NewCopy \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_ToList \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ToString \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT int PyArray_ToFile \ + (PyArrayObject *, FILE *, char *, char *); +NPY_NO_EXPORT int PyArray_Dump \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Dumps \ + (PyObject *, int); +NPY_NO_EXPORT int PyArray_ValidType \ + (int); +NPY_NO_EXPORT void PyArray_UpdateFlags \ + (PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_New \ + (PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_NewFromDescr \ + (PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNew \ + (PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewFromType \ + (int); +NPY_NO_EXPORT double PyArray_GetPriority \ + (PyObject *, double); +NPY_NO_EXPORT PyObject * PyArray_IterNew \ + (PyObject *); +NPY_NO_EXPORT PyObject* PyArray_MultiIterNew \ + (int, ...); +NPY_NO_EXPORT int PyArray_PyIntAsInt \ + (PyObject *); +NPY_NO_EXPORT npy_intp PyArray_PyIntAsIntp \ + (PyObject *); +NPY_NO_EXPORT int PyArray_Broadcast \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_FillWithScalar \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT npy_bool PyArray_CheckStrides \ + (int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *); +NPY_NO_EXPORT PyArray_Descr * PyArray_DescrNewByteorder \ + (PyArray_Descr *, char); +NPY_NO_EXPORT PyObject * PyArray_IterAllButAxis \ + (PyObject *, int *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_CheckFromAny \ + (PyObject *, PyArray_Descr *, int, int, int, PyObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_FromArray \ + (PyArrayObject *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_FromInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromStructInterface \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyArray_FromArrayAttr \ + (PyObject *, PyArray_Descr *, PyObject *); +NPY_NO_EXPORT NPY_SCALARKIND PyArray_ScalarKind \ + (int, PyArrayObject **); +NPY_NO_EXPORT int PyArray_CanCoerceScalar \ + (int, int, NPY_SCALARKIND); +NPY_NO_EXPORT npy_bool PyArray_CanCastScalar \ + (PyTypeObject *, PyTypeObject *); 
+NPY_NO_EXPORT int PyArray_RemoveSmallest \ + (PyArrayMultiIterObject *); +NPY_NO_EXPORT int PyArray_ElementStrides \ + (PyObject *); +NPY_NO_EXPORT void PyArray_Item_INCREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT void PyArray_Item_XDECREF \ + (char *, PyArray_Descr *); +NPY_NO_EXPORT PyObject * PyArray_Transpose \ + (PyArrayObject *, PyArray_Dims *); +NPY_NO_EXPORT PyObject * PyArray_TakeFrom \ + (PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutTo \ + (PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE); +NPY_NO_EXPORT PyObject * PyArray_PutMask \ + (PyArrayObject *, PyObject*, PyObject*); +NPY_NO_EXPORT PyObject * PyArray_Repeat \ + (PyArrayObject *, PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Choose \ + (PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE); +NPY_NO_EXPORT int PyArray_Sort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgSort \ + (PyArrayObject *, int, NPY_SORTKIND); +NPY_NO_EXPORT PyObject * PyArray_SearchSorted \ + (PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMax \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_ArgMin \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Reshape \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Newshape \ + (PyArrayObject *, PyArray_Dims *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Squeeze \ + (PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) PyObject * PyArray_View \ + (PyArrayObject *, PyArray_Descr *, PyTypeObject *); +NPY_NO_EXPORT PyObject * PyArray_SwapAxes \ + (PyArrayObject *, int, int); +NPY_NO_EXPORT PyObject * PyArray_Max \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Min \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Ptp \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Mean \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Trace \ + (PyArrayObject *, int, int, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Diagonal \ + (PyArrayObject *, int, int, int); +NPY_NO_EXPORT PyObject * PyArray_Clip \ + (PyArrayObject *, PyObject *, PyObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Conjugate \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Nonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Std \ + (PyArrayObject *, int, int, PyArrayObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Sum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumSum \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Prod \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_CumProd \ + (PyArrayObject *, int, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_All \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Any \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Compress \ + (PyArrayObject *, PyObject *, int, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyArray_Flatten \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT PyObject * PyArray_Ravel \ + (PyArrayObject *, NPY_ORDER); +NPY_NO_EXPORT npy_intp PyArray_MultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT int PyArray_MultiplyIntList \ + (int const *, int); +NPY_NO_EXPORT void * PyArray_GetPtr \ + 
(PyArrayObject *, npy_intp const*); +NPY_NO_EXPORT int PyArray_CompareLists \ + (npy_intp const *, npy_intp const *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(5) int PyArray_AsCArray \ + (PyObject **, void *, npy_intp *, int, PyArray_Descr*); +NPY_NO_EXPORT int PyArray_Free \ + (PyObject *, void *); +NPY_NO_EXPORT int PyArray_Converter \ + (PyObject *, PyObject **); +NPY_NO_EXPORT int PyArray_IntpFromSequence \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT PyObject * PyArray_Concatenate \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_InnerProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct \ + (PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Correlate \ + (PyObject *, PyObject *, int); +NPY_NO_EXPORT int PyArray_DescrConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_IntpConverter \ + (PyObject *, PyArray_Dims *); +NPY_NO_EXPORT int PyArray_BufferConverter \ + (PyObject *, PyArray_Chunk *); +NPY_NO_EXPORT int PyArray_AxisConverter \ + (PyObject *, int *); +NPY_NO_EXPORT int PyArray_BoolConverter \ + (PyObject *, npy_bool *); +NPY_NO_EXPORT int PyArray_ByteorderConverter \ + (PyObject *, char *); +NPY_NO_EXPORT int PyArray_OrderConverter \ + (PyObject *, NPY_ORDER *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Zeros \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_Empty \ + (int, npy_intp const *, PyArray_Descr *, int); +NPY_NO_EXPORT PyObject * PyArray_Where \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT PyObject * PyArray_Arange \ + (double, double, double, int); +NPY_NO_EXPORT PyObject * PyArray_ArangeObj \ + (PyObject *, PyObject *, PyObject *, PyArray_Descr *); +NPY_NO_EXPORT int PyArray_SortkindConverter \ + (PyObject *, NPY_SORTKIND *); +NPY_NO_EXPORT PyObject * PyArray_LexSort \ + (PyObject *, int); +NPY_NO_EXPORT PyObject * PyArray_Round \ + (PyArrayObject *, int, PyArrayObject *); +NPY_NO_EXPORT unsigned char PyArray_EquivTypenums \ + (int, int); +NPY_NO_EXPORT int PyArray_RegisterDataType \ + (PyArray_DescrProto *); +NPY_NO_EXPORT int PyArray_RegisterCastFunc \ + (PyArray_Descr *, int, PyArray_VectorUnaryFunc *); +NPY_NO_EXPORT int PyArray_RegisterCanCast \ + (PyArray_Descr *, int, NPY_SCALARKIND); +NPY_NO_EXPORT void PyArray_InitArrFuncs \ + (PyArray_ArrFuncs *); +NPY_NO_EXPORT PyObject * PyArray_IntTupleFromIntp \ + (int, npy_intp const *); +NPY_NO_EXPORT int PyArray_ClipmodeConverter \ + (PyObject *, NPY_CLIPMODE *); +NPY_NO_EXPORT int PyArray_OutputConverter \ + (PyObject *, PyArrayObject **); +NPY_NO_EXPORT PyObject * PyArray_BroadcastToShape \ + (PyObject *, npy_intp *, int); +NPY_NO_EXPORT int PyArray_DescrAlignConverter \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_DescrAlignConverter2 \ + (PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyArray_SearchsideConverter \ + (PyObject *, void *); +NPY_NO_EXPORT PyObject * PyArray_CheckAxis \ + (PyArrayObject *, int *, int); +NPY_NO_EXPORT npy_intp PyArray_OverflowMultiplyList \ + (npy_intp const *, int); +NPY_NO_EXPORT PyObject* PyArray_MultiIterFromObjects \ + (PyObject **, int, int, ...); +NPY_NO_EXPORT int PyArray_GetEndianness \ + (void); +NPY_NO_EXPORT unsigned int PyArray_GetNDArrayCFeatureVersion \ + (void); +NPY_NO_EXPORT PyObject * PyArray_Correlate2 \ + (PyObject *, 
PyObject *, int); +NPY_NO_EXPORT PyObject* PyArray_NeighborhoodIterNew \ + (PyArrayIterObject *, const npy_intp *, int, PyArrayObject*); +extern NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyDatetimeArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyTimedeltaArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject PyHalfArrType_Type; + +extern NPY_NO_EXPORT PyTypeObject NpyIter_Type; + +NPY_NO_EXPORT NPY_ARRAYMETHOD_FLAGS NpyIter_GetTransferFlags \ + (NpyIter *); +NPY_NO_EXPORT NpyIter * NpyIter_New \ + (PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*); +NPY_NO_EXPORT NpyIter * NpyIter_MultiNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **); +NPY_NO_EXPORT NpyIter * NpyIter_AdvancedNew \ + (int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp); +NPY_NO_EXPORT NpyIter * NpyIter_Copy \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Deallocate \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasDelayedBufAlloc \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_EnableExternalLoop \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerStrideArray \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetInnerLoopSizePtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_Reset \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_ResetBasePointers \ + (NpyIter *, char **, char **); +NPY_NO_EXPORT int NpyIter_ResetToIterIndexRange \ + (NpyIter *, npy_intp, npy_intp, char **); +NPY_NO_EXPORT int NpyIter_GetNDim \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetNOp \ + (NpyIter *); +NPY_NO_EXPORT NpyIter_IterNextFunc * NpyIter_GetIterNext \ + (NpyIter *, char **); +NPY_NO_EXPORT npy_intp NpyIter_GetIterSize \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetIterIndexRange \ + (NpyIter *, npy_intp *, npy_intp *); +NPY_NO_EXPORT npy_intp NpyIter_GetIterIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIterIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT npy_bool NpyIter_HasMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GetShape \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT NpyIter_GetMultiIndexFunc * NpyIter_GetGetMultiIndex \ + (NpyIter *, char **); +NPY_NO_EXPORT int NpyIter_GotoMultiIndex \ + (NpyIter *, npy_intp const *); +NPY_NO_EXPORT int NpyIter_RemoveMultiIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_HasIndex \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsBuffered \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IsGrowInner \ + (NpyIter *); +NPY_NO_EXPORT npy_intp NpyIter_GetBufferSize \ + (NpyIter *); +NPY_NO_EXPORT npy_intp * NpyIter_GetIndexPtr \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_GotoIndex \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT char ** NpyIter_GetDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArray_Descr ** NpyIter_GetDescrArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject ** NpyIter_GetOperandArray \ + (NpyIter *); +NPY_NO_EXPORT PyArrayObject * NpyIter_GetIterView \ + (NpyIter *, npy_intp); +NPY_NO_EXPORT void NpyIter_GetReadFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_GetWriteFlags \ + (NpyIter *, char *); +NPY_NO_EXPORT void NpyIter_DebugPrint \ + (NpyIter *); +NPY_NO_EXPORT npy_bool NpyIter_IterationNeedsAPI \ + (NpyIter *); +NPY_NO_EXPORT void NpyIter_GetInnerFixedStrideArray \ + (NpyIter *, npy_intp *); +NPY_NO_EXPORT int NpyIter_RemoveAxis \ + (NpyIter *, int); +NPY_NO_EXPORT npy_intp * NpyIter_GetAxisStrideArray \ 
+ (NpyIter *, int); +NPY_NO_EXPORT npy_bool NpyIter_RequiresBuffering \ + (NpyIter *); +NPY_NO_EXPORT char ** NpyIter_GetInitialDataPtrArray \ + (NpyIter *); +NPY_NO_EXPORT int NpyIter_CreateCompatibleStrides \ + (NpyIter *, npy_intp, npy_intp *); +NPY_NO_EXPORT int PyArray_CastingConverter \ + (PyObject *, NPY_CASTING *); +NPY_NO_EXPORT npy_intp PyArray_CountNonzero \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_PromoteTypes \ + (PyArray_Descr *, PyArray_Descr *); +NPY_NO_EXPORT PyArray_Descr * PyArray_MinScalarType \ + (PyArrayObject *); +NPY_NO_EXPORT PyArray_Descr * PyArray_ResultType \ + (npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[]); +NPY_NO_EXPORT npy_bool PyArray_CanCastArrayTo \ + (PyArrayObject *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT npy_bool PyArray_CanCastTypeTo \ + (PyArray_Descr *, PyArray_Descr *, NPY_CASTING); +NPY_NO_EXPORT PyArrayObject * PyArray_EinsteinSum \ + (char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(3) PyObject * PyArray_NewLikeArray \ + (PyArrayObject *, NPY_ORDER, PyArray_Descr *, int); +NPY_NO_EXPORT int PyArray_ConvertClipmodeSequence \ + (PyObject *, NPY_CLIPMODE *, int); +NPY_NO_EXPORT PyObject * PyArray_MatrixProduct2 \ + (PyObject *, PyObject *, PyArrayObject*); +NPY_NO_EXPORT npy_bool NpyIter_IsFirstVisit \ + (NpyIter *, int); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetBaseObject \ + (PyArrayObject *, PyObject *); +NPY_NO_EXPORT void PyArray_CreateSortedStridePerm \ + (int, npy_intp const *, npy_stride_sort_item *); +NPY_NO_EXPORT void PyArray_RemoveAxesInPlace \ + (PyArrayObject *, const npy_bool *); +NPY_NO_EXPORT void PyArray_DebugPrint \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_FailUnlessWriteable \ + (PyArrayObject *, const char *); +NPY_NO_EXPORT NPY_STEALS_REF_TO_ARG(2) int PyArray_SetUpdateIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT void * PyDataMem_NEW \ + (size_t); +NPY_NO_EXPORT void PyDataMem_FREE \ + (void *); +NPY_NO_EXPORT void * PyDataMem_RENEW \ + (void *, size_t); +extern NPY_NO_EXPORT NPY_CASTING NPY_DEFAULT_ASSIGN_CASTING; + +NPY_NO_EXPORT int PyArray_Partition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT PyObject * PyArray_ArgPartition \ + (PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND); +NPY_NO_EXPORT int PyArray_SelectkindConverter \ + (PyObject *, NPY_SELECTKIND *); +NPY_NO_EXPORT void * PyDataMem_NEW_ZEROED \ + (size_t, size_t); +NPY_NO_EXPORT int PyArray_CheckAnyScalarExact \ + (PyObject *); +NPY_NO_EXPORT int PyArray_ResolveWritebackIfCopy \ + (PyArrayObject *); +NPY_NO_EXPORT int PyArray_SetWritebackIfCopyBase \ + (PyArrayObject *, PyArrayObject *); +NPY_NO_EXPORT PyObject * PyDataMem_SetHandler \ + (PyObject *); +NPY_NO_EXPORT PyObject * PyDataMem_GetHandler \ + (void); +extern NPY_NO_EXPORT PyObject* PyDataMem_DefaultHandler; + +NPY_NO_EXPORT int NpyDatetime_ConvertDatetime64ToDatetimeStruct \ + (PyArray_DatetimeMetaData *, npy_datetime, npy_datetimestruct *); +NPY_NO_EXPORT int NpyDatetime_ConvertDatetimeStructToDatetime64 \ + (PyArray_DatetimeMetaData *, const npy_datetimestruct *, npy_datetime *); +NPY_NO_EXPORT int NpyDatetime_ConvertPyDateTimeToDatetimeStruct \ + (PyObject *, npy_datetimestruct *, NPY_DATETIMEUNIT *, int); +NPY_NO_EXPORT int NpyDatetime_GetDatetimeISO8601StrLen \ + (int, NPY_DATETIMEUNIT); +NPY_NO_EXPORT int NpyDatetime_MakeISO8601Datetime \ + (npy_datetimestruct *, char *, npy_intp, int, int, 
NPY_DATETIMEUNIT, int, NPY_CASTING); +NPY_NO_EXPORT int NpyDatetime_ParseISO8601Datetime \ + (char const *, Py_ssize_t, NPY_DATETIMEUNIT, NPY_CASTING, npy_datetimestruct *, NPY_DATETIMEUNIT *, npy_bool *); +NPY_NO_EXPORT int NpyString_load \ + (npy_string_allocator *, const npy_packed_static_string *, npy_static_string *); +NPY_NO_EXPORT int NpyString_pack \ + (npy_string_allocator *, npy_packed_static_string *, const char *, size_t); +NPY_NO_EXPORT int NpyString_pack_null \ + (npy_string_allocator *, npy_packed_static_string *); +NPY_NO_EXPORT npy_string_allocator * NpyString_acquire_allocator \ + (const PyArray_StringDTypeObject *); +NPY_NO_EXPORT void NpyString_acquire_allocators \ + (size_t, PyArray_Descr *const descrs[], npy_string_allocator *allocators[]); +NPY_NO_EXPORT void NpyString_release_allocator \ + (npy_string_allocator *); +NPY_NO_EXPORT void NpyString_release_allocators \ + (size_t, npy_string_allocator *allocators[]); +NPY_NO_EXPORT PyArray_Descr * PyArray_GetDefaultDescr \ + (PyArray_DTypeMeta *); +NPY_NO_EXPORT int PyArrayInitDTypeMeta_FromSpec \ + (PyArray_DTypeMeta *, PyArrayDTypeMeta_Spec *); +NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_CommonDType \ + (PyArray_DTypeMeta *, PyArray_DTypeMeta *); +NPY_NO_EXPORT PyArray_DTypeMeta * PyArray_PromoteDTypeSequence \ + (npy_intp, PyArray_DTypeMeta **); +NPY_NO_EXPORT PyArray_ArrFuncs * _PyDataType_GetArrFuncs \ + (const PyArray_Descr *); + +#else + +#if defined(PY_ARRAY_UNIQUE_SYMBOL) + #define PyArray_API PY_ARRAY_UNIQUE_SYMBOL + #define _NPY_VERSION_CONCAT_HELPER2(x, y) x ## y + #define _NPY_VERSION_CONCAT_HELPER(arg) \ + _NPY_VERSION_CONCAT_HELPER2(arg, PyArray_RUNTIME_VERSION) + #define PyArray_RUNTIME_VERSION \ + _NPY_VERSION_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL) +#endif + +/* By default do not export API in an .so (was never the case on windows) */ +#ifndef NPY_API_SYMBOL_ATTRIBUTE + #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) +extern NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API; +extern NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION; +#else +#if defined(PY_ARRAY_UNIQUE_SYMBOL) +NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API; +NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION; +#else +static void **PyArray_API = NULL; +static int PyArray_RUNTIME_VERSION = 0; +#endif +#endif + +#define PyArray_GetNDArrayCVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[0]) +#define PyArray_Type (*(PyTypeObject *)PyArray_API[2]) +#define PyArrayDescr_Type (*(PyTypeObject *)PyArray_API[3]) +#define PyArrayIter_Type (*(PyTypeObject *)PyArray_API[5]) +#define PyArrayMultiIter_Type (*(PyTypeObject *)PyArray_API[6]) +#define NPY_NUMUSERTYPES (*(int *)PyArray_API[7]) +#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[8]) +#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[9]) +#define PyGenericArrType_Type (*(PyTypeObject *)PyArray_API[10]) +#define PyNumberArrType_Type (*(PyTypeObject *)PyArray_API[11]) +#define PyIntegerArrType_Type (*(PyTypeObject *)PyArray_API[12]) +#define PySignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[13]) +#define PyUnsignedIntegerArrType_Type (*(PyTypeObject *)PyArray_API[14]) +#define PyInexactArrType_Type (*(PyTypeObject *)PyArray_API[15]) +#define PyFloatingArrType_Type (*(PyTypeObject *)PyArray_API[16]) +#define PyComplexFloatingArrType_Type (*(PyTypeObject *)PyArray_API[17]) +#define PyFlexibleArrType_Type (*(PyTypeObject *)PyArray_API[18]) +#define PyCharacterArrType_Type (*(PyTypeObject 
*)PyArray_API[19]) +#define PyByteArrType_Type (*(PyTypeObject *)PyArray_API[20]) +#define PyShortArrType_Type (*(PyTypeObject *)PyArray_API[21]) +#define PyIntArrType_Type (*(PyTypeObject *)PyArray_API[22]) +#define PyLongArrType_Type (*(PyTypeObject *)PyArray_API[23]) +#define PyLongLongArrType_Type (*(PyTypeObject *)PyArray_API[24]) +#define PyUByteArrType_Type (*(PyTypeObject *)PyArray_API[25]) +#define PyUShortArrType_Type (*(PyTypeObject *)PyArray_API[26]) +#define PyUIntArrType_Type (*(PyTypeObject *)PyArray_API[27]) +#define PyULongArrType_Type (*(PyTypeObject *)PyArray_API[28]) +#define PyULongLongArrType_Type (*(PyTypeObject *)PyArray_API[29]) +#define PyFloatArrType_Type (*(PyTypeObject *)PyArray_API[30]) +#define PyDoubleArrType_Type (*(PyTypeObject *)PyArray_API[31]) +#define PyLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[32]) +#define PyCFloatArrType_Type (*(PyTypeObject *)PyArray_API[33]) +#define PyCDoubleArrType_Type (*(PyTypeObject *)PyArray_API[34]) +#define PyCLongDoubleArrType_Type (*(PyTypeObject *)PyArray_API[35]) +#define PyObjectArrType_Type (*(PyTypeObject *)PyArray_API[36]) +#define PyStringArrType_Type (*(PyTypeObject *)PyArray_API[37]) +#define PyUnicodeArrType_Type (*(PyTypeObject *)PyArray_API[38]) +#define PyVoidArrType_Type (*(PyTypeObject *)PyArray_API[39]) +#define PyArray_INCREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[42]) +#define PyArray_XDECREF \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[43]) +#define PyArray_SetStringFunction \ + (*(void (*)(PyObject *, int)) \ + PyArray_API[44]) +#define PyArray_DescrFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[45]) +#define PyArray_TypeObjectFromType \ + (*(PyObject * (*)(int)) \ + PyArray_API[46]) +#define PyArray_Zero \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[47]) +#define PyArray_One \ + (*(char * (*)(PyArrayObject *)) \ + PyArray_API[48]) +#define PyArray_CastToType \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[49]) +#define PyArray_CopyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[50]) +#define PyArray_CopyAnyInto \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[51]) +#define PyArray_CanCastSafely \ + (*(int (*)(int, int)) \ + PyArray_API[52]) +#define PyArray_CanCastTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[53]) +#define PyArray_ObjectType \ + (*(int (*)(PyObject *, int)) \ + PyArray_API[54]) +#define PyArray_DescrFromObject \ + (*(PyArray_Descr * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[55]) +#define PyArray_ConvertToCommonType \ + (*(PyArrayObject ** (*)(PyObject *, int *)) \ + PyArray_API[56]) +#define PyArray_DescrFromScalar \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[57]) +#define PyArray_DescrFromTypeObject \ + (*(PyArray_Descr * (*)(PyObject *)) \ + PyArray_API[58]) +#define PyArray_Size \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[59]) +#define PyArray_Scalar \ + (*(PyObject * (*)(void *, PyArray_Descr *, PyObject *)) \ + PyArray_API[60]) +#define PyArray_FromScalar \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *)) \ + PyArray_API[61]) +#define PyArray_ScalarAsCtype \ + (*(void (*)(PyObject *, void *)) \ + PyArray_API[62]) +#define PyArray_CastScalarToCtype \ + (*(int (*)(PyObject *, void *, PyArray_Descr *)) \ + PyArray_API[63]) +#define PyArray_CastScalarDirect \ + (*(int (*)(PyObject *, PyArray_Descr *, void *, int)) \ + PyArray_API[64]) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_Pack \ + (*(int 
(*)(PyArray_Descr *, void *, PyObject *)) \ + PyArray_API[65]) +#endif +#define PyArray_FromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[69]) +#define PyArray_EnsureArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[70]) +#define PyArray_EnsureAnyArray \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[71]) +#define PyArray_FromFile \ + (*(PyObject * (*)(FILE *, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[72]) +#define PyArray_FromString \ + (*(PyObject * (*)(char *, npy_intp, PyArray_Descr *, npy_intp, char *)) \ + PyArray_API[73]) +#define PyArray_FromBuffer \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp, npy_intp)) \ + PyArray_API[74]) +#define PyArray_FromIter \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, npy_intp)) \ + PyArray_API[75]) +#define PyArray_Return \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[76]) +#define PyArray_GetField \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[77]) +#define PyArray_SetField \ + (*(int (*)(PyArrayObject *, PyArray_Descr *, int, PyObject *)) \ + PyArray_API[78]) +#define PyArray_Byteswap \ + (*(PyObject * (*)(PyArrayObject *, npy_bool)) \ + PyArray_API[79]) +#define PyArray_Resize \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, int, NPY_ORDER NPY_UNUSED(order))) \ + PyArray_API[80]) +#define PyArray_CopyObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[84]) +#define PyArray_NewCopy \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[85]) +#define PyArray_ToList \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[86]) +#define PyArray_ToString \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[87]) +#define PyArray_ToFile \ + (*(int (*)(PyArrayObject *, FILE *, char *, char *)) \ + PyArray_API[88]) +#define PyArray_Dump \ + (*(int (*)(PyObject *, PyObject *, int)) \ + PyArray_API[89]) +#define PyArray_Dumps \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[90]) +#define PyArray_ValidType \ + (*(int (*)(int)) \ + PyArray_API[91]) +#define PyArray_UpdateFlags \ + (*(void (*)(PyArrayObject *, int)) \ + PyArray_API[92]) +#define PyArray_New \ + (*(PyObject * (*)(PyTypeObject *, int, npy_intp const *, int, npy_intp const *, void *, int, int, PyObject *)) \ + PyArray_API[93]) +#define PyArray_NewFromDescr \ + (*(PyObject * (*)(PyTypeObject *, PyArray_Descr *, int, npy_intp const *, npy_intp const *, void *, int, PyObject *)) \ + PyArray_API[94]) +#define PyArray_DescrNew \ + (*(PyArray_Descr * (*)(PyArray_Descr *)) \ + PyArray_API[95]) +#define PyArray_DescrNewFromType \ + (*(PyArray_Descr * (*)(int)) \ + PyArray_API[96]) +#define PyArray_GetPriority \ + (*(double (*)(PyObject *, double)) \ + PyArray_API[97]) +#define PyArray_IterNew \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[98]) +#define PyArray_MultiIterNew \ + (*(PyObject* (*)(int, ...)) \ + PyArray_API[99]) +#define PyArray_PyIntAsInt \ + (*(int (*)(PyObject *)) \ + PyArray_API[100]) +#define PyArray_PyIntAsIntp \ + (*(npy_intp (*)(PyObject *)) \ + PyArray_API[101]) +#define PyArray_Broadcast \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[102]) +#define PyArray_FillWithScalar \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[104]) +#define PyArray_CheckStrides \ + (*(npy_bool (*)(int, int, npy_intp, npy_intp, npy_intp const *, npy_intp const *)) \ + PyArray_API[105]) +#define PyArray_DescrNewByteorder \ + (*(PyArray_Descr * (*)(PyArray_Descr *, char)) \ + PyArray_API[106]) +#define 
PyArray_IterAllButAxis \ + (*(PyObject * (*)(PyObject *, int *)) \ + PyArray_API[107]) +#define PyArray_CheckFromAny \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, int, int, int, PyObject *)) \ + PyArray_API[108]) +#define PyArray_FromArray \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, int)) \ + PyArray_API[109]) +#define PyArray_FromInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[110]) +#define PyArray_FromStructInterface \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[111]) +#define PyArray_FromArrayAttr \ + (*(PyObject * (*)(PyObject *, PyArray_Descr *, PyObject *)) \ + PyArray_API[112]) +#define PyArray_ScalarKind \ + (*(NPY_SCALARKIND (*)(int, PyArrayObject **)) \ + PyArray_API[113]) +#define PyArray_CanCoerceScalar \ + (*(int (*)(int, int, NPY_SCALARKIND)) \ + PyArray_API[114]) +#define PyArray_CanCastScalar \ + (*(npy_bool (*)(PyTypeObject *, PyTypeObject *)) \ + PyArray_API[116]) +#define PyArray_RemoveSmallest \ + (*(int (*)(PyArrayMultiIterObject *)) \ + PyArray_API[118]) +#define PyArray_ElementStrides \ + (*(int (*)(PyObject *)) \ + PyArray_API[119]) +#define PyArray_Item_INCREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[120]) +#define PyArray_Item_XDECREF \ + (*(void (*)(char *, PyArray_Descr *)) \ + PyArray_API[121]) +#define PyArray_Transpose \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *)) \ + PyArray_API[123]) +#define PyArray_TakeFrom \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[124]) +#define PyArray_PutTo \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject *, NPY_CLIPMODE)) \ + PyArray_API[125]) +#define PyArray_PutMask \ + (*(PyObject * (*)(PyArrayObject *, PyObject*, PyObject*)) \ + PyArray_API[126]) +#define PyArray_Repeat \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int)) \ + PyArray_API[127]) +#define PyArray_Choose \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyArrayObject *, NPY_CLIPMODE)) \ + PyArray_API[128]) +#define PyArray_Sort \ + (*(int (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[129]) +#define PyArray_ArgSort \ + (*(PyObject * (*)(PyArrayObject *, int, NPY_SORTKIND)) \ + PyArray_API[130]) +#define PyArray_SearchSorted \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, NPY_SEARCHSIDE, PyObject *)) \ + PyArray_API[131]) +#define PyArray_ArgMax \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[132]) +#define PyArray_ArgMin \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[133]) +#define PyArray_Reshape \ + (*(PyObject * (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[134]) +#define PyArray_Newshape \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Dims *, NPY_ORDER)) \ + PyArray_API[135]) +#define PyArray_Squeeze \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[136]) +#define PyArray_View \ + (*(PyObject * (*)(PyArrayObject *, PyArray_Descr *, PyTypeObject *)) \ + PyArray_API[137]) +#define PyArray_SwapAxes \ + (*(PyObject * (*)(PyArrayObject *, int, int)) \ + PyArray_API[138]) +#define PyArray_Max \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[139]) +#define PyArray_Min \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[140]) +#define PyArray_Ptp \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[141]) +#define PyArray_Mean \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[142]) +#define PyArray_Trace \ + (*(PyObject * (*)(PyArrayObject *, 
int, int, int, int, PyArrayObject *)) \ + PyArray_API[143]) +#define PyArray_Diagonal \ + (*(PyObject * (*)(PyArrayObject *, int, int, int)) \ + PyArray_API[144]) +#define PyArray_Clip \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, PyObject *, PyArrayObject *)) \ + PyArray_API[145]) +#define PyArray_Conjugate \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[146]) +#define PyArray_Nonzero \ + (*(PyObject * (*)(PyArrayObject *)) \ + PyArray_API[147]) +#define PyArray_Std \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *, int)) \ + PyArray_API[148]) +#define PyArray_Sum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[149]) +#define PyArray_CumSum \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[150]) +#define PyArray_Prod \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[151]) +#define PyArray_CumProd \ + (*(PyObject * (*)(PyArrayObject *, int, int, PyArrayObject *)) \ + PyArray_API[152]) +#define PyArray_All \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[153]) +#define PyArray_Any \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[154]) +#define PyArray_Compress \ + (*(PyObject * (*)(PyArrayObject *, PyObject *, int, PyArrayObject *)) \ + PyArray_API[155]) +#define PyArray_Flatten \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[156]) +#define PyArray_Ravel \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER)) \ + PyArray_API[157]) +#define PyArray_MultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[158]) +#define PyArray_MultiplyIntList \ + (*(int (*)(int const *, int)) \ + PyArray_API[159]) +#define PyArray_GetPtr \ + (*(void * (*)(PyArrayObject *, npy_intp const*)) \ + PyArray_API[160]) +#define PyArray_CompareLists \ + (*(int (*)(npy_intp const *, npy_intp const *, int)) \ + PyArray_API[161]) +#define PyArray_AsCArray \ + (*(int (*)(PyObject **, void *, npy_intp *, int, PyArray_Descr*)) \ + PyArray_API[162]) +#define PyArray_Free \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[165]) +#define PyArray_Converter \ + (*(int (*)(PyObject *, PyObject **)) \ + PyArray_API[166]) +#define PyArray_IntpFromSequence \ + (*(int (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[167]) +#define PyArray_Concatenate \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[168]) +#define PyArray_InnerProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[169]) +#define PyArray_MatrixProduct \ + (*(PyObject * (*)(PyObject *, PyObject *)) \ + PyArray_API[170]) +#define PyArray_Correlate \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[172]) +#define PyArray_DescrConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[174]) +#define PyArray_DescrConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[175]) +#define PyArray_IntpConverter \ + (*(int (*)(PyObject *, PyArray_Dims *)) \ + PyArray_API[176]) +#define PyArray_BufferConverter \ + (*(int (*)(PyObject *, PyArray_Chunk *)) \ + PyArray_API[177]) +#define PyArray_AxisConverter \ + (*(int (*)(PyObject *, int *)) \ + PyArray_API[178]) +#define PyArray_BoolConverter \ + (*(int (*)(PyObject *, npy_bool *)) \ + PyArray_API[179]) +#define PyArray_ByteorderConverter \ + (*(int (*)(PyObject *, char *)) \ + PyArray_API[180]) +#define PyArray_OrderConverter \ + (*(int (*)(PyObject *, NPY_ORDER *)) \ + PyArray_API[181]) +#define PyArray_EquivTypes \ + (*(unsigned 
char (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[182]) +#define PyArray_Zeros \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[183]) +#define PyArray_Empty \ + (*(PyObject * (*)(int, npy_intp const *, PyArray_Descr *, int)) \ + PyArray_API[184]) +#define PyArray_Where \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *)) \ + PyArray_API[185]) +#define PyArray_Arange \ + (*(PyObject * (*)(double, double, double, int)) \ + PyArray_API[186]) +#define PyArray_ArangeObj \ + (*(PyObject * (*)(PyObject *, PyObject *, PyObject *, PyArray_Descr *)) \ + PyArray_API[187]) +#define PyArray_SortkindConverter \ + (*(int (*)(PyObject *, NPY_SORTKIND *)) \ + PyArray_API[188]) +#define PyArray_LexSort \ + (*(PyObject * (*)(PyObject *, int)) \ + PyArray_API[189]) +#define PyArray_Round \ + (*(PyObject * (*)(PyArrayObject *, int, PyArrayObject *)) \ + PyArray_API[190]) +#define PyArray_EquivTypenums \ + (*(unsigned char (*)(int, int)) \ + PyArray_API[191]) +#define PyArray_RegisterDataType \ + (*(int (*)(PyArray_DescrProto *)) \ + PyArray_API[192]) +#define PyArray_RegisterCastFunc \ + (*(int (*)(PyArray_Descr *, int, PyArray_VectorUnaryFunc *)) \ + PyArray_API[193]) +#define PyArray_RegisterCanCast \ + (*(int (*)(PyArray_Descr *, int, NPY_SCALARKIND)) \ + PyArray_API[194]) +#define PyArray_InitArrFuncs \ + (*(void (*)(PyArray_ArrFuncs *)) \ + PyArray_API[195]) +#define PyArray_IntTupleFromIntp \ + (*(PyObject * (*)(int, npy_intp const *)) \ + PyArray_API[196]) +#define PyArray_ClipmodeConverter \ + (*(int (*)(PyObject *, NPY_CLIPMODE *)) \ + PyArray_API[198]) +#define PyArray_OutputConverter \ + (*(int (*)(PyObject *, PyArrayObject **)) \ + PyArray_API[199]) +#define PyArray_BroadcastToShape \ + (*(PyObject * (*)(PyObject *, npy_intp *, int)) \ + PyArray_API[200]) +#define PyArray_DescrAlignConverter \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[203]) +#define PyArray_DescrAlignConverter2 \ + (*(int (*)(PyObject *, PyArray_Descr **)) \ + PyArray_API[204]) +#define PyArray_SearchsideConverter \ + (*(int (*)(PyObject *, void *)) \ + PyArray_API[205]) +#define PyArray_CheckAxis \ + (*(PyObject * (*)(PyArrayObject *, int *, int)) \ + PyArray_API[206]) +#define PyArray_OverflowMultiplyList \ + (*(npy_intp (*)(npy_intp const *, int)) \ + PyArray_API[207]) +#define PyArray_MultiIterFromObjects \ + (*(PyObject* (*)(PyObject **, int, int, ...)) \ + PyArray_API[209]) +#define PyArray_GetEndianness \ + (*(int (*)(void)) \ + PyArray_API[210]) +#define PyArray_GetNDArrayCFeatureVersion \ + (*(unsigned int (*)(void)) \ + PyArray_API[211]) +#define PyArray_Correlate2 \ + (*(PyObject * (*)(PyObject *, PyObject *, int)) \ + PyArray_API[212]) +#define PyArray_NeighborhoodIterNew \ + (*(PyObject* (*)(PyArrayIterObject *, const npy_intp *, int, PyArrayObject*)) \ + PyArray_API[213]) +#define PyTimeIntegerArrType_Type (*(PyTypeObject *)PyArray_API[214]) +#define PyDatetimeArrType_Type (*(PyTypeObject *)PyArray_API[215]) +#define PyTimedeltaArrType_Type (*(PyTypeObject *)PyArray_API[216]) +#define PyHalfArrType_Type (*(PyTypeObject *)PyArray_API[217]) +#define NpyIter_Type (*(PyTypeObject *)PyArray_API[218]) + +#if NPY_FEATURE_VERSION >= NPY_2_3_API_VERSION +#define NpyIter_GetTransferFlags \ + (*(NPY_ARRAYMETHOD_FLAGS (*)(NpyIter *)) \ + PyArray_API[223]) +#endif +#define NpyIter_New \ + (*(NpyIter * (*)(PyArrayObject *, npy_uint32, NPY_ORDER, NPY_CASTING, PyArray_Descr*)) \ + PyArray_API[224]) +#define NpyIter_MultiNew \ + (*(NpyIter * (*)(int, PyArrayObject 
**, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **)) \ + PyArray_API[225]) +#define NpyIter_AdvancedNew \ + (*(NpyIter * (*)(int, PyArrayObject **, npy_uint32, NPY_ORDER, NPY_CASTING, npy_uint32 *, PyArray_Descr **, int, int **, npy_intp *, npy_intp)) \ + PyArray_API[226]) +#define NpyIter_Copy \ + (*(NpyIter * (*)(NpyIter *)) \ + PyArray_API[227]) +#define NpyIter_Deallocate \ + (*(int (*)(NpyIter *)) \ + PyArray_API[228]) +#define NpyIter_HasDelayedBufAlloc \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[229]) +#define NpyIter_HasExternalLoop \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[230]) +#define NpyIter_EnableExternalLoop \ + (*(int (*)(NpyIter *)) \ + PyArray_API[231]) +#define NpyIter_GetInnerStrideArray \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[232]) +#define NpyIter_GetInnerLoopSizePtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[233]) +#define NpyIter_Reset \ + (*(int (*)(NpyIter *, char **)) \ + PyArray_API[234]) +#define NpyIter_ResetBasePointers \ + (*(int (*)(NpyIter *, char **, char **)) \ + PyArray_API[235]) +#define NpyIter_ResetToIterIndexRange \ + (*(int (*)(NpyIter *, npy_intp, npy_intp, char **)) \ + PyArray_API[236]) +#define NpyIter_GetNDim \ + (*(int (*)(NpyIter *)) \ + PyArray_API[237]) +#define NpyIter_GetNOp \ + (*(int (*)(NpyIter *)) \ + PyArray_API[238]) +#define NpyIter_GetIterNext \ + (*(NpyIter_IterNextFunc * (*)(NpyIter *, char **)) \ + PyArray_API[239]) +#define NpyIter_GetIterSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[240]) +#define NpyIter_GetIterIndexRange \ + (*(void (*)(NpyIter *, npy_intp *, npy_intp *)) \ + PyArray_API[241]) +#define NpyIter_GetIterIndex \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[242]) +#define NpyIter_GotoIterIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[243]) +#define NpyIter_HasMultiIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[244]) +#define NpyIter_GetShape \ + (*(int (*)(NpyIter *, npy_intp *)) \ + PyArray_API[245]) +#define NpyIter_GetGetMultiIndex \ + (*(NpyIter_GetMultiIndexFunc * (*)(NpyIter *, char **)) \ + PyArray_API[246]) +#define NpyIter_GotoMultiIndex \ + (*(int (*)(NpyIter *, npy_intp const *)) \ + PyArray_API[247]) +#define NpyIter_RemoveMultiIndex \ + (*(int (*)(NpyIter *)) \ + PyArray_API[248]) +#define NpyIter_HasIndex \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[249]) +#define NpyIter_IsBuffered \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[250]) +#define NpyIter_IsGrowInner \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[251]) +#define NpyIter_GetBufferSize \ + (*(npy_intp (*)(NpyIter *)) \ + PyArray_API[252]) +#define NpyIter_GetIndexPtr \ + (*(npy_intp * (*)(NpyIter *)) \ + PyArray_API[253]) +#define NpyIter_GotoIndex \ + (*(int (*)(NpyIter *, npy_intp)) \ + PyArray_API[254]) +#define NpyIter_GetDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[255]) +#define NpyIter_GetDescrArray \ + (*(PyArray_Descr ** (*)(NpyIter *)) \ + PyArray_API[256]) +#define NpyIter_GetOperandArray \ + (*(PyArrayObject ** (*)(NpyIter *)) \ + PyArray_API[257]) +#define NpyIter_GetIterView \ + (*(PyArrayObject * (*)(NpyIter *, npy_intp)) \ + PyArray_API[258]) +#define NpyIter_GetReadFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[259]) +#define NpyIter_GetWriteFlags \ + (*(void (*)(NpyIter *, char *)) \ + PyArray_API[260]) +#define NpyIter_DebugPrint \ + (*(void (*)(NpyIter *)) \ + PyArray_API[261]) +#define NpyIter_IterationNeedsAPI \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[262]) +#define NpyIter_GetInnerFixedStrideArray \ + 
(*(void (*)(NpyIter *, npy_intp *)) \ + PyArray_API[263]) +#define NpyIter_RemoveAxis \ + (*(int (*)(NpyIter *, int)) \ + PyArray_API[264]) +#define NpyIter_GetAxisStrideArray \ + (*(npy_intp * (*)(NpyIter *, int)) \ + PyArray_API[265]) +#define NpyIter_RequiresBuffering \ + (*(npy_bool (*)(NpyIter *)) \ + PyArray_API[266]) +#define NpyIter_GetInitialDataPtrArray \ + (*(char ** (*)(NpyIter *)) \ + PyArray_API[267]) +#define NpyIter_CreateCompatibleStrides \ + (*(int (*)(NpyIter *, npy_intp, npy_intp *)) \ + PyArray_API[268]) +#define PyArray_CastingConverter \ + (*(int (*)(PyObject *, NPY_CASTING *)) \ + PyArray_API[269]) +#define PyArray_CountNonzero \ + (*(npy_intp (*)(PyArrayObject *)) \ + PyArray_API[270]) +#define PyArray_PromoteTypes \ + (*(PyArray_Descr * (*)(PyArray_Descr *, PyArray_Descr *)) \ + PyArray_API[271]) +#define PyArray_MinScalarType \ + (*(PyArray_Descr * (*)(PyArrayObject *)) \ + PyArray_API[272]) +#define PyArray_ResultType \ + (*(PyArray_Descr * (*)(npy_intp, PyArrayObject *arrs[], npy_intp, PyArray_Descr *descrs[])) \ + PyArray_API[273]) +#define PyArray_CanCastArrayTo \ + (*(npy_bool (*)(PyArrayObject *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[274]) +#define PyArray_CanCastTypeTo \ + (*(npy_bool (*)(PyArray_Descr *, PyArray_Descr *, NPY_CASTING)) \ + PyArray_API[275]) +#define PyArray_EinsteinSum \ + (*(PyArrayObject * (*)(char *, npy_intp, PyArrayObject **, PyArray_Descr *, NPY_ORDER, NPY_CASTING, PyArrayObject *)) \ + PyArray_API[276]) +#define PyArray_NewLikeArray \ + (*(PyObject * (*)(PyArrayObject *, NPY_ORDER, PyArray_Descr *, int)) \ + PyArray_API[277]) +#define PyArray_ConvertClipmodeSequence \ + (*(int (*)(PyObject *, NPY_CLIPMODE *, int)) \ + PyArray_API[279]) +#define PyArray_MatrixProduct2 \ + (*(PyObject * (*)(PyObject *, PyObject *, PyArrayObject*)) \ + PyArray_API[280]) +#define NpyIter_IsFirstVisit \ + (*(npy_bool (*)(NpyIter *, int)) \ + PyArray_API[281]) +#define PyArray_SetBaseObject \ + (*(int (*)(PyArrayObject *, PyObject *)) \ + PyArray_API[282]) +#define PyArray_CreateSortedStridePerm \ + (*(void (*)(int, npy_intp const *, npy_stride_sort_item *)) \ + PyArray_API[283]) +#define PyArray_RemoveAxesInPlace \ + (*(void (*)(PyArrayObject *, const npy_bool *)) \ + PyArray_API[284]) +#define PyArray_DebugPrint \ + (*(void (*)(PyArrayObject *)) \ + PyArray_API[285]) +#define PyArray_FailUnlessWriteable \ + (*(int (*)(PyArrayObject *, const char *)) \ + PyArray_API[286]) +#define PyArray_SetUpdateIfCopyBase \ + (*(int (*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[287]) +#define PyDataMem_NEW \ + (*(void * (*)(size_t)) \ + PyArray_API[288]) +#define PyDataMem_FREE \ + (*(void (*)(void *)) \ + PyArray_API[289]) +#define PyDataMem_RENEW \ + (*(void * (*)(void *, size_t)) \ + PyArray_API[290]) +#define NPY_DEFAULT_ASSIGN_CASTING (*(NPY_CASTING *)PyArray_API[292]) +#define PyArray_Partition \ + (*(int (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[296]) +#define PyArray_ArgPartition \ + (*(PyObject * (*)(PyArrayObject *, PyArrayObject *, int, NPY_SELECTKIND)) \ + PyArray_API[297]) +#define PyArray_SelectkindConverter \ + (*(int (*)(PyObject *, NPY_SELECTKIND *)) \ + PyArray_API[298]) +#define PyDataMem_NEW_ZEROED \ + (*(void * (*)(size_t, size_t)) \ + PyArray_API[299]) +#define PyArray_CheckAnyScalarExact \ + (*(int (*)(PyObject *)) \ + PyArray_API[300]) +#define PyArray_ResolveWritebackIfCopy \ + (*(int (*)(PyArrayObject *)) \ + PyArray_API[302]) +#define PyArray_SetWritebackIfCopyBase \ + (*(int 
(*)(PyArrayObject *, PyArrayObject *)) \ + PyArray_API[303]) + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION +#define PyDataMem_SetHandler \ + (*(PyObject * (*)(PyObject *)) \ + PyArray_API[304]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION +#define PyDataMem_GetHandler \ + (*(PyObject * (*)(void)) \ + PyArray_API[305]) +#endif +#define PyDataMem_DefaultHandler (*(PyObject* *)PyArray_API[306]) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertDatetime64ToDatetimeStruct \ + (*(int (*)(PyArray_DatetimeMetaData *, npy_datetime, npy_datetimestruct *)) \ + PyArray_API[307]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertDatetimeStructToDatetime64 \ + (*(int (*)(PyArray_DatetimeMetaData *, const npy_datetimestruct *, npy_datetime *)) \ + PyArray_API[308]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ConvertPyDateTimeToDatetimeStruct \ + (*(int (*)(PyObject *, npy_datetimestruct *, NPY_DATETIMEUNIT *, int)) \ + PyArray_API[309]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_GetDatetimeISO8601StrLen \ + (*(int (*)(int, NPY_DATETIMEUNIT)) \ + PyArray_API[310]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_MakeISO8601Datetime \ + (*(int (*)(npy_datetimestruct *, char *, npy_intp, int, int, NPY_DATETIMEUNIT, int, NPY_CASTING)) \ + PyArray_API[311]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyDatetime_ParseISO8601Datetime \ + (*(int (*)(char const *, Py_ssize_t, NPY_DATETIMEUNIT, NPY_CASTING, npy_datetimestruct *, NPY_DATETIMEUNIT *, npy_bool *)) \ + PyArray_API[312]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_load \ + (*(int (*)(npy_string_allocator *, const npy_packed_static_string *, npy_static_string *)) \ + PyArray_API[313]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_pack \ + (*(int (*)(npy_string_allocator *, npy_packed_static_string *, const char *, size_t)) \ + PyArray_API[314]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_pack_null \ + (*(int (*)(npy_string_allocator *, npy_packed_static_string *)) \ + PyArray_API[315]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_acquire_allocator \ + (*(npy_string_allocator * (*)(const PyArray_StringDTypeObject *)) \ + PyArray_API[316]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_acquire_allocators \ + (*(void (*)(size_t, PyArray_Descr *const descrs[], npy_string_allocator *allocators[])) \ + PyArray_API[317]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_release_allocator \ + (*(void (*)(npy_string_allocator *)) \ + PyArray_API[318]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define NpyString_release_allocators \ + (*(void (*)(size_t, npy_string_allocator *allocators[])) \ + PyArray_API[319]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_GetDefaultDescr \ + (*(PyArray_Descr * (*)(PyArray_DTypeMeta *)) \ + PyArray_API[361]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArrayInitDTypeMeta_FromSpec \ + (*(int (*)(PyArray_DTypeMeta *, PyArrayDTypeMeta_Spec *)) \ + PyArray_API[362]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_CommonDType \ + (*(PyArray_DTypeMeta * (*)(PyArray_DTypeMeta *, PyArray_DTypeMeta *)) \ + PyArray_API[363]) +#endif + +#if 
NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyArray_PromoteDTypeSequence \ + (*(PyArray_DTypeMeta * (*)(npy_intp, PyArray_DTypeMeta **)) \ + PyArray_API[364]) +#endif +#define _PyDataType_GetArrFuncs \ + (*(PyArray_ArrFuncs * (*)(const PyArray_Descr *)) \ + PyArray_API[365]) + +/* + * The DType classes are inconvenient for the Python generation so exposed + * manually in the header below (may be moved). + */ +#include "numpy/_public_dtype_api_table.h" + +#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) +static int +_import_array(void) +{ + int st; + PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); + PyObject *c_api; + if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { + PyErr_Clear(); + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + } + + if (numpy == NULL) { + return -1; + } + + c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyArray_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); + return -1; + } + + /* + * On exceedingly few platforms these sizes may not match, in which case + * we do not support older NumPy versions at all. + */ + if (sizeof(Py_ssize_t) != sizeof(Py_intptr_t) && + PyArray_RUNTIME_VERSION < NPY_2_0_API_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "module compiled against NumPy 2.0 but running on NumPy 1.x. " + "Unfortunately, this is not supported on niche platforms where " + "`sizeof(size_t) != sizeof(intptr_t)`."); + } + /* + * Perform runtime check of C API version. As of now NumPy 2.0 is ABI + * backwards compatible (in the exposed feature subset!) for all practical + * purposes. + */ + if (NPY_VERSION < PyArray_GetNDArrayCVersion()) { + PyErr_Format(PyExc_RuntimeError, "module compiled against "\ + "ABI version 0x%x but this version of numpy is 0x%x", \ + (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); + return -1; + } + PyArray_RUNTIME_VERSION = (int)PyArray_GetNDArrayCFeatureVersion(); + if (NPY_FEATURE_VERSION > PyArray_RUNTIME_VERSION) { + PyErr_Format(PyExc_RuntimeError, + "module was compiled against NumPy C-API version 0x%x " + "(NumPy " NPY_FEATURE_VERSION_STRING ") " + "but the running NumPy has C-API version 0x%x. 
" + "Check the section C-API incompatibility at the " + "Troubleshooting ImportError section at " + "https://numpy.org/devdocs/user/troubleshooting-importerror.html" + "#c-api-incompatibility " + "for indications on how to solve this problem.", + (int)NPY_FEATURE_VERSION, PyArray_RUNTIME_VERSION); + return -1; + } + + /* + * Perform runtime check of endianness and check it matches the one set by + * the headers (npy_endian.h) as a safeguard + */ + st = PyArray_GetEndianness(); + if (st == NPY_CPU_UNKNOWN_ENDIAN) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as unknown endian"); + return -1; + } +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN + if (st != NPY_CPU_BIG) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as big endian, but " + "detected different endianness at runtime"); + return -1; + } +#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN + if (st != NPY_CPU_LITTLE) { + PyErr_SetString(PyExc_RuntimeError, + "FATAL: module compiled as little endian, but " + "detected different endianness at runtime"); + return -1; + } +#endif + + return 0; +} + +#if (SWIG_VERSION < 0x040400) +#define _RETURN_VALUE NULL +#else +#define _RETURN_VALUE 0 +#endif + +#define import_array() { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString( \ + PyExc_ImportError, \ + "numpy._core.multiarray failed to import" \ + ); \ + return _RETURN_VALUE; \ + } \ +} + +#define import_array1(ret) { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString( \ + PyExc_ImportError, \ + "numpy._core.multiarray failed to import" \ + ); \ + return ret; \ + } \ +} + +#define import_array2(msg, ret) { \ + if (_import_array() < 0) { \ + PyErr_Print(); \ + PyErr_SetString(PyExc_ImportError, msg); \ + return ret; \ + } \ +} + +#endif + +#endif diff --git a/python/numpy/_core/include/numpy/__ufunc_api.c b/python/numpy/_core/include/numpy/__ufunc_api.c new file mode 100644 index 000000000..10fcbc455 --- /dev/null +++ b/python/numpy/_core/include/numpy/__ufunc_api.c @@ -0,0 +1,54 @@ + +/* These pointers will be stored in the C-object for use in other + extension modules +*/ + +void *PyUFunc_API[] = { + (void *) &PyUFunc_Type, + (void *) PyUFunc_FromFuncAndData, + (void *) PyUFunc_RegisterLoopForType, + NULL, + (void *) PyUFunc_f_f_As_d_d, + (void *) PyUFunc_d_d, + (void *) PyUFunc_f_f, + (void *) PyUFunc_g_g, + (void *) PyUFunc_F_F_As_D_D, + (void *) PyUFunc_F_F, + (void *) PyUFunc_D_D, + (void *) PyUFunc_G_G, + (void *) PyUFunc_O_O, + (void *) PyUFunc_ff_f_As_dd_d, + (void *) PyUFunc_ff_f, + (void *) PyUFunc_dd_d, + (void *) PyUFunc_gg_g, + (void *) PyUFunc_FF_F_As_DD_D, + (void *) PyUFunc_DD_D, + (void *) PyUFunc_FF_F, + (void *) PyUFunc_GG_G, + (void *) PyUFunc_OO_O, + (void *) PyUFunc_O_O_method, + (void *) PyUFunc_OO_O_method, + (void *) PyUFunc_On_Om, + NULL, + NULL, + (void *) PyUFunc_clearfperr, + (void *) PyUFunc_getfperr, + NULL, + (void *) PyUFunc_ReplaceLoopBySignature, + (void *) PyUFunc_FromFuncAndDataAndSignature, + NULL, + (void *) PyUFunc_e_e, + (void *) PyUFunc_e_e_As_f_f, + (void *) PyUFunc_e_e_As_d_d, + (void *) PyUFunc_ee_e, + (void *) PyUFunc_ee_e_As_ff_f, + (void *) PyUFunc_ee_e_As_dd_d, + (void *) PyUFunc_DefaultTypeResolver, + (void *) PyUFunc_ValidateCasting, + (void *) PyUFunc_RegisterLoopForDescr, + (void *) PyUFunc_FromFuncAndDataAndSignatureAndIdentity, + (void *) PyUFunc_AddLoopFromSpec, + (void *) PyUFunc_AddPromoter, + (void *) PyUFunc_AddWrappingLoop, + (void *) PyUFunc_GiveFloatingpointErrors +}; diff --git 
a/python/numpy/_core/include/numpy/__ufunc_api.h b/python/numpy/_core/include/numpy/__ufunc_api.h new file mode 100644 index 000000000..b05dce341 --- /dev/null +++ b/python/numpy/_core/include/numpy/__ufunc_api.h @@ -0,0 +1,341 @@ + +#ifdef _UMATHMODULE + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +extern NPY_NO_EXPORT PyTypeObject PyUFunc_Type; + +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndData \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForType \ + (PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *); +NPY_NO_EXPORT void PyUFunc_f_f_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_g_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F_As_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_F_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_D_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_G_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f_As_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_dd_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_gg_g \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F_As_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_DD_D \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_FF_F \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_GG_G \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_O_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_OO_O_method \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_On_Om \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_clearfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_getfperr \ + (void); +NPY_NO_EXPORT int PyUFunc_ReplaceLoopBySignature \ + (PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignature \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *); +NPY_NO_EXPORT void PyUFunc_e_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_f_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_e_e_As_d_d \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_ff_f \ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT void PyUFunc_ee_e_As_dd_d 
\ + (char **, npy_intp const *, npy_intp const *, void *); +NPY_NO_EXPORT int PyUFunc_DefaultTypeResolver \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **); +NPY_NO_EXPORT int PyUFunc_ValidateCasting \ + (PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *); +NPY_NO_EXPORT int PyUFunc_RegisterLoopForDescr \ + (PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *); +NPY_NO_EXPORT PyObject * PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *); +NPY_NO_EXPORT int PyUFunc_AddLoopFromSpec \ + (PyObject *, PyArrayMethod_Spec *); +NPY_NO_EXPORT int PyUFunc_AddPromoter \ + (PyObject *, PyObject *, PyObject *); +NPY_NO_EXPORT int PyUFunc_AddWrappingLoop \ + (PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *); +NPY_NO_EXPORT int PyUFunc_GiveFloatingpointErrors \ + (const char *, int); + +#else + +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +#define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL +#endif + +/* By default do not export API in an .so (was never the case on windows) */ +#ifndef NPY_API_SYMBOL_ATTRIBUTE + #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN +#endif + +#if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) +extern NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API; +#else +#if defined(PY_UFUNC_UNIQUE_SYMBOL) +NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API; +#else +static void **PyUFunc_API=NULL; +#endif +#endif + +#define PyUFunc_Type (*(PyTypeObject *)PyUFunc_API[0]) +#define PyUFunc_FromFuncAndData \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int)) \ + PyUFunc_API[1]) +#define PyUFunc_RegisterLoopForType \ + (*(int (*)(PyUFuncObject *, int, PyUFuncGenericFunction, const int *, void *)) \ + PyUFunc_API[2]) +#define PyUFunc_f_f_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[4]) +#define PyUFunc_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[5]) +#define PyUFunc_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[6]) +#define PyUFunc_g_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[7]) +#define PyUFunc_F_F_As_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[8]) +#define PyUFunc_F_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[9]) +#define PyUFunc_D_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[10]) +#define PyUFunc_G_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[11]) +#define PyUFunc_O_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[12]) +#define PyUFunc_ff_f_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[13]) +#define PyUFunc_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[14]) +#define PyUFunc_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[15]) +#define PyUFunc_gg_g \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[16]) +#define PyUFunc_FF_F_As_DD_D \ + 
(*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[17]) +#define PyUFunc_DD_D \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[18]) +#define PyUFunc_FF_F \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[19]) +#define PyUFunc_GG_G \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[20]) +#define PyUFunc_OO_O \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[21]) +#define PyUFunc_O_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[22]) +#define PyUFunc_OO_O_method \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[23]) +#define PyUFunc_On_Om \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[24]) +#define PyUFunc_clearfperr \ + (*(void (*)(void)) \ + PyUFunc_API[27]) +#define PyUFunc_getfperr \ + (*(int (*)(void)) \ + PyUFunc_API[28]) +#define PyUFunc_ReplaceLoopBySignature \ + (*(int (*)(PyUFuncObject *, PyUFuncGenericFunction, const int *, PyUFuncGenericFunction *)) \ + PyUFunc_API[30]) +#define PyUFunc_FromFuncAndDataAndSignature \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, int, const char *)) \ + PyUFunc_API[31]) +#define PyUFunc_e_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[33]) +#define PyUFunc_e_e_As_f_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[34]) +#define PyUFunc_e_e_As_d_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[35]) +#define PyUFunc_ee_e \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[36]) +#define PyUFunc_ee_e_As_ff_f \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[37]) +#define PyUFunc_ee_e_As_dd_d \ + (*(void (*)(char **, npy_intp const *, npy_intp const *, void *)) \ + PyUFunc_API[38]) +#define PyUFunc_DefaultTypeResolver \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyObject *, PyArray_Descr **)) \ + PyUFunc_API[39]) +#define PyUFunc_ValidateCasting \ + (*(int (*)(PyUFuncObject *, NPY_CASTING, PyArrayObject **, PyArray_Descr *const *)) \ + PyUFunc_API[40]) +#define PyUFunc_RegisterLoopForDescr \ + (*(int (*)(PyUFuncObject *, PyArray_Descr *, PyUFuncGenericFunction, PyArray_Descr **, void *)) \ + PyUFunc_API[41]) + +#if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION +#define PyUFunc_FromFuncAndDataAndSignatureAndIdentity \ + (*(PyObject * (*)(PyUFuncGenericFunction *, void *const *, const char *, int, int, int, int, const char *, const char *, const int, const char *, PyObject *)) \ + PyUFunc_API[42]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddLoopFromSpec \ + (*(int (*)(PyObject *, PyArrayMethod_Spec *)) \ + PyUFunc_API[43]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddPromoter \ + (*(int (*)(PyObject *, PyObject *, PyObject *)) \ + PyUFunc_API[44]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define PyUFunc_AddWrappingLoop \ + (*(int (*)(PyObject *, PyArray_DTypeMeta *new_dtypes[], PyArray_DTypeMeta *wrapped_dtypes[], PyArrayMethod_TranslateGivenDescriptors *, PyArrayMethod_TranslateLoopDescriptors *)) \ + PyUFunc_API[45]) +#endif + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +#define 
PyUFunc_GiveFloatingpointErrors \ + (*(int (*)(const char *, int)) \ + PyUFunc_API[46]) +#endif + +static inline int +_import_umath(void) +{ + PyObject *c_api; + PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); + if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { + PyErr_Clear(); + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); + } + + if (numpy == NULL) { + PyErr_SetString(PyExc_ImportError, + "_multiarray_umath failed to import"); + return -1; + } + + c_api = PyObject_GetAttrString(numpy, "_UFUNC_API"); + Py_DECREF(numpy); + if (c_api == NULL) { + PyErr_SetString(PyExc_AttributeError, "_UFUNC_API not found"); + return -1; + } + + if (!PyCapsule_CheckExact(c_api)) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is not PyCapsule object"); + Py_DECREF(c_api); + return -1; + } + PyUFunc_API = (void **)PyCapsule_GetPointer(c_api, NULL); + Py_DECREF(c_api); + if (PyUFunc_API == NULL) { + PyErr_SetString(PyExc_RuntimeError, "_UFUNC_API is NULL pointer"); + return -1; + } + return 0; +} + +#define import_umath() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + return NULL;\ + }\ + } while(0) + +#define import_umath1(ret) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + return ret;\ + }\ + } while(0) + +#define import_umath2(ret, msg) \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError, msg);\ + return ret;\ + }\ + } while(0) + +#define import_ufunc() \ + do {\ + UFUNC_NOFPE\ + if (_import_umath() < 0) {\ + PyErr_Print();\ + PyErr_SetString(PyExc_ImportError,\ + "numpy._core.umath failed to import");\ + }\ + } while(0) + + +static inline int +PyUFunc_ImportUFuncAPI() +{ + if (NPY_UNLIKELY(PyUFunc_API == NULL)) { + import_umath1(-1); + } + return 0; +} + +#endif diff --git a/python/numpy/_core/include/numpy/_neighborhood_iterator_imp.h b/python/numpy/_core/include/numpy/_neighborhood_iterator_imp.h new file mode 100644 index 000000000..b365cb508 --- /dev/null +++ b/python/numpy/_core/include/numpy/_neighborhood_iterator_imp.h @@ -0,0 +1,90 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#error You should not include this header directly +#endif +/* + * Private API (here for inline) + */ +static inline int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter); + +/* + * Update to next item of the iterator + * + * Note: this simply increments the coordinates vector, last dimension + * incremented first, i.e., for dimension 3 + * ... + * -1, -1, -1 + * -1, -1, 0 + * -1, -1, 1 + * .... + * -1, 0, -1 + * -1, 0, 0 + * .... + * 0, -1, -1 + * 0, -1, 0 + * .... 
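+ *
+ * In other words, the coordinate vector behaves like an odometer over the
+ * inclusive ranges [bounds[c][0], bounds[c][1]]: the last dimension is
+ * bumped on every step, and once it has reached its upper bound the next
+ * step wraps it back to its lower bound and carries into the next outer
+ * dimension (see _UPDATE_COORD_ITER below).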
+ */ +#define _UPDATE_COORD_ITER(c) \ + wb = iter->coordinates[c] < iter->bounds[c][1]; \ + if (wb) { \ + iter->coordinates[c] += 1; \ + return 0; \ + } \ + else { \ + iter->coordinates[c] = iter->bounds[c][0]; \ + } + +static inline int +_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i, wb; + + for (i = iter->nd - 1; i >= 0; --i) { + _UPDATE_COORD_ITER(i) + } + + return 0; +} + +/* + * Version optimized for 2d arrays, manual loop unrolling + */ +static inline int +_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp wb; + + _UPDATE_COORD_ITER(1) + _UPDATE_COORD_ITER(0) + + return 0; +} +#undef _UPDATE_COORD_ITER + +/* + * Advance to the next neighbour + */ +static inline int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter) +{ + _PyArrayNeighborhoodIter_IncrCoord (iter); + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} + +/* + * Reset functions + */ +static inline int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter) +{ + npy_intp i; + + for (i = 0; i < iter->nd; ++i) { + iter->coordinates[i] = iter->bounds[i][0]; + } + iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); + + return 0; +} diff --git a/python/numpy/_core/include/numpy/_numpyconfig.h b/python/numpy/_core/include/numpy/_numpyconfig.h new file mode 100644 index 000000000..16a4b443d --- /dev/null +++ b/python/numpy/_core/include/numpy/_numpyconfig.h @@ -0,0 +1,33 @@ +#define NPY_HAVE_ENDIAN_H 1 + +#define NPY_SIZEOF_SHORT 2 +#define NPY_SIZEOF_INT 4 +#define NPY_SIZEOF_LONG 8 +#define NPY_SIZEOF_FLOAT 4 +#define NPY_SIZEOF_COMPLEX_FLOAT 8 +#define NPY_SIZEOF_DOUBLE 8 +#define NPY_SIZEOF_COMPLEX_DOUBLE 16 +#define NPY_SIZEOF_LONGDOUBLE 16 +#define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 +#define NPY_SIZEOF_PY_INTPTR_T 8 +#define NPY_SIZEOF_INTP 8 +#define NPY_SIZEOF_UINTP 8 +#define NPY_SIZEOF_WCHAR_T 4 +#define NPY_SIZEOF_OFF_T 8 +#define NPY_SIZEOF_PY_LONG_LONG 8 +#define NPY_SIZEOF_LONGLONG 8 + +/* + * Defined to 1 or 0. Note that Pyodide hardcodes NPY_NO_SMP (and other defines + * in this header) for better cross-compilation, so don't rename them without a + * good reason. + */ +#define NPY_NO_SMP 0 + +#define NPY_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#define NPY_ABI_VERSION 0x02000000 +#define NPY_API_VERSION 0x00000014 + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS 1 +#endif diff --git a/python/numpy/_core/include/numpy/_public_dtype_api_table.h b/python/numpy/_core/include/numpy/_public_dtype_api_table.h new file mode 100644 index 000000000..51f390540 --- /dev/null +++ b/python/numpy/_core/include/numpy/_public_dtype_api_table.h @@ -0,0 +1,86 @@ +/* + * Public exposure of the DType Classes. These are tricky to expose + * via the Python API, so they are exposed through this header for now. + * + * These definitions are only relevant for the public API and we reserve + * the slots 320-360 in the API table generation for this (currently). + * + * TODO: This file should be consolidated with the API table generation + * (although not sure the current generation is worth preserving). 
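+ *
+ * Note: each macro below dereferences slot (320 + i) of the same exported
+ * PyArray_API table that backs the function-pointer defines, so these
+ * DType handles are usable only after a successful import_array().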
+ */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ +#define NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ + +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + +/* All of these require NumPy 2.0 support */ +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + +/* + * The type of the DType metaclass + */ +#define PyArrayDTypeMeta_Type (*(PyTypeObject *)(PyArray_API + 320)[0]) +/* + * NumPy's builtin DTypes: + */ +#define PyArray_BoolDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[1]) +/* Integers */ +#define PyArray_ByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[2]) +#define PyArray_UByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[3]) +#define PyArray_ShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[4]) +#define PyArray_UShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[5]) +#define PyArray_IntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[6]) +#define PyArray_UIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[7]) +#define PyArray_LongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[8]) +#define PyArray_ULongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[9]) +#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[10]) +#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[11]) +/* Integer aliases */ +#define PyArray_Int8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[12]) +#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[13]) +#define PyArray_Int16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[14]) +#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[15]) +#define PyArray_Int32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[16]) +#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[17]) +#define PyArray_Int64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[18]) +#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[19]) +#define PyArray_IntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[20]) +#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[21]) +/* Floats */ +#define PyArray_HalfDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[22]) +#define PyArray_FloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[23]) +#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[24]) +#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[25]) +/* Complex */ +#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[26]) +#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[27]) +#define PyArray_CLongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[28]) +/* String/Bytes */ +#define PyArray_BytesDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[29]) +#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[30]) +/* Datetime/Timedelta */ +#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[31]) +#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[32]) +/* Object/Void */ +#define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33]) +#define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34]) +/* Python types (used as markers for scalars) */ +#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) +#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) +#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Default integer type */ +#define PyArray_DefaultIntDType (*(PyArray_DTypeMeta 
*)(PyArray_API + 320)[38]) +/* New non-legacy DTypes follow in the order they were added */ +#define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39]) + +/* NOTE: offset 40 is free */ + +/* Need to start with a larger offset again for the abstract classes: */ +#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366]) +#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367]) +#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368]) + +#endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */ + +#endif /* NPY_INTERNAL_BUILD */ +#endif /* NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ */ diff --git a/python/numpy/_core/include/numpy/arrayobject.h b/python/numpy/_core/include/numpy/arrayobject.h new file mode 100644 index 000000000..97d93590e --- /dev/null +++ b/python/numpy/_core/include/numpy/arrayobject.h @@ -0,0 +1,7 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ +#define Py_ARRAYOBJECT_H + +#include "ndarrayobject.h" + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */ diff --git a/python/numpy/_core/include/numpy/arrayscalars.h b/python/numpy/_core/include/numpy/arrayscalars.h new file mode 100644 index 000000000..ff048061f --- /dev/null +++ b/python/numpy/_core/include/numpy/arrayscalars.h @@ -0,0 +1,196 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ + +#ifndef _MULTIARRAYMODULE +typedef struct { + PyObject_HEAD + npy_bool obval; +} PyBoolScalarObject; +#endif + + +typedef struct { + PyObject_HEAD + signed char obval; +} PyByteScalarObject; + + +typedef struct { + PyObject_HEAD + short obval; +} PyShortScalarObject; + + +typedef struct { + PyObject_HEAD + int obval; +} PyIntScalarObject; + + +typedef struct { + PyObject_HEAD + long obval; +} PyLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longlong obval; +} PyLongLongScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned char obval; +} PyUByteScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned short obval; +} PyUShortScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned int obval; +} PyUIntScalarObject; + + +typedef struct { + PyObject_HEAD + unsigned long obval; +} PyULongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_ulonglong obval; +} PyULongLongScalarObject; + + +typedef struct { + PyObject_HEAD + npy_half obval; +} PyHalfScalarObject; + + +typedef struct { + PyObject_HEAD + float obval; +} PyFloatScalarObject; + + +typedef struct { + PyObject_HEAD + double obval; +} PyDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_longdouble obval; +} PyLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cfloat obval; +} PyCFloatScalarObject; + + +typedef struct { + PyObject_HEAD + npy_cdouble obval; +} PyCDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + npy_clongdouble obval; +} PyCLongDoubleScalarObject; + + +typedef struct { + PyObject_HEAD + PyObject * obval; +} PyObjectScalarObject; + +typedef struct { + PyObject_HEAD + npy_datetime obval; + PyArray_DatetimeMetaData obmeta; +} PyDatetimeScalarObject; + +typedef struct { + PyObject_HEAD + npy_timedelta obval; + PyArray_DatetimeMetaData obmeta; +} PyTimedeltaScalarObject; + + +typedef struct { + PyObject_HEAD + char obval; +} PyScalarObject; + +#define PyStringScalarObject PyBytesObject +#ifndef Py_LIMITED_API +typedef struct { + /* note that the PyObject_HEAD macro lives right here */ + PyUnicodeObject base; + 
Py_UCS4 *obval; + #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + char *buffer_fmt; + #endif +} PyUnicodeScalarObject; +#endif + + +typedef struct { + PyObject_VAR_HEAD + char *obval; +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* Internally use the subclass to allow accessing names/fields */ + _PyArray_LegacyDescr *descr; +#else + PyArray_Descr *descr; +#endif + int flags; + PyObject *base; + #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + void *_buffer_info; /* private buffer info, tagged to allow warning */ + #endif +} PyVoidScalarObject; + +/* Macros + PyScalarObject + PyArrType_Type + are defined in ndarrayobject.h +*/ + +#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0]))) +#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1]))) +#define PyArrayScalar_FromLong(i) \ + ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)]))) +#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \ + return Py_INCREF(PyArrayScalar_FromLong(i)), \ + PyArrayScalar_FromLong(i) +#define PyArrayScalar_RETURN_FALSE \ + return Py_INCREF(PyArrayScalar_False), \ + PyArrayScalar_False +#define PyArrayScalar_RETURN_TRUE \ + return Py_INCREF(PyArrayScalar_True), \ + PyArrayScalar_True + +#define PyArrayScalar_New(cls) \ + Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0) +#ifndef Py_LIMITED_API +/* For the limited API, use PyArray_ScalarAsCtype instead */ +#define PyArrayScalar_VAL(obj, cls) \ + ((Py##cls##ScalarObject *)obj)->obval +#define PyArrayScalar_ASSIGN(obj, cls, val) \ + PyArrayScalar_VAL(obj, cls) = val +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */ diff --git a/python/numpy/_core/include/numpy/dtype_api.h b/python/numpy/_core/include/numpy/dtype_api.h new file mode 100644 index 000000000..b37c9fbb6 --- /dev/null +++ b/python/numpy/_core/include/numpy/dtype_api.h @@ -0,0 +1,480 @@ +/* + * The public DType API + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ +#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ + +struct PyArrayMethodObject_tag; + +/* + * Largely opaque struct for DType classes (i.e. metaclass instances). + * The internal definition is currently in `ndarraytypes.h` (export is a bit + * more complex because `PyArray_Descr` is a DTypeMeta internally but not + * externally). + */ +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + +#ifndef Py_LIMITED_API + + typedef struct PyArray_DTypeMeta_tag { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance, for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* Copy of the legacy DTypes type number, usually invalid. */ + int type_num; + + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* + * DType flags to signal legacy, parametric, or + * abstract. But plenty of space for additional information/flags. + */ + npy_uint64 flags; + + /* + * Use indirection in order to allow a fixed size for this struct. + * A stable ABI size makes creating a static DType less painful + * while also ensuring flexibility for all opaque API (with one + * indirection due the pointer lookup). 
+ */ + void *dt_slots; + /* Allow growing (at the moment also beyond this) */ + void *reserved[3]; + } PyArray_DTypeMeta; + +#else + +typedef PyTypeObject PyArray_DTypeMeta; + +#endif /* Py_LIMITED_API */ + +#endif /* not internal build */ + +/* + * ****************************************************** + * ArrayMethod API (Casting and UFuncs) + * ****************************************************** + */ + + +typedef enum { + /* Flag for whether the GIL is required */ + NPY_METH_REQUIRES_PYAPI = 1 << 0, + /* + * Some functions cannot set floating point error flags, this flag + * gives us the option (not requirement) to skip floating point error + * setup/check. No function should set error flags and ignore them + * since it would interfere with chaining operations (e.g. casting). + */ + NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1, + /* Whether the method supports unaligned access (not runtime) */ + NPY_METH_SUPPORTS_UNALIGNED = 1 << 2, + /* + * Used for reductions to allow reordering the operation. At this point + * assume that if set, it also applies to normal operations though! + */ + NPY_METH_IS_REORDERABLE = 1 << 3, + /* + * Private flag for now for *logic* functions. The logical functions + * `logical_or` and `logical_and` can always cast the inputs to booleans + * "safely" (because that is how the cast to bool is defined). + * @seberg: I am not sure this is the best way to handle this, so its + * private for now (also it is very limited anyway). + * There is one "exception". NA aware dtypes cannot cast to bool + * (hopefully), so the `??->?` loop should error even with this flag. + * But a second NA fallback loop will be necessary. + */ + _NPY_METH_FORCE_CAST_INPUTS = 1 << 17, + + /* All flags which can change at runtime */ + NPY_METH_RUNTIME_FLAGS = ( + NPY_METH_REQUIRES_PYAPI | + NPY_METH_NO_FLOATINGPOINT_ERRORS), +} NPY_ARRAYMETHOD_FLAGS; + + +typedef struct PyArrayMethod_Context_tag { + /* The caller, which is typically the original ufunc. May be NULL */ + PyObject *caller; + /* The method "self". Currently an opaque object. */ + struct PyArrayMethodObject_tag *method; + + /* Operand descriptors, filled in by resolve_descriptors */ + PyArray_Descr *const *descriptors; + /* Structure may grow (this is harmless for DType authors) */ +} PyArrayMethod_Context; + + +/* + * The main object for creating a new ArrayMethod. We use the typical `slots` + * mechanism used by the Python limited API (see below for the slot defs). + */ +typedef struct { + const char *name; + int nin, nout; + NPY_CASTING casting; + NPY_ARRAYMETHOD_FLAGS flags; + PyArray_DTypeMeta **dtypes; + PyType_Slot *slots; +} PyArrayMethod_Spec; + + +/* + * ArrayMethod slots + * ----------------- + * + * SLOTS IDs For the ArrayMethod creation, once fully public, IDs are fixed + * but can be deprecated and arbitrarily extended. + */ +#define _NPY_METH_resolve_descriptors_with_scalars 1 +#define NPY_METH_resolve_descriptors 2 +#define NPY_METH_get_loop 3 +#define NPY_METH_get_reduction_initial 4 +/* specific loops for constructions/default get_loop: */ +#define NPY_METH_strided_loop 5 +#define NPY_METH_contiguous_loop 6 +#define NPY_METH_unaligned_strided_loop 7 +#define NPY_METH_unaligned_contiguous_loop 8 +#define NPY_METH_contiguous_indexed_loop 9 +#define _NPY_METH_static_data 10 + + +/* + * The resolve descriptors function, must be able to handle NULL values for + * all output (but not input) `given_descrs` and fill `loop_descrs`. + * Return -1 on error or 0 if the operation is not possible without an error + * set. 
(This may still be in flux.) + * Otherwise must return the "casting safety", for normal functions, this is + * almost always "safe" (or even "equivalent"?). + * + * `resolve_descriptors` is optional if all output DTypes are non-parametric. + */ +typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( + /* "method" is currently opaque (necessary e.g. to wrap Python) */ + struct PyArrayMethodObject_tag *method, + /* DTypes the method was created for */ + PyArray_DTypeMeta *const *dtypes, + /* Input descriptors (instances). Outputs may be NULL. */ + PyArray_Descr *const *given_descrs, + /* Exact loop descriptors to use, must not hold references on error */ + PyArray_Descr **loop_descrs, + npy_intp *view_offset); + + +/* + * Rarely needed, slightly more powerful version of `resolve_descriptors`. + * See also `PyArrayMethod_ResolveDescriptors` for details on shared arguments. + * + * NOTE: This function is private now as it is unclear how and what to pass + * exactly as additional information to allow dealing with the scalars. + * See also gh-24915. + */ +typedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)( + struct PyArrayMethodObject_tag *method, + PyArray_DTypeMeta *const *dtypes, + /* Unlike above, these can have any DType and we may allow NULL. */ + PyArray_Descr *const *given_descrs, + /* + * Input scalars or NULL. Only ever passed for python scalars. + * WARNING: In some cases, a loop may be explicitly selected and the + * value passed is not available (NULL) or does not have the + * expected type. + */ + PyObject *const *input_scalars, + PyArray_Descr **loop_descrs, + npy_intp *view_offset); + + + +typedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context, + char *const *data, const npy_intp *dimensions, const npy_intp *strides, + NpyAuxData *transferdata); + + +typedef int (PyArrayMethod_GetLoop)( + PyArrayMethod_Context *context, + int aligned, int move_references, + const npy_intp *strides, + PyArrayMethod_StridedLoop **out_loop, + NpyAuxData **out_transferdata, + NPY_ARRAYMETHOD_FLAGS *flags); + +/** + * Query an ArrayMethod for the initial value for use in reduction. + * + * @param context The arraymethod context, mainly to access the descriptors. + * @param reduction_is_empty Whether the reduction is empty. When it is, the + * value returned may differ. In this case it is a "default" value that + * may differ from the "identity" value normally used. For example: + * - `0.0` is the default for `sum([])`. But `-0.0` is the correct + * identity otherwise as it preserves the sign for `sum([-0.0])`. + * - We use no identity for object, but return the default of `0` and `1` + * for the empty `sum([], dtype=object)` and `prod([], dtype=object)`. + * This allows `np.sum(np.array(["a", "b"], dtype=object))` to work. + * - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN` + * not a good *default* when there are no items. + * @param initial Pointer to initial data to be filled (if possible) + * + * @returns -1, 0, or 1 indicating error, no initial value, and initial being + * successfully filled. Errors must not be given where 0 is correct, NumPy + * may call this even when not strictly necessary. 
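+ *
+ * A minimal implementation sketch (hypothetical, for a float64 "add" loop
+ * whose identity is 0.0; error paths omitted):
+ *
+ *     static int
+ *     add_get_initial(PyArrayMethod_Context *context,
+ *                     npy_bool reduction_is_empty, void *initial)
+ *     {
+ *         double zero = 0.0;
+ *         memcpy(initial, &zero, sizeof(zero));
+ *         return 1;  // initial value was filled
+ *     }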
+ */
+typedef int (PyArrayMethod_GetReductionInitial)(
+        PyArrayMethod_Context *context, npy_bool reduction_is_empty,
+        void *initial);
+
+/*
+ * The following functions are only used by the wrapping array method defined
+ * in umath/wrapping_array_method.c
+ */
+
+
+/*
+ * The function converts the given descriptors (passed in to
+ * `resolve_descriptors`) and translates them for the wrapped loop.
+ * The new descriptors MUST be viewable with the old ones, `NULL` must be
+ * supported (for outputs) and should normally be forwarded.
+ *
+ * The function must clean up on error.
+ *
+ * NOTE: We currently assume that this translation gives "viewable" results.
+ *       I.e. there is no additional casting related to the wrapping process.
+ *       In principle that could be supported, but not sure it is useful.
+ *       This currently also means that e.g. alignment must apply identically
+ *       to the new dtypes.
+ *
+ * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast`
+ *       there is no way to "pass out" the result of this function. This means
+ *       it will be called twice for every ufunc call.
+ *       (I am considering including `auxdata` as an "optional" parameter to
+ *       `resolve_descriptors`, so that it can be filled there if not NULL.)
+ */
+typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout,
+        PyArray_DTypeMeta *const wrapped_dtypes[],
+        PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]);
+
+/**
+ * The function to convert the actual loop descriptors (as returned by the
+ * original `resolve_descriptors` function) to the ones the output array
+ * should use.
+ * This function must return "viewable" types, it must not mutate them in any
+ * form that would break the inner-loop logic. Does not need to support NULL.
+ *
+ * The function must clean up on error.
+ *
+ * @param nin Number of input arguments
+ * @param nout Number of output arguments
+ * @param new_dtypes The DTypes of the output (usually probably not needed)
+ * @param given_descrs Original given_descrs to the resolver, necessary to
+ *        fetch any information related to the new dtypes from the original.
+ * @param original_descrs The `loop_descrs` returned by the wrapped loop.
+ * @param loop_descrs The output descriptors, compatible with `original_descrs`.
+ *
+ * @returns 0 on success, -1 on failure.
+ */
+typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout,
+        PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[],
+        PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);
+
+
+
+/*
+ * A traverse loop working on a single array. This is similar to the general
+ * strided-loop function. This is designed for loops that need to visit every
+ * element of a single array.
+ *
+ * Currently this is used for array clearing, via the NPY_DT_get_clear_loop
+ * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook.
+ * These are most useful for handling arrays storing embedded references to
+ * python objects or heap-allocated data.
+ *
+ * The `void *traverse_context` is passed in because we may need to pass in
+ * Interpreter state or similar in the future, but we don't want to pass in
+ * a full context (with pointers to dtypes, method, caller which all make
+ * no sense for a traverse function).
+ *
+ * We assume for now that this context can be just passed through in the
+ * future (for structured dtypes).
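+ *
+ * A minimal sketch of such a loop (hypothetical, for a dtype storing one
+ * PyObject pointer per element, as a clear-loop would visit it):
+ *
+ *     static int
+ *     clear_objects(void *traverse_context, const PyArray_Descr *descr,
+ *                   char *data, npy_intp size, npy_intp stride,
+ *                   NpyAuxData *auxdata)
+ *     {
+ *         while (size--) {
+ *             Py_CLEAR(*(PyObject **)data);
+ *             data += stride;
+ *         }
+ *         return 0;
+ *     }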
+ * + */ +typedef int (PyArrayMethod_TraverseLoop)( + void *traverse_context, const PyArray_Descr *descr, char *data, + npy_intp size, npy_intp stride, NpyAuxData *auxdata); + + +/* + * Simplified get_loop function specific to dtype traversal + * + * It should set the flags needed for the traversal loop and set out_loop to the + * loop function, which must be a valid PyArrayMethod_TraverseLoop + * pointer. Currently this is used for zero-filling and clearing arrays storing + * embedded references. + * + */ +typedef int (PyArrayMethod_GetTraverseLoop)( + void *traverse_context, const PyArray_Descr *descr, + int aligned, npy_intp fixed_stride, + PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, + NPY_ARRAYMETHOD_FLAGS *flags); + + +/* + * Type of the C promoter function, which must be wrapped into a + * PyCapsule with name "numpy._ufunc_promoter". + * + * Note that currently the output dtypes are always NULL unless they are + * also part of the signature. This is an implementation detail and could + * change in the future. However, in general promoters should not have a + * need for output dtypes. + * (There are potential use-cases, these are currently unsupported.) + */ +typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]); + +/* + * **************************** + * DTYPE API + * **************************** + */ + +#define NPY_DT_ABSTRACT 1 << 1 +#define NPY_DT_PARAMETRIC 1 << 2 +#define NPY_DT_NUMERIC 1 << 3 + +/* + * These correspond to slots in the NPY_DType_Slots struct and must + * be in the same order as the members of that struct. If new slots + * get added or old slots get removed NPY_NUM_DTYPE_SLOTS must also + * be updated + */ + +#define NPY_DT_discover_descr_from_pyobject 1 +// this slot is considered private because its API hasn't been decided +#define _NPY_DT_is_known_scalar_type 2 +#define NPY_DT_default_descr 3 +#define NPY_DT_common_dtype 4 +#define NPY_DT_common_instance 5 +#define NPY_DT_ensure_canonical 6 +#define NPY_DT_setitem 7 +#define NPY_DT_getitem 8 +#define NPY_DT_get_clear_loop 9 +#define NPY_DT_get_fill_zero_loop 10 +#define NPY_DT_finalize_descr 11 + +// These PyArray_ArrFunc slots will be deprecated and replaced eventually +// getitem and setitem can be defined as a performance optimization; +// by default the user dtypes call `legacy_getitem_using_DType` and +// `legacy_setitem_using_DType`, respectively. This functionality is +// only supported for basic NumPy DTypes. 
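+
+// As a rough sketch, a new DType hands these IDs to NumPy in a PyType_Slot
+// array (hypothetical names; `my_setitem`/`my_getitem`/`my_default_descr`
+// stand for user functions matching the typedefs further below):
+//
+//     static PyType_Slot my_dtype_slots[] = {
+//         {NPY_DT_setitem, &my_setitem},
+//         {NPY_DT_getitem, &my_getitem},
+//         {NPY_DT_default_descr, &my_default_descr},
+//         {0, NULL},
+//     };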
+ + +// used to separate dtype slots from arrfuncs slots +// intended only for internal use but defined here for clarity +#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10) + +// Cast is disabled +// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET + +#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET + +// Copyswap is disabled +// #define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET + +// Casting related slots are disabled. See +// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163 +// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET + +// These are deprecated in NumPy 1.19, so are disabled here. +// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET +// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET +#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET + + +// TODO: These slots probably still need some thought, and/or a way to "grow"? +typedef struct { + PyTypeObject *typeobj; /* type of python scalar or NULL */ + int flags; /* flags, including parametric and abstract */ + /* NULL terminated cast definitions. Use NULL for the newly created DType */ + PyArrayMethod_Spec **casts; + PyType_Slot *slots; + /* Baseclass or NULL (will always subclass `np.dtype`) */ + PyTypeObject *baseclass; +} PyArrayDTypeMeta_Spec; + + +typedef PyArray_Descr *(PyArrayDTypeMeta_DiscoverDescrFromPyobject)( + PyArray_DTypeMeta *cls, PyObject *obj); + +/* + * Before making this public, we should decide whether it should pass + * the type, or allow looking at the object. A possible use-case: + * `np.array(np.array([0]), dtype=np.ndarray)` + * Could consider arrays that are not `dtype=ndarray` "scalars". + */ +typedef int (PyArrayDTypeMeta_IsKnownScalarType)( + PyArray_DTypeMeta *cls, PyTypeObject *obj); + +typedef PyArray_Descr *(PyArrayDTypeMeta_DefaultDescriptor)(PyArray_DTypeMeta *cls); +typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)( + PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2); + + +/* + * Convenience utility for getting a reference to the DType metaclass associated + * with a dtype instance. 
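+ *
+ * For example (sketch; `arr` is any PyArrayObject):
+ *
+ *     PyArray_DTypeMeta *dtype_class = NPY_DTYPE(PyArray_DESCR(arr));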
+ */ +#define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr)) + +static inline PyArray_DTypeMeta * +NPY_DT_NewRef(PyArray_DTypeMeta *o) { + Py_INCREF((PyObject *)o); + return o; +} + + +typedef PyArray_Descr *(PyArrayDTypeMeta_CommonInstance)( + PyArray_Descr *dtype1, PyArray_Descr *dtype2); +typedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype); +/* + * Returns either a new reference to *dtype* or a new descriptor instance + * initialized with the same parameters as *dtype*. The caller cannot know + * which choice a dtype will make. This function is called just before the + * array buffer is created for a newly created array, it is not called for + * views and the descriptor returned by this function is attached to the array. + */ +typedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype); + +/* + * TODO: These two functions are currently only used for experimental DType + * API support. Their relation should be "reversed": NumPy should + * always use them internally. + * There are open points about "casting safety" though, e.g. setting + * elements is currently always unsafe. + */ +typedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *); +typedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *); + +#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */ diff --git a/python/numpy/_core/include/numpy/halffloat.h b/python/numpy/_core/include/numpy/halffloat.h new file mode 100644 index 000000000..950401664 --- /dev/null +++ b/python/numpy/_core/include/numpy/halffloat.h @@ -0,0 +1,70 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Half-precision routines + */ + +/* Conversions */ +float npy_half_to_float(npy_half h); +double npy_half_to_double(npy_half h); +npy_half npy_float_to_half(float f); +npy_half npy_double_to_half(double d); +/* Comparisons */ +int npy_half_eq(npy_half h1, npy_half h2); +int npy_half_ne(npy_half h1, npy_half h2); +int npy_half_le(npy_half h1, npy_half h2); +int npy_half_lt(npy_half h1, npy_half h2); +int npy_half_ge(npy_half h1, npy_half h2); +int npy_half_gt(npy_half h1, npy_half h2); +/* faster *_nonan variants for when you know h1 and h2 are not NaN */ +int npy_half_eq_nonan(npy_half h1, npy_half h2); +int npy_half_lt_nonan(npy_half h1, npy_half h2); +int npy_half_le_nonan(npy_half h1, npy_half h2); +/* Miscellaneous functions */ +int npy_half_iszero(npy_half h); +int npy_half_isnan(npy_half h); +int npy_half_isinf(npy_half h); +int npy_half_isfinite(npy_half h); +int npy_half_signbit(npy_half h); +npy_half npy_half_copysign(npy_half x, npy_half y); +npy_half npy_half_spacing(npy_half h); +npy_half npy_half_nextafter(npy_half x, npy_half y); +npy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus); + +/* + * Half-precision constants + */ + +#define NPY_HALF_ZERO (0x0000u) +#define NPY_HALF_PZERO (0x0000u) +#define NPY_HALF_NZERO (0x8000u) +#define NPY_HALF_ONE (0x3c00u) +#define NPY_HALF_NEGONE (0xbc00u) +#define NPY_HALF_PINF (0x7c00u) +#define NPY_HALF_NINF (0xfc00u) +#define NPY_HALF_NAN (0x7e00u) + +#define NPY_MAX_HALF (0x7bffu) + +/* + * Bit-level conversions + */ + +npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f); +npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d); +npy_uint32 npy_halfbits_to_floatbits(npy_uint16 h); +npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h); + +#ifdef __cplusplus +} +#endif + +#endif /* 
NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */ diff --git a/python/numpy/_core/include/numpy/ndarrayobject.h b/python/numpy/_core/include/numpy/ndarrayobject.h new file mode 100644 index 000000000..f06bafe5b --- /dev/null +++ b/python/numpy/_core/include/numpy/ndarrayobject.h @@ -0,0 +1,304 @@ +/* + * DON'T INCLUDE THIS DIRECTLY. + */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "ndarraytypes.h" +#include "dtype_api.h" + +/* Includes the "function" C-API -- these are all stored in a + list of pointers --- one for each file + The two lists are concatenated into one in multiarray. + + They are available as import_array() +*/ + +#include "__multiarray_api.h" + +/* + * Include any definitions which are defined differently for 1.x and 2.x + * (Symbols only available on 2.x are not there, but rather guarded.) + */ +#include "npy_2_compat.h" + +/* C-API that requires previous API to be defined */ + +#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type) + +#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type) +#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type) + +#define PyArray_HasArrayInterfaceType(op, type, context, out) \ + ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \ + (((out)=PyArray_FromArrayAttr(op, type, context)) != \ + Py_NotImplemented)) + +#define PyArray_HasArrayInterface(op, out) \ + PyArray_HasArrayInterfaceType(op, NULL, NULL, out) + +#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \ + (PyArray_NDIM((PyArrayObject *)op) == 0)) + +#define PyArray_IsScalar(obj, cls) \ + (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type)) + +#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \ + PyArray_IsZeroDim(m)) +#define PyArray_IsPythonNumber(obj) \ + (PyFloat_Check(obj) || PyComplex_Check(obj) || \ + PyLong_Check(obj) || PyBool_Check(obj)) +#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \ + || PyArray_IsScalar((obj), Integer)) +#define PyArray_IsPythonScalar(obj) \ + (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \ + PyUnicode_Check(obj)) + +#define PyArray_IsAnyScalar(obj) \ + (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj)) + +#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \ + PyArray_CheckScalar(obj)) + + +#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \ + Py_INCREF(m), (m) : \ + (PyArrayObject *)(PyArray_Copy(m))) + +#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \ + PyArray_CompareLists(PyArray_DIMS(a1), \ + PyArray_DIMS(a2), \ + PyArray_NDIM(a1))) + +#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m)) +#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m)) +#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL) + +#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \ + NULL) + +#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \ + PyArray_DescrFromType(type), 0, 0, 0, NULL) + +#define PyArray_FROM_OTF(m, type, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? \ + ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL) + +#define PyArray_FROMANY(m, type, min, max, flags) \ + PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \ + (((flags) & NPY_ARRAY_ENSURECOPY) ? 
\ + (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL) + +#define PyArray_ZEROS(m, dims, type, is_f_order) \ + PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_EMPTY(m, dims, type, is_f_order) \ + PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order) + +#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \ + PyArray_NBYTES(obj)) + +#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT, NULL) + +#define PyArray_EquivArrTypes(a1, a2) \ + PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2)) + +#define PyArray_EquivByteorders(b1, b2) \ + (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2))) + +#define PyArray_SimpleNew(nd, dims, typenum) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL) + +#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \ + PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \ + data, 0, NPY_ARRAY_CARRAY, NULL) + +#define PyArray_SimpleNewFromDescr(nd, dims, descr) \ + PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \ + NULL, NULL, 0, NULL) + +#define PyArray_ToScalar(data, arr) \ + PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr) + + +/* These might be faster without the dereferencing of obj + going on inside -- of course an optimizing compiler should + inline the constants inside a for loop making it a moot point +*/ + +#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0])) + +#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1])) + +#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2])) + +#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \ + (i)*PyArray_STRIDES(obj)[0] + \ + (j)*PyArray_STRIDES(obj)[1] + \ + (k)*PyArray_STRIDES(obj)[2] + \ + (l)*PyArray_STRIDES(obj)[3])) + +static inline void +PyArray_DiscardWritebackIfCopy(PyArrayObject *arr) +{ + PyArrayObject_fields *fa = (PyArrayObject_fields *)arr; + if (fa && fa->base) { + if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) { + PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE); + Py_DECREF(fa->base); + fa->base = NULL; + PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY); + } + } +} + +#define PyArray_DESCR_REPLACE(descr) do { \ + PyArray_Descr *_new_; \ + _new_ = PyArray_DescrNew(descr); \ + Py_XDECREF(descr); \ + descr = _new_; \ + } while(0) + +/* Copy should always return contiguous array */ +#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER) + +#define PyArray_FromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_BEHAVED | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \ + PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \ + max_depth, NPY_ARRAY_ENSURECOPY | \ + NPY_ARRAY_DEFAULT | \ + NPY_ARRAY_ENSUREARRAY, NULL) + +#define PyArray_Cast(mp, type_num) \ + PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0) + +#define PyArray_Take(ap, items, axis) \ + PyArray_TakeFrom(ap, items, 
axis, NULL, NPY_RAISE)
+
+#define PyArray_Put(ap, items, values) \
+        PyArray_PutTo(ap, items, values, NPY_RAISE)
+
+
+/*
+   Check to see if this key in the dictionary is the "title"
+   entry of the tuple (i.e. a duplicate dictionary entry in the fields
+   dict).
+*/
+
+static inline int
+NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
+{
+    PyObject *title;
+    if (PyTuple_Size(value) != 3) {
+        return 0;
+    }
+    title = PyTuple_GetItem(value, 2);
+    if (key == title) {
+        return 1;
+    }
+#ifdef PYPY_VERSION
+    /*
+     * On PyPy, dictionary keys do not always preserve object identity.
+     * Fall back to comparison by value.
+     */
+    if (PyUnicode_Check(title) && PyUnicode_Check(key)) {
+        return PyUnicode_Compare(title, key) == 0 ? 1 : 0;
+    }
+#endif
+    return 0;
+}
+
+/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */
+#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))
+
+#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)
+#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)
+
+
+/*
+ * These macros and functions unfortunately require runtime version checks
+ * that are only defined in `npy_2_compat.h`. For that reason they cannot be
+ * part of `ndarraytypes.h` which tries to be self-contained.
+ */
+
+static inline npy_intp
+PyArray_ITEMSIZE(const PyArrayObject *arr)
+{
+    return PyDataType_ELSIZE(((PyArrayObject_fields *)arr)->descr);
+}
+
+#define PyDataType_HASFIELDS(obj) (PyDataType_ISLEGACY((PyArray_Descr*)(obj)) && PyDataType_NAMES((PyArray_Descr*)(obj)) != NULL)
+#define PyDataType_HASSUBARRAY(dtype) (PyDataType_ISLEGACY(dtype) && PyDataType_SUBARRAY(dtype) != NULL)
+#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \
+                      !PyDataType_HASFIELDS(dtype))
+
+#define PyDataType_FLAGCHK(dtype, flag) \
+        ((PyDataType_FLAGS(dtype) & (flag)) == (flag))
+
+#define PyDataType_REFCHK(dtype) \
+        PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)
+
+#define NPY_BEGIN_THREADS_DESCR(dtype) \
+        do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
+                NPY_BEGIN_THREADS;} while (0);
+
+#define NPY_END_THREADS_DESCR(dtype) \
+        do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
+                NPY_END_THREADS; } while (0);
+
+#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)
+/* The internal copy of this is now defined in `dtypemeta.h` */
+/*
+ * `PyArray_Scalar` is the same as this function but will convert
+ * most NumPy types to Python scalars.
+ */
+static inline PyObject *
+PyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)
+{
+    return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem(
+                                        (void *)itemptr, (PyArrayObject *)arr);
+}
+
+/*
+ * SETITEM should only be used if it is known that the value is a scalar
+ * and of a type understood by the array's dtype.
+ * Use `PyArray_Pack` if the value may be of a different dtype.
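+ *
+ * Typical usage sketch (assumes `arr` is writeable, `i` is a valid index,
+ * and `obj` is already a scalar understood by the array's dtype):
+ *
+ *     if (PyArray_SETITEM(arr, PyArray_GETPTR1(arr, i), obj) < 0) {
+ *         return NULL;  // error is set by the dtype's setitem
+ *     }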
+ */ +static inline int +PyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v) +{ + return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem(v, itemptr, arr); +} +#endif /* not internal */ + + +#ifdef __cplusplus +} +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */ diff --git a/python/numpy/_core/include/numpy/ndarraytypes.h b/python/numpy/_core/include/numpy/ndarraytypes.h new file mode 100644 index 000000000..baa42406a --- /dev/null +++ b/python/numpy/_core/include/numpy/ndarraytypes.h @@ -0,0 +1,1950 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ + +#include "npy_common.h" +#include "npy_endian.h" +#include "npy_cpu.h" +#include "utils.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN + +/* Always allow threading unless it was explicitly disabled at build time */ +#if !NPY_NO_SMP + #define NPY_ALLOW_THREADS 1 +#else + #define NPY_ALLOW_THREADS 0 +#endif + +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + +/* + * There are several places in the code where an array of dimensions + * is allocated statically. This is the size of that static + * allocation. + * + * The array creation itself could have arbitrary dimensions but all + * the places where static allocation is used would need to be changed + * to dynamic (including inside of several structures) + * + * As of NumPy 2.0, we strongly discourage the downstream use of NPY_MAXDIMS, + * but since auditing everything seems a big ask, define it as 64. + * A future version could: + * - Increase or remove the limit and require recompilation (like 2.0 did) + * - Deprecate or remove the macro but keep the limit (at basically any time) + */ +#define NPY_MAXDIMS 64 +/* We cannot change this as it would break ABI: */ +#define NPY_MAXDIMS_LEGACY_ITERS 32 +/* NPY_MAXARGS is version dependent and defined in npy_2_compat.h */ + +/* Used for Converter Functions "O&" code in ParseTuple */ +#define NPY_FAIL 0 +#define NPY_SUCCEED 1 + + +enum NPY_TYPES { NPY_BOOL=0, + NPY_BYTE, NPY_UBYTE, + NPY_SHORT, NPY_USHORT, + NPY_INT, NPY_UINT, + NPY_LONG, NPY_ULONG, + NPY_LONGLONG, NPY_ULONGLONG, + NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE, + NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE, + NPY_OBJECT=17, + NPY_STRING, NPY_UNICODE, + NPY_VOID, + /* + * New 1.6 types appended, may be integrated + * into the above in 2.0. + */ + NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF, + + NPY_CHAR, /* Deprecated, will raise if used */ + + /* The number of *legacy* dtypes */ + NPY_NTYPES_LEGACY=24, + + /* assign a high value to avoid changing this in the + future when new dtypes are added */ + NPY_NOTYPE=25, + + NPY_USERDEF=256, /* leave room for characters */ + + /* The number of types not including the new 1.6 types */ + NPY_NTYPES_ABI_COMPATIBLE=21, + + /* + * New DTypes which do not share the legacy layout + * (added after NumPy 2.0). VSTRING is the first of these + * we may open up a block for user-defined dtypes in the + * future. 
+ */ + NPY_VSTRING=2056, +}; + + +/* basetype array priority */ +#define NPY_PRIORITY 0.0 + +/* default subtype priority */ +#define NPY_SUBTYPE_PRIORITY 1.0 + +/* default scalar priority */ +#define NPY_SCALAR_PRIORITY -1000000.0 + +/* How many floating point types are there (excluding half) */ +#define NPY_NUM_FLOATTYPE 3 + +/* + * These characters correspond to the array type and the struct + * module + */ + +enum NPY_TYPECHAR { + NPY_BOOLLTR = '?', + NPY_BYTELTR = 'b', + NPY_UBYTELTR = 'B', + NPY_SHORTLTR = 'h', + NPY_USHORTLTR = 'H', + NPY_INTLTR = 'i', + NPY_UINTLTR = 'I', + NPY_LONGLTR = 'l', + NPY_ULONGLTR = 'L', + NPY_LONGLONGLTR = 'q', + NPY_ULONGLONGLTR = 'Q', + NPY_HALFLTR = 'e', + NPY_FLOATLTR = 'f', + NPY_DOUBLELTR = 'd', + NPY_LONGDOUBLELTR = 'g', + NPY_CFLOATLTR = 'F', + NPY_CDOUBLELTR = 'D', + NPY_CLONGDOUBLELTR = 'G', + NPY_OBJECTLTR = 'O', + NPY_STRINGLTR = 'S', + NPY_DEPRECATED_STRINGLTR2 = 'a', + NPY_UNICODELTR = 'U', + NPY_VOIDLTR = 'V', + NPY_DATETIMELTR = 'M', + NPY_TIMEDELTALTR = 'm', + NPY_CHARLTR = 'c', + + /* + * New non-legacy DTypes + */ + NPY_VSTRINGLTR = 'T', + + /* + * Note, we removed `NPY_INTPLTR` due to changing its definition + * to 'n', rather than 'p'. On any typical platform this is the + * same integer. 'n' should be used for the `np.intp` with the same + * size as `size_t` while 'p' remains pointer sized. + * + * 'p', 'P', 'n', and 'N' are valid and defined explicitly + * in `arraytypes.c.src`. + */ + + /* + * These are for dtype 'kinds', not dtype 'typecodes' + * as the above are for. + */ + NPY_GENBOOLLTR ='b', + NPY_SIGNEDLTR = 'i', + NPY_UNSIGNEDLTR = 'u', + NPY_FLOATINGLTR = 'f', + NPY_COMPLEXLTR = 'c', + +}; + +/* + * Changing this may break Numpy API compatibility + * due to changing offsets in PyArray_ArrFuncs, so be + * careful. Here we have reused the mergesort slot for + * any kind of stable sort, the actual implementation will + * depend on the data type. 
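+ *
+ * For example, requesting a stable sort through the C-API (sketch; `arr`
+ * is a PyArrayObject pointer):
+ *
+ *     if (PyArray_Sort(arr, -1, NPY_STABLESORT) < 0) {
+ *         return NULL;  // error set by the sort implementation
+ *     }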
+ */ +typedef enum { + _NPY_SORT_UNDEFINED=-1, + NPY_QUICKSORT=0, + NPY_HEAPSORT=1, + NPY_MERGESORT=2, + NPY_STABLESORT=2, +} NPY_SORTKIND; +#define NPY_NSORTS (NPY_STABLESORT + 1) + + +typedef enum { + NPY_INTROSELECT=0 +} NPY_SELECTKIND; +#define NPY_NSELECTS (NPY_INTROSELECT + 1) + + +typedef enum { + NPY_SEARCHLEFT=0, + NPY_SEARCHRIGHT=1 +} NPY_SEARCHSIDE; +#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1) + + +typedef enum { + NPY_NOSCALAR=-1, + NPY_BOOL_SCALAR, + NPY_INTPOS_SCALAR, + NPY_INTNEG_SCALAR, + NPY_FLOAT_SCALAR, + NPY_COMPLEX_SCALAR, + NPY_OBJECT_SCALAR +} NPY_SCALARKIND; +#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1) + +/* For specifying array memory layout or iteration order */ +typedef enum { + /* Fortran order if inputs are all Fortran, C otherwise */ + NPY_ANYORDER=-1, + /* C order */ + NPY_CORDER=0, + /* Fortran order */ + NPY_FORTRANORDER=1, + /* An order as close to the inputs as possible */ + NPY_KEEPORDER=2 +} NPY_ORDER; + +/* For specifying allowed casting in operations which support it */ +typedef enum { + _NPY_ERROR_OCCURRED_IN_CAST = -1, + /* Only allow identical types */ + NPY_NO_CASTING=0, + /* Allow identical and byte swapped types */ + NPY_EQUIV_CASTING=1, + /* Only allow safe casts */ + NPY_SAFE_CASTING=2, + /* Allow safe casts or casts within the same kind */ + NPY_SAME_KIND_CASTING=3, + /* Allow any casts */ + NPY_UNSAFE_CASTING=4, +} NPY_CASTING; + +typedef enum { + NPY_CLIP=0, + NPY_WRAP=1, + NPY_RAISE=2 +} NPY_CLIPMODE; + +typedef enum { + NPY_VALID=0, + NPY_SAME=1, + NPY_FULL=2 +} NPY_CORRELATEMODE; + +/* The special not-a-time (NaT) value */ +#define NPY_DATETIME_NAT NPY_MIN_INT64 + +/* + * Upper bound on the length of a DATETIME ISO 8601 string + * YEAR: 21 (64-bit year) + * MONTH: 3 + * DAY: 3 + * HOURS: 3 + * MINUTES: 3 + * SECONDS: 3 + * ATTOSECONDS: 1 + 3*6 + * TIMEZONE: 5 + * NULL TERMINATOR: 1 + */ +#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1) + +/* The FR in the unit names stands for frequency */ +typedef enum { + /* Force signed enum type, must be -1 for code compatibility */ + NPY_FR_ERROR = -1, /* error or undetermined */ + + /* Start of valid units */ + NPY_FR_Y = 0, /* Years */ + NPY_FR_M = 1, /* Months */ + NPY_FR_W = 2, /* Weeks */ + /* Gap where 1.6 NPY_FR_B (value 3) was */ + NPY_FR_D = 4, /* Days */ + NPY_FR_h = 5, /* hours */ + NPY_FR_m = 6, /* minutes */ + NPY_FR_s = 7, /* seconds */ + NPY_FR_ms = 8, /* milliseconds */ + NPY_FR_us = 9, /* microseconds */ + NPY_FR_ns = 10, /* nanoseconds */ + NPY_FR_ps = 11, /* picoseconds */ + NPY_FR_fs = 12, /* femtoseconds */ + NPY_FR_as = 13, /* attoseconds */ + NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */ +} NPY_DATETIMEUNIT; + +/* + * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS + * is technically one more than the actual number of units. + */ +#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1) +#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC + +/* + * Business day conventions for mapping invalid business + * days to valid business days. + */ +typedef enum { + /* Go forward in time to the following business day. */ + NPY_BUSDAY_FORWARD, + NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD, + /* Go backward in time to the preceding business day. 
*/ + NPY_BUSDAY_BACKWARD, + NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD, + /* + * Go forward in time to the following business day, unless it + * crosses a month boundary, in which case go backward + */ + NPY_BUSDAY_MODIFIEDFOLLOWING, + /* + * Go backward in time to the preceding business day, unless it + * crosses a month boundary, in which case go forward. + */ + NPY_BUSDAY_MODIFIEDPRECEDING, + /* Produce a NaT for non-business days. */ + NPY_BUSDAY_NAT, + /* Raise an exception for non-business days. */ + NPY_BUSDAY_RAISE +} NPY_BUSDAY_ROLL; + + +/************************************************************ + * NumPy Auxiliary Data for inner loops, sort functions, etc. + ************************************************************/ + +/* + * When creating an auxiliary data struct, this should always appear + * as the first member, like this: + * + * typedef struct { + * NpyAuxData base; + * double constant; + * } constant_multiplier_aux_data; + */ +typedef struct NpyAuxData_tag NpyAuxData; + +/* Function pointers for freeing or cloning auxiliary data */ +typedef void (NpyAuxData_FreeFunc) (NpyAuxData *); +typedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *); + +struct NpyAuxData_tag { + NpyAuxData_FreeFunc *free; + NpyAuxData_CloneFunc *clone; + /* To allow for a bit of expansion without breaking the ABI */ + void *reserved[2]; +}; + +/* Macros to use for freeing and cloning auxiliary data */ +#define NPY_AUXDATA_FREE(auxdata) \ + do { \ + if ((auxdata) != NULL) { \ + (auxdata)->free(auxdata); \ + } \ + } while(0) +#define NPY_AUXDATA_CLONE(auxdata) \ + ((auxdata)->clone(auxdata)) + +#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr); +#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr); + +/* +* Macros to define how array, and dimension/strides data is +* allocated. These should be made private +*/ + +#define NPY_USE_PYMEM 1 + + +#if NPY_USE_PYMEM == 1 +/* use the Raw versions which are safe to call with the GIL released */ +#define PyArray_malloc PyMem_RawMalloc +#define PyArray_free PyMem_RawFree +#define PyArray_realloc PyMem_RawRealloc +#else +#define PyArray_malloc malloc +#define PyArray_free free +#define PyArray_realloc realloc +#endif + +/* Dimensions and strides */ +#define PyDimMem_NEW(size) \ + ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp))) + +#define PyDimMem_FREE(ptr) PyArray_free(ptr) + +#define PyDimMem_RENEW(ptr,size) \ + ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp))) + +/* forward declaration */ +struct _PyArray_Descr; + +/* These must deal with unaligned and swapped data if necessary */ +typedef PyObject * (PyArray_GetItemFunc) (void *, void *); +typedef int (PyArray_SetItemFunc)(PyObject *, void *, void *); + +typedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp, + npy_intp, int, void *); + +typedef void (PyArray_CopySwapFunc)(void *, void *, int, void *); +typedef npy_bool (PyArray_NonzeroFunc)(void *, void *); + + +/* + * These assume aligned and notswapped data -- a buffer will be used + * before or contiguous data will be obtained + */ + +typedef int (PyArray_CompareFunc)(const void *, const void *, void *); +typedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *); + +typedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *, + npy_intp, void *); + +typedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, + void *); + +/* + * XXX the ignore argument should be removed next time the API version + * is bumped. It used to be the separator. 
+ */ +typedef int (PyArray_ScanFunc)(FILE *fp, void *dptr, + char *ignore, struct _PyArray_Descr *); +typedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr, + struct _PyArray_Descr *); + +typedef int (PyArray_FillFunc)(void *, npy_intp, void *); + +typedef int (PyArray_SortFunc)(void *, npy_intp, void *); +typedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *); + +typedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *); + +typedef int (PyArray_ScalarKindFunc)(void *); + +typedef struct { + npy_intp *ptr; + int len; +} PyArray_Dims; + +typedef struct { + /* + * Functions to cast to most other standard types + * Can have some NULL entries. The types + * DATETIME, TIMEDELTA, and HALF go into the castdict + * even though they are built-in. + */ + PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE]; + + /* The next four functions *cannot* be NULL */ + + /* + * Functions to get and set items with standard Python types + * -- not array scalars + */ + PyArray_GetItemFunc *getitem; + PyArray_SetItemFunc *setitem; + + /* + * Copy and/or swap data. Memory areas may not overlap + * Use memmove first if they might + */ + PyArray_CopySwapNFunc *copyswapn; + PyArray_CopySwapFunc *copyswap; + + /* + * Function to compare items + * Can be NULL + */ + PyArray_CompareFunc *compare; + + /* + * Function to select largest + * Can be NULL + */ + PyArray_ArgFunc *argmax; + + /* + * Function to compute dot product + * Can be NULL + */ + PyArray_DotFunc *dotfunc; + + /* + * Function to scan an ASCII file and + * place a single value plus possible separator + * Can be NULL + */ + PyArray_ScanFunc *scanfunc; + + /* + * Function to read a single value from a string + * and adjust the pointer; Can be NULL + */ + PyArray_FromStrFunc *fromstr; + + /* + * Function to determine if data is zero or not + * If NULL a default version is + * used at Registration time. + */ + PyArray_NonzeroFunc *nonzero; + + /* + * Used for arange. Should return 0 on success + * and -1 on failure. + * Can be NULL. + */ + PyArray_FillFunc *fill; + + /* + * Function to fill arrays with scalar values + * Can be NULL + */ + PyArray_FillWithScalarFunc *fillwithscalar; + + /* + * Sorting functions + * Can be NULL + */ + PyArray_SortFunc *sort[NPY_NSORTS]; + PyArray_ArgSortFunc *argsort[NPY_NSORTS]; + + /* + * Dictionary of additional casting functions + * PyArray_VectorUnaryFuncs + * which can be populated to support casting + * to other registered types. Can be NULL + */ + PyObject *castdict; + + /* + * Functions useful for generalizing + * the casting rules. + * Can be NULL; + */ + PyArray_ScalarKindFunc *scalarkind; + int **cancastscalarkindto; + int *cancastto; + + void *_unused1; + void *_unused2; + void *_unused3; + + /* + * Function to select smallest + * Can be NULL + */ + PyArray_ArgFunc *argmin; + +} PyArray_ArrFuncs; + + +/* The item must be reference counted when it is inserted or extracted. */ +#define NPY_ITEM_REFCOUNT 0x01 +/* Same as needing REFCOUNT */ +#define NPY_ITEM_HASOBJECT 0x01 +/* Convert to list for pickling */ +#define NPY_LIST_PICKLE 0x02 +/* The item is a POINTER */ +#define NPY_ITEM_IS_POINTER 0x04 +/* memory needs to be initialized for this data-type */ +#define NPY_NEEDS_INIT 0x08 +/* operations need Python C-API so don't give-up thread. 
*/ +#define NPY_NEEDS_PYAPI 0x10 +/* Use f.getitem when extracting elements of this data-type */ +#define NPY_USE_GETITEM 0x20 +/* Use f.setitem when setting creating 0-d array from this data-type.*/ +#define NPY_USE_SETITEM 0x40 +/* A sticky flag specifically for structured arrays */ +#define NPY_ALIGNED_STRUCT 0x80 + +/* + *These are inherited for global data-type if any data-types in the + * field have them + */ +#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \ + NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI) + +#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \ + NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \ + NPY_NEEDS_INIT | NPY_NEEDS_PYAPI) + +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION +/* + * Public version of the Descriptor struct as of 2.x + */ +typedef struct _PyArray_Descr { + PyObject_HEAD + /* + * the type object representing an + * instance of this type -- should not + * be two type_numbers with the same type + * object. + */ + PyTypeObject *typeobj; + /* kind for this type */ + char kind; + /* unique-character representing this type */ + char type; + /* + * '>' (big), '<' (little), '|' + * (not-applicable), or '=' (native). + */ + char byteorder; + /* Former flags flags space (unused) to ensure type_num is stable. */ + char _former_flags; + /* number representing this type */ + int type_num; + /* Space for dtype instance specific flags. */ + npy_uint64 flags; + /* element size (itemsize) for this type */ + npy_intp elsize; + /* alignment needed for this type */ + npy_intp alignment; + /* metadata dict or NULL */ + PyObject *metadata; + /* Cached hash value (-1 if not yet computed). */ + npy_hash_t hash; + /* Unused slot (must be initialized to NULL) for future use */ + void *reserved_null[2]; +} PyArray_Descr; + +#else /* 1.x and 2.x compatible version (only shared fields): */ + +typedef struct _PyArray_Descr { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; +} PyArray_Descr; + +/* To access modified fields, define the full 2.0 struct: */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; + npy_uint64 flags; + npy_intp elsize; + npy_intp alignment; + PyObject *metadata; + npy_hash_t hash; + void *reserved_null[2]; +} _PyArray_DescrNumPy2; + +#endif /* 1.x and 2.x compatible version */ + +/* + * Semi-private struct with additional field of legacy descriptors (must + * check NPY_DT_is_legacy before casting/accessing). The struct is also not + * valid when running on 1.x (i.e. in public API use). + */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char _former_flags; + int type_num; + npy_uint64 flags; + npy_intp elsize; + npy_intp alignment; + PyObject *metadata; + npy_hash_t hash; + void *reserved_null[2]; + struct _arr_descr *subarray; + PyObject *fields; + PyObject *names; + NpyAuxData *c_metadata; +} _PyArray_LegacyDescr; + + +/* + * Umodified PyArray_Descr struct identical to NumPy 1.x. This struct is + * used as a prototype for registering a new legacy DType. + * It is also used to access the fields in user code running on 1.x. 
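+ *
+ * A rough registration sketch (assuming the 2.x signature of
+ * `PyArray_RegisterDataType`, which takes this prototype and returns the
+ * assigned type number):
+ *
+ *     static PyArray_DescrProto my_proto;  // fill typeobj, kind, type, f, ...
+ *     int type_num = PyArray_RegisterDataType(&my_proto);
+ *     if (type_num < 0) {
+ *         return NULL;
+ *     }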
+ */ +typedef struct { + PyObject_HEAD + PyTypeObject *typeobj; + char kind; + char type; + char byteorder; + char flags; + int type_num; + int elsize; + int alignment; + struct _arr_descr *subarray; + PyObject *fields; + PyObject *names; + PyArray_ArrFuncs *f; + PyObject *metadata; + NpyAuxData *c_metadata; + npy_hash_t hash; +} PyArray_DescrProto; + + +typedef struct _arr_descr { + PyArray_Descr *base; + PyObject *shape; /* a tuple */ +} PyArray_ArrayDescr; + +/* + * Memory handler structure for array data. + */ +/* The declaration of free differs from PyMemAllocatorEx */ +typedef struct { + void *ctx; + void* (*malloc) (void *ctx, size_t size); + void* (*calloc) (void *ctx, size_t nelem, size_t elsize); + void* (*realloc) (void *ctx, void *ptr, size_t new_size); + void (*free) (void *ctx, void *ptr, size_t size); + /* + * This is the end of the version=1 struct. Only add new fields after + * this line + */ +} PyDataMemAllocator; + +typedef struct { + char name[127]; /* multiple of 64 to keep the struct aligned */ + uint8_t version; /* currently 1 */ + PyDataMemAllocator allocator; +} PyDataMem_Handler; + + +/* + * The main array object structure. + * + * It has been recommended to use the inline functions defined below + * (PyArray_DATA and friends) to access fields here for a number of + * releases. Direct access to the members themselves is deprecated. + * To ensure that your code does not use deprecated access, + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * (or NPY_1_8_API_VERSION or higher as required). + */ +/* This struct will be moved to a private header in a future release */ +typedef struct tagPyArrayObject_fields { + PyObject_HEAD + /* Pointer to the raw data buffer */ + char *data; + /* The number of dimensions, also called 'ndim' */ + int nd; + /* The size in each dimension, also called 'shape' */ + npy_intp *dimensions; + /* + * Number of bytes to jump to get to the + * next element in each dimension + */ + npy_intp *strides; + /* + * This object is decref'd upon + * deletion of array. Except in the + * case of WRITEBACKIFCOPY which has + * special handling. + * + * For views it points to the original + * array, collapsed so no chains of + * views occur. + * + * For creation from buffer object it + * points to an object that should be + * decref'd on deletion + * + * For WRITEBACKIFCOPY flag this is an + * array to-be-updated upon calling + * PyArray_ResolveWritebackIfCopy + */ + PyObject *base; + /* Pointer to type structure */ + PyArray_Descr *descr; + /* Flags describing array -- see below */ + int flags; + /* For weak references */ + PyObject *weakreflist; +#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION + void *_buffer_info; /* private buffer info, tagged to allow warning */ +#endif + /* + * For malloc/calloc/realloc/free per object + */ +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + PyObject *mem_handler; +#endif +} PyArrayObject_fields; + +/* + * To hide the implementation details, we only expose + * the Python struct HEAD. + */ +#if !defined(NPY_NO_DEPRECATED_API) || \ + (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION) +/* + * Can't put this in npy_deprecated_api.h like the others. + * PyArrayObject field access is deprecated as of NumPy 1.7. 
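+ *
+ * Prefer the accessor functions over direct field access (sketch):
+ *
+ *     char *data = PyArray_BYTES(arr);
+ *     int ndim = PyArray_NDIM(arr);
+ *     npy_intp *shape = PyArray_DIMS(arr);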
+ */ +typedef PyArrayObject_fields PyArrayObject; +#else +typedef struct tagPyArrayObject { + PyObject_HEAD +} PyArrayObject; +#endif + +/* + * Removed 2020-Nov-25, NumPy 1.20 + * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields)) + * + * The above macro was removed as it gave a false sense of a stable ABI + * with respect to the structures size. If you require a runtime constant, + * you can use `PyArray_Type.tp_basicsize` instead. Otherwise, please + * see the PyArrayObject documentation or ask the NumPy developers for + * information on how to correctly replace the macro in a way that is + * compatible with multiple NumPy versions. + */ + +/* Mirrors buffer object to ptr */ + +typedef struct { + PyObject_HEAD + PyObject *base; + void *ptr; + npy_intp len; + int flags; +} PyArray_Chunk; + +typedef struct { + NPY_DATETIMEUNIT base; + int num; +} PyArray_DatetimeMetaData; + +typedef struct { + NpyAuxData base; + PyArray_DatetimeMetaData meta; +} PyArray_DatetimeDTypeMetaData; + +/* + * This structure contains an exploded view of a date-time value. + * NaT is represented by year == NPY_DATETIME_NAT. + */ +typedef struct { + npy_int64 year; + npy_int32 month, day, hour, min, sec, us, ps, as; +} npy_datetimestruct; + +/* This structure contains an exploded view of a timedelta value */ +typedef struct { + npy_int64 day; + npy_int32 sec, us, ps, as; +} npy_timedeltastruct; + +typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *); + +/* + * Means c-style contiguous (last index varies the fastest). The data + * elements right after each other. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_C_CONTIGUOUS 0x0001 + +/* + * Set if array is a contiguous Fortran array: the first index varies + * the fastest in memory (strides array is reverse of C-contiguous + * array) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_F_CONTIGUOUS 0x0002 + +/* + * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a + * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with + * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS + * at the same time if they have either zero or one element. + * A higher dimensional array always has the same contiguity flags as + * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are + * effectively ignored when checking for contiguity. + */ + +/* + * If set, the array owns the data: it will be free'd when the array + * is deleted. + * + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_OWNDATA 0x0004 + +/* + * An array never has the next four set; they're only used as parameter + * flags to the various FromAny functions + * + * This flag may be requested in constructor functions. + */ + +/* Cause a cast to occur regardless of whether or not it is safe. */ +#define NPY_ARRAY_FORCECAST 0x0010 + +/* + * Always copy the array. Returned arrays are always CONTIGUOUS, + * ALIGNED, and WRITEABLE. See also: NPY_ARRAY_ENSURENOCOPY = 0x4000. + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURECOPY 0x0020 + +/* + * Make sure the returned array is a base-class ndarray + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSUREARRAY 0x0040 + +/* + * Make sure that the strides are in units of the element size Needed + * for some operations with record-arrays. 
+ * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ELEMENTSTRIDES 0x0080 + +/* + * Array data is aligned on the appropriate memory address for the type + * stored according to how the compiler would align things (e.g., an + * array of integers (4 bytes each) starts on a memory address that's + * a multiple of 4) + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_ALIGNED 0x0100 + +/* + * Array data has the native endianness + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_NOTSWAPPED 0x0200 + +/* + * Array data is writeable + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEABLE 0x0400 + +/* + * If this flag is set, then base contains a pointer to an array of + * the same size that should be updated with the current contents of + * this array when PyArray_ResolveWritebackIfCopy is called. + * + * This flag may be requested in constructor functions. + * This flag may be tested for in PyArray_FLAGS(arr). + */ +#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000 + +/* + * No copy may be made while converting from an object/array (result is a view) + * + * This flag may be requested in constructor functions. + */ +#define NPY_ARRAY_ENSURENOCOPY 0x4000 + +/* + * NOTE: there are also internal flags defined in multiarray/arrayobject.h, + * which start at bit 31 and work down. + */ + +#define NPY_ARRAY_BEHAVED (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE) +#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \ + NPY_ARRAY_WRITEABLE | \ + NPY_ARRAY_NOTSWAPPED) +#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_BEHAVED) +#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) +#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO) +#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY) +#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) +#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO) +#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY) +#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \ + NPY_ARRAY_WRITEBACKIFCOPY) + +#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \ + NPY_ARRAY_F_CONTIGUOUS | \ + NPY_ARRAY_ALIGNED) + +/* This flag is for the array interface, not PyArrayObject */ +#define NPY_ARR_HAS_DESCR 0x0800 + + + + +/* + * Size of internal buffers used for alignment Make BUFSIZE a multiple + * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned + */ +#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble)) +#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000) +#define NPY_BUFSIZE 8192 +/* buffer stress test size: */ +/*#define NPY_BUFSIZE 17*/ + +/* + * C API: consists of Macros and functions. The MACROS are defined + * here. 
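+ *
+ * For example (a sketch; `obj` is an arbitrary Python object), the
+ * request-flag combinations defined above are typically passed to the
+ * FromAny family of constructors:
+ *
+ *     PyArrayObject *arr = (PyArrayObject *)PyArray_FROM_OTF(
+ *             obj, NPY_DOUBLE, NPY_ARRAY_IN_ARRAY);
+ *     if (arr == NULL) {
+ *         return NULL;
+ *     }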
+ */ + + +#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE) +#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED) + +#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS) +#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS) + +/* the variable is used in some places, so always define it */ +#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL; +#if NPY_ALLOW_THREADS +#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0); +#define NPY_END_THREADS do { if (_save) \ + { PyEval_RestoreThread(_save); _save = NULL;} } while (0); +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \ + { _save = PyEval_SaveThread();} } while (0); + + +#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__; +#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0); +#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0); +#else +#define NPY_BEGIN_ALLOW_THREADS +#define NPY_END_ALLOW_THREADS +#define NPY_BEGIN_THREADS +#define NPY_END_THREADS +#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) +#define NPY_BEGIN_THREADS_DESCR(dtype) +#define NPY_END_THREADS_DESCR(dtype) +#define NPY_ALLOW_C_API_DEF +#define NPY_ALLOW_C_API +#define NPY_DISABLE_C_API +#endif + +/********************************** + * The nditer object, added in 1.6 + **********************************/ + +/* The actual structure of the iterator is an internal detail */ +typedef struct NpyIter_InternalOnly NpyIter; + +/* Iterator function pointers that may be specialized */ +typedef int (NpyIter_IterNextFunc)(NpyIter *iter); +typedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter, + npy_intp *outcoords); + +/*** Global flags that may be passed to the iterator constructors ***/ + +/* Track an index representing C order */ +#define NPY_ITER_C_INDEX 0x00000001 +/* Track an index representing Fortran order */ +#define NPY_ITER_F_INDEX 0x00000002 +/* Track a multi-index */ +#define NPY_ITER_MULTI_INDEX 0x00000004 +/* User code external to the iterator does the 1-dimensional innermost loop */ +#define NPY_ITER_EXTERNAL_LOOP 0x00000008 +/* Convert all the operands to a common data type */ +#define NPY_ITER_COMMON_DTYPE 0x00000010 +/* Operands may hold references, requiring API access during iteration */ +#define NPY_ITER_REFS_OK 0x00000020 +/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */ +#define NPY_ITER_ZEROSIZE_OK 0x00000040 +/* Permits reductions (size-0 stride with dimension size > 1) */ +#define NPY_ITER_REDUCE_OK 0x00000080 +/* Enables sub-range iteration */ +#define NPY_ITER_RANGED 0x00000100 +/* Enables buffering */ +#define NPY_ITER_BUFFERED 0x00000200 +/* When buffering is enabled, grows the inner loop if possible */ +#define NPY_ITER_GROWINNER 0x00000400 +/* Delay allocation of buffers until first Reset* call */ +#define NPY_ITER_DELAY_BUFALLOC 0x00000800 +/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */ +#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000 +/* + * If output operands overlap with other operands (based on heuristics that + * has false positives but no false negatives), make temporary copies to + * eliminate overlap. 
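+ *
+ * A minimal use of the iterator with these flags (an illustrative
+ * sketch; error handling omitted, `arr` is an existing PyArrayObject *):
+ *
+ *     NpyIter *iter = NpyIter_New(arr,
+ *             NPY_ITER_READONLY | NPY_ITER_EXTERNAL_LOOP,
+ *             NPY_KEEPORDER, NPY_NO_CASTING, NULL);
+ *     NpyIter_IterNextFunc *iternext = NpyIter_GetIterNext(iter, NULL);
+ *     char **dataptr = NpyIter_GetDataPtrArray(iter);
+ *     npy_intp *sizeptr = NpyIter_GetInnerLoopSizePtr(iter);
+ *     do {
+ *         ... process *sizeptr elements starting at dataptr[0] ...
+ *     } while (iternext(iter));
+ *     NpyIter_Deallocate(iter);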
+ */ +#define NPY_ITER_COPY_IF_OVERLAP 0x00002000 + +/*** Per-operand flags that may be passed to the iterator constructors ***/ + +/* The operand will be read from and written to */ +#define NPY_ITER_READWRITE 0x00010000 +/* The operand will only be read from */ +#define NPY_ITER_READONLY 0x00020000 +/* The operand will only be written to */ +#define NPY_ITER_WRITEONLY 0x00040000 +/* The operand's data must be in native byte order */ +#define NPY_ITER_NBO 0x00080000 +/* The operand's data must be aligned */ +#define NPY_ITER_ALIGNED 0x00100000 +/* The operand's data must be contiguous (within the inner loop) */ +#define NPY_ITER_CONTIG 0x00200000 +/* The operand may be copied to satisfy requirements */ +#define NPY_ITER_COPY 0x00400000 +/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */ +#define NPY_ITER_UPDATEIFCOPY 0x00800000 +/* Allocate the operand if it is NULL */ +#define NPY_ITER_ALLOCATE 0x01000000 +/* If an operand is allocated, don't use any subtype */ +#define NPY_ITER_NO_SUBTYPE 0x02000000 +/* This is a virtual array slot, operand is NULL but temporary data is there */ +#define NPY_ITER_VIRTUAL 0x04000000 +/* Require that the dimension match the iterator dimensions exactly */ +#define NPY_ITER_NO_BROADCAST 0x08000000 +/* A mask is being used on this array, affects buffer -> array copy */ +#define NPY_ITER_WRITEMASKED 0x10000000 +/* This array is the mask for all WRITEMASKED operands */ +#define NPY_ITER_ARRAYMASK 0x20000000 +/* Assume iterator order data access for COPY_IF_OVERLAP */ +#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000 + +#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff +#define NPY_ITER_PER_OP_FLAGS 0xffff0000 + + +/***************************** + * Basic iterator object + *****************************/ + +/* FWD declaration */ +typedef struct PyArrayIterObject_tag PyArrayIterObject; + +/* + * type of the function which translates a set of coordinates to a + * pointer to the data + */ +typedef char* (*npy_iter_get_dataptr_t)( + PyArrayIterObject* iter, const npy_intp*); + +struct PyArrayIterObject_tag { + PyObject_HEAD + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS]; + npy_iter_get_dataptr_t translate; +} ; + + +/* Iterator API */ +#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type) + +#define _PyAIT(it) ((PyArrayIterObject *)(it)) +#define PyArray_ITER_RESET(it) do { \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + memset(_PyAIT(it)->coordinates, 0, \ + (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \ +} while (0) + +#define _PyArray_ITER_NEXT1(it) do { \ + (it)->dataptr += _PyAIT(it)->strides[0]; \ + (it)->coordinates[0]++; \ +} while (0) + +#define _PyArray_ITER_NEXT2(it) do { \ + if ((it)->coordinates[1] < (it)->dims_m1[1]) { \ + (it)->coordinates[1]++; \ + (it)->dataptr += (it)->strides[1]; \ + } \ + else { \ + (it)->coordinates[1] = 0; \ + (it)->coordinates[0]++; \ + 
(it)->dataptr += (it)->strides[0] - \ + (it)->backstrides[1]; \ + } \ +} while (0) + +#define PyArray_ITER_NEXT(it) do { \ + _PyAIT(it)->index++; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyArray_ITER_NEXT1(_PyAIT(it)); \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr += PyArray_ITEMSIZE(_PyAIT(it)->ao); \ + else if (_PyAIT(it)->nd_m1 == 1) { \ + _PyArray_ITER_NEXT2(_PyAIT(it)); \ + } \ + else { \ + int __npy_i; \ + for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \ + if (_PyAIT(it)->coordinates[__npy_i] < \ + _PyAIT(it)->dims_m1[__npy_i]) { \ + _PyAIT(it)->coordinates[__npy_i]++; \ + _PyAIT(it)->dataptr += \ + _PyAIT(it)->strides[__npy_i]; \ + break; \ + } \ + else { \ + _PyAIT(it)->coordinates[__npy_i] = 0; \ + _PyAIT(it)->dataptr -= \ + _PyAIT(it)->backstrides[__npy_i]; \ + } \ + } \ + } \ +} while (0) + +#define PyArray_ITER_GOTO(it, destination) do { \ + int __npy_i; \ + _PyAIT(it)->index = 0; \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \ + if (destination[__npy_i] < 0) { \ + destination[__npy_i] += \ + _PyAIT(it)->dims_m1[__npy_i]+1; \ + } \ + _PyAIT(it)->dataptr += destination[__npy_i] * \ + _PyAIT(it)->strides[__npy_i]; \ + _PyAIT(it)->coordinates[__npy_i] = \ + destination[__npy_i]; \ + _PyAIT(it)->index += destination[__npy_i] * \ + ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \ + _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \ + } \ +} while (0) + +#define PyArray_ITER_GOTO1D(it, ind) do { \ + int __npy_i; \ + npy_intp __npy_ind = (npy_intp)(ind); \ + if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \ + _PyAIT(it)->index = __npy_ind; \ + if (_PyAIT(it)->nd_m1 == 0) { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * _PyAIT(it)->strides[0]; \ + } \ + else if (_PyAIT(it)->contiguous) \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \ + __npy_ind * PyArray_ITEMSIZE(_PyAIT(it)->ao); \ + else { \ + _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \ + for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \ + __npy_i++) { \ + _PyAIT(it)->coordinates[__npy_i] = \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]); \ + _PyAIT(it)->dataptr += \ + (__npy_ind / _PyAIT(it)->factors[__npy_i]) \ + * _PyAIT(it)->strides[__npy_i]; \ + __npy_ind %= _PyAIT(it)->factors[__npy_i]; \ + } \ + } \ +} while (0) + +#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr)) + +#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size) + + +/* + * Any object passed to PyArray_Broadcast must be binary compatible + * with this structure. + */ + +typedef struct { + PyObject_HEAD + int numiter; /* number of iters */ + npy_intp size; /* broadcasted size */ + npy_intp index; /* current index */ + int nd; /* number of dims */ + npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; /* dimensions */ + /* + * Space for the individual iterators, do not specify size publicly + * to allow changing it more easily. + * One reason is that Cython uses this for checks and only allows + * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS + * to be runtime dependent. + */ +#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + PyArrayIterObject *iters[64]; +#elif defined(__cplusplus) + /* + * C++ doesn't strictly support flexible members and gives compilers + * warnings (pedantic only), so we lie. We can't make it 64 because + * then Cython is unhappy (larger struct at runtime is OK smaller not). 
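+ *
+ * Usage sketch for this multi-iterator (illustrative; NULL checks
+ * omitted, `arr1` and `arr2` are placeholders):
+ *
+ *     PyArrayMultiIterObject *multi = (PyArrayMultiIterObject *)
+ *             PyArray_MultiIterNew(2, arr1, arr2);
+ *     while (PyArray_MultiIter_NOTDONE(multi)) {
+ *         void *p0 = PyArray_MultiIter_DATA(multi, 0);
+ *         void *p1 = PyArray_MultiIter_DATA(multi, 1);
+ *         PyArray_MultiIter_NEXT(multi);
+ *     }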
+ */ + PyArrayIterObject *iters[32]; +#else + PyArrayIterObject *iters[]; +#endif +} PyArrayMultiIterObject; + +#define _PyMIT(m) ((PyArrayMultiIterObject *)(m)) +#define PyArray_MultiIter_RESET(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index = 0; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_NEXT(multi) do { \ + int __npy_mi; \ + _PyMIT(multi)->index++; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \ + } \ +} while (0) + +#define PyArray_MultiIter_GOTO(multi, dest) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_GOTO1D(multi, ind) do { \ + int __npy_mi; \ + for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \ + PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \ + } \ + _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \ +} while (0) + +#define PyArray_MultiIter_DATA(multi, i) \ + ((void *)(_PyMIT(multi)->iters[i]->dataptr)) + +#define PyArray_MultiIter_NEXTi(multi, i) \ + PyArray_ITER_NEXT(_PyMIT(multi)->iters[i]) + +#define PyArray_MultiIter_NOTDONE(multi) \ + (_PyMIT(multi)->index < _PyMIT(multi)->size) + + +static NPY_INLINE int +PyArray_MultiIter_NUMITER(PyArrayMultiIterObject *multi) +{ + return multi->numiter; +} + + +static NPY_INLINE npy_intp +PyArray_MultiIter_SIZE(PyArrayMultiIterObject *multi) +{ + return multi->size; +} + + +static NPY_INLINE npy_intp +PyArray_MultiIter_INDEX(PyArrayMultiIterObject *multi) +{ + return multi->index; +} + + +static NPY_INLINE int +PyArray_MultiIter_NDIM(PyArrayMultiIterObject *multi) +{ + return multi->nd; +} + + +static NPY_INLINE npy_intp * +PyArray_MultiIter_DIMS(PyArrayMultiIterObject *multi) +{ + return multi->dimensions; +} + + +static NPY_INLINE void ** +PyArray_MultiIter_ITERS(PyArrayMultiIterObject *multi) +{ + return (void**)multi->iters; +} + + +enum { + NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, + NPY_NEIGHBORHOOD_ITER_ONE_PADDING, + NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING, + NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING, + NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING +}; + +typedef struct { + PyObject_HEAD + + /* + * PyArrayIterObject part: keep this in this exact order + */ + int nd_m1; /* number of dimensions - 1 */ + npy_intp index, size; + npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */ + npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */ + npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */ + npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */ + npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */ + PyArrayObject *ao; + char *dataptr; /* pointer to current item*/ + npy_bool contiguous; + + npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2]; + npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS]; + npy_iter_get_dataptr_t translate; + + /* + * New members + */ + npy_intp nd; + + /* Dimensions is the dimension of the array */ + npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; + + /* + * Neighborhood points coordinates are computed relatively to the + * point pointed by _internal_iter + */ + PyArrayIterObject* _internal_iter; + /* + * To keep a reference to the representation 
of the constant value + * for constant padding + */ + char* constant; + + int mode; +} PyArrayNeighborhoodIterObject; + +/* + * Neighborhood iterator API + */ + +/* General: those work for any mode */ +static inline int +PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter); +static inline int +PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter); +#if 0 +static inline int +PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter); +#endif + +/* + * Include inline implementations - functions defined there are not + * considered public API + */ +#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ +#include "_neighborhood_iterator_imp.h" +#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_ + + + +/* The default array type */ +#define NPY_DEFAULT_TYPE NPY_DOUBLE +/* default integer type defined in npy_2_compat header */ + +/* + * All sorts of useful ways to look into a PyArrayObject. It is recommended + * to use PyArrayObject * objects instead of always casting from PyObject *, + * for improved type checking. + * + * In many cases here the macro versions of the accessors are deprecated, + * but can't be immediately changed to inline functions because the + * preexisting macros accept PyObject * and do automatic casts. Inline + * functions accepting PyArrayObject * provides for some compile-time + * checking of correctness when working with these objects in C. + */ + +#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \ + PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)) + +#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \ + (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS))) + +#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? \ + NPY_ARRAY_F_CONTIGUOUS : 0)) + +static inline int +PyArray_NDIM(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->nd; +} + +static inline void * +PyArray_DATA(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static inline char * +PyArray_BYTES(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->data; +} + +static inline npy_intp * +PyArray_DIMS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->dimensions; +} + +static inline npy_intp * +PyArray_STRIDES(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->strides; +} + +static inline npy_intp +PyArray_DIM(const PyArrayObject *arr, int idim) +{ + return ((PyArrayObject_fields *)arr)->dimensions[idim]; +} + +static inline npy_intp +PyArray_STRIDE(const PyArrayObject *arr, int istride) +{ + return ((PyArrayObject_fields *)arr)->strides[istride]; +} + +static inline NPY_RETURNS_BORROWED_REF PyObject * +PyArray_BASE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->base; +} + +static inline NPY_RETURNS_BORROWED_REF PyArray_Descr * +PyArray_DESCR(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static inline int +PyArray_FLAGS(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->flags; +} + + +static inline int +PyArray_TYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr->type_num; +} + +static inline int +PyArray_CHKFLAGS(const PyArrayObject *arr, int flags) +{ + return (PyArray_FLAGS(arr) & flags) == flags; +} + +static inline PyArray_Descr * +PyArray_DTYPE(const PyArrayObject *arr) +{ + return ((PyArrayObject_fields *)arr)->descr; +} + +static inline npy_intp * +PyArray_SHAPE(const PyArrayObject *arr) +{ + 
return ((PyArrayObject_fields *)arr)->dimensions; +} + +/* + * Enables the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static inline void +PyArray_ENABLEFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags |= flags; +} + +/* + * Clears the specified array flags. Does no checking, + * assumes you know what you're doing. + */ +static inline void +PyArray_CLEARFLAGS(PyArrayObject *arr, int flags) +{ + ((PyArrayObject_fields *)arr)->flags &= ~flags; +} + +#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION + static inline NPY_RETURNS_BORROWED_REF PyObject * + PyArray_HANDLER(PyArrayObject *arr) + { + return ((PyArrayObject_fields *)arr)->mem_handler; + } +#endif + +#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL) + +#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \ + ((type) == NPY_USHORT) || \ + ((type) == NPY_UINT) || \ + ((type) == NPY_ULONG) || \ + ((type) == NPY_ULONGLONG)) + +#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \ + ((type) == NPY_SHORT) || \ + ((type) == NPY_INT) || \ + ((type) == NPY_LONG) || \ + ((type) == NPY_LONGLONG)) + +#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \ + ((type) <= NPY_ULONGLONG)) + +#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \ + ((type) <= NPY_LONGDOUBLE)) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \ + ((type) == NPY_HALF)) + +#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \ + ((type) == NPY_UNICODE)) + +#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \ + ((type) <= NPY_CLONGDOUBLE)) + +#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \ + ((type) <=NPY_VOID)) + +#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \ + ((type) <=NPY_TIMEDELTA)) + +#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \ + ((type) < NPY_USERDEF+ \ + NPY_NUMUSERTYPES)) + +#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \ + PyTypeNum_ISUSERDEF(type)) + +#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT) + + +#define PyDataType_ISLEGACY(dtype) ((dtype)->type_num < NPY_VSTRING && ((dtype)->type_num >= 0)) +#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num ) +#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num) +#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0) +/* + * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED, + * SUBARRAY, NAMES, FIELDS, C_METADATA, and 
METADATA require version specific + * lookup and are defined in npy_2_compat.h. + */ + + +#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj)) +#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj)) +#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj)) +#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj)) +#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj)) +#define PyArray_ISSTRING(obj) PyTypeNum_ISSTRING(PyArray_TYPE(obj)) +#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj)) +#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) +#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj)) +#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj)) +#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj)) +#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj)) +#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj)) + + /* + * FIXME: This should check for a flag on the data-type that + * states whether or not it is variable length. Because the + * ISFLEXIBLE check is hard-coded to the built-in data-types. + */ +#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj)) + +#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj)) + + +#define NPY_LITTLE '<' +#define NPY_BIG '>' +#define NPY_NATIVE '=' +#define NPY_SWAP 's' +#define NPY_IGNORE '|' + +#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN +#define NPY_NATBYTE NPY_BIG +#define NPY_OPPBYTE NPY_LITTLE +#else +#define NPY_NATBYTE NPY_LITTLE +#define NPY_OPPBYTE NPY_BIG +#endif + +#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE) +#define PyArray_IsNativeByteOrder PyArray_ISNBO +#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder) +#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m)) + +#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \ + PyArray_ISNOTSWAPPED(m)) + +#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY) +#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO) +#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY) +#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO) +#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED) +#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED) + + +#define PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder) +#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d)) + +/************************************************************ + * A struct used by PyArray_CreateSortedStridePerm, new in 1.7. + ************************************************************/ + +typedef struct { + npy_intp perm, stride; +} npy_stride_sort_item; + +/************************************************************ + * This is the form of the struct that's stored in the + * PyCapsule returned by an array's __array_struct__ attribute. See + * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full + * documentation. 
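+ *
+ * Consumer sketch (illustrative; error handling omitted, and it assumes
+ * the capsule carries a NULL name, as NumPy creates it):
+ *
+ *     PyObject *cap = PyObject_GetAttrString(obj, "__array_struct__");
+ *     PyArrayInterface *inter =
+ *             (PyArrayInterface *)PyCapsule_GetPointer(cap, NULL);
+ *     if (inter != NULL && inter->two == 2) {
+ *         npy_intp rows = inter->shape[0];
+ *     }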
+ ************************************************************/ +typedef struct { + int two; /* + * contains the integer 2 as a sanity + * check + */ + + int nd; /* number of dimensions */ + + char typekind; /* + * kind in array --- character code of + * typestr + */ + + int itemsize; /* size of each element */ + + int flags; /* + * how should be data interpreted. Valid + * flags are CONTIGUOUS (1), F_CONTIGUOUS (2), + * ALIGNED (0x100), NOTSWAPPED (0x200), and + * WRITEABLE (0x400). ARR_HAS_DESCR (0x800) + * states that arrdescr field is present in + * structure + */ + + npy_intp *shape; /* + * A length-nd array of shape + * information + */ + + npy_intp *strides; /* A length-nd array of stride information */ + + void *data; /* A pointer to the first element of the array */ + + PyObject *descr; /* + * A list of fields or NULL (ignored if flags + * does not have ARR_HAS_DESCR flag set) + */ +} PyArrayInterface; + + +/**************************************** + * NpyString + * + * Types used by the NpyString API. + ****************************************/ + +/* + * A "packed" encoded string. The string data must be accessed by first unpacking the string. + */ +typedef struct npy_packed_static_string npy_packed_static_string; + +/* + * An unpacked read-only view onto the data in a packed string + */ +typedef struct npy_unpacked_static_string { + size_t size; + const char *buf; +} npy_static_string; + +/* + * Handles heap allocations for static strings. + */ +typedef struct npy_string_allocator npy_string_allocator; + +typedef struct { + PyArray_Descr base; + // The object representing a null value + PyObject *na_object; + // Flag indicating whether or not to coerce arbitrary objects to strings + char coerce; + // Flag indicating the na object is NaN-like + char has_nan_na; + // Flag indicating the na object is a string + char has_string_na; + // If nonzero, indicates that this instance is owned by an array already + char array_owned; + // The string data to use when a default string is needed + npy_static_string default_string; + // The name of the missing data object, if any + npy_static_string na_name; + // the allocator should only be directly accessed after + // acquiring the allocator_lock and the lock should + // be released immediately after the allocator is + // no longer needed + npy_string_allocator *allocator; +} PyArray_StringDTypeObject; + +/* + * PyArray_DTypeMeta related definitions. + * + * As of now, this API is preliminary and will be extended as necessary. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* + * The Structures defined in this block are currently considered + * private API and may change without warning! + * Part of this (at least the size) is expected to be public API without + * further modifications. + */ + /* TODO: Make this definition public in the API, as soon as its settled */ + NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type; + + /* + * While NumPy DTypes would not need to be heap types the plan is to + * make DTypes available in Python at which point they will be heap types. + * Since we also wish to add fields to the DType class, this looks like + * a typical instance definition, but with PyHeapTypeObject instead of + * only the PyObject_HEAD. + * This must only be exposed very extremely careful consideration, since + * it is a fairly complex construct which may be better to allow + * refactoring of. 
+ */ + typedef struct { + PyHeapTypeObject super; + + /* + * Most DTypes will have a singleton default instance, for the + * parametric legacy DTypes (bytes, string, void, datetime) this + * may be a pointer to the *prototype* instance? + */ + PyArray_Descr *singleton; + /* Copy of the legacy DTypes type number, usually invalid. */ + int type_num; + + /* The type object of the scalar instances (may be NULL?) */ + PyTypeObject *scalar_type; + /* + * DType flags to signal legacy, parametric, or + * abstract. But plenty of space for additional information/flags. + */ + npy_uint64 flags; + + /* + * Use indirection in order to allow a fixed size for this struct. + * A stable ABI size makes creating a static DType less painful + * while also ensuring flexibility for all opaque API (with one + * indirection due the pointer lookup). + */ + void *dt_slots; + void *reserved[3]; + } PyArray_DTypeMeta; + +#endif /* NPY_INTERNAL_BUILD */ + + +/* + * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files + * npy_*_*_deprecated_api.h are only included from here and nowhere else. + */ +#ifdef NPY_DEPRECATED_INCLUDES +#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES." +#endif +#define NPY_DEPRECATED_INCLUDES +/* + * There is no file npy_1_8_deprecated_api.h since there are no additional + * deprecated API features in NumPy 1.8. + * + * Note to maintainers: insert code like the following in future NumPy + * versions. + * + * #if !defined(NPY_NO_DEPRECATED_API) || \ + * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION) + * #include "npy_1_9_deprecated_api.h" + * #endif + * Then in the npy_1_9_deprecated_api.h header add something like this + * -------------------- + * #ifndef NPY_DEPRECATED_INCLUDES + * #error "Should never include npy_*_*_deprecated_api directly." + * #endif + * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_ + * + * #ifndef NPY_NO_DEPRECATED_API + * #if defined(_WIN32) + * #define _WARN___STR2__(x) #x + * #define _WARN___STR1__(x) _WARN___STR2__(x) + * #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + * #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION") + * #else + * #warning "Using deprecated NumPy API, disable it with " \ + * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION" + * #endif + * #endif + * -------------------- + */ +#undef NPY_DEPRECATED_INCLUDES + +#ifdef __cplusplus +} +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */ diff --git a/python/numpy/_core/include/numpy/npy_2_compat.h b/python/numpy/_core/include/numpy/npy_2_compat.h new file mode 100644 index 000000000..e39e65aed --- /dev/null +++ b/python/numpy/_core/include/numpy/npy_2_compat.h @@ -0,0 +1,249 @@ +/* + * This header file defines relevant features which: + * - Require runtime inspection depending on the NumPy version. + * - May be needed when compiling with an older version of NumPy to allow + * a smooth transition. + * + * As such, it is shipped with NumPy 2.0, but designed to be vendored in full + * or parts by downstream projects. + * + * It must be included after any other includes. `import_array()` must have + * been called in the scope or version dependency will misbehave, even when + * only `PyUFunc_` API is used. 
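+ *
+ * For example (a sketch), a module initialization function can make sure
+ * the API table is loaded by doing:
+ *
+ *     if (PyArray_ImportNumPyAPI() < 0) {
+ *         return NULL;
+ *     }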
+ * + * If required complicated defs (with inline functions) should be written as: + * + * #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + * Simple definition when NumPy 2.0 API is guaranteed. + * #else + * static inline definition of a 1.x compatibility shim + * #if NPY_ABI_VERSION < 0x02000000 + * Make 1.x compatibility shim the public API (1.x only branch) + * #else + * Runtime dispatched version (1.x or 2.x) + * #endif + * #endif + * + * An internal build always passes NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ + +/* + * New macros for accessing real and complex part of a complex number can be + * found in "npy_2_complexcompat.h". + */ + + +/* + * This header is meant to be included by downstream directly for 1.x compat. + * In that case we need to ensure that users first included the full headers + * and not just `ndarraytypes.h`. + */ + +#ifndef NPY_FEATURE_VERSION + #error "The NumPy 2 compat header requires `import_array()` for which " \ + "the `ndarraytypes.h` header include is not sufficient. Please " \ + "include it after `numpy/ndarrayobject.h` or similar.\n" \ + "To simplify inclusion, you may use `PyArray_ImportNumPy()` " \ + "which is defined in the compat header and is lightweight (can be)." +#endif + +#if NPY_ABI_VERSION < 0x02000000 + /* + * Define 2.0 feature version as it is needed below to decide whether we + * compile for both 1.x and 2.x (defining it guarantees 1.x only). + */ + #define NPY_2_0_API_VERSION 0x00000012 + /* + * If we are compiling with NumPy 1.x, PyArray_RUNTIME_VERSION so we + * pretend the `PyArray_RUNTIME_VERSION` is `NPY_FEATURE_VERSION`. + * This allows downstream to use `PyArray_RUNTIME_VERSION` if they need to. + */ + #define PyArray_RUNTIME_VERSION NPY_FEATURE_VERSION + /* Compiling on NumPy 1.x where these are the same: */ + #define PyArray_DescrProto PyArray_Descr +#endif + + +/* + * Define a better way to call `_import_array()` to simplify backporting as + * we now require imports more often (necessary to make ABI flexible). + */ +#ifdef import_array1 + +static inline int +PyArray_ImportNumPyAPI(void) +{ + if (NPY_UNLIKELY(PyArray_API == NULL)) { + import_array1(-1); + } + return 0; +} + +#endif /* import_array1 */ + + +/* + * NPY_DEFAULT_INT + * + * The default integer has changed, `NPY_DEFAULT_INT` is available at runtime + * for use as type number, e.g. `PyArray_DescrFromType(NPY_DEFAULT_INT)`. + * + * NPY_RAVEL_AXIS + * + * This was introduced in NumPy 2.0 to allow indicating that an axis should be + * raveled in an operation. Before NumPy 2.0, NPY_MAXDIMS was used for this purpose. + * + * NPY_MAXDIMS + * + * A constant indicating the maximum number dimensions allowed when creating + * an ndarray. + * + * NPY_NTYPES_LEGACY + * + * The number of built-in NumPy dtypes. + */ +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + #define NPY_DEFAULT_INT NPY_INTP + #define NPY_RAVEL_AXIS NPY_MIN_INT + #define NPY_MAXARGS 64 + +#elif NPY_ABI_VERSION < 0x02000000 + #define NPY_DEFAULT_INT NPY_LONG + #define NPY_RAVEL_AXIS 32 + #define NPY_MAXARGS 32 + + /* Aliases of 2.x names to 1.x only equivalent names */ + #define NPY_NTYPES NPY_NTYPES_LEGACY + #define PyArray_DescrProto PyArray_Descr + #define _PyArray_LegacyDescr PyArray_Descr + /* NumPy 2 definition always works, but add it for 1.x only */ + #define PyDataType_ISLEGACY(dtype) (1) +#else + #define NPY_DEFAULT_INT \ + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 
NPY_INTP : NPY_LONG) + #define NPY_RAVEL_AXIS \ + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) + #define NPY_MAXARGS \ + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 64 : 32) +#endif + + +/* + * Access inline functions for descriptor fields. Except for the first + * few fields, these needed to be moved (elsize, alignment) for + * additional space. Or they are descriptor specific and are not generally + * available anymore (metadata, c_metadata, subarray, names, fields). + * + * Most of these are defined via the `DESCR_ACCESSOR` macro helper. + */ +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION || NPY_ABI_VERSION < 0x02000000 + /* Compiling for 1.x or 2.x only, direct field access is OK: */ + + static inline void + PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size) + { + dtype->elsize = size; + } + + static inline npy_uint64 + PyDataType_FLAGS(const PyArray_Descr *dtype) + { + #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + return dtype->flags; + #else + return (unsigned char)dtype->flags; /* Need unsigned cast on 1.x */ + #endif + } + + #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \ + static inline type \ + PyDataType_##FIELD(const PyArray_Descr *dtype) { \ + if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \ + return (type)0; \ + } \ + return ((_PyArray_LegacyDescr *)dtype)->field; \ + } +#else /* compiling for both 1.x and 2.x */ + + static inline void + PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + ((_PyArray_DescrNumPy2 *)dtype)->elsize = size; + } + else { + ((PyArray_DescrProto *)dtype)->elsize = (int)size; + } + } + + static inline npy_uint64 + PyDataType_FLAGS(const PyArray_Descr *dtype) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + return ((_PyArray_DescrNumPy2 *)dtype)->flags; + } + else { + return (unsigned char)((PyArray_DescrProto *)dtype)->flags; + } + } + + /* Cast to LegacyDescr always fine but needed when `legacy_only` */ + #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \ + static inline type \ + PyDataType_##FIELD(const PyArray_Descr *dtype) { \ + if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \ + return (type)0; \ + } \ + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { \ + return ((_PyArray_LegacyDescr *)dtype)->field; \ + } \ + else { \ + return ((PyArray_DescrProto *)dtype)->field; \ + } \ + } +#endif + +DESCR_ACCESSOR(ELSIZE, elsize, npy_intp, 0) +DESCR_ACCESSOR(ALIGNMENT, alignment, npy_intp, 0) +DESCR_ACCESSOR(METADATA, metadata, PyObject *, 1) +DESCR_ACCESSOR(SUBARRAY, subarray, PyArray_ArrayDescr *, 1) +DESCR_ACCESSOR(NAMES, names, PyObject *, 1) +DESCR_ACCESSOR(FIELDS, fields, PyObject *, 1) +DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1) + +#undef DESCR_ACCESSOR + + +#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) +#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + return _PyDataType_GetArrFuncs(descr); + } +#elif NPY_ABI_VERSION < 0x02000000 + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + return descr->f; + } +#else + static inline PyArray_ArrFuncs * + PyDataType_GetArrFuncs(const PyArray_Descr *descr) + { + if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { + return _PyDataType_GetArrFuncs(descr); + } + else { + return ((PyArray_DescrProto *)descr)->f; + } + } +#endif + + +#endif /* not internal build */ + +#endif /* 
NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ */ diff --git a/python/numpy/_core/include/numpy/npy_2_complexcompat.h b/python/numpy/_core/include/numpy/npy_2_complexcompat.h new file mode 100644 index 000000000..0b509011b --- /dev/null +++ b/python/numpy/_core/include/numpy/npy_2_complexcompat.h @@ -0,0 +1,28 @@ +/* This header is designed to be copy-pasted into downstream packages, since it provides + a compatibility layer between the old C struct complex types and the new native C99 + complex types. The new macros are in numpy/npy_math.h, which is why it is included here. */ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_ + +#include + +#ifndef NPY_CSETREALF +#define NPY_CSETREALF(c, r) (c)->real = (r) +#endif +#ifndef NPY_CSETIMAGF +#define NPY_CSETIMAGF(c, i) (c)->imag = (i) +#endif +#ifndef NPY_CSETREAL +#define NPY_CSETREAL(c, r) (c)->real = (r) +#endif +#ifndef NPY_CSETIMAG +#define NPY_CSETIMAG(c, i) (c)->imag = (i) +#endif +#ifndef NPY_CSETREALL +#define NPY_CSETREALL(c, r) (c)->real = (r) +#endif +#ifndef NPY_CSETIMAGL +#define NPY_CSETIMAGL(c, i) (c)->imag = (i) +#endif + +#endif diff --git a/python/numpy/_core/include/numpy/npy_3kcompat.h b/python/numpy/_core/include/numpy/npy_3kcompat.h new file mode 100644 index 000000000..c2bf74faf --- /dev/null +++ b/python/numpy/_core/include/numpy/npy_3kcompat.h @@ -0,0 +1,374 @@ +/* + * This is a convenience header file providing compatibility utilities + * for supporting different minor versions of Python 3. + * It was originally used to support the transition from Python 2, + * hence the "3k" naming. + * + * If you want to use this for your own projects, it's recommended to make a + * copy of it. We don't provide backwards compatibility guarantees. + */ + +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ + +#include +#include + +#include "npy_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Python13 removes _PyLong_AsInt */ +static inline int +Npy__PyLong_AsInt(PyObject *obj) +{ + int overflow; + long result = PyLong_AsLongAndOverflow(obj, &overflow); + + /* INT_MAX and INT_MIN are defined in Python.h */ + if (overflow || result > INT_MAX || result < INT_MIN) { + /* XXX: could be cute and give a different + message for overflow == -1 */ + PyErr_SetString(PyExc_OverflowError, + "Python int too large to convert to C int"); + return -1; + } + return (int)result; +} + +#if defined _MSC_VER && _MSC_VER >= 1900 + +#include + +/* + * Macros to protect CRT calls against instant termination when passed an + * invalid parameter (https://bugs.python.org/issue23524). 
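+ *
+ * Usage sketch (this mirrors npy_PyFile_Dup2 below):
+ *
+ *     NPY_BEGIN_SUPPRESS_IPH
+ *     handle = _fdopen(fd, mode);
+ *     NPY_END_SUPPRESS_IPH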
+ */ +extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; +#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \ + _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler); +#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); } + +#else + +#define NPY_BEGIN_SUPPRESS_IPH +#define NPY_END_SUPPRESS_IPH + +#endif /* _MSC_VER >= 1900 */ + +/* + * PyFile_* compatibility + */ + +/* + * Get a FILE* handle to the file represented by the Python object + */ +static inline FILE* +npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) +{ + int fd, fd2, unbuf; + Py_ssize_t fd2_tmp; + PyObject *ret, *os, *io, *io_raw; + npy_off_t pos; + FILE *handle; + + /* Flush first to ensure things end up in the file in the correct order */ + ret = PyObject_CallMethod(file, "flush", ""); + if (ret == NULL) { + return NULL; + } + Py_DECREF(ret); + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return NULL; + } + + /* + * The handle needs to be dup'd because we have to call fclose + * at the end + */ + os = PyImport_ImportModule("os"); + if (os == NULL) { + return NULL; + } + ret = PyObject_CallMethod(os, "dup", "i", fd); + Py_DECREF(os); + if (ret == NULL) { + return NULL; + } + fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError); + Py_DECREF(ret); + if (fd2_tmp == -1 && PyErr_Occurred()) { + return NULL; + } + if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) { + PyErr_SetString(PyExc_IOError, + "Getting an 'int' from os.dup() failed"); + return NULL; + } + fd2 = (int)fd2_tmp; + + /* Convert to FILE* handle */ +#ifdef _WIN32 + NPY_BEGIN_SUPPRESS_IPH + handle = _fdopen(fd2, mode); + NPY_END_SUPPRESS_IPH +#else + handle = fdopen(fd2, mode); +#endif + if (handle == NULL) { + PyErr_SetString(PyExc_IOError, + "Getting a FILE* from a Python file object via " + "_fdopen failed. 
If you built NumPy, you probably " + "linked with the wrong debug/release runtime"); + return NULL; + } + + /* Record the original raw file handle position */ + *orig_pos = npy_ftell(handle); + if (*orig_pos == -1) { + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + fclose(handle); + return NULL; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + fclose(handle); + return NULL; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return handle; + } + else { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + fclose(handle); + return NULL; + } + } + + /* Seek raw handle to the Python-side position */ + ret = PyObject_CallMethod(file, "tell", ""); + if (ret == NULL) { + fclose(handle); + return NULL; + } + pos = PyLong_AsLongLong(ret); + Py_DECREF(ret); + if (PyErr_Occurred()) { + fclose(handle); + return NULL; + } + if (npy_fseek(handle, pos, SEEK_SET) == -1) { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + fclose(handle); + return NULL; + } + return handle; +} + +/* + * Close the dup-ed file handle, and seek the Python one to the current position + */ +static inline int +npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) +{ + int fd, unbuf; + PyObject *ret, *io, *io_raw; + npy_off_t position; + + position = npy_ftell(handle); + + /* Close the FILE* handle */ + fclose(handle); + + /* + * Restore original file handle position, in order to not confuse + * Python-side data structures + */ + fd = PyObject_AsFileDescriptor(file); + if (fd == -1) { + return -1; + } + + if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) { + + /* The io module is needed to determine if buffering is used */ + io = PyImport_ImportModule("io"); + if (io == NULL) { + return -1; + } + /* File object instances of RawIOBase are unbuffered */ + io_raw = PyObject_GetAttrString(io, "RawIOBase"); + Py_DECREF(io); + if (io_raw == NULL) { + return -1; + } + unbuf = PyObject_IsInstance(file, io_raw); + Py_DECREF(io_raw); + if (unbuf == 1) { + /* Succeed if the IO is unbuffered */ + return 0; + } + else { + PyErr_SetString(PyExc_IOError, "seeking file failed"); + return -1; + } + } + + if (position == -1) { + PyErr_SetString(PyExc_IOError, "obtaining file position failed"); + return -1; + } + + /* Seek Python-side handle to the FILE* handle position */ + ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +static inline PyObject* +npy_PyFile_OpenFile(PyObject *filename, const char *mode) +{ + PyObject *open; + open = PyDict_GetItemString(PyEval_GetBuiltins(), "open"); + if (open == NULL) { + return NULL; + } + return PyObject_CallFunction(open, "Os", filename, mode); +} + +static inline int +npy_PyFile_CloseFile(PyObject *file) +{ + PyObject *ret; + + ret = PyObject_CallMethod(file, "close", NULL); + if (ret == NULL) { + return -1; + } + Py_DECREF(ret); + return 0; +} + +/* This is a copy of _PyErr_ChainExceptions, which + * is no longer exported from Python3.12 + */ +static inline void +npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != 
NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetContext(val2, val); + PyErr_Restore(exc2, val2, tb2); + } + else { + PyErr_Restore(exc, val, tb); + } +} + +/* This is a copy of _PyErr_ChainExceptions, with: + * __cause__ used instead of __context__ + */ +static inline void +npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) +{ + if (exc == NULL) + return; + + if (PyErr_Occurred()) { + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); + } + else { + PyErr_Restore(exc, val, tb); + } +} + +/* + * PyCObject functions adapted to PyCapsules. + * + * The main job here is to get rid of the improved error handling + * of PyCapsules. It's a shame... + */ +static inline PyObject * +NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)) +{ + PyObject *ret = PyCapsule_New(ptr, NULL, dtor); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static inline PyObject * +NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *)) +{ + PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor); + if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) { + PyErr_Clear(); + Py_DECREF(ret); + ret = NULL; + } + return ret; +} + +static inline void * +NpyCapsule_AsVoidPtr(PyObject *obj) +{ + void *ret = PyCapsule_GetPointer(obj, NULL); + if (ret == NULL) { + PyErr_Clear(); + } + return ret; +} + +static inline void * +NpyCapsule_GetDesc(PyObject *obj) +{ + return PyCapsule_GetContext(obj); +} + +static inline int +NpyCapsule_Check(PyObject *ptr) +{ + return PyCapsule_CheckExact(ptr); +} + +#ifdef __cplusplus +} +#endif + + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */ diff --git a/python/numpy/_core/include/numpy/npy_common.h b/python/numpy/_core/include/numpy/npy_common.h new file mode 100644 index 000000000..e2556a07a --- /dev/null +++ b/python/numpy/_core/include/numpy/npy_common.h @@ -0,0 +1,977 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ + +/* need Python.h for npy_intp, npy_uintp */ +#include + +/* numpconfig.h is auto-generated */ +#include "numpyconfig.h" +#ifdef HAVE_NPY_CONFIG_H +#include +#endif + +/* + * using static inline modifiers when defining npy_math functions + * allows the compiler to make optimizations when possible + */ +#ifndef NPY_INLINE_MATH +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + #define NPY_INLINE_MATH 1 +#else + #define NPY_INLINE_MATH 0 +#endif +#endif + +/* + * gcc does not unroll even with -O3 + * use with care, unrolling on modern cpus rarely speeds things up + */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS +#define NPY_GCC_UNROLL_LOOPS \ + __attribute__((optimize("unroll-loops"))) +#else +#define NPY_GCC_UNROLL_LOOPS +#endif + +/* highest gcc optimization level, enabled autovectorizer */ +#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3 +#define NPY_GCC_OPT_3 __attribute__((optimize("O3"))) +#else +#define NPY_GCC_OPT_3 +#endif + +/* + * mark an argument (starting from 1) that must not be NULL and is not checked + * DO NOT USE IF FUNCTION CHECKS FOR NULL!! 
the compiler will remove the check + */ +#ifdef HAVE_ATTRIBUTE_NONNULL +#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n))) +#else +#define NPY_GCC_NONNULL(n) +#endif + +/* + * give a hint to the compiler which branch is more likely or unlikely + * to occur, e.g. rare error cases: + * + * if (NPY_UNLIKELY(failure == 0)) + * return NULL; + * + * the double !! is to cast the expression (e.g. NULL) to a boolean required by + * the intrinsic + */ +#ifdef HAVE___BUILTIN_EXPECT +#define NPY_LIKELY(x) __builtin_expect(!!(x), 1) +#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define NPY_LIKELY(x) (x) +#define NPY_UNLIKELY(x) (x) +#endif + +#ifdef HAVE___BUILTIN_PREFETCH +/* unlike _mm_prefetch also works on non-x86 */ +#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc)) +#else +#ifdef NPY_HAVE_SSE +/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */ +#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \ + (loc == 1 ? _MM_HINT_T2 : \ + (loc == 2 ? _MM_HINT_T1 : \ + (loc == 3 ? _MM_HINT_T0 : -1)))) +#else +#define NPY_PREFETCH(x, rw,loc) +#endif +#endif + +/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */ +#if defined(_MSC_VER) && !defined(__clang__) + #define NPY_INLINE __inline +/* clang included here to handle clang-cl on Windows */ +#elif defined(__GNUC__) || defined(__clang__) + #if defined(__STRICT_ANSI__) + #define NPY_INLINE __inline__ + #else + #define NPY_INLINE inline + #endif +#else + #define NPY_INLINE +#endif + +#ifdef _MSC_VER + #define NPY_FINLINE static __forceinline +#elif defined(__GNUC__) + #define NPY_FINLINE static inline __attribute__((always_inline)) +#else + #define NPY_FINLINE static +#endif + +#if defined(_MSC_VER) + #define NPY_NOINLINE static __declspec(noinline) +#elif defined(__GNUC__) || defined(__clang__) + #define NPY_NOINLINE static __attribute__((noinline)) +#else + #define NPY_NOINLINE static +#endif + +#ifdef __cplusplus + #define NPY_TLS thread_local +#elif defined(HAVE_THREAD_LOCAL) + #define NPY_TLS thread_local +#elif defined(HAVE__THREAD_LOCAL) + #define NPY_TLS _Thread_local +#elif defined(HAVE___THREAD) + #define NPY_TLS __thread +#elif defined(HAVE___DECLSPEC_THREAD_) + #define NPY_TLS __declspec(thread) +#else + #define NPY_TLS +#endif + +#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE + #define NPY_RETURNS_BORROWED_REF \ + __attribute__((cpychecker_returns_borrowed_ref)) +#else + #define NPY_RETURNS_BORROWED_REF +#endif + +#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE + #define NPY_STEALS_REF_TO_ARG(n) \ + __attribute__((cpychecker_steals_reference_to_arg(n))) +#else + #define NPY_STEALS_REF_TO_ARG(n) +#endif + +/* 64 bit file position support, also on win-amd64. 
+
+/* 64 bit file position support, also on win-amd64. Issue gh-2256 */
+#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \
+    defined(__MINGW32__) || defined(__MINGW64__)
+    #include <io.h>
+
+    #define npy_fseek _fseeki64
+    #define npy_ftell _ftelli64
+    #define npy_lseek _lseeki64
+    #define npy_off_t npy_int64
+
+    #if NPY_SIZEOF_INT == 8
+        #define NPY_OFF_T_PYFMT "i"
+    #elif NPY_SIZEOF_LONG == 8
+        #define NPY_OFF_T_PYFMT "l"
+    #elif NPY_SIZEOF_LONGLONG == 8
+        #define NPY_OFF_T_PYFMT "L"
+    #else
+        #error Unsupported size for type off_t
+    #endif
+#else
+#ifdef HAVE_FSEEKO
+    #define npy_fseek fseeko
+#else
+    #define npy_fseek fseek
+#endif
+#ifdef HAVE_FTELLO
+    #define npy_ftell ftello
+#else
+    #define npy_ftell ftell
+#endif
+    #include <sys/types.h>
+    #ifndef _WIN32
+        #include <unistd.h>
+    #endif
+    #define npy_lseek lseek
+    #define npy_off_t off_t
+
+    #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT
+        #define NPY_OFF_T_PYFMT "h"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT
+        #define NPY_OFF_T_PYFMT "i"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG
+        #define NPY_OFF_T_PYFMT "l"
+    #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG
+        #define NPY_OFF_T_PYFMT "L"
+    #else
+        #error Unsupported size for type off_t
+    #endif
+#endif
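
These wrappers exist so file offsets stay 64-bit safe even where plain fseek/ftell would truncate to 32 bits. A small sketch of the intended usage (hypothetical helper, assumes `<stdio.h>` is in scope):

static npy_off_t
file_size(FILE *fp)
{
    npy_off_t pos = npy_ftell(fp);
    npy_off_t end = -1;
    if (pos >= 0 && npy_fseek(fp, 0, SEEK_END) == 0) {
        end = npy_ftell(fp);
        npy_fseek(fp, pos, SEEK_SET);  /* restore the original position */
    }
    return end;
}
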
+
+/* enums for detected endianness */
+enum {
+    NPY_CPU_UNKNOWN_ENDIAN,
+    NPY_CPU_LITTLE,
+    NPY_CPU_BIG
+};
+
+/*
+ * This is to typedef npy_intp to the appropriate size for Py_ssize_t.
+ * (Before NumPy 2.0 we used Py_intptr_t and Py_uintptr_t from `pyport.h`.)
+ */
+typedef Py_ssize_t npy_intp;
+typedef size_t npy_uintp;
+
+/*
+ * Define sizes that were not defined in numpyconfig.h.
+ */
+#define NPY_SIZEOF_CHAR 1
+#define NPY_SIZEOF_BYTE 1
+#define NPY_SIZEOF_DATETIME 8
+#define NPY_SIZEOF_TIMEDELTA 8
+#define NPY_SIZEOF_HALF 2
+#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT
+#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE
+#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE
+
+#ifdef constchar
+#undef constchar
+#endif
+
+#define NPY_SSIZE_T_PYFMT "n"
+#define constchar char
+
+/* NPY_INTP_FMT Note:
+ *      Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf,
+ *      NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those
+ *      functions use different formatting codes that are portably specified
+ *      according to the Python documentation. See issue gh-2388.
+ */
+#if NPY_SIZEOF_INTP == NPY_SIZEOF_LONG
+    #define NPY_INTP NPY_LONG
+    #define NPY_UINTP NPY_ULONG
+    #define PyIntpArrType_Type PyLongArrType_Type
+    #define PyUIntpArrType_Type PyULongArrType_Type
+    #define NPY_MAX_INTP NPY_MAX_LONG
+    #define NPY_MIN_INTP NPY_MIN_LONG
+    #define NPY_MAX_UINTP NPY_MAX_ULONG
+    #define NPY_INTP_FMT "ld"
+#elif NPY_SIZEOF_INTP == NPY_SIZEOF_INT
+    #define NPY_INTP NPY_INT
+    #define NPY_UINTP NPY_UINT
+    #define PyIntpArrType_Type PyIntArrType_Type
+    #define PyUIntpArrType_Type PyUIntArrType_Type
+    #define NPY_MAX_INTP NPY_MAX_INT
+    #define NPY_MIN_INTP NPY_MIN_INT
+    #define NPY_MAX_UINTP NPY_MAX_UINT
+    #define NPY_INTP_FMT "d"
+#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_INTP == NPY_SIZEOF_LONGLONG)
+    #define NPY_INTP NPY_LONGLONG
+    #define NPY_UINTP NPY_ULONGLONG
+    #define PyIntpArrType_Type PyLongLongArrType_Type
+    #define PyUIntpArrType_Type PyULongLongArrType_Type
+    #define NPY_MAX_INTP NPY_MAX_LONGLONG
+    #define NPY_MIN_INTP NPY_MIN_LONGLONG
+    #define NPY_MAX_UINTP NPY_MAX_ULONGLONG
+    #define NPY_INTP_FMT "lld"
+#else
+    #error "Failed to correctly define NPY_INTP and NPY_UINTP"
+#endif
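
Per the note above, NPY_INTP_FMT pairs with CPython's own formatting functions rather than PyOS_snprintf. For example (hypothetical error helper):

static void
set_bounds_error(npy_intp index, npy_intp size)
{
    PyErr_Format(PyExc_IndexError,
                 "index %" NPY_INTP_FMT " is out of bounds for size %" NPY_INTP_FMT,
                 index, size);
}
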
+
+/*
+ * Some platforms don't define bool, long long, or long double.
+ * Handle that here.
+ */
+#define NPY_BYTE_FMT "hhd"
+#define NPY_UBYTE_FMT "hhu"
+#define NPY_SHORT_FMT "hd"
+#define NPY_USHORT_FMT "hu"
+#define NPY_INT_FMT "d"
+#define NPY_UINT_FMT "u"
+#define NPY_LONG_FMT "ld"
+#define NPY_ULONG_FMT "lu"
+#define NPY_HALF_FMT "g"
+#define NPY_FLOAT_FMT "g"
+#define NPY_DOUBLE_FMT "g"
+
+
+#ifdef PY_LONG_LONG
+typedef PY_LONG_LONG npy_longlong;
+typedef unsigned PY_LONG_LONG npy_ulonglong;
+#  ifdef _MSC_VER
+#    define NPY_LONGLONG_FMT         "I64d"
+#    define NPY_ULONGLONG_FMT        "I64u"
+#  else
+#    define NPY_LONGLONG_FMT         "lld"
+#    define NPY_ULONGLONG_FMT        "llu"
+#  endif
+#  ifdef _MSC_VER
+#    define NPY_LONGLONG_SUFFIX(x)   (x##i64)
+#    define NPY_ULONGLONG_SUFFIX(x)  (x##Ui64)
+#  else
+#    define NPY_LONGLONG_SUFFIX(x)   (x##LL)
+#    define NPY_ULONGLONG_SUFFIX(x)  (x##ULL)
+#  endif
+#else
+typedef long npy_longlong;
+typedef unsigned long npy_ulonglong;
+#  define NPY_LONGLONG_SUFFIX(x)  (x##L)
+#  define NPY_ULONGLONG_SUFFIX(x) (x##UL)
+#endif
+
+
+typedef unsigned char npy_bool;
+#define NPY_FALSE 0
+#define NPY_TRUE 1
+/*
+ * `NPY_SIZEOF_LONGDOUBLE` isn't usually equal to sizeof(long double).
+ * In certain cases, it may be forced to be equal to sizeof(double)
+ * even against the compiler implementation and the same goes for
+ * `complex long double`.
+ *
+ * Therefore, avoid `long double`, use `npy_longdouble` instead,
+ * and when it comes to standard math functions make sure of using
+ * the double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`.
+ * For example:
+ *   npy_longdouble *ptr, x;
+ *   #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ *       npy_longdouble r = modf(x, ptr);
+ *   #else
+ *       npy_longdouble r = modfl(x, ptr);
+ *   #endif
+ *
+ * See https://github.com/numpy/numpy/issues/20348
+ */
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+    #define NPY_LONGDOUBLE_FMT "g"
+    #define longdouble_t double
+    typedef double npy_longdouble;
+#else
+    #define NPY_LONGDOUBLE_FMT "Lg"
+    #define longdouble_t long double
+    typedef long double npy_longdouble;
+#endif
+
+#ifndef Py_USING_UNICODE
+#error Must use Python with unicode enabled.
+#endif
+
+
+typedef signed char npy_byte;
+typedef unsigned char npy_ubyte;
+typedef unsigned short npy_ushort;
+typedef unsigned int npy_uint;
+typedef unsigned long npy_ulong;
+
+/* These are for completeness */
+typedef char npy_char;
+typedef short npy_short;
+typedef int npy_int;
+typedef long npy_long;
+typedef float npy_float;
+typedef double npy_double;
+
+typedef Py_hash_t npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
+
+#if defined(__cplusplus)
+
+typedef struct
+{
+    double _Val[2];
+} npy_cdouble;
+
+typedef struct
+{
+    float _Val[2];
+} npy_cfloat;
+
+typedef struct
+{
+    long double _Val[2];
+} npy_clongdouble;
+
+#else
+
+#include <complex.h>
+
+
+#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+typedef _Dcomplex npy_cdouble;
+typedef _Fcomplex npy_cfloat;
+typedef _Lcomplex npy_clongdouble;
+#else /* !defined(_MSC_VER) || defined(__INTEL_COMPILER) */
+typedef double _Complex npy_cdouble;
+typedef float _Complex npy_cfloat;
+typedef longdouble_t _Complex npy_clongdouble;
+#endif
+
+#endif
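
In every branch the complex typedefs keep the same payload layout, two contiguous floating-point values, which is what lets npy_math.h (later in this diff) access the real and imaginary parts through a plain pointer cast. An illustrative accessor in that style (hypothetical; npy_math.h below declares proper npy_creal/npy_cimag helpers for this):

static double
real_part(const npy_cdouble *z)
{
    /* same layout trick as npy_csetreal() in npy_math.h */
    return ((const double *)z)[0];
}
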
+
+/*
+ * numarray-style bit-width typedefs
+ */
+#define NPY_MAX_INT8 127
+#define NPY_MIN_INT8 -128
+#define NPY_MAX_UINT8 255
+#define NPY_MAX_INT16 32767
+#define NPY_MIN_INT16 -32768
+#define NPY_MAX_UINT16 65535
+#define NPY_MAX_INT32 2147483647
+#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)
+#define NPY_MAX_UINT32 4294967295U
+#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)
+#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)
+#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)
+#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))
+#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)
+#define NPY_MIN_DATETIME NPY_MIN_INT64
+#define NPY_MAX_DATETIME NPY_MAX_INT64
+#define NPY_MIN_TIMEDELTA NPY_MIN_INT64
+#define NPY_MAX_TIMEDELTA NPY_MAX_INT64
+
+        /* Need to find the number of bits for each type and
+           make definitions accordingly.
+
+           C states that sizeof(char) == 1 by definition
+
+           So, just using the sizeof keyword won't help.
+
+           It also looks like Python itself uses sizeof(char) quite a
+           bit, which by definition should be 1 all the time.
+
+           Idea: Make use of CHAR_BIT which should tell us how many
+           BITS per CHARACTER
+        */
+
+        /* Include platform definitions -- These are in the C89/90 standard */
+#include <limits.h>
+#define NPY_MAX_BYTE SCHAR_MAX
+#define NPY_MIN_BYTE SCHAR_MIN
+#define NPY_MAX_UBYTE UCHAR_MAX
+#define NPY_MAX_SHORT SHRT_MAX
+#define NPY_MIN_SHORT SHRT_MIN
+#define NPY_MAX_USHORT USHRT_MAX
+#define NPY_MAX_INT   INT_MAX
+#ifndef INT_MIN
+#define INT_MIN (-INT_MAX - 1)
+#endif
+#define NPY_MIN_INT   INT_MIN
+#define NPY_MAX_UINT  UINT_MAX
+#define NPY_MAX_LONG  LONG_MAX
+#define NPY_MIN_LONG  LONG_MIN
+#define NPY_MAX_ULONG ULONG_MAX
+
+#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)
+#define NPY_BITSOF_CHAR CHAR_BIT
+#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)
+#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)
+#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)
+#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)
+#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)
+#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)
+#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)
+#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)
+#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)
+#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)
+#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)
+#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)
+#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)
+
+#if NPY_BITSOF_LONG == 8
+#define NPY_INT8 NPY_LONG
+#define NPY_UINT8 NPY_ULONG
+        typedef long npy_int8;
+        typedef unsigned long npy_uint8;
+#define PyInt8ScalarObject PyLongScalarObject
+#define PyInt8ArrType_Type PyLongArrType_Type
+#define PyUInt8ScalarObject PyULongScalarObject
+#define PyUInt8ArrType_Type PyULongArrType_Type
+#define NPY_INT8_FMT NPY_LONG_FMT
+#define NPY_UINT8_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 16
+#define NPY_INT16 NPY_LONG
+#define NPY_UINT16 NPY_ULONG
+        typedef long npy_int16;
+        typedef unsigned long npy_uint16;
+#define PyInt16ScalarObject PyLongScalarObject
+#define PyInt16ArrType_Type PyLongArrType_Type
+#define PyUInt16ScalarObject PyULongScalarObject
+#define PyUInt16ArrType_Type PyULongArrType_Type
+#define NPY_INT16_FMT NPY_LONG_FMT
+#define NPY_UINT16_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 32
+#define NPY_INT32 NPY_LONG
+#define NPY_UINT32 NPY_ULONG
+        typedef long npy_int32;
+        typedef unsigned long npy_uint32;
+        typedef unsigned long npy_ucs4;
+#define PyInt32ScalarObject PyLongScalarObject
+#define PyInt32ArrType_Type PyLongArrType_Type
+#define PyUInt32ScalarObject PyULongScalarObject
+#define PyUInt32ArrType_Type PyULongArrType_Type
+#define NPY_INT32_FMT NPY_LONG_FMT
+#define NPY_UINT32_FMT NPY_ULONG_FMT
+#elif NPY_BITSOF_LONG == 64
+#define NPY_INT64 NPY_LONG
+#define NPY_UINT64 NPY_ULONG
+        typedef long npy_int64;
+        typedef unsigned long npy_uint64;
+#define PyInt64ScalarObject PyLongScalarObject
+#define PyInt64ArrType_Type PyLongArrType_Type
+#define PyUInt64ScalarObject PyULongScalarObject
+#define PyUInt64ArrType_Type PyULongArrType_Type
+#define NPY_INT64_FMT NPY_LONG_FMT
+#define NPY_UINT64_FMT NPY_ULONG_FMT
+#define MyPyLong_FromInt64 PyLong_FromLong
+#define MyPyLong_AsInt64 PyLong_AsLong
+#endif
+
+#if NPY_BITSOF_LONGLONG == 8
+#  ifndef NPY_INT8
+#    define NPY_INT8 NPY_LONGLONG
+#    define NPY_UINT8 NPY_ULONGLONG
+        typedef npy_longlong npy_int8;
+        typedef npy_ulonglong npy_uint8;
+# define PyInt8ScalarObject PyLongLongScalarObject +# define PyInt8ArrType_Type PyLongLongArrType_Type +# define PyUInt8ScalarObject PyULongLongScalarObject +# define PyUInt8ArrType_Type PyULongLongArrType_Type +#define NPY_INT8_FMT NPY_LONGLONG_FMT +#define NPY_UINT8_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT8 +# define NPY_MIN_LONGLONG NPY_MIN_INT8 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT8 +#elif NPY_BITSOF_LONGLONG == 16 +# ifndef NPY_INT16 +# define NPY_INT16 NPY_LONGLONG +# define NPY_UINT16 NPY_ULONGLONG + typedef npy_longlong npy_int16; + typedef npy_ulonglong npy_uint16; +# define PyInt16ScalarObject PyLongLongScalarObject +# define PyInt16ArrType_Type PyLongLongArrType_Type +# define PyUInt16ScalarObject PyULongLongScalarObject +# define PyUInt16ArrType_Type PyULongLongArrType_Type +#define NPY_INT16_FMT NPY_LONGLONG_FMT +#define NPY_UINT16_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT16 +# define NPY_MIN_LONGLONG NPY_MIN_INT16 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT16 +#elif NPY_BITSOF_LONGLONG == 32 +# ifndef NPY_INT32 +# define NPY_INT32 NPY_LONGLONG +# define NPY_UINT32 NPY_ULONGLONG + typedef npy_longlong npy_int32; + typedef npy_ulonglong npy_uint32; + typedef npy_ulonglong npy_ucs4; +# define PyInt32ScalarObject PyLongLongScalarObject +# define PyInt32ArrType_Type PyLongLongArrType_Type +# define PyUInt32ScalarObject PyULongLongScalarObject +# define PyUInt32ArrType_Type PyULongLongArrType_Type +#define NPY_INT32_FMT NPY_LONGLONG_FMT +#define NPY_UINT32_FMT NPY_ULONGLONG_FMT +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT32 +# define NPY_MIN_LONGLONG NPY_MIN_INT32 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT32 +#elif NPY_BITSOF_LONGLONG == 64 +# ifndef NPY_INT64 +# define NPY_INT64 NPY_LONGLONG +# define NPY_UINT64 NPY_ULONGLONG + typedef npy_longlong npy_int64; + typedef npy_ulonglong npy_uint64; +# define PyInt64ScalarObject PyLongLongScalarObject +# define PyInt64ArrType_Type PyLongLongArrType_Type +# define PyUInt64ScalarObject PyULongLongScalarObject +# define PyUInt64ArrType_Type PyULongLongArrType_Type +#define NPY_INT64_FMT NPY_LONGLONG_FMT +#define NPY_UINT64_FMT NPY_ULONGLONG_FMT +# define MyPyLong_FromInt64 PyLong_FromLongLong +# define MyPyLong_AsInt64 PyLong_AsLongLong +# endif +# define NPY_MAX_LONGLONG NPY_MAX_INT64 +# define NPY_MIN_LONGLONG NPY_MIN_INT64 +# define NPY_MAX_ULONGLONG NPY_MAX_UINT64 +#endif + +#if NPY_BITSOF_INT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_INT +#define NPY_UINT8 NPY_UINT + typedef int npy_int8; + typedef unsigned int npy_uint8; +# define PyInt8ScalarObject PyIntScalarObject +# define PyInt8ArrType_Type PyIntArrType_Type +# define PyUInt8ScalarObject PyUIntScalarObject +# define PyUInt8ArrType_Type PyUIntArrType_Type +#define NPY_INT8_FMT NPY_INT_FMT +#define NPY_UINT8_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_INT +#define NPY_UINT16 NPY_UINT + typedef int npy_int16; + typedef unsigned int npy_uint16; +# define PyInt16ScalarObject PyIntScalarObject +# define PyInt16ArrType_Type PyIntArrType_Type +# define PyUInt16ScalarObject PyIntUScalarObject +# define PyUInt16ArrType_Type PyIntUArrType_Type +#define NPY_INT16_FMT NPY_INT_FMT +#define NPY_UINT16_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_INT +#define NPY_UINT32 NPY_UINT + typedef int npy_int32; + typedef unsigned int npy_uint32; + typedef unsigned int npy_ucs4; +# define PyInt32ScalarObject PyIntScalarObject +# define 
PyInt32ArrType_Type PyIntArrType_Type +# define PyUInt32ScalarObject PyUIntScalarObject +# define PyUInt32ArrType_Type PyUIntArrType_Type +#define NPY_INT32_FMT NPY_INT_FMT +#define NPY_UINT32_FMT NPY_UINT_FMT +#endif +#elif NPY_BITSOF_INT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_INT +#define NPY_UINT64 NPY_UINT + typedef int npy_int64; + typedef unsigned int npy_uint64; +# define PyInt64ScalarObject PyIntScalarObject +# define PyInt64ArrType_Type PyIntArrType_Type +# define PyUInt64ScalarObject PyUIntScalarObject +# define PyUInt64ArrType_Type PyUIntArrType_Type +#define NPY_INT64_FMT NPY_INT_FMT +#define NPY_UINT64_FMT NPY_UINT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#endif + +#if NPY_BITSOF_SHORT == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_SHORT +#define NPY_UINT8 NPY_USHORT + typedef short npy_int8; + typedef unsigned short npy_uint8; +# define PyInt8ScalarObject PyShortScalarObject +# define PyInt8ArrType_Type PyShortArrType_Type +# define PyUInt8ScalarObject PyUShortScalarObject +# define PyUInt8ArrType_Type PyUShortArrType_Type +#define NPY_INT8_FMT NPY_SHORT_FMT +#define NPY_UINT8_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_SHORT +#define NPY_UINT16 NPY_USHORT + typedef short npy_int16; + typedef unsigned short npy_uint16; +# define PyInt16ScalarObject PyShortScalarObject +# define PyInt16ArrType_Type PyShortArrType_Type +# define PyUInt16ScalarObject PyUShortScalarObject +# define PyUInt16ArrType_Type PyUShortArrType_Type +#define NPY_INT16_FMT NPY_SHORT_FMT +#define NPY_UINT16_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_SHORT +#define NPY_UINT32 NPY_USHORT + typedef short npy_int32; + typedef unsigned short npy_uint32; + typedef unsigned short npy_ucs4; +# define PyInt32ScalarObject PyShortScalarObject +# define PyInt32ArrType_Type PyShortArrType_Type +# define PyUInt32ScalarObject PyUShortScalarObject +# define PyUInt32ArrType_Type PyUShortArrType_Type +#define NPY_INT32_FMT NPY_SHORT_FMT +#define NPY_UINT32_FMT NPY_USHORT_FMT +#endif +#elif NPY_BITSOF_SHORT == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_SHORT +#define NPY_UINT64 NPY_USHORT + typedef short npy_int64; + typedef unsigned short npy_uint64; +# define PyInt64ScalarObject PyShortScalarObject +# define PyInt64ArrType_Type PyShortArrType_Type +# define PyUInt64ScalarObject PyUShortScalarObject +# define PyUInt64ArrType_Type PyUShortArrType_Type +#define NPY_INT64_FMT NPY_SHORT_FMT +#define NPY_UINT64_FMT NPY_USHORT_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#endif + + +#if NPY_BITSOF_CHAR == 8 +#ifndef NPY_INT8 +#define NPY_INT8 NPY_BYTE +#define NPY_UINT8 NPY_UBYTE + typedef signed char npy_int8; + typedef unsigned char npy_uint8; +# define PyInt8ScalarObject PyByteScalarObject +# define PyInt8ArrType_Type PyByteArrType_Type +# define PyUInt8ScalarObject PyUByteScalarObject +# define PyUInt8ArrType_Type PyUByteArrType_Type +#define NPY_INT8_FMT NPY_BYTE_FMT +#define NPY_UINT8_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 16 +#ifndef NPY_INT16 +#define NPY_INT16 NPY_BYTE +#define NPY_UINT16 NPY_UBYTE + typedef signed char npy_int16; + typedef unsigned char npy_uint16; +# define PyInt16ScalarObject PyByteScalarObject +# define PyInt16ArrType_Type PyByteArrType_Type +# define PyUInt16ScalarObject PyUByteScalarObject +# define PyUInt16ArrType_Type PyUByteArrType_Type +#define 
NPY_INT16_FMT NPY_BYTE_FMT +#define NPY_UINT16_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 32 +#ifndef NPY_INT32 +#define NPY_INT32 NPY_BYTE +#define NPY_UINT32 NPY_UBYTE + typedef signed char npy_int32; + typedef unsigned char npy_uint32; + typedef unsigned char npy_ucs4; +# define PyInt32ScalarObject PyByteScalarObject +# define PyInt32ArrType_Type PyByteArrType_Type +# define PyUInt32ScalarObject PyUByteScalarObject +# define PyUInt32ArrType_Type PyUByteArrType_Type +#define NPY_INT32_FMT NPY_BYTE_FMT +#define NPY_UINT32_FMT NPY_UBYTE_FMT +#endif +#elif NPY_BITSOF_CHAR == 64 +#ifndef NPY_INT64 +#define NPY_INT64 NPY_BYTE +#define NPY_UINT64 NPY_UBYTE + typedef signed char npy_int64; + typedef unsigned char npy_uint64; +# define PyInt64ScalarObject PyByteScalarObject +# define PyInt64ArrType_Type PyByteArrType_Type +# define PyUInt64ScalarObject PyUByteScalarObject +# define PyUInt64ArrType_Type PyUByteArrType_Type +#define NPY_INT64_FMT NPY_BYTE_FMT +#define NPY_UINT64_FMT NPY_UBYTE_FMT +# define MyPyLong_FromInt64 PyLong_FromLong +# define MyPyLong_AsInt64 PyLong_AsLong +#endif +#elif NPY_BITSOF_CHAR == 128 +#endif + + + +#if NPY_BITSOF_DOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_DOUBLE +#define NPY_COMPLEX64 NPY_CDOUBLE + typedef double npy_float32; + typedef npy_cdouble npy_complex64; +# define PyFloat32ScalarObject PyDoubleScalarObject +# define PyComplex64ScalarObject PyCDoubleScalarObject +# define PyFloat32ArrType_Type PyDoubleArrType_Type +# define PyComplex64ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_DOUBLE +#define NPY_COMPLEX128 NPY_CDOUBLE + typedef double npy_float64; + typedef npy_cdouble npy_complex128; +# define PyFloat64ScalarObject PyDoubleScalarObject +# define PyComplex128ScalarObject PyCDoubleScalarObject +# define PyFloat64ArrType_Type PyDoubleArrType_Type +# define PyComplex128ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_DOUBLE +#define NPY_COMPLEX160 NPY_CDOUBLE + typedef double npy_float80; + typedef npy_cdouble npy_complex160; +# define PyFloat80ScalarObject PyDoubleScalarObject +# define PyComplex160ScalarObject PyCDoubleScalarObject +# define PyFloat80ArrType_Type PyDoubleArrType_Type +# define PyComplex160ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_DOUBLE +#define NPY_COMPLEX192 NPY_CDOUBLE + typedef double npy_float96; + typedef npy_cdouble npy_complex192; +# define PyFloat96ScalarObject PyDoubleScalarObject +# define PyComplex192ScalarObject PyCDoubleScalarObject +# define PyFloat96ArrType_Type PyDoubleArrType_Type +# define PyComplex192ArrType_Type PyCDoubleArrType_Type +#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT +#endif +#elif NPY_BITSOF_DOUBLE == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_DOUBLE +#define NPY_COMPLEX256 NPY_CDOUBLE + typedef double npy_float128; + typedef npy_cdouble npy_complex256; +# define PyFloat128ScalarObject PyDoubleScalarObject +# define PyComplex256ScalarObject PyCDoubleScalarObject +# define PyFloat128ArrType_Type PyDoubleArrType_Type +# define PyComplex256ArrType_Type 
PyCDoubleArrType_Type +#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT +#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT +#endif +#endif + + + +#if NPY_BITSOF_FLOAT == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_FLOAT +#define NPY_COMPLEX64 NPY_CFLOAT + typedef float npy_float32; + typedef npy_cfloat npy_complex64; +# define PyFloat32ScalarObject PyFloatScalarObject +# define PyComplex64ScalarObject PyCFloatScalarObject +# define PyFloat32ArrType_Type PyFloatArrType_Type +# define PyComplex64ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT32_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_FLOAT +#define NPY_COMPLEX128 NPY_CFLOAT + typedef float npy_float64; + typedef npy_cfloat npy_complex128; +# define PyFloat64ScalarObject PyFloatScalarObject +# define PyComplex128ScalarObject PyCFloatScalarObject +# define PyFloat64ArrType_Type PyFloatArrType_Type +# define PyComplex128ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT64_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 80 +#ifndef NPY_FLOAT80 +#define NPY_FLOAT80 NPY_FLOAT +#define NPY_COMPLEX160 NPY_CFLOAT + typedef float npy_float80; + typedef npy_cfloat npy_complex160; +# define PyFloat80ScalarObject PyFloatScalarObject +# define PyComplex160ScalarObject PyCFloatScalarObject +# define PyFloat80ArrType_Type PyFloatArrType_Type +# define PyComplex160ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT80_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 96 +#ifndef NPY_FLOAT96 +#define NPY_FLOAT96 NPY_FLOAT +#define NPY_COMPLEX192 NPY_CFLOAT + typedef float npy_float96; + typedef npy_cfloat npy_complex192; +# define PyFloat96ScalarObject PyFloatScalarObject +# define PyComplex192ScalarObject PyCFloatScalarObject +# define PyFloat96ArrType_Type PyFloatArrType_Type +# define PyComplex192ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT96_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT +#endif +#elif NPY_BITSOF_FLOAT == 128 +#ifndef NPY_FLOAT128 +#define NPY_FLOAT128 NPY_FLOAT +#define NPY_COMPLEX256 NPY_CFLOAT + typedef float npy_float128; + typedef npy_cfloat npy_complex256; +# define PyFloat128ScalarObject PyFloatScalarObject +# define PyComplex256ScalarObject PyCFloatScalarObject +# define PyFloat128ArrType_Type PyFloatArrType_Type +# define PyComplex256ArrType_Type PyCFloatArrType_Type +#define NPY_FLOAT128_FMT NPY_FLOAT_FMT +#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT +#endif +#endif + +/* half/float16 isn't a floating-point type in C */ +#define NPY_FLOAT16 NPY_HALF +typedef npy_uint16 npy_half; +typedef npy_half npy_float16; + +#if NPY_BITSOF_LONGDOUBLE == 32 +#ifndef NPY_FLOAT32 +#define NPY_FLOAT32 NPY_LONGDOUBLE +#define NPY_COMPLEX64 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float32; + typedef npy_clongdouble npy_complex64; +# define PyFloat32ScalarObject PyLongDoubleScalarObject +# define PyComplex64ScalarObject PyCLongDoubleScalarObject +# define PyFloat32ArrType_Type PyLongDoubleArrType_Type +# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type +#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT +#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT +#endif +#elif NPY_BITSOF_LONGDOUBLE == 64 +#ifndef NPY_FLOAT64 +#define NPY_FLOAT64 NPY_LONGDOUBLE +#define NPY_COMPLEX128 NPY_CLONGDOUBLE + typedef npy_longdouble npy_float64; + typedef npy_clongdouble npy_complex128; +# define PyFloat64ScalarObject PyLongDoubleScalarObject +# define 
PyComplex128ScalarObject PyCLongDoubleScalarObject
+#  define PyFloat64ArrType_Type PyLongDoubleArrType_Type
+#  define PyComplex128ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 80
+#ifndef NPY_FLOAT80
+#define NPY_FLOAT80 NPY_LONGDOUBLE
+#define NPY_COMPLEX160 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float80;
+        typedef npy_clongdouble npy_complex160;
+#  define PyFloat80ScalarObject PyLongDoubleScalarObject
+#  define PyComplex160ScalarObject PyCLongDoubleScalarObject
+#  define PyFloat80ArrType_Type PyLongDoubleArrType_Type
+#  define PyComplex160ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 96
+#ifndef NPY_FLOAT96
+#define NPY_FLOAT96 NPY_LONGDOUBLE
+#define NPY_COMPLEX192 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float96;
+        typedef npy_clongdouble npy_complex192;
+#  define PyFloat96ScalarObject PyLongDoubleScalarObject
+#  define PyComplex192ScalarObject PyCLongDoubleScalarObject
+#  define PyFloat96ArrType_Type PyLongDoubleArrType_Type
+#  define PyComplex192ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#elif NPY_BITSOF_LONGDOUBLE == 128
+#ifndef NPY_FLOAT128
+#define NPY_FLOAT128 NPY_LONGDOUBLE
+#define NPY_COMPLEX256 NPY_CLONGDOUBLE
+        typedef npy_longdouble npy_float128;
+        typedef npy_clongdouble npy_complex256;
+#  define PyFloat128ScalarObject PyLongDoubleScalarObject
+#  define PyComplex256ScalarObject PyCLongDoubleScalarObject
+#  define PyFloat128ArrType_Type PyLongDoubleArrType_Type
+#  define PyComplex256ArrType_Type PyCLongDoubleArrType_Type
+#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT
+#define NPY_COMPLEX256_FMT NPY_CLONGDOUBLE_FMT
+#endif
+#endif
+
+/* datetime typedefs */
+typedef npy_int64 npy_timedelta;
+typedef npy_int64 npy_datetime;
+#define NPY_DATETIME_FMT NPY_INT64_FMT
+#define NPY_TIMEDELTA_FMT NPY_INT64_FMT
+
+/* End of typedefs for numarray style bit-width names */
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */
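
The fixed-width *_FMT macros resolved through the chains above are the ones meant for PyOS_snprintf (only NPY_INTP_FMT is special-cased for PyErr_Format, as noted earlier). A sketch (hypothetical helper):

static void
format_int64(char *buf, size_t len, npy_int64 value)
{
    /* NPY_INT64_FMT expands to "ld", "lld", or "I64d" as appropriate */
    PyOS_snprintf(buf, len, "%" NPY_INT64_FMT, value);
}
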
diff --git a/python/numpy/_core/include/numpy/npy_cpu.h b/python/numpy/_core/include/numpy/npy_cpu.h
new file mode 100644
index 000000000..91cf2d825
--- /dev/null
+++ b/python/numpy/_core/include/numpy/npy_cpu.h
@@ -0,0 +1,124 @@
+/*
+ * This sets (target) CPU-specific macros:
+ *      - Possible values:
+ *              NPY_CPU_X86
+ *              NPY_CPU_AMD64
+ *              NPY_CPU_PPC
+ *              NPY_CPU_PPC64
+ *              NPY_CPU_PPC64LE
+ *              NPY_CPU_SPARC
+ *              NPY_CPU_S390
+ *              NPY_CPU_IA64
+ *              NPY_CPU_HPPA
+ *              NPY_CPU_ALPHA
+ *              NPY_CPU_ARMEL
+ *              NPY_CPU_ARMEB
+ *              NPY_CPU_SH_LE
+ *              NPY_CPU_SH_BE
+ *              NPY_CPU_ARCEL
+ *              NPY_CPU_ARCEB
+ *              NPY_CPU_RISCV64
+ *              NPY_CPU_RISCV32
+ *              NPY_CPU_LOONGARCH
+ *              NPY_CPU_WASM
+ */
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_
+
+#include "numpyconfig.h"
+
+#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
+    /*
+     * __i386__ is defined by gcc and Intel compiler on Linux,
+     * _M_IX86 by VS compiler,
+     * i386 by Sun compilers on opensolaris at least
+     */
+    #define NPY_CPU_X86
+#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
+    /*
+     * both __x86_64__ and __amd64__ are defined by gcc
+     * __x86_64 defined by sun compiler on opensolaris at least
+     * _M_AMD64 defined by MS compiler
+     */
+    #define NPY_CPU_AMD64
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+    #define NPY_CPU_PPC64LE
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+    #define NPY_CPU_PPC64
+#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
+    /*
+     * __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
+     * but can't find it ATM
+     * _ARCH_PPC is used by at least gcc on AIX
+     * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check
+     * for those specifically first before defaulting to ppc
+     */
+    #define NPY_CPU_PPC
+#elif defined(__sparc__) || defined(__sparc)
+    /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
+    #define NPY_CPU_SPARC
+#elif defined(__s390__)
+    #define NPY_CPU_S390
+#elif defined(__ia64)
+    #define NPY_CPU_IA64
+#elif defined(__hppa)
+    #define NPY_CPU_HPPA
+#elif defined(__alpha__)
+    #define NPY_CPU_ALPHA
+#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)
+    /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */
+    #if defined(__ARMEB__) || defined(__AARCH64EB__)
+        #if defined(__ARM_32BIT_STATE)
+            #define NPY_CPU_ARMEB_AARCH32
+        #elif defined(__ARM_64BIT_STATE)
+            #define NPY_CPU_ARMEB_AARCH64
+        #else
+            #define NPY_CPU_ARMEB
+        #endif
+    #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
+        #if defined(__ARM_32BIT_STATE)
+            #define NPY_CPU_ARMEL_AARCH32
+        #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
+            #define NPY_CPU_ARMEL_AARCH64
+        #else
+            #define NPY_CPU_ARMEL
+        #endif
+    #else
+        # error Unknown ARM CPU, please report this to numpy maintainers with \
+        information about your platform (OS, CPU and compiler)
+    #endif
+#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
+    #define NPY_CPU_SH_LE
+#elif defined(__sh__) && defined(__BIG_ENDIAN__)
+    #define NPY_CPU_SH_BE
+#elif defined(__MIPSEL__)
+    #define NPY_CPU_MIPSEL
+#elif defined(__MIPSEB__)
+    #define NPY_CPU_MIPSEB
+#elif defined(__or1k__)
+    #define NPY_CPU_OR1K
+#elif defined(__mc68000__)
+    #define NPY_CPU_M68K
+#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
+    #define NPY_CPU_ARCEL
+#elif defined(__arc__) && defined(__BIG_ENDIAN__)
+    #define NPY_CPU_ARCEB
+#elif defined(__riscv)
+    #if __riscv_xlen == 64
+        #define NPY_CPU_RISCV64
+    #elif __riscv_xlen == 32
+        #define NPY_CPU_RISCV32
+    #endif
+#elif defined(__loongarch_lp64)
+    #define NPY_CPU_LOONGARCH64
+#elif defined(__EMSCRIPTEN__)
+    /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
+    #define NPY_CPU_WASM
+#else
+    #error Unknown CPU, please report this to numpy maintainers with \
+    information about your platform (OS, CPU and compiler)
+#endif
+
+#define NPY_ALIGNMENT_REQUIRED 1
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
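
Downstream code is expected to branch on these NPY_CPU_* macros rather than probing compiler builtins directly, along these lines (illustrative only; the commented paths are placeholders):

#include "numpy/npy_cpu.h"

#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)
    /* x86 path, e.g. SSE-accelerated kernels */
#elif defined(NPY_CPU_ARMEL_AARCH64)
    /* little-endian AArch64 path, e.g. NEON kernels */
#else
    /* portable fallback */
#endif
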
diff --git a/python/numpy/_core/include/numpy/npy_endian.h b/python/numpy/_core/include/numpy/npy_endian.h
new file mode 100644
index 000000000..09262120b
--- /dev/null
+++ b/python/numpy/_core/include/numpy/npy_endian.h
@@ -0,0 +1,78 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_
+
+/*
+ * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
+ * endian.h
+ */
+
+#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H)
+    /* Use endian.h if available */
+
+    #if defined(NPY_HAVE_ENDIAN_H)
+        #include <endian.h>
+    #elif defined(NPY_HAVE_SYS_ENDIAN_H)
+        #include <sys/endian.h>
+    #endif
+
+    #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN)
+        #define NPY_BYTE_ORDER    BYTE_ORDER
+        #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN
+        #define NPY_BIG_ENDIAN    BIG_ENDIAN
+    #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)
+        #define NPY_BYTE_ORDER    _BYTE_ORDER
+        #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN
+        #define NPY_BIG_ENDIAN    _BIG_ENDIAN
+    #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
+        #define NPY_BYTE_ORDER    __BYTE_ORDER
+        #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
+        #define NPY_BIG_ENDIAN    __BIG_ENDIAN
+    #endif
+#endif
+
+#ifndef NPY_BYTE_ORDER
+    /* Set endianness info using target CPU */
+    #include "npy_cpu.h"
+
+    #define NPY_LITTLE_ENDIAN 1234
+    #define NPY_BIG_ENDIAN 4321
+
+    #if defined(NPY_CPU_X86)                  \
+            || defined(NPY_CPU_AMD64)         \
+            || defined(NPY_CPU_IA64)          \
+            || defined(NPY_CPU_ALPHA)         \
+            || defined(NPY_CPU_ARMEL)         \
+            || defined(NPY_CPU_ARMEL_AARCH32) \
+            || defined(NPY_CPU_ARMEL_AARCH64) \
+            || defined(NPY_CPU_SH_LE)         \
+            || defined(NPY_CPU_MIPSEL)        \
+            || defined(NPY_CPU_PPC64LE)       \
+            || defined(NPY_CPU_ARCEL)         \
+            || defined(NPY_CPU_RISCV64)       \
+            || defined(NPY_CPU_RISCV32)       \
+            || defined(NPY_CPU_LOONGARCH)     \
+            || defined(NPY_CPU_WASM)
+        #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
+
+    #elif defined(NPY_CPU_PPC)                \
+            || defined(NPY_CPU_SPARC)         \
+            || defined(NPY_CPU_S390)          \
+            || defined(NPY_CPU_HPPA)          \
+            || defined(NPY_CPU_PPC64)         \
+            || defined(NPY_CPU_ARMEB)         \
+            || defined(NPY_CPU_ARMEB_AARCH32) \
+            || defined(NPY_CPU_ARMEB_AARCH64) \
+            || defined(NPY_CPU_SH_BE)         \
+            || defined(NPY_CPU_MIPSEB)        \
+            || defined(NPY_CPU_OR1K)          \
+            || defined(NPY_CPU_M68K)          \
+            || defined(NPY_CPU_ARCEB)
+        #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
+
+    #else
+        #error Unknown CPU: can not set endianness
+    #endif
+
+#endif
+
+#endif  /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */
diff --git a/python/numpy/_core/include/numpy/npy_math.h b/python/numpy/_core/include/numpy/npy_math.h
new file mode 100644
index 000000000..abc784bc6
--- /dev/null
+++ b/python/numpy/_core/include/numpy/npy_math.h
@@ -0,0 +1,602 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_
+
+#include <numpy/npy_common.h>
+
+#include <math.h>
+
+/* By adding static inline specifiers to npy_math function definitions when
+   appropriate, compiler is given the opportunity to optimize */
+#if NPY_INLINE_MATH
+#define NPY_INPLACE static inline
+#else
+#define NPY_INPLACE
+#endif
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define PyArray_MAX(a,b) (((a)>(b))?(a):(b))
+#define PyArray_MIN(a,b) (((a)<(b))?(a):(b))
+
+/*
+ * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99
+ * for INFINITY)
+ *
+ * XXX: I should test whether INFINITY and NAN are available on the platform
+ */
+static inline float __npy_inff(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};
+    return __bint.__f;
+}
+
+static inline float __npy_nanf(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};
+    return __bint.__f;
+}
+
+static inline float __npy_pzerof(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};
+    return __bint.__f;
+}
+
+static inline float __npy_nzerof(void)
+{
+    const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};
+    return __bint.__f;
+}
+
+#define NPY_INFINITYF __npy_inff()
+#define NPY_NANF __npy_nanf()
+#define NPY_PZEROF __npy_pzerof()
+#define NPY_NZEROF __npy_nzerof()
+
+#define NPY_INFINITY ((npy_double)NPY_INFINITYF)
+#define NPY_NAN ((npy_double)NPY_NANF)
+#define NPY_PZERO ((npy_double)NPY_PZEROF)
+#define NPY_NZERO ((npy_double)NPY_NZEROF)
+
+#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)
+#define NPY_NANL ((npy_longdouble)NPY_NANF)
+#define NPY_PZEROL
((npy_longdouble)NPY_PZEROF) +#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF) + +/* + * Useful constants + */ +#define NPY_E 2.718281828459045235360287471352662498 /* e */ +#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */ +#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */ +#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */ +#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */ +#define NPY_PI 3.141592653589793238462643383279502884 /* pi */ +#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */ +#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */ +#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */ +#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */ +#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */ +#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */ +#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */ + +#define NPY_Ef 2.718281828459045235360287471352662498F /* e */ +#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */ +#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */ +#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */ +#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */ +#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */ +#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */ +#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */ +#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */ +#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */ +#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */ +#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */ +#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */ + +#define NPY_El 2.718281828459045235360287471352662498L /* e */ +#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */ +#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */ +#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */ +#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */ +#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */ +#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */ +#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */ +#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */ +#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */ +#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */ +#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */ +#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */ + +/* + * Integer functions. 
+ */ +NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); + +NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); +NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); +NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); +NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); +NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b); +NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b); + +NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b); +NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b); +NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b); +NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b); +NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b); +NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b); +NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b); +NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b); + +NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a); +NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a); +NPY_INPLACE uint8_t npy_popcountu(npy_uint a); +NPY_INPLACE uint8_t npy_popcountul(npy_ulong a); +NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a); +NPY_INPLACE uint8_t npy_popcounthh(npy_byte a); +NPY_INPLACE uint8_t npy_popcounth(npy_short a); +NPY_INPLACE uint8_t npy_popcount(npy_int a); +NPY_INPLACE uint8_t npy_popcountl(npy_long a); +NPY_INPLACE uint8_t npy_popcountll(npy_longlong a); + +/* + * C99 double math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE double npy_sin(double x); +NPY_INPLACE double npy_cos(double x); +NPY_INPLACE double npy_tan(double x); +NPY_INPLACE double npy_hypot(double x, double y); +NPY_INPLACE double npy_log2(double x); +NPY_INPLACE double npy_atan2(double x, double y); + +/* Mandatory C99 double math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ +#define npy_sinh sinh +#define npy_cosh cosh +#define npy_tanh tanh +#define npy_asin asin +#define npy_acos acos +#define npy_atan atan +#define npy_log log +#define npy_log10 log10 +#define npy_cbrt cbrt +#define npy_fabs fabs +#define npy_ceil ceil +#define npy_fmod fmod +#define npy_floor floor +#define npy_expm1 expm1 +#define npy_log1p log1p +#define npy_acosh acosh +#define npy_asinh asinh +#define npy_atanh atanh +#define npy_rint rint +#define npy_trunc trunc +#define npy_exp2 exp2 +#define npy_frexp frexp +#define npy_ldexp ldexp +#define 
npy_copysign copysign +#define npy_exp exp +#define npy_sqrt sqrt +#define npy_pow pow +#define npy_modf modf +#define npy_nextafter nextafter + +double npy_spacing(double x); + +/* + * IEEE 754 fpu handling + */ + +/* use builtins to avoid function calls in tight loops + * only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISNAN + #define npy_isnan(x) __builtin_isnan(x) +#else + #define npy_isnan(x) isnan(x) +#endif + + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISFINITE + #define npy_isfinite(x) __builtin_isfinite(x) +#else + #define npy_isfinite(x) isfinite((x)) +#endif + +/* only available if npy_config.h is available (= numpys own build) */ +#ifdef HAVE___BUILTIN_ISINF + #define npy_isinf(x) __builtin_isinf(x) +#else + #define npy_isinf(x) isinf((x)) +#endif + +#define npy_signbit(x) signbit((x)) + +/* + * float C99 math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE float npy_sinf(float x); +NPY_INPLACE float npy_cosf(float x); +NPY_INPLACE float npy_tanf(float x); +NPY_INPLACE float npy_expf(float x); +NPY_INPLACE float npy_sqrtf(float x); +NPY_INPLACE float npy_hypotf(float x, float y); +NPY_INPLACE float npy_log2f(float x); +NPY_INPLACE float npy_atan2f(float x, float y); +NPY_INPLACE float npy_powf(float x, float y); +NPY_INPLACE float npy_modff(float x, float* y); + +/* Mandatory C99 float math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ + +#define npy_sinhf sinhf +#define npy_coshf coshf +#define npy_tanhf tanhf +#define npy_asinf asinf +#define npy_acosf acosf +#define npy_atanf atanf +#define npy_logf logf +#define npy_log10f log10f +#define npy_cbrtf cbrtf +#define npy_fabsf fabsf +#define npy_ceilf ceilf +#define npy_fmodf fmodf +#define npy_floorf floorf +#define npy_expm1f expm1f +#define npy_log1pf log1pf +#define npy_asinhf asinhf +#define npy_acoshf acoshf +#define npy_atanhf atanhf +#define npy_rintf rintf +#define npy_truncf truncf +#define npy_exp2f exp2f +#define npy_frexpf frexpf +#define npy_ldexpf ldexpf +#define npy_copysignf copysignf +#define npy_nextafterf nextafterf + +float npy_spacingf(float x); + +/* + * long double C99 double math funcs that need fixups or are blocklist-able + */ +NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y); + +/* Mandatory C99 double math funcs, no blocklisting or fixups */ +/* defined for legacy reasons, should be deprecated at some point */ +#define npy_sinhl sinhl +#define npy_coshl coshl +#define npy_tanhl tanhl +#define npy_fabsl fabsl +#define npy_floorl floorl +#define npy_ceill ceill +#define npy_rintl rintl +#define npy_truncl truncl +#define npy_cbrtl cbrtl +#define npy_log10l log10l +#define npy_logl logl +#define npy_expm1l expm1l +#define npy_asinl asinl +#define npy_acosl acosl +#define npy_atanl atanl +#define npy_asinhl asinhl +#define npy_acoshl acoshl +#define 
npy_atanhl atanhl +#define npy_log1pl log1pl +#define npy_exp2l exp2l +#define npy_fmodl fmodl +#define npy_frexpl frexpl +#define npy_ldexpl ldexpl +#define npy_copysignl copysignl +#define npy_nextafterl nextafterl + +npy_longdouble npy_spacingl(npy_longdouble x); + +/* + * Non standard functions + */ +NPY_INPLACE double npy_deg2rad(double x); +NPY_INPLACE double npy_rad2deg(double x); +NPY_INPLACE double npy_logaddexp(double x, double y); +NPY_INPLACE double npy_logaddexp2(double x, double y); +NPY_INPLACE double npy_divmod(double x, double y, double *modulus); +NPY_INPLACE double npy_heaviside(double x, double h0); + +NPY_INPLACE float npy_deg2radf(float x); +NPY_INPLACE float npy_rad2degf(float x); +NPY_INPLACE float npy_logaddexpf(float x, float y); +NPY_INPLACE float npy_logaddexp2f(float x, float y); +NPY_INPLACE float npy_divmodf(float x, float y, float *modulus); +NPY_INPLACE float npy_heavisidef(float x, float h0); + +NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x); +NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y); +NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y, + npy_longdouble *modulus); +NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); + +#define npy_degrees npy_rad2deg +#define npy_degreesf npy_rad2degf +#define npy_degreesl npy_rad2degl + +#define npy_radians npy_deg2rad +#define npy_radiansf npy_deg2radf +#define npy_radiansl npy_deg2radl + +/* + * Complex declarations + */ + +static inline double npy_creal(const npy_cdouble z) +{ +#if defined(__cplusplus) + return z._Val[0]; +#else + return creal(z); +#endif +} + +static inline void npy_csetreal(npy_cdouble *z, const double r) +{ + ((double *) z)[0] = r; +} + +static inline double npy_cimag(const npy_cdouble z) +{ +#if defined(__cplusplus) + return z._Val[1]; +#else + return cimag(z); +#endif +} + +static inline void npy_csetimag(npy_cdouble *z, const double i) +{ + ((double *) z)[1] = i; +} + +static inline float npy_crealf(const npy_cfloat z) +{ +#if defined(__cplusplus) + return z._Val[0]; +#else + return crealf(z); +#endif +} + +static inline void npy_csetrealf(npy_cfloat *z, const float r) +{ + ((float *) z)[0] = r; +} + +static inline float npy_cimagf(const npy_cfloat z) +{ +#if defined(__cplusplus) + return z._Val[1]; +#else + return cimagf(z); +#endif +} + +static inline void npy_csetimagf(npy_cfloat *z, const float i) +{ + ((float *) z)[1] = i; +} + +static inline npy_longdouble npy_creall(const npy_clongdouble z) +{ +#if defined(__cplusplus) + return (npy_longdouble)z._Val[0]; +#else + return creall(z); +#endif +} + +static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) +{ + ((longdouble_t *) z)[0] = r; +} + +static inline npy_longdouble npy_cimagl(const npy_clongdouble z) +{ +#if defined(__cplusplus) + return (npy_longdouble)z._Val[1]; +#else + return cimagl(z); +#endif +} + +static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i) +{ + ((longdouble_t *) z)[1] = i; +} + +#define NPY_CSETREAL(z, r) npy_csetreal(z, r) +#define NPY_CSETIMAG(z, i) npy_csetimag(z, i) +#define NPY_CSETREALF(z, r) npy_csetrealf(z, r) +#define NPY_CSETIMAGF(z, i) npy_csetimagf(z, i) +#define NPY_CSETREALL(z, r) npy_csetreall(z, r) +#define NPY_CSETIMAGL(z, i) npy_csetimagl(z, i) + +static inline npy_cdouble npy_cpack(double x, double y) +{ + npy_cdouble z; + 
npy_csetreal(&z, x); + npy_csetimag(&z, y); + return z; +} + +static inline npy_cfloat npy_cpackf(float x, float y) +{ + npy_cfloat z; + npy_csetrealf(&z, x); + npy_csetimagf(&z, y); + return z; +} + +static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y) +{ + npy_clongdouble z; + npy_csetreall(&z, x); + npy_csetimagl(&z, y); + return z; +} + +/* + * Double precision complex functions + */ +double npy_cabs(npy_cdouble z); +double npy_carg(npy_cdouble z); + +npy_cdouble npy_cexp(npy_cdouble z); +npy_cdouble npy_clog(npy_cdouble z); +npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); + +npy_cdouble npy_csqrt(npy_cdouble z); + +npy_cdouble npy_ccos(npy_cdouble z); +npy_cdouble npy_csin(npy_cdouble z); +npy_cdouble npy_ctan(npy_cdouble z); + +npy_cdouble npy_ccosh(npy_cdouble z); +npy_cdouble npy_csinh(npy_cdouble z); +npy_cdouble npy_ctanh(npy_cdouble z); + +npy_cdouble npy_cacos(npy_cdouble z); +npy_cdouble npy_casin(npy_cdouble z); +npy_cdouble npy_catan(npy_cdouble z); + +npy_cdouble npy_cacosh(npy_cdouble z); +npy_cdouble npy_casinh(npy_cdouble z); +npy_cdouble npy_catanh(npy_cdouble z); + +/* + * Single precision complex functions + */ +float npy_cabsf(npy_cfloat z); +float npy_cargf(npy_cfloat z); + +npy_cfloat npy_cexpf(npy_cfloat z); +npy_cfloat npy_clogf(npy_cfloat z); +npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); + +npy_cfloat npy_csqrtf(npy_cfloat z); + +npy_cfloat npy_ccosf(npy_cfloat z); +npy_cfloat npy_csinf(npy_cfloat z); +npy_cfloat npy_ctanf(npy_cfloat z); + +npy_cfloat npy_ccoshf(npy_cfloat z); +npy_cfloat npy_csinhf(npy_cfloat z); +npy_cfloat npy_ctanhf(npy_cfloat z); + +npy_cfloat npy_cacosf(npy_cfloat z); +npy_cfloat npy_casinf(npy_cfloat z); +npy_cfloat npy_catanf(npy_cfloat z); + +npy_cfloat npy_cacoshf(npy_cfloat z); +npy_cfloat npy_casinhf(npy_cfloat z); +npy_cfloat npy_catanhf(npy_cfloat z); + + +/* + * Extended precision complex functions + */ +npy_longdouble npy_cabsl(npy_clongdouble z); +npy_longdouble npy_cargl(npy_clongdouble z); + +npy_clongdouble npy_cexpl(npy_clongdouble z); +npy_clongdouble npy_clogl(npy_clongdouble z); +npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); + +npy_clongdouble npy_csqrtl(npy_clongdouble z); + +npy_clongdouble npy_ccosl(npy_clongdouble z); +npy_clongdouble npy_csinl(npy_clongdouble z); +npy_clongdouble npy_ctanl(npy_clongdouble z); + +npy_clongdouble npy_ccoshl(npy_clongdouble z); +npy_clongdouble npy_csinhl(npy_clongdouble z); +npy_clongdouble npy_ctanhl(npy_clongdouble z); + +npy_clongdouble npy_cacosl(npy_clongdouble z); +npy_clongdouble npy_casinl(npy_clongdouble z); +npy_clongdouble npy_catanl(npy_clongdouble z); + +npy_clongdouble npy_cacoshl(npy_clongdouble z); +npy_clongdouble npy_casinhl(npy_clongdouble z); +npy_clongdouble npy_catanhl(npy_clongdouble z); + + +/* + * Functions that set the floating point error + * status word. + */ + +/* + * platform-dependent code translates floating point + * status to an integer sum of these values + */ +#define NPY_FPE_DIVIDEBYZERO 1 +#define NPY_FPE_OVERFLOW 2 +#define NPY_FPE_UNDERFLOW 4 +#define NPY_FPE_INVALID 8 + +int npy_clear_floatstatus_barrier(char*); +int npy_get_floatstatus_barrier(char*); +/* + * use caution with these - clang and gcc8.1 are known to reorder calls + * to this form of the function which can defeat the check. 
The _barrier + * form of the call is preferable, where the argument is + * (char*)&local_variable + */ +int npy_clear_floatstatus(void); +int npy_get_floatstatus(void); + +void npy_set_floatstatus_divbyzero(void); +void npy_set_floatstatus_overflow(void); +void npy_set_floatstatus_underflow(void); +void npy_set_floatstatus_invalid(void); + +#ifdef __cplusplus +} +#endif + +#if NPY_INLINE_MATH +#include "npy_math_internal.h" +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */ diff --git a/python/numpy/_core/include/numpy/npy_no_deprecated_api.h b/python/numpy/_core/include/numpy/npy_no_deprecated_api.h new file mode 100644 index 000000000..39658c0bd --- /dev/null +++ b/python/numpy/_core/include/numpy/npy_no_deprecated_api.h @@ -0,0 +1,20 @@ +/* + * This include file is provided for inclusion in Cython *.pyd files where + * one would like to define the NPY_NO_DEPRECATED_API macro. It can be + * included by + * + * cdef extern from "npy_no_deprecated_api.h": pass + * + */ +#ifndef NPY_NO_DEPRECATED_API + +/* put this check here since there may be multiple includes in C extensions. */ +#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \ + defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_) +#error "npy_no_deprecated_api.h" must be first among numpy includes. +#else +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#endif + +#endif /* NPY_NO_DEPRECATED_API */ diff --git a/python/numpy/_core/include/numpy/npy_os.h b/python/numpy/_core/include/numpy/npy_os.h new file mode 100644 index 000000000..0ce5d78b4 --- /dev/null +++ b/python/numpy/_core/include/numpy/npy_os.h @@ -0,0 +1,42 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ + +#if defined(linux) || defined(__linux) || defined(__linux__) + #define NPY_OS_LINUX +#elif defined(__FreeBSD__) || defined(__NetBSD__) || \ + defined(__OpenBSD__) || defined(__DragonFly__) + #define NPY_OS_BSD + #ifdef __FreeBSD__ + #define NPY_OS_FREEBSD + #elif defined(__NetBSD__) + #define NPY_OS_NETBSD + #elif defined(__OpenBSD__) + #define NPY_OS_OPENBSD + #elif defined(__DragonFly__) + #define NPY_OS_DRAGONFLY + #endif +#elif defined(sun) || defined(__sun) + #define NPY_OS_SOLARIS +#elif defined(__CYGWIN__) + #define NPY_OS_CYGWIN +/* We are on Windows.*/ +#elif defined(_WIN32) + /* We are using MinGW (64-bit or 32-bit)*/ + #if defined(__MINGW32__) || defined(__MINGW64__) + #define NPY_OS_MINGW + /* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/ + #elif defined(_WIN64) + #define NPY_OS_WIN64 + /* Otherwise assume we are targeting 32-bit Windows*/ + #else + #define NPY_OS_WIN32 + #endif +#elif defined(__APPLE__) + #define NPY_OS_DARWIN +#elif defined(__HAIKU__) + #define NPY_OS_HAIKU +#else + #define NPY_OS_UNKNOWN +#endif + +#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */ diff --git a/python/numpy/_core/include/numpy/numpyconfig.h b/python/numpy/_core/include/numpy/numpyconfig.h new file mode 100644 index 000000000..ba44c28b9 --- /dev/null +++ b/python/numpy/_core/include/numpy/numpyconfig.h @@ -0,0 +1,182 @@ +#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ +#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ + +#include "_numpyconfig.h" + +/* + * On Mac OS X, because there is only one configuration stage for all the archs + * in universal builds, any macro which depends on the arch needs to be + * hardcoded. 
+ * + * Note that distutils/pip will attempt a universal2 build when Python itself + * is built as universal2, hence this hardcoding is needed even if we do not + * support universal2 wheels anymore (see gh-22796). + * This code block can be removed after we have dropped the setup.py based + * build completely. + */ +#ifdef __APPLE__ + #undef NPY_SIZEOF_LONG + + #ifdef __LP64__ + #define NPY_SIZEOF_LONG 8 + #else + #define NPY_SIZEOF_LONG 4 + #endif + + #undef NPY_SIZEOF_LONGDOUBLE + #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE + #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE + #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE + #endif + #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE + #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE + #endif + + #if defined(__arm64__) + #define NPY_SIZEOF_LONGDOUBLE 8 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16 + #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1 + #elif defined(__x86_64) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1 + #elif defined (__i386) + #define NPY_SIZEOF_LONGDOUBLE 12 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24 + #elif defined(__ppc__) || defined (__ppc64__) + #define NPY_SIZEOF_LONGDOUBLE 16 + #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32 + #else + #error "unknown architecture" + #endif +#endif + + +/** + * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro, + * we include API version numbers for specific versions of NumPy. + * To exclude all API that was deprecated as of 1.7, add the following before + * #including any NumPy headers: + * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + * The same is true for NPY_TARGET_VERSION, although NumPy will default to + * a backwards compatible build anyway. + */ +#define NPY_1_7_API_VERSION 0x00000007 +#define NPY_1_8_API_VERSION 0x00000008 +#define NPY_1_9_API_VERSION 0x00000009 +#define NPY_1_10_API_VERSION 0x0000000a +#define NPY_1_11_API_VERSION 0x0000000a +#define NPY_1_12_API_VERSION 0x0000000a +#define NPY_1_13_API_VERSION 0x0000000b +#define NPY_1_14_API_VERSION 0x0000000c +#define NPY_1_15_API_VERSION 0x0000000c +#define NPY_1_16_API_VERSION 0x0000000d +#define NPY_1_17_API_VERSION 0x0000000d +#define NPY_1_18_API_VERSION 0x0000000d +#define NPY_1_19_API_VERSION 0x0000000d +#define NPY_1_20_API_VERSION 0x0000000e +#define NPY_1_21_API_VERSION 0x0000000e +#define NPY_1_22_API_VERSION 0x0000000f +#define NPY_1_23_API_VERSION 0x00000010 +#define NPY_1_24_API_VERSION 0x00000010 +#define NPY_1_25_API_VERSION 0x00000011 +#define NPY_2_0_API_VERSION 0x00000012 +#define NPY_2_1_API_VERSION 0x00000013 +#define NPY_2_2_API_VERSION 0x00000013 +#define NPY_2_3_API_VERSION 0x00000014 + + +/* + * Binary compatibility version number. This number is increased + * whenever the C-API is changed such that binary compatibility is + * broken, i.e. whenever a recompile of extension modules is needed. + */ +#define NPY_VERSION NPY_ABI_VERSION + +/* + * Minor API version we are compiling to be compatible with. The version + * Number is always increased when the API changes via: `NPY_API_VERSION` + * (and should maybe just track the NumPy version). + * + * If we have an internal build, we always target the current version of + * course. + * + * For downstream users, we default to an older version to provide them with + * maximum compatibility by default. Downstream can choose to extend that + * default, or narrow it down if they wish to use newer API. 
If you adjust + * this, consider the Python version support (example for 1.25.x): + * + * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12) + * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9 + * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8 + * NumPy 1.15.x supports Python: ... 3.6 3.7 + * + * Users of the stable ABI may wish to target the last Python that is not + * end of life. This would be 3.8 at NumPy 1.25 release time. + * 1.17 as default was the choice of oldest-support-numpy at the time and + * has in practice no limit (compared to 1.19). Even earlier becomes legacy. + */ +#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD + /* NumPy internal build, always use current version. */ + #define NPY_FEATURE_VERSION NPY_API_VERSION +#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION + /* user provided a target version, use it */ + #define NPY_FEATURE_VERSION NPY_TARGET_VERSION +#else + /* Use the default (increase when dropping Python 3.11 support) */ + #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION +#endif + +/* Sanity check the (requested) feature version */ +#if NPY_FEATURE_VERSION > NPY_API_VERSION + #error "NPY_TARGET_VERSION higher than NumPy headers!" +#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION + /* No support for irrelevant old targets, no need for error, but warn. */ + #ifndef _MSC_VER + #warning "Requested NumPy target lower than supported NumPy 1.15." + #else + #define _WARN___STR2__(x) #x + #define _WARN___STR1__(x) _WARN___STR2__(x) + #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.") + #endif +#endif + +/* + * We define a human readable translation to the Python version of NumPy + * for error messages (and also to allow grepping the binaries for conda). + */ +#if NPY_FEATURE_VERSION == NPY_1_7_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.7" +#elif NPY_FEATURE_VERSION == NPY_1_8_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.8" +#elif NPY_FEATURE_VERSION == NPY_1_9_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.9" +#elif NPY_FEATURE_VERSION == NPY_1_10_API_VERSION /* also 1.11, 1.12 */ + #define NPY_FEATURE_VERSION_STRING "1.10" +#elif NPY_FEATURE_VERSION == NPY_1_13_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.13" +#elif NPY_FEATURE_VERSION == NPY_1_14_API_VERSION /* also 1.15 */ + #define NPY_FEATURE_VERSION_STRING "1.14" +#elif NPY_FEATURE_VERSION == NPY_1_16_API_VERSION /* also 1.17, 1.18, 1.19 */ + #define NPY_FEATURE_VERSION_STRING "1.16" +#elif NPY_FEATURE_VERSION == NPY_1_20_API_VERSION /* also 1.21 */ + #define NPY_FEATURE_VERSION_STRING "1.20" +#elif NPY_FEATURE_VERSION == NPY_1_22_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.22" +#elif NPY_FEATURE_VERSION == NPY_1_23_API_VERSION /* also 1.24 */ + #define NPY_FEATURE_VERSION_STRING "1.23" +#elif NPY_FEATURE_VERSION == NPY_1_25_API_VERSION + #define NPY_FEATURE_VERSION_STRING "1.25" +#elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.0" +#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.1" +#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.3" +#else + #error "Missing version string define for new NumPy version." 
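+/*
+ * Illustrative example (editor's sketch, not part of the upstream header):
+ * a downstream extension that wants to pin itself to the NumPy 1.22 API
+ * surface would compile with
+ *
+ *     #define NPY_TARGET_VERSION NPY_1_22_API_VERSION
+ *     #define NPY_NO_DEPRECATED_API NPY_1_22_API_VERSION
+ *     #include <numpy/arrayobject.h>
+ *
+ * so that NPY_FEATURE_VERSION resolves to 0x0000000f and
+ * NPY_FEATURE_VERSION_STRING to "1.22" in the chain above.
+ */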
+#endif
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */
diff --git a/python/numpy/_core/include/numpy/random/LICENSE.txt b/python/numpy/_core/include/numpy/random/LICENSE.txt
new file mode 100644
index 000000000..d72a7c388
--- /dev/null
+++ b/python/numpy/_core/include/numpy/random/LICENSE.txt
@@ -0,0 +1,21 @@
+ zlib License
+ ------------
+
+ Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+ Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+    claim that you wrote the original software. If you use this software
+    in a product, an acknowledgment in the product documentation would be
+    appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+    misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
diff --git a/python/numpy/_core/include/numpy/random/bitgen.h b/python/numpy/_core/include/numpy/random/bitgen.h
new file mode 100644
index 000000000..162dd5c57
--- /dev/null
+++ b/python/numpy/_core/include/numpy/random/bitgen.h
@@ -0,0 +1,20 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_
+
+#pragma once
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Must match the declaration in numpy/random/<any>.pxd */
+
+typedef struct bitgen {
+  void *state;
+  uint64_t (*next_uint64)(void *st);
+  uint32_t (*next_uint32)(void *st);
+  double (*next_double)(void *st);
+  uint64_t (*next_raw)(void *st);
+} bitgen_t;
+
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_BITGEN_H_ */
diff --git a/python/numpy/_core/include/numpy/random/distributions.h b/python/numpy/_core/include/numpy/random/distributions.h
new file mode 100644
index 000000000..e7fa4bd00
--- /dev/null
+++ b/python/numpy/_core/include/numpy/random/distributions.h
@@ -0,0 +1,209 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+#include "numpy/npy_common.h"
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "numpy/npy_math.h"
+#include "numpy/random/bitgen.h"
+
+/*
+ * RAND_INT_TYPE is used to share integer generators with RandomState which
+ * used long in place of int64_t. If changing a distribution that uses
+ * RAND_INT_TYPE, then the original unmodified copy must be retained for
+ * use in RandomState by copying to the legacy distributions source file.
+ */
+#ifdef NP_RANDOM_LEGACY
+#define RAND_INT_TYPE long
+#define RAND_INT_MAX LONG_MAX
+#else
+#define RAND_INT_TYPE int64_t
+#define RAND_INT_MAX INT64_MAX
+#endif
+
+#ifdef _MSC_VER
+#define DECLDIR __declspec(dllexport)
+#else
+#define DECLDIR extern
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? x : y)
+#define MAX(x, y) (((x) > (y)) ?
x : y) +#endif + +#ifndef M_PI +#define M_PI 3.14159265358979323846264338328 +#endif + +typedef struct s_binomial_t { + int has_binomial; /* !=0: following parameters initialized for binomial */ + double psave; + RAND_INT_TYPE nsave; + double r; + double q; + double fm; + RAND_INT_TYPE m; + double p1; + double xm; + double xl; + double xr; + double c; + double laml; + double lamr; + double p2; + double p3; + double p4; +} binomial_t; + +DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state); +DECLDIR double random_standard_uniform(bitgen_t *bitgen_state); +DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state); +DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state); +DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state); +DECLDIR uint64_t random_uint(bitgen_t *bitgen_state); + +DECLDIR double random_standard_exponential(bitgen_t *bitgen_state); +DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *); + +DECLDIR double random_standard_normal(bitgen_t *bitgen_state); +DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state); +DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *); +DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *); +DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape); +DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape); + +DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale); + +DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale); +DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale); + +DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale); +DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range); +DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b); +DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df); +DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden); +DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state); +DECLDIR double random_pareto(bitgen_t *bitgen_state, double a); +DECLDIR double random_weibull(bitgen_t *bitgen_state, double a); +DECLDIR double random_power(bitgen_t *bitgen_state, double a); +DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale); +DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma); +DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode); +DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df); +DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc); +DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc); +DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale); +DECLDIR double 
random_vonmises(bitgen_t *bitgen_state, double mu, double kappa); +DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right); + +DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam); +DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, + double p); + +DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, + int64_t n, binomial_t *binomial); + +DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p); +DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p); +DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a); +DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, + int64_t good, int64_t bad, int64_t sample); +DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max); + +/* Generate random uint64 numbers in closed interval [off, off + rng]. */ +DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, uint64_t mask, + bool use_masked); + +/* Generate random uint32 numbers in closed interval [off, off + rng]. */ +DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, + uint32_t off, uint32_t rng, + uint32_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, + uint16_t off, uint16_t rng, + uint16_t mask, bool use_masked, + int *bcnt, uint32_t *buf); +DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, uint8_t mask, + bool use_masked, int *bcnt, + uint32_t *buf); +DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_bool mask, + bool use_masked, int *bcnt, + uint32_t *buf); + +DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off, + uint64_t rng, npy_intp cnt, + bool use_masked, uint64_t *out); +DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off, + uint32_t rng, npy_intp cnt, + bool use_masked, uint32_t *out); +DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off, + uint16_t rng, npy_intp cnt, + bool use_masked, uint16_t *out); +DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off, + uint8_t rng, npy_intp cnt, + bool use_masked, uint8_t *out); +DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off, + npy_bool rng, npy_intp cnt, + bool use_masked, npy_bool *out); + +DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix, + double *pix, npy_intp d, binomial_t *binomial); + +/* multivariate hypergeometric, "count" method */ +DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* multivariate hypergeometric, "marginals" method */ +DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates); + +/* Common to legacy-distributions.c and distributions.c but not exported */ + +RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, + RAND_INT_TYPE n, + double p, + binomial_t *binomial); +double 
random_loggam(double x);
+static inline double next_double(bitgen_t *bitgen_state) {
+    return bitgen_state->next_double(bitgen_state->state);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_RANDOM_DISTRIBUTIONS_H_ */
diff --git a/python/numpy/_core/include/numpy/random/libdivide.h b/python/numpy/_core/include/numpy/random/libdivide.h
new file mode 100644
index 000000000..f4eb8039b
--- /dev/null
+++ b/python/numpy/_core/include/numpy/random/libdivide.h
@@ -0,0 +1,2079 @@
+// libdivide.h - Optimized integer division
+// https://libdivide.com
+//
+// Copyright (C) 2010 - 2019 ridiculous_fish, <libdivide@ridiculousfish.com>
+// Copyright (C) 2016 - 2019 Kim Walisch, <kim.walisch@gmail.com>
+//
+// libdivide is dual-licensed under the Boost or zlib licenses.
+// You may use libdivide under the terms of either of these.
+// See LICENSE.txt for more details.
+
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
+
+#define LIBDIVIDE_VERSION "3.0"
+#define LIBDIVIDE_VERSION_MAJOR 3
+#define LIBDIVIDE_VERSION_MINOR 0
+
+#include <stdint.h>
+
+#if defined(__cplusplus)
+    #include <cstdlib>
+    #include <cstdio>
+    #include <type_traits>
+#else
+    #include <stdlib.h>
+    #include <stdio.h>
+#endif
+
+#if defined(LIBDIVIDE_AVX512)
+    #include <immintrin.h>
+#elif defined(LIBDIVIDE_AVX2)
+    #include <immintrin.h>
+#elif defined(LIBDIVIDE_SSE2)
+    #include <emmintrin.h>
+#endif
+
+#if defined(_MSC_VER)
+    #include <intrin.h>
+    // disable warning C4146: unary minus operator applied
+    // to unsigned type, result still unsigned
+    #pragma warning(disable: 4146)
+    #define LIBDIVIDE_VC
+#endif
+
+#if !defined(__has_builtin)
+    #define __has_builtin(x) 0
+#endif
+
+#if defined(__SIZEOF_INT128__)
+    #define HAS_INT128_T
+    // clang-cl on Windows does not yet support 128-bit division
+    #if !(defined(__clang__) && defined(LIBDIVIDE_VC))
+        #define HAS_INT128_DIV
+    #endif
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+    #define LIBDIVIDE_X86_64
+#endif
+
+#if defined(__i386__)
+    #define LIBDIVIDE_i386
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+    #define LIBDIVIDE_GCC_STYLE_ASM
+#endif
+
+#if defined(__cplusplus) || defined(LIBDIVIDE_VC)
+    #define LIBDIVIDE_FUNCTION __FUNCTION__
+#else
+    #define LIBDIVIDE_FUNCTION __func__
+#endif
+
+#define LIBDIVIDE_ERROR(msg) \
+    do { \
+        fprintf(stderr, "libdivide.h:%d: %s(): Error: %s\n", \
+            __LINE__, LIBDIVIDE_FUNCTION, msg); \
+        abort(); \
+    } while (0)
+
+#if defined(LIBDIVIDE_ASSERTIONS_ON)
+    #define LIBDIVIDE_ASSERT(x) \
+        do { \
+            if (!(x)) { \
+                fprintf(stderr, "libdivide.h:%d: %s(): Assertion failed: %s\n", \
+                    __LINE__, LIBDIVIDE_FUNCTION, #x); \
+                abort(); \
+            } \
+        } while (0)
+#else
+    #define LIBDIVIDE_ASSERT(x)
+#endif
+
+#ifdef __cplusplus
+namespace libdivide {
+#endif
+
+// pack divider structs to prevent compilers from padding.
+// This reduces memory usage by up to 43% when using a large
+// array of libdivide dividers and improves performance
+// by up to 10% because of reduced memory bandwidth.
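+//
+// (Editor's note, a minimal sketch of the effect and not part of libdivide:
+// without pack(1), a struct holding a uint64_t followed by a uint8_t is
+// typically padded to 16 bytes by alignment rules, e.g.
+//
+//     struct unpacked_u64 { uint64_t magic; uint8_t more; };  // usually 16
+//
+// while the packed libdivide_u64_t below occupies exactly 9 bytes.)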
+#pragma pack(push, 1)
+
+struct libdivide_u32_t {
+    uint32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s32_t {
+    int32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u64_t {
+    uint64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s64_t {
+    int64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u32_branchfree_t {
+    uint32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s32_branchfree_t {
+    int32_t magic;
+    uint8_t more;
+};
+
+struct libdivide_u64_branchfree_t {
+    uint64_t magic;
+    uint8_t more;
+};
+
+struct libdivide_s64_branchfree_t {
+    int64_t magic;
+    uint8_t more;
+};
+
+#pragma pack(pop)
+
+// Explanation of the "more" field:
+//
+// * Bits 0-5 is the shift value (for shift path or mult path).
+// * Bit 6 is the add indicator for mult path.
+// * Bit 7 is set if the divisor is negative. We use bit 7 as the negative
+//   divisor indicator so that we can efficiently use sign extension to
+//   create a bitmask with all bits set to 1 (if the divisor is negative)
+//   or 0 (if the divisor is positive).
+//
+// u32: [0-4] shift value
+//      [5] ignored
+//      [6] add indicator
+//      magic number of 0 indicates shift path
+//
+// s32: [0-4] shift value
+//      [5] ignored
+//      [6] add indicator
+//      [7] indicates negative divisor
+//      magic number of 0 indicates shift path
+//
+// u64: [0-5] shift value
+//      [6] add indicator
+//      magic number of 0 indicates shift path
+//
+// s64: [0-5] shift value
+//      [6] add indicator
+//      [7] indicates negative divisor
+//      magic number of 0 indicates shift path
+//
+// In s32 and s64 branchfull mode, the magic number is negated according to
+// whether the divisor is negated. In branchfree strategy, it is not negated.
+
+enum {
+    LIBDIVIDE_32_SHIFT_MASK = 0x1F,
+    LIBDIVIDE_64_SHIFT_MASK = 0x3F,
+    LIBDIVIDE_ADD_MARKER = 0x40,
+    LIBDIVIDE_NEGATIVE_DIVISOR = 0x80
+};
+
+static inline struct libdivide_s32_t libdivide_s32_gen(int32_t d);
+static inline struct libdivide_u32_t libdivide_u32_gen(uint32_t d);
+static inline struct libdivide_s64_t libdivide_s64_gen(int64_t d);
+static inline struct libdivide_u64_t libdivide_u64_gen(uint64_t d);
+
+static inline struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d);
+static inline struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d);
+static inline struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d);
+static inline struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d);
+
+static inline int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom);
+static inline int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom);
+static inline uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom);
+
+static inline int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom);
+static inline uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom);
+static inline int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom);
+static inline uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom);
+
+static inline int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom);
+static inline uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom);
+static inline int64_t libdivide_s64_recover(const struct libdivide_s64_t
*denom); +static inline uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom); + +static inline int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom); +static inline uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom); +static inline int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom); +static inline uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline uint32_t libdivide_mullhi_u32(uint32_t x, uint32_t y) { + uint64_t xl = x, yl = y; + uint64_t rl = xl * yl; + return (uint32_t)(rl >> 32); +} + +static inline int32_t libdivide_mullhi_s32(int32_t x, int32_t y) { + int64_t xl = x, yl = y; + int64_t rl = xl * yl; + // needs to be arithmetic shift + return (int32_t)(rl >> 32); +} + +static inline uint64_t libdivide_mullhi_u64(uint64_t x, uint64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __umulh(x, y); +#elif defined(HAS_INT128_T) + __uint128_t xl = x, yl = y; + __uint128_t rl = xl * yl; + return (uint64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t y0 = (uint32_t)(y & mask); + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + uint64_t x0y1 = x0 * (uint64_t)y1; + uint64_t x1y0 = x1 * (uint64_t)y0; + uint64_t x1y1 = x1 * (uint64_t)y1; + uint64_t temp = x1y0 + x0y0_hi; + uint64_t temp_lo = temp & mask; + uint64_t temp_hi = temp >> 32; + + return x1y1 + temp_hi + ((temp_lo + x0y1) >> 32); +#endif +} + +static inline int64_t libdivide_mullhi_s64(int64_t x, int64_t y) { +#if defined(LIBDIVIDE_VC) && \ + defined(LIBDIVIDE_X86_64) + return __mulh(x, y); +#elif defined(HAS_INT128_T) + __int128_t xl = x, yl = y; + __int128_t rl = xl * yl; + return (int64_t)(rl >> 64); +#else + // full 128 bits are x0 * y0 + (x0 * y1 << 32) + (x1 * y0 << 32) + (x1 * y1 << 64) + uint32_t mask = 0xFFFFFFFF; + uint32_t x0 = (uint32_t)(x & mask); + uint32_t y0 = (uint32_t)(y & mask); + int32_t x1 = (int32_t)(x >> 32); + int32_t y1 = (int32_t)(y >> 32); + uint32_t x0y0_hi = libdivide_mullhi_u32(x0, y0); + int64_t t = x1 * (int64_t)y0 + x0y0_hi; + int64_t w1 = x0 * (int64_t)y1 + (t & mask); + + return x1 * (int64_t)y1 + (t >> 32) + (w1 >> 32); +#endif +} + +static inline int32_t libdivide_count_leading_zeros32(uint32_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clz) + // Fast way to count leading zeros + return __builtin_clz(val); +#elif defined(LIBDIVIDE_VC) + unsigned long result; + if (_BitScanReverse(&result, val)) { + return 31 - result; + } + return 0; +#else + if (val == 0) + return 32; + int32_t result = 8; + uint32_t hi = 0xFFU << 24; + while ((val & hi) == 0) { + hi >>= 8; + result += 8; + } + while (val & hi) { + result -= 1; + hi <<= 1; + } + return result; +#endif +} + +static inline int32_t libdivide_count_leading_zeros64(uint64_t val) { +#if defined(__GNUC__) || \ + __has_builtin(__builtin_clzll) + // Fast way to count leading zeros + return __builtin_clzll(val); +#elif defined(LIBDIVIDE_VC) && defined(_WIN64) + unsigned long result; + if (_BitScanReverse64(&result, val)) { + return 63 - result; + } + return 0; +#else + uint32_t hi = val >> 32; + uint32_t lo = val & 0xFFFFFFFF; + if (hi != 0) return libdivide_count_leading_zeros32(hi); + 
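+    // (worked example for this fallback path: val = 0x000000000000FFFF has
+    //  hi == 0, so the result is 32 + libdivide_count_leading_zeros32(0xFFFF)
+    //  = 32 + 16 = 48)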
return 32 + libdivide_count_leading_zeros32(lo); +#endif +} + +// libdivide_64_div_32_to_32: divides a 64-bit uint {u1, u0} by a 32-bit +// uint {v}. The result must fit in 32 bits. +// Returns the quotient directly and the remainder in *r +static inline uint32_t libdivide_64_div_32_to_32(uint32_t u1, uint32_t u0, uint32_t v, uint32_t *r) { +#if (defined(LIBDIVIDE_i386) || defined(LIBDIVIDE_X86_64)) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint32_t result; + __asm__("divl %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#else + uint64_t n = ((uint64_t)u1 << 32) | u0; + uint32_t result = (uint32_t)(n / v); + *r = (uint32_t)(n - result * (uint64_t)v); + return result; +#endif +} + +// libdivide_128_div_64_to_64: divides a 128-bit uint {u1, u0} by a 64-bit +// uint {v}. The result must fit in 64 bits. +// Returns the quotient directly and the remainder in *r +static uint64_t libdivide_128_div_64_to_64(uint64_t u1, uint64_t u0, uint64_t v, uint64_t *r) { +#if defined(LIBDIVIDE_X86_64) && \ + defined(LIBDIVIDE_GCC_STYLE_ASM) + uint64_t result; + __asm__("divq %[v]" + : "=a"(result), "=d"(*r) + : [v] "r"(v), "a"(u0), "d"(u1) + ); + return result; +#elif defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t n = ((__uint128_t)u1 << 64) | u0; + uint64_t result = (uint64_t)(n / v); + *r = (uint64_t)(n - result * (__uint128_t)v); + return result; +#else + // Code taken from Hacker's Delight: + // http://www.hackersdelight.org/HDcode/divlu.c. + // License permits inclusion here per: + // http://www.hackersdelight.org/permissions.htm + + const uint64_t b = (1ULL << 32); // Number base (32 bits) + uint64_t un1, un0; // Norm. dividend LSD's + uint64_t vn1, vn0; // Norm. divisor digits + uint64_t q1, q0; // Quotient digits + uint64_t un64, un21, un10; // Dividend digit pairs + uint64_t rhat; // A remainder + int32_t s; // Shift amount for norm + + // If overflow, set rem. to an impossible value, + // and return the largest possible quotient + if (u1 >= v) { + *r = (uint64_t) -1; + return (uint64_t) -1; + } + + // count leading zeros + s = libdivide_count_leading_zeros64(v); + if (s > 0) { + // Normalize divisor + v = v << s; + un64 = (u1 << s) | (u0 >> (64 - s)); + un10 = u0 << s; // Shift dividend left + } else { + // Avoid undefined behavior of (u0 >> 64). + // The behavior is undefined if the right operand is + // negative, or greater than or equal to the length + // in bits of the promoted left operand. 
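+        // (s == 0 can only happen when v already has its most significant
+        //  bit set, i.e. v >= 2^63, so no normalization shift is required)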
+ un64 = u1; + un10 = u0; + } + + // Break divisor up into two 32-bit digits + vn1 = v >> 32; + vn0 = v & 0xFFFFFFFF; + + // Break right half of dividend into two digits + un1 = un10 >> 32; + un0 = un10 & 0xFFFFFFFF; + + // Compute the first quotient digit, q1 + q1 = un64 / vn1; + rhat = un64 - q1 * vn1; + + while (q1 >= b || q1 * vn0 > b * rhat + un1) { + q1 = q1 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + // Multiply and subtract + un21 = un64 * b + un1 - q1 * v; + + // Compute the second quotient digit + q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= b || q0 * vn0 > b * rhat + un0) { + q0 = q0 - 1; + rhat = rhat + vn1; + if (rhat >= b) + break; + } + + *r = (un21 * b + un0 - q0 * v) >> s; + return q1 * b + q0; +#endif +} + +// Bitshift a u128 in place, left (signed_shift > 0) or right (signed_shift < 0) +static inline void libdivide_u128_shift(uint64_t *u1, uint64_t *u0, int32_t signed_shift) { + if (signed_shift > 0) { + uint32_t shift = signed_shift; + *u1 <<= shift; + *u1 |= *u0 >> (64 - shift); + *u0 <<= shift; + } + else if (signed_shift < 0) { + uint32_t shift = -signed_shift; + *u0 >>= shift; + *u0 |= *u1 << (64 - shift); + *u1 >>= shift; + } +} + +// Computes a 128 / 128 -> 64 bit division, with a 128 bit remainder. +static uint64_t libdivide_128_div_128_to_64(uint64_t u_hi, uint64_t u_lo, uint64_t v_hi, uint64_t v_lo, uint64_t *r_hi, uint64_t *r_lo) { +#if defined(HAS_INT128_T) && \ + defined(HAS_INT128_DIV) + __uint128_t ufull = u_hi; + __uint128_t vfull = v_hi; + ufull = (ufull << 64) | u_lo; + vfull = (vfull << 64) | v_lo; + uint64_t res = (uint64_t)(ufull / vfull); + __uint128_t remainder = ufull - (vfull * res); + *r_lo = (uint64_t)remainder; + *r_hi = (uint64_t)(remainder >> 64); + return res; +#else + // Adapted from "Unsigned Doubleword Division" in Hacker's Delight + // We want to compute u / v + typedef struct { uint64_t hi; uint64_t lo; } u128_t; + u128_t u = {u_hi, u_lo}; + u128_t v = {v_hi, v_lo}; + + if (v.hi == 0) { + // divisor v is a 64 bit value, so we just need one 128/64 division + // Note that we are simpler than Hacker's Delight here, because we know + // the quotient fits in 64 bits whereas Hacker's Delight demands a full + // 128 bit quotient + *r_hi = 0; + return libdivide_128_div_64_to_64(u.hi, u.lo, v.lo, r_lo); + } + // Here v >= 2**64 + // We know that v.hi != 0, so count leading zeros is OK + // We have 0 <= n <= 63 + uint32_t n = libdivide_count_leading_zeros64(v.hi); + + // Normalize the divisor so its MSB is 1 + u128_t v1t = v; + libdivide_u128_shift(&v1t.hi, &v1t.lo, n); + uint64_t v1 = v1t.hi; // i.e. v1 = v1t >> 64 + + // To ensure no overflow + u128_t u1 = u; + libdivide_u128_shift(&u1.hi, &u1.lo, -1); + + // Get quotient from divide unsigned insn. + uint64_t rem_ignored; + uint64_t q1 = libdivide_128_div_64_to_64(u1.hi, u1.lo, v1, &rem_ignored); + + // Undo normalization and division of u by 2. + u128_t q0 = {0, q1}; + libdivide_u128_shift(&q0.hi, &q0.lo, n); + libdivide_u128_shift(&q0.hi, &q0.lo, -63); + + // Make q0 correct or too small by 1 + // Equivalent to `if (q0 != 0) q0 = q0 - 1;` + if (q0.hi != 0 || q0.lo != 0) { + q0.hi -= (q0.lo == 0); // borrow + q0.lo -= 1; + } + + // Now q0 is correct. + // Compute q0 * v as q0v + // = (q0.hi << 64 + q0.lo) * (v.hi << 64 + v.lo) + // = (q0.hi * v.hi << 128) + (q0.hi * v.lo << 64) + + // (q0.lo * v.hi << 64) + q0.lo * v.lo) + // Each term is 128 bit + // High half of full product (upper 128 bits!) 
are dropped + u128_t q0v = {0, 0}; + q0v.hi = q0.hi*v.lo + q0.lo*v.hi + libdivide_mullhi_u64(q0.lo, v.lo); + q0v.lo = q0.lo*v.lo; + + // Compute u - q0v as u_q0v + // This is the remainder + u128_t u_q0v = u; + u_q0v.hi -= q0v.hi + (u.lo < q0v.lo); // second term is borrow + u_q0v.lo -= q0v.lo; + + // Check if u_q0v >= v + // This checks if our remainder is larger than the divisor + if ((u_q0v.hi > v.hi) || + (u_q0v.hi == v.hi && u_q0v.lo >= v.lo)) { + // Increment q0 + q0.lo += 1; + q0.hi += (q0.lo == 0); // carry + + // Subtract v from remainder + u_q0v.hi -= v.hi + (u_q0v.lo < v.lo); + u_q0v.lo -= v.lo; + } + + *r_hi = u_q0v.hi; + *r_lo = u_q0v.lo; + + LIBDIVIDE_ASSERT(q0.hi == 0); + return q0.lo; +#endif +} + +////////// UINT32 + +static inline struct libdivide_u32_t libdivide_internal_u32_gen(uint32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_u32_t result; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(d); + + // Power of 2 + if ((d & (d - 1)) == 0) { + // We need to subtract 1 from the shift value in case of an unsigned + // branchfree divider because there is a hardcoded right shift by 1 + // in its division algorithm. Because of this we also need to add back + // 1 in its recovery algorithm. + result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint8_t more; + uint32_t rem, proposed_m; + proposed_m = libdivide_64_div_32_to_32(1U << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint32_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && (e < (1U << floor_log_2_d))) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 33-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint32_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases. + } + return result; +} + +struct libdivide_u32_t libdivide_u32_gen(uint32_t d) { + return libdivide_internal_u32_gen(d, 0); +} + +struct libdivide_u32_branchfree_t libdivide_u32_branchfree_gen(uint32_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u32_t tmp = libdivide_internal_u32_gen(d, 1); + struct libdivide_u32_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_32_SHIFT_MASK)}; + return ret; +} + +uint32_t libdivide_u32_do(uint32_t numer, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint32_t q = libdivide_mullhi_u32(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint32_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_32_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. 
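+            // (per the "more" field layout above: without LIBDIVIDE_ADD_MARKER
+            //  the upper bits of 'more' are clear, so 'more' is exactly the
+            //  shift amount)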
+            return q >> more;
+        }
+    }
+}
+
+uint32_t libdivide_u32_branchfree_do(uint32_t numer, const struct libdivide_u32_branchfree_t *denom) {
+    uint32_t q = libdivide_mullhi_u32(denom->magic, numer);
+    uint32_t t = ((numer - q) >> 1) + q;
+    return t >> denom->more;
+}
+
+uint32_t libdivide_u32_recover(const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        return 1U << shift;
+    } else if (!(more & LIBDIVIDE_ADD_MARKER)) {
+        // We compute q = n/d = n*m / 2^(32 + shift)
+        // Therefore we have d = 2^(32 + shift) / m
+        // We need to ceil it.
+        // We know d is not a power of 2, so m is not a power of 2,
+        // so we can just add 1 to the floor
+        uint32_t hi_dividend = 1U << shift;
+        uint32_t rem_ignored;
+        return 1 + libdivide_64_div_32_to_32(hi_dividend, 0, denom->magic, &rem_ignored);
+    } else {
+        // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+        // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+        // Also note that shift may be as high as 31, so shift + 1 will
+        // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+        // then double the quotient and remainder.
+        uint64_t half_n = 1ULL << (32 + shift);
+        uint64_t d = (1ULL << 32) | denom->magic;
+        // Note that the quotient is guaranteed <= 32 bits, but the remainder
+        // may need 33!
+        uint32_t half_q = (uint32_t)(half_n / d);
+        uint64_t rem = half_n % d;
+        // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+        // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+        uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+        // We rounded down in gen (hence +1)
+        return full_q + 1;
+    }
+}
+
+uint32_t libdivide_u32_branchfree_recover(const struct libdivide_u32_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        return 1U << (shift + 1);
+    } else {
+        // Here we wish to compute d = 2^(32+shift+1)/(m+2^32).
+        // Notice (m + 2^32) is a 33 bit number. Use 64 bit division for now
+        // Also note that shift may be as high as 31, so shift + 1 will
+        // overflow. So we have to compute it as 2^(32+shift)/(m+2^32), and
+        // then double the quotient and remainder.
+        uint64_t half_n = 1ULL << (32 + shift);
+        uint64_t d = (1ULL << 32) | denom->magic;
+        // Note that the quotient is guaranteed <= 32 bits, but the remainder
+        // may need 33!
+        uint32_t half_q = (uint32_t)(half_n / d);
+        uint64_t rem = half_n % d;
+        // We computed 2^(32+shift)/(m+2^32)
+        // Need to double it, and then add 1 to the quotient if doubling the
+        // remainder would increase the quotient.
+        // Note that rem<<1 cannot overflow, since rem < d and d is 33 bits
+        uint32_t full_q = half_q + half_q + ((rem<<1) >= d);
+
+        // We rounded down in gen (hence +1)
+        return full_q + 1;
+    }
+}
+
+/////////// UINT64
+
+static inline struct libdivide_u64_t libdivide_internal_u64_gen(uint64_t d, int branchfree) {
+    if (d == 0) {
+        LIBDIVIDE_ERROR("divider must be != 0");
+    }
+
+    struct libdivide_u64_t result;
+    uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(d);
+
+    // Power of 2
+    if ((d & (d - 1)) == 0) {
+        // We need to subtract 1 from the shift value in case of an unsigned
+        // branchfree divider because there is a hardcoded right shift by 1
+        // in its division algorithm. Because of this we also need to add back
+        // 1 in its recovery algorithm.
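+        // (sketch: in the branchfree case magic stays 0, so branchfree_do
+        //  computes t = numer >> 1 and then t >> (floor_log_2_d - 1),
+        //  i.e. numer >> floor_log_2_d in total, the correct quotient)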
+ result.magic = 0; + result.more = (uint8_t)(floor_log_2_d - (branchfree != 0)); + } else { + uint64_t proposed_m, rem; + uint8_t more; + // (1 << (64 + floor_log_2_d)) / d + proposed_m = libdivide_128_div_64_to_64(1ULL << floor_log_2_d, 0, d, &rem); + + LIBDIVIDE_ASSERT(rem > 0 && rem < d); + const uint64_t e = d - rem; + + // This power works if e < 2**floor_log_2_d. + if (!branchfree && e < (1ULL << floor_log_2_d)) { + // This power works + more = floor_log_2_d; + } else { + // We have to use the general 65-bit algorithm. We need to compute + // (2**power) / d. However, we already have (2**(power-1))/d and + // its remainder. By doubling both, and then correcting the + // remainder, we can compute the larger division. + // don't care about overflow here - in fact, we expect it + proposed_m += proposed_m; + const uint64_t twice_rem = rem + rem; + if (twice_rem >= d || twice_rem < rem) proposed_m += 1; + more = floor_log_2_d | LIBDIVIDE_ADD_MARKER; + } + result.magic = 1 + proposed_m; + result.more = more; + // result.more's shift should in general be ceil_log_2_d. But if we + // used the smaller power, we subtract one from the shift because we're + // using the smaller power. If we're using the larger power, we + // subtract one from the shift because it's taken care of by the add + // indicator. So floor_log_2_d happens to be correct in both cases, + // which is why we do it outside of the if statement. + } + return result; +} + +struct libdivide_u64_t libdivide_u64_gen(uint64_t d) { + return libdivide_internal_u64_gen(d, 0); +} + +struct libdivide_u64_branchfree_t libdivide_u64_branchfree_gen(uint64_t d) { + if (d == 1) { + LIBDIVIDE_ERROR("branchfree divider must be != 1"); + } + struct libdivide_u64_t tmp = libdivide_internal_u64_gen(d, 1); + struct libdivide_u64_branchfree_t ret = {tmp.magic, (uint8_t)(tmp.more & LIBDIVIDE_64_SHIFT_MASK)}; + return ret; +} + +uint64_t libdivide_u64_do(uint64_t numer, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return numer >> more; + } + else { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + if (more & LIBDIVIDE_ADD_MARKER) { + uint64_t t = ((numer - q) >> 1) + q; + return t >> (more & LIBDIVIDE_64_SHIFT_MASK); + } + else { + // All upper bits are 0, + // don't need to mask them off. + return q >> more; + } + } +} + +uint64_t libdivide_u64_branchfree_do(uint64_t numer, const struct libdivide_u64_branchfree_t *denom) { + uint64_t q = libdivide_mullhi_u64(denom->magic, numer); + uint64_t t = ((numer - q) >> 1) + q; + return t >> denom->more; +} + +uint64_t libdivide_u64_recover(const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << shift; + } else if (!(more & LIBDIVIDE_ADD_MARKER)) { + // We compute q = n/d = n*m / 2^(64 + shift) + // Therefore we have d = 2^(64 + shift) / m + // We need to ceil it. + // We know d is not a power of 2, so m is not a power of 2, + // so we can just add 1 to the floor + uint64_t hi_dividend = 1ULL << shift; + uint64_t rem_ignored; + return 1 + libdivide_128_div_64_to_64(hi_dividend, 0, denom->magic, &rem_ignored); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. 
+ // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +uint64_t libdivide_u64_branchfree_recover(const struct libdivide_u64_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + + if (!denom->magic) { + return 1ULL << (shift + 1); + } else { + // Here we wish to compute d = 2^(64+shift+1)/(m+2^64). + // Notice (m + 2^64) is a 65 bit number. This gets hairy. See + // libdivide_u32_recover for more on what we do here. + // TODO: do something better than 128 bit math + + // Full n is a (potentially) 129 bit value + // half_n is a 128 bit value + // Compute the hi half of half_n. Low half is 0. + uint64_t half_n_hi = 1ULL << shift, half_n_lo = 0; + // d is a 65 bit value. The high bit is always set to 1. + const uint64_t d_hi = 1, d_lo = denom->magic; + // Note that the quotient is guaranteed <= 64 bits, + // but the remainder may need 65! + uint64_t r_hi, r_lo; + uint64_t half_q = libdivide_128_div_128_to_64(half_n_hi, half_n_lo, d_hi, d_lo, &r_hi, &r_lo); + // We computed 2^(64+shift)/(m+2^64) + // Double the remainder ('dr') and check if that is larger than d + // Note that d is a 65 bit value, so r1 is small and so r1 + r1 + // cannot overflow + uint64_t dr_lo = r_lo + r_lo; + uint64_t dr_hi = r_hi + r_hi + (dr_lo < r_lo); // last term is carry + int dr_exceeds_d = (dr_hi > d_hi) || (dr_hi == d_hi && dr_lo >= d_lo); + uint64_t full_q = half_q + half_q + (dr_exceeds_d ? 1 : 0); + return full_q + 1; + } +} + +/////////// SINT32 + +static inline struct libdivide_s32_t libdivide_internal_s32_gen(int32_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s32_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint32_t ud = (uint32_t)d; + uint32_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 31 - libdivide_count_leading_zeros32(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and normal paths are exactly the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? 
LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    } else {
+        LIBDIVIDE_ASSERT(floor_log_2_d >= 1);
+
+        uint8_t more;
+        // the dividend here is 2**(floor_log_2_d + 31), so the low 32 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+        uint32_t rem, proposed_m;
+        proposed_m = libdivide_64_div_32_to_32(1U << (floor_log_2_d - 1), 0, absD, &rem);
+        const uint32_t e = absD - rem;
+
+        // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+        if (!branchfree && e < (1U << floor_log_2_d)) {
+            // This power works
+            more = floor_log_2_d - 1;
+        } else {
+            // We need to go one higher. This should not make proposed_m
+            // overflow, but it will make it negative when interpreted as an
+            // int32_t.
+            proposed_m += proposed_m;
+            const uint32_t twice_rem = rem + rem;
+            if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+        }
+
+        proposed_m += 1;
+        int32_t magic = (int32_t)proposed_m;
+
+        // Mark if we are negative. Note we only negate the magic number in the
+        // branchfull case.
+        if (d < 0) {
+            more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+            if (!branchfree) {
+                magic = -magic;
+            }
+        }
+
+        result.more = more;
+        result.magic = magic;
+    }
+    return result;
+}
+
+struct libdivide_s32_t libdivide_s32_gen(int32_t d) {
+    return libdivide_internal_s32_gen(d, 0);
+}
+
+struct libdivide_s32_branchfree_t libdivide_s32_branchfree_gen(int32_t d) {
+    struct libdivide_s32_t tmp = libdivide_internal_s32_gen(d, 1);
+    struct libdivide_s32_branchfree_t result = {tmp.magic, tmp.more};
+    return result;
+}
+
+int32_t libdivide_s32_do(int32_t numer, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+
+    if (!denom->magic) {
+        uint32_t sign = (int8_t)more >> 7;
+        uint32_t mask = (1U << shift) - 1;
+        uint32_t uq = numer + ((numer >> 31) & mask);
+        int32_t q = (int32_t)uq;
+        q >>= shift;
+        q = (q ^ sign) - sign;
+        return q;
+    } else {
+        uint32_t uq = (uint32_t)libdivide_mullhi_s32(denom->magic, numer);
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift and then sign extend
+            int32_t sign = (int8_t)more >> 7;
+            // q += (more < 0 ?
-numer : numer) + // cast required to avoid UB + uq += ((uint32_t)numer ^ sign) - sign; + } + int32_t q = (int32_t)uq; + q >>= shift; + q += (q < 0); + return q; + } +} + +int32_t libdivide_s32_branchfree_do(int32_t numer, const struct libdivide_s32_branchfree_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift and then sign extend + int32_t sign = (int8_t)more >> 7; + int32_t magic = denom->magic; + int32_t q = libdivide_mullhi_s32(magic, numer); + q += numer; + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is a power of + // 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + uint32_t q_sign = (uint32_t)(q >> 31); + q += q_sign & ((1U << shift) - is_power_of_2); + + // Now arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int32_t libdivide_s32_recover(const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + if (!denom->magic) { + uint32_t absD = 1U << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int32_t)absD; + } else { + // Unsigned math is much easier + // We negate the magic number only in the branchfull case, and we don't + // know which case we're in. However we have enough information to + // determine the correct sign of the magic number. The divisor was + // negative if LIBDIVIDE_NEGATIVE_DIVISOR is set. If ADD_MARKER is set, + // the magic number's sign is opposite that of the divisor. + // We want to compute the positive magic number. + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + // Handle the power of 2 case (including branchfree) + if (denom->magic == 0) { + int32_t result = 1U << shift; + return negative_divisor ? -result : result; + } + + uint32_t d = (uint32_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n = 1ULL << (32 + shift); // this shift cannot exceed 30 + uint32_t q = (uint32_t)(n / d); + int32_t result = (int32_t)q; + result += 1; + return negative_divisor ? -result : result; + } +} + +int32_t libdivide_s32_branchfree_recover(const struct libdivide_s32_branchfree_t *denom) { + return libdivide_s32_recover((const struct libdivide_s32_t *)denom); +} + +///////////// SINT64 + +static inline struct libdivide_s64_t libdivide_internal_s64_gen(int64_t d, int branchfree) { + if (d == 0) { + LIBDIVIDE_ERROR("divider must be != 0"); + } + + struct libdivide_s64_t result; + + // If d is a power of 2, or negative a power of 2, we have to use a shift. + // This is especially important because the magic algorithm fails for -1. + // To check if d is a power of 2 or its inverse, it suffices to check + // whether its absolute value has exactly one bit set. This works even for + // INT_MIN, because abs(INT_MIN) == INT_MIN, and INT_MIN has one bit set + // and is a power of 2. + uint64_t ud = (uint64_t)d; + uint64_t absD = (d < 0) ? -ud : ud; + uint32_t floor_log_2_d = 63 - libdivide_count_leading_zeros64(absD); + // check if exactly one bit is set, + // don't care if absD is 0 since that's divide by zero + if ((absD & (absD - 1)) == 0) { + // Branchfree and non-branchfree cases are the same + result.magic = 0; + result.more = floor_log_2_d | (d < 0 ? 
LIBDIVIDE_NEGATIVE_DIVISOR : 0);
+    } else {
+        // the dividend here is 2**(floor_log_2_d + 63), so the low 64 bit word
+        // is 0 and the high word is 2**(floor_log_2_d - 1)
+        uint8_t more;
+        uint64_t rem, proposed_m;
+        proposed_m = libdivide_128_div_64_to_64(1ULL << (floor_log_2_d - 1), 0, absD, &rem);
+        const uint64_t e = absD - rem;
+
+        // We are going to start with a power of floor_log_2_d - 1.
+        // This works if e < 2**floor_log_2_d.
+        if (!branchfree && e < (1ULL << floor_log_2_d)) {
+            // This power works
+            more = floor_log_2_d - 1;
+        } else {
+            // We need to go one higher. This should not make proposed_m
+            // overflow, but it will make it negative when interpreted as an
+            // int64_t.
+            proposed_m += proposed_m;
+            const uint64_t twice_rem = rem + rem;
+            if (twice_rem >= absD || twice_rem < rem) proposed_m += 1;
+            // note that we only set the LIBDIVIDE_NEGATIVE_DIVISOR bit if we
+            // also set ADD_MARKER; this is an annoying optimization that
+            // enables algorithm #4 to avoid the mask. However we always set it
+            // in the branchfree case
+            more = floor_log_2_d | LIBDIVIDE_ADD_MARKER;
+        }
+        proposed_m += 1;
+        int64_t magic = (int64_t)proposed_m;
+
+        // Mark if we are negative
+        if (d < 0) {
+            more |= LIBDIVIDE_NEGATIVE_DIVISOR;
+            if (!branchfree) {
+                magic = -magic;
+            }
+        }
+
+        result.more = more;
+        result.magic = magic;
+    }
+    return result;
+}
+
+struct libdivide_s64_t libdivide_s64_gen(int64_t d) {
+    return libdivide_internal_s64_gen(d, 0);
+}
+
+struct libdivide_s64_branchfree_t libdivide_s64_branchfree_gen(int64_t d) {
+    struct libdivide_s64_t tmp = libdivide_internal_s64_gen(d, 1);
+    struct libdivide_s64_branchfree_t ret = {tmp.magic, tmp.more};
+    return ret;
+}
+
+int64_t libdivide_s64_do(int64_t numer, const struct libdivide_s64_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+
+    if (!denom->magic) { // shift path
+        uint64_t mask = (1ULL << shift) - 1;
+        uint64_t uq = numer + ((numer >> 63) & mask);
+        int64_t q = (int64_t)uq;
+        q >>= shift;
+        // must be arithmetic shift and then sign-extend
+        int64_t sign = (int8_t)more >> 7;
+        q = (q ^ sign) - sign;
+        return q;
+    } else {
+        uint64_t uq = (uint64_t)libdivide_mullhi_s64(denom->magic, numer);
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // must be arithmetic shift and then sign extend
+            int64_t sign = (int8_t)more >> 7;
+            // q += (more < 0 ? -numer : numer)
+            // cast required to avoid UB
+            uq += ((uint64_t)numer ^ sign) - sign;
+        }
+        int64_t q = (int64_t)uq;
+        q >>= shift;
+        q += (q < 0);
+        return q;
+    }
+}
+
+int64_t libdivide_s64_branchfree_do(int64_t numer, const struct libdivide_s64_branchfree_t *denom) {
+    uint8_t more = denom->more;
+    uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+    // must be arithmetic shift and then sign extend
+    int64_t sign = (int8_t)more >> 7;
+    int64_t magic = denom->magic;
+    int64_t q = libdivide_mullhi_s64(magic, numer);
+    q += numer;
+
+    // If q is non-negative, we have nothing to do.
+    // If q is negative, we want to add either (2**shift)-1 if d is a power of
+    // 2, or (2**shift) if it is not a power of 2.
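+    // (e.g. for d = 4, a power of 2 with shift == 2, a negative q gets
+    //  (1 << 2) - 1 == 3 added before the arithmetic shift below, which
+    //  turns round-toward-negative-infinity into round-toward-zero)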
+ uint64_t is_power_of_2 = (magic == 0); + uint64_t q_sign = (uint64_t)(q >> 63); + q += q_sign & ((1ULL << shift) - is_power_of_2); + + // Arithmetic right shift + q >>= shift; + // Negate if needed + q = (q ^ sign) - sign; + + return q; +} + +int64_t libdivide_s64_recover(const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + if (denom->magic == 0) { // shift path + uint64_t absD = 1ULL << shift; + if (more & LIBDIVIDE_NEGATIVE_DIVISOR) { + absD = -absD; + } + return (int64_t)absD; + } else { + // Unsigned math is much easier + int negative_divisor = (more & LIBDIVIDE_NEGATIVE_DIVISOR); + int magic_was_negated = (more & LIBDIVIDE_ADD_MARKER) + ? denom->magic > 0 : denom->magic < 0; + + uint64_t d = (uint64_t)(magic_was_negated ? -denom->magic : denom->magic); + uint64_t n_hi = 1ULL << shift, n_lo = 0; + uint64_t rem_ignored; + uint64_t q = libdivide_128_div_64_to_64(n_hi, n_lo, d, &rem_ignored); + int64_t result = (int64_t)(q + 1); + if (negative_divisor) { + result = -result; + } + return result; + } +} + +int64_t libdivide_s64_branchfree_recover(const struct libdivide_s64_branchfree_t *denom) { + return libdivide_s64_recover((const struct libdivide_s64_t *)denom); +} + +#if defined(LIBDIVIDE_AVX512) + +static inline __m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom); +static inline __m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom); +static inline __m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom); +static inline __m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom); + +static inline __m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +static inline __m512i libdivide_s64_signbits(__m512i v) {; + return _mm512_srai_epi64(v, 63); +} + +static inline __m512i libdivide_s64_shift_right_vector(__m512i v, int amt) { + return _mm512_srai_epi64(v, amt); +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m512i libdivide_mullhi_u32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epu32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epu32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m512i libdivide_mullhi_s32_vector(__m512i a, __m512i b) { + __m512i hi_product_0Z2Z = _mm512_srli_epi64(_mm512_mul_epi32(a, b), 32); + __m512i a1X3X = _mm512_srli_epi64(a, 32); + __m512i mask = _mm512_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0); + __m512i hi_product_Z1Z3 = _mm512_and_si512(_mm512_mul_epi32(a1X3X, b), mask); + return _mm512_or_si512(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
+// https://stackoverflow.com/a/28827013 +static inline __m512i libdivide_mullhi_u64_vector(__m512i x, __m512i y) { + __m512i lomask = _mm512_set1_epi64(0xffffffff); + __m512i xh = _mm512_shuffle_epi32(x, (_MM_PERM_ENUM) 0xB1); + __m512i yh = _mm512_shuffle_epi32(y, (_MM_PERM_ENUM) 0xB1); + __m512i w0 = _mm512_mul_epu32(x, y); + __m512i w1 = _mm512_mul_epu32(x, yh); + __m512i w2 = _mm512_mul_epu32(xh, y); + __m512i w3 = _mm512_mul_epu32(xh, yh); + __m512i w0h = _mm512_srli_epi64(w0, 32); + __m512i s1 = _mm512_add_epi64(w1, w0h); + __m512i s1l = _mm512_and_si512(s1, lomask); + __m512i s1h = _mm512_srli_epi64(s1, 32); + __m512i s2 = _mm512_add_epi64(w2, s1l); + __m512i s2h = _mm512_srli_epi64(s2, 32); + __m512i hi = _mm512_add_epi64(w3, s1h); + hi = _mm512_add_epi64(hi, s2h); + + return hi; +} + +// y is one 64-bit value repeated. +static inline __m512i libdivide_mullhi_s64_vector(__m512i x, __m512i y) { + __m512i p = libdivide_mullhi_u64_vector(x, y); + __m512i t1 = _mm512_and_si512(libdivide_s64_signbits(x), y); + __m512i t2 = _mm512_and_si512(libdivide_s64_signbits(y), x); + p = _mm512_sub_epi64(p, t1); + p = _mm512_sub_epi64(p, t2); + return p; +} + +////////// UINT32 + +__m512i libdivide_u32_do_vector(__m512i numers, const struct libdivide_u32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi32(numers, more); + } + else { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, shift); + } + else { + return _mm512_srli_epi32(q, more); + } + } +} + +__m512i libdivide_u32_branchfree_do_vector(__m512i numers, const struct libdivide_u32_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u32_vector(numers, _mm512_set1_epi32(denom->magic)); + __m512i t = _mm512_add_epi32(_mm512_srli_epi32(_mm512_sub_epi32(numers, q), 1), q); + return _mm512_srli_epi32(t, denom->more); +} + +////////// UINT64 + +__m512i libdivide_u64_do_vector(__m512i numers, const struct libdivide_u64_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + return _mm512_srli_epi64(numers, more); + } + else { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // uint32_t t = ((numer - q) >> 1) + q; + // return t >> denom->shift; + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, shift); + } + else { + return _mm512_srli_epi64(q, more); + } + } +} + +__m512i libdivide_u64_branchfree_do_vector(__m512i numers, const struct libdivide_u64_branchfree_t *denom) { + __m512i q = libdivide_mullhi_u64_vector(numers, _mm512_set1_epi64(denom->magic)); + __m512i t = _mm512_add_epi64(_mm512_srli_epi64(_mm512_sub_epi64(numers, q), 1), q); + return _mm512_srli_epi64(t, denom->more); +} + +////////// SINT32 + +__m512i libdivide_s32_do_vector(__m512i numers, const struct libdivide_s32_t *denom) { + uint8_t more = denom->more; + if (!denom->magic) { + uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + uint32_t mask = (1U << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi32(mask); + // q = numer + ((numer >> 31) & roundToZeroTweak); + __m512i q = _mm512_add_epi32(numers, 
_mm512_and_si512(_mm512_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm512_srai_epi32(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi32(q, _mm512_sub_epi32(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= shift + q = _mm512_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm512_add_epi32(q, _mm512_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s32_branchfree_do_vector(__m512i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + __m512i q = libdivide_mullhi_s32_vector(numers, _mm512_set1_epi32(magic)); + q = _mm512_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = _mm512_srai_epi32(q, 31); // q_sign = q >> 31 + __m512i mask = _mm512_set1_epi32((1U << shift) - is_power_of_2); + q = _mm512_add_epi32(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm512_srai_epi32(q, shift); // q >>= shift + q = _mm512_sub_epi32(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m512i libdivide_s64_do_vector(__m512i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m512i roundToZeroTweak = _mm512_set1_epi64(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m512i q = _mm512_add_epi64(numers, _mm512_and_si512(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); + return q; + } + else { + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm512_add_epi64(q, _mm512_sub_epi64(_mm512_xor_si512(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm512_add_epi64(q, _mm512_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m512i libdivide_s64_branchfree_do_vector(__m512i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m512i sign = _mm512_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m512i q = libdivide_mullhi_s64_vector(numers, _mm512_set1_epi64(magic)); + q = _mm512_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to 
do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m512i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m512i mask = _mm512_set1_epi64((1ULL << shift) - is_power_of_2); + q = _mm512_add_epi64(q, _mm512_and_si512(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm512_sub_epi64(_mm512_xor_si512(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_AVX2) + +static inline __m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom); +static inline __m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom); +static inline __m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom); +static inline __m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom); + +static inline __m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm256_srai_epi64(v, 63) (from AVX512). +static inline __m256i libdivide_s64_signbits(__m256i v) { + __m256i hiBitsDuped = _mm256_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m256i signBits = _mm256_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm256_srai_epi64 (from AVX512). +static inline __m256i libdivide_s64_shift_right_vector(__m256i v, int amt) { + const int b = 64 - amt; + __m256i m = _mm256_set1_epi64x(1ULL << (b - 1)); + __m256i x = _mm256_srli_epi64(v, amt); + __m256i result = _mm256_sub_epi64(_mm256_xor_si256(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m256i libdivide_mullhi_u32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epu32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epu32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// b is one 32-bit value repeated. +static inline __m256i libdivide_mullhi_s32_vector(__m256i a, __m256i b) { + __m256i hi_product_0Z2Z = _mm256_srli_epi64(_mm256_mul_epi32(a, b), 32); + __m256i a1X3X = _mm256_srli_epi64(a, 32); + __m256i mask = _mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0); + __m256i hi_product_Z1Z3 = _mm256_and_si256(_mm256_mul_epi32(a1X3X, b), mask); + return _mm256_or_si256(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// Here, y is assumed to contain one 64-bit value repeated. 
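+// (Illustrative aside, not part of upstream libdivide: AVX2 has no 64-bit
+// arithmetic shift, so libdivide_s64_shift_right_vector above builds one
+// from a logical shift. After v >> amt the sign occupies bit b-1 with
+// b = 64 - amt; with m = 1 << (b-1), (x ^ m) - m leaves x unchanged when
+// that bit is clear and fills bits b..63 with ones when it is set.
+// E.g. b = 4, x = 0b1010: (x ^ 8) - 8 = -6, whose low four bits are
+// still 0b1010.)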
+// https://stackoverflow.com/a/28827013
+static inline __m256i libdivide_mullhi_u64_vector(__m256i x, __m256i y) {
+    __m256i lomask = _mm256_set1_epi64x(0xffffffff);
+    __m256i xh = _mm256_shuffle_epi32(x, 0xB1);  // x0l, x0h, x1l, x1h
+    __m256i yh = _mm256_shuffle_epi32(y, 0xB1);  // y0l, y0h, y1l, y1h
+    __m256i w0 = _mm256_mul_epu32(x, y);         // x0l*y0l, x1l*y1l
+    __m256i w1 = _mm256_mul_epu32(x, yh);        // x0l*y0h, x1l*y1h
+    __m256i w2 = _mm256_mul_epu32(xh, y);        // x0h*y0l, x1h*y1l
+    __m256i w3 = _mm256_mul_epu32(xh, yh);       // x0h*y0h, x1h*y1h
+    __m256i w0h = _mm256_srli_epi64(w0, 32);
+    __m256i s1 = _mm256_add_epi64(w1, w0h);
+    __m256i s1l = _mm256_and_si256(s1, lomask);
+    __m256i s1h = _mm256_srli_epi64(s1, 32);
+    __m256i s2 = _mm256_add_epi64(w2, s1l);
+    __m256i s2h = _mm256_srli_epi64(s2, 32);
+    __m256i hi = _mm256_add_epi64(w3, s1h);
+    hi = _mm256_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m256i libdivide_mullhi_s64_vector(__m256i x, __m256i y) {
+    __m256i p = libdivide_mullhi_u64_vector(x, y);
+    __m256i t1 = _mm256_and_si256(libdivide_s64_signbits(x), y);
+    __m256i t2 = _mm256_and_si256(libdivide_s64_signbits(y), x);
+    p = _mm256_sub_epi64(p, t1);
+    p = _mm256_sub_epi64(p, t2);
+    return p;
+}
+
+////////// UINT32
+
+__m256i libdivide_u32_do_vector(__m256i numers, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm256_srli_epi32(numers, more);
+    }
+    else {
+        __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+            __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+            return _mm256_srli_epi32(t, shift);
+        }
+        else {
+            return _mm256_srli_epi32(q, more);
+        }
+    }
+}
+
+__m256i libdivide_u32_branchfree_do_vector(__m256i numers, const struct libdivide_u32_branchfree_t *denom) {
+    __m256i q = libdivide_mullhi_u32_vector(numers, _mm256_set1_epi32(denom->magic));
+    __m256i t = _mm256_add_epi32(_mm256_srli_epi32(_mm256_sub_epi32(numers, q), 1), q);
+    return _mm256_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m256i libdivide_u64_do_vector(__m256i numers, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm256_srli_epi64(numers, more);
+    }
+    else {
+        __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+            __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+            return _mm256_srli_epi64(t, shift);
+        }
+        else {
+            return _mm256_srli_epi64(q, more);
+        }
+    }
+}
+
+__m256i libdivide_u64_branchfree_do_vector(__m256i numers, const struct libdivide_u64_branchfree_t *denom) {
+    __m256i q = libdivide_mullhi_u64_vector(numers, _mm256_set1_epi64x(denom->magic));
+    __m256i t = _mm256_add_epi64(_mm256_srli_epi64(_mm256_sub_epi64(numers, q), 1), q);
+    return _mm256_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m256i libdivide_s32_do_vector(__m256i numers, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+        uint32_t mask = (1U << shift) - 1;
+        __m256i roundToZeroTweak = _mm256_set1_epi32(mask);
+        // q = numer + ((numer >>
31) & roundToZeroTweak); + __m256i q = _mm256_add_epi32(numers, _mm256_and_si256(_mm256_srai_epi32(numers, 31), roundToZeroTweak)); + q = _mm256_srai_epi32(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi32(q, _mm256_sub_epi32(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= shift + q = _mm256_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm256_add_epi32(q, _mm256_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s32_branchfree_do_vector(__m256i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + __m256i q = libdivide_mullhi_s32_vector(numers, _mm256_set1_epi32(magic)); + q = _mm256_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = _mm256_srai_epi32(q, 31); // q_sign = q >> 31 + __m256i mask = _mm256_set1_epi32((1U << shift) - is_power_of_2); + q = _mm256_add_epi32(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm256_srai_epi32(q, shift); // q >>= shift + q = _mm256_sub_epi32(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m256i libdivide_s64_do_vector(__m256i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m256i roundToZeroTweak = _mm256_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m256i q = _mm256_add_epi64(numers, _mm256_and_si256(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); + return q; + } + else { + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm256_add_epi64(q, _mm256_sub_epi64(_mm256_xor_si256(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm256_add_epi64(q, _mm256_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m256i libdivide_s64_branchfree_do_vector(__m256i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m256i sign = _mm256_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m256i q = libdivide_mullhi_s64_vector(numers, _mm256_set1_epi64x(magic)); + q = _mm256_add_epi64(q, 
numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. + uint32_t is_power_of_2 = (magic == 0); + __m256i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63 + __m256i mask = _mm256_set1_epi64x((1ULL << shift) - is_power_of_2); + q = _mm256_add_epi64(q, _mm256_and_si256(q_sign, mask)); // q = q + (q_sign & mask) + q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift + q = _mm256_sub_epi64(_mm256_xor_si256(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +#elif defined(LIBDIVIDE_SSE2) + +static inline __m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom); +static inline __m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom); +static inline __m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom); +static inline __m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom); + +static inline __m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom); +static inline __m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom); +static inline __m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom); +static inline __m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom); + +//////// Internal Utility Functions + +// Implementation of _mm_srai_epi64(v, 63) (from AVX512). +static inline __m128i libdivide_s64_signbits(__m128i v) { + __m128i hiBitsDuped = _mm_shuffle_epi32(v, _MM_SHUFFLE(3, 3, 1, 1)); + __m128i signBits = _mm_srai_epi32(hiBitsDuped, 31); + return signBits; +} + +// Implementation of _mm_srai_epi64 (from AVX512). +static inline __m128i libdivide_s64_shift_right_vector(__m128i v, int amt) { + const int b = 64 - amt; + __m128i m = _mm_set1_epi64x(1ULL << (b - 1)); + __m128i x = _mm_srli_epi64(v, amt); + __m128i result = _mm_sub_epi64(_mm_xor_si128(x, m), m); + return result; +} + +// Here, b is assumed to contain one 32-bit value repeated. +static inline __m128i libdivide_mullhi_u32_vector(__m128i a, __m128i b) { + __m128i hi_product_0Z2Z = _mm_srli_epi64(_mm_mul_epu32(a, b), 32); + __m128i a1X3X = _mm_srli_epi64(a, 32); + __m128i mask = _mm_set_epi32(-1, 0, -1, 0); + __m128i hi_product_Z1Z3 = _mm_and_si128(_mm_mul_epu32(a1X3X, b), mask); + return _mm_or_si128(hi_product_0Z2Z, hi_product_Z1Z3); +} + +// SSE2 does not have a signed multiplication instruction, but we can convert +// unsigned to signed pretty efficiently. Again, b is just a 32 bit value +// repeated four times. +static inline __m128i libdivide_mullhi_s32_vector(__m128i a, __m128i b) { + __m128i p = libdivide_mullhi_u32_vector(a, b); + // t1 = (a >> 31) & y, arithmetic shift + __m128i t1 = _mm_and_si128(_mm_srai_epi32(a, 31), b); + __m128i t2 = _mm_and_si128(_mm_srai_epi32(b, 31), a); + p = _mm_sub_epi32(p, t1); + p = _mm_sub_epi32(p, t2); + return p; +} + +// Here, y is assumed to contain one 64-bit value repeated. 
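+// (Illustrative aside, not part of upstream libdivide: the signed
+// correction in libdivide_mullhi_s32_vector above follows from the fact
+// that reading a signed operand as unsigned overcounts by 2**32 * b when
+// a < 0, and symmetrically for b, so the signed high half is
+//     mullhi_s(a, b) = mullhi_u(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
+// and (x >> 31) & y evaluates each correction term branchlessly.)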
+// https://stackoverflow.com/a/28827013
+static inline __m128i libdivide_mullhi_u64_vector(__m128i x, __m128i y) {
+    __m128i lomask = _mm_set1_epi64x(0xffffffff);
+    __m128i xh = _mm_shuffle_epi32(x, 0xB1);  // x0l, x0h, x1l, x1h
+    __m128i yh = _mm_shuffle_epi32(y, 0xB1);  // y0l, y0h, y1l, y1h
+    __m128i w0 = _mm_mul_epu32(x, y);         // x0l*y0l, x1l*y1l
+    __m128i w1 = _mm_mul_epu32(x, yh);        // x0l*y0h, x1l*y1h
+    __m128i w2 = _mm_mul_epu32(xh, y);        // x0h*y0l, x1h*y1l
+    __m128i w3 = _mm_mul_epu32(xh, yh);       // x0h*y0h, x1h*y1h
+    __m128i w0h = _mm_srli_epi64(w0, 32);
+    __m128i s1 = _mm_add_epi64(w1, w0h);
+    __m128i s1l = _mm_and_si128(s1, lomask);
+    __m128i s1h = _mm_srli_epi64(s1, 32);
+    __m128i s2 = _mm_add_epi64(w2, s1l);
+    __m128i s2h = _mm_srli_epi64(s2, 32);
+    __m128i hi = _mm_add_epi64(w3, s1h);
+    hi = _mm_add_epi64(hi, s2h);
+
+    return hi;
+}
+
+// y is one 64-bit value repeated.
+static inline __m128i libdivide_mullhi_s64_vector(__m128i x, __m128i y) {
+    __m128i p = libdivide_mullhi_u64_vector(x, y);
+    __m128i t1 = _mm_and_si128(libdivide_s64_signbits(x), y);
+    __m128i t2 = _mm_and_si128(libdivide_s64_signbits(y), x);
+    p = _mm_sub_epi64(p, t1);
+    p = _mm_sub_epi64(p, t2);
+    return p;
+}
+
+////////// UINT32
+
+__m128i libdivide_u32_do_vector(__m128i numers, const struct libdivide_u32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm_srli_epi32(numers, more);
+    }
+    else {
+        __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+            __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+            return _mm_srli_epi32(t, shift);
+        }
+        else {
+            return _mm_srli_epi32(q, more);
+        }
+    }
+}
+
+__m128i libdivide_u32_branchfree_do_vector(__m128i numers, const struct libdivide_u32_branchfree_t *denom) {
+    __m128i q = libdivide_mullhi_u32_vector(numers, _mm_set1_epi32(denom->magic));
+    __m128i t = _mm_add_epi32(_mm_srli_epi32(_mm_sub_epi32(numers, q), 1), q);
+    return _mm_srli_epi32(t, denom->more);
+}
+
+////////// UINT64
+
+__m128i libdivide_u64_do_vector(__m128i numers, const struct libdivide_u64_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        return _mm_srli_epi64(numers, more);
+    }
+    else {
+        __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+        if (more & LIBDIVIDE_ADD_MARKER) {
+            // uint32_t t = ((numer - q) >> 1) + q;
+            // return t >> denom->shift;
+            uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK;
+            __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+            return _mm_srli_epi64(t, shift);
+        }
+        else {
+            return _mm_srli_epi64(q, more);
+        }
+    }
+}
+
+__m128i libdivide_u64_branchfree_do_vector(__m128i numers, const struct libdivide_u64_branchfree_t *denom) {
+    __m128i q = libdivide_mullhi_u64_vector(numers, _mm_set1_epi64x(denom->magic));
+    __m128i t = _mm_add_epi64(_mm_srli_epi64(_mm_sub_epi64(numers, q), 1), q);
+    return _mm_srli_epi64(t, denom->more);
+}
+
+////////// SINT32
+
+__m128i libdivide_s32_do_vector(__m128i numers, const struct libdivide_s32_t *denom) {
+    uint8_t more = denom->more;
+    if (!denom->magic) {
+        uint32_t shift = more & LIBDIVIDE_32_SHIFT_MASK;
+        uint32_t mask = (1U << shift) - 1;
+        __m128i roundToZeroTweak = _mm_set1_epi32(mask);
+        // q = numer + ((numer >> 31) & roundToZeroTweak);
+        __m128i q = _mm_add_epi32(numers, _mm_and_si128(_mm_srai_epi32(numers, 31), roundToZeroTweak));
+        q =
_mm_srai_epi32(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(denom->magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi32(q, _mm_sub_epi32(_mm_xor_si128(numers, sign), sign)); + } + // q >>= shift + q = _mm_srai_epi32(q, more & LIBDIVIDE_32_SHIFT_MASK); + q = _mm_add_epi32(q, _mm_srli_epi32(q, 31)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s32_branchfree_do_vector(__m128i numers, const struct libdivide_s32_branchfree_t *denom) { + int32_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_32_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + __m128i q = libdivide_mullhi_s32_vector(numers, _mm_set1_epi32(magic)); + q = _mm_add_epi32(q, numers); // q += numers + + // If q is non-negative, we have nothing to do + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2 + uint32_t is_power_of_2 = (magic == 0); + __m128i q_sign = _mm_srai_epi32(q, 31); // q_sign = q >> 31 + __m128i mask = _mm_set1_epi32((1U << shift) - is_power_of_2); + q = _mm_add_epi32(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask) + q = _mm_srai_epi32(q, shift); // q >>= shift + q = _mm_sub_epi32(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign + return q; +} + +////////// SINT64 + +__m128i libdivide_s64_do_vector(__m128i numers, const struct libdivide_s64_t *denom) { + uint8_t more = denom->more; + int64_t magic = denom->magic; + if (magic == 0) { // shift path + uint32_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + uint64_t mask = (1ULL << shift) - 1; + __m128i roundToZeroTweak = _mm_set1_epi64x(mask); + // q = numer + ((numer >> 63) & roundToZeroTweak); + __m128i q = _mm_add_epi64(numers, _mm_and_si128(libdivide_s64_signbits(numers), roundToZeroTweak)); + q = libdivide_s64_shift_right_vector(q, shift); + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q = (q ^ sign) - sign; + q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); + return q; + } + else { + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + if (more & LIBDIVIDE_ADD_MARKER) { + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + // q += ((numer ^ sign) - sign); + q = _mm_add_epi64(q, _mm_sub_epi64(_mm_xor_si128(numers, sign), sign)); + } + // q >>= denom->mult_path.shift + q = libdivide_s64_shift_right_vector(q, more & LIBDIVIDE_64_SHIFT_MASK); + q = _mm_add_epi64(q, _mm_srli_epi64(q, 63)); // q += (q < 0) + return q; + } +} + +__m128i libdivide_s64_branchfree_do_vector(__m128i numers, const struct libdivide_s64_branchfree_t *denom) { + int64_t magic = denom->magic; + uint8_t more = denom->more; + uint8_t shift = more & LIBDIVIDE_64_SHIFT_MASK; + // must be arithmetic shift + __m128i sign = _mm_set1_epi32((int8_t)more >> 7); + + // libdivide_mullhi_s64(numers, magic); + __m128i q = libdivide_mullhi_s64_vector(numers, _mm_set1_epi64x(magic)); + q = _mm_add_epi64(q, numers); // q += numers + + // If q is non-negative, we have nothing to do. + // If q is negative, we want to add either (2**shift)-1 if d is + // a power of 2, or (2**shift) if it is not a power of 2. 
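+    // (Illustrative worked case, not part of upstream libdivide: take the
+    // power-of-2 path with n = -7, d = 4. An arithmetic shift alone gives
+    // -7 >> 2 == -2, i.e. floor division, but C truncates toward zero and
+    // wants -1. Adding 2**shift - 1 == 3 first gives (-7 + 3) >> 2 == -1.
+    // The magic-number path needs the slightly larger 2**shift nudge for
+    // the same reason.)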
+    uint32_t is_power_of_2 = (magic == 0);
+    __m128i q_sign = libdivide_s64_signbits(q); // q_sign = q >> 63
+    __m128i mask = _mm_set1_epi64x((1ULL << shift) - is_power_of_2);
+    q = _mm_add_epi64(q, _mm_and_si128(q_sign, mask)); // q = q + (q_sign & mask)
+    q = libdivide_s64_shift_right_vector(q, shift); // q >>= shift
+    q = _mm_sub_epi64(_mm_xor_si128(q, sign), sign); // q = (q ^ sign) - sign
+    return q;
+}
+
+#endif
+
+/////////// C++ stuff
+
+#ifdef __cplusplus
+
+// The C++ divider class is templated on both an integer type
+// (like uint64_t) and an algorithm type.
+// * BRANCHFULL is the default algorithm type.
+// * BRANCHFREE is the branchfree algorithm type.
+enum {
+    BRANCHFULL,
+    BRANCHFREE
+};
+
+#if defined(LIBDIVIDE_AVX512)
+    #define LIBDIVIDE_VECTOR_TYPE __m512i
+#elif defined(LIBDIVIDE_AVX2)
+    #define LIBDIVIDE_VECTOR_TYPE __m256i
+#elif defined(LIBDIVIDE_SSE2)
+    #define LIBDIVIDE_VECTOR_TYPE __m128i
+#endif
+
+#if !defined(LIBDIVIDE_VECTOR_TYPE)
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO)
+#else
+    #define LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+        LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const { \
+            return libdivide_##ALGO##_do_vector(n, &denom); \
+        }
+#endif
+
+// The DISPATCHER_GEN() macro generates C++ methods (for the given integer
+// and algorithm types) that redirect to libdivide's C API.
+#define DISPATCHER_GEN(T, ALGO) \
+    libdivide_##ALGO##_t denom; \
+    dispatcher() { } \
+    dispatcher(T d) \
+        : denom(libdivide_##ALGO##_gen(d)) \
+    { } \
+    T divide(T n) const { \
+        return libdivide_##ALGO##_do(n, &denom); \
+    } \
+    LIBDIVIDE_DIVIDE_VECTOR(ALGO) \
+    T recover() const { \
+        return libdivide_##ALGO##_recover(&denom); \
+    }
+
+// The dispatcher selects a specific division algorithm for a given
+// type and ALGO using partial template specialization.
+template<bool IS_INTEGRAL, bool IS_SIGNED, int SIZEOF, int ALGO> struct dispatcher { };
+
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFULL> { DISPATCHER_GEN(int32_t, s32) };
+template<> struct dispatcher<true, true, sizeof(int32_t), BRANCHFREE> { DISPATCHER_GEN(int32_t, s32_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFULL> { DISPATCHER_GEN(uint32_t, u32) };
+template<> struct dispatcher<true, false, sizeof(uint32_t), BRANCHFREE> { DISPATCHER_GEN(uint32_t, u32_branchfree) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFULL> { DISPATCHER_GEN(int64_t, s64) };
+template<> struct dispatcher<true, true, sizeof(int64_t), BRANCHFREE> { DISPATCHER_GEN(int64_t, s64_branchfree) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFULL> { DISPATCHER_GEN(uint64_t, u64) };
+template<> struct dispatcher<true, false, sizeof(uint64_t), BRANCHFREE> { DISPATCHER_GEN(uint64_t, u64_branchfree) };
+
+// This is the main divider class for use by the user (C++ API).
+// The actual division algorithm is selected using the dispatcher struct
+// based on the integer and algorithm template parameters.
+template<typename T, int ALGO = BRANCHFULL>
+class divider {
+public:
+    // We leave the default constructor empty so that creating
+    // an array of dividers and then initializing them
+    // later doesn't slow us down.
+    divider() { }
+
+    // Constructor that takes the divisor as a parameter
+    divider(T d) : div(d) { }
+
+    // Divides n by the divisor
+    T divide(T n) const {
+        return div.divide(n);
+    }
+
+    // Recovers the divisor, returns the value that was
+    // used to initialize this divider object.
+    T recover() const {
+        return div.recover();
+    }
+
+    bool operator==(const divider<T, ALGO>& other) const {
+        return div.denom.magic == other.div.denom.magic &&
+               div.denom.more == other.div.denom.more;
+    }
+
+    bool operator!=(const divider<T, ALGO>& other) const {
+        return !(*this == other);
+    }
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Treats the vector as packed integer values with the same type as
+    // the divider (e.g. s32, u32, s64, u64) and divides each of
+    // them by the divider, returning the packed quotients.
+    LIBDIVIDE_VECTOR_TYPE divide(LIBDIVIDE_VECTOR_TYPE n) const {
+        return div.divide(n);
+    }
+#endif
+
+private:
+    // Storage for the actual divisor
+    dispatcher<std::is_integral<T>::value,
+               std::is_signed<T>::value, sizeof(T), ALGO> div;
+};
+
+// Overload of operator / for scalar division
+template<typename T, int ALGO>
+T operator/(T n, const divider<T, ALGO>& div) {
+    return div.divide(n);
+}
+
+// Overload of operator /= for scalar division
+template<typename T, int ALGO>
+T& operator/=(T& n, const divider<T, ALGO>& div) {
+    n = div.divide(n);
+    return n;
+}
+
+#if defined(LIBDIVIDE_VECTOR_TYPE)
+    // Overload of operator / for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE operator/(LIBDIVIDE_VECTOR_TYPE n, const divider<T, ALGO>& div) {
+        return div.divide(n);
+    }
+    // Overload of operator /= for vector division
+    template<typename T, int ALGO>
+    LIBDIVIDE_VECTOR_TYPE& operator/=(LIBDIVIDE_VECTOR_TYPE& n, const divider<T, ALGO>& div) {
+        n = div.divide(n);
+        return n;
+    }
+#endif
+
+// libdivide::branchfree_divider
+template <typename T>
+using branchfree_divider = divider<T, BRANCHFREE>;
+
+} // namespace libdivide
+
+#endif // __cplusplus
+
+#endif // NUMPY_CORE_INCLUDE_NUMPY_LIBDIVIDE_LIBDIVIDE_H_
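As a usage sketch (illustrative only; the variable names and the include path are assumptions, not part of the vendored header), the C++ wrapper above is meant to be used by constructing a `divider` once and then routing many divisions through the `operator/` overloads:

```cpp
#include <cstdint>
#include <cstdio>
#include "numpy/libdivide/libdivide.h"  // path of the vendored copy; adjust as needed

int main(void) {
    // Precompute the magic-number form of the divisor once...
    libdivide::divider<uint64_t> d7(7);
    // ...then reuse it: each '/' below is a multiply+shift, not a hardware divide.
    uint64_t total = 0;
    for (uint64_t n = 0; n < 1000; ++n) {
        total += n / d7;
    }
    // The branchfree flavor divides without data-dependent branches.
    libdivide::branchfree_divider<uint64_t> bf7(7);
    std::printf("%llu %llu %llu\n",
                (unsigned long long)total,
                (unsigned long long)(1000ULL / bf7),
                (unsigned long long)d7.recover());  // recover() gives back 7
    return 0;
}
```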
diff --git a/python/numpy/_core/include/numpy/ufuncobject.h b/python/numpy/_core/include/numpy/ufuncobject.h
new file mode 100644
index 000000000..f5f82b57c
--- /dev/null
+++ b/python/numpy/_core/include/numpy/ufuncobject.h
@@ -0,0 +1,343 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_
+
+#include <numpy/npy_math.h>
+#include <numpy/npy_common.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The legacy generic inner loop for a standard element-wise or
+ * generalized ufunc.
+ */
+typedef void (*PyUFuncGenericFunction)
+            (char **args,
+             npy_intp const *dimensions,
+             npy_intp const *strides,
+             void *innerloopdata);
+
+/*
+ * The most generic one-dimensional inner loop for
+ * a masked standard element-wise ufunc. "Masked" here means that it skips
+ * doing calculations on any items for which the maskptr array has a true
+ * value.
+ */
+typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
+                char **dataptrs, npy_intp *strides,
+                char *maskptr, npy_intp mask_stride,
+                npy_intp count,
+                NpyAuxData *innerloopdata);
+
+/* Forward declaration for the type resolver and loop selector typedefs */
+struct _tagPyUFuncObject;
+
+/*
+ * Given the operands for calling a ufunc, should determine the
+ * calculation input and output data types and return an inner loop function.
+ * This function should validate that the casting rule is being followed,
+ * and fail if it is not.
+ *
+ * For backwards compatibility, the regular type resolution function does not
+ * support auxiliary data with object semantics. The type resolution call
+ * which returns a masked generic function returns a standard NpyAuxData
+ * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
+ * work.
+ *
+ * ufunc: The ufunc object.
+ * casting: The 'casting' parameter provided to the ufunc.
+ * operands: An array of length (ufunc->nin + ufunc->nout),
+ *      with the output parameters possibly NULL.
+ * type_tup: Either NULL, or the type_tup passed to the ufunc.
+ * out_dtypes: An array which should be populated with new
+ *      references to (ufunc->nin + ufunc->nout) new
+ *      dtypes, one for each input and output. These
+ *      dtypes should all be in native-endian format.
+ *
+ * Should return 0 on success, -1 on failure (with exception set),
+ * or -2 if Py_NotImplemented should be returned.
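+ *
+ * A minimal sketch (illustrative, not part of this header; the name
+ * `double_resolver` is hypothetical) of a resolver that forces double
+ * precision for every argument:
+ *
+ *     static int
+ *     double_resolver(struct _tagPyUFuncObject *ufunc, NPY_CASTING casting,
+ *                     PyArrayObject **operands, PyObject *type_tup,
+ *                     PyArray_Descr **out_dtypes)
+ *     {
+ *         for (int i = 0; i < ufunc->nin + ufunc->nout; i++) {
+ *             // PyArray_DescrFromType returns the new reference required here
+ *             out_dtypes[i] = PyArray_DescrFromType(NPY_DOUBLE);
+ *         }
+ *         return 0;
+ *     }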
+ */ +typedef int (PyUFunc_TypeResolutionFunc)( + struct _tagPyUFuncObject *ufunc, + NPY_CASTING casting, + PyArrayObject **operands, + PyObject *type_tup, + PyArray_Descr **out_dtypes); + +/* + * This is the signature for the functions that may be assigned to the + * `process_core_dims_func` field of the PyUFuncObject structure. + * Implementation of this function is optional. This function is only used + * by generalized ufuncs (i.e. those with the field `core_enabled` set to 1). + * The function is called by the ufunc during the processing of the arguments + * of a call of the ufunc. The function can check the core dimensions of the + * input and output arrays and return -1 with an exception set if any + * requirements are not satisfied. If the caller of the ufunc didn't provide + * output arrays, the core dimensions associated with the output arrays (i.e. + * those that are not also used in input arrays) will have the value -1 in + * `core_dim_sizes`. This function can replace any output core dimensions + * that are -1 with a value that is appropriate for the ufunc. + * + * Parameter Description + * --------------- ------------------------------------------------------ + * ufunc The ufunc object + * core_dim_sizes An array with length `ufunc->core_num_dim_ix`. + * The core dimensions of the arrays passed to the ufunc + * will have been set. If the caller of the ufunc didn't + * provide the output array(s), the output-only core + * dimensions will have the value -1. + * + * The function must not change any element in `core_dim_sizes` that is + * not -1 on input. Doing so will result in incorrect output from the + * ufunc, and could result in a crash of the Python interpreter. + * + * The function must return 0 on success, -1 on failure (with an exception + * set). + */ +typedef int (PyUFunc_ProcessCoreDimsFunc)( + struct _tagPyUFuncObject *ufunc, + npy_intp *core_dim_sizes); + +typedef struct _tagPyUFuncObject { + PyObject_HEAD + /* + * nin: Number of inputs + * nout: Number of outputs + * nargs: Always nin + nout (Why is it stored?) + */ + int nin, nout, nargs; + + /* + * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero + * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone, + * PyUFunc_IdentityValue. 
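+     * (For instance, np.add reduces from PyUFunc_Zero and np.multiply from
+     * PyUFunc_One; in Python these surface as np.add.identity == 0 and
+     * np.multiply.identity == 1.)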
+     */
+    int identity;
+
+    /* Array of one-dimensional core loops */
+    PyUFuncGenericFunction *functions;
+    /* Array of funcdata that gets passed into the functions */
+    void *const *data;
+    /* The number of elements in 'functions' and 'data' */
+    int ntypes;
+
+    /* Used to be unused field 'check_return' */
+    int reserved1;
+
+    /* The name of the ufunc */
+    const char *name;
+
+    /* Array of type numbers, of size ('nargs' * 'ntypes') */
+    const char *types;
+
+    /* Documentation string */
+    const char *doc;
+
+    void *ptr;
+    PyObject *obj;
+    PyObject *userloops;
+
+    /* generalized ufunc parameters */
+
+    /* 0 for scalar ufunc; 1 for generalized ufunc */
+    int core_enabled;
+    /* number of distinct dimension names in signature */
+    int core_num_dim_ix;
+
+    /*
+     * dimension indices of input/output argument k are stored in
+     * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
+     */
+
+    /* numbers of core dimensions of each argument */
+    int *core_num_dims;
+    /*
+     * dimension indices in a flattened form; indices
+     * are in the range of [0,core_num_dim_ix)
+     */
+    int *core_dim_ixs;
+    /*
+     * positions of 1st core dimensions of each
+     * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
+     */
+    int *core_offsets;
+    /* signature string for printing purpose */
+    char *core_signature;
+
+    /*
+     * A function which resolves the types and fills an array
+     * with the dtypes for the inputs and outputs.
+     */
+    PyUFunc_TypeResolutionFunc *type_resolver;
+
+    /* A dictionary to monkeypatch ufuncs */
+    PyObject *dict;
+
+    /*
+     * This was blocked off to be the "new" inner loop selector in 1.7,
+     * but this was never implemented. (This is also why the above
+     * selector is called the "legacy" selector.)
+     */
+    #ifndef Py_LIMITED_API
+    vectorcallfunc vectorcall;
+    #else
+    void *vectorcall;
+    #endif
+
+    /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */
+    void *reserved3;
+
+    /*
+     * List of flags for each operand when ufunc is called by nditer object.
+     * These flags will be used in addition to the default flags for each
+     * operand set by nditer object.
+     */
+    npy_uint32 *op_flags;
+
+    /*
+     * List of global flags used when ufunc is called by nditer object.
+     * These flags will be used in addition to the default global flags
+     * set by nditer object.
+     */
+    npy_uint32 iter_flags;
+
+    /* New in NPY_API_VERSION 0x0000000D and above */
+    #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
+    /*
+     * for each core_num_dim_ix distinct dimension names,
+     * the possible "frozen" size (-1 if not frozen).
+     */
+    npy_intp *core_dim_sizes;
+
+    /*
+     * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
+     */
+    npy_uint32 *core_dim_flags;
+
+    /* Identity for reduction, when identity == PyUFunc_IdentityValue */
+    PyObject *identity_value;
+    #endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */
+
+    /* New in NPY_API_VERSION 0x0000000F and above */
+    #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
+    /* New private fields related to dispatching */
+    void *_dispatch_cache;
+    /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
+    PyObject *_loops;
+    #endif
+    #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION
+    /*
+     * Optional function to process core dimensions of a gufunc.
+     */
+    PyUFunc_ProcessCoreDimsFunc *process_core_dims_func;
+    #endif
+} PyUFuncObject;
+
+#include "arrayobject.h"
+/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
+/* the core dimension's size will be determined by the operands.
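+ * (e.g. with signature "(m,n),(n,p)->(m,p)" the sizes of m, n and p are
+ * read off the operands at call time rather than frozen in the signature)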
*/ +#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002 +/* the core dimension may be absent */ +#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004 +/* flags inferred during execution */ +#define UFUNC_CORE_DIM_MISSING 0x00040000 + + +#define UFUNC_OBJ_ISOBJECT 1 +#define UFUNC_OBJ_NEEDS_API 2 + + +#if NPY_ALLOW_THREADS +#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0); +#define NPY_LOOP_END_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0); +#else +#define NPY_LOOP_BEGIN_THREADS +#define NPY_LOOP_END_THREADS +#endif + +/* + * UFunc has unit of 0, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_Zero 0 +/* + * UFunc has unit of 1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_One 1 +/* + * UFunc has unit of -1, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. Intended for + * bitwise_and reduction. + */ +#define PyUFunc_MinusOne 2 +/* + * UFunc has no unit, and the order of operations cannot be reordered. + * This case does not allow reduction with multiple axes at once. + */ +#define PyUFunc_None -1 +/* + * UFunc has no unit, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_ReorderableNone -2 +/* + * UFunc unit is an identity_value, and the order of operations can be reordered + * This case allows reduction with multiple axes at once. + */ +#define PyUFunc_IdentityValue -3 + + +#define UFUNC_REDUCE 0 +#define UFUNC_ACCUMULATE 1 +#define UFUNC_REDUCEAT 2 +#define UFUNC_OUTER 3 + + +typedef struct { + int nin; + int nout; + PyObject *callable; +} PyUFunc_PyFuncData; + +/* A linked-list of function information for + user-defined 1-d loops. + */ +typedef struct _loop1d_info { + PyUFuncGenericFunction func; + void *data; + int *arg_types; + struct _loop1d_info *next; + int nargs; + PyArray_Descr **arg_dtypes; +} PyUFunc_Loop1d; + + +#define UFUNC_PYVALS_NAME "UFUNC_PYVALS" + +/* THESE MACROS ARE DEPRECATED. + * Use npy_set_floatstatus_* in the npymath library. 
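+ * For example, call npy_set_floatstatus_divbyzero() where code previously
+ * raised UFUNC_FPE_DIVIDEBYZERO by hand.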
+ */
+#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
+#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
+#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
+#define UFUNC_FPE_INVALID NPY_FPE_INVALID
+
+/* Make sure it gets defined if it isn't already */
+#ifndef UFUNC_NOFPE
+/* Clear the floating point exception default of Borland C++ */
+#if defined(__BORLANDC__)
+#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
+#else
+#define UFUNC_NOFPE
+#endif
+#endif
+
+#include "__ufunc_api.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */
diff --git a/python/numpy/_core/include/numpy/utils.h b/python/numpy/_core/include/numpy/utils.h
new file mode 100644
index 000000000..97f06092e
--- /dev/null
+++ b/python/numpy/_core/include/numpy/utils.h
@@ -0,0 +1,37 @@
+#ifndef NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+#define NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_
+
+#ifndef __COMP_NPY_UNUSED
+    #if defined(__GNUC__)
+        #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+    #elif defined(__ICC)
+        #define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+    #elif defined(__clang__)
+        #define __COMP_NPY_UNUSED __attribute__ ((unused))
+    #else
+        #define __COMP_NPY_UNUSED
+    #endif
+#endif
+
+#if defined(__GNUC__) || defined(__ICC) || defined(__clang__)
+    #define NPY_DECL_ALIGNED(x) __attribute__ ((aligned (x)))
+#elif defined(_MSC_VER)
+    #define NPY_DECL_ALIGNED(x) __declspec(align(x))
+#else
+    #define NPY_DECL_ALIGNED(x)
+#endif
+
+/* Use this to tag a variable as not used. It will remove unused variable
+ * warnings on supported platforms (see __COMP_NPY_UNUSED) and mangle the
+ * variable to avoid accidental use */
+#define NPY_UNUSED(x) __NPY_UNUSED_TAGGED ## x __COMP_NPY_UNUSED
+#define NPY_EXPAND(x) x
+
+#define NPY_STRINGIFY(x) #x
+#define NPY_TOSTRING(x) NPY_STRINGIFY(x)
+
+#define NPY_CAT__(a, b) a ## b
+#define NPY_CAT_(a, b) NPY_CAT__(a, b)
+#define NPY_CAT(a, b) NPY_CAT_(a, b)
+
+#endif /* NUMPY_CORE_INCLUDE_NUMPY_UTILS_H_ */
diff --git a/python/numpy/_core/lib/libnpymath.a b/python/numpy/_core/lib/libnpymath.a
new file mode 100644
index 000000000..f8d561d28
Binary files /dev/null and b/python/numpy/_core/lib/libnpymath.a differ
diff --git a/python/numpy/_core/lib/npy-pkg-config/mlib.ini b/python/numpy/_core/lib/npy-pkg-config/mlib.ini
new file mode 100644
index 000000000..5840f5e1b
--- /dev/null
+++ b/python/numpy/_core/lib/npy-pkg-config/mlib.ini
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=-lm
+Cflags=
+
+[msvc]
+Libs=m.lib
+Cflags=
diff --git a/python/numpy/_core/lib/npy-pkg-config/npymath.ini b/python/numpy/_core/lib/npy-pkg-config/npymath.ini
new file mode 100644
index 000000000..8d879e3fb
--- /dev/null
+++ b/python/numpy/_core/lib/npy-pkg-config/npymath.ini
@@ -0,0 +1,20 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+pkgname=numpy._core
+prefix=${pkgdir}
+libdir=${prefix}/lib
+includedir=${prefix}/include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
diff --git a/python/numpy/_core/lib/pkgconfig/numpy.pc b/python/numpy/_core/lib/pkgconfig/numpy.pc
new file mode 100644
index 000000000..61de639cb
--- /dev/null
+++ b/python/numpy/_core/lib/pkgconfig/numpy.pc
@@ -0,0 +1,7 @@
+prefix=${pcfiledir}/../..
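+# (Illustrative usage, not part of the generated file: a consumer runs
+# `pkg-config --cflags numpy`, which resolves the Cflags entry below to
+# this relocatable include directory.)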
+includedir=${prefix}/include + +Name: numpy +Description: NumPy is the fundamental package for scientific computing with Python. +Version: 2.3.5 +Cflags: -I${includedir} diff --git a/python/numpy/_core/memmap.py b/python/numpy/_core/memmap.py new file mode 100644 index 000000000..8cfa7f94a --- /dev/null +++ b/python/numpy/_core/memmap.py @@ -0,0 +1,363 @@ +import operator +from contextlib import nullcontext + +import numpy as np +from numpy._utils import set_module + +from .numeric import dtype, ndarray, uint8 + +__all__ = ['memmap'] + +dtypedescr = dtype +valid_filemodes = ["r", "c", "r+", "w+"] +writeable_filemodes = ["r+", "w+"] + +mode_equivalents = { + "readonly": "r", + "copyonwrite": "c", + "readwrite": "r+", + "write": "w+" + } + + +@set_module('numpy') +class memmap(ndarray): + """Create a memory-map to an array stored in a *binary* file on disk. + + Memory-mapped files are used for accessing small segments of large files + on disk, without reading the entire file into memory. NumPy's + memmap's are array-like objects. This differs from Python's ``mmap`` + module, which uses file-like objects. + + This subclass of ndarray has some unpleasant interactions with + some operations, because it doesn't quite fit properly as a subclass. + An alternative to using this subclass is to create the ``mmap`` + object yourself, then create an ndarray with ndarray.__new__ directly, + passing the object created in its 'buffer=' parameter. + + This class may at some point be turned into a factory function + which returns a view into an mmap buffer. + + Flush the memmap instance to write the changes to the file. Currently there + is no API to close the underlying ``mmap``. It is tricky to ensure the + resource is actually closed, since it may be shared between different + memmap instances. + + + Parameters + ---------- + filename : str, file-like object, or pathlib.Path instance + The file name or file object to be used as the array data buffer. + dtype : data-type, optional + The data-type used to interpret the file contents. + Default is `uint8`. + mode : {'r+', 'r', 'w+', 'c'}, optional + The file is opened in this mode: + + +------+-------------------------------------------------------------+ + | 'r' | Open existing file for reading only. | + +------+-------------------------------------------------------------+ + | 'r+' | Open existing file for reading and writing. | + +------+-------------------------------------------------------------+ + | 'w+' | Create or overwrite existing file for reading and writing. | + | | If ``mode == 'w+'`` then `shape` must also be specified. | + +------+-------------------------------------------------------------+ + | 'c' | Copy-on-write: assignments affect data in memory, but | + | | changes are not saved to disk. The file on disk is | + | | read-only. | + +------+-------------------------------------------------------------+ + + Default is 'r+'. + offset : int, optional + In the file, array data starts at this offset. Since `offset` is + measured in bytes, it should normally be a multiple of the byte-size + of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of + file are valid; The file will be extended to accommodate the + additional data. By default, ``memmap`` will start at the beginning of + the file, even if ``filename`` is a file pointer ``fp`` and + ``fp.tell() != 0``. + shape : int or sequence of ints, optional + The desired shape of the array. 
If ``mode == 'r'`` and the number + of remaining bytes after `offset` is not a multiple of the byte-size + of `dtype`, you must specify `shape`. By default, the returned array + will be 1-D with the number of elements determined by file size + and data-type. + + .. versionchanged:: 2.0 + The shape parameter can now be any integer sequence type, previously + types were limited to tuple and int. + + order : {'C', 'F'}, optional + Specify the order of the ndarray memory layout: + :term:`row-major`, C-style or :term:`column-major`, + Fortran-style. This only has an effect if the shape is + greater than 1-D. The default order is 'C'. + + Attributes + ---------- + filename : str or pathlib.Path instance + Path to the mapped file. + offset : int + Offset position in the file. + mode : str + File mode. + + Methods + ------- + flush + Flush any changes in memory to file on disk. + When you delete a memmap object, flush is called first to write + changes to disk. + + + See also + -------- + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + The memmap object can be used anywhere an ndarray is accepted. + Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns + ``True``. + + Memory-mapped files cannot be larger than 2GB on 32-bit systems. + + When a memmap causes a file to be created or extended beyond its + current size in the filesystem, the contents of the new part are + unspecified. On systems with POSIX filesystem semantics, the extended + part will be filled with zero bytes. + + Examples + -------- + >>> import numpy as np + >>> data = np.arange(12, dtype='float32') + >>> data.resize((3,4)) + + This example uses a temporary file so that doctest doesn't write + files to your directory. You would use a 'normal' filename. 
+ + >>> from tempfile import mkdtemp + >>> import os.path as path + >>> filename = path.join(mkdtemp(), 'newfile.dat') + + Create a memmap with dtype and shape that matches our data: + + >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) + >>> fp + memmap([[0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], dtype=float32) + + Write data to memmap array: + + >>> fp[:] = data[:] + >>> fp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + >>> fp.filename == path.abspath(filename) + True + + Flushes memory changes to disk in order to read them back + + >>> fp.flush() + + Load the memmap and verify data was stored: + + >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> newfp + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Read-only memmap: + + >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4)) + >>> fpr.flags.writeable + False + + Copy-on-write memmap: + + >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4)) + >>> fpc.flags.writeable + True + + It's possible to assign to copy-on-write array, but values are only + written into the memory copy of the array, and not written to disk: + + >>> fpc + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + >>> fpc[0,:] = 0 + >>> fpc + memmap([[ 0., 0., 0., 0.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + File on disk is unchanged: + + >>> fpr + memmap([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]], dtype=float32) + + Offset into a memmap: + + >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16) + >>> fpo + memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32) + + """ + + __array_priority__ = -100.0 + + def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0, + shape=None, order='C'): + # Import here to minimize 'import numpy' overhead + import mmap + import os.path + try: + mode = mode_equivalents[mode] + except KeyError as e: + if mode not in valid_filemodes: + all_modes = valid_filemodes + list(mode_equivalents.keys()) + raise ValueError( + f"mode must be one of {all_modes!r} (got {mode!r})" + ) from None + + if mode == 'w+' and shape is None: + raise ValueError("shape must be given if mode == 'w+'") + + if hasattr(filename, 'read'): + f_ctx = nullcontext(filename) + else: + f_ctx = open( + os.fspath(filename), + ('r' if mode == 'c' else mode) + 'b' + ) + + with f_ctx as fid: + fid.seek(0, 2) + flen = fid.tell() + descr = dtypedescr(dtype) + _dbytes = descr.itemsize + + if shape is None: + bytes = flen - offset + if bytes % _dbytes: + raise ValueError("Size of available data is not a " + "multiple of the data-type size.") + size = bytes // _dbytes + shape = (size,) + else: + if not isinstance(shape, (tuple, list)): + try: + shape = [operator.index(shape)] + except TypeError: + pass + shape = tuple(shape) + size = np.intp(1) # avoid overflows + for k in shape: + size *= k + + bytes = int(offset + size * _dbytes) + + if mode in ('w+', 'r+'): + # gh-27723 + # if bytes == 0, we write out 1 byte to allow empty memmap. + bytes = max(bytes, 1) + if flen < bytes: + fid.seek(bytes - 1, 0) + fid.write(b'\0') + fid.flush() + + if mode == 'c': + acc = mmap.ACCESS_COPY + elif mode == 'r': + acc = mmap.ACCESS_READ + else: + acc = mmap.ACCESS_WRITE + + start = offset - offset % mmap.ALLOCATIONGRANULARITY + bytes -= start + # bytes == 0 is problematic as in mmap length=0 maps the full file. 
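+            # (Illustrative: shape=(0,) at offset 4096 reaches here with
+            # bytes == 0, so the branch below maps one extra allocation
+            # granule and points the array at its tail instead.)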
+ # See PR gh-27723 for a more detailed explanation. + if bytes == 0 and start > 0: + bytes += mmap.ALLOCATIONGRANULARITY + start -= mmap.ALLOCATIONGRANULARITY + array_offset = offset - start + mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start) + + self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm, + offset=array_offset, order=order) + self._mmap = mm + self.offset = offset + self.mode = mode + + if isinstance(filename, os.PathLike): + # special case - if we were constructed with a pathlib.path, + # then filename is a path object, not a string + self.filename = filename.resolve() + elif hasattr(fid, "name") and isinstance(fid.name, str): + # py3 returns int for TemporaryFile().name + self.filename = os.path.abspath(fid.name) + # same as memmap copies (e.g. memmap + 1) + else: + self.filename = None + + return self + + def __array_finalize__(self, obj): + if hasattr(obj, '_mmap') and np.may_share_memory(self, obj): + self._mmap = obj._mmap + self.filename = obj.filename + self.offset = obj.offset + self.mode = obj.mode + else: + self._mmap = None + self.filename = None + self.offset = None + self.mode = None + + def flush(self): + """ + Write any changes in the array to the file on disk. + + For further information, see `memmap`. + + Parameters + ---------- + None + + See Also + -------- + memmap + + """ + if self.base is not None and hasattr(self.base, 'flush'): + self.base.flush() + + def __array_wrap__(self, arr, context=None, return_scalar=False): + arr = super().__array_wrap__(arr, context) + + # Return a memmap if a memmap was given as the output of the + # ufunc. Leave the arr class unchanged if self is not a memmap + # to keep original memmap subclasses behavior + if self is arr or type(self) is not memmap: + return arr + + # Return scalar instead of 0d memmap, e.g. for np.sum with + # axis=None (note that subclasses will not reach here) + if return_scalar: + return arr[()] + + # Return ndarray otherwise + return arr.view(np.ndarray) + + def __getitem__(self, index): + res = super().__getitem__(index) + if type(res) is memmap and res._mmap is None: + return res.view(type=ndarray) + return res diff --git a/python/numpy/_core/memmap.pyi b/python/numpy/_core/memmap.pyi new file mode 100644 index 000000000..0b3132840 --- /dev/null +++ b/python/numpy/_core/memmap.pyi @@ -0,0 +1,3 @@ +from numpy import memmap + +__all__ = ["memmap"] diff --git a/python/numpy/_core/multiarray.py b/python/numpy/_core/multiarray.py new file mode 100644 index 000000000..236ca7e7c --- /dev/null +++ b/python/numpy/_core/multiarray.py @@ -0,0 +1,1762 @@ +""" +Create the numpy._core.multiarray namespace for backward compatibility. +In v1.16 the multiarray and umath c-extension modules were merged into +a single _multiarray_umath extension module. So we replicate the old +namespace by importing from the extension module. + +""" + +import functools + +from . import _multiarray_umath, overrides +from ._multiarray_umath import * # noqa: F403 + +# These imports are needed for backward compatibility, +# do not change them. 
issue gh-15518 +# _get_ndarray_c_version is semi-public, on purpose not added to __all__ +from ._multiarray_umath import ( # noqa: F401 + _ARRAY_API, + _flagdict, + _get_madvise_hugepage, + _get_ndarray_c_version, + _monotonicity, + _place, + _reconstruct, + _set_madvise_hugepage, + _vec_string, + from_dlpack, +) + +__all__ = [ + '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS', + 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS', + 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI', + 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', + '_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string', + '_monotonicity', 'add_docstring', 'arange', 'array', 'asarray', + 'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount', + 'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast', + 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2', + 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data', + 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype', + 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', + 'frombuffer', 'fromfile', 'fromiter', 'fromstring', + 'get_handler_name', 'get_handler_version', 'inner', 'interp', + 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'vecdot', + 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', + 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', + 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', + 'set_typeDict', 'shares_memory', 'typeinfo', + 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros'] + +# For backward compatibility, make sure pickle imports +# these functions from here +_reconstruct.__module__ = 'numpy._core.multiarray' +scalar.__module__ = 'numpy._core.multiarray' + + +from_dlpack.__module__ = 'numpy' +arange.__module__ = 'numpy' +array.__module__ = 'numpy' +asarray.__module__ = 'numpy' +asanyarray.__module__ = 'numpy' +ascontiguousarray.__module__ = 'numpy' +asfortranarray.__module__ = 'numpy' +datetime_data.__module__ = 'numpy' +empty.__module__ = 'numpy' +frombuffer.__module__ = 'numpy' +fromfile.__module__ = 'numpy' +fromiter.__module__ = 'numpy' +frompyfunc.__module__ = 'numpy' +fromstring.__module__ = 'numpy' +may_share_memory.__module__ = 'numpy' +nested_iters.__module__ = 'numpy' +promote_types.__module__ = 'numpy' +zeros.__module__ = 'numpy' +normalize_axis_index.__module__ = 'numpy.lib.array_utils' +add_docstring.__module__ = 'numpy.lib' +compare_chararrays.__module__ = 'numpy.char' + + +def _override___module__(): + namespace_names = globals() + for ufunc_name in [ + 'absolute', 'arccos', 'arccosh', 'add', 'arcsin', 'arcsinh', 'arctan', + 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'invert', + 'left_shift', 'bitwise_or', 'right_shift', 'bitwise_xor', 'cbrt', + 'ceil', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', + 'divide', 'divmod', 'equal', 'exp', 'exp2', 'expm1', 'fabs', + 'float_power', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod', + 'frexp', 'gcd', 'greater', 'greater_equal', 'heaviside', 'hypot', + 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less', + 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp', + 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', + 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder', + 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive', + 'power', 'rad2deg', 'radians', 'reciprocal', 'rint', 'sign', 'signbit', 
+ 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', + 'trunc', 'vecdot', 'vecmat', + ]: + ufunc = namespace_names[ufunc_name] + ufunc.__module__ = "numpy" + ufunc.__qualname__ = ufunc_name + + +_override___module__() + + +# We can't verify dispatcher signatures because NumPy's C functions don't +# support introspection. +array_function_from_c_func_and_dispatcher = functools.partial( + overrides.array_function_from_dispatcher, + module='numpy', docs_from_dispatcher=True, verify=False) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like) +def empty_like( + prototype, dtype=None, order=None, subok=None, shape=None, *, device=None +): + """ + empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *, + device=None) + + Return a new array with the same shape and type as a given array. + + Parameters + ---------- + prototype : array_like + The shape and data-type of `prototype` define these same attributes + of the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `prototype` is Fortran + contiguous, 'C' otherwise. 'K' means match the layout of `prototype` + as closely as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `prototype`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of uninitialized (arbitrary) data with the same + shape and type as `prototype`. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + + Notes + ----- + Unlike other array creation functions (e.g. `zeros_like`, `ones_like`, + `full_like`), `empty_like` does not initialize the values of the array, + and may therefore be marginally faster. However, the values stored in the + newly allocated array are arbitrary. For reproducible behavior, be sure + to set each element of the array before reading. + + Examples + -------- + >>> import numpy as np + >>> a = ([1,2,3], [4,5,6]) # a is array-like + >>> np.empty_like(a) + array([[-1073741821, -1073741821, 3], # uninitialized + [ 0, 0, -1073741821]]) + >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) + >>> np.empty_like(a) + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized + [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) + + """ + return (prototype,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate) +def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): + """ + concatenate( + (a1, a2, ...), + axis=0, + out=None, + dtype=None, + casting="same_kind" + ) + + Join a sequence of arrays along an existing axis. + + Parameters + ---------- + a1, a2, ... 
: sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. If axis is None, + arrays are flattened before use. Default is 0. + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what concatenate would have returned if no + out argument were specified. + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.20.0 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + For a description of the options, please see :term:`casting`. + + .. versionadded:: 1.20.0 + + Returns + ------- + res : ndarray + The concatenated array. + + See Also + -------- + ma.concatenate : Concatenate function that preserves input masks. + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. + split : Split array into a list of multiple sub-arrays of equal size. + hsplit : Split array into multiple sub-arrays horizontally (column wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + stack : Stack a sequence of arrays along a new axis. + block : Assemble arrays from blocks. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + column_stack : Stack 1-D arrays as columns into a 2-D array. + + Notes + ----- + When one or more of the arrays to be concatenated is a MaskedArray, + this function will return a MaskedArray object instead of an ndarray, + but the input masks are *not* preserved. In cases where a MaskedArray + is expected as input, use the ma.concatenate function from the masked + array module instead. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> b = np.array([[5, 6]]) + >>> np.concatenate((a, b), axis=0) + array([[1, 2], + [3, 4], + [5, 6]]) + >>> np.concatenate((a, b.T), axis=1) + array([[1, 2, 5], + [3, 4, 6]]) + >>> np.concatenate((a, b), axis=None) + array([1, 2, 3, 4, 5, 6]) + + This function will not preserve masking of MaskedArray inputs. + + >>> a = np.ma.arange(3) + >>> a[1] = np.ma.masked + >>> b = np.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + array([2, 3, 4]) + >>> np.concatenate([a, b]) + masked_array(data=[0, 1, 2, 2, 3, 4], + mask=False, + fill_value=999999) + >>> np.ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) +def inner(a, b): + """ + inner(a, b, /) + + Inner product of two arrays. + + Ordinary inner product of vectors for 1-D arrays (without complex + conjugation), in higher dimensions a sum product over the last axes. + + Parameters + ---------- + a, b : array_like + If `a` and `b` are nonscalar, their last dimensions must match. 
+ + Returns + ------- + out : ndarray + If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` + + Raises + ------ + ValueError + If both `a` and `b` are nonscalar and their last dimensions have + different sizes. + + See Also + -------- + tensordot : Sum products over arbitrary axes. + dot : Generalised matrix product, using second last dimension of `b`. + vecdot : Vector dot product of two arrays. + einsum : Einstein summation convention. + + Notes + ----- + For vectors (1-D arrays) it computes the ordinary inner-product:: + + np.inner(a, b) = sum(a[:]*b[:]) + + More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: + + np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) + + or explicitly:: + + np.inner(a, b)[i0,...,ir-2,j0,...,js-2] + = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) + + In addition `a` or `b` may be scalars, in which case:: + + np.inner(a,b) = a*b + + Examples + -------- + Ordinary inner product for vectors: + + >>> import numpy as np + >>> a = np.array([1,2,3]) + >>> b = np.array([0,1,0]) + >>> np.inner(a, b) + 2 + + Some multidimensional examples: + + >>> a = np.arange(24).reshape((2,3,4)) + >>> b = np.arange(4) + >>> c = np.inner(a, b) + >>> c.shape + (2, 3) + >>> c + array([[ 14, 38, 62], + [ 86, 110, 134]]) + + >>> a = np.arange(2).reshape((1,1,2)) + >>> b = np.arange(6).reshape((3,2)) + >>> c = np.inner(a, b) + >>> c.shape + (1, 1, 3) + >>> c + array([[[1, 3, 5]]]) + + An example where `b` is a scalar: + + >>> np.inner(np.eye(2), 7) + array([[7., 0.], + [0., 7.]]) + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.where) +def where(condition, x=None, y=None): + """ + where(condition, [x, y], /) + + Return elements chosen from `x` or `y` depending on `condition`. + + .. note:: + When only `condition` is provided, this function is a shorthand for + ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be + preferred, as it behaves correctly for subclasses. The rest of this + documentation covers only the case where all three arguments are + provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : ndarray + An array with elements from `x` where `condition` is True, and elements + from `y` elsewhere. + + See Also + -------- + choose + nonzero : The function that is called when x and y are omitted + + Notes + ----- + If all the arrays are 1-D, `where` is equivalent to:: + + [xv if c else yv + for c, xv, yv in zip(condition, x, y)] + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10) + >>> a + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> np.where(a < 5, a, 10*a) + array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90]) + + This can be used on multidimensional arrays too: + + >>> np.where([[True, False], [True, True]], + ... [[1, 2], [3, 4]], + ... [[9, 8], [7, 6]]) + array([[1, 8], + [3, 4]]) + + The shapes of x, y, and the condition are broadcast together: + + >>> x, y = np.ogrid[:3, :4] + >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast + array([[10, 0, 0, 0], + [10, 11, 1, 1], + [10, 11, 12, 2]]) + + >>> a = np.array([[0, 1, 2], + ... [0, 2, 4], + ... 
[0, 3, 6]])
+ >>> np.where(a < 4, a, -1) # -1 is broadcast
+ array([[ 0, 1, 2],
+ [ 0, 2, -1],
+ [ 0, 3, -1]])
+ """
+ return (condition, x, y)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
+def lexsort(keys, axis=None):
+ """
+ lexsort(keys, axis=-1)
+
+ Perform an indirect stable sort using a sequence of keys.
+
+ Given multiple sorting keys, lexsort returns an array of integer indices
+ that describes the sort order by multiple keys. The last key in the
+ sequence is used for the primary sort order, ties are broken by the
+ second-to-last key, and so on.
+
+ Parameters
+ ----------
+ keys : (k, m, n, ...) array-like
+ The `k` keys to be sorted. The *last* key (e.g., the last
+ row if `keys` is a 2D array) is the primary sort key.
+ Each element of `keys` along the zeroth axis must be
+ an array-like object of the same shape.
+ axis : int, optional
+ Axis to be indirectly sorted. By default, sort over the last axis
+ of each sequence. Separate slices along `axis` are sorted
+ independently; see the last example.
+
+ Returns
+ -------
+ indices : (m, n, ...) ndarray of ints
+ Array of indices that sort the keys along the specified axis.
+
+ See Also
+ --------
+ argsort : Indirect sort.
+ ndarray.sort : In-place sort.
+ sort : Return a sorted copy of an array.
+
+ Examples
+ --------
+ Sort names: first by surname, then by name.
+
+ >>> import numpy as np
+ >>> surnames = ('Hertz', 'Galilei', 'Hertz')
+ >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
+ >>> ind = np.lexsort((first_names, surnames))
+ >>> ind
+ array([1, 2, 0])
+
+ >>> [surnames[i] + ", " + first_names[i] for i in ind]
+ ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
+
+ Sort according to two numerical keys, first by elements
+ of ``a``, then breaking ties according to elements of ``b``:
+
+ >>> a = [1, 5, 1, 4, 3, 4, 4] # First sequence
+ >>> b = [9, 4, 0, 4, 0, 2, 1] # Second sequence
+ >>> ind = np.lexsort((b, a)) # Sort by `a`, then by `b`
+ >>> ind
+ array([2, 0, 4, 6, 5, 3, 1])
+ >>> [(a[i], b[i]) for i in ind]
+ [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
+
+ Compare against `argsort`, which would sort each key independently.
+
+ >>> np.argsort((b, a), kind='stable')
+ array([[2, 4, 6, 5, 1, 3, 0],
+ [0, 2, 4, 3, 5, 6, 1]])
+
+ To sort lexicographically with `argsort`, we would need to provide a
+ structured array.
+
+ >>> x = np.array([(ai, bi) for ai, bi in zip(a, b)],
+ ... dtype = np.dtype([('x', int), ('y', int)]))
+ >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
+ array([2, 0, 4, 6, 5, 3, 1])
+
+ The zeroth axis of `keys` always corresponds with the sequence of keys,
+ so 2D arrays are treated just like other sequences of keys.
+
+ >>> arr = np.asarray([b, a])
+ >>> ind2 = np.lexsort(arr)
+ >>> np.testing.assert_equal(ind2, ind)
+
+ Accordingly, the `axis` parameter refers to an axis of *each* key, not of
+ the `keys` argument itself. For instance, the array ``arr`` is treated as
+ a sequence of two 1-D keys, so specifying ``axis=0`` is equivalent to
+ using the default axis, ``axis=-1``.
+
+ >>> np.testing.assert_equal(np.lexsort(arr, axis=0),
+ ... np.lexsort(arr, axis=-1))
+
+ For higher-dimensional arrays, the axis parameter begins to matter. The
+ resulting array has the same shape as each key, and the values are what
+ we would expect if `lexsort` were performed on corresponding slices
+ of the keys independently. For instance,
+
+ >>> x = [[1, 2, 3, 4],
+ ... [4, 3, 2, 1],
+ ... [2, 1, 4, 3]]
+ >>> y = [[2, 2, 1, 1],
+ ... [1, 2, 1, 2],
+ ...
[1, 1, 2, 1]] + >>> np.lexsort((x, y), axis=1) + array([[2, 3, 0, 1], + [2, 0, 3, 1], + [1, 0, 3, 2]]) + + Each row of the result is what we would expect if we were to perform + `lexsort` on the corresponding row of the keys: + + >>> for i in range(3): + ... print(np.lexsort((x[i], y[i]))) + [2 3 0 1] + [2 0 3 1] + [1 0 3 2] + + """ + if isinstance(keys, tuple): + return keys + else: + return (keys,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast) +def can_cast(from_, to, casting=None): + """ + can_cast(from_, to, casting='safe') + + Returns True if cast between data types can occur according to the + casting rule. + + Parameters + ---------- + from_ : dtype, dtype specifier, NumPy scalar, or array + Data type, NumPy scalar, or array to cast from. + to : dtype or dtype specifier + Data type to cast to. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + Returns + ------- + out : bool + True if cast can occur according to the casting rule. + + Notes + ----- + .. versionchanged:: 2.0 + This function does not support Python scalars anymore and does not + apply any value-based logic for 0-D arrays and NumPy scalars. + + See also + -------- + dtype, result_type + + Examples + -------- + Basic examples + + >>> import numpy as np + >>> np.can_cast(np.int32, np.int64) + True + >>> np.can_cast(np.float64, complex) + True + >>> np.can_cast(complex, float) + False + + >>> np.can_cast('i8', 'f8') + True + >>> np.can_cast('i8', 'f4') + False + >>> np.can_cast('i4', 'S4') + False + + """ + return (from_,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type) +def min_scalar_type(a): + """ + min_scalar_type(a, /) + + For scalar ``a``, returns the data type with the smallest size + and smallest scalar kind which can hold its value. For non-scalar + array ``a``, returns the vector's dtype unmodified. + + Floating point values are not demoted to integers, + and complex values are not demoted to floats. + + Parameters + ---------- + a : scalar or array_like + The value whose minimal data type is to be found. + + Returns + ------- + out : dtype + The minimal data type. + + See Also + -------- + result_type, promote_types, dtype, can_cast + + Examples + -------- + >>> import numpy as np + >>> np.min_scalar_type(10) + dtype('uint8') + + >>> np.min_scalar_type(-260) + dtype('int16') + + >>> np.min_scalar_type(3.1) + dtype('float16') + + >>> np.min_scalar_type(1e50) + dtype('float64') + + >>> np.min_scalar_type(np.arange(4,dtype='f8')) + dtype('float64') + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type) +def result_type(*arrays_and_dtypes): + """ + result_type(*arrays_and_dtypes) + + Returns the type that results from applying the NumPy + type promotion rules to the arguments. + + Type promotion in NumPy works similarly to the rules in languages + like C++, with some slight differences. When both scalars and + arrays are used, the array's type takes precedence and the actual value + of the scalar is taken into account. 
+ + For example, calculating 3*a, where a is an array of 32-bit floats, + intuitively should result in a 32-bit float output. If the 3 is a + 32-bit integer, the NumPy rules indicate it can't convert losslessly + into a 32-bit float, so a 64-bit float should be the result type. + By examining the value of the constant, '3', we see that it fits in + an 8-bit integer, which can be cast losslessly into the 32-bit float. + + Parameters + ---------- + arrays_and_dtypes : list of arrays and dtypes + The operands of some operation whose result type is needed. + + Returns + ------- + out : dtype + The result type. + + See also + -------- + dtype, promote_types, min_scalar_type, can_cast + + Notes + ----- + The specific algorithm used is as follows. + + Categories are determined by first checking which of boolean, + integer (int/uint), or floating point (float/complex) the maximum + kind of all the arrays and the scalars are. + + If there are only scalars or the maximum category of the scalars + is higher than the maximum category of the arrays, + the data types are combined with :func:`promote_types` + to produce the return value. + + Otherwise, `min_scalar_type` is called on each scalar, and + the resulting data types are all combined with :func:`promote_types` + to produce the return value. + + The set of int values is not a subset of the uint values for types + with the same number of bits, something not reflected in + :func:`min_scalar_type`, but handled as a special case in `result_type`. + + Examples + -------- + >>> import numpy as np + >>> np.result_type(3, np.arange(7, dtype='i1')) + dtype('int8') + + >>> np.result_type('i4', 'c8') + dtype('complex128') + + >>> np.result_type(3.0, -2) + dtype('float64') + + """ + return arrays_and_dtypes + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot) +def dot(a, b, out=None): + """ + dot(a, b, out=None) + + Dot product of two arrays. Specifically, + + - If both `a` and `b` are 1-D arrays, it is inner product of vectors + (without complex conjugation). + + - If both `a` and `b` are 2-D arrays, it is matrix multiplication, + but using :func:`matmul` or ``a @ b`` is preferred. + + - If either `a` or `b` is 0-D (scalar), it is equivalent to + :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is + preferred. + + - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over + the last axis of `a` and `b`. + + - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a + sum product over the last axis of `a` and the second-to-last axis of + `b`:: + + dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m]) + + It uses an optimized BLAS library when possible (see `numpy.linalg`). + + Parameters + ---------- + a : array_like + First argument. + b : array_like + Second argument. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of `a` and `b`. If `a` and `b` are both + scalars or both 1-D arrays then a scalar is returned; otherwise + an array is returned. + If `out` is given, then it is returned. 
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` is not the same size as
+ the second-to-last dimension of `b`.
+
+ See Also
+ --------
+ vdot : Complex-conjugating dot product.
+ vecdot : Vector dot product of two arrays.
+ tensordot : Sum products over arbitrary axes.
+ einsum : Einstein summation convention.
+ matmul : '@' operator as method with out parameter.
+ linalg.multi_dot : Chained dot product.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.dot(3, 4)
+ 12
+
+ Neither argument is complex-conjugated:
+
+ >>> np.dot([2j, 3j], [2j, 3j])
+ (-13+0j)
+
+ For 2-D arrays it is the matrix product:
+
+ >>> a = [[1, 0], [0, 1]]
+ >>> b = [[4, 1], [2, 2]]
+ >>> np.dot(a, b)
+ array([[4, 1],
+ [2, 2]])
+
+ >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
+ >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
+ >>> np.dot(a, b)[2,3,2,1,2,2]
+ 499128
+ >>> sum(a[2,3,2,:] * b[1,2,:,2])
+ 499128
+
+ """
+ return (a, b, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
+def vdot(a, b):
+ r"""
+ vdot(a, b, /)
+
+ Return the dot product of two vectors.
+
+ The `vdot` function handles complex numbers differently than `dot`:
+ if the first argument is complex, it is replaced by its complex conjugate
+ in the dot product calculation. `vdot` also handles multidimensional
+ arrays differently than `dot`: it does not perform a matrix product, but
+ flattens the arguments to 1-D arrays before taking a vector dot product.
+
+ Consequently, when the arguments are 2-D arrays of the same shape, this
+ function effectively returns their
+ `Frobenius inner product <https://en.wikipedia.org/wiki/Frobenius_inner_product>`_
+ (also known as the *trace inner product* or the *standard inner product*
+ on a vector space of matrices).
+
+ Parameters
+ ----------
+ a : array_like
+ If `a` is complex the complex conjugate is taken before calculation
+ of the dot product.
+ b : array_like
+ Second argument to the dot product.
+
+ Returns
+ -------
+ output : ndarray
+ Dot product of `a` and `b`. Can be an int, float, or
+ complex depending on the types of `a` and `b`.
+
+ See Also
+ --------
+ dot : Return the dot product without using the complex conjugate of the
+ first argument.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> a = np.array([1+2j,3+4j])
+ >>> b = np.array([5+6j,7+8j])
+ >>> np.vdot(a, b)
+ (70-8j)
+ >>> np.vdot(b, a)
+ (70+8j)
+
+ Note that higher-dimensional arrays are flattened!
+
+ >>> a = np.array([[1, 4], [5, 6]])
+ >>> b = np.array([[4, 1], [2, 2]])
+ >>> np.vdot(a, b)
+ 30
+ >>> np.vdot(b, a)
+ 30
+ >>> 1*4 + 4*1 + 5*2 + 6*2
+ 30
+
+ """ # noqa: E501
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
+def bincount(x, weights=None, minlength=None):
+ """
+ bincount(x, /, weights=None, minlength=0)
+
+ Count number of occurrences of each value in array of non-negative ints.
+
+ The number of bins (of size 1) is one larger than the largest value in
+ `x`. If `minlength` is specified, there will be at least this number
+ of bins in the output array (though it will be longer if necessary,
+ depending on the contents of `x`).
+ Each bin gives the number of occurrences of its index value in `x`.
+ If `weights` is specified the input array is weighted by it, i.e. if a
+ value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+ of ``out[n] += 1``.
+
+ Parameters
+ ----------
+ x : array_like, 1 dimension, nonnegative ints
+ Input array.
+ weights : array_like, optional
+ Weights, array of the same shape as `x`.
+ minlength : int, optional
+ A minimum number of bins for the output array.
+
+ Returns
+ -------
+ out : ndarray of ints
+ The result of binning the input array.
+ The length of `out` is equal to ``np.amax(x)+1``.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or contains elements with negative
+ values, or if `minlength` is negative.
+ TypeError
+ If the type of the input is float or complex.
+
+ See Also
+ --------
+ histogram, digitize, unique
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.bincount(np.arange(5))
+ array([1, 1, 1, 1, 1])
+ >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+ array([1, 3, 1, 1, 0, 0, 0, 1])
+
+ >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+ >>> np.bincount(x).size == np.amax(x)+1
+ True
+
+ The input array needs to be of integer dtype, otherwise a
+ TypeError is raised:
+
+ >>> np.bincount(np.arange(5, dtype=float))
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
+ according to the rule 'safe'
+
+ A possible use of ``bincount`` is to perform sums over
+ variable-size chunks of an array, using the ``weights`` keyword.
+
+ >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+ >>> x = np.array([0, 1, 1, 2, 2, 2])
+ >>> np.bincount(x, weights=w)
+ array([ 0.3, 0.7, 1.1])
+
+ """
+ return (x, weights)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
+def ravel_multi_index(multi_index, dims, mode=None, order=None):
+ """
+ ravel_multi_index(multi_index, dims, mode='raise', order='C')
+
+ Converts a tuple of index arrays into an array of flat
+ indices, applying boundary modes to the multi-index.
+
+ Parameters
+ ----------
+ multi_index : tuple of array_like
+ A tuple of integer arrays, one array for each dimension.
+ dims : tuple of ints
+ The shape of the array into which the indices from ``multi_index`` apply.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices are handled. Can specify
+ either one mode or a tuple of modes, one mode per index.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ In 'clip' mode, a negative index which would normally
+ wrap will clip to 0 instead.
+ order : {'C', 'F'}, optional
+ Determines whether the multi-index should be viewed as
+ indexing in row-major (C-style) or column-major
+ (Fortran-style) order.
+
+ Returns
+ -------
+ raveled_indices : ndarray
+ An array of indices into the flattened version of an array
+ of dimensions ``dims``.
+
+ See Also
+ --------
+ unravel_index
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> arr = np.array([[3,6,6],[4,5,1]])
+ >>> np.ravel_multi_index(arr, (7,6))
+ array([22, 41, 37])
+ >>> np.ravel_multi_index(arr, (7,6), order='F')
+ array([31, 41, 13])
+ >>> np.ravel_multi_index(arr, (4,6), mode='clip')
+ array([22, 23, 19])
+ >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
+ array([12, 13, 13])
+
+ >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
+ 1621
+ """
+ return multi_index
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
+def unravel_index(indices, shape=None, order=None):
+ """
+ unravel_index(indices, shape, order='C')
+
+ Converts a flat index or array of flat indices into a tuple
+ of coordinate arrays.
+
+ Parameters
+ ----------
+ indices : array_like
+ An integer array whose elements are indices into the flattened
+ version of an array of dimensions ``shape``.
Before version 1.6.0, + this function accepted just one index value. + shape : tuple of ints + The shape of the array to use for unraveling ``indices``. + order : {'C', 'F'}, optional + Determines whether the indices should be viewed as indexing in + row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + unraveled_coords : tuple of ndarray + Each array in the tuple has the same shape as the ``indices`` + array. + + See Also + -------- + ravel_multi_index + + Examples + -------- + >>> import numpy as np + >>> np.unravel_index([22, 41, 37], (7,6)) + (array([3, 6, 6]), array([4, 5, 1])) + >>> np.unravel_index([31, 41, 13], (7,6), order='F') + (array([3, 6, 6]), array([4, 5, 1])) + + >>> np.unravel_index(1621, (6,7,8,9)) + (3, 1, 4, 1) + + """ + return (indices,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) +def copyto(dst, src, casting=None, where=None): + """ + copyto(dst, src, casting='same_kind', where=True) + + Copies values from one array to another, broadcasting as necessary. + + Raises a TypeError if the `casting` rule is violated, and if + `where` is provided, it selects which elements to copy. + + Parameters + ---------- + dst : ndarray + The array into which values are copied. + src : array_like + The array from which values are copied. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when copying. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + where : array_like of bool, optional + A boolean array which is broadcasted to match the dimensions + of `dst`, and selects elements to copy from `src` to `dst` + wherever it contains the value True. + + Examples + -------- + >>> import numpy as np + >>> A = np.array([4, 5, 6]) + >>> B = [1, 2, 3] + >>> np.copyto(A, B) + >>> A + array([1, 2, 3]) + + >>> A = np.array([[1, 2, 3], [4, 5, 6]]) + >>> B = [[4, 5, 6], [7, 8, 9]] + >>> np.copyto(A, B) + >>> A + array([[4, 5, 6], + [7, 8, 9]]) + + """ + return (dst, src, where) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask) +def putmask(a, /, mask, values): + """ + putmask(a, mask, values) + + Changes elements of an array based on conditional and input values. + + Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``. + + If `values` is not the same size as `a` and `mask` then it will repeat. + This gives behavior different from ``a[mask] = values``. + + Parameters + ---------- + a : ndarray + Target array. + mask : array_like + Boolean mask array. It has to be the same shape as `a`. + values : array_like + Values to put into `a` where `mask` is True. If `values` is smaller + than `a` it will be repeated. 
+ + See Also + -------- + place, put, take, copyto + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2, 3) + >>> np.putmask(x, x>2, x**2) + >>> x + array([[ 0, 1, 2], + [ 9, 16, 25]]) + + If `values` is smaller than `a` it is repeated: + + >>> x = np.arange(5) + >>> np.putmask(x, x>1, [-33, -44]) + >>> x + array([ 0, 1, -33, -44, -33]) + + """ + return (a, mask, values) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits) +def packbits(a, axis=None, bitorder='big'): + """ + packbits(a, /, axis=None, bitorder='big') + + Packs the elements of a binary-valued array into bits in a uint8 array. + + The result is padded to full bytes by inserting zero bits at the end. + + Parameters + ---------- + a : array_like + An array of integers or booleans whose elements should be packed to + bits. + axis : int, optional + The dimension over which bit-packing is done. + ``None`` implies packing the flattened array. + bitorder : {'big', 'little'}, optional + The order of the input bits. 'big' will mimic bin(val), + ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will + reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``. + Defaults to 'big'. + + Returns + ------- + packed : ndarray + Array of type uint8 whose elements represent bits corresponding to the + logical (0 or nonzero) value of the input elements. The shape of + `packed` has the same number of dimensions as the input (unless `axis` + is None, in which case the output is 1-D). + + See Also + -------- + unpackbits: Unpacks elements of a uint8 array into a binary-valued output + array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[[1,0,1], + ... [0,1,0]], + ... [[1,1,0], + ... [0,0,1]]]) + >>> b = np.packbits(a, axis=-1) + >>> b + array([[[160], + [ 64]], + [[192], + [ 32]]], dtype=uint8) + + Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, + and 32 = 0010 0000. + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits) +def unpackbits(a, axis=None, count=None, bitorder='big'): + """ + unpackbits(a, /, axis=None, count=None, bitorder='big') + + Unpacks elements of a uint8 array into a binary-valued output array. + + Each element of `a` represents a bit-field that should be unpacked + into a binary-valued output array. The shape of the output array is + either 1-D (if `axis` is ``None``) or the same shape as the input + array with unpacking done along the axis specified. + + Parameters + ---------- + a : ndarray, uint8 type + Input array. + axis : int, optional + The dimension over which bit-unpacking is done. + ``None`` implies unpacking the flattened array. + count : int or None, optional + The number of elements to unpack along `axis`, provided as a way + of undoing the effect of packing a size that is not a multiple + of eight. A non-negative number means to only unpack `count` + bits. A negative number means to trim off that many bits from + the end. ``None`` means to unpack the entire array (the + default). Counts larger than the available number of bits will + add zero padding to the output. Negative counts must not + exceed the available number of bits. + bitorder : {'big', 'little'}, optional + The order of the returned bits. 'big' will mimic bin(val), + ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse + the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``. + Defaults to 'big'. + + Returns + ------- + unpacked : ndarray, uint8 type + The elements are binary-valued (0 or 1). 
+ + See Also + -------- + packbits : Packs the elements of a binary-valued array into bits in + a uint8 array. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[2], [7], [23]], dtype=np.uint8) + >>> a + array([[ 2], + [ 7], + [23]], dtype=uint8) + >>> b = np.unpackbits(a, axis=1) + >>> b + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8) + >>> c = np.unpackbits(a, axis=1, count=-3) + >>> c + array([[0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 1, 0]], dtype=uint8) + + >>> p = np.packbits(b, axis=0) + >>> np.unpackbits(p, axis=0) + array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8) + >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0])) + True + + """ + return (a,) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory) +def shares_memory(a, b, max_work=None): + """ + shares_memory(a, b, /, max_work=None) + + Determine if two arrays share memory. + + .. warning:: + + This function can be exponentially slow for some inputs, unless + `max_work` is set to zero or a positive integer. + If in doubt, use `numpy.may_share_memory` instead. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem (maximum number + of candidate solutions to consider). The following special + values are recognized: + + max_work=-1 (default) + The problem is solved exactly. In this case, the function returns + True only if there is an element shared between the arrays. Finding + the exact solution may take extremely long in some cases. + max_work=0 + Only the memory bounds of a and b are checked. + This is equivalent to using ``may_share_memory()``. + + Raises + ------ + numpy.exceptions.TooHardError + Exceeded max_work. + + Returns + ------- + out : bool + + See Also + -------- + may_share_memory + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3, 4]) + >>> np.shares_memory(x, np.array([5, 6, 7])) + False + >>> np.shares_memory(x[::2], x) + True + >>> np.shares_memory(x[::2], x[1::2]) + False + + Checking whether two arrays share memory is NP-complete, and + runtime may increase exponentially in the number of + dimensions. Hence, `max_work` should generally be set to a finite + number, as it is possible to construct examples that take + extremely long to run: + + >>> from numpy.lib.stride_tricks import as_strided + >>> x = np.zeros([192163377], dtype=np.int8) + >>> x1 = as_strided( + ... x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049)) + >>> x2 = as_strided( + ... x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1)) + >>> np.shares_memory(x1, x2, max_work=1000) + Traceback (most recent call last): + ... + numpy.exceptions.TooHardError: Exceeded max_work + + Running ``np.shares_memory(x1, x2)`` without `max_work` set takes + around 1 minute for this case. It is possible to find problems + that take still significantly longer. + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory) +def may_share_memory(a, b, max_work=None): + """ + may_share_memory(a, b, /, max_work=None) + + Determine if two arrays might share memory + + A return of True does not necessarily mean that the two arrays + share any element. It just means that they *might*. 
+ + Only the memory bounds of a and b are checked by default. + + Parameters + ---------- + a, b : ndarray + Input arrays + max_work : int, optional + Effort to spend on solving the overlap problem. See + `shares_memory` for details. Default for ``may_share_memory`` + is to do a bounds check. + + Returns + ------- + out : bool + + See Also + -------- + shares_memory + + Examples + -------- + >>> import numpy as np + >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) + False + >>> x = np.zeros([3, 4]) + >>> np.may_share_memory(x[:,0], x[:,1]) + True + + """ + return (a, b) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday) +def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): + """ + is_busday( + dates, + weekmask='1111100', + holidays=None, + busdaycal=None, + out=None + ) + + Calculates which of the given dates are valid days, and which are not. + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of bool, optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of bool + An array with the same shape as ``dates``, containing True for + each valid day, and False for each invalid day. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + busday_offset : Applies an offset counted in valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> import numpy as np + >>> # The weekdays are Friday, Saturday, and Monday + ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], + ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) + array([False, False, True]) + """ + return (dates, weekmask, holidays, out) + + +@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset) +def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, + busdaycal=None, out=None): + """ + busday_offset( + dates, + offsets, + roll='raise', + weekmask='1111100', + holidays=None, + busdaycal=None, + out=None + ) + + First adjusts the date to fall on a valid day according to + the ``roll`` rule, then applies offsets to the given dates + counted in valid days. + + Parameters + ---------- + dates : array_like of datetime64[D] + The array of dates to process. + offsets : array_like of int + The array of offsets, which is broadcast with ``dates``. + roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', \ + 'modifiedfollowing', 'modifiedpreceding'}, optional + How to treat dates that do not fall on a valid day. The default + is 'raise'. 
+ + * 'raise' means to raise an exception for an invalid day. + * 'nat' means to return a NaT (not-a-time) for an invalid day. + * 'forward' and 'following' mean to take the first valid day + later in time. + * 'backward' and 'preceding' mean to take the first valid day + earlier in time. + * 'modifiedfollowing' means to take the first valid day + later in time unless it is across a Month boundary, in which + case to take the first valid day earlier in time. + * 'modifiedpreceding' means to take the first valid day + earlier in time unless it is across a Month boundary, in which + case to take the first valid day later in time. + weekmask : str or array_like of bool, optional + A seven-element array indicating which of Monday through Sunday are + valid days. May be specified as a length-seven list or array, like + [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string + like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for + weekdays, optionally separated by white space. Valid abbreviations + are: Mon Tue Wed Thu Fri Sat Sun + holidays : array_like of datetime64[D], optional + An array of dates to consider as invalid dates. They may be + specified in any order, and NaT (not-a-time) dates are ignored. + This list is saved in a normalized form that is suited for + fast calculations of valid days. + busdaycal : busdaycalendar, optional + A `busdaycalendar` object which specifies the valid days. If this + parameter is provided, neither weekmask nor holidays may be + provided. + out : array of datetime64[D], optional + If provided, this array is filled with the result. + + Returns + ------- + out : array of datetime64[D] + An array with a shape from broadcasting ``dates`` and ``offsets`` + together, containing the dates with offsets applied. + + See Also + -------- + busdaycalendar : An object that specifies a custom set of valid days. + is_busday : Returns a boolean array indicating valid days. + busday_count : Counts how many valid days are in a half-open date range. + + Examples + -------- + >>> import numpy as np + >>> # First business day in October 2011 (not accounting for holidays) + ... np.busday_offset('2011-10', 0, roll='forward') + np.datetime64('2011-10-03') + >>> # Last business day in February 2012 (not accounting for holidays) + ... np.busday_offset('2012-03', -1, roll='forward') + np.datetime64('2012-02-29') + >>> # Third Wednesday in January 2011 + ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') + np.datetime64('2011-01-19') + >>> # 2012 Mother's Day in Canada and the U.S. + ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') + np.datetime64('2012-05-13') + + >>> # First business day on or after a date + ... np.busday_offset('2011-03-20', 0, roll='forward') + np.datetime64('2011-03-21') + >>> np.busday_offset('2011-03-22', 0, roll='forward') + np.datetime64('2011-03-22') + >>> # First business day after a date + ... 
np.busday_offset('2011-03-20', 1, roll='backward')
+ np.datetime64('2011-03-21')
+ >>> np.busday_offset('2011-03-22', 1, roll='backward')
+ np.datetime64('2011-03-23')
+ """
+ return (dates, offsets, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
+def busday_count(begindates, enddates, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_count(
+ begindates,
+ enddates,
+ weekmask='1111100',
+ holidays=[],
+ busdaycal=None,
+ out=None
+ )
+
+ Counts the number of valid days between `begindates` and
+ `enddates`, not including the day of `enddates`.
+
+ If ``enddates`` specifies a date value that is earlier than the
+ corresponding ``begindates`` date value, the count will be negative.
+
+ Parameters
+ ----------
+ begindates : array_like of datetime64[D]
+ The array of the first dates for counting.
+ enddates : array_like of datetime64[D]
+ The array of the end dates for counting, which are excluded
+ from the count themselves.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of int, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of int
+ An array with a shape from broadcasting ``begindates`` and ``enddates``
+ together, containing the number of valid days between
+ the begin and end dates.
+
+ See Also
+ --------
+ busdaycalendar : An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> # Number of weekdays in January 2011
+ ... np.busday_count('2011-01', '2011-02')
+ 21
+ >>> # Number of weekdays in 2011
+ >>> np.busday_count('2011', '2012')
+ 260
+ >>> # Number of Saturdays in 2011
+ ... np.busday_count('2011', '2012', weekmask='Sat')
+ 53
+ """
+ return (begindates, enddates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(
+ _multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+ """
+ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
+
+ Convert an array of datetimes into an array of strings.
+
+ Parameters
+ ----------
+ arr : array_like of datetime64
+ The array of UTC timestamps to format.
+ unit : str
+ One of None, 'auto', or
+ a :ref:`datetime unit <arrays.dtypes.dateunits>`.
+ timezone : {'naive', 'UTC', 'local'} or tzinfo
+ Timezone information to use when displaying the datetime. If 'UTC',
+ end with a Z to indicate UTC time. If 'local', convert to the local
+ timezone first, and suffix with a +-#### timezone offset.
If a tzinfo
+ object, then do as with 'local', but use the specified timezone.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
+ Casting to allow when changing between datetime units.
+
+ Returns
+ -------
+ str_arr : ndarray
+ An array of strings the same shape as `arr`.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> import pytz
+ >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
+ >>> d
+ array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
+ '2002-10-27T07:30'], dtype='datetime64[m]')
+
+ Setting the timezone to UTC shows the same information, but with a Z suffix
+
+ >>> np.datetime_as_string(d, timezone='UTC')
+ array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
+ '2002-10-27T07:30Z'], dtype='<U35')
+
+ Note that we picked datetimes that cross a DST boundary. Passing in a
+ ``pytz`` timezone object will print the appropriate offset
+
+ >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+ array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
+ '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
+
+ Passing in a unit will change the resolution
+
+ >>> np.datetime_as_string(d, unit='h')
+ array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
+ dtype='<U32')
+ >>> np.datetime_as_string(d, unit='s')
+ array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
+ '2002-10-27T07:30:00'], dtype='<U38')
+
+ 'casting' can be used to specify whether precision can be changed
+
+ >>> np.datetime_as_string(d, unit='h', casting='safe')
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot create a datetime string as units 'h' from a NumPy
+ datetime with units 'm' according to the rule 'safe'
+ """
+ return (arr,)
diff --git a/python/numpy/_core/multiarray.pyi b/python/numpy/_core/multiarray.pyi
new file mode 100644
index 000000000..13a3f0077
--- /dev/null
+++ b/python/numpy/_core/multiarray.pyi
@@ -0,0 +1,1285 @@
+# TODO: Sort out any and all missing functions in this namespace
+import datetime as dt
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+ Any,
+ ClassVar,
+ Final,
+ Protocol,
+ SupportsIndex,
+ TypeAlias,
+ TypedDict,
+ TypeVar,
+ Unpack,
+ final,
+ overload,
+ type_check_only,
+)
+from typing import (
+ Literal as L,
+)
+
+from _typeshed import StrOrBytesPath, SupportsLenAndGetItem
+from typing_extensions import CapsuleType
+
+import numpy as np
+from numpy import ( # type: ignore[attr-defined]
+ _AnyShapeT,
+ _CastingKind,
+ _CopyMode,
+ _ModeKind,
+ _NDIterFlagsKind,
+ _NDIterFlagsOp,
+ _OrderCF,
+ _OrderKACF,
+ _SupportsBuffer,
+ _SupportsFileMethods,
+ broadcast,
+ # Re-exports
+ busdaycalendar,
+ complexfloating,
+ correlate,
+ count_nonzero,
+ datetime64,
+ dtype,
+ flatiter,
+ float64,
+ floating,
+ from_dlpack,
+ generic,
+ int_,
+ interp,
+ intp,
+ matmul,
+ ndarray,
+ nditer,
+ signedinteger,
+ str_,
+ timedelta64,
+ # The rest
+ ufunc,
+ uint8,
+ unsignedinteger,
+ vecdot,
+)
+from numpy import (
+ einsum as c_einsum,
+)
+from numpy._typing import (
+ ArrayLike,
+ # DTypes
+ DTypeLike,
+ # Arrays
+ NDArray,
+ _ArrayLike,
+ _ArrayLikeBool_co,
+ _ArrayLikeBytes_co,
+ _ArrayLikeComplex_co,
+ _ArrayLikeDT64_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeInt_co,
+ _ArrayLikeObject_co,
+ _ArrayLikeStr_co,
+ _ArrayLikeTD64_co,
+ _ArrayLikeUInt_co,
+ _DTypeLike,
+ _FloatLike_co,
+ _IntLike_co,
+ _NestedSequence,
+ _ScalarLike_co,
+ # Shapes
+ _Shape,
+ _ShapeLike,
+ _SupportsArrayFunc,
+ _SupportsDType,
+ _TD64Like_co,
+)
+from numpy._typing._ufunc import (
+ _2PTuple,
+ _PyFunc_Nin1_Nout1,
+ _PyFunc_Nin1P_Nout2P,
+ _PyFunc_Nin2_Nout1,
+ _PyFunc_Nin3P_Nout1,
+)
+from numpy.lib._array_utils_impl import normalize_axis_index
+
+__all__ = [
+ "_ARRAY_API",
+ "ALLOW_THREADS",
+ "BUFSIZE",
+ "CLIP",
+ "DATETIMEUNITS",
+
"ITEM_HASOBJECT", + "ITEM_IS_POINTER", + "LIST_PICKLE", + "MAXDIMS", + "MAY_SHARE_BOUNDS", + "MAY_SHARE_EXACT", + "NEEDS_INIT", + "NEEDS_PYAPI", + "RAISE", + "USE_GETITEM", + "USE_SETITEM", + "WRAP", + "_flagdict", + "from_dlpack", + "_place", + "_reconstruct", + "_vec_string", + "_monotonicity", + "add_docstring", + "arange", + "array", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "bincount", + "broadcast", + "busday_count", + "busday_offset", + "busdaycalendar", + "can_cast", + "compare_chararrays", + "concatenate", + "copyto", + "correlate", + "correlate2", + "count_nonzero", + "c_einsum", + "datetime_as_string", + "datetime_data", + "dot", + "dragon4_positional", + "dragon4_scientific", + "dtype", + "empty", + "empty_like", + "error", + "flagsobj", + "flatiter", + "format_longfloat", + "frombuffer", + "fromfile", + "fromiter", + "fromstring", + "get_handler_name", + "get_handler_version", + "inner", + "interp", + "interp_complex", + "is_busday", + "lexsort", + "matmul", + "vecdot", + "may_share_memory", + "min_scalar_type", + "ndarray", + "nditer", + "nested_iters", + "normalize_axis_index", + "packbits", + "promote_types", + "putmask", + "ravel_multi_index", + "result_type", + "scalar", + "set_datetimeparse_function", + "set_typeDict", + "shares_memory", + "typeinfo", + "unpackbits", + "unravel_index", + "vdot", + "where", + "zeros", +] + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) +_ArrayT_co = TypeVar( + "_ArrayT_co", + bound=ndarray[Any, Any], + covariant=True, +) +_ReturnType = TypeVar("_ReturnType") +_IDType = TypeVar("_IDType") +_Nin = TypeVar("_Nin", bound=int) +_Nout = TypeVar("_Nout", bound=int) + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]] +_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]] + +# Valid time units +_UnitKind: TypeAlias = L[ + "Y", + "M", + "D", + "h", + "m", + "s", + "ms", + "us", "μs", + "ns", + "ps", + "fs", + "as", +] +_RollKind: TypeAlias = L[ # `raise` is deliberately excluded + "nat", + "forward", + "following", + "backward", + "preceding", + "modifiedfollowing", + "modifiedpreceding", +] + +@type_check_only +class _SupportsArray(Protocol[_ArrayT_co]): + def __array__(self, /) -> _ArrayT_co: ... + +@type_check_only +class _KwargsEmpty(TypedDict, total=False): + device: L["cpu"] | None + like: _SupportsArrayFunc | None + +@type_check_only +class _ConstructorEmpty(Protocol): + # 1-D shape + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array1D[float64]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[tuple[int], _DTypeT]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array1D[_ScalarT]: ... + @overload + def __call__( + self, + /, + shape: SupportsIndex, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array1D[Any]: ... + + # known shape + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_AnyShapeT, float64]: ... 
+ @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[_AnyShapeT, _DTypeT]: ... + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_AnyShapeT, _ScalarT]: ... + @overload + def __call__( + self, + /, + shape: _AnyShapeT, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> _Array[_AnyShapeT, Any]: ... + + # unknown shape + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[float64]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> ndarray[Any, _DTypeT]: ... + @overload + def __call__( + self, /, + shape: _ShapeLike, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[_ScalarT]: ... + @overload + def __call__( + self, + /, + shape: _ShapeLike, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], + ) -> NDArray[Any]: ... + +# using `Final` or `TypeAlias` will break stubtest +error = Exception + +# from ._multiarray_umath +ITEM_HASOBJECT: Final = 1 +LIST_PICKLE: Final = 2 +ITEM_IS_POINTER: Final = 4 +NEEDS_INIT: Final = 8 +NEEDS_PYAPI: Final = 16 +USE_GETITEM: Final = 32 +USE_SETITEM: Final = 64 +DATETIMEUNITS: Final[CapsuleType] +_ARRAY_API: Final[CapsuleType] +_flagdict: Final[dict[str, int]] +_monotonicity: Final[Callable[..., object]] +_place: Final[Callable[..., object]] +_reconstruct: Final[Callable[..., object]] +_vec_string: Final[Callable[..., object]] +correlate2: Final[Callable[..., object]] +dragon4_positional: Final[Callable[..., object]] +dragon4_scientific: Final[Callable[..., object]] +interp_complex: Final[Callable[..., object]] +set_datetimeparse_function: Final[Callable[..., object]] +def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ... +def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ... +def format_longfloat(x: np.longdouble, precision: int) -> str: ... +def scalar(dtype: _DTypeT, object: bytes | object = ...) -> ndarray[tuple[()], _DTypeT]: ... +def set_typeDict(dict_: dict[str, np.dtype], /) -> None: ... +typeinfo: Final[dict[str, np.dtype[np.generic]]] + +ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) +BUFSIZE: L[8192] +CLIP: L[0] +WRAP: L[1] +RAISE: L[2] +MAXDIMS: L[32] +MAY_SHARE_BOUNDS: L[0] +MAY_SHARE_EXACT: L[-1] +tracemalloc_domain: L[389047] + +zeros: Final[_ConstructorEmpty] +empty: Final[_ConstructorEmpty] + +@overload +def empty_like( + prototype: _ArrayT, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> _ArrayT: ... +@overload +def empty_like( + prototype: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def empty_like( + prototype: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... 
+@overload
+def empty_like(
+    prototype: Any,
+    dtype: DTypeLike | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    shape: _ShapeLike | None = ...,
+    *,
+    device: L["cpu"] | None = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def array(
+    object: _ArrayT,
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: L[True],
+    ndmin: int = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> _ArrayT: ...
+@overload
+def array(
+    object: _SupportsArray[_ArrayT],
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: L[True],
+    ndmin: L[0] = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> _ArrayT: ...
+@overload
+def array(
+    object: _ArrayLike[_ScalarT],
+    dtype: None = ...,
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def array(
+    object: Any,
+    dtype: _DTypeLike[_ScalarT],
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> NDArray[_ScalarT]: ...
+@overload
+def array(
+    object: Any,
+    dtype: DTypeLike | None = ...,
+    *,
+    copy: bool | _CopyMode | None = ...,
+    order: _OrderKACF = ...,
+    subok: bool = ...,
+    ndmin: int = ...,
+    like: _SupportsArrayFunc | None = ...,
+) -> NDArray[Any]: ...
+
+#
+@overload
+def ravel_multi_index(
+    multi_index: SupportsLenAndGetItem[_IntLike_co],
+    dims: _ShapeLike,
+    mode: _ModeKind | tuple[_ModeKind, ...] = "raise",
+    order: _OrderCF = "C",
+) -> intp: ...
+@overload
+def ravel_multi_index(
+    multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co],
+    dims: _ShapeLike,
+    mode: _ModeKind | tuple[_ModeKind, ...] = "raise",
+    order: _OrderCF = "C",
+) -> NDArray[intp]: ...
+
+#
+@overload
+def unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ...
+@overload
+def unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ...
+
+# NOTE: Allow any sequence of array-like objects
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: _ArrayLike[_ScalarT],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    casting: _CastingKind | None = ...
+) -> NDArray[_ScalarT]: ...
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: SupportsLenAndGetItem[ArrayLike],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+    *,
+    dtype: None = ...,
+    casting: _CastingKind | None = ...
+) -> NDArray[Any]: ...
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: SupportsLenAndGetItem[ArrayLike],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+    *,
+    dtype: _DTypeLike[_ScalarT],
+    casting: _CastingKind | None = ...
+) -> NDArray[_ScalarT]: ...
+@overload
+def concatenate(  # type: ignore[misc]
+    arrays: SupportsLenAndGetItem[ArrayLike],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: None = ...,
+    *,
+    dtype: DTypeLike | None = None,
+    casting: _CastingKind | None = ...
+) -> NDArray[Any]: ...
+@overload
+def concatenate(
+    arrays: SupportsLenAndGetItem[ArrayLike],
+    /,
+    axis: SupportsIndex | None = ...,
+    out: _ArrayT = ...,
+    *,
+    dtype: DTypeLike = ...,
+    casting: _CastingKind | None = ...
+) -> _ArrayT: ...
+
+def inner(
+    a: ArrayLike,
+    b: ArrayLike,
+    /,
+) -> Any: ...
+
+@overload
+def where(
+    condition: ArrayLike,
+    /,
+) -> tuple[NDArray[intp], ...]: ...
+@overload
+def where(
+    condition: ArrayLike,
+    x: ArrayLike,
+    y: ArrayLike,
+    /,
+) -> NDArray[Any]: ...
+
+def lexsort(
+    keys: ArrayLike,
+    axis: SupportsIndex | None = ...,
+) -> Any: ...
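+# Illustrative doctest sketch (not part of the stub): the two ``where``
+# overloads above correspond to the index-tuple and selection forms:
+#
+#   >>> import numpy as np
+#   >>> np.where(np.array([1, 0, 2]))              # 1-arg form
+#   (array([0, 2]),)
+#   >>> np.where(np.array([True, False]), 1, 0)    # 3-arg form
+#   array([1, 0])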
+ +def can_cast( + from_: ArrayLike | DTypeLike, + to: DTypeLike, + casting: _CastingKind | None = ..., +) -> bool: ... + +def min_scalar_type(a: ArrayLike, /) -> dtype: ... + +def result_type(*arrays_and_dtypes: ArrayLike | DTypeLike) -> dtype: ... + +@overload +def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ... +@overload +def dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ... + +@overload +def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... # type: ignore[misc] +@overload +def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ... +@overload +def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ... +@overload +def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ... + +def bincount( + x: ArrayLike, + /, + weights: ArrayLike | None = ..., + minlength: SupportsIndex = ..., +) -> NDArray[intp]: ... + +def copyto( + dst: NDArray[Any], + src: ArrayLike, + casting: _CastingKind | None = ..., + where: _ArrayLikeBool_co | None = ..., +) -> None: ... + +def putmask( + a: NDArray[Any], + /, + mask: _ArrayLikeBool_co, + values: ArrayLike, +) -> None: ... + +def packbits( + a: _ArrayLikeInt_co, + /, + axis: SupportsIndex | None = ..., + bitorder: L["big", "little"] = ..., +) -> NDArray[uint8]: ... + +def unpackbits( + a: _ArrayLike[uint8], + /, + axis: SupportsIndex | None = ..., + count: SupportsIndex | None = ..., + bitorder: L["big", "little"] = ..., +) -> NDArray[uint8]: ... + +def shares_memory( + a: object, + b: object, + /, + max_work: int | None = ..., +) -> bool: ... + +def may_share_memory( + a: object, + b: object, + /, + max_work: int | None = ..., +) -> bool: ... + +@overload +def asarray( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def asanyarray( + a: _ArrayT, # Preserve subclass-information + dtype: None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _ArrayT: ... +@overload +def asanyarray( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asanyarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... 
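+# As with ``asarray`` above, passing a concrete scalar type as ``dtype``
+# narrows the result to ``NDArray[_ScalarT]``; the overload below is the
+# dynamic fallback.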
+@overload +def asanyarray( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def ascontiguousarray( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ascontiguousarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ascontiguousarray( + a: Any, + dtype: DTypeLike | None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def asfortranarray( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asfortranarray( + a: Any, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asfortranarray( + a: Any, + dtype: DTypeLike | None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ... + +# `sep` is a de facto mandatory argument, as its default value is deprecated +@overload +def fromstring( + string: str | bytes, + dtype: None = ..., + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def fromstring( + string: str | bytes, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromstring( + string: str | bytes, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + *, + sep: str, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any], _ReturnType], /, + nin: L[1], + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any], _ReturnType], /, + nin: L[1], + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any, Any], _ReturnType], /, + nin: L[2], + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[[Any, Any], _ReturnType], /, + nin: L[2], + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[..., _ReturnType], /, + nin: _Nin, + nout: L[1], + *, + identity: None = ..., +) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ... +@overload +def frompyfunc( # type: ignore[overload-overlap] + func: Callable[..., _ReturnType], /, + nin: _Nin, + nout: L[1], + *, + identity: _IDType, +) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ... +@overload +def frompyfunc( + func: Callable[..., _2PTuple[_ReturnType]], /, + nin: _Nin, + nout: _Nout, + *, + identity: None = ..., +) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ... +@overload +def frompyfunc( + func: Callable[..., _2PTuple[_ReturnType]], /, + nin: _Nin, + nout: _Nout, + *, + identity: _IDType, +) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ... 
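+# Calls whose ``nin``/``nout`` are not statically known fall through to the
+# final catch-all overload below, which returns a plain ``ufunc``.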
+@overload +def frompyfunc( + func: Callable[..., Any], /, + nin: SupportsIndex, + nout: SupportsIndex, + *, + identity: object | None = ..., +) -> ufunc: ... + +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: None = ..., + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromfile( + file: StrOrBytesPath | _SupportsFileMethods, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + sep: str = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def fromiter( + iter: Iterable[Any], + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def fromiter( + iter: Iterable[Any], + dtype: DTypeLike, + count: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: None = ..., + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: _DTypeLike[_ScalarT], + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def frombuffer( + buffer: _SupportsBuffer, + dtype: DTypeLike | None = ..., + count: SupportsIndex = ..., + offset: SupportsIndex = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def arange( # type: ignore[misc] + stop: _IntLike_co, + /, *, + dtype: None = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[signedinteger]: ... +@overload +def arange( # type: ignore[misc] + start: _IntLike_co, + stop: _IntLike_co, + step: _IntLike_co = ..., + dtype: None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[signedinteger]: ... +@overload +def arange( # type: ignore[misc] + stop: _FloatLike_co, + /, *, + dtype: None = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[floating]: ... +@overload +def arange( # type: ignore[misc] + start: _FloatLike_co, + stop: _FloatLike_co, + step: _FloatLike_co = ..., + dtype: None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[floating]: ... +@overload +def arange( + stop: _TD64Like_co, + /, *, + dtype: None = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[timedelta64]: ... +@overload +def arange( + start: _TD64Like_co, + stop: _TD64Like_co, + step: _TD64Like_co = ..., + dtype: None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[timedelta64]: ... +@overload +def arange( # both start and stop must always be specified for datetime64 + start: datetime64, + stop: datetime64, + step: datetime64 = ..., + dtype: None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[datetime64]: ... 
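+# An explicit ``dtype`` bypasses the inferred-dtype overloads above:
+# e.g. ``arange(3, dtype=np.float32)`` is typed as a 1-D float32 array.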
+@overload +def arange( + stop: Any, + /, *, + dtype: _DTypeLike[_ScalarT], + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[_ScalarT]: ... +@overload +def arange( + start: Any, + stop: Any, + step: Any = ..., + dtype: _DTypeLike[_ScalarT] = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[_ScalarT]: ... +@overload +def arange( + stop: Any, /, + *, + dtype: DTypeLike | None = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[Any]: ... +@overload +def arange( + start: Any, + stop: Any, + step: Any = ..., + dtype: DTypeLike | None = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> _Array1D[Any]: ... + +def datetime_data( + dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /, +) -> tuple[str, int]: ... + +# The datetime functions perform unsafe casts to `datetime64[D]`, +# so a lot of different argument types are allowed here + +@overload +def busday_count( # type: ignore[misc] + begindates: _ScalarLike_co | dt.date, + enddates: _ScalarLike_co | dt.date, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> int_: ... +@overload +def busday_count( # type: ignore[misc] + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> NDArray[int_]: ... +@overload +def busday_count( + begindates: ArrayLike | dt.date | _NestedSequence[dt.date], + enddates: ArrayLike | dt.date | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: _ArrayT = ..., +) -> _ArrayT: ... + +# `roll="raise"` is (more or less?) equivalent to `casting="safe"` +@overload +def busday_offset( # type: ignore[misc] + dates: datetime64 | dt.date, + offsets: _TD64Like_co | dt.timedelta, + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> datetime64: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> NDArray[datetime64]: ... +@overload +def busday_offset( # type: ignore[misc] + dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date], + offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta], + roll: L["raise"] = ..., + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: _ArrayT = ..., +) -> _ArrayT: ... 
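+# Sketch of the two calling styles (values assume the default Mon-Fri
+# weekmask; illustrative only):
+#
+#   >>> np.busday_offset('2011-03-20', 0, roll='forward')  # Sunday -> Monday
+#   np.datetime64('2011-03-21')
+#   >>> np.busday_offset('2011-03-22', 1)  # roll defaults to 'raise'
+#   np.datetime64('2011-03-23')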
+@overload +def busday_offset( # type: ignore[misc] + dates: _ScalarLike_co | dt.date, + offsets: _ScalarLike_co | dt.timedelta, + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> datetime64: ... +@overload +def busday_offset( # type: ignore[misc] + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> NDArray[datetime64]: ... +@overload +def busday_offset( + dates: ArrayLike | dt.date | _NestedSequence[dt.date], + offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta], + roll: _RollKind, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: _ArrayT = ..., +) -> _ArrayT: ... + +@overload +def is_busday( # type: ignore[misc] + dates: _ScalarLike_co | dt.date, + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> np.bool: ... +@overload +def is_busday( # type: ignore[misc] + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def is_busday( + dates: ArrayLike | _NestedSequence[dt.date], + weekmask: ArrayLike = ..., + holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ..., + busdaycal: busdaycalendar | None = ..., + out: _ArrayT = ..., +) -> _ArrayT: ... + +@overload +def datetime_as_string( # type: ignore[misc] + arr: datetime64 | dt.date, + unit: L["auto"] | _UnitKind | None = ..., + timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., + casting: _CastingKind = ..., +) -> str_: ... +@overload +def datetime_as_string( + arr: _ArrayLikeDT64_co | _NestedSequence[dt.date], + unit: L["auto"] | _UnitKind | None = ..., + timezone: L["naive", "UTC", "local"] | dt.tzinfo = ..., + casting: _CastingKind = ..., +) -> NDArray[str_]: ... + +@overload +def compare_chararrays( + a1: _ArrayLikeStr_co, + a2: _ArrayLikeStr_co, + cmp: L["<", "<=", "==", ">=", ">", "!="], + rstrip: bool, +) -> NDArray[np.bool]: ... +@overload +def compare_chararrays( + a1: _ArrayLikeBytes_co, + a2: _ArrayLikeBytes_co, + cmp: L["<", "<=", "==", ">=", ">", "!="], + rstrip: bool, +) -> NDArray[np.bool]: ... + +def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ... + +_GetItemKeys: TypeAlias = L[ + "C", "CONTIGUOUS", "C_CONTIGUOUS", + "F", "FORTRAN", "F_CONTIGUOUS", + "W", "WRITEABLE", + "B", "BEHAVED", + "O", "OWNDATA", + "A", "ALIGNED", + "X", "WRITEBACKIFCOPY", + "CA", "CARRAY", + "FA", "FARRAY", + "FNC", + "FORC", +] +_SetItemKeys: TypeAlias = L[ + "A", "ALIGNED", + "W", "WRITEABLE", + "X", "WRITEBACKIFCOPY", +] + +@final +class flagsobj: + __hash__: ClassVar[None] # type: ignore[assignment] + aligned: bool + # NOTE: deprecated + # updateifcopy: bool + writeable: bool + writebackifcopy: bool + @property + def behaved(self) -> bool: ... + @property + def c_contiguous(self) -> bool: ... + @property + def carray(self) -> bool: ... + @property + def contiguous(self) -> bool: ... 
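+    # The remaining layout properties report Fortran-order information:
+    # ``fnc`` is "F-contiguous and not C-contiguous", ``forc`` is
+    # "F-contiguous or C-contiguous".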
+ @property + def f_contiguous(self) -> bool: ... + @property + def farray(self) -> bool: ... + @property + def fnc(self) -> bool: ... + @property + def forc(self) -> bool: ... + @property + def fortran(self) -> bool: ... + @property + def num(self) -> int: ... + @property + def owndata(self) -> bool: ... + def __getitem__(self, key: _GetItemKeys) -> bool: ... + def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ... + +def nested_iters( + op: ArrayLike | Sequence[ArrayLike], + axes: Sequence[Sequence[SupportsIndex]], + flags: Sequence[_NDIterFlagsKind] | None = ..., + op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ..., + op_dtypes: DTypeLike | Sequence[DTypeLike] = ..., + order: _OrderKACF = ..., + casting: _CastingKind = ..., + buffersize: SupportsIndex = ..., +) -> tuple[nditer, ...]: ... diff --git a/python/numpy/_core/numeric.py b/python/numpy/_core/numeric.py new file mode 100644 index 000000000..964447fa0 --- /dev/null +++ b/python/numpy/_core/numeric.py @@ -0,0 +1,2760 @@ +import builtins +import functools +import itertools +import math +import numbers +import operator +import sys +import warnings + +import numpy as np +from numpy.exceptions import AxisError + +from . import multiarray, numerictypes, overrides, shape_base, umath +from . import numerictypes as nt +from ._ufunc_config import errstate +from .multiarray import ( # noqa: F401 + ALLOW_THREADS, + BUFSIZE, + CLIP, + MAXDIMS, + MAY_SHARE_BOUNDS, + MAY_SHARE_EXACT, + RAISE, + WRAP, + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + broadcast, + can_cast, + concatenate, + copyto, + dot, + dtype, + empty, + empty_like, + flatiter, + from_dlpack, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + matmul, + may_share_memory, + min_scalar_type, + ndarray, + nditer, + nested_iters, + normalize_axis_index, + promote_types, + putmask, + result_type, + shares_memory, + vdot, + vecdot, + where, + zeros, +) +from .overrides import finalize_array_function_like, set_module +from .umath import NAN, PINF, invert, multiply, sin + +bitwise_not = invert +ufunc = type(sin) +newaxis = None + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc', + 'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray', + 'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype', + 'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where', + 'argwhere', 'copyto', 'concatenate', 'lexsort', 'astype', + 'can_cast', 'promote_types', 'min_scalar_type', + 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like', + 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll', + 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', + 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction', + 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', + 'identity', 'allclose', 'putmask', + 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not', + 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory', + 'may_share_memory'] + + +def _zeros_like_dispatcher( + a, dtype=None, order=None, subok=None, shape=None, *, device=None +): + return (a,) + + +@array_function_dispatch(_zeros_like_dispatcher) +def zeros_like( + a, dtype=None, order='K', subok=True, shape=None, *, device=None +): + """ + Return an array of zeros with the same shape and type as a given array. 
+ + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of zeros with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + full_like : Return a new array with shape of input filled with value. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.zeros_like(x) + array([[0, 0, 0], + [0, 0, 0]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.zeros_like(y) + array([0., 0., 0.]) + + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + # needed instead of a 0 to get same result as zeros for string dtypes + z = zeros(1, dtype=res.dtype) + multiarray.copyto(res, z, casting='unsafe') + return res + + +@finalize_array_function_like +@set_module('numpy') +def ones(shape, dtype=None, order='C', *, device=None, like=None): + """ + Return a new array of given shape and type, filled with ones. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + dtype : data-type, optional + The desired data-type for the array, e.g., `numpy.int8`. Default is + `numpy.float64`. + order : {'C', 'F'}, optional, default: C + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of ones with the given shape, dtype, and order. + + See Also + -------- + ones_like : Return an array of ones with shape and type of input. + empty : Return a new uninitialized array. + zeros : Return a new array setting values to zero. + full : Return a new array of given shape filled with value. 
+ + Examples + -------- + >>> import numpy as np + >>> np.ones(5) + array([1., 1., 1., 1., 1.]) + + >>> np.ones((5,), dtype=int) + array([1, 1, 1, 1, 1]) + + >>> np.ones((2, 1)) + array([[1.], + [1.]]) + + >>> s = (2,2) + >>> np.ones(s) + array([[1., 1.], + [1., 1.]]) + + """ + if like is not None: + return _ones_with_like( + like, shape, dtype=dtype, order=order, device=device + ) + + a = empty(shape, dtype, order, device=device) + multiarray.copyto(a, 1, casting='unsafe') + return a + + +_ones_with_like = array_function_dispatch()(ones) + + +def _ones_like_dispatcher( + a, dtype=None, order=None, subok=None, shape=None, *, device=None +): + return (a,) + + +@array_function_dispatch(_ones_like_dispatcher) +def ones_like( + a, dtype=None, order='K', subok=True, shape=None, *, device=None +): + """ + Return an array of ones with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of ones with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full_like : Return a new array with shape of input filled with value. + ones : Return a new array setting values to one. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6) + >>> x = x.reshape((2, 3)) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.ones_like(x) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> y = np.arange(3, dtype=float) + >>> y + array([0., 1., 2.]) + >>> np.ones_like(y) + array([1., 1., 1.]) + + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + multiarray.copyto(res, 1, casting='unsafe') + return res + + +def _full_dispatcher( + shape, fill_value, dtype=None, order=None, *, device=None, like=None +): + return (like,) + + +@finalize_array_function_like +@set_module('numpy') +def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None): + """ + Return a new array of given shape and type, filled with `fill_value`. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the new array, e.g., ``(2, 3)`` or ``2``. + fill_value : scalar or array_like + Fill value. + dtype : data-type, optional + The desired data-type for the array The default, None, means + ``np.array(fill_value).dtype``. + order : {'C', 'F'}, optional + Whether to store multidimensional data in C- or Fortran-contiguous + (row- or column-wise) order in memory. 
+ device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the given shape, dtype, and order. + + See Also + -------- + full_like : Return a new array with shape of input filled with value. + empty : Return a new uninitialized array. + ones : Return a new array setting values to one. + zeros : Return a new array setting values to zero. + + Examples + -------- + >>> import numpy as np + >>> np.full((2, 2), np.inf) + array([[inf, inf], + [inf, inf]]) + >>> np.full((2, 2), 10) + array([[10, 10], + [10, 10]]) + + >>> np.full((2, 2), [1, 2]) + array([[1, 2], + [1, 2]]) + + """ + if like is not None: + return _full_with_like( + like, shape, fill_value, dtype=dtype, order=order, device=device + ) + + if dtype is None: + fill_value = asarray(fill_value) + dtype = fill_value.dtype + a = empty(shape, dtype, order, device=device) + multiarray.copyto(a, fill_value, casting='unsafe') + return a + + +_full_with_like = array_function_dispatch()(full) + + +def _full_like_dispatcher( + a, fill_value, dtype=None, order=None, subok=None, shape=None, + *, device=None +): + return (a,) + + +@array_function_dispatch(_full_like_dispatcher) +def full_like( + a, fill_value, dtype=None, order='K', subok=True, shape=None, + *, device=None +): + """ + Return a full array with the same shape and type as a given array. + + Parameters + ---------- + a : array_like + The shape and data-type of `a` define these same attributes of + the returned array. + fill_value : array_like + Fill value. + dtype : data-type, optional + Overrides the data type of the result. + order : {'C', 'F', 'A', or 'K'}, optional + Overrides the memory layout of the result. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. + subok : bool, optional. + If True, then the newly created array will use the sub-class + type of `a`, otherwise it will be a base-class array. Defaults + to True. + shape : int or sequence of ints, optional. + Overrides the shape of the result. If order='K' and the number of + dimensions is unchanged, will try to keep order, otherwise, + order='C' is implied. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + Array of `fill_value` with the same shape and type as `a`. + + See Also + -------- + empty_like : Return an empty array with shape and type of input. + ones_like : Return an array of ones with shape and type of input. + zeros_like : Return an array of zeros with shape and type of input. + full : Return a new array of given shape filled with value. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6, dtype=int) + >>> np.full_like(x, 1) + array([1, 1, 1, 1, 1, 1]) + >>> np.full_like(x, 0.1) + array([0, 0, 0, 0, 0, 0]) + >>> np.full_like(x, 0.1, dtype=np.double) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + >>> np.full_like(x, np.nan, dtype=np.double) + array([nan, nan, nan, nan, nan, nan]) + + >>> y = np.arange(6, dtype=np.double) + >>> np.full_like(y, 0.1) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + + >>> y = np.zeros([2, 2, 3], dtype=int) + >>> np.full_like(y, [0, 0, 255]) + array([[[ 0, 0, 255], + [ 0, 0, 255]], + [[ 0, 0, 255], + [ 0, 0, 255]]]) + """ + res = empty_like( + a, dtype=dtype, order=order, subok=subok, shape=shape, device=device + ) + multiarray.copyto(res, fill_value, casting='unsafe') + return res + + +def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_count_nonzero_dispatcher) +def count_nonzero(a, axis=None, *, keepdims=False): + """ + Counts the number of non-zero values in the array ``a``. + + The word "non-zero" is in reference to the Python 2.x + built-in method ``__nonzero__()`` (renamed ``__bool__()`` + in Python 3.x) of Python objects that tests an object's + "truthfulness". For example, any number is considered + truthful if it is nonzero, whereas any string is considered + truthful if it is not the empty string. Thus, this function + (recursively) counts how many elements in ``a`` (and in + sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()`` + method evaluated to ``True``. + + Parameters + ---------- + a : array_like + The array for which to count non-zeros. + axis : int or tuple, optional + Axis or tuple of axes along which to count non-zeros. + Default is None, meaning that non-zeros will be counted + along a flattened version of ``a``. + keepdims : bool, optional + If this is set to True, the axes that are counted are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + Returns + ------- + count : int or array of int + Number of non-zero values in the array along a given axis. + Otherwise, the total number of non-zero values in the array + is returned. + + See Also + -------- + nonzero : Return the coordinates of all the non-zero values. + + Examples + -------- + >>> import numpy as np + >>> np.count_nonzero(np.eye(4)) + 4 + >>> a = np.array([[0, 1, 7, 0], + ... [3, 0, 2, 19]]) + >>> np.count_nonzero(a) + 5 + >>> np.count_nonzero(a, axis=0) + array([1, 1, 2, 1]) + >>> np.count_nonzero(a, axis=1) + array([2, 3]) + >>> np.count_nonzero(a, axis=1, keepdims=True) + array([[2], + [3]]) + """ + if axis is None and not keepdims: + return multiarray.count_nonzero(a) + + a = asanyarray(a) + + # TODO: this works around .astype(bool) not working properly (gh-9847) + if np.issubdtype(a.dtype, np.character): + a_bool = a != a.dtype.type() + else: + a_bool = a.astype(np.bool, copy=False) + + return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims) + + +@set_module('numpy') +def isfortran(a): + """ + Check if the array is Fortran contiguous but *not* C contiguous. + + This function is obsolete. If you only want to check if an array is Fortran + contiguous use ``a.flags.f_contiguous`` instead. + + Parameters + ---------- + a : ndarray + Input array. + + Returns + ------- + isfortran : bool + Returns True if the array is Fortran contiguous but *not* C contiguous. 
+ + + Examples + -------- + + np.array allows to specify whether the array is written in C-contiguous + order (last index varies the fastest), or FORTRAN-contiguous order in + memory (first index varies the fastest). + + >>> import numpy as np + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + + >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') + >>> b + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(b) + True + + + The transpose of a C-ordered array is a FORTRAN-ordered array. + + >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') + >>> a + array([[1, 2, 3], + [4, 5, 6]]) + >>> np.isfortran(a) + False + >>> b = a.T + >>> b + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.isfortran(b) + True + + C-ordered arrays evaluate as False even if they are also FORTRAN-ordered. + + >>> np.isfortran(np.array([1, 2], order='F')) + False + + """ + return a.flags.fnc + + +def _argwhere_dispatcher(a): + return (a,) + + +@array_function_dispatch(_argwhere_dispatcher) +def argwhere(a): + """ + Find the indices of array elements that are non-zero, grouped by element. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + index_array : (N, a.ndim) ndarray + Indices of elements that are non-zero. Indices are grouped by element. + This array will have shape ``(N, a.ndim)`` where ``N`` is the number of + non-zero items. + + See Also + -------- + where, nonzero + + Notes + ----- + ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``, + but produces a result of the correct shape for a 0D array. + + The output of ``argwhere`` is not suitable for indexing arrays. + For this purpose use ``nonzero(a)`` instead. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(6).reshape(2,3) + >>> x + array([[0, 1, 2], + [3, 4, 5]]) + >>> np.argwhere(x>1) + array([[0, 2], + [1, 0], + [1, 1], + [1, 2]]) + + """ + # nonzero does not behave well on 0d, so promote to 1d + if np.ndim(a) == 0: + a = shape_base.atleast_1d(a) + # then remove the added dimension + return argwhere(a)[:, :0] + return transpose(nonzero(a)) + + +def _flatnonzero_dispatcher(a): + return (a,) + + +@array_function_dispatch(_flatnonzero_dispatcher) +def flatnonzero(a): + """ + Return indices that are non-zero in the flattened version of a. + + This is equivalent to ``np.nonzero(np.ravel(a))[0]``. + + Parameters + ---------- + a : array_like + Input data. + + Returns + ------- + res : ndarray + Output array, containing the indices of the elements of ``a.ravel()`` + that are non-zero. + + See Also + -------- + nonzero : Return the indices of the non-zero elements of the input array. + ravel : Return a 1-D array containing the elements of the input array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(-2, 3) + >>> x + array([-2, -1, 0, 1, 2]) + >>> np.flatnonzero(x) + array([0, 1, 3, 4]) + + Use the indices of the non-zero elements as an index array to extract + these elements: + + >>> x.ravel()[np.flatnonzero(x)] + array([-2, -1, 1, 2]) + + """ + return np.nonzero(np.ravel(a))[0] + + +def _correlate_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_correlate_dispatcher) +def correlate(a, v, mode='valid'): + r""" + Cross-correlation of two 1-dimensional sequences. + + This function computes the correlation as generally defined in signal + processing texts [1]_: + + .. 
math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n + + with a and v sequences being zero-padded where necessary and + :math:`\overline v` denoting complex conjugation. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + + Returns + ------- + out : ndarray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + convolve : Discrete, linear convolution of two one-dimensional sequences. + scipy.signal.correlate : uses FFT which has superior performance + on large arrays. + + Notes + ----- + The definition of correlation above is not unique and sometimes + correlation may be defined differently. Another common definition is [1]_: + + .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}} + + which is related to :math:`c_k` by :math:`c'_k = c_{-k}`. + + `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) + because it does not use the FFT to compute the convolution; in that case, + `scipy.signal.correlate` might be preferable. + + References + ---------- + .. [1] Wikipedia, "Cross-correlation", + https://en.wikipedia.org/wiki/Cross-correlation + + Examples + -------- + >>> import numpy as np + >>> np.correlate([1, 2, 3], [0, 1, 0.5]) + array([3.5]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") + array([2. , 3.5, 3. ]) + >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") + array([0.5, 2. , 3.5, 3. , 0. ]) + + Using complex sequences: + + >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full') + array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ]) + + Note that you get the time reversed, complex conjugated result + (:math:`\overline{c_{-k}}`) when the two input sequences a and v change + places: + + >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full') + array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j]) + + """ + return multiarray.correlate2(a, v, mode) + + +def _convolve_dispatcher(a, v, mode=None): + return (a, v) + + +@array_function_dispatch(_convolve_dispatcher) +def convolve(a, v, mode='full'): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + The convolution operator is often seen in signal processing, where it + models the effect of a linear time-invariant system on a signal [1]_. In + probability theory, the sum of two independent random variables is + distributed according to the convolution of their individual + distributions. + + If `v` is longer than `a`, the arrays are swapped before computation. + + Parameters + ---------- + a : (N,) array_like + First one-dimensional input array. + v : (M,) array_like + Second one-dimensional input array. + mode : {'full', 'valid', 'same'}, optional + 'full': + By default, mode is 'full'. This returns the convolution + at each point of overlap, with an output shape of (N+M-1,). At + the end-points of the convolution, the signals do not overlap + completely, and boundary effects may be seen. + + 'same': + Mode 'same' returns output of length ``max(M, N)``. Boundary + effects are still visible. + + 'valid': + Mode 'valid' returns output of length + ``max(M, N) - min(M, N) + 1``. The convolution product is only given + for points where the signals overlap completely. Values outside + the signal boundary have no effect. + + Returns + ------- + out : ndarray + Discrete, linear convolution of `a` and `v`. 
+ + See Also + -------- + scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier + Transform. + scipy.linalg.toeplitz : Used to construct the convolution operator. + polymul : Polynomial multiplication. Same output as convolve, but also + accepts poly1d objects as input. + + Notes + ----- + The discrete convolution operation is defined as + + .. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m} + + It can be shown that a convolution :math:`x(t) * y(t)` in time/space + is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier + domain, after appropriate padding (padding is necessary to prevent + circular convolution). Since multiplication is more efficient (faster) + than convolution, the function `scipy.signal.fftconvolve` exploits the + FFT to calculate the convolution of large data-sets. + + References + ---------- + .. [1] Wikipedia, "Convolution", + https://en.wikipedia.org/wiki/Convolution + + Examples + -------- + Note how the convolution operator flips the second array + before "sliding" the two across one another: + + >>> import numpy as np + >>> np.convolve([1, 2, 3], [0, 1, 0.5]) + array([0. , 1. , 2.5, 4. , 1.5]) + + Only return the middle values of the convolution. + Contains boundary effects, where zeros are taken + into account: + + >>> np.convolve([1,2,3],[0,1,0.5], 'same') + array([1. , 2.5, 4. ]) + + The two arrays are of the same length, so there + is only one position where they completely overlap: + + >>> np.convolve([1,2,3],[0,1,0.5], 'valid') + array([2.5]) + + """ + a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1) + if (len(v) > len(a)): + a, v = v, a + if len(a) == 0: + raise ValueError('a cannot be empty') + if len(v) == 0: + raise ValueError('v cannot be empty') + return multiarray.correlate(a, v[::-1], mode) + + +def _outer_dispatcher(a, b, out=None): + return (a, b, out) + + +@array_function_dispatch(_outer_dispatcher) +def outer(a, b, out=None): + """ + Compute the outer product of two vectors. + + Given two vectors `a` and `b` of length ``M`` and ``N``, respectively, + the outer product [1]_ is:: + + [[a_0*b_0 a_0*b_1 ... a_0*b_{N-1} ] + [a_1*b_0 . + [ ... . + [a_{M-1}*b_0 a_{M-1}*b_{N-1} ]] + + Parameters + ---------- + a : (M,) array_like + First input vector. Input is flattened if + not already 1-dimensional. + b : (N,) array_like + Second input vector. Input is flattened if + not already 1-dimensional. + out : (M, N) ndarray, optional + A location where the result is stored + + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = a[i] * b[j]`` + + See also + -------- + inner + einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. + ufunc.outer : A generalization to dimensions other than 1D and other + operations. ``np.multiply.outer(a.ravel(), b.ravel())`` + is the equivalent. + linalg.outer : An Array API compatible variation of ``np.outer``, + which accepts 1-dimensional inputs only. + tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))`` + is the equivalent. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd + ed., Baltimore, MD, Johns Hopkins University Press, 1996, + pg. 8. 
+
+    Examples
+    --------
+    Make a (*very* coarse) grid for computing a Mandelbrot set:
+
+    >>> import numpy as np
+    >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
+    >>> rl
+    array([[-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.],
+           [-2., -1.,  0.,  1.,  2.]])
+    >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
+    >>> im
+    array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
+           [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
+           [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
+           [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
+           [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
+    >>> grid = rl + im
+    >>> grid
+    array([[-2.+2.j, -1.+2.j,  0.+2.j,  1.+2.j,  2.+2.j],
+           [-2.+1.j, -1.+1.j,  0.+1.j,  1.+1.j,  2.+1.j],
+           [-2.+0.j, -1.+0.j,  0.+0.j,  1.+0.j,  2.+0.j],
+           [-2.-1.j, -1.-1.j,  0.-1.j,  1.-1.j,  2.-1.j],
+           [-2.-2.j, -1.-2.j,  0.-2.j,  1.-2.j,  2.-2.j]])
+
+    An example using a "vector" of letters:
+
+    >>> x = np.array(['a', 'b', 'c'], dtype=object)
+    >>> np.outer(x, [1, 2, 3])
+    array([['a', 'aa', 'aaa'],
+           ['b', 'bb', 'bbb'],
+           ['c', 'cc', 'ccc']], dtype=object)
+
+    """
+    a = asarray(a)
+    b = asarray(b)
+    return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
+
+
+def _tensordot_dispatcher(a, b, axes=None):
+    return (a, b)
+
+
+@array_function_dispatch(_tensordot_dispatcher)
+def tensordot(a, b, axes=2):
+    """
+    Compute tensor dot product along specified axes.
+
+    Given two tensors, `a` and `b`, and an array_like object containing
+    two array_like objects, ``(a_axes, b_axes)``, sum the products of
+    `a`'s and `b`'s elements (components) over the axes specified by
+    ``a_axes`` and ``b_axes``. The third argument can be a single non-negative
+    integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
+    of `a` and the first ``N`` dimensions of `b` are summed over.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Tensors to "dot".
+
+    axes : int or (2,) array_like
+        * integer_like
+          If an int N, sum over the last N axes of `a` and the first N axes
+          of `b` in order. The sizes of the corresponding axes must match.
+        * (2,) array_like
+          Or, a list of axes to be summed over, first sequence applying to `a`,
+          second to `b`. Both sequences must be of the same length.
+
+    Returns
+    -------
+    output : ndarray
+        The tensor dot product of the input.
+
+    See Also
+    --------
+    dot, einsum
+
+    Notes
+    -----
+    Three common use cases are:
+        * ``axes = 0`` : tensor product :math:`a\\otimes b`
+        * ``axes = 1`` : tensor dot product :math:`a\\cdot b`
+        * ``axes = 2`` : (default) tensor double contraction :math:`a:b`
+
+    When `axes` is integer_like, the sequence of axes for evaluation
+    will be: from the -Nth axis to the -1th axis in `a`,
+    and from the 0th axis to the (N-1)th axis in `b`.
+    For example, ``axes = 2`` is equivalent to
+    ``axes = [[-2, -1], [0, 1]]``.
+    When ``N - 1`` is smaller than 0, or when ``-N`` is larger than -1,
+    the elements of `a` and `b` are defined as the `axes`.
+
+    When there is more than one axis to sum over - and they are not the last
+    (first) axes of `a` (`b`) - the argument `axes` should consist of
+    two sequences of the same length, with the first axis to sum over given
+    first in both sequences, the second axis second, and so forth.
+    The calculation can equivalently be expressed with ``numpy.einsum``.
+
+    The shape of the result consists of the non-contracted axes of the
+    first tensor, followed by the non-contracted axes of the second.
+ + Examples + -------- + An example on integer_like: + + >>> a_0 = np.array([[1, 2], [3, 4]]) + >>> b_0 = np.array([[5, 6], [7, 8]]) + >>> c_0 = np.tensordot(a_0, b_0, axes=0) + >>> c_0.shape + (2, 2, 2, 2) + >>> c_0 + array([[[[ 5, 6], + [ 7, 8]], + [[10, 12], + [14, 16]]], + [[[15, 18], + [21, 24]], + [[20, 24], + [28, 32]]]]) + + An example on array_like: + + >>> a = np.arange(60.).reshape(3,4,5) + >>> b = np.arange(24.).reshape(4,3,2) + >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) + >>> c.shape + (5, 2) + >>> c + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + A slower but equivalent way of computing the same... + + >>> d = np.zeros((5,2)) + >>> for i in range(5): + ... for j in range(2): + ... for k in range(3): + ... for n in range(4): + ... d[i,j] += a[k,n,i] * b[n,k,j] + >>> c == d + array([[ True, True], + [ True, True], + [ True, True], + [ True, True], + [ True, True]]) + + An extended example taking advantage of the overloading of + and \\*: + + >>> a = np.array(range(1, 9)) + >>> a.shape = (2, 2, 2) + >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object) + >>> A.shape = (2, 2) + >>> a; A + array([[[1, 2], + [3, 4]], + [[5, 6], + [7, 8]]]) + array([['a', 'b'], + ['c', 'd']], dtype=object) + + >>> np.tensordot(a, A) # third argument default is 2 for double-contraction + array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, 1) + array([[['acc', 'bdd'], + ['aaacccc', 'bbbdddd']], + [['aaaaacccccc', 'bbbbbdddddd'], + ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) + array([[[[['a', 'b'], + ['c', 'd']], + ... + + >>> np.tensordot(a, A, (0, 1)) + array([[['abbbbb', 'cddddd'], + ['aabbbbbb', 'ccdddddd']], + [['aaabbbbbbb', 'cccddddddd'], + ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, (2, 1)) + array([[['abb', 'cdd'], + ['aaabbbb', 'cccdddd']], + [['aaaaabbbbbb', 'cccccdddddd'], + ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) + + >>> np.tensordot(a, A, ((0, 1), (0, 1))) + array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) + + >>> np.tensordot(a, A, ((2, 1), (1, 0))) + array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) + + """ + try: + iter(axes) + except Exception: + axes_a = list(range(-axes, 0)) + axes_b = list(range(axes)) + else: + axes_a, axes_b = axes + try: + na = len(axes_a) + axes_a = list(axes_a) + except TypeError: + axes_a = [axes_a] + na = 1 + try: + nb = len(axes_b) + axes_b = list(axes_b) + except TypeError: + axes_b = [axes_b] + nb = 1 + + a, b = asarray(a), asarray(b) + as_ = a.shape + nda = a.ndim + bs = b.shape + ndb = b.ndim + equal = True + if na != nb: + equal = False + else: + for k in range(na): + if as_[axes_a[k]] != bs[axes_b[k]]: + equal = False + break + if axes_a[k] < 0: + axes_a[k] += nda + if axes_b[k] < 0: + axes_b[k] += ndb + if not equal: + raise ValueError("shape-mismatch for sum") + + # Move the axes to sum over to the end of "a" + # and to the front of "b" + notin = [k for k in range(nda) if k not in axes_a] + newaxes_a = notin + axes_a + N2 = math.prod(as_[axis] for axis in axes_a) + newshape_a = (math.prod(as_[ax] for ax in notin), N2) + olda = [as_[axis] for axis in notin] + + notin = [k for k in range(ndb) if k not in axes_b] + newaxes_b = axes_b + notin + N2 = math.prod(bs[axis] for axis in axes_b) + newshape_b = (N2, math.prod(bs[ax] for ax in notin)) + oldb = [bs[axis] for axis in notin] + + at = 
a.transpose(newaxes_a).reshape(newshape_a) + bt = b.transpose(newaxes_b).reshape(newshape_b) + res = dot(at, bt) + return res.reshape(olda + oldb) + + +def _roll_dispatcher(a, shift, axis=None): + return (a,) + + +@array_function_dispatch(_roll_dispatcher) +def roll(a, shift, axis=None): + """ + Roll array elements along a given axis. + + Elements that roll beyond the last position are re-introduced at + the first. + + Parameters + ---------- + a : array_like + Input array. + shift : int or tuple of ints + The number of places by which elements are shifted. If a tuple, + then `axis` must be a tuple of the same size, and each of the + given axes is shifted by the corresponding number. If an int + while `axis` is a tuple of ints, then the same value is used for + all given axes. + axis : int or tuple of ints, optional + Axis or axes along which elements are shifted. By default, the + array is flattened before shifting, after which the original + shape is restored. + + Returns + ------- + res : ndarray + Output array, with the same shape as `a`. + + See Also + -------- + rollaxis : Roll the specified axis backwards, until it lies in a + given position. + + Notes + ----- + Supports rolling over multiple dimensions simultaneously. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10) + >>> np.roll(x, 2) + array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) + >>> np.roll(x, -2) + array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1]) + + >>> x2 = np.reshape(x, (2, 5)) + >>> x2 + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> np.roll(x2, 1) + array([[9, 0, 1, 2, 3], + [4, 5, 6, 7, 8]]) + >>> np.roll(x2, -1) + array([[1, 2, 3, 4, 5], + [6, 7, 8, 9, 0]]) + >>> np.roll(x2, 1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, -1, axis=0) + array([[5, 6, 7, 8, 9], + [0, 1, 2, 3, 4]]) + >>> np.roll(x2, 1, axis=1) + array([[4, 0, 1, 2, 3], + [9, 5, 6, 7, 8]]) + >>> np.roll(x2, -1, axis=1) + array([[1, 2, 3, 4, 0], + [6, 7, 8, 9, 5]]) + >>> np.roll(x2, (1, 1), axis=(1, 0)) + array([[9, 5, 6, 7, 8], + [4, 0, 1, 2, 3]]) + >>> np.roll(x2, (2, 1), axis=(1, 0)) + array([[8, 9, 5, 6, 7], + [3, 4, 0, 1, 2]]) + + """ + a = asanyarray(a) + if axis is None: + return roll(a.ravel(), shift, 0).reshape(a.shape) + + else: + axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True) + broadcasted = broadcast(shift, axis) + if broadcasted.ndim > 1: + raise ValueError( + "'shift' and 'axis' should be scalars or 1D sequences") + shifts = dict.fromkeys(range(a.ndim), 0) + for sh, ax in broadcasted: + shifts[ax] += int(sh) + + rolls = [((slice(None), slice(None)),)] * a.ndim + for ax, offset in shifts.items(): + offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters. + if offset: + # (original, result), (original, result) + rolls[ax] = ((slice(None, -offset), slice(offset, None)), + (slice(-offset, None), slice(None, offset))) + + result = empty_like(a) + for indices in itertools.product(*rolls): + arr_index, res_index = zip(*indices) + result[res_index] = a[arr_index] + + return result + + +def _rollaxis_dispatcher(a, axis, start=None): + return (a,) + + +@array_function_dispatch(_rollaxis_dispatcher) +def rollaxis(a, axis, start=0): + """ + Roll the specified axis backwards, until it lies in a given position. + + This function continues to be supported for backward compatibility, but you + should prefer `moveaxis`. The `moveaxis` function was added in NumPy + 1.11. + + Parameters + ---------- + a : ndarray + Input array. + axis : int + The axis to be rolled. 
The positions of the other axes do not + change relative to one another. + start : int, optional + When ``start <= axis``, the axis is rolled back until it lies in + this position. When ``start > axis``, the axis is rolled until it + lies before this position. The default, 0, results in a "complete" + roll. The following table describes how negative values of ``start`` + are interpreted: + + .. table:: + :align: left + + +-------------------+----------------------+ + | ``start`` | Normalized ``start`` | + +===================+======================+ + | ``-(arr.ndim+1)`` | raise ``AxisError`` | + +-------------------+----------------------+ + | ``-arr.ndim`` | 0 | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``-1`` | ``arr.ndim-1`` | + +-------------------+----------------------+ + | ``0`` | ``0`` | + +-------------------+----------------------+ + | |vdots| | |vdots| | + +-------------------+----------------------+ + | ``arr.ndim`` | ``arr.ndim`` | + +-------------------+----------------------+ + | ``arr.ndim + 1`` | raise ``AxisError`` | + +-------------------+----------------------+ + + .. |vdots| unicode:: U+22EE .. Vertical Ellipsis + + Returns + ------- + res : ndarray + For NumPy >= 1.10.0 a view of `a` is always returned. For earlier + NumPy versions a view of `a` is returned only if the order of the + axes is changed, otherwise the input array is returned. + + See Also + -------- + moveaxis : Move array axes to new positions. + roll : Roll the elements of an array by a number of positions along a + given axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.ones((3,4,5,6)) + >>> np.rollaxis(a, 3, 1).shape + (3, 6, 4, 5) + >>> np.rollaxis(a, 2).shape + (5, 3, 4, 6) + >>> np.rollaxis(a, 1, 4).shape + (3, 5, 6, 4) + + """ + n = a.ndim + axis = normalize_axis_index(axis, n) + if start < 0: + start += n + msg = "'%s' arg requires %d <= %s < %d, but %d was passed in" + if not (0 <= start < n + 1): + raise AxisError(msg % ('start', -n, 'start', n + 1, start)) + if axis < start: + # it's been removed + start -= 1 + if axis == start: + return a[...] + axes = list(range(n)) + axes.remove(axis) + axes.insert(start, axis) + return a.transpose(axes) + + +@set_module("numpy.lib.array_utils") +def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False): + """ + Normalizes an axis argument into a tuple of non-negative integer axes. + + This handles shorthands such as ``1`` and converts them to ``(1,)``, + as well as performing the handling of negative indices covered by + `normalize_axis_index`. + + By default, this forbids axes from being specified multiple times. + + Used internally by multi-axis-checking logic. + + Parameters + ---------- + axis : int, iterable of int + The un-normalized index or indices of the axis. + ndim : int + The number of dimensions of the array that `axis` should be normalized + against. + argname : str, optional + A prefix to put before the error message, typically the name of the + argument. + allow_duplicate : bool, optional + If False, the default, disallow an axis from being specified twice. + + Returns + ------- + normalized_axes : tuple of int + The normalized axis index, such that `0 <= normalized_axis < ndim` + + Raises + ------ + AxisError + If any axis provided is out of range + ValueError + If an axis is repeated + + See also + -------- + normalize_axis_index : normalizing a single scalar axis + """ + # Optimization to speed-up the most common cases. 
+    if not isinstance(axis, (tuple, list)):
+        try:
+            axis = [operator.index(axis)]
+        except TypeError:
+            pass
+    # Going via an iterator directly is slower than via list comprehension.
+    axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
+    if not allow_duplicate and len(set(axis)) != len(axis):
+        if argname:
+            raise ValueError(f'repeated axis in `{argname}` argument')
+        else:
+            raise ValueError('repeated axis')
+    return axis
+
+
+def _moveaxis_dispatcher(a, source, destination):
+    return (a,)
+
+
+@array_function_dispatch(_moveaxis_dispatcher)
+def moveaxis(a, source, destination):
+    """
+    Move axes of an array to new positions.
+
+    Other axes remain in their original order.
+
+    Parameters
+    ----------
+    a : np.ndarray
+        The array whose axes should be reordered.
+    source : int or sequence of int
+        Original positions of the axes to move. These must be unique.
+    destination : int or sequence of int
+        Destination positions for each of the original axes. These must also
+        be unique.
+
+    Returns
+    -------
+    result : np.ndarray
+        Array with moved axes. This array is a view of the input array.
+
+    See Also
+    --------
+    transpose : Permute the dimensions of an array.
+    swapaxes : Interchange two axes of an array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.zeros((3, 4, 5))
+    >>> np.moveaxis(x, 0, -1).shape
+    (4, 5, 3)
+    >>> np.moveaxis(x, -1, 0).shape
+    (5, 3, 4)
+
+    These all achieve the same result:
+
+    >>> np.transpose(x).shape
+    (5, 4, 3)
+    >>> np.swapaxes(x, 0, -1).shape
+    (5, 4, 3)
+    >>> np.moveaxis(x, [0, 1], [-1, -2]).shape
+    (5, 4, 3)
+    >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
+    (5, 4, 3)
+
+    """
+    try:
+        # allow duck-array types if they define transpose
+        transpose = a.transpose
+    except AttributeError:
+        a = asarray(a)
+        transpose = a.transpose
+
+    source = normalize_axis_tuple(source, a.ndim, 'source')
+    destination = normalize_axis_tuple(destination, a.ndim, 'destination')
+    if len(source) != len(destination):
+        raise ValueError('`source` and `destination` arguments must have '
+                         'the same number of elements')
+
+    order = [n for n in range(a.ndim) if n not in source]
+
+    for dest, src in sorted(zip(destination, source)):
+        order.insert(dest, src)
+
+    result = transpose(order)
+    return result
+
+
+def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
+    return (a, b)
+
+
+@array_function_dispatch(_cross_dispatcher)
+def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
+    """
+    Return the cross product of two (arrays of) vectors.
+
+    The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
+    to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
+    are defined by the last axis of `a` and `b` by default, and these axes
+    can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
+    2, the third component of the input vector is assumed to be zero and the
+    cross product calculated accordingly. In cases where both input vectors
+    have dimension 2, the z-component of the cross product is returned.
+
+    Parameters
+    ----------
+    a : array_like
+        Components of the first vector(s).
+    b : array_like
+        Components of the second vector(s).
+    axisa : int, optional
+        Axis of `a` that defines the vector(s). By default, the last axis.
+    axisb : int, optional
+        Axis of `b` that defines the vector(s). By default, the last axis.
+    axisc : int, optional
+        Axis of `c` containing the cross product vector(s). Ignored if
+        both input vectors have dimension 2, as the return is scalar.
+ By default, the last axis. + axis : int, optional + If defined, the axis of `a`, `b` and `c` that defines the vector(s) + and cross product(s). Overrides `axisa`, `axisb` and `axisc`. + + Returns + ------- + c : ndarray + Vector cross product(s). + + Raises + ------ + ValueError + When the dimension of the vector(s) in `a` and/or `b` does not + equal 2 or 3. + + See Also + -------- + inner : Inner product + outer : Outer product. + linalg.cross : An Array API compatible variation of ``np.cross``, + which accepts (arrays of) 3-element vectors only. + ix_ : Construct index arrays. + + Notes + ----- + Supports full broadcasting of the inputs. + + Dimension-2 input arrays were deprecated in 2.0.0. If you do need this + functionality, you can use:: + + def cross2d(x, y): + return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] + + Examples + -------- + Vector cross-product. + + >>> import numpy as np + >>> x = [1, 2, 3] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([-3, 6, -3]) + + One vector with dimension 2. + + >>> x = [1, 2] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Equivalently: + + >>> x = [1, 2, 0] + >>> y = [4, 5, 6] + >>> np.cross(x, y) + array([12, -6, -3]) + + Both vectors with dimension 2. + + >>> x = [1,2] + >>> y = [4,5] + >>> np.cross(x, y) + array(-3) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + The orientation of `c` can be changed using the `axisc` keyword. + + >>> np.cross(x, y, axisc=0) + array([[-3, 3], + [ 6, -6], + [-3, 3]]) + + Change the vector definition of `x` and `y` using `axisa` and `axisb`. + + >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]]) + >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]]) + >>> np.cross(x, y) + array([[ -6, 12, -6], + [ 0, 0, 0], + [ 6, -12, 6]]) + >>> np.cross(x, y, axisa=0, axisb=0) + array([[-24, 48, -24], + [-30, 60, -30], + [-36, 72, -36]]) + + """ + if axis is not None: + axisa, axisb, axisc = (axis,) * 3 + a = asarray(a) + b = asarray(b) + + if (a.ndim < 1) or (b.ndim < 1): + raise ValueError("At least one array has zero dimension") + + # Check axisa and axisb are within bounds + axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa') + axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb') + + # Move working axis to the end of the shape + a = moveaxis(a, axisa, -1) + b = moveaxis(b, axisb, -1) + msg = ("incompatible dimensions for cross product\n" + "(dimension must be 2 or 3)") + if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3): + raise ValueError(msg) + if a.shape[-1] == 2 or b.shape[-1] == 2: + # Deprecated in NumPy 2.0, 2023-09-26 + warnings.warn( + "Arrays of 2-dimensional vectors are deprecated. Use arrays of " + "3-dimensional vectors instead. 
(deprecated in NumPy 2.0)",
+            DeprecationWarning, stacklevel=2
+        )
+
+    # Create the output array
+    shape = broadcast(a[..., 0], b[..., 0]).shape
+    if a.shape[-1] == 3 or b.shape[-1] == 3:
+        shape += (3,)
+        # Check axisc is within bounds
+        axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
+    dtype = promote_types(a.dtype, b.dtype)
+    cp = empty(shape, dtype)
+
+    # recast arrays as dtype
+    a = a.astype(dtype)
+    b = b.astype(dtype)
+
+    # create local aliases for readability
+    a0 = a[..., 0]
+    a1 = a[..., 1]
+    if a.shape[-1] == 3:
+        a2 = a[..., 2]
+    b0 = b[..., 0]
+    b1 = b[..., 1]
+    if b.shape[-1] == 3:
+        b2 = b[..., 2]
+    if cp.ndim != 0 and cp.shape[-1] == 3:
+        cp0 = cp[..., 0]
+        cp1 = cp[..., 1]
+        cp2 = cp[..., 2]
+
+    if a.shape[-1] == 2:
+        if b.shape[-1] == 2:
+            # a0 * b1 - a1 * b0
+            multiply(a0, b1, out=cp)
+            cp -= a1 * b0
+            return cp
+        else:
+            assert b.shape[-1] == 3
+            # cp0 = a1 * b2 - 0  (a2 = 0)
+            # cp1 = 0 - a0 * b2  (a2 = 0)
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a1, b2, out=cp0)
+            multiply(a0, b2, out=cp1)
+            negative(cp1, out=cp1)
+            multiply(a0, b1, out=cp2)
+            cp2 -= a1 * b0
+    else:
+        assert a.shape[-1] == 3
+        if b.shape[-1] == 3:
+            # cp0 = a1 * b2 - a2 * b1
+            # cp1 = a2 * b0 - a0 * b2
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a1, b2, out=cp0)
+            tmp = array(a2 * b1)  # writable scratch buffer, reused below
+            cp0 -= tmp
+            multiply(a2, b0, out=cp1)
+            multiply(a0, b2, out=tmp)
+            cp1 -= tmp
+            multiply(a0, b1, out=cp2)
+            multiply(a1, b0, out=tmp)
+            cp2 -= tmp
+        else:
+            assert b.shape[-1] == 2
+            # cp0 = 0 - a2 * b1  (b2 = 0)
+            # cp1 = a2 * b0 - 0  (b2 = 0)
+            # cp2 = a0 * b1 - a1 * b0
+            multiply(a2, b1, out=cp0)
+            negative(cp0, out=cp0)
+            multiply(a2, b0, out=cp1)
+            multiply(a0, b1, out=cp2)
+            cp2 -= a1 * b0
+
+    return moveaxis(cp, -1, axisc)
+
+
+little_endian = (sys.byteorder == 'little')
+
+
+@set_module('numpy')
+def indices(dimensions, dtype=int, sparse=False):
+    """
+    Return an array representing the indices of a grid.
+
+    Compute an array where the subarrays contain index values 0, 1, ...
+    varying only along the corresponding axis.
+
+    Parameters
+    ----------
+    dimensions : sequence of ints
+        The shape of the grid.
+    dtype : dtype, optional
+        Data type of the result.
+    sparse : boolean, optional
+        Return a sparse representation of the grid instead of a dense
+        representation. Default is False.
+
+    Returns
+    -------
+    grid : one ndarray or tuple of ndarrays
+        If sparse is False:
+            Returns one array of grid indices,
+            ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+        If sparse is True:
+            Returns a tuple of arrays, with
+            ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
+            dimensions[i] in the ith place
+
+    See Also
+    --------
+    mgrid, ogrid, meshgrid
+
+    Notes
+    -----
+    The output shape in the dense case is obtained by prepending the number
+    of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
+    is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
+    ``(N, r0, ..., rN-1)``.
+
+    The subarrays ``grid[k]`` contain the N-D array of indices along the
+    ``k-th`` axis. Explicitly::
+
+        grid[k, i0, i1, ..., iN-1] = ik
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> grid = np.indices((2, 3))
+    >>> grid.shape
+    (2, 2, 3)
+    >>> grid[0]        # row indices
+    array([[0, 0, 0],
+           [1, 1, 1]])
+    >>> grid[1]        # column indices
+    array([[0, 1, 2],
+           [0, 1, 2]])
+
+    The indices can be used as an index into an array.
+ + >>> x = np.arange(20).reshape(5, 4) + >>> row, col = np.indices((2, 3)) + >>> x[row, col] + array([[0, 1, 2], + [4, 5, 6]]) + + Note that it would be more straightforward in the above example to + extract the required elements directly with ``x[:2, :3]``. + + If sparse is set to true, the grid will be returned in a sparse + representation. + + >>> i, j = np.indices((2, 3), sparse=True) + >>> i.shape + (2, 1) + >>> j.shape + (1, 3) + >>> i # row indices + array([[0], + [1]]) + >>> j # column indices + array([[0, 1, 2]]) + + """ + dimensions = tuple(dimensions) + N = len(dimensions) + shape = (1,) * N + if sparse: + res = () + else: + res = empty((N,) + dimensions, dtype=dtype) + for i, dim in enumerate(dimensions): + idx = arange(dim, dtype=dtype).reshape( + shape[:i] + (dim,) + shape[i + 1:] + ) + if sparse: + res = res + (idx,) + else: + res[i] = idx + return res + + +@finalize_array_function_like +@set_module('numpy') +def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): + """ + Construct an array by executing a function over each coordinate. + + The resulting array therefore has a value ``fn(x, y, z)`` at + coordinate ``(x, y, z)``. + + Parameters + ---------- + function : callable + The function is called with N parameters, where N is the rank of + `shape`. Each parameter represents the coordinates of the array + varying along a specific axis. For example, if `shape` + were ``(2, 2)``, then the parameters would be + ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])`` + shape : (N,) tuple of ints + Shape of the output array, which also determines the shape of + the coordinate arrays passed to `function`. + dtype : data-type, optional + Data-type of the coordinate arrays passed to `function`. + By default, `dtype` is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + fromfunction : any + The result of the call to `function` is passed back directly. + Therefore the shape of `fromfunction` is completely determined by + `function`. If `function` returns a scalar value, the shape of + `fromfunction` would not match the `shape` parameter. + + See Also + -------- + indices, meshgrid + + Notes + ----- + Keywords other than `dtype` and `like` are passed to `function`. + + Examples + -------- + >>> import numpy as np + >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) + array([[0., 0.], + [1., 1.]]) + + >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float) + array([[0., 1.], + [0., 1.]]) + + >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int) + array([[ True, False, False], + [False, True, False], + [False, False, True]]) + + >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int) + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4]]) + + """ + if like is not None: + return _fromfunction_with_like( + like, function, shape, dtype=dtype, **kwargs) + + args = indices(shape, dtype=dtype) + return function(*args, **kwargs) + + +_fromfunction_with_like = array_function_dispatch()(fromfunction) + + +def _frombuffer(buf, dtype, shape, order, axis_order=None): + array = frombuffer(buf, dtype=dtype) + if order == 'K' and axis_order is not None: + return array.reshape(shape, order='C').transpose(axis_order) + return array.reshape(shape, order=order) + + +@set_module('numpy') +def isscalar(element): + """ + Returns True if the type of `element` is a scalar type. + + Parameters + ---------- + element : any + Input argument, can be of any type and shape. 
+
+    Returns
+    -------
+    val : bool
+        True if `element` is a scalar type, False if it is not.
+
+    See Also
+    --------
+    ndim : Get the number of dimensions of an array
+
+    Notes
+    -----
+    If you need a stricter way to identify a *numerical* scalar, use
+    ``isinstance(x, numbers.Number)``, as that returns ``False`` for most
+    non-numerical elements such as strings.
+
+    In most cases ``np.ndim(x) == 0`` should be used instead of this function,
+    as that will also return ``True`` for 0d arrays. This is how numpy
+    overloads functions in the style of the ``dx`` arguments to `gradient`
+    and the ``bins`` argument to `histogram`. Some key differences:
+
+    +------------------------------------+---------------+-------------------+
+    | x                                  |``isscalar(x)``|``np.ndim(x) == 0``|
+    +====================================+===============+===================+
+    | PEP 3141 numeric objects           | ``True``      | ``True``          |
+    | (including builtins)               |               |                   |
+    +------------------------------------+---------------+-------------------+
+    | builtin string and buffer objects  | ``True``      | ``True``          |
+    +------------------------------------+---------------+-------------------+
+    | other builtin objects, like        | ``False``     | ``True``          |
+    | `pathlib.Path`, `Exception`,       |               |                   |
+    | the result of `re.compile`         |               |                   |
+    +------------------------------------+---------------+-------------------+
+    | third-party objects like           | ``False``     | ``True``          |
+    | `matplotlib.figure.Figure`         |               |                   |
+    +------------------------------------+---------------+-------------------+
+    | zero-dimensional numpy arrays      | ``False``     | ``True``          |
+    +------------------------------------+---------------+-------------------+
+    | other numpy arrays                 | ``False``     | ``False``         |
+    +------------------------------------+---------------+-------------------+
+    | `list`, `tuple`, and other         | ``False``     | ``False``         |
+    | sequence objects                   |               |                   |
+    +------------------------------------+---------------+-------------------+
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> np.isscalar(3.1)
+    True
+
+    >>> np.isscalar(np.array(3.1))
+    False
+
+    >>> np.isscalar([3.1])
+    False
+
+    >>> np.isscalar(False)
+    True
+
+    >>> np.isscalar('numpy')
+    True
+
+    NumPy supports PEP 3141 numbers:
+
+    >>> from fractions import Fraction
+    >>> np.isscalar(Fraction(5, 17))
+    True
+    >>> from numbers import Number
+    >>> np.isscalar(Number())
+    True
+
+    """
+    return (isinstance(element, generic)
+            or type(element) in ScalarType
+            or isinstance(element, numbers.Number))
+
+
+@set_module('numpy')
+def binary_repr(num, width=None):
+    """
+    Return the binary representation of the input number as a string.
+
+    For negative numbers, if width is not given, a minus sign is added to the
+    front. If width is given, the two's complement of the number is
+    returned, with respect to that width.
+
+    In a two's-complement system negative numbers are represented by the two's
+    complement of the absolute value. This is the most common method of
+    representing signed integers on computers [1]_. An N-bit two's-complement
+    system can represent every integer in the range
+    :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
+
+    Parameters
+    ----------
+    num : int
+        Only an integer decimal number can be used.
+    width : int, optional
+        The length of the returned string if `num` is positive, or the length
+        of the two's complement if `num` is negative, provided that `width` is
+        at least a sufficient number of bits for `num` to be represented in
+        the designated form. If the `width` value is insufficient, an error is
+        raised.
+ + Returns + ------- + bin : str + Binary representation of `num` or two's complement of `num`. + + See Also + -------- + base_repr: Return a string representation of a number in the given base + system. + bin: Python's built-in binary representation generator of an integer. + + Notes + ----- + `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x + faster. + + References + ---------- + .. [1] Wikipedia, "Two's complement", + https://en.wikipedia.org/wiki/Two's_complement + + Examples + -------- + >>> import numpy as np + >>> np.binary_repr(3) + '11' + >>> np.binary_repr(-3) + '-11' + >>> np.binary_repr(3, width=4) + '0011' + + The two's complement is returned when the input number is negative and + width is specified: + + >>> np.binary_repr(-3, width=3) + '101' + >>> np.binary_repr(-3, width=5) + '11101' + + """ + def err_if_insufficient(width, binwidth): + if width is not None and width < binwidth: + raise ValueError( + f"Insufficient bit {width=} provided for {binwidth=}" + ) + + # Ensure that num is a Python integer to avoid overflow or unwanted + # casts to floating point. + num = operator.index(num) + + if num == 0: + return '0' * (width or 1) + + elif num > 0: + binary = f'{num:b}' + binwidth = len(binary) + outwidth = (binwidth if width is None + else builtins.max(binwidth, width)) + err_if_insufficient(width, binwidth) + return binary.zfill(outwidth) + + elif width is None: + return f'-{-num:b}' + + else: + poswidth = len(f'{-num:b}') + + # See gh-8679: remove extra digit + # for numbers at boundaries. + if 2**(poswidth - 1) == -num: + poswidth -= 1 + + twocomp = 2**(poswidth + 1) + num + binary = f'{twocomp:b}' + binwidth = len(binary) + + outwidth = builtins.max(binwidth, width) + err_if_insufficient(width, binwidth) + return '1' * (outwidth - binwidth) + binary + + +@set_module('numpy') +def base_repr(number, base=2, padding=0): + """ + Return a string representation of a number in the given base system. + + Parameters + ---------- + number : int + The value to convert. Positive and negative values are handled. + base : int, optional + Convert `number` to the `base` number system. The valid range is 2-36, + the default value is 2. + padding : int, optional + Number of zeros padded on the left. Default is 0 (no padding). + + Returns + ------- + out : str + String representation of `number` in `base` system. + + See Also + -------- + binary_repr : Faster version of `base_repr` for base 2. 
+ + Examples + -------- + >>> import numpy as np + >>> np.base_repr(5) + '101' + >>> np.base_repr(6, 5) + '11' + >>> np.base_repr(7, base=5, padding=3) + '00012' + + >>> np.base_repr(10, base=16) + 'A' + >>> np.base_repr(32, base=16) + '20' + + """ + digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + if base > len(digits): + raise ValueError("Bases greater than 36 not handled in base_repr.") + elif base < 2: + raise ValueError("Bases less than 2 not handled in base_repr.") + + num = abs(int(number)) + res = [] + while num: + res.append(digits[num % base]) + num //= base + if padding: + res.append('0' * padding) + if number < 0: + res.append('-') + return ''.join(reversed(res or '0')) + + +# These are all essentially abbreviations +# These might wind up in a special abbreviations module + + +def _maketup(descr, val): + dt = dtype(descr) + # Place val in all scalar tuples: + fields = dt.fields + if fields is None: + return val + else: + res = [_maketup(fields[name][0], val) for name in dt.names] + return tuple(res) + + +@finalize_array_function_like +@set_module('numpy') +def identity(n, dtype=None, *, like=None): + """ + Return the identity array. + + The identity array is a square array with ones on + the main diagonal. + + Parameters + ---------- + n : int + Number of rows (and columns) in `n` x `n` output. + dtype : data-type, optional + Data-type of the output. Defaults to ``float``. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + out : ndarray + `n` x `n` array with its main diagonal set to one, + and all other elements 0. + + Examples + -------- + >>> import numpy as np + >>> np.identity(3) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + """ + if like is not None: + return _identity_with_like(like, n, dtype=dtype) + + from numpy import eye + return eye(n, dtype=dtype, like=like) + + +_identity_with_like = array_function_dispatch()(identity) + + +def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b, rtol, atol) + + +@array_function_dispatch(_allclose_dispatcher) +def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. warning:: The default `atol` is not appropriate for comparing numbers + with magnitudes much smaller than one (see Notes). + + NaNs are treated as equal if they are in the same place and if + ``equal_nan=True``. Infs are treated as equal if they are in the same + place and of the same sign in both arrays. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : array_like + The relative tolerance parameter (see Notes). + atol : array_like + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + allclose : bool + Returns True if the two arrays are equal within the given + tolerance; False otherwise. 
+ + See Also + -------- + isclose, all, any, equal + + Notes + ----- + If the following equation is element-wise True, then allclose returns + True.:: + + absolute(a - b) <= (atol + rtol * absolute(b)) + + The above equation is not symmetric in `a` and `b`, so that + ``allclose(a, b)`` might be different from ``allclose(b, a)`` in + some rare cases. + + The default value of `atol` is not appropriate when the reference value + `b` has magnitude smaller than one. For example, it is unlikely that + ``a = 1e-9`` and ``b = 2e-9`` should be considered "close", yet + ``allclose(1e-9, 2e-9)`` is ``True`` with default settings. Be sure + to select `atol` for the use case at hand, especially for defining the + threshold below which a non-zero value in `a` will be considered "close" + to a very small or zero value in `b`. + + The comparison of `a` and `b` uses standard broadcasting, which + means that `a` and `b` need not have the same shape in order for + ``allclose(a, b)`` to evaluate to True. The same is true for + `equal` but not `array_equal`. + + `allclose` is not defined for non-numeric data types. + `bool` is considered a numeric data-type for this purpose. + + Examples + -------- + >>> import numpy as np + >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) + False + + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) + True + + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) + False + + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) + False + + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + True + + + """ + res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) + return builtins.bool(res) + + +def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None): + return (a, b, rtol, atol) + + +@array_function_dispatch(_isclose_dispatcher) +def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): + """ + Returns a boolean array where two arrays are element-wise equal within a + tolerance. + + The tolerance values are positive, typically very small numbers. The + relative difference (`rtol` * abs(`b`)) and the absolute difference + `atol` are added together to compare against the absolute difference + between `a` and `b`. + + .. warning:: The default `atol` is not appropriate for comparing numbers + with magnitudes much smaller than one (see Notes). + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + rtol : array_like + The relative tolerance parameter (see Notes). + atol : array_like + The absolute tolerance parameter (see Notes). + equal_nan : bool + Whether to compare NaN's as equal. If True, NaN's in `a` will be + considered equal to NaN's in `b` in the output array. + + Returns + ------- + y : array_like + Returns a boolean array of where `a` and `b` are equal within the + given tolerance. If both `a` and `b` are scalars, returns a single + boolean value. + + See Also + -------- + allclose + math.isclose + + Notes + ----- + For finite values, isclose uses the following equation to test whether + two floating point values are equivalent.:: + + absolute(a - b) <= (atol + rtol * absolute(b)) + + Unlike the built-in `math.isclose`, the above equation is not symmetric + in `a` and `b` -- it assumes `b` is the reference value -- so that + `isclose(a, b)` might be different from `isclose(b, a)`. + + The default value of `atol` is not appropriate when the reference value + `b` has magnitude smaller than one. 
For example, it is unlikely that + ``a = 1e-9`` and ``b = 2e-9`` should be considered "close", yet + ``isclose(1e-9, 2e-9)`` is ``True`` with default settings. Be sure + to select `atol` for the use case at hand, especially for defining the + threshold below which a non-zero value in `a` will be considered "close" + to a very small or zero value in `b`. + + `isclose` is not defined for non-numeric data types. + :class:`bool` is considered a numeric data-type for this purpose. + + Examples + -------- + >>> import numpy as np + >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) + array([ True, False]) + + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) + array([ True, True]) + + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) + array([False, True]) + + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) + array([ True, False]) + + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) + array([ True, True]) + + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) + array([ True, False]) + + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) + array([False, False]) + + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) + array([ True, True]) + + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) + array([False, True]) + + """ + # Turn all but python scalars into arrays. + x, y, atol, rtol = ( + a if isinstance(a, (int, float, complex)) else asanyarray(a) + for a in (a, b, atol, rtol)) + + # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). + # This will cause casting of x later. Also, make sure to allow subclasses + # (e.g., for numpy.ma). + # NOTE: We explicitly allow timedelta, which used to work. This could + # possibly be deprecated. See also gh-18286. + # timedelta works if `atol` is an integer or also a timedelta. + # Although, the default tolerances are unlikely to be useful + if (dtype := getattr(y, "dtype", None)) is not None and dtype.kind != "m": + dt = multiarray.result_type(y, 1.) + y = asanyarray(y, dtype=dt) + elif isinstance(y, int): + y = float(y) + + # atol and rtol can be arrays + if not (np.all(np.isfinite(atol)) and np.all(np.isfinite(rtol))): + err_s = np.geterr()["invalid"] + err_msg = f"One of rtol or atol is not valid, atol: {atol}, rtol: {rtol}" + + if err_s == "warn": + warnings.warn(err_msg, RuntimeWarning, stacklevel=2) + elif err_s == "raise": + raise FloatingPointError(err_msg) + elif err_s == "print": + print(err_msg) + + with errstate(invalid='ignore'): + + result = (less_equal(abs(x - y), atol + rtol * abs(y)) + & isfinite(y) + | (x == y)) + if equal_nan: + result |= isnan(x) & isnan(y) + + return result[()] # Flatten 0d arrays to scalars + + +def _array_equal_dispatcher(a1, a2, equal_nan=None): + return (a1, a2) + + +_no_nan_types = { + # should use np.dtype.BoolDType, but as of writing + # that fails the reloading test. + type(dtype(nt.bool)), + type(dtype(nt.int8)), + type(dtype(nt.int16)), + type(dtype(nt.int32)), + type(dtype(nt.int64)), +} + + +def _dtype_cannot_hold_nan(dtype): + return type(dtype) in _no_nan_types + + +@array_function_dispatch(_array_equal_dispatcher) +def array_equal(a1, a2, equal_nan=False): + """ + True if two arrays have the same shape and elements, False otherwise. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + equal_nan : bool + Whether to compare NaN's as equal. If the dtype of a1 and a2 is + complex, values will be considered equal if either the real or the + imaginary component of a given value is ``nan``. + + Returns + ------- + b : bool + Returns True if the arrays are equal. 
+ + See Also + -------- + allclose: Returns True if two arrays are element-wise equal within a + tolerance. + array_equiv: Returns True if input arrays are shape consistent and all + elements equal. + + Examples + -------- + >>> import numpy as np + + >>> np.array_equal([1, 2], [1, 2]) + True + + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) + True + + >>> np.array_equal([1, 2], [1, 2, 3]) + False + + >>> np.array_equal([1, 2], [1, 4]) + False + + >>> a = np.array([1, np.nan]) + >>> np.array_equal(a, a) + False + + >>> np.array_equal(a, a, equal_nan=True) + True + + When ``equal_nan`` is True, complex values with nan components are + considered equal if either the real *or* the imaginary components are nan. + + >>> a = np.array([1 + 1j]) + >>> b = a.copy() + >>> a.real = np.nan + >>> b.imag = np.nan + >>> np.array_equal(a, b, equal_nan=True) + True + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + if a1.shape != a2.shape: + return False + if not equal_nan: + return builtins.bool((asanyarray(a1 == a2)).all()) + + if a1 is a2: + # nan will compare equal so an array will compare equal to itself. + return True + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + + # Handling NaN values if equal_nan is True + a1nan, a2nan = isnan(a1), isnan(a2) + # NaN's occur at different locations + if not (a1nan == a2nan).all(): + return False + # Shapes of a1, a2 and masks are guaranteed to be consistent by this point + return builtins.bool((a1[~a1nan] == a2[~a1nan]).all()) + + +def _array_equiv_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_array_equiv_dispatcher) +def array_equiv(a1, a2): + """ + Returns True if input arrays are shape consistent and all elements equal. + + Shape consistent means they are either the same shape, or one input array + can be broadcasted to create the same shape as the other one. + + Parameters + ---------- + a1, a2 : array_like + Input arrays. + + Returns + ------- + out : bool + True if equivalent, False otherwise. + + Examples + -------- + >>> import numpy as np + >>> np.array_equiv([1, 2], [1, 2]) + True + >>> np.array_equiv([1, 2], [1, 3]) + False + + Showing the shape equivalence: + + >>> np.array_equiv([1, 2], [[1, 2], [1, 2]]) + True + >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]) + False + + >>> np.array_equiv([1, 2], [[1, 2], [1, 3]]) + False + + """ + try: + a1, a2 = asarray(a1), asarray(a2) + except Exception: + return False + try: + multiarray.broadcast(a1, a2) + except Exception: + return False + + return builtins.bool(asanyarray(a1 == a2).all()) + + +def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): + return (x, dtype) + + +@array_function_dispatch(_astype_dispatcher) +def astype(x, dtype, /, *, copy=True, device=None): + """ + Copies an array to a specified data type. + + This function is an Array API compatible alternative to + `numpy.ndarray.astype`. + + Parameters + ---------- + x : ndarray + Input NumPy array to cast. ``array_likes`` are explicitly not + supported here. + dtype : dtype + Data type of the result. + copy : bool, optional + Specifies whether to copy an array when the specified dtype matches + the data type of the input array ``x``. If ``True``, a newly allocated + array must always be returned. 
If ``False`` and the specified dtype + matches the data type of the input array, the input array must be + returned; otherwise, a newly allocated array must be returned. + Defaults to ``True``. + device : str, optional + The device on which to place the returned array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 + + Returns + ------- + out : ndarray + An array having the specified data type. + + See Also + -------- + ndarray.astype + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([1, 2, 3]); arr + array([1, 2, 3]) + >>> np.astype(arr, np.float64) + array([1., 2., 3.]) + + Non-copy case: + + >>> arr = np.array([1, 2, 3]) + >>> arr_noncpy = np.astype(arr, arr.dtype, copy=False) + >>> np.shares_memory(arr, arr_noncpy) + True + + """ + if not (isinstance(x, np.ndarray) or isscalar(x)): + raise TypeError( + "Input should be a NumPy array or scalar. " + f"It is a {type(x)} instead." + ) + if device is not None and device != "cpu": + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + return x.astype(dtype, copy=copy) + + +inf = PINF +nan = NAN +False_ = nt.bool(False) +True_ = nt.bool(True) + + +def extend_all(module): + existing = set(__all__) + mall = module.__all__ + for a in mall: + if a not in existing: + __all__.append(a) + + +from . import _asarray, _ufunc_config, arrayprint, fromnumeric +from ._asarray import * +from ._ufunc_config import * +from .arrayprint import * +from .fromnumeric import * +from .numerictypes import * +from .umath import * + +extend_all(fromnumeric) +extend_all(umath) +extend_all(numerictypes) +extend_all(arrayprint) +extend_all(_asarray) +extend_all(_ufunc_config) diff --git a/python/numpy/_core/numeric.pyi b/python/numpy/_core/numeric.pyi new file mode 100644 index 000000000..919fe1917 --- /dev/null +++ b/python/numpy/_core/numeric.pyi @@ -0,0 +1,882 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + Final, + Never, + NoReturn, + SupportsAbs, + SupportsIndex, + TypeAlias, + TypeGuard, + TypeVar, + Unpack, + overload, +) +from typing import Literal as L + +import numpy as np +from numpy import ( + False_, + True_, + _OrderCF, + _OrderKACF, + # re-exports + bitwise_not, + broadcast, + complexfloating, + dtype, + flatiter, + float64, + floating, + from_dlpack, + # other + generic, + inf, + int_, + intp, + little_endian, + matmul, + nan, + ndarray, + nditer, + newaxis, + object_, + signedinteger, + timedelta64, + ufunc, + unsignedinteger, + vecdot, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, + _DTypeLike, + _NestedSequence, + _ScalarLike_co, + _Shape, + _ShapeLike, + _SupportsArrayFunc, + _SupportsDType, +) + +from .fromnumeric import all as all +from .fromnumeric import any as any +from .fromnumeric import argpartition as argpartition +from .fromnumeric import matrix_transpose as matrix_transpose +from .fromnumeric import mean as mean +from .multiarray import ( + # other + _Array, + _ConstructorEmpty, + _KwargsEmpty, + # re-exports + arange, + array, + asanyarray, + asarray, + ascontiguousarray, + asfortranarray, + can_cast, + concatenate, + copyto, + dot, + empty, + empty_like, + frombuffer, + fromfile, + fromiter, + fromstring, + inner, + lexsort, + may_share_memory, + min_scalar_type, + nested_iters, + 
promote_types, + putmask, + result_type, + shares_memory, + vdot, + where, + zeros, +) + +__all__ = [ + "newaxis", + "ndarray", + "flatiter", + "nditer", + "nested_iters", + "ufunc", + "arange", + "array", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "zeros", + "count_nonzero", + "empty", + "broadcast", + "dtype", + "fromstring", + "fromfile", + "frombuffer", + "from_dlpack", + "where", + "argwhere", + "copyto", + "concatenate", + "lexsort", + "astype", + "can_cast", + "promote_types", + "min_scalar_type", + "result_type", + "isfortran", + "empty_like", + "zeros_like", + "ones_like", + "correlate", + "convolve", + "inner", + "dot", + "outer", + "vdot", + "roll", + "rollaxis", + "moveaxis", + "cross", + "tensordot", + "little_endian", + "fromiter", + "array_equal", + "array_equiv", + "indices", + "fromfunction", + "isclose", + "isscalar", + "binary_repr", + "base_repr", + "ones", + "identity", + "allclose", + "putmask", + "flatnonzero", + "inf", + "nan", + "False_", + "True_", + "bitwise_not", + "full", + "full_like", + "matmul", + "vecdot", + "shares_memory", + "may_share_memory", +] + +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_AnyShapeT = TypeVar( + "_AnyShapeT", + tuple[()], + tuple[int], + tuple[int, int], + tuple[int, int, int], + tuple[int, int, int, int], + tuple[int, ...], +) + +_CorrelateMode: TypeAlias = L["valid", "same", "full"] + +@overload +def zeros_like( + a: _ArrayT, + dtype: None = ..., + order: _OrderKACF = ..., + subok: L[True] = ..., + shape: None = ..., + *, + device: L["cpu"] | None = ..., +) -> _ArrayT: ... +@overload +def zeros_like( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def zeros_like( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def zeros_like( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[Any]: ... + +ones: Final[_ConstructorEmpty] + +@overload +def ones_like( + a: _ArrayT, + dtype: None = ..., + order: _OrderKACF = ..., + subok: L[True] = ..., + shape: None = ..., + *, + device: L["cpu"] | None = ..., +) -> _ArrayT: ... +@overload +def ones_like( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ones_like( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def ones_like( + a: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[Any]: ... 
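+
+# Illustrative sketch (not part of the upstream stub): how the `*_like`
+# overloads above are expected to resolve for a static type checker. The
+# `reveal_type` results are assumptions read off the annotations, not
+# verified checker output:
+#
+#     x = np.empty(3, dtype=np.float32)
+#     reveal_type(np.ones_like(x))                  # same ndarray type as `x`
+#     reveal_type(np.ones_like(x, dtype=np.int64))  # NDArray[int64]
+#     reveal_type(np.ones_like([1, 2, 3]))          # NDArray[Any] (fallback)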
+ +# TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview +# 1-D shape +@overload +def full( + shape: SupportsIndex, + fill_value: _ScalarT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[int], _ScalarT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[tuple[int], _DTypeT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[int], _ScalarT]: ... +@overload +def full( + shape: SupportsIndex, + fill_value: Any, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[tuple[int], Any]: ... +# known shape +@overload +def full( + shape: _AnyShapeT, + fill_value: _ScalarT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_AnyShapeT, _ScalarT]: ... +@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[_AnyShapeT, _DTypeT]: ... +@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_AnyShapeT, _ScalarT]: ... +@overload +def full( + shape: _AnyShapeT, + fill_value: Any, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> _Array[_AnyShapeT, Any]: ... +# unknown shape +@overload +def full( + shape: _ShapeLike, + fill_value: _ScalarT, + dtype: None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> NDArray[_ScalarT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: _DTypeT | _SupportsDType[_DTypeT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> np.ndarray[Any, _DTypeT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: type[_ScalarT], + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> NDArray[_ScalarT]: ... +@overload +def full( + shape: _ShapeLike, + fill_value: Any, + dtype: DTypeLike | None = ..., + order: _OrderCF = ..., + **kwargs: Unpack[_KwargsEmpty], +) -> NDArray[Any]: ... + +@overload +def full_like( + a: _ArrayT, + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: L[True] = ..., + shape: None = ..., + *, + device: L["cpu"] | None = ..., +) -> _ArrayT: ... +@overload +def full_like( + a: _ArrayLike[_ScalarT], + fill_value: Any, + dtype: None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def full_like( + a: Any, + fill_value: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def full_like( + a: Any, + fill_value: Any, + dtype: DTypeLike | None = ..., + order: _OrderKACF = ..., + subok: bool = ..., + shape: _ShapeLike | None = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[Any]: ... + +# +@overload +def count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> np.intp: ... 
+@overload +def count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ... +@overload +def count_nonzero( + a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True] +) -> NDArray[np.intp]: ... +@overload +def count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ... + +# +def isfortran(a: NDArray[Any] | generic) -> bool: ... + +def argwhere(a: ArrayLike) -> NDArray[intp]: ... + +def flatnonzero(a: ArrayLike) -> NDArray[intp]: ... + +@overload +def correlate( + a: _ArrayLike[Never], + v: _ArrayLike[Never], + mode: _CorrelateMode = ..., +) -> NDArray[Any]: ... +@overload +def correlate( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = ..., +) -> NDArray[np.bool]: ... +@overload +def correlate( + a: _ArrayLikeUInt_co, + v: _ArrayLikeUInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def correlate( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[signedinteger]: ... +@overload +def correlate( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = ..., +) -> NDArray[floating]: ... +@overload +def correlate( + a: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, + mode: _CorrelateMode = ..., +) -> NDArray[complexfloating]: ... +@overload +def correlate( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = ..., +) -> NDArray[timedelta64]: ... +@overload +def correlate( + a: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, + mode: _CorrelateMode = ..., +) -> NDArray[object_]: ... + +@overload +def convolve( + a: _ArrayLike[Never], + v: _ArrayLike[Never], + mode: _CorrelateMode = ..., +) -> NDArray[Any]: ... +@overload +def convolve( + a: _ArrayLikeBool_co, + v: _ArrayLikeBool_co, + mode: _CorrelateMode = ..., +) -> NDArray[np.bool]: ... +@overload +def convolve( + a: _ArrayLikeUInt_co, + v: _ArrayLikeUInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def convolve( + a: _ArrayLikeInt_co, + v: _ArrayLikeInt_co, + mode: _CorrelateMode = ..., +) -> NDArray[signedinteger]: ... +@overload +def convolve( + a: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, + mode: _CorrelateMode = ..., +) -> NDArray[floating]: ... +@overload +def convolve( + a: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, + mode: _CorrelateMode = ..., +) -> NDArray[complexfloating]: ... +@overload +def convolve( + a: _ArrayLikeTD64_co, + v: _ArrayLikeTD64_co, + mode: _CorrelateMode = ..., +) -> NDArray[timedelta64]: ... +@overload +def convolve( + a: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, + mode: _CorrelateMode = ..., +) -> NDArray[object_]: ... + +@overload +def outer( + a: _ArrayLike[Never], + b: _ArrayLike[Never], + out: None = ..., +) -> NDArray[Any]: ... +@overload +def outer( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def outer( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + out: None = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def outer( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + out: None = ..., +) -> NDArray[signedinteger]: ... +@overload +def outer( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating]: ... +@overload +def outer( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + out: None = ..., +) -> NDArray[complexfloating]: ... 
+@overload +def outer( + a: _ArrayLikeTD64_co, + b: _ArrayLikeTD64_co, + out: None = ..., +) -> NDArray[timedelta64]: ... +@overload +def outer( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def outer( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def tensordot( + a: _ArrayLike[Never], + b: _ArrayLike[Never], + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[Any]: ... +@overload +def tensordot( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[np.bool]: ... +@overload +def tensordot( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def tensordot( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[signedinteger]: ... +@overload +def tensordot( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[floating]: ... +@overload +def tensordot( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[complexfloating]: ... +@overload +def tensordot( + a: _ArrayLikeTD64_co, + b: _ArrayLikeTD64_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[timedelta64]: ... +@overload +def tensordot( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + axes: int | tuple[_ShapeLike, _ShapeLike] = ..., +) -> NDArray[object_]: ... + +@overload +def roll( + a: _ArrayLike[_ScalarT], + shift: _ShapeLike, + axis: _ShapeLike | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def roll( + a: ArrayLike, + shift: _ShapeLike, + axis: _ShapeLike | None = ..., +) -> NDArray[Any]: ... + +def rollaxis( + a: NDArray[_ScalarT], + axis: int, + start: int = ..., +) -> NDArray[_ScalarT]: ... + +def moveaxis( + a: NDArray[_ScalarT], + source: _ShapeLike, + destination: _ShapeLike, +) -> NDArray[_ScalarT]: ... + +@overload +def cross( + a: _ArrayLike[Never], + b: _ArrayLike[Never], + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[Any]: ... +@overload +def cross( + a: _ArrayLikeBool_co, + b: _ArrayLikeBool_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NoReturn: ... +@overload +def cross( + a: _ArrayLikeUInt_co, + b: _ArrayLikeUInt_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def cross( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[signedinteger]: ... +@overload +def cross( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[floating]: ... +@overload +def cross( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def cross( + a: _ArrayLikeObject_co, + b: _ArrayLikeObject_co, + axisa: int = ..., + axisb: int = ..., + axisc: int = ..., + axis: int | None = ..., +) -> NDArray[object_]: ... 
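+
+# Illustrative note (an assumption, not upstream documentation): the boolean
+# overload of ``cross`` above is annotated ``NoReturn`` because the runtime
+# implementation subtracts intermediate products, and boolean subtract is not
+# supported by NumPy, so a call such as the following raises ``TypeError``:
+#
+#     np.cross([True, False, True], [False, True, True])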
+ +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = ..., + sparse: L[False] = ..., +) -> NDArray[int_]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int], + sparse: L[True], +) -> tuple[NDArray[int_], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: type[int] = ..., + *, + sparse: L[True], +) -> tuple[NDArray[int_], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: L[False] = ..., +) -> NDArray[_ScalarT]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: _DTypeLike[_ScalarT], + sparse: L[True], +) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike = ..., + sparse: L[False] = ..., +) -> NDArray[Any]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike, + sparse: L[True], +) -> tuple[NDArray[Any], ...]: ... +@overload +def indices( + dimensions: Sequence[int], + dtype: DTypeLike = ..., + *, + sparse: L[True], +) -> tuple[NDArray[Any], ...]: ... + +def fromfunction( + function: Callable[..., _T], + shape: Sequence[int], + *, + dtype: DTypeLike = ..., + like: _SupportsArrayFunc | None = ..., + **kwargs: Any, +) -> _T: ... + +def isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ... + +def binary_repr(num: SupportsIndex, width: int | None = ...) -> str: ... + +def base_repr( + number: SupportsAbs[float], + base: float = ..., + padding: SupportsIndex | None = ..., +) -> str: ... + +@overload +def identity( + n: int, + dtype: None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def identity( + n: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def identity( + n: int, + dtype: DTypeLike | None = ..., + *, + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +def allclose( + a: ArrayLike, + b: ArrayLike, + rtol: ArrayLike = ..., + atol: ArrayLike = ..., + equal_nan: bool = ..., +) -> bool: ... + +@overload +def isclose( + a: _ScalarLike_co, + b: _ScalarLike_co, + rtol: ArrayLike = ..., + atol: ArrayLike = ..., + equal_nan: bool = ..., +) -> np.bool: ... +@overload +def isclose( + a: ArrayLike, + b: ArrayLike, + rtol: ArrayLike = ..., + atol: ArrayLike = ..., + equal_nan: bool = ..., +) -> NDArray[np.bool]: ... + +def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ... + +def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ... + +@overload +def astype( + x: ndarray[_ShapeT, dtype], + dtype: _DTypeLike[_ScalarT], + /, + *, + copy: bool = ..., + device: L["cpu"] | None = ..., +) -> ndarray[_ShapeT, dtype[_ScalarT]]: ... +@overload +def astype( + x: ndarray[_ShapeT, dtype], + dtype: DTypeLike, + /, + *, + copy: bool = ..., + device: L["cpu"] | None = ..., +) -> ndarray[_ShapeT, dtype]: ... diff --git a/python/numpy/_core/numerictypes.py b/python/numpy/_core/numerictypes.py new file mode 100644 index 000000000..265ad4f8e --- /dev/null +++ b/python/numpy/_core/numerictypes.py @@ -0,0 +1,633 @@ +""" +numerictypes: Define the numeric type objects + +This module is designed so "from numerictypes import \\*" is safe. 
+Exported symbols include: + + Dictionary with all registered number types (including aliases): + sctypeDict + + Type objects (not all will be available, depends on platform): + see variable sctypes for which ones you have + + Bit-width names + + int8 int16 int32 int64 + uint8 uint16 uint32 uint64 + float16 float32 float64 float96 float128 + complex64 complex128 complex192 complex256 + datetime64 timedelta64 + + c-based names + + bool + + object_ + + void, str_ + + byte, ubyte, + short, ushort + intc, uintc, + intp, uintp, + int_, uint, + longlong, ulonglong, + + single, csingle, + double, cdouble, + longdouble, clongdouble, + + As part of the type-hierarchy: xx -- is bit-width + + generic + +-> bool (kind=b) + +-> number + | +-> integer + | | +-> signedinteger (intxx) (kind=i) + | | | byte + | | | short + | | | intc + | | | intp + | | | int_ + | | | longlong + | | \\-> unsignedinteger (uintxx) (kind=u) + | | ubyte + | | ushort + | | uintc + | | uintp + | | uint + | | ulonglong + | +-> inexact + | +-> floating (floatxx) (kind=f) + | | half + | | single + | | double + | | longdouble + | \\-> complexfloating (complexxx) (kind=c) + | csingle + | cdouble + | clongdouble + +-> flexible + | +-> character + | | bytes_ (kind=S) + | | str_ (kind=U) + | | + | \\-> void (kind=V) + \\-> object_ (not used much) (kind=O) + +""" +import numbers +import warnings + +from numpy._utils import set_module + +from . import multiarray as ma +from .multiarray import ( + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + dtype, + is_busday, + ndarray, +) + +# we add more at the bottom +__all__ = [ + 'ScalarType', 'typecodes', 'issubdtype', 'datetime_data', + 'datetime_as_string', 'busday_offset', 'busday_count', + 'is_busday', 'busdaycalendar', 'isdtype' +] + +# we don't need all these imports, but we need to keep them for compatibility +# for users using np._core.numerictypes.UPPER_TABLE +# we don't export these for import *, but we do want them accessible +# as numerictypes.bool, etc. +from builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029 + +from ._dtype import _kind_name +from ._string_helpers import ( # noqa: F401 + LOWER_TABLE, + UPPER_TABLE, + english_capitalize, + english_lower, + english_upper, +) +from ._type_aliases import allTypes, sctypeDict, sctypes + +# We use this later +generic = allTypes['generic'] + +genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16', + 'int32', 'uint32', 'int64', 'uint64', + 'float16', 'float32', 'float64', 'float96', 'float128', + 'complex64', 'complex128', 'complex192', 'complex256', + 'object'] + +@set_module('numpy') +def maximum_sctype(t): + """ + Return the scalar type of highest precision of the same kind as the input. + + .. deprecated:: 2.0 + Use an explicit dtype like int64 or float64 instead. + + Parameters + ---------- + t : dtype or dtype specifier + The input data type. This can be a `dtype` object or an object that + is convertible to a `dtype`. + + Returns + ------- + out : dtype + The highest precision data type of the same kind (`dtype.kind`) as `t`. 
+
+    See Also
+    --------
+    obj2sctype, mintypecode, sctype2char
+    dtype
+
+    Examples
+    --------
+    >>> from numpy._core.numerictypes import maximum_sctype
+    >>> maximum_sctype(int)
+    <class 'numpy.int64'>
+    >>> maximum_sctype(np.uint8)
+    <class 'numpy.uint64'>
+    >>> maximum_sctype(complex)
+    <class 'numpy.complex256'> # may vary
+    >>> maximum_sctype(str)
+    <class 'numpy.str_'>
+
+    >>> maximum_sctype('i2')
+    <class 'numpy.int64'>
+    >>> maximum_sctype('f4')
+    <class 'numpy.float128'> # may vary
+
+    """
+
+    # Deprecated in NumPy 2.0, 2023-07-11
+    warnings.warn(
+        "`maximum_sctype` is deprecated. Use an explicit dtype like int64 "
+        "or float64 instead. (deprecated in NumPy 2.0)",
+        DeprecationWarning,
+        stacklevel=2
+    )
+
+    g = obj2sctype(t)
+    if g is None:
+        return t
+    t = g
+    base = _kind_name(dtype(t))
+    if base in sctypes:
+        return sctypes[base][-1]
+    else:
+        return t
+
+
+@set_module('numpy')
+def issctype(rep):
+    """
+    Determines whether the given object represents a scalar data-type.
+
+    Parameters
+    ----------
+    rep : any
+        If `rep` is an instance of a scalar dtype, True is returned. If not,
+        False is returned.
+
+    Returns
+    -------
+    out : bool
+        Boolean result of check whether `rep` is a scalar dtype.
+
+    See Also
+    --------
+    issubsctype, issubdtype, obj2sctype, sctype2char
+
+    Examples
+    --------
+    >>> from numpy._core.numerictypes import issctype
+    >>> issctype(np.int32)
+    True
+    >>> issctype(list)
+    False
+    >>> issctype(1.1)
+    False
+
+    Strings are also a scalar type:
+
+    >>> issctype(np.dtype('str'))
+    True
+
+    """
+    if not isinstance(rep, (type, dtype)):
+        return False
+    try:
+        res = obj2sctype(rep)
+        if res and res != object_:
+            return True
+        else:
+            return False
+    except Exception:
+        return False
+
+
+def obj2sctype(rep, default=None):
+    """
+    Return the scalar dtype or NumPy equivalent of Python type of an object.
+
+    Parameters
+    ----------
+    rep : any
+        The object of which the type is returned.
+    default : any, optional
+        If given, this is returned for objects whose types can not be
+        determined. If not given, None is returned for those objects.
+
+    Returns
+    -------
+    dtype : dtype or Python type
+        The data type of `rep`.
+
+    See Also
+    --------
+    sctype2char, issctype, issubsctype, issubdtype
+
+    Examples
+    --------
+    >>> from numpy._core.numerictypes import obj2sctype
+    >>> obj2sctype(np.int32)
+    <class 'numpy.int32'>
+    >>> obj2sctype(np.array([1., 2.]))
+    <class 'numpy.float64'>
+    >>> obj2sctype(np.array([1.j]))
+    <class 'numpy.complex128'>
+
+    >>> obj2sctype(dict)
+    <class 'numpy.object_'>
+    >>> obj2sctype('string')
+
+    >>> obj2sctype(1, default=list)
+    <class 'list'>
+
+    """
+    # prevent abstract classes being upcast
+    if isinstance(rep, type) and issubclass(rep, generic):
+        return rep
+    # extract dtype from arrays
+    if isinstance(rep, ndarray):
+        return rep.dtype.type
+    # fall back on dtype to convert
+    try:
+        res = dtype(rep)
+    except Exception:
+        return default
+    else:
+        return res.type
+
+
+@set_module('numpy')
+def issubclass_(arg1, arg2):
+    """
+    Determine if a class is a subclass of a second class.
+
+    `issubclass_` is equivalent to the Python built-in ``issubclass``,
+    except that it returns False instead of raising a TypeError if one
+    of the arguments is not a class.
+
+    Parameters
+    ----------
+    arg1 : class
+        Input class. True is returned if `arg1` is a subclass of `arg2`.
+    arg2 : class or tuple of classes.
+        Input class. If a tuple of classes, True is returned if `arg1` is a
+        subclass of any of the tuple elements.
+
+    Returns
+    -------
+    out : bool
+        Whether `arg1` is a subclass of `arg2` or not.
+ + See Also + -------- + issubsctype, issubdtype, issctype + + Examples + -------- + >>> np.issubclass_(np.int32, int) + False + >>> np.issubclass_(np.int32, float) + False + >>> np.issubclass_(np.float64, float) + True + + """ + try: + return issubclass(arg1, arg2) + except TypeError: + return False + + +@set_module('numpy') +def issubsctype(arg1, arg2): + """ + Determine if the first argument is a subclass of the second argument. + + Parameters + ---------- + arg1, arg2 : dtype or dtype specifier + Data-types. + + Returns + ------- + out : bool + The result. + + See Also + -------- + issctype, issubdtype, obj2sctype + + Examples + -------- + >>> from numpy._core import issubsctype + >>> issubsctype('S8', str) + False + >>> issubsctype(np.array([1]), int) + True + >>> issubsctype(np.array([1]), float) + False + + """ + return issubclass(obj2sctype(arg1), obj2sctype(arg2)) + + +class _PreprocessDTypeError(Exception): + pass + + +def _preprocess_dtype(dtype): + """ + Preprocess dtype argument by: + 1. fetching type from a data type + 2. verifying that types are built-in NumPy dtypes + """ + if isinstance(dtype, ma.dtype): + dtype = dtype.type + if isinstance(dtype, ndarray) or dtype not in allTypes.values(): + raise _PreprocessDTypeError + return dtype + + +@set_module('numpy') +def isdtype(dtype, kind): + """ + Determine if a provided dtype is of a specified data type ``kind``. + + This function only supports built-in NumPy's data types. + Third-party dtypes are not yet supported. + + Parameters + ---------- + dtype : dtype + The input dtype. + kind : dtype or str or tuple of dtypes/strs. + dtype or dtype kind. Allowed dtype kinds are: + * ``'bool'`` : boolean kind + * ``'signed integer'`` : signed integer data types + * ``'unsigned integer'`` : unsigned integer data types + * ``'integral'`` : integer data types + * ``'real floating'`` : real-valued floating-point data types + * ``'complex floating'`` : complex floating-point data types + * ``'numeric'`` : numeric data types + + Returns + ------- + out : bool + + See Also + -------- + issubdtype + + Examples + -------- + >>> import numpy as np + >>> np.isdtype(np.float32, np.float64) + False + >>> np.isdtype(np.float32, "real floating") + True + >>> np.isdtype(np.complex128, ("real floating", "complex floating")) + True + + """ + try: + dtype = _preprocess_dtype(dtype) + except _PreprocessDTypeError: + raise TypeError( + "dtype argument must be a NumPy dtype, " + f"but it is a {type(dtype)}." + ) from None + + input_kinds = kind if isinstance(kind, tuple) else (kind,) + + processed_kinds = set() + + for kind in input_kinds: + if kind == "bool": + processed_kinds.add(allTypes["bool"]) + elif kind == "signed integer": + processed_kinds.update(sctypes["int"]) + elif kind == "unsigned integer": + processed_kinds.update(sctypes["uint"]) + elif kind == "integral": + processed_kinds.update(sctypes["int"] + sctypes["uint"]) + elif kind == "real floating": + processed_kinds.update(sctypes["float"]) + elif kind == "complex floating": + processed_kinds.update(sctypes["complex"]) + elif kind == "numeric": + processed_kinds.update( + sctypes["int"] + sctypes["uint"] + + sctypes["float"] + sctypes["complex"] + ) + elif isinstance(kind, str): + raise ValueError( + "kind argument is a string, but" + f" {kind!r} is not a known kind name." + ) + else: + try: + kind = _preprocess_dtype(kind) + except _PreprocessDTypeError: + raise TypeError( + "kind argument must be comprised of " + "NumPy dtypes or strings only, " + f"but is a {type(kind)}." 
+ ) from None + processed_kinds.add(kind) + + return dtype in processed_kinds + + +@set_module('numpy') +def issubdtype(arg1, arg2): + r""" + Returns True if first argument is a typecode lower/equal in type hierarchy. + + This is like the builtin :func:`issubclass`, but for `dtype`\ s. + + Parameters + ---------- + arg1, arg2 : dtype_like + `dtype` or object coercible to one + + Returns + ------- + out : bool + + See Also + -------- + :ref:`arrays.scalars` : Overview of the numpy type hierarchy. + + Examples + -------- + `issubdtype` can be used to check the type of arrays: + + >>> ints = np.array([1, 2, 3], dtype=np.int32) + >>> np.issubdtype(ints.dtype, np.integer) + True + >>> np.issubdtype(ints.dtype, np.floating) + False + + >>> floats = np.array([1, 2, 3], dtype=np.float32) + >>> np.issubdtype(floats.dtype, np.integer) + False + >>> np.issubdtype(floats.dtype, np.floating) + True + + Similar types of different sizes are not subdtypes of each other: + + >>> np.issubdtype(np.float64, np.float32) + False + >>> np.issubdtype(np.float32, np.float64) + False + + but both are subtypes of `floating`: + + >>> np.issubdtype(np.float64, np.floating) + True + >>> np.issubdtype(np.float32, np.floating) + True + + For convenience, dtype-like objects are allowed too: + + >>> np.issubdtype('S1', np.bytes_) + True + >>> np.issubdtype('i4', np.signedinteger) + True + + """ + if not issubclass_(arg1, generic): + arg1 = dtype(arg1).type + if not issubclass_(arg2, generic): + arg2 = dtype(arg2).type + + return issubclass(arg1, arg2) + + +@set_module('numpy') +def sctype2char(sctype): + """ + Return the string representation of a scalar dtype. + + Parameters + ---------- + sctype : scalar dtype or object + If a scalar dtype, the corresponding string character is + returned. If an object, `sctype2char` tries to infer its scalar type + and then return the corresponding string character. + + Returns + ------- + typechar : str + The string character corresponding to the scalar type. + + Raises + ------ + ValueError + If `sctype` is an object for which the type can not be inferred. + + See Also + -------- + obj2sctype, issctype, issubsctype, mintypecode + + Examples + -------- + >>> from numpy._core.numerictypes import sctype2char + >>> for sctype in [np.int32, np.double, np.cdouble, np.bytes_, np.ndarray]: + ... 
print(sctype2char(sctype)) + l # may vary + d + D + S + O + + >>> x = np.array([1., 2-1.j]) + >>> sctype2char(x) + 'D' + >>> sctype2char(list) + 'O' + + """ + sctype = obj2sctype(sctype) + if sctype is None: + raise ValueError("unrecognized type") + if sctype not in sctypeDict.values(): + # for compatibility + raise KeyError(sctype) + return dtype(sctype).char + + +def _scalar_type_key(typ): + """A ``key`` function for `sorted`.""" + dt = dtype(typ) + return (dt.kind.lower(), dt.itemsize) + + +ScalarType = [int, float, complex, bool, bytes, str, memoryview] +ScalarType += sorted(dict.fromkeys(sctypeDict.values()), key=_scalar_type_key) +ScalarType = tuple(ScalarType) + + +# Now add the types we've determined to this module +for key in allTypes: + globals()[key] = allTypes[key] + __all__.append(key) + +del key + +typecodes = {'Character': 'c', + 'Integer': 'bhilqnp', + 'UnsignedInteger': 'BHILQNP', + 'Float': 'efdg', + 'Complex': 'FDG', + 'AllInteger': 'bBhHiIlLqQnNpP', + 'AllFloat': 'efdgFDG', + 'Datetime': 'Mm', + 'All': '?bhilqnpBHILQNPefdgFDGSUVOMm'} + +# backwards compatibility --- deprecated name +# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py) +typeDict = sctypeDict + +def _register_types(): + numbers.Integral.register(integer) + numbers.Complex.register(inexact) + numbers.Real.register(floating) + numbers.Number.register(number) + + +_register_types() diff --git a/python/numpy/_core/numerictypes.pyi b/python/numpy/_core/numerictypes.pyi new file mode 100644 index 000000000..5a309d4e1 --- /dev/null +++ b/python/numpy/_core/numerictypes.pyi @@ -0,0 +1,197 @@ +from builtins import bool as py_bool +from typing import Any, Final, TypedDict, type_check_only +from typing import Literal as L + +import numpy as np +from numpy import ( + bool, + bool_, + byte, + bytes_, + cdouble, + character, + clongdouble, + complex64, + complex128, + complex192, + complex256, + complexfloating, + csingle, + datetime64, + double, + dtype, + flexible, + float16, + float32, + float64, + float96, + float128, + floating, + generic, + half, + inexact, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + intp, + long, + longdouble, + longlong, + number, + object_, + short, + signedinteger, + single, + str_, + timedelta64, + ubyte, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + unsignedinteger, + ushort, + void, +) +from numpy._typing import DTypeLike + +from ._type_aliases import sctypeDict as sctypeDict +from .multiarray import ( + busday_count, + busday_offset, + busdaycalendar, + datetime_as_string, + datetime_data, + is_busday, +) + +__all__ = [ + "ScalarType", + "typecodes", + "issubdtype", + "datetime_data", + "datetime_as_string", + "busday_offset", + "busday_count", + "is_busday", + "busdaycalendar", + "isdtype", + "generic", + "unsignedinteger", + "character", + "inexact", + "number", + "integer", + "flexible", + "complexfloating", + "signedinteger", + "floating", + "bool", + "float16", + "float32", + "float64", + "longdouble", + "complex64", + "complex128", + "clongdouble", + "bytes_", + "str_", + "void", + "object_", + "datetime64", + "timedelta64", + "int8", + "byte", + "uint8", + "ubyte", + "int16", + "short", + "uint16", + "ushort", + "int32", + "intc", + "uint32", + "uintc", + "int64", + "long", + "uint64", + "ulong", + "longlong", + "ulonglong", + "intp", + "uintp", + "double", + "cdouble", + "single", + "csingle", + "half", + "bool_", + "int_", + "uint", + "float96", + "float128", + "complex192", + "complex256", +] + 
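The `isdtype` and `issubdtype` functions documented above differ in how they match: `isdtype` checks exact membership in a dtype kind and rejects anything that is not a built-in NumPy dtype or a known kind name, while `issubdtype` coerces dtype-like arguments and walks the abstract scalar hierarchy. A short usage sketch (assumes NumPy >= 2.0, where `np.isdtype` was introduced):

import numpy as np

# isdtype: exact kind membership, no hierarchy walk
print(np.isdtype(np.int16, "signed integer"))                 # True
print(np.isdtype(np.float32, ("integral", "real floating")))  # True
print(np.isdtype(np.float32, np.float64))                     # False, not the same dtype

# issubdtype: accepts dtype-likes and walks the scalar hierarchy
print(np.issubdtype("i4", np.signedinteger))       # True
print(np.issubdtype(np.dtype("f4"), np.floating))  # True

# Unknown kind names raise instead of returning False
try:
    np.isdtype(np.int16, "float")
except ValueError as exc:
    print(exc)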
+@type_check_only +class _TypeCodes(TypedDict): + Character: L["c"] + Integer: L["bhilqnp"] + UnsignedInteger: L["BHILQNP"] + Float: L["efdg"] + Complex: L["FDG"] + AllInteger: L["bBhHiIlLqQnNpP"] + AllFloat: L["efdgFDG"] + Datetime: L["Mm"] + All: L["?bhilqnpBHILQNPefdgFDGSUVOMm"] + +def isdtype(dtype: dtype | type, kind: DTypeLike | tuple[DTypeLike, ...]) -> py_bool: ... +def issubdtype(arg1: DTypeLike | None, arg2: DTypeLike | None) -> py_bool: ... + +typecodes: Final[_TypeCodes] = ... +ScalarType: Final[ + tuple[ + type[int], + type[float], + type[complex], + type[py_bool], + type[bytes], + type[str], + type[memoryview[Any]], + type[np.bool], + type[complex64], + type[complex128], + type[complex128 | complex192 | complex256], + type[float16], + type[float32], + type[float64], + type[float64 | float96 | float128], + type[int8], + type[int16], + type[int32], + type[int32 | int64], + type[int64], + type[datetime64], + type[timedelta64], + type[object_], + type[bytes_], + type[str_], + type[uint8], + type[uint16], + type[uint32], + type[uint32 | uint64], + type[uint64], + type[void], + ] +] = ... +typeDict: Final = sctypeDict diff --git a/python/numpy/_core/overrides.py b/python/numpy/_core/overrides.py new file mode 100644 index 000000000..6414710ae --- /dev/null +++ b/python/numpy/_core/overrides.py @@ -0,0 +1,183 @@ +"""Implementation of __array_function__ overrides from NEP-18.""" +import collections +import functools + +from numpy._core._multiarray_umath import ( + _ArrayFunctionDispatcher, + _get_implementing_args, + add_docstring, +) +from numpy._utils import set_module # noqa: F401 +from numpy._utils._inspect import getargspec + +ARRAY_FUNCTIONS = set() + +array_function_like_doc = ( + """like : array_like, optional + Reference object to allow the creation of arrays which are not + NumPy arrays. If an array-like passed in as ``like`` supports + the ``__array_function__`` protocol, the result will be defined + by it. In this case, it ensures the creation of an array object + compatible with that passed in via this argument.""" +) + +def get_array_function_like_doc(public_api, docstring_template=""): + ARRAY_FUNCTIONS.add(public_api) + docstring = public_api.__doc__ or docstring_template + return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc) + +def finalize_array_function_like(public_api): + public_api.__doc__ = get_array_function_like_doc(public_api) + return public_api + + +add_docstring( + _ArrayFunctionDispatcher, + """ + Class to wrap functions with checks for __array_function__ overrides. + + All arguments are required, and can only be passed by position. + + Parameters + ---------- + dispatcher : function or None + The dispatcher function that returns a single sequence-like object + of all arguments relevant. It must have the same signature (except + the default values) as the actual implementation. + If ``None``, this is a ``like=`` dispatcher and the + ``_ArrayFunctionDispatcher`` must be called with ``like`` as the + first (additional and positional) argument. + implementation : function + Function that implements the operation on NumPy arrays without + overrides. Arguments passed calling the ``_ArrayFunctionDispatcher`` + will be forwarded to this (and the ``dispatcher``) as if using + ``*args, **kwargs``. + + Attributes + ---------- + _implementation : function + The original implementation passed in. 
+ """) + + +# exposed for testing purposes; used internally by _ArrayFunctionDispatcher +add_docstring( + _get_implementing_args, + """ + Collect arguments on which to call __array_function__. + + Parameters + ---------- + relevant_args : iterable of array-like + Iterable of possibly array-like arguments to check for + __array_function__ methods. + + Returns + ------- + Sequence of arguments with __array_function__ methods, in the order in + which they should be called. + """) + + +ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults') + + +def verify_matching_signatures(implementation, dispatcher): + """Verify that a dispatcher function has the right signature.""" + implementation_spec = ArgSpec(*getargspec(implementation)) + dispatcher_spec = ArgSpec(*getargspec(dispatcher)) + + if (implementation_spec.args != dispatcher_spec.args or + implementation_spec.varargs != dispatcher_spec.varargs or + implementation_spec.keywords != dispatcher_spec.keywords or + (bool(implementation_spec.defaults) != + bool(dispatcher_spec.defaults)) or + (implementation_spec.defaults is not None and + len(implementation_spec.defaults) != + len(dispatcher_spec.defaults))): + raise RuntimeError('implementation and dispatcher for %s have ' + 'different function signatures' % implementation) + + if implementation_spec.defaults is not None: + if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults): + raise RuntimeError('dispatcher functions can only use None for ' + 'default argument values') + + +def array_function_dispatch(dispatcher=None, module=None, verify=True, + docs_from_dispatcher=False): + """Decorator for adding dispatch with the __array_function__ protocol. + + See NEP-18 for example usage. + + Parameters + ---------- + dispatcher : callable or None + Function that when called like ``dispatcher(*args, **kwargs)`` with + arguments from the NumPy function call returns an iterable of + array-like arguments to check for ``__array_function__``. + + If `None`, the first argument is used as the single `like=` argument + and not passed on. A function implementing `like=` must call its + dispatcher with `like` as the first non-keyword argument. + module : str, optional + __module__ attribute to set on new function, e.g., ``module='numpy'``. + By default, module is copied from the decorated function. + verify : bool, optional + If True, verify the that the signature of the dispatcher and decorated + function signatures match exactly: all required and optional arguments + should appear in order with the same names, but the default values for + all optional arguments should be ``None``. Only disable verification + if the dispatcher's signature needs to deviate for some particular + reason, e.g., because the function has a signature like + ``func(*args, **kwargs)``. + docs_from_dispatcher : bool, optional + If True, copy docs from the dispatcher function onto the dispatched + function, rather than from the implementation. This is useful for + functions defined in C, which otherwise don't have docstrings. + + Returns + ------- + Function suitable for decorating the implementation of a NumPy function. 
+ + """ + def decorator(implementation): + if verify: + if dispatcher is not None: + verify_matching_signatures(implementation, dispatcher) + else: + # Using __code__ directly similar to verify_matching_signature + co = implementation.__code__ + last_arg = co.co_argcount + co.co_kwonlyargcount - 1 + last_arg = co.co_varnames[last_arg] + if last_arg != "like" or co.co_kwonlyargcount == 0: + raise RuntimeError( + "__array_function__ expects `like=` to be the last " + "argument and a keyword-only argument. " + f"{implementation} does not seem to comply.") + + if docs_from_dispatcher: + add_docstring(implementation, dispatcher.__doc__) + + public_api = _ArrayFunctionDispatcher(dispatcher, implementation) + public_api = functools.wraps(implementation)(public_api) + + if module is not None: + public_api.__module__ = module + + ARRAY_FUNCTIONS.add(public_api) + + return public_api + + return decorator + + +def array_function_from_dispatcher( + implementation, module=None, verify=True, docs_from_dispatcher=True): + """Like array_function_dispatcher, but with function arguments flipped.""" + + def decorator(dispatcher): + return array_function_dispatch( + dispatcher, module, verify=verify, + docs_from_dispatcher=docs_from_dispatcher)(implementation) + return decorator diff --git a/python/numpy/_core/overrides.pyi b/python/numpy/_core/overrides.pyi new file mode 100644 index 000000000..05453190e --- /dev/null +++ b/python/numpy/_core/overrides.pyi @@ -0,0 +1,48 @@ +from collections.abc import Callable, Iterable +from typing import Any, Final, NamedTuple, ParamSpec, TypeVar + +from numpy._typing import _SupportsArrayFunc + +_T = TypeVar("_T") +_Tss = ParamSpec("_Tss") +_FuncT = TypeVar("_FuncT", bound=Callable[..., object]) + +### + +ARRAY_FUNCTIONS: set[Callable[..., Any]] = ... +array_function_like_doc: Final[str] = ... + +class ArgSpec(NamedTuple): + args: list[str] + varargs: str | None + keywords: str | None + defaults: tuple[Any, ...] + +def get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ... +def finalize_array_function_like(public_api: _FuncT) -> _FuncT: ... + +# +def verify_matching_signatures( + implementation: Callable[_Tss, object], + dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]], +) -> None: ... + +# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with +# the original wrapped callable stored in the `._implementation` attribute. It checks +# for any `__array_function__` of the values of specific arguments that the dispatcher +# specifies. Since the dispatcher only returns an iterable of passed array-like args, +# this overridable behaviour is impossible to annotate. +def array_function_dispatch( + dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None, + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = False, +) -> Callable[[_FuncT], _FuncT]: ... + +# +def array_function_from_dispatcher( + implementation: Callable[_Tss, _T], + module: str | None = None, + verify: bool = True, + docs_from_dispatcher: bool = True, +) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ... diff --git a/python/numpy/_core/printoptions.py b/python/numpy/_core/printoptions.py new file mode 100644 index 000000000..5d6f9635c --- /dev/null +++ b/python/numpy/_core/printoptions.py @@ -0,0 +1,32 @@ +""" +Stores and defines the low-level format_options context variable. 
+ +This is defined in its own file outside of the arrayprint module +so we can import it from C while initializing the multiarray +C module during import without introducing circular dependencies. +""" + +import sys +from contextvars import ContextVar + +__all__ = ["format_options"] + +default_format_options_dict = { + "edgeitems": 3, # repr N leading and trailing items of each dimension + "threshold": 1000, # total items > triggers array summarization + "floatmode": "maxprec", + "precision": 8, # precision of floating point representations + "suppress": False, # suppress printing small floating values in exp format + "linewidth": 75, + "nanstr": "nan", + "infstr": "inf", + "sign": "-", + "formatter": None, + # Internally stored as an int to simplify comparisons; converted from/to + # str/False on the way in/out. + 'legacy': sys.maxsize, + 'override_repr': None, +} + +format_options = ContextVar( + "format_options", default=default_format_options_dict) diff --git a/python/numpy/_core/printoptions.pyi b/python/numpy/_core/printoptions.pyi new file mode 100644 index 000000000..bd7c7b406 --- /dev/null +++ b/python/numpy/_core/printoptions.pyi @@ -0,0 +1,28 @@ +from collections.abc import Callable +from contextvars import ContextVar +from typing import Any, Final, TypedDict + +from .arrayprint import _FormatDict + +__all__ = ["format_options"] + +### + +class _FormatOptionsDict(TypedDict): + edgeitems: int + threshold: int + floatmode: str + precision: int + suppress: bool + linewidth: int + nanstr: str + infstr: str + sign: str + formatter: _FormatDict | None + legacy: int + override_repr: Callable[[Any], str] | None + +### + +default_format_options_dict: Final[_FormatOptionsDict] = ... +format_options: ContextVar[_FormatOptionsDict] diff --git a/python/numpy/_core/records.py b/python/numpy/_core/records.py new file mode 100644 index 000000000..39bcf4ba6 --- /dev/null +++ b/python/numpy/_core/records.py @@ -0,0 +1,1089 @@ +""" +This module contains a set of functions for record arrays. +""" +import os +import warnings +from collections import Counter +from contextlib import nullcontext + +from numpy._utils import set_module + +from . import numeric as sb +from . import numerictypes as nt +from .arrayprint import _get_legacy_print_mode + +# All of the functions allow formats to be a dtype +__all__ = [ + 'record', 'recarray', 'format_parser', 'fromarrays', 'fromrecords', + 'fromstring', 'fromfile', 'array', 'find_duplicate', +] + + +ndarray = sb.ndarray + +_byteorderconv = {'b': '>', + 'l': '<', + 'n': '=', + 'B': '>', + 'L': '<', + 'N': '=', + 'S': 's', + 's': 's', + '>': '>', + '<': '<', + '=': '=', + '|': '|', + 'I': '|', + 'i': '|'} + +# formats regular expression +# allows multidimensional spec with a tuple syntax in front +# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 ' +# are equally allowed + +numfmt = nt.sctypeDict + + +@set_module('numpy.rec') +def find_duplicate(list): + """Find duplication in a list, return a list of duplicated elements""" + return [ + item + for item, counts in Counter(list).items() + if counts > 1 + ] + + +@set_module('numpy.rec') +class format_parser: + """ + Class to convert formats, names, titles description to a dtype. + + After constructing the format_parser object, the dtype attribute is + the converted data-type: + ``dtype = format_parser(formats, names, titles).dtype`` + + Attributes + ---------- + dtype : dtype + The converted data-type. 
+
+    Parameters
+    ----------
+    formats : str or list of str
+        The format description, either specified as a string with
+        comma-separated format descriptions in the form ``'f8, i4, S5'``, or
+        a list of format description strings in the form
+        ``['f8', 'i4', 'S5']``.
+    names : str or list/tuple of str
+        The field names, either specified as a comma-separated string in the
+        form ``'col1, col2, col3'``, or as a list or tuple of strings in the
+        form ``['col1', 'col2', 'col3']``.
+        An empty list can be used, in that case default field names
+        ('f0', 'f1', ...) are used.
+    titles : sequence
+        Sequence of title strings. An empty list can be used to leave titles
+        out.
+    aligned : bool, optional
+        If True, align the fields by padding as the C-compiler would.
+        Default is False.
+    byteorder : str, optional
+        If specified, all the fields will be changed to the
+        provided byte-order. Otherwise, the default byte-order is
+        used. For all available string specifiers, see `dtype.newbyteorder`.
+
+    See Also
+    --------
+    numpy.dtype, numpy.typename
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.rec.format_parser(['<u2', '<i4', '<f8'], ['col1', 'col2', 'col3'],
+    ...                      ['T1', 'T2', 'T3']).dtype
+    dtype([(('T1', 'col1'), '<u2'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), '<f8')])
+
+    `names` and/or `titles` can be empty lists. If `titles` is an empty list,
+    titles will simply not appear. If `names` is empty, default field names
+    will be used.
+
+    >>> np.rec.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
+    ...                      []).dtype
+    dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', 'S5')])
+    >>> np.rec.format_parser(['<f8', '<i4', '<a5'], [], []).dtype
+    dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])
+
+    """
+
+    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
+        self._parseFormats(formats, aligned)
+        self._setfieldnames(names, titles)
+        self._createdtype(byteorder)
+
+    def _parseFormats(self, formats, aligned=False):
+        """ Parse the field formats """
+
+        if formats is None:
+            raise ValueError("Need formats argument")
+        if isinstance(formats, list):
+            dtype = sb.dtype([(f'f{i}', format_)
+                              for i, format_ in enumerate(formats)],
+                             aligned)
+        else:
+            dtype = sb.dtype(formats, aligned)
+        fields = dtype.fields
+        if fields is None:
+            dtype = sb.dtype([('f1', dtype)], aligned)
+            fields = dtype.fields
+        keys = dtype.names
+        self._f_formats = [fields[key][0] for key in keys]
+        self._offsets = [fields[key][1] for key in keys]
+        self._nfields = len(keys)
+
+    def _setfieldnames(self, names, titles):
+        """convert input field names into a list and assign to the _names
+        attribute """
+
+        if names:
+            if type(names) in [list, tuple]:
+                pass
+            elif isinstance(names, str):
+                names = names.split(',')
+            else:
+                raise NameError(f"illegal input names {names!r}")
+
+            self._names = [n.strip() for n in names[:self._nfields]]
+        else:
+            self._names = []
+
+        # if the names are not specified, they will be assigned as
+        # "f0, f1, f2,..."; if not enough names are specified, the
+        # remaining fields are named "f[n], f[n+1],..." where n is the
+        # number of specified names
+        self._names += [f'f{i}' for i in range(len(self._names),
+                                               self._nfields)]
+        # check for redundant names
+        _dup = find_duplicate(self._names)
+        if _dup:
+            raise ValueError(f"Duplicate field names: {_dup}")
+
+        if titles:
+            self._titles = [n.strip() for n in titles[:self._nfields]]
+        else:
+            self._titles = []
+            titles = []
+
+        if self._nfields > len(titles):
+            self._titles += [None] * (self._nfields - len(titles))
+
+    def _createdtype(self, byteorder):
+        dtype = sb.dtype({
+            'names': self._names,
+            'formats': self._f_formats,
+            'offsets': self._offsets,
+            'titles': self._titles,
+        })
+        if byteorder is not None:
+            byteorder = _byteorderconv[byteorder[0]]
+            dtype = dtype.newbyteorder(byteorder)
+
+        self.dtype = dtype
+
+
+class record(nt.void):
+    """A data-type scalar that allows field access as attribute lookup.
+    """
+
+    # manually set name and module so that this class's type shows up
+    # as numpy.record when printed
+    __name__ = 'record'
+    __module__ = 'numpy'
+
+    def __repr__(self):
+        if _get_legacy_print_mode() <= 113:
+            return self.__str__()
+        return super().__repr__()
+
+    def __str__(self):
+        if _get_legacy_print_mode() <= 113:
+            return str(self.item())
+        return super().__str__()
+
+    def __getattribute__(self, attr):
+        if attr in ('setfield', 'getfield', 'dtype'):
+            return nt.void.__getattribute__(self, attr)
+        try:
+            return nt.void.__getattribute__(self, attr)
+        except AttributeError:
+            pass
+        fielddict = nt.void.__getattribute__(self, 'dtype').fields
+        res = fielddict.get(attr, None)
+        if res:
+            obj = self.getfield(*res[:2])
+            # if it has fields return a record,
+            # otherwise return the object
+            try:
+                dt = obj.dtype
+            except AttributeError:
+                # happens if field is Object type
+                return obj
+            if dt.names is not None:
+                return obj.view((self.__class__, obj.dtype))
+            return obj
+        else:
+            raise AttributeError(f"'record' object has no attribute '{attr}'")
+
+    def __setattr__(self, attr, val):
+        if attr in ('setfield', 'getfield', 'dtype'):
+            raise AttributeError(f"Cannot set '{attr}' attribute")
+        fielddict = nt.void.__getattribute__(self, 'dtype').fields
+        res = fielddict.get(attr, None)
+        if res:
+            return self.setfield(val, *res[:2])
+        elif getattr(self, attr, None):
+            return nt.void.__setattr__(self, attr, val)
+        else:
+            raise AttributeError(f"'record' object has no attribute '{attr}'")
+
+    def __getitem__(self, indx):
+        obj = nt.void.__getitem__(self, indx)
+
+        # copy behavior of record.__getattribute__,
+        if isinstance(obj, nt.void) and obj.dtype.names is not None:
+            return obj.view((self.__class__, obj.dtype))
+        else:
+            # return a single element
+            return obj
+
+    def pprint(self):
"""Pretty-print all fields.""" + # pretty-print all fields + names = self.dtype.names + maxlen = max(len(name) for name in names) + fmt = '%% %ds: %%s' % maxlen + rows = [fmt % (name, getattr(self, name)) for name in names] + return "\n".join(rows) + +# The recarray is almost identical to a standard array (which supports +# named fields already) The biggest difference is that it can use +# attribute-lookup to find the fields and it is constructed using +# a record. + +# If byteorder is given it forces a particular byteorder on all +# the fields (and any subfields) + + +@set_module("numpy.rec") +class recarray(ndarray): + """Construct an ndarray that allows field access using attributes. + + Arrays may have a data-types containing fields, analogous + to columns in a spread sheet. An example is ``[(x, int), (y, float)]``, + where each entry in the array is a pair of ``(int, float)``. Normally, + these attributes are accessed using dictionary lookups such as ``arr['x']`` + and ``arr['y']``. Record arrays allow the fields to be accessed as members + of the array, using ``arr.x`` and ``arr.y``. + + Parameters + ---------- + shape : tuple + Shape of output array. + dtype : data-type, optional + The desired data-type. By default, the data-type is determined + from `formats`, `names`, `titles`, `aligned` and `byteorder`. + formats : list of data-types, optional + A list containing the data-types for the different columns, e.g. + ``['i4', 'f8', 'i4']``. `formats` does *not* support the new + convention of using types directly, i.e. ``(int, float, int)``. + Note that `formats` must be a list, not a tuple. + Given that `formats` is somewhat limited, we recommend specifying + `dtype` instead. + names : tuple of str, optional + The name of each column, e.g. ``('x', 'y', 'z')``. + buf : buffer, optional + By default, a new array is created of the given shape and data-type. + If `buf` is specified and is an object exposing the buffer interface, + the array will use the memory from the existing buffer. In this case, + the `offset` and `strides` keywords are available. + + Other Parameters + ---------------- + titles : tuple of str, optional + Aliases for column names. For example, if `names` were + ``('x', 'y', 'z')`` and `titles` is + ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then + ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``. + byteorder : {'<', '>', '='}, optional + Byte-order for all fields. + aligned : bool, optional + Align the fields in memory as the C-compiler would. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + offset : int, optional + Start reading buffer (`buf`) from this offset onwards. + order : {'C', 'F'}, optional + Row-major (C-style) or column-major (Fortran-style) order. + + Returns + ------- + rec : recarray + Empty array of the given shape and type. + + See Also + -------- + numpy.rec.fromrecords : Construct a record array from data. + numpy.record : fundamental data-type for `recarray`. + numpy.rec.format_parser : determine data-type from formats, names, titles. + + Notes + ----- + This constructor can be compared to ``empty``: it creates a new record + array but does not fill it with data. To create a record array from data, + use one of the following methods: + + 1. Create a standard ndarray and convert it to a record array, + using ``arr.view(np.recarray)`` + 2. Use the `buf` keyword. + 3. 
Use `np.rec.fromrecords`. + + Examples + -------- + Create an array with two fields, ``x`` and ``y``: + + >>> import numpy as np + >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x + array([(1., 2), (3., 4)], dtype=[('x', '>> x['x'] + array([1., 3.]) + + View the array as a record array: + + >>> x = x.view(np.recarray) + + >>> x.x + array([1., 3.]) + + >>> x.y + array([2, 4]) + + Create a new, empty record array: + + >>> np.recarray((2,), + ... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP + rec.array([(-1073741821, 1.2249118382103472e-301, 24547520), + (3471280, 1.2134086255804012e-316, 0)], + dtype=[('x', ' 0 or self.shape == (0,): + lst = sb.array2string( + self, separator=', ', prefix=prefix, suffix=',') + else: + # show zero-length shape unless it is (0,) + lst = f"[], shape={repr(self.shape)}" + + lf = '\n' + ' ' * len(prefix) + if _get_legacy_print_mode() <= 113: + lf = ' ' + lf # trailing space + return fmt % (lst, lf, repr_dtype) + + def field(self, attr, val=None): + if isinstance(attr, int): + names = ndarray.__getattribute__(self, 'dtype').names + attr = names[attr] + + fielddict = ndarray.__getattribute__(self, 'dtype').fields + + res = fielddict[attr][:2] + + if val is None: + obj = self.getfield(*res) + if obj.dtype.names is not None: + return obj + return obj.view(ndarray) + else: + return self.setfield(val, *res) + + +def _deprecate_shape_0_as_None(shape): + if shape == 0: + warnings.warn( + "Passing `shape=0` to have the shape be inferred is deprecated, " + "and in future will be equivalent to `shape=(0,)`. To infer " + "the shape and suppress this warning, pass `shape=None` instead.", + FutureWarning, stacklevel=3) + return None + else: + return shape + + +@set_module("numpy.rec") +def fromarrays(arrayList, dtype=None, shape=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create a record array from a (flat) list of arrays + + Parameters + ---------- + arrayList : list or tuple + List of array-like objects (such as lists, tuples, + and ndarrays). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + Shape of the resulting array. If not provided, inferred from + ``arrayList[0]``. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.rec.format_parser` to construct a dtype. See that function for + detailed documentation. + + Returns + ------- + np.recarray + Record array consisting of given arrayList columns. + + Examples + -------- + >>> x1=np.array([1,2,3,4]) + >>> x2=np.array(['a','dd','xyz','12']) + >>> x3=np.array([1.1,2,3,4]) + >>> r = np.rec.fromarrays([x1,x2,x3],names='a,b,c') + >>> print(r[1]) + (2, 'dd', 2.0) # may vary + >>> x1[1]=34 + >>> r.a + array([1, 2, 3, 4]) + + >>> x1 = np.array([1, 2, 3, 4]) + >>> x2 = np.array(['a', 'dd', 'xyz', '12']) + >>> x3 = np.array([1.1, 2, 3,4]) + >>> r = np.rec.fromarrays( + ... [x1, x2, x3], + ... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)])) + >>> r + rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ), + (4, b'12', 4. 
)],
+              dtype=[('a', '<i4'), ('b', 'S3'), ('c', '<f4')])
+    """
+
+    arrayList = [sb.asarray(x) for x in arrayList]
+
+    # NumPy 1.19.0, 2020-01-01
+    shape = _deprecate_shape_0_as_None(shape)
+
+    if shape is None:
+        shape = arrayList[0].shape
+    elif isinstance(shape, int):
+        shape = (shape,)
+
+    if formats is None and dtype is None:
+        # go through each object in the list to see if it is an ndarray
+        # and determine the formats.
+        formats = [obj.dtype for obj in arrayList]
+
+    if dtype is not None:
+        descr = sb.dtype(dtype)
+    else:
+        descr = format_parser(formats, names, titles, aligned, byteorder).dtype
+    _names = descr.names
+
+    # Determine shape from data-type.
+    if len(descr) != len(arrayList):
+        raise ValueError("mismatch between the number of fields "
+                         "and the number of arrays")
+
+    d0 = descr[0].shape
+    nn = len(d0)
+    if nn > 0:
+        shape = shape[:-nn]
+
+    _array = recarray(shape, descr)
+
+    # populate the record array (makes a copy)
+    for k, obj in enumerate(arrayList):
+        nn = descr[k].ndim
+        testshape = obj.shape[:obj.ndim - nn]
+        name = _names[k]
+        if testshape != shape:
+            raise ValueError(f'array-shape mismatch in array {k} ("{name}")')
+
+        _array[name] = obj
+
+    return _array
+
+
+@set_module("numpy.rec")
+def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
+                titles=None, aligned=False, byteorder=None):
+    """Create a recarray from a list of records in text form.
+
+    Parameters
+    ----------
+    recList : sequence
+        data in the same field may be heterogeneous - they will be promoted
+        to the highest data type.
+    dtype : data-type, optional
+        valid dtype for all arrays
+    shape : int or tuple of ints, optional
+        shape of each array.
+    formats, names, titles, aligned, byteorder :
+        If `dtype` is ``None``, these arguments are passed to
+        `numpy.format_parser` to construct a dtype. See that function for
+        detailed documentation.
+
+        If both `formats` and `dtype` are None, then this will auto-detect
+        formats. Use list of tuples rather than list of lists for faster
+        processing.
+
+    Returns
+    -------
+    np.recarray
+        record array consisting of given recList rows.
+
+    Examples
+    --------
+    >>> r=np.rec.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
+    ... names='col1,col2,col3')
+    >>> print(r[0])
+    (456, 'dbe', 1.2)
+    >>> r.col1
+    array([456,   2])
+    >>> r.col2
+    array(['dbe', 'de'], dtype='<U3')
+    >>> import pickle
+    >>> pickle.loads(pickle.dumps(r))
+    rec.array([(456, 'dbe', 1.2), (  2, 'de', 1.3)],
+              dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')])
+    """
+
+    if formats is None and dtype is None:  # slower
+        obj = sb.array(recList, dtype=object)
+        arrlist = [sb.array(obj[..., i].tolist())
+                   for i in range(obj.shape[-1])]
+        return fromarrays(arrlist, formats=formats, shape=shape, names=names,
+                          titles=titles, aligned=aligned, byteorder=byteorder)
+
+    if dtype is not None:
+        descr = sb.dtype((record, dtype))
+    else:
+        descr = format_parser(formats, names, titles,
+                              aligned, byteorder).dtype
+
+    try:
+        retval = sb.array(recList, dtype=descr)
+    except (TypeError, ValueError):
+        # NumPy 1.19.0, 2020-01-01
+        shape = _deprecate_shape_0_as_None(shape)
+        if shape is None:
+            shape = len(recList)
+        if isinstance(shape, int):
+            shape = (shape,)
+        if len(shape) > 1:
+            raise ValueError("Can only deal with 1-d array.")
+        _array = recarray(shape, descr)
+        for k in range(_array.size):
+            _array[k] = tuple(recList[k])
+        # list of lists instead of list of tuples ?
+        # 2018-02-07, 1.14.1
+        warnings.warn(
+            "fromrecords expected a list of tuples, may have received a list "
+            "of lists instead. In the future that will raise an error",
+            FutureWarning, stacklevel=2)
+        return _array
+    else:
+        if shape is not None and retval.shape != shape:
+            retval.shape = shape
+
+        res = retval.view(recarray)
+
+        return res
+
+
+@set_module("numpy.rec")
+def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
+               names=None, titles=None, aligned=False, byteorder=None):
+    r"""Create a record array from binary data
+
+    Note that despite the name of this function it does not accept `str`
+    instances.
+
+    Parameters
+    ----------
+    datastring : bytes-like
+        Buffer of binary data
+    dtype : data-type, optional
+        Valid dtype for all arrays
+    shape : int or tuple of ints, optional
+        Shape of each array.
+    offset : int, optional
+        Position in the buffer to start reading from.
+    formats, names, titles, aligned, byteorder :
+        If `dtype` is ``None``, these arguments are passed to
+        `numpy.format_parser` to construct a dtype. See that function for
+        detailed documentation.
+
+
+    Returns
+    -------
+    np.recarray
+        Record array view into the data in datastring. This will be readonly
+        if `datastring` is readonly.
+
+    See Also
+    --------
+    numpy.frombuffer
+
+    Examples
+    --------
+    >>> a = b'\x01\x02\x03abc'
+    >>> np.rec.fromstring(a, dtype='u1,u1,u1,S3')
+    rec.array([(1, 2, 3, b'abc')],
+              dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')])
+
+    >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64),
+    ...                 ('GradeLevel', np.int32)]
+    >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5),
+    ...
('Aadi', 66.6, 6)], dtype=grades_dtype) + >>> np.rec.fromstring(grades_array.tobytes(), dtype=grades_dtype) + rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)], + dtype=[('Name', '>> s = '\x01\x02\x03abc' + >>> np.rec.fromstring(s, dtype='u1,u1,u1,S3') + Traceback (most recent call last): + ... + TypeError: a bytes-like object is required, not 'str' + """ + + if dtype is None and formats is None: + raise TypeError("fromstring() needs a 'dtype' or 'formats' argument") + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser(formats, names, titles, aligned, byteorder).dtype + + itemsize = descr.itemsize + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape in (None, -1): + shape = (len(datastring) - offset) // itemsize + + _array = recarray(shape, descr, buf=datastring, offset=offset) + return _array + +def get_remaining_size(fd): + pos = fd.tell() + try: + fd.seek(0, 2) + return fd.tell() - pos + finally: + fd.seek(pos, 0) + + +@set_module("numpy.rec") +def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, + names=None, titles=None, aligned=False, byteorder=None): + """Create an array from binary file data + + Parameters + ---------- + fd : str or file type + If file is a string or a path-like object then that file is opened, + else it is assumed to be a file object. The file object must + support random access (i.e. it must have tell and seek methods). + dtype : data-type, optional + valid dtype for all arrays + shape : int or tuple of ints, optional + shape of each array. + offset : int, optional + Position in the file to start reading from. + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation + + Returns + ------- + np.recarray + record array consisting of data enclosed in file. + + Examples + -------- + >>> from tempfile import TemporaryFile + >>> a = np.empty(10,dtype='f8,i4,a5') + >>> a[5] = (0.5,10,'abcde') + >>> + >>> fd=TemporaryFile() + >>> a = a.view(a.dtype.newbyteorder('<')) + >>> a.tofile(fd) + >>> + >>> _ = fd.seek(0) + >>> r=np.rec.fromfile(fd, formats='f8,i4,a5', shape=10, + ... byteorder='<') + >>> print(r[5]) + (0.5, 10, b'abcde') + >>> r.shape + (10,) + """ + + if dtype is None and formats is None: + raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") + + # NumPy 1.19.0, 2020-01-01 + shape = _deprecate_shape_0_as_None(shape) + + if shape is None: + shape = (-1,) + elif isinstance(shape, int): + shape = (shape,) + + if hasattr(fd, 'readinto'): + # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase + # interface. Example of fd: gzip, BytesIO, BufferedReader + # file already opened + ctx = nullcontext(fd) + else: + # open file + ctx = open(os.fspath(fd), 'rb') + + with ctx as fd: + if offset > 0: + fd.seek(offset, 1) + size = get_remaining_size(fd) + + if dtype is not None: + descr = sb.dtype(dtype) + else: + descr = format_parser( + formats, names, titles, aligned, byteorder + ).dtype + + itemsize = descr.itemsize + + shapeprod = sb.array(shape).prod(dtype=nt.intp) + shapesize = shapeprod * itemsize + if shapesize < 0: + shape = list(shape) + shape[shape.index(-1)] = size // -shapesize + shape = tuple(shape) + shapeprod = sb.array(shape).prod(dtype=nt.intp) + + nbytes = shapeprod * itemsize + + if nbytes > size: + raise ValueError( + "Not enough bytes left in file for specified " + "shape and type." 
+ ) + + # create the array + _array = recarray(shape, descr) + nbytesread = fd.readinto(_array.data) + if nbytesread != nbytes: + raise OSError("Didn't read as many bytes as expected") + + return _array + + +@set_module("numpy.rec") +def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None, + names=None, titles=None, aligned=False, byteorder=None, copy=True): + """ + Construct a record array from a wide-variety of objects. + + A general-purpose record array constructor that dispatches to the + appropriate `recarray` creation function based on the inputs (see Notes). + + Parameters + ---------- + obj : any + Input object. See Notes for details on how various input types are + treated. + dtype : data-type, optional + Valid dtype for array. + shape : int or tuple of ints, optional + Shape of each array. + offset : int, optional + Position in the file or buffer to start reading from. + strides : tuple of ints, optional + Buffer (`buf`) is interpreted according to these strides (strides + define how many bytes each array element, row, column, etc. + occupy in memory). + formats, names, titles, aligned, byteorder : + If `dtype` is ``None``, these arguments are passed to + `numpy.format_parser` to construct a dtype. See that function for + detailed documentation. + copy : bool, optional + Whether to copy the input object (True), or to use a reference instead. + This option only applies when the input is an ndarray or recarray. + Defaults to True. + + Returns + ------- + np.recarray + Record array created from the specified object. + + Notes + ----- + If `obj` is ``None``, then call the `~numpy.recarray` constructor. If + `obj` is a string, then call the `fromstring` constructor. If `obj` is a + list or a tuple, then if the first object is an `~numpy.ndarray`, call + `fromarrays`, otherwise call `fromrecords`. If `obj` is a + `~numpy.recarray`, then make a copy of the data in the recarray + (if ``copy=True``) and use the new formats, names, and titles. If `obj` + is a file, then call `fromfile`. Finally, if obj is an `ndarray`, then + return ``obj.view(recarray)``, making a copy of the data if ``copy=True``. + + Examples + -------- + >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> a + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> np.rec.array(a) + rec.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], + dtype=int64) + + >>> b = [(1, 1), (2, 4), (3, 9)] + >>> c = np.rec.array(b, formats = ['i2', 'f2'], names = ('x', 'y')) + >>> c + rec.array([(1, 1.), (2, 4.), (3, 9.)], + dtype=[('x', '>> c.x + array([1, 2, 3], dtype=int16) + + >>> c.y + array([1., 4., 9.], dtype=float16) + + >>> r = np.rec.array(['abc','def'], names=['col1','col2']) + >>> print(r.col1) + abc + + >>> r.col1 + array('abc', dtype='>> r.col2 + array('def', dtype=' object: ... + def tell(self, /) -> int: ... + def readinto(self, buffer: memoryview, /) -> int: ... + +### + +# exported in `numpy.rec` +class record(np.void): + def __getattribute__(self, attr: str) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike) -> None: ... + def pprint(self) -> str: ... + @overload + def __getitem__(self, key: str | SupportsIndex) -> Any: ... + @overload + def __getitem__(self, key: list[str]) -> record: ... 
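The `record` stub above mirrors the runtime class from records.py: fields resolve through both attribute lookup and `__getitem__`, and `pprint` formats them one per line. A small sketch of that behaviour (the field names `x` and `y` are arbitrary):

import numpy as np

r = np.rec.fromrecords([(1, 2.0), (3, 4.0)], names='x,y')

print(r.x)           # array([1, 3]); attribute lookup resolves the field
row = r[0]           # indexing a recarray row yields a numpy.record scalar
print(row['y'])      # 2.0; __getitem__ accepts a field name
print(row.pprint())  # one aligned "name: value" line per field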
+ +# exported in `numpy.rec` +class recarray(np.ndarray[_ShapeT_co, _DTypeT_co]): + __name__: ClassVar[Literal["record"]] = "record" + __module__: Literal["numpy"] = "numpy" + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: None = None, + buf: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + byteorder: _ByteOrder | None = None, + aligned: bool = False, + order: _OrderKACF = "C", + ) -> _RecArray[record]: ... + @overload + def __new__( + subtype, + shape: _ShapeLike, + dtype: DTypeLike, + buf: _SupportsBuffer | None = None, + offset: SupportsIndex = 0, + strides: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + byteorder: None = None, + aligned: Literal[False] = False, + order: _OrderKACF = "C", + ) -> _RecArray[Any]: ... + def __array_finalize__(self, /, obj: object) -> None: ... + def __getattribute__(self, attr: str, /) -> Any: ... + def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ... + + # + @overload + def field(self, /, attr: int | str, val: ArrayLike) -> None: ... + @overload + def field(self, /, attr: int | str, val: None = None) -> Any: ... + +# exported in `numpy.rec` +class format_parser: + dtype: np.dtype[np.void] + def __init__( + self, + /, + formats: DTypeLike, + names: str | Sequence[str] | None, + titles: str | Sequence[str] | None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + ) -> None: ... + +# exported in `numpy.rec` +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[Any]: ... +@overload +def fromarrays( + arrayList: Iterable[ArrayLike], + dtype: None = None, + shape: _ShapeLike | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: DTypeLike | None = None, + shape: _ShapeLike | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[record]: ... +@overload +def fromrecords( + recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]], + dtype: None = None, + shape: _ShapeLike | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def fromstring( + datastring: _SupportsBuffer, + dtype: DTypeLike, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[record]: ... 
+@overload +def fromstring( + datastring: _SupportsBuffer, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def fromfile( + fd: StrOrBytesPath | _SupportsReadInto, + dtype: DTypeLike, + shape: _ShapeLike | None = None, + offset: int = 0, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, +) -> _RecArray[Any]: ... +@overload +def fromfile( + fd: StrOrBytesPath | _SupportsReadInto, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, +) -> _RecArray[record]: ... + +# exported in `numpy.rec` +@overload +def array( + obj: _ScalarT | NDArray[_ScalarT], + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[_ScalarT]: ... +@overload +def array( + obj: ArrayLike, + dtype: DTypeLike, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: ArrayLike, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... +@overload +def array( + obj: None, + dtype: DTypeLike, + shape: _ShapeLike, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: None, + dtype: None = None, + *, + shape: _ShapeLike, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: DTypeLike, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + formats: None = None, + names: None = None, + titles: None = None, + aligned: bool = False, + byteorder: None = None, + copy: bool = True, +) -> _RecArray[Any]: ... +@overload +def array( + obj: _SupportsReadInto, + dtype: None = None, + shape: _ShapeLike | None = None, + offset: int = 0, + strides: tuple[int, ...] | None = None, + *, + formats: DTypeLike, + names: str | Sequence[str] | None = None, + titles: str | Sequence[str] | None = None, + aligned: bool = False, + byteorder: _ByteOrder | None = None, + copy: bool = True, +) -> _RecArray[record]: ... 
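Every constructor stubbed above follows the same two-overload pattern as the runtime code: either `dtype` is supplied and the `format_parser` arguments stay `None`, or `dtype` is omitted and `formats` becomes a required keyword. A sketch of both paths through `fromstring` (the two-field layout is made up for illustration):

import numpy as np

buf = np.array([(1, 1.5), (2, 2.5)],
               dtype=[('a', '<i4'), ('b', '<f8')]).tobytes()

# dtype path: first overload, formats/names/titles stay None
r1 = np.rec.fromstring(buf, dtype=[('a', '<i4'), ('b', '<f8')])

# format_parser path: second overload, dtype stays None, formats is required
r2 = np.rec.fromstring(buf, formats='<i4,<f8', names='a,b')

print(r1.a, r2.b)  # [1 2] [1.5 2.5]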
+ +# exported in `numpy.rec` +def find_duplicate(list: Iterable[_T]) -> list[_T]: ... diff --git a/python/numpy/_core/shape_base.py b/python/numpy/_core/shape_base.py new file mode 100644 index 000000000..c2a0f0dae --- /dev/null +++ b/python/numpy/_core/shape_base.py @@ -0,0 +1,998 @@ +__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', + 'stack', 'unstack', 'vstack'] + +import functools +import itertools +import operator + +from . import fromnumeric as _from_nx +from . import numeric as _nx +from . import overrides +from .multiarray import array, asanyarray, normalize_axis_index + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _atleast_1d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_1d_dispatcher) +def atleast_1d(*arys): + """ + Convert inputs to arrays with at least one dimension. + + Scalar inputs are converted to 1-dimensional arrays, whilst + higher-dimensional inputs are preserved. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more input arrays. + + Returns + ------- + ret : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 1``. + Copies are made only if necessary. + + See Also + -------- + atleast_2d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_1d(1.0) + array([1.]) + + >>> x = np.arange(9.0).reshape(3,3) + >>> np.atleast_1d(x) + array([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]) + >>> np.atleast_1d(x) is x + True + + >>> np.atleast_1d(1, [3, 4]) + (array([1]), array([3, 4])) + + """ + if len(arys) == 1: + result = asanyarray(arys[0]) + if result.ndim == 0: + result = result.reshape(1) + return result + res = [] + for ary in arys: + result = asanyarray(ary) + if result.ndim == 0: + result = result.reshape(1) + res.append(result) + return tuple(res) + + +def _atleast_2d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_2d_dispatcher) +def atleast_2d(*arys): + """ + View inputs as arrays with at least two dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted + to arrays. Arrays that already have two or more dimensions are + preserved. + + Returns + ------- + res, res2, ... : ndarray + An array, or tuple of arrays, each with ``a.ndim >= 2``. + Copies are avoided where possible, and views with two or more + dimensions are returned. + + See Also + -------- + atleast_1d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_2d(3.0) + array([[3.]]) + + >>> x = np.arange(3.0) + >>> np.atleast_2d(x) + array([[0., 1., 2.]]) + >>> np.atleast_2d(x).base is x + True + + >>> np.atleast_2d(1, [1, 2], [[1, 2]]) + (array([[1]]), array([[1, 2]]), array([[1, 2]])) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return tuple(res) + + +def _atleast_3d_dispatcher(*arys): + return arys + + +@array_function_dispatch(_atleast_3d_dispatcher) +def atleast_3d(*arys): + """ + View inputs as arrays with at least three dimensions. + + Parameters + ---------- + arys1, arys2, ... : array_like + One or more array-like sequences. Non-array inputs are converted to + arrays. Arrays that already have three or more dimensions are + preserved. + + Returns + ------- + res1, res2, ... 
: ndarray + An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are + avoided where possible, and views with three or more dimensions are + returned. For example, a 1-D array of shape ``(N,)`` becomes a view + of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a + view of shape ``(M, N, 1)``. + + See Also + -------- + atleast_1d, atleast_2d + + Examples + -------- + >>> import numpy as np + >>> np.atleast_3d(3.0) + array([[[3.]]]) + + >>> x = np.arange(3.0) + >>> np.atleast_3d(x).shape + (1, 3, 1) + + >>> x = np.arange(12.0).reshape(4,3) + >>> np.atleast_3d(x).shape + (4, 3, 1) + >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself + True + + >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): + ... print(arr, arr.shape) # doctest: +SKIP + ... + [[[1] + [2]]] (1, 2, 1) + [[[1] + [2]]] (1, 2, 1) + [[[1 2]]] (1, 1, 2) + + """ + res = [] + for ary in arys: + ary = asanyarray(ary) + if ary.ndim == 0: + result = ary.reshape(1, 1, 1) + elif ary.ndim == 1: + result = ary[_nx.newaxis, :, _nx.newaxis] + elif ary.ndim == 2: + result = ary[:, :, _nx.newaxis] + else: + result = ary + res.append(result) + if len(res) == 1: + return res[0] + else: + return tuple(res) + + +def _arrays_for_stack_dispatcher(arrays): + if not hasattr(arrays, "__getitem__"): + raise TypeError('arrays to stack must be passed as a "sequence" type ' + 'such as list or tuple.') + + return tuple(arrays) + + +def _vhstack_dispatcher(tup, *, dtype=None, casting=None): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_vhstack_dispatcher) +def vstack(tup, *, dtype=None, casting="same_kind"): + """ + Stack arrays in sequence vertically (row wise). + + This is equivalent to concatenation along the first axis after 1-D arrays + of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by + `vsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the first axis. + 1-D arrays must have the same length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 2-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. 
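# Illustrative sketch (editor's addition, not part of the patch): the axis
# conventions of atleast_2d/atleast_3d above. atleast_2d prepends an axis to
# 1-D input, while atleast_3d places the 1-D data in the middle: (N,) becomes
# (1, N, 1), and (M, N) gains a trailing axis.
import numpy as np

v = np.arange(3.0)                           # shape (3,)
assert np.atleast_2d(v).shape == (1, 3)
assert np.atleast_3d(v).shape == (1, 3, 1)
assert np.atleast_3d(np.ones((4, 3))).shape == (4, 3, 1)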
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.vstack((a,b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> a = np.array([[1], [2], [3]]) + >>> b = np.array([[4], [5], [6]]) + >>> np.vstack((a,b)) + array([[1], + [2], + [3], + [4], + [5], + [6]]) + + """ + arrs = atleast_2d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) + + +@array_function_dispatch(_vhstack_dispatcher) +def hstack(tup, *, dtype=None, casting="same_kind"): + """ + Stack arrays in sequence horizontally (column wise). + + This is equivalent to concatenation along the second axis, except for 1-D + arrays where it concatenates along the first axis. Rebuilds arrays divided + by `hsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of ndarrays + The arrays must have the same shape along all but the second axis, + except 1-D arrays which can be any length. In the case of a single + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + hsplit : Split an array into multiple sub-arrays + horizontally (column-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((4,5,6)) + >>> np.hstack((a,b)) + array([1, 2, 3, 4, 5, 6]) + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[4],[5],[6]]) + >>> np.hstack((a,b)) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrs = atleast_1d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" + if arrs and arrs[0].ndim == 1: + return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) + else: + return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting) + + +def _stack_dispatcher(arrays, axis=None, out=None, *, + dtype=None, casting=None): + arrays = _arrays_for_stack_dispatcher(arrays) + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays + + +@array_function_dispatch(_stack_dispatcher) +def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): + """ + Join a sequence of arrays along a new axis. 
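# Illustrative sketch (editor's addition, not part of the patch): the 1-D
# special case in hstack above. For 1-D inputs "horizontal" is axis 0, so
# hstack degenerates to a plain concatenate; for 2-D and higher it
# concatenates along axis 1.
import numpy as np

assert np.hstack(([1, 2], [3])).shape == (3,)                         # axis 0
assert np.hstack((np.ones((2, 1)), np.ones((2, 2)))).shape == (2, 3)  # axis 1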
+ + The ``axis`` parameter specifies the index of the new axis in the + dimensions of the result. For example, if ``axis=0`` it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + Parameters + ---------- + arrays : sequence of ndarrays + Each array must have the same shape. In the case of a single ndarray + array_like input, it will be treated as a sequence of arrays; i.e., + each element along the zeroth axis is treated as a separate array. + + axis : int, optional + The axis in the result array along which the input arrays are stacked. + + out : ndarray, optional + If provided, the destination to place the result. The shape must be + correct, matching that of what stack would have returned if no + out argument were specified. + + dtype : str or dtype + If provided, the destination array will have this dtype. Cannot be + provided together with `out`. + + .. versionadded:: 1.24 + + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur. Defaults to 'same_kind'. + + .. versionadded:: 1.24 + + + Returns + ------- + stacked : ndarray + The stacked array has one more dimension than the input arrays. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. + unstack : Split an array into a tuple of sub-arrays along an axis. + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)] + >>> np.stack(arrays, axis=0).shape + (10, 3, 4) + + >>> np.stack(arrays, axis=1).shape + (3, 10, 4) + + >>> np.stack(arrays, axis=2).shape + (3, 4, 10) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.stack((a, b)) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.stack((a, b), axis=-1) + array([[1, 4], + [2, 5], + [3, 6]]) + + """ + arrays = [asanyarray(arr) for arr in arrays] + if not arrays: + raise ValueError('need at least one array to stack') + + shapes = {arr.shape for arr in arrays} + if len(shapes) != 1: + raise ValueError('all input arrays must have the same shape') + + result_ndim = arrays[0].ndim + 1 + axis = normalize_axis_index(axis, result_ndim) + + sl = (slice(None),) * axis + (_nx.newaxis,) + expanded_arrays = [arr[sl] for arr in arrays] + return _nx.concatenate(expanded_arrays, axis=axis, out=out, + dtype=dtype, casting=casting) + +def _unstack_dispatcher(x, /, *, axis=None): + return (x,) + +@array_function_dispatch(_unstack_dispatcher) +def unstack(x, /, *, axis=0): + """ + Split an array into a sequence of arrays along the given axis. + + The ``axis`` parameter specifies the dimension along which the array will + be split. For example, if ``axis=0`` (the default) it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + The result is a tuple of arrays split along ``axis``. + + .. versionadded:: 2.1.0 + + Parameters + ---------- + x : ndarray + The array to be unstacked. + axis : int, optional + Axis along which the array will be split. Default: ``0``. + + Returns + ------- + unstacked : tuple of ndarrays + The unstacked arrays. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. 
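# Illustrative sketch (editor's addition, not part of the patch): the slicing
# trick stack() uses above. `(slice(None),) * axis + (newaxis,)` is the tuple
# form of e.g. arr[:, None], so every input grows one axis before a single
# concatenate along that axis.
import numpy as np

a = np.zeros((3, 4))
sl = (slice(None),) * 1 + (np.newaxis,)      # axis=1
assert a[sl].shape == (3, 1, 4)              # same as a[:, np.newaxis]
assert np.stack([a, a], axis=1).shape == (3, 2, 4)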
+
+    Notes
+    -----
+    ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e.,
+    ``stack(unstack(x, axis=axis), axis=axis) == x``.
+
+    This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``, since
+    iterating on an array iterates along the first axis.
+
+    Examples
+    --------
+    >>> arr = np.arange(24).reshape((2, 3, 4))
+    >>> np.unstack(arr)
+    (array([[ 0,  1,  2,  3],
+            [ 4,  5,  6,  7],
+            [ 8,  9, 10, 11]]),
+     array([[12, 13, 14, 15],
+            [16, 17, 18, 19],
+            [20, 21, 22, 23]]))
+    >>> np.unstack(arr, axis=1)
+    (array([[ 0,  1,  2,  3],
+            [12, 13, 14, 15]]),
+     array([[ 4,  5,  6,  7],
+            [16, 17, 18, 19]]),
+     array([[ 8,  9, 10, 11],
+            [20, 21, 22, 23]]))
+    >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1)
+    >>> arr2.shape
+    (2, 3, 4)
+    >>> np.all(arr == arr2)
+    np.True_
+
+    """
+    if x.ndim == 0:
+        raise ValueError("Input array must be at least 1-d.")
+    return tuple(_nx.moveaxis(x, axis, 0))
+
+
+# Internal functions to eliminate the overhead of repeated dispatch in one of
+# the two possible paths inside np.block.
+# Use getattr to protect against __array_function__ being disabled.
+_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
+_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
+_concatenate = getattr(_from_nx.concatenate,
+                       '__wrapped__', _from_nx.concatenate)
+
+
+def _block_format_index(index):
+    """
+    Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
+    """
+    idx_str = ''.join(f'[{i}]' for i in index if i is not None)
+    return 'arrays' + idx_str
+
+
+def _block_check_depths_match(arrays, parent_index=[]):
+    """
+    Recursive function checking that the depths of nested lists in `arrays`
+    all match. Mismatch raises a ValueError as described in the block
+    docstring below.
+
+    The entire index (rather than just the depth) needs to be calculated
+    for each innermost list, in case an error needs to be raised, so that
+    the index of the offending list can be printed as part of the error.
+
+    Parameters
+    ----------
+    arrays : nested list of arrays
+        The arrays to check
+    parent_index : list of int
+        The full index of `arrays` within the nested lists passed to
+        `_block_check_depths_match` at the top of the recursion.
+
+    Returns
+    -------
+    first_index : list of int
+        The full index of an element from the bottom of the nesting in
+        `arrays`. If any element at the bottom is an empty list, this will
+        refer to it, and the last index along the empty axis will be None.
+    max_arr_ndim : int
+        The maximum of the ndims of the arrays nested in `arrays`.
+    final_size : int
+        The number of elements in the final array. This is used to motivate
+        the choice of algorithm, based on benchmarking wisdom.
+
+    """
+    if isinstance(arrays, tuple):
+        # not strictly necessary, but saves us from:
+        #  - more than one way to do things - no point treating tuples like
+        #    lists
+        #  - horribly confusing behaviour that results when tuples are
+        #    treated like ndarray
+        raise TypeError(
+            f'{_block_format_index(parent_index)} is a tuple. '
+            'Only lists can be used to arrange blocks, and np.block does '
+            'not allow implicit conversion from tuple to ndarray.'
+        )
+    elif isinstance(arrays, list) and len(arrays) > 0:
+        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
+                      for i, arr in enumerate(arrays))
+
+        first_index, max_arr_ndim, final_size = next(idxs_ndims)
+        for index, ndim, size in idxs_ndims:
+            final_size += size
+            if ndim > max_arr_ndim:
+                max_arr_ndim = ndim
+            if len(index) != len(first_index):
+                raise ValueError(
+                    "List depths are mismatched. First element was at "
+                    f"depth {len(first_index)}, but there is an element at "
+                    f"depth {len(index)} ({_block_format_index(index)})"
+                )
+            # propagate our flag that indicates an empty list at the bottom
+            if index[-1] is None:
+                first_index = index
+
+        return first_index, max_arr_ndim, final_size
+    elif isinstance(arrays, list) and len(arrays) == 0:
+        # We've 'bottomed out' on an empty list
+        return parent_index + [None], 0, 0
+    else:
+        # We've 'bottomed out' - arrays is either a scalar or an array
+        size = _size(arrays)
+        return parent_index, _ndim(arrays), size
+
+
+def _atleast_nd(a, ndim):
+    # Ensures `a` has at least `ndim` dimensions by prepending
+    # ones to `a.shape` as necessary
+    return array(a, ndmin=ndim, copy=None, subok=True)
+
+
+def _accumulate(values):
+    return list(itertools.accumulate(values))
+
+
+def _concatenate_shapes(shapes, axis):
+    """Given array shapes, return the resulting shape and slice prefixes.
+
+    These help in nested concatenation.
+
+    Returns
+    -------
+    shape : tuple of int
+        This tuple satisfies::
+
+            shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
+            shape == concatenate(arrs, axis).shape
+
+    slice_prefixes : tuple of (slice(start, end), )
+        For a list of arrays being concatenated, this returns the slice
+        in the larger array at axis that needs to be sliced into.
+
+        For example, the following holds::
+
+            ret = concatenate([a, b, c], axis)
+            _, (sl_a, sl_b, sl_c) = _concatenate_shapes(
+                [a.shape, b.shape, c.shape], axis)
+
+            ret[(slice(None),) * axis + sl_a] == a
+            ret[(slice(None),) * axis + sl_b] == b
+            ret[(slice(None),) * axis + sl_c] == c
+
+        These are called slice prefixes since they are used in the recursive
+        blocking algorithm to compute the left-most slices during the
+        recursion. Therefore, they must be prepended to the rest of the slice
+        that was computed deeper in the recursion.
+
+        These are returned as tuples to ensure that they can quickly be added
+        to an existing slice tuple without creating a new tuple every time.
+
+    """
+    # Cache a result that will be reused.
+    shape_at_axis = [shape[axis] for shape in shapes]
+
+    # Take a shape, any shape
+    first_shape = shapes[0]
+    first_shape_pre = first_shape[:axis]
+    first_shape_post = first_shape[axis + 1:]
+
+    if any(shape[:axis] != first_shape_pre or
+           shape[axis + 1:] != first_shape_post for shape in shapes):
+        raise ValueError(
+            f'Mismatched array shapes in block along axis {axis}.')
+
+    shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1:])
+
+    offsets_at_axis = _accumulate(shape_at_axis)
+    slice_prefixes = [(slice(start, end),)
+                      for start, end in zip([0] + offsets_at_axis,
+                                            offsets_at_axis)]
+    return shape, slice_prefixes
+
+
+def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
+    """
+    Returns the shape of the final array, along with a list
+    of slices and a list of arrays that can be used for assignment inside the
+    new array
+
+    Parameters
+    ----------
+    arrays : nested list of arrays
+        The arrays to check
+    max_depth : int
+        The number of nested lists
+    result_ndim : int
+        The number of dimensions in the final array.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape that the final array will take on.
+    slices : list of tuple of slices
+        The slices into the full array required for assignment. These are
+        required to be prepended with ``(Ellipsis, )`` to obtain the correct
+        final index.
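# Illustrative sketch (editor's addition, not part of the patch): what the
# depth bookkeeping in _block_check_depths_match above is for. Mismatched
# nesting depths raise, and _block_format_index renders the offending
# position.
import numpy as np

a, b, c = np.ones(2), np.ones(2), np.ones(2)
try:
    np.block([[a, b], c])                        # depth 2 next to depth 1
except ValueError as e:
    assert "List depths are mismatched" in str(e)
    assert "arrays[1]" in str(e)                 # from _block_format_index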
+ arrays: list of ndarray + The data to assign to each slice of the full array + + """ + if depth < max_depth: + shapes, slices, arrays = zip( + *[_block_info_recursion(arr, max_depth, result_ndim, depth + 1) + for arr in arrays]) + + axis = result_ndim - max_depth + depth + shape, slice_prefixes = _concatenate_shapes(shapes, axis) + + # Prepend the slice prefix and flatten the slices + slices = [slice_prefix + the_slice + for slice_prefix, inner_slices in zip(slice_prefixes, slices) + for the_slice in inner_slices] + + # Flatten the array list + arrays = functools.reduce(operator.add, arrays) + + return shape, slices, arrays + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + # Return the slice and the array inside a list to be consistent with + # the recursive case. + arr = _atleast_nd(arrays, result_ndim) + return arr.shape, [()], [arr] + + +def _block(arrays, max_depth, result_ndim, depth=0): + """ + Internal implementation of block based on repeated concatenation. + `arrays` is the argument passed to + block. `max_depth` is the depth of nested lists within `arrays` and + `result_ndim` is the greatest of the dimensions of the arrays in + `arrays` and the depth of the lists in `arrays` (see block docstring + for details). + """ + if depth < max_depth: + arrs = [_block(arr, max_depth, result_ndim, depth + 1) + for arr in arrays] + return _concatenate(arrs, axis=-(max_depth - depth)) + else: + # We've 'bottomed out' - arrays is either a scalar or an array + # type(arrays) is not list + return _atleast_nd(arrays, result_ndim) + + +def _block_dispatcher(arrays): + # Use type(...) is list to match the behavior of np.block(), which special + # cases list specifically rather than allowing for generic iterables or + # tuple. Also, we know that list.__array_function__ will never exist. + if isinstance(arrays, list): + for subarrays in arrays: + yield from _block_dispatcher(subarrays) + else: + yield arrays + + +@array_function_dispatch(_block_dispatcher) +def block(arrays): + """ + Assemble an nd-array from nested lists of blocks. + + Blocks in the innermost lists are concatenated (see `concatenate`) along + the last dimension (-1), then these are concatenated along the + second-last dimension (-2), and so on until the outermost list is reached. + + Blocks can be of any dimension, but will not be broadcasted using + the normal rules. Instead, leading axes of size 1 are inserted, + to make ``block.ndim`` the same for all blocks. This is primarily useful + for working with scalars, and means that code like ``np.block([v, 1])`` + is valid, where ``v.ndim == 1``. + + When the nested list is two levels deep, this allows block matrices to be + constructed from their components. + + Parameters + ---------- + arrays : nested list of array_like or scalars (but not tuples) + If passed a single ndarray or scalar (a nested list of depth 0), this + is returned unmodified (and not copied). + + Elements shapes must match along the appropriate axes (without + broadcasting), but leading 1s will be prepended to the shape as + necessary to make the dimensions match. + + Returns + ------- + block_array : ndarray + The array assembled from the given blocks. 
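# Illustrative sketch (editor's addition, not part of the patch): the slice
# prefixes documented in _concatenate_shapes above, reproduced with public
# APIs. Concatenating shapes (2, 3) and (4, 3) along axis 0 gives shape
# (6, 3) with row slices 0:2 and 2:6.
import numpy as np

a, b = np.zeros((2, 3)), np.ones((4, 3))
ret = np.concatenate([a, b], axis=0)
assert ret.shape == (6, 3)
assert (ret[slice(0, 2)] == a).all()    # prefix for a
assert (ret[slice(2, 6)] == b).all()    # prefix for b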
+ + The dimensionality of the output is equal to the greatest of: + + * the dimensionality of all the inputs + * the depth to which the input list is nested + + Raises + ------ + ValueError + * If list depths are mismatched - for instance, ``[[a, b], c]`` is + illegal, and should be spelt ``[[a, b], [c]]`` + * If lists are empty - for instance, ``[[a, b], []]`` + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + dstack : Stack arrays in sequence depth wise (along third axis). + column_stack : Stack 1-D arrays as columns into a 2-D array. + vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. + + Notes + ----- + When called with only scalars, ``np.block`` is equivalent to an ndarray + call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to + ``np.array([[1, 2], [3, 4]])``. + + This function does not enforce that the blocks lie on a fixed grid. + ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: + + AAAbb + AAAbb + cccDD + + But is also allowed to produce, for some ``a, b, c, d``:: + + AAAbb + AAAbb + cDDDD + + Since concatenation happens along the last axis first, `block` is *not* + capable of producing the following directly:: + + AAAbb + cccbb + cccDD + + Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is + equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. + + Examples + -------- + The most common use of this function is to build a block matrix: + + >>> import numpy as np + >>> A = np.eye(2) * 2 + >>> B = np.eye(3) * 3 + >>> np.block([ + ... [A, np.zeros((2, 3))], + ... [np.ones((3, 2)), B ] + ... ]) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [1., 1., 3., 0., 0.], + [1., 1., 0., 3., 0.], + [1., 1., 0., 0., 3.]]) + + With a list of depth 1, `block` can be used as `hstack`: + + >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) + array([1, 2, 3]) + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([a, b, 10]) # hstack([a, b, 10]) + array([ 1, 2, 3, 4, 5, 6, 10]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([A, B]) # hstack([A, B]) + array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + + With a list of depth 2, `block` can be used in place of `vstack`: + + >>> a = np.array([1, 2, 3]) + >>> b = np.array([4, 5, 6]) + >>> np.block([[a], [b]]) # vstack([a, b]) + array([[1, 2, 3], + [4, 5, 6]]) + + >>> A = np.ones((2, 2), int) + >>> B = 2 * A + >>> np.block([[A], [B]]) # vstack([A, B]) + array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + + It can also be used in place of `atleast_1d` and `atleast_2d`: + + >>> a = np.array(0) + >>> b = np.array([1]) + >>> np.block([a]) # atleast_1d(a) + array([0]) + >>> np.block([b]) # atleast_1d(b) + array([1]) + + >>> np.block([[a]]) # atleast_2d(a) + array([[0]]) + >>> np.block([[b]]) # atleast_2d(b) + array([[1]]) + + + """ + arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) + + # It was found through benchmarking that making an array of final size + # around 256x256 was faster by straight concatenation on a + # i7-7700HQ processor and dual channel ram 2400MHz. + # It didn't seem to matter heavily on the dtype used. + # + # A 2D array using repeated concatenation requires 2 copies of the array. 
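# Illustrative sketch (editor's addition, not part of the patch): the
# concatenation order _block implements for np.block above. Innermost lists
# join along the last axis first; each enclosing level moves one axis left.
import numpy as np

A = np.ones((2, 2))
inner = np.concatenate([A, A], axis=-1)          # depth-2 lists: axis -1
outer = np.concatenate([inner, inner], axis=-2)  # depth-1 list: axis -2
assert outer.shape == (4, 4)
assert (np.block([[A, A], [A, A]]) == outer).all()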
+ # + # The fastest algorithm will depend on the ratio of CPU power to memory + # speed. + # One can monitor the results of the benchmark + # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d + # to tune this parameter until a C version of the `_block_info_recursion` + # algorithm is implemented which would likely be faster than the python + # version. + if list_ndim * final_size > (2 * 512 * 512): + return _block_slicing(arrays, list_ndim, result_ndim) + else: + return _block_concatenate(arrays, list_ndim, result_ndim) + + +# These helper functions are mostly used for testing. +# They allow us to write tests that directly call `_block_slicing` +# or `_block_concatenate` without blocking large arrays to force the wisdom +# to trigger the desired path. +def _block_setup(arrays): + """ + Returns + (`arrays`, list_ndim, result_ndim, final_size) + """ + bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) + list_ndim = len(bottom_index) + if bottom_index and bottom_index[-1] is None: + raise ValueError( + f'List at {_block_format_index(bottom_index)} cannot be empty' + ) + result_ndim = max(arr_ndim, list_ndim) + return arrays, list_ndim, result_ndim, final_size + + +def _block_slicing(arrays, list_ndim, result_ndim): + shape, slices, arrays = _block_info_recursion( + arrays, list_ndim, result_ndim) + dtype = _nx.result_type(*[arr.dtype for arr in arrays]) + + # Test preferring F only in the case that all input arrays are F + F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) + C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) + order = 'F' if F_order and not C_order else 'C' + result = _nx.empty(shape=shape, dtype=dtype, order=order) + # Note: In a c implementation, the function + # PyArray_CreateMultiSortedStridePerm could be used for more advanced + # guessing of the desired order. + + for the_slice, arr in zip(slices, arrays): + result[(Ellipsis,) + the_slice] = arr + return result + + +def _block_concatenate(arrays, list_ndim, result_ndim): + result = _block(arrays, list_ndim, result_ndim) + if list_ndim == 0: + # Catch an edge case where _block returns a view because + # `arrays` is a single numpy array and not a list of numpy arrays. + # This might copy scalars or lists twice, but this isn't a likely + # usecase for those interested in performance + result = result.copy() + return result diff --git a/python/numpy/_core/shape_base.pyi b/python/numpy/_core/shape_base.pyi new file mode 100644 index 000000000..c2c9c961e --- /dev/null +++ b/python/numpy/_core/shape_base.pyi @@ -0,0 +1,175 @@ +from collections.abc import Sequence +from typing import Any, SupportsIndex, TypeVar, overload + +from numpy import _CastingKind, generic +from numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike + +__all__ = [ + "atleast_1d", + "atleast_2d", + "atleast_3d", + "block", + "hstack", + "stack", + "unstack", + "vstack", +] + +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=generic) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +### + +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... 
+@overload +def atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ... +@overload +def atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ... + +# +@overload +def vstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def vstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... + +@overload +def hstack( + tup: Sequence[_ArrayLike[_ScalarT]], + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def hstack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... + +@overload +def stack( + arrays: Sequence[_ArrayLike[_ScalarT]], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: None = ..., + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: _DTypeLike[_ScalarT], + casting: _CastingKind = ... +) -> NDArray[_ScalarT]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = ..., + out: None = ..., + *, + dtype: DTypeLike = ..., + casting: _CastingKind = ... +) -> NDArray[Any]: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex, + out: _ArrayT, + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... +@overload +def stack( + arrays: Sequence[ArrayLike], + axis: SupportsIndex = 0, + *, + out: _ArrayT, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> _ArrayT: ... + +@overload +def unstack( + array: _ArrayLike[_ScalarT], + /, + *, + axis: int = ..., +) -> tuple[NDArray[_ScalarT], ...]: ... 
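# Illustrative sketch (editor's addition, not part of the patch): what the
# stub overloads above are meant to express, as a type-checker session. A
# known scalar type is preserved; plain Python data degrades to NDArray[Any].
import numpy as np
import numpy.typing as npt

x: npt.NDArray[np.float64] = np.zeros(3)
r1 = np.atleast_1d(x)         # checker infers NDArray[float64] (first overload)
r2 = np.atleast_1d([1, 2])    # checker infers NDArray[Any] (ArrayLike fallback)
v = np.vstack([x, x])         # dtype=None keeps NDArray[float64]
assert v.shape == (2, 3)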
+@overload +def unstack( + array: ArrayLike, + /, + *, + axis: int = ..., +) -> tuple[NDArray[Any], ...]: ... + +@overload +def block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def block(arrays: ArrayLike) -> NDArray[Any]: ... diff --git a/python/numpy/_core/strings.py b/python/numpy/_core/strings.py new file mode 100644 index 000000000..0fd5b0359 --- /dev/null +++ b/python/numpy/_core/strings.py @@ -0,0 +1,1829 @@ +""" +This module contains a set of functions for vectorized string +operations. +""" + +import functools +import sys + +import numpy as np +from numpy import ( + add, + equal, + greater, + greater_equal, + less, + less_equal, + not_equal, +) +from numpy import ( + multiply as _multiply_ufunc, +) +from numpy._core.multiarray import _vec_string +from numpy._core.overrides import array_function_dispatch, set_module +from numpy._core.umath import ( + _center, + _expandtabs, + _expandtabs_length, + _ljust, + _lstrip_chars, + _lstrip_whitespace, + _partition, + _partition_index, + _replace, + _rjust, + _rpartition, + _rpartition_index, + _rstrip_chars, + _rstrip_whitespace, + _slice, + _strip_chars, + _strip_whitespace, + _zfill, + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + str_len, +) +from numpy._core.umath import ( + count as _count_ufunc, +) +from numpy._core.umath import ( + endswith as _endswith_ufunc, +) +from numpy._core.umath import ( + find as _find_ufunc, +) +from numpy._core.umath import ( + index as _index_ufunc, +) +from numpy._core.umath import ( + rfind as _rfind_ufunc, +) +from numpy._core.umath import ( + rindex as _rindex_ufunc, +) +from numpy._core.umath import ( + startswith as _startswith_ufunc, +) + + +def _override___module__(): + for ufunc in [ + isalnum, isalpha, isdecimal, isdigit, islower, isnumeric, isspace, + istitle, isupper, str_len, + ]: + ufunc.__module__ = "numpy.strings" + ufunc.__qualname__ = ufunc.__name__ + + +_override___module__() + + +__all__ = [ + # UFuncs + "equal", "not_equal", "less", "less_equal", "greater", "greater_equal", + "add", "multiply", "isalpha", "isdigit", "isspace", "isalnum", "islower", + "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", + "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", + "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust", + "zfill", "partition", "rpartition", "slice", + + # _vec_string - Will gradually become ufuncs as well + "upper", "lower", "swapcase", "capitalize", "title", + + # _vec_string - Will probably not become ufuncs + "mod", "decode", "encode", "translate", + + # Removed from namespace until behavior has been crystallized + # "join", "split", "rsplit", "splitlines", +] + + +MAX = np.iinfo(np.int64).max + +array_function_dispatch = functools.partial( + array_function_dispatch, module='numpy.strings') + + +def _get_num_chars(a): + """ + Helper function that returns the number of characters per field in + a string or unicode array. This is to abstract out the fact that + for a unicode array this is itemsize / 4. + """ + if issubclass(a.dtype.type, np.str_): + return a.itemsize // 4 + return a.itemsize + + +def _to_bytes_or_str_array(result, output_dtype_like): + """ + Helper function to cast a result back into an array + with the appropriate dtype if an object array must be used + as an intermediary. 
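# Illustrative sketch (editor's addition, not part of the patch): why
# _get_num_chars above divides itemsize by 4 for unicode arrays. A "<U7"
# element stores 7 UCS-4 code points, i.e. 28 bytes; bytes arrays store one
# byte per character.
import numpy as np

u = np.array(["abcdefg"])          # dtype <U7
assert u.itemsize == 28            # 7 chars * 4 bytes each
assert u.itemsize // 4 == 7
b = np.array([b"abcdefg"])         # dtype |S7
assert b.itemsize == 7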
+ """ + output_dtype_like = np.asarray(output_dtype_like) + if result.size == 0: + # Calling asarray & tolist in an empty array would result + # in losing shape information + return result.astype(output_dtype_like.dtype) + ret = np.asarray(result.tolist()) + if isinstance(output_dtype_like.dtype, np.dtypes.StringDType): + return ret.astype(type(output_dtype_like.dtype)) + return ret.astype(type(output_dtype_like.dtype)(_get_num_chars(ret))) + + +def _clean_args(*args): + """ + Helper function for delegating arguments to Python string + functions. + + Many of the Python string operations that have optional arguments + do not use 'None' to indicate a default value. In these cases, + we need to remove all None arguments, and those following them. + """ + newargs = [] + for chk in args: + if chk is None: + break + newargs.append(chk) + return newargs + + +def _multiply_dispatcher(a, i): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_multiply_dispatcher) +def multiply(a, i): + """ + Return (a * i), that is string multiple concatenation, + element-wise. + + Values in ``i`` of less than 0 are treated as 0 (which yields an + empty string). + + Parameters + ---------- + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype + + i : array_like, with any integer dtype + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["a", "b", "c"]) + >>> np.strings.multiply(a, 3) + array(['aaa', 'bbb', 'ccc'], dtype='>> i = np.array([1, 2, 3]) + >>> np.strings.multiply(a, i) + array(['a', 'bb', 'ccc'], dtype='>> np.strings.multiply(np.array(['a']), i) + array(['a', 'aa', 'aaa'], dtype='>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3)) + >>> np.strings.multiply(a, 3) + array([['aaa', 'bbb', 'ccc'], + ['ddd', 'eee', 'fff']], dtype='>> np.strings.multiply(a, i) + array([['a', 'bb', 'ccc'], + ['d', 'ee', 'fff']], dtype=' sys.maxsize / np.maximum(i, 1)): + raise OverflowError("Overflow encountered in string multiply") + + buffersizes = a_len * i + out_dtype = f"{a.dtype.char}{buffersizes.max()}" + out = np.empty_like(a, shape=buffersizes.shape, dtype=out_dtype) + return _multiply_ufunc(a, i, out=out) + + +def _mod_dispatcher(a, values): + return (a, values) + + +@set_module("numpy.strings") +@array_function_dispatch(_mod_dispatcher) +def mod(a, values): + """ + Return (a % i), that is pre-Python 2.6 string formatting + (interpolation), element-wise for a pair of array_likes of str + or unicode. + + Parameters + ---------- + a : array_like, with `np.bytes_` or `np.str_` dtype + + values : array_like of values + These values will be element-wise interpolated into the string. 
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
+        depending on input types
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["NumPy is a %s library"])
+    >>> np.strings.mod(a, values=["Python"])
+    array(['NumPy is a Python library'], dtype='<U25')
+
+    >>> a = np.array([b'%d bytes', b'%d bits'])
+    >>> values = np.array([8, 64])
+    >>> np.strings.mod(a, values)
+    array([b'8 bytes', b'64 bits'], dtype='|S7')
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, np.object_, '__mod__', (values,)), a)
+
+
+@set_module("numpy.strings")
+def find(a, sub, start=0, end=None):
+    """
+    For each element, return the lowest index in the string where
+    substring ``sub`` is found, such that ``sub`` is contained in the
+    range [``start``, ``end``).
+
+    Parameters
+    ----------
+    a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype
+
+    sub : array_like, with `np.bytes_` or `np.str_` dtype
+        The substring to search for.
+
+    start, end : array_like, with any integer dtype
+        The range to look in, interpreted as in slice notation.
+
+    Returns
+    -------
+    y : ndarray
+        Output array of ints
+
+    See Also
+    --------
+    str.find
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["NumPy is a Python library"])
+    >>> np.strings.find(a, "Python")
+    array([11])
+
+    """
+    end = end if end is not None else MAX
+    return _find_ufunc(a, sub, start, end)
+
+
+@set_module("numpy.strings")
+def rfind(a, sub, start=0, end=None):
+    """
+    For each element, return the highest index in the string where
+    substring ``sub`` is found, such that ``sub`` is contained in the
+    range [``start``, ``end``).
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+        The substring to search for.
+
+    start, end : array_like, with any integer dtype
+        The range to look in, interpreted as in slice notation.
+
+    Returns
+    -------
+    y : ndarray
+        Output array of ints
+
+    See Also
+    --------
+    str.rfind
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["Computer Science"])
+    >>> np.strings.rfind(a, "Science", start=0, end=None)
+    array([9])
+    >>> np.strings.rfind(a, "Science", start=0, end=8)
+    array([-1])
+    >>> b = np.array(["Computer Science", "Science"])
+    >>> np.strings.rfind(b, "Science", start=0, end=None)
+    array([9, 0])
+
+    """
+    end = end if end is not None else MAX
+    return _rfind_ufunc(a, sub, start, end)
+
+
+@set_module("numpy.strings")
+def index(a, sub, start=0, end=None):
+    """
+    Like `find`, but raises :exc:`ValueError` when the substring is not found.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    start, end : array_like, with any integer dtype, optional
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints.
+
+    See Also
+    --------
+    find, str.index
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(["Computer Science"])
+    >>> np.strings.index(a, "Science", start=0, end=None)
+    array([9])
+
+    """
+    end = end if end is not None else MAX
+    return _index_ufunc(a, sub, start, end)
+
+
+@set_module("numpy.strings")
+def rindex(a, sub, start=0, end=None):
+    """
+    Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is
+    not found.
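# Illustrative sketch (editor's addition, not part of the patch): the
# `end = end if end is not None else MAX` pattern in find/rfind/index above.
# The underlying ufuncs take concrete integers, so "no end" is encoded as
# the largest int64 value.
import numpy as np

a = np.array(["hello world"])
assert np.strings.find(a, "o") == np.strings.find(a, "o", 0, 2**63 - 1)
assert np.strings.find(a, "o", 5).tolist() == [7]   # search resumes at index 5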
+
+    Parameters
+    ----------
+    a : array-like, with `np.bytes_` or `np.str_` dtype
+
+    sub : array-like, with `np.bytes_` or `np.str_` dtype
+
+    start, end : array-like, with any integer dtype, optional
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ints.
+
+    See Also
+    --------
+    rfind, str.rindex
+
+    Examples
+    --------
+    >>> a = np.array(["Computer Science"])
+    >>> np.strings.rindex(a, "Science", start=0, end=None)
+    array([9])
+
+    """
+    end = end if end is not None else MAX
+    return _rindex_ufunc(a, sub, start, end)
+
+
+@set_module("numpy.strings")
+def count(a, sub, start=0, end=None):
+    """
+    Returns an array with the number of non-overlapping occurrences of
+    substring ``sub`` in the range [``start``, ``end``).
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+        The substring to search for.
+
+    start, end : array_like, with any integer dtype
+        The range to look in, interpreted as in slice notation.
+
+    Returns
+    -------
+    y : ndarray
+        Output array of ints
+
+    See Also
+    --------
+    str.count
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> c
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+    >>> np.strings.count(c, 'A')
+    array([3, 1, 1])
+    >>> np.strings.count(c, 'aA')
+    array([3, 1, 0])
+    >>> np.strings.count(c, 'A', start=1, end=4)
+    array([2, 1, 1])
+    >>> np.strings.count(c, 'A', start=1, end=3)
+    array([1, 0, 0])
+
+    """
+    end = end if end is not None else MAX
+    return _count_ufunc(a, sub, start, end)
+
+
+@set_module("numpy.strings")
+def startswith(a, prefix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in ``a`` starts with ``prefix``, otherwise `False`.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    prefix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    start, end : array_like, with any integer dtype
+        With ``start``, test beginning at that position. With ``end``,
+        stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.startswith
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> s = np.array(['foo', 'bar'])
+    >>> s
+    array(['foo', 'bar'], dtype='<U3')
+    >>> np.strings.startswith(s, 'fo')
+    array([ True, False])
+    >>> np.strings.startswith(s, 'o', start=1, end=2)
+    array([ True, False])
+
+    """
+    end = end if end is not None else MAX
+    return _startswith_ufunc(a, prefix, start, end)
+
+
+@set_module("numpy.strings")
+def endswith(a, suffix, start=0, end=None):
+    """
+    Returns a boolean array which is `True` where the string element
+    in ``a`` ends with ``suffix``, otherwise `False`.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    suffix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+
+    start, end : array_like, with any integer dtype
+        With ``start``, test beginning at that position. With ``end``,
+        stop comparing at that position.
+
+    Returns
+    -------
+    out : ndarray
+        Output array of bools
+
+    See Also
+    --------
+    str.endswith
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> s = np.array(['foo', 'bar'])
+    >>> s
+    array(['foo', 'bar'], dtype='<U3')
+    >>> np.strings.endswith(s, 'ar')
+    array([False,  True])
+    >>> np.strings.endswith(s, 'a', start=1, end=2)
+    array([False,  True])
+
+    """
+    end = end if end is not None else MAX
+    return _endswith_ufunc(a, suffix, start, end)
+
+
+def _code_dispatcher(a, encoding=None, errors=None):
+    return (a,)
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_code_dispatcher)
+def decode(a, encoding=None, errors=None):
+    r"""
+    Calls :meth:`bytes.decode` element-wise.
+
+    The set of available codecs comes from the Python standard library,
+    and may be extended at runtime. For more information, see the
+    :mod:`codecs` module.
+
+    Parameters
+    ----------
+    a : array_like, with ``bytes_`` dtype
+
+    encoding : str, optional
+        The name of an encoding
+
+    errors : str, optional
+        Specifies how to handle encoding errors
+
+    Returns
+    -------
+    out : ndarray
+
+    See Also
+    --------
+    :py:meth:`bytes.decode`
+
+    Notes
+    -----
+    The type of the result will depend on the encoding specified.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
+    ...               b'\x81\x82\xc2\xc1\xc2\x82\x81'])
+    >>> c
+    array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
+           b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')
+    >>> np.strings.decode(c, encoding='cp037')
+    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, np.object_, 'decode', _clean_args(encoding, errors)),
+        np.str_(''))
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_code_dispatcher)
+def encode(a, encoding=None, errors=None):
+    """
+    Calls :meth:`str.encode` element-wise.
+
+    The set of available codecs comes from the Python standard library,
+    and may be extended at runtime. For more information, see the
+    :mod:`codecs` module.
+
+    Parameters
+    ----------
+    a : array_like, with ``StringDType`` or ``str_`` dtype
+
+    encoding : str, optional
+        The name of an encoding
+
+    errors : str, optional
+        Specifies how to handle encoding errors
+
+    Returns
+    -------
+    out : ndarray
+
+    See Also
+    --------
+    str.encode
+
+    Notes
+    -----
+    The type of the result will depend on the encoding specified.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(['aAaAaA', '  aA  ', 'abBABba'])
+    >>> np.strings.encode(a, encoding='cp037')
+    array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
+           b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')
+
+    """
+    return _to_bytes_or_str_array(
+        _vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)),
+        np.bytes_(b''))
+
+
+def _expandtabs_dispatcher(a, tabsize=None):
+    return (a,)
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_expandtabs_dispatcher)
+def expandtabs(a, tabsize=8):
+    """
+    Return a copy of each string element where all tab characters are
+    replaced by one or more spaces.
+
+    Calls :meth:`str.expandtabs` element-wise.
+
+    Return a copy of each string element where all tab characters are
+    replaced by one or more spaces, depending on the current column
+    and the given `tabsize`. The column number is reset to zero after
+    each newline occurring in the string. This doesn't understand other
+    non-printing characters or escape sequences.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+        Input array
+    tabsize : int, optional
+        Replace tabs with `tabsize` number of spaces. If not given defaults
+        to 8 spaces.
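# Illustrative sketch (editor's addition, not part of the patch): decode and
# encode above are element-wise inverses for a given codec, mapping bytes_
# arrays to str_ arrays and back.
import numpy as np

s = np.array(["aA", "bB"])
b = np.strings.encode(s, encoding="cp037")      # bytes_ result
assert b.dtype.kind == "S"
assert (np.strings.decode(b, encoding="cp037") == s).all()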
+ + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type + + See Also + -------- + str.expandtabs + + Examples + -------- + >>> import numpy as np + >>> a = np.array(['\t\tHello\tworld']) + >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP + array([' Hello world'], dtype='>> import numpy as np + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) + array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='>> np.strings.center(c, width=9, fillchar='*') + array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.ljust(c, width=3) + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.ljust(c, width=9) + array(['aAaAaA ', ' aA ', 'abBABba '], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rjust(a, width=3) + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.rjust(a, width=9) + array([' aAaAaA', ' aA ', ' abBABba'], dtype='>> import numpy as np + >>> np.strings.zfill(['1', '-1', '+1'], 3) + array(['001', '-01', '+01'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.lstrip(c, 'a') + array(['AaAaA', ' aA ', 'bBABba'], dtype='>> np.strings.lstrip(c, 'A') # leaves c unchanged + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c, '')).all() + np.False_ + >>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c)).all() + np.True_ + + """ + if chars is None: + return _lstrip_whitespace(a) + return _lstrip_chars(a, chars) + + +@set_module("numpy.strings") +def rstrip(a, chars=None): + """ + For each element in `a`, return a copy with the trailing characters + removed. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + chars : scalar with the same dtype as ``a``, optional + The ``chars`` argument is a string specifying the set of + characters to be removed. If ``None``, the ``chars`` + argument defaults to removing whitespace. The ``chars`` argument + is not a prefix or suffix; rather, all combinations of its + values are stripped. 
+ + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.rstrip + + Examples + -------- + >>> import numpy as np + >>> c = np.array(['aAaAaA', 'abBABba']) + >>> c + array(['aAaAaA', 'abBABba'], dtype='>> np.strings.rstrip(c, 'a') + array(['aAaAaA', 'abBABb'], dtype='>> np.strings.rstrip(c, 'A') + array(['aAaAa', 'abBABba'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> c + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.strip(c) + array(['aAaAaA', 'aA', 'abBABba'], dtype='>> np.strings.strip(c, 'a') + array(['AaAaA', ' aA ', 'bBABb'], dtype='>> np.strings.strip(c, 'A') + array(['aAaAa', ' aA ', 'abBABba'], dtype='>> import numpy as np + >>> c = np.array(['a1b c', '1bca', 'bca1']); c + array(['a1b c', '1bca', 'bca1'], dtype='>> np.strings.upper(c) + array(['A1B C', '1BCA', 'BCA1'], dtype='>> import numpy as np + >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c + array(['A1B C', '1BCA', 'BCA1'], dtype='>> np.strings.lower(c) + array(['a1b c', '1bca', 'bca1'], dtype='>> import numpy as np + >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c + array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], + dtype='|S5') + >>> np.strings.swapcase(c) + array(['A1b C', '1B cA', 'B cA1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'swapcase') + + +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) +def capitalize(a): + """ + Return a copy of ``a`` with only the first character of each element + capitalized. + + Calls :meth:`str.capitalize` element-wise. + + For byte strings, this method is locale-dependent. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array of strings to capitalize. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.capitalize + + Examples + -------- + >>> import numpy as np + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], + dtype='|S4') + >>> np.strings.capitalize(c) + array(['A1b2', '1b2a', 'B2a1', '2a1b'], + dtype='|S4') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'capitalize') + + +@set_module("numpy.strings") +@array_function_dispatch(_unary_op_dispatcher) +def title(a): + """ + Return element-wise title cased version of string or unicode. + + Title case words start with uppercase characters, all remaining cased + characters are lowercase. + + Calls :meth:`str.title` element-wise. + + For 8-bit strings, this method is locale-dependent. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array. 
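# Illustrative sketch (editor's addition, not part of the patch): the
# "chars is a set, not a prefix/suffix" behavior documented for
# lstrip/rstrip/strip above.
import numpy as np

c = np.array(["commencement"])
assert np.strings.lstrip(c, "cmo").tolist() == ["encement"]   # strips c, o, m
assert np.strings.rstrip(c, "tne").tolist() == ["commencem"]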
+ + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.title + + Examples + -------- + >>> import numpy as np + >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c + array(['a1b c', '1b ca', 'b ca1', 'ca1b'], + dtype='|S5') + >>> np.strings.title(c) + array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'], + dtype='|S5') + + """ + a_arr = np.asarray(a) + return _vec_string(a_arr, a_arr.dtype, 'title') + + +def _replace_dispatcher(a, old, new, count=None): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_replace_dispatcher) +def replace(a, old, new, count=-1): + """ + For each element in ``a``, return a copy of the string with + occurrences of substring ``old`` replaced by ``new``. + + Parameters + ---------- + a : array_like, with ``bytes_`` or ``str_`` dtype + + old, new : array_like, with ``bytes_`` or ``str_`` dtype + + count : array_like, with ``int_`` dtype + If the optional argument ``count`` is given, only the first + ``count`` occurrences are replaced. + + Returns + ------- + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types + + See Also + -------- + str.replace + + Examples + -------- + >>> import numpy as np + >>> a = np.array(["That is a mango", "Monkeys eat mangos"]) + >>> np.strings.replace(a, 'mango', 'banana') + array(['That is a banana', 'Monkeys eat bananas'], dtype='>> a = np.array(["The dish is fresh", "This is it"]) + >>> np.strings.replace(a, 'is', 'was') + array(['The dwash was fresh', 'Thwas was it'], dtype='>> import numpy as np + >>> np.strings.join('-', 'osd') # doctest: +SKIP + array('o-s-d', dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP + array(['g-h-c', 'o.s.d'], dtype='>> import numpy as np + >>> x = np.array("Numpy is nice!") + >>> np.strings.split(x, " ") # doctest: +SKIP + array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP + + >>> np.strings.split(x, " ", 1) # doctest: +SKIP + array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP + + See Also + -------- + str.split, rsplit + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, np.object_, 'split', [sep] + _clean_args(maxsplit)) + + +@array_function_dispatch(_split_dispatcher) +def _rsplit(a, sep=None, maxsplit=None): + """ + For each element in `a`, return a list of the words in the + string, using `sep` as the delimiter string. + + Calls :meth:`str.rsplit` element-wise. + + Except for splitting from the right, `rsplit` + behaves like `split`. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + sep : str or unicode, optional + If `sep` is not specified or None, any whitespace string + is a separator. + maxsplit : int, optional + If `maxsplit` is given, at most `maxsplit` splits are done, + the rightmost ones. 
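# Illustrative sketch (editor's addition, not part of the patch): the count
# semantics of strings.replace above. count=-1 (the default) replaces every
# non-overlapping occurrence; a non-negative count caps replacements per
# element, like str.replace.
import numpy as np

a = np.array(["aaaa"])
assert np.strings.replace(a, "aa", "b").tolist() == ["bb"]
assert np.strings.replace(a, "aa", "b", count=1).tolist() == ["baa"]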
+ + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.rsplit, split + + Examples + -------- + >>> import numpy as np + >>> a = np.array(['aAaAaA', 'abBABba']) + >>> np.strings.rsplit(a, 'A') # doctest: +SKIP + array([list(['a', 'a', 'a', '']), # doctest: +SKIP + list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP + + """ + # This will return an array of lists of different sizes, so we + # leave it as an object array + return _vec_string( + a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit)) + + +def _splitlines_dispatcher(a, keepends=None): + return (a,) + + +@array_function_dispatch(_splitlines_dispatcher) +def _splitlines(a, keepends=None): + """ + For each element in `a`, return a list of the lines in the + element, breaking at line boundaries. + + Calls :meth:`str.splitlines` element-wise. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + + keepends : bool, optional + Line breaks are not included in the resulting list unless + keepends is given and true. + + Returns + ------- + out : ndarray + Array of list objects + + See Also + -------- + str.splitlines + + Examples + -------- + >>> np.char.splitlines("first line\\nsecond line") + array(list(['first line', 'second line']), dtype=object) + >>> a = np.array(["first\\nsecond", "third\\nfourth"]) + >>> np.char.splitlines(a) + array([list(['first', 'second']), list(['third', 'fourth'])], dtype=object) + + """ + return _vec_string( + a, np.object_, 'splitlines', _clean_args(keepends)) + + +def _partition_dispatcher(a, sep): + return (a,) + + +@set_module("numpy.strings") +@array_function_dispatch(_partition_dispatcher) +def partition(a, sep): + """ + Partition each element in ``a`` around ``sep``. + + For each element in ``a``, split the element at the first + occurrence of ``sep``, and return a 3-tuple containing the part + before the separator, the separator itself, and the part after + the separator. If the separator is not found, the first item of + the tuple will contain the whole string, and the second and third + ones will be the empty string. + + Parameters + ---------- + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Input array + sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Separator to split each string element in ``a``. 
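# Illustrative sketch (editor's addition, not part of the patch): the 3-tuple
# return shape of partition above; when the separator is missing, the second
# and third arrays hold empty strings.
import numpy as np

x = np.array(["a-b-c", "nodash"])
before, sep, after = np.strings.partition(x, "-")
assert before.tolist() == ["a", "nodash"]
assert sep.tolist() == ["-", ""]
assert after.tolist() == ["b-c", ""]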
+
+    Returns
+    -------
+    out : 3-tuple:
+        - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
+          part before the separator
+        - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
+          separator
+        - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
+          part after the separator
+
+    See Also
+    --------
+    str.partition
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array(["Numpy is nice!"])
+    >>> np.strings.partition(x, " ")
+    (array(['Numpy'], dtype='<U5'),
+     array([' '], dtype='<U1'),
+     array(['is nice!'], dtype='<U8'))
+
+    """
+    a = np.asanyarray(a)
+    # TODO switch to copy=False when issues around views are fixed
+    sep = np.array(sep, dtype=a.dtype, copy=True, subok=True)
+    if a.dtype.char == "T":
+        return _partition(a, sep)
+
+    pos = _find_ufunc(a, sep, 0, MAX)
+    a_len = str_len(a)
+    sep_len = str_len(sep)
+
+    not_found = pos < 0
+    buffersizes1 = np.where(not_found, a_len, pos)
+    buffersizes3 = np.where(not_found, 0, a_len - pos - sep_len)
+
+    out_dtype = ",".join([f"{a.dtype.char}{n}" for n in (
+        buffersizes1.max(), sep_len.max(), buffersizes3.max())])
+    shape = np.broadcast_shapes(a.shape, sep.shape)
+    out = np.empty_like(a, shape=shape, dtype=out_dtype)
+    return _partition_index(
+        a, sep, pos, out=(out["f0"], out["f1"], out["f2"]))
+
+
+def _rpartition_dispatcher(a, sep):
+    return (a,)
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_rpartition_dispatcher)
+def rpartition(a, sep):
+    """
+    Partition (split) each element around the right-most separator.
+
+    For each element in ``a``, split the element at the last
+    occurrence of ``sep``, and return a 3-tuple containing the part
+    before the separator, the separator itself, and the part after
+    the separator. If the separator is not found, the third item of
+    the tuple will contain the whole string, and the first and second
+    ones will be the empty string.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+        Input array
+    sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+        Separator to split each string element in ``a``.
+
+    Returns
+    -------
+    out : 3-tuple:
+        - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
+          part before the separator
+        - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
+          separator
+        - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the
+          part after the separator
+
+    See Also
+    --------
+    str.rpartition
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])
+    >>> np.strings.rpartition(a, 'A')
+    (array(['aAaAa', ' a', 'abB'], dtype='<U5'),
+     array(['A', 'A', 'A'], dtype='<U1'),
+     array(['', ' ', 'Bba'], dtype='<U3'))
+
+    """
+    a = np.asanyarray(a)
+    # TODO switch to copy=False when issues around views are fixed
+    sep = np.array(sep, dtype=a.dtype, copy=True, subok=True)
+    if a.dtype.char == "T":
+        return _rpartition(a, sep)
+
+    pos = _rfind_ufunc(a, sep, 0, MAX)
+    a_len = str_len(a)
+    sep_len = str_len(sep)
+
+    not_found = pos < 0
+    buffersizes1 = np.where(not_found, 0, pos)
+    buffersizes3 = np.where(not_found, a_len, a_len - pos - sep_len)
+
+    out_dtype = ",".join([f"{a.dtype.char}{n}" for n in (
+        buffersizes1.max(), sep_len.max(), buffersizes3.max())])
+    shape = np.broadcast_shapes(a.shape, sep.shape)
+    out = np.empty_like(a, shape=shape, dtype=out_dtype)
+    return _rpartition_index(
+        a, sep, pos, out=(out["f0"], out["f1"], out["f2"]))
+
+
+def _translate_dispatcher(a, table, deletechars=None):
+    return (a,)
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_translate_dispatcher)
+def translate(a, table, deletechars=None):
+    """
+    For each element in `a`, return a copy of the string where all
+    characters occurring in the optional argument `deletechars` are
+    removed, and the remaining characters have been mapped through the
+    given translation table.
+
+    Calls :meth:`str.translate` element-wise.
+
+    Parameters
+    ----------
+    a : array-like, with ``bytes_`` or ``str_`` dtype
+
+    table : str of length 256
+
+    deletechars : str
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``bytes_`` or ``str_`` dtype, depending on
+        input type
+
+    See Also
+    --------
+    str.translate
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(['a1b c', '1bca', 'bca1'])
+    >>> table = a[0].maketrans('abc', '123')
+    >>> deletechars = ' '
+    >>> np.char.translate(a, table, deletechars)
+    array(['112 3', '1231', '2311'], dtype='<U5')
+
+    """
+    a_arr = np.asarray(a)
+    if issubclass(a_arr.dtype.type, np.str_):
+        return _vec_string(a_arr, a_arr.dtype, 'translate', (table,))
+    else:
+        return _vec_string(
+            a_arr, a_arr.dtype, 'translate',
+            [table] + _clean_args(deletechars))
+
+
+def _slice_dispatcher(a, start=None, stop=None, step=None):
+    return (a,)
+
+
+@set_module("numpy.strings")
+@array_function_dispatch(_slice_dispatcher)
+def slice(a, start=None, stop=np._NoValue, step=None, /):
+    """
+    Slice the strings in `a` by slices specified by `start`, `stop`, `step`.
+    Like in the regular Python `slice` object, if only `start` is
+    specified then it is interpreted as the `stop`.
+
+    Parameters
+    ----------
+    a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
+        Input array
+
+    start : array-like, with integer dtype, optional
+        The start of the slice, broadcasted to `a`'s shape
+
+    stop : array-like, with integer dtype, optional
+        The end of the slice, broadcasted to `a`'s shape
+
+    step : array-like, with integer dtype, optional
+        The step for the slice, broadcasted to `a`'s shape
+
+    Returns
+    -------
+    out : ndarray
+        Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array(['hello', 'world'])
+    >>> np.strings.slice(a, 2)
+    array(['he', 'wo'], dtype='<U5')
+
+    >>> np.strings.slice(a, 2, None)
+    array(['llo', 'rld'], dtype='<U5')
+
+    >>> np.strings.slice(a, 1, 5, 2)
+    array(['el', 'ol'], dtype='<U5')
+
+    >>> np.strings.slice(a, np.array([1, 2]), np.array([4, 5]))
+    array(['ell', 'rld'], dtype='<U5')
+
+    >>> b = np.array(['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'],
+    ...              dtype=np.dtypes.StringDType())
+    >>> np.strings.slice(b, -2)
+    array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType())
+
+    >>> np.strings.slice(b, -2, None)
+    array(['ld', 'με', '世界', ' 🌍'], dtype=StringDType())
+
+    >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3])
+    array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType())
+
+    >>> np.strings.slice(b, None, None, -1)
+    array(['dlrow olleh', 'εμσόκ υοσ αιεγ', '界世好你', '🌍 👋'],
+          dtype=StringDType())
+
+    """
+    # Just like in the construction of a regular slice object, if only start
+    # is specified then start will become stop, see logic in slice_new.
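+    # e.g. np.strings.slice(a, 2) is equivalent to np.strings.slice(a, None, 2),
+    # mirroring the builtin behavior: slice(2) == slice(None, 2).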
+ if stop is np._NoValue: + stop = start + start = None + + # adjust start, stop, step to be integers, see logic in PySlice_Unpack + if step is None: + step = 1 + step = np.asanyarray(step) + if not np.issubdtype(step.dtype, np.integer): + raise TypeError(f"unsupported type {step.dtype} for operand 'step'") + if np.any(step == 0): + raise ValueError("slice step cannot be zero") + + if start is None: + start = np.where(step < 0, np.iinfo(np.intp).max, 0) + + if stop is None: + stop = np.where(step < 0, np.iinfo(np.intp).min, np.iinfo(np.intp).max) + + return _slice(a, start, stop, step) diff --git a/python/numpy/_core/strings.pyi b/python/numpy/_core/strings.pyi new file mode 100644 index 000000000..b187ce71d --- /dev/null +++ b/python/numpy/_core/strings.pyi @@ -0,0 +1,511 @@ +from typing import TypeAlias, overload + +import numpy as np +from numpy._typing import NDArray, _AnyShape, _SupportsArray +from numpy._typing import _ArrayLikeAnyString_co as UST_co +from numpy._typing import _ArrayLikeBytes_co as S_co +from numpy._typing import _ArrayLikeInt_co as i_co +from numpy._typing import _ArrayLikeStr_co as U_co +from numpy._typing import _ArrayLikeString_co as T_co + +__all__ = [ + "add", + "capitalize", + "center", + "count", + "decode", + "encode", + "endswith", + "equal", + "expandtabs", + "find", + "greater", + "greater_equal", + "index", + "isalnum", + "isalpha", + "isdecimal", + "isdigit", + "islower", + "isnumeric", + "isspace", + "istitle", + "isupper", + "less", + "less_equal", + "ljust", + "lower", + "lstrip", + "mod", + "multiply", + "not_equal", + "partition", + "replace", + "rfind", + "rindex", + "rjust", + "rpartition", + "rstrip", + "startswith", + "str_len", + "strip", + "swapcase", + "title", + "translate", + "upper", + "zfill", + "slice", +] + +_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType] +_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType] +_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray + +@overload +def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ... +@overload +def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ... +@overload +def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ... + +@overload +def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ... +@overload +def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ... 
+@overload +def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ... +@overload +def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ... +@overload +def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ... +@overload +def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def mod(a: U_co, value: object) -> NDArray[np.str_]: ... +@overload +def mod(a: S_co, value: object) -> NDArray[np.bytes_]: ... +@overload +def mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ... +@overload +def mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ... + +def isalpha(x: UST_co) -> NDArray[np.bool]: ... +def isalnum(a: UST_co) -> NDArray[np.bool]: ... +def isdigit(x: UST_co) -> NDArray[np.bool]: ... +def isspace(x: UST_co) -> NDArray[np.bool]: ... +def isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ... +def isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ... +def islower(a: UST_co) -> NDArray[np.bool]: ... +def istitle(a: UST_co) -> NDArray[np.bool]: ... +def isupper(a: UST_co) -> NDArray[np.bool]: ... + +def str_len(x: UST_co) -> NDArray[np.int_]: ... + +@overload +def find( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def find( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def find( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def rfind( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def rfind( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def rfind( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def index( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def index( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def index( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def rindex( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def rindex( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def rindex( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def count( + a: U_co, + sub: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def count( + a: S_co, + sub: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... +@overload +def count( + a: T_co, + sub: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.int_]: ... + +@overload +def startswith( + a: U_co, + prefix: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: S_co, + prefix: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def startswith( + a: T_co, + prefix: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... 
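+
+# Each string function is overloaded once per input family: str_ arrays
+# (U_co), bytes_ arrays (S_co), and StringDType/str arrays (T_co).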
+ +@overload +def endswith( + a: U_co, + suffix: U_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: S_co, + suffix: S_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... +@overload +def endswith( + a: T_co, + suffix: T_co, + start: i_co = ..., + end: i_co | None = ..., +) -> NDArray[np.bool]: ... + +def decode( + a: S_co, + encoding: str | None = None, + errors: str | None = None, +) -> NDArray[np.str_]: ... +def encode( + a: U_co | T_co, + encoding: str | None = None, + errors: str | None = None, +) -> NDArray[np.bytes_]: ... + +@overload +def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ... +@overload +def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ... +@overload +def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ... +@overload +def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ... + +@overload +def center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def center(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def center(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def ljust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def ljust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def ljust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def rjust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ... +@overload +def rjust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ... +@overload +def rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ... +@overload +def rjust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ... + +@overload +def lstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def lstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def lstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def rstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def rstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def rstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def strip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ... +@overload +def strip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ... +@overload +def strip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ... +@overload +def strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ... + +@overload +def zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ... +@overload +def zfill(a: S_co, width: i_co) -> NDArray[np.bytes_]: ... 
+@overload +def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ... +@overload +def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def upper(a: U_co) -> NDArray[np.str_]: ... +@overload +def upper(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def lower(a: U_co) -> NDArray[np.str_]: ... +@overload +def lower(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def swapcase(a: U_co) -> NDArray[np.str_]: ... +@overload +def swapcase(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def capitalize(a: U_co) -> NDArray[np.str_]: ... +@overload +def capitalize(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def title(a: U_co) -> NDArray[np.str_]: ... +@overload +def title(a: S_co) -> NDArray[np.bytes_]: ... +@overload +def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def title(a: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def replace( + a: U_co, + old: U_co, + new: U_co, + count: i_co = ..., +) -> NDArray[np.str_]: ... +@overload +def replace( + a: S_co, + old: S_co, + new: S_co, + count: i_co = ..., +) -> NDArray[np.bytes_]: ... +@overload +def replace( + a: _StringDTypeSupportsArray, + old: _StringDTypeSupportsArray, + new: _StringDTypeSupportsArray, + count: i_co = ..., +) -> _StringDTypeArray: ... +@overload +def replace( + a: T_co, + old: T_co, + new: T_co, + count: i_co = ..., +) -> _StringDTypeOrUnicodeArray: ... + +@overload +def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... +@overload +def partition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +@overload +def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def rpartition(a: U_co, sep: U_co) -> NDArray[np.str_]: ... +@overload +def rpartition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ... +@overload +def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ... +@overload +def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ... + +@overload +def translate( + a: U_co, + table: str, + deletechars: str | None = None, +) -> NDArray[np.str_]: ... +@overload +def translate( + a: S_co, + table: str, + deletechars: str | None = None, +) -> NDArray[np.bytes_]: ... +@overload +def translate( + a: _StringDTypeSupportsArray, + table: str, + deletechars: str | None = None, +) -> _StringDTypeArray: ... +@overload +def translate( + a: T_co, + table: str, + deletechars: str | None = None, +) -> _StringDTypeOrUnicodeArray: ... + +# +@overload +def slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.str_]: ... # type: ignore[overload-overlap] +@overload +def slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ... 
+@overload
+def slice(
+    a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /
+) -> _StringDTypeArray: ...
+@overload
+def slice(
+    a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /
+) -> _StringDTypeOrUnicodeArray: ...
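As a quick sanity check of the dispatch these overloads encode (a sketch, not part of the patch; it assumes NumPy 2.x, where numpy.strings and np.dtypes.StringDType exist), each input family maps back to itself at runtime:

    import numpy as np

    u = np.array(["ab", "cd"])                                  # str_   (U_co)
    b = np.array([b"ab", b"cd"])                                # bytes_ (S_co)
    t = np.array(["ab", "cd"], dtype=np.dtypes.StringDType())   # T_co

    print(np.strings.upper(u).dtype)   # <U2           -> NDArray[np.str_]
    print(np.strings.upper(b).dtype)   # |S2           -> NDArray[np.bytes_]
    print(np.strings.upper(t).dtype)   # StringDType() -> _StringDTypeArray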
diff --git a/python/numpy/_core/tests/_locales.py b/python/numpy/_core/tests/_locales.py
new file mode 100644
index 000000000..debda9639
--- /dev/null
+++ b/python/numpy/_core/tests/_locales.py
@@ -0,0 +1,72 @@
+"""Provide class for testing in French locale
+
+"""
+import locale
+import sys
+
+import pytest
+
+__ALL__ = ['CommaDecimalPointLocale']
+
+
+def find_comma_decimal_point_locale():
+    """See if platform has a decimal point as comma locale.
+
+    Find a locale that uses a comma instead of a period as the
+    decimal point.
+
+    Returns
+    -------
+    old_locale: str
+        Locale when the function was called.
+    new_locale: {str, None}
+        First French locale found, None if none found.
+
+    """
+    if sys.platform == 'win32':
+        locales = ['FRENCH']
+    else:
+        locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
+
+    old_locale = locale.getlocale(locale.LC_NUMERIC)
+    new_locale = None
+    try:
+        for loc in locales:
+            try:
+                locale.setlocale(locale.LC_NUMERIC, loc)
+                new_locale = loc
+                break
+            except locale.Error:
+                pass
+    finally:
+        locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
+    return old_locale, new_locale
+
+
+class CommaDecimalPointLocale:
+    """Sets LC_NUMERIC to a locale with comma as decimal point.
+
+    Classes derived from this class have setup and teardown methods that run
+    tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
+    the decimal point instead of periods ('.'). On exit the locale is restored
+    to the initial locale. It also serves as context manager with the same
+    effect. If no such locale is available, the test is skipped.
+
+    """
+    (cur_locale, tst_locale) = find_comma_decimal_point_locale()
+
+    def setup_method(self):
+        if self.tst_locale is None:
+            pytest.skip("No French locale available")
+        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+    def teardown_method(self):
+        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
+
+    def __enter__(self):
+        if self.tst_locale is None:
+            pytest.skip("No French locale available")
+        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
+
+    def __exit__(self, type, value, traceback):
+        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
diff --git a/python/numpy/_core/tests/_natype.py b/python/numpy/_core/tests/_natype.py
new file mode 100644
index 000000000..1c2175b35
--- /dev/null
+++ b/python/numpy/_core/tests/_natype.py
@@ -0,0 +1,205 @@
+# Vendored implementation of pandas.NA, adapted from pandas/_libs/missing.pyx
+#
+# This is vendored to avoid adding pandas as a test dependency.
+
+__all__ = ["pd_NA"]
+
+import numbers
+import sys
+
+import numpy as np
+
+# Assumed helper: pandas gets is_32bit from pandas.compat, which is not
+# vendored here; NAType.__hash__ below needs it.
+is_32bit = sys.maxsize <= 2**32
+
+
+def _create_binary_propagating_op(name, is_divmod=False):
+    is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"]
+
+    def method(self, other):
+        if (
+            other is pd_NA
+            or isinstance(other, (str, bytes, numbers.Number, np.bool))
+            or (isinstance(other, np.ndarray) and not other.shape)
+        ):
+            # Need the other.shape clause to handle NumPy scalars,
+            # since we do a setitem on `out` below, which
+            # won't work for NumPy scalars.
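+            # (A 0-d ndarray thus takes this scalar path and collapses to a
+            # single pd_NA rather than a 0-d object array.)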
+            if is_divmod:
+                return pd_NA, pd_NA
+            else:
+                return pd_NA
+
+        elif isinstance(other, np.ndarray):
+            out = np.empty(other.shape, dtype=object)
+            out[:] = pd_NA
+
+            if is_divmod:
+                return out, out.copy()
+            else:
+                return out
+
+        elif is_cmp and isinstance(other, (np.datetime64, np.timedelta64)):
+            return pd_NA
+
+        elif isinstance(other, np.datetime64):
+            if name in ["__sub__", "__rsub__"]:
+                return pd_NA
+
+        elif isinstance(other, np.timedelta64):
+            if name in ["__sub__", "__rsub__", "__add__", "__radd__"]:
+                return pd_NA
+
+        return NotImplemented
+
+    method.__name__ = name
+    return method
+
+
+def _create_unary_propagating_op(name: str):
+    def method(self):
+        return pd_NA
+
+    method.__name__ = name
+    return method
+
+
+class NAType:
+    def __repr__(self) -> str:
+        return "<NA>"
+
+    def __format__(self, format_spec) -> str:
+        try:
+            return self.__repr__().__format__(format_spec)
+        except ValueError:
+            return self.__repr__()
+
+    def __bool__(self):
+        raise TypeError("boolean value of NA is ambiguous")
+
+    def __hash__(self):
+        exponent = 31 if is_32bit else 61
+        return 2**exponent - 1
+
+    def __reduce__(self):
+        return "pd_NA"
+
+    # Binary arithmetic and comparison ops -> propagate
+
+    __add__ = _create_binary_propagating_op("__add__")
+    __radd__ = _create_binary_propagating_op("__radd__")
+    __sub__ = _create_binary_propagating_op("__sub__")
+    __rsub__ = _create_binary_propagating_op("__rsub__")
+    __mul__ = _create_binary_propagating_op("__mul__")
+    __rmul__ = _create_binary_propagating_op("__rmul__")
+    __matmul__ = _create_binary_propagating_op("__matmul__")
+    __rmatmul__ = _create_binary_propagating_op("__rmatmul__")
+    __truediv__ = _create_binary_propagating_op("__truediv__")
+    __rtruediv__ = _create_binary_propagating_op("__rtruediv__")
+    __floordiv__ = _create_binary_propagating_op("__floordiv__")
+    __rfloordiv__ = _create_binary_propagating_op("__rfloordiv__")
+    __mod__ = _create_binary_propagating_op("__mod__")
+    __rmod__ = _create_binary_propagating_op("__rmod__")
+    __divmod__ = _create_binary_propagating_op("__divmod__", is_divmod=True)
+    __rdivmod__ = _create_binary_propagating_op("__rdivmod__", is_divmod=True)
+    # __lshift__ and __rshift__ are not implemented
+
+    __eq__ = _create_binary_propagating_op("__eq__")
+    __ne__ = _create_binary_propagating_op("__ne__")
+    __le__ = _create_binary_propagating_op("__le__")
+    __lt__ = _create_binary_propagating_op("__lt__")
+    __gt__ = _create_binary_propagating_op("__gt__")
+    __ge__ = _create_binary_propagating_op("__ge__")
+
+    # Unary ops
+
+    __neg__ = _create_unary_propagating_op("__neg__")
+    __pos__ = _create_unary_propagating_op("__pos__")
+    __abs__ = _create_unary_propagating_op("__abs__")
+    __invert__ = _create_unary_propagating_op("__invert__")
+
+    # pow has special handling
+    def __pow__(self, other):
+        if other is pd_NA:
+            return pd_NA
+        elif isinstance(other, (numbers.Number, np.bool)):
+            if other == 0:
+                # returning positive is correct for +/- 0.
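+                # x ** 0 == 1 for every base, so NA ** 0 need not propagate.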
+                return type(other)(1)
+            else:
+                return pd_NA
+        elif isinstance(other, np.ndarray):  # pandas: util.is_array(other)
+            return np.where(other == 0, other.dtype.type(1), pd_NA)
+
+        return NotImplemented
+
+    def __rpow__(self, other):
+        if other is pd_NA:
+            return pd_NA
+        elif isinstance(other, (numbers.Number, np.bool)):
+            if other == 1:
+                return other
+            else:
+                return pd_NA
+        elif isinstance(other, np.ndarray):  # pandas: util.is_array(other)
+            return np.where(other == 1, other, pd_NA)
+        return NotImplemented
+
+    # Logical ops using Kleene logic
+
+    def __and__(self, other):
+        if other is False:
+            return False
+        elif other is True or other is pd_NA:
+            return pd_NA
+        return NotImplemented
+
+    __rand__ = __and__
+
+    def __or__(self, other):
+        if other is True:
+            return True
+        elif other is False or other is pd_NA:
+            return pd_NA
+        return NotImplemented
+
+    __ror__ = __or__
+
+    def __xor__(self, other):
+        if other is False or other is True or other is pd_NA:
+            return pd_NA
+        return NotImplemented
+
+    __rxor__ = __xor__
+
+    __array_priority__ = 1000
+    _HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool)
+
+    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+        types = self._HANDLED_TYPES + (NAType,)
+        for x in inputs:
+            if not isinstance(x, types):
+                return NotImplemented
+
+        if method != "__call__":
+            raise ValueError(f"ufunc method '{method}' not supported for NA")
+        result = maybe_dispatch_ufunc_to_dunder_op(
+            self, ufunc, method, *inputs, **kwargs
+        )
+        if result is NotImplemented:
+            # For a NumPy ufunc that's not a binop, like np.logaddexp
+            index = next(i for i, x in enumerate(inputs) if x is pd_NA)
+            result = np.broadcast_arrays(*inputs)[index]
+            if result.ndim == 0:
+                result = result.item()
+            if ufunc.nout > 1:
+                result = (pd_NA,) * ufunc.nout
+
+        return result
+
+
+pd_NA = NAType()
+
+
+def get_stringdtype_dtype(na_object, coerce=True):
+    # explicit is check for pd_NA because != with pd_NA returns pd_NA
+    if na_object is pd_NA or na_object != "unset":
+        return np.dtypes.StringDType(na_object=na_object, coerce=coerce)
+    else:
+        return np.dtypes.StringDType(coerce=coerce)
diff --git a/python/numpy/_core/tests/data/astype_copy.pkl b/python/numpy/_core/tests/data/astype_copy.pkl
new file mode 100644
index 000000000..7397c9782
Binary files /dev/null and b/python/numpy/_core/tests/data/astype_copy.pkl differ
diff --git a/python/numpy/_core/tests/data/generate_umath_validation_data.cpp b/python/numpy/_core/tests/data/generate_umath_validation_data.cpp
new file mode 100644
index 000000000..88ff45e16
--- /dev/null
+++ b/python/numpy/_core/tests/data/generate_umath_validation_data.cpp
@@ -0,0 +1,170 @@
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <limits>
+#include <string>
+#include <vector>
+
+struct ufunc {
+    std::string name;
+    double (*f32func)(double);
+    long double (*f64func)(long double);
+    float f32ulp;
+    float f64ulp;
+};
+
+template <typename T>
+T
+RandomFloat(T a, T b)
+{
+    T random = ((T)rand()) / (T)RAND_MAX;
+    T diff = b - a;
+    T r = random * diff;
+    return a + r;
+}
+
+template <typename T>
+void
+append_random_array(std::vector<T> &arr, T min, T max, size_t N)
+{
+    for (size_t ii = 0; ii < N; ++ii)
+        arr.emplace_back(RandomFloat(min, max));
+}
+
+template <typename T1, typename T2>
+std::vector<T1>
+computeTrueVal(const std::vector<T1> &in, T2 (*mathfunc)(T2))
+{
+    std::vector<T1> out;
+    for (T1 elem : in) {
+        T2 elem_d = (T2)elem;
+        T1 out_elem = (T1)mathfunc(elem_d);
+        out.emplace_back(out_elem);
+    }
+    return out;
+}
+
+/*
+ * FP range:
+ * [-inf, -maxflt, -1., -minflt, -minden, 0., minden, minflt, 1., maxflt, inf]
+ */
+
+#define MINDEN std::numeric_limits<T>::denorm_min()
+#define MINFLT std::numeric_limits<T>::min()
+#define MAXFLT std::numeric_limits<T>::max()
+#define INF std::numeric_limits<T>::infinity()
+#define qNAN std::numeric_limits<T>::quiet_NaN()
+#define sNAN std::numeric_limits<T>::signaling_NaN()
+
+template <typename T>
+std::vector<T>
+generate_input_vector(std::string func)
+{
+    std::vector<T> input = {MINDEN, -MINDEN, MINFLT, -MINFLT, MAXFLT,
+                            -MAXFLT, INF, -INF, qNAN, sNAN,
+                            -1.0, 1.0, 0.0, -0.0};
+
+    // [-1.0, 1.0]
+    if ((func == "arcsin") || (func == "arccos") || (func == "arctanh")) {
+        append_random_array<T>(input, -1.0, 1.0, 700);
+    }
+    // (0.0, INF]
+    else if ((func == "log2") || (func == "log10")) {
+        append_random_array<T>(input, 0.0, 1.0, 200);
+        append_random_array<T>(input, MINDEN, MINFLT, 200);
+        append_random_array<T>(input, MINFLT, 1.0, 200);
+        append_random_array<T>(input, 1.0, MAXFLT, 200);
+    }
+    // (-1.0, INF]
+    else if (func == "log1p") {
+        append_random_array<T>(input, -1.0, 1.0, 200);
+        append_random_array<T>(input, -MINFLT, -MINDEN, 100);
+        append_random_array<T>(input, -1.0, -MINFLT, 100);
+        append_random_array<T>(input, MINDEN, MINFLT, 100);
+        append_random_array<T>(input, MINFLT, 1.0, 100);
+        append_random_array<T>(input, 1.0, MAXFLT, 100);
+    }
+    // [1.0, INF]
+    else if (func == "arccosh") {
+        append_random_array<T>(input, 1.0, 2.0, 400);
+        append_random_array<T>(input, 2.0, MAXFLT, 300);
+    }
+    // [-INF, INF]
+    else {
+        append_random_array<T>(input, -1.0, 1.0, 100);
+        append_random_array<T>(input, MINDEN, MINFLT, 100);
+        append_random_array<T>(input, -MINFLT, -MINDEN, 100);
+        append_random_array<T>(input, MINFLT, 1.0, 100);
+        append_random_array<T>(input, -1.0, -MINFLT, 100);
+        append_random_array<T>(input, 1.0, MAXFLT, 100);
+        append_random_array<T>(input, -MAXFLT, -100.0, 100);
+    }
+
+    std::random_shuffle(input.begin(), input.end());
+    return input;
+}
+
+int
+main()
+{
+    srand(42);
+    std::vector<struct ufunc> umathfunc = {
+        {"sin", sin, sin, 1.49, 1.00},
+        {"cos", cos, cos, 1.49, 1.00},
+        {"tan", tan, tan, 3.91, 1.00},
+        {"arcsin", asin, asin, 3.12, 1.00},
+        {"arccos", acos, acos, 2.1, 1.00},
+        {"arctan", atan, atan, 2.3, 1.00},
+        {"sinh", sinh, sinh, 1.55, 1.00},
+        {"cosh", cosh, cosh, 2.48, 1.00},
+        {"tanh", tanh, tanh, 1.38, 2.00},
+        {"arcsinh", asinh, asinh, 1.01, 1.00},
+        {"arccosh", acosh, acosh, 1.16, 1.00},
+        {"arctanh", atanh, atanh, 1.45, 1.00},
+        {"cbrt", cbrt, cbrt, 1.94, 2.00},
+        //{"exp",exp,exp,3.76,1.00},
+        {"exp2", exp2, exp2, 1.01, 1.00},
+        {"expm1", expm1, expm1, 2.62, 1.00},
+        //{"log",log,log,1.84,1.00},
+        {"log10", log10, log10, 3.5, 1.00},
+        {"log1p", log1p, log1p, 1.96, 1.0},
+        {"log2", log2, log2, 2.12, 1.00},
+    };
+
+    for (int ii = 0; ii < umathfunc.size(); ++ii) {
+        // ignore sin/cos
+        if ((umathfunc[ii].name != "sin") && (umathfunc[ii].name != "cos")) {
+            std::string fileName =
+                    "umath-validation-set-" + umathfunc[ii].name + ".csv";
+            std::ofstream txtOut;
+            txtOut.open(fileName, std::ofstream::trunc);
+            txtOut << "dtype,input,output,ulperrortol" << std::endl;
+
+            // Single Precision
+            auto f32in = generate_input_vector<float>(umathfunc[ii].name);
+            auto f32out = computeTrueVal<float, double>(f32in,
+                                                        umathfunc[ii].f32func);
+            for (int jj = 0; jj < f32in.size(); ++jj) {
+                txtOut << "np.float32" << std::hex << ",0x"
+                       << *reinterpret_cast<uint32_t *>(&f32in[jj]) << ",0x"
+                       << *reinterpret_cast<uint32_t *>(&f32out[jj]) << ","
+                       << ceil(umathfunc[ii].f32ulp) << std::endl;
+            }
+
+            // Double Precision
+            auto f64in = generate_input_vector<double>(umathfunc[ii].name);
+            auto f64out = computeTrueVal<double, long double>(
+                    f64in, umathfunc[ii].f64func);
+            for (int jj = 0; jj < f64in.size(); ++jj) {
+                txtOut << "np.float64" << std::hex << ",0x"
+                       << *reinterpret_cast<uint64_t *>(&f64in[jj]) << ",0x"
+                       << *reinterpret_cast<uint64_t *>(&f64out[jj]) << ","
+                       << ceil(umathfunc[ii].f64ulp) << std::endl;
+            }
+            txtOut.close();
+        }
+    }
+    return 0;
+}
diff --git a/python/numpy/_core/tests/data/recarray_from_file.fits b/python/numpy/_core/tests/data/recarray_from_file.fits
new file mode 100644
index 000000000..ca48ee851
Binary files /dev/null and b/python/numpy/_core/tests/data/recarray_from_file.fits differ
diff --git a/python/numpy/_core/tests/data/umath-validation-set-README.txt b/python/numpy/_core/tests/data/umath-validation-set-README.txt
new file mode 100644
index 000000000..cfc9e4145
--- /dev/null
+++ b/python/numpy/_core/tests/data/umath-validation-set-README.txt
@@ -0,0 +1,15 @@
+Steps to validate transcendental functions:
+1) Add a file 'umath-validation-set-<ufuncname>.txt', where <ufuncname> is the name of
+   the function in NumPy you want to validate
+2) The file should contain 4 columns: dtype,input,expected output,ulperror
+    a. dtype: one of np.float16, np.float32, np.float64
+    b. input: floating point input to ufunc in hex. Example: 0x414570a4
+       represents 12.340000152587890625
+    c. expected output: floating point output for the corresponding input in hex.
+       This should be computed using a high(er) precision library and then rounded to
+       same format as the input.
+    d. ulperror: expected maximum ulp error of the function. This
+       should be same across all rows of the same dtype. Otherwise, the function is
+       tested for the maximum ulp error among all entries of that dtype.
+3) Add file umath-validation-set-<ufuncname>.txt to the test file test_umath_accuracy.py
+   which will then validate your ufunc.
diff --git a/python/numpy/_core/tests/data/umath-validation-set-arccos.csv b/python/numpy/_core/tests/data/umath-validation-set-arccos.csv
new file mode 100644
index 000000000..82c8595cb
--- /dev/null
+++ b/python/numpy/_core/tests/data/umath-validation-set-arccos.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xbddd7f50,0x3fd6eec2,3
+np.float32,0xbe32a20c,0x3fdf8182,3
+np.float32,0xbf607c09,0x4028f84f,3
+np.float32,0x3f25d906,0x3f5db544,3
+np.float32,0x3f01cec8,0x3f84febf,3
+np.float32,0x3f1d5c6e,0x3f68a735,3
+np.float32,0xbf0cab89,0x4009c36d,3
+np.float32,0xbf176b40,0x400d0941,3
+np.float32,0x3f3248b2,0x3f4ce6d4,3
+np.float32,0x3f390b48,0x3f434e0d,3
+np.float32,0xbe261698,0x3fddea43,3
+np.float32,0x3f0e1154,0x3f7b848b,3
+np.float32,0xbf379a3c,0x4017b764,3
+np.float32,0xbeda6f2c,0x4000bd62,3
+np.float32,0xbf6a0c3f,0x402e5d5a,3
+np.float32,0x3ef1d700,0x3f8a17b7,3
+np.float32,0xbf6f4f65,0x4031d30d,3
+np.float32,0x3f2c9eee,0x3f54adfd,3
+np.float32,0x3f3cfb18,0x3f3d8a1e,3
+np.float32,0x3ba80800,0x3fc867d2,3
+np.float32,0x3e723b08,0x3faa7e4d,3
+np.float32,0xbf65820f,0x402bb054,3
+np.float32,0xbee64e7a,0x40026410,3
+np.float32,0x3cb15140,0x3fc64a87,3
+np.float32,0x3f193660,0x3f6ddf2a,3
+np.float32,0xbf0e5b52,0x400a44f7,3
+np.float32,0x3ed55f14,0x3f920a4b,3
+np.float32,0x3dd11a80,0x3fbbf85c,3
+np.float32,0xbf4f5c4b,0x4020f4f9,3
+np.float32,0x3f787532,0x3e792e87,3
+np.float32,0x3f40e6ac,0x3f37a74f,3
+np.float32,0x3f1c1318,0x3f6a47b6,3
+np.float32,0xbe3c48d8,0x3fe0bb70,3
+np.float32,0xbe94d4bc,0x3feed08e,3
+np.float32,0xbe5c3688,0x3fe4ce26,3
+np.float32,0xbf6fe026,0x403239cb,3
+np.float32,0x3ea5983c,0x3f9ee7bf,3
+np.float32,0x3f1471e6,0x3f73c5bb,3
+np.float32,0x3f0e2622,0x3f7b6b87,3
+np.float32,0xbf597180,0x40257ad1,3
+np.float32,0xbeb5321c,0x3ff75d34,3
+np.float32,0x3f5afcd2,0x3f0b6012,3
+np.float32,0xbef2ff88,0x40042e14,3
+np.float32,0xbedc747e,0x400104f5,3
+np.float32,0xbee0c2f4,0x40019dfc,3
+np.float32,0xbf152cd8,0x400c57dc,3 +np.float32,0xbf6cf9e2,0x40303bbe,3 +np.float32,0x3ed9cd74,0x3f90d1a1,3 +np.float32,0xbf754406,0x4036767f,3 +np.float32,0x3f59c5c2,0x3f0db42f,3 +np.float32,0x3f2eefd8,0x3f518684,3 +np.float32,0xbf156bf9,0x400c6b49,3 +np.float32,0xbd550790,0x3fcfb8dc,3 +np.float32,0x3ede58fc,0x3f8f8f77,3 +np.float32,0xbf00ac19,0x40063c4b,3 +np.float32,0x3f4d25ba,0x3f24280e,3 +np.float32,0xbe9568be,0x3feef73c,3 +np.float32,0x3f67d154,0x3ee05547,3 +np.float32,0x3f617226,0x3efcb4f4,3 +np.float32,0xbf3ab41a,0x4018d6cc,3 +np.float32,0xbf3186fe,0x401592cd,3 +np.float32,0x3de3ba50,0x3fbacca9,3 +np.float32,0x3e789f98,0x3fa9ab97,3 +np.float32,0x3f016e08,0x3f8536d8,3 +np.float32,0x3e8b618c,0x3fa5c571,3 +np.float32,0x3eff97bc,0x3f8628a9,3 +np.float32,0xbf6729f0,0x402ca32f,3 +np.float32,0xbebec146,0x3ff9eddc,3 +np.float32,0x3ddb2e60,0x3fbb563a,3 +np.float32,0x3caa8e40,0x3fc66595,3 +np.float32,0xbf5973f2,0x40257bfa,3 +np.float32,0xbdd82c70,0x3fd69916,3 +np.float32,0xbedf4c82,0x400169ef,3 +np.float32,0x3ef8f22c,0x3f881184,3 +np.float32,0xbf1d74d4,0x400eedc9,3 +np.float32,0x3f2e10a6,0x3f52b790,3 +np.float32,0xbf08ecc0,0x4008a628,3 +np.float32,0x3ecb7db4,0x3f94be9f,3 +np.float32,0xbf052ded,0x40078bfc,3 +np.float32,0x3f2ee78a,0x3f5191e4,3 +np.float32,0xbf56f4e1,0x40245194,3 +np.float32,0x3f600a3e,0x3f014a25,3 +np.float32,0x3f3836f8,0x3f44808b,3 +np.float32,0x3ecabfbc,0x3f94f25c,3 +np.float32,0x3c70f500,0x3fc72dec,3 +np.float32,0x3f17c444,0x3f6fabf0,3 +np.float32,0xbf4c22a5,0x401f9a09,3 +np.float32,0xbe4205dc,0x3fe1765a,3 +np.float32,0x3ea49138,0x3f9f2d36,3 +np.float32,0xbece0082,0x3ffe106b,3 +np.float32,0xbe387578,0x3fe03eef,3 +np.float32,0xbf2b6466,0x40137a30,3 +np.float32,0xbe9dadb2,0x3ff12204,3 +np.float32,0xbf56b3f2,0x402433bb,3 +np.float32,0xbdf9b4d8,0x3fd8b51f,3 +np.float32,0x3f58a596,0x3f0fd4b4,3 +np.float32,0xbedf5748,0x40016b6e,3 +np.float32,0x3f446442,0x3f32476f,3 +np.float32,0x3f5be886,0x3f099658,3 +np.float32,0x3ea1e44c,0x3f9fe1de,3 +np.float32,0xbf11e9b8,0x400b585f,3 +np.float32,0xbf231f8f,0x4010befb,3 +np.float32,0xbf4395ea,0x401c2dd0,3 +np.float32,0x3e9e7784,0x3fa0c8a6,3 +np.float32,0xbe255184,0x3fddd14c,3 +np.float32,0x3f70d25e,0x3eb13148,3 +np.float32,0x3f220cdc,0x3f62a722,3 +np.float32,0xbd027bf0,0x3fcd23e7,3 +np.float32,0x3e4ef8b8,0x3faf02d2,3 +np.float32,0xbf76fc6b,0x40380728,3 +np.float32,0xbf57e761,0x4024c1cd,3 +np.float32,0x3ed4fc20,0x3f922580,3 +np.float32,0xbf09b64a,0x4008e1db,3 +np.float32,0x3f21ca62,0x3f62fcf5,3 +np.float32,0xbe55f610,0x3fe40170,3 +np.float32,0xbc0def80,0x3fca2bbb,3 +np.float32,0xbebc8764,0x3ff9547b,3 +np.float32,0x3ec1b200,0x3f9766d1,3 +np.float32,0xbf4ee44e,0x4020c1ee,3 +np.float32,0xbea85852,0x3ff3f22a,3 +np.float32,0xbf195c0c,0x400da3d3,3 +np.float32,0xbf754b5d,0x40367ce8,3 +np.float32,0xbdcbfe50,0x3fd5d52b,3 +np.float32,0xbf1adb87,0x400e1be3,3 +np.float32,0xbf6f8491,0x4031f898,3 +np.float32,0xbf6f9ae7,0x4032086e,3 +np.float32,0xbf52b3f0,0x40226790,3 +np.float32,0xbf698452,0x402e09f4,3 +np.float32,0xbf43dc9a,0x401c493a,3 +np.float32,0xbf165f7f,0x400cb664,3 +np.float32,0x3e635468,0x3fac682f,3 +np.float32,0xbe8cf2b6,0x3fecc28a,3 +np.float32,0x7f7fffff,0x7fc00000,3 +np.float32,0xbf4c6513,0x401fb597,3 +np.float32,0xbf02b8f8,0x4006d47e,3 +np.float32,0x3ed3759c,0x3f9290c8,3 +np.float32,0xbf2a7a5f,0x40132b98,3 +np.float32,0xbae65000,0x3fc9496f,3 +np.float32,0x3f65f5ea,0x3ee8ef07,3 +np.float32,0xbe7712fc,0x3fe84106,3 +np.float32,0xbb9ff700,0x3fc9afd2,3 +np.float32,0x3d8d87a0,0x3fc03592,3 +np.float32,0xbefc921c,0x40058c23,3 
+np.float32,0xbf286566,0x401279d8,3 +np.float32,0x3f53857e,0x3f192eaf,3 +np.float32,0xbee9b0f4,0x4002dd90,3 +np.float32,0x3f4041f8,0x3f38a14a,3 +np.float32,0x3f54ea96,0x3f16b02d,3 +np.float32,0x3ea50ef8,0x3f9f0c01,3 +np.float32,0xbeaad2dc,0x3ff49a4a,3 +np.float32,0xbec428c8,0x3ffb636f,3 +np.float32,0xbda46178,0x3fd358c7,3 +np.float32,0xbefacfc4,0x40054b7f,3 +np.float32,0xbf7068f9,0x40329c85,3 +np.float32,0x3f70b850,0x3eb1caa7,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x80000000,0x3fc90fdb,3 +np.float32,0x3f68d5c8,0x3edb7cf3,3 +np.float32,0x3d9443d0,0x3fbfc98a,3 +np.float32,0xff7fffff,0x7fc00000,3 +np.float32,0xbeee7ba8,0x40038a5e,3 +np.float32,0xbf0aaaba,0x40092a73,3 +np.float32,0x3f36a4e8,0x3f46c0ee,3 +np.float32,0x3ed268e4,0x3f92da82,3 +np.float32,0xbee6002c,0x4002591b,3 +np.float32,0xbe8f2752,0x3fed5576,3 +np.float32,0x3f525912,0x3f1b40e0,3 +np.float32,0xbe8e151e,0x3fed0e16,3 +np.float32,0x1,0x3fc90fdb,3 +np.float32,0x3ee23b84,0x3f8e7ae1,3 +np.float32,0xbf5961ca,0x40257361,3 +np.float32,0x3f6bbca0,0x3ecd14cd,3 +np.float32,0x3e27b230,0x3fb4014d,3 +np.float32,0xbf183bb8,0x400d49fc,3 +np.float32,0x3f57759c,0x3f120b68,3 +np.float32,0xbd6994c0,0x3fd05d84,3 +np.float32,0xbf1dd684,0x400f0cc8,3 +np.float32,0xbececc1c,0x3ffe480a,3 +np.float32,0xbf48855f,0x401e206d,3 +np.float32,0x3f28c922,0x3f59d382,3 +np.float32,0xbf65c094,0x402bd3b0,3 +np.float32,0x3f657d42,0x3eeb11dd,3 +np.float32,0xbed32d4e,0x3fff7b15,3 +np.float32,0xbf31af02,0x4015a0b1,3 +np.float32,0x3d89eb00,0x3fc06f7f,3 +np.float32,0x3dac2830,0x3fbe4a17,3 +np.float32,0x3f7f7cb6,0x3d81a7df,3 +np.float32,0xbedbb570,0x4000ea82,3 +np.float32,0x3db37830,0x3fbdd4a8,3 +np.float32,0xbf376f48,0x4017a7fd,3 +np.float32,0x3f319f12,0x3f4dd2c9,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x3f1b4f70,0x3f6b3e31,3 +np.float32,0x3e33c880,0x3fb278d1,3 +np.float32,0x3f2796e0,0x3f5b69bd,3 +np.float32,0x3f4915d6,0x3f2ad4d0,3 +np.float32,0x3e4db120,0x3faf2ca0,3 +np.float32,0x3ef03dd4,0x3f8a8ba9,3 +np.float32,0x3e96ca88,0x3fa2cbf7,3 +np.float32,0xbeb136ce,0x3ff64d2b,3 +np.float32,0xbf2f3938,0x4014c75e,3 +np.float32,0x3f769dde,0x3e8b0d76,3 +np.float32,0x3f67cec8,0x3ee06148,3 +np.float32,0x3f0a1ade,0x3f80204e,3 +np.float32,0x3e4b9718,0x3faf7144,3 +np.float32,0x3cccb480,0x3fc5dcf3,3 +np.float32,0x3caeb740,0x3fc654f0,3 +np.float32,0x3f684e0e,0x3ede0678,3 +np.float32,0x3f0ba93c,0x3f7e6663,3 +np.float32,0xbf12bbc4,0x400b985e,3 +np.float32,0xbf2a8e1a,0x40133235,3 +np.float32,0x3f42029c,0x3f35f5c5,3 +np.float32,0x3eed1728,0x3f8b6f9c,3 +np.float32,0xbe5779ac,0x3fe432fd,3 +np.float32,0x3f6ed8b8,0x3ebc7e4b,3 +np.float32,0x3eea25b0,0x3f8c43c7,3 +np.float32,0x3f1988a4,0x3f6d786b,3 +np.float32,0xbe751674,0x3fe7ff8a,3 +np.float32,0xbe9f7418,0x3ff1997d,3 +np.float32,0x3dca11d0,0x3fbc6979,3 +np.float32,0x3f795226,0x3e6a6cab,3 +np.float32,0xbea780e0,0x3ff3b926,3 +np.float32,0xbed92770,0x4000901e,3 +np.float32,0xbf3e9f8c,0x401a49f8,3 +np.float32,0x3f0f7054,0x3f79ddb2,3 +np.float32,0x3a99d400,0x3fc8e966,3 +np.float32,0xbef082b0,0x4003d3c6,3 +np.float32,0xbf0d0790,0x4009defb,3 +np.float32,0xbf1649da,0x400cafb4,3 +np.float32,0xbea5aca8,0x3ff33d5c,3 +np.float32,0xbf4e1843,0x40206ba1,3 +np.float32,0xbe3d7d5c,0x3fe0e2ad,3 +np.float32,0xbf0e802d,0x400a500e,3 +np.float32,0xbf0de8f0,0x400a2295,3 +np.float32,0xbf3016ba,0x4015137e,3 +np.float32,0x3f36b1ea,0x3f46ae5d,3 +np.float32,0xbd27f170,0x3fce4fc7,3 +np.float32,0x3e96ec54,0x3fa2c31f,3 +np.float32,0x3eb4dfdc,0x3f9ad87d,3 +np.float32,0x3f5cac6c,0x3f0815cc,3 +np.float32,0xbf0489aa,0x40075bf1,3 
+np.float32,0x3df010c0,0x3fba05f5,3 +np.float32,0xbf229f4a,0x4010956a,3 +np.float32,0x3f75e474,0x3e905a99,3 +np.float32,0xbcece6a0,0x3fccc397,3 +np.float32,0xbdb41528,0x3fd454e7,3 +np.float32,0x3ec8b2f8,0x3f958118,3 +np.float32,0x3f5eaa70,0x3f041a1d,3 +np.float32,0xbf32e1cc,0x40160b91,3 +np.float32,0xbe8e6026,0x3fed219c,3 +np.float32,0x3e6b3160,0x3fab65e3,3 +np.float32,0x3e6d7460,0x3fab1b81,3 +np.float32,0xbf13fbde,0x400bfa3b,3 +np.float32,0xbe8235ec,0x3fe9f9e3,3 +np.float32,0x3d71c4a0,0x3fc18096,3 +np.float32,0x3eb769d0,0x3f9a2aa0,3 +np.float32,0xbf68cb3b,0x402d99e4,3 +np.float32,0xbd917610,0x3fd22932,3 +np.float32,0x3d3cba60,0x3fc3297f,3 +np.float32,0xbf383cbe,0x4017f1cc,3 +np.float32,0xbeee96d0,0x40038e34,3 +np.float32,0x3ec89cb4,0x3f958725,3 +np.float32,0x3ebf92d8,0x3f97f95f,3 +np.float32,0x3f30f3da,0x3f4ec021,3 +np.float32,0xbd26b560,0x3fce45e4,3 +np.float32,0xbec0eb12,0x3ffa8330,3 +np.float32,0x3f6d592a,0x3ec4a6c1,3 +np.float32,0x3ea6d39c,0x3f9e9463,3 +np.float32,0x3e884184,0x3fa6951e,3 +np.float32,0x3ea566c4,0x3f9ef4d1,3 +np.float32,0x3f0c8f4c,0x3f7d5380,3 +np.float32,0x3f28e1ba,0x3f59b2cb,3 +np.float32,0x3f798538,0x3e66e1c3,3 +np.float32,0xbe2889b8,0x3fde39b8,3 +np.float32,0x3f3da05e,0x3f3c949c,3 +np.float32,0x3f24d700,0x3f5f073e,3 +np.float32,0xbe5b5768,0x3fe4b198,3 +np.float32,0xbed3b03a,0x3fff9f05,3 +np.float32,0x3e8a1c4c,0x3fa619eb,3 +np.float32,0xbf075d24,0x40083030,3 +np.float32,0x3f765648,0x3e8d1f52,3 +np.float32,0xbf70fc5e,0x403308bb,3 +np.float32,0x3f557ae8,0x3f15ab76,3 +np.float32,0x3f02f7ea,0x3f84521c,3 +np.float32,0x3f7ebbde,0x3dcbc5c5,3 +np.float32,0xbefbdfc6,0x40057285,3 +np.float32,0x3ec687ac,0x3f9617d9,3 +np.float32,0x3e4831c8,0x3fafe01b,3 +np.float32,0x3e25cde0,0x3fb43ea8,3 +np.float32,0x3e4f2ab8,0x3faefc70,3 +np.float32,0x3ea60ae4,0x3f9ec973,3 +np.float32,0xbf1ed55f,0x400f5dde,3 +np.float32,0xbf5ad4aa,0x40262479,3 +np.float32,0x3e8b3594,0x3fa5d0de,3 +np.float32,0x3f3a77aa,0x3f413c80,3 +np.float32,0xbf07512b,0x40082ca9,3 +np.float32,0x3f33d990,0x3f4ab5e5,3 +np.float32,0x3f521556,0x3f1bb78f,3 +np.float32,0xbecf6036,0x3ffe7086,3 +np.float32,0x3db91bd0,0x3fbd7a11,3 +np.float32,0x3ef63a74,0x3f88d839,3 +np.float32,0xbf2f1116,0x4014b99c,3 +np.float32,0xbf17fdc0,0x400d36b9,3 +np.float32,0xbe87df2c,0x3feb7117,3 +np.float32,0x80800000,0x3fc90fdb,3 +np.float32,0x3ee24c1c,0x3f8e7641,3 +np.float32,0x3f688dce,0x3edcd644,3 +np.float32,0xbf0f4e1c,0x400a8e1b,3 +np.float32,0x0,0x3fc90fdb,3 +np.float32,0x3f786eba,0x3e7999d4,3 +np.float32,0xbf404f80,0x401aeca8,3 +np.float32,0xbe9ffb6a,0x3ff1bd18,3 +np.float32,0x3f146bfc,0x3f73ccfd,3 +np.float32,0xbe47d630,0x3fe233ee,3 +np.float32,0xbe95847c,0x3feefe7c,3 +np.float32,0xbf135df0,0x400bc9e5,3 +np.float32,0x3ea19f3c,0x3f9ff411,3 +np.float32,0x3f235e20,0x3f60f247,3 +np.float32,0xbec789ec,0x3ffc4def,3 +np.float32,0x3f04b656,0x3f834db6,3 +np.float32,0x3dfaf440,0x3fb95679,3 +np.float32,0xbe4a7f28,0x3fe28abe,3 +np.float32,0x3ed4850c,0x3f92463b,3 +np.float32,0x3ec4ba5c,0x3f9694dd,3 +np.float32,0xbce24ca0,0x3fcc992b,3 +np.float32,0xbf5b7c6e,0x402675a0,3 +np.float32,0xbea3ce2a,0x3ff2bf04,3 +np.float32,0x3db02c60,0x3fbe0998,3 +np.float32,0x3c47b780,0x3fc78069,3 +np.float32,0x3ed33b20,0x3f92a0d5,3 +np.float32,0xbf4556d7,0x401cdcde,3 +np.float32,0xbe1b6e28,0x3fdc90ec,3 +np.float32,0xbf3289b7,0x4015ecd0,3 +np.float32,0x3df3f240,0x3fb9c76d,3 +np.float32,0x3eefa7d0,0x3f8ab61d,3 +np.float32,0xbe945838,0x3feeb006,3 +np.float32,0xbf0b1386,0x400949a3,3 +np.float32,0x3f77e546,0x3e812cc1,3 +np.float32,0x3e804ba0,0x3fa8a480,3 
+np.float32,0x3f43dcea,0x3f331a06,3 +np.float32,0x3eb87450,0x3f99e33c,3 +np.float32,0x3e5f4898,0x3facecea,3 +np.float32,0x3f646640,0x3eeff10e,3 +np.float32,0x3f1aa832,0x3f6c1051,3 +np.float32,0xbebf6bfa,0x3ffa1bdc,3 +np.float32,0xbb77f300,0x3fc98bd4,3 +np.float32,0x3f3587fe,0x3f485645,3 +np.float32,0x3ef85f34,0x3f883b8c,3 +np.float32,0x3f50e584,0x3f1dc82c,3 +np.float32,0x3f1d30a8,0x3f68deb0,3 +np.float32,0x3ee75a78,0x3f8d0c86,3 +np.float32,0x3f2c023a,0x3f5581e1,3 +np.float32,0xbf074e34,0x40082bca,3 +np.float32,0xbead71f0,0x3ff54c6d,3 +np.float32,0xbf39ed88,0x40188e69,3 +np.float32,0x3f5d2fe6,0x3f07118b,3 +np.float32,0xbf1f79f8,0x400f9267,3 +np.float32,0x3e900c58,0x3fa48e99,3 +np.float32,0xbf759cb2,0x4036c47b,3 +np.float32,0x3f63329c,0x3ef5359c,3 +np.float32,0xbf5d6755,0x40276709,3 +np.float32,0x3f2ce31c,0x3f54519a,3 +np.float32,0x7f800000,0x7fc00000,3 +np.float32,0x3f1bf50e,0x3f6a6d9a,3 +np.float32,0x3f258334,0x3f5e25d8,3 +np.float32,0xbf661a3f,0x402c06ac,3 +np.float32,0x3d1654c0,0x3fc45cef,3 +np.float32,0xbef14a36,0x4003f009,3 +np.float32,0xbf356051,0x4016ec3a,3 +np.float32,0x3f6ccc42,0x3ec79193,3 +np.float32,0xbf2fe3d6,0x401501f9,3 +np.float32,0x3deedc80,0x3fba195b,3 +np.float32,0x3f2e5a28,0x3f52533e,3 +np.float32,0x3e6b68b8,0x3fab5ec8,3 +np.float32,0x3e458240,0x3fb037b7,3 +np.float32,0xbf24bab0,0x401144cb,3 +np.float32,0x3f600f4c,0x3f013fb2,3 +np.float32,0x3f021a04,0x3f84d316,3 +np.float32,0x3f741732,0x3e9cc948,3 +np.float32,0x3f0788aa,0x3f81a5b0,3 +np.float32,0x3f28802c,0x3f5a347c,3 +np.float32,0x3c9eb400,0x3fc69500,3 +np.float32,0x3e5d11e8,0x3fad357a,3 +np.float32,0x3d921250,0x3fbfecb9,3 +np.float32,0x3f354866,0x3f48b066,3 +np.float32,0xbf72cf43,0x40346d84,3 +np.float32,0x3eecdbb8,0x3f8b805f,3 +np.float32,0xbee585d0,0x400247fd,3 +np.float32,0x3e3607a8,0x3fb22fc6,3 +np.float32,0xbf0cb7d6,0x4009c71c,3 +np.float32,0xbf56b230,0x402432ec,3 +np.float32,0xbf4ced02,0x401fee29,3 +np.float32,0xbf3a325c,0x4018a776,3 +np.float32,0x3ecae8bc,0x3f94e732,3 +np.float32,0xbe48c7e8,0x3fe252bd,3 +np.float32,0xbe175d7c,0x3fdc0d5b,3 +np.float32,0x3ea78dac,0x3f9e632d,3 +np.float32,0xbe7434a8,0x3fe7e279,3 +np.float32,0x3f1f9e02,0x3f65c7b9,3 +np.float32,0xbe150f2c,0x3fdbc2c2,3 +np.float32,0x3ee13480,0x3f8ec423,3 +np.float32,0x3ecb7d54,0x3f94beb9,3 +np.float32,0x3f1cef42,0x3f693181,3 +np.float32,0xbf1ec06a,0x400f5730,3 +np.float32,0xbe112acc,0x3fdb44e8,3 +np.float32,0xbe77b024,0x3fe85545,3 +np.float32,0x3ec86fe0,0x3f959353,3 +np.float32,0x3f36b326,0x3f46ac9a,3 +np.float32,0x3e581a70,0x3fadd829,3 +np.float32,0xbf032c0c,0x4006f5f9,3 +np.float32,0xbf43b1fd,0x401c38b1,3 +np.float32,0x3f3701b4,0x3f463c5c,3 +np.float32,0x3f1a995a,0x3f6c22f1,3 +np.float32,0xbf05de0b,0x4007bf97,3 +np.float32,0x3d4bd960,0x3fc2b063,3 +np.float32,0x3f0e1618,0x3f7b7ed0,3 +np.float32,0x3edfd420,0x3f8f2628,3 +np.float32,0xbf6662fe,0x402c3047,3 +np.float32,0x3ec0690c,0x3f97bf9b,3 +np.float32,0xbeaf4146,0x3ff5c7a0,3 +np.float32,0x3f5e7764,0x3f04816d,3 +np.float32,0xbedd192c,0x40011bc5,3 +np.float32,0x3eb76350,0x3f9a2c5e,3 +np.float32,0xbed8108c,0x400069a5,3 +np.float32,0xbe59f31c,0x3fe48401,3 +np.float32,0xbea3e1e6,0x3ff2c439,3 +np.float32,0x3e26d1f8,0x3fb41db5,3 +np.float32,0x3f3a0a7c,0x3f41dba5,3 +np.float32,0x3ebae068,0x3f993ce4,3 +np.float32,0x3f2d8e30,0x3f536942,3 +np.float32,0xbe838bbe,0x3fea5247,3 +np.float32,0x3ebe4420,0x3f98538f,3 +np.float32,0xbcc59b80,0x3fcc265c,3 +np.float32,0x3eebb5c8,0x3f8bd334,3 +np.float32,0xbafc3400,0x3fc94ee8,3 +np.float32,0xbf63ddc1,0x402ac683,3 +np.float32,0xbeabdf80,0x3ff4e18f,3 
+np.float32,0x3ea863f0,0x3f9e2a78,3 +np.float32,0x3f45b292,0x3f303bc1,3 +np.float32,0xbe68aa60,0x3fe666bf,3 +np.float32,0x3eb9de18,0x3f998239,3 +np.float32,0xbf719d85,0x4033815e,3 +np.float32,0x3edef9a8,0x3f8f62db,3 +np.float32,0xbd7781c0,0x3fd0cd1e,3 +np.float32,0x3f0b3b90,0x3f7ee92a,3 +np.float32,0xbe3eb3b4,0x3fe10a27,3 +np.float32,0xbf31a4c4,0x40159d23,3 +np.float32,0x3e929434,0x3fa3e5b0,3 +np.float32,0xbeb1a90e,0x3ff66b9e,3 +np.float32,0xbeba9b5e,0x3ff8d048,3 +np.float32,0xbf272a84,0x4012119e,3 +np.float32,0x3f1ebbd0,0x3f66e889,3 +np.float32,0x3ed3cdc8,0x3f927893,3 +np.float32,0xbf50dfce,0x40219b58,3 +np.float32,0x3f0c02de,0x3f7dfb62,3 +np.float32,0xbf694de3,0x402de8d2,3 +np.float32,0xbeaeb13e,0x3ff5a14f,3 +np.float32,0xbf61aa7a,0x40299702,3 +np.float32,0xbf13d159,0x400bed35,3 +np.float32,0xbeecd034,0x40034e0b,3 +np.float32,0xbe50c2e8,0x3fe35761,3 +np.float32,0x3f714406,0x3eae8e57,3 +np.float32,0xbf1ca486,0x400eabd8,3 +np.float32,0x3f5858cc,0x3f106497,3 +np.float32,0x3f670288,0x3ee41c84,3 +np.float32,0xbf20bd2c,0x400ff9f5,3 +np.float32,0xbe29afd8,0x3fde5eff,3 +np.float32,0xbf635e6a,0x402a80f3,3 +np.float32,0x3e82b7b0,0x3fa80446,3 +np.float32,0x3e982e7c,0x3fa26ece,3 +np.float32,0x3d9f0e00,0x3fbf1c6a,3 +np.float32,0x3e8299b4,0x3fa80c07,3 +np.float32,0xbf0529c1,0x40078ac3,3 +np.float32,0xbf403b8a,0x401ae519,3 +np.float32,0xbe57e09c,0x3fe44027,3 +np.float32,0x3ea1c8f4,0x3f9fe913,3 +np.float32,0xbe216a94,0x3fdd52d0,3 +np.float32,0x3f59c442,0x3f0db709,3 +np.float32,0xbd636260,0x3fd02bdd,3 +np.float32,0xbdbbc788,0x3fd4d08d,3 +np.float32,0x3dd19560,0x3fbbf0a3,3 +np.float32,0x3f060ad4,0x3f828641,3 +np.float32,0x3b102e00,0x3fc8c7c4,3 +np.float32,0x3f42b3b8,0x3f34e5a6,3 +np.float32,0x3f0255ac,0x3f84b071,3 +np.float32,0xbf014898,0x40066996,3 +np.float32,0x3e004dc0,0x3fb8fb51,3 +np.float32,0xbf594ff8,0x40256af2,3 +np.float32,0x3efafddc,0x3f877b80,3 +np.float32,0xbf5f0780,0x40283899,3 +np.float32,0x3ee95e54,0x3f8c7bcc,3 +np.float32,0x3eba2f0c,0x3f996c80,3 +np.float32,0x3f37721c,0x3f459b68,3 +np.float32,0x3e2be780,0x3fb378bf,3 +np.float32,0x3e550270,0x3fae3d69,3 +np.float32,0x3e0f9500,0x3fb70e0a,3 +np.float32,0xbf51974a,0x4021eaf4,3 +np.float32,0x3f393832,0x3f430d05,3 +np.float32,0x3f3df16a,0x3f3c1bd8,3 +np.float32,0xbd662340,0x3fd041ed,3 +np.float32,0x3f7e8418,0x3ddc9fce,3 +np.float32,0xbf392734,0x40184672,3 +np.float32,0x3ee3b278,0x3f8e124e,3 +np.float32,0x3eed4808,0x3f8b61d2,3 +np.float32,0xbf6fccbd,0x40322beb,3 +np.float32,0x3e3ecdd0,0x3fb1123b,3 +np.float32,0x3f4419e0,0x3f32bb45,3 +np.float32,0x3f595e00,0x3f0e7914,3 +np.float32,0xbe8c1486,0x3fec88c6,3 +np.float32,0xbf800000,0x40490fdb,3 +np.float32,0xbdaf5020,0x3fd4084d,3 +np.float32,0xbf407660,0x401afb63,3 +np.float32,0x3f0c3aa8,0x3f7db8b8,3 +np.float32,0xbcdb5980,0x3fcc7d5b,3 +np.float32,0x3f4738d4,0x3f2dd1ed,3 +np.float32,0x3f4d7064,0x3f23ab14,3 +np.float32,0xbeb1d576,0x3ff67774,3 +np.float32,0xbf507166,0x40216bb3,3 +np.float32,0x3e86484c,0x3fa71813,3 +np.float32,0x3f09123e,0x3f80bd35,3 +np.float32,0xbe9abe0e,0x3ff05cb2,3 +np.float32,0x3f3019dc,0x3f4fed21,3 +np.float32,0xbe99e00e,0x3ff0227d,3 +np.float32,0xbf155ec5,0x400c6739,3 +np.float32,0x3f5857ba,0x3f106698,3 +np.float32,0x3edf619c,0x3f8f45fb,3 +np.float32,0xbf5ab76a,0x40261664,3 +np.float32,0x3e54b5a8,0x3fae4738,3 +np.float32,0xbee92772,0x4002ca40,3 +np.float32,0x3f2fd610,0x3f504a7a,3 +np.float32,0xbf38521c,0x4017f97e,3 +np.float32,0xff800000,0x7fc00000,3 +np.float32,0x3e2da348,0x3fb34077,3 +np.float32,0x3f2f85fa,0x3f50b894,3 +np.float32,0x3e88f9c8,0x3fa66551,3 
+np.float32,0xbf61e570,0x4029b648,3 +np.float32,0xbeab362c,0x3ff4b4a1,3 +np.float32,0x3ec6c310,0x3f9607bd,3 +np.float32,0x3f0d7bda,0x3f7c3810,3 +np.float32,0xbeba5d36,0x3ff8bf99,3 +np.float32,0x3f4b0554,0x3f27adda,3 +np.float32,0x3f60f5dc,0x3efebfb3,3 +np.float32,0x3f36ce2c,0x3f468603,3 +np.float32,0xbe70afac,0x3fe76e8e,3 +np.float32,0x3f673350,0x3ee339b5,3 +np.float32,0xbe124cf0,0x3fdb698c,3 +np.float32,0xbf1243dc,0x400b73d0,3 +np.float32,0x3f3c8850,0x3f3e3407,3 +np.float32,0x3ea02f24,0x3fa05500,3 +np.float32,0xbeffed34,0x400607db,3 +np.float32,0x3f5c75c2,0x3f08817c,3 +np.float32,0x3f4b2fbe,0x3f27682d,3 +np.float32,0x3ee47c34,0x3f8dd9f9,3 +np.float32,0x3f50d48c,0x3f1de584,3 +np.float32,0x3f12dc5e,0x3f75b628,3 +np.float32,0xbefe7e4a,0x4005d2f4,3 +np.float32,0xbec2e846,0x3ffb0cbc,3 +np.float32,0xbedc3036,0x4000fb80,3 +np.float32,0xbf48aedc,0x401e311f,3 +np.float32,0x3f6e032e,0x3ec11363,3 +np.float32,0xbf60de15,0x40292b72,3 +np.float32,0x3f06585e,0x3f8258ba,3 +np.float32,0x3ef49b98,0x3f894e66,3 +np.float32,0x3cc5fe00,0x3fc5f7cf,3 +np.float32,0xbf7525c5,0x40365c2c,3 +np.float32,0x3f64f9f8,0x3eed5fb2,3 +np.float32,0x3e8849c0,0x3fa692fb,3 +np.float32,0x3e50c878,0x3faec79e,3 +np.float32,0x3ed61530,0x3f91d831,3 +np.float32,0xbf54872e,0x40233724,3 +np.float32,0xbf52ee7f,0x4022815e,3 +np.float32,0xbe708c24,0x3fe769fc,3 +np.float32,0xbf26fc54,0x40120260,3 +np.float32,0x3f226e8a,0x3f6228db,3 +np.float32,0xbef30406,0x40042eb8,3 +np.float32,0x3f5d996c,0x3f063f5f,3 +np.float32,0xbf425f9c,0x401bb618,3 +np.float32,0x3e4bb260,0x3faf6dc9,3 +np.float32,0xbe52d5a4,0x3fe39b29,3 +np.float32,0xbe169cf0,0x3fdbf505,3 +np.float32,0xbedfc422,0x40017a8e,3 +np.float32,0x3d8ffef0,0x3fc00e05,3 +np.float32,0xbf12bdab,0x400b98f2,3 +np.float32,0x3f295d0a,0x3f590e88,3 +np.float32,0x3f49d8e4,0x3f2998aa,3 +np.float32,0xbef914f4,0x40050c12,3 +np.float32,0xbf4ea2b5,0x4020a61e,3 +np.float32,0xbf3a89e5,0x4018c762,3 +np.float32,0x3e8707b4,0x3fa6e67a,3 +np.float32,0x3ac55400,0x3fc8de86,3 +np.float32,0x800000,0x3fc90fdb,3 +np.float32,0xbeb9762c,0x3ff8819b,3 +np.float32,0xbebbe23c,0x3ff92815,3 +np.float32,0xbf598c88,0x402587a1,3 +np.float32,0x3e95d864,0x3fa30b4a,3 +np.float32,0x3f7f6f40,0x3d882486,3 +np.float32,0xbf53658c,0x4022b604,3 +np.float32,0xbf2a35f2,0x401314ad,3 +np.float32,0x3eb14380,0x3f9bcf28,3 +np.float32,0x3f0e0c64,0x3f7b8a7a,3 +np.float32,0x3d349920,0x3fc36a9a,3 +np.float32,0xbec2092c,0x3ffad071,3 +np.float32,0xbe1d08e8,0x3fdcc4e0,3 +np.float32,0xbf008968,0x40063243,3 +np.float32,0xbefad582,0x40054c51,3 +np.float32,0xbe52d010,0x3fe39a72,3 +np.float32,0x3f4afdac,0x3f27ba6b,3 +np.float32,0x3f6c483c,0x3eca4408,3 +np.float32,0xbef3cb68,0x40044b0c,3 +np.float32,0x3e94687c,0x3fa36b6f,3 +np.float32,0xbf64ae5c,0x402b39bb,3 +np.float32,0xbf0022b4,0x40061497,3 +np.float32,0x80000001,0x3fc90fdb,3 +np.float32,0x3f25bcd0,0x3f5dda4b,3 +np.float32,0x3ed91b40,0x3f9102d7,3 +np.float32,0x3f800000,0x0,3 +np.float32,0xbebc6aca,0x3ff94cca,3 +np.float32,0x3f239e9a,0x3f609e7d,3 +np.float32,0xbf7312be,0x4034a305,3 +np.float32,0x3efd16d0,0x3f86e148,3 +np.float32,0x3f52753a,0x3f1b0f72,3 +np.float32,0xbde58960,0x3fd7702c,3 +np.float32,0x3ef88580,0x3f883099,3 +np.float32,0x3eebaefc,0x3f8bd51e,3 +np.float32,0x3e877d2c,0x3fa6c807,3 +np.float32,0x3f1a0324,0x3f6cdf32,3 +np.float32,0xbedfe20a,0x40017eb6,3 +np.float32,0x3f205a3c,0x3f64d69d,3 +np.float32,0xbeed5b7c,0x400361b0,3 +np.float32,0xbf69ba10,0x402e2ad0,3 +np.float32,0x3c4fe200,0x3fc77014,3 +np.float32,0x3f043310,0x3f839a69,3 +np.float32,0xbeaf359a,0x3ff5c485,3 +np.float32,0x3db3f110,0x3fbdcd12,3 
+np.float32,0x3e24af88,0x3fb462ed,3 +np.float32,0xbf34e858,0x4016c1c8,3 +np.float32,0x3f3334f2,0x3f4b9cd0,3 +np.float32,0xbf145882,0x400c16a2,3 +np.float32,0xbf541c38,0x40230748,3 +np.float32,0x3eba7e10,0x3f99574b,3 +np.float32,0xbe34c6e0,0x3fdfc731,3 +np.float32,0xbe957abe,0x3feefbf0,3 +np.float32,0xbf595a59,0x40256fdb,3 +np.float32,0xbdedc7b8,0x3fd7f4f0,3 +np.float32,0xbf627c02,0x402a06a9,3 +np.float32,0x3f339b78,0x3f4b0d18,3 +np.float32,0xbf2df6d2,0x40145929,3 +np.float32,0x3f617726,0x3efc9fd8,3 +np.float32,0xbee3a8fc,0x40020561,3 +np.float32,0x3efe9f68,0x3f867043,3 +np.float32,0xbf2c3e76,0x4013c3ba,3 +np.float32,0xbf218f28,0x40103d84,3 +np.float32,0xbf1ea847,0x400f4f7f,3 +np.float32,0x3ded9160,0x3fba2e31,3 +np.float32,0x3bce1b00,0x3fc841bf,3 +np.float32,0xbe90566e,0x3feda46a,3 +np.float32,0xbf5ea2ba,0x4028056b,3 +np.float32,0x3f538e62,0x3f191ee6,3 +np.float32,0xbf59e054,0x4025af74,3 +np.float32,0xbe8c98ba,0x3fecab24,3 +np.float32,0x3ee7bdb0,0x3f8cf0b7,3 +np.float32,0xbf2eb828,0x40149b2b,3 +np.float32,0xbe5eb904,0x3fe52068,3 +np.float32,0xbf16b422,0x400cd08d,3 +np.float32,0x3f1ab9b4,0x3f6bfa58,3 +np.float32,0x3dc23040,0x3fbce82a,3 +np.float32,0xbf29d9e7,0x4012f5e5,3 +np.float32,0xbf38f30a,0x40183393,3 +np.float32,0x3e88e798,0x3fa66a09,3 +np.float32,0x3f1d07e6,0x3f69124f,3 +np.float32,0xbe1d3d34,0x3fdccb7e,3 +np.float32,0xbf1715be,0x400ceec2,3 +np.float32,0x3f7a0eac,0x3e5d11f7,3 +np.float32,0xbe764924,0x3fe82707,3 +np.float32,0xbf01a1f8,0x4006837c,3 +np.float32,0x3f2be730,0x3f55a661,3 +np.float32,0xbf7bb070,0x403d4ce5,3 +np.float32,0xbd602110,0x3fd011c9,3 +np.float32,0x3f5d080c,0x3f07609d,3 +np.float32,0xbda20400,0x3fd332d1,3 +np.float32,0x3f1c62da,0x3f69e308,3 +np.float32,0xbf2c6916,0x4013d223,3 +np.float32,0xbf44f8fd,0x401cb816,3 +np.float32,0x3f4da392,0x3f235539,3 +np.float32,0x3e9e8aa0,0x3fa0c3a0,3 +np.float32,0x3e9633c4,0x3fa2f366,3 +np.float32,0xbf0422ab,0x40073ddd,3 +np.float32,0x3f518386,0x3f1cb603,3 +np.float32,0x3f24307a,0x3f5fe096,3 +np.float32,0xbdfb4220,0x3fd8ce24,3 +np.float32,0x3f179d28,0x3f6fdc7d,3 +np.float32,0xbecc2df0,0x3ffd911e,3 +np.float32,0x3f3dff0c,0x3f3c0782,3 +np.float32,0xbf58c4d8,0x4025295b,3 +np.float32,0xbdcf8438,0x3fd60dd3,3 +np.float32,0xbeeaf1b2,0x40030aa7,3 +np.float32,0xbf298a28,0x4012db45,3 +np.float32,0x3f6c4dec,0x3eca2678,3 +np.float32,0x3f4d1ac8,0x3f243a59,3 +np.float32,0x3f62cdfa,0x3ef6e8f8,3 +np.float32,0xbee8acce,0x4002b909,3 +np.float32,0xbd5f2af0,0x3fd00a15,3 +np.float32,0x3f5fde8e,0x3f01a453,3 +np.float32,0x3e95233c,0x3fa33aa4,3 +np.float32,0x3ecd2a60,0x3f9449be,3 +np.float32,0x3f10aa86,0x3f78619d,3 +np.float32,0x3f3888e8,0x3f440a70,3 +np.float32,0x3eeb5bfc,0x3f8bec7d,3 +np.float32,0xbe12d654,0x3fdb7ae6,3 +np.float32,0x3eca3110,0x3f951931,3 +np.float32,0xbe2d1b7c,0x3fdece05,3 +np.float32,0xbf29e9db,0x4012fb3a,3 +np.float32,0xbf0c50b8,0x4009a845,3 +np.float32,0xbed9f0e4,0x4000abef,3 +np.float64,0x3fd078ec5ba0f1d8,0x3ff4f7c00595a4d3,1 +np.float64,0xbfdbc39743b7872e,0x400027f85bce43b2,1 +np.float64,0xbfacd2707c39a4e0,0x3ffa08ae1075d766,1 +np.float64,0xbfc956890f32ad14,0x3ffc52308e7285fd,1 +np.float64,0xbf939c2298273840,0x3ff9706d18e6ea6b,1 +np.float64,0xbfe0d7048961ae09,0x4000fff4406bd395,1 +np.float64,0xbfe9d19b86f3a337,0x4004139bc683a69f,1 +np.float64,0x3fd35c7f90a6b900,0x3ff437220e9123f8,1 +np.float64,0x3fdddca171bbb944,0x3ff15da61e61ec08,1 +np.float64,0x3feb300de9f6601c,0x3fe1c6fadb68cdca,1 +np.float64,0xbfef1815327e302a,0x400739808fc6f964,1 +np.float64,0xbfe332d78e6665af,0x4001b6c4ef922f7c,1 
+np.float64,0xbfedbf4dfb7b7e9c,0x40061cefed62a58b,1 +np.float64,0xbfd8dcc7e3b1b990,0x3fff84307713c2c3,1 +np.float64,0xbfedaf161c7b5e2c,0x400612027c1b2b25,1 +np.float64,0xbfed9bde897b37bd,0x4006053f05bd7d26,1 +np.float64,0xbfe081ebc26103d8,0x4000e70755eb66e0,1 +np.float64,0xbfe0366f9c606cdf,0x4000d11212f29afd,1 +np.float64,0xbfc7c115212f822c,0x3ffc1e8c9d58f7db,1 +np.float64,0x3fd8dd9a78b1bb34,0x3ff2bf8d0f4c9376,1 +np.float64,0xbfe54eff466a9dfe,0x4002655950b611f4,1 +np.float64,0xbfe4aad987e955b3,0x40022efb19882518,1 +np.float64,0x3f70231ca0204600,0x3ff911d834e7abf4,1 +np.float64,0x3fede01d047bc03a,0x3fd773cecbd8561b,1 +np.float64,0xbfd6a00d48ad401a,0x3ffee9fd7051633f,1 +np.float64,0x3fd44f3d50a89e7c,0x3ff3f74dd0fc9c91,1 +np.float64,0x3fe540f0d0ea81e2,0x3feb055a7c7d43d6,1 +np.float64,0xbf3ba2e200374800,0x3ff923b582650c6c,1 +np.float64,0x3fe93b2d3f72765a,0x3fe532fa15331072,1 +np.float64,0x3fee8ce5a17d19cc,0x3fd35666eefbe336,1 +np.float64,0x3fe55d5f8feabac0,0x3feadf3dcfe251d4,1 +np.float64,0xbfd1d2ede8a3a5dc,0x3ffda600041ac884,1 +np.float64,0xbfee41186e7c8231,0x40067a625cc6f64d,1 +np.float64,0x3fe521a8b9ea4352,0x3feb2f1a6c8084e5,1 +np.float64,0x3fc65378ef2ca6f0,0x3ff653dfe81ee9f2,1 +np.float64,0x3fdaba0fbcb57420,0x3ff23d630995c6ba,1 +np.float64,0xbfe6b7441d6d6e88,0x4002e182539a2994,1 +np.float64,0x3fda00b6dcb4016c,0x3ff2703d516f28e7,1 +np.float64,0xbfe8699f01f0d33e,0x400382326920ea9e,1 +np.float64,0xbfef5889367eb112,0x4007832af5983793,1 +np.float64,0x3fefb57c8aff6afa,0x3fc14700ab38dcef,1 +np.float64,0xbfda0dfdaab41bfc,0x3fffd75b6fd497f6,1 +np.float64,0xbfb059c36620b388,0x3ffa27c528b97a42,1 +np.float64,0xbfdd450ab1ba8a16,0x40005dcac6ab50fd,1 +np.float64,0xbfe54d6156ea9ac2,0x400264ce9f3f0fb9,1 +np.float64,0xbfe076e94760edd2,0x4000e3d1374884da,1 +np.float64,0xbfc063286720c650,0x3ffb2fd1d6bff0ef,1 +np.float64,0xbfe24680f2e48d02,0x40016ddfbb5bcc0e,1 +np.float64,0xbfdc9351d2b926a4,0x400044e3756fb765,1 +np.float64,0x3fefb173d8ff62e8,0x3fc1bd5626f80850,1 +np.float64,0x3fe77c117a6ef822,0x3fe7e57089bad2ec,1 +np.float64,0xbfddbcebf7bb79d8,0x40006eadb60406b3,1 +np.float64,0xbfecf6625ff9ecc5,0x40059e6c6961a6db,1 +np.float64,0x3fdc8950b8b912a0,0x3ff1bcfb2e27795b,1 +np.float64,0xbfeb2fa517765f4a,0x4004b00aee3e6888,1 +np.float64,0x3fd0efc88da1df90,0x3ff4d8f7cbd8248a,1 +np.float64,0xbfe6641a2becc834,0x4002c43362c1bd0f,1 +np.float64,0xbfe28aec0fe515d8,0x400182c91d4df039,1 +np.float64,0xbfd5ede8d0abdbd2,0x3ffeba7baef05ae8,1 +np.float64,0xbfbd99702a3b32e0,0x3ffafca21c1053f1,1 +np.float64,0x3f96f043f82de080,0x3ff8c6384d5eb610,1 +np.float64,0xbfe5badbc9eb75b8,0x400289c8cd5873d1,1 +np.float64,0x3fe5c6bf95eb8d80,0x3fea5093e9a3e43e,1 +np.float64,0x3fb1955486232ab0,0x3ff8086d4c3e71d5,1 +np.float64,0xbfea145f397428be,0x4004302237a35871,1 +np.float64,0xbfdabe685db57cd0,0x400003e2e29725fb,1 +np.float64,0xbfefc79758ff8f2f,0x400831814e23bfc8,1 +np.float64,0x3fd7edb66cafdb6c,0x3ff3006c5123bfaf,1 +np.float64,0xbfeaf7644bf5eec8,0x400495a7963ce4ed,1 +np.float64,0x3fdf838d78bf071c,0x3ff0e527eed73800,1 +np.float64,0xbfd1a0165ba3402c,0x3ffd98c5ab76d375,1 +np.float64,0x3fd75b67a9aeb6d0,0x3ff327c8d80b17cf,1 +np.float64,0x3fc2aa9647255530,0x3ff6ca854b157df1,1 +np.float64,0xbfe0957fd4612b00,0x4000ecbf3932becd,1 +np.float64,0x3fda1792c0b42f24,0x3ff269fbb2360487,1 +np.float64,0x3fd480706ca900e0,0x3ff3ea53a6aa3ae8,1 +np.float64,0xbfd0780ed9a0f01e,0x3ffd4bfd544c7d47,1 +np.float64,0x3feeec0cd77dd81a,0x3fd0a8a241fdb441,1 +np.float64,0x3fcfa933e93f5268,0x3ff5223478621a6b,1 +np.float64,0x3fdad2481fb5a490,0x3ff236b86c6b2b49,1 
+np.float64,0x3fe03b129de07626,0x3ff09f21fb868451,1 +np.float64,0xbfc01212cd202424,0x3ffb259a07159ae9,1 +np.float64,0x3febdb912df7b722,0x3fe0768e20dac8c9,1 +np.float64,0xbfbf2148763e4290,0x3ffb154c361ce5bf,1 +np.float64,0xbfb1a7eb1e234fd8,0x3ffa3cb37ac4a176,1 +np.float64,0xbfe26ad1ec64d5a4,0x400178f480ecce8d,1 +np.float64,0x3fe6d1cd1b6da39a,0x3fe8dc20ec4dad3b,1 +np.float64,0xbfede0e53dfbc1ca,0x4006340d3bdd7c97,1 +np.float64,0xbfe8fd1bd9f1fa38,0x4003bc3477f93f40,1 +np.float64,0xbfe329d0f26653a2,0x4001b3f345af5648,1 +np.float64,0xbfe4bb20eee97642,0x40023451404d6d08,1 +np.float64,0x3fb574832e2ae900,0x3ff7ca4bed0c7110,1 +np.float64,0xbfdf3c098fbe7814,0x4000a525bb72d659,1 +np.float64,0x3fa453e6d428a7c0,0x3ff87f512bb9b0c6,1 +np.float64,0x3faaec888435d920,0x3ff84a7d9e4def63,1 +np.float64,0xbfcdc240df3b8480,0x3ffce30ece754e7f,1 +np.float64,0xbf8c3220f0386440,0x3ff95a600ae6e157,1 +np.float64,0x3fe806076c700c0e,0x3fe71784a96c76eb,1 +np.float64,0x3fedf9b0e17bf362,0x3fd6e35fc0a7b6c3,1 +np.float64,0xbfe1b48422636908,0x400141bd8ed251bc,1 +np.float64,0xbfe82e2817705c50,0x40036b5a5556d021,1 +np.float64,0xbfc8ef8ff931df20,0x3ffc450ffae7ce58,1 +np.float64,0xbfe919fa94f233f5,0x4003c7cce4697fe8,1 +np.float64,0xbfc3ace4a72759c8,0x3ffb9a197bb22651,1 +np.float64,0x3fe479f71ee8f3ee,0x3fec0bd2f59097aa,1 +np.float64,0xbfeeb54a967d6a95,0x4006da12c83649c5,1 +np.float64,0x3fe5e74ea8ebce9e,0x3fea2407cef0f08c,1 +np.float64,0x3fb382baf2270570,0x3ff7e98213b921ba,1 +np.float64,0xbfdd86fd3cbb0dfa,0x40006712952ddbcf,1 +np.float64,0xbfd250eb52a4a1d6,0x3ffdc6d56253b1cd,1 +np.float64,0x3fea30c4ed74618a,0x3fe3962deba4f30e,1 +np.float64,0x3fc895963d312b30,0x3ff60a5d52fcbccc,1 +np.float64,0x3fe9cc4f6273989e,0x3fe442740942c80f,1 +np.float64,0xbfe8769f5cf0ed3f,0x4003873b4cb5bfce,1 +np.float64,0xbfe382f3726705e7,0x4001cfeb3204d110,1 +np.float64,0x3fbfe9a9163fd350,0x3ff7220bd2b97c8f,1 +np.float64,0xbfca6162bb34c2c4,0x3ffc743f939358f1,1 +np.float64,0x3fe127a014e24f40,0x3ff0147c4bafbc39,1 +np.float64,0x3fee9cdd2a7d39ba,0x3fd2e9ef45ab122f,1 +np.float64,0x3fa9ffb97c33ff80,0x3ff851e69fa3542c,1 +np.float64,0x3fd378f393a6f1e8,0x3ff42faafa77de56,1 +np.float64,0xbfe4df1e1669be3c,0x400240284df1c321,1 +np.float64,0x3fed0ed79bfa1db0,0x3fdba89060aa96fb,1 +np.float64,0x3fdef2ee52bde5dc,0x3ff10e942244f4f1,1 +np.float64,0xbfdab38f3ab5671e,0x40000264d8d5b49b,1 +np.float64,0x3fbe95a96e3d2b50,0x3ff73774cb59ce2d,1 +np.float64,0xbfe945653af28aca,0x4003d9657bf129c2,1 +np.float64,0xbfb18f3f2a231e80,0x3ffa3b27cba23f50,1 +np.float64,0xbfef50bf22fea17e,0x40077998a850082c,1 +np.float64,0xbfc52b8c212a5718,0x3ffbca8d6560a2da,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3fc1e3a02d23c740,0x3ff6e3a5fcac12a4,1 +np.float64,0xbfeb5e4ea5f6bc9d,0x4004c65abef9426f,1 +np.float64,0xbfe425b132684b62,0x400203c29608b00d,1 +np.float64,0xbfbfa1c19e3f4380,0x3ffb1d6367711158,1 +np.float64,0x3fbba2776e3744f0,0x3ff766f6df586fad,1 +np.float64,0xbfb5d0951e2ba128,0x3ffa7f712480b25e,1 +np.float64,0xbfe949fdab7293fb,0x4003db4530a18507,1 +np.float64,0xbfcf13519b3e26a4,0x3ffd0e6f0a6c38ee,1 +np.float64,0x3f91e6d72823cdc0,0x3ff8da5f08909b6e,1 +np.float64,0x3f78a2e360314600,0x3ff909586727caef,1 +np.float64,0xbfe1ae7e8fe35cfd,0x40013fef082caaa3,1 +np.float64,0x3fe97a6dd1f2f4dc,0x3fe4cb4b99863478,1 +np.float64,0xbfcc1e1e69383c3c,0x3ffcad250a949843,1 +np.float64,0x3faccb797c399700,0x3ff83b8066b49330,1 +np.float64,0x3fe7a2647a6f44c8,0x3fe7acceae6ec425,1 +np.float64,0xbfec3bfcf0f877fa,0x4005366af5a7175b,1 +np.float64,0xbfe2310b94646217,0x400167588fceb228,1 
+np.float64,0x3feb167372762ce6,0x3fe1f74c0288fad8,1 +np.float64,0xbfb722b4ee2e4568,0x3ffa94a81b94dfca,1 +np.float64,0x3fc58da9712b1b50,0x3ff66cf8f072aa14,1 +np.float64,0xbfe7fff9d6effff4,0x400359d01b8141de,1 +np.float64,0xbfd56691c5aacd24,0x3ffe9686697797e8,1 +np.float64,0x3fe3ab0557e7560a,0x3fed1593959ef8e8,1 +np.float64,0x3fdd458995ba8b14,0x3ff1883d6f22a322,1 +np.float64,0x3fe7bbed2cef77da,0x3fe786d618094cda,1 +np.float64,0x3fa31a30c4263460,0x3ff88920b936fd79,1 +np.float64,0x8010000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfdc5effbdb8be00,0x40003d95fe0dff11,1 +np.float64,0x3febfdad7e77fb5a,0x3fe030b5297dbbdd,1 +np.float64,0x3fe4f3f3b2e9e7e8,0x3feb6bc59eeb2be2,1 +np.float64,0xbfe44469fd6888d4,0x40020daa5488f97a,1 +np.float64,0xbfe19fddb0e33fbc,0x40013b8c902b167b,1 +np.float64,0x3fa36ad17c26d5a0,0x3ff8869b3e828134,1 +np.float64,0x3fcf23e6c93e47d0,0x3ff5336491a65d1e,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfe375f4cee6ebea,0x4001cbd2ba42e8b5,1 +np.float64,0xbfaef1215c3de240,0x3ffa19ab02081189,1 +np.float64,0xbfec39c59c78738b,0x4005353dc38e3d78,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfec09bb7b781377,0x40051c0a5754cb3a,1 +np.float64,0x3fe8301f2870603e,0x3fe6d783c5ef0944,1 +np.float64,0xbfed418c987a8319,0x4005cbae1b8693d1,1 +np.float64,0xbfdc16e7adb82dd0,0x4000338b634eaf03,1 +np.float64,0x3fd5d361bdaba6c4,0x3ff390899300a54c,1 +np.float64,0xbff0000000000000,0x400921fb54442d18,1 +np.float64,0x3fd5946232ab28c4,0x3ff3a14767813f29,1 +np.float64,0x3fe833e5fef067cc,0x3fe6d1be720edf2d,1 +np.float64,0x3fedf746a67bee8e,0x3fd6f127fdcadb7b,1 +np.float64,0x3fd90353d3b206a8,0x3ff2b54f7d369ba9,1 +np.float64,0x3fec4b4b72f89696,0x3fdf1b38d2e93532,1 +np.float64,0xbfe9c67596f38ceb,0x40040ee5f524ce03,1 +np.float64,0x3fd350d91aa6a1b4,0x3ff43a303c0da27f,1 +np.float64,0x3fd062603ba0c4c0,0x3ff4fd9514b935d8,1 +np.float64,0xbfe24c075f64980e,0x40016f8e9f2663b3,1 +np.float64,0x3fdaa546eeb54a8c,0x3ff2431a88fef1d5,1 +np.float64,0x3fe92b8151f25702,0x3fe54c67e005cbf9,1 +np.float64,0xbfe1be8b8a637d17,0x400144c078f67c6e,1 +np.float64,0xbfe468a1d7e8d144,0x40021964b118cbf4,1 +np.float64,0xbfdc6de4fab8dbca,0x40003fa9e27893d8,1 +np.float64,0xbfe3c2788ae784f1,0x4001e407ba3aa956,1 +np.float64,0xbfe2bf1542e57e2a,0x400192d4a9072016,1 +np.float64,0xbfe6982f4c6d305e,0x4002d681b1991bbb,1 +np.float64,0x3fdbceb1c4b79d64,0x3ff1f0f117b9d354,1 +np.float64,0x3fdb3705e7b66e0c,0x3ff21af01ca27ace,1 +np.float64,0x3fe3e6358ee7cc6c,0x3fecca4585053983,1 +np.float64,0xbfe16d6a9a62dad5,0x40012c7988aee247,1 +np.float64,0xbfce66e4413ccdc8,0x3ffcf83b08043a0c,1 +np.float64,0xbfeb6cd46876d9a9,0x4004cd61733bfb79,1 +np.float64,0xbfdb1cdd64b639ba,0x400010e6cf087cb7,1 +np.float64,0xbfe09e4e30e13c9c,0x4000ef5277c47721,1 +np.float64,0xbfee88dd127d11ba,0x4006b3cd443643ac,1 +np.float64,0xbf911e06c8223c00,0x3ff966744064fb05,1 +np.float64,0xbfe8f22bc471e458,0x4003b7d5513af295,1 +np.float64,0x3fe3d7329567ae66,0x3fecdd6c241f83ee,1 +np.float64,0x3fc8a9404b315280,0x3ff607dc175edf3f,1 +np.float64,0x3fe7eb80ad6fd702,0x3fe73f8fdb3e6a6c,1 +np.float64,0x3fef0931e37e1264,0x3fcf7fde80a3c5ab,1 +np.float64,0x3fe2ed3c3fe5da78,0x3fee038334cd1860,1 +np.float64,0x3fe251fdb8e4a3fc,0x3feec26dc636ac31,1 +np.float64,0x3feb239436764728,0x3fe1de9462455da7,1 +np.float64,0xbfe63fd7eeec7fb0,0x4002b78cfa3d2fa6,1 +np.float64,0x3fdd639cb5bac738,0x3ff17fc7d92b3eee,1 +np.float64,0x3fd0a7a13fa14f44,0x3ff4eba95c559c84,1 +np.float64,0x3fe804362d70086c,0x3fe71a44cd91ffa4,1 +np.float64,0xbfe0fecf6e61fd9f,0x40010bac8edbdc4f,1 
+np.float64,0x3fcb74acfd36e958,0x3ff5ac84437f1b7c,1 +np.float64,0x3fe55053e1eaa0a8,0x3feaf0bf76304c30,1 +np.float64,0x3fc06b508d20d6a0,0x3ff7131da17f3902,1 +np.float64,0x3fdd78750fbaf0ec,0x3ff179e97fbf7f65,1 +np.float64,0x3fe44cb946689972,0x3fec46859b5da6be,1 +np.float64,0xbfeb165a7ff62cb5,0x4004a41c9cc9589e,1 +np.float64,0x3fe01ffb2b603ff6,0x3ff0aed52bf1c3c1,1 +np.float64,0x3f983c60a83078c0,0x3ff8c107805715ab,1 +np.float64,0x3fd8b5ff13b16c00,0x3ff2ca4a837a476a,1 +np.float64,0x3fc80510a1300a20,0x3ff61cc3b4af470b,1 +np.float64,0xbfd3935b06a726b6,0x3ffe1b3a2066f473,1 +np.float64,0xbfdd4a1f31ba943e,0x40005e81979ed445,1 +np.float64,0xbfa76afdd42ed600,0x3ff9dd63ffba72d2,1 +np.float64,0x3fe7e06d496fc0da,0x3fe7503773566707,1 +np.float64,0xbfea5fbfe874bf80,0x40045106af6c538f,1 +np.float64,0x3fee000c487c0018,0x3fd6bef1f8779d88,1 +np.float64,0xbfb39f4ee2273ea0,0x3ffa5c3f2b3888ab,1 +np.float64,0x3feb9247b0772490,0x3fe1092d2905efce,1 +np.float64,0x3fdaa39b4cb54738,0x3ff243901da0da17,1 +np.float64,0x3fcd5b2b493ab658,0x3ff56e262e65b67d,1 +np.float64,0x3fcf82512f3f04a0,0x3ff52738847c55f2,1 +np.float64,0x3fe2af5e0c655ebc,0x3fee4ffab0c82348,1 +np.float64,0xbfec0055d0f800ac,0x4005172d325933e8,1 +np.float64,0x3fe71da9336e3b52,0x3fe86f2e12f6e303,1 +np.float64,0x3fbefab0723df560,0x3ff731188ac716ec,1 +np.float64,0xbfe11dca28623b94,0x400114d3d4ad370d,1 +np.float64,0x3fbcbda8ca397b50,0x3ff755281078abd4,1 +np.float64,0x3fe687c7126d0f8e,0x3fe945099a7855cc,1 +np.float64,0xbfecde510579bca2,0x400590606e244591,1 +np.float64,0xbfd72de681ae5bce,0x3fff0ff797ad1755,1 +np.float64,0xbfe7c0f7386f81ee,0x40034226e0805309,1 +np.float64,0x3fd8d55619b1aaac,0x3ff2c1cb3267b14e,1 +np.float64,0x3fecd7a2ad79af46,0x3fdcabbffeaa279e,1 +np.float64,0x3fee7fb1a8fcff64,0x3fd3ae620286fe19,1 +np.float64,0xbfc5f3a3592be748,0x3ffbe3ed204d9842,1 +np.float64,0x3fec9e5527793caa,0x3fddb00bc8687e4b,1 +np.float64,0x3fc35dc70f26bb90,0x3ff6b3ded7191e33,1 +np.float64,0x3fda91c07ab52380,0x3ff24878848fec8f,1 +np.float64,0xbfe12cde1fe259bc,0x4001194ab99d5134,1 +np.float64,0xbfd35ab736a6b56e,0x3ffe0c5ce8356d16,1 +np.float64,0x3fc9c94123339280,0x3ff5e3239f3ad795,1 +np.float64,0xbfe72f54926e5ea9,0x40030c95d1d02b56,1 +np.float64,0xbfee283186fc5063,0x40066786bd0feb79,1 +np.float64,0xbfe7b383f56f6708,0x40033d23ef0e903d,1 +np.float64,0x3fd6037327ac06e8,0x3ff383bf2f311ddb,1 +np.float64,0x3fe0e344b561c68a,0x3ff03cd90fd4ba65,1 +np.float64,0xbfef0ff54b7e1feb,0x400730fa5fce381e,1 +np.float64,0x3fd269929da4d324,0x3ff476b230136d32,1 +np.float64,0xbfbc5fb9f638bf70,0x3ffae8e63a4e3234,1 +np.float64,0xbfe2e8bc84e5d179,0x40019fb5874f4310,1 +np.float64,0xbfd7017413ae02e8,0x3fff040d843c1531,1 +np.float64,0x3fefd362fa7fa6c6,0x3fbababc3ddbb21d,1 +np.float64,0x3fecb62ed3f96c5e,0x3fdd44ba77ccff94,1 +np.float64,0xbfb16fad5222df58,0x3ffa392d7f02b522,1 +np.float64,0x3fbcf4abc639e950,0x3ff751b23c40e27f,1 +np.float64,0x3fe128adbce2515c,0x3ff013dc91db04b5,1 +np.float64,0x3fa5dd9d842bbb40,0x3ff87300c88d512f,1 +np.float64,0xbfe61efcaf6c3dfa,0x4002ac27117f87c9,1 +np.float64,0x3feffe1233fffc24,0x3f9638d3796a4954,1 +np.float64,0xbfe78548b66f0a92,0x40032c0447b7bfe2,1 +np.float64,0x3fe7bd38416f7a70,0x3fe784e86d6546b6,1 +np.float64,0x3fe0d6bc5961ad78,0x3ff0443899e747ac,1 +np.float64,0xbfd0bb6e47a176dc,0x3ffd5d6dff390d41,1 +np.float64,0xbfec1d16b8f83a2e,0x40052620378d3b78,1 +np.float64,0x3fe9bbec20f377d8,0x3fe45e167c7a3871,1 +np.float64,0xbfeed81d9dfdb03b,0x4006f9dec2db7310,1 +np.float64,0xbfe1e35179e3c6a3,0x40014fd1b1186ac0,1 +np.float64,0xbfc9c7e605338fcc,0x3ffc60a6bd1a7126,1 
+np.float64,0x3feec92810fd9250,0x3fd1afde414ab338,1 +np.float64,0xbfeb9f1d90773e3b,0x4004e606b773f5b0,1 +np.float64,0x3fcbabdf6b3757c0,0x3ff5a573866404af,1 +np.float64,0x3fe9f4e1fff3e9c4,0x3fe3fd7b6712dd7b,1 +np.float64,0xbfe6c0175ded802e,0x4002e4a4dc12f3fe,1 +np.float64,0xbfeefc96f37df92e,0x40071d367cd721ff,1 +np.float64,0xbfeaab58dc7556b2,0x400472ce37e31e50,1 +np.float64,0xbfc62668772c4cd0,0x3ffbea5e6c92010a,1 +np.float64,0x3fafe055fc3fc0a0,0x3ff822ce6502519a,1 +np.float64,0x3fd7b648ffaf6c90,0x3ff30f5a42f11418,1 +np.float64,0xbfe934fe827269fd,0x4003d2b9fed9e6ad,1 +np.float64,0xbfe6d691f2edad24,0x4002eca6a4b1797b,1 +np.float64,0x3fc7e62ced2fcc58,0x3ff620b1f44398b7,1 +np.float64,0xbfc89be9f33137d4,0x3ffc3a67a497f59c,1 +np.float64,0xbfe7793d536ef27a,0x40032794bf14dd64,1 +np.float64,0x3fde55a02dbcab40,0x3ff13b5f82d223e4,1 +np.float64,0xbfc8eabd7b31d57c,0x3ffc4472a81cb6d0,1 +np.float64,0x3fddcb5468bb96a8,0x3ff162899c381f2e,1 +np.float64,0xbfec7554d8f8eaaa,0x40055550e18ec463,1 +np.float64,0x3fd0b6e8b6a16dd0,0x3ff4e7b4781a50e3,1 +np.float64,0x3fedaae01b7b55c0,0x3fd8964916cdf53d,1 +np.float64,0x3fe0870f8a610e20,0x3ff072e7db95c2a2,1 +np.float64,0xbfec3e3ce2787c7a,0x4005379d0f6be873,1 +np.float64,0xbfe65502586caa04,0x4002beecff89147f,1 +np.float64,0xbfe0df39a961be74,0x4001025e36d1c061,1 +np.float64,0xbfb5d8edbe2bb1d8,0x3ffa7ff72b7d6a2b,1 +np.float64,0xbfde89574bbd12ae,0x40008ba4cd74544d,1 +np.float64,0xbfe72938f0ee5272,0x40030a5efd1acb6d,1 +np.float64,0xbfcd500d133aa01c,0x3ffcd462f9104689,1 +np.float64,0x3fe0350766606a0e,0x3ff0a2a3664e2c14,1 +np.float64,0xbfc892fb573125f8,0x3ffc3944641cc69d,1 +np.float64,0xbfba7dc7c634fb90,0x3ffaca9a6a0ffe61,1 +np.float64,0xbfeac94478759289,0x40048068a8b83e45,1 +np.float64,0xbfe8f60c1af1ec18,0x4003b961995b6e51,1 +np.float64,0x3fea1c0817743810,0x3fe3ba28c1643cf7,1 +np.float64,0xbfe42a0fefe85420,0x4002052aadd77f01,1 +np.float64,0x3fd2c61c56a58c38,0x3ff45e84cb9a7fa9,1 +np.float64,0xbfd83fb7cdb07f70,0x3fff59ab4790074c,1 +np.float64,0x3fd95e630fb2bcc8,0x3ff29c8bee1335ad,1 +np.float64,0x3feee88f387dd11e,0x3fd0c3ad3ded4094,1 +np.float64,0x3fe061291160c252,0x3ff0890010199bbc,1 +np.float64,0xbfdc7db3b5b8fb68,0x400041dea3759443,1 +np.float64,0x3fee23b320fc4766,0x3fd5ee73d7aa5c56,1 +np.float64,0xbfdc25c590b84b8c,0x4000359cf98a00b4,1 +np.float64,0xbfd63cbfd2ac7980,0x3ffecf7b9cf99b3c,1 +np.float64,0xbfbeb3c29a3d6788,0x3ffb0e66ecc0fc3b,1 +np.float64,0xbfd2f57fd6a5eb00,0x3ffdf1d7c79e1532,1 +np.float64,0xbfab3eda9c367db0,0x3ff9fc0c875f42e9,1 +np.float64,0xbfe12df1c6e25be4,0x4001199c673e698c,1 +np.float64,0x3fef8ab23a7f1564,0x3fc5aff358c59f1c,1 +np.float64,0x3fe562f50feac5ea,0x3fead7bce205f7d9,1 +np.float64,0x3fdc41adbeb8835c,0x3ff1d0f71341b8f2,1 +np.float64,0x3fe2748967e4e912,0x3fee9837f970ff9e,1 +np.float64,0xbfdaa89d57b5513a,0x400000e3889ba4cf,1 +np.float64,0x3fdf2a137dbe5428,0x3ff0fecfbecbbf86,1 +np.float64,0xbfea1fdcd2f43fba,0x4004351974b32163,1 +np.float64,0xbfe34a93a3e69528,0x4001be323946a3e0,1 +np.float64,0x3fe929bacff25376,0x3fe54f47bd7f4cf2,1 +np.float64,0xbfd667fbd6accff8,0x3ffedb04032b3a1a,1 +np.float64,0xbfeb695796f6d2af,0x4004cbb08ec6f525,1 +np.float64,0x3fd204df2ea409c0,0x3ff490f51e6670f5,1 +np.float64,0xbfd89a2757b1344e,0x3fff722127b988c4,1 +np.float64,0xbfd0787187a0f0e4,0x3ffd4c16dbe94f32,1 +np.float64,0x3fd44239bfa88474,0x3ff3fabbfb24b1fa,1 +np.float64,0xbfeb0b3489f61669,0x40049ee33d811d33,1 +np.float64,0x3fdcf04eaab9e09c,0x3ff1a02a29996c4e,1 +np.float64,0x3fd4c51e4fa98a3c,0x3ff3d8302c68fc9a,1 +np.float64,0x3fd1346645a268cc,0x3ff4c72b4970ecaf,1 
+np.float64,0x3fd6a89d09ad513c,0x3ff357af6520afac,1 +np.float64,0xbfba0f469a341e90,0x3ffac3a8f41bed23,1 +np.float64,0xbfe13f8ddce27f1c,0x40011ed557719fd6,1 +np.float64,0x3fd43e5e26a87cbc,0x3ff3fbc040fc30dc,1 +np.float64,0x3fe838125a707024,0x3fe6cb5c987248f3,1 +np.float64,0x3fe128c30c625186,0x3ff013cff238dd1b,1 +np.float64,0xbfcd4718833a8e30,0x3ffcd33c96bde6f9,1 +np.float64,0x3fe43fcd08e87f9a,0x3fec573997456ec1,1 +np.float64,0xbfe9a29104734522,0x4003ffd502a1b57f,1 +np.float64,0xbfe4709d7968e13b,0x40021bfc5cd55af4,1 +np.float64,0x3fd21c3925a43874,0x3ff48adf48556cbb,1 +np.float64,0x3fe9a521b2734a44,0x3fe4844fc054e839,1 +np.float64,0xbfdfa6a912bf4d52,0x4000b4730ad8521e,1 +np.float64,0x3fe3740702e6e80e,0x3fed5b106283b6ed,1 +np.float64,0x3fd0a3aa36a14754,0x3ff4ecb02a5e3f49,1 +np.float64,0x3fdcb903d0b97208,0x3ff1afa5d692c5b9,1 +np.float64,0xbfe7d67839efacf0,0x40034a3146abf6f2,1 +np.float64,0x3f9981c6d8330380,0x3ff8bbf1853d7b90,1 +np.float64,0xbfe9d4191673a832,0x400414a9ab453c5d,1 +np.float64,0x3fef0a1e5c7e143c,0x3fcf70b02a54c415,1 +np.float64,0xbfd996dee6b32dbe,0x3fffb6cf707ad8e4,1 +np.float64,0x3fe19bef17e337de,0x3fef9e70d4fcedae,1 +np.float64,0x3fe34a59716694b2,0x3fed8f6d5cfba474,1 +np.float64,0x3fdf27e27cbe4fc4,0x3ff0ff70500e0c7c,1 +np.float64,0xbfe19df87fe33bf1,0x40013afb401de24c,1 +np.float64,0xbfbdfd97ba3bfb30,0x3ffb02ef8c225e57,1 +np.float64,0xbfe3d3417267a683,0x4001e95ed240b0f8,1 +np.float64,0x3fe566498b6acc94,0x3fead342957d4910,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x3feb329bd8766538,0x3fe1c2225aafe3b4,1 +np.float64,0xbfc19ca703233950,0x3ffb575b5df057b9,1 +np.float64,0x3fe755027d6eaa04,0x3fe81eb99c262e00,1 +np.float64,0xbfe6c2b8306d8570,0x4002e594199f9eec,1 +np.float64,0x3fd69438e6ad2870,0x3ff35d2275ae891d,1 +np.float64,0x3fda3e7285b47ce4,0x3ff25f5573dd47ae,1 +np.float64,0x3fe7928a166f2514,0x3fe7c4490ef4b9a9,1 +np.float64,0xbfd4eb71b9a9d6e4,0x3ffe75e8ccb74be1,1 +np.float64,0xbfcc3a07f1387410,0x3ffcb0b8af914a5b,1 +np.float64,0xbfe6e80225edd004,0x4002f2e26eae8999,1 +np.float64,0xbfb347728a268ee8,0x3ffa56bd526a12db,1 +np.float64,0x3fe5140ead6a281e,0x3feb4132c9140a1c,1 +np.float64,0xbfc147f125228fe4,0x3ffb4cab18b9050f,1 +np.float64,0xbfcb9145b537228c,0x3ffc9b1b6227a8c9,1 +np.float64,0xbfda84ef4bb509de,0x3ffff7f8a674e17d,1 +np.float64,0x3fd2eb6bbfa5d6d8,0x3ff454c225529d7e,1 +np.float64,0x3fe18c95f1e3192c,0x3fefb0cf0efba75a,1 +np.float64,0x3fe78606efef0c0e,0x3fe7d6c3a092d64c,1 +np.float64,0x3fbad5119a35aa20,0x3ff773dffe3ce660,1 +np.float64,0x3fd0cf5903a19eb4,0x3ff4e15fd21fdb42,1 +np.float64,0xbfd85ce90bb0b9d2,0x3fff618ee848e974,1 +np.float64,0x3fe90e11b9f21c24,0x3fe57be62f606f4a,1 +np.float64,0x3fd7a2040faf4408,0x3ff314ce85457ec2,1 +np.float64,0xbfd73fba69ae7f74,0x3fff14bff3504811,1 +np.float64,0x3fa04b4bd42096a0,0x3ff89f9b52f521a2,1 +np.float64,0xbfd7219ce5ae433a,0x3fff0cac0b45cc18,1 +np.float64,0xbfe0cf4661e19e8d,0x4000fdadb14e3c22,1 +np.float64,0x3fd07469fea0e8d4,0x3ff4f8eaa9b2394a,1 +np.float64,0x3f9b05c5d8360b80,0x3ff8b5e10672db5c,1 +np.float64,0x3fe4c25b916984b8,0x3febad29bd0e25e2,1 +np.float64,0xbfde8b4891bd1692,0x40008beb88d5c409,1 +np.float64,0xbfe199a7efe33350,0x400139b089aee21c,1 +np.float64,0x3fecdad25cf9b5a4,0x3fdc9d062867e8c3,1 +np.float64,0xbfe979b277f2f365,0x4003eedb061e25a4,1 +np.float64,0x3fc8c7311f318e60,0x3ff6040b9aeaad9d,1 +np.float64,0x3fd2b605b8a56c0c,0x3ff462b9a955c224,1 +np.float64,0x3fc073b6ad20e770,0x3ff7120e9f2fd63c,1 +np.float64,0xbfec60ede678c1dc,0x40054a3863e24dc2,1 +np.float64,0x3fe225171be44a2e,0x3feef910dca420ea,1 
+np.float64,0xbfd7529762aea52e,0x3fff19d00661f650,1 +np.float64,0xbfd781783daf02f0,0x3fff2667b90be461,1 +np.float64,0x3fe3f6ec6d67edd8,0x3fecb4e814a2e33a,1 +np.float64,0x3fece6702df9cce0,0x3fdc6719d92a50d2,1 +np.float64,0xbfb5c602ce2b8c08,0x3ffa7ec761ba856a,1 +np.float64,0xbfd61f0153ac3e02,0x3ffec78e3b1a6c4d,1 +np.float64,0xbfec3462b2f868c5,0x400532630bbd7050,1 +np.float64,0xbfdd248485ba490a,0x400059391c07c1bb,1 +np.float64,0xbfd424921fa84924,0x3ffe416a85d1dcdf,1 +np.float64,0x3fbb23a932364750,0x3ff76eef79209f7f,1 +np.float64,0x3fca248b0f344918,0x3ff5d77c5c1b4e5e,1 +np.float64,0xbfe69af4a4ed35ea,0x4002d77c2e4fbd4e,1 +np.float64,0x3fdafe3cdcb5fc78,0x3ff22a9be6efbbf2,1 +np.float64,0xbfebba3377f77467,0x4004f3836e1fe71a,1 +np.float64,0xbfe650fae06ca1f6,0x4002bd851406377c,1 +np.float64,0x3fda630007b4c600,0x3ff2554f1832bd94,1 +np.float64,0xbfda8107d9b50210,0x3ffff6e6209659f3,1 +np.float64,0x3fea759a02f4eb34,0x3fe31d1a632c9aae,1 +np.float64,0x3fbf88149e3f1030,0x3ff728313aa12ccb,1 +np.float64,0x3f7196d2a0232e00,0x3ff910647e1914c1,1 +np.float64,0x3feeae51d17d5ca4,0x3fd2709698d31f6f,1 +np.float64,0xbfd73cd663ae79ac,0x3fff13f96300b55a,1 +np.float64,0x3fd4fc5f06a9f8c0,0x3ff3c99359854b97,1 +np.float64,0x3fb29f5d6e253ec0,0x3ff7f7c20e396b20,1 +np.float64,0xbfd757c82aaeaf90,0x3fff1b34c6141e98,1 +np.float64,0x3fc56fd4cf2adfa8,0x3ff670c145122909,1 +np.float64,0x3fc609a2f52c1348,0x3ff65d3ef3cade2c,1 +np.float64,0xbfe1de631163bcc6,0x40014e5528fadb73,1 +np.float64,0xbfe7eb4a726fd695,0x40035202f49d95c4,1 +np.float64,0xbfc9223771324470,0x3ffc4b84d5e263b9,1 +np.float64,0x3fee91a8a87d2352,0x3fd3364befde8de6,1 +np.float64,0x3fbc9784fe392f10,0x3ff7578e29f6a1b2,1 +np.float64,0xbfec627c2c78c4f8,0x40054b0ff2cb9c55,1 +np.float64,0xbfb8b406a6316810,0x3ffaadd97062fb8c,1 +np.float64,0xbfecf98384f9f307,0x4005a043d9110d79,1 +np.float64,0xbfe5834bab6b0698,0x400276f114aebee4,1 +np.float64,0xbfd90f391eb21e72,0x3fff91e26a8f48f3,1 +np.float64,0xbfee288ce2fc511a,0x400667cb09aa04b3,1 +np.float64,0x3fd5aa5e32ab54bc,0x3ff39b7080a52214,1 +np.float64,0xbfee7ef907fcfdf2,0x4006ab96a8eba4c5,1 +np.float64,0x3fd6097973ac12f4,0x3ff3822486978bd1,1 +np.float64,0xbfe02d14b8e05a2a,0x4000ce5be53047b1,1 +np.float64,0xbf9c629a6838c540,0x3ff993897728c3f9,1 +np.float64,0xbfee2024667c4049,0x40066188782fb1f0,1 +np.float64,0xbfa42a88fc285510,0x3ff9c35a4bbce104,1 +np.float64,0x3fa407af5c280f60,0x3ff881b360d8eea1,1 +np.float64,0x3fed0ba42cfa1748,0x3fdbb7d55609175f,1 +np.float64,0xbfdd0b5844ba16b0,0x400055b0bb59ebb2,1 +np.float64,0x3fd88d97e6b11b30,0x3ff2d53c1ecb8f8c,1 +np.float64,0xbfeb7a915ef6f523,0x4004d410812eb84c,1 +np.float64,0xbfb5f979ca2bf2f0,0x3ffa8201d73cd4ca,1 +np.float64,0x3fb3b65dd6276cc0,0x3ff7e64576199505,1 +np.float64,0x3fcd47a7793a8f50,0x3ff570a7b672f160,1 +np.float64,0xbfa41dd30c283ba0,0x3ff9c2f488127eb3,1 +np.float64,0x3fe4b1ea1f6963d4,0x3febc2bed7760427,1 +np.float64,0xbfdd0f81d2ba1f04,0x400056463724b768,1 +np.float64,0x3fd15d93f7a2bb28,0x3ff4bc7a24eacfd7,1 +np.float64,0xbfe3213af8e64276,0x4001b14579dfded3,1 +np.float64,0x3fd90dfbeab21bf8,0x3ff2b26a6c2c3bb3,1 +np.float64,0xbfd02d54bca05aaa,0x3ffd38ab3886b203,1 +np.float64,0x3fc218dcad2431b8,0x3ff6dced56d5b417,1 +np.float64,0x3fea5edf71f4bdbe,0x3fe3455ee09f27e6,1 +np.float64,0x3fa74319042e8640,0x3ff867d224545438,1 +np.float64,0x3fd970ad92b2e15c,0x3ff2979084815dc1,1 +np.float64,0x3fce0a4bf73c1498,0x3ff557a4df32df3e,1 +np.float64,0x3fef5c8e10feb91c,0x3fc99ca0eeaaebe4,1 +np.float64,0xbfedae997ffb5d33,0x400611af18f407ab,1 +np.float64,0xbfbcf07d6239e0f8,0x3ffaf201177a2d36,1 
+np.float64,0xbfc3c52541278a4c,0x3ffb9d2af0408e4a,1 +np.float64,0x3fe4ef44e4e9de8a,0x3feb71f7331255e5,1 +np.float64,0xbfccd9f5f539b3ec,0x3ffcc53a99339592,1 +np.float64,0xbfda32c745b4658e,0x3fffe16e8727ef89,1 +np.float64,0xbfef54932a7ea926,0x40077e4605e61ca1,1 +np.float64,0x3fe9d4ae3573a95c,0x3fe4344a069a3fd0,1 +np.float64,0x3fda567e73b4acfc,0x3ff258bd77a663c7,1 +np.float64,0xbfd5bcac5eab7958,0x3ffead6379c19c52,1 +np.float64,0xbfee5e56f97cbcae,0x40069131fc54018d,1 +np.float64,0x3fc2d4413925a880,0x3ff6c54163816298,1 +np.float64,0xbfe9ddf6e873bbee,0x400418d8c722f7c5,1 +np.float64,0x3fdaf2a683b5e54c,0x3ff22dcda599d69c,1 +np.float64,0xbfca69789f34d2f0,0x3ffc7547ff10b1a6,1 +np.float64,0x3fed076f62fa0ede,0x3fdbcbda03c1d72a,1 +np.float64,0xbfcb38326f367064,0x3ffc8fb55dadeae5,1 +np.float64,0x3fe1938705e3270e,0x3fefa88130c5adda,1 +np.float64,0x3feaffae3b75ff5c,0x3fe221e3da537c7e,1 +np.float64,0x3fefc94acb7f9296,0x3fbd9a360ace67b4,1 +np.float64,0xbfe8bddeb0f17bbe,0x4003a316685c767e,1 +np.float64,0x3fbe10fbee3c21f0,0x3ff73fceb10650f5,1 +np.float64,0x3fde9126c1bd224c,0x3ff12a742f734d0a,1 +np.float64,0xbfe9686c91f2d0d9,0x4003e7bc6ee77906,1 +np.float64,0xbfb1ba4892237490,0x3ffa3dda064c2509,1 +np.float64,0xbfe2879100e50f22,0x400181c1a5b16f0f,1 +np.float64,0x3fd1cd40b6a39a80,0x3ff49f70e3064e95,1 +np.float64,0xbfc965869132cb0c,0x3ffc5419f3b43701,1 +np.float64,0x3fea7a6f2874f4de,0x3fe31480fb2dd862,1 +np.float64,0x3fc3bc56892778b0,0x3ff6a7e8fa0e8b0e,1 +np.float64,0x3fec1ed451f83da8,0x3fdfd78e564b8ad7,1 +np.float64,0x3feb77d16df6efa2,0x3fe13d083344e45e,1 +np.float64,0xbfe822e7c67045d0,0x400367104a830cf6,1 +np.float64,0x8000000000000001,0x3ff921fb54442d18,1 +np.float64,0xbfd4900918a92012,0x3ffe5dc0e19737b4,1 +np.float64,0x3fed184187fa3084,0x3fdb7b7a39f234f4,1 +np.float64,0x3fecef846179df08,0x3fdc3cb2228c3682,1 +np.float64,0xbfe2d2aed165a55e,0x400198e21c5b861b,1 +np.float64,0x7ff0000000000000,0x7ff8000000000000,1 +np.float64,0xbfee9409a07d2813,0x4006bd358232d073,1 +np.float64,0xbfecedc2baf9db86,0x4005995df566fc21,1 +np.float64,0x3fe6d857396db0ae,0x3fe8d2cb8794aa99,1 +np.float64,0xbf9a579e7834af40,0x3ff98b5cc8021e1c,1 +np.float64,0x3fc664fefb2cca00,0x3ff651a664ccf8fa,1 +np.float64,0xbfe8a7aa0e714f54,0x40039a5b4df938a0,1 +np.float64,0xbfdf27d380be4fa8,0x4000a241074dbae6,1 +np.float64,0x3fe00ddf55e01bbe,0x3ff0b94eb1ea1851,1 +np.float64,0x3feb47edbff68fdc,0x3fe199822d075959,1 +np.float64,0x3fb4993822293270,0x3ff7d80c838186d0,1 +np.float64,0xbfca2cd1473459a4,0x3ffc6d88c8de3d0d,1 +np.float64,0xbfea7d9c7674fb39,0x40045e4559e9e52d,1 +np.float64,0x3fe0dce425e1b9c8,0x3ff04099cab23289,1 +np.float64,0x3fd6bb7e97ad76fc,0x3ff352a30434499c,1 +np.float64,0x3fd4a4f16da949e4,0x3ff3e0b07432c9aa,1 +np.float64,0x8000000000000000,0x3ff921fb54442d18,1 +np.float64,0x3fe688f5b56d11ec,0x3fe9435f63264375,1 +np.float64,0xbfdf5a427ebeb484,0x4000a97a6c5d4abc,1 +np.float64,0xbfd1f3483fa3e690,0x3ffdae6c8a299383,1 +np.float64,0xbfeac920db759242,0x4004805862be51ec,1 +np.float64,0x3fef5bc711feb78e,0x3fc9ac40fba5b93b,1 +np.float64,0x3fe4bd9e12e97b3c,0x3febb363c787d381,1 +np.float64,0x3fef6a59ab7ed4b4,0x3fc880f1324eafce,1 +np.float64,0x3fc07a362120f470,0x3ff7113cf2c672b3,1 +np.float64,0xbfe4d6dbe2e9adb8,0x40023d6f6bea44b7,1 +np.float64,0xbfec2d6a15785ad4,0x40052eb425cc37a2,1 +np.float64,0x3fc90dae05321b60,0x3ff5fb10015d2934,1 +np.float64,0xbfa9239f74324740,0x3ff9eb2d057068ea,1 +np.float64,0xbfeb4fc8baf69f92,0x4004bf5e17fb08a4,1 +np.float64,0x0,0x3ff921fb54442d18,1 +np.float64,0x3faaf1884c35e320,0x3ff84a5591dbe1f3,1 
+np.float64,0xbfed842561fb084b,0x4005f5c0a19116ce,1 +np.float64,0xbfc64850c32c90a0,0x3ffbeeac2ee70f9a,1 +np.float64,0x3fd7d879f5afb0f4,0x3ff306254c453436,1 +np.float64,0xbfdabaa586b5754c,0x4000035e6ac83a2b,1 +np.float64,0xbfebfeefa977fddf,0x4005167446fb9faf,1 +np.float64,0xbfe9383462727069,0x4003d407aa6a1577,1 +np.float64,0x3fe108dfb6e211c0,0x3ff026ac924b281d,1 +np.float64,0xbf85096df02a12c0,0x3ff94c0e60a22ede,1 +np.float64,0xbfe3121cd566243a,0x4001ac8f90db5882,1 +np.float64,0xbfd227f62aa44fec,0x3ffdbc26bb175dcc,1 +np.float64,0x3fd931af2cb26360,0x3ff2a8b62dfe003c,1 +np.float64,0xbfd9b794e3b36f2a,0x3fffbfbc89ec013d,1 +np.float64,0x3fc89b2e6f313660,0x3ff609a6e67f15f2,1 +np.float64,0x3fc0b14a8f216298,0x3ff70a4b6905aad2,1 +np.float64,0xbfeda11a657b4235,0x400608b3f9fff574,1 +np.float64,0xbfed2ee9ec7a5dd4,0x4005c040b7c02390,1 +np.float64,0xbfef7819d8fef034,0x4007ac6bf75cf09d,1 +np.float64,0xbfcc4720fb388e40,0x3ffcb2666a00b336,1 +np.float64,0xbfe05dec4be0bbd8,0x4000dc8a25ca3760,1 +np.float64,0x3fb093416e212680,0x3ff81897b6d8b374,1 +np.float64,0xbfc6ab89332d5714,0x3ffbfb4559d143e7,1 +np.float64,0x3fc51948512a3290,0x3ff67bb9df662c0a,1 +np.float64,0x3fed4d94177a9b28,0x3fda76c92f0c0132,1 +np.float64,0x3fdd195fbeba32c0,0x3ff194a5586dd18e,1 +np.float64,0x3fe3f82799e7f050,0x3fecb354c2faf55c,1 +np.float64,0x3fecac2169f95842,0x3fdd7222296cb7a7,1 +np.float64,0x3fe3d3f36fe7a7e6,0x3fece18f45e30dd7,1 +np.float64,0x3fe31ff63d663fec,0x3fedc46c77d30c6a,1 +np.float64,0xbfe3120c83e62419,0x4001ac8a7c4aa742,1 +np.float64,0x3fe7c1a7976f8350,0x3fe77e4a9307c9f8,1 +np.float64,0x3fe226fe9de44dfe,0x3feef6c0f3cb00fa,1 +np.float64,0x3fd5c933baab9268,0x3ff3933e8a37de42,1 +np.float64,0x3feaa98496f5530a,0x3fe2c003832ebf21,1 +np.float64,0xbfc6f80a2f2df014,0x3ffc04fd54cb1317,1 +np.float64,0x3fde5e18d0bcbc30,0x3ff138f7b32a2ca3,1 +np.float64,0xbfe30c8dd566191c,0x4001aad4af935a78,1 +np.float64,0x3fbe8d196e3d1a30,0x3ff737fec8149ecc,1 +np.float64,0x3feaee6731f5dcce,0x3fe241fa42cce22d,1 +np.float64,0x3fef9cc46cff3988,0x3fc3f17b708dbdbb,1 +np.float64,0xbfdb181bdeb63038,0x4000103ecf405602,1 +np.float64,0xbfc58de0ed2b1bc0,0x3ffbd704c14e15cd,1 +np.float64,0xbfee05d5507c0bab,0x40064e480faba6d8,1 +np.float64,0x3fe27d0ffa64fa20,0x3fee8dc71ef79f2c,1 +np.float64,0xbfe4f7ad4c69ef5a,0x400248456cd09a07,1 +np.float64,0xbfe4843e91e9087d,0x4002225f3e139c84,1 +np.float64,0x3fe7158b9c6e2b18,0x3fe87ae845c5ba96,1 +np.float64,0xbfea64316074c863,0x400452fd2bc23a44,1 +np.float64,0xbfc9f3ae4133e75c,0x3ffc663d482afa42,1 +np.float64,0xbfd5e18513abc30a,0x3ffeb72fc76d7071,1 +np.float64,0xbfd52f6438aa5ec8,0x3ffe87e5b18041e5,1 +np.float64,0xbfea970650f52e0d,0x400469a4a6758154,1 +np.float64,0xbfe44321b7e88644,0x40020d404a2141b1,1 +np.float64,0x3fdf5a39bbbeb474,0x3ff0f10453059dbd,1 +np.float64,0xbfa1d4069423a810,0x3ff9b0a2eacd2ce2,1 +np.float64,0xbfc36d16a326da2c,0x3ffb92077d41d26a,1 +np.float64,0x1,0x3ff921fb54442d18,1 +np.float64,0x3feb232a79764654,0x3fe1df5beeb249d0,1 +np.float64,0xbfed2003d5fa4008,0x4005b737c2727583,1 +np.float64,0x3fd5b093a3ab6128,0x3ff399ca2db1d96d,1 +np.float64,0x3fca692c3d34d258,0x3ff5ceb86b79223e,1 +np.float64,0x3fd6bbdf89ad77c0,0x3ff3528916df652d,1 +np.float64,0xbfefdadd46ffb5bb,0x40085ee735e19f19,1 +np.float64,0x3feb69fb2676d3f6,0x3fe157ee0c15691e,1 +np.float64,0x3fe44c931f689926,0x3fec46b6f5e3f265,1 +np.float64,0xbfc43ddbcb287bb8,0x3ffbac71d268d74d,1 +np.float64,0x3fe6e16d43edc2da,0x3fe8c5cf0f0daa66,1 +np.float64,0x3fe489efc76913e0,0x3febf704ca1ac2a6,1 +np.float64,0xbfe590aadceb2156,0x40027b764205cf78,1 
+np.float64,0xbf782e8aa0305d00,0x3ff93a29e81928ab,1 +np.float64,0x3fedcb80cffb9702,0x3fd7e5d1f98a418b,1 +np.float64,0x3fe075858060eb0c,0x3ff07d23ab46b60f,1 +np.float64,0x3fe62a68296c54d0,0x3fe9c77f7068043b,1 +np.float64,0x3feff16a3c7fe2d4,0x3fae8e8a739cc67a,1 +np.float64,0xbfd6ed93e3addb28,0x3ffefebab206fa99,1 +np.float64,0x3fe40d8ccf681b1a,0x3fec97e9cd29966d,1 +np.float64,0x3fd6408210ac8104,0x3ff3737a7d374107,1 +np.float64,0x3fec8023b8f90048,0x3fde35ebfb2b3afd,1 +np.float64,0xbfe13babd4627758,0x40011dae5c07c56b,1 +np.float64,0xbfd2183e61a4307c,0x3ffdb80dd747cfbe,1 +np.float64,0x3feae8eb1d75d1d6,0x3fe24c1f6e42ae77,1 +np.float64,0xbfea559b9c74ab37,0x40044c8e5e123b20,1 +np.float64,0xbfd12c9d57a2593a,0x3ffd7ac6222f561c,1 +np.float64,0x3fe32eb697e65d6e,0x3fedb202693875b6,1 +np.float64,0xbfde0808c3bc1012,0x4000794bd8616ea3,1 +np.float64,0x3fe14958a06292b2,0x3ff0007b40ac648a,1 +np.float64,0x3fe3d388a6e7a712,0x3fece21751a6dd7c,1 +np.float64,0x3fe7ad7897ef5af2,0x3fe79c5b3da302a7,1 +np.float64,0x3fec75527e78eaa4,0x3fde655de0cf0508,1 +np.float64,0x3fea920d4c75241a,0x3fe2ea48f031d908,1 +np.float64,0x7fefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfc17a68cb22f4d0,0x3ffb530925f41aa0,1 +np.float64,0xbfe1c93166e39263,0x400147f3cb435dec,1 +np.float64,0x3feb97c402f72f88,0x3fe0fe5b561bf869,1 +np.float64,0x3fb58ff5162b1ff0,0x3ff7c8933fa969dc,1 +np.float64,0x3fe68e2beded1c58,0x3fe93c075283703b,1 +np.float64,0xbf94564cc828aca0,0x3ff97355e5ee35db,1 +np.float64,0x3fd31061c9a620c4,0x3ff44b150ec96998,1 +np.float64,0xbfc7d0c89f2fa190,0x3ffc208bf4eddc4d,1 +np.float64,0x3fe5736f1d6ae6de,0x3feac18f84992d1e,1 +np.float64,0x3fdb62e480b6c5c8,0x3ff20ecfdc4afe7c,1 +np.float64,0xbfc417228b282e44,0x3ffba78afea35979,1 +np.float64,0x3f8f5ba1303eb780,0x3ff8e343714630ff,1 +np.float64,0x3fe8e99126f1d322,0x3fe5b6511d4c0798,1 +np.float64,0xbfe2ec08a1e5d812,0x4001a0bb28a85875,1 +np.float64,0x3fea3b46cf74768e,0x3fe383dceaa74296,1 +np.float64,0xbfe008b5ed60116c,0x4000c3d62c275d40,1 +np.float64,0xbfcd9f8a4b3b3f14,0x3ffcde98d6484202,1 +np.float64,0xbfdb5fb112b6bf62,0x40001a22137ef1c9,1 +np.float64,0xbfe9079565f20f2b,0x4003c0670c92e401,1 +np.float64,0xbfce250dc53c4a1c,0x3ffcefc2b3dc3332,1 +np.float64,0x3fe9ba85d373750c,0x3fe4607131b28773,1 +np.float64,0x10000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfeb9ef42c773de8,0x4004e5f239203ad8,1 +np.float64,0xbfd6bf457dad7e8a,0x3ffef2563d87b18d,1 +np.float64,0x3fe4de9aa5e9bd36,0x3feb87f97defb04a,1 +np.float64,0x3fedb4f67cfb69ec,0x3fd8603c465bffac,1 +np.float64,0x3fe7b6d9506f6db2,0x3fe78e670c7bdb67,1 +np.float64,0x3fe071717460e2e2,0x3ff07f84472d9cc5,1 +np.float64,0xbfed2e79dbfa5cf4,0x4005bffc6f9ad24f,1 +np.float64,0x3febb8adc377715c,0x3fe0bcebfbd45900,1 +np.float64,0xbfee2cffd87c5a00,0x40066b20a037c478,1 +np.float64,0x3fef7e358d7efc6c,0x3fc6d0ba71a542a8,1 +np.float64,0xbfef027eef7e04fe,0x400723291cb00a7a,1 +np.float64,0x3fac96da34392dc0,0x3ff83d260a936c6a,1 +np.float64,0x3fe9dba94a73b752,0x3fe428736b94885e,1 +np.float64,0x3fed37581efa6eb0,0x3fdae49dcadf1d90,1 +np.float64,0xbfe6e61037edcc20,0x4002f23031b8d522,1 +np.float64,0xbfdea7204dbd4e40,0x40008fe1f37918b7,1 +np.float64,0x3feb9f8edb773f1e,0x3fe0eef20bd4387b,1 +np.float64,0x3feeb0b6ed7d616e,0x3fd25fb3b7a525d6,1 +np.float64,0xbfd7ce9061af9d20,0x3fff3b25d531aa2b,1 +np.float64,0xbfc806b509300d6c,0x3ffc2768743a8360,1 +np.float64,0xbfa283882c250710,0x3ff9b61fda28914a,1 +np.float64,0x3fdec70050bd8e00,0x3ff11b1d769b578f,1 +np.float64,0xbfc858a44930b148,0x3ffc31d6758b4721,1 +np.float64,0x3fdc321150b86424,0x3ff1d5504c3c91e4,1 
+np.float64,0x3fd9416870b282d0,0x3ff2a46f3a850f5b,1
+np.float64,0x3fdd756968baead4,0x3ff17ac510a5573f,1
+np.float64,0xbfedfd632cfbfac6,0x400648345a2f89b0,1
+np.float64,0x3fd6874285ad0e84,0x3ff36098ebff763f,1
+np.float64,0x3fe6daacc9edb55a,0x3fe8cf75fae1e35f,1
+np.float64,0x3fe53f19766a7e32,0x3feb07d0e97cd55b,1
+np.float64,0x3fd13cc36ca27988,0x3ff4c4ff801b1faa,1
+np.float64,0x3fe4f21cbce9e43a,0x3feb6e34a72ef529,1
+np.float64,0xbfc21c1cc9243838,0x3ffb67726394ca89,1
+np.float64,0x3fe947a3f2728f48,0x3fe51eae4660e23c,1
+np.float64,0xbfce78cd653cf19c,0x3ffcfa89194b3f5e,1
+np.float64,0x3fe756f049eeade0,0x3fe81be7f2d399e2,1
+np.float64,0xbfcc727cf138e4f8,0x3ffcb7f547841bb0,1
+np.float64,0xbfc2d8d58f25b1ac,0x3ffb7f496cc72458,1
+np.float64,0xbfcfd0e4653fa1c8,0x3ffd26e1309bc80b,1
+np.float64,0xbfe2126c106424d8,0x40015e0e01db6a4a,1
+np.float64,0x3fe580e4306b01c8,0x3feaaf683ce51aa5,1
+np.float64,0x3fcea8a1b93d5140,0x3ff543456c0d28c7,1
+np.float64,0xfff0000000000000,0x7ff8000000000000,1
+np.float64,0xbfd9d5da72b3abb4,0x3fffc8013113f968,1
+np.float64,0xbfe1fdfcea63fbfa,0x400157def2e4808d,1
+np.float64,0xbfc0022e0720045c,0x3ffb239963e7cbf2,1
diff --git a/python/numpy/_core/tests/data/umath-validation-set-arccosh.csv b/python/numpy/_core/tests/data/umath-validation-set-arccosh.csv
new file mode 100644
index 000000000..1b3eda484
--- /dev/null
+++ b/python/numpy/_core/tests/data/umath-validation-set-arccosh.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3f83203f,0x3e61d9d6,2
+np.float32,0x3f98dea1,0x3f1d1af6,2
+np.float32,0x7fa00000,0x7fe00000,2
+np.float32,0x7eba99af,0x42b0d032,2
+np.float32,0x3fc95a13,0x3f833650,2
+np.float32,0x3fce9a45,0x3f8771e1,2
+np.float32,0x3fc1bd96,0x3f797811,2
+np.float32,0x7eba2391,0x42b0ceed,2
+np.float32,0x7d4e8f15,0x42acdb8c,2
+np.float32,0x3feca42e,0x3f9cc88e,2
+np.float32,0x7e2b314e,0x42af412e,2
+np.float32,0x7f7fffff,0x42b2d4fc,2
+np.float32,0x3f803687,0x3d6c4380,2
+np.float32,0x3fa0edbd,0x3f33e706,2
+np.float32,0x3faa8074,0x3f4b3d3c,2
+np.float32,0x3fa0c49e,0x3f337af3,2
+np.float32,0x3f8c9ec4,0x3ee18812,2
+np.float32,0x7efef78e,0x42b17006,2
+np.float32,0x3fc75720,0x3f818aa4,2
+np.float32,0x7f52d4c8,0x42b27198,2
+np.float32,0x3f88f21e,0x3ebe52b0,2
+np.float32,0x3ff7a042,0x3fa3a07a,2
+np.float32,0x7f52115c,0x42b26fbd,2
+np.float32,0x3fc6bf6f,0x3f810b42,2
+np.float32,0x3fd105d0,0x3f895649,2
+np.float32,0x3fee7c2a,0x3f9df66e,2
+np.float32,0x7f0ff9a5,0x42b1ae4f,2
+np.float32,0x7e81f075,0x42b016e7,2
+np.float32,0x3fa57d65,0x3f3f70c6,2
+np.float32,0x80800000,0xffc00000,2
+np.float32,0x7da239f5,0x42adc2bf,2
+np.float32,0x3f9e432c,0x3f2cbd80,2
+np.float32,0x3ff2839b,0x3fa07ee4,2
+np.float32,0x3fec8aef,0x3f9cb850,2
+np.float32,0x7d325893,0x42ac905b,2
+np.float32,0x3fa27431,0x3f37dade,2
+np.float32,0x3fce7408,0x3f8753ae,2
+np.float32,0x3fde6684,0x3f93353f,2
+np.float32,0x3feb9a3e,0x3f9c1cff,2
+np.float32,0x7deb34bb,0x42ae80f0,2
+np.float32,0x3fed9300,0x3f9d61b7,2
+np.float32,0x7f35e253,0x42b225fb,2
+np.float32,0x7e6db57f,0x42afe93f,2
+np.float32,0x3fa41f08,0x3f3c10bc,2
+np.float32,0x3fb0d4da,0x3f590de3,2
+np.float32,0x3fb5c690,0x3f632351,2
+np.float32,0x3fcde9ce,0x3f86e638,2
+np.float32,0x3f809c7b,0x3dc81161,2
+np.float32,0x3fd77291,0x3f8e3226,2
+np.float32,0x3fc21a06,0x3f7a1a82,2
+np.float32,0x3fba177e,0x3f6b8139,2
+np.float32,0x7f370dff,0x42b22944,2
+np.float32,0x3fe5bfcc,0x3f9841c1,2
+np.float32,0x3feb0caa,0x3f9bc139,2
+np.float32,0x7f4fe5c3,0x42b26a6c,2
+np.float32,0x7f1e1419,0x42b1de28,2
+np.float32,0x7f5e3c96,0x42b28c92,2
+np.float32,0x3f8cd313,0x3ee3521e,2
+np.float32,0x3fa97824,0x3f48e049,2 +np.float32,0x7d8ca281,0x42ad799e,2 +np.float32,0x3f96b51b,0x3f165193,2 +np.float32,0x3f81328a,0x3e0bf504,2 +np.float32,0x3ff60bf3,0x3fa2ab45,2 +np.float32,0x3ff9b629,0x3fa4e107,2 +np.float32,0x3fecacfc,0x3f9cce37,2 +np.float32,0x3fba8804,0x3f6c5600,2 +np.float32,0x3f81f752,0x3e333fdd,2 +np.float32,0x3fb5b262,0x3f62fb46,2 +np.float32,0x3fa21bc0,0x3f36f7e6,2 +np.float32,0x3fbc87bb,0x3f7011dc,2 +np.float32,0x3fe18b32,0x3f9565ae,2 +np.float32,0x7dfb6dd5,0x42aea316,2 +np.float32,0x3fb7c602,0x3f670ee3,2 +np.float32,0x7efeb6a2,0x42b16f84,2 +np.float32,0x3fa56180,0x3f3f2ca4,2 +np.float32,0x3f8dcaff,0x3eeb9ac0,2 +np.float32,0x7e876238,0x42b02beb,2 +np.float32,0x7f0bb67d,0x42b19eec,2 +np.float32,0x3faca01c,0x3f4fffa5,2 +np.float32,0x3fdb57ee,0x3f9108b8,2 +np.float32,0x3fe3bade,0x3f96e4b7,2 +np.float32,0x7f7aa2dd,0x42b2ca25,2 +np.float32,0x3fed92ec,0x3f9d61aa,2 +np.float32,0x7eb789b1,0x42b0c7b9,2 +np.float32,0x7f7f16e4,0x42b2d329,2 +np.float32,0x3fb6647e,0x3f645b84,2 +np.float32,0x3f99335e,0x3f1e1d96,2 +np.float32,0x7e690a11,0x42afdf17,2 +np.float32,0x7dff2f95,0x42aeaaae,2 +np.float32,0x7f70adfd,0x42b2b564,2 +np.float32,0x3fe92252,0x3f9a80fe,2 +np.float32,0x3fef54ce,0x3f9e7fe5,2 +np.float32,0x3ff24eaa,0x3fa05df9,2 +np.float32,0x7f04565a,0x42b18328,2 +np.float32,0x3fcb8b80,0x3f85007f,2 +np.float32,0x3fcd4d0a,0x3f866983,2 +np.float32,0x3fbe7d82,0x3f73a911,2 +np.float32,0x3f8a7a8a,0x3ecdc8f6,2 +np.float32,0x3f912441,0x3f030d56,2 +np.float32,0x3f9b29d6,0x3f23f663,2 +np.float32,0x3fab7f36,0x3f4d7c6c,2 +np.float32,0x7dfedafc,0x42aeaa04,2 +np.float32,0x3fe190c0,0x3f956982,2 +np.float32,0x3f927515,0x3f07e0bb,2 +np.float32,0x3ff6442a,0x3fa2cd7e,2 +np.float32,0x7f6656d0,0x42b29ee8,2 +np.float32,0x3fe29aa0,0x3f96201f,2 +np.float32,0x3fa4a247,0x3f3d5687,2 +np.float32,0x3fa1cf19,0x3f363226,2 +np.float32,0x3fc20037,0x3f79ed36,2 +np.float32,0x7cc1241a,0x42ab5645,2 +np.float32,0x3fafd540,0x3f56f25a,2 +np.float32,0x7e5b3f5f,0x42afbfdb,2 +np.float32,0x7f48de5f,0x42b258d0,2 +np.float32,0x3fce1ca0,0x3f870e85,2 +np.float32,0x7ee40bb2,0x42b136e4,2 +np.float32,0x7ecdb133,0x42b10212,2 +np.float32,0x3f9f181c,0x3f2f02ca,2 +np.float32,0x3f936cbf,0x3f0b4f63,2 +np.float32,0x3fa4f8ea,0x3f3e2c2f,2 +np.float32,0x3fcc03e2,0x3f8561ac,2 +np.float32,0x3fb801f2,0x3f67831b,2 +np.float32,0x7e141dad,0x42aef70c,2 +np.float32,0x3fe8c04e,0x3f9a4087,2 +np.float32,0x3f8548d5,0x3e929f37,2 +np.float32,0x7f148d7d,0x42b1be56,2 +np.float32,0x3fd2c9a2,0x3f8ab1ed,2 +np.float32,0x7eb374fd,0x42b0bc36,2 +np.float32,0x7f296d36,0x42b201a7,2 +np.float32,0x3ff138e2,0x3f9fb09d,2 +np.float32,0x3ff42898,0x3fa18347,2 +np.float32,0x7da8c5e1,0x42add700,2 +np.float32,0x7dcf72c4,0x42ae40a4,2 +np.float32,0x7ea571fc,0x42b09296,2 +np.float32,0x3fc0953d,0x3f776ba3,2 +np.float32,0x7f1773dd,0x42b1c83c,2 +np.float32,0x7ef53b68,0x42b15c17,2 +np.float32,0x3f85d69f,0x3e9a0f3a,2 +np.float32,0x7e8b9a05,0x42b03ba0,2 +np.float32,0x3ff07d20,0x3f9f3ad2,2 +np.float32,0x7e8da32c,0x42b0430a,2 +np.float32,0x7ef96004,0x42b164ab,2 +np.float32,0x3fdfaa62,0x3f941837,2 +np.float32,0x7f0057c5,0x42b17377,2 +np.float32,0x3fb2663f,0x3f5c5065,2 +np.float32,0x3fd3d8c3,0x3f8b8055,2 +np.float32,0x1,0xffc00000,2 +np.float32,0x3fd536c1,0x3f8c8862,2 +np.float32,0x3f91b953,0x3f053619,2 +np.float32,0x3fb3305c,0x3f5deee1,2 +np.float32,0x7ecd86b9,0x42b101a8,2 +np.float32,0x3fbf71c5,0x3f75624d,2 +np.float32,0x3ff5f0f4,0x3fa29ad2,2 +np.float32,0x3fe50389,0x3f97c328,2 +np.float32,0x3fa325a1,0x3f399e69,2 +np.float32,0x3fe4397a,0x3f973a9f,2 
+np.float32,0x3f8684c6,0x3ea2b784,2 +np.float32,0x7f25ae00,0x42b1f634,2 +np.float32,0x3ff7cbf7,0x3fa3badb,2 +np.float32,0x7f73f0e0,0x42b2bc48,2 +np.float32,0x3fc88b70,0x3f828b92,2 +np.float32,0x3fb01c16,0x3f578886,2 +np.float32,0x7e557623,0x42afb229,2 +np.float32,0x3fcbcd5b,0x3f8535b4,2 +np.float32,0x7f7157e4,0x42b2b6cd,2 +np.float32,0x7f51d9d4,0x42b26f36,2 +np.float32,0x7f331a3b,0x42b21e17,2 +np.float32,0x7f777fb5,0x42b2c3b2,2 +np.float32,0x3f832001,0x3e61d11f,2 +np.float32,0x7f2cd055,0x42b20bca,2 +np.float32,0x3f89831f,0x3ec42f76,2 +np.float32,0x7f21da33,0x42b1ea3d,2 +np.float32,0x3f99e416,0x3f20330a,2 +np.float32,0x7f2c8ea1,0x42b20b07,2 +np.float32,0x7f462c98,0x42b251e6,2 +np.float32,0x7f4fdb3f,0x42b26a52,2 +np.float32,0x3fcc1338,0x3f856e07,2 +np.float32,0x3f823673,0x3e3e20da,2 +np.float32,0x7dbfe89d,0x42ae18c6,2 +np.float32,0x3fc9b04c,0x3f837d38,2 +np.float32,0x7dba3213,0x42ae094d,2 +np.float32,0x7ec5a483,0x42b0eda1,2 +np.float32,0x3fbc4d14,0x3f6fa543,2 +np.float32,0x3fc85ce2,0x3f8264f1,2 +np.float32,0x7f77c816,0x42b2c447,2 +np.float32,0x3f9c9281,0x3f280492,2 +np.float32,0x7f49b3e2,0x42b25aef,2 +np.float32,0x3fa7e4da,0x3f45347c,2 +np.float32,0x7e0c9df5,0x42aedc72,2 +np.float32,0x7f21fd1a,0x42b1eaab,2 +np.float32,0x7f7c63ad,0x42b2cdb6,2 +np.float32,0x7f4eb80a,0x42b26783,2 +np.float32,0x7e98038c,0x42b0673c,2 +np.float32,0x7e89ba08,0x42b034b4,2 +np.float32,0x3ffc06ba,0x3fa64094,2 +np.float32,0x3fae63f6,0x3f53db36,2 +np.float32,0x3fbc2d30,0x3f6f6a1c,2 +np.float32,0x7de0e5e5,0x42ae69fe,2 +np.float32,0x7e09ed18,0x42aed28d,2 +np.float32,0x3fea78f8,0x3f9b6129,2 +np.float32,0x7dfe0bcc,0x42aea863,2 +np.float32,0x7ee21d03,0x42b13289,2 +np.float32,0x3fcc3aed,0x3f858dfc,2 +np.float32,0x3fe6b3ba,0x3f98e4ea,2 +np.float32,0x3f90f25f,0x3f025225,2 +np.float32,0x7f1bcaf4,0x42b1d6b3,2 +np.float32,0x3f83ac81,0x3e74c20e,2 +np.float32,0x3f98681d,0x3f1bae16,2 +np.float32,0x3fe1f2d9,0x3f95ad08,2 +np.float32,0x3fa279d7,0x3f37e951,2 +np.float32,0x3feb922a,0x3f9c17c4,2 +np.float32,0x7f1c72e8,0x42b1d8da,2 +np.float32,0x3fea156b,0x3f9b2038,2 +np.float32,0x3fed6bda,0x3f9d48aa,2 +np.float32,0x3fa86142,0x3f46589c,2 +np.float32,0x3ff16bc2,0x3f9fd072,2 +np.float32,0x3fbebf65,0x3f74207b,2 +np.float32,0x7e7b78b5,0x42b00610,2 +np.float32,0x3ff51ab8,0x3fa217f0,2 +np.float32,0x3f8361bb,0x3e6adf07,2 +np.float32,0x7edbceed,0x42b1240e,2 +np.float32,0x7f10e2c0,0x42b1b18a,2 +np.float32,0x3fa7bc58,0x3f44d4ef,2 +np.float32,0x3f813bde,0x3e0e1138,2 +np.float32,0x7f30d5b9,0x42b21791,2 +np.float32,0x3fb4f450,0x3f61806a,2 +np.float32,0x7eee02c4,0x42b14cca,2 +np.float32,0x7ec74b62,0x42b0f1e4,2 +np.float32,0x3ff96bca,0x3fa4b498,2 +np.float32,0x7f50e304,0x42b26cda,2 +np.float32,0x7eb14c57,0x42b0b603,2 +np.float32,0x7c3f0733,0x42a9edbf,2 +np.float32,0x7ea57acb,0x42b092b1,2 +np.float32,0x7f2788dc,0x42b1fbe7,2 +np.float32,0x3fa39f14,0x3f3ad09b,2 +np.float32,0x3fc3a7e0,0x3f7ccfa0,2 +np.float32,0x3fe70a73,0x3f991eb0,2 +np.float32,0x7f4831f7,0x42b25718,2 +np.float32,0x3fe947d0,0x3f9a999c,2 +np.float32,0x7ef2b1c7,0x42b156c4,2 +np.float32,0x3fede0ea,0x3f9d937f,2 +np.float32,0x3f9fef8e,0x3f314637,2 +np.float32,0x3fc313c5,0x3f7bcebd,2 +np.float32,0x7ee99337,0x42b14328,2 +np.float32,0x7eb9042e,0x42b0cbd5,2 +np.float32,0x3fc9d3dc,0x3f839a69,2 +np.float32,0x3fb2c018,0x3f5d091d,2 +np.float32,0x3fcc4e8f,0x3f859dc5,2 +np.float32,0x3fa9363b,0x3f484819,2 +np.float32,0x7f72ce2e,0x42b2b9e4,2 +np.float32,0x7e639326,0x42afd2f1,2 +np.float32,0x7f4595d3,0x42b25060,2 +np.float32,0x7f6d0ac4,0x42b2ad97,2 +np.float32,0x7f1bda0d,0x42b1d6e5,2 
+np.float32,0x3fd85ffd,0x3f8ee0ed,2 +np.float32,0x3f91d53f,0x3f059c8e,2 +np.float32,0x7d06e103,0x42ac0155,2 +np.float32,0x3fb83126,0x3f67de6e,2 +np.float32,0x7d81ce1f,0x42ad5097,2 +np.float32,0x7f79cb3b,0x42b2c86b,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x3fdbfffd,0x3f918137,2 +np.float32,0x7f4ecb1c,0x42b267b2,2 +np.float32,0x3fc2c122,0x3f7b3ed3,2 +np.float32,0x7f415854,0x42b24544,2 +np.float32,0x7e3d988b,0x42af7575,2 +np.float32,0x3f83ca99,0x3e789fcb,2 +np.float32,0x7f274f70,0x42b1fb38,2 +np.float32,0x7f0d20e6,0x42b1a416,2 +np.float32,0x3fdf3a1d,0x3f93c9c1,2 +np.float32,0x7efaa13e,0x42b1673d,2 +np.float32,0x3fb20b15,0x3f5b9434,2 +np.float32,0x3f86af9f,0x3ea4c664,2 +np.float32,0x3fe4fcb0,0x3f97be8a,2 +np.float32,0x3f920683,0x3f065085,2 +np.float32,0x3fa4b278,0x3f3d7e8b,2 +np.float32,0x3f8077a8,0x3daef77f,2 +np.float32,0x7e865be4,0x42b02807,2 +np.float32,0x3fcea7e2,0x3f877c9f,2 +np.float32,0x7e7e9db1,0x42b00c6d,2 +np.float32,0x3f9819aa,0x3f1aba7e,2 +np.float32,0x7f2b6c4b,0x42b207a7,2 +np.float32,0x7ef85e3e,0x42b16299,2 +np.float32,0x3fbd8290,0x3f71df8b,2 +np.float32,0x3fbbb615,0x3f6e8c8c,2 +np.float32,0x7f1bc7f5,0x42b1d6a9,2 +np.float32,0x3fbb4fea,0x3f6dcdad,2 +np.float32,0x3fb67e09,0x3f648dd1,2 +np.float32,0x3fc83495,0x3f824374,2 +np.float32,0x3fe52980,0x3f97dcbc,2 +np.float32,0x3f87d893,0x3eb25d7c,2 +np.float32,0x3fdb805a,0x3f9125c0,2 +np.float32,0x3fb33f0f,0x3f5e0ce1,2 +np.float32,0x3facc524,0x3f50516b,2 +np.float32,0x3ff40484,0x3fa16d0e,2 +np.float32,0x3ff078bf,0x3f9f3811,2 +np.float32,0x7f736747,0x42b2bb27,2 +np.float32,0x7f55768b,0x42b277f3,2 +np.float32,0x80000001,0xffc00000,2 +np.float32,0x7f6463d1,0x42b29a8e,2 +np.float32,0x3f8f8b59,0x3ef9d792,2 +np.float32,0x3f8a6f4d,0x3ecd5bf4,2 +np.float32,0x3fe958d9,0x3f9aa4ca,2 +np.float32,0x7f1e2ce2,0x42b1de78,2 +np.float32,0x3fb8584a,0x3f682a05,2 +np.float32,0x7dea3dc6,0x42ae7ed5,2 +np.float32,0x7f53a815,0x42b27399,2 +np.float32,0x7e0cf986,0x42aeddbf,2 +np.float32,0x7f3afb71,0x42b23422,2 +np.float32,0x3fd87d6e,0x3f8ef685,2 +np.float32,0x3ffcaa46,0x3fa6a0d7,2 +np.float32,0x7eecd276,0x42b14a3a,2 +np.float32,0x3ffc30b4,0x3fa65951,2 +np.float32,0x7e9c85e2,0x42b07634,2 +np.float32,0x3f95d862,0x3f1383de,2 +np.float32,0x7ef21410,0x42b15577,2 +np.float32,0x3fbfa1b5,0x3f75b86e,2 +np.float32,0x3fd6d90f,0x3f8dc086,2 +np.float32,0x0,0xffc00000,2 +np.float32,0x7e885dcd,0x42b02f9f,2 +np.float32,0x3fb3e057,0x3f5f54bf,2 +np.float32,0x7f40afdd,0x42b24385,2 +np.float32,0x3fb795c2,0x3f66b120,2 +np.float32,0x3fba7c11,0x3f6c3f73,2 +np.float32,0x3ffef620,0x3fa7f828,2 +np.float32,0x7d430508,0x42acbe1e,2 +np.float32,0x3f8d2892,0x3ee6369f,2 +np.float32,0x3fbea139,0x3f73e9d5,2 +np.float32,0x3ffaa928,0x3fa571b9,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x7f16f9ce,0x42b1c69f,2 +np.float32,0x3fa8f753,0x3f47b657,2 +np.float32,0x3fd48a63,0x3f8c06ac,2 +np.float32,0x7f13419e,0x42b1b9d9,2 +np.float32,0x3fdf1526,0x3f93afde,2 +np.float32,0x3f903c8b,0x3eff3be8,2 +np.float32,0x7f085323,0x42b1925b,2 +np.float32,0x7cdbe309,0x42ab98ac,2 +np.float32,0x3fba2cfd,0x3f6ba9f1,2 +np.float32,0x7f5a805d,0x42b283e4,2 +np.float32,0x7f6753dd,0x42b2a119,2 +np.float32,0x3fed9f02,0x3f9d6964,2 +np.float32,0x3f96422c,0x3f14ddba,2 +np.float32,0x7f22f2a9,0x42b1edb1,2 +np.float32,0x3fe3fcfd,0x3f97119d,2 +np.float32,0x7e018ad0,0x42aeb271,2 +np.float32,0x7db896f5,0x42ae04de,2 +np.float32,0x7e55c795,0x42afb2ec,2 +np.float32,0x7f58ef8d,0x42b28036,2 +np.float32,0x7f24a16a,0x42b1f2f3,2 +np.float32,0x3fcf714c,0x3f881b09,2 +np.float32,0x3fcdd056,0x3f86d200,2 
+np.float32,0x7f02fad0,0x42b17de0,2 +np.float32,0x7eeab877,0x42b145a9,2 +np.float32,0x3fd6029d,0x3f8d20f7,2 +np.float32,0x3fd4f8cd,0x3f8c59d6,2 +np.float32,0x3fb29d4a,0x3f5cc1a5,2 +np.float32,0x3fb11e2d,0x3f59a77a,2 +np.float32,0x7eded576,0x42b12b0e,2 +np.float32,0x7f26c2a5,0x42b1f988,2 +np.float32,0x3fb6165b,0x3f63c151,2 +np.float32,0x7f3bca47,0x42b23657,2 +np.float32,0x7d8c93bf,0x42ad7968,2 +np.float32,0x3f8ede02,0x3ef47176,2 +np.float32,0x3fbef762,0x3f7485b9,2 +np.float32,0x7f1419af,0x42b1bcc6,2 +np.float32,0x7d9e8c79,0x42adb701,2 +np.float32,0x3fa26336,0x3f37af63,2 +np.float32,0x7f5f5590,0x42b28f18,2 +np.float32,0x3fddc93a,0x3f92c651,2 +np.float32,0x3ff0a5fc,0x3f9f547f,2 +np.float32,0x3fb2f6b8,0x3f5d790e,2 +np.float32,0x3ffe59a4,0x3fa79d2c,2 +np.float32,0x7e4df848,0x42af9fde,2 +np.float32,0x3fb0ab3b,0x3f58b678,2 +np.float32,0x7ea54d47,0x42b09225,2 +np.float32,0x3fdd6404,0x3f927eb2,2 +np.float32,0x3f846dc0,0x3e864caa,2 +np.float32,0x7d046aee,0x42abf7e7,2 +np.float32,0x7f7c5a05,0x42b2cda3,2 +np.float32,0x3faf6126,0x3f55fb21,2 +np.float32,0x7f36a910,0x42b22829,2 +np.float32,0x3fdc7b36,0x3f91d938,2 +np.float32,0x3fff443e,0x3fa82577,2 +np.float32,0x7ee7154a,0x42b13daa,2 +np.float32,0x3f944742,0x3f0e435c,2 +np.float32,0x7f5b510a,0x42b285cc,2 +np.float32,0x3f9bc940,0x3f25c4d2,2 +np.float32,0x3fee4782,0x3f9dd4ea,2 +np.float32,0x3fcfc2dd,0x3f885aea,2 +np.float32,0x7eab65cf,0x42b0a4af,2 +np.float32,0x3f9cf908,0x3f292689,2 +np.float32,0x7ed35501,0x42b10feb,2 +np.float32,0x7dabb70a,0x42addfd9,2 +np.float32,0x7f348919,0x42b2222b,2 +np.float32,0x3fb137d4,0x3f59dd17,2 +np.float32,0x7e7b36c9,0x42b0058a,2 +np.float32,0x7e351fa4,0x42af5e0d,2 +np.float32,0x3f973c0c,0x3f18011e,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0x3f9b0a4b,0x3f239a33,2 +np.float32,0x3f87c4cf,0x3eb17e7e,2 +np.float32,0x7ef67760,0x42b15eaa,2 +np.float32,0x3fc4d2c8,0x3f7ed20f,2 +np.float32,0x7e940dac,0x42b059b8,2 +np.float32,0x7f6e6a52,0x42b2b08d,2 +np.float32,0x3f838752,0x3e6fe4b2,2 +np.float32,0x3fd8f046,0x3f8f4a94,2 +np.float32,0x3fa82112,0x3f45c223,2 +np.float32,0x3fd49b16,0x3f8c1345,2 +np.float32,0x7f02a941,0x42b17ca1,2 +np.float32,0x3f8a9d2c,0x3ecf1768,2 +np.float32,0x7c9372e3,0x42aacc0f,2 +np.float32,0x3fd260b3,0x3f8a619a,2 +np.float32,0x3f8a1b88,0x3eca27cb,2 +np.float32,0x7d25d510,0x42ac6b1c,2 +np.float32,0x7ef5a578,0x42b15cf5,2 +np.float32,0x3fe6625d,0x3f98ae9a,2 +np.float32,0x3ff53240,0x3fa22658,2 +np.float32,0x3f8bb2e6,0x3ed944cf,2 +np.float32,0x7f4679b1,0x42b252ad,2 +np.float32,0x3fa8db30,0x3f4774fc,2 +np.float32,0x7ee5fafd,0x42b13b37,2 +np.float32,0x3fc405e0,0x3f7d71fb,2 +np.float32,0x3f9303cd,0x3f09ddfd,2 +np.float32,0x7f486e67,0x42b257b2,2 +np.float32,0x7e73f12b,0x42aff680,2 +np.float32,0x3fe80f8b,0x3f99cbe4,2 +np.float32,0x3f84200a,0x3e81a3f3,2 +np.float32,0x3fa14e5c,0x3f34e3ce,2 +np.float32,0x3fda22ec,0x3f9029bb,2 +np.float32,0x3f801772,0x3d1aef98,2 +np.float32,0x7eaa1428,0x42b0a0bb,2 +np.float32,0x3feae0b3,0x3f9ba4aa,2 +np.float32,0x7ea439b4,0x42b08ecc,2 +np.float32,0x3fa28b1c,0x3f381579,2 +np.float32,0x7e8af247,0x42b03937,2 +np.float32,0x3fd19216,0x3f89c2b7,2 +np.float32,0x7f6ea033,0x42b2b100,2 +np.float32,0x3fad4fbf,0x3f518224,2 +np.float32,0x3febd940,0x3f9c45bd,2 +np.float32,0x7f4643a3,0x42b25221,2 +np.float32,0x7ec34478,0x42b0e771,2 +np.float32,0x7f18c83b,0x42b1ccb5,2 +np.float32,0x3fc665ad,0x3f80bf94,2 +np.float32,0x3ff0a999,0x3f9f56c4,2 +np.float32,0x3faf1cd2,0x3f5568fe,2 +np.float32,0x7ecd9dc6,0x42b101e1,2 +np.float32,0x3faad282,0x3f4bf754,2 +np.float32,0x3ff905a0,0x3fa47771,2 
+np.float32,0x7f596481,0x42b28149,2 +np.float32,0x7f1cb31f,0x42b1d9ac,2 +np.float32,0x7e266719,0x42af32a6,2 +np.float32,0x7eccce06,0x42b0ffdb,2 +np.float32,0x3f9b6f71,0x3f24c102,2 +np.float32,0x3f80e4ba,0x3df1d6bc,2 +np.float32,0x3f843d51,0x3e836a60,2 +np.float32,0x7f70bd88,0x42b2b585,2 +np.float32,0x3fe4cc96,0x3f979e18,2 +np.float32,0x3ff737c7,0x3fa36151,2 +np.float32,0x3ff1197e,0x3f9f9cf4,2 +np.float32,0x7f08e190,0x42b19471,2 +np.float32,0x3ff1542e,0x3f9fc1b2,2 +np.float32,0x3ff6673c,0x3fa2e2d2,2 +np.float32,0xbf800000,0xffc00000,2 +np.float32,0x7e3f9ba7,0x42af7add,2 +np.float32,0x7f658ff6,0x42b29d2d,2 +np.float32,0x3f93441c,0x3f0ac0d9,2 +np.float32,0x7f526a74,0x42b27096,2 +np.float32,0x7f5b00c8,0x42b28511,2 +np.float32,0x3ff212f8,0x3fa038cf,2 +np.float32,0x7e0bd60d,0x42aed998,2 +np.float32,0x7f71ef7f,0x42b2b80e,2 +np.float32,0x7f7a897e,0x42b2c9f1,2 +np.float32,0x7e8b76a6,0x42b03b1e,2 +np.float32,0x7efa0da3,0x42b1660f,2 +np.float32,0x3fce9166,0x3f876ae0,2 +np.float32,0x3fc4163d,0x3f7d8e30,2 +np.float32,0x3fdb3784,0x3f90f16b,2 +np.float32,0x7c5f177b,0x42aa3d30,2 +np.float32,0x3fc6276d,0x3f808af5,2 +np.float32,0x7bac9cc2,0x42a856f4,2 +np.float32,0x3fe5876f,0x3f981bea,2 +np.float32,0x3fef60e3,0x3f9e878a,2 +np.float32,0x3fb23cd8,0x3f5bfb06,2 +np.float32,0x3fe114e2,0x3f951402,2 +np.float32,0x7ca8ef04,0x42ab11b4,2 +np.float32,0x7d93c2ad,0x42ad92ec,2 +np.float32,0x3fe5bb8a,0x3f983ee6,2 +np.float32,0x7f0182fd,0x42b1781b,2 +np.float32,0x7da63bb2,0x42adcf3d,2 +np.float32,0x3fac46b7,0x3f4f399e,2 +np.float32,0x7f7a5d8f,0x42b2c997,2 +np.float32,0x7f76572e,0x42b2c14b,2 +np.float32,0x7f42d53e,0x42b24931,2 +np.float32,0x7f7ffd00,0x42b2d4f6,2 +np.float32,0x3fc346c3,0x3f7c2756,2 +np.float32,0x7f1f6ae3,0x42b1e27a,2 +np.float32,0x3f87fb56,0x3eb3e2ee,2 +np.float32,0x3fed17a2,0x3f9d12b4,2 +np.float32,0x7f5ea903,0x42b28d8c,2 +np.float32,0x3f967f82,0x3f15a4ab,2 +np.float32,0x7d3b540c,0x42aca984,2 +np.float32,0x7f56711a,0x42b27a4a,2 +np.float32,0x7f122223,0x42b1b5ee,2 +np.float32,0x3fd6fa34,0x3f8dd919,2 +np.float32,0x3fadd62e,0x3f52a7b3,2 +np.float32,0x3fb7bf0c,0x3f67015f,2 +np.float32,0x7edf4ba7,0x42b12c1d,2 +np.float32,0x7e33cc65,0x42af5a4b,2 +np.float32,0x3fa6be17,0x3f427831,2 +np.float32,0x3fa07aa8,0x3f32b7d4,2 +np.float32,0x3fa4a3af,0x3f3d5a01,2 +np.float32,0x3fdbb267,0x3f9149a8,2 +np.float32,0x7ed45e25,0x42b1126c,2 +np.float32,0x3fe3f432,0x3f970ba6,2 +np.float32,0x7f752080,0x42b2bec3,2 +np.float32,0x3f872747,0x3eaa62ea,2 +np.float32,0x7e52175d,0x42afaa03,2 +np.float32,0x3fdc766c,0x3f91d5ce,2 +np.float32,0x7ecd6841,0x42b1015c,2 +np.float32,0x7f3d6c40,0x42b23ac6,2 +np.float32,0x3fb80c14,0x3f6796b9,2 +np.float32,0x3ff6ad56,0x3fa30d68,2 +np.float32,0x3fda44c3,0x3f90423e,2 +np.float32,0x3fdcba0c,0x3f9205fc,2 +np.float32,0x7e14a720,0x42aef8e6,2 +np.float32,0x3fe9e489,0x3f9b0047,2 +np.float32,0x7e69f933,0x42afe123,2 +np.float32,0x3ff3ee6d,0x3fa15f71,2 +np.float32,0x3f8538cd,0x3e91c1a7,2 +np.float32,0x3fdc3f07,0x3f91ae46,2 +np.float32,0x3fba2ef0,0x3f6bada2,2 +np.float32,0x7da64cd8,0x42adcf71,2 +np.float32,0x3fc34bd2,0x3f7c301d,2 +np.float32,0x3fa273aa,0x3f37d984,2 +np.float32,0x3ff0338c,0x3f9f0c86,2 +np.float32,0x7ed62cef,0x42b116c3,2 +np.float32,0x3f911e7e,0x3f02f7c6,2 +np.float32,0x7c8514c9,0x42aa9792,2 +np.float32,0x3fea2a74,0x3f9b2df5,2 +np.float32,0x3fe036f8,0x3f947a25,2 +np.float32,0x7c5654bf,0x42aa28ad,2 +np.float32,0x3fd9e423,0x3f8ffc32,2 +np.float32,0x7eec0439,0x42b1487b,2 +np.float32,0x3fc580f4,0x3f7ffb62,2 +np.float32,0x3fb0e316,0x3f592bbe,2 +np.float32,0x7c4cfb7d,0x42aa11d8,2 
+np.float32,0x3faf9704,0x3f566e00,2 +np.float32,0x3fa7cf8a,0x3f45023d,2 +np.float32,0x7f7b724d,0x42b2cbcc,2 +np.float32,0x7f05bfe3,0x42b18897,2 +np.float32,0x3f90bde3,0x3f018bf3,2 +np.float32,0x7c565479,0x42aa28ad,2 +np.float32,0x3f94b517,0x3f0fb8e5,2 +np.float32,0x3fd6aadd,0x3f8d9e3c,2 +np.float32,0x7f09b37c,0x42b1977f,2 +np.float32,0x7f2b45ea,0x42b20734,2 +np.float32,0x3ff1d15e,0x3fa00fe9,2 +np.float32,0x3f99bce6,0x3f1fbd6c,2 +np.float32,0x7ecd1f76,0x42b100a7,2 +np.float32,0x7f443e2b,0x42b24ce2,2 +np.float32,0x7da7d6a5,0x42add428,2 +np.float32,0x7ebe0193,0x42b0d975,2 +np.float32,0x7ee13c43,0x42b1308b,2 +np.float32,0x3f8adf1b,0x3ed18e0c,2 +np.float32,0x7f76ce65,0x42b2c242,2 +np.float32,0x7e34f43d,0x42af5d92,2 +np.float32,0x7f306b76,0x42b2165d,2 +np.float32,0x7e1fd07f,0x42af1df7,2 +np.float32,0x3fab9a41,0x3f4db909,2 +np.float32,0x3fc23d1a,0x3f7a5803,2 +np.float32,0x3f8b7403,0x3ed70245,2 +np.float32,0x3f8c4dd6,0x3edebbae,2 +np.float32,0x3fe5f411,0x3f9864cd,2 +np.float32,0x3f88128b,0x3eb4e508,2 +np.float32,0x3fcb09de,0x3f84976f,2 +np.float32,0x7f32f2f5,0x42b21da6,2 +np.float32,0x3fe75610,0x3f9950f6,2 +np.float32,0x3f993edf,0x3f1e408d,2 +np.float32,0x3fc4a9d7,0x3f7e8be9,2 +np.float32,0x7f74551a,0x42b2bd1a,2 +np.float32,0x7de87129,0x42ae7ae2,2 +np.float32,0x7f18bbbd,0x42b1cc8c,2 +np.float32,0x7e7e1dd4,0x42b00b6c,2 +np.float32,0x3ff6e55b,0x3fa32f64,2 +np.float32,0x3fa634c8,0x3f412df3,2 +np.float32,0x3fd0fb7c,0x3f894e49,2 +np.float32,0x3ff4f6a6,0x3fa201d7,2 +np.float32,0x7f69d418,0x42b2a69a,2 +np.float32,0x7cb9632d,0x42ab414a,2 +np.float32,0x3fc57d36,0x3f7ff503,2 +np.float32,0x7e9e2ed7,0x42b07b9b,2 +np.float32,0x7f2e6868,0x42b2107d,2 +np.float32,0x3fa3169a,0x3f39785d,2 +np.float32,0x7f03cde0,0x42b18117,2 +np.float32,0x7f6d75d2,0x42b2ae7f,2 +np.float32,0x3ff483f2,0x3fa1bb75,2 +np.float32,0x7f1b39f7,0x42b1d4d6,2 +np.float32,0x3f8c7a7d,0x3ee0481e,2 +np.float32,0x3f989095,0x3f1c2b19,2 +np.float32,0x3fa4cbfd,0x3f3dbd87,2 +np.float32,0x7f75b00f,0x42b2bfef,2 +np.float32,0x3f940724,0x3f0d6756,2 +np.float32,0x7f5e5a1a,0x42b28cd6,2 +np.float32,0x800000,0xffc00000,2 +np.float32,0x7edd1d29,0x42b12716,2 +np.float32,0x3fa3e9e4,0x3f3b8c16,2 +np.float32,0x7e46d70e,0x42af8dd5,2 +np.float32,0x3f824745,0x3e40ec1e,2 +np.float32,0x3fd67623,0x3f8d770a,2 +np.float32,0x3fe9a6f3,0x3f9ad7fa,2 +np.float32,0x3fdda67c,0x3f92adc1,2 +np.float32,0x7ccb6c9a,0x42ab70d4,2 +np.float32,0x3ffd364a,0x3fa6f2fe,2 +np.float32,0x7e02424c,0x42aeb545,2 +np.float32,0x3fb6d2f2,0x3f6534a1,2 +np.float32,0x3fe1fe26,0x3f95b4cc,2 +np.float32,0x7e93ac57,0x42b05867,2 +np.float32,0x7f7b3433,0x42b2cb4d,2 +np.float32,0x3fb76803,0x3f66580d,2 +np.float32,0x3f9af881,0x3f23661b,2 +np.float32,0x3fd58062,0x3f8cbf98,2 +np.float32,0x80000000,0xffc00000,2 +np.float32,0x7f1af8f4,0x42b1d3ff,2 +np.float32,0x3fe66bba,0x3f98b4dc,2 +np.float32,0x7f6bd7bf,0x42b2aaff,2 +np.float32,0x3f84f79a,0x3e8e2e49,2 +np.float32,0x7e475b06,0x42af8f28,2 +np.float32,0x3faff89b,0x3f573d5e,2 +np.float32,0x7de5aa77,0x42ae74bb,2 +np.float32,0x3f8e9e42,0x3ef26cd2,2 +np.float32,0x3fb1cec3,0x3f5b1740,2 +np.float32,0x3f8890d6,0x3eba4821,2 +np.float32,0x3f9b39e9,0x3f242547,2 +np.float32,0x3fc895a4,0x3f829407,2 +np.float32,0x7f77943c,0x42b2c3dc,2 +np.float32,0x7f390d58,0x42b22ed2,2 +np.float32,0x3fe7e160,0x3f99ad58,2 +np.float32,0x3f93d2a0,0x3f0cb205,2 +np.float32,0x7f29499b,0x42b2013c,2 +np.float32,0x3f8c11b2,0x3edca10f,2 +np.float32,0x7e898ef8,0x42b03413,2 +np.float32,0x3fdff942,0x3f944f34,2 +np.float32,0x7f3d602f,0x42b23aa5,2 +np.float32,0x3f8a50f3,0x3ecc345b,2 
+np.float32,0x3fa1f86d,0x3f369ce4,2 +np.float32,0x3f97ad95,0x3f19681d,2 +np.float32,0x3ffad1e0,0x3fa589e5,2 +np.float32,0x3fa70590,0x3f432311,2 +np.float32,0x7e6840cb,0x42afdd5c,2 +np.float32,0x3fd4036d,0x3f8ba0aa,2 +np.float32,0x7f7cc953,0x42b2ce84,2 +np.float32,0x7f228e1e,0x42b1ec74,2 +np.float32,0x7e37a866,0x42af652a,2 +np.float32,0x3fda22d0,0x3f9029a7,2 +np.float32,0x7f736bff,0x42b2bb31,2 +np.float32,0x3f9833b6,0x3f1b0b8e,2 +np.float32,0x7f466001,0x42b2526a,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0x7dd62bcd,0x42ae50f8,2 +np.float32,0x7f1d2bfe,0x42b1db36,2 +np.float32,0x7ecffe9e,0x42b107c5,2 +np.float32,0x7ebefe0a,0x42b0dc1b,2 +np.float32,0x7f45c63d,0x42b250dd,2 +np.float32,0x7f601af0,0x42b290db,2 +np.float32,0x3fcbb88a,0x3f8524e5,2 +np.float32,0x7ede55ff,0x42b129e8,2 +np.float32,0x7ea5dd5a,0x42b093e2,2 +np.float32,0x3ff53857,0x3fa22a12,2 +np.float32,0x3f8dbd6a,0x3eeb28a4,2 +np.float32,0x3fd1b467,0x3f89dd2c,2 +np.float32,0x3fe0423f,0x3f9481fc,2 +np.float32,0x3f84b421,0x3e8a6174,2 +np.float32,0x7f4efc97,0x42b2682c,2 +np.float32,0x7f601b33,0x42b290dc,2 +np.float32,0x3f94f240,0x3f108719,2 +np.float32,0x7decd251,0x42ae8471,2 +np.float32,0x3fdc457c,0x3f91b2e2,2 +np.float32,0x3f92a966,0x3f089c5a,2 +np.float32,0x3fc9732f,0x3f834afc,2 +np.float32,0x3f97948f,0x3f19194e,2 +np.float32,0x7f0824a1,0x42b191ac,2 +np.float32,0x7f0365a5,0x42b17f81,2 +np.float32,0x3f800000,0x0,2 +np.float32,0x7f0054c6,0x42b1736b,2 +np.float32,0x3fe86544,0x3f9a0484,2 +np.float32,0x7e95f844,0x42b0604e,2 +np.float32,0x3fce8602,0x3f8761e2,2 +np.float32,0x3fc726c8,0x3f81621d,2 +np.float32,0x3fcf6b03,0x3f88161b,2 +np.float32,0x3fceb843,0x3f87898a,2 +np.float32,0x3fe2f8b2,0x3f966071,2 +np.float32,0x7f3c8e7f,0x42b2386d,2 +np.float32,0x3fcee13a,0x3f87a9d2,2 +np.float32,0x3fc4df27,0x3f7ee73c,2 +np.float32,0x3ffde486,0x3fa758e3,2 +np.float32,0x3fa91be0,0x3f480b17,2 +np.float32,0x7f2a5a7d,0x42b20472,2 +np.float32,0x7e278d80,0x42af362d,2 +np.float32,0x3f96d091,0x3f16a9d5,2 +np.float32,0x7e925225,0x42b053b2,2 +np.float32,0x7f7ef83a,0x42b2d2ec,2 +np.float32,0x7eb4923a,0x42b0bf61,2 +np.float32,0x7e98bf19,0x42b069b3,2 +np.float32,0x3fac93a2,0x3f4fe410,2 +np.float32,0x7f46389c,0x42b25205,2 +np.float32,0x3f9fd447,0x3f30fd54,2 +np.float32,0x3fef42d4,0x3f9e7483,2 +np.float32,0x7f482174,0x42b256ed,2 +np.float32,0x3f97aedb,0x3f196c1e,2 +np.float32,0x7f764edd,0x42b2c13a,2 +np.float32,0x3f9117b5,0x3f02de5c,2 +np.float32,0x3fc7984e,0x3f81c12d,2 +np.float64,0x3ff1e2cb7463c597,0x3fdec6caf39e0c0e,1 +np.float64,0x3ffe4f89789c9f13,0x3ff40f4b1da0f3e9,1 +np.float64,0x7f6a5c9ac034b935,0x408605e51703c145,1 +np.float64,0x7fdcb6ece3b96dd9,0x40862d6521e16d60,1 +np.float64,0x3ff6563e182cac7c,0x3feb9d8210f3fa88,1 +np.float64,0x7fde32025f3c6404,0x40862dcc1d1a9b7f,1 +np.float64,0x7fd755ed35aeabd9,0x40862bbc5522b779,1 +np.float64,0x3ff5c81f4bcb903e,0x3fea71f10b954ea3,1 +np.float64,0x3fffe805d35fd00c,0x3ff50463a1ba2938,1 +np.float64,0x7fd045a1c1a08b43,0x408628d9f431f2f5,1 +np.float64,0x3ff49f7dd9893efc,0x3fe7c6736e17ea8e,1 +np.float64,0x7fccfbc1fd39f783,0x408627eca79acf51,1 +np.float64,0x3ff1af0a00035e14,0x3fdd1c0e7d5706ea,1 +np.float64,0x7fe7bd17162f7a2d,0x4086316af683502b,1 +np.float64,0x3ff0941b8d012837,0x3fd128d274065ac0,1 +np.float64,0x3ffa0c5d98b418bb,0x3ff11af9c8edd17f,1 +np.float64,0x3ffad9733355b2e6,0x3ff1b6d1307acb42,1 +np.float64,0x3ffabb2a33d57654,0x3ff1a0442b034e50,1 +np.float64,0x3ff36118b0c6c231,0x3fe472b7dfb23516,1 +np.float64,0x3ff2441d3664883a,0x3fe0d61145608f0c,1 +np.float64,0x7fe039862d20730b,0x40862e5f8ed752d3,1 
+np.float64,0x7fb1dde24023bbc4,0x40861e824cdb0664,1 +np.float64,0x7face6335839cc66,0x40861ccf90a26e16,1 +np.float64,0x3ffb5d0e1af6ba1c,0x3ff2170f6f42fafe,1 +np.float64,0x3ff5c2c6a50b858d,0x3fea665aabf04407,1 +np.float64,0x3ffabb409db57681,0x3ff1a054ea32bfc3,1 +np.float64,0x3ff1e054e983c0aa,0x3fdeb30c17286cb6,1 +np.float64,0x7fe467f73268cfed,0x4086303529e52e9b,1 +np.float64,0x7fe0e86bf961d0d7,0x40862eb40788b04a,1 +np.float64,0x3ffb743542f6e86a,0x3ff227b4ea5acee0,1 +np.float64,0x3ff2de6826e5bcd0,0x3fe2e31fcde0a96c,1 +np.float64,0x7fd6b27ccfad64f9,0x40862b8385697c31,1 +np.float64,0x7fe0918e8d21231c,0x40862e8a82d9517a,1 +np.float64,0x7fd0ca0395a19406,0x4086291a0696ed33,1 +np.float64,0x3ffb042496960849,0x3ff1d658c928abfc,1 +np.float64,0x3ffcd0409799a081,0x3ff31877df0cb245,1 +np.float64,0x7fe429bd06685379,0x4086301c9f259934,1 +np.float64,0x3ff933076092660f,0x3ff06d2e5f4d9ab7,1 +np.float64,0x7feaefcb28f5df95,0x4086326dccf88e6f,1 +np.float64,0x7fb5f2c1f82be583,0x40862027ac02a39d,1 +np.float64,0x3ffb5d9e3bd6bb3c,0x3ff21777501d097e,1 +np.float64,0x10000000000000,0xfff8000000000000,1 +np.float64,0x3ff70361596e06c3,0x3fecf675ceda7e19,1 +np.float64,0x3ff71a21b5ee3444,0x3fed224fa048d9a9,1 +np.float64,0x3ffb102b86762057,0x3ff1df2cc9390518,1 +np.float64,0x7feaaeb35c355d66,0x4086325a60704a90,1 +np.float64,0x7fd9a3d0a93347a0,0x40862c7d300fc076,1 +np.float64,0x7fabcf159c379e2a,0x40861c80cdbbff27,1 +np.float64,0x7fd1c066ec2380cd,0x4086298c3006fee6,1 +np.float64,0x3ff3d5ae2d67ab5c,0x3fe5bc16447428db,1 +np.float64,0x3ff4b76add696ed6,0x3fe800f5bbf21376,1 +np.float64,0x3ff60d89ee0c1b14,0x3feb063fdebe1a68,1 +np.float64,0x7f1d2648003a4c8f,0x4085eaf9238af95a,1 +np.float64,0x7fe8b45f6df168be,0x408631bca5abf6d6,1 +np.float64,0x7fe9ea5308f3d4a5,0x4086321ea2bd3af9,1 +np.float64,0x7fcb6ba5a636d74a,0x4086277b208075ed,1 +np.float64,0x3ff621cfd74c43a0,0x3feb30d59baf5919,1 +np.float64,0x3ff7bc8ca0af7919,0x3fee524da8032896,1 +np.float64,0x7fda22dd0c3445b9,0x40862ca47326d063,1 +np.float64,0x7fd02ed4b2a05da8,0x408628ceb6919421,1 +np.float64,0x3ffe64309fdcc861,0x3ff41c1b18940709,1 +np.float64,0x3ffee4042abdc808,0x3ff46a6005bccb41,1 +np.float64,0x3ff078145b00f029,0x3fceeb3d6bfae0eb,1 +np.float64,0x7fda20fd20b441f9,0x40862ca3e03b990b,1 +np.float64,0x3ffa9e9e9af53d3d,0x3ff18ade3cbee789,1 +np.float64,0x3ff0a1062501420c,0x3fd1e32de6d18c0d,1 +np.float64,0x3ff3bdf118477be2,0x3fe57ad89b7fdf8b,1 +np.float64,0x3ff101c0d5c20382,0x3fd6965d3539be47,1 +np.float64,0x7feba3b53b774769,0x408632a28c7aca4d,1 +np.float64,0x3ff598db5d4b31b7,0x3fea0aa65c0b421a,1 +np.float64,0x3ff5fdfbb72bfbf8,0x3feae55accde4a5e,1 +np.float64,0x7fe5bae53aab75c9,0x408630b5e7a5b92a,1 +np.float64,0x3ff8f668afd1ecd2,0x3ff03af686666c9c,1 +np.float64,0x3ff5ba72dd2b74e6,0x3fea5441f223c093,1 +np.float64,0x3ff8498147109302,0x3fef4e45d501601d,1 +np.float64,0x7feddcfa5efbb9f4,0x4086334106a6e76b,1 +np.float64,0x7fd1a30200234603,0x4086297ee5cc562c,1 +np.float64,0x3ffffa8ee07ff51e,0x3ff50f1dc46f1303,1 +np.float64,0x7fef7ed00ebefd9f,0x408633ae01dabe52,1 +np.float64,0x3ffb6e062276dc0c,0x3ff22344c58c2016,1 +np.float64,0x7fcf2b59943e56b2,0x4086288190dd5eeb,1 +np.float64,0x3ffa589f9254b13f,0x3ff155cc081eee0b,1 +np.float64,0x3ff05415ca60a82c,0x3fc9e45565baef0a,1 +np.float64,0x7feb34bed576697d,0x408632822d5a178c,1 +np.float64,0x3ff3993845c73270,0x3fe51423baf246c3,1 +np.float64,0x3ff88367aaf106d0,0x3fefb2d9ca9f1192,1 +np.float64,0x7fef364304fe6c85,0x4086339b7ed82997,1 +np.float64,0x7fcba2c317374585,0x4086278b24e42934,1 +np.float64,0x3ff1aef885e35df1,0x3fdd1b79f55b20c0,1 
+np.float64,0x7fe19367886326ce,0x40862f035f867445,1 +np.float64,0x3ff3c8295e279053,0x3fe5970aa670d32e,1 +np.float64,0x3ff6edda164ddbb4,0x3feccca9eb59d6b9,1 +np.float64,0x7fdeaea940bd5d52,0x40862dece02d151b,1 +np.float64,0x7fea9d6324353ac5,0x408632552ddf0d4f,1 +np.float64,0x7fe60e39e66c1c73,0x408630d45b1ad0c4,1 +np.float64,0x7fde06325abc0c64,0x40862dc07910038c,1 +np.float64,0x7f9ec89d303d9139,0x408617c55ea4c576,1 +np.float64,0x3ff9801930530032,0x3ff0abe5be046051,1 +np.float64,0x3ff4d5859689ab0b,0x3fe849a7f7a19fa3,1 +np.float64,0x3ff38afbc48715f8,0x3fe4ebb7710cbab9,1 +np.float64,0x3ffd88a0e77b1142,0x3ff3916964407e21,1 +np.float64,0x1,0xfff8000000000000,1 +np.float64,0x3ff5db59e58bb6b4,0x3fea9b6b5ccc116f,1 +np.float64,0x3ffd4b05b15a960c,0x3ff369792f661a90,1 +np.float64,0x7fdcebc4fb39d789,0x40862d73cd623378,1 +np.float64,0x3ff5b56f944b6adf,0x3fea4955d6b06ca3,1 +np.float64,0x7fd4e4abf2a9c957,0x40862ad9e9da3c61,1 +np.float64,0x7fe08e0d6aa11c1a,0x40862e88d17ef277,1 +np.float64,0x3ff0dfc97da1bf93,0x3fd50f9004136d8f,1 +np.float64,0x7fdec38eaebd871c,0x40862df2511e26b4,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3ff21865504430cb,0x3fe033fe3cf3947a,1 +np.float64,0x7fdc139708b8272d,0x40862d371cfbad03,1 +np.float64,0x7fe1fe3be3a3fc77,0x40862f336e3ba63a,1 +np.float64,0x7fd9fa2493b3f448,0x40862c97f2960be9,1 +np.float64,0x3ff0a027db414050,0x3fd1d6e54a707c87,1 +np.float64,0x3ff568b16f4ad163,0x3fe99f5c6d7b6e18,1 +np.float64,0x3ffe2f82877c5f05,0x3ff3fb54bd0da753,1 +np.float64,0x7fbaf5778435eaee,0x408621ccc9e2c1be,1 +np.float64,0x7fc5aaf8362b55ef,0x40862598e7072a49,1 +np.float64,0x7fe0ebfdd4a1d7fb,0x40862eb5b7bf99d5,1 +np.float64,0x7fd8efeb5931dfd6,0x40862c444636f408,1 +np.float64,0x3ff361a308c6c346,0x3fe4744cae63e6df,1 +np.float64,0x7fef287d39be50f9,0x40863397f65c807e,1 +np.float64,0x7fe72c4a14ae5893,0x4086313992e52082,1 +np.float64,0x3ffd1be44cba37c8,0x3ff34a9a45239eb9,1 +np.float64,0x3ff50369c18a06d4,0x3fe8b69319f091f1,1 +np.float64,0x3ffb333c25766678,0x3ff1f8c78eeb28f1,1 +np.float64,0x7fe12050416240a0,0x40862ece4e2f2f24,1 +np.float64,0x7fe348f5526691ea,0x40862fc16fbe7b6c,1 +np.float64,0x3ff343cc4d068799,0x3fe41c2a30cab7d2,1 +np.float64,0x7fd1b0daaa2361b4,0x408629852b3104ff,1 +np.float64,0x3ff6a41f37ad483e,0x3fec3b36ee6c6d4a,1 +np.float64,0x3ffad9439435b287,0x3ff1b6add9a1b3d7,1 +np.float64,0x7fbeb9a2f23d7345,0x408622d89ac1eaba,1 +np.float64,0x3ffab3d39fb567a7,0x3ff19ac75b4427f3,1 +np.float64,0x3ff890003ed12000,0x3fefc8844471c6ad,1 +np.float64,0x3ffc9f595e593eb2,0x3ff2f7a8699f06d8,1 +np.float64,0x7fe2224ef6e4449d,0x40862f43684a154a,1 +np.float64,0x3ffa67ba08d4cf74,0x3ff161525778df99,1 +np.float64,0x7fe87e24b570fc48,0x408631ab02b159fb,1 +np.float64,0x7fd6e99be92dd337,0x40862b96dba73685,1 +np.float64,0x7fe90f39fdf21e73,0x408631d9dbd36c1e,1 +np.float64,0x3ffb7806abd6f00e,0x3ff22a719b0f4c46,1 +np.float64,0x3ffa511ba3d4a238,0x3ff1500c124f6e17,1 +np.float64,0x3ff5d7a569abaf4b,0x3fea937391c280e8,1 +np.float64,0x7fc4279d20284f39,0x40862504a5cdcb96,1 +np.float64,0x3ffe8791b1fd0f24,0x3ff431f1ed7eaba0,1 +np.float64,0x7fe3b2f5276765e9,0x40862fecf15e2535,1 +np.float64,0x7feeab0e7abd561c,0x408633778044cfbc,1 +np.float64,0x7fdba88531375109,0x40862d1860306d7a,1 +np.float64,0x7fe7b19b3def6335,0x4086316716d6890b,1 +np.float64,0x3ff9e9437413d287,0x3ff0ff89431c748c,1 +np.float64,0x3ff960716a52c0e3,0x3ff092498028f802,1 +np.float64,0x3ff271bf56a4e37f,0x3fe1786fc8dd775d,1 +np.float64,0x3fff2a6578be54cb,0x3ff494bbe303eeb5,1 +np.float64,0x3ffd842eb5fb085e,0x3ff38e8b7ba42bc5,1 
+np.float64,0x3ff91600e5d22c02,0x3ff0553c6a6b3d93,1 +np.float64,0x3ff9153f45f22a7e,0x3ff0549c0eaecf95,1 +np.float64,0x7fe0ab319da15662,0x40862e96da3b19f9,1 +np.float64,0x3ff06acd1f60d59a,0x3fcd2aca543d2772,1 +np.float64,0x3ffb3e7a54d67cf4,0x3ff200f288cd391b,1 +np.float64,0x3ffd01356f1a026b,0x3ff339003462a56c,1 +np.float64,0x3ffacd35def59a6c,0x3ff1adb8d32b3ec0,1 +np.float64,0x3ff6f953264df2a6,0x3fece2f992948d6e,1 +np.float64,0x3ff0fa91f5a1f524,0x3fd64609a28f1590,1 +np.float64,0x7fd1b7610ca36ec1,0x408629881e03dc7d,1 +np.float64,0x3ff4317fb7c86300,0x3fe6b086ed265887,1 +np.float64,0x3ff3856198070ac3,0x3fe4dbb6bc88b9e3,1 +np.float64,0x7fed7fc4573aff88,0x40863327e7013a81,1 +np.float64,0x3ffe53cbbf5ca798,0x3ff411f07a29b1f4,1 +np.float64,0x3ff092195b012433,0x3fd10b1c0b4b14fe,1 +np.float64,0x3ff1a3171163462e,0x3fdcb5c301d5d40d,1 +np.float64,0x3ffa1401f1742804,0x3ff120eb319e9faa,1 +np.float64,0x7fd352f6f426a5ed,0x40862a3a048feb6d,1 +np.float64,0x7fd4ee246fa9dc48,0x40862add895d808f,1 +np.float64,0x3ff0675cfa00ceba,0x3fccb2222c5493ca,1 +np.float64,0x3ffe5cb38f3cb967,0x3ff417773483d161,1 +np.float64,0x7fe11469ea2228d3,0x40862ec8bd3e497f,1 +np.float64,0x3fff13cba67e2798,0x3ff4872fe2c26104,1 +np.float64,0x3ffb73d3d316e7a8,0x3ff2276f08612ea2,1 +np.float64,0x7febfb70f237f6e1,0x408632bbc9450721,1 +np.float64,0x3ff84a0d87b0941b,0x3fef4f3b707e3145,1 +np.float64,0x7fd71fd5082e3fa9,0x40862ba9b4091172,1 +np.float64,0x3ff560737d8ac0e7,0x3fe98cc9c9ba2f61,1 +np.float64,0x3ff46a266ae8d44d,0x3fe74190e5234822,1 +np.float64,0x7fe8cc9225719923,0x408631c477db9708,1 +np.float64,0x3ff871de5930e3bc,0x3fef948f7d00fbef,1 +np.float64,0x3ffd0bc7895a178f,0x3ff33ffc18357721,1 +np.float64,0x3ff66099f9ccc134,0x3febb2bc775b4720,1 +np.float64,0x7fe91f1be9723e37,0x408631deec3a5c9e,1 +np.float64,0x7fd60462f12c08c5,0x40862b4537e1c1c6,1 +np.float64,0x3ff053100ba0a620,0x3fc9bc0c21e2284f,1 +np.float64,0x7fd864c611b0c98b,0x40862c1724506255,1 +np.float64,0x7fd191decb2323bd,0x408629771bfb68cc,1 +np.float64,0x3ff792a1656f2543,0x3fee054f2e135fcf,1 +np.float64,0x7fd03625cea06c4b,0x408628d253b840e3,1 +np.float64,0x7fc3967716272ced,0x408624ca35451042,1 +np.float64,0x7fe6636cb32cc6d8,0x408630f3073a22a7,1 +np.float64,0x3ffc2d3976585a73,0x3ff2a9d4c0dae607,1 +np.float64,0x3fffd10ee79fa21e,0x3ff4f70db69888be,1 +np.float64,0x3ff1d4fcae23a9f9,0x3fde57675007b23c,1 +np.float64,0x3ffa5da19e14bb43,0x3ff1599f74d1c113,1 +np.float64,0x3ff7f4eb0d6fe9d6,0x3feeb85189659e99,1 +np.float64,0x7fbcca44d8399489,0x408622536234f7c1,1 +np.float64,0x7fef5f97ec3ebf2f,0x408633a60fdde0d7,1 +np.float64,0x7fde4a66da3c94cd,0x40862dd290ebc184,1 +np.float64,0x3ff072957a40e52b,0x3fce34d913d87613,1 +np.float64,0x3ff2bc4c9dc57899,0x3fe27497e6ebe27d,1 +np.float64,0x7fd7d152b4afa2a4,0x40862be63469eecd,1 +np.float64,0x3ff957d768f2afaf,0x3ff08b4ad8062a73,1 +np.float64,0x7fe4bc5f45a978be,0x40863055fd66e4eb,1 +np.float64,0x7fc90de345321bc6,0x408626c24ce7e370,1 +np.float64,0x3ff2d7a37d85af47,0x3fe2cd6a40b544a0,1 +np.float64,0x7fe536ea1f6a6dd3,0x40863084bade76a3,1 +np.float64,0x3fff970c9cdf2e19,0x3ff4d524572356dd,1 +np.float64,0x3ffe173ae63c2e76,0x3ff3ec1ee35ad28c,1 +np.float64,0x3ff714025cce2805,0x3fed168aedff4a2b,1 +np.float64,0x7fce7b414c3cf682,0x40862853dcdd19d4,1 +np.float64,0x3ff019623f2032c4,0x3fbc7c602df0bbaf,1 +np.float64,0x3ff72f57fd0e5eb0,0x3fed4ae75f697432,1 +np.float64,0x3ff283778e8506ef,0x3fe1b5c5725b0dfd,1 +np.float64,0x3ff685a29aed0b45,0x3febfdfdedd581e2,1 +np.float64,0x3ff942d24fb285a4,0x3ff07a224c3ecfaf,1 +np.float64,0x3ff2e4a9f465c954,0x3fe2f71905399e8f,1 
+np.float64,0x7fdfa1c7fa3f438f,0x40862e2b4e06f098,1 +np.float64,0x3ff49b59c26936b4,0x3fe7bc41c8c1e59d,1 +np.float64,0x3ff2102d3704205a,0x3fe014bf7e28924e,1 +np.float64,0x3ff88de3b8311bc8,0x3fefc4e3e0a15a89,1 +np.float64,0x7fea5ba25374b744,0x40863241519c9b66,1 +np.float64,0x3fffe5df637fcbbf,0x3ff5032488f570f9,1 +np.float64,0x7fe67cfefe6cf9fd,0x408630fc25333cb4,1 +np.float64,0x3ff090bf2b01217e,0x3fd0f6fcf1092b4a,1 +np.float64,0x7fecd75bc5f9aeb7,0x408632f9b6c2e013,1 +np.float64,0x7fe15df38c62bbe6,0x40862eeae5ac944b,1 +np.float64,0x3ff4757875a8eaf1,0x3fe75e0eafbe28ce,1 +np.float64,0x7fecca8a51b99514,0x408632f627c23923,1 +np.float64,0x3ff91ca529d2394a,0x3ff05abb327fd1ca,1 +np.float64,0x3ffb962993b72c53,0x3ff23ff831717579,1 +np.float64,0x3ffd548a2c7aa914,0x3ff36fac7f56d716,1 +np.float64,0x7fbafb5cb035f6b8,0x408621ce898a02fb,1 +np.float64,0x3ff1d86daca3b0db,0x3fde73536c29218c,1 +np.float64,0x7fa8d0f8f431a1f1,0x40861b97a03c3a18,1 +np.float64,0x3ff44f1067489e21,0x3fe6fcbd8144ab2a,1 +np.float64,0x7fec062b07380c55,0x408632bed9c6ce85,1 +np.float64,0x3ff7e11e0fcfc23c,0x3fee94ada7efaac4,1 +np.float64,0x7fe77505c1aeea0b,0x4086315287dda0ba,1 +np.float64,0x7fc465af2728cb5d,0x4086251d236107f7,1 +np.float64,0x3ffe811c4a7d0238,0x3ff42df7e8b6cf2d,1 +np.float64,0x7fe05a471260b48d,0x40862e6fa502738b,1 +np.float64,0x7fec32cd9778659a,0x408632cb8d98c5a3,1 +np.float64,0x7fd203a220a40743,0x408629aa43b010c0,1 +np.float64,0x7fed71f7d17ae3ef,0x4086332428207101,1 +np.float64,0x3ff3918999e72313,0x3fe4fe5e8991402f,1 +np.float64,0x3ff3ecae38c7d95c,0x3fe5fa787d887981,1 +np.float64,0x7fd65345b82ca68a,0x40862b61aed8c64e,1 +np.float64,0x3ff1efdd01c3dfba,0x3fdf2eae36139204,1 +np.float64,0x3ffba9344f375268,0x3ff24d7fdcfc313b,1 +np.float64,0x7fd0469b35208d35,0x408628da6ed24bdd,1 +np.float64,0x7fe525782daa4aef,0x4086307e240c8b30,1 +np.float64,0x3ff8e473d371c8e8,0x3ff02beebd4171c7,1 +np.float64,0x3ff59a43898b3487,0x3fea0dc0a6acea0a,1 +np.float64,0x7fef50c7263ea18d,0x408633a247d7cd42,1 +np.float64,0x7fe8b5a301f16b45,0x408631bd0e71c855,1 +np.float64,0x3ff209369de4126d,0x3fdff4264334446b,1 +np.float64,0x3ffbe2ff4437c5fe,0x3ff2763b356814c7,1 +np.float64,0x3ff55938156ab270,0x3fe97c70514f91bf,1 +np.float64,0x3fff5d8bf81ebb18,0x3ff4b333b230672a,1 +np.float64,0x3ff16a317bc2d463,0x3fdab84e7faa468f,1 +np.float64,0x3ff7e64f8dafcc9f,0x3fee9e0bd57e9566,1 +np.float64,0x7fef4dc065be9b80,0x408633a181e25abb,1 +np.float64,0x3ff64a24a62c9449,0x3feb849ced76437e,1 +np.float64,0x7fc3cb85ef27970b,0x408624dfc39c8f74,1 +np.float64,0x7fec2162a77842c4,0x408632c69b0d43b6,1 +np.float64,0x7feccee6dc399dcd,0x408632f75de98c46,1 +np.float64,0x7faff4f5f43fe9eb,0x40861d9d89be14c9,1 +np.float64,0x7fee82df60fd05be,0x4086336cfdeb7317,1 +np.float64,0x3ffe54588d9ca8b1,0x3ff41247eb2f75ca,1 +np.float64,0x3ffe5615b55cac2c,0x3ff4135c4eb11620,1 +np.float64,0x3ffdaf9a6a1b5f35,0x3ff3aa70e50d1692,1 +np.float64,0x3ff69c045f4d3809,0x3fec2b00734e2cde,1 +np.float64,0x7fd049239aa09246,0x408628dbad6dd995,1 +np.float64,0x3ff2acbe8465597d,0x3fe24138652195e1,1 +np.float64,0x3ffb288302365106,0x3ff1f0f86ca7e5d1,1 +np.float64,0x3fff6fe8d87edfd2,0x3ff4be136acf53c5,1 +np.float64,0x3ffc87c8bfb90f92,0x3ff2e7bbd65867cb,1 +np.float64,0x3ff173327ca2e665,0x3fdb0b945abb00d7,1 +np.float64,0x3ff9a5cf7a134b9f,0x3ff0ca2450f07c78,1 +np.float64,0x7faf782b043ef055,0x40861d7e0e9b35ef,1 +np.float64,0x3ffa0874975410e9,0x3ff117ee3dc8f5ba,1 +np.float64,0x7fc710fc7f2e21f8,0x40862618fed167fb,1 +np.float64,0x7feb73f4c876e7e9,0x40863294ae3ac1eb,1 +np.float64,0x8000000000000000,0xfff8000000000000,1 
+np.float64,0x7fb46615c028cc2b,0x40861f91bade4dad,1 +np.float64,0x7fc26b064624d60c,0x4086244c1b76c938,1 +np.float64,0x3ff06ab9fa40d574,0x3fcd282fd971d1b4,1 +np.float64,0x3ff61da7410c3b4e,0x3feb28201031af02,1 +np.float64,0x3ffec7ba1b9d8f74,0x3ff459342511f952,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe5d570422baae0,0x408630bfa75008c9,1 +np.float64,0x3ffa895832f512b0,0x3ff17ad41555dccb,1 +np.float64,0x7fd343ac21a68757,0x40862a33ad59947a,1 +np.float64,0x3ffc1eeb37383dd6,0x3ff29ff29e55a006,1 +np.float64,0x7fee3c5c507c78b8,0x4086335a6b768090,1 +np.float64,0x7fe96d774a32daee,0x408631f7b9937e36,1 +np.float64,0x7fb878362430f06b,0x40862106603497b6,1 +np.float64,0x7fec0a79c03814f3,0x408632c01479905e,1 +np.float64,0x3ffa2f143c145e28,0x3ff135e25d902e1a,1 +np.float64,0x3ff14ccff80299a0,0x3fd9a0cd3397b14c,1 +np.float64,0x3ff97980dcb2f302,0x3ff0a6942a8133ab,1 +np.float64,0x3ff872e2d1f0e5c6,0x3fef96526eb2f756,1 +np.float64,0x7fdf1c9b46be3936,0x40862e0957fee329,1 +np.float64,0x7fcab6525d356ca4,0x408627458791f029,1 +np.float64,0x3ff964e74a52c9ce,0x3ff095e8845d523c,1 +np.float64,0x3ffb3aa23c967544,0x3ff1fe282d897c13,1 +np.float64,0x7fdd8a36afbb146c,0x40862d9f2b05f61b,1 +np.float64,0x3ffea39f42fd473e,0x3ff4432a48176399,1 +np.float64,0x7fea614f68b4c29e,0x408632430a750385,1 +np.float64,0x7feeafb86abd5f70,0x40863378b79f70cf,1 +np.float64,0x3ff80bc94eb01792,0x3feee138e9d626bd,1 +np.float64,0x7fcaca74743594e8,0x4086274b8ce4d1e1,1 +np.float64,0x3ff8b14815316290,0x3ff000b3526c8321,1 +np.float64,0x7fc698eb5f2d31d6,0x408625eeec86cd2b,1 +np.float64,0x7fe15429a3e2a852,0x40862ee6621205b8,1 +np.float64,0x7fee37f81b7c6fef,0x4086335941ed80dd,1 +np.float64,0x3ff8097ab3f012f6,0x3feedd1bafc3196e,1 +np.float64,0x7fe7c889ceaf9113,0x4086316ed13f2394,1 +np.float64,0x7fceca94513d9528,0x4086286893a06824,1 +np.float64,0x3ff593a103cb2742,0x3fe9ff1af4f63cc9,1 +np.float64,0x7fee237d24bc46f9,0x40863353d4142c87,1 +np.float64,0x3ffbf71e4777ee3c,0x3ff2844c0ed9f4d9,1 +np.float64,0x3ff490c65c09218d,0x3fe7a2216d9f69fd,1 +np.float64,0x3fff5ceaf1feb9d6,0x3ff4b2d430a90110,1 +np.float64,0x3ff55baecceab75e,0x3fe98203980666c4,1 +np.float64,0x3ff511bc306a2378,0x3fe8d81ce7be7b50,1 +np.float64,0x3ff38f83dcc71f08,0x3fe4f89f130d5f87,1 +np.float64,0x3ff73a3676ee746d,0x3fed5f98a65107ee,1 +np.float64,0x7fc27e50c824fca1,0x408624547828bc49,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x3fff38959ebe712b,0x3ff49d362c7ba16a,1 +np.float64,0x3ffad6d23a75ada4,0x3ff1b4dda6394ed0,1 +np.float64,0x3ffe77c6c2dcef8e,0x3ff4283698835ecb,1 +np.float64,0x3fff5feb413ebfd6,0x3ff4b49bcbdb3aa9,1 +np.float64,0x3ff0d30aa161a615,0x3fd4751bcdd7d727,1 +np.float64,0x3ff51e07e00a3c10,0x3fe8f4bd1408d694,1 +np.float64,0x8010000000000000,0xfff8000000000000,1 +np.float64,0x7fd231d2fe2463a5,0x408629beaceafcba,1 +np.float64,0x3fff6b4aee1ed696,0x3ff4bb58544bf8eb,1 +np.float64,0x3ff91fcd2f323f9a,0x3ff05d56e33db6b3,1 +np.float64,0x3ff3b889ab477113,0x3fe56bdeab74cce5,1 +np.float64,0x3ff99bfe30d337fc,0x3ff0c24bbf265561,1 +np.float64,0x3ffbe9e5eaf7d3cc,0x3ff27b0fe60f827a,1 +np.float64,0x7fd65678e92cacf1,0x40862b62d44fe8b6,1 +np.float64,0x7fd9cc477233988e,0x40862c89c638ee48,1 +np.float64,0x3ffc123c72d82479,0x3ff297294d05cbc0,1 +np.float64,0x3ff58abad58b1576,0x3fe9eb65da2a867a,1 +np.float64,0x7fe534887b2a6910,0x40863083d4ec2877,1 +np.float64,0x7fe1d3dcb123a7b8,0x40862f208116c55e,1 +np.float64,0x7fd4d570dba9aae1,0x40862ad412c413cd,1 +np.float64,0x3fffce7d3fdf9cfa,0x3ff4f58f02451928,1 +np.float64,0x3ffa76901c74ed20,0x3ff16c9a5851539c,1 
+np.float64,0x7fdd88ffa23b11fe,0x40862d9ed6c6f426,1 +np.float64,0x3ff09fdbb9e13fb7,0x3fd1d2ae4fcbf713,1 +np.float64,0x7fe64567772c8ace,0x408630e845dbc290,1 +np.float64,0x7fb1a849ba235092,0x40861e6a291535b2,1 +np.float64,0x3ffaddb105f5bb62,0x3ff1b9f68f4c419b,1 +np.float64,0x7fd2fc3d5025f87a,0x40862a15cbc1df75,1 +np.float64,0x7fdea7d872bd4fb0,0x40862deb190b2c50,1 +np.float64,0x7fd50ea97eaa1d52,0x40862ae9edc4c812,1 +np.float64,0x3fff659c245ecb38,0x3ff4b7fb18b31aea,1 +np.float64,0x3ff3f1fbb7c7e3f7,0x3fe608bd9d76268c,1 +np.float64,0x3ff76869d9aed0d4,0x3fedb6c23d3a317b,1 +np.float64,0x7fedd4efe93ba9df,0x4086333edeecaa43,1 +np.float64,0x3ff9a5bd4eb34b7a,0x3ff0ca15d02bc960,1 +np.float64,0x3ffd9359cc5b26b4,0x3ff39850cb1a6b6c,1 +np.float64,0x7fe912d0427225a0,0x408631db00e46272,1 +np.float64,0x3ffb3802fe567006,0x3ff1fc4093646465,1 +np.float64,0x3ff02cc38a205987,0x3fc2e8182802a07b,1 +np.float64,0x3ffda953dd1b52a8,0x3ff3a66c504cf207,1 +np.float64,0x7fe0a487e4a1490f,0x40862e93a6f20152,1 +np.float64,0x7fed265ed1fa4cbd,0x4086330f838ae431,1 +np.float64,0x7fd0000114200001,0x408628b76ec48b5c,1 +np.float64,0x3ff2c262786584c5,0x3fe288860d354b0f,1 +np.float64,0x8000000000000001,0xfff8000000000000,1 +np.float64,0x3ffdae9f075b5d3e,0x3ff3a9d006ae55c1,1 +np.float64,0x3ffb69c72156d38e,0x3ff22037cbb85e5b,1 +np.float64,0x7feeae255f7d5c4a,0x408633784e89bc05,1 +np.float64,0x7feb13927c362724,0x408632786630c55d,1 +np.float64,0x7fef49e072be93c0,0x408633a08451d476,1 +np.float64,0x3fff23d6337e47ac,0x3ff490ceb6e634ae,1 +np.float64,0x3ffba82cf8f7505a,0x3ff24cc51c73234d,1 +np.float64,0x7fe948719ef290e2,0x408631ec0b36476e,1 +np.float64,0x3ff41926c5e8324e,0x3fe670e14bbda8cd,1 +np.float64,0x3ff91f09c1523e14,0x3ff05cb5731878da,1 +np.float64,0x3ff6ae6afccd5cd6,0x3fec4fbeca764086,1 +np.float64,0x3ff927f7e0f24ff0,0x3ff06413eeb8eb1e,1 +np.float64,0x3ff19dd2b9e33ba5,0x3fdc882f97994600,1 +np.float64,0x7fe8e502c5b1ca05,0x408631cc56526fff,1 +np.float64,0x7feb49f70fb693ed,0x4086328868486fcd,1 +np.float64,0x3ffd942d535b285a,0x3ff398d8d89f52ca,1 +np.float64,0x7fc3b9c5c627738b,0x408624d893e692ca,1 +np.float64,0x7fea0780ff340f01,0x408632279fa46704,1 +np.float64,0x7fe4c90066a99200,0x4086305adb47a598,1 +np.float64,0x7fdb209113364121,0x40862cf0ab64fd7d,1 +np.float64,0x3ff38617e5470c30,0x3fe4ddc0413b524f,1 +np.float64,0x7fea1b5b803436b6,0x4086322db767f091,1 +np.float64,0x7fe2004898e40090,0x40862f3457795dc5,1 +np.float64,0x3ff3c4360ac7886c,0x3fe58c29843a4c75,1 +np.float64,0x3ff504bc168a0978,0x3fe8b9ada7f698e6,1 +np.float64,0x3ffd3e936fda7d27,0x3ff3615912c5b4ac,1 +np.float64,0x3ffbdc52fb97b8a6,0x3ff2718dae5f1f2b,1 +np.float64,0x3fffef6d84ffdedb,0x3ff508adbc8556cf,1 +np.float64,0x3ff23b65272476ca,0x3fe0b646ed2579eb,1 +np.float64,0x7fe4633068a8c660,0x408630334a4b7ff7,1 +np.float64,0x3ff769b754aed36f,0x3fedb932af0223f9,1 +np.float64,0x7fe7482d92ee905a,0x408631432de1b057,1 +np.float64,0x3ff5dd682aabbad0,0x3fea9fd5e506a86d,1 +np.float64,0x7fd68399a2ad0732,0x40862b72ed89805d,1 +np.float64,0x3ffad7acc3d5af5a,0x3ff1b57fe632c948,1 +np.float64,0x3ffc68e43698d1c8,0x3ff2d2be6f758761,1 +np.float64,0x3ff4e517fbc9ca30,0x3fe86eddf5e63a58,1 +np.float64,0x3ff34c63c56698c8,0x3fe435b74ccd6a13,1 +np.float64,0x7fea9456c17528ad,0x4086325275237015,1 +np.float64,0x7fee6573f2fccae7,0x4086336543760346,1 +np.float64,0x7fd5496fb9aa92de,0x40862b0023235667,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3ffb70e31256e1c6,0x3ff22552f54b13e0,1 +np.float64,0x3ff66a33988cd467,0x3febc656da46a1ca,1 +np.float64,0x3fff0af2eb1e15e6,0x3ff481dec325f5c8,1 
+np.float64,0x3ff6a0233d0d4046,0x3fec33400958eda1,1 +np.float64,0x7fdb11e2d5b623c5,0x40862cec55e405f9,1 +np.float64,0x3ffb8a015ad71402,0x3ff2374d7b563a72,1 +np.float64,0x3ff1807d8ce300fb,0x3fdb849e4bce8335,1 +np.float64,0x3ffefd535e3dfaa6,0x3ff479aaac6ffe79,1 +np.float64,0x3ff701e23a6e03c4,0x3fecf39072d96fc7,1 +np.float64,0x3ff4ac809f895901,0x3fe7e6598f2335a5,1 +np.float64,0x3ff0309f26a0613e,0x3fc3b3f4b2783690,1 +np.float64,0x3ff241dd0ce483ba,0x3fe0cde2cb639144,1 +np.float64,0x3ffabce63fb579cc,0x3ff1a18fe2a2da59,1 +np.float64,0x3ffd84b967db0973,0x3ff38ee4f240645d,1 +np.float64,0x7fc3f88b9a27f116,0x408624f1e10cdf3f,1 +np.float64,0x7fe1d5fd5923abfa,0x40862f2175714a3a,1 +np.float64,0x7fe487b145690f62,0x4086304190700183,1 +np.float64,0x7fe7997feaef32ff,0x4086315eeefdddd2,1 +np.float64,0x3ff8f853b671f0a8,0x3ff03c907353a8da,1 +np.float64,0x7fca4c23b5349846,0x408627257ace5778,1 +np.float64,0x7fe0c9bf3a21937d,0x40862ea576c3ea43,1 +np.float64,0x7fc442b389288566,0x4086250f5f126ec9,1 +np.float64,0x7fc6d382ed2da705,0x40862603900431b0,1 +np.float64,0x7fe40b069068160c,0x4086301066468124,1 +np.float64,0x3ff7f62a146fec54,0x3feeba8dfc4363fe,1 +np.float64,0x3ff721e8e94e43d2,0x3fed313a6755d34f,1 +np.float64,0x7fe579feaf2af3fc,0x4086309ddefb6112,1 +np.float64,0x3ffe2c6bde5c58d8,0x3ff3f9665dc9a16e,1 +np.float64,0x7fcf9998ed3f3331,0x4086289dab274788,1 +np.float64,0x7fdb03af2236075d,0x40862ce82252e490,1 +np.float64,0x7fe72799392e4f31,0x40863137f428ee71,1 +np.float64,0x7f9f2190603e4320,0x408617dc5b3b3c3c,1 +np.float64,0x3ff69c56d52d38ae,0x3fec2ba59fe938b2,1 +np.float64,0x7fdcde27bf39bc4e,0x40862d70086cd06d,1 +np.float64,0x3ff654d6b8eca9ae,0x3feb9aa0107609a6,1 +np.float64,0x7fdf69d967bed3b2,0x40862e1d1c2b94c2,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0x7fedfd073f3bfa0d,0x40863349980c2c8b,1 +np.float64,0x7f7c1856803830ac,0x40860bf312b458c7,1 +np.float64,0x7fe9553f1bb2aa7d,0x408631f0173eadd5,1 +np.float64,0x3ff6e92efc2dd25e,0x3fecc38f98e7e1a7,1 +np.float64,0x7fe9719ac532e335,0x408631f906cd79c3,1 +np.float64,0x3ff60e56ae4c1cad,0x3feb07ef8637ec7e,1 +np.float64,0x3ff0d0803501a100,0x3fd455c0af195a9c,1 +np.float64,0x7fe75248a3eea490,0x40863146a614aec1,1 +np.float64,0x7fdff61ead3fec3c,0x40862e408643d7aa,1 +np.float64,0x7fed4ac7a4fa958e,0x408633197b5cf6ea,1 +np.float64,0x7fe58d44562b1a88,0x408630a5098d1bbc,1 +np.float64,0x7fd89dcdb1b13b9a,0x40862c29c2979288,1 +np.float64,0x3ff205deda240bbe,0x3fdfda67c84fd3a8,1 +np.float64,0x7fdf84c15abf0982,0x40862e23f361923d,1 +np.float64,0x3ffe012b3afc0256,0x3ff3de3dfa5f47ce,1 +np.float64,0x3ffe2f3512dc5e6a,0x3ff3fb245206398e,1 +np.float64,0x7fed6174c2bac2e9,0x4086331faa699617,1 +np.float64,0x3ff1f30f8783e61f,0x3fdf47e06f2c40d1,1 +np.float64,0x3ff590da9eab21b5,0x3fe9f8f7b4baf3c2,1 +np.float64,0x3ffb3ca1eb967944,0x3ff1ff9baf66d704,1 +np.float64,0x7fe50ba9a5aa1752,0x408630745ab7fd3c,1 +np.float64,0x3ff43743a4a86e87,0x3fe6bf7ae80b1dda,1 +np.float64,0x3ff47e1a24e8fc34,0x3fe773acca44c7d6,1 +np.float64,0x3ff589ede9eb13dc,0x3fe9e99f28fab3a4,1 +np.float64,0x3ff72f2cbf8e5e5a,0x3fed4a94e7edbf24,1 +np.float64,0x3ffa4f9bbc549f38,0x3ff14ee60aea45d3,1 +np.float64,0x3ff975dae732ebb6,0x3ff0a3a1fbd7284a,1 +np.float64,0x7fbcf14ee039e29d,0x4086225e33f3793e,1 +np.float64,0x3ff10e027f621c05,0x3fd71cce2452b4e0,1 +np.float64,0x3ff33ea193067d43,0x3fe40cbac4daaddc,1 +np.float64,0x7fbef8f2263df1e3,0x408622e905c8e1b4,1 +np.float64,0x3fff7f5bfe3efeb8,0x3ff4c732e83df253,1 +np.float64,0x3ff5700a6b4ae015,0x3fe9afdd7b8b82b0,1 +np.float64,0x3ffd5099da5aa134,0x3ff36d1bf26e55bf,1 
+np.float64,0x3ffed8e0f89db1c2,0x3ff4639ff065107a,1 +np.float64,0x3fff9d0c463f3a18,0x3ff4d8a9f297cf52,1 +np.float64,0x3ff23db5b2e47b6b,0x3fe0bebdd48f961a,1 +np.float64,0x3ff042bff1e08580,0x3fc713bf24cc60ef,1 +np.float64,0x7feb4fe97a769fd2,0x4086328a26675646,1 +np.float64,0x3ffeafbfeedd5f80,0x3ff44a955a553b1c,1 +np.float64,0x3ff83fb524507f6a,0x3fef3d1729ae0976,1 +np.float64,0x3ff1992294433245,0x3fdc5f5ce53dd197,1 +np.float64,0x7fe89fe629b13fcb,0x408631b601a83867,1 +np.float64,0x7fe53e4d74aa7c9a,0x40863087839b52f1,1 +np.float64,0x3ff113713e6226e2,0x3fd757631ca7cd09,1 +np.float64,0x7fd4a0b7a629416e,0x40862abfba27a09b,1 +np.float64,0x3ff184c6e2a3098e,0x3fdbab2e3966ae57,1 +np.float64,0x3ffafbbf77f5f77f,0x3ff1d02bb331d9f9,1 +np.float64,0x3ffc6099a358c134,0x3ff2cd16941613d1,1 +np.float64,0x3ffb7c441ef6f888,0x3ff22d7b12e31432,1 +np.float64,0x3ff625ba5eec4b75,0x3feb39060e55fb79,1 +np.float64,0x7fde879acbbd0f35,0x40862de2aab4d72d,1 +np.float64,0x7f930aed982615da,0x408613edb6df8528,1 +np.float64,0x7fa4b82dac29705a,0x40861a261c0a9aae,1 +np.float64,0x7fced5c16b3dab82,0x4086286b7a73e611,1 +np.float64,0x7fe133749d2266e8,0x40862ed73a41b112,1 +np.float64,0x3ff2d8146ea5b029,0x3fe2ced55dbf997d,1 +np.float64,0x3ff60dac77ac1b59,0x3feb0688b0e54c7b,1 +np.float64,0x3ff275d9b024ebb3,0x3fe186b87258b834,1 +np.float64,0x3ff533e6500a67cd,0x3fe92746c8b50ddd,1 +np.float64,0x7fe370896666e112,0x40862fd1ca144736,1 +np.float64,0x7fee7695357ced29,0x40863369c459420e,1 +np.float64,0x7fd1e0528023c0a4,0x4086299a85caffd0,1 +np.float64,0x7fd05c7b24a0b8f5,0x408628e52824386f,1 +np.float64,0x3ff11dcc3b023b98,0x3fd7c56c8cef1be1,1 +np.float64,0x7fc9d9fae933b3f5,0x408627027404bc5f,1 +np.float64,0x7fe2359981246b32,0x40862f4be675e90d,1 +np.float64,0x3ffb10a949962152,0x3ff1df88f83b8cde,1 +np.float64,0x3ffa65b53654cb6a,0x3ff15fc8956ccc87,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x7fad97ef703b2fde,0x40861d002f3d02da,1 +np.float64,0x3ff57aaf93aaf55f,0x3fe9c7b01f194edb,1 +np.float64,0x7fe9ecd73f33d9ad,0x4086321f69917205,1 +np.float64,0x3ff0dcb79c61b96f,0x3fd4eac86a7a9c38,1 +np.float64,0x7fee9c12ffbd3825,0x4086337396cd706d,1 +np.float64,0x3ff52c40af4a5881,0x3fe915a8a7de8f00,1 +np.float64,0x3ffbcfff59779ffe,0x3ff268e523fe8dda,1 +np.float64,0x7fe014cb4b602996,0x40862e4d5de42a03,1 +np.float64,0x7fae2370e83c46e1,0x40861d258dd5b3ee,1 +np.float64,0x7fe9e33602f3c66b,0x4086321c704ac2bb,1 +np.float64,0x3ff648acd74c915a,0x3feb8195ca53bcaa,1 +np.float64,0x7fe385f507670be9,0x40862fda95ebaf44,1 +np.float64,0x3ffb0e382c361c70,0x3ff1ddbea963e0a7,1 +np.float64,0x3ff47d6b6ae8fad7,0x3fe771f80ad37cd2,1 +np.float64,0x3ffca7d538f94faa,0x3ff2fd5f62e851ac,1 +np.float64,0x3ff83e949c107d29,0x3fef3b1c5bbac99b,1 +np.float64,0x7fc6fb933a2df725,0x408626118e51a286,1 +np.float64,0x7fe43a1454e87428,0x4086302318512d9b,1 +np.float64,0x7fe51fe32aaa3fc5,0x4086307c07271348,1 +np.float64,0x3ff35e563966bcac,0x3fe46aa2856ef85f,1 +np.float64,0x3ff84dd4e4909baa,0x3fef55d86d1d5c2e,1 +np.float64,0x7febe3d84077c7b0,0x408632b507686f03,1 +np.float64,0x3ff6aca2e32d5946,0x3fec4c32a2368ee3,1 +np.float64,0x7fe7070e3e6e0e1b,0x4086312caddb0454,1 +np.float64,0x7fd3657f2aa6cafd,0x40862a41acf47e70,1 +np.float64,0x3ff61534456c2a68,0x3feb1663900af13b,1 +np.float64,0x3ff8bc556eb178ab,0x3ff00a16b5403f88,1 +np.float64,0x3ffa7782e3f4ef06,0x3ff16d529c94a438,1 +np.float64,0x7fc15785ed22af0b,0x408623d0cd94fb86,1 +np.float64,0x3ff2e3eeb6e5c7dd,0x3fe2f4c4876d3edf,1 +np.float64,0x3ff2e4e17e85c9c3,0x3fe2f7c9e437b22e,1 +np.float64,0x7feb3aaf67f6755e,0x40863283ec4a0d76,1 
+np.float64,0x7fe89efcf7313df9,0x408631b5b5e41263,1 +np.float64,0x3ffcc6fad4f98df6,0x3ff31245778dff6d,1 +np.float64,0x3ff356114466ac22,0x3fe45253d040a024,1 +np.float64,0x3ff81c70d2d038e2,0x3feefed71ebac776,1 +np.float64,0x7fdb75c96136eb92,0x40862d09a603f03e,1 +np.float64,0x3ff340f91b8681f2,0x3fe413bb6e6d4a54,1 +np.float64,0x3fff906079df20c1,0x3ff4d13869d16bc7,1 +np.float64,0x3ff226a42d644d48,0x3fe0698d316f1ac0,1 +np.float64,0x3ff948abc3b29158,0x3ff07eeb0b3c81ba,1 +np.float64,0x3ffc25df1fb84bbe,0x3ff2a4c13ad4edad,1 +np.float64,0x7fe07ea3b960fd46,0x40862e815b4cf43d,1 +np.float64,0x3ff497d3dae92fa8,0x3fe7b3917bf10311,1 +np.float64,0x7fea561db1f4ac3a,0x4086323fa4aef2a9,1 +np.float64,0x7fd1b49051236920,0x40862986d8759ce5,1 +np.float64,0x7f7ba3bd6037477a,0x40860bd19997fd90,1 +np.float64,0x3ff01126dd00224e,0x3fb76b67938dfb11,1 +np.float64,0x3ff29e1105053c22,0x3fe2102a4c5fa102,1 +np.float64,0x3ff9de2a6553bc55,0x3ff0f6cfe4dea30e,1 +np.float64,0x7fc558e7d42ab1cf,0x4086257a608fc055,1 +np.float64,0x3ff79830a74f3061,0x3fee0f93db153d65,1 +np.float64,0x7fe2661648e4cc2c,0x40862f6117a71eb2,1 +np.float64,0x3ff140cf4262819e,0x3fd92aefedae1ab4,1 +np.float64,0x3ff5f36251abe6c5,0x3feaced481ceaee3,1 +np.float64,0x7fc80911d5301223,0x4086266d4757f768,1 +np.float64,0x3ff9079a6c320f35,0x3ff04949d21ebe1e,1 +np.float64,0x3ffde8d2e09bd1a6,0x3ff3cedca8a5db5d,1 +np.float64,0x3ffadd1de375ba3c,0x3ff1b989790e8d93,1 +np.float64,0x3ffdbc40ee1b7882,0x3ff3b286b1c7da57,1 +np.float64,0x3ff8ff514771fea2,0x3ff04264add00971,1 +np.float64,0x7fefd7d0e63fafa1,0x408633c47d9f7ae4,1 +np.float64,0x3ffc47798c588ef3,0x3ff2bbe441fa783a,1 +np.float64,0x7fe6ebc55b6dd78a,0x408631232d9abf31,1 +np.float64,0xbff0000000000000,0xfff8000000000000,1 +np.float64,0x7fd378e4afa6f1c8,0x40862a49a8f98cb4,1 +np.float64,0x0,0xfff8000000000000,1 +np.float64,0x3ffe88ed7efd11db,0x3ff432c7ecb95492,1 +np.float64,0x3ff4f5509289eaa1,0x3fe8955a11656323,1 +np.float64,0x7fda255b41344ab6,0x40862ca53676a23e,1 +np.float64,0x3ffebe85b9bd7d0c,0x3ff453992cd55dea,1 +np.float64,0x3ff5d6180b8bac30,0x3fea901c2160c3bc,1 +np.float64,0x3ffcdfb8fcf9bf72,0x3ff322c83b3bc735,1 +np.float64,0x3ff3c91c26679238,0x3fe599a652b7cf59,1 +np.float64,0x7fc389f7a62713ee,0x408624c518edef93,1 +np.float64,0x3ffe1245ba1c248c,0x3ff3e901b2c4a47a,1 +np.float64,0x7fe1e76e95e3cedc,0x40862f29446f9eff,1 +np.float64,0x3ff02ae4f92055ca,0x3fc28221abd63daa,1 +np.float64,0x7fbf648a143ec913,0x40862304a0619d03,1 +np.float64,0x3ff2be7ef8657cfe,0x3fe27bcc6c97522e,1 +np.float64,0x3ffa7595e514eb2c,0x3ff16bdc64249ad1,1 +np.float64,0x3ff4ee130049dc26,0x3fe884354cbad8c9,1 +np.float64,0x3ff19211fc232424,0x3fdc2160bf3eae40,1 +np.float64,0x3ffec215aedd842c,0x3ff455c4cdd50c32,1 +np.float64,0x7fe7cb50ffaf96a1,0x4086316fc06a53af,1 +np.float64,0x3fffa679161f4cf2,0x3ff4de30ba7ac5b8,1 +np.float64,0x7fdcb459763968b2,0x40862d646a21011d,1 +np.float64,0x3ff9f338d6d3e672,0x3ff1075835d8f64e,1 +np.float64,0x3ff8de3319d1bc66,0x3ff026ae858c0458,1 +np.float64,0x7fee0199d33c0333,0x4086334ad03ac683,1 +np.float64,0x3ffc06076c380c0f,0x3ff28eaec3814faa,1 +np.float64,0x3ffe9e2e235d3c5c,0x3ff43fd4d2191a7f,1 +np.float64,0x3ffd93b06adb2761,0x3ff398888239cde8,1 +np.float64,0x7fefe4b71cffc96d,0x408633c7ba971b92,1 +np.float64,0x7fb2940352252806,0x40861ed244bcfed6,1 +np.float64,0x3ffba4647e3748c9,0x3ff24a15f02e11b9,1 +np.float64,0x7fd2d9543725b2a7,0x40862a0708446596,1 +np.float64,0x7fc04997f120932f,0x4086235055d35251,1 +np.float64,0x3ff6d14313ada286,0x3fec94b177f5d3fc,1 +np.float64,0x3ff279fc8684f3f9,0x3fe19511c3e5b9a8,1 
+np.float64,0x3ff42f4609085e8c,0x3fe6aabe526ce2bc,1 +np.float64,0x7fc1c6c62a238d8b,0x408624037de7f6ec,1 +np.float64,0x7fe31ff4b8e63fe8,0x40862fb05b40fd16,1 +np.float64,0x7fd2a8825fa55104,0x408629f234d460d6,1 +np.float64,0x3ffe8c1d725d183b,0x3ff434bdc444143f,1 +np.float64,0x3ff0e9dc3e21d3b8,0x3fd58676e2c13fc9,1 +np.float64,0x3ffed03172fda063,0x3ff45e59f7aa6c8b,1 +np.float64,0x7fd74621962e8c42,0x40862bb6e90d66f8,1 +np.float64,0x3ff1faa29663f545,0x3fdf833a2c5efde1,1 +np.float64,0x7fda02834db40506,0x40862c9a860d6747,1 +np.float64,0x7f709b2fc021365f,0x408607be328eb3eb,1 +np.float64,0x7fec0d58aa381ab0,0x408632c0e61a1af6,1 +np.float64,0x3ff524d1720a49a3,0x3fe90479968d40fd,1 +np.float64,0x7fd64cb3b32c9966,0x40862b5f53c4b0b4,1 +np.float64,0x3ff9593e3ed2b27c,0x3ff08c6eea5f6e8b,1 +np.float64,0x3ff7de8b1f6fbd16,0x3fee9007abcfdf7b,1 +np.float64,0x7fe8d816d6b1b02d,0x408631c82e38a894,1 +np.float64,0x7fd726bbe22e4d77,0x40862bac16ee8d52,1 +np.float64,0x7fa70b07d42e160f,0x40861affcc4265e2,1 +np.float64,0x7fe18b4091e31680,0x40862effa8bce66f,1 +np.float64,0x3ff830253010604a,0x3fef21b2eaa75758,1 +np.float64,0x3fffcade407f95bc,0x3ff4f3734b24c419,1 +np.float64,0x3ff8c17cecb182fa,0x3ff00e75152d7bda,1 +np.float64,0x7fdad9b9d035b373,0x40862cdbabb793ba,1 +np.float64,0x3ff9f9e154f3f3c2,0x3ff10c8dfdbd2510,1 +np.float64,0x3ff465e162e8cbc3,0x3fe736c751c75b73,1 +np.float64,0x3ff9b4cd8493699b,0x3ff0d616235544b8,1 +np.float64,0x7fe557c4a56aaf88,0x4086309114ed12d9,1 +np.float64,0x7fe5999133eb3321,0x408630a9991a9b54,1 +np.float64,0x7fe7c9009e2f9200,0x4086316ef9359a47,1 +np.float64,0x3ff8545cabd0a8ba,0x3fef6141f1030c36,1 +np.float64,0x3ffa1f1712943e2e,0x3ff129849d492ce3,1 +np.float64,0x7fea803a14750073,0x4086324c652c276c,1 +np.float64,0x3ff5b6f97fcb6df3,0x3fea4cb0b97b18e9,1 +np.float64,0x7fc2efdfc425dfbf,0x40862485036a5c6e,1 +np.float64,0x7fe2c78e5be58f1c,0x40862f8b0a5e7baf,1 +np.float64,0x7fe80d7fff301aff,0x40863185e234060a,1 +np.float64,0x3ffd895d457b12ba,0x3ff391e2cac7a3f8,1 +np.float64,0x3ff44c9764a8992f,0x3fe6f6690396c232,1 +np.float64,0x3ff731688b8e62d1,0x3fed4ed70fac3839,1 +np.float64,0x3ff060200460c040,0x3fcbad4a07d97f0e,1 +np.float64,0x3ffbd2f70a17a5ee,0x3ff26afb46ade929,1 +np.float64,0x7febe9e841f7d3d0,0x408632b6c465ddd9,1 +np.float64,0x3ff2532f8be4a65f,0x3fe10c6cd8d64cf4,1 +np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,1 +np.float64,0x3ff3a1ae3a47435c,0x3fe52c00210cc459,1 +np.float64,0x7fe9c34ae6b38695,0x408632128d150149,1 +np.float64,0x3fff311029fe6220,0x3ff498b852f30bff,1 +np.float64,0x3ffd4485a1ba890c,0x3ff3653b6fa701cd,1 +np.float64,0x7fd52718b1aa4e30,0x40862af330d9c68c,1 +np.float64,0x3ff10b695a4216d3,0x3fd7009294e367b7,1 +np.float64,0x3ffdf73de59bee7c,0x3ff3d7fa96d2c1ae,1 +np.float64,0x3ff2f1c75965e38f,0x3fe320aaff3db882,1 +np.float64,0x3ff2a56a5a854ad5,0x3fe228cc4ad7e7a5,1 +np.float64,0x7fe60cd1cf6c19a3,0x408630d3d87a04b3,1 +np.float64,0x3ff89fa65c113f4c,0x3fefe3543773180c,1 +np.float64,0x3ffd253130ba4a62,0x3ff350b76ba692a0,1 +np.float64,0x7feaad7051f55ae0,0x40863259ff932d62,1 +np.float64,0x7fd9cc37cf33986f,0x40862c89c15f963b,1 +np.float64,0x3ff8c08de771811c,0x3ff00daa9c17acd7,1 +np.float64,0x7fea58b25d34b164,0x408632406d54cc6f,1 +np.float64,0x7fe5f161fd2be2c3,0x408630c9ddf272a5,1 +np.float64,0x3ff5840dbf8b081c,0x3fe9dc9117b4cbc7,1 +np.float64,0x3ff3fd762307faec,0x3fe6277cd530c640,1 +np.float64,0x3ff9095c98b212b9,0x3ff04abff170ac24,1 +np.float64,0x7feaac66017558cb,0x40863259afb4f8ce,1 +np.float64,0x7fd78f96bcaf1f2c,0x40862bd00175fdf9,1 +np.float64,0x3ffaca27e0959450,0x3ff1ab72b8f8633e,1 
+np.float64,0x3ffb7f18cb96fe32,0x3ff22f81bcb8907b,1 +np.float64,0x3ffcce48d1199c92,0x3ff317276f62c0b2,1 +np.float64,0x3ffcb9a7f3797350,0x3ff30958e0d6a34d,1 +np.float64,0x7fda569ef6b4ad3d,0x40862cb43b33275a,1 +np.float64,0x7fde9f0893bd3e10,0x40862de8cc036283,1 +np.float64,0x3ff428be3928517c,0x3fe699bb5ab58904,1 +np.float64,0x7fa4d3344029a668,0x40861a3084989291,1 +np.float64,0x3ff03607bd006c0f,0x3fc4c4840cf35f48,1 +np.float64,0x3ff2b1335c056267,0x3fe25000846b75a2,1 +np.float64,0x7fe0cb8bd8e19717,0x40862ea65237d496,1 +np.float64,0x3fff4b1b7b9e9637,0x3ff4a83fb08e7b24,1 +np.float64,0x7fe7526140aea4c2,0x40863146ae86069c,1 +np.float64,0x7fbfcfb7c23f9f6f,0x4086231fc246ede5,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-arcsin.csv b/python/numpy/_core/tests/data/umath-validation-set-arcsin.csv new file mode 100644 index 000000000..75d570721 --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-arcsin.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbe7d3a7c,0xbe7fe217,4 +np.float32,0x3dc102f0,0x3dc14c60,4 +np.float32,0xbe119c28,0xbe121aef,4 +np.float32,0xbe51cd68,0xbe534c75,4 +np.float32,0x3c04a300,0x3c04a35f,4 +np.float32,0xbf4f0b62,0xbf712a69,4 +np.float32,0x3ef61a5c,0x3f005cf6,4 +np.float32,0xbf13024c,0xbf1c97df,4 +np.float32,0x3e93b580,0x3e95d6b5,4 +np.float32,0x3e44e7b8,0x3e4623a5,4 +np.float32,0xbe35df20,0xbe36d773,4 +np.float32,0x3eecd2c0,0x3ef633cf,4 +np.float32,0x3f2772ba,0x3f36862a,4 +np.float32,0x3e211ea8,0x3e21cac5,4 +np.float32,0x3e3b3d90,0x3e3c4cc6,4 +np.float32,0x3f37c962,0x3f4d018c,4 +np.float32,0x3e92ad88,0x3e94c31a,4 +np.float32,0x3f356ffc,0x3f49a766,4 +np.float32,0x3f487ba2,0x3f665254,4 +np.float32,0x3f061c46,0x3f0d27ae,4 +np.float32,0xbee340a2,0xbeeb7722,4 +np.float32,0xbe85aede,0xbe874026,4 +np.float32,0x3f34cf9a,0x3f48c474,4 +np.float32,0x3e29a690,0x3e2a6fbd,4 +np.float32,0xbeb29428,0xbeb669d1,4 +np.float32,0xbe606d40,0xbe624370,4 +np.float32,0x3dae6860,0x3dae9e85,4 +np.float32,0xbf04872b,0xbf0b4d25,4 +np.float32,0x3f2080e2,0x3f2d7ab0,4 +np.float32,0xbec77dcc,0xbecceb27,4 +np.float32,0x3e0dda10,0x3e0e4f38,4 +np.float32,0xbefaf970,0xbf03262c,4 +np.float32,0x3f576a0c,0x3f7ffee6,4 +np.float32,0x3f222382,0x3f2f95d6,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3e41c468,0x3e42f14e,4 +np.float32,0xbf2f64dd,0xbf4139a8,4 +np.float32,0xbf60ef90,0xbf895956,4 +np.float32,0xbf67c855,0xbf90eff0,4 +np.float32,0xbed35aee,0xbed9df00,4 +np.float32,0xbf2c7d92,0xbf3d448f,4 +np.float32,0x3f7b1604,0x3faff122,4 +np.float32,0xbf7c758b,0xbfb3bf87,4 +np.float32,0x3ecda1c8,0x3ed39acf,4 +np.float32,0x3f3af8ae,0x3f519fcb,4 +np.float32,0xbf16e6a3,0xbf2160fd,4 +np.float32,0x3f0c97d2,0x3f14d668,4 +np.float32,0x3f0a8060,0x3f1257b9,4 +np.float32,0x3f27905a,0x3f36ad57,4 +np.float32,0x3eeaeba4,0x3ef40efe,4 +np.float32,0x3e58dde0,0x3e5a8580,4 +np.float32,0xbf0cabe2,0xbf14ee6b,4 +np.float32,0xbe805ca8,0xbe81bf03,4 +np.float32,0x3f5462ba,0x3f7a7b85,4 +np.float32,0xbee235d0,0xbeea4d8b,4 +np.float32,0xbe880cb0,0xbe89b426,4 +np.float32,0x80000001,0x80000001,4 +np.float32,0x3f208c00,0x3f2d88f6,4 +np.float32,0xbf34f3d2,0xbf48f7a2,4 +np.float32,0x3f629428,0x3f8b1763,4 +np.float32,0xbf52a900,0xbf776b4a,4 +np.float32,0xbd17f8d0,0xbd1801be,4 +np.float32,0xbef7cada,0xbf0153d1,4 +np.float32,0x3f7d3b90,0x3fb63967,4 +np.float32,0xbd6a20b0,0xbd6a4160,4 +np.float32,0x3f740496,0x3fa1beb7,4 +np.float32,0x3ed8762c,0x3edf7dd9,4 +np.float32,0x3f53b066,0x3f793d42,4 +np.float32,0xbe9de718,0xbea084f9,4 +np.float32,0x3ea3ae90,0x3ea69b4b,4 +np.float32,0x3f1b8f00,0x3f273183,4 
+np.float32,0x3f5cd6ac,0x3f852ead,4 +np.float32,0x3f29d510,0x3f39b169,4 +np.float32,0x3ee2a934,0x3eeace33,4 +np.float32,0x3eecac94,0x3ef608c2,4 +np.float32,0xbea915e2,0xbeac5203,4 +np.float32,0xbd316e90,0xbd317cc8,4 +np.float32,0xbf70b495,0xbf9c97b6,4 +np.float32,0xbe80d976,0xbe823ff3,4 +np.float32,0x3e9205f8,0x3e94143f,4 +np.float32,0x3f49247e,0x3f676296,4 +np.float32,0x3d9030c0,0x3d904f50,4 +np.float32,0x3e4df058,0x3e4f5a5c,4 +np.float32,0xbe1fd360,0xbe207b58,4 +np.float32,0xbf69dc7c,0xbf937006,4 +np.float32,0x3f36babe,0x3f4b7df3,4 +np.float32,0xbe8c9758,0xbe8e6bb7,4 +np.float32,0xbf4de72d,0xbf6f3c20,4 +np.float32,0xbecdad68,0xbed3a780,4 +np.float32,0xbf73e2cf,0xbfa18702,4 +np.float32,0xbece16a8,0xbed41a75,4 +np.float32,0x3f618a96,0x3f89fc6d,4 +np.float32,0xbf325853,0xbf454ea9,4 +np.float32,0x3f138568,0x3f1d3828,4 +np.float32,0xbf56a6e9,0xbf7e9748,4 +np.float32,0x3ef5d594,0x3f0035bf,4 +np.float32,0xbf408220,0xbf59dfaa,4 +np.float32,0xbed120e6,0xbed76dd5,4 +np.float32,0xbf6dbda5,0xbf986cee,4 +np.float32,0x3f744a38,0x3fa23282,4 +np.float32,0xbe4b56d8,0xbe4cb329,4 +np.float32,0x3f54c5f2,0x3f7b2d97,4 +np.float32,0xbd8b1c90,0xbd8b3801,4 +np.float32,0x3ee19a48,0x3ee9a03b,4 +np.float32,0x3f48460e,0x3f65fc3d,4 +np.float32,0x3eb541c0,0x3eb9461e,4 +np.float32,0xbea7d098,0xbeaaf98c,4 +np.float32,0xbda99e40,0xbda9d00c,4 +np.float32,0xbefb2ca6,0xbf03438d,4 +np.float32,0x3f4256be,0x3f5cab0b,4 +np.float32,0xbdbdb198,0xbdbdf74d,4 +np.float32,0xbf325b5f,0xbf4552e9,4 +np.float32,0xbf704d1a,0xbf9c00b4,4 +np.float32,0x3ebb1d04,0x3ebf8cf8,4 +np.float32,0xbed03566,0xbed66bf1,4 +np.float32,0x3e8fcee8,0x3e91c501,4 +np.float32,0xbf2e1eec,0xbf3f7b9d,4 +np.float32,0x3f33c4d2,0x3f474cac,4 +np.float32,0x3f598ef4,0x3f8201b4,4 +np.float32,0x3e09bb30,0x3e0a2660,4 +np.float32,0x3ed4e228,0x3edb8cdb,4 +np.float32,0x3eb7a190,0x3ebbd0a1,4 +np.float32,0xbd9ae630,0xbd9b0c18,4 +np.float32,0x3f43020e,0x3f5db2d7,4 +np.float32,0xbec06ac0,0xbec542d4,4 +np.float32,0x3f3dfde0,0x3f561674,4 +np.float32,0xbf64084a,0xbf8cabe6,4 +np.float32,0xbd6f95b0,0xbd6fb8b7,4 +np.float32,0x3f268640,0x3f354e2d,4 +np.float32,0xbe72b4bc,0xbe7509b2,4 +np.float32,0xbf3414fa,0xbf47bd5a,4 +np.float32,0xbf375218,0xbf4c566b,4 +np.float32,0x3f203c1a,0x3f2d2273,4 +np.float32,0xbd503530,0xbd504c2b,4 +np.float32,0xbc45e540,0xbc45e67b,4 +np.float32,0xbf175c4f,0xbf21f2c6,4 +np.float32,0x3f7432a6,0x3fa20b2b,4 +np.float32,0xbf43367f,0xbf5e03d8,4 +np.float32,0x3eb3997c,0x3eb780c4,4 +np.float32,0x3e5574c8,0x3e570878,4 +np.float32,0xbf04b57b,0xbf0b8349,4 +np.float32,0x3f6216d8,0x3f8a914b,4 +np.float32,0xbf57a237,0xbf80337d,4 +np.float32,0xbee1403a,0xbee93bee,4 +np.float32,0xbeaf9b9a,0xbeb33f3b,4 +np.float32,0xbf109374,0xbf19a223,4 +np.float32,0xbeae6824,0xbeb1f810,4 +np.float32,0xbcff9320,0xbcff9dbe,4 +np.float32,0x3ed205c0,0x3ed868a9,4 +np.float32,0x3d897c30,0x3d8996ad,4 +np.float32,0xbf2899d2,0xbf380d4c,4 +np.float32,0xbf54cb0b,0xbf7b36c2,4 +np.float32,0x3ea8e8ec,0x3eac2262,4 +np.float32,0x3ef5e1a0,0x3f003c9d,4 +np.float32,0xbf00c81e,0xbf06f1e2,4 +np.float32,0xbf346775,0xbf483181,4 +np.float32,0x3f7a4fe4,0x3fae077c,4 +np.float32,0x3f00776e,0x3f06948f,4 +np.float32,0xbe0a3078,0xbe0a9cbc,4 +np.float32,0xbeba0b06,0xbebe66be,4 +np.float32,0xbdff4e38,0xbdfff8b2,4 +np.float32,0xbe927f70,0xbe9492ff,4 +np.float32,0x3ebb07e0,0x3ebf7642,4 +np.float32,0x3ebcf8e0,0x3ec18c95,4 +np.float32,0x3f49bdfc,0x3f685b51,4 +np.float32,0x3cbc29c0,0x3cbc2dfd,4 +np.float32,0xbe9e951a,0xbea13bf1,4 +np.float32,0xbe8c237c,0xbe8df33d,4 +np.float32,0x3e17f198,0x3e1881c4,4 
+np.float32,0xbd0b5220,0xbd0b5902,4 +np.float32,0xbf34c4a2,0xbf48b4f5,4 +np.float32,0xbedaa814,0xbee1ea94,4 +np.float32,0x3ebf5d6c,0x3ec42053,4 +np.float32,0x3cd04b40,0x3cd050ff,4 +np.float32,0xbec33fe0,0xbec85244,4 +np.float32,0xbf00b27a,0xbf06d8d8,4 +np.float32,0x3f15d7be,0x3f201243,4 +np.float32,0xbe3debd0,0xbe3f06f7,4 +np.float32,0xbea81704,0xbeab4418,4 +np.float32,0x1,0x1,4 +np.float32,0x3f49e6ba,0x3f689d8b,4 +np.float32,0x3f351030,0x3f491fc0,4 +np.float32,0x3e607de8,0x3e625482,4 +np.float32,0xbe8dbbe4,0xbe8f9c0e,4 +np.float32,0x3edbf350,0x3ee35924,4 +np.float32,0xbf0c84c4,0xbf14bf9c,4 +np.float32,0x3eb218b0,0x3eb5e61a,4 +np.float32,0x3e466dd0,0x3e47b138,4 +np.float32,0xbe8ece94,0xbe90ba01,4 +np.float32,0xbe82ec2a,0xbe84649a,4 +np.float32,0xbf7e1f10,0xbfb98b9e,4 +np.float32,0xbf2d00ea,0xbf3df688,4 +np.float32,0x3db7cdd0,0x3db80d36,4 +np.float32,0xbe388b98,0xbe398f25,4 +np.float32,0xbd86cb40,0xbd86e436,4 +np.float32,0x7f7fffff,0x7fc00000,4 +np.float32,0x3f472a60,0x3f6436c6,4 +np.float32,0xbf5b2c1d,0xbf838d87,4 +np.float32,0x3f0409ea,0x3f0abad8,4 +np.float32,0x3f47dd0e,0x3f6553f0,4 +np.float32,0x3e3eab00,0x3e3fc98a,4 +np.float32,0xbf7c2a7f,0xbfb2e19b,4 +np.float32,0xbeda0048,0xbee13112,4 +np.float32,0x3f46600a,0x3f62f5b2,4 +np.float32,0x3f45aef4,0x3f61de43,4 +np.float32,0x3dd40a50,0x3dd46bc4,4 +np.float32,0xbf6cdd0b,0xbf974191,4 +np.float32,0x3f78de4c,0x3faac725,4 +np.float32,0x3f3c39a4,0x3f53777f,4 +np.float32,0xbe2a30ec,0xbe2afc0b,4 +np.float32,0xbf3c0ef0,0xbf533887,4 +np.float32,0x3ecb6548,0x3ed12a53,4 +np.float32,0x3eb994e8,0x3ebde7fc,4 +np.float32,0x3d4c1ee0,0x3d4c3487,4 +np.float32,0xbf52cb6d,0xbf77a7eb,4 +np.float32,0x3eb905d4,0x3ebd4e80,4 +np.float32,0x3e712428,0x3e736d72,4 +np.float32,0xbf79ee6e,0xbfad22be,4 +np.float32,0x3de6f8b0,0x3de776c1,4 +np.float32,0x3e9b2898,0x3e9da325,4 +np.float32,0x3ea09b20,0x3ea35d20,4 +np.float32,0x3d0ea9a0,0x3d0eb103,4 +np.float32,0xbd911500,0xbd913423,4 +np.float32,0x3e004618,0x3e009c97,4 +np.float32,0x3f5e0e5a,0x3f86654c,4 +np.float32,0x3f2e6300,0x3f3fd88b,4 +np.float32,0x3e0cf5d0,0x3e0d68c3,4 +np.float32,0x3d6a16c0,0x3d6a376c,4 +np.float32,0x3f7174aa,0x3f9db53c,4 +np.float32,0xbe04bba0,0xbe051b81,4 +np.float32,0xbe6fdcb4,0xbe721c92,4 +np.float32,0x3f4379f0,0x3f5e6c31,4 +np.float32,0xbf680098,0xbf913257,4 +np.float32,0xbf3c31ca,0xbf536bea,4 +np.float32,0x3f59db58,0x3f824a4e,4 +np.float32,0xbf3ffc84,0xbf591554,4 +np.float32,0x3d1d5160,0x3d1d5b48,4 +np.float32,0x3f6c64ae,0x3f96a3da,4 +np.float32,0xbf1b49fd,0xbf26daaa,4 +np.float32,0x3ec80be0,0x3ecd8576,4 +np.float32,0x3f3becc0,0x3f530629,4 +np.float32,0xbea93890,0xbeac76c1,4 +np.float32,0x3f5b3acc,0x3f839bbd,4 +np.float32,0xbf5d6818,0xbf85bef9,4 +np.float32,0x3f794266,0x3fab9fa6,4 +np.float32,0xbee8eb7c,0xbef1cf3b,4 +np.float32,0xbf360a06,0xbf4a821e,4 +np.float32,0x3f441cf6,0x3f5f693d,4 +np.float32,0x3e60de40,0x3e62b742,4 +np.float32,0xbebb3d7e,0xbebfafdc,4 +np.float32,0x3e56a3a0,0x3e583e28,4 +np.float32,0x3f375bfe,0x3f4c6499,4 +np.float32,0xbf384d7d,0xbf4dbf9a,4 +np.float32,0x3efb03a4,0x3f032c06,4 +np.float32,0x3f1d5d10,0x3f29794d,4 +np.float32,0xbe25f7dc,0xbe26b41d,4 +np.float32,0x3f6d2f88,0x3f97aebb,4 +np.float32,0xbe9fa100,0xbea255cb,4 +np.float32,0xbf21dafa,0xbf2f382a,4 +np.float32,0x3d3870e0,0x3d3880d9,4 +np.float32,0x3eeaf00c,0x3ef413f4,4 +np.float32,0xbc884ea0,0xbc88503c,4 +np.float32,0xbf7dbdad,0xbfb80b6d,4 +np.float32,0xbf4eb713,0xbf709b46,4 +np.float32,0xbf1c0ad4,0xbf27cd92,4 +np.float32,0x3f323088,0x3f451737,4 +np.float32,0x3e405d88,0x3e4183e1,4 +np.float32,0x3d7ad580,0x3d7afdb4,4 
+np.float32,0xbf207338,0xbf2d6927,4 +np.float32,0xbecf7948,0xbed59e1a,4 +np.float32,0x3f16ff94,0x3f217fde,4 +np.float32,0xbdf19588,0xbdf225dd,4 +np.float32,0xbf4d9654,0xbf6eb442,4 +np.float32,0xbf390b9b,0xbf4ed220,4 +np.float32,0xbe155a74,0xbe15e354,4 +np.float32,0x3f519e4c,0x3f759850,4 +np.float32,0xbee3f08c,0xbeec3b84,4 +np.float32,0xbf478be7,0xbf64d23b,4 +np.float32,0xbefdee50,0xbf04d92a,4 +np.float32,0x3e8def78,0x3e8fd1bc,4 +np.float32,0x3e3df2a8,0x3e3f0dee,4 +np.float32,0xbf413e22,0xbf5afd97,4 +np.float32,0xbf1b8bc4,0xbf272d71,4 +np.float32,0xbf31e5be,0xbf44af22,4 +np.float32,0x3de7e080,0x3de86010,4 +np.float32,0xbf5ddf7e,0xbf863645,4 +np.float32,0x3f3eba6a,0x3f57306e,4 +np.float32,0xff7fffff,0x7fc00000,4 +np.float32,0x3ec22d5c,0x3ec72973,4 +np.float32,0x80800000,0x80800000,4 +np.float32,0x3f032e0c,0x3f09ba82,4 +np.float32,0x3d74bd60,0x3d74e2b7,4 +np.float32,0xbea0d61e,0xbea39b42,4 +np.float32,0xbefdfa78,0xbf04e02a,4 +np.float32,0x3e5cb220,0x3e5e70ec,4 +np.float32,0xbe239e54,0xbe2452a4,4 +np.float32,0x3f452738,0x3f61090e,4 +np.float32,0x3e99a2e0,0x3e9c0a66,4 +np.float32,0x3e4394d8,0x3e44ca5f,4 +np.float32,0x3f4472e2,0x3f5fef14,4 +np.float32,0xbf46bc70,0xbf638814,4 +np.float32,0xbf0b910f,0xbf139c7a,4 +np.float32,0x3f36b4a6,0x3f4b753f,4 +np.float32,0x3e0bf478,0x3e0c64f6,4 +np.float32,0x3ce02480,0x3ce02ba9,4 +np.float32,0xbd904b10,0xbd9069b1,4 +np.float32,0xbf7f5d72,0xbfc00b70,4 +np.float32,0x3f62127e,0x3f8a8ca8,4 +np.float32,0xbf320253,0xbf44d6e4,4 +np.float32,0x3f2507be,0x3f335833,4 +np.float32,0x3f299284,0x3f395887,4 +np.float32,0xbd8211b0,0xbd82281d,4 +np.float32,0xbd3374c0,0xbd338376,4 +np.float32,0x3f36c56a,0x3f4b8d30,4 +np.float32,0xbf51f704,0xbf76331f,4 +np.float32,0xbe9871ca,0xbe9acab2,4 +np.float32,0xbe818d8c,0xbe82fa0f,4 +np.float32,0x3f08b958,0x3f103c18,4 +np.float32,0x3f22559a,0x3f2fd698,4 +np.float32,0xbf11f388,0xbf1b4db8,4 +np.float32,0x3ebe1990,0x3ec2c359,4 +np.float32,0xbe75ab38,0xbe7816b6,4 +np.float32,0x3e96102c,0x3e984c99,4 +np.float32,0xbe80d9d2,0xbe824052,4 +np.float32,0x3ef47588,0x3efeda7f,4 +np.float32,0xbe45e524,0xbe4725ea,4 +np.float32,0x3f7f9e7a,0x3fc213ff,4 +np.float32,0x3f1d3c36,0x3f294faa,4 +np.float32,0xbf3c58db,0xbf53a591,4 +np.float32,0x3f0d3d20,0x3f159c69,4 +np.float32,0x3f744be6,0x3fa23552,4 +np.float32,0x3f2e0cea,0x3f3f630e,4 +np.float32,0x3e193c10,0x3e19cff7,4 +np.float32,0xbf4150ac,0xbf5b19dd,4 +np.float32,0xbf145f72,0xbf1e4355,4 +np.float32,0xbb76cc00,0xbb76cc26,4 +np.float32,0x3f756780,0x3fa41b3e,4 +np.float32,0x3ea9b868,0x3eacfe3c,4 +np.float32,0x3d07c920,0x3d07cf7f,4 +np.float32,0xbf2263d4,0xbf2fe8ff,4 +np.float32,0x3e53b3f8,0x3e553daa,4 +np.float32,0xbf785be8,0xbfa9b5ba,4 +np.float32,0x3f324f7a,0x3f454254,4 +np.float32,0xbf2188f2,0xbf2ece5b,4 +np.float32,0xbe33781c,0xbe3466a2,4 +np.float32,0xbd3cf120,0xbd3d024c,4 +np.float32,0x3f06b18a,0x3f0dd70f,4 +np.float32,0x3f40d63e,0x3f5a5f6a,4 +np.float32,0x3f752340,0x3fa3a41e,4 +np.float32,0xbe1cf1c0,0xbe1d90bc,4 +np.float32,0xbf02d948,0xbf0957d7,4 +np.float32,0x3f73bed0,0x3fa14bf7,4 +np.float32,0x3d914920,0x3d916864,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0xbe67a5d8,0xbe69aba7,4 +np.float32,0x3f689c4a,0x3f91eb9f,4 +np.float32,0xbf196e00,0xbf248601,4 +np.float32,0xbf50dacb,0xbf7444fe,4 +np.float32,0x3f628b86,0x3f8b0e1e,4 +np.float32,0x3f6ee2f2,0x3f99fe7f,4 +np.float32,0x3ee5df40,0x3eee6492,4 +np.float32,0x3f501746,0x3f72f41b,4 +np.float32,0xbf1f0f18,0xbf2ba164,4 +np.float32,0xbf1a8bfd,0xbf25ec01,4 +np.float32,0xbd4926f0,0xbd493ba9,4 +np.float32,0xbf4e364f,0xbf6fc17b,4 
+np.float32,0x3e50c578,0x3e523ed4,4 +np.float32,0x3f65bf10,0x3f8e95ce,4 +np.float32,0xbe8d75a2,0xbe8f52f2,4 +np.float32,0xbf3f557e,0xbf581962,4 +np.float32,0xbeff2bfc,0xbf05903a,4 +np.float32,0x3f5e8bde,0x3f86e3d8,4 +np.float32,0xbf7a0012,0xbfad4b9b,4 +np.float32,0x3edefce0,0x3ee6b790,4 +np.float32,0xbf0003de,0xbf060f09,4 +np.float32,0x3efc4650,0x3f03e548,4 +np.float32,0x3f4582e4,0x3f6198f5,4 +np.float32,0x3f10086c,0x3f18f9d0,4 +np.float32,0x3f1cd304,0x3f28ca77,4 +np.float32,0x3f683366,0x3f916e8d,4 +np.float32,0xbed49392,0xbedb3675,4 +np.float32,0xbf6fe5f6,0xbf9b6c0e,4 +np.float32,0xbf59b416,0xbf8224f6,4 +np.float32,0x3d20c960,0x3d20d3f4,4 +np.float32,0x3f6b00d6,0x3f94dbe7,4 +np.float32,0x3f6c26ae,0x3f965352,4 +np.float32,0xbf370ea6,0xbf4bf5dd,4 +np.float32,0x3dfe7230,0x3dff1af1,4 +np.float32,0xbefc21a8,0xbf03d038,4 +np.float32,0x3f16a990,0x3f21156a,4 +np.float32,0xbef8ac0c,0xbf01d48f,4 +np.float32,0x3f170de8,0x3f21919d,4 +np.float32,0x3db9ef80,0x3dba3122,4 +np.float32,0x3d696400,0x3d698461,4 +np.float32,0x3f007aa2,0x3f069843,4 +np.float32,0x3f22827c,0x3f3010a9,4 +np.float32,0x3f3650dc,0x3f4ae6f1,4 +np.float32,0xbf1d8037,0xbf29a5e1,4 +np.float32,0xbf08fdc4,0xbf108d0e,4 +np.float32,0xbd8df350,0xbd8e1079,4 +np.float32,0xbf36bb32,0xbf4b7e98,4 +np.float32,0x3f2e3756,0x3f3f9ced,4 +np.float32,0x3d5a6f20,0x3d5a89aa,4 +np.float32,0x3f55d568,0x3f7d1889,4 +np.float32,0x3e1ed110,0x3e1f75d9,4 +np.float32,0x3e7386b8,0x3e75e1dc,4 +np.float32,0x3f48ea0e,0x3f670434,4 +np.float32,0x3e921fb0,0x3e942f14,4 +np.float32,0xbf0d4d0b,0xbf15af7f,4 +np.float32,0x3f179ed2,0x3f224549,4 +np.float32,0xbf3a328e,0xbf507e6d,4 +np.float32,0xbf74591a,0xbfa24b6e,4 +np.float32,0x3ec7d1c4,0x3ecd4657,4 +np.float32,0xbf6ecbed,0xbf99de85,4 +np.float32,0x3db0bd00,0x3db0f559,4 +np.float32,0x7f800000,0x7fc00000,4 +np.float32,0x3e0373b8,0x3e03d0d6,4 +np.float32,0xbf439784,0xbf5e9a04,4 +np.float32,0xbef97a9e,0xbf024ac6,4 +np.float32,0x3e4d71a8,0x3e4ed90a,4 +np.float32,0xbf14d868,0xbf1ed7e3,4 +np.float32,0xbf776870,0xbfa7ce37,4 +np.float32,0xbe32a500,0xbe339038,4 +np.float32,0xbf326d8a,0xbf456c3d,4 +np.float32,0xbe9b758c,0xbe9df3e7,4 +np.float32,0x3d9515a0,0x3d95376a,4 +np.float32,0x3e3f7320,0x3e40953e,4 +np.float32,0xbee57e7e,0xbeedf84f,4 +np.float32,0x3e821e94,0x3e838ffd,4 +np.float32,0x3f74beaa,0x3fa2f721,4 +np.float32,0xbe9b7672,0xbe9df4d9,4 +np.float32,0x3f4041fc,0x3f597e71,4 +np.float32,0xbe9ea7c4,0xbea14f92,4 +np.float32,0xbf800000,0xbfc90fdb,4 +np.float32,0x3e04fb90,0x3e055bfd,4 +np.float32,0xbf14d3d6,0xbf1ed245,4 +np.float32,0xbe84ebec,0xbe86763e,4 +np.float32,0x3f08e568,0x3f107039,4 +np.float32,0x3d8dc9e0,0x3d8de6ef,4 +np.float32,0x3ea4549c,0x3ea74a94,4 +np.float32,0xbebd2806,0xbec1bf51,4 +np.float32,0x3f311a26,0x3f439498,4 +np.float32,0xbf3d2222,0xbf54cf7e,4 +np.float32,0x3e00c500,0x3e011c81,4 +np.float32,0xbe35ed1c,0xbe36e5a9,4 +np.float32,0xbd4ec020,0xbd4ed6a0,4 +np.float32,0x3e1eb088,0x3e1f54eb,4 +np.float32,0x3cf94840,0x3cf9521a,4 +np.float32,0xbf010c5d,0xbf0740e0,4 +np.float32,0xbf3bd63b,0xbf52e502,4 +np.float32,0x3f233f30,0x3f310542,4 +np.float32,0x3ea24128,0x3ea519d7,4 +np.float32,0x3f478b38,0x3f64d124,4 +np.float32,0x3f1e0c6c,0x3f2a57ec,4 +np.float32,0xbf3ad294,0xbf51680a,4 +np.float32,0x3ede0554,0x3ee5a4b4,4 +np.float32,0x3e451a98,0x3e46577d,4 +np.float32,0x3f520164,0x3f764542,4 +np.float32,0x0,0x0,4 +np.float32,0xbd056cd0,0xbd0572db,4 +np.float32,0xbf58b018,0xbf812f5e,4 +np.float32,0x3e036eb0,0x3e03cbc3,4 +np.float32,0x3d1377a0,0x3d137fc9,4 +np.float32,0xbf692d3a,0xbf929a2c,4 +np.float32,0xbec60fb8,0xbecb5dea,4 
+np.float32,0x3ed23340,0x3ed89a8e,4 +np.float32,0x3c87f040,0x3c87f1d9,4 +np.float32,0x3dac62f0,0x3dac9737,4 +np.float32,0xbed97c16,0xbee09f02,4 +np.float32,0xbf2d5f3c,0xbf3e769c,4 +np.float32,0xbc3b7c40,0xbc3b7d4c,4 +np.float32,0x3ed998ec,0x3ee0bedd,4 +np.float32,0x3dd86630,0x3dd8cdcb,4 +np.float32,0x3e8b4304,0x3e8d09ea,4 +np.float32,0x3f51e6b0,0x3f761697,4 +np.float32,0x3ec51f24,0x3eca5923,4 +np.float32,0xbf647430,0xbf8d2307,4 +np.float32,0x3f253d9c,0x3f339eb2,4 +np.float32,0x3dc969d0,0x3dc9bd4b,4 +np.float32,0xbc2f1300,0xbc2f13da,4 +np.float32,0xbf170007,0xbf21806d,4 +np.float32,0x3f757d10,0x3fa4412e,4 +np.float32,0xbe7864ac,0xbe7ae564,4 +np.float32,0x3f2ffe90,0x3f420cfb,4 +np.float32,0xbe576138,0xbe590012,4 +np.float32,0xbf517a21,0xbf755959,4 +np.float32,0xbf159cfe,0xbf1fc9d5,4 +np.float32,0xbf638b2a,0xbf8c22cf,4 +np.float32,0xff800000,0x7fc00000,4 +np.float32,0x3ed19ca0,0x3ed7f569,4 +np.float32,0x3f7c4460,0x3fb32d26,4 +np.float32,0x3ebfae6c,0x3ec477ab,4 +np.float32,0x3dd452d0,0x3dd4b4a8,4 +np.float32,0x3f471482,0x3f6413fb,4 +np.float32,0xbf49d704,0xbf6883fe,4 +np.float32,0xbd42c4e0,0xbd42d7af,4 +np.float32,0xbeb02994,0xbeb3d668,4 +np.float32,0x3f4d1fd8,0x3f6dedd2,4 +np.float32,0x3efb591c,0x3f035d11,4 +np.float32,0x80000000,0x80000000,4 +np.float32,0xbf50f782,0xbf7476ad,4 +np.float32,0x3d7232c0,0x3d7256f0,4 +np.float32,0x3f649460,0x3f8d46bb,4 +np.float32,0x3f5561bc,0x3f7c46a9,4 +np.float32,0x3e64f6a0,0x3e66ea5d,4 +np.float32,0x3e5b0470,0x3e5cb8f9,4 +np.float32,0xbe9b6b2c,0xbe9de904,4 +np.float32,0x3f6c33f4,0x3f966486,4 +np.float32,0x3f5cee54,0x3f854613,4 +np.float32,0x3ed3e044,0x3eda716e,4 +np.float32,0xbf3cac7f,0xbf542131,4 +np.float32,0x3c723500,0x3c723742,4 +np.float32,0x3de59900,0x3de614d3,4 +np.float32,0xbdf292f8,0xbdf32517,4 +np.float32,0x3f05c8b2,0x3f0cc59b,4 +np.float32,0xbf1ab182,0xbf261b14,4 +np.float32,0xbda396f0,0xbda3c39a,4 +np.float32,0xbf270ed0,0xbf360231,4 +np.float32,0x3f2063e6,0x3f2d557e,4 +np.float32,0x3c550280,0x3c550409,4 +np.float32,0xbe103b48,0xbe10b679,4 +np.float32,0xbebae390,0xbebf4f40,4 +np.float32,0x3f3bc868,0x3f52d0aa,4 +np.float32,0xbd62f880,0xbd631647,4 +np.float32,0xbe7a38f4,0xbe7cc833,4 +np.float32,0x3f09d796,0x3f118f39,4 +np.float32,0xbf5fa558,0xbf8802d0,4 +np.float32,0x3f111cc8,0x3f1a48b0,4 +np.float32,0x3e831958,0x3e849356,4 +np.float32,0xbf614dbd,0xbf89bc3b,4 +np.float32,0xbd521510,0xbd522cac,4 +np.float32,0x3f05af22,0x3f0ca7a0,4 +np.float32,0xbf1ac60e,0xbf2634df,4 +np.float32,0xbf6bd05e,0xbf95e3fe,4 +np.float32,0xbd1fa6e0,0xbd1fb13b,4 +np.float32,0xbeb82f7a,0xbebc68b1,4 +np.float32,0xbd92aaf8,0xbd92cb23,4 +np.float32,0xbe073a54,0xbe079fbf,4 +np.float32,0xbf198655,0xbf24a468,4 +np.float32,0x3f62f6d8,0x3f8b81ba,4 +np.float32,0x3eef4310,0x3ef8f4f9,4 +np.float32,0x3e8988e0,0x3e8b3eae,4 +np.float32,0xbf3ddba5,0xbf55e367,4 +np.float32,0x3dc6d2e0,0x3dc7232b,4 +np.float32,0xbf31040e,0xbf437601,4 +np.float32,0x3f1bb74a,0x3f276442,4 +np.float32,0xbf0075d2,0xbf0692b3,4 +np.float32,0xbf606ce0,0xbf88d0ff,4 +np.float32,0xbf083856,0xbf0fa39d,4 +np.float32,0xbdb25b20,0xbdb2950a,4 +np.float32,0xbeb86860,0xbebca5ae,4 +np.float32,0x3de83160,0x3de8b176,4 +np.float32,0xbf33a98f,0xbf472664,4 +np.float32,0x3e7795f8,0x3e7a1058,4 +np.float32,0x3e0ca6f8,0x3e0d192a,4 +np.float32,0xbf1aef60,0xbf2668c3,4 +np.float32,0xbda53b58,0xbda5695e,4 +np.float32,0xbf178096,0xbf221fc5,4 +np.float32,0xbf0a4159,0xbf120ccf,4 +np.float32,0x3f7bca36,0x3fb1d0df,4 +np.float32,0xbef94360,0xbf022b26,4 +np.float32,0xbef16f36,0xbefb6ad6,4 +np.float32,0x3f53a7e6,0x3f792e25,4 
+np.float32,0xbf7c536f,0xbfb35993,4 +np.float32,0xbe84aaa0,0xbe8632a2,4 +np.float32,0x3ecb3998,0x3ed0fab9,4 +np.float32,0x3f539304,0x3f79090a,4 +np.float32,0xbf3c7816,0xbf53d3b3,4 +np.float32,0xbe7a387c,0xbe7cc7b7,4 +np.float32,0x3f7000e4,0x3f9b92b1,4 +np.float32,0x3e08fd70,0x3e0966e5,4 +np.float32,0x3db97ba0,0x3db9bcc8,4 +np.float32,0xbee99056,0xbef2886a,4 +np.float32,0xbf0668da,0xbf0d819e,4 +np.float32,0x3e58a408,0x3e5a4a51,4 +np.float32,0x3f3440b8,0x3f47faed,4 +np.float32,0xbf19a2ce,0xbf24c7ff,4 +np.float32,0xbe75e990,0xbe7856ee,4 +np.float32,0x3f3c865c,0x3f53e8cb,4 +np.float32,0x3e5e03d0,0x3e5fcac9,4 +np.float32,0x3edb8e34,0x3ee2e932,4 +np.float32,0xbf7e1f5f,0xbfb98ce4,4 +np.float32,0xbf7372ff,0xbfa0d0ae,4 +np.float32,0xbf3ee850,0xbf577548,4 +np.float32,0x3ef19658,0x3efb9737,4 +np.float32,0xbe8088de,0xbe81ecaf,4 +np.float32,0x800000,0x800000,4 +np.float32,0xbde39dd8,0xbde4167a,4 +np.float32,0xbf065d7a,0xbf0d7441,4 +np.float32,0xbde52c78,0xbde5a79b,4 +np.float32,0xbe3a28c0,0xbe3b333e,4 +np.float32,0x3f6e8b3c,0x3f998516,4 +np.float32,0x3f3485c2,0x3f485c39,4 +np.float32,0x3e6f2c68,0x3e71673e,4 +np.float32,0xbe4ec9cc,0xbe50385e,4 +np.float32,0xbf1c3bb0,0xbf280b39,4 +np.float32,0x3ec8ea18,0x3ece76f7,4 +np.float32,0x3e26b5f8,0x3e2774c9,4 +np.float32,0x3e1e4a38,0x3e1eed5c,4 +np.float32,0xbee7a106,0xbef05c6b,4 +np.float32,0xbf305928,0xbf4289d8,4 +np.float32,0x3f0c431c,0x3f147118,4 +np.float32,0xbe57ba6c,0xbe595b52,4 +np.float32,0x3eabc9cc,0x3eaf2fc7,4 +np.float32,0xbef1ed24,0xbefbf9ae,4 +np.float32,0xbf61b576,0xbf8a29cc,4 +np.float32,0x3e9c1ff4,0x3e9ea6cb,4 +np.float32,0x3f6c53b2,0x3f968dbe,4 +np.float32,0x3e2d1b80,0x3e2df156,4 +np.float32,0x3e9f2f70,0x3ea1de4a,4 +np.float32,0xbf5861ee,0xbf80e61a,4 +np.float32,0x3f429144,0x3f5d0505,4 +np.float32,0x3e235cc8,0x3e24103e,4 +np.float32,0xbf354879,0xbf496f6a,4 +np.float32,0xbf20a146,0xbf2da447,4 +np.float32,0x3e8d8968,0x3e8f6785,4 +np.float32,0x3f3fbc94,0x3f58b4c1,4 +np.float32,0x3f2c5f50,0x3f3d1b9f,4 +np.float32,0x3f7bf0f8,0x3fb23d23,4 +np.float32,0xbf218282,0xbf2ec60f,4 +np.float32,0x3f2545aa,0x3f33a93e,4 +np.float32,0xbf4b17be,0xbf6a9018,4 +np.float32,0xbb9df700,0xbb9df728,4 +np.float32,0x3f685d54,0x3f91a06c,4 +np.float32,0x3efdfe2c,0x3f04e24c,4 +np.float32,0x3ef1c5a0,0x3efbccd9,4 +np.float32,0xbf41d731,0xbf5be76e,4 +np.float32,0x3ebd1360,0x3ec1a919,4 +np.float32,0xbf706bd4,0xbf9c2d58,4 +np.float32,0x3ea525e4,0x3ea8279d,4 +np.float32,0xbe51f1b0,0xbe537186,4 +np.float32,0x3f5e8cf6,0x3f86e4f4,4 +np.float32,0xbdad2520,0xbdad5a19,4 +np.float32,0xbf5c5704,0xbf84b0e5,4 +np.float32,0x3f47b54e,0x3f65145e,4 +np.float32,0x3eb4fc78,0x3eb8fc0c,4 +np.float32,0x3dca1450,0x3dca68a1,4 +np.float32,0x3eb02a74,0x3eb3d757,4 +np.float32,0x3f74ae6a,0x3fa2db75,4 +np.float32,0x3f800000,0x3fc90fdb,4 +np.float32,0xbdb46a00,0xbdb4a5f2,4 +np.float32,0xbe9f2ba6,0xbea1da4e,4 +np.float32,0x3f0afa70,0x3f12e8f7,4 +np.float32,0xbf677b20,0xbf909547,4 +np.float32,0x3eff9188,0x3f05cacf,4 +np.float32,0x3f720562,0x3f9e911b,4 +np.float32,0xbf7180d8,0xbf9dc794,4 +np.float32,0xbee7d076,0xbef0919d,4 +np.float32,0x3f0432ce,0x3f0aea95,4 +np.float32,0x3f3bc4c8,0x3f52cb54,4 +np.float32,0xbea72f30,0xbeaa4ebe,4 +np.float32,0x3e90ed00,0x3e92ef33,4 +np.float32,0xbda63670,0xbda6654a,4 +np.float32,0xbf5a6f85,0xbf82d7e0,4 +np.float32,0x3e6e8808,0x3e70be34,4 +np.float32,0xbf4f3822,0xbf71768f,4 +np.float32,0x3e5c8a68,0x3e5e483f,4 +np.float32,0xbf0669d4,0xbf0d82c4,4 +np.float32,0xbf79f77c,0xbfad37b0,4 +np.float32,0x3f25c82c,0x3f345453,4 +np.float32,0x3f1b2948,0x3f26b188,4 
+np.float32,0x3ef7e288,0x3f016159,4 +np.float32,0x3c274280,0x3c27433e,4 +np.float32,0xbf4c8fa0,0xbf6cfd5e,4 +np.float32,0x3ea4ccb4,0x3ea7c966,4 +np.float32,0xbf7b157e,0xbfafefca,4 +np.float32,0xbee4c2b0,0xbeed264d,4 +np.float32,0xbc1fd640,0xbc1fd6e6,4 +np.float32,0x3e892308,0x3e8ad4f6,4 +np.float32,0xbf3f69c7,0xbf5837ed,4 +np.float32,0x3ec879e8,0x3ecdfd05,4 +np.float32,0x3f07a8c6,0x3f0efa30,4 +np.float32,0x3f67b880,0x3f90dd4d,4 +np.float32,0x3e8a11c8,0x3e8bccd5,4 +np.float32,0x3f7df6fc,0x3fb8e935,4 +np.float32,0xbef3e498,0xbefe3599,4 +np.float32,0xbf18ad7d,0xbf2395d8,4 +np.float32,0x3f2bce74,0x3f3c57f5,4 +np.float32,0xbf38086e,0xbf4d5c2e,4 +np.float32,0x3f772d7a,0x3fa75c35,4 +np.float32,0xbf3b6e24,0xbf524c00,4 +np.float32,0xbdd39108,0xbdd3f1d4,4 +np.float32,0xbf691f6b,0xbf928974,4 +np.float32,0x3f146188,0x3f1e45e4,4 +np.float32,0xbf56045b,0xbf7d6e03,4 +np.float32,0xbf4b2ee4,0xbf6ab622,4 +np.float32,0xbf3fa3f6,0xbf588f9d,4 +np.float32,0x3f127bb0,0x3f1bf398,4 +np.float32,0x3ed858a0,0x3edf5d3e,4 +np.float32,0xbd6de3b0,0xbd6e05fa,4 +np.float32,0xbecc662c,0xbed24261,4 +np.float32,0xbd6791d0,0xbd67b170,4 +np.float32,0xbf146016,0xbf1e441e,4 +np.float32,0xbf61f04c,0xbf8a6841,4 +np.float32,0xbe7f16d0,0xbe80e6e7,4 +np.float32,0xbebf93e6,0xbec45b10,4 +np.float32,0xbe8a59fc,0xbe8c17d1,4 +np.float32,0xbebc7a0c,0xbec10426,4 +np.float32,0xbf2a682e,0xbf3a7649,4 +np.float32,0xbe18d0cc,0xbe19637b,4 +np.float32,0x3d7f5100,0x3d7f7b66,4 +np.float32,0xbf10f5fa,0xbf1a1998,4 +np.float32,0x3f25e956,0x3f347fdc,4 +np.float32,0x3e6e8658,0x3e70bc78,4 +np.float32,0x3f21a5de,0x3f2ef3a5,4 +np.float32,0xbf4e71d4,0xbf702607,4 +np.float32,0xbf49d6b6,0xbf688380,4 +np.float32,0xbdb729c0,0xbdb7687c,4 +np.float32,0xbf63e1f4,0xbf8c81c7,4 +np.float32,0x3dda6cb0,0x3ddad73e,4 +np.float32,0x3ee1bc40,0x3ee9c612,4 +np.float32,0x3ebdb5f8,0x3ec2581b,4 +np.float32,0x3f7d9576,0x3fb77646,4 +np.float32,0x3e087140,0x3e08d971,4 +np.float64,0xbfdba523cfb74a48,0xbfdc960ddd9c0506,1 +np.float64,0x3fb51773622a2ee0,0x3fb51d93f77089d5,1 +np.float64,0x3fc839f6d33073f0,0x3fc85f9a47dfe8e6,1 +np.float64,0xbfecba2d82f9745b,0xbff1d55416c6c993,1 +np.float64,0x3fd520fe47aa41fc,0x3fd58867f1179634,1 +np.float64,0x3fe1b369c56366d4,0x3fe2c1ac9dd2c45a,1 +np.float64,0xbfec25a7cd784b50,0xbff133417389b12d,1 +np.float64,0xbfd286342ea50c68,0xbfd2cb0bca22e66d,1 +np.float64,0x3fd5f6fe5eabedfc,0x3fd66bad16680d08,1 +np.float64,0xbfe863a87570c751,0xbfebbb9b637eb6dc,1 +np.float64,0x3fc97f5b4d32feb8,0x3fc9ab5066d8eaec,1 +np.float64,0xbfcb667af936ccf4,0xbfcb9d3017047a1d,1 +np.float64,0xbfd1b7b9afa36f74,0xbfd1f3c175706154,1 +np.float64,0x3fef97385b7f2e70,0x3ff6922a1a6c709f,1 +np.float64,0xbfd13e4205a27c84,0xbfd1757c993cdb74,1 +np.float64,0xbfd18d88aca31b12,0xbfd1c7dd75068f7d,1 +np.float64,0x3fe040ce0f60819c,0x3fe10c59d2a27089,1 +np.float64,0xbfddc7deddbb8fbe,0xbfdef9de5baecdda,1 +np.float64,0xbfcf6e96193edd2c,0xbfcfc1bb7396b9a3,1 +np.float64,0x3fd544f494aa89e8,0x3fd5ae850e2b37dd,1 +np.float64,0x3fe15b381fe2b670,0x3fe25841c7bfe2af,1 +np.float64,0xbfde793420bcf268,0xbfdfc2ddc7b4a341,1 +np.float64,0x3fd0d5db30a1abb8,0x3fd1092cef4aa4fb,1 +np.float64,0x3fe386a08c670d42,0x3fe50059bbf7f491,1 +np.float64,0xbfe0aae3a96155c8,0xbfe1880ef13e95ce,1 +np.float64,0xbfe80eeb03f01dd6,0xbfeb39e9f107e944,1 +np.float64,0xbfd531af3caa635e,0xbfd59a178f17552a,1 +np.float64,0x3fcced14ab39da28,0x3fcd2d9a806337ef,1 +np.float64,0xbfdb4c71bcb698e4,0xbfdc33d9d9daf708,1 +np.float64,0xbfde7375ecbce6ec,0xbfdfbc5611bc48ff,1 +np.float64,0x3fecc5707a798ae0,0x3ff1e2268d778017,1 
+np.float64,0x3fe8f210a1f1e422,0x3fec9b3349a5baa2,1 +np.float64,0x3fe357f9b8e6aff4,0x3fe4c5a0b89a9228,1 +np.float64,0xbfe0f863b761f0c8,0xbfe1e3283494c3d4,1 +np.float64,0x3fd017c395a02f88,0x3fd044761f2f4a66,1 +np.float64,0x3febeb4746f7d68e,0x3ff0f6b955e7feb6,1 +np.float64,0xbfbdaaeeae3b55e0,0xbfbdbc0950109261,1 +np.float64,0xbfea013095f40261,0xbfee5b8fe8ad8593,1 +np.float64,0xbfe9f87b7973f0f7,0xbfee4ca3a8438d72,1 +np.float64,0x3fd37f77cfa6fef0,0x3fd3d018c825f057,1 +np.float64,0x3fb0799cee20f340,0x3fb07c879e7cb63f,1 +np.float64,0xbfdcfd581cb9fab0,0xbfde15e35314b52d,1 +np.float64,0xbfd49781b8a92f04,0xbfd4f6fa1516fefc,1 +np.float64,0x3fb3fcb6d627f970,0x3fb401ed44a713a8,1 +np.float64,0x3fd5737ef8aae6fc,0x3fd5dfe42d4416c7,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfe56ae780ead5cf,0xbfe776ea5721b900,1 +np.float64,0x3fd4567786a8acf0,0x3fd4b255421c161a,1 +np.float64,0x3fef6fb58cfedf6c,0x3ff62012dfcf0a33,1 +np.float64,0xbfd1dbcd3da3b79a,0xbfd2194fd628f74d,1 +np.float64,0x3fd9350016b26a00,0x3fd9e8b01eb023e9,1 +np.float64,0xbfe4fb3a69e9f675,0xbfe6e1d2c9eca56c,1 +np.float64,0x3fe9fe0f73f3fc1e,0x3fee5631cfd39772,1 +np.float64,0xbfd51c1bc6aa3838,0xbfd5833b3bd53543,1 +np.float64,0x3fc64158e12c82b0,0x3fc65e7352f237d7,1 +np.float64,0x3fd0d8ee1ba1b1dc,0x3fd10c5c99a16f0e,1 +np.float64,0x3fd5554e15aaaa9c,0x3fd5bfdb9ec9e873,1 +np.float64,0x3fe61ce209ec39c4,0x3fe869bc4c28437d,1 +np.float64,0xbfe4e42c8c69c859,0xbfe6c356dac7e2db,1 +np.float64,0xbfe157021062ae04,0xbfe2533ed39f4212,1 +np.float64,0x3fe844066cf0880c,0x3feb8aea0b7bd0a4,1 +np.float64,0x3fe55016586aa02c,0x3fe752e4b2a67b9f,1 +np.float64,0x3fdabce619b579cc,0x3fdb95809bc789d9,1 +np.float64,0x3fee03bae37c0776,0x3ff3778ba38ca882,1 +np.float64,0xbfeb2f5844f65eb0,0xbff03dd1b767d3c8,1 +np.float64,0x3fedcfdbaffb9fb8,0x3ff32e81d0639164,1 +np.float64,0x3fe06fc63ee0df8c,0x3fe142fc27f92eaf,1 +np.float64,0x3fe7ce90fd6f9d22,0x3fead8f832bbbf5d,1 +np.float64,0xbfbc0015ce380028,0xbfbc0e7470e06e86,1 +np.float64,0xbfe9b3de90f367bd,0xbfedd857931dfc6b,1 +np.float64,0xbfcb588f5936b120,0xbfcb8ef0124a4f21,1 +np.float64,0x3f8d376a503a6f00,0x3f8d37ab43e7988d,1 +np.float64,0xbfdb123a40b62474,0xbfdbf38b6cf5db92,1 +np.float64,0xbfee7da6be7cfb4e,0xbff433042cd9d5eb,1 +np.float64,0xbfc4c9e01b2993c0,0xbfc4e18dbafe37ef,1 +np.float64,0x3fedd42faffba860,0x3ff334790cd18a19,1 +np.float64,0x3fe9cdf772f39bee,0x3fee044f87b856ab,1 +np.float64,0x3fe0245881e048b2,0x3fe0eb5a1f739c8d,1 +np.float64,0xbfe4712bd9e8e258,0xbfe62cb3d82034aa,1 +np.float64,0x3fe9a16b46f342d6,0x3fedb972b2542551,1 +np.float64,0xbfe57ab4536af568,0xbfe78c34b03569c2,1 +np.float64,0x3fb6d6ceb22dada0,0x3fb6de976964d6dd,1 +np.float64,0x3fc3ac23a3275848,0x3fc3c02de53919b8,1 +np.float64,0xbfccb531e7396a64,0xbfccf43ec69f6281,1 +np.float64,0xbfd2f07fc8a5e100,0xbfd33a35a8c41b62,1 +np.float64,0xbfe3e5dd04e7cbba,0xbfe57940157c27ba,1 +np.float64,0x3feefe40757dfc80,0x3ff51bc72b846af6,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fecb7b766796f6e,0x3ff1d28972a0fc7e,1 +np.float64,0xbfea1bf1357437e2,0xbfee89a6532bfd71,1 +np.float64,0xbfca3983b7347308,0xbfca696463b791ef,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbf886b45d030d680,0xbf886b6bbc04314b,1 +np.float64,0x3fd5224bb5aa4498,0x3fd589c92e82218f,1 +np.float64,0xbfec799874f8f331,0xbff18d5158b8e640,1 +np.float64,0xbf88124410302480,0xbf88126863350a16,1 +np.float64,0xbfe37feaaa66ffd6,0xbfe4f7e24382e79d,1 +np.float64,0x3fd777eca1aeefd8,0x3fd8076ead6d55dc,1 +np.float64,0x3fecaaeb3af955d6,0x3ff1c4159fa3e965,1 
+np.float64,0xbfeb81e4e6f703ca,0xbff08d4e4c77fada,1 +np.float64,0xbfd7d0a0edafa142,0xbfd866e37010312e,1 +np.float64,0x3feda48c00fb4918,0x3ff2f3fd33c36307,1 +np.float64,0x3feb87ecc4770fda,0x3ff09336e490deda,1 +np.float64,0xbfefd78ad27faf16,0xbff78abbafb50ac1,1 +np.float64,0x3fe58e918c6b1d24,0x3fe7a70b38cbf016,1 +np.float64,0x3fda163b95b42c78,0x3fdade86b88ba4ee,1 +np.float64,0x3fe8fc1aaf71f836,0x3fecab3f93b59df5,1 +np.float64,0xbf8de56f903bcac0,0xbf8de5b527cec797,1 +np.float64,0xbfec112db2f8225b,0xbff11dd648de706f,1 +np.float64,0x3fc3214713264290,0x3fc333b1c862f7d0,1 +np.float64,0xbfeb5e5836f6bcb0,0xbff06ac364b49177,1 +np.float64,0x3fc23d9777247b30,0x3fc24d8ae3bcb615,1 +np.float64,0xbfdf0eed65be1dda,0xbfe036cea9b9dfb6,1 +np.float64,0xbfb2d5c85a25ab90,0xbfb2da24bb409ff3,1 +np.float64,0xbfecdda0c3f9bb42,0xbff1fdf94fc6e89e,1 +np.float64,0x3fdfe79154bfcf24,0x3fe0b338e0476a9d,1 +np.float64,0xbfd712ac6bae2558,0xbfd79abde21f287b,1 +np.float64,0x3fea3f148a747e2a,0x3feec6bed9d4fa04,1 +np.float64,0x3fd4879e4ca90f3c,0x3fd4e632fa4e2edd,1 +np.float64,0x3fe9137a9e7226f6,0x3fecd0c441088d6a,1 +np.float64,0xbfc75bf4ef2eb7e8,0xbfc77da8347d742d,1 +np.float64,0xbfd94090a0b28122,0xbfd9f5458816ed5a,1 +np.float64,0x3fde439cbcbc8738,0x3fdf85fbf496b61f,1 +np.float64,0xbfe18bacdce3175a,0xbfe29210e01237f7,1 +np.float64,0xbfd58ec413ab1d88,0xbfd5fcd838f0a934,1 +np.float64,0xbfeae5af2d75cb5e,0xbfeff1de1b4a06be,1 +np.float64,0x3fb64d1a162c9a30,0x3fb65458fb831354,1 +np.float64,0x3fc18b1e15231640,0x3fc1994c6ffd7a6a,1 +np.float64,0xbfd7b881bcaf7104,0xbfd84ce89a9ee8c7,1 +np.float64,0x3feb916a40f722d4,0x3ff09c8aa851d7c4,1 +np.float64,0x3fdab5fbb5b56bf8,0x3fdb8de43961bbde,1 +np.float64,0x3fe4f35402e9e6a8,0x3fe6d75dc5082894,1 +np.float64,0x3fe2fdb2e5e5fb66,0x3fe454e32a5d2182,1 +np.float64,0x3fe8607195f0c0e4,0x3febb6a4c3bf6a5c,1 +np.float64,0x3fd543ca9aaa8794,0x3fd5ad49203ae572,1 +np.float64,0x3fe8e05ca1f1c0ba,0x3fec7eff123dcc58,1 +np.float64,0x3fe298b6ca65316e,0x3fe3d81d2927c4dd,1 +np.float64,0x3fcfecea733fd9d8,0x3fd0220f1d0faf78,1 +np.float64,0xbfe2e739f065ce74,0xbfe439004e73772a,1 +np.float64,0xbfd1ae6b82a35cd8,0xbfd1ea129a5ee756,1 +np.float64,0xbfeb7edff576fdc0,0xbff08a5a638b8a8b,1 +np.float64,0x3fe5b645ff6b6c8c,0x3fe7dcee1faefe3f,1 +np.float64,0xbfd478427ba8f084,0xbfd4d5fc7c239e60,1 +np.float64,0xbfe39904e3e7320a,0xbfe517972b30b1e5,1 +np.float64,0xbfd3b75b6ba76eb6,0xbfd40acf20a6e074,1 +np.float64,0x3fd596267aab2c4c,0x3fd604b01faeaf75,1 +np.float64,0x3fe134463762688c,0x3fe229fc36784a72,1 +np.float64,0x3fd25dadf7a4bb5c,0x3fd2a0b9e04ea060,1 +np.float64,0xbfc05d3e0b20ba7c,0xbfc068bd2bb9966f,1 +np.float64,0x3f8cf517b039ea00,0x3f8cf556ed74b163,1 +np.float64,0x3fda87361cb50e6c,0x3fdb5a75af897e7f,1 +np.float64,0x3fe53e1926ea7c32,0x3fe73acf01b8ff31,1 +np.float64,0x3fe2e94857e5d290,0x3fe43b8cc820f9c7,1 +np.float64,0x3fd81fe6acb03fcc,0x3fd8bc623c0068cf,1 +np.float64,0xbfddf662c3bbecc6,0xbfdf2e76dc90786e,1 +np.float64,0x3fece174fbf9c2ea,0x3ff2026a1a889580,1 +np.float64,0xbfdc83c5b8b9078c,0xbfdd8dcf6ee3b7da,1 +np.float64,0x3feaf5448f75ea8a,0x3ff0075b108bcd0d,1 +np.float64,0xbfebf32f7ef7e65f,0xbff0fed42aaa826a,1 +np.float64,0x3fe389e5e8e713cc,0x3fe5047ade055ccb,1 +np.float64,0x3f635cdcc026ba00,0x3f635cddeea082ce,1 +np.float64,0x3fae580f543cb020,0x3fae5c9d5108a796,1 +np.float64,0x3fec9fafce793f60,0x3ff1b77bec654f00,1 +np.float64,0x3fb19d226e233a40,0x3fb1a0b32531f7ee,1 +np.float64,0xbfdf9a71e7bf34e4,0xbfe086cef88626c7,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xbfef170ba2fe2e17,0xbff54ed4675f5b8a,1 
+np.float64,0xbfcc6e2f8f38dc60,0xbfccab65fc34d183,1 +np.float64,0x3fee756c4bfcead8,0x3ff4258782c137e6,1 +np.float64,0xbfd461c218a8c384,0xbfd4be3e391f0ff4,1 +np.float64,0xbfe3b64686e76c8d,0xbfe53caa16d6c90f,1 +np.float64,0xbfc1c65d8d238cbc,0xbfc1d51e58f82403,1 +np.float64,0x3fe6e06c63edc0d8,0x3fe97cb832eeb6a2,1 +np.float64,0xbfc9fc20b933f840,0xbfca2ab004312d85,1 +np.float64,0xbfe29aa6df65354e,0xbfe3da7ecf3ba466,1 +np.float64,0x3fea4df7d1749bf0,0x3feee0d448bd4746,1 +np.float64,0xbfedec6161fbd8c3,0xbff3563e1d943aa2,1 +np.float64,0x3fdb6f0437b6de08,0x3fdc5a1888b1213d,1 +np.float64,0xbfe270cbd3e4e198,0xbfe3a72ac27a0b0c,1 +np.float64,0xbfdfff8068bfff00,0xbfe0c1088e3b8983,1 +np.float64,0xbfd28edbe6a51db8,0xbfd2d416c8ed363e,1 +np.float64,0xbfb4e35f9229c6c0,0xbfb4e9531d2a737f,1 +np.float64,0xbfee6727e97cce50,0xbff40e7717576e46,1 +np.float64,0xbfddb5fbddbb6bf8,0xbfdee5aad78f5361,1 +np.float64,0xbfdf9d3e9dbf3a7e,0xbfe0886b191f2957,1 +np.float64,0x3fa57e77042afce0,0x3fa5801518ea9342,1 +np.float64,0x3f95c4e4882b89c0,0x3f95c55003c8e714,1 +np.float64,0x3fd9b10f61b36220,0x3fda6fe5d635a8aa,1 +np.float64,0xbfe2973411652e68,0xbfe3d641fe9885fd,1 +np.float64,0xbfee87bd5a7d0f7b,0xbff443bea81b3fff,1 +np.float64,0x3f9ea064c83d40c0,0x3f9ea19025085b2f,1 +np.float64,0xbfe4b823dfe97048,0xbfe689623d30dc75,1 +np.float64,0xbfa06a326c20d460,0xbfa06aeacbcd3eb8,1 +np.float64,0x3fe1e5c4c1e3cb8a,0x3fe2fe44b822f20e,1 +np.float64,0x3f99dafaa833b600,0x3f99dbaec10a1a0a,1 +np.float64,0xbfed7cb3877af967,0xbff2bfe9e556aaf9,1 +np.float64,0x3fd604f2e2ac09e4,0x3fd67a89408ce6ba,1 +np.float64,0x3fec57b60f78af6c,0x3ff16881f46d60f7,1 +np.float64,0xbfea2e3a17745c74,0xbfeea95c7190fd42,1 +np.float64,0xbfd60a7c37ac14f8,0xbfd6806ed642de35,1 +np.float64,0xbfe544b9726a8973,0xbfe743ac399d81d7,1 +np.float64,0xbfd13520faa26a42,0xbfd16c02034a8fe0,1 +np.float64,0xbfea9ea59ff53d4b,0xbfef70538ee12e00,1 +np.float64,0x3fd66633f8accc68,0x3fd6e23c13ab0e9e,1 +np.float64,0xbfe4071bd3e80e38,0xbfe5a3c9ba897d81,1 +np.float64,0xbfbe1659fa3c2cb0,0xbfbe2831d4fed196,1 +np.float64,0xbfd3312777a6624e,0xbfd37df09b9baeba,1 +np.float64,0x3fd13997caa27330,0x3fd170a4900c8907,1 +np.float64,0xbfe7cbc235ef9784,0xbfead4c4d6cbf129,1 +np.float64,0xbfe1456571628acb,0xbfe23e4ec768c8e2,1 +np.float64,0xbfedf1a044fbe340,0xbff35da96773e176,1 +np.float64,0x3fce38b1553c7160,0x3fce8270709774f9,1 +np.float64,0xbfecb01761f9602f,0xbff1c9e9d382f1f8,1 +np.float64,0xbfe0a03560e1406b,0xbfe17b8d5a1ca662,1 +np.float64,0x3fe50f37cbea1e70,0x3fe6fc55e1ae7da6,1 +np.float64,0xbfe12d64a0625aca,0xbfe221d3a7834e43,1 +np.float64,0xbf6fb288403f6500,0xbf6fb28d6f389db6,1 +np.float64,0x3fda831765b50630,0x3fdb55eecae58ca9,1 +np.float64,0x3fe1a0fe4c6341fc,0x3fe2ab9564304425,1 +np.float64,0xbfef2678a77e4cf1,0xbff56ff42b2797bb,1 +np.float64,0xbfab269c1c364d40,0xbfab29df1cd48779,1 +np.float64,0x3fe8ec82a271d906,0x3fec92567d7a6675,1 +np.float64,0xbfc235115f246a24,0xbfc244ee567682ea,1 +np.float64,0x3feef5bf8d7deb80,0x3ff50ad4875ee9bd,1 +np.float64,0x3fe768b5486ed16a,0x3fea421356160e65,1 +np.float64,0xbfd4255684a84aae,0xbfd47e8baf7ec7f6,1 +np.float64,0x3fc7f67f2b2fed00,0x3fc81ae83cf92dd5,1 +np.float64,0x3fe9b1b19a736364,0x3fedd4b0e24ee741,1 +np.float64,0x3fb27eb9e624fd70,0x3fb282dacd89ce28,1 +np.float64,0xbfd490b710a9216e,0xbfd4efcdeb213458,1 +np.float64,0xbfd1347b2ca268f6,0xbfd16b55dece2d38,1 +np.float64,0x3fc6a5668d2d4ad0,0x3fc6c41452c0c087,1 +np.float64,0xbfca7b209f34f640,0xbfcaac710486f6bd,1 +np.float64,0x3fc23a1a47247438,0x3fc24a047fd4c27a,1 +np.float64,0x3fdb1413a8b62828,0x3fdbf595e2d994bc,1 
+np.float64,0xbfea69b396f4d367,0xbfef11bdd2b0709a,1 +np.float64,0x3fd14c9958a29934,0x3fd1846161b10422,1 +np.float64,0xbfe205f44be40be8,0xbfe325283aa3c6a8,1 +np.float64,0x3fecd03c9ef9a07a,0x3ff1ee85aaf52a01,1 +np.float64,0x3fe34281d7e68504,0x3fe4aab63e6de816,1 +np.float64,0xbfe120e2376241c4,0xbfe213023ab03939,1 +np.float64,0xbfe951edc4f2a3dc,0xbfed3615e38576f8,1 +np.float64,0x3fe5a2286f6b4450,0x3fe7c196e0ec10ed,1 +np.float64,0xbfed7a3e1f7af47c,0xbff2bcc0793555d2,1 +np.float64,0x3fe050274960a04e,0x3fe11e2e256ea5cc,1 +np.float64,0xbfcfa71f653f4e40,0xbfcffc11483d6a06,1 +np.float64,0x3f6ead2e403d5a00,0x3f6ead32f314c052,1 +np.float64,0x3fe3a2a026674540,0x3fe523bfe085f6ec,1 +np.float64,0xbfe294a62e65294c,0xbfe3d31ebd0b4ca2,1 +np.float64,0xbfb4894d06291298,0xbfb48ef4b8e256b8,1 +np.float64,0xbfc0c042c1218084,0xbfc0cc98ac2767c4,1 +np.float64,0xbfc6a32cb52d4658,0xbfc6c1d1597ed06b,1 +np.float64,0xbfd30f7777a61eee,0xbfd35aa39fee34eb,1 +np.float64,0x3fe7fc2c2eeff858,0x3feb1d8a558b5537,1 +np.float64,0x7fefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfdadf917bb5bf22,0xbfdbbbae9a9f67a0,1 +np.float64,0xbfcf0395e13e072c,0xbfcf5366015f7362,1 +np.float64,0xbfe8644c9170c899,0xbfebbc98e74a227d,1 +np.float64,0x3fc3b2d8e52765b0,0x3fc3c6f7d44cffaa,1 +np.float64,0x3fc57407b92ae810,0x3fc58e12ccdd47a1,1 +np.float64,0x3fd56a560daad4ac,0x3fd5d62b8dfcc058,1 +np.float64,0x3fd595deefab2bbc,0x3fd6046420b2f79b,1 +np.float64,0xbfd5360f50aa6c1e,0xbfd59ebaacd815b8,1 +np.float64,0x3fdfb6aababf6d54,0x3fe0970b8aac9f61,1 +np.float64,0x3ff0000000000000,0x3ff921fb54442d18,1 +np.float64,0xbfeb3a8958f67513,0xbff04872e8278c79,1 +np.float64,0x3f9e1ea6683c3d40,0x3f9e1fc326186705,1 +np.float64,0x3fe6b6d5986d6dac,0x3fe94175bd60b19d,1 +np.float64,0xbfee4d90b77c9b21,0xbff3e60e9134edc2,1 +np.float64,0x3fd806ce0cb00d9c,0x3fd8a14c4855a8f5,1 +np.float64,0x3fd54acc75aa9598,0x3fd5b4b72fcbb5df,1 +np.float64,0xbfe59761f16b2ec4,0xbfe7b2fa5d0244ac,1 +np.float64,0xbfcd4fa3513a9f48,0xbfcd92d0814a5383,1 +np.float64,0xbfdc827523b904ea,0xbfdd8c577b53053c,1 +np.float64,0xbfd4bb7f34a976fe,0xbfd51d00d9a99360,1 +np.float64,0xbfe818bc87f03179,0xbfeb48d1ea0199c5,1 +np.float64,0xbfa8a2e15c3145c0,0xbfa8a5510ba0e45c,1 +np.float64,0xbfb6d15f422da2c0,0xbfb6d922689da015,1 +np.float64,0x3fcd04eaab3a09d8,0x3fcd46131746ef08,1 +np.float64,0x3fcfb5cfbb3f6ba0,0x3fd0059d308237f3,1 +np.float64,0x3fe8dcf609f1b9ec,0x3fec7997973010b6,1 +np.float64,0xbfdf1834d7be306a,0xbfe03c1d4e2b48f0,1 +np.float64,0x3fee82ae50fd055c,0x3ff43b545066fe1a,1 +np.float64,0xbfde039c08bc0738,0xbfdf3d6ed4d2ee5c,1 +np.float64,0x3fec07389bf80e72,0x3ff1137ed0acd161,1 +np.float64,0xbfef44c010fe8980,0xbff5b488ad22a4c5,1 +np.float64,0x3f76e722e02dce00,0x3f76e72ab2759d88,1 +np.float64,0xbfcaa9e6053553cc,0xbfcadc41125fca93,1 +np.float64,0x3fed6088147ac110,0x3ff29c06c4ef35fc,1 +np.float64,0x3fd32bd836a657b0,0x3fd3785fdb75909f,1 +np.float64,0xbfeedbb1d97db764,0xbff4d87f6c82a93c,1 +np.float64,0xbfe40f31d5e81e64,0xbfe5ae292cf258a2,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbfeb2b25bc76564c,0xbff039d81388550c,1 +np.float64,0x3fec5008fa78a012,0x3ff1604195801da3,1 +np.float64,0x3fce2d4f293c5aa0,0x3fce76b99c2db4da,1 +np.float64,0xbfdc435412b886a8,0xbfdd45e7b7813f1e,1 +np.float64,0x3fdf2c9d06be593c,0x3fe047cb03c141b6,1 +np.float64,0x3fddefc61ebbdf8c,0x3fdf26fb8fad9fae,1 +np.float64,0x3fab50218436a040,0x3fab537395eaf3bb,1 +np.float64,0xbfd5b95a8fab72b6,0xbfd62a191a59343a,1 +np.float64,0x3fdbf803b4b7f008,0x3fdcf211578e98c3,1 +np.float64,0xbfec8c255979184b,0xbff1a1bee108ed30,1 
+np.float64,0x3fe33cdaffe679b6,0x3fe4a3a318cd994f,1 +np.float64,0x3fd8cf585cb19eb0,0x3fd97a408bf3c38c,1 +np.float64,0x3fe919dde07233bc,0x3fecdb0ea13a2455,1 +np.float64,0xbfd5ba35e4ab746c,0xbfd62b024805542d,1 +np.float64,0x3fd2f933e7a5f268,0x3fd343527565e97c,1 +np.float64,0xbfe5b9f8ddeb73f2,0xbfe7e1f772c3e438,1 +np.float64,0x3fe843cd92f0879c,0x3feb8a92d68eae3e,1 +np.float64,0xbfd096b234a12d64,0xbfd0c7beca2c6605,1 +np.float64,0xbfef3363da7e66c8,0xbff58c98dde6c27c,1 +np.float64,0x3fd51b01ddaa3604,0x3fd582109d89ead1,1 +np.float64,0x3fea0f10ff741e22,0x3fee736c2d2a2067,1 +np.float64,0x3fc276e7b724edd0,0x3fc28774520bc6d4,1 +np.float64,0xbfef9abc9f7f3579,0xbff69d49762b1889,1 +np.float64,0x3fe1539ec0e2a73e,0x3fe24f370b7687d0,1 +np.float64,0x3fad72350c3ae460,0x3fad765e7766682a,1 +np.float64,0x3fa289a47c251340,0x3fa28aae12f41646,1 +np.float64,0xbfe5c488e5eb8912,0xbfe7f05d7e7dcddb,1 +np.float64,0xbfc22ef1d7245de4,0xbfc23ebeb990a1b8,1 +np.float64,0x3fe59a0b80eb3418,0x3fe7b695fdcba1de,1 +np.float64,0xbfe9cad619f395ac,0xbfedff0514d91e2c,1 +np.float64,0x3fc8bc74eb3178e8,0x3fc8e48cb22da666,1 +np.float64,0xbfc5389a3f2a7134,0xbfc551cd6febc544,1 +np.float64,0x3fce82feb33d0600,0x3fceceecce2467ef,1 +np.float64,0x3fda346791b468d0,0x3fdaff95154a4ca6,1 +np.float64,0x3fd04501fea08a04,0x3fd073397b32607e,1 +np.float64,0xbfb6be498a2d7c90,0xbfb6c5f93aeb0e57,1 +np.float64,0x3fe1f030dd63e062,0x3fe30ad8fb97cce0,1 +np.float64,0xbfee3fb36dfc7f67,0xbff3d0a5e380b86f,1 +np.float64,0xbfa876773c30ecf0,0xbfa878d9d3df6a3f,1 +np.float64,0x3fdb58296eb6b054,0x3fdc40ceffb17f82,1 +np.float64,0xbfea16b5d8742d6c,0xbfee809b99fd6adc,1 +np.float64,0xbfdc5062b6b8a0c6,0xbfdd547623275fdb,1 +np.float64,0x3fef6db242fedb64,0x3ff61ab4cdaef467,1 +np.float64,0xbfc9f778f933eef0,0xbfca25eef1088167,1 +np.float64,0xbfd22063eba440c8,0xbfd260c8766c69cf,1 +np.float64,0x3fdd2379f2ba46f4,0x3fde40b025cb1ffa,1 +np.float64,0xbfea967af2f52cf6,0xbfef61a178774636,1 +np.float64,0x3fe4f5b49fe9eb6a,0x3fe6da8311a5520e,1 +np.float64,0x3feccde17b799bc2,0x3ff1ebd0ea228b71,1 +np.float64,0x3fe1bb76506376ec,0x3fe2cb56fca01840,1 +np.float64,0xbfef94e583ff29cb,0xbff68aeab8ba75a2,1 +np.float64,0x3fed024a55fa0494,0x3ff228ea5d456e9d,1 +np.float64,0xbfe877b2a8f0ef65,0xbfebdaa1a4712459,1 +np.float64,0x3fef687a8d7ed0f6,0x3ff60cf5fef8d448,1 +np.float64,0xbfeeb2dc8afd65b9,0xbff48dda6a906cd6,1 +np.float64,0x3fdb2e28aeb65c50,0x3fdc12620655eb7a,1 +np.float64,0x3fedc1863afb830c,0x3ff31ae823315e83,1 +np.float64,0xbfe6b1bb546d6376,0xbfe93a38163e3a59,1 +np.float64,0x3fe479c78468f390,0x3fe637e5c0fc5730,1 +np.float64,0x3fbad1fade35a3f0,0x3fbade9a43ca05cf,1 +np.float64,0xbfe2d1c563e5a38b,0xbfe41e712785900c,1 +np.float64,0xbfc08c33ed211868,0xbfc09817a752d500,1 +np.float64,0xbfecce0935f99c12,0xbff1ebfe84524037,1 +np.float64,0x3fce4ef0e73c9de0,0x3fce995638a3dc48,1 +np.float64,0xbfd2fb2343a5f646,0xbfd345592517ca18,1 +np.float64,0x3fd848f7cdb091f0,0x3fd8e8bee5f7b49a,1 +np.float64,0x3fe532b7d2ea6570,0x3fe72b9ac747926a,1 +np.float64,0x3fd616aadcac2d54,0x3fd68d692c5cad42,1 +np.float64,0x3fd7720eb3aee41c,0x3fd801206a0e1e43,1 +np.float64,0x3fee835a35fd06b4,0x3ff43c7175eb7a54,1 +np.float64,0xbfe2e8f70b65d1ee,0xbfe43b2800a947a7,1 +np.float64,0xbfed38f45d7a71e9,0xbff26acd6bde7174,1 +np.float64,0xbfc0c62661218c4c,0xbfc0d28964d66120,1 +np.float64,0x3fe97940bef2f282,0x3fed76b986a74ee3,1 +np.float64,0x3fc96f7dc532def8,0x3fc99b20044c8fcf,1 +np.float64,0xbfd60201eeac0404,0xbfd677675efaaedc,1 +np.float64,0x3fe63c0867ec7810,0x3fe894f060200140,1 +np.float64,0xbfef6144b37ec289,0xbff5fa589a515ba8,1 
+np.float64,0xbfde2da0c8bc5b42,0xbfdf6d0b59e3232a,1 +np.float64,0xbfd7401612ae802c,0xbfd7cb74ddd413b9,1 +np.float64,0x3fe41c012de83802,0x3fe5be9d87da3f82,1 +np.float64,0x3fdf501609bea02c,0x3fe05c1d96a2270b,1 +np.float64,0x3fcf9fa1233f3f40,0x3fcff45598e72f07,1 +np.float64,0x3fd4e3895ea9c714,0x3fd547580d8392a2,1 +np.float64,0x3fe1e8ff5fe3d1fe,0x3fe3022a0b86a2ab,1 +np.float64,0xbfe0aa55956154ab,0xbfe18768823da589,1 +np.float64,0x3fb2a0aa26254150,0x3fb2a4e1faff1c93,1 +np.float64,0x3fd3823417a70468,0x3fd3d2f808dbb167,1 +np.float64,0xbfaed323643da640,0xbfaed7e9bef69811,1 +np.float64,0x3fe661e8c4ecc3d2,0x3fe8c9c535f43c16,1 +np.float64,0xbfa429777c2852f0,0xbfa42acd38ba02a6,1 +np.float64,0x3fb5993ea22b3280,0x3fb59fd353e47397,1 +np.float64,0x3fee62d21efcc5a4,0x3ff40788f9278ade,1 +np.float64,0xbf813fb810227f80,0xbf813fc56d8f3c53,1 +np.float64,0x3fd56205deaac40c,0x3fd5cd59671ef193,1 +np.float64,0x3fd31a4de5a6349c,0x3fd365fe401b66e8,1 +np.float64,0xbfec7cc7a478f98f,0xbff190cf69703ca4,1 +np.float64,0xbf755881a02ab100,0xbf755887f52e7794,1 +np.float64,0x3fdd1c92e6ba3924,0x3fde38efb4e8605c,1 +np.float64,0x3fdf49da80be93b4,0x3fe0588af8dd4a34,1 +np.float64,0x3fe1fcdbf2e3f9b8,0x3fe31a27b9d273f2,1 +np.float64,0x3fe2a0f18be541e4,0x3fe3e23b159ce20f,1 +np.float64,0xbfed0f1561fa1e2b,0xbff23820fc0a54ca,1 +np.float64,0x3fe34a006c669400,0x3fe4b419b9ed2b83,1 +np.float64,0xbfd51be430aa37c8,0xbfd583005a4d62e7,1 +np.float64,0x3fe5ec4e336bd89c,0x3fe826caad6b0f65,1 +np.float64,0xbfdad71b1fb5ae36,0xbfdbb25bef8b53d8,1 +np.float64,0xbfe8eac2d871d586,0xbfec8f8cac7952f9,1 +np.float64,0xbfe1d5aef663ab5e,0xbfe2eae14b7ccdfd,1 +np.float64,0x3fec11d3157823a6,0x3ff11e8279506753,1 +np.float64,0xbfe67ff1166cffe2,0xbfe8f3e61c1dfd32,1 +np.float64,0xbfd101eecda203de,0xbfd136e0e9557022,1 +np.float64,0x3fde6c9e5cbcd93c,0x3fdfb48ee7efe134,1 +np.float64,0x3fec3ede9c787dbe,0x3ff14dead1e5cc1c,1 +np.float64,0x3fe7a022086f4044,0x3fea93ce2980b161,1 +np.float64,0xbfc3b2b1b7276564,0xbfc3c6d02d60bb21,1 +np.float64,0x7ff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fe60b5647ec16ac,0x3fe8517ef0544b40,1 +np.float64,0xbfd20ab654a4156c,0xbfd24a2f1b8e4932,1 +np.float64,0xbfe4aa1e2f69543c,0xbfe677005cbd2646,1 +np.float64,0xbfc831cc0b306398,0xbfc8574910d0b86d,1 +np.float64,0xbfc3143495262868,0xbfc3267961b79198,1 +np.float64,0x3fc14d64c1229ac8,0x3fc15afea90a319d,1 +np.float64,0x3fc0a5a207214b48,0x3fc0b1bd2f15c1b0,1 +np.float64,0xbfc0b8351521706c,0xbfc0c4792672d6db,1 +np.float64,0xbfdc383600b8706c,0xbfdd398429e163bd,1 +np.float64,0x3fd9e17321b3c2e8,0x3fdaa4c4d140a622,1 +np.float64,0xbfd44f079ea89e10,0xbfd4aa7d6deff4ab,1 +np.float64,0xbfc3de52a927bca4,0xbfc3f2f8f65f4c3f,1 +np.float64,0x3fe7779d566eef3a,0x3fea57f8592dbaad,1 +np.float64,0xbfe309039e661207,0xbfe462f47f9a64e5,1 +np.float64,0x3fd8e06d08b1c0dc,0x3fd98cc946e440a6,1 +np.float64,0x3fdde66c9ebbccd8,0x3fdf1c68009a8dc1,1 +np.float64,0x3fd4369c6ba86d38,0x3fd490bf460a69e4,1 +np.float64,0xbfe132252fe2644a,0xbfe22775e109cc2e,1 +np.float64,0x3fee15483c7c2a90,0x3ff39111de89036f,1 +np.float64,0xbfc1d5ee8123abdc,0xbfc1e4d66c6871a5,1 +np.float64,0x3fc851c52b30a388,0x3fc877d93fb4ae1a,1 +np.float64,0x3fdaade707b55bd0,0x3fdb85001661fffe,1 +np.float64,0xbfe79fb7f96f3f70,0xbfea9330ec27ac10,1 +np.float64,0xbfe8b0f725f161ee,0xbfec3411c0e4517a,1 +np.float64,0xbfea79f5f374f3ec,0xbfef2e9dd9270488,1 +np.float64,0x3fe0b5fe5b616bfc,0x3fe19512a36a4534,1 +np.float64,0xbfad7c622c3af8c0,0xbfad808fea96a804,1 +np.float64,0xbfe3e24dbce7c49c,0xbfe574b4c1ea9818,1 +np.float64,0xbfe80b038af01607,0xbfeb33fec279576a,1 
+np.float64,0xbfef69e2ea7ed3c6,0xbff610a5593a18bc,1 +np.float64,0x3fdcc0bb39b98178,0x3fddd1f8c9a46430,1 +np.float64,0xbfba39976a347330,0xbfba4563bb5369a4,1 +np.float64,0xbfebf9768ef7f2ed,0xbff10548ab725f74,1 +np.float64,0xbfec21c066f84381,0xbff12f2803ba052f,1 +np.float64,0xbfca216a6b3442d4,0xbfca50c5e1e5748e,1 +np.float64,0x3fd5e40da4abc81c,0x3fd65783f9a22946,1 +np.float64,0x3fc235ca17246b98,0x3fc245a8f453173f,1 +np.float64,0x3fecb5b867796b70,0x3ff1d046a0bfda69,1 +np.float64,0x3fcb457fef368b00,0x3fcb7b6daa8165a7,1 +np.float64,0xbfa5ed6f7c2bdae0,0xbfa5ef27244e2e42,1 +np.float64,0x3fecf618a1f9ec32,0x3ff21a86cc104542,1 +np.float64,0x3fe9d95413f3b2a8,0x3fee178dcafa11fc,1 +np.float64,0xbfe93a5357f274a7,0xbfed0f9a565da84a,1 +np.float64,0xbfeb9e45ff773c8c,0xbff0a93cab8e258d,1 +np.float64,0x3fcbd9d0bd37b3a0,0x3fcc134e87cae241,1 +np.float64,0x3fe55d4db76aba9c,0x3fe764a0e028475a,1 +np.float64,0xbfc8a6fc71314df8,0xbfc8ceaafbfc59a7,1 +np.float64,0x3fe0615fa660c2c0,0x3fe1323611c4cbc2,1 +np.float64,0x3fb965558632cab0,0x3fb9700b84de20ab,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fe76776c6eeceee,0x3fea40403e24a9f1,1 +np.float64,0x3fe3b7f672676fec,0x3fe53ece71a1a1b1,1 +np.float64,0xbfa9b82ba4337050,0xbfa9baf15394ca64,1 +np.float64,0xbfe31faf49663f5e,0xbfe47f31b1ca73dc,1 +np.float64,0xbfcc4c6beb3898d8,0xbfcc88c5f814b2c1,1 +np.float64,0x3fd481530aa902a8,0x3fd4df8df03bc155,1 +np.float64,0x3fd47593b8a8eb28,0x3fd4d327ab78a1a8,1 +np.float64,0x3fd70e6ccbae1cd8,0x3fd7962fe8b63d46,1 +np.float64,0x3fd25191f7a4a324,0x3fd2941623c88e02,1 +np.float64,0x3fd0603ef0a0c07c,0x3fd08f64e97588dc,1 +np.float64,0xbfc653bae52ca774,0xbfc6711e5e0d8ea9,1 +np.float64,0xbfd11db8fea23b72,0xbfd153b63c6e8812,1 +np.float64,0xbfea9bde25f537bc,0xbfef6b52268e139a,1 +np.float64,0x1,0x1,1 +np.float64,0xbfefd3806d7fa701,0xbff776dcef9583ca,1 +np.float64,0xbfe0fb8cfde1f71a,0xbfe1e6e2e774a8f8,1 +np.float64,0x3fea384534f4708a,0x3feebadaa389be0d,1 +np.float64,0x3feff761c97feec4,0x3ff866157b9d072d,1 +np.float64,0x3fe7131ccb6e263a,0x3fe9c58b4389f505,1 +np.float64,0x3fe9084f7872109e,0x3fecbed0355dbc8f,1 +np.float64,0x3f708e89e0211d00,0x3f708e8cd4946b9e,1 +np.float64,0xbfe39185f067230c,0xbfe50e1cd178244d,1 +np.float64,0x3fd67cc1a9acf984,0x3fd6fa514784b48c,1 +np.float64,0xbfecaef005f95de0,0xbff1c89c9c3ef94a,1 +np.float64,0xbfe12eec81e25dd9,0xbfe223a4285bba9a,1 +np.float64,0x3fbe7f9faa3cff40,0x3fbe92363525068d,1 +np.float64,0xbfe1950b2b632a16,0xbfe29d45fc1e4ce9,1 +np.float64,0x3fe45049e6e8a094,0x3fe6020de759e383,1 +np.float64,0x3fe4d10c8969a21a,0x3fe6aa1fe42cbeb9,1 +np.float64,0xbfe9d04658f3a08d,0xbfee08370a0dbf0c,1 +np.float64,0x3fe14fb314e29f66,0x3fe24a8d73663521,1 +np.float64,0xbfef4abfe4fe9580,0xbff5c2c1ff1250ca,1 +np.float64,0xbfe6162b366c2c56,0xbfe86073ac3c6243,1 +np.float64,0x3feffe781e7ffcf0,0x3ff8d2cbedd6a1b5,1 +np.float64,0xbff0000000000000,0xbff921fb54442d18,1 +np.float64,0x3fc1dc45ad23b888,0x3fc1eb3d9bddda58,1 +np.float64,0xbfe793f6fcef27ee,0xbfea81c93d65aa64,1 +np.float64,0x3fdef6d2bbbdeda4,0x3fe029079d42efb5,1 +np.float64,0xbfdf0ac479be1588,0xbfe0346dbc95963f,1 +np.float64,0xbfd33927d7a67250,0xbfd38653f90a5b73,1 +np.float64,0xbfe248b072e49161,0xbfe37631ef6572e1,1 +np.float64,0xbfc8ceb6af319d6c,0xbfc8f7288657f471,1 +np.float64,0x3fdd7277fcbae4f0,0x3fde99886e6766ef,1 +np.float64,0xbfe0d30c6561a619,0xbfe1b72f90bf53d6,1 +np.float64,0xbfcb0fe07d361fc0,0xbfcb448e2eae9542,1 +np.float64,0xbfe351f57fe6a3eb,0xbfe4be13eef250f2,1 +np.float64,0x3fe85ec02cf0bd80,0x3febb407e2e52e4c,1 
+np.float64,0x3fc8bc59b53178b0,0x3fc8e470f65800ec,1 +np.float64,0xbfd278d447a4f1a8,0xbfd2bd133c9c0620,1 +np.float64,0x3feda5cfd87b4ba0,0x3ff2f5ab4324f43f,1 +np.float64,0xbfd2b32a36a56654,0xbfd2fa09c36afd34,1 +np.float64,0xbfed4a81cb7a9504,0xbff28077a4f4fff4,1 +np.float64,0x3fdf079bf9be0f38,0x3fe0329f7fb13f54,1 +np.float64,0x3fd14097f6a28130,0x3fd177e9834ec23f,1 +np.float64,0xbfaeab11843d5620,0xbfaeafc5531eb6b5,1 +np.float64,0xbfac3f8c14387f20,0xbfac433893d53360,1 +np.float64,0xbfc139d7ed2273b0,0xbfc14743adbbe660,1 +np.float64,0x3fe78cb02cef1960,0x3fea7707f76edba9,1 +np.float64,0x3fefe16b41ffc2d6,0x3ff7bff36a7aa7b8,1 +np.float64,0x3fec5260d378a4c2,0x3ff162c588b0da38,1 +np.float64,0x3fedb146f17b628e,0x3ff304f90d3a15d1,1 +np.float64,0x3fd1fd45f7a3fa8c,0x3fd23c2dc3929e20,1 +np.float64,0x3fe0898a5ee11314,0x3fe1610c63e726eb,1 +np.float64,0x3fe7719946eee332,0x3fea4f205eecb59f,1 +np.float64,0x3fe955218972aa44,0x3fed3b530c1f7651,1 +np.float64,0x3fe0ccbf4461997e,0x3fe1afc7b4587836,1 +np.float64,0xbfe9204314f24086,0xbfece5605780e346,1 +np.float64,0xbfe552017feaa403,0xbfe755773cbd74d5,1 +np.float64,0x3fd8ce4b32b19c98,0x3fd9791c8dd44eae,1 +np.float64,0x3fef89acd9ff135a,0x3ff668f78adf7ced,1 +np.float64,0x3fc9d713ad33ae28,0x3fca04da6c293bbd,1 +np.float64,0xbfe22d9c4de45b38,0xbfe3553effadcf92,1 +np.float64,0x3fa5cda38c2b9b40,0x3fa5cf53c5787482,1 +np.float64,0x3fa878ebdc30f1e0,0x3fa87b4f2bf1d4c3,1 +np.float64,0x3fe8030353700606,0x3feb27e196928789,1 +np.float64,0x3fb50607222a0c10,0x3fb50c188ce391e6,1 +np.float64,0x3fd9ba4ab4b37494,0x3fda79fa8bd40f45,1 +np.float64,0x3fb564598e2ac8b0,0x3fb56abe42d1ba13,1 +np.float64,0xbfd1177c83a22efa,0xbfd14d3d7ef30cc4,1 +np.float64,0xbfd952cec7b2a59e,0xbfda09215d17c0ac,1 +np.float64,0x3fe1d8066663b00c,0x3fe2edb35770b8dd,1 +np.float64,0xbfc89427a3312850,0xbfc8bb7a7c389497,1 +np.float64,0xbfe86ebfd3f0dd80,0xbfebccc2ba0f506c,1 +np.float64,0x3fc390578b2720b0,0x3fc3a40cb7f5f728,1 +np.float64,0xbfd122f9b8a245f4,0xbfd15929dc57a897,1 +np.float64,0x3f8d0636d03a0c80,0x3f8d06767de576df,1 +np.float64,0xbfe4b55d8b696abb,0xbfe685be537a9637,1 +np.float64,0xbfdfd51cf9bfaa3a,0xbfe0a894fcff0c76,1 +np.float64,0xbfd37c1f52a6f83e,0xbfd3cc9593c37aad,1 +np.float64,0x3fd0e8283ea1d050,0x3fd11c25c800785a,1 +np.float64,0x3fd3160784a62c10,0x3fd36183a6c2880c,1 +np.float64,0x3fd4c66e57a98cdc,0x3fd5288fe3394eff,1 +np.float64,0x3fee2f7e3afc5efc,0x3ff3b8063eb30cdc,1 +np.float64,0xbfe526773a6a4cee,0xbfe71b4364215b18,1 +np.float64,0x3fea01181e740230,0x3fee5b65eccfd130,1 +np.float64,0xbfe51c03f76a3808,0xbfe70d5919d37587,1 +np.float64,0x3fd97e1375b2fc28,0x3fda3845da40b22b,1 +np.float64,0x3fd5c14a14ab8294,0x3fd632890d07ed03,1 +np.float64,0xbfec9b474279368e,0xbff1b28f50584fe3,1 +np.float64,0x3fe0139ca860273a,0x3fe0d7fc377f001c,1 +np.float64,0x3fdb080c9db61018,0x3fdbe85056358fa0,1 +np.float64,0xbfdd72ceb1bae59e,0xbfde99ea171661eb,1 +np.float64,0xbfe64e934fec9d26,0xbfe8aec2ef24be63,1 +np.float64,0x3fd1036a93a206d4,0x3fd1386adabe01bd,1 +np.float64,0x3febc9d4a5f793aa,0x3ff0d4c069f1e67d,1 +np.float64,0xbfe547a16fea8f43,0xbfe747902fe6fb4d,1 +np.float64,0x3fc289b0f9251360,0x3fc29a709de6bdd9,1 +np.float64,0xbfe694494a6d2892,0xbfe9108f3dc133e2,1 +np.float64,0x3fd827dfe4b04fc0,0x3fd8c4fe40532b91,1 +np.float64,0xbfe8b89418f17128,0xbfec400c5a334b2e,1 +np.float64,0x3fed5605147aac0a,0x3ff28ed1f612814a,1 +np.float64,0xbfed36af31fa6d5e,0xbff26804e1f71af0,1 +np.float64,0x3fdbb01c02b76038,0x3fdca2381558bbf0,1 +np.float64,0x3fe2a951666552a2,0x3fe3ec88f780f9e6,1 +np.float64,0x3fe662defbecc5be,0x3fe8cb1dbfca98ab,1 
+np.float64,0x3fd098b1b3a13164,0x3fd0c9d064e4eaf2,1 +np.float64,0x3fefa10edeff421e,0x3ff6b1c6187b18a8,1 +np.float64,0xbfec4feb7a789fd7,0xbff16021ef37a219,1 +np.float64,0x3fd8e415bbb1c82c,0x3fd990c1f8b786bd,1 +np.float64,0xbfead5a09275ab41,0xbfefd44fab5b4f6e,1 +np.float64,0xbfe8666c16f0ccd8,0xbfebbfe0c9f2a9ae,1 +np.float64,0x3fdc962132b92c44,0x3fdda2525a6f406c,1 +np.float64,0xbfe2037f03e406fe,0xbfe3222ec2a3449e,1 +np.float64,0xbfec82c27e790585,0xbff197626ea9df1e,1 +np.float64,0x3fd2b4e03ca569c0,0x3fd2fbd3c7fda23e,1 +np.float64,0xbfe9b0dee5f361be,0xbfedd34f6d3dfe8a,1 +np.float64,0x3feef45cd17de8ba,0x3ff508180687b591,1 +np.float64,0x3f82c39bf0258700,0x3f82c3ad24c3b3f1,1 +np.float64,0xbfca848cfd350918,0xbfcab612ce258546,1 +np.float64,0x3fd6442aaaac8854,0x3fd6bdea54016e48,1 +np.float64,0x3fe550799e6aa0f4,0x3fe75369c9ea5b1e,1 +np.float64,0xbfe0e9d5a361d3ac,0xbfe1d20011139d89,1 +np.float64,0x3fbfc9ff1e3f9400,0x3fbfdf0ea6885c80,1 +np.float64,0xbfa187e8b4230fd0,0xbfa188c95072092e,1 +np.float64,0x3fcd28c9533a5190,0x3fcd6ae879c21b47,1 +np.float64,0x3fc6227ec52c4500,0x3fc63f1fbb441d29,1 +np.float64,0x3fe9b7a2ed736f46,0x3feddeab49b2d176,1 +np.float64,0x3fd4aee93da95dd4,0x3fd50fb3b71e0339,1 +np.float64,0xbfe164dacf62c9b6,0xbfe263bb2f7dd5d9,1 +np.float64,0x3fec62e525f8c5ca,0x3ff17496416d9921,1 +np.float64,0x3fdd363ee0ba6c7c,0x3fde55c6a49a5f86,1 +np.float64,0x3fe65cbf75ecb97e,0x3fe8c28d31ff3ebd,1 +np.float64,0xbfe76d27ca6eda50,0xbfea4899e3661425,1 +np.float64,0xbfc305738d260ae8,0xbfc3178dcfc9d30f,1 +np.float64,0xbfd3aa2a54a75454,0xbfd3fcf1e1ce8328,1 +np.float64,0x3fd1609fc9a2c140,0x3fd1992efa539b9f,1 +np.float64,0xbfac1291bc382520,0xbfac162cc7334b4d,1 +np.float64,0xbfedb461ea7b68c4,0xbff309247850455d,1 +np.float64,0xbfe8d2adf8f1a55c,0xbfec6947be90ba92,1 +np.float64,0xbfd7128965ae2512,0xbfd79a9855bcfc5a,1 +np.float64,0x3fe8deb09471bd62,0x3fec7c56b3aee531,1 +np.float64,0xbfe5f4d329ebe9a6,0xbfe8327ea8189af8,1 +np.float64,0xbfd3b46ac9a768d6,0xbfd407b80b12ff17,1 +np.float64,0x3fec899d7cf9133a,0x3ff19ef26baca36f,1 +np.float64,0xbfec192fd5783260,0xbff126306e507fd0,1 +np.float64,0x3fe945bdaef28b7c,0x3fed222f787310bf,1 +np.float64,0xbfeff9635d7ff2c7,0xbff87d6773f318eb,1 +np.float64,0xbfd604b81cac0970,0xbfd67a4aa852559a,1 +np.float64,0x3fcd1cc9d53a3990,0x3fcd5e962e237c24,1 +np.float64,0xbfed77b0fffaef62,0xbff2b97a1c9b6483,1 +np.float64,0xbfc9c69325338d28,0xbfc9f401500402fb,1 +np.float64,0xbfdf97e246bf2fc4,0xbfe0855601ea9db3,1 +np.float64,0x3fc7e6304f2fcc60,0x3fc80a4e718504cd,1 +np.float64,0x3fec3b599e7876b4,0x3ff14a2d1b9c68e6,1 +np.float64,0xbfe98618e1f30c32,0xbfed8bfbb31c394a,1 +np.float64,0xbfe59b3c0feb3678,0xbfe7b832d6df81de,1 +np.float64,0xbfe54ce2fe6a99c6,0xbfe74e9a85be4116,1 +np.float64,0x3fc9db49cb33b690,0x3fca092737ef500a,1 +np.float64,0xbfb4a922ae295248,0xbfb4aee4e39078a9,1 +np.float64,0xbfd0e542e0a1ca86,0xbfd11925208d66af,1 +np.float64,0x3fd70543f2ae0a88,0x3fd78c5e9238a3ee,1 +np.float64,0x3fd67f7a7facfef4,0x3fd6fd3998df8545,1 +np.float64,0xbfe40b643d6816c8,0xbfe5a947e427f298,1 +np.float64,0xbfcd85f69b3b0bec,0xbfcdcaa24b75f1a3,1 +np.float64,0x3fec705fb4f8e0c0,0x3ff1833c82163ee2,1 +np.float64,0x3fb37650ea26eca0,0x3fb37b20c16fb717,1 +np.float64,0x3fe5ebfa55ebd7f4,0x3fe826578d716e70,1 +np.float64,0x3fe991dfe5f323c0,0x3fed9f8a4bf1f588,1 +np.float64,0xbfd658bd0aacb17a,0xbfd6d3dd06e54900,1 +np.float64,0xbfc24860252490c0,0xbfc258701a0b9290,1 +np.float64,0xbfefb8d763ff71af,0xbff705b6ea4a569d,1 +np.float64,0x3fb8fcb4ae31f970,0x3fb906e809e7899f,1 +np.float64,0x3fce6343cb3cc688,0x3fceae41d1629625,1 
+np.float64,0xbfd43d5a11a87ab4,0xbfd497da25687e07,1 +np.float64,0xbfe9568851f2ad11,0xbfed3d9e5fe83a76,1 +np.float64,0x3fe1b66153e36cc2,0x3fe2c53c7e016271,1 +np.float64,0x3fef27452bfe4e8a,0x3ff571b3486ed416,1 +np.float64,0x3fca87c0a7350f80,0x3fcab958a7bb82d4,1 +np.float64,0xbfd8776a8fb0eed6,0xbfd91afaf2f50edf,1 +np.float64,0x3fe9522a76f2a454,0x3fed3679264e1525,1 +np.float64,0x3fea14ff2cf429fe,0x3fee7da6431cc316,1 +np.float64,0x3fe970618bf2e0c4,0x3fed68154d54dd97,1 +np.float64,0x3fd3410cfca68218,0x3fd38e9b21792240,1 +np.float64,0xbf6a8070c0350100,0xbf6a8073c7c34517,1 +np.float64,0xbfbe449de23c8938,0xbfbe56c8e5e4d98b,1 +np.float64,0x3fedbc92e27b7926,0x3ff314313216d8e6,1 +np.float64,0xbfe3be4706677c8e,0xbfe546d3ceb85aea,1 +np.float64,0x3fe30cd6d76619ae,0x3fe467b6f2664a8d,1 +np.float64,0x3fd7d69b21afad38,0x3fd86d54284d05ad,1 +np.float64,0xbfe501001fea0200,0xbfe6e978afcff4d9,1 +np.float64,0xbfe44ba3d8e89748,0xbfe5fc0a31cd1e3e,1 +np.float64,0x3fec52f7c078a5f0,0x3ff16367acb209b2,1 +np.float64,0xbfcb19efcb3633e0,0xbfcb4ed9235a7d47,1 +np.float64,0xbfab86796c370cf0,0xbfab89df7bf15710,1 +np.float64,0xbfb962feda32c600,0xbfb96db1e1679c98,1 +np.float64,0x3fe0dd14e861ba2a,0x3fe1c2fc72810567,1 +np.float64,0x3fe41bcc6de83798,0x3fe5be59b7f9003b,1 +np.float64,0x3fc82f4c4f305e98,0x3fc854bd9798939f,1 +np.float64,0xbfcd143a613a2874,0xbfcd55cbd1619d84,1 +np.float64,0xbfd52da61baa5b4c,0xbfd595d0b3543439,1 +np.float64,0xbfb71b4a8e2e3698,0xbfb7235a4ab8432f,1 +np.float64,0xbfec141a19782834,0xbff120e1e39fc856,1 +np.float64,0xbfdba9319db75264,0xbfdc9a8ca2578bb2,1 +np.float64,0xbfbce5d74639cbb0,0xbfbcf5a4878cfa51,1 +np.float64,0x3fde67f7b3bccff0,0x3fdfaf45a9f843ad,1 +np.float64,0xbfe12d87bc625b10,0xbfe221fd4476eb71,1 +np.float64,0x3fe35b8f6be6b71e,0x3fe4ca20f65179e1,1 +np.float64,0xbfdbada1d3b75b44,0xbfdc9f78b19f93d1,1 +np.float64,0xbfc60159c52c02b4,0xbfc61d79b879f598,1 +np.float64,0x3fd6b81c38ad7038,0x3fd739c27bfa16d8,1 +np.float64,0xbfd646a253ac8d44,0xbfd6c08c19612bbb,1 +np.float64,0xbfe6babef0ed757e,0xbfe94703d0bfa311,1 +np.float64,0xbfed5671f1faace4,0xbff28f5a3f3683d0,1 +np.float64,0x3fc01d1e85203a40,0x3fc02817ec0dfd38,1 +np.float64,0xbfe9188a61f23115,0xbfecd8eb5da84223,1 +np.float64,0x3fdca3bab9b94774,0x3fddb1868660c239,1 +np.float64,0xbfa255750c24aaf0,0xbfa25675f7b36343,1 +np.float64,0x3fb3602db626c060,0x3fb364ed2d5b2876,1 +np.float64,0xbfd30a14bda6142a,0xbfd354ff703b8862,1 +np.float64,0xbfe1cfe381639fc7,0xbfe2e3e720b968c8,1 +np.float64,0xbfd2af6a4fa55ed4,0xbfd2f61e190bcd1f,1 +np.float64,0xbfe93c50937278a1,0xbfed12d64bb10d73,1 +np.float64,0x3fddd8bc44bbb178,0x3fdf0ced7f9005cc,1 +np.float64,0x3fdb2bc73cb65790,0x3fdc0fc0e18e425e,1 +np.float64,0xbfd073f6aba0e7ee,0xbfd0a3cb5468a961,1 +np.float64,0x3fed4bad7b7a975a,0x3ff281ebeb75e414,1 +np.float64,0xbfdc75b50bb8eb6a,0xbfdd7e1a7631cb22,1 +np.float64,0x3fd458a90fa8b154,0x3fd4b4a5817248ce,1 +np.float64,0x3feead5db57d5abc,0x3ff484286fab55ff,1 +np.float64,0x3fb3894382271280,0x3fb38e217b4e7905,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0xbfe428212ae85042,0xbfe5ce36f226bea8,1 +np.float64,0xbfc08b39f7211674,0xbfc0971b93ebc7ad,1 +np.float64,0xbfc2e7cf5525cfa0,0xbfc2f994eb72b623,1 +np.float64,0xbfdb0d85afb61b0c,0xbfdbee5a2de3c5db,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0xbfd0d36af7a1a6d6,0xbfd106a5f05ef6ff,1 +np.float64,0xbfc333d0912667a0,0xbfc3467162b7289a,1 +np.float64,0x3fcdababc53b5758,0x3fcdf16458c20fa8,1 +np.float64,0x3fd0821b38a10438,0x3fd0b26e3e0b9185,1 +np.float64,0x0,0x0,1 
+np.float64,0x3feb7f70edf6fee2,0x3ff08ae81854bf20,1 +np.float64,0x3fe6e075716dc0ea,0x3fe97cc5254be6ff,1 +np.float64,0x3fea13b682f4276e,0x3fee7b6f18073b5b,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-arcsinh.csv b/python/numpy/_core/tests/data/umath-validation-set-arcsinh.csv new file mode 100644 index 000000000..9eedb1a13 --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-arcsinh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbf24142a,0xbf1a85ef,2 +np.float32,0x3e71cf91,0x3e6f9e37,2 +np.float32,0xe52a7,0xe52a7,2 +np.float32,0x3ef1e074,0x3ee9add9,2 +np.float32,0x806160ac,0x806160ac,2 +np.float32,0x7e2d59a2,0x42af4798,2 +np.float32,0xbf32cac9,0xbf26bf96,2 +np.float32,0x3f081701,0x3f026142,2 +np.float32,0x3f23cc88,0x3f1a499c,2 +np.float32,0xbf090d94,0xbf033ad0,2 +np.float32,0x803af2fc,0x803af2fc,2 +np.float32,0x807eb17e,0x807eb17e,2 +np.float32,0x5c0d8e,0x5c0d8e,2 +np.float32,0x3f7b79d2,0x3f5e6b1d,2 +np.float32,0x806feeae,0x806feeae,2 +np.float32,0x3e4b423a,0x3e49f274,2 +np.float32,0x3f49e5ac,0x3f394a41,2 +np.float32,0x3f18cd4e,0x3f10ef35,2 +np.float32,0xbed75734,0xbed17322,2 +np.float32,0x7f591151,0x42b28085,2 +np.float32,0xfefe9da6,0xc2b16f51,2 +np.float32,0xfeac90fc,0xc2b0a82a,2 +np.float32,0x805c198e,0x805c198e,2 +np.float32,0x7f66d6df,0x42b2a004,2 +np.float32,0x505438,0x505438,2 +np.float32,0xbf39a209,0xbf2c5255,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xc84cb,0xc84cb,2 +np.float32,0x7f07d6f5,0x42b19088,2 +np.float32,0x79d7e4,0x79d7e4,2 +np.float32,0xff32f6a0,0xc2b21db1,2 +np.float32,0x7c005c05,0x42a9222e,2 +np.float32,0x3ec449aa,0x3ebfc5ae,2 +np.float32,0x800ec323,0x800ec323,2 +np.float32,0xff1c904c,0xc2b1d93a,2 +np.float32,0x7f4eca52,0x42b267b0,2 +np.float32,0x3ee06540,0x3ed9c514,2 +np.float32,0x6aab4,0x6aab4,2 +np.float32,0x3e298d8c,0x3e28c99e,2 +np.float32,0xbf38d162,0xbf2ba94a,2 +np.float32,0x2d9083,0x2d9083,2 +np.float32,0x7eae5032,0x42b0ad52,2 +np.float32,0x3ead5b3c,0x3eaa3443,2 +np.float32,0x806fef66,0x806fef66,2 +np.float32,0x3f5b614e,0x3f46ca71,2 +np.float32,0xbf4c906a,0xbf3b60fc,2 +np.float32,0x8049453e,0x8049453e,2 +np.float32,0x3d305220,0x3d304432,2 +np.float32,0x2e1a89,0x2e1a89,2 +np.float32,0xbf4e74ec,0xbf3cdacf,2 +np.float32,0x807a827a,0x807a827a,2 +np.float32,0x80070745,0x80070745,2 +np.float32,0xbe1ba2fc,0xbe1b0b28,2 +np.float32,0xbe5131d0,0xbe4fc421,2 +np.float32,0x5bfd98,0x5bfd98,2 +np.float32,0xbd8e1a48,0xbd8dfd27,2 +np.float32,0x8006c160,0x8006c160,2 +np.float32,0x346490,0x346490,2 +np.float32,0xbdbdf060,0xbdbdaaf0,2 +np.float32,0x3ea9d0c4,0x3ea6d8c7,2 +np.float32,0xbf2aaa28,0xbf200916,2 +np.float32,0xbf160c26,0xbf0e9047,2 +np.float32,0x80081fd4,0x80081fd4,2 +np.float32,0x7db44283,0x42adf8b6,2 +np.float32,0xbf1983f8,0xbf118bf5,2 +np.float32,0x2c4a35,0x2c4a35,2 +np.float32,0x6165a7,0x6165a7,2 +np.float32,0xbe776b44,0xbe75129f,2 +np.float32,0xfe81841a,0xc2b0153b,2 +np.float32,0xbf7d1b2f,0xbf5f9461,2 +np.float32,0x80602d36,0x80602d36,2 +np.float32,0xfe8d5046,0xc2b041dd,2 +np.float32,0xfe5037bc,0xc2afa56d,2 +np.float32,0x4bbea6,0x4bbea6,2 +np.float32,0xfea039de,0xc2b0822d,2 +np.float32,0x7ea627a4,0x42b094c7,2 +np.float32,0x3f556198,0x3f423591,2 +np.float32,0xfedbae04,0xc2b123c1,2 +np.float32,0xbe30432c,0xbe2f6744,2 +np.float32,0x80202c77,0x80202c77,2 +np.float32,0xff335cc1,0xc2b21ed5,2 +np.float32,0x3e1e1ebe,0x3e1d7f95,2 +np.float32,0x8021c9c0,0x8021c9c0,2 +np.float32,0x7dc978,0x7dc978,2 +np.float32,0xff6cfabc,0xc2b2ad75,2 +np.float32,0x7f2bd542,0x42b208e0,2 +np.float32,0x53bf33,0x53bf33,2 
+np.float32,0x804e04bb,0x804e04bb,2 +np.float32,0x3f30d2f9,0x3f2521ca,2 +np.float32,0x3dfde876,0x3dfd4316,2 +np.float32,0x46f8b1,0x46f8b1,2 +np.float32,0xbd5f9e20,0xbd5f81ba,2 +np.float32,0x807d6a22,0x807d6a22,2 +np.float32,0xff3881da,0xc2b22d50,2 +np.float32,0x1b1cb5,0x1b1cb5,2 +np.float32,0x3f75f2d0,0x3f5a7435,2 +np.float32,0xfee39c1a,0xc2b135e9,2 +np.float32,0x7f79f14a,0x42b2c8b9,2 +np.float32,0x8000e2d1,0x8000e2d1,2 +np.float32,0xab779,0xab779,2 +np.float32,0xbede6690,0xbed7f102,2 +np.float32,0x76e20d,0x76e20d,2 +np.float32,0x3ed714cb,0x3ed135e9,2 +np.float32,0xbeaa6f44,0xbea76f31,2 +np.float32,0x7f7dc8b1,0x42b2d089,2 +np.float32,0x108cb2,0x108cb2,2 +np.float32,0x7d37ba82,0x42ac9f94,2 +np.float32,0x3f31d068,0x3f25f221,2 +np.float32,0x8010a331,0x8010a331,2 +np.float32,0x3f2fdc7c,0x3f2456cd,2 +np.float32,0x7f7a9a67,0x42b2ca13,2 +np.float32,0x3f2acb31,0x3f202492,2 +np.float32,0x7f54fa94,0x42b276c9,2 +np.float32,0x3ebf8a70,0x3ebb553c,2 +np.float32,0x7f75b1a7,0x42b2bff2,2 +np.float32,0x7daebe07,0x42ade8cc,2 +np.float32,0xbd3a3ef0,0xbd3a2e86,2 +np.float32,0x8078ec9e,0x8078ec9e,2 +np.float32,0x3eda206a,0x3ed403ec,2 +np.float32,0x3f7248f2,0x3f57cd77,2 +np.float32,0x805d55ba,0x805d55ba,2 +np.float32,0xff30dc3e,0xc2b217a3,2 +np.float32,0xbe12b27c,0xbe123333,2 +np.float32,0xbf6ed9cf,0xbf554cd0,2 +np.float32,0xbed9eb5c,0xbed3d31c,2 +np.float32,0xbf1c9aea,0xbf14307b,2 +np.float32,0x3f540ac4,0x3f412de2,2 +np.float32,0x800333ac,0x800333ac,2 +np.float32,0x3f74cdb4,0x3f59a09a,2 +np.float32,0xbf41dc41,0xbf32ee6f,2 +np.float32,0xff2c7804,0xc2b20ac4,2 +np.float32,0x514493,0x514493,2 +np.float32,0xbddf1220,0xbddea1cf,2 +np.float32,0xfeaf74de,0xc2b0b0ab,2 +np.float32,0xfe5dfb30,0xc2afc633,2 +np.float32,0xbf4785c4,0xbf376bdb,2 +np.float32,0x80191cd3,0x80191cd3,2 +np.float32,0xfe44f708,0xc2af88fb,2 +np.float32,0x3d4cd8a0,0x3d4cc2ca,2 +np.float32,0x7f572eff,0x42b27c0f,2 +np.float32,0x8031bacb,0x8031bacb,2 +np.float32,0x7f2ea684,0x42b21133,2 +np.float32,0xbea1976a,0xbe9f05bb,2 +np.float32,0x3d677b41,0x3d675bc1,2 +np.float32,0x3f61bf24,0x3f4b9870,2 +np.float32,0x7ef55ddf,0x42b15c5f,2 +np.float32,0x3eabcb20,0x3ea8b91c,2 +np.float32,0xff73d9ec,0xc2b2bc18,2 +np.float32,0x77b9f5,0x77b9f5,2 +np.float32,0x4c6c6c,0x4c6c6c,2 +np.float32,0x7ed09c94,0x42b10949,2 +np.float32,0xdeeec,0xdeeec,2 +np.float32,0x7eac5858,0x42b0a782,2 +np.float32,0x7e190658,0x42af07bd,2 +np.float32,0xbe3c8980,0xbe3b7ce2,2 +np.float32,0x8059e86e,0x8059e86e,2 +np.float32,0xff201836,0xc2b1e4a5,2 +np.float32,0xbeac109c,0xbea8fafb,2 +np.float32,0x7edd1e2b,0x42b12718,2 +np.float32,0x639cd8,0x639cd8,2 +np.float32,0x3f5e4cae,0x3f490059,2 +np.float32,0x3d84c185,0x3d84a9c4,2 +np.float32,0xbe8c1130,0xbe8a605b,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3f1da5e4,0x3f151404,2 +np.float32,0x7f75a873,0x42b2bfdf,2 +np.float32,0xbd873540,0xbd871c28,2 +np.float32,0xbe8e5e10,0xbe8c9808,2 +np.float32,0x7f004bf2,0x42b17347,2 +np.float32,0x800000,0x800000,2 +np.float32,0xbf6d6b79,0xbf544095,2 +np.float32,0x7ed7b563,0x42b11a6a,2 +np.float32,0x80693745,0x80693745,2 +np.float32,0x3ee0f608,0x3eda49a8,2 +np.float32,0xfe1285a4,0xc2aef181,2 +np.float32,0x72d946,0x72d946,2 +np.float32,0x6a0dca,0x6a0dca,2 +np.float32,0x3f5c9df6,0x3f47ba99,2 +np.float32,0xff002af6,0xc2b172c4,2 +np.float32,0x3f4ac98f,0x3f39fd0a,2 +np.float32,0x8066acf7,0x8066acf7,2 +np.float32,0xbcaa4e60,0xbcaa4b3c,2 +np.float32,0x80162813,0x80162813,2 +np.float32,0xff34b318,0xc2b222a2,2 +np.float32,0x7f1ce33c,0x42b1da49,2 +np.float32,0x3f0e55ab,0x3f07ddb0,2 +np.float32,0x7c75d996,0x42aa6eec,2 
+np.float32,0xbf221bc6,0xbf18dc89,2 +np.float32,0x3f5a1a4c,0x3f45d1d4,2 +np.float32,0x7f2451b8,0x42b1f1fb,2 +np.float32,0x3ec55ca0,0x3ec0c655,2 +np.float32,0x3f752dc2,0x3f59e600,2 +np.float32,0xbe33f638,0xbe330c4d,2 +np.float32,0x3e2a9148,0x3e29c9d8,2 +np.float32,0x3f3362a1,0x3f273c01,2 +np.float32,0x5f83b3,0x5f83b3,2 +np.float32,0x3e362488,0x3e353216,2 +np.float32,0x140bcf,0x140bcf,2 +np.float32,0x7e3e96df,0x42af7822,2 +np.float32,0xbebc7082,0xbeb86ce6,2 +np.float32,0xbe92a92e,0xbe90b9d2,2 +np.float32,0xff3d8afc,0xc2b23b19,2 +np.float32,0x804125e3,0x804125e3,2 +np.float32,0x3f3675d1,0x3f29bedb,2 +np.float32,0xff70bb09,0xc2b2b57f,2 +np.float32,0x3f29681c,0x3f1efcd2,2 +np.float32,0xbdc70380,0xbdc6b3a8,2 +np.float32,0x54e0dd,0x54e0dd,2 +np.float32,0x3d545de0,0x3d54458c,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x8014a4c2,0x8014a4c2,2 +np.float32,0xbe93f58a,0xbe91f938,2 +np.float32,0x17de33,0x17de33,2 +np.float32,0xfefb679a,0xc2b168d2,2 +np.float32,0xbf23423e,0xbf19d511,2 +np.float32,0x7e893fa1,0x42b032ec,2 +np.float32,0x3f44fe2d,0x3f356bda,2 +np.float32,0xbebb2e78,0xbeb73e8f,2 +np.float32,0x3f5632e0,0x3f42d633,2 +np.float32,0x3ddd8698,0x3ddd1896,2 +np.float32,0x80164ea7,0x80164ea7,2 +np.float32,0x80087b37,0x80087b37,2 +np.float32,0xbf06ab1e,0xbf011f95,2 +np.float32,0x3db95524,0x3db9149f,2 +np.float32,0x7aa1fbb3,0x42a570a1,2 +np.float32,0xbd84fc48,0xbd84e467,2 +np.float32,0x3d65c6f5,0x3d65a826,2 +np.float32,0xfe987800,0xc2b068c4,2 +np.float32,0x7ec59532,0x42b0ed7a,2 +np.float32,0x3ea0232c,0x3e9da29a,2 +np.float32,0x80292a08,0x80292a08,2 +np.float32,0x734cfe,0x734cfe,2 +np.float32,0x3f3b6d63,0x3f2dc596,2 +np.float32,0x3f27bcc1,0x3f1d97e6,2 +np.float32,0xfe1da554,0xc2af16f9,2 +np.float32,0x7c91f5,0x7c91f5,2 +np.float32,0xfe4e78cc,0xc2afa11e,2 +np.float32,0x7e4b4e08,0x42af9933,2 +np.float32,0xfe0949ec,0xc2aed02e,2 +np.float32,0x7e2f057f,0x42af4c81,2 +np.float32,0xbf200ae0,0xbf171ce1,2 +np.float32,0x3ebcc244,0x3eb8b99e,2 +np.float32,0xbf68f58d,0xbf50f7aa,2 +np.float32,0x4420b1,0x4420b1,2 +np.float32,0x3f5b61bf,0x3f46cac7,2 +np.float32,0x3fec78,0x3fec78,2 +np.float32,0x7f4183c8,0x42b245b7,2 +np.float32,0xbf10587c,0xbf099ee2,2 +np.float32,0x0,0x0,2 +np.float32,0x7ec84dc3,0x42b0f47a,2 +np.float32,0x3f5fbd7b,0x3f4a166d,2 +np.float32,0xbd884eb8,0xbd883502,2 +np.float32,0xfe3f10a4,0xc2af7969,2 +np.float32,0xff3f4920,0xc2b23fc9,2 +np.float32,0x8013900f,0x8013900f,2 +np.float32,0x8003529d,0x8003529d,2 +np.float32,0xbf032384,0xbefbfb3c,2 +np.float32,0xff418c7c,0xc2b245ce,2 +np.float32,0xbec0aad0,0xbebc633b,2 +np.float32,0xfdbff178,0xc2ae18de,2 +np.float32,0x68ab15,0x68ab15,2 +np.float32,0xbdfc4a88,0xbdfba848,2 +np.float32,0xbf5adec6,0xbf466747,2 +np.float32,0x807d5dcc,0x807d5dcc,2 +np.float32,0x61d144,0x61d144,2 +np.float32,0x807e3a03,0x807e3a03,2 +np.float32,0x1872f2,0x1872f2,2 +np.float32,0x7f2a272c,0x42b203d8,2 +np.float32,0xfe7f8314,0xc2b00e3a,2 +np.float32,0xbe42aeac,0xbe418737,2 +np.float32,0x8024b614,0x8024b614,2 +np.float32,0xbe41b6b8,0xbe40939a,2 +np.float32,0xa765c,0xa765c,2 +np.float32,0x7ea74f4b,0x42b09853,2 +np.float32,0x7f7ef631,0x42b2d2e7,2 +np.float32,0x7eaef5e6,0x42b0af38,2 +np.float32,0xff733d85,0xc2b2bacf,2 +np.float32,0x537ac0,0x537ac0,2 +np.float32,0xbeca4790,0xbec55b1d,2 +np.float32,0x80117314,0x80117314,2 +np.float32,0xfe958536,0xc2b05ec5,2 +np.float32,0x8066ecc2,0x8066ecc2,2 +np.float32,0xbf56baf3,0xbf433e82,2 +np.float32,0x1f7fd7,0x1f7fd7,2 +np.float32,0x3e942104,0x3e9222fc,2 +np.float32,0xfeaffe82,0xc2b0b23c,2 +np.float32,0xfe0e02b0,0xc2aee17e,2 
+np.float32,0xbf800000,0xbf61a1b3,2 +np.float32,0x800b7e49,0x800b7e49,2 +np.float32,0x6c514f,0x6c514f,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x7f7d9a45,0x42b2d02b,2 +np.float32,0x800c9c69,0x800c9c69,2 +np.float32,0x274b14,0x274b14,2 +np.float32,0xbf4b22b0,0xbf3a42e2,2 +np.float32,0x63e5ae,0x63e5ae,2 +np.float32,0xbe18facc,0xbe186a90,2 +np.float32,0x7e137351,0x42aef4bd,2 +np.float32,0x80518ffd,0x80518ffd,2 +np.float32,0xbf0a8ffc,0xbf048f0d,2 +np.float32,0x841d,0x841d,2 +np.float32,0x7edfdc9e,0x42b12d69,2 +np.float32,0xfd1092b0,0xc2ac24de,2 +np.float32,0x7e2c9bdf,0x42af4566,2 +np.float32,0x7f7fffff,0x42b2d4fc,2 +np.float32,0x3f4954a6,0x3f38d853,2 +np.float32,0xbe83efd2,0xbe8284c3,2 +np.float32,0x800e8e02,0x800e8e02,2 +np.float32,0x78ad39,0x78ad39,2 +np.float32,0x7eb0f967,0x42b0b514,2 +np.float32,0xbe39aa94,0xbe38a9ee,2 +np.float32,0x80194e7b,0x80194e7b,2 +np.float32,0x3cf3a340,0x3cf39a0f,2 +np.float32,0x3ed3117a,0x3ecd8173,2 +np.float32,0x7f530b11,0x42b2721c,2 +np.float32,0xff756ba2,0xc2b2bf60,2 +np.float32,0x15ea25,0x15ea25,2 +np.float32,0x803cbb64,0x803cbb64,2 +np.float32,0x3f34722d,0x3f281a2c,2 +np.float32,0x3ddd88e0,0x3ddd1adb,2 +np.float32,0x3f54244c,0x3f41418b,2 +np.float32,0x3e0adb98,0x3e0a6f8b,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x58902b,0x58902b,2 +np.float32,0xfe3b50b8,0xc2af6f43,2 +np.float32,0xfe0846d0,0xc2aecc64,2 +np.float32,0xbe0299d0,0xbe023fd4,2 +np.float32,0x18dde6,0x18dde6,2 +np.float32,0x8039fe8b,0x8039fe8b,2 +np.float32,0x8015d179,0x8015d179,2 +np.float32,0x3f551322,0x3f41f947,2 +np.float32,0x2ab387,0x2ab387,2 +np.float32,0xbf7e311e,0xbf6059d0,2 +np.float32,0xbdba58a8,0xbdba1713,2 +np.float32,0xbf1d008a,0xbf148724,2 +np.float32,0xbf6b9c97,0xbf52ec98,2 +np.float32,0x802acf04,0x802acf04,2 +np.float32,0x1,0x1,2 +np.float32,0xbe9e16d6,0xbe9bade3,2 +np.float32,0xbf048a14,0xbefe78c7,2 +np.float32,0x7e432ad3,0x42af8449,2 +np.float32,0xbdcc7fe0,0xbdcc2944,2 +np.float32,0x6dfc27,0x6dfc27,2 +np.float32,0xfef6eed8,0xc2b15fa1,2 +np.float32,0xbeeff6e8,0xbee7f2e4,2 +np.float32,0x7e3a6ca8,0x42af6cd2,2 +np.float32,0xff2c82e8,0xc2b20ae4,2 +np.float32,0x3e9f8d74,0x3e9d13b0,2 +np.float32,0x7ea36191,0x42b08c29,2 +np.float32,0x7f734bed,0x42b2baed,2 +np.float32,0x7f2df96d,0x42b20f37,2 +np.float32,0x5036fd,0x5036fd,2 +np.float32,0x806eab38,0x806eab38,2 +np.float32,0xbe9db90e,0xbe9b5446,2 +np.float32,0xfeef6fac,0xc2b14fd9,2 +np.float32,0xc2bf7,0xc2bf7,2 +np.float32,0xff53ec3d,0xc2b2743d,2 +np.float32,0x7e837637,0x42b01cde,2 +np.float32,0xbefb5934,0xbef23662,2 +np.float32,0x3f6cec80,0x3f53e371,2 +np.float32,0x3e86e7de,0x3e85643f,2 +np.float32,0x3f09cb42,0x3f03e1ef,2 +np.float32,0xbec3d236,0xbebf5620,2 +np.float32,0xfedef246,0xc2b12b50,2 +np.float32,0xbf08d6a8,0xbf030a62,2 +np.float32,0x8036cbf9,0x8036cbf9,2 +np.float32,0x3f74d3e3,0x3f59a512,2 +np.float32,0x6a600c,0x6a600c,2 +np.float32,0xfd1295b0,0xc2ac2bf1,2 +np.float32,0xbeb61142,0xbeb26efa,2 +np.float32,0x80216556,0x80216556,2 +np.float32,0xbf1fa0f6,0xbf16c30a,2 +np.float32,0x3e0af8e1,0x3e0a8c90,2 +np.float32,0x80434709,0x80434709,2 +np.float32,0x49efd9,0x49efd9,2 +np.float32,0x7f7cce6c,0x42b2ce8f,2 +np.float32,0x6e5450,0x6e5450,2 +np.float32,0x7f0fc115,0x42b1ad86,2 +np.float32,0x632db0,0x632db0,2 +np.float32,0x3f6f4c2a,0x3f55a064,2 +np.float32,0x7ec4f273,0x42b0ebd3,2 +np.float32,0x61ae1e,0x61ae1e,2 +np.float32,0x5f47c4,0x5f47c4,2 +np.float32,0xbf3c8f62,0xbf2eaf54,2 +np.float32,0xfca38900,0xc2ab0113,2 +np.float32,0x3ec89d52,0x3ec3ce78,2 +np.float32,0xbe0e3f70,0xbe0dcb53,2 +np.float32,0x805d3156,0x805d3156,2 
+np.float32,0x3eee33f8,0x3ee65a4e,2 +np.float32,0xbeda7e9a,0xbed45a90,2 +np.float32,0x7e2fac7b,0x42af4e69,2 +np.float32,0x7efd0e28,0x42b16c2c,2 +np.float32,0x3f0c7b17,0x3f063e46,2 +np.float32,0xbf395bec,0xbf2c198f,2 +np.float32,0xfdf1c3f8,0xc2ae8f05,2 +np.float32,0xbe11f4e4,0xbe117783,2 +np.float32,0x7eddc901,0x42b128a3,2 +np.float32,0x3f4bad09,0x3f3aaf33,2 +np.float32,0xfefb5d76,0xc2b168bd,2 +np.float32,0x3ed3a4cf,0x3ece09a3,2 +np.float32,0x7ec582e4,0x42b0ed4a,2 +np.float32,0x3dc2268a,0x3dc1dc64,2 +np.float32,0x3ef9b17c,0x3ef0b9c9,2 +np.float32,0x2748ac,0x2748ac,2 +np.float32,0xfed6a602,0xc2b117e4,2 +np.float32,0xbefc9c36,0xbef35832,2 +np.float32,0x7e0476,0x7e0476,2 +np.float32,0x804be1a0,0x804be1a0,2 +np.float32,0xbefbc1c2,0xbef2943a,2 +np.float32,0xbd4698f0,0xbd46850a,2 +np.float32,0x688627,0x688627,2 +np.float32,0x3f7f7685,0x3f61406f,2 +np.float32,0x827fb,0x827fb,2 +np.float32,0x3f503264,0x3f3e34fd,2 +np.float32,0x7f5458d1,0x42b27543,2 +np.float32,0x800ac01f,0x800ac01f,2 +np.float32,0x6188dd,0x6188dd,2 +np.float32,0x806ac0ba,0x806ac0ba,2 +np.float32,0xbe14493c,0xbe13c5cc,2 +np.float32,0x3f77542c,0x3f5b72ae,2 +np.float32,0xfeaacab6,0xc2b0a2df,2 +np.float32,0x7f2893d5,0x42b1ff15,2 +np.float32,0x66b528,0x66b528,2 +np.float32,0xbf653e24,0xbf4e3573,2 +np.float32,0x801a2853,0x801a2853,2 +np.float32,0x3f3d8c98,0x3f2f7b04,2 +np.float32,0xfdffbad8,0xc2aeabc5,2 +np.float32,0x3dd50f,0x3dd50f,2 +np.float32,0x3f325a4c,0x3f266353,2 +np.float32,0xfcc48ec0,0xc2ab5f3f,2 +np.float32,0x3e6f5b9a,0x3e6d3ae5,2 +np.float32,0x3dbcd62b,0x3dbc91ee,2 +np.float32,0xbf7458d9,0xbf594c1c,2 +np.float32,0xff5adb24,0xc2b284b9,2 +np.float32,0x807b246d,0x807b246d,2 +np.float32,0x3f800000,0x3f61a1b3,2 +np.float32,0x231a28,0x231a28,2 +np.float32,0xbdc66258,0xbdc61341,2 +np.float32,0x3c84b4b4,0x3c84b338,2 +np.float32,0xbf215894,0xbf183783,2 +np.float32,0xff4ee298,0xc2b267ec,2 +np.float32,0x801ef52e,0x801ef52e,2 +np.float32,0x1040b0,0x1040b0,2 +np.float32,0xff545582,0xc2b2753b,2 +np.float32,0x3f3b9dda,0x3f2decaf,2 +np.float32,0x730f99,0x730f99,2 +np.float32,0xff7fffff,0xc2b2d4fc,2 +np.float32,0xff24cc5e,0xc2b1f379,2 +np.float32,0xbe9b456a,0xbe98fc0b,2 +np.float32,0x188fb,0x188fb,2 +np.float32,0x3f5c7ce2,0x3f47a18a,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x806ea4da,0x806ea4da,2 +np.float32,0xfe810570,0xc2b01345,2 +np.float32,0x8036af89,0x8036af89,2 +np.float32,0x8043cec6,0x8043cec6,2 +np.float32,0x80342bb3,0x80342bb3,2 +np.float32,0x1a2bd4,0x1a2bd4,2 +np.float32,0x3f6248c2,0x3f4bff9a,2 +np.float32,0x8024eb35,0x8024eb35,2 +np.float32,0x7ea55872,0x42b09247,2 +np.float32,0x806d6e56,0x806d6e56,2 +np.float32,0x25c21a,0x25c21a,2 +np.float32,0x3f4e95f3,0x3f3cf483,2 +np.float32,0x15ca38,0x15ca38,2 +np.float32,0x803f01b2,0x803f01b2,2 +np.float32,0xbe731634,0xbe70dc10,2 +np.float32,0x3e80cee4,0x3e7ef933,2 +np.float32,0x3ef6dda5,0x3eee2e7b,2 +np.float32,0x3f3dfdc2,0x3f2fd5ed,2 +np.float32,0xff0492a7,0xc2b18411,2 +np.float32,0xbf1d0adf,0xbf148ff3,2 +np.float32,0xfcf75460,0xc2abd4e3,2 +np.float32,0x3f46fca6,0x3f36ffa6,2 +np.float32,0xbe63b5c0,0xbe61dfb3,2 +np.float32,0xff019bec,0xc2b1787d,2 +np.float32,0x801f14a9,0x801f14a9,2 +np.float32,0x3f176cfa,0x3f0fc051,2 +np.float32,0x3f69d976,0x3f51a015,2 +np.float32,0x3f4917cb,0x3f38a87a,2 +np.float32,0x3b2a0bea,0x3b2a0bdd,2 +np.float32,0xbf41d857,0xbf32eb50,2 +np.float32,0xbf08841a,0xbf02c18f,2 +np.float32,0x7ec86f14,0x42b0f4d0,2 +np.float32,0xbf7d15d1,0xbf5f9090,2 +np.float32,0xbd080550,0xbd07feea,2 +np.float32,0xbf6f1bef,0xbf557d26,2 +np.float32,0xfebc282c,0xc2b0d473,2 
+np.float32,0x3e68d2f5,0x3e66dd03,2 +np.float32,0x3f3ed8fe,0x3f3085d5,2 +np.float32,0xff2f78ae,0xc2b2139a,2 +np.float32,0xff647a70,0xc2b29ac1,2 +np.float32,0xfd0859a0,0xc2ac06e2,2 +np.float32,0x3ea578a8,0x3ea2b7e1,2 +np.float32,0x6c58c6,0x6c58c6,2 +np.float32,0xff23f26a,0xc2b1f0d2,2 +np.float32,0x800902a4,0x800902a4,2 +np.float32,0xfe8ba64e,0xc2b03bcd,2 +np.float32,0x3f091143,0x3f033e0f,2 +np.float32,0x8017c4bd,0x8017c4bd,2 +np.float32,0xbf708fd4,0xbf568c8c,2 +np.float32,0x3be1d8,0x3be1d8,2 +np.float32,0x80091f07,0x80091f07,2 +np.float32,0x68eabe,0x68eabe,2 +np.float32,0xfe9ab2c8,0xc2b07033,2 +np.float32,0x3eabe752,0x3ea8d3d7,2 +np.float32,0xbf7adcb2,0xbf5dfaf5,2 +np.float32,0x801ecc01,0x801ecc01,2 +np.float32,0xbf5570a9,0xbf424123,2 +np.float32,0x3e89eecd,0x3e88510e,2 +np.float32,0xfeb2feee,0xc2b0bae4,2 +np.float32,0xbeb25ec2,0xbeaef22b,2 +np.float32,0x201e49,0x201e49,2 +np.float32,0x800a35f6,0x800a35f6,2 +np.float32,0xbf02d449,0xbefb6e2a,2 +np.float32,0x3f062bea,0x3f00aef6,2 +np.float32,0x7f5219ff,0x42b26fd2,2 +np.float32,0xbd4561d0,0xbd454e47,2 +np.float32,0x3f6c4789,0x3f536a4b,2 +np.float32,0x7f58b06d,0x42b27fa1,2 +np.float32,0x7f132f39,0x42b1b999,2 +np.float32,0x3e05dcb4,0x3e057bd8,2 +np.float32,0x7f526045,0x42b2707d,2 +np.float32,0x3f6117d0,0x3f4b1adb,2 +np.float32,0xbf21f47d,0xbf18bb57,2 +np.float32,0x1a26d6,0x1a26d6,2 +np.float32,0x46b114,0x46b114,2 +np.float32,0x3eb24518,0x3eaed9ef,2 +np.float32,0xfe2139c8,0xc2af2278,2 +np.float32,0xbf7c36fb,0xbf5ef1f6,2 +np.float32,0x3f193834,0x3f114af7,2 +np.float32,0xff3ea650,0xc2b23e14,2 +np.float32,0xfeeb3bca,0xc2b146c7,2 +np.float32,0x7e8b8ca0,0x42b03b6f,2 +np.float32,0x3eed903d,0x3ee5c5d2,2 +np.float32,0xbdc73740,0xbdc6e72a,2 +np.float32,0x7e500307,0x42afa4ec,2 +np.float32,0xe003c,0xe003c,2 +np.float32,0x3e612bb4,0x3e5f64fd,2 +np.float32,0xfd81e248,0xc2ad50e6,2 +np.float32,0x766a4f,0x766a4f,2 +np.float32,0x3e8708c9,0x3e858414,2 +np.float32,0xbf206c58,0xbf176f7f,2 +np.float32,0x7e93aeb0,0x42b0586f,2 +np.float32,0xfd9d36b8,0xc2adb2ad,2 +np.float32,0xff1f4e0e,0xc2b1e21d,2 +np.float32,0x3f22bd5a,0x3f1964f8,2 +np.float32,0x7f6a517a,0x42b2a7ad,2 +np.float32,0xff6ca773,0xc2b2acc1,2 +np.float32,0x7f6bf453,0x42b2ab3d,2 +np.float32,0x3edfdd64,0x3ed9489f,2 +np.float32,0xbeafc5ba,0xbeac7daa,2 +np.float32,0x7d862039,0x42ad615b,2 +np.float32,0xbe9d2002,0xbe9ac1fc,2 +np.float32,0xbdcc54c0,0xbdcbfe5b,2 +np.float32,0xbf1bc0aa,0xbf13762a,2 +np.float32,0xbf4679ce,0xbf36984b,2 +np.float32,0x3ef45696,0x3eebe713,2 +np.float32,0xff6eb999,0xc2b2b137,2 +np.float32,0xbe4b2e4c,0xbe49dee8,2 +np.float32,0x3f498951,0x3f3901b7,2 +np.float32,0xbe9692f4,0xbe947be1,2 +np.float32,0xbf44ce26,0xbf3545c8,2 +np.float32,0x805787a8,0x805787a8,2 +np.float32,0xbf342650,0xbf27dc26,2 +np.float32,0x3edafbf0,0x3ed4cdd2,2 +np.float32,0x3f6fb858,0x3f55ef63,2 +np.float32,0xff227d0a,0xc2b1ec3f,2 +np.float32,0xfeb9a202,0xc2b0cd89,2 +np.float32,0x7f5b12c1,0x42b2853b,2 +np.float32,0x584578,0x584578,2 +np.float32,0x7ec0b76f,0x42b0e0b5,2 +np.float32,0x3f57f54b,0x3f442f10,2 +np.float32,0x7eef3620,0x42b14f5d,2 +np.float32,0x4525b5,0x4525b5,2 +np.float32,0x801bd407,0x801bd407,2 +np.float32,0xbed1f166,0xbecc7703,2 +np.float32,0x3f57e732,0x3f442449,2 +np.float32,0x80767cd5,0x80767cd5,2 +np.float32,0xbef1a7d2,0xbee97aa3,2 +np.float32,0x3dd5b1af,0x3dd54ee6,2 +np.float32,0x960c,0x960c,2 +np.float32,0x7c392d41,0x42a9ddd1,2 +np.float32,0x3f5c9a34,0x3f47b7c1,2 +np.float32,0x3f5cecee,0x3f47f667,2 +np.float32,0xbee482ce,0xbedd8899,2 +np.float32,0x8066ba7e,0x8066ba7e,2 +np.float32,0x7ed76127,0x42b119a2,2 
+np.float32,0x805ca40b,0x805ca40b,2 +np.float32,0x7f5ed5d1,0x42b28df3,2 +np.float32,0xfe9e1b1e,0xc2b07b5b,2 +np.float32,0x3f0201a2,0x3ef9f6c4,2 +np.float32,0xbf2e6430,0xbf232039,2 +np.float32,0x80326b4d,0x80326b4d,2 +np.float32,0x3f11dc7c,0x3f0af06e,2 +np.float32,0xbe89c42e,0xbe8827e6,2 +np.float32,0x3f3c69f8,0x3f2e9133,2 +np.float32,0x806326a9,0x806326a9,2 +np.float32,0x3f1c5286,0x3f13f2b6,2 +np.float32,0xff5c0ead,0xc2b28786,2 +np.float32,0xff32b952,0xc2b21d01,2 +np.float32,0x7dd27c4e,0x42ae4815,2 +np.float32,0xbf7a6816,0xbf5da7a2,2 +np.float32,0xfeac72f8,0xc2b0a7d1,2 +np.float32,0x335ad7,0x335ad7,2 +np.float32,0xbe682da4,0xbe663bcc,2 +np.float32,0x3f2df244,0x3f22c208,2 +np.float32,0x80686e8e,0x80686e8e,2 +np.float32,0x7f50120f,0x42b26ad9,2 +np.float32,0x3dbc596a,0x3dbc15b3,2 +np.float32,0xbf4f2868,0xbf3d666d,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xff66c059,0xc2b29fd2,2 +np.float32,0xfe8bbcaa,0xc2b03c1f,2 +np.float32,0x3ece6a51,0x3ec93271,2 +np.float32,0x7f06cd26,0x42b18c9a,2 +np.float32,0x7e41e6dc,0x42af80f5,2 +np.float32,0x7d878334,0x42ad669f,2 +np.float32,0xfe8c5c4c,0xc2b03e67,2 +np.float32,0x337a05,0x337a05,2 +np.float32,0x3e63801d,0x3e61ab58,2 +np.float32,0x62c315,0x62c315,2 +np.float32,0x802aa888,0x802aa888,2 +np.float32,0x80038b43,0x80038b43,2 +np.float32,0xff5c1271,0xc2b2878f,2 +np.float32,0xff4184a5,0xc2b245b9,2 +np.float32,0x7ef58f4b,0x42b15cc6,2 +np.float32,0x7f42d8ac,0x42b2493a,2 +np.float32,0x806609f2,0x806609f2,2 +np.float32,0x801e763b,0x801e763b,2 +np.float32,0x7f2bc073,0x42b208a2,2 +np.float32,0x801d7d7f,0x801d7d7f,2 +np.float32,0x7d415dc1,0x42acb9c2,2 +np.float32,0xbf624ff9,0xbf4c0502,2 +np.float32,0xbf603afd,0xbf4a74e2,2 +np.float32,0x8007fe42,0x8007fe42,2 +np.float32,0x800456db,0x800456db,2 +np.float32,0x620871,0x620871,2 +np.float32,0x3e9c6c1e,0x3e9a15fa,2 +np.float32,0x4245d,0x4245d,2 +np.float32,0x8035bde9,0x8035bde9,2 +np.float32,0xbf597418,0xbf45533c,2 +np.float32,0x3c730f80,0x3c730d38,2 +np.float32,0x3f7cd8ed,0x3f5f6540,2 +np.float32,0x807e49c3,0x807e49c3,2 +np.float32,0x3d6584c0,0x3d65660c,2 +np.float32,0xff42a744,0xc2b248b8,2 +np.float32,0xfedc6f56,0xc2b12583,2 +np.float32,0x806263a4,0x806263a4,2 +np.float32,0x175a17,0x175a17,2 +np.float32,0x3f1e8537,0x3f15d208,2 +np.float32,0x4055b5,0x4055b5,2 +np.float32,0x438aa6,0x438aa6,2 +np.float32,0x8038507f,0x8038507f,2 +np.float32,0xbed75348,0xbed16f85,2 +np.float32,0x7f07b7d6,0x42b19012,2 +np.float32,0xfe8b9d30,0xc2b03bac,2 +np.float32,0x805c501c,0x805c501c,2 +np.float32,0x3ef22b1d,0x3ee9f159,2 +np.float32,0x802b6759,0x802b6759,2 +np.float32,0x45281a,0x45281a,2 +np.float32,0xbf7e9970,0xbf60a3cf,2 +np.float32,0xbf14d152,0xbf0d8062,2 +np.float32,0x3d9ff950,0x3d9fcfc8,2 +np.float32,0x7865d9,0x7865d9,2 +np.float32,0xbee67fa4,0xbedf58eb,2 +np.float32,0x7dc822d1,0x42ae2e44,2 +np.float32,0x3f3af0fe,0x3f2d612c,2 +np.float32,0xbefea106,0xbef5274e,2 +np.float32,0xbf758a3f,0xbf5a28c5,2 +np.float32,0xbf331bdd,0xbf270209,2 +np.float32,0x7f51c901,0x42b26f0d,2 +np.float32,0x3f67c33b,0x3f5014d8,2 +np.float32,0xbbc9d980,0xbbc9d92c,2 +np.float32,0xbc407540,0xbc40741e,2 +np.float32,0x7eed9a3c,0x42b14be9,2 +np.float32,0x1be0fe,0x1be0fe,2 +np.float32,0xbf6b4913,0xbf52af1f,2 +np.float32,0xbda8eba8,0xbda8bac6,2 +np.float32,0x8004bcea,0x8004bcea,2 +np.float32,0xff6f6afe,0xc2b2b2b3,2 +np.float32,0xbf205810,0xbf175e50,2 +np.float32,0x80651944,0x80651944,2 +np.float32,0xbec73016,0xbec27a3f,2 +np.float32,0x5701b9,0x5701b9,2 +np.float32,0xbf1062ce,0xbf09a7df,2 +np.float32,0x3e0306ae,0x3e02abd1,2 +np.float32,0x7bfc62,0x7bfc62,2 
+np.float32,0xbf48dd3c,0xbf387a6b,2 +np.float32,0x8009573e,0x8009573e,2 +np.float32,0x660a2c,0x660a2c,2 +np.float32,0xff2280da,0xc2b1ec4b,2 +np.float32,0xbf7034fe,0xbf564a54,2 +np.float32,0xbeeb448e,0xbee3b045,2 +np.float32,0xff4e949c,0xc2b2672b,2 +np.float32,0xbf3c4486,0xbf2e7309,2 +np.float32,0x7eb086d8,0x42b0b3c8,2 +np.float32,0x7eac8aca,0x42b0a817,2 +np.float32,0xfd3d2d60,0xc2acae8b,2 +np.float32,0xbf363226,0xbf2987bd,2 +np.float32,0x7f02e524,0x42b17d8c,2 +np.float32,0x8049a148,0x8049a148,2 +np.float32,0x147202,0x147202,2 +np.float32,0x8031d3f6,0x8031d3f6,2 +np.float32,0xfe78bf68,0xc2b0007d,2 +np.float32,0x7ebd16d0,0x42b0d6fb,2 +np.float32,0xbdaed2e8,0xbdae9cbb,2 +np.float32,0x802833ae,0x802833ae,2 +np.float32,0x7f62adf6,0x42b296b5,2 +np.float32,0xff2841c0,0xc2b1fe1b,2 +np.float32,0xbeb2c47e,0xbeaf523b,2 +np.float32,0x7e42a36e,0x42af82e6,2 +np.float32,0x41ea29,0x41ea29,2 +np.float32,0xbcaaa800,0xbcaaa4d7,2 +np.float64,0x3fed71f27ebae3e5,0x3fea5c6095012ca6,1 +np.float64,0x224dc392449b9,0x224dc392449b9,1 +np.float64,0x3fdf897a7d3f12f5,0x3fde620339360992,1 +np.float64,0xbfe1f99a5123f334,0xbfe124a57cfaf556,1 +np.float64,0xbfd9725c3bb2e4b8,0xbfd8d1e3f75110c7,1 +np.float64,0x3fe38977546712ee,0x3fe27d9d37f4b91f,1 +np.float64,0xbfc36c29e526d854,0xbfc3594743ee45c4,1 +np.float64,0xbfe5cbec332b97d8,0xbfe4638802316849,1 +np.float64,0x2ff35efe5fe6d,0x2ff35efe5fe6d,1 +np.float64,0x7fd3f828e227f051,0x40862a7d4a40b1e0,1 +np.float64,0xffd06fc11620df82,0xc08628ee8f1bf6c8,1 +np.float64,0x3fe5321bf4aa6438,0x3fe3e3d9fa453199,1 +np.float64,0xffd07a323ca0f464,0xc08628f3a2930f8c,1 +np.float64,0x3fdf7abe7abef57c,0x3fde54cb193d49cb,1 +np.float64,0x40941f1881285,0x40941f1881285,1 +np.float64,0xffef18defc7e31bd,0xc0863393f2c9f061,1 +np.float64,0xbfe379f871e6f3f1,0xbfe270620cb68347,1 +np.float64,0xffec829848f90530,0xc08632e210edaa2b,1 +np.float64,0x80070c00574e1801,0x80070c00574e1801,1 +np.float64,0xffce7654b23ceca8,0xc086285291e89975,1 +np.float64,0x7fc9932daa33265a,0x408626ec6cc2b807,1 +np.float64,0x355ee98c6abde,0x355ee98c6abde,1 +np.float64,0x3fac54962c38a920,0x3fac50e40b6c19f2,1 +np.float64,0x800857984af0af31,0x800857984af0af31,1 +np.float64,0x7fea6a3d55f4d47a,0x40863245bf39f179,1 +np.float64,0x3fdb8fab33371f56,0x3fdac5ffc9e1c347,1 +np.float64,0x800a887a7bf510f5,0x800a887a7bf510f5,1 +np.float64,0xbfbdbda3c63b7b48,0xbfbdac9dd5a2d3e8,1 +np.float64,0xbfd4a2457b29448a,0xbfd44acb3b316d6d,1 +np.float64,0x7fd5329a502a6534,0x40862af789b528b5,1 +np.float64,0x3fd96a7bceb2d4f8,0x3fd8ca92104d6cd6,1 +np.float64,0x3fde6a0cd6bcd41a,0x3fdd5f4b85abf749,1 +np.float64,0xbfc7faaff32ff560,0xbfc7d7560b8c4a52,1 +np.float64,0x7fec381b2f787035,0x408632cd0e9c095c,1 +np.float64,0x1fc2eb543f85e,0x1fc2eb543f85e,1 +np.float64,0x7ac6000af58c1,0x7ac6000af58c1,1 +np.float64,0xffe060a87920c150,0xc0862e72c37d5a4e,1 +np.float64,0xbfb7d8c89e2fb190,0xbfb7cffd3c3f8e3a,1 +np.float64,0x3fd91033deb22068,0x3fd87695b067aa1e,1 +np.float64,0x3fec1aff01b835fe,0x3fe95d5cbd729af7,1 +np.float64,0x7fb97f69ec32fed3,0x4086215aaae5c697,1 +np.float64,0x7feaf1e4e5f5e3c9,0x4086326e6ca6a2bb,1 +np.float64,0x800537e44d0a6fc9,0x800537e44d0a6fc9,1 +np.float64,0x800b2a0d0d36541a,0x800b2a0d0d36541a,1 +np.float64,0x3fe2193846e43270,0x3fe140308550138e,1 +np.float64,0x5e2a0a32bc542,0x5e2a0a32bc542,1 +np.float64,0xffe5888b09eb1116,0xc08630a348783aa3,1 +np.float64,0xbfceb9b5033d736c,0xbfce701049c10435,1 +np.float64,0x7fe5d68589abad0a,0x408630c00ce63f23,1 +np.float64,0x8009b5457ff36a8b,0x8009b5457ff36a8b,1 +np.float64,0xbfb5518c2e2aa318,0xbfb54b42638ca718,1 
+np.float64,0x3f9c58469838b080,0x3f9c575974fbcd7b,1 +np.float64,0x3fe8db4b4731b697,0x3fe6dc9231587966,1 +np.float64,0x8007d0f77f4fa1f0,0x8007d0f77f4fa1f0,1 +np.float64,0x7fe79eef542f3dde,0x40863160c673c67f,1 +np.float64,0xffbdc0b6163b8170,0xc0862296be4bf032,1 +np.float64,0x3fbb8d3312371a66,0x3fbb7fa76fb4cf8d,1 +np.float64,0xffd8a0eedbb141de,0xc0862c2ac6e512f0,1 +np.float64,0x7fee99d8d87d33b1,0x4086337301c4c8df,1 +np.float64,0xffe7479b552e8f36,0xc0863142fba0f0ec,1 +np.float64,0xffedf8ef4abbf1de,0xc08633488068fe69,1 +np.float64,0x895c4d9f12b8a,0x895c4d9f12b8a,1 +np.float64,0x29b4caf05369a,0x29b4caf05369a,1 +np.float64,0xbfefb90d657f721b,0xbfec01efa2425b35,1 +np.float64,0xde07c3bdbc0f9,0xde07c3bdbc0f9,1 +np.float64,0x7feae9fd02f5d3f9,0x4086326c1368ed5a,1 +np.float64,0x3feab792da756f26,0x3fe84f6e15338ed7,1 +np.float64,0xbfeff8ed72fff1db,0xbfec2f35da06daaf,1 +np.float64,0x8004b2c132896583,0x8004b2c132896583,1 +np.float64,0xbf9fcb00103f9600,0xbf9fc9b1751c569e,1 +np.float64,0x4182b72e83058,0x4182b72e83058,1 +np.float64,0x90820d812105,0x90820d812105,1 +np.float64,0xbfdec9a0ba3d9342,0xbfddb585df607ce1,1 +np.float64,0x7fdc0a69a03814d2,0x40862d347f201b63,1 +np.float64,0xbfef0708937e0e11,0xbfeb82d27f8ea97f,1 +np.float64,0xffda57e4ddb4afca,0xc0862cb49e2e0c4c,1 +np.float64,0xbfa30b9af4261730,0xbfa30a7b4a633060,1 +np.float64,0x7feb57fcc4b6aff9,0x4086328c83957a0b,1 +np.float64,0x7fe6759153eceb22,0x408630f980433963,1 +np.float64,0x7fdd3278c8ba64f1,0x40862d87445243e9,1 +np.float64,0xd3b8e6b9a771d,0xd3b8e6b9a771d,1 +np.float64,0x6267dc88c4cfc,0x6267dc88c4cfc,1 +np.float64,0x7fedd3cf00bba79d,0x4086333e91712ff5,1 +np.float64,0xffbe512ce03ca258,0xc08622bd39314cea,1 +np.float64,0xbfe71742ca6e2e86,0xbfe572ccbf2d010d,1 +np.float64,0x8002fb048c65f60a,0x8002fb048c65f60a,1 +np.float64,0x800d9d9ddf7b3b3c,0x800d9d9ddf7b3b3c,1 +np.float64,0xbfeaf6230df5ec46,0xbfe87f5d751ec3d5,1 +np.float64,0xbfe69973a42d32e8,0xbfe50c680f7002fe,1 +np.float64,0x3fe309cf87e613a0,0x3fe21048714ce1ac,1 +np.float64,0x800435d17a286ba4,0x800435d17a286ba4,1 +np.float64,0x7fefffffffffffff,0x408633ce8fb9f87e,1 +np.float64,0x3fe36ade1766d5bc,0x3fe26379fb285dde,1 +np.float64,0x3f98d8d94831b1c0,0x3f98d839885dc527,1 +np.float64,0xbfd08f7ae5211ef6,0xbfd0618ab5293e1e,1 +np.float64,0xbfcf630bd53ec618,0xbfcf14a0cd20704d,1 +np.float64,0xbfe58f0ca6eb1e1a,0xbfe4312225df8e28,1 +np.float64,0xffef4f6406be9ec7,0xc08633a1ed1d27e5,1 +np.float64,0x7fe10120b3e20240,0x40862ebfaf94e6e8,1 +np.float64,0xffe96c52fbb2d8a5,0xc08631f75d9a59a0,1 +np.float64,0xbfe448a333e89146,0xbfe31fee44c3ec43,1 +np.float64,0x80045ff4e788bfeb,0x80045ff4e788bfeb,1 +np.float64,0x7fefaa2f823f545e,0x408633b8fea29524,1 +np.float64,0xffea6b8bf234d717,0xc0863246248e5960,1 +np.float64,0xbfdb085d80b610bc,0xbfda498b15b43eec,1 +np.float64,0xbfd5e12da3abc25c,0xbfd57970e2b8aecc,1 +np.float64,0x3fcc84928a390925,0x3fcc497c417a89f3,1 +np.float64,0xbfdcb713bf396e28,0xbfdbd46c5e731fd9,1 +np.float64,0xffdf50c0453ea180,0xc0862e16b5562f25,1 +np.float64,0x800342c2f7268587,0x800342c2f7268587,1 +np.float64,0x7feb8b6d743716da,0x4086329b8248de2c,1 +np.float64,0x800a9b18b4953632,0x800a9b18b4953632,1 +np.float64,0xffedaf0d12fb5e19,0xc0863334af82de1a,1 +np.float64,0x800aebda4ab5d7b5,0x800aebda4ab5d7b5,1 +np.float64,0xbfa9f5848433eb10,0xbfa9f2ac7ac065d4,1 +np.float64,0x3fea375928f46eb2,0x3fe7ec9f10eeac7d,1 +np.float64,0x3fd6c213fead8428,0x3fd64dcc1eff5f1b,1 +np.float64,0xbfa0476f44208ee0,0xbfa046bb986007ac,1 +np.float64,0x6c8e18aed91c4,0x6c8e18aed91c4,1 +np.float64,0x8000000000000001,0x8000000000000001,1 
+np.float64,0x7fea86b5ba350d6a,0x4086324e59f13027,1 +np.float64,0x2316c3b0462d9,0x2316c3b0462d9,1 +np.float64,0x3fec4e3281389c65,0x3fe983c5c9d65940,1 +np.float64,0x3fbb87c47f772,0x3fbb87c47f772,1 +np.float64,0x8004af00fdc95e03,0x8004af00fdc95e03,1 +np.float64,0xbfd316db9ba62db8,0xbfd2d12765b9d155,1 +np.float64,0x3fec1a7a99f834f6,0x3fe95cf941889b3d,1 +np.float64,0x3feff7e1477fefc3,0x3fec2e782392d4b9,1 +np.float64,0xbfc683ea042d07d4,0xbfc66698cfa5026e,1 +np.float64,0x3fdbc8aaa9b79154,0x3fdafa50e6fc3fff,1 +np.float64,0xfb3b630ff676d,0xfb3b630ff676d,1 +np.float64,0x7fe715ef8eae2bde,0x40863131d794b41f,1 +np.float64,0x7fefa06c11bf40d7,0x408633b686c7996a,1 +np.float64,0x80002a40f5205483,0x80002a40f5205483,1 +np.float64,0x7fe95f3c74b2be78,0x408631f33e37bf76,1 +np.float64,0x3fb2977b32252ef0,0x3fb2934eaf5a4be8,1 +np.float64,0x3fc0f3dbc821e7b8,0x3fc0e745288c84c3,1 +np.float64,0x3fda98da56b531b5,0x3fd9e2b19447dacc,1 +np.float64,0x3f95b9d5202b73aa,0x3f95b96a53282949,1 +np.float64,0x3fdc1ace7738359d,0x3fdb4597d31df7ff,1 +np.float64,0xffeac5bb2e358b76,0xc0863261452ab66c,1 +np.float64,0xbfefb1b78f7f636f,0xbfebfcb9be100ced,1 +np.float64,0xf5c9e191eb93c,0xf5c9e191eb93c,1 +np.float64,0x3fe83a977630752f,0x3fe65d0df90ff6ef,1 +np.float64,0x3fc317515d262ea0,0x3fc3056072b719f0,1 +np.float64,0x7fe2dcfab225b9f4,0x40862f94257c28a2,1 +np.float64,0xca2b115794562,0xca2b115794562,1 +np.float64,0x3fd495301aa92a60,0x3fd43e57108761d5,1 +np.float64,0x800ccc4293199885,0x800ccc4293199885,1 +np.float64,0xc8d3173d91a63,0xc8d3173d91a63,1 +np.float64,0xbf2541bb7e4a8,0xbf2541bb7e4a8,1 +np.float64,0xbfe9a330df334662,0xbfe779816573f5be,1 +np.float64,0xffd5e4c8252bc990,0xc0862b39b3ca5d72,1 +np.float64,0x3fe90f3a53721e75,0x3fe70585ae09531d,1 +np.float64,0xbfe2b5ddc7a56bbc,0xbfe1c7fa91a675ed,1 +np.float64,0xbf981a0360303400,0xbf9819719345073a,1 +np.float64,0x19174b0e322ea,0x19174b0e322ea,1 +np.float64,0xbfd2f71a1725ee34,0xbfd2b2b6f7cd10b1,1 +np.float64,0x80056e83236add07,0x80056e83236add07,1 +np.float64,0x7fe4bc41d9697883,0x40863055f20ce0cb,1 +np.float64,0xffe76e06c46edc0d,0xc086315024b25559,1 +np.float64,0x3fe3c4f0f96789e2,0x3fe2b04b584609bf,1 +np.float64,0x3fe6cfc533ed9f8a,0x3fe538b4d784d5ee,1 +np.float64,0x7fd234a640a4694c,0x408629bfead4f0b2,1 +np.float64,0x3fdbc49c9ab78939,0x3fdaf698a83d08e2,1 +np.float64,0x3fe4c5336ee98a66,0x3fe388c6ddb60e0a,1 +np.float64,0xf4b9497be9729,0xf4b9497be9729,1 +np.float64,0x3fb312be12262580,0x3fb30e3c847c1d16,1 +np.float64,0x3fe9554218f2aa84,0x3fe73c8b311c7a98,1 +np.float64,0xff899816a0333040,0xc08610bfb2cd8559,1 +np.float64,0x8006008ad52c0116,0x8006008ad52c0116,1 +np.float64,0x3fd7d47be4afa8f8,0x3fd74fa71ec17fd0,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xdf2a9943be553,0xdf2a9943be553,1 +np.float64,0xbfeb86bf1eb70d7e,0xbfe8ed797580ba5c,1 +np.float64,0x800e2c0c28bc5818,0x800e2c0c28bc5818,1 +np.float64,0xbfe2be65d4657ccc,0xbfe1cf578dec2323,1 +np.float64,0xbfedea3a5afbd475,0xbfeab490bf05e585,1 +np.float64,0xbfe04b1583a0962b,0xbfdf523dfd7be25c,1 +np.float64,0x75929bb4eb254,0x75929bb4eb254,1 +np.float64,0x3fd7b4968caf692d,0x3fd731c0938ff97c,1 +np.float64,0x60bd8fd2c17b3,0x60bd8fd2c17b3,1 +np.float64,0xbfdaf15e70b5e2bc,0xbfda345a95ce18fe,1 +np.float64,0x7fdd7c35c2baf86b,0x40862d9b5f40c6b2,1 +np.float64,0x7feeb4d2ab7d69a4,0x4086337a0c0dffaf,1 +np.float64,0xffe65b5a1decb6b4,0xc08630f024420efb,1 +np.float64,0x7feb272b30764e55,0x4086327e2e553aa2,1 +np.float64,0x3fd27513e8a4ea28,0x3fd235ea49670f6a,1 +np.float64,0x3fe6541a6aeca834,0x3fe4d3a5b69fd1b6,1 
+np.float64,0xbfe0c6ca0f618d94,0xbfe017058259efdb,1 +np.float64,0x7fc1bf07b7237e0e,0x4086240000fa5a52,1 +np.float64,0x7fe96af9c0f2d5f3,0x408631f6f0f4faa2,1 +np.float64,0x3fe0728be7a0e518,0x3fdf9881a5869de9,1 +np.float64,0xffe8ea4441b1d488,0xc08631ce0685ae7e,1 +np.float64,0xffd0b973f02172e8,0xc08629121e7fdf85,1 +np.float64,0xffe37b907a26f720,0xc0862fd6529401a0,1 +np.float64,0x3fe0ee826461dd05,0x3fe03a2a424a1b40,1 +np.float64,0xbfe8073c92300e79,0xbfe6340cbd179ac1,1 +np.float64,0x800768383f8ed071,0x800768383f8ed071,1 +np.float64,0x8002e467c7c5c8d0,0x8002e467c7c5c8d0,1 +np.float64,0xbfd8d53ea5b1aa7e,0xbfd83fa7243289d7,1 +np.float64,0xffebefce2bb7df9c,0xc08632b874f4f8dc,1 +np.float64,0xffe3be9eb9277d3d,0xc0862ff1ac70ad0b,1 +np.float64,0xffe2f8a82e65f150,0xc0862f9fd9e77d86,1 +np.float64,0xbfa01d151c203a30,0xbfa01c66dc13a70a,1 +np.float64,0x800877062d30ee0d,0x800877062d30ee0d,1 +np.float64,0xaade16a755bc3,0xaade16a755bc3,1 +np.float64,0xbfeb1abc70363579,0xbfe89b52c3b003aa,1 +np.float64,0x80097d0b2ad2fa17,0x80097d0b2ad2fa17,1 +np.float64,0x8001499907429333,0x8001499907429333,1 +np.float64,0x3fe8db2aaf71b656,0x3fe6dc7873f1b235,1 +np.float64,0x5cfeadc4b9fd6,0x5cfeadc4b9fd6,1 +np.float64,0xff3f77d1fe7ef,0xff3f77d1fe7ef,1 +np.float64,0xffeecd56f9bd9aad,0xc08633806cb1163d,1 +np.float64,0xbf96f3ca582de7a0,0xbf96f34c6b8e1c85,1 +np.float64,0x7ed6b44afdad7,0x7ed6b44afdad7,1 +np.float64,0x80071808da4e3012,0x80071808da4e3012,1 +np.float64,0x3feb8aee2bf715dc,0x3fe8f0a55516615c,1 +np.float64,0x800038f62e2071ed,0x800038f62e2071ed,1 +np.float64,0x3fb13f9af2227f30,0x3fb13c456ced8e08,1 +np.float64,0xffd584d1812b09a4,0xc0862b165558ec0c,1 +np.float64,0x800b20c30fb64186,0x800b20c30fb64186,1 +np.float64,0x80024f9646e49f2d,0x80024f9646e49f2d,1 +np.float64,0xffefffffffffffff,0xc08633ce8fb9f87e,1 +np.float64,0x3fdddbcb5bbbb797,0x3fdcde981111f650,1 +np.float64,0xffed14077f3a280e,0xc086330a795ad634,1 +np.float64,0x800fec2da7ffd85b,0x800fec2da7ffd85b,1 +np.float64,0x3fe8205ffc7040c0,0x3fe6482318d217f9,1 +np.float64,0x3013e5226027d,0x3013e5226027d,1 +np.float64,0xffe4e5aad469cb55,0xc0863065dc2fb4e3,1 +np.float64,0x5cb0f7b2b9620,0x5cb0f7b2b9620,1 +np.float64,0xbfeb4537d2768a70,0xbfe8bbb2c1d3bff9,1 +np.float64,0xbfd859e297b0b3c6,0xbfd7cc807948bf9d,1 +np.float64,0x71f00b8ce3e02,0x71f00b8ce3e02,1 +np.float64,0xf5c1b875eb837,0xf5c1b875eb837,1 +np.float64,0xa0f35c8141e8,0xa0f35c8141e8,1 +np.float64,0xffe24860b42490c1,0xc0862f54222f616e,1 +np.float64,0xffcd9ae8583b35d0,0xc08628181e643a42,1 +np.float64,0x7fe9b710c7736e21,0x4086320ec033490f,1 +np.float64,0x3fd2b9ca1d257394,0x3fd277e631f0c0b3,1 +np.float64,0x23559bfc46ab4,0x23559bfc46ab4,1 +np.float64,0x8002adf75e455bef,0x8002adf75e455bef,1 +np.float64,0xbfefa4d75cbf49af,0xbfebf392e51d6a1a,1 +np.float64,0xffcfef263e3fde4c,0xc08628b336adb611,1 +np.float64,0x80061acaa8ec3596,0x80061acaa8ec3596,1 +np.float64,0x7fc1b33be0236677,0x408623faaddcc17e,1 +np.float64,0x7fe3a84083675080,0x40862fe8972e41e1,1 +np.float64,0xbfe756c1276ead82,0xbfe5a6318b061e1b,1 +np.float64,0xbfae4b71b43c96e0,0xbfae46ed0b6203a4,1 +np.float64,0x800421c6d0a8438e,0x800421c6d0a8438e,1 +np.float64,0x8009ad56fe335aae,0x8009ad56fe335aae,1 +np.float64,0xbfe71afc976e35f9,0xbfe575d21f3d7193,1 +np.float64,0x7fec0bbe4c38177c,0x408632c0710f1d8a,1 +np.float64,0x750e1daeea1c4,0x750e1daeea1c4,1 +np.float64,0x800501d4240a03a9,0x800501d4240a03a9,1 +np.float64,0x800794955cef292b,0x800794955cef292b,1 +np.float64,0x3fdf8a87f5bf1510,0x3fde62f4f00cfa19,1 +np.float64,0xbfebebdbc7f7d7b8,0xbfe939e51ba1340c,1 
+np.float64,0xbfe3a16217a742c4,0xbfe292039dd08a71,1 +np.float64,0x3fed6cd04c3ad9a1,0x3fea58995973f74b,1 +np.float64,0xffcad8787335b0f0,0xc086274fbb35dd37,1 +np.float64,0x3fcb178e3d362f1c,0x3fcae4c9f3e6dddc,1 +np.float64,0xbfcadc669435b8cc,0xbfcaaae7cf075420,1 +np.float64,0x7fe0e3906321c720,0x40862eb1bacc5c43,1 +np.float64,0xff8ad5edb035abc0,0xc0861120b6404d0b,1 +np.float64,0x3fe175a21562eb44,0x3fe0b13120a46549,1 +np.float64,0xbfeb4c4a5f769895,0xbfe8c1147f1c9d8f,1 +np.float64,0x7fca22f4e63445e9,0x40862718e9b4094e,1 +np.float64,0x3fe4269d0c684d3a,0x3fe3032aa2015c53,1 +np.float64,0x3fef551c09beaa38,0x3febbabe03f49c83,1 +np.float64,0xffd843df9fb087c0,0xc0862c0c52d5e5d9,1 +np.float64,0x7fc497e2ca292fc5,0x40862530bbd9fcc7,1 +np.float64,0x3fee02919efc0523,0x3feac655588a4acd,1 +np.float64,0x7fed1e52c0fa3ca5,0x4086330d4ddd8a2c,1 +np.float64,0xba04d4ef7409b,0xba04d4ef7409b,1 +np.float64,0x3fee22d0937c45a2,0x3feaddd4ca66b447,1 +np.float64,0xffeb2558cf764ab1,0xc086327da4e84053,1 +np.float64,0xbfe103d987e207b3,0xbfe04d04818ad1ff,1 +np.float64,0x3f9fd7fed03faffe,0x3f9fd6ae9a45be84,1 +np.float64,0x800a53ec4c34a7d9,0x800a53ec4c34a7d9,1 +np.float64,0xbfe2feb17f65fd63,0xbfe206b9d33a78a2,1 +np.float64,0x989bdd613139,0x989bdd613139,1 +np.float64,0xbfdd0ad3fb3a15a8,0xbfdc20c32a530741,1 +np.float64,0xbfc4222163284444,0xbfc40d1c612784b5,1 +np.float64,0xc30cf5c78619f,0xc30cf5c78619f,1 +np.float64,0x3fe913bd6732277b,0x3fe70912f76bad71,1 +np.float64,0x98f175f531e2f,0x98f175f531e2f,1 +np.float64,0x3fed8c1f717b183f,0x3fea6f9fb3af3423,1 +np.float64,0x7fee46b085bc8d60,0x4086335d269eb7e9,1 +np.float64,0x8007480f564e901f,0x8007480f564e901f,1 +np.float64,0xc9b96e179372e,0xc9b96e179372e,1 +np.float64,0x3fe44deac4289bd6,0x3fe32463a74a69e7,1 +np.float64,0x80021d6c5c243ad9,0x80021d6c5c243ad9,1 +np.float64,0xbfebc805a6f7900b,0xbfe91edcf65a1c19,1 +np.float64,0x80044748adc88e92,0x80044748adc88e92,1 +np.float64,0x4007ee44800fe,0x4007ee44800fe,1 +np.float64,0xbfe24307a4648610,0xbfe1648ad5c47b6f,1 +np.float64,0xbfee6d3a93fcda75,0xbfeb13e1a3196e78,1 +np.float64,0x3fe49a287f293451,0x3fe364a11b9f0068,1 +np.float64,0x80052b37ceaa5670,0x80052b37ceaa5670,1 +np.float64,0xbfd42be893a857d2,0xbfd3da05dac7c286,1 +np.float64,0xffb4bbe4ac2977c8,0xc0861fb31bda6956,1 +np.float64,0xbfc732a4142e6548,0xbfc7129a4eafa399,1 +np.float64,0x7fd0696791a0d2ce,0x408628eb7756cb9c,1 +np.float64,0x3fe46c8f8d68d91f,0x3fe33e3df16187c1,1 +np.float64,0x3fe3a28f1ce7451e,0x3fe293043238d08c,1 +np.float64,0xffedc4eb723b89d6,0xc086333a92258c15,1 +np.float64,0x8000d15b4c41a2b7,0x8000d15b4c41a2b7,1 +np.float64,0xffeb73450236e689,0xc08632947b0148ab,1 +np.float64,0xffe68cf4722d19e8,0xc0863101d08d77bd,1 +np.float64,0x800c70eb4698e1d7,0x800c70eb4698e1d7,1 +np.float64,0xffa94387ff529,0xffa94387ff529,1 +np.float64,0x7fe3835d996706ba,0x40862fd985ff8e7d,1 +np.float64,0x3fe55e476feabc8e,0x3fe408a15594ec52,1 +np.float64,0xffc69672222d2ce4,0xc08625ee0c4c0f6a,1 +np.float64,0xbf9d900b883b2020,0xbf9d8efe811d36df,1 +np.float64,0xbfdb9b9755b7372e,0xbfdad0f2aa2cb110,1 +np.float64,0xffeade6073b5bcc0,0xc08632689f17a25d,1 +np.float64,0xffd1d6a6baa3ad4e,0xc086299630a93a7b,1 +np.float64,0x7fd05ba25620b744,0x408628e4be1ef845,1 +np.float64,0xbfc7d422d52fa844,0xbfc7b170a61531bf,1 +np.float64,0x3fd5196797aa32d0,0x3fd4bc0f0e7d8e1d,1 +np.float64,0x617594a4c2eb3,0x617594a4c2eb3,1 +np.float64,0x7fd779bc4caef378,0x40862bc89271b882,1 +np.float64,0xffd2fb262ba5f64c,0xc0862a15561e9524,1 +np.float64,0x72fd661ae5fad,0x72fd661ae5fad,1 +np.float64,0x3fecf441f339e884,0x3fe9ff880d584f64,1 
+np.float64,0x7fc3a8968827512c,0x408624d198b05c61,1 +np.float64,0x3fe7a25c56ef44b9,0x3fe5e32509a7c32d,1 +np.float64,0x7fd117d514222fa9,0x4086293ec640d5f2,1 +np.float64,0x3fe37dfe5ee6fbfc,0x3fe273d1bcaa1ef0,1 +np.float64,0xbfed4cd19d7a99a3,0xbfea41064cba4c8b,1 +np.float64,0x8003ff12aaa7fe26,0x8003ff12aaa7fe26,1 +np.float64,0x3fcbc3d1193787a2,0x3fcb8d39e3e88264,1 +np.float64,0xe9ba1a91d3744,0xe9ba1a91d3744,1 +np.float64,0x8002ab71998556e4,0x8002ab71998556e4,1 +np.float64,0x800110057922200c,0x800110057922200c,1 +np.float64,0xbfe3b7af19a76f5e,0xbfe2a502fc0a2882,1 +np.float64,0x7fd9de9d5e33bd3a,0x40862c8f73cccabf,1 +np.float64,0xbfba0f0a86341e18,0xbfba0392f44c2771,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7fe5d162e96ba2c5,0x408630be2b15e01b,1 +np.float64,0x800b7f0eac76fe1e,0x800b7f0eac76fe1e,1 +np.float64,0xff98bed150317da0,0xc086160633164f5f,1 +np.float64,0x3fef91fd70ff23fb,0x3febe629709d0ae7,1 +np.float64,0x7fe5bea7f16b7d4f,0x408630b749f445e9,1 +np.float64,0xbfe3dc428467b885,0xbfe2c41ea93fab07,1 +np.float64,0xbfeba1fbfcf743f8,0xbfe9021b52851bb9,1 +np.float64,0x7fd2fb2108a5f641,0x40862a1553f45830,1 +np.float64,0x7feb8199a4370332,0x40863298a7169dad,1 +np.float64,0x800f97ff8d7f2fff,0x800f97ff8d7f2fff,1 +np.float64,0x3fd5e20b6b2bc417,0x3fd57a42bd1c0993,1 +np.float64,0x8006b4072dad680f,0x8006b4072dad680f,1 +np.float64,0x605dccf2c0bba,0x605dccf2c0bba,1 +np.float64,0x3fc705ed142e0bda,0x3fc6e69971d86f73,1 +np.float64,0xffd2ba1aad257436,0xc08629f9bc918f8b,1 +np.float64,0x8002954e23c52a9d,0x8002954e23c52a9d,1 +np.float64,0xbfecc65da7798cbb,0xbfe9dd745be18562,1 +np.float64,0x7fc66110482cc220,0x408625db0db57ef8,1 +np.float64,0x3fcd09446d3a1289,0x3fcccaf2dd0a41ea,1 +np.float64,0x3febe7095437ce13,0x3fe93642d1e73b2a,1 +np.float64,0x8004773c7da8ee7a,0x8004773c7da8ee7a,1 +np.float64,0x8001833241230665,0x8001833241230665,1 +np.float64,0x3fe6a262db6d44c6,0x3fe513b3dab5adce,1 +np.float64,0xe6282cc1cc506,0xe6282cc1cc506,1 +np.float64,0x800b9d8553973b0b,0x800b9d8553973b0b,1 +np.float64,0x3fdfbe0c7b3f7c19,0x3fde912375d867a8,1 +np.float64,0x7fd5ac11ebab5823,0x40862b24dfc6d08e,1 +np.float64,0x800e4b7cb1fc96f9,0x800e4b7cb1fc96f9,1 +np.float64,0x3fe14706da628e0e,0x3fe0883aec2a917a,1 +np.float64,0x7fc963f97532c7f2,0x408626dd9b0cafe1,1 +np.float64,0xbfe9c250b5b384a2,0xbfe791c5eabcb05d,1 +np.float64,0x3fe8d16e6c71a2dd,0x3fe6d4c7a33a0bf4,1 +np.float64,0x3fe474ae4628e95d,0x3fe34515c93f4733,1 +np.float64,0x3fbf3257ee3e64b0,0x3fbf1eb530e126ea,1 +np.float64,0x8005f089b3abe114,0x8005f089b3abe114,1 +np.float64,0x3fece07bccf9c0f8,0x3fe9f0dc228124d5,1 +np.float64,0xbfc52521632a4a44,0xbfc50ccebdf59c2c,1 +np.float64,0x7fdf53beb13ea77c,0x40862e177918195e,1 +np.float64,0x8003d9f6ad07b3ee,0x8003d9f6ad07b3ee,1 +np.float64,0xffeacf96bbb59f2d,0xc086326436b38b1a,1 +np.float64,0xdccaea29b995e,0xdccaea29b995e,1 +np.float64,0x5948d21eb291b,0x5948d21eb291b,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x7fef6d2c543eda58,0x408633a98593cdf5,1 +np.float64,0x7feda454f47b48a9,0x40863331cb6dc9f7,1 +np.float64,0x3fdd377cecba6ef8,0x3fdc4968f74a9c83,1 +np.float64,0x800644096d4c8814,0x800644096d4c8814,1 +np.float64,0xbfe33ca15ae67942,0xbfe23be5de832bd8,1 +np.float64,0xffce9582bd3d2b04,0xc086285abdf9bf9d,1 +np.float64,0x3fe6621e86acc43d,0x3fe4df231bfa93e1,1 +np.float64,0xee7d19e9dcfa3,0xee7d19e9dcfa3,1 +np.float64,0x800be5997277cb33,0x800be5997277cb33,1 +np.float64,0x82069041040e,0x82069041040e,1 +np.float64,0x800d6efdc19addfc,0x800d6efdc19addfc,1 +np.float64,0x7fb27770ee24eee1,0x40861ec5ed91b839,1 
+np.float64,0x3fd506064caa0c0d,0x3fd4a9a66353fefd,1 +np.float64,0xbfeca9b36bf95367,0xbfe9c81f03ba37b8,1 +np.float64,0xffeab1b7bab5636f,0xc086325b47f61f2b,1 +np.float64,0xffc99f5b2e333eb8,0xc08626f03b08b412,1 +np.float64,0x3fbf1a71bc3e34e3,0x3fbf06fbcaa5de58,1 +np.float64,0x3fe75015736ea02b,0x3fe5a0cd8d763d8d,1 +np.float64,0xffe6a7442fad4e88,0xc086310b20addba4,1 +np.float64,0x3fe5d62ff86bac60,0x3fe46c033195bf28,1 +np.float64,0x7fd0b1f0362163df,0x4086290e857dc1be,1 +np.float64,0xbe0353737c06b,0xbe0353737c06b,1 +np.float64,0x7fec912d8739225a,0x408632e627704635,1 +np.float64,0xded8ba2fbdb18,0xded8ba2fbdb18,1 +np.float64,0x7fec0b53fdf816a7,0x408632c052bc1bd2,1 +np.float64,0x7fe9640d12b2c819,0x408631f4c2ba54d8,1 +np.float64,0x800be714eeb7ce2a,0x800be714eeb7ce2a,1 +np.float64,0xbfcf444a793e8894,0xbfcef6c126b54853,1 +np.float64,0xffeb20cf1bf6419e,0xc086327c4e6ffe80,1 +np.float64,0xc07de22180fd,0xc07de22180fd,1 +np.float64,0xffed129d387a253a,0xc086330a15ad0adb,1 +np.float64,0x3fd9e94fedb3d2a0,0x3fd94049924706a8,1 +np.float64,0x7fe6ba488c2d7490,0x40863111d51e7861,1 +np.float64,0xbfebbdf25db77be5,0xbfe91740ad7ba521,1 +np.float64,0x7fbc6c3c4838d878,0x40862239160cb613,1 +np.float64,0xbfefa82ecebf505e,0xbfebf5f31957dffd,1 +np.float64,0x800bebeb7ad7d7d7,0x800bebeb7ad7d7d7,1 +np.float64,0x7fecccc6f8f9998d,0x408632f6c6da8aac,1 +np.float64,0xcbe4926197ca,0xcbe4926197ca,1 +np.float64,0x2c5d9fd858bb5,0x2c5d9fd858bb5,1 +np.float64,0xbfe9fb021073f604,0xbfe7bddc61f1151a,1 +np.float64,0xbfebb18572f7630b,0xbfe90ddc5002313f,1 +np.float64,0x13bb0d3227763,0x13bb0d3227763,1 +np.float64,0x3feefa5e5cbdf4bd,0x3feb79b9e8ce16bf,1 +np.float64,0x3fc97f086132fe10,0x3fc9549fc8e15ecb,1 +np.float64,0xffe70887c06e110f,0xc086312d30fd31cf,1 +np.float64,0xa00c113540182,0xa00c113540182,1 +np.float64,0x800950984772a131,0x800950984772a131,1 +np.float64,0x1,0x1,1 +np.float64,0x3fd83b4026b07680,0x3fd7afdc659d9a34,1 +np.float64,0xbfe32348fbe64692,0xbfe226292a706a1a,1 +np.float64,0x800b894dcc77129c,0x800b894dcc77129c,1 +np.float64,0xeb2ca419d6595,0xeb2ca419d6595,1 +np.float64,0xbff0000000000000,0xbfec34366179d427,1 +np.float64,0x3feb269e99f64d3d,0x3fe8a4634b927a21,1 +np.float64,0xbfe83149d7706294,0xbfe655a2b245254e,1 +np.float64,0xbfe6eef3ca6ddde8,0xbfe5521310e24d16,1 +np.float64,0x3fea89a4b7b51349,0x3fe82c1fc69edcec,1 +np.float64,0x800f2a8bf17e5518,0x800f2a8bf17e5518,1 +np.float64,0x800f71fac29ee3f6,0x800f71fac29ee3f6,1 +np.float64,0xe7cb31f1cf966,0xe7cb31f1cf966,1 +np.float64,0x3b0f8752761f2,0x3b0f8752761f2,1 +np.float64,0x3fea27dea3744fbd,0x3fe7e0a4705476b2,1 +np.float64,0xbfa97c019c32f800,0xbfa97950c1257b92,1 +np.float64,0xffeff13647ffe26c,0xc08633cadc7105ed,1 +np.float64,0x3feee162353dc2c4,0x3feb67c2da0fbce8,1 +np.float64,0x80088c0807911810,0x80088c0807911810,1 +np.float64,0x3fe936ab1db26d56,0x3fe72489bc69719d,1 +np.float64,0xa2f84bd545f0a,0xa2f84bd545f0a,1 +np.float64,0xbfed445ed27a88be,0xbfea3acac0aaf482,1 +np.float64,0x800faf3e69df5e7d,0x800faf3e69df5e7d,1 +np.float64,0x3fc145a330228b46,0x3fc13853f11b1c90,1 +np.float64,0xbfe25ec5abe4bd8c,0xbfe17c9e9b486f07,1 +np.float64,0x3fe119b160e23363,0x3fe0604b10178966,1 +np.float64,0x7fe0cbf2836197e4,0x40862ea6831e5f4a,1 +np.float64,0x3fe75dd3b4eebba8,0x3fe5abe80fd628fb,1 +np.float64,0x3f7c391000387220,0x3f7c39015d8f3a36,1 +np.float64,0x899d9cad133b4,0x899d9cad133b4,1 +np.float64,0x3fe5f0e34febe1c6,0x3fe4820cefe138fc,1 +np.float64,0x7fe060dfdba0c1bf,0x40862e72de8afcd0,1 +np.float64,0xbfae42f7103c85f0,0xbfae3e7630819c60,1 +np.float64,0x35f1f2c06be5,0x35f1f2c06be5,1 
+np.float64,0xffc5194d362a329c,0xc086256266c8b7ad,1 +np.float64,0xbfda034f1b34069e,0xbfd95860a44c43ad,1 +np.float64,0x32bcebca6579e,0x32bcebca6579e,1 +np.float64,0xbfd1751ebca2ea3e,0xbfd13f79f45bf75c,1 +np.float64,0x3fee4fa1e5bc9f44,0x3feafe69e0d6c1c7,1 +np.float64,0x7f9c03cd5038079a,0x4086170459172900,1 +np.float64,0x7fc5fb6d6d2bf6da,0x408625b6651cfc73,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xffd1a8162ca3502c,0xc0862981333931ad,1 +np.float64,0x7fc415c198282b82,0x408624fd8c155d1b,1 +np.float64,0xffda37fbe7b46ff8,0xc0862caae7865c43,1 +np.float64,0xbfef4312257e8624,0xbfebadd89f3ee31c,1 +np.float64,0xbfec45e1fd788bc4,0xbfe97d8b14db6274,1 +np.float64,0xbfe6fdcfd26dfba0,0xbfe55e25b770d00a,1 +np.float64,0x7feb66d424f6cda7,0x40863290d9ff7ea2,1 +np.float64,0x8b08a29916115,0x8b08a29916115,1 +np.float64,0xffe12ca25c625944,0xc0862ed40d769f72,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x804925e100925,0x804925e100925,1 +np.float64,0xcebf3e019d9,0xcebf3e019d9,1 +np.float64,0xbfd5d75d4aabaeba,0xbfd57027671dedf7,1 +np.float64,0x800b829ecd37053e,0x800b829ecd37053e,1 +np.float64,0x800b1205daf6240c,0x800b1205daf6240c,1 +np.float64,0x3fdf7e9889befd31,0x3fde583fdff406c3,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fdc09760d3812ec,0x3fdb35b55c8090c6,1 +np.float64,0x800c4d99e4f89b34,0x800c4d99e4f89b34,1 +np.float64,0xffbaa6772e354cf0,0xc08621b535badb2f,1 +np.float64,0xbfc91188fd322310,0xbfc8e933b5d25ea7,1 +np.float64,0xffc1b947f4237290,0xc08623fd69164251,1 +np.float64,0x3fc6ab3b252d5678,0x3fc68d50bbac106d,1 +np.float64,0xffac8eb968391d70,0xc0861cb734833355,1 +np.float64,0xffe29a35c365346b,0xc0862f77a1aed6d8,1 +np.float64,0x3fde14b9543c2973,0x3fdd122697779015,1 +np.float64,0xbf10f5400021e000,0xbf10f53fffef1383,1 +np.float64,0xffe0831aa3e10635,0xc0862e838553d0ca,1 +np.float64,0x3fccbadbcf3975b8,0x3fcc7e768d0154ec,1 +np.float64,0x3fe092ef66e125df,0x3fdfd212a7116c9b,1 +np.float64,0xbfd727f039ae4fe0,0xbfd6adad040b2334,1 +np.float64,0xbfe4223b93a84477,0xbfe2ff7587364db4,1 +np.float64,0x3f4e5c3a003cb874,0x3f4e5c39b75c70f7,1 +np.float64,0x800e76b1a87ced63,0x800e76b1a87ced63,1 +np.float64,0x3fed2b7368fa56e7,0x3fea2863b9131b8c,1 +np.float64,0xffadb76ec43b6ee0,0xc0861d08ae79f20c,1 +np.float64,0x800b6a0cd1f6d41a,0x800b6a0cd1f6d41a,1 +np.float64,0xffee6aa943fcd552,0xc0863366a24250d5,1 +np.float64,0xbfe68cbc4e6d1978,0xbfe502040591aa5b,1 +np.float64,0xff859a38002b3480,0xc0860f64726235cc,1 +np.float64,0x3474d13e68e9b,0x3474d13e68e9b,1 +np.float64,0xffc11d49f6223a94,0xc08623b5c2df9712,1 +np.float64,0x800d82d019bb05a0,0x800d82d019bb05a0,1 +np.float64,0xbfe2af0192255e03,0xbfe1c20e38106388,1 +np.float64,0x3fe97d13c032fa28,0x3fe75bba11a65f86,1 +np.float64,0x7fcd457e133a8afb,0x40862800e80f5863,1 +np.float64,0x9d7254cf3ae4b,0x9d7254cf3ae4b,1 +np.float64,0x8003047675a608ee,0x8003047675a608ee,1 +np.float64,0x3fead6cd7d75ad9a,0x3fe8676138e5ff93,1 +np.float64,0x3fea6ee3b0f4ddc7,0x3fe817838a2bcbe3,1 +np.float64,0x3feed0edea7da1dc,0x3feb5bea3cb12fe2,1 +np.float64,0x88003fe510008,0x88003fe510008,1 +np.float64,0x3fe64cadc56c995c,0x3fe4cd8ead87fc79,1 +np.float64,0xaae30c5955c62,0xaae30c5955c62,1 +np.float64,0x7fc8c97cae3192f8,0x408626ac579f4fc5,1 +np.float64,0xbfc2bc0e8b25781c,0xbfc2ab188fdab7dc,1 +np.float64,0xc8f8e5e791f1d,0xc8f8e5e791f1d,1 +np.float64,0x3fecfaa5d6f9f54c,0x3fea0444dabe5a15,1 +np.float64,0xbfeb93740ff726e8,0xbfe8f71a9ab13baf,1 +np.float64,0xffd951236c32a246,0xc0862c633a4661eb,1 +np.float64,0x3fddbc5fcd3b78c0,0x3fdcc21c1a0a9246,1 
+np.float64,0xbfd242443da48488,0xbfd20512d91f7924,1 +np.float64,0x2a3689b2546d2,0x2a3689b2546d2,1 +np.float64,0xffe24c67382498ce,0xc0862f55e4ea6283,1 +np.float64,0x800cbfce22197f9c,0x800cbfce22197f9c,1 +np.float64,0x8002269428044d29,0x8002269428044d29,1 +np.float64,0x7fd44babbd289756,0x40862a9e79b51c3b,1 +np.float64,0x3feea056a27d40ad,0x3feb38dcddb682f0,1 +np.float64,0xffeca8174b39502e,0xc08632ec8f88a5b2,1 +np.float64,0x7fbe0853a03c10a6,0x408622a9e8d53a9e,1 +np.float64,0xbfa9704b2432e090,0xbfa96d9dfc8c0cc2,1 +np.float64,0x800bda28fab7b452,0x800bda28fab7b452,1 +np.float64,0xbfb0ffa2f621ff48,0xbfb0fc71f405e82a,1 +np.float64,0xbfe66c04216cd808,0xbfe4e73ea3b58cf6,1 +np.float64,0x3fe336ea5d266dd5,0x3fe236ffcf078c62,1 +np.float64,0xbfe7729ae6aee536,0xbfe5bcad4b8ac62d,1 +np.float64,0x558cfc96ab1a0,0x558cfc96ab1a0,1 +np.float64,0xbfe7d792aaefaf26,0xbfe60de1b8f0279d,1 +np.float64,0xffd19ef6bda33dee,0xc086297d0ffee3c7,1 +np.float64,0x666b3ab4ccd68,0x666b3ab4ccd68,1 +np.float64,0xffa3d89e3c27b140,0xc08619cdeb2c1e49,1 +np.float64,0xbfb1728f7f62f,0xbfb1728f7f62f,1 +np.float64,0x3fc76319f32ec634,0x3fc74247bd005e20,1 +np.float64,0xbfbf1caee23e3960,0xbfbf0934c13d70e2,1 +np.float64,0x7fe79626f32f2c4d,0x4086315dcc68a5cb,1 +np.float64,0xffee78c4603cf188,0xc086336a572c05c2,1 +np.float64,0x3fce546eda3ca8de,0x3fce0d8d737fd31d,1 +np.float64,0xa223644d4446d,0xa223644d4446d,1 +np.float64,0x3fecea878b79d510,0x3fe9f850d50973f6,1 +np.float64,0x3fc20e0ea1241c1d,0x3fc1fedda87c5e75,1 +np.float64,0xffd1c5a99ca38b54,0xc086298e8e94cd47,1 +np.float64,0x7feb2c299d765852,0x4086327fa6db2808,1 +np.float64,0xcaf9d09595f3a,0xcaf9d09595f3a,1 +np.float64,0xbfe293bf21e5277e,0xbfe1aa7f6ac274ef,1 +np.float64,0xbfbaa3c8ce354790,0xbfba97891df19c01,1 +np.float64,0x3faf5784543eaf09,0x3faf5283acc7d71d,1 +np.float64,0x7fc014f8f62029f1,0x40862336531c662d,1 +np.float64,0xbfe0d9ac2d61b358,0xbfe027bce36699ca,1 +np.float64,0x8003e112ff27c227,0x8003e112ff27c227,1 +np.float64,0xffec0d4151381a82,0xc08632c0df718dd0,1 +np.float64,0x7fa2156fb0242ade,0x4086190f7587d708,1 +np.float64,0xd698358dad307,0xd698358dad307,1 +np.float64,0xbfed8d1b0efb1a36,0xbfea70588ef9ba18,1 +np.float64,0xbfd2cae6a92595ce,0xbfd28851e2185dee,1 +np.float64,0xffe7a36764ef46ce,0xc086316249c9287a,1 +np.float64,0xbfdb8ad8e5b715b2,0xbfdac19213c14315,1 +np.float64,0x3b5dba6076bc,0x3b5dba6076bc,1 +np.float64,0x800e6e8347bcdd07,0x800e6e8347bcdd07,1 +np.float64,0x800bea9f3fb7d53f,0x800bea9f3fb7d53f,1 +np.float64,0x7fb6d0e5fc2da1cb,0x4086207714c4ab85,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe2aa1e1465543c,0xbfe1bdd550ef2966,1 +np.float64,0x7fd3f6a47fa7ed48,0x40862a7caea33055,1 +np.float64,0x800094e292c129c6,0x800094e292c129c6,1 +np.float64,0x800e1500ecbc2a02,0x800e1500ecbc2a02,1 +np.float64,0xbfd8ff6f97b1fee0,0xbfd866f84346ecdc,1 +np.float64,0x681457d0d028c,0x681457d0d028c,1 +np.float64,0x3feed0b5987da16b,0x3feb5bc1ab424984,1 +np.float64,0x3fdbcb34cdb79668,0x3fdafca540f32c06,1 +np.float64,0xbfdc9eacdcb93d5a,0xbfdbbe274aa8aeb0,1 +np.float64,0xffe6e35d526dc6ba,0xc08631203df38ed2,1 +np.float64,0x3fcac1cc65358398,0x3fca90de41889613,1 +np.float64,0xbfebf07a55b7e0f5,0xbfe93d6007db0c67,1 +np.float64,0xbfd7a7b1e7af4f64,0xbfd725a9081c22cb,1 +np.float64,0x800232bd7de4657c,0x800232bd7de4657c,1 +np.float64,0x7fb1dae43c23b5c7,0x40861e80f5c0a64e,1 +np.float64,0x8013ded70027c,0x8013ded70027c,1 +np.float64,0x7fc4373a59286e74,0x4086250ad60575d0,1 +np.float64,0xbfe9980fd6733020,0xbfe770d1352d0ed3,1 +np.float64,0x8008a66b8dd14cd7,0x8008a66b8dd14cd7,1 +np.float64,0xbfaebc67f83d78d0,0xbfaeb7b015848478,1 
+np.float64,0xffd0c52762218a4e,0xc0862917b564afc6,1 +np.float64,0xbfd503860aaa070c,0xbfd4a74618441561,1 +np.float64,0x5bdacabcb7b5a,0x5bdacabcb7b5a,1 +np.float64,0xf3623cffe6c48,0xf3623cffe6c48,1 +np.float64,0x7fe16c6c7ea2d8d8,0x40862ef18d90201f,1 +np.float64,0x3ff0000000000000,0x3fec34366179d427,1 +np.float64,0x7fe19cbc84233978,0x40862f079dcbc169,1 +np.float64,0x3fcfd3d6933fa7ad,0x3fcf822187907f6b,1 +np.float64,0x8007d65d672facbc,0x8007d65d672facbc,1 +np.float64,0xffca6115aa34c22c,0xc086272bd7728750,1 +np.float64,0xbfe77ab1556ef562,0xbfe5c332fb55b66e,1 +np.float64,0x8001ed797c23daf4,0x8001ed797c23daf4,1 +np.float64,0x7fdd3d16cb3a7a2d,0x40862d8a2c869281,1 +np.float64,0x75f36beaebe6e,0x75f36beaebe6e,1 +np.float64,0xffda3c2798b47850,0xc0862cac2d3435df,1 +np.float64,0xbfa37cc3c426f980,0xbfa37b8f9d3ec4b7,1 +np.float64,0x80030ea8bd061d52,0x80030ea8bd061d52,1 +np.float64,0xffe41f7617683eec,0xc08630188a3e135e,1 +np.float64,0x800e40590dfc80b2,0x800e40590dfc80b2,1 +np.float64,0x3fea950d80f52a1c,0x3fe834e74481e66f,1 +np.float64,0xffec95e39a792bc6,0xc08632e779150084,1 +np.float64,0xbfd54310ecaa8622,0xbfd4e39c4d767002,1 +np.float64,0xffd40c9971a81932,0xc0862a85764eb2f4,1 +np.float64,0xb0a2230761445,0xb0a2230761445,1 +np.float64,0x80092973661252e7,0x80092973661252e7,1 +np.float64,0x7fb13b030a227605,0x40861e380aeb5549,1 +np.float64,0x3fbd5d8db23abb1b,0x3fbd4d2a0b94af36,1 +np.float64,0xbfd6cb8567ad970a,0xbfd656b19ab8fa61,1 +np.float64,0xbfe7c0fd346f81fa,0xbfe5fbc28807c794,1 +np.float64,0xffd586579eab0cb0,0xc0862b16e65c0754,1 +np.float64,0x8000e52da461ca5c,0x8000e52da461ca5c,1 +np.float64,0x3fc69d17112d3a2e,0x3fc67f63fe1fea1c,1 +np.float64,0x3fd36ba892a6d750,0x3fd3225be1fa87af,1 +np.float64,0x7fe2850598e50a0a,0x40862f6e7fcd6c1a,1 +np.float64,0x80074a4dacce949c,0x80074a4dacce949c,1 +np.float64,0x3fe25eea4d64bdd5,0x3fe17cbe5fefbd4e,1 +np.float64,0xbfe250c08be4a181,0xbfe17074c520e5de,1 +np.float64,0x8000f5665481eacd,0x8000f5665481eacd,1 +np.float64,0x7fdb3172f83662e5,0x40862cf5a46764f1,1 +np.float64,0x7fd8ed82d631db05,0x40862c4380658afa,1 +np.float64,0xffec5163feb8a2c7,0xc08632d4366aab06,1 +np.float64,0x800ff14ac6ffe296,0x800ff14ac6ffe296,1 +np.float64,0xbfc7cc7aea2f98f4,0xbfc7a9e9cb38f023,1 +np.float64,0xbfd50cdfc32a19c0,0xbfd4b0282b452fb2,1 +np.float64,0xbfec256d75b84adb,0xbfe965328c1860b2,1 +np.float64,0xffe860c4cdb0c189,0xc08631a164b7059a,1 +np.float64,0xbfe23de164247bc3,0xbfe16011bffa4651,1 +np.float64,0xcc96b39d992d7,0xcc96b39d992d7,1 +np.float64,0xbfec43acf938875a,0xbfe97be3a13b50c3,1 +np.float64,0xc4f587bb89eb1,0xc4f587bb89eb1,1 +np.float64,0xbfcd971d9a3b2e3c,0xbfcd5537ad15dab4,1 +np.float64,0xffcaf00d8035e01c,0xc0862756bf2cdf8f,1 +np.float64,0x8008c26f93f184e0,0x8008c26f93f184e0,1 +np.float64,0xfff0000000000000,0xfff0000000000000,1 +np.float64,0xbfd13552c3a26aa6,0xbfd101e5e252eb7b,1 +np.float64,0x7fe497235e292e46,0x4086304792fb423a,1 +np.float64,0x7fd6dc0192adb802,0x40862b921a5e935d,1 +np.float64,0xf16d49a1e2da9,0xf16d49a1e2da9,1 +np.float64,0xffef6b1b71bed636,0xc08633a8feed0178,1 +np.float64,0x7fe15ec62f62bd8b,0x40862eeb46b193dc,1 +np.float64,0x3fef4369ec7e86d4,0x3febae1768be52cc,1 +np.float64,0x4f84e8e89f09e,0x4f84e8e89f09e,1 +np.float64,0xbfe19e71ade33ce4,0xbfe0d4fad05e0ebc,1 +np.float64,0xbfe7e1df1defc3be,0xbfe616233e15b3d0,1 +np.float64,0x7fe9349afdb26935,0x408631e5c1c5c6cd,1 +np.float64,0xff90c35ac82186c0,0xc08612e896a06467,1 +np.float64,0xbfe88bf8807117f1,0xbfe69dc786464422,1 +np.float64,0x3feaf9ff6475f3fe,0x3fe8825132410d18,1 +np.float64,0x9ff487a33fe91,0x9ff487a33fe91,1 
+np.float64,0x7fedb30159bb6602,0x40863335c0419322,1 +np.float64,0x800bddf6ed77bbee,0x800bddf6ed77bbee,1 +np.float64,0x3fd919df133233be,0x3fd87f963b9584ce,1 +np.float64,0x7fd64da3b52c9b46,0x40862b5fa9dd3b6d,1 +np.float64,0xbfce288db43c511c,0xbfcde2d953407ae8,1 +np.float64,0x3fe88bc72771178e,0x3fe69da05e9e9b4e,1 +np.float64,0x800feafe259fd5fc,0x800feafe259fd5fc,1 +np.float64,0x3febbbff4a7777ff,0x3fe915c78f6a280f,1 +np.float64,0xbfefbde4417f7bc9,0xbfec055f4fb2cd21,1 +np.float64,0xf13ca103e2794,0xf13ca103e2794,1 +np.float64,0x3fe6423884ec8471,0x3fe4c4f97eaa876a,1 +np.float64,0x800ca01c8cb94039,0x800ca01c8cb94039,1 +np.float64,0x3fbc5073f638a0e0,0x3fbc41c163ac0001,1 +np.float64,0xbfda0d83cfb41b08,0xbfd961d4cacc82cf,1 +np.float64,0x800f37b8f17e6f72,0x800f37b8f17e6f72,1 +np.float64,0x7fe0b08cd7216119,0x40862e996becb771,1 +np.float64,0xffd4222a40a84454,0xc0862a8e0c984917,1 +np.float64,0x7feb3df98ff67bf2,0x40863284e3a86ee6,1 +np.float64,0x8001d5d291e3aba6,0x8001d5d291e3aba6,1 +np.float64,0xbfd3c21629a7842c,0xbfd3750095a5894a,1 +np.float64,0xbfd069eb48a0d3d6,0xbfd03d2b1c2ae9db,1 +np.float64,0xffeb1be2973637c4,0xc086327ada954662,1 +np.float64,0x3fc659f97e2cb3f3,0x3fc63d497a451f10,1 +np.float64,0xbfeb624bc776c498,0xbfe8d1cf7c0626ca,1 +np.float64,0xffeedf26e23dbe4d,0xc08633850baab425,1 +np.float64,0xffe70da48a6e1b48,0xc086312ef75d5036,1 +np.float64,0x2b4f4830569ea,0x2b4f4830569ea,1 +np.float64,0xffe82e7fcfb05cff,0xc0863190d4771f75,1 +np.float64,0x3fcc2c1fd5385840,0x3fcbf3211ddc5123,1 +np.float64,0x7fe22ced5a6459da,0x40862f481629ee6a,1 +np.float64,0x7fe13d2895e27a50,0x40862edbbc411899,1 +np.float64,0x3fd54c4280aa9884,0x3fd4ec55a946c5d7,1 +np.float64,0xffd75b8e01aeb71c,0xc0862bbe42d76e5e,1 +np.float64,0x7f1d5376fe3ab,0x7f1d5376fe3ab,1 +np.float64,0x3fe6ec6c902dd8d9,0x3fe55004f35192bd,1 +np.float64,0x5634504aac68b,0x5634504aac68b,1 +np.float64,0x3feedb0d83bdb61b,0x3feb633467467ce6,1 +np.float64,0x3fddb1c0dcbb6380,0x3fdcb87a02daf1fa,1 +np.float64,0xbfa832da443065b0,0xbfa8308c70257209,1 +np.float64,0x87a9836b0f531,0x87a9836b0f531,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-arctan.csv b/python/numpy/_core/tests/data/umath-validation-set-arctan.csv new file mode 100644 index 000000000..c03e144a9 --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-arctan.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3f338252,0x3f1c8d9c,3 +np.float32,0x7e569df2,0x3fc90fdb,3 +np.float32,0xbf347e25,0xbf1d361f,3 +np.float32,0xbf0a654e,0xbefdbfd2,3 +np.float32,0x8070968e,0x8070968e,3 +np.float32,0x803cfb27,0x803cfb27,3 +np.float32,0x8024362e,0x8024362e,3 +np.float32,0xfd55dca0,0xbfc90fdb,3 +np.float32,0x592b82,0x592b82,3 +np.float32,0x802eb8e1,0x802eb8e1,3 +np.float32,0xbc5fef40,0xbc5febae,3 +np.float32,0x3f1f6ce8,0x3f0e967c,3 +np.float32,0x20bedc,0x20bedc,3 +np.float32,0xbf058860,0xbef629c7,3 +np.float32,0x311504,0x311504,3 +np.float32,0xbd23f560,0xbd23defa,3 +np.float32,0x800ff4e8,0x800ff4e8,3 +np.float32,0x355009,0x355009,3 +np.float32,0x3f7be42e,0x3f46fdb3,3 +np.float32,0xbf225f7c,0xbf10b364,3 +np.float32,0x8074fa9e,0x8074fa9e,3 +np.float32,0xbea4b418,0xbe9f59ce,3 +np.float32,0xbe909c14,0xbe8cf045,3 +np.float32,0x80026bee,0x80026bee,3 +np.float32,0x3d789c20,0x3d784e25,3 +np.float32,0x7f56a4ba,0x3fc90fdb,3 +np.float32,0xbf70d141,0xbf413db7,3 +np.float32,0xbf2c4886,0xbf17a505,3 +np.float32,0x7e2993bf,0x3fc90fdb,3 +np.float32,0xbe2c8a30,0xbe2aef28,3 +np.float32,0x803f82d9,0x803f82d9,3 +np.float32,0x3f062fbc,0x3ef730a1,3 +np.float32,0x3f349ee0,0x3f1d4bfa,3 
+np.float32,0x3eccfb69,0x3ec2f9e8,3 +np.float32,0x7e8a85dd,0x3fc90fdb,3 +np.float32,0x25331,0x25331,3 +np.float32,0x464f19,0x464f19,3 +np.float32,0x8035c818,0x8035c818,3 +np.float32,0x802e5799,0x802e5799,3 +np.float32,0x64e1c0,0x64e1c0,3 +np.float32,0x701cc2,0x701cc2,3 +np.float32,0x265c57,0x265c57,3 +np.float32,0x807a053f,0x807a053f,3 +np.float32,0x3bd2c412,0x3bd2c354,3 +np.float32,0xff28f1c8,0xbfc90fdb,3 +np.float32,0x7f08f08b,0x3fc90fdb,3 +np.float32,0x800c50e4,0x800c50e4,3 +np.float32,0x369674,0x369674,3 +np.float32,0xbf5b7db3,0xbf3571bf,3 +np.float32,0x7edcf5e2,0x3fc90fdb,3 +np.float32,0x800e5d4b,0x800e5d4b,3 +np.float32,0x80722554,0x80722554,3 +np.float32,0x693f33,0x693f33,3 +np.float32,0x800844e4,0x800844e4,3 +np.float32,0xbf111b82,0xbf0402ec,3 +np.float32,0x7df9c9ac,0x3fc90fdb,3 +np.float32,0xbf6619a6,0xbf3b6f57,3 +np.float32,0x8002fafe,0x8002fafe,3 +np.float32,0xfe1e67f8,0xbfc90fdb,3 +np.float32,0x3f7f4bf8,0x3f48b5b7,3 +np.float32,0x7f017b20,0x3fc90fdb,3 +np.float32,0x2d9b07,0x2d9b07,3 +np.float32,0x803aa174,0x803aa174,3 +np.float32,0x7d530336,0x3fc90fdb,3 +np.float32,0x80662195,0x80662195,3 +np.float32,0xfd5ebcf0,0xbfc90fdb,3 +np.float32,0xbe7b8dcc,0xbe76ab59,3 +np.float32,0x7f2bacaf,0x3fc90fdb,3 +np.float32,0x3f194fc4,0x3f0a229e,3 +np.float32,0x7ee21cdf,0x3fc90fdb,3 +np.float32,0x3f5a17fc,0x3f34a307,3 +np.float32,0x7f100c58,0x3fc90fdb,3 +np.float32,0x7e9128f5,0x3fc90fdb,3 +np.float32,0xbf2107c6,0xbf0fbdb4,3 +np.float32,0xbd29c800,0xbd29af22,3 +np.float32,0xbf5af499,0xbf3522a6,3 +np.float32,0x801bde44,0x801bde44,3 +np.float32,0xfeb4761a,0xbfc90fdb,3 +np.float32,0x3d88aa1b,0x3d887650,3 +np.float32,0x7eba5e0b,0x3fc90fdb,3 +np.float32,0x803906bd,0x803906bd,3 +np.float32,0x80101512,0x80101512,3 +np.float32,0x7e898f83,0x3fc90fdb,3 +np.float32,0x806406d3,0x806406d3,3 +np.float32,0x7ed20fc0,0x3fc90fdb,3 +np.float32,0x20827d,0x20827d,3 +np.float32,0x3f361359,0x3f1e43fe,3 +np.float32,0xfe4ef8d8,0xbfc90fdb,3 +np.float32,0x805e7d2d,0x805e7d2d,3 +np.float32,0xbe4316b0,0xbe40c745,3 +np.float32,0xbf0a1c06,0xbefd4e5a,3 +np.float32,0x3e202860,0x3e1edee1,3 +np.float32,0xbeb32a2c,0xbeac5899,3 +np.float32,0xfe528838,0xbfc90fdb,3 +np.float32,0x2f73e2,0x2f73e2,3 +np.float32,0xbe16e010,0xbe15cc27,3 +np.float32,0x3f50d6c5,0x3f2f2d75,3 +np.float32,0xbe88a6a2,0xbe8589c7,3 +np.float32,0x3ee36060,0x3ed5fb36,3 +np.float32,0x6c978b,0x6c978b,3 +np.float32,0x7f1b735f,0x3fc90fdb,3 +np.float32,0x3dad8256,0x3dad1885,3 +np.float32,0x807f5094,0x807f5094,3 +np.float32,0x65c358,0x65c358,3 +np.float32,0xff315ce4,0xbfc90fdb,3 +np.float32,0x7411a6,0x7411a6,3 +np.float32,0x80757b04,0x80757b04,3 +np.float32,0x3eec73a6,0x3edd82f4,3 +np.float32,0xfe9f69e8,0xbfc90fdb,3 +np.float32,0x801f4fa8,0x801f4fa8,3 +np.float32,0xbf6f2fae,0xbf405f79,3 +np.float32,0xfea206b6,0xbfc90fdb,3 +np.float32,0x3f257301,0x3f12e1ee,3 +np.float32,0x7ea6a506,0x3fc90fdb,3 +np.float32,0x80800000,0x80800000,3 +np.float32,0xff735c2d,0xbfc90fdb,3 +np.float32,0x80197f95,0x80197f95,3 +np.float32,0x7f4a354f,0x3fc90fdb,3 +np.float32,0xff320c00,0xbfc90fdb,3 +np.float32,0x3f2659de,0x3f138484,3 +np.float32,0xbe5451bc,0xbe515a52,3 +np.float32,0x3f6e228c,0x3f3fcf7c,3 +np.float32,0x66855a,0x66855a,3 +np.float32,0x8034b3a3,0x8034b3a3,3 +np.float32,0xbe21a2fc,0xbe20505d,3 +np.float32,0x7f79e2dc,0x3fc90fdb,3 +np.float32,0xbe19a8e0,0xbe18858c,3 +np.float32,0x10802c,0x10802c,3 +np.float32,0xfeee579e,0xbfc90fdb,3 +np.float32,0x3f3292c8,0x3f1becc0,3 +np.float32,0xbf595a71,0xbf34350a,3 +np.float32,0xbf7c3373,0xbf4725f4,3 +np.float32,0xbdd30938,0xbdd24b36,3 
+np.float32,0x153a17,0x153a17,3 +np.float32,0x807282a0,0x807282a0,3 +np.float32,0xfe817322,0xbfc90fdb,3 +np.float32,0x3f1b3628,0x3f0b8771,3 +np.float32,0x41be8f,0x41be8f,3 +np.float32,0x7f4a8343,0x3fc90fdb,3 +np.float32,0x3dc4ea2b,0x3dc44fae,3 +np.float32,0x802aac25,0x802aac25,3 +np.float32,0xbf20e1d7,0xbf0fa284,3 +np.float32,0xfd91a1b0,0xbfc90fdb,3 +np.float32,0x3f0d5476,0x3f012265,3 +np.float32,0x21c916,0x21c916,3 +np.float32,0x807df399,0x807df399,3 +np.float32,0x7e207b4c,0x3fc90fdb,3 +np.float32,0x8055f8ff,0x8055f8ff,3 +np.float32,0x7edf3b01,0x3fc90fdb,3 +np.float32,0x803a8df3,0x803a8df3,3 +np.float32,0x3ce3b002,0x3ce3a101,3 +np.float32,0x3f62dd54,0x3f39a248,3 +np.float32,0xff33ae10,0xbfc90fdb,3 +np.float32,0x7e3de69d,0x3fc90fdb,3 +np.float32,0x8024581e,0x8024581e,3 +np.float32,0xbf4ac99d,0xbf2b807a,3 +np.float32,0x3f157d19,0x3f074d8c,3 +np.float32,0xfed383f4,0xbfc90fdb,3 +np.float32,0xbf5a39fa,0xbf34b6b8,3 +np.float32,0x800d757d,0x800d757d,3 +np.float32,0x807d606b,0x807d606b,3 +np.float32,0x3e828f89,0x3e7fac2d,3 +np.float32,0x7a6604,0x7a6604,3 +np.float32,0x7dc7e72b,0x3fc90fdb,3 +np.float32,0x80144146,0x80144146,3 +np.float32,0x7c2eed69,0x3fc90fdb,3 +np.float32,0x3f5b4d8c,0x3f3555fc,3 +np.float32,0xfd8b7778,0xbfc90fdb,3 +np.float32,0xfc9d9140,0xbfc90fdb,3 +np.float32,0xbea265d4,0xbe9d4232,3 +np.float32,0xbe9344d0,0xbe8f65da,3 +np.float32,0x3f71f19a,0x3f41d65b,3 +np.float32,0x804a3f59,0x804a3f59,3 +np.float32,0x3e596290,0x3e563476,3 +np.float32,0x3e994ee4,0x3e94f546,3 +np.float32,0xbc103e00,0xbc103d0c,3 +np.float32,0xbf1cd896,0xbf0cb889,3 +np.float32,0x7f52b080,0x3fc90fdb,3 +np.float32,0xff584452,0xbfc90fdb,3 +np.float32,0x58b26b,0x58b26b,3 +np.float32,0x3f23cd4c,0x3f11b799,3 +np.float32,0x707d7,0x707d7,3 +np.float32,0xff732cff,0xbfc90fdb,3 +np.float32,0x3e41c2a6,0x3e3f7f0f,3 +np.float32,0xbf7058e9,0xbf40fdcf,3 +np.float32,0x7dca9857,0x3fc90fdb,3 +np.float32,0x7f0eb44b,0x3fc90fdb,3 +np.float32,0x8000405c,0x8000405c,3 +np.float32,0x4916ab,0x4916ab,3 +np.float32,0x4811a8,0x4811a8,3 +np.float32,0x3d69bf,0x3d69bf,3 +np.float32,0xfeadcf1e,0xbfc90fdb,3 +np.float32,0x3e08dbbf,0x3e080d58,3 +np.float32,0xff031f88,0xbfc90fdb,3 +np.float32,0xbe09cab8,0xbe08f818,3 +np.float32,0x21d7cd,0x21d7cd,3 +np.float32,0x3f23230d,0x3f113ea9,3 +np.float32,0x7e8a48d4,0x3fc90fdb,3 +np.float32,0x413869,0x413869,3 +np.float32,0x7e832990,0x3fc90fdb,3 +np.float32,0x800f5c09,0x800f5c09,3 +np.float32,0x7f5893b6,0x3fc90fdb,3 +np.float32,0x7f06b5b1,0x3fc90fdb,3 +np.float32,0xbe1cbee8,0xbe1b89d6,3 +np.float32,0xbf279f14,0xbf1468a8,3 +np.float32,0xfea86060,0xbfc90fdb,3 +np.float32,0x3e828174,0x3e7f91bb,3 +np.float32,0xff682c82,0xbfc90fdb,3 +np.float32,0x4e20f3,0x4e20f3,3 +np.float32,0x7f17d7e9,0x3fc90fdb,3 +np.float32,0x80671f92,0x80671f92,3 +np.float32,0x7f6dd100,0x3fc90fdb,3 +np.float32,0x3f219a4d,0x3f102695,3 +np.float32,0x803c9808,0x803c9808,3 +np.float32,0x3c432ada,0x3c43287d,3 +np.float32,0xbd3db450,0xbd3d91a2,3 +np.float32,0x3baac135,0x3baac0d0,3 +np.float32,0xff7fffe1,0xbfc90fdb,3 +np.float32,0xfe38a6f4,0xbfc90fdb,3 +np.float32,0x3dfb0a04,0x3df9cb04,3 +np.float32,0x800b05c2,0x800b05c2,3 +np.float32,0x644163,0x644163,3 +np.float32,0xff03a025,0xbfc90fdb,3 +np.float32,0x3f7d506c,0x3f47b641,3 +np.float32,0xff0e682a,0xbfc90fdb,3 +np.float32,0x3e09b7b0,0x3e08e567,3 +np.float32,0x7f72a216,0x3fc90fdb,3 +np.float32,0x7f800000,0x3fc90fdb,3 +np.float32,0x8050a281,0x8050a281,3 +np.float32,0x7edafa2f,0x3fc90fdb,3 +np.float32,0x3f4e0df6,0x3f2d7f2f,3 +np.float32,0xbf6728e0,0xbf3c050f,3 +np.float32,0x3e904ce4,0x3e8ca6eb,3 
+np.float32,0x0,0x0,3 +np.float32,0xfd215070,0xbfc90fdb,3 +np.float32,0x7e406b15,0x3fc90fdb,3 +np.float32,0xbf2803c9,0xbf14af18,3 +np.float32,0x5950c8,0x5950c8,3 +np.float32,0xbeddcec8,0xbed14faa,3 +np.float32,0xbec6457e,0xbebd2aa5,3 +np.float32,0xbf42843c,0xbf2656db,3 +np.float32,0x3ee9cba8,0x3edb5163,3 +np.float32,0xbe30c954,0xbe2f0f90,3 +np.float32,0xbeee6b44,0xbedf216f,3 +np.float32,0xbe35d818,0xbe33f7cd,3 +np.float32,0xbe47c630,0xbe454bc6,3 +np.float32,0x801b146f,0x801b146f,3 +np.float32,0x7f6788da,0x3fc90fdb,3 +np.float32,0x3eaef088,0x3ea8927d,3 +np.float32,0x3eb5983e,0x3eae81fc,3 +np.float32,0x40b51d,0x40b51d,3 +np.float32,0xfebddd04,0xbfc90fdb,3 +np.float32,0x3e591aee,0x3e55efea,3 +np.float32,0xbe2b6b48,0xbe29d81f,3 +np.float32,0xff4a8826,0xbfc90fdb,3 +np.float32,0x3e791df0,0x3e745eac,3 +np.float32,0x7c8f681f,0x3fc90fdb,3 +np.float32,0xfe7a15c4,0xbfc90fdb,3 +np.float32,0x3c8963,0x3c8963,3 +np.float32,0x3f0afa0a,0x3efea5cc,3 +np.float32,0xbf0d2680,0xbf00ff29,3 +np.float32,0x3dc306b0,0x3dc27096,3 +np.float32,0x7f4cf105,0x3fc90fdb,3 +np.float32,0xbe196060,0xbe183ea4,3 +np.float32,0x5caf1c,0x5caf1c,3 +np.float32,0x801f2852,0x801f2852,3 +np.float32,0xbe01aa0c,0xbe00fa53,3 +np.float32,0x3f0cfd32,0x3f00df7a,3 +np.float32,0x7d82038e,0x3fc90fdb,3 +np.float32,0x7f7b927f,0x3fc90fdb,3 +np.float32,0xbe93b2e4,0xbe8fcb7f,3 +np.float32,0x1ffe8c,0x1ffe8c,3 +np.float32,0x3faaf6,0x3faaf6,3 +np.float32,0x3e32b1b8,0x3e30e9ab,3 +np.float32,0x802953c0,0x802953c0,3 +np.float32,0xfe5d9844,0xbfc90fdb,3 +np.float32,0x3e1a59d0,0x3e193292,3 +np.float32,0x801c6edc,0x801c6edc,3 +np.float32,0x1ecf41,0x1ecf41,3 +np.float32,0xfe56b09c,0xbfc90fdb,3 +np.float32,0x7e878351,0x3fc90fdb,3 +np.float32,0x3f401e2c,0x3f24cfcb,3 +np.float32,0xbf204a40,0xbf0f35bb,3 +np.float32,0x3e155a98,0x3e144ee1,3 +np.float32,0xbf34f929,0xbf1d8838,3 +np.float32,0x801bbf70,0x801bbf70,3 +np.float32,0x7e7c9730,0x3fc90fdb,3 +np.float32,0x7cc23432,0x3fc90fdb,3 +np.float32,0xbf351638,0xbf1d9b97,3 +np.float32,0x80152094,0x80152094,3 +np.float32,0x3f2d731c,0x3f187219,3 +np.float32,0x804ab0b7,0x804ab0b7,3 +np.float32,0x37d6db,0x37d6db,3 +np.float32,0xbf3ccc56,0xbf22acbf,3 +np.float32,0x3e546f8c,0x3e5176e7,3 +np.float32,0xbe90e87e,0xbe8d3707,3 +np.float32,0x48256c,0x48256c,3 +np.float32,0x7e2468d0,0x3fc90fdb,3 +np.float32,0x807af47e,0x807af47e,3 +np.float32,0x3ed4b221,0x3ec996f0,3 +np.float32,0x3d3b1956,0x3d3af811,3 +np.float32,0xbe69d93c,0xbe65e7f0,3 +np.float32,0xff03ff14,0xbfc90fdb,3 +np.float32,0x801e79dc,0x801e79dc,3 +np.float32,0x3f467c53,0x3f28d63d,3 +np.float32,0x3eab6baa,0x3ea56a1c,3 +np.float32,0xbf15519c,0xbf072d1c,3 +np.float32,0x7f0bd8e8,0x3fc90fdb,3 +np.float32,0xbe1e0d1c,0xbe1cd053,3 +np.float32,0x8016edab,0x8016edab,3 +np.float32,0x7ecaa09b,0x3fc90fdb,3 +np.float32,0x3f72e6d9,0x3f4257a8,3 +np.float32,0xbefe787e,0xbeec29a4,3 +np.float32,0xbee989e8,0xbedb1af9,3 +np.float32,0xbe662db0,0xbe626a45,3 +np.float32,0x495bf7,0x495bf7,3 +np.float32,0x26c379,0x26c379,3 +np.float32,0x7f54d41a,0x3fc90fdb,3 +np.float32,0x801e7dd9,0x801e7dd9,3 +np.float32,0x80000000,0x80000000,3 +np.float32,0xfa3d3000,0xbfc90fdb,3 +np.float32,0xfa3cb800,0xbfc90fdb,3 +np.float32,0x264894,0x264894,3 +np.float32,0xff6de011,0xbfc90fdb,3 +np.float32,0x7e9045b2,0x3fc90fdb,3 +np.float32,0x3f2253a8,0x3f10aaf4,3 +np.float32,0xbd462bf0,0xbd460469,3 +np.float32,0x7f1796af,0x3fc90fdb,3 +np.float32,0x3e718858,0x3e6d3279,3 +np.float32,0xff437d7e,0xbfc90fdb,3 +np.float32,0x805ae7cb,0x805ae7cb,3 +np.float32,0x807e32e9,0x807e32e9,3 +np.float32,0x3ee0bafc,0x3ed3c453,3 
+np.float32,0xbf721dee,0xbf41edc3,3 +np.float32,0xfec9f792,0xbfc90fdb,3 +np.float32,0x7f050720,0x3fc90fdb,3 +np.float32,0x182261,0x182261,3 +np.float32,0x3e39e678,0x3e37e5be,3 +np.float32,0x7e096e4b,0x3fc90fdb,3 +np.float32,0x103715,0x103715,3 +np.float32,0x3f7e7741,0x3f484ae4,3 +np.float32,0x3e29aea5,0x3e28277c,3 +np.float32,0x58c183,0x58c183,3 +np.float32,0xff72fdb2,0xbfc90fdb,3 +np.float32,0xbd9a9420,0xbd9a493c,3 +np.float32,0x7f1e07e7,0x3fc90fdb,3 +np.float32,0xff79f522,0xbfc90fdb,3 +np.float32,0x7c7d0e96,0x3fc90fdb,3 +np.float32,0xbeba9e8e,0xbeb2f504,3 +np.float32,0xfd880a80,0xbfc90fdb,3 +np.float32,0xff7f2a33,0xbfc90fdb,3 +np.float32,0x3e861ae0,0x3e83289c,3 +np.float32,0x7f0161c1,0x3fc90fdb,3 +np.float32,0xfe844ff8,0xbfc90fdb,3 +np.float32,0xbebf4b98,0xbeb7128e,3 +np.float32,0x652bee,0x652bee,3 +np.float32,0xff188a4b,0xbfc90fdb,3 +np.float32,0xbf800000,0xbf490fdb,3 +np.float32,0x80418711,0x80418711,3 +np.float32,0xbeb712d4,0xbeafd1f6,3 +np.float32,0xbf7cee28,0xbf478491,3 +np.float32,0xfe66c59c,0xbfc90fdb,3 +np.float32,0x4166a2,0x4166a2,3 +np.float32,0x3dfa1a2c,0x3df8deb5,3 +np.float32,0xbdbfbcb8,0xbdbf2e0f,3 +np.float32,0xfe60ef70,0xbfc90fdb,3 +np.float32,0xfe009444,0xbfc90fdb,3 +np.float32,0xfeb27aa0,0xbfc90fdb,3 +np.float32,0xbe99f7bc,0xbe95902b,3 +np.float32,0x8043d28d,0x8043d28d,3 +np.float32,0xfe5328c4,0xbfc90fdb,3 +np.float32,0x8017b27e,0x8017b27e,3 +np.float32,0x3ef1d2cf,0x3ee1ebd7,3 +np.float32,0x805ddd90,0x805ddd90,3 +np.float32,0xbf424263,0xbf262d17,3 +np.float32,0xfc99dde0,0xbfc90fdb,3 +np.float32,0xbf7ec13b,0xbf487015,3 +np.float32,0xbef727ea,0xbee64377,3 +np.float32,0xff15ce95,0xbfc90fdb,3 +np.float32,0x1fbba4,0x1fbba4,3 +np.float32,0x3f3b2368,0x3f2198a9,3 +np.float32,0xfefda26e,0xbfc90fdb,3 +np.float32,0x801519ad,0x801519ad,3 +np.float32,0x80473fa2,0x80473fa2,3 +np.float32,0x7e7a8bc1,0x3fc90fdb,3 +np.float32,0x3e8a9289,0x3e87548a,3 +np.float32,0x3ed68987,0x3ecb2872,3 +np.float32,0x805bca66,0x805bca66,3 +np.float32,0x8079c4e3,0x8079c4e3,3 +np.float32,0x3a2510,0x3a2510,3 +np.float32,0x7eedc598,0x3fc90fdb,3 +np.float32,0x80681956,0x80681956,3 +np.float32,0xff64c778,0xbfc90fdb,3 +np.float32,0x806bbc46,0x806bbc46,3 +np.float32,0x433643,0x433643,3 +np.float32,0x705b92,0x705b92,3 +np.float32,0xff359392,0xbfc90fdb,3 +np.float32,0xbee78672,0xbed96fa7,3 +np.float32,0x3e21717b,0x3e202010,3 +np.float32,0xfea13c34,0xbfc90fdb,3 +np.float32,0x2c8895,0x2c8895,3 +np.float32,0x3ed33290,0x3ec84f7c,3 +np.float32,0x3e63031e,0x3e5f662e,3 +np.float32,0x7e30907b,0x3fc90fdb,3 +np.float32,0xbe293708,0xbe27b310,3 +np.float32,0x3ed93738,0x3ecd6ea3,3 +np.float32,0x9db7e,0x9db7e,3 +np.float32,0x3f7cd1b8,0x3f47762c,3 +np.float32,0x3eb5143c,0x3eae0cb0,3 +np.float32,0xbe69b234,0xbe65c2d7,3 +np.float32,0x3f6e74de,0x3f3ffb97,3 +np.float32,0x5d0559,0x5d0559,3 +np.float32,0x3e1e8c30,0x3e1d4c70,3 +np.float32,0xbf2d1878,0xbf1833ef,3 +np.float32,0xff2adf82,0xbfc90fdb,3 +np.float32,0x8012e2c1,0x8012e2c1,3 +np.float32,0x7f031be3,0x3fc90fdb,3 +np.float32,0x805ff94e,0x805ff94e,3 +np.float32,0x3e9d5b27,0x3e98aa31,3 +np.float32,0x3f56d5cf,0x3f32bc9e,3 +np.float32,0x3eaa0412,0x3ea4267f,3 +np.float32,0xbe899ea4,0xbe86712f,3 +np.float32,0x800f2f48,0x800f2f48,3 +np.float32,0x3f1c2269,0x3f0c33ea,3 +np.float32,0x3f4a5f64,0x3f2b3f28,3 +np.float32,0x80739318,0x80739318,3 +np.float32,0x806e9b47,0x806e9b47,3 +np.float32,0x3c8cd300,0x3c8ccf73,3 +np.float32,0x7f39a39d,0x3fc90fdb,3 +np.float32,0x3ec95d61,0x3ebfd9dc,3 +np.float32,0xff351ff8,0xbfc90fdb,3 +np.float32,0xff3a8f58,0xbfc90fdb,3 +np.float32,0x7f313ec0,0x3fc90fdb,3 
+np.float32,0x803aed13,0x803aed13,3 +np.float32,0x7f771d9b,0x3fc90fdb,3 +np.float32,0x8045a6d6,0x8045a6d6,3 +np.float32,0xbc85f280,0xbc85ef72,3 +np.float32,0x7e9c68f5,0x3fc90fdb,3 +np.float32,0xbf0f9379,0xbf02d975,3 +np.float32,0x7e97bcb1,0x3fc90fdb,3 +np.float32,0x804a07d5,0x804a07d5,3 +np.float32,0x802e6117,0x802e6117,3 +np.float32,0x7ed5e388,0x3fc90fdb,3 +np.float32,0x80750455,0x80750455,3 +np.float32,0xff4a8325,0xbfc90fdb,3 +np.float32,0xbedb6866,0xbecf497c,3 +np.float32,0x52ea3b,0x52ea3b,3 +np.float32,0xff773172,0xbfc90fdb,3 +np.float32,0xbeaa8ff0,0xbea4a46e,3 +np.float32,0x7eef2058,0x3fc90fdb,3 +np.float32,0x3f712472,0x3f4169d3,3 +np.float32,0xff6c8608,0xbfc90fdb,3 +np.float32,0xbf6eaa41,0xbf40182a,3 +np.float32,0x3eb03c24,0x3ea9bb34,3 +np.float32,0xfe118cd4,0xbfc90fdb,3 +np.float32,0x3e5b03b0,0x3e57c378,3 +np.float32,0x7f34d92d,0x3fc90fdb,3 +np.float32,0x806c3418,0x806c3418,3 +np.float32,0x7f3074e3,0x3fc90fdb,3 +np.float32,0x8002df02,0x8002df02,3 +np.float32,0x3f6df63a,0x3f3fb7b7,3 +np.float32,0xfd2b4100,0xbfc90fdb,3 +np.float32,0x80363d5c,0x80363d5c,3 +np.float32,0xbeac1f98,0xbea60bd6,3 +np.float32,0xff7fffff,0xbfc90fdb,3 +np.float32,0x80045097,0x80045097,3 +np.float32,0xfe011100,0xbfc90fdb,3 +np.float32,0x80739ef5,0x80739ef5,3 +np.float32,0xff3976ed,0xbfc90fdb,3 +np.float32,0xbe18e3a0,0xbe17c49e,3 +np.float32,0xbe289294,0xbe2712f6,3 +np.float32,0x3f1d41e7,0x3f0d050e,3 +np.float32,0x39364a,0x39364a,3 +np.float32,0x8072b77e,0x8072b77e,3 +np.float32,0x3f7cfec0,0x3f478cf6,3 +np.float32,0x2f68f6,0x2f68f6,3 +np.float32,0xbf031fb8,0xbef25c84,3 +np.float32,0xbf0b842c,0xbeff7afc,3 +np.float32,0x3f081e7e,0x3efa3676,3 +np.float32,0x7f7fffff,0x3fc90fdb,3 +np.float32,0xff15da0e,0xbfc90fdb,3 +np.float32,0x3d2001b2,0x3d1fece1,3 +np.float32,0x7f76efef,0x3fc90fdb,3 +np.float32,0x3f2405dd,0x3f11dfb7,3 +np.float32,0xa0319,0xa0319,3 +np.float32,0x3e23d2bd,0x3e227255,3 +np.float32,0xbd4d4c50,0xbd4d205e,3 +np.float32,0x382344,0x382344,3 +np.float32,0x21bbf,0x21bbf,3 +np.float32,0xbf209e82,0xbf0f7239,3 +np.float32,0xff03bf9f,0xbfc90fdb,3 +np.float32,0x7b1789,0x7b1789,3 +np.float32,0xff314944,0xbfc90fdb,3 +np.float32,0x1a63eb,0x1a63eb,3 +np.float32,0x803dc983,0x803dc983,3 +np.float32,0x3f0ff558,0x3f0323dc,3 +np.float32,0x3f544f2c,0x3f313f58,3 +np.float32,0xff032948,0xbfc90fdb,3 +np.float32,0x7f4933cc,0x3fc90fdb,3 +np.float32,0x7f14c5ed,0x3fc90fdb,3 +np.float32,0x803aeebf,0x803aeebf,3 +np.float32,0xbf0d4c0f,0xbf011bf5,3 +np.float32,0xbeaf8de2,0xbea91f57,3 +np.float32,0xff3ae030,0xbfc90fdb,3 +np.float32,0xbb362d00,0xbb362ce1,3 +np.float32,0x3d1f79e0,0x3d1f6544,3 +np.float32,0x3f56e9d9,0x3f32c860,3 +np.float32,0x3f723e5e,0x3f41fee2,3 +np.float32,0x4c0179,0x4c0179,3 +np.float32,0xfee36132,0xbfc90fdb,3 +np.float32,0x619ae6,0x619ae6,3 +np.float32,0xfde5d670,0xbfc90fdb,3 +np.float32,0xff079ac5,0xbfc90fdb,3 +np.float32,0x3e974fbd,0x3e931fae,3 +np.float32,0x8020ae6b,0x8020ae6b,3 +np.float32,0x6b5af1,0x6b5af1,3 +np.float32,0xbeb57cd6,0xbeae69a3,3 +np.float32,0x806e7eb2,0x806e7eb2,3 +np.float32,0x7e666edb,0x3fc90fdb,3 +np.float32,0xbf458c18,0xbf283ff0,3 +np.float32,0x3e50518e,0x3e4d8399,3 +np.float32,0x3e9ce224,0x3e983b98,3 +np.float32,0x3e6bc067,0x3e67b6c6,3 +np.float32,0x13783d,0x13783d,3 +np.float32,0xff3d518c,0xbfc90fdb,3 +np.float32,0xfeba5968,0xbfc90fdb,3 +np.float32,0xbf0b9f76,0xbeffa50f,3 +np.float32,0xfe174900,0xbfc90fdb,3 +np.float32,0x3f38bb0a,0x3f200527,3 +np.float32,0x7e94a77d,0x3fc90fdb,3 +np.float32,0x29d776,0x29d776,3 +np.float32,0xbf4e058d,0xbf2d7a15,3 +np.float32,0xbd94abc8,0xbd946923,3 
+np.float32,0xbee62db0,0xbed85124,3 +np.float32,0x800000,0x800000,3 +np.float32,0xbef1df7e,0xbee1f636,3 +np.float32,0xbcf3cd20,0xbcf3bab5,3 +np.float32,0x80007b05,0x80007b05,3 +np.float32,0x3d9b3f2e,0x3d9af351,3 +np.float32,0xbf714a68,0xbf417dee,3 +np.float32,0xbf2a2d37,0xbf163069,3 +np.float32,0x8055104f,0x8055104f,3 +np.float32,0x7f5c40d7,0x3fc90fdb,3 +np.float32,0x1,0x1,3 +np.float32,0xff35f3a6,0xbfc90fdb,3 +np.float32,0xd9c7c,0xd9c7c,3 +np.float32,0xbf440cfc,0xbf274f22,3 +np.float32,0x8050ac43,0x8050ac43,3 +np.float32,0x63ee16,0x63ee16,3 +np.float32,0x7d90419b,0x3fc90fdb,3 +np.float32,0xfee22198,0xbfc90fdb,3 +np.float32,0xc2ead,0xc2ead,3 +np.float32,0x7f5cd6a6,0x3fc90fdb,3 +np.float32,0x3f6fab7e,0x3f40a184,3 +np.float32,0x3ecf998c,0x3ec53a73,3 +np.float32,0x7e5271f0,0x3fc90fdb,3 +np.float32,0x67c016,0x67c016,3 +np.float32,0x2189c8,0x2189c8,3 +np.float32,0x27d892,0x27d892,3 +np.float32,0x3f0d02c4,0x3f00e3c0,3 +np.float32,0xbf69ebca,0xbf3d8862,3 +np.float32,0x3e60c0d6,0x3e5d3ebb,3 +np.float32,0x3f45206c,0x3f27fc66,3 +np.float32,0xbf6b47dc,0xbf3e4592,3 +np.float32,0xfe9be2e2,0xbfc90fdb,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0xff271562,0xbfc90fdb,3 +np.float32,0x3e2e5270,0x3e2caaaf,3 +np.float32,0x80222934,0x80222934,3 +np.float32,0xbd01d220,0xbd01c701,3 +np.float32,0x223aa0,0x223aa0,3 +np.float32,0x3f4b5a7e,0x3f2bd967,3 +np.float32,0x3f217d85,0x3f101200,3 +np.float32,0xbf57663a,0xbf331144,3 +np.float32,0x3f219862,0x3f102536,3 +np.float32,0x28a28c,0x28a28c,3 +np.float32,0xbf3f55f4,0xbf244f86,3 +np.float32,0xbf3de287,0xbf236092,3 +np.float32,0xbf1c1ce2,0xbf0c2fe3,3 +np.float32,0x80000001,0x80000001,3 +np.float32,0x3db695d0,0x3db61a90,3 +np.float32,0x6c39bf,0x6c39bf,3 +np.float32,0x7e33a12f,0x3fc90fdb,3 +np.float32,0x67623a,0x67623a,3 +np.float32,0x3e45dc54,0x3e4373b6,3 +np.float32,0x7f62fa68,0x3fc90fdb,3 +np.float32,0x3f0e1d01,0x3f01bbe5,3 +np.float32,0x3f13dc69,0x3f0615f5,3 +np.float32,0x246703,0x246703,3 +np.float32,0xbf1055b5,0xbf036d07,3 +np.float32,0x7f46d3d0,0x3fc90fdb,3 +np.float32,0x3d2b8086,0x3d2b66e5,3 +np.float32,0xbf03be44,0xbef35776,3 +np.float32,0x3f800000,0x3f490fdb,3 +np.float32,0xbec8d226,0xbebf613d,3 +np.float32,0x3d8faf00,0x3d8f72d4,3 +np.float32,0x170c4e,0x170c4e,3 +np.float32,0xff14c0f0,0xbfc90fdb,3 +np.float32,0xff16245d,0xbfc90fdb,3 +np.float32,0x7f44ce6d,0x3fc90fdb,3 +np.float32,0xbe8175d8,0xbe7d9aeb,3 +np.float32,0x3df7a4a1,0x3df67254,3 +np.float32,0xfe2cc46c,0xbfc90fdb,3 +np.float32,0x3f284e63,0x3f14e335,3 +np.float32,0x7e46e5d6,0x3fc90fdb,3 +np.float32,0x397be4,0x397be4,3 +np.float32,0xbf2560bc,0xbf12d50b,3 +np.float32,0x3ed9b8c1,0x3ecddc60,3 +np.float32,0xfec18c5a,0xbfc90fdb,3 +np.float32,0x64894d,0x64894d,3 +np.float32,0x36a65d,0x36a65d,3 +np.float32,0x804ffcd7,0x804ffcd7,3 +np.float32,0x800f79e4,0x800f79e4,3 +np.float32,0x5d45ac,0x5d45ac,3 +np.float32,0x6cdda0,0x6cdda0,3 +np.float32,0xbf7f2077,0xbf489fe5,3 +np.float32,0xbf152f78,0xbf0713a1,3 +np.float32,0x807bf344,0x807bf344,3 +np.float32,0x3f775023,0x3f44a4d8,3 +np.float32,0xbf3edf67,0xbf240365,3 +np.float32,0x7eed729c,0x3fc90fdb,3 +np.float32,0x14cc29,0x14cc29,3 +np.float32,0x7edd7b6b,0x3fc90fdb,3 +np.float32,0xbf3c6e2c,0xbf226fb7,3 +np.float32,0x51b9ad,0x51b9ad,3 +np.float32,0x3f617ee8,0x3f38dd7c,3 +np.float32,0xff800000,0xbfc90fdb,3 +np.float32,0x7f440ea0,0x3fc90fdb,3 +np.float32,0x3e639893,0x3e5ff49e,3 +np.float32,0xbd791bb0,0xbd78cd3c,3 +np.float32,0x8059fcbc,0x8059fcbc,3 +np.float32,0xbf7d1214,0xbf4796bd,3 +np.float32,0x3ef368fa,0x3ee33788,3 +np.float32,0xbecec0f4,0xbec48055,3 
+np.float32,0xbc83d940,0xbc83d656,3 +np.float32,0xbce01220,0xbce003d4,3 +np.float32,0x803192a5,0x803192a5,3 +np.float32,0xbe40e0c0,0xbe3ea4f0,3 +np.float32,0xfb692600,0xbfc90fdb,3 +np.float32,0x3f1bec65,0x3f0c0c88,3 +np.float32,0x7f042798,0x3fc90fdb,3 +np.float32,0xbe047374,0xbe03b83b,3 +np.float32,0x7f7c6630,0x3fc90fdb,3 +np.float32,0x7f58dae3,0x3fc90fdb,3 +np.float32,0x80691c92,0x80691c92,3 +np.float32,0x7dbe76,0x7dbe76,3 +np.float32,0xbf231384,0xbf11339d,3 +np.float32,0xbef4acf8,0xbee43f8b,3 +np.float32,0x3ee9f9d0,0x3edb7793,3 +np.float32,0x3f0064f6,0x3eee04a8,3 +np.float32,0x313732,0x313732,3 +np.float32,0xfd58cf80,0xbfc90fdb,3 +np.float32,0x3f7a2bc9,0x3f461d30,3 +np.float32,0x7f7681af,0x3fc90fdb,3 +np.float32,0x7f504211,0x3fc90fdb,3 +np.float32,0xfeae0c00,0xbfc90fdb,3 +np.float32,0xbee14396,0xbed436d1,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x693406,0x693406,3 +np.float32,0x3eb4a679,0x3eadab1b,3 +np.float32,0x550505,0x550505,3 +np.float32,0xfd493d10,0xbfc90fdb,3 +np.float32,0x3f4fc907,0x3f2e8b2c,3 +np.float32,0x80799aa4,0x80799aa4,3 +np.float32,0xff1ea89b,0xbfc90fdb,3 +np.float32,0xff424510,0xbfc90fdb,3 +np.float32,0x7f68d026,0x3fc90fdb,3 +np.float32,0xbea230ca,0xbe9d1200,3 +np.float32,0x7ea585da,0x3fc90fdb,3 +np.float32,0x3f3db211,0x3f23414c,3 +np.float32,0xfea4d964,0xbfc90fdb,3 +np.float32,0xbf17fe18,0xbf092984,3 +np.float32,0x7cc8a2,0x7cc8a2,3 +np.float32,0xff0330ba,0xbfc90fdb,3 +np.float32,0x3f769835,0x3f444592,3 +np.float32,0xeb0ac,0xeb0ac,3 +np.float32,0x7f7e45de,0x3fc90fdb,3 +np.float32,0xbdb510a8,0xbdb49873,3 +np.float32,0x3ebf900b,0x3eb74e9c,3 +np.float32,0xbf21bbce,0xbf103e89,3 +np.float32,0xbf3f4682,0xbf24459d,3 +np.float32,0x7eb6e9c8,0x3fc90fdb,3 +np.float32,0xbf42532d,0xbf2637be,3 +np.float32,0xbd3b2600,0xbd3b04b4,3 +np.float32,0x3f1fa9aa,0x3f0ec23e,3 +np.float32,0x7ed6a0f1,0x3fc90fdb,3 +np.float32,0xff4759a1,0xbfc90fdb,3 +np.float32,0x6d26e3,0x6d26e3,3 +np.float32,0xfe1108e0,0xbfc90fdb,3 +np.float32,0xfdf76900,0xbfc90fdb,3 +np.float32,0xfec66f22,0xbfc90fdb,3 +np.float32,0xbf3d097f,0xbf22d458,3 +np.float32,0x3d85be25,0x3d858d99,3 +np.float32,0x7f36739f,0x3fc90fdb,3 +np.float32,0x7bc0a304,0x3fc90fdb,3 +np.float32,0xff48dd90,0xbfc90fdb,3 +np.float32,0x48cab0,0x48cab0,3 +np.float32,0x3ed3943c,0x3ec8a2ef,3 +np.float32,0xbf61488e,0xbf38bede,3 +np.float32,0x3f543df5,0x3f313525,3 +np.float32,0x5cf2ca,0x5cf2ca,3 +np.float32,0x572686,0x572686,3 +np.float32,0x80369c7c,0x80369c7c,3 +np.float32,0xbd2c1d20,0xbd2c0338,3 +np.float32,0x3e255428,0x3e23ea0b,3 +np.float32,0xbeba9ee0,0xbeb2f54c,3 +np.float32,0x8015c165,0x8015c165,3 +np.float32,0x3d31f488,0x3d31d7e6,3 +np.float32,0x3f68591c,0x3f3cac43,3 +np.float32,0xf5ed5,0xf5ed5,3 +np.float32,0xbf3b1d34,0xbf21949e,3 +np.float32,0x1f0343,0x1f0343,3 +np.float32,0x3f0e52b5,0x3f01e4ef,3 +np.float32,0x7f57c596,0x3fc90fdb,3 +np.float64,0x7fd8e333ddb1c667,0x3ff921fb54442d18,1 +np.float64,0x800bcc9cdad7993a,0x800bcc9cdad7993a,1 +np.float64,0x3fcd6f81df3adf00,0x3fcceebbafc5d55e,1 +np.float64,0x3fed7338a57ae671,0x3fe7ce3e5811fc0a,1 +np.float64,0x7fe64994fcac9329,0x3ff921fb54442d18,1 +np.float64,0xfa5a6345f4b4d,0xfa5a6345f4b4d,1 +np.float64,0xe9dcd865d3b9b,0xe9dcd865d3b9b,1 +np.float64,0x7fea6cffabf4d9fe,0x3ff921fb54442d18,1 +np.float64,0xa9e1de6153c3c,0xa9e1de6153c3c,1 +np.float64,0xab6bdc5356d7c,0xab6bdc5356d7c,1 +np.float64,0x80062864a02c50ca,0x80062864a02c50ca,1 +np.float64,0xbfdac03aa7b58076,0xbfd9569f3230128d,1 +np.float64,0xbfe61b77752c36ef,0xbfe3588f51b8be8f,1 +np.float64,0x800bc854c8d790aa,0x800bc854c8d790aa,1 
+np.float64,0x3feed1a2da3da346,0x3fe887f9b8ea031f,1 +np.float64,0x3fe910d3697221a7,0x3fe54365a53d840e,1 +np.float64,0x7fe7ab4944ef5692,0x3ff921fb54442d18,1 +np.float64,0x3fa462f1a028c5e3,0x3fa460303a6a4e69,1 +np.float64,0x800794f1a3af29e4,0x800794f1a3af29e4,1 +np.float64,0x3fee6fe7fafcdfd0,0x3fe854f863816d55,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7f336472fe66d,0x7f336472fe66d,1 +np.float64,0xffb1623ac822c478,0xbff921fb54442d18,1 +np.float64,0x3fbacd68ce359ad2,0x3fbab480b3638846,1 +np.float64,0xffd5c02706ab804e,0xbff921fb54442d18,1 +np.float64,0xbfd4daf03d29b5e0,0xbfd42928f069c062,1 +np.float64,0x800c6e85dbd8dd0c,0x800c6e85dbd8dd0c,1 +np.float64,0x800e3599c5bc6b34,0x800e3599c5bc6b34,1 +np.float64,0x2c0d654c581ad,0x2c0d654c581ad,1 +np.float64,0xbfdd3eb13fba7d62,0xbfdb6e8143302de7,1 +np.float64,0x800b60cb8776c197,0x800b60cb8776c197,1 +np.float64,0x80089819ad113034,0x80089819ad113034,1 +np.float64,0x29fe721453fcf,0x29fe721453fcf,1 +np.float64,0x3fe8722f4df0e45f,0x3fe4e026d9eadb4d,1 +np.float64,0xffd1fbcd01a3f79a,0xbff921fb54442d18,1 +np.float64,0x7fc74e1e982e9c3c,0x3ff921fb54442d18,1 +np.float64,0x800c09d3d15813a8,0x800c09d3d15813a8,1 +np.float64,0xbfeee4578b3dc8af,0xbfe891ab3d6c3ce4,1 +np.float64,0xffdd01a6f33a034e,0xbff921fb54442d18,1 +np.float64,0x7fcc130480382608,0x3ff921fb54442d18,1 +np.float64,0xffcbb6bd1d376d7c,0xbff921fb54442d18,1 +np.float64,0xc068a53780d15,0xc068a53780d15,1 +np.float64,0xbfc974f15532e9e4,0xbfc92100b355f3e7,1 +np.float64,0x3fe6da79442db4f3,0x3fe3d87393b082e7,1 +np.float64,0xd9d9be4db3b38,0xd9d9be4db3b38,1 +np.float64,0x5ea50a20bd4a2,0x5ea50a20bd4a2,1 +np.float64,0xbfe5597f7d2ab2ff,0xbfe2d3ccc544b52b,1 +np.float64,0x80019364e4e326cb,0x80019364e4e326cb,1 +np.float64,0x3fed2902c3fa5206,0x3fe7a5e1df07e5c1,1 +np.float64,0xbfa7b72b5c2f6e50,0xbfa7b2d545b3cc1f,1 +np.float64,0xffdb60dd43b6c1ba,0xbff921fb54442d18,1 +np.float64,0x81a65d8b034cc,0x81a65d8b034cc,1 +np.float64,0x8000c30385818608,0x8000c30385818608,1 +np.float64,0x6022f5f4c045f,0x6022f5f4c045f,1 +np.float64,0x8007a2bb810f4578,0x8007a2bb810f4578,1 +np.float64,0x7fdc68893238d111,0x3ff921fb54442d18,1 +np.float64,0x7fd443454ea8868a,0x3ff921fb54442d18,1 +np.float64,0xffe6b04209ed6084,0xbff921fb54442d18,1 +np.float64,0x7fcd9733d13b2e67,0x3ff921fb54442d18,1 +np.float64,0xf5ee80a9ebdd0,0xf5ee80a9ebdd0,1 +np.float64,0x3fe3788e8de6f11e,0x3fe17dec7e6843a0,1 +np.float64,0x3fee36f62f7c6dec,0x3fe836f832515b43,1 +np.float64,0xf6cb49aded969,0xf6cb49aded969,1 +np.float64,0x3fd2b15ea4a562bc,0x3fd22fdc09920e67,1 +np.float64,0x7fccf6aef139ed5d,0x3ff921fb54442d18,1 +np.float64,0x3fd396b8ce272d72,0x3fd3026118857bd4,1 +np.float64,0x7fe53d3c80ea7a78,0x3ff921fb54442d18,1 +np.float64,0x3feae88fc4f5d120,0x3fe65fb04b18ef7a,1 +np.float64,0x3fedc643747b8c86,0x3fe7fafa6c20e25a,1 +np.float64,0xffdb2dc0df365b82,0xbff921fb54442d18,1 +np.float64,0xbfa2af3658255e70,0xbfa2ad17348f4253,1 +np.float64,0x3f8aa77b30354ef6,0x3f8aa71892336a69,1 +np.float64,0xbfdd1b1efbba363e,0xbfdb510dcd186820,1 +np.float64,0x800f50d99c5ea1b3,0x800f50d99c5ea1b3,1 +np.float64,0xff6ed602403dac00,0xbff921fb54442d18,1 +np.float64,0x800477d71aa8efaf,0x800477d71aa8efaf,1 +np.float64,0xbfe729a9e86e5354,0xbfe40ca78d9eefcf,1 +np.float64,0x3fd81ab2d4303566,0x3fd70d7e3937ea22,1 +np.float64,0xb617cbab6c2fa,0xb617cbab6c2fa,1 +np.float64,0x7fefffffffffffff,0x3ff921fb54442d18,1 +np.float64,0xffa40933ac281260,0xbff921fb54442d18,1 +np.float64,0xbfe1ede621e3dbcc,0xbfe057bb2b341ced,1 +np.float64,0xbfec700f03b8e01e,0xbfe73fb190bc722e,1 
+np.float64,0x6e28af02dc517,0x6e28af02dc517,1 +np.float64,0x3fe37ad37ae6f5a7,0x3fe17f94674818a9,1 +np.float64,0x8000cbdeeae197bf,0x8000cbdeeae197bf,1 +np.float64,0x3fe8fd1f01f1fa3e,0x3fe5372bbec5d72c,1 +np.float64,0x3f8f9229103f2452,0x3f8f918531894256,1 +np.float64,0x800536858e0a6d0c,0x800536858e0a6d0c,1 +np.float64,0x7fe82bb4f9f05769,0x3ff921fb54442d18,1 +np.float64,0xffc1c2fb592385f8,0xbff921fb54442d18,1 +np.float64,0x7f924ddfc0249bbf,0x3ff921fb54442d18,1 +np.float64,0xffd5e125c52bc24c,0xbff921fb54442d18,1 +np.float64,0xbfef0d8738be1b0e,0xbfe8a6ef17b16c10,1 +np.float64,0x3fc9c8875233910f,0x3fc9715e708503cb,1 +np.float64,0xbfe2d926f4e5b24e,0xbfe108956e61cbb3,1 +np.float64,0x7fd61c496dac3892,0x3ff921fb54442d18,1 +np.float64,0x7fed545c6b7aa8b8,0x3ff921fb54442d18,1 +np.float64,0x8003746fea86e8e1,0x8003746fea86e8e1,1 +np.float64,0x3fdf515e75bea2bd,0x3fdd201a5585caa3,1 +np.float64,0xffda87c8ee350f92,0xbff921fb54442d18,1 +np.float64,0xffc675d8e22cebb0,0xbff921fb54442d18,1 +np.float64,0xffcdc173433b82e8,0xbff921fb54442d18,1 +np.float64,0xffed9df1517b3be2,0xbff921fb54442d18,1 +np.float64,0x3fd6a2eec72d45de,0x3fd5c1f1d7dcddcf,1 +np.float64,0xffec116a66f822d4,0xbff921fb54442d18,1 +np.float64,0x8007c2a2458f8545,0x8007c2a2458f8545,1 +np.float64,0x3fe4ee80d969dd02,0x3fe2895076094668,1 +np.float64,0x3fe3cae7116795ce,0x3fe1b9c07e0d03a7,1 +np.float64,0xbfd81bf8d8b037f2,0xbfd70e9bbbb4ca57,1 +np.float64,0x800c88ccd1f9119a,0x800c88ccd1f9119a,1 +np.float64,0xffdab2aee2b5655e,0xbff921fb54442d18,1 +np.float64,0x3fe743d227ee87a4,0x3fe41dcaef186d96,1 +np.float64,0x3fb060fd0220c1fa,0x3fb05b47f56ebbb4,1 +np.float64,0xbfd3f03772a7e06e,0xbfd3541522377291,1 +np.float64,0x190a5ae03216,0x190a5ae03216,1 +np.float64,0x3fe48c71916918e4,0x3fe24442f45b3183,1 +np.float64,0x800862470590c48e,0x800862470590c48e,1 +np.float64,0x7fd3ced89d279db0,0x3ff921fb54442d18,1 +np.float64,0x3feb3d9b4ab67b37,0x3fe69140cf2623f7,1 +np.float64,0xbc3f296b787e5,0xbc3f296b787e5,1 +np.float64,0xbfed6b905dfad721,0xbfe7ca1881a8c0fd,1 +np.float64,0xbfe621c2aaac4386,0xbfe35cd1969a82db,1 +np.float64,0x8009e7b17593cf63,0x8009e7b17593cf63,1 +np.float64,0x80045f580ca8beb1,0x80045f580ca8beb1,1 +np.float64,0xbfea2e177e745c2f,0xbfe5f13971633339,1 +np.float64,0x3fee655787fccab0,0x3fe84f6b98b6de26,1 +np.float64,0x3fc9cde92f339bd0,0x3fc9768a88b2c97c,1 +np.float64,0x3fc819c3b3303388,0x3fc7d25e1526e731,1 +np.float64,0x3fd3e848d2a7d090,0x3fd34cd9e6af558f,1 +np.float64,0x3fe19dacac633b5a,0x3fe01a6b4d27adc2,1 +np.float64,0x800b190da316321c,0x800b190da316321c,1 +np.float64,0xd5c69711ab8d3,0xd5c69711ab8d3,1 +np.float64,0xbfdc31bed7b8637e,0xbfda8ea3c1309d6d,1 +np.float64,0xbfd02ba007a05740,0xbfcfad86f0d756dc,1 +np.float64,0x3fe874473d70e88e,0x3fe4e1793cd82123,1 +np.float64,0xffb465585c28cab0,0xbff921fb54442d18,1 +np.float64,0xbfb5d8e13e2bb1c0,0xbfb5cb5c7807fc4d,1 +np.float64,0xffe80f933bf01f26,0xbff921fb54442d18,1 +np.float64,0x7feea783f5fd4f07,0x3ff921fb54442d18,1 +np.float64,0xbfae6665f43cccd0,0xbfae5d45b0a6f90a,1 +np.float64,0x800bd6ef5a77addf,0x800bd6ef5a77addf,1 +np.float64,0x800d145babda28b8,0x800d145babda28b8,1 +np.float64,0x39de155473bc3,0x39de155473bc3,1 +np.float64,0x3fefbd6bb1ff7ad8,0x3fe9008e73a3296e,1 +np.float64,0x3fc40bca3d281798,0x3fc3e2710e167007,1 +np.float64,0x3fcae0918335c120,0x3fca7e09e704a678,1 +np.float64,0x51287fbea2511,0x51287fbea2511,1 +np.float64,0x7fa6bc33a82d7866,0x3ff921fb54442d18,1 +np.float64,0xe72a2bebce546,0xe72a2bebce546,1 +np.float64,0x3fe1c8fd686391fa,0x3fe03b9622aeb4e3,1 +np.float64,0x3fe2a73ac3654e76,0x3fe0e36bc1ee4ac4,1 
+np.float64,0x59895218b312b,0x59895218b312b,1 +np.float64,0xc6dc25c78db85,0xc6dc25c78db85,1 +np.float64,0xbfc06cfac520d9f4,0xbfc0561f85d2c907,1 +np.float64,0xbfea912dc4f5225c,0xbfe62c3b1c01c793,1 +np.float64,0x3fb78ce89a2f19d0,0x3fb77bfcb65a67d3,1 +np.float64,0xbfece5cdea39cb9c,0xbfe78103d24099e5,1 +np.float64,0x30d3054e61a61,0x30d3054e61a61,1 +np.float64,0xbfd3fe26fba7fc4e,0xbfd360c8447c4f7a,1 +np.float64,0x800956072a92ac0f,0x800956072a92ac0f,1 +np.float64,0x7fe639b3b6ec7366,0x3ff921fb54442d18,1 +np.float64,0x800ee30240bdc605,0x800ee30240bdc605,1 +np.float64,0x7fef6af0d2bed5e1,0x3ff921fb54442d18,1 +np.float64,0xffefce8725ff9d0d,0xbff921fb54442d18,1 +np.float64,0x3fe2e311da65c624,0x3fe10ff1623089dc,1 +np.float64,0xbfe7e5cbe56fcb98,0xbfe486c3daeda67c,1 +np.float64,0x80095bc14472b783,0x80095bc14472b783,1 +np.float64,0xffef0cb4553e1968,0xbff921fb54442d18,1 +np.float64,0xe3e60567c7cc1,0xe3e60567c7cc1,1 +np.float64,0xffde919f06bd233e,0xbff921fb54442d18,1 +np.float64,0x3fe3f9632e27f2c6,0x3fe1db49ebd21c4e,1 +np.float64,0x9dee9a233bdd4,0x9dee9a233bdd4,1 +np.float64,0xbfe3bb0602e7760c,0xbfe1ae41b6d4c488,1 +np.float64,0x3fc46945a128d288,0x3fc43da54c6c6a2a,1 +np.float64,0x7fdef149ac3de292,0x3ff921fb54442d18,1 +np.float64,0x800a96c76d752d8f,0x800a96c76d752d8f,1 +np.float64,0x3f971a32382e3464,0x3f9719316b9e9baf,1 +np.float64,0x7fe97bcf15b2f79d,0x3ff921fb54442d18,1 +np.float64,0x7fea894558f5128a,0x3ff921fb54442d18,1 +np.float64,0x3fc9e3be1933c780,0x3fc98b847c3923eb,1 +np.float64,0x3f7accac40359959,0x3f7acc9330741b64,1 +np.float64,0xa80c136950183,0xa80c136950183,1 +np.float64,0x3fe408732b2810e6,0x3fe1e61e7cbc8824,1 +np.float64,0xffa775bc042eeb80,0xbff921fb54442d18,1 +np.float64,0x3fbf04bd223e0980,0x3fbede37b8fc697e,1 +np.float64,0x7fd999b34c333366,0x3ff921fb54442d18,1 +np.float64,0xe72146dfce429,0xe72146dfce429,1 +np.float64,0x4f511ee49ea24,0x4f511ee49ea24,1 +np.float64,0xffb3e6e58827cdc8,0xbff921fb54442d18,1 +np.float64,0x3fd1f180cfa3e300,0x3fd17e85b2871de2,1 +np.float64,0x97c8e45b2f91d,0x97c8e45b2f91d,1 +np.float64,0xbfeeb20e88fd641d,0xbfe8778f878440bf,1 +np.float64,0xbfe1fc6dee23f8dc,0xbfe062c815a93cde,1 +np.float64,0xab4bf71f5697f,0xab4bf71f5697f,1 +np.float64,0xa9675a2952cec,0xa9675a2952cec,1 +np.float64,0xbfef3ea4a33e7d49,0xbfe8c02743ebc1b6,1 +np.float64,0x3fe22a2eafa4545d,0x3fe08577afca52a9,1 +np.float64,0x3fe8a08daaf1411c,0x3fe4fd5a34f05305,1 +np.float64,0xbfc6cda77b2d9b50,0xbfc6910bcfa0cf4f,1 +np.float64,0x3fec398394387307,0x3fe7211dd5276500,1 +np.float64,0x3fe36c95c626d92c,0x3fe1752e5aa2357b,1 +np.float64,0xffd8b9e7073173ce,0xbff921fb54442d18,1 +np.float64,0xffe19f043ae33e08,0xbff921fb54442d18,1 +np.float64,0x800e3640709c6c81,0x800e3640709c6c81,1 +np.float64,0x3fe7d6c20aafad84,0x3fe47d1a3307d9c8,1 +np.float64,0x80093fd63b727fad,0x80093fd63b727fad,1 +np.float64,0xffe1a671a4634ce3,0xbff921fb54442d18,1 +np.float64,0xbfe53a6b386a74d6,0xbfe2be41859cb10d,1 +np.float64,0xbfed149a097a2934,0xbfe79ab7e3e93c1c,1 +np.float64,0x7fc2769a5724ed34,0x3ff921fb54442d18,1 +np.float64,0xffd01e4e99a03c9e,0xbff921fb54442d18,1 +np.float64,0xa61f38434c3e7,0xa61f38434c3e7,1 +np.float64,0x800ad4ac5195a959,0x800ad4ac5195a959,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x80034a45b6c6948c,0x80034a45b6c6948c,1 +np.float64,0x6350b218c6a17,0x6350b218c6a17,1 +np.float64,0xfff0000000000000,0xbff921fb54442d18,1 +np.float64,0x3fe363e759e6c7cf,0x3fe16ed58d80f9ce,1 +np.float64,0xffe3b98e59e7731c,0xbff921fb54442d18,1 +np.float64,0x3fdbf7b40337ef68,0x3fda5df7ad3c80f9,1 
+np.float64,0xbfe9cdf784739bef,0xbfe5b74f346ef93d,1 +np.float64,0xbfc321bea326437c,0xbfc2fdc0d4ff7561,1 +np.float64,0xbfe40f77d2a81ef0,0xbfe1eb28c4ae4dde,1 +np.float64,0x7fe071806960e300,0x3ff921fb54442d18,1 +np.float64,0x7fd269006ea4d200,0x3ff921fb54442d18,1 +np.float64,0x80017a56e0e2f4af,0x80017a56e0e2f4af,1 +np.float64,0x8004b4ea09a969d5,0x8004b4ea09a969d5,1 +np.float64,0xbfedbb01e63b7604,0xbfe7f4f0e84297df,1 +np.float64,0x3fe44454826888a9,0x3fe210ff6d005706,1 +np.float64,0xbfe0e77e6ea1cefd,0xbfdf1a977da33402,1 +np.float64,0xbfed6d4c8c3ada99,0xbfe7cb0932093f60,1 +np.float64,0x1d74cb9e3ae9a,0x1d74cb9e3ae9a,1 +np.float64,0x80082a785d1054f1,0x80082a785d1054f1,1 +np.float64,0x3fe58393266b0726,0x3fe2f0d8e91d4887,1 +np.float64,0xffe4028899680510,0xbff921fb54442d18,1 +np.float64,0x783a2e5af0746,0x783a2e5af0746,1 +np.float64,0x7fcdce88e73b9d11,0x3ff921fb54442d18,1 +np.float64,0x3fc58672a72b0ce5,0x3fc5535e090e56e2,1 +np.float64,0x800889c839b11391,0x800889c839b11391,1 +np.float64,0xffe5e05c466bc0b8,0xbff921fb54442d18,1 +np.float64,0xbfcbef6ebe37dedc,0xbfcb810752468f49,1 +np.float64,0xffe9408563b2810a,0xbff921fb54442d18,1 +np.float64,0xbfee4738367c8e70,0xbfe83f8e5dd7602f,1 +np.float64,0xbfe4aeb587295d6b,0xbfe25c7a0c76a454,1 +np.float64,0xffc9aea0a7335d40,0xbff921fb54442d18,1 +np.float64,0xe1e02199c3c04,0xe1e02199c3c04,1 +np.float64,0xbfbd9400783b2800,0xbfbd729345d1d14f,1 +np.float64,0x7a5418bcf4a84,0x7a5418bcf4a84,1 +np.float64,0x3fdc1c2fa5b83860,0x3fda7c935965ae72,1 +np.float64,0x80076a9f58ced53f,0x80076a9f58ced53f,1 +np.float64,0x3fedc4bf957b897f,0x3fe7fa2a83148f1c,1 +np.float64,0x800981b8a9d30372,0x800981b8a9d30372,1 +np.float64,0xffe1082311621046,0xbff921fb54442d18,1 +np.float64,0xe0091f89c0124,0xe0091f89c0124,1 +np.float64,0xbfce8d674f3d1ad0,0xbfcdfdbf2ddaa0ca,1 +np.float64,0x800516e72eaa2dcf,0x800516e72eaa2dcf,1 +np.float64,0xffe61ee64c6c3dcc,0xbff921fb54442d18,1 +np.float64,0x7fed2683cafa4d07,0x3ff921fb54442d18,1 +np.float64,0xffd4faf27729f5e4,0xbff921fb54442d18,1 +np.float64,0x7fe308fa842611f4,0x3ff921fb54442d18,1 +np.float64,0x3fc612a62b2c2550,0x3fc5db9ddbd4e159,1 +np.float64,0xbfe5b01e766b603d,0xbfe30f72a875e988,1 +np.float64,0x3fc2dd8b9a25bb17,0x3fc2bb06246b9f78,1 +np.float64,0x8170908102e12,0x8170908102e12,1 +np.float64,0x800c1c8a8a583915,0x800c1c8a8a583915,1 +np.float64,0xffe5d91e8b6bb23c,0xbff921fb54442d18,1 +np.float64,0xffd140adee22815c,0xbff921fb54442d18,1 +np.float64,0xbfe2f1f5f8e5e3ec,0xbfe11afa5d749952,1 +np.float64,0xbfed6d1d587ada3b,0xbfe7caef9ecf7651,1 +np.float64,0x3fe9b85e67f370bd,0x3fe5aa3474768982,1 +np.float64,0x7fdc8932edb91265,0x3ff921fb54442d18,1 +np.float64,0x7fd136bc54a26d78,0x3ff921fb54442d18,1 +np.float64,0x800a1ea12a343d43,0x800a1ea12a343d43,1 +np.float64,0x3fec6a5c1b78d4b8,0x3fe73c82235c3f8f,1 +np.float64,0x800fbf6a00df7ed4,0x800fbf6a00df7ed4,1 +np.float64,0xbfd0e6e0cda1cdc2,0xbfd0864bf8cad294,1 +np.float64,0x3fc716df482e2dbf,0x3fc6d7fbfd4a8470,1 +np.float64,0xbfe75990936eb321,0xbfe42bffec3fa0d7,1 +np.float64,0x3fd58e54a02b1ca9,0x3fd4cace1107a5cc,1 +np.float64,0xbfc9c04136338084,0xbfc9696ad2591d54,1 +np.float64,0xdd1f0147ba3e0,0xdd1f0147ba3e0,1 +np.float64,0x5c86a940b90e,0x5c86a940b90e,1 +np.float64,0xbfecae3b8e795c77,0xbfe7624d4988c612,1 +np.float64,0xffd0370595206e0c,0xbff921fb54442d18,1 +np.float64,0xbfdc26d443384da8,0xbfda857ecd33ba9f,1 +np.float64,0xbfd1c849d9a39094,0xbfd15849449cc378,1 +np.float64,0xffee04acdb3c0959,0xbff921fb54442d18,1 +np.float64,0xbfded1056dbda20a,0xbfdcb83b30e1528c,1 +np.float64,0x7fb7b826622f704c,0x3ff921fb54442d18,1 
+np.float64,0xbfee4df8ae7c9bf1,0xbfe8431df9dfd05d,1 +np.float64,0x7fe7f3670e2fe6cd,0x3ff921fb54442d18,1 +np.float64,0x8008ac9ae0d15936,0x8008ac9ae0d15936,1 +np.float64,0x800dce9f3b3b9d3f,0x800dce9f3b3b9d3f,1 +np.float64,0x7fbb19db203633b5,0x3ff921fb54442d18,1 +np.float64,0x3fe56c7f302ad8fe,0x3fe2e0eec3ad45fd,1 +np.float64,0x7fe82c05c570580b,0x3ff921fb54442d18,1 +np.float64,0xc0552b7780aa6,0xc0552b7780aa6,1 +np.float64,0x39d40e3073a83,0x39d40e3073a83,1 +np.float64,0x3fd8db54d731b6aa,0x3fd7b589b3ee9b20,1 +np.float64,0xffcdd355233ba6ac,0xbff921fb54442d18,1 +np.float64,0x3fbe97b3a43d2f67,0x3fbe72bca9be0348,1 +np.float64,0xbff0000000000000,0xbfe921fb54442d18,1 +np.float64,0xbfb4f55e6229eac0,0xbfb4e96df18a75a7,1 +np.float64,0xbfc66399ba2cc734,0xbfc62a3298bd96fc,1 +np.float64,0x3fd00988bb201311,0x3fcf6d67a9374c38,1 +np.float64,0x7fe471867d28e30c,0x3ff921fb54442d18,1 +np.float64,0xbfe38e0e64271c1d,0xbfe18d9888b7523b,1 +np.float64,0x8009dc127573b825,0x8009dc127573b825,1 +np.float64,0x800047bde4608f7d,0x800047bde4608f7d,1 +np.float64,0xffeede42c77dbc85,0xbff921fb54442d18,1 +np.float64,0xd8cf6d13b19ee,0xd8cf6d13b19ee,1 +np.float64,0xbfd08fb302a11f66,0xbfd034b1f8235e23,1 +np.float64,0x7fdb404c0b368097,0x3ff921fb54442d18,1 +np.float64,0xbfd6ba0438ad7408,0xbfd5d673e3276ec1,1 +np.float64,0xffd9568027b2ad00,0xbff921fb54442d18,1 +np.float64,0xbfb313b73e262770,0xbfb30ab4acb4fa67,1 +np.float64,0xbfe2dc1a15e5b834,0xbfe10ac5f8f3acd3,1 +np.float64,0xbfee426bf4bc84d8,0xbfe83d061df91edd,1 +np.float64,0xd9142c2fb2286,0xd9142c2fb2286,1 +np.float64,0x7feb0d11dff61a23,0x3ff921fb54442d18,1 +np.float64,0x800fea5b509fd4b7,0x800fea5b509fd4b7,1 +np.float64,0x3fe1a8818da35103,0x3fe022ba1bdf366e,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xbfd8fc6de6b1f8dc,0xbfd7d24726ed8dcc,1 +np.float64,0xf4b3dc2de967c,0xf4b3dc2de967c,1 +np.float64,0x8af0409b15e08,0x8af0409b15e08,1 +np.float64,0x3fb21e6934243cd2,0x3fb216b065f8709a,1 +np.float64,0x3fc53069392a60d2,0x3fc4ffa931211fb9,1 +np.float64,0xffc955812c32ab04,0xbff921fb54442d18,1 +np.float64,0xbfe3de42b1a7bc86,0xbfe1c7bd1324de75,1 +np.float64,0x1dc149a03b82a,0x1dc149a03b82a,1 +np.float64,0x8001bc5a24a378b5,0x8001bc5a24a378b5,1 +np.float64,0x3da14c407b44,0x3da14c407b44,1 +np.float64,0x80025e8da924bd1c,0x80025e8da924bd1c,1 +np.float64,0xbfcb0141c9360284,0xbfca9d572ea5e1f3,1 +np.float64,0xc90036fd92007,0xc90036fd92007,1 +np.float64,0x138312c427063,0x138312c427063,1 +np.float64,0x800dda3a963bb475,0x800dda3a963bb475,1 +np.float64,0x3fe9339934f26732,0x3fe558e723291f78,1 +np.float64,0xbfea8357027506ae,0xbfe6240826faaf48,1 +np.float64,0x7fe04735cae08e6b,0x3ff921fb54442d18,1 +np.float64,0x3fe29aca3c653594,0x3fe0da214c8bc6a4,1 +np.float64,0x3fbe1f09a03c3e13,0x3fbdfbbefef0155b,1 +np.float64,0x816ee4ad02ddd,0x816ee4ad02ddd,1 +np.float64,0xffddd1b31d3ba366,0xbff921fb54442d18,1 +np.float64,0x3fe2e01e0625c03c,0x3fe10dc0bd6677c2,1 +np.float64,0x3fec6bcf1978d79e,0x3fe73d518cddeb7c,1 +np.float64,0x7fe01aaaf8603555,0x3ff921fb54442d18,1 +np.float64,0xdf300cc5be602,0xdf300cc5be602,1 +np.float64,0xbfe71c01a36e3804,0xbfe403af80ce47b8,1 +np.float64,0xffa5be00ac2b7c00,0xbff921fb54442d18,1 +np.float64,0xbfda9ba711b5374e,0xbfd93775e3ac6bda,1 +np.float64,0xbfe56d8a27eadb14,0xbfe2e1a7185e8e6d,1 +np.float64,0x800f1bc937be3792,0x800f1bc937be3792,1 +np.float64,0x800a61d93c74c3b3,0x800a61d93c74c3b3,1 +np.float64,0x7fe71a52fcae34a5,0x3ff921fb54442d18,1 +np.float64,0x7fb4aef256295de4,0x3ff921fb54442d18,1 +np.float64,0x3fe6c1e861ed83d1,0x3fe3c828f281a7ef,1 
+np.float64,0x3fba128402342508,0x3fb9fb94cf141860,1 +np.float64,0x3fee55a7ecfcab50,0x3fe8472a9af893ee,1 +np.float64,0x3fe586f31b2b0de6,0x3fe2f32bce9e91bc,1 +np.float64,0xbfbb1d1442363a28,0xbfbb034c7729d5f2,1 +np.float64,0xc78b4d3f8f16a,0xc78b4d3f8f16a,1 +np.float64,0x7fdbc277d4b784ef,0x3ff921fb54442d18,1 +np.float64,0xbfa728ca2c2e5190,0xbfa724c04e73ccbd,1 +np.float64,0x7fefc7b2143f8f63,0x3ff921fb54442d18,1 +np.float64,0x3fd153a3dda2a748,0x3fd0ebccd33a4dca,1 +np.float64,0xbfe18a6eace314de,0xbfe00ba32ec89d30,1 +np.float64,0x7feef518537dea30,0x3ff921fb54442d18,1 +np.float64,0x8005f007cd4be010,0x8005f007cd4be010,1 +np.float64,0x7fd890b840b12170,0x3ff921fb54442d18,1 +np.float64,0x7feed0582ebda0af,0x3ff921fb54442d18,1 +np.float64,0x1013f53220280,0x1013f53220280,1 +np.float64,0xbfe77273986ee4e7,0xbfe43c375a8bf6de,1 +np.float64,0x7fe3ab8918675711,0x3ff921fb54442d18,1 +np.float64,0xbfc6ad515b2d5aa4,0xbfc671b2f7f86624,1 +np.float64,0x7fcd86231d3b0c45,0x3ff921fb54442d18,1 +np.float64,0xffe2523299a4a464,0xbff921fb54442d18,1 +np.float64,0x7fcadc5a1b35b8b3,0x3ff921fb54442d18,1 +np.float64,0x3fe5e020c4ebc042,0x3fe330418eec75bd,1 +np.float64,0x7fe332a9dc266553,0x3ff921fb54442d18,1 +np.float64,0xfa11dc21f425,0xfa11dc21f425,1 +np.float64,0xbec800177d900,0xbec800177d900,1 +np.float64,0x3fcadd057835ba0b,0x3fca7aa42face8bc,1 +np.float64,0xbfe6b9a206ad7344,0xbfe3c2a9719803de,1 +np.float64,0x3fbb4250b63684a0,0x3fbb281e9cefc519,1 +np.float64,0x7fef8787517f0f0e,0x3ff921fb54442d18,1 +np.float64,0x8001315c2d6262b9,0x8001315c2d6262b9,1 +np.float64,0xbfd94e3cf2b29c7a,0xbfd819257d36f56c,1 +np.float64,0xf1f325abe3e65,0xf1f325abe3e65,1 +np.float64,0x7fd6c07079ad80e0,0x3ff921fb54442d18,1 +np.float64,0x7fe328b075a65160,0x3ff921fb54442d18,1 +np.float64,0x7fe7998f812f331e,0x3ff921fb54442d18,1 +np.float64,0xffe026bb65604d76,0xbff921fb54442d18,1 +np.float64,0xffd6c06de8ad80dc,0xbff921fb54442d18,1 +np.float64,0x3fcd5a37bf3ab46f,0x3fccda82935d98ce,1 +np.float64,0xffc3e5a45227cb48,0xbff921fb54442d18,1 +np.float64,0x3febf7dd8177efbc,0x3fe6fc0bb999883e,1 +np.float64,0x7fd7047ea92e08fc,0x3ff921fb54442d18,1 +np.float64,0x35b3fc406b680,0x35b3fc406b680,1 +np.float64,0x7fd52e97632a5d2e,0x3ff921fb54442d18,1 +np.float64,0x3fd464d401a8c9a8,0x3fd3be2967fc97c3,1 +np.float64,0x800e815b2ebd02b6,0x800e815b2ebd02b6,1 +np.float64,0x3fca8428af350850,0x3fca257b466b8970,1 +np.float64,0x8007b7526f6f6ea6,0x8007b7526f6f6ea6,1 +np.float64,0x82f60a8f05ec2,0x82f60a8f05ec2,1 +np.float64,0x3fb71a5d0a2e34c0,0x3fb70a629ef8e2a2,1 +np.float64,0x7fc8570c7d30ae18,0x3ff921fb54442d18,1 +np.float64,0x7fe5528e77eaa51c,0x3ff921fb54442d18,1 +np.float64,0xffc20dbbf1241b78,0xbff921fb54442d18,1 +np.float64,0xeb13368fd6267,0xeb13368fd6267,1 +np.float64,0x7fe7d529056faa51,0x3ff921fb54442d18,1 +np.float64,0x3fecd02eabf9a05d,0x3fe77516f0ba1ac4,1 +np.float64,0x800fcba6a09f974d,0x800fcba6a09f974d,1 +np.float64,0x7fe7e8e015afd1bf,0x3ff921fb54442d18,1 +np.float64,0xbfd271a382a4e348,0xbfd1f513a191c595,1 +np.float64,0x9f1014013e21,0x9f1014013e21,1 +np.float64,0x3fc05da47f20bb49,0x3fc04708a13a3a47,1 +np.float64,0x3fe0f427dda1e850,0x3fdf2e60ba8678b9,1 +np.float64,0xbfecb29fa539653f,0xbfe764bc791c45dd,1 +np.float64,0x45881ec68b104,0x45881ec68b104,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fe9c67ee1338cfe,0x3fe5b2c7b3df6ce8,1 +np.float64,0x7fedb8fef6bb71fd,0x3ff921fb54442d18,1 +np.float64,0x3fe54f6aaaea9ed6,0x3fe2ccd1df2abaa9,1 +np.float64,0x7feff58a1bbfeb13,0x3ff921fb54442d18,1 +np.float64,0x7fe3b62827276c4f,0x3ff921fb54442d18,1 
+np.float64,0x3fe5feb682ebfd6d,0x3fe345105bc6d980,1 +np.float64,0x3fe49f38d9693e72,0x3fe2518b2824757f,1 +np.float64,0x8006bfd27c6d7fa6,0x8006bfd27c6d7fa6,1 +np.float64,0x3fc13409e2226814,0x3fc119ce0c01a5a2,1 +np.float64,0x95f8c7212bf19,0x95f8c7212bf19,1 +np.float64,0x3fd9f0fa6133e1f5,0x3fd8a567515edecf,1 +np.float64,0x3fef95cbe5ff2b98,0x3fe8ec88c768ba0b,1 +np.float64,0x3fbed28bba3da510,0x3fbeacbf136e51c2,1 +np.float64,0xbfd3987aeca730f6,0xbfd303fca58e3e60,1 +np.float64,0xbfed0f90cbfa1f22,0xbfe797f59249410d,1 +np.float64,0xffe55d8cbf2abb19,0xbff921fb54442d18,1 +np.float64,0x3feb4d9fc6769b40,0x3fe69a88131a1f1f,1 +np.float64,0x80085569acd0aad4,0x80085569acd0aad4,1 +np.float64,0x20557a6e40ab0,0x20557a6e40ab0,1 +np.float64,0x3fead2fd5df5a5fb,0x3fe653091f33b27f,1 +np.float64,0x3fe7b9983eaf7330,0x3fe46a50c4b5235e,1 +np.float64,0xffdad237ffb5a470,0xbff921fb54442d18,1 +np.float64,0xbfe5cc39a4eb9874,0xbfe322ad3a903f93,1 +np.float64,0x800ad6eecb35adde,0x800ad6eecb35adde,1 +np.float64,0xffec620f6438c41e,0xbff921fb54442d18,1 +np.float64,0xbfe5ef29122bde52,0xbfe33a7dfcc255e2,1 +np.float64,0x3fd451e7d0a8a3d0,0x3fd3acfa4939af10,1 +np.float64,0x8003ea93c127d528,0x8003ea93c127d528,1 +np.float64,0x800b48d37c9691a7,0x800b48d37c9691a7,1 +np.float64,0x3fe7e202acafc405,0x3fe484558246069b,1 +np.float64,0x80070c9b686e1938,0x80070c9b686e1938,1 +np.float64,0xbfda90bbc6352178,0xbfd92e25fcd12288,1 +np.float64,0x800e1ffebb1c3ffe,0x800e1ffebb1c3ffe,1 +np.float64,0x3ff0000000000000,0x3fe921fb54442d18,1 +np.float64,0xffd8cfdd46319fba,0xbff921fb54442d18,1 +np.float64,0x7fd8cd4182319a82,0x3ff921fb54442d18,1 +np.float64,0x3fed8bb778bb176f,0x3fe7db7c77c4c694,1 +np.float64,0x3fc74a70302e94e0,0x3fc709e95d6defec,1 +np.float64,0x3fe87269d070e4d4,0x3fe4e04bcc4a2137,1 +np.float64,0x7fb48223f6290447,0x3ff921fb54442d18,1 +np.float64,0xffe8ec444b71d888,0xbff921fb54442d18,1 +np.float64,0x7fde17d280bc2fa4,0x3ff921fb54442d18,1 +np.float64,0x3fd1cbde01a397bc,0x3fd15b9bb7b3147b,1 +np.float64,0x800883a64451074d,0x800883a64451074d,1 +np.float64,0x7fe3160a3f262c13,0x3ff921fb54442d18,1 +np.float64,0xbfe051d4d9a0a3aa,0xbfde2ecf14dc75fb,1 +np.float64,0xbfd89de689b13bce,0xbfd780176d1a28a3,1 +np.float64,0x3fecde2bf779bc58,0x3fe77ccf10bdd8e2,1 +np.float64,0xffe75774dc6eaee9,0xbff921fb54442d18,1 +np.float64,0x7fe834414d706882,0x3ff921fb54442d18,1 +np.float64,0x1,0x1,1 +np.float64,0xbfea5e4e4a74bc9c,0xbfe60e0601711835,1 +np.float64,0xffec248d4cb8491a,0xbff921fb54442d18,1 +np.float64,0xffd9942c2c332858,0xbff921fb54442d18,1 +np.float64,0xa9db36a553b67,0xa9db36a553b67,1 +np.float64,0x7fec630718b8c60d,0x3ff921fb54442d18,1 +np.float64,0xbfd062188f20c432,0xbfd009ecd652be89,1 +np.float64,0x8001b84e3023709d,0x8001b84e3023709d,1 +np.float64,0xbfe9e26d7cb3c4db,0xbfe5c3b157ecf668,1 +np.float64,0xbfef66ddf33ecdbc,0xbfe8d4b1f6410a24,1 +np.float64,0x3fd8d7109431ae21,0x3fd7b1d4860719a2,1 +np.float64,0xffee0f53107c1ea5,0xbff921fb54442d18,1 +np.float64,0x80000b4fd60016a0,0x80000b4fd60016a0,1 +np.float64,0xbfd99ff6e5333fee,0xbfd85fb3cbdaa049,1 +np.float64,0xbfe9cfd268339fa5,0xbfe5b86ef021a1b1,1 +np.float64,0xe32eace1c65d6,0xe32eace1c65d6,1 +np.float64,0xffc81f6627303ecc,0xbff921fb54442d18,1 +np.float64,0x7fe98dadde331b5b,0x3ff921fb54442d18,1 +np.float64,0xbfbcebd11e39d7a0,0xbfbccc8ec47883c7,1 +np.float64,0x7fe164880f22c90f,0x3ff921fb54442d18,1 +np.float64,0x800467c0cae8cf82,0x800467c0cae8cf82,1 +np.float64,0x800071e4b140e3ca,0x800071e4b140e3ca,1 +np.float64,0xbfc87a7eae30f4fc,0xbfc82fbc55bb0f24,1 +np.float64,0xffb2e0e23225c1c8,0xbff921fb54442d18,1 
+np.float64,0x20ef338041df,0x20ef338041df,1 +np.float64,0x7fe6de71ca6dbce3,0x3ff921fb54442d18,1 +np.float64,0x5d1fa026ba3f5,0x5d1fa026ba3f5,1 +np.float64,0xffd112a9ce222554,0xbff921fb54442d18,1 +np.float64,0x3fb351f66626a3ed,0x3fb3489ab578c452,1 +np.float64,0x7fef7b2bd3bef657,0x3ff921fb54442d18,1 +np.float64,0xffe144f5d4e289eb,0xbff921fb54442d18,1 +np.float64,0xffd63a6750ac74ce,0xbff921fb54442d18,1 +np.float64,0x7fd2d8bb25a5b175,0x3ff921fb54442d18,1 +np.float64,0x3fec5920a078b242,0x3fe732dcffcf6521,1 +np.float64,0x80009a8b7f813518,0x80009a8b7f813518,1 +np.float64,0x3fdea220893d4441,0x3fdc921edf6bf3d8,1 +np.float64,0x8006cee2208d9dc5,0x8006cee2208d9dc5,1 +np.float64,0xdd0b0081ba17,0xdd0b0081ba17,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfdac33955358672,0xbfd9592bce7daf1f,1 +np.float64,0x7fe8301d7170603a,0x3ff921fb54442d18,1 +np.float64,0xbfc1d34d8523a69c,0xbfc1b62449af9684,1 +np.float64,0x800c62239458c447,0x800c62239458c447,1 +np.float64,0xffd398c009a73180,0xbff921fb54442d18,1 +np.float64,0xbfe0c6d9ee218db4,0xbfdee777557f4401,1 +np.float64,0x3feccdd373799ba7,0x3fe773c9c2263f89,1 +np.float64,0xbfd21898bda43132,0xbfd1a2be8545fcc5,1 +np.float64,0x3fd77019b62ee033,0x3fd67793cabdf267,1 +np.float64,0x7fa609cad42c1395,0x3ff921fb54442d18,1 +np.float64,0x7fb4eaea5a29d5d4,0x3ff921fb54442d18,1 +np.float64,0x3fc570dc9a2ae1b9,0x3fc53e5f6218a799,1 +np.float64,0x800344ae8466895e,0x800344ae8466895e,1 +np.float64,0xbfc7c985252f930c,0xbfc784d60fa27bac,1 +np.float64,0xffaa2929fc345250,0xbff921fb54442d18,1 +np.float64,0xffe63e5ee9ac7cbe,0xbff921fb54442d18,1 +np.float64,0x73f0280ce7e06,0x73f0280ce7e06,1 +np.float64,0xffc525f8822a4bf0,0xbff921fb54442d18,1 +np.float64,0x7fd744d00aae899f,0x3ff921fb54442d18,1 +np.float64,0xbfe0fe590761fcb2,0xbfdf3e493e8b1f32,1 +np.float64,0xfae04ae7f5c0a,0xfae04ae7f5c0a,1 +np.float64,0xef821939df043,0xef821939df043,1 +np.float64,0x7fef6135843ec26a,0x3ff921fb54442d18,1 +np.float64,0xbfebf34dcbf7e69c,0xbfe6f97588a8f911,1 +np.float64,0xbfeec0b498fd8169,0xbfe87f2eceeead12,1 +np.float64,0x7fb67161b42ce2c2,0x3ff921fb54442d18,1 +np.float64,0x3fdcfd998639fb33,0x3fdb38934927c096,1 +np.float64,0xffda5960bc34b2c2,0xbff921fb54442d18,1 +np.float64,0xbfe11f8c71223f19,0xbfdf71fe770c96ab,1 +np.float64,0x3fe4ac1bab695838,0x3fe25aa4517b8322,1 +np.float64,0x3f730458a02608b1,0x3f73044fabb5e999,1 +np.float64,0x3fdb14ffcdb62a00,0x3fd99ea6c241a3ed,1 +np.float64,0xbfc93208cd326410,0xbfc8e09d78b6d4db,1 +np.float64,0x19e734dc33ce8,0x19e734dc33ce8,1 +np.float64,0x3fe5e98428abd308,0x3fe336a6a085eb55,1 +np.float64,0x7fec672a1378ce53,0x3ff921fb54442d18,1 +np.float64,0x800f8bd8d4ff17b2,0x800f8bd8d4ff17b2,1 +np.float64,0xbfe5a12e4e6b425c,0xbfe30533f99d5d06,1 +np.float64,0x75a34cb0eb46a,0x75a34cb0eb46a,1 +np.float64,0x7fe1d21d16a3a439,0x3ff921fb54442d18,1 +np.float64,0x7ff0000000000000,0x3ff921fb54442d18,1 +np.float64,0xffe0f50db261ea1b,0xbff921fb54442d18,1 +np.float64,0xbfd9dc22feb3b846,0xbfd8937ec965a501,1 +np.float64,0x8009d68e48d3ad1d,0x8009d68e48d3ad1d,1 +np.float64,0xbfe2eba620e5d74c,0xbfe1164d7d273c60,1 +np.float64,0x992efa09325e0,0x992efa09325e0,1 +np.float64,0x3fdab640ea356c82,0x3fd94e20cab88db2,1 +np.float64,0x69a6f04ad34df,0x69a6f04ad34df,1 +np.float64,0x3fe397df25272fbe,0x3fe194bd1a3a6192,1 +np.float64,0xebcce9fdd799d,0xebcce9fdd799d,1 +np.float64,0x3fbb49490c369292,0x3fbb2f02eccc497d,1 +np.float64,0xffd871f980b0e3f4,0xbff921fb54442d18,1 +np.float64,0x800348f6966691ee,0x800348f6966691ee,1 +np.float64,0xbfebc270a7f784e1,0xbfe6dda8d0d80f26,1 
+np.float64,0xffd6d559b1adaab4,0xbff921fb54442d18,1 +np.float64,0x3fec3635c0b86c6c,0x3fe71f420256e43e,1 +np.float64,0x7fbc82ad7039055a,0x3ff921fb54442d18,1 +np.float64,0x7f873050602e60a0,0x3ff921fb54442d18,1 +np.float64,0x3fca44b8c3348970,0x3fc9e8a1a1a2d96e,1 +np.float64,0x3fe0fc308fe1f861,0x3fdf3aeb469ea225,1 +np.float64,0x7fefc27de8bf84fb,0x3ff921fb54442d18,1 +np.float64,0x8005f3f3916be7e8,0x8005f3f3916be7e8,1 +np.float64,0xbfd4278c7c284f18,0xbfd38678988873b6,1 +np.float64,0x435eafc486bd7,0x435eafc486bd7,1 +np.float64,0xbfd01f5199203ea4,0xbfcf96631f2108a3,1 +np.float64,0xffd5ee9185abdd24,0xbff921fb54442d18,1 +np.float64,0xffedb363257b66c5,0xbff921fb54442d18,1 +np.float64,0x800d68e6e11ad1ce,0x800d68e6e11ad1ce,1 +np.float64,0xbfcf687f8e3ed100,0xbfceccb771b0d39a,1 +np.float64,0x7feb3b9ef2f6773d,0x3ff921fb54442d18,1 +np.float64,0x3fe15ec5ca62bd8c,0x3fdfd3fab9d96f81,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xd2386f81a470e,0xd2386f81a470e,1 +np.float64,0xb9feed4573fde,0xb9feed4573fde,1 +np.float64,0x3fe7ed25c9efda4c,0x3fe48b7b72db4014,1 +np.float64,0xbfe01478726028f1,0xbfddcd1f5a2efc59,1 +np.float64,0x9946d02f328da,0x9946d02f328da,1 +np.float64,0xbfe3bb67f06776d0,0xbfe1ae88aa81c5a6,1 +np.float64,0xbfd3fd8a4c27fb14,0xbfd3603982e3b78d,1 +np.float64,0xffd5c3ab912b8758,0xbff921fb54442d18,1 +np.float64,0xffd5f502b12bea06,0xbff921fb54442d18,1 +np.float64,0xbfc64981ec2c9304,0xbfc610e0382b1fa6,1 +np.float64,0xffec42e3413885c6,0xbff921fb54442d18,1 +np.float64,0x80084eb4ed109d6a,0x80084eb4ed109d6a,1 +np.float64,0xbfd17cac9fa2f95a,0xbfd112020588a4b3,1 +np.float64,0xbfd06c1359a0d826,0xbfd0134a28aa9a66,1 +np.float64,0x7fdc3d7c03b87af7,0x3ff921fb54442d18,1 +np.float64,0x7bdf5aaaf7bec,0x7bdf5aaaf7bec,1 +np.float64,0xbfee3cd966fc79b3,0xbfe83a14bc07ac3b,1 +np.float64,0x7fec910da3f9221a,0x3ff921fb54442d18,1 +np.float64,0xffb4ea667029d4d0,0xbff921fb54442d18,1 +np.float64,0x800103d7cce207b0,0x800103d7cce207b0,1 +np.float64,0x7fbb229a6c364534,0x3ff921fb54442d18,1 +np.float64,0x0,0x0,1 +np.float64,0xffd8fccd0331f99a,0xbff921fb54442d18,1 +np.float64,0xbfd0784ae1a0f096,0xbfd01ebff62e39ad,1 +np.float64,0xbfed2ec9b3ba5d93,0xbfe7a9099410bc76,1 +np.float64,0x800690b8d16d2172,0x800690b8d16d2172,1 +np.float64,0x7fc061b26520c364,0x3ff921fb54442d18,1 +np.float64,0x8007ec47054fd88f,0x8007ec47054fd88f,1 +np.float64,0x775546b6eeaa9,0x775546b6eeaa9,1 +np.float64,0x8005e00fb56bc020,0x8005e00fb56bc020,1 +np.float64,0xbfe510f8d0ea21f2,0xbfe2a16862b5a37f,1 +np.float64,0xffd87a6bf3b0f4d8,0xbff921fb54442d18,1 +np.float64,0x800906e3d0520dc8,0x800906e3d0520dc8,1 +np.float64,0x2296f000452f,0x2296f000452f,1 +np.float64,0xbfe3189fa2e63140,0xbfe1378c0e005be4,1 +np.float64,0xb4d2447f69a49,0xb4d2447f69a49,1 +np.float64,0xffd056a24a20ad44,0xbff921fb54442d18,1 +np.float64,0xbfe3b23fe4e76480,0xbfe1a7e5840fcbeb,1 +np.float64,0x80018ee270831dc6,0x80018ee270831dc6,1 +np.float64,0x800df89f245bf13e,0x800df89f245bf13e,1 +np.float64,0x3fee1409d7bc2814,0x3fe824779d133232,1 +np.float64,0xbfef8d81667f1b03,0xbfe8e85523620368,1 +np.float64,0xffd8a6519b314ca4,0xbff921fb54442d18,1 +np.float64,0x7fc7bc86f32f790d,0x3ff921fb54442d18,1 +np.float64,0xffea6159e674c2b3,0xbff921fb54442d18,1 +np.float64,0x3fe153c3fba2a788,0x3fdfc2f74769d300,1 +np.float64,0xffc4261ef3284c3c,0xbff921fb54442d18,1 +np.float64,0x7fe8a8961ff1512b,0x3ff921fb54442d18,1 +np.float64,0xbfe3fb1fd167f640,0xbfe1dc89dcb7ecdf,1 +np.float64,0x3fd88577c2b10af0,0x3fd76acc09660704,1 +np.float64,0x3fe128ec27e251d8,0x3fdf808fc7ebcd8f,1 
+np.float64,0xbfed6ca7c4fad950,0xbfe7caafe9a3e213,1 +np.float64,0xbf9a3912b8347220,0xbf9a379b3349352e,1 +np.float64,0xbfd724d7bcae49b0,0xbfd6351efa2a5fc5,1 +np.float64,0xbfed59700a7ab2e0,0xbfe7c043014c694c,1 +np.float64,0x8002ad435bc55a87,0x8002ad435bc55a87,1 +np.float64,0xffe46ed345a8dda6,0xbff921fb54442d18,1 +np.float64,0x7fd2f1d1d825e3a3,0x3ff921fb54442d18,1 +np.float64,0xbfea0265e23404cc,0xbfe5d6fb3fd30464,1 +np.float64,0xbfd17e049122fc0a,0xbfd113421078bbae,1 +np.float64,0xffea03b986b40772,0xbff921fb54442d18,1 +np.float64,0x800b55331a16aa67,0x800b55331a16aa67,1 +np.float64,0xbfc6fcafbf2df960,0xbfc6be9ecd0ebc1f,1 +np.float64,0xd6a36017ad46c,0xd6a36017ad46c,1 +np.float64,0xbfe9ba86dfb3750e,0xbfe5ab840cb0ef86,1 +np.float64,0x75c4a108eb895,0x75c4a108eb895,1 +np.float64,0x8008d6bc8051ad79,0x8008d6bc8051ad79,1 +np.float64,0xbfd3dc5984a7b8b4,0xbfd341f78e0528ec,1 +np.float64,0xffe1cbb01aa39760,0xbff921fb54442d18,1 +np.float64,0x3fc7e292f52fc526,0x3fc79d0ce9365767,1 +np.float64,0xbfcbeae2bd37d5c4,0xbfcb7cb034f82467,1 +np.float64,0x8000f0c62e21e18d,0x8000f0c62e21e18d,1 +np.float64,0xbfe23d8bc6247b18,0xbfe09418ee35c3c7,1 +np.float64,0x717394bae2e73,0x717394bae2e73,1 +np.float64,0xffa2ef1cc425de40,0xbff921fb54442d18,1 +np.float64,0x3fd938c229b27184,0x3fd806900735c99d,1 +np.float64,0x800bf3ec8a77e7d9,0x800bf3ec8a77e7d9,1 +np.float64,0xffeef41dd57de83b,0xbff921fb54442d18,1 +np.float64,0x8008df97e5b1bf30,0x8008df97e5b1bf30,1 +np.float64,0xffe9ab9d0db35739,0xbff921fb54442d18,1 +np.float64,0x99ff391333fe7,0x99ff391333fe7,1 +np.float64,0x3fb864b4a630c969,0x3fb851e883ea2cf9,1 +np.float64,0x22c1230a45825,0x22c1230a45825,1 +np.float64,0xff2336fbfe467,0xff2336fbfe467,1 +np.float64,0xbfd488f4cea911ea,0xbfd3def0490f5414,1 +np.float64,0x3fa379c78426f38f,0x3fa377607370800b,1 +np.float64,0xbfb0873302210e68,0xbfb08155b78dfd53,1 +np.float64,0xbfdf9ff7c2bf3ff0,0xbfdd5f658e357ad2,1 +np.float64,0x800978719192f0e4,0x800978719192f0e4,1 +np.float64,0xbfba8759ea350eb0,0xbfba6f325013b9e5,1 +np.float64,0xbfdd3e6b06ba7cd6,0xbfdb6e472b6091b0,1 +np.float64,0x7fe0c334a7a18668,0x3ff921fb54442d18,1 +np.float64,0xbfeb971feb772e40,0xbfe6c4e0f61404d1,1 +np.float64,0x3fe2a50968e54a13,0x3fe0e1c8b8d96e85,1 +np.float64,0x800fa9c5515f538b,0x800fa9c5515f538b,1 +np.float64,0x800f8532fbbf0a66,0x800f8532fbbf0a66,1 +np.float64,0x167d6f1e2cfaf,0x167d6f1e2cfaf,1 +np.float64,0xffee88e769fd11ce,0xbff921fb54442d18,1 +np.float64,0xbfeecc8529fd990a,0xbfe885520cdad8ea,1 +np.float64,0xffefffffffffffff,0xbff921fb54442d18,1 +np.float64,0xbfef6a566afed4ad,0xbfe8d6767b4c4235,1 +np.float64,0xffec12415af82482,0xbff921fb54442d18,1 +np.float64,0x3678a20a6cf15,0x3678a20a6cf15,1 +np.float64,0xffe468d54ee8d1aa,0xbff921fb54442d18,1 +np.float64,0x800ad6006795ac01,0x800ad6006795ac01,1 +np.float64,0x8001d5b61063ab6d,0x8001d5b61063ab6d,1 +np.float64,0x800dfcd1863bf9a3,0x800dfcd1863bf9a3,1 +np.float64,0xc9fbff6f93f80,0xc9fbff6f93f80,1 +np.float64,0xffe55c20f9eab842,0xbff921fb54442d18,1 +np.float64,0xbfcb596b6536b2d8,0xbfcaf1b339c5c615,1 +np.float64,0xbfe092689ea124d1,0xbfde94fa58946e51,1 +np.float64,0x3fe9ec733af3d8e6,0x3fe5c9bf5dee2623,1 +np.float64,0x3fe30f3d83261e7b,0x3fe1309fd6620e03,1 +np.float64,0xffd31d7f84263b00,0xbff921fb54442d18,1 +np.float64,0xbfe88d2d3e711a5a,0xbfe4f12b5a136178,1 +np.float64,0xffc81e4ce1303c98,0xbff921fb54442d18,1 +np.float64,0xffe5b96ebfab72dd,0xbff921fb54442d18,1 +np.float64,0x512f0502a25e1,0x512f0502a25e1,1 +np.float64,0x7fa3a376982746ec,0x3ff921fb54442d18,1 +np.float64,0x80005b5f2f60b6bf,0x80005b5f2f60b6bf,1 
+np.float64,0xc337cc69866fa,0xc337cc69866fa,1 +np.float64,0x3fe7719c4caee339,0x3fe43bab42b19e64,1 +np.float64,0x7fde7ec1d93cfd83,0x3ff921fb54442d18,1 +np.float64,0x3fd2f38f3825e71e,0x3fd26cc7b1dd0acb,1 +np.float64,0x7fce298b993c5316,0x3ff921fb54442d18,1 +np.float64,0x56ae3b2cad5c8,0x56ae3b2cad5c8,1 +np.float64,0x3fe9299f2bf2533e,0x3fe552bddd999e72,1 +np.float64,0x7feff3a4823fe748,0x3ff921fb54442d18,1 +np.float64,0xbfd05c670aa0b8ce,0xbfd00494d78e9e97,1 +np.float64,0xffe745323eae8a64,0xbff921fb54442d18,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-arctanh.csv b/python/numpy/_core/tests/data/umath-validation-set-arctanh.csv new file mode 100644 index 000000000..68ecaab37 --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-arctanh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3ee82930,0x3efa60fd,2 +np.float32,0x3f0aa640,0x3f1b3e13,2 +np.float32,0x3ec1a21c,0x3ecbbf8d,2 +np.float32,0x3cdb1740,0x3cdb24a1,2 +np.float32,0xbf28b6f3,0xbf4a86ac,2 +np.float32,0xbe490dcc,0xbe4bb2eb,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xbf44f9dd,0xbf826ce1,2 +np.float32,0xbf1d66c4,0xbf37786b,2 +np.float32,0x3f0ad26a,0x3f1b7c9b,2 +np.float32,0x3f7b6c54,0x4016aab0,2 +np.float32,0xbf715bb8,0xbfe1a0bc,2 +np.float32,0xbee8a562,0xbefafd6a,2 +np.float32,0x3db94d00,0x3db9cf16,2 +np.float32,0x3ee2970c,0x3ef368b3,2 +np.float32,0x3f3f8614,0x3f77fdca,2 +np.float32,0xbf1fb5f0,0xbf3b3789,2 +np.float32,0x3f798dc0,0x400b96bb,2 +np.float32,0x3e975d64,0x3e9c0573,2 +np.float32,0xbe3f1908,0xbe415d1f,2 +np.float32,0x3f2cea38,0x3f52192e,2 +np.float32,0x3e82f1ac,0x3e85eaa1,2 +np.float32,0x3eab6b30,0x3eb24acd,2 +np.float32,0xbe9bb90c,0xbea0cf5f,2 +np.float32,0xbf43e847,0xbf81202f,2 +np.float32,0xbd232fa0,0xbd2345c0,2 +np.float32,0xbbabbc00,0xbbabbc67,2 +np.float32,0xbf0b2975,0xbf1bf808,2 +np.float32,0xbef5ab0a,0xbf05d305,2 +np.float32,0x3f2cad16,0x3f51a8e2,2 +np.float32,0xbef75940,0xbf06eb08,2 +np.float32,0xbf0c1216,0xbf1d4325,2 +np.float32,0x3e7bdc08,0x3e8090c2,2 +np.float32,0x3da14e10,0x3da1a3c5,2 +np.float32,0x3f627412,0x3fb2bf21,2 +np.float32,0xbd6d08c0,0xbd6d4ca0,2 +np.float32,0x3f3e2368,0x3f74df8b,2 +np.float32,0xbe0df104,0xbe0edc77,2 +np.float32,0x3e8a265c,0x3e8da833,2 +np.float32,0xbdccdbb0,0xbdcd8ba8,2 +np.float32,0x3eb080c4,0x3eb80a44,2 +np.float32,0x3e627800,0x3e6645fe,2 +np.float32,0xbd8be0b0,0xbd8c1886,2 +np.float32,0xbf3282ac,0xbf5cae8c,2 +np.float32,0xbe515910,0xbe545707,2 +np.float32,0xbf2e64ac,0xbf54d637,2 +np.float32,0x3e0fc230,0x3e10b6de,2 +np.float32,0x3eb13ca0,0x3eb8df94,2 +np.float32,0x3f07a3ca,0x3f170572,2 +np.float32,0x3f2c7026,0x3f513935,2 +np.float32,0x3f3c4ec8,0x3f70d67c,2 +np.float32,0xbee9cce8,0xbefc724f,2 +np.float32,0xbe53ca60,0xbe56e3f3,2 +np.float32,0x3dd9e9a0,0x3ddabd98,2 +np.float32,0x3f38b8d4,0x3f69319b,2 +np.float32,0xbe176dc4,0xbe188c1d,2 +np.float32,0xbf322f2e,0xbf5c0c51,2 +np.float32,0xbe9b8676,0xbea097a2,2 +np.float32,0xbca44280,0xbca44823,2 +np.float32,0xbe2b0248,0xbe2ca036,2 +np.float32,0x3d101e80,0x3d102dbd,2 +np.float32,0xbf4eb610,0xbf8f526d,2 +np.float32,0xbec32a50,0xbecd89d1,2 +np.float32,0x3d549100,0x3d54c1ee,2 +np.float32,0x3f78e55e,0x40087025,2 +np.float32,0x3e592798,0x3e5c802d,2 +np.float32,0x3de045d0,0x3de12cfb,2 +np.float32,0xbdad28e0,0xbdad92f7,2 +np.float32,0x3e9a69e0,0x3e9f5e59,2 +np.float32,0x3e809778,0x3e836716,2 +np.float32,0xbf3278d9,0xbf5c9b6d,2 +np.float32,0x3f39fa00,0x3f6bd4a5,2 +np.float32,0xbec8143c,0xbed34ffa,2 +np.float32,0x3ddb7f40,0x3ddc57e6,2 +np.float32,0x3f0e8342,0x3f20c634,2 
+np.float32,0x3f353dda,0x3f6213a4,2 +np.float32,0xbe96b400,0xbe9b4bea,2 +np.float32,0x3e626580,0x3e66328a,2 +np.float32,0xbde091c8,0xbde179df,2 +np.float32,0x3eb47b5c,0x3ebc91ca,2 +np.float32,0xbf282182,0xbf497f2f,2 +np.float32,0x3ea9f64c,0x3eb0a748,2 +np.float32,0x3f28dd4e,0x3f4aca86,2 +np.float32,0xbf71de18,0xbfe3f587,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xbf6696a6,0xbfbcf11a,2 +np.float32,0xbc853ae0,0xbc853de2,2 +np.float32,0xbeced246,0xbedb51b8,2 +np.float32,0x3f3472a4,0x3f607e00,2 +np.float32,0xbee90124,0xbefb7117,2 +np.float32,0x3eb45b90,0x3ebc6d7c,2 +np.float32,0xbe53ead0,0xbe5705d6,2 +np.float32,0x3f630c80,0x3fb420e2,2 +np.float32,0xbf408cd0,0xbf7a56a2,2 +np.float32,0x3dda4ed0,0x3ddb23f1,2 +np.float32,0xbf37ae88,0xbf67096b,2 +np.float32,0xbdd48c28,0xbdd550c9,2 +np.float32,0xbf5745b0,0xbf9cb4a4,2 +np.float32,0xbf44e6fc,0xbf8255c1,2 +np.float32,0x3f5c8e6a,0x3fa65020,2 +np.float32,0xbea45fe8,0xbeaa6630,2 +np.float32,0x3f08bdee,0x3f188ef5,2 +np.float32,0x3ec77e74,0x3ed29f4b,2 +np.float32,0xbf1a1d3c,0xbf324029,2 +np.float32,0x3cad7340,0x3cad79e3,2 +np.float32,0xbf4fac2e,0xbf90b72a,2 +np.float32,0x3f58516e,0x3f9e8330,2 +np.float32,0x3f442008,0x3f816391,2 +np.float32,0xbf6e0c6c,0xbfd42854,2 +np.float32,0xbf266f7a,0xbf4689b2,2 +np.float32,0x3eb7e2f0,0x3ec077ba,2 +np.float32,0xbf320fd0,0xbf5bcf83,2 +np.float32,0xbf6a76b9,0xbfc80a11,2 +np.float32,0xbf2a91b4,0xbf4dd526,2 +np.float32,0x3f176e30,0x3f2e150e,2 +np.float32,0xbdcccad0,0xbdcd7a9c,2 +np.float32,0x3f60a8a4,0x3faebbf7,2 +np.float32,0x3d9706f0,0x3d974d40,2 +np.float32,0x3ef3cd34,0x3f049d58,2 +np.float32,0xbf73c615,0xbfed79fe,2 +np.float32,0x3df1b170,0x3df2d31b,2 +np.float32,0x3f632a46,0x3fb466c7,2 +np.float32,0xbf3ea18e,0xbf75f9ce,2 +np.float32,0xbf3ea05c,0xbf75f71f,2 +np.float32,0xbdd76750,0xbdd83403,2 +np.float32,0xbca830c0,0xbca836cd,2 +np.float32,0x3f1d4162,0x3f373c59,2 +np.float32,0x3c115700,0x3c1157fa,2 +np.float32,0x3dae8ab0,0x3daef758,2 +np.float32,0xbcad5020,0xbcad56bf,2 +np.float32,0x3ee299c4,0x3ef36c15,2 +np.float32,0xbf7f566c,0xc054c3bd,2 +np.float32,0x3f0cc698,0x3f1e4557,2 +np.float32,0xbe75c648,0xbe7aaa04,2 +np.float32,0x3ea29238,0x3ea86417,2 +np.float32,0x3f09d9c0,0x3f1a1d61,2 +np.float32,0x3f67275c,0x3fbe74b3,2 +np.float32,0x3e1a4e18,0x3e1b7d3a,2 +np.float32,0xbef6e3fc,0xbf069e98,2 +np.float32,0xbf6038ac,0xbfadc9fd,2 +np.float32,0xbe46bdd4,0xbe494b7f,2 +np.float32,0xbf4df1f4,0xbf8e3a98,2 +np.float32,0x3d094dc0,0x3d095aed,2 +np.float32,0x3f44c7d2,0x3f822fa3,2 +np.float32,0xbea30816,0xbea8e737,2 +np.float32,0xbe3c27c4,0xbe3e511b,2 +np.float32,0x3f3bb47c,0x3f6f8789,2 +np.float32,0xbe423760,0xbe4498c3,2 +np.float32,0x3ece1a74,0x3eda7634,2 +np.float32,0x3f14d1f6,0x3f2a1a89,2 +np.float32,0xbf4d9e8f,0xbf8dc4c1,2 +np.float32,0xbe92968e,0xbe96cd7f,2 +np.float32,0x3e99e6c0,0x3e9ece26,2 +np.float32,0xbf397361,0xbf6ab878,2 +np.float32,0xbf4fcea4,0xbf90e99f,2 +np.float32,0x3de37640,0x3de46779,2 +np.float32,0x3eb1b604,0x3eb9698c,2 +np.float32,0xbf52d0a2,0xbf957361,2 +np.float32,0xbe20435c,0xbe21975a,2 +np.float32,0x3f437a58,0x3f809bf1,2 +np.float32,0x3f27d1cc,0x3f48f335,2 +np.float32,0x3f7d4ff2,0x4027d1e2,2 +np.float32,0xbef732e4,0xbf06d205,2 +np.float32,0x3f4a0ae6,0x3f88e18e,2 +np.float32,0x3f800000,0x7f800000,2 +np.float32,0x3e3e56a0,0x3e4093ba,2 +np.float32,0xbed2fcfa,0xbee0517d,2 +np.float32,0xbe0e0114,0xbe0eecd7,2 +np.float32,0xbe808574,0xbe8353db,2 +np.float32,0x3f572e2a,0x3f9c8c86,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x3f3f3c82,0x3f775703,2 +np.float32,0xbf6e2482,0xbfd4818b,2 
+np.float32,0xbf3943b0,0xbf6a5439,2 +np.float32,0x3f6e42ac,0x3fd4f1ea,2 +np.float32,0x3eb676c4,0x3ebed619,2 +np.float32,0xbe5e56c4,0xbe61ef6c,2 +np.float32,0x3eea200c,0x3efcdb65,2 +np.float32,0x3e3d2c78,0x3e3f5ef8,2 +np.float32,0xbdfd8fb0,0xbdfede71,2 +np.float32,0xbee69c8a,0xbef86e89,2 +np.float32,0x3e9efca0,0x3ea46a1c,2 +np.float32,0x3e4c2498,0x3e4ee9ee,2 +np.float32,0xbf3cc93c,0xbf71e21d,2 +np.float32,0x3ee0d77c,0x3ef13d2b,2 +np.float32,0xbefbcd2a,0xbf09d6a3,2 +np.float32,0x3f6dbe5c,0x3fd30a3e,2 +np.float32,0x3dae63e0,0x3daed03f,2 +np.float32,0xbd5001e0,0xbd502fb9,2 +np.float32,0x3f59632a,0x3fa067c8,2 +np.float32,0x3f0d355a,0x3f1ee452,2 +np.float32,0x3f2cbe5c,0x3f51c896,2 +np.float32,0x3c5e6e80,0x3c5e7200,2 +np.float32,0xbe8ac49c,0xbe8e52f0,2 +np.float32,0x3f54e576,0x3f98c0e6,2 +np.float32,0xbeaa0762,0xbeb0ba7c,2 +np.float32,0x3ec81e88,0x3ed35c21,2 +np.float32,0x3f5a6738,0x3fa23fb6,2 +np.float32,0xbf24a682,0xbf43784a,2 +np.float32,0x1,0x1,2 +np.float32,0x3ee6bc24,0x3ef89630,2 +np.float32,0x3f19444a,0x3f30ecf5,2 +np.float32,0x3ec1fc70,0x3ecc28fc,2 +np.float32,0xbf706e14,0xbfdd92fb,2 +np.float32,0x3eccb630,0x3ed8cd98,2 +np.float32,0xbcdf7aa0,0xbcdf88d3,2 +np.float32,0xbe450da8,0xbe478a8e,2 +np.float32,0x3ec9c210,0x3ed54c0b,2 +np.float32,0xbf3b86ca,0xbf6f24d1,2 +np.float32,0x3edcc7a0,0x3eec3a5c,2 +np.float32,0x3f075d5c,0x3f16a39a,2 +np.float32,0xbf5719ce,0xbf9c69de,2 +np.float32,0x3f62cb22,0x3fb3885a,2 +np.float32,0x3f639216,0x3fb55c93,2 +np.float32,0xbf473ee7,0xbf85413a,2 +np.float32,0xbf01b66c,0xbf0eea86,2 +np.float32,0x3e872d80,0x3e8a74f8,2 +np.float32,0xbf60957e,0xbfae925c,2 +np.float32,0xbf6847b2,0xbfc1929b,2 +np.float32,0x3f78bb94,0x4007b363,2 +np.float32,0xbf47efdb,0xbf8622db,2 +np.float32,0xbe1f2308,0xbe206fd6,2 +np.float32,0xbf414926,0xbf7c0a7e,2 +np.float32,0x3eecc268,0x3f00194d,2 +np.float32,0x3eb086d0,0x3eb81120,2 +np.float32,0xbef1af80,0xbf033ff5,2 +np.float32,0xbf454e56,0xbf82d4aa,2 +np.float32,0x3e622560,0x3e65ef20,2 +np.float32,0x3f50d2b2,0x3f926a83,2 +np.float32,0x3eb2c45c,0x3eba9d2c,2 +np.float32,0x3e42d1a0,0x3e4538c9,2 +np.float32,0xbf24cc5c,0xbf43b8e3,2 +np.float32,0x3e8c6464,0x3e90141a,2 +np.float32,0xbf3abff2,0xbf6d79c5,2 +np.float32,0xbec8f2e6,0xbed456fa,2 +np.float32,0xbf787b38,0xc00698b4,2 +np.float32,0xbf58d5cd,0xbf9f6c03,2 +np.float32,0x3df4ee20,0x3df61ba8,2 +np.float32,0xbf34581e,0xbf604951,2 +np.float32,0xbeba5cf4,0xbec35119,2 +np.float32,0xbf76c22d,0xbfffc51c,2 +np.float32,0x3ef63b2c,0x3f0630b4,2 +np.float32,0x3eeadb64,0x3efdc877,2 +np.float32,0x3dfd8c70,0x3dfedb24,2 +np.float32,0x3f441600,0x3f81576d,2 +np.float32,0x3f23a0d8,0x3f41bbf6,2 +np.float32,0x3cb84d40,0x3cb85536,2 +np.float32,0xbf25cb5c,0xbf456e38,2 +np.float32,0xbc108540,0xbc108636,2 +np.float32,0xbc5b9140,0xbc5b949e,2 +np.float32,0xbf62ff40,0xbfb401dd,2 +np.float32,0x3e8e0710,0x3e91d93e,2 +np.float32,0x3f1b6ae0,0x3f344dfd,2 +np.float32,0xbf4dbbbe,0xbf8dedea,2 +np.float32,0x3f1a5fb2,0x3f32a880,2 +np.float32,0xbe56bd00,0xbe59f8cb,2 +np.float32,0xbf490a5c,0xbf87902d,2 +np.float32,0xbf513072,0xbf92f717,2 +np.float32,0x3e73ee28,0x3e78b542,2 +np.float32,0x3f0a4c7a,0x3f1abf2c,2 +np.float32,0x3e10d5c8,0x3e11d00b,2 +np.float32,0xbf771aac,0xc001207e,2 +np.float32,0x3efe2f54,0x3f0b6a46,2 +np.float32,0xbea5f3ea,0xbeac291f,2 +np.float32,0xbf1a73e8,0xbf32c845,2 +np.float32,0x3ebcc82c,0x3ec61c4f,2 +np.float32,0xbf24f492,0xbf43fd9a,2 +np.float32,0x3ecbd908,0x3ed7c691,2 +np.float32,0x3f461c5e,0x3f83d3f0,2 +np.float32,0x3eed0524,0x3f0043c1,2 +np.float32,0x3d06e840,0x3d06f4bf,2 +np.float32,0x3eb6c974,0x3ebf34d7,2 
+np.float32,0xbf1c85e1,0xbf36100f,2 +np.float32,0x3ed697d0,0x3ee4ad04,2 +np.float32,0x3eab0484,0x3eb1d733,2 +np.float32,0xbf3b02f2,0xbf6e0935,2 +np.float32,0xbeeab154,0xbefd9334,2 +np.float32,0xbf695372,0xbfc49881,2 +np.float32,0x3e8aaa7c,0x3e8e36be,2 +np.float32,0xbf208754,0xbf3c8f7b,2 +np.float32,0xbe0dbf28,0xbe0ea9a1,2 +np.float32,0x3ca780c0,0x3ca786ba,2 +np.float32,0xbeb320b4,0xbebb065e,2 +np.float32,0x3f13c698,0x3f288821,2 +np.float32,0xbe8cbbec,0xbe9072c4,2 +np.float32,0x3f1ed534,0x3f39c8df,2 +np.float32,0x3e1ca450,0x3e1de190,2 +np.float32,0x3f54be1c,0x3f988134,2 +np.float32,0x3f34e4ee,0x3f6161b4,2 +np.float32,0xbf7e6913,0xc038b246,2 +np.float32,0x3d3c3f20,0x3d3c6119,2 +np.float32,0x3ca9dc80,0x3ca9e2bc,2 +np.float32,0xbf577ea2,0xbf9d161a,2 +np.float32,0xbedb22c8,0xbeea3644,2 +np.float32,0x3f22a044,0x3f400bfa,2 +np.float32,0xbe214b8c,0xbe22a637,2 +np.float32,0x3e8cd300,0x3e908bbc,2 +np.float32,0xbec4d214,0xbecf7a58,2 +np.float32,0x3e9399a4,0x3e97e7e4,2 +np.float32,0xbee6a1a2,0xbef874ed,2 +np.float32,0xbf323742,0xbf5c1bfd,2 +np.float32,0x3f48b882,0x3f8725ac,2 +np.float32,0xbf4d4dba,0xbf8d532e,2 +np.float32,0xbf59640a,0xbfa0695a,2 +np.float32,0xbf2ad562,0xbf4e4f03,2 +np.float32,0x3e317d98,0x3e334d03,2 +np.float32,0xbf6a5b71,0xbfc7b5a2,2 +np.float32,0x3e87b434,0x3e8b05cf,2 +np.float32,0xbf1c344c,0xbf358dee,2 +np.float32,0x3e449428,0x3e470c65,2 +np.float32,0xbf2c0f2f,0xbf508808,2 +np.float32,0xbec5b5ac,0xbed0859c,2 +np.float32,0xbf4aa956,0xbf89b4b1,2 +np.float32,0x3f6dd374,0x3fd35717,2 +np.float32,0x3f45f76c,0x3f83a5ef,2 +np.float32,0xbed1fba8,0xbedf1bd5,2 +np.float32,0xbd26b2d0,0xbd26ca66,2 +np.float32,0xbe9817c2,0xbe9cd1c3,2 +np.float32,0x3e725988,0x3e770875,2 +np.float32,0xbf1a8ded,0xbf32f132,2 +np.float32,0xbe695860,0xbe6d83d3,2 +np.float32,0x3d8cecd0,0x3d8d25ea,2 +np.float32,0x3f574706,0x3f9cb6ec,2 +np.float32,0xbf5c5a1f,0xbfa5eaf3,2 +np.float32,0x3e7a7c88,0x3e7fab83,2 +np.float32,0xff800000,0xffc00000,2 +np.float32,0x3f66396a,0x3fbbfbb0,2 +np.float32,0x3ed6e588,0x3ee50b53,2 +np.float32,0xbb56d500,0xbb56d532,2 +np.float32,0x3ebd23fc,0x3ec6869a,2 +np.float32,0xbf70d490,0xbfdf4af5,2 +np.float32,0x3e514f88,0x3e544d15,2 +np.float32,0x3e660f98,0x3e6a0dac,2 +np.float32,0xbf034da1,0xbf1110bb,2 +np.float32,0xbf60d9be,0xbfaf2714,2 +np.float32,0x3df67b10,0x3df7ae64,2 +np.float32,0xbeeedc0a,0xbf017010,2 +np.float32,0xbe149224,0xbe15a072,2 +np.float32,0x3f455084,0x3f82d759,2 +np.float32,0x3f210f9e,0x3f3d7093,2 +np.float32,0xbeaea3e0,0xbeb5edd3,2 +np.float32,0x3e0724b0,0x3e07efad,2 +np.float32,0x3f09a784,0x3f19d6ac,2 +np.float32,0xbf044340,0xbf125ee8,2 +np.float32,0xbf71adc9,0xbfe315fe,2 +np.float32,0x3efd3870,0x3f0ac6a8,2 +np.float32,0xbf53c7a6,0xbf96f6df,2 +np.float32,0xbf3cf784,0xbf7247af,2 +np.float32,0x3e0ce9e0,0x3e0dd035,2 +np.float32,0xbd3051a0,0xbd306d89,2 +np.float32,0x3ecab804,0x3ed66f77,2 +np.float32,0x3e984350,0x3e9d0189,2 +np.float32,0x3edd1c00,0x3eeca20b,2 +np.float32,0xbe8e22a0,0xbe91f71b,2 +np.float32,0x3ebebc18,0x3ec85fd6,2 +np.float32,0xba275c00,0xba275c01,2 +np.float32,0x3f1d8190,0x3f37a385,2 +np.float32,0x3f17343e,0x3f2dbbfe,2 +np.float32,0x3caa8000,0x3caa864e,2 +np.float32,0x3e7a7308,0x3e7fa168,2 +np.float32,0x3f7359a6,0x3feb3e1a,2 +np.float32,0xbf7ad15a,0xc012a743,2 +np.float32,0xbf122efb,0xbf262812,2 +np.float32,0xbf03ba04,0xbf11a3fa,2 +np.float32,0x3ed7a90c,0x3ee5f8d4,2 +np.float32,0xbe23e318,0xbe254eed,2 +np.float32,0xbe2866f4,0xbe29f20a,2 +np.float32,0xbeaedff2,0xbeb631d0,2 +np.float32,0x0,0x0,2 +np.float32,0x3ef2a034,0x3f03dafd,2 +np.float32,0x3f35806c,0x3f62994e,2 
+np.float32,0xbf655e19,0xbfb9c718,2 +np.float32,0x3f5d54ce,0x3fa7d4f4,2 +np.float32,0x3f33e64a,0x3f5f67e3,2 +np.float32,0x3ebf4010,0x3ec8f923,2 +np.float32,0xbe050dc8,0xbe05cf70,2 +np.float32,0x3f61693e,0x3fb063b0,2 +np.float32,0xbd94ac00,0xbd94ef12,2 +np.float32,0x3e9de008,0x3ea32f61,2 +np.float32,0xbe3d042c,0xbe3f3540,2 +np.float32,0x3e8fdfc0,0x3e93d9e4,2 +np.float32,0x3f28bc48,0x3f4a9019,2 +np.float32,0x3edea928,0x3eee8b09,2 +np.float32,0xbf05f673,0xbf14b362,2 +np.float32,0xbf360730,0xbf63a914,2 +np.float32,0xbe3fb454,0xbe41fe0a,2 +np.float32,0x3f6d99a8,0x3fd28552,2 +np.float32,0xbf3ae866,0xbf6dd052,2 +np.float32,0x3f5b1164,0x3fa37aec,2 +np.float32,0xbf64a451,0xbfb7f61b,2 +np.float32,0xbdd79bd0,0xbdd86919,2 +np.float32,0x3e89fc00,0x3e8d7a85,2 +np.float32,0x3f4bf690,0x3f8b77ea,2 +np.float32,0x3cbdf280,0x3cbdfb38,2 +np.float32,0x3f138f98,0x3f2835b4,2 +np.float32,0xbe33967c,0xbe3576bc,2 +np.float32,0xbf298164,0xbf4bedda,2 +np.float32,0x3e9955cc,0x3e9e2edb,2 +np.float32,0xbf79b383,0xc00c56c0,2 +np.float32,0x3ea0834c,0x3ea61aea,2 +np.float32,0xbf511184,0xbf92c89a,2 +np.float32,0x3f4d9fba,0x3f8dc666,2 +np.float32,0x3f3387c2,0x3f5ead80,2 +np.float32,0x3e3f7360,0x3e41babb,2 +np.float32,0xbf3cc4d6,0xbf71d879,2 +np.float32,0x3f2e4402,0x3f54994e,2 +np.float32,0x3e6a7118,0x3e6eabff,2 +np.float32,0xbf05d83e,0xbf1489cc,2 +np.float32,0xbdce4fd8,0xbdcf039a,2 +np.float32,0xbf03e2f4,0xbf11dbaf,2 +np.float32,0x3f1ea0a0,0x3f397375,2 +np.float32,0x3f7aff54,0x4013cb1b,2 +np.float32,0x3f5ef158,0x3fab1801,2 +np.float32,0xbe33bcc8,0xbe359e40,2 +np.float32,0xbf04dd0e,0xbf133111,2 +np.float32,0xbf14f887,0xbf2a54d1,2 +np.float32,0x3f75c37a,0x3ff9196e,2 +np.float32,0x3f35c3c8,0x3f6320f2,2 +np.float32,0x3f53bb94,0x3f96e3c3,2 +np.float32,0x3f4d473e,0x3f8d4a19,2 +np.float32,0xbdfe19e0,0xbdff6ac9,2 +np.float32,0xbf7f0cc4,0xc049342d,2 +np.float32,0xbdbfc778,0xbdc057bb,2 +np.float32,0xbf7575b7,0xbff73067,2 +np.float32,0xbe9df488,0xbea34609,2 +np.float32,0xbefbd3c6,0xbf09daff,2 +np.float32,0x3f19962c,0x3f316cbd,2 +np.float32,0x3f7acec6,0x40129732,2 +np.float32,0xbf5db7de,0xbfa89a21,2 +np.float32,0x3f62f444,0x3fb3e830,2 +np.float32,0xbf522adb,0xbf94737f,2 +np.float32,0xbef6ceb2,0xbf0690ba,2 +np.float32,0xbf57c41e,0xbf9d8db0,2 +np.float32,0x3eb3360c,0x3ebb1eb0,2 +np.float32,0x3f29327e,0x3f4b618e,2 +np.float32,0xbf08d099,0xbf18a916,2 +np.float32,0x3ea21014,0x3ea7d369,2 +np.float32,0x3f39e516,0x3f6ba861,2 +np.float32,0x3e7c4f28,0x3e80ce08,2 +np.float32,0xbec5a7f8,0xbed07582,2 +np.float32,0xbf0b1b46,0xbf1be3e7,2 +np.float32,0xbef0e0ec,0xbf02bb2e,2 +np.float32,0x3d835a30,0x3d838869,2 +np.float32,0x3f08aa40,0x3f18736e,2 +np.float32,0x3eb0e4c8,0x3eb87bcd,2 +np.float32,0x3eb3821c,0x3ebb7564,2 +np.float32,0xbe3a7320,0xbe3c8d5a,2 +np.float32,0x3e43f8c0,0x3e466b10,2 +np.float32,0x3e914288,0x3e955b69,2 +np.float32,0x3ec7d800,0x3ed308e7,2 +np.float32,0x3e603df8,0x3e63eef2,2 +np.float32,0x3f225cac,0x3f3f9ac6,2 +np.float32,0x3e3db8f0,0x3e3ff06b,2 +np.float32,0x3f358d78,0x3f62b38c,2 +np.float32,0xbed9bd64,0xbee88158,2 +np.float32,0x800000,0x800000,2 +np.float32,0x3f1adfce,0x3f337230,2 +np.float32,0xbefdc346,0xbf0b229d,2 +np.float32,0xbf091018,0xbf190208,2 +np.float32,0xbf800000,0xff800000,2 +np.float32,0x3f27c2c4,0x3f48d8db,2 +np.float32,0x3ef59c80,0x3f05c993,2 +np.float32,0x3e18a340,0x3e19c893,2 +np.float32,0x3f209610,0x3f3ca7c5,2 +np.float32,0x3f69cc22,0x3fc60087,2 +np.float32,0xbf66cf07,0xbfbd8721,2 +np.float32,0xbf768098,0xbffdfcc4,2 +np.float32,0x3df27a40,0x3df39ec4,2 +np.float32,0x3daf5bd0,0x3dafca02,2 
+np.float32,0x3f53f2be,0x3f973b41,2 +np.float32,0xbf7edcbc,0xc0436ce3,2 +np.float32,0xbdf61db8,0xbdf74fae,2 +np.float32,0x3e2c9328,0x3e2e3cb2,2 +np.float32,0x3f1a4570,0x3f327f41,2 +np.float32,0xbf766306,0xbffd32f1,2 +np.float32,0xbf468b9d,0xbf845f0f,2 +np.float32,0x3e398970,0x3e3b9bb1,2 +np.float32,0xbbefa900,0xbbefaa18,2 +np.float32,0xbf54c989,0xbf9893ad,2 +np.float32,0x3f262cf6,0x3f46169d,2 +np.float32,0x3f638a8a,0x3fb54a98,2 +np.float32,0xbeb36c78,0xbebb5cb8,2 +np.float32,0xbeac4d42,0xbeb34993,2 +np.float32,0x3f1d1942,0x3f36fbf2,2 +np.float32,0xbf5d49ba,0xbfa7bf07,2 +np.float32,0xbf182b5c,0xbf2f38d0,2 +np.float32,0x3f41a742,0x3f7ce5ef,2 +np.float32,0x3f0b9a6c,0x3f1c9898,2 +np.float32,0x3e847494,0x3e8788f3,2 +np.float32,0xbde41608,0xbde50941,2 +np.float32,0x3f693944,0x3fc44b5a,2 +np.float32,0x3f0386b2,0x3f115e37,2 +np.float32,0x3f3a08b0,0x3f6bf3c1,2 +np.float32,0xbf78ee64,0xc0089977,2 +np.float32,0xbf013a11,0xbf0e436e,2 +np.float32,0x3f00668e,0x3f0d2836,2 +np.float32,0x3e6d9850,0x3e720081,2 +np.float32,0x3eacf578,0x3eb4075d,2 +np.float32,0x3f18aef8,0x3f3004b4,2 +np.float32,0x3de342f0,0x3de43385,2 +np.float32,0x3e56cee8,0x3e5a0b85,2 +np.float32,0xbf287912,0xbf4a1966,2 +np.float32,0x3e92c948,0x3e9704c2,2 +np.float32,0x3c07d080,0x3c07d14c,2 +np.float32,0xbe90f6a0,0xbe9508e0,2 +np.float32,0x3e8b4f28,0x3e8ee884,2 +np.float32,0xbf35b56c,0xbf6303ff,2 +np.float32,0xbef512b8,0xbf057027,2 +np.float32,0x3e36c630,0x3e38c0cd,2 +np.float32,0x3f0b3ca8,0x3f1c134a,2 +np.float32,0x3e4cd610,0x3e4fa2c5,2 +np.float32,0xbf5a8372,0xbfa273a3,2 +np.float32,0xbecaad3c,0xbed662ae,2 +np.float32,0xbec372d2,0xbecddeac,2 +np.float32,0x3f6fb2b2,0x3fda8a22,2 +np.float32,0x3f365f28,0x3f645b5a,2 +np.float32,0xbecd00fa,0xbed926a4,2 +np.float32,0xbebafa32,0xbec40672,2 +np.float32,0xbf235b73,0xbf4146c4,2 +np.float32,0x3f7a4658,0x400f6e2c,2 +np.float32,0x3f35e824,0x3f636a54,2 +np.float32,0x3cb87640,0x3cb87e3c,2 +np.float32,0xbf296288,0xbf4bb6ee,2 +np.float32,0x7f800000,0xffc00000,2 +np.float32,0xbf4de86e,0xbf8e2d1a,2 +np.float32,0xbf4ace12,0xbf89e5f3,2 +np.float32,0x3d65a300,0x3d65e0b5,2 +np.float32,0xbe10c534,0xbe11bf21,2 +np.float32,0xbeba3c1c,0xbec32b3e,2 +np.float32,0x3e87eaf8,0x3e8b40b8,2 +np.float32,0x3d5c3bc0,0x3d5c722d,2 +np.float32,0x3e8c14b8,0x3e8fbdf8,2 +np.float32,0xbf06c6f0,0xbf15d327,2 +np.float32,0xbe0f1e30,0xbe100f96,2 +np.float32,0xbee244b0,0xbef30251,2 +np.float32,0x3f2a21b0,0x3f4d0c1d,2 +np.float32,0xbf5f7f81,0xbfac408e,2 +np.float32,0xbe3dba2c,0xbe3ff1b2,2 +np.float32,0x3f3ffc22,0x3f790abf,2 +np.float32,0x3edc3dac,0x3eeb90fd,2 +np.float32,0x7f7fffff,0xffc00000,2 +np.float32,0x3ecfaaac,0x3edc5485,2 +np.float32,0x3f0affbe,0x3f1bbcd9,2 +np.float32,0x3f5f2264,0x3fab7dca,2 +np.float32,0x3f37394c,0x3f66186c,2 +np.float32,0xbe6b2f6c,0xbe6f74e3,2 +np.float32,0x3f284772,0x3f49c1f1,2 +np.float32,0xbdf27bc8,0xbdf3a051,2 +np.float32,0xbc8b14e0,0xbc8b184c,2 +np.float32,0x3f6a867c,0x3fc83b07,2 +np.float32,0x3f1ec876,0x3f39b429,2 +np.float32,0x3f6fd9a8,0x3fdb28d6,2 +np.float32,0xbf473cca,0xbf853e8c,2 +np.float32,0x3e23eff8,0x3e255c23,2 +np.float32,0x3ebefdfc,0x3ec8ac5d,2 +np.float32,0x3f6c8c22,0x3fced2b1,2 +np.float32,0x3f168388,0x3f2cad44,2 +np.float32,0xbece2410,0xbeda81ac,2 +np.float32,0x3f5532f0,0x3f993eea,2 +np.float32,0x3ef1938c,0x3f032dfa,2 +np.float32,0xbef05268,0xbf025fba,2 +np.float32,0x3f552e4a,0x3f993754,2 +np.float32,0x3e9ed068,0x3ea4392d,2 +np.float32,0xbe1a0c24,0xbe1b39be,2 +np.float32,0xbf2623aa,0xbf46068c,2 +np.float32,0xbe1cc300,0xbe1e00fc,2 +np.float32,0xbe9c0576,0xbea12397,2 
+np.float32,0xbd827338,0xbd82a07e,2 +np.float32,0x3f0fc31a,0x3f229786,2 +np.float32,0x3e577810,0x3e5abc7d,2 +np.float32,0x3e0e1cb8,0x3e0f0906,2 +np.float32,0x3e84d344,0x3e87ee73,2 +np.float32,0xbf39c45e,0xbf6b6337,2 +np.float32,0x3edfb25c,0x3eefd273,2 +np.float32,0x3e016398,0x3e021596,2 +np.float32,0xbefeb1be,0xbf0bc0de,2 +np.float32,0x3f37e104,0x3f677196,2 +np.float32,0x3f545316,0x3f97d500,2 +np.float32,0xbefc165a,0xbf0a06ed,2 +np.float32,0xbf0923e6,0xbf191dcd,2 +np.float32,0xbf386508,0xbf68831f,2 +np.float32,0xbf3d4630,0xbf72f4e1,2 +np.float32,0x3f3dbe82,0x3f73ff13,2 +np.float32,0xbf703de4,0xbfdcc7e2,2 +np.float32,0xbf531482,0xbf95dd1a,2 +np.float32,0xbf0af1b6,0xbf1ba8f4,2 +np.float32,0xbec8fd9c,0xbed463a4,2 +np.float32,0xbe230320,0xbe24691a,2 +np.float32,0xbf7de541,0xc02faf38,2 +np.float32,0x3efd2360,0x3f0ab8b7,2 +np.float32,0x3db7f350,0x3db87291,2 +np.float32,0x3e74c510,0x3e799924,2 +np.float32,0x3da549c0,0x3da5a5fc,2 +np.float32,0x3e8a3bc4,0x3e8dbf4a,2 +np.float32,0xbf69f086,0xbfc66e84,2 +np.float32,0x3f323f8e,0x3f5c2c17,2 +np.float32,0x3ec0ae3c,0x3ecaa334,2 +np.float32,0xbebe8966,0xbec824fc,2 +np.float32,0x3f34691e,0x3f606b13,2 +np.float32,0x3f13790e,0x3f2813f5,2 +np.float32,0xbf61c027,0xbfb12618,2 +np.float32,0x3e90c690,0x3e94d4a1,2 +np.float32,0xbefce8f0,0xbf0a920e,2 +np.float32,0xbf5c0e8a,0xbfa559a7,2 +np.float32,0x3f374f60,0x3f6645b6,2 +np.float32,0x3f25f6fa,0x3f45b967,2 +np.float32,0x3f2421aa,0x3f42963a,2 +np.float32,0x3ebfa328,0x3ec96c57,2 +np.float32,0x3e3bef28,0x3e3e1685,2 +np.float32,0x3ea3fa3c,0x3ea9f4dd,2 +np.float32,0x3f362b8e,0x3f63f2b2,2 +np.float32,0xbedcef18,0xbeec6ada,2 +np.float32,0xbdd29c88,0xbdd35bd0,2 +np.float32,0x3f261aea,0x3f45f76f,2 +np.float32,0xbe62c470,0xbe66965e,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0xbee991aa,0xbefc277b,2 +np.float32,0xbf571960,0xbf9c6923,2 +np.float32,0xbe6fb410,0xbe743b41,2 +np.float32,0x3eb1bed0,0x3eb9738d,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3eddcbe4,0x3eed7a69,2 +np.float32,0xbf2a81ba,0xbf4db86d,2 +np.float32,0x3f74da54,0x3ff38737,2 +np.float32,0xbeb6bff4,0xbebf29f4,2 +np.float32,0x3f445752,0x3f81a698,2 +np.float32,0x3ed081b4,0x3edd5618,2 +np.float32,0xbee73802,0xbef931b4,2 +np.float32,0xbd13f2a0,0xbd14031c,2 +np.float32,0xbb4d1200,0xbb4d122c,2 +np.float32,0xbee8777a,0xbefac393,2 +np.float32,0x3f42047c,0x3f7dc06c,2 +np.float32,0xbd089270,0xbd089f67,2 +np.float32,0xbf628c16,0xbfb2f66b,2 +np.float32,0x3e72e098,0x3e77978d,2 +np.float32,0x3ed967cc,0x3ee818e4,2 +np.float32,0x3e284c80,0x3e29d6d9,2 +np.float32,0x3f74e8ba,0x3ff3dbef,2 +np.float32,0x3f013e86,0x3f0e4969,2 +np.float32,0xbf610d4f,0xbfaf983c,2 +np.float32,0xbf3c8d36,0xbf715eba,2 +np.float32,0xbedbc756,0xbeeaffdb,2 +np.float32,0x3e143ec8,0x3e154b4c,2 +np.float32,0xbe1c9808,0xbe1dd4fc,2 +np.float32,0xbe887a1e,0xbe8bdac5,2 +np.float32,0xbe85c4bc,0xbe88f17a,2 +np.float32,0x3f35967e,0x3f62c5b4,2 +np.float32,0x3ea2c4a4,0x3ea89c2d,2 +np.float32,0xbc8703c0,0xbc8706e1,2 +np.float32,0xbf13d52c,0xbf289dff,2 +np.float32,0xbf63bb56,0xbfb5bf29,2 +np.float32,0xbf61c5ef,0xbfb13319,2 +np.float32,0xbf128410,0xbf26a675,2 +np.float32,0x3f03fcf2,0x3f11ff13,2 +np.float32,0xbe49c924,0xbe4c75cd,2 +np.float32,0xbf211a9c,0xbf3d82c5,2 +np.float32,0x3f7e9d52,0x403d1b42,2 +np.float32,0x3edfefd4,0x3ef01e71,2 +np.float32,0x3ebc5bd8,0x3ec59efb,2 +np.float32,0x3d7b02e0,0x3d7b537f,2 +np.float32,0xbf1163ba,0xbf24fb43,2 +np.float32,0x3f5072f2,0x3f91dbf1,2 +np.float32,0xbee700ce,0xbef8ec60,2 +np.float32,0x3f534168,0x3f962359,2 +np.float32,0x3e6d6c40,0x3e71d1ef,2 
+np.float32,0x3def9d70,0x3df0b7a8,2 +np.float32,0x3e89cf80,0x3e8d4a8a,2 +np.float32,0xbf687ca7,0xbfc2290f,2 +np.float32,0x3f35e134,0x3f635c51,2 +np.float32,0x3e59eef8,0x3e5d50fa,2 +np.float32,0xbf65c9e1,0xbfbada61,2 +np.float32,0xbf759292,0xbff7e43d,2 +np.float32,0x3f4635a0,0x3f83f372,2 +np.float32,0x3f29baaa,0x3f4c53f1,2 +np.float32,0x3f6b15a6,0x3fc9fe04,2 +np.float32,0x3edabc88,0x3ee9b922,2 +np.float32,0x3ef382e0,0x3f046d4d,2 +np.float32,0xbe351310,0xbe36ff7f,2 +np.float32,0xbf05c935,0xbf14751c,2 +np.float32,0xbf0e7c50,0xbf20bc24,2 +np.float32,0xbf69bc94,0xbfc5d1b8,2 +np.float32,0xbed41aca,0xbee1aa23,2 +np.float32,0x3f518c08,0x3f938162,2 +np.float32,0xbf3d7974,0xbf73661a,2 +np.float32,0x3f1951a6,0x3f3101c9,2 +np.float32,0xbeb3f436,0xbebbf787,2 +np.float32,0xbf77a190,0xc0031d43,2 +np.float32,0x3eb5b3cc,0x3ebdf6e7,2 +np.float32,0xbed534b4,0xbee2fed2,2 +np.float32,0xbe53e1b8,0xbe56fc56,2 +np.float32,0x3f679e20,0x3fbfb91c,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0xbf7b9bcb,0xc0180073,2 +np.float32,0xbf5635e8,0xbf9aea15,2 +np.float32,0xbe5a3318,0xbe5d9856,2 +np.float32,0xbe003284,0xbe00df9a,2 +np.float32,0x3eb119a4,0x3eb8b7d6,2 +np.float32,0xbf3bccf8,0xbf6fbc84,2 +np.float32,0x3f36f600,0x3f658ea8,2 +np.float32,0x3f1ea834,0x3f397fc2,2 +np.float32,0xbe7cfb54,0xbe8129b3,2 +np.float32,0xbe9b3746,0xbea0406a,2 +np.float32,0x3edc0f90,0x3eeb586c,2 +np.float32,0x3e1842e8,0x3e19660c,2 +np.float32,0xbd8f10b0,0xbd8f4c70,2 +np.float32,0xbf064aca,0xbf1527a2,2 +np.float32,0x3e632e58,0x3e6705be,2 +np.float32,0xbef28ba4,0xbf03cdbb,2 +np.float32,0x3f27b21e,0x3f48bbaf,2 +np.float32,0xbe6f30d4,0xbe73b06e,2 +np.float32,0x3f3e6cb0,0x3f75834b,2 +np.float32,0xbf264aa5,0xbf4649f0,2 +np.float32,0xbf690775,0xbfc3b978,2 +np.float32,0xbf3e4a38,0xbf753632,2 +np.float64,0x3fe12bbe8c62577e,0x3fe32de8e5f961b0,1 +np.float64,0x3fc9b8909b337120,0x3fca1366da00efff,1 +np.float64,0x3feaee4245f5dc84,0x3ff3a011ea0432f3,1 +np.float64,0xbfe892c000f12580,0xbff03e5adaed6f0c,1 +np.float64,0xbf9be8de4837d1c0,0xbf9beaa367756bd1,1 +np.float64,0x3fe632e58fec65cc,0x3feb5ccc5114ca38,1 +np.float64,0x3fe78a0ef7ef141e,0x3fee1b4521d8eb6c,1 +np.float64,0x3feec27a65fd84f4,0x3fff643c8318e81e,1 +np.float64,0x3fbed6efce3dade0,0x3fbefd76cff00111,1 +np.float64,0xbfe3a05fab6740c0,0xbfe6db078aeeb0ca,1 +np.float64,0x3fdca11a56b94234,0x3fdece9e6eacff1b,1 +np.float64,0x3fe0fb15aae1f62c,0x3fe2e9e095ec2089,1 +np.float64,0x3fede12abf7bc256,0x3ffafd0ff4142807,1 +np.float64,0x3feb919edcf7233e,0x3ff4c9aa0bc2432f,1 +np.float64,0x3fd39633b5a72c68,0x3fd43c2e6d5f441c,1 +np.float64,0x3fd9efcbfeb3df98,0x3fdb83f03e58f91c,1 +np.float64,0x3fe2867a36650cf4,0x3fe525858c8ce72e,1 +np.float64,0x3fdacbb8f3b59770,0x3fdc8cd431b6e3ff,1 +np.float64,0x3fcc120503382408,0x3fcc88a8fa43e1c6,1 +np.float64,0xbfd99ff4eab33fea,0xbfdb24a20ae3687d,1 +np.float64,0xbfe8caf0157195e0,0xbff083b8dd0941d3,1 +np.float64,0x3fddc9bf92bb9380,0x3fe022aac0f761d5,1 +np.float64,0x3fe2dbb66e65b76c,0x3fe5a6e7caf3f1f2,1 +np.float64,0x3fe95f5c4a72beb8,0x3ff1444697e96138,1 +np.float64,0xbfc6b163d92d62c8,0xbfc6ef6e006658a1,1 +np.float64,0x3fdf1b2616be364c,0x3fe0fcbd2848c9e8,1 +np.float64,0xbfdca1ccf7b9439a,0xbfdecf7dc0eaa663,1 +np.float64,0x3fe078d6a260f1ae,0x3fe236a7c66ef6c2,1 +np.float64,0x3fdf471bb9be8e38,0x3fe11990ec74e704,1 +np.float64,0xbfe417626be82ec5,0xbfe79c9aa5ed2e2f,1 +np.float64,0xbfeb9cf5677739eb,0xbff4dfc24c012c90,1 +np.float64,0x3f8d9142b03b2280,0x3f8d91c9559d4779,1 +np.float64,0x3fb052c67220a590,0x3fb05873c90d1cd6,1 +np.float64,0x3fd742e2c7ae85c4,0x3fd860128947d15d,1 
+np.float64,0x3fec2e2a2bf85c54,0x3ff60eb554bb8d71,1 +np.float64,0xbfeb2b8bc8f65718,0xbff40b734679497a,1 +np.float64,0x3fe25f8e0d64bf1c,0x3fe4eb381d077803,1 +np.float64,0x3fe56426256ac84c,0x3fe9dafbe79370f0,1 +np.float64,0x3feecc1e5d7d983c,0x3fffa49bedc7aa25,1 +np.float64,0xbfc88ce94b3119d4,0xbfc8dbba0fdee2d2,1 +np.float64,0xbfabcf51ac379ea0,0xbfabd6552aa63da3,1 +np.float64,0xbfccc8b849399170,0xbfcd48d6ff057a4d,1 +np.float64,0x3fd2f831e8a5f064,0x3fd38e67b0dda905,1 +np.float64,0x3fcafdcd6135fb98,0x3fcb670ae2ef4d36,1 +np.float64,0x3feda6042efb4c08,0x3ffa219442ac4ea5,1 +np.float64,0x3fed382b157a7056,0x3ff8bc01bc6d10bc,1 +np.float64,0x3fed858a50fb0b14,0x3ff9b1c05cb6cc0f,1 +np.float64,0x3fcc3960653872c0,0x3fccb2045373a3d1,1 +np.float64,0xbfec5177e478a2f0,0xbff65eb4557d94eb,1 +np.float64,0x3feafe0d5e75fc1a,0x3ff3bb4a260a0dcb,1 +np.float64,0x3fe08bc87ee11790,0x3fe25078aac99d31,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0x3f79985ce0333100,0x3f799872b591d1cb,1 +np.float64,0xbfd4001cf9a8003a,0xbfd4b14b9035b94f,1 +np.float64,0x3fe54a17e6ea9430,0x3fe9ac0f18682343,1 +np.float64,0xbfb4e07fea29c100,0xbfb4ec6520dd0689,1 +np.float64,0xbfed2b6659fa56cd,0xbff895ed57dc1450,1 +np.float64,0xbfe81fc8b5f03f92,0xbfef6b95e72a7a7c,1 +np.float64,0xbfe6aced16ed59da,0xbfec4ce131ee3704,1 +np.float64,0xbfe599f30ceb33e6,0xbfea3d07c1cd78e2,1 +np.float64,0xbfe0ff278b61fe4f,0xbfe2ef8b5efa89ed,1 +np.float64,0xbfe3e9406467d281,0xbfe750e43e841736,1 +np.float64,0x3fcc6b52cf38d6a8,0x3fcce688f4fb2cf1,1 +np.float64,0xbfc890e8133121d0,0xbfc8dfdfee72d258,1 +np.float64,0x3fe46e81dbe8dd04,0x3fe82e09783811a8,1 +np.float64,0x3fd94455e5b288ac,0x3fdab7cef2de0b1f,1 +np.float64,0xbfe82151fff042a4,0xbfef6f254c9696ca,1 +np.float64,0x3fcee1ac1d3dc358,0x3fcf80a6ed07070a,1 +np.float64,0x3fcce8f90939d1f0,0x3fcd6ad18d34f8b5,1 +np.float64,0x3fd6afe56fad5fcc,0x3fd7b7567526b1fb,1 +np.float64,0x3fb1a77092234ee0,0x3fb1ae9fe0d176fc,1 +np.float64,0xbfeb758b0d76eb16,0xbff493d105652edc,1 +np.float64,0xbfb857c24e30af88,0xbfb86aa4da3be53f,1 +np.float64,0x3fe89064eff120ca,0x3ff03b7c5b3339a8,1 +np.float64,0xbfc1bd2fef237a60,0xbfc1da99893473ed,1 +np.float64,0xbfe5ad6e2eeb5adc,0xbfea60ed181b5c05,1 +np.float64,0x3fd5a66358ab4cc8,0x3fd6899e640aeb1f,1 +np.float64,0xbfe198e832e331d0,0xbfe3c8c9496d0de5,1 +np.float64,0xbfdaa5c0d7b54b82,0xbfdc5ed7d3c5ce49,1 +np.float64,0x3fcceccb6939d998,0x3fcd6ed88c2dd3a5,1 +np.float64,0xbfe44413eae88828,0xbfe7e6cd32b34046,1 +np.float64,0xbfc7cbeccf2f97d8,0xbfc8139a2626edae,1 +np.float64,0x3fbf31e4fa3e63d0,0x3fbf59c6e863255e,1 +np.float64,0x3fdf03fa05be07f4,0x3fe0ed953f7989ad,1 +np.float64,0x3fe7f4eaceefe9d6,0x3fef092ca7e2ac39,1 +np.float64,0xbfc084e9d92109d4,0xbfc09ca10fd6aaea,1 +np.float64,0xbf88cfbf70319f80,0xbf88d00effa6d897,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfa0176e9c202ee0,0xbfa018ca0a6ceef3,1 +np.float64,0xbfd88d0815b11a10,0xbfd9dfc6c6bcbe4e,1 +np.float64,0x3fe89f7730713eee,0x3ff04de52fb536f3,1 +np.float64,0xbfedc9707bfb92e1,0xbffaa25fcf9dd6da,1 +np.float64,0x3fe936d1a6726da4,0x3ff10e40c2d94bc9,1 +np.float64,0x3fdb64aec7b6c95c,0x3fdd473177317b3f,1 +np.float64,0xbfee4f9aaefc9f35,0xbffcdd212667003c,1 +np.float64,0x3fe3730067e6e600,0x3fe692b0a0babf5f,1 +np.float64,0xbfc257e58924afcc,0xbfc27871f8c218d7,1 +np.float64,0x3fe62db12dec5b62,0x3feb52c61b97d9f6,1 +np.float64,0xbfe3ff491367fe92,0xbfe774f1b3a96fd6,1 +np.float64,0x3fea43255274864a,0x3ff28b0c4b7b8d21,1 +np.float64,0xbfea37923c746f24,0xbff27962159f2072,1 +np.float64,0x3fcd0ac3c73a1588,0x3fcd8e6f8de41755,1 
+np.float64,0xbfdccafde6b995fc,0xbfdf030fea8a0630,1 +np.float64,0x3fdba35268b746a4,0x3fdd94094f6f50c1,1 +np.float64,0x3fc68ea1d92d1d40,0x3fc6cb8d07cbb0e4,1 +np.float64,0xbfb88b1f6e311640,0xbfb89e7af4e58778,1 +np.float64,0xbfedc7cadffb8f96,0xbffa9c3766227956,1 +np.float64,0x3fe7928d3eef251a,0x3fee2dcf2ac7961b,1 +np.float64,0xbfeff42ede7fe85e,0xc00cef6b0f1e8323,1 +np.float64,0xbfebf07fa477e0ff,0xbff5893f99e15236,1 +np.float64,0x3fe3002ab9660056,0x3fe5defba550c583,1 +np.float64,0x3feb8f4307f71e86,0x3ff4c517ec8d6de9,1 +np.float64,0x3fd3c16f49a782e0,0x3fd46becaacf74da,1 +np.float64,0x3fc7613df12ec278,0x3fc7a52b2a3c3368,1 +np.float64,0xbfe33af560e675eb,0xbfe63a6528ff1587,1 +np.float64,0xbfde86495abd0c92,0xbfe09bd7ba05b461,1 +np.float64,0x3fe1e7fb4ee3cff6,0x3fe43b04311c0ab6,1 +np.float64,0xbfc528b6bd2a516c,0xbfc55ae0a0c184c8,1 +np.float64,0xbfd81025beb0204c,0xbfd94dd72d804613,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x3fc1151c47222a38,0x3fc12f5aad80a6bf,1 +np.float64,0x3feafa136775f426,0x3ff3b46854da0b3a,1 +np.float64,0x3fed2da0747a5b40,0x3ff89c85b658459e,1 +np.float64,0x3fda2a4b51b45498,0x3fdbca0d908ddbbd,1 +np.float64,0xbfd04cf518a099ea,0xbfd0aae0033b9e4c,1 +np.float64,0xbfb9065586320ca8,0xbfb91adb7e31f322,1 +np.float64,0xbfd830b428b06168,0xbfd973ca3c484d8d,1 +np.float64,0x3fc952f7ed32a5f0,0x3fc9a9994561fc1a,1 +np.float64,0xbfeb06c83c760d90,0xbff3ca77b326df20,1 +np.float64,0xbfeb1c98ac763931,0xbff3f0d0900f6149,1 +np.float64,0x3fdf061dbebe0c3c,0x3fe0eefb32b48d17,1 +np.float64,0xbf9acbaf28359760,0xbf9acd4024be9fec,1 +np.float64,0x3fec0adde2f815bc,0x3ff5c1628423794d,1 +np.float64,0xbfc4bc750d2978ec,0xbfc4eba43f590b94,1 +np.float64,0x3fdbe47878b7c8f0,0x3fdde44a2b500d73,1 +np.float64,0x3fe160d18162c1a4,0x3fe378cff08f18f0,1 +np.float64,0x3fc3b58dfd276b18,0x3fc3de01d3802de9,1 +np.float64,0x3fa860343430c060,0x3fa864ecd07ec962,1 +np.float64,0x3fcaebfb4b35d7f8,0x3fcb546512d1b4c7,1 +np.float64,0x3fe3fda558e7fb4a,0x3fe772412e5776de,1 +np.float64,0xbfe8169f2c702d3e,0xbfef5666c9a10f6d,1 +np.float64,0x3feda78e9efb4f1e,0x3ffa270712ded769,1 +np.float64,0xbfda483161b49062,0xbfdbedfbf2e850ba,1 +np.float64,0x3fd7407cf3ae80f8,0x3fd85d4f52622743,1 +np.float64,0xbfd63de4d4ac7bca,0xbfd73550a33e3c32,1 +np.float64,0xbfd9c30b90b38618,0xbfdb4e7695c856f3,1 +np.float64,0x3fcd70c00b3ae180,0x3fcdfa0969e0a119,1 +np.float64,0x3feb4f127f769e24,0x3ff44bf42514e0f4,1 +np.float64,0xbfec1db44af83b69,0xbff5ea54aed1f8e9,1 +np.float64,0x3fd68ff051ad1fe0,0x3fd792d0ed6d6122,1 +np.float64,0x3fe0a048a5614092,0x3fe26c80a826b2a2,1 +np.float64,0x3fd59f3742ab3e70,0x3fd6818563fcaf80,1 +np.float64,0x3fca26ecf9344dd8,0x3fca867ceb5d7ba8,1 +np.float64,0x3fdc1d547ab83aa8,0x3fde2a9cea866484,1 +np.float64,0xbfc78df6312f1bec,0xbfc7d3719b698a39,1 +np.float64,0x3fe754e72b6ea9ce,0x3feda89ea844a2e5,1 +np.float64,0x3fe740c1a4ee8184,0x3fed7dc56ec0c425,1 +np.float64,0x3fe77566a9eeeace,0x3fedee6f408df6de,1 +np.float64,0xbfbbf5bf8e37eb80,0xbfbc126a223781b4,1 +np.float64,0xbfe0acb297615965,0xbfe27d86681ca2b5,1 +np.float64,0xbfc20a0487241408,0xbfc228f5f7d52ce8,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x3fef98a4dbff314a,0x40043cfb60bd46fa,1 +np.float64,0x3fd059102ca0b220,0x3fd0b7d2be6d7822,1 +np.float64,0x3fe89f18a1f13e32,0x3ff04d714bbbf400,1 +np.float64,0x3fd45b6275a8b6c4,0x3fd516a44a276a4b,1 +np.float64,0xbfe04463e86088c8,0xbfe1ef9dfc9f9a53,1 +np.float64,0xbfe086e279610dc5,0xbfe249c9c1040a13,1 +np.float64,0x3f89c9b110339380,0x3f89ca0a641454b5,1 +np.float64,0xbfb5f5b4322beb68,0xbfb6038dc3fd1516,1 
+np.float64,0x3fe6eae76f6dd5ce,0x3feccabae04d5c14,1 +np.float64,0x3fa9ef6c9c33dee0,0x3fa9f51c9a8c8a2f,1 +np.float64,0xbfe171b45f62e368,0xbfe390ccc4c01bf6,1 +np.float64,0x3fb2999442253330,0x3fb2a1fc006804b5,1 +np.float64,0x3fd124bf04a24980,0x3fd1927abb92472d,1 +np.float64,0xbfe6e05938edc0b2,0xbfecb519ba78114f,1 +np.float64,0x3fed466ee6fa8cde,0x3ff8e75405b50490,1 +np.float64,0xbfb999aa92333358,0xbfb9afa4f19f80a2,1 +np.float64,0xbfe98969ed7312d4,0xbff17d887b0303e7,1 +np.float64,0x3fe782843e6f0508,0x3fee0adbeebe3486,1 +np.float64,0xbfe232fcc26465fa,0xbfe4a90a68d46040,1 +np.float64,0x3fd190a90fa32154,0x3fd206f56ffcdca2,1 +np.float64,0xbfc4f8b75929f170,0xbfc5298b2d4e7740,1 +np.float64,0xbfba3a63d63474c8,0xbfba520835c2fdc2,1 +np.float64,0xbfb7708eea2ee120,0xbfb781695ec17846,1 +np.float64,0x3fed9fb7a5fb3f70,0x3ffa0b717bcd1609,1 +np.float64,0xbfc1b158cd2362b0,0xbfc1ce87345f3473,1 +np.float64,0x3f963478082c6900,0x3f96355c3000953b,1 +np.float64,0x3fc5050e532a0a20,0x3fc536397f38f616,1 +np.float64,0x3fe239f9eee473f4,0x3fe4b360da3b2faa,1 +np.float64,0xbfd66bd80eacd7b0,0xbfd769a29fd784c0,1 +np.float64,0x3fc57cdad52af9b8,0x3fc5b16b937f5f72,1 +np.float64,0xbfd3c36a0aa786d4,0xbfd46e1cd0b4eddc,1 +np.float64,0x3feff433487fe866,0x400cf0ea1def3161,1 +np.float64,0xbfed5577807aaaef,0xbff915e8f6bfdf22,1 +np.float64,0xbfca0dd3eb341ba8,0xbfca6c4d11836cb6,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbf974deaa82e9be0,0xbf974ef26a3130d1,1 +np.float64,0xbfe7f425e1efe84c,0xbfef076cb00d649d,1 +np.float64,0xbfe4413605e8826c,0xbfe7e20448b8a4b1,1 +np.float64,0xbfdfad202cbf5a40,0xbfe15cd9eb2be707,1 +np.float64,0xbfe43261ee6864c4,0xbfe7c952c951fe33,1 +np.float64,0xbfec141225782824,0xbff5d54d33861d98,1 +np.float64,0x3fd0f47abaa1e8f4,0x3fd15e8691a7f1c2,1 +np.float64,0x3fd378f0baa6f1e0,0x3fd41bea4a599081,1 +np.float64,0xbfb52523462a4a48,0xbfb5317fa7f436e2,1 +np.float64,0x3fcb30797d3660f0,0x3fcb9c174ea401ff,1 +np.float64,0xbfd48480dea90902,0xbfd5446e02c8b329,1 +np.float64,0xbfee4ae3ab7c95c7,0xbffcc650340ba274,1 +np.float64,0xbfeab086d075610e,0xbff3387f4e83ae26,1 +np.float64,0x3fa17cddf422f9c0,0x3fa17e9bf1b25736,1 +np.float64,0xbfe3064536e60c8a,0xbfe5e86aa5244319,1 +np.float64,0x3feb2882c5765106,0x3ff40604c7d97d44,1 +np.float64,0xbfa6923ff42d2480,0xbfa695ff57b2fc3f,1 +np.float64,0xbfa8bdbdcc317b80,0xbfa8c2ada0d94aa7,1 +np.float64,0x3fe7f16b8e6fe2d8,0x3fef013948c391a6,1 +np.float64,0x3fe4e7169f69ce2e,0x3fe8fceef835050a,1 +np.float64,0x3fed877638fb0eec,0x3ff9b83694127959,1 +np.float64,0xbfe0cc9ecf61993e,0xbfe2a978234cbde5,1 +np.float64,0xbfe977e79672efcf,0xbff16589ea494a38,1 +np.float64,0xbfe240130ae48026,0xbfe4bc69113e0d7f,1 +np.float64,0x3feb1e9b70763d36,0x3ff3f4615938a491,1 +np.float64,0xbfdf197dfcbe32fc,0xbfe0fba78a0fc816,1 +np.float64,0xbfee0f8543fc1f0a,0xbffbb9d9a4ee5387,1 +np.float64,0x3fe88d2191f11a44,0x3ff037843b5b6313,1 +np.float64,0xbfd11bb850a23770,0xbfd188c1cef40007,1 +np.float64,0xbfa1b36e9c2366e0,0xbfa1b53d1d8a8bc4,1 +np.float64,0xbfea2d70d9f45ae2,0xbff26a0629e36b3e,1 +np.float64,0xbfd9188703b2310e,0xbfda83f9ddc18348,1 +np.float64,0xbfee194894fc3291,0xbffbe3c83b61e7cb,1 +np.float64,0xbfe093b4a9e1276a,0xbfe25b4ad6f8f83d,1 +np.float64,0x3fea031489f4062a,0x3ff22accc000082e,1 +np.float64,0xbfc6c0827b2d8104,0xbfc6ff0a94326381,1 +np.float64,0x3fef5cd340feb9a6,0x4002659c5a1b34af,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x3fd97cb533b2f96c,0x3fdafab28aaae8e3,1 +np.float64,0x3fe2123334642466,0x3fe478bd83a8ce02,1 +np.float64,0xbfd9a69637b34d2c,0xbfdb2c87c6b6fb8c,1 
+np.float64,0x3fc58def7f2b1be0,0x3fc5c2ff724a9f61,1 +np.float64,0xbfedd5da1f7babb4,0xbffad15949b7fb22,1 +np.float64,0x3fe90e92a0721d26,0x3ff0d9b64323efb8,1 +np.float64,0x3fd34b9442a69728,0x3fd3e9f8fe80654e,1 +np.float64,0xbfc5f509ab2bea14,0xbfc62d2ad325c59f,1 +np.float64,0x3feb245634f648ac,0x3ff3fe91a46acbe1,1 +np.float64,0x3fd101e539a203cc,0x3fd16cf52ae6d203,1 +np.float64,0xbfc51e9ba72a3d38,0xbfc5507d00521ba3,1 +np.float64,0x3fe5fe1683ebfc2e,0x3feaf7dd8b1f92b0,1 +np.float64,0x3fc362e59126c5c8,0x3fc389601814170b,1 +np.float64,0x3fea34dbd77469b8,0x3ff27542eb721e7e,1 +np.float64,0xbfc13ed241227da4,0xbfc159d42c0a35a9,1 +np.float64,0xbfe6df118cedbe23,0xbfecb27bb5d3f784,1 +np.float64,0x3fd92895f6b2512c,0x3fda96f5f94b625e,1 +np.float64,0xbfe7ea3aa76fd476,0xbfeef0e93939086e,1 +np.float64,0xbfc855498330aa94,0xbfc8a1ff690c9533,1 +np.float64,0x3fd9f27b3ab3e4f8,0x3fdb8726979afc3b,1 +np.float64,0x3fc65d52232cbaa8,0x3fc698ac4367afba,1 +np.float64,0x3fd1271dd0a24e3c,0x3fd195087649d54e,1 +np.float64,0xbfe983445df30689,0xbff175158b773b90,1 +np.float64,0xbfe0d9b13261b362,0xbfe2bb8908fc9e6e,1 +np.float64,0x3fd7671f2aaece40,0x3fd889dccbf21629,1 +np.float64,0x3fe748aebfee915e,0x3fed8e970d94c17d,1 +np.float64,0x3fea756e4e74eadc,0x3ff2d947ef3a54f4,1 +np.float64,0x3fde22311cbc4464,0x3fe05b4ce9df1fdd,1 +np.float64,0x3fe2b55ec1e56abe,0x3fe56c6849e3985a,1 +np.float64,0x3fed7b47437af68e,0x3ff98f8e82de99a0,1 +np.float64,0x3fec8184b179030a,0x3ff6d03aaf0135ba,1 +np.float64,0x3fc9ea825533d508,0x3fca4776d7190e71,1 +np.float64,0xbfe8ddd58b71bbab,0xbff09b770ed7bc9a,1 +np.float64,0xbfed41741bfa82e8,0xbff8d81c2a9fc615,1 +np.float64,0x3fe0a73888e14e72,0x3fe27602ad9a3726,1 +np.float64,0xbfe9d0a565f3a14b,0xbff1e1897b628f66,1 +np.float64,0x3fda12b381b42568,0x3fdbadbec22fbd5a,1 +np.float64,0x3fef0081187e0102,0x4000949eff8313c2,1 +np.float64,0x3fef6942b67ed286,0x4002b7913eb1ee76,1 +np.float64,0x3fda10f882b421f0,0x3fdbababa2d6659d,1 +np.float64,0x3fe5828971eb0512,0x3fea122b5088315a,1 +np.float64,0x3fe9d4b53ff3a96a,0x3ff1e75c148bda01,1 +np.float64,0x3fe95d246bf2ba48,0x3ff1414a61a136ec,1 +np.float64,0x3f9e575eb83caec0,0x3f9e59a4f17179e3,1 +np.float64,0x3fdb0a20b5b61440,0x3fdcd8a56178a17f,1 +np.float64,0xbfdef425e3bde84c,0xbfe0e33eeacf3861,1 +np.float64,0x3fd6afcf6bad5fa0,0x3fd7b73d47288347,1 +np.float64,0x3fe89256367124ac,0x3ff03dd9f36ce40e,1 +np.float64,0x3fe7e560fcefcac2,0x3feee5ef8688b60b,1 +np.float64,0x3fedef55e1fbdeac,0x3ffb350ee1df986b,1 +np.float64,0xbfe44b926de89725,0xbfe7f3539910c41f,1 +np.float64,0x3fc58310f32b0620,0x3fc5b7cfdba15bd0,1 +np.float64,0x3f736d256026da00,0x3f736d2eebe91a90,1 +np.float64,0x3feb012d2076025a,0x3ff3c0b5d21a7259,1 +np.float64,0xbfe466a6c468cd4e,0xbfe820c9c197601f,1 +np.float64,0x3fe1aba8aa635752,0x3fe3e3b73920f64c,1 +np.float64,0x3fe5597c336ab2f8,0x3fe9c7bc4b765b15,1 +np.float64,0x3fe1004ac5e20096,0x3fe2f12116e99821,1 +np.float64,0x3fecbc67477978ce,0x3ff76377434dbdad,1 +np.float64,0x3fe0e64515e1cc8a,0x3fe2ccf5447c1579,1 +np.float64,0x3febcfa874f79f50,0x3ff54528f0822144,1 +np.float64,0x3fc36915ed26d228,0x3fc38fb5b28d3f72,1 +np.float64,0xbfe01213e5e02428,0xbfe1ac0e1e7418f1,1 +np.float64,0x3fcd97875b3b2f10,0x3fce22fe3fc98702,1 +np.float64,0xbfe30383c5e60708,0xbfe5e427e62cc957,1 +np.float64,0xbfde339bf9bc6738,0xbfe0667f337924f5,1 +np.float64,0xbfda7c1c49b4f838,0xbfdc2c8801ce654a,1 +np.float64,0x3fb6b3489e2d6690,0x3fb6c29650387b92,1 +np.float64,0xbfe1fd4d76e3fa9b,0xbfe45a1f60077678,1 +np.float64,0xbf67c5e0402f8c00,0xbf67c5e49fce115a,1 +np.float64,0xbfd4f9aa2da9f354,0xbfd5c759603d0b9b,1 
+np.float64,0x3fe83c227bf07844,0x3fefada9f1bd7fa9,1 +np.float64,0xbf97f717982fee20,0xbf97f836701a8cd5,1 +np.float64,0x3fe9688a2472d114,0x3ff150aa575e7d51,1 +np.float64,0xbfc5a9779d2b52f0,0xbfc5df56509c48b1,1 +np.float64,0xbfe958d5f472b1ac,0xbff13b813f9bee20,1 +np.float64,0xbfd7b3b944af6772,0xbfd8e276c2b2920f,1 +np.float64,0x3fed10198e7a2034,0x3ff8469c817572f0,1 +np.float64,0xbfeeecc4517dd989,0xc000472b1f858be3,1 +np.float64,0xbfdbcce47eb799c8,0xbfddc734aa67812b,1 +np.float64,0xbfd013ee24a027dc,0xbfd06df3089384ca,1 +np.float64,0xbfd215f2bfa42be6,0xbfd29774ffe26a74,1 +np.float64,0x3fdfd0ae67bfa15c,0x3fe1746e3a963a9f,1 +np.float64,0xbfc84aa10b309544,0xbfc896f0d25b723a,1 +np.float64,0xbfcd0c627d3a18c4,0xbfcd9024c73747a9,1 +np.float64,0x3fd87df6dbb0fbec,0x3fd9ce1dde757f31,1 +np.float64,0xbfdad85e05b5b0bc,0xbfdc9c2addb6ce47,1 +np.float64,0xbfee4f8977fc9f13,0xbffcdccd68e514b3,1 +np.float64,0x3fa5c290542b8520,0x3fa5c5ebdf09ca70,1 +np.float64,0xbfd7e401d2afc804,0xbfd91a7e4eb5a026,1 +np.float64,0xbfe33ff73b667fee,0xbfe6423cc6eb07d7,1 +np.float64,0x3fdfb7d6c4bf6fac,0x3fe163f2e8175177,1 +np.float64,0xbfd515d69eaa2bae,0xbfd5e6eedd6a1598,1 +np.float64,0x3fb322232e264440,0x3fb32b49d91c3cbe,1 +np.float64,0xbfe20ac39e641587,0xbfe46dd4b3803f19,1 +np.float64,0x3fe282dc18e505b8,0x3fe520152120c297,1 +np.float64,0xbfc905a4cd320b48,0xbfc95929b74865fb,1 +np.float64,0x3fe0ae3b83615c78,0x3fe27fa1dafc825b,1 +np.float64,0xbfc1bfed0f237fdc,0xbfc1dd6466225cdf,1 +np.float64,0xbfeca4d47d7949a9,0xbff72761a34fb682,1 +np.float64,0xbfe8cf8c48f19f18,0xbff0897ebc003626,1 +np.float64,0xbfe1aaf0a36355e2,0xbfe3e2ae7b17a286,1 +np.float64,0x3fe2ca442e659488,0x3fe58c3a2fb4f14a,1 +np.float64,0xbfda3c2deeb4785c,0xbfdbdf89fe96a243,1 +np.float64,0xbfdc12bfecb82580,0xbfde1d81dea3c221,1 +np.float64,0xbfe2d6d877e5adb1,0xbfe59f73e22c1fc7,1 +np.float64,0x3fe5f930636bf260,0x3feaee96a462e4de,1 +np.float64,0x3fcf3c0ea53e7820,0x3fcfe0b0f92be7e9,1 +np.float64,0xbfa5bb90f42b7720,0xbfa5bee9424004cc,1 +np.float64,0xbfe2fb3a3265f674,0xbfe5d75b988bb279,1 +np.float64,0x3fcaec7aab35d8f8,0x3fcb54ea582fff6f,1 +np.float64,0xbfd8d3228db1a646,0xbfda322297747fbc,1 +np.float64,0x3fedd2e0ad7ba5c2,0x3ffac6002b65c424,1 +np.float64,0xbfd9edeca2b3dbda,0xbfdb81b2b7785e33,1 +np.float64,0xbfef5febb17ebfd7,0xc002796b15950960,1 +np.float64,0x3fde22f787bc45f0,0x3fe05bcc624b9ba2,1 +np.float64,0xbfc716a4ab2e2d48,0xbfc758073839dd44,1 +np.float64,0xbf9bed852837db00,0xbf9bef4b2a3f3bdc,1 +np.float64,0x3fef8f88507f1f10,0x4003e5e566444571,1 +np.float64,0xbfdc1bbed6b8377e,0xbfde28a64e174e60,1 +np.float64,0x3fe02d30eae05a62,0x3fe1d064ec027cd3,1 +np.float64,0x3fd9dbb500b3b76c,0x3fdb6bea40162279,1 +np.float64,0x3fe353ff1d66a7fe,0x3fe661b3358c925e,1 +np.float64,0x3fac3ebfb4387d80,0x3fac4618effff2b0,1 +np.float64,0x3fe63cf0ba6c79e2,0x3feb7030cff5f434,1 +np.float64,0x3fd0e915f8a1d22c,0x3fd152464597b510,1 +np.float64,0xbfd36987cda6d310,0xbfd40af049d7621e,1 +np.float64,0xbfdc5b4dc7b8b69c,0xbfde7790a35da2bc,1 +np.float64,0x3feee7ff4a7dcffe,0x40003545989e07c7,1 +np.float64,0xbfeb2c8308765906,0xbff40d2e6469249e,1 +np.float64,0x3fe535a894ea6b52,0x3fe98781648550d0,1 +np.float64,0xbfef168eb9fe2d1d,0xc000f274ed3cd312,1 +np.float64,0x3fc3e2d98927c5b0,0x3fc40c6991b8900c,1 +np.float64,0xbfcd8fe3e73b1fc8,0xbfce1aec7f9b7f7d,1 +np.float64,0xbfd55d8c3aaabb18,0xbfd6378132ee4892,1 +np.float64,0xbfe424a66168494d,0xbfe7b289d72c98b3,1 +np.float64,0x3fd81af13eb035e4,0x3fd95a6a9696ab45,1 +np.float64,0xbfe3016722e602ce,0xbfe5e0e46db228cd,1 +np.float64,0x3fe9a20beff34418,0x3ff19faca17fc468,1 
+np.float64,0xbfe2124bc7e42498,0xbfe478e19927e723,1 +np.float64,0x3fd96f8622b2df0c,0x3fdaeb08da6b08ae,1 +np.float64,0x3fecd6796579acf2,0x3ff7a7d02159e181,1 +np.float64,0x3fe60015df6c002c,0x3feafba6f2682a61,1 +np.float64,0x3fc7181cf72e3038,0x3fc7598c2cc3c3b4,1 +np.float64,0xbfce6e2e0b3cdc5c,0xbfcf0621b3e37115,1 +np.float64,0xbfe52a829e6a5505,0xbfe973a785980af9,1 +np.float64,0x3fed4bbac37a9776,0x3ff8f7a0e68a2bbe,1 +np.float64,0x3fabdfaacc37bf60,0x3fabe6bab42bd246,1 +np.float64,0xbfcd9598cb3b2b30,0xbfce20f3c4c2c261,1 +np.float64,0x3fd717d859ae2fb0,0x3fd82e88eca09ab1,1 +np.float64,0x3fe28ccb18e51996,0x3fe52f071d2694fd,1 +np.float64,0xbfe43f064ae87e0c,0xbfe7de5eab36b5b9,1 +np.float64,0x7fefffffffffffff,0xfff8000000000000,1 +np.float64,0xbfb39b045a273608,0xbfb3a4dd3395fdd5,1 +np.float64,0xbfb3358bae266b18,0xbfb33ece5e95970a,1 +np.float64,0xbfeeafb6717d5f6d,0xbffeec3f9695b575,1 +np.float64,0xbfe7a321afef4644,0xbfee522dd80f41f4,1 +np.float64,0x3fe3a17e5be742fc,0x3fe6dcd32af51e92,1 +np.float64,0xbfc61694bd2c2d28,0xbfc64fbbd835f6e7,1 +np.float64,0xbfd795906faf2b20,0xbfd8bf89b370655c,1 +np.float64,0xbfe4b39b59e96736,0xbfe8a3c5c645b6e3,1 +np.float64,0x3fd310af3ba62160,0x3fd3a9442e825e1c,1 +np.float64,0xbfd45198a6a8a332,0xbfd50bc10311a0a3,1 +np.float64,0x3fd0017eaaa002fc,0x3fd05a472a837999,1 +np.float64,0xbfea974d98752e9b,0xbff30f67f1835183,1 +np.float64,0xbf978f60582f1ec0,0xbf979070e1c2b59d,1 +np.float64,0x3fe1c715d4e38e2c,0x3fe40b479e1241a2,1 +np.float64,0xbfccb965cd3972cc,0xbfcd38b40c4a352d,1 +np.float64,0xbfd9897048b312e0,0xbfdb09d55624c2a3,1 +np.float64,0x3fe7f5de4befebbc,0x3fef0b56be259f9c,1 +np.float64,0x3fcc6c6d4338d8d8,0x3fcce7b20ed68a78,1 +np.float64,0xbfe63884046c7108,0xbfeb67a3b945c3ee,1 +np.float64,0xbfce64e2ad3cc9c4,0xbfcefc47fae2e81f,1 +np.float64,0x3fefeb57b27fd6b0,0x400ab2eac6321cfb,1 +np.float64,0x3fe679627e6cf2c4,0x3febe6451b6ee0c4,1 +np.float64,0x3fc5f710172bee20,0x3fc62f40f85cb040,1 +np.float64,0x3fc34975e52692e8,0x3fc36f58588c7fa2,1 +np.float64,0x3fe8a3784cf146f0,0x3ff052ced9bb9406,1 +np.float64,0x3fd11a607ca234c0,0x3fd1874f876233fe,1 +np.float64,0x3fb2d653f625aca0,0x3fb2df0f4c9633f3,1 +np.float64,0x3fe555f39eeaabe8,0x3fe9c15ee962a28c,1 +np.float64,0xbfea297e3bf452fc,0xbff264107117f709,1 +np.float64,0x3fe1581cdde2b03a,0x3fe36c79acedf99c,1 +np.float64,0x3fd4567063a8ace0,0x3fd51123dbd9106f,1 +np.float64,0x3fa3883aec271080,0x3fa38aa86ec71218,1 +np.float64,0x3fe40e5d7de81cba,0x3fe78dbb9b568850,1 +np.float64,0xbfe9a2f7347345ee,0xbff1a0f4faa05041,1 +np.float64,0x3f9eef03a83dde00,0x3f9ef16caa0c1478,1 +np.float64,0xbfcb4641d1368c84,0xbfcbb2e7ff8c266d,1 +np.float64,0xbfa8403b2c308070,0xbfa844e148b735b7,1 +np.float64,0xbfe1875cd6e30eba,0xbfe3afadc08369f5,1 +np.float64,0xbfdd3c3d26ba787a,0xbfdf919b3e296766,1 +np.float64,0x3fcd6c4c853ad898,0x3fcdf55647b518b8,1 +np.float64,0xbfe360a173e6c143,0xbfe6759eb3a08cf2,1 +np.float64,0x3fe5a13147eb4262,0x3fea4a5a060f5adb,1 +np.float64,0x3feb3cdd7af679ba,0x3ff42aae0cf61234,1 +np.float64,0x3fe5205128ea40a2,0x3fe9618f3d0c54af,1 +np.float64,0x3fce35343f3c6a68,0x3fcec9c4e612b050,1 +np.float64,0xbfc345724d268ae4,0xbfc36b3ce6338e6a,1 +np.float64,0x3fedc4fc0e7b89f8,0x3ffa91c1d775c1f7,1 +np.float64,0x3fe41fbf21683f7e,0x3fe7aa6c174a0e65,1 +np.float64,0xbfc7a1a5d32f434c,0xbfc7e7d27a4c5241,1 +np.float64,0x3fd3e33eaca7c67c,0x3fd4915264441e2f,1 +np.float64,0x3feb3f02f6f67e06,0x3ff42e942249e596,1 +np.float64,0x3fdb75fcb0b6ebf8,0x3fdd5c63f98b6275,1 +np.float64,0x3fd6476603ac8ecc,0x3fd74020b164cf38,1 +np.float64,0x3fed535372faa6a6,0x3ff90f3791821841,1 
+np.float64,0x3fe8648ead70c91e,0x3ff006a62befd7ed,1 +np.float64,0x3fd0f90760a1f210,0x3fd1636b39bb1525,1 +np.float64,0xbfca052443340a48,0xbfca633d6e777ae0,1 +np.float64,0xbfa6a5e3342d4bc0,0xbfa6a9ac6a488f5f,1 +np.float64,0x3fd5598038aab300,0x3fd632f35c0c3d52,1 +np.float64,0xbfdf66218fbecc44,0xbfe12df83b19f300,1 +np.float64,0x3fe78e15b56f1c2c,0x3fee240d12489cd1,1 +np.float64,0x3fe3d6a7b3e7ad50,0x3fe7329dcf7401e2,1 +np.float64,0xbfddb8e97bbb71d2,0xbfe017ed6d55a673,1 +np.float64,0xbfd57afd55aaf5fa,0xbfd658a9607c3370,1 +np.float64,0xbfdba4c9abb74994,0xbfdd95d69e5e8814,1 +np.float64,0xbfe71d8090ee3b01,0xbfed3390be6d2eef,1 +np.float64,0xbfc738ac0f2e7158,0xbfc77b3553b7c026,1 +np.float64,0x3f873656302e6c80,0x3f873697556ae011,1 +np.float64,0x3fe559491d6ab292,0x3fe9c7603b12c608,1 +np.float64,0xbfe262776864c4ef,0xbfe4ef905dda8599,1 +np.float64,0x3fe59d8917eb3b12,0x3fea439f44b7573f,1 +np.float64,0xbfd4b5afb5a96b60,0xbfd57b4e3df4dbc8,1 +np.float64,0x3fe81158447022b0,0x3fef4a3cea3eb6a9,1 +np.float64,0xbfeb023441f60468,0xbff3c27f0fc1a4dc,1 +np.float64,0x3fefb212eaff6426,0x40055fc6d949cf44,1 +np.float64,0xbfe1300ac1e26016,0xbfe333f297a1260e,1 +np.float64,0xbfeae0a2f575c146,0xbff388d58c380b8c,1 +np.float64,0xbfeddd8e55fbbb1d,0xbffaef045b2e21d9,1 +np.float64,0x3fec7c6c1d78f8d8,0x3ff6c3ebb019a8e5,1 +np.float64,0xbfe27e071f64fc0e,0xbfe518d2ff630f33,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fc5872abf2b0e58,0x3fc5bc083105db76,1 +np.float64,0x3fe65114baeca22a,0x3feb9745b82ef15a,1 +np.float64,0xbfc783abe52f0758,0xbfc7c8cb23f93e79,1 +np.float64,0x3fe4b7a5dd696f4c,0x3fe8aab9d492f0ca,1 +np.float64,0xbf91a8e8a82351e0,0xbf91a95b6ae806f1,1 +np.float64,0xbfee482eb77c905d,0xbffcb952830e715a,1 +np.float64,0x3fba0eee2a341de0,0x3fba261d495e3a1b,1 +np.float64,0xbfeb8876ae7710ed,0xbff4b7f7f4343506,1 +np.float64,0xbfe4d29e46e9a53c,0xbfe8d9547a601ba7,1 +np.float64,0xbfe12413b8e24828,0xbfe3232656541d10,1 +np.float64,0x3fc0bd8f61217b20,0x3fc0d63f937f0aa4,1 +np.float64,0xbfd3debafda7bd76,0xbfd48c534e5329e4,1 +np.float64,0x3fc0f92de921f258,0x3fc112eb7d47349b,1 +np.float64,0xbfe576b95f6aed72,0xbfe9fca859239b3c,1 +np.float64,0x3fd10e520da21ca4,0x3fd17a546e4152f7,1 +np.float64,0x3fcef917eb3df230,0x3fcf998677a8fa8f,1 +np.float64,0x3fdfcf863abf9f0c,0x3fe173a98af1cb13,1 +np.float64,0x3fc28c4b4f251898,0x3fc2adf43792e917,1 +np.float64,0x3fceb837ad3d7070,0x3fcf54a63b7d8c5c,1 +np.float64,0x3fc0140a05202818,0x3fc029e4f75330cb,1 +np.float64,0xbfd76c3362aed866,0xbfd88fb9e790b4e8,1 +np.float64,0xbfe475300868ea60,0xbfe8395334623e1f,1 +np.float64,0x3fea70b9b4f4e174,0x3ff2d1dad92173ba,1 +np.float64,0xbfe2edbd4965db7a,0xbfe5c29449a9365d,1 +np.float64,0xbfddf86f66bbf0de,0xbfe0408439cada9b,1 +np.float64,0xbfb443cdfa288798,0xbfb44eae796ad3ea,1 +np.float64,0xbf96a8a0482d5140,0xbf96a992b6ef073b,1 +np.float64,0xbfd279db2fa4f3b6,0xbfd3043db6acbd9e,1 +np.float64,0x3fe5d99088ebb322,0x3feab30be14e1605,1 +np.float64,0xbfe1a917abe35230,0xbfe3e0063d0f5f63,1 +np.float64,0x3fc77272f52ee4e8,0x3fc7b6f8ab6f4591,1 +np.float64,0x3fd6b62146ad6c44,0x3fd7be77eef8390a,1 +np.float64,0xbfe39fd9bc673fb4,0xbfe6da30dc4eadde,1 +np.float64,0x3fe35545c066aa8c,0x3fe663b5873e4d4b,1 +np.float64,0xbfcbbeffb3377e00,0xbfcc317edf7f6992,1 +np.float64,0xbfe28a58366514b0,0xbfe52b5734579ffa,1 +np.float64,0xbfbf0c87023e1910,0xbfbf33d970a0dfa5,1 +np.float64,0xbfd31144cba6228a,0xbfd3a9e84f9168f9,1 +np.float64,0xbfe5c044056b8088,0xbfea83d607c1a88a,1 +np.float64,0x3fdaabdf18b557c0,0x3fdc663ee8eddc83,1 +np.float64,0xbfeb883006f71060,0xbff4b76feff615be,1 
+np.float64,0xbfebaef41d775de8,0xbff5034111440754,1 +np.float64,0x3fd9b6eb3bb36dd8,0x3fdb3fff5071dacf,1 +np.float64,0x3fe4e33c45e9c678,0x3fe8f637779ddedf,1 +np.float64,0x3fe52213a06a4428,0x3fe964adeff5c14e,1 +np.float64,0x3fe799254cef324a,0x3fee3c3ecfd3cdc5,1 +np.float64,0x3fd0533f35a0a680,0x3fd0b19a003469d3,1 +np.float64,0x3fec7ef5c7f8fdec,0x3ff6ca0abe055048,1 +np.float64,0xbfd1b5da82a36bb6,0xbfd22f357acbee79,1 +np.float64,0xbfd8f9c652b1f38c,0xbfda5faacbce9cf9,1 +np.float64,0x3fc8fc818b31f900,0x3fc94fa9a6aa53c8,1 +np.float64,0x3fcf42cc613e8598,0x3fcfe7dc128f33f2,1 +np.float64,0x3fd393a995a72754,0x3fd4396127b19305,1 +np.float64,0x3fec7b7df9f8f6fc,0x3ff6c1ae51753ef2,1 +np.float64,0x3fc07f175b20fe30,0x3fc096b55c11568c,1 +np.float64,0xbf979170082f22e0,0xbf979280d9555f44,1 +np.float64,0xbfb9d110c633a220,0xbfb9e79ba19b3c4a,1 +np.float64,0x3fedcd7d417b9afa,0x3ffab19734e86d58,1 +np.float64,0xbfec116f27f822de,0xbff5cf9425cb415b,1 +np.float64,0xbfec4fa0bef89f42,0xbff65a771982c920,1 +np.float64,0x3f94d4452829a880,0x3f94d501789ad11c,1 +np.float64,0xbfefe5ede27fcbdc,0xc009c440d3c2a4ce,1 +np.float64,0xbfe7e5f7b5efcbf0,0xbfeee74449aee1db,1 +np.float64,0xbfeb71dc8976e3b9,0xbff48cd84ea54ed2,1 +np.float64,0xbfe4cdb65f699b6c,0xbfe8d0d3bce901ef,1 +np.float64,0x3fb78ef1ee2f1de0,0x3fb7a00e7d183c48,1 +np.float64,0x3fb681864a2d0310,0x3fb6906fe64b4cd7,1 +np.float64,0xbfd2ad3b31a55a76,0xbfd33c57b5985399,1 +np.float64,0x3fdcdaaa95b9b554,0x3fdf16b99628db1e,1 +np.float64,0x3fa4780b7428f020,0x3fa47ad6ce9b8081,1 +np.float64,0x3fc546b0ad2a8d60,0x3fc579b361b3b18f,1 +np.float64,0x3feaf98dd6f5f31c,0x3ff3b38189c3539c,1 +np.float64,0x3feb0b2eca76165e,0x3ff3d22797083f9a,1 +np.float64,0xbfdc02ae3ab8055c,0xbfde099ecb5dbacf,1 +np.float64,0x3fd248bf17a49180,0x3fd2ceb77b346d1d,1 +np.float64,0x3fe349d666e693ac,0x3fe651b9933a8853,1 +np.float64,0xbfca526fc534a4e0,0xbfcab3e83f0d9b93,1 +np.float64,0x3fc156421722ac88,0x3fc171b38826563b,1 +np.float64,0xbfe4244569e8488b,0xbfe7b1e93e7d4f92,1 +np.float64,0x3fe010faabe021f6,0x3fe1aa961338886d,1 +np.float64,0xbfc52dacb72a5b58,0xbfc55ffa50eba380,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fea1d4865f43a90,0x3ff251b839eb4817,1 +np.float64,0xbfa0f65c8421ecc0,0xbfa0f7f37c91be01,1 +np.float64,0x3fcab29c0b356538,0x3fcb1863edbee184,1 +np.float64,0x3fe7949162ef2922,0x3fee323821958b88,1 +np.float64,0x3fdaf9288ab5f250,0x3fdcc400190a4839,1 +np.float64,0xbfe13ece6be27d9d,0xbfe348ba07553179,1 +np.float64,0x3f8a0c4fd0341880,0x3f8a0cabdf710185,1 +np.float64,0x3fdd0442a2ba0884,0x3fdf4b016c4da452,1 +np.float64,0xbfaf06d2343e0da0,0xbfaf1090b1600422,1 +np.float64,0xbfd3b65225a76ca4,0xbfd45fa49ae76cca,1 +np.float64,0x3fef5d75fefebaec,0x400269a5e7c11891,1 +np.float64,0xbfe048e35ce091c6,0xbfe1f5af45dd64f8,1 +np.float64,0xbfe27d4599e4fa8b,0xbfe517b07843d04c,1 +np.float64,0xbfe6f2a637ede54c,0xbfecdaa730462576,1 +np.float64,0x3fc63fbb752c7f78,0x3fc67a2854974109,1 +np.float64,0x3fedda6bfbfbb4d8,0x3ffae2e6131f3475,1 +np.float64,0x3fe7a6f5286f4dea,0x3fee5a9b1ef46016,1 +np.float64,0xbfd4ea8bcea9d518,0xbfd5b66ab7e5cf00,1 +np.float64,0x3fdc116568b822cc,0x3fde1bd4d0d9fd6c,1 +np.float64,0x3fdc45cb1bb88b98,0x3fde5cd1d2751032,1 +np.float64,0x3feabd932f757b26,0x3ff34e06e56a62a1,1 +np.float64,0xbfae5dbe0c3cbb80,0xbfae66e062ac0d65,1 +np.float64,0xbfdb385a00b670b4,0xbfdd10fedf3a58a7,1 +np.float64,0xbfebb14755f7628f,0xbff507e123a2b47c,1 +np.float64,0x3fe6de2fdfedbc60,0x3fecb0ae6e131da2,1 +np.float64,0xbfd86de640b0dbcc,0xbfd9bb4dbf0bf6af,1 +np.float64,0x3fe39e86d9e73d0e,0x3fe6d811c858d5d9,1 
+np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0x3fa8101684302020,0x3fa814a12176e937,1 +np.float64,0x3fefdd5ad37fbab6,0x4008a08c0b76fbb5,1 +np.float64,0x3fe645c727ec8b8e,0x3feb814ebc470940,1 +np.float64,0x3fe3ba79dce774f4,0x3fe70500db564cb6,1 +np.float64,0xbfe0e5a254e1cb44,0xbfe2cc13940c6d9a,1 +np.float64,0x3fe2cac62465958c,0x3fe58d008c5e31f8,1 +np.float64,0xbfd3ffb531a7ff6a,0xbfd4b0d88cff2040,1 +np.float64,0x3fe0929104612522,0x3fe259bc42dce788,1 +np.float64,0x1,0x1,1 +np.float64,0xbfe7db77e6efb6f0,0xbfeecf93e8a61cb3,1 +np.float64,0xbfe37e9559e6fd2a,0xbfe6a514e29cb7aa,1 +np.float64,0xbfc53a843f2a7508,0xbfc56d2e9ad8b716,1 +np.float64,0xbfedb04485fb6089,0xbffa4615d4334ec3,1 +np.float64,0xbfc44349b1288694,0xbfc46f484b6f1cd6,1 +np.float64,0xbfe265188264ca31,0xbfe4f37d61cd9e17,1 +np.float64,0xbfd030351da0606a,0xbfd08c2537287ee1,1 +np.float64,0x3fd8fb131db1f628,0x3fda613363ca601e,1 +np.float64,0xbff0000000000000,0xfff0000000000000,1 +np.float64,0xbfe48d9a60691b35,0xbfe862c02d8fec1e,1 +np.float64,0x3fd185e050a30bc0,0x3fd1fb4c614ddb07,1 +np.float64,0xbfe4a5807e694b01,0xbfe88b8ff2d6caa7,1 +np.float64,0xbfc934d7ad3269b0,0xbfc98a405d25a666,1 +np.float64,0xbfea0e3c62741c79,0xbff23b4bd3a7b15d,1 +np.float64,0x3fe7244071ee4880,0x3fed41b27ba6bb22,1 +np.float64,0xbfd419f81ba833f0,0xbfd4cdf71b4533a3,1 +np.float64,0xbfe1e73a34e3ce74,0xbfe439eb15fa6baf,1 +np.float64,0x3fcdd9a63f3bb350,0x3fce68e1c401eff0,1 +np.float64,0x3fd1b5960ba36b2c,0x3fd22eeb566f1976,1 +np.float64,0x3fe9ad18e0735a32,0x3ff1af23c534260d,1 +np.float64,0xbfd537918aaa6f24,0xbfd60ccc8df0962b,1 +np.float64,0x3fcba3d3c73747a8,0x3fcc14fd5e5c49ad,1 +np.float64,0x3fd367e3c0a6cfc8,0x3fd40921b14e288e,1 +np.float64,0x3fe94303c6f28608,0x3ff11e62db2db6ac,1 +np.float64,0xbfcc5f77fd38bef0,0xbfccda110c087519,1 +np.float64,0xbfd63b74d7ac76ea,0xbfd7328af9f37402,1 +np.float64,0xbfe5321289ea6425,0xbfe9811ce96609ad,1 +np.float64,0xbfde910879bd2210,0xbfe0a2cd0ed1d368,1 +np.float64,0xbfcc9d9bad393b38,0xbfcd1b722a0b1371,1 +np.float64,0xbfe6dd39e16dba74,0xbfecaeb7c8c069f6,1 +np.float64,0xbfe98316eff3062e,0xbff174d7347d48bf,1 +np.float64,0xbfda88f8d1b511f2,0xbfdc3c0e75dad903,1 +np.float64,0x3fd400d8c2a801b0,0x3fd4b21bacff1f5d,1 +np.float64,0xbfe1ed335863da66,0xbfe4429e45e99779,1 +np.float64,0xbf3423a200284800,0xbf3423a20acb0342,1 +np.float64,0xbfe97bc59672f78b,0xbff16ad1adc44a33,1 +np.float64,0xbfeeca60d7fd94c2,0xbfff98d7f18f7728,1 +np.float64,0x3fd1eb13b2a3d628,0x3fd268e6ff4d56ce,1 +np.float64,0xbfa5594c242ab2a0,0xbfa55c77d6740a39,1 +np.float64,0x3fe72662006e4cc4,0x3fed462a9dedbfee,1 +np.float64,0x3fef4bb221fe9764,0x4001fe4f4cdfedb2,1 +np.float64,0xbfe938d417f271a8,0xbff110e78724ca2b,1 +np.float64,0xbfcc29ab2f385358,0xbfcca182140ef541,1 +np.float64,0x3fe18cd42c6319a8,0x3fe3b77e018165e7,1 +np.float64,0xbfec6c5cae78d8b9,0xbff69d8e01309b48,1 +np.float64,0xbfd5723da7aae47c,0xbfd64ecde17da471,1 +np.float64,0xbfe3096722e612ce,0xbfe5ed43634f37ff,1 +np.float64,0xbfdacaceb1b5959e,0xbfdc8bb826bbed39,1 +np.float64,0x3fc59a57cb2b34b0,0x3fc5cfc4a7c9bac8,1 +np.float64,0x3f84adce10295b80,0x3f84adfc1f1f6e97,1 +np.float64,0x3fdd5b28bbbab650,0x3fdfb8b906d77df4,1 +np.float64,0x3fdebf94c6bd7f28,0x3fe0c10188e1bc7c,1 +np.float64,0x3fdb30c612b6618c,0x3fdd07bf18597821,1 +np.float64,0x3fe7eeb3176fdd66,0x3feefb0be694b855,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe10057e9e200b0,0xbfe2f13365e5b1c9,1 +np.float64,0xbfeb61a82376c350,0xbff46e665d3a60f5,1 +np.float64,0xbfe7f54aec6fea96,0xbfef0a0759f726dc,1 +np.float64,0xbfe4f6da3de9edb4,0xbfe9187d85bd1ab5,1 
+np.float64,0xbfeb8be1b3f717c4,0xbff4be8efaab2e75,1 +np.float64,0x3fed40bc31fa8178,0x3ff8d5ec4a7f3e9b,1 +np.float64,0xbfe40f8711681f0e,0xbfe78fa5c62b191b,1 +np.float64,0x3fd1034d94a2069c,0x3fd16e78e9efb85b,1 +np.float64,0x3fc74db15b2e9b60,0x3fc790f26e894098,1 +np.float64,0x3fd912a88cb22550,0x3fda7d0ab3b21308,1 +np.float64,0x3fd8948a3bb12914,0x3fd9e8950c7874c8,1 +np.float64,0xbfa7ada5242f5b50,0xbfa7b1f8db50c104,1 +np.float64,0x3feeb2e1c27d65c4,0x3fff000b7d09c9b7,1 +np.float64,0x3fe9d46cbbf3a8da,0x3ff1e6f405265a6e,1 +np.float64,0xbfe2480b77e49017,0xbfe4c83b9b37bf0c,1 +np.float64,0x3fe950ea9372a1d6,0x3ff130e62468bf2c,1 +np.float64,0x3fefa7272a7f4e4e,0x4004d8c9bf31ab58,1 +np.float64,0xbfe7309209ee6124,0xbfed5b94acef917a,1 +np.float64,0x3fd05e8c64a0bd18,0x3fd0bdb11e0903c6,1 +np.float64,0x3fd9236043b246c0,0x3fda90ccbe4bab1e,1 +np.float64,0xbfdc3d6805b87ad0,0xbfde5266e17154c3,1 +np.float64,0x3fe5e6bad76bcd76,0x3feacbc306c63445,1 +np.float64,0x3ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfde3d7390bc7ae8,0xbfe06cd480bd0196,1 +np.float64,0xbfd3e2e3c0a7c5c8,0xbfd490edc0a45e26,1 +np.float64,0x3fe39871d76730e4,0x3fe6ce54d1719953,1 +np.float64,0x3fdff00ebcbfe01c,0x3fe1894b6655a6d0,1 +np.float64,0x3f91b7ad58236f40,0x3f91b8213bcb8b0b,1 +np.float64,0xbfd99f48f7b33e92,0xbfdb23d544f62591,1 +np.float64,0x3fae3512cc3c6a20,0x3fae3e10939fd7b5,1 +np.float64,0x3fcc4cf3db3899e8,0x3fccc698a15176d6,1 +np.float64,0xbfd0927e39a124fc,0xbfd0f5522e2bc030,1 +np.float64,0x3fcee859633dd0b0,0x3fcf87bdef7a1e82,1 +np.float64,0xbfe2a8b69565516d,0xbfe5593437b6659a,1 +np.float64,0x3fecf61e20f9ec3c,0x3ff7fda16b0209d4,1 +np.float64,0xbfbf37571e3e6eb0,0xbfbf5f4e1379a64c,1 +np.float64,0xbfd54e1b75aa9c36,0xbfd626223b68971a,1 +np.float64,0x3fe1035a56e206b4,0x3fe2f5651ca0f4b0,1 +np.float64,0x3fe4992989e93254,0x3fe876751afa70dc,1 +np.float64,0x3fc8c313d3318628,0x3fc913faf15d1562,1 +np.float64,0x3f99f6ba8833ed80,0x3f99f8274fb94828,1 +np.float64,0xbfd4a58af0a94b16,0xbfd56947c276e04f,1 +np.float64,0x3fc66f8c872cdf18,0x3fc6ab7a14372a73,1 +np.float64,0x3fc41eee0d283de0,0x3fc449ff1ff0e7a6,1 +np.float64,0x3fefd04d287fa09a,0x4007585010cfa9b0,1 +np.float64,0x3fce9e746f3d3ce8,0x3fcf39514bbe5070,1 +np.float64,0xbfe8056f72700adf,0xbfef2ee2c13e67ba,1 +np.float64,0x3fdd6b1ec0bad63c,0x3fdfccf2ba144fa8,1 +np.float64,0x3fd92ee432b25dc8,0x3fda9e6b96b2b142,1 +np.float64,0xbfc4d18f9529a320,0xbfc50150fb4de0cc,1 +np.float64,0xbfe09939a7613274,0xbfe262d703c317af,1 +np.float64,0xbfd130b132a26162,0xbfd19f5a00ae29c4,1 +np.float64,0x3fa06e21d420dc40,0x3fa06f93aba415fb,1 +np.float64,0x3fc5c48fbd2b8920,0x3fc5fb3bfad3bf55,1 +np.float64,0xbfdfa2bacbbf4576,0xbfe155f839825308,1 +np.float64,0x3fe3e1fa0f67c3f4,0x3fe745081dd4fd03,1 +np.float64,0x3fdae58289b5cb04,0x3fdcac1f6789130a,1 +np.float64,0xbf8ed3ba103da780,0xbf8ed452a9cc1442,1 +np.float64,0xbfec06b46f780d69,0xbff5b86f30d70908,1 +np.float64,0xbfe990c13b732182,0xbff187a90ae611f8,1 +np.float64,0xbfdd46c738ba8d8e,0xbfdf9eee0a113230,1 +np.float64,0x3fe08b83f3611708,0x3fe2501b1c77035c,1 +np.float64,0xbfd501b65baa036c,0xbfd5d05de3fceac8,1 +np.float64,0xbfcf4fa21f3e9f44,0xbfcff5829582c0b6,1 +np.float64,0xbfefbc0bfbff7818,0xc005eca1a2c56b38,1 +np.float64,0xbfe1ba6959e374d2,0xbfe3f8f88d128ce5,1 +np.float64,0xbfd4e74ee3a9ce9e,0xbfd5b2cabeb45e6c,1 +np.float64,0xbfe77c38eaeef872,0xbfedfd332d6f1c75,1 +np.float64,0x3fa9b5e4fc336bc0,0x3fa9bb6f6b80b4af,1 +np.float64,0xbfecba63917974c7,0xbff75e44df7f8e81,1 +np.float64,0x3fd6cf17b2ad9e30,0x3fd7db0b93b7f2b5,1 diff --git 
a/python/numpy/_core/tests/data/umath-validation-set-cbrt.csv b/python/numpy/_core/tests/data/umath-validation-set-cbrt.csv new file mode 100644 index 000000000..ad141cb4f --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-cbrt.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x3ee7054c,0x3f4459ea,2 +np.float32,0x7d1e2489,0x54095925,2 +np.float32,0x7ee5edf5,0x549b992b,2 +np.float32,0x380607,0x2a425e72,2 +np.float32,0x34a8f3,0x2a3e6603,2 +np.float32,0x3eee2844,0x3f465a45,2 +np.float32,0x59e49c,0x2a638d0a,2 +np.float32,0xbf72c77a,0xbf7b83d4,2 +np.float32,0x7f2517b4,0x54af8bf0,2 +np.float32,0x80068a69,0xa9bdfe8b,2 +np.float32,0xbe8e3578,0xbf270775,2 +np.float32,0xbe4224dc,0xbf131119,2 +np.float32,0xbe0053b8,0xbf001be2,2 +np.float32,0x70e8d,0x29c2ddc5,2 +np.float32,0xff63f7b5,0xd4c37b7f,2 +np.float32,0x3f00bbed,0x3f4b9335,2 +np.float32,0x3f135f4e,0x3f54f5d4,2 +np.float32,0xbe13a488,0xbf063d13,2 +np.float32,0x3f14ec78,0x3f55b478,2 +np.float32,0x7ec35cfb,0x54935fbf,2 +np.float32,0x7d41c589,0x5412f904,2 +np.float32,0x3ef8a16e,0x3f4937f7,2 +np.float32,0x3f5d8464,0x3f73f279,2 +np.float32,0xbeec85ac,0xbf45e5cb,2 +np.float32,0x7f11f722,0x54a87cb1,2 +np.float32,0x8032c085,0xaa3c1219,2 +np.float32,0x80544bac,0xaa5eb9f2,2 +np.float32,0x3e944a10,0x3f296065,2 +np.float32,0xbf29fe50,0xbf5f5796,2 +np.float32,0x7e204d8d,0x545b03d5,2 +np.float32,0xfe1d0254,0xd4598127,2 +np.float32,0x80523129,0xaa5cdba9,2 +np.float32,0x806315fa,0xaa6b0eaf,2 +np.float32,0x3ed3d2a4,0x3f3ec117,2 +np.float32,0x7ee15007,0x549a8cc0,2 +np.float32,0x801ffb5e,0xaa213d4f,2 +np.float32,0x807f9f4a,0xaa7fbf76,2 +np.float32,0xbe45e854,0xbf1402d3,2 +np.float32,0x3d9e2e70,0x3eda0b64,2 +np.float32,0x51f404,0x2a5ca4d7,2 +np.float32,0xbe26a8b0,0xbf0bc54d,2 +np.float32,0x22c99a,0x2a25d2a7,2 +np.float32,0xbf71248b,0xbf7af2d5,2 +np.float32,0x7219fe,0x2a76608e,2 +np.float32,0x7f16fd7d,0x54aa6610,2 +np.float32,0x80716faa,0xaa75e5b9,2 +np.float32,0xbe24f9a4,0xbf0b4c65,2 +np.float32,0x800000,0x2a800000,2 +np.float32,0x80747456,0xaa780f27,2 +np.float32,0x68f9e8,0x2a6fa035,2 +np.float32,0x3f6a297e,0x3f7880d8,2 +np.float32,0x3f28b973,0x3f5ec8f6,2 +np.float32,0x7f58c577,0x54c03a70,2 +np.float32,0x804befcc,0xaa571b4f,2 +np.float32,0x3e2be027,0x3f0d36cf,2 +np.float32,0xfe7e80a4,0xd47f7ff7,2 +np.float32,0xfe9d444a,0xd489181b,2 +np.float32,0x3db3e790,0x3ee399d6,2 +np.float32,0xbf154c3e,0xbf55e23e,2 +np.float32,0x3d1096b7,0x3ea7f4aa,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x804e2521,0xaa592c06,2 +np.float32,0xbeda2f00,0xbf40a513,2 +np.float32,0x3f191788,0x3f57ae30,2 +np.float32,0x3ed24ade,0x3f3e4b34,2 +np.float32,0x807fadb4,0xaa7fc917,2 +np.float32,0xbe0a06dc,0xbf034234,2 +np.float32,0x3f250bba,0x3f5d276d,2 +np.float32,0x7e948b00,0x548682c8,2 +np.float32,0xfe65ecdc,0xd476fed2,2 +np.float32,0x6fdbdd,0x2a74c095,2 +np.float32,0x800112de,0xa9500fa6,2 +np.float32,0xfe63225c,0xd475fdee,2 +np.float32,0x7f3d9acd,0x54b7d648,2 +np.float32,0xfc46f480,0xd3bacf87,2 +np.float32,0xfe5deaac,0xd47417ff,2 +np.float32,0x60ce53,0x2a693d93,2 +np.float32,0x6a6e2f,0x2a70ba2c,2 +np.float32,0x7f43f0f1,0x54b9dcd0,2 +np.float32,0xbf6170c9,0xbf756104,2 +np.float32,0xbe5c9f74,0xbf197852,2 +np.float32,0xff1502b0,0xd4a9a693,2 +np.float32,0x8064f6af,0xaa6c886e,2 +np.float32,0xbf380564,0xbf6552e5,2 +np.float32,0xfeb9b7dc,0xd490e85f,2 +np.float32,0x7f34f941,0x54b5010d,2 +np.float32,0xbe9d4ca0,0xbf2cbd5f,2 +np.float32,0x3f6e43d2,0x3f79f240,2 +np.float32,0xbdad0530,0xbee0a8f2,2 +np.float32,0x3da18459,0x3edb9105,2 +np.float32,0xfd968340,0xd42a3808,2 
+np.float32,0x3ea03e64,0x3f2dcf96,2 +np.float32,0x801d2f5b,0xaa1c6525,2 +np.float32,0xbf47d92d,0xbf6bb7e9,2 +np.float32,0x55a6b9,0x2a5fe9fb,2 +np.float32,0x77a7c2,0x2a7a4fb8,2 +np.float32,0xfebbc16e,0xd4916f88,2 +np.float32,0x3f5d3d6e,0x3f73d86a,2 +np.float32,0xfccd2b60,0xd3edcacb,2 +np.float32,0xbd026460,0xbea244b0,2 +np.float32,0x3e55bd,0x2a4968e4,2 +np.float32,0xbe7b5708,0xbf20490d,2 +np.float32,0xfe413cf4,0xd469171f,2 +np.float32,0x7710e3,0x2a79e657,2 +np.float32,0xfc932520,0xd3d4d9ca,2 +np.float32,0xbf764a1b,0xbf7cb8aa,2 +np.float32,0x6b1923,0x2a713aca,2 +np.float32,0xfe4dcd04,0xd46e092d,2 +np.float32,0xff3085ac,0xd4b381f8,2 +np.float32,0x3f72c438,0x3f7b82b4,2 +np.float32,0xbf6f0c6e,0xbf7a3852,2 +np.float32,0x801d2b1b,0xaa1c5d8d,2 +np.float32,0x3e9db91e,0x3f2ce50d,2 +np.float32,0x3f684f9d,0x3f77d8c5,2 +np.float32,0x7dc784,0x2a7e82cc,2 +np.float32,0x7d2c88e9,0x540d64f8,2 +np.float32,0x807fb708,0xaa7fcf51,2 +np.float32,0x8003c49a,0xa99e16e0,2 +np.float32,0x3ee4f5b8,0x3f43c3ff,2 +np.float32,0xfe992c5e,0xd487e4ec,2 +np.float32,0x4b4dfa,0x2a568216,2 +np.float32,0x3d374c80,0x3eb5c6a8,2 +np.float32,0xbd3a4700,0xbeb6c15c,2 +np.float32,0xbf13cb80,0xbf5529e5,2 +np.float32,0xbe7306d4,0xbf1e7f91,2 +np.float32,0xbf800000,0xbf800000,2 +np.float32,0xbea42efe,0xbf2f394e,2 +np.float32,0x3e1981d0,0x3f07fe2c,2 +np.float32,0x3f17ea1d,0x3f572047,2 +np.float32,0x7dc1e0,0x2a7e7efe,2 +np.float32,0x80169c08,0xaa0fa320,2 +np.float32,0x3f3e1972,0x3f67d248,2 +np.float32,0xfe5d3c88,0xd473d815,2 +np.float32,0xbf677448,0xbf778aac,2 +np.float32,0x7e799b7d,0x547dd9e4,2 +np.float32,0x3f00bb2c,0x3f4b92cf,2 +np.float32,0xbeb29f9c,0xbf343798,2 +np.float32,0xbd6b7830,0xbec59a86,2 +np.float32,0x807a524a,0xaa7c282a,2 +np.float32,0xbe0a7a04,0xbf0366ab,2 +np.float32,0x80237470,0xaa26e061,2 +np.float32,0x3ccbc0f6,0x3e95744f,2 +np.float32,0x3edec6bc,0x3f41fcb6,2 +np.float32,0x3f635198,0x3f760efa,2 +np.float32,0x800eca4f,0xa9f960d8,2 +np.float32,0x3f800000,0x3f800000,2 +np.float32,0xff4eeb9e,0xd4bd456a,2 +np.float32,0x56f4e,0x29b29e70,2 +np.float32,0xff5383a0,0xd4bea95c,2 +np.float32,0x3f4c3a77,0x3f6d6d94,2 +np.float32,0x3f6c324a,0x3f79388c,2 +np.float32,0xbebdc092,0xbf37e27c,2 +np.float32,0xff258956,0xd4afb42e,2 +np.float32,0xdc78c,0x29f39012,2 +np.float32,0xbf2db06a,0xbf60f2f5,2 +np.float32,0xbe3c5808,0xbf119660,2 +np.float32,0xbf1ba866,0xbf58e0f4,2 +np.float32,0x80377640,0xaa41b79d,2 +np.float32,0x4fdc4d,0x2a5abfea,2 +np.float32,0x7f5e7560,0x54c1e516,2 +np.float32,0xfeb4d3f2,0xd48f9fde,2 +np.float32,0x3f12a622,0x3f549c7d,2 +np.float32,0x7f737ed7,0x54c7d2dc,2 +np.float32,0xa0ddc,0x29db456d,2 +np.float32,0xfe006740,0xd44b6689,2 +np.float32,0x3f17dfd4,0x3f571b6c,2 +np.float32,0x67546e,0x2a6e5dd1,2 +np.float32,0xff0d0f11,0xd4a693e2,2 +np.float32,0xbd170090,0xbeaa6738,2 +np.float32,0x5274a0,0x2a5d1806,2 +np.float32,0x3e154fe0,0x3f06be1a,2 +np.float32,0x7ddb302e,0x5440f0a7,2 +np.float32,0x3f579d10,0x3f71c2af,2 +np.float32,0xff2bc5bb,0xd4b1e20c,2 +np.float32,0xfee8fa6a,0xd49c4872,2 +np.float32,0xbea551b0,0xbf2fa07b,2 +np.float32,0xfeabc75c,0xd48d3004,2 +np.float32,0x7f50a5a8,0x54bdcbd1,2 +np.float32,0x50354b,0x2a5b110d,2 +np.float32,0x7d139f13,0x54063b6b,2 +np.float32,0xbeee1b08,0xbf465699,2 +np.float32,0xfe5e1650,0xd47427fe,2 +np.float32,0x7f7fffff,0x54cb2ff5,2 +np.float32,0xbf52ede8,0xbf6fff35,2 +np.float32,0x804bba81,0xaa56e8f1,2 +np.float32,0x6609e2,0x2a6d5e94,2 +np.float32,0x692621,0x2a6fc1d6,2 +np.float32,0xbf288bb6,0xbf5eb4d3,2 +np.float32,0x804f28c4,0xaa5a1b82,2 +np.float32,0xbdaad2a8,0xbedfb46e,2 
+np.float32,0x5e04f8,0x2a66fb13,2 +np.float32,0x804c10da,0xaa573a81,2 +np.float32,0xbe412764,0xbf12d0fd,2 +np.float32,0x801c35cc,0xaa1aa250,2 +np.float32,0x6364d4,0x2a6b4cf9,2 +np.float32,0xbf6d3cea,0xbf79962f,2 +np.float32,0x7e5a9935,0x5472defb,2 +np.float32,0xbe73a38c,0xbf1ea19c,2 +np.float32,0xbd35e950,0xbeb550f2,2 +np.float32,0x46cc16,0x2a5223d6,2 +np.float32,0x3f005288,0x3f4b5b97,2 +np.float32,0x8034e8b7,0xaa3eb2be,2 +np.float32,0xbea775fc,0xbf3061cf,2 +np.float32,0xea0e9,0x29f87751,2 +np.float32,0xbf38faaf,0xbf65b89d,2 +np.float32,0xbedf3184,0xbf421bb0,2 +np.float32,0xbe04250c,0xbf015def,2 +np.float32,0x7f56dae8,0x54bfa901,2 +np.float32,0xfebe3e04,0xd492132e,2 +np.float32,0x3e4dc326,0x3f15f19e,2 +np.float32,0x803da197,0xaa48a621,2 +np.float32,0x7eeb35aa,0x549cc7c6,2 +np.float32,0xfebb3eb6,0xd4914dc0,2 +np.float32,0xfed17478,0xd496d5e2,2 +np.float32,0x80243694,0xaa280ed2,2 +np.float32,0x8017e666,0xaa1251d3,2 +np.float32,0xbf07e942,0xbf4f4a3e,2 +np.float32,0xbf578fa6,0xbf71bdab,2 +np.float32,0x7ed8d80f,0x549896b6,2 +np.float32,0x3f2277ae,0x3f5bff11,2 +np.float32,0x7e6f195b,0x547a3cd4,2 +np.float32,0xbf441559,0xbf6a3a91,2 +np.float32,0x7f1fb427,0x54ad9d8d,2 +np.float32,0x71695f,0x2a75e12d,2 +np.float32,0xbd859588,0xbece19a1,2 +np.float32,0x7f5702fc,0x54bfb4eb,2 +np.float32,0x3f040008,0x3f4d4842,2 +np.float32,0x3de00ca5,0x3ef4df89,2 +np.float32,0x3eeabb03,0x3f45658c,2 +np.float32,0x3dfe5e65,0x3eff7480,2 +np.float32,0x1,0x26a14518,2 +np.float32,0x8065e400,0xaa6d4130,2 +np.float32,0xff50e1bb,0xd4bdde07,2 +np.float32,0xbe88635a,0xbf24b7e9,2 +np.float32,0x3f46bfab,0x3f6b4908,2 +np.float32,0xbd85c3c8,0xbece3168,2 +np.float32,0xbe633f64,0xbf1afdb1,2 +np.float32,0xff2c7706,0xd4b21f2a,2 +np.float32,0xbf02816c,0xbf4c812a,2 +np.float32,0x80653aeb,0xaa6cbdab,2 +np.float32,0x3eef1d10,0x3f469e24,2 +np.float32,0x3d9944bf,0x3ed7c36a,2 +np.float32,0x1b03d4,0x2a186b2b,2 +np.float32,0x3f251b7c,0x3f5d2e76,2 +np.float32,0x3edebab0,0x3f41f937,2 +np.float32,0xfefc2148,0xd4a073ff,2 +np.float32,0x7448ee,0x2a77f051,2 +np.float32,0x3bb8a400,0x3e3637ee,2 +np.float32,0x57df36,0x2a61d527,2 +np.float32,0xfd8b9098,0xd425fccb,2 +np.float32,0x7f67627e,0x54c4744d,2 +np.float32,0x801165d7,0xaa039fba,2 +np.float32,0x53aae5,0x2a5e2bfd,2 +np.float32,0x8014012b,0xaa09e4f1,2 +np.float32,0x3f7a2d53,0x3f7e0b4b,2 +np.float32,0x3f5fb700,0x3f74c052,2 +np.float32,0x7f192a06,0x54ab366c,2 +np.float32,0x3f569611,0x3f71603b,2 +np.float32,0x25e2dc,0x2a2a9b65,2 +np.float32,0x8036465e,0xaa405342,2 +np.float32,0x804118e1,0xaa4c5785,2 +np.float32,0xbef08d3e,0xbf4703e1,2 +np.float32,0x3447e2,0x2a3df0be,2 +np.float32,0xbf2a350b,0xbf5f6f8c,2 +np.float32,0xbec87e3e,0xbf3b4a73,2 +np.float32,0xbe99a4a8,0xbf2b6412,2 +np.float32,0x2ea2ae,0x2a36d77e,2 +np.float32,0xfcb69600,0xd3e4b9e3,2 +np.float32,0x717700,0x2a75eb06,2 +np.float32,0xbf4e81ce,0xbf6e4ecc,2 +np.float32,0xbe2021ac,0xbf09ebee,2 +np.float32,0xfef94eee,0xd49fda31,2 +np.float32,0x8563e,0x29ce0015,2 +np.float32,0x7f5d0ca5,0x54c17c0f,2 +np.float32,0x3f16459a,0x3f56590f,2 +np.float32,0xbe12f7bc,0xbf0608a0,2 +np.float32,0x3f10fd3d,0x3f53ce5f,2 +np.float32,0x3ca5e1b0,0x3e8b8d96,2 +np.float32,0xbe5288e0,0xbf17181f,2 +np.float32,0xbf7360f6,0xbf7bb8c9,2 +np.float32,0x7e989d33,0x5487ba88,2 +np.float32,0x3ea7b5dc,0x3f307839,2 +np.float32,0x7e8da0c9,0x548463f0,2 +np.float32,0xfeaf7888,0xd48e3122,2 +np.float32,0x7d90402d,0x5427d321,2 +np.float32,0x72e309,0x2a76f0ee,2 +np.float32,0xbe1faa34,0xbf09c998,2 +np.float32,0xbf2b1652,0xbf5fd1f4,2 +np.float32,0x8051eb0c,0xaa5c9cca,2 
+np.float32,0x7edf02bf,0x549a058e,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x3f67f873,0x3f77b9c1,2 +np.float32,0x3f276b63,0x3f5e358c,2 +np.float32,0x7eeb4bf2,0x549cccb9,2 +np.float32,0x3bfa2c,0x2a46d675,2 +np.float32,0x3e133c50,0x3f061d75,2 +np.float32,0x3ca302c0,0x3e8abe4a,2 +np.float32,0x802e152e,0xaa361dd5,2 +np.float32,0x3f504810,0x3f6efd0a,2 +np.float32,0xbf43e0b5,0xbf6a2599,2 +np.float32,0x80800000,0xaa800000,2 +np.float32,0x3f1c0980,0x3f590e03,2 +np.float32,0xbf0084f6,0xbf4b7638,2 +np.float32,0xfee72d32,0xd49be10d,2 +np.float32,0x3f3c00ed,0x3f66f763,2 +np.float32,0x80511e81,0xaa5be492,2 +np.float32,0xfdd1b8a0,0xd43e1f0d,2 +np.float32,0x7d877474,0x54245785,2 +np.float32,0x7f110bfe,0x54a82207,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x6b6a2,0x29bfa706,2 +np.float32,0xbf5bdfd9,0xbf7357b7,2 +np.float32,0x8025bfa3,0xaa2a6676,2 +np.float32,0x3a3581,0x2a44dd3a,2 +np.float32,0x542c2a,0x2a5e9e2f,2 +np.float32,0xbe1d5650,0xbf091d57,2 +np.float32,0x3e97760d,0x3f2a935e,2 +np.float32,0x7f5dcde2,0x54c1b460,2 +np.float32,0x800bde1e,0xa9e7bbaf,2 +np.float32,0x3e6b9e61,0x3f1cdf07,2 +np.float32,0x7d46c003,0x54143884,2 +np.float32,0x80073fbb,0xa9c49e67,2 +np.float32,0x503c23,0x2a5b1748,2 +np.float32,0x7eb7b070,0x549060c8,2 +np.float32,0xe9d8f,0x29f86456,2 +np.float32,0xbeedd4f0,0xbf464320,2 +np.float32,0x3f40d5d6,0x3f68eda1,2 +np.float32,0xff201f28,0xd4adc44b,2 +np.float32,0xbdf61e98,0xbefca9c7,2 +np.float32,0x3e8a0dc9,0x3f2562e3,2 +np.float32,0xbc0c0c80,0xbe515f61,2 +np.float32,0x2b3c15,0x2a3248e3,2 +np.float32,0x42a7bb,0x2a4df592,2 +np.float32,0x7f337947,0x54b480af,2 +np.float32,0xfec21db4,0xd4930f4b,2 +np.float32,0x7f4fdbf3,0x54bd8e94,2 +np.float32,0x1e2253,0x2a1e1286,2 +np.float32,0x800c4c80,0xa9ea819e,2 +np.float32,0x7e96f5b7,0x54873c88,2 +np.float32,0x7ce4e131,0x53f69ed4,2 +np.float32,0xbead8372,0xbf327b63,2 +np.float32,0x3e15ca7e,0x3f06e2f3,2 +np.float32,0xbf63e17b,0xbf7642da,2 +np.float32,0xff5bdbdb,0xd4c122f9,2 +np.float32,0x3f44411e,0x3f6a4bfd,2 +np.float32,0xfd007da0,0xd40029d2,2 +np.float32,0xbe940168,0xbf2944b7,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3d28e356,0x3eb0e1b8,2 +np.float32,0x3eb9fcd8,0x3f36a918,2 +np.float32,0x4f6410,0x2a5a51eb,2 +np.float32,0xbdf18e30,0xbefb1775,2 +np.float32,0x32edbd,0x2a3c49e3,2 +np.float32,0x801f70a5,0xaa2052da,2 +np.float32,0x8045a045,0xaa50f98c,2 +np.float32,0xbdd6cb00,0xbef17412,2 +np.float32,0x3f118f2c,0x3f541557,2 +np.float32,0xbe65c378,0xbf1b8f95,2 +np.float32,0xfd9a9060,0xd42bbb8b,2 +np.float32,0x3f04244f,0x3f4d5b0f,2 +np.float32,0xff05214b,0xd4a3656f,2 +np.float32,0xfe342cd0,0xd463b706,2 +np.float32,0x3f3409a8,0x3f63a836,2 +np.float32,0x80205db2,0xaa21e1e5,2 +np.float32,0xbf37c982,0xbf653a03,2 +np.float32,0x3f36ce8f,0x3f64d17e,2 +np.float32,0x36ffda,0x2a412d61,2 +np.float32,0xff569752,0xd4bf94e6,2 +np.float32,0x802fdb0f,0xaa386c3a,2 +np.float32,0x7ec55a87,0x5493df71,2 +np.float32,0x7f2234c7,0x54ae847e,2 +np.float32,0xbf02df76,0xbf4cb23d,2 +np.float32,0x3d68731a,0x3ec4c156,2 +np.float32,0x8146,0x2921cd8e,2 +np.float32,0x80119364,0xaa041235,2 +np.float32,0xfe6c1c00,0xd47930b5,2 +np.float32,0x8070da44,0xaa757996,2 +np.float32,0xfefbf50c,0xd4a06a9d,2 +np.float32,0xbf01b6a8,0xbf4c170a,2 +np.float32,0x110702,0x2a02aedb,2 +np.float32,0xbf063cd4,0xbf4e6f87,2 +np.float32,0x3f1ff178,0x3f5ad9dd,2 +np.float32,0xbf76dcd4,0xbf7cead0,2 +np.float32,0x80527281,0xaa5d1620,2 +np.float32,0xfea96df8,0xd48c8a7f,2 +np.float32,0x68db02,0x2a6f88b0,2 +np.float32,0x62d971,0x2a6adec7,2 +np.float32,0x3e816fe0,0x3f21df04,2 
+np.float32,0x3f586379,0x3f720cc0,2 +np.float32,0x804a3718,0xaa5577ff,2 +np.float32,0x2e2506,0x2a3632b2,2 +np.float32,0x3f297d,0x2a4a4bf3,2 +np.float32,0xbe37aba8,0xbf105f88,2 +np.float32,0xbf18b264,0xbf577ea7,2 +np.float32,0x7f50d02d,0x54bdd8b5,2 +np.float32,0xfee296dc,0xd49ad757,2 +np.float32,0x7ec5137e,0x5493cdb1,2 +np.float32,0x3f4811f4,0x3f6bce3a,2 +np.float32,0xfdff32a0,0xd44af991,2 +np.float32,0x3f6ef140,0x3f7a2ed6,2 +np.float32,0x250838,0x2a2950b5,2 +np.float32,0x25c28e,0x2a2a6ada,2 +np.float32,0xbe875e50,0xbf244e90,2 +np.float32,0x3e3bdff8,0x3f11776a,2 +np.float32,0x3e9fe493,0x3f2daf17,2 +np.float32,0x804d8599,0xaa5897d9,2 +np.float32,0x3f0533da,0x3f4de759,2 +np.float32,0xbe63023c,0xbf1aefc8,2 +np.float32,0x80636e5e,0xaa6b547f,2 +np.float32,0xff112958,0xd4a82d5d,2 +np.float32,0x3e924112,0x3f28991f,2 +np.float32,0xbe996ffc,0xbf2b507a,2 +np.float32,0x802a7cda,0xaa314081,2 +np.float32,0x8022b524,0xaa25b21e,2 +np.float32,0x3f0808c8,0x3f4f5a43,2 +np.float32,0xbef0ec2a,0xbf471e0b,2 +np.float32,0xff4c2345,0xd4bc6b3c,2 +np.float32,0x25ccc8,0x2a2a7a3b,2 +np.float32,0x7f4467d6,0x54ba0260,2 +np.float32,0x7f506539,0x54bdb846,2 +np.float32,0x412ab4,0x2a4c6a2a,2 +np.float32,0x80672c4a,0xaa6e3ef0,2 +np.float32,0xbddfb7f8,0xbef4c0ac,2 +np.float32,0xbf250bb9,0xbf5d276c,2 +np.float32,0x807dca65,0xaa7e84bd,2 +np.float32,0xbf63b8e0,0xbf763438,2 +np.float32,0xbeed1b0c,0xbf460f6b,2 +np.float32,0x8021594f,0xaa238136,2 +np.float32,0xbebc74c8,0xbf377710,2 +np.float32,0x3e9f8e3b,0x3f2d8fce,2 +np.float32,0x7f50ca09,0x54bdd6d8,2 +np.float32,0x805797c1,0xaa6197df,2 +np.float32,0x3de198f9,0x3ef56f98,2 +np.float32,0xf154d,0x29fb0392,2 +np.float32,0xff7fffff,0xd4cb2ff5,2 +np.float32,0xfed22fa8,0xd49702c4,2 +np.float32,0xbf733736,0xbf7baa64,2 +np.float32,0xbf206a8a,0xbf5b1108,2 +np.float32,0xbca49680,0xbe8b3078,2 +np.float32,0xfecba794,0xd4956e1a,2 +np.float32,0x80126582,0xaa061886,2 +np.float32,0xfee5cc82,0xd49b919f,2 +np.float32,0xbf7ad6ae,0xbf7e4491,2 +np.float32,0x7ea88c81,0x548c4c0c,2 +np.float32,0xbf493a0d,0xbf6c4255,2 +np.float32,0xbf06dda0,0xbf4ec1d4,2 +np.float32,0xff3f6e84,0xd4b86cf6,2 +np.float32,0x3e4fe093,0x3f1674b0,2 +np.float32,0x8048ad60,0xaa53fbde,2 +np.float32,0x7ebb7112,0x54915ac5,2 +np.float32,0x5bd191,0x2a652a0d,2 +np.float32,0xfe3121d0,0xd4626cfb,2 +np.float32,0x7e4421c6,0x546a3f83,2 +np.float32,0x19975b,0x2a15b14f,2 +np.float32,0x801c8087,0xaa1b2a64,2 +np.float32,0xfdf6e950,0xd448c0f6,2 +np.float32,0x74e711,0x2a786083,2 +np.float32,0xbf2b2f2e,0xbf5fdccb,2 +np.float32,0x7ed19ece,0x5496e00b,2 +np.float32,0x7f6f8322,0x54c6ba63,2 +np.float32,0x3e90316d,0x3f27cd69,2 +np.float32,0x7ecb42ce,0x54955571,2 +np.float32,0x3f6d49be,0x3f799aaf,2 +np.float32,0x8053d327,0xaa5e4f9a,2 +np.float32,0x7ebd7361,0x5491df3e,2 +np.float32,0xfdb6eed0,0xd435a7aa,2 +np.float32,0x7f3e79f4,0x54b81e4b,2 +np.float32,0xfe83afa6,0xd4813794,2 +np.float32,0x37c443,0x2a421246,2 +np.float32,0xff075a10,0xd4a44cd8,2 +np.float32,0x3ebc5fe0,0x3f377047,2 +np.float32,0x739694,0x2a77714e,2 +np.float32,0xfe832946,0xd4810b91,2 +np.float32,0x7f2638e6,0x54aff235,2 +np.float32,0xfe87f7a6,0xd4829a3f,2 +np.float32,0x3f50f3f8,0x3f6f3eb8,2 +np.float32,0x3eafa3d0,0x3f333548,2 +np.float32,0xbec26ee6,0xbf39626f,2 +np.float32,0x7e6f924f,0x547a66ff,2 +np.float32,0x7f0baa46,0x54a606f8,2 +np.float32,0xbf6dfc49,0xbf79d939,2 +np.float32,0x7f005709,0x54a1699d,2 +np.float32,0x7ee3d7ef,0x549b2057,2 +np.float32,0x803709a4,0xaa4138d7,2 +np.float32,0x3f7bf49a,0x3f7ea509,2 +np.float32,0x509db7,0x2a5b6ff5,2 +np.float32,0x7eb1b0d4,0x548ec9ff,2 
+np.float32,0x7eb996ec,0x5490dfce,2 +np.float32,0xbf1fcbaa,0xbf5ac89e,2 +np.float32,0x3e2c9a98,0x3f0d69cc,2 +np.float32,0x3ea77994,0x3f306312,2 +np.float32,0x3f3cbfe4,0x3f67457c,2 +np.float32,0x8422a,0x29cd5a30,2 +np.float32,0xbd974558,0xbed6d264,2 +np.float32,0xfecee77a,0xd496387f,2 +np.float32,0x3f51876b,0x3f6f76f1,2 +np.float32,0x3b1a25,0x2a45ddad,2 +np.float32,0xfe9912f0,0xd487dd67,2 +np.float32,0x3f3ab13d,0x3f666d99,2 +np.float32,0xbf35565a,0xbf64341b,2 +np.float32,0x7d4e84aa,0x54162091,2 +np.float32,0x4c2570,0x2a574dea,2 +np.float32,0x7e82dca6,0x5480f26b,2 +np.float32,0x7f5503e7,0x54bf1c8d,2 +np.float32,0xbeb85034,0xbf361c59,2 +np.float32,0x80460a69,0xaa516387,2 +np.float32,0x805fbbab,0xaa68602c,2 +np.float32,0x7d4b4c1b,0x541557b8,2 +np.float32,0xbefa9a0a,0xbf49bfbc,2 +np.float32,0x3dbd233f,0x3ee76e09,2 +np.float32,0x58b6df,0x2a628d50,2 +np.float32,0xfcdcc180,0xd3f3aad9,2 +np.float32,0x423a37,0x2a4d8487,2 +np.float32,0xbed8b32a,0xbf403507,2 +np.float32,0x3f68e85d,0x3f780f0b,2 +np.float32,0x7ee13c4b,0x549a883d,2 +np.float32,0xff2ed4c5,0xd4b2eec1,2 +np.float32,0xbf54dadc,0xbf70b99a,2 +np.float32,0x3f78b0af,0x3f7d8a32,2 +np.float32,0x3f377372,0x3f651635,2 +np.float32,0xfdaa6178,0xd43166bc,2 +np.float32,0x8060c337,0xaa6934a6,2 +np.float32,0x7ec752c2,0x54945cf6,2 +np.float32,0xbd01a760,0xbea1f624,2 +np.float32,0x6f6599,0x2a746a35,2 +np.float32,0x3f6315b0,0x3f75f95b,2 +np.float32,0x7f2baf32,0x54b1da44,2 +np.float32,0x3e400353,0x3f1286d8,2 +np.float32,0x40d3bf,0x2a4c0f15,2 +np.float32,0x7f733aca,0x54c7c03d,2 +np.float32,0x7e5c5407,0x5473828b,2 +np.float32,0x80191703,0xaa14b56a,2 +np.float32,0xbf4fc144,0xbf6ec970,2 +np.float32,0xbf1137a7,0xbf53eacd,2 +np.float32,0x80575410,0xaa615db3,2 +np.float32,0xbd0911d0,0xbea4fe07,2 +np.float32,0x3e98534a,0x3f2ae643,2 +np.float32,0x3f3b089a,0x3f669185,2 +np.float32,0x4fc752,0x2a5aacc1,2 +np.float32,0xbef44ddc,0xbf480b6e,2 +np.float32,0x80464217,0xaa519af4,2 +np.float32,0x80445fae,0xaa4fb6de,2 +np.float32,0x80771cf4,0xaa79eec8,2 +np.float32,0xfd9182e8,0xd4284fed,2 +np.float32,0xff0a5d16,0xd4a58288,2 +np.float32,0x3f33e169,0x3f63973e,2 +np.float32,0x8021a247,0xaa23f820,2 +np.float32,0xbf362522,0xbf648ab8,2 +np.float32,0x3f457cd7,0x3f6ac95e,2 +np.float32,0xbcadf400,0xbe8dc7e2,2 +np.float32,0x80237210,0xaa26dca7,2 +np.float32,0xbf1293c9,0xbf54939f,2 +np.float32,0xbc5e73c0,0xbe744a37,2 +np.float32,0x3c03f980,0x3e4d44df,2 +np.float32,0x7da46f,0x2a7e6b20,2 +np.float32,0x5d4570,0x2a665dd0,2 +np.float32,0x3e93fbac,0x3f294287,2 +np.float32,0x7e6808fd,0x5477bfa4,2 +np.float32,0xff5aa9a6,0xd4c0c925,2 +np.float32,0xbf5206ba,0xbf6fa767,2 +np.float32,0xbf6e513e,0xbf79f6f1,2 +np.float32,0x3ed01c0f,0x3f3da20f,2 +np.float32,0xff47d93d,0xd4bb1704,2 +np.float32,0x7f466cfd,0x54baa514,2 +np.float32,0x665e10,0x2a6d9fc8,2 +np.float32,0x804d0629,0xaa5820e8,2 +np.float32,0x7e0beaa0,0x54514e7e,2 +np.float32,0xbf7fcb6c,0xbf7fee78,2 +np.float32,0x3f6c5b03,0x3f7946dd,2 +np.float32,0x3e941504,0x3f294c30,2 +np.float32,0xbf2749ad,0xbf5e26a1,2 +np.float32,0xfec2a00a,0xd493302d,2 +np.float32,0x3f15a358,0x3f560bce,2 +np.float32,0x3f15c4e7,0x3f561bcd,2 +np.float32,0xfedc8692,0xd499728c,2 +np.float32,0x7e8f6902,0x5484f180,2 +np.float32,0x7f663d62,0x54c42136,2 +np.float32,0x8027ea62,0xaa2d99b4,2 +np.float32,0x3f3d093d,0x3f67636d,2 +np.float32,0x7f118c33,0x54a85382,2 +np.float32,0x803e866a,0xaa499d43,2 +np.float32,0x80053632,0xa9b02407,2 +np.float32,0xbf36dd66,0xbf64d7af,2 +np.float32,0xbf560358,0xbf71292b,2 +np.float32,0x139a8,0x29596bc0,2 +np.float32,0xbe04f75c,0xbf01a26c,2 
+np.float32,0xfe1c3268,0xd45920fa,2 +np.float32,0x7ec77f72,0x5494680c,2 +np.float32,0xbedde724,0xbf41bbba,2 +np.float32,0x3e81dbe0,0x3f220bfd,2 +np.float32,0x800373ac,0xa99989d4,2 +np.float32,0x3f7f859a,0x3f7fd72d,2 +np.float32,0x3eb9dc7e,0x3f369e80,2 +np.float32,0xff5f8eb7,0xd4c236b1,2 +np.float32,0xff1c03cb,0xd4ac44ac,2 +np.float32,0x18cfe1,0x2a14285b,2 +np.float32,0x7f21b075,0x54ae54fd,2 +np.float32,0xff490bd8,0xd4bb7680,2 +np.float32,0xbf15dc22,0xbf5626de,2 +np.float32,0xfe1d5a10,0xd459a9a3,2 +np.float32,0x750544,0x2a7875e4,2 +np.float32,0x8023d5df,0xaa2778b3,2 +np.float32,0x3e42aa08,0x3f1332b2,2 +np.float32,0x3ecaa751,0x3f3bf60d,2 +np.float32,0x0,0x0,2 +np.float32,0x80416da6,0xaa4cb011,2 +np.float32,0x3f4ea9ae,0x3f6e5e22,2 +np.float32,0x2113f4,0x2a230f8e,2 +np.float32,0x3f35c2e6,0x3f64619a,2 +np.float32,0xbf50db8a,0xbf6f3564,2 +np.float32,0xff4d5cea,0xd4bccb8a,2 +np.float32,0x7ee54420,0x549b72d2,2 +np.float32,0x64ee68,0x2a6c81f7,2 +np.float32,0x5330da,0x2a5dbfc2,2 +np.float32,0x80047f88,0xa9a7b467,2 +np.float32,0xbda01078,0xbedae800,2 +np.float32,0xfe96d05a,0xd487315f,2 +np.float32,0x8003cc10,0xa99e7ef4,2 +np.float32,0x8007b4ac,0xa9c8aa3d,2 +np.float32,0x5d4bcf,0x2a66630e,2 +np.float32,0xfdd0c0b0,0xd43dd403,2 +np.float32,0xbf7a1d82,0xbf7e05f0,2 +np.float32,0x74ca33,0x2a784c0f,2 +np.float32,0x804f45e5,0xaa5a3640,2 +np.float32,0x7e6d16aa,0x547988c4,2 +np.float32,0x807d5762,0xaa7e3714,2 +np.float32,0xfecf93d0,0xd4966229,2 +np.float32,0xfecbd25c,0xd4957890,2 +np.float32,0xff7db31c,0xd4ca93b0,2 +np.float32,0x3dac9e18,0x3ee07c4a,2 +np.float32,0xbf4b2d28,0xbf6d0509,2 +np.float32,0xbd4f4c50,0xbebd62e0,2 +np.float32,0xbd2eac40,0xbeb2e0ee,2 +np.float32,0x3d01b69b,0x3ea1fc7b,2 +np.float32,0x7ec63902,0x549416ed,2 +np.float32,0xfcc47700,0xd3ea616d,2 +np.float32,0xbf5ddec2,0xbf7413a1,2 +np.float32,0xff6a6110,0xd4c54c52,2 +np.float32,0xfdfae2a0,0xd449d335,2 +np.float32,0x7e54868c,0x547099cd,2 +np.float32,0x802b5b88,0xaa327413,2 +np.float32,0x80440e72,0xaa4f647a,2 +np.float32,0x3e313c94,0x3f0eaad5,2 +np.float32,0x3ebb492a,0x3f3715a2,2 +np.float32,0xbef56286,0xbf4856d5,2 +np.float32,0x3f0154ba,0x3f4be3a0,2 +np.float32,0xff2df86c,0xd4b2a376,2 +np.float32,0x3ef6a850,0x3f48af57,2 +np.float32,0x3d8d33e1,0x3ed1f22d,2 +np.float32,0x4dd9b9,0x2a58e615,2 +np.float32,0x7f1caf83,0x54ac83c9,2 +np.float32,0xbf7286b3,0xbf7b6d73,2 +np.float32,0x80064f88,0xa9bbbd9f,2 +np.float32,0xbf1f55fa,0xbf5a92db,2 +np.float32,0x546a81,0x2a5ed516,2 +np.float32,0xbe912880,0xbf282d0a,2 +np.float32,0x5df587,0x2a66ee6e,2 +np.float32,0x801f706c,0xaa205279,2 +np.float32,0x58cb6d,0x2a629ece,2 +np.float32,0xfe754f8c,0xd47c62da,2 +np.float32,0xbefb6f4c,0xbf49f8e7,2 +np.float32,0x80000001,0xa6a14518,2 +np.float32,0xbf067837,0xbf4e8df4,2 +np.float32,0x3e8e715c,0x3f271ee4,2 +np.float32,0x8009de9b,0xa9d9ebc8,2 +np.float32,0xbf371ff1,0xbf64f36e,2 +np.float32,0x7f5ce661,0x54c170e4,2 +np.float32,0x3f3c47d1,0x3f671467,2 +np.float32,0xfea5e5a6,0xd48b8eb2,2 +np.float32,0xff62b17f,0xd4c31e15,2 +np.float32,0xff315932,0xd4b3c98f,2 +np.float32,0xbf1c3ca8,0xbf5925b9,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0xfdf20868,0xd4476c3b,2 +np.float32,0x5b790e,0x2a64e052,2 +np.float32,0x3f5ddf4e,0x3f7413d4,2 +np.float32,0x7f1a3182,0x54ab9861,2 +np.float32,0x3f4b906e,0x3f6d2b9d,2 +np.float32,0x7ebac760,0x54912edb,2 +np.float32,0x7f626d3f,0x54c30a7e,2 +np.float32,0x3e27b058,0x3f0c0edc,2 +np.float32,0x8041e69c,0xaa4d2de8,2 +np.float32,0x3f42cee0,0x3f69b84a,2 +np.float32,0x7ec5fe83,0x5494085b,2 +np.float32,0x9d3e6,0x29d99cde,2 
+np.float32,0x3edc50c0,0x3f41452d,2 +np.float32,0xbf2c463a,0xbf60562c,2 +np.float32,0x800bfa33,0xa9e871e8,2 +np.float32,0x7c9f2c,0x2a7dba4d,2 +np.float32,0x7f2ef9fd,0x54b2fb73,2 +np.float32,0x80741847,0xaa77cdb9,2 +np.float32,0x7e9c462a,0x5488ce1b,2 +np.float32,0x3ea47ec1,0x3f2f55a9,2 +np.float32,0x7f311c43,0x54b3b4f5,2 +np.float32,0x3d8f4c73,0x3ed2facd,2 +np.float32,0x806d7bd2,0xaa7301ef,2 +np.float32,0xbf633d24,0xbf760799,2 +np.float32,0xff4f9a3f,0xd4bd7a99,2 +np.float32,0x3f6021ca,0x3f74e73d,2 +np.float32,0x7e447015,0x546a5eac,2 +np.float32,0x6bff3c,0x2a71e711,2 +np.float32,0xe9c9f,0x29f85f06,2 +np.float32,0x8009fe14,0xa9dad277,2 +np.float32,0x807cf79c,0xaa7df644,2 +np.float32,0xff440e1b,0xd4b9e608,2 +np.float32,0xbddf9a50,0xbef4b5db,2 +np.float32,0x7f3b1c39,0x54b706fc,2 +np.float32,0x3c7471a0,0x3e7c16a7,2 +np.float32,0x8065b02b,0xaa6d18ee,2 +np.float32,0x7f63a3b2,0x54c36379,2 +np.float32,0xbe9c9d92,0xbf2c7d33,2 +np.float32,0x3d93aad3,0x3ed51a2e,2 +np.float32,0xbf41b040,0xbf694571,2 +np.float32,0x80396b9e,0xaa43f899,2 +np.float64,0x800fa025695f404b,0xaaa4000ff64bb00c,2 +np.float64,0xbfecc00198f98003,0xbfeee0b623fbd94b,2 +np.float64,0x7f9eeb60b03dd6c0,0x55291bf8554bb303,2 +np.float64,0x3fba74485634e890,0x3fde08710bdb148d,2 +np.float64,0xbfdd9a75193b34ea,0xbfe8bf711660a2f5,2 +np.float64,0xbfcf92e17a3f25c4,0xbfe4119eda6f3773,2 +np.float64,0xbfe359e2ba66b3c6,0xbfeb0f7ae97ea142,2 +np.float64,0x20791a5640f24,0x2a9441f13d262bed,2 +np.float64,0x3fe455fbfae8abf8,0x3feb830d63e1022c,2 +np.float64,0xbd112b7b7a226,0x2aa238c097ec269a,2 +np.float64,0x93349ba126694,0x2aa0c363cd74465a,2 +np.float64,0x20300cd440602,0x2a9432b4f4081209,2 +np.float64,0x3fdcfae677b9f5cc,0x3fe892a9ee56fe8d,2 +np.float64,0xbfefaae3f7bf55c8,0xbfefe388066132c4,2 +np.float64,0x1a7d6eb634faf,0x2a92ed9851d29ab5,2 +np.float64,0x7fd5308d39aa6119,0x553be444e30326c6,2 +np.float64,0xff811c7390223900,0xd5205cb404952fa7,2 +np.float64,0x80083d24aff07a4a,0xaaa0285cf764d898,2 +np.float64,0x800633810ccc6703,0xaa9d65341419586b,2 +np.float64,0x800ff456223fe8ac,0xaaa423bbcc24dff1,2 +np.float64,0x7fde5c99aebcb932,0x553f71be7d6d9daa,2 +np.float64,0x3fed961c4b3b2c39,0x3fef2ca146270cac,2 +np.float64,0x7fe744d30c6e89a5,0x554220a4cdc78e62,2 +np.float64,0x3fd8f527c7b1ea50,0x3fe76101085be1cb,2 +np.float64,0xbfc96a14b232d428,0xbfe2ab1a8962606c,2 +np.float64,0xffe85f540cf0bea7,0xd54268dff964519a,2 +np.float64,0x800e3be0fe7c77c2,0xaaa3634efd7f020b,2 +np.float64,0x3feb90d032f721a0,0x3fee72a4579e8b12,2 +np.float64,0xffe05674aaa0ace9,0xd5401c9e3fb4abcf,2 +np.float64,0x3fefc2e32c3f85c6,0x3fefeb940924bf42,2 +np.float64,0xbfecfd89e9f9fb14,0xbfeef6addf73ee49,2 +np.float64,0xf5862717eb0c5,0x2aa3e1428780382d,2 +np.float64,0xffc3003b32260078,0xd53558f92202dcdb,2 +np.float64,0x3feb4c152c36982a,0x3fee5940f7da0825,2 +np.float64,0x3fe7147b002e28f6,0x3fecb2948f46d1e3,2 +np.float64,0x7fe00ad9b4a015b2,0x5540039d15e1da54,2 +np.float64,0x8010000000000000,0xaaa428a2f98d728b,2 +np.float64,0xbfd3a41bfea74838,0xbfe595ab45b1be91,2 +np.float64,0x7fdbfd6e5537fadc,0x553e9a6e1107b8d0,2 +np.float64,0x800151d9d9a2a3b4,0xaa918cd8fb63f40f,2 +np.float64,0x7fe6828401ad0507,0x5541eda05dcd1fcf,2 +np.float64,0x3fdae1e7a1b5c3d0,0x3fe7f711e72ecc35,2 +np.float64,0x7fdf4936133e926b,0x553fc29c8d5edea3,2 +np.float64,0x80079de12d4f3bc3,0xaa9f7b06a9286da4,2 +np.float64,0x3fe1261cade24c39,0x3fe9fe09488e417a,2 +np.float64,0xbfc20dce21241b9c,0xbfe0a842fb207a28,2 +np.float64,0x3fe3285dfa2650bc,0x3feaf85215f59ef9,2 +np.float64,0x7fe42b93aea85726,0x554148c3c3bb35e3,2 
+np.float64,0xffe6c74e7f6d8e9c,0xd541ffd13fa36dbd,2 +np.float64,0x3fe73ea139ee7d42,0x3fecc402242ab7d3,2 +np.float64,0xffbd4b46be3a9690,0xd53392de917c72e4,2 +np.float64,0x800caed8df395db2,0xaaa2a811a02e6be4,2 +np.float64,0x800aacdb6c9559b7,0xaaa19d6fbc8feebf,2 +np.float64,0x839fb4eb073f7,0x2aa0264b98327c12,2 +np.float64,0xffd0157ba9a02af8,0xd5397157a11c0d05,2 +np.float64,0x7fddc8ff173b91fd,0x553f3e7663fb2ac7,2 +np.float64,0x67b365facf66d,0x2a9dd4d838b0d853,2 +np.float64,0xffe12e7fc7225cff,0xd5406272a83a8e1b,2 +np.float64,0x7fea5b19a034b632,0x5542e567658b3e36,2 +np.float64,0x124989d824932,0x2a90ba8dc7a39532,2 +np.float64,0xffe12ef098225de0,0xd54062968450a078,2 +np.float64,0x3fea2f44a3f45e8a,0x3fedee3c461f4716,2 +np.float64,0x3fe6b033e66d6068,0x3fec88c8035e06b1,2 +np.float64,0x3fe928a2ccf25146,0x3fed88d4cde7a700,2 +np.float64,0x3feead27e97d5a50,0x3fef8d7537d82e60,2 +np.float64,0x8003ab80b6875702,0xaa98adfedd7715a9,2 +np.float64,0x45a405828b481,0x2a9a1fa99a4eff1e,2 +np.float64,0x8002ddebad85bbd8,0xaa96babfda4e0031,2 +np.float64,0x3fc278c32824f186,0x3fe0c8e7c979fbd5,2 +np.float64,0x2e10fffc5c221,0x2a96c30a766d06fa,2 +np.float64,0xffd6ba8c2ead7518,0xd53c8d1d92bc2788,2 +np.float64,0xbfeb5ec3a036bd87,0xbfee602bbf0a0d01,2 +np.float64,0x3fed5bd58f7ab7ab,0x3fef181bf591a4a7,2 +np.float64,0x7feb5274a5b6a4e8,0x55431fcf81876218,2 +np.float64,0xaf8fd6cf5f1fb,0x2aa1c6edbb1e2aaf,2 +np.float64,0x7fece718f179ce31,0x55437c74efb90933,2 +np.float64,0xbfa3c42d0c278860,0xbfd5a16407c77e73,2 +np.float64,0x800b5cff0576b9fe,0xaaa1fc4ecb0dec4f,2 +np.float64,0x800be89ae557d136,0xaaa244d115fc0963,2 +np.float64,0x800d2578f5ba4af2,0xaaa2e18a3a3fc134,2 +np.float64,0x80090ff93e321ff3,0xaaa0add578e3cc3c,2 +np.float64,0x28c5a240518c,0x2a81587cccd7e202,2 +np.float64,0x7fec066929780cd1,0x55434971435d1069,2 +np.float64,0x7fc84d4d15309a99,0x55372c204515694f,2 +np.float64,0xffe070a75de0e14e,0xd54025365046dad2,2 +np.float64,0x7fe5b27cc36b64f9,0x5541b5b822f0b6ca,2 +np.float64,0x3fdea35ac8bd46b6,0x3fe9086a0fb792c2,2 +np.float64,0xbfe79996f7af332e,0xbfece9571d37a5b3,2 +np.float64,0xffdfb47f943f6900,0xd53fe6c14c3366db,2 +np.float64,0xc015cf63802ba,0x2aa2517164d075f4,2 +np.float64,0x7feba98948375312,0x5543340b5b1f1181,2 +np.float64,0x8008678e6550cf1d,0xaaa043e7cea90da5,2 +np.float64,0x3fb11b92fa223726,0x3fd9f8b53be4d90b,2 +np.float64,0x7fc9b18cf0336319,0x55379b42da882047,2 +np.float64,0xbfe5043e736a087d,0xbfebd0c67db7a8e3,2 +np.float64,0x7fde88546a3d10a8,0x553f80cfe5bcf5fe,2 +np.float64,0x8006a6c82dcd4d91,0xaa9e171d182ba049,2 +np.float64,0xbfa0f707ac21ee10,0xbfd48e5d3faa1699,2 +np.float64,0xbfe7716bffaee2d8,0xbfecd8e6abfb8964,2 +np.float64,0x9511ccab2a23a,0x2aa0d56d748f0313,2 +np.float64,0x8003ddb9b847bb74,0xaa991ca06fd9d308,2 +np.float64,0x80030710fac60e23,0xaa9725845ac95fe8,2 +np.float64,0xffece5bbaeb9cb76,0xd5437c2670f894f4,2 +np.float64,0x3fd9be5c72b37cb9,0x3fe79f2e932a5708,2 +np.float64,0x1f050cca3e0a3,0x2a93f36499fe5228,2 +np.float64,0x3fd5422becaa8458,0x3fe6295d6150df58,2 +np.float64,0xffd72c050e2e580a,0xd53cbc52d73b495f,2 +np.float64,0xbfe66d5235ecdaa4,0xbfec6ca27e60bf23,2 +np.float64,0x17ac49a42f58a,0x2a923b5b757087a0,2 +np.float64,0xffd39edc40273db8,0xd53b2f7bb99b96bf,2 +np.float64,0x7fde6cf009bcd9df,0x553f77614eb30d75,2 +np.float64,0x80042b4c3fa85699,0xaa99c05fbdd057db,2 +np.float64,0xbfde5547f8bcaa90,0xbfe8f3147d67a940,2 +np.float64,0xbfdd02f9bf3a05f4,0xbfe894f2048aa3fe,2 +np.float64,0xbfa20ec82c241d90,0xbfd4fd02ee55aac7,2 +np.float64,0x8002f670f8c5ece3,0xaa96fad7e53dd479,2 +np.float64,0x80059f24d7eb3e4a,0xaa9c7312dae0d7bc,2 
+np.float64,0x7fe6ae7423ad5ce7,0x5541f9430be53062,2 +np.float64,0xe135ea79c26be,0x2aa350d8f8c526e1,2 +np.float64,0x3fec188ce4f8311a,0x3feea44d21c23f68,2 +np.float64,0x800355688286aad2,0xaa97e6ca51eb8357,2 +np.float64,0xa2d6530b45acb,0x2aa15635bbd366e8,2 +np.float64,0x600e0150c01c1,0x2a9d1456ea6c239c,2 +np.float64,0x8009c30863338611,0xaaa118f94b188bcf,2 +np.float64,0x3fe7e4c0dfefc982,0x3fed07e8480b8c07,2 +np.float64,0xbfddac6407bb58c8,0xbfe8c46f63a50225,2 +np.float64,0xbc85e977790bd,0x2aa2344636ed713d,2 +np.float64,0xfff0000000000000,0xfff0000000000000,2 +np.float64,0xffcd1570303a2ae0,0xd5389a27d5148701,2 +np.float64,0xbf937334d026e660,0xbfd113762e4e29a7,2 +np.float64,0x3fdbfdaa9b37fb55,0x3fe84a425fdff7df,2 +np.float64,0xffc10800f5221000,0xd5349535ffe12030,2 +np.float64,0xaf40f3755e81f,0x2aa1c443af16cd27,2 +np.float64,0x800f7da34f7efb47,0xaaa3f14bf25fc89f,2 +np.float64,0xffe4a60125a94c02,0xd5416b764a294128,2 +np.float64,0xbf8e25aa903c4b40,0xbfcf5ebc275b4789,2 +np.float64,0x3fca681bbb34d038,0x3fe2e882bcaee320,2 +np.float64,0xbfd0f3c9c1a1e794,0xbfe48d0df7b47572,2 +np.float64,0xffeb99b49d373368,0xd5433060dc641910,2 +np.float64,0x3fe554fb916aa9f8,0x3febf437cf30bd67,2 +np.float64,0x80079518d0af2a32,0xaa9f6ee87044745a,2 +np.float64,0x5e01a8a0bc036,0x2a9cdf0badf222c3,2 +np.float64,0xbfea9831b3f53064,0xbfee1601ee953ab3,2 +np.float64,0xbfc369d1a826d3a4,0xbfe110b675c311e0,2 +np.float64,0xa82e640d505cd,0x2aa1863d4e523b9c,2 +np.float64,0x3fe506d70a2a0dae,0x3febd1eba3aa83fa,2 +np.float64,0xcbacba7197598,0x2aa2adeb9927f1f2,2 +np.float64,0xc112d6038225b,0x2aa25978f12038b0,2 +np.float64,0xffa7f5f44c2febf0,0xd52d0ede02d4e18b,2 +np.float64,0x8006f218e34de433,0xaa9e870cf373b4eb,2 +np.float64,0xffe6d9a5d06db34b,0xd54204a4adc608c7,2 +np.float64,0x7fe717210eae2e41,0x554214bf3e2b5228,2 +np.float64,0xbfdd4b45cdba968c,0xbfe8a94c7f225f8e,2 +np.float64,0x883356571066b,0x2aa055ab0b2a8833,2 +np.float64,0x3fe307fc02a60ff8,0x3feae9175053288f,2 +np.float64,0x3fefa985f77f530c,0x3fefe31289446615,2 +np.float64,0x8005698a98aad316,0xaa9c17814ff7d630,2 +np.float64,0x3fea77333c74ee66,0x3fee098ba70e10fd,2 +np.float64,0xbfd1d00b0023a016,0xbfe4e497fd1cbea1,2 +np.float64,0x80009b0c39813619,0xaa8b130a6909cc3f,2 +np.float64,0x3fdbeb896fb7d714,0x3fe84502ba5437f8,2 +np.float64,0x3fb6e7e3562dcfc7,0x3fdca00d35c389ad,2 +np.float64,0xb2d46ebf65a8e,0x2aa1e2fe158d0838,2 +np.float64,0xbfd5453266aa8a64,0xbfe62a6a74c8ef6e,2 +np.float64,0x7fe993aa07732753,0x5542b5438bf31cb7,2 +np.float64,0xbfda5a098cb4b414,0xbfe7ce6d4d606203,2 +np.float64,0xbfe40c3ce068187a,0xbfeb61a32c57a6d0,2 +np.float64,0x3fcf17671d3e2ed0,0x3fe3f753170ab686,2 +np.float64,0xbfe4f814b6e9f02a,0xbfebcb67c60b7b08,2 +np.float64,0x800efedf59fdfdbf,0xaaa3ba4ed44ad45a,2 +np.float64,0x800420b556e8416b,0xaa99aa7fb14edeab,2 +np.float64,0xbf6e4ae6403c9600,0xbfc3cb2b29923989,2 +np.float64,0x3fda5c760a34b8ec,0x3fe7cf2821c52391,2 +np.float64,0x7f898faac0331f55,0x5522b44a01408188,2 +np.float64,0x3fd55af4b7aab5e9,0x3fe631f6d19503b3,2 +np.float64,0xbfa30a255c261450,0xbfd55caf0826361d,2 +np.float64,0x7fdfb801343f7001,0x553fe7ee50b9199a,2 +np.float64,0x7fa89ee91c313dd1,0x552d528ca2a4d659,2 +np.float64,0xffea72921d34e524,0xd542eb01af2e470d,2 +np.float64,0x3feddf0f33fbbe1e,0x3fef462b67fc0a91,2 +np.float64,0x3fe36700b566ce01,0x3feb1596caa8eff7,2 +np.float64,0x7fe6284a25ac5093,0x5541d58be3956601,2 +np.float64,0xffda16f7c8b42df0,0xd53de4f722485205,2 +np.float64,0x7f9355b94026ab72,0x552578cdeb41d2ca,2 +np.float64,0xffd3a9b022275360,0xd53b347b02dcea21,2 +np.float64,0x3fcb7f4f4a36fe9f,0x3fe32a40e9f6c1aa,2 
+np.float64,0x7fdb958836372b0f,0x553e746103f92111,2 +np.float64,0x3fd37761c0a6eec4,0x3fe5853c5654027e,2 +np.float64,0x3fe449f1a2e893e4,0x3feb7d9e4eacc356,2 +np.float64,0x80077dfbef0efbf9,0xaa9f4ed788d2fadd,2 +np.float64,0x4823aa7890476,0x2a9a6eb4b653bad5,2 +np.float64,0xbfede01a373bc034,0xbfef468895fbcd29,2 +np.float64,0xbfe2bac5f125758c,0xbfeac4811c4dd66f,2 +np.float64,0x3fec10373af8206e,0x3feea14529e0f178,2 +np.float64,0x3fe305e30ca60bc6,0x3feae81a2f9d0302,2 +np.float64,0xa9668c5f52cd2,0x2aa1910e3a8f2113,2 +np.float64,0xbfd98b1717b3162e,0xbfe78f75995335d2,2 +np.float64,0x800fa649c35f4c94,0xaaa402ae79026a8f,2 +np.float64,0xbfb07dacf620fb58,0xbfd9a7d33d93a30f,2 +np.float64,0x80015812f382b027,0xaa91a843e9c85c0e,2 +np.float64,0x3fc687d96c2d0fb3,0x3fe1ef0ac16319c5,2 +np.float64,0xbfecad2ecd795a5e,0xbfeed9f786697af0,2 +np.float64,0x1608c1242c119,0x2a91cd11e9b4ccd2,2 +np.float64,0x6df775e8dbeef,0x2a9e6ba8c71130eb,2 +np.float64,0xffe96e9332b2dd26,0xd542ac342d06299b,2 +np.float64,0x7fecb6a3b8396d46,0x5543718af8162472,2 +np.float64,0x800d379f893a6f3f,0xaaa2ea36bbcb9308,2 +np.float64,0x3f924cdb202499b6,0x3fd0bb90af8d1f79,2 +np.float64,0x0,0x0,2 +np.float64,0x7feaf3b365f5e766,0x5543099a160e2427,2 +np.float64,0x3fea169ed0742d3e,0x3fede4d526e404f8,2 +np.float64,0x7feaf5f2f775ebe5,0x55430a2196c5f35a,2 +np.float64,0xbfc80d4429301a88,0xbfe2541f2ddd3334,2 +np.float64,0xffc75203b32ea408,0xd536db2837068689,2 +np.float64,0xffed2850e63a50a1,0xd5438b1217b72b8a,2 +np.float64,0x7fc16b0e7f22d61c,0x5534bcd0bfddb6f0,2 +np.float64,0x7feee8ed09fdd1d9,0x5543ed5b3ca483ab,2 +np.float64,0x7fb6c7ee662d8fdc,0x5531fffb5d46dafb,2 +np.float64,0x3fd77cebf8aef9d8,0x3fe6e9242e2bd29d,2 +np.float64,0x3f81c33f70238680,0x3fca4c7f3c9848f7,2 +np.float64,0x3fd59fea92ab3fd5,0x3fe649c1558cadd5,2 +np.float64,0xffeba82d4bf7505a,0xd54333bad387f7bd,2 +np.float64,0xffd37630e1a6ec62,0xd53b1ca62818c670,2 +np.float64,0xffec2c1e70b8583c,0xd5435213dcd27c22,2 +np.float64,0x7fec206971f840d2,0x55434f6660a8ae41,2 +np.float64,0x3fed2964adba52c9,0x3fef0642fe72e894,2 +np.float64,0xffd08e30d6211c62,0xd539b060e0ae02da,2 +np.float64,0x3e5f976c7cbf4,0x2a992e6ff991a122,2 +np.float64,0xffe6eee761adddce,0xd5420a393c67182f,2 +np.float64,0xbfe8ec9a31f1d934,0xbfed714426f58147,2 +np.float64,0x7fefffffffffffff,0x554428a2f98d728b,2 +np.float64,0x3fb3ae8b2c275d16,0x3fdb36b81b18a546,2 +np.float64,0x800f73df4dfee7bf,0xaaa3ed1a3e2cf49c,2 +np.float64,0xffd0c8873b21910e,0xd539ce6a3eab5dfd,2 +np.float64,0x3facd6c49439ad80,0x3fd8886f46335df1,2 +np.float64,0x3935859c726b2,0x2a98775f6438dbb1,2 +np.float64,0x7feed879fbfdb0f3,0x5543e9d1ac239469,2 +np.float64,0xbfe84dd990f09bb3,0xbfed323af09543b1,2 +np.float64,0xbfe767cc5a6ecf98,0xbfecd4f39aedbacb,2 +np.float64,0xffd8bd91d5b17b24,0xd53d5eb3734a2609,2 +np.float64,0xbfe13edeb2a27dbe,0xbfea0a856f0b9656,2 +np.float64,0xd933dd53b267c,0x2aa3158784e428c9,2 +np.float64,0xbfef6fef987edfdf,0xbfefcfb1c160462b,2 +np.float64,0x8009eeda4893ddb5,0xaaa13268a41045b1,2 +np.float64,0xab48c7a156919,0x2aa1a1a9c124c87d,2 +np.float64,0xa997931d532f3,0x2aa192bfe5b7bbb4,2 +np.float64,0xffe39ce8b1e739d1,0xd5411fa1c5c2cbd8,2 +np.float64,0x7e7ac2f6fcf59,0x2a9fdf6f263a9e9f,2 +np.float64,0xbfee1e35a6fc3c6b,0xbfef5c25d32b4047,2 +np.float64,0xffe5589c626ab138,0xd5419d220cc9a6da,2 +np.float64,0x7fe12509bf224a12,0x55405f7036dc5932,2 +np.float64,0xa6f15ba94de2c,0x2aa17b3367b1fc1b,2 +np.float64,0x3fca8adbfa3515b8,0x3fe2f0ca775749e5,2 +np.float64,0xbfcb03aa21360754,0xbfe30d5b90ca41f7,2 +np.float64,0x3fefafb2da7f5f66,0x3fefe5251aead4e7,2 
+np.float64,0xffd90a59d23214b4,0xd53d7cf63a644f0e,2 +np.float64,0x3fba499988349333,0x3fddf84154fab7e5,2 +np.float64,0x800a76a0bc54ed42,0xaaa17f68cf67f2fa,2 +np.float64,0x3fea33d15bb467a3,0x3fedeff7f445b2ff,2 +np.float64,0x8005d9b0726bb362,0xaa9cd48624afeca9,2 +np.float64,0x7febf42e9a77e85c,0x55434541d8073376,2 +np.float64,0xbfedfc4469bbf889,0xbfef505989f7ee7d,2 +np.float64,0x8001211f1422423f,0xaa90a9889d865349,2 +np.float64,0x800e852f7fdd0a5f,0xaaa3845f11917f8e,2 +np.float64,0xffefd613c87fac27,0xd5441fd17ec669b4,2 +np.float64,0x7fed2a74543a54e8,0x55438b8c637da8b8,2 +np.float64,0xb83d50ff707aa,0x2aa210b4fc11e4b2,2 +np.float64,0x10000000000000,0x2aa428a2f98d728b,2 +np.float64,0x474ad9208e97,0x2a84e5a31530368a,2 +np.float64,0xffd0c5498ea18a94,0xd539ccc0e5cb425e,2 +np.float64,0x8001a8e9c82351d4,0xaa92f1aee6ca5b7c,2 +np.float64,0xd28db1e5a51b6,0x2aa2e328c0788f4a,2 +np.float64,0x3bf734ac77ee7,0x2a98da65c014b761,2 +np.float64,0x3fe56e17c96adc30,0x3febff2b6b829b7a,2 +np.float64,0x7783113eef063,0x2a9f46c3f09eb42c,2 +np.float64,0x3fd69d4e42ad3a9d,0x3fe69f83a21679f4,2 +np.float64,0x3fd34f4841a69e90,0x3fe5766b3c771616,2 +np.float64,0x3febb49895b76931,0x3fee7fcb603416c9,2 +np.float64,0x7fe8d6cb55f1ad96,0x554286c3b3bf4313,2 +np.float64,0xbfe67c6ba36cf8d8,0xbfec730218f2e284,2 +np.float64,0xffef9d97723f3b2e,0xd54413e38b6c29be,2 +np.float64,0x12d8cd2a25b1b,0x2a90e5ccd37b8563,2 +np.float64,0x81fe019103fc0,0x2aa01524155e73c5,2 +np.float64,0x7fe95d546f72baa8,0x5542a7fabfd425ff,2 +np.float64,0x800e742f1f9ce85e,0xaaa37cbe09e1f874,2 +np.float64,0xffd96bd3a732d7a8,0xd53da3086071264a,2 +np.float64,0x4ef2691e9de4e,0x2a9b3d316047fd6d,2 +np.float64,0x1a91684c3522e,0x2a92f25913c213de,2 +np.float64,0x3d5151b87aa2b,0x2a9909dbd9a44a84,2 +np.float64,0x800d9049435b2093,0xaaa31424e32d94a2,2 +np.float64,0xffe5b25fcc2b64bf,0xd541b5b0416b40b5,2 +np.float64,0xffe0eb784c21d6f0,0xd5404d083c3d6bc6,2 +np.float64,0x8007ceefbf0f9de0,0xaa9fbe0d739368b4,2 +np.float64,0xb78529416f0b,0x2a8ca3b29b5b3f18,2 +np.float64,0x7fba61130034c225,0x5532e6d4ca0f2918,2 +np.float64,0x3fba8d67ae351acf,0x3fde11efd6239b09,2 +np.float64,0x3fe7f24c576fe498,0x3fed0d63947a854d,2 +np.float64,0x2bb58dec576b3,0x2a965de7fca12aff,2 +np.float64,0xbfe86ceec4f0d9de,0xbfed3ea7f1d084e2,2 +np.float64,0x7fd1a7f7bca34fee,0x553a3f01b67fad2a,2 +np.float64,0x3fd9a43acfb34874,0x3fe7972dc5d8dfd6,2 +np.float64,0x7fd9861acdb30c35,0x553dad3b1bbb3b4d,2 +np.float64,0xffecc0c388398186,0xd54373d3b903deec,2 +np.float64,0x3fa6f86e9c2df0e0,0x3fd6bdbe40fcf710,2 +np.float64,0x800ddd99815bbb33,0xaaa33820d2f889bb,2 +np.float64,0x7fe087089b610e10,0x55402c868348a6d3,2 +np.float64,0x3fdf43d249be87a5,0x3fe933d29fbf7c23,2 +np.float64,0x7fe4f734c7a9ee69,0x5541822e56c40725,2 +np.float64,0x3feb39a9d3b67354,0x3fee526bf1f69f0e,2 +np.float64,0x3fe61454a0ec28a9,0x3fec46d7c36f7566,2 +np.float64,0xbfeafaa0a375f541,0xbfee3af2e49d457a,2 +np.float64,0x3fda7378e1b4e6f0,0x3fe7d613a3f92c40,2 +np.float64,0xe3e31c5fc7c64,0x2aa3645c12e26171,2 +np.float64,0xbfe97a556df2f4ab,0xbfeda8aa84cf3544,2 +np.float64,0xff612f9c80225f00,0xd514a51e5a2a8a97,2 +np.float64,0x800c51c8a0f8a391,0xaaa279fe7d40b50b,2 +np.float64,0xffd6f9d2312df3a4,0xd53ca783a5f8d110,2 +np.float64,0xbfead48bd7f5a918,0xbfee2cb2f89c5e57,2 +np.float64,0x800f5949e89eb294,0xaaa3e1a67a10cfef,2 +np.float64,0x800faf292b7f5e52,0xaaa40675e0c96cfd,2 +np.float64,0xbfedc238453b8470,0xbfef3c179d2d0209,2 +np.float64,0x3feb0443c5760888,0x3fee3e8bf29089c2,2 +np.float64,0xb26f69e164ded,0x2aa1df9f3dd7d765,2 +np.float64,0x3fcacdc053359b80,0x3fe300a67765b667,2 
+np.float64,0x3fe8b274647164e8,0x3fed5a4cd4da8155,2 +np.float64,0x291e6782523ce,0x2a95ea7ac1b13a68,2 +np.float64,0xbfc4fc094e29f814,0xbfe1838671fc8513,2 +np.float64,0x3fbf1301f23e2600,0x3fdfb03a6f13e597,2 +np.float64,0xffeb36554ab66caa,0xd543193d8181e4f9,2 +np.float64,0xbfd969a52db2d34a,0xbfe78528ae61f16d,2 +np.float64,0x800cccd04d3999a1,0xaaa2b6b7a2d2d2d6,2 +np.float64,0x808eb4cb011d7,0x2aa005effecb2b4a,2 +np.float64,0x7fe839b3f9b07367,0x55425f61e344cd6d,2 +np.float64,0xbfeb25b6ed764b6e,0xbfee4b0234fee365,2 +np.float64,0xffefffffffffffff,0xd54428a2f98d728b,2 +np.float64,0xbfe01305da60260c,0xbfe9700b784af7e9,2 +np.float64,0xffcbf36b0a37e6d8,0xd538474b1d74ffe1,2 +np.float64,0xffaeebe3e83dd7c0,0xd52fa2e8dabf7209,2 +np.float64,0xbfd9913bf0b32278,0xbfe7915907aab13c,2 +np.float64,0xbfe7d125d9efa24c,0xbfecfff563177706,2 +np.float64,0xbfee98d23cbd31a4,0xbfef867ae393e446,2 +np.float64,0x3fe30efb67e61df6,0x3feaec6344633d11,2 +np.float64,0x1,0x2990000000000000,2 +np.float64,0x7fd5524fd3aaa49f,0x553bf30d18ab877e,2 +np.float64,0xc98b403f93168,0x2aa29d2fadb13c07,2 +np.float64,0xffe57080046ae100,0xd541a3b1b687360e,2 +np.float64,0x7fe20bade5e4175b,0x5540a79b94294f40,2 +np.float64,0x3fe155400a22aa80,0x3fea15c45f5b5837,2 +np.float64,0x7fe428dc8f6851b8,0x554147fd2ce93cc1,2 +np.float64,0xffefb77eb67f6efc,0xd544195dcaff4980,2 +np.float64,0x3fe49e733b293ce6,0x3feba394b833452a,2 +np.float64,0x38e01e3e71c05,0x2a986b2c955bad21,2 +np.float64,0x7fe735eb376e6bd5,0x55421cc51290d92d,2 +np.float64,0xbfd81d8644b03b0c,0xbfe71ce6d6fbd51a,2 +np.float64,0x8009a32325134647,0xaaa10645d0e6b0d7,2 +np.float64,0x56031ab8ac064,0x2a9c074be40b1f80,2 +np.float64,0xff8989aa30331340,0xd522b2d319a0ac6e,2 +np.float64,0xbfd6c183082d8306,0xbfe6ab8ffb3a8293,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfe17b68b1e2f6d2,0xbfea28dac8e0c457,2 +np.float64,0x3fbb50e42236a1c8,0x3fde5b090d51e3bd,2 +np.float64,0xffc2bb7cbf2576f8,0xd5353f1b3571c17f,2 +np.float64,0xbfe7576bca6eaed8,0xbfecce388241f47c,2 +np.float64,0x3fe7b52b04ef6a56,0x3fecf495bef99e7e,2 +np.float64,0xffe5511af82aa236,0xd5419b11524e8350,2 +np.float64,0xbfe66d5edf2cdabe,0xbfec6ca7d7b5be8c,2 +np.float64,0xc84a0ba790942,0x2aa29346f16a2cb4,2 +np.float64,0x6db5e7a0db6be,0x2a9e659c0e8244a0,2 +np.float64,0x7fef8f7b647f1ef6,0x554410e67af75d27,2 +np.float64,0xbfe2b4ada7e5695c,0xbfeac1997ec5a064,2 +np.float64,0xbfe99372e03326e6,0xbfedb2662b287543,2 +np.float64,0x3fa45d352428ba6a,0x3fd5d8a895423abb,2 +np.float64,0x3fa029695c2052d3,0x3fd439f858998886,2 +np.float64,0xffe0a9bd3261537a,0xd54037d0cd8bfcda,2 +np.float64,0xbfef83e09a7f07c1,0xbfefd66a4070ce73,2 +np.float64,0x7fee3dcc31fc7b97,0x5543c8503869407e,2 +np.float64,0xffbd16f1603a2de0,0xd533872fa5be978b,2 +np.float64,0xbfe8173141b02e62,0xbfed1c478614c6f4,2 +np.float64,0xbfef57aa277eaf54,0xbfefc77fdab27771,2 +np.float64,0x7fe883a02f31073f,0x554271ff0e3208da,2 +np.float64,0xe3adb63bc75b7,0x2aa362d833d0e41c,2 +np.float64,0x8001c430bac38862,0xaa93575026d26510,2 +np.float64,0x12fb347225f67,0x2a90f00eb9edb3fe,2 +np.float64,0x3fe53f83cbaa7f08,0x3febead40de452c2,2 +np.float64,0xbfe7f67227efece4,0xbfed0f10e32ad220,2 +np.float64,0xb8c5b45d718b7,0x2aa2152912cda86d,2 +np.float64,0x3fd23bb734a4776e,0x3fe50e5d3008c095,2 +np.float64,0x8001fd558ee3faac,0xaa941faa1f7ed450,2 +np.float64,0xffe6bbeda9ed77db,0xd541fcd185a63afa,2 +np.float64,0x4361d79086c3c,0x2a99d692237c30b7,2 +np.float64,0xbfd012f004a025e0,0xbfe43093e290fd0d,2 +np.float64,0xffe1d8850423b10a,0xd54097cf79d8d01e,2 +np.float64,0x3fccf4df7939e9bf,0x3fe37f8cf8be6436,2 
+np.float64,0x8000546bc6c0a8d8,0xaa861bb3588556f2,2 +np.float64,0xbfecb4d6ba7969ae,0xbfeedcb6239135fe,2 +np.float64,0xbfaeb425cc3d6850,0xbfd90cfc103bb896,2 +np.float64,0x800ec037ec7d8070,0xaaa39eae8bde9774,2 +np.float64,0xbfeeaf863dfd5f0c,0xbfef8e4514772a8a,2 +np.float64,0xffec67c6c4b8cf8d,0xd5435fad89f900cf,2 +np.float64,0x3fda4498da348932,0x3fe7c7f6b3f84048,2 +np.float64,0xbfd05fd3dea0bfa8,0xbfe4509265a9b65f,2 +np.float64,0x3fe42cc713a8598e,0x3feb706ba9cd533c,2 +np.float64,0xec22d4d7d845b,0x2aa39f8cccb9711c,2 +np.float64,0x7fda30606c3460c0,0x553deea865065196,2 +np.float64,0xbfd58cba8bab1976,0xbfe64327ce32d611,2 +np.float64,0xadd521c75baa4,0x2aa1b7efce201a98,2 +np.float64,0x7fed43c1027a8781,0x55439131832b6429,2 +np.float64,0x800bee278fb7dc4f,0xaaa247a71e776db4,2 +np.float64,0xbfe9be5dd2737cbc,0xbfedc2f9501755b0,2 +np.float64,0x8003f4854447e90b,0xaa994d9b5372b13b,2 +np.float64,0xbfe5d0f867eba1f1,0xbfec29f8dd8b33a4,2 +np.float64,0x3fd79102d5af2206,0x3fe6efaa7a1efddb,2 +np.float64,0xbfeae783c835cf08,0xbfee33cdb4a44e81,2 +np.float64,0x3fcf1713e83e2e28,0x3fe3f7414753ddfb,2 +np.float64,0xffe5ab3cff2b567a,0xd541b3bf0213274a,2 +np.float64,0x7fe0fc65d8a1f8cb,0x554052761ac96386,2 +np.float64,0x7e81292efd026,0x2a9fdff8c01ae86f,2 +np.float64,0x80091176039222ec,0xaaa0aebf0565dfa6,2 +np.float64,0x800d2bf5ab5a57ec,0xaaa2e4a4c31e7e29,2 +np.float64,0xffd1912ea923225e,0xd53a33b2856726ab,2 +np.float64,0x800869918ed0d323,0xaaa0453408e1295d,2 +np.float64,0xffba0898fa341130,0xd532d19b202a9646,2 +np.float64,0xbfe09fac29613f58,0xbfe9b9687b5811a1,2 +np.float64,0xbfbd4ae82e3a95d0,0xbfdf1220f6f0fdfa,2 +np.float64,0xffea11d27bb423a4,0xd542d3d3e1522474,2 +np.float64,0xbfe6b05705ad60ae,0xbfec88d6bcab2683,2 +np.float64,0x3fe624a3f2ec4948,0x3fec4dcc78ddf871,2 +np.float64,0x53483018a6907,0x2a9bba8f92006b69,2 +np.float64,0xbfec0a6eeb7814de,0xbfee9f2a741248d7,2 +np.float64,0x3fe8c8ce6371919d,0x3fed63250c643482,2 +np.float64,0xbfe26b0ef964d61e,0xbfea9e511db83437,2 +np.float64,0xffa0408784208110,0xd52987f62c369ae9,2 +np.float64,0xffc153abc322a758,0xd534b384b5c5fe63,2 +np.float64,0xbfbdce88a63b9d10,0xbfdf4065ef0b01d4,2 +np.float64,0xffed4a4136fa9482,0xd54392a450f8b0af,2 +np.float64,0x8007aa18748f5432,0xaa9f8bd2226d4299,2 +np.float64,0xbfdab4d3e8b569a8,0xbfe7e9a5402540e5,2 +np.float64,0x7fe68914f92d1229,0x5541ef5e78fa35de,2 +np.float64,0x800a538bb1b4a718,0xaaa16bc487711295,2 +np.float64,0xffe02edbc8605db7,0xd5400f8f713df890,2 +np.float64,0xffe8968053712d00,0xd54276b9cc7f460a,2 +np.float64,0x800a4ce211d499c5,0xaaa1680491deb40c,2 +np.float64,0x3f988080f8310102,0x3fd2713691e99329,2 +np.float64,0xf64e42a7ec9c9,0x2aa3e6a7af780878,2 +np.float64,0xff73cc7100279900,0xd51b4478c3409618,2 +np.float64,0x71e6722ce3ccf,0x2a9ec76ddf296ce0,2 +np.float64,0x8006ca16ab0d942e,0xaa9e4bfd862af570,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0xbfed373e02ba6e7c,0xbfef0b2b7bb767b3,2 +np.float64,0xa6cb0f694d962,0x2aa179dd16b0242b,2 +np.float64,0x7fec14626cf828c4,0x55434ca55b7c85d5,2 +np.float64,0x3fcda404513b4808,0x3fe3a68e8d977752,2 +np.float64,0xbfeb94995f772933,0xbfee74091d288b81,2 +np.float64,0x3fce2299a13c4530,0x3fe3c2603f28d23b,2 +np.float64,0xffd07f4534a0fe8a,0xd539a8a6ebc5a603,2 +np.float64,0x7fdb1c651e3638c9,0x553e478a6385c86b,2 +np.float64,0x3fec758336f8eb06,0x3feec5f3b92c8b28,2 +np.float64,0x796fc87cf2dfa,0x2a9f7184a4ad8c49,2 +np.float64,0x3fef9ba866ff3750,0x3fefde6a446fc2cd,2 +np.float64,0x964d26c72c9a5,0x2aa0e143f1820179,2 +np.float64,0xbfef6af750bed5ef,0xbfefce04870a97bd,2 
+np.float64,0x3fe2f3961aa5e72c,0x3feadf769321a3ff,2 +np.float64,0xbfd6b706e9ad6e0e,0xbfe6a8141c5c3b5d,2 +np.float64,0x7fe0ecc40a21d987,0x55404d72c2b46a82,2 +np.float64,0xbfe560d19deac1a3,0xbfebf962681a42a4,2 +np.float64,0xbfea37170ab46e2e,0xbfedf136ee9df02b,2 +np.float64,0xbfebf78947b7ef12,0xbfee9847ef160257,2 +np.float64,0x800551f8312aa3f1,0xaa9bee7d3aa5491b,2 +np.float64,0xffed2513897a4a26,0xd5438a58c4ae28ec,2 +np.float64,0x7fd962d75cb2c5ae,0x553d9f8a0c2016f3,2 +np.float64,0x3fefdd8512bfbb0a,0x3feff47d8da7424d,2 +np.float64,0xbfefa5b43bff4b68,0xbfefe1ca42867af0,2 +np.float64,0xbfc8a2853531450c,0xbfe279bb7b965729,2 +np.float64,0x800c8843bc391088,0xaaa2951344e7b29b,2 +np.float64,0x7fe22587bae44b0e,0x5540af8bb58cfe86,2 +np.float64,0xbfe159fae822b3f6,0xbfea182394eafd8d,2 +np.float64,0xbfe6fdfd50edfbfa,0xbfeca93f2a3597d0,2 +np.float64,0xbfe5cd5afaeb9ab6,0xbfec286a8ce0470f,2 +np.float64,0xbfc84bb97f309774,0xbfe263ef0f8f1f6e,2 +np.float64,0x7fd9c1e548b383ca,0x553dc4556874ecb9,2 +np.float64,0x7fda43d33bb487a5,0x553df60f61532fc0,2 +np.float64,0xbfe774bd25eee97a,0xbfecda42e8578c1f,2 +np.float64,0x800df1f5ab9be3ec,0xaaa34184712e69db,2 +np.float64,0xbff0000000000000,0xbff0000000000000,2 +np.float64,0x3fe14ec21b629d84,0x3fea128244215713,2 +np.float64,0x7fc1ce7843239cf0,0x5534e3fa8285b7b8,2 +np.float64,0xbfe922b204724564,0xbfed86818687d649,2 +np.float64,0x3fc58924fb2b1248,0x3fe1aa715ff6ebbf,2 +np.float64,0x8008b637e4d16c70,0xaaa0760b53abcf46,2 +np.float64,0xffbf55bd4c3eab78,0xd53404a23091a842,2 +np.float64,0x9f6b4a753ed6a,0x2aa136ef9fef9596,2 +np.float64,0xbfd11da7f8a23b50,0xbfe49deb493710d8,2 +np.float64,0x800a2f07fcd45e10,0xaaa157237c98b4f6,2 +np.float64,0x3fdd4defa4ba9bdf,0x3fe8aa0bcf895f4f,2 +np.float64,0x7fe9b0ab05f36155,0x5542bc5335414473,2 +np.float64,0x3fe89c97de313930,0x3fed51a1189b8982,2 +np.float64,0x3fdd45c8773a8b91,0x3fe8a7c2096fbf5a,2 +np.float64,0xbfeb6f64daf6deca,0xbfee665167ef43ad,2 +np.float64,0xffdf9da1c4bf3b44,0xd53fdf141944a983,2 +np.float64,0x3fde092ed0bc125c,0x3fe8de25bfbfc2db,2 +np.float64,0xbfcb21f96b3643f4,0xbfe3147904c258cf,2 +np.float64,0x800c9c934f993927,0xaaa29f17c43f021b,2 +np.float64,0x9b91814d37230,0x2aa11329e59bf6b0,2 +np.float64,0x3fe28a7e0b6514fc,0x3feaad6d23e2eadd,2 +np.float64,0xffecf38395f9e706,0xd5437f3ee1cd61e4,2 +np.float64,0x3fcade92a935bd25,0x3fe3049f4c1da1d0,2 +np.float64,0x800ab25d95d564bc,0xaaa1a076d7c66e04,2 +np.float64,0xffc0989e1e21313c,0xd53467f3b8158298,2 +np.float64,0x3fd81523eeb02a48,0x3fe71a38d2da8a82,2 +np.float64,0x7fe5b9dd402b73ba,0x5541b7b9b8631010,2 +np.float64,0x2c160d94582c3,0x2a966e51b503a3d1,2 +np.float64,0x2c416ffa5882f,0x2a9675aaef8b29c4,2 +np.float64,0x7fefe2ff01bfc5fd,0x55442289faf22b86,2 +np.float64,0xbfd469bf5d28d37e,0xbfe5dd239ffdc7eb,2 +np.float64,0xbfdd56f3eabaade8,0xbfe8ac93244ca17b,2 +np.float64,0xbfe057b89160af71,0xbfe9941557340bb3,2 +np.float64,0x800c50e140b8a1c3,0xaaa2798ace9097ee,2 +np.float64,0xbfda5a8984b4b514,0xbfe7ce93d65a56b0,2 +np.float64,0xbfcd6458323ac8b0,0xbfe39872514127bf,2 +np.float64,0x3fefb1f5ebff63ec,0x3fefe5e761b49b89,2 +np.float64,0x3fea3abc1df47578,0x3fedf29a1c997863,2 +np.float64,0x7fcb4a528e3694a4,0x553815f169667213,2 +np.float64,0x8c77da7b18efc,0x2aa080e52bdedb54,2 +np.float64,0x800e5dde4c5cbbbd,0xaaa372b16fd8b1ad,2 +np.float64,0x3fd2976038a52ec0,0x3fe5316b4f79fdbc,2 +np.float64,0x69413a0ed2828,0x2a9dfacd9cb44286,2 +np.float64,0xbfebbac0bdb77582,0xbfee820d9288b631,2 +np.float64,0x1a12aa7c34256,0x2a92d407e073bbfe,2 +np.float64,0xbfc41a27c3283450,0xbfe143c8665b0d3c,2 
+np.float64,0xffe4faa41369f548,0xd54183230e0ce613,2 +np.float64,0xbfdeae81f23d5d04,0xbfe90b734bf35b68,2 +np.float64,0x3fc984ba58330975,0x3fe2b19e9052008e,2 +np.float64,0x7fe6e51b8d2dca36,0x554207a74ae2bb39,2 +np.float64,0x80081a58a81034b2,0xaaa0117d4aff11c8,2 +np.float64,0x7fde3fddfe3c7fbb,0x553f67d0082acc67,2 +np.float64,0x3fac7c999038f933,0x3fd86ec2f5dc3aa4,2 +np.float64,0x7fa26b4c4c24d698,0x552a9e6ea8545c18,2 +np.float64,0x3fdacd06e6b59a0e,0x3fe7f0dc0e8f9c6d,2 +np.float64,0x80064b62cbec96c6,0xaa9d8ac0506fdd05,2 +np.float64,0xb858116170b1,0x2a8caea703d9ccc8,2 +np.float64,0xbfe8d94ccef1b29a,0xbfed69a8782cbf3d,2 +np.float64,0x8005607d6a6ac0fc,0xaa9c07cf8620b037,2 +np.float64,0xbfe66a52daacd4a6,0xbfec6b5e403e6864,2 +np.float64,0x7fc398c2e0273185,0x5535918245894606,2 +np.float64,0x74b2d7dce965c,0x2a9f077020defdbc,2 +np.float64,0x7fe8f7a4d9b1ef49,0x55428eeae210e8eb,2 +np.float64,0x80027deddc84fbdc,0xaa95b11ff9089745,2 +np.float64,0xffeba2a94e774552,0xd5433273f6568902,2 +np.float64,0x80002f8259405f05,0xaa8240b68d7b9dc4,2 +np.float64,0xbfdf0d84883e1b0a,0xbfe92532c69c5802,2 +np.float64,0xbfcdfa7b6b3bf4f8,0xbfe3b997a84d0914,2 +np.float64,0x800c18b04e183161,0xaaa25d46d60b15c6,2 +np.float64,0xffeaf1e37c35e3c6,0xd543092cd929ac19,2 +np.float64,0xbfc5aa07752b5410,0xbfe1b36ab5ec741f,2 +np.float64,0x3fe5c491d1eb8924,0x3fec24a1c3f6a178,2 +np.float64,0xbfeb736937f6e6d2,0xbfee67cd296e6fa9,2 +np.float64,0xffec3d5718787aad,0xd5435602e1a2cc43,2 +np.float64,0x7fe71e1da86e3c3a,0x55421691ead882cb,2 +np.float64,0x3fdd6ed0c93adda2,0x3fe8b341d066c43c,2 +np.float64,0x7fbe3d7a203c7af3,0x5533c83e53283430,2 +np.float64,0x3fdc20cb56384197,0x3fe854676360aba9,2 +np.float64,0xb7a1ac636f436,0x2aa20b9d40d66e78,2 +np.float64,0x3fb1491bb8229237,0x3fda0fabad1738ee,2 +np.float64,0xbfdf9c0ce73f381a,0xbfe94b716dbe35ee,2 +np.float64,0xbfbd4f0ad23a9e18,0xbfdf1397329a2dce,2 +np.float64,0xbfe4e0caac69c196,0xbfebc119b8a181cd,2 +np.float64,0x5753641aaea6d,0x2a9c2ba3e92b0cd2,2 +np.float64,0x72bb814ae5771,0x2a9eda92fada66de,2 +np.float64,0x57ed8f5aafdb3,0x2a9c3c2e1d42e609,2 +np.float64,0xffec33359c38666a,0xd54353b2acd0daf1,2 +np.float64,0x3fa5fe6e8c2bfce0,0x3fd66a0b3bf2720a,2 +np.float64,0xffe2dc8d7ca5b91a,0xd540e6ebc097d601,2 +np.float64,0x7fd99d260eb33a4b,0x553db626c9c75f78,2 +np.float64,0xbfe2dd73e425bae8,0xbfead4fc4b93a727,2 +np.float64,0xdcd4a583b9a95,0x2aa33094c9a17ad7,2 +np.float64,0x7fb0af6422215ec7,0x553039a606e8e64f,2 +np.float64,0x7fdfab6227bf56c3,0x553fe3b26164aeda,2 +np.float64,0x1e4d265e3c9a6,0x2a93cba8a1a8ae6d,2 +np.float64,0xbfdc7d097238fa12,0xbfe86ee2f24fd473,2 +np.float64,0x7fe5d35d29eba6b9,0x5541bea5878bce2b,2 +np.float64,0xffcb886a903710d4,0xd53828281710aab5,2 +np.float64,0xffe058c7ffe0b190,0xd5401d61e9a7cbcf,2 +np.float64,0x3ff0000000000000,0x3ff0000000000000,2 +np.float64,0xffd5b1c1132b6382,0xd53c1c839c098340,2 +np.float64,0x3fe2e7956725cf2b,0x3fead9c907b9d041,2 +np.float64,0x800a8ee293951dc6,0xaaa18ce3f079f118,2 +np.float64,0x7febcd3085b79a60,0x55433c47e1f822ad,2 +np.float64,0x3feb0e14cd761c2a,0x3fee423542102546,2 +np.float64,0x3fb45e6d0628bcda,0x3fdb86db67d0c992,2 +np.float64,0x7fa836e740306dce,0x552d2907cb8118b2,2 +np.float64,0x3fd15ba25b22b745,0x3fe4b6b018409d78,2 +np.float64,0xbfb59980ce2b3300,0xbfdc1206274cb51d,2 +np.float64,0x3fdef1b87fbde371,0x3fe91dafc62124a1,2 +np.float64,0x7fed37a4337a6f47,0x55438e7e0b50ae37,2 +np.float64,0xffe6c87633ad90ec,0xd542001f216ab448,2 +np.float64,0x8008d2548ab1a4a9,0xaaa087ad272d8e17,2 +np.float64,0xbfd1d6744da3ace8,0xbfe4e71965adda74,2 
+np.float64,0xbfb27f751224fee8,0xbfdaa82132775406,2 +np.float64,0x3fe2b336ae65666d,0x3feac0e6b13ec2d2,2 +np.float64,0xffc6bac2262d7584,0xd536a951a2eecb49,2 +np.float64,0x7fdb661321b6cc25,0x553e62dfd7fcd3f3,2 +np.float64,0xffe83567d5706acf,0xd5425e4bb5027568,2 +np.float64,0xbf7f0693e03e0d00,0xbfc9235314d53f82,2 +np.float64,0x3feb32b218766564,0x3fee4fd5847f3722,2 +np.float64,0x3fec25d33df84ba6,0x3feea91fcd4aebab,2 +np.float64,0x7fe17abecb22f57d,0x55407a8ba661207c,2 +np.float64,0xbfe5674b1eeace96,0xbfebfc351708dc70,2 +np.float64,0xbfe51a2d2f6a345a,0xbfebda702c9d302a,2 +np.float64,0x3fec05584af80ab0,0x3fee9d502a7bf54d,2 +np.float64,0xffda8871dcb510e4,0xd53e10105f0365b5,2 +np.float64,0xbfc279c31824f388,0xbfe0c9354d871484,2 +np.float64,0x1cbed61e397dc,0x2a937364712cd518,2 +np.float64,0x800787d198af0fa4,0xaa9f5c847affa1d2,2 +np.float64,0x80079f6d65af3edc,0xaa9f7d2863368bbd,2 +np.float64,0xb942f1e97285e,0x2aa2193e0c513b7f,2 +np.float64,0x7fe9078263320f04,0x554292d85dee2c18,2 +np.float64,0xbfe4de0761a9bc0f,0xbfebbfe04116b829,2 +np.float64,0xbfdbe6f3fc37cde8,0xbfe843aea59a0749,2 +np.float64,0xffcb6c0de136d81c,0xd5381fd9c525b813,2 +np.float64,0x9b6bda9336d7c,0x2aa111c924c35386,2 +np.float64,0x3fe17eece422fdda,0x3fea2a9bacd78607,2 +np.float64,0xd8011c49b0024,0x2aa30c87574fc0c6,2 +np.float64,0xbfc0a08b3f214118,0xbfe034d48f0d8dc0,2 +np.float64,0x3fd60adb1eac15b8,0x3fe66e42e4e7e6b5,2 +np.float64,0x80011d68ea023ad3,0xaa909733befbb962,2 +np.float64,0xffb35ac32426b588,0xd5310c4be1c37270,2 +np.float64,0x3fee8b56c9bd16ae,0x3fef81d8d15f6939,2 +np.float64,0x3fdc10a45e382149,0x3fe84fbe4cf11e68,2 +np.float64,0xbfc85dc45e30bb88,0xbfe2687b5518abde,2 +np.float64,0x3fd53b85212a770a,0x3fe6270d6d920d0f,2 +np.float64,0x800fc158927f82b1,0xaaa40e303239586f,2 +np.float64,0x11af5e98235ed,0x2a908b04a790083f,2 +np.float64,0xbfe2a097afe54130,0xbfeab80269eece99,2 +np.float64,0xbfd74ac588ae958c,0xbfe6d8ca3828d0b8,2 +np.float64,0xffea18ab2ef43156,0xd542d579ab31df1e,2 +np.float64,0xbfecda7058f9b4e1,0xbfeeea29c33b7913,2 +np.float64,0x3fc4ac56ed2958b0,0x3fe16d3e2bd7806d,2 +np.float64,0x3feccc898cb99913,0x3feee531f217dcfa,2 +np.float64,0xffeb3a64c5b674c9,0xd5431a30a41f0905,2 +np.float64,0x3fe5a7ee212b4fdc,0x3fec1844af9076fc,2 +np.float64,0x80080fdb52301fb7,0xaaa00a8b4274db67,2 +np.float64,0x800b3e7e47d67cfd,0xaaa1ec2876959852,2 +np.float64,0x80063fb8ee2c7f73,0xaa9d7875c9f20d6f,2 +np.float64,0x7fdacf80d0b59f01,0x553e2acede4c62a8,2 +np.float64,0x401e9b24803d4,0x2a996a0a75d0e093,2 +np.float64,0x3fe6c29505ed852a,0x3fec907a6d8c10af,2 +np.float64,0x8005c04ee2cb809f,0xaa9caa9813faef46,2 +np.float64,0xbfe1360f21e26c1e,0xbfea06155d6985b6,2 +np.float64,0xffc70606682e0c0c,0xd536c239b9d4be0a,2 +np.float64,0x800e639afefcc736,0xaaa37547d0229a26,2 +np.float64,0x3fe5589290aab125,0x3febf5c925c4e6db,2 +np.float64,0x8003b59330276b27,0xaa98c47e44524335,2 +np.float64,0x800d67ec22dacfd8,0xaaa301251b6a730a,2 +np.float64,0x7fdaeb5025b5d69f,0x553e35397dfe87eb,2 +np.float64,0x3fdae32a24b5c654,0x3fe7f771bc108f6c,2 +np.float64,0xffe6c1fc93ad83f8,0xd541fe6a6a716756,2 +np.float64,0xbfd7b9c1d32f7384,0xbfe6fcdae563d638,2 +np.float64,0x800e1bea06fc37d4,0xaaa354c0bf61449c,2 +np.float64,0xbfd78f097aaf1e12,0xbfe6ef068329bdf4,2 +np.float64,0x7fea6a400874d47f,0x5542e905978ad722,2 +np.float64,0x8008b4377cb1686f,0xaaa074c87eee29f9,2 +np.float64,0x8002f3fb8d45e7f8,0xaa96f47ac539b614,2 +np.float64,0xbfcf2b3fd13e5680,0xbfe3fb91c0cc66ad,2 +np.float64,0xffecca2f5279945e,0xd54375f361075927,2 +np.float64,0x7ff0000000000000,0x7ff0000000000000,2 
+np.float64,0x7f84d5a5a029ab4a,0x552178d1d4e8640e,2
+np.float64,0x3fea8a4b64351497,0x3fee10c332440eb2,2
+np.float64,0x800fe01ac1dfc036,0xaaa41b34d91a4bee,2
+np.float64,0x3fc0b3d8872167b1,0x3fe03b178d354f8d,2
+np.float64,0x5ee8b0acbdd17,0x2a9cf69f2e317729,2
+np.float64,0x8006ef0407adde09,0xaa9e82888f3dd83e,2
+np.float64,0x7fdbb08a07b76113,0x553e7e4e35b938b9,2
+np.float64,0x49663f9c92cc9,0x2a9a95e0affe5108,2
+np.float64,0x7fd9b87e79b370fc,0x553dc0b5cff3dc7d,2
+np.float64,0xbfd86ae657b0d5cc,0xbfe73584d02bdd2b,2
+np.float64,0x3fd4d4a13729a942,0x3fe6030a962aaaf8,2
+np.float64,0x7fcc246bcb3848d7,0x5538557309449bba,2
+np.float64,0xbfdc86a7d5b90d50,0xbfe871a2983c2a29,2
+np.float64,0xd2a6e995a54dd,0x2aa2e3e9c0fdd6c0,2
+np.float64,0x3f92eb447825d680,0x3fd0eb4fd2ba16d2,2
+np.float64,0x800d4001697a8003,0xaaa2ee358661b75c,2
+np.float64,0x3fd3705fd1a6e0c0,0x3fe582a6f321d7d6,2
+np.float64,0xbfcfdf51533fbea4,0xbfe421c3bdd9f2a3,2
+np.float64,0x3fe268e87964d1d1,0x3fea9d47e08aad8a,2
+np.float64,0x24b8901e49713,0x2a951adeefe7b31b,2
+np.float64,0x3fedb35d687b66bb,0x3fef36e440850bf8,2
+np.float64,0x3fb7ab5cbe2f56c0,0x3fdcf097380721c6,2
+np.float64,0x3f8c4eaa10389d54,0x3fceb7ecb605b73b,2
+np.float64,0xbfed831ed6fb063e,0xbfef25f462a336f1,2
+np.float64,0x7fd8c52112318a41,0x553d61b0ee609f58,2
+np.float64,0xbfe71c4ff76e38a0,0xbfecb5d32e789771,2
+np.float64,0xbfe35fb7b166bf70,0xbfeb12328e75ee6b,2
+np.float64,0x458e1a3a8b1c4,0x2a9a1cebadc81342,2
+np.float64,0x8003c1b3ad478368,0xaa98df5ed060b28c,2
+np.float64,0x7ff4000000000000,0x7ffc000000000000,2
+np.float64,0x7fe17098c162e131,0x5540775a9a3a104f,2
+np.float64,0xbfd95cb71732b96e,0xbfe7812acf7ea511,2
+np.float64,0x8000000000000001,0xa990000000000000,2
+np.float64,0xbfde0e7d9ebc1cfc,0xbfe8df9ca9e49a5b,2
+np.float64,0xffef4f67143e9ecd,0xd5440348a6a2f231,2
+np.float64,0x7fe37d23c826fa47,0x5541165de17caa03,2
+np.float64,0xbfcc0e5f85381cc0,0xbfe34b44b0deefe9,2
+np.float64,0x3fe858f1c470b1e4,0x3fed36ab90557d89,2
+np.float64,0x800e857278fd0ae5,0xaaa3847d13220545,2
+np.float64,0x3febd31a66f7a635,0x3fee8af90e66b043,2
+np.float64,0x7fd3fde1b127fbc2,0x553b5b186a49b968,2
+np.float64,0x3fd3dabb8b27b577,0x3fe5a99b446bed26,2
+np.float64,0xffeb4500f1768a01,0xd5431cab828e254a,2
+np.float64,0xffccca8fc6399520,0xd53884f8b505e79e,2
+np.float64,0xffeee9406b7dd280,0xd543ed6d27a1a899,2
+np.float64,0xffecdde0f0f9bbc1,0xd5437a6258b14092,2
+np.float64,0xe6b54005cd6a8,0x2aa378c25938dfda,2
+np.float64,0x7fe610f1022c21e1,0x5541cf460b972925,2
+np.float64,0xbfe5a170ec6b42e2,0xbfec1576081e3232,2
diff --git a/python/numpy/_core/tests/data/umath-validation-set-cos.csv b/python/numpy/_core/tests/data/umath-validation-set-cos.csv
new file mode 100644
index 000000000..258ae48ce
--- /dev/null
+++ b/python/numpy/_core/tests/data/umath-validation-set-cos.csv
@@ -0,0 +1,1375 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x3f800000,2
+np.float32,0x007b2490,0x3f800000,2
+np.float32,0x007c99fa,0x3f800000,2
+np.float32,0x00734a0c,0x3f800000,2
+np.float32,0x0070de24,0x3f800000,2
+np.float32,0x007fffff,0x3f800000,2
+np.float32,0x00000001,0x3f800000,2
+## -ve denormals ##
+np.float32,0x80495d65,0x3f800000,2
+np.float32,0x806894f6,0x3f800000,2
+np.float32,0x80555a76,0x3f800000,2
+np.float32,0x804e1fb8,0x3f800000,2
+np.float32,0x80687de9,0x3f800000,2
+np.float32,0x807fffff,0x3f800000,2
+np.float32,0x80000001,0x3f800000,2
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0x3f800000,2
+np.float32,0x80000000,0x3f800000,2
+np.float32,0x00800000,0x3f800000,2
+np.float32,0x80800000,0x3f800000,2 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x3f0a5140,2 +np.float32,0x3f800001,0x3f0a513f,2 +np.float32,0x3f800002,0x3f0a513d,2 +np.float32,0xc090a8b0,0xbe4332ce,2 +np.float32,0x41ce3184,0x3f4d1de1,2 +np.float32,0xc1d85848,0xbeaa8980,2 +np.float32,0x402b8820,0xbf653aa3,2 +np.float32,0x42b4e454,0xbf4a338b,2 +np.float32,0x42a67a60,0x3c58202e,2 +np.float32,0x41d92388,0xbed987c7,2 +np.float32,0x422dd66c,0x3f5dcab3,2 +np.float32,0xc28f5be6,0xbf5688d8,2 +np.float32,0x41ab2674,0xbf53aa3b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0x3f3504f3,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x3fc90fdb,0xb33bbd2e,2 +np.float32,0xbfc90fdb,0xb33bbd2e,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x4016cbe4,0xbf3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x40490fdb,0xbf800000,2 +np.float32,0xc0490fdb,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x407b53d2,0xbf3504f1,2 +np.float32,0xc07b53d2,0xbf3504f1,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x4096cbe4,0x324cde2e,2 +np.float32,0xc096cbe4,0x324cde2e,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x40afede0,0x3f3504f7,2 +np.float32,0xc0afede0,0x3f3504f7,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x40c90fdb,0x3f800000,2 +np.float32,0xc0c90fdb,0x3f800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0x3f3504f3,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x40fb53d2,0xb4b5563d,2 +np.float32,0xc0fb53d2,0xb4b5563d,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x410a3ae7,0xbf3504fb,2 +np.float32,0xc10a3ae7,0xbf3504fb,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x4116cbe4,0xbf800000,2 +np.float32,0xc116cbe4,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x41235ce2,0xbf3504ef,2 +np.float32,0xc1235ce2,0xbf3504ef,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x412fede0,0x353222c4,2 +np.float32,0xc12fede0,0x353222c4,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 
+np.float32,0x413c7edd,0x3f3504f4,2 +np.float32,0xc13c7edd,0x3f3504f4,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x41490fdb,0x3f800000,2 +np.float32,0xc1490fdb,0x3f800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x4155a0d9,0x3f3504eb,2 +np.float32,0xc155a0d9,0x3f3504eb,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x416231d6,0xb319a6a2,2 +np.float32,0xc16231d6,0xb319a6a2,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x416ec2d4,0xbf3504f7,2 +np.float32,0xc16ec2d4,0xbf3504f7,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x417b53d2,0xbf800000,2 +np.float32,0xc17b53d2,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x4183f268,0xbf3504e7,2 +np.float32,0xc183f268,0xbf3504e7,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x418a3ae7,0x35b08908,2 +np.float32,0xc18a3ae7,0x35b08908,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x41908365,0x3f3504f0,2 +np.float32,0xc1908365,0x3f3504f0,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x4196cbe4,0x3f800000,2 +np.float32,0xc196cbe4,0x3f800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x419d1463,0x3f3504ef,2 +np.float32,0xc19d1463,0x3f3504ef,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x41a35ce2,0xb53889b6,2 +np.float32,0xc1a35ce2,0xb53889b6,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x41a9a561,0xbf3504ff,2 +np.float32,0xc1a9a561,0xbf3504ff,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x41afede0,0xbf800000,2 +np.float32,0xc1afede0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x41b6365e,0xbf3504f6,2 +np.float32,0xc1b6365e,0xbf3504f6,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x41bc7edd,0x33800add,2 +np.float32,0xc1bc7edd,0x33800add,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x41c2c75c,0x3f3504f8,2 +np.float32,0xc1c2c75c,0x3f3504f8,2 
+np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x41c90fdb,0x3f800000,2 +np.float32,0xc1c90fdb,0x3f800000,2 +np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x41cf585a,0x3f3504e7,2 +np.float32,0xc1cf585a,0x3f3504e7,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x41d5a0d9,0xb5b3bc81,2 +np.float32,0xc1d5a0d9,0xb5b3bc81,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x41dbe958,0xbf350507,2 +np.float32,0xc1dbe958,0xbf350507,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x41e231d6,0xbf800000,2 +np.float32,0xc1e231d6,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x41e87a55,0xbf3504ef,2 +np.float32,0xc1e87a55,0xbf3504ef,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x41eec2d4,0x353ef0a7,2 +np.float32,0xc1eec2d4,0x353ef0a7,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x41f50b53,0x3f3504ff,2 +np.float32,0xc1f50b53,0x3f3504ff,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x41fb53d2,0x3f800000,2 +np.float32,0xc1fb53d2,0x3f800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 +np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x4200ce28,0x3f3504f6,2 +np.float32,0xc200ce28,0x3f3504f6,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4203f268,0xb6059a13,2 +np.float32,0xc203f268,0xb6059a13,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x420716a7,0xbf3504f8,2 +np.float32,0xc20716a7,0xbf3504f8,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x420a3ae7,0xbf800000,2 +np.float32,0xc20a3ae7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x420d5f26,0xbf3504e7,2 +np.float32,0xc20d5f26,0xbf3504e7,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x42108365,0xb512200d,2 +np.float32,0xc2108365,0xb512200d,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x4213a7a5,0x3f350507,2 +np.float32,0xc213a7a5,0x3f350507,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 
+np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4216cbe4,0x3f800000,2 +np.float32,0xc216cbe4,0x3f800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 +np.float32,0x4219f024,0x3f3504d8,2 +np.float32,0xc219f024,0x3f3504d8,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x421d1463,0xb5455799,2 +np.float32,0xc21d1463,0xb5455799,2 +np.float32,0x429d1463,0xbf800000,2 +np.float32,0xc29d1463,0xbf800000,2 +np.float32,0x431d1463,0x3f800000,2 +np.float32,0xc31d1463,0x3f800000,2 +np.float32,0x422038a3,0xbf350516,2 +np.float32,0xc22038a3,0xbf350516,2 +np.float32,0x42a038a3,0x36c6cd61,2 +np.float32,0xc2a038a3,0x36c6cd61,2 +np.float32,0x432038a3,0xbf800000,2 +np.float32,0xc32038a3,0xbf800000,2 +np.float32,0x42235ce2,0xbf800000,2 +np.float32,0xc2235ce2,0xbf800000,2 +np.float32,0x42a35ce2,0x3f800000,2 +np.float32,0xc2a35ce2,0x3f800000,2 +np.float32,0x43235ce2,0x3f800000,2 +np.float32,0xc3235ce2,0x3f800000,2 +np.float32,0x42268121,0xbf3504f6,2 +np.float32,0xc2268121,0xbf3504f6,2 +np.float32,0x42a68121,0x34e43aac,2 +np.float32,0xc2a68121,0x34e43aac,2 +np.float32,0x43268121,0xbf800000,2 +np.float32,0xc3268121,0xbf800000,2 +np.float32,0x4229a561,0x360733d0,2 +np.float32,0xc229a561,0x360733d0,2 +np.float32,0x42a9a561,0xbf800000,2 +np.float32,0xc2a9a561,0xbf800000,2 +np.float32,0x4329a561,0x3f800000,2 +np.float32,0xc329a561,0x3f800000,2 +np.float32,0x422cc9a0,0x3f3504f8,2 +np.float32,0xc22cc9a0,0x3f3504f8,2 +np.float32,0x42acc9a0,0x35655a50,2 +np.float32,0xc2acc9a0,0x35655a50,2 +np.float32,0x432cc9a0,0xbf800000,2 +np.float32,0xc32cc9a0,0xbf800000,2 +np.float32,0x422fede0,0x3f800000,2 +np.float32,0xc22fede0,0x3f800000,2 +np.float32,0x42afede0,0x3f800000,2 +np.float32,0xc2afede0,0x3f800000,2 +np.float32,0x432fede0,0x3f800000,2 +np.float32,0xc32fede0,0x3f800000,2 +np.float32,0x4233121f,0x3f3504e7,2 +np.float32,0xc233121f,0x3f3504e7,2 +np.float32,0x42b3121f,0xb60f347d,2 +np.float32,0xc2b3121f,0xb60f347d,2 +np.float32,0x4333121f,0xbf800000,2 +np.float32,0xc333121f,0xbf800000,2 +np.float32,0x4236365e,0x350bb91c,2 +np.float32,0xc236365e,0x350bb91c,2 +np.float32,0x42b6365e,0xbf800000,2 +np.float32,0xc2b6365e,0xbf800000,2 +np.float32,0x4336365e,0x3f800000,2 +np.float32,0xc336365e,0x3f800000,2 +np.float32,0x42395a9e,0xbf350507,2 +np.float32,0xc2395a9e,0xbf350507,2 +np.float32,0x42b95a9e,0x36651267,2 +np.float32,0xc2b95a9e,0x36651267,2 +np.float32,0x43395a9e,0xbf800000,2 +np.float32,0xc3395a9e,0xbf800000,2 +np.float32,0x423c7edd,0xbf800000,2 +np.float32,0xc23c7edd,0xbf800000,2 +np.float32,0x42bc7edd,0x3f800000,2 +np.float32,0xc2bc7edd,0x3f800000,2 +np.float32,0x433c7edd,0x3f800000,2 +np.float32,0xc33c7edd,0x3f800000,2 +np.float32,0x423fa31d,0xbf3504d7,2 +np.float32,0xc23fa31d,0xbf3504d7,2 +np.float32,0x42bfa31d,0xb69d7828,2 +np.float32,0xc2bfa31d,0xb69d7828,2 +np.float32,0x433fa31d,0xbf800000,2 +np.float32,0xc33fa31d,0xbf800000,2 +np.float32,0x4242c75c,0x354bbe8a,2 +np.float32,0xc242c75c,0x354bbe8a,2 +np.float32,0x42c2c75c,0xbf800000,2 +np.float32,0xc2c2c75c,0xbf800000,2 +np.float32,0x4342c75c,0x3f800000,2 +np.float32,0xc342c75c,0x3f800000,2 +np.float32,0x4245eb9c,0x3f350517,2 +np.float32,0xc245eb9c,0x3f350517,2 +np.float32,0x42c5eb9c,0x36c8671d,2 +np.float32,0xc2c5eb9c,0x36c8671d,2 +np.float32,0x4345eb9c,0xbf800000,2 +np.float32,0xc345eb9c,0xbf800000,2 
+np.float32,0x42490fdb,0x3f800000,2 +np.float32,0xc2490fdb,0x3f800000,2 +np.float32,0x42c90fdb,0x3f800000,2 +np.float32,0xc2c90fdb,0x3f800000,2 +np.float32,0x43490fdb,0x3f800000,2 +np.float32,0xc3490fdb,0x3f800000,2 +np.float32,0x424c341a,0x3f3504f5,2 +np.float32,0xc24c341a,0x3f3504f5,2 +np.float32,0x42cc341a,0x34ca9ee6,2 +np.float32,0xc2cc341a,0x34ca9ee6,2 +np.float32,0x434c341a,0xbf800000,2 +np.float32,0xc34c341a,0xbf800000,2 +np.float32,0x424f585a,0xb608cd8c,2 +np.float32,0xc24f585a,0xb608cd8c,2 +np.float32,0x42cf585a,0xbf800000,2 +np.float32,0xc2cf585a,0xbf800000,2 +np.float32,0x434f585a,0x3f800000,2 +np.float32,0xc34f585a,0x3f800000,2 +np.float32,0x42527c99,0xbf3504f9,2 +np.float32,0xc2527c99,0xbf3504f9,2 +np.float32,0x42d27c99,0x35722833,2 +np.float32,0xc2d27c99,0x35722833,2 +np.float32,0x43527c99,0xbf800000,2 +np.float32,0xc3527c99,0xbf800000,2 +np.float32,0x4255a0d9,0xbf800000,2 +np.float32,0xc255a0d9,0xbf800000,2 +np.float32,0x42d5a0d9,0x3f800000,2 +np.float32,0xc2d5a0d9,0x3f800000,2 +np.float32,0x4355a0d9,0x3f800000,2 +np.float32,0xc355a0d9,0x3f800000,2 +np.float32,0x4258c518,0xbf3504e6,2 +np.float32,0xc258c518,0xbf3504e6,2 +np.float32,0x42d8c518,0xb61267f6,2 +np.float32,0xc2d8c518,0xb61267f6,2 +np.float32,0x4358c518,0xbf800000,2 +np.float32,0xc358c518,0xbf800000,2 +np.float32,0x425be958,0x365eab75,2 +np.float32,0xc25be958,0x365eab75,2 +np.float32,0x42dbe958,0xbf800000,2 +np.float32,0xc2dbe958,0xbf800000,2 +np.float32,0x435be958,0x3f800000,2 +np.float32,0xc35be958,0x3f800000,2 +np.float32,0x425f0d97,0x3f350508,2 +np.float32,0xc25f0d97,0x3f350508,2 +np.float32,0x42df0d97,0x366845e0,2 +np.float32,0xc2df0d97,0x366845e0,2 +np.float32,0x435f0d97,0xbf800000,2 +np.float32,0xc35f0d97,0xbf800000,2 +np.float32,0x426231d6,0x3f800000,2 +np.float32,0xc26231d6,0x3f800000,2 +np.float32,0x42e231d6,0x3f800000,2 +np.float32,0xc2e231d6,0x3f800000,2 +np.float32,0x436231d6,0x3f800000,2 +np.float32,0xc36231d6,0x3f800000,2 +np.float32,0x42655616,0x3f3504d7,2 +np.float32,0xc2655616,0x3f3504d7,2 +np.float32,0x42e55616,0xb69f11e5,2 +np.float32,0xc2e55616,0xb69f11e5,2 +np.float32,0x43655616,0xbf800000,2 +np.float32,0xc3655616,0xbf800000,2 +np.float32,0x42687a55,0xb552257b,2 +np.float32,0xc2687a55,0xb552257b,2 +np.float32,0x42e87a55,0xbf800000,2 +np.float32,0xc2e87a55,0xbf800000,2 +np.float32,0x43687a55,0x3f800000,2 +np.float32,0xc3687a55,0x3f800000,2 +np.float32,0x426b9e95,0xbf350517,2 +np.float32,0xc26b9e95,0xbf350517,2 +np.float32,0x42eb9e95,0x36ca00d9,2 +np.float32,0xc2eb9e95,0x36ca00d9,2 +np.float32,0x436b9e95,0xbf800000,2 +np.float32,0xc36b9e95,0xbf800000,2 +np.float32,0x426ec2d4,0xbf800000,2 +np.float32,0xc26ec2d4,0xbf800000,2 +np.float32,0x42eec2d4,0x3f800000,2 +np.float32,0xc2eec2d4,0x3f800000,2 +np.float32,0x436ec2d4,0x3f800000,2 +np.float32,0xc36ec2d4,0x3f800000,2 +np.float32,0x4271e713,0xbf3504f5,2 +np.float32,0xc271e713,0xbf3504f5,2 +np.float32,0x42f1e713,0x34b10321,2 +np.float32,0xc2f1e713,0x34b10321,2 +np.float32,0x4371e713,0xbf800000,2 +np.float32,0xc371e713,0xbf800000,2 +np.float32,0x42750b53,0x360a6748,2 +np.float32,0xc2750b53,0x360a6748,2 +np.float32,0x42f50b53,0xbf800000,2 +np.float32,0xc2f50b53,0xbf800000,2 +np.float32,0x43750b53,0x3f800000,2 +np.float32,0xc3750b53,0x3f800000,2 +np.float32,0x42782f92,0x3f3504f9,2 +np.float32,0xc2782f92,0x3f3504f9,2 +np.float32,0x42f82f92,0x357ef616,2 +np.float32,0xc2f82f92,0x357ef616,2 +np.float32,0x43782f92,0xbf800000,2 +np.float32,0xc3782f92,0xbf800000,2 +np.float32,0x427b53d2,0x3f800000,2 +np.float32,0xc27b53d2,0x3f800000,2 
+np.float32,0x42fb53d2,0x3f800000,2 +np.float32,0xc2fb53d2,0x3f800000,2 +np.float32,0x437b53d2,0x3f800000,2 +np.float32,0xc37b53d2,0x3f800000,2 +np.float32,0x427e7811,0x3f3504e6,2 +np.float32,0xc27e7811,0x3f3504e6,2 +np.float32,0x42fe7811,0xb6159b6f,2 +np.float32,0xc2fe7811,0xb6159b6f,2 +np.float32,0x437e7811,0xbf800000,2 +np.float32,0xc37e7811,0xbf800000,2 +np.float32,0x4280ce28,0x34fdd672,2 +np.float32,0xc280ce28,0x34fdd672,2 +np.float32,0x4300ce28,0xbf800000,2 +np.float32,0xc300ce28,0xbf800000,2 +np.float32,0x4380ce28,0x3f800000,2 +np.float32,0xc380ce28,0x3f800000,2 +np.float32,0x42826048,0xbf350508,2 +np.float32,0xc2826048,0xbf350508,2 +np.float32,0x43026048,0x366b7958,2 +np.float32,0xc3026048,0x366b7958,2 +np.float32,0x43826048,0xbf800000,2 +np.float32,0xc3826048,0xbf800000,2 +np.float32,0x4283f268,0xbf800000,2 +np.float32,0xc283f268,0xbf800000,2 +np.float32,0x4303f268,0x3f800000,2 +np.float32,0xc303f268,0x3f800000,2 +np.float32,0x4383f268,0x3f800000,2 +np.float32,0xc383f268,0x3f800000,2 +np.float32,0x42858487,0xbf350504,2 +np.float32,0xc2858487,0xbf350504,2 +np.float32,0x43058487,0x363ea8be,2 +np.float32,0xc3058487,0x363ea8be,2 +np.float32,0x43858487,0xbf800000,2 +np.float32,0xc3858487,0xbf800000,2 +np.float32,0x428716a7,0x35588c6d,2 +np.float32,0xc28716a7,0x35588c6d,2 +np.float32,0x430716a7,0xbf800000,2 +np.float32,0xc30716a7,0xbf800000,2 +np.float32,0x438716a7,0x3f800000,2 +np.float32,0xc38716a7,0x3f800000,2 +np.float32,0x4288a8c7,0x3f350517,2 +np.float32,0xc288a8c7,0x3f350517,2 +np.float32,0x4308a8c7,0x36cb9a96,2 +np.float32,0xc308a8c7,0x36cb9a96,2 +np.float32,0x4388a8c7,0xbf800000,2 +np.float32,0xc388a8c7,0xbf800000,2 +np.float32,0x428a3ae7,0x3f800000,2 +np.float32,0xc28a3ae7,0x3f800000,2 +np.float32,0x430a3ae7,0x3f800000,2 +np.float32,0xc30a3ae7,0x3f800000,2 +np.float32,0x438a3ae7,0x3f800000,2 +np.float32,0xc38a3ae7,0x3f800000,2 +np.float32,0x428bcd06,0x3f3504f5,2 +np.float32,0xc28bcd06,0x3f3504f5,2 +np.float32,0x430bcd06,0x3497675b,2 +np.float32,0xc30bcd06,0x3497675b,2 +np.float32,0x438bcd06,0xbf800000,2 +np.float32,0xc38bcd06,0xbf800000,2 +np.float32,0x428d5f26,0xb60c0105,2 +np.float32,0xc28d5f26,0xb60c0105,2 +np.float32,0x430d5f26,0xbf800000,2 +np.float32,0xc30d5f26,0xbf800000,2 +np.float32,0x438d5f26,0x3f800000,2 +np.float32,0xc38d5f26,0x3f800000,2 +np.float32,0x428ef146,0xbf350526,2 +np.float32,0xc28ef146,0xbf350526,2 +np.float32,0x430ef146,0x3710bc40,2 +np.float32,0xc30ef146,0x3710bc40,2 +np.float32,0x438ef146,0xbf800000,2 +np.float32,0xc38ef146,0xbf800000,2 +np.float32,0x42908365,0xbf800000,2 +np.float32,0xc2908365,0xbf800000,2 +np.float32,0x43108365,0x3f800000,2 +np.float32,0xc3108365,0x3f800000,2 +np.float32,0x43908365,0x3f800000,2 +np.float32,0xc3908365,0x3f800000,2 +np.float32,0x42921585,0xbf3504e6,2 +np.float32,0xc2921585,0xbf3504e6,2 +np.float32,0x43121585,0xb618cee8,2 +np.float32,0xc3121585,0xb618cee8,2 +np.float32,0x43921585,0xbf800000,2 +np.float32,0xc3921585,0xbf800000,2 +np.float32,0x4293a7a5,0x3661deee,2 +np.float32,0xc293a7a5,0x3661deee,2 +np.float32,0x4313a7a5,0xbf800000,2 +np.float32,0xc313a7a5,0xbf800000,2 +np.float32,0x4393a7a5,0x3f800000,2 +np.float32,0xc393a7a5,0x3f800000,2 +np.float32,0x429539c5,0x3f350536,2 +np.float32,0xc29539c5,0x3f350536,2 +np.float32,0x431539c5,0x373bab34,2 +np.float32,0xc31539c5,0x373bab34,2 +np.float32,0x439539c5,0xbf800000,2 +np.float32,0xc39539c5,0xbf800000,2 +np.float32,0x4296cbe4,0x3f800000,2 +np.float32,0xc296cbe4,0x3f800000,2 +np.float32,0x4316cbe4,0x3f800000,2 +np.float32,0xc316cbe4,0x3f800000,2 
+np.float32,0x4396cbe4,0x3f800000,2 +np.float32,0xc396cbe4,0x3f800000,2 +np.float32,0x42985e04,0x3f3504d7,2 +np.float32,0xc2985e04,0x3f3504d7,2 +np.float32,0x43185e04,0xb6a2455d,2 +np.float32,0xc3185e04,0xb6a2455d,2 +np.float32,0x43985e04,0xbf800000,2 +np.float32,0xc3985e04,0xbf800000,2 +np.float32,0x4299f024,0xb69bde6c,2 +np.float32,0xc299f024,0xb69bde6c,2 +np.float32,0x4319f024,0xbf800000,2 +np.float32,0xc319f024,0xbf800000,2 +np.float32,0x4399f024,0x3f800000,2 +np.float32,0xc399f024,0x3f800000,2 +np.float32,0x429b8243,0xbf3504ea,2 +np.float32,0xc29b8243,0xbf3504ea,2 +np.float32,0x431b8243,0xb5cb2eb8,2 +np.float32,0xc31b8243,0xb5cb2eb8,2 +np.float32,0x439b8243,0xbf800000,2 +np.float32,0xc39b8243,0xbf800000,2 +np.float32,0x435b2047,0x3f3504c1,2 +np.float32,0x42a038a2,0xb5e4ca7e,2 +np.float32,0x432038a2,0xbf800000,2 +np.float32,0x4345eb9b,0xbf800000,2 +np.float32,0x42c5eb9b,0xb5de638c,2 +np.float32,0x42eb9e94,0xb5d7fc9b,2 +np.float32,0x4350ea79,0x3631dadb,2 +np.float32,0x42dbe957,0xbf800000,2 +np.float32,0x425be957,0xb505522a,2 +np.float32,0x435be957,0x3f800000,2 +np.float32,0x46027eb2,0x3e7d94c9,2 +np.float32,0x4477baed,0xbe7f1824,2 +np.float32,0x454b8024,0x3e7f5268,2 +np.float32,0x455d2c09,0x3e7f40cb,2 +np.float32,0x4768d3de,0xba14b4af,2 +np.float32,0x46c1e7cd,0x3e7fb102,2 +np.float32,0x44a52949,0xbe7dc9d5,2 +np.float32,0x4454633a,0x3e7dbc7d,2 +np.float32,0x4689810b,0x3e7eb02b,2 +np.float32,0x473473cd,0xbe7eef6f,2 +np.float32,0x44a5193f,0x3e7e1b1f,2 +np.float32,0x46004b36,0x3e7dac59,2 +np.float32,0x467f604b,0x3d7ffd3a,2 +np.float32,0x45ea1805,0x3dffd2e0,2 +np.float32,0x457b6af3,0x3dff7831,2 +np.float32,0x44996159,0xbe7d85f4,2 +np.float32,0x47883553,0xbb80584e,2 +np.float32,0x44e19f0c,0xbdffcfe6,2 +np.float32,0x472b3bf6,0xbe7f7a82,2 +np.float32,0x4600bb4e,0x3a135e33,2 +np.float32,0x449f4556,0x3e7e42e5,2 +np.float32,0x474e9420,0x3dff77b2,2 +np.float32,0x45cbdb23,0x3dff7240,2 +np.float32,0x44222747,0x3dffb039,2 +np.float32,0x4772e419,0xbdff74b8,2 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x7fefffffffffffff,0xbfefffe62ecfab75,1 +np.float64,0xffefffffffffffff,0xbfefffe62ecfab75,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfc28bd9dd2517b4,0x3fefaa28ba13a702,1 +np.float64,0x3fb673c62e2ce790,0x3fefe083847a717f,1 +np.float64,0xbfe3e1dac7e7c3b6,0x3fea0500ba099f3a,1 +np.float64,0xbfbe462caa3c8c58,0x3fefc6c8b9c1c87c,1 +np.float64,0xbfb9353576326a68,0x3fefd8513e50e6b1,1 +np.float64,0xbfc05e798520bcf4,0x3fefbd1ad81cf089,1 +np.float64,0xbfe3ca3be2e79478,0x3fea12b995ea6574,1 +np.float64,0xbfde875d46bd0eba,0x3fec6d888662a824,1 +np.float64,0x3fafc4e02c3f89c0,0x3feff03c34bffd69,1 +np.float64,0xbf98855848310ac0,0x3feffda6c1588bdb,1 +np.float64,0x3fe66c51186cd8a2,0x3fe875c61c630ecb,1 +np.float64,0xbfedff1c3b7bfe38,0x3fe2f0c8c9e8fa39,1 +np.float64,0x3fd6082267ac1044,0x3fee1f6023695050,1 +np.float64,0xbfe78449b06f0894,0x3fe7bda2b223850e,1 +np.float64,0x3feedb8e63fdb71c,0x3fe23d5dfd2dd33f,1 +np.float64,0xbfc0a9de3d2153bc,0x3fefbaadf5e5285e,1 +np.float64,0x3fc04c67432098d0,0x3fefbdae07b7de8d,1 +np.float64,0xbfeeef84c4fddf0a,0x3fe22cf37f309d88,1 +np.float64,0x3fc04bb025209760,0x3fefbdb3d7d34ecf,1 +np.float64,0x3fd6b84d48ad709c,0x3fee013403da6e2a,1 
+np.float64,0x3fec1ae25d7835c4,0x3fe46e62195cf274,1 +np.float64,0xbfdc6fdf9bb8dfc0,0x3fece48dc78bbb2e,1 +np.float64,0x3fb4db2c9229b660,0x3fefe4d42f79bf49,1 +np.float64,0xbfc0ed698521dad4,0x3fefb8785ea658c9,1 +np.float64,0xbfee82772b7d04ee,0x3fe2864a80efe8e9,1 +np.float64,0x3fd575b664aaeb6c,0x3fee37c669a12879,1 +np.float64,0x3fe4afb1c5e95f64,0x3fe98b177194439c,1 +np.float64,0x3fd93962f9b272c4,0x3fed8bef61876294,1 +np.float64,0x3fd97ae025b2f5c0,0x3fed7f4cfbf4d300,1 +np.float64,0xbfd9afdb1bb35fb6,0x3fed74fdc44dabb1,1 +np.float64,0x3f8ae65e3035cc80,0x3fefff4b1a0ea62b,1 +np.float64,0xbfe7e58664efcb0d,0x3fe77c02a1cbb670,1 +np.float64,0x3fe5f68b37ebed16,0x3fe8c10f849a5d4d,1 +np.float64,0x3fd9137d61b226fc,0x3fed9330eb4815a1,1 +np.float64,0x3fc146d019228da0,0x3fefb57e2d4d52f8,1 +np.float64,0xbfda6036edb4c06e,0x3fed521b2b578679,1 +np.float64,0xbfe78ddfb0ef1bc0,0x3fe7b734319a77e4,1 +np.float64,0x3fe0877823610ef0,0x3febd33a993dd786,1 +np.float64,0x3fbc61af2e38c360,0x3fefcdb4f889756d,1 +np.float64,0x3fd4dcdca4a9b9b8,0x3fee50962ffea5ae,1 +np.float64,0xbfe03cb29f607965,0x3febf7dbf640a75a,1 +np.float64,0xbfc81de407303bc8,0x3fef6f066cef64bc,1 +np.float64,0x3fd8dea42db1bd48,0x3fed9d3e00dbe0b3,1 +np.float64,0x3feac75e94f58ebe,0x3fe56f1f47f97896,1 +np.float64,0x3fb3a1ea6e2743d0,0x3fefe7ec1247cdaa,1 +np.float64,0x3fd695c0f4ad2b80,0x3fee0730bd40883d,1 +np.float64,0xbfd2c631f5a58c64,0x3feea20cbd1105d7,1 +np.float64,0xbfe978a8e1f2f152,0x3fe663014d40ad7a,1 +np.float64,0x3fd8b6b76ab16d70,0x3feda4c879aacc19,1 +np.float64,0x3feaafd30e755fa6,0x3fe5809514c28453,1 +np.float64,0x3fe1e37dc263c6fc,0x3feb20f9ad1f3f5c,1 +np.float64,0x3fd0ec7c24a1d8f8,0x3feee34048f43b75,1 +np.float64,0xbfe3881cbf67103a,0x3fea38d7886e6f53,1 +np.float64,0xbfd7023957ae0472,0x3fedf4471c765a1c,1 +np.float64,0xbfebc51c4ef78a38,0x3fe4b01c424e297b,1 +np.float64,0xbfe20a93eae41528,0x3feb0c2aa321d2e0,1 +np.float64,0x3fef39be867e737e,0x3fe1efaba9164d27,1 +np.float64,0x3fe8ea9576f1d52a,0x3fe6c7a8826ce1be,1 +np.float64,0x3fea921d91f5243c,0x3fe5968c6cf78963,1 +np.float64,0x3fd7ee5d31afdcbc,0x3fedc9f19d43fe61,1 +np.float64,0xbfe3ed581767dab0,0x3fe9fe4ee2f2b1cd,1 +np.float64,0xbfc40923d5281248,0x3fef9bd8ee9f6e68,1 +np.float64,0x3fe411a834682350,0x3fe9e9103854f057,1 +np.float64,0xbfedf6ccdf7bed9a,0x3fe2f77ad6543246,1 +np.float64,0xbfe8788a44f0f114,0x3fe7172f3aa0c742,1 +np.float64,0xbfce728f173ce520,0x3fef1954083bea04,1 +np.float64,0xbfd64dd0acac9ba2,0x3fee138c3293c246,1 +np.float64,0xbfe00669f5600cd4,0x3fec121443945350,1 +np.float64,0xbfe7152ba2ee2a58,0x3fe8079465d09846,1 +np.float64,0x3fe8654d8f70ca9c,0x3fe7247c94f09596,1 +np.float64,0x3fea68045cf4d008,0x3fe5b58cfe81a243,1 +np.float64,0xbfcd4779073a8ef4,0x3fef2a9d78153fa5,1 +np.float64,0xbfdb4456e5b688ae,0x3fed23b11614203f,1 +np.float64,0x3fcb5d59cd36bab0,0x3fef45818216a515,1 +np.float64,0xbfd914ff5ab229fe,0x3fed92e73746fea8,1 +np.float64,0x3fe4d211db69a424,0x3fe97653f433d15f,1 +np.float64,0xbfdbbb9224b77724,0x3fed0adb593dde80,1 +np.float64,0x3fd424ceafa8499c,0x3fee6d9124795d33,1 +np.float64,0x3feb5968f976b2d2,0x3fe501d116efbf54,1 +np.float64,0x3fee7d92a2fcfb26,0x3fe28a479b6a9dcf,1 +np.float64,0x3fc308e9972611d0,0x3fefa595f4df0c89,1 +np.float64,0x3fda79cd77b4f39c,0x3fed4cf8e69ba1f8,1 +np.float64,0x3fcbcf42d5379e88,0x3fef3f6a6a77c187,1 +np.float64,0x3fe13a1da662743c,0x3feb79504faea888,1 +np.float64,0xbfee4435f07c886c,0x3fe2b8ea98d2fc29,1 +np.float64,0x3fd65d68ccacbad0,0x3fee10e1ac7ada89,1 +np.float64,0x3fef2f89bb7e5f14,0x3fe1f81e882cc3f4,1 +np.float64,0xbfef0a7769fe14ef,0x3fe216bf384fc646,1 
+np.float64,0x3fc065277320ca50,0x3fefbce44835c193,1 +np.float64,0x3fe9c1a74d73834e,0x3fe62e9ee0c2f2bf,1 +np.float64,0x3fd9d96e5db3b2dc,0x3fed6cd88eb51f6a,1 +np.float64,0x3fe02bf1c56057e4,0x3febfffc24b5a7ba,1 +np.float64,0xbfd6814350ad0286,0x3fee0ab9ad318b84,1 +np.float64,0x3f9fcbec583f97c0,0x3feffc0d0f1d8e75,1 +np.float64,0x3fe23524e5e46a4a,0x3feaf55372949a06,1 +np.float64,0xbfbdc95f6a3b92c0,0x3fefc89c21d44995,1 +np.float64,0x3fe961bb9cf2c378,0x3fe6735d6e1cca58,1 +np.float64,0xbfe8f1c370f1e387,0x3fe6c29d1be8bee9,1 +np.float64,0x3fd880d43ab101a8,0x3fedaee3c7ccfc96,1 +np.float64,0xbfedb37005fb66e0,0x3fe32d91ef2e3bd3,1 +np.float64,0xfdce287bfb9c5,0x3ff0000000000000,1 +np.float64,0x9aa1b9e735437,0x3ff0000000000000,1 +np.float64,0x6beac6e0d7d59,0x3ff0000000000000,1 +np.float64,0x47457aae8e8b0,0x3ff0000000000000,1 +np.float64,0x35ff13b46bfe3,0x3ff0000000000000,1 +np.float64,0xb9c0c82b73819,0x3ff0000000000000,1 +np.float64,0x1a8dc21a351b9,0x3ff0000000000000,1 +np.float64,0x7e87ef6afd0ff,0x3ff0000000000000,1 +np.float64,0x620a6588c414d,0x3ff0000000000000,1 +np.float64,0x7f366000fe6e,0x3ff0000000000000,1 +np.float64,0x787e39f4f0fc8,0x3ff0000000000000,1 +np.float64,0xf5134f1fea26a,0x3ff0000000000000,1 +np.float64,0xbce700ef79ce0,0x3ff0000000000000,1 +np.float64,0x144d7cc8289b1,0x3ff0000000000000,1 +np.float64,0xb9fbc5b973f79,0x3ff0000000000000,1 +np.float64,0xc3d6292d87ac5,0x3ff0000000000000,1 +np.float64,0xc1084e618210a,0x3ff0000000000000,1 +np.float64,0xb6b9eca56d73e,0x3ff0000000000000,1 +np.float64,0xc7ac4b858f58a,0x3ff0000000000000,1 +np.float64,0x516d75d2a2daf,0x3ff0000000000000,1 +np.float64,0x9dc089d93b811,0x3ff0000000000000,1 +np.float64,0x7b5f2840f6be6,0x3ff0000000000000,1 +np.float64,0x121d3ce8243a9,0x3ff0000000000000,1 +np.float64,0xf0be0337e17c1,0x3ff0000000000000,1 +np.float64,0xff58a5cbfeb15,0x3ff0000000000000,1 +np.float64,0xdaf1d07fb5e3a,0x3ff0000000000000,1 +np.float64,0x61d95382c3b2b,0x3ff0000000000000,1 +np.float64,0xe4df943fc9bf3,0x3ff0000000000000,1 +np.float64,0xf72ac2bdee559,0x3ff0000000000000,1 +np.float64,0x12dafbf625b60,0x3ff0000000000000,1 +np.float64,0xee11d427dc23b,0x3ff0000000000000,1 +np.float64,0xf4f8eb37e9f1e,0x3ff0000000000000,1 +np.float64,0xad7cb5df5af97,0x3ff0000000000000,1 +np.float64,0x59fc9b06b3f94,0x3ff0000000000000,1 +np.float64,0x3c3e65e4787ce,0x3ff0000000000000,1 +np.float64,0xe37bc993c6f79,0x3ff0000000000000,1 +np.float64,0x13bd6330277ad,0x3ff0000000000000,1 +np.float64,0x56cc2800ad986,0x3ff0000000000000,1 +np.float64,0x6203b8fcc4078,0x3ff0000000000000,1 +np.float64,0x75c7c8b8eb8fa,0x3ff0000000000000,1 +np.float64,0x5ebf8e00bd7f2,0x3ff0000000000000,1 +np.float64,0xda81f2f1b503f,0x3ff0000000000000,1 +np.float64,0x6adb17d6d5b64,0x3ff0000000000000,1 +np.float64,0x1ba68eee374d3,0x3ff0000000000000,1 +np.float64,0xeecf6fbbdd9ee,0x3ff0000000000000,1 +np.float64,0x24d6dd8e49add,0x3ff0000000000000,1 +np.float64,0xdf7cb81bbef97,0x3ff0000000000000,1 +np.float64,0xafd7be1b5faf8,0x3ff0000000000000,1 +np.float64,0xdb90ca35b721a,0x3ff0000000000000,1 +np.float64,0xa72903a14e521,0x3ff0000000000000,1 +np.float64,0x14533ee028a7,0x3ff0000000000000,1 +np.float64,0x7951540cf2a2b,0x3ff0000000000000,1 +np.float64,0x22882be045106,0x3ff0000000000000,1 +np.float64,0x136270d626c4f,0x3ff0000000000000,1 +np.float64,0x6a0f5744d41ec,0x3ff0000000000000,1 +np.float64,0x21e0d1aa43c1b,0x3ff0000000000000,1 +np.float64,0xee544155dca88,0x3ff0000000000000,1 +np.float64,0xcbe8aac797d16,0x3ff0000000000000,1 +np.float64,0x6c065e80d80e,0x3ff0000000000000,1 
+np.float64,0xe57f0411cafe1,0x3ff0000000000000,1 +np.float64,0xdec3a6bdbd875,0x3ff0000000000000,1 +np.float64,0xf4d23a0fe9a48,0x3ff0000000000000,1 +np.float64,0xda77ef47b4efe,0x3ff0000000000000,1 +np.float64,0x8c405c9b1880c,0x3ff0000000000000,1 +np.float64,0x4eced5149d9db,0x3ff0000000000000,1 +np.float64,0x16b6552c2d6cc,0x3ff0000000000000,1 +np.float64,0x6fbc262cdf785,0x3ff0000000000000,1 +np.float64,0x628c3844c5188,0x3ff0000000000000,1 +np.float64,0x6d827d2cdb050,0x3ff0000000000000,1 +np.float64,0xd1bfdf29a37fc,0x3ff0000000000000,1 +np.float64,0xd85400fdb0a80,0x3ff0000000000000,1 +np.float64,0xcc420b2d98842,0x3ff0000000000000,1 +np.float64,0xac41d21b5883b,0x3ff0000000000000,1 +np.float64,0x432f18d4865e4,0x3ff0000000000000,1 +np.float64,0xe7e89a1bcfd14,0x3ff0000000000000,1 +np.float64,0x9b1141d536228,0x3ff0000000000000,1 +np.float64,0x6805f662d00bf,0x3ff0000000000000,1 +np.float64,0xc76552358ecab,0x3ff0000000000000,1 +np.float64,0x4ae8ffee95d21,0x3ff0000000000000,1 +np.float64,0x4396c096872d9,0x3ff0000000000000,1 +np.float64,0x6e8e55d4dd1cb,0x3ff0000000000000,1 +np.float64,0x4c2e33dc985c7,0x3ff0000000000000,1 +np.float64,0xbce814a579d03,0x3ff0000000000000,1 +np.float64,0x911681b5222d0,0x3ff0000000000000,1 +np.float64,0x5f90a4b2bf215,0x3ff0000000000000,1 +np.float64,0x26f76be84deee,0x3ff0000000000000,1 +np.float64,0xb2f7536165eeb,0x3ff0000000000000,1 +np.float64,0x4de4e6089bc9d,0x3ff0000000000000,1 +np.float64,0xf2e016afe5c03,0x3ff0000000000000,1 +np.float64,0xb9b7b949736f7,0x3ff0000000000000,1 +np.float64,0x3363ea1866c7e,0x3ff0000000000000,1 +np.float64,0xd1a3bd6ba3478,0x3ff0000000000000,1 +np.float64,0xae89f3595d13f,0x3ff0000000000000,1 +np.float64,0xddbd9601bb7c,0x3ff0000000000000,1 +np.float64,0x5de41a06bbc84,0x3ff0000000000000,1 +np.float64,0xfd58c86dfab19,0x3ff0000000000000,1 +np.float64,0x24922e8c49247,0x3ff0000000000000,1 +np.float64,0xcda040339b408,0x3ff0000000000000,1 +np.float64,0x5fe500b2bfca1,0x3ff0000000000000,1 +np.float64,0x9214abb924296,0x3ff0000000000000,1 +np.float64,0x800609fe0a2c13fd,0x3ff0000000000000,1 +np.float64,0x800c7c6fe518f8e0,0x3ff0000000000000,1 +np.float64,0x800a1a9491b4352a,0x3ff0000000000000,1 +np.float64,0x800b45e0e8968bc2,0x3ff0000000000000,1 +np.float64,0x8008497e57d092fd,0x3ff0000000000000,1 +np.float64,0x800b9c0af0173816,0x3ff0000000000000,1 +np.float64,0x800194cccb43299a,0x3ff0000000000000,1 +np.float64,0x8001c91ef183923f,0x3ff0000000000000,1 +np.float64,0x800f25b5ccde4b6c,0x3ff0000000000000,1 +np.float64,0x800ce63ccc79cc7a,0x3ff0000000000000,1 +np.float64,0x800d8fb2e83b1f66,0x3ff0000000000000,1 +np.float64,0x80083cd06f7079a1,0x3ff0000000000000,1 +np.float64,0x800823598e9046b3,0x3ff0000000000000,1 +np.float64,0x8001c1319de38264,0x3ff0000000000000,1 +np.float64,0x800f2b68543e56d1,0x3ff0000000000000,1 +np.float64,0x80022a4f4364549f,0x3ff0000000000000,1 +np.float64,0x800f51badf7ea376,0x3ff0000000000000,1 +np.float64,0x8003fbf31e27f7e7,0x3ff0000000000000,1 +np.float64,0x800d4c00e2fa9802,0x3ff0000000000000,1 +np.float64,0x800023b974804774,0x3ff0000000000000,1 +np.float64,0x800860778990c0ef,0x3ff0000000000000,1 +np.float64,0x800a15c241542b85,0x3ff0000000000000,1 +np.float64,0x8003097d9dc612fc,0x3ff0000000000000,1 +np.float64,0x800d77d8541aefb1,0x3ff0000000000000,1 +np.float64,0x80093804ab52700a,0x3ff0000000000000,1 +np.float64,0x800d2b3bfd7a5678,0x3ff0000000000000,1 +np.float64,0x800da24bcd5b4498,0x3ff0000000000000,1 +np.float64,0x8006eee1c28dddc4,0x3ff0000000000000,1 +np.float64,0x80005137fa40a271,0x3ff0000000000000,1 
+np.float64,0x8007a3fbc22f47f8,0x3ff0000000000000,1 +np.float64,0x800dcd97071b9b2e,0x3ff0000000000000,1 +np.float64,0x80065b36048cb66d,0x3ff0000000000000,1 +np.float64,0x8004206ba72840d8,0x3ff0000000000000,1 +np.float64,0x8007e82b98cfd058,0x3ff0000000000000,1 +np.float64,0x8001a116ed23422f,0x3ff0000000000000,1 +np.float64,0x800c69e9ff18d3d4,0x3ff0000000000000,1 +np.float64,0x8003843688e7086e,0x3ff0000000000000,1 +np.float64,0x800335e3b8866bc8,0x3ff0000000000000,1 +np.float64,0x800e3308f0bc6612,0x3ff0000000000000,1 +np.float64,0x8002a9ec55c553d9,0x3ff0000000000000,1 +np.float64,0x80001c2084e03842,0x3ff0000000000000,1 +np.float64,0x800bc2bbd8d78578,0x3ff0000000000000,1 +np.float64,0x800ae6bcc555cd7a,0x3ff0000000000000,1 +np.float64,0x80083f7a13907ef5,0x3ff0000000000000,1 +np.float64,0x800d83ed76db07db,0x3ff0000000000000,1 +np.float64,0x800a12251974244b,0x3ff0000000000000,1 +np.float64,0x800a69c95714d393,0x3ff0000000000000,1 +np.float64,0x800cd5a85639ab51,0x3ff0000000000000,1 +np.float64,0x800e0e1837bc1c31,0x3ff0000000000000,1 +np.float64,0x8007b5ca39ef6b95,0x3ff0000000000000,1 +np.float64,0x800cf961cad9f2c4,0x3ff0000000000000,1 +np.float64,0x80066e8fc14cdd20,0x3ff0000000000000,1 +np.float64,0x8001cb8c7b43971a,0x3ff0000000000000,1 +np.float64,0x800002df68a005c0,0x3ff0000000000000,1 +np.float64,0x8003e6681567ccd1,0x3ff0000000000000,1 +np.float64,0x800b039126b60723,0x3ff0000000000000,1 +np.float64,0x800d2e1b663a5c37,0x3ff0000000000000,1 +np.float64,0x800188b3e2a31169,0x3ff0000000000000,1 +np.float64,0x8001f272e943e4e7,0x3ff0000000000000,1 +np.float64,0x800d7f53607afea7,0x3ff0000000000000,1 +np.float64,0x80092cafa4f25960,0x3ff0000000000000,1 +np.float64,0x800fc009f07f8014,0x3ff0000000000000,1 +np.float64,0x8003da896507b514,0x3ff0000000000000,1 +np.float64,0x800d4d1b4c3a9a37,0x3ff0000000000000,1 +np.float64,0x8007a835894f506c,0x3ff0000000000000,1 +np.float64,0x80057ba0522af741,0x3ff0000000000000,1 +np.float64,0x8009b7054b336e0b,0x3ff0000000000000,1 +np.float64,0x800b2c6c125658d9,0x3ff0000000000000,1 +np.float64,0x8008b1840ad16308,0x3ff0000000000000,1 +np.float64,0x8007ea0e3befd41d,0x3ff0000000000000,1 +np.float64,0x800dd658683bacb1,0x3ff0000000000000,1 +np.float64,0x8008cda48fd19b49,0x3ff0000000000000,1 +np.float64,0x8003acca14c75995,0x3ff0000000000000,1 +np.float64,0x8008bd152d717a2b,0x3ff0000000000000,1 +np.float64,0x80010d1ea3621a3e,0x3ff0000000000000,1 +np.float64,0x800130b78b826170,0x3ff0000000000000,1 +np.float64,0x8002cf3a46e59e75,0x3ff0000000000000,1 +np.float64,0x800b76e7fa76edd0,0x3ff0000000000000,1 +np.float64,0x800e065fe1dc0cc0,0x3ff0000000000000,1 +np.float64,0x8000dd527ea1baa6,0x3ff0000000000000,1 +np.float64,0x80032cb234665965,0x3ff0000000000000,1 +np.float64,0x800affc1acb5ff84,0x3ff0000000000000,1 +np.float64,0x80074be23fee97c5,0x3ff0000000000000,1 +np.float64,0x8004f83eafc9f07e,0x3ff0000000000000,1 +np.float64,0x800b02a115560543,0x3ff0000000000000,1 +np.float64,0x800b324a55766495,0x3ff0000000000000,1 +np.float64,0x800ffbcfd69ff7a0,0x3ff0000000000000,1 +np.float64,0x800830bc7b906179,0x3ff0000000000000,1 +np.float64,0x800cbafe383975fd,0x3ff0000000000000,1 +np.float64,0x8001ee42bfe3dc86,0x3ff0000000000000,1 +np.float64,0x8005b00fdc0b6020,0x3ff0000000000000,1 +np.float64,0x8005e7addd0bcf5c,0x3ff0000000000000,1 +np.float64,0x8001ae4cb0635c9a,0x3ff0000000000000,1 +np.float64,0x80098a9941131533,0x3ff0000000000000,1 +np.float64,0x800334c929466993,0x3ff0000000000000,1 +np.float64,0x8009568239d2ad05,0x3ff0000000000000,1 +np.float64,0x800f0639935e0c73,0x3ff0000000000000,1 
+np.float64,0x800cebce7499d79d,0x3ff0000000000000,1 +np.float64,0x800482ee4c2905dd,0x3ff0000000000000,1 +np.float64,0x8007b7bd9e2f6f7c,0x3ff0000000000000,1 +np.float64,0x3fe654469f2ca88d,0x3fe8853f6c01ffb3,1 +np.float64,0x3feb4d7297369ae5,0x3fe50ad5bb621408,1 +np.float64,0x3feef53ba43dea77,0x3fe2283f356f8658,1 +np.float64,0x3fddf564eabbeaca,0x3fec8ec0e0dead9c,1 +np.float64,0x3fd3a69078274d21,0x3fee80e05c320000,1 +np.float64,0x3fecdafe5d39b5fd,0x3fe3d91a5d440fd9,1 +np.float64,0x3fd93286bc32650d,0x3fed8d40696cd10e,1 +np.float64,0x3fc0d34eb821a69d,0x3fefb954023d4284,1 +np.float64,0x3fc7b4b9a02f6973,0x3fef73e8739787ce,1 +np.float64,0x3fe08c839a611907,0x3febd0bc6f5641cd,1 +np.float64,0x3fb3d1758627a2eb,0x3fefe776f6183f96,1 +np.float64,0x3fef93c9ff3f2794,0x3fe1a4d2f622627d,1 +np.float64,0x3fea8d0041351a01,0x3fe59a52a1c78c9e,1 +np.float64,0x3fe3e26a30e7c4d4,0x3fea04ad3e0bbf8d,1 +np.float64,0x3fe5a34c9f6b4699,0x3fe8f57c5ccd1eab,1 +np.float64,0x3fc21ef859243df1,0x3fefae0b68a3a2e7,1 +np.float64,0x3fed7dd585fafbab,0x3fe35860041e5b0d,1 +np.float64,0x3fe5abacf22b575a,0x3fe8f03d8b6ef0f2,1 +np.float64,0x3fe426451f284c8a,0x3fe9dcf21f13205b,1 +np.float64,0x3fc01f6456203ec9,0x3fefbf19e2a8e522,1 +np.float64,0x3fe1cf2772239e4f,0x3feb2bbd645c7697,1 +np.float64,0x3fd18c4ace231896,0x3feecdfdd086c110,1 +np.float64,0x3fe8387d5b7070fb,0x3fe74358f2ec4910,1 +np.float64,0x3fdce51c2239ca38,0x3feccb2ae5459632,1 +np.float64,0x3fe5b0f2e4eb61e6,0x3fe8ecef4dbe4277,1 +np.float64,0x3fe1ceeb08a39dd6,0x3feb2bdd4dcfb3df,1 +np.float64,0x3febc5899d778b13,0x3fe4afc8dd8ad228,1 +np.float64,0x3fe7a47fbe2f48ff,0x3fe7a7fd9b352ea5,1 +np.float64,0x3fe7f74e1fafee9c,0x3fe76feb2755b247,1 +np.float64,0x3fe2bfad04e57f5a,0x3feaa9b46adddaeb,1 +np.float64,0x3fd06a090320d412,0x3feef40c334f8fba,1 +np.float64,0x3fdc97297d392e53,0x3fecdc16a3e22fcb,1 +np.float64,0x3fdc1a3f3838347e,0x3fecf6db2769d404,1 +np.float64,0x3fcca90096395201,0x3fef338156fcd218,1 +np.float64,0x3fed464733fa8c8e,0x3fe38483f0465d91,1 +np.float64,0x3fe7e067d82fc0d0,0x3fe77f7c8c9de896,1 +np.float64,0x3fc014fa0b2029f4,0x3fefbf6d84c933f8,1 +np.float64,0x3fd3bf1524277e2a,0x3fee7d2997b74dec,1 +np.float64,0x3fec153b86782a77,0x3fe472bb5497bb2a,1 +np.float64,0x3fd3e4d9d5a7c9b4,0x3fee776842691902,1 +np.float64,0x3fea6c0e2c74d81c,0x3fe5b2954cb458d9,1 +np.float64,0x3fee8f6a373d1ed4,0x3fe27bb9e348125b,1 +np.float64,0x3fd30c6dd42618dc,0x3fee97d2cab2b0bc,1 +np.float64,0x3fe4f90e6d69f21d,0x3fe95ea3dd4007f2,1 +np.float64,0x3fe271d467e4e3a9,0x3fead470d6d4008b,1 +np.float64,0x3fef2983897e5307,0x3fe1fd1a4debe33b,1 +np.float64,0x3fe980cc83b30199,0x3fe65d2fb8a0eb46,1 +np.float64,0x3fdfdf53db3fbea8,0x3fec1cf95b2a1cc7,1 +np.float64,0x3fe4d5307ba9aa61,0x3fe974701b4156cb,1 +np.float64,0x3fdb4e2345b69c47,0x3fed21aa6c146512,1 +np.float64,0x3fe3f7830327ef06,0x3fe9f85f6c88c2a8,1 +np.float64,0x3fca915fb63522bf,0x3fef502b73a52ecf,1 +np.float64,0x3fe66d3709ecda6e,0x3fe87531d7372d7a,1 +np.float64,0x3fd86000bcb0c001,0x3fedb5018dd684ca,1 +np.float64,0x3fe516e5feea2dcc,0x3fe94c68b111404e,1 +np.float64,0x3fd83c53dd3078a8,0x3fedbb9e5dd9e165,1 +np.float64,0x3fedfeeb673bfdd7,0x3fe2f0f0253c5d5d,1 +np.float64,0x3fe0dc6f9c21b8df,0x3feba8e2452410c2,1 +np.float64,0x3fbe154d643c2a9b,0x3fefc780a9357457,1 +np.float64,0x3fe5f63986abec73,0x3fe8c1434951a40a,1 +np.float64,0x3fbce0e50839c1ca,0x3fefcbeeaa27de75,1 +np.float64,0x3fd7ef5c5c2fdeb9,0x3fedc9c3022495b3,1 +np.float64,0x3fc1073914220e72,0x3fefb79de80fc0fd,1 +np.float64,0x3fe1a93c3d235278,0x3feb3fb21f86ac67,1 +np.float64,0x3fe321ee53e643dd,0x3fea72e2999f1e22,1 
+np.float64,0x3fa881578c3102af,0x3feff69e6e51e0d6,1 +np.float64,0x3fd313482a262690,0x3fee96d161199495,1 +np.float64,0x3fe7272cd6ae4e5a,0x3fe7fbacbd0d8f43,1 +np.float64,0x3fd6cf4015ad9e80,0x3fedfd3513d544b8,1 +np.float64,0x3fc67b7e6d2cf6fd,0x3fef81f5c16923a4,1 +np.float64,0x3fa1999c14233338,0x3feffb2913a14184,1 +np.float64,0x3fc74eb8dd2e9d72,0x3fef78909a138e3c,1 +np.float64,0x3fc0b9274921724f,0x3fefba2ebd5f3e1c,1 +np.float64,0x3fd53fa156aa7f43,0x3fee40a18e952e88,1 +np.float64,0x3feaccbca4b59979,0x3fe56b22b33eb713,1 +np.float64,0x3fe6a01e3a2d403c,0x3fe8543fbd820ecc,1 +np.float64,0x3fd392a869a72551,0x3fee83e0ffe0e8de,1 +np.float64,0x3fe44d8928689b12,0x3fe9c5bf3c8fffdb,1 +np.float64,0x3fca3f209f347e41,0x3fef5461b6fa0924,1 +np.float64,0x3fee9e84b07d3d09,0x3fe26f638f733549,1 +np.float64,0x3faf49acb03e9359,0x3feff0b583cd8c48,1 +np.float64,0x3fea874b2af50e96,0x3fe59e882fa6febf,1 +np.float64,0x3fc50b72772a16e5,0x3fef918777dc41be,1 +np.float64,0x3fe861d1d4f0c3a4,0x3fe726e44d9d42c2,1 +np.float64,0x3fcadd2e2535ba5c,0x3fef4c3e2b56da38,1 +np.float64,0x3fea59c29cb4b385,0x3fe5c0043e586439,1 +np.float64,0x3fc1ffef0d23ffde,0x3fefaf22be452d13,1 +np.float64,0x3fc2d8dbc125b1b8,0x3fefa75b646d8e4e,1 +np.float64,0x3fd66c6471acd8c9,0x3fee0e5038b895c0,1 +np.float64,0x3fd0854adfa10a96,0x3feef0945bcc5c99,1 +np.float64,0x3feaac7076f558e1,0x3fe58316c23a82ad,1 +np.float64,0x3fdda49db3bb493b,0x3feca0e347c0ad6f,1 +np.float64,0x3fe43a539de874a7,0x3fe9d11d722d4822,1 +np.float64,0x3feeee3ebbfddc7d,0x3fe22dffd251e9af,1 +np.float64,0x3f8ee2c5b03dc58b,0x3fefff11855a7b6c,1 +np.float64,0x3fcd7107c63ae210,0x3fef2840bb55ca52,1 +np.float64,0x3f8d950d203b2a1a,0x3fefff253a08e40e,1 +np.float64,0x3fd40a5e57a814bd,0x3fee71a633c761fc,1 +np.float64,0x3fee836ec83d06de,0x3fe28580975be2fd,1 +np.float64,0x3fd7bbe87f2f77d1,0x3fedd31f661890cc,1 +np.float64,0xbfe05bf138a0b7e2,0x3febe8a000d96e47,1 +np.float64,0xbf88bddd90317bc0,0x3fefff66f6e2ff26,1 +np.float64,0xbfdc9cbb12393976,0x3fecdae2982335db,1 +np.float64,0xbfd85b4eccb0b69e,0x3fedb5e0dd87f702,1 +np.float64,0xbfe5c326cb2b864e,0x3fe8e180f525fa12,1 +np.float64,0xbfe381a0e4a70342,0x3fea3c8e5e3ab78e,1 +np.float64,0xbfe58d892c2b1b12,0x3fe9031551617aed,1 +np.float64,0xbfd7f3a52cafe74a,0x3fedc8fa97edd080,1 +np.float64,0xbfef3417bc7e682f,0x3fe1f45989f6a009,1 +np.float64,0xbfddfb8208bbf704,0x3fec8d5fa9970773,1 +np.float64,0xbfdab69bcc356d38,0x3fed40b2f6c347c6,1 +np.float64,0xbfed3f7cf17a7efa,0x3fe389e4ff4d9235,1 +np.float64,0xbfe47675d9a8ecec,0x3fe9ad6829a69e94,1 +np.float64,0xbfd030e2902061c6,0x3feefb3f811e024f,1 +np.float64,0xbfc376ac7226ed58,0x3fefa1798712b37e,1 +np.float64,0xbfdb7e54a0b6fcaa,0x3fed17a974c4bc28,1 +np.float64,0xbfdb7d5d5736faba,0x3fed17dcf31a8d84,1 +np.float64,0xbf876bd6502ed7c0,0x3fefff76dce6232c,1 +np.float64,0xbfd211e6c02423ce,0x3feebba41f0a1764,1 +np.float64,0xbfb443e3962887c8,0x3fefe658953629d4,1 +np.float64,0xbfe81b09e9b03614,0x3fe757882e4fdbae,1 +np.float64,0xbfdcb905d2b9720c,0x3fecd4c22cfe84e5,1 +np.float64,0xbfe3b62d99276c5b,0x3fea1e5520b3098d,1 +np.float64,0xbfbf05b25c3e0b68,0x3fefc3ecc04bca8e,1 +np.float64,0xbfdedc885b3db910,0x3fec59e22feb49f3,1 +np.float64,0xbfe33aa282667545,0x3fea64f2d55ec471,1 +np.float64,0xbfec84745a3908e9,0x3fe41cb3214e7044,1 +np.float64,0xbfddefdff1bbdfc0,0x3fec8fff88d4d0ec,1 +np.float64,0xbfd26ae6aca4d5ce,0x3feeaf208c7fedf6,1 +np.float64,0xbfee010591fc020b,0x3fe2ef3e57211a5e,1 +np.float64,0xbfb8cfddca319fb8,0x3fefd98d8f7918ed,1 +np.float64,0xbfe991648f3322c9,0x3fe6514e54670bae,1 +np.float64,0xbfee63fd087cc7fa,0x3fe29f1bfa3297cc,1 
+np.float64,0xbfe1685942a2d0b2,0x3feb617f5f839eee,1 +np.float64,0xbfc6fc2fd62df860,0x3fef7c4698fd58cf,1 +np.float64,0xbfe42723d3a84e48,0x3fe9dc6ef7243e90,1 +np.float64,0xbfc3a7e89d274fd0,0x3fef9f99e3314e77,1 +np.float64,0xbfeb4c9521f6992a,0x3fe50b7c919bc6d8,1 +np.float64,0xbf707b34e020f680,0x3fefffef05e30264,1 +np.float64,0xbfc078478e20f090,0x3fefbc479305d5aa,1 +np.float64,0xbfd494ac4ca92958,0x3fee5c11f1cd8269,1 +np.float64,0xbfdaf888a035f112,0x3fed3346ae600469,1 +np.float64,0xbfa5d8ed502bb1e0,0x3feff88b0f262609,1 +np.float64,0xbfeec0cbfffd8198,0x3fe253543b2371cb,1 +np.float64,0xbfe594b5986b296b,0x3fe8fe9b39fb3940,1 +np.float64,0xbfc8ece7c631d9d0,0x3fef652bd0611ac7,1 +np.float64,0xbfd8ffeca0b1ffda,0x3fed96ebdf9b65cb,1 +np.float64,0xbfba9b221e353648,0x3fefd3cc21e2f15c,1 +np.float64,0xbfca63a52c34c74c,0x3fef52848eb9ed3b,1 +np.float64,0xbfe588e9b06b11d4,0x3fe905f7403e8881,1 +np.float64,0xbfc76f82db2edf04,0x3fef77138fe9bbc2,1 +np.float64,0xbfeeb3f334bd67e6,0x3fe25ddadb1096d6,1 +np.float64,0xbfbf2b64ce3e56c8,0x3fefc35a9555f6df,1 +np.float64,0xbfe9920e4ff3241c,0x3fe650d4ab8f5c42,1 +np.float64,0xbfb4a54c02294a98,0x3fefe55fc85ae5e9,1 +np.float64,0xbfe353b0c766a762,0x3fea56c02d17e4b7,1 +np.float64,0xbfd99961a4b332c4,0x3fed795fcd00dbf9,1 +np.float64,0xbfef191ddabe323c,0x3fe20aa79524f636,1 +np.float64,0xbfb25d060224ba10,0x3fefeaeee5cc8c0b,1 +np.float64,0xbfe6022428ec0448,0x3fe8b9b46e776194,1 +np.float64,0xbfed1a236cba3447,0x3fe3a76bee0d9861,1 +np.float64,0xbfc59671e72b2ce4,0x3fef8bc4daef6f14,1 +np.float64,0xbfdf2711703e4e22,0x3fec4886a8c9ceb5,1 +np.float64,0xbfeb7e207536fc41,0x3fe4e610c783f168,1 +np.float64,0xbfe6cdf5bcad9bec,0x3fe8365f8a59bc81,1 +np.float64,0xbfe55294adaaa52a,0x3fe927b0af5ccd09,1 +np.float64,0xbfdf4a88913e9512,0x3fec4036df58ba74,1 +np.float64,0xbfebb7efe4376fe0,0x3fe4ba276006992d,1 +np.float64,0xbfe09f29cfa13e54,0x3febc77f4f9c95e7,1 +np.float64,0xbfdf8c75653f18ea,0x3fec30ac924e4f46,1 +np.float64,0xbfefd601c7ffac04,0x3fe16d6f21bcb9c1,1 +np.float64,0xbfeae97ff5f5d300,0x3fe555bb5b87efe9,1 +np.float64,0xbfed427f02fa84fe,0x3fe387830db093bc,1 +np.float64,0xbfa33909cc267210,0x3feffa3a1bcb50dd,1 +np.float64,0xbfe9aa4bf5f35498,0x3fe63f6e98f6aa0f,1 +np.float64,0xbfe2d7349b25ae69,0x3fea9caa7c331e7e,1 +np.float64,0xbfcdbb2a3a3b7654,0x3fef2401c9659e4b,1 +np.float64,0xbfc8a90919315214,0x3fef686fe7fc0513,1 +np.float64,0xbfe62a98df2c5532,0x3fe89ff22a02cc6b,1 +np.float64,0xbfdc0f67b3b81ed0,0x3fecf928b637798f,1 +np.float64,0xbfebb32bf6f76658,0x3fe4bdc893c09698,1 +np.float64,0xbfec067996380cf3,0x3fe47e132741db97,1 +np.float64,0xbfd9774e1d32ee9c,0x3fed7ffe1e87c434,1 +np.float64,0xbfef989890bf3131,0x3fe1a0d025c80cf4,1 +np.float64,0xbfe59887e62b3110,0x3fe8fc382a3d4197,1 +np.float64,0xbfdea0a11e3d4142,0x3fec67b987e236ec,1 +np.float64,0xbfe2ec495825d892,0x3fea90efb231602d,1 +np.float64,0xbfb329c5c2265388,0x3fefe90f1b8209c3,1 +np.float64,0xbfdcd2dcd339a5ba,0x3feccf24c60b1478,1 +np.float64,0xbfe537ea18aa6fd4,0x3fe938237e217fe0,1 +np.float64,0xbfe8675ce170ceba,0x3fe723105925ce3a,1 +np.float64,0xbfd70723acae0e48,0x3fedf369ac070e65,1 +np.float64,0xbfea9d8692b53b0d,0x3fe58e1ee42e3fdb,1 +np.float64,0xbfcfeb96653fd72c,0x3fef029770033bdc,1 +np.float64,0xbfcc06c92d380d94,0x3fef3c69797d9b0a,1 +np.float64,0xbfe16b7c4f62d6f8,0x3feb5fdf9f0a9a07,1 +np.float64,0xbfed4d7a473a9af4,0x3fe37ecee27b1eb7,1 +np.float64,0xbfe6a6f6942d4ded,0x3fe84fccdf762b19,1 +np.float64,0xbfda46d867348db0,0x3fed572d928fa657,1 +np.float64,0xbfdbd9482db7b290,0x3fed049b5f907b52,1 +np.float64,0x7fe992ceb933259c,0xbfeb15af92aad70e,1 
+np.float64,0x7fe3069204a60d23,0xbfe5eeff454240e9,1 +np.float64,0x7fe729dbf32e53b7,0xbfefe0528a330e4c,1 +np.float64,0x7fec504fb638a09e,0x3fd288e95dbedf65,1 +np.float64,0x7fe1d30167a3a602,0xbfeffc41f946fd02,1 +np.float64,0x7fed7f8ffd3aff1f,0x3fefe68ec604a19d,1 +np.float64,0x7fd2f23635a5e46b,0x3fea63032efbb447,1 +np.float64,0x7fd4c86db1a990da,0x3fdf6b9f7888db5d,1 +np.float64,0x7fe7554db6eeaa9a,0x3fe1b41476861bb0,1 +np.float64,0x7fe34e823ba69d03,0x3fefc435532e6294,1 +np.float64,0x7fec5c82fef8b905,0x3fef8f0c6473034f,1 +np.float64,0x7feba221bff74442,0xbfea95b81eb19b47,1 +np.float64,0x7fe74808a5ae9010,0xbfd3aa322917c3e5,1 +np.float64,0x7fdf41b7e0be836f,0x3fd14283c7147282,1 +np.float64,0x7fec09892f381311,0x3fe5240376ae484b,1 +np.float64,0x7faaf80bf435f017,0x3fe20227fa811423,1 +np.float64,0x7f8422d8402845b0,0x3fe911714593b8a0,1 +np.float64,0x7fd23a7fada474fe,0x3feff9f40aa37e9c,1 +np.float64,0x7fef4a4806fe948f,0x3fec6eca89cb4a62,1 +np.float64,0x7fe1e71cf763ce39,0xbfea6ac63f9ba457,1 +np.float64,0x7fe3e555be27caaa,0xbfe75b305d0dbbfd,1 +np.float64,0x7fcb8bac96371758,0xbfe8b126077f9d4c,1 +np.float64,0x7fc98e2c84331c58,0x3fef9092eb0bc85a,1 +np.float64,0x7fe947cf2b728f9d,0xbfebfff2c5b7d198,1 +np.float64,0x7feee8058c3dd00a,0xbfef21ebaae2eb17,1 +np.float64,0x7fef61d8d5bec3b1,0xbfdf1a032fb1c864,1 +np.float64,0x7fcf714b6f3ee296,0x3fe6fc89a8084098,1 +np.float64,0x7fa9a8b44c335168,0xbfeb16c149cea943,1 +np.float64,0x7fd175c482a2eb88,0xbfef64d341e73f88,1 +np.float64,0x7feab8e6a87571cc,0x3feb10069c397464,1 +np.float64,0x7fe3ade72de75bcd,0x3fd1753e333d5790,1 +np.float64,0x7fb26d87d224db0f,0xbfe753d36b18f4ca,1 +np.float64,0x7fdb7ef159b6fde2,0x3fe5c0a6044d3607,1 +np.float64,0x7fd5af86422b5f0c,0x3fe77193c95f6484,1 +np.float64,0x7fee9e00b07d3c00,0x3fe864d494596845,1 +np.float64,0x7fef927a147f24f3,0xbfe673b14715693d,1 +np.float64,0x7fd0aea63c215d4b,0xbfeff435f119fce9,1 +np.float64,0x7fd02e3796a05c6e,0x3fe4f7e3706e9a3d,1 +np.float64,0x7fd3ed61da27dac3,0xbfefef2f057f168c,1 +np.float64,0x7fefaca0d4ff5941,0x3fd3e8ad205cd4ab,1 +np.float64,0x7feb659e06f6cb3b,0x3fd64d803203e027,1 +np.float64,0x7fc94ccfaf32999e,0x3fee04922209369a,1 +np.float64,0x7feb4ec294f69d84,0xbfd102763a056c89,1 +np.float64,0x7fe2ada6ac655b4c,0x3fef4f6792aa6093,1 +np.float64,0x7fe5f40fdc2be81f,0xbfb4a6327186eee8,1 +np.float64,0x7fe7584bc3eeb097,0xbfd685b8ff94651d,1 +np.float64,0x7fe45d276be8ba4e,0x3fee53b13f7e442f,1 +np.float64,0x7fe6449b3d6c8935,0xbfe7e08bafa75251,1 +np.float64,0x7f8d62e6b03ac5cc,0x3fe73d30762f38fd,1 +np.float64,0x7fe3a76f72a74ede,0xbfeb48a28bc60968,1 +np.float64,0x7fd057706920aee0,0x3fdece8fa06f626c,1 +np.float64,0x7fe45ae158e8b5c2,0x3fe7a70f47b4d349,1 +np.float64,0x7fea8a5a983514b4,0x3fefb053d5f9ddd7,1 +np.float64,0x7fdd1e86ab3a3d0c,0x3fe3cded1b93816b,1 +np.float64,0x7fdb456108b68ac1,0xbfe37574c0b9bf8f,1 +np.float64,0x7fe972602432e4bf,0x3fef9a26e65ec01c,1 +np.float64,0x7fdbe2385637c470,0x3fed541df57969e1,1 +np.float64,0x7fe57f03602afe06,0x3fbd90f595cbbd94,1 +np.float64,0x7feb0ceb68f619d6,0xbfeae9cb8ee5261f,1 +np.float64,0x7fe6abfe6c6d57fc,0xbfef40a6edaca26f,1 +np.float64,0x7fe037ea08606fd3,0xbfda817d75858597,1 +np.float64,0x7fdd75a52dbaeb49,0x3feef2a0d91d6aa1,1 +np.float64,0x7fe8f9af66b1f35e,0xbfedfceef2a3bfc9,1 +np.float64,0x7fedf762b53beec4,0x3fd8b4f21ef69ee3,1 +np.float64,0x7fe99295b7f3252a,0x3feffc24d970383e,1 +np.float64,0x7fe797b0172f2f5f,0x3fee089aa56f7ce8,1 +np.float64,0x7fed89dcc97b13b9,0xbfcfa2bb0c3ea41f,1 +np.float64,0x7fae9e8d5c3d3d1a,0xbfe512ffe16c6b08,1 +np.float64,0x7fefaecbe27f5d97,0x3fbfc718a5e972f1,1 
+np.float64,0x7fce0236d93c046d,0xbfa9b7cd790db256,1 +np.float64,0x7fa9689aac32d134,0x3feced501946628a,1 +np.float64,0x7feb1469e93628d3,0x3fef2a988e7673ed,1 +np.float64,0x7fdba78344b74f06,0xbfe092e78965b30c,1 +np.float64,0x7fece54c3fb9ca97,0x3fd3cfd184bed2e6,1 +np.float64,0x7fdb84212b370841,0xbfe25ebf2db6ee55,1 +np.float64,0x7fbe3e8bf23c7d17,0x3fe2ee72df573345,1 +np.float64,0x7fe43d9803687b2f,0xbfed2eff6a9e66a0,1 +np.float64,0x7fb0f9c00a21f37f,0x3feff70f3276fdb7,1 +np.float64,0x7fea0c6cbbb418d8,0xbfefa612494798b2,1 +np.float64,0x7fe4b3239e296646,0xbfe74dd959af8cdc,1 +np.float64,0x7fe5c6a773eb8d4e,0xbfd06944048f8d2b,1 +np.float64,0x7fb1c1278223824e,0xbfeb533a34655bde,1 +np.float64,0x7fd21c09ee243813,0xbfe921ccbc9255c3,1 +np.float64,0x7fe051020c20a203,0x3fbd519d700c1f2f,1 +np.float64,0x7fe0c76845e18ed0,0x3fefb9595191a31b,1 +np.float64,0x7fe6b0b57b6d616a,0xbf8c59a8ba5fcd9a,1 +np.float64,0x7fd386c460270d88,0x3fe8ffea5d1a5c46,1 +np.float64,0x7feeb884713d7108,0x3fee9b2247ef6c0d,1 +np.float64,0x7fd85f71b6b0bee2,0xbfefc30ec3e28f07,1 +np.float64,0x7fc341366426826c,0x3fd4234d35386d3b,1 +np.float64,0x7fe56482dd6ac905,0x3fe7189de6a50668,1 +np.float64,0x7fec67a2e3f8cf45,0xbfef86d0b940f37f,1 +np.float64,0x7fe38b202fe7163f,0x3feb90b75caa2030,1 +np.float64,0x7fdcbc64883978c8,0x3fed4f758fbf64d4,1 +np.float64,0x7fea5f0598f4be0a,0x3fdd503a417b3d4d,1 +np.float64,0x7fda3b6bcf3476d7,0x3fea6e9af3f7f9f5,1 +np.float64,0x7fc7d7896c2faf12,0x3fda2bebc36a2363,1 +np.float64,0x7fe7e8e2626fd1c4,0xbfe7d5e390c4cc3f,1 +np.float64,0x7fde0f3d7abc1e7a,0xbfede7a0ecfa3606,1 +np.float64,0x7fc692b8f52d2571,0x3feff0cd7ab6f61b,1 +np.float64,0xff92d1fce825a400,0xbfc921c36fc014fa,1 +np.float64,0xffdec3af2fbd875e,0xbfed6a77e6a0364e,1 +np.float64,0xffef46e7d9be8dcf,0xbfed7d39476f7e27,1 +np.float64,0xffe2c2ce4525859c,0x3fe1757261316bc9,1 +np.float64,0xffe27c8b5864f916,0xbfefe017c0d43457,1 +np.float64,0xffe184d7442309ae,0x3fa1fb8c49dba596,1 +np.float64,0xffddf5f98d3bebf4,0x3fee4f8eaa5f847e,1 +np.float64,0xffee3ef354fc7de6,0xbfebfd60fa51b2ba,1 +np.float64,0xffdecb3e85bd967e,0x3fbfad2667a8b468,1 +np.float64,0xffe4ee900b29dd20,0xbfdc02dc626f91cd,1 +np.float64,0xffd3179f6da62f3e,0xbfe2cfe442511776,1 +np.float64,0xffe99ef7cef33def,0x3f50994542a7f303,1 +np.float64,0xffe2b66b1ae56cd6,0xbfefe3e066eb6329,1 +np.float64,0xff8f72aff03ee540,0x3fe9c46224cf5003,1 +np.float64,0xffd29beb85a537d8,0x3fefcb0b6166be71,1 +np.float64,0xffaef02d4c3de060,0xbfef5fb71028fc72,1 +np.float64,0xffd39a2a89273456,0x3fe6d4b183205dca,1 +np.float64,0xffef8a9392ff1526,0x3fedb99fbf402468,1 +np.float64,0xffb9b3f31e3367e8,0x3fee1005270fcf80,1 +np.float64,0xffed9d5c693b3ab8,0x3fd110f4b02365d5,1 +np.float64,0xffeaba45f9f5748b,0x3fe499e0a6f4afb2,1 +np.float64,0xffdba3f70d3747ee,0xbfca0c30493ae519,1 +np.float64,0xffa35b985426b730,0xbfdb625df56bcf45,1 +np.float64,0xffccbc9728397930,0x3fc53cbc59020704,1 +np.float64,0xffef73c942bee792,0xbfdc647a7a5e08be,1 +np.float64,0xffcb5acfb236b5a0,0x3feeb4ec038c39fc,1 +np.float64,0xffea116fe2b422df,0x3fefe03b6ae0b435,1 +np.float64,0xffe97de6e7b2fbcd,0xbfd2025698fab9eb,1 +np.float64,0xffdddba314bbb746,0x3fd31f0fdb8f93be,1 +np.float64,0xffd613a24a2c2744,0xbfebbb1efae884b3,1 +np.float64,0xffe3d938aa67b271,0xbfc2099cead3d3be,1 +np.float64,0xffdf08c2e33e1186,0xbfefd236839b900d,1 +np.float64,0xffea6ba8bd34d751,0x3fe8dfc032114719,1 +np.float64,0xffe3202083e64040,0x3fed513b81432a22,1 +np.float64,0xffb2397db62472f8,0xbfee7d7fe1c3f76c,1 +np.float64,0xffd9d0682ab3a0d0,0x3fe0bcf9e531ad79,1 +np.float64,0xffc293df202527c0,0xbfe58d0bdece5e64,1 
+np.float64,0xffe1422c7da28458,0xbf81bd72595f2341,1 +np.float64,0xffd64e4ed4ac9c9e,0x3fa4334cc011c703,1 +np.float64,0xffe40a970ae8152e,0x3fead3d258b55b7d,1 +np.float64,0xffc8c2f2223185e4,0xbfef685f07c8b9fd,1 +np.float64,0xffe4b2f7216965ee,0x3fe3861d3d896a83,1 +np.float64,0xffdb531db3b6a63c,0x3fe18cb8332dd59d,1 +np.float64,0xffe8e727a3b1ce4e,0xbfe57b15abb677b9,1 +np.float64,0xffe530c1e12a6184,0xbfb973ea5535e48f,1 +np.float64,0xffe6f7849cedef08,0x3fd39a37ec5af4b6,1 +np.float64,0xffead62a78b5ac54,0x3fe69b3f6c7aa24b,1 +np.float64,0xffeefdd725fdfbad,0xbfc08a456111fdd5,1 +np.float64,0xffe682182fed0430,0x3fecc7c1292761d2,1 +np.float64,0xffee0ca8dcbc1951,0x3fef6cc361ef2c19,1 +np.float64,0xffec9b338f393666,0x3fefa9ab8e0471b5,1 +np.float64,0xffe13c5e29a278bc,0xbfef8da74ad83398,1 +np.float64,0xffd7bd48c62f7a92,0x3fe3468cd4ac9d34,1 +np.float64,0xffedd0ed14bba1d9,0xbfd563a83477077b,1 +np.float64,0xffe86b83f3f0d707,0x3fe9eb3c658e4b2d,1 +np.float64,0xffd6a4db4bad49b6,0xbfc7e11276166e17,1 +np.float64,0xffc29e8404253d08,0x3fd35971961c789f,1 +np.float64,0xffe27cf3d664f9e7,0xbfeca0f73c72f810,1 +np.float64,0xffc34152352682a4,0x3fef384e564c002c,1 +np.float64,0xffe395728ba72ae4,0x3f8fe18c2de86eba,1 +np.float64,0xffed86c4fbbb0d89,0x3fef709db881c672,1 +np.float64,0xffe8a98d37f1531a,0x3fd4879c8f73c3dc,1 +np.float64,0xffb8ce9fea319d40,0xbfb853c8fe46b08d,1 +np.float64,0xffe7f26db8efe4db,0xbfec1cfd3e5c2ac1,1 +np.float64,0xffd7935b77af26b6,0x3fb7368c89b2a460,1 +np.float64,0xffc5840ed02b081c,0x3fd92220b56631f3,1 +np.float64,0xffc36a873926d510,0x3fa84d61baf61811,1 +np.float64,0xffe06ea583e0dd4a,0x3feb647e348b9e39,1 +np.float64,0xffe6a33031ed4660,0xbfe096b851dc1a0a,1 +np.float64,0xffe001c938e00392,0x3fe4eece77623e7a,1 +np.float64,0xffc1e4f23b23c9e4,0xbfdb9bb1f83f6ac4,1 +np.float64,0xffecd3ecbab9a7d9,0x3fbafb1f800f177d,1 +np.float64,0xffc2d3016825a604,0xbfef650e8b0d6afb,1 +np.float64,0xffe222cb68e44596,0x3fde3690e44de5bd,1 +np.float64,0xffe5bb145e2b7628,0x3fedbb98e23c9dc1,1 +np.float64,0xffe9e5823b73cb04,0xbfee41661016c03c,1 +np.float64,0xffd234a00ba46940,0x3fda0312cda580c2,1 +np.float64,0xffe0913ed6e1227d,0xbfed508bb529bd23,1 +np.float64,0xffe8e3596171c6b2,0xbfdc33e1c1d0310e,1 +np.float64,0xffef9c6835ff38cf,0x3fea8ce6d27dfba3,1 +np.float64,0xffdd3bcf66ba779e,0x3fe50523d2b6470e,1 +np.float64,0xffe57e8cf06afd1a,0xbfee600933347247,1 +np.float64,0xffe0d8c65fa1b18c,0x3fe75091f93d5e4c,1 +np.float64,0xffea7c8c16b4f918,0x3fee681724795198,1 +np.float64,0xffe34f7a05269ef4,0xbfe3c3e179676f13,1 +np.float64,0xffd28894a6a5112a,0xbfe5d1027aee615d,1 +np.float64,0xffc73be6f22e77cc,0x3fe469bbc08b472a,1 +np.float64,0xffe7f71b066fee36,0x3fe7ed136c8fdfaa,1 +np.float64,0xffebc13e29f7827c,0x3fefcdc6e677d314,1 +np.float64,0xffd53e9c942a7d3a,0x3fea5a02c7341749,1 +np.float64,0xffd7191b23ae3236,0x3fea419b66023443,1 +np.float64,0xffe9480325b29006,0xbfefeaff5fa38cd5,1 +np.float64,0xffba46dc0e348db8,0xbfefa54f4de28eba,1 +np.float64,0xffdd4cc31eba9986,0x3fe60bb41fe1c4da,1 +np.float64,0xffe13a70dea274e1,0xbfaa9192f7bd6c9b,1 +np.float64,0xffde25127bbc4a24,0x3f7c75f45e29be7d,1 +np.float64,0xffe4076543a80eca,0x3fea5aad50d2f687,1 +np.float64,0xffe61512acec2a25,0xbfefffeb67401649,1 +np.float64,0xffef812ec1ff025d,0xbfe919c7c073c766,1 +np.float64,0xffd5552aeaaaaa56,0x3fc89d38ab047396,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-cosh.csv b/python/numpy/_core/tests/data/umath-validation-set-cosh.csv new file mode 100644 index 000000000..af14d8475 --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-cosh.csv @@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol +np.float32,0xfe0ac238,0x7f800000,3 +np.float32,0xbf553b86,0x3faf079b,3 +np.float32,0xff4457da,0x7f800000,3 +np.float32,0xff7253f3,0x7f800000,3 +np.float32,0x5a5802,0x3f800000,3 +np.float32,0x3db03413,0x3f80795b,3 +np.float32,0x7f6795c9,0x7f800000,3 +np.float32,0x805b9142,0x3f800000,3 +np.float32,0xfeea581a,0x7f800000,3 +np.float32,0x3f7e2dba,0x3fc472f6,3 +np.float32,0x3d9c4d74,0x3f805f7a,3 +np.float32,0x7f18c665,0x7f800000,3 +np.float32,0x7f003e23,0x7f800000,3 +np.float32,0x3d936fa0,0x3f8054f3,3 +np.float32,0x3f32034f,0x3fa0368e,3 +np.float32,0xff087604,0x7f800000,3 +np.float32,0x380a5,0x3f800000,3 +np.float32,0x3f59694e,0x3fb10077,3 +np.float32,0x3e63e648,0x3f832ee4,3 +np.float32,0x80712f42,0x3f800000,3 +np.float32,0x3e169908,0x3f816302,3 +np.float32,0x3f2d766e,0x3f9e8692,3 +np.float32,0x3d6412e0,0x3f8032d0,3 +np.float32,0xbde689e8,0x3f80cfd4,3 +np.float32,0x483e2e,0x3f800000,3 +np.float32,0xff1ba2d0,0x7f800000,3 +np.float32,0x80136bff,0x3f800000,3 +np.float32,0x3f72534c,0x3fbdc1d4,3 +np.float32,0x3e9eb381,0x3f8632c6,3 +np.float32,0x3e142892,0x3f815795,3 +np.float32,0x0,0x3f800000,3 +np.float32,0x2f2528,0x3f800000,3 +np.float32,0x7f38be13,0x7f800000,3 +np.float32,0xfeee6896,0x7f800000,3 +np.float32,0x7f09095d,0x7f800000,3 +np.float32,0xbe94d,0x3f800000,3 +np.float32,0xbedcf8d4,0x3f8c1b74,3 +np.float32,0xbf694c02,0x3fb8ef07,3 +np.float32,0x3e2261f8,0x3f819cde,3 +np.float32,0xbf01d3ce,0x3f90d0e0,3 +np.float32,0xbeb7b3a2,0x3f8853de,3 +np.float32,0x8046de7b,0x3f800000,3 +np.float32,0xbcb45ea0,0x3f8007f1,3 +np.float32,0x3eef14af,0x3f8e35dd,3 +np.float32,0xbf047316,0x3f91846e,3 +np.float32,0x801cef45,0x3f800000,3 +np.float32,0x3e9ad891,0x3f85e609,3 +np.float32,0xff20e9cf,0x7f800000,3 +np.float32,0x80068434,0x3f800000,3 +np.float32,0xbe253020,0x3f81ab49,3 +np.float32,0x3f13f4b8,0x3f95fac9,3 +np.float32,0x804accd1,0x3f800000,3 +np.float32,0x3dee3e10,0x3f80ddf7,3 +np.float32,0xbe6c4690,0x3f836c29,3 +np.float32,0xff30d431,0x7f800000,3 +np.float32,0xbec82416,0x3f89e791,3 +np.float32,0x3f30bbcb,0x3f9fbbcc,3 +np.float32,0x3f5620a2,0x3faf72b8,3 +np.float32,0x807a8130,0x3f800000,3 +np.float32,0x3e3cb02d,0x3f822de0,3 +np.float32,0xff4839ac,0x7f800000,3 +np.float32,0x800a3e9c,0x3f800000,3 +np.float32,0x3dffd65b,0x3f810002,3 +np.float32,0xbf2b1492,0x3f9da987,3 +np.float32,0xbf21602c,0x3f9a48fe,3 +np.float32,0x512531,0x3f800000,3 +np.float32,0x24b99a,0x3f800000,3 +np.float32,0xbf53e345,0x3fae67b1,3 +np.float32,0xff2126ec,0x7f800000,3 +np.float32,0x7e79b49d,0x7f800000,3 +np.float32,0x3ea3cf04,0x3f869b6f,3 +np.float32,0x7f270059,0x7f800000,3 +np.float32,0x3f625b2f,0x3fb561e1,3 +np.float32,0xbf59947e,0x3fb11519,3 +np.float32,0xfe0d1c64,0x7f800000,3 +np.float32,0xbf3f3eae,0x3fa568e2,3 +np.float32,0x7c04d1,0x3f800000,3 +np.float32,0x7e66bd,0x3f800000,3 +np.float32,0x8011880d,0x3f800000,3 +np.float32,0x3f302f07,0x3f9f8759,3 +np.float32,0x4e3375,0x3f800000,3 +np.float32,0xfe67a134,0x7f800000,3 +np.float32,0xff670249,0x7f800000,3 +np.float32,0x7e19f27d,0x7f800000,3 +np.float32,0xbf36ce12,0x3fa20b81,3 +np.float32,0xbe6bcfc4,0x3f8368b5,3 +np.float32,0x76fcba,0x3f800000,3 +np.float32,0x7f30abaf,0x7f800000,3 +np.float32,0x3f4c1f6d,0x3faae43c,3 +np.float32,0x7f61f44a,0x7f800000,3 +np.float32,0xbf4bb3c9,0x3faab4af,3 +np.float32,0xbda15ee0,0x3f8065c6,3 +np.float32,0xfbb4e800,0x7f800000,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x80568501,0x3f800000,3 +np.float32,0xfeb285e4,0x7f800000,3 +np.float32,0x804423a7,0x3f800000,3 +np.float32,0x7e6c0f21,0x7f800000,3
+np.float32,0x7f136b3c,0x7f800000,3 +np.float32,0x3f2d08e6,0x3f9e5e9c,3 +np.float32,0xbf6b454e,0x3fb9f7e6,3 +np.float32,0x3e6bceb0,0x3f8368ad,3 +np.float32,0xff1ad16a,0x7f800000,3 +np.float32,0x7cce1a04,0x7f800000,3 +np.float32,0xff7bcf95,0x7f800000,3 +np.float32,0x8049788d,0x3f800000,3 +np.float32,0x7ec45918,0x7f800000,3 +np.float32,0xff7fffff,0x7f800000,3 +np.float32,0x8039a1a0,0x3f800000,3 +np.float32,0x7e90cd72,0x7f800000,3 +np.float32,0xbf7dfd53,0x3fc456cc,3 +np.float32,0x3eeeb664,0x3f8e2a76,3 +np.float32,0x8055ef9b,0x3f800000,3 +np.float32,0x7ee06ddd,0x7f800000,3 +np.float32,0xba2cc000,0x3f800002,3 +np.float32,0x806da632,0x3f800000,3 +np.float32,0x7ecfaaf5,0x7f800000,3 +np.float32,0x3ddd12e6,0x3f80bf19,3 +np.float32,0xbf754394,0x3fbf60b1,3 +np.float32,0x6f3f19,0x3f800000,3 +np.float32,0x800a9af0,0x3f800000,3 +np.float32,0xfeef13ea,0x7f800000,3 +np.float32,0x7f74841f,0x7f800000,3 +np.float32,0xbeb9a2f0,0x3f888181,3 +np.float32,0x77cbb,0x3f800000,3 +np.float32,0xbf587f84,0x3fb0911b,3 +np.float32,0x210ba5,0x3f800000,3 +np.float32,0x3ee60a28,0x3f8d2367,3 +np.float32,0xbe3731ac,0x3f820dc7,3 +np.float32,0xbee8cfee,0x3f8d765e,3 +np.float32,0x7b2ef179,0x7f800000,3 +np.float32,0xfe81377c,0x7f800000,3 +np.float32,0x6ac98c,0x3f800000,3 +np.float32,0x3f51f144,0x3fad8288,3 +np.float32,0x80785750,0x3f800000,3 +np.float32,0x3f46615a,0x3fa864ff,3 +np.float32,0xbf35ac9e,0x3fa19b8e,3 +np.float32,0x7f0982ac,0x7f800000,3 +np.float32,0x1b2610,0x3f800000,3 +np.float32,0x3ed8bb25,0x3f8ba3df,3 +np.float32,0xbeb41bac,0x3f88006d,3 +np.float32,0xff48e89d,0x7f800000,3 +np.float32,0x3ed0ab8c,0x3f8ac755,3 +np.float32,0xbe64671c,0x3f833282,3 +np.float32,0x64bce4,0x3f800000,3 +np.float32,0x284f79,0x3f800000,3 +np.float32,0x7e09faa7,0x7f800000,3 +np.float32,0x4376c1,0x3f800000,3 +np.float32,0x805ca8c0,0x3f800000,3 +np.float32,0xff0859d5,0x7f800000,3 +np.float32,0xbed2f3b2,0x3f8b04dd,3 +np.float32,0x8045bd0c,0x3f800000,3 +np.float32,0x3f0e6216,0x3f94503f,3 +np.float32,0x3f41e3ae,0x3fa68035,3 +np.float32,0x80088ccc,0x3f800000,3 +np.float32,0x3f37fc19,0x3fa2812f,3 +np.float32,0x71c87d,0x3f800000,3 +np.float32,0x8024f4b2,0x3f800000,3 +np.float32,0xff78dd88,0x7f800000,3 +np.float32,0xbda66c90,0x3f806c40,3 +np.float32,0x7f33ef0d,0x7f800000,3 +np.float32,0x46a343,0x3f800000,3 +np.float32,0xff1dce38,0x7f800000,3 +np.float32,0x1b935d,0x3f800000,3 +np.float32,0x3ebec598,0x3f88fd0e,3 +np.float32,0xff115530,0x7f800000,3 +np.float32,0x803916aa,0x3f800000,3 +np.float32,0xff60a3e2,0x7f800000,3 +np.float32,0x3b8ddd48,0x3f80004f,3 +np.float32,0x3f761b6e,0x3fbfd8ea,3 +np.float32,0xbdf55b88,0x3f80eb70,3 +np.float32,0x37374,0x3f800000,3 +np.float32,0x3de150e0,0x3f80c682,3 +np.float32,0x3f343278,0x3fa10a83,3 +np.float32,0xbe9baefa,0x3f85f68b,3 +np.float32,0x3d8d43,0x3f800000,3 +np.float32,0x3e80994b,0x3f840f0c,3 +np.float32,0xbe573c6c,0x3f82d685,3 +np.float32,0x805b83b4,0x3f800000,3 +np.float32,0x683d88,0x3f800000,3 +np.float32,0x692465,0x3f800000,3 +np.float32,0xbdc345f8,0x3f809511,3 +np.float32,0x3f7c1c5a,0x3fc3406f,3 +np.float32,0xbf40bef3,0x3fa606df,3 +np.float32,0xff1e25b9,0x7f800000,3 +np.float32,0x3e4481e0,0x3f825d37,3 +np.float32,0x75d188,0x3f800000,3 +np.float32,0x3ea53cec,0x3f86b956,3 +np.float32,0xff105a54,0x7f800000,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f11f0b0,0x7f800000,3 +np.float32,0xbf58a57d,0x3fb0a328,3 +np.float32,0xbdd11e38,0x3f80aaf8,3 +np.float32,0xbea94adc,0x3f870fa0,3 +np.float32,0x3e9dd780,0x3f862180,3 +np.float32,0xff1786b9,0x7f800000,3 +np.float32,0xfec46aa2,0x7f800000,3 
+np.float32,0x7f4300c1,0x7f800000,3 +np.float32,0x29ba2b,0x3f800000,3 +np.float32,0x3f4112e2,0x3fa62993,3 +np.float32,0xbe6c9224,0x3f836e5d,3 +np.float32,0x7f0e42a3,0x7f800000,3 +np.float32,0xff6390ad,0x7f800000,3 +np.float32,0x3f54e374,0x3faede94,3 +np.float32,0x7f2642a2,0x7f800000,3 +np.float32,0x7f46b2be,0x7f800000,3 +np.float32,0xfe59095c,0x7f800000,3 +np.float32,0x7146a0,0x3f800000,3 +np.float32,0x3f07763d,0x3f925786,3 +np.float32,0x3d172780,0x3f801651,3 +np.float32,0xff66f1c5,0x7f800000,3 +np.float32,0xff025349,0x7f800000,3 +np.float32,0x6ce99d,0x3f800000,3 +np.float32,0xbf7e4f50,0x3fc48685,3 +np.float32,0xbeff8ca2,0x3f904708,3 +np.float32,0x3e6c8,0x3f800000,3 +np.float32,0x7f7153dc,0x7f800000,3 +np.float32,0xbedcf612,0x3f8c1b26,3 +np.float32,0xbbc2f180,0x3f800094,3 +np.float32,0xbf397399,0x3fa314b8,3 +np.float32,0x6c6e35,0x3f800000,3 +np.float32,0x7f50a88b,0x7f800000,3 +np.float32,0xfe84093e,0x7f800000,3 +np.float32,0x3f737b9d,0x3fbe6478,3 +np.float32,0x7f6a5340,0x7f800000,3 +np.float32,0xbde83c20,0x3f80d2e7,3 +np.float32,0xff769ce9,0x7f800000,3 +np.float32,0xfdd33c30,0x7f800000,3 +np.float32,0xbc95cb60,0x3f80057a,3 +np.float32,0x8007a40d,0x3f800000,3 +np.float32,0x3f55d90c,0x3faf5132,3 +np.float32,0x80282082,0x3f800000,3 +np.float32,0xbf43b1f2,0x3fa7418c,3 +np.float32,0x3f1dc7cb,0x3f991731,3 +np.float32,0xbd4346a0,0x3f80253f,3 +np.float32,0xbf5aa82a,0x3fb19946,3 +np.float32,0x3f4b8c22,0x3faaa333,3 +np.float32,0x3d13468c,0x3f80152f,3 +np.float32,0x7db77097,0x7f800000,3 +np.float32,0x4a00df,0x3f800000,3 +np.float32,0xbedea5e0,0x3f8c4b64,3 +np.float32,0x80482543,0x3f800000,3 +np.float32,0xbef344fe,0x3f8eb8dd,3 +np.float32,0x7ebd4044,0x7f800000,3 +np.float32,0xbf512c0e,0x3fad287e,3 +np.float32,0x3db28cce,0x3f807c9c,3 +np.float32,0xbd0f5ae0,0x3f801412,3 +np.float32,0xfe7ed9ac,0x7f800000,3 +np.float32,0x3eb1aa82,0x3f87c8b4,3 +np.float32,0xfef1679e,0x7f800000,3 +np.float32,0xff3629f2,0x7f800000,3 +np.float32,0xff3562b4,0x7f800000,3 +np.float32,0x3dcafe1d,0x3f80a118,3 +np.float32,0xfedf242a,0x7f800000,3 +np.float32,0xbf43102a,0x3fa6fda4,3 +np.float32,0x8028834e,0x3f800000,3 +np.float32,0x805c8513,0x3f800000,3 +np.float32,0x3f59306a,0x3fb0e550,3 +np.float32,0x3eda2c9c,0x3f8bcc4a,3 +np.float32,0x80023524,0x3f800000,3 +np.float32,0x7ef72879,0x7f800000,3 +np.float32,0x661c8a,0x3f800000,3 +np.float32,0xfec3ba6c,0x7f800000,3 +np.float32,0x805aaca6,0x3f800000,3 +np.float32,0xff5c1f13,0x7f800000,3 +np.float32,0x3f6ab3f4,0x3fb9ab6b,3 +np.float32,0x3f014896,0x3f90ac20,3 +np.float32,0x3f030584,0x3f91222a,3 +np.float32,0xbf74853d,0x3fbef71d,3 +np.float32,0xbf534ee0,0x3fae2323,3 +np.float32,0x2c90c3,0x3f800000,3 +np.float32,0x7f62ad25,0x7f800000,3 +np.float32,0x1c8847,0x3f800000,3 +np.float32,0x7e2a8d43,0x7f800000,3 +np.float32,0x807a09cd,0x3f800000,3 +np.float32,0x413871,0x3f800000,3 +np.float32,0x80063692,0x3f800000,3 +np.float32,0x3edaf29b,0x3f8be211,3 +np.float32,0xbf64a7ab,0x3fb68b2d,3 +np.float32,0xfe56a720,0x7f800000,3 +np.float32,0xbf54a8d4,0x3faec350,3 +np.float32,0x3ecbaef7,0x3f8a4350,3 +np.float32,0x3f413714,0x3fa63890,3 +np.float32,0x7d3aa8,0x3f800000,3 +np.float32,0xbea9a13c,0x3f8716e7,3 +np.float32,0x7ef7553e,0x7f800000,3 +np.float32,0x8056f29f,0x3f800000,3 +np.float32,0xff1f7ffe,0x7f800000,3 +np.float32,0x3f41953b,0x3fa65f9c,3 +np.float32,0x3daa2f,0x3f800000,3 +np.float32,0xff0893e4,0x7f800000,3 +np.float32,0xbefc7ec6,0x3f8fe207,3 +np.float32,0xbb026800,0x3f800011,3 +np.float32,0x341e4f,0x3f800000,3 +np.float32,0x3e7b708a,0x3f83e0d1,3 +np.float32,0xa18cb,0x3f800000,3 
+np.float32,0x7e290239,0x7f800000,3 +np.float32,0xbf4254f2,0x3fa6af62,3 +np.float32,0x80000000,0x3f800000,3 +np.float32,0x3f0a6c,0x3f800000,3 +np.float32,0xbec44d28,0x3f898609,3 +np.float32,0xf841f,0x3f800000,3 +np.float32,0x7f01a693,0x7f800000,3 +np.float32,0x8053340b,0x3f800000,3 +np.float32,0xfd4e7990,0x7f800000,3 +np.float32,0xbf782f1f,0x3fc10356,3 +np.float32,0xbe962118,0x3f858acc,3 +np.float32,0xfe8cd702,0x7f800000,3 +np.float32,0x7ecd986f,0x7f800000,3 +np.float32,0x3ebe775f,0x3f88f59b,3 +np.float32,0x8065524f,0x3f800000,3 +np.float32,0x3ede7fc4,0x3f8c471e,3 +np.float32,0x7f5e15ea,0x7f800000,3 +np.float32,0xbe871ada,0x3f847b78,3 +np.float32,0x3f21958b,0x3f9a5af7,3 +np.float32,0x3f64d480,0x3fb6a1fa,3 +np.float32,0xff18b0e9,0x7f800000,3 +np.float32,0xbf0840dd,0x3f928fd9,3 +np.float32,0x80104f5d,0x3f800000,3 +np.float32,0x643b94,0x3f800000,3 +np.float32,0xbc560a80,0x3f8002cc,3 +np.float32,0x3f5c75d6,0x3fb2786e,3 +np.float32,0x7f365fc9,0x7f800000,3 +np.float32,0x54e965,0x3f800000,3 +np.float32,0x6dcd4d,0x3f800000,3 +np.float32,0x3f2057a0,0x3f99f04d,3 +np.float32,0x272fa3,0x3f800000,3 +np.float32,0xff423dc9,0x7f800000,3 +np.float32,0x80273463,0x3f800000,3 +np.float32,0xfe21cc78,0x7f800000,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x802feb65,0x3f800000,3 +np.float32,0x3dc733d0,0x3f809b21,3 +np.float32,0x65d56b,0x3f800000,3 +np.float32,0x80351d8e,0x3f800000,3 +np.float32,0xbf244247,0x3f9b43dd,3 +np.float32,0x7f328e7e,0x7f800000,3 +np.float32,0x7f4d9712,0x7f800000,3 +np.float32,0x2c505d,0x3f800000,3 +np.float32,0xbf232ebe,0x3f9ae5a0,3 +np.float32,0x804a363a,0x3f800000,3 +np.float32,0x80417102,0x3f800000,3 +np.float32,0xbf48b170,0x3fa963d4,3 +np.float32,0x7ea3e3b6,0x7f800000,3 +np.float32,0xbf41415b,0x3fa63cd2,3 +np.float32,0xfe3af7c8,0x7f800000,3 +np.float32,0x7f478010,0x7f800000,3 +np.float32,0x80143113,0x3f800000,3 +np.float32,0x3f7626a7,0x3fbfdf2e,3 +np.float32,0xfea20b0a,0x7f800000,3 +np.float32,0x80144d64,0x3f800000,3 +np.float32,0x7db9ba47,0x7f800000,3 +np.float32,0x7f7fffff,0x7f800000,3 +np.float32,0xbe410834,0x3f8247ef,3 +np.float32,0x14a7af,0x3f800000,3 +np.float32,0x7eaebf9e,0x7f800000,3 +np.float32,0xff800000,0x7f800000,3 +np.float32,0x3f0a7d8e,0x3f9330fd,3 +np.float32,0x3ef780,0x3f800000,3 +np.float32,0x3f62253e,0x3fb546d1,3 +np.float32,0x3f4cbeac,0x3fab2acc,3 +np.float32,0x25db1,0x3f800000,3 +np.float32,0x65c54a,0x3f800000,3 +np.float32,0x800f0645,0x3f800000,3 +np.float32,0x3ed28c78,0x3f8af9f0,3 +np.float32,0x8040c6ce,0x3f800000,3 +np.float32,0x5e4e9a,0x3f800000,3 +np.float32,0xbd3fd2b0,0x3f8023f1,3 +np.float32,0xbf5d2d3f,0x3fb2d1b6,3 +np.float32,0x7ead999f,0x7f800000,3 +np.float32,0xbf30dc86,0x3f9fc805,3 +np.float32,0xff2b0a62,0x7f800000,3 +np.float32,0x3d5180e9,0x3f802adf,3 +np.float32,0x3f62716f,0x3fb56d0d,3 +np.float32,0x7e82ae9c,0x7f800000,3 +np.float32,0xfe2d4bdc,0x7f800000,3 +np.float32,0x805cc7d4,0x3f800000,3 +np.float32,0xfb50f700,0x7f800000,3 +np.float32,0xff57b684,0x7f800000,3 +np.float32,0x80344f01,0x3f800000,3 +np.float32,0x7f2af372,0x7f800000,3 +np.float32,0xfeab6204,0x7f800000,3 +np.float32,0x30b251,0x3f800000,3 +np.float32,0x3eed8cc4,0x3f8e0698,3 +np.float32,0x7eeb1c6a,0x7f800000,3 +np.float32,0x3f17ece6,0x3f9735b0,3 +np.float32,0x21e985,0x3f800000,3 +np.float32,0x3f3a7df3,0x3fa37e34,3 +np.float32,0x802a14a2,0x3f800000,3 +np.float32,0x807d4d5b,0x3f800000,3 +np.float32,0x7f6093ce,0x7f800000,3 +np.float32,0x3f800000,0x3fc583ab,3 +np.float32,0x3da2c26e,0x3f806789,3 +np.float32,0xfe05f278,0x7f800000,3 +np.float32,0x800000,0x3f800000,3 
+np.float32,0xbee63342,0x3f8d282e,3 +np.float32,0xbf225586,0x3f9a9bd4,3 +np.float32,0xbed60e86,0x3f8b59ba,3 +np.float32,0xbec99484,0x3f8a0ca3,3 +np.float32,0x3e967c71,0x3f859199,3 +np.float32,0x7f26ab62,0x7f800000,3 +np.float32,0xca7f4,0x3f800000,3 +np.float32,0xbf543790,0x3fae8ebc,3 +np.float32,0x3e4c1ed9,0x3f828d2d,3 +np.float32,0xbdf37f88,0x3f80e7e1,3 +np.float32,0xff0cc44e,0x7f800000,3 +np.float32,0x5dea48,0x3f800000,3 +np.float32,0x31023c,0x3f800000,3 +np.float32,0x3ea10733,0x3f866208,3 +np.float32,0x3e11e6f2,0x3f814d2e,3 +np.float32,0x80641960,0x3f800000,3 +np.float32,0x3ef779a8,0x3f8f3edb,3 +np.float32,0x3f2a5062,0x3f9d632a,3 +np.float32,0x2b7d34,0x3f800000,3 +np.float32,0x3eeb95c5,0x3f8dca67,3 +np.float32,0x805c1357,0x3f800000,3 +np.float32,0x3db3a79d,0x3f807e29,3 +np.float32,0xfded1900,0x7f800000,3 +np.float32,0x45f362,0x3f800000,3 +np.float32,0x451f38,0x3f800000,3 +np.float32,0x801d3ae5,0x3f800000,3 +np.float32,0x458d45,0x3f800000,3 +np.float32,0xfda9d298,0x7f800000,3 +np.float32,0x467439,0x3f800000,3 +np.float32,0x7f66554a,0x7f800000,3 +np.float32,0xfef2375a,0x7f800000,3 +np.float32,0xbf33fc47,0x3fa0f5d7,3 +np.float32,0x3f75ba69,0x3fbfa2d0,3 +np.float32,0xfeb625b2,0x7f800000,3 +np.float32,0x8066b371,0x3f800000,3 +np.float32,0x3f5cb4e9,0x3fb29718,3 +np.float32,0x7f3b6a58,0x7f800000,3 +np.float32,0x7f6b35ea,0x7f800000,3 +np.float32,0xbf6ee555,0x3fbbe5be,3 +np.float32,0x3d836e21,0x3f804380,3 +np.float32,0xff43cd0c,0x7f800000,3 +np.float32,0xff55c1fa,0x7f800000,3 +np.float32,0xbf0dfccc,0x3f9432a6,3 +np.float32,0x3ed92121,0x3f8baf00,3 +np.float32,0x80068cc1,0x3f800000,3 +np.float32,0xff0103f9,0x7f800000,3 +np.float32,0x7e51b175,0x7f800000,3 +np.float32,0x8012f214,0x3f800000,3 +np.float32,0x62d298,0x3f800000,3 +np.float32,0xbf3e1525,0x3fa4ef8d,3 +np.float32,0x806b4882,0x3f800000,3 +np.float32,0xbf38c146,0x3fa2ce7c,3 +np.float32,0xbed59c30,0x3f8b4d70,3 +np.float32,0x3d1910c0,0x3f8016e2,3 +np.float32,0x7f33d55b,0x7f800000,3 +np.float32,0x7f5800e3,0x7f800000,3 +np.float32,0x5b2c5d,0x3f800000,3 +np.float32,0x807be750,0x3f800000,3 +np.float32,0x7eb297c1,0x7f800000,3 +np.float32,0x7dafee62,0x7f800000,3 +np.float32,0x7d9e23f0,0x7f800000,3 +np.float32,0x3e580537,0x3f82dbd8,3 +np.float32,0xbf800000,0x3fc583ab,3 +np.float32,0x7f40f880,0x7f800000,3 +np.float32,0x775ad3,0x3f800000,3 +np.float32,0xbedacd36,0x3f8bddf3,3 +np.float32,0x2138f6,0x3f800000,3 +np.float32,0x52c3b7,0x3f800000,3 +np.float32,0x8041cfdd,0x3f800000,3 +np.float32,0x7bf16791,0x7f800000,3 +np.float32,0xbe95869c,0x3f857f55,3 +np.float32,0xbf199796,0x3f97bcaf,3 +np.float32,0x3ef8da38,0x3f8f6b45,3 +np.float32,0x803f3648,0x3f800000,3 +np.float32,0x80026fd2,0x3f800000,3 +np.float32,0x7eb3ac26,0x7f800000,3 +np.float32,0x3e49921b,0x3f827ce8,3 +np.float32,0xbf689aed,0x3fb892de,3 +np.float32,0x3f253509,0x3f9b9779,3 +np.float32,0xff17894a,0x7f800000,3 +np.float32,0x3cd12639,0x3f800aae,3 +np.float32,0x1db14b,0x3f800000,3 +np.float32,0x39a0bf,0x3f800000,3 +np.float32,0xfdfe1d08,0x7f800000,3 +np.float32,0xff416cd2,0x7f800000,3 +np.float32,0x8070d818,0x3f800000,3 +np.float32,0x3e516e12,0x3f82afb8,3 +np.float32,0x80536651,0x3f800000,3 +np.float32,0xbf2903d2,0x3f9cecb7,3 +np.float32,0x3e896ae4,0x3f84a353,3 +np.float32,0xbd6ba2c0,0x3f80363d,3 +np.float32,0x80126d3e,0x3f800000,3 +np.float32,0xfd9d43d0,0x7f800000,3 +np.float32,0x7b56b6,0x3f800000,3 +np.float32,0xff04718e,0x7f800000,3 +np.float32,0x31440f,0x3f800000,3 +np.float32,0xbf7a1313,0x3fc215c9,3 +np.float32,0x7f43d6a0,0x7f800000,3 +np.float32,0x3f566503,0x3faf92cc,3 
+np.float32,0xbf39eb0e,0x3fa343f1,3 +np.float32,0xbe35fd70,0x3f8206df,3 +np.float32,0x800c36ac,0x3f800000,3 +np.float32,0x60d061,0x3f800000,3 +np.float32,0x80453e12,0x3f800000,3 +np.float32,0xfe17c36c,0x7f800000,3 +np.float32,0x3d8c72,0x3f800000,3 +np.float32,0xfe8e9134,0x7f800000,3 +np.float32,0xff5d89de,0x7f800000,3 +np.float32,0x7f45020e,0x7f800000,3 +np.float32,0x3f28225e,0x3f9c9d01,3 +np.float32,0xbf3b6900,0x3fa3dbdd,3 +np.float32,0x80349023,0x3f800000,3 +np.float32,0xbf14d780,0x3f964042,3 +np.float32,0x3f56b5d2,0x3fafb8c3,3 +np.float32,0x800c639c,0x3f800000,3 +np.float32,0x7f7a19c8,0x7f800000,3 +np.float32,0xbf7a0815,0x3fc20f86,3 +np.float32,0xbec55926,0x3f89a06e,3 +np.float32,0x4b2cd2,0x3f800000,3 +np.float32,0xbf271eb2,0x3f9c41c8,3 +np.float32,0xff26e168,0x7f800000,3 +np.float32,0x800166b2,0x3f800000,3 +np.float32,0xbde97e38,0x3f80d532,3 +np.float32,0xbf1f93ec,0x3f99af1a,3 +np.float32,0x7f2896ed,0x7f800000,3 +np.float32,0x3da7d96d,0x3f806e1d,3 +np.float32,0x802b7237,0x3f800000,3 +np.float32,0xfdca6bc0,0x7f800000,3 +np.float32,0xbed2e300,0x3f8b0318,3 +np.float32,0x8079d9e8,0x3f800000,3 +np.float32,0x3f388c81,0x3fa2b9c2,3 +np.float32,0x3ed2607c,0x3f8af54a,3 +np.float32,0xff287de6,0x7f800000,3 +np.float32,0x3f55ed89,0x3faf5ac9,3 +np.float32,0x7f5b6af7,0x7f800000,3 +np.float32,0xbeb24730,0x3f87d698,3 +np.float32,0x1,0x3f800000,3 +np.float32,0x3f3a2350,0x3fa35a3b,3 +np.float32,0x8013b422,0x3f800000,3 +np.float32,0x3e9a6560,0x3f85dd35,3 +np.float32,0x80510631,0x3f800000,3 +np.float32,0xfeae39d6,0x7f800000,3 +np.float32,0x7eb437ad,0x7f800000,3 +np.float32,0x8047545b,0x3f800000,3 +np.float32,0x806a1c71,0x3f800000,3 +np.float32,0xbe5543f0,0x3f82c93b,3 +np.float32,0x40e8d,0x3f800000,3 +np.float32,0x63d18b,0x3f800000,3 +np.float32,0x1fa1ea,0x3f800000,3 +np.float32,0x801944e0,0x3f800000,3 +np.float32,0xbf4c7ac6,0x3fab0cae,3 +np.float32,0x7f2679d4,0x7f800000,3 +np.float32,0x3f0102fc,0x3f9099d0,3 +np.float32,0x7e44bdc1,0x7f800000,3 +np.float32,0xbf2072f6,0x3f99f970,3 +np.float32,0x5c7d38,0x3f800000,3 +np.float32,0x30a2e6,0x3f800000,3 +np.float32,0x805b9ca3,0x3f800000,3 +np.float32,0x7cc24ad5,0x7f800000,3 +np.float32,0x3f4f7920,0x3fac6357,3 +np.float32,0x111d62,0x3f800000,3 +np.float32,0xbf4de40a,0x3fabad77,3 +np.float32,0x805d0354,0x3f800000,3 +np.float32,0xbb3d2b00,0x3f800023,3 +np.float32,0x3ef229e7,0x3f8e960b,3 +np.float32,0x3f15754e,0x3f9670e0,3 +np.float32,0xbf689c6b,0x3fb893a5,3 +np.float32,0xbf3796c6,0x3fa2599b,3 +np.float32,0xbe95303c,0x3f8578f2,3 +np.float32,0xfee330de,0x7f800000,3 +np.float32,0xff0d9705,0x7f800000,3 +np.float32,0xbeb0ebd0,0x3f87b7dd,3 +np.float32,0xbf4d5a13,0x3fab6fe7,3 +np.float32,0x80142f5a,0x3f800000,3 +np.float32,0x7e01a87b,0x7f800000,3 +np.float32,0xbe45e5ec,0x3f8265d7,3 +np.float32,0x7f4ac255,0x7f800000,3 +np.float32,0x3ebf6a60,0x3f890ccb,3 +np.float32,0x7f771e16,0x7f800000,3 +np.float32,0x3f41834e,0x3fa6582b,3 +np.float32,0x3f7f6f98,0x3fc52ef0,3 +np.float32,0x7e4ad775,0x7f800000,3 +np.float32,0x3eb39991,0x3f87f4c4,3 +np.float32,0x1e3f4,0x3f800000,3 +np.float32,0x7e84ba19,0x7f800000,3 +np.float32,0x80640be4,0x3f800000,3 +np.float32,0x3f459fc8,0x3fa81272,3 +np.float32,0x3f554ed0,0x3faf109b,3 +np.float32,0x3c6617,0x3f800000,3 +np.float32,0x7f441158,0x7f800000,3 +np.float32,0x7f66e6d8,0x7f800000,3 +np.float32,0x7f565152,0x7f800000,3 +np.float32,0x7f16d550,0x7f800000,3 +np.float32,0xbd4f1950,0x3f8029e5,3 +np.float32,0xcf722,0x3f800000,3 +np.float32,0x3f37d6fd,0x3fa272ad,3 +np.float32,0xff7324ea,0x7f800000,3 +np.float32,0x804bc246,0x3f800000,3 
+np.float32,0x7f099ef8,0x7f800000,3 +np.float32,0x5f838b,0x3f800000,3 +np.float32,0x80523534,0x3f800000,3 +np.float32,0x3f595e84,0x3fb0fb50,3 +np.float32,0xfdef8ac8,0x7f800000,3 +np.float32,0x3d9a07,0x3f800000,3 +np.float32,0x410f61,0x3f800000,3 +np.float32,0xbf715dbb,0x3fbd3bcb,3 +np.float32,0xbedd4734,0x3f8c242f,3 +np.float32,0x7e86739a,0x7f800000,3 +np.float32,0x3e81f144,0x3f8424fe,3 +np.float32,0x7f6342d1,0x7f800000,3 +np.float32,0xff6919a3,0x7f800000,3 +np.float32,0xff051878,0x7f800000,3 +np.float32,0x800ba28f,0x3f800000,3 +np.float32,0xfefab3d8,0x7f800000,3 +np.float32,0xff612a84,0x7f800000,3 +np.float32,0x800cd5ab,0x3f800000,3 +np.float32,0x802a07ae,0x3f800000,3 +np.float32,0xfef6ee3a,0x7f800000,3 +np.float32,0x8037e896,0x3f800000,3 +np.float32,0x3ef2d86f,0x3f8eab7d,3 +np.float32,0x3eafe53d,0x3f87a0cb,3 +np.float32,0xba591c00,0x3f800003,3 +np.float32,0x3e9ed028,0x3f863508,3 +np.float32,0x4a12a8,0x3f800000,3 +np.float32,0xbee55c84,0x3f8d0f45,3 +np.float32,0x8038a8d3,0x3f800000,3 +np.float32,0xff055243,0x7f800000,3 +np.float32,0xbf659067,0x3fb701ca,3 +np.float32,0xbee36a86,0x3f8cd5e0,3 +np.float32,0x7f1d74c1,0x7f800000,3 +np.float32,0xbf7657df,0x3fbffaad,3 +np.float32,0x7e37ee34,0x7f800000,3 +np.float32,0xff04bc74,0x7f800000,3 +np.float32,0x806d194e,0x3f800000,3 +np.float32,0x7f5596c3,0x7f800000,3 +np.float32,0xbe09d268,0x3f81293e,3 +np.float32,0x79ff75,0x3f800000,3 +np.float32,0xbf55479c,0x3faf0d3e,3 +np.float32,0xbe5428ec,0x3f82c1d4,3 +np.float32,0x3f624134,0x3fb554d7,3 +np.float32,0x2ccb8a,0x3f800000,3 +np.float32,0xfc082040,0x7f800000,3 +np.float32,0xff315467,0x7f800000,3 +np.float32,0x3e6ea2d2,0x3f837dd5,3 +np.float32,0x8020fdd1,0x3f800000,3 +np.float32,0x7f0416a1,0x7f800000,3 +np.float32,0x710a1b,0x3f800000,3 +np.float32,0x3dfcd050,0x3f80f9fc,3 +np.float32,0xfe995e96,0x7f800000,3 +np.float32,0x3f020d00,0x3f90e006,3 +np.float32,0x8064263e,0x3f800000,3 +np.float32,0xfcee4160,0x7f800000,3 +np.float32,0x801b3a18,0x3f800000,3 +np.float32,0x3f62c984,0x3fb59955,3 +np.float32,0x806e8355,0x3f800000,3 +np.float32,0x7e94f65d,0x7f800000,3 +np.float32,0x1173de,0x3f800000,3 +np.float32,0x3e3ff3b7,0x3f824166,3 +np.float32,0x803b4aea,0x3f800000,3 +np.float32,0x804c5bcc,0x3f800000,3 +np.float32,0x509fe5,0x3f800000,3 +np.float32,0xbf33b5ee,0x3fa0db0b,3 +np.float32,0x3f2ac15c,0x3f9d8ba4,3 +np.float32,0x7f2c54f8,0x7f800000,3 +np.float32,0x7f33d933,0x7f800000,3 +np.float32,0xbf09b2b4,0x3f92f795,3 +np.float32,0x805db8d6,0x3f800000,3 +np.float32,0x6d6e66,0x3f800000,3 +np.float32,0x3ddfea92,0x3f80c40c,3 +np.float32,0xfda719b8,0x7f800000,3 +np.float32,0x5d657f,0x3f800000,3 +np.float32,0xbf005ba3,0x3f906df6,3 +np.float32,0xbf45e606,0x3fa8305c,3 +np.float32,0x5e9fd1,0x3f800000,3 +np.float32,0x8079dc45,0x3f800000,3 +np.float32,0x7e9c40e3,0x7f800000,3 +np.float32,0x6bd5f6,0x3f800000,3 +np.float32,0xbea14a0e,0x3f866761,3 +np.float32,0x7e7323f3,0x7f800000,3 +np.float32,0x7f0c0a79,0x7f800000,3 +np.float32,0xbf7d7aeb,0x3fc40b0f,3 +np.float32,0x437588,0x3f800000,3 +np.float32,0xbf356376,0x3fa17f63,3 +np.float32,0x7f129921,0x7f800000,3 +np.float32,0x7f47a52e,0x7f800000,3 +np.float32,0xba8cb400,0x3f800005,3 +np.float32,0x802284e0,0x3f800000,3 +np.float32,0xbe820f56,0x3f8426ec,3 +np.float32,0x7f2ef6cf,0x7f800000,3 +np.float32,0xbf70a090,0x3fbcd501,3 +np.float32,0xbf173fea,0x3f96ff6d,3 +np.float32,0x3e19c489,0x3f817224,3 +np.float32,0x7f429b30,0x7f800000,3 +np.float32,0xbdae4118,0x3f8076af,3 +np.float32,0x3e70ad30,0x3f838d41,3 +np.float32,0x335fed,0x3f800000,3 +np.float32,0xff5359cf,0x7f800000,3 
+np.float32,0xbf17e42b,0x3f9732f1,3 +np.float32,0xff3a950b,0x7f800000,3 +np.float32,0xbcca70c0,0x3f800a02,3 +np.float32,0x3f2cda62,0x3f9e4dad,3 +np.float32,0x3f50c185,0x3facf805,3 +np.float32,0x80000001,0x3f800000,3 +np.float32,0x807b86d2,0x3f800000,3 +np.float32,0x8010c2cf,0x3f800000,3 +np.float32,0x3f130fb8,0x3f95b519,3 +np.float32,0x807dc546,0x3f800000,3 +np.float32,0xbee20740,0x3f8cad3f,3 +np.float32,0x80800000,0x3f800000,3 +np.float32,0x3cbd90c0,0x3f8008c6,3 +np.float32,0x3e693488,0x3f835571,3 +np.float32,0xbe70cd44,0x3f838e35,3 +np.float32,0xbe348dc8,0x3f81feb1,3 +np.float32,0x3f31ea90,0x3fa02d3f,3 +np.float32,0xfcd7e180,0x7f800000,3 +np.float32,0xbe30a75c,0x3f81e8d0,3 +np.float32,0x3e552c5a,0x3f82c89d,3 +np.float32,0xff513f74,0x7f800000,3 +np.float32,0xbdb16248,0x3f807afd,3 +np.float64,0x7fbbf954e437f2a9,0x7ff0000000000000,1 +np.float64,0x581bbf0cb0379,0x3ff0000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xffb959a2a632b348,0x7ff0000000000000,1 +np.float64,0xbfdbd6baebb7ad76,0x3ff189a5ca25a6e1,1 +np.float64,0xbfd094ec9aa129da,0x3ff08a3f6b918065,1 +np.float64,0x3fe236753f646cea,0x3ff2a982660b8b43,1 +np.float64,0xbfe537fadfaa6ff6,0x3ff3a5f1c49c31bf,1 +np.float64,0xbfe31fa7dc663f50,0x3ff2f175374aef0e,1 +np.float64,0x3fc4b6569f296cb0,0x3ff035bde801bb53,1 +np.float64,0x800ce3c00f99c780,0x3ff0000000000000,1 +np.float64,0xbfebcde33e779bc6,0x3ff66de82cd30fc5,1 +np.float64,0x800dc09d3b7b813b,0x3ff0000000000000,1 +np.float64,0x80067d4c450cfa99,0x3ff0000000000000,1 +np.float64,0x1f6ade203ed7,0x3ff0000000000000,1 +np.float64,0xbfd4e311eca9c624,0x3ff0dc1383d6c3db,1 +np.float64,0x800649b3a54c9368,0x3ff0000000000000,1 +np.float64,0xcc14d1ab9829a,0x3ff0000000000000,1 +np.float64,0x3fc290c5bb25218b,0x3ff02b290f46dd6d,1 +np.float64,0x3fe78eb8376f1d70,0x3ff488f3bc259537,1 +np.float64,0xffc60f58e82c1eb0,0x7ff0000000000000,1 +np.float64,0x3fd35666ad26accd,0x3ff0bc6573da6bcd,1 +np.float64,0x7fc20257a62404ae,0x7ff0000000000000,1 +np.float64,0x80076d842e0edb09,0x3ff0000000000000,1 +np.float64,0x3fd8e44b08b1c898,0x3ff139b9a1f8428e,1 +np.float64,0x7fd6f6fc7a2dedf8,0x7ff0000000000000,1 +np.float64,0x3fa01b9f0820373e,0x3ff00206f8ad0f1b,1 +np.float64,0x69ed190ed3da4,0x3ff0000000000000,1 +np.float64,0xbfd997eb34b32fd6,0x3ff14be65a5db4a0,1 +np.float64,0x7feada2d0935b459,0x7ff0000000000000,1 +np.float64,0xbf80987120213100,0x3ff000226d29a9fc,1 +np.float64,0xbfef203e37fe407c,0x3ff82f51f04e8821,1 +np.float64,0xffe3dcf91fa7b9f2,0x7ff0000000000000,1 +np.float64,0x9a367283346cf,0x3ff0000000000000,1 +np.float64,0x800feb09f7bfd614,0x3ff0000000000000,1 +np.float64,0xbfe0319f9520633f,0x3ff217c5205c403f,1 +np.float64,0xbfa91eabd4323d50,0x3ff004ee4347f627,1 +np.float64,0x3fd19cbf7d23397f,0x3ff09c13e8e43571,1 +np.float64,0xffeb8945f0b7128b,0x7ff0000000000000,1 +np.float64,0x800a0eb4f2141d6a,0x3ff0000000000000,1 +np.float64,0xffe83e7312f07ce6,0x7ff0000000000000,1 +np.float64,0xffca53fee834a7fc,0x7ff0000000000000,1 +np.float64,0x800881cbf1710398,0x3ff0000000000000,1 +np.float64,0x80003e6abbe07cd6,0x3ff0000000000000,1 +np.float64,0xbfef6a998afed533,0x3ff859b7852d1b4d,1 +np.float64,0x3fd4eb7577a9d6eb,0x3ff0dcc601261aab,1 +np.float64,0xbfc9c12811338250,0x3ff05331268b05c8,1 +np.float64,0x7fddf84e5e3bf09c,0x7ff0000000000000,1 +np.float64,0xbfd4d6fbbc29adf8,0x3ff0db12db19d187,1 +np.float64,0x80077892bfaef126,0x3ff0000000000000,1 +np.float64,0xffae9d49543d3a90,0x7ff0000000000000,1 +np.float64,0xbfd8bef219317de4,0x3ff136034e5d2f1b,1 +np.float64,0xffe89c74ddb138e9,0x7ff0000000000000,1 
+np.float64,0x8003b6bbb7e76d78,0x3ff0000000000000,1 +np.float64,0x315a4e8462b4b,0x3ff0000000000000,1 +np.float64,0x800ee616edddcc2e,0x3ff0000000000000,1 +np.float64,0xdfb27f97bf650,0x3ff0000000000000,1 +np.float64,0x8004723dc328e47c,0x3ff0000000000000,1 +np.float64,0xbfe529500daa52a0,0x3ff3a0b9b33fc84c,1 +np.float64,0xbfe4e46a7ce9c8d5,0x3ff3886ce0f92612,1 +np.float64,0xbf52003680240000,0x3ff00000a203d61a,1 +np.float64,0xffd3400458268008,0x7ff0000000000000,1 +np.float64,0x80076deb444edbd7,0x3ff0000000000000,1 +np.float64,0xa612f6c14c27,0x3ff0000000000000,1 +np.float64,0xbfd41c74c9a838ea,0x3ff0cbe61e16aecf,1 +np.float64,0x43f464a887e8d,0x3ff0000000000000,1 +np.float64,0x800976e748b2edcf,0x3ff0000000000000,1 +np.float64,0xffc79d6ba12f3ad8,0x7ff0000000000000,1 +np.float64,0xffd6dbcb022db796,0x7ff0000000000000,1 +np.float64,0xffd6a9672a2d52ce,0x7ff0000000000000,1 +np.float64,0x3fe95dcfa632bb9f,0x3ff54bbad2ee919e,1 +np.float64,0x3febadd2e1375ba6,0x3ff65e336c47c018,1 +np.float64,0x7fd47c37d828f86f,0x7ff0000000000000,1 +np.float64,0xbfd4ea59e0a9d4b4,0x3ff0dcae6af3e443,1 +np.float64,0x2c112afc58226,0x3ff0000000000000,1 +np.float64,0x8008122bced02458,0x3ff0000000000000,1 +np.float64,0x7fe7105ab3ee20b4,0x7ff0000000000000,1 +np.float64,0x80089634df312c6a,0x3ff0000000000000,1 +np.float64,0x68e9fbc8d1d40,0x3ff0000000000000,1 +np.float64,0xbfec1e1032f83c20,0x3ff69590b9f18ea8,1 +np.float64,0xbfedf181623be303,0x3ff787ef48935dc6,1 +np.float64,0xffe8600457f0c008,0x7ff0000000000000,1 +np.float64,0x7a841ec6f5084,0x3ff0000000000000,1 +np.float64,0x459a572e8b34c,0x3ff0000000000000,1 +np.float64,0x3fe8a232bef14465,0x3ff4fac1780f731e,1 +np.float64,0x3fcb37597d366eb3,0x3ff05cf08ab14ebd,1 +np.float64,0xbfb0261d00204c38,0x3ff00826fb86ca8a,1 +np.float64,0x3fc6e7a6dd2dcf4e,0x3ff041c1222ffa79,1 +np.float64,0xee65dd03dccbc,0x3ff0000000000000,1 +np.float64,0xffe26fdc23e4dfb8,0x7ff0000000000000,1 +np.float64,0x7fe8d6c8cab1ad91,0x7ff0000000000000,1 +np.float64,0xbfeb64bf2676c97e,0x3ff63abb8607828c,1 +np.float64,0x3fd28417b425082f,0x3ff0ac9eb22a732b,1 +np.float64,0xbfd26835b3a4d06c,0x3ff0aa94c48fb6d2,1 +np.float64,0xffec617a01b8c2f3,0x7ff0000000000000,1 +np.float64,0xe1bfff01c3800,0x3ff0000000000000,1 +np.float64,0x3fd4def913a9bdf4,0x3ff0dbbc7271046f,1 +np.float64,0x94f4c17129e98,0x3ff0000000000000,1 +np.float64,0x8009b2eaa33365d6,0x3ff0000000000000,1 +np.float64,0x3fd9633b41b2c678,0x3ff1468388bdfb65,1 +np.float64,0xffe0ae5c80e15cb8,0x7ff0000000000000,1 +np.float64,0x7fdfc35996bf86b2,0x7ff0000000000000,1 +np.float64,0x3fcfc5bdc23f8b7c,0x3ff07ed5caa4545c,1 +np.float64,0xd48b4907a9169,0x3ff0000000000000,1 +np.float64,0xbfe0a2cc52614598,0x3ff2361665895d95,1 +np.float64,0xbfe9068f90720d1f,0x3ff525b82491a1a5,1 +np.float64,0x4238b9208472,0x3ff0000000000000,1 +np.float64,0x800e6b2bf69cd658,0x3ff0000000000000,1 +np.float64,0x7fb638b6ae2c716c,0x7ff0000000000000,1 +np.float64,0x7fe267641764cec7,0x7ff0000000000000,1 +np.float64,0xffc0933d3521267c,0x7ff0000000000000,1 +np.float64,0x7fddfdfb533bfbf6,0x7ff0000000000000,1 +np.float64,0xced2a8e99da55,0x3ff0000000000000,1 +np.float64,0x2a80d5165501b,0x3ff0000000000000,1 +np.float64,0xbfeead2ab63d5a55,0x3ff7eeb5cbcfdcab,1 +np.float64,0x80097f6f92f2fee0,0x3ff0000000000000,1 +np.float64,0x3fee1f29b77c3e54,0x3ff7a0a58c13df62,1 +np.float64,0x3f9d06b8383a0d70,0x3ff001a54a2d8cf8,1 +np.float64,0xbfc8b41d3f31683c,0x3ff04c85379dd6b0,1 +np.float64,0xffd2a04c1e254098,0x7ff0000000000000,1 +np.float64,0xbfb71c01e02e3800,0x3ff010b34220e838,1 +np.float64,0xbfe69249ef6d2494,0x3ff425e48d1e938b,1 
+np.float64,0xffefffffffffffff,0x7ff0000000000000,1 +np.float64,0x3feb1d52fbf63aa6,0x3ff618813ae922d7,1 +np.float64,0x7fb8d1a77e31a34e,0x7ff0000000000000,1 +np.float64,0xffc3cfc4ed279f88,0x7ff0000000000000,1 +np.float64,0x2164b9fc42c98,0x3ff0000000000000,1 +np.float64,0x3fbb868cee370d1a,0x3ff017b31b0d4d27,1 +np.float64,0x3fcd6dea583adbd5,0x3ff06cbd16bf44a0,1 +np.float64,0xbfecd041d479a084,0x3ff6efb25f61012d,1 +np.float64,0xbfb0552e6e20aa60,0x3ff00856ca83834a,1 +np.float64,0xe6293cbfcc528,0x3ff0000000000000,1 +np.float64,0x7fba58394034b072,0x7ff0000000000000,1 +np.float64,0x33bc96d467794,0x3ff0000000000000,1 +np.float64,0xffe90ea86bf21d50,0x7ff0000000000000,1 +np.float64,0xbfc626ea6d2c4dd4,0x3ff03d7e01ec3849,1 +np.float64,0x65b56fe4cb6af,0x3ff0000000000000,1 +np.float64,0x3fea409fb7f4813f,0x3ff5b171deab0ebd,1 +np.float64,0x3fe849c1df709384,0x3ff4d59063ff98c4,1 +np.float64,0x169073082d20f,0x3ff0000000000000,1 +np.float64,0xcc8b6add9916e,0x3ff0000000000000,1 +np.float64,0xbfef3d78d5fe7af2,0x3ff83fecc26abeea,1 +np.float64,0x3fe8c65a4a718cb4,0x3ff50a23bfeac7df,1 +np.float64,0x3fde9fa5c8bd3f4c,0x3ff1ddeb12b9d623,1 +np.float64,0xffe2af536da55ea6,0x7ff0000000000000,1 +np.float64,0x800186d0b0c30da2,0x3ff0000000000000,1 +np.float64,0x3fe9ba3c1d737478,0x3ff574ab2bf3a560,1 +np.float64,0xbfe1489c46a29138,0x3ff2641d36b30e21,1 +np.float64,0xbfe4b6b7c0e96d70,0x3ff37880ac8b0540,1 +np.float64,0x800e66ad82fccd5b,0x3ff0000000000000,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x7febb0fd477761fa,0x7ff0000000000000,1 +np.float64,0xbfdc433f2eb8867e,0x3ff195ec2a6cce27,1 +np.float64,0x3fe12c5a172258b4,0x3ff25c225b8a34bb,1 +np.float64,0xbfef6f116c3ede23,0x3ff85c47eaed49a0,1 +np.float64,0x800af6f60f35edec,0x3ff0000000000000,1 +np.float64,0xffe567999a2acf32,0x7ff0000000000000,1 +np.float64,0xbfc5ac5ae72b58b4,0x3ff03adb50ec04f3,1 +np.float64,0x3fea1b57e23436b0,0x3ff5a06f98541767,1 +np.float64,0x7fcc3e36fb387c6d,0x7ff0000000000000,1 +np.float64,0x8000c8dc698191ba,0x3ff0000000000000,1 +np.float64,0x3fee5085ed7ca10c,0x3ff7bb92f61245b8,1 +np.float64,0x7fbb9f803a373eff,0x7ff0000000000000,1 +np.float64,0xbfe1e5e806e3cbd0,0x3ff2918f2d773007,1 +np.float64,0x8008f8c3f3b1f188,0x3ff0000000000000,1 +np.float64,0x7fe53df515ea7be9,0x7ff0000000000000,1 +np.float64,0x7fdbb87fb3b770fe,0x7ff0000000000000,1 +np.float64,0x3fefcc0f50ff981f,0x3ff89210a6a04e6b,1 +np.float64,0x3fe33f87d0267f10,0x3ff2fb989ea4f2bc,1 +np.float64,0x1173992022e8,0x3ff0000000000000,1 +np.float64,0x3fef534632bea68c,0x3ff84c5ca9713ff9,1 +np.float64,0x3fc5991d552b3238,0x3ff03a72bfdb6e5f,1 +np.float64,0x3fdad90dc1b5b21c,0x3ff16db868180034,1 +np.float64,0xffe20b8078e41700,0x7ff0000000000000,1 +np.float64,0x7fdf409a82be8134,0x7ff0000000000000,1 +np.float64,0x3fccb7e691396fcd,0x3ff06786b6ccdbcb,1 +np.float64,0xffe416e0b7282dc1,0x7ff0000000000000,1 +np.float64,0xffe3a8a981275152,0x7ff0000000000000,1 +np.float64,0x3fd9c8bd31b3917c,0x3ff150ee6f5f692f,1 +np.float64,0xffeab6fef6356dfd,0x7ff0000000000000,1 +np.float64,0x3fe9c5e3faf38bc8,0x3ff579e18c9bd548,1 +np.float64,0x800b173e44762e7d,0x3ff0000000000000,1 +np.float64,0xffe2719db764e33b,0x7ff0000000000000,1 +np.float64,0x3fd1fcf31223f9e6,0x3ff0a2da7ad99856,1 +np.float64,0x80082c4afcd05896,0x3ff0000000000000,1 +np.float64,0xa56e5e4b4adcc,0x3ff0000000000000,1 +np.float64,0xffbbbddab2377bb8,0x7ff0000000000000,1 +np.float64,0x3b3927c076726,0x3ff0000000000000,1 +np.float64,0x3fec03fd58f807fb,0x3ff6889b8a774728,1 +np.float64,0xbfaa891fb4351240,0x3ff00580987bd914,1 
+np.float64,0x7fb4800c4a290018,0x7ff0000000000000,1 +np.float64,0xffbb5d2b6036ba58,0x7ff0000000000000,1 +np.float64,0x7fd6608076acc100,0x7ff0000000000000,1 +np.float64,0x31267e4c624d1,0x3ff0000000000000,1 +np.float64,0x33272266664e5,0x3ff0000000000000,1 +np.float64,0x47bb37f28f768,0x3ff0000000000000,1 +np.float64,0x3fe134bb4ee26977,0x3ff25e7ea647a928,1 +np.float64,0xbfe2b5f42ba56be8,0x3ff2d05cbdc7344b,1 +np.float64,0xbfe0e013fd61c028,0x3ff246dfce572914,1 +np.float64,0x7fecedcda4f9db9a,0x7ff0000000000000,1 +np.float64,0x8001816c2da302d9,0x3ff0000000000000,1 +np.float64,0xffced8b65b3db16c,0x7ff0000000000000,1 +np.float64,0xffdc1d4a0b383a94,0x7ff0000000000000,1 +np.float64,0x7fe94e7339f29ce5,0x7ff0000000000000,1 +np.float64,0x33fb846667f71,0x3ff0000000000000,1 +np.float64,0x800a1380e9542702,0x3ff0000000000000,1 +np.float64,0x800b74eaa776e9d6,0x3ff0000000000000,1 +np.float64,0x5681784aad030,0x3ff0000000000000,1 +np.float64,0xbfee0eb7917c1d6f,0x3ff797b949f7f6b4,1 +np.float64,0xffe4ec5fd2a9d8bf,0x7ff0000000000000,1 +np.float64,0xbfcd7401dd3ae804,0x3ff06cea52c792c0,1 +np.float64,0x800587563beb0ead,0x3ff0000000000000,1 +np.float64,0x3fc15c6f3322b8de,0x3ff025bbd030166d,1 +np.float64,0x7feb6b4caf76d698,0x7ff0000000000000,1 +np.float64,0x7fe136ef82a26dde,0x7ff0000000000000,1 +np.float64,0xf592dac3eb25c,0x3ff0000000000000,1 +np.float64,0x7fd300baf6a60175,0x7ff0000000000000,1 +np.float64,0x7fc880de9e3101bc,0x7ff0000000000000,1 +np.float64,0x7fe7a1aa5caf4354,0x7ff0000000000000,1 +np.float64,0x2f9b8e0e5f373,0x3ff0000000000000,1 +np.float64,0xffcc9071993920e4,0x7ff0000000000000,1 +np.float64,0x8009e151b313c2a4,0x3ff0000000000000,1 +np.float64,0xbfd46e2d18a8dc5a,0x3ff0d27a7b37c1ae,1 +np.float64,0x3fe65c7961acb8f3,0x3ff4116946062a4c,1 +np.float64,0x7fd31b371626366d,0x7ff0000000000000,1 +np.float64,0x98dc924d31b93,0x3ff0000000000000,1 +np.float64,0x268bef364d17f,0x3ff0000000000000,1 +np.float64,0x7fd883ba56310774,0x7ff0000000000000,1 +np.float64,0x3fc53f01a32a7e03,0x3ff0388dea9cd63e,1 +np.float64,0xffe1ea8c0563d518,0x7ff0000000000000,1 +np.float64,0x3fd0bf0e63a17e1d,0x3ff08d0577f5ffa6,1 +np.float64,0x7fef42418f7e8482,0x7ff0000000000000,1 +np.float64,0x8000bccd38c1799b,0x3ff0000000000000,1 +np.float64,0xbfe6c48766ed890f,0x3ff43936fa4048c8,1 +np.float64,0xbfb2a38f3a254720,0x3ff00adc7f7b2822,1 +np.float64,0x3fd5262b2eaa4c56,0x3ff0e1af492c08f5,1 +np.float64,0x80065b4691ecb68e,0x3ff0000000000000,1 +np.float64,0xfb6b9e9ff6d74,0x3ff0000000000000,1 +np.float64,0x8006c71e6ecd8e3e,0x3ff0000000000000,1 +np.float64,0x3fd0a3e43ca147c8,0x3ff08b3ad7b42485,1 +np.float64,0xbfc82d8607305b0c,0x3ff04949d6733ef6,1 +np.float64,0xde048c61bc092,0x3ff0000000000000,1 +np.float64,0xffcf73e0fa3ee7c0,0x7ff0000000000000,1 +np.float64,0xbfe8639d7830c73b,0x3ff4e05f97948376,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x67f01a2acfe04,0x3ff0000000000000,1 +np.float64,0x3fe222e803e445d0,0x3ff2a3a75e5f29d8,1 +np.float64,0xffef84c6387f098b,0x7ff0000000000000,1 +np.float64,0x3fe5969c1e6b2d38,0x3ff3c80130462bb2,1 +np.float64,0x8009f56953d3ead3,0x3ff0000000000000,1 +np.float64,0x3fe05c9b6360b937,0x3ff2232e1cba5617,1 +np.float64,0x3fd8888d63b1111b,0x3ff130a5b788d52f,1 +np.float64,0xffe3a9e6f26753ce,0x7ff0000000000000,1 +np.float64,0x800e2aaa287c5554,0x3ff0000000000000,1 +np.float64,0x3fea8d6c82351ad9,0x3ff5d4d8cde9a11d,1 +np.float64,0x7feef700723dee00,0x7ff0000000000000,1 +np.float64,0x3fa5cb77242b96e0,0x3ff003b62b3e50f1,1 +np.float64,0x7fb68f0a862d1e14,0x7ff0000000000000,1 +np.float64,0x7fb97ee83432fdcf,0x7ff0000000000000,1 
+np.float64,0x7fd74a78632e94f0,0x7ff0000000000000,1 +np.float64,0x7fcfe577713fcaee,0x7ff0000000000000,1 +np.float64,0xffe192ee5ea325dc,0x7ff0000000000000,1 +np.float64,0x477d6ae48efae,0x3ff0000000000000,1 +np.float64,0xffe34d5237669aa4,0x7ff0000000000000,1 +np.float64,0x7fe3ce8395a79d06,0x7ff0000000000000,1 +np.float64,0x80019c01ffa33805,0x3ff0000000000000,1 +np.float64,0x74b5b56ce96b7,0x3ff0000000000000,1 +np.float64,0x7fe05ecdeda0bd9b,0x7ff0000000000000,1 +np.float64,0xffe9693eb232d27d,0x7ff0000000000000,1 +np.float64,0xffd2be2c7da57c58,0x7ff0000000000000,1 +np.float64,0x800dbd5cbc1b7aba,0x3ff0000000000000,1 +np.float64,0xbfa36105d426c210,0x3ff002ef2e3a87f7,1 +np.float64,0x800b2d69fb765ad4,0x3ff0000000000000,1 +np.float64,0xbfdb81c9a9370394,0x3ff1802d409cbf7a,1 +np.float64,0x7fd481d014a9039f,0x7ff0000000000000,1 +np.float64,0xffe66c3c1fecd878,0x7ff0000000000000,1 +np.float64,0x3fc55865192ab0c8,0x3ff03915b51e8839,1 +np.float64,0xd6a78987ad4f1,0x3ff0000000000000,1 +np.float64,0x800c6cc80d58d990,0x3ff0000000000000,1 +np.float64,0x979435a12f29,0x3ff0000000000000,1 +np.float64,0xbfbd971e7a3b2e40,0x3ff01b647e45f5a6,1 +np.float64,0x80067565bfeceacc,0x3ff0000000000000,1 +np.float64,0x8001ad689ce35ad2,0x3ff0000000000000,1 +np.float64,0x7fa43253dc2864a7,0x7ff0000000000000,1 +np.float64,0xbfe3dda307e7bb46,0x3ff32ef99a2efe1d,1 +np.float64,0x3fe5d7b395ebaf68,0x3ff3dfd33cdc8ef4,1 +np.float64,0xd94cc9c3b2999,0x3ff0000000000000,1 +np.float64,0x3fee5a513fbcb4a2,0x3ff7c0f17b876ce5,1 +np.float64,0xffe27761fa64eec4,0x7ff0000000000000,1 +np.float64,0x3feb788119b6f102,0x3ff64446f67f4efa,1 +np.float64,0xbfed6e10dffadc22,0x3ff741d5ef610ca0,1 +np.float64,0x7fe73cf98b2e79f2,0x7ff0000000000000,1 +np.float64,0x7847d09af08fb,0x3ff0000000000000,1 +np.float64,0x29ded2da53bdb,0x3ff0000000000000,1 +np.float64,0xbfe51c1ec1aa383e,0x3ff39c0b7cf832e2,1 +np.float64,0xbfeafd5e65f5fabd,0x3ff609548a787f57,1 +np.float64,0x3fd872a26fb0e545,0x3ff12e7fbd95505c,1 +np.float64,0x7fed6b7c1b7ad6f7,0x7ff0000000000000,1 +np.float64,0xffe7ba9ec16f753d,0x7ff0000000000000,1 +np.float64,0x7f89b322f0336645,0x7ff0000000000000,1 +np.float64,0xbfad1677383a2cf0,0x3ff0069ca67e7baa,1 +np.float64,0x3fe0906d04a120da,0x3ff2311b04b7bfef,1 +np.float64,0xffe4b3c9d4296793,0x7ff0000000000000,1 +np.float64,0xbfe476bb0ce8ed76,0x3ff36277d2921a74,1 +np.float64,0x7fc35655cf26acab,0x7ff0000000000000,1 +np.float64,0x7fe9980f0373301d,0x7ff0000000000000,1 +np.float64,0x9e6e04cb3cdc1,0x3ff0000000000000,1 +np.float64,0x800b89e0afb713c2,0x3ff0000000000000,1 +np.float64,0x800bd951a3f7b2a4,0x3ff0000000000000,1 +np.float64,0x29644a9e52c8a,0x3ff0000000000000,1 +np.float64,0x3fe1be2843637c51,0x3ff285e90d8387e4,1 +np.float64,0x7fa233cce4246799,0x7ff0000000000000,1 +np.float64,0xbfcfb7bc2d3f6f78,0x3ff07e657de3e2ed,1 +np.float64,0xffd7c953e7af92a8,0x7ff0000000000000,1 +np.float64,0xbfc5bbaf772b7760,0x3ff03b2ee4febb1e,1 +np.float64,0x8007b7315a6f6e63,0x3ff0000000000000,1 +np.float64,0xbfe906d902320db2,0x3ff525d7e16acfe0,1 +np.float64,0x3fde33d8553c67b1,0x3ff1d09faa19aa53,1 +np.float64,0x61fe76a0c3fcf,0x3ff0000000000000,1 +np.float64,0xa75e355b4ebc7,0x3ff0000000000000,1 +np.float64,0x3fc9e6d86033cdb1,0x3ff05426299c7064,1 +np.float64,0x7fd83f489eb07e90,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x80014434ae62886a,0x3ff0000000000000,1 +np.float64,0xbfe21af9686435f3,0x3ff2a149338bdefe,1 +np.float64,0x9354e6cd26a9d,0x3ff0000000000000,1 +np.float64,0xb42b95f768573,0x3ff0000000000000,1 +np.float64,0xbfecb4481bb96890,0x3ff6e15d269dd651,1 
+np.float64,0x3f97842ae82f0840,0x3ff0011485156f28,1 +np.float64,0xffdef63d90bdec7c,0x7ff0000000000000,1 +np.float64,0x7fe511a8d36a2351,0x7ff0000000000000,1 +np.float64,0xbf8cb638a0396c80,0x3ff000670c318fb6,1 +np.float64,0x3fe467e1f668cfc4,0x3ff35d65f93ccac6,1 +np.float64,0xbfce7d88f03cfb10,0x3ff074c22475fe5b,1 +np.float64,0x6d0a4994da14a,0x3ff0000000000000,1 +np.float64,0xbfb3072580260e48,0x3ff00b51d3913e9f,1 +np.float64,0x8008fcde36b1f9bd,0x3ff0000000000000,1 +np.float64,0x3fd984df66b309c0,0x3ff149f29125eca4,1 +np.float64,0xffee2a10fe7c5421,0x7ff0000000000000,1 +np.float64,0x80039168ace722d2,0x3ff0000000000000,1 +np.float64,0xffda604379b4c086,0x7ff0000000000000,1 +np.float64,0xffdc6a405bb8d480,0x7ff0000000000000,1 +np.float64,0x3fe62888b26c5111,0x3ff3fdda754c4372,1 +np.float64,0x8008b452cb5168a6,0x3ff0000000000000,1 +np.float64,0x6165d540c2cbb,0x3ff0000000000000,1 +np.float64,0xbfee0c04d17c180a,0x3ff796431c64bcbe,1 +np.float64,0x800609b8448c1371,0x3ff0000000000000,1 +np.float64,0x800fc3fca59f87f9,0x3ff0000000000000,1 +np.float64,0x77f64848efeca,0x3ff0000000000000,1 +np.float64,0x8007cf522d8f9ea5,0x3ff0000000000000,1 +np.float64,0xbfe9fb0b93f3f617,0x3ff591cb0052e22c,1 +np.float64,0x7fd569d5f0aad3ab,0x7ff0000000000000,1 +np.float64,0x7fe5cf489d6b9e90,0x7ff0000000000000,1 +np.float64,0x7fd6e193e92dc327,0x7ff0000000000000,1 +np.float64,0xf78988a5ef131,0x3ff0000000000000,1 +np.float64,0x3fe8f97562b1f2eb,0x3ff5201080fbc12d,1 +np.float64,0x7febfd69d7b7fad3,0x7ff0000000000000,1 +np.float64,0xffc07b5c1720f6b8,0x7ff0000000000000,1 +np.float64,0xbfd966926832cd24,0x3ff146da9adf492e,1 +np.float64,0x7fef5bd9edfeb7b3,0x7ff0000000000000,1 +np.float64,0xbfd2afbc96255f7a,0x3ff0afd601febf44,1 +np.float64,0x7fdd4ea6293a9d4b,0x7ff0000000000000,1 +np.float64,0xbfe8a1e916b143d2,0x3ff4faa23c2793e5,1 +np.float64,0x800188fcd8c311fa,0x3ff0000000000000,1 +np.float64,0xbfe30803f1661008,0x3ff2e9fc729baaee,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x3fd287bec3250f7e,0x3ff0ace34d3102f6,1 +np.float64,0x1f0ee9443e1de,0x3ff0000000000000,1 +np.float64,0xbfd92f73da325ee8,0x3ff14143e4fa2c5a,1 +np.float64,0x3fed7c9bdffaf938,0x3ff74984168734d3,1 +np.float64,0x8002c4d1696589a4,0x3ff0000000000000,1 +np.float64,0xfe03011bfc060,0x3ff0000000000000,1 +np.float64,0x7f7a391e6034723c,0x7ff0000000000000,1 +np.float64,0xffd6fd46f82dfa8e,0x7ff0000000000000,1 +np.float64,0xbfd7520a742ea414,0x3ff112f1ba5d4f91,1 +np.float64,0x8009389d8812713b,0x3ff0000000000000,1 +np.float64,0x7fefb846aaff708c,0x7ff0000000000000,1 +np.float64,0x3fd98a0983331413,0x3ff14a79efb8adbf,1 +np.float64,0xbfd897158db12e2c,0x3ff132137902cf3e,1 +np.float64,0xffc4048d5928091c,0x7ff0000000000000,1 +np.float64,0x80036ae46046d5ca,0x3ff0000000000000,1 +np.float64,0x7faba7ed3c374fd9,0x7ff0000000000000,1 +np.float64,0xbfec4265e1f884cc,0x3ff6a7b8602422c9,1 +np.float64,0xaa195e0b5432c,0x3ff0000000000000,1 +np.float64,0x3feac15d317582ba,0x3ff5ed115758145f,1 +np.float64,0x6c13a5bcd8275,0x3ff0000000000000,1 +np.float64,0xbfed20b8883a4171,0x3ff7194dbd0dc988,1 +np.float64,0x800cde65c899bccc,0x3ff0000000000000,1 +np.float64,0x7c72912af8e53,0x3ff0000000000000,1 +np.float64,0x3fe49d2bb4e93a57,0x3ff36fab3aba15d4,1 +np.float64,0xbfd598fa02ab31f4,0x3ff0eb72fc472025,1 +np.float64,0x8007a191712f4324,0x3ff0000000000000,1 +np.float64,0xbfdeb14872bd6290,0x3ff1e01ca83f35fd,1 +np.float64,0xbfe1da46b3e3b48e,0x3ff28e23ad2f5615,1 +np.float64,0x800a2f348e745e69,0x3ff0000000000000,1 +np.float64,0xbfee66928afccd25,0x3ff7c7ac7dbb3273,1 
+np.float64,0xffd78a0a2b2f1414,0x7ff0000000000000,1 +np.float64,0x7fc5fa80b82bf500,0x7ff0000000000000,1 +np.float64,0x800e6d7260dcdae5,0x3ff0000000000000,1 +np.float64,0xbfd6cff2aaad9fe6,0x3ff106f78ee61642,1 +np.float64,0x7fe1041d1d220839,0x7ff0000000000000,1 +np.float64,0xbfdf75586cbeeab0,0x3ff1f8dbaa7e57f0,1 +np.float64,0xffdcaae410b955c8,0x7ff0000000000000,1 +np.float64,0x800fe5e0d1ffcbc2,0x3ff0000000000000,1 +np.float64,0x800d7999527af333,0x3ff0000000000000,1 +np.float64,0xbfe62c233bac5846,0x3ff3ff34220a204c,1 +np.float64,0x7fe99bbff8f3377f,0x7ff0000000000000,1 +np.float64,0x7feeaf471d3d5e8d,0x7ff0000000000000,1 +np.float64,0xd5904ff5ab20a,0x3ff0000000000000,1 +np.float64,0x3fd07aae3320f55c,0x3ff08888c227c968,1 +np.float64,0x7fea82b8dff50571,0x7ff0000000000000,1 +np.float64,0xffef2db9057e5b71,0x7ff0000000000000,1 +np.float64,0xbfe2077fef640f00,0x3ff29b7dd0d39d36,1 +np.float64,0xbfe09a4d7c61349b,0x3ff233c7e88881f4,1 +np.float64,0x3fda50c4cbb4a188,0x3ff15f28a71deee7,1 +np.float64,0x7fe7d9ee6b2fb3dc,0x7ff0000000000000,1 +np.float64,0x3febbf6faeb77edf,0x3ff666d13682ea93,1 +np.float64,0xc401a32988035,0x3ff0000000000000,1 +np.float64,0xbfeab30aa8f56615,0x3ff5e65dcc6603f8,1 +np.float64,0x92c8cea32591a,0x3ff0000000000000,1 +np.float64,0xbff0000000000000,0x3ff8b07551d9f550,1 +np.float64,0xbfbddfb4dc3bbf68,0x3ff01bebaec38faa,1 +np.float64,0xbfd8de3e2a31bc7c,0x3ff1391f4830d20b,1 +np.float64,0xffc83a8f8a307520,0x7ff0000000000000,1 +np.float64,0x3fee026ef53c04de,0x3ff7911337085827,1 +np.float64,0x7fbaf380b235e700,0x7ff0000000000000,1 +np.float64,0xffe5b89fa62b713f,0x7ff0000000000000,1 +np.float64,0xbfdc1ff54ab83fea,0x3ff191e8c0b60bb2,1 +np.float64,0x6ae3534cd5c6b,0x3ff0000000000000,1 +np.float64,0xbfea87e558750fcb,0x3ff5d24846013794,1 +np.float64,0xffe0f467bee1e8cf,0x7ff0000000000000,1 +np.float64,0x7fee3b0dc7bc761b,0x7ff0000000000000,1 +np.float64,0x3fed87521afb0ea4,0x3ff74f2f5cd36a5c,1 +np.float64,0x7b3c9882f6794,0x3ff0000000000000,1 +np.float64,0x7fdd1a62243a34c3,0x7ff0000000000000,1 +np.float64,0x800f1dc88d3e3b91,0x3ff0000000000000,1 +np.float64,0x7fc3213cfa264279,0x7ff0000000000000,1 +np.float64,0x3fe40e0f3d681c1e,0x3ff33f135e9d5ded,1 +np.float64,0x7febf14e51f7e29c,0x7ff0000000000000,1 +np.float64,0xffe96c630c72d8c5,0x7ff0000000000000,1 +np.float64,0x7fdd82fbe7bb05f7,0x7ff0000000000000,1 +np.float64,0xbf9a6a0b1034d420,0x3ff0015ce009f7d8,1 +np.float64,0xbfceb4f8153d69f0,0x3ff0766e3ecc77df,1 +np.float64,0x3fd9de31e633bc64,0x3ff15327b794a16e,1 +np.float64,0x3faa902a30352054,0x3ff00583848d1969,1 +np.float64,0x0,0x3ff0000000000000,1 +np.float64,0x3fbe3459c43c68b4,0x3ff01c8af6710ef6,1 +np.float64,0xbfa8df010031be00,0x3ff004d5632dc9f5,1 +np.float64,0x7fbcf6cf2a39ed9d,0x7ff0000000000000,1 +np.float64,0xffe4236202a846c4,0x7ff0000000000000,1 +np.float64,0x3fd35ed52e26bdaa,0x3ff0bd0b231f11f7,1 +np.float64,0x7fe7a2df532f45be,0x7ff0000000000000,1 +np.float64,0xffe32f8315665f06,0x7ff0000000000000,1 +np.float64,0x7fe1a69f03e34d3d,0x7ff0000000000000,1 +np.float64,0x7fa5542b742aa856,0x7ff0000000000000,1 +np.float64,0x3fe84e9f8ef09d3f,0x3ff4d79816359765,1 +np.float64,0x29076fe6520ef,0x3ff0000000000000,1 +np.float64,0xffd70894f7ae112a,0x7ff0000000000000,1 +np.float64,0x800188edcbe311dc,0x3ff0000000000000,1 +np.float64,0x3fe2c7acda258f5a,0x3ff2d5dad4617703,1 +np.float64,0x3f775d41a02ebb00,0x3ff000110f212445,1 +np.float64,0x7fe8a084d1714109,0x7ff0000000000000,1 +np.float64,0x3fe31562d8a62ac6,0x3ff2ee35055741cd,1 +np.float64,0xbfd195d4d1a32baa,0x3ff09b98a50c151b,1 +np.float64,0xffaae9ff0c35d400,0x7ff0000000000000,1 
+np.float64,0xff819866502330c0,0x7ff0000000000000,1 +np.float64,0x7fddc64815bb8c8f,0x7ff0000000000000,1 +np.float64,0xbfd442b428288568,0x3ff0cef70aa73ae6,1 +np.float64,0x8002e7625aa5cec5,0x3ff0000000000000,1 +np.float64,0x7fe8d4f70e71a9ed,0x7ff0000000000000,1 +np.float64,0xbfc3bd015f277a04,0x3ff030cbf16f29d9,1 +np.float64,0x3fd315d5baa62bab,0x3ff0b77a551a5335,1 +np.float64,0x7fa638b4642c7168,0x7ff0000000000000,1 +np.float64,0x3fdea8b795bd516f,0x3ff1df0bb70cdb79,1 +np.float64,0xbfd78754762f0ea8,0x3ff117ee0f29abed,1 +np.float64,0x8009f6a37633ed47,0x3ff0000000000000,1 +np.float64,0x3fea1daf75343b5f,0x3ff5a1804789bf13,1 +np.float64,0x3fd044b6c0a0896e,0x3ff0850b7297d02f,1 +np.float64,0x8003547a9c86a8f6,0x3ff0000000000000,1 +np.float64,0x3fa6c2cd782d859b,0x3ff0040c4ac8f44a,1 +np.float64,0x3fe225baaae44b76,0x3ff2a47f5e1f5e85,1 +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0x3fcb53da8736a7b8,0x3ff05db45af470ac,1 +np.float64,0x80079f8f140f3f1f,0x3ff0000000000000,1 +np.float64,0xbfcd1d7e2b3a3afc,0x3ff06a6b6845d05f,1 +np.float64,0x96df93672dbf3,0x3ff0000000000000,1 +np.float64,0xdef86e43bdf0e,0x3ff0000000000000,1 +np.float64,0xbfec05a09db80b41,0x3ff6896b768eea08,1 +np.float64,0x7fe3ff91d267ff23,0x7ff0000000000000,1 +np.float64,0xffea3eaa07347d53,0x7ff0000000000000,1 +np.float64,0xbfebde1cc1f7bc3a,0x3ff675e34ac2afc2,1 +np.float64,0x629bcde8c537a,0x3ff0000000000000,1 +np.float64,0xbfdde4fcff3bc9fa,0x3ff1c7061d21f0fe,1 +np.float64,0x3fee60fd003cc1fa,0x3ff7c49af3878a51,1 +np.float64,0x3fe5c92ac32b9256,0x3ff3da7a7929588b,1 +np.float64,0xbfe249c78f64938f,0x3ff2af52a06f1a50,1 +np.float64,0xbfc6de9dbe2dbd3c,0x3ff0418d284ee29f,1 +np.float64,0xffc8ef094631de14,0x7ff0000000000000,1 +np.float64,0x3fdef05f423de0bf,0x3ff1e800caba8ab5,1 +np.float64,0xffc1090731221210,0x7ff0000000000000,1 +np.float64,0xbfedec9b5fbbd937,0x3ff7854b6792a24a,1 +np.float64,0xbfb873507630e6a0,0x3ff012b23b3b7a67,1 +np.float64,0xbfe3cd6692679acd,0x3ff3299d6936ec4b,1 +np.float64,0xbfb107c890220f90,0x3ff0091122162472,1 +np.float64,0xbfe4e6ee48e9cddc,0x3ff3894e5a5e70a6,1 +np.float64,0xffe6fa3413edf468,0x7ff0000000000000,1 +np.float64,0x3fe2faf79b65f5ef,0x3ff2e5e11fae8b54,1 +np.float64,0xbfdfeb8df9bfd71c,0x3ff208189691b15f,1 +np.float64,0x75d2d03ceba5b,0x3ff0000000000000,1 +np.float64,0x3feb48c182b69183,0x3ff62d4462eba6cb,1 +np.float64,0xffcda9f7ff3b53f0,0x7ff0000000000000,1 +np.float64,0x7fcafbdcbd35f7b8,0x7ff0000000000000,1 +np.float64,0xbfd1895523a312aa,0x3ff09aba642a78d9,1 +np.float64,0x3fe3129c3f662538,0x3ff2ed546bbfafcf,1 +np.float64,0x3fb444dee02889be,0x3ff00cd86273b964,1 +np.float64,0xbf73b32d7ee77,0x3ff0000000000000,1 +np.float64,0x3fae19904c3c3321,0x3ff00714865c498a,1 +np.float64,0x7fefbfaef5bf7f5d,0x7ff0000000000000,1 +np.float64,0x8000dc3816e1b871,0x3ff0000000000000,1 +np.float64,0x8003f957ba47f2b0,0x3ff0000000000000,1 +np.float64,0xbfe3563c7ea6ac79,0x3ff302dcebc92856,1 +np.float64,0xbfdc80fbae3901f8,0x3ff19cfe73e58092,1 +np.float64,0x8009223b04524476,0x3ff0000000000000,1 +np.float64,0x3fd95f431c32be86,0x3ff1461c21cb03f0,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfe7c12ed3ef825e,0x3ff49d59c265efcd,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0x7fc5e2632f2bc4c5,0x7ff0000000000000,1 +np.float64,0xffd8f6b4c7b1ed6a,0x7ff0000000000000,1 +np.float64,0x80034b93d4069728,0x3ff0000000000000,1 +np.float64,0xffdf5d4c1dbeba98,0x7ff0000000000000,1 +np.float64,0x800bc63d70178c7b,0x3ff0000000000000,1 +np.float64,0xbfeba31ea0f7463d,0x3ff658fa27073d2b,1 
+np.float64,0xbfeebeede97d7ddc,0x3ff7f89a8e80dec4,1 +np.float64,0x7feb0f1f91361e3e,0x7ff0000000000000,1 +np.float64,0xffec3158d0b862b1,0x7ff0000000000000,1 +np.float64,0x3fde51cbfbbca398,0x3ff1d44c2ff15b3d,1 +np.float64,0xd58fb2b3ab1f7,0x3ff0000000000000,1 +np.float64,0x80028b9e32e5173d,0x3ff0000000000000,1 +np.float64,0x7fea77a56c74ef4a,0x7ff0000000000000,1 +np.float64,0x3fdaabbd4a35577b,0x3ff168d82edf2fe0,1 +np.float64,0xbfe69c39cc2d3874,0x3ff429b2f4cdb362,1 +np.float64,0x3b78f5d876f20,0x3ff0000000000000,1 +np.float64,0x7fa47d116428fa22,0x7ff0000000000000,1 +np.float64,0xbfe4118b0ce82316,0x3ff3403d989f780f,1 +np.float64,0x800482e793c905d0,0x3ff0000000000000,1 +np.float64,0xbfe48e5728e91cae,0x3ff36a9020bf9d20,1 +np.float64,0x7fe078ba8860f174,0x7ff0000000000000,1 +np.float64,0x3fd80843e5b01088,0x3ff1242f401e67da,1 +np.float64,0x3feb1f6965f63ed3,0x3ff6197fc590e143,1 +np.float64,0xffa41946d8283290,0x7ff0000000000000,1 +np.float64,0xffe30de129661bc2,0x7ff0000000000000,1 +np.float64,0x3fec9c8e1ab9391c,0x3ff6d542ea2f49b4,1 +np.float64,0x3fdc3e4490387c89,0x3ff1955ae18cac37,1 +np.float64,0xffef49d9c77e93b3,0x7ff0000000000000,1 +np.float64,0xfff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fe0442455608849,0x3ff21cab90067d5c,1 +np.float64,0xbfed86aebd3b0d5e,0x3ff74ed8d4b75f50,1 +np.float64,0xffe4600d2b28c01a,0x7ff0000000000000,1 +np.float64,0x7fc1e8ccff23d199,0x7ff0000000000000,1 +np.float64,0x8008d49b0091a936,0x3ff0000000000000,1 +np.float64,0xbfe4139df028273c,0x3ff340ef3c86227c,1 +np.float64,0xbfe9ab4542b3568a,0x3ff56dfe32061247,1 +np.float64,0xbfd76dd365aedba6,0x3ff11589bab5fe71,1 +np.float64,0x3fd42cf829a859f0,0x3ff0cd3844bb0e11,1 +np.float64,0x7fd077cf2e20ef9d,0x7ff0000000000000,1 +np.float64,0x3fd7505760aea0b0,0x3ff112c937b3f088,1 +np.float64,0x1f93341a3f267,0x3ff0000000000000,1 +np.float64,0x7fe3c3c1b0678782,0x7ff0000000000000,1 +np.float64,0x800f85cec97f0b9e,0x3ff0000000000000,1 +np.float64,0xd93ab121b2756,0x3ff0000000000000,1 +np.float64,0xbfef8066fd7f00ce,0x3ff8663ed7d15189,1 +np.float64,0xffe31dd4af663ba9,0x7ff0000000000000,1 +np.float64,0xbfd7ff05a6affe0c,0x3ff1234c09bb686d,1 +np.float64,0xbfe718c31fee3186,0x3ff45a0c2d0ef7b0,1 +np.float64,0x800484bf33e9097f,0x3ff0000000000000,1 +np.float64,0xffd409dad02813b6,0x7ff0000000000000,1 +np.float64,0x3fe59679896b2cf4,0x3ff3c7f49e4fbbd3,1 +np.float64,0xbfd830c54d30618a,0x3ff1281729861390,1 +np.float64,0x1d4fc81c3a9fa,0x3ff0000000000000,1 +np.float64,0x3fd334e4272669c8,0x3ff0b9d5d82894f0,1 +np.float64,0xffc827e65c304fcc,0x7ff0000000000000,1 +np.float64,0xffe2d1814aa5a302,0x7ff0000000000000,1 +np.float64,0xffd7b5b8d32f6b72,0x7ff0000000000000,1 +np.float64,0xbfdbc9f077b793e0,0x3ff18836b9106ad0,1 +np.float64,0x7fc724c2082e4983,0x7ff0000000000000,1 +np.float64,0x3fa39ed72c273da0,0x3ff00302051ce17e,1 +np.float64,0xbfe3c4c209678984,0x3ff326c4fd16b5cd,1 +np.float64,0x7fe91f6d00f23ed9,0x7ff0000000000000,1 +np.float64,0x8004ee93fea9dd29,0x3ff0000000000000,1 +np.float64,0xbfe7c32d0eaf865a,0x3ff49e290ed2ca0e,1 +np.float64,0x800ea996b29d532d,0x3ff0000000000000,1 +np.float64,0x2df9ec1c5bf3e,0x3ff0000000000000,1 +np.float64,0xabb175df5762f,0x3ff0000000000000,1 +np.float64,0xffe3fc9c8e27f938,0x7ff0000000000000,1 +np.float64,0x7fb358a62826b14b,0x7ff0000000000000,1 +np.float64,0x800aedcccaf5db9a,0x3ff0000000000000,1 +np.float64,0xffca530c5234a618,0x7ff0000000000000,1 +np.float64,0x40f91e9681f24,0x3ff0000000000000,1 +np.float64,0x80098f4572f31e8b,0x3ff0000000000000,1 +np.float64,0xbfdc58c21fb8b184,0x3ff1986115f8fe92,1 
+np.float64,0xbfebeafd40b7d5fa,0x3ff67c3cf34036e3,1 +np.float64,0x7fd108861a22110b,0x7ff0000000000000,1 +np.float64,0xff8e499ae03c9340,0x7ff0000000000000,1 +np.float64,0xbfd2f58caa25eb1a,0x3ff0b50b1bffafdf,1 +np.float64,0x3fa040c9bc208193,0x3ff002105e95aefa,1 +np.float64,0xbfd2ebc0a5a5d782,0x3ff0b44ed5a11584,1 +np.float64,0xffe237bc93a46f78,0x7ff0000000000000,1 +np.float64,0x3fd557c5eeaaaf8c,0x3ff0e5e0a575e1ba,1 +np.float64,0x7abb419ef5769,0x3ff0000000000000,1 +np.float64,0xffefa1fe353f43fb,0x7ff0000000000000,1 +np.float64,0x3fa6f80ba02df017,0x3ff0041f51fa0d76,1 +np.float64,0xbfdce79488b9cf2a,0x3ff1a8e32877beb4,1 +np.float64,0x2285f3e4450bf,0x3ff0000000000000,1 +np.float64,0x3bf7eb7277efe,0x3ff0000000000000,1 +np.float64,0xbfd5925fd3ab24c0,0x3ff0eae1c2ac2e78,1 +np.float64,0xbfed6325227ac64a,0x3ff73c14a2ad5bfe,1 +np.float64,0x8000429c02408539,0x3ff0000000000000,1 +np.float64,0xb67c21e76cf84,0x3ff0000000000000,1 +np.float64,0x3fec3d3462f87a69,0x3ff6a51e4c027eb7,1 +np.float64,0x3feae69cbcf5cd3a,0x3ff5fe9387314afd,1 +np.float64,0x7fd0c9a0ec219341,0x7ff0000000000000,1 +np.float64,0x8004adb7f6295b71,0x3ff0000000000000,1 +np.float64,0xffd61fe8bb2c3fd2,0x7ff0000000000000,1 +np.float64,0xffe7fb3834aff670,0x7ff0000000000000,1 +np.float64,0x7fd1eef163a3dde2,0x7ff0000000000000,1 +np.float64,0x2e84547a5d08b,0x3ff0000000000000,1 +np.float64,0x8002d8875ee5b10f,0x3ff0000000000000,1 +np.float64,0x3fe1d1c5f763a38c,0x3ff28ba524fb6de8,1 +np.float64,0x8001dea0bc43bd42,0x3ff0000000000000,1 +np.float64,0xfecfad91fd9f6,0x3ff0000000000000,1 +np.float64,0xffed7965fa3af2cb,0x7ff0000000000000,1 +np.float64,0xbfe6102ccc2c205a,0x3ff3f4c082506686,1 +np.float64,0x3feff75b777feeb6,0x3ff8ab6222578e0c,1 +np.float64,0x3fb8a97bd43152f8,0x3ff013057f0a9d89,1 +np.float64,0xffe234b5e964696c,0x7ff0000000000000,1 +np.float64,0x984d9137309b2,0x3ff0000000000000,1 +np.float64,0xbfe42e9230e85d24,0x3ff349fb7d1a7560,1 +np.float64,0xbfecc8b249f99165,0x3ff6ebd0fea0ea72,1 +np.float64,0x8000840910410813,0x3ff0000000000000,1 +np.float64,0xbfd81db9e7303b74,0x3ff126402d3539ec,1 +np.float64,0x800548eb7fea91d8,0x3ff0000000000000,1 +np.float64,0xbfe4679ad0e8cf36,0x3ff35d4db89296a3,1 +np.float64,0x3fd4c55b5a298ab7,0x3ff0d99da31081f9,1 +np.float64,0xbfa8f5b38c31eb60,0x3ff004de3a23b32d,1 +np.float64,0x80005d348e80ba6a,0x3ff0000000000000,1 +np.float64,0x800c348d6118691b,0x3ff0000000000000,1 +np.float64,0xffd6b88f84ad7120,0x7ff0000000000000,1 +np.float64,0x3fc1aaaa82235555,0x3ff027136afd08e0,1 +np.float64,0x7fca7d081b34fa0f,0x7ff0000000000000,1 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0xbfdc810d1139021a,0x3ff19d007408cfe3,1 +np.float64,0xbfe5dce05f2bb9c0,0x3ff3e1bb9234617b,1 +np.float64,0xffecfe2c32b9fc58,0x7ff0000000000000,1 +np.float64,0x95b2891b2b651,0x3ff0000000000000,1 +np.float64,0x8000b60c6c616c1a,0x3ff0000000000000,1 +np.float64,0x4944f0889289f,0x3ff0000000000000,1 +np.float64,0x3fe6e508696dca10,0x3ff445d1b94863e9,1 +np.float64,0xbfe63355d0ec66ac,0x3ff401e74f16d16f,1 +np.float64,0xbfe9b9595af372b3,0x3ff57445e1b4d670,1 +np.float64,0x800e16f7313c2dee,0x3ff0000000000000,1 +np.float64,0xffe898f5f0b131eb,0x7ff0000000000000,1 +np.float64,0x3fe91ac651f2358d,0x3ff52e787c21c004,1 +np.float64,0x7fbfaac6783f558c,0x7ff0000000000000,1 +np.float64,0xd8ef3dfbb1de8,0x3ff0000000000000,1 +np.float64,0xbfc58c13a52b1828,0x3ff03a2c19d65019,1 +np.float64,0xbfbde55e8a3bcac0,0x3ff01bf648a3e0a7,1 +np.float64,0xffc3034930260694,0x7ff0000000000000,1 +np.float64,0xea77a64dd4ef5,0x3ff0000000000000,1 +np.float64,0x800cfe7e7739fcfd,0x3ff0000000000000,1 
+np.float64,0x4960f31a92c1f,0x3ff0000000000000,1
+np.float64,0x3fd9552c94b2aa58,0x3ff14515a29add09,1
+np.float64,0xffe8b3244c316648,0x7ff0000000000000,1
+np.float64,0x3fe8201e6a70403d,0x3ff4c444fa679cce,1
+np.float64,0xffe9ab7c20f356f8,0x7ff0000000000000,1
+np.float64,0x3fed8bba5f7b1774,0x3ff751853c4c95c5,1
+np.float64,0x8007639cb76ec73a,0x3ff0000000000000,1
+np.float64,0xbfe396db89672db7,0x3ff317bfd1d6fa8c,1
+np.float64,0xbfeb42f888f685f1,0x3ff62a7e0eee56b1,1
+np.float64,0x3fe894827c712904,0x3ff4f4f561d9ea13,1
+np.float64,0xb66b3caf6cd68,0x3ff0000000000000,1
+np.float64,0x800f8907fdbf1210,0x3ff0000000000000,1
+np.float64,0x7fe9b0cddb73619b,0x7ff0000000000000,1
+np.float64,0xbfda70c0e634e182,0x3ff1628c6fdffc53,1
+np.float64,0x3fe0b5f534a16bea,0x3ff23b4ed4c2b48e,1
+np.float64,0xbfe8eee93671ddd2,0x3ff51b85b3c50ae4,1
+np.float64,0xbfe8c22627f1844c,0x3ff50858787a3bfe,1
+np.float64,0x37bb83c86f771,0x3ff0000000000000,1
+np.float64,0xffb7827ffe2f0500,0x7ff0000000000000,1
+np.float64,0x64317940c864,0x3ff0000000000000,1
+np.float64,0x800430ecee6861db,0x3ff0000000000000,1
+np.float64,0x3fa4291fbc285240,0x3ff0032d0204f6dd,1
+np.float64,0xffec69f76af8d3ee,0x7ff0000000000000,1
+np.float64,0x3ff0000000000000,0x3ff8b07551d9f550,1
+np.float64,0x3fc4cf3c42299e79,0x3ff0363fb1d3c254,1
+np.float64,0x7fe0223a77e04474,0x7ff0000000000000,1
+np.float64,0x800a3d4fa4347aa0,0x3ff0000000000000,1
+np.float64,0x3fdd273f94ba4e7f,0x3ff1b05b686e6879,1
+np.float64,0x3feca79052f94f20,0x3ff6dadedfa283aa,1
+np.float64,0x5e7f6f80bcfef,0x3ff0000000000000,1
+np.float64,0xbfef035892fe06b1,0x3ff81efb39cbeba2,1
+np.float64,0x3fee6c08e07cd812,0x3ff7caad952860a1,1
+np.float64,0xffeda715877b4e2a,0x7ff0000000000000,1
+np.float64,0x800580286b0b0052,0x3ff0000000000000,1
+np.float64,0x800703a73fee074f,0x3ff0000000000000,1
+np.float64,0xbfccf96a6639f2d4,0x3ff0696330a60832,1
+np.float64,0x7feb408442368108,0x7ff0000000000000,1
+np.float64,0x3fedc87a46fb90f5,0x3ff771e3635649a9,1
+np.float64,0x3fd8297b773052f7,0x3ff12762bc0cea76,1
+np.float64,0x3fee41bb03fc8376,0x3ff7b37b2da48ab4,1
+np.float64,0xbfe2b05a226560b4,0x3ff2cea17ae7c528,1
+np.float64,0xbfd2e92cf2a5d25a,0x3ff0b41d605ced61,1
+np.float64,0x4817f03a902ff,0x3ff0000000000000,1
+np.float64,0x8c9d4f0d193aa,0x3ff0000000000000,1
diff --git a/python/numpy/_core/tests/data/umath-validation-set-exp.csv b/python/numpy/_core/tests/data/umath-validation-set-exp.csv
new file mode 100644
index 000000000..7c5ef3b33
--- /dev/null
+++ b/python/numpy/_core/tests/data/umath-validation-set-exp.csv
@@ -0,0 +1,412 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x3f800000,3
+np.float32,0x007b2490,0x3f800000,3
+np.float32,0x007c99fa,0x3f800000,3
+np.float32,0x00734a0c,0x3f800000,3
+np.float32,0x0070de24,0x3f800000,3
+np.float32,0x00495d65,0x3f800000,3
+np.float32,0x006894f6,0x3f800000,3
+np.float32,0x00555a76,0x3f800000,3
+np.float32,0x004e1fb8,0x3f800000,3
+np.float32,0x00687de9,0x3f800000,3
+## -ve denormals ##
+np.float32,0x805b59af,0x3f800000,3
+np.float32,0x807ed8ed,0x3f800000,3
+np.float32,0x807142ad,0x3f800000,3
+np.float32,0x80772002,0x3f800000,3
+np.float32,0x8062abcb,0x3f800000,3
+np.float32,0x8045e31c,0x3f800000,3
+np.float32,0x805f01c2,0x3f800000,3
+np.float32,0x80506432,0x3f800000,3
+np.float32,0x8060089d,0x3f800000,3
+np.float32,0x8071292f,0x3f800000,3
+## floats that output a denormal ##
+np.float32,0xc2cf3fc1,0x00000001,3
+np.float32,0xc2c79726,0x00000021,3
+np.float32,0xc2cb295d,0x00000005,3
+np.float32,0xc2b49e6b,0x00068c4c,3
+np.float32,0xc2ca8116,0x00000008,3
+np.float32,0xc2c23f82,0x000001d7,3 +np.float32,0xc2cb69c0,0x00000005,3 +np.float32,0xc2cc1f4d,0x00000003,3 +np.float32,0xc2ae094e,0x00affc4c,3 +np.float32,0xc2c86c44,0x00000015,3 +## random floats between -87.0f and 88.0f ## +np.float32,0x4030d7e0,0x417d9a05,3 +np.float32,0x426f60e8,0x6aa1be2c,3 +np.float32,0x41a1b220,0x4e0efc11,3 +np.float32,0xc20cc722,0x26159da7,3 +np.float32,0x41c492bc,0x512ec79d,3 +np.float32,0x40980210,0x42e73a0e,3 +np.float32,0xbf1f7b80,0x3f094de3,3 +np.float32,0x42a678a4,0x7b87a383,3 +np.float32,0xc20f3cfd,0x25a1c304,3 +np.float32,0x423ff34c,0x6216467f,3 +np.float32,0x00000000,0x3f800000,3 +## floats that cause an overflow ## +np.float32,0x7f06d8c1,0x7f800000,3 +np.float32,0x7f451912,0x7f800000,3 +np.float32,0x7ecceac3,0x7f800000,3 +np.float32,0x7f643b45,0x7f800000,3 +np.float32,0x7e910ea0,0x7f800000,3 +np.float32,0x7eb4756b,0x7f800000,3 +np.float32,0x7f4ec708,0x7f800000,3 +np.float32,0x7f6b4551,0x7f800000,3 +np.float32,0x7d8edbda,0x7f800000,3 +np.float32,0x7f730718,0x7f800000,3 +np.float32,0x42b17217,0x7f7fff84,3 +np.float32,0x42b17218,0x7f800000,3 +np.float32,0x42b17219,0x7f800000,3 +np.float32,0xfef2b0bc,0x00000000,3 +np.float32,0xff69f83e,0x00000000,3 +np.float32,0xff4ecb12,0x00000000,3 +np.float32,0xfeac6d86,0x00000000,3 +np.float32,0xfde0cdb8,0x00000000,3 +np.float32,0xff26aef4,0x00000000,3 +np.float32,0xff6f9277,0x00000000,3 +np.float32,0xff7adfc4,0x00000000,3 +np.float32,0xff0ad40e,0x00000000,3 +np.float32,0xff6fd8f3,0x00000000,3 +np.float32,0xc2cff1b4,0x00000001,3 +np.float32,0xc2cff1b5,0x00000000,3 +np.float32,0xc2cff1b6,0x00000000,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0xff800000,0x00000000,3 +np.float32,0x4292f27c,0x7480000a,3 +np.float32,0x42a920be,0x7c7fff94,3 +np.float32,0x41c214c9,0x50ffffd9,3 +np.float32,0x41abe686,0x4effffd9,3 +np.float32,0x4287db5a,0x707fffd3,3 +np.float32,0x41902cbb,0x4c800078,3 +np.float32,0x42609466,0x67ffffeb,3 +np.float32,0x41a65af5,0x4e7fffd1,3 +np.float32,0x417f13ff,0x4affffc9,3 +np.float32,0x426d0e6c,0x6a3504f2,3 +np.float32,0x41bc8934,0x507fff51,3 +np.float32,0x42a7bdde,0x7c0000d6,3 +np.float32,0x4120cf66,0x46b504f6,3 +np.float32,0x4244da8f,0x62ffff1a,3 +np.float32,0x41a0cf69,0x4e000034,3 +np.float32,0x41cd2bec,0x52000005,3 +np.float32,0x42893e41,0x7100009e,3 +np.float32,0x41b437e1,0x4fb50502,3 +np.float32,0x41d8430f,0x5300001d,3 +np.float32,0x4244da92,0x62ffffda,3 +np.float32,0x41a0cf63,0x4dffffa9,3 +np.float32,0x3eb17218,0x3fb504f3,3 +np.float32,0x428729e8,0x703504dc,3 +np.float32,0x41a0cf67,0x4e000014,3 +np.float32,0x4252b77d,0x65800011,3 +np.float32,0x41902cb9,0x4c800058,3 +np.float32,0x42a0cf67,0x79800052,3 +np.float32,0x4152b77b,0x48ffffe9,3 +np.float32,0x41265af3,0x46ffffc8,3 +np.float32,0x42187e0b,0x5affff9a,3 +np.float32,0xc0d2b77c,0x3ab504f6,3 +np.float32,0xc283b2ac,0x10000072,3 +np.float32,0xc1cff1b4,0x2cb504f5,3 +np.float32,0xc05dce9e,0x3d000000,3 +np.float32,0xc28ec9d2,0x0bfffea5,3 +np.float32,0xc23c893a,0x1d7fffde,3 +np.float32,0xc2a920c0,0x027fff6c,3 +np.float32,0xc1f9886f,0x2900002b,3 +np.float32,0xc2c42920,0x000000b5,3 +np.float32,0xc2893e41,0x0dfffec5,3 +np.float32,0xc2c4da93,0x00000080,3 +np.float32,0xc17f1401,0x3400000c,3 +np.float32,0xc1902cb6,0x327fffaf,3 +np.float32,0xc27c4e3b,0x11ffffc5,3 +np.float32,0xc268e5c5,0x157ffe9d,3 +np.float32,0xc2b4e953,0x0005a826,3 +np.float32,0xc287db5a,0x0e800016,3 +np.float32,0xc207db5a,0x2700000b,3 +np.float32,0xc2b2d4fe,0x000ffff1,3 +np.float32,0xc268e5c0,0x157fffdd,3 +np.float32,0xc22920bd,0x2100003b,3 +np.float32,0xc2902caf,0x0b80011e,3 
+np.float32,0xc1902cba,0x327fff2f,3 +np.float32,0xc2ca6625,0x00000008,3 +np.float32,0xc280ece8,0x10fffeb5,3 +np.float32,0xc2918f94,0x0b0000ea,3 +np.float32,0xc29b43d5,0x077ffffc,3 +np.float32,0xc1e61ff7,0x2ab504f5,3 +np.float32,0xc2867878,0x0effff15,3 +np.float32,0xc2a2324a,0x04fffff4,3 +#float64 +## near zero ## +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0x8360000000000000,0x3ff0000000000000,1 +np.float64,0x9a70000000000000,0x3ff0000000000000,1 +np.float64,0xb9b0000000000000,0x3ff0000000000000,1 +np.float64,0xb810000000000000,0x3ff0000000000000,1 +np.float64,0xbc30000000000000,0x3ff0000000000000,1 +np.float64,0xb6a0000000000000,0x3ff0000000000000,1 +np.float64,0x0000000000000000,0x3ff0000000000000,1 +np.float64,0x0010000000000000,0x3ff0000000000000,1 +np.float64,0x0000000000000001,0x3ff0000000000000,1 +np.float64,0x0360000000000000,0x3ff0000000000000,1 +np.float64,0x1a70000000000000,0x3ff0000000000000,1 +np.float64,0x3c30000000000000,0x3ff0000000000000,1 +np.float64,0x36a0000000000000,0x3ff0000000000000,1 +np.float64,0x39b0000000000000,0x3ff0000000000000,1 +np.float64,0x3810000000000000,0x3ff0000000000000,1 +## underflow ## +np.float64,0xc0c6276800000000,0x0000000000000000,1 +np.float64,0xc0c62d918ce2421d,0x0000000000000000,1 +np.float64,0xc0c62d918ce2421e,0x0000000000000000,1 +np.float64,0xc0c62d91a0000000,0x0000000000000000,1 +np.float64,0xc0c62d9180000000,0x0000000000000000,1 +np.float64,0xc0c62dea45ee3e06,0x0000000000000000,1 +np.float64,0xc0c62dea45ee3e07,0x0000000000000000,1 +np.float64,0xc0c62dea40000000,0x0000000000000000,1 +np.float64,0xc0c62dea60000000,0x0000000000000000,1 +np.float64,0xc0875f1120000000,0x0000000000000000,1 +np.float64,0xc0875f113c30b1c8,0x0000000000000000,1 +np.float64,0xc0875f1140000000,0x0000000000000000,1 +np.float64,0xc093480000000000,0x0000000000000000,1 +np.float64,0xffefffffffffffff,0x0000000000000000,1 +np.float64,0xc7efffffe0000000,0x0000000000000000,1 +## overflow ## +np.float64,0x40862e52fefa39ef,0x7ff0000000000000,1 +np.float64,0x40872e42fefa39ef,0x7ff0000000000000,1 +## +/- INF, +/- NAN ## +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xfff0000000000000,0x0000000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xfff8000000000000,0xfff8000000000000,1 +## output denormal ## +np.float64,0xc087438520000000,0x0000000000000001,1 +np.float64,0xc08743853f2f4461,0x0000000000000001,1 +np.float64,0xc08743853f2f4460,0x0000000000000001,1 +np.float64,0xc087438540000000,0x0000000000000001,1 +## between -745.13321910 and 709.78271289 ## +np.float64,0xbff760cd14774bd9,0x3fcdb14ced00ceb6,1 +np.float64,0xbff760cd20000000,0x3fcdb14cd7993879,1 +np.float64,0xbff760cd00000000,0x3fcdb14d12fbd264,1 +np.float64,0xc07f1cf360000000,0x130c1b369af14fda,1 +np.float64,0xbeb0000000000000,0x3feffffe00001000,1 +np.float64,0xbd70000000000000,0x3fefffffffffe000,1 +np.float64,0xc084fd46e5c84952,0x0360000000000139,1 +np.float64,0xc084fd46e5c84953,0x035ffffffffffe71,1 +np.float64,0xc084fd46e0000000,0x0360000b9096d32c,1 +np.float64,0xc084fd4700000000,0x035fff9721d12104,1 +np.float64,0xc086232bc0000000,0x0010003af5e64635,1 +np.float64,0xc086232bdd7abcd2,0x001000000000007c,1 +np.float64,0xc086232bdd7abcd3,0x000ffffffffffe7c,1 +np.float64,0xc086232be0000000,0x000ffffaf57a6fc9,1 +np.float64,0xc086233920000000,0x000fe590e3b45eb0,1 +np.float64,0xc086233938000000,0x000fe56133493c57,1 
+np.float64,0xc086233940000000,0x000fe5514deffbbc,1
+np.float64,0xc086234c98000000,0x000fbf1024c32ccb,1
+np.float64,0xc086234ca0000000,0x000fbf0065bae78d,1
+np.float64,0xc086234c80000000,0x000fbf3f623a7724,1
+np.float64,0xc086234ec0000000,0x000fbad237c846f9,1
+np.float64,0xc086234ec8000000,0x000fbac27cfdec97,1
+np.float64,0xc086234ee0000000,0x000fba934cfd3dc2,1
+np.float64,0xc086234ef0000000,0x000fba73d7f618d9,1
+np.float64,0xc086234f00000000,0x000fba54632dddc0,1
+np.float64,0xc0862356e0000000,0x000faae0945b761a,1
+np.float64,0xc0862356f0000000,0x000faac13eb9a310,1
+np.float64,0xc086235700000000,0x000faaa1e9567b0a,1
+np.float64,0xc086236020000000,0x000f98cd75c11ed7,1
+np.float64,0xc086236ca0000000,0x000f8081b4d93f89,1
+np.float64,0xc086236cb0000000,0x000f8062b3f4d6c5,1
+np.float64,0xc086236cc0000000,0x000f8043b34e6f8c,1
+np.float64,0xc086238d98000000,0x000f41220d9b0d2c,1
+np.float64,0xc086238da0000000,0x000f4112cc80a01f,1
+np.float64,0xc086238d80000000,0x000f414fd145db5b,1
+np.float64,0xc08624fd00000000,0x000cbfce8ea1e6c4,1
+np.float64,0xc086256080000000,0x000c250747fcd46e,1
+np.float64,0xc08626c480000000,0x000a34f4bd975193,1
+np.float64,0xbf50000000000000,0x3feff800ffeaac00,1
+np.float64,0xbe10000000000000,0x3fefffffff800000,1
+np.float64,0xbcd0000000000000,0x3feffffffffffff8,1
+np.float64,0xc055d589e0000000,0x38100004bf94f63e,1
+np.float64,0xc055d58a00000000,0x380ffff97f292ce8,1
+np.float64,0xbfd962d900000000,0x3fe585a4b00110e1,1
+np.float64,0x3ff4bed280000000,0x400d411e7a58a303,1
+np.float64,0x3fff0b3620000000,0x401bd7737ffffcf3,1
+np.float64,0x3ff0000000000000,0x4005bf0a8b145769,1
+np.float64,0x3eb0000000000000,0x3ff0000100000800,1
+np.float64,0x3d70000000000000,0x3ff0000000001000,1
+np.float64,0x40862e42e0000000,0x7fefff841808287f,1
+np.float64,0x40862e42fefa39ef,0x7fefffffffffff2a,1
+np.float64,0x40862e0000000000,0x7feef85a11e73f2d,1
+np.float64,0x4000000000000000,0x401d8e64b8d4ddae,1
+np.float64,0x4009242920000000,0x40372a52c383a488,1
+np.float64,0x4049000000000000,0x44719103e4080b45,1
+np.float64,0x4008000000000000,0x403415e5bf6fb106,1
+np.float64,0x3f50000000000000,0x3ff00400800aab55,1
+np.float64,0x3e10000000000000,0x3ff0000000400000,1
+np.float64,0x3cd0000000000000,0x3ff0000000000004,1
+np.float64,0x40562e40a0000000,0x47effed088821c3f,1
+np.float64,0x40562e42e0000000,0x47effff082e6c7ff,1
+np.float64,0x40562e4300000000,0x47f00000417184b8,1
+np.float64,0x3fe8000000000000,0x4000ef9db467dcf8,1
+np.float64,0x402b12e8d4f33589,0x412718f68c71a6fe,1
+np.float64,0x402b12e8d4f3358a,0x412718f68c71a70a,1
+np.float64,0x402b12e8c0000000,0x412718f59a7f472e,1
+np.float64,0x402b12e8e0000000,0x412718f70c0eac62,1
+#use 1st entry
+np.float64,0x40631659AE147CB4,0x4db3a95025a4890f,1
+np.float64,0xC061B87D2E85A4E2,0x332640c8e2de2c51,1
+np.float64,0x405A4A50BE243AF4,0x496a45e4b7f0339a,1
+np.float64,0xC0839898B98EC5C6,0x0764027828830df4,1
+#use 2nd entry
+np.float64,0xC072428C44B6537C,0x2596ade838b96f3e,1
+np.float64,0xC053057C5E1AE9BF,0x3912c8fad18fdadf,1
+np.float64,0x407E89C78328BAA3,0x6bfe35d5b9a1a194,1
+np.float64,0x4083501B6DD87112,0x77a855503a38924e,1
+#use 3rd entry
+np.float64,0x40832C6195F24540,0x7741e73c80e5eb2f,1
+np.float64,0xC083D4CD557C2EC9,0x06b61727c2d2508e,1
+np.float64,0x400C48F5F67C99BD,0x404128820f02b92e,1
+np.float64,0x4056E36D9B2DF26A,0x4830f52ff34a8242,1
+#use 4th entry
+np.float64,0x4080FF700D8CBD06,0x70fa70df9bc30f20,1
+np.float64,0x406C276D39E53328,0x543eb8e20a8f4741,1
+np.float64,0xC070D6159BBD8716,0x27a4a0548c904a75,1
+np.float64,0xC052EBCF8ED61F83,0x391c0e92368d15e4,1
+#use 5th entry
+np.float64,0xC061F892A8AC5FBE,0x32f807a89efd3869,1
+np.float64,0x4021D885D2DBA085,0x40bd4dc86d3e3270,1
+np.float64,0x40767AEEEE7D4FCF,0x605e22851ee2afb7,1
+np.float64,0xC0757C5D75D08C80,0x20f0751599b992a2,1
+#use 6th entry
+np.float64,0x405ACF7A284C4CE3,0x499a4e0b7a27027c,1
+np.float64,0xC085A6C9E80D7AF5,0x0175914009d62ec2,1
+np.float64,0xC07E4C02F86F1DAE,0x1439269b29a9231e,1
+np.float64,0x4080D80F9691CC87,0x7088a6cdafb041de,1
+#use 7th entry
+np.float64,0x407FDFD84FBA0AC1,0x6deb1ae6f9bc4767,1
+np.float64,0x40630C06A1A2213D,0x4dac7a9d51a838b7,1
+np.float64,0x40685FDB30BB8B4F,0x5183f5cc2cac9e79,1
+np.float64,0x408045A2208F77F4,0x6ee299e08e2aa2f0,1
+#use 8th entry
+np.float64,0xC08104E391F5078B,0x0ed397b7cbfbd230,1
+np.float64,0xC031501CAEFAE395,0x3e6040fd1ea35085,1
+np.float64,0xC079229124F6247C,0x1babf4f923306b1e,1
+np.float64,0x407FB65F44600435,0x6db03beaf2512b8a,1
+#use 9th entry
+np.float64,0xC07EDEE8E8E8A5AC,0x136536cec9cbef48,1
+np.float64,0x4072BB4086099A14,0x5af4d3c3008b56cc,1
+np.float64,0x4050442A2EC42CB4,0x45cd393bd8fad357,1
+np.float64,0xC06AC28FB3D419B4,0x2ca1b9d3437df85f,1
+#use 10th entry
+np.float64,0x40567FC6F0A68076,0x480c977fd5f3122e,1
+np.float64,0x40620A2F7EDA59BB,0x4cf278e96f4ce4d7,1
+np.float64,0xC085044707CD557C,0x034aad6c968a045a,1
+np.float64,0xC07374EA5AC516AA,0x23dd6afdc03e83d5,1
+#use 11th entry
+np.float64,0x4073CC95332619C1,0x5c804b1498bbaa54,1
+np.float64,0xC0799FEBBE257F31,0x1af6a954c43b87d2,1
+np.float64,0x408159F19EA424F6,0x7200858efcbfc84d,1
+np.float64,0x404A81F6F24C0792,0x44b664a07ce5bbfa,1
+#use 12th entry
+np.float64,0x40295FF1EFB9A741,0x4113c0e74c52d7b0,1
+np.float64,0x4073975F4CC411DA,0x5c32be40b4fec2c1,1
+np.float64,0x406E9DE52E82A77E,0x56049c9a3f1ae089,1
+np.float64,0x40748C2F52560ED9,0x5d93bc14fd4cd23b,1
+#use 13th entry
+np.float64,0x4062A553CDC4D04C,0x4d6266bfde301318,1
+np.float64,0xC079EC1D63598AB7,0x1a88cb184dab224c,1
+np.float64,0xC0725C1CB3167427,0x25725b46f8a081f6,1
+np.float64,0x407888771D9B45F9,0x6353b1ec6bd7ce80,1
+#use 14th entry
+np.float64,0xC082CBA03AA89807,0x09b383723831ce56,1
+np.float64,0xC083A8961BB67DD7,0x0735b118d5275552,1
+np.float64,0xC076BC6ECA12E7E3,0x1f2222679eaef615,1
+np.float64,0xC072752503AA1A5B,0x254eb832242c77e1,1
+#use 15th entry
+np.float64,0xC058800792125DEC,0x371882372a0b48d4,1
+np.float64,0x4082909FD863E81C,0x7580d5f386920142,1
+np.float64,0xC071616F8FB534F9,0x26dbe20ef64a412b,1
+np.float64,0x406D1AB571CAA747,0x54ee0d55cb38ac20,1
+#use 16th entry
+np.float64,0x406956428B7DAD09,0x52358682c271237f,1
+np.float64,0xC07EFC2D9D17B621,0x133b3e77c27a4d45,1
+np.float64,0xC08469BAC5BA3CCA,0x050863e5f42cc52f,1
+np.float64,0x407189D9626386A5,0x593cb1c0b3b5c1d3,1
+#use 17th entry
+np.float64,0x4077E652E3DEB8C6,0x6269a10dcbd3c752,1
+np.float64,0x407674C97DB06878,0x605485dcc2426ec2,1
+np.float64,0xC07CE9969CF4268D,0x16386cf8996669f2,1
+np.float64,0x40780EE32D5847C4,0x62a436bd1abe108d,1
+#use 18th entry
+np.float64,0x4076C3AA5E1E8DA1,0x60c62f56a5e72e24,1
+np.float64,0xC0730AFC7239B9BE,0x24758ead095cec1e,1
+np.float64,0xC085CC2B9C420DDB,0x0109cdaa2e5694c1,1
+np.float64,0x406D0765CB6D7AA4,0x54e06f8dd91bd945,1
+#use 19th entry
+np.float64,0xC082D011F3B495E7,0x09a6647661d279c2,1
+np.float64,0xC072826AF8F6AFBC,0x253acd3cd224507e,1
+np.float64,0x404EB9C4810CEA09,0x457933dbf07e8133,1
+np.float64,0x408284FBC97C58CE,0x755f6eb234aa4b98,1
+#use 20th entry
+np.float64,0x40856008CF6EDC63,0x7d9c0b3c03f4f73c,1
+np.float64,0xC077CB2E9F013B17,0x1d9b3d3a166a55db,1
+np.float64,0xC0479CA3C20AD057,0x3bad40e081555b99,1
+np.float64,0x40844CD31107332A,0x7a821d70aea478e2,1
+#use 21st entry
+np.float64,0xC07C8FCC0BFCC844,0x16ba1cc8c539d19b,1
+np.float64,0xC085C4E9A3ABA488,0x011ff675ba1a2217,1
+np.float64,0x4074D538B32966E5,0x5dfd9d78043c6ad9,1
+np.float64,0xC0630CA16902AD46,0x3231a446074cede6,1
+#use 22nd entry
+np.float64,0xC06C826733D7D0B7,0x2b5f1078314d41e1,1
+np.float64,0xC0520DF55B2B907F,0x396c13a6ce8e833e,1
+np.float64,0xC080712072B0F437,0x107eae02d11d98ea,1
+np.float64,0x40528A6150E19EFB,0x469fdabda02228c5,1
+#use 23rd entry
+np.float64,0xC07B1D74B6586451,0x18d1253883ae3b48,1
+np.float64,0x4045AFD7867DAEC0,0x43d7d634fc4c5d98,1
+np.float64,0xC07A08B91F9ED3E2,0x1a60973e6397fc37,1
+np.float64,0x407B3ECF0AE21C8C,0x673e03e9d98d7235,1
+#use 24th entry
+np.float64,0xC078AEB6F30CEABF,0x1c530b93ab54a1b3,1
+np.float64,0x4084495006A41672,0x7a775b6dc7e63064,1
+np.float64,0x40830B1C0EBF95DD,0x76e1e6eed77cfb89,1
+np.float64,0x407D93E8F33D8470,0x6a9adbc9e1e4f1e5,1
+#use 25th entry
+np.float64,0x4066B11A09EFD9E8,0x504dd528065c28a7,1
+np.float64,0x408545823723AEEB,0x7d504a9b1844f594,1
+np.float64,0xC068C711F2CA3362,0x2e104f3496ea118e,1
+np.float64,0x407F317FCC3CA873,0x6cf0732c9948ebf4,1
+#use 26th entry
+np.float64,0x407AFB3EBA2ED50F,0x66dc28a129c868d5,1
+np.float64,0xC075377037708ADE,0x21531a329f3d793e,1
+np.float64,0xC07C30066A1F3246,0x174448baa16ded2b,1
+np.float64,0xC06689A75DE2ABD3,0x2fad70662fae230b,1
+#use 27th entry
+np.float64,0x4081514E9FCCF1E0,0x71e673b9efd15f44,1
+np.float64,0xC0762C710AF68460,0x1ff1ed7d8947fe43,1
+np.float64,0xC0468102FF70D9C4,0x3be0c3a8ff3419a3,1
+np.float64,0xC07EA4CEEF02A83E,0x13b908f085102c61,1
+#use 28th entry
+np.float64,0xC06290B04AE823C4,0x328a83da3c2e3351,1
+np.float64,0xC0770EB1D1C395FB,0x1eab281c1f1db5fe,1
+np.float64,0xC06F5D4D838A5BAE,0x29500ea32fb474ea,1
+np.float64,0x40723B3133B54C5D,0x5a3c82c7c3a2b848,1
+#use 29th entry
+np.float64,0x4085E6454CE3B4AA,0x7f20319b9638d06a,1
+np.float64,0x408389F2A0585D4B,0x7850667c58aab3d0,1
+np.float64,0xC0382798F9C8AE69,0x3dc1c79fe8739d6d,1
+np.float64,0xC08299D827608418,0x0a4335f76cdbaeb5,1
+#use 30th entry
+np.float64,0xC06F3DED43301BF1,0x2965670ae46750a8,1
+np.float64,0xC070CAF6BDD577D9,0x27b4aa4ffdd29981,1
+np.float64,0x4078529AD4B2D9F2,0x6305c12755d5e0a6,1
+np.float64,0xC055B14E75A31B96,0x381c2eda6d111e5d,1
+#use 31st entry
+np.float64,0x407B13EE414FA931,0x6700772c7544564d,1
+np.float64,0x407EAFDE9DE3EC54,0x6c346a0e49724a3c,1
+np.float64,0xC08362F398B9530D,0x07ffeddbadf980cb,1
+np.float64,0x407E865CDD9EEB86,0x6bf866cac5e0d126,1
+#use 32nd entry
+np.float64,0x407FB62DBC794C86,0x6db009f708ac62cb,1
+np.float64,0xC063D0BAA68CDDDE,0x31a3b2a51ce50430,1
+np.float64,0xC05E7706A2231394,0x34f24bead6fab5c9,1
+np.float64,0x4083E3A06FDE444E,0x79527b7a386d1937,1
diff --git a/python/numpy/_core/tests/data/umath-validation-set-exp2.csv b/python/numpy/_core/tests/data/umath-validation-set-exp2.csv
new file mode 100644
index 000000000..4e0a63e8e
--- /dev/null
+++ b/python/numpy/_core/tests/data/umath-validation-set-exp2.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0xbdfe94b0,0x3f6adda6,2
+np.float32,0x3f20f8f8,0x3fc5ec69,2
+np.float32,0x7040b5,0x3f800000,2
+np.float32,0x30ec5,0x3f800000,2
+np.float32,0x3eb63070,0x3fa3ce29,2
+np.float32,0xff4dda3d,0x0,2
+np.float32,0x805b832f,0x3f800000,2
+np.float32,0x3e883fb7,0x3f99ed8c,2
+np.float32,0x3f14d71f,0x3fbf8708,2
+np.float32,0xff7b1e55,0x0,2
+np.float32,0xbf691ac6,0x3f082fa2,2
+np.float32,0x7ee3e6ab,0x7f800000,2
+np.float32,0xbec6e2b4,0x3f439248,2
+np.float32,0xbf5f5ec2,0x3f0bd2c0,2
+np.float32,0x8025cc2c,0x3f800000,2 +np.float32,0x7e0d7672,0x7f800000,2 +np.float32,0xff4bbc5c,0x0,2 +np.float32,0xbd94fb30,0x3f73696b,2 +np.float32,0x6cc079,0x3f800000,2 +np.float32,0x803cf080,0x3f800000,2 +np.float32,0x71d418,0x3f800000,2 +np.float32,0xbf24a442,0x3f23ec1e,2 +np.float32,0xbe6c9510,0x3f5a1e1d,2 +np.float32,0xbe8fb284,0x3f52be38,2 +np.float32,0x7ea64754,0x7f800000,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x80620cfd,0x3f800000,2 +np.float32,0x3f3e20e8,0x3fd62e72,2 +np.float32,0x3f384600,0x3fd2d00e,2 +np.float32,0xff362150,0x0,2 +np.float32,0xbf349fa8,0x3f1cfaef,2 +np.float32,0xbf776cf2,0x3f0301a6,2 +np.float32,0x8021fc60,0x3f800000,2 +np.float32,0xbdb75280,0x3f70995c,2 +np.float32,0x7e9363a6,0x7f800000,2 +np.float32,0x7e728422,0x7f800000,2 +np.float32,0xfe91edc2,0x0,2 +np.float32,0x3f5f438c,0x3fea491d,2 +np.float32,0x3f2afae9,0x3fcb5c1f,2 +np.float32,0xbef8e766,0x3f36c448,2 +np.float32,0xba522c00,0x3f7fdb97,2 +np.float32,0xff18ee8c,0x0,2 +np.float32,0xbee8c5f4,0x3f3acd44,2 +np.float32,0x3e790448,0x3f97802c,2 +np.float32,0x3e8c9541,0x3f9ad571,2 +np.float32,0xbf03fa9f,0x3f331460,2 +np.float32,0x801ee053,0x3f800000,2 +np.float32,0xbf773230,0x3f03167f,2 +np.float32,0x356fd9,0x3f800000,2 +np.float32,0x8009cd88,0x3f800000,2 +np.float32,0x7f2bac51,0x7f800000,2 +np.float32,0x4d9eeb,0x3f800000,2 +np.float32,0x3133,0x3f800000,2 +np.float32,0x7f4290e0,0x7f800000,2 +np.float32,0xbf5e6523,0x3f0c3161,2 +np.float32,0x3f19182e,0x3fc1bf10,2 +np.float32,0x7e1248bb,0x7f800000,2 +np.float32,0xff5f7aae,0x0,2 +np.float32,0x7e8557b5,0x7f800000,2 +np.float32,0x26fc7f,0x3f800000,2 +np.float32,0x80397d61,0x3f800000,2 +np.float32,0x3cb1825d,0x3f81efe0,2 +np.float32,0x3ed808d0,0x3fab7c45,2 +np.float32,0xbf6f668a,0x3f05e259,2 +np.float32,0x3e3c7802,0x3f916abd,2 +np.float32,0xbd5ac5a0,0x3f76b21b,2 +np.float32,0x805aa6c9,0x3f800000,2 +np.float32,0xbe4d6f68,0x3f5ec3e1,2 +np.float32,0x3f3108b2,0x3fceb87f,2 +np.float32,0x3ec385cc,0x3fa6c9fb,2 +np.float32,0xbe9fc1ce,0x3f4e35e8,2 +np.float32,0x43b68,0x3f800000,2 +np.float32,0x3ef0cdcc,0x3fb15557,2 +np.float32,0x3e3f729b,0x3f91b5e1,2 +np.float32,0x7f52a4df,0x7f800000,2 +np.float32,0xbf56da96,0x3f0f15b9,2 +np.float32,0xbf161d2b,0x3f2a7faf,2 +np.float32,0x3e8df763,0x3f9b1fbe,2 +np.float32,0xff4f0780,0x0,2 +np.float32,0x8048f594,0x3f800000,2 +np.float32,0x3e62bb1d,0x3f953b7e,2 +np.float32,0xfe58e764,0x0,2 +np.float32,0x3dd2c922,0x3f897718,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xff07b4b2,0x0,2 +np.float32,0x7f6231a0,0x7f800000,2 +np.float32,0xb8d1d,0x3f800000,2 +np.float32,0x3ee01d24,0x3fad5f16,2 +np.float32,0xbf43f59f,0x3f169869,2 +np.float32,0x801f5257,0x3f800000,2 +np.float32,0x803c15d8,0x3f800000,2 +np.float32,0x3f171a08,0x3fc0b42a,2 +np.float32,0x127aef,0x3f800000,2 +np.float32,0xfd1c6,0x3f800000,2 +np.float32,0x3f1ed13e,0x3fc4c59a,2 +np.float32,0x57fd4f,0x3f800000,2 +np.float32,0x6e8c61,0x3f800000,2 +np.float32,0x804019ab,0x3f800000,2 +np.float32,0x3ef4e5c6,0x3fb251a1,2 +np.float32,0x5044c3,0x3f800000,2 +np.float32,0x3f04460f,0x3fb7204b,2 +np.float32,0x7e326b47,0x7f800000,2 +np.float32,0x800a7e4c,0x3f800000,2 +np.float32,0xbf47ec82,0x3f14fccc,2 +np.float32,0xbedb1b3e,0x3f3e4a4d,2 +np.float32,0x3f741d86,0x3ff7e4b0,2 +np.float32,0xbe249d20,0x3f6501a6,2 +np.float32,0xbf2ea152,0x3f1f8c68,2 +np.float32,0x3ec6dbcc,0x3fa78b3f,2 +np.float32,0x7ebd9bb4,0x7f800000,2 +np.float32,0x3f61b574,0x3febd77a,2 +np.float32,0x3f3dfb2b,0x3fd61891,2 +np.float32,0x3c7d95,0x3f800000,2 +np.float32,0x8071e840,0x3f800000,2 +np.float32,0x15c6fe,0x3f800000,2 
+np.float32,0xbf096601,0x3f307893,2 +np.float32,0x7f5c2ef9,0x7f800000,2 +np.float32,0xbe79f750,0x3f582689,2 +np.float32,0x1eb692,0x3f800000,2 +np.float32,0xbd8024f0,0x3f75226d,2 +np.float32,0xbf5a8be8,0x3f0da950,2 +np.float32,0xbf4d28f3,0x3f12e3e1,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0xfea8a758,0x0,2 +np.float32,0x8075d2cf,0x3f800000,2 +np.float32,0xfd99af58,0x0,2 +np.float32,0x9e6a,0x3f800000,2 +np.float32,0x2fa19f,0x3f800000,2 +np.float32,0x3e9f4206,0x3f9ecc56,2 +np.float32,0xbee0b666,0x3f3cd9fc,2 +np.float32,0xbec558c4,0x3f43fab1,2 +np.float32,0x7e9a77df,0x7f800000,2 +np.float32,0xff3a9694,0x0,2 +np.float32,0x3f3b3708,0x3fd47f9a,2 +np.float32,0x807cd6d4,0x3f800000,2 +np.float32,0x804aa422,0x3f800000,2 +np.float32,0xfead7a70,0x0,2 +np.float32,0x3f08c610,0x3fb95efe,2 +np.float32,0xff390126,0x0,2 +np.float32,0x5d2d47,0x3f800000,2 +np.float32,0x8006849c,0x3f800000,2 +np.float32,0x654f6e,0x3f800000,2 +np.float32,0xff478a16,0x0,2 +np.float32,0x3f480b0c,0x3fdc024c,2 +np.float32,0xbc3b96c0,0x3f7df9f4,2 +np.float32,0xbcc96460,0x3f7bacb5,2 +np.float32,0x7f349f30,0x7f800000,2 +np.float32,0xbe08fa98,0x3f6954a1,2 +np.float32,0x4f3a13,0x3f800000,2 +np.float32,0x7f6a5ab4,0x7f800000,2 +np.float32,0x7eb85247,0x7f800000,2 +np.float32,0xbf287246,0x3f223e08,2 +np.float32,0x801584d0,0x3f800000,2 +np.float32,0x7ec25371,0x7f800000,2 +np.float32,0x3f002165,0x3fb51552,2 +np.float32,0x3e1108a8,0x3f8d3429,2 +np.float32,0x4f0f88,0x3f800000,2 +np.float32,0x7f67c1ce,0x7f800000,2 +np.float32,0xbf4348f8,0x3f16dedf,2 +np.float32,0xbe292b64,0x3f644d24,2 +np.float32,0xbf2bfa36,0x3f20b2d6,2 +np.float32,0xbf2a6e58,0x3f215f71,2 +np.float32,0x3e97d5d3,0x3f9d35df,2 +np.float32,0x31f597,0x3f800000,2 +np.float32,0x100544,0x3f800000,2 +np.float32,0x10a197,0x3f800000,2 +np.float32,0x3f44df50,0x3fda20d2,2 +np.float32,0x59916d,0x3f800000,2 +np.float32,0x707472,0x3f800000,2 +np.float32,0x8054194e,0x3f800000,2 +np.float32,0x80627b01,0x3f800000,2 +np.float32,0x7f4d5a5b,0x7f800000,2 +np.float32,0xbcecad00,0x3f7aeca5,2 +np.float32,0xff69c541,0x0,2 +np.float32,0xbe164e20,0x3f673c3a,2 +np.float32,0x3dd321de,0x3f897b39,2 +np.float32,0x3c9c4900,0x3f81b431,2 +np.float32,0x7f0efae3,0x7f800000,2 +np.float32,0xbf1b3ee6,0x3f282567,2 +np.float32,0x3ee858ac,0x3faf5083,2 +np.float32,0x3f0e6a39,0x3fbc3965,2 +np.float32,0x7f0c06d8,0x7f800000,2 +np.float32,0x801dd236,0x3f800000,2 +np.float32,0x564245,0x3f800000,2 +np.float32,0x7e99d3ad,0x7f800000,2 +np.float32,0xff3b0164,0x0,2 +np.float32,0x3f386f18,0x3fd2e785,2 +np.float32,0x7f603c39,0x7f800000,2 +np.float32,0x3cbd9b00,0x3f8211f0,2 +np.float32,0x2178e2,0x3f800000,2 +np.float32,0x5db226,0x3f800000,2 +np.float32,0xfec78d62,0x0,2 +np.float32,0x7f40bc1e,0x7f800000,2 +np.float32,0x80325064,0x3f800000,2 +np.float32,0x3f6068dc,0x3feb0377,2 +np.float32,0xfe8b95c6,0x0,2 +np.float32,0xbe496894,0x3f5f5f87,2 +np.float32,0xbf18722a,0x3f296cf4,2 +np.float32,0x332d0e,0x3f800000,2 +np.float32,0x3f6329dc,0x3fecc5c0,2 +np.float32,0x807d1802,0x3f800000,2 +np.float32,0x3e8afcee,0x3f9a7ff1,2 +np.float32,0x26a0a7,0x3f800000,2 +np.float32,0x7f13085d,0x7f800000,2 +np.float32,0x68d547,0x3f800000,2 +np.float32,0x7e9b04ae,0x7f800000,2 +np.float32,0x3f3ecdfe,0x3fd692ea,2 +np.float32,0x805256f4,0x3f800000,2 +np.float32,0x3f312dc8,0x3fcecd42,2 +np.float32,0x23ca15,0x3f800000,2 +np.float32,0x3f53c455,0x3fe31ad6,2 +np.float32,0xbf21186c,0x3f2580fd,2 +np.float32,0x803b9bb1,0x3f800000,2 +np.float32,0xff6ae1fc,0x0,2 +np.float32,0x2103cf,0x3f800000,2 +np.float32,0xbedcec6c,0x3f3dd29d,2 +np.float32,0x7f520afa,0x7f800000,2 
+np.float32,0x7e8b44f2,0x7f800000,2 +np.float32,0xfef7f6ce,0x0,2 +np.float32,0xbd5e7c30,0x3f768a6f,2 +np.float32,0xfeb36848,0x0,2 +np.float32,0xff49effb,0x0,2 +np.float32,0xbec207c0,0x3f44dc74,2 +np.float32,0x3e91147f,0x3f9bc77f,2 +np.float32,0xfe784cd4,0x0,2 +np.float32,0xfd1a7250,0x0,2 +np.float32,0xff3b3f48,0x0,2 +np.float32,0x3f685db5,0x3ff0219f,2 +np.float32,0x3f370976,0x3fd21bae,2 +np.float32,0xfed4cc20,0x0,2 +np.float32,0xbf41e337,0x3f17714a,2 +np.float32,0xbf4e8638,0x3f12593a,2 +np.float32,0x3edaf0f1,0x3fac295e,2 +np.float32,0x803cbb4f,0x3f800000,2 +np.float32,0x7f492043,0x7f800000,2 +np.float32,0x2cabcf,0x3f800000,2 +np.float32,0x17f8ac,0x3f800000,2 +np.float32,0x3e846478,0x3f99205a,2 +np.float32,0x76948f,0x3f800000,2 +np.float32,0x1,0x3f800000,2 +np.float32,0x7ea6419e,0x7f800000,2 +np.float32,0xa5315,0x3f800000,2 +np.float32,0xff3a8e32,0x0,2 +np.float32,0xbe5714e8,0x3f5d50b7,2 +np.float32,0xfeadf960,0x0,2 +np.float32,0x3ebbd1a8,0x3fa50efc,2 +np.float32,0x7f31dce7,0x7f800000,2 +np.float32,0x80314999,0x3f800000,2 +np.float32,0x8017f41b,0x3f800000,2 +np.float32,0x7ed6d051,0x7f800000,2 +np.float32,0x7f525688,0x7f800000,2 +np.float32,0x7f7fffff,0x7f800000,2 +np.float32,0x3e8b0461,0x3f9a8180,2 +np.float32,0x3d9fe46e,0x3f871e1f,2 +np.float32,0x5e6d8f,0x3f800000,2 +np.float32,0xbf09ae55,0x3f305608,2 +np.float32,0xfe7028c4,0x0,2 +np.float32,0x7f3ade56,0x7f800000,2 +np.float32,0xff4c9ef9,0x0,2 +np.float32,0x7e3199cf,0x7f800000,2 +np.float32,0x8048652f,0x3f800000,2 +np.float32,0x805e1237,0x3f800000,2 +np.float32,0x189ed8,0x3f800000,2 +np.float32,0xbea7c094,0x3f4bfd98,2 +np.float32,0xbf2f109c,0x3f1f5c5c,2 +np.float32,0xbf0e7f4c,0x3f2e0d2c,2 +np.float32,0x8005981f,0x3f800000,2 +np.float32,0xbf762005,0x3f0377f3,2 +np.float32,0xbf0f60ab,0x3f2da317,2 +np.float32,0xbf4aa3e7,0x3f13e54e,2 +np.float32,0xbf348fd2,0x3f1d01aa,2 +np.float32,0x3e530b50,0x3f93a7fb,2 +np.float32,0xbf0b05a4,0x3f2fb26a,2 +np.float32,0x3eea416c,0x3fafc4aa,2 +np.float32,0x805ad04d,0x3f800000,2 +np.float32,0xbf6328d8,0x3f0a655e,2 +np.float32,0x3f7347b9,0x3ff75558,2 +np.float32,0xfda3ca68,0x0,2 +np.float32,0x80497d21,0x3f800000,2 +np.float32,0x3e740452,0x3f96fd22,2 +np.float32,0x3e528e57,0x3f939b7e,2 +np.float32,0x3e9e19fa,0x3f9e8cbd,2 +np.float32,0x8078060b,0x3f800000,2 +np.float32,0x3f3fea7a,0x3fd73872,2 +np.float32,0xfcfa30a0,0x0,2 +np.float32,0x7f4eb4bf,0x7f800000,2 +np.float32,0x3f712618,0x3ff5e900,2 +np.float32,0xbf668f0e,0x3f0920c6,2 +np.float32,0x3f3001e9,0x3fce259d,2 +np.float32,0xbe9b6fac,0x3f4f6b9c,2 +np.float32,0xbf61fcf3,0x3f0ad5ec,2 +np.float32,0xff08a55c,0x0,2 +np.float32,0x3e805014,0x3f984872,2 +np.float32,0x6ce04c,0x3f800000,2 +np.float32,0x7f7cbc07,0x7f800000,2 +np.float32,0x3c87dc,0x3f800000,2 +np.float32,0x3f2ee498,0x3fcd869a,2 +np.float32,0x4b1116,0x3f800000,2 +np.float32,0x3d382d06,0x3f840d5f,2 +np.float32,0xff7de21e,0x0,2 +np.float32,0x3f2f1d6d,0x3fcda63c,2 +np.float32,0xbf1c1618,0x3f27c38a,2 +np.float32,0xff4264b1,0x0,2 +np.float32,0x8026e5e7,0x3f800000,2 +np.float32,0xbe6fa180,0x3f59ab02,2 +np.float32,0xbe923c02,0x3f52053b,2 +np.float32,0xff3aa453,0x0,2 +np.float32,0x3f77a7ac,0x3ffa47d0,2 +np.float32,0xbed15f36,0x3f40d08a,2 +np.float32,0xa62d,0x3f800000,2 +np.float32,0xbf342038,0x3f1d3123,2 +np.float32,0x7f2f7f80,0x7f800000,2 +np.float32,0x7f2b6fc1,0x7f800000,2 +np.float32,0xff323540,0x0,2 +np.float32,0x3f1a2b6e,0x3fc24faa,2 +np.float32,0x800cc1d2,0x3f800000,2 +np.float32,0xff38fa01,0x0,2 +np.float32,0x80800000,0x3f800000,2 +np.float32,0xbf3d22e0,0x3f196745,2 +np.float32,0x7f40fd62,0x7f800000,2 
+np.float32,0x7e1785c7,0x7f800000,2 +np.float32,0x807408c4,0x3f800000,2 +np.float32,0xbf300192,0x3f1ef485,2 +np.float32,0x351e3d,0x3f800000,2 +np.float32,0x7f5ab736,0x7f800000,2 +np.float32,0x2f1696,0x3f800000,2 +np.float32,0x806ac5d7,0x3f800000,2 +np.float32,0x42ec59,0x3f800000,2 +np.float32,0x7f79f52d,0x7f800000,2 +np.float32,0x44ad28,0x3f800000,2 +np.float32,0xbf49dc9c,0x3f143532,2 +np.float32,0x3f6c1f1f,0x3ff295e7,2 +np.float32,0x1589b3,0x3f800000,2 +np.float32,0x3f49b44e,0x3fdd0031,2 +np.float32,0x7f5942c9,0x7f800000,2 +np.float32,0x3f2dab28,0x3fccd877,2 +np.float32,0xff7fffff,0x0,2 +np.float32,0x80578eb2,0x3f800000,2 +np.float32,0x3f39ba67,0x3fd3a50b,2 +np.float32,0x8020340d,0x3f800000,2 +np.float32,0xbf6025b2,0x3f0b8783,2 +np.float32,0x8015ccfe,0x3f800000,2 +np.float32,0x3f6b9762,0x3ff23cd0,2 +np.float32,0xfeeb0c86,0x0,2 +np.float32,0x802779bc,0x3f800000,2 +np.float32,0xbf32bf64,0x3f1dc796,2 +np.float32,0xbf577eb6,0x3f0ed631,2 +np.float32,0x0,0x3f800000,2 +np.float32,0xfe99de6c,0x0,2 +np.float32,0x7a4e53,0x3f800000,2 +np.float32,0x1a15d3,0x3f800000,2 +np.float32,0x8035fe16,0x3f800000,2 +np.float32,0x3e845784,0x3f991dab,2 +np.float32,0x43d688,0x3f800000,2 +np.float32,0xbd447cc0,0x3f77a0b7,2 +np.float32,0x3f83fa,0x3f800000,2 +np.float32,0x3f141df2,0x3fbf2719,2 +np.float32,0x805c586a,0x3f800000,2 +np.float32,0x14c47e,0x3f800000,2 +np.float32,0x3d3bed00,0x3f8422d4,2 +np.float32,0x7f6f4ecd,0x7f800000,2 +np.float32,0x3f0a5e5a,0x3fba2c5c,2 +np.float32,0x523ecf,0x3f800000,2 +np.float32,0xbef4a6e8,0x3f37d262,2 +np.float32,0xff54eb58,0x0,2 +np.float32,0xff3fc875,0x0,2 +np.float32,0x8067c392,0x3f800000,2 +np.float32,0xfedae910,0x0,2 +np.float32,0x80595979,0x3f800000,2 +np.float32,0x3ee87d1d,0x3faf5929,2 +np.float32,0x7f5bad33,0x7f800000,2 +np.float32,0xbf45b868,0x3f15e109,2 +np.float32,0x3ef2277d,0x3fb1a868,2 +np.float32,0x3ca5a950,0x3f81ce8c,2 +np.float32,0x3e70f4e6,0x3f96ad25,2 +np.float32,0xfe3515bc,0x0,2 +np.float32,0xfe4af088,0x0,2 +np.float32,0xff3c78b2,0x0,2 +np.float32,0x7f50f51a,0x7f800000,2 +np.float32,0x3e3a232a,0x3f913009,2 +np.float32,0x7dfec6ff,0x7f800000,2 +np.float32,0x3e1bbaec,0x3f8e3ad6,2 +np.float32,0xbd658fa0,0x3f763ee7,2 +np.float32,0xfe958684,0x0,2 +np.float32,0x503670,0x3f800000,2 +np.float32,0x3f800000,0x40000000,2 +np.float32,0x1bbec6,0x3f800000,2 +np.float32,0xbea7bb7c,0x3f4bff00,2 +np.float32,0xff3a24a2,0x0,2 +np.float32,0xbf416240,0x3f17a635,2 +np.float32,0xbf800000,0x3f000000,2 +np.float32,0xff0c965c,0x0,2 +np.float32,0x80000000,0x3f800000,2 +np.float32,0xbec2c69a,0x3f44a99e,2 +np.float32,0x5b68d4,0x3f800000,2 +np.float32,0xb9a93000,0x3f7ff158,2 +np.float32,0x3d5a0dd8,0x3f84cfbc,2 +np.float32,0xbeaf7a28,0x3f49de4e,2 +np.float32,0x3ee83555,0x3faf4820,2 +np.float32,0xfd320330,0x0,2 +np.float32,0xe1af2,0x3f800000,2 +np.float32,0x7cf28caf,0x7f800000,2 +np.float32,0x80781009,0x3f800000,2 +np.float32,0xbf1e0baf,0x3f26e04d,2 +np.float32,0x7edb05b1,0x7f800000,2 +np.float32,0x3de004,0x3f800000,2 +np.float32,0xff436af6,0x0,2 +np.float32,0x802a9408,0x3f800000,2 +np.float32,0x7ed82205,0x7f800000,2 +np.float32,0x3e3f8212,0x3f91b767,2 +np.float32,0x16a2b2,0x3f800000,2 +np.float32,0xff1e5af3,0x0,2 +np.float32,0xbf1c860c,0x3f2790b7,2 +np.float32,0x3f3bc5da,0x3fd4d1d6,2 +np.float32,0x7f5f7085,0x7f800000,2 +np.float32,0x7f68e409,0x7f800000,2 +np.float32,0x7f4b3388,0x7f800000,2 +np.float32,0x7ecaf440,0x7f800000,2 +np.float32,0x80078785,0x3f800000,2 +np.float32,0x3ebd800d,0x3fa56f45,2 +np.float32,0xbe39a140,0x3f61c58e,2 +np.float32,0x803b587e,0x3f800000,2 
+np.float32,0xbeaaa418,0x3f4b31c4,2 +np.float32,0xff7e2b9f,0x0,2 +np.float32,0xff5180a3,0x0,2 +np.float32,0xbf291394,0x3f21f73c,2 +np.float32,0x7f7b9698,0x7f800000,2 +np.float32,0x4218da,0x3f800000,2 +np.float32,0x7f135262,0x7f800000,2 +np.float32,0x804c10e8,0x3f800000,2 +np.float32,0xbf1c2a54,0x3f27ba5a,2 +np.float32,0x7f41fd32,0x7f800000,2 +np.float32,0x3e5cc464,0x3f94a195,2 +np.float32,0xff7a2fa7,0x0,2 +np.float32,0x3e05dc30,0x3f8c23c9,2 +np.float32,0x7f206d99,0x7f800000,2 +np.float32,0xbe9ae520,0x3f4f9287,2 +np.float32,0xfe4f4d58,0x0,2 +np.float32,0xbf44db42,0x3f163ae3,2 +np.float32,0x3f65ac48,0x3fee6300,2 +np.float32,0x3ebfaf36,0x3fa5ecb0,2 +np.float32,0x3f466719,0x3fdb08b0,2 +np.float32,0x80000001,0x3f800000,2 +np.float32,0xff4b3c7b,0x0,2 +np.float32,0x3df44374,0x3f8b0819,2 +np.float32,0xfea4b540,0x0,2 +np.float32,0x7f358e3d,0x7f800000,2 +np.float32,0x801f5e63,0x3f800000,2 +np.float32,0x804ae77e,0x3f800000,2 +np.float32,0xdbb5,0x3f800000,2 +np.float32,0x7f0a7e3b,0x7f800000,2 +np.float32,0xbe4152e4,0x3f609953,2 +np.float32,0x4b9579,0x3f800000,2 +np.float32,0x3ece0bd4,0x3fa92ea5,2 +np.float32,0x7e499d9a,0x7f800000,2 +np.float32,0x80637d8a,0x3f800000,2 +np.float32,0x3e50a425,0x3f936a8b,2 +np.float32,0xbf0e8cb0,0x3f2e06dd,2 +np.float32,0x802763e2,0x3f800000,2 +np.float32,0xff73041b,0x0,2 +np.float32,0xfea466da,0x0,2 +np.float32,0x80064c73,0x3f800000,2 +np.float32,0xbef29222,0x3f385728,2 +np.float32,0x8029c215,0x3f800000,2 +np.float32,0xbd3994e0,0x3f7815d1,2 +np.float32,0xbe6ac9e4,0x3f5a61f3,2 +np.float32,0x804b58b0,0x3f800000,2 +np.float32,0xbdb83be0,0x3f70865c,2 +np.float32,0x7ee18da2,0x7f800000,2 +np.float32,0xfd4ca010,0x0,2 +np.float32,0x807c668b,0x3f800000,2 +np.float32,0xbd40ed90,0x3f77c6e9,2 +np.float32,0x7efc6881,0x7f800000,2 +np.float32,0xfe633bfc,0x0,2 +np.float32,0x803ce363,0x3f800000,2 +np.float32,0x7ecba81e,0x7f800000,2 +np.float32,0xfdcb2378,0x0,2 +np.float32,0xbebc5524,0x3f4662b2,2 +np.float32,0xfaa30000,0x0,2 +np.float32,0x805d451b,0x3f800000,2 +np.float32,0xbee85600,0x3f3ae996,2 +np.float32,0xfefb0a54,0x0,2 +np.float32,0xbdfc6690,0x3f6b0a08,2 +np.float32,0x58a57,0x3f800000,2 +np.float32,0x3b41b7,0x3f800000,2 +np.float32,0x7c99812d,0x7f800000,2 +np.float32,0xbd3ae740,0x3f78079d,2 +np.float32,0xbf4a48a7,0x3f1409dd,2 +np.float32,0xfdeaad58,0x0,2 +np.float32,0xbe9aa65a,0x3f4fa42c,2 +np.float32,0x3f79d78c,0x3ffbc458,2 +np.float32,0x805e7389,0x3f800000,2 +np.float32,0x7ebb3612,0x7f800000,2 +np.float32,0x2e27dc,0x3f800000,2 +np.float32,0x80726dec,0x3f800000,2 +np.float32,0xfe8fb738,0x0,2 +np.float32,0xff1ff3bd,0x0,2 +np.float32,0x7f5264a2,0x7f800000,2 +np.float32,0x3f5a6893,0x3fe739ca,2 +np.float32,0xbec4029c,0x3f44558d,2 +np.float32,0xbef65cfa,0x3f37657e,2 +np.float32,0x63aba1,0x3f800000,2 +np.float32,0xfbb6e200,0x0,2 +np.float32,0xbf3466fc,0x3f1d1307,2 +np.float32,0x3f258844,0x3fc861d7,2 +np.float32,0xbf5f29a7,0x3f0be6dc,2 +np.float32,0x802b51cd,0x3f800000,2 +np.float32,0xbe9094dc,0x3f527dae,2 +np.float32,0xfec2e68c,0x0,2 +np.float32,0x807b38bd,0x3f800000,2 +np.float32,0xbf594662,0x3f0e2663,2 +np.float32,0x7cbcf747,0x7f800000,2 +np.float32,0xbe4b88f0,0x3f5f0d47,2 +np.float32,0x3c53c4,0x3f800000,2 +np.float32,0xbe883562,0x3f54e3f7,2 +np.float32,0xbf1efaf0,0x3f267456,2 +np.float32,0x3e22cd3e,0x3f8ee98b,2 +np.float32,0x80434875,0x3f800000,2 +np.float32,0xbf000b44,0x3f34ff6e,2 +np.float32,0x7f311c3a,0x7f800000,2 +np.float32,0x802f7f3f,0x3f800000,2 +np.float32,0x805155fe,0x3f800000,2 +np.float32,0x7f5d7485,0x7f800000,2 +np.float32,0x80119197,0x3f800000,2 
+np.float32,0x3f445b8b,0x3fd9d30d,2 +np.float32,0xbf638eb3,0x3f0a3f38,2 +np.float32,0x402410,0x3f800000,2 +np.float32,0xbc578a40,0x3f7dad1d,2 +np.float32,0xbeecbf8a,0x3f39cc9e,2 +np.float32,0x7f2935a4,0x7f800000,2 +np.float32,0x3f570fea,0x3fe523e2,2 +np.float32,0xbf06bffa,0x3f31bdb6,2 +np.float32,0xbf2afdfd,0x3f2120ba,2 +np.float32,0x7f76f7ab,0x7f800000,2 +np.float32,0xfee2d1e8,0x0,2 +np.float32,0x800b026d,0x3f800000,2 +np.float32,0xff0eda75,0x0,2 +np.float32,0x3d4c,0x3f800000,2 +np.float32,0xbed538a2,0x3f3fcffb,2 +np.float32,0x3f73f4f9,0x3ff7c979,2 +np.float32,0x2aa9fc,0x3f800000,2 +np.float32,0x806a45b3,0x3f800000,2 +np.float32,0xff770d35,0x0,2 +np.float32,0x7e999be3,0x7f800000,2 +np.float32,0x80741128,0x3f800000,2 +np.float32,0xff6aac34,0x0,2 +np.float32,0x470f74,0x3f800000,2 +np.float32,0xff423b7b,0x0,2 +np.float32,0x17dfdd,0x3f800000,2 +np.float32,0x7f029e12,0x7f800000,2 +np.float32,0x803fcb9d,0x3f800000,2 +np.float32,0x3f3dc3,0x3f800000,2 +np.float32,0x7f3a27bc,0x7f800000,2 +np.float32,0x3e473108,0x3f9279ec,2 +np.float32,0x7f4add5d,0x7f800000,2 +np.float32,0xfd9736e0,0x0,2 +np.float32,0x805f1df2,0x3f800000,2 +np.float32,0x6c49c1,0x3f800000,2 +np.float32,0x7ec733c7,0x7f800000,2 +np.float32,0x804c1abf,0x3f800000,2 +np.float32,0x3de2e887,0x3f8a37a5,2 +np.float32,0x3f51630a,0x3fe1a561,2 +np.float32,0x3de686a8,0x3f8a62ff,2 +np.float32,0xbedb3538,0x3f3e439c,2 +np.float32,0xbf3aa892,0x3f1a6f9e,2 +np.float32,0x7ee5fb32,0x7f800000,2 +np.float32,0x7e916c9b,0x7f800000,2 +np.float32,0x3f033f1c,0x3fb69e19,2 +np.float32,0x25324b,0x3f800000,2 +np.float32,0x3f348d1d,0x3fd0b2e2,2 +np.float32,0x3f5797e8,0x3fe57851,2 +np.float32,0xbf69c316,0x3f07f1a0,2 +np.float32,0xbe8b7fb0,0x3f53f1bf,2 +np.float32,0xbdbbc190,0x3f703d00,2 +np.float32,0xff6c4fc0,0x0,2 +np.float32,0x7f29fcbe,0x7f800000,2 +np.float32,0x3f678d19,0x3fef9a23,2 +np.float32,0x73d140,0x3f800000,2 +np.float32,0x3e25bdd2,0x3f8f326b,2 +np.float32,0xbeb775ec,0x3f47b2c6,2 +np.float32,0xff451c4d,0x0,2 +np.float32,0x8072c466,0x3f800000,2 +np.float32,0x3f65e836,0x3fee89b2,2 +np.float32,0x52ca7a,0x3f800000,2 +np.float32,0x62cfed,0x3f800000,2 +np.float32,0xbf583dd0,0x3f0e8c5c,2 +np.float32,0xbf683842,0x3f088342,2 +np.float32,0x3f1a7828,0x3fc2780c,2 +np.float32,0x800ea979,0x3f800000,2 +np.float32,0xbeb9133c,0x3f474328,2 +np.float32,0x3ef09fc7,0x3fb14a4b,2 +np.float32,0x7ebbcb75,0x7f800000,2 +np.float32,0xff316c0e,0x0,2 +np.float32,0x805b84e3,0x3f800000,2 +np.float32,0x3d6a55e0,0x3f852d8a,2 +np.float32,0x3e755788,0x3f971fd1,2 +np.float32,0x3ee7aacb,0x3faf2743,2 +np.float32,0x7f714039,0x7f800000,2 +np.float32,0xff70bad8,0x0,2 +np.float32,0xbe0b74c8,0x3f68f08c,2 +np.float32,0xbf6cb170,0x3f06de86,2 +np.float32,0x7ec1fbff,0x7f800000,2 +np.float32,0x8014b1f6,0x3f800000,2 +np.float32,0xfe8b45fe,0x0,2 +np.float32,0x6e2220,0x3f800000,2 +np.float32,0x3ed1777d,0x3fa9f7ab,2 +np.float32,0xff48e467,0x0,2 +np.float32,0xff76c5aa,0x0,2 +np.float32,0x3e9bd330,0x3f9e0fd7,2 +np.float32,0x3f17de4f,0x3fc11aae,2 +np.float32,0x7eeaa2fd,0x7f800000,2 +np.float32,0xbf572746,0x3f0ef806,2 +np.float32,0x7e235554,0x7f800000,2 +np.float32,0xfe24fc1c,0x0,2 +np.float32,0x7daf71ad,0x7f800000,2 +np.float32,0x800d4a6b,0x3f800000,2 +np.float32,0xbf6fc31d,0x3f05c0ce,2 +np.float32,0x1c4d93,0x3f800000,2 +np.float32,0x7ee9200c,0x7f800000,2 +np.float32,0x3f54b4da,0x3fe3aeec,2 +np.float32,0x2b37b1,0x3f800000,2 +np.float32,0x3f7468bd,0x3ff81731,2 +np.float32,0x3f2850ea,0x3fc9e5f4,2 +np.float32,0xbe0d47ac,0x3f68a6f9,2 +np.float32,0x314877,0x3f800000,2 +np.float32,0x802700c3,0x3f800000,2 
+np.float32,0x7e2c915f,0x7f800000,2 +np.float32,0x800d0059,0x3f800000,2 +np.float32,0x3f7f3c25,0x3fff7862,2 +np.float32,0xff735d31,0x0,2 +np.float32,0xff7e339e,0x0,2 +np.float32,0xbef96cf0,0x3f36a340,2 +np.float32,0x3db6ea21,0x3f882cb2,2 +np.float32,0x67cb3d,0x3f800000,2 +np.float32,0x801f349d,0x3f800000,2 +np.float32,0x3f1390ec,0x3fbede29,2 +np.float32,0x7f13644a,0x7f800000,2 +np.float32,0x804a369b,0x3f800000,2 +np.float32,0x80262666,0x3f800000,2 +np.float32,0x7e850fbc,0x7f800000,2 +np.float32,0x18b002,0x3f800000,2 +np.float32,0x8051f1ed,0x3f800000,2 +np.float32,0x3eba48f6,0x3fa4b753,2 +np.float32,0xbf3f4130,0x3f1886a9,2 +np.float32,0xbedac006,0x3f3e61cf,2 +np.float32,0xbf097c70,0x3f306ddc,2 +np.float32,0x4aba6d,0x3f800000,2 +np.float32,0x580078,0x3f800000,2 +np.float32,0x3f64d82e,0x3fedda40,2 +np.float32,0x7f781fd6,0x7f800000,2 +np.float32,0x6aff3d,0x3f800000,2 +np.float32,0xff25e074,0x0,2 +np.float32,0x7ea9ec89,0x7f800000,2 +np.float32,0xbf63b816,0x3f0a2fbb,2 +np.float32,0x133f07,0x3f800000,2 +np.float32,0xff800000,0x0,2 +np.float32,0x8013dde7,0x3f800000,2 +np.float32,0xff770b95,0x0,2 +np.float32,0x806154e8,0x3f800000,2 +np.float32,0x3f1e7bce,0x3fc4981a,2 +np.float32,0xff262c78,0x0,2 +np.float32,0x3f59a652,0x3fe6c04c,2 +np.float32,0x7f220166,0x7f800000,2 +np.float32,0x7eb24939,0x7f800000,2 +np.float32,0xbed58bb0,0x3f3fba6a,2 +np.float32,0x3c2ad000,0x3f80eda7,2 +np.float32,0x2adb2e,0x3f800000,2 +np.float32,0xfe8b213e,0x0,2 +np.float32,0xbf2e0c1e,0x3f1fccea,2 +np.float32,0x7e1716be,0x7f800000,2 +np.float32,0x80184e73,0x3f800000,2 +np.float32,0xbf254743,0x3f23a3d5,2 +np.float32,0x8063a722,0x3f800000,2 +np.float32,0xbe50adf0,0x3f5e46c7,2 +np.float32,0x3f614158,0x3feb8d60,2 +np.float32,0x8014bbc8,0x3f800000,2 +np.float32,0x283bc7,0x3f800000,2 +np.float32,0x3ffb5c,0x3f800000,2 +np.float32,0xfe8de6bc,0x0,2 +np.float32,0xbea6e086,0x3f4c3b82,2 +np.float32,0xfee64b92,0x0,2 +np.float32,0x506c1a,0x3f800000,2 +np.float32,0xff342af8,0x0,2 +np.float32,0x6b6f4c,0x3f800000,2 +np.float32,0xfeb42b1e,0x0,2 +np.float32,0x3e49384a,0x3f92ad71,2 +np.float32,0x152d08,0x3f800000,2 +np.float32,0x804c8f09,0x3f800000,2 +np.float32,0xff5e927d,0x0,2 +np.float32,0x6374da,0x3f800000,2 +np.float32,0x3f48f011,0x3fdc8ae4,2 +np.float32,0xbf446a30,0x3f1668e8,2 +np.float32,0x3ee77073,0x3faf196e,2 +np.float32,0xff4caa40,0x0,2 +np.float32,0x7efc9363,0x7f800000,2 +np.float32,0xbf706dcc,0x3f05830d,2 +np.float32,0xfe29c7e8,0x0,2 +np.float32,0x803cfe58,0x3f800000,2 +np.float32,0x3ec34c7c,0x3fa6bd0a,2 +np.float32,0x3eb85b62,0x3fa44968,2 +np.float32,0xfda1b9d8,0x0,2 +np.float32,0x802932cd,0x3f800000,2 +np.float32,0xbf5cde78,0x3f0cc5fa,2 +np.float32,0x3f31bf44,0x3fcf1ec8,2 +np.float32,0x803a0882,0x3f800000,2 +np.float32,0x800000,0x3f800000,2 +np.float32,0x3f54110e,0x3fe34a08,2 +np.float32,0x80645ea9,0x3f800000,2 +np.float32,0xbd8c1070,0x3f7425c3,2 +np.float32,0x801a006a,0x3f800000,2 +np.float32,0x7f5d161e,0x7f800000,2 +np.float32,0x805b5df3,0x3f800000,2 +np.float32,0xbf71a7c0,0x3f0511be,2 +np.float32,0xbe9a55c0,0x3f4fbad6,2 +np.float64,0xde7e2fd9bcfc6,0x3ff0000000000000,1 +np.float64,0xbfd8cd88eb319b12,0x3fe876349efbfa2b,1 +np.float64,0x3fe4fa13ace9f428,0x3ff933fbb117d196,1 +np.float64,0x475b3d048eb68,0x3ff0000000000000,1 +np.float64,0x7fef39ed07be73d9,0x7ff0000000000000,1 +np.float64,0x80026b84d904d70a,0x3ff0000000000000,1 +np.float64,0xebd60627d7ac1,0x3ff0000000000000,1 +np.float64,0xbfd7cbefdbaf97e0,0x3fe8bad30f6cf8e1,1 +np.float64,0x7fc17c605a22f8c0,0x7ff0000000000000,1 +np.float64,0x8cdac05119b58,0x3ff0000000000000,1 
+np.float64,0x3fc45cd60a28b9ac,0x3ff1dd8028ec3f41,1 +np.float64,0x7fef4fce137e9f9b,0x7ff0000000000000,1 +np.float64,0xe5a2b819cb457,0x3ff0000000000000,1 +np.float64,0xe3bcfd4dc77a0,0x3ff0000000000000,1 +np.float64,0x68f0b670d1e17,0x3ff0000000000000,1 +np.float64,0xae69a6455cd35,0x3ff0000000000000,1 +np.float64,0xffe7007a0c6e00f4,0x0,1 +np.float64,0x59fc57a8b3f8c,0x3ff0000000000000,1 +np.float64,0xbfeee429c0bdc854,0x3fe0638fa62bed9f,1 +np.float64,0x80030bb6e206176f,0x3ff0000000000000,1 +np.float64,0x8006967a36ad2cf5,0x3ff0000000000000,1 +np.float64,0x3fe128176a22502f,0x3ff73393301e5dc8,1 +np.float64,0x218de20c431bd,0x3ff0000000000000,1 +np.float64,0x3fe7dbc48aafb789,0x3ffad38989b5955c,1 +np.float64,0xffda1ef411343de8,0x0,1 +np.float64,0xc6b392838d673,0x3ff0000000000000,1 +np.float64,0x7fe6d080c1ada101,0x7ff0000000000000,1 +np.float64,0xbfed36dd67fa6dbb,0x3fe0fec342c4ee89,1 +np.float64,0x3fee2bb6a3fc576e,0x3ffec1c149f1f092,1 +np.float64,0xbfd1f785eb23ef0c,0x3fea576eb01233cb,1 +np.float64,0x7fdad29a1f35a533,0x7ff0000000000000,1 +np.float64,0xffe8928c4fb12518,0x0,1 +np.float64,0x7fb123160022462b,0x7ff0000000000000,1 +np.float64,0x8007ab56cfaf56ae,0x3ff0000000000000,1 +np.float64,0x7fda342d6634685a,0x7ff0000000000000,1 +np.float64,0xbfe3b7e42c676fc8,0x3fe4e05cf8685b8a,1 +np.float64,0xffa708be7c2e1180,0x0,1 +np.float64,0xbfe8ffbece31ff7e,0x3fe29eb84077a34a,1 +np.float64,0xbf91002008220040,0x3fefa245058f05cb,1 +np.float64,0x8000281f0ee0503f,0x3ff0000000000000,1 +np.float64,0x8005617adc2ac2f6,0x3ff0000000000000,1 +np.float64,0x7fa84fec60309fd8,0x7ff0000000000000,1 +np.float64,0x8d00c0231a018,0x3ff0000000000000,1 +np.float64,0xbfdfe52ca63fca5a,0x3fe6a7324cc00d57,1 +np.float64,0x7fcc81073d39020d,0x7ff0000000000000,1 +np.float64,0x800134ff5a6269ff,0x3ff0000000000000,1 +np.float64,0xffc7fff98d2ffff4,0x0,1 +np.float64,0x8000925ce50124bb,0x3ff0000000000000,1 +np.float64,0xffe2530c66a4a618,0x0,1 +np.float64,0x7fc99070673320e0,0x7ff0000000000000,1 +np.float64,0xbfddd5c1f13bab84,0x3fe72a0c80f8df39,1 +np.float64,0x3fe1c220fee38442,0x3ff7817ec66aa55b,1 +np.float64,0x3fb9a1e1043343c2,0x3ff1265e575e6404,1 +np.float64,0xffef72e0833ee5c0,0x0,1 +np.float64,0x3fe710c0416e2181,0x3ffa5e93588aaa69,1 +np.float64,0xbfd8d23cbab1a47a,0x3fe874f5b9d99885,1 +np.float64,0x7fe9628ebd72c51c,0x7ff0000000000000,1 +np.float64,0xdd5fa611babf5,0x3ff0000000000000,1 +np.float64,0x8002bafac86575f6,0x3ff0000000000000,1 +np.float64,0x68acea44d159e,0x3ff0000000000000,1 +np.float64,0xffd776695eaeecd2,0x0,1 +np.float64,0x80059b59bb4b36b4,0x3ff0000000000000,1 +np.float64,0xbdcdd2af7b9bb,0x3ff0000000000000,1 +np.float64,0x8002b432ee856867,0x3ff0000000000000,1 +np.float64,0xcbc72f09978e6,0x3ff0000000000000,1 +np.float64,0xbfee8f4bf6fd1e98,0x3fe081cc0318b170,1 +np.float64,0xffc6e2892d2dc514,0x0,1 +np.float64,0x7feb682e4db6d05c,0x7ff0000000000000,1 +np.float64,0x8004b70a04296e15,0x3ff0000000000000,1 +np.float64,0x42408a4284812,0x3ff0000000000000,1 +np.float64,0xbfe9b8b197f37163,0x3fe254b4c003ce0a,1 +np.float64,0x3fcaadf5f5355bec,0x3ff27ca7876a8d20,1 +np.float64,0xfff0000000000000,0x0,1 +np.float64,0x7fea8376d33506ed,0x7ff0000000000000,1 +np.float64,0xffef73c2d63ee785,0x0,1 +np.float64,0xffe68b2bae2d1657,0x0,1 +np.float64,0x3fd8339cb2306739,0x3ff4cb774d616f90,1 +np.float64,0xbfc6d1db4d2da3b8,0x3fec47bb873a309c,1 +np.float64,0x7fe858016230b002,0x7ff0000000000000,1 +np.float64,0x7fe74cb99d2e9972,0x7ff0000000000000,1 +np.float64,0xffec2e96dc385d2d,0x0,1 +np.float64,0xb762a9876ec55,0x3ff0000000000000,1 
+np.float64,0x3feca230c5794462,0x3ffdbfe62a572f52,1 +np.float64,0xbfb5ebad3a2bd758,0x3fee27eed86dcc39,1 +np.float64,0x471c705a8e38f,0x3ff0000000000000,1 +np.float64,0x7fc79bb5cf2f376b,0x7ff0000000000000,1 +np.float64,0xbfe53d6164ea7ac3,0x3fe4331b3beb73bd,1 +np.float64,0xbfe375a3f766eb48,0x3fe4fe67edb516e6,1 +np.float64,0x3fe1c7686ca38ed1,0x3ff7842f04770ba9,1 +np.float64,0x242e74dc485cf,0x3ff0000000000000,1 +np.float64,0x8009c06ab71380d6,0x3ff0000000000000,1 +np.float64,0x3fd08505efa10a0c,0x3ff3227b735b956d,1 +np.float64,0xffe3dfcecda7bf9d,0x0,1 +np.float64,0x8001f079bbc3e0f4,0x3ff0000000000000,1 +np.float64,0x3fddc706b6bb8e0c,0x3ff616d927987363,1 +np.float64,0xbfd151373ea2a26e,0x3fea870ba53ec126,1 +np.float64,0x7fe89533bfb12a66,0x7ff0000000000000,1 +np.float64,0xffed302cbc3a6059,0x0,1 +np.float64,0x3fd871cc28b0e398,0x3ff4d97d58c16ae2,1 +np.float64,0x7fbe9239683d2472,0x7ff0000000000000,1 +np.float64,0x848a445909149,0x3ff0000000000000,1 +np.float64,0x8007b104ce2f620a,0x3ff0000000000000,1 +np.float64,0x7fc2cd6259259ac4,0x7ff0000000000000,1 +np.float64,0xbfeadb640df5b6c8,0x3fe1e2b068de10af,1 +np.float64,0x800033b2f1a06767,0x3ff0000000000000,1 +np.float64,0x7fe54e5b7caa9cb6,0x7ff0000000000000,1 +np.float64,0x4f928f209f26,0x3ff0000000000000,1 +np.float64,0x8003c3dc6f2787ba,0x3ff0000000000000,1 +np.float64,0xbfd55a59daaab4b4,0x3fe9649d57b32b5d,1 +np.float64,0xffe3e2968d67c52c,0x0,1 +np.float64,0x80087434d550e86a,0x3ff0000000000000,1 +np.float64,0xffdde800083bd000,0x0,1 +np.float64,0xffe291f0542523e0,0x0,1 +np.float64,0xbfe1419bc3e28338,0x3fe6051d4f95a34a,1 +np.float64,0x3fd9d00ee1b3a01e,0x3ff5292bb8d5f753,1 +np.float64,0x3fdb720b60b6e417,0x3ff589d133625374,1 +np.float64,0xbfe3e21f0967c43e,0x3fe4cd4d02e3ef9a,1 +np.float64,0x7fd7e27f3dafc4fd,0x7ff0000000000000,1 +np.float64,0x3fd1cc2620a3984c,0x3ff366befbc38e3e,1 +np.float64,0x3fe78d05436f1a0b,0x3ffaa5ee4ea54b79,1 +np.float64,0x7e2acc84fc55a,0x3ff0000000000000,1 +np.float64,0x800ffb861c5ff70c,0x3ff0000000000000,1 +np.float64,0xffb2b0db1a2561b8,0x0,1 +np.float64,0xbfe80c2363701847,0x3fe301fdfe789576,1 +np.float64,0x7fe383c1c3e70783,0x7ff0000000000000,1 +np.float64,0xbfeefc02e6fdf806,0x3fe05b1a8528bf6c,1 +np.float64,0xbfe42c9268285925,0x3fe4abdc14793cb8,1 +np.float64,0x1,0x3ff0000000000000,1 +np.float64,0xa71c7ce94e390,0x3ff0000000000000,1 +np.float64,0x800ed4e6777da9cd,0x3ff0000000000000,1 +np.float64,0x3fde11b35d3c2367,0x3ff628bdc6dd1b78,1 +np.float64,0x3fef3964dbfe72ca,0x3fff777cae357608,1 +np.float64,0x3fefe369b7ffc6d4,0x3fffec357be508a3,1 +np.float64,0xbfdef1855f3de30a,0x3fe6e348c58e3fed,1 +np.float64,0x3fee0e2bc13c1c58,0x3ffeae1909c1b973,1 +np.float64,0xbfd31554ffa62aaa,0x3fea06628b2f048a,1 +np.float64,0x800dc56bcc7b8ad8,0x3ff0000000000000,1 +np.float64,0x7fbba01b8e374036,0x7ff0000000000000,1 +np.float64,0x7fd9737a92b2e6f4,0x7ff0000000000000,1 +np.float64,0x3feeae0fac3d5c1f,0x3fff1913705f1f07,1 +np.float64,0x3fdcc64fcdb98ca0,0x3ff5d9c3e5862972,1 +np.float64,0x3fdad9f83db5b3f0,0x3ff56674e81c1bd1,1 +np.float64,0x32b8797065710,0x3ff0000000000000,1 +np.float64,0x3fd20deae6241bd6,0x3ff37495bc057394,1 +np.float64,0x7fc899f0763133e0,0x7ff0000000000000,1 +np.float64,0x80045805fc08b00d,0x3ff0000000000000,1 +np.float64,0xbfcd8304cb3b0608,0x3feb4611f1eaa30c,1 +np.float64,0x3fd632a2fcac6544,0x3ff4592e1ea14fb0,1 +np.float64,0xffeeb066007d60cb,0x0,1 +np.float64,0x800bb12a42b76255,0x3ff0000000000000,1 +np.float64,0xbfe060fe1760c1fc,0x3fe6714640ab2574,1 +np.float64,0x80067ed737acfdaf,0x3ff0000000000000,1 +np.float64,0x3fd5ec3211abd864,0x3ff449adea82e73e,1 
+np.float64,0x7fc4b2fdc22965fb,0x7ff0000000000000,1 +np.float64,0xff656afd002ad600,0x0,1 +np.float64,0xffeadefcdcb5bdf9,0x0,1 +np.float64,0x80052f18610a5e32,0x3ff0000000000000,1 +np.float64,0xbfd5b75c78ab6eb8,0x3fe94b15e0f39194,1 +np.float64,0xa4d3de2b49a7c,0x3ff0000000000000,1 +np.float64,0xbfe321c93de64392,0x3fe524ac7bbee401,1 +np.float64,0x3feb32f5def665ec,0x3ffcd6e4e5f9c271,1 +np.float64,0x7fe6b07e4ced60fc,0x7ff0000000000000,1 +np.float64,0x3fe013bb2de02776,0x3ff6aa4c32ab5ba4,1 +np.float64,0xbfeadd81d375bb04,0x3fe1e1de89b4aebf,1 +np.float64,0xffece7678079cece,0x0,1 +np.float64,0x3fe3d87b8467b0f8,0x3ff897cf22505e4d,1 +np.float64,0xffc4e3a05129c740,0x0,1 +np.float64,0xbfddee6b03bbdcd6,0x3fe723dd83ab49bd,1 +np.float64,0x3fcc4e2672389c4d,0x3ff2a680db769116,1 +np.float64,0x3fd8ed221ab1da44,0x3ff4f569aec8b850,1 +np.float64,0x80000a3538a0146b,0x3ff0000000000000,1 +np.float64,0x8004832eb109065e,0x3ff0000000000000,1 +np.float64,0xffdca83c60395078,0x0,1 +np.float64,0xffef551cda3eaa39,0x0,1 +np.float64,0x800fd95dd65fb2bc,0x3ff0000000000000,1 +np.float64,0x3ff0000000000000,0x4000000000000000,1 +np.float64,0xbfc06f5c4f20deb8,0x3fed466c17305ad8,1 +np.float64,0xbfeb01b5f476036c,0x3fe1d3de0f4211f4,1 +np.float64,0xbfdb2b9284365726,0x3fe7d7b02f790b05,1 +np.float64,0xff76ba83202d7500,0x0,1 +np.float64,0x3fd3f1c59ea7e38c,0x3ff3db96b3a0aaad,1 +np.float64,0x8b99ff6d17340,0x3ff0000000000000,1 +np.float64,0xbfeb383aa0f67075,0x3fe1bedcf2531c08,1 +np.float64,0x3fe321e35fa643c7,0x3ff83749a5d686ee,1 +np.float64,0xbfd863eb2130c7d6,0x3fe8923fcc39bac7,1 +np.float64,0x9e71dd333ce3c,0x3ff0000000000000,1 +np.float64,0x9542962b2a853,0x3ff0000000000000,1 +np.float64,0xba2c963b74593,0x3ff0000000000000,1 +np.float64,0x80019f4d0ca33e9b,0x3ff0000000000000,1 +np.float64,0xffde3e39a73c7c74,0x0,1 +np.float64,0x800258ae02c4b15d,0x3ff0000000000000,1 +np.float64,0xbfd99a535a3334a6,0x3fe8402f3a0662a5,1 +np.float64,0xe6c62143cd8c4,0x3ff0000000000000,1 +np.float64,0x7fbcc828f0399051,0x7ff0000000000000,1 +np.float64,0xbfe42e3596285c6b,0x3fe4ab2066d66071,1 +np.float64,0xffe2ee42d365dc85,0x0,1 +np.float64,0x3fe1f98abea3f315,0x3ff79dc68002a80b,1 +np.float64,0x7fd7225891ae44b0,0x7ff0000000000000,1 +np.float64,0x477177408ee30,0x3ff0000000000000,1 +np.float64,0xbfe16a7e2162d4fc,0x3fe5f1a5c745385d,1 +np.float64,0xbf98aaee283155e0,0x3fef785952e9c089,1 +np.float64,0x7fd7c14a8daf8294,0x7ff0000000000000,1 +np.float64,0xf7e7713defcee,0x3ff0000000000000,1 +np.float64,0x800769aa11aed355,0x3ff0000000000000,1 +np.float64,0xbfed30385e3a6071,0x3fe10135a3bd9ae6,1 +np.float64,0x3fe6dd7205edbae4,0x3ffa4155899efd70,1 +np.float64,0x800d705d26bae0ba,0x3ff0000000000000,1 +np.float64,0xa443ac1f48876,0x3ff0000000000000,1 +np.float64,0xbfec8cfec43919fe,0x3fe13dbf966e6633,1 +np.float64,0x7fd246efaa248dde,0x7ff0000000000000,1 +np.float64,0x800f2ad14afe55a3,0x3ff0000000000000,1 +np.float64,0x800487a894c90f52,0x3ff0000000000000,1 +np.float64,0x80014c4f19e2989f,0x3ff0000000000000,1 +np.float64,0x3fc11f265f223e4d,0x3ff18def05c971e5,1 +np.float64,0xffeb6d565776daac,0x0,1 +np.float64,0x7fd5ca5df8ab94bb,0x7ff0000000000000,1 +np.float64,0xbfe33de4fde67bca,0x3fe517d0e212cd1c,1 +np.float64,0xbfd1c738e5a38e72,0x3fea6539e9491693,1 +np.float64,0xbfec1d8c33b83b18,0x3fe16790fbca0c65,1 +np.float64,0xbfeecb464b7d968d,0x3fe06c67e2aefa55,1 +np.float64,0xbfd621dbf1ac43b8,0x3fe92dfa32d93846,1 +np.float64,0x80069a02860d3406,0x3ff0000000000000,1 +np.float64,0xbfe84f650e309eca,0x3fe2e661300f1975,1 +np.float64,0x7fc1d2cec523a59d,0x7ff0000000000000,1 
+np.float64,0x3fd7706d79aee0db,0x3ff49fb033353dfe,1 +np.float64,0xffd94ba458329748,0x0,1 +np.float64,0x7fea98ba1a753173,0x7ff0000000000000,1 +np.float64,0xbfe756ba092ead74,0x3fe34d428d1857bc,1 +np.float64,0xffecfbd836b9f7b0,0x0,1 +np.float64,0x3fd211fbe5a423f8,0x3ff375711a3641e0,1 +np.float64,0x7fee24f7793c49ee,0x7ff0000000000000,1 +np.float64,0x7fe6a098886d4130,0x7ff0000000000000,1 +np.float64,0xbfd4ade909a95bd2,0x3fe99436524db1f4,1 +np.float64,0xbfeb704e6476e09d,0x3fe1a95be4a21bc6,1 +np.float64,0xffefc0f6627f81ec,0x0,1 +np.float64,0x7feff3f896ffe7f0,0x7ff0000000000000,1 +np.float64,0xa3f74edb47eea,0x3ff0000000000000,1 +np.float64,0xbfe0a551cf214aa4,0x3fe65027a7ff42e3,1 +np.float64,0x3fe164b23622c964,0x3ff7521c6225f51d,1 +np.float64,0x7fc258752324b0e9,0x7ff0000000000000,1 +np.float64,0x4739b3348e737,0x3ff0000000000000,1 +np.float64,0xb0392b1d60726,0x3ff0000000000000,1 +np.float64,0x7fe26f42e5e4de85,0x7ff0000000000000,1 +np.float64,0x8004601f87e8c040,0x3ff0000000000000,1 +np.float64,0xffe92ce37b3259c6,0x0,1 +np.float64,0x3fe620da3a6c41b4,0x3ff9d6ee3d005466,1 +np.float64,0x3fd850cfa2b0a1a0,0x3ff4d20bd249d411,1 +np.float64,0xffdcdfdfb5b9bfc0,0x0,1 +np.float64,0x800390297d672054,0x3ff0000000000000,1 +np.float64,0x3fde5864f6bcb0ca,0x3ff639bb9321f5ef,1 +np.float64,0x3fee484cec7c909a,0x3ffed4d2c6274219,1 +np.float64,0x7fe9b9a064b37340,0x7ff0000000000000,1 +np.float64,0xffe50028b8aa0051,0x0,1 +np.float64,0x3fe37774ade6eee9,0x3ff864558498a9a8,1 +np.float64,0x7fef83c724bf078d,0x7ff0000000000000,1 +np.float64,0xbfeb58450fb6b08a,0x3fe1b290556be73d,1 +np.float64,0x7fd7161475ae2c28,0x7ff0000000000000,1 +np.float64,0x3fece09621f9c12c,0x3ffde836a583bbdd,1 +np.float64,0x3fd045790ea08af2,0x3ff31554778fd4e2,1 +np.float64,0xbfe7c7dd6cef8fbb,0x3fe31e2eeda857fc,1 +np.float64,0xffe9632f5372c65e,0x0,1 +np.float64,0x800d4f3a703a9e75,0x3ff0000000000000,1 +np.float64,0xffea880e4df5101c,0x0,1 +np.float64,0xbfeb7edc4ff6fdb8,0x3fe1a3cb5dc33594,1 +np.float64,0xbfcaae4bab355c98,0x3febb1ee65e16b58,1 +np.float64,0xbfde598a19bcb314,0x3fe709145eafaaf8,1 +np.float64,0x3feefb6d78fdf6db,0x3fff4d5c8c68e39a,1 +np.float64,0x13efc75427dfa,0x3ff0000000000000,1 +np.float64,0xffe26f65c064decb,0x0,1 +np.float64,0xbfed5c1addfab836,0x3fe0f1133bd2189a,1 +np.float64,0x7fe7a7cf756f4f9e,0x7ff0000000000000,1 +np.float64,0xffc681702e2d02e0,0x0,1 +np.float64,0x8003d6ab5067ad57,0x3ff0000000000000,1 +np.float64,0xffa695f1342d2be0,0x0,1 +np.float64,0xbfcf8857db3f10b0,0x3feafa14da8c29a4,1 +np.float64,0xbfe8ca06be71940e,0x3fe2b46f6d2c64b4,1 +np.float64,0x3451c74468a3a,0x3ff0000000000000,1 +np.float64,0x3fde47d5f6bc8fac,0x3ff635bf8e024716,1 +np.float64,0xffda159d5db42b3a,0x0,1 +np.float64,0x7fef9fecaa3f3fd8,0x7ff0000000000000,1 +np.float64,0x3fd4e745e3a9ce8c,0x3ff410a9cb6fd8bf,1 +np.float64,0xffef57019b3eae02,0x0,1 +np.float64,0xbfe6604f4f6cc09e,0x3fe3b55de43c626d,1 +np.float64,0xffe066a424a0cd48,0x0,1 +np.float64,0x3fd547de85aa8fbc,0x3ff425b2a7a16675,1 +np.float64,0xffb3c69280278d28,0x0,1 +np.float64,0xffebe0b759f7c16e,0x0,1 +np.float64,0x3fefc84106ff9082,0x3fffd973687337d8,1 +np.float64,0x501c42a4a0389,0x3ff0000000000000,1 +np.float64,0x7feb45d13eb68ba1,0x7ff0000000000000,1 +np.float64,0xbfb16a8c2e22d518,0x3fee86a9c0f9291a,1 +np.float64,0x3be327b877c66,0x3ff0000000000000,1 +np.float64,0x7fe4a58220694b03,0x7ff0000000000000,1 +np.float64,0x3fe0286220a050c4,0x3ff6b472157ab8f2,1 +np.float64,0x3fc9381825327030,0x3ff2575fbea2bf5d,1 +np.float64,0xbfd1af7ee8a35efe,0x3fea6c032cf7e669,1 +np.float64,0xbfea9b0f39b5361e,0x3fe1fbae14b40b4d,1 
+np.float64,0x39efe4aa73dfd,0x3ff0000000000000,1 +np.float64,0xffeb06fdc8360dfb,0x0,1 +np.float64,0xbfda481e72b4903c,0x3fe812b4b08d4884,1 +np.float64,0xbfd414ba5ba82974,0x3fe9bec9474bdfe6,1 +np.float64,0x7fe707177b6e0e2e,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x3ff0000000000000,1 +np.float64,0xbfede6a75bbbcd4f,0x3fe0be874cccd399,1 +np.float64,0x8006cdb577cd9b6c,0x3ff0000000000000,1 +np.float64,0x800051374f20a26f,0x3ff0000000000000,1 +np.float64,0x3fe5cba8c96b9752,0x3ff9a76b3adcc122,1 +np.float64,0xbfee3933487c7267,0x3fe0a0b190f9609a,1 +np.float64,0x3fd574b8d8aae970,0x3ff42f7e83de1af9,1 +np.float64,0xba5db72b74bb7,0x3ff0000000000000,1 +np.float64,0x3fa9bf512c337ea0,0x3ff0914a7f743a94,1 +np.float64,0xffe8cb736c3196e6,0x0,1 +np.float64,0x3761b2f06ec37,0x3ff0000000000000,1 +np.float64,0x8b4d4433169a9,0x3ff0000000000000,1 +np.float64,0x800f0245503e048b,0x3ff0000000000000,1 +np.float64,0x7fb20d54ac241aa8,0x7ff0000000000000,1 +np.float64,0x3fdf26666b3e4ccd,0x3ff66b8995142017,1 +np.float64,0xbfcbf2a83737e550,0x3feb8173a7b9d6b5,1 +np.float64,0x3fd31572a0a62ae5,0x3ff3ac6c94313dcd,1 +np.float64,0x7fb6c2807a2d8500,0x7ff0000000000000,1 +np.float64,0x800799758f2f32ec,0x3ff0000000000000,1 +np.float64,0xe72f1f6bce5e4,0x3ff0000000000000,1 +np.float64,0x3fe0e0f223a1c1e4,0x3ff70fed5b761673,1 +np.float64,0x3fe6d4f133eda9e2,0x3ffa3c8000c169eb,1 +np.float64,0xbfe1ccc3d8639988,0x3fe5c32148bedbda,1 +np.float64,0x3fea71c53574e38a,0x3ffc5f31201fe9be,1 +np.float64,0x9e0323eb3c065,0x3ff0000000000000,1 +np.float64,0x8005cc79a5cb98f4,0x3ff0000000000000,1 +np.float64,0x1dace1f83b59d,0x3ff0000000000000,1 +np.float64,0x10000000000000,0x3ff0000000000000,1 +np.float64,0xbfdef50830bdea10,0x3fe6e269fc17ebef,1 +np.float64,0x8010000000000000,0x3ff0000000000000,1 +np.float64,0xbfdfa82192bf5044,0x3fe6b6313ee0a095,1 +np.float64,0x3fd9398fe2b27320,0x3ff506ca2093c060,1 +np.float64,0x8002721fe664e441,0x3ff0000000000000,1 +np.float64,0x800c04166ad8082d,0x3ff0000000000000,1 +np.float64,0xffec3918b3387230,0x0,1 +np.float64,0x3fec62d5dfb8c5ac,0x3ffd972ea4a54b32,1 +np.float64,0x3fe7e42a0b6fc854,0x3ffad86b0443181d,1 +np.float64,0x3fc0aff5f3215fec,0x3ff1836058d4d210,1 +np.float64,0xbf82ff68a025fec0,0x3fefcb7f06862dce,1 +np.float64,0xae2e35195c5c7,0x3ff0000000000000,1 +np.float64,0x3fece3bddf79c77c,0x3ffdea41fb1ba8fa,1 +np.float64,0xbfa97b947832f730,0x3feeea34ebedbbd2,1 +np.float64,0xbfdfb1b1ce3f6364,0x3fe6b3d72871335c,1 +np.float64,0xbfe61a4f24ac349e,0x3fe3d356bf991b06,1 +np.float64,0x7fe23117a5e4622e,0x7ff0000000000000,1 +np.float64,0x800552a8cccaa552,0x3ff0000000000000,1 +np.float64,0x625b4d0ac4b6a,0x3ff0000000000000,1 +np.float64,0x3f86cf15702d9e00,0x3ff01fbe0381676d,1 +np.float64,0x800d7d1b685afa37,0x3ff0000000000000,1 +np.float64,0x3fe2cb6e40a596dd,0x3ff80a1a562f7fc9,1 +np.float64,0x3fe756eb8e2eadd7,0x3ffa86c638aad07d,1 +np.float64,0x800dc9a5513b934b,0x3ff0000000000000,1 +np.float64,0xbfbbdd118a37ba20,0x3fedacb4624f3cee,1 +np.float64,0x800de01f8efbc03f,0x3ff0000000000000,1 +np.float64,0x800da1a3fe9b4348,0x3ff0000000000000,1 +np.float64,0xbf87d8c7602fb180,0x3fefbe2614998ab6,1 +np.float64,0xbfdfff6141bffec2,0x3fe6a0c54d9f1bc8,1 +np.float64,0xee8fbba5dd1f8,0x3ff0000000000000,1 +np.float64,0x3fe79dc93e6f3b92,0x3ffaaf9d7d955b2c,1 +np.float64,0xffedd4b3d07ba967,0x0,1 +np.float64,0x800905dfc1720bc0,0x3ff0000000000000,1 +np.float64,0x3fd9e483b8b3c907,0x3ff52ddc6c950e7f,1 +np.float64,0xe34ffefdc6a00,0x3ff0000000000000,1 +np.float64,0x2168e62242d1e,0x3ff0000000000000,1 +np.float64,0x800349950e26932b,0x3ff0000000000000,1 
+np.float64,0x7fc50da8532a1b50,0x7ff0000000000000,1 +np.float64,0xae1a4d115c34a,0x3ff0000000000000,1 +np.float64,0xa020f0b74041e,0x3ff0000000000000,1 +np.float64,0x3fd2aa2f77a5545f,0x3ff3959f09519a25,1 +np.float64,0x3fbfefc3223fdf86,0x3ff171f3df2d408b,1 +np.float64,0xbfea9fc340b53f86,0x3fe1f9d92b712654,1 +np.float64,0xffe9b920a5337240,0x0,1 +np.float64,0xbfe2eb0265e5d605,0x3fe53dd195782de3,1 +np.float64,0x7fb932c70e32658d,0x7ff0000000000000,1 +np.float64,0x3fda816bfcb502d8,0x3ff551f8d5c84c82,1 +np.float64,0x3fed68cbe9fad198,0x3ffe40f6692d5693,1 +np.float64,0x32df077665be2,0x3ff0000000000000,1 +np.float64,0x7fdc9c2f3539385d,0x7ff0000000000000,1 +np.float64,0x7fe71091a2ee2122,0x7ff0000000000000,1 +np.float64,0xbfe68106c46d020e,0x3fe3a76b56024c2c,1 +np.float64,0xffcf0572823e0ae4,0x0,1 +np.float64,0xbfeeab341fbd5668,0x3fe077d496941cda,1 +np.float64,0x7fe7ada0d2af5b41,0x7ff0000000000000,1 +np.float64,0xffacdef2a439bde0,0x0,1 +np.float64,0x3fe4200f3128401e,0x3ff8be0ddf30fd1e,1 +np.float64,0xffd9022a69320454,0x0,1 +np.float64,0xbfe8e06914f1c0d2,0x3fe2ab5fe7fffb5a,1 +np.float64,0x3fc4b976602972ed,0x3ff1e6786fa7a890,1 +np.float64,0xbfd784c105af0982,0x3fe8cdeb1cdbd57e,1 +np.float64,0x7feb20a20eb64143,0x7ff0000000000000,1 +np.float64,0xbfc87dd83630fbb0,0x3fec067c1e7e6983,1 +np.float64,0x7fe5400cbe6a8018,0x7ff0000000000000,1 +np.float64,0xbfb4a1f5e22943e8,0x3fee42e6c81559a9,1 +np.float64,0x3fe967c575f2cf8a,0x3ffbbd8bc0d5c50d,1 +np.float64,0xbfeb059cf4760b3a,0x3fe1d25c592c4dab,1 +np.float64,0xbfeef536d5bdea6e,0x3fe05d832c15c64a,1 +np.float64,0x3fa90b3f6432167f,0x3ff08d410dd732cc,1 +np.float64,0xbfeaff265e75fe4d,0x3fe1d4db3fb3208d,1 +np.float64,0x6d93d688db27b,0x3ff0000000000000,1 +np.float64,0x800ab9b4ea55736a,0x3ff0000000000000,1 +np.float64,0x3fd444b39d288967,0x3ff3ed749d48d444,1 +np.float64,0xbfd5f2c0d0abe582,0x3fe93ad6124d88e7,1 +np.float64,0x3fea8fd915f51fb2,0x3ffc71b32cb92d60,1 +np.float64,0xbfd23d6491a47aca,0x3fea43875709b0f0,1 +np.float64,0xffe76f75ce6edeeb,0x0,1 +np.float64,0x1f5670da3eacf,0x3ff0000000000000,1 +np.float64,0x8000d89c9621b13a,0x3ff0000000000000,1 +np.float64,0x3fedb51c52bb6a39,0x3ffe732279c228ff,1 +np.float64,0x7f99215ac83242b5,0x7ff0000000000000,1 +np.float64,0x742a6864e854e,0x3ff0000000000000,1 +np.float64,0xbfe02fb340205f66,0x3fe689495f9164e3,1 +np.float64,0x7fef4c12b0fe9824,0x7ff0000000000000,1 +np.float64,0x3fd40e17c2a81c30,0x3ff3e1aee8ed972f,1 +np.float64,0x7fdcd264e939a4c9,0x7ff0000000000000,1 +np.float64,0x3fdb675838b6ceb0,0x3ff587526241c550,1 +np.float64,0x3fdf1a4081be3480,0x3ff66896a18c2385,1 +np.float64,0xbfea5082b874a106,0x3fe218cf8f11be13,1 +np.float64,0xffe1a0ebf7e341d8,0x0,1 +np.float64,0x3fed0a2222ba1444,0x3ffe032ce928ae7d,1 +np.float64,0xffeae036da75c06d,0x0,1 +np.float64,0x5b05fc8ab60c0,0x3ff0000000000000,1 +np.float64,0x7fd8aae5f03155cb,0x7ff0000000000000,1 +np.float64,0xbfd0b4d9fda169b4,0x3feab41e58b6ccb7,1 +np.float64,0xffdcaffa57395ff4,0x0,1 +np.float64,0xbfcbf1455437e28c,0x3feb81a884182c5d,1 +np.float64,0x3f9d6700b83ace01,0x3ff0525657db35d4,1 +np.float64,0x4fd5b0b29fab7,0x3ff0000000000000,1 +np.float64,0x3fe9af2df5b35e5c,0x3ffbe895684df916,1 +np.float64,0x800dfd41f9dbfa84,0x3ff0000000000000,1 +np.float64,0xbf2a30457e546,0x3ff0000000000000,1 +np.float64,0x7fc6be37182d7c6d,0x7ff0000000000000,1 +np.float64,0x800e0f9788dc1f2f,0x3ff0000000000000,1 +np.float64,0x8006890c704d121a,0x3ff0000000000000,1 +np.float64,0xffecb1a7cbb9634f,0x0,1 +np.float64,0xffb35c330426b868,0x0,1 +np.float64,0x7fe8f2ba8a71e574,0x7ff0000000000000,1 
+np.float64,0xf3ccff8fe79a0,0x3ff0000000000000,1 +np.float64,0x3fdf19a84e3e3351,0x3ff66871b17474c1,1 +np.float64,0x80049a662d0934cd,0x3ff0000000000000,1 +np.float64,0xdf5bb4bbbeb77,0x3ff0000000000000,1 +np.float64,0x8005eca030cbd941,0x3ff0000000000000,1 +np.float64,0xffe5f239586be472,0x0,1 +np.float64,0xbfc4526a0728a4d4,0x3fecaa52fbf5345e,1 +np.float64,0xbfe8f1ecda31e3da,0x3fe2a44c080848b3,1 +np.float64,0x3feebd32f4bd7a66,0x3fff234788938c3e,1 +np.float64,0xffd6ca04e9ad940a,0x0,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfd4c560a9a98ac2,0x3fe98db6d97442fc,1 +np.float64,0x8005723471cae46a,0x3ff0000000000000,1 +np.float64,0xbfeb278299764f05,0x3fe1c54b48f8ba4b,1 +np.float64,0x8007907b376f20f7,0x3ff0000000000000,1 +np.float64,0x7fe9c2fd01b385f9,0x7ff0000000000000,1 +np.float64,0x7fdaa37368b546e6,0x7ff0000000000000,1 +np.float64,0xbfe6d0f3786da1e7,0x3fe38582271cada7,1 +np.float64,0xbfea9b77823536ef,0x3fe1fb8575cd1b7d,1 +np.float64,0xbfe90ac38bf21587,0x3fe29a471b47a2e8,1 +np.float64,0xbfe9c51844738a30,0x3fe24fc8de03ea84,1 +np.float64,0x3fe45a9013a8b520,0x3ff8dd7c80f1cf75,1 +np.float64,0xbfe5780551eaf00a,0x3fe419832a6a4c56,1 +np.float64,0xffefffffffffffff,0x0,1 +np.float64,0x7fe3778c84a6ef18,0x7ff0000000000000,1 +np.float64,0xbfdc8a60413914c0,0x3fe77dc55b85028f,1 +np.float64,0xef47ae2fde8f6,0x3ff0000000000000,1 +np.float64,0x8001269fa4c24d40,0x3ff0000000000000,1 +np.float64,0x3fe9d2d39e73a5a7,0x3ffbfe2a66c4148e,1 +np.float64,0xffee61f528fcc3e9,0x0,1 +np.float64,0x3fe8a259ab7144b3,0x3ffb47e797a34bd2,1 +np.float64,0x3f906d610820dac0,0x3ff02dccda8e1a75,1 +np.float64,0x3fe70739f32e0e74,0x3ffa59232f4fcd07,1 +np.float64,0x3fe6b7f5e6ad6fec,0x3ffa2c0cc54f2c16,1 +np.float64,0x95a91a792b524,0x3ff0000000000000,1 +np.float64,0xbfedf6fcf57bedfa,0x3fe0b89bb40081cc,1 +np.float64,0xbfa4d2de9c29a5c0,0x3fef1c485678d657,1 +np.float64,0x3fe130470d22608e,0x3ff737b0be409a38,1 +np.float64,0x3fcf8035423f006b,0x3ff2f9d7c3c6a302,1 +np.float64,0xffe5995a3eab32b4,0x0,1 +np.float64,0xffca68c63034d18c,0x0,1 +np.float64,0xff9d53af903aa760,0x0,1 +np.float64,0x800563f1de6ac7e4,0x3ff0000000000000,1 +np.float64,0x7fce284fa63c509e,0x7ff0000000000000,1 +np.float64,0x7fb2a3959a25472a,0x7ff0000000000000,1 +np.float64,0x7fdbe2652f37c4c9,0x7ff0000000000000,1 +np.float64,0x800d705bbc1ae0b8,0x3ff0000000000000,1 +np.float64,0x7fd9bd2347b37a46,0x7ff0000000000000,1 +np.float64,0x3fcac3c0fb358782,0x3ff27ed62d6c8221,1 +np.float64,0x800110691ec220d3,0x3ff0000000000000,1 +np.float64,0x3fef79a8157ef350,0x3fffa368513eb909,1 +np.float64,0x7fe8bd2f0e317a5d,0x7ff0000000000000,1 +np.float64,0x7fd3040e60a6081c,0x7ff0000000000000,1 +np.float64,0xffea50723234a0e4,0x0,1 +np.float64,0xbfe6220054ac4400,0x3fe3d00961238a93,1 +np.float64,0x3f9eddd8c83dbbc0,0x3ff0567b0c73005a,1 +np.float64,0xbfa4a062c42940c0,0x3fef1e68badde324,1 +np.float64,0xbfd077ad4720ef5a,0x3feac5d577581d07,1 +np.float64,0x7fdfd4b025bfa95f,0x7ff0000000000000,1 +np.float64,0xd00d3cf3a01a8,0x3ff0000000000000,1 +np.float64,0x7fe3010427260207,0x7ff0000000000000,1 +np.float64,0x22ea196645d44,0x3ff0000000000000,1 +np.float64,0x7fd747e8cd2e8fd1,0x7ff0000000000000,1 +np.float64,0xd50665e7aa0cd,0x3ff0000000000000,1 +np.float64,0x7fe1da580ae3b4af,0x7ff0000000000000,1 +np.float64,0xffeb218ecfb6431d,0x0,1 +np.float64,0xbf887d0dd030fa00,0x3fefbc6252c8b354,1 +np.float64,0x3fcaa31067354621,0x3ff27b904c07e07f,1 +np.float64,0x7fe698cc4ded3198,0x7ff0000000000000,1 +np.float64,0x1c40191a38804,0x3ff0000000000000,1 +np.float64,0x80086fd20e30dfa4,0x3ff0000000000000,1 
+np.float64,0x7fed34d5eaba69ab,0x7ff0000000000000,1 +np.float64,0xffd00b52622016a4,0x0,1 +np.float64,0x3f80abcdb021579b,0x3ff0172d27945851,1 +np.float64,0x3fe614cfd66c29a0,0x3ff9d031e1839191,1 +np.float64,0x80021d71c8843ae4,0x3ff0000000000000,1 +np.float64,0x800bc2adc657855c,0x3ff0000000000000,1 +np.float64,0x6b9fec1cd73fe,0x3ff0000000000000,1 +np.float64,0xffd9093b5f321276,0x0,1 +np.float64,0x800d3c6c77fa78d9,0x3ff0000000000000,1 +np.float64,0xffe80fc1cbf01f83,0x0,1 +np.float64,0xffbffbaf2a3ff760,0x0,1 +np.float64,0x3fea1ed29eb43da5,0x3ffc2c64ec0e17a3,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x3fd944a052328941,0x3ff5094f4c43ecca,1 +np.float64,0x800b1f9416163f29,0x3ff0000000000000,1 +np.float64,0x800f06bf33de0d7e,0x3ff0000000000000,1 +np.float64,0xbfdbf0d226b7e1a4,0x3fe7a4f73793d95b,1 +np.float64,0xffe7306c30ae60d8,0x0,1 +np.float64,0x7fe991accfb32359,0x7ff0000000000000,1 +np.float64,0x3fcc0040d2380082,0x3ff29ea47e4f07d4,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x0,0x3ff0000000000000,1 +np.float64,0x3fe1423f7be2847e,0x3ff740bc1d3b20f8,1 +np.float64,0xbfeae3a3cab5c748,0x3fe1df7e936f8504,1 +np.float64,0x800b2da7d6165b50,0x3ff0000000000000,1 +np.float64,0x800b2404fcd6480a,0x3ff0000000000000,1 +np.float64,0x6fcbcf88df97b,0x3ff0000000000000,1 +np.float64,0xa248c0e14492,0x3ff0000000000000,1 +np.float64,0xffd255776824aaee,0x0,1 +np.float64,0x80057b3effeaf67f,0x3ff0000000000000,1 +np.float64,0x3feb0b07d7761610,0x3ffcbdfe1be5a594,1 +np.float64,0x924e1019249c2,0x3ff0000000000000,1 +np.float64,0x80074307e80e8611,0x3ff0000000000000,1 +np.float64,0xffb207fa46240ff8,0x0,1 +np.float64,0x95ac388d2b587,0x3ff0000000000000,1 +np.float64,0xbff0000000000000,0x3fe0000000000000,1 +np.float64,0x3fd38b6a492716d5,0x3ff3c59f62b5add5,1 +np.float64,0x7fe49362c3e926c5,0x7ff0000000000000,1 +np.float64,0x7fe842889db08510,0x7ff0000000000000,1 +np.float64,0xbfba6003e834c008,0x3fedcb620a2d9856,1 +np.float64,0xffe7e782bd6fcf05,0x0,1 +np.float64,0x7fd9b93d9433727a,0x7ff0000000000000,1 +np.float64,0x7fc8fcb61d31f96b,0x7ff0000000000000,1 +np.float64,0xbfef9be8db3f37d2,0x3fe022d603b81dc2,1 +np.float64,0x6f4fc766de9fa,0x3ff0000000000000,1 +np.float64,0xbfe93016f132602e,0x3fe28b42d782d949,1 +np.float64,0x3fe10e52b8e21ca5,0x3ff726a38b0bb895,1 +np.float64,0x3fbbba0ae6377416,0x3ff13f56084a9da3,1 +np.float64,0x3fe09e42ece13c86,0x3ff6eeb57e775e24,1 +np.float64,0x800942e39fb285c8,0x3ff0000000000000,1 +np.float64,0xffe5964370eb2c86,0x0,1 +np.float64,0x3fde479f32bc8f3e,0x3ff635b2619ba53a,1 +np.float64,0x3fe826e187f04dc3,0x3ffaff52b79c3a08,1 +np.float64,0x3febcbf1eab797e4,0x3ffd37152e5e2598,1 +np.float64,0x3fa0816a202102d4,0x3ff05c8e6a8b00d5,1 +np.float64,0xbd005ccb7a00c,0x3ff0000000000000,1 +np.float64,0x44c12fdc89827,0x3ff0000000000000,1 +np.float64,0xffc8fdffa431fc00,0x0,1 +np.float64,0xffeb4f5a87b69eb4,0x0,1 +np.float64,0xbfb07e7f8420fd00,0x3fee9a32924fe6a0,1 +np.float64,0xbfbd9d1bb63b3a38,0x3fed88ca81e5771c,1 +np.float64,0x8008682a74f0d055,0x3ff0000000000000,1 +np.float64,0x3fdeedbc7b3ddb79,0x3ff65dcb7c55f4dc,1 +np.float64,0x8009e889c613d114,0x3ff0000000000000,1 +np.float64,0x3faea831f43d5064,0x3ff0ad935e890e49,1 +np.float64,0xf0af1703e15e3,0x3ff0000000000000,1 +np.float64,0xffec06c4a5f80d88,0x0,1 +np.float64,0x53a1cc0ca743a,0x3ff0000000000000,1 +np.float64,0x7fd10c9eea22193d,0x7ff0000000000000,1 +np.float64,0xbfd48a6bf0a914d8,0x3fe99e0d109f2bac,1 +np.float64,0x3fd6dfe931adbfd4,0x3ff47f81c2dfc5d3,1 +np.float64,0x3fed20e86b7a41d0,0x3ffe11fecc7bc686,1 
+np.float64,0xbfea586818b4b0d0,0x3fe215b7747d5cb8,1 +np.float64,0xbfd4ad3e20295a7c,0x3fe99465ab8c3275,1 +np.float64,0x3fd6619ee4acc33e,0x3ff4638b7b80c08a,1 +np.float64,0x3fdf6fcb63bedf97,0x3ff67d62fd3d560c,1 +np.float64,0x800a9191e7152324,0x3ff0000000000000,1 +np.float64,0x3fd2ff3c0da5fe78,0x3ff3a7b17e892a28,1 +np.float64,0x8003dbf1f327b7e5,0x3ff0000000000000,1 +np.float64,0xffea6b89a934d712,0x0,1 +np.float64,0x7fcfb879043f70f1,0x7ff0000000000000,1 +np.float64,0xea6a84dbd4d51,0x3ff0000000000000,1 +np.float64,0x800ec97a815d92f5,0x3ff0000000000000,1 +np.float64,0xffe304c3a8660987,0x0,1 +np.float64,0xbfefe24dd3ffc49c,0x3fe00a4e065be96d,1 +np.float64,0xffd3cc8c00a79918,0x0,1 +np.float64,0x95be8b7b2b7d2,0x3ff0000000000000,1 +np.float64,0x7fe20570cba40ae1,0x7ff0000000000000,1 +np.float64,0x7f97a06da02f40da,0x7ff0000000000000,1 +np.float64,0xffe702b9522e0572,0x0,1 +np.float64,0x3fada2d8543b45b1,0x3ff0a7adc4201e08,1 +np.float64,0x235e6acc46bce,0x3ff0000000000000,1 +np.float64,0x3fea6bc28ef4d786,0x3ffc5b7fc68fddac,1 +np.float64,0xffdbc9f505b793ea,0x0,1 +np.float64,0xffe98b137ff31626,0x0,1 +np.float64,0x800e26c6721c4d8d,0x3ff0000000000000,1 +np.float64,0x80080de445301bc9,0x3ff0000000000000,1 +np.float64,0x37e504a86fca1,0x3ff0000000000000,1 +np.float64,0x8002f5f60325ebed,0x3ff0000000000000,1 +np.float64,0x5c8772feb90ef,0x3ff0000000000000,1 +np.float64,0xbfe021abb4604358,0x3fe69023a51d22b8,1 +np.float64,0x3fde744f8fbce8a0,0x3ff64074dc84edd7,1 +np.float64,0xbfdd92899f3b2514,0x3fe73aefd9701858,1 +np.float64,0x7fc1ad5c51235ab8,0x7ff0000000000000,1 +np.float64,0xaae2f98955c5f,0x3ff0000000000000,1 +np.float64,0x7f9123d5782247aa,0x7ff0000000000000,1 +np.float64,0xbfe3f8e94b67f1d2,0x3fe4c30ab28e9cb7,1 +np.float64,0x7fdaba8b4cb57516,0x7ff0000000000000,1 +np.float64,0x7fefc85cfeff90b9,0x7ff0000000000000,1 +np.float64,0xffb83b4f523076a0,0x0,1 +np.float64,0xbfe888a68c71114d,0x3fe2ceff17c203d1,1 +np.float64,0x800de1dac4bbc3b6,0x3ff0000000000000,1 +np.float64,0xbfe4f27f09e9e4fe,0x3fe453f9af407eac,1 +np.float64,0xffe3d2713467a4e2,0x0,1 +np.float64,0xbfebaab840375570,0x3fe1931131b98842,1 +np.float64,0x93892a1b27126,0x3ff0000000000000,1 +np.float64,0x1e8e7f983d1d1,0x3ff0000000000000,1 +np.float64,0x3fecc950627992a0,0x3ffdd926f036add0,1 +np.float64,0xbfd41dfb1aa83bf6,0x3fe9bc34ece35b94,1 +np.float64,0x800aebfc6555d7f9,0x3ff0000000000000,1 +np.float64,0x7fe33ba52ca67749,0x7ff0000000000000,1 +np.float64,0xffe57c9b3feaf936,0x0,1 +np.float64,0x3fdd12464fba248c,0x3ff5ebc5598e6bd0,1 +np.float64,0xffe06d7f0fe0dafe,0x0,1 +np.float64,0x800e55b7fe9cab70,0x3ff0000000000000,1 +np.float64,0x3fd33803c8267008,0x3ff3b3cb78b2d642,1 +np.float64,0xe9cab8a1d3957,0x3ff0000000000000,1 +np.float64,0x3fb38ac166271580,0x3ff0de906947c0f0,1 +np.float64,0xbfd67aa552acf54a,0x3fe915cf64a389fd,1 +np.float64,0x1db96daa3b72f,0x3ff0000000000000,1 +np.float64,0xbfee9f08f4fd3e12,0x3fe07c2c615add3c,1 +np.float64,0xf14f6d65e29ee,0x3ff0000000000000,1 +np.float64,0x800bce089e179c12,0x3ff0000000000000,1 +np.float64,0xffc42dcc37285b98,0x0,1 +np.float64,0x7fd5f37063abe6e0,0x7ff0000000000000,1 +np.float64,0xbfd943c2cbb28786,0x3fe856f6452ec753,1 +np.float64,0x8ddfbc091bbf8,0x3ff0000000000000,1 +np.float64,0xbfe153491e22a692,0x3fe5fcb075dbbd5d,1 +np.float64,0xffe7933999ef2672,0x0,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x8000000000000000,0x3ff0000000000000,1 +np.float64,0xbfe9154580b22a8b,0x3fe2960bac3a8220,1 +np.float64,0x800dc6dda21b8dbb,0x3ff0000000000000,1 +np.float64,0xbfb26225a824c448,0x3fee7239a457df81,1 
+np.float64,0xbfd7b68c83af6d1a,0x3fe8c08e351ab468,1 +np.float64,0xffde01f7213c03ee,0x0,1 +np.float64,0x3fe54cbe0faa997c,0x3ff9614527191d72,1 +np.float64,0xbfd6bec3732d7d86,0x3fe90354909493de,1 +np.float64,0xbfef3c85bd7e790b,0x3fe0444f8c489ca6,1 +np.float64,0x899501b7132a0,0x3ff0000000000000,1 +np.float64,0xbfe17a456462f48b,0x3fe5ea2719a9a84b,1 +np.float64,0xffe34003b8668007,0x0,1 +np.float64,0x7feff6a3633fed46,0x7ff0000000000000,1 +np.float64,0x3fba597ecc34b2fe,0x3ff12ee72e4de474,1 +np.float64,0x4084c7b68109a,0x3ff0000000000000,1 +np.float64,0x3fad23bf4c3a4780,0x3ff0a4d06193ff6d,1 +np.float64,0xffd0fe2707a1fc4e,0x0,1 +np.float64,0xb96cb43f72d97,0x3ff0000000000000,1 +np.float64,0x7fc4d684d829ad09,0x7ff0000000000000,1 +np.float64,0x7fdc349226b86923,0x7ff0000000000000,1 +np.float64,0x7fd82851cd3050a3,0x7ff0000000000000,1 +np.float64,0x800cde0041b9bc01,0x3ff0000000000000,1 +np.float64,0x4e8caa1e9d196,0x3ff0000000000000,1 +np.float64,0xbfed06a6d2fa0d4e,0x3fe1108c3682b05a,1 +np.float64,0xffe8908122312102,0x0,1 +np.float64,0xffe56ed6d9aaddad,0x0,1 +np.float64,0x3fedd6db00fbadb6,0x3ffe896c68c4b26e,1 +np.float64,0x3fde31f9b4bc63f4,0x3ff6307e08f8b6ba,1 +np.float64,0x6bb963c2d772d,0x3ff0000000000000,1 +np.float64,0x787b7142f0f6f,0x3ff0000000000000,1 +np.float64,0x3fe6e4147c6dc829,0x3ffa451bbdece240,1 +np.float64,0x8003857401470ae9,0x3ff0000000000000,1 +np.float64,0xbfeae82c3c75d058,0x3fe1ddbd66e65aab,1 +np.float64,0x7fe174707c62e8e0,0x7ff0000000000000,1 +np.float64,0x80008d2545e11a4b,0x3ff0000000000000,1 +np.float64,0xbfecc2dce17985ba,0x3fe129ad4325985a,1 +np.float64,0xbfe1fa1daf63f43c,0x3fe5adcb0731a44b,1 +np.float64,0x7fcf2530203e4a5f,0x7ff0000000000000,1 +np.float64,0xbfea5cefe874b9e0,0x3fe213f134b61f4a,1 +np.float64,0x800103729f2206e6,0x3ff0000000000000,1 +np.float64,0xbfe8442ff7708860,0x3fe2eaf850faa169,1 +np.float64,0x8006c78e19ed8f1d,0x3ff0000000000000,1 +np.float64,0x3fc259589c24b2b1,0x3ff1abe6a4d28816,1 +np.float64,0xffed02b7b5ba056e,0x0,1 +np.float64,0xbfce0aa4fe3c1548,0x3feb32115d92103e,1 +np.float64,0x7fec06e78bf80dce,0x7ff0000000000000,1 +np.float64,0xbfe0960bbc612c18,0x3fe6578ab29b70d4,1 +np.float64,0x3fee45841cbc8b08,0x3ffed2f6ca808ad3,1 +np.float64,0xbfeb0f8ebef61f1e,0x3fe1ce86003044cd,1 +np.float64,0x8002c357358586af,0x3ff0000000000000,1 +np.float64,0x3fe9aa10cc735422,0x3ffbe57e294ce68b,1 +np.float64,0x800256c0a544ad82,0x3ff0000000000000,1 +np.float64,0x4de6e1449bcdd,0x3ff0000000000000,1 +np.float64,0x65e9bc9ccbd38,0x3ff0000000000000,1 +np.float64,0xbfe53b0fa9aa7620,0x3fe4341f0aa29bbc,1 +np.float64,0xbfcdd94cd13bb298,0x3feb3956acd2e2dd,1 +np.float64,0x8004a49b65a94938,0x3ff0000000000000,1 +np.float64,0x800d3d05deba7a0c,0x3ff0000000000000,1 +np.float64,0x3fe4e05bce69c0b8,0x3ff925f55602a7e0,1 +np.float64,0xffe391e3256723c6,0x0,1 +np.float64,0xbfe92f0f37b25e1e,0x3fe28bacc76ae753,1 +np.float64,0x3f990238d8320472,0x3ff045edd36e2d62,1 +np.float64,0xffed8d15307b1a2a,0x0,1 +np.float64,0x3fee82e01afd05c0,0x3ffefc09e8b9c2b7,1 +np.float64,0xffb2d94b2225b298,0x0,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-expm1.csv b/python/numpy/_core/tests/data/umath-validation-set-expm1.csv new file mode 100644 index 000000000..dcbc7cd91 --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-expm1.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0x80606724,0x80606724,3 +np.float32,0xbf16790f,0xbee38e14,3 +np.float32,0xbf1778a1,0xbee4a97f,3 +np.float32,0x7d4fc610,0x7f800000,3 +np.float32,0xbec30a20,0xbea230d5,3 +np.float32,0x3eae8a36,0x3ecffac5,3 
+np.float32,0xbf1f08f1,0xbeece93c,3 +np.float32,0x80374376,0x80374376,3 +np.float32,0x3f2e04ca,0x3f793115,3 +np.float32,0x7e2c7e36,0x7f800000,3 +np.float32,0xbf686cae,0xbf18bcf0,3 +np.float32,0xbf5518cd,0xbf10a3da,3 +np.float32,0x807e233c,0x807e233c,3 +np.float32,0x7f4edd54,0x7f800000,3 +np.float32,0x7ed70088,0x7f800000,3 +np.float32,0x801675da,0x801675da,3 +np.float32,0x806735d5,0x806735d5,3 +np.float32,0xfe635fec,0xbf800000,3 +np.float32,0xfed88a0a,0xbf800000,3 +np.float32,0xff52c052,0xbf800000,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0xff4f65f9,0xbf800000,3 +np.float32,0xfe0f6c20,0xbf800000,3 +np.float32,0x80322b30,0x80322b30,3 +np.float32,0xfb757000,0xbf800000,3 +np.float32,0x3c81e0,0x3c81e0,3 +np.float32,0x79d56a,0x79d56a,3 +np.float32,0x8029d7af,0x8029d7af,3 +np.float32,0x8058a593,0x8058a593,3 +np.float32,0x3f3a13c7,0x3f88c75c,3 +np.float32,0x2a6b05,0x2a6b05,3 +np.float32,0xbd64c960,0xbd5e83ae,3 +np.float32,0x80471052,0x80471052,3 +np.float32,0xbe5dd950,0xbe47766c,3 +np.float32,0xfd8f88f0,0xbf800000,3 +np.float32,0x75a4b7,0x75a4b7,3 +np.float32,0x3f726f2e,0x3fc9fb7d,3 +np.float32,0x3ed6795c,0x3f053115,3 +np.float32,0x17d7f5,0x17d7f5,3 +np.float32,0xbf4cf19b,0xbf0d094f,3 +np.float32,0x3e0ec532,0x3e1933c6,3 +np.float32,0xff084016,0xbf800000,3 +np.float32,0x800829aa,0x800829aa,3 +np.float32,0x806d7302,0x806d7302,3 +np.float32,0x7f59d9da,0x7f800000,3 +np.float32,0x15f8b9,0x15f8b9,3 +np.float32,0x803befb3,0x803befb3,3 +np.float32,0x525043,0x525043,3 +np.float32,0x51a647,0x51a647,3 +np.float32,0xbf1cfce4,0xbeeab3d9,3 +np.float32,0x3f1f27a4,0x3f5cb1d2,3 +np.float32,0xbebc3a04,0xbe9d8142,3 +np.float32,0xbeea548c,0xbebc07e5,3 +np.float32,0x3f47401c,0x3f96c2a3,3 +np.float32,0x806b1ea3,0x806b1ea3,3 +np.float32,0x3ea56bb8,0x3ec3450c,3 +np.float32,0x3f7b4963,0x3fd597b5,3 +np.float32,0x7f051fa0,0x7f800000,3 +np.float32,0x1d411c,0x1d411c,3 +np.float32,0xff0b6a35,0xbf800000,3 +np.float32,0xbead63c0,0xbe9314f7,3 +np.float32,0x3738be,0x3738be,3 +np.float32,0x3f138cc8,0x3f479155,3 +np.float32,0x800a539f,0x800a539f,3 +np.float32,0x801b0ebd,0x801b0ebd,3 +np.float32,0x318fcd,0x318fcd,3 +np.float32,0x3ed67556,0x3f052e06,3 +np.float32,0x702886,0x702886,3 +np.float32,0x80000001,0x80000001,3 +np.float32,0x70a174,0x70a174,3 +np.float32,0x4f9c66,0x4f9c66,3 +np.float32,0x3e3e1927,0x3e50e351,3 +np.float32,0x7eac9a4d,0x7f800000,3 +np.float32,0x4b7407,0x4b7407,3 +np.float32,0x7f5bd2fd,0x7f800000,3 +np.float32,0x3eaafc58,0x3ecaffbd,3 +np.float32,0xbc989360,0xbc9729e2,3 +np.float32,0x3f470e5c,0x3f968c7b,3 +np.float32,0x4c5672,0x4c5672,3 +np.float32,0xff2b2ee2,0xbf800000,3 +np.float32,0xbf28a104,0xbef7079b,3 +np.float32,0x2c6175,0x2c6175,3 +np.float32,0x3d7e4fb0,0x3d832f9f,3 +np.float32,0x763276,0x763276,3 +np.float32,0x3cf364,0x3cf364,3 +np.float32,0xbf7ace75,0xbf1fe48c,3 +np.float32,0xff19e858,0xbf800000,3 +np.float32,0x80504c70,0x80504c70,3 +np.float32,0xff390210,0xbf800000,3 +np.float32,0x8046a743,0x8046a743,3 +np.float32,0x80000000,0x80000000,3 +np.float32,0x806c51da,0x806c51da,3 +np.float32,0x806ab38f,0x806ab38f,3 +np.float32,0x3f3de863,0x3f8cc538,3 +np.float32,0x7f6d45bb,0x7f800000,3 +np.float32,0xfd16ec60,0xbf800000,3 +np.float32,0x80513cba,0x80513cba,3 +np.float32,0xbf68996b,0xbf18cefa,3 +np.float32,0xfe039f2c,0xbf800000,3 +np.float32,0x3f013207,0x3f280c55,3 +np.float32,0x7ef4bc07,0x7f800000,3 +np.float32,0xbe8b65ac,0xbe741069,3 +np.float32,0xbf7a8186,0xbf1fc7a6,3 +np.float32,0x802532e5,0x802532e5,3 +np.float32,0x32c7df,0x32c7df,3 +np.float32,0x3ce4dceb,0x3ce81701,3 
+np.float32,0xfe801118,0xbf800000,3 +np.float32,0x3d905f20,0x3d9594fb,3 +np.float32,0xbe11ed28,0xbe080168,3 +np.float32,0x59e773,0x59e773,3 +np.float32,0x3e9a2547,0x3eb3dd57,3 +np.float32,0x7ecb7c67,0x7f800000,3 +np.float32,0x7f69a67e,0x7f800000,3 +np.float32,0xff121e11,0xbf800000,3 +np.float32,0x3f7917cb,0x3fd2ad8c,3 +np.float32,0xbf1a7da8,0xbee7fc0c,3 +np.float32,0x3f077e66,0x3f329c40,3 +np.float32,0x3ce8e040,0x3cec37b3,3 +np.float32,0xbf3f0b8e,0xbf069f4d,3 +np.float32,0x3f52f194,0x3fa3c9d6,3 +np.float32,0xbf0e7422,0xbeda80f2,3 +np.float32,0xfd67e230,0xbf800000,3 +np.float32,0xff14d9a9,0xbf800000,3 +np.float32,0x3f3546e3,0x3f83dc2b,3 +np.float32,0x3e152e3a,0x3e20983d,3 +np.float32,0x4a89a3,0x4a89a3,3 +np.float32,0x63217,0x63217,3 +np.float32,0xbeb9e2a8,0xbe9be153,3 +np.float32,0x7e9fa049,0x7f800000,3 +np.float32,0x7f58110c,0x7f800000,3 +np.float32,0x3e88290c,0x3e9bfba9,3 +np.float32,0xbf2cb206,0xbefb3494,3 +np.float32,0xff5880c4,0xbf800000,3 +np.float32,0x7ecff3ac,0x7f800000,3 +np.float32,0x3f4b3de6,0x3f9b23fd,3 +np.float32,0xbebd2048,0xbe9e208c,3 +np.float32,0xff08f7a2,0xbf800000,3 +np.float32,0xff473330,0xbf800000,3 +np.float32,0x1,0x1,3 +np.float32,0xbf5dc239,0xbf14584b,3 +np.float32,0x458e3f,0x458e3f,3 +np.float32,0xbdb8a650,0xbdb091f8,3 +np.float32,0xff336ffc,0xbf800000,3 +np.float32,0x3c60bd00,0x3c624966,3 +np.float32,0xbe16a4f8,0xbe0c1664,3 +np.float32,0x3f214246,0x3f60a0f0,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x7e08737e,0x7f800000,3 +np.float32,0x3f70574c,0x3fc74b8e,3 +np.float32,0xbed5745c,0xbeae8c77,3 +np.float32,0x361752,0x361752,3 +np.float32,0x3eb276d6,0x3ed584ea,3 +np.float32,0x3f03fc1e,0x3f2cb1a5,3 +np.float32,0x3fafd1,0x3fafd1,3 +np.float32,0x7e50d74c,0x7f800000,3 +np.float32,0x3eeca5,0x3eeca5,3 +np.float32,0x5dc963,0x5dc963,3 +np.float32,0x7f0e63ae,0x7f800000,3 +np.float32,0x8021745f,0x8021745f,3 +np.float32,0xbf5881a9,0xbf121d07,3 +np.float32,0x7dadc7fd,0x7f800000,3 +np.float32,0xbf2c0798,0xbefa86bb,3 +np.float32,0x3e635f50,0x3e7e97a9,3 +np.float32,0xbf2053fa,0xbeee4c0e,3 +np.float32,0x3e8eee2b,0x3ea4dfcc,3 +np.float32,0xfc8a03c0,0xbf800000,3 +np.float32,0xfd9e4948,0xbf800000,3 +np.float32,0x801e817e,0x801e817e,3 +np.float32,0xbf603a27,0xbf1560c3,3 +np.float32,0x7f729809,0x7f800000,3 +np.float32,0x3f5a1864,0x3fac0e04,3 +np.float32,0x3e7648b8,0x3e8b3677,3 +np.float32,0x3edade24,0x3f088bc1,3 +np.float32,0x65e16e,0x65e16e,3 +np.float32,0x3f24aa50,0x3f671117,3 +np.float32,0x803cb1d0,0x803cb1d0,3 +np.float32,0xbe7b1858,0xbe5eadcc,3 +np.float32,0xbf19bb27,0xbee726fb,3 +np.float32,0xfd1f6e60,0xbf800000,3 +np.float32,0xfeb0de60,0xbf800000,3 +np.float32,0xff511a52,0xbf800000,3 +np.float32,0xff7757f7,0xbf800000,3 +np.float32,0x463ff5,0x463ff5,3 +np.float32,0x3f770d12,0x3fcffcc2,3 +np.float32,0xbf208562,0xbeee80dc,3 +np.float32,0x6df204,0x6df204,3 +np.float32,0xbf62d24f,0xbf1673fb,3 +np.float32,0x3dfcf210,0x3e069d5f,3 +np.float32,0xbef26002,0xbec114d7,3 +np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f30fb85,0x7f800000,3 +np.float32,0x7ee5dfef,0x7f800000,3 +np.float32,0x3f317829,0x3f800611,3 +np.float32,0x3f4b0bbd,0x3f9aec88,3 +np.float32,0x7edf708c,0x7f800000,3 +np.float32,0xff071260,0xbf800000,3 +np.float32,0x3e7b8c30,0x3e8e9198,3 +np.float32,0x3f33778b,0x3f82077f,3 +np.float32,0x3e8cd11d,0x3ea215fd,3 +np.float32,0x8004483d,0x8004483d,3 +np.float32,0x801633e3,0x801633e3,3 +np.float32,0x7e76eb15,0x7f800000,3 +np.float32,0x3c1571,0x3c1571,3 +np.float32,0x7de3de52,0x7f800000,3 +np.float32,0x804ae906,0x804ae906,3 +np.float32,0x7f3a2616,0x7f800000,3 
+np.float32,0xff7fffff,0xbf800000,3 +np.float32,0xff5d17e4,0xbf800000,3 +np.float32,0xbeaa6704,0xbe90f252,3 +np.float32,0x7e6a43af,0x7f800000,3 +np.float32,0x2a0f35,0x2a0f35,3 +np.float32,0xfd8fece0,0xbf800000,3 +np.float32,0xfeef2e2a,0xbf800000,3 +np.float32,0xff800000,0xbf800000,3 +np.float32,0xbeefcc52,0xbebf78e4,3 +np.float32,0x3db6c490,0x3dbf2bd5,3 +np.float32,0x8290f,0x8290f,3 +np.float32,0xbeace648,0xbe92bb7f,3 +np.float32,0x801fea79,0x801fea79,3 +np.float32,0x3ea6c230,0x3ec51ebf,3 +np.float32,0x3e5f2ca3,0x3e795c8a,3 +np.float32,0x3eb6f634,0x3edbeb9f,3 +np.float32,0xff790b45,0xbf800000,3 +np.float32,0x3d82e240,0x3d872816,3 +np.float32,0x3f0d6a57,0x3f3cc7db,3 +np.float32,0x7f08531a,0x7f800000,3 +np.float32,0x702b6d,0x702b6d,3 +np.float32,0x7d3a3c38,0x7f800000,3 +np.float32,0x3d0a7fb3,0x3d0cddf3,3 +np.float32,0xff28084c,0xbf800000,3 +np.float32,0xfeee8804,0xbf800000,3 +np.float32,0x804094eb,0x804094eb,3 +np.float32,0x7acb39,0x7acb39,3 +np.float32,0x3f01c07a,0x3f28f88c,3 +np.float32,0x3e05c500,0x3e0ee674,3 +np.float32,0xbe6f7c38,0xbe558ac1,3 +np.float32,0x803b1f4b,0x803b1f4b,3 +np.float32,0xbf76561f,0xbf1e332b,3 +np.float32,0xff30d368,0xbf800000,3 +np.float32,0x7e2e1f38,0x7f800000,3 +np.float32,0x3ee085b8,0x3f0ce7c0,3 +np.float32,0x8064c4a7,0x8064c4a7,3 +np.float32,0xa7c1d,0xa7c1d,3 +np.float32,0x3f27498a,0x3f6c14bc,3 +np.float32,0x137ca,0x137ca,3 +np.float32,0x3d0a5c60,0x3d0cb969,3 +np.float32,0x80765f1f,0x80765f1f,3 +np.float32,0x80230a71,0x80230a71,3 +np.float32,0x3f321ed2,0x3f80acf4,3 +np.float32,0x7d61e7f4,0x7f800000,3 +np.float32,0xbf39f7f2,0xbf0430f7,3 +np.float32,0xbe2503f8,0xbe1867e8,3 +np.float32,0x29333d,0x29333d,3 +np.float32,0x7edc5a0e,0x7f800000,3 +np.float32,0xbe81a8a2,0xbe651663,3 +np.float32,0x7f76ab6d,0x7f800000,3 +np.float32,0x7f46111f,0x7f800000,3 +np.float32,0xff0fc888,0xbf800000,3 +np.float32,0x805ece89,0x805ece89,3 +np.float32,0xc390b,0xc390b,3 +np.float32,0xff64bdee,0xbf800000,3 +np.float32,0x3dd07e4e,0x3ddb79bd,3 +np.float32,0xfecc1f10,0xbf800000,3 +np.float32,0x803f5177,0x803f5177,3 +np.float32,0x802a24d2,0x802a24d2,3 +np.float32,0x7f27d0cc,0x7f800000,3 +np.float32,0x3ef57c98,0x3f1d7e88,3 +np.float32,0x7b848d,0x7b848d,3 +np.float32,0x7f7fffff,0x7f800000,3 +np.float32,0xfe889c46,0xbf800000,3 +np.float32,0xff2d6dc5,0xbf800000,3 +np.float32,0x3f53a186,0x3fa492a6,3 +np.float32,0xbf239c94,0xbef1c90c,3 +np.float32,0xff7c0f4e,0xbf800000,3 +np.float32,0x3e7c69a9,0x3e8f1f3a,3 +np.float32,0xbf47c9e9,0xbf0ab2a9,3 +np.float32,0xbc1eaf00,0xbc1deae9,3 +np.float32,0x3f4a6d39,0x3f9a3d8e,3 +np.float32,0x3f677930,0x3fbc26eb,3 +np.float32,0x3f45eea1,0x3f955418,3 +np.float32,0x7f61a1f8,0x7f800000,3 +np.float32,0xff58c7c6,0xbf800000,3 +np.float32,0x80239801,0x80239801,3 +np.float32,0xff56e616,0xbf800000,3 +np.float32,0xff62052c,0xbf800000,3 +np.float32,0x8009b615,0x8009b615,3 +np.float32,0x293d6b,0x293d6b,3 +np.float32,0xfe9e585c,0xbf800000,3 +np.float32,0x7f58ff4b,0x7f800000,3 +np.float32,0x10937c,0x10937c,3 +np.float32,0x7f5cc13f,0x7f800000,3 +np.float32,0x110c5d,0x110c5d,3 +np.float32,0x805e51fc,0x805e51fc,3 +np.float32,0xbedcf70a,0xbeb3766c,3 +np.float32,0x3f4d5e42,0x3f9d8091,3 +np.float32,0xff5925a0,0xbf800000,3 +np.float32,0x7e87cafa,0x7f800000,3 +np.float32,0xbf6474b2,0xbf171fee,3 +np.float32,0x4b39b2,0x4b39b2,3 +np.float32,0x8020cc28,0x8020cc28,3 +np.float32,0xff004ed8,0xbf800000,3 +np.float32,0xbf204cf5,0xbeee448d,3 +np.float32,0x3e30cf10,0x3e40fdb1,3 +np.float32,0x80202bee,0x80202bee,3 +np.float32,0xbf55a985,0xbf10e2bc,3 +np.float32,0xbe297dd8,0xbe1c351c,3 
+np.float32,0x5780d9,0x5780d9,3 +np.float32,0x7ef729fa,0x7f800000,3 +np.float32,0x8039a3b5,0x8039a3b5,3 +np.float32,0x7cdd3f,0x7cdd3f,3 +np.float32,0x7ef0145a,0x7f800000,3 +np.float32,0x807ad7ae,0x807ad7ae,3 +np.float32,0x7f6c2643,0x7f800000,3 +np.float32,0xbec56124,0xbea3c929,3 +np.float32,0x512c3b,0x512c3b,3 +np.float32,0xbed3effe,0xbead8c1e,3 +np.float32,0x7f5e0a4d,0x7f800000,3 +np.float32,0x3f315316,0x3f7fc200,3 +np.float32,0x7eca5727,0x7f800000,3 +np.float32,0x7f4834f3,0x7f800000,3 +np.float32,0x8004af6d,0x8004af6d,3 +np.float32,0x3f223ca4,0x3f6277e3,3 +np.float32,0x7eea4fdd,0x7f800000,3 +np.float32,0x3e7143e8,0x3e880763,3 +np.float32,0xbf737008,0xbf1d160e,3 +np.float32,0xfc408b00,0xbf800000,3 +np.float32,0x803912ca,0x803912ca,3 +np.float32,0x7db31f4e,0x7f800000,3 +np.float32,0xff578b54,0xbf800000,3 +np.float32,0x3f068ec4,0x3f31062b,3 +np.float32,0x35f64f,0x35f64f,3 +np.float32,0x80437df4,0x80437df4,3 +np.float32,0x568059,0x568059,3 +np.float32,0x8005f8ba,0x8005f8ba,3 +np.float32,0x6824ad,0x6824ad,3 +np.float32,0xff3fdf30,0xbf800000,3 +np.float32,0xbf6f7682,0xbf1b89d6,3 +np.float32,0x3dcea8a0,0x3dd971f5,3 +np.float32,0x3ee32a62,0x3f0ef5a9,3 +np.float32,0xbf735bcd,0xbf1d0e3d,3 +np.float32,0x7e8c7c28,0x7f800000,3 +np.float32,0x3ed552bc,0x3f045161,3 +np.float32,0xfed90a8a,0xbf800000,3 +np.float32,0xbe454368,0xbe336d2a,3 +np.float32,0xbf171d26,0xbee4442d,3 +np.float32,0x80652bf9,0x80652bf9,3 +np.float32,0xbdbaaa20,0xbdb26914,3 +np.float32,0x3f56063d,0x3fa7522e,3 +np.float32,0x3d3d4fd3,0x3d41c13f,3 +np.float32,0x80456040,0x80456040,3 +np.float32,0x3dc15586,0x3dcac0ef,3 +np.float32,0x7f753060,0x7f800000,3 +np.float32,0x7f7d8039,0x7f800000,3 +np.float32,0xfdebf280,0xbf800000,3 +np.float32,0xbf1892c3,0xbee5e116,3 +np.float32,0xbf0f1468,0xbedb3878,3 +np.float32,0x40d85c,0x40d85c,3 +np.float32,0x3f93dd,0x3f93dd,3 +np.float32,0xbf5730fd,0xbf118c24,3 +np.float32,0xfe17aa44,0xbf800000,3 +np.float32,0x3dc0baf4,0x3dca1716,3 +np.float32,0xbf3433d8,0xbf015efb,3 +np.float32,0x1c59f5,0x1c59f5,3 +np.float32,0x802b1540,0x802b1540,3 +np.float32,0xbe47df6c,0xbe35936e,3 +np.float32,0xbe8e7070,0xbe78af32,3 +np.float32,0xfe7057f4,0xbf800000,3 +np.float32,0x80668b69,0x80668b69,3 +np.float32,0xbe677810,0xbe4f2c2d,3 +np.float32,0xbe7a2f1c,0xbe5df733,3 +np.float32,0xfeb79e3c,0xbf800000,3 +np.float32,0xbeb6e320,0xbe99c9e8,3 +np.float32,0xfea188f2,0xbf800000,3 +np.float32,0x7dcaeb15,0x7f800000,3 +np.float32,0x1be567,0x1be567,3 +np.float32,0xbf4041cc,0xbf07320d,3 +np.float32,0x3f721aa7,0x3fc98e9a,3 +np.float32,0x7f5aa835,0x7f800000,3 +np.float32,0x15180e,0x15180e,3 +np.float32,0x3f73d739,0x3fcbccdb,3 +np.float32,0xbeecd380,0xbebd9b36,3 +np.float32,0x3f2caec7,0x3f768fea,3 +np.float32,0xbeaf65f2,0xbe9482bb,3 +np.float32,0xfe6aa384,0xbf800000,3 +np.float32,0xbf4f2c0a,0xbf0e085e,3 +np.float32,0xbf2b5907,0xbef9d431,3 +np.float32,0x3e855e0d,0x3e985960,3 +np.float32,0x8056cc64,0x8056cc64,3 +np.float32,0xff746bb5,0xbf800000,3 +np.float32,0x3e0332f6,0x3e0bf986,3 +np.float32,0xff637720,0xbf800000,3 +np.float32,0xbf330676,0xbf00c990,3 +np.float32,0x3ec449a1,0x3eef3862,3 +np.float32,0x766541,0x766541,3 +np.float32,0xfe2edf6c,0xbf800000,3 +np.float32,0xbebb28ca,0xbe9cc3e2,3 +np.float32,0x3f16c930,0x3f4d5ce4,3 +np.float32,0x7f1a9a4a,0x7f800000,3 +np.float32,0x3e9ba1,0x3e9ba1,3 +np.float32,0xbf73d5f6,0xbf1d3d69,3 +np.float32,0xfdc8a8b0,0xbf800000,3 +np.float32,0x50f051,0x50f051,3 +np.float32,0xff0add02,0xbf800000,3 +np.float32,0x1e50bf,0x1e50bf,3 +np.float32,0x3f04d287,0x3f2e1948,3 +np.float32,0x7f1e50,0x7f1e50,3 
+np.float32,0x2affb3,0x2affb3,3 +np.float32,0x80039f07,0x80039f07,3 +np.float32,0x804ba79e,0x804ba79e,3 +np.float32,0x7b5a8eed,0x7f800000,3 +np.float32,0x3e1a8b28,0x3e26d0a7,3 +np.float32,0x3ea95f29,0x3ec8bfa4,3 +np.float32,0x7e09fa55,0x7f800000,3 +np.float32,0x7eacb1b3,0x7f800000,3 +np.float32,0x3e8ad7c0,0x3e9f7dec,3 +np.float32,0x7e0e997c,0x7f800000,3 +np.float32,0x3f4422b4,0x3f936398,3 +np.float32,0x806bd222,0x806bd222,3 +np.float32,0x677ae6,0x677ae6,3 +np.float32,0x62cf68,0x62cf68,3 +np.float32,0x7e4e594e,0x7f800000,3 +np.float32,0x80445fd1,0x80445fd1,3 +np.float32,0xff3a0d04,0xbf800000,3 +np.float32,0x8052b256,0x8052b256,3 +np.float32,0x3cb34440,0x3cb53e11,3 +np.float32,0xbf0e3865,0xbeda3c6d,3 +np.float32,0x3f49f5df,0x3f99ba17,3 +np.float32,0xbed75a22,0xbeafcc09,3 +np.float32,0xbf7aec64,0xbf1fefc8,3 +np.float32,0x7f35a62d,0x7f800000,3 +np.float32,0xbf787b03,0xbf1f03fc,3 +np.float32,0x8006a62a,0x8006a62a,3 +np.float32,0x3f6419e7,0x3fb803c7,3 +np.float32,0x3ecea2e5,0x3efe8f01,3 +np.float32,0x80603577,0x80603577,3 +np.float32,0xff73198c,0xbf800000,3 +np.float32,0x7def110a,0x7f800000,3 +np.float32,0x544efd,0x544efd,3 +np.float32,0x3f052340,0x3f2ea0fc,3 +np.float32,0xff306666,0xbf800000,3 +np.float32,0xbf800000,0xbf21d2a7,3 +np.float32,0xbed3e150,0xbead826a,3 +np.float32,0x3f430c99,0x3f92390f,3 +np.float32,0xbf4bffa4,0xbf0c9c73,3 +np.float32,0xfd97a710,0xbf800000,3 +np.float32,0x3cadf0fe,0x3cafcd1a,3 +np.float32,0x807af7b4,0x807af7b4,3 +np.float32,0xbc508600,0xbc4f33bc,3 +np.float32,0x7f3e0ec7,0x7f800000,3 +np.float32,0xbe51334c,0xbe3d36f7,3 +np.float32,0xfe7b7fb4,0xbf800000,3 +np.float32,0xfed9c45e,0xbf800000,3 +np.float32,0x3da024eb,0x3da6926a,3 +np.float32,0x7eed9e76,0x7f800000,3 +np.float32,0xbf2b8f1f,0xbefa0b91,3 +np.float32,0x3f2b9286,0x3f746318,3 +np.float32,0xfe8af49c,0xbf800000,3 +np.float32,0x9c4f7,0x9c4f7,3 +np.float32,0x801d7543,0x801d7543,3 +np.float32,0xbf66474a,0xbf17de66,3 +np.float32,0xbf562155,0xbf1116b1,3 +np.float32,0x46a8de,0x46a8de,3 +np.float32,0x8053fe6b,0x8053fe6b,3 +np.float32,0xbf6ee842,0xbf1b51f3,3 +np.float32,0xbf6ad78e,0xbf19b565,3 +np.float32,0xbf012574,0xbecad7ff,3 +np.float32,0x748364,0x748364,3 +np.float32,0x8073f59b,0x8073f59b,3 +np.float32,0xff526825,0xbf800000,3 +np.float32,0xfeb02dc4,0xbf800000,3 +np.float32,0x8033eb1c,0x8033eb1c,3 +np.float32,0x3f3685ea,0x3f8520cc,3 +np.float32,0x7f657902,0x7f800000,3 +np.float32,0xbf75eac4,0xbf1e0a1f,3 +np.float32,0xfe67f384,0xbf800000,3 +np.float32,0x3f56d3cc,0x3fa83faf,3 +np.float32,0x44a4ce,0x44a4ce,3 +np.float32,0x1dc4b3,0x1dc4b3,3 +np.float32,0x4fb3b2,0x4fb3b2,3 +np.float32,0xbea904a4,0xbe8ff3ed,3 +np.float32,0x7e668f16,0x7f800000,3 +np.float32,0x7f538378,0x7f800000,3 +np.float32,0x80541709,0x80541709,3 +np.float32,0x80228040,0x80228040,3 +np.float32,0x7ef9694e,0x7f800000,3 +np.float32,0x3f5fca9b,0x3fb2ce54,3 +np.float32,0xbe9c43c2,0xbe86ab84,3 +np.float32,0xfecee000,0xbf800000,3 +np.float32,0x5a65c2,0x5a65c2,3 +np.float32,0x3f736572,0x3fcb3985,3 +np.float32,0xbf2a03f7,0xbef87600,3 +np.float32,0xfe96b488,0xbf800000,3 +np.float32,0xfedd8800,0xbf800000,3 +np.float32,0x80411804,0x80411804,3 +np.float32,0x7edcb0a6,0x7f800000,3 +np.float32,0x2bb882,0x2bb882,3 +np.float32,0x3f800000,0x3fdbf0a9,3 +np.float32,0x764b27,0x764b27,3 +np.float32,0x7e92035d,0x7f800000,3 +np.float32,0x3e80facb,0x3e92ae1d,3 +np.float32,0x8040b81a,0x8040b81a,3 +np.float32,0x7f487fe4,0x7f800000,3 +np.float32,0xbc641780,0xbc6282ed,3 +np.float32,0x804b0bb9,0x804b0bb9,3 +np.float32,0x7d0b7c39,0x7f800000,3 +np.float32,0xff072080,0xbf800000,3 
+np.float32,0xbed7aff8,0xbeb00462,3 +np.float32,0x35e247,0x35e247,3 +np.float32,0xbf7edd19,0xbf216766,3 +np.float32,0x8004a539,0x8004a539,3 +np.float32,0xfdfc1790,0xbf800000,3 +np.float32,0x8037a841,0x8037a841,3 +np.float32,0xfed0a8a8,0xbf800000,3 +np.float32,0x7f1f1697,0x7f800000,3 +np.float32,0x3f2ccc6e,0x3f76ca23,3 +np.float32,0x35eada,0x35eada,3 +np.float32,0xff111f42,0xbf800000,3 +np.float32,0x3ee1ab7f,0x3f0dcbbe,3 +np.float32,0xbf6e89ee,0xbf1b2cd4,3 +np.float32,0x3f58611c,0x3faa0cdc,3 +np.float32,0x1ac6a6,0x1ac6a6,3 +np.float32,0xbf1286fa,0xbedf2312,3 +np.float32,0x7e451137,0x7f800000,3 +np.float32,0xbe92c326,0xbe7f3405,3 +np.float32,0x3f2fdd16,0x3f7cd87b,3 +np.float32,0xbe5c0ea0,0xbe4604c2,3 +np.float32,0xbdb29968,0xbdab0883,3 +np.float32,0x3964,0x3964,3 +np.float32,0x3f0dc236,0x3f3d60a0,3 +np.float32,0x7c3faf06,0x7f800000,3 +np.float32,0xbef41f7a,0xbec22b16,3 +np.float32,0x3f4c0289,0x3f9bfdcc,3 +np.float32,0x806084e9,0x806084e9,3 +np.float32,0x3ed1d8dd,0x3f01b0c1,3 +np.float32,0x806d8d8b,0x806d8d8b,3 +np.float32,0x3f052180,0x3f2e9e0a,3 +np.float32,0x803d85d5,0x803d85d5,3 +np.float32,0x3e0afd70,0x3e14dd48,3 +np.float32,0x2fbc63,0x2fbc63,3 +np.float32,0x2e436f,0x2e436f,3 +np.float32,0xbf7b19e6,0xbf2000da,3 +np.float32,0x3f34022e,0x3f829362,3 +np.float32,0x3d2b40e0,0x3d2ee246,3 +np.float32,0x3f5298b4,0x3fa3649b,3 +np.float32,0xbdb01328,0xbda8b7de,3 +np.float32,0x7f693c81,0x7f800000,3 +np.float32,0xbeb1abc0,0xbe961edc,3 +np.float32,0x801d9b5d,0x801d9b5d,3 +np.float32,0x80628668,0x80628668,3 +np.float32,0x800f57dd,0x800f57dd,3 +np.float32,0x8017c94f,0x8017c94f,3 +np.float32,0xbf16f5f4,0xbee418b8,3 +np.float32,0x3e686476,0x3e827022,3 +np.float32,0xbf256796,0xbef3abd9,3 +np.float32,0x7f1b4485,0x7f800000,3 +np.float32,0xbea0b3cc,0xbe89ed21,3 +np.float32,0xfee08b2e,0xbf800000,3 +np.float32,0x523cb4,0x523cb4,3 +np.float32,0x3daf2cb2,0x3db6e273,3 +np.float32,0xbd531c40,0xbd4dc323,3 +np.float32,0x80078fe5,0x80078fe5,3 +np.float32,0x80800000,0x80800000,3 +np.float32,0x3f232438,0x3f642d1a,3 +np.float32,0x3ec29446,0x3eecb7c0,3 +np.float32,0x3dbcd2a4,0x3dc5cd1d,3 +np.float32,0x7f045b0d,0x7f800000,3 +np.float32,0x7f22e6d1,0x7f800000,3 +np.float32,0xbf5d3430,0xbf141c80,3 +np.float32,0xbe03ec70,0xbdf78ee6,3 +np.float32,0x3e93ec9a,0x3eab822f,3 +np.float32,0x7f3b9262,0x7f800000,3 +np.float32,0x65ac6a,0x65ac6a,3 +np.float32,0x3db9a8,0x3db9a8,3 +np.float32,0xbf37ab59,0xbf031306,3 +np.float32,0x33c40e,0x33c40e,3 +np.float32,0x7f7a478f,0x7f800000,3 +np.float32,0xbe8532d0,0xbe6a906f,3 +np.float32,0x801c081d,0x801c081d,3 +np.float32,0xbe4212a0,0xbe30ca73,3 +np.float32,0xff0b603e,0xbf800000,3 +np.float32,0x4554dc,0x4554dc,3 +np.float32,0x3dd324be,0x3dde695e,3 +np.float32,0x3f224c44,0x3f629557,3 +np.float32,0x8003cd79,0x8003cd79,3 +np.float32,0xbf31351c,0xbeffc2fd,3 +np.float32,0x8034603a,0x8034603a,3 +np.float32,0xbf6fcb70,0xbf1bab24,3 +np.float32,0x804eb67e,0x804eb67e,3 +np.float32,0xff05c00e,0xbf800000,3 +np.float32,0x3eb5b36f,0x3eda1ec7,3 +np.float32,0x3f1ed7f9,0x3f5c1d90,3 +np.float32,0x3f052d8a,0x3f2eb24b,3 +np.float32,0x5ddf51,0x5ddf51,3 +np.float32,0x7e50c11c,0x7f800000,3 +np.float32,0xff74f55a,0xbf800000,3 +np.float32,0x4322d,0x4322d,3 +np.float32,0x3f16f8a9,0x3f4db27a,3 +np.float32,0x3f4f23d6,0x3f9f7c2c,3 +np.float32,0xbf706c1e,0xbf1bea0a,3 +np.float32,0x3f2cbd52,0x3f76ac77,3 +np.float32,0xf3043,0xf3043,3 +np.float32,0xfee79de0,0xbf800000,3 +np.float32,0x7e942f69,0x7f800000,3 +np.float32,0x180139,0x180139,3 +np.float32,0xff69c678,0xbf800000,3 +np.float32,0x3f46773f,0x3f95e840,3 
+np.float32,0x804aae1c,0x804aae1c,3 +np.float32,0x3eb383b4,0x3ed7024c,3 +np.float32,0x8032624e,0x8032624e,3 +np.float32,0xbd0a0f80,0xbd07c27d,3 +np.float32,0xbf1c9b98,0xbeea4a61,3 +np.float32,0x7f370999,0x7f800000,3 +np.float32,0x801931f9,0x801931f9,3 +np.float32,0x3f6f45ce,0x3fc5eea0,3 +np.float32,0xff0ab4cc,0xbf800000,3 +np.float32,0x4c043d,0x4c043d,3 +np.float32,0x8002a599,0x8002a599,3 +np.float32,0xbc4a6080,0xbc4921d7,3 +np.float32,0x3f008d14,0x3f26fb72,3 +np.float32,0x7f48b3d9,0x7f800000,3 +np.float32,0x7cb2ec7e,0x7f800000,3 +np.float32,0xbf1338bd,0xbedfeb61,3 +np.float32,0x0,0x0,3 +np.float32,0xbf2f5b64,0xbefde71c,3 +np.float32,0xbe422974,0xbe30dd56,3 +np.float32,0x3f776be8,0x3fd07950,3 +np.float32,0xbf3e97a1,0xbf06684a,3 +np.float32,0x7d28cb26,0x7f800000,3 +np.float32,0x801618d2,0x801618d2,3 +np.float32,0x807e4f83,0x807e4f83,3 +np.float32,0x8006b07d,0x8006b07d,3 +np.float32,0xfea1c042,0xbf800000,3 +np.float32,0xff24ef74,0xbf800000,3 +np.float32,0xfef7ab16,0xbf800000,3 +np.float32,0x70b771,0x70b771,3 +np.float32,0x7daeb64e,0x7f800000,3 +np.float32,0xbe66e378,0xbe4eb59c,3 +np.float32,0xbead1534,0xbe92dcf7,3 +np.float32,0x7e6769b8,0x7f800000,3 +np.float32,0x7ecd0890,0x7f800000,3 +np.float32,0xbe7380d8,0xbe58b747,3 +np.float32,0x3efa6f2f,0x3f218265,3 +np.float32,0x3f59dada,0x3fabc5eb,3 +np.float32,0xff0f2d20,0xbf800000,3 +np.float32,0x8060210e,0x8060210e,3 +np.float32,0x3ef681e8,0x3f1e51c8,3 +np.float32,0x77a6dd,0x77a6dd,3 +np.float32,0xbebfdd0e,0xbea00399,3 +np.float32,0xfe889b72,0xbf800000,3 +np.float32,0x8049ed2c,0x8049ed2c,3 +np.float32,0x3b089dc4,0x3b08c23e,3 +np.float32,0xbf13c7c4,0xbee08c28,3 +np.float32,0x3efa13b9,0x3f2137d7,3 +np.float32,0x3e9385dc,0x3eaaf914,3 +np.float32,0x7e0e6a43,0x7f800000,3 +np.float32,0x7df6d63f,0x7f800000,3 +np.float32,0x3f3efead,0x3f8dea03,3 +np.float32,0xff52548c,0xbf800000,3 +np.float32,0x803ff9d8,0x803ff9d8,3 +np.float32,0x3c825823,0x3c836303,3 +np.float32,0xfc9e97a0,0xbf800000,3 +np.float32,0xfe644f48,0xbf800000,3 +np.float32,0x802f5017,0x802f5017,3 +np.float32,0x3d5753b9,0x3d5d1661,3 +np.float32,0x7f2a55d2,0x7f800000,3 +np.float32,0x7f4dabfe,0x7f800000,3 +np.float32,0x3f49492a,0x3f98fc47,3 +np.float32,0x3f4d1589,0x3f9d2f82,3 +np.float32,0xff016208,0xbf800000,3 +np.float32,0xbf571cb7,0xbf118365,3 +np.float32,0xbf1ef297,0xbeecd136,3 +np.float32,0x36266b,0x36266b,3 +np.float32,0xbed07b0e,0xbeab4129,3 +np.float32,0x7f553365,0x7f800000,3 +np.float32,0xfe9bb8c6,0xbf800000,3 +np.float32,0xbeb497d6,0xbe982e19,3 +np.float32,0xbf27af6c,0xbef60d16,3 +np.float32,0x55cf51,0x55cf51,3 +np.float32,0x3eab1db0,0x3ecb2e4f,3 +np.float32,0x3e777603,0x3e8bf62f,3 +np.float32,0x7f10e374,0x7f800000,3 +np.float32,0xbf1f6480,0xbeed4b8d,3 +np.float32,0x40479d,0x40479d,3 +np.float32,0x156259,0x156259,3 +np.float32,0x3d852e30,0x3d899b2d,3 +np.float32,0x80014ff3,0x80014ff3,3 +np.float32,0xbd812fa8,0xbd7a645c,3 +np.float32,0x800ab780,0x800ab780,3 +np.float32,0x3ea02ff4,0x3ebc13bd,3 +np.float32,0x7e858b8e,0x7f800000,3 +np.float32,0x75d63b,0x75d63b,3 +np.float32,0xbeb15c94,0xbe95e6e3,3 +np.float32,0x3da0cee0,0x3da74a39,3 +np.float32,0xff21c01c,0xbf800000,3 +np.float32,0x8049b5eb,0x8049b5eb,3 +np.float32,0x80177ab0,0x80177ab0,3 +np.float32,0xff137a50,0xbf800000,3 +np.float32,0x3f7febba,0x3fdbd51c,3 +np.float32,0x8041e4dd,0x8041e4dd,3 +np.float32,0x99b8c,0x99b8c,3 +np.float32,0x5621ba,0x5621ba,3 +np.float32,0x14b534,0x14b534,3 +np.float32,0xbe2eb3a8,0xbe209c95,3 +np.float32,0x7e510c28,0x7f800000,3 +np.float32,0x804ec2f2,0x804ec2f2,3 +np.float32,0x3f662406,0x3fba82b0,3 
+np.float32,0x800000,0x800000,3 +np.float32,0x3f3120d6,0x3f7f5d96,3 +np.float32,0x7f179b8e,0x7f800000,3 +np.float32,0x7f65278e,0x7f800000,3 +np.float32,0xfeb50f52,0xbf800000,3 +np.float32,0x7f051bd1,0x7f800000,3 +np.float32,0x7ea0558d,0x7f800000,3 +np.float32,0xbd0a96c0,0xbd08453f,3 +np.float64,0xee82da5ddd05c,0xee82da5ddd05c,1 +np.float64,0x800c3a22d7f87446,0x800c3a22d7f87446,1 +np.float64,0xbfd34b20eaa69642,0xbfd0a825e7688d3e,1 +np.float64,0x3fd6a0f2492d41e5,0x3fdb253b906057b3,1 +np.float64,0xbfda13d8783427b0,0xbfd56b1d76684332,1 +np.float64,0xbfe50b5a99ea16b5,0xbfded7dd82c6f746,1 +np.float64,0x3f82468fc0248d20,0x3f825b7fa9378ee9,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x856e50290adca,0x856e50290adca,1 +np.float64,0x7fde55a5fa3cab4b,0x7ff0000000000000,1 +np.float64,0x7fcf2c8dd93e591b,0x7ff0000000000000,1 +np.float64,0x8001b3a0e3236743,0x8001b3a0e3236743,1 +np.float64,0x8000fdb14821fb63,0x8000fdb14821fb63,1 +np.float64,0xbfe3645e08e6c8bc,0xbfdd161362a5e9ef,1 +np.float64,0x7feb34d28b3669a4,0x7ff0000000000000,1 +np.float64,0x80099dd810933bb1,0x80099dd810933bb1,1 +np.float64,0xbfedbcc1097b7982,0xbfe35d86414d53dc,1 +np.float64,0x7fdc406fbdb880de,0x7ff0000000000000,1 +np.float64,0x800c4bf85ab897f1,0x800c4bf85ab897f1,1 +np.float64,0x3fd8f7b0e0b1ef60,0x3fde89b497ae20d8,1 +np.float64,0xffe4fced5c69f9da,0xbff0000000000000,1 +np.float64,0xbfe54d421fea9a84,0xbfdf1be0cbfbfcba,1 +np.float64,0x800af72f3535ee5f,0x800af72f3535ee5f,1 +np.float64,0x3fe24e6570e49ccb,0x3fe8b3a86d970411,1 +np.float64,0xbfdd7b22d0baf646,0xbfd79fac2e4f7558,1 +np.float64,0xbfe6a7654c6d4eca,0xbfe03c1f13f3b409,1 +np.float64,0x3fe2c3eb662587d7,0x3fe98566e625d4f5,1 +np.float64,0x3b1ef71e763e0,0x3b1ef71e763e0,1 +np.float64,0xffed03c6baba078d,0xbff0000000000000,1 +np.float64,0x3febac19d0b75834,0x3ff5fdacc9d51bcd,1 +np.float64,0x800635d6794c6bae,0x800635d6794c6bae,1 +np.float64,0xbfe8cafc827195f9,0xbfe1411438608ae1,1 +np.float64,0x7feeb616a83d6c2c,0x7ff0000000000000,1 +np.float64,0x3fd52d62a2aa5ac5,0x3fd91a07a7f18f44,1 +np.float64,0x80036996b8a6d32e,0x80036996b8a6d32e,1 +np.float64,0x2b1945965632a,0x2b1945965632a,1 +np.float64,0xbfecb5e8c9796bd2,0xbfe2f40fca276aa2,1 +np.float64,0x3fe8669ed4f0cd3e,0x3ff24c89fc9cdbff,1 +np.float64,0x71e9f65ee3d3f,0x71e9f65ee3d3f,1 +np.float64,0xbfd5ab262bab564c,0xbfd261ae108ef79e,1 +np.float64,0xbfe7091342ee1226,0xbfe06bf5622d75f6,1 +np.float64,0x49e888d093d12,0x49e888d093d12,1 +np.float64,0x2272f3dc44e5f,0x2272f3dc44e5f,1 +np.float64,0x7fe98736e0b30e6d,0x7ff0000000000000,1 +np.float64,0x30fa9cde61f54,0x30fa9cde61f54,1 +np.float64,0x7fdc163fc0382c7f,0x7ff0000000000000,1 +np.float64,0xffb40d04ee281a08,0xbff0000000000000,1 +np.float64,0xffe624617f2c48c2,0xbff0000000000000,1 +np.float64,0x3febb582bd376b05,0x3ff608da584d1716,1 +np.float64,0xfc30a5a5f8615,0xfc30a5a5f8615,1 +np.float64,0x3fef202efd7e405e,0x3ffa52009319b069,1 +np.float64,0x8004d0259829a04c,0x8004d0259829a04c,1 +np.float64,0x800622dc71ec45ba,0x800622dc71ec45ba,1 +np.float64,0xffefffffffffffff,0xbff0000000000000,1 +np.float64,0x800e89113c9d1223,0x800e89113c9d1223,1 +np.float64,0x7fba7fde3034ffbb,0x7ff0000000000000,1 +np.float64,0xbfeea31e807d463d,0xbfe3b7369b725915,1 +np.float64,0x3feb7c9589f6f92c,0x3ff5c56cf71b0dff,1 +np.float64,0x3fd52d3b59aa5a77,0x3fd919d0f683fd07,1 +np.float64,0x800de90a43fbd215,0x800de90a43fbd215,1 +np.float64,0x3fe7eb35a9efd66b,0x3ff1c940dbfc6ef9,1 +np.float64,0xbda0adcb7b416,0xbda0adcb7b416,1 +np.float64,0x7fc5753e3a2aea7b,0x7ff0000000000000,1 +np.float64,0xffdd101d103a203a,0xbff0000000000000,1 
+np.float64,0x7fcb54f56836a9ea,0x7ff0000000000000,1 +np.float64,0xbfd61c8d6eac391a,0xbfd2b23bc0a2cef4,1 +np.float64,0x3feef55de37deabc,0x3ffa198639a0161d,1 +np.float64,0x7fe4ffbfaea9ff7e,0x7ff0000000000000,1 +np.float64,0x9d1071873a20e,0x9d1071873a20e,1 +np.float64,0x3fef1ecb863e3d97,0x3ffa502a81e09cfc,1 +np.float64,0xad2da12b5a5b4,0xad2da12b5a5b4,1 +np.float64,0xffe614b74c6c296e,0xbff0000000000000,1 +np.float64,0xffe60d3f286c1a7e,0xbff0000000000000,1 +np.float64,0x7fda7d91f4b4fb23,0x7ff0000000000000,1 +np.float64,0x800023f266a047e6,0x800023f266a047e6,1 +np.float64,0x7fdf5f9ad23ebf35,0x7ff0000000000000,1 +np.float64,0x3fa7459f002e8b3e,0x3fa7cf178dcf0af6,1 +np.float64,0x3fe9938d61f3271b,0x3ff39516a13caec3,1 +np.float64,0xbfd59314c3ab262a,0xbfd250830f73efd2,1 +np.float64,0xbfc7e193f72fc328,0xbfc5c924339dd7a8,1 +np.float64,0x7fec1965f17832cb,0x7ff0000000000000,1 +np.float64,0xbfd932908eb26522,0xbfd4d4312d272580,1 +np.float64,0xbfdf2d08e2be5a12,0xbfd8add1413b0b1b,1 +np.float64,0x7fdcf7cc74b9ef98,0x7ff0000000000000,1 +np.float64,0x7fc79300912f2600,0x7ff0000000000000,1 +np.float64,0xffd4bd8f23297b1e,0xbff0000000000000,1 +np.float64,0x41869ce0830e,0x41869ce0830e,1 +np.float64,0x3fe5dcec91ebb9da,0x3fef5e213598cbd4,1 +np.float64,0x800815d9c2902bb4,0x800815d9c2902bb4,1 +np.float64,0x800ba1a4b877434a,0x800ba1a4b877434a,1 +np.float64,0x80069d7bdc4d3af8,0x80069d7bdc4d3af8,1 +np.float64,0xcf00d4339e01b,0xcf00d4339e01b,1 +np.float64,0x80072b71bd4e56e4,0x80072b71bd4e56e4,1 +np.float64,0x80059ca6fbab394f,0x80059ca6fbab394f,1 +np.float64,0x3fe522fc092a45f8,0x3fedf212682bf894,1 +np.float64,0x7fe17f384ea2fe70,0x7ff0000000000000,1 +np.float64,0x0,0x0,1 +np.float64,0x3f72bb4c20257698,0x3f72c64766b52069,1 +np.float64,0x7fbc97c940392f92,0x7ff0000000000000,1 +np.float64,0xffc5904ebd2b209c,0xbff0000000000000,1 +np.float64,0xbfe34fb55b669f6a,0xbfdcff81dd30a49d,1 +np.float64,0x8007ccda006f99b5,0x8007ccda006f99b5,1 +np.float64,0x3fee50e4c8fca1ca,0x3ff9434c7750ad0f,1 +np.float64,0x7fee7b07c67cf60f,0x7ff0000000000000,1 +np.float64,0x3fdcce4a5a399c95,0x3fe230c83f28218a,1 +np.float64,0x7fee5187b37ca30e,0x7ff0000000000000,1 +np.float64,0x3fc48f6a97291ed8,0x3fc64db6200a9833,1 +np.float64,0xc7fec3498ffd9,0xc7fec3498ffd9,1 +np.float64,0x800769c59d2ed38c,0x800769c59d2ed38c,1 +np.float64,0xffe69ede782d3dbc,0xbff0000000000000,1 +np.float64,0x3fecd9770979b2ee,0x3ff76a1f2f0f08f2,1 +np.float64,0x5aa358a8b546c,0x5aa358a8b546c,1 +np.float64,0xbfe795a0506f2b40,0xbfe0afcc52c0166b,1 +np.float64,0xffd4ada1e8a95b44,0xbff0000000000000,1 +np.float64,0xffcac1dc213583b8,0xbff0000000000000,1 +np.float64,0xffe393c15fa72782,0xbff0000000000000,1 +np.float64,0xbfcd6a3c113ad478,0xbfca47a2157b9cdd,1 +np.float64,0xffedde20647bbc40,0xbff0000000000000,1 +np.float64,0x3fd0d011b1a1a024,0x3fd33a57945559f4,1 +np.float64,0x3fef27e29f7e4fc6,0x3ffa5c314e0e3d69,1 +np.float64,0xffe96ff71f72dfee,0xbff0000000000000,1 +np.float64,0xffe762414f2ec482,0xbff0000000000000,1 +np.float64,0x3fc2dcfd3d25b9fa,0x3fc452f41682a12e,1 +np.float64,0xbfbdb125b63b6248,0xbfbc08e6553296d4,1 +np.float64,0x7b915740f724,0x7b915740f724,1 +np.float64,0x60b502b2c16a1,0x60b502b2c16a1,1 +np.float64,0xbfeb38b0be367162,0xbfe254f6782cfc47,1 +np.float64,0x800dc39a3edb8735,0x800dc39a3edb8735,1 +np.float64,0x3fea4fb433349f68,0x3ff468b97cf699f5,1 +np.float64,0xbfd49967962932d0,0xbfd19ceb41ff4cd0,1 +np.float64,0xbfebf75cd377eeba,0xbfe2a576bdbccccc,1 +np.float64,0xbfb653d65c2ca7b0,0xbfb561ab8fcb3f26,1 +np.float64,0xffe3f34b8727e696,0xbff0000000000000,1 
+np.float64,0x3fdd798064baf301,0x3fe2b7c130a6fc63,1 +np.float64,0x3febe027e6b7c050,0x3ff63bac1b22e12d,1 +np.float64,0x7fcaa371af3546e2,0x7ff0000000000000,1 +np.float64,0xbfe6ee980a2ddd30,0xbfe05f0bc5dc80d2,1 +np.float64,0xc559c33f8ab39,0xc559c33f8ab39,1 +np.float64,0x84542c2b08a86,0x84542c2b08a86,1 +np.float64,0xbfe5645e046ac8bc,0xbfdf3398dc3cc1bd,1 +np.float64,0x3fee8c48ae7d1892,0x3ff9902899480526,1 +np.float64,0x3fb706471c2e0c8e,0x3fb817787aace8db,1 +np.float64,0x7fefe78f91ffcf1e,0x7ff0000000000000,1 +np.float64,0xbfcf6d560b3edaac,0xbfcbddc72a2130df,1 +np.float64,0x7fd282bfd925057f,0x7ff0000000000000,1 +np.float64,0x3fb973dbee32e7b8,0x3fbac2c87cbd0215,1 +np.float64,0x3fd1ce38ff239c72,0x3fd4876de5164420,1 +np.float64,0x8008ac2e3c31585d,0x8008ac2e3c31585d,1 +np.float64,0x3fa05e06dc20bc00,0x3fa0a1b7de904dce,1 +np.float64,0x7fd925f215324be3,0x7ff0000000000000,1 +np.float64,0x3f949d95d0293b2c,0x3f94d31197d51874,1 +np.float64,0xffdded9e67bbdb3c,0xbff0000000000000,1 +np.float64,0x3fed390dcfba721c,0x3ff7e08c7a709240,1 +np.float64,0x7fe6e62300adcc45,0x7ff0000000000000,1 +np.float64,0xbfd779bc312ef378,0xbfd3a6cb64bb0181,1 +np.float64,0x3fe43e9877287d31,0x3fec3e100ef935fd,1 +np.float64,0x210b68e44216e,0x210b68e44216e,1 +np.float64,0x3fcdffc1e73bff84,0x3fd0e729d02ec539,1 +np.float64,0xcea10c0f9d422,0xcea10c0f9d422,1 +np.float64,0x7feb97a82d772f4f,0x7ff0000000000000,1 +np.float64,0x9b4b4d953696a,0x9b4b4d953696a,1 +np.float64,0x3fd1bd8e95237b1d,0x3fd4716dd34cf828,1 +np.float64,0x800fc273841f84e7,0x800fc273841f84e7,1 +np.float64,0xbfd2aef167255de2,0xbfd0340f30d82f18,1 +np.float64,0x800d021a551a0435,0x800d021a551a0435,1 +np.float64,0xffebf934a8b7f268,0xbff0000000000000,1 +np.float64,0x3fd819849fb03308,0x3fdd43bca0aac749,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x27c34b064f86a,0x27c34b064f86a,1 +np.float64,0x7fef4f5a373e9eb3,0x7ff0000000000000,1 +np.float64,0x7fd92fccce325f99,0x7ff0000000000000,1 +np.float64,0x800520869d6a410e,0x800520869d6a410e,1 +np.float64,0x3fccbcaddf397958,0x3fd01bf6b0c4d97f,1 +np.float64,0x80039ebfc4273d80,0x80039ebfc4273d80,1 +np.float64,0xbfed1f0b3c7a3e16,0xbfe31ea6e4c69141,1 +np.float64,0x7fee1bb7c4bc376f,0x7ff0000000000000,1 +np.float64,0xbfa8bee1d8317dc0,0xbfa8283b7dbf95a9,1 +np.float64,0x3fe797db606f2fb6,0x3ff171b1c2bc8fe5,1 +np.float64,0xbfee2ecfdbbc5da0,0xbfe38a3f0a43d14e,1 +np.float64,0x3fe815c7f1302b90,0x3ff1f65165c45d71,1 +np.float64,0xbfbb265c94364cb8,0xbfb9c27ec61a9a1d,1 +np.float64,0x3fcf1cab5d3e3957,0x3fd19c07444642f9,1 +np.float64,0xbfe6ae753f6d5cea,0xbfe03f99666dbe17,1 +np.float64,0xbfd18a2a73a31454,0xbfceaee204aca016,1 +np.float64,0x3fb8a1dffc3143c0,0x3fb9db38341ab1a3,1 +np.float64,0x7fd2a0376025406e,0x7ff0000000000000,1 +np.float64,0x7fe718c0e3ae3181,0x7ff0000000000000,1 +np.float64,0x3fb264d42424c9a8,0x3fb3121f071d4db4,1 +np.float64,0xd27190a7a4e32,0xd27190a7a4e32,1 +np.float64,0xbfe467668c68cecd,0xbfde2c4616738d5e,1 +np.float64,0x800ab9a2b9357346,0x800ab9a2b9357346,1 +np.float64,0x7fcbd108d537a211,0x7ff0000000000000,1 +np.float64,0x3fb79bba6e2f3770,0x3fb8bb2c140d3445,1 +np.float64,0xffefa7165e3f4e2c,0xbff0000000000000,1 +np.float64,0x7fb40185a428030a,0x7ff0000000000000,1 +np.float64,0xbfe9e3d58e73c7ab,0xbfe1c04d51c83d69,1 +np.float64,0x7fef5b97b17eb72e,0x7ff0000000000000,1 +np.float64,0x800a2957683452af,0x800a2957683452af,1 +np.float64,0x800f54f1925ea9e3,0x800f54f1925ea9e3,1 +np.float64,0xeffa4e77dff4a,0xeffa4e77dff4a,1 +np.float64,0xffbe501aa03ca038,0xbff0000000000000,1 +np.float64,0x8006c651bced8ca4,0x8006c651bced8ca4,1 
+np.float64,0x3fe159faff22b3f6,0x3fe708f78efbdbed,1 +np.float64,0x800e7d59a31cfab3,0x800e7d59a31cfab3,1 +np.float64,0x3fe6ac2f272d585e,0x3ff07ee5305385c3,1 +np.float64,0x7fd014c054202980,0x7ff0000000000000,1 +np.float64,0xbfe4800b11e90016,0xbfde4648c6f29ce5,1 +np.float64,0xbfe6738470ece709,0xbfe0227b5b42b713,1 +np.float64,0x3fed052add3a0a56,0x3ff7a01819e65c6e,1 +np.float64,0xffe03106f120620e,0xbff0000000000000,1 +np.float64,0x7fe11df4d4e23be9,0x7ff0000000000000,1 +np.float64,0xbfcea25d7b3d44bc,0xbfcb3e808e7ce852,1 +np.float64,0xd0807b03a1010,0xd0807b03a1010,1 +np.float64,0x8004eda4fec9db4b,0x8004eda4fec9db4b,1 +np.float64,0x3fceb5c98d3d6b90,0x3fd15a894b15dd9f,1 +np.float64,0xbfee27228afc4e45,0xbfe38741702f3c0b,1 +np.float64,0xbfe606278c6c0c4f,0xbfdfd7cb6093652d,1 +np.float64,0xbfd66f59bc2cdeb4,0xbfd2ecb2297f6afc,1 +np.float64,0x4aee390095dc8,0x4aee390095dc8,1 +np.float64,0xbfe391355d67226a,0xbfdd46ddc0997014,1 +np.float64,0xffd27765e7a4eecc,0xbff0000000000000,1 +np.float64,0xbfe795e20a2f2bc4,0xbfe0afebc66c4dbd,1 +np.float64,0x7fc9a62e81334c5c,0x7ff0000000000000,1 +np.float64,0xffe4e57e52a9cafc,0xbff0000000000000,1 +np.float64,0x7fac326c8c3864d8,0x7ff0000000000000,1 +np.float64,0x3fe8675f6370cebf,0x3ff24d5863029c15,1 +np.float64,0x7fcf4745e73e8e8b,0x7ff0000000000000,1 +np.float64,0x7fcc9aec9f3935d8,0x7ff0000000000000,1 +np.float64,0x3fec2e8fcab85d20,0x3ff699ccd0b2fed6,1 +np.float64,0x3fd110a968222153,0x3fd38e81a88c2d13,1 +np.float64,0xffb3a68532274d08,0xbff0000000000000,1 +np.float64,0xf0e562bbe1cad,0xf0e562bbe1cad,1 +np.float64,0xbfe815b9e5f02b74,0xbfe0ec9f5023aebc,1 +np.float64,0xbf5151d88022a400,0xbf514f80c465feea,1 +np.float64,0x2547e3144a8fd,0x2547e3144a8fd,1 +np.float64,0x3fedcc0c28fb9818,0x3ff899612fbeb4c5,1 +np.float64,0x3fdc3d1c0f387a38,0x3fe1bf6e2d39bd75,1 +np.float64,0x7fe544dbe62a89b7,0x7ff0000000000000,1 +np.float64,0x8001500e48e2a01d,0x8001500e48e2a01d,1 +np.float64,0xbfed3b2b09fa7656,0xbfe329f3e7bada64,1 +np.float64,0xbfe76a943aeed528,0xbfe09b24e3aa3f79,1 +np.float64,0x3fe944330e328866,0x3ff33d472dee70c5,1 +np.float64,0x8004bbbd6cc9777c,0x8004bbbd6cc9777c,1 +np.float64,0xbfe28133fb650268,0xbfdc1ac230ac4ef5,1 +np.float64,0xc1370af7826e2,0xc1370af7826e2,1 +np.float64,0x7fcfa47f5f3f48fe,0x7ff0000000000000,1 +np.float64,0xbfa3002a04260050,0xbfa2a703a538b54e,1 +np.float64,0xffef44f3903e89e6,0xbff0000000000000,1 +np.float64,0xc32cce298659a,0xc32cce298659a,1 +np.float64,0x7b477cc2f68f0,0x7b477cc2f68f0,1 +np.float64,0x40a7f4ec814ff,0x40a7f4ec814ff,1 +np.float64,0xffee38edf67c71db,0xbff0000000000000,1 +np.float64,0x3fe23f6f1ce47ede,0x3fe8992b8bb03499,1 +np.float64,0x7fc8edfe7f31dbfc,0x7ff0000000000000,1 +np.float64,0x800bb8e6fb3771ce,0x800bb8e6fb3771ce,1 +np.float64,0xbfe11d364ee23a6c,0xbfda82a0c2ef9e46,1 +np.float64,0xbfeb993cb4b7327a,0xbfe27df565da85dc,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x3fc1f997d723f330,0x3fc34c5cff060af1,1 +np.float64,0x6e326fa0dc64f,0x6e326fa0dc64f,1 +np.float64,0x800fa30c2c5f4618,0x800fa30c2c5f4618,1 +np.float64,0x7fed16ad603a2d5a,0x7ff0000000000000,1 +np.float64,0x9411cf172823a,0x9411cf172823a,1 +np.float64,0xffece51d4cb9ca3a,0xbff0000000000000,1 +np.float64,0x3fdda3d1453b47a3,0x3fe2d954f7849890,1 +np.float64,0xffd58330172b0660,0xbff0000000000000,1 +np.float64,0xbfc6962ae52d2c54,0xbfc4b4bdf0069f17,1 +np.float64,0xbfb4010a8e280218,0xbfb33e1236f7efa0,1 +np.float64,0x7fd0444909208891,0x7ff0000000000000,1 +np.float64,0xbfe027a24de04f44,0xbfd95e9064101e7c,1 +np.float64,0xa6f3f3214de9,0xa6f3f3214de9,1 
+np.float64,0xbfe112eb0fe225d6,0xbfda768f7cbdf346,1 +np.float64,0xbfe99e90d4b33d22,0xbfe1a153e45a382a,1 +np.float64,0xffecb34f8e79669e,0xbff0000000000000,1 +np.float64,0xbfdf32c9653e6592,0xbfd8b159caf5633d,1 +np.float64,0x3fe9519829b2a330,0x3ff34c0a8152e20f,1 +np.float64,0xffd08ec8a7a11d92,0xbff0000000000000,1 +np.float64,0xffd19b71b6a336e4,0xbff0000000000000,1 +np.float64,0x7feda6b9377b4d71,0x7ff0000000000000,1 +np.float64,0x800fda2956bfb453,0x800fda2956bfb453,1 +np.float64,0x3fe54f601bea9ec0,0x3fee483cb03cbde4,1 +np.float64,0xbfe2a8ad5ee5515a,0xbfdc46ee7a10bf0d,1 +np.float64,0xbfd336c8bd266d92,0xbfd09916d432274a,1 +np.float64,0xfff0000000000000,0xbff0000000000000,1 +np.float64,0x3fd9a811a9b35024,0x3fdf8fa68cc048e3,1 +np.float64,0x3fe078c68520f18d,0x3fe58aecc1f9649b,1 +np.float64,0xbfc6d5aa3a2dab54,0xbfc4e9ea84f3d73c,1 +np.float64,0xf9682007f2d04,0xf9682007f2d04,1 +np.float64,0x3fee54523dbca8a4,0x3ff947b826de81f4,1 +np.float64,0x80461e5d008c4,0x80461e5d008c4,1 +np.float64,0x3fdd6d12d5bada26,0x3fe2ade8dee2fa02,1 +np.float64,0x3fcd5f0dfd3abe18,0x3fd081d6cd25731d,1 +np.float64,0x7fa36475c826c8eb,0x7ff0000000000000,1 +np.float64,0xbfdf3ce052be79c0,0xbfd8b78baccfb908,1 +np.float64,0x7fcd890dd13b121b,0x7ff0000000000000,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x800ec0f4281d81e8,0x800ec0f4281d81e8,1 +np.float64,0xbfba960116352c00,0xbfb94085424496d9,1 +np.float64,0x3fdddedc9bbbbdb8,0x3fe30853fe4ef5ce,1 +np.float64,0x238092a847013,0x238092a847013,1 +np.float64,0xbfe38d4803271a90,0xbfdd429a955c46af,1 +np.float64,0xbfd4c9067329920c,0xbfd1bf6255ed91a4,1 +np.float64,0xbfbee213923dc428,0xbfbd17ce1bda6088,1 +np.float64,0xffd5a2d337ab45a6,0xbff0000000000000,1 +np.float64,0x7fe21bfcf82437f9,0x7ff0000000000000,1 +np.float64,0x3fe2a2714da544e3,0x3fe949594a74ea25,1 +np.float64,0x800e05cf8ebc0b9f,0x800e05cf8ebc0b9f,1 +np.float64,0x559a1526ab343,0x559a1526ab343,1 +np.float64,0xffe6a1b7906d436e,0xbff0000000000000,1 +np.float64,0xffef27d6253e4fab,0xbff0000000000000,1 +np.float64,0xbfe0f90ab0a1f216,0xbfda5828a1edde48,1 +np.float64,0x9675d2ab2cebb,0x9675d2ab2cebb,1 +np.float64,0xffee0f7eecfc1efd,0xbff0000000000000,1 +np.float64,0x2ec005625d801,0x2ec005625d801,1 +np.float64,0x7fde35ff14bc6bfd,0x7ff0000000000000,1 +np.float64,0xffe03f36d9e07e6d,0xbff0000000000000,1 +np.float64,0x7fe09ff7c4213fef,0x7ff0000000000000,1 +np.float64,0xffeac29dd1b5853b,0xbff0000000000000,1 +np.float64,0x3fb63120aa2c6241,0x3fb72ea3de98a853,1 +np.float64,0xffd079eb84a0f3d8,0xbff0000000000000,1 +np.float64,0xbfd3c2cc75a78598,0xbfd1005996880b3f,1 +np.float64,0x7fb80507ee300a0f,0x7ff0000000000000,1 +np.float64,0xffe8006105f000c1,0xbff0000000000000,1 +np.float64,0x8009138b0ab22716,0x8009138b0ab22716,1 +np.float64,0xbfd6dfb40b2dbf68,0xbfd33b8e4008e3b0,1 +np.float64,0xbfe7c2cf9bef859f,0xbfe0c55c807460df,1 +np.float64,0xbfe75fe4da6ebfca,0xbfe09600256d3b81,1 +np.float64,0xffd662fc73acc5f8,0xbff0000000000000,1 +np.float64,0x20b99dbc41735,0x20b99dbc41735,1 +np.float64,0x3fe10b38ade21671,0x3fe68229a9bbeefc,1 +np.float64,0x3743b99c6e878,0x3743b99c6e878,1 +np.float64,0xff9eb5ed903d6be0,0xbff0000000000000,1 +np.float64,0x3ff0000000000000,0x3ffb7e151628aed3,1 +np.float64,0xffb9e0569e33c0b0,0xbff0000000000000,1 +np.float64,0x7fd39c804fa73900,0x7ff0000000000000,1 +np.float64,0x3fe881ef67f103df,0x3ff269dd704b7129,1 +np.float64,0x1b6eb40236dd7,0x1b6eb40236dd7,1 +np.float64,0xbfe734ea432e69d4,0xbfe0813e6355d02f,1 +np.float64,0xffcf48f3743e91e8,0xbff0000000000000,1 +np.float64,0xffed10bcf6fa2179,0xbff0000000000000,1 
+np.float64,0x3fef07723b7e0ee4,0x3ffa3156123f3c15,1 +np.float64,0xffe45c704aa8b8e0,0xbff0000000000000,1 +np.float64,0xb7b818d96f703,0xb7b818d96f703,1 +np.float64,0x42fcc04085f99,0x42fcc04085f99,1 +np.float64,0xbfda7ced01b4f9da,0xbfd5b0ce1e5524ae,1 +np.float64,0xbfe1e5963d63cb2c,0xbfdb6a87b6c09185,1 +np.float64,0x7fdfa18003bf42ff,0x7ff0000000000000,1 +np.float64,0xbfe3790a43e6f214,0xbfdd2c9a38b4f089,1 +np.float64,0xffe0ff5b9ae1feb6,0xbff0000000000000,1 +np.float64,0x80085a7d3110b4fb,0x80085a7d3110b4fb,1 +np.float64,0xffd6bfa6622d7f4c,0xbff0000000000000,1 +np.float64,0xbfef5ddc7cfebbb9,0xbfe3fe170521593e,1 +np.float64,0x3fc21773fa242ee8,0x3fc36ebda1f91a72,1 +np.float64,0x7fc04d98da209b31,0x7ff0000000000000,1 +np.float64,0xbfeba3b535b7476a,0xbfe282602e3c322e,1 +np.float64,0xffd41fb5c1a83f6c,0xbff0000000000000,1 +np.float64,0xf87d206df0fa4,0xf87d206df0fa4,1 +np.float64,0x800060946fc0c12a,0x800060946fc0c12a,1 +np.float64,0x3fe69d5f166d3abe,0x3ff06fdddcf4ca93,1 +np.float64,0x7fe9b5793b336af1,0x7ff0000000000000,1 +np.float64,0x7fe0dd4143e1ba82,0x7ff0000000000000,1 +np.float64,0xbfa8eaea3c31d5d0,0xbfa8522e397da3bd,1 +np.float64,0x119f0078233e1,0x119f0078233e1,1 +np.float64,0xbfd78a207aaf1440,0xbfd3b225bbf2ab4f,1 +np.float64,0xc66a6d4d8cd4e,0xc66a6d4d8cd4e,1 +np.float64,0xe7fc4b57cff8a,0xe7fc4b57cff8a,1 +np.float64,0x800883e8091107d0,0x800883e8091107d0,1 +np.float64,0x3fa6520c842ca419,0x3fa6d06e1041743a,1 +np.float64,0x3fa563182c2ac630,0x3fa5d70e27a84c97,1 +np.float64,0xe6a30b61cd462,0xe6a30b61cd462,1 +np.float64,0x3fee85dac37d0bb6,0x3ff987cfa41a9778,1 +np.float64,0x3fe8f621db71ec44,0x3ff2e7b768a2e9d0,1 +np.float64,0x800f231d861e463b,0x800f231d861e463b,1 +np.float64,0xbfe22eb07c645d61,0xbfdbbdbb853ab4c6,1 +np.float64,0x7fd2dda2dea5bb45,0x7ff0000000000000,1 +np.float64,0xbfd09b79a0a136f4,0xbfcd4147606ffd27,1 +np.float64,0xca039cc394074,0xca039cc394074,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0xcb34575d9668b,0xcb34575d9668b,1 +np.float64,0x3fea62c1f3f4c584,0x3ff47e6dc67ec89f,1 +np.float64,0x7fe544c8606a8990,0x7ff0000000000000,1 +np.float64,0xffe0a980c4615301,0xbff0000000000000,1 +np.float64,0x3fdd67d5f8bacfac,0x3fe2a9c3421830f1,1 +np.float64,0xffe41d3dda283a7b,0xbff0000000000000,1 +np.float64,0xffeed59e5ffdab3c,0xbff0000000000000,1 +np.float64,0xffeeae8326fd5d05,0xbff0000000000000,1 +np.float64,0x800d70b4fa7ae16a,0x800d70b4fa7ae16a,1 +np.float64,0xffec932e6839265c,0xbff0000000000000,1 +np.float64,0xee30b185dc616,0xee30b185dc616,1 +np.float64,0x7fc3cf4397279e86,0x7ff0000000000000,1 +np.float64,0xbfeab34f1875669e,0xbfe21b868229de7d,1 +np.float64,0xf45f5f7de8bec,0xf45f5f7de8bec,1 +np.float64,0x3fad2c4b203a5896,0x3fae0528b568f3cf,1 +np.float64,0xbfe2479543e48f2a,0xbfdbd9e57cf64028,1 +np.float64,0x3fd41a1473283429,0x3fd79df2bc60debb,1 +np.float64,0x3febb5155ef76a2a,0x3ff608585afd698b,1 +np.float64,0xffe21f5303e43ea6,0xbff0000000000000,1 +np.float64,0x7fe9ef390833de71,0x7ff0000000000000,1 +np.float64,0xffe8ee873d71dd0e,0xbff0000000000000,1 +np.float64,0x7fd7cbc55e2f978a,0x7ff0000000000000,1 +np.float64,0x80081f9080d03f21,0x80081f9080d03f21,1 +np.float64,0x7fecbafc8b3975f8,0x7ff0000000000000,1 +np.float64,0x800b6c4b0b16d896,0x800b6c4b0b16d896,1 +np.float64,0xbfaa0fc2d4341f80,0xbfa968cdf32b98ad,1 +np.float64,0x3fec79fe4078f3fc,0x3ff6f5361a4a5d93,1 +np.float64,0xbfb14b79de2296f0,0xbfb0b93b75ecec11,1 +np.float64,0x800009d084c013a2,0x800009d084c013a2,1 +np.float64,0x4a4cdfe29499d,0x4a4cdfe29499d,1 +np.float64,0xbfe721c2d56e4386,0xbfe077f541987d76,1 
+np.float64,0x3e5f539e7cbeb,0x3e5f539e7cbeb,1 +np.float64,0x3fd23f044c247e09,0x3fd51ceafcdd64aa,1 +np.float64,0x3fc70785b02e0f0b,0x3fc93b2a37eb342a,1 +np.float64,0xbfe7ab4ec7af569e,0xbfe0ba28eecbf6b0,1 +np.float64,0x800c1d4134583a83,0x800c1d4134583a83,1 +np.float64,0xffd9a73070334e60,0xbff0000000000000,1 +np.float64,0x68a4bf24d1499,0x68a4bf24d1499,1 +np.float64,0x7feba9d9507753b2,0x7ff0000000000000,1 +np.float64,0xbfe9d747db73ae90,0xbfe1bab53d932010,1 +np.float64,0x800a9a4aed953496,0x800a9a4aed953496,1 +np.float64,0xffcb89b0ad371360,0xbff0000000000000,1 +np.float64,0xbfc62388b82c4710,0xbfc4547be442a38c,1 +np.float64,0x800a006d187400db,0x800a006d187400db,1 +np.float64,0x3fcef2fbd33de5f8,0x3fd18177b2150148,1 +np.float64,0x8000b74e3da16e9d,0x8000b74e3da16e9d,1 +np.float64,0x25be536e4b7cb,0x25be536e4b7cb,1 +np.float64,0x3fa86e189430dc31,0x3fa905b4684c9f01,1 +np.float64,0xa7584b114eb0a,0xa7584b114eb0a,1 +np.float64,0x800331133c866227,0x800331133c866227,1 +np.float64,0x3fb52b48142a5690,0x3fb611a6f6e7c664,1 +np.float64,0x3fe825797cf04af2,0x3ff206fd60e98116,1 +np.float64,0x3fd0bec4e5217d8a,0x3fd323db3ffd59b2,1 +np.float64,0x907b43a120f7,0x907b43a120f7,1 +np.float64,0x3fed31eb1d3a63d6,0x3ff7d7a91c6930a4,1 +np.float64,0x7f97a13d782f427a,0x7ff0000000000000,1 +np.float64,0xffc7121a702e2434,0xbff0000000000000,1 +np.float64,0xbfe8bb4cbbf1769a,0xbfe139d7f46f1fb1,1 +np.float64,0xbfe3593cc5a6b27a,0xbfdd09ec91d6cd48,1 +np.float64,0x7fcff218ff9ff,0x7fcff218ff9ff,1 +np.float64,0x3fe73651d4ae6ca4,0x3ff10c5c1d21d127,1 +np.float64,0x80054e396eaa9c74,0x80054e396eaa9c74,1 +np.float64,0x3fe527d5f9aa4fac,0x3fedfb7743db9b53,1 +np.float64,0x7fec6f28c5f8de51,0x7ff0000000000000,1 +np.float64,0x3fcd2bbff53a5780,0x3fd061987416b49b,1 +np.float64,0xffd1f0046423e008,0xbff0000000000000,1 +np.float64,0x80034d97fac69b31,0x80034d97fac69b31,1 +np.float64,0x3faa803f14350080,0x3fab32e3f8073be4,1 +np.float64,0x3fcf8da0163f1b40,0x3fd1e42ba2354c8e,1 +np.float64,0x3fd573c2632ae785,0x3fd97c37609d18d7,1 +np.float64,0x7f922960482452c0,0x7ff0000000000000,1 +np.float64,0x800ebd0c5d3d7a19,0x800ebd0c5d3d7a19,1 +np.float64,0xbfee63b7807cc76f,0xbfe39ec7981035db,1 +np.float64,0xffdc023f8e380480,0xbff0000000000000,1 +np.float64,0x3fe3ffa02c67ff40,0x3febc7f8b900ceba,1 +np.float64,0x36c508b86d8a2,0x36c508b86d8a2,1 +np.float64,0x3fc9fbb0f133f760,0x3fcccee9f6ba801c,1 +np.float64,0x3fd75c1d5faeb83b,0x3fdc3150f9eff99e,1 +np.float64,0x3fe9a8d907b351b2,0x3ff3accc78a31df8,1 +np.float64,0x3fdd8fdcafbb1fb8,0x3fe2c97c97757994,1 +np.float64,0x3fb10c34ca22186a,0x3fb1a0cc42c76b86,1 +np.float64,0xbff0000000000000,0xbfe43a54e4e98864,1 +np.float64,0xffd046aefda08d5e,0xbff0000000000000,1 +np.float64,0x80067989758cf314,0x80067989758cf314,1 +np.float64,0x3fee9d77763d3aef,0x3ff9a67ff0841ba5,1 +np.float64,0xffe4d3cbf8e9a798,0xbff0000000000000,1 +np.float64,0x800f9cab273f3956,0x800f9cab273f3956,1 +np.float64,0x800a5c84f9f4b90a,0x800a5c84f9f4b90a,1 +np.float64,0x4fd377009fa8,0x4fd377009fa8,1 +np.float64,0xbfe7ba26af6f744e,0xbfe0c13ce45d6f95,1 +np.float64,0x609c8a86c1392,0x609c8a86c1392,1 +np.float64,0x7fe4d0296ea9a052,0x7ff0000000000000,1 +np.float64,0x59847bccb3090,0x59847bccb3090,1 +np.float64,0xbfdf944157bf2882,0xbfd8ed092bacad43,1 +np.float64,0xbfe7560a632eac15,0xbfe091405ec34973,1 +np.float64,0x3fea0699f4340d34,0x3ff415eb72089230,1 +np.float64,0x800a5533f374aa68,0x800a5533f374aa68,1 +np.float64,0xbf8e8cdb103d19c0,0xbf8e52cffcb83774,1 +np.float64,0x3fe87d9e52f0fb3d,0x3ff2653952344b81,1 +np.float64,0x7fca3950f73472a1,0x7ff0000000000000,1 
+np.float64,0xffd5d1068aaba20e,0xbff0000000000000,1 +np.float64,0x3fd1a5f169a34be4,0x3fd4524b6ef17f91,1 +np.float64,0x3fdc4b95a8b8972c,0x3fe1caafd8652bf7,1 +np.float64,0x3fe333f65a6667ed,0x3fea502fb1f8a578,1 +np.float64,0xbfc117aaac222f54,0xbfc00018a4b84b6e,1 +np.float64,0x7fecf2efdf39e5df,0x7ff0000000000000,1 +np.float64,0x4e99d83e9d33c,0x4e99d83e9d33c,1 +np.float64,0x800d18937bda3127,0x800d18937bda3127,1 +np.float64,0x3fd6c67778ad8cef,0x3fdb5aba70a3ea9e,1 +np.float64,0x3fdbb71770b76e2f,0x3fe157ae8da20bc5,1 +np.float64,0xbfe9faf6ebf3f5ee,0xbfe1ca963d83f17f,1 +np.float64,0x80038850ac0710a2,0x80038850ac0710a2,1 +np.float64,0x8006beb72f8d7d6f,0x8006beb72f8d7d6f,1 +np.float64,0x3feead67bffd5acf,0x3ff9bb43e8b15e2f,1 +np.float64,0xbfd1174b89222e98,0xbfcdff9972799907,1 +np.float64,0x7fee2c077cfc580e,0x7ff0000000000000,1 +np.float64,0xbfbdbd904e3b7b20,0xbfbc13f4916ed466,1 +np.float64,0xffee47b8fe3c8f71,0xbff0000000000000,1 +np.float64,0xffd161884222c310,0xbff0000000000000,1 +np.float64,0xbfd42f27c4a85e50,0xbfd14fa8d67ba5ee,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x8008151791b02a30,0x8008151791b02a30,1 +np.float64,0xbfba79029234f208,0xbfb926616cf41755,1 +np.float64,0x8004c486be29890e,0x8004c486be29890e,1 +np.float64,0x7fe5325a252a64b3,0x7ff0000000000000,1 +np.float64,0x5a880f04b5103,0x5a880f04b5103,1 +np.float64,0xbfe6f4b7702de96f,0xbfe06209002dd72c,1 +np.float64,0xbfdf8b3739bf166e,0xbfd8e783efe3c30f,1 +np.float64,0xbfe32571c8e64ae4,0xbfdcd128b9aa49a1,1 +np.float64,0xbfe97c98c172f932,0xbfe1920ac0fc040f,1 +np.float64,0x3fd0b513a2a16a28,0x3fd31744e3a1bf0a,1 +np.float64,0xffe3ab70832756e0,0xbff0000000000000,1 +np.float64,0x80030f055ce61e0b,0x80030f055ce61e0b,1 +np.float64,0xffd5f3b21b2be764,0xbff0000000000000,1 +np.float64,0x800c1f2d6c783e5b,0x800c1f2d6c783e5b,1 +np.float64,0x80075f4f148ebe9f,0x80075f4f148ebe9f,1 +np.float64,0xbfa5a046f42b4090,0xbfa52cfbf8992256,1 +np.float64,0xffd6702583ace04c,0xbff0000000000000,1 +np.float64,0x800dc0a5cf1b814c,0x800dc0a5cf1b814c,1 +np.float64,0x14f2203a29e45,0x14f2203a29e45,1 +np.float64,0x800421a40ee84349,0x800421a40ee84349,1 +np.float64,0xbfea7c279df4f84f,0xbfe2037fff3ed877,1 +np.float64,0xbfe9b41ddcf3683c,0xbfe1aafe18a44bf8,1 +np.float64,0xffe7b037022f606e,0xbff0000000000000,1 +np.float64,0x800bafb648775f6d,0x800bafb648775f6d,1 +np.float64,0x800b81681d5702d1,0x800b81681d5702d1,1 +np.float64,0x3fe29f8dc8653f1c,0x3fe9442da1c32c6b,1 +np.float64,0xffef9a05dc7f340b,0xbff0000000000000,1 +np.float64,0x800c8c65a65918cb,0x800c8c65a65918cb,1 +np.float64,0xffe99df0d5f33be1,0xbff0000000000000,1 +np.float64,0x9afeb22535fd7,0x9afeb22535fd7,1 +np.float64,0x7fc620dd822c41ba,0x7ff0000000000000,1 +np.float64,0x29c2cdf25385b,0x29c2cdf25385b,1 +np.float64,0x2d92284e5b246,0x2d92284e5b246,1 +np.float64,0xffc794aa942f2954,0xbff0000000000000,1 +np.float64,0xbfe7ed907eafdb21,0xbfe0d9a7b1442497,1 +np.float64,0xbfd4e0d4aea9c1aa,0xbfd1d09366dba2a7,1 +np.float64,0xa70412c34e083,0xa70412c34e083,1 +np.float64,0x41dc0ee083b9,0x41dc0ee083b9,1 +np.float64,0x8000ece20da1d9c5,0x8000ece20da1d9c5,1 +np.float64,0x3fdf3dae103e7b5c,0x3fe42314bf826bc5,1 +np.float64,0x3fe972533c72e4a6,0x3ff3703761e70f04,1 +np.float64,0xffba1d2b82343a58,0xbff0000000000000,1 +np.float64,0xe0086c83c010e,0xe0086c83c010e,1 +np.float64,0x3fe6fb0dde6df61c,0x3ff0cf5fae01aa08,1 +np.float64,0x3fcfaf057e3f5e0b,0x3fd1f98c1fd20139,1 +np.float64,0xbfdca19d9239433c,0xbfd7158745192ca9,1 +np.float64,0xffb17f394e22fe70,0xbff0000000000000,1 +np.float64,0x7fe40f05c7681e0b,0x7ff0000000000000,1 
+np.float64,0x800b3c575d5678af,0x800b3c575d5678af,1 +np.float64,0x7fa4ab20ac295640,0x7ff0000000000000,1 +np.float64,0xbfd2fff4f6a5ffea,0xbfd07069bb50e1a6,1 +np.float64,0xbfef81b9147f0372,0xbfe40b845a749787,1 +np.float64,0x7fd7400e54ae801c,0x7ff0000000000000,1 +np.float64,0x3fd4401a17a88034,0x3fd7d20fb76a4f3d,1 +np.float64,0xbfd3e907fd27d210,0xbfd11c64b7577fc5,1 +np.float64,0x7fe34bed9ae697da,0x7ff0000000000000,1 +np.float64,0x80039119c0472234,0x80039119c0472234,1 +np.float64,0xbfe2e36ac565c6d6,0xbfdc88454ee997b3,1 +np.float64,0xbfec57204478ae40,0xbfe2cd3183de1d2d,1 +np.float64,0x7fed7e2a12fafc53,0x7ff0000000000000,1 +np.float64,0x7fd5c5fa7d2b8bf4,0x7ff0000000000000,1 +np.float64,0x3fdcf368d6b9e6d0,0x3fe24decce1ebd35,1 +np.float64,0xbfe0ebfcf2e1d7fa,0xbfda48c9247ae8cf,1 +np.float64,0xbfe10dbea2e21b7e,0xbfda707d68b59674,1 +np.float64,0xbfdf201b6ebe4036,0xbfd8a5df27742fdf,1 +np.float64,0xffe16555be62caab,0xbff0000000000000,1 +np.float64,0xffc23a5db22474bc,0xbff0000000000000,1 +np.float64,0xffe1cbb3f8a39768,0xbff0000000000000,1 +np.float64,0x8007b823be0f7048,0x8007b823be0f7048,1 +np.float64,0xbfa5d1f3042ba3e0,0xbfa55c97cd77bf6e,1 +np.float64,0xbfe316a074662d41,0xbfdcc0da4e7334d0,1 +np.float64,0xbfdfab2bf2bf5658,0xbfd8fb046b88b51f,1 +np.float64,0xfacc9dabf5994,0xfacc9dabf5994,1 +np.float64,0xffe7e420a4efc841,0xbff0000000000000,1 +np.float64,0x800bb986cd57730e,0x800bb986cd57730e,1 +np.float64,0xbfe314fa38e629f4,0xbfdcbf09302c3bf5,1 +np.float64,0x7fc56b17772ad62e,0x7ff0000000000000,1 +np.float64,0x8006a87d54ad50fb,0x8006a87d54ad50fb,1 +np.float64,0xbfe6633e4a6cc67c,0xbfe01a67c3b3ff32,1 +np.float64,0x3fe0ff56eb21feae,0x3fe66df01defb0fb,1 +np.float64,0xffc369cfc126d3a0,0xbff0000000000000,1 +np.float64,0x7fe8775d9a30eeba,0x7ff0000000000000,1 +np.float64,0x3fb53db13e2a7b60,0x3fb625a7279cdac3,1 +np.float64,0xffee76e7e6fcedcf,0xbff0000000000000,1 +np.float64,0xb45595b568ab3,0xb45595b568ab3,1 +np.float64,0xffa09a1d50213440,0xbff0000000000000,1 +np.float64,0x7d11dc16fa23c,0x7d11dc16fa23c,1 +np.float64,0x7fd4cc2928299851,0x7ff0000000000000,1 +np.float64,0x6a30e0ead461d,0x6a30e0ead461d,1 +np.float64,0x7fd3ee735a27dce6,0x7ff0000000000000,1 +np.float64,0x8008d7084b31ae11,0x8008d7084b31ae11,1 +np.float64,0x3fe469353fe8d26a,0x3fec8e7e2df38590,1 +np.float64,0x3fcecef2743d9de5,0x3fd16a888b715dfd,1 +np.float64,0x460130d68c027,0x460130d68c027,1 +np.float64,0xbfd76510c62eca22,0xbfd398766b741d6e,1 +np.float64,0x800ec88c2a5d9118,0x800ec88c2a5d9118,1 +np.float64,0x3fac969c6c392d40,0x3fad66ca6a1e583c,1 +np.float64,0x3fe5c616bf6b8c2e,0x3fef30f931e8dde5,1 +np.float64,0xb4cb6cd56996e,0xb4cb6cd56996e,1 +np.float64,0xffc3eacf8827d5a0,0xbff0000000000000,1 +np.float64,0x3fe1ceaf60e39d5f,0x3fe7d31e0a627cf9,1 +np.float64,0xffea69b42ff4d368,0xbff0000000000000,1 +np.float64,0x800ff8aef99ff15e,0x800ff8aef99ff15e,1 +np.float64,0x6c3953f0d872b,0x6c3953f0d872b,1 +np.float64,0x8007ca5a0d0f94b5,0x8007ca5a0d0f94b5,1 +np.float64,0x800993ce3ad3279d,0x800993ce3ad3279d,1 +np.float64,0x3fe5a4d1516b49a2,0x3feeef67b22ac65b,1 +np.float64,0x8003d7512a67aea3,0x8003d7512a67aea3,1 +np.float64,0x33864430670c9,0x33864430670c9,1 +np.float64,0xbfdbf477e3b7e8f0,0xbfd6a63f1b36f424,1 +np.float64,0x3fb5da92582bb525,0x3fb6d04ef1a1d31a,1 +np.float64,0xe38aae71c7156,0xe38aae71c7156,1 +np.float64,0x3fcaf5590a35eab2,0x3fce01ed6eb6188e,1 +np.float64,0x800deba9b05bd754,0x800deba9b05bd754,1 +np.float64,0x7fee0cde287c19bb,0x7ff0000000000000,1 +np.float64,0xbfe0c2ae70e1855d,0xbfda17fa64d84fcf,1 +np.float64,0x518618faa30c4,0x518618faa30c4,1 
+np.float64,0xbfeb4c49b8769894,0xbfe25d52cd7e529f,1 +np.float64,0xbfeb3aa21b367544,0xbfe255cae1df4cfd,1 +np.float64,0xffd23f1c5d247e38,0xbff0000000000000,1 +np.float64,0xff9a75132034ea20,0xbff0000000000000,1 +np.float64,0xbfef9d96307f3b2c,0xbfe415e8b6ce0e50,1 +np.float64,0x8004046f2f0808df,0x8004046f2f0808df,1 +np.float64,0x3fe15871aea2b0e3,0x3fe706532ea5c770,1 +np.float64,0x7fd86b1576b0d62a,0x7ff0000000000000,1 +np.float64,0xbfc240a5c724814c,0xbfc102c7971ca455,1 +np.float64,0xffd8ea670bb1d4ce,0xbff0000000000000,1 +np.float64,0xbfeb1ddd1ff63bba,0xbfe2497c4e27bb8e,1 +np.float64,0x3fcd47e0a33a8fc1,0x3fd0734444150d83,1 +np.float64,0xe00b6a65c016e,0xe00b6a65c016e,1 +np.float64,0xbfc7d582142fab04,0xbfc5bf1fbe755a4c,1 +np.float64,0x8cc91ca11993,0x8cc91ca11993,1 +np.float64,0x7fdbc530e3b78a61,0x7ff0000000000000,1 +np.float64,0x7fee437522bc86e9,0x7ff0000000000000,1 +np.float64,0xffe9e09ae2b3c135,0xbff0000000000000,1 +np.float64,0x8002841cada5083a,0x8002841cada5083a,1 +np.float64,0x3fd6b485f8ad690c,0x3fdb412135932699,1 +np.float64,0x80070e8d0b0e1d1b,0x80070e8d0b0e1d1b,1 +np.float64,0x7fed5df165babbe2,0x7ff0000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe99d08cd333a11,0x7ff0000000000000,1 +np.float64,0xdfff4201bfff,0xdfff4201bfff,1 +np.float64,0x800ccf7aaf999ef6,0x800ccf7aaf999ef6,1 +np.float64,0x3fddb05aad3b60b5,0x3fe2e34bdd1dd9d5,1 +np.float64,0xbfe5e1c60e6bc38c,0xbfdfb3275cc1675f,1 +np.float64,0x8004fe674269fccf,0x8004fe674269fccf,1 +np.float64,0x7fe9280363325006,0x7ff0000000000000,1 +np.float64,0xf605b9f1ec0b7,0xf605b9f1ec0b7,1 +np.float64,0x800c7c214018f843,0x800c7c214018f843,1 +np.float64,0x7fd97eb6b9b2fd6c,0x7ff0000000000000,1 +np.float64,0x7fd03f8fb6207f1e,0x7ff0000000000000,1 +np.float64,0x7fc526b64d2a4d6c,0x7ff0000000000000,1 +np.float64,0xbfef1a7c42fe34f9,0xbfe3e4b4399e0fcf,1 +np.float64,0xffdde10a2fbbc214,0xbff0000000000000,1 +np.float64,0xbfdd274f72ba4e9e,0xbfd76aa73788863c,1 +np.float64,0xbfecf7f77af9efef,0xbfe30ee2ae03fed1,1 +np.float64,0xffde709322bce126,0xbff0000000000000,1 +np.float64,0x268b5dac4d16d,0x268b5dac4d16d,1 +np.float64,0x8005c099606b8134,0x8005c099606b8134,1 +np.float64,0xffcf54c1593ea984,0xbff0000000000000,1 +np.float64,0xbfee9b8ebabd371d,0xbfe3b44f2663139d,1 +np.float64,0x3faf0330643e0661,0x3faff88fab74b447,1 +np.float64,0x7fe1c6011be38c01,0x7ff0000000000000,1 +np.float64,0xbfe9d58053b3ab01,0xbfe1b9ea12242485,1 +np.float64,0xbfe15a80fee2b502,0xbfdaca2aa7d1231a,1 +np.float64,0x7fe0d766d8a1aecd,0x7ff0000000000000,1 +np.float64,0x800f65e6a21ecbcd,0x800f65e6a21ecbcd,1 +np.float64,0x7fc85e45a530bc8a,0x7ff0000000000000,1 +np.float64,0x3fcc240e5438481d,0x3fcf7954fc080ac3,1 +np.float64,0xffddd49da2bba93c,0xbff0000000000000,1 +np.float64,0x1376f36c26edf,0x1376f36c26edf,1 +np.float64,0x3feffb7af17ff6f6,0x3ffb77f0ead2f881,1 +np.float64,0x3fd9354ea9b26a9d,0x3fdee4e4c8db8239,1 +np.float64,0xffdf7beed4bef7de,0xbff0000000000000,1 +np.float64,0xbfdef256ecbde4ae,0xbfd889b0e213a019,1 +np.float64,0x800d78bd1e7af17a,0x800d78bd1e7af17a,1 +np.float64,0xb66d66276cdad,0xb66d66276cdad,1 +np.float64,0x7fd8f51138b1ea21,0x7ff0000000000000,1 +np.float64,0xffe8c9c302b19385,0xbff0000000000000,1 +np.float64,0x8000be4cf5417c9b,0x8000be4cf5417c9b,1 +np.float64,0xbfe2293a25645274,0xbfdbb78a8c547c68,1 +np.float64,0xce8392c19d08,0xce8392c19d08,1 +np.float64,0xbfe075736b60eae7,0xbfd9bc0f6e34a283,1 +np.float64,0xbfe8d6fe6a71adfd,0xbfe1469ba80b4915,1 +np.float64,0xffe0c7993fa18f32,0xbff0000000000000,1 +np.float64,0x3fce5210fd3ca422,0x3fd11b40a1270a95,1 
+np.float64,0x6c0534a8d80a7,0x6c0534a8d80a7,1 +np.float64,0x23c1823647831,0x23c1823647831,1 +np.float64,0x3fc901253732024a,0x3fcb9d264accb07c,1 +np.float64,0x3fe42b8997685714,0x3fec1a39e207b6e4,1 +np.float64,0x3fec4fd00fb89fa0,0x3ff6c1fdd0c262c8,1 +np.float64,0x8007b333caaf6668,0x8007b333caaf6668,1 +np.float64,0x800f9275141f24ea,0x800f9275141f24ea,1 +np.float64,0xffbba361a23746c0,0xbff0000000000000,1 +np.float64,0xbfee4effa9fc9dff,0xbfe396c11d0cd524,1 +np.float64,0x3e47e84c7c8fe,0x3e47e84c7c8fe,1 +np.float64,0x3fe80eb7b1301d6f,0x3ff1eed318a00153,1 +np.float64,0x7fd3f4c5b4a7e98a,0x7ff0000000000000,1 +np.float64,0x158abab02b158,0x158abab02b158,1 +np.float64,0x1,0x1,1 +np.float64,0x1f1797883e2f4,0x1f1797883e2f4,1 +np.float64,0x3feec055d03d80ac,0x3ff9d3fb0394de33,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0xbfd070860ea0e10c,0xbfccfeec2828efef,1 +np.float64,0x80015c8b3e82b917,0x80015c8b3e82b917,1 +np.float64,0xffef9956d9ff32ad,0xbff0000000000000,1 +np.float64,0x7fe7f087dd2fe10f,0x7ff0000000000000,1 +np.float64,0x8002e7718665cee4,0x8002e7718665cee4,1 +np.float64,0x3fdfb9adb2bf735c,0x3fe4887a86214c1e,1 +np.float64,0xffc7747dfb2ee8fc,0xbff0000000000000,1 +np.float64,0x3fec309bb5386137,0x3ff69c44e1738547,1 +np.float64,0xffdbe2bf9ab7c580,0xbff0000000000000,1 +np.float64,0xbfe6a274daed44ea,0xbfe039aff2be9d48,1 +np.float64,0x7fd5a4e4efab49c9,0x7ff0000000000000,1 +np.float64,0xffbe6aaeb03cd560,0xbff0000000000000,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-log.csv b/python/numpy/_core/tests/data/umath-validation-set-log.csv new file mode 100644 index 000000000..b8f6b0875 --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-log.csv @@ -0,0 +1,271 @@ +dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0xc2afbc1b,4 +np.float32,0x007b2490,0xc2aec01e,4 +np.float32,0x007c99fa,0xc2aeba17,4 +np.float32,0x00734a0c,0xc2aee1dc,4 +np.float32,0x0070de24,0xc2aeecba,4 +np.float32,0x007fffff,0xc2aeac50,4 +np.float32,0x00000001,0xc2ce8ed0,4 +## -ve denormals ## +np.float32,0x80495d65,0xffc00000,4 +np.float32,0x806894f6,0xffc00000,4 +np.float32,0x80555a76,0xffc00000,4 +np.float32,0x804e1fb8,0xffc00000,4 +np.float32,0x80687de9,0xffc00000,4 +np.float32,0x807fffff,0xffc00000,4 +np.float32,0x80000001,0xffc00000,4 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0xff800000,4 +np.float32,0x80000000,0xff800000,4 +np.float32,0x7f7fffff,0x42b17218,4 +np.float32,0x80800000,0xffc00000,4 +np.float32,0xff7fffff,0xffc00000,4 +## 1.00f + 0x00000001 ## +np.float32,0x3f800000,0x00000000,4 +np.float32,0x3f800001,0x33ffffff,4 +np.float32,0x3f800002,0x347ffffe,4 +np.float32,0x3f7fffff,0xb3800000,4 +np.float32,0x3f7ffffe,0xb4000000,4 +np.float32,0x3f7ffffd,0xb4400001,4 +np.float32,0x402df853,0x3f7ffffe,4 +np.float32,0x402df854,0x3f7fffff,4 +np.float32,0x402df855,0x3f800000,4 +np.float32,0x402df856,0x3f800001,4 +np.float32,0x3ebc5ab0,0xbf800001,4 +np.float32,0x3ebc5ab1,0xbf800000,4 +np.float32,0x3ebc5ab2,0xbf800000,4 +np.float32,0x3ebc5ab3,0xbf7ffffe,4 +np.float32,0x423ef575,0x407768ab,4 +np.float32,0x427b8c61,0x408485dd,4 +np.float32,0x4211e9ee,0x406630b0,4 +np.float32,0x424d5c41,0x407c0fed,4 +np.float32,0x42be722a,0x4091cc91,4 +np.float32,0x42b73d30,0x4090908b,4 +np.float32,0x427e48e2,0x4084de7f,4 +np.float32,0x428f759b,0x4088bba3,4 +np.float32,0x41629069,0x4029a0cc,4 +np.float32,0x4272c99d,0x40836379,4 +np.float32,0x4d1b7458,0x4197463d,4 +np.float32,0x4f10c594,0x41ace2b2,4 +np.float32,0x4ea397c2,0x41a85171,4 
+np.float32,0x4fefa9d1,0x41b6769c,4 +np.float32,0x4ebac6ab,0x41a960dc,4 +np.float32,0x4f6efb42,0x41b0e535,4 +np.float32,0x4e9ab8e7,0x41a7df44,4 +np.float32,0x4e81b5d1,0x41a67625,4 +np.float32,0x5014d9f2,0x41b832bd,4 +np.float32,0x4f02175c,0x41ac07b8,4 +np.float32,0x7f034f89,0x42b01c47,4 +np.float32,0x7f56d00e,0x42b11849,4 +np.float32,0x7f1cd5f6,0x42b0773a,4 +np.float32,0x7e979174,0x42af02d7,4 +np.float32,0x7f23369f,0x42b08ba2,4 +np.float32,0x7f0637ae,0x42b0277d,4 +np.float32,0x7efcb6e8,0x42b00897,4 +np.float32,0x7f7907c8,0x42b163f6,4 +np.float32,0x7e95c4c2,0x42aefcba,4 +np.float32,0x7f4577b2,0x42b0ed2d,4 +np.float32,0x3f49c92e,0xbe73ae84,4 +np.float32,0x3f4a23d1,0xbe71e2f8,4 +np.float32,0x3f4abb67,0xbe6ee430,4 +np.float32,0x3f48169a,0xbe7c5532,4 +np.float32,0x3f47f5fa,0xbe7cfc37,4 +np.float32,0x3f488309,0xbe7a2ad8,4 +np.float32,0x3f479df4,0xbe7ebf5f,4 +np.float32,0x3f47cfff,0xbe7dbec9,4 +np.float32,0x3f496704,0xbe75a125,4 +np.float32,0x3f478ee8,0xbe7f0c92,4 +np.float32,0x3f4a763b,0xbe7041ce,4 +np.float32,0x3f47a108,0xbe7eaf94,4 +np.float32,0x3f48136c,0xbe7c6578,4 +np.float32,0x3f481c17,0xbe7c391c,4 +np.float32,0x3f47cd28,0xbe7dcd56,4 +np.float32,0x3f478be8,0xbe7f1bf7,4 +np.float32,0x3f4c1f8e,0xbe67e367,4 +np.float32,0x3f489b0c,0xbe79b03f,4 +np.float32,0x3f4934cf,0xbe76a08a,4 +np.float32,0x3f4954df,0xbe75fd6a,4 +np.float32,0x3f47a3f5,0xbe7ea093,4 +np.float32,0x3f4ba4fc,0xbe6a4b02,4 +np.float32,0x3f47a0e1,0xbe7eb05c,4 +np.float32,0x3f48c30a,0xbe78e42f,4 +np.float32,0x3f48cab8,0xbe78bd05,4 +np.float32,0x3f4b0569,0xbe6d6ea4,4 +np.float32,0x3f47de32,0xbe7d7607,4 +np.float32,0x3f477328,0xbe7f9b00,4 +np.float32,0x3f496dab,0xbe757f52,4 +np.float32,0x3f47662c,0xbe7fddac,4 +np.float32,0x3f48ddd8,0xbe785b80,4 +np.float32,0x3f481866,0xbe7c4bff,4 +np.float32,0x3f48b119,0xbe793fb6,4 +np.float32,0x3f48c7e8,0xbe78cb5c,4 +np.float32,0x3f4985f6,0xbe7503da,4 +np.float32,0x3f483fdf,0xbe7b8212,4 +np.float32,0x3f4b1c76,0xbe6cfa67,4 +np.float32,0x3f480b2e,0xbe7c8fa8,4 +np.float32,0x3f48745f,0xbe7a75bf,4 +np.float32,0x3f485bda,0xbe7af308,4 +np.float32,0x3f47a660,0xbe7e942c,4 +np.float32,0x3f47d4d5,0xbe7da600,4 +np.float32,0x3f4b0a26,0xbe6d56be,4 +np.float32,0x3f4a4883,0xbe712924,4 +np.float32,0x3f4769e7,0xbe7fca84,4 +np.float32,0x3f499702,0xbe74ad3f,4 +np.float32,0x3f494ab1,0xbe763131,4 +np.float32,0x3f476b69,0xbe7fc2c6,4 +np.float32,0x3f4884e8,0xbe7a214a,4 +np.float32,0x3f486945,0xbe7aae76,4 +#float64 +## +ve denormal ## +np.float64,0x0000000000000001,0xc0874385446d71c3,1 +np.float64,0x0001000000000000,0xc086395a2079b70c,1 +np.float64,0x000fffffffffffff,0xc086232bdd7abcd2,1 +np.float64,0x0007ad63e2168cb6,0xc086290bc0b2980f,1 +## -ve denormal ## +np.float64,0x8000000000000001,0xfff8000000000001,1 +np.float64,0x8001000000000000,0xfff8000000000001,1 +np.float64,0x800fffffffffffff,0xfff8000000000001,1 +np.float64,0x8007ad63e2168cb6,0xfff8000000000001,1 +## +/-0.0f, MAX, MIN## +np.float64,0x0000000000000000,0xfff0000000000000,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,1 +np.float64,0xffefffffffffffff,0xfff8000000000001,1 +## near 1.0f ## +np.float64,0x3ff0000000000000,0x0000000000000000,1 +np.float64,0x3fe8000000000000,0xbfd269621134db92,1 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,1 +np.float64,0x3ff0000020000000,0x3e7fffffe000002b,1 +np.float64,0x3ff0000000000001,0x3cafffffffffffff,1 +np.float64,0x3fefffffe0000000,0xbe70000008000005,1 +np.float64,0x3fefffffffffffff,0xbca0000000000000,1 +## random numbers ## 
+np.float64,0x02500186f3d9da56,0xc0855b8abf135773,1 +np.float64,0x09200815a3951173,0xc082ff1ad7131bdc,1 +np.float64,0x0da029623b0243d4,0xc0816fc994695bb5,1 +np.float64,0x48703b8ac483a382,0x40579213a313490b,1 +np.float64,0x09207b74c87c9860,0xc082fee20ff349ef,1 +np.float64,0x62c077698e8df947,0x407821c996d110f0,1 +np.float64,0x2350b45e87c3cfb0,0xc073d6b16b51d072,1 +np.float64,0x3990a23f9ff2b623,0xc051aa60eadd8c61,1 +np.float64,0x0d011386a116c348,0xc081a6cc7ea3b8fb,1 +np.float64,0x1fe0f0303ebe273a,0xc0763870b78a81ca,1 +np.float64,0x0cd1260121d387da,0xc081b7668d61a9d1,1 +np.float64,0x1e6135a8f581d422,0xc077425ac10f08c2,1 +np.float64,0x622168db5fe52d30,0x4077b3c669b9fadb,1 +np.float64,0x69f188e1ec6d1718,0x407d1e2f18c63889,1 +np.float64,0x3aa1bf1d9c4dd1a3,0xc04d682e24bde479,1 +np.float64,0x6c81c4011ce4f683,0x407ee5190e8a8e6a,1 +np.float64,0x2191fa55aa5a5095,0xc0750c0c318b5e2d,1 +np.float64,0x32a1f602a32bf360,0xc06270caa493fc17,1 +np.float64,0x16023c90ba93249b,0xc07d0f88e0801638,1 +np.float64,0x1c525fe6d71fa9ff,0xc078af49c66a5d63,1 +np.float64,0x1a927675815d65b7,0xc079e5bdd7fe376e,1 +np.float64,0x41227b8fe70da028,0x402aa0c9f9a84c71,1 +np.float64,0x4962bb6e853fe87d,0x405a34aa04c83747,1 +np.float64,0x23d2cda00b26b5a4,0xc0737c13a06d00ea,1 +np.float64,0x2d13083fd62987fa,0xc06a25055aeb474e,1 +np.float64,0x10e31e4c9b4579a1,0xc0804e181929418e,1 +np.float64,0x26d3247d556a86a9,0xc0716774171da7e8,1 +np.float64,0x6603379398d0d4ac,0x407a64f51f8a887b,1 +np.float64,0x02d38af17d9442ba,0xc0852d955ac9dd68,1 +np.float64,0x6a2382b4818dd967,0x407d4129d688e5d4,1 +np.float64,0x2ee3c403c79b3934,0xc067a091fefaf8b6,1 +np.float64,0x6493a699acdbf1a4,0x4079663c8602bfc5,1 +np.float64,0x1c8413c4f0de3100,0xc0788c99697059b6,1 +np.float64,0x4573f1ed350d9622,0x404e9bd1e4c08920,1 +np.float64,0x2f34265c9200b69c,0xc067310cfea4e986,1 +np.float64,0x19b43e65fa22029b,0xc07a7f8877de22d6,1 +np.float64,0x0af48ab7925ed6bc,0xc0825c4fbc0e5ade,1 +np.float64,0x4fa49699cad82542,0x4065c76d2a318235,1 +np.float64,0x7204a15e56ade492,0x40815bb87484dffb,1 +np.float64,0x4734aa08a230982d,0x40542a4bf7a361a9,1 +np.float64,0x1ae4ed296c2fd749,0xc079ac4921f20abb,1 +np.float64,0x472514ea4370289c,0x4053ff372bd8f18f,1 +np.float64,0x53a54b3f73820430,0x406b5411fc5f2e33,1 +np.float64,0x64754de5a15684fa,0x407951592e99a5ab,1 +np.float64,0x69358e279868a7c3,0x407c9c671a882c31,1 +np.float64,0x284579ec61215945,0xc0706688e55f0927,1 +np.float64,0x68b5c58806447adc,0x407c43d6f4eff760,1 +np.float64,0x1945a83f98b0e65d,0xc07acc15eeb032cc,1 +np.float64,0x0fc5eb98a16578bf,0xc080b0d02eddca0e,1 +np.float64,0x6a75e208f5784250,0x407d7a7383bf8f05,1 +np.float64,0x0fe63a029c47645d,0xc080a59ca1e98866,1 +np.float64,0x37963ac53f065510,0xc057236281f7bdb6,1 +np.float64,0x135661bb07067ff7,0xc07ee924930c21e4,1 +np.float64,0x4b4699469d458422,0x405f73843756e887,1 +np.float64,0x1a66d73e4bf4881b,0xc07a039ba1c63adf,1 +np.float64,0x12a6b9b119a7da59,0xc07f62e49c6431f3,1 +np.float64,0x24c719aa8fd1bdb5,0xc072d26da4bf84d3,1 +np.float64,0x0fa6ff524ffef314,0xc080bb8514662e77,1 +np.float64,0x1db751d66fdd4a9a,0xc077b77cb50d7c92,1 +np.float64,0x4947374c516da82c,0x4059e9acfc7105bf,1 +np.float64,0x1b1771ab98f3afc8,0xc07989326b8e1f66,1 +np.float64,0x25e78805baac8070,0xc0720a818e6ef080,1 +np.float64,0x4bd7a148225d3687,0x406082d004ea3ee7,1 +np.float64,0x53d7d6b2bbbda00a,0x406b9a398967cbd5,1 +np.float64,0x6997fb9f4e1c685f,0x407ce0a703413eba,1 +np.float64,0x069802c2ff71b951,0xc083df39bf7acddc,1 +np.float64,0x4d683ac9890f66d8,0x4062ae21d8c2acf0,1 +np.float64,0x5a2825863ec14f4c,0x40722d718d549552,1 
+np.float64,0x0398799a88f4db80,0xc084e93dab8e2158,1 +np.float64,0x5ed87a8b77e135a5,0x40756d7051777b33,1 +np.float64,0x5828cd6d79b9bede,0x4070cafb22fc6ca1,1 +np.float64,0x7b18ba2a5ec6f068,0x408481386b3ed6fe,1 +np.float64,0x4938fd60922198fe,0x4059c206b762ea7e,1 +np.float64,0x31b8f44fcdd1a46e,0xc063b2faa8b6434e,1 +np.float64,0x5729341c0d918464,0x407019cac0c4a7d7,1 +np.float64,0x13595e9228ee878e,0xc07ee7235a7d8088,1 +np.float64,0x17698b0dc9dd4135,0xc07c1627e3a5ad5f,1 +np.float64,0x63b977c283abb0cc,0x4078cf1ec6ed65be,1 +np.float64,0x7349cc0d4dc16943,0x4081cc697ce4cb53,1 +np.float64,0x4e49a80b732fb28d,0x4063e67e3c5cbe90,1 +np.float64,0x07ba14b848a8ae02,0xc0837ac032a094e0,1 +np.float64,0x3da9f17b691bfddc,0xc03929c25366acda,1 +np.float64,0x02ea39aa6c3ac007,0xc08525af6f21e1c4,1 +np.float64,0x3a6a42f04ed9563d,0xc04e98e825dca46b,1 +np.float64,0x1afa877cd7900be7,0xc0799d6648cb34a9,1 +np.float64,0x58ea986649e052c6,0x4071512e939ad790,1 +np.float64,0x691abbc04647f536,0x407c89aaae0fcb83,1 +np.float64,0x43aabc5063e6f284,0x4044b45d18106fd2,1 +np.float64,0x488b003c893e0bea,0x4057df012a2dafbe,1 +np.float64,0x77eb076ed67caee5,0x40836720de94769e,1 +np.float64,0x5c1b46974aba46f4,0x40738731ba256007,1 +np.float64,0x1a5b29ecb5d3c261,0xc07a0becc77040d6,1 +np.float64,0x5d8b6ccf868c6032,0x4074865c1865e2db,1 +np.float64,0x4cfb6690b4aaf5af,0x406216cd8c7e8ddb,1 +np.float64,0x76cbd8eb5c5fc39e,0x4083038dc66d682b,1 +np.float64,0x28bbd1fec5012814,0xc07014c2dd1b9711,1 +np.float64,0x33dc1b3a4fd6bf7a,0xc060bd0756e07d8a,1 +np.float64,0x52bbe89b37de99f3,0x406a10041aa7d343,1 +np.float64,0x07bc479d15eb2dd3,0xc0837a1a6e3a3b61,1 +np.float64,0x18fc5275711a901d,0xc07aff3e9d62bc93,1 +np.float64,0x114c9758e247dc71,0xc080299a7cf15b05,1 +np.float64,0x25ac8f6d60755148,0xc07233c4c0c511d4,1 +np.float64,0x260cae2bb9e9fd7e,0xc071f128c7e82eac,1 +np.float64,0x572ccdfe0241de82,0x40701bedc84bb504,1 +np.float64,0x0ddcef6c8d41f5ee,0xc0815a7e16d07084,1 +np.float64,0x6dad1d59c988af68,0x407fb4a0bc0142b1,1 +np.float64,0x025d200580d8b6d1,0xc08556c0bc32b1b2,1 +np.float64,0x7aad344b6aa74c18,0x40845bbc453f22be,1 +np.float64,0x5b5d9d6ad9d14429,0x4073036d2d21f382,1 +np.float64,0x49cd8d8dcdf19954,0x405b5c034f5c7353,1 +np.float64,0x63edb9483335c1e6,0x4078f2dd21378786,1 +np.float64,0x7b1dd64c9d2c26bd,0x408482b922017bc9,1 +np.float64,0x782e13e0b574be5f,0x40837e2a0090a5ad,1 +np.float64,0x592dfe18b9d6db2f,0x40717f777fbcb1ec,1 +np.float64,0x654e3232ac60d72c,0x4079e71a95a70446,1 +np.float64,0x7b8e42ad22091456,0x4084a9a6f1e61722,1 +np.float64,0x570e88dfd5860ae6,0x407006ae6c0d137a,1 +np.float64,0x294e98346cb98ef1,0xc06f5edaac12bd44,1 +np.float64,0x1adeaa4ab792e642,0xc079b1431d5e2633,1 +np.float64,0x7b6ead3377529ac8,0x40849eabc8c7683c,1 +np.float64,0x2b8eedae8a9b2928,0xc06c400054deef11,1 +np.float64,0x65defb45b2dcf660,0x407a4b53f181c05a,1 +np.float64,0x1baf582d475e7701,0xc07920bcad4a502c,1 +np.float64,0x461f39cf05a0f15a,0x405126368f984fa1,1 +np.float64,0x7e5f6f5dcfff005b,0x4085a37d610439b4,1 +np.float64,0x136f66e4d09bd662,0xc07ed8a2719f2511,1 +np.float64,0x65afd8983fb6ca1f,0x407a2a7f48bf7fc1,1 +np.float64,0x572fa7f95ed22319,0x40701d706cf82e6f,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-log10.csv b/python/numpy/_core/tests/data/umath-validation-set-log10.csv new file mode 100644 index 000000000..c7657773e --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-log10.csv @@ -0,0 +1,1629 @@ +dtype,input,output,ulperrortol +np.float32,0x3f6fd5c8,0xbce80e8e,4 +np.float32,0x3ea4ab17,0xbefc3deb,4 +np.float32,0x3e87a133,0xbf13b0b7,4 
+np.float32,0x3f0d9069,0xbe83bb19,4 +np.float32,0x3f7b9269,0xbbf84f47,4 +np.float32,0x3f7a9ffa,0xbc16fd97,4 +np.float32,0x7f535d34,0x4219cb66,4 +np.float32,0x3e79ad7c,0xbf1ce857,4 +np.float32,0x7e8bfd3b,0x4217dfe9,4 +np.float32,0x3f2d2ee9,0xbe2dcec6,4 +np.float32,0x572e04,0xc21862e4,4 +np.float32,0x7f36f8,0xc217bad5,4 +np.float32,0x3f7982fb,0xbc36aaed,4 +np.float32,0x45b019,0xc218c67c,4 +np.float32,0x3f521c46,0xbdafb3e3,4 +np.float32,0x80000001,0x7fc00000,4 +np.float32,0x3f336c81,0xbe1e107f,4 +np.float32,0x3eac92d7,0xbef1d0bb,4 +np.float32,0x47bdfc,0xc218b990,4 +np.float32,0x7f2d94c8,0x421973d1,4 +np.float32,0x7d53ff8d,0x4214fbb6,4 +np.float32,0x3f581e4e,0xbd96a079,4 +np.float32,0x7ddaf20d,0x42163e4e,4 +np.float32,0x3f341d3c,0xbe1c5b4c,4 +np.float32,0x7ef04ba9,0x4218d032,4 +np.float32,0x620ed2,0xc2182e99,4 +np.float32,0x507850,0xc2188682,4 +np.float32,0x7d08f9,0xc217c284,4 +np.float32,0x7f0cf2aa,0x42191734,4 +np.float32,0x3f109a17,0xbe7e04fe,4 +np.float32,0x7f426152,0x4219a625,4 +np.float32,0x7f32d5a3,0x42198113,4 +np.float32,0x2e14b2,0xc2197e6f,4 +np.float32,0x3a5acd,0xc219156a,4 +np.float32,0x50a565,0xc2188589,4 +np.float32,0x5b751c,0xc2184d97,4 +np.float32,0x7e4149f6,0x42173b22,4 +np.float32,0x3dc34bf9,0xbf82a42a,4 +np.float32,0x3d12bc28,0xbfb910d6,4 +np.float32,0x7ebd2584,0x421865c1,4 +np.float32,0x7f6b3375,0x4219faeb,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0x3f35fe7d,0xbe17bd33,4 +np.float32,0x7db45c87,0x4215e818,4 +np.float32,0x3efff366,0xbe9a2b8d,4 +np.float32,0x3eb331d0,0xbee971a3,4 +np.float32,0x3f259d5f,0xbe41ae2e,4 +np.float32,0x3eab85ec,0xbef32c4a,4 +np.float32,0x7f194b8a,0x42193c8c,4 +np.float32,0x3f11a614,0xbe7acfc7,4 +np.float32,0x5b17,0xc221f16b,4 +np.float32,0x3f33dadc,0xbe1cff4d,4 +np.float32,0x3cda1506,0xbfc9920f,4 +np.float32,0x3f6856f1,0xbd2c8290,4 +np.float32,0x7f3357fb,0x42198257,4 +np.float32,0x7f56f329,0x4219d2e1,4 +np.float32,0x3ef84108,0xbea0f595,4 +np.float32,0x3f72340f,0xbcc51916,4 +np.float32,0x3daf28,0xc218fcbd,4 +np.float32,0x131035,0xc21b06f4,4 +np.float32,0x3f275c3b,0xbe3d0487,4 +np.float32,0x3ef06130,0xbea82069,4 +np.float32,0x3f57f3b0,0xbd974fef,4 +np.float32,0x7f6c4a78,0x4219fcfa,4 +np.float32,0x7e8421d0,0x4217c639,4 +np.float32,0x3f17a479,0xbe68e08e,4 +np.float32,0x7f03774e,0x4218f83b,4 +np.float32,0x441a33,0xc218d0b8,4 +np.float32,0x539158,0xc21875b6,4 +np.float32,0x3e8fcc75,0xbf0d3018,4 +np.float32,0x7ef74130,0x4218dce4,4 +np.float32,0x3ea6f4fa,0xbef92c38,4 +np.float32,0x7f3948ab,0x421990d5,4 +np.float32,0x7db6f8f5,0x4215ee7c,4 +np.float32,0x3ee44a2f,0xbeb399e5,4 +np.float32,0x156c59,0xc21ad30d,4 +np.float32,0x3f21ee53,0xbe4baf16,4 +np.float32,0x3f2c08f4,0xbe30c424,4 +np.float32,0x3f49885c,0xbdd4c6a9,4 +np.float32,0x3eae0b9c,0xbeefed54,4 +np.float32,0x1b5c1f,0xc21a6646,4 +np.float32,0x3e7330e2,0xbf1fd592,4 +np.float32,0x3ebbeb4c,0xbededf82,4 +np.float32,0x427154,0xc218dbb1,4 +np.float32,0x3f6b8b4b,0xbd142498,4 +np.float32,0x8e769,0xc21c5981,4 +np.float32,0x3e9db557,0xbf02ec1c,4 +np.float32,0x3f001bef,0xbe99f019,4 +np.float32,0x3e58b48c,0xbf2ca77a,4 +np.float32,0x3d46c16b,0xbfa8327c,4 +np.float32,0x7eeeb305,0x4218cd3b,4 +np.float32,0x3e3f163d,0xbf3aa446,4 +np.float32,0x3f66c872,0xbd3877d9,4 +np.float32,0x7f7162f8,0x421a0677,4 +np.float32,0x3edca3bc,0xbebb2e28,4 +np.float32,0x3dc1055b,0xbf834afa,4 +np.float32,0x12b16f,0xc21b0fad,4 +np.float32,0x3f733898,0xbcb62e16,4 +np.float32,0x3e617af8,0xbf283db0,4 +np.float32,0x7e86577a,0x4217cd99,4 +np.float32,0x3f0ba3c7,0xbe86c633,4 +np.float32,0x3f4cad25,0xbdc70247,4 +np.float32,0xb6cdf,0xc21bea9f,4 
+np.float32,0x3f42971a,0xbdf3f49e,4 +np.float32,0x3e6ccad2,0xbf22cc78,4 +np.float32,0x7f2121b2,0x421952b8,4 +np.float32,0x3f6d3f55,0xbd075366,4 +np.float32,0x3f524f,0xc218f117,4 +np.float32,0x3e95b5d9,0xbf08b56a,4 +np.float32,0x7f6ae47d,0x4219fa56,4 +np.float32,0x267539,0xc219ceda,4 +np.float32,0x3ef72f6d,0xbea1eb2e,4 +np.float32,0x2100b2,0xc21a12e2,4 +np.float32,0x3d9777d1,0xbf90c4e7,4 +np.float32,0x44c6f5,0xc218cc56,4 +np.float32,0x7f2a613d,0x42196b8a,4 +np.float32,0x390a25,0xc2191f8d,4 +np.float32,0x3f1de5ad,0xbe56e703,4 +np.float32,0x2f59ce,0xc2197258,4 +np.float32,0x7f3b12a1,0x4219951b,4 +np.float32,0x3ecb66d4,0xbecd44ca,4 +np.float32,0x7e74ff,0xc217bd7d,4 +np.float32,0x7ed83f78,0x4218a14d,4 +np.float32,0x685994,0xc21812f1,4 +np.float32,0xbf800000,0x7fc00000,4 +np.float32,0x736f47,0xc217e60b,4 +np.float32,0x7f09c371,0x42190d0a,4 +np.float32,0x3f7ca51d,0xbbbbbce0,4 +np.float32,0x7f4b4d3b,0x4219ba1a,4 +np.float32,0x3f6c4471,0xbd0eb076,4 +np.float32,0xd944e,0xc21b9dcf,4 +np.float32,0x7cb06ffc,0x421375cd,4 +np.float32,0x586187,0xc2185cce,4 +np.float32,0x3f3cbf5b,0xbe078911,4 +np.float32,0x3f30b504,0xbe24d983,4 +np.float32,0x3f0a16ba,0xbe8941fd,4 +np.float32,0x5c43b0,0xc21849af,4 +np.float32,0x3dad74f6,0xbf893bd5,4 +np.float32,0x3c586958,0xbff087a6,4 +np.float32,0x3e8307a8,0xbf1786ba,4 +np.float32,0x7dcd1776,0x4216213d,4 +np.float32,0x3f44d107,0xbde9d662,4 +np.float32,0x3e2e6823,0xbf44cbec,4 +np.float32,0x3d87ea27,0xbf96caca,4 +np.float32,0x3e0c715b,0xbf5ce07e,4 +np.float32,0x7ec9cd5a,0x4218828e,4 +np.float32,0x3e26c0b4,0xbf49c93e,4 +np.float32,0x75b94e,0xc217dd50,4 +np.float32,0x3df7b9f5,0xbf6ad7f4,4 +np.float32,0x0,0xff800000,4 +np.float32,0x3f284795,0xbe3a94da,4 +np.float32,0x7ee49092,0x4218b9f0,4 +np.float32,0x7f4c20e0,0x4219bbe8,4 +np.float32,0x3efbbce8,0xbe9ddc4b,4 +np.float32,0x12274a,0xc21b1cb4,4 +np.float32,0x5fa1b1,0xc21839be,4 +np.float32,0x7f0b210e,0x4219116d,4 +np.float32,0x3f67092a,0xbd368545,4 +np.float32,0x3d572721,0xbfa3ca5b,4 +np.float32,0x3f7913ce,0xbc431028,4 +np.float32,0x3b0613,0xc2191059,4 +np.float32,0x3e1d16c0,0xbf506c6f,4 +np.float32,0xab130,0xc21c081d,4 +np.float32,0x3e23ac97,0xbf4bdb9d,4 +np.float32,0x7ef52368,0x4218d911,4 +np.float32,0x7f38e686,0x42198fe9,4 +np.float32,0x3f106a21,0xbe7e9897,4 +np.float32,0x3ecef8d5,0xbec96644,4 +np.float32,0x3ec37e02,0xbed61683,4 +np.float32,0x3efbd063,0xbe9dcb17,4 +np.float32,0x3f318fe3,0xbe22b402,4 +np.float32,0x7e5e5228,0x4217795d,4 +np.float32,0x72a046,0xc217e92c,4 +np.float32,0x7f6f970b,0x421a0324,4 +np.float32,0x3ed871b4,0xbebf72fb,4 +np.float32,0x7a2eaa,0xc217ccc8,4 +np.float32,0x3e819655,0xbf18c1d7,4 +np.float32,0x80800000,0x7fc00000,4 +np.float32,0x7eab0719,0x421838f9,4 +np.float32,0x7f0763cb,0x4219054f,4 +np.float32,0x3f191672,0xbe64a8af,4 +np.float32,0x7d4327,0xc217c1b6,4 +np.float32,0x3f724ba6,0xbcc3bea3,4 +np.float32,0x60fe06,0xc2183375,4 +np.float32,0x48cd59,0xc218b30b,4 +np.float32,0x3f7fec2b,0xb909d3f3,4 +np.float32,0x1c7bb9,0xc21a5460,4 +np.float32,0x24d8a8,0xc219e1e4,4 +np.float32,0x3e727c52,0xbf20283c,4 +np.float32,0x4bc460,0xc218a14a,4 +np.float32,0x63e313,0xc2182661,4 +np.float32,0x7f625581,0x4219e9d4,4 +np.float32,0x3eeb3e77,0xbeacedc0,4 +np.float32,0x7ef27a47,0x4218d437,4 +np.float32,0x27105a,0xc219c7e6,4 +np.float32,0x22a10b,0xc219fd7d,4 +np.float32,0x3f41e907,0xbdf711ab,4 +np.float32,0x7c1fbf95,0x4212155b,4 +np.float32,0x7e5acceb,0x42177244,4 +np.float32,0x3e0892fa,0xbf5ffb83,4 +np.float32,0x3ea0e51d,0xbf00b2c0,4 +np.float32,0x3e56fc29,0xbf2d8a51,4 +np.float32,0x7ee724ed,0x4218beed,4 
+np.float32,0x7ebf142b,0x42186a46,4 +np.float32,0x7f6cf35c,0x4219fe37,4 +np.float32,0x3f11abf7,0xbe7abdcd,4 +np.float32,0x588d7a,0xc2185bf1,4 +np.float32,0x3f6e81d2,0xbcfbcf97,4 +np.float32,0x3f1b6be8,0xbe5dee2b,4 +np.float32,0x7f3815e0,0x42198df2,4 +np.float32,0x3f5bfc88,0xbd86d93d,4 +np.float32,0x3f3775d0,0xbe142bbc,4 +np.float32,0x78a958,0xc217d25a,4 +np.float32,0x2ff7c3,0xc2196c96,4 +np.float32,0x4b9c0,0xc21d733c,4 +np.float32,0x3ec025af,0xbed9ecf3,4 +np.float32,0x6443f0,0xc21824b3,4 +np.float32,0x3f754e28,0xbc97d299,4 +np.float32,0x3eaa91d3,0xbef4699d,4 +np.float32,0x3e5f2837,0xbf296478,4 +np.float32,0xe5676,0xc21b85a4,4 +np.float32,0x3f6859f2,0xbd2c6b90,4 +np.float32,0x3f68686b,0xbd2bfcc6,4 +np.float32,0x4b39b8,0xc218a47b,4 +np.float32,0x630ac4,0xc2182a28,4 +np.float32,0x160980,0xc21ac67d,4 +np.float32,0x3ed91c4d,0xbebec3fd,4 +np.float32,0x7ec27b0d,0x4218721f,4 +np.float32,0x3f3c0a5f,0xbe09344b,4 +np.float32,0x3dbff9c1,0xbf839841,4 +np.float32,0x7f0e8ea7,0x42191c40,4 +np.float32,0x3f36b162,0xbe1608e4,4 +np.float32,0x228bb3,0xc219fe90,4 +np.float32,0x2fdd30,0xc2196d8c,4 +np.float32,0x3e8fce8e,0xbf0d2e79,4 +np.float32,0x3f36acc7,0xbe16141a,4 +np.float32,0x7f44b51c,0x4219ab70,4 +np.float32,0x3ec3371c,0xbed66736,4 +np.float32,0x4388a2,0xc218d473,4 +np.float32,0x3f5aa6c3,0xbd8c4344,4 +np.float32,0x7f09fce4,0x42190dc3,4 +np.float32,0x7ed7854a,0x42189fce,4 +np.float32,0x7f4da83a,0x4219bf3a,4 +np.float32,0x3db8da28,0xbf85b25a,4 +np.float32,0x7f449686,0x4219ab2b,4 +np.float32,0x2eb25,0xc21e498c,4 +np.float32,0x3f2bcc08,0xbe3161bd,4 +np.float32,0x36c923,0xc219317b,4 +np.float32,0x3d52a866,0xbfa4f6d2,4 +np.float32,0x3f7d6688,0xbb913e4e,4 +np.float32,0x3f5a6ba4,0xbd8d33e3,4 +np.float32,0x719740,0xc217ed35,4 +np.float32,0x78a472,0xc217d26c,4 +np.float32,0x7ee33d0c,0x4218b759,4 +np.float32,0x7f668c1d,0x4219f208,4 +np.float32,0x3e29c600,0xbf47ca46,4 +np.float32,0x3f3cefc3,0xbe071712,4 +np.float32,0x3e224ebd,0xbf4cca41,4 +np.float32,0x7f1417be,0x42192d31,4 +np.float32,0x7f29d7d5,0x42196a23,4 +np.float32,0x3338ce,0xc2194f65,4 +np.float32,0x2a7897,0xc219a2b6,4 +np.float32,0x3d6bc3d8,0xbf9eb468,4 +np.float32,0x3f6bd7bf,0xbd11e392,4 +np.float32,0x7f6d26bf,0x4219fe98,4 +np.float32,0x3f52d378,0xbdacadb5,4 +np.float32,0x3efac453,0xbe9eb84a,4 +np.float32,0x3f692eb7,0xbd261184,4 +np.float32,0x3f6a0bb5,0xbd1f7ec1,4 +np.float32,0x3f037a49,0xbe942aa8,4 +np.float32,0x3f465bd4,0xbde2e530,4 +np.float32,0x7ef0f47b,0x4218d16a,4 +np.float32,0x637127,0xc218285e,4 +np.float32,0x3f41e511,0xbdf723d7,4 +np.float32,0x7f800000,0x7f800000,4 +np.float32,0x3f3342d5,0xbe1e77d5,4 +np.float32,0x7f57cfe6,0x4219d4a9,4 +np.float32,0x3e4358ed,0xbf3830a7,4 +np.float32,0x3ce25f15,0xbfc77f2b,4 +np.float32,0x7ed057e7,0x421890be,4 +np.float32,0x7ce154d9,0x4213e295,4 +np.float32,0x3ee91984,0xbeaef703,4 +np.float32,0x7e4e919c,0x421758af,4 +np.float32,0x6830e7,0xc218139e,4 +np.float32,0x3f12f08e,0xbe76e328,4 +np.float32,0x7f0a7a32,0x42190f56,4 +np.float32,0x7f38e,0xc21c8bd3,4 +np.float32,0x3e01def9,0xbf6593e3,4 +np.float32,0x3f5c8c6d,0xbd849432,4 +np.float32,0x3eed8747,0xbeaac7a3,4 +np.float32,0x3cadaa0e,0xbfd63b21,4 +np.float32,0x3f7532a9,0xbc996178,4 +np.float32,0x31f3ac,0xc2195a8f,4 +np.float32,0x3f0e0f97,0xbe82f3af,4 +np.float32,0x3f2a1f35,0xbe35bd3f,4 +np.float32,0x3f4547b2,0xbde7bebd,4 +np.float32,0x3f7988a6,0xbc36094c,4 +np.float32,0x74464c,0xc217e2d2,4 +np.float32,0x7f7518be,0x421a0d3f,4 +np.float32,0x7e97fa0a,0x42180473,4 +np.float32,0x584e3a,0xc2185d2f,4 +np.float32,0x3e7291f3,0xbf201e52,4 +np.float32,0xc0a05,0xc21bd359,4 
+np.float32,0x3a3177,0xc21916a6,4 +np.float32,0x4f417f,0xc2188d45,4 +np.float32,0x263fce,0xc219d145,4 +np.float32,0x7e1d58,0xc217beb1,4 +np.float32,0x7f056af3,0x4218fec9,4 +np.float32,0x3f21c181,0xbe4c2a3f,4 +np.float32,0x7eca4956,0x4218839f,4 +np.float32,0x3e58afa8,0xbf2ca9fd,4 +np.float32,0x3f40d583,0xbdfc04ef,4 +np.float32,0x7f432fbb,0x4219a7fc,4 +np.float32,0x43aaa4,0xc218d393,4 +np.float32,0x7f2c9b62,0x42197150,4 +np.float32,0x5c3876,0xc21849e5,4 +np.float32,0x7f2034e8,0x42195029,4 +np.float32,0x7e5be772,0x42177481,4 +np.float32,0x80000000,0xff800000,4 +np.float32,0x3f5be03b,0xbd874bb0,4 +np.float32,0x3e32494f,0xbf4259be,4 +np.float32,0x3e1f4671,0xbf4ee30b,4 +np.float32,0x4606cc,0xc218c454,4 +np.float32,0x425cbc,0xc218dc3b,4 +np.float32,0x7dd9b8bf,0x42163bd0,4 +np.float32,0x3f0465d0,0xbe929db7,4 +np.float32,0x3f735077,0xbcb4d0fa,4 +np.float32,0x4d6a43,0xc21897b8,4 +np.float32,0x3e27d600,0xbf4910f5,4 +np.float32,0x3f06e0cc,0xbe8e7d24,4 +np.float32,0x3f3fd064,0xbe005e45,4 +np.float32,0x176f1,0xc21f7c2d,4 +np.float32,0x3eb64e6f,0xbee59d9c,4 +np.float32,0x7f0f075d,0x42191db8,4 +np.float32,0x3f718cbe,0xbcceb621,4 +np.float32,0x3ead7bda,0xbef0a54a,4 +np.float32,0x7f77c1a8,0x421a120c,4 +np.float32,0x3f6a79c5,0xbd1c3afd,4 +np.float32,0x3e992d1f,0xbf062a02,4 +np.float32,0x3e6f6335,0xbf219639,4 +np.float32,0x7f6d9a3e,0x4219ff70,4 +np.float32,0x557ed1,0xc2186b91,4 +np.float32,0x3f13a456,0xbe74c457,4 +np.float32,0x15c2dc,0xc21acc17,4 +np.float32,0x71f36f,0xc217ebcc,4 +np.float32,0x748dea,0xc217e1c1,4 +np.float32,0x7f0f32e0,0x42191e3f,4 +np.float32,0x5b1da8,0xc2184f41,4 +np.float32,0x3d865d3a,0xbf976e11,4 +np.float32,0x3f800000,0x0,4 +np.float32,0x7f67b56d,0x4219f444,4 +np.float32,0x6266a1,0xc2182d0c,4 +np.float32,0x3ec9c5e4,0xbecf0e6b,4 +np.float32,0x6a6a0e,0xc2180a3b,4 +np.float32,0x7e9db6fd,0x421814ef,4 +np.float32,0x3e7458f7,0xbf1f4e88,4 +np.float32,0x3ead8016,0xbef09fdc,4 +np.float32,0x3e263d1c,0xbf4a211e,4 +np.float32,0x7f6b3329,0x4219faeb,4 +np.float32,0x800000,0xc217b818,4 +np.float32,0x3f0654c7,0xbe8f6471,4 +np.float32,0x3f281b71,0xbe3b0990,4 +np.float32,0x7c4c8e,0xc217c524,4 +np.float32,0x7d113a87,0x4214537d,4 +np.float32,0x734b5f,0xc217e696,4 +np.float32,0x7f079d05,0x4219060b,4 +np.float32,0x3ee830b1,0xbeafd58b,4 +np.float32,0x3f1c3b8b,0xbe5b9d96,4 +np.float32,0x3f2bf0c6,0xbe3102aa,4 +np.float32,0x7ddffe22,0x42164871,4 +np.float32,0x3f1e58b4,0xbe55a37f,4 +np.float32,0x5f3edf,0xc2183b8a,4 +np.float32,0x7f1fb6ec,0x42194eca,4 +np.float32,0x3f78718e,0xbc55311e,4 +np.float32,0x3e574b7d,0xbf2d6152,4 +np.float32,0x7eab27c6,0x4218394e,4 +np.float32,0x7f34603c,0x421984e5,4 +np.float32,0x3f3a8b57,0xbe0cc1ca,4 +np.float32,0x3f744181,0xbca7134e,4 +np.float32,0x3f7e3bc4,0xbb45156b,4 +np.float32,0x93ab4,0xc21c498b,4 +np.float32,0x7ed5541e,0x42189b42,4 +np.float32,0x6bf8ec,0xc21803c4,4 +np.float32,0x757395,0xc217de58,4 +np.float32,0x7f177214,0x42193726,4 +np.float32,0x59935f,0xc21856d6,4 +np.float32,0x2cd9ba,0xc2198a78,4 +np.float32,0x3ef6fd5c,0xbea2183c,4 +np.float32,0x3ebb6c63,0xbedf75e0,4 +np.float32,0x7f43272c,0x4219a7e9,4 +np.float32,0x7f42e67d,0x4219a755,4 +np.float32,0x3f3f744f,0xbe0133f6,4 +np.float32,0x7f5fddaa,0x4219e4f4,4 +np.float32,0x3dc9874f,0xbf80e529,4 +np.float32,0x3f2efe64,0xbe292ec8,4 +np.float32,0x3e0406a6,0xbf63bf7c,4 +np.float32,0x3cdbb0aa,0xbfc92984,4 +np.float32,0x3e6597e7,0xbf263b30,4 +np.float32,0x3f0c1153,0xbe861807,4 +np.float32,0x7fce16,0xc217b8c6,4 +np.float32,0x3f5f4e5f,0xbd730dc6,4 +np.float32,0x3ed41ffa,0xbec3ee69,4 +np.float32,0x3f216c78,0xbe4d1446,4 
+np.float32,0x3f123ed7,0xbe78fe4b,4 +np.float32,0x7f7e0ca9,0x421a1d34,4 +np.float32,0x7e318af4,0x42171558,4 +np.float32,0x7f1e1659,0x42194a3d,4 +np.float32,0x34d12a,0xc21941c2,4 +np.float32,0x3d9566ad,0xbf918870,4 +np.float32,0x3e799a47,0xbf1cf0e5,4 +np.float32,0x3e89dd6f,0xbf11df76,4 +np.float32,0x32f0d3,0xc21951d8,4 +np.float32,0x7e89d17e,0x4217d8f6,4 +np.float32,0x1f3b38,0xc21a2b6b,4 +np.float32,0x7ee9e060,0x4218c427,4 +np.float32,0x31a673,0xc2195d41,4 +np.float32,0x5180f1,0xc21880d5,4 +np.float32,0x3cd36f,0xc21902f8,4 +np.float32,0x3bb63004,0xc01050cb,4 +np.float32,0x3e8ee9d1,0xbf0ddfde,4 +np.float32,0x3d2a7da3,0xbfb0b970,4 +np.float32,0x3ea58107,0xbefb1dc3,4 +np.float32,0x7f6760b0,0x4219f3a2,4 +np.float32,0x7f7f9e08,0x421a1ff0,4 +np.float32,0x37e7f1,0xc219287b,4 +np.float32,0x3ef7eb53,0xbea14267,4 +np.float32,0x3e2eb581,0xbf449aa5,4 +np.float32,0x3da7671c,0xbf8b3568,4 +np.float32,0x7af36f7b,0x420f33ee,4 +np.float32,0x3eb3602c,0xbee93823,4 +np.float32,0x3f68bcff,0xbd2975de,4 +np.float32,0x3ea7cefb,0xbef80a9d,4 +np.float32,0x3f329689,0xbe202414,4 +np.float32,0x7f0c7c80,0x421915be,4 +np.float32,0x7f4739b8,0x4219b118,4 +np.float32,0x73af58,0xc217e515,4 +np.float32,0x7f13eb2a,0x42192cab,4 +np.float32,0x30f2d9,0xc2196395,4 +np.float32,0x7ea7066c,0x42182e71,4 +np.float32,0x669fec,0xc2181a5b,4 +np.float32,0x3f7d6876,0xbb90d1ef,4 +np.float32,0x3f08a4ef,0xbe8b9897,4 +np.float32,0x7f2a906c,0x42196c05,4 +np.float32,0x3ed3ca42,0xbec44856,4 +np.float32,0x9d27,0xc220fee2,4 +np.float32,0x3e4508a1,0xbf373c03,4 +np.float32,0x3e41f8de,0xbf38f9bb,4 +np.float32,0x3e912714,0xbf0c255b,4 +np.float32,0xff800000,0x7fc00000,4 +np.float32,0x7eefd13d,0x4218cf4f,4 +np.float32,0x3f491674,0xbdd6bded,4 +np.float32,0x3ef49512,0xbea445c9,4 +np.float32,0x3f045b79,0xbe92af15,4 +np.float32,0x3ef6c412,0xbea24bd5,4 +np.float32,0x3e6f3c28,0xbf21a85d,4 +np.float32,0x3ef71839,0xbea2000e,4 +np.float32,0x1,0xc23369f4,4 +np.float32,0x3e3fcfe4,0xbf3a3876,4 +np.float32,0x3e9d7a65,0xbf0315b2,4 +np.float32,0x20b7c4,0xc21a16bd,4 +np.float32,0x7f707b10,0x421a04cb,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3f285ebd,0xbe3a57ac,4 +np.float32,0x74c9ea,0xc217e0dc,4 +np.float32,0x3f6501f2,0xbd4634ab,4 +np.float32,0x3f248959,0xbe4495cc,4 +np.float32,0x7e915ff0,0x4217f0b3,4 +np.float32,0x7edbb910,0x4218a864,4 +np.float32,0x3f7042dd,0xbce1bddb,4 +np.float32,0x6f08c9,0xc217f754,4 +np.float32,0x7f423993,0x4219a5ca,4 +np.float32,0x3f125704,0xbe78b4cd,4 +np.float32,0x7ef7f5ae,0x4218de28,4 +np.float32,0x3f2dd940,0xbe2c1a33,4 +np.float32,0x3f1ca78e,0xbe5a6a8b,4 +np.float32,0x244863,0xc219e8be,4 +np.float32,0x3f2614fe,0xbe406d6b,4 +np.float32,0x3e75e7a3,0xbf1e99b5,4 +np.float32,0x2bdd6e,0xc2199459,4 +np.float32,0x7e49e279,0x42174e7b,4 +np.float32,0x3e3bb09a,0xbf3ca2cd,4 +np.float32,0x649f06,0xc2182320,4 +np.float32,0x7f4a44e1,0x4219b7d6,4 +np.float32,0x400473,0xc218ec3a,4 +np.float32,0x3edb19ad,0xbebcbcad,4 +np.float32,0x3d8ee956,0xbf94006c,4 +np.float32,0x7e91c603,0x4217f1eb,4 +np.float32,0x221384,0xc21a04a6,4 +np.float32,0x7f7dd660,0x421a1cd5,4 +np.float32,0x7ef34609,0x4218d5ac,4 +np.float32,0x7f5ed529,0x4219e2e5,4 +np.float32,0x7f1bf685,0x42194438,4 +np.float32,0x3cdd094a,0xbfc8d294,4 +np.float32,0x7e87fc8e,0x4217d303,4 +np.float32,0x7f53d971,0x4219cc6b,4 +np.float32,0xabc8b,0xc21c0646,4 +np.float32,0x7f5011e6,0x4219c46a,4 +np.float32,0x7e460638,0x421745e5,4 +np.float32,0xa8126,0xc21c0ffd,4 +np.float32,0x3eec2a66,0xbeac0f2d,4 +np.float32,0x3f3a1213,0xbe0de340,4 +np.float32,0x7f5908db,0x4219d72c,4 +np.float32,0x7e0ad3c5,0x4216a7f3,4 
+np.float32,0x3f2de40e,0xbe2bfe90,4 +np.float32,0x3d0463c5,0xbfbec8e4,4 +np.float32,0x7c7cde0b,0x4212e19a,4 +np.float32,0x74c24f,0xc217e0f9,4 +np.float32,0x3f14b4cb,0xbe71929b,4 +np.float32,0x3e94e192,0xbf09537f,4 +np.float32,0x3eebde71,0xbeac56bd,4 +np.float32,0x3f65e413,0xbd3f5b8a,4 +np.float32,0x7e109199,0x4216b9f9,4 +np.float32,0x3f22f5d0,0xbe48ddc0,4 +np.float32,0x3e22d3bc,0xbf4c6f4d,4 +np.float32,0x3f7a812f,0xbc1a680b,4 +np.float32,0x3f67f361,0xbd2f7d7c,4 +np.float32,0x3f1caa63,0xbe5a6281,4 +np.float32,0x3f306fde,0xbe2587ab,4 +np.float32,0x3e8df9d3,0xbf0e9b2f,4 +np.float32,0x3eaaccc4,0xbef41cd4,4 +np.float32,0x7f3f65ec,0x42199f45,4 +np.float32,0x3dc706e0,0xbf8196ec,4 +np.float32,0x3e14eaba,0xbf565cf6,4 +np.float32,0xcc60,0xc2208a09,4 +np.float32,0x358447,0xc2193be7,4 +np.float32,0x3dcecade,0xbf7eec70,4 +np.float32,0x3f20b4f8,0xbe4f0ef0,4 +np.float32,0x7e7c979f,0x4217b222,4 +np.float32,0x7f2387b9,0x4219594a,4 +np.float32,0x3f6f6e5c,0xbcee0e05,4 +np.float32,0x7f19ad81,0x42193da8,4 +np.float32,0x5635e1,0xc21867dd,4 +np.float32,0x4c5e97,0xc2189dc4,4 +np.float32,0x7f35f97f,0x421988d1,4 +np.float32,0x7f685224,0x4219f571,4 +np.float32,0x3eca0616,0xbecec7b8,4 +np.float32,0x3f436d0d,0xbdf024ca,4 +np.float32,0x12a97d,0xc21b106a,4 +np.float32,0x7f0fdc93,0x4219204d,4 +np.float32,0x3debfb42,0xbf703e65,4 +np.float32,0x3c6c54d2,0xbfeba291,4 +np.float32,0x7e5d7491,0x421777a1,4 +np.float32,0x3f4bd2f0,0xbdcab87d,4 +np.float32,0x3f7517f4,0xbc9ae510,4 +np.float32,0x3f71a59a,0xbccd480d,4 +np.float32,0x3f514653,0xbdb33f61,4 +np.float32,0x3f4e6ea4,0xbdbf694b,4 +np.float32,0x3eadadec,0xbef06526,4 +np.float32,0x3f3b41c1,0xbe0b0fbf,4 +np.float32,0xc35a,0xc2209e1e,4 +np.float32,0x384982,0xc2192575,4 +np.float32,0x3464c3,0xc2194556,4 +np.float32,0x7f5e20d9,0x4219e17d,4 +np.float32,0x3ea18b62,0xbf004016,4 +np.float32,0x63a02b,0xc218278c,4 +np.float32,0x7ef547ba,0x4218d953,4 +np.float32,0x3f2496fb,0xbe4470f4,4 +np.float32,0x7ea0c8c6,0x42181d81,4 +np.float32,0x3f42ba60,0xbdf35372,4 +np.float32,0x7e40d9,0xc217be34,4 +np.float32,0x3e95883b,0xbf08d750,4 +np.float32,0x3e0cddf3,0xbf5c8aa8,4 +np.float32,0x3f2305d5,0xbe48b20a,4 +np.float32,0x7f0d0941,0x4219177b,4 +np.float32,0x3f7b98d3,0xbbf6e477,4 +np.float32,0x3f687cdc,0xbd2b6057,4 +np.float32,0x3f42ce91,0xbdf2f73d,4 +np.float32,0x3ee00fc0,0xbeb7c217,4 +np.float32,0x7f3d483a,0x42199a53,4 +np.float32,0x3e1e08eb,0xbf4fc18d,4 +np.float32,0x7e202ff5,0x4216e798,4 +np.float32,0x582898,0xc2185ded,4 +np.float32,0x3e3552b1,0xbf40790c,4 +np.float32,0x3d3f7c87,0xbfaa44b6,4 +np.float32,0x669d8e,0xc2181a65,4 +np.float32,0x3f0e21b4,0xbe82d757,4 +np.float32,0x686f95,0xc2181293,4 +np.float32,0x3f48367f,0xbdda9ead,4 +np.float32,0x3dc27802,0xbf82e0a0,4 +np.float32,0x3f6ac40c,0xbd1a07d4,4 +np.float32,0x3bba6d,0xc2190b12,4 +np.float32,0x3ec7b6b0,0xbed15665,4 +np.float32,0x3f1f9ca4,0xbe521955,4 +np.float32,0x3ef2f147,0xbea5c4b8,4 +np.float32,0x7c65f769,0x4212b762,4 +np.float32,0x7e98e162,0x42180716,4 +np.float32,0x3f0f0c09,0xbe8169ea,4 +np.float32,0x3d67f03b,0xbf9f9d48,4 +np.float32,0x7f3751e4,0x42198c18,4 +np.float32,0x7f1fac61,0x42194ead,4 +np.float32,0x3e9b698b,0xbf048d89,4 +np.float32,0x7e66507b,0x42178913,4 +np.float32,0x7f5cb680,0x4219dea5,4 +np.float32,0x234700,0xc219f53e,4 +np.float32,0x3d9984ad,0xbf900591,4 +np.float32,0x3f33a3f2,0xbe1d872a,4 +np.float32,0x3eaf52b6,0xbeee4cf4,4 +np.float32,0x7f078930,0x421905ca,4 +np.float32,0x3f083b39,0xbe8c44df,4 +np.float32,0x3e3823f8,0xbf3ec231,4 +np.float32,0x3eef6f5d,0xbea9008c,4 +np.float32,0x6145e1,0xc218322c,4 
+np.float32,0x16d9ae,0xc21ab65f,4 +np.float32,0x7e543376,0x421764a5,4 +np.float32,0x3ef77ccb,0xbea1a5a0,4 +np.float32,0x3f4a443f,0xbdd18af5,4 +np.float32,0x8f209,0xc21c5770,4 +np.float32,0x3ecac126,0xbecdfa33,4 +np.float32,0x3e8662f9,0xbf14b6c7,4 +np.float32,0x23759a,0xc219f2f4,4 +np.float32,0xf256d,0xc21b6d3f,4 +np.float32,0x3f579f93,0xbd98aaa2,4 +np.float32,0x3ed4cc8e,0xbec339cb,4 +np.float32,0x3ed25400,0xbec5d2a1,4 +np.float32,0x3ed6f8ba,0xbec0f795,4 +np.float32,0x7f36efd9,0x42198b2a,4 +np.float32,0x7f5169dd,0x4219c746,4 +np.float32,0x7de18a20,0x42164b80,4 +np.float32,0x3e8de526,0xbf0eab61,4 +np.float32,0x3de0cbcd,0xbf75a47e,4 +np.float32,0xe265f,0xc21b8b82,4 +np.float32,0x3df3cdbd,0xbf6c9e40,4 +np.float32,0x3f38a25a,0xbe115589,4 +np.float32,0x7f01f2c0,0x4218f311,4 +np.float32,0x3da7d5f4,0xbf8b10a5,4 +np.float32,0x4d4fe8,0xc2189850,4 +np.float32,0x3cc96d9d,0xbfcdfc8d,4 +np.float32,0x259a88,0xc219d8d7,4 +np.float32,0x7f1d5102,0x42194810,4 +np.float32,0x7e17ca91,0x4216cfa7,4 +np.float32,0x3f73d110,0xbcad7a8f,4 +np.float32,0x3f009383,0xbe9920ed,4 +np.float32,0x7e22af,0xc217be9f,4 +np.float32,0x3f7de2ce,0xbb6c0394,4 +np.float32,0x3edd0cd2,0xbebac45a,4 +np.float32,0x3ec9b5c1,0xbecf2035,4 +np.float32,0x3168c5,0xc2195f6b,4 +np.float32,0x3e935522,0xbf0a7d18,4 +np.float32,0x3e494077,0xbf34e120,4 +np.float32,0x3f52ed06,0xbdac41ec,4 +np.float32,0x3f73d51e,0xbcad3f65,4 +np.float32,0x3f03d453,0xbe939295,4 +np.float32,0x7ef4ee68,0x4218d8b1,4 +np.float32,0x3ed0e2,0xc218f4a7,4 +np.float32,0x4efab8,0xc2188ed3,4 +np.float32,0x3dbd5632,0xbf845d3b,4 +np.float32,0x7eecad4f,0x4218c972,4 +np.float32,0x9d636,0xc21c2d32,4 +np.float32,0x3e5f3b6b,0xbf295ae7,4 +np.float32,0x7f4932df,0x4219b57a,4 +np.float32,0x4b59b5,0xc218a3be,4 +np.float32,0x3e5de97f,0xbf2a03b4,4 +np.float32,0x3f1c479d,0xbe5b7b3c,4 +np.float32,0x3f42e7e4,0xbdf283a5,4 +np.float32,0x2445,0xc2238af2,4 +np.float32,0x7aa71b43,0x420e8c9e,4 +np.float32,0x3ede6e4e,0xbeb961e1,4 +np.float32,0x7f05dd3b,0x42190045,4 +np.float32,0x3ef5b55c,0xbea3404b,4 +np.float32,0x7f738624,0x421a0a62,4 +np.float32,0x3e7d50a1,0xbf1b4cb4,4 +np.float32,0x3f44cc4a,0xbde9ebcc,4 +np.float32,0x7e1a7b0b,0x4216d777,4 +np.float32,0x3f1d9868,0xbe57c0da,4 +np.float32,0x1ebee2,0xc21a3263,4 +np.float32,0x31685f,0xc2195f6e,4 +np.float32,0x368a8e,0xc2193379,4 +np.float32,0xa9847,0xc21c0c2e,4 +np.float32,0x3bd3b3,0xc2190a56,4 +np.float32,0x3961e4,0xc2191ce3,4 +np.float32,0x7e13a243,0x4216c34e,4 +np.float32,0x7f7b1790,0x421a17ff,4 +np.float32,0x3e55f020,0xbf2e1545,4 +np.float32,0x3f513861,0xbdb37aa8,4 +np.float32,0x3dd9e754,0xbf791ad2,4 +np.float32,0x5e8d86,0xc2183ec9,4 +np.float32,0x26b796,0xc219cbdd,4 +np.float32,0x429daa,0xc218da89,4 +np.float32,0x3f477caa,0xbdddd9ba,4 +np.float32,0x3f0e5114,0xbe828d45,4 +np.float32,0x3f54f362,0xbda3c286,4 +np.float32,0x6eac1c,0xc217f8c8,4 +np.float32,0x3f04c479,0xbe91fef5,4 +np.float32,0x3e993765,0xbf06228e,4 +np.float32,0x3eafd99f,0xbeeda21b,4 +np.float32,0x3f2a759e,0xbe34db96,4 +np.float32,0x3f05adfb,0xbe907937,4 +np.float32,0x3f6e2dfc,0xbd005980,4 +np.float32,0x3f2f2daa,0xbe28b6b5,4 +np.float32,0x15e746,0xc21ac931,4 +np.float32,0x7d34ca26,0x4214b4e5,4 +np.float32,0x7ebd175c,0x4218659f,4 +np.float32,0x7f1ed26b,0x42194c4c,4 +np.float32,0x2588b,0xc21eaab0,4 +np.float32,0x3f0065e3,0xbe996fe2,4 +np.float32,0x3f610376,0xbd658122,4 +np.float32,0x451995,0xc218ca41,4 +np.float32,0x70e083,0xc217f002,4 +np.float32,0x7e19821a,0x4216d4a8,4 +np.float32,0x3e7cd9a0,0xbf1b80fb,4 +np.float32,0x7f1a8f18,0x42194033,4 +np.float32,0x3f008fee,0xbe99271f,4 
+np.float32,0xff7fffff,0x7fc00000,4 +np.float32,0x7f31d826,0x42197e9b,4 +np.float32,0x3f18cf12,0xbe657838,4 +np.float32,0x3e5c1bc7,0xbf2aebf9,4 +np.float32,0x3e3d3993,0xbf3bbaf8,4 +np.float32,0x68457a,0xc2181347,4 +np.float32,0x7ddf7561,0x42164761,4 +np.float32,0x7f47341b,0x4219b10c,4 +np.float32,0x4d3ecd,0xc21898b2,4 +np.float32,0x7f43dee8,0x4219a98b,4 +np.float32,0x3f0def7c,0xbe8325f5,4 +np.float32,0x3d5a551f,0xbfa2f994,4 +np.float32,0x7ed26602,0x4218951b,4 +np.float32,0x3ee7fa5b,0xbeb0099a,4 +np.float32,0x7ef74ea8,0x4218dcfc,4 +np.float32,0x6a3bb2,0xc2180afd,4 +np.float32,0x7f4c1e6e,0x4219bbe3,4 +np.float32,0x3e26f625,0xbf49a5a2,4 +np.float32,0xb8482,0xc21be70b,4 +np.float32,0x3f32f077,0xbe1f445b,4 +np.float32,0x7dd694b6,0x4216355a,4 +np.float32,0x7f3d62fd,0x42199a92,4 +np.float32,0x3f48e41a,0xbdd79cbf,4 +np.float32,0x338fc3,0xc2194c75,4 +np.float32,0x3e8355f0,0xbf174462,4 +np.float32,0x7f487e83,0x4219b3eb,4 +np.float32,0x2227f7,0xc21a039b,4 +np.float32,0x7e4383dd,0x4217403a,4 +np.float32,0x52d28b,0xc21879b2,4 +np.float32,0x12472c,0xc21b19a9,4 +np.float32,0x353530,0xc2193e7b,4 +np.float32,0x3f4e4728,0xbdc0137a,4 +np.float32,0x3bf169,0xc2190979,4 +np.float32,0x3eb3ee2e,0xbee8885f,4 +np.float32,0x3f03e3c0,0xbe937892,4 +np.float32,0x3c9f8408,0xbfdaf47f,4 +np.float32,0x40e792,0xc218e61b,4 +np.float32,0x5a6b29,0xc21852ab,4 +np.float32,0x7f268b83,0x4219616a,4 +np.float32,0x3ee25997,0xbeb57fa7,4 +np.float32,0x3f175324,0xbe69cf53,4 +np.float32,0x3f781d91,0xbc5e9827,4 +np.float32,0x7dba5210,0x4215f68c,4 +np.float32,0x7f1e66,0xc217bb2b,4 +np.float32,0x7f7fffff,0x421a209b,4 +np.float32,0x3f646202,0xbd4b10b8,4 +np.float32,0x575248,0xc218622b,4 +np.float32,0x7c67faa1,0x4212bb42,4 +np.float32,0x7f1683f2,0x42193469,4 +np.float32,0x1a3864,0xc21a7931,4 +np.float32,0x7f30ad75,0x42197bae,4 +np.float32,0x7f1c9d05,0x42194612,4 +np.float32,0x3e791795,0xbf1d2b2c,4 +np.float32,0x7e9ebc19,0x421817cd,4 +np.float32,0x4999b7,0xc218ae31,4 +np.float32,0x3d130e2c,0xbfb8f1cc,4 +np.float32,0x3f7e436f,0xbb41bb07,4 +np.float32,0x3ee00241,0xbeb7cf7d,4 +np.float32,0x7e496181,0x42174d5f,4 +np.float32,0x7efe58be,0x4218e978,4 +np.float32,0x3f5e5b0c,0xbd7aa43f,4 +np.float32,0x7ee4c6ab,0x4218ba59,4 +np.float32,0x3f6da8c6,0xbd043d7e,4 +np.float32,0x3e3e6e0f,0xbf3b064b,4 +np.float32,0x3f0143b3,0xbe97f10a,4 +np.float32,0x79170f,0xc217d0c6,4 +np.float32,0x517645,0xc218810f,4 +np.float32,0x3f1f9960,0xbe52226e,4 +np.float32,0x2a8df9,0xc219a1d6,4 +np.float32,0x2300a6,0xc219f8b8,4 +np.float32,0x3ee31355,0xbeb4c97a,4 +np.float32,0x3f20b05f,0xbe4f1ba9,4 +np.float32,0x3ee64249,0xbeb1b0ff,4 +np.float32,0x3a94b7,0xc21913b2,4 +np.float32,0x7ef7ef43,0x4218de1d,4 +np.float32,0x3f1abb5d,0xbe5fe872,4 +np.float32,0x7f65360b,0x4219ef72,4 +np.float32,0x3d315d,0xc219004c,4 +np.float32,0x3f26bbc4,0xbe3eafb9,4 +np.float32,0x3ee8c6e9,0xbeaf45de,4 +np.float32,0x7e5f1452,0x42177ae1,4 +np.float32,0x3f32e777,0xbe1f5aba,4 +np.float32,0x4d39a1,0xc21898d0,4 +np.float32,0x3e59ad15,0xbf2c2841,4 +np.float32,0x3f4be746,0xbdca5fc4,4 +np.float32,0x72e4fd,0xc217e821,4 +np.float32,0x1af0b8,0xc21a6d25,4 +np.float32,0x3f311147,0xbe23f18d,4 +np.float32,0x3f1ecebb,0xbe545880,4 +np.float32,0x7e90d293,0x4217ef02,4 +np.float32,0x3e3b366a,0xbf3ceb46,4 +np.float32,0x3f133239,0xbe761c96,4 +np.float32,0x7541ab,0xc217df15,4 +np.float32,0x3d8c8275,0xbf94f1a1,4 +np.float32,0x483b92,0xc218b689,4 +np.float32,0x3eb0dbed,0xbeec5c6b,4 +np.float32,0x3f00c676,0xbe98c8e2,4 +np.float32,0x3f445ac2,0xbdebed7c,4 +np.float32,0x3d2af4,0xc219007a,4 +np.float32,0x7f196ee1,0x42193cf2,4 
+np.float32,0x290c94,0xc219b1db,4 +np.float32,0x3f5dbdc9,0xbd7f9019,4 +np.float32,0x3e80c62e,0xbf1974fc,4 +np.float32,0x3ec9ed2c,0xbecee326,4 +np.float32,0x7f469d60,0x4219afbb,4 +np.float32,0x3f698413,0xbd2386ce,4 +np.float32,0x42163f,0xc218de14,4 +np.float32,0x67a554,0xc21815f4,4 +np.float32,0x3f4bff74,0xbdc9f651,4 +np.float32,0x16a743,0xc21aba39,4 +np.float32,0x2eb8b0,0xc219784b,4 +np.float32,0x3eed9be1,0xbeaab45b,4 +np.float64,0x7fe0d76873e1aed0,0x40733f9d783bad7a,1 +np.float64,0x3fe22626bb244c4d,0xbfcf86a59864eea2,1 +np.float64,0x7f874113d02e8227,0x407324f54c4015b8,1 +np.float64,0x3fe40a46a9e8148d,0xbfca0411f533fcb9,1 +np.float64,0x3fd03932eea07266,0xbfe312bc9cf5649e,1 +np.float64,0x7fee5d2a1b3cba53,0x407343b5f56367a0,1 +np.float64,0x3feb7bda4a76f7b5,0xbfb0ea2c6edc784a,1 +np.float64,0x3fd6cd831a2d9b06,0xbfdcaf2e1a5faf51,1 +np.float64,0x98324e273064a,0xc0733e0e4c6d11c6,1 +np.float64,0x7fe1dd63b363bac6,0x4073400667c405c3,1 +np.float64,0x3fec5971f178b2e4,0xbfaaef32a7d94563,1 +np.float64,0x17abc07e2f579,0xc0734afca4da721e,1 +np.float64,0x3feec6ab5cfd8d57,0xbf9157f3545a8235,1 +np.float64,0x3fe3ae9622a75d2c,0xbfcb04b5ad254581,1 +np.float64,0x7fea73d854b4e7b0,0x407342c0a548f4c5,1 +np.float64,0x7fe29babf4653757,0x4073404eeb5fe714,1 +np.float64,0x7fd3a55d85a74aba,0x40733bde72e86c27,1 +np.float64,0x3fe83ce305f079c6,0xbfbee3511e85e0f1,1 +np.float64,0x3fd72087ea2e4110,0xbfdc4ab30802d7c2,1 +np.float64,0x7feb54ddab76a9ba,0x407342facb6f3ede,1 +np.float64,0xc57e34a18afd,0xc0734f82ec815baa,1 +np.float64,0x7a8cb97ef5198,0xc0733f8fb3777a67,1 +np.float64,0x7fe801032c300205,0x40734213dbe4eda9,1 +np.float64,0x3aefb1f475df7,0xc07344a5f08a0584,1 +np.float64,0x7fee85f1dd3d0be3,0x407343bf4441c2a7,1 +np.float64,0x3fdc7f1055b8fe21,0xbfd67d300630e893,1 +np.float64,0xe8ecddb3d1d9c,0xc0733b194f18f466,1 +np.float64,0x3fdf2b23c73e5648,0xbfd3ff6872c1f887,1 +np.float64,0x3fdba4aef2b7495e,0xbfd7557205e18b7b,1 +np.float64,0x3fe2ac34c6e5586a,0xbfcdf1dac69bfa08,1 +np.float64,0x3fc9852628330a4c,0xbfe66914f0fb9b0a,1 +np.float64,0x7fda211acf344235,0x40733dd9c2177aeb,1 +np.float64,0x3fe9420eb432841d,0xbfba4dd969a32575,1 +np.float64,0xb2f9d1ed65f3a,0xc0733cedfb6527ff,1 +np.float64,0x3fe9768a68f2ed15,0xbfb967c39c35c435,1 +np.float64,0x7fe8268462b04d08,0x4073421eaed32734,1 +np.float64,0x3fcf331f063e663e,0xbfe39e2f4b427ca9,1 +np.float64,0x7fd4eb9e2b29d73b,0x40733c4e4141418d,1 +np.float64,0x7fd2bba658a5774c,0x40733b89cd53d5b1,1 +np.float64,0x3fdfdf04913fbe09,0xbfd360c7fd9d251b,1 +np.float64,0x3fca5bfd0534b7fa,0xbfe5f5f844b2b20c,1 +np.float64,0x3feacd5032f59aa0,0xbfb3b5234ba8bf7b,1 +np.float64,0x7fe9241cec724839,0x4073426631362cec,1 +np.float64,0x3fe57aca20eaf594,0xbfc628e3ac2c6387,1 +np.float64,0x3fec6553ca38caa8,0xbfaa921368d3b222,1 +np.float64,0x3fe1e9676563d2cf,0xbfd020f866ba9b24,1 +np.float64,0x3fd5590667aab20d,0xbfde8458af5a4fd6,1 +np.float64,0x3fdf7528f43eea52,0xbfd3bdb438d6ba5e,1 +np.float64,0xb8dddc5571bbc,0xc0733cb4601e5bb2,1 +np.float64,0xe6d4e1fbcda9c,0xc0733b295ef4a4ba,1 +np.float64,0x3fe7019d962e033b,0xbfc257c0a6e8de16,1 +np.float64,0x3f94ef585029deb1,0xbffb07e5dfb0e936,1 +np.float64,0x7fc863b08030c760,0x4073388e28d7b354,1 +np.float64,0xf684443bed089,0xc0733ab46cfbff9a,1 +np.float64,0x7fe00e901d201d1f,0x40733f489c05a0f0,1 +np.float64,0x9e5c0a273cb82,0xc0733dc7af797e19,1 +np.float64,0x7fe49734f0692e69,0x4073410303680df0,1 +np.float64,0x7fb7b584442f6b08,0x4073338acff72502,1 +np.float64,0x3f99984c30333098,0xbff9a2642a6ed8cc,1 +np.float64,0x7fea2fcda8745f9a,0x407342aeae7f5e64,1 
+np.float64,0xe580caadcb01a,0xc0733b33a3639217,1 +np.float64,0x1899ab3831336,0xc0734ab823729417,1 +np.float64,0x39bd4c76737aa,0xc07344ca6fac6d21,1 +np.float64,0xd755b2dbaeab7,0xc0733ba4fe19f2cc,1 +np.float64,0x3f952bebf82a57d8,0xbffaf3e7749c2512,1 +np.float64,0x3fe62ee5d72c5dcc,0xbfc45e3cb5baad08,1 +np.float64,0xb1264a7d624ca,0xc0733d003a1d0a66,1 +np.float64,0x3fc4bd1bcd297a38,0xbfe94b3058345c46,1 +np.float64,0x7fc5758bb32aeb16,0x407337aa7805497f,1 +np.float64,0x3fb0edcaf421db96,0xbff2dfb09c405294,1 +np.float64,0x3fd240fceaa481fa,0xbfe16f356bb36134,1 +np.float64,0x38c0c62a7181a,0xc07344e916d1e9b7,1 +np.float64,0x3fe98f2b3bf31e56,0xbfb8fc6eb622a820,1 +np.float64,0x3fe2bdf99c257bf3,0xbfcdbd0dbbae4d0b,1 +np.float64,0xce4b390d9c967,0xc0733bf14ada3134,1 +np.float64,0x3fd2ad607ba55ac1,0xbfe11da15167b37b,1 +np.float64,0x3fd8154f11b02a9e,0xbfdb2a6fabb9a026,1 +np.float64,0xf37849fde6f09,0xc0733aca8c64344c,1 +np.float64,0x3fcbae43b2375c87,0xbfe547f267c8e570,1 +np.float64,0x3fcd46fd7d3a8dfb,0xbfe48070f7232929,1 +np.float64,0x7fcdd245273ba489,0x407339f3d907b101,1 +np.float64,0x3fac75cd0838eb9a,0xbff4149d177b057b,1 +np.float64,0x7fe8ff3fd7f1fe7f,0x4073425bf968ba6f,1 +np.float64,0x7febadaa4df75b54,0x407343113a91f0e9,1 +np.float64,0x7fd5e4649c2bc8c8,0x40733c9f0620b065,1 +np.float64,0x903429812069,0xc07351b255e27887,1 +np.float64,0x3fe1d8c51c63b18a,0xbfd03ad448c1f1ee,1 +np.float64,0x3fe573ea646ae7d5,0xbfc63ab0bfd0e601,1 +np.float64,0x3f83b3f3c02767e8,0xc00022677e310649,1 +np.float64,0x7fd15d1582a2ba2a,0x40733b02c469c1d6,1 +np.float64,0x3fe63d3dabec7a7b,0xbfc43a56ee97b27e,1 +np.float64,0x7fe3a452fb2748a5,0x407340af1973c228,1 +np.float64,0x3fafac6b303f58d6,0xbff35651703ae9f2,1 +np.float64,0x513ddd24a27bc,0xc073426af96aaebb,1 +np.float64,0x3fef152246be2a45,0xbf89df79d7719282,1 +np.float64,0x3fe8c923e9f19248,0xbfbc67228e8db5f6,1 +np.float64,0x3fd6e2325fadc465,0xbfdc9602fb0b950f,1 +np.float64,0x3fe9616815f2c2d0,0xbfb9c4311a3b415b,1 +np.float64,0x2fe4e4005fc9d,0xc0734616fe294395,1 +np.float64,0x3fbceb02dc39d606,0xbfee4e68f1c7886f,1 +np.float64,0x7fe35e843d66bd07,0x407340963b066ad6,1 +np.float64,0x7fecd6c648f9ad8c,0x4073435a4c176e94,1 +np.float64,0x7fcbd72bf437ae57,0x4073397994b85665,1 +np.float64,0x3feff6443b3fec88,0xbf40eb380d5318ae,1 +np.float64,0x7fb9373cf6326e79,0x407333f869edef08,1 +np.float64,0x63790d9cc6f22,0xc0734102d4793cda,1 +np.float64,0x3f9de6efe83bcde0,0xbff88db6f0a6b56e,1 +np.float64,0xe00f2dc1c01f,0xc0734ea26ab84ff2,1 +np.float64,0xd7a9aa8baf536,0xc0733ba248fa33ab,1 +np.float64,0x3fee0089ea7c0114,0xbf9cab936ac31c4b,1 +np.float64,0x3fdec0d51cbd81aa,0xbfd45ed8878c5860,1 +np.float64,0x7fe91bf5e9f237eb,0x40734263f005081d,1 +np.float64,0x34ea7d1e69d50,0xc07345659dde7444,1 +np.float64,0x7fe67321a3ace642,0x4073419cc8130d95,1 +np.float64,0x9d1aeb2f3a35e,0xc0733dd5d506425c,1 +np.float64,0x7fbb01df003603bd,0x4073347282f1391d,1 +np.float64,0x42b945b285729,0xc07343c92d1bbef9,1 +np.float64,0x7fc92799b8324f32,0x407338c51e3f0733,1 +np.float64,0x3fe119c19b223383,0xbfd16ab707f65686,1 +np.float64,0x3fc9f9ac5333f359,0xbfe62a2f91ec0dff,1 +np.float64,0x3fd820d5a8b041ab,0xbfdb1d2586fe7b18,1 +np.float64,0x10000000000000,0xc0733a7146f72a42,1 +np.float64,0x3fe7e1543eafc2a8,0xbfc045362889592d,1 +np.float64,0xcbc0e1819783,0xc0734f4b68e05b1c,1 +np.float64,0xeb57e411d6afd,0xc0733b06efec001a,1 +np.float64,0xa9b74b47536ea,0xc0733d4c7bd06ddc,1 +np.float64,0x3fe56d4022eada80,0xbfc64bf8c7e3dd59,1 +np.float64,0x3fd445ca27288b94,0xbfdff40aecd0f882,1 +np.float64,0x3fe5af1cf5ab5e3a,0xbfc5a21d83699a04,1 
+np.float64,0x7fed3431eb7a6863,0x40734370aa6131e1,1 +np.float64,0x3fd878dea1b0f1bd,0xbfdab8730dc00517,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3feba9fcc1f753fa,0xbfb03027dcecbf65,1 +np.float64,0x7fca4feed6349fdd,0x4073391526327eb0,1 +np.float64,0x3fe7748ddbaee91c,0xbfc144b438218065,1 +np.float64,0x3fb5fbd94c2bf7b3,0xbff10ee6342c21a0,1 +np.float64,0x3feb603b97f6c077,0xbfb15a1f99d6d25e,1 +np.float64,0x3fe2e6fc8ce5cdf9,0xbfcd43edd7f3b4e6,1 +np.float64,0x7feb2b31f7765663,0x407342f02b306688,1 +np.float64,0x3fe290e2282521c4,0xbfce436deb8dbcf3,1 +np.float64,0x3fe3d5adf9e7ab5c,0xbfca96b8aa55d942,1 +np.float64,0x691899f2d2314,0xc07340a1026897c8,1 +np.float64,0x7fe468b008e8d15f,0x407340f33eadc628,1 +np.float64,0x3fb3a4c416274988,0xbff1d71da539a56e,1 +np.float64,0x3fe2442b29e48856,0xbfcf2b0037322661,1 +np.float64,0x3f376fbc7e6ef,0xc073442939a84643,1 +np.float64,0x3fe7c78d65ef8f1b,0xbfc08157cff411de,1 +np.float64,0xd4f27acba9e50,0xc0733bb8d38daa50,1 +np.float64,0x5198919ea3313,0xc07342633ba7cbea,1 +np.float64,0x7fd09f66f0a13ecd,0x40733ab5310b4385,1 +np.float64,0x3fdfe5531dbfcaa6,0xbfd35b487c7e739f,1 +np.float64,0x3fc4b0fecc2961fe,0xbfe95350c38c1640,1 +np.float64,0x7fd5ae21962b5c42,0x40733c8db78b7250,1 +np.float64,0x3fa4a8fcd42951fa,0xbff64e62fe602b72,1 +np.float64,0x7fc8e0e25831c1c4,0x407338b179b91223,1 +np.float64,0x7fdde1df6f3bc3be,0x40733ec87f9f027e,1 +np.float64,0x3fd8b9ad86b1735b,0xbfda6f385532c41b,1 +np.float64,0x3fd9f20ee933e41e,0xbfd91872fd858597,1 +np.float64,0x7feb35332df66a65,0x407342f2b9c715f0,1 +np.float64,0x7fe783dc7eaf07b8,0x407341ef41873706,1 +np.float64,0x7fceee929f3ddd24,0x40733a34e3c660fd,1 +np.float64,0x985b58d730b6b,0xc0733e0c6cfbb6f8,1 +np.float64,0x3fef4bb55cfe976b,0xbf83cb246c6f2a78,1 +np.float64,0x3fe218014f243003,0xbfcfb20ac683e1f6,1 +np.float64,0x7fe43b9fbea8773e,0x407340e3d5d5d29e,1 +np.float64,0x7fe148c74c62918e,0x40733fcba4367b8b,1 +np.float64,0x3feea4ad083d495a,0xbf93443917f3c991,1 +np.float64,0x8bcf6311179ed,0xc0733ea54d59dd31,1 +np.float64,0xf4b7a2dbe96f5,0xc0733ac175182401,1 +np.float64,0x543338baa8668,0xc073422b59165fe4,1 +np.float64,0x3fdb467317368ce6,0xbfd7b4d515929635,1 +np.float64,0x7fe3bbbc89e77778,0x407340b75cdf3de7,1 +np.float64,0x7fe693377aad266e,0x407341a6af60a0f1,1 +np.float64,0x3fc66210502cc421,0xbfe83bb940610a24,1 +np.float64,0x7fa75638982eac70,0x40732e9da476b816,1 +np.float64,0x3fe0d72a4761ae55,0xbfd1d7c82c479fab,1 +np.float64,0x97dec0dd2fbd8,0xc0733e121e072804,1 +np.float64,0x3fef33ec8c7e67d9,0xbf86701be6be8df1,1 +np.float64,0x7fcfca9b423f9536,0x40733a65a51efb94,1 +np.float64,0x9f2215633e443,0xc0733dbf043de9ed,1 +np.float64,0x2469373e48d28,0xc07347fe9e904b77,1 +np.float64,0x7fecc2e18cb985c2,0x407343557f58dfa2,1 +np.float64,0x3fde4acbfdbc9598,0xbfd4ca559e575e74,1 +np.float64,0x3fd6b11cf1ad623a,0xbfdcd1e17ef36114,1 +np.float64,0x3fc19ec494233d89,0xbfeb8ef228e8826a,1 +np.float64,0x4c89ee389913e,0xc07342d50c904f61,1 +np.float64,0x88c2046f11841,0xc0733ecc91369431,1 +np.float64,0x7fc88c13fd311827,0x40733899a125b392,1 +np.float64,0x3fcebd893a3d7b12,0xbfe3d2f35ab93765,1 +np.float64,0x3feb582a1476b054,0xbfb17ae8ec6a0465,1 +np.float64,0x7fd4369e5da86d3c,0x40733c1118b8cd67,1 +np.float64,0x3fda013fc1340280,0xbfd90831b85e98b2,1 +np.float64,0x7fed33d73fba67ad,0x4073437094ce1bd9,1 +np.float64,0x3fed3191053a6322,0xbfa468cc26a8f685,1 +np.float64,0x3fc04ed51c209daa,0xbfeca24a6f093bca,1 +np.float64,0x3fee4ac8763c9591,0xbf986458abbb90b5,1 +np.float64,0xa2d39dd145a74,0xc0733d9633651fbc,1 +np.float64,0x3fe7d9f86f2fb3f1,0xbfc0565a0b059f1c,1 
+np.float64,0x3fe3250144e64a03,0xbfcc8eb2b9ae494b,1 +np.float64,0x7fe2b29507a56529,0x4073405774492075,1 +np.float64,0x7fdcdfcbe2b9bf97,0x40733e8b736b1bd8,1 +np.float64,0x3fc832730f3064e6,0xbfe7267ac9b2e7c3,1 +np.float64,0x3fc7e912e52fd226,0xbfe750dfc0aeae57,1 +np.float64,0x7fc960472f32c08d,0x407338d4b4cb3957,1 +np.float64,0x3fbdf182ea3be306,0xbfedd27150283ffb,1 +np.float64,0x3fd1e9359823d26b,0xbfe1b2ac7fd25f8d,1 +np.float64,0x7fbcf75f6039eebe,0x407334ef13eb16f8,1 +np.float64,0x3fe5a3c910eb4792,0xbfc5bf2f57c5d643,1 +np.float64,0x3fcf4f2a6e3e9e55,0xbfe391b6f065c4b8,1 +np.float64,0x3fee067873fc0cf1,0xbf9c53af0373fc0e,1 +np.float64,0xd3f08b85a7e12,0xc0733bc14357e686,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fc8635f6430c6bf,0xbfe70a7dc77749a7,1 +np.float64,0x3fe3ff5c52a7feb9,0xbfca22617c6636d5,1 +np.float64,0x3fbbae91fa375d24,0xbfeee9d4c300543f,1 +np.float64,0xe3f71b59c7ee4,0xc0733b3f99187375,1 +np.float64,0x7fca93d3be3527a6,0x40733926fd48ecd6,1 +np.float64,0x3fcd29f7223a53ee,0xbfe48e3edf32fe57,1 +np.float64,0x7fdc4ef6f8389ded,0x40733e68401cf2a6,1 +np.float64,0xe009bc81c014,0xc0734ea295ee3e5b,1 +np.float64,0x61f56c78c3eae,0xc073411e1dbd7c54,1 +np.float64,0x3fde131928bc2632,0xbfd4fda024f6927c,1 +np.float64,0x3fb21ee530243dca,0xbff266aaf0358129,1 +np.float64,0x7feaac82a4f55904,0x407342cf7809d9f9,1 +np.float64,0x3fe66ab177ecd563,0xbfc3c92d4d522819,1 +np.float64,0xfe9f9c2bfd3f4,0xc0733a7ade3a88a7,1 +np.float64,0x7fd0c5217c218a42,0x40733ac4e4c6dfa5,1 +np.float64,0x430f4ae6861ea,0xc07343c03d8a9442,1 +np.float64,0x494bff2a92981,0xc073432209d2fd16,1 +np.float64,0x3f8860e9d030c1d4,0xbffeca059ebf5e89,1 +np.float64,0x3fe43732dc286e66,0xbfc98800388bad2e,1 +np.float64,0x6443b60ec8877,0xc07340f4bab11827,1 +np.float64,0x3feda9be6d7b537d,0xbfa0dcb9a6914069,1 +np.float64,0x3fc5ceb6772b9d6d,0xbfe89868c881db70,1 +np.float64,0x3fbdf153023be2a6,0xbfedd2878c3b4949,1 +np.float64,0x7fe8f6b8e8f1ed71,0x407342599a30b273,1 +np.float64,0x3fea6fbdb8b4df7b,0xbfb53bf66f71ee96,1 +np.float64,0xc7ac3dbb8f588,0xc0733c2b525b7963,1 +np.float64,0x3fef3a91f77e7524,0xbf85b2bd3adbbe31,1 +np.float64,0x3f887cb97030f973,0xbffec21ccbb5d22a,1 +np.float64,0x8b2f1c9f165e4,0xc0733ead49300951,1 +np.float64,0x2c1cb32058397,0xc07346a951bd8d2b,1 +np.float64,0x3fe057edd620afdc,0xbfd2acf1881b7e99,1 +np.float64,0x7f82e9530025d2a5,0x4073238591dd52ce,1 +np.float64,0x3fe4e03dff69c07c,0xbfc7be96c5c006fc,1 +np.float64,0x52727b4aa4e50,0xc0734250c58ebbc1,1 +np.float64,0x3f99a62160334c43,0xbff99ea3ca09d8f9,1 +np.float64,0x3fd5314b4faa6297,0xbfdeb843daf01e03,1 +np.float64,0x3fefde89e13fbd14,0xbf5d1facb7a1e9de,1 +np.float64,0x7fb460f1a228c1e2,0x4073327d8cbc5f86,1 +np.float64,0xeb93efb3d727e,0xc0733b052a4990e4,1 +np.float64,0x3fe884baecf10976,0xbfbd9ba9cfe23713,1 +np.float64,0x7fefffffffffffff,0x40734413509f79ff,1 +np.float64,0x149dc7c6293ba,0xc0734bf26b1df025,1 +np.float64,0x64188f88c8313,0xc07340f7b8e6f4b5,1 +np.float64,0x3fdfac314abf5863,0xbfd38d3e9dba1b0e,1 +np.float64,0x3fd72052a42e40a5,0xbfdc4af30ee0b245,1 +np.float64,0x7fdd951f743b2a3e,0x40733eb68fafa838,1 +np.float64,0x65a2dd5acb45c,0xc07340dc8ed625e1,1 +np.float64,0x7fe89a79997134f2,0x4073423fbceb1cbe,1 +np.float64,0x3fe70a000d6e1400,0xbfc24381e09d02f7,1 +np.float64,0x3fe2cec160259d83,0xbfcd8b5e92354129,1 +np.float64,0x3feb9ef77a773def,0xbfb05c7b2ee6f388,1 +np.float64,0xe0d66689c1acd,0xc0733b582c779620,1 +np.float64,0x3fee86bd0ffd0d7a,0xbf94f7870502c325,1 +np.float64,0x186afc6230d60,0xc0734ac55fb66d5d,1 +np.float64,0xc0631f4b80c64,0xc0733c6d7149d373,1 
+np.float64,0x3fdad1b87735a371,0xbfd82cca73ec663b,1 +np.float64,0x7fe7f6d313efeda5,0x40734210e84576ab,1 +np.float64,0x7fd7b7fce6af6ff9,0x40733d2d92ffdaaf,1 +np.float64,0x3fe6f35a28ade6b4,0xbfc27a4239b540c3,1 +np.float64,0x7fdb0b834eb61706,0x40733e17073a61f3,1 +np.float64,0x82f4661105e8d,0xc0733f19b34adeed,1 +np.float64,0x3fc77230112ee460,0xbfe796a7603c0d16,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x7fb8317bc63062f7,0x407333aec761a739,1 +np.float64,0x7fd165609a22cac0,0x40733b061541ff15,1 +np.float64,0x3fed394768fa728f,0xbfa42e1596e1faf6,1 +np.float64,0x7febab693d7756d1,0x40734310a9ac828e,1 +np.float64,0x7fe809a69230134c,0x407342165b9acb69,1 +np.float64,0x3fc091d38f2123a7,0xbfec69a70fc23548,1 +np.float64,0x3fb2a8f5dc2551ec,0xbff2327f2641dd0d,1 +np.float64,0x7fc60b6fe02c16df,0x407337da5adc342c,1 +np.float64,0x3fefa53c3bbf4a78,0xbf73d1be15b73b00,1 +np.float64,0x7fee09c1717c1382,0x407343a2c479e1cb,1 +np.float64,0x8000000000000001,0x7ff8000000000000,1 +np.float64,0x3fede0b2733bc165,0xbf9e848ac2ecf604,1 +np.float64,0x3fee2ac331bc5586,0xbf9a3b699b721c9a,1 +np.float64,0x3fd4db12d829b626,0xbfdf2a413d1e453a,1 +np.float64,0x7fe605230dec0a45,0x4073417a67db06be,1 +np.float64,0x3fe378b2bf26f165,0xbfcb9dbb2b6d6832,1 +np.float64,0xc1d4c1ab83a98,0xc0733c60244cadbf,1 +np.float64,0x3feb15500e762aa0,0xbfb28c071d5efc22,1 +np.float64,0x3fe36225a626c44b,0xbfcbde4259e9047e,1 +np.float64,0x3fe7c586a72f8b0d,0xbfc08614b13ed4b2,1 +np.float64,0x7fb0f2d8cc21e5b1,0x40733135b2c7dd99,1 +np.float64,0x5957f3feb2aff,0xc07341c1df75638c,1 +np.float64,0x3fca4851bd3490a3,0xbfe6005ae5279485,1 +np.float64,0x824217d904843,0xc0733f232fd58f0f,1 +np.float64,0x4f9332269f267,0xc073428fd8e9cb32,1 +np.float64,0x3fea6f087374de11,0xbfb53ef0d03918b2,1 +np.float64,0x3fd9409ab4328135,0xbfd9d9231381e2b8,1 +np.float64,0x3fdba03b00374076,0xbfd759ec94a7ab5b,1 +np.float64,0x3fe0ce3766619c6f,0xbfd1e6912582ccf0,1 +np.float64,0x3fabd45ddc37a8bc,0xbff43c78d3188423,1 +np.float64,0x3fc3cadd592795bb,0xbfe9f1576c9b2c79,1 +np.float64,0x3fe10df049621be1,0xbfd17df2f2c28022,1 +np.float64,0x945b5d1328b6c,0xc0733e3bc06f1e75,1 +np.float64,0x7fc1c3742b2386e7,0x4073365a403d1051,1 +np.float64,0x7fdc957138b92ae1,0x40733e7977717586,1 +np.float64,0x7f943fa1a0287f42,0x407328d01de143f5,1 +np.float64,0x3fec9631c4392c64,0xbfa914b176d8f9d2,1 +np.float64,0x3fd8e7c008b1cf80,0xbfda3b9d9b6da8f4,1 +np.float64,0x7222f9fee4460,0xc073400e371516cc,1 +np.float64,0x3fe890e43eb121c8,0xbfbd64921462e823,1 +np.float64,0x3fcfd7fe2a3faffc,0xbfe3557e2f207800,1 +np.float64,0x3fed5dd1c1babba4,0xbfa318bb20db64e6,1 +np.float64,0x3fe6aa34c66d546a,0xbfc32c8a8991c11e,1 +np.float64,0x8ca79801196,0xc0736522bd5adf6a,1 +np.float64,0x3feb274079364e81,0xbfb2427b24b0ca20,1 +np.float64,0x7fe04927e4a0924f,0x40733f61c96f7f89,1 +np.float64,0x7c05f656f80bf,0xc0733f7a70555b4e,1 +np.float64,0x7fe97819eff2f033,0x4073427d4169b0f8,1 +np.float64,0x9def86e33bdf1,0xc0733dcc740b7175,1 +np.float64,0x7fedd1ef3f3ba3dd,0x40734395ceab8238,1 +np.float64,0x77bed86cef7dc,0xc0733fb8e0e9bf73,1 +np.float64,0x9274b41b24e97,0xc0733e52b16dff71,1 +np.float64,0x8010000000000000,0x7ff8000000000000,1 +np.float64,0x9c977855392ef,0xc0733ddba7d421d9,1 +np.float64,0xfb4560a3f68ac,0xc0733a9271e6a118,1 +np.float64,0xa67d9f394cfb4,0xc0733d6e9d58cc94,1 +np.float64,0x3fbfa766b03f4ecd,0xbfed0cccfecfc900,1 +np.float64,0x3fe177417522ee83,0xbfd0d45803bff01a,1 +np.float64,0x7fe85e077bb0bc0e,0x4073422e957a4aa3,1 +np.float64,0x7feeb0a6883d614c,0x407343c8f6568f7c,1 +np.float64,0xbab82edb75706,0xc0733ca2a2b20094,1 
+np.float64,0xfadb44bdf5b69,0xc0733a9561b7ec04,1 +np.float64,0x3fefb9b82b3f7370,0xbf6ea776b2dcc3a9,1 +np.float64,0x7fe080ba8a610174,0x40733f795779b220,1 +np.float64,0x3f87faa1c02ff544,0xbffee76acafc92b7,1 +np.float64,0x7fed474108fa8e81,0x4073437531d4313e,1 +np.float64,0x3fdb7b229336f645,0xbfd77f583a4a067f,1 +np.float64,0x256dbf0c4adb9,0xc07347cd94e6fa81,1 +np.float64,0x3fd034ae25a0695c,0xbfe3169c15decdac,1 +np.float64,0x3a72177274e44,0xc07344b4cf7d68cd,1 +np.float64,0x7fa2522d5c24a45a,0x40732cef2f793470,1 +np.float64,0x3fb052bdde20a57c,0xbff3207fd413c848,1 +np.float64,0x3fdccfecbbb99fd9,0xbfd62ec04a1a687a,1 +np.float64,0x3fd403ac53280759,0xbfe027a31df2c8cc,1 +np.float64,0x3fab708e4036e11d,0xbff45591df4f2e8b,1 +np.float64,0x7fcfc001993f8002,0x40733a63539acf9d,1 +np.float64,0x3fd2b295dfa5652c,0xbfe119c1b476c536,1 +np.float64,0x7fe8061262b00c24,0x4073421552ae4538,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0x7fed52093ffaa411,0x40734377c072a7e8,1 +np.float64,0xf3df902fe7bf2,0xc0733ac79a75ff7a,1 +np.float64,0x7fe13d382e227a6f,0x40733fc6fd0486bd,1 +np.float64,0x3621d5086c43b,0xc073453d31effbcd,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x3fdaffea27b5ffd4,0xbfd7fd139dc1c2c5,1 +np.float64,0x7fea6536dc34ca6d,0x407342bccc564fdd,1 +np.float64,0x7fd478f00c28f1df,0x40733c27c0072fde,1 +np.float64,0x7fa72ef0502e5de0,0x40732e91e83db75c,1 +np.float64,0x7fd302970626052d,0x40733ba3ec6775f6,1 +np.float64,0x7fbb57ab0036af55,0x407334887348e613,1 +np.float64,0x3fda0ff722b41fee,0xbfd8f87b77930330,1 +np.float64,0x1e983ce23d309,0xc073493438f57e61,1 +np.float64,0x7fc90de97c321bd2,0x407338be01ffd4bd,1 +np.float64,0x7fe074b09c20e960,0x40733f7443f0dbe1,1 +np.float64,0x3fed5dec9fbabbd9,0xbfa317efb1fe8a95,1 +np.float64,0x7fdb877632b70eeb,0x40733e3697c88ba8,1 +np.float64,0x7fe4fb0067e9f600,0x40734124604b99e8,1 +np.float64,0x7fd447dc96288fb8,0x40733c1703ab2cce,1 +np.float64,0x3feb2d1e64f65a3d,0xbfb22a781df61c05,1 +np.float64,0xb6c8e6676d91d,0xc0733cc8859a0b91,1 +np.float64,0x3fdc3c2418387848,0xbfd6bec3a3c3cdb5,1 +np.float64,0x3fdecb9ccdbd973a,0xbfd4551c05721a8e,1 +np.float64,0x3feb1100e7762202,0xbfb29db911fe6768,1 +np.float64,0x3fe0444bc2a08898,0xbfd2ce69582e78c1,1 +np.float64,0x7fda403218b48063,0x40733de201d8340c,1 +np.float64,0x3fdc70421238e084,0xbfd68ba4bd48322b,1 +np.float64,0x3fe06e747c60dce9,0xbfd286bcac34a981,1 +np.float64,0x7fc1931d9623263a,0x407336473da54de4,1 +np.float64,0x229914da45323,0xc073485979ff141c,1 +np.float64,0x3fe142f92da285f2,0xbfd1280909992cb6,1 +np.float64,0xf1d02fa9e3a06,0xc0733ad6b19d71a0,1 +np.float64,0x3fb1fe9b0023fd36,0xbff27317d8252c16,1 +np.float64,0x3fa544b9242a8972,0xbff61ac38569bcfc,1 +np.float64,0x3feeb129d4fd6254,0xbf928f23ad20c1ee,1 +np.float64,0xa2510b7f44a22,0xc0733d9bc81ea0a1,1 +np.float64,0x3fca75694d34ead3,0xbfe5e8975b3646c2,1 +np.float64,0x7fece10621b9c20b,0x4073435cc3dd9a1b,1 +np.float64,0x7fe98a57d3b314af,0x4073428239b6a135,1 +np.float64,0x3fe259c62a64b38c,0xbfcee96682a0f355,1 +np.float64,0x3feaaa9b9d755537,0xbfb445779f3359af,1 +np.float64,0xdaadecfdb55be,0xc0733b899338432a,1 +np.float64,0x3fed00eae4fa01d6,0xbfa5dc8d77be5991,1 +np.float64,0x7fcc96c773392d8e,0x407339a8c5cd786e,1 +np.float64,0x3fef7b8b203ef716,0xbf7cff655ecb6424,1 +np.float64,0x7fd4008113a80101,0x40733bfe6552acb7,1 +np.float64,0x7fe99ff035b33fdf,0x407342881753ee2e,1 +np.float64,0x3ee031e87dc07,0xc0734432d736e492,1 +np.float64,0x3fddfe390f3bfc72,0xbfd510f1d9ec3e36,1 +np.float64,0x3fd9ddce74b3bb9d,0xbfd92e2d75a061bb,1 +np.float64,0x7fe5f742edebee85,0x40734176058e3a77,1 
+np.float64,0x3fdb04185b360831,0xbfd7f8c63aa5e1c4,1 +np.float64,0xea2b0f43d4562,0xc0733b0fd77c8118,1 +np.float64,0x7fc3f4973527e92d,0x407337293bbb22c4,1 +np.float64,0x3fb9adfb38335bf6,0xbfeff4f3ea85821a,1 +np.float64,0x87fb98750ff73,0xc0733ed6ad83c269,1 +np.float64,0x3fe005721a200ae4,0xbfd33a9f1ebfb0ac,1 +np.float64,0xd9e04fe7b3c0a,0xc0733b901ee257f3,1 +np.float64,0x2c39102658723,0xc07346a4db63bf55,1 +np.float64,0x3f7dc28e003b851c,0xc0011c1d1233d948,1 +np.float64,0x3430fd3868620,0xc073457e24e0b70d,1 +np.float64,0xbff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fd23e45e0247c8c,0xbfe17146bcf87b57,1 +np.float64,0x6599df3ecb33d,0xc07340dd2c41644c,1 +np.float64,0x3fdf074f31be0e9e,0xbfd41f6e9dbb68a5,1 +np.float64,0x7fdd6233f3bac467,0x40733eaa8f674b72,1 +np.float64,0x7fe03e8481607d08,0x40733f5d3df3b087,1 +np.float64,0x3fcc3b79f13876f4,0xbfe501bf3b379b77,1 +np.float64,0xe5d97ae3cbb30,0xc0733b30f47cbd12,1 +np.float64,0x8acbc4a115979,0xc0733eb240a4d2c6,1 +np.float64,0x3fedbdbc48bb7b79,0xbfa0470fd70c4359,1 +np.float64,0x3fde1611103c2c22,0xbfd4fae1fa8e7e5e,1 +np.float64,0x3fe09478bd2128f1,0xbfd246b7e85711dc,1 +np.float64,0x3fd6dfe8f3adbfd2,0xbfdc98ca2f32c1ad,1 +np.float64,0x72ccf274e599f,0xc0734003e5b0da63,1 +np.float64,0xe27c7265c4f8f,0xc0733b4b2d808566,1 +np.float64,0x7fee3161703c62c2,0x407343abe90f5649,1 +np.float64,0xf54fb5c1eaa0,0xc0734e01384fcf78,1 +np.float64,0xcde5924d9bcb3,0xc0733bf4b83c66c2,1 +np.float64,0x3fc46fdbe528dfb8,0xbfe97f55ef5e9683,1 +np.float64,0x7fe513528a2a26a4,0x4073412c69baceca,1 +np.float64,0x3fd29eca4aa53d95,0xbfe128801cd33ed0,1 +np.float64,0x7febb21718b7642d,0x4073431256def857,1 +np.float64,0x3fcab536c0356a6e,0xbfe5c73c59f41578,1 +np.float64,0x7fc7e9f0d82fd3e1,0x4073386b213e5dfe,1 +np.float64,0xb5b121276b624,0xc0733cd33083941c,1 +np.float64,0x7e0dd9bcfc1bc,0xc0733f5d8bf35050,1 +np.float64,0x3fd1c75106238ea2,0xbfe1cd11cccda0f4,1 +np.float64,0x9f060e673e0c2,0xc0733dc03da71909,1 +np.float64,0x7fd915a2f3322b45,0x40733d912af07189,1 +np.float64,0x3fd8cbae4431975d,0xbfda5b02ca661139,1 +np.float64,0x3fde8b411f3d1682,0xbfd48f6f710a53b6,1 +np.float64,0x3fc17a780622f4f0,0xbfebabb10c55255f,1 +np.float64,0x3fde5cbe5f3cb97d,0xbfd4b9e2e0101fb1,1 +np.float64,0x7fd859036530b206,0x40733d5c2252ff81,1 +np.float64,0xb0f5040f61ea1,0xc0733d02292f527b,1 +np.float64,0x3fde5c49ae3cb893,0xbfd4ba4db3ce2cf3,1 +np.float64,0x3fecc4518df988a3,0xbfa7af0bfc98bc65,1 +np.float64,0x3feffee03cbffdc0,0xbf0f3ede6ca7d695,1 +np.float64,0xbc5eac9b78bd6,0xc0733c92fb51c8ae,1 +np.float64,0x3fe2bb4ef765769e,0xbfcdc4f70a65dadc,1 +np.float64,0x5089443ca1129,0xc073427a7d0cde4a,1 +np.float64,0x3fd0d6e29121adc5,0xbfe28e28ece1db86,1 +np.float64,0xbe171e397c2e4,0xc0733c82cede5d02,1 +np.float64,0x4ede27be9dbc6,0xc073429fba1a4af1,1 +np.float64,0x3fe2aff3af655fe7,0xbfcde6b52a8ed3c1,1 +np.float64,0x7fd85ca295b0b944,0x40733d5d2adcccf1,1 +np.float64,0x24919bba49234,0xc07347f6ed704a6f,1 +np.float64,0x7fd74bc1eeae9783,0x40733d0d94a89011,1 +np.float64,0x3fc1cd12cb239a26,0xbfeb6a9c25c2a11d,1 +np.float64,0x3fdafbc0ac35f781,0xbfd8015ccf1f1b51,1 +np.float64,0x3fee01327c3c0265,0xbf9ca1d0d762dc18,1 +np.float64,0x3fe65bd7702cb7af,0xbfc3ee0de5c36b8d,1 +np.float64,0x7349c82ee693a,0xc0733ffc5b6eccf2,1 +np.float64,0x3fdc5906f738b20e,0xbfd6a26288eb5933,1 +np.float64,0x1,0xc07434e6420f4374,1 +np.float64,0x3fb966128a32cc25,0xbff00e0aa7273838,1 +np.float64,0x3fd501ff9a2a03ff,0xbfdef69133482121,1 +np.float64,0x194d4f3c329ab,0xc0734a861b44cfbe,1 +np.float64,0x3fec5d34f8f8ba6a,0xbfaad1b31510e70b,1 +np.float64,0x1635e4c22c6be,0xc0734b6dec650943,1 
+np.float64,0x3fead2f8edb5a5f2,0xbfb39dac30a962cf,1 +np.float64,0x3f7dfa4ce03bf49a,0xc00115a112141aa7,1 +np.float64,0x3fef6827223ed04e,0xbf80a42c9edebfe9,1 +np.float64,0xe771f303cee3f,0xc0733b24a6269fe4,1 +np.float64,0x1160ccc622c1b,0xc0734d22604eacb9,1 +np.float64,0x3fc485cd08290b9a,0xbfe970723008c8c9,1 +np.float64,0x7fef99c518bf3389,0x407343fcf9ed202f,1 +np.float64,0x7fd8c1447a318288,0x40733d79a440b44d,1 +np.float64,0xaf219f955e434,0xc0733d149c13f440,1 +np.float64,0xcf45f6239e8bf,0xc0733be8ddda045d,1 +np.float64,0x7599394aeb328,0xc0733fd90fdbb0ea,1 +np.float64,0xc7f6390f8fec7,0xc0733c28bfbc66a3,1 +np.float64,0x3fd39ae96c2735d3,0xbfe0712274a8742b,1 +np.float64,0xa4d6c18f49ad8,0xc0733d805a0528f7,1 +np.float64,0x7fd9ea78d7b3d4f1,0x40733dcb2b74802a,1 +np.float64,0x3fecd251cb39a4a4,0xbfa742ed41d4ae57,1 +np.float64,0x7fed7a07cd7af40f,0x407343813476027e,1 +np.float64,0x3fd328ae7f26515d,0xbfe0c30b56a83c64,1 +np.float64,0x7fc937ff7a326ffe,0x407338c9a45b9140,1 +np.float64,0x3fcf1d31143e3a62,0xbfe3a7f760fbd6a8,1 +np.float64,0x7fb911dcbc3223b8,0x407333ee158cccc7,1 +np.float64,0x3fd352fc83a6a5f9,0xbfe0a47d2f74d283,1 +np.float64,0x7fd310753fa620e9,0x40733ba8fc4300dd,1 +np.float64,0x3febd64b4577ac97,0xbfaefd4a79f95c4b,1 +np.float64,0x6a6961a4d4d2d,0xc073408ae1687943,1 +np.float64,0x3fe4ba73d16974e8,0xbfc8239341b9e457,1 +np.float64,0x3fed8e7cac3b1cf9,0xbfa1a96a0cc5fcdc,1 +np.float64,0x7fd505ec04aa0bd7,0x40733c56f86e3531,1 +np.float64,0x3fdf166e9abe2cdd,0xbfd411e5f8569d70,1 +np.float64,0x7fe1bc6434e378c7,0x40733ff9861bdabb,1 +np.float64,0x3fd3b0b175a76163,0xbfe061ba5703f3c8,1 +np.float64,0x7fed75d7ffbaebaf,0x4073438037ba6f19,1 +np.float64,0x5a9e109cb53c3,0xc07341a8b04819c8,1 +np.float64,0x3fe14786b4e28f0d,0xbfd120b541bb880e,1 +np.float64,0x3fed4948573a9291,0xbfa3b471ff91614b,1 +np.float64,0x66aac5d8cd559,0xc07340ca9b18af46,1 +np.float64,0x3fdb48efd23691e0,0xbfd7b24c5694838b,1 +np.float64,0x7fe6da7d1eadb4f9,0x407341bc7d1fae43,1 +np.float64,0x7feb702cf336e059,0x40734301b96cc3c0,1 +np.float64,0x3fd1e60987a3cc13,0xbfe1b522cfcc3d0e,1 +np.float64,0x3feca57f50794aff,0xbfa89dc90625d39c,1 +np.float64,0x7fdc46dc56b88db8,0x40733e664294a0f9,1 +np.float64,0x8dc8fd811b920,0xc0733e8c5955df06,1 +np.float64,0xf01634abe02c7,0xc0733ae370a76d0c,1 +np.float64,0x3fc6f8d8ab2df1b1,0xbfe7df5093829464,1 +np.float64,0xda3d7597b47af,0xc0733b8d2702727a,1 +np.float64,0x7feefd53227dfaa5,0x407343da3d04db28,1 +np.float64,0x3fe2fbca3525f794,0xbfcd06e134417c08,1 +np.float64,0x7fd36d3ce226da79,0x40733bca7c322df1,1 +np.float64,0x7fec37e00b786fbf,0x4073433397b48a5b,1 +np.float64,0x3fbf133f163e267e,0xbfed4e72f1362a77,1 +np.float64,0x3fc11efbb9223df7,0xbfebf53002a561fe,1 +np.float64,0x3fc89c0e5431381d,0xbfe6ea562364bf81,1 +np.float64,0x3f9cd45da839a8bb,0xbff8ceb14669ee4b,1 +np.float64,0x23dc8fa647b93,0xc0734819aaa9b0ee,1 +np.float64,0x3fe829110d305222,0xbfbf3e60c45e2399,1 +np.float64,0x7fed8144e57b0289,0x40734382e917a02a,1 +np.float64,0x7fe033fbf7a067f7,0x40733f58bb00b20f,1 +np.float64,0xe3807f45c7010,0xc0733b43379415d1,1 +np.float64,0x3fd708fb342e11f6,0xbfdc670ef9793782,1 +np.float64,0x3fe88c924b311925,0xbfbd78210d9e7164,1 +np.float64,0x3fe0a2a7c7614550,0xbfd22efaf0472c4a,1 +np.float64,0x7fe3a37501a746e9,0x407340aecaeade41,1 +np.float64,0x3fd05077ec20a0f0,0xbfe2fedbf07a5302,1 +np.float64,0x7fd33bf61da677eb,0x40733bb8c58912aa,1 +np.float64,0x3feb29bdae76537b,0xbfb2384a8f61b5f9,1 +np.float64,0x3fec0fc14ff81f83,0xbfad3423e7ade174,1 +np.float64,0x3fd0f8b1a1a1f163,0xbfe2725dd4ccea8b,1 +np.float64,0x3fe382d26a6705a5,0xbfcb80dba4218bdf,1 
+np.float64,0x3fa873f2cc30e7e6,0xbff522911cb34279,1 +np.float64,0x7fed7fd7377affad,0x4073438292f6829b,1 +np.float64,0x3feeacd8067d59b0,0xbf92cdbeda94b35e,1 +np.float64,0x7fe464d62228c9ab,0x407340f1eee19aa9,1 +np.float64,0xe997648bd32ed,0xc0733b143aa0fad3,1 +np.float64,0x7fea4869f13490d3,0x407342b5333b54f7,1 +np.float64,0x935b871926b71,0xc0733e47c6683319,1 +np.float64,0x28a9d0c05155,0xc0735a7e3532af83,1 +np.float64,0x79026548f204d,0xc0733fa6339ffa2f,1 +np.float64,0x3fdb1daaabb63b55,0xbfd7de839c240ace,1 +np.float64,0x3fc0db73b421b6e7,0xbfec2c6e36c4f416,1 +np.float64,0xb8b50ac1716b,0xc0734ff9fc60ebce,1 +np.float64,0x7fdf13e0c6be27c1,0x40733f0e44f69437,1 +np.float64,0x3fcd0cb97b3a1973,0xbfe49c34ff531273,1 +np.float64,0x3fcbac034b375807,0xbfe54913d73f180d,1 +np.float64,0x3fe091d2a2e123a5,0xbfd24b290a9218de,1 +np.float64,0xede43627dbc87,0xc0733af3c7c7f716,1 +np.float64,0x7fc037e7ed206fcf,0x407335b85fb0fedb,1 +np.float64,0x3fce7ae4c63cf5ca,0xbfe3f1350fe03f28,1 +np.float64,0x7fcdd862263bb0c3,0x407339f5458bb20e,1 +np.float64,0x4d7adf709af5d,0xc07342bf4edfadb2,1 +np.float64,0xdc6c03f3b8d81,0xc0733b7b74d6a635,1 +np.float64,0x3fe72ae0a4ee55c1,0xbfc1f4665608b21f,1 +np.float64,0xcd62f19d9ac5e,0xc0733bf92235e4d8,1 +np.float64,0xe3a7b8fdc74f7,0xc0733b4204f8e166,1 +np.float64,0x3fdafd35adb5fa6b,0xbfd7ffdca0753b36,1 +np.float64,0x3fa023e8702047d1,0xbff8059150ea1464,1 +np.float64,0x99ff336933fe7,0xc0733df961197517,1 +np.float64,0x7feeb365b9bd66ca,0x407343c995864091,1 +np.float64,0x7fe449b49f689368,0x407340e8aa3369e3,1 +np.float64,0x7faf5843043eb085,0x407330aa700136ca,1 +np.float64,0x3fd47b2922a8f652,0xbfdfab3de86f09ee,1 +np.float64,0x7fd9fc3248b3f864,0x40733dcfea6f9b3e,1 +np.float64,0xe20b0d8dc4162,0xc0733b4ea8fe7b3f,1 +np.float64,0x7feff8e0e23ff1c1,0x40734411c490ed70,1 +np.float64,0x7fa58382d02b0705,0x40732e0cf28e14fe,1 +np.float64,0xb8ad9a1b715b4,0xc0733cb630b8f2d4,1 +np.float64,0xe90abcf1d2158,0xc0733b186b04eeee,1 +np.float64,0x7fd6aa6f32ad54dd,0x40733cdccc636604,1 +np.float64,0x3fd8f84eedb1f09e,0xbfda292909a5298a,1 +np.float64,0x7fecd6b1d9f9ad63,0x4073435a472b05b5,1 +np.float64,0x3fd9f47604b3e8ec,0xbfd915e028cbf4a6,1 +np.float64,0x3fd20d9398241b27,0xbfe19691363dd508,1 +np.float64,0x3fe5ed09bbabda13,0xbfc5043dfc9c8081,1 +np.float64,0x7fbe5265363ca4c9,0x407335406f8e4fac,1 +np.float64,0xac2878af5850f,0xc0733d3311be9786,1 +np.float64,0xac2074555840f,0xc0733d3364970018,1 +np.float64,0x3fcd49b96b3a9373,0xbfe47f24c8181d9c,1 +np.float64,0x3fd10caca6a21959,0xbfe2620ae5594f9a,1 +np.float64,0xec5b87e9d8b71,0xc0733aff499e72ca,1 +np.float64,0x9d5e9fad3abd4,0xc0733dd2d70eeb4a,1 +np.float64,0x7fe3d3a24227a744,0x407340bfc2072fdb,1 +np.float64,0x3fc5f7a77c2bef4f,0xbfe87e69d502d784,1 +np.float64,0x33161a66662c4,0xc07345a436308244,1 +np.float64,0xa27acdc744f5a,0xc0733d99feb3d8ea,1 +np.float64,0x3fe2d9301565b260,0xbfcd6c914e204437,1 +np.float64,0x7fd5d111e12ba223,0x40733c98e14a6fd0,1 +np.float64,0x6c3387bed8672,0xc073406d3648171a,1 +np.float64,0x24d89fe849b15,0xc07347e97bec008c,1 +np.float64,0x3fefd763677faec7,0xbf61ae69caa9cad9,1 +np.float64,0x7fe0a4684ba148d0,0x40733f884d32c464,1 +np.float64,0x3fd5c3c939ab8792,0xbfddfaaefc1c7fca,1 +np.float64,0x3fec9b87a6b9370f,0xbfa8eb34efcc6b9b,1 +np.float64,0x3feb062431f60c48,0xbfb2ca6036698877,1 +np.float64,0x3fef97f6633f2fed,0xbf76bc742860a340,1 +np.float64,0x74477490e88ef,0xc0733fed220986bc,1 +np.float64,0x3fe4bea67ce97d4d,0xbfc818525292b0f6,1 +np.float64,0x3fc6add3a92d5ba7,0xbfe80cfdc9a90bda,1 +np.float64,0x847c9ce308f94,0xc0733f05026f5965,1 
+np.float64,0x7fea53fd2eb4a7f9,0x407342b841fc4723,1 +np.float64,0x3fc55a16fc2ab42e,0xbfe8e3849130da34,1 +np.float64,0x3fbdf7d07c3befa1,0xbfedcf84b9c6c161,1 +np.float64,0x3fe5fb25aa6bf64b,0xbfc4e083ff96b116,1 +np.float64,0x61c776a8c38ef,0xc0734121611d84d7,1 +np.float64,0x3fec413164f88263,0xbfabadbd05131546,1 +np.float64,0x9bf06fe137e0e,0xc0733de315469ee0,1 +np.float64,0x2075eefc40ebf,0xc07348cae84de924,1 +np.float64,0x3fdd42e0143a85c0,0xbfd5c0b6f60b3cea,1 +np.float64,0xdbb1ab45b7636,0xc0733b8157329daf,1 +np.float64,0x3feac6d56bf58dab,0xbfb3d00771b28621,1 +np.float64,0x7fb2dc825025b904,0x407331f3e950751a,1 +np.float64,0x3fecea6efd79d4de,0xbfa689309cc0e3fe,1 +np.float64,0x3fd83abec7b0757e,0xbfdaff5c674a9c59,1 +np.float64,0x3fd396f7c0272df0,0xbfe073ee75c414ba,1 +np.float64,0x3fe10036c162006e,0xbfd1945a38342ae1,1 +np.float64,0x3fd5bbded52b77be,0xbfde04cca40d4156,1 +np.float64,0x3fe870945ab0e129,0xbfbdf72f0e6206fa,1 +np.float64,0x3fef72fddcbee5fc,0xbf7ee2dba88b1bad,1 +np.float64,0x4e111aa09c224,0xc07342b1e2b29643,1 +np.float64,0x3fd926d8b5b24db1,0xbfd9f58b78d6b061,1 +np.float64,0x3fc55679172aacf2,0xbfe8e5df687842e2,1 +np.float64,0x7f5f1749803e2e92,0x40731886e16cfc4d,1 +np.float64,0x7fea082b53b41056,0x407342a42227700e,1 +np.float64,0x3fece1d1d039c3a4,0xbfa6cb780988a469,1 +np.float64,0x3b2721d8764e5,0xc073449f6a5a4832,1 +np.float64,0x365cb7006cba,0xc0735879ba5f0b6e,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe606ce92ac0d9c,0x4073417aeebe97e8,1 +np.float64,0x3fe237b544a46f6b,0xbfcf50f8f76d7df9,1 +np.float64,0x3fe7265e5eee4cbd,0xbfc1ff39089ec8d0,1 +np.float64,0x7fe2bb3c5ea57678,0x4073405aaad81cf2,1 +np.float64,0x3fd811df84b023bf,0xbfdb2e670ea8d8de,1 +np.float64,0x3f6a0efd00341dfa,0xc003fac1ae831241,1 +np.float64,0x3fd0d214afa1a429,0xbfe2922080a91c72,1 +np.float64,0x3feca6a350b94d47,0xbfa894eea3a96809,1 +np.float64,0x7fe23e5c76247cb8,0x4073402bbaaf71c7,1 +np.float64,0x3fe739a1fdae7344,0xbfc1d109f66efb5d,1 +np.float64,0x3fdf4b8e283e971c,0xbfd3e28f46169cc5,1 +np.float64,0x38f2535271e4b,0xc07344e3085219fa,1 +np.float64,0x7fd263a0f9a4c741,0x40733b68d945dae0,1 +np.float64,0x7fdd941863bb2830,0x40733eb651e3dca9,1 +np.float64,0xace7279159ce5,0xc0733d2b63b5947e,1 +np.float64,0x7fe34670b2268ce0,0x4073408d92770cb5,1 +np.float64,0x7fd11fa6dfa23f4d,0x40733aea02e76ea3,1 +np.float64,0x3fe6d9cbca6db398,0xbfc2b84b5c8c7eab,1 +np.float64,0x3fd69a0274ad3405,0xbfdcee3c7e52c463,1 +np.float64,0x3feb5af671f6b5ed,0xbfb16f88d739477f,1 +np.float64,0x3feea400163d4800,0xbf934e071c64fd0b,1 +np.float64,0x3fefd6bcf17fad7a,0xbf61f711c392b119,1 +np.float64,0x3fe148d43da291a8,0xbfd11e9cd3f91cd3,1 +np.float64,0x7fedf1308b7be260,0x4073439d135656da,1 +np.float64,0x3fe614c99c6c2993,0xbfc49fd1984dfd6d,1 +np.float64,0xd6e8d4e5add1b,0xc0733ba88256026e,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fb530b5562a616b,0xbff1504bcc5c8f73,1 +np.float64,0xb7da68396fb4d,0xc0733cbe2790f52e,1 +np.float64,0x7fad78e26c3af1c4,0x4073303cdbfb0a15,1 +np.float64,0x7fee5698447cad30,0x407343b474573a8b,1 +np.float64,0x3fd488325c291065,0xbfdf999296d901e7,1 +np.float64,0x2669283a4cd26,0xc073479f823109a4,1 +np.float64,0x7fef3b090afe7611,0x407343e805a3b264,1 +np.float64,0x7fe8b96ae0f172d5,0x4073424874a342ab,1 +np.float64,0x7fef409f56fe813e,0x407343e943c3cd44,1 +np.float64,0x3fed28073dfa500e,0xbfa4b17e4cd31a3a,1 +np.float64,0x7f87ecc4802fd988,0x40732527e027b24b,1 +np.float64,0x3fdda24da0bb449b,0xbfd566a43ac035af,1 +np.float64,0x179fc9e62f3fa,0xc0734b0028c80fc1,1 +np.float64,0x3fef85b0927f0b61,0xbf7ac27565d5ab4f,1 
+np.float64,0x5631501aac62b,0xc0734201be12c5d4,1 +np.float64,0x3fd782e424af05c8,0xbfdbd57544f8a7c3,1 +np.float64,0x3fe603a9a6ac0753,0xbfc4caff04dc3caf,1 +np.float64,0x7fbd5225163aa449,0x40733504b88f0a56,1 +np.float64,0x3fecd27506b9a4ea,0xbfa741dd70e6b08c,1 +np.float64,0x9c99603b3932c,0xc0733ddb922dc5db,1 +np.float64,0x3fbeb57f1a3d6afe,0xbfed789ff217aa08,1 +np.float64,0x3fef9c0f85bf381f,0xbf75d5c3d6cb281a,1 +np.float64,0x3fde4afb613c95f7,0xbfd4ca2a231c9005,1 +np.float64,0x396233d472c47,0xc07344d56ee70631,1 +np.float64,0x3fb31ea1c6263d44,0xbff207356152138d,1 +np.float64,0x3fe50bdf78aa17bf,0xbfc74ae0cbffb735,1 +np.float64,0xef74c701dee99,0xc0733ae81e4bb443,1 +np.float64,0x9a3e13a1347c3,0xc0733df68b60afc7,1 +np.float64,0x33ba4f886774b,0xc073458e03f0c13e,1 +np.float64,0x3fe8ba0e9931741d,0xbfbcaadf974e8f64,1 +np.float64,0x3fe090a4cd61214a,0xbfd24d236cf365d6,1 +np.float64,0x7fd87d992930fb31,0x40733d668b73b820,1 +np.float64,0x3fe6422b296c8456,0xbfc42e070b695d01,1 +np.float64,0x3febe9334677d267,0xbfae667864606cfe,1 +np.float64,0x771a3ce4ee348,0xc0733fc274d12c97,1 +np.float64,0x3fe0413542e0826b,0xbfd2d3b08fb5b8a6,1 +np.float64,0x3fd00870ea2010e2,0xbfe33cc04cbd42e0,1 +np.float64,0x3fe74fb817ae9f70,0xbfc19c45dbf919e1,1 +np.float64,0x40382fa08071,0xc07357514ced5577,1 +np.float64,0xa14968474292d,0xc0733da71a990f3a,1 +np.float64,0x5487c740a90fa,0xc0734224622d5801,1 +np.float64,0x3fed7d8d14fafb1a,0xbfa228f7ecc2ac03,1 +np.float64,0x3fe39bb485e73769,0xbfcb3a235a722960,1 +np.float64,0x3fd01090b2202121,0xbfe335b752589a22,1 +np.float64,0x3fd21a3e7da4347d,0xbfe18cd435a7c582,1 +np.float64,0x3fe7fa855a2ff50b,0xbfc00ab0665709fe,1 +np.float64,0x3fedc0d4577b81a9,0xbfa02fef3ff553fc,1 +np.float64,0x3fe99d4906333a92,0xbfb8bf18220e5e8e,1 +np.float64,0x3fd944ee3c3289dc,0xbfd9d46071675e73,1 +np.float64,0x3fe3ed8d52e7db1b,0xbfca53f8d4aef484,1 +np.float64,0x7fe748623a6e90c3,0x407341dd97c9dd79,1 +np.float64,0x3fea1b4b98343697,0xbfb6a1560a56927f,1 +np.float64,0xe1215715c242b,0xc0733b55dbf1f0a8,1 +np.float64,0x3fd0d5bccca1ab7a,0xbfe28f1b66d7a470,1 +np.float64,0x881a962710353,0xc0733ed51848a30d,1 +np.float64,0x3fcf022afe3e0456,0xbfe3b40eabf24501,1 +np.float64,0x3fdf1ac6bbbe358d,0xbfd40e03e888288d,1 +np.float64,0x3fa51a5eac2a34bd,0xbff628a7c34d51b3,1 +np.float64,0x3fdbaf408d375e81,0xbfd74ad39d97c92a,1 +np.float64,0x3fcd2418ea3a4832,0xbfe4910b009d8b11,1 +np.float64,0x3fc7b3062a2f660c,0xbfe7706dc47993e1,1 +np.float64,0x7fb8232218304643,0x407333aaa7041a9f,1 +np.float64,0x7fd5f186362be30b,0x40733ca32fdf9cc6,1 +np.float64,0x3fe57ef1d6aafde4,0xbfc61e23d00210c7,1 +np.float64,0x7c6830baf8d07,0xc0733f74f19e9dad,1 +np.float64,0xcacbfd5595980,0xc0733c0fb49edca7,1 +np.float64,0x3fdfdeac873fbd59,0xbfd36114c56bed03,1 +np.float64,0x3fd31f0889263e11,0xbfe0ca0cc1250169,1 +np.float64,0x3fe839fbe47073f8,0xbfbef0a2abc3d63f,1 +np.float64,0x3fc36af57e26d5eb,0xbfea3553f38770b7,1 +np.float64,0x3fe73dbc44ee7b79,0xbfc1c738f8fa6b3d,1 +np.float64,0x3fd3760e4da6ec1d,0xbfe08b5b609d11e5,1 +np.float64,0x3fee1cfa297c39f4,0xbf9b06d081bc9d5b,1 +np.float64,0xdfb01561bf61,0xc0734ea55e559888,1 +np.float64,0x687bd01cd0f7b,0xc07340ab67fe1816,1 +np.float64,0x3fefc88f4cbf911f,0xbf6828c359cf19dc,1 +np.float64,0x8ad34adb15a6a,0xc0733eb1e03811e5,1 +np.float64,0x3fe2b49c12e56938,0xbfcdd8dbdbc0ce59,1 +np.float64,0x6e05037adc0a1,0xc073404f91261635,1 +np.float64,0x3fe2fd737fe5fae7,0xbfcd020407ef4d78,1 +np.float64,0x3fd0f3c0dc21e782,0xbfe2766a1ab02eae,1 +np.float64,0x28564d9850acb,0xc073474875f87c5e,1 +np.float64,0x3fe4758015a8eb00,0xbfc8ddb45134a1bd,1 
+np.float64,0x7fe7f19306efe325,0x4073420f626141a7,1
+np.float64,0x7fd27f34c0a4fe69,0x40733b733d2a5b50,1
+np.float64,0x92c2366325847,0xc0733e4f04f8195a,1
+np.float64,0x3fc21f8441243f09,0xbfeb2ad23bc1ab0b,1
+np.float64,0x3fc721d3e42e43a8,0xbfe7c69bb47b40c2,1
+np.float64,0x3fe2f11a1625e234,0xbfcd26363b9c36c3,1
+np.float64,0x3fdcb585acb96b0b,0xbfd648446237cb55,1
+np.float64,0x3fd4060bf2280c18,0xbfe025fd4c8a658b,1
+np.float64,0x7fb8ae2750315c4e,0x407333d23b025d08,1
+np.float64,0x3fe3a03119a74062,0xbfcb2d6c91b38552,1
+np.float64,0x7fdd2af92bba55f1,0x40733e9d737e16e6,1
+np.float64,0x3fe50b05862a160b,0xbfc74d20815fe36b,1
+np.float64,0x164409f82c882,0xc0734b6980e19c03,1
+np.float64,0x3fe4093712a8126e,0xbfca070367fda5e3,1
+np.float64,0xae3049935c609,0xc0733d1e3608797b,1
+np.float64,0x3fd71df4b4ae3be9,0xbfdc4dcb7637600d,1
+np.float64,0x7fca01e8023403cf,0x407339006c521c49,1
+np.float64,0x3fb0c5c43e218b88,0xbff2f03211c63f25,1
+np.float64,0x3fee757af83ceaf6,0xbf95f33a6e56b454,1
+np.float64,0x3f865f1f402cbe3f,0xbfff62d9c9072bd7,1
+np.float64,0x89864e95130ca,0xc0733ec29f1e32c6,1
+np.float64,0x3fe51482bcea2905,0xbfc73414ddc8f1b7,1
+np.float64,0x7fd802f8fa3005f1,0x40733d43684e460a,1
+np.float64,0x3fbeb86ca63d70d9,0xbfed774ccca9b8f5,1
+np.float64,0x3fb355dcc826abba,0xbff1f33f9339e7a3,1
+np.float64,0x3fe506c61eaa0d8c,0xbfc7585a3f7565a6,1
+np.float64,0x7fe393f25ba727e4,0x407340a94bcea73b,1
+np.float64,0xf66f532decdeb,0xc0733ab5041feb0f,1
+np.float64,0x3fe26e872be4dd0e,0xbfceaaab466f32e0,1
+np.float64,0x3fefd9e290bfb3c5,0xbf60977d24496295,1
+np.float64,0x7fe19c5f692338be,0x40733fecef53ad95,1
+np.float64,0x3fe80365ab3006cb,0xbfbfec4090ef76ec,1
+np.float64,0x3fe88ab39eb11567,0xbfbd8099388d054d,1
+np.float64,0x3fe68fb09fad1f61,0xbfc36db9de38c2c0,1
+np.float64,0x3fe9051883b20a31,0xbfbb5b75b8cb8f24,1
+np.float64,0x3fd4708683a8e10d,0xbfdfb9b085dd8a83,1
+np.float64,0x3fe00ac11a601582,0xbfd3316af3e43500,1
+np.float64,0xd16af30ba2d5f,0xc0733bd68e8252f9,1
+np.float64,0x3fb97d654632facb,0xbff007ac1257f575,1
+np.float64,0x7fd637c10fac6f81,0x40733cb949d76546,1
+np.float64,0x7fed2cab6dba5956,0x4073436edfc3764e,1
+np.float64,0x3fed04afbbba095f,0xbfa5bfaa5074b7f4,1
+np.float64,0x0,0xfff0000000000000,1
+np.float64,0x389a1dc671345,0xc07344edd4206338,1
+np.float64,0x3fbc9ba25a393745,0xbfee74c34f49b921,1
+np.float64,0x3feee749947dce93,0xbf8f032d9cf6b5ae,1
+np.float64,0xedc4cf89db89a,0xc0733af4b2a57920,1
+np.float64,0x3fe41629eba82c54,0xbfc9e321faf79e1c,1
+np.float64,0x3feb0bcbf7b61798,0xbfb2b31e5d952869,1
+np.float64,0xad60654b5ac0d,0xc0733d26860df676,1
+np.float64,0x3fe154e1ff22a9c4,0xbfd10b416e58c867,1
+np.float64,0x7fb20e9c8a241d38,0x407331a66453b8bc,1
+np.float64,0x7fcbbaaf7d37755e,0x4073397274f28008,1
+np.float64,0x187d0fbc30fa3,0xc0734ac03cc98cc9,1
+np.float64,0x7fd153afeaa2a75f,0x40733aff00b4311d,1
+np.float64,0x3fe05310a5e0a621,0xbfd2b5386aeecaac,1
+np.float64,0x7fea863b2b750c75,0x407342c57807f700,1
+np.float64,0x3fed5f0c633abe19,0xbfa30f6cfbc4bf94,1
+np.float64,0xf227c8b3e44f9,0xc0733ad42daaec9f,1
+np.float64,0x3fe956524772aca5,0xbfb9f4cabed7081d,1
+np.float64,0xefd11af7dfa24,0xc0733ae570ed2552,1
+np.float64,0x1690fff02d221,0xc0734b51a56c2980,1
+np.float64,0x7fd2e547a825ca8e,0x40733b992d6d9635,1
diff --git a/python/numpy/_core/tests/data/umath-validation-set-log1p.csv b/python/numpy/_core/tests/data/umath-validation-set-log1p.csv
new file mode 100644
index 000000000..094e052a5
--- /dev/null
+++ b/python/numpy/_core/tests/data/umath-validation-set-log1p.csv
@@ -0,0 +1,1429 @@
+dtype,input,output,ulperrortol
+np.float32,0x3e10aca8,0x3e075347,2 +np.float32,0x3f776e66,0x3f2d2003,2 +np.float32,0xbf34e8ce,0xbf9cfd5c,2 +np.float32,0xbf0260ee,0xbf363f69,2 +np.float32,0x3ed285e8,0x3eb05870,2 +np.float32,0x262b88,0x262b88,2 +np.float32,0x3eeffd6c,0x3ec4cfdb,2 +np.float32,0x3ee86808,0x3ebf9f54,2 +np.float32,0x3f36eba8,0x3f0a0524,2 +np.float32,0xbf1c047a,0xbf70afc7,2 +np.float32,0x3ead2916,0x3e952902,2 +np.float32,0x61c9c9,0x61c9c9,2 +np.float32,0xff7fffff,0xffc00000,2 +np.float32,0x7f64ee52,0x42b138e0,2 +np.float32,0x7ed00b1e,0x42afa4ff,2 +np.float32,0x3db53340,0x3dada0b2,2 +np.float32,0x3e6b0a4a,0x3e5397a4,2 +np.float32,0x7ed5d64f,0x42afb310,2 +np.float32,0xbf12bc5f,0xbf59f5ee,2 +np.float32,0xbda12710,0xbda7d8b5,2 +np.float32,0xbe2e89d8,0xbe3f5a9f,2 +np.float32,0x3f5bee75,0x3f1ebea4,2 +np.float32,0x9317a,0x9317a,2 +np.float32,0x7ee00130,0x42afcad8,2 +np.float32,0x7ef0d16d,0x42afefe7,2 +np.float32,0xbec7463a,0xbefc6a44,2 +np.float32,0xbf760ecc,0xc04fe59c,2 +np.float32,0xbecacb3c,0xbf011ae3,2 +np.float32,0x3ead92be,0x3e9577f0,2 +np.float32,0xbf41510d,0xbfb41b3a,2 +np.float32,0x7f71d489,0x42b154f1,2 +np.float32,0x8023bcd5,0x8023bcd5,2 +np.float32,0x801d33d8,0x801d33d8,2 +np.float32,0x3f3f545d,0x3f0ee0d4,2 +np.float32,0xbf700682,0xc0318c25,2 +np.float32,0xbe54e990,0xbe6eb0a3,2 +np.float32,0x7f0289bf,0x42b01941,2 +np.float32,0xbd61ac90,0xbd682113,2 +np.float32,0xbf2ff310,0xbf94cd6f,2 +np.float32,0x7f10064a,0x42b04b98,2 +np.float32,0x804d0d6d,0x804d0d6d,2 +np.float32,0x80317b0a,0x80317b0a,2 +np.float32,0xbddfef18,0xbded2640,2 +np.float32,0x3f00c9ab,0x3ed0a5bd,2 +np.float32,0x7f04b905,0x42b021c1,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x6524c4,0x6524c4,2 +np.float32,0x3da08ae0,0x3d9a8f88,2 +np.float32,0x293ea9,0x293ea9,2 +np.float32,0x71499e,0x71499e,2 +np.float32,0xbf14f54d,0xbf5f38a5,2 +np.float32,0x806e60f5,0x806e60f5,2 +np.float32,0x3f5f34bb,0x3f207fff,2 +np.float32,0x80513427,0x80513427,2 +np.float32,0x7f379670,0x42b0c7dc,2 +np.float32,0x3efba888,0x3eccb20b,2 +np.float32,0x3eeadd1b,0x3ec14f4b,2 +np.float32,0x7ec5a27f,0x42af8ab8,2 +np.float32,0x3f2afe4e,0x3f02f7a2,2 +np.float32,0x5591c8,0x5591c8,2 +np.float32,0x3dbb7240,0x3db35bab,2 +np.float32,0x805b911b,0x805b911b,2 +np.float32,0x800000,0x800000,2 +np.float32,0x7e784c04,0x42ae9cab,2 +np.float32,0x7ebaae14,0x42af6d86,2 +np.float32,0xbec84f7a,0xbefe1d42,2 +np.float32,0x7cea8281,0x42aa56bf,2 +np.float32,0xbf542cf6,0xbfe1eb1b,2 +np.float32,0xbf6bfb13,0xc0231a5b,2 +np.float32,0x7d6eeaef,0x42abc32c,2 +np.float32,0xbf062f6b,0xbf3e2000,2 +np.float32,0x8073d8e9,0x8073d8e9,2 +np.float32,0xbea4db14,0xbec6f485,2 +np.float32,0x7d7e8d62,0x42abe3a0,2 +np.float32,0x7e8fc34e,0x42aee7c6,2 +np.float32,0x7dcbb0c3,0x42acd464,2 +np.float32,0x7e123c,0x7e123c,2 +np.float32,0x3d77af62,0x3d707c34,2 +np.float32,0x498cc8,0x498cc8,2 +np.float32,0x7f4e2206,0x42b1032a,2 +np.float32,0x3f734e0a,0x3f2b04a1,2 +np.float32,0x8053a9d0,0x8053a9d0,2 +np.float32,0xbe8a67e0,0xbea15be9,2 +np.float32,0xbf78e0ea,0xc065409e,2 +np.float32,0x352bdd,0x352bdd,2 +np.float32,0x3ee42be7,0x3ebcb38a,2 +np.float32,0x7f482d10,0x42b0f427,2 +np.float32,0xbf23155e,0xbf81b993,2 +np.float32,0x594920,0x594920,2 +np.float32,0x63f53f,0x63f53f,2 +np.float32,0x363592,0x363592,2 +np.float32,0x7dafbb78,0x42ac88cc,2 +np.float32,0x7f69516c,0x42b14298,2 +np.float32,0x3e1d5be2,0x3e126131,2 +np.float32,0x410c23,0x410c23,2 +np.float32,0x7ec9563c,0x42af9439,2 +np.float32,0xbedd3a0e,0xbf10d705,2 +np.float32,0x7f7c4f1f,0x42b16aa8,2 +np.float32,0xbe99b34e,0xbeb6c2d3,2 +np.float32,0x6cdc84,0x6cdc84,2 
+np.float32,0x5b3bbe,0x5b3bbe,2 +np.float32,0x252178,0x252178,2 +np.float32,0x7d531865,0x42ab83c8,2 +np.float32,0xbf565b44,0xbfe873bf,2 +np.float32,0x5977ce,0x5977ce,2 +np.float32,0x588a58,0x588a58,2 +np.float32,0x3eae7054,0x3e961d51,2 +np.float32,0x725049,0x725049,2 +np.float32,0x7f2b9386,0x42b0a538,2 +np.float32,0xbe674714,0xbe831245,2 +np.float32,0x8044f0d8,0x8044f0d8,2 +np.float32,0x800a3c21,0x800a3c21,2 +np.float32,0x807b275b,0x807b275b,2 +np.float32,0xbf2463b6,0xbf83896e,2 +np.float32,0x801cca42,0x801cca42,2 +np.float32,0xbf28f2d0,0xbf8a121a,2 +np.float32,0x3f4168c2,0x3f1010ce,2 +np.float32,0x6f91a1,0x6f91a1,2 +np.float32,0xbf2b9eeb,0xbf8e0fc5,2 +np.float32,0xbea4c858,0xbec6d8e4,2 +np.float32,0xbf7abba0,0xc0788e88,2 +np.float32,0x802f18f7,0x802f18f7,2 +np.float32,0xbf7f6c75,0xc0c3145c,2 +np.float32,0xbe988210,0xbeb50f5e,2 +np.float32,0xbf219b7e,0xbf7f6a3b,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x7f7fffff,0x42b17218,2 +np.float32,0xbdca8d90,0xbdd5487e,2 +np.float32,0xbef683b0,0xbf2821b0,2 +np.float32,0x8043e648,0x8043e648,2 +np.float32,0xbf4319a4,0xbfb7cd1b,2 +np.float32,0x62c2b2,0x62c2b2,2 +np.float32,0xbf479ccd,0xbfc1a7b1,2 +np.float32,0x806c8a32,0x806c8a32,2 +np.float32,0x7f004447,0x42b01045,2 +np.float32,0x3f737d36,0x3f2b1ccf,2 +np.float32,0x3ee71f24,0x3ebebced,2 +np.float32,0x3ea0b6b4,0x3e8bc606,2 +np.float32,0x358fd7,0x358fd7,2 +np.float32,0xbe69780c,0xbe847d17,2 +np.float32,0x7f6bed18,0x42b14849,2 +np.float32,0xbf6a5113,0xc01dfe1d,2 +np.float32,0xbf255693,0xbf84de88,2 +np.float32,0x7f34acac,0x42b0bfac,2 +np.float32,0xbe8a3b6a,0xbea11efe,2 +np.float32,0x3f470d84,0x3f1342ab,2 +np.float32,0xbf2cbde3,0xbf8fc602,2 +np.float32,0x47c103,0x47c103,2 +np.float32,0xe3c94,0xe3c94,2 +np.float32,0xbec07afa,0xbef1693a,2 +np.float32,0x6a9cfe,0x6a9cfe,2 +np.float32,0xbe4339e0,0xbe5899da,2 +np.float32,0x7ea9bf1e,0x42af3cd6,2 +np.float32,0x3f6378b4,0x3f22c4c4,2 +np.float32,0xbd989ff0,0xbd9e9c77,2 +np.float32,0xbe6f2f50,0xbe88343d,2 +np.float32,0x3f7f2ac5,0x3f310764,2 +np.float32,0x3f256704,0x3eff2fb2,2 +np.float32,0x80786aca,0x80786aca,2 +np.float32,0x65d02f,0x65d02f,2 +np.float32,0x50d1c3,0x50d1c3,2 +np.float32,0x3f4a9d76,0x3f1541b4,2 +np.float32,0x802cf491,0x802cf491,2 +np.float32,0x3e935cec,0x3e81829b,2 +np.float32,0x3e2ad478,0x3e1dfd81,2 +np.float32,0xbf107cbd,0xbf54bef2,2 +np.float32,0xbf58c02e,0xbff007fe,2 +np.float32,0x80090808,0x80090808,2 +np.float32,0x805d1f66,0x805d1f66,2 +np.float32,0x6aec95,0x6aec95,2 +np.float32,0xbee3fc6e,0xbf16dc73,2 +np.float32,0x7f63314b,0x42b134f9,2 +np.float32,0x550443,0x550443,2 +np.float32,0xbefa8174,0xbf2c026e,2 +np.float32,0x3f7fb380,0x3f314bd5,2 +np.float32,0x80171f2c,0x80171f2c,2 +np.float32,0x3f2f56ae,0x3f058f2d,2 +np.float32,0x3eacaecb,0x3e94cd97,2 +np.float32,0xbe0c4f0c,0xbe16e69d,2 +np.float32,0x3f48e4cb,0x3f144b42,2 +np.float32,0x7f03efe2,0x42b01eb7,2 +np.float32,0xbf1019ac,0xbf53dbe9,2 +np.float32,0x3e958524,0x3e832eb5,2 +np.float32,0xbf1b23c6,0xbf6e72f2,2 +np.float32,0x12c554,0x12c554,2 +np.float32,0x7dee588c,0x42ad24d6,2 +np.float32,0xbe8c216c,0xbea3ba70,2 +np.float32,0x804553cb,0x804553cb,2 +np.float32,0xbe446324,0xbe5a0966,2 +np.float32,0xbef7150a,0xbf28adff,2 +np.float32,0xbf087282,0xbf42ec6e,2 +np.float32,0x3eeef15c,0x3ec41937,2 +np.float32,0x61bbd2,0x61bbd2,2 +np.float32,0x3e51b28d,0x3e3ec538,2 +np.float32,0x57e869,0x57e869,2 +np.float32,0x7e5e7711,0x42ae646c,2 +np.float32,0x8050b173,0x8050b173,2 +np.float32,0xbf63c90c,0xc00d2438,2 +np.float32,0xbeba774c,0xbee7dcf8,2 +np.float32,0x8016faac,0x8016faac,2 
+np.float32,0xbe8b448c,0xbea28aaf,2 +np.float32,0x3e8cd448,0x3e78d29e,2 +np.float32,0x80484e02,0x80484e02,2 +np.float32,0x3f63ba68,0x3f22e78c,2 +np.float32,0x2e87bb,0x2e87bb,2 +np.float32,0x230496,0x230496,2 +np.float32,0x1327b2,0x1327b2,2 +np.float32,0xbf046c56,0xbf3a72d2,2 +np.float32,0x3ecefe60,0x3eadd69a,2 +np.float32,0x49c56e,0x49c56e,2 +np.float32,0x3df22d60,0x3de4e550,2 +np.float32,0x3f67c19d,0x3f250707,2 +np.float32,0x3f20eb9c,0x3ef9b624,2 +np.float32,0x3f05ca75,0x3ed742fa,2 +np.float32,0xbe8514f8,0xbe9a1d45,2 +np.float32,0x8070a003,0x8070a003,2 +np.float32,0x7e49650e,0x42ae317a,2 +np.float32,0x3de16ce9,0x3dd5dc3e,2 +np.float32,0xbf4ae952,0xbfc95f1f,2 +np.float32,0xbe44dd84,0xbe5aa0db,2 +np.float32,0x803c3bc0,0x803c3bc0,2 +np.float32,0x3eebb9e8,0x3ec1e692,2 +np.float32,0x80588275,0x80588275,2 +np.float32,0xbea1e69a,0xbec29d86,2 +np.float32,0x3f7b4bf8,0x3f2f154c,2 +np.float32,0x7eb47ecc,0x42af5c46,2 +np.float32,0x3d441e00,0x3d3f911a,2 +np.float32,0x7f54d40e,0x42b11388,2 +np.float32,0xbf47f17e,0xbfc26882,2 +np.float32,0x3ea7da57,0x3e912db4,2 +np.float32,0x3f59cc7b,0x3f1d984e,2 +np.float32,0x570e08,0x570e08,2 +np.float32,0x3e99560c,0x3e8620a2,2 +np.float32,0x3ecfbd14,0x3eae5e55,2 +np.float32,0x7e86be08,0x42aec698,2 +np.float32,0x3f10f28a,0x3ee5b5d3,2 +np.float32,0x7f228722,0x42b0897a,2 +np.float32,0x3f4b979b,0x3f15cd30,2 +np.float32,0xbf134283,0xbf5b30f9,2 +np.float32,0x3f2ae16a,0x3f02e64f,2 +np.float32,0x3e98e158,0x3e85c6cc,2 +np.float32,0x7ec39f27,0x42af857a,2 +np.float32,0x3effedb0,0x3ecf8cea,2 +np.float32,0xbd545620,0xbd5a09c1,2 +np.float32,0x503a28,0x503a28,2 +np.float32,0x3f712744,0x3f29e9a1,2 +np.float32,0x3edc6194,0x3eb748b1,2 +np.float32,0xbf4ec1e5,0xbfd2ff5f,2 +np.float32,0x3f46669e,0x3f12e4b5,2 +np.float32,0xabad3,0xabad3,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x803f2e6d,0x803f2e6d,2 +np.float32,0xbf431542,0xbfb7c3e6,2 +np.float32,0x3f6f2d53,0x3f28e496,2 +np.float32,0x546bd8,0x546bd8,2 +np.float32,0x25c80a,0x25c80a,2 +np.float32,0x3e50883c,0x3e3dcd7e,2 +np.float32,0xbf5fa2ba,0xc0045c14,2 +np.float32,0x80271c07,0x80271c07,2 +np.float32,0x8043755d,0x8043755d,2 +np.float32,0xbf3c5cea,0xbfaa5ee9,2 +np.float32,0x3f2fea38,0x3f05e6af,2 +np.float32,0x6da3dc,0x6da3dc,2 +np.float32,0xbf095945,0xbf44dc70,2 +np.float32,0xbe33d584,0xbe45c1f5,2 +np.float32,0x7eb41b2e,0x42af5b2b,2 +np.float32,0xbf0feb74,0xbf537242,2 +np.float32,0xbe96225a,0xbeb1b0b1,2 +np.float32,0x3f63b95f,0x3f22e700,2 +np.float32,0x0,0x0,2 +np.float32,0x3e20b0cc,0x3e154374,2 +np.float32,0xbf79880c,0xc06b6801,2 +np.float32,0xbea690b6,0xbec97b93,2 +np.float32,0xbf3e11ca,0xbfada449,2 +np.float32,0x7e7e6292,0x42aea912,2 +np.float32,0x3e793350,0x3e5f0b7b,2 +np.float32,0x802e7183,0x802e7183,2 +np.float32,0x3f1b3695,0x3ef2a788,2 +np.float32,0x801efa20,0x801efa20,2 +np.float32,0x3f1ec43a,0x3ef70f42,2 +np.float32,0xbf12c5ed,0xbf5a0c52,2 +np.float32,0x8005e99c,0x8005e99c,2 +np.float32,0xbf79f5e7,0xc06fcca5,2 +np.float32,0x3ecbaf50,0x3eab7a03,2 +np.float32,0x46b0fd,0x46b0fd,2 +np.float32,0x3edb9023,0x3eb6b631,2 +np.float32,0x7f24bc41,0x42b09063,2 +np.float32,0xbd8d9328,0xbd92b4c6,2 +np.float32,0x3f2c5d7f,0x3f03c9d9,2 +np.float32,0x807bebc9,0x807bebc9,2 +np.float32,0x7f797a99,0x42b164e2,2 +np.float32,0x756e3c,0x756e3c,2 +np.float32,0x80416f8a,0x80416f8a,2 +np.float32,0x3e0d512a,0x3e04611a,2 +np.float32,0x3f7be3e6,0x3f2f61ec,2 +np.float32,0x80075c41,0x80075c41,2 +np.float32,0xbe850294,0xbe9a046c,2 +np.float32,0x684679,0x684679,2 +np.float32,0x3eb393c4,0x3e99eed2,2 +np.float32,0x3f4177c6,0x3f10195b,2 
+np.float32,0x3dd1f402,0x3dc7dfe5,2 +np.float32,0x3ef484d4,0x3ec7e2e1,2 +np.float32,0x53eb8f,0x53eb8f,2 +np.float32,0x7f072cb6,0x42b02b20,2 +np.float32,0xbf1b6b55,0xbf6f28d4,2 +np.float32,0xbd8a98d8,0xbd8f827d,2 +np.float32,0x3eafb418,0x3e970e96,2 +np.float32,0x6555af,0x6555af,2 +np.float32,0x7dd5118e,0x42aceb6f,2 +np.float32,0x800a13f7,0x800a13f7,2 +np.float32,0x331a9d,0x331a9d,2 +np.float32,0x8063773f,0x8063773f,2 +np.float32,0x3e95e068,0x3e837553,2 +np.float32,0x80654b32,0x80654b32,2 +np.float32,0x3dabe0e0,0x3da50bb3,2 +np.float32,0xbf6283c3,0xc00a5280,2 +np.float32,0x80751cc5,0x80751cc5,2 +np.float32,0x3f668eb6,0x3f2465c0,2 +np.float32,0x3e13c058,0x3e0a048c,2 +np.float32,0x77780c,0x77780c,2 +np.float32,0x3f7d6e48,0x3f302868,2 +np.float32,0x7e31f9e3,0x42adf22f,2 +np.float32,0x246c7b,0x246c7b,2 +np.float32,0xbe915bf0,0xbeaafa6c,2 +np.float32,0xbf800000,0xff800000,2 +np.float32,0x3f698f42,0x3f25f8e0,2 +np.float32,0x7e698885,0x42ae7d48,2 +np.float32,0x3f5bbd42,0x3f1ea42c,2 +np.float32,0x5b8444,0x5b8444,2 +np.float32,0xbf6065f6,0xc005e2c6,2 +np.float32,0xbeb95036,0xbee60dad,2 +np.float32,0xbf44f846,0xbfbbcade,2 +np.float32,0xc96e5,0xc96e5,2 +np.float32,0xbf213e90,0xbf7e6eae,2 +np.float32,0xbeb309cc,0xbedc4fe6,2 +np.float32,0xbe781cf4,0xbe8e0fe6,2 +np.float32,0x7f0cf0db,0x42b04083,2 +np.float32,0xbf7b6143,0xc08078f9,2 +np.float32,0x80526fc6,0x80526fc6,2 +np.float32,0x3f092bf3,0x3edbaeec,2 +np.float32,0x3ecdf154,0x3ead16df,2 +np.float32,0x2fe85b,0x2fe85b,2 +np.float32,0xbf5100a0,0xbfd8f871,2 +np.float32,0xbec09d40,0xbef1a028,2 +np.float32,0x5e6a85,0x5e6a85,2 +np.float32,0xbec0e2a0,0xbef20f6b,2 +np.float32,0x3f72e788,0x3f2ad00d,2 +np.float32,0x880a6,0x880a6,2 +np.float32,0x3d9e90bf,0x3d98b9fc,2 +np.float32,0x15cf25,0x15cf25,2 +np.float32,0x10171b,0x10171b,2 +np.float32,0x805cf1aa,0x805cf1aa,2 +np.float32,0x3f19bd36,0x3ef0d0d2,2 +np.float32,0x3ebe2bda,0x3ea1b774,2 +np.float32,0xbecd8192,0xbf035c49,2 +np.float32,0x3e2ce508,0x3e1fc21b,2 +np.float32,0x290f,0x290f,2 +np.float32,0x803b679f,0x803b679f,2 +np.float32,0x1,0x1,2 +np.float32,0x807a9c76,0x807a9c76,2 +np.float32,0xbf65fced,0xc01257f8,2 +np.float32,0x3f783414,0x3f2d8475,2 +np.float32,0x3f2d9d92,0x3f0488da,2 +np.float32,0xbddb5798,0xbde80018,2 +np.float32,0x3e91afb8,0x3e8034e7,2 +np.float32,0xbf1b775a,0xbf6f476d,2 +np.float32,0xbf73a32c,0xc041f3ba,2 +np.float32,0xbea39364,0xbec5121b,2 +np.float32,0x80375b94,0x80375b94,2 +np.float32,0x3f331252,0x3f07c3e9,2 +np.float32,0xbf285774,0xbf892e74,2 +np.float32,0x3e699bb8,0x3e526d55,2 +np.float32,0x3f08208a,0x3eda523a,2 +np.float32,0xbf42fb4a,0xbfb78d60,2 +np.float32,0x8029c894,0x8029c894,2 +np.float32,0x3e926c0c,0x3e80c76e,2 +np.float32,0x801e4715,0x801e4715,2 +np.float32,0x3e4b36d8,0x3e395ffd,2 +np.float32,0x8041556b,0x8041556b,2 +np.float32,0xbf2d99ba,0xbf9119bd,2 +np.float32,0x3ed83ea8,0x3eb46250,2 +np.float32,0xbe94a280,0xbeaf92b4,2 +np.float32,0x7f4c7a64,0x42b0ff0a,2 +np.float32,0x806d4022,0x806d4022,2 +np.float32,0xbed382f8,0xbf086d26,2 +np.float32,0x1846fe,0x1846fe,2 +np.float32,0xbe702558,0xbe88d4d8,2 +np.float32,0xbe650ee0,0xbe81a3cc,2 +np.float32,0x3ee9d088,0x3ec0970c,2 +np.float32,0x7f6d4498,0x42b14b30,2 +np.float32,0xbef9f9e6,0xbf2b7ddb,2 +np.float32,0xbf70c384,0xc0349370,2 +np.float32,0xbeff9e9e,0xbf3110c8,2 +np.float32,0xbef06372,0xbf224aa9,2 +np.float32,0xbf15a692,0xbf60e1fa,2 +np.float32,0x8058c117,0x8058c117,2 +np.float32,0xbd9f74b8,0xbda6017b,2 +np.float32,0x801bf130,0x801bf130,2 +np.float32,0x805da84c,0x805da84c,2 +np.float32,0xff800000,0xffc00000,2 
+np.float32,0xbeb01de2,0xbed7d6d6,2 +np.float32,0x8077de08,0x8077de08,2 +np.float32,0x3e327668,0x3e2482c1,2 +np.float32,0xbe7add88,0xbe8fe1ab,2 +np.float32,0x805a3c2e,0x805a3c2e,2 +np.float32,0x80326a73,0x80326a73,2 +np.float32,0x800b8a34,0x800b8a34,2 +np.float32,0x8048c83a,0x8048c83a,2 +np.float32,0xbf3799d6,0xbfa1a975,2 +np.float32,0x807649c7,0x807649c7,2 +np.float32,0x3dfdbf90,0x3def3798,2 +np.float32,0xbf1b538a,0xbf6eec4c,2 +np.float32,0xbf1e5989,0xbf76baa0,2 +np.float32,0xc7a80,0xc7a80,2 +np.float32,0x8001be54,0x8001be54,2 +np.float32,0x3f435bbc,0x3f112c6d,2 +np.float32,0xbeabcff8,0xbed151d1,2 +np.float32,0x7de20c78,0x42ad09b7,2 +np.float32,0x3f0e6d2e,0x3ee27b1e,2 +np.float32,0xbf0cb352,0xbf4c3267,2 +np.float32,0x7f6ec06f,0x42b14e61,2 +np.float32,0x7f6fa8ef,0x42b15053,2 +np.float32,0xbf3d2a6a,0xbfabe623,2 +np.float32,0x7f077a4c,0x42b02c46,2 +np.float32,0xbf2a68dc,0xbf8c3cc4,2 +np.float32,0x802a5dbe,0x802a5dbe,2 +np.float32,0x807f631c,0x807f631c,2 +np.float32,0x3dc9b8,0x3dc9b8,2 +np.float32,0x3ebdc1b7,0x3ea16a0a,2 +np.float32,0x7ef29dab,0x42aff3b5,2 +np.float32,0x3e8ab1cc,0x3e757806,2 +np.float32,0x3f27e88e,0x3f011c6d,2 +np.float32,0x3cfd1455,0x3cf93fb5,2 +np.float32,0x7f7eebf5,0x42b16fef,2 +np.float32,0x3c9b2140,0x3c99ade9,2 +np.float32,0x7e928601,0x42aef183,2 +np.float32,0xbd7d2db0,0xbd82abae,2 +np.float32,0x3e6f0df3,0x3e56da20,2 +np.float32,0x7d36a2fc,0x42ab39a3,2 +np.float32,0xbf49d3a2,0xbfc6c859,2 +np.float32,0x7ee541d3,0x42afd6b6,2 +np.float32,0x80753dc0,0x80753dc0,2 +np.float32,0x3f4ce486,0x3f16865d,2 +np.float32,0x39e701,0x39e701,2 +np.float32,0x3f3d9ede,0x3f0de5fa,2 +np.float32,0x7fafb2,0x7fafb2,2 +np.float32,0x3e013fdc,0x3df37090,2 +np.float32,0x807b6a2c,0x807b6a2c,2 +np.float32,0xbe86800a,0xbe9c08c7,2 +np.float32,0x7f40f080,0x42b0e14d,2 +np.float32,0x7eef5afe,0x42afecc8,2 +np.float32,0x7ec30052,0x42af83da,2 +np.float32,0x3eacf768,0x3e9503e1,2 +np.float32,0x7f13ef0e,0x42b0594e,2 +np.float32,0x80419f4a,0x80419f4a,2 +np.float32,0xbf485932,0xbfc3562a,2 +np.float32,0xbe8a24d6,0xbea10011,2 +np.float32,0xbda791c0,0xbdaed2bc,2 +np.float32,0x3e9b5169,0x3e87a67d,2 +np.float32,0x807dd882,0x807dd882,2 +np.float32,0x7f40170e,0x42b0df0a,2 +np.float32,0x7f02f7f9,0x42b01af1,2 +np.float32,0x3ea38bf9,0x3e8decde,2 +np.float32,0x3e2e7ce8,0x3e211ed4,2 +np.float32,0x70a7a6,0x70a7a6,2 +np.float32,0x7d978592,0x42ac3ce7,2 +np.float32,0x804d12d0,0x804d12d0,2 +np.float32,0x80165dc8,0x80165dc8,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0x3e325da0,0x3e246da6,2 +np.float32,0xbe063bb8,0xbe0fe281,2 +np.float32,0x160b8,0x160b8,2 +np.float32,0xbe5687a4,0xbe70bbef,2 +np.float32,0x7f11ab34,0x42b05168,2 +np.float32,0xc955c,0xc955c,2 +np.float32,0xbea0003a,0xbebfd826,2 +np.float32,0x3f7fbdd9,0x3f315102,2 +np.float32,0xbe61aefc,0xbe7ef121,2 +np.float32,0xbf1b9873,0xbf6f9bc3,2 +np.float32,0x3a6d14,0x3a6d14,2 +np.float32,0xbf1ad3b4,0xbf6da808,2 +np.float32,0x3ed2dd24,0x3eb0963d,2 +np.float32,0xbe81a4ca,0xbe957d52,2 +np.float32,0x7f1be3e9,0x42b07421,2 +np.float32,0x7f5ce943,0x42b1269e,2 +np.float32,0x7eebcbdf,0x42afe51d,2 +np.float32,0x807181b5,0x807181b5,2 +np.float32,0xbecb03ba,0xbf0149ad,2 +np.float32,0x42edb8,0x42edb8,2 +np.float32,0xbf3aeec8,0xbfa7b13f,2 +np.float32,0xbd0c4f00,0xbd0ec4a0,2 +np.float32,0x3e48d260,0x3e376070,2 +np.float32,0x1a9731,0x1a9731,2 +np.float32,0x7f323be4,0x42b0b8b5,2 +np.float32,0x1a327f,0x1a327f,2 +np.float32,0x17f1fc,0x17f1fc,2 +np.float32,0xbf2f4f9b,0xbf93c91a,2 +np.float32,0x3ede8934,0x3eb8c9c3,2 +np.float32,0xbf56aaac,0xbfe968bb,2 +np.float32,0x3e22cb5a,0x3e17148c,2 
+np.float32,0x7d9def,0x7d9def,2 +np.float32,0x8045b963,0x8045b963,2 +np.float32,0x77404f,0x77404f,2 +np.float32,0x7e2c9efb,0x42ade28b,2 +np.float32,0x8058ad89,0x8058ad89,2 +np.float32,0x7f4139,0x7f4139,2 +np.float32,0x8020e12a,0x8020e12a,2 +np.float32,0x800c9daa,0x800c9daa,2 +np.float32,0x7f2c5ac5,0x42b0a789,2 +np.float32,0x3f04a47b,0x3ed5c043,2 +np.float32,0x804692d5,0x804692d5,2 +np.float32,0xbf6e7fa4,0xc02bb493,2 +np.float32,0x80330756,0x80330756,2 +np.float32,0x7f3e29ad,0x42b0d9e1,2 +np.float32,0xbebf689a,0xbeefb24d,2 +np.float32,0x3f29a86c,0x3f022a56,2 +np.float32,0x3e3bd1c0,0x3e2c72b3,2 +np.float32,0x3f78f2e8,0x3f2de546,2 +np.float32,0x3f3709be,0x3f0a16af,2 +np.float32,0x3e11f150,0x3e086f97,2 +np.float32,0xbf5867ad,0xbfeee8a0,2 +np.float32,0xbebfb328,0xbef0296c,2 +np.float32,0x2f7f15,0x2f7f15,2 +np.float32,0x805cfe84,0x805cfe84,2 +np.float32,0xbf504e01,0xbfd71589,2 +np.float32,0x3ee0903c,0x3eba330c,2 +np.float32,0xbd838990,0xbd87f399,2 +np.float32,0x3f14444e,0x3ee9ee7d,2 +np.float32,0x7e352583,0x42adfb3a,2 +np.float32,0x7e76f824,0x42ae99ec,2 +np.float32,0x3f772d00,0x3f2cfebf,2 +np.float32,0x801f7763,0x801f7763,2 +np.float32,0x3f760bf5,0x3f2c6b87,2 +np.float32,0xbf0bb696,0xbf4a03a5,2 +np.float32,0x3f175d2c,0x3eedd6d2,2 +np.float32,0xbf5723f8,0xbfeae288,2 +np.float32,0x24de0a,0x24de0a,2 +np.float32,0x3cd73f80,0x3cd47801,2 +np.float32,0x7f013305,0x42b013fa,2 +np.float32,0x3e3ad425,0x3e2b9c50,2 +np.float32,0x7d3d16,0x7d3d16,2 +np.float32,0x3ef49738,0x3ec7ef54,2 +np.float32,0x3f5b8612,0x3f1e8678,2 +np.float32,0x7f0eeb5c,0x42b047a7,2 +np.float32,0x7e9d7cb0,0x42af1675,2 +np.float32,0xbdd1cfb0,0xbddd5aa0,2 +np.float32,0xbf645dba,0xc00e78fe,2 +np.float32,0x3f511174,0x3f18d56c,2 +np.float32,0x3d91ad00,0x3d8cba62,2 +np.float32,0x805298da,0x805298da,2 +np.float32,0xbedb6af4,0xbf0f4090,2 +np.float32,0x3d23b1ba,0x3d208205,2 +np.float32,0xbea5783e,0xbec7dc87,2 +np.float32,0x79d191,0x79d191,2 +np.float32,0x3e894413,0x3e7337da,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0xbf34a8d3,0xbf9c907b,2 +np.float32,0x3bae779a,0x3bae011f,2 +np.float32,0x8049284d,0x8049284d,2 +np.float32,0x3eb42cc4,0x3e9a600b,2 +np.float32,0x3da1e2d0,0x3d9bce5f,2 +np.float32,0x3f364b8a,0x3f09a7af,2 +np.float32,0x3d930b10,0x3d8e0118,2 +np.float32,0x8061f8d7,0x8061f8d7,2 +np.float32,0x3f473213,0x3f13573b,2 +np.float32,0x3f1e2a38,0x3ef65102,2 +np.float32,0x8068f7d9,0x8068f7d9,2 +np.float32,0x3f181ef8,0x3eeeca2c,2 +np.float32,0x3eeb6168,0x3ec1a9f5,2 +np.float32,0xc2db6,0xc2db6,2 +np.float32,0x3ef7b578,0x3eca0a69,2 +np.float32,0xbf5b5a84,0xbff8d075,2 +np.float32,0x7f479d5f,0x42b0f2b7,2 +np.float32,0x3e6f3c24,0x3e56ff92,2 +np.float32,0x3f45543a,0x3f1249f0,2 +np.float32,0xbea7c1fa,0xbecb40d2,2 +np.float32,0x7de082,0x7de082,2 +np.float32,0x383729,0x383729,2 +np.float32,0xbd91cb90,0xbd973eb3,2 +np.float32,0x7f320218,0x42b0b80f,2 +np.float32,0x5547f2,0x5547f2,2 +np.float32,0x291fe4,0x291fe4,2 +np.float32,0xbe078ba0,0xbe11655f,2 +np.float32,0x7e0c0658,0x42ad7764,2 +np.float32,0x7e129a2b,0x42ad8ee5,2 +np.float32,0x3f7c96d4,0x3f2fbc0c,2 +np.float32,0x3f800000,0x3f317218,2 +np.float32,0x7f131754,0x42b05662,2 +np.float32,0x15f833,0x15f833,2 +np.float32,0x80392ced,0x80392ced,2 +np.float32,0x3f7c141a,0x3f2f7a36,2 +np.float32,0xbf71c03f,0xc038dcfd,2 +np.float32,0xbe14fb2c,0xbe20fff3,2 +np.float32,0xbee0bac6,0xbf13f14c,2 +np.float32,0x801a32dd,0x801a32dd,2 +np.float32,0x8e12d,0x8e12d,2 +np.float32,0x3f48c606,0x3f143a04,2 +np.float32,0x7f418af5,0x42b0e2e6,2 +np.float32,0x3f1f2918,0x3ef78bb7,2 +np.float32,0x11141b,0x11141b,2 
+np.float32,0x3e9fc9e8,0x3e8b11ad,2 +np.float32,0xbea5447a,0xbec79010,2 +np.float32,0xbe31d904,0xbe4359db,2 +np.float32,0x80184667,0x80184667,2 +np.float32,0xbf00503c,0xbf3212c2,2 +np.float32,0x3e0328cf,0x3df6d425,2 +np.float32,0x7ee8e1b7,0x42afdebe,2 +np.float32,0xbef95e24,0xbf2ae5db,2 +np.float32,0x7f3e4eed,0x42b0da45,2 +np.float32,0x3f43ee85,0x3f117fa0,2 +np.float32,0xbcfa2ac0,0xbcfe10fe,2 +np.float32,0x80162774,0x80162774,2 +np.float32,0x372e8b,0x372e8b,2 +np.float32,0x3f263802,0x3f0016b0,2 +np.float32,0x8008725f,0x8008725f,2 +np.float32,0x800beb40,0x800beb40,2 +np.float32,0xbe93308e,0xbead8a77,2 +np.float32,0x3d8a4240,0x3d85cab8,2 +np.float32,0x80179de0,0x80179de0,2 +np.float32,0x7f4a98f2,0x42b0fa4f,2 +np.float32,0x3f0d214e,0x3ee0cff1,2 +np.float32,0x80536c2c,0x80536c2c,2 +np.float32,0x7e7038ed,0x42ae8bbe,2 +np.float32,0x7f345af9,0x42b0bec4,2 +np.float32,0xbf243219,0xbf83442f,2 +np.float32,0x7e0d5555,0x42ad7c27,2 +np.float32,0x762e95,0x762e95,2 +np.float32,0x7ebf4548,0x42af79f6,2 +np.float32,0x8079639e,0x8079639e,2 +np.float32,0x3ef925c0,0x3ecb0260,2 +np.float32,0x3f708695,0x3f2996d6,2 +np.float32,0xfca9f,0xfca9f,2 +np.float32,0x8060dbf4,0x8060dbf4,2 +np.float32,0x4c8840,0x4c8840,2 +np.float32,0xbea922ee,0xbecd4ed5,2 +np.float32,0xbf4f28a9,0xbfd40b98,2 +np.float32,0xbe25ad48,0xbe34ba1b,2 +np.float32,0x3f2fb254,0x3f05c58c,2 +np.float32,0x3f73bcc2,0x3f2b3d5f,2 +np.float32,0xbf479a07,0xbfc1a165,2 +np.float32,0xbeb9a808,0xbee69763,2 +np.float32,0x7eb16a65,0x42af5376,2 +np.float32,0xbeb3e442,0xbedda042,2 +np.float32,0x3d8f439c,0x3d8a79ac,2 +np.float32,0x80347516,0x80347516,2 +np.float32,0x3e8a0c5d,0x3e74738c,2 +np.float32,0xbf0383a4,0xbf389289,2 +np.float32,0x806be8f5,0x806be8f5,2 +np.float32,0x8023f0c5,0x8023f0c5,2 +np.float32,0x2060e9,0x2060e9,2 +np.float32,0xbf759eba,0xc04d239f,2 +np.float32,0x3d84cc5a,0x3d80ab96,2 +np.float32,0xbf57746b,0xbfebdf87,2 +np.float32,0x3e418417,0x3e31401f,2 +np.float32,0xaecce,0xaecce,2 +np.float32,0x3cd1766f,0x3cced45c,2 +np.float32,0x53724a,0x53724a,2 +np.float32,0x3f773710,0x3f2d03de,2 +np.float32,0x8013d040,0x8013d040,2 +np.float32,0x4d0eb2,0x4d0eb2,2 +np.float32,0x8014364a,0x8014364a,2 +np.float32,0x7f3c56c9,0x42b0d4f2,2 +np.float32,0x3eee1e1c,0x3ec3891a,2 +np.float32,0xbdda3eb8,0xbde6c5a0,2 +np.float32,0x26ef4a,0x26ef4a,2 +np.float32,0x7ed3370c,0x42afacbf,2 +np.float32,0xbf06e31b,0xbf3f9ab7,2 +np.float32,0xbe3185f0,0xbe42f556,2 +np.float32,0x3dcf9abe,0x3dc5be41,2 +np.float32,0xbf3696d9,0xbf9fe2bd,2 +np.float32,0x3e68ee50,0x3e51e01a,2 +np.float32,0x3f3d4cc2,0x3f0db6ca,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0xbf03070c,0xbf3792d0,2 +np.float32,0x3ea79e6c,0x3e910092,2 +np.float32,0xbf1a393a,0xbf6c2251,2 +np.float32,0x3f41eb0e,0x3f105afc,2 +np.float32,0x3ceadb2f,0x3ce78d79,2 +np.float32,0xbf5dc105,0xc000be2c,2 +np.float32,0x7ebb5a0e,0x42af6f5c,2 +np.float32,0xbf7c44eb,0xc0875058,2 +np.float32,0x6aaaf4,0x6aaaf4,2 +np.float32,0x807d8f23,0x807d8f23,2 +np.float32,0xbee6b142,0xbf194fef,2 +np.float32,0xbe83f256,0xbe989526,2 +np.float32,0x7d588e,0x7d588e,2 +np.float32,0x7cc80131,0x42aa0542,2 +np.float32,0x3e0ab198,0x3e02124f,2 +np.float32,0xbf6e64db,0xc02b52eb,2 +np.float32,0x3d238b56,0x3d205d1b,2 +np.float32,0xbeb408e2,0xbeddd8bc,2 +np.float32,0x3f78340d,0x3f2d8471,2 +np.float32,0x806162a3,0x806162a3,2 +np.float32,0x804e484f,0x804e484f,2 +np.float32,0xbeb8c576,0xbee53466,2 +np.float32,0x807aab15,0x807aab15,2 +np.float32,0x3f523e20,0x3f197ab8,2 +np.float32,0xbf009190,0xbf3295de,2 +np.float32,0x3df43da5,0x3de6bd82,2 +np.float32,0x7f639aea,0x42b135e6,2 
+np.float32,0x3f1e638a,0x3ef697da,2 +np.float32,0xbf4884de,0xbfc3bac3,2 +np.float32,0xbe9336b6,0xbead931b,2 +np.float32,0x6daf7f,0x6daf7f,2 +np.float32,0xbf1fc152,0xbf7a70b1,2 +np.float32,0x3f103720,0x3ee4c649,2 +np.float32,0x3eeaa227,0x3ec126df,2 +np.float32,0x7f7ea945,0x42b16f69,2 +np.float32,0x3d3cd800,0x3d389ead,2 +np.float32,0x3f3d7268,0x3f0dcc6e,2 +np.float32,0xbf3c1b41,0xbfa9e2e3,2 +np.float32,0x3ecf3818,0x3eadffb2,2 +np.float32,0x3f1af312,0x3ef25372,2 +np.float32,0x48fae4,0x48fae4,2 +np.float64,0x7fedaa1ee4fb543d,0x40862da7ca7c308e,1 +np.float64,0x8007d2d810efa5b1,0x8007d2d810efa5b1,1 +np.float64,0x3fc385e069270bc0,0x3fc22b8884cf2c3b,1 +np.float64,0x68ed4130d1da9,0x68ed4130d1da9,1 +np.float64,0x8008e93e58d1d27d,0x8008e93e58d1d27d,1 +np.float64,0xbfd3d62852a7ac50,0xbfd7be3a7ad1af02,1 +np.float64,0xbfc1fa0ba923f418,0xbfc35f0f19447df7,1 +np.float64,0xbfe01b8cec20371a,0xbfe6658c7e6c8e50,1 +np.float64,0xbfeda81a147b5034,0xc004e9c94f2b91c1,1 +np.float64,0xbfe1c36a97e386d5,0xbfe9ead4d6beaa92,1 +np.float64,0x3fe50be51f2a17ca,0x3fe02c8067d9e5c5,1 +np.float64,0x3febed4d3337da9a,0x3fe413956466134f,1 +np.float64,0x80068ea59ced1d4c,0x80068ea59ced1d4c,1 +np.float64,0x3febe77d5877cefb,0x3fe4107ac088bc71,1 +np.float64,0x800ae77617d5ceed,0x800ae77617d5ceed,1 +np.float64,0x3fd0546b60a0a8d7,0x3fcd16c2e995ab23,1 +np.float64,0xbfe33e1476667c29,0xbfed6d7faec4db2f,1 +np.float64,0x3fe9d2fd51b3a5fb,0x3fe2eef834310219,1 +np.float64,0x8004249878284932,0x8004249878284932,1 +np.float64,0xbfd5b485c72b690c,0xbfda828ccc6a7a5c,1 +np.float64,0x7fcd6e6b6b3adcd6,0x408622807f04768e,1 +np.float64,0x3fd7f9c32caff386,0x3fd45d024514b8da,1 +np.float64,0x7f87eb9d702fd73a,0x40860aa99fcff27f,1 +np.float64,0xbfc5d1f6fb2ba3ec,0xbfc7ec367cb3fecc,1 +np.float64,0x8008316a44d062d5,0x8008316a44d062d5,1 +np.float64,0xbfd54e4358aa9c86,0xbfd9e889d2998a4a,1 +np.float64,0xda65facdb4cc0,0xda65facdb4cc0,1 +np.float64,0x3fc5b4f6f32b69f0,0x3fc40d13aa8e248b,1 +np.float64,0x3fd825a5d5b04b4c,0x3fd47ce73e04d3ff,1 +np.float64,0x7ac9d56ef593b,0x7ac9d56ef593b,1 +np.float64,0xbfd0a51977214a32,0xbfd34702071428be,1 +np.float64,0x3fd21f620b243ec4,0x3fcfea0c02193640,1 +np.float64,0x3fe6fb3f1b2df67e,0x3fe151ffb18c983b,1 +np.float64,0x700de022e01bd,0x700de022e01bd,1 +np.float64,0xbfbb76b81236ed70,0xbfbd0d31deea1ec7,1 +np.float64,0x3fecfc3856f9f870,0x3fe4a2fcadf221e0,1 +np.float64,0x3fede286517bc50c,0x3fe51af2fbd6ef63,1 +np.float64,0x7fdc8da96c391b52,0x408627ce09cfef2b,1 +np.float64,0x8000edfcfb81dbfb,0x8000edfcfb81dbfb,1 +np.float64,0x8009ebc42af3d789,0x8009ebc42af3d789,1 +np.float64,0x7fd658aaf8acb155,0x408625d80cd1ccc9,1 +np.float64,0x3feea584a37d4b09,0x3fe57f29a73729cd,1 +np.float64,0x4cfe494699fca,0x4cfe494699fca,1 +np.float64,0xbfe9d96460b3b2c9,0xbffa62ecfa026c77,1 +np.float64,0x7fdb3852c3b670a5,0x4086276c191dc9b1,1 +np.float64,0xbfe4d1fc9ee9a3f9,0xbff0d37ce37cf479,1 +np.float64,0xffefffffffffffff,0xfff8000000000000,1 +np.float64,0xbfd1c43d7fa3887a,0xbfd4cfbefb5f2c43,1 +np.float64,0x3fec4a8e0d78951c,0x3fe4453a82ca2570,1 +np.float64,0x7fafed74583fdae8,0x4086181017b8dac9,1 +np.float64,0x80076c4ebcced89e,0x80076c4ebcced89e,1 +np.float64,0x8001a9aa7b235356,0x8001a9aa7b235356,1 +np.float64,0x121260fe2424d,0x121260fe2424d,1 +np.float64,0x3fddd028e3bba052,0x3fd87998c4c43c5b,1 +np.float64,0x800ed1cf4a9da39f,0x800ed1cf4a9da39f,1 +np.float64,0xbfef2e63d7fe5cc8,0xc00d53480b16971b,1 +np.float64,0xbfedde3309fbbc66,0xc005ab55b7a7c127,1 +np.float64,0x3fda3e1e85b47c3d,0x3fd5fddafd8d6729,1 +np.float64,0x8007c6443c6f8c89,0x8007c6443c6f8c89,1 
+np.float64,0xbfe101705f2202e0,0xbfe8420817665121,1 +np.float64,0x7fe0bff3c1e17fe7,0x4086291539c56d80,1 +np.float64,0x7fe6001dab6c003a,0x40862b43aa7cb060,1 +np.float64,0x7fbdecf7de3bd9ef,0x40861d170b1c51a5,1 +np.float64,0xbfc0fd508c21faa0,0xbfc23a5876e99fa3,1 +np.float64,0xbfcf6eb14f3edd64,0xbfd208cbf742c8ea,1 +np.float64,0x3f6d40ea403a81d5,0x3f6d33934ab8e799,1 +np.float64,0x7fc32600b6264c00,0x40861f10302357e0,1 +np.float64,0x3fd05870baa0b0e0,0x3fcd1d2af420fac7,1 +np.float64,0x80051d5120aa3aa3,0x80051d5120aa3aa3,1 +np.float64,0x3fdb783fcfb6f080,0x3fd6db229658c083,1 +np.float64,0x3fe0b61199e16c24,0x3fdae41e277be2eb,1 +np.float64,0x3daf62167b5ed,0x3daf62167b5ed,1 +np.float64,0xbfec3c53b6f878a7,0xc0011f0ce7a78a2a,1 +np.float64,0x800fc905161f920a,0x800fc905161f920a,1 +np.float64,0x3fdc7b9cc138f73a,0x3fd78f9c2360e661,1 +np.float64,0x7fe4079e97a80f3c,0x40862a83795f2443,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x7fe6da5345adb4a6,0x40862b9183c1e4b0,1 +np.float64,0xbfd0a76667214ecc,0xbfd34a1e0c1f6186,1 +np.float64,0x37fb0b906ff62,0x37fb0b906ff62,1 +np.float64,0x7fe170e59fa2e1ca,0x408629680a55e5c5,1 +np.float64,0x3fea900c77752019,0x3fe356eec75aa345,1 +np.float64,0x3fc575c63a2aeb8c,0x3fc3d701167d76b5,1 +np.float64,0x3fe8b45da87168bc,0x3fe24ecbb778fd44,1 +np.float64,0xbfcb990ab5373214,0xbfcf1596c076813c,1 +np.float64,0xf146fdfbe28e0,0xf146fdfbe28e0,1 +np.float64,0x8001fcd474c3f9aa,0x8001fcd474c3f9aa,1 +np.float64,0xbfe9b555eeb36aac,0xbffa0630c3bb485b,1 +np.float64,0x800f950be83f2a18,0x800f950be83f2a18,1 +np.float64,0x7feb0e03ab761c06,0x40862ceb30e36887,1 +np.float64,0x7fca51bd4a34a37a,0x4086219b9dfd35c9,1 +np.float64,0xbfdc27c34cb84f86,0xbfe28ccde8d6bc08,1 +np.float64,0x80009ce1714139c4,0x80009ce1714139c4,1 +np.float64,0x8005290fb1ea5220,0x8005290fb1ea5220,1 +np.float64,0xbfee81e6473d03cd,0xc00885972ca1699b,1 +np.float64,0x7fcfb11a373f6233,0x408623180b8f75d9,1 +np.float64,0xbfcb9c4bfd373898,0xbfcf19bd25881928,1 +np.float64,0x7feaec5885f5d8b0,0x40862ce136050e6c,1 +np.float64,0x8009e17a4a53c2f5,0x8009e17a4a53c2f5,1 +np.float64,0xbfe1cceb9e6399d7,0xbfea0038bd3def20,1 +np.float64,0x8009170bd7122e18,0x8009170bd7122e18,1 +np.float64,0xb2b6f7f1656df,0xb2b6f7f1656df,1 +np.float64,0x3fc75bfd1f2eb7f8,0x3fc574c858332265,1 +np.float64,0x3fa24c06ec249800,0x3fa1fa462ffcb8ec,1 +np.float64,0xaa9a4d2d5534a,0xaa9a4d2d5534a,1 +np.float64,0xbfd7b76208af6ec4,0xbfdda0c3200dcc9f,1 +np.float64,0x7f8cbab73039756d,0x40860c20cba57a94,1 +np.float64,0x3fdbcf9f48b79f3f,0x3fd71827a60e8b6d,1 +np.float64,0xbfdd60f71a3ac1ee,0xbfe3a94bc8cf134d,1 +np.float64,0xb9253589724a7,0xb9253589724a7,1 +np.float64,0xbfcf28e37e3e51c8,0xbfd1da9977b741e3,1 +np.float64,0x80011457f7e228b1,0x80011457f7e228b1,1 +np.float64,0x7fec33df737867be,0x40862d404a897122,1 +np.float64,0xae55f8f95cabf,0xae55f8f95cabf,1 +np.float64,0xbfc1ab9397235728,0xbfc303e5533d4a5f,1 +np.float64,0x7fef0f84b3be1f08,0x40862e05f9ba7118,1 +np.float64,0x7fdc94f328b929e5,0x408627d01449d825,1 +np.float64,0x3fee1b598c7c36b3,0x3fe53847be166834,1 +np.float64,0x3fee8326f37d064e,0x3fe56d96f3fbcf43,1 +np.float64,0x3fe7b18a83ef6316,0x3fe1bb6a6d48c675,1 +np.float64,0x3fe5db969c6bb72e,0x3fe0a8d7d151996c,1 +np.float64,0x3e3391d27c673,0x3e3391d27c673,1 +np.float64,0x3fe79a46d76f348e,0x3fe1ae09a96ea628,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x7fe57d6505aafac9,0x40862b13925547f1,1 +np.float64,0x3fc433371d28666e,0x3fc2c196a764c47b,1 +np.float64,0x8008dbf69cd1b7ee,0x8008dbf69cd1b7ee,1 +np.float64,0xbfe744f459ee89e8,0xbff4c847ad3ee152,1 
+np.float64,0x80098aa245331545,0x80098aa245331545,1 +np.float64,0x6747112ece8e3,0x6747112ece8e3,1 +np.float64,0x5d342a40ba69,0x5d342a40ba69,1 +np.float64,0xf7a17739ef42f,0xf7a17739ef42f,1 +np.float64,0x3fe1b34a9d236695,0x3fdc2d7c4e2c347a,1 +np.float64,0x7fb53bf5ec2a77eb,0x40861a585ec8f7ff,1 +np.float64,0xbfe6256f1cec4ade,0xbff2d89a36be65ae,1 +np.float64,0xb783bc9b6f078,0xb783bc9b6f078,1 +np.float64,0xbfedf74a3bfbee94,0xc0060bb6f2bc11ef,1 +np.float64,0x3fda2a5eccb454be,0x3fd5efd7f18b8e81,1 +np.float64,0xbfb3838ab2270718,0xbfb44c337fbca3c3,1 +np.float64,0x3fb4ac6dc22958e0,0x3fb3e194ca01a502,1 +np.float64,0x76c11aaaed824,0x76c11aaaed824,1 +np.float64,0x80025bb1af04b764,0x80025bb1af04b764,1 +np.float64,0x3fdc02740ab804e8,0x3fd73b8cd6f95f19,1 +np.float64,0x3fe71856f5ee30ae,0x3fe162e9fafb4428,1 +np.float64,0x800236f332646de7,0x800236f332646de7,1 +np.float64,0x7fe13fd9d2e27fb3,0x408629516b42a317,1 +np.float64,0x7fdf6bbd34bed779,0x40862892069d805c,1 +np.float64,0x3fd4727beba8e4f8,0x3fd1be5b48d9e282,1 +np.float64,0x800e0fac9e5c1f59,0x800e0fac9e5c1f59,1 +np.float64,0xfb54423ff6a89,0xfb54423ff6a89,1 +np.float64,0x800fbf7ed47f7efe,0x800fbf7ed47f7efe,1 +np.float64,0x3fe9d41fa2f3a840,0x3fe2ef98dc1fd463,1 +np.float64,0x800d733e805ae67d,0x800d733e805ae67d,1 +np.float64,0x3feebe4c46fd7c98,0x3fe58bcf7f47264e,1 +np.float64,0x7fe1ab77b5e356ee,0x40862982bb3dce34,1 +np.float64,0xbfdddac05abbb580,0xbfe41aa45f72d5a2,1 +np.float64,0x3fe14219dee28434,0x3fdb9b137d1f1220,1 +np.float64,0x3fe25d3d5a24ba7b,0x3fdd06e1cf32d35a,1 +np.float64,0x8000fa4fbe81f4a0,0x8000fa4fbe81f4a0,1 +np.float64,0x3fe303e23e6607c4,0x3fddd94982efa9f1,1 +np.float64,0x3fe89cf5d83139ec,0x3fe24193a2e12f75,1 +np.float64,0x3fe9b36ef87366de,0x3fe2dd7cdc25a4a5,1 +np.float64,0xbfdb8b38f8371672,0xbfe2023ba7e002bb,1 +np.float64,0xafc354955f86b,0xafc354955f86b,1 +np.float64,0xbfe2f3d49e65e7a9,0xbfecb557a94123d3,1 +np.float64,0x800496617c092cc4,0x800496617c092cc4,1 +np.float64,0x32db0cfa65b62,0x32db0cfa65b62,1 +np.float64,0xbfd893bfa2b12780,0xbfdf02a8c1e545aa,1 +np.float64,0x7fd5ac927d2b5924,0x408625997e7c1f9b,1 +np.float64,0x3fde9defb8bd3be0,0x3fd9056190986349,1 +np.float64,0x80030cfeb54619fe,0x80030cfeb54619fe,1 +np.float64,0x3fcba85b273750b8,0x3fc90a5ca976594f,1 +np.float64,0x3fe98f6f5cf31edf,0x3fe2c97fcb4eca25,1 +np.float64,0x3fe33dbf90667b80,0x3fde21b83321b993,1 +np.float64,0x3fe4686636e8d0cc,0x3fdf928cdca751b3,1 +np.float64,0x80018ade6ce315be,0x80018ade6ce315be,1 +np.float64,0x7fa9af70c8335ee1,0x408616528cd5a906,1 +np.float64,0x3fbeb460aa3d68c0,0x3fbcff96b00a2193,1 +np.float64,0x7fa82c869830590c,0x408615d6598d9368,1 +np.float64,0xd08c0e6fa1182,0xd08c0e6fa1182,1 +np.float64,0x3fef4eb750fe9d6f,0x3fe5d522fd4e7f64,1 +np.float64,0xbfc586f5492b0dec,0xbfc791eaae92aad1,1 +np.float64,0x7fede64ac7bbcc95,0x40862db7f444fa7b,1 +np.float64,0x3fe540003d6a8000,0x3fe04bdfc2916a0b,1 +np.float64,0x8009417fe6f28300,0x8009417fe6f28300,1 +np.float64,0x3fe6959cf16d2b3a,0x3fe116a1ce01887b,1 +np.float64,0x3fb0a40036214800,0x3fb01f447778219a,1 +np.float64,0x3feff26e91ffe4dd,0x3fe627798fc859a7,1 +np.float64,0x7fed8e46cd7b1c8d,0x40862da044a1d102,1 +np.float64,0x7fec4eb774f89d6e,0x40862d47e43edb53,1 +np.float64,0x3fe800e5e07001cc,0x3fe1e8e2b9105fc2,1 +np.float64,0x800f4eb2f9be9d66,0x800f4eb2f9be9d66,1 +np.float64,0x800611659bcc22cc,0x800611659bcc22cc,1 +np.float64,0x3fd66e65d2acdccc,0x3fd33ad63a5e1000,1 +np.float64,0x800a9085b7f5210c,0x800a9085b7f5210c,1 +np.float64,0x7fdf933a3fbf2673,0x4086289c0e292f2b,1 +np.float64,0x1cd1ba7a39a38,0x1cd1ba7a39a38,1 
+np.float64,0xbfefd0b10fffa162,0xc0149ded900ed851,1 +np.float64,0xbfe8c63485b18c69,0xbff7cf3078b1574f,1 +np.float64,0x3fecde56ca79bcae,0x3fe4934afbd7dda9,1 +np.float64,0x8006cd6888cd9ad2,0x8006cd6888cd9ad2,1 +np.float64,0x3fd7a391c2af4724,0x3fd41e2f74df2329,1 +np.float64,0x3fe6a8ad58ed515a,0x3fe121ccfb28e6f5,1 +np.float64,0x7fe18a80dd631501,0x40862973c09086b9,1 +np.float64,0xbf74fd6d8029fb00,0xbf750b3e368ebe6b,1 +np.float64,0x3fdd35e93dba6bd4,0x3fd810071faaffad,1 +np.float64,0x3feb0d8f57361b1f,0x3fe39b3abdef8b7a,1 +np.float64,0xbfd5ec7288abd8e6,0xbfdad764df0d2ca1,1 +np.float64,0x7fdc848272b90904,0x408627cb78f3fb9e,1 +np.float64,0x800ed3eda91da7db,0x800ed3eda91da7db,1 +np.float64,0x3fefac64857f58c9,0x3fe60459dbaad1ba,1 +np.float64,0x3fd1df7a5ba3bef4,0x3fcf864a39b926ff,1 +np.float64,0xfe26ca4bfc4da,0xfe26ca4bfc4da,1 +np.float64,0xbfd1099f8da21340,0xbfd3cf6e6efe934b,1 +np.float64,0xbfe15de9a7a2bbd4,0xbfe909cc895f8795,1 +np.float64,0x3fe89714ed712e2a,0x3fe23e40d31242a4,1 +np.float64,0x800387113e470e23,0x800387113e470e23,1 +np.float64,0x3fe4f80730e9f00e,0x3fe0208219314cf1,1 +np.float64,0x2f95a97c5f2b6,0x2f95a97c5f2b6,1 +np.float64,0x800ea7cdd87d4f9c,0x800ea7cdd87d4f9c,1 +np.float64,0xbf64b967c0297300,0xbf64c020a145b7a5,1 +np.float64,0xbfc5a91a342b5234,0xbfc7bafd77a61d81,1 +np.float64,0xbfe2226fe76444e0,0xbfeac33eb1d1b398,1 +np.float64,0x3fc6aaa8d42d5552,0x3fc4de79f5c68cd4,1 +np.float64,0x3fe54fd4c1ea9faa,0x3fe05561a9a5922b,1 +np.float64,0x80029c1f75653840,0x80029c1f75653840,1 +np.float64,0xbfcb4a84a2369508,0xbfceb1a23bac3995,1 +np.float64,0x80010abeff02157f,0x80010abeff02157f,1 +np.float64,0x7f92d12cf825a259,0x40860e49bde3a5b6,1 +np.float64,0x800933e7027267ce,0x800933e7027267ce,1 +np.float64,0x3fc022b12e204562,0x3fbe64acc53ed887,1 +np.float64,0xbfe35f938de6bf27,0xbfedc1f3e443c016,1 +np.float64,0x1f8d9bae3f1b4,0x1f8d9bae3f1b4,1 +np.float64,0x3fe552f22ceaa5e4,0x3fe057404072350f,1 +np.float64,0xbfa73753442e6ea0,0xbfa7c24a100190f1,1 +np.float64,0x7fb3e2982827c52f,0x408619d1efa676b6,1 +np.float64,0xbfd80cb7a5301970,0xbfde28e65f344f33,1 +np.float64,0xbfcde835973bd06c,0xbfd10806fba46c8f,1 +np.float64,0xbfd4e3c749a9c78e,0xbfd949aff65de39c,1 +np.float64,0x3fcb4b9d6f36973b,0x3fc8be02ad6dc0d3,1 +np.float64,0x1a63000034c7,0x1a63000034c7,1 +np.float64,0x7fdc9c751e3938e9,0x408627d22df71959,1 +np.float64,0x3fd74f3f712e9e7f,0x3fd3e07df0c37ec1,1 +np.float64,0xbfceab74d33d56e8,0xbfd187e99bf82903,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfb2cca466259948,0xbfb3868208e8de30,1 +np.float64,0x800204688b8408d2,0x800204688b8408d2,1 +np.float64,0x3e4547407c8aa,0x3e4547407c8aa,1 +np.float64,0xbfe4668846e8cd10,0xbff03c85189f3818,1 +np.float64,0x800dd350245ba6a0,0x800dd350245ba6a0,1 +np.float64,0xbfbc13c160382780,0xbfbdbd56ce996d16,1 +np.float64,0x7fe25a628a24b4c4,0x408629d06eb2d64d,1 +np.float64,0x3fd19dabbc233b57,0x3fcf1f3ed1d34c8c,1 +np.float64,0x547e20faa8fc5,0x547e20faa8fc5,1 +np.float64,0xbfe19392c6232726,0xbfe97ffe4f303335,1 +np.float64,0x3f87f9f6702ff400,0x3f87d64fb471bb04,1 +np.float64,0x9dfc52db3bf8b,0x9dfc52db3bf8b,1 +np.float64,0x800e1f5a9adc3eb5,0x800e1f5a9adc3eb5,1 +np.float64,0xbfddbd09c8bb7a14,0xbfe3fed7d7cffc70,1 +np.float64,0xbfeda71af87b4e36,0xc004e6631c514544,1 +np.float64,0xbfdbfcfe1bb7f9fc,0xbfe266b5d4a56265,1 +np.float64,0x3fe4ee78cd69dcf2,0x3fe01abba4e81fc9,1 +np.float64,0x800f13b820de2770,0x800f13b820de2770,1 +np.float64,0x3f861e09702c3c00,0x3f85ffae83b02c4f,1 +np.float64,0xbfc0972479212e48,0xbfc1c4bf70b30cbc,1 +np.float64,0x7fef057ef57e0afd,0x40862e036479f6a9,1 
+np.float64,0x8bdbabe517b76,0x8bdbabe517b76,1 +np.float64,0xbfec495417f892a8,0xc0013ade88746d18,1 +np.float64,0x3fec680ab3f8d015,0x3fe454dd304b560d,1 +np.float64,0xbfae7ce60c3cf9d0,0xbfaf6eef15bbe56b,1 +np.float64,0x3fec314124786282,0x3fe437ca06294f5a,1 +np.float64,0x7fd5ed05b82bda0a,0x408625b125518e58,1 +np.float64,0x3feac9f02f3593e0,0x3fe3768104dd5cb7,1 +np.float64,0x0,0x0,1 +np.float64,0xbfddd2abd5bba558,0xbfe41312b8ea20de,1 +np.float64,0xbfedf9558c7bf2ab,0xc00613c53e0bb33a,1 +np.float64,0x3fef245ffefe48c0,0x3fe5bfb4dfe3b7a5,1 +np.float64,0x7fe178604922f0c0,0x4086296b77d5eaef,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x7fed026766ba04ce,0x40862d7a0dc45643,1 +np.float64,0xbfde27d8c3bc4fb2,0xbfe46336b6447697,1 +np.float64,0x3fe9485d9cb290bb,0x3fe2a1e4b6419423,1 +np.float64,0xbfe27b8a7464f715,0xbfeb9382f5b16f65,1 +np.float64,0x5c34d274b869b,0x5c34d274b869b,1 +np.float64,0xbfeee0b7453dc16f,0xc00acdb46459b6e6,1 +np.float64,0x7fe3dfb4d4e7bf69,0x40862a73785fdf12,1 +np.float64,0xb4635eef68c6c,0xb4635eef68c6c,1 +np.float64,0xbfe522a2c82a4546,0xbff148912a59a1d6,1 +np.float64,0x8009ba38a9737472,0x8009ba38a9737472,1 +np.float64,0xbfc056ff3820ae00,0xbfc17b2205fa180d,1 +np.float64,0x7fe1c8b8a0239170,0x4086298feeee6133,1 +np.float64,0x3fe2d2c6b9e5a58e,0x3fdd9b907471031b,1 +np.float64,0x3fa0a161bc2142c0,0x3fa05db36f6a073b,1 +np.float64,0x3fdef4268ebde84c,0x3fd93f980794d1e7,1 +np.float64,0x800ecd9fe2fd9b40,0x800ecd9fe2fd9b40,1 +np.float64,0xbfc9fbd45e33f7a8,0xbfcd0afc47c340f6,1 +np.float64,0x3fe8c3035b718606,0x3fe2570eb65551a1,1 +np.float64,0xbfe78c4ad2ef1896,0xbff54d25b3328742,1 +np.float64,0x8006f5dcf8adebbb,0x8006f5dcf8adebbb,1 +np.float64,0x800301dca2a603ba,0x800301dca2a603ba,1 +np.float64,0xad4289e55a851,0xad4289e55a851,1 +np.float64,0x80037764f9e6eecb,0x80037764f9e6eecb,1 +np.float64,0xbfe73575b26e6aec,0xbff4abfb5e985c62,1 +np.float64,0xbfc6cb91652d9724,0xbfc91a8001b33ec2,1 +np.float64,0xbfe3a918ffe75232,0xbfee7e6e4fd34c53,1 +np.float64,0x9bc84e2b3790a,0x9bc84e2b3790a,1 +np.float64,0x7fdeec303cbdd85f,0x408628714a49d996,1 +np.float64,0x3fe1d1dcb763a3ba,0x3fdc54ce060dc7f4,1 +np.float64,0x8008ae6432b15cc9,0x8008ae6432b15cc9,1 +np.float64,0x3fd8022fa2b00460,0x3fd46322bf02a609,1 +np.float64,0xbfc55b64472ab6c8,0xbfc75d9568f462e0,1 +np.float64,0xbfe8b165437162ca,0xbff7a15e2ead645f,1 +np.float64,0x7f759330feeb3,0x7f759330feeb3,1 +np.float64,0xbfd504f68eaa09ee,0xbfd97b06c01d7473,1 +np.float64,0x54702d5aa8e06,0x54702d5aa8e06,1 +np.float64,0xbfed1779337a2ef2,0xc0032f7109ef5a51,1 +np.float64,0xe248bd4dc4918,0xe248bd4dc4918,1 +np.float64,0xbfd8c59150318b22,0xbfdf53bca6ca8b1e,1 +np.float64,0xbfe3b9d942e773b2,0xbfeea9fcad277ba7,1 +np.float64,0x800934ec127269d9,0x800934ec127269d9,1 +np.float64,0xbfbb7f535a36fea8,0xbfbd16d61b6c52b8,1 +np.float64,0xccb185a199631,0xccb185a199631,1 +np.float64,0x3fe3dda76fe7bb4e,0x3fdee83bc6094301,1 +np.float64,0xbfe0c902f5e19206,0xbfe7ca7c0e888006,1 +np.float64,0xbfefeed08cbfdda1,0xc018aadc483c8724,1 +np.float64,0x7fd0c05c52a180b8,0x40862389daf64aac,1 +np.float64,0xbfd28e3323a51c66,0xbfd5e9ba278fb685,1 +np.float64,0xbef4103b7de82,0xbef4103b7de82,1 +np.float64,0x3fe7661fd12ecc40,0x3fe18ff7dfb696e2,1 +np.float64,0x3fddd5f2f0bbabe4,0x3fd87d8bb6719c3b,1 +np.float64,0x800b3914cfd6722a,0x800b3914cfd6722a,1 +np.float64,0xf3f09a97e7e14,0xf3f09a97e7e14,1 +np.float64,0x7f97092b502e1256,0x40860fe8054cf54e,1 +np.float64,0xbfdbec7917b7d8f2,0xbfe2580b4b792c79,1 +np.float64,0x7fe7ff215aaffe42,0x40862bf5887fa062,1 +np.float64,0x80080186e570030e,0x80080186e570030e,1 
+np.float64,0xbfc27f05e624fe0c,0xbfc3fa214be4adc4,1 +np.float64,0x3fe4481be1689038,0x3fdf6b11e9c4ca72,1 +np.float64,0x3fd642cc9cac8598,0x3fd31a857fe70227,1 +np.float64,0xbef8782d7df0f,0xbef8782d7df0f,1 +np.float64,0x8003077dc2e60efc,0x8003077dc2e60efc,1 +np.float64,0x80083eb5a2507d6c,0x80083eb5a2507d6c,1 +np.float64,0x800e8d1eb77d1a3e,0x800e8d1eb77d1a3e,1 +np.float64,0xbfc7737cd22ee6f8,0xbfc9e7716f03f1fc,1 +np.float64,0xbfe9a2b4ddf3456a,0xbff9d71664a8fc78,1 +np.float64,0x7fe67c7d322cf8f9,0x40862b7066465194,1 +np.float64,0x3fec080ce2b8101a,0x3fe421dac225be46,1 +np.float64,0xbfe6d27beb6da4f8,0xbff3fbb1add521f7,1 +np.float64,0x3fdd4f96ceba9f2e,0x3fd821a638986dbe,1 +np.float64,0x3fbd89f1303b13e2,0x3fbbf49223a9d002,1 +np.float64,0xbfe94e2b9d329c57,0xbff907e549c534f5,1 +np.float64,0x3fe2f2cc51e5e599,0x3fddc3d6b4a834a1,1 +np.float64,0xfdcb5b49fb96c,0xfdcb5b49fb96c,1 +np.float64,0xbfea7108fa74e212,0xbffc01b392f4897b,1 +np.float64,0x3fd38baef7a7175c,0x3fd10e7fd3b958dd,1 +np.float64,0x3fa75bf9cc2eb800,0x3fa6d792ecdedb8e,1 +np.float64,0x7fd19fd20aa33fa3,0x408623f1e2cd04c3,1 +np.float64,0x3fd62c708dac58e0,0x3fd309ec7818d16e,1 +np.float64,0x3fdf489047be9120,0x3fd978640617c758,1 +np.float64,0x1,0x1,1 +np.float64,0xbfe21e7c3ea43cf8,0xbfeaba21320697d3,1 +np.float64,0xbfd3649047a6c920,0xbfd71a6f14223744,1 +np.float64,0xbfd68ca68c2d194e,0xbfdbcce6784e5d44,1 +np.float64,0x3fdb26b0ea364d62,0x3fd6a1f86f64ff74,1 +np.float64,0xbfd843821cb08704,0xbfde80e90805ab3f,1 +np.float64,0x3fd508a27aaa1144,0x3fd22fc203a7b9d8,1 +np.float64,0xbfdb951c7eb72a38,0xbfe20aeaec13699b,1 +np.float64,0x3fef556ba57eaad7,0x3fe5d8865cce0a6d,1 +np.float64,0x3fd0d224b3a1a448,0x3fcdde7be5d7e21e,1 +np.float64,0x8007ff272baffe4f,0x8007ff272baffe4f,1 +np.float64,0x3fe1c7bddf638f7c,0x3fdc47cc6cf2f5cd,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x2016d560402f,0x2016d560402f,1 +np.float64,0xbfcca10be9394218,0xbfd033f36b94fc54,1 +np.float64,0xbfdb833628b7066c,0xbfe1fb344b840c70,1 +np.float64,0x3fd8529cb3b0a539,0x3fd49d847fe77218,1 +np.float64,0xbfc0b0ebab2161d8,0xbfc1e260c60ffd1b,1 +np.float64,0xbfea8b9a79f51735,0xbffc4ee6be8a0fa2,1 +np.float64,0x7feca8fab7f951f4,0x40862d613e454646,1 +np.float64,0x7fd8c52d82318a5a,0x408626aaf37423a3,1 +np.float64,0xbfe364ad4526c95a,0xbfedcee39bc93ff5,1 +np.float64,0x800b78161256f02d,0x800b78161256f02d,1 +np.float64,0xbfd55f0153aabe02,0xbfda01a78f72d494,1 +np.float64,0x800315a5f0662b4d,0x800315a5f0662b4d,1 +np.float64,0x7fe4c0dca02981b8,0x40862acc27e4819f,1 +np.float64,0x8009825c703304b9,0x8009825c703304b9,1 +np.float64,0x3fe6e94e1cadd29c,0x3fe1478ccc634f49,1 +np.float64,0x7fe622d8586c45b0,0x40862b504177827e,1 +np.float64,0x3fe4458600688b0c,0x3fdf67e79a84b953,1 +np.float64,0xbfdd75d8a1baebb2,0xbfe3bc9e6ca1bbb5,1 +np.float64,0x3fde789c6bbcf138,0x3fd8ec1d435531b3,1 +np.float64,0x3fe7052b94ee0a58,0x3fe157c5c4418dc1,1 +np.float64,0x7fef31652abe62c9,0x40862e0eaeabcfc0,1 +np.float64,0x3fe279691ee4f2d2,0x3fdd2aa41eb43cd4,1 +np.float64,0xbfd533fa95aa67f6,0xbfd9c12f516d29d7,1 +np.float64,0x3fe6d057f96da0b0,0x3fe138fd96693a6a,1 +np.float64,0x800bad984f775b31,0x800bad984f775b31,1 +np.float64,0x7fdd6fdba4badfb6,0x4086280c73d8ef97,1 +np.float64,0x7fe9b5c0eef36b81,0x40862c82c6f57a53,1 +np.float64,0x8000bc02ece17807,0x8000bc02ece17807,1 +np.float64,0xbff0000000000000,0xfff0000000000000,1 +np.float64,0xbfed430be3fa8618,0xc003aaf338c75b3c,1 +np.float64,0x3fee17b759fc2f6f,0x3fe53668696bf48b,1 +np.float64,0x3f8d4cf9d03a9a00,0x3f8d17d2f532afdc,1 +np.float64,0x8005d6257b8bac4c,0x8005d6257b8bac4c,1 
+np.float64,0xbfd17a6df9a2f4dc,0xbfd469e3848adc6e,1 +np.float64,0xb28a293965145,0xb28a293965145,1 +np.float64,0xbfe7d011e42fa024,0xbff5cf818998c8ec,1 +np.float64,0xbfe74f0f136e9e1e,0xbff4dad6ebb0443c,1 +np.float64,0x800f249fc9be4940,0x800f249fc9be4940,1 +np.float64,0x2542f8fe4a860,0x2542f8fe4a860,1 +np.float64,0xc48d40cd891a8,0xc48d40cd891a8,1 +np.float64,0x3fe4e64bc8e9cc98,0x3fe015c9eb3caa53,1 +np.float64,0x3fd33881eca67104,0x3fd0cea886be2457,1 +np.float64,0xbfd01748fba02e92,0xbfd28875959e6901,1 +np.float64,0x7fb7ab01f22f5603,0x40861b369927bf53,1 +np.float64,0xbfe340274ce6804e,0xbfed72b39f0ebb24,1 +np.float64,0x7fc16c0c3422d817,0x40861e4eaf1a286c,1 +np.float64,0x3fc26944a324d288,0x3fc133a77b356ac4,1 +np.float64,0xa149d7134293b,0xa149d7134293b,1 +np.float64,0x800837382d106e71,0x800837382d106e71,1 +np.float64,0x797d1740f2fa4,0x797d1740f2fa4,1 +np.float64,0xc3f15b7787e2c,0xc3f15b7787e2c,1 +np.float64,0x80cad1b90195a,0x80cad1b90195a,1 +np.float64,0x3fdd8f1142bb1e23,0x3fd84d21490d1ce6,1 +np.float64,0xbfbde6c9123bcd90,0xbfbfcc030a86836a,1 +np.float64,0x8007f77e032feefd,0x8007f77e032feefd,1 +np.float64,0x3fe74fed1c6e9fda,0x3fe18322cf19cb61,1 +np.float64,0xbfd8a40bbcb14818,0xbfdf1d23520ba74b,1 +np.float64,0xbfeb7a0e6076f41d,0xbfff4ddfb926efa5,1 +np.float64,0xbfcb8c5f663718c0,0xbfcf0570f702bda9,1 +np.float64,0xf668cd97ecd1a,0xf668cd97ecd1a,1 +np.float64,0xbfe92accf572559a,0xbff8b4393878ffdb,1 +np.float64,0xbfeaa955567552ab,0xbffca70c7d73eee5,1 +np.float64,0xbfe083a14f610742,0xbfe739d84bc35077,1 +np.float64,0x78290568f0521,0x78290568f0521,1 +np.float64,0x3fe94bae2372975c,0x3fe2a3beac5c9858,1 +np.float64,0x3fca4fbab9349f78,0x3fc7edbca2492acb,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x7fb9eb505433d6a0,0x40861bf0adedb74d,1 +np.float64,0x7fdc66f72a38cded,0x408627c32aeecf0f,1 +np.float64,0x2e8e6f445d1cf,0x2e8e6f445d1cf,1 +np.float64,0xbfec43195af88633,0xc0012d7e3f91b7e8,1 +np.float64,0x7fcdb971e93b72e3,0x40862294c9e3a7bc,1 +np.float64,0x800cabc461195789,0x800cabc461195789,1 +np.float64,0x2c79709c58f2f,0x2c79709c58f2f,1 +np.float64,0x8005d772d3cbaee6,0x8005d772d3cbaee6,1 +np.float64,0x3fe84d8c03709b18,0x3fe21490ce3673dd,1 +np.float64,0x7fe5578adc2aaf15,0x40862b056e8437d4,1 +np.float64,0xbf91298c58225320,0xbf914ec86c32d11f,1 +np.float64,0xc7ed2b6d8fda6,0xc7ed2b6d8fda6,1 +np.float64,0x2761404c4ec29,0x2761404c4ec29,1 +np.float64,0x3fbad3c48835a789,0x3fb9833c02385305,1 +np.float64,0x3fa46fee5428dfe0,0x3fa40a357fb24c23,1 +np.float64,0xbfe3900c6fe72019,0xbfee3dba29dd9d43,1 +np.float64,0x3fe7a9e41a6f53c8,0x3fe1b704dfb9884b,1 +np.float64,0xbfe74a7a1eee94f4,0xbff4d269cacb1f29,1 +np.float64,0xbfee609c72fcc139,0xc007da8499d34123,1 +np.float64,0x3fef2d5fc23e5ac0,0x3fe5c44414e59cb4,1 +np.float64,0xbfd7bdc0402f7b80,0xbfddaae1e7bb78fb,1 +np.float64,0xd71ee01dae3dc,0xd71ee01dae3dc,1 +np.float64,0x3fe98cbcdef3197a,0x3fe2c7ffe33c4541,1 +np.float64,0x8000f8dbb3a1f1b8,0x8000f8dbb3a1f1b8,1 +np.float64,0x3fe3e98ad567d316,0x3fdef6e58058313f,1 +np.float64,0x41ad0bfc835a2,0x41ad0bfc835a2,1 +np.float64,0x7fdcc2dc0d3985b7,0x408627dce39f77af,1 +np.float64,0xbfe47b980de8f730,0xbff059acdccd6e2b,1 +np.float64,0xbfef49b6577e936d,0xc00e714f46b2ccc1,1 +np.float64,0x3fac31816c386300,0x3fab71cb92b0db8f,1 +np.float64,0x3fe59097e76b2130,0x3fe07c299fd1127c,1 +np.float64,0xbfecf0df5cf9e1bf,0xc002c7ebdd65039c,1 +np.float64,0x3fd2b7d0b6a56fa1,0x3fd06b638990ae02,1 +np.float64,0xbfeb68deecf6d1be,0xbfff1187e042d3e4,1 +np.float64,0x3fd44a9771a8952f,0x3fd1a01867c5e302,1 +np.float64,0xf79a9dedef354,0xf79a9dedef354,1 
+np.float64,0x800c25a170d84b43,0x800c25a170d84b43,1 +np.float64,0x3ff0000000000000,0x3fe62e42fefa39ef,1 +np.float64,0x3fbff4f7623fe9f0,0x3fbe1d3878f4c417,1 +np.float64,0xd284c845a5099,0xd284c845a5099,1 +np.float64,0xbfe3c7815f678f02,0xbfeecdab5ca2e651,1 +np.float64,0x3fc19c934e233927,0x3fc08036104b1f23,1 +np.float64,0x800b6096de16c12e,0x800b6096de16c12e,1 +np.float64,0xbfe962a67e32c54d,0xbff9392313a112a1,1 +np.float64,0x2b9d0116573a1,0x2b9d0116573a1,1 +np.float64,0x3fcab269ed3564d4,0x3fc83f7e1c3095b7,1 +np.float64,0x3fc8c78d86318f1b,0x3fc6a6cde5696f99,1 +np.float64,0xd5b1e9b5ab63d,0xd5b1e9b5ab63d,1 +np.float64,0xbfed802a47fb0054,0xc00465cad3b5b0ef,1 +np.float64,0xbfd73aaf08ae755e,0xbfdcdbd62b8af271,1 +np.float64,0xbfd4f13c0229e278,0xbfd95dacff79e570,1 +np.float64,0xbfe9622808f2c450,0xbff937f13c397e8d,1 +np.float64,0xbfeddfa62efbbf4c,0xc005b0c835eed829,1 +np.float64,0x3fd65663d4acacc8,0x3fd3290cd0e675dc,1 +np.float64,0x8005e890f1abd123,0x8005e890f1abd123,1 +np.float64,0xbfe924919fb24923,0xbff8a5a827a28756,1 +np.float64,0x3fe8cdf490719be9,0x3fe25d39535e8366,1 +np.float64,0x7fc229e6ff2453cd,0x40861ea40ef87a5a,1 +np.float64,0x3fe5cf53ceeb9ea8,0x3fe0a18e0b65f27e,1 +np.float64,0xa79cf6fb4f39f,0xa79cf6fb4f39f,1 +np.float64,0x7fddbb3c0f3b7677,0x40862820d5edf310,1 +np.float64,0x3e1011de7c203,0x3e1011de7c203,1 +np.float64,0x3fc0b59a83216b38,0x3fbf6916510ff411,1 +np.float64,0x8647f98d0c8ff,0x8647f98d0c8ff,1 +np.float64,0x8005dad33ecbb5a7,0x8005dad33ecbb5a7,1 +np.float64,0x8a80d0631501a,0x8a80d0631501a,1 +np.float64,0xbfe18f7d6ee31efb,0xbfe976f06713afc1,1 +np.float64,0xbfe06eaed560dd5e,0xbfe70eac696933e6,1 +np.float64,0xbfed8ef93c7b1df2,0xc00495bfa3195b53,1 +np.float64,0x3febe9c24677d385,0x3fe411b10db16c42,1 +np.float64,0x7fd5d80c1fabb017,0x408625a97a7787ba,1 +np.float64,0x3fca79b59334f368,0x3fc8108a521341dc,1 +np.float64,0xbfccf8db4339f1b8,0xbfd06c9a5424aadb,1 +np.float64,0xbfea5ac5a574b58b,0xbffbc21d1405d840,1 +np.float64,0x800ce2bf4b19c57f,0x800ce2bf4b19c57f,1 +np.float64,0xbfe8df896d31bf13,0xbff807ab38ac41ab,1 +np.float64,0x3feab83da9f5707c,0x3fe36cdd827c0eff,1 +np.float64,0x3fee717683bce2ed,0x3fe564879171719b,1 +np.float64,0x80025e5577c4bcac,0x80025e5577c4bcac,1 +np.float64,0x3fe3e5378e67ca70,0x3fdef1902c5d1efd,1 +np.float64,0x3fa014bb7c202980,0x3f9faacf9238d499,1 +np.float64,0x3fddbf5e16bb7ebc,0x3fd86e2311cb0f6d,1 +np.float64,0x3fd24e50e6a49ca0,0x3fd0198f04f82186,1 +np.float64,0x656b5214cad6b,0x656b5214cad6b,1 +np.float64,0x8b0a4bfd1614a,0x8b0a4bfd1614a,1 +np.float64,0xbfeeb6bd9e7d6d7b,0xc009b669285e319e,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0xbfe719feceee33fe,0xbff47a4c8cbf0cca,1 +np.float64,0xbfd14fa8c8a29f52,0xbfd42f27b1aced39,1 +np.float64,0x7fec9dcb80f93b96,0x40862d5e1e70bbb9,1 +np.float64,0x7fecacb826f9596f,0x40862d6249746915,1 +np.float64,0x973459f52e68b,0x973459f52e68b,1 +np.float64,0x7f40a59e00214b3b,0x4085f194f45f82b1,1 +np.float64,0x7fc5dbaec32bb75d,0x4086201f3e7065d9,1 +np.float64,0x82d0801305a10,0x82d0801305a10,1 +np.float64,0x7fec81c0f4790381,0x40862d5643c0fc85,1 +np.float64,0xbfe2d81e9ee5b03d,0xbfec71a8e864ea40,1 +np.float64,0x6c545c9ad8a8c,0x6c545c9ad8a8c,1 +np.float64,0x3f9be95a5037d2b5,0x3f9b89b48ac8f5d8,1 +np.float64,0x8000cae9702195d4,0x8000cae9702195d4,1 +np.float64,0xbfd375f45126ebe8,0xbfd733677e54a80d,1 +np.float64,0x3fd29a5b81a534b7,0x3fd05494bf200278,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7fca8fc195351f82,0x408621ae61aa6c13,1 +np.float64,0x1b28e2ae3651d,0x1b28e2ae3651d,1 
+np.float64,0x3fe7fdbd14effb7a,0x3fe1e714884b46a8,1 +np.float64,0x3fdf1ce068be39c0,0x3fd95b054e0fad3d,1 +np.float64,0x3fe79f9a636f3f34,0x3fe1b11a40c00b3e,1 +np.float64,0x3fe60eb7036c1d6e,0x3fe0c72a02176874,1 +np.float64,0x229da17e453b5,0x229da17e453b5,1 +np.float64,0x3fc1a921b5235240,0x3fc08b3f35e47fb1,1 +np.float64,0xbb92d2af7725b,0xbb92d2af7725b,1 +np.float64,0x3fe4110cb1e8221a,0x3fdf2787de6c73f7,1 +np.float64,0xbfbc87771a390ef0,0xbfbe3f6e95622363,1 +np.float64,0xbfe74025dfee804c,0xbff4bf7b1895e697,1 +np.float64,0x964eb6592c9d7,0x964eb6592c9d7,1 +np.float64,0x3f951689b82a2d00,0x3f94dfb38d746fdf,1 +np.float64,0x800356271be6ac4f,0x800356271be6ac4f,1 +np.float64,0x7fefffffffffffff,0x40862e42fefa39ef,1 +np.float64,0xbfed5ce250fab9c5,0xc003f7ddfeb94345,1 +np.float64,0x3fec3d5dc1387abc,0x3fe43e39c02d86f4,1 +np.float64,0x3999897e73332,0x3999897e73332,1 +np.float64,0xbfdcb57744b96aee,0xbfe30c4b98f3d088,1 +np.float64,0x7f961fb0b82c3f60,0x40860f9549c3a380,1 +np.float64,0x67d6efcacfadf,0x67d6efcacfadf,1 +np.float64,0x8002c9498f859294,0x8002c9498f859294,1 +np.float64,0xbfa3033800260670,0xbfa35fe3bf43e188,1 +np.float64,0xbfeab2fc157565f8,0xbffcc413c486b4eb,1 +np.float64,0x3fe25e62f364bcc6,0x3fdd0856e19e3430,1 +np.float64,0x7fb2f42dda25e85b,0x4086196fb34a65fd,1 +np.float64,0x3fe0f1a5af61e34c,0x3fdb3235a1786efb,1 +np.float64,0x800a340ca1f4681a,0x800a340ca1f4681a,1 +np.float64,0x7c20b9def8418,0x7c20b9def8418,1 +np.float64,0xdf0842a1be109,0xdf0842a1be109,1 +np.float64,0x3fe9f22cc2f3e45a,0x3fe300359b842bf0,1 +np.float64,0x3fe389ed73e713da,0x3fde809780fe4432,1 +np.float64,0x9500fb932a020,0x9500fb932a020,1 +np.float64,0x3fd8a21ffdb14440,0x3fd4d70862345d86,1 +np.float64,0x800d99c15cbb3383,0x800d99c15cbb3383,1 +np.float64,0x3fd96c98c932d932,0x3fd568959c9b028f,1 +np.float64,0x7fc228483a24508f,0x40861ea358420976,1 +np.float64,0x7fc6737bef2ce6f7,0x408620560ffc6a98,1 +np.float64,0xbfb2c27cee2584f8,0xbfb37b8cc7774b5f,1 +np.float64,0xbfd18409f9230814,0xbfd4771d1a9a24fb,1 +np.float64,0x3fb53cb3f42a7968,0x3fb466f06f88044b,1 +np.float64,0x3fef61d0187ec3a0,0x3fe5dec8a9d13dd9,1 +np.float64,0x3fe59a6ffd2b34e0,0x3fe0820a99c6143d,1 +np.float64,0x3fce18aff43c3160,0x3fcb07c7b523f0d1,1 +np.float64,0xbfb1319a62226338,0xbfb1cc62f31b2b40,1 +np.float64,0xa00cce6d4019a,0xa00cce6d4019a,1 +np.float64,0x80068ae8e0ed15d3,0x80068ae8e0ed15d3,1 +np.float64,0x3fecef353239de6a,0x3fe49c280adc607b,1 +np.float64,0x3fdf1a7fb0be34ff,0x3fd9596bafe2d766,1 +np.float64,0x3feb5e12eeb6bc26,0x3fe3c6be3ede8d07,1 +np.float64,0x3fdeff5cd43dfeba,0x3fd947262ec96b05,1 +np.float64,0x3f995e75e832bd00,0x3f990f511f4c7f1c,1 +np.float64,0xbfeb5b3ed0b6b67e,0xbffee24fc0fc2881,1 +np.float64,0x7fb82aad0a305559,0x40861b614d901182,1 +np.float64,0xbfe5c3a4926b8749,0xbff23cd0ad144fe6,1 +np.float64,0x3fef47da373e8fb4,0x3fe5d1aaa4031993,1 +np.float64,0x7fc6a8c3872d5186,0x40862068f5ca84be,1 +np.float64,0x7fc0c2276221844e,0x40861dff2566d001,1 +np.float64,0x7fc9ce7d28339cf9,0x40862173541f84d1,1 +np.float64,0x3fce2c34933c5869,0x3fcb179428ad241d,1 +np.float64,0xbfcf864c293f0c98,0xbfd21872c4821cfc,1 +np.float64,0x3fc51fd1f82a3fa4,0x3fc38d4f1685c166,1 +np.float64,0xbfe2707b70a4e0f7,0xbfeb795fbd5bb444,1 +np.float64,0x46629b568cc54,0x46629b568cc54,1 +np.float64,0x7fe5f821f32bf043,0x40862b40c2cdea3f,1 +np.float64,0x3fedd2c9457ba592,0x3fe512ce92394526,1 +np.float64,0x7fe6dcb8ceadb971,0x40862b925a7dc05d,1 +np.float64,0x3fd1b983b4a37307,0x3fcf4ae2545cf64e,1 +np.float64,0xbfe1c93104639262,0xbfe9f7d28e4c0c82,1 +np.float64,0x995ebc2932bd8,0x995ebc2932bd8,1 
+np.float64,0x800a4c3ee614987e,0x800a4c3ee614987e,1 +np.float64,0x3fbb58766e36b0f0,0x3fb9fb3b9810ec16,1 +np.float64,0xbfe36d636666dac7,0xbfede5080f69053c,1 +np.float64,0x3f4feee1003fddc2,0x3f4feae5f05443d1,1 +np.float64,0x3fed0b772ffa16ee,0x3fe4aafb924903c6,1 +np.float64,0x800bb3faef3767f6,0x800bb3faef3767f6,1 +np.float64,0x3fe285cda5e50b9c,0x3fdd3a58df06c427,1 +np.float64,0x7feb9d560bb73aab,0x40862d152362bb94,1 +np.float64,0x3fecd1f447f9a3e9,0x3fe48cc78288cb3f,1 +np.float64,0x3fca927b0c3524f6,0x3fc8250f49ba28df,1 +np.float64,0x7fcc19944e383328,0x40862221b02fcf43,1 +np.float64,0xbfd8ddf41db1bbe8,0xbfdf7b92073ff2fd,1 +np.float64,0x80006fe736e0dfcf,0x80006fe736e0dfcf,1 +np.float64,0x800bbeb66d577d6d,0x800bbeb66d577d6d,1 +np.float64,0xbfe4329353e86526,0xbfefeaf19ab92b42,1 +np.float64,0x2fad72805f5af,0x2fad72805f5af,1 +np.float64,0x3fe1b827aa637050,0x3fdc33bf46012c0d,1 +np.float64,0x3fc3f3f8e227e7f2,0x3fc28aeb86d65278,1 +np.float64,0x3fec018933780312,0x3fe41e619aa4285c,1 +np.float64,0xbfd92428e0b24852,0xbfdfeecb08d154df,1 +np.float64,0x2d7046845ae0a,0x2d7046845ae0a,1 +np.float64,0x7fde7fd2233cffa3,0x408628550f8a948f,1 +np.float64,0x8000a32cd241465a,0x8000a32cd241465a,1 +np.float64,0x8004267a45084cf5,0x8004267a45084cf5,1 +np.float64,0xbfe6b422556d6844,0xbff3c71f67661e6e,1 +np.float64,0x3fe3a37d922746fb,0x3fdea04e04d6195c,1 +np.float64,0xbfddcc54b53b98aa,0xbfe40d2389cdb848,1 +np.float64,0x3fe18b4b92a31697,0x3fdbf9e68cbf5794,1 +np.float64,0x7fc9c5b2ee338b65,0x408621709a17a47a,1 +np.float64,0x1ebd1ce03d7b,0x1ebd1ce03d7b,1 +np.float64,0x8008a6fc39d14df9,0x8008a6fc39d14df9,1 +np.float64,0x3fec11384c782270,0x3fe426bdaedd2965,1 +np.float64,0x3fefc28344ff8507,0x3fe60f75d34fc3d2,1 +np.float64,0xc35f379786be7,0xc35f379786be7,1 +np.float64,0x3feef51f4a7dea3e,0x3fe5a7b95d7786b5,1 +np.float64,0x3fec9b9f0379373e,0x3fe4702477abbb63,1 +np.float64,0x3fde94f8cdbd29f0,0x3fd8ff50f7df0a6f,1 +np.float64,0xbfed32d1cdfa65a4,0xc0037c1470f6f979,1 +np.float64,0x800d3ba44f5a7749,0x800d3ba44f5a7749,1 +np.float64,0x3fe3c56c8fe78ad9,0x3fdeca4eb9bb8918,1 +np.float64,0xbfe7c97242ef92e4,0xbff5c2950dfd6f69,1 +np.float64,0xbd9440057b288,0xbd9440057b288,1 +np.float64,0x7feb2fc111f65f81,0x40862cf524bd2001,1 +np.float64,0x800a431e2df4863d,0x800a431e2df4863d,1 +np.float64,0x80038a3b79e71478,0x80038a3b79e71478,1 +np.float64,0x80000c93d4601928,0x80000c93d4601928,1 +np.float64,0x7fe9fec022f3fd7f,0x40862c995db8ada0,1 +np.float64,0x3fead0129c35a025,0x3fe379d7a92c8f79,1 +np.float64,0x3fdd8cbaf7bb1974,0x3fd84b87ff0c26c7,1 +np.float64,0x3fe8fb7c60b1f6f9,0x3fe276d5339e7135,1 +np.float64,0x85a255e10b44b,0x85a255e10b44b,1 +np.float64,0xbfe507c23fea0f84,0xbff1212d2260022a,1 +np.float64,0x3fc5487c7b2a90f9,0x3fc3b03222d3d148,1 +np.float64,0x7fec0bdcb8f817b8,0x40862d34e8fd11e7,1 +np.float64,0xbfc5f34b4f2be698,0xbfc8146a899c7a0c,1 +np.float64,0xbfa2a49c14254940,0xbfa2fdab2eae3826,1 +np.float64,0x800ec52f15dd8a5e,0x800ec52f15dd8a5e,1 +np.float64,0xbfe3ba4b12a77496,0xbfeeab256b3e9422,1 +np.float64,0x80034d6c7ba69ada,0x80034d6c7ba69ada,1 +np.float64,0x7fd394d4202729a7,0x408624c98a216742,1 +np.float64,0xbfd4493a38289274,0xbfd865d67af2de91,1 +np.float64,0xe47d6203c8fad,0xe47d6203c8fad,1 +np.float64,0x98eb4e4b31d6a,0x98eb4e4b31d6a,1 +np.float64,0x4507fb128a100,0x4507fb128a100,1 +np.float64,0xbfc77032e42ee064,0xbfc9e36ab747a14d,1 +np.float64,0xa1f8a03b43f14,0xa1f8a03b43f14,1 +np.float64,0xbfc3d4da8527a9b4,0xbfc58c27af2476b0,1 +np.float64,0x3fc0eb7d6921d6fb,0x3fbfc858a077ed61,1 +np.float64,0x7fddb2e9403b65d2,0x4086281e98443709,1 
+np.float64,0xbfa7ea62942fd4c0,0xbfa87dfd06b05d2a,1 +np.float64,0xbfe7d5c5426fab8a,0xbff5daa969c6d9e5,1 +np.float64,0x3fbf7cba0c3ef974,0x3fbdb23cd8fe875b,1 +np.float64,0x7fe92021eb324043,0x40862c53aee8b154,1 +np.float64,0x7fefbaa1827f7542,0x40862e3194737072,1 +np.float64,0x3fc6f82c402df059,0x3fc520432cbc533f,1 +np.float64,0x7fb37679a826ecf2,0x408619a5f857e27f,1 +np.float64,0x79ec1528f3d83,0x79ec1528f3d83,1 +np.float64,0x3fbefe1d0c3dfc3a,0x3fbd41650ba2c893,1 +np.float64,0x3fc3e5e11827cbc2,0x3fc27eb9b47c9c42,1 +np.float64,0x16aed1922d5db,0x16aed1922d5db,1 +np.float64,0x800124f7e58249f1,0x800124f7e58249f1,1 +np.float64,0x8004f7d12489efa3,0x8004f7d12489efa3,1 +np.float64,0x3fef80b8e27f0172,0x3fe5ee5fd43322c6,1 +np.float64,0xbfe7740c88eee819,0xbff51f823c8da14d,1 +np.float64,0xbfe6e1f1f6edc3e4,0xbff416bcb1302e7c,1 +np.float64,0x8001a2c4a7e3458a,0x8001a2c4a7e3458a,1 +np.float64,0x3fe861e155f0c3c2,0x3fe2201d3000c329,1 +np.float64,0x3fd00a101a201420,0x3fcca01087dbd728,1 +np.float64,0x7fdf0eb1133e1d61,0x4086287a327839b8,1 +np.float64,0x95e3ffdb2bc80,0x95e3ffdb2bc80,1 +np.float64,0x3fd87a1e8230f43d,0x3fd4ba1eb9be1270,1 +np.float64,0x3fedc4792afb88f2,0x3fe50b6529080f73,1 +np.float64,0x7fc9e81fa833d03e,0x4086217b428cc6ff,1 +np.float64,0xbfd21f1ba5a43e38,0xbfd54e048b988e09,1 +np.float64,0xbfbf52af5a3ea560,0xbfc0b4ab3b81fafc,1 +np.float64,0x7fe475f8e268ebf1,0x40862aaf14fee029,1 +np.float64,0x3fcf56899f3ead10,0x3fcc081de28ae9cf,1 +np.float64,0x917d407122fa8,0x917d407122fa8,1 +np.float64,0x22e23e3245c49,0x22e23e3245c49,1 +np.float64,0xbfeec2814f3d8503,0xc00a00ecca27b426,1 +np.float64,0xbfd97fee1c32ffdc,0xbfe04351dfe306ec,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-log2.csv b/python/numpy/_core/tests/data/umath-validation-set-log2.csv new file mode 100644 index 000000000..26921ef1d --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-log2.csv @@ -0,0 +1,1629 @@ +dtype,input,output,ulperrortol +np.float32,0x80000000,0xff800000,3 +np.float32,0x7f12870a,0x42fe63db,3 +np.float32,0x3ef29cf5,0xbf89eb12,3 +np.float32,0x3d6ba8fb,0xc083d26c,3 +np.float32,0x3d9907e8,0xc06f8230,3 +np.float32,0x4ee592,0xc2fd656e,3 +np.float32,0x58d8b1,0xc2fd0db3,3 +np.float32,0x7ba103,0xc2fc19aa,3 +np.float32,0x7f52e90e,0x42ff70e4,3 +np.float32,0x7fcb15,0xc2fc0132,3 +np.float32,0x7cb7129f,0x42f50855,3 +np.float32,0x9faba,0xc301ae59,3 +np.float32,0x7f300a,0xc2fc04b4,3 +np.float32,0x3f0bf047,0xbf5f10cb,3 +np.float32,0x2fb1fb,0xc2fed934,3 +np.float32,0x3eedb0d1,0xbf8db417,3 +np.float32,0x3d7a0b40,0xc0811638,3 +np.float32,0x2e0bac,0xc2fef334,3 +np.float32,0x6278c1,0xc2fcc1b9,3 +np.float32,0x7f61ab2e,0x42ffa2d9,3 +np.float32,0x8fe7c,0xc301d4be,3 +np.float32,0x3f25e6ee,0xbf203536,3 +np.float32,0x7efc78f0,0x42fdf5c0,3 +np.float32,0x6d7304,0xc2fc73a7,3 +np.float32,0x7f1a472a,0x42fe89ed,3 +np.float32,0x7dd029a6,0x42f96734,3 +np.float32,0x3e9b9327,0xbfdbf8f7,3 +np.float32,0x3f4eefc1,0xbe9d2942,3 +np.float32,0x7f5b9b64,0x42ff8ebc,3 +np.float32,0x3e458ee1,0xc017ed6e,3 +np.float32,0x3f7b766b,0xbcd35acf,3 +np.float32,0x3e616070,0xc00bc378,3 +np.float32,0x7f20e633,0x42fea8f8,3 +np.float32,0x3ee3b461,0xbf95a126,3 +np.float32,0x7e7722ba,0x42fbe5f8,3 +np.float32,0x3f0873d7,0xbf6861fa,3 +np.float32,0x7b4cb2,0xc2fc1ba3,3 +np.float32,0x3f0b6b02,0xbf60712e,3 +np.float32,0x9bff4,0xc301b6f2,3 +np.float32,0x3f07be25,0xbf6a4f0c,3 +np.float32,0x3ef10e57,0xbf8b1b75,3 +np.float32,0x46ad75,0xc2fdb6b1,3 +np.float32,0x3f7bc542,0xbcc4e3a9,3 +np.float32,0x3f6673d4,0xbe1b509c,3 +np.float32,0x7f19fe59,0x42fe8890,3 
+np.float32,0x7f800000,0x7f800000,3 +np.float32,0x7f2fe696,0x42feead0,3 +np.float32,0x3dc9432d,0xc0563655,3 +np.float32,0x3ee47623,0xbf950446,3 +np.float32,0x3f1f8817,0xbf2eab51,3 +np.float32,0x7f220ec5,0x42feae44,3 +np.float32,0x2325e3,0xc2ffbab1,3 +np.float32,0x29dfc8,0xc2ff395a,3 +np.float32,0x7f524950,0x42ff6eb3,3 +np.float32,0x3e2234e0,0xc02a21c8,3 +np.float32,0x7f1c6f5a,0x42fe942f,3 +np.float32,0x3b6a61,0xc2fe36e7,3 +np.float32,0x3f1df90e,0xbf324ba9,3 +np.float32,0xb57f0,0xc3017f07,3 +np.float32,0x7d0eba,0xc2fc112e,3 +np.float32,0x403aa9,0xc2fdfd5c,3 +np.float32,0x3e74ecc7,0xc004155f,3 +np.float32,0x17509c,0xc30074f2,3 +np.float32,0x7f62196b,0x42ffa442,3 +np.float32,0x3ecef9a9,0xbfa7417a,3 +np.float32,0x7f14b158,0x42fe6eb1,3 +np.float32,0x3ede12be,0xbf9a40fe,3 +np.float32,0x42cfaa,0xc2fde03f,3 +np.float32,0x3f407b0f,0xbed2a6f5,3 +np.float32,0x7f7fffff,0x43000000,3 +np.float32,0x5467c6,0xc2fd3394,3 +np.float32,0x7ea6b80f,0x42fcc336,3 +np.float32,0x3f21e7b2,0xbf293704,3 +np.float32,0x3dc7e9eb,0xc056d542,3 +np.float32,0x7f3e6e67,0x42ff2571,3 +np.float32,0x3e3e809d,0xc01b4911,3 +np.float32,0x3f800000,0x0,3 +np.float32,0x3d8fd238,0xc0753d52,3 +np.float32,0x3f74aa65,0xbd85cd0e,3 +np.float32,0x7ec30305,0x42fd36ff,3 +np.float32,0x3e97bb93,0xbfe0971d,3 +np.float32,0x3e109d9c,0xc034bb1b,3 +np.float32,0x3f4a0b67,0xbeaed537,3 +np.float32,0x3f25a7aa,0xbf20c228,3 +np.float32,0x3ebc05eb,0xbfb8fd6b,3 +np.float32,0x3eebe749,0xbf8f18e5,3 +np.float32,0x3e9dc479,0xbfd96356,3 +np.float32,0x7f245200,0x42feb882,3 +np.float32,0x1573a8,0xc30093b5,3 +np.float32,0x3e66c4b9,0xc00994a6,3 +np.float32,0x3e73bffc,0xc0048709,3 +np.float32,0x3dfef8e5,0xc0405f16,3 +np.float32,0x403750,0xc2fdfd83,3 +np.float32,0x3ebedf17,0xbfb636a4,3 +np.float32,0x15cae6,0xc3008de2,3 +np.float32,0x3edf4d4e,0xbf993c24,3 +np.float32,0x3f7cc41e,0xbc963fb3,3 +np.float32,0x3e9e12a4,0xbfd907ee,3 +np.float32,0x7ded7b59,0x42f9c889,3 +np.float32,0x7f034878,0x42fe12b5,3 +np.float32,0x7ddce43f,0x42f9930b,3 +np.float32,0x3d82b257,0xc07e1333,3 +np.float32,0x3dae89c1,0xc0635dd4,3 +np.float32,0x6b1d00,0xc2fc8396,3 +np.float32,0x449a5a,0xc2fdccb3,3 +np.float32,0x4e89d2,0xc2fd68cb,3 +np.float32,0x7e1ae83f,0x42fa8cef,3 +np.float32,0x7e4bb22c,0x42fb572e,3 +np.float32,0x3de308ea,0xc04b1634,3 +np.float32,0x7f238c7a,0x42feb508,3 +np.float32,0x3f6c62a3,0xbdeb86f3,3 +np.float32,0x3e58cba6,0xc00f5908,3 +np.float32,0x7f7dd91f,0x42fff9c4,3 +np.float32,0x3d989376,0xc06fc88d,3 +np.float32,0x3dd013c5,0xc0532339,3 +np.float32,0x4b17e6,0xc2fd89ed,3 +np.float32,0x7f67f287,0x42ffb71e,3 +np.float32,0x3f69365e,0xbe09ba3c,3 +np.float32,0x3e4b8b21,0xc0152bf1,3 +np.float32,0x3a75b,0xc3032171,3 +np.float32,0x7f303676,0x42feec1f,3 +np.float32,0x7f6570e5,0x42ffaf18,3 +np.float32,0x3f5ed61e,0xbe4cf676,3 +np.float32,0x3e9b22f9,0xbfdc7e4f,3 +np.float32,0x2c095e,0xc2ff1428,3 +np.float32,0x3f1b17c1,0xbf391754,3 +np.float32,0x422dc6,0xc2fde746,3 +np.float32,0x3f677c8d,0xbe14b365,3 +np.float32,0x3ef85d0c,0xbf8597a9,3 +np.float32,0x3ecaaa6b,0xbfab2430,3 +np.float32,0x3f0607d1,0xbf6eff3d,3 +np.float32,0x3f011fdb,0xbf7cc50d,3 +np.float32,0x6ed7c1,0xc2fc6a4e,3 +np.float32,0x7ec2d1a2,0x42fd3644,3 +np.float32,0x3f75b7fe,0xbd7238a2,3 +np.float32,0x3ef2d146,0xbf89c344,3 +np.float32,0x7ec2cd27,0x42fd3633,3 +np.float32,0x7ee1e55a,0x42fda397,3 +np.float32,0x7f464d6a,0x42ff435c,3 +np.float32,0x7f469a93,0x42ff447b,3 +np.float32,0x7ece752f,0x42fd6121,3 +np.float32,0x2ed878,0xc2fee67b,3 +np.float32,0x75b23,0xc3021eff,3 +np.float32,0x3e0f4be4,0xc03593b8,3 +np.float32,0x2778e1,0xc2ff64fc,3 
+np.float32,0x5fe2b7,0xc2fcd561,3 +np.float32,0x19b8a9,0xc30050ab,3 +np.float32,0x7df303e5,0x42f9d98d,3 +np.float32,0x608b8d,0xc2fcd051,3 +np.float32,0x588f46,0xc2fd1017,3 +np.float32,0x3eec6a11,0xbf8eb2a1,3 +np.float32,0x3f714121,0xbdaf4906,3 +np.float32,0x7f4f7b9e,0x42ff64c9,3 +np.float32,0x3c271606,0xc0d3b29c,3 +np.float32,0x3f002fe0,0xbf7f75f6,3 +np.float32,0x7efa4798,0x42fdef4f,3 +np.float32,0x3f61a865,0xbe3a601a,3 +np.float32,0x7e8087aa,0x42fc030d,3 +np.float32,0x3f70f0c7,0xbdb321ba,3 +np.float32,0x5db898,0xc2fce63f,3 +np.float32,0x7a965f,0xc2fc1fea,3 +np.float32,0x7f68b112,0x42ffb97c,3 +np.float32,0x7ef0ed3d,0x42fdd32d,3 +np.float32,0x7f3156a1,0x42fef0d3,3 +np.float32,0x3f1d405f,0xbf33fc6e,3 +np.float32,0x3e3494cf,0xc0203945,3 +np.float32,0x6018de,0xc2fcd3c1,3 +np.float32,0x623e49,0xc2fcc370,3 +np.float32,0x3ea29f0f,0xbfd3cad4,3 +np.float32,0xa514,0xc305a20c,3 +np.float32,0x3e1b2ab1,0xc02e3a8f,3 +np.float32,0x3f450b6f,0xbec1578f,3 +np.float32,0x7eb12908,0x42fcf015,3 +np.float32,0x3f10b720,0xbf52ab48,3 +np.float32,0x3e0a93,0xc2fe16f6,3 +np.float32,0x93845,0xc301cb96,3 +np.float32,0x7f4e9ce3,0x42ff61af,3 +np.float32,0x3f6d4296,0xbde09ceb,3 +np.float32,0x6ddede,0xc2fc70d0,3 +np.float32,0x3f4fb6fd,0xbe9a636d,3 +np.float32,0x3f6d08de,0xbde36c0b,3 +np.float32,0x3f56f057,0xbe8122ad,3 +np.float32,0x334e95,0xc2fea349,3 +np.float32,0x7efadbcd,0x42fdf104,3 +np.float32,0x3db02e88,0xc0628046,3 +np.float32,0x3f3309d1,0xbf041066,3 +np.float32,0x2d8722,0xc2fefb8f,3 +np.float32,0x7e926cac,0x42fc6356,3 +np.float32,0x3e3674ab,0xc01f452e,3 +np.float32,0x1b46ce,0xc3003afc,3 +np.float32,0x3f06a338,0xbf6d53fc,3 +np.float32,0x1b1ba7,0xc3003d46,3 +np.float32,0x319dfb,0xc2febc06,3 +np.float32,0x3e2f126a,0xc02315a5,3 +np.float32,0x3f40fe65,0xbed0af9e,3 +np.float32,0x3f1d842f,0xbf335d4b,3 +np.float32,0x3d044e4f,0xc09e78f8,3 +np.float32,0x7f272674,0x42fec51f,3 +np.float32,0x3cda6d8f,0xc0a753db,3 +np.float32,0x3eb92f12,0xbfbbccbb,3 +np.float32,0x7e4318f4,0x42fb3752,3 +np.float32,0x3c5890,0xc2fe2b6d,3 +np.float32,0x3d1993c9,0xc09796f8,3 +np.float32,0x7f18ef24,0x42fe8377,3 +np.float32,0x3e30c3a0,0xc0223244,3 +np.float32,0x3f27cd27,0xbf1c00ef,3 +np.float32,0x3f150957,0xbf47cd6c,3 +np.float32,0x7e7178a3,0x42fbd4d8,3 +np.float32,0x3f298db8,0xbf182ac3,3 +np.float32,0x7cb3be,0xc2fc1348,3 +np.float32,0x3ef64266,0xbf8729de,3 +np.float32,0x3eeb06ce,0xbf8fc8f2,3 +np.float32,0x3f406e36,0xbed2d845,3 +np.float32,0x7f1e1bd3,0x42fe9c0b,3 +np.float32,0x478dcc,0xc2fdad97,3 +np.float32,0x7f7937b5,0x42ffec2b,3 +np.float32,0x3f20f350,0xbf2b6624,3 +np.float32,0x7f13661a,0x42fe683c,3 +np.float32,0x208177,0xc2fff46b,3 +np.float32,0x263cfb,0xc2ff7c72,3 +np.float32,0x7f0bd28c,0x42fe4141,3 +np.float32,0x7230d8,0xc2fc5453,3 +np.float32,0x3f261bbf,0xbf1fbfb4,3 +np.float32,0x737b56,0xc2fc4c05,3 +np.float32,0x3ef88f33,0xbf857263,3 +np.float32,0x7e036464,0x42fa1352,3 +np.float32,0x4b5c4f,0xc2fd874d,3 +np.float32,0x3f77984d,0xbd454596,3 +np.float32,0x3f674202,0xbe162932,3 +np.float32,0x3e7157d9,0xc0057197,3 +np.float32,0x3f3f21da,0xbed7d861,3 +np.float32,0x7f1fb40f,0x42fea375,3 +np.float32,0x7ef0157f,0x42fdd096,3 +np.float32,0x3f71e88d,0xbda74962,3 +np.float32,0x3f174855,0xbf424728,3 +np.float32,0x3f3fdd2c,0xbed505d5,3 +np.float32,0x7b95d1,0xc2fc19ed,3 +np.float32,0x7f23f4e5,0x42feb6df,3 +np.float32,0x7d741925,0x42f7dcd6,3 +np.float32,0x60f81d,0xc2fccd14,3 +np.float32,0x3f17d267,0xbf40f6ae,3 +np.float32,0x3f036fc8,0xbf7636f8,3 +np.float32,0x167653,0xc30082b5,3 +np.float32,0x256d05,0xc2ff8c4f,3 +np.float32,0x3eccc63d,0xbfa93adb,3 
+np.float32,0x7f6c91ea,0x42ffc5b2,3 +np.float32,0x2ee52a,0xc2fee5b3,3 +np.float32,0x3dc3579e,0xc058f80d,3 +np.float32,0x4c7170,0xc2fd7cc4,3 +np.float32,0x7f737f20,0x42ffdb03,3 +np.float32,0x3f2f9dbf,0xbf0b3119,3 +np.float32,0x3f4d0c54,0xbea3eec5,3 +np.float32,0x7e380862,0x42fb0c32,3 +np.float32,0x5d637f,0xc2fce8df,3 +np.float32,0x3f0aa623,0xbf627c27,3 +np.float32,0x3e4d5896,0xc0145b88,3 +np.float32,0x3f6cacdc,0xbde7e7ca,3 +np.float32,0x63a2c3,0xc2fcb90a,3 +np.float32,0x6c138c,0xc2fc7cfa,3 +np.float32,0x2063c,0xc303fb88,3 +np.float32,0x7e9e5a3e,0x42fc9d2f,3 +np.float32,0x56ec64,0xc2fd1ddd,3 +np.float32,0x7f1d6a35,0x42fe98cc,3 +np.float32,0x73dc96,0xc2fc4998,3 +np.float32,0x3e5d74e5,0xc00d6238,3 +np.float32,0x7f033cbb,0x42fe1273,3 +np.float32,0x3f5143fc,0xbe94e4e7,3 +np.float32,0x1d56d9,0xc3002010,3 +np.float32,0x2bf3e4,0xc2ff1591,3 +np.float32,0x3f2a6ef1,0xbf164170,3 +np.float32,0x3f33238b,0xbf03db58,3 +np.float32,0x22780e,0xc2ffc91a,3 +np.float32,0x7f00b873,0x42fe0425,3 +np.float32,0x3f7f6145,0xbb654706,3 +np.float32,0x7fc00000,0x7fc00000,3 +np.float32,0x63895a,0xc2fcb9c7,3 +np.float32,0x18a1b2,0xc30060a8,3 +np.float32,0x7e43c6a6,0x42fb39e3,3 +np.float32,0x78676e,0xc2fc2d30,3 +np.float32,0x3f16d839,0xbf435940,3 +np.float32,0x7eff78ba,0x42fdfe79,3 +np.float32,0x3f2e152c,0xbf0e6e54,3 +np.float32,0x3db20ced,0xc06186e1,3 +np.float32,0x3f0cd1d8,0xbf5cbf57,3 +np.float32,0x3fd7a8,0xc2fe01d2,3 +np.float32,0x3ebb075e,0xbfb9f816,3 +np.float32,0x7f94ef,0xc2fc026b,3 +np.float32,0x3d80ba0e,0xc07f7a2b,3 +np.float32,0x7f227e15,0x42feb03f,3 +np.float32,0x792264bf,0x42e6afcc,3 +np.float32,0x7f501576,0x42ff66ec,3 +np.float32,0x223629,0xc2ffcea3,3 +np.float32,0x40a79e,0xc2fdf87b,3 +np.float32,0x449483,0xc2fdccf2,3 +np.float32,0x3f4fa978,0xbe9a9382,3 +np.float32,0x7f148c53,0x42fe6df9,3 +np.float32,0x3ec98b3c,0xbfac2a98,3 +np.float32,0x3e4da320,0xc0143a0a,3 +np.float32,0x3d1d94bb,0xc09666d0,3 +np.float32,0x3c8e624e,0xc0bb155b,3 +np.float32,0x66a9af,0xc2fca2ef,3 +np.float32,0x3ec76ed7,0xbfae1c57,3 +np.float32,0x3f4b52f3,0xbeaa2b81,3 +np.float32,0x7e99bbb5,0x42fc8750,3 +np.float32,0x3f69a46b,0xbe0701be,3 +np.float32,0x3f775400,0xbd4ba495,3 +np.float32,0x131e56,0xc300be3c,3 +np.float32,0x3f30abb4,0xbf08fb10,3 +np.float32,0x7f7e528c,0x42fffb25,3 +np.float32,0x3eb89515,0xbfbc668a,3 +np.float32,0x7e9191b6,0x42fc5f02,3 +np.float32,0x7e80c7e9,0x42fc047e,3 +np.float32,0x3f77ef58,0xbd3d2995,3 +np.float32,0x7ddb1f8a,0x42f98d1b,3 +np.float32,0x7ebc6c4f,0x42fd1d9c,3 +np.float32,0x3f6638e0,0xbe1ccab8,3 +np.float32,0x7f4c45,0xc2fc0410,3 +np.float32,0x3e7d8aad,0xc000e414,3 +np.float32,0x3f4d148b,0xbea3d12e,3 +np.float32,0x3e98c45c,0xbfdf55f4,3 +np.float32,0x3d754c78,0xc081f8a9,3 +np.float32,0x17e4cf,0xc3006be3,3 +np.float32,0x7eb65814,0x42fd0563,3 +np.float32,0x3f65e0d8,0xbe1f0008,3 +np.float32,0x3e99541f,0xbfdea87e,3 +np.float32,0x3f3cb80e,0xbee13b27,3 +np.float32,0x3e99f0c0,0xbfddec3b,3 +np.float32,0x3f43903e,0xbec6ea66,3 +np.float32,0x7e211cd4,0x42faa9f2,3 +np.float32,0x824af,0xc301f971,3 +np.float32,0x3e16a56e,0xc030f56c,3 +np.float32,0x542b3b,0xc2fd35a6,3 +np.float32,0x3eeea2d1,0xbf8cf873,3 +np.float32,0x232e93,0xc2ffb9fa,3 +np.float32,0x3e8c52b9,0xbfef06aa,3 +np.float32,0x7f69c7e3,0x42ffbcef,3 +np.float32,0x3f573e43,0xbe801714,3 +np.float32,0x43b009,0xc2fdd69f,3 +np.float32,0x3ee571ab,0xbf943966,3 +np.float32,0x3ee3d5d8,0xbf958604,3 +np.float32,0x338b12,0xc2fe9fe4,3 +np.float32,0x29cb1f,0xc2ff3ac6,3 +np.float32,0x3f0892b4,0xbf680e7a,3 +np.float32,0x3e8c4f7f,0xbfef0ae9,3 +np.float32,0x7c9d3963,0x42f497e6,3 
+np.float32,0x3f26ba84,0xbf1e5f59,3 +np.float32,0x3dd0acc0,0xc052df6f,3 +np.float32,0x3e43fbda,0xc018aa8c,3 +np.float32,0x3ec4fd0f,0xbfb0635d,3 +np.float32,0x3f52c8c6,0xbe8f8d85,3 +np.float32,0x3f5fdc5d,0xbe462fdb,3 +np.float32,0x3f461920,0xbebd6743,3 +np.float32,0x6161ff,0xc2fcc9ef,3 +np.float32,0x7f7ed306,0x42fffc9a,3 +np.float32,0x3d212263,0xc0955f46,3 +np.float32,0x3eca5826,0xbfab6f36,3 +np.float32,0x7d6317ac,0x42f7a77e,3 +np.float32,0x3eb02063,0xbfc50f60,3 +np.float32,0x7f71a6f8,0x42ffd565,3 +np.float32,0x1a3efe,0xc3004935,3 +np.float32,0x3dc599c9,0xc057e856,3 +np.float32,0x3f3e1301,0xbedbf205,3 +np.float32,0xf17d4,0xc301158d,3 +np.float32,0x3f615f84,0xbe3c3d85,3 +np.float32,0x3de63be1,0xc049cb77,3 +np.float32,0x3e8d2f51,0xbfede541,3 +np.float32,0x3a5cdd,0xc2fe441c,3 +np.float32,0x3f443ec0,0xbec4586a,3 +np.float32,0x3eacbd00,0xbfc8a5ad,3 +np.float32,0x3f600f6a,0xbe44df1b,3 +np.float32,0x5f77a6,0xc2fcd89c,3 +np.float32,0x476706,0xc2fdaf28,3 +np.float32,0x2f469,0xc3036fde,3 +np.float32,0x7dc4ba24,0x42f93d77,3 +np.float32,0x3e2d6080,0xc023fb9b,3 +np.float32,0x7e8d7135,0x42fc49c3,3 +np.float32,0x3f589065,0xbe77247b,3 +np.float32,0x3f59e210,0xbe6e2c05,3 +np.float32,0x7f51d388,0x42ff6d15,3 +np.float32,0x7d9a5fda,0x42f88a63,3 +np.float32,0x3e67d5bc,0xc00927ab,3 +np.float32,0x61d72c,0xc2fcc679,3 +np.float32,0x3ef3351d,0xbf897766,3 +np.float32,0x1,0xc3150000,3 +np.float32,0x7f653429,0x42ffae54,3 +np.float32,0x7e1ad3e5,0x42fa8c8e,3 +np.float32,0x3f4ca01d,0xbea57500,3 +np.float32,0x3f7606db,0xbd6ad13e,3 +np.float32,0x7ec4a27d,0x42fd3d1f,3 +np.float32,0x3efe4fd5,0xbf8138c7,3 +np.float32,0x77c2f1,0xc2fc3124,3 +np.float32,0x7e4d3251,0x42fb5c9a,3 +np.float32,0x3f543ac7,0xbe8a8154,3 +np.float32,0x7c3dbe29,0x42f322c4,3 +np.float32,0x408e01,0xc2fdf9a0,3 +np.float32,0x45069b,0xc2fdc829,3 +np.float32,0x3d7ecab7,0xc08037e8,3 +np.float32,0xf8c22,0xc3010a99,3 +np.float32,0x7f69af63,0x42ffbca2,3 +np.float32,0x7ec7d228,0x42fd48fe,3 +np.float32,0xff800000,0xffc00000,3 +np.float32,0xdd7c5,0xc301357c,3 +np.float32,0x143f38,0xc300a90e,3 +np.float32,0x7e65c176,0x42fbb01b,3 +np.float32,0x2c1a9e,0xc2ff1307,3 +np.float32,0x7f6e9224,0x42ffcbeb,3 +np.float32,0x3d32ab39,0xc0909a77,3 +np.float32,0x3e150b42,0xc031f22b,3 +np.float32,0x1f84b4,0xc300059a,3 +np.float32,0x3f71ce21,0xbda88c2a,3 +np.float32,0x2625c4,0xc2ff7e33,3 +np.float32,0x3dd0b293,0xc052dcdc,3 +np.float32,0x625c11,0xc2fcc290,3 +np.float32,0x3f610297,0xbe3e9f24,3 +np.float32,0x7ebdd5e5,0x42fd2320,3 +np.float32,0x3e883458,0xbff486ff,3 +np.float32,0x782313,0xc2fc2ed4,3 +np.float32,0x7f39c843,0x42ff132f,3 +np.float32,0x7f326aa7,0x42fef54d,3 +np.float32,0x4d2c71,0xc2fd75be,3 +np.float32,0x3f55747c,0xbe86409e,3 +np.float32,0x7f7f0867,0x42fffd34,3 +np.float32,0x321316,0xc2feb53f,3 +np.float32,0x3e1b37ed,0xc02e32b0,3 +np.float32,0x80edf,0xc301fd54,3 +np.float32,0x3f0b08ad,0xbf617607,3 +np.float32,0x7f3f4174,0x42ff28a2,3 +np.float32,0x3d79306d,0xc0813eb0,3 +np.float32,0x3f5f657a,0xbe49413d,3 +np.float32,0x3f56c63a,0xbe81b376,3 +np.float32,0x7f667123,0x42ffb24f,3 +np.float32,0x3f71021b,0xbdb24d43,3 +np.float32,0x7f434ab1,0x42ff380f,3 +np.float32,0x3dcae496,0xc055779c,3 +np.float32,0x3f5a7d88,0xbe6a0f5b,3 +np.float32,0x3cdf5c32,0xc0a64bf5,3 +np.float32,0x3e56222c,0xc0107d11,3 +np.float32,0x561a3a,0xc2fd24df,3 +np.float32,0x7ddd953c,0x42f9955a,3 +np.float32,0x7e35d839,0x42fb035c,3 +np.float32,0x3ec1816c,0xbfb3aeb2,3 +np.float32,0x7c87cfcd,0x42f42bc2,3 +np.float32,0xd9cd,0xc3053baf,3 +np.float32,0x3f388234,0xbef1e5b7,3 +np.float32,0x3edfcaca,0xbf98d47b,3 
+np.float32,0x3ef28852,0xbf89fac8,3 +np.float32,0x7f7525df,0x42ffe001,3 +np.float32,0x7f6c33ef,0x42ffc48c,3 +np.float32,0x3ea4a881,0xbfd17e61,3 +np.float32,0x3f3e379f,0xbedb63c6,3 +np.float32,0x3f0524c1,0xbf717301,3 +np.float32,0x3db3e7f0,0xc06091d3,3 +np.float32,0x800000,0xc2fc0000,3 +np.float32,0x3f2f2897,0xbf0c27ce,3 +np.float32,0x7eb1776d,0x42fcf15c,3 +np.float32,0x3f039018,0xbf75dc37,3 +np.float32,0x3c4055,0xc2fe2c96,3 +np.float32,0x3f603653,0xbe43dea5,3 +np.float32,0x7f700d24,0x42ffd07c,3 +np.float32,0x3f4741a3,0xbeb918dc,3 +np.float32,0x3f5fe959,0xbe45da2d,3 +np.float32,0x3f3e4401,0xbedb33b1,3 +np.float32,0x7f0705ff,0x42fe2775,3 +np.float32,0x3ea85662,0xbfcd69b0,3 +np.float32,0x3f15f49f,0xbf458829,3 +np.float32,0x3f17c50e,0xbf411728,3 +np.float32,0x3e483f60,0xc016add2,3 +np.float32,0x3f1ab9e5,0xbf39f71b,3 +np.float32,0x3de0b6fb,0xc04c08fe,3 +np.float32,0x7e671225,0x42fbb452,3 +np.float32,0x80800000,0xffc00000,3 +np.float32,0xe2df3,0xc3012c9d,3 +np.float32,0x3ede1e3c,0xbf9a3770,3 +np.float32,0x3df2ffde,0xc044cfec,3 +np.float32,0x3eed8da5,0xbf8dcf6c,3 +np.float32,0x3ead15c3,0xbfc846e1,3 +np.float32,0x7ef3750a,0x42fddae4,3 +np.float32,0x7e6ab7c0,0x42fbbfe4,3 +np.float32,0x7ea4bbe5,0x42fcba5d,3 +np.float32,0x3f227706,0xbf27f0a1,3 +np.float32,0x3ef39bfd,0xbf89295a,3 +np.float32,0x3f289a20,0xbf1a3edd,3 +np.float32,0x7f225f82,0x42feafb4,3 +np.float32,0x768963,0xc2fc38bc,3 +np.float32,0x3f493c00,0xbeb1ccfc,3 +np.float32,0x3f4e7249,0xbe9ee9a7,3 +np.float32,0x1d0c3a,0xc30023c0,3 +np.float32,0x7f3c5f78,0x42ff1d6a,3 +np.float32,0xff7fffff,0xffc00000,3 +np.float32,0x3ee7896a,0xbf928c2a,3 +np.float32,0x3e788479,0xc002bd2e,3 +np.float32,0x3ee4df17,0xbf94af84,3 +np.float32,0x5e06d7,0xc2fce3d7,3 +np.float32,0x3d7b2776,0xc080e1dc,3 +np.float32,0x3e3d39d3,0xc01be7fd,3 +np.float32,0x7c81dece,0x42f40ab7,3 +np.float32,0x3f7d2085,0xbc856255,3 +np.float32,0x7f7f6627,0x42fffe44,3 +np.float32,0x7f5f2e94,0x42ff9aaa,3 +np.float32,0x7f5835f2,0x42ff8339,3 +np.float32,0x3f6a0e32,0xbe046580,3 +np.float32,0x7e16f586,0x42fa79dd,3 +np.float32,0x3f04a2f2,0xbf72dbc5,3 +np.float32,0x3f35e334,0xbefc7740,3 +np.float32,0x3f0d056e,0xbf5c3824,3 +np.float32,0x7ebeb95e,0x42fd2693,3 +np.float32,0x3c6192,0xc2fe2aff,3 +np.float32,0x3e892b4f,0xbff33958,3 +np.float32,0x3f61d694,0xbe3931df,3 +np.float32,0x29d183,0xc2ff3a56,3 +np.float32,0x7f0b0598,0x42fe3d04,3 +np.float32,0x7f743b28,0x42ffdd3d,3 +np.float32,0x3a2ed6,0xc2fe4663,3 +np.float32,0x3e27403a,0xc0274de8,3 +np.float32,0x3f58ee78,0xbe74a349,3 +np.float32,0x3eaa4b,0xc2fe0f92,3 +np.float32,0x3ecb613b,0xbfaa7de8,3 +np.float32,0x7f637d81,0x42ffa8c9,3 +np.float32,0x3f026e96,0xbf790c73,3 +np.float32,0x386cdf,0xc2fe5d0c,3 +np.float32,0x35abd1,0xc2fe8202,3 +np.float32,0x3eac3cd1,0xbfc92ee8,3 +np.float32,0x3f567869,0xbe82bf47,3 +np.float32,0x3f65c643,0xbe1faae6,3 +np.float32,0x7f5422b9,0x42ff752b,3 +np.float32,0x7c26e9,0xc2fc168c,3 +np.float32,0x7eff5cfd,0x42fdfe29,3 +np.float32,0x3f728e7f,0xbd9f6142,3 +np.float32,0x3f10fd43,0xbf51f874,3 +np.float32,0x7e7ada08,0x42fbf0fe,3 +np.float32,0x3e82a611,0xbffc37be,3 +np.float32,0xbf800000,0xffc00000,3 +np.float32,0x3dbe2e12,0xc05b711c,3 +np.float32,0x7e768fa9,0x42fbe440,3 +np.float32,0x5e44e8,0xc2fce1f0,3 +np.float32,0x7f25071a,0x42febbae,3 +np.float32,0x3f54db5e,0xbe885339,3 +np.float32,0x3f0f2c26,0xbf56a0b8,3 +np.float32,0x22f9a7,0xc2ffbe55,3 +np.float32,0x7ed63dcb,0x42fd7c77,3 +np.float32,0x7ea4fae2,0x42fcbb78,3 +np.float32,0x3f1d7766,0xbf337b47,3 +np.float32,0x7f16d59f,0x42fe7941,3 +np.float32,0x3f3a1bb6,0xbeeb855c,3 
+np.float32,0x3ef57128,0xbf87c709,3 +np.float32,0xb24ff,0xc3018591,3 +np.float32,0x3ef99e27,0xbf84a983,3 +np.float32,0x3eac2ccf,0xbfc94013,3 +np.float32,0x3e9d3e1e,0xbfda00dc,3 +np.float32,0x718213,0xc2fc58c1,3 +np.float32,0x7edbf509,0x42fd8fea,3 +np.float32,0x70c7f1,0xc2fc5d80,3 +np.float32,0x3f7012f5,0xbdbdc6cd,3 +np.float32,0x12cba,0xc304c487,3 +np.float32,0x7f5d445d,0x42ff944c,3 +np.float32,0x7f3e30bd,0x42ff2481,3 +np.float32,0x63b110,0xc2fcb8a0,3 +np.float32,0x3f39f728,0xbeec1680,3 +np.float32,0x3f5bea58,0xbe6074b1,3 +np.float32,0x3f350749,0xbefff679,3 +np.float32,0x3e91ab2c,0xbfe81f3e,3 +np.float32,0x7ec53fe0,0x42fd3f6d,3 +np.float32,0x3f6cbbdc,0xbde72c8e,3 +np.float32,0x3f4df49f,0xbea0abcf,3 +np.float32,0x3e9c9638,0xbfdac674,3 +np.float32,0x7f3b82ec,0x42ff1a07,3 +np.float32,0x7f612a09,0x42ffa132,3 +np.float32,0x7ea26650,0x42fcafd3,3 +np.float32,0x3a615138,0xc122f26d,3 +np.float32,0x3f1108bd,0xbf51db39,3 +np.float32,0x6f80f6,0xc2fc65ea,3 +np.float32,0x3f7cb578,0xbc98ecb1,3 +np.float32,0x7f54d31a,0x42ff7790,3 +np.float32,0x196868,0xc3005532,3 +np.float32,0x3f01ee0a,0xbf7a7925,3 +np.float32,0x3e184013,0xc02ffb11,3 +np.float32,0xadde3,0xc3018ee3,3 +np.float32,0x252a91,0xc2ff9173,3 +np.float32,0x3f0382c2,0xbf7601a9,3 +np.float32,0x6d818c,0xc2fc7345,3 +np.float32,0x3bfbfd,0xc2fe2fdd,3 +np.float32,0x7f3cad19,0x42ff1e9a,3 +np.float32,0x4169a7,0xc2fdefdf,3 +np.float32,0x3f615d96,0xbe3c4a2b,3 +np.float32,0x3f036480,0xbf7656ac,3 +np.float32,0x7f5fbda3,0x42ff9c83,3 +np.float32,0x3d202d,0xc2fe21f1,3 +np.float32,0x3d0f5e5d,0xc09ac3e9,3 +np.float32,0x3f0fff6e,0xbf548142,3 +np.float32,0x7f11ed32,0x42fe60d2,3 +np.float32,0x3e6f856b,0xc00624b6,3 +np.float32,0x7f7c4dd7,0x42fff542,3 +np.float32,0x3e76fb86,0xc0034fa0,3 +np.float32,0x3e8a0d6e,0xbff209e7,3 +np.float32,0x3eacad19,0xbfc8b6ad,3 +np.float32,0xa7776,0xc3019cbe,3 +np.float32,0x3dc84d74,0xc056a754,3 +np.float32,0x3efb8052,0xbf834626,3 +np.float32,0x3f0e55fc,0xbf58cacc,3 +np.float32,0x7e0e71e3,0x42fa4efb,3 +np.float32,0x3ed5a800,0xbfa1639c,3 +np.float32,0x3f33335b,0xbf03babf,3 +np.float32,0x38cad7,0xc2fe5842,3 +np.float32,0x3bc21256,0xc0ecc927,3 +np.float32,0x3f09522d,0xbf660a19,3 +np.float32,0xcbd5d,0xc3015428,3 +np.float32,0x492752,0xc2fd9d42,3 +np.float32,0x3f2b9b32,0xbf13b904,3 +np.float32,0x6544ac,0xc2fcad09,3 +np.float32,0x52eb12,0xc2fd40b5,3 +np.float32,0x3f66a7c0,0xbe1a03e8,3 +np.float32,0x7ab289,0xc2fc1f41,3 +np.float32,0x62af5e,0xc2fcc020,3 +np.float32,0x7f73e9cf,0x42ffdc46,3 +np.float32,0x3e5eca,0xc2fe130e,3 +np.float32,0x3e3a10f4,0xc01d7602,3 +np.float32,0x3f04db46,0xbf723f0d,3 +np.float32,0x18fc4a,0xc3005b63,3 +np.float32,0x525bcb,0xc2fd45b6,3 +np.float32,0x3f6b9108,0xbdf5c769,3 +np.float32,0x3e992e8c,0xbfded5c5,3 +np.float32,0x7efea647,0x42fdfc18,3 +np.float32,0x7e8371db,0x42fc139e,3 +np.float32,0x3f397cfb,0xbeedfc69,3 +np.float32,0x7e46d233,0x42fb454a,3 +np.float32,0x7d5281ad,0x42f76f79,3 +np.float32,0x7f4c1878,0x42ff58a1,3 +np.float32,0x3e96ca5e,0xbfe1bd97,3 +np.float32,0x6a2743,0xc2fc8a3d,3 +np.float32,0x7f688781,0x42ffb8f8,3 +np.float32,0x7814b7,0xc2fc2f2d,3 +np.float32,0x3f2ffdc9,0xbf0a6756,3 +np.float32,0x3f766fa8,0xbd60fe24,3 +np.float32,0x4dc64e,0xc2fd7003,3 +np.float32,0x3a296f,0xc2fe46a8,3 +np.float32,0x3f2af942,0xbf15162e,3 +np.float32,0x7f702c32,0x42ffd0dc,3 +np.float32,0x7e61e318,0x42fba390,3 +np.float32,0x7f7d3bdb,0x42fff7fa,3 +np.float32,0x3ee87f3f,0xbf91c881,3 +np.float32,0x2bbc28,0xc2ff193c,3 +np.float32,0x3e01f918,0xc03e966e,3 +np.float32,0x7f0b39f4,0x42fe3e1a,3 +np.float32,0x3eaa4d64,0xbfcb4516,3 
+np.float32,0x3e53901e,0xc0119a88,3 +np.float32,0x603cb,0xc3026957,3 +np.float32,0x7e81f926,0x42fc0b4d,3 +np.float32,0x5dab7c,0xc2fce6a6,3 +np.float32,0x3f46fefd,0xbeba1018,3 +np.float32,0x648448,0xc2fcb28a,3 +np.float32,0x3ec49470,0xbfb0c58b,3 +np.float32,0x3e8a5393,0xbff1ac2b,3 +np.float32,0x3f27ccfc,0xbf1c014e,3 +np.float32,0x3ed886e6,0xbf9eeca8,3 +np.float32,0x7cfbe06e,0x42f5f401,3 +np.float32,0x3f5aa7ba,0xbe68f229,3 +np.float32,0x9500d,0xc301c7e3,3 +np.float32,0x3f4861,0xc2fe0853,3 +np.float32,0x3e5ae104,0xc00e76f5,3 +np.float32,0x71253a,0xc2fc5b1e,3 +np.float32,0xcf7b8,0xc3014d9c,3 +np.float32,0x7f7edd2d,0x42fffcb7,3 +np.float32,0x3e9039ee,0xbfe9f5ab,3 +np.float32,0x2fd54e,0xc2fed712,3 +np.float32,0x3f600752,0xbe45147a,3 +np.float32,0x3f4da8f6,0xbea1bb5c,3 +np.float32,0x3f2d34a9,0xbf104bd9,3 +np.float32,0x3e1e66dd,0xc02c52d2,3 +np.float32,0x798276,0xc2fc2670,3 +np.float32,0xd55e2,0xc3014347,3 +np.float32,0x80000001,0xffc00000,3 +np.float32,0x3e7a5ead,0xc0020da6,3 +np.float32,0x7ec4c744,0x42fd3da9,3 +np.float32,0x597e00,0xc2fd085a,3 +np.float32,0x3dff6bf4,0xc0403575,3 +np.float32,0x5d6f1a,0xc2fce883,3 +np.float32,0x7e21faff,0x42faadea,3 +np.float32,0x3e570fea,0xc01016c6,3 +np.float32,0x28e6b6,0xc2ff4ab7,3 +np.float32,0x7e77062d,0x42fbe5a3,3 +np.float32,0x74cac4,0xc2fc43b0,3 +np.float32,0x3f707273,0xbdb93078,3 +np.float32,0x228e96,0xc2ffc737,3 +np.float32,0x686ac1,0xc2fc966b,3 +np.float32,0x3d76400d,0xc081cae8,3 +np.float32,0x3e9f502f,0xbfd7966b,3 +np.float32,0x3f6bc656,0xbdf32b1f,3 +np.float32,0x3edb828b,0xbf9c65d4,3 +np.float32,0x6c6e56,0xc2fc7a8e,3 +np.float32,0x3f04552e,0xbf73b48f,3 +np.float32,0x3f39cb69,0xbeecc457,3 +np.float32,0x7f681c44,0x42ffb7a3,3 +np.float32,0x7f5b44ee,0x42ff8d99,3 +np.float32,0x3e71430a,0xc005798d,3 +np.float32,0x3edcfde3,0xbf9b27c6,3 +np.float32,0x3f616a5a,0xbe3bf67f,3 +np.float32,0x3f523936,0xbe918548,3 +np.float32,0x3f39ce3a,0xbeecb925,3 +np.float32,0x3eac589a,0xbfc91120,3 +np.float32,0x7efc8d3d,0x42fdf5fc,3 +np.float32,0x5704b0,0xc2fd1d0f,3 +np.float32,0x7e7972e9,0x42fbecda,3 +np.float32,0x3eb0811c,0xbfc4aa13,3 +np.float32,0x7f1efcbb,0x42fea023,3 +np.float32,0x3e0b9e32,0xc037fa6b,3 +np.float32,0x7eef6a48,0x42fdce87,3 +np.float32,0x3cc0a373,0xc0ad20c0,3 +np.float32,0x3f2a75bb,0xbf1632ba,3 +np.float32,0x0,0xff800000,3 +np.float32,0x7ecdb6f4,0x42fd5e77,3 +np.float32,0x7f2e2dfd,0x42fee38d,3 +np.float32,0x3ee17f6e,0xbf976d8c,3 +np.float32,0x3f51e7ee,0xbe92a319,3 +np.float32,0x3f06942f,0xbf6d7d3c,3 +np.float32,0x3f7ba528,0xbccac6f1,3 +np.float32,0x3f413787,0xbecfd513,3 +np.float32,0x3e085e48,0xc03a2716,3 +np.float32,0x7e4c5e0e,0x42fb599c,3 +np.float32,0x306f76,0xc2fecdd4,3 +np.float32,0x7f5c2203,0x42ff9081,3 +np.float32,0x3d5355b4,0xc088da05,3 +np.float32,0x9a2a,0xc305bb4f,3 +np.float32,0x3db93a1f,0xc05de0db,3 +np.float32,0x4e50c6,0xc2fd6ae4,3 +np.float32,0x7ec4afed,0x42fd3d51,3 +np.float32,0x3a8f27,0xc2fe41a0,3 +np.float32,0x7f213caf,0x42feaa84,3 +np.float32,0x7e7b5f00,0x42fbf286,3 +np.float32,0x7e367194,0x42fb05ca,3 +np.float32,0x7f56e6de,0x42ff7ebd,3 +np.float32,0x3ed7383e,0xbfa00aef,3 +np.float32,0x7e844752,0x42fc184a,3 +np.float32,0x15157,0xc3049a19,3 +np.float32,0x3f78cd92,0xbd28824a,3 +np.float32,0x7ecddb16,0x42fd5ef9,3 +np.float32,0x3e479f16,0xc016f7d8,3 +np.float32,0x3f5cb418,0xbe5b2bd3,3 +np.float32,0x7c0934cb,0x42f2334e,3 +np.float32,0x3ebe5505,0xbfb6bc69,3 +np.float32,0x3eb1335a,0xbfc3eff5,3 +np.float32,0x3f2488a3,0xbf234444,3 +np.float32,0x642906,0xc2fcb52a,3 +np.float32,0x3da635fa,0xc067e15a,3 +np.float32,0x7e0d80db,0x42fa4a15,3 
+np.float32,0x4f0b9d,0xc2fd640a,3 +np.float32,0x7e083806,0x42fa2df8,3 +np.float32,0x7f77f8c6,0x42ffe877,3 +np.float32,0x3e7bb46a,0xc0018ff5,3 +np.float32,0x3f06eb2e,0xbf6c8eca,3 +np.float32,0x7eae8f7c,0x42fce52a,3 +np.float32,0x3de481a0,0xc04a7d7f,3 +np.float32,0x3eed4311,0xbf8e096f,3 +np.float32,0x3f7b0300,0xbce8903d,3 +np.float32,0x3811b,0xc30330dd,3 +np.float32,0x3eb6f8e1,0xbfbe04bc,3 +np.float32,0x3ec35210,0xbfb1f55a,3 +np.float32,0x3d386916,0xc08f24a5,3 +np.float32,0x3f1fa197,0xbf2e704d,3 +np.float32,0x7f2020a5,0x42fea56a,3 +np.float32,0x7e1ea53f,0x42fa9e8c,3 +np.float32,0x3f148903,0xbf490bf9,3 +np.float32,0x3f2f56a0,0xbf0bc6c9,3 +np.float32,0x7da9fc,0xc2fc0d9b,3 +np.float32,0x3d802134,0xc07fe810,3 +np.float32,0x3f6cb927,0xbde74e57,3 +np.float32,0x7e05b125,0x42fa2023,3 +np.float32,0x3f3307f9,0xbf041433,3 +np.float32,0x5666bf,0xc2fd2250,3 +np.float32,0x3f51c93b,0xbe930f28,3 +np.float32,0x3eb5dcfe,0xbfbf241e,3 +np.float32,0xb2773,0xc301853f,3 +np.float32,0x7f4dee96,0x42ff5f3f,3 +np.float32,0x3e3f5c33,0xc01adee1,3 +np.float32,0x3f2ed29a,0xbf0cdd4a,3 +np.float32,0x3e3c01ef,0xc01c80ab,3 +np.float32,0x3ec2236e,0xbfb31458,3 +np.float32,0x7e841dc4,0x42fc1761,3 +np.float32,0x3df2cd8e,0xc044e30c,3 +np.float32,0x3f010901,0xbf7d0670,3 +np.float32,0x3c05ceaa,0xc0ddf39b,3 +np.float32,0x3f517226,0xbe944206,3 +np.float32,0x3f23c83d,0xbf24f522,3 +np.float32,0x7fc9da,0xc2fc0139,3 +np.float32,0x7f1bde53,0x42fe9181,3 +np.float32,0x3ea3786c,0xbfd2d4a5,3 +np.float32,0x3e83a71b,0xbffacdd2,3 +np.float32,0x3f6f0d4f,0xbdca61d5,3 +np.float32,0x7f5ab613,0x42ff8bb7,3 +np.float32,0x3ab1ec,0xc2fe3fea,3 +np.float32,0x4fbf58,0xc2fd5d82,3 +np.float32,0x3dea141b,0xc0484403,3 +np.float32,0x7d86ad3b,0x42f8258f,3 +np.float32,0x7f345315,0x42fefd29,3 +np.float32,0x3f3752fe,0xbef6a780,3 +np.float32,0x64830d,0xc2fcb293,3 +np.float32,0x3d9dc1eb,0xc06cb32a,3 +np.float32,0x3f2f935a,0xbf0b46f6,3 +np.float32,0xb90a4,0xc30177e3,3 +np.float32,0x4111dd,0xc2fdf3c1,3 +np.float32,0x3d4cd078,0xc08a4c68,3 +np.float32,0x3e95c3f1,0xbfe30011,3 +np.float32,0x3ec9f356,0xbfabcb4e,3 +np.float32,0x1b90d5,0xc3003717,3 +np.float32,0xee70f,0xc3011a3e,3 +np.float32,0x7fa00000,0x7fe00000,3 +np.float32,0x3f74cdb6,0xbd8422af,3 +np.float32,0x3d9b56fe,0xc06e2037,3 +np.float32,0x3f1853df,0xbf3fbc40,3 +np.float32,0x7d86a011,0x42f82547,3 +np.float32,0x3dff9629,0xc0402634,3 +np.float32,0x46f8c9,0xc2fdb39f,3 +np.float32,0x3e9b410b,0xbfdc5a87,3 +np.float32,0x3f5aed42,0xbe671cac,3 +np.float32,0x3b739886,0xc101257f,3 +np.float64,0x3fe2f58d6565eb1b,0xbfe82a641138e19a,1 +np.float64,0x3fee7f0642fcfe0d,0xbfb1c702f6974932,1 +np.float64,0x25b71f244b6e5,0xc090030d3b3c5d2b,1 +np.float64,0x8c9cc8e1193b,0xc0900b752a678fa8,1 +np.float64,0x3fd329b5d326536c,0xbffbd607f6db945c,1 +np.float64,0x3fb5109b3a2a2136,0xc00cd36bd15dfb18,1 +np.float64,0x3fd5393ae12a7276,0xbff97a7e4a157154,1 +np.float64,0x3fd374d1b926e9a3,0xbffb7c3e1a3a7ed3,1 +np.float64,0x3fe2c7f4e2658fea,0xbfe899f15ca78fcb,1 +np.float64,0x7fe3d6b81ee7ad6f,0x408ffa7b63d407ee,1 +np.float64,0x3fe086d097e10da1,0xbfee81456ce8dd03,1 +np.float64,0x7fd374a64ca6e94c,0x408ff241c7306d39,1 +np.float64,0x3fc0709a5b20e135,0xc007afdede31b29c,1 +np.float64,0x3fd4218f4b28431f,0xbffab2c696966e2d,1 +np.float64,0x143134c828628,0xc09006a8372c4d8a,1 +np.float64,0x3f8bd0aa0037a154,0xc018cf0e8b9c3107,1 +np.float64,0x7fe0ce905ee19d20,0x408ff8915e71bd67,1 +np.float64,0x3fda0f5f32b41ebe,0xbff4bd5e0869e820,1 +np.float64,0x7fe9ae63d0b35cc7,0x408ffd760ca4f292,1 +np.float64,0x3fe75abd9eeeb57b,0xbfdd1476fc8b3089,1 
+np.float64,0x786c3110f0d87,0xc08ff8b44cedbeea,1 +np.float64,0x22c5fe80458d,0xc09013853591c2f2,1 +np.float64,0x3fdc250797384a0f,0xbff2f6a02c961f0b,1 +np.float64,0x3fa2b367b02566cf,0xc013199238485054,1 +np.float64,0x3fd26a910ca4d522,0xbffcc0e2089b1c0c,1 +np.float64,0x8068d3b300d1b,0xc08ff7f690210aac,1 +np.float64,0x3fe663bfa9ecc77f,0xbfe07cd95a43a5ce,1 +np.float64,0x3fd0ddb07321bb61,0xbffec886665e895e,1 +np.float64,0x3f91c730b0238e61,0xc0176452badc8d22,1 +np.float64,0x4dd10d309ba22,0xc08ffdbe738b1d8d,1 +np.float64,0x7fe322afa4a6455e,0x408ffa10c038f9de,1 +np.float64,0x7fdf7f7c42befef8,0x408ff7d147ddaad5,1 +np.float64,0x7fd673f386ace7e6,0x408ff3e920d00eef,1 +np.float64,0x3feaebfcadb5d7f9,0xbfcfe8ec27083478,1 +np.float64,0x3fdc6dc23738db84,0xbff2bb46794f07b8,1 +np.float64,0xcd8819599b103,0xc08ff288c5b2cf0f,1 +np.float64,0xfda00e77fb402,0xc08ff01b895d2236,1 +np.float64,0x840b02ff08161,0xc08ff7a41e41114c,1 +np.float64,0x3fbdce3a383b9c74,0xc008d1e61903a289,1 +np.float64,0x3fd24ed3c4a49da8,0xbffce3c12136b6d3,1 +np.float64,0x3fe8d0834131a107,0xbfd77b194e7051d4,1 +np.float64,0x3fdd0cb11aba1962,0xbff23b9dbd554455,1 +np.float64,0x1a32d97e3465c,0xc090052781a37271,1 +np.float64,0x3fdb09d2b1b613a5,0xbff3e396b862bd83,1 +np.float64,0x3fe04c848aa09909,0xbfef2540dd90103a,1 +np.float64,0x3fce0c48613c1891,0xc000b9f76877d744,1 +np.float64,0x3fc37109a226e213,0xc005c05d8b2b9a2f,1 +np.float64,0x81cf3837039e7,0xc08ff7d686517dff,1 +np.float64,0xd9342c29b2686,0xc08ff1e591c9a895,1 +np.float64,0x7fec731b0638e635,0x408ffea4884550a9,1 +np.float64,0x3fba0fc138341f82,0xc00a5e839b085f64,1 +np.float64,0x7fdda893b03b5126,0x408ff71f7c5a2797,1 +np.float64,0xd2a4bb03a5498,0xc08ff2402f7a907c,1 +np.float64,0x3fea61fb0d34c3f6,0xbfd1d293fbe76183,1 +np.float64,0x3fed5cf486fab9e9,0xbfbfc2e01a7ffff1,1 +np.float64,0x3fcbabc2bf375785,0xc001ad7750c9dbdf,1 +np.float64,0x3fdb5fff53b6bfff,0xbff39a7973a0c6a5,1 +np.float64,0x7feef05a00bde0b3,0x408fff9c5cbc8651,1 +np.float64,0xb1cf24f1639e5,0xc08ff434de10fffb,1 +np.float64,0x3fa583989c2b0731,0xc0124a8a3bbf18ce,1 +np.float64,0x7feae90bf9f5d217,0x408ffe002e7bbbea,1 +np.float64,0x3fe9ef41c4b3de84,0xbfd367878ae4528e,1 +np.float64,0x9be24ce337c4a,0xc08ff5b9b1c31cf9,1 +np.float64,0x3fe916894cb22d13,0xbfd677f915d58503,1 +np.float64,0x3fec1bab20f83756,0xbfc7f2777aabe8ee,1 +np.float64,0x3feaabf2873557e5,0xbfd0d11f28341233,1 +np.float64,0x3fd4d3c3b529a787,0xbff9e9e47acc8ca9,1 +np.float64,0x3fe4cfe96c699fd3,0xbfe3dc53fa739169,1 +np.float64,0xccfdb97399fb7,0xc08ff2908d893400,1 +np.float64,0x3fec7598be78eb31,0xbfc5a750f8f3441a,1 +np.float64,0x355be5fc6ab7e,0xc090010ca315b50b,1 +np.float64,0x3fba9f9074353f21,0xc00a1f80eaf5e581,1 +np.float64,0x7fdcaff189395fe2,0x408ff6bd1c5b90d9,1 +np.float64,0x3fd94d3b64b29a77,0xbff56be1b43d25f3,1 +np.float64,0x4e5f29949cbe6,0xc08ffda972da1d73,1 +np.float64,0x3fe654e2d9aca9c6,0xbfe09b88dcd8f15d,1 +np.float64,0x7fdc130190b82602,0x408ff67d496c1a27,1 +np.float64,0x3fbcd4701e39a8e0,0xc009343e36627e80,1 +np.float64,0x7fdaa4d38f3549a6,0x408ff5e2c6d8678f,1 +np.float64,0x3febe95e5237d2bd,0xbfc93e16d453fe3a,1 +np.float64,0x9ef5ca553deba,0xc08ff57ff4f7883d,1 +np.float64,0x7fe878e91170f1d1,0x408ffce795868fc8,1 +np.float64,0x3fe63dff466c7bff,0xbfe0caf2b79c9e5f,1 +np.float64,0x6561446ccac29,0xc08ffab0e383834c,1 +np.float64,0x30c6c2ae618d9,0xc09001914b30381b,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0x3fe5c9daf1ab93b6,0xbfe1be81baf4dbdb,1 +np.float64,0x3fe0a03e24a1407c,0xbfee3a73c4c0e8f8,1 +np.float64,0xff2a2cf3fe546,0xc08ff009a7e6e782,1 
+np.float64,0x7fcf0332213e0663,0x408fefa36235e210,1 +np.float64,0x3fb612affc2c2560,0xc00c494be9c8c33b,1 +np.float64,0x3fd2b259702564b3,0xbffc67967f077e75,1 +np.float64,0x7fcb63685d36c6d0,0x408fee343343f913,1 +np.float64,0x3fe369f1d5a6d3e4,0xbfe71251139939ad,1 +np.float64,0x3fdd17c618ba2f8c,0xbff232d11c986251,1 +np.float64,0x3f92cc8040259901,0xc01711d8e06b52ee,1 +np.float64,0x69a81dc2d3504,0xc08ffa36cdaf1141,1 +np.float64,0x3fea0fad99b41f5b,0xbfd2f4625a652645,1 +np.float64,0xd1cd5799a39ab,0xc08ff24c02b90d26,1 +np.float64,0x324e59ce649cc,0xc0900163ad091c76,1 +np.float64,0x3fc3d460a227a8c1,0xc00585f903dc7a7f,1 +np.float64,0xa7185ec74e30c,0xc08ff4ec7d65ccd9,1 +np.float64,0x3fa254eaac24a9d5,0xc01337053963321a,1 +np.float64,0x3feaeb112435d622,0xbfcfef3be17f81f6,1 +np.float64,0x60144c3ac028a,0xc08ffb4f8eb94595,1 +np.float64,0x7fa4d2ec6829a5d8,0x408fdb0a9670ab83,1 +np.float64,0x3fed1372f97a26e6,0xbfc1b1fe50d48a55,1 +np.float64,0x3fd5ade5972b5bcb,0xbff8fcf28f525031,1 +np.float64,0x7fe72e335bee5c66,0x408ffc4759236437,1 +np.float64,0x7fdfafab143f5f55,0x408ff7e2e22a8129,1 +np.float64,0x3fe90d0db9321a1b,0xbfd69ae5fe10eb9e,1 +np.float64,0x7fe20a59072414b1,0x408ff962a2492484,1 +np.float64,0x3fed853690bb0a6d,0xbfbdc9dc5f199d2b,1 +np.float64,0x3fd709d469ae13a9,0xbff795a218deb700,1 +np.float64,0x3fe21c35f5e4386c,0xbfea47d71789329b,1 +np.float64,0x9ea5ec053d4be,0xc08ff585c2f6b7a3,1 +np.float64,0x3fc0580f9e20b01f,0xc007c1268f49d037,1 +np.float64,0xd99127abb3225,0xc08ff1e0a1ff339d,1 +np.float64,0x3fdc8c9bbfb91937,0xbff2a2478354effb,1 +np.float64,0x3fe15fc6b162bf8d,0xbfec323ac358e008,1 +np.float64,0xffefffffffffffff,0x7ff8000000000000,1 +np.float64,0x3fee341afb3c6836,0xbfb556b6faee9a84,1 +np.float64,0x3fe4b64c56296c99,0xbfe4154835ad2afe,1 +np.float64,0x85de22810bbc5,0xc08ff77b914fe5b5,1 +np.float64,0x3fd22c72e3a458e6,0xbffd0f4269d20bb9,1 +np.float64,0xc090e5218123,0xc09009a4a65a8a8f,1 +np.float64,0x7fd9641692b2c82c,0x408ff5547782bdfc,1 +np.float64,0x3fd9b9cb28b37396,0xbff509a8fb59a9f1,1 +np.float64,0x3fcd2726f93a4e4e,0xc001135059a22117,1 +np.float64,0x3fa4b493d4296928,0xc0128323c7a55f4a,1 +np.float64,0x47455e788e8ac,0xc08ffec2101c1e82,1 +np.float64,0x3fe0d7e2e261afc6,0xbfeda0f1e2d0f4bd,1 +np.float64,0x3fe860fc5b70c1f9,0xbfd91dc42eaf72c2,1 +np.float64,0xa5d7805b4baf0,0xc08ff502bc819ff6,1 +np.float64,0xd83395b1b0673,0xc08ff1f33c3f94c2,1 +np.float64,0x3f865972e02cb2e6,0xc01a1243651565c8,1 +np.float64,0x52fc6952a5f8e,0xc08ffd006b158179,1 +np.float64,0x7fecac6c793958d8,0x408ffebbb1c09a70,1 +np.float64,0x7fe621ff606c43fe,0x408ffbbeb2b1473a,1 +np.float64,0x3fdb9f3f9db73e7f,0xbff365610c52bda7,1 +np.float64,0x7feab92992757252,0x408ffdeb92a04813,1 +np.float64,0xcc46c79f988d9,0xc08ff29adf03fb7c,1 +np.float64,0x3fe3156a03262ad4,0xbfe7dd0f598781c7,1 +np.float64,0x3fc00e3a61201c75,0xc007f5c121a87302,1 +np.float64,0x3fdce8e9f739d1d4,0xbff2581d41ef50ef,1 +np.float64,0x0,0xfff0000000000000,1 +np.float64,0x7d373ac4fa6e8,0xc08ff840fa8beaec,1 +np.float64,0x3fee41e0653c83c1,0xbfb4ae786f2a0d54,1 +np.float64,0x3ff0000000000000,0x0,1 +np.float64,0x7feca6fff9794dff,0x408ffeb982a70556,1 +np.float64,0x7fc532716d2a64e2,0x408feb3f0f6c095b,1 +np.float64,0x3fe4ec2954a9d853,0xbfe39dd44aa5a040,1 +np.float64,0x7fd3321d52a6643a,0x408ff21a0ab9cd85,1 +np.float64,0x7fd8f1b2dfb1e365,0x408ff52001fa7922,1 +np.float64,0x3fee5e58cabcbcb2,0xbfb3539734a24d8b,1 +np.float64,0x3feebf6e7dfd7edd,0xbfad7c648f025102,1 +np.float64,0x6008026ec0101,0xc08ffb5108b54a93,1 +np.float64,0x3fea06f5e2340dec,0xbfd3134a48283360,1 
+np.float64,0x41cad13c8395b,0xc08fffae654b2426,1 +np.float64,0x7fedb5c9353b6b91,0x408fff249f1f32b6,1 +np.float64,0xe00c5af9c018c,0xc08ff189e68c655f,1 +np.float64,0x7feac398ddf58731,0x408ffdf01374de9f,1 +np.float64,0x3fed21127c7a4225,0xbfc15b8cf55628fa,1 +np.float64,0x3fd3446711a688ce,0xbffbb5f7252a9fa3,1 +np.float64,0x7fe75fa07a6ebf40,0x408ffc5fdb096018,1 +np.float64,0x3feeb1618cbd62c3,0xbfaece3bd0863070,1 +np.float64,0x7f5226e180244dc2,0x408fb174d506e52f,1 +np.float64,0x3fcd67deca3acfbe,0xc000f9cd7a490749,1 +np.float64,0xdc6f30efb8de6,0xc08ff1b9f2a22d2e,1 +np.float64,0x9c14931338293,0xc08ff5b5f975ec5d,1 +np.float64,0x7fe93e802df27cff,0x408ffd4354eba0e0,1 +np.float64,0x3feb92ae5077255d,0xbfcb7f2084e44dbb,1 +np.float64,0xd78dbfddaf1b8,0xc08ff1fc19fa5a13,1 +np.float64,0x7fe14c301fa2985f,0x408ff8e666cb6592,1 +np.float64,0xbda3d8b77b47b,0xc08ff37689f4b2e5,1 +np.float64,0x8a42953b14853,0xc08ff71c2db3b8cf,1 +np.float64,0x7fe4ca7e186994fb,0x408ffb05e94254a7,1 +np.float64,0x7fe92ffc5e325ff8,0x408ffd3cb0265b12,1 +np.float64,0x91b262912364d,0xc08ff681619be214,1 +np.float64,0x33fe2b0667fc6,0xc0900132f3fab55e,1 +np.float64,0x3fde10e9183c21d2,0xbff17060fb4416c7,1 +np.float64,0xb6b811cb6d702,0xc08ff3e46303b541,1 +np.float64,0x3fe4a7bda0a94f7b,0xbfe435c6481cd0e3,1 +np.float64,0x7fd9fe6057b3fcc0,0x408ff599c79a822c,1 +np.float64,0x3fef44bf917e897f,0xbfa11484e351a6e9,1 +np.float64,0x3fe57d701daafae0,0xbfe2618ab40fc01b,1 +np.float64,0x7fe52d2adbaa5a55,0x408ffb3c2fb1c99d,1 +np.float64,0xb432f66d6865f,0xc08ff40d6b4084fe,1 +np.float64,0xbff0000000000000,0x7ff8000000000000,1 +np.float64,0x7fecd2292bf9a451,0x408ffecad860de6f,1 +np.float64,0x3fddd2ae153ba55c,0xbff1a059adaca33e,1 +np.float64,0x3fee55d6e5bcabae,0xbfb3bb1c6179d820,1 +np.float64,0x7fc1d0085623a010,0x408fe93d16ada7a7,1 +np.float64,0x829b000105360,0xc08ff7c47629a68f,1 +np.float64,0x7fe1e0257523c04a,0x408ff94782cf0717,1 +np.float64,0x7fd652f9ad2ca5f2,0x408ff3d820ec892e,1 +np.float64,0x3fef2246203e448c,0xbfa444ab6209d8cd,1 +np.float64,0x3fec6c0ae178d816,0xbfc5e559ebd4e790,1 +np.float64,0x3fe6ddfee92dbbfe,0xbfdf06dd7d3fa7a8,1 +np.float64,0x3fb7fbcbea2ff798,0xc00b5404d859d148,1 +np.float64,0x7feb9a154d37342a,0x408ffe4b26c29e55,1 +np.float64,0x3fe4db717aa9b6e3,0xbfe3c2c6b3ef13bc,1 +np.float64,0x3fbae17dda35c2fc,0xc00a030f7f4b37e7,1 +np.float64,0x7fd632b9082c6571,0x408ff3c76826ef19,1 +np.float64,0x7fc4184a15283093,0x408feaa14adf00be,1 +np.float64,0x3fe052d19920a5a3,0xbfef136b5df81a3e,1 +np.float64,0x7fe38b872b67170d,0x408ffa4f51aafc86,1 +np.float64,0x3fef9842d03f3086,0xbf92d3d2a21d4be2,1 +np.float64,0x9cea662139d4d,0xc08ff5a634810daa,1 +np.float64,0x3fe35f0855e6be11,0xbfe72c4b564e62aa,1 +np.float64,0x3fecee3d3779dc7a,0xbfc29ee942f8729e,1 +np.float64,0x3fe7903fd72f2080,0xbfdc41db9b5f4048,1 +np.float64,0xb958889572b11,0xc08ff3ba366cf84b,1 +np.float64,0x3fcb3a67c53674d0,0xc001dd21081ad1ea,1 +np.float64,0xe3b1b53fc7637,0xc08ff15a3505e1ce,1 +np.float64,0xe5954ae9cb2aa,0xc08ff141cbbf0ae4,1 +np.float64,0x3fe394af74e7295f,0xbfe6ad1d13f206e8,1 +np.float64,0x7fe21dd704643bad,0x408ff96f13f80c1a,1 +np.float64,0x3fd23a7cf02474fa,0xbffcfd7454117a05,1 +np.float64,0x7fe257515e24aea2,0x408ff99378764d52,1 +np.float64,0x7fe4c5d0a6e98ba0,0x408ffb03503cf939,1 +np.float64,0x3fadc2c1603b8583,0xc0106b2c17550e3a,1 +np.float64,0x3fc0f7f02421efe0,0xc007525ac446864c,1 +np.float64,0x3feaf0b27275e165,0xbfcfc8a03eaa32ad,1 +np.float64,0x5ce7503cb9ceb,0xc08ffbb2de365fa8,1 +np.float64,0x2a0014f654003,0xc090026e41761a0d,1 +np.float64,0x7fe2c848a8e59090,0x408ff9d9b723ee89,1 
+np.float64,0x7f66f54bc02dea97,0x408fbc2ae0ec5623,1 +np.float64,0xa35a890146b6,0xc0900a97b358ddbd,1 +np.float64,0x7fee267ded7c4cfb,0x408fff501560c9f5,1 +np.float64,0x3fe07c328520f865,0xbfee9ef7c3435b58,1 +np.float64,0x3fe67122cf6ce246,0xbfe06147001932ba,1 +np.float64,0x3fdacc8925359912,0xbff41824cece219e,1 +np.float64,0xffa3047fff461,0xc08ff00431ec9be3,1 +np.float64,0x3e1af43e7c35f,0xc090002c6573d29b,1 +np.float64,0x86fa94590df53,0xc08ff7632525ed92,1 +np.float64,0x7fec4c76227898eb,0x408ffe94d032c657,1 +np.float64,0x7fe2274ce1e44e99,0x408ff975194cfdff,1 +np.float64,0x7fe670e1b4ace1c2,0x408ffbe78cc451de,1 +np.float64,0x7fe853871db0a70d,0x408ffcd5e6a6ff47,1 +np.float64,0x3fcbf265db37e4cc,0xc0019026336e1176,1 +np.float64,0x3fef033cef3e067a,0xbfa726712eaae7f0,1 +np.float64,0x5d74973abae94,0xc08ffba15e6bb992,1 +np.float64,0x7fdd9c99b6bb3932,0x408ff71ad24a7ae0,1 +np.float64,0xbdc8e09b7b91c,0xc08ff3744939e9a3,1 +np.float64,0xdbfcff71b7fa0,0xc08ff1bfeecc9dfb,1 +np.float64,0xf9b38cf5f3672,0xc08ff0499af34a43,1 +np.float64,0x3fea820aa6b50415,0xbfd162a38e1927b1,1 +np.float64,0x3fe67f59a12cfeb3,0xbfe04412adca49dc,1 +np.float64,0x3feb301d9c76603b,0xbfce17e6edeb92d5,1 +np.float64,0x828ce00b0519c,0xc08ff7c5b5c57cde,1 +np.float64,0x4f935e229f26c,0xc08ffd7c67c1c54f,1 +np.float64,0x7fcd139e023a273b,0x408feee4f12ff11e,1 +np.float64,0x666a9944ccd54,0xc08ffa92d5e5cd64,1 +np.float64,0x3fe792f0fa6f25e2,0xbfdc374fda28f470,1 +np.float64,0xe996029bd32c1,0xc08ff10eb9b47a11,1 +np.float64,0x3fe7b0dd1eef61ba,0xbfdbc2676dc77db0,1 +np.float64,0x7fd3ec0127a7d801,0x408ff287bf47e27d,1 +np.float64,0x3fe793a8ea6f2752,0xbfdc347f7717e48d,1 +np.float64,0x7fdb89d15e3713a2,0x408ff64457a13ea2,1 +np.float64,0x3fe35b3cbbe6b679,0xbfe73557c8321b70,1 +np.float64,0x66573c94ccae8,0xc08ffa9504af7eb5,1 +np.float64,0x3fc620a2302c4144,0xc00442036b944a67,1 +np.float64,0x49b2fe0693660,0xc08ffe5f131c3c7e,1 +np.float64,0x7fda936cdfb526d9,0x408ff5db3ab3f701,1 +np.float64,0xc774ceef8ee9a,0xc08ff2e16d082fa1,1 +np.float64,0x4da9f8a09b55,0xc0900ee2206d0c88,1 +np.float64,0x3fe2ca5d5ae594bb,0xbfe89406611a5f1a,1 +np.float64,0x7fe0832497e10648,0x408ff85d1de6056e,1 +np.float64,0x3fe6a9e3222d53c6,0xbfdfda35a9bc2de1,1 +np.float64,0x3fed3d92c8ba7b26,0xbfc0a73620db8b98,1 +np.float64,0x3fdd2ec093ba5d81,0xbff2209cf78ce3f1,1 +np.float64,0x62fcb968c5f98,0xc08ffaf775a593c7,1 +np.float64,0xfcfb019ff9f60,0xc08ff0230e95bd16,1 +np.float64,0x3fd7a63e8f2f4c7d,0xbff6faf4fff7dbe0,1 +np.float64,0x3fef23b0ec3e4762,0xbfa4230cb176f917,1 +np.float64,0x340d1e6a681a5,0xc09001314b68a0a2,1 +np.float64,0x7fc0b85ba02170b6,0x408fe8821487b802,1 +np.float64,0x7fe9976e84f32edc,0x408ffd6bb6aaf467,1 +np.float64,0x329a0e9e65343,0xc090015b044e3270,1 +np.float64,0x3fea4928d3f49252,0xbfd2299b05546eab,1 +np.float64,0x3f188c70003118e0,0xc02ac3ce23bc5d5a,1 +np.float64,0x3fecce5020b99ca0,0xbfc36b23153d5f50,1 +np.float64,0x3fe203873e24070e,0xbfea86edb3690830,1 +np.float64,0x3fe02d9eaa205b3d,0xbfef7d18c54a76d2,1 +np.float64,0xef7537ebdeea7,0xc08ff0c55e9d89e7,1 +np.float64,0x3fedf7572efbeeae,0xbfb840af357cf07c,1 +np.float64,0xd1a97a61a354,0xc0900926fdfb96cc,1 +np.float64,0x7fe6a0daeced41b5,0x408ffc001edf1407,1 +np.float64,0x3fe5063625aa0c6c,0xbfe3647cfb949d62,1 +np.float64,0x7fe9b28d31736519,0x408ffd77eb4a922b,1 +np.float64,0x7feea90d033d5219,0x408fff81a4bbff62,1 +np.float64,0x3fe9494d17f2929a,0xbfd5bde02eb5287a,1 +np.float64,0x7feee17a8cbdc2f4,0x408fff96cf0dc16a,1 +np.float64,0xb2ad18ef655a3,0xc08ff4267eda8af8,1 +np.float64,0x3fad3b52683a76a5,0xc01085ab75b797ce,1 
+np.float64,0x2300a65846016,0xc090037b81ce9500,1 +np.float64,0x3feb1041f9b62084,0xbfcef0c87d8b3249,1 +np.float64,0x3fdd887d3e3b10fa,0xbff1da0e1ede6db2,1 +np.float64,0x3fd3e410eb27c822,0xbffaf9b5fc9cc8cc,1 +np.float64,0x3fe0aa53e3e154a8,0xbfee1e7b5c486578,1 +np.float64,0x7fe33e389aa67c70,0x408ffa214fe50961,1 +np.float64,0x3fd27e3a43a4fc75,0xbffca84a79e8adeb,1 +np.float64,0x3fb309e0082613c0,0xc00dfe407b77a508,1 +np.float64,0x7feaf2ed8cf5e5da,0x408ffe046a9d1ba9,1 +np.float64,0x1e76167a3cec4,0xc0900448cd35ec67,1 +np.float64,0x3fe0a18e1721431c,0xbfee36cf1165a0d4,1 +np.float64,0x3fa73b78c02e76f2,0xc011d9069823b172,1 +np.float64,0x3fef6d48287eda90,0xbf9ab2d08722c101,1 +np.float64,0x8fdf0da31fbe2,0xc08ff6a6a2accaa1,1 +np.float64,0x3fc3638db826c71b,0xc005c86191688826,1 +np.float64,0xaa9c09c555381,0xc08ff4aefe1d9473,1 +np.float64,0x7fccb0f4523961e8,0x408feebd84773f23,1 +np.float64,0xede75dcfdbcec,0xc08ff0d89ba887d1,1 +np.float64,0x7f8a051520340a29,0x408fcd9cc17f0d95,1 +np.float64,0x3fef5ca2babeb945,0xbf9dc221f3618e6a,1 +np.float64,0x7fea0ff4bcf41fe8,0x408ffda193359f22,1 +np.float64,0x7fe05c53fd20b8a7,0x408ff841dc7123e8,1 +np.float64,0x3fc625664b2c4acd,0xc0043f8749b9a1d8,1 +np.float64,0x7fed58f98f7ab1f2,0x408fff00585f48c2,1 +np.float64,0x3fb3e5e51427cbca,0xc00d7bcb6528cafe,1 +np.float64,0x3fe728bd3d6e517a,0xbfdddafa72bd0f60,1 +np.float64,0x3fe3f005dd27e00c,0xbfe5d7b3ec93bca0,1 +np.float64,0x3fd74fbd1a2e9f7a,0xbff750001b63ce81,1 +np.float64,0x3fd3af6d85a75edb,0xbffb371d678d11b4,1 +np.float64,0x7fa690ad8c2d215a,0x408fdbf7db9c7640,1 +np.float64,0x3fbdfd38e23bfa72,0xc008bfc1c5c9b89e,1 +np.float64,0x3fe2374684a46e8d,0xbfea030c4595dfba,1 +np.float64,0x7fc0806c372100d7,0x408fe85b36fee334,1 +np.float64,0x3fef3ac47b7e7589,0xbfa2007195c5213f,1 +np.float64,0x3fb55473922aa8e7,0xc00cae7af8230e0c,1 +np.float64,0x7fe018dc152031b7,0x408ff811e0d712fa,1 +np.float64,0x3fe3b3fca56767f9,0xbfe6638ae2c99c62,1 +np.float64,0x7fac79818c38f302,0x408fdea720b39c3c,1 +np.float64,0x7fefffffffffffff,0x4090000000000000,1 +np.float64,0xd2b290cba5652,0xc08ff23f6d7152a6,1 +np.float64,0x7fc5848eb52b091c,0x408feb6b6f8b77d0,1 +np.float64,0xf399f62de733f,0xc08ff092ae319ad8,1 +np.float64,0x7fdec56c12bd8ad7,0x408ff78c4ddbc667,1 +np.float64,0x3fca640f1e34c81e,0xc0023969c5cbfa4c,1 +np.float64,0x3fd55225db2aa44c,0xbff95f7442a2189e,1 +np.float64,0x7fefa009a97f4012,0x408fffdd2f42ef9f,1 +np.float64,0x4a3b70609478,0xc0900f24e449bc3d,1 +np.float64,0x7fe3738b1ba6e715,0x408ffa411f2cb5e7,1 +np.float64,0x7fe5e53f0b6bca7d,0x408ffb9ed8d95cea,1 +np.float64,0x3fe274dd24a4e9ba,0xbfe967fb114b2a83,1 +np.float64,0x3fcbc58b8c378b17,0xc001a2bb1e158bcc,1 +np.float64,0x3fefc2c0043f8580,0xbf862c9b464dcf38,1 +np.float64,0xc2c4fafd858a0,0xc08ff327aecc409b,1 +np.float64,0x3fd8bc39a9b17873,0xbff5f1ad46e5a51c,1 +np.float64,0x3fdf341656be682d,0xbff094f41e7cb4c4,1 +np.float64,0x3fef8495c13f092c,0xbf966cf6313bae4c,1 +np.float64,0x3fe14e0f05229c1e,0xbfec6166f26b7161,1 +np.float64,0x3fed42d3b2ba85a7,0xbfc0860b773d35d8,1 +np.float64,0x7fd92bbac5b25775,0x408ff53abcb3fe0c,1 +np.float64,0xb1635b6f62c6c,0xc08ff43bdf47accf,1 +np.float64,0x4a3a2dbc94746,0xc08ffe49fabddb36,1 +np.float64,0x87d831290fb06,0xc08ff750419dc6fb,1 +np.float64,0x3fec4713f7f88e28,0xbfc6d6217c9f5cf9,1 +np.float64,0x7fed43ba2d3a8773,0x408ffef7fa2fc303,1 +np.float64,0x7fd1ec5b56a3d8b6,0x408ff14f62615f1e,1 +np.float64,0x3fee534b6c7ca697,0xbfb3da1951aa3e68,1 +np.float64,0x3febb564c2b76aca,0xbfca9737062e55e7,1 +np.float64,0x943e6b0f287ce,0xc08ff64e2d09335c,1 +np.float64,0xf177d957e2efb,0xc08ff0acab2999fa,1 
+np.float64,0x7fb5b881a82b7102,0x408fe3872b4fde5e,1 +np.float64,0x3fdb2b4a97b65695,0xbff3c715c91359bc,1 +np.float64,0x3fac0a17e4381430,0xc010c330967309fb,1 +np.float64,0x7fd8057990b00af2,0x408ff4b0a287a348,1 +np.float64,0x1f9026a23f206,0xc09004144f3a19dd,1 +np.float64,0x3fdb2977243652ee,0xbff3c8a2fd05803d,1 +np.float64,0x3fe0f6e74b21edcf,0xbfed4c3bb956bae0,1 +np.float64,0xde9cc3bbbd399,0xc08ff19ce5c1e762,1 +np.float64,0x3fe72ce106ae59c2,0xbfddca7ab14ceba2,1 +np.float64,0x3fa8ee14e031dc2a,0xc01170d54ca88e86,1 +np.float64,0x3fe0b09bbb216137,0xbfee0d189a95b877,1 +np.float64,0x7fdfdcb157bfb962,0x408ff7f33cf2afea,1 +np.float64,0x3fef84d5f53f09ac,0xbf966134e2a154f4,1 +np.float64,0x3fea0e0b1bb41c16,0xbfd2fa2d36637d19,1 +np.float64,0x1ab76fd6356ef,0xc090050a9616ffbd,1 +np.float64,0x7fd0ccf79a2199ee,0x408ff09045af2dee,1 +np.float64,0x7fea929345f52526,0x408ffddadc322b07,1 +np.float64,0x3fe9ef629cf3dec5,0xbfd367129c166838,1 +np.float64,0x3feedf0ea2fdbe1d,0xbfaa862afca44c00,1 +np.float64,0x7fce725f723ce4be,0x408fef6cfd2769a8,1 +np.float64,0x7fe4313b3ca86275,0x408ffaaf9557ef8c,1 +np.float64,0xe2d46463c5a8d,0xc08ff165725c6b08,1 +np.float64,0x7fbacb4ace359695,0x408fe5f3647bd0d5,1 +np.float64,0x3fbafd009635fa01,0xc009f745a7a5c5d5,1 +np.float64,0x3fe3cea66ce79d4d,0xbfe6253b895e2838,1 +np.float64,0x7feaa71484354e28,0x408ffde3c0bad2a6,1 +np.float64,0x3fd755b8b42eab71,0xbff74a1444c6e654,1 +np.float64,0x3fc313e2172627c4,0xc005f830e77940c3,1 +np.float64,0x12d699a225ad4,0xc090070ec00f2338,1 +np.float64,0x3fa975fe8432ebfd,0xc01151b3da48b3f9,1 +np.float64,0x7fdce3103b39c61f,0x408ff6d19b3326fa,1 +np.float64,0x7fd341cbba268396,0x408ff2237490fdca,1 +np.float64,0x3fd8405885b080b1,0xbff6666d8802a7d5,1 +np.float64,0x3fe0f0cca3a1e199,0xbfed5cdb3e600791,1 +np.float64,0x7fbd56680c3aaccf,0x408fe6ff55bf378d,1 +np.float64,0x3f939c4f3027389e,0xc016d364dd6313fb,1 +np.float64,0x3fe9e87fac73d0ff,0xbfd37f9a2be4fe38,1 +np.float64,0x7fc93c6a883278d4,0x408fed4260e614f1,1 +np.float64,0x7fa88c0ff031181f,0x408fdcf09a46bd3a,1 +np.float64,0xd5487f99aa910,0xc08ff21b6390ab3b,1 +np.float64,0x3fe34acc96e69599,0xbfe75c9d290428fb,1 +np.float64,0x3fd17f5964a2feb3,0xbffdef50b524137b,1 +np.float64,0xe23dec0dc47be,0xc08ff16d1ce61dcb,1 +np.float64,0x3fec8bd64fb917ad,0xbfc5173941614b8f,1 +np.float64,0x3fc81d97d7303b30,0xc00343ccb791401d,1 +np.float64,0x7fe79ad18e2f35a2,0x408ffc7cf0ab0f2a,1 +np.float64,0x3f96306b402c60d7,0xc0161ce54754cac1,1 +np.float64,0xfb09fc97f6140,0xc08ff039d1d30123,1 +np.float64,0x3fec9c4afa793896,0xbfc4ace43ee46079,1 +np.float64,0x3f9262dac824c5b6,0xc01732a3a7eeb598,1 +np.float64,0x3fa5cd33f42b9a68,0xc01236ed4d315a3a,1 +np.float64,0x3fe7bb336caf7667,0xbfdb9a268a82e267,1 +np.float64,0xc6c338f98d867,0xc08ff2ebb8475bbc,1 +np.float64,0x3fd50714482a0e29,0xbff9b14a9f84f2c2,1 +np.float64,0xfff0000000000000,0x7ff8000000000000,1 +np.float64,0x3fde2cd0f93c59a2,0xbff15afe35a43a37,1 +np.float64,0xf1719cb9e2e34,0xc08ff0acf77b06d3,1 +np.float64,0xfd3caaf9fa796,0xc08ff020101771bd,1 +np.float64,0x7f750d63a02a1ac6,0x408fc32ad0caa362,1 +np.float64,0x7fcc50f4e238a1e9,0x408fee96a5622f1a,1 +np.float64,0x421d1da0843a4,0xc08fff9ffe62d869,1 +np.float64,0x3fd9e17023b3c2e0,0xbff4e631d687ee8e,1 +np.float64,0x3fe4999a09693334,0xbfe4556b3734c215,1 +np.float64,0xd619ef03ac33e,0xc08ff21013c85529,1 +np.float64,0x3fc4da522229b4a4,0xc004f150b2c573aa,1 +np.float64,0x3feb04b053b60961,0xbfcf3fc9e00ebc40,1 +np.float64,0x3fbedec5ea3dbd8c,0xc0086a33dc22fab5,1 +np.float64,0x7fec3b217ab87642,0x408ffe8dbc8ca041,1 +np.float64,0xdb257d33b64b0,0xc08ff1cb42d3c182,1 
+np.float64,0x7fa2d92ec025b25d,0x408fd9e414d11cb0,1 +np.float64,0x3fa425c550284b8b,0xc012ab7cbf83be12,1 +np.float64,0x10b4869021692,0xc09007c0487d648a,1 +np.float64,0x7f97918c902f2318,0x408fd47867806574,1 +np.float64,0x3fe4f91238e9f224,0xbfe38160b4e99919,1 +np.float64,0x3fc2b1af6125635f,0xc00634343bc58461,1 +np.float64,0x3fc2a98071255301,0xc0063942bc8301be,1 +np.float64,0x3fe4cfc585299f8b,0xbfe3dca39f114f34,1 +np.float64,0x3fd1ea75b3a3d4eb,0xbffd63acd02c5406,1 +np.float64,0x3fd6bf48492d7e91,0xbff7e0cd249f80f9,1 +np.float64,0x76643d36ecc88,0xc08ff8e68f13b38c,1 +np.float64,0x7feeabab3e7d5755,0x408fff82a0fd4501,1 +np.float64,0x46c0d4a68d81b,0xc08ffed79abaddc9,1 +np.float64,0x3fd088d57ca111ab,0xbfff3dd0ed7128ea,1 +np.float64,0x3fed25887cba4b11,0xbfc13f47639bd645,1 +np.float64,0x7fd90984b4b21308,0x408ff52b022c7fb4,1 +np.float64,0x3fe6ef31daadde64,0xbfdec185760cbf21,1 +np.float64,0x3fe48dbe83291b7d,0xbfe47005b99920bd,1 +np.float64,0x3fdce8422f39d084,0xbff258a33a96cc8e,1 +np.float64,0xb8ecdef771d9c,0xc08ff3c0eca61b10,1 +np.float64,0x3fe9bbf9a03377f3,0xbfd41ecfdcc336b9,1 +np.float64,0x7fe2565339a4aca5,0x408ff992d8851eaf,1 +np.float64,0x3fe1693e3822d27c,0xbfec1919da2ca697,1 +np.float64,0x3fd3680488a6d009,0xbffb8b7330275947,1 +np.float64,0x7fbe4f3d2c3c9e79,0x408fe75fa3f4e600,1 +np.float64,0x7fd4cfef3ca99fdd,0x408ff308ee3ab50f,1 +np.float64,0x3fd9c9a51cb3934a,0xbff4fb7440055ce6,1 +np.float64,0x3fe08a9640a1152d,0xbfee76bd1bfbf5c2,1 +np.float64,0x3fef012c41fe0259,0xbfa757a2da7f9707,1 +np.float64,0x3fee653fe2fcca80,0xbfb2ffae0c95025c,1 +np.float64,0x7fd0776933a0eed1,0x408ff054e7b43d41,1 +np.float64,0x4c94e5c09929d,0xc08ffdedb7f49e5e,1 +np.float64,0xca3e3d17947c8,0xc08ff2b86dce2f7a,1 +np.float64,0x3fb528e1342a51c2,0xc00cc626c8e2d9ba,1 +np.float64,0xd774df81aee9c,0xc08ff1fd6f0a7548,1 +np.float64,0x3fc47a9b6128f537,0xc00526c577b80849,1 +np.float64,0x3fe29a6f6a6534df,0xbfe90a5f83644911,1 +np.float64,0x3fecda4f59f9b49f,0xbfc31e4a80c4cbb6,1 +np.float64,0x7fe51d44f5aa3a89,0x408ffb3382437426,1 +np.float64,0x3fd677fc412ceff9,0xbff82999086977e7,1 +np.float64,0x3fe2a3c7e7254790,0xbfe8f33415cdba9d,1 +np.float64,0x3fe6d8d1dc6db1a4,0xbfdf1bc61bc24dff,1 +np.float64,0x7febb32d8ef7665a,0x408ffe55a043ded1,1 +np.float64,0x60677860c0d0,0xc0900da2caa7d571,1 +np.float64,0x7390c2e0e7219,0xc08ff92df18bb5d2,1 +np.float64,0x3fca53711b34a6e2,0xc00240b07a9b529b,1 +np.float64,0x7fe7ce6dd8ef9cdb,0x408ffc961164ead9,1 +np.float64,0x7fc0c9de0d2193bb,0x408fe88e245767f6,1 +np.float64,0xc0ee217981dc4,0xc08ff343b77ea770,1 +np.float64,0x72bd4668e57a9,0xc08ff94323fd74fc,1 +np.float64,0x7fd6970e252d2e1b,0x408ff3fb1e2fead2,1 +np.float64,0x7fdcb61040396c20,0x408ff6bf926bc98f,1 +np.float64,0xda4faa25b49f6,0xc08ff1d68b3877f0,1 +np.float64,0x3feb344749f6688f,0xbfcdfba2d66c72c5,1 +np.float64,0x3fe2aa4284e55485,0xbfe8e32ae0683f57,1 +np.float64,0x3f8e8fcfd03d1fa0,0xc01843efb2129908,1 +np.float64,0x8000000000000000,0xfff0000000000000,1 +np.float64,0x3fd8e01155b1c023,0xbff5d0529dae9515,1 +np.float64,0x3fe8033f3370067e,0xbfda837c80b87e7c,1 +np.float64,0x7fc5bf831e2b7f05,0x408feb8ae3b039a0,1 +np.float64,0x3fd8dcdf5331b9bf,0xbff5d349e1ed422a,1 +np.float64,0x3fe58b4e302b169c,0xbfe243c9cbccde44,1 +np.float64,0x3fea8a2e47b5145d,0xbfd1464e37221894,1 +np.float64,0x75cd1e88eb9a4,0xc08ff8f553ef0475,1 +np.float64,0x7fcfc876e23f90ed,0x408fefebe6cc95e6,1 +np.float64,0x7f51aceb002359d5,0x408fb1263f9003fb,1 +np.float64,0x7fc2a1b877254370,0x408fe9c1ec52f8b9,1 +np.float64,0x7fd495810e292b01,0x408ff2e859414d31,1 +np.float64,0x7fd72048632e4090,0x408ff440690cebdb,1 
+np.float64,0x7fd7aafaffaf6,0xc08ff803a390779f,1 +np.float64,0x7fe18067d4a300cf,0x408ff9090a02693f,1 +np.float64,0x3fdc1080f8b82102,0xbff3077bf44a89bd,1 +np.float64,0x3fc34a462f26948c,0xc005d777b3cdf139,1 +np.float64,0x3fe21e4a1fe43c94,0xbfea428acfbc6ea9,1 +np.float64,0x1f0d79083e1b0,0xc090042c65a7abf2,1 +np.float64,0x3fe8d0d15931a1a3,0xbfd779f6bbd4db78,1 +np.float64,0x3fe74578022e8af0,0xbfdd68b6c15e9f5e,1 +np.float64,0x50995dd0a132c,0xc08ffd56a5c8accf,1 +np.float64,0x3f9a6342b034c685,0xc0151ce1973c62bd,1 +np.float64,0x3f30856a00210ad4,0xc027e852f4d1fcbc,1 +np.float64,0x3febcf7646b79eed,0xbfc9e9cc9d12425c,1 +np.float64,0x8010000000000000,0x7ff8000000000000,1 +np.float64,0x3fdf520c02bea418,0xbff07ed5013f3062,1 +np.float64,0x3fe5433ecbea867e,0xbfe2df38968b6d14,1 +np.float64,0x3fb933a84e326751,0xc00ac1a144ad26c5,1 +np.float64,0x7b6d72c2f6daf,0xc08ff86b7a67f962,1 +np.float64,0xaef5dae75debc,0xc08ff46496bb2932,1 +np.float64,0x522d869aa45b1,0xc08ffd1d55281e98,1 +np.float64,0xa2462b05448c6,0xc08ff542fe0ac5fd,1 +np.float64,0x3fe2b71dd6e56e3c,0xbfe8c3690cf15415,1 +np.float64,0x3fe5778231aaef04,0xbfe26e495d09b783,1 +np.float64,0x3fe9b8d564f371ab,0xbfd42a161132970d,1 +np.float64,0x3f89ebc34033d787,0xc019373f90bfc7f1,1 +np.float64,0x3fe438ddc6e871bc,0xbfe53039341b0a93,1 +np.float64,0x873c75250e78f,0xc08ff75d8478dccd,1 +np.float64,0x807134cb00e27,0xc08ff7f5cf59c57a,1 +np.float64,0x3fac459878388b31,0xc010b6fe803bcdc2,1 +np.float64,0xca9dc7eb953b9,0xc08ff2b2fb480784,1 +np.float64,0x7feb38587bb670b0,0x408ffe21ff6d521e,1 +np.float64,0x7fd70e9b782e1d36,0x408ff437936b393a,1 +np.float64,0x3fa4037bbc2806f7,0xc012b55744c65ab2,1 +np.float64,0x3fd3d4637427a8c7,0xbffb0beebf4311ef,1 +np.float64,0x7fdabbda5db577b4,0x408ff5ecbc0d4428,1 +np.float64,0x7fda9be0a2b537c0,0x408ff5dee5d03d5a,1 +np.float64,0x7fe9c74396338e86,0x408ffd813506a18a,1 +np.float64,0x3fd058243e20b048,0xbfff822ffd8a7f21,1 +np.float64,0x3fe6aa6ca9ed54d9,0xbfdfd805629ff49e,1 +np.float64,0x3fd91431d5322864,0xbff5a025eea8c78b,1 +np.float64,0x7fe4d7f02329afdf,0x408ffb0d5d9b7878,1 +np.float64,0x3fe2954a12252a94,0xbfe917266e3e22d5,1 +np.float64,0x3fb25f7c8224bef9,0xc00e6764c81b3718,1 +np.float64,0x3fda4bddeeb497bc,0xbff4880638908c81,1 +np.float64,0x55dfd12eabbfb,0xc08ffc9b54ff4002,1 +np.float64,0x3fe8f399e031e734,0xbfd6f8e5c4dcd93f,1 +np.float64,0x3fd954a24832a945,0xbff56521f4707a06,1 +np.float64,0x3fdea911f2bd5224,0xbff0fcb2d0c2b2e2,1 +np.float64,0x3fe6b4ff8a2d69ff,0xbfdfacfc85cafeab,1 +np.float64,0x3fc7fa02042ff404,0xc00354e13b0767ad,1 +np.float64,0x3fe955088c72aa11,0xbfd593130f29949e,1 +np.float64,0xd7e74ec1afcea,0xc08ff1f74f61721c,1 +np.float64,0x3fe9d69c1ab3ad38,0xbfd3bf710a337e06,1 +np.float64,0x3fd85669a2b0acd3,0xbff65176143ccc1e,1 +np.float64,0x3fea99b285353365,0xbfd11062744783f2,1 +np.float64,0x3fe2c79f80a58f3f,0xbfe89ac33f990289,1 +np.float64,0x3f8332ba30266574,0xc01af2cb7b635783,1 +np.float64,0x30d0150061a1,0xc090119030f74c5d,1 +np.float64,0x3fdbf4cb06b7e996,0xbff31e5207aaa754,1 +np.float64,0x3fe6b56c216d6ad8,0xbfdfab42fb2941c5,1 +np.float64,0x7fc4dc239829b846,0x408feb0fb0e13fbe,1 +np.float64,0x3fd0ab85ef21570c,0xbfff0d95d6c7a35c,1 +np.float64,0x7fe13d75e5e27aeb,0x408ff8dc8efa476b,1 +np.float64,0x3fece3b832f9c770,0xbfc2e21b165d583f,1 +np.float64,0x3fe3a279c4e744f4,0xbfe68ca4fbb55dbf,1 +np.float64,0x3feb64659ef6c8cb,0xbfccb6204b6bf724,1 +np.float64,0x2279a6bc44f36,0xc0900391eeeb3e7c,1 +np.float64,0xb88046d571009,0xc08ff3c7b5b45300,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0x3fe49af059a935e1,0xbfe4526c294f248f,1 
+np.float64,0xa3e5508147cc,0xc0900a92ce5924b1,1 +np.float64,0x7fc56def3d2adbdd,0x408feb5f46c360e8,1 +np.float64,0x7fd99f3574333e6a,0x408ff56f3807987c,1 +np.float64,0x3fdc38d56fb871ab,0xbff2e667cad8f36a,1 +np.float64,0xd0b03507a1607,0xc08ff25bbcf8aa9d,1 +np.float64,0xc493f9078927f,0xc08ff30c5fa4e759,1 +np.float64,0x3fc86ddbcb30dbb8,0xc0031da1fcb56d75,1 +np.float64,0x7fe75dc395aebb86,0x408ffc5eef841491,1 +np.float64,0x1647618a2c8ed,0xc0900616ef9479c1,1 +np.float64,0xdf144763be289,0xc08ff196b527f3c9,1 +np.float64,0x3fe0b29da6a1653b,0xbfee078b5f4d7744,1 +np.float64,0x3feb055852b60ab1,0xbfcf3b4db5779a7a,1 +np.float64,0x3fe8bc1625f1782c,0xbfd7c739ade904bc,1 +np.float64,0x7fd19bfb8ea337f6,0x408ff11b2b55699c,1 +np.float64,0x3fed1d80d1ba3b02,0xbfc1722e8d3ce094,1 +np.float64,0x2d9c65925b38e,0xc09001f46bcd3bc5,1 +np.float64,0x7fed6f4d857ade9a,0x408fff091cf6a3b4,1 +np.float64,0x3fd070cd6ba0e19b,0xbfff5f7609ca29e8,1 +np.float64,0x7fea3508b8f46a10,0x408ffdb1f30bd6be,1 +np.float64,0x508b897ca1172,0xc08ffd58a0eb3583,1 +np.float64,0x7feba367b07746ce,0x408ffe4f0bf4bd4e,1 +np.float64,0x3fefebd5c4bfd7ac,0xbf6d20b4fcf21b69,1 +np.float64,0x3fd8ef07b8b1de0f,0xbff5c2745c0795a5,1 +np.float64,0x3fd38ed518271daa,0xbffb5d75f00f6900,1 +np.float64,0x6de0fecedbc20,0xc08ff9c307bbc647,1 +np.float64,0xafc0ffc35f820,0xc08ff45737e5d6b4,1 +np.float64,0x7fd282097ca50412,0x408ff1ae3b27bf3b,1 +np.float64,0x3fe2f2d50b65e5aa,0xbfe831042e6a1e99,1 +np.float64,0x3faa437bac3486f7,0xc01123d8d962205a,1 +np.float64,0x3feea54434fd4a88,0xbfaff202cc456647,1 +np.float64,0x3fc9e65b8633ccb7,0xc00270e77ffd19da,1 +np.float64,0x7fee15af61fc2b5e,0x408fff49a49154a3,1 +np.float64,0x7fefe670a73fcce0,0x408ffff6c44c1005,1 +np.float64,0x3fc0832d0f21065a,0xc007a2dc2f25384a,1 +np.float64,0x3fecfc96bcb9f92d,0xbfc24367c3912620,1 +np.float64,0x3feb705682b6e0ad,0xbfcc65b1bb16f9c5,1 +np.float64,0x3fe185c4f9630b8a,0xbfebcdb401af67a4,1 +np.float64,0x3fb0a5a9f6214b54,0xc00f8ada2566a047,1 +np.float64,0x7fe2908cdda52119,0x408ff9b744861fb1,1 +np.float64,0x7fee776e183ceedb,0x408fff6ee7c2f86e,1 +np.float64,0x3fce1d608f3c3ac1,0xc000b3685d006474,1 +np.float64,0x7fecf92aa339f254,0x408ffeda6c998267,1 +np.float64,0xce13cb519c27a,0xc08ff280f02882a9,1 +np.float64,0x1,0xc090c80000000000,1 +np.float64,0x3fe485a8afa90b51,0xbfe4823265d5a50a,1 +np.float64,0x3feea60908bd4c12,0xbfafdf7ad7fe203f,1 +np.float64,0x3fd2253033a44a60,0xbffd187d0ec8d5b9,1 +np.float64,0x435338fc86a68,0xc08fff6a591059dd,1 +np.float64,0x7fce8763a73d0ec6,0x408fef74f1e715ff,1 +np.float64,0x3fbe5ddb783cbbb7,0xc0089acc5afa794b,1 +np.float64,0x7fe4cf19ada99e32,0x408ffb0877ca302b,1 +np.float64,0x3fe94c9ea1b2993d,0xbfd5b1c2e867b911,1 +np.float64,0x3fe75541c72eaa84,0xbfdd2a27aa117699,1 +np.float64,0x8000000000000001,0x7ff8000000000000,1 +np.float64,0x7fdbec7f2c37d8fd,0x408ff66d69a7f818,1 +np.float64,0x8ef10d091de22,0xc08ff6b9ca5094f8,1 +np.float64,0x3fea69025b74d205,0xbfd1b9fe2c252c70,1 +np.float64,0x562376d0ac46f,0xc08ffc924111cd31,1 +np.float64,0x8e8097ab1d013,0xc08ff6c2e2706f67,1 +np.float64,0x3fca6803ed34d008,0xc00237aef808825b,1 +np.float64,0x7fe8fe9067b1fd20,0x408ffd25f459a7d1,1 +np.float64,0x3f918e8c7f233,0xc0900009fe011d54,1 +np.float64,0x3fdfe773833fcee7,0xbff011bc1af87bb9,1 +np.float64,0xefffef6fdfffe,0xc08ff0beb0f09eb0,1 +np.float64,0x7fe64610282c8c1f,0x408ffbd17209db18,1 +np.float64,0xe66be8c1ccd7d,0xc08ff13706c056e1,1 +np.float64,0x2837e570506fd,0xc09002ae4dae0c1a,1 +np.float64,0x3febe3a081f7c741,0xbfc964171f2a5a47,1 +np.float64,0x3fe21ed09a243da1,0xbfea41342d29c3ff,1 
+np.float64,0x3fe1596c8162b2d9,0xbfec431eee30823a,1 +np.float64,0x8f2b9a131e574,0xc08ff6b51104ed4e,1 +np.float64,0x3fe88ed179711da3,0xbfd870d08a4a4b0c,1 +np.float64,0x34159bc2682b4,0xc09001305a885f94,1 +np.float64,0x1ed31e543da65,0xc0900437481577f8,1 +np.float64,0x3feafbe9de75f7d4,0xbfcf7bcdbacf1c61,1 +np.float64,0xfb16fb27f62e0,0xc08ff03938e682a2,1 +np.float64,0x3fe5cd5ba7eb9ab7,0xbfe1b7165771af3c,1 +np.float64,0x7fe72905e76e520b,0x408ffc44c4e7e80c,1 +np.float64,0x7fb7136e2e2e26db,0x408fe439fd383fb7,1 +np.float64,0x8fa585e11f4c,0xc0900b55a08a486b,1 +np.float64,0x7fed985ce47b30b9,0x408fff192b596821,1 +np.float64,0x3feaaf0869755e11,0xbfd0c671571b3764,1 +np.float64,0x3fa40fd4ec281faa,0xc012b1c8dc0b9e5f,1 +np.float64,0x7fda2a70993454e0,0x408ff5ad47b0c68a,1 +np.float64,0x3fe5f7e931abefd2,0xbfe15d52b3605abf,1 +np.float64,0x3fe9fc6d3533f8da,0xbfd338b06a790994,1 +np.float64,0x3fe060649420c0c9,0xbfeeed1756111891,1 +np.float64,0x3fce8435e33d086c,0xc0008c41cea9ed40,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x617820aec2f05,0xc08ffb251e9af0f0,1 +np.float64,0x7fcc4ab6ee38956d,0x408fee9419c8f77d,1 +np.float64,0x7fdefda2fc3dfb45,0x408ff7a15063bc05,1 +np.float64,0x7fe5138ccaaa2719,0x408ffb2e30f3a46e,1 +np.float64,0x3fe3817a836702f5,0xbfe6da7c2b25e35a,1 +np.float64,0x3fb8a7dafa314fb6,0xc00b025bc0784ebe,1 +np.float64,0x349dc420693d,0xc09011215825d2c8,1 +np.float64,0x6b0e504ad61cb,0xc08ffa0fee9c5cd6,1 +np.float64,0x273987644e732,0xc09002d34294ed79,1 +np.float64,0x3fc0bd8a6e217b15,0xc0077a5828b4d2f5,1 +np.float64,0x758b48c4eb16a,0xc08ff8fbc8fbe46a,1 +np.float64,0x3fc8a9a52631534a,0xc00301854ec0ef81,1 +np.float64,0x7fe79d29a76f3a52,0x408ffc7e1607a4c1,1 +np.float64,0x3fd7d3ebce2fa7d8,0xbff6ce8a94aebcda,1 +np.float64,0x7fd1cb68a52396d0,0x408ff13a17533b2b,1 +np.float64,0x7fda514a5d34a294,0x408ff5be5e081578,1 +np.float64,0x3fc40b4382281687,0xc0056632c8067228,1 +np.float64,0x7feff1208c3fe240,0x408ffffaa180fa0d,1 +np.float64,0x8f58739f1eb0f,0xc08ff6b17402689d,1 +np.float64,0x1fdbe9a23fb7e,0xc090040685b2d24f,1 +np.float64,0xcb1d0e87963a2,0xc08ff2abbd903b82,1 +np.float64,0x3fc45a6a1a28b4d4,0xc00538f86c4aeaee,1 +np.float64,0x3fe61885b1ac310b,0xbfe118fd2251d2ec,1 +np.float64,0x3fedf584c8fbeb0a,0xbfb8572433ff67a9,1 +np.float64,0x7fb0bddd1a217bb9,0x408fe085e0d621db,1 +np.float64,0x72d8d3e0e5b3,0xc0900ca02f68c7a1,1 +np.float64,0x5cca6ff6b994f,0xc08ffbb6751fda01,1 +np.float64,0x7fe3197839a632ef,0x408ffa0b2fccfb68,1 +np.float64,0x3fcce4d9c139c9b4,0xc0012dae05baa91b,1 +np.float64,0x3fe76d00f62eda02,0xbfdccc5f12799be1,1 +np.float64,0x3fc53c22f72a7846,0xc004bbaa9cbc7958,1 +np.float64,0x7fdda02f1ebb405d,0x408ff71c37c71659,1 +np.float64,0x3fe0844eaba1089d,0xbfee884722762583,1 +np.float64,0x3febb438dc776872,0xbfca9f05e1c691f1,1 +np.float64,0x3fdf4170cdbe82e2,0xbff08b1561c8d848,1 +np.float64,0x3fce1b8d6f3c371b,0xc000b41b69507671,1 +np.float64,0x8370e60706e1d,0xc08ff7b19ea0b4ca,1 +np.float64,0x7fa5bf92382b7f23,0x408fdb8aebb3df87,1 +np.float64,0x7fe4a59979a94b32,0x408ffaf15c1358cd,1 +np.float64,0x3faa66086034cc11,0xc0111c466b7835d6,1 +np.float64,0x7fb7a958262f52af,0x408fe48408b1e093,1 +np.float64,0x3fdaacc5f635598c,0xbff43390d06b5614,1 +np.float64,0x3fd2825b9e2504b7,0xbffca3234264f109,1 +np.float64,0x3fcede160a3dbc2c,0xc0006a759e29060c,1 +np.float64,0x7fd3b19603a7632b,0x408ff265b528371c,1 +np.float64,0x7fcf8a86ea3f150d,0x408fefd552e7f3b2,1 +np.float64,0xedbcc0f7db798,0xc08ff0daad12096b,1 +np.float64,0xf1e1683de3c2d,0xc08ff0a7a0a37e00,1 +np.float64,0xb6ebd9bf6dd7b,0xc08ff3e11e28378d,1 
+np.float64,0x3fec8090d6f90122,0xbfc56031b72194cc,1 +np.float64,0x3fd3e10e37a7c21c,0xbffafd34a3ebc933,1 +np.float64,0x7fbb1c96aa36392c,0x408fe616347b3342,1 +np.float64,0x3fe2f3996f25e733,0xbfe82f25bc5d1bbd,1 +np.float64,0x7fe8709da870e13a,0x408ffce3ab6ce59a,1 +np.float64,0x7fea3233d1b46467,0x408ffdb0b3bbc6de,1 +np.float64,0x65fa4112cbf49,0xc08ffa9f85eb72b9,1 +np.float64,0x3fca2cae9f34595d,0xc00251bb275afb87,1 +np.float64,0x8135fd9f026c0,0xc08ff7e42e14dce7,1 +np.float64,0x7fe0a6f057e14de0,0x408ff876081a4bfe,1 +np.float64,0x10000000000000,0xc08ff00000000000,1 +np.float64,0x3fe1fd506263faa1,0xbfea96dd8c543b72,1 +np.float64,0xa5532c554aa66,0xc08ff50bf5bfc66d,1 +np.float64,0xc239d00b8473a,0xc08ff32ff0ea3f92,1 +np.float64,0x7fdb5314e336a629,0x408ff62d4ff60d82,1 +np.float64,0x3fe5f506e2abea0e,0xbfe16362a4682120,1 +np.float64,0x3fa20c60202418c0,0xc0134e08d82608b6,1 +np.float64,0x7fe03864b22070c8,0x408ff82866d65e9a,1 +np.float64,0x3fe72cf5656e59eb,0xbfddca298969effa,1 +np.float64,0x5c295386b852b,0xc08ffbca90b136c9,1 +np.float64,0x7fd71e5020ae3c9f,0x408ff43f6d58eb7c,1 +np.float64,0x3fd1905a842320b5,0xbffdd8ecd288159c,1 +np.float64,0x3fe6bddb256d7bb6,0xbfdf88fee1a820bb,1 +np.float64,0xe061b967c0c37,0xc08ff18581951561,1 +np.float64,0x3fe534f65cea69ed,0xbfe2fe45fe7d3040,1 +np.float64,0xdc7dae07b8fb6,0xc08ff1b93074ea76,1 +np.float64,0x3fd0425082a084a1,0xbfffa11838b21633,1 +np.float64,0xba723fc974e48,0xc08ff3a8b8d01c58,1 +np.float64,0x3fce42ffc73c8600,0xc000a5062678406e,1 +np.float64,0x3f2e6d3c7e5ce,0xc090001304cfd1c7,1 +np.float64,0x3fd4b2e5f7a965cc,0xbffa0e6e6bae0a68,1 +np.float64,0x3fe6db1d18edb63a,0xbfdf128158ee92d9,1 +np.float64,0x7fe4e5792f29caf1,0x408ffb14d9dbf133,1 +np.float64,0x3fc11cdf992239bf,0xc00739569619cd77,1 +np.float64,0x3fc05ea11220bd42,0xc007bc841b48a890,1 +np.float64,0x4bd592d497ab3,0xc08ffe0ab1c962e2,1 +np.float64,0x280068fc5000e,0xc09002b64955e865,1 +np.float64,0x7fe2f2637065e4c6,0x408ff9f379c1253a,1 +np.float64,0x3fefc38467ff8709,0xbf85e53e64b9a424,1 +np.float64,0x2d78ec5a5af1e,0xc09001f8ea8601e0,1 +np.float64,0x7feeef2b957dde56,0x408fff9bebe995f7,1 +np.float64,0x2639baf44c738,0xc09002f9618d623b,1 +np.float64,0x3fc562964d2ac52d,0xc004a6d76959ef78,1 +np.float64,0x3fe21b071fe4360e,0xbfea4adb2cd96ade,1 +np.float64,0x7fe56aa6802ad54c,0x408ffb5d81d1a898,1 +np.float64,0x4296b452852d7,0xc08fff8ad7fbcbe1,1 +np.float64,0x7fe3fac4ff27f589,0x408ffa9049eec479,1 +np.float64,0x7fe7a83e6caf507c,0x408ffc837f436604,1 +np.float64,0x3fc4ac5b872958b7,0xc0050add72381ac3,1 +np.float64,0x3fd6d697c02dad30,0xbff7c931a3eefb01,1 +np.float64,0x3f61e391c023c724,0xc021ad91e754f94b,1 +np.float64,0x10817f9c21031,0xc09007d20434d7bc,1 +np.float64,0x3fdb9c4c4cb73899,0xbff367d8615c5ece,1 +np.float64,0x3fe26ead6b64dd5b,0xbfe977771def5989,1 +np.float64,0x3fc43ea5c3287d4c,0xc00548c2163ae631,1 +np.float64,0x3fe05bd8bba0b7b1,0xbfeef9ea0db91abc,1 +np.float64,0x3feac78369358f07,0xbfd071e2b0aeab39,1 +np.float64,0x7fe254922ca4a923,0x408ff991bdd4e5d3,1 +np.float64,0x3fe5a2f5842b45eb,0xbfe21135c9a71666,1 +np.float64,0x3fd5daf98c2bb5f3,0xbff8cd24f7c07003,1 +np.float64,0x3fcb2a1384365427,0xc001e40f0d04299a,1 +np.float64,0x3fe073974360e72f,0xbfeeb7183a9930b7,1 +np.float64,0xcf3440819e688,0xc08ff270d3a71001,1 +np.float64,0x3fd35656cda6acae,0xbffba083fba4939d,1 +np.float64,0x7fe6c59b4ded8b36,0x408ffc12ce725425,1 +np.float64,0x3fba896f943512df,0xc00a291cb6947701,1 +np.float64,0x7fe54917e86a922f,0x408ffb4b5e0fb848,1 +np.float64,0x7fed2a3f51ba547e,0x408ffeede945a948,1 +np.float64,0x3fdc72bd5038e57b,0xbff2b73b7e93e209,1 
+np.float64,0x7fefdb3f9f3fb67e,0x408ffff2b702a768,1 +np.float64,0x3fb0184430203088,0xc00fee8c1351763c,1 +np.float64,0x7d6c3668fad87,0xc08ff83c195f2cca,1 +np.float64,0x3fd5aa254aab544b,0xbff900f16365991b,1 +np.float64,0x3f963daab02c7b55,0xc0161974495b1b71,1 +np.float64,0x3fa7a9c5982f538b,0xc011bde0f6052a89,1 +np.float64,0xb3a5a74b674b5,0xc08ff4167bc97c81,1 +np.float64,0x7fad0c14503a1828,0x408fdee1f2d56cd7,1 +np.float64,0x43e0e9d887c1e,0xc08fff522837b13b,1 +np.float64,0x3fe513b20aea2764,0xbfe346ea994100e6,1 +np.float64,0x7fe4e10393e9c206,0x408ffb12630f6a06,1 +np.float64,0x68b286e2d1651,0xc08ffa51c0d795d4,1 +np.float64,0x7fe8de453331bc89,0x408ffd17012b75ac,1 +np.float64,0x1b3d77d4367b0,0xc09004edea60aa36,1 +np.float64,0x3fd351cbc326a398,0xbffba5f0f4d5fdba,1 +np.float64,0x3fd264951b24c92a,0xbffcc8636788b9bf,1 +np.float64,0xd2465761a48cb,0xc08ff2455c9c53e5,1 +np.float64,0x7fe46a0ef028d41d,0x408ffacfe32c6f5d,1 +np.float64,0x3fafd8ac4c3fb159,0xc010071bf33195d0,1 +np.float64,0x902aec5d2055e,0xc08ff6a08e28aabc,1 +np.float64,0x3fcea61bb03d4c37,0xc0007f76e509b657,1 +np.float64,0x7fe8d90f9571b21e,0x408ffd1495f952e7,1 +np.float64,0x7fa650c9442ca192,0x408fdbd6ff22fdd8,1 +np.float64,0x3fe8ecfdf171d9fc,0xbfd7115df40e8580,1 +np.float64,0x7fd4e6fe7f29cdfc,0x408ff315b0dae183,1 +np.float64,0x77df4c52efbea,0xc08ff8c1d5c1df33,1 +np.float64,0xe200b0cfc4016,0xc08ff1703cfb8e79,1 +np.float64,0x3fe230ea7e2461d5,0xbfea132d2385160e,1 +np.float64,0x7fd1f7ced723ef9d,0x408ff156bfbf92a4,1 +np.float64,0x3fea762818f4ec50,0xbfd18c12a88e5f79,1 +np.float64,0x7feea4ba7c7d4974,0x408fff8004164054,1 +np.float64,0x833ec605067d9,0xc08ff7b606383841,1 +np.float64,0x7fd0c2d7fea185af,0x408ff0894f3a0cf4,1 +np.float64,0x3fe1d7d61d23afac,0xbfeaf76fee875d3e,1 +np.float64,0x65adecb0cb5be,0xc08ffaa82cb09d68,1
diff --git a/python/numpy/_core/tests/data/umath-validation-set-sin.csv b/python/numpy/_core/tests/data/umath-validation-set-sin.csv
new file mode 100644
index 000000000..03e76ffc2
--- /dev/null
+++ b/python/numpy/_core/tests/data/umath-validation-set-sin.csv
@@ -0,0 +1,1370 @@
+dtype,input,output,ulperrortol +## +ve denormals ## +np.float32,0x004b4716,0x004b4716,2 +np.float32,0x007b2490,0x007b2490,2 +np.float32,0x007c99fa,0x007c99fa,2 +np.float32,0x00734a0c,0x00734a0c,2 +np.float32,0x0070de24,0x0070de24,2 +np.float32,0x007fffff,0x007fffff,2 +np.float32,0x00000001,0x00000001,2 +## -ve denormals ## +np.float32,0x80495d65,0x80495d65,2 +np.float32,0x806894f6,0x806894f6,2 +np.float32,0x80555a76,0x80555a76,2 +np.float32,0x804e1fb8,0x804e1fb8,2 +np.float32,0x80687de9,0x80687de9,2 +np.float32,0x807fffff,0x807fffff,2 +np.float32,0x80000001,0x80000001,2 +## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ## +np.float32,0x00000000,0x00000000,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x00800000,0x00800000,2 +np.float32,0x80800000,0x80800000,2 +## 1.00f ## +np.float32,0x3f800000,0x3f576aa4,2 +np.float32,0x3f800001,0x3f576aa6,2 +np.float32,0x3f800002,0x3f576aa7,2 +np.float32,0xc090a8b0,0x3f7b4e48,2 +np.float32,0x41ce3184,0x3f192d43,2 +np.float32,0xc1d85848,0xbf7161cb,2 +np.float32,0x402b8820,0x3ee3f29f,2 +np.float32,0x42b4e454,0x3f1d0151,2 +np.float32,0x42a67a60,0x3f7ffa4c,2 +np.float32,0x41d92388,0x3f67beef,2 +np.float32,0x422dd66c,0xbeffb0c1,2 +np.float32,0xc28f5be6,0xbf0bae79,2 +np.float32,0x41ab2674,0x3f0ffe2b,2 +np.float32,0x3f490fdb,0x3f3504f3,2 +np.float32,0xbf490fdb,0xbf3504f3,2 +np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x3fc90fdb,0x3f800000,2 +np.float32,0xbfc90fdb,0xbf800000,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x4016cbe4,0x3f3504f3,2 +np.float32,0xc016cbe4,0xbf3504f3,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x40490fdb,0xb3bbbd2e,2 +np.float32,0xc0490fdb,0x33bbbd2e,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x407b53d2,0xbf3504f5,2 +np.float32,0xc07b53d2,0x3f3504f5,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x4096cbe4,0xbf800000,2 +np.float32,0xc096cbe4,0x3f800000,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x40afede0,0xbf3504ef,2 +np.float32,0xc0afede0,0x3f3504ef,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x40c90fdb,0x343bbd2e,2 +np.float32,0xc0c90fdb,0xb43bbd2e,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x40e231d6,0x3f3504f3,2 +np.float32,0xc0e231d6,0xbf3504f3,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x40fb53d2,0x3f800000,2 +np.float32,0xc0fb53d2,0xbf800000,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x410a3ae7,0x3f3504eb,2 +np.float32,0xc10a3ae7,0xbf3504eb,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x4116cbe4,0xb2ccde2e,2 +np.float32,0xc116cbe4,0x32ccde2e,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x41235ce2,0xbf3504f7,2 +np.float32,0xc1235ce2,0x3f3504f7,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x412fede0,0xbf800000,2 +np.float32,0xc12fede0,0x3f800000,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x413c7edd,0xbf3504f3,2 +np.float32,0xc13c7edd,0x3f3504f3,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x41490fdb,0x34bbbd2e,2 +np.float32,0xc1490fdb,0xb4bbbd2e,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x4155a0d9,0x3f3504fb,2 +np.float32,0xc155a0d9,0xbf3504fb,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x416231d6,0x3f800000,2 +np.float32,0xc16231d6,0xbf800000,2 
+np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x416ec2d4,0x3f3504ef,2 +np.float32,0xc16ec2d4,0xbf3504ef,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x417b53d2,0xb535563d,2 +np.float32,0xc17b53d2,0x3535563d,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x4183f268,0xbf3504ff,2 +np.float32,0xc183f268,0x3f3504ff,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x418a3ae7,0xbf800000,2 +np.float32,0xc18a3ae7,0x3f800000,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x41908365,0xbf3504f6,2 +np.float32,0xc1908365,0x3f3504f6,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x4196cbe4,0x334cde2e,2 +np.float32,0xc196cbe4,0xb34cde2e,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x419d1463,0x3f3504f8,2 +np.float32,0xc19d1463,0xbf3504f8,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x41a35ce2,0x3f800000,2 +np.float32,0xc1a35ce2,0xbf800000,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x41a9a561,0x3f3504e7,2 +np.float32,0xc1a9a561,0xbf3504e7,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x41afede0,0xb5b222c4,2 +np.float32,0xc1afede0,0x35b222c4,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x41b6365e,0xbf3504f0,2 +np.float32,0xc1b6365e,0x3f3504f0,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x41bc7edd,0xbf800000,2 +np.float32,0xc1bc7edd,0x3f800000,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x41c2c75c,0xbf3504ef,2 +np.float32,0xc1c2c75c,0x3f3504ef,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x41c90fdb,0x353bbd2e,2 +np.float32,0xc1c90fdb,0xb53bbd2e,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x41cf585a,0x3f3504ff,2 +np.float32,0xc1cf585a,0xbf3504ff,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 +np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x41d5a0d9,0x3f800000,2 +np.float32,0xc1d5a0d9,0xbf800000,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 
+np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x41dbe958,0x3f3504e0,2 +np.float32,0xc1dbe958,0xbf3504e0,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x41e231d6,0xb399a6a2,2 +np.float32,0xc1e231d6,0x3399a6a2,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x41e87a55,0xbf3504f8,2 +np.float32,0xc1e87a55,0x3f3504f8,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x41eec2d4,0xbf800000,2 +np.float32,0xc1eec2d4,0x3f800000,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x41f50b53,0xbf3504e7,2 +np.float32,0xc1f50b53,0x3f3504e7,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x41fb53d2,0x35b5563d,2 +np.float32,0xc1fb53d2,0xb5b5563d,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x4200ce28,0x3f3504f0,2 +np.float32,0xc200ce28,0xbf3504f0,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4203f268,0x3f800000,2 +np.float32,0xc203f268,0xbf800000,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x420716a7,0x3f3504ee,2 +np.float32,0xc20716a7,0xbf3504ee,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x420a3ae7,0xb6308908,2 +np.float32,0xc20a3ae7,0x36308908,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x420d5f26,0xbf350500,2 +np.float32,0xc20d5f26,0x3f350500,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x42108365,0xbf800000,2 +np.float32,0xc2108365,0x3f800000,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x4213a7a5,0xbf3504df,2 +np.float32,0xc213a7a5,0x3f3504df,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4216cbe4,0x33ccde2e,2 +np.float32,0xc216cbe4,0xb3ccde2e,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4219f024,0x3f35050f,2 +np.float32,0xc219f024,0xbf35050f,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x421d1463,0x3f800000,2 +np.float32,0xc21d1463,0xbf800000,2 +np.float32,0x429d1463,0xb5c55799,2 +np.float32,0xc29d1463,0x35c55799,2 +np.float32,0x431d1463,0x36455799,2 +np.float32,0xc31d1463,0xb6455799,2 
+np.float32,0x422038a3,0x3f3504d0,2 +np.float32,0xc22038a3,0xbf3504d0,2 +np.float32,0x42a038a3,0xbf800000,2 +np.float32,0xc2a038a3,0x3f800000,2 +np.float32,0x432038a3,0xb746cd61,2 +np.float32,0xc32038a3,0x3746cd61,2 +np.float32,0x42235ce2,0xb5b889b6,2 +np.float32,0xc2235ce2,0x35b889b6,2 +np.float32,0x42a35ce2,0x363889b6,2 +np.float32,0xc2a35ce2,0xb63889b6,2 +np.float32,0x43235ce2,0x36b889b6,2 +np.float32,0xc3235ce2,0xb6b889b6,2 +np.float32,0x42268121,0xbf3504f1,2 +np.float32,0xc2268121,0x3f3504f1,2 +np.float32,0x42a68121,0x3f800000,2 +np.float32,0xc2a68121,0xbf800000,2 +np.float32,0x43268121,0x35643aac,2 +np.float32,0xc3268121,0xb5643aac,2 +np.float32,0x4229a561,0xbf800000,2 +np.float32,0xc229a561,0x3f800000,2 +np.float32,0x42a9a561,0xb68733d0,2 +np.float32,0xc2a9a561,0x368733d0,2 +np.float32,0x4329a561,0x370733d0,2 +np.float32,0xc329a561,0xb70733d0,2 +np.float32,0x422cc9a0,0xbf3504ee,2 +np.float32,0xc22cc9a0,0x3f3504ee,2 +np.float32,0x42acc9a0,0xbf800000,2 +np.float32,0xc2acc9a0,0x3f800000,2 +np.float32,0x432cc9a0,0xb5e55a50,2 +np.float32,0xc32cc9a0,0x35e55a50,2 +np.float32,0x422fede0,0x363222c4,2 +np.float32,0xc22fede0,0xb63222c4,2 +np.float32,0x42afede0,0x36b222c4,2 +np.float32,0xc2afede0,0xb6b222c4,2 +np.float32,0x432fede0,0x373222c4,2 +np.float32,0xc32fede0,0xb73222c4,2 +np.float32,0x4233121f,0x3f350500,2 +np.float32,0xc233121f,0xbf350500,2 +np.float32,0x42b3121f,0x3f800000,2 +np.float32,0xc2b3121f,0xbf800000,2 +np.float32,0x4333121f,0xb68f347d,2 +np.float32,0xc333121f,0x368f347d,2 +np.float32,0x4236365e,0x3f800000,2 +np.float32,0xc236365e,0xbf800000,2 +np.float32,0x42b6365e,0x358bb91c,2 +np.float32,0xc2b6365e,0xb58bb91c,2 +np.float32,0x4336365e,0xb60bb91c,2 +np.float32,0xc336365e,0x360bb91c,2 +np.float32,0x42395a9e,0x3f3504df,2 +np.float32,0xc2395a9e,0xbf3504df,2 +np.float32,0x42b95a9e,0xbf800000,2 +np.float32,0xc2b95a9e,0x3f800000,2 +np.float32,0x43395a9e,0xb6e51267,2 +np.float32,0xc3395a9e,0x36e51267,2 +np.float32,0x423c7edd,0xb4000add,2 +np.float32,0xc23c7edd,0x34000add,2 +np.float32,0x42bc7edd,0x34800add,2 +np.float32,0xc2bc7edd,0xb4800add,2 +np.float32,0x433c7edd,0x35000add,2 +np.float32,0xc33c7edd,0xb5000add,2 +np.float32,0x423fa31d,0xbf35050f,2 +np.float32,0xc23fa31d,0x3f35050f,2 +np.float32,0x42bfa31d,0x3f800000,2 +np.float32,0xc2bfa31d,0xbf800000,2 +np.float32,0x433fa31d,0xb71d7828,2 +np.float32,0xc33fa31d,0x371d7828,2 +np.float32,0x4242c75c,0xbf800000,2 +np.float32,0xc242c75c,0x3f800000,2 +np.float32,0x42c2c75c,0xb5cbbe8a,2 +np.float32,0xc2c2c75c,0x35cbbe8a,2 +np.float32,0x4342c75c,0x364bbe8a,2 +np.float32,0xc342c75c,0xb64bbe8a,2 +np.float32,0x4245eb9c,0xbf3504d0,2 +np.float32,0xc245eb9c,0x3f3504d0,2 +np.float32,0x42c5eb9c,0xbf800000,2 +np.float32,0xc2c5eb9c,0x3f800000,2 +np.float32,0x4345eb9c,0xb748671d,2 +np.float32,0xc345eb9c,0x3748671d,2 +np.float32,0x42490fdb,0x35bbbd2e,2 +np.float32,0xc2490fdb,0xb5bbbd2e,2 +np.float32,0x42c90fdb,0x363bbd2e,2 +np.float32,0xc2c90fdb,0xb63bbd2e,2 +np.float32,0x43490fdb,0x36bbbd2e,2 +np.float32,0xc3490fdb,0xb6bbbd2e,2 +np.float32,0x424c341a,0x3f3504f1,2 +np.float32,0xc24c341a,0xbf3504f1,2 +np.float32,0x42cc341a,0x3f800000,2 +np.float32,0xc2cc341a,0xbf800000,2 +np.float32,0x434c341a,0x354a9ee6,2 +np.float32,0xc34c341a,0xb54a9ee6,2 +np.float32,0x424f585a,0x3f800000,2 +np.float32,0xc24f585a,0xbf800000,2 +np.float32,0x42cf585a,0xb688cd8c,2 +np.float32,0xc2cf585a,0x3688cd8c,2 +np.float32,0x434f585a,0x3708cd8c,2 +np.float32,0xc34f585a,0xb708cd8c,2 +np.float32,0x42527c99,0x3f3504ee,2 +np.float32,0xc2527c99,0xbf3504ee,2 
+np.float32,0x42d27c99,0xbf800000,2 +np.float32,0xc2d27c99,0x3f800000,2 +np.float32,0x43527c99,0xb5f22833,2 +np.float32,0xc3527c99,0x35f22833,2 +np.float32,0x4255a0d9,0xb633bc81,2 +np.float32,0xc255a0d9,0x3633bc81,2 +np.float32,0x42d5a0d9,0x36b3bc81,2 +np.float32,0xc2d5a0d9,0xb6b3bc81,2 +np.float32,0x4355a0d9,0x3733bc81,2 +np.float32,0xc355a0d9,0xb733bc81,2 +np.float32,0x4258c518,0xbf350500,2 +np.float32,0xc258c518,0x3f350500,2 +np.float32,0x42d8c518,0x3f800000,2 +np.float32,0xc2d8c518,0xbf800000,2 +np.float32,0x4358c518,0xb69267f6,2 +np.float32,0xc358c518,0x369267f6,2 +np.float32,0x425be958,0xbf800000,2 +np.float32,0xc25be958,0x3f800000,2 +np.float32,0x42dbe958,0xb6deab75,2 +np.float32,0xc2dbe958,0x36deab75,2 +np.float32,0x435be958,0x375eab75,2 +np.float32,0xc35be958,0xb75eab75,2 +np.float32,0x425f0d97,0xbf3504df,2 +np.float32,0xc25f0d97,0x3f3504df,2 +np.float32,0x42df0d97,0xbf800000,2 +np.float32,0xc2df0d97,0x3f800000,2 +np.float32,0x435f0d97,0xb6e845e0,2 +np.float32,0xc35f0d97,0x36e845e0,2 +np.float32,0x426231d6,0x3419a6a2,2 +np.float32,0xc26231d6,0xb419a6a2,2 +np.float32,0x42e231d6,0x3499a6a2,2 +np.float32,0xc2e231d6,0xb499a6a2,2 +np.float32,0x436231d6,0x3519a6a2,2 +np.float32,0xc36231d6,0xb519a6a2,2 +np.float32,0x42655616,0x3f35050f,2 +np.float32,0xc2655616,0xbf35050f,2 +np.float32,0x42e55616,0x3f800000,2 +np.float32,0xc2e55616,0xbf800000,2 +np.float32,0x43655616,0xb71f11e5,2 +np.float32,0xc3655616,0x371f11e5,2 +np.float32,0x42687a55,0x3f800000,2 +np.float32,0xc2687a55,0xbf800000,2 +np.float32,0x42e87a55,0xb5d2257b,2 +np.float32,0xc2e87a55,0x35d2257b,2 +np.float32,0x43687a55,0x3652257b,2 +np.float32,0xc3687a55,0xb652257b,2 +np.float32,0x426b9e95,0x3f3504cf,2 +np.float32,0xc26b9e95,0xbf3504cf,2 +np.float32,0x42eb9e95,0xbf800000,2 +np.float32,0xc2eb9e95,0x3f800000,2 +np.float32,0x436b9e95,0xb74a00d9,2 +np.float32,0xc36b9e95,0x374a00d9,2 +np.float32,0x426ec2d4,0xb5bef0a7,2 +np.float32,0xc26ec2d4,0x35bef0a7,2 +np.float32,0x42eec2d4,0x363ef0a7,2 +np.float32,0xc2eec2d4,0xb63ef0a7,2 +np.float32,0x436ec2d4,0x36bef0a7,2 +np.float32,0xc36ec2d4,0xb6bef0a7,2 +np.float32,0x4271e713,0xbf3504f1,2 +np.float32,0xc271e713,0x3f3504f1,2 +np.float32,0x42f1e713,0x3f800000,2 +np.float32,0xc2f1e713,0xbf800000,2 +np.float32,0x4371e713,0x35310321,2 +np.float32,0xc371e713,0xb5310321,2 +np.float32,0x42750b53,0xbf800000,2 +np.float32,0xc2750b53,0x3f800000,2 +np.float32,0x42f50b53,0xb68a6748,2 +np.float32,0xc2f50b53,0x368a6748,2 +np.float32,0x43750b53,0x370a6748,2 +np.float32,0xc3750b53,0xb70a6748,2 +np.float32,0x42782f92,0xbf3504ee,2 +np.float32,0xc2782f92,0x3f3504ee,2 +np.float32,0x42f82f92,0xbf800000,2 +np.float32,0xc2f82f92,0x3f800000,2 +np.float32,0x43782f92,0xb5fef616,2 +np.float32,0xc3782f92,0x35fef616,2 +np.float32,0x427b53d2,0x3635563d,2 +np.float32,0xc27b53d2,0xb635563d,2 +np.float32,0x42fb53d2,0x36b5563d,2 +np.float32,0xc2fb53d2,0xb6b5563d,2 +np.float32,0x437b53d2,0x3735563d,2 +np.float32,0xc37b53d2,0xb735563d,2 +np.float32,0x427e7811,0x3f350500,2 +np.float32,0xc27e7811,0xbf350500,2 +np.float32,0x42fe7811,0x3f800000,2 +np.float32,0xc2fe7811,0xbf800000,2 +np.float32,0x437e7811,0xb6959b6f,2 +np.float32,0xc37e7811,0x36959b6f,2 +np.float32,0x4280ce28,0x3f800000,2 +np.float32,0xc280ce28,0xbf800000,2 +np.float32,0x4300ce28,0x357dd672,2 +np.float32,0xc300ce28,0xb57dd672,2 +np.float32,0x4380ce28,0xb5fdd672,2 +np.float32,0xc380ce28,0x35fdd672,2 +np.float32,0x42826048,0x3f3504de,2 +np.float32,0xc2826048,0xbf3504de,2 +np.float32,0x43026048,0xbf800000,2 +np.float32,0xc3026048,0x3f800000,2 
+np.float32,0x43826048,0xb6eb7958,2 +np.float32,0xc3826048,0x36eb7958,2 +np.float32,0x4283f268,0xb6859a13,2 +np.float32,0xc283f268,0x36859a13,2 +np.float32,0x4303f268,0x37059a13,2 +np.float32,0xc303f268,0xb7059a13,2 +np.float32,0x4383f268,0x37859a13,2 +np.float32,0xc383f268,0xb7859a13,2 +np.float32,0x42858487,0xbf3504e2,2 +np.float32,0xc2858487,0x3f3504e2,2 +np.float32,0x43058487,0x3f800000,2 +np.float32,0xc3058487,0xbf800000,2 +np.float32,0x43858487,0x36bea8be,2 +np.float32,0xc3858487,0xb6bea8be,2 +np.float32,0x428716a7,0xbf800000,2 +np.float32,0xc28716a7,0x3f800000,2 +np.float32,0x430716a7,0xb5d88c6d,2 +np.float32,0xc30716a7,0x35d88c6d,2 +np.float32,0x438716a7,0x36588c6d,2 +np.float32,0xc38716a7,0xb6588c6d,2 +np.float32,0x4288a8c7,0xbf3504cf,2 +np.float32,0xc288a8c7,0x3f3504cf,2 +np.float32,0x4308a8c7,0xbf800000,2 +np.float32,0xc308a8c7,0x3f800000,2 +np.float32,0x4388a8c7,0xb74b9a96,2 +np.float32,0xc388a8c7,0x374b9a96,2 +np.float32,0x428a3ae7,0x36b08908,2 +np.float32,0xc28a3ae7,0xb6b08908,2 +np.float32,0x430a3ae7,0x37308908,2 +np.float32,0xc30a3ae7,0xb7308908,2 +np.float32,0x438a3ae7,0x37b08908,2 +np.float32,0xc38a3ae7,0xb7b08908,2 +np.float32,0x428bcd06,0x3f3504f2,2 +np.float32,0xc28bcd06,0xbf3504f2,2 +np.float32,0x430bcd06,0x3f800000,2 +np.float32,0xc30bcd06,0xbf800000,2 +np.float32,0x438bcd06,0x3517675b,2 +np.float32,0xc38bcd06,0xb517675b,2 +np.float32,0x428d5f26,0x3f800000,2 +np.float32,0xc28d5f26,0xbf800000,2 +np.float32,0x430d5f26,0xb68c0105,2 +np.float32,0xc30d5f26,0x368c0105,2 +np.float32,0x438d5f26,0x370c0105,2 +np.float32,0xc38d5f26,0xb70c0105,2 +np.float32,0x428ef146,0x3f3504c0,2 +np.float32,0xc28ef146,0xbf3504c0,2 +np.float32,0x430ef146,0xbf800000,2 +np.float32,0xc30ef146,0x3f800000,2 +np.float32,0x438ef146,0xb790bc40,2 +np.float32,0xc38ef146,0x3790bc40,2 +np.float32,0x42908365,0x3592200d,2 +np.float32,0xc2908365,0xb592200d,2 +np.float32,0x43108365,0xb612200d,2 +np.float32,0xc3108365,0x3612200d,2 +np.float32,0x43908365,0xb692200d,2 +np.float32,0xc3908365,0x3692200d,2 +np.float32,0x42921585,0xbf350501,2 +np.float32,0xc2921585,0x3f350501,2 +np.float32,0x43121585,0x3f800000,2 +np.float32,0xc3121585,0xbf800000,2 +np.float32,0x43921585,0xb698cee8,2 +np.float32,0xc3921585,0x3698cee8,2 +np.float32,0x4293a7a5,0xbf800000,2 +np.float32,0xc293a7a5,0x3f800000,2 +np.float32,0x4313a7a5,0xb6e1deee,2 +np.float32,0xc313a7a5,0x36e1deee,2 +np.float32,0x4393a7a5,0x3761deee,2 +np.float32,0xc393a7a5,0xb761deee,2 +np.float32,0x429539c5,0xbf3504b1,2 +np.float32,0xc29539c5,0x3f3504b1,2 +np.float32,0x431539c5,0xbf800000,2 +np.float32,0xc31539c5,0x3f800000,2 +np.float32,0x439539c5,0xb7bbab34,2 +np.float32,0xc39539c5,0x37bbab34,2 +np.float32,0x4296cbe4,0x344cde2e,2 +np.float32,0xc296cbe4,0xb44cde2e,2 +np.float32,0x4316cbe4,0x34ccde2e,2 +np.float32,0xc316cbe4,0xb4ccde2e,2 +np.float32,0x4396cbe4,0x354cde2e,2 +np.float32,0xc396cbe4,0xb54cde2e,2 +np.float32,0x42985e04,0x3f350510,2 +np.float32,0xc2985e04,0xbf350510,2 +np.float32,0x43185e04,0x3f800000,2 +np.float32,0xc3185e04,0xbf800000,2 +np.float32,0x43985e04,0xb722455d,2 +np.float32,0xc3985e04,0x3722455d,2 +np.float32,0x4299f024,0x3f800000,2 +np.float32,0xc299f024,0xbf800000,2 +np.float32,0x4319f024,0xb71bde6c,2 +np.float32,0xc319f024,0x371bde6c,2 +np.float32,0x4399f024,0x379bde6c,2 +np.float32,0xc399f024,0xb79bde6c,2 +np.float32,0x429b8243,0x3f3504fc,2 +np.float32,0xc29b8243,0xbf3504fc,2 +np.float32,0x431b8243,0xbf800000,2 +np.float32,0xc31b8243,0x3f800000,2 +np.float32,0x439b8243,0x364b2eb8,2 +np.float32,0xc39b8243,0xb64b2eb8,2 
+np.float32,0x435b2047,0xbf350525,2 +np.float32,0x42a038a2,0xbf800000,2 +np.float32,0x432038a2,0x3664ca7e,2 +np.float32,0x4345eb9b,0x365e638c,2 +np.float32,0x42c5eb9b,0xbf800000,2 +np.float32,0x42eb9e94,0xbf800000,2 +np.float32,0x4350ea79,0x3f800000,2 +np.float32,0x42dbe957,0x3585522a,2 +np.float32,0x425be957,0xbf800000,2 +np.float32,0x435be957,0xb605522a,2 +np.float32,0x476362a2,0xbd7ff911,2 +np.float32,0x464c99a4,0x3e7f4d41,2 +np.float32,0x4471f73d,0x3e7fe1b0,2 +np.float32,0x445a6752,0x3e7ef367,2 +np.float32,0x474fa400,0x3e7f9fcd,2 +np.float32,0x45c1e72f,0xbe7fc7af,2 +np.float32,0x4558c91d,0x3e7e9f31,2 +np.float32,0x43784f94,0xbdff6654,2 +np.float32,0x466e8500,0xbe7ea0a3,2 +np.float32,0x468e1c25,0x3e7e22fb,2 +np.float32,0x44ea6cfc,0x3dff70c3,2 +np.float32,0x4605126c,0x3e7f89ef,2 +np.float32,0x4788b3c6,0xbb87d853,2 +np.float32,0x4531b042,0x3dffd163,2 +np.float32,0x43f1f71d,0x3dfff387,2 +np.float32,0x462c3fa5,0xbd7fe13d,2 +np.float32,0x441c5354,0xbdff76b4,2 +np.float32,0x44908b69,0x3e7dcf0d,2 +np.float32,0x478813ad,0xbe7e9d80,2 +np.float32,0x441c4351,0x3dff937b,2 +np.float64,0x1,0x1,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x7fefffffffffffff,0x3f7452fc98b34e97,1 +np.float64,0xffefffffffffffff,0xbf7452fc98b34e97,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xbfda51b226b4a364,0xbfd9956328ff876c,1 +np.float64,0xbfb4a65aee294cb8,0xbfb4a09fd744f8a5,1 +np.float64,0xbfd73b914fae7722,0xbfd6b9cce55af379,1 +np.float64,0xbfd90c12b4b21826,0xbfd869a3867b51c2,1 +np.float64,0x3fe649bb3d6c9376,0x3fe48778d9b48a21,1 +np.float64,0xbfd5944532ab288a,0xbfd52c30e1951b42,1 +np.float64,0x3fb150c45222a190,0x3fb14d633eb8275d,1 +np.float64,0x3fe4a6ffa9e94e00,0x3fe33f8a95c33299,1 +np.float64,0x3fe8d2157171a42a,0x3fe667d904ac95a6,1 +np.float64,0xbfa889f52c3113f0,0xbfa8878d90a23fa5,1 +np.float64,0x3feb3234bef6646a,0x3fe809d541d9017a,1 +np.float64,0x3fc6de266f2dbc50,0x3fc6bf0ee80a0d86,1 +np.float64,0x3fe8455368f08aa6,0x3fe6028254338ed5,1 +np.float64,0xbfe5576079eaaec1,0xbfe3cb4a8f6bc3f5,1 +np.float64,0xbfe9f822ff73f046,0xbfe7360d7d5cb887,1 +np.float64,0xbfb1960e7e232c20,0xbfb1928438258602,1 +np.float64,0xbfca75938d34eb28,0xbfca4570979bf2fa,1 +np.float64,0x3fd767dd15aecfbc,0x3fd6e33039018bab,1 +np.float64,0xbfe987750ef30eea,0xbfe6e7ed30ce77f0,1 +np.float64,0xbfe87f95a1f0ff2b,0xbfe62ca7e928bb2a,1 +np.float64,0xbfd2465301a48ca6,0xbfd2070245775d76,1 +np.float64,0xbfb1306ed22260e0,0xbfb12d2088eaa4f9,1 +np.float64,0xbfd8089010b01120,0xbfd778f9db77f2f3,1 +np.float64,0x3fbf9cf4ee3f39f0,0x3fbf88674fde1ca2,1 +np.float64,0x3fe6d8468a6db08e,0x3fe4f403f38b7bec,1 +np.float64,0xbfd9e5deefb3cbbe,0xbfd932692c722351,1 +np.float64,0x3fd1584d55a2b09c,0x3fd122253eeecc2e,1 +np.float64,0x3fe857979cf0af30,0x3fe60fc12b5ba8db,1 +np.float64,0x3fe3644149e6c882,0x3fe239f47013cfe6,1 +np.float64,0xbfe22ea62be45d4c,0xbfe13834c17d56fe,1 +np.float64,0xbfe8d93e1df1b27c,0xbfe66cf4ee467fd2,1 +np.float64,0xbfe9c497c9f38930,0xbfe7127417da4204,1 +np.float64,0x3fd6791cecacf238,0x3fd6039ccb5a7fde,1 +np.float64,0xbfc1dc1b1523b838,0xbfc1cd48edd9ae19,1 +np.float64,0xbfc92a8491325508,0xbfc901176e0158a5,1 +np.float64,0x3fa8649b3430c940,0x3fa8623e82d9504f,1 +np.float64,0x3fe0bed6a1617dae,0x3fdffbb307fb1abe,1 +np.float64,0x3febdf7765f7beee,0x3fe87ad01a89b74a,1 
+np.float64,0xbfd3a56d46a74ada,0xbfd356cf41bf83cd,1 +np.float64,0x3fd321d824a643b0,0x3fd2d93846a224b3,1 +np.float64,0xbfc6a49fb52d4940,0xbfc686704906e7d3,1 +np.float64,0xbfdd4103c9ba8208,0xbfdc3ef0c03615b4,1 +np.float64,0xbfe0b78a51e16f14,0xbfdfef0d9ffc38b5,1 +np.float64,0xbfdac7a908b58f52,0xbfda0158956ceecf,1 +np.float64,0xbfbfbf12f23f7e28,0xbfbfaa428989258c,1 +np.float64,0xbfd55f5aa2aabeb6,0xbfd4fa39de65f33a,1 +np.float64,0x3fe06969abe0d2d4,0x3fdf6744fafdd9cf,1 +np.float64,0x3fe56ab8be6ad572,0x3fe3da7a1986d543,1 +np.float64,0xbfeefbbec67df77e,0xbfea5d426132f4aa,1 +np.float64,0x3fe6e1f49cedc3ea,0x3fe4fb53f3d8e3d5,1 +np.float64,0x3feceb231c79d646,0x3fe923d3efa55414,1 +np.float64,0xbfd03dd08ea07ba2,0xbfd011549aa1998a,1 +np.float64,0xbfd688327aad1064,0xbfd611c61b56adbe,1 +np.float64,0xbfde3249d8bc6494,0xbfdd16a7237a39d5,1 +np.float64,0x3febd4b65677a96c,0x3fe873e1a401ef03,1 +np.float64,0xbfe46bd2b368d7a6,0xbfe31023c2467749,1 +np.float64,0x3fbf9f5cde3f3ec0,0x3fbf8aca8ec53c45,1 +np.float64,0x3fc20374032406e8,0x3fc1f43f1f2f4d5e,1 +np.float64,0xbfec143b16f82876,0xbfe89caa42582381,1 +np.float64,0xbfd14fa635a29f4c,0xbfd119ced11da669,1 +np.float64,0x3fe25236d4e4a46e,0x3fe156242d644b7a,1 +np.float64,0xbfe4ed793469daf2,0xbfe377a88928fd77,1 +np.float64,0xbfb363572626c6b0,0xbfb35e98d8fe87ae,1 +np.float64,0xbfb389d5aa2713a8,0xbfb384fae55565a7,1 +np.float64,0x3fca6e001934dc00,0x3fca3e0661eaca84,1 +np.float64,0x3fe748f3f76e91e8,0x3fe548ab2168aea6,1 +np.float64,0x3fef150efdfe2a1e,0x3fea6b92d74f60d3,1 +np.float64,0xbfd14b52b1a296a6,0xbfd115a387c0fa93,1 +np.float64,0x3fe3286b5ce650d6,0x3fe208a6469a7527,1 +np.float64,0xbfd57b4f4baaf69e,0xbfd514a12a9f7ab0,1 +np.float64,0xbfef14bd467e297b,0xbfea6b64bbfd42ce,1 +np.float64,0xbfe280bc90650179,0xbfe17d2c49955dba,1 +np.float64,0x3fca8759d7350eb0,0x3fca56d5c17bbc14,1 +np.float64,0xbfdf988f30bf311e,0xbfde53f96f69b05f,1 +np.float64,0x3f6b6eeb4036de00,0x3f6b6ee7e3f86f9a,1 +np.float64,0xbfed560be8faac18,0xbfe9656c5cf973d8,1 +np.float64,0x3fc6102c592c2058,0x3fc5f43efad5396d,1 +np.float64,0xbfdef64ed2bdec9e,0xbfddc4b7fbd45aea,1 +np.float64,0x3fe814acd570295a,0x3fe5df183d543bfe,1 +np.float64,0x3fca21313f344260,0x3fc9f2d47f64fbe2,1 +np.float64,0xbfe89932cc713266,0xbfe63f186a2f60ce,1 +np.float64,0x3fe4ffcff169ffa0,0x3fe386336115ee21,1 +np.float64,0x3fee6964087cd2c8,0x3fea093d31e2c2c5,1 +np.float64,0xbfbeea604e3dd4c0,0xbfbed72734852669,1 +np.float64,0xbfea1954fb7432aa,0xbfe74cdad8720032,1 +np.float64,0x3fea3e1a5ef47c34,0x3fe765ffba65a11d,1 +np.float64,0x3fcedb850b3db708,0x3fce8f39d92f00ba,1 +np.float64,0x3fd3b52d41a76a5c,0x3fd365d22b0003f9,1 +np.float64,0xbfa4108a0c282110,0xbfa40f397fcd844f,1 +np.float64,0x3fd7454c57ae8a98,0x3fd6c2e5542c6c83,1 +np.float64,0xbfeecd3c7a7d9a79,0xbfea42ca943a1695,1 +np.float64,0xbfdddda397bbbb48,0xbfdccb27283d4c4c,1 +np.float64,0x3fe6b52cf76d6a5a,0x3fe4d96ff32925ff,1 +np.float64,0xbfa39a75ec2734f0,0xbfa3993c0da84f87,1 +np.float64,0x3fdd3fe6fdba7fcc,0x3fdc3df12fe9e525,1 +np.float64,0xbfb57a98162af530,0xbfb5742525d5fbe2,1 +np.float64,0xbfd3e166cfa7c2ce,0xbfd38ff2891be9b0,1 +np.float64,0x3fdb6a04f9b6d408,0x3fda955e5018e9dc,1 +np.float64,0x3fe4ab03a4e95608,0x3fe342bfa76e1aa8,1 +np.float64,0xbfe6c8480b6d9090,0xbfe4e7eaa935b3f5,1 +np.float64,0xbdd6b5a17bae,0xbdd6b5a17bae,1 +np.float64,0xd6591979acb23,0xd6591979acb23,1 +np.float64,0x5adbed90b5b7e,0x5adbed90b5b7e,1 +np.float64,0xa664c5314cc99,0xa664c5314cc99,1 +np.float64,0x1727fb162e500,0x1727fb162e500,1 +np.float64,0xdb49a93db6935,0xdb49a93db6935,1 +np.float64,0xb10c958d62193,0xb10c958d62193,1 
+np.float64,0xad38276f5a705,0xad38276f5a705,1 +np.float64,0x1d5d0b983aba2,0x1d5d0b983aba2,1 +np.float64,0x915f48e122be9,0x915f48e122be9,1 +np.float64,0x475958ae8eb2c,0x475958ae8eb2c,1 +np.float64,0x3af8406675f09,0x3af8406675f09,1 +np.float64,0x655e88a4cabd2,0x655e88a4cabd2,1 +np.float64,0x40fee8ce81fde,0x40fee8ce81fde,1 +np.float64,0xab83103f57062,0xab83103f57062,1 +np.float64,0x7cf934b8f9f27,0x7cf934b8f9f27,1 +np.float64,0x29f7524853eeb,0x29f7524853eeb,1 +np.float64,0x4a5e954894bd3,0x4a5e954894bd3,1 +np.float64,0x24638f3a48c73,0x24638f3a48c73,1 +np.float64,0xa4f32fc749e66,0xa4f32fc749e66,1 +np.float64,0xf8e92df7f1d26,0xf8e92df7f1d26,1 +np.float64,0x292e9d50525d4,0x292e9d50525d4,1 +np.float64,0xe937e897d26fd,0xe937e897d26fd,1 +np.float64,0xd3bde1d5a77bc,0xd3bde1d5a77bc,1 +np.float64,0xa447ffd548900,0xa447ffd548900,1 +np.float64,0xa3b7b691476f7,0xa3b7b691476f7,1 +np.float64,0x490095c892013,0x490095c892013,1 +np.float64,0xfc853235f90a7,0xfc853235f90a7,1 +np.float64,0x5a8bc082b5179,0x5a8bc082b5179,1 +np.float64,0x1baca45a37595,0x1baca45a37595,1 +np.float64,0x2164120842c83,0x2164120842c83,1 +np.float64,0x66692bdeccd26,0x66692bdeccd26,1 +np.float64,0xf205bdd3e40b8,0xf205bdd3e40b8,1 +np.float64,0x7c3fff98f8801,0x7c3fff98f8801,1 +np.float64,0xccdf10e199bf,0xccdf10e199bf,1 +np.float64,0x92db8e8125b8,0x92db8e8125b8,1 +np.float64,0x5789a8d6af136,0x5789a8d6af136,1 +np.float64,0xbdda869d7bb51,0xbdda869d7bb51,1 +np.float64,0xb665e0596ccbc,0xb665e0596ccbc,1 +np.float64,0x74e6b46ee9cd7,0x74e6b46ee9cd7,1 +np.float64,0x4f39cf7c9e73b,0x4f39cf7c9e73b,1 +np.float64,0xfdbf3907fb7e7,0xfdbf3907fb7e7,1 +np.float64,0xafdef4d55fbdf,0xafdef4d55fbdf,1 +np.float64,0xb49858236930b,0xb49858236930b,1 +np.float64,0x3ebe21d47d7c5,0x3ebe21d47d7c5,1 +np.float64,0x5b620512b6c41,0x5b620512b6c41,1 +np.float64,0x31918cda63232,0x31918cda63232,1 +np.float64,0x68b5741ed16af,0x68b5741ed16af,1 +np.float64,0xa5c09a5b4b814,0xa5c09a5b4b814,1 +np.float64,0x55f51c14abea4,0x55f51c14abea4,1 +np.float64,0xda8a3e41b515,0xda8a3e41b515,1 +np.float64,0x9ea9c8513d539,0x9ea9c8513d539,1 +np.float64,0x7f23b964fe478,0x7f23b964fe478,1 +np.float64,0xf6e08c7bedc12,0xf6e08c7bedc12,1 +np.float64,0x7267aa24e4cf6,0x7267aa24e4cf6,1 +np.float64,0x236bb93a46d78,0x236bb93a46d78,1 +np.float64,0x9a98430b35309,0x9a98430b35309,1 +np.float64,0xbb683fef76d08,0xbb683fef76d08,1 +np.float64,0x1ff0eb6e3fe1e,0x1ff0eb6e3fe1e,1 +np.float64,0xf524038fea481,0xf524038fea481,1 +np.float64,0xd714e449ae29d,0xd714e449ae29d,1 +np.float64,0x4154fd7682aa0,0x4154fd7682aa0,1 +np.float64,0x5b8d2f6cb71a7,0x5b8d2f6cb71a7,1 +np.float64,0xc91aa21d92355,0xc91aa21d92355,1 +np.float64,0xbd94fd117b2a0,0xbd94fd117b2a0,1 +np.float64,0x685b207ad0b65,0x685b207ad0b65,1 +np.float64,0xd2485b05a490c,0xd2485b05a490c,1 +np.float64,0x151ea5e62a3d6,0x151ea5e62a3d6,1 +np.float64,0x2635a7164c6b6,0x2635a7164c6b6,1 +np.float64,0x88ae3b5d115c8,0x88ae3b5d115c8,1 +np.float64,0x8a055a55140ac,0x8a055a55140ac,1 +np.float64,0x756f7694eadef,0x756f7694eadef,1 +np.float64,0x866d74630cdaf,0x866d74630cdaf,1 +np.float64,0x39e44f2873c8b,0x39e44f2873c8b,1 +np.float64,0x2a07ceb6540fb,0x2a07ceb6540fb,1 +np.float64,0xc52b96398a573,0xc52b96398a573,1 +np.float64,0x9546543b2a8cb,0x9546543b2a8cb,1 +np.float64,0x5b995b90b732c,0x5b995b90b732c,1 +np.float64,0x2de10a565bc22,0x2de10a565bc22,1 +np.float64,0x3b06ee94760df,0x3b06ee94760df,1 +np.float64,0xb18e77a5631cf,0xb18e77a5631cf,1 +np.float64,0x3b89ae3a77137,0x3b89ae3a77137,1 +np.float64,0xd9b0b6e5b3617,0xd9b0b6e5b3617,1 +np.float64,0x30b2310861647,0x30b2310861647,1 
+np.float64,0x326a3ab464d48,0x326a3ab464d48,1 +np.float64,0x4c18610a9830d,0x4c18610a9830d,1 +np.float64,0x541dea42a83be,0x541dea42a83be,1 +np.float64,0xcd027dbf9a050,0xcd027dbf9a050,1 +np.float64,0x780a0f80f015,0x780a0f80f015,1 +np.float64,0x740ed5b2e81db,0x740ed5b2e81db,1 +np.float64,0xc226814d844d0,0xc226814d844d0,1 +np.float64,0xde958541bd2b1,0xde958541bd2b1,1 +np.float64,0xb563d3296ac7b,0xb563d3296ac7b,1 +np.float64,0x1db3b0b83b677,0x1db3b0b83b677,1 +np.float64,0xa7b0275d4f605,0xa7b0275d4f605,1 +np.float64,0x72f8d038e5f1b,0x72f8d038e5f1b,1 +np.float64,0x860ed1350c1da,0x860ed1350c1da,1 +np.float64,0x79f88262f3f11,0x79f88262f3f11,1 +np.float64,0x8817761f102ef,0x8817761f102ef,1 +np.float64,0xac44784b5888f,0xac44784b5888f,1 +np.float64,0x800fd594241fab28,0x800fd594241fab28,1 +np.float64,0x800ede32f8ddbc66,0x800ede32f8ddbc66,1 +np.float64,0x800de4c1121bc982,0x800de4c1121bc982,1 +np.float64,0x80076ebcddcedd7a,0x80076ebcddcedd7a,1 +np.float64,0x800b3fee06567fdc,0x800b3fee06567fdc,1 +np.float64,0x800b444426b68889,0x800b444426b68889,1 +np.float64,0x800b1c037a563807,0x800b1c037a563807,1 +np.float64,0x8001eb88c2a3d712,0x8001eb88c2a3d712,1 +np.float64,0x80058aae6dab155e,0x80058aae6dab155e,1 +np.float64,0x80083df2d4f07be6,0x80083df2d4f07be6,1 +np.float64,0x800e3b19d97c7634,0x800e3b19d97c7634,1 +np.float64,0x800a71c6f374e38e,0x800a71c6f374e38e,1 +np.float64,0x80048557f1490ab1,0x80048557f1490ab1,1 +np.float64,0x8000a00e6b01401e,0x8000a00e6b01401e,1 +np.float64,0x800766a3e2cecd49,0x800766a3e2cecd49,1 +np.float64,0x80015eb44602bd69,0x80015eb44602bd69,1 +np.float64,0x800bde885a77bd11,0x800bde885a77bd11,1 +np.float64,0x800224c53ea4498b,0x800224c53ea4498b,1 +np.float64,0x80048e8c6a291d1a,0x80048e8c6a291d1a,1 +np.float64,0x800b667e4af6ccfd,0x800b667e4af6ccfd,1 +np.float64,0x800ae3d7e395c7b0,0x800ae3d7e395c7b0,1 +np.float64,0x80086c245550d849,0x80086c245550d849,1 +np.float64,0x800d7d25f6fafa4c,0x800d7d25f6fafa4c,1 +np.float64,0x800f8d9ab0ff1b35,0x800f8d9ab0ff1b35,1 +np.float64,0x800690e949cd21d3,0x800690e949cd21d3,1 +np.float64,0x8003022381060448,0x8003022381060448,1 +np.float64,0x80085e0dad70bc1c,0x80085e0dad70bc1c,1 +np.float64,0x800e2ffc369c5ff9,0x800e2ffc369c5ff9,1 +np.float64,0x800b629b5af6c537,0x800b629b5af6c537,1 +np.float64,0x800fdc964b7fb92d,0x800fdc964b7fb92d,1 +np.float64,0x80036bb4b1c6d76a,0x80036bb4b1c6d76a,1 +np.float64,0x800b382f7f16705f,0x800b382f7f16705f,1 +np.float64,0x800ebac9445d7593,0x800ebac9445d7593,1 +np.float64,0x80015075c3e2a0ec,0x80015075c3e2a0ec,1 +np.float64,0x8002a6ec5ce54dd9,0x8002a6ec5ce54dd9,1 +np.float64,0x8009fab74a93f56f,0x8009fab74a93f56f,1 +np.float64,0x800c94b9ea992974,0x800c94b9ea992974,1 +np.float64,0x800dc2efd75b85e0,0x800dc2efd75b85e0,1 +np.float64,0x800be6400d57cc80,0x800be6400d57cc80,1 +np.float64,0x80021f6858443ed1,0x80021f6858443ed1,1 +np.float64,0x800600e2ac4c01c6,0x800600e2ac4c01c6,1 +np.float64,0x800a2159e6b442b4,0x800a2159e6b442b4,1 +np.float64,0x800c912f4bb9225f,0x800c912f4bb9225f,1 +np.float64,0x800a863a9db50c76,0x800a863a9db50c76,1 +np.float64,0x800ac16851d582d1,0x800ac16851d582d1,1 +np.float64,0x8003f7d32e87efa7,0x8003f7d32e87efa7,1 +np.float64,0x800be4eee3d7c9de,0x800be4eee3d7c9de,1 +np.float64,0x80069ff0ac4d3fe2,0x80069ff0ac4d3fe2,1 +np.float64,0x80061c986d4c3932,0x80061c986d4c3932,1 +np.float64,0x8000737b4de0e6f7,0x8000737b4de0e6f7,1 +np.float64,0x8002066ef7440cdf,0x8002066ef7440cdf,1 +np.float64,0x8001007050c200e1,0x8001007050c200e1,1 +np.float64,0x8008df9fa351bf40,0x8008df9fa351bf40,1 +np.float64,0x800f8394ee5f072a,0x800f8394ee5f072a,1 
+np.float64,0x80008e0b01c11c17,0x80008e0b01c11c17,1 +np.float64,0x800f7088ed3ee112,0x800f7088ed3ee112,1 +np.float64,0x800285b86f650b72,0x800285b86f650b72,1 +np.float64,0x8008ec18af51d832,0x8008ec18af51d832,1 +np.float64,0x800da08523bb410a,0x800da08523bb410a,1 +np.float64,0x800de853ca7bd0a8,0x800de853ca7bd0a8,1 +np.float64,0x8008c8aefad1915e,0x8008c8aefad1915e,1 +np.float64,0x80010c39d5821874,0x80010c39d5821874,1 +np.float64,0x8009208349724107,0x8009208349724107,1 +np.float64,0x800783783f0f06f1,0x800783783f0f06f1,1 +np.float64,0x80025caf9984b960,0x80025caf9984b960,1 +np.float64,0x800bc76fa6778ee0,0x800bc76fa6778ee0,1 +np.float64,0x80017e2f89a2fc60,0x80017e2f89a2fc60,1 +np.float64,0x800ef169843de2d3,0x800ef169843de2d3,1 +np.float64,0x80098a5f7db314bf,0x80098a5f7db314bf,1 +np.float64,0x800d646f971ac8df,0x800d646f971ac8df,1 +np.float64,0x800110d1dc6221a4,0x800110d1dc6221a4,1 +np.float64,0x800f8b422a1f1684,0x800f8b422a1f1684,1 +np.float64,0x800785c97dcf0b94,0x800785c97dcf0b94,1 +np.float64,0x800da201283b4403,0x800da201283b4403,1 +np.float64,0x800a117cc7b422fa,0x800a117cc7b422fa,1 +np.float64,0x80024731cfa48e64,0x80024731cfa48e64,1 +np.float64,0x800199d456c333a9,0x800199d456c333a9,1 +np.float64,0x8005f66bab8becd8,0x8005f66bab8becd8,1 +np.float64,0x8008e7227c11ce45,0x8008e7227c11ce45,1 +np.float64,0x8007b66cc42f6cda,0x8007b66cc42f6cda,1 +np.float64,0x800669e6f98cd3cf,0x800669e6f98cd3cf,1 +np.float64,0x800aed917375db23,0x800aed917375db23,1 +np.float64,0x8008b6dd15116dbb,0x8008b6dd15116dbb,1 +np.float64,0x800f49869cfe930d,0x800f49869cfe930d,1 +np.float64,0x800a712661b4e24d,0x800a712661b4e24d,1 +np.float64,0x800944e816f289d1,0x800944e816f289d1,1 +np.float64,0x800eba0f8a1d741f,0x800eba0f8a1d741f,1 +np.float64,0x800cf6ded139edbe,0x800cf6ded139edbe,1 +np.float64,0x80023100c6246202,0x80023100c6246202,1 +np.float64,0x800c5a94add8b52a,0x800c5a94add8b52a,1 +np.float64,0x800adf329b95be66,0x800adf329b95be66,1 +np.float64,0x800af9afc115f360,0x800af9afc115f360,1 +np.float64,0x800d66ce837acd9d,0x800d66ce837acd9d,1 +np.float64,0x8003ffb5e507ff6d,0x8003ffb5e507ff6d,1 +np.float64,0x80027d280024fa51,0x80027d280024fa51,1 +np.float64,0x800fc37e1d1f86fc,0x800fc37e1d1f86fc,1 +np.float64,0x800fc7258b9f8e4b,0x800fc7258b9f8e4b,1 +np.float64,0x8003fb5789e7f6b0,0x8003fb5789e7f6b0,1 +np.float64,0x800eb4e7a13d69cf,0x800eb4e7a13d69cf,1 +np.float64,0x800951850952a30a,0x800951850952a30a,1 +np.float64,0x3fed4071be3a80e3,0x3fe95842074431df,1 +np.float64,0x3f8d2341203a4682,0x3f8d2300b453bd9f,1 +np.float64,0x3fdc8ce332b919c6,0x3fdb9cdf1440c28f,1 +np.float64,0x3fdc69bd84b8d37b,0x3fdb7d25c8166b7b,1 +np.float64,0x3fc4c22ad0298456,0x3fc4aae73e231b4f,1 +np.float64,0x3fea237809f446f0,0x3fe753cc6ca96193,1 +np.float64,0x3fd34cf6462699ed,0x3fd30268909bb47e,1 +np.float64,0x3fafce20643f9c41,0x3fafc8e41a240e35,1 +np.float64,0x3fdc6d416538da83,0x3fdb805262292863,1 +np.float64,0x3fe7d8362aefb06c,0x3fe5b2ce659db7fd,1 +np.float64,0x3fe290087de52011,0x3fe189f9a3eb123d,1 +np.float64,0x3fa62d2bf82c5a58,0x3fa62b65958ca2b8,1 +np.float64,0x3fafd134403fa269,0x3fafcbf670f8a6f3,1 +np.float64,0x3fa224e53c2449ca,0x3fa223ec5de1631b,1 +np.float64,0x3fb67e2c2c2cfc58,0x3fb676c445fb70a0,1 +np.float64,0x3fda358d01346b1a,0x3fd97b9441666eb2,1 +np.float64,0x3fdd30fc4bba61f9,0x3fdc308da423778d,1 +np.float64,0x3fc56e99c52add34,0x3fc5550004492621,1 +np.float64,0x3fe32d08de265a12,0x3fe20c761a73cec2,1 +np.float64,0x3fd46cf932a8d9f2,0x3fd414a7f3db03df,1 +np.float64,0x3fd94cfa2b3299f4,0x3fd8a5961b3e4bdd,1 +np.float64,0x3fed6ea3a6fadd47,0x3fe9745b2f6c9204,1 
+np.float64,0x3fe4431d1768863a,0x3fe2ef61d0481de0,1 +np.float64,0x3fe1d8e00ea3b1c0,0x3fe0efab5050ee78,1 +np.float64,0x3fe56f37dcaade70,0x3fe3de00b0f392e0,1 +np.float64,0x3fde919a2dbd2334,0x3fdd6b6d2dcf2396,1 +np.float64,0x3fe251e3d4a4a3c8,0x3fe155de69605d60,1 +np.float64,0x3fe5e0ecc5abc1da,0x3fe436a5de5516cf,1 +np.float64,0x3fcd48780c3a90f0,0x3fcd073fa907ba9b,1 +np.float64,0x3fe4e8149229d029,0x3fe37360801d5b66,1 +np.float64,0x3fb9ef159633de2b,0x3fb9e3bc05a15d1d,1 +np.float64,0x3fc24a3f0424947e,0x3fc23a5432ca0e7c,1 +np.float64,0x3fe55ca196aab943,0x3fe3cf6b3143435a,1 +np.float64,0x3fe184544c2308a9,0x3fe0a7b49fa80aec,1 +np.float64,0x3fe2c76e83658edd,0x3fe1b8355c1ea771,1 +np.float64,0x3fea8d2c4ab51a59,0x3fe79ba85aabc099,1 +np.float64,0x3fd74f98abae9f31,0x3fd6cc85005d0593,1 +np.float64,0x3fec6de9a678dbd3,0x3fe8d59a1d23cdd1,1 +np.float64,0x3fec8a0e50f9141d,0x3fe8e7500f6f6a00,1 +np.float64,0x3fe9de6d08b3bcda,0x3fe7245319508767,1 +np.float64,0x3fe4461fd1688c40,0x3fe2f1cf0b93aba6,1 +np.float64,0x3fde342d9d3c685b,0x3fdd185609d5719d,1 +np.float64,0x3feb413fc8368280,0x3fe813c091d2519a,1 +np.float64,0x3fe64333156c8666,0x3fe48275b9a6a358,1 +np.float64,0x3fe03c65226078ca,0x3fdf18b26786be35,1 +np.float64,0x3fee11054dbc220b,0x3fe9d579a1cfa7ad,1 +np.float64,0x3fbaefccae35df99,0x3fbae314fef7c7ea,1 +np.float64,0x3feed4e3487da9c7,0x3fea4729241c8811,1 +np.float64,0x3fbb655df836cabc,0x3fbb57fcf9a097be,1 +np.float64,0x3fe68b0273ed1605,0x3fe4b96109afdf76,1 +np.float64,0x3fd216bfc3242d80,0x3fd1d957363f6a43,1 +np.float64,0x3fe01328d4a02652,0x3fded083bbf94aba,1 +np.float64,0x3fe3f9a61ae7f34c,0x3fe2b3f701b79028,1 +np.float64,0x3fed4e7cf8fa9cfa,0x3fe960d27084fb40,1 +np.float64,0x3faec08e343d811c,0x3faebbd2aa07ac1f,1 +np.float64,0x3fd2d1bbeea5a378,0x3fd28c9aefcf48ad,1 +np.float64,0x3fd92e941fb25d28,0x3fd889857f88410d,1 +np.float64,0x3fe43decb7e87bd9,0x3fe2eb32b4ee4667,1 +np.float64,0x3fef49cabcfe9395,0x3fea892f9a233f76,1 +np.float64,0x3fe3e96812e7d2d0,0x3fe2a6c6b45dd6ee,1 +np.float64,0x3fd24c0293a49805,0x3fd20c76d54473cb,1 +np.float64,0x3fb43d6b7e287ad7,0x3fb438060772795a,1 +np.float64,0x3fe87bf7d3f0f7f0,0x3fe62a0c47411c62,1 +np.float64,0x3fee82a2e07d0546,0x3fea17e27e752b7b,1 +np.float64,0x3fe40c01bbe81803,0x3fe2c2d9483f44d8,1 +np.float64,0x3fd686ccae2d0d99,0x3fd610763fb61097,1 +np.float64,0x3fe90fcf2af21f9e,0x3fe693c12df59ba9,1 +np.float64,0x3fefb3ce11ff679c,0x3feac3dd4787529d,1 +np.float64,0x3fcec53ff63d8a80,0x3fce79992af00c58,1 +np.float64,0x3fe599dd7bab33bb,0x3fe3ff5da7575d85,1 +np.float64,0x3fe9923b1a732476,0x3fe6ef71d13db456,1 +np.float64,0x3febf76fcef7eee0,0x3fe88a3952e11373,1 +np.float64,0x3fc2cfd128259fa2,0x3fc2be7fd47fd811,1 +np.float64,0x3fe4d37ae269a6f6,0x3fe36300d45e3745,1 +np.float64,0x3fe23aa2e4247546,0x3fe1424e172f756f,1 +np.float64,0x3fe4f0596ca9e0b3,0x3fe379f0c49de7ef,1 +np.float64,0x3fe2e4802fe5c900,0x3fe1d062a8812601,1 +np.float64,0x3fe5989c79eb3139,0x3fe3fe6308552dec,1 +np.float64,0x3fe3c53cb4e78a79,0x3fe28956e573aca4,1 +np.float64,0x3fe6512beeeca258,0x3fe48d2d5ece979f,1 +np.float64,0x3fd8473ddb308e7c,0x3fd7b33e38adc6ad,1 +np.float64,0x3fecd09c9679a139,0x3fe91361fa0c5bcb,1 +np.float64,0x3fc991530e3322a6,0x3fc965e2c514a9e9,1 +np.float64,0x3f6d4508403a8a11,0x3f6d45042b68acc5,1 +np.float64,0x3fea1f198f743e33,0x3fe750ce918d9330,1 +np.float64,0x3fd0a0bb4da14177,0x3fd07100f9c71e1c,1 +np.float64,0x3fd30c45ffa6188c,0x3fd2c499f9961f66,1 +np.float64,0x3fcad98e7c35b31d,0x3fcaa74293cbc52e,1 +np.float64,0x3fec8e4a5eb91c95,0x3fe8e9f898d118db,1 +np.float64,0x3fd19fdb79233fb7,0x3fd1670c00febd24,1 
+np.float64,0x3fea9fcbb1f53f97,0x3fe7a836b29c4075,1 +np.float64,0x3fc6d12ea12da25d,0x3fc6b24bd2f89f59,1 +np.float64,0x3fd6af3658ad5e6d,0x3fd636613e08df3f,1 +np.float64,0x3fe31bc385a63787,0x3fe1fe3081621213,1 +np.float64,0x3fc0dbba2221b774,0x3fc0cf42c9313dba,1 +np.float64,0x3fef639ce87ec73a,0x3fea9795454f1036,1 +np.float64,0x3fee5f29dcbcbe54,0x3fea0349b288f355,1 +np.float64,0x3fed46bdb37a8d7b,0x3fe95c199f5aa569,1 +np.float64,0x3fef176afa3e2ed6,0x3fea6ce78b2aa3aa,1 +np.float64,0x3fc841e7683083cf,0x3fc81cccb84848cc,1 +np.float64,0xbfda3ec9a2347d94,0xbfd9840d180e9de3,1 +np.float64,0xbfcd5967ae3ab2d0,0xbfcd17be13142bb9,1 +np.float64,0xbfedf816573bf02d,0xbfe9c6bb06476c60,1 +np.float64,0xbfd0d6e10e21adc2,0xbfd0a54f99d2f3dc,1 +np.float64,0xbfe282df096505be,0xbfe17ef5e2e80760,1 +np.float64,0xbfd77ae6e62ef5ce,0xbfd6f4f6b603ad8a,1 +np.float64,0xbfe37b171aa6f62e,0xbfe24cb4b2d0ade4,1 +np.float64,0xbfef9e5ed9bf3cbe,0xbfeab817b41000bd,1 +np.float64,0xbfe624d6f96c49ae,0xbfe46b1e9c9aff86,1 +np.float64,0xbfefb5da65ff6bb5,0xbfeac4fc9c982772,1 +np.float64,0xbfd29a65d52534cc,0xbfd2579df8ff87b9,1 +np.float64,0xbfd40270172804e0,0xbfd3af6471104aef,1 +np.float64,0xbfb729ee7a2e53e0,0xbfb721d7dbd2705e,1 +np.float64,0xbfb746f1382e8de0,0xbfb73ebc1207f8e3,1 +np.float64,0xbfd3c7e606a78fcc,0xbfd377a8aa1b0dd9,1 +np.float64,0xbfd18c4880231892,0xbfd1543506584ad5,1 +np.float64,0xbfea988080753101,0xbfe7a34cba0d0fa1,1 +np.float64,0xbf877400e02ee800,0xbf8773df47fa7e35,1 +np.float64,0xbfb07e050820fc08,0xbfb07b198d4a52c9,1 +np.float64,0xbfee0a3621fc146c,0xbfe9d1745a05ba77,1 +np.float64,0xbfe78de246ef1bc4,0xbfe57bf2baab91c8,1 +np.float64,0xbfcdbfd3bd3b7fa8,0xbfcd7b728a955a06,1 +np.float64,0xbfe855ea79b0abd5,0xbfe60e8a4a17b921,1 +np.float64,0xbfd86c8e3530d91c,0xbfd7d5e36c918dc1,1 +np.float64,0xbfe4543169e8a863,0xbfe2fd23d42f552e,1 +np.float64,0xbfe41efbf1283df8,0xbfe2d235a2faed1a,1 +np.float64,0xbfd9a55464b34aa8,0xbfd8f7083f7281e5,1 +np.float64,0xbfe5f5078d6bea0f,0xbfe44637d910c270,1 +np.float64,0xbfe6d83e3dedb07c,0xbfe4f3fdadd10552,1 +np.float64,0xbfdb767e70b6ecfc,0xbfdaa0b6c17f3fb1,1 +np.float64,0xbfdfc91b663f9236,0xbfde7eb0dfbeaa26,1 +np.float64,0xbfbfbd18783f7a30,0xbfbfa84bf2fa1c8d,1 +np.float64,0xbfe51199242a2332,0xbfe39447dbe066ae,1 +np.float64,0xbfdbb94814b77290,0xbfdadd63bd796972,1 +np.float64,0xbfd8c6272cb18c4e,0xbfd828f2d9e8607e,1 +np.float64,0xbfce51e0b63ca3c0,0xbfce097ee908083a,1 +np.float64,0xbfe99a177d73342f,0xbfe6f4ec776a57ae,1 +np.float64,0xbfefde2ab0ffbc55,0xbfeadafdcbf54733,1 +np.float64,0xbfcccb5c1c3996b8,0xbfcc8d586a73d126,1 +np.float64,0xbfdf7ddcedbefbba,0xbfde3c749a906de7,1 +np.float64,0xbfef940516ff280a,0xbfeab26429e89f4b,1 +np.float64,0xbfe08009f1e10014,0xbfdf8eab352997eb,1 +np.float64,0xbfe9c02682b3804d,0xbfe70f5fd05f79ee,1 +np.float64,0xbfb3ca1732279430,0xbfb3c50bec5b453a,1 +np.float64,0xbfe368e81926d1d0,0xbfe23dc704d0887c,1 +np.float64,0xbfbd20cc2e3a4198,0xbfbd10b7e6d81c6c,1 +np.float64,0xbfd67ece4d2cfd9c,0xbfd608f527dcc5e7,1 +np.float64,0xbfdc02d1333805a2,0xbfdb20104454b79f,1 +np.float64,0xbfc007a626200f4c,0xbfbff9dc9dc70193,1 +np.float64,0xbfda9e4f8fb53ca0,0xbfd9db8af35dc630,1 +np.float64,0xbfd8173d77302e7a,0xbfd786a0cf3e2914,1 +np.float64,0xbfeb8fcbd0b71f98,0xbfe84734debc10fb,1 +np.float64,0xbfe4bf1cb7697e3a,0xbfe352c891113f29,1 +np.float64,0xbfc18624d5230c48,0xbfc178248e863b64,1 +np.float64,0xbfcf184bac3e3098,0xbfceca3b19be1ebe,1 +np.float64,0xbfd2269c42a44d38,0xbfd1e8920d72b694,1 +np.float64,0xbfe8808526b1010a,0xbfe62d5497292495,1 +np.float64,0xbfe498bd1da9317a,0xbfe334245eadea93,1 
+np.float64,0xbfef0855aebe10ab,0xbfea6462f29aeaf9,1 +np.float64,0xbfdeb186c93d630e,0xbfdd87c37943c602,1 +np.float64,0xbfb29fe2ae253fc8,0xbfb29bae3c87efe4,1 +np.float64,0xbfddd9c6c3bbb38e,0xbfdcc7b400bf384b,1 +np.float64,0xbfe3506673e6a0cd,0xbfe2299f26295553,1 +np.float64,0xbfe765957a2ecb2b,0xbfe55e03cf22edab,1 +np.float64,0xbfecc9876c79930f,0xbfe90efaf15b6207,1 +np.float64,0xbfefb37a0a7f66f4,0xbfeac3af3898e7c2,1 +np.float64,0xbfeefa0da7bdf41b,0xbfea5c4cde53c1c3,1 +np.float64,0xbfe6639ee9ecc73e,0xbfe49b4e28a72482,1 +np.float64,0xbfef91a4bb7f2349,0xbfeab114ac9e25dd,1 +np.float64,0xbfc8b392bb316724,0xbfc88c657f4441a3,1 +np.float64,0xbfc88a358231146c,0xbfc863cb900970fe,1 +np.float64,0xbfef25a9d23e4b54,0xbfea74eda432aabe,1 +np.float64,0xbfe6aceea0ed59de,0xbfe4d32e54a3fd01,1 +np.float64,0xbfefe2b3e37fc568,0xbfeadd74f4605835,1 +np.float64,0xbfa9eecb8833dd90,0xbfa9ebf4f4cb2591,1 +np.float64,0xbfd42bad7428575a,0xbfd3d69de8e52d0a,1 +np.float64,0xbfbc366b4a386cd8,0xbfbc27ceee8f3019,1 +np.float64,0xbfd9bca7be337950,0xbfd90c80e6204e57,1 +np.float64,0xbfe8173f53f02e7f,0xbfe5e0f8d8ed329c,1 +np.float64,0xbfce22dbcb3c45b8,0xbfcddbc8159b63af,1 +np.float64,0xbfea2d7ba7345af7,0xbfe75aa62ad5b80a,1 +np.float64,0xbfc08b783e2116f0,0xbfc07faf8d501558,1 +np.float64,0xbfb8c4161c318830,0xbfb8ba33950748ec,1 +np.float64,0xbfddd930bcbbb262,0xbfdcc72dffdf51bb,1 +np.float64,0xbfd108ce8a22119e,0xbfd0d5801e7698bd,1 +np.float64,0xbfd5bd2b5dab7a56,0xbfd552c52c468c76,1 +np.float64,0xbfe7ffe67fefffcd,0xbfe5cfe96e35e6e5,1 +np.float64,0xbfa04ec6bc209d90,0xbfa04e120a2c25cc,1 +np.float64,0xbfef7752cc7eeea6,0xbfeaa28715addc4f,1 +np.float64,0xbfe7083c2eae1078,0xbfe5182bf8ddfc8e,1 +np.float64,0xbfe05dafd0a0bb60,0xbfdf52d397cfe5f6,1 +np.float64,0xbfacb4f2243969e0,0xbfacb118991ea235,1 +np.float64,0xbfc7d47e422fa8fc,0xbfc7b1504714a4fd,1 +np.float64,0xbfbd70b2243ae168,0xbfbd60182efb61de,1 +np.float64,0xbfe930e49cb261c9,0xbfe6ab272b3f9cfc,1 +np.float64,0xbfb5f537e62bea70,0xbfb5ee540dcdc635,1 +np.float64,0xbfbb0c8278361908,0xbfbaffa1f7642a87,1 +np.float64,0xbfe82af2447055e4,0xbfe5ef54ca8db9e8,1 +np.float64,0xbfe92245e6f2448c,0xbfe6a0d32168040b,1 +np.float64,0xbfb799a8522f3350,0xbfb7911a7ada3640,1 +np.float64,0x7faa8290c8350521,0x3fe5916f67209cd6,1 +np.float64,0x7f976597082ecb2d,0x3fcf94dce396bd37,1 +np.float64,0x7fede721237bce41,0x3fe3e7b1575b005f,1 +np.float64,0x7fd5f674d72bece9,0x3fe3210628eba199,1 +np.float64,0x7f9b0f1aa0361e34,0x3feffd34d15d1da7,1 +np.float64,0x7fec48346ab89068,0x3fe93dd84253d9a2,1 +np.float64,0x7f9cac76283958eb,0xbfec4cd999653868,1 +np.float64,0x7fed51ab6bbaa356,0x3fecc27fb5f37bca,1 +np.float64,0x7fded3c116bda781,0xbfda473efee47cf1,1 +np.float64,0x7fd19c48baa33890,0xbfe25700cbfc0326,1 +np.float64,0x7fe5c8f478ab91e8,0xbfee4ab6d84806be,1 +np.float64,0x7fe53c64e46a78c9,0x3fee19c3f227f4e1,1 +np.float64,0x7fc2ad1936255a31,0xbfe56db9b877f807,1 +np.float64,0x7fe2b071b52560e2,0xbfce3990a8d390a9,1 +np.float64,0x7fc93f3217327e63,0xbfd1f6d7ef838d2b,1 +np.float64,0x7fec26df08784dbd,0x3fd5397be41c93d9,1 +np.float64,0x7fcf4770183e8edf,0x3fe6354f5a785016,1 +np.float64,0x7fdc9fcc0bb93f97,0xbfeeeae952e8267d,1 +np.float64,0x7feb21f29c7643e4,0x3fec20122e33f1bf,1 +np.float64,0x7fd0b51273216a24,0x3fefb09f8daba00b,1 +np.float64,0x7fe747a9d76e8f53,0x3feb46a3232842a4,1 +np.float64,0x7fd58885972b110a,0xbfce5ea57c186221,1 +np.float64,0x7fca3ce85c3479d0,0x3fef93a24548e8ca,1 +np.float64,0x7fe1528a46a2a514,0xbfb54bb578d9da91,1 +np.float64,0x7fcc58b21b38b163,0x3feffb5b741ffc2d,1 +np.float64,0x7fdabcaaf5357955,0x3fecbf855db524d1,1 
+np.float64,0x7fdd27c6933a4f8c,0xbfef2f41bb80144b,1 +np.float64,0x7fbda4e1be3b49c2,0x3fdb9b33f84f5381,1 +np.float64,0x7fe53363362a66c5,0x3fe4daff3a6a4ed0,1 +np.float64,0x7fe5719d62eae33a,0xbfef761d98f625d5,1 +np.float64,0x7f982ce5a83059ca,0x3fd0b27c3365f0a8,1 +np.float64,0x7fe6db8c42edb718,0x3fe786f4b1fe11a6,1 +np.float64,0x7fe62cca1b2c5993,0x3fd425b6c4c9714a,1 +np.float64,0x7feea88850bd5110,0xbfd7bbb432017175,1 +np.float64,0x7fad6c6ae43ad8d5,0x3fe82e49098bc6de,1 +np.float64,0x7fe70542f02e0a85,0x3fec3017960b4822,1 +np.float64,0x7feaf0bcbb35e178,0xbfc3aac74dd322d5,1 +np.float64,0x7fb5e152fe2bc2a5,0x3fd4b27a4720614c,1 +np.float64,0x7fe456ee5be8addc,0xbfe9e15ab5cff229,1 +np.float64,0x7fd4b53a8d296a74,0xbfefff450f503326,1 +np.float64,0x7fd7149d7a2e293a,0x3fef4ef0a9009096,1 +np.float64,0x7fd43fc5a8a87f8a,0x3fe0c929fee9dce7,1 +np.float64,0x7fef97022aff2e03,0x3fd4ea52a813da20,1 +np.float64,0x7fe035950ae06b29,0x3fef4e125394fb05,1 +np.float64,0x7fecd0548979a0a8,0x3fe89d226244037b,1 +np.float64,0x7fc79b3ac22f3675,0xbfee9c9cf78c8270,1 +np.float64,0x7fd8b8e8263171cf,0x3fe8e24437961db0,1 +np.float64,0x7fc288c23e251183,0xbfbaf8eca50986ca,1 +np.float64,0x7fe436b4b6686d68,0xbfecd661741931c4,1 +np.float64,0x7fcdf99abe3bf334,0x3feaa75c90830b92,1 +np.float64,0x7fd9f9739233f2e6,0xbfebbfcb301b0da5,1 +np.float64,0x7fd6fcbd1b2df979,0xbfccf2c77cb65f56,1 +np.float64,0x7fe242a97b248552,0xbfe5b0f13bcbabc8,1 +np.float64,0x7fe38bf3e06717e7,0x3fbc8fa9004d2668,1 +np.float64,0x7fecd0e8d479a1d1,0xbfe886a6b4f73a4a,1 +np.float64,0x7fe958d60232b1ab,0xbfeb7c4cf0cee2dd,1 +np.float64,0x7f9d492b583a9256,0xbfebe975d00221cb,1 +np.float64,0x7fd6c9983bad932f,0xbfefe817621a31f6,1 +np.float64,0x7fed0d7239fa1ae3,0x3feac7e1b6455b4b,1 +np.float64,0x7fe61dac90ec3b58,0x3fef845b9efe8421,1 +np.float64,0x7f9acd3010359a5f,0xbfe460d376200130,1 +np.float64,0x7fedced9673b9db2,0xbfeeaf23445e1944,1 +np.float64,0x7fd9f271a733e4e2,0xbfd41544535ecb78,1 +np.float64,0x7fe703339bee0666,0x3fef93334626b56c,1 +np.float64,0x7fec7761b7b8eec2,0xbfe6da9179e8e714,1 +np.float64,0x7fdd9fff043b3ffd,0xbfc0761dfb8d94f9,1 +np.float64,0x7fdc10ed17b821d9,0x3fe1481e2a26c77f,1 +np.float64,0x7fe7681e72aed03c,0x3fefff94a6d47c84,1 +np.float64,0x7fe18c29e1e31853,0x3fe86ebd2fd89456,1 +np.float64,0x7fb2fb273c25f64d,0xbfefc136f57e06de,1 +np.float64,0x7fac2bbb90385776,0x3fe25d8e3cdae7e3,1 +np.float64,0x7fed16789efa2cf0,0x3fe94555091fdfd9,1 +np.float64,0x7fd8fe8f7831fd1e,0xbfed58d520361902,1 +np.float64,0x7fa59bde3c2b37bb,0x3fef585391c077ff,1 +np.float64,0x7fda981b53353036,0x3fde02ca08737b5f,1 +np.float64,0x7fd29f388aa53e70,0xbfe04f5499246df2,1 +np.float64,0x7fcd0232513a0464,0xbfd9737f2f565829,1 +np.float64,0x7fe9a881bcf35102,0xbfe079cf285b35dd,1 +np.float64,0x7fdbe399a9b7c732,0x3fe965bc4220f340,1 +np.float64,0x7feb77414af6ee82,0xbfb7df2fcd491f55,1 +np.float64,0x7fa26e86c424dd0d,0xbfea474c3d65b9be,1 +np.float64,0x7feaee869e35dd0c,0xbfd7b333a888cd14,1 +np.float64,0x7fcbd67f6137acfe,0xbfe15a7a15dfcee6,1 +np.float64,0x7fe36991e766d323,0xbfeb288077c4ed9f,1 +np.float64,0x7fdcf4f4fcb9e9e9,0xbfea331ef7a75e7b,1 +np.float64,0x7fbe3445643c688a,0x3fedf21b94ae8e37,1 +np.float64,0x7fd984cfd2b3099f,0x3fc0d3ade71c395e,1 +np.float64,0x7fdec987b23d930e,0x3fe4af5e48f6c26e,1 +np.float64,0x7fde56a9953cad52,0x3fc8e7762cefb8b0,1 +np.float64,0x7fd39fb446273f68,0xbfe6c3443208f44d,1 +np.float64,0x7fc609c1a72c1382,0x3fe884e639571baa,1 +np.float64,0x7fe001be4b20037c,0xbfed0d90cbcb6010,1 +np.float64,0x7fce7ace283cf59b,0xbfd0303792e51f49,1 +np.float64,0x7fe27ba93da4f751,0x3fe548b5ce740d71,1 
+np.float64,0x7fcc13c79b38278e,0xbfe2e14f5b64a1e9,1 +np.float64,0x7fc058550620b0a9,0x3fe44bb55ebd0590,1 +np.float64,0x7fa4ba8bf8297517,0x3fee59b39f9d08c4,1 +np.float64,0x7fe50d6872ea1ad0,0xbfea1eaa2d059e13,1 +np.float64,0x7feb7e33b476fc66,0xbfeff28a4424dd3e,1 +np.float64,0x7fe2d7d2a165afa4,0xbfdbaff0ba1ea460,1 +np.float64,0xffd126654b224cca,0xbfef0cd3031fb97c,1 +np.float64,0xffb5f884942bf108,0x3fe0de589bea2e4c,1 +np.float64,0xffe011b4bfe02369,0xbfe805a0edf1e1f2,1 +np.float64,0xffec13eae9b827d5,0x3fb5f30347d78447,1 +np.float64,0xffa6552ae82caa50,0x3fb1ecee60135f2f,1 +np.float64,0xffb62d38b02c5a70,0x3fbd35903148fd12,1 +np.float64,0xffe2c44ea425889d,0xbfd7616547f99a7d,1 +np.float64,0xffea24c61a74498c,0x3fef4a1b15ae9005,1 +np.float64,0xffd23a4ab2a47496,0x3fe933bfaa569ae9,1 +np.float64,0xffc34a073d269410,0xbfeec0f510bb7474,1 +np.float64,0xffeead84cfbd5b09,0x3feb2d635e5a78bd,1 +np.float64,0xffcfd8f3b43fb1e8,0xbfdd59625801771b,1 +np.float64,0xffd3c7f662a78fec,0x3f9cf3209edfbc4e,1 +np.float64,0xffe7b7e4f72f6fca,0xbfefdcff4925632c,1 +np.float64,0xffe48cab05e91956,0x3fe6b41217948423,1 +np.float64,0xffeb6980b336d301,0xbfca5de148f69324,1 +np.float64,0xffe3f15c4aa7e2b8,0xbfeb18efae892081,1 +np.float64,0xffcf290c713e5218,0x3fefe6f1a513ed26,1 +np.float64,0xffd80979b43012f4,0xbfde6c8df91af976,1 +np.float64,0xffc3181e0026303c,0x3fe7448f681def38,1 +np.float64,0xffedfa68f97bf4d1,0xbfeca6efb802d109,1 +np.float64,0xffca0931c0341264,0x3fe31b9f073b08cd,1 +np.float64,0xffe4c44934e98892,0x3feda393a2e8a0f7,1 +np.float64,0xffe65bb56f2cb76a,0xbfeffaf638a4b73e,1 +np.float64,0xffe406a332a80d46,0x3fe8151dadb853c1,1 +np.float64,0xffdb7eae9c36fd5e,0xbfeff89abf5ab16e,1 +np.float64,0xffe245a02da48b40,0x3fef1fb43e85f4b8,1 +np.float64,0xffe2bafa732575f4,0x3fcbab115c6fd86e,1 +np.float64,0xffe8b1eedb7163dd,0x3feff263df6f6b12,1 +np.float64,0xffe6c76c796d8ed8,0xbfe61a8668511293,1 +np.float64,0xffefe327d1ffc64f,0xbfd9b92887a84827,1 +np.float64,0xffa452180c28a430,0xbfa9b9e578a4e52f,1 +np.float64,0xffe9867d0bf30cf9,0xbfca577867588408,1 +np.float64,0xffdfe9b923bfd372,0x3fdab5c15f085c2d,1 +np.float64,0xffed590c6abab218,0xbfd7e7b6c5a120e6,1 +np.float64,0xffeaebcfbab5d79f,0x3fed58be8a9e2c3b,1 +np.float64,0xffe2ba83a8257507,0x3fe6c42a4ac1d4d9,1 +np.float64,0xffe01d5b0ee03ab6,0xbfe5dad6c9247db7,1 +np.float64,0xffe51095d52a212b,0x3fef822cebc32d8e,1 +np.float64,0xffebd7a901b7af51,0xbfe5e63f3e3b1185,1 +np.float64,0xffe4efdcde29dfb9,0xbfe811294dfa758f,1 +np.float64,0xffe3be1aa4a77c35,0x3fdd8dcfcd409bb1,1 +np.float64,0xffbe6f2f763cde60,0x3fd13766e43bd622,1 +np.float64,0xffeed3d80fbda7af,0x3fec10a23c1b7a4a,1 +np.float64,0xffd6ebff37add7fe,0xbfe6177411607c86,1 +np.float64,0xffe85a90f4b0b521,0x3fc09fdd66c8fde9,1 +np.float64,0xffea3d58c2b47ab1,0x3feb5bd4a04b3562,1 +np.float64,0xffef675be6beceb7,0x3fecd840683d1044,1 +np.float64,0xff726a088024d400,0x3feff2b4f47b5214,1 +np.float64,0xffc90856733210ac,0xbfe3c6ffbf6840a5,1 +np.float64,0xffc0b58d9a216b1c,0xbfe10314267d0611,1 +np.float64,0xffee1f3d0abc3e79,0xbfd12ea7efea9067,1 +np.float64,0xffd988c41a331188,0x3febe83802d8a32e,1 +np.float64,0xffe8f1ac9bb1e358,0xbfdbf5fa7e84f2f2,1 +np.float64,0xffe47af279e8f5e4,0x3fef11e339e5fa78,1 +np.float64,0xff9960a7f832c140,0xbfa150363f8ec5b2,1 +np.float64,0xffcac40fa7358820,0xbfec3d5847a3df1d,1 +np.float64,0xffcb024a9d360494,0xbfd060fa31fd6b6a,1 +np.float64,0xffe385ffb3270bff,0xbfee6859e8dcd9e8,1 +np.float64,0xffef62f2c53ec5e5,0x3fe0a71ffddfc718,1 +np.float64,0xffed87ff20fb0ffd,0xbfe661db7c4098e3,1 +np.float64,0xffe369278526d24e,0x3fd64d89a41822fc,1 
+np.float64,0xff950288c02a0520,0x3fe1df91d1ad7d5c,1 +np.float64,0xffe70e7c2cee1cf8,0x3fc9fece08df2fd8,1 +np.float64,0xffbaf020b635e040,0xbfc68c43ff9911a7,1 +np.float64,0xffee0120b0fc0240,0x3f9f792e17b490b0,1 +np.float64,0xffe1fa4be7a3f498,0xbfef4b18ab4b319e,1 +np.float64,0xffe61887bf2c310f,0x3fe846714826cb32,1 +np.float64,0xffdc3cf77f3879ee,0x3fe033b948a36125,1 +np.float64,0xffcc2b86f238570c,0xbfefdcceac3f220f,1 +np.float64,0xffe1f030c0a3e061,0x3fef502a808c359a,1 +np.float64,0xffb872c4ee30e588,0x3fef66ed8d3e6175,1 +np.float64,0xffeac8fc617591f8,0xbfe5d8448602aac9,1 +np.float64,0xffe5be16afab7c2d,0x3fee75ccde3cd14d,1 +np.float64,0xffae230ad83c4610,0xbfe49bbe6074d459,1 +np.float64,0xffc8fbeff531f7e0,0x3f77201e0c927f97,1 +np.float64,0xffdc314f48b8629e,0x3fef810dfc5db118,1 +np.float64,0xffec1f8970783f12,0x3fe15567102e042a,1 +np.float64,0xffc6995f902d32c0,0xbfecd5d2eedf342c,1 +np.float64,0xffdc7af76b38f5ee,0xbfd6e754476ab320,1 +np.float64,0xffb30cf8682619f0,0x3fd5ac3dfc4048d0,1 +np.float64,0xffd3a77695a74eee,0xbfefb5d6889e36e9,1 +np.float64,0xffd8b971803172e4,0xbfeb7f62f0b6c70b,1 +np.float64,0xffde4c0234bc9804,0xbfed50ba9e16d5e0,1 +np.float64,0xffb62b3f342c5680,0xbfeabc0de4069b84,1 +np.float64,0xff9af5674035eac0,0xbfed6c198b6b1bd8,1 +np.float64,0xffdfe20cb43fc41a,0x3fb11f8238f66306,1 +np.float64,0xffd2ecd7a0a5d9b0,0xbfec17ef1a62b1e3,1 +np.float64,0xffce60f7863cc1f0,0x3fe6dbcad3e3a006,1 +np.float64,0xffbbb8306a377060,0xbfbfd0fbef485c4c,1 +np.float64,0xffd1b2bd2b23657a,0xbfda3e046d987b99,1 +np.float64,0xffc480f4092901e8,0xbfeeff0427f6897b,1 +np.float64,0xffe6e02d926dc05a,0xbfcd59552778890b,1 +np.float64,0xffd302e5b7a605cc,0xbfee7c08641366b0,1 +np.float64,0xffec2eb92f785d72,0xbfef5c9c7f771050,1 +np.float64,0xffea3e31a9747c62,0xbfc49cd54755faf0,1 +np.float64,0xffce0a4e333c149c,0x3feeb9a6d0db4aee,1 +np.float64,0xffdc520a2db8a414,0x3fefc7b72613dcd0,1 +np.float64,0xffe056b968a0ad72,0xbfe47a9fe1f827fb,1 +np.float64,0xffe5a10f4cab421e,0x3fec2b1f74b73dec,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-sinh.csv b/python/numpy/_core/tests/data/umath-validation-set-sinh.csv new file mode 100644 index 000000000..1ef7b6e76 --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-sinh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfee27582,0xff800000,2 +np.float32,0xff19f092,0xff800000,2 +np.float32,0xbf393576,0xbf49cb31,2 +np.float32,0x8020fdea,0x8020fdea,2 +np.float32,0x455f4e,0x455f4e,2 +np.float32,0xff718c35,0xff800000,2 +np.float32,0x3f3215e3,0x3f40cce5,2 +np.float32,0x19e833,0x19e833,2 +np.float32,0xff2dcd49,0xff800000,2 +np.float32,0x7e8f6c95,0x7f800000,2 +np.float32,0xbf159dac,0xbf1e47a5,2 +np.float32,0x100d3d,0x100d3d,2 +np.float32,0xff673441,0xff800000,2 +np.float32,0x80275355,0x80275355,2 +np.float32,0x4812d0,0x4812d0,2 +np.float32,0x8072b956,0x8072b956,2 +np.float32,0xff3bb918,0xff800000,2 +np.float32,0x0,0x0,2 +np.float32,0xfe327798,0xff800000,2 +np.float32,0x41d4e2,0x41d4e2,2 +np.float32,0xfe34b1b8,0xff800000,2 +np.float32,0x80199f72,0x80199f72,2 +np.float32,0x807242ce,0x807242ce,2 +np.float32,0x3ef4202d,0x3efd7b48,2 +np.float32,0x763529,0x763529,2 +np.float32,0x4f6662,0x4f6662,2 +np.float32,0x3f18efe9,0x3f2232b5,2 +np.float32,0x80701846,0x80701846,2 +np.float32,0x3f599948,0x3f74c393,2 +np.float32,0x5a3d69,0x5a3d69,2 +np.float32,0xbf4a7e65,0xbf6047a3,2 +np.float32,0xff0d4c82,0xff800000,2 +np.float32,0x7a74db,0x7a74db,2 +np.float32,0x803388e6,0x803388e6,2 +np.float32,0x7f4430bb,0x7f800000,2 +np.float32,0x14c5b1,0x14c5b1,2 
+np.float32,0xfa113400,0xff800000,2 +np.float32,0x7f4b3209,0x7f800000,2 +np.float32,0x8038d88c,0x8038d88c,2 +np.float32,0xbef2f9de,0xbefc330b,2 +np.float32,0xbe147b38,0xbe15008f,2 +np.float32,0x2b61e6,0x2b61e6,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0x8060456c,0x8060456c,2 +np.float32,0x3f30fa82,0x3f3f6a99,2 +np.float32,0xfd1f0220,0xff800000,2 +np.float32,0xbf2b7555,0xbf389151,2 +np.float32,0xff100b7a,0xff800000,2 +np.float32,0x70d3cd,0x70d3cd,2 +np.float32,0x2a8d4a,0x2a8d4a,2 +np.float32,0xbf7b733f,0xbf92f05f,2 +np.float32,0x3f7106dc,0x3f8b1fc6,2 +np.float32,0x3f39da7a,0x3f4a9d79,2 +np.float32,0x3f5dd73f,0x3f7aaab5,2 +np.float32,0xbe8c8754,0xbe8e4cba,2 +np.float32,0xbf6c74c9,0xbf87c556,2 +np.float32,0x800efbbb,0x800efbbb,2 +np.float32,0xff054ab5,0xff800000,2 +np.float32,0x800b4b46,0x800b4b46,2 +np.float32,0xff77fd74,0xff800000,2 +np.float32,0x257d0,0x257d0,2 +np.float32,0x7caa0c,0x7caa0c,2 +np.float32,0x8025d24d,0x8025d24d,2 +np.float32,0x3d9f1b60,0x3d9f445c,2 +np.float32,0xbe3bf6e8,0xbe3d0595,2 +np.float32,0x54bb93,0x54bb93,2 +np.float32,0xbf3e6a45,0xbf507716,2 +np.float32,0x3f4bb26e,0x3f61e1cd,2 +np.float32,0x3f698edc,0x3f85aac5,2 +np.float32,0xff7bd0ef,0xff800000,2 +np.float32,0xbed07b68,0xbed64a8e,2 +np.float32,0xbf237c72,0xbf2ed3d2,2 +np.float32,0x27b0fa,0x27b0fa,2 +np.float32,0x3f7606d1,0x3f8ed7d6,2 +np.float32,0x790dc0,0x790dc0,2 +np.float32,0x7f68f3ac,0x7f800000,2 +np.float32,0xbed39288,0xbed9a52f,2 +np.float32,0x3f6f8266,0x3f8a0187,2 +np.float32,0x3fbdca,0x3fbdca,2 +np.float32,0xbf7c3e5d,0xbf938b2c,2 +np.float32,0x802321a8,0x802321a8,2 +np.float32,0x3eecab66,0x3ef53031,2 +np.float32,0x62b324,0x62b324,2 +np.float32,0x3f13afac,0x3f1c03fe,2 +np.float32,0xff315ad7,0xff800000,2 +np.float32,0xbf1fac0d,0xbf2a3a63,2 +np.float32,0xbf543984,0xbf6d61d6,2 +np.float32,0x71a212,0x71a212,2 +np.float32,0x114fbe,0x114fbe,2 +np.float32,0x3f5b6ff2,0x3f77505f,2 +np.float32,0xff6ff89e,0xff800000,2 +np.float32,0xff4527a1,0xff800000,2 +np.float32,0x22cb3,0x22cb3,2 +np.float32,0x7f53bb6b,0x7f800000,2 +np.float32,0xff3d2dea,0xff800000,2 +np.float32,0xfd21dac0,0xff800000,2 +np.float32,0xfc486140,0xff800000,2 +np.float32,0x7e2b693a,0x7f800000,2 +np.float32,0x8022a9fb,0x8022a9fb,2 +np.float32,0x80765de0,0x80765de0,2 +np.float32,0x13d299,0x13d299,2 +np.float32,0x7ee53713,0x7f800000,2 +np.float32,0xbde1c770,0xbde23c96,2 +np.float32,0xbd473fc0,0xbd4753de,2 +np.float32,0x3f1cb455,0x3f26acf3,2 +np.float32,0x683e49,0x683e49,2 +np.float32,0x3ed5a9fc,0x3edbeb79,2 +np.float32,0x3f4fe3f6,0x3f67814f,2 +np.float32,0x802a2bce,0x802a2bce,2 +np.float32,0x7e951b4c,0x7f800000,2 +np.float32,0xbe6eb260,0xbe70dd44,2 +np.float32,0xbe3daca8,0xbe3ec2cb,2 +np.float32,0xbe9c38b2,0xbe9ea822,2 +np.float32,0xff2e29dc,0xff800000,2 +np.float32,0x7f62c7cc,0x7f800000,2 +np.float32,0xbf6799a4,0xbf84416c,2 +np.float32,0xbe30a7f0,0xbe318898,2 +np.float32,0xc83d9,0xc83d9,2 +np.float32,0x3f05abf4,0x3f0bd447,2 +np.float32,0x7e9b018a,0x7f800000,2 +np.float32,0xbf0ed72e,0xbf165e5b,2 +np.float32,0x8011ac8c,0x8011ac8c,2 +np.float32,0xbeb7c706,0xbebbbfcb,2 +np.float32,0x803637f9,0x803637f9,2 +np.float32,0xfe787cc8,0xff800000,2 +np.float32,0x3f533d4b,0x3f6c0a50,2 +np.float32,0x3f5c0f1c,0x3f782dde,2 +np.float32,0x3f301f36,0x3f3e590d,2 +np.float32,0x2dc929,0x2dc929,2 +np.float32,0xff15018a,0xff800000,2 +np.float32,0x3f4d0c56,0x3f63afeb,2 +np.float32,0xbf7a2ae3,0xbf91f6e4,2 +np.float32,0xbe771b84,0xbe798346,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x7f5689ba,0x7f800000,2 +np.float32,0x3f1c3177,0x3f2610df,2 
+np.float32,0x3f1b9664,0x3f255825,2 +np.float32,0x3f7e5066,0x3f9520d4,2 +np.float32,0xbf1935f8,0xbf2285ab,2 +np.float32,0x3f096cc7,0x3f101ef9,2 +np.float32,0x8030c180,0x8030c180,2 +np.float32,0x6627ed,0x6627ed,2 +np.float32,0x454595,0x454595,2 +np.float32,0x7de66a33,0x7f800000,2 +np.float32,0xbf800000,0xbf966cfe,2 +np.float32,0xbf35c0a8,0xbf456939,2 +np.float32,0x3f6a6266,0x3f8643e0,2 +np.float32,0x3f0cbcee,0x3f13ef6a,2 +np.float32,0x7efd1e58,0x7f800000,2 +np.float32,0xfe9a74c6,0xff800000,2 +np.float32,0x807ebe6c,0x807ebe6c,2 +np.float32,0x80656736,0x80656736,2 +np.float32,0x800e0608,0x800e0608,2 +np.float32,0xbf30e39a,0xbf3f4e00,2 +np.float32,0x802015fd,0x802015fd,2 +np.float32,0x3e3ce26d,0x3e3df519,2 +np.float32,0x7ec142ac,0x7f800000,2 +np.float32,0xbf68c9ce,0xbf851c78,2 +np.float32,0xfede8356,0xff800000,2 +np.float32,0xbf1507ce,0xbf1d978d,2 +np.float32,0x3e53914c,0x3e551374,2 +np.float32,0x7f3e1c14,0x7f800000,2 +np.float32,0x8070d2ba,0x8070d2ba,2 +np.float32,0xbf4eb793,0xbf65ecee,2 +np.float32,0x7365a6,0x7365a6,2 +np.float32,0x8045cba2,0x8045cba2,2 +np.float32,0x7e4af521,0x7f800000,2 +np.float32,0xbf228625,0xbf2da9e1,2 +np.float32,0x7ee0536c,0x7f800000,2 +np.float32,0x3e126607,0x3e12e5d5,2 +np.float32,0x80311d92,0x80311d92,2 +np.float32,0xbf386b8b,0xbf48ca54,2 +np.float32,0x7f800000,0x7f800000,2 +np.float32,0x8049ec7a,0x8049ec7a,2 +np.float32,0xbf1dfde4,0xbf2836be,2 +np.float32,0x7e719a8c,0x7f800000,2 +np.float32,0x3eb9c856,0x3ebde2e6,2 +np.float32,0xfe3efda8,0xff800000,2 +np.float32,0xbe89d60c,0xbe8b81d1,2 +np.float32,0x3eaad338,0x3eae0317,2 +np.float32,0x7f4e5217,0x7f800000,2 +np.float32,0x3e9d0f40,0x3e9f88ce,2 +np.float32,0xbe026708,0xbe02c155,2 +np.float32,0x5fc22f,0x5fc22f,2 +np.float32,0x1c4572,0x1c4572,2 +np.float32,0xbed89d96,0xbedf22c5,2 +np.float32,0xbf3debee,0xbf4fd441,2 +np.float32,0xbf465520,0xbf5ac6e5,2 +np.float32,0x3f797081,0x3f9169b3,2 +np.float32,0xbf250734,0xbf30b2aa,2 +np.float32,0x7f5068e9,0x7f800000,2 +np.float32,0x3f1b814e,0x3f253f0c,2 +np.float32,0xbf27c5d3,0xbf340b05,2 +np.float32,0x3f1b78ae,0x3f2534c8,2 +np.float32,0x8059b51a,0x8059b51a,2 +np.float32,0x8059f182,0x8059f182,2 +np.float32,0xbf1bb36e,0xbf257ab8,2 +np.float32,0x41ac35,0x41ac35,2 +np.float32,0x68f41f,0x68f41f,2 +np.float32,0xbea504dc,0xbea7e40f,2 +np.float32,0x1,0x1,2 +np.float32,0x3e96b5b0,0x3e98e542,2 +np.float32,0x7f7fffff,0x7f800000,2 +np.float32,0x3c557a80,0x3c557c0c,2 +np.float32,0x800ca3ec,0x800ca3ec,2 +np.float32,0x8077d4aa,0x8077d4aa,2 +np.float32,0x3f000af0,0x3f0572d6,2 +np.float32,0x3e0434dd,0x3e0492f8,2 +np.float32,0x7d1a710a,0x7f800000,2 +np.float32,0x3f70f996,0x3f8b15f8,2 +np.float32,0x8033391d,0x8033391d,2 +np.float32,0x11927c,0x11927c,2 +np.float32,0x7f7784be,0x7f800000,2 +np.float32,0x7acb22af,0x7f800000,2 +np.float32,0x7e8b153c,0x7f800000,2 +np.float32,0x66d402,0x66d402,2 +np.float32,0xfed6e7b0,0xff800000,2 +np.float32,0x7f6872d3,0x7f800000,2 +np.float32,0x1bd49c,0x1bd49c,2 +np.float32,0xfdc4f1b8,0xff800000,2 +np.float32,0xbed8a466,0xbedf2a33,2 +np.float32,0x7ee789,0x7ee789,2 +np.float32,0xbece94b4,0xbed43b52,2 +np.float32,0x3cf3f734,0x3cf4006f,2 +np.float32,0x7e44aa00,0x7f800000,2 +np.float32,0x7f19e99c,0x7f800000,2 +np.float32,0x806ff1bc,0x806ff1bc,2 +np.float32,0x80296934,0x80296934,2 +np.float32,0x7f463363,0x7f800000,2 +np.float32,0xbf212ac3,0xbf2c06bb,2 +np.float32,0x3dc63778,0x3dc686ba,2 +np.float32,0x7f1b4328,0x7f800000,2 +np.float32,0x6311f6,0x6311f6,2 +np.float32,0xbf6b6fb6,0xbf870751,2 +np.float32,0xbf2c44cf,0xbf399155,2 +np.float32,0x3e7a67bc,0x3e7ce887,2 
+np.float32,0x7f57c5f7,0x7f800000,2 +np.float32,0x7f2bb4ff,0x7f800000,2 +np.float32,0xbe9d448e,0xbe9fc0a4,2 +np.float32,0xbf4840f0,0xbf5d4f6b,2 +np.float32,0x7f1e1176,0x7f800000,2 +np.float32,0xff76638e,0xff800000,2 +np.float32,0xff055555,0xff800000,2 +np.float32,0x3f32b82b,0x3f419834,2 +np.float32,0xff363aa8,0xff800000,2 +np.float32,0x7f737fd0,0x7f800000,2 +np.float32,0x3da5d798,0x3da60602,2 +np.float32,0x3f1cc126,0x3f26bc3e,2 +np.float32,0x7eb07541,0x7f800000,2 +np.float32,0x3f7b2ff2,0x3f92bd2a,2 +np.float32,0x474f7,0x474f7,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0xff2b0a4e,0xff800000,2 +np.float32,0xfeb24f16,0xff800000,2 +np.float32,0x2cb9fc,0x2cb9fc,2 +np.float32,0x67189d,0x67189d,2 +np.float32,0x8033d854,0x8033d854,2 +np.float32,0xbe85e94c,0xbe87717a,2 +np.float32,0x80767c6c,0x80767c6c,2 +np.float32,0x7ea84d65,0x7f800000,2 +np.float32,0x3f024bc7,0x3f07fead,2 +np.float32,0xbdcb0100,0xbdcb5625,2 +np.float32,0x3f160a9e,0x3f1ec7c9,2 +np.float32,0xff1734c8,0xff800000,2 +np.float32,0x7f424d5e,0x7f800000,2 +np.float32,0xbf75b215,0xbf8e9862,2 +np.float32,0x3f262a42,0x3f3214c4,2 +np.float32,0xbf4cfb53,0xbf639927,2 +np.float32,0x3f4ac8b8,0x3f60aa7c,2 +np.float32,0x3e90e593,0x3e92d6b3,2 +np.float32,0xbf66bccf,0xbf83a2d8,2 +np.float32,0x7d3d851a,0x7f800000,2 +np.float32,0x7bac783c,0x7f800000,2 +np.float32,0x8001c626,0x8001c626,2 +np.float32,0xbdffd480,0xbe003f7b,2 +np.float32,0x7f6680bf,0x7f800000,2 +np.float32,0xbecf448e,0xbed4f9bb,2 +np.float32,0x584c7,0x584c7,2 +np.float32,0x3f3e8ea0,0x3f50a5fb,2 +np.float32,0xbf5a5f04,0xbf75d56e,2 +np.float32,0x8065ae47,0x8065ae47,2 +np.float32,0xbf48dce3,0xbf5e1dba,2 +np.float32,0xbe8dae2e,0xbe8f7ed8,2 +np.float32,0x3f7ca6ab,0x3f93dace,2 +np.float32,0x4c3e81,0x4c3e81,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0x3ee1f7d9,0x3ee96033,2 +np.float32,0x80588c6f,0x80588c6f,2 +np.float32,0x5ba34e,0x5ba34e,2 +np.float32,0x80095d28,0x80095d28,2 +np.float32,0xbe7ba198,0xbe7e2bdd,2 +np.float32,0xbe0bdcb4,0xbe0c4c22,2 +np.float32,0x1776f7,0x1776f7,2 +np.float32,0x80328b2a,0x80328b2a,2 +np.float32,0x3e978d37,0x3e99c63e,2 +np.float32,0x7ed50906,0x7f800000,2 +np.float32,0x3f776a54,0x3f8fe2bd,2 +np.float32,0xbed624c4,0xbedc7120,2 +np.float32,0x7f0b6a31,0x7f800000,2 +np.float32,0x7eb13913,0x7f800000,2 +np.float32,0xbe733684,0xbe758190,2 +np.float32,0x80016474,0x80016474,2 +np.float32,0x7a51ee,0x7a51ee,2 +np.float32,0x3f6cb91e,0x3f87f729,2 +np.float32,0xbd99b050,0xbd99d540,2 +np.float32,0x7c6e3cba,0x7f800000,2 +np.float32,0xbf00179a,0xbf05811e,2 +np.float32,0x3e609b29,0x3e626954,2 +np.float32,0xff3fd71a,0xff800000,2 +np.float32,0x5d8c2,0x5d8c2,2 +np.float32,0x7ee93662,0x7f800000,2 +np.float32,0x4b0b31,0x4b0b31,2 +np.float32,0x3ec243b7,0x3ec6f594,2 +np.float32,0x804d60f1,0x804d60f1,2 +np.float32,0xbf0cb784,0xbf13e929,2 +np.float32,0x3f13b74d,0x3f1c0cee,2 +np.float32,0xfe37cb64,0xff800000,2 +np.float32,0x1a88,0x1a88,2 +np.float32,0x3e22a472,0x3e2353ba,2 +np.float32,0x7f07d6a0,0x7f800000,2 +np.float32,0x3f78f435,0x3f910bb5,2 +np.float32,0x555a4a,0x555a4a,2 +np.float32,0x3e306c1f,0x3e314be3,2 +np.float32,0x8005877c,0x8005877c,2 +np.float32,0x4df389,0x4df389,2 +np.float32,0x8069ffc7,0x8069ffc7,2 +np.float32,0x3f328f24,0x3f4164c6,2 +np.float32,0x53a31b,0x53a31b,2 +np.float32,0xbe4d6768,0xbe4ec8be,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x3f484c1b,0x3f5d5e2f,2 +np.float32,0x8038be05,0x8038be05,2 +np.float32,0x58ac0f,0x58ac0f,2 +np.float32,0x7ed7fb72,0x7f800000,2 +np.float32,0x5a22e1,0x5a22e1,2 +np.float32,0xbebb7394,0xbebfaad6,2 
+np.float32,0xbda98160,0xbda9b2ef,2 +np.float32,0x7f3e5c42,0x7f800000,2 +np.float32,0xfed204ae,0xff800000,2 +np.float32,0xbf5ef782,0xbf7c3ec5,2 +np.float32,0xbef7a0a8,0xbf00b292,2 +np.float32,0xfee6e176,0xff800000,2 +np.float32,0xfe121140,0xff800000,2 +np.float32,0xfe9e13be,0xff800000,2 +np.float32,0xbf3c98b1,0xbf4e2003,2 +np.float32,0x77520d,0x77520d,2 +np.float32,0xf17b2,0xf17b2,2 +np.float32,0x724d2f,0x724d2f,2 +np.float32,0x7eb326f5,0x7f800000,2 +np.float32,0x3edd6bf2,0x3ee4636e,2 +np.float32,0x350f57,0x350f57,2 +np.float32,0xff7d4435,0xff800000,2 +np.float32,0x802b2b9d,0x802b2b9d,2 +np.float32,0xbf7fbeee,0xbf963acf,2 +np.float32,0x804f3100,0x804f3100,2 +np.float32,0x7c594a71,0x7f800000,2 +np.float32,0x3ef49340,0x3efdfbb6,2 +np.float32,0x2e0659,0x2e0659,2 +np.float32,0x8006d5fe,0x8006d5fe,2 +np.float32,0xfd2a00b0,0xff800000,2 +np.float32,0xbee1c016,0xbee922ed,2 +np.float32,0x3e3b7de8,0x3e3c8a8b,2 +np.float32,0x805e6bba,0x805e6bba,2 +np.float32,0x1a7da2,0x1a7da2,2 +np.float32,0x6caba4,0x6caba4,2 +np.float32,0x802f7eab,0x802f7eab,2 +np.float32,0xff68b16b,0xff800000,2 +np.float32,0x8064f5e5,0x8064f5e5,2 +np.float32,0x2e39b4,0x2e39b4,2 +np.float32,0x800000,0x800000,2 +np.float32,0xfd0334c0,0xff800000,2 +np.float32,0x3e952fc4,0x3e974e7e,2 +np.float32,0x80057d33,0x80057d33,2 +np.float32,0x3ed3ddc4,0x3ed9f6f1,2 +np.float32,0x3f74ce18,0x3f8dedf4,2 +np.float32,0xff6bb7c0,0xff800000,2 +np.float32,0xff43bc21,0xff800000,2 +np.float32,0x80207570,0x80207570,2 +np.float32,0x7e1dda75,0x7f800000,2 +np.float32,0x3efe335c,0x3f0462ff,2 +np.float32,0xbf252c0c,0xbf30df70,2 +np.float32,0x3ef4b8e3,0x3efe25ba,2 +np.float32,0x7c33938d,0x7f800000,2 +np.float32,0x3eb1593c,0x3eb4ea95,2 +np.float32,0xfe1d0068,0xff800000,2 +np.float32,0xbf10da9b,0xbf18b551,2 +np.float32,0xfeb65748,0xff800000,2 +np.float32,0xfe8c6014,0xff800000,2 +np.float32,0x3f0503e2,0x3f0b14e3,2 +np.float32,0xfe5e5248,0xff800000,2 +np.float32,0xbd10afa0,0xbd10b754,2 +np.float32,0xff64b609,0xff800000,2 +np.float32,0xbf674a96,0xbf84089c,2 +np.float32,0x7f5d200d,0x7f800000,2 +np.float32,0x3cf44900,0x3cf45245,2 +np.float32,0x8044445a,0x8044445a,2 +np.float32,0xff35b676,0xff800000,2 +np.float32,0x806452cd,0x806452cd,2 +np.float32,0xbf2930fb,0xbf35c7b4,2 +np.float32,0x7e500617,0x7f800000,2 +np.float32,0x543719,0x543719,2 +np.float32,0x3ed11068,0x3ed6ec1d,2 +np.float32,0xbd8db068,0xbd8dcd59,2 +np.float32,0x3ede62c8,0x3ee571d0,2 +np.float32,0xbf00a410,0xbf061f9c,2 +np.float32,0xbf44fa39,0xbf58ff5b,2 +np.float32,0x3f1c3114,0x3f261069,2 +np.float32,0xbdea6210,0xbdeae521,2 +np.float32,0x80059f6d,0x80059f6d,2 +np.float32,0xbdba15f8,0xbdba578c,2 +np.float32,0x6d8a61,0x6d8a61,2 +np.float32,0x6f5428,0x6f5428,2 +np.float32,0x18d0e,0x18d0e,2 +np.float32,0x50e131,0x50e131,2 +np.float32,0x3f2f52be,0x3f3d5a7e,2 +np.float32,0x7399d8,0x7399d8,2 +np.float32,0x106524,0x106524,2 +np.float32,0x7ebf1c53,0x7f800000,2 +np.float32,0x80276458,0x80276458,2 +np.float32,0x3ebbde67,0x3ec01ceb,2 +np.float32,0x80144d9d,0x80144d9d,2 +np.float32,0x8017ea6b,0x8017ea6b,2 +np.float32,0xff38f201,0xff800000,2 +np.float32,0x7f2daa82,0x7f800000,2 +np.float32,0x3f3cb7c7,0x3f4e47ed,2 +np.float32,0x7f08c779,0x7f800000,2 +np.float32,0xbecc907a,0xbed20cec,2 +np.float32,0x7d440002,0x7f800000,2 +np.float32,0xbd410d80,0xbd411fcd,2 +np.float32,0x3d63ae07,0x3d63cc0c,2 +np.float32,0x805a9c13,0x805a9c13,2 +np.float32,0x803bdcdc,0x803bdcdc,2 +np.float32,0xbe88b354,0xbe8a5497,2 +np.float32,0x3f4eaf43,0x3f65e1c2,2 +np.float32,0x3f15e5b8,0x3f1e9c60,2 +np.float32,0x3e8a870c,0x3e8c394e,2 
+np.float32,0x7e113de9,0x7f800000,2 +np.float32,0x7ee5ba41,0x7f800000,2 +np.float32,0xbe73d178,0xbe7620eb,2 +np.float32,0xfe972e6a,0xff800000,2 +np.float32,0xbf65567d,0xbf82a25a,2 +np.float32,0x3f38247e,0x3f487010,2 +np.float32,0xbece1c62,0xbed3b918,2 +np.float32,0x442c8d,0x442c8d,2 +np.float32,0x2dc52,0x2dc52,2 +np.float32,0x802ed923,0x802ed923,2 +np.float32,0x788cf8,0x788cf8,2 +np.float32,0x8024888e,0x8024888e,2 +np.float32,0x3f789bde,0x3f90c8fc,2 +np.float32,0x3f5de620,0x3f7abf88,2 +np.float32,0x3f0ffc45,0x3f17b2a7,2 +np.float32,0xbf709678,0xbf8accd4,2 +np.float32,0x12181f,0x12181f,2 +np.float32,0xfe54bbe4,0xff800000,2 +np.float32,0x7f1daba0,0x7f800000,2 +np.float32,0xbf6226df,0xbf805e3c,2 +np.float32,0xbd120610,0xbd120dfb,2 +np.float32,0x7f75e951,0x7f800000,2 +np.float32,0x80068048,0x80068048,2 +np.float32,0x45f04a,0x45f04a,2 +np.float32,0xff4c4f58,0xff800000,2 +np.float32,0x311604,0x311604,2 +np.float32,0x805e809c,0x805e809c,2 +np.float32,0x3d1d62c0,0x3d1d6caa,2 +np.float32,0x7f14ccf9,0x7f800000,2 +np.float32,0xff10017c,0xff800000,2 +np.float32,0xbf43ec48,0xbf579df4,2 +np.float32,0xff64da57,0xff800000,2 +np.float32,0x7f0622c5,0x7f800000,2 +np.float32,0x7f5460cd,0x7f800000,2 +np.float32,0xff0ef1c6,0xff800000,2 +np.float32,0xbece1146,0xbed3ad13,2 +np.float32,0x3f4d457f,0x3f63fc70,2 +np.float32,0xbdc1da28,0xbdc2244b,2 +np.float32,0xbe46d3f4,0xbe481463,2 +np.float32,0xff36b3d6,0xff800000,2 +np.float32,0xbec2e76c,0xbec7a540,2 +np.float32,0x8078fb81,0x8078fb81,2 +np.float32,0x7ec819cb,0x7f800000,2 +np.float32,0x39c4d,0x39c4d,2 +np.float32,0xbe8cddc2,0xbe8ea670,2 +np.float32,0xbf36dffb,0xbf46d48b,2 +np.float32,0xbf2302a3,0xbf2e4065,2 +np.float32,0x3e7b34a2,0x3e7dbb9a,2 +np.float32,0x3e3d87e1,0x3e3e9d62,2 +np.float32,0x7f3c94b1,0x7f800000,2 +np.float32,0x80455a85,0x80455a85,2 +np.float32,0xfd875568,0xff800000,2 +np.float32,0xbf618103,0xbf7fd1c8,2 +np.float32,0xbe332e3c,0xbe3418ac,2 +np.float32,0x80736b79,0x80736b79,2 +np.float32,0x3f705d9a,0x3f8aa2e6,2 +np.float32,0xbf3a36d2,0xbf4b134b,2 +np.float32,0xfddc55c0,0xff800000,2 +np.float32,0x805606fd,0x805606fd,2 +np.float32,0x3f4f0bc4,0x3f665e25,2 +np.float32,0xfebe7494,0xff800000,2 +np.float32,0xff0c541b,0xff800000,2 +np.float32,0xff0b8e7f,0xff800000,2 +np.float32,0xbcc51640,0xbcc51b1e,2 +np.float32,0x7ec1c4d0,0x7f800000,2 +np.float32,0xfc5c8e00,0xff800000,2 +np.float32,0x7f48d682,0x7f800000,2 +np.float32,0x7d5c7d8d,0x7f800000,2 +np.float32,0x8052ed03,0x8052ed03,2 +np.float32,0x7d4db058,0x7f800000,2 +np.float32,0xff3a65ee,0xff800000,2 +np.float32,0x806eeb93,0x806eeb93,2 +np.float32,0x803f9733,0x803f9733,2 +np.float32,0xbf2d1388,0xbf3a90e3,2 +np.float32,0x68e260,0x68e260,2 +np.float32,0x3e47a69f,0x3e48eb0e,2 +np.float32,0x3f0c4623,0x3f136646,2 +np.float32,0x3f37a831,0x3f47d249,2 +np.float32,0xff153a0c,0xff800000,2 +np.float32,0x2e8086,0x2e8086,2 +np.float32,0xc3f5e,0xc3f5e,2 +np.float32,0x7f31dc14,0x7f800000,2 +np.float32,0xfee37d68,0xff800000,2 +np.float32,0x711d4,0x711d4,2 +np.float32,0x7ede2ce4,0x7f800000,2 +np.float32,0xbf5d76d0,0xbf7a23d0,2 +np.float32,0xbe2b9eb4,0xbe2c6cac,2 +np.float32,0x2b14d7,0x2b14d7,2 +np.float32,0x3ea1db72,0x3ea4910e,2 +np.float32,0x7f3f03f7,0x7f800000,2 +np.float32,0x92de5,0x92de5,2 +np.float32,0x80322e1b,0x80322e1b,2 +np.float32,0xbf5eb214,0xbf7bdd55,2 +np.float32,0xbf21bf87,0xbf2cba14,2 +np.float32,0xbf5d4b78,0xbf79e73a,2 +np.float32,0xbc302840,0xbc30291e,2 +np.float32,0xfee567c6,0xff800000,2 +np.float32,0x7f70ee14,0x7f800000,2 +np.float32,0x7e5c4b33,0x7f800000,2 +np.float32,0x3f1e7b64,0x3f28ccfd,2 
+np.float32,0xbf6309f7,0xbf80ff3e,2 +np.float32,0x1c2fe3,0x1c2fe3,2 +np.float32,0x8e78d,0x8e78d,2 +np.float32,0x7f2fce73,0x7f800000,2 +np.float32,0x7f25f690,0x7f800000,2 +np.float32,0x8074cba5,0x8074cba5,2 +np.float32,0x16975f,0x16975f,2 +np.float32,0x8012cf5c,0x8012cf5c,2 +np.float32,0x7da72138,0x7f800000,2 +np.float32,0xbf563f35,0xbf7025be,2 +np.float32,0x3f69d3f5,0x3f85dcbe,2 +np.float32,0xbf15c148,0xbf1e7184,2 +np.float32,0xbe7a077c,0xbe7c8564,2 +np.float32,0x3ebb6ef1,0x3ebfa5e3,2 +np.float32,0xbe41fde4,0xbe43277b,2 +np.float32,0x7f10b479,0x7f800000,2 +np.float32,0x3e021ace,0x3e02747d,2 +np.float32,0x3e93d984,0x3e95e9be,2 +np.float32,0xfe17e924,0xff800000,2 +np.float32,0xfe21a7cc,0xff800000,2 +np.float32,0x8019b660,0x8019b660,2 +np.float32,0x7e954631,0x7f800000,2 +np.float32,0x7e7330d1,0x7f800000,2 +np.float32,0xbe007d98,0xbe00d3fb,2 +np.float32,0x3ef3870e,0x3efcd077,2 +np.float32,0x7f5bbde8,0x7f800000,2 +np.float32,0x14a5b3,0x14a5b3,2 +np.float32,0x3e84d23f,0x3e8650e8,2 +np.float32,0x80763017,0x80763017,2 +np.float32,0xfe871f36,0xff800000,2 +np.float32,0x7ed43150,0x7f800000,2 +np.float32,0x3cc44547,0x3cc44a16,2 +np.float32,0x3ef0c0fa,0x3ef9b97d,2 +np.float32,0xbede9944,0xbee5ad86,2 +np.float32,0xbf10f0b2,0xbf18cf0a,2 +np.float32,0x3ecdaa78,0x3ed33dd9,2 +np.float32,0x3f7cc058,0x3f93ee6b,2 +np.float32,0x2d952f,0x2d952f,2 +np.float32,0x3f2cf2de,0x3f3a687a,2 +np.float32,0x8029b33c,0x8029b33c,2 +np.float32,0xbf22c737,0xbf2df888,2 +np.float32,0xff53c84a,0xff800000,2 +np.float32,0x40a509,0x40a509,2 +np.float32,0x56abce,0x56abce,2 +np.float32,0xff7fffff,0xff800000,2 +np.float32,0xbf3e67f6,0xbf50741c,2 +np.float32,0xfde67580,0xff800000,2 +np.float32,0x3f103e9b,0x3f17ffc7,2 +np.float32,0x3f3f7232,0x3f51cbe2,2 +np.float32,0x803e6d78,0x803e6d78,2 +np.float32,0x3a61da,0x3a61da,2 +np.float32,0xbc04de80,0xbc04dedf,2 +np.float32,0x7f1e7c52,0x7f800000,2 +np.float32,0x8058ee88,0x8058ee88,2 +np.float32,0x806dd660,0x806dd660,2 +np.float32,0x7e4af9,0x7e4af9,2 +np.float32,0x80702d27,0x80702d27,2 +np.float32,0x802cdad1,0x802cdad1,2 +np.float32,0x3e9b5c23,0x3e9dc149,2 +np.float32,0x7f076e89,0x7f800000,2 +np.float32,0x7f129d68,0x7f800000,2 +np.float32,0x7f6f0b0a,0x7f800000,2 +np.float32,0x7eafafb5,0x7f800000,2 +np.float32,0xbf2ef2ca,0xbf3ce332,2 +np.float32,0xff34c000,0xff800000,2 +np.float32,0x7f559274,0x7f800000,2 +np.float32,0xfed08556,0xff800000,2 +np.float32,0xbf014621,0xbf06d6ad,2 +np.float32,0xff23086a,0xff800000,2 +np.float32,0x6cb33f,0x6cb33f,2 +np.float32,0xfe6e3ffc,0xff800000,2 +np.float32,0x3e6bbec0,0x3e6dd546,2 +np.float32,0x8036afa6,0x8036afa6,2 +np.float32,0xff800000,0xff800000,2 +np.float32,0x3e0ed05c,0x3e0f46ff,2 +np.float32,0x3ec9215c,0x3ece57e6,2 +np.float32,0xbf449fa4,0xbf5888aa,2 +np.float32,0xff2c6640,0xff800000,2 +np.float32,0x7f08f4a7,0x7f800000,2 +np.float32,0xbf4f63e5,0xbf66d4c1,2 +np.float32,0x3f800000,0x3f966cfe,2 +np.float32,0xfe86c7d2,0xff800000,2 +np.float32,0x3f63f969,0x3f81a970,2 +np.float32,0xbd7022d0,0xbd704609,2 +np.float32,0xbead906c,0xbeb0e853,2 +np.float32,0x7ef149ee,0x7f800000,2 +np.float32,0xff0b9ff7,0xff800000,2 +np.float32,0x3f38380d,0x3f4888e7,2 +np.float32,0x3ef3a3e2,0x3efcf09e,2 +np.float32,0xff616477,0xff800000,2 +np.float32,0x3f3f83e4,0x3f51e2c3,2 +np.float32,0xbf79963c,0xbf918642,2 +np.float32,0x801416f4,0x801416f4,2 +np.float32,0xff75ce6d,0xff800000,2 +np.float32,0xbdbf3588,0xbdbf7cad,2 +np.float32,0xbe6ea938,0xbe70d3dc,2 +np.float32,0x8066f977,0x8066f977,2 +np.float32,0x3f5b5362,0x3f7728aa,2 +np.float32,0xbf72052c,0xbf8bdbd8,2 
+np.float32,0xbe21ed74,0xbe229a6f,2 +np.float32,0x8062d19c,0x8062d19c,2 +np.float32,0x3ed8d01f,0x3edf59e6,2 +np.float32,0x803ed42b,0x803ed42b,2 +np.float32,0xbe099a64,0xbe0a0481,2 +np.float32,0xbe173eb4,0xbe17cba2,2 +np.float32,0xbebdcf02,0xbec22faf,2 +np.float32,0x7e3ff29e,0x7f800000,2 +np.float32,0x367c92,0x367c92,2 +np.float32,0xbf5c9db8,0xbf78f4a4,2 +np.float32,0xff0b49ea,0xff800000,2 +np.float32,0x3f4f9bc4,0x3f672001,2 +np.float32,0x85d4a,0x85d4a,2 +np.float32,0x80643e33,0x80643e33,2 +np.float32,0x8013aabd,0x8013aabd,2 +np.float32,0xff6997c3,0xff800000,2 +np.float32,0x3f4dd43c,0x3f64bbb6,2 +np.float32,0xff13bbb9,0xff800000,2 +np.float32,0x3f34efa2,0x3f446187,2 +np.float32,0x3e4b2f10,0x3e4c850d,2 +np.float32,0xfef695c6,0xff800000,2 +np.float32,0x7f7e0057,0x7f800000,2 +np.float32,0x3f6e1b9c,0x3f88fa40,2 +np.float32,0x806e46cf,0x806e46cf,2 +np.float32,0x3f15a88a,0x3f1e546c,2 +np.float32,0xbd2de7d0,0xbd2df530,2 +np.float32,0xbf63cae0,0xbf818854,2 +np.float32,0xbdc3e1a0,0xbdc42e1e,2 +np.float32,0xbf11a038,0xbf199b98,2 +np.float32,0xbec13706,0xbec5d56b,2 +np.float32,0x3f1c5f54,0x3f26478d,2 +np.float32,0x3e9ea97e,0x3ea136b4,2 +np.float32,0xfeb5a508,0xff800000,2 +np.float32,0x7f4698f4,0x7f800000,2 +np.float32,0xff51ee2c,0xff800000,2 +np.float32,0xff5994df,0xff800000,2 +np.float32,0x4b9fb9,0x4b9fb9,2 +np.float32,0xfda10d98,0xff800000,2 +np.float32,0x525555,0x525555,2 +np.float32,0x7ed571ef,0x7f800000,2 +np.float32,0xbf600d18,0xbf7dc50c,2 +np.float32,0x3ec674ca,0x3ecb768b,2 +np.float32,0x3cb69115,0x3cb694f3,2 +np.float32,0x7eac75f2,0x7f800000,2 +np.float32,0x804d4d75,0x804d4d75,2 +np.float32,0xfed5292e,0xff800000,2 +np.float32,0x800ed06a,0x800ed06a,2 +np.float32,0xfec37584,0xff800000,2 +np.float32,0x3ef96ac7,0x3f01b326,2 +np.float32,0x42f743,0x42f743,2 +np.float32,0x3f56f442,0x3f711e39,2 +np.float32,0xbf7ea726,0xbf956375,2 +np.float32,0x806c7202,0x806c7202,2 +np.float32,0xbd8ee980,0xbd8f0733,2 +np.float32,0xbdf2e930,0xbdf37b18,2 +np.float32,0x3f103910,0x3f17f955,2 +np.float32,0xff123e8f,0xff800000,2 +np.float32,0x806e4b5d,0x806e4b5d,2 +np.float32,0xbf4f3bfc,0xbf669f07,2 +np.float32,0xbf070c16,0xbf0d6609,2 +np.float32,0xff00e0ba,0xff800000,2 +np.float32,0xff49d828,0xff800000,2 +np.float32,0x7e47f04a,0x7f800000,2 +np.float32,0x7e984dac,0x7f800000,2 +np.float32,0x3f77473c,0x3f8fc858,2 +np.float32,0x3f017439,0x3f070ac8,2 +np.float32,0x118417,0x118417,2 +np.float32,0xbcf7a2c0,0xbcf7ac68,2 +np.float32,0xfee46fee,0xff800000,2 +np.float32,0x3e42a648,0x3e43d2e9,2 +np.float32,0x80131916,0x80131916,2 +np.float32,0x806209d3,0x806209d3,2 +np.float32,0x807c1f12,0x807c1f12,2 +np.float32,0x2f3696,0x2f3696,2 +np.float32,0xff28722b,0xff800000,2 +np.float32,0x7f1416a1,0x7f800000,2 +np.float32,0x8054e7a1,0x8054e7a1,2 +np.float32,0xbddc39a0,0xbddca656,2 +np.float32,0x7dc60175,0x7f800000,2 +np.float64,0x7fd0ae584da15cb0,0x7ff0000000000000,1 +np.float64,0x7fd41d68e5283ad1,0x7ff0000000000000,1 +np.float64,0x7fe93073bb7260e6,0x7ff0000000000000,1 +np.float64,0x3fb4fd19d229fa34,0x3fb5031f57dbac0f,1 +np.float64,0x85609ce10ac2,0x85609ce10ac2,1 +np.float64,0xbfd7aa12ccaf5426,0xbfd8351003a320e2,1 +np.float64,0x8004487c9b4890fa,0x8004487c9b4890fa,1 +np.float64,0x7fe7584cfd2eb099,0x7ff0000000000000,1 +np.float64,0x800ea8edc6dd51dc,0x800ea8edc6dd51dc,1 +np.float64,0x3fe0924aa5a12495,0x3fe15276e271c6dc,1 +np.float64,0x3feb1abf6d36357f,0x3fee76b4d3d06964,1 +np.float64,0x3fa8c14534318280,0x3fa8c3bd5ce5923c,1 +np.float64,0x800b9f5915d73eb3,0x800b9f5915d73eb3,1 +np.float64,0xffc05aaa7820b554,0xfff0000000000000,1 
+np.float64,0x800157eda8c2afdc,0x800157eda8c2afdc,1 +np.float64,0xffe8d90042b1b200,0xfff0000000000000,1 +np.float64,0x3feda02ea93b405d,0x3ff1057e61d08d59,1 +np.float64,0xffd03b7361a076e6,0xfff0000000000000,1 +np.float64,0x3fe1a8ecd7e351da,0x3fe291eda9080847,1 +np.float64,0xffc5bfdff82b7fc0,0xfff0000000000000,1 +np.float64,0xbfe6fb3d386df67a,0xbfe9022c05df0565,1 +np.float64,0x7fefffffffffffff,0x7ff0000000000000,1 +np.float64,0x7fa10c340c221867,0x7ff0000000000000,1 +np.float64,0x3fe55cbf1daab97e,0x3fe6fc1648258b75,1 +np.float64,0xbfddeb5f60bbd6be,0xbfdf056d4fb5825f,1 +np.float64,0xffddb1a8213b6350,0xfff0000000000000,1 +np.float64,0xbfb20545e4240a88,0xbfb2091579375176,1 +np.float64,0x3f735ded2026bbda,0x3f735df1dad4ee3a,1 +np.float64,0xbfd1eb91efa3d724,0xbfd227c044dead61,1 +np.float64,0xffd737c588ae6f8c,0xfff0000000000000,1 +np.float64,0x3fc46818ec28d032,0x3fc47e416c4237a6,1 +np.float64,0x0,0x0,1 +np.float64,0xffb632097a2c6410,0xfff0000000000000,1 +np.float64,0xbfcb5ae84b36b5d0,0xbfcb905613af55b8,1 +np.float64,0xbfe7b926402f724c,0xbfe9f4f0be6aacc3,1 +np.float64,0x80081840b3f03082,0x80081840b3f03082,1 +np.float64,0x3fe767a656eecf4d,0x3fe98c53b4779de7,1 +np.float64,0x8005834c088b0699,0x8005834c088b0699,1 +np.float64,0x80074e92658e9d26,0x80074e92658e9d26,1 +np.float64,0x80045d60c268bac2,0x80045d60c268bac2,1 +np.float64,0xffb9aecfe8335da0,0xfff0000000000000,1 +np.float64,0x7fcad3e1cd35a7c3,0x7ff0000000000000,1 +np.float64,0xbf881853d03030c0,0xbf8818783e28fc87,1 +np.float64,0xe18c6d23c318e,0xe18c6d23c318e,1 +np.float64,0x7fcb367b8f366cf6,0x7ff0000000000000,1 +np.float64,0x5c13436cb8269,0x5c13436cb8269,1 +np.float64,0xffe5399938aa7332,0xfff0000000000000,1 +np.float64,0xbfdc45dbc3b88bb8,0xbfdd33958222c27e,1 +np.float64,0xbfd714691bae28d2,0xbfd7954edbef810b,1 +np.float64,0xbfdf18b02b3e3160,0xbfe02ad13634c651,1 +np.float64,0x8003e6f276e7cde6,0x8003e6f276e7cde6,1 +np.float64,0x3febb6b412776d68,0x3fef4f753def31f9,1 +np.float64,0x7fe016a3b4a02d46,0x7ff0000000000000,1 +np.float64,0x3fdc899ac7b91336,0x3fdd7e1cee1cdfc8,1 +np.float64,0x800219271e24324f,0x800219271e24324f,1 +np.float64,0x1529d93e2a53c,0x1529d93e2a53c,1 +np.float64,0x800d5bc827fab790,0x800d5bc827fab790,1 +np.float64,0x3e1495107c293,0x3e1495107c293,1 +np.float64,0x3fe89da0f2b13b42,0x3feb1dc1f3015ad7,1 +np.float64,0x800ba8c17b975183,0x800ba8c17b975183,1 +np.float64,0x8002dacf0265b59f,0x8002dacf0265b59f,1 +np.float64,0xffe6d0a4cc2da149,0xfff0000000000000,1 +np.float64,0x3fdf23fe82be47fc,0x3fe03126d8e2b309,1 +np.float64,0xffe41b1f1c28363e,0xfff0000000000000,1 +np.float64,0xbfd635c634ac6b8c,0xbfd6a8966da6adaa,1 +np.float64,0x800755bc08eeab79,0x800755bc08eeab79,1 +np.float64,0x800ba4c47c374989,0x800ba4c47c374989,1 +np.float64,0x7fec9f7649793eec,0x7ff0000000000000,1 +np.float64,0x7fdbf45738b7e8ad,0x7ff0000000000000,1 +np.float64,0x3f5597f07eab4,0x3f5597f07eab4,1 +np.float64,0xbfbf4599183e8b30,0xbfbf5985d8c65097,1 +np.float64,0xbf5b200580364000,0xbf5b2006501b21ae,1 +np.float64,0x7f91868370230d06,0x7ff0000000000000,1 +np.float64,0x3838e2a67071d,0x3838e2a67071d,1 +np.float64,0xffefe3ff5d3fc7fe,0xfff0000000000000,1 +np.float64,0xffe66b26d06cd64d,0xfff0000000000000,1 +np.float64,0xbfd830a571b0614a,0xbfd8c526927c742c,1 +np.float64,0x7fe8442122f08841,0x7ff0000000000000,1 +np.float64,0x800efa8c637df519,0x800efa8c637df519,1 +np.float64,0xf0026835e004d,0xf0026835e004d,1 +np.float64,0xffb11beefe2237e0,0xfff0000000000000,1 +np.float64,0x3fef9bbb327f3776,0x3ff2809f10641c32,1 +np.float64,0x350595306a0b3,0x350595306a0b3,1 
+np.float64,0xf7f6538befecb,0xf7f6538befecb,1 +np.float64,0xffe36379c4a6c6f3,0xfff0000000000000,1 +np.float64,0x28b1d82e5163c,0x28b1d82e5163c,1 +np.float64,0x70a3d804e147c,0x70a3d804e147c,1 +np.float64,0xffd96c1bc9b2d838,0xfff0000000000000,1 +np.float64,0xffce8e00893d1c00,0xfff0000000000000,1 +np.float64,0x800f2bdcb25e57b9,0x800f2bdcb25e57b9,1 +np.float64,0xbfe0d9c63361b38c,0xbfe1a3eb02192b76,1 +np.float64,0xbfdc7b8711b8f70e,0xbfdd6e9db3a01e51,1 +np.float64,0x99e22ec133c46,0x99e22ec133c46,1 +np.float64,0xffeaef6ddab5dedb,0xfff0000000000000,1 +np.float64,0x7fe89c22c0f13845,0x7ff0000000000000,1 +np.float64,0x8002d5207de5aa42,0x8002d5207de5aa42,1 +np.float64,0x3fd1b13353236267,0x3fd1eb1b9345dfca,1 +np.float64,0x800ccae0a41995c1,0x800ccae0a41995c1,1 +np.float64,0x3fdbdaba38b7b574,0x3fdcbdfcbca37ce6,1 +np.float64,0x5b06d12cb60db,0x5b06d12cb60db,1 +np.float64,0xffd52262752a44c4,0xfff0000000000000,1 +np.float64,0x5a17f050b42ff,0x5a17f050b42ff,1 +np.float64,0x3d24205e7a485,0x3d24205e7a485,1 +np.float64,0x7fbed4dec63da9bd,0x7ff0000000000000,1 +np.float64,0xbfe56e9776aadd2f,0xbfe71212863c284f,1 +np.float64,0x7fea0bc952341792,0x7ff0000000000000,1 +np.float64,0x800f692d139ed25a,0x800f692d139ed25a,1 +np.float64,0xffdb63feab36c7fe,0xfff0000000000000,1 +np.float64,0x3fe1c2297fe38452,0x3fe2af21293c9571,1 +np.float64,0x7fede384747bc708,0x7ff0000000000000,1 +np.float64,0x800440169288802e,0x800440169288802e,1 +np.float64,0xffe3241eeb26483e,0xfff0000000000000,1 +np.float64,0xffe28f3879651e70,0xfff0000000000000,1 +np.float64,0xa435cbc1486d,0xa435cbc1486d,1 +np.float64,0x7fe55e08db6abc11,0x7ff0000000000000,1 +np.float64,0x1405e624280be,0x1405e624280be,1 +np.float64,0x3fd861bdf0b0c37c,0x3fd8f9d2e33e45e5,1 +np.float64,0x3feeb67cdc3d6cfa,0x3ff1d337d81d1c14,1 +np.float64,0x3fd159a10e22b342,0x3fd1903be7c2ea0c,1 +np.float64,0x3fd84626bc308c4d,0x3fd8dc373645e65b,1 +np.float64,0xffd3da81d9a7b504,0xfff0000000000000,1 +np.float64,0xbfd4a768b8294ed2,0xbfd503aa7c240051,1 +np.float64,0x3fe3059f2a660b3e,0x3fe42983e0c6bb2e,1 +np.float64,0x3fe3b8353827706a,0x3fe4fdd635c7269b,1 +np.float64,0xbfe4af0399695e07,0xbfe6277d9002b46c,1 +np.float64,0xbfd7e18a92afc316,0xbfd87066b54c4fe6,1 +np.float64,0x800432bcab48657a,0x800432bcab48657a,1 +np.float64,0x80033d609d267ac2,0x80033d609d267ac2,1 +np.float64,0x7fef5f758e7ebeea,0x7ff0000000000000,1 +np.float64,0xbfed7833dbfaf068,0xbff0e85bf45a5ebc,1 +np.float64,0x3fe2283985a45073,0x3fe325b0a9099c74,1 +np.float64,0xe820b4b3d0417,0xe820b4b3d0417,1 +np.float64,0x8003ecb72aa7d96f,0x8003ecb72aa7d96f,1 +np.float64,0xbfeab2c755b5658f,0xbfede7c83e92a625,1 +np.float64,0xbfc7b287f72f6510,0xbfc7d53ef2ffe9dc,1 +np.float64,0xffd9a41d0f33483a,0xfff0000000000000,1 +np.float64,0x3fd3a5b6e3a74b6c,0x3fd3f516f39a4725,1 +np.float64,0x800bc72091578e42,0x800bc72091578e42,1 +np.float64,0x800ff405ce9fe80c,0x800ff405ce9fe80c,1 +np.float64,0x57918600af24,0x57918600af24,1 +np.float64,0x2a5be7fa54b7e,0x2a5be7fa54b7e,1 +np.float64,0xbfdca7886bb94f10,0xbfdd9f142b5b43e4,1 +np.float64,0xbfe216993ee42d32,0xbfe3112936590995,1 +np.float64,0xbfe06bd9cf20d7b4,0xbfe126cd353ab42f,1 +np.float64,0x8003e6c31827cd87,0x8003e6c31827cd87,1 +np.float64,0x8005f37d810be6fc,0x8005f37d810be6fc,1 +np.float64,0x800715b081ae2b62,0x800715b081ae2b62,1 +np.float64,0x3fef94c35bff2986,0x3ff27b4bed2f4051,1 +np.float64,0x6f5798e0deb0,0x6f5798e0deb0,1 +np.float64,0x3fcef1f05c3de3e1,0x3fcf3f557550598f,1 +np.float64,0xbf9a91c400352380,0xbf9a92876273b85c,1 +np.float64,0x3fc9143f7f322880,0x3fc93d678c05d26b,1 +np.float64,0x78ad847af15b1,0x78ad847af15b1,1 
+np.float64,0x8000fdc088c1fb82,0x8000fdc088c1fb82,1 +np.float64,0x800200fd304401fb,0x800200fd304401fb,1 +np.float64,0x7fb8ab09dc315613,0x7ff0000000000000,1 +np.float64,0x3fe949771b7292ee,0x3fec00891c3fc5a2,1 +np.float64,0xbfc54cae0e2a995c,0xbfc565e0f3d0e3af,1 +np.float64,0xffd546161e2a8c2c,0xfff0000000000000,1 +np.float64,0x800fe1d1279fc3a2,0x800fe1d1279fc3a2,1 +np.float64,0x3fd9c45301b388a8,0x3fda77fa1f4c79bf,1 +np.float64,0x7fe10ff238221fe3,0x7ff0000000000000,1 +np.float64,0xbfbc2181ae384300,0xbfbc3002229155c4,1 +np.float64,0xbfe7bbfae4ef77f6,0xbfe9f895e91f468d,1 +np.float64,0x800d3d994f7a7b33,0x800d3d994f7a7b33,1 +np.float64,0xffe6e15a896dc2b4,0xfff0000000000000,1 +np.float64,0x800e6b6c8abcd6d9,0x800e6b6c8abcd6d9,1 +np.float64,0xbfd862c938b0c592,0xbfd8faf1cdcb09db,1 +np.float64,0xffe2411f8464823e,0xfff0000000000000,1 +np.float64,0xffd0b32efaa1665e,0xfff0000000000000,1 +np.float64,0x3ac4ace475896,0x3ac4ace475896,1 +np.float64,0xf9c3a7ebf3875,0xf9c3a7ebf3875,1 +np.float64,0xdb998ba5b7332,0xdb998ba5b7332,1 +np.float64,0xbfe438a14fe87142,0xbfe5981751e4c5cd,1 +np.float64,0xbfbcf48cbc39e918,0xbfbd045d60e65d3a,1 +np.float64,0x7fde499615bc932b,0x7ff0000000000000,1 +np.float64,0x800bba269057744e,0x800bba269057744e,1 +np.float64,0x3fc9bb1ba3337638,0x3fc9e78fdb6799c1,1 +np.float64,0xffd9f974fbb3f2ea,0xfff0000000000000,1 +np.float64,0x7fcf1ad1693e35a2,0x7ff0000000000000,1 +np.float64,0x7fe5dcedd32bb9db,0x7ff0000000000000,1 +np.float64,0xeb06500bd60ca,0xeb06500bd60ca,1 +np.float64,0x7fd73e7b592e7cf6,0x7ff0000000000000,1 +np.float64,0xbfe9d91ae873b236,0xbfecc08482849bcd,1 +np.float64,0xffc85338b730a670,0xfff0000000000000,1 +np.float64,0x7fbba41eee37483d,0x7ff0000000000000,1 +np.float64,0x3fed5624fb7aac4a,0x3ff0cf9f0de1fd54,1 +np.float64,0xffe566d80d6acdb0,0xfff0000000000000,1 +np.float64,0x3fd4477884a88ef1,0x3fd49ec7acdd25a0,1 +np.float64,0x3fcb98c5fd37318c,0x3fcbcfa20e2c2712,1 +np.float64,0xffdeba71d5bd74e4,0xfff0000000000000,1 +np.float64,0x8001edc59dc3db8c,0x8001edc59dc3db8c,1 +np.float64,0x3fe6b09e896d613e,0x3fe8a3bb541ec0e3,1 +np.float64,0x3fe8694b4970d296,0x3fead94d271d05cf,1 +np.float64,0xb52c27bf6a585,0xb52c27bf6a585,1 +np.float64,0x7fcb0a21d9361443,0x7ff0000000000000,1 +np.float64,0xbfd9efc68cb3df8e,0xbfdaa7058c0ccbd1,1 +np.float64,0x8007cd170fef9a2f,0x8007cd170fef9a2f,1 +np.float64,0x3fe83325e770664c,0x3fea92c55c9d567e,1 +np.float64,0x800bd0085537a011,0x800bd0085537a011,1 +np.float64,0xffe05b9e7820b73c,0xfff0000000000000,1 +np.float64,0x3fea4ce4347499c8,0x3fed5cea9fdc541b,1 +np.float64,0x7fe08aae1921155b,0x7ff0000000000000,1 +np.float64,0x3fe7a5e7deef4bd0,0x3fe9dc2e20cfb61c,1 +np.float64,0xbfe0ccc8e6e19992,0xbfe195175f32ee3f,1 +np.float64,0xbfe8649717f0c92e,0xbfead3298974dcf0,1 +np.float64,0x7fed6c5308bad8a5,0x7ff0000000000000,1 +np.float64,0xffdbd8c7af37b190,0xfff0000000000000,1 +np.float64,0xbfb2bc4d06257898,0xbfb2c09569912839,1 +np.float64,0x3fc62eca512c5d95,0x3fc64b4251bce8f9,1 +np.float64,0xbfcae2ddbd35c5bc,0xbfcb15971fc61312,1 +np.float64,0x18d26ce831a4f,0x18d26ce831a4f,1 +np.float64,0x7fe38b279267164e,0x7ff0000000000000,1 +np.float64,0x97e1d9ab2fc3b,0x97e1d9ab2fc3b,1 +np.float64,0xbfee8e4785fd1c8f,0xbff1b52d16807627,1 +np.float64,0xbfb189b4a6231368,0xbfb18d37e83860ee,1 +np.float64,0xffd435761ea86aec,0xfff0000000000000,1 +np.float64,0x3fe6c48ebced891e,0x3fe8bcea189c3867,1 +np.float64,0x7fdadd3678b5ba6c,0x7ff0000000000000,1 +np.float64,0x7fea8f15b7b51e2a,0x7ff0000000000000,1 +np.float64,0xbff0000000000000,0xbff2cd9fc44eb982,1 +np.float64,0x80004c071120980f,0x80004c071120980f,1 
+np.float64,0x8005367adfea6cf6,0x8005367adfea6cf6,1 +np.float64,0x3fbdc9139a3b9220,0x3fbdda4aba667ce5,1 +np.float64,0x7fed5ee3ad7abdc6,0x7ff0000000000000,1 +np.float64,0x51563fb2a2ac9,0x51563fb2a2ac9,1 +np.float64,0xbfba7d26ce34fa50,0xbfba894229c50ea1,1 +np.float64,0x6c10db36d821c,0x6c10db36d821c,1 +np.float64,0xbfbdaec0d03b5d80,0xbfbdbfca6ede64f4,1 +np.float64,0x800a1cbe7414397d,0x800a1cbe7414397d,1 +np.float64,0x800ae6e7f2d5cdd0,0x800ae6e7f2d5cdd0,1 +np.float64,0x3fea63d3fef4c7a8,0x3fed7c1356688ddc,1 +np.float64,0xbfde1e3a88bc3c76,0xbfdf3dfb09cc2260,1 +np.float64,0xbfd082d75a2105ae,0xbfd0b1e28c84877b,1 +np.float64,0x7fea1e5e85f43cbc,0x7ff0000000000000,1 +np.float64,0xffe2237a1a6446f4,0xfff0000000000000,1 +np.float64,0x3fd1e2be8523c57d,0x3fd21e93dfd1bbc4,1 +np.float64,0x3fd1acd428a359a8,0x3fd1e6916a42bc3a,1 +np.float64,0x61a152f0c342b,0x61a152f0c342b,1 +np.float64,0xbfc61a6b902c34d8,0xbfc6369557690ba0,1 +np.float64,0x7fd1a84b1f235095,0x7ff0000000000000,1 +np.float64,0x1c5cc7e638b9a,0x1c5cc7e638b9a,1 +np.float64,0x8008039755f0072f,0x8008039755f0072f,1 +np.float64,0x80097532d6f2ea66,0x80097532d6f2ea66,1 +np.float64,0xbfc6d979a12db2f4,0xbfc6f89777c53f8f,1 +np.float64,0x8004293ab1085276,0x8004293ab1085276,1 +np.float64,0x3fc2af5c21255eb8,0x3fc2c05dc0652554,1 +np.float64,0xbfd9a5ab87b34b58,0xbfda56d1076abc98,1 +np.float64,0xbfebd360ba77a6c2,0xbfef779fd6595f9b,1 +np.float64,0xffd5313c43aa6278,0xfff0000000000000,1 +np.float64,0xbfe994a262b32945,0xbfec64b969852ed5,1 +np.float64,0x3fce01a52e3c034a,0x3fce48324eb29c31,1 +np.float64,0x56bd74b2ad7af,0x56bd74b2ad7af,1 +np.float64,0xb84093ff70813,0xb84093ff70813,1 +np.float64,0x7fe776df946eedbe,0x7ff0000000000000,1 +np.float64,0xbfe294ac2e652958,0xbfe3a480938afa26,1 +np.float64,0x7fe741b4d0ee8369,0x7ff0000000000000,1 +np.float64,0x800b7e8a1056fd15,0x800b7e8a1056fd15,1 +np.float64,0x7fd28f1269251e24,0x7ff0000000000000,1 +np.float64,0x8009d4492e73a893,0x8009d4492e73a893,1 +np.float64,0x3fe3f27fca67e500,0x3fe543aff825e244,1 +np.float64,0x3fd12447e5a24890,0x3fd158efe43c0452,1 +np.float64,0xbfd58df0f2ab1be2,0xbfd5f6d908e3ebce,1 +np.float64,0xffc0a8e4642151c8,0xfff0000000000000,1 +np.float64,0xbfedb197787b632f,0xbff112367ec9d3e7,1 +np.float64,0xffdde07a7f3bc0f4,0xfff0000000000000,1 +np.float64,0x3fe91f3e5b723e7d,0x3febc886a1d48364,1 +np.float64,0x3fe50415236a082a,0x3fe68f43a5468d8c,1 +np.float64,0xd9a0c875b3419,0xd9a0c875b3419,1 +np.float64,0xbfee04ccf4bc099a,0xbff14f4740a114cf,1 +np.float64,0xbfd2bcc6a125798e,0xbfd30198b1e7d7ed,1 +np.float64,0xbfeb3c16f8f6782e,0xbfeea4ce47d09f58,1 +np.float64,0xffd3ba19e4a77434,0xfff0000000000000,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x3fdef0a642bde14d,0x3fe0146677b3a488,1 +np.float64,0x3fdc3dd0a2b87ba0,0x3fdd2abe65651487,1 +np.float64,0x3fdbb1fd47b763fb,0x3fdc915a2fd19f4b,1 +np.float64,0x7fbaa375e63546eb,0x7ff0000000000000,1 +np.float64,0x433ef8ee867e0,0x433ef8ee867e0,1 +np.float64,0xf5345475ea68b,0xf5345475ea68b,1 +np.float64,0xa126419b424c8,0xa126419b424c8,1 +np.float64,0x3fe0057248200ae5,0x3fe0b2f488339709,1 +np.float64,0xffc5e3b82f2bc770,0xfff0000000000000,1 +np.float64,0xffb215c910242b90,0xfff0000000000000,1 +np.float64,0xbfeba4ae0837495c,0xbfef3642e4b54aac,1 +np.float64,0xffbb187ebe363100,0xfff0000000000000,1 +np.float64,0x3fe4c6a496a98d49,0x3fe64440cdf06aab,1 +np.float64,0x800767a28f6ecf46,0x800767a28f6ecf46,1 +np.float64,0x3fdbed63b1b7dac8,0x3fdcd27318c0b683,1 +np.float64,0x80006d8339e0db07,0x80006d8339e0db07,1 +np.float64,0x8000b504f0416a0b,0x8000b504f0416a0b,1 
+np.float64,0xbfe88055bfb100ac,0xbfeaf767bd2767b9,1 +np.float64,0x3fefe503317fca06,0x3ff2b8d4057240c8,1 +np.float64,0x7fe307538b660ea6,0x7ff0000000000000,1 +np.float64,0x944963c12892d,0x944963c12892d,1 +np.float64,0xbfd2c20b38a58416,0xbfd30717900f8233,1 +np.float64,0x7feed04e3e3da09b,0x7ff0000000000000,1 +np.float64,0x3fe639619cac72c3,0x3fe80de7b8560a8d,1 +np.float64,0x3fde066c66bc0cd9,0x3fdf237fb759a652,1 +np.float64,0xbfc56b22b52ad644,0xbfc584c267a47ebd,1 +np.float64,0x3fc710d5b12e21ab,0x3fc730d817ba0d0c,1 +np.float64,0x3fee1dfc347c3bf8,0x3ff161d9c3e15f68,1 +np.float64,0x3fde400954bc8013,0x3fdf639e5cc9e7a9,1 +np.float64,0x56e701f8adce1,0x56e701f8adce1,1 +np.float64,0xbfe33bbc89e67779,0xbfe46996b39381fe,1 +np.float64,0x7fec89e2f87913c5,0x7ff0000000000000,1 +np.float64,0xbfdad58b40b5ab16,0xbfdba098cc0ad5d3,1 +np.float64,0x3fe99c76a13338ed,0x3fec6f31bae613e7,1 +np.float64,0x3fe4242a29a84854,0x3fe57f6b45e5c0ef,1 +np.float64,0xbfe79d3199ef3a63,0xbfe9d0fb96c846ba,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0xbfeb35a6cf766b4e,0xbfee9be4e7e943f7,1 +np.float64,0x3e047f267c091,0x3e047f267c091,1 +np.float64,0x4bf1376a97e28,0x4bf1376a97e28,1 +np.float64,0x800ef419685de833,0x800ef419685de833,1 +np.float64,0x3fe0efa61a21df4c,0x3fe1bce98baf2f0f,1 +np.float64,0x3fcc13c4d738278a,0x3fcc4d8c778bcaf7,1 +np.float64,0x800f1d291afe3a52,0x800f1d291afe3a52,1 +np.float64,0x3fd3f10e6da7e21d,0x3fd444106761ea1d,1 +np.float64,0x800706d6d76e0dae,0x800706d6d76e0dae,1 +np.float64,0xffa1ffbc9023ff80,0xfff0000000000000,1 +np.float64,0xbfe098f26d6131e5,0xbfe15a08a5f3eac0,1 +np.float64,0x3fe984f9cc7309f4,0x3fec4fcdbdb1cb9b,1 +np.float64,0x7fd7c2f1eaaf85e3,0x7ff0000000000000,1 +np.float64,0x800a8adb64f515b7,0x800a8adb64f515b7,1 +np.float64,0x80060d3ffc8c1a81,0x80060d3ffc8c1a81,1 +np.float64,0xbfec37e4aef86fc9,0xbff0029a6a1d61e2,1 +np.float64,0x800b21bcfcf6437a,0x800b21bcfcf6437a,1 +np.float64,0xbfc08facc1211f58,0xbfc09b8380ea8032,1 +np.float64,0xffebb4b52577696a,0xfff0000000000000,1 +np.float64,0x800b08096df61013,0x800b08096df61013,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0xffd2f0c9c8a5e194,0xfff0000000000000,1 +np.float64,0xffe78b2299af1644,0xfff0000000000000,1 +np.float64,0x7fd0444794a0888e,0x7ff0000000000000,1 +np.float64,0x307c47b460f8a,0x307c47b460f8a,1 +np.float64,0xffe6b4c851ad6990,0xfff0000000000000,1 +np.float64,0xffe1877224a30ee4,0xfff0000000000000,1 +np.float64,0x48d7b5c091af7,0x48d7b5c091af7,1 +np.float64,0xbfa1dc6b1c23b8d0,0xbfa1dd5889e1b7da,1 +np.float64,0x3fe5004737ea008e,0x3fe68a9c310b08c1,1 +np.float64,0x7fec5f0742b8be0e,0x7ff0000000000000,1 +np.float64,0x3fd0a86285a150c5,0x3fd0d8b238d557fa,1 +np.float64,0x7fed60380efac06f,0x7ff0000000000000,1 +np.float64,0xeeca74dfdd94f,0xeeca74dfdd94f,1 +np.float64,0x3fda05aaa8b40b54,0x3fdabebdbf405e84,1 +np.float64,0x800e530ceb1ca61a,0x800e530ceb1ca61a,1 +np.float64,0x800b3866379670cd,0x800b3866379670cd,1 +np.float64,0xffedb3e7fa3b67cf,0xfff0000000000000,1 +np.float64,0xffdfa4c0713f4980,0xfff0000000000000,1 +np.float64,0x7fe4679e0728cf3b,0x7ff0000000000000,1 +np.float64,0xffe978611ef2f0c2,0xfff0000000000000,1 +np.float64,0x7fc9f4601f33e8bf,0x7ff0000000000000,1 +np.float64,0x3fd4942de6a9285c,0x3fd4ef6e089357dd,1 +np.float64,0x3faafe064435fc00,0x3fab0139cd6564dc,1 +np.float64,0x800d145a519a28b5,0x800d145a519a28b5,1 +np.float64,0xbfd82636f2304c6e,0xbfd8b9f75ddd2f02,1 +np.float64,0xbfdf2e975e3e5d2e,0xbfe037174280788c,1 +np.float64,0x7fd7051d7c2e0a3a,0x7ff0000000000000,1 +np.float64,0x8007933d452f267b,0x8007933d452f267b,1 
+np.float64,0xb2043beb64088,0xb2043beb64088,1 +np.float64,0x3febfd9708f7fb2e,0x3fefb2ef090f18d2,1 +np.float64,0xffd9bc6bc83378d8,0xfff0000000000000,1 +np.float64,0xc10f9fd3821f4,0xc10f9fd3821f4,1 +np.float64,0x3fe3c83413a79068,0x3fe510fa1dd8edf7,1 +np.float64,0x3fbe26ccda3c4da0,0x3fbe38a892279975,1 +np.float64,0x3fcc1873103830e6,0x3fcc5257a6ae168d,1 +np.float64,0xe7e000e9cfc00,0xe7e000e9cfc00,1 +np.float64,0xffda73852bb4e70a,0xfff0000000000000,1 +np.float64,0xbfe831be19f0637c,0xbfea90f1b34da3e5,1 +np.float64,0xbfeb568f3076ad1e,0xbfeec97eebfde862,1 +np.float64,0x510a6ad0a214e,0x510a6ad0a214e,1 +np.float64,0x3fe6ba7e35ed74fc,0x3fe8b032a9a28c6a,1 +np.float64,0xffeb5cdcff76b9b9,0xfff0000000000000,1 +np.float64,0x4f0a23e89e145,0x4f0a23e89e145,1 +np.float64,0x446ec20288dd9,0x446ec20288dd9,1 +np.float64,0x7fe2521b02e4a435,0x7ff0000000000000,1 +np.float64,0x8001cd2969e39a54,0x8001cd2969e39a54,1 +np.float64,0x3fdfe90600bfd20c,0x3fe09fdcca10001c,1 +np.float64,0x7fd660c5762cc18a,0x7ff0000000000000,1 +np.float64,0xbfb11b23aa223648,0xbfb11e661949b377,1 +np.float64,0x800e025285fc04a5,0x800e025285fc04a5,1 +np.float64,0xffb180bb18230178,0xfff0000000000000,1 +np.float64,0xaaf590df55eb2,0xaaf590df55eb2,1 +np.float64,0xbfe8637d9df0c6fb,0xbfead1ba429462ec,1 +np.float64,0x7fd2577866a4aef0,0x7ff0000000000000,1 +np.float64,0xbfcfb2ab5a3f6558,0xbfd002ee87f272b9,1 +np.float64,0x7fdd64ae2f3ac95b,0x7ff0000000000000,1 +np.float64,0xffd1a502c9234a06,0xfff0000000000000,1 +np.float64,0x7fc4be4b60297c96,0x7ff0000000000000,1 +np.float64,0xbfb46b712a28d6e0,0xbfb470fca9919172,1 +np.float64,0xffdef913033df226,0xfff0000000000000,1 +np.float64,0x3fd94a3545b2946b,0x3fd9f40431ce9f9c,1 +np.float64,0x7fef88a0b6ff1140,0x7ff0000000000000,1 +np.float64,0xbfbcc81876399030,0xbfbcd7a0ab6cb388,1 +np.float64,0x800a4acfdd9495a0,0x800a4acfdd9495a0,1 +np.float64,0xffe270b3d5e4e167,0xfff0000000000000,1 +np.float64,0xbfd23f601e247ec0,0xbfd27eeca50a49eb,1 +np.float64,0x7fec6e796a78dcf2,0x7ff0000000000000,1 +np.float64,0x3fb85e0c9630bc19,0x3fb867791ccd6c72,1 +np.float64,0x7fe49fc424a93f87,0x7ff0000000000000,1 +np.float64,0xbfe75a99fbaeb534,0xbfe97ba37663de4c,1 +np.float64,0xffe85011b630a023,0xfff0000000000000,1 +np.float64,0xffe5962e492b2c5c,0xfff0000000000000,1 +np.float64,0x6f36ed4cde6de,0x6f36ed4cde6de,1 +np.float64,0x3feb72170af6e42e,0x3feeefbe6f1a2084,1 +np.float64,0x80014d8d60629b1c,0x80014d8d60629b1c,1 +np.float64,0xbfe0eb40d321d682,0xbfe1b7e31f252bf1,1 +np.float64,0x31fe305663fc7,0x31fe305663fc7,1 +np.float64,0x3fd2cd6381a59ac7,0x3fd312edc9868a4d,1 +np.float64,0xffcf0720793e0e40,0xfff0000000000000,1 +np.float64,0xbfeef1ef133de3de,0xbff1ffd5e1a3b648,1 +np.float64,0xbfd01c787aa038f0,0xbfd0482be3158a01,1 +np.float64,0x3fda3607c5b46c10,0x3fdaf3301e217301,1 +np.float64,0xffda9a9911b53532,0xfff0000000000000,1 +np.float64,0x3fc0b37c392166f8,0x3fc0bfa076f3c43e,1 +np.float64,0xbfe06591c760cb24,0xbfe11fad179ea12c,1 +np.float64,0x8006e369c20dc6d4,0x8006e369c20dc6d4,1 +np.float64,0x3fdf2912a8be5224,0x3fe033ff74b92f4d,1 +np.float64,0xffc0feb07821fd60,0xfff0000000000000,1 +np.float64,0xa4b938c949727,0xa4b938c949727,1 +np.float64,0x8008fe676571fccf,0x8008fe676571fccf,1 +np.float64,0xbfdda68459bb4d08,0xbfdeb8faab34fcbc,1 +np.float64,0xbfda18b419343168,0xbfdad360ca52ec7c,1 +np.float64,0x3febcbae35b7975c,0x3fef6cd51c9ebc15,1 +np.float64,0x3fbec615f63d8c30,0x3fbed912ba729926,1 +np.float64,0x7f99a831c8335063,0x7ff0000000000000,1 +np.float64,0x3fe663e8826cc7d1,0x3fe84330bd9aada8,1 +np.float64,0x70a9f9e6e1540,0x70a9f9e6e1540,1 
+np.float64,0x8a13a5db14275,0x8a13a5db14275,1 +np.float64,0x7fc4330a3b286613,0x7ff0000000000000,1 +np.float64,0xbfe580c6136b018c,0xbfe728806cc7a99a,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0xffec079d5df80f3a,0xfff0000000000000,1 +np.float64,0x8e1173c31c22f,0x8e1173c31c22f,1 +np.float64,0x3fe088456d21108b,0x3fe14712ca414103,1 +np.float64,0x3fe1b76f73636edf,0x3fe2a2b658557112,1 +np.float64,0xbfd4a1dd162943ba,0xbfd4fdd45cae8fb8,1 +np.float64,0x7fd60b46c8ac168d,0x7ff0000000000000,1 +np.float64,0xffe36cc3b166d987,0xfff0000000000000,1 +np.float64,0x3fdc2ae0cfb855c0,0x3fdd15f026773151,1 +np.float64,0xbfc41aa203283544,0xbfc42fd1b145fdd5,1 +np.float64,0xffed90c55fbb218a,0xfff0000000000000,1 +np.float64,0x3fe67e3a9aecfc75,0x3fe86440db65b4f6,1 +np.float64,0x7fd12dbeaba25b7c,0x7ff0000000000000,1 +np.float64,0xbfe1267c0de24cf8,0xbfe1fbb611bdf1e9,1 +np.float64,0x22e5619645cad,0x22e5619645cad,1 +np.float64,0x7fe327c72ea64f8d,0x7ff0000000000000,1 +np.float64,0x7fd2c3f545a587ea,0x7ff0000000000000,1 +np.float64,0x7fc7b689372f6d11,0x7ff0000000000000,1 +np.float64,0xc5e140bd8bc28,0xc5e140bd8bc28,1 +np.float64,0x3fccb3627a3966c5,0x3fccf11b44fa4102,1 +np.float64,0xbfd2cf725c259ee4,0xbfd315138d0e5dca,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbfd3dfa8b627bf52,0xbfd431d17b235477,1 +np.float64,0xbfb82124e6304248,0xbfb82a4b6d9c2663,1 +np.float64,0x3fdcd590d9b9ab22,0x3fddd1d548806347,1 +np.float64,0x7fdee0cd1b3dc199,0x7ff0000000000000,1 +np.float64,0x8004ebfc60a9d7fa,0x8004ebfc60a9d7fa,1 +np.float64,0x3fe8eb818b71d704,0x3feb842679806108,1 +np.float64,0xffdd5e8fe63abd20,0xfff0000000000000,1 +np.float64,0xbfe3efcbd9e7df98,0xbfe54071436645ee,1 +np.float64,0x3fd5102557aa204b,0x3fd57203d31a05b8,1 +np.float64,0x3fe6318af7ec6316,0x3fe8041a177cbf96,1 +np.float64,0x3fdf3cecdabe79da,0x3fe03f2084ffbc78,1 +np.float64,0x7fe0ab6673a156cc,0x7ff0000000000000,1 +np.float64,0x800037d5c6c06fac,0x800037d5c6c06fac,1 +np.float64,0xffce58b86a3cb170,0xfff0000000000000,1 +np.float64,0xbfe3455d6ce68abb,0xbfe475034cecb2b8,1 +np.float64,0x991b663d3236d,0x991b663d3236d,1 +np.float64,0x3fda82d37c3505a7,0x3fdb46973da05c12,1 +np.float64,0x3f9b736fa036e6df,0x3f9b74471c234411,1 +np.float64,0x8001c96525e392cb,0x8001c96525e392cb,1 +np.float64,0x7ff0000000000000,0x7ff0000000000000,1 +np.float64,0xbfaf59122c3eb220,0xbfaf5e15f8b272b0,1 +np.float64,0xbf9aa7d288354fa0,0xbf9aa897d2a40cb5,1 +np.float64,0x8004a43428694869,0x8004a43428694869,1 +np.float64,0x7feead476dbd5a8e,0x7ff0000000000000,1 +np.float64,0xffca150f81342a20,0xfff0000000000000,1 +np.float64,0x80047ec3bc88fd88,0x80047ec3bc88fd88,1 +np.float64,0xbfee3e5b123c7cb6,0xbff179c8b8334278,1 +np.float64,0x3fd172359f22e46b,0x3fd1a9ba6b1420a1,1 +np.float64,0x3fe8e5e242f1cbc5,0x3feb7cbcaefc4d5c,1 +np.float64,0x8007fb059a6ff60c,0x8007fb059a6ff60c,1 +np.float64,0xe3899e71c7134,0xe3899e71c7134,1 +np.float64,0x7fe3b98326a77305,0x7ff0000000000000,1 +np.float64,0x7fec4e206cb89c40,0x7ff0000000000000,1 +np.float64,0xbfa3b012c4276020,0xbfa3b150c13b3cc5,1 +np.float64,0xffefffffffffffff,0xfff0000000000000,1 +np.float64,0xffe28a5b9aa514b6,0xfff0000000000000,1 +np.float64,0xbfd76a6cc2aed4da,0xbfd7f10f4d04e7f6,1 +np.float64,0xbc2b1c0178564,0xbc2b1c0178564,1 +np.float64,0x6d9d444adb3a9,0x6d9d444adb3a9,1 +np.float64,0xbfdcadd368395ba6,0xbfdda6037b5c429c,1 +np.float64,0x3fe11891fde23124,0x3fe1ebc1c204b14b,1 +np.float64,0x3fdd66c3eebacd88,0x3fde72526b5304c4,1 +np.float64,0xbfe79d85612f3b0b,0xbfe9d1673bd1f6d6,1 +np.float64,0x3fed60abdabac158,0x3ff0d7426b3800a2,1 
+np.float64,0xbfb0ffa54021ff48,0xbfb102d81073a9f0,1 +np.float64,0xd2452af5a48a6,0xd2452af5a48a6,1 +np.float64,0xf4b835c1e971,0xf4b835c1e971,1 +np.float64,0x7e269cdafc4d4,0x7e269cdafc4d4,1 +np.float64,0x800097a21d812f45,0x800097a21d812f45,1 +np.float64,0x3fdfcc85e8bf990c,0x3fe08fcf770fd456,1 +np.float64,0xd8d53155b1aa6,0xd8d53155b1aa6,1 +np.float64,0x7fb8ed658831daca,0x7ff0000000000000,1 +np.float64,0xbfec865415b90ca8,0xbff03a4584d719f9,1 +np.float64,0xffd8cda62a319b4c,0xfff0000000000000,1 +np.float64,0x273598d84e6b4,0x273598d84e6b4,1 +np.float64,0x7fd566b5c32acd6b,0x7ff0000000000000,1 +np.float64,0xff61d9d48023b400,0xfff0000000000000,1 +np.float64,0xbfec5c3bf4f8b878,0xbff01c594243337c,1 +np.float64,0x7fd1be0561a37c0a,0x7ff0000000000000,1 +np.float64,0xffeaee3271b5dc64,0xfff0000000000000,1 +np.float64,0x800c0e1931b81c33,0x800c0e1931b81c33,1 +np.float64,0xbfad1171583a22e0,0xbfad1570e5c466d2,1 +np.float64,0x7fd783b0fe2f0761,0x7ff0000000000000,1 +np.float64,0x7fc39903e6273207,0x7ff0000000000000,1 +np.float64,0xffe00003c5600007,0xfff0000000000000,1 +np.float64,0x35a7b9c06b50,0x35a7b9c06b50,1 +np.float64,0x7fee441a22bc8833,0x7ff0000000000000,1 +np.float64,0xff6e47fbc03c9000,0xfff0000000000000,1 +np.float64,0xbfd3c3c9c8a78794,0xbfd41499b1912534,1 +np.float64,0x82c9c87f05939,0x82c9c87f05939,1 +np.float64,0xbfedeb0fe4fbd620,0xbff13c573ce9d3d0,1 +np.float64,0x2b79298656f26,0x2b79298656f26,1 +np.float64,0xbf5ee44f003dc800,0xbf5ee4503353c0ba,1 +np.float64,0xbfe1dd264e63ba4c,0xbfe2ce68116c7bf6,1 +np.float64,0x3fece10b7579c217,0x3ff07b21b11799c6,1 +np.float64,0x3fba47143a348e28,0x3fba52e601adf24c,1 +np.float64,0xffe9816e7a7302dc,0xfff0000000000000,1 +np.float64,0x8009a8047fd35009,0x8009a8047fd35009,1 +np.float64,0x800ac28e4e95851d,0x800ac28e4e95851d,1 +np.float64,0x80093facf4f27f5a,0x80093facf4f27f5a,1 +np.float64,0x3ff0000000000000,0x3ff2cd9fc44eb982,1 +np.float64,0x3fe76a9857eed530,0x3fe99018a5895a4f,1 +np.float64,0xbfd13c59a3a278b4,0xbfd171e133df0b16,1 +np.float64,0x7feb43bc83368778,0x7ff0000000000000,1 +np.float64,0xbfe2970c5fa52e18,0xbfe3a74a434c6efe,1 +np.float64,0xffd091c380212388,0xfff0000000000000,1 +np.float64,0x3febb3b9d2f76774,0x3fef4b4af2bd8580,1 +np.float64,0x7fec66787ef8ccf0,0x7ff0000000000000,1 +np.float64,0xbf935e185826bc40,0xbf935e640557a354,1 +np.float64,0x979df1552f3be,0x979df1552f3be,1 +np.float64,0x7fc096ee73212ddc,0x7ff0000000000000,1 +np.float64,0xbfe9de88faf3bd12,0xbfecc7d1ae691d1b,1 +np.float64,0x7fdc733f06b8e67d,0x7ff0000000000000,1 +np.float64,0xffd71be1a0ae37c4,0xfff0000000000000,1 +np.float64,0xb50dabd36a1b6,0xb50dabd36a1b6,1 +np.float64,0x7fce3d94d63c7b29,0x7ff0000000000000,1 +np.float64,0x7fbaf95e4435f2bc,0x7ff0000000000000,1 +np.float64,0x81a32a6f03466,0x81a32a6f03466,1 +np.float64,0xa99b5b4d5336c,0xa99b5b4d5336c,1 +np.float64,0x7f97c1eeb82f83dc,0x7ff0000000000000,1 +np.float64,0x3fe761636d6ec2c6,0x3fe98451160d2ffb,1 +np.float64,0xbfe3224ef5e6449e,0xbfe44b73eeadac52,1 +np.float64,0x7fde6feb0dbcdfd5,0x7ff0000000000000,1 +np.float64,0xbfee87f9ca7d0ff4,0xbff1b079e9d7f706,1 +np.float64,0x3fe46f4c9828de99,0x3fe5da2ab9609ea5,1 +np.float64,0xffb92fe882325fd0,0xfff0000000000000,1 +np.float64,0x80054bc63cea978d,0x80054bc63cea978d,1 +np.float64,0x3d988bea7b312,0x3d988bea7b312,1 +np.float64,0x3fe6468e1d6c8d1c,0x3fe81e64d37d39a8,1 +np.float64,0x3fd68eefc22d1de0,0x3fd7074264faeead,1 +np.float64,0xffb218a074243140,0xfff0000000000000,1 +np.float64,0x3fdbcb3b6cb79678,0x3fdcad011de40b7d,1 +np.float64,0x7fe3c161772782c2,0x7ff0000000000000,1 +np.float64,0x25575c904aaec,0x25575c904aaec,1 
+np.float64,0x800fa43a8f5f4875,0x800fa43a8f5f4875,1 +np.float64,0x3fe41fc9e1e83f94,0x3fe57a25dd1a37f1,1 +np.float64,0x3fd895f4a7b12be9,0x3fd931e7b721a08a,1 +np.float64,0xce31469f9c629,0xce31469f9c629,1 +np.float64,0xffea0f55ca341eab,0xfff0000000000000,1 +np.float64,0xffe831c9ba306393,0xfff0000000000000,1 +np.float64,0x7fe2056f03a40add,0x7ff0000000000000,1 +np.float64,0x7fd6b075e02d60eb,0x7ff0000000000000,1 +np.float64,0x3fdfbef4273f7de8,0x3fe0882c1f59efc0,1 +np.float64,0x8005b9e094ab73c2,0x8005b9e094ab73c2,1 +np.float64,0x3fea881ac6351036,0x3fedad7a319b887c,1 +np.float64,0xbfe2c61c7ee58c39,0xbfe3de9a99d8a9c6,1 +np.float64,0x30b0d3786161b,0x30b0d3786161b,1 +np.float64,0x3fa51d56a02a3aad,0x3fa51edee2d2ecef,1 +np.float64,0x79745732f2e8c,0x79745732f2e8c,1 +np.float64,0x800d55b4907aab69,0x800d55b4907aab69,1 +np.float64,0xbfbe8fcf0a3d1fa0,0xbfbea267fbb5bfdf,1 +np.float64,0xbfd04e2756a09c4e,0xbfd07b74d079f9a2,1 +np.float64,0x3fc65170552ca2e1,0x3fc66e6eb00c82ed,1 +np.float64,0xbfb0674b8020ce98,0xbfb06a2b4771b64c,1 +np.float64,0x2059975840b34,0x2059975840b34,1 +np.float64,0x33d1385467a28,0x33d1385467a28,1 +np.float64,0x3fea41b74ff4836f,0x3fed4dc1a09e53cc,1 +np.float64,0xbfe8e08c9d71c119,0xbfeb75b4c59a6bec,1 +np.float64,0x7fdbbf14d6377e29,0x7ff0000000000000,1 +np.float64,0x3fcd8b71513b16e0,0x3fcdcec80174f9ad,1 +np.float64,0x5c50bc94b8a18,0x5c50bc94b8a18,1 +np.float64,0x969a18f52d343,0x969a18f52d343,1 +np.float64,0x3fd7ae44462f5c89,0x3fd8398bc34e395c,1 +np.float64,0xffdd0f8617ba1f0c,0xfff0000000000000,1 +np.float64,0xfff0000000000000,0xfff0000000000000,1 +np.float64,0xbfe2f9badb65f376,0xbfe41b771320ece8,1 +np.float64,0x3fd140bc7fa29,0x3fd140bc7fa29,1 +np.float64,0xbfe14523b5628a48,0xbfe21ee850972043,1 +np.float64,0x3feedd0336bdba06,0x3ff1f01afc1f3a06,1 +np.float64,0x800de423ad7bc848,0x800de423ad7bc848,1 +np.float64,0x4cef857c99df1,0x4cef857c99df1,1 +np.float64,0xbfea55e0e374abc2,0xbfed691e41d648dd,1 +np.float64,0x3fe70d7a18ae1af4,0x3fe91955a34d8094,1 +np.float64,0xbfc62fc3032c5f88,0xbfc64c3ec25decb8,1 +np.float64,0x3fc915abb5322b58,0x3fc93edac5cc73fe,1 +np.float64,0x69aaff66d3561,0x69aaff66d3561,1 +np.float64,0x5c6a90f2b8d53,0x5c6a90f2b8d53,1 +np.float64,0x3fefe30dc1bfc61c,0x3ff2b752257bdacd,1 +np.float64,0x3fef15db15fe2bb6,0x3ff21aea05601396,1 +np.float64,0xbfe353e5ac66a7cc,0xbfe48644e6553d1a,1 +np.float64,0x3fe6d30cffada61a,0x3fe8cf3e4c61ddac,1 +np.float64,0x7fb7857eb62f0afc,0x7ff0000000000000,1 +np.float64,0xbfdd9b53d23b36a8,0xbfdeac91a7af1340,1 +np.float64,0x3fd1456357228ac7,0x3fd17b3f7d39b27a,1 +np.float64,0x3fb57d10ae2afa21,0x3fb5838702b806f4,1 +np.float64,0x800c59c96c98b393,0x800c59c96c98b393,1 +np.float64,0x7fc1f2413823e481,0x7ff0000000000000,1 +np.float64,0xbfa3983624273070,0xbfa3996fa26c419a,1 +np.float64,0x7fb28874ae2510e8,0x7ff0000000000000,1 +np.float64,0x3fe826d02a304da0,0x3fea82bec50bc0b6,1 +np.float64,0x8008d6f0d3d1ade2,0x8008d6f0d3d1ade2,1 +np.float64,0xffe7c970ca2f92e1,0xfff0000000000000,1 +np.float64,0x7fcf42bcaa3e8578,0x7ff0000000000000,1 +np.float64,0x7fda1ab517343569,0x7ff0000000000000,1 +np.float64,0xbfe7926a65ef24d5,0xbfe9c323dd890d5b,1 +np.float64,0xbfcaf6282d35ec50,0xbfcb294f36a0a33d,1 +np.float64,0x800ca49df8d9493c,0x800ca49df8d9493c,1 +np.float64,0xffea18d26af431a4,0xfff0000000000000,1 +np.float64,0x3fb72f276e2e5e50,0x3fb7374539fd1221,1 +np.float64,0xffa6b613842d6c20,0xfff0000000000000,1 +np.float64,0xbfeb3c7263f678e5,0xbfeea54cdb60b54c,1 +np.float64,0x3fc976d2ba32eda5,0x3fc9a1e83a058de4,1 +np.float64,0xbfe4acd4b0e959aa,0xbfe624d5d4f9b9a6,1 
+np.float64,0x7fca410a0f348213,0x7ff0000000000000,1 +np.float64,0xbfde368f77bc6d1e,0xbfdf5910c8c8bcb0,1 +np.float64,0xbfed7412937ae825,0xbff0e55afc428453,1 +np.float64,0xffef6b7b607ed6f6,0xfff0000000000000,1 +np.float64,0xbfb936f17e326de0,0xbfb941629a53c694,1 +np.float64,0x800dbb0c469b7619,0x800dbb0c469b7619,1 +np.float64,0x800f68b0581ed161,0x800f68b0581ed161,1 +np.float64,0x3fe25b2aad64b656,0x3fe361266fa9c5eb,1 +np.float64,0xbfb87e445a30fc88,0xbfb887d676910c3f,1 +np.float64,0x6e6ba9b6dcd76,0x6e6ba9b6dcd76,1 +np.float64,0x3fad27ce583a4f9d,0x3fad2bd72782ffdb,1 +np.float64,0xbfec0bc5d638178c,0xbfefc6e8c8f9095f,1 +np.float64,0x7fcba4a296374944,0x7ff0000000000000,1 +np.float64,0x8004ca237cc99448,0x8004ca237cc99448,1 +np.float64,0xffe85b8c3270b718,0xfff0000000000000,1 +np.float64,0x7fe7ee3eddafdc7d,0x7ff0000000000000,1 +np.float64,0xffd275967ca4eb2c,0xfff0000000000000,1 +np.float64,0xbfa95bc3a032b780,0xbfa95e6b288ecf43,1 +np.float64,0x3fc9e3214b33c643,0x3fca10667e7e7ff4,1 +np.float64,0x8001b89c5d837139,0x8001b89c5d837139,1 +np.float64,0xbf8807dfc0300fc0,0xbf880803e3badfbd,1 +np.float64,0x800aca94b895952a,0x800aca94b895952a,1 +np.float64,0x7fd79534a02f2a68,0x7ff0000000000000,1 +np.float64,0x3fe1b81179e37023,0x3fe2a371d8cc26f0,1 +np.float64,0x800699539d6d32a8,0x800699539d6d32a8,1 +np.float64,0xffe51dfbb3aa3bf7,0xfff0000000000000,1 +np.float64,0xbfdfb775abbf6eec,0xbfe083f48be2f98f,1 +np.float64,0x3fe87979d7b0f2f4,0x3feaee701d959079,1 +np.float64,0x3fd8e4e6a731c9cd,0x3fd986d29f25f982,1 +np.float64,0x3fe3dadaaf67b5b6,0x3fe527520fb02920,1 +np.float64,0x8003c2262bc7844d,0x8003c2262bc7844d,1 +np.float64,0x800c930add392616,0x800c930add392616,1 +np.float64,0xffb7a152a22f42a8,0xfff0000000000000,1 +np.float64,0x80028fe03dc51fc1,0x80028fe03dc51fc1,1 +np.float64,0xffe32ae60c6655cc,0xfff0000000000000,1 +np.float64,0x3fea3527e4746a50,0x3fed3cbbf47f18eb,1 +np.float64,0x800a53059e14a60c,0x800a53059e14a60c,1 +np.float64,0xbfd79e3b202f3c76,0xbfd828672381207b,1 +np.float64,0xffeed7e2eb7dafc5,0xfff0000000000000,1 +np.float64,0x3fec51ed6778a3db,0x3ff01509e34df61d,1 +np.float64,0xbfd84bc577b0978a,0xbfd8e23ec55e42e8,1 +np.float64,0x2483aff849077,0x2483aff849077,1 +np.float64,0x6f57883adeaf2,0x6f57883adeaf2,1 +np.float64,0xffd3fd74d927faea,0xfff0000000000000,1 +np.float64,0x7fca49ec773493d8,0x7ff0000000000000,1 +np.float64,0x7fd08fe2e8211fc5,0x7ff0000000000000,1 +np.float64,0x800852086db0a411,0x800852086db0a411,1 +np.float64,0x3fe5b1f2c9eb63e6,0x3fe7654f511bafc6,1 +np.float64,0xbfe01e2a58e03c54,0xbfe0cedb68f021e6,1 +np.float64,0x800988421d331085,0x800988421d331085,1 +np.float64,0xffd5038b18aa0716,0xfff0000000000000,1 +np.float64,0x8002c9264c85924d,0x8002c9264c85924d,1 +np.float64,0x3fd21ca302243946,0x3fd25ac653a71aab,1 +np.float64,0xbfea60d6e6f4c1ae,0xbfed78031d9dfa2b,1 +np.float64,0xffef97b6263f2f6b,0xfff0000000000000,1 +np.float64,0xbfd524732faa48e6,0xbfd5876ecc415dcc,1 +np.float64,0x660387e8cc072,0x660387e8cc072,1 +np.float64,0x7fcfc108a33f8210,0x7ff0000000000000,1 +np.float64,0x7febe5b0f877cb61,0x7ff0000000000000,1 +np.float64,0xbfa55fdfac2abfc0,0xbfa56176991851a8,1 +np.float64,0x25250f4c4a4a3,0x25250f4c4a4a3,1 +np.float64,0xffe2f6a2f2a5ed46,0xfff0000000000000,1 +np.float64,0x7fa754fcc02ea9f9,0x7ff0000000000000,1 +np.float64,0x3febd19dea37a33c,0x3fef75279f75d3b8,1 +np.float64,0xc5ed55218bdab,0xc5ed55218bdab,1 +np.float64,0x3fe72ff6b3ee5fed,0x3fe945388b979882,1 +np.float64,0xbfe16b854e22d70a,0xbfe24b10fc0dff14,1 +np.float64,0xffb22cbe10245980,0xfff0000000000000,1 +np.float64,0xa54246b54a849,0xa54246b54a849,1 
+np.float64,0x3fe7f4cda76fe99c,0x3fea41edc74888b6,1 +np.float64,0x1,0x1,1 +np.float64,0x800d84acce9b095a,0x800d84acce9b095a,1 +np.float64,0xb0eef04761dde,0xb0eef04761dde,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xffecaf1dbb795e3b,0xfff0000000000000,1 +np.float64,0x90dbab8d21b76,0x90dbab8d21b76,1 +np.float64,0x3fe79584a9ef2b09,0x3fe9c71fa9e40eb5,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-tan.csv b/python/numpy/_core/tests/data/umath-validation-set-tan.csv new file mode 100644 index 000000000..ac97624ec --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-tan.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xfd97ece0,0xc11186e9,4 +np.float32,0x8013bb34,0x8013bb34,4 +np.float32,0x316389,0x316389,4 +np.float32,0x7f7fffff,0xbf1c9eca,4 +np.float32,0x3f7674bb,0x3fb7e450,4 +np.float32,0x80800000,0x80800000,4 +np.float32,0x7f5995e8,0xbf94106c,4 +np.float32,0x74527,0x74527,4 +np.float32,0x7f08caea,0xbeceddb6,4 +np.float32,0x2d49b2,0x2d49b2,4 +np.float32,0x3f74e5e4,0x3fb58695,4 +np.float32,0x3f3fcd51,0x3f6e1e81,4 +np.float32,0xbf4f3608,0xbf864d3d,4 +np.float32,0xbed974a0,0xbee78c70,4 +np.float32,0xff5f483c,0x3ecf3cb2,4 +np.float32,0x7f4532f4,0xc0b96f7b,4 +np.float32,0x3f0a4f7c,0x3f198cc0,4 +np.float32,0x210193,0x210193,4 +np.float32,0xfeebad7a,0xbf92eba8,4 +np.float32,0xfed29f74,0xc134cab6,4 +np.float32,0x803433a0,0x803433a0,4 +np.float32,0x64eb46,0x64eb46,4 +np.float32,0xbf54ef22,0xbf8c757b,4 +np.float32,0x3f3d5fdd,0x3f69a17b,4 +np.float32,0x80000001,0x80000001,4 +np.float32,0x800a837a,0x800a837a,4 +np.float32,0x6ff0be,0x6ff0be,4 +np.float32,0xfe8f1186,0x3f518820,4 +np.float32,0x804963e5,0x804963e5,4 +np.float32,0xfebaa59a,0x3fa1dbb0,4 +np.float32,0x637970,0x637970,4 +np.float32,0x3e722a6b,0x3e76c89a,4 +np.float32,0xff2b0478,0xbddccb5f,4 +np.float32,0xbf7bd85b,0xbfc06821,4 +np.float32,0x3ec33600,0x3ecd4126,4 +np.float32,0x3e0a43b9,0x3e0b1c69,4 +np.float32,0x7f7511b6,0xbe427083,4 +np.float32,0x3f28c114,0x3f465a73,4 +np.float32,0x3f179e1c,0x3f2c3e7c,4 +np.float32,0x7b2963,0x7b2963,4 +np.float32,0x3f423d06,0x3f72b442,4 +np.float32,0x3f5a24c6,0x3f925508,4 +np.float32,0xff18c834,0xbf79b5c8,4 +np.float32,0x3f401ece,0x3f6eb6ac,4 +np.float32,0x7b8a3013,0xbffab968,4 +np.float32,0x80091ff0,0x80091ff0,4 +np.float32,0x3f389c51,0x3f610b47,4 +np.float32,0x5ea174,0x5ea174,4 +np.float32,0x807a9eb2,0x807a9eb2,4 +np.float32,0x806ce61e,0x806ce61e,4 +np.float32,0xbe956acc,0xbe99cefc,4 +np.float32,0x7e60e247,0xbf5e64a5,4 +np.float32,0x7f398e24,0x404d12ed,4 +np.float32,0x3d9049f8,0x3d908735,4 +np.float32,0x7db17ffc,0xbf5b3d87,4 +np.float32,0xff453f78,0xc0239c9f,4 +np.float32,0x3f024aac,0x3f0ed802,4 +np.float32,0xbe781c30,0xbe7d1508,4 +np.float32,0x3f77962a,0x3fb9a28e,4 +np.float32,0xff7fffff,0x3f1c9eca,4 +np.float32,0x3f7152e3,0x3fb03f9d,4 +np.float32,0xff7cb167,0x3f9ce831,4 +np.float32,0x3e763e30,0x3e7b1a10,4 +np.float32,0xbf126527,0xbf24c253,4 +np.float32,0x803f6660,0x803f6660,4 +np.float32,0xbf79de38,0xbfbd38b1,4 +np.float32,0x8046c2f0,0x8046c2f0,4 +np.float32,0x6dc74e,0x6dc74e,4 +np.float32,0xbec9c45e,0xbed4e768,4 +np.float32,0x3f0eedb6,0x3f1fe610,4 +np.float32,0x7e031999,0xbcc13026,4 +np.float32,0x7efc2fd7,0x41e4b284,4 +np.float32,0xbeab7454,0xbeb22a1b,4 +np.float32,0x805ee67b,0x805ee67b,4 +np.float32,0x7f76e58e,0xc2436659,4 +np.float32,0xbe62b024,0xbe667718,4 +np.float32,0x3eea0808,0x3efbd182,4 +np.float32,0xbf7fd00c,0xbfc70719,4 +np.float32,0x7f27b640,0xbf0d97e0,4 +np.float32,0x3f1b58a4,0x3f31b6f4,4 +np.float32,0x252a9f,0x252a9f,4 
+np.float32,0x7f65f95a,0xbead5de3,4 +np.float32,0xfc6ea780,0x42d15801,4 +np.float32,0x7eac4c52,0xc0682424,4 +np.float32,0xbe8a3f5a,0xbe8db54d,4 +np.float32,0xbf1644e2,0xbf2a4abd,4 +np.float32,0x3fc96a,0x3fc96a,4 +np.float32,0x7f38c0e4,0x3cc04af8,4 +np.float32,0x3f623d75,0x3f9c065d,4 +np.float32,0x3ee6a51a,0x3ef7a058,4 +np.float32,0x3dd11020,0x3dd1cacf,4 +np.float32,0xb6918,0xb6918,4 +np.float32,0xfdd7a540,0x3f22f081,4 +np.float32,0x80798563,0x80798563,4 +np.float32,0x3e9a8b7a,0x3e9f6a7e,4 +np.float32,0xbea515d4,0xbeab0df5,4 +np.float32,0xbea9b9f4,0xbeb03abe,4 +np.float32,0xbf11a5fa,0xbf23b478,4 +np.float32,0xfd6cadf0,0xbfa2a878,4 +np.float32,0xbf6edd07,0xbfacbb78,4 +np.float32,0xff5c5328,0x3e2d1552,4 +np.float32,0xbea2f788,0xbea8b3f5,4 +np.float32,0x802efaeb,0x802efaeb,4 +np.float32,0xff1c85e5,0x41f8560e,4 +np.float32,0x3f53b123,0x3f8b18e1,4 +np.float32,0xff798c4a,0x4092e66f,4 +np.float32,0x7f2e6fe7,0xbdcbd58f,4 +np.float32,0xfe8a8196,0x3fd7fc56,4 +np.float32,0x5e7ad4,0x5e7ad4,4 +np.float32,0xbf23a02d,0xbf3e4533,4 +np.float32,0x3f31c55c,0x3f5531bf,4 +np.float32,0x80331be3,0x80331be3,4 +np.float32,0x8056960a,0x8056960a,4 +np.float32,0xff1c06ae,0xbfd26992,4 +np.float32,0xbe0cc4b0,0xbe0da96c,4 +np.float32,0x7e925ad5,0xbf8dba54,4 +np.float32,0x2c8cec,0x2c8cec,4 +np.float32,0x8011951e,0x8011951e,4 +np.float32,0x3f2caf84,0x3f4cb89f,4 +np.float32,0xbd32c220,0xbd32df33,4 +np.float32,0xbec358d6,0xbecd6996,4 +np.float32,0x3f6e4930,0x3fabeb92,4 +np.float32,0xbf6a3afd,0xbfa65a3a,4 +np.float32,0x80067764,0x80067764,4 +np.float32,0x3d8df1,0x3d8df1,4 +np.float32,0x7ee51cf2,0x409e4061,4 +np.float32,0x435f5d,0x435f5d,4 +np.float32,0xbf5b17f7,0xbf936ebe,4 +np.float32,0x3ecaacb5,0x3ed5f81f,4 +np.float32,0x807b0aa5,0x807b0aa5,4 +np.float32,0x52b40b,0x52b40b,4 +np.float32,0x146a97,0x146a97,4 +np.float32,0x7f42b952,0xbfdcb413,4 +np.float32,0xbf1a1af2,0xbf2fe1bb,4 +np.float32,0x3f312034,0x3f541aa2,4 +np.float32,0x3f281d60,0x3f4554f9,4 +np.float32,0x50e451,0x50e451,4 +np.float32,0xbe45838c,0xbe480016,4 +np.float32,0xff7d0aeb,0x3eb0746e,4 +np.float32,0x7f32a489,0xbf96af6d,4 +np.float32,0xbf1b4e27,0xbf31a769,4 +np.float32,0x3f242936,0x3f3f1a44,4 +np.float32,0xbf7482ff,0xbfb4f201,4 +np.float32,0x4bda38,0x4bda38,4 +np.float32,0xbf022208,0xbf0ea2bb,4 +np.float32,0x7d08ca95,0xbe904602,4 +np.float32,0x7ed2f356,0xc02b55ad,4 +np.float32,0xbf131204,0xbf25b734,4 +np.float32,0xff3464b4,0x3fb23706,4 +np.float32,0x5a97cf,0x5a97cf,4 +np.float32,0xbe52db70,0xbe55e388,4 +np.float32,0x3f52934f,0x3f89e2aa,4 +np.float32,0xfeea866a,0x40a2b33f,4 +np.float32,0x80333925,0x80333925,4 +np.float32,0xfef5d13e,0xc00139ec,4 +np.float32,0x3f4750ab,0x3f7c87ad,4 +np.float32,0x3e41bfdd,0x3e44185a,4 +np.float32,0xbf5b0572,0xbf935935,4 +np.float32,0xbe93c9da,0xbe9808d8,4 +np.float32,0x7f501f33,0xc0f9973c,4 +np.float32,0x800af035,0x800af035,4 +np.float32,0x3f29faf8,0x3f4852a8,4 +np.float32,0xbe1e4c20,0xbe1f920c,4 +np.float32,0xbf7e8616,0xbfc4d79d,4 +np.float32,0x43ffbf,0x43ffbf,4 +np.float32,0x7f28e8a9,0xbfa1ac24,4 +np.float32,0xbf1f9f92,0xbf3820bc,4 +np.float32,0x3f07e004,0x3f1641c4,4 +np.float32,0x3ef7ea7f,0x3f06a64a,4 +np.float32,0x7e013101,0x3f6080e6,4 +np.float32,0x7f122a4f,0xbf0a796f,4 +np.float32,0xfe096960,0x3ed7273a,4 +np.float32,0x3f06abf1,0x3f14a4b2,4 +np.float32,0x3e50ded3,0x3e53d0f1,4 +np.float32,0x7f50b346,0x3eabb536,4 +np.float32,0xff5adb0f,0xbd441972,4 +np.float32,0xbecefe46,0xbedb0f66,4 +np.float32,0x7da70bd4,0xbec66273,4 +np.float32,0x169811,0x169811,4 +np.float32,0xbee4dfee,0xbef5721a,4 +np.float32,0x3efbeae3,0x3f0936e6,4 
+np.float32,0x8031bd61,0x8031bd61,4 +np.float32,0x8048e443,0x8048e443,4 +np.float32,0xff209aa6,0xbeb364cb,4 +np.float32,0xff477499,0x3c1b0041,4 +np.float32,0x803fe929,0x803fe929,4 +np.float32,0x3f70158b,0x3fae7725,4 +np.float32,0x7f795723,0x3e8e850a,4 +np.float32,0x3cba99,0x3cba99,4 +np.float32,0x80588d2a,0x80588d2a,4 +np.float32,0x805d1f05,0x805d1f05,4 +np.float32,0xff4ac09a,0xbefe614d,4 +np.float32,0x804af084,0x804af084,4 +np.float32,0x7c64ae63,0xc1a8b563,4 +np.float32,0x8078d793,0x8078d793,4 +np.float32,0x7f3e2436,0xbf8bf9d3,4 +np.float32,0x7ccec1,0x7ccec1,4 +np.float32,0xbf6462c7,0xbf9eb830,4 +np.float32,0x3f1002ca,0x3f216843,4 +np.float32,0xfe878ca6,0x409e73a5,4 +np.float32,0x3bd841d9,0x3bd842a7,4 +np.float32,0x7d406f41,0xbd9dcfa3,4 +np.float32,0x7c6d6,0x7c6d6,4 +np.float32,0x3f4ef360,0x3f86074b,4 +np.float32,0x805f534a,0x805f534a,4 +np.float32,0x1,0x1,4 +np.float32,0x3f739ee2,0x3fb39db2,4 +np.float32,0x3d0c2352,0x3d0c3153,4 +np.float32,0xfe8a4f2c,0x3edd8add,4 +np.float32,0x3e52eaa0,0x3e55f362,4 +np.float32,0x7bde9758,0xbf5ba5cf,4 +np.float32,0xff422654,0xbf41e487,4 +np.float32,0x385e5b,0x385e5b,4 +np.float32,0x5751dd,0x5751dd,4 +np.float32,0xff6c671c,0xc03e2d6d,4 +np.float32,0x1458be,0x1458be,4 +np.float32,0x80153d4d,0x80153d4d,4 +np.float32,0x7efd2adb,0x3e25458f,4 +np.float32,0xbe161880,0xbe172e12,4 +np.float32,0x7ecea1aa,0x40a66d79,4 +np.float32,0xbf5b02a2,0xbf9355f0,4 +np.float32,0x15d9ab,0x15d9ab,4 +np.float32,0x2dc7c7,0x2dc7c7,4 +np.float32,0xfebbf81a,0x4193f6e6,4 +np.float32,0xfe8e3594,0xc00a6695,4 +np.float32,0x185aa8,0x185aa8,4 +np.float32,0x3daea156,0x3daf0e00,4 +np.float32,0x3e071688,0x3e07e08e,4 +np.float32,0x802db9e6,0x802db9e6,4 +np.float32,0x7f7be2c4,0x3f1363dd,4 +np.float32,0x7eba3f5e,0xc13eb497,4 +np.float32,0x3de04a00,0x3de130a9,4 +np.float32,0xbf1022bc,0xbf2194eb,4 +np.float32,0xbf5b547e,0xbf93b53b,4 +np.float32,0x3e867bd6,0x3e89aa10,4 +np.float32,0xbea5eb5c,0xbeabfb73,4 +np.float32,0x7f1efae9,0x3ffca038,4 +np.float32,0xff5d0344,0xbe55dbbb,4 +np.float32,0x805167e7,0x805167e7,4 +np.float32,0xbdb3a020,0xbdb41667,4 +np.float32,0xbedea6b4,0xbeedd5fd,4 +np.float32,0x8053b45c,0x8053b45c,4 +np.float32,0x7ed370e9,0x3d90eba5,4 +np.float32,0xbefcd7da,0xbf09cf91,4 +np.float32,0x78b9ac,0x78b9ac,4 +np.float32,0xbf2f6dc0,0xbf5141ef,4 +np.float32,0x802d3a7b,0x802d3a7b,4 +np.float32,0xfd45d120,0x3fec31cc,4 +np.float32,0xbf7e7020,0xbfc4b2af,4 +np.float32,0xf04da,0xf04da,4 +np.float32,0xbe9819d4,0xbe9cbd35,4 +np.float32,0x8075ab35,0x8075ab35,4 +np.float32,0xbf052fdc,0xbf12aa2c,4 +np.float32,0x3f1530d0,0x3f28bd9f,4 +np.float32,0x80791881,0x80791881,4 +np.float32,0x67f309,0x67f309,4 +np.float32,0x3f12f16a,0x3f2588f5,4 +np.float32,0x3ecdac47,0x3ed97ff8,4 +np.float32,0xbf297fb7,0xbf478c39,4 +np.float32,0x8069fa80,0x8069fa80,4 +np.float32,0x807f940e,0x807f940e,4 +np.float32,0xbf648dc8,0xbf9eeecb,4 +np.float32,0x3de873b0,0x3de9748d,4 +np.float32,0x3f1aa645,0x3f30af1f,4 +np.float32,0xff227a62,0x3d8283cc,4 +np.float32,0xbf37187d,0xbf5e5f4c,4 +np.float32,0x803b1b1f,0x803b1b1f,4 +np.float32,0x3f58142a,0x3f8ff8da,4 +np.float32,0x8004339e,0x8004339e,4 +np.float32,0xbf0f5654,0xbf2077a4,4 +np.float32,0x3f17e509,0x3f2ca598,4 +np.float32,0x3f800000,0x3fc75923,4 +np.float32,0xfdf79980,0x42f13047,4 +np.float32,0x7f111381,0x3f13c4c9,4 +np.float32,0xbea40c70,0xbea9e724,4 +np.float32,0x110520,0x110520,4 +np.float32,0x60490d,0x60490d,4 +np.float32,0x3f6703ec,0x3fa21951,4 +np.float32,0xbf098256,0xbf187652,4 +np.float32,0x658951,0x658951,4 +np.float32,0x3f53bf16,0x3f8b2818,4 
+np.float32,0xff451811,0xc0026068,4 +np.float32,0x80777ee0,0x80777ee0,4 +np.float32,0x3e4fcc19,0x3e52b286,4 +np.float32,0x7f387ee0,0x3ce93eb6,4 +np.float32,0xff51181f,0xbfca3ee4,4 +np.float32,0xbf5655ae,0xbf8e0304,4 +np.float32,0xff2f1dcd,0x40025471,4 +np.float32,0x7f6e58e5,0xbe9930d5,4 +np.float32,0x7adf11,0x7adf11,4 +np.float32,0xbe9a2bc2,0xbe9f0185,4 +np.float32,0x8065d3a0,0x8065d3a0,4 +np.float32,0x3ed6e826,0x3ee47c45,4 +np.float32,0x80598ea0,0x80598ea0,4 +np.float32,0x7f10b90a,0x40437bd0,4 +np.float32,0x27b447,0x27b447,4 +np.float32,0x7ecd861c,0x3fce250f,4 +np.float32,0x0,0x0,4 +np.float32,0xbeba82d6,0xbec3394c,4 +np.float32,0xbf4958b0,0xbf8048ea,4 +np.float32,0x7c643e,0x7c643e,4 +np.float32,0x580770,0x580770,4 +np.float32,0x805bf54a,0x805bf54a,4 +np.float32,0x7f1f3cee,0xbe1a54d6,4 +np.float32,0xfefefdea,0x3fa84576,4 +np.float32,0x7f007b7a,0x3e8a6d25,4 +np.float32,0xbf177959,0xbf2c0919,4 +np.float32,0xbf30fda0,0xbf53e058,4 +np.float32,0x3f0576be,0x3f130861,4 +np.float32,0x3f49380e,0x3f80283a,4 +np.float32,0xebc56,0xebc56,4 +np.float32,0x654e3b,0x654e3b,4 +np.float32,0x14a4d8,0x14a4d8,4 +np.float32,0xff69b3cb,0xbf822a88,4 +np.float32,0xbe9b6c1c,0xbea06109,4 +np.float32,0xbefddd7e,0xbf0a787b,4 +np.float32,0x4c4ebb,0x4c4ebb,4 +np.float32,0x7d0a74,0x7d0a74,4 +np.float32,0xbebb5f80,0xbec43635,4 +np.float32,0x7ee79723,0xc1c7f3f3,4 +np.float32,0x7f2be4c7,0xbfa6c693,4 +np.float32,0x805bc7d5,0x805bc7d5,4 +np.float32,0x8042f12c,0x8042f12c,4 +np.float32,0x3ef91be8,0x3f07697b,4 +np.float32,0x3cf37ac0,0x3cf38d1c,4 +np.float32,0x800000,0x800000,4 +np.float32,0xbe1ebf4c,0xbe200806,4 +np.float32,0x7f380862,0xbeb512e8,4 +np.float32,0xbe320064,0xbe33d0fc,4 +np.float32,0xff300b0c,0xbfadb805,4 +np.float32,0x308a06,0x308a06,4 +np.float32,0xbf084f6e,0xbf16d7b6,4 +np.float32,0xff47cab6,0x3f892b65,4 +np.float32,0xbed99f4a,0xbee7bfd5,4 +np.float32,0xff7d74c0,0x3ee88c9a,4 +np.float32,0x3c3d23,0x3c3d23,4 +np.float32,0x8074bde8,0x8074bde8,4 +np.float32,0x80042164,0x80042164,4 +np.float32,0x3e97c92a,0x3e9c6500,4 +np.float32,0x3b80e0,0x3b80e0,4 +np.float32,0xbf16646a,0xbf2a783d,4 +np.float32,0x7f3b4cb1,0xc01339be,4 +np.float32,0xbf31f36e,0xbf557fd0,4 +np.float32,0x7f540618,0xbe5f6fc1,4 +np.float32,0x7eee47d0,0x40a27e94,4 +np.float32,0x7f12f389,0xbebed654,4 +np.float32,0x56cff5,0x56cff5,4 +np.float32,0x8056032b,0x8056032b,4 +np.float32,0x3ed34e40,0x3ee02e38,4 +np.float32,0x7d51a908,0xbf19a90e,4 +np.float32,0x80000000,0x80000000,4 +np.float32,0xfdf73fd0,0xbf0f8cad,4 +np.float32,0x7ee4fe6d,0xbf1ea7e4,4 +np.float32,0x1f15ba,0x1f15ba,4 +np.float32,0xd18c3,0xd18c3,4 +np.float32,0x80797705,0x80797705,4 +np.float32,0x7ef07091,0x3f2f3b9a,4 +np.float32,0x7f552f41,0x3faf608c,4 +np.float32,0x3f779977,0x3fb9a7ad,4 +np.float32,0xfe1a7a50,0xbdadc4d1,4 +np.float32,0xbf449cf0,0xbf7740db,4 +np.float32,0xbe44e620,0xbe475cad,4 +np.float32,0x3f63a098,0x3f9dc2b5,4 +np.float32,0xfed40a12,0x4164533a,4 +np.float32,0x7a2bbb,0x7a2bbb,4 +np.float32,0xff7f7b9e,0xbeee8740,4 +np.float32,0x7ee27f8b,0x4233f53b,4 +np.float32,0xbf044c06,0xbf117c28,4 +np.float32,0xbeffde54,0xbf0bc49f,4 +np.float32,0xfeaef2e8,0x3ff258fe,4 +np.float32,0x527451,0x527451,4 +np.float32,0xbcef8d00,0xbcef9e7c,4 +np.float32,0xbf0e20c0,0xbf1ec9b2,4 +np.float32,0x8024afda,0x8024afda,4 +np.float32,0x7ef6cb3e,0x422cad0b,4 +np.float32,0x3c120,0x3c120,4 +np.float32,0xbf125c8f,0xbf24b62c,4 +np.float32,0x7e770a93,0x402c9d86,4 +np.float32,0xbd30a4e0,0xbd30c0ee,4 +np.float32,0xbf4d3388,0xbf843530,4 +np.float32,0x3f529072,0x3f89df92,4 +np.float32,0xff0270b1,0xbf81be9a,4 
+np.float32,0x5e07e7,0x5e07e7,4 +np.float32,0x7bec32,0x7bec32,4 +np.float32,0x7fc00000,0x7fc00000,4 +np.float32,0x3e3ba5e0,0x3e3dc6e9,4 +np.float32,0x3ecb62d4,0x3ed6ce2c,4 +np.float32,0x3eb3dde8,0x3ebba68f,4 +np.float32,0x8063f952,0x8063f952,4 +np.float32,0x7f204aeb,0x3e88614e,4 +np.float32,0xbeae1ddc,0xbeb5278e,4 +np.float32,0x6829e9,0x6829e9,4 +np.float32,0xbf361a99,0xbf5ca354,4 +np.float32,0xbf24fbe6,0xbf406326,4 +np.float32,0x3f329d41,0x3f56a061,4 +np.float32,0xfed6d666,0x3e8f71a5,4 +np.float32,0x337f92,0x337f92,4 +np.float32,0xbe1c4970,0xbe1d8305,4 +np.float32,0xbe6b7e18,0xbe6fbbde,4 +np.float32,0x3f2267b9,0x3f3c61da,4 +np.float32,0xbee1ee94,0xbef1d628,4 +np.float32,0x7ecffc1a,0x3f02987e,4 +np.float32,0xbe9b1306,0xbe9fff3b,4 +np.float32,0xbeffacae,0xbf0ba468,4 +np.float32,0x7f800000,0xffc00000,4 +np.float32,0xfefc9aa8,0xc19de2a3,4 +np.float32,0x7d7185bb,0xbf9090ec,4 +np.float32,0x7edfbafd,0x3fe9352f,4 +np.float32,0x4ef2ec,0x4ef2ec,4 +np.float32,0x7f4cab2e,0xbff4e5dd,4 +np.float32,0xff3b1788,0x3e3c22e9,4 +np.float32,0x4e15ee,0x4e15ee,4 +np.float32,0xbf5451e6,0xbf8bc8a7,4 +np.float32,0x3f7f6d2e,0x3fc65e8b,4 +np.float32,0xbf1d9184,0xbf35071b,4 +np.float32,0xbf3a81cf,0xbf646d9b,4 +np.float32,0xbe71acc4,0xbe7643ab,4 +np.float32,0x528b7d,0x528b7d,4 +np.float32,0x2cb1d0,0x2cb1d0,4 +np.float32,0x3f324bf8,0x3f56161a,4 +np.float32,0x80709a21,0x80709a21,4 +np.float32,0x4bc448,0x4bc448,4 +np.float32,0x3e8bd600,0x3e8f6b7a,4 +np.float32,0xbeb97d30,0xbec20dd6,4 +np.float32,0x2a5669,0x2a5669,4 +np.float32,0x805f2689,0x805f2689,4 +np.float32,0xfe569f50,0x3fc51952,4 +np.float32,0x1de44c,0x1de44c,4 +np.float32,0x3ec7036c,0x3ed1ae67,4 +np.float32,0x8052b8e5,0x8052b8e5,4 +np.float32,0xff740a6b,0x3f4981a8,4 +np.float32,0xfee9bb70,0xc05e23be,4 +np.float32,0xff4e12c9,0x4002b4ad,4 +np.float32,0x803de0c2,0x803de0c2,4 +np.float32,0xbf433a07,0xbf74966f,4 +np.float32,0x803e60ca,0x803e60ca,4 +np.float32,0xbf19ee98,0xbf2fa07a,4 +np.float32,0x92929,0x92929,4 +np.float32,0x7f709c27,0x4257ba2d,4 +np.float32,0x803167c6,0x803167c6,4 +np.float32,0xbf095ead,0xbf184607,4 +np.float32,0x617060,0x617060,4 +np.float32,0x2d85b3,0x2d85b3,4 +np.float32,0x53d20b,0x53d20b,4 +np.float32,0x3e046838,0x3e052666,4 +np.float32,0xbe7c5fdc,0xbe80ce4b,4 +np.float32,0x3d18d060,0x3d18e289,4 +np.float32,0x804dc031,0x804dc031,4 +np.float32,0x3f224166,0x3f3c26cd,4 +np.float32,0x7d683e3c,0xbea24f25,4 +np.float32,0xbf3a92aa,0xbf648be4,4 +np.float32,0x8072670b,0x8072670b,4 +np.float32,0xbe281aec,0xbe29a1bc,4 +np.float32,0x7f09d918,0xc0942490,4 +np.float32,0x7ca9fd07,0x4018b990,4 +np.float32,0x7d36ac5d,0x3cf57184,4 +np.float32,0x8039b62f,0x8039b62f,4 +np.float32,0x6cad7b,0x6cad7b,4 +np.float32,0x3c0fd9ab,0x3c0fda9d,4 +np.float32,0x80299883,0x80299883,4 +np.float32,0x3c2d0e3e,0x3c2d0fe4,4 +np.float32,0x8002cf62,0x8002cf62,4 +np.float32,0x801dde97,0x801dde97,4 +np.float32,0x80411856,0x80411856,4 +np.float32,0x6ebce8,0x6ebce8,4 +np.float32,0x7b7d1a,0x7b7d1a,4 +np.float32,0x8031d3de,0x8031d3de,4 +np.float32,0x8005c4ab,0x8005c4ab,4 +np.float32,0xbf7dd803,0xbfc3b3ef,4 +np.float32,0x8017ae60,0x8017ae60,4 +np.float32,0xfe9316ce,0xbfe0544a,4 +np.float32,0x3f136bfe,0x3f2636ff,4 +np.float32,0x3df87b80,0x3df9b57d,4 +np.float32,0xff44c356,0xbf11c7ad,4 +np.float32,0x4914ae,0x4914ae,4 +np.float32,0x80524c21,0x80524c21,4 +np.float32,0x805c7dc8,0x805c7dc8,4 +np.float32,0xfed3c0aa,0xbff0c0ab,4 +np.float32,0x7eb2bfbb,0xbf4600bc,4 +np.float32,0xfec8df84,0x3f5bd350,4 +np.float32,0x3e5431a4,0x3e5748c3,4 +np.float32,0xbee6a3a0,0xbef79e86,4 
+np.float32,0xbf6cc9b2,0xbfa9d61a,4 +np.float32,0x3f132bd5,0x3f25dbd9,4 +np.float32,0x7e6d2e48,0x3f9d025b,4 +np.float32,0x3edf430c,0x3eee942d,4 +np.float32,0x3f0d1b8a,0x3f1d60e1,4 +np.float32,0xbdf2f688,0xbdf41bfb,4 +np.float32,0xbe47a284,0xbe4a33ff,4 +np.float32,0x3eaa9fbc,0x3eb13be7,4 +np.float32,0xfe98d45e,0x3eb84517,4 +np.float32,0x7efc23b3,0x3dcc1c99,4 +np.float32,0x3ca36242,0x3ca367ce,4 +np.float32,0x3f76a944,0x3fb834e3,4 +np.float32,0xbf45207c,0xbf783f9b,4 +np.float32,0x3e7c1220,0x3e80a4f8,4 +np.float32,0x3f018200,0x3f0dd14e,4 +np.float32,0x3f53cdde,0x3f8b3839,4 +np.float32,0xbdbacb58,0xbdbb5063,4 +np.float32,0x804af68d,0x804af68d,4 +np.float32,0x3e2c12fc,0x3e2db65b,4 +np.float32,0x3f039433,0x3f10895a,4 +np.float32,0x7ef5193d,0x3f4115f7,4 +np.float32,0x8030afbe,0x8030afbe,4 +np.float32,0x3f06fa2a,0x3f150d5d,4 +np.float32,0x3f124442,0x3f2493d2,4 +np.float32,0xbeb5b792,0xbebdc090,4 +np.float32,0xbedc90a4,0xbeeb4de9,4 +np.float32,0x3f3ff8,0x3f3ff8,4 +np.float32,0x3ee75bc5,0x3ef881e4,4 +np.float32,0xfe80e3de,0xbf5cd535,4 +np.float32,0xf52eb,0xf52eb,4 +np.float32,0x80660ee8,0x80660ee8,4 +np.float32,0x3e173a58,0x3e185648,4 +np.float32,0xfe49520c,0xbf728d7c,4 +np.float32,0xbecbb8ec,0xbed73373,4 +np.float32,0xbf027ae0,0xbf0f173e,4 +np.float32,0xbcab6740,0xbcab6da8,4 +np.float32,0xbf2a15e2,0xbf487e11,4 +np.float32,0x3b781b,0x3b781b,4 +np.float32,0x44f559,0x44f559,4 +np.float32,0xff6a0ca6,0xc174d7c3,4 +np.float32,0x6460ef,0x6460ef,4 +np.float32,0xfe58009c,0x3ee2bb30,4 +np.float32,0xfec3c038,0x3e30d617,4 +np.float32,0x7f0687c0,0xbf62c820,4 +np.float32,0xbf44655e,0xbf76d589,4 +np.float32,0xbf42968c,0xbf735e78,4 +np.float32,0x80385503,0x80385503,4 +np.float32,0xbea7e3a2,0xbeae2d59,4 +np.float32,0x3dd0b770,0x3dd17131,4 +np.float32,0xbf4bc185,0xbf82b907,4 +np.float32,0xfefd7d64,0xbee05650,4 +np.float32,0xfaac3c00,0xbff23bc9,4 +np.float32,0xbf562f0d,0xbf8dd7f4,4 +np.float32,0x7fa00000,0x7fe00000,4 +np.float32,0x3e01bdb8,0x3e027098,4 +np.float32,0x3e2868ab,0x3e29f19e,4 +np.float32,0xfec55f2e,0x3f39f304,4 +np.float32,0xed4e,0xed4e,4 +np.float32,0x3e2b7330,0x3e2d11fa,4 +np.float32,0x7f738542,0x40cbbe16,4 +np.float32,0x3f123521,0x3f247e71,4 +np.float32,0x73572c,0x73572c,4 +np.float32,0x804936c8,0x804936c8,4 +np.float32,0x803b80d8,0x803b80d8,4 +np.float32,0x7f566c57,0xbee2855a,4 +np.float32,0xff0e3bd8,0xbff0543f,4 +np.float32,0x7d2b2fe7,0xbf94ba4c,4 +np.float32,0xbf0da470,0xbf1e1dc2,4 +np.float32,0xbd276500,0xbd277ce0,4 +np.float32,0xfcd15dc0,0x403ccc2a,4 +np.float32,0x80071e59,0x80071e59,4 +np.float32,0xbe9b0c34,0xbe9ff7be,4 +np.float32,0x3f4f9069,0x3f86ac50,4 +np.float32,0x80042a95,0x80042a95,4 +np.float32,0x7de28e39,0x3bc9b7f4,4 +np.float32,0xbf641935,0xbf9e5af8,4 +np.float32,0x8034f068,0x8034f068,4 +np.float32,0xff33a3d2,0xbf408e75,4 +np.float32,0xbcc51540,0xbcc51efc,4 +np.float32,0xff6d1ddf,0x3ef58f0e,4 +np.float32,0xbf64dfc4,0xbf9f5725,4 +np.float32,0xff068a06,0x3eea8987,4 +np.float32,0xff01c0af,0x3f24cdfe,4 +np.float32,0x3f4def7e,0x3f84f802,4 +np.float32,0xbf1b4ae7,0xbf31a299,4 +np.float32,0x8077df2d,0x8077df2d,4 +np.float32,0x3f0155c5,0x3f0d9785,4 +np.float32,0x5a54b2,0x5a54b2,4 +np.float32,0x7f271f9e,0x3efb2ef3,4 +np.float32,0xbf0ff2ec,0xbf215217,4 +np.float32,0x7f500130,0xbf8a7fdd,4 +np.float32,0xfed9891c,0xbf65c872,4 +np.float32,0xfecbfaae,0x403bdbc2,4 +np.float32,0x3f3a5aba,0x3f642772,4 +np.float32,0x7ebc681e,0xbd8df059,4 +np.float32,0xfe05e400,0xbfe35d74,4 +np.float32,0xbf295ace,0xbf4750ea,4 +np.float32,0x7ea055b2,0x3f62d6be,4 +np.float32,0xbd00b520,0xbd00bff9,4 
+np.float32,0xbf7677aa,0xbfb7e8cf,4 +np.float32,0x3e83f788,0x3e86f816,4 +np.float32,0x801f6710,0x801f6710,4 +np.float32,0x801133cc,0x801133cc,4 +np.float32,0x41da2a,0x41da2a,4 +np.float32,0xff1622fd,0x3f023650,4 +np.float32,0x806c7a72,0x806c7a72,4 +np.float32,0x3f10779c,0x3f220bb4,4 +np.float32,0xbf08cf94,0xbf17848d,4 +np.float32,0xbecb55b4,0xbed6bebd,4 +np.float32,0xbf0a1528,0xbf193d7b,4 +np.float32,0x806a16bd,0x806a16bd,4 +np.float32,0xc222a,0xc222a,4 +np.float32,0x3930de,0x3930de,4 +np.float32,0x3f5c3588,0x3f94bca2,4 +np.float32,0x1215ad,0x1215ad,4 +np.float32,0x3ed15030,0x3eddcf67,4 +np.float32,0x7da83b2e,0x3fce0d39,4 +np.float32,0x32b0a8,0x32b0a8,4 +np.float32,0x805aed6b,0x805aed6b,4 +np.float32,0x3ef8e02f,0x3f074346,4 +np.float32,0xbdeb6780,0xbdec7250,4 +np.float32,0x3f6e3cec,0x3fabda61,4 +np.float32,0xfefd467a,0x3ef7821a,4 +np.float32,0xfef090fe,0x3bb752a2,4 +np.float32,0x8019c538,0x8019c538,4 +np.float32,0x3e8cf284,0x3e909e81,4 +np.float32,0xbe6c6618,0xbe70b0a2,4 +np.float32,0x7f50a539,0x3f367be1,4 +np.float32,0x8019fe2f,0x8019fe2f,4 +np.float32,0x800c3f48,0x800c3f48,4 +np.float32,0xfd054cc0,0xc0f52802,4 +np.float32,0x3d0cca20,0x3d0cd853,4 +np.float32,0xbf4a7c44,0xbf816e74,4 +np.float32,0x3f46fc40,0x3f7be153,4 +np.float32,0x807c5849,0x807c5849,4 +np.float32,0xd7e41,0xd7e41,4 +np.float32,0x70589b,0x70589b,4 +np.float32,0x80357b95,0x80357b95,4 +np.float32,0x3de239f0,0x3de326a5,4 +np.float32,0x800b08e3,0x800b08e3,4 +np.float32,0x807ec946,0x807ec946,4 +np.float32,0x3e2e4b83,0x3e2fff76,4 +np.float32,0x3f198e0f,0x3f2f12a6,4 +np.float32,0xbecb1aca,0xbed67979,4 +np.float32,0x80134082,0x80134082,4 +np.float32,0x3f3a269f,0x3f63ca05,4 +np.float32,0x3f1381e4,0x3f265622,4 +np.float32,0xff293080,0xbf10be6f,4 +np.float32,0xff800000,0xffc00000,4 +np.float32,0x37d196,0x37d196,4 +np.float32,0x7e57eea7,0x3e7d8138,4 +np.float32,0x804b1dae,0x804b1dae,4 +np.float32,0x7d9508f9,0xc1075b35,4 +np.float32,0x3f7bf468,0x3fc095e0,4 +np.float32,0x55472c,0x55472c,4 +np.float32,0x3ecdcd86,0x3ed9a738,4 +np.float32,0x3ed9be0f,0x3ee7e4e9,4 +np.float32,0x3e7e0ddb,0x3e81b2fe,4 +np.float32,0x7ee6c1d3,0x3f850634,4 +np.float32,0x800f6fad,0x800f6fad,4 +np.float32,0xfefb3bd6,0xbff68ecc,4 +np.float32,0x8013d6e2,0x8013d6e2,4 +np.float32,0x3f3a2cb6,0x3f63d4ee,4 +np.float32,0xff383c84,0x3e7854bb,4 +np.float32,0x3f21946e,0x3f3b1cea,4 +np.float32,0xff322ea2,0x3fb22f31,4 +np.float32,0x8065a024,0x8065a024,4 +np.float32,0x7f395e30,0xbefe0de1,4 +np.float32,0x5b52db,0x5b52db,4 +np.float32,0x7f7caea7,0x3dac8ded,4 +np.float32,0xbf0431f8,0xbf1159b2,4 +np.float32,0x7f15b25b,0xc02a3833,4 +np.float32,0x80131abc,0x80131abc,4 +np.float32,0x7e829d81,0xbeb2e93d,4 +np.float32,0x3f2c64d7,0x3f4c3e4d,4 +np.float32,0x7f228d48,0xc1518c74,4 +np.float32,0xfc3c6f40,0xbf00d585,4 +np.float32,0x7f754f0f,0x3e2152f5,4 +np.float32,0xff65d32b,0xbe8bd56c,4 +np.float32,0xfea6b8c0,0x41608655,4 +np.float32,0x3f7d4b05,0x3fc2c96a,4 +np.float32,0x3f463230,0x3f7a54da,4 +np.float32,0x805117bb,0x805117bb,4 +np.float32,0xbf2ad4f7,0xbf49b30e,4 +np.float32,0x3eaa01ff,0x3eb08b56,4 +np.float32,0xff7a02bb,0x3f095f73,4 +np.float32,0x759176,0x759176,4 +np.float32,0x803c18d5,0x803c18d5,4 +np.float32,0xbe0722d8,0xbe07ed16,4 +np.float32,0x3f4b4a99,0x3f823fc6,4 +np.float32,0x3f7d0451,0x3fc25463,4 +np.float32,0xfee31e40,0xbfb41091,4 +np.float32,0xbf733d2c,0xbfb30cf1,4 +np.float32,0x7ed81015,0x417c380c,4 +np.float32,0x7daafc3e,0xbe2a37ed,4 +np.float32,0x3e44f82b,0x3e476f67,4 +np.float32,0x7c8d99,0x7c8d99,4 +np.float32,0x3f7aec5a,0x3fbee991,4 +np.float32,0xff09fd55,0x3e0709d3,4 
+np.float32,0xff4ba4df,0x4173c01f,4 +np.float32,0x3f43d944,0x3f75c7bd,4 +np.float32,0xff6a9106,0x40a10eff,4 +np.float32,0x3bc8341c,0x3bc834bf,4 +np.float32,0x3eea82,0x3eea82,4 +np.float32,0xfea36a3c,0x435729b2,4 +np.float32,0x7dcc1fb0,0x3e330053,4 +np.float32,0x3f616ae6,0x3f9b01ae,4 +np.float32,0x8030963f,0x8030963f,4 +np.float32,0x10d1e2,0x10d1e2,4 +np.float32,0xfeb9a8a6,0x40e6daac,4 +np.float32,0xbe1aba00,0xbe1bea3a,4 +np.float32,0x3cb6b4ea,0x3cb6bcac,4 +np.float32,0x3d8b0b64,0x3d8b422f,4 +np.float32,0x7b6894,0x7b6894,4 +np.float32,0x3e89dcde,0x3e8d4b4b,4 +np.float32,0x3f12b952,0x3f253974,4 +np.float32,0x1c316c,0x1c316c,4 +np.float32,0x7e2da535,0x3f95fe6b,4 +np.float32,0x3ae9a494,0x3ae9a4a4,4 +np.float32,0xbc5f5500,0xbc5f588b,4 +np.float32,0x3e7850fc,0x3e7d4d0e,4 +np.float32,0xbf800000,0xbfc75923,4 +np.float32,0x3e652d69,0x3e691502,4 +np.float32,0xbf6bdd26,0xbfa89129,4 +np.float32,0x3f441cfc,0x3f764a02,4 +np.float32,0x7f5445ff,0xc0906191,4 +np.float32,0x807b2ee3,0x807b2ee3,4 +np.float32,0xbeb6cab8,0xbebef9c0,4 +np.float32,0xff737277,0xbf327011,4 +np.float32,0xfc832aa0,0x402fd52e,4 +np.float32,0xbf0c7538,0xbf1c7c0f,4 +np.float32,0x7e1301c7,0xbf0ee63e,4 +np.float64,0xbfe0ef7df7a1defc,0xbfe2b76a8d8aeb35,1 +np.float64,0x7fdd9c2eae3b385c,0xbfc00d6885485039,1 +np.float64,0xbfb484c710290990,0xbfb4900e0a527555,1 +np.float64,0x7fe73e5d6cee7cba,0x3fefbf70a56b60d3,1 +np.float64,0x800a110aa8d42216,0x800a110aa8d42216,1 +np.float64,0xffedd4f3f3bba9e7,0xbff076f8c4124919,1 +np.float64,0x800093407f812682,0x800093407f812682,1 +np.float64,0x800a23150e54462a,0x800a23150e54462a,1 +np.float64,0xbfb1076864220ed0,0xbfb10dd95a74b733,1 +np.float64,0x3fed1f8b37fa3f16,0x3ff496100985211f,1 +np.float64,0x3fdf762f84beec5f,0x3fe1223eb04a17e0,1 +np.float64,0x53fd4e0aa7faa,0x53fd4e0aa7faa,1 +np.float64,0x3fdbd283bdb7a507,0x3fddb7ec9856a546,1 +np.float64,0xbfe43f449d687e89,0xbfe77724a0d3072b,1 +np.float64,0x618b73bcc316f,0x618b73bcc316f,1 +np.float64,0x67759424ceeb3,0x67759424ceeb3,1 +np.float64,0xbfe4b6f7d9a96df0,0xbfe831371f3bd7a8,1 +np.float64,0x800a531b8b74a637,0x800a531b8b74a637,1 +np.float64,0xffeeffd5c37dffab,0x3fea140cbc2c3726,1 +np.float64,0x3fe648e2002c91c4,0x3feac1b8816f972a,1 +np.float64,0x800f16242a1e2c48,0x800f16242a1e2c48,1 +np.float64,0xffeeff8e1dbdff1b,0xc000b555f117dce7,1 +np.float64,0x3fdf1cf73fbe39f0,0x3fe0e9032401135b,1 +np.float64,0x7fe19c388b633870,0x3fd5271b69317d5b,1 +np.float64,0x918f226d231e5,0x918f226d231e5,1 +np.float64,0x4cc19ab499834,0x4cc19ab499834,1 +np.float64,0xbd3121d57a624,0xbd3121d57a624,1 +np.float64,0xbfd145d334a28ba6,0xbfd1b468866124d6,1 +np.float64,0x8bdbf41517b7f,0x8bdbf41517b7f,1 +np.float64,0x3fd1b8cb3ea37198,0x3fd2306b13396cae,1 +np.float64,0xbfd632a959ac6552,0xbfd7220fcfb5ef78,1 +np.float64,0x1cdaafc639b57,0x1cdaafc639b57,1 +np.float64,0x3febdcce1577b99c,0x3ff2fe076195a2bc,1 +np.float64,0x7fca6e945934dd28,0x3ff43040df7024e8,1 +np.float64,0x3fbe08e78e3c11cf,0x3fbe2c60e6b48f75,1 +np.float64,0x7fc1ed0d0523da19,0x3ff55f8dcad9440f,1 +np.float64,0xbfdc729b8cb8e538,0xbfde7b6e15dd60c4,1 +np.float64,0x3fd219404f243281,0x3fd298d7b3546531,1 +np.float64,0x3fe715c3f56e2b88,0x3fec255b5a59456e,1 +np.float64,0x7fe8b88e74b1711c,0x3ff60efd2c81d13d,1 +np.float64,0xa1d2b9fd43a57,0xa1d2b9fd43a57,1 +np.float64,0xffc1818223230304,0xbfb85c6c1e8018e7,1 +np.float64,0x3fde38ac8b3c7159,0x3fe0580c7e228576,1 +np.float64,0x8008faf7b491f5f0,0x8008faf7b491f5f0,1 +np.float64,0xffe7a1d751af43ae,0xbf7114cd7bbcd981,1 +np.float64,0xffec2db1b4b85b62,0xbff5cae759667f83,1 
+np.float64,0x7fefce1ae27f9c35,0x3ff4b8b88f4876cf,1 +np.float64,0x7fd1ff56a523feac,0xbff342ce192f14dd,1 +np.float64,0x80026b3e3f84d67d,0x80026b3e3f84d67d,1 +np.float64,0xffedee5879bbdcb0,0xc02fae11508b2be0,1 +np.float64,0x8003c0dc822781ba,0x8003c0dc822781ba,1 +np.float64,0xffe38a79eca714f4,0xc008aa23b7a63980,1 +np.float64,0xbfda70411eb4e082,0xbfdc0d7e29c89010,1 +np.float64,0x800a5e34f574bc6a,0x800a5e34f574bc6a,1 +np.float64,0x3fc19fac6e233f59,0x3fc1bc66ac0d73d4,1 +np.float64,0x3a8a61ea7514d,0x3a8a61ea7514d,1 +np.float64,0x3fb57b536e2af6a0,0x3fb588451f72f44c,1 +np.float64,0x7fd68c6d082d18d9,0xc032ac926b665c9a,1 +np.float64,0xd5b87cfdab710,0xd5b87cfdab710,1 +np.float64,0xfe80b20bfd017,0xfe80b20bfd017,1 +np.float64,0x3fef8781e37f0f04,0x3ff8215fe2c1315a,1 +np.float64,0xffedddbb9c3bbb76,0x3fd959b82258a32a,1 +np.float64,0x3fc7d41f382fa83e,0x3fc81b94c3a091ba,1 +np.float64,0xffc3275dcf264ebc,0x3fb2b3d4985c6078,1 +np.float64,0x7fe34d2b7ba69a56,0x40001f3618e3c7c9,1 +np.float64,0x3fd64ae35fac95c7,0x3fd73d77e0b730f8,1 +np.float64,0x800e53bf6b3ca77f,0x800e53bf6b3ca77f,1 +np.float64,0xbfddf7c9083bef92,0xbfe02f392744d2d1,1 +np.float64,0x1c237cc038471,0x1c237cc038471,1 +np.float64,0x3fe4172beea82e58,0x3fe739b4bf16bc7e,1 +np.float64,0xfa950523f52a1,0xfa950523f52a1,1 +np.float64,0xffc839a2c5307344,0xbff70ff8a3c9247f,1 +np.float64,0x264f828c4c9f1,0x264f828c4c9f1,1 +np.float64,0x148a650a2914e,0x148a650a2914e,1 +np.float64,0x3fe8d255c0b1a4ac,0x3fef623c3ea8d6e3,1 +np.float64,0x800f4fbb28be9f76,0x800f4fbb28be9f76,1 +np.float64,0x7fdca57bcfb94af7,0x3ff51207563fb6cb,1 +np.float64,0x3fe4944107692882,0x3fe7fad593235364,1 +np.float64,0x800119b4f1a2336b,0x800119b4f1a2336b,1 +np.float64,0xbfe734075e6e680e,0xbfec5b35381069f2,1 +np.float64,0xffeb3c00db767801,0xbfbbd7d22df7b4b3,1 +np.float64,0xbfe95c658cb2b8cb,0xbff03ad5e0bc888a,1 +np.float64,0xffeefeb58fbdfd6a,0xbfd5c9264deb0e11,1 +np.float64,0x7fccc80fde39901f,0xc012c60f914f3ca2,1 +np.float64,0x3fe5da289c2bb451,0x3fea07ad00a0ca63,1 +np.float64,0x800e364b0a5c6c96,0x800e364b0a5c6c96,1 +np.float64,0x3fcf9ea7d23f3d50,0x3fd023b72e8c9dcf,1 +np.float64,0x800a475cfc948eba,0x800a475cfc948eba,1 +np.float64,0xffd4e0d757a9c1ae,0xbfa89d573352e011,1 +np.float64,0xbfd4dbec8229b7da,0xbfd5a165f12c7c40,1 +np.float64,0xffe307ab51260f56,0x3fe6b1639da58c3f,1 +np.float64,0xbfe6955a546d2ab4,0xbfeb44ae2183fee9,1 +np.float64,0xbfca1f18f5343e30,0xbfca7d804ccccdf4,1 +np.float64,0xe9f4dfebd3e9c,0xe9f4dfebd3e9c,1 +np.float64,0xfff0000000000000,0xfff8000000000000,1 +np.float64,0x8008e69c0fb1cd38,0x8008e69c0fb1cd38,1 +np.float64,0xbfead1ccf975a39a,0xbff1c84b3db8ca93,1 +np.float64,0x25a982424b531,0x25a982424b531,1 +np.float64,0x8010000000000000,0x8010000000000000,1 +np.float64,0x80056204ea0ac40b,0x80056204ea0ac40b,1 +np.float64,0x800d1442d07a2886,0x800d1442d07a2886,1 +np.float64,0xbfaef3dadc3de7b0,0xbfaefd85ae6205f0,1 +np.float64,0x7fe969ce4b32d39c,0xbff3c4364fc6778f,1 +np.float64,0x7fe418bac0a83175,0x402167d16b1efe0b,1 +np.float64,0x3fd7c82a25af9054,0x3fd8f0c701315672,1 +np.float64,0x80013782a7826f06,0x80013782a7826f06,1 +np.float64,0x7fc031c7ee20638f,0x400747ab705e6904,1 +np.float64,0x3fe8cf327ff19e65,0x3fef5c14f8aafa89,1 +np.float64,0xbfe331a416a66348,0xbfe5e2290a098dd4,1 +np.float64,0x800607b2116c0f65,0x800607b2116c0f65,1 +np.float64,0x7fb40448f0280891,0xbfd43d4f0ffa1d64,1 +np.float64,0x7fefffffffffffff,0xbf74530cfe729484,1 +np.float64,0x3fe39b5444a736a9,0x3fe67eaa0b6acf27,1 +np.float64,0x3fee4733c4fc8e68,0x3ff631eabeef9696,1 +np.float64,0xbfec840f3b79081e,0xbff3cc8563ab2e74,1 
+np.float64,0xbfc8f6854c31ed0c,0xbfc948caacb3bba0,1 +np.float64,0xffbcf754a639eea8,0xbfc88d17cad3992b,1 +np.float64,0x8000bd3163417a64,0x8000bd3163417a64,1 +np.float64,0x3fe766d0eaeecda2,0x3fecb660882f7024,1 +np.float64,0xb6cc30156d986,0xb6cc30156d986,1 +np.float64,0xffc0161f9f202c40,0x3fe19bdefe5cf8b1,1 +np.float64,0xffe1e462caa3c8c5,0x3fe392c47feea17b,1 +np.float64,0x30a36a566146e,0x30a36a566146e,1 +np.float64,0x3fa996f580332deb,0x3fa99c6b4f2abebe,1 +np.float64,0x3fba71716e34e2e0,0x3fba899f35edba1d,1 +np.float64,0xbfe8f7e5e971efcc,0xbfefac431a0e3d55,1 +np.float64,0xf48f1803e91e3,0xf48f1803e91e3,1 +np.float64,0x7fe3edc0a127db80,0xc03d1a579a5d74a8,1 +np.float64,0xffeba82056375040,0x3fdfd701308700db,1 +np.float64,0xbfeb5a924cf6b524,0xbff2640de7cd107f,1 +np.float64,0xfa4cd1a9f499a,0xfa4cd1a9f499a,1 +np.float64,0x800de1be7b9bc37d,0x800de1be7b9bc37d,1 +np.float64,0xffd44e56ad289cae,0x3fdf4b8085db9b67,1 +np.float64,0xbfe4fb3aea69f676,0xbfe89d2cc46fcc50,1 +np.float64,0xbfe596495d6b2c92,0xbfe997a589a1f632,1 +np.float64,0x6f55a2b8deab5,0x6f55a2b8deab5,1 +np.float64,0x7fe72dc4712e5b88,0x4039c4586b28c2bc,1 +np.float64,0x89348bd712692,0x89348bd712692,1 +np.float64,0xffe062156120c42a,0x4005f0580973bc77,1 +np.float64,0xbfeabc714d7578e2,0xbff1b07e2fa57dc0,1 +np.float64,0x8003a56b3e874ad7,0x8003a56b3e874ad7,1 +np.float64,0x800eeadfb85dd5c0,0x800eeadfb85dd5c0,1 +np.float64,0x46d77a4c8daf0,0x46d77a4c8daf0,1 +np.float64,0x8000c06e7dc180de,0x8000c06e7dc180de,1 +np.float64,0x3fe428d211e851a4,0x3fe754b1c00a89bc,1 +np.float64,0xc5be11818b7c2,0xc5be11818b7c2,1 +np.float64,0x7fefc244893f8488,0x401133dc54f52de5,1 +np.float64,0x3fde30eee93c61de,0x3fe0532b827543a6,1 +np.float64,0xbfd447f48b288fea,0xbfd4fd0654f90718,1 +np.float64,0xbfde98dc7b3d31b8,0xbfe094df12f84a06,1 +np.float64,0x3fed2c1a1dfa5834,0x3ff4a6c4f3470a65,1 +np.float64,0xbfe992165073242d,0xbff071ab039c9177,1 +np.float64,0x3fd0145d1b2028ba,0x3fd06d3867b703dc,1 +np.float64,0x3fe179457362f28b,0x3fe3722f1d045fda,1 +np.float64,0x800e28964fbc512d,0x800e28964fbc512d,1 +np.float64,0x8004a5d785294bb0,0x8004a5d785294bb0,1 +np.float64,0xbfd652f2272ca5e4,0xbfd7469713125120,1 +np.float64,0x7fe61f49036c3e91,0xbf9b6ccdf2d87e70,1 +np.float64,0xffb7d47dd02fa8f8,0xc004449a82320b13,1 +np.float64,0x3feb82f996b705f3,0x3ff29336c738a4c5,1 +np.float64,0x3fbb7fceea36ffa0,0x3fbb9b02c8ad7f93,1 +np.float64,0x80004519fb208a35,0x80004519fb208a35,1 +np.float64,0xbfe0539114e0a722,0xbfe1e86dc5aa039c,1 +np.float64,0x0,0x0,1 +np.float64,0xbfe99d1125f33a22,0xbff07cf8ec04300f,1 +np.float64,0xffd4fbeecc29f7de,0x3ffab76775a8455f,1 +np.float64,0xbfbf1c618e3e38c0,0xbfbf43d2764a8333,1 +np.float64,0x800cae02a9d95c06,0x800cae02a9d95c06,1 +np.float64,0x3febc47d3bf788fa,0x3ff2e0d7cf8ef509,1 +np.float64,0x3fef838f767f071f,0x3ff81aeac309bca0,1 +np.float64,0xbfd5e70716abce0e,0xbfd6ccb033ef7a35,1 +np.float64,0x3f9116fa60222df5,0x3f9117625f008e0b,1 +np.float64,0xffe02b1e5f20563c,0xbfe6b2ec293520b7,1 +np.float64,0xbf9b5aec3036b5e0,0xbf9b5c96c4c7f951,1 +np.float64,0xfdb0169bfb603,0xfdb0169bfb603,1 +np.float64,0x7fcdd1d51c3ba3a9,0x401f0e12fa0b7570,1 +np.float64,0xbfd088103fa11020,0xbfd0e8c4a333ffb2,1 +np.float64,0x3fe22df82ee45bf0,0x3fe46d03a7c14de2,1 +np.float64,0xbfd57b0c28aaf618,0xbfd65349a6191de5,1 +np.float64,0x3fe0a42f50a1485f,0x3fe252e26775d9a4,1 +np.float64,0x800fab4e363f569c,0x800fab4e363f569c,1 +np.float64,0xffe9f0ed63f3e1da,0xbfe278c341b171d5,1 +np.float64,0x7fe26c244664d848,0xbfb325269dad1996,1 +np.float64,0xffe830410bf06081,0xc00181a39f606e96,1 
+np.float64,0x800c548a0c78a914,0x800c548a0c78a914,1 +np.float64,0x800f94761ebf28ec,0x800f94761ebf28ec,1 +np.float64,0x3fe5984845eb3091,0x3fe99aeb653c666d,1 +np.float64,0x7fe93e5bf8f27cb7,0xc010d159fa27396a,1 +np.float64,0xffefffffffffffff,0x3f74530cfe729484,1 +np.float64,0x4c83f1269907f,0x4c83f1269907f,1 +np.float64,0x3fde0065a8bc00cc,0x3fe034a1cdf026d4,1 +np.float64,0x800743810d6e8703,0x800743810d6e8703,1 +np.float64,0x80040662d5280cc6,0x80040662d5280cc6,1 +np.float64,0x3fed20b2c5ba4166,0x3ff497988519d7aa,1 +np.float64,0xffe8fa15e5f1f42b,0x3fff82ca76d797b4,1 +np.float64,0xbb72e22f76e5d,0xbb72e22f76e5d,1 +np.float64,0x7fc18ffa7c231ff4,0xbff4b8b4c3315026,1 +np.float64,0xbfe8d1ac44f1a358,0xbfef60efc4f821e3,1 +np.float64,0x3fd38c1fe8271840,0x3fd42dc37ff7262b,1 +np.float64,0xe577bee5caef8,0xe577bee5caef8,1 +np.float64,0xbff0000000000000,0xbff8eb245cbee3a6,1 +np.float64,0xffcb3a9dd436753c,0x3fcd1a3aff1c3fc7,1 +np.float64,0x7fe44bf2172897e3,0x3ff60bfe82a379f4,1 +np.float64,0x8009203823924071,0x8009203823924071,1 +np.float64,0x7fef8e0abc7f1c14,0x3fe90e4962d47ce5,1 +np.float64,0xffda50004434a000,0x3fb50dee03e1418b,1 +np.float64,0x7fe2ff276ea5fe4e,0xc0355b7d2a0a8d9d,1 +np.float64,0x3fd0711ba5a0e238,0x3fd0d03823d2d259,1 +np.float64,0xe7625b03cec4c,0xe7625b03cec4c,1 +np.float64,0xbfd492c8d7a92592,0xbfd55006cde8d300,1 +np.float64,0x8001fee99f23fdd4,0x8001fee99f23fdd4,1 +np.float64,0x7ff4000000000000,0x7ffc000000000000,1 +np.float64,0xfa15df97f42bc,0xfa15df97f42bc,1 +np.float64,0xbfec3fdca9787fb9,0xbff377164b13c7a9,1 +np.float64,0xbcec10e579d82,0xbcec10e579d82,1 +np.float64,0xbfc3b4e2132769c4,0xbfc3dd1fcc7150a6,1 +np.float64,0x80045b149ee8b62a,0x80045b149ee8b62a,1 +np.float64,0xffe044554c2088aa,0xbff741436d558785,1 +np.float64,0xffcc65f09f38cbe0,0xc0172b4adc2d317d,1 +np.float64,0xf68b2d3bed166,0xf68b2d3bed166,1 +np.float64,0x7fc7f44c572fe898,0x3fec69f3b1eca790,1 +np.float64,0x3fac51f61438a3ec,0x3fac595d34156002,1 +np.float64,0xbfeaa9f256f553e5,0xbff19bfdf5984326,1 +np.float64,0x800e4742149c8e84,0x800e4742149c8e84,1 +np.float64,0xbfc493df132927c0,0xbfc4c1ba4268ead9,1 +np.float64,0xbfbf0c56383e18b0,0xbfbf3389fcf50c72,1 +np.float64,0xbf978a0e082f1420,0xbf978b1dd1da3d3c,1 +np.float64,0xbfe04375356086ea,0xbfe1d34c57314dd1,1 +np.float64,0x3feaeeb29b75dd65,0x3ff1e8b772374979,1 +np.float64,0xbfe15e42c3a2bc86,0xbfe34d45d56c5c15,1 +np.float64,0x3fe507429a6a0e85,0x3fe8b058176b3225,1 +np.float64,0x3feee2b26c3dc565,0x3ff71b73203de921,1 +np.float64,0xbfd496577aa92cae,0xbfd553fa7fe15a5f,1 +np.float64,0x7fe2c10953e58212,0x3fc8ead6a0d14bbf,1 +np.float64,0x800035b77aa06b70,0x800035b77aa06b70,1 +np.float64,0x2329201e46525,0x2329201e46525,1 +np.float64,0xbfe6225c9a6c44b9,0xbfea80861590fa02,1 +np.float64,0xbfd6925030ad24a0,0xbfd78e70b1c2215d,1 +np.float64,0xbfd82225c4b0444c,0xbfd958a60f845b39,1 +np.float64,0xbb03d8a17609,0xbb03d8a17609,1 +np.float64,0x7fc33967b12672ce,0x40001e00c9af4002,1 +np.float64,0xff9373c6d026e780,0xbff308654a459d3d,1 +np.float64,0x3feab1f9c5f563f4,0x3ff1a4e0fd2f093d,1 +np.float64,0xbf993ef768327de0,0xbf994046b64e308b,1 +np.float64,0xffb87382fc30e708,0xbfde0accb83c891b,1 +np.float64,0x800bb3a118176743,0x800bb3a118176743,1 +np.float64,0x800c810250d90205,0x800c810250d90205,1 +np.float64,0xbfd2c4eb9ba589d8,0xbfd3539508b4a4a8,1 +np.float64,0xbee1f5437dc3f,0xbee1f5437dc3f,1 +np.float64,0x3fc07aeab520f5d8,0x3fc0926272f9d8e2,1 +np.float64,0xbfe23747a3246e90,0xbfe47a20a6e98687,1 +np.float64,0x3fde1296debc252c,0x3fe0401143ff6b5c,1 +np.float64,0xbfcec8c2f73d9184,0xbfcf644e25ed3b74,1 
+np.float64,0xff9314f2c82629e0,0x40559a0f9099dfd1,1 +np.float64,0xbfe27487afa4e910,0xbfe4d0e01200bde6,1 +np.float64,0xffb3d6637627acc8,0x3fe326d4b1e1834f,1 +np.float64,0xffe6f84d642df09a,0x3fc73fa9f57c3acb,1 +np.float64,0xffe67cf76fecf9ee,0xc01cf48c97937ef9,1 +np.float64,0x7fdc73fc12b8e7f7,0xbfcfcecde9331104,1 +np.float64,0xffdcf8789239f0f2,0x3fe345e3b8e28776,1 +np.float64,0x800a70af5314e15f,0x800a70af5314e15f,1 +np.float64,0xffc862300730c460,0x3fc4e9ea813beca7,1 +np.float64,0xbfcc6961bd38d2c4,0xbfcce33bfa6c6bd1,1 +np.float64,0xbfc9b76bbf336ed8,0xbfca117456ac37e5,1 +np.float64,0x7fb86e829430dd04,0x400a5bd7a18e302d,1 +np.float64,0x7fb9813ef833027d,0xbfe5a6494f143625,1 +np.float64,0x8005085e2c2a10bd,0x8005085e2c2a10bd,1 +np.float64,0xffe5af099d6b5e12,0x40369bbe31e03e06,1 +np.float64,0xffde03b1fd3c0764,0x3ff061120aa1f52a,1 +np.float64,0x7fa4eb6cdc29d6d9,0x3fe9defbe9010322,1 +np.float64,0x800803f4b11007ea,0x800803f4b11007ea,1 +np.float64,0x7febd50f6df7aa1e,0xbffcf540ccf220dd,1 +np.float64,0x7fed454f08fa8a9d,0xbffc2a8b81079403,1 +np.float64,0xbfed7e8c69bafd19,0xbff5161e51ba6634,1 +np.float64,0xffef92e78eff25ce,0xbffefeecddae0ad3,1 +np.float64,0x7fe5b9b413ab7367,0xbfc681ba29704176,1 +np.float64,0x29284e805252,0x29284e805252,1 +np.float64,0xffed3955bcfa72ab,0xbfc695acb5f468de,1 +np.float64,0x3fe464ee1ca8c9dc,0x3fe7b140ce50fdca,1 +np.float64,0xffe522ae4bea455c,0x3feb957c146e66ef,1 +np.float64,0x8000000000000000,0x8000000000000000,1 +np.float64,0x3fd0c353a2a186a8,0x3fd1283aaa43a411,1 +np.float64,0x3fdb30a749b6614f,0x3fdcf40df006ed10,1 +np.float64,0x800109213cc21243,0x800109213cc21243,1 +np.float64,0xbfe72aa0c5ee5542,0xbfec4a713f513bc5,1 +np.float64,0x800865344ad0ca69,0x800865344ad0ca69,1 +np.float64,0x7feb7df60eb6fbeb,0x3fb1df06a67aa22f,1 +np.float64,0x3fe83a5dd93074bc,0x3fee3d63cda72636,1 +np.float64,0xbfde70e548bce1ca,0xbfe07b8e19c9dac6,1 +np.float64,0xbfeea38d537d471b,0xbff6bb18c230c0be,1 +np.float64,0x3fefeebbc47fdd78,0x3ff8cdaa53b7c7b4,1 +np.float64,0x7fe6512e20eca25b,0xbff623cee44a22b5,1 +np.float64,0xf8fa5ca3f1f4c,0xf8fa5ca3f1f4c,1 +np.float64,0x7fd12d00ed225a01,0xbfe90d518ea61faf,1 +np.float64,0x80027db43504fb69,0x80027db43504fb69,1 +np.float64,0xffc10a01aa221404,0x3fcc2065b3d0157b,1 +np.float64,0xbfef8286e87f050e,0xbff8193a54449b59,1 +np.float64,0xbfc73178092e62f0,0xbfc7735072ba4593,1 +np.float64,0x3fc859d70630b3ae,0x3fc8a626522af1c0,1 +np.float64,0x3fe4654c4268ca99,0x3fe7b1d2913eda1a,1 +np.float64,0xbfce93cd843d279c,0xbfcf2c2ef16a0957,1 +np.float64,0xffbcaa16d4395430,0xbfd511ced032d784,1 +np.float64,0xbfe91f980e723f30,0xbfeffb39cf8c7746,1 +np.float64,0x800556fb6f0aadf8,0x800556fb6f0aadf8,1 +np.float64,0xffd009cde520139c,0x3fe4fa83b1e93d28,1 +np.float64,0x7febc0675e3780ce,0x3feb53930c004dae,1 +np.float64,0xbfe7f975bdeff2ec,0xbfedc36e6729b010,1 +np.float64,0x45aff57c8b5ff,0x45aff57c8b5ff,1 +np.float64,0xbfec7ebd0138fd7a,0xbff3c5cab680aae0,1 +np.float64,0x8009448003b28900,0x8009448003b28900,1 +np.float64,0x3fca4b992d349732,0x3fcaabebcc86aa9c,1 +np.float64,0x3fca069161340d20,0x3fca63ecc742ff3a,1 +np.float64,0x80063bc80bec7791,0x80063bc80bec7791,1 +np.float64,0xbfe1764bffe2ec98,0xbfe36e1cb30cec94,1 +np.float64,0xffd0dba72f21b74e,0x3fb1834964d57ef6,1 +np.float64,0xbfe31848fc263092,0xbfe5bd066445cbc3,1 +np.float64,0xbfd1fb227323f644,0xbfd278334e27f02d,1 +np.float64,0xffdc59069fb8b20e,0xbfdfc363f559ea2c,1 +np.float64,0x3fdea52a52bd4a55,0x3fe09cada4e5344c,1 +np.float64,0x3f715e55a022bd00,0x3f715e5c72a2809e,1 +np.float64,0x1d1ac6023a35a,0x1d1ac6023a35a,1 
+np.float64,0x7feacc71627598e2,0x400486b82121da19,1 +np.float64,0xa0287fa340510,0xa0287fa340510,1 +np.float64,0xffe352c5abe6a58b,0xc002623346060543,1 +np.float64,0x7fed577a23baaef3,0x3fda19bc8fa3b21f,1 +np.float64,0x3fde8dd5263d1baa,0x3fe08de0fedf7029,1 +np.float64,0x3feddd3be2bbba78,0x3ff599b2f3e018cc,1 +np.float64,0xc7a009f58f401,0xc7a009f58f401,1 +np.float64,0xbfef03d5a4fe07ab,0xbff74ee08681f47b,1 +np.float64,0x7fe2cf60eea59ec1,0x3fe905fb44f8cc60,1 +np.float64,0xbfe498fcab6931fa,0xbfe8023a6ff8becf,1 +np.float64,0xbfef7142acfee285,0xbff7fd196133a595,1 +np.float64,0xd214ffdba42a0,0xd214ffdba42a0,1 +np.float64,0x8006de7d78cdbcfc,0x8006de7d78cdbcfc,1 +np.float64,0xb247d34f648fb,0xb247d34f648fb,1 +np.float64,0xbfdd5bece6bab7da,0xbfdf9ba63ca2c5b2,1 +np.float64,0x7fe874650af0e8c9,0x3fe74204e122c10f,1 +np.float64,0x800768c49baed18a,0x800768c49baed18a,1 +np.float64,0x3fb4c0a192298140,0x3fb4cc4c8aa43300,1 +np.float64,0xbfa740531c2e80a0,0xbfa7446b7c74ae8e,1 +np.float64,0x7fe10d6edf221add,0x3fedbcd2eae26657,1 +np.float64,0xbfe9175d0f722eba,0xbfefeaca7f32c6e3,1 +np.float64,0x953e11d32a7c2,0x953e11d32a7c2,1 +np.float64,0x80032df90c465bf3,0x80032df90c465bf3,1 +np.float64,0xffec5b799638b6f2,0xbfe95cd2c69be12c,1 +np.float64,0xffe0c3cfa9a1879f,0x3fe20b99b0c108ce,1 +np.float64,0x3fb610d8e22c21b2,0x3fb61ee0d6c16df8,1 +np.float64,0xffe16bb39962d766,0xc016d370381b6b42,1 +np.float64,0xbfdc72edb238e5dc,0xbfde7bd2de10717a,1 +np.float64,0xffed52dee3baa5bd,0xc01994c08899129a,1 +np.float64,0xffa92aab08325550,0xbff2b881ce363cbd,1 +np.float64,0x7fe028282de0504f,0xc0157ff96c69a9c7,1 +np.float64,0xbfdb2151bf3642a4,0xbfdce196fcc35857,1 +np.float64,0x3fcffbd13c3ff7a2,0x3fd0554b5f0371ac,1 +np.float64,0x800d206bff1a40d8,0x800d206bff1a40d8,1 +np.float64,0x458f818c8b1f1,0x458f818c8b1f1,1 +np.float64,0x800a7b56a234f6ae,0x800a7b56a234f6ae,1 +np.float64,0xffe3d86161e7b0c2,0xbff58d0dbde9f188,1 +np.float64,0xe8ed82e3d1db1,0xe8ed82e3d1db1,1 +np.float64,0x3fe234e0176469c0,0x3fe476bd36b96a75,1 +np.float64,0xbfc7cb9c132f9738,0xbfc812c46e185e0b,1 +np.float64,0xbfeba116c1f7422e,0xbff2b6b7563ad854,1 +np.float64,0x7fe7041de62e083b,0x3f5d2b42aca47274,1 +np.float64,0xbfcf60f4ff3ec1e8,0xbfd002eb83406436,1 +np.float64,0xbfc06067a520c0d0,0xbfc0776e5839ecda,1 +np.float64,0x4384965a87093,0x4384965a87093,1 +np.float64,0xd2ed9d01a5db4,0xd2ed9d01a5db4,1 +np.float64,0x3fbea88cb63d5119,0x3fbece49cc34a379,1 +np.float64,0x3fe7e982ebefd306,0x3feda5bd4c435d43,1 +np.float64,0xffdb60a3e036c148,0xbfcb7ed21e7a8f49,1 +np.float64,0x7fdba9231eb75245,0xbfd750cab1536398,1 +np.float64,0x800d593534dab26b,0x800d593534dab26b,1 +np.float64,0xffdf15fb683e2bf6,0x3fb3aaea23357f06,1 +np.float64,0xbfd6f8a2e5adf146,0xbfd802e509d67c67,1 +np.float64,0x3feeaa31513d5463,0x3ff6c52147dc053c,1 +np.float64,0xf2f6dfd3e5edc,0xf2f6dfd3e5edc,1 +np.float64,0x7fd58d8279ab1b04,0x403243f23d02af2a,1 +np.float64,0x8000000000000001,0x8000000000000001,1 +np.float64,0x3fdffb8e0ebff71c,0x3fe1786cb0a6b0f3,1 +np.float64,0xc999826b93331,0xc999826b93331,1 +np.float64,0xffc4966f19292ce0,0x3ff0836c75c56cc7,1 +np.float64,0x7fef95a4b2ff2b48,0xbfbbe2c27c78154f,1 +np.float64,0xb8f1307f71e26,0xb8f1307f71e26,1 +np.float64,0x3fe807bc7eb00f79,0x3fedde19f2d3c42d,1 +np.float64,0x5e4b6580bc98,0x5e4b6580bc98,1 +np.float64,0xffe19353576326a6,0xc0278c51fee07d36,1 +np.float64,0xbfb0ca6f3e2194e0,0xbfb0d09be673fa72,1 +np.float64,0x3fea724211b4e484,0x3ff15ee06f0a0a13,1 +np.float64,0xbfda21e1c4b443c4,0xbfdbb041f3c86832,1 +np.float64,0x8008082b24901057,0x8008082b24901057,1 
+np.float64,0xbfd031aa4ea06354,0xbfd08c77729634bb,1 +np.float64,0xbfc407e153280fc4,0xbfc432275711df5f,1 +np.float64,0xbb4fa4b5769f5,0xbb4fa4b5769f5,1 +np.float64,0x7fed6d1daffada3a,0xc037a14bc7b41fab,1 +np.float64,0xffeee589943dcb12,0x3ff2abfe47037778,1 +np.float64,0x301379d260270,0x301379d260270,1 +np.float64,0xbfec2fefc2b85fe0,0xbff36362c0363e06,1 +np.float64,0xbfe0b1c82e216390,0xbfe264f503f7c22c,1 +np.float64,0xbfea2bce78f4579d,0xbff112d6f07935ea,1 +np.float64,0x18508ef230a13,0x18508ef230a13,1 +np.float64,0x800667a74d6ccf4f,0x800667a74d6ccf4f,1 +np.float64,0x79ce5c8cf39cc,0x79ce5c8cf39cc,1 +np.float64,0x3feda61c8efb4c39,0x3ff54c9ade076f54,1 +np.float64,0x3fe27e06b0e4fc0d,0x3fe4de665c1dc3ca,1 +np.float64,0xbfd15fea2722bfd4,0xbfd1d081c55813b0,1 +np.float64,0xbfe5222c4cea4458,0xbfe8db62deb7d2ad,1 +np.float64,0xbfe8a16c33b142d8,0xbfef02d5831592a8,1 +np.float64,0x3fdb60e7c4b6c1d0,0x3fdd2e4265c4c3b6,1 +np.float64,0x800076d62b60edad,0x800076d62b60edad,1 +np.float64,0xbfec8f1527791e2a,0xbff3da7ed3641e8d,1 +np.float64,0x2af03bfe55e08,0x2af03bfe55e08,1 +np.float64,0xa862ee0950c5e,0xa862ee0950c5e,1 +np.float64,0x7fea5a7c1eb4b4f7,0xbffa6f07d28ef211,1 +np.float64,0x90e118fb21c23,0x90e118fb21c23,1 +np.float64,0xbfead0721bf5a0e4,0xbff1c6c7a771a128,1 +np.float64,0x3f63f4a4c027e94a,0x3f63f4a75665da67,1 +np.float64,0x3fece0efa579c1e0,0x3ff443bec52f021e,1 +np.float64,0xbfdbe743b737ce88,0xbfddd129bff89c15,1 +np.float64,0x3fd48c9b8fa91938,0x3fd5492a630a8cb5,1 +np.float64,0x3ff0000000000000,0x3ff8eb245cbee3a6,1 +np.float64,0xbfd51ea33baa3d46,0xbfd5ebd5dc710204,1 +np.float64,0x3fcfbab0183f7560,0x3fd032a054580b00,1 +np.float64,0x8007abce13cf579d,0x8007abce13cf579d,1 +np.float64,0xbfef0f4723be1e8e,0xbff760c7008e8913,1 +np.float64,0x8006340f524c681f,0x8006340f524c681f,1 +np.float64,0x87b7d7010f71,0x87b7d7010f71,1 +np.float64,0x3fe9422da9b2845b,0x3ff02052e6148c45,1 +np.float64,0x7fddd259b93ba4b2,0xc000731aa33d84b6,1 +np.float64,0x3fe0156d12202ada,0x3fe1972ba309cb29,1 +np.float64,0x8004f1264b89e24d,0x8004f1264b89e24d,1 +np.float64,0x3fececdcacb9d9b9,0x3ff4534d5861f731,1 +np.float64,0x3fd1790ab822f215,0x3fd1eb97b1bb6fb4,1 +np.float64,0xffce5d11863cba24,0xbfcb4f38c17210da,1 +np.float64,0x800a30c32a546187,0x800a30c32a546187,1 +np.float64,0x3fa58cc61c2b198c,0x3fa59008add7233e,1 +np.float64,0xbfe0ac77d62158f0,0xbfe25de3dba0bc4a,1 +np.float64,0xeb8c5753d718b,0xeb8c5753d718b,1 +np.float64,0x3fee5438dafca872,0x3ff644fef7e7adb5,1 +np.float64,0x3faad1eb2c35a3e0,0x3faad83499f94057,1 +np.float64,0x3fe39152c46722a6,0x3fe66fba0b96ab6e,1 +np.float64,0xffd6fd17712dfa2e,0xc010d697d1ab8731,1 +np.float64,0x5214a888a4296,0x5214a888a4296,1 +np.float64,0x8000127a5da024f5,0x8000127a5da024f5,1 +np.float64,0x7feb3a366cb6746c,0x3fbe49bd8d5f213a,1 +np.float64,0xca479501948f3,0xca479501948f3,1 +np.float64,0x7fe7c799ce6f8f33,0xbfd796cd98dc620c,1 +np.float64,0xffe20bcf30a4179e,0xbff8ca5453fa088f,1 +np.float64,0x3fe624638a6c48c7,0x3fea83f123832c3c,1 +np.float64,0xbfe5f1377c6be26f,0xbfea2e143a2d522c,1 +np.float64,0x7fd193f9f8a327f3,0xbfb04ee2602574d4,1 +np.float64,0xbfe7419d2fee833a,0xbfec737f140d363d,1 +np.float64,0x1,0x1,1 +np.float64,0x7fe2ac246c655848,0x3fd14fee3237727a,1 +np.float64,0xa459b42948b37,0xa459b42948b37,1 +np.float64,0x3fb26155ae24c2ab,0x3fb2696fc446d4c6,1 +np.float64,0xbfdd7b332e3af666,0xbfdfc296c21f1aa8,1 +np.float64,0xbfe00dbda4a01b7c,0xbfe18d2b060f0506,1 +np.float64,0x8003bb22d3e77646,0x8003bb22d3e77646,1 +np.float64,0x3fee21b0a57c4361,0x3ff5fb6a21dc911c,1 +np.float64,0x80ca69270194d,0x80ca69270194d,1 
+np.float64,0xbfd6d80350adb006,0xbfd7ddb501edbde0,1 +np.float64,0xd2f8b801a5f2,0xd2f8b801a5f2,1 +np.float64,0xbfe856b3f170ad68,0xbfee7334fdc49296,1 +np.float64,0x3fed5c1b20bab836,0x3ff4e73ee5d5c7f3,1 +np.float64,0xbfd58085a5ab010c,0xbfd6596ddc381ffa,1 +np.float64,0x3fe4f0134b29e027,0x3fe88b70602fbd21,1 +np.float64,0xffc9098fdc321320,0x4011c334a74a92cf,1 +np.float64,0x794749bef28ea,0x794749bef28ea,1 +np.float64,0xbfc86b547f30d6a8,0xbfc8b84a4fafe0af,1 +np.float64,0x7fe1356b9da26ad6,0x3fd270bca208d899,1 +np.float64,0x7fca0ef1aa341de2,0xbff851044c0734fa,1 +np.float64,0x80064cb8b62c9972,0x80064cb8b62c9972,1 +np.float64,0xffd3a09a83a74136,0x3ffb66dae0accdf5,1 +np.float64,0x800e301aa15c6035,0x800e301aa15c6035,1 +np.float64,0x800e51f323bca3e6,0x800e51f323bca3e6,1 +np.float64,0x7ff0000000000000,0xfff8000000000000,1 +np.float64,0x800c4278c87884f2,0x800c4278c87884f2,1 +np.float64,0xbfe8481649f0902c,0xbfee576772695096,1 +np.float64,0xffe2344e3fa4689c,0x3fb10442ec0888de,1 +np.float64,0xbfeada313d75b462,0xbff1d1aee3fab3a9,1 +np.float64,0x8009ddfb1333bbf7,0x8009ddfb1333bbf7,1 +np.float64,0x7fed3314c93a6629,0x3ff7a9b12dc1cd37,1 +np.float64,0x3fd55c26da2ab84e,0x3fd630a7b8aac78a,1 +np.float64,0x800cdb5203f9b6a4,0x800cdb5203f9b6a4,1 +np.float64,0xffd04a875da0950e,0x4009a13810ab121d,1 +np.float64,0x800f1acb527e3597,0x800f1acb527e3597,1 +np.float64,0xbf9519bf282a3380,0xbf951a82e9b955ff,1 +np.float64,0x3fcd7a42fa3af486,0x3fce028f3c51072d,1 +np.float64,0xbfdd3e21b73a7c44,0xbfdf769f2ff2480b,1 +np.float64,0xffd4361e2aa86c3c,0xbfc211ce8e9f792c,1 +np.float64,0x7fccf97f6939f2fe,0xbff8464bad830f06,1 +np.float64,0x800ce47fb939c900,0x800ce47fb939c900,1 +np.float64,0xffe9e51df173ca3b,0xbfceaf990d652c4e,1 +np.float64,0x3fe05bba5b20b775,0x3fe1f326e4455442,1 +np.float64,0x800a29b4b134536a,0x800a29b4b134536a,1 +np.float64,0xe6f794b7cdef3,0xe6f794b7cdef3,1 +np.float64,0xffb5b688ce2b6d10,0x3ff924bb97ae2f6d,1 +np.float64,0x7fa74105d82e820b,0x3fd49643aaa9eee4,1 +np.float64,0x80020d15f7a41a2d,0x80020d15f7a41a2d,1 +np.float64,0x3fd6a983d5ad5308,0x3fd7a8cc8835b5b8,1 +np.float64,0x7fcd9798f03b2f31,0x3fc534c2f7bf4721,1 +np.float64,0xffdd31873a3a630e,0xbfe3171fcdffb3f7,1 +np.float64,0x80075183234ea307,0x80075183234ea307,1 +np.float64,0x82f3132505e63,0x82f3132505e63,1 +np.float64,0x3febfd9cb837fb39,0x3ff325bbf812515d,1 +np.float64,0xbfb4630fda28c620,0xbfb46e1f802ec278,1 +np.float64,0x3feeed7c89fddafa,0x3ff72c20ce5a9ee4,1 +np.float64,0x7fd3dcb3c127b967,0x40123d27ec9ec31d,1 +np.float64,0xbfe923450c72468a,0xbff00149c5742725,1 +np.float64,0x7fdef7f91abdeff1,0xbfe02ceb21f7923d,1 +np.float64,0x7fdd70d28fbae1a4,0xbfefcc5c9d10cdfd,1 +np.float64,0x800ca445a8d9488c,0x800ca445a8d9488c,1 +np.float64,0x7fec2754e1f84ea9,0x40173f6c1c97f825,1 +np.float64,0x7fcbca31f7379463,0x401e26bd2667075b,1 +np.float64,0x8003fa1d0847f43b,0x8003fa1d0847f43b,1 +np.float64,0xffe95cf85932b9f0,0xc01308e60278aa11,1 +np.float64,0x8009c53948f38a73,0x8009c53948f38a73,1 +np.float64,0x3fdcca9226b99524,0x3fdee7a008f75d41,1 +np.float64,0xbfe9ee241f33dc48,0xbff0d16bfff6c8e9,1 +np.float64,0xbfb3365058266ca0,0xbfb33f9176ebb51d,1 +np.float64,0x7fa98e10f4331c21,0x3fdee04ffd31314e,1 +np.float64,0xbfe1a11aea634236,0xbfe3a8e3d84fda38,1 +np.float64,0xbfd8df051131be0a,0xbfda342805d1948b,1 +np.float64,0x3d49a2407a935,0x3d49a2407a935,1 +np.float64,0xfc51eefff8a3e,0xfc51eefff8a3e,1 +np.float64,0xda63950bb4c73,0xda63950bb4c73,1 +np.float64,0x80050f3d4fea1e7b,0x80050f3d4fea1e7b,1 +np.float64,0x3fcdbd6e453b7ae0,0x3fce497478c28e77,1 +np.float64,0x7ebd4932fd7aa,0x7ebd4932fd7aa,1 
+np.float64,0x7fa3904eac27209c,0xc0015f3125efc151,1 +np.float64,0x7fc59f956b2b3f2a,0xc00c012e7a2c281f,1 +np.float64,0xbfd436d716a86dae,0xbfd4ea13533a942b,1 +np.float64,0x9347ae3d268f6,0x9347ae3d268f6,1 +np.float64,0xffd001764d2002ec,0xbffab3462e515623,1 +np.float64,0x3fe6f406662de80d,0x3febe9bac3954999,1 +np.float64,0x3f943ecaf8287d96,0x3f943f77dee5e77f,1 +np.float64,0x3fd6250efcac4a1c,0x3fd712afa947d56f,1 +np.float64,0xbfe849ff777093ff,0xbfee5b089d03391f,1 +np.float64,0xffd3b8ef8f2771e0,0x4000463ff7f29214,1 +np.float64,0xbfc3bae9252775d4,0xbfc3e34c133f1933,1 +np.float64,0xbfea93943df52728,0xbff18355e4fc341d,1 +np.float64,0x3fc4d922ad29b245,0x3fc508d66869ef29,1 +np.float64,0x4329694a8652e,0x4329694a8652e,1 +np.float64,0x8834f1a71069e,0x8834f1a71069e,1 +np.float64,0xe0e5be8dc1cb8,0xe0e5be8dc1cb8,1 +np.float64,0x7fef4d103afe9a1f,0xc0047b88b94554fe,1 +np.float64,0x3fe9b57af4f36af6,0x3ff0963831d51c3f,1 +np.float64,0x3fe081e2fa6103c6,0x3fe22572e41be655,1 +np.float64,0x3fd78cf7b42f19ef,0x3fd8acafa1ad776a,1 +np.float64,0x7fbffd58d43ffab1,0x3fb16092c7de6036,1 +np.float64,0xbfe1e8bfae23d180,0xbfe40c1c6277dd52,1 +np.float64,0x800a9f59fb153eb4,0x800a9f59fb153eb4,1 +np.float64,0xffebe14e33b7c29c,0x3fe0ec532f4deedd,1 +np.float64,0xffc36ca00426d940,0xc000806a712d6e83,1 +np.float64,0xbfcc2be82d3857d0,0xbfcca2a7d372ec64,1 +np.float64,0x800c03b908780772,0x800c03b908780772,1 +np.float64,0xf315a64be62b5,0xf315a64be62b5,1 +np.float64,0xbfe644043cec8808,0xbfeab974d3dc6d80,1 +np.float64,0x3fedb7de3cbb6fbc,0x3ff56549a5acd324,1 +np.float64,0xbfb1a875522350e8,0xbfb1afa41dee338d,1 +np.float64,0xffee8d4a407d1a94,0x3fead1749a636ff6,1 +np.float64,0x8004061c13080c39,0x8004061c13080c39,1 +np.float64,0x3fe650ae7feca15c,0x3feacefb8bc25f64,1 +np.float64,0x3fda8340e6b50682,0x3fdc24275cab1df8,1 +np.float64,0x8009084344321087,0x8009084344321087,1 +np.float64,0x7fdd19cb823a3396,0xbfd1d8fb35d89e3f,1 +np.float64,0xbfe893172571262e,0xbfeee716b592b93c,1 +np.float64,0x8ff5acc11fec,0x8ff5acc11fec,1 +np.float64,0xbfdca0c57cb9418a,0xbfdeb42465a1b59e,1 +np.float64,0xffd77bd2a3aef7a6,0x4012cd69e85b82d8,1 +np.float64,0xbfe6ea78982dd4f1,0xbfebd8ec61fb9e1f,1 +np.float64,0x7fe14b1d80a2963a,0xc02241642102cf71,1 +np.float64,0x3fe712bf286e257e,0x3fec20012329a7fb,1 +np.float64,0x7fcb6fa4d636df49,0x400b899d14a886b3,1 +np.float64,0x3fb82cb39a305960,0x3fb83f29c5f0822e,1 +np.float64,0x7fed694c8b3ad298,0xbfe2724373c69808,1 +np.float64,0xbfcd21229f3a4244,0xbfcda497fc3e1245,1 +np.float64,0x564d3770ac9a8,0x564d3770ac9a8,1 +np.float64,0xf4409e13e8814,0xf4409e13e8814,1 +np.float64,0x80068dca9a8d1b96,0x80068dca9a8d1b96,1 +np.float64,0xbfe13f82afe27f06,0xbfe3236ddded353f,1 +np.float64,0x80023f8114647f03,0x80023f8114647f03,1 +np.float64,0xeafba7dfd5f75,0xeafba7dfd5f75,1 +np.float64,0x3feca74ddeb94e9c,0x3ff3f95dcce5a227,1 +np.float64,0x10000000000000,0x10000000000000,1 +np.float64,0xbfebdb4141f7b682,0xbff2fc29823ac64a,1 +np.float64,0xbfcd75ee2f3aebdc,0xbfcdfdfd87cc6a29,1 +np.float64,0x7fc010cda420219a,0x3fae4ca2cf1f2657,1 +np.float64,0x1a90209e35205,0x1a90209e35205,1 +np.float64,0x8008057d01900afa,0x8008057d01900afa,1 +np.float64,0x3f9cb5f280396be5,0x3f9cb7dfb4e4be4e,1 +np.float64,0xffe1bbb60b63776c,0xc00011b1ffcb2561,1 +np.float64,0xffda883f6fb5107e,0x4044238ef4e2a198,1 +np.float64,0x3fc07c0b4a20f817,0x3fc09387de9eebcf,1 +np.float64,0x8003a9ebc0c753d8,0x8003a9ebc0c753d8,1 +np.float64,0x1d7fd5923affc,0x1d7fd5923affc,1 +np.float64,0xbfe9cd8cf9b39b1a,0xbff0af43e567ba4a,1 +np.float64,0x11285cb42250c,0x11285cb42250c,1 
+np.float64,0xffe81ae1ccb035c3,0xbfe038be7eb563a6,1 +np.float64,0xbfe56473b1eac8e8,0xbfe94654d8ab9e75,1 +np.float64,0x3fee904619fd208c,0x3ff69e198152fe17,1 +np.float64,0xbfeeb9a2cbfd7346,0xbff6dc8d96da78cd,1 +np.float64,0x8006cdfa59ed9bf5,0x8006cdfa59ed9bf5,1 +np.float64,0x8008f2366d31e46d,0x8008f2366d31e46d,1 +np.float64,0x8008d5f91e31abf3,0x8008d5f91e31abf3,1 +np.float64,0x3fe85886f8b0b10e,0x3fee76af16f5a126,1 +np.float64,0x3fefb9b2b73f7365,0x3ff8745128fa3e3b,1 +np.float64,0x7fdf3e721f3e7ce3,0xbfb19381541ca2a8,1 +np.float64,0x3fd2768c41a4ed18,0x3fd2fe2f85a3f3a6,1 +np.float64,0xbfcabe3c6a357c78,0xbfcb239fb88bc260,1 +np.float64,0xffdffb6a3dbff6d4,0xbff7af4759fd557c,1 +np.float64,0x800817f75f302fef,0x800817f75f302fef,1 +np.float64,0xbfe6a1d1762d43a3,0xbfeb5a399a095ef3,1 +np.float64,0x7fd6f32f912de65e,0x40016dedc51aabd0,1 +np.float64,0x3fc6cb26652d964d,0x3fc7099f047d924a,1 +np.float64,0x3fe8b975d67172ec,0x3fef31946123c0e7,1 +np.float64,0xffe44a09d1e89413,0x3fdee9e5eac6e540,1 +np.float64,0xbfece76d4cb9cedb,0xbff44c34849d07ba,1 +np.float64,0x7feb76027036ec04,0x3fe08595a5e263ac,1 +np.float64,0xffe194f591a329ea,0x3fbe5bd626400a70,1 +np.float64,0xbfc170698122e0d4,0xbfc18c3de8b63565,1 +np.float64,0x3fc82b2c0f305658,0x3fc875c3b5fbcd08,1 +np.float64,0x3fd5015634aa02ac,0x3fd5cb1df07213c3,1 +np.float64,0x7fe640884b6c8110,0xbff66255a420abb5,1 +np.float64,0x5a245206b448b,0x5a245206b448b,1 +np.float64,0xffe9d9fa2f73b3f4,0xc0272b0dd34ab9bf,1 +np.float64,0x3fd990e8aab321d0,0x3fdb04cd3a29bcc3,1 +np.float64,0xde9dda8bbd3bc,0xde9dda8bbd3bc,1 +np.float64,0xbfe81b32b4703666,0xbfee029937fa9f5a,1 +np.float64,0xbfe68116886d022d,0xbfeb21c62081cb73,1 +np.float64,0x3fb8da191231b432,0x3fb8ee28c71507d3,1 +np.float64,0x3fb111395a222273,0x3fb117b57de3dea4,1 +np.float64,0xffbafadc6a35f5b8,0x3ffcc6d2370297b9,1 +np.float64,0x8002ca475b05948f,0x8002ca475b05948f,1 +np.float64,0xbfeafef57875fdeb,0xbff1fb1315676f24,1 +np.float64,0x7fcda427d73b484f,0xbff9f70212694d17,1 +np.float64,0xffe2517b3ba4a2f6,0xc029ca6707305bf4,1 +np.float64,0x7fc5ee156b2bdc2a,0xbff8384b59e9056e,1 +np.float64,0xbfec22af3278455e,0xbff3530fe25816b4,1 +np.float64,0x6b5a8c2cd6b52,0x6b5a8c2cd6b52,1 +np.float64,0xffdaf6c4b935ed8a,0x4002f00ce58affcf,1 +np.float64,0x800a41813c748303,0x800a41813c748303,1 +np.float64,0xbfd09a1269213424,0xbfd0fc0a0c5de8eb,1 +np.float64,0x7fa2cb74d42596e9,0x3fc3d40e000fa69d,1 +np.float64,0x7ff8000000000000,0x7ff8000000000000,1 +np.float64,0x3fbfbf8ed63f7f1e,0x3fbfe97bcad9f53a,1 +np.float64,0x7fe0ebba65a1d774,0x401b0f17b28618df,1 +np.float64,0x3fd02c3a25a05874,0x3fd086aa55b19c9c,1 +np.float64,0xec628f95d8c52,0xec628f95d8c52,1 +np.float64,0x3fd319329fa63264,0x3fd3afb04e0dec63,1 +np.float64,0x180e0ade301c2,0x180e0ade301c2,1 +np.float64,0xbfe8d78324f1af06,0xbfef6c66153064ee,1 +np.float64,0xffb89fa200313f48,0xbfeb96ff2d9358dc,1 +np.float64,0x7fe6abcf86ed579e,0xc0269f4de86365ec,1 +np.float64,0x7fdff8cd65bff19a,0xbfd0f7c6b9052c9a,1 +np.float64,0xbfd2e3a53d25c74a,0xbfd37520cda5f6b2,1 +np.float64,0x7fe844b096708960,0x3ff696a6182e5a7a,1 +np.float64,0x7fdce0c7a3b9c18e,0x3fd42875d69ed379,1 +np.float64,0xffba5a91cc34b520,0x4001b571e8991951,1 +np.float64,0xffe78fe4a6ef1fc9,0x3ff4507b31f5b3bc,1 +np.float64,0xbfd7047493ae08ea,0xbfd810618a53fffb,1 +np.float64,0xc6559def8cab4,0xc6559def8cab4,1 +np.float64,0x3fe75d67a76ebacf,0x3feca56817de65e4,1 +np.float64,0xffd24adbd6a495b8,0xc012c491addf2df5,1 +np.float64,0x7fed35e28dba6bc4,0x403a0fa555ff7ec6,1 +np.float64,0x80078c4afa0f1897,0x80078c4afa0f1897,1 +np.float64,0xa6ec39114dd87,0xa6ec39114dd87,1 
+np.float64,0x7fb1bd33ba237a66,0x4010092bb6810fd4,1 +np.float64,0x800ecf215edd9e43,0x800ecf215edd9e43,1 +np.float64,0x3fb7c169242f82d2,0x3fb7d2ed30c462e6,1 +np.float64,0xbf71b46d60236900,0xbf71b4749a10c112,1 +np.float64,0x800d7851787af0a3,0x800d7851787af0a3,1 +np.float64,0x3fcb4a45e7369488,0x3fcbb61701a1bcec,1 +np.float64,0x3fd4e3682429c6d0,0x3fd5a9bcb916eb94,1 +np.float64,0x800497564c292ead,0x800497564c292ead,1 +np.float64,0xbfca3737a1346e70,0xbfca96a86ae5d687,1 +np.float64,0x19aa87e03356,0x19aa87e03356,1 +np.float64,0xffb2593fe624b280,0xc05fedb99b467ced,1 +np.float64,0xbfdd8748fbbb0e92,0xbfdfd1a7df17252c,1 +np.float64,0x8004c7afc7098f60,0x8004c7afc7098f60,1 +np.float64,0x7fde48b2bf3c9164,0xbfe36ef1158ed420,1 +np.float64,0xbfec8e0eb0f91c1d,0xbff3d9319705a602,1 +np.float64,0xffea1be204f437c3,0xc0144f67298c3e6f,1 +np.float64,0x7fdb906b593720d6,0xbfce99233396eda7,1 +np.float64,0x3fef0f114ffe1e22,0x3ff76072a258a51b,1 +np.float64,0x3fe3e284c8e7c50a,0x3fe6e9b05e17c999,1 +np.float64,0xbfbda9eef23b53e0,0xbfbdcc1abb443597,1 +np.float64,0x3feb6454d4f6c8aa,0x3ff26f65a85baba4,1 +np.float64,0x3fea317439f462e8,0x3ff118e2187ef33f,1 +np.float64,0x376ad0646ed5b,0x376ad0646ed5b,1 +np.float64,0x7fdd461a1c3a8c33,0x3f7ba20fb79e785f,1 +np.float64,0xebc520a3d78a4,0xebc520a3d78a4,1 +np.float64,0x3fca90fe53352200,0x3fcaf45c7fae234d,1 +np.float64,0xbfe80dd1de701ba4,0xbfede97e12cde9de,1 +np.float64,0x3fd242b00ea48560,0x3fd2c5cf9bf69a31,1 +np.float64,0x7fe46c057828d80a,0xbfe2f76837488f94,1 +np.float64,0x3fc162bea322c580,0x3fc17e517c958867,1 +np.float64,0xffebf0452ff7e08a,0x3ffc3fd95c257b54,1 +np.float64,0xffd88043c6310088,0x4008b05598d0d95f,1 +np.float64,0x800d8c49da5b1894,0x800d8c49da5b1894,1 +np.float64,0xbfed33b487ba6769,0xbff4b0ea941f8a6a,1 +np.float64,0x16b881e22d711,0x16b881e22d711,1 +np.float64,0x288bae0051177,0x288bae0051177,1 +np.float64,0xffc83a0fe8307420,0x4006eff03da17f86,1 +np.float64,0x3fc7868b252f0d18,0x3fc7cb4954290324,1 +np.float64,0xbfe195514b232aa2,0xbfe398aae6c8ed76,1 +np.float64,0x800c001ae7f80036,0x800c001ae7f80036,1 +np.float64,0x7feb82abe7370557,0xbff1e13fe6fad23c,1 +np.float64,0xffecf609cdf9ec13,0xc0112aa1805ae59e,1 +np.float64,0xffddd654f63bacaa,0x3fe46cce899f710d,1 +np.float64,0x3fe2163138642c62,0x3fe44b9c760acd4c,1 +np.float64,0x4e570dc09cae2,0x4e570dc09cae2,1 +np.float64,0x7fe9e8d091f3d1a0,0xc000fe20f8e9a4b5,1 +np.float64,0x7fe60042952c0084,0x3fd0aa740f394c2a,1 diff --git a/python/numpy/_core/tests/data/umath-validation-set-tanh.csv b/python/numpy/_core/tests/data/umath-validation-set-tanh.csv new file mode 100644 index 000000000..9e3ddc60f --- /dev/null +++ b/python/numpy/_core/tests/data/umath-validation-set-tanh.csv @@ -0,0 +1,1429 @@ +dtype,input,output,ulperrortol +np.float32,0xbe26ebb0,0xbe25752f,2 +np.float32,0xbe22ecc0,0xbe219054,2 +np.float32,0x8010a6b3,0x8010a6b3,2 +np.float32,0x3135da,0x3135da,2 +np.float32,0xbe982afc,0xbe93d727,2 +np.float32,0x16a51f,0x16a51f,2 +np.float32,0x491e56,0x491e56,2 +np.float32,0x4bf7ca,0x4bf7ca,2 +np.float32,0x3eebc21c,0x3edc65b2,2 +np.float32,0x80155c94,0x80155c94,2 +np.float32,0x3e14f626,0x3e13eb6a,2 +np.float32,0x801a238f,0x801a238f,2 +np.float32,0xbde33a80,0xbde24cf9,2 +np.float32,0xbef8439c,0xbee67a51,2 +np.float32,0x7f60d0a5,0x3f800000,2 +np.float32,0x190ee3,0x190ee3,2 +np.float32,0x80759113,0x80759113,2 +np.float32,0x800afa9f,0x800afa9f,2 +np.float32,0x7110cf,0x7110cf,2 +np.float32,0x3cf709f0,0x3cf6f6c6,2 +np.float32,0x3ef58da4,0x3ee44fa7,2 +np.float32,0xbf220ff2,0xbf0f662c,2 +np.float32,0xfd888078,0xbf800000,2 
+np.float32,0xbe324734,0xbe307f9b,2 +np.float32,0x3eb5cb4f,0x3eae8560,2 +np.float32,0xbf7e7d02,0xbf425493,2 +np.float32,0x3ddcdcf0,0x3ddc02c2,2 +np.float32,0x8026d27a,0x8026d27a,2 +np.float32,0x3d4c0fb1,0x3d4be484,2 +np.float32,0xbf27d2c9,0xbf134d7c,2 +np.float32,0x8029ff80,0x8029ff80,2 +np.float32,0x7f046d2c,0x3f800000,2 +np.float32,0x13f94b,0x13f94b,2 +np.float32,0x7f4ff922,0x3f800000,2 +np.float32,0x3f4ea2ed,0x3f2b03e4,2 +np.float32,0x3e7211f0,0x3e6da8cf,2 +np.float32,0x7f39d0cf,0x3f800000,2 +np.float32,0xfee57fc6,0xbf800000,2 +np.float32,0xff6fb326,0xbf800000,2 +np.float32,0xff800000,0xbf800000,2 +np.float32,0x3f0437a4,0x3ef32fcd,2 +np.float32,0xff546d1e,0xbf800000,2 +np.float32,0x3eb5645b,0x3eae2a5c,2 +np.float32,0x3f08a6e5,0x3ef9ff8f,2 +np.float32,0x80800000,0x80800000,2 +np.float32,0x7f3413da,0x3f800000,2 +np.float32,0xfd760140,0xbf800000,2 +np.float32,0x7f3ad24a,0x3f800000,2 +np.float32,0xbf56e812,0xbf2f7f14,2 +np.float32,0xbece0338,0xbec3920a,2 +np.float32,0xbeede54a,0xbede22ae,2 +np.float32,0x7eaeb215,0x3f800000,2 +np.float32,0x3c213c00,0x3c213aab,2 +np.float32,0x7eaac217,0x3f800000,2 +np.float32,0xbf2f740e,0xbf1851a6,2 +np.float32,0x7f6ca5b8,0x3f800000,2 +np.float32,0xff42ce95,0xbf800000,2 +np.float32,0x802e4189,0x802e4189,2 +np.float32,0x80000001,0x80000001,2 +np.float32,0xbf31f298,0xbf19ebbe,2 +np.float32,0x3dcb0e6c,0x3dca64c1,2 +np.float32,0xbf29599c,0xbf145204,2 +np.float32,0x2e33f2,0x2e33f2,2 +np.float32,0x1c11e7,0x1c11e7,2 +np.float32,0x3f3b188d,0x3f1fa302,2 +np.float32,0x113300,0x113300,2 +np.float32,0x8054589e,0x8054589e,2 +np.float32,0x2a9e69,0x2a9e69,2 +np.float32,0xff513af7,0xbf800000,2 +np.float32,0x7f2e987a,0x3f800000,2 +np.float32,0x807cd426,0x807cd426,2 +np.float32,0x7f0dc4e4,0x3f800000,2 +np.float32,0x7e7c0d56,0x3f800000,2 +np.float32,0x5cb076,0x5cb076,2 +np.float32,0x80576426,0x80576426,2 +np.float32,0xff616222,0xbf800000,2 +np.float32,0xbf7accb5,0xbf40c005,2 +np.float32,0xfe4118c8,0xbf800000,2 +np.float32,0x804b9327,0x804b9327,2 +np.float32,0x3ed2b428,0x3ec79026,2 +np.float32,0x3f4a048f,0x3f286d41,2 +np.float32,0x800000,0x800000,2 +np.float32,0x7efceb9f,0x3f800000,2 +np.float32,0xbf5fe2d3,0xbf34246f,2 +np.float32,0x807e086a,0x807e086a,2 +np.float32,0x7ef5e856,0x3f800000,2 +np.float32,0xfc546f00,0xbf800000,2 +np.float32,0x3a65b890,0x3a65b88c,2 +np.float32,0x800cfa70,0x800cfa70,2 +np.float32,0x80672ea7,0x80672ea7,2 +np.float32,0x3f2bf3f2,0x3f160a12,2 +np.float32,0xbf0ab67e,0xbefd2004,2 +np.float32,0x3f2a0bb4,0x3f14c824,2 +np.float32,0xbeff5374,0xbeec12d7,2 +np.float32,0xbf221b58,0xbf0f6dff,2 +np.float32,0x7cc1f3,0x7cc1f3,2 +np.float32,0x7f234e3c,0x3f800000,2 +np.float32,0x3f60ff10,0x3f34b37d,2 +np.float32,0xbdd957f0,0xbdd887fe,2 +np.float32,0x801ce048,0x801ce048,2 +np.float32,0x7f3a8f76,0x3f800000,2 +np.float32,0xfdd13d08,0xbf800000,2 +np.float32,0x3e9af4a4,0x3e966445,2 +np.float32,0x1e55f3,0x1e55f3,2 +np.float32,0x327905,0x327905,2 +np.float32,0xbf03cf0b,0xbef28dad,2 +np.float32,0x3f0223d3,0x3eeff4f4,2 +np.float32,0xfdd96ff8,0xbf800000,2 +np.float32,0x428db8,0x428db8,2 +np.float32,0xbd74a200,0xbd7457a5,2 +np.float32,0x2a63a3,0x2a63a3,2 +np.float32,0x7e8aa9d7,0x3f800000,2 +np.float32,0x7f50b810,0x3f800000,2 +np.float32,0xbce5ec80,0xbce5dd0d,2 +np.float32,0x54711,0x54711,2 +np.float32,0x8074212a,0x8074212a,2 +np.float32,0xbf13d0ec,0xbf0551b5,2 +np.float32,0x80217f89,0x80217f89,2 +np.float32,0x3f300824,0x3f18b12f,2 +np.float32,0x7d252462,0x3f800000,2 +np.float32,0x807a154c,0x807a154c,2 +np.float32,0x8064d4b9,0x8064d4b9,2 +np.float32,0x804543b4,0x804543b4,2 
+np.float32,0x4c269e,0x4c269e,2 +np.float32,0xff39823b,0xbf800000,2 +np.float32,0x3f5040b1,0x3f2be80b,2 +np.float32,0xbf7028c1,0xbf3bfee5,2 +np.float32,0x3e94eb78,0x3e90db93,2 +np.float32,0x3ccc1b40,0x3ccc1071,2 +np.float32,0xbe8796f0,0xbe8481a1,2 +np.float32,0xfc767bc0,0xbf800000,2 +np.float32,0xbdd81ed0,0xbdd75259,2 +np.float32,0xbed31bfc,0xbec7e82d,2 +np.float32,0xbf350a9e,0xbf1be1c6,2 +np.float32,0x33d41f,0x33d41f,2 +np.float32,0x3f73e076,0x3f3db0b5,2 +np.float32,0x3f800000,0x3f42f7d6,2 +np.float32,0xfee27c14,0xbf800000,2 +np.float32,0x7f6e4388,0x3f800000,2 +np.float32,0x4ea19b,0x4ea19b,2 +np.float32,0xff2d75f2,0xbf800000,2 +np.float32,0x7ee225ca,0x3f800000,2 +np.float32,0x3f31cb4b,0x3f19d2a4,2 +np.float32,0x80554a9d,0x80554a9d,2 +np.float32,0x3f4d57fa,0x3f2a4c03,2 +np.float32,0x3eac6a88,0x3ea62e72,2 +np.float32,0x773520,0x773520,2 +np.float32,0x8079c20a,0x8079c20a,2 +np.float32,0xfeb1eb94,0xbf800000,2 +np.float32,0xfe8d81c0,0xbf800000,2 +np.float32,0xfeed6902,0xbf800000,2 +np.float32,0x8066bb65,0x8066bb65,2 +np.float32,0x7f800000,0x3f800000,2 +np.float32,0x1,0x1,2 +np.float32,0x3f2c66a4,0x3f16554a,2 +np.float32,0x3cd231,0x3cd231,2 +np.float32,0x3e932a64,0x3e8f3e0c,2 +np.float32,0xbf3ab1c3,0xbf1f6420,2 +np.float32,0xbc902b20,0xbc902751,2 +np.float32,0x7dac0a5b,0x3f800000,2 +np.float32,0x3f2b7e06,0x3f15bc93,2 +np.float32,0x75de0,0x75de0,2 +np.float32,0x8020b7bc,0x8020b7bc,2 +np.float32,0x3f257cda,0x3f11bb6b,2 +np.float32,0x807480e5,0x807480e5,2 +np.float32,0xfe00d758,0xbf800000,2 +np.float32,0xbd9b54e0,0xbd9b08cd,2 +np.float32,0x4dfbe3,0x4dfbe3,2 +np.float32,0xff645788,0xbf800000,2 +np.float32,0xbe92c80a,0xbe8ee360,2 +np.float32,0x3eb9b400,0x3eb1f77c,2 +np.float32,0xff20b69c,0xbf800000,2 +np.float32,0x623c28,0x623c28,2 +np.float32,0xff235748,0xbf800000,2 +np.float32,0xbf3bbc56,0xbf2006f3,2 +np.float32,0x7e6f78b1,0x3f800000,2 +np.float32,0x7e1584e9,0x3f800000,2 +np.float32,0xff463423,0xbf800000,2 +np.float32,0x8002861e,0x8002861e,2 +np.float32,0xbf0491d8,0xbef3bb6a,2 +np.float32,0x7ea3bc17,0x3f800000,2 +np.float32,0xbedde7ea,0xbed0fb49,2 +np.float32,0xbf4bac48,0xbf295c8b,2 +np.float32,0xff28e276,0xbf800000,2 +np.float32,0x7e8f3bf5,0x3f800000,2 +np.float32,0xbf0a4a73,0xbefc7c9d,2 +np.float32,0x7ec5bd96,0x3f800000,2 +np.float32,0xbf4c22e8,0xbf299f2c,2 +np.float32,0x3e3970a0,0x3e377064,2 +np.float32,0x3ecb1118,0x3ec10c88,2 +np.float32,0xff548a7a,0xbf800000,2 +np.float32,0xfe8ec550,0xbf800000,2 +np.float32,0x3e158985,0x3e147bb2,2 +np.float32,0x7eb79ad7,0x3f800000,2 +np.float32,0xbe811384,0xbe7cd1ab,2 +np.float32,0xbdc4b9e8,0xbdc41f94,2 +np.float32,0xe0fd5,0xe0fd5,2 +np.float32,0x3f2485f2,0x3f11142b,2 +np.float32,0xfdd3c3d8,0xbf800000,2 +np.float32,0xfe8458e6,0xbf800000,2 +np.float32,0x3f06e398,0x3ef74dd8,2 +np.float32,0xff4752cf,0xbf800000,2 +np.float32,0x6998e3,0x6998e3,2 +np.float32,0x626751,0x626751,2 +np.float32,0x806631d6,0x806631d6,2 +np.float32,0xbf0c3cf4,0xbeff6c54,2 +np.float32,0x802860f8,0x802860f8,2 +np.float32,0xff2952cb,0xbf800000,2 +np.float32,0xff31d40b,0xbf800000,2 +np.float32,0x7c389473,0x3f800000,2 +np.float32,0x3dcd2f1b,0x3dcc8010,2 +np.float32,0x3d70c29f,0x3d707bbc,2 +np.float32,0x3f6bd386,0x3f39f979,2 +np.float32,0x1efec9,0x1efec9,2 +np.float32,0x3f675518,0x3f37d338,2 +np.float32,0x5fdbe3,0x5fdbe3,2 +np.float32,0x5d684e,0x5d684e,2 +np.float32,0xbedfe748,0xbed2a4c7,2 +np.float32,0x3f0cb07a,0x3f000cdc,2 +np.float32,0xbf77151e,0xbf3f1f5d,2 +np.float32,0x7f038ea0,0x3f800000,2 +np.float32,0x3ea91be9,0x3ea3376f,2 +np.float32,0xbdf20738,0xbdf0e861,2 
+np.float32,0x807ea380,0x807ea380,2 +np.float32,0x2760ca,0x2760ca,2 +np.float32,0x7f20a544,0x3f800000,2 +np.float32,0x76ed83,0x76ed83,2 +np.float32,0x15a441,0x15a441,2 +np.float32,0x74c76d,0x74c76d,2 +np.float32,0xff3d5c2a,0xbf800000,2 +np.float32,0x7f6a76a6,0x3f800000,2 +np.float32,0x3eb87067,0x3eb0dabe,2 +np.float32,0xbf515cfa,0xbf2c83af,2 +np.float32,0xbdececc0,0xbdebdf9d,2 +np.float32,0x7f51b7c2,0x3f800000,2 +np.float32,0x3eb867ac,0x3eb0d30d,2 +np.float32,0xff50fd84,0xbf800000,2 +np.float32,0x806945e9,0x806945e9,2 +np.float32,0x298eed,0x298eed,2 +np.float32,0x441f53,0x441f53,2 +np.float32,0x8066d4b0,0x8066d4b0,2 +np.float32,0x3f6a479c,0x3f393dae,2 +np.float32,0xbf6ce2a7,0xbf3a7921,2 +np.float32,0x8064c3cf,0x8064c3cf,2 +np.float32,0xbf2d8146,0xbf170dfd,2 +np.float32,0x3b0e82,0x3b0e82,2 +np.float32,0xbea97574,0xbea387dc,2 +np.float32,0x67ad15,0x67ad15,2 +np.float32,0xbf68478f,0xbf38485a,2 +np.float32,0xff6f593b,0xbf800000,2 +np.float32,0xbeda26f2,0xbecdd806,2 +np.float32,0xbd216d50,0xbd2157ee,2 +np.float32,0x7a8544db,0x3f800000,2 +np.float32,0x801df20b,0x801df20b,2 +np.float32,0xbe14ba24,0xbe13b0a8,2 +np.float32,0xfdc6d8a8,0xbf800000,2 +np.float32,0x1d6b49,0x1d6b49,2 +np.float32,0x7f5ff1b8,0x3f800000,2 +np.float32,0x3f75e032,0x3f3e9625,2 +np.float32,0x7f2c5687,0x3f800000,2 +np.float32,0x3d95fb6c,0x3d95b6ee,2 +np.float32,0xbea515e4,0xbe9f97c8,2 +np.float32,0x7f2b2cd7,0x3f800000,2 +np.float32,0x3f076f7a,0x3ef8241e,2 +np.float32,0x5178ca,0x5178ca,2 +np.float32,0xbeb5976a,0xbeae5781,2 +np.float32,0x3e3c3563,0x3e3a1e13,2 +np.float32,0xbd208530,0xbd20702a,2 +np.float32,0x3eb03b04,0x3ea995ef,2 +np.float32,0x17fb9c,0x17fb9c,2 +np.float32,0xfca68e40,0xbf800000,2 +np.float32,0xbf5e7433,0xbf336a9f,2 +np.float32,0xff5b8d3d,0xbf800000,2 +np.float32,0x8003121d,0x8003121d,2 +np.float32,0xbe6dd344,0xbe69a3b0,2 +np.float32,0x67cc4,0x67cc4,2 +np.float32,0x9b01d,0x9b01d,2 +np.float32,0x127c13,0x127c13,2 +np.float32,0xfea5e3d6,0xbf800000,2 +np.float32,0xbdf5c610,0xbdf499c1,2 +np.float32,0x3aff4c00,0x3aff4beb,2 +np.float32,0x3b00afd0,0x3b00afc5,2 +np.float32,0x479618,0x479618,2 +np.float32,0x801cbd05,0x801cbd05,2 +np.float32,0x3ec9249f,0x3ebf6579,2 +np.float32,0x3535c4,0x3535c4,2 +np.float32,0xbeb4f662,0xbeadc915,2 +np.float32,0x8006fda6,0x8006fda6,2 +np.float32,0xbf4f3097,0xbf2b5239,2 +np.float32,0xbf3cb9a8,0xbf20a0e9,2 +np.float32,0x32ced0,0x32ced0,2 +np.float32,0x7ea34e76,0x3f800000,2 +np.float32,0x80063046,0x80063046,2 +np.float32,0x80727e8b,0x80727e8b,2 +np.float32,0xfd6b5780,0xbf800000,2 +np.float32,0x80109815,0x80109815,2 +np.float32,0xfdcc8a78,0xbf800000,2 +np.float32,0x81562,0x81562,2 +np.float32,0x803dfacc,0x803dfacc,2 +np.float32,0xbe204318,0xbe1ef75f,2 +np.float32,0xbf745d34,0xbf3de8e2,2 +np.float32,0xff13fdcc,0xbf800000,2 +np.float32,0x7f75ba8c,0x3f800000,2 +np.float32,0x806c04b4,0x806c04b4,2 +np.float32,0x3ec61ca6,0x3ebcc877,2 +np.float32,0xbeaea984,0xbea8301f,2 +np.float32,0xbf4dcd0e,0xbf2a8d34,2 +np.float32,0x802a01d3,0x802a01d3,2 +np.float32,0xbf747be5,0xbf3df6ad,2 +np.float32,0xbf75cbd2,0xbf3e8d0f,2 +np.float32,0x7db86576,0x3f800000,2 +np.float32,0xff49a2c3,0xbf800000,2 +np.float32,0xbedc5314,0xbecfa978,2 +np.float32,0x8078877b,0x8078877b,2 +np.float32,0xbead4824,0xbea6f499,2 +np.float32,0xbf3926e3,0xbf1e716c,2 +np.float32,0x807f4a1c,0x807f4a1c,2 +np.float32,0x7f2cd8fd,0x3f800000,2 +np.float32,0x806cfcca,0x806cfcca,2 +np.float32,0xff1aa048,0xbf800000,2 +np.float32,0x7eb9ea08,0x3f800000,2 +np.float32,0xbf1034bc,0xbf02ab3a,2 +np.float32,0xbd087830,0xbd086b44,2 
+np.float32,0x7e071034,0x3f800000,2 +np.float32,0xbefcc9de,0xbeea122f,2 +np.float32,0x80796d7a,0x80796d7a,2 +np.float32,0x33ce46,0x33ce46,2 +np.float32,0x8074a783,0x8074a783,2 +np.float32,0xbe95a56a,0xbe918691,2 +np.float32,0xbf2ff3f4,0xbf18a42d,2 +np.float32,0x1633e9,0x1633e9,2 +np.float32,0x7f0f104b,0x3f800000,2 +np.float32,0xbf800000,0xbf42f7d6,2 +np.float32,0x3d2cd6,0x3d2cd6,2 +np.float32,0xfed43e16,0xbf800000,2 +np.float32,0x3ee6faec,0x3ed87d2c,2 +np.float32,0x3f2c32d0,0x3f163352,2 +np.float32,0xff4290c0,0xbf800000,2 +np.float32,0xbf66500e,0xbf37546a,2 +np.float32,0x7dfb8fe3,0x3f800000,2 +np.float32,0x3f20ba5d,0x3f0e7b16,2 +np.float32,0xff30c7ae,0xbf800000,2 +np.float32,0x1728a4,0x1728a4,2 +np.float32,0x340d82,0x340d82,2 +np.float32,0xff7870b7,0xbf800000,2 +np.float32,0xbeac6ac4,0xbea62ea7,2 +np.float32,0xbef936fc,0xbee73c36,2 +np.float32,0x3ec7e12c,0x3ebe4ef8,2 +np.float32,0x80673488,0x80673488,2 +np.float32,0xfdf14c90,0xbf800000,2 +np.float32,0x3f182568,0x3f08726e,2 +np.float32,0x7ed7dcd0,0x3f800000,2 +np.float32,0x3de4da34,0x3de3e790,2 +np.float32,0xff7fffff,0xbf800000,2 +np.float32,0x4ff90c,0x4ff90c,2 +np.float32,0x3efb0d1c,0x3ee8b1d6,2 +np.float32,0xbf66e952,0xbf379ef4,2 +np.float32,0xba9dc,0xba9dc,2 +np.float32,0xff67c766,0xbf800000,2 +np.float32,0x7f1ffc29,0x3f800000,2 +np.float32,0x3f51c906,0x3f2cbe99,2 +np.float32,0x3f2e5792,0x3f179968,2 +np.float32,0x3ecb9750,0x3ec17fa0,2 +np.float32,0x7f3fcefc,0x3f800000,2 +np.float32,0xbe4e30fc,0xbe4b72f9,2 +np.float32,0x7e9bc4ce,0x3f800000,2 +np.float32,0x7e70aa1f,0x3f800000,2 +np.float32,0x14c6e9,0x14c6e9,2 +np.float32,0xbcf327c0,0xbcf3157a,2 +np.float32,0xff1fd204,0xbf800000,2 +np.float32,0x7d934a03,0x3f800000,2 +np.float32,0x8028bf1e,0x8028bf1e,2 +np.float32,0x7f0800b7,0x3f800000,2 +np.float32,0xfe04825c,0xbf800000,2 +np.float32,0x807210ac,0x807210ac,2 +np.float32,0x3f7faf7c,0x3f42d5fd,2 +np.float32,0x3e04a543,0x3e03e899,2 +np.float32,0x3e98ea15,0x3e94863e,2 +np.float32,0x3d2a2e48,0x3d2a153b,2 +np.float32,0x7fa00000,0x7fe00000,2 +np.float32,0x20a488,0x20a488,2 +np.float32,0x3f6ba86a,0x3f39e51a,2 +np.float32,0x0,0x0,2 +np.float32,0x3e892ddd,0x3e85fcfe,2 +np.float32,0x3e2da627,0x3e2c00e0,2 +np.float32,0xff000a50,0xbf800000,2 +np.float32,0x3eb749f4,0x3eafd739,2 +np.float32,0x8024c0ae,0x8024c0ae,2 +np.float32,0xfc8f3b40,0xbf800000,2 +np.float32,0xbf685fc7,0xbf385405,2 +np.float32,0x3f1510e6,0x3f063a4f,2 +np.float32,0x3f68e8ad,0x3f3895d8,2 +np.float32,0x3dba8608,0x3dba0271,2 +np.float32,0xbf16ea10,0xbf079017,2 +np.float32,0xb3928,0xb3928,2 +np.float32,0xfe447c00,0xbf800000,2 +np.float32,0x3db9cd57,0x3db94b45,2 +np.float32,0x803b66b0,0x803b66b0,2 +np.float32,0x805b5e02,0x805b5e02,2 +np.float32,0x7ec93f61,0x3f800000,2 +np.float32,0x8005a126,0x8005a126,2 +np.float32,0x6d8888,0x6d8888,2 +np.float32,0x3e21b7de,0x3e206314,2 +np.float32,0xbec9c31e,0xbebfedc2,2 +np.float32,0xbea88aa8,0xbea2b4e5,2 +np.float32,0x3d8fc310,0x3d8f86bb,2 +np.float32,0xbf3cc68a,0xbf20a8b8,2 +np.float32,0x432690,0x432690,2 +np.float32,0xbe51d514,0xbe4ef1a3,2 +np.float32,0xbcda6d20,0xbcda5fe1,2 +np.float32,0xfe24e458,0xbf800000,2 +np.float32,0xfedc8c14,0xbf800000,2 +np.float32,0x7f7e9bd4,0x3f800000,2 +np.float32,0x3ebcc880,0x3eb4ab44,2 +np.float32,0xbe0aa490,0xbe09cd44,2 +np.float32,0x3dc9158c,0x3dc870c3,2 +np.float32,0x3e5c319e,0x3e58dc90,2 +np.float32,0x1d4527,0x1d4527,2 +np.float32,0x2dbf5,0x2dbf5,2 +np.float32,0xbf1f121f,0xbf0d5534,2 +np.float32,0x7e3e9ab5,0x3f800000,2 +np.float32,0x7f74b5c1,0x3f800000,2 +np.float32,0xbf6321ba,0xbf35c42b,2 
+np.float32,0xbe5c7488,0xbe591c79,2 +np.float32,0x7e7b02cd,0x3f800000,2 +np.float32,0xfe7cbfa4,0xbf800000,2 +np.float32,0xbeace360,0xbea69a86,2 +np.float32,0x7e149b00,0x3f800000,2 +np.float32,0xbf61a700,0xbf35079a,2 +np.float32,0x7eb592a7,0x3f800000,2 +np.float32,0x3f2105e6,0x3f0eaf30,2 +np.float32,0xfd997a88,0xbf800000,2 +np.float32,0xff5d093b,0xbf800000,2 +np.float32,0x63aede,0x63aede,2 +np.float32,0x6907ee,0x6907ee,2 +np.float32,0xbf7578ee,0xbf3e680f,2 +np.float32,0xfea971e8,0xbf800000,2 +np.float32,0x3f21d0f5,0x3f0f3aed,2 +np.float32,0x3a50e2,0x3a50e2,2 +np.float32,0x7f0f5b1e,0x3f800000,2 +np.float32,0x805b9765,0x805b9765,2 +np.float32,0xbe764ab8,0xbe71a664,2 +np.float32,0x3eafac7f,0x3ea91701,2 +np.float32,0x807f4130,0x807f4130,2 +np.float32,0x7c5f31,0x7c5f31,2 +np.float32,0xbdbe0e30,0xbdbd8300,2 +np.float32,0x7ecfe4e0,0x3f800000,2 +np.float32,0xff7cb628,0xbf800000,2 +np.float32,0xff1842bc,0xbf800000,2 +np.float32,0xfd4163c0,0xbf800000,2 +np.float32,0x800e11f7,0x800e11f7,2 +np.float32,0x7f3adec8,0x3f800000,2 +np.float32,0x7f597514,0x3f800000,2 +np.float32,0xbe986e14,0xbe9414a4,2 +np.float32,0x800fa9d7,0x800fa9d7,2 +np.float32,0xff5b79c4,0xbf800000,2 +np.float32,0x80070565,0x80070565,2 +np.float32,0xbee5628e,0xbed72d60,2 +np.float32,0x3f438ef2,0x3f24b3ca,2 +np.float32,0xcda91,0xcda91,2 +np.float32,0x7e64151a,0x3f800000,2 +np.float32,0xbe95d584,0xbe91b2c7,2 +np.float32,0x8022c2a1,0x8022c2a1,2 +np.float32,0x7e7097bf,0x3f800000,2 +np.float32,0x80139035,0x80139035,2 +np.float32,0x804de2cb,0x804de2cb,2 +np.float32,0xfde5d178,0xbf800000,2 +np.float32,0x6d238,0x6d238,2 +np.float32,0x807abedc,0x807abedc,2 +np.float32,0x3f450a12,0x3f259129,2 +np.float32,0x3ef1c120,0x3ee141f2,2 +np.float32,0xfeb64dae,0xbf800000,2 +np.float32,0x8001732c,0x8001732c,2 +np.float32,0x3f76062e,0x3f3ea711,2 +np.float32,0x3eddd550,0x3ed0ebc8,2 +np.float32,0xff5ca1d4,0xbf800000,2 +np.float32,0xbf49dc5e,0xbf285673,2 +np.float32,0x7e9e5438,0x3f800000,2 +np.float32,0x7e83625e,0x3f800000,2 +np.float32,0x3f5dc41c,0x3f3310da,2 +np.float32,0x3f583efa,0x3f30342f,2 +np.float32,0xbe26bf88,0xbe254a2d,2 +np.float32,0xff1e0beb,0xbf800000,2 +np.float32,0xbe2244c8,0xbe20ec86,2 +np.float32,0xff0b1630,0xbf800000,2 +np.float32,0xff338dd6,0xbf800000,2 +np.float32,0x3eafc22c,0x3ea92a51,2 +np.float32,0x800ea07f,0x800ea07f,2 +np.float32,0x3f46f006,0x3f26aa7e,2 +np.float32,0x3e5f57cd,0x3e5bde16,2 +np.float32,0xbf1b2d8e,0xbf0a9a93,2 +np.float32,0xfeacdbe0,0xbf800000,2 +np.float32,0x7e5ea4bc,0x3f800000,2 +np.float32,0xbf51cbe2,0xbf2cc027,2 +np.float32,0x8073644c,0x8073644c,2 +np.float32,0xff2d6bfe,0xbf800000,2 +np.float32,0x3f65f0f6,0x3f37260a,2 +np.float32,0xff4b37a6,0xbf800000,2 +np.float32,0x712df7,0x712df7,2 +np.float32,0x7f71ef17,0x3f800000,2 +np.float32,0x8042245c,0x8042245c,2 +np.float32,0x3e5dde7b,0x3e5a760d,2 +np.float32,0x8069317d,0x8069317d,2 +np.float32,0x807932dd,0x807932dd,2 +np.float32,0x802f847e,0x802f847e,2 +np.float32,0x7e9300,0x7e9300,2 +np.float32,0x8040b4ab,0x8040b4ab,2 +np.float32,0xff76ef8e,0xbf800000,2 +np.float32,0x4aae3a,0x4aae3a,2 +np.float32,0x8058de73,0x8058de73,2 +np.float32,0x7e4d58c0,0x3f800000,2 +np.float32,0x3d811b30,0x3d80ef79,2 +np.float32,0x7ec952cc,0x3f800000,2 +np.float32,0xfe162b1c,0xbf800000,2 +np.float32,0x3f0f1187,0x3f01d367,2 +np.float32,0xbf2f3458,0xbf182878,2 +np.float32,0x5ceb14,0x5ceb14,2 +np.float32,0xbec29476,0xbeb9b939,2 +np.float32,0x3e71f943,0x3e6d9176,2 +np.float32,0x3ededefc,0x3ed1c909,2 +np.float32,0x805df6ac,0x805df6ac,2 +np.float32,0x3e5ae2c8,0x3e579ca8,2 
+np.float32,0x3f6ad2c3,0x3f397fdf,2 +np.float32,0x7d5f94d3,0x3f800000,2 +np.float32,0xbeec7fe4,0xbedd0037,2 +np.float32,0x3f645304,0x3f365b0d,2 +np.float32,0xbf69a087,0xbf38edef,2 +np.float32,0x8025102e,0x8025102e,2 +np.float32,0x800db486,0x800db486,2 +np.float32,0x4df6c7,0x4df6c7,2 +np.float32,0x806d8cdd,0x806d8cdd,2 +np.float32,0x7f0c78cc,0x3f800000,2 +np.float32,0x7e1cf70b,0x3f800000,2 +np.float32,0x3e0ae570,0x3e0a0cf7,2 +np.float32,0x80176ef8,0x80176ef8,2 +np.float32,0x3f38b60c,0x3f1e2bbb,2 +np.float32,0x3d3071e0,0x3d3055f5,2 +np.float32,0x3ebfcfdd,0x3eb750a9,2 +np.float32,0xfe2cdec0,0xbf800000,2 +np.float32,0x7eeb2eed,0x3f800000,2 +np.float32,0x8026c904,0x8026c904,2 +np.float32,0xbec79bde,0xbebe133a,2 +np.float32,0xbf7dfab6,0xbf421d47,2 +np.float32,0x805b3cfd,0x805b3cfd,2 +np.float32,0xfdfcfb68,0xbf800000,2 +np.float32,0xbd537ec0,0xbd534eaf,2 +np.float32,0x52ce73,0x52ce73,2 +np.float32,0xfeac6ea6,0xbf800000,2 +np.float32,0x3f2c2990,0x3f162d41,2 +np.float32,0x3e3354e0,0x3e318539,2 +np.float32,0x802db22b,0x802db22b,2 +np.float32,0x7f0faa83,0x3f800000,2 +np.float32,0x7f10e161,0x3f800000,2 +np.float32,0x7f165c60,0x3f800000,2 +np.float32,0xbf5a756f,0xbf315c82,2 +np.float32,0x7f5a4b68,0x3f800000,2 +np.float32,0xbd77fbf0,0xbd77ae7c,2 +np.float32,0x65d83c,0x65d83c,2 +np.float32,0x3e5f28,0x3e5f28,2 +np.float32,0x8040ec92,0x8040ec92,2 +np.float32,0xbf2b41a6,0xbf1594d5,2 +np.float32,0x7f2f88f1,0x3f800000,2 +np.float32,0xfdb64ab8,0xbf800000,2 +np.float32,0xbf7a3ff1,0xbf4082f5,2 +np.float32,0x1948fc,0x1948fc,2 +np.float32,0x802c1039,0x802c1039,2 +np.float32,0x80119274,0x80119274,2 +np.float32,0x7e885d7b,0x3f800000,2 +np.float32,0xfaf6a,0xfaf6a,2 +np.float32,0x3eba28c4,0x3eb25e1d,2 +np.float32,0x3e4df370,0x3e4b37da,2 +np.float32,0xbf19eff6,0xbf09b97d,2 +np.float32,0xbeddd3c6,0xbed0ea7f,2 +np.float32,0xff6fc971,0xbf800000,2 +np.float32,0x7e93de29,0x3f800000,2 +np.float32,0x3eb12332,0x3eaa6485,2 +np.float32,0x3eb7c6e4,0x3eb04563,2 +np.float32,0x4a67ee,0x4a67ee,2 +np.float32,0xff1cafde,0xbf800000,2 +np.float32,0x3f5e2812,0x3f3343da,2 +np.float32,0x3f060e04,0x3ef605d4,2 +np.float32,0x3e9027d8,0x3e8c76a6,2 +np.float32,0xe2d33,0xe2d33,2 +np.float32,0xff4c94fc,0xbf800000,2 +np.float32,0xbf574908,0xbf2fb26b,2 +np.float32,0xbf786c08,0xbf3fb68e,2 +np.float32,0x8011ecab,0x8011ecab,2 +np.float32,0xbf061c6a,0xbef61bfa,2 +np.float32,0x7eea5f9d,0x3f800000,2 +np.float32,0x3ea2e19c,0x3e9d99a5,2 +np.float32,0x8071550c,0x8071550c,2 +np.float32,0x41c70b,0x41c70b,2 +np.float32,0x80291fc8,0x80291fc8,2 +np.float32,0x43b1ec,0x43b1ec,2 +np.float32,0x32f5a,0x32f5a,2 +np.float32,0xbe9310ec,0xbe8f2692,2 +np.float32,0x7f75f6bf,0x3f800000,2 +np.float32,0x3e6642a6,0x3e6274d2,2 +np.float32,0x3ecb88e0,0x3ec1733f,2 +np.float32,0x804011b6,0x804011b6,2 +np.float32,0x80629cca,0x80629cca,2 +np.float32,0x8016b914,0x8016b914,2 +np.float32,0xbdd05fc0,0xbdcfa870,2 +np.float32,0x807b824d,0x807b824d,2 +np.float32,0xfeec2576,0xbf800000,2 +np.float32,0xbf54bf22,0xbf2e584c,2 +np.float32,0xbf185eb0,0xbf089b6b,2 +np.float32,0xfbc09480,0xbf800000,2 +np.float32,0x3f413054,0x3f234e25,2 +np.float32,0x7e9e32b8,0x3f800000,2 +np.float32,0x266296,0x266296,2 +np.float32,0x460284,0x460284,2 +np.float32,0x3eb0b056,0x3ea9fe5a,2 +np.float32,0x1a7be5,0x1a7be5,2 +np.float32,0x7f099895,0x3f800000,2 +np.float32,0x3f3614f0,0x3f1c88ef,2 +np.float32,0x7e757dc2,0x3f800000,2 +np.float32,0x801fc91e,0x801fc91e,2 +np.float32,0x3f5ce37d,0x3f329ddb,2 +np.float32,0x3e664d70,0x3e627f15,2 +np.float32,0xbf38ed78,0xbf1e4dfa,2 +np.float32,0xbf5c563d,0xbf325543,2 
+np.float32,0xbe91cc54,0xbe8dfb24,2 +np.float32,0x3d767fbe,0x3d7633ac,2 +np.float32,0xbf6aeb40,0xbf398b7f,2 +np.float32,0x7f40508b,0x3f800000,2 +np.float32,0x2650df,0x2650df,2 +np.float32,0xbe8cea3c,0xbe897628,2 +np.float32,0x80515af8,0x80515af8,2 +np.float32,0x7f423986,0x3f800000,2 +np.float32,0xbdf250e8,0xbdf1310c,2 +np.float32,0xfe89288a,0xbf800000,2 +np.float32,0x397b3b,0x397b3b,2 +np.float32,0x7e5e91b0,0x3f800000,2 +np.float32,0x6866e2,0x6866e2,2 +np.float32,0x7f4d8877,0x3f800000,2 +np.float32,0x3e6c4a21,0x3e682ee3,2 +np.float32,0xfc3d5980,0xbf800000,2 +np.float32,0x7eae2cd0,0x3f800000,2 +np.float32,0xbf241222,0xbf10c579,2 +np.float32,0xfebc02de,0xbf800000,2 +np.float32,0xff6e0645,0xbf800000,2 +np.float32,0x802030b6,0x802030b6,2 +np.float32,0x7ef9a441,0x3f800000,2 +np.float32,0x3fcf9f,0x3fcf9f,2 +np.float32,0xbf0ccf13,0xbf0023cc,2 +np.float32,0xfefee688,0xbf800000,2 +np.float32,0xbf6c8e0c,0xbf3a5160,2 +np.float32,0xfe749c28,0xbf800000,2 +np.float32,0x7f7fffff,0x3f800000,2 +np.float32,0x58c1a0,0x58c1a0,2 +np.float32,0x3f2de0a1,0x3f174c17,2 +np.float32,0xbf5f7138,0xbf33eb03,2 +np.float32,0x3da15270,0x3da0fd3c,2 +np.float32,0x3da66560,0x3da607e4,2 +np.float32,0xbf306f9a,0xbf18f3c6,2 +np.float32,0x3e81a4de,0x3e7de293,2 +np.float32,0xbebb5fb8,0xbeb36f1a,2 +np.float32,0x14bf64,0x14bf64,2 +np.float32,0xbeac46c6,0xbea60e73,2 +np.float32,0xbdcdf210,0xbdcd4111,2 +np.float32,0x3f7e3cd9,0x3f42395e,2 +np.float32,0xbc4be640,0xbc4be38e,2 +np.float32,0xff5f53b4,0xbf800000,2 +np.float32,0xbf1315ae,0xbf04c90b,2 +np.float32,0x80000000,0x80000000,2 +np.float32,0xbf6a4149,0xbf393aaa,2 +np.float32,0x3f66b8ee,0x3f378772,2 +np.float32,0xff29293e,0xbf800000,2 +np.float32,0xbcc989c0,0xbcc97f58,2 +np.float32,0xbd9a1b70,0xbd99d125,2 +np.float32,0xfef353cc,0xbf800000,2 +np.float32,0xbdc30cf0,0xbdc27683,2 +np.float32,0xfdfd6768,0xbf800000,2 +np.float32,0x7ebac44c,0x3f800000,2 +np.float32,0xff453cd6,0xbf800000,2 +np.float32,0x3ef07720,0x3ee03787,2 +np.float32,0x80219c14,0x80219c14,2 +np.float32,0x805553a8,0x805553a8,2 +np.float32,0x80703928,0x80703928,2 +np.float32,0xff16d3a7,0xbf800000,2 +np.float32,0x3f1472bc,0x3f05c77b,2 +np.float32,0x3eeea37a,0x3edebcf9,2 +np.float32,0x3db801e6,0x3db7838d,2 +np.float32,0x800870d2,0x800870d2,2 +np.float32,0xbea1172c,0xbe9bfa32,2 +np.float32,0x3f1f5e7c,0x3f0d8a42,2 +np.float32,0x123cdb,0x123cdb,2 +np.float32,0x7f6e6b06,0x3f800000,2 +np.float32,0x3ed80573,0x3ecc0def,2 +np.float32,0xfea31b82,0xbf800000,2 +np.float32,0x6744e0,0x6744e0,2 +np.float32,0x695e8b,0x695e8b,2 +np.float32,0xbee3888a,0xbed5a67d,2 +np.float32,0x7f64bc2a,0x3f800000,2 +np.float32,0x7f204244,0x3f800000,2 +np.float32,0x7f647102,0x3f800000,2 +np.float32,0x3dd8ebc0,0x3dd81d03,2 +np.float32,0x801e7ab1,0x801e7ab1,2 +np.float32,0x7d034b56,0x3f800000,2 +np.float32,0x7fc00000,0x7fc00000,2 +np.float32,0x80194193,0x80194193,2 +np.float32,0xfe31c8d4,0xbf800000,2 +np.float32,0x7fc0c4,0x7fc0c4,2 +np.float32,0xd95bf,0xd95bf,2 +np.float32,0x7e4f991d,0x3f800000,2 +np.float32,0x7fc563,0x7fc563,2 +np.float32,0xbe3fcccc,0xbe3d968a,2 +np.float32,0xfdaaa1c8,0xbf800000,2 +np.float32,0xbf48e449,0xbf27c949,2 +np.float32,0x3eb6c584,0x3eaf625e,2 +np.float32,0xbea35a74,0xbe9e0702,2 +np.float32,0x3eeab47a,0x3edb89d5,2 +np.float32,0xbed99556,0xbecd5de5,2 +np.float64,0xbfb94a81e0329500,0xbfb935867ba761fe,2 +np.float64,0xbfec132f1678265e,0xbfe6900eb097abc3,2 +np.float64,0x5685ea72ad0be,0x5685ea72ad0be,2 +np.float64,0xbfd74d3169ae9a62,0xbfd652e09b9daf32,2 +np.float64,0xbfe28df53d651bea,0xbfe0b8a7f50ab433,2 +np.float64,0x0,0x0,2 
+np.float64,0xbfed912738bb224e,0xbfe749e3732831ae,2 +np.float64,0x7fcc6faed838df5d,0x3ff0000000000000,2 +np.float64,0xbfe95fe9a432bfd3,0xbfe51f6349919910,2 +np.float64,0xbfc4d5900b29ab20,0xbfc4a6f496179b8b,2 +np.float64,0xbfcd6025033ac04c,0xbfccded7b34b49b0,2 +np.float64,0xbfdfa655b43f4cac,0xbfdd4ca1e5bb9db8,2 +np.float64,0xe7ea5c7fcfd4c,0xe7ea5c7fcfd4c,2 +np.float64,0xffa5449ca42a8940,0xbff0000000000000,2 +np.float64,0xffe63294c1ac6529,0xbff0000000000000,2 +np.float64,0x7feb9cbae7f73975,0x3ff0000000000000,2 +np.float64,0x800eb07c3e3d60f9,0x800eb07c3e3d60f9,2 +np.float64,0x3fc95777e932aef0,0x3fc9040391e20c00,2 +np.float64,0x800736052dee6c0b,0x800736052dee6c0b,2 +np.float64,0x3fe9ae4afd335c96,0x3fe54b569bab45c7,2 +np.float64,0x7fee4c94217c9927,0x3ff0000000000000,2 +np.float64,0x80094b594bd296b3,0x80094b594bd296b3,2 +np.float64,0xffe5adbcee6b5b7a,0xbff0000000000000,2 +np.float64,0x3fecb8eab47971d5,0x3fe6e236be6f27e9,2 +np.float64,0x44956914892ae,0x44956914892ae,2 +np.float64,0xbfe3bd18ef677a32,0xbfe190bf1e07200c,2 +np.float64,0x800104e5b46209cc,0x800104e5b46209cc,2 +np.float64,0x8008fbcecf71f79e,0x8008fbcecf71f79e,2 +np.float64,0x800f0a46a0be148d,0x800f0a46a0be148d,2 +np.float64,0x7fe657a0702caf40,0x3ff0000000000000,2 +np.float64,0xffd3ff1a9027fe36,0xbff0000000000000,2 +np.float64,0x3fe78bc87bef1790,0x3fe40d2e63aaf029,2 +np.float64,0x7feeabdc4c7d57b8,0x3ff0000000000000,2 +np.float64,0xbfabd28d8437a520,0xbfabcb8ce03a0e56,2 +np.float64,0xbfddc3a133bb8742,0xbfdbc9fdb2594451,2 +np.float64,0x7fec911565b9222a,0x3ff0000000000000,2 +np.float64,0x71302604e2605,0x71302604e2605,2 +np.float64,0xee919d2bdd234,0xee919d2bdd234,2 +np.float64,0xbfc04fcff3209fa0,0xbfc0395a739a2ce4,2 +np.float64,0xffe4668a36e8cd14,0xbff0000000000000,2 +np.float64,0xbfeeafeebefd5fde,0xbfe7cd5f3d61a3ec,2 +np.float64,0x7fddb34219bb6683,0x3ff0000000000000,2 +np.float64,0xbfd2cac6cba5958e,0xbfd24520abb2ff36,2 +np.float64,0xbfb857e49630afc8,0xbfb8452d5064dec2,2 +np.float64,0x3fd2dbf90b25b7f2,0x3fd254eaf48484c2,2 +np.float64,0x800af65c94f5ecba,0x800af65c94f5ecba,2 +np.float64,0xa0eef4bf41ddf,0xa0eef4bf41ddf,2 +np.float64,0xffd8e0a4adb1c14a,0xbff0000000000000,2 +np.float64,0xffe858f6e870b1ed,0xbff0000000000000,2 +np.float64,0x3f94c2c308298580,0x3f94c208a4bb006d,2 +np.float64,0xffb45f0d7428be18,0xbff0000000000000,2 +np.float64,0x800ed4f43dbda9e9,0x800ed4f43dbda9e9,2 +np.float64,0x8002dd697e85bad4,0x8002dd697e85bad4,2 +np.float64,0x787ceab2f0f9e,0x787ceab2f0f9e,2 +np.float64,0xbfdff5fcc2bfebfa,0xbfdd8b736b128589,2 +np.float64,0x7fdb2b4294365684,0x3ff0000000000000,2 +np.float64,0xffe711e5e92e23cc,0xbff0000000000000,2 +np.float64,0x800b1c93f1163928,0x800b1c93f1163928,2 +np.float64,0x7fc524d2f22a49a5,0x3ff0000000000000,2 +np.float64,0x7fc88013b5310026,0x3ff0000000000000,2 +np.float64,0x3fe1a910c5e35222,0x3fe00fd779ebaa2a,2 +np.float64,0xbfb57ec9ca2afd90,0xbfb571e47ecb9335,2 +np.float64,0x7fd7594b20aeb295,0x3ff0000000000000,2 +np.float64,0x7fba4641ca348c83,0x3ff0000000000000,2 +np.float64,0xffe61393706c2726,0xbff0000000000000,2 +np.float64,0x7fd54f3c7baa9e78,0x3ff0000000000000,2 +np.float64,0xffe65ffb12ecbff6,0xbff0000000000000,2 +np.float64,0xbfba3b0376347608,0xbfba239cbbbd1b11,2 +np.float64,0x800200886d640112,0x800200886d640112,2 +np.float64,0xbfecf0ba4679e174,0xbfe6fd59de44a3ec,2 +np.float64,0xffe5c57e122b8afc,0xbff0000000000000,2 +np.float64,0x7fdaad0143355a02,0x3ff0000000000000,2 +np.float64,0x46ab32c08d567,0x46ab32c08d567,2 +np.float64,0x7ff8000000000000,0x7ff8000000000000,2 +np.float64,0xbfda7980fdb4f302,0xbfd90fa9c8066109,2 
+np.float64,0x3fe237703c646ee0,0x3fe07969f8d8805a,2 +np.float64,0x8000e9fcfc21d3fb,0x8000e9fcfc21d3fb,2 +np.float64,0xbfdfe6e958bfcdd2,0xbfdd7f952fe87770,2 +np.float64,0xbd7baf217af8,0xbd7baf217af8,2 +np.float64,0xbfceba9e4b3d753c,0xbfce26e54359869a,2 +np.float64,0xb95a2caf72b46,0xb95a2caf72b46,2 +np.float64,0x3fb407e25a280fc5,0x3fb3fd71e457b628,2 +np.float64,0xa1da09d943b41,0xa1da09d943b41,2 +np.float64,0xbfe9c7271cf38e4e,0xbfe559296b471738,2 +np.float64,0x3fefae6170ff5cc3,0x3fe83c70ba82f0e1,2 +np.float64,0x7fe7375348ae6ea6,0x3ff0000000000000,2 +np.float64,0xffe18c9cc6e31939,0xbff0000000000000,2 +np.float64,0x800483d13a6907a3,0x800483d13a6907a3,2 +np.float64,0x7fe772a18caee542,0x3ff0000000000000,2 +np.float64,0xffefff64e7bffec9,0xbff0000000000000,2 +np.float64,0x7fcffc31113ff861,0x3ff0000000000000,2 +np.float64,0x3fd91e067e323c0d,0x3fd7e70bf365a7b3,2 +np.float64,0xb0a6673d614cd,0xb0a6673d614cd,2 +np.float64,0xffef9a297e3f3452,0xbff0000000000000,2 +np.float64,0xffe87cc15e70f982,0xbff0000000000000,2 +np.float64,0xffefd6ad8e7fad5a,0xbff0000000000000,2 +np.float64,0x7fe3aaa3a8a75546,0x3ff0000000000000,2 +np.float64,0xddab0341bb561,0xddab0341bb561,2 +np.float64,0x3fe996d6d7332dae,0x3fe53e3ed5be2922,2 +np.float64,0x3fdbe66a18b7ccd4,0x3fda41e6053c1512,2 +np.float64,0x8914775d1228f,0x8914775d1228f,2 +np.float64,0x3fe44621d4688c44,0x3fe1ef9c7225f8bd,2 +np.float64,0xffab29a2a4365340,0xbff0000000000000,2 +np.float64,0xffc8d4a0c431a940,0xbff0000000000000,2 +np.float64,0xbfd426e085284dc2,0xbfd382e2a9617b87,2 +np.float64,0xbfd3b0a525a7614a,0xbfd3176856faccf1,2 +np.float64,0x80036dedcb06dbdc,0x80036dedcb06dbdc,2 +np.float64,0x3feb13823b762704,0x3fe60ca3facdb696,2 +np.float64,0x3fd7246b7bae48d8,0x3fd62f08afded155,2 +np.float64,0x1,0x1,2 +np.float64,0x3fe8ade4b9715bc9,0x3fe4b97cc1387d27,2 +np.float64,0x3fdf2dbec53e5b7e,0x3fdcecfeee33de95,2 +np.float64,0x3fe4292bf9685258,0x3fe1dbb5a6704090,2 +np.float64,0xbfd21acbb8243598,0xbfd1a2ff42174cae,2 +np.float64,0xdd0d2d01ba1a6,0xdd0d2d01ba1a6,2 +np.float64,0x3fa3f3d2f427e7a0,0x3fa3f13d6f101555,2 +np.float64,0x7fdabf4aceb57e95,0x3ff0000000000000,2 +np.float64,0xd4d9e39ba9b3d,0xd4d9e39ba9b3d,2 +np.float64,0xffec773396f8ee66,0xbff0000000000000,2 +np.float64,0x3fa88cc79031198f,0x3fa887f7ade722ba,2 +np.float64,0xffe63a92066c7524,0xbff0000000000000,2 +np.float64,0xbfcf514e2e3ea29c,0xbfceb510e99aaa19,2 +np.float64,0x9d78c19d3af18,0x9d78c19d3af18,2 +np.float64,0x7fdd748bfbbae917,0x3ff0000000000000,2 +np.float64,0xffb3594c4626b298,0xbff0000000000000,2 +np.float64,0x80068ce5b32d19cc,0x80068ce5b32d19cc,2 +np.float64,0x3fec63d60e78c7ac,0x3fe6b85536e44217,2 +np.float64,0x80080bad4dd0175b,0x80080bad4dd0175b,2 +np.float64,0xbfec6807baf8d010,0xbfe6ba69740f9687,2 +np.float64,0x7fedbae0bbfb75c0,0x3ff0000000000000,2 +np.float64,0x8001cb7aa3c396f6,0x8001cb7aa3c396f6,2 +np.float64,0x7fe1f1f03563e3df,0x3ff0000000000000,2 +np.float64,0x7fd83d3978307a72,0x3ff0000000000000,2 +np.float64,0xbfc05ffe9d20bffc,0xbfc049464e3f0af2,2 +np.float64,0xfe6e053ffcdc1,0xfe6e053ffcdc1,2 +np.float64,0xbfd3bdf39d277be8,0xbfd32386edf12726,2 +np.float64,0x800f41b27bde8365,0x800f41b27bde8365,2 +np.float64,0xbfe2c98390e59307,0xbfe0e3c9260fe798,2 +np.float64,0xffdd6206bcbac40e,0xbff0000000000000,2 +np.float64,0x67f35ef4cfe6c,0x67f35ef4cfe6c,2 +np.float64,0x800337e02ae66fc1,0x800337e02ae66fc1,2 +np.float64,0x3fe0ff70afe1fee1,0x3fdf1f46434330df,2 +np.float64,0x3fd7e0a1df2fc144,0x3fd6d3f82c8031e4,2 +np.float64,0x8008da5cd1b1b4ba,0x8008da5cd1b1b4ba,2 +np.float64,0x80065ec9e4ccbd95,0x80065ec9e4ccbd95,2 
+np.float64,0x3fe1d1e559a3a3cb,0x3fe02e4f146aa1ab,2 +np.float64,0x7feb7d2f0836fa5d,0x3ff0000000000000,2 +np.float64,0xbfcb33ce9736679c,0xbfcaccd431b205bb,2 +np.float64,0x800e6d0adf5cda16,0x800e6d0adf5cda16,2 +np.float64,0x7fe46f272ca8de4d,0x3ff0000000000000,2 +np.float64,0x4fdfc73e9fbfa,0x4fdfc73e9fbfa,2 +np.float64,0x800958a13112b143,0x800958a13112b143,2 +np.float64,0xbfea01f877f403f1,0xbfe579a541594247,2 +np.float64,0xeefaf599ddf5f,0xeefaf599ddf5f,2 +np.float64,0x80038766c5e70ece,0x80038766c5e70ece,2 +np.float64,0x7fd31bc28ba63784,0x3ff0000000000000,2 +np.float64,0xbfe4df77eee9bef0,0xbfe257abe7083b77,2 +np.float64,0x7fe6790c78acf218,0x3ff0000000000000,2 +np.float64,0xffe7c66884af8cd0,0xbff0000000000000,2 +np.float64,0x800115e36f422bc8,0x800115e36f422bc8,2 +np.float64,0x3fc601945d2c0329,0x3fc5cab917bb20bc,2 +np.float64,0x3fd6ac9546ad592b,0x3fd5c55437ec3508,2 +np.float64,0xa7bd59294f7ab,0xa7bd59294f7ab,2 +np.float64,0x8005c26c8b8b84da,0x8005c26c8b8b84da,2 +np.float64,0x8257501704aea,0x8257501704aea,2 +np.float64,0x5b12aae0b6256,0x5b12aae0b6256,2 +np.float64,0x800232fe02c465fd,0x800232fe02c465fd,2 +np.float64,0x800dae28f85b5c52,0x800dae28f85b5c52,2 +np.float64,0x3fdade1ac135bc36,0x3fd964a2000ace25,2 +np.float64,0x3fed72ca04fae594,0x3fe73b9170d809f9,2 +np.float64,0x7fc6397e2b2c72fb,0x3ff0000000000000,2 +np.float64,0x3fe1f5296d23ea53,0x3fe048802d17621e,2 +np.float64,0xffe05544b920aa89,0xbff0000000000000,2 +np.float64,0xbfdb2e1588365c2c,0xbfd9a7e4113c713e,2 +np.float64,0xbfed6a06fa3ad40e,0xbfe7376be60535f8,2 +np.float64,0xbfe31dcaf5e63b96,0xbfe120417c46cac1,2 +np.float64,0xbfb7ed67ae2fdad0,0xbfb7dba14af33b00,2 +np.float64,0xffd32bb7eb265770,0xbff0000000000000,2 +np.float64,0x80039877b04730f0,0x80039877b04730f0,2 +np.float64,0x3f832e5630265cac,0x3f832e316f47f218,2 +np.float64,0xffe7fa7f732ff4fe,0xbff0000000000000,2 +np.float64,0x9649b87f2c937,0x9649b87f2c937,2 +np.float64,0xffaee447183dc890,0xbff0000000000000,2 +np.float64,0x7fe4e02dd869c05b,0x3ff0000000000000,2 +np.float64,0x3fe1d35e7463a6bd,0x3fe02f67bd21e86e,2 +np.float64,0xffe57f40fe2afe82,0xbff0000000000000,2 +np.float64,0xbfea1362b93426c6,0xbfe5833421dba8fc,2 +np.float64,0xffe9c689fe338d13,0xbff0000000000000,2 +np.float64,0xffc592dd102b25bc,0xbff0000000000000,2 +np.float64,0x3fd283c7aba5078f,0x3fd203d61d1398c3,2 +np.float64,0x8001d6820243ad05,0x8001d6820243ad05,2 +np.float64,0x3fe0ad5991e15ab4,0x3fdea14ef0d47fbd,2 +np.float64,0x3fe3916f2ee722de,0x3fe1722684a9ffb1,2 +np.float64,0xffef9e54e03f3ca9,0xbff0000000000000,2 +np.float64,0x7fe864faebb0c9f5,0x3ff0000000000000,2 +np.float64,0xbfed3587c3fa6b10,0xbfe71e7112df8a68,2 +np.float64,0xbfdd9efc643b3df8,0xbfdbac3a16caf208,2 +np.float64,0xbfd5ac08feab5812,0xbfd4e14575a6e41b,2 +np.float64,0xffda90fae6b521f6,0xbff0000000000000,2 +np.float64,0x8001380ecf22701e,0x8001380ecf22701e,2 +np.float64,0x7fed266fa5fa4cde,0x3ff0000000000000,2 +np.float64,0xffec6c0ac3b8d815,0xbff0000000000000,2 +np.float64,0x3fe7de43c32fbc88,0x3fe43ef62821a5a6,2 +np.float64,0x800bf4ffc357ea00,0x800bf4ffc357ea00,2 +np.float64,0x3fe125c975624b93,0x3fdf59b2de3eff5d,2 +np.float64,0x8004714c1028e299,0x8004714c1028e299,2 +np.float64,0x3fef1bfbf5fe37f8,0x3fe7fd2ba1b63c8a,2 +np.float64,0x800cae15c3195c2c,0x800cae15c3195c2c,2 +np.float64,0x7fde708e083ce11b,0x3ff0000000000000,2 +np.float64,0x7fbcee5df639dcbb,0x3ff0000000000000,2 +np.float64,0x800b1467141628cf,0x800b1467141628cf,2 +np.float64,0x3fe525e0d36a4bc2,0x3fe286b6e59e30f5,2 +np.float64,0xffe987f8b8330ff1,0xbff0000000000000,2 +np.float64,0x7e0a8284fc151,0x7e0a8284fc151,2 
+np.float64,0x8006f982442df305,0x8006f982442df305,2 +np.float64,0xbfd75a3cb62eb47a,0xbfd65e54cee981c9,2 +np.float64,0x258e91104b1d3,0x258e91104b1d3,2 +np.float64,0xbfecd0056779a00b,0xbfe6ed7ae97fff1b,2 +np.float64,0x7fc3a4f9122749f1,0x3ff0000000000000,2 +np.float64,0x6e2b1024dc563,0x6e2b1024dc563,2 +np.float64,0x800d575ad4daaeb6,0x800d575ad4daaeb6,2 +np.float64,0xbfceafb1073d5f64,0xbfce1c93023d8414,2 +np.float64,0xffe895cb5f312b96,0xbff0000000000000,2 +np.float64,0x7fe7811ed4ef023d,0x3ff0000000000000,2 +np.float64,0xbfd93f952f327f2a,0xbfd803e6b5576b99,2 +np.float64,0xffdd883a3fbb1074,0xbff0000000000000,2 +np.float64,0x7fee5624eefcac49,0x3ff0000000000000,2 +np.float64,0xbfe264bb2624c976,0xbfe09a9b7cc896e7,2 +np.float64,0xffef14b417be2967,0xbff0000000000000,2 +np.float64,0xbfecbd0d94397a1b,0xbfe6e43bef852d9f,2 +np.float64,0xbfe20d9e4ba41b3c,0xbfe05a98e05846d9,2 +np.float64,0x10000000000000,0x10000000000000,2 +np.float64,0x7fefde93f7bfbd27,0x3ff0000000000000,2 +np.float64,0x80076b9e232ed73d,0x80076b9e232ed73d,2 +np.float64,0xbfe80df52c701bea,0xbfe45b754b433792,2 +np.float64,0x7fe3b5a637676b4b,0x3ff0000000000000,2 +np.float64,0x2c81d14c5903b,0x2c81d14c5903b,2 +np.float64,0x80038945c767128c,0x80038945c767128c,2 +np.float64,0xffeebaf544bd75ea,0xbff0000000000000,2 +np.float64,0xffdb1867d2b630d0,0xbff0000000000000,2 +np.float64,0x3fe3376eaee66ede,0x3fe13285579763d8,2 +np.float64,0xffddf65ca43becba,0xbff0000000000000,2 +np.float64,0xffec8e3e04791c7b,0xbff0000000000000,2 +np.float64,0x80064f4bde2c9e98,0x80064f4bde2c9e98,2 +np.float64,0x7fe534a085ea6940,0x3ff0000000000000,2 +np.float64,0xbfcbabe31d3757c8,0xbfcb3f8e70adf7e7,2 +np.float64,0xbfe45ca11e28b942,0xbfe1ff04515ef809,2 +np.float64,0x65f4df02cbe9d,0x65f4df02cbe9d,2 +np.float64,0xb08b0cbb61162,0xb08b0cbb61162,2 +np.float64,0x3feae2e8b975c5d1,0x3fe5f302b5e8eda2,2 +np.float64,0x7fcf277ff93e4eff,0x3ff0000000000000,2 +np.float64,0x80010999c4821334,0x80010999c4821334,2 +np.float64,0xbfd7f65911afecb2,0xbfd6e6e9cd098f8b,2 +np.float64,0x800e0560ec3c0ac2,0x800e0560ec3c0ac2,2 +np.float64,0x7fec4152ba3882a4,0x3ff0000000000000,2 +np.float64,0xbfb5c77cd42b8ef8,0xbfb5ba1336084908,2 +np.float64,0x457ff1b68afff,0x457ff1b68afff,2 +np.float64,0x5323ec56a647e,0x5323ec56a647e,2 +np.float64,0xbfeed16cf8bda2da,0xbfe7dc49fc9ae549,2 +np.float64,0xffe8446106b088c1,0xbff0000000000000,2 +np.float64,0xffb93cd13c3279a0,0xbff0000000000000,2 +np.float64,0x7fe515c2aeea2b84,0x3ff0000000000000,2 +np.float64,0x80099df83f933bf1,0x80099df83f933bf1,2 +np.float64,0x7fb3a375562746ea,0x3ff0000000000000,2 +np.float64,0x7fcd7efa243afdf3,0x3ff0000000000000,2 +np.float64,0xffe40cddb12819bb,0xbff0000000000000,2 +np.float64,0x8008b68eecd16d1e,0x8008b68eecd16d1e,2 +np.float64,0x2aec688055d8e,0x2aec688055d8e,2 +np.float64,0xffe23750bc646ea1,0xbff0000000000000,2 +np.float64,0x5adacf60b5b7,0x5adacf60b5b7,2 +np.float64,0x7fefb29b1cbf6535,0x3ff0000000000000,2 +np.float64,0xbfeadbf90175b7f2,0xbfe5ef55e2194794,2 +np.float64,0xeaad2885d55a5,0xeaad2885d55a5,2 +np.float64,0xffd7939fba2f2740,0xbff0000000000000,2 +np.float64,0x3fd187ea3aa30fd4,0x3fd11af023472386,2 +np.float64,0xbf6eb579c03d6b00,0xbf6eb57052f47019,2 +np.float64,0x3fefb67b3bff6cf6,0x3fe83fe4499969ac,2 +np.float64,0xbfe5183aacea3076,0xbfe27da1aa0b61a0,2 +np.float64,0xbfb83e47a2307c90,0xbfb82bcb0e12db42,2 +np.float64,0x80088849b1b11094,0x80088849b1b11094,2 +np.float64,0x800ceeed7399dddb,0x800ceeed7399dddb,2 +np.float64,0x80097cd90892f9b2,0x80097cd90892f9b2,2 +np.float64,0x7ec73feefd8e9,0x7ec73feefd8e9,2 
+np.float64,0x7fe3291de5a6523b,0x3ff0000000000000,2 +np.float64,0xbfd537086daa6e10,0xbfd4787af5f60653,2 +np.float64,0x800e8ed4455d1da9,0x800e8ed4455d1da9,2 +np.float64,0x800ef8d19cbdf1a3,0x800ef8d19cbdf1a3,2 +np.float64,0x800dc4fa3a5b89f5,0x800dc4fa3a5b89f5,2 +np.float64,0xaa8b85cd55171,0xaa8b85cd55171,2 +np.float64,0xffd67a5f40acf4be,0xbff0000000000000,2 +np.float64,0xbfb7496db22e92d8,0xbfb7390a48130861,2 +np.float64,0x3fd86a8e7ab0d51d,0x3fd74bfba0f72616,2 +np.float64,0xffb7f5b7fc2feb70,0xbff0000000000000,2 +np.float64,0xbfea0960a7f412c1,0xbfe57db6d0ff4191,2 +np.float64,0x375f4fc26ebeb,0x375f4fc26ebeb,2 +np.float64,0x800c537e70b8a6fd,0x800c537e70b8a6fd,2 +np.float64,0x800b3f4506d67e8a,0x800b3f4506d67e8a,2 +np.float64,0x7fe61f2d592c3e5a,0x3ff0000000000000,2 +np.float64,0xffefffffffffffff,0xbff0000000000000,2 +np.float64,0x8005d0bb84eba178,0x8005d0bb84eba178,2 +np.float64,0x800c78b0ec18f162,0x800c78b0ec18f162,2 +np.float64,0xbfc42cccfb285998,0xbfc4027392f66b0d,2 +np.float64,0x3fd8fdc73fb1fb8e,0x3fd7cb46f928153f,2 +np.float64,0x800c71754298e2eb,0x800c71754298e2eb,2 +np.float64,0x3fe4aa7a96a954f5,0x3fe233f5d3bc1352,2 +np.float64,0x7fd53841f6aa7083,0x3ff0000000000000,2 +np.float64,0x3fd0a887b8a15110,0x3fd04ac3b9c0d1ca,2 +np.float64,0x8007b8e164cf71c4,0x8007b8e164cf71c4,2 +np.float64,0xbfddc35c66bb86b8,0xbfdbc9c5dddfb014,2 +np.float64,0x6a3756fed46eb,0x6a3756fed46eb,2 +np.float64,0xffd3dcd05527b9a0,0xbff0000000000000,2 +np.float64,0xbfd7dc75632fb8ea,0xbfd6d0538b340a98,2 +np.float64,0x17501f822ea05,0x17501f822ea05,2 +np.float64,0xbfe1f98b99a3f317,0xbfe04bbf8f8b6cb3,2 +np.float64,0x66ea65d2cdd4d,0x66ea65d2cdd4d,2 +np.float64,0xbfd12241e2224484,0xbfd0bc62f46ea5e1,2 +np.float64,0x3fed6e6fb3fadcdf,0x3fe7398249097285,2 +np.float64,0x3fe0b5ebeba16bd8,0x3fdeae84b3000a47,2 +np.float64,0x66d1bce8cda38,0x66d1bce8cda38,2 +np.float64,0x3fdd728db3bae51b,0x3fdb880f28c52713,2 +np.float64,0xffb45dbe5228bb80,0xbff0000000000000,2 +np.float64,0x1ff8990c3ff14,0x1ff8990c3ff14,2 +np.float64,0x800a68e8f294d1d2,0x800a68e8f294d1d2,2 +np.float64,0xbfe4d08b84a9a117,0xbfe24da40bff6be7,2 +np.float64,0x3fe0177f0ee02efe,0x3fddb83c5971df51,2 +np.float64,0xffc56893692ad128,0xbff0000000000000,2 +np.float64,0x51b44f6aa368b,0x51b44f6aa368b,2 +np.float64,0x2258ff4e44b21,0x2258ff4e44b21,2 +np.float64,0x3fe913649e7226c9,0x3fe4f3f119530f53,2 +np.float64,0xffe3767df766ecfc,0xbff0000000000000,2 +np.float64,0xbfe62ae12fec55c2,0xbfe33108f1f22a94,2 +np.float64,0x7fb6a6308e2d4c60,0x3ff0000000000000,2 +np.float64,0xbfe00f2085e01e41,0xbfddab19b6fc77d1,2 +np.float64,0x3fb66447dc2cc890,0x3fb655b4f46844f0,2 +np.float64,0x3fd80238f6b00470,0x3fd6f143be1617d6,2 +np.float64,0xbfd05bfeb3a0b7fe,0xbfd0031ab3455e15,2 +np.float64,0xffc3a50351274a08,0xbff0000000000000,2 +np.float64,0xffd8f4241cb1e848,0xbff0000000000000,2 +np.float64,0xbfca72a88c34e550,0xbfca13ebe85f2aca,2 +np.float64,0x3fd47d683ba8fad0,0x3fd3d13f1176ed8c,2 +np.float64,0x3fb6418e642c831d,0x3fb6333ebe479ff2,2 +np.float64,0x800fde8e023fbd1c,0x800fde8e023fbd1c,2 +np.float64,0x8001fb01e323f605,0x8001fb01e323f605,2 +np.float64,0x3febb21ff9f76440,0x3fe65ed788d52fee,2 +np.float64,0x3fe47553ffe8eaa8,0x3fe20fe01f853603,2 +np.float64,0x7fca20b3f9344167,0x3ff0000000000000,2 +np.float64,0x3fe704f4ec6e09ea,0x3fe3ba7277201805,2 +np.float64,0xf864359df0c87,0xf864359df0c87,2 +np.float64,0x4d96b01c9b2d7,0x4d96b01c9b2d7,2 +np.float64,0x3fe8a09fe9f14140,0x3fe4b1c6a2d2e095,2 +np.float64,0xffc46c61b228d8c4,0xbff0000000000000,2 +np.float64,0x3fe680a837ed0150,0x3fe3679d6eeb6485,2 
+np.float64,0xbfecedc20f39db84,0xbfe6fbe9ee978bf6,2 +np.float64,0x3fb2314eae24629d,0x3fb2297ba6d55d2d,2 +np.float64,0x3fe9f0b8e7b3e172,0x3fe57026eae36db3,2 +np.float64,0x80097a132ed2f427,0x80097a132ed2f427,2 +np.float64,0x800ae5a41955cb49,0x800ae5a41955cb49,2 +np.float64,0xbfd7527279aea4e4,0xbfd6577de356e1bd,2 +np.float64,0x3fe27d3e01e4fa7c,0x3fe0ac7dd96f9179,2 +np.float64,0x7fedd8cb01bbb195,0x3ff0000000000000,2 +np.float64,0x78f8695af1f0e,0x78f8695af1f0e,2 +np.float64,0x800d2d0e927a5a1d,0x800d2d0e927a5a1d,2 +np.float64,0xffe74b46fb2e968e,0xbff0000000000000,2 +np.float64,0xbfdd12d4c8ba25aa,0xbfdb39dae49e1c10,2 +np.float64,0xbfd6c14710ad828e,0xbfd5d79ef5a8d921,2 +np.float64,0x921f4e55243ea,0x921f4e55243ea,2 +np.float64,0x800b4e4c80969c99,0x800b4e4c80969c99,2 +np.float64,0x7fe08c6ab7e118d4,0x3ff0000000000000,2 +np.float64,0xbfed290014fa5200,0xbfe71871f7e859ed,2 +np.float64,0x8008c1d5c59183ac,0x8008c1d5c59183ac,2 +np.float64,0x3fd339e68c2673cd,0x3fd2aaff3f165a9d,2 +np.float64,0xbfdd20d8113a41b0,0xbfdb4553ea2cb2fb,2 +np.float64,0x3fe52a25deea544c,0x3fe2898d5bf4442c,2 +np.float64,0x498602d4930c1,0x498602d4930c1,2 +np.float64,0x3fd8c450113188a0,0x3fd799b0b2a6c43c,2 +np.float64,0xbfd72bc2f2ae5786,0xbfd6357e15ba7f70,2 +np.float64,0xbfd076188ea0ec32,0xbfd01b8fce44d1af,2 +np.float64,0x9aace1713559c,0x9aace1713559c,2 +np.float64,0x8008a730e8914e62,0x8008a730e8914e62,2 +np.float64,0x7fe9e9a3d833d347,0x3ff0000000000000,2 +np.float64,0x800d3a0d69da741b,0x800d3a0d69da741b,2 +np.float64,0xbfe3e28a29e7c514,0xbfe1aad7643a2d19,2 +np.float64,0x7fe9894c71331298,0x3ff0000000000000,2 +np.float64,0xbfe7c6acb5ef8d5a,0xbfe430c9e258ce62,2 +np.float64,0xffb5a520a62b4a40,0xbff0000000000000,2 +np.float64,0x7fc02109ae204212,0x3ff0000000000000,2 +np.float64,0xb5c58f196b8b2,0xb5c58f196b8b2,2 +np.float64,0x3feb4ee82e769dd0,0x3fe62bae9a39d8b1,2 +np.float64,0x3fec5c3cf278b87a,0x3fe6b49000f12441,2 +np.float64,0x81f64b8103eca,0x81f64b8103eca,2 +np.float64,0xbfeab00d73f5601b,0xbfe5d7f755ab73d9,2 +np.float64,0x3fd016bf28a02d7e,0x3fcf843ea23bcd3c,2 +np.float64,0xbfa1db617423b6c0,0xbfa1d9872ddeb5a8,2 +np.float64,0x3fe83c879d70790f,0x3fe4771502d8f012,2 +np.float64,0x6b267586d64cf,0x6b267586d64cf,2 +np.float64,0x3fc91b6d3f3236d8,0x3fc8ca3eb4da25a9,2 +np.float64,0x7fd4e3f8f3a9c7f1,0x3ff0000000000000,2 +np.float64,0x800a75899214eb14,0x800a75899214eb14,2 +np.float64,0x7fdb1f2e07b63e5b,0x3ff0000000000000,2 +np.float64,0xffe7805a11ef00b4,0xbff0000000000000,2 +np.float64,0x3fc8e1b88a31c371,0x3fc892af45330818,2 +np.float64,0xbfe809fe447013fc,0xbfe45918f07da4d9,2 +np.float64,0xbfeb9d7f2ab73afe,0xbfe65446bfddc792,2 +np.float64,0x3fb47f0a5c28fe15,0x3fb473db9113e880,2 +np.float64,0x800a17ae3cb42f5d,0x800a17ae3cb42f5d,2 +np.float64,0xf5540945eaa81,0xf5540945eaa81,2 +np.float64,0xbfe577fc26aaeff8,0xbfe2bcfbf2cf69ff,2 +np.float64,0xbfb99b3e06333680,0xbfb98577b88e0515,2 +np.float64,0x7fd9290391b25206,0x3ff0000000000000,2 +np.float64,0x7fe1aa62ffa354c5,0x3ff0000000000000,2 +np.float64,0x7b0189a0f604,0x7b0189a0f604,2 +np.float64,0x3f9000ed602001db,0x3f900097fe168105,2 +np.float64,0x3fd576128d2aec25,0x3fd4b1002c92286f,2 +np.float64,0xffecc98ece79931d,0xbff0000000000000,2 +np.float64,0x800a1736c7f42e6e,0x800a1736c7f42e6e,2 +np.float64,0xbfed947548bb28eb,0xbfe74b71479ae739,2 +np.float64,0xa45c032148b9,0xa45c032148b9,2 +np.float64,0xbfc13d011c227a04,0xbfc1228447de5e9f,2 +np.float64,0xffed8baa6ebb1754,0xbff0000000000000,2 +np.float64,0x800ea2de243d45bc,0x800ea2de243d45bc,2 +np.float64,0x8001396be52272d9,0x8001396be52272d9,2 
+np.float64,0xd018d1cda031a,0xd018d1cda031a,2 +np.float64,0x7fe1fece1fe3fd9b,0x3ff0000000000000,2 +np.float64,0x8009ac484c135891,0x8009ac484c135891,2 +np.float64,0x3fc560ad132ac15a,0x3fc52e5a9479f08e,2 +np.float64,0x3fd6f80ebe2df01d,0x3fd607f70ce8e3f4,2 +np.float64,0xbfd3e69e82a7cd3e,0xbfd34887c2a40699,2 +np.float64,0x3fe232d9baa465b3,0x3fe0760a822ada0c,2 +np.float64,0x3fe769bbc6eed378,0x3fe3f872680f6631,2 +np.float64,0xffe63dbd952c7b7a,0xbff0000000000000,2 +np.float64,0x4e0c00da9c181,0x4e0c00da9c181,2 +np.float64,0xffeae4d89735c9b0,0xbff0000000000000,2 +np.float64,0x3fe030bcbb606179,0x3fdddfc66660bfce,2 +np.float64,0x7fe35ca40d66b947,0x3ff0000000000000,2 +np.float64,0xbfd45bd66628b7ac,0xbfd3b2e04bfe7866,2 +np.float64,0x3fd1f0be2323e17c,0x3fd17c1c340d7a48,2 +np.float64,0x3fd7123b6cae2478,0x3fd61f0675aa9ae1,2 +np.float64,0xbfe918a377723147,0xbfe4f6efe66f5714,2 +np.float64,0x7fc400356f28006a,0x3ff0000000000000,2 +np.float64,0x7fd2dead70a5bd5a,0x3ff0000000000000,2 +np.float64,0xffe9c28f81f3851e,0xbff0000000000000,2 +np.float64,0x3fd09b1ec7a1363e,0x3fd03e3894320140,2 +np.float64,0x7fe6e80c646dd018,0x3ff0000000000000,2 +np.float64,0x7fec3760a4786ec0,0x3ff0000000000000,2 +np.float64,0x309eb6ee613d8,0x309eb6ee613d8,2 +np.float64,0x800731cb0ece6397,0x800731cb0ece6397,2 +np.float64,0xbfdb0c553db618aa,0xbfd98b8a4680ee60,2 +np.float64,0x3fd603a52eac074c,0x3fd52f6b53de7455,2 +np.float64,0x9ecb821b3d971,0x9ecb821b3d971,2 +np.float64,0x3feb7d64dc36faca,0x3fe643c2754bb7f4,2 +np.float64,0xffeb94825ef72904,0xbff0000000000000,2 +np.float64,0x24267418484cf,0x24267418484cf,2 +np.float64,0xbfa6b2fbac2d65f0,0xbfa6af2dca5bfa6f,2 +np.float64,0x8010000000000000,0x8010000000000000,2 +np.float64,0xffe6873978ed0e72,0xbff0000000000000,2 +np.float64,0x800447934ba88f27,0x800447934ba88f27,2 +np.float64,0x3fef305f09fe60be,0x3fe806156b8ca47c,2 +np.float64,0xffd441c697a8838e,0xbff0000000000000,2 +np.float64,0xbfa7684f6c2ed0a0,0xbfa764238d34830c,2 +np.float64,0xffb2c976142592f0,0xbff0000000000000,2 +np.float64,0xbfcc9d1585393a2c,0xbfcc25756bcbca1f,2 +np.float64,0xbfd477bb1ba8ef76,0xbfd3cc1d2114e77e,2 +np.float64,0xbfed1559983a2ab3,0xbfe70f03afd994ee,2 +np.float64,0xbfeb51139036a227,0xbfe62ccf56bc7fff,2 +np.float64,0x7d802890fb006,0x7d802890fb006,2 +np.float64,0x800e00af777c015f,0x800e00af777c015f,2 +np.float64,0x800647ce128c8f9d,0x800647ce128c8f9d,2 +np.float64,0x800a26da91d44db6,0x800a26da91d44db6,2 +np.float64,0x3fdc727eddb8e4fe,0x3fdab5fd9db630b3,2 +np.float64,0x7fd06def2ba0dbdd,0x3ff0000000000000,2 +np.float64,0xffe23678c4a46cf1,0xbff0000000000000,2 +np.float64,0xbfe7198e42ee331c,0xbfe3c7326c9c7553,2 +np.float64,0xffae465f3c3c8cc0,0xbff0000000000000,2 +np.float64,0xff9aea7c5035d500,0xbff0000000000000,2 +np.float64,0xbfeae49c0f35c938,0xbfe5f3e9326cb08b,2 +np.float64,0x3f9a16f300342de6,0x3f9a1581212be50f,2 +np.float64,0x8d99e2c31b33d,0x8d99e2c31b33d,2 +np.float64,0xffd58af253ab15e4,0xbff0000000000000,2 +np.float64,0xbfd205cd25a40b9a,0xbfd18f97155f8b25,2 +np.float64,0xbfebe839bbf7d074,0xbfe67a6024e8fefe,2 +np.float64,0xbfe4fb3595a9f66b,0xbfe26a42f99819ea,2 +np.float64,0x800e867c739d0cf9,0x800e867c739d0cf9,2 +np.float64,0x8bc4274f17885,0x8bc4274f17885,2 +np.float64,0xaec8914b5d912,0xaec8914b5d912,2 +np.float64,0x7fd1d64473a3ac88,0x3ff0000000000000,2 +np.float64,0xbfe6d6f69cedaded,0xbfe39dd61bc7e23e,2 +np.float64,0x7fed05039d7a0a06,0x3ff0000000000000,2 +np.float64,0xbfc40eab0f281d58,0xbfc3e50d14b79265,2 +np.float64,0x45179aec8a2f4,0x45179aec8a2f4,2 +np.float64,0xbfe717e362ee2fc7,0xbfe3c62a95b07d13,2 
+np.float64,0xbfe5b8df0d6b71be,0xbfe2e76c7ec5013d,2 +np.float64,0x5c67ba6eb8cf8,0x5c67ba6eb8cf8,2 +np.float64,0xbfda72ce4cb4e59c,0xbfd909fdc7ecfe20,2 +np.float64,0x7fdf59a1e2beb343,0x3ff0000000000000,2 +np.float64,0xc4f7897f89ef1,0xc4f7897f89ef1,2 +np.float64,0x8fcd0a351f9a2,0x8fcd0a351f9a2,2 +np.float64,0x3fb161761022c2ec,0x3fb15aa31c464de2,2 +np.float64,0x8008a985be71530c,0x8008a985be71530c,2 +np.float64,0x3fca4ddb5e349bb7,0x3fc9f0a3b60e49c6,2 +np.float64,0x7fcc10a2d9382145,0x3ff0000000000000,2 +np.float64,0x78902b3af1206,0x78902b3af1206,2 +np.float64,0x7fe1e2765f23c4ec,0x3ff0000000000000,2 +np.float64,0xc1d288cf83a51,0xc1d288cf83a51,2 +np.float64,0x7fe8af692bb15ed1,0x3ff0000000000000,2 +np.float64,0x80057d90fb8afb23,0x80057d90fb8afb23,2 +np.float64,0x3fdc136b8fb826d8,0x3fda6749582b2115,2 +np.float64,0x800ec8ea477d91d5,0x800ec8ea477d91d5,2 +np.float64,0x4c0f4796981ea,0x4c0f4796981ea,2 +np.float64,0xec34c4a5d8699,0xec34c4a5d8699,2 +np.float64,0x7fce343dfb3c687b,0x3ff0000000000000,2 +np.float64,0xbfc95a98a332b530,0xbfc90705b2cc2fec,2 +np.float64,0x800d118e1dba231c,0x800d118e1dba231c,2 +np.float64,0x3fd354f310a6a9e8,0x3fd2c3bb90054154,2 +np.float64,0xbfdac0d4fab581aa,0xbfd94bf37424928e,2 +np.float64,0x3fe7f5391fefea72,0x3fe44cb49d51985b,2 +np.float64,0xd4c3c329a9879,0xd4c3c329a9879,2 +np.float64,0x3fc53977692a72f0,0x3fc50835d85c9ed1,2 +np.float64,0xbfd6989538ad312a,0xbfd5b3a2c08511fe,2 +np.float64,0xbfe329f2906653e5,0xbfe128ec1525a1c0,2 +np.float64,0x7ff0000000000000,0x3ff0000000000000,2 +np.float64,0xbfea57c90974af92,0xbfe5a87b04aa3116,2 +np.float64,0x7fdfba94043f7527,0x3ff0000000000000,2 +np.float64,0x3feedabddafdb57c,0x3fe7e06c0661978d,2 +np.float64,0x4bd9f3b697b3f,0x4bd9f3b697b3f,2 +np.float64,0x3fdd15bbfc3a2b78,0x3fdb3c3b8d070f7e,2 +np.float64,0x3fbd89ccd23b13a0,0x3fbd686b825cff80,2 +np.float64,0x7ff4000000000000,0x7ffc000000000000,2 +np.float64,0x3f9baa8928375512,0x3f9ba8d01ddd5300,2 +np.float64,0x4a3ebdf2947d8,0x4a3ebdf2947d8,2 +np.float64,0x3fe698d5c06d31ac,0x3fe376dff48312c8,2 +np.float64,0xffd5323df12a647c,0xbff0000000000000,2 +np.float64,0xffea7f111174fe22,0xbff0000000000000,2 +np.float64,0x3feb4656a9b68cad,0x3fe627392eb2156f,2 +np.float64,0x7fc1260e9c224c1c,0x3ff0000000000000,2 +np.float64,0x80056e45e5eadc8d,0x80056e45e5eadc8d,2 +np.float64,0x7fd0958ef6a12b1d,0x3ff0000000000000,2 +np.float64,0x8001f85664e3f0ae,0x8001f85664e3f0ae,2 +np.float64,0x3fe553853beaa70a,0x3fe2a4f5e7c83558,2 +np.float64,0xbfeb33ce6276679d,0xbfe61d8ec9e5ff8c,2 +np.float64,0xbfd1b24e21a3649c,0xbfd14245df6065e9,2 +np.float64,0x3fe286fc40650df9,0x3fe0b395c8059429,2 +np.float64,0xffed378058fa6f00,0xbff0000000000000,2 +np.float64,0xbfd0c4a2d7a18946,0xbfd06509a434d6a0,2 +np.float64,0xbfea31d581f463ab,0xbfe593d976139f94,2 +np.float64,0xbfe0705c85e0e0b9,0xbfde42efa978eb0c,2 +np.float64,0xe4c4c339c9899,0xe4c4c339c9899,2 +np.float64,0x3fd68befa9ad17df,0x3fd5a870b3f1f83e,2 +np.float64,0x8000000000000001,0x8000000000000001,2 +np.float64,0x3fe294256965284b,0x3fe0bd271e22d86b,2 +np.float64,0x8005327a862a64f6,0x8005327a862a64f6,2 +np.float64,0xbfdb8155ce3702ac,0xbfd9ed9ef97920f8,2 +np.float64,0xbff0000000000000,0xbfe85efab514f394,2 +np.float64,0xffe66988f1ecd312,0xbff0000000000000,2 +np.float64,0x3fb178a85e22f150,0x3fb171b9fbf95f1d,2 +np.float64,0x7f829b900025371f,0x3ff0000000000000,2 +np.float64,0x8000000000000000,0x8000000000000000,2 +np.float64,0x8006cb77f60d96f1,0x8006cb77f60d96f1,2 +np.float64,0x3fe0c5d53aa18baa,0x3fdec7012ab92b42,2 +np.float64,0x77266426ee4cd,0x77266426ee4cd,2 
+np.float64,0xbfec95f468392be9,0xbfe6d11428f60136,2 +np.float64,0x3fedbf532dfb7ea6,0x3fe75f8436dd1d58,2 +np.float64,0x8002fadd3f85f5bb,0x8002fadd3f85f5bb,2 +np.float64,0xbfefebaa8d3fd755,0xbfe8566c6aa90fba,2 +np.float64,0xffc7dd2b712fba58,0xbff0000000000000,2 +np.float64,0x7fe5d3a6e8aba74d,0x3ff0000000000000,2 +np.float64,0x2da061525b40d,0x2da061525b40d,2 +np.float64,0x7fcb9b9953373732,0x3ff0000000000000,2 +np.float64,0x2ca2f6fc59460,0x2ca2f6fc59460,2 +np.float64,0xffeb84b05af70960,0xbff0000000000000,2 +np.float64,0xffe551e86c6aa3d0,0xbff0000000000000,2 +np.float64,0xbfdb311311366226,0xbfd9aa6688faafb9,2 +np.float64,0xbfd4f3875629e70e,0xbfd43bcd73534c66,2 +np.float64,0x7fe95666f932accd,0x3ff0000000000000,2 +np.float64,0x3fc73dfb482e7bf7,0x3fc6fd70c20ebf60,2 +np.float64,0x800cd9e40939b3c8,0x800cd9e40939b3c8,2 +np.float64,0x3fb0c9fa422193f0,0x3fb0c3d38879a2ac,2 +np.float64,0xffd59a38372b3470,0xbff0000000000000,2 +np.float64,0x3fa8320ef4306420,0x3fa82d739e937d35,2 +np.float64,0x3fd517f16caa2fe4,0x3fd45c8de1e93b37,2 +np.float64,0xaed921655db24,0xaed921655db24,2 +np.float64,0x93478fb9268f2,0x93478fb9268f2,2 +np.float64,0x1615e28a2c2bd,0x1615e28a2c2bd,2 +np.float64,0xbfead23010f5a460,0xbfe5ea24d5d8f820,2 +np.float64,0x774a6070ee94d,0x774a6070ee94d,2 +np.float64,0x3fdf5874bd3eb0e9,0x3fdd0ef121dd915c,2 +np.float64,0x8004b25f53a964bf,0x8004b25f53a964bf,2 +np.float64,0xbfddacdd2ebb59ba,0xbfdbb78198fab36b,2 +np.float64,0x8008a3acf271475a,0x8008a3acf271475a,2 +np.float64,0xbfdb537c8736a6fa,0xbfd9c741038bb8f0,2 +np.float64,0xbfe56a133f6ad426,0xbfe2b3d5b8d259a1,2 +np.float64,0xffda1db531343b6a,0xbff0000000000000,2 +np.float64,0x3fcbe05f3a37c0be,0x3fcb71a54a64ddfb,2 +np.float64,0x7fe1ccaa7da39954,0x3ff0000000000000,2 +np.float64,0x3faeadd8343d5bb0,0x3faea475608860e6,2 +np.float64,0x3fe662ba1c2cc574,0x3fe354a6176e90df,2 +np.float64,0xffe4d49f4e69a93e,0xbff0000000000000,2 +np.float64,0xbfeadbc424f5b788,0xbfe5ef39dbe66343,2 +np.float64,0x99cf66f1339ed,0x99cf66f1339ed,2 +np.float64,0x33af77a2675f0,0x33af77a2675f0,2 +np.float64,0x7fec7b32ecf8f665,0x3ff0000000000000,2 +np.float64,0xffef3e44993e7c88,0xbff0000000000000,2 +np.float64,0xffe8f8ceac31f19c,0xbff0000000000000,2 +np.float64,0x7fe0d15b6da1a2b6,0x3ff0000000000000,2 +np.float64,0x4ba795c2974f3,0x4ba795c2974f3,2 +np.float64,0x3fe361aa37a6c354,0x3fe15079021d6b15,2 +np.float64,0xffe709714f6e12e2,0xbff0000000000000,2 +np.float64,0xffe7ea6a872fd4d4,0xbff0000000000000,2 +np.float64,0xffdb9441c8b72884,0xbff0000000000000,2 +np.float64,0xffd5e11ae9abc236,0xbff0000000000000,2 +np.float64,0xffe092a08b612540,0xbff0000000000000,2 +np.float64,0x3fe1f27e1ca3e4fc,0x3fe04685b5131207,2 +np.float64,0xbfe71ce1bdee39c4,0xbfe3c940809a7081,2 +np.float64,0xffe8c3aa68318754,0xbff0000000000000,2 +np.float64,0x800d4e2919da9c52,0x800d4e2919da9c52,2 +np.float64,0x7fe6c8bca76d9178,0x3ff0000000000000,2 +np.float64,0x7fced8751e3db0e9,0x3ff0000000000000,2 +np.float64,0xd61d0c8bac3a2,0xd61d0c8bac3a2,2 +np.float64,0x3fec57732938aee6,0x3fe6b22f15f38352,2 +np.float64,0xff9251cc7024a3a0,0xbff0000000000000,2 +np.float64,0xf4a68cb9e94d2,0xf4a68cb9e94d2,2 +np.float64,0x3feed76703bdaece,0x3fe7def0fc9a080c,2 +np.float64,0xbfe8971ff7712e40,0xbfe4ac3eb8ebff07,2 +np.float64,0x3fe4825f682904bf,0x3fe218c1952fe67d,2 +np.float64,0xbfd60f7698ac1eee,0xbfd539f0979b4b0c,2 +np.float64,0x3fcf0845993e1088,0x3fce7032f7180144,2 +np.float64,0x7fc83443f3306887,0x3ff0000000000000,2 +np.float64,0x3fe93123ae726247,0x3fe504e4fc437e89,2 +np.float64,0x3fbf9eb8363f3d70,0x3fbf75cdfa6828d5,2 
+np.float64,0xbf8b45e5d0368bc0,0xbf8b457c29dfe1a9,2 +np.float64,0x8006c2853d0d850b,0x8006c2853d0d850b,2 +np.float64,0xffef26e25ffe4dc4,0xbff0000000000000,2 +np.float64,0x7fefffffffffffff,0x3ff0000000000000,2 +np.float64,0xbfde98f2c2bd31e6,0xbfdc761bfab1c4cb,2 +np.float64,0xffb725e6222e4bd0,0xbff0000000000000,2 +np.float64,0x800c63ead5d8c7d6,0x800c63ead5d8c7d6,2 +np.float64,0x3fea087e95f410fd,0x3fe57d3ab440706c,2 +np.float64,0xbfdf9f8a603f3f14,0xbfdd4742d77dfa57,2 +np.float64,0xfff0000000000000,0xbff0000000000000,2 +np.float64,0xbfcdc0841d3b8108,0xbfcd3a401debba9a,2 +np.float64,0x800f0c8f4f7e191f,0x800f0c8f4f7e191f,2 +np.float64,0x800ba6e75fd74dcf,0x800ba6e75fd74dcf,2 +np.float64,0x7fee4927e8bc924f,0x3ff0000000000000,2 +np.float64,0x3fadf141903be283,0x3fade8878d9d3551,2 +np.float64,0x3efb1a267df64,0x3efb1a267df64,2 +np.float64,0xffebf55f22b7eabe,0xbff0000000000000,2 +np.float64,0x7fbe8045663d008a,0x3ff0000000000000,2 +np.float64,0x3fefc0129f7f8026,0x3fe843f8b7d6cf38,2 +np.float64,0xbfe846b420f08d68,0xbfe47d1709e43937,2 +np.float64,0x7fe8e87043f1d0e0,0x3ff0000000000000,2 +np.float64,0x3fcfb718453f6e31,0x3fcf14ecee7b32b4,2 +np.float64,0x7fe4306b71a860d6,0x3ff0000000000000,2 +np.float64,0x7fee08459f7c108a,0x3ff0000000000000,2 +np.float64,0x3fed705165fae0a3,0x3fe73a66369c5700,2 +np.float64,0x7fd0e63f4da1cc7e,0x3ff0000000000000,2 +np.float64,0xffd1a40c2ea34818,0xbff0000000000000,2 +np.float64,0xbfa369795c26d2f0,0xbfa36718218d46b3,2 +np.float64,0xef70b9f5dee17,0xef70b9f5dee17,2 +np.float64,0x3fb50a0a6e2a1410,0x3fb4fdf27724560a,2 +np.float64,0x7fe30a0f6166141e,0x3ff0000000000000,2 +np.float64,0xbfd7b3ca7daf6794,0xbfd6accb81032b2d,2 +np.float64,0x3fc21dceb3243b9d,0x3fc1ff15d5d277a3,2 +np.float64,0x3fe483e445a907c9,0x3fe219ca0e269552,2 +np.float64,0x3fb2b1e2a22563c0,0x3fb2a96554900eaf,2 +np.float64,0x4b1ff6409641,0x4b1ff6409641,2 +np.float64,0xbfd92eabc9b25d58,0xbfd7f55d7776d64e,2 +np.float64,0x8003b8604c8770c1,0x8003b8604c8770c1,2 +np.float64,0x800d20a9df1a4154,0x800d20a9df1a4154,2 +np.float64,0xecf8a535d9f15,0xecf8a535d9f15,2 +np.float64,0x3fe92d15bab25a2b,0x3fe50296aa15ae85,2 +np.float64,0x800239c205a47385,0x800239c205a47385,2 +np.float64,0x3fc48664a9290cc8,0x3fc459d126320ef6,2 +np.float64,0x3fe7620625eec40c,0x3fe3f3bcbee3e8c6,2 +np.float64,0x3fd242ff4ca48600,0x3fd1c81ed7a971c8,2 +np.float64,0xbfe39bafcfa73760,0xbfe17959c7a279db,2 +np.float64,0x7fdcd2567239a4ac,0x3ff0000000000000,2 +np.float64,0x3fe5f2f292ebe5e6,0x3fe30d12f05e2752,2 +np.float64,0x7fda3819d1347033,0x3ff0000000000000,2 +np.float64,0xffca5b4d4334b69c,0xbff0000000000000,2 +np.float64,0xb8a2b7cd71457,0xb8a2b7cd71457,2 +np.float64,0x3fee689603fcd12c,0x3fe7ad4ace26d6dd,2 +np.float64,0x7fe26541a564ca82,0x3ff0000000000000,2 +np.float64,0x3fe6912ee66d225e,0x3fe3720d242c4d82,2 +np.float64,0xffe6580c75ecb018,0xbff0000000000000,2 +np.float64,0x7fe01a3370603466,0x3ff0000000000000,2 +np.float64,0xffe84e3f84b09c7e,0xbff0000000000000,2 +np.float64,0x3ff0000000000000,0x3fe85efab514f394,2 +np.float64,0x3fe214d4266429a8,0x3fe05fec03a3c247,2 +np.float64,0x3fd00aec5da015d8,0x3fcf6e070ad4ad62,2 +np.float64,0x800aac8631f5590d,0x800aac8631f5590d,2 +np.float64,0xbfe7c4f5f76f89ec,0xbfe42fc1c57b4a13,2 +np.float64,0xaf146c7d5e28e,0xaf146c7d5e28e,2 +np.float64,0xbfe57188b66ae312,0xbfe2b8be4615ef75,2 +np.float64,0xffef8cb8e1ff1971,0xbff0000000000000,2 +np.float64,0x8001daf8aa63b5f2,0x8001daf8aa63b5f2,2 +np.float64,0x3fdddcc339bbb986,0x3fdbde5f3783538b,2 +np.float64,0xdd8c92c3bb193,0xdd8c92c3bb193,2 +np.float64,0xbfe861a148f0c342,0xbfe48cf1d228a336,2 
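Each `np.float64` record in this dataset packs an input bit pattern, the expected output bit pattern, and a maximum error in ulp (2 throughout). Spot-checking entries such as 0x3ff0000000000000 -> 0x3fe85efab514f394 (1.0 -> 0.7615941559557649) and +/-inf -> +/-1.0 suggests these are tanh validation vectors; a minimal decoder sketch under that assumption (the decode() helper below is illustrative, not part of the test suite):

import struct

import numpy as np

def decode(record):
    # Layout: "np.float64,<input bits>,<expected bits>,<max ulp>"
    _, x_hex, y_hex, ulp = record.strip().split(',')
    # Reinterpret the 64-bit hex pattern as an IEEE-754 double
    as_f64 = lambda h: struct.unpack('<d', struct.pack('<Q', int(h, 16)))[0]
    return as_f64(x_hex), as_f64(y_hex), int(ulp)

x, y, ulp = decode('np.float64,0x3ff0000000000000,0x3fe85efab514f394,2')
assert abs(np.tanh(x) - y) <= ulp * np.spacing(abs(y))  # within 2 ulp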
+np.float64,0xffe260a32e24c146,0xbff0000000000000,2 +np.float64,0x1f7474b43ee8f,0x1f7474b43ee8f,2 +np.float64,0x3fe81dbd89703b7c,0x3fe464d78df92b7b,2 +np.float64,0x7fed0101177a0201,0x3ff0000000000000,2 +np.float64,0x7fd8b419a8316832,0x3ff0000000000000,2 +np.float64,0x3fe93debccf27bd8,0x3fe50c27727917f0,2 +np.float64,0xe5ead05bcbd5a,0xe5ead05bcbd5a,2 +np.float64,0xbfebbbc4cff7778a,0xbfe663c4ca003bbf,2 +np.float64,0xbfea343eb474687e,0xbfe59529f73ea151,2 +np.float64,0x3fbe74a5963ce94b,0x3fbe50123ed05d8d,2 +np.float64,0x3fd31d3a5d263a75,0x3fd290c026cb38a5,2 +np.float64,0xbfd79908acaf3212,0xbfd695620e31c3c6,2 +np.float64,0xbfc26a350324d46c,0xbfc249f335f3e465,2 +np.float64,0xbfac38d5583871b0,0xbfac31866d12a45e,2 +np.float64,0x3fe40cea672819d5,0x3fe1c83754e72c92,2 +np.float64,0xbfa74770642e8ee0,0xbfa74355fcf67332,2 +np.float64,0x7fc60942d32c1285,0x3ff0000000000000,2 diff --git a/python/numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc b/python/numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc new file mode 100644 index 000000000..8d94c85f2 Binary files /dev/null and b/python/numpy/_core/tests/examples/cython/__pycache__/setup.cpython-312.pyc differ diff --git a/python/numpy/_core/tests/examples/cython/checks.pyx b/python/numpy/_core/tests/examples/cython/checks.pyx new file mode 100644 index 000000000..57df05c1e --- /dev/null +++ b/python/numpy/_core/tests/examples/cython/checks.pyx @@ -0,0 +1,373 @@ +#cython: language_level=3 + +""" +Functions in this module give python-space wrappers for cython functions +exposed in numpy/__init__.pxd, so they can be tested in test_cython.py +""" +cimport numpy as cnp +cnp.import_array() + + +def is_td64(obj): + return cnp.is_timedelta64_object(obj) + + +def is_dt64(obj): + return cnp.is_datetime64_object(obj) + + +def get_dt64_value(obj): + return cnp.get_datetime64_value(obj) + + +def get_td64_value(obj): + return cnp.get_timedelta64_value(obj) + + +def get_dt64_unit(obj): + return cnp.get_datetime64_unit(obj) + + +def is_integer(obj): + return isinstance(obj, (cnp.integer, int)) + + +def get_datetime_iso_8601_strlen(): + return cnp.get_datetime_iso_8601_strlen(0, cnp.NPY_FR_ns) + + +def convert_datetime64_to_datetimestruct(): + cdef: + cnp.npy_datetimestruct dts + cnp.PyArray_DatetimeMetaData meta + cnp.int64_t value = 1647374515260292 + # i.e. 
(time.time() * 10**6) at 2022-03-15 20:01:55.260292 UTC + + meta.base = cnp.NPY_FR_us + meta.num = 1 + cnp.convert_datetime64_to_datetimestruct(&meta, value, &dts) + return dts + + +def make_iso_8601_datetime(dt: "datetime"): + cdef: + cnp.npy_datetimestruct dts + char result[36] # 36 corresponds to NPY_FR_s passed below + int local = 0 + int utc = 0 + int tzoffset = 0 + + dts.year = dt.year + dts.month = dt.month + dts.day = dt.day + dts.hour = dt.hour + dts.min = dt.minute + dts.sec = dt.second + dts.us = dt.microsecond + dts.ps = dts.as = 0 + + cnp.make_iso_8601_datetime( + &dts, + result, + sizeof(result), + local, + utc, + cnp.NPY_FR_s, + tzoffset, + cnp.NPY_NO_CASTING, + ) + return result + + +cdef cnp.broadcast multiiter_from_broadcast_obj(object bcast): + cdef dict iter_map = { + 1: cnp.PyArray_MultiIterNew1, + 2: cnp.PyArray_MultiIterNew2, + 3: cnp.PyArray_MultiIterNew3, + 4: cnp.PyArray_MultiIterNew4, + 5: cnp.PyArray_MultiIterNew5, + } + arrays = [x.base for x in bcast.iters] + cdef cnp.broadcast result = iter_map[len(arrays)](*arrays) + return result + + +def get_multiiter_size(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.size + + +def get_multiiter_number_of_dims(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.nd + + +def get_multiiter_current_index(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.index + + +def get_multiiter_num_of_iterators(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return multi.numiter + + +def get_multiiter_shape(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return tuple([multi.dimensions[i] for i in range(bcast.nd)]) + + +def get_multiiter_iters(bcast: "broadcast"): + cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast) + return tuple([multi.iters[i] for i in range(bcast.numiter)]) + + +def get_default_integer(): + if cnp.NPY_DEFAULT_INT == cnp.NPY_LONG: + return cnp.dtype("long") + if cnp.NPY_DEFAULT_INT == cnp.NPY_INTP: + return cnp.dtype("intp") + return None + +def get_ravel_axis(): + return cnp.NPY_RAVEL_AXIS + + +def conv_intp(cnp.intp_t val): + return val + + +def get_dtype_flags(cnp.dtype dtype): + return dtype.flags + + +cdef cnp.NpyIter* npyiter_from_nditer_obj(object it): + """A function to create a NpyIter struct from a nditer object. 
+
+    This function is only meant for testing purposes and only extracts the
+    necessary info from nditer to test the functionality of NpyIter methods.
+    """
+    cdef:
+        cnp.NpyIter* cit
+        cnp.PyArray_Descr* op_dtypes[3]
+        cnp.npy_uint32 op_flags[3]
+        cnp.PyArrayObject* ops[3]
+        cnp.npy_uint32 flags = 0
+
+    if it.has_index:
+        flags |= cnp.NPY_ITER_C_INDEX
+    if it.has_delayed_bufalloc:
+        flags |= cnp.NPY_ITER_BUFFERED | cnp.NPY_ITER_DELAY_BUFALLOC
+    if it.has_multi_index:
+        flags |= cnp.NPY_ITER_MULTI_INDEX
+
+    # At a minimum, one of READWRITE, READONLY or WRITEONLY must be
+    # specified for op_flags
+    for i in range(it.nop):
+        op_flags[i] = cnp.NPY_ITER_READONLY
+
+    for i in range(it.nop):
+        op_dtypes[i] = cnp.PyArray_DESCR(it.operands[i])
+        ops[i] = <cnp.PyArrayObject *>it.operands[i]
+
+    cit = cnp.NpyIter_MultiNew(it.nop, &ops[0], flags, cnp.NPY_KEEPORDER,
+                               cnp.NPY_NO_CASTING, &op_flags[0],
+                               NULL)
+    return cit
+
+
+def get_npyiter_size(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_GetIterSize(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def get_npyiter_ndim(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_GetNDim(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def get_npyiter_nop(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_GetNOp(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def get_npyiter_operands(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    try:
+        arr = cnp.NpyIter_GetOperandArray(cit)
+        return tuple([arr[i] for i in range(it.nop)])
+    finally:
+        cnp.NpyIter_Deallocate(cit)
+
+
+def get_npyiter_itviews(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = tuple([cnp.NpyIter_GetIterView(cit, i) for i in range(it.nop)])
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def get_npyiter_dtypes(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    try:
+        arr = cnp.NpyIter_GetDescrArray(cit)
+        return tuple([arr[i] for i in range(it.nop)])
+    finally:
+        cnp.NpyIter_Deallocate(cit)
+
+
+def npyiter_has_delayed_bufalloc(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_HasDelayedBufAlloc(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def npyiter_has_index(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_HasIndex(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def npyiter_has_multi_index(it: "nditer"):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    result = cnp.NpyIter_HasMultiIndex(cit)
+    cnp.NpyIter_Deallocate(cit)
+    return result
+
+
+def test_get_multi_index_iter_next(it: "nditer", cnp.ndarray[cnp.float64_t, ndim=2] arr):
+    cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
+    cdef cnp.NpyIter_GetMultiIndexFunc get_multi_index = \
+        cnp.NpyIter_GetGetMultiIndex(cit, NULL)
+    cdef cnp.NpyIter_IterNextFunc iternext = \
+        cnp.NpyIter_GetIterNext(cit, NULL)
+    return 1
+
+
+def npyiter_has_finished(it: "nditer"):
+    cdef cnp.NpyIter* cit
+    try:
+        cit = npyiter_from_nditer_obj(it)
+        cnp.NpyIter_GotoIterIndex(cit, it.index)
+        return not (cnp.NpyIter_GetIterIndex(cit) < cnp.NpyIter_GetIterSize(cit))
+    finally:
+        cnp.NpyIter_Deallocate(cit)
+
+def compile_fillwithbyte():
+    # Regression test for gh-25878, mostly checks it compiles.
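+    # PyArray_ZEROS allocates a zeroed 2-d uint8 array of shape (1, 2);
+    # PyArray_FILLWBYTE then sets every byte to 1, so the result should
+    # compare equal to np.ones((1, 2), dtype=np.uint8).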
+    cdef cnp.npy_intp dims[2]
+    dims = (1, 2)
+    pos = cnp.PyArray_ZEROS(2, dims, cnp.NPY_UINT8, 0)
+    cnp.PyArray_FILLWBYTE(pos, 1)
+    return pos
+
+def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr):
+    # This works since we compile in C mode; it will fail in C++ mode
+    arr[1].real += 1
+    arr[1].imag += 1
+    # This works in both modes
+    arr[1].real = arr[1].real + 1
+    arr[1].imag = arr[1].imag + 1
+
+
+def npystring_pack(arr):
+    cdef char *string = "Hello world"
+    cdef size_t size = 11
+
+    allocator = cnp.NpyString_acquire_allocator(
+        <cnp.PyArray_StringDTypeObject *>cnp.PyArray_DESCR(arr)
+    )
+
+    # copy string->packed_string, the pointer to the underlying array buffer
+    ret = cnp.NpyString_pack(
+        allocator, <cnp.npy_packed_static_string *>cnp.PyArray_DATA(arr),
+        string, size,
+    )
+
+    cnp.NpyString_release_allocator(allocator)
+    return ret
+
+
+def npystring_load(arr):
+    allocator = cnp.NpyString_acquire_allocator(
+        <cnp.PyArray_StringDTypeObject *>cnp.PyArray_DESCR(arr)
+    )
+
+    cdef cnp.npy_static_string sdata
+    sdata.size = 0
+    sdata.buf = NULL
+
+    cdef cnp.npy_packed_static_string *packed_string = \
+        <cnp.npy_packed_static_string *>cnp.PyArray_DATA(arr)
+    cdef int is_null = cnp.NpyString_load(allocator, packed_string, &sdata)
+    cnp.NpyString_release_allocator(allocator)
+    if is_null == -1:
+        raise ValueError("String unpacking failed.")
+    elif is_null == 1:
+        # String in the array buffer is the null string
+        return ""
+    else:
+        # Cython syntax for copying a c string to python bytestring:
+        # slice the char * by the length of the string
+        return sdata.buf[:sdata.size].decode('utf-8')
+
+
+def npystring_pack_multiple(arr1, arr2):
+    cdef cnp.npy_string_allocator *allocators[2]
+    cdef cnp.PyArray_Descr *descrs[2]
+    descrs[0] = cnp.PyArray_DESCR(arr1)
+    descrs[1] = cnp.PyArray_DESCR(arr2)
+
+    cnp.NpyString_acquire_allocators(2, descrs, allocators)
+
+    # Write into the first element of each array
+    cdef int ret1 = cnp.NpyString_pack(
+        allocators[0], <cnp.npy_packed_static_string *>cnp.PyArray_DATA(arr1),
+        "Hello world", 11,
+    )
+    cdef int ret2 = cnp.NpyString_pack(
+        allocators[1], <cnp.npy_packed_static_string *>cnp.PyArray_DATA(arr2),
+        "test this", 9,
+    )
+
+    # Write a null string into the last element
+    cdef cnp.npy_intp elsize = cnp.PyArray_ITEMSIZE(arr1)
+    cdef int ret3 = cnp.NpyString_pack_null(
+        allocators[0],
+        <cnp.npy_packed_static_string *>(<char *>cnp.PyArray_DATA(arr1) + 2*elsize),
+    )
+
+    cnp.NpyString_release_allocators(2, allocators)
+    if ret1 == -1 or ret2 == -1 or ret3 == -1:
+        return -1
+
+    return 0
+
+
+def npystring_allocators_other_types(arr1, arr2):
+    cdef cnp.npy_string_allocator *allocators[2]
+    cdef cnp.PyArray_Descr *descrs[2]
+    descrs[0] = cnp.PyArray_DESCR(arr1)
+    descrs[1] = cnp.PyArray_DESCR(arr2)
+
+    cnp.NpyString_acquire_allocators(2, descrs, allocators)
+
+    # None of the dtypes here are StringDType, so every allocator
+    # should be NULL upon acquisition.
+    cdef int ret = 0
+    for allocator in allocators:
+        if allocator != NULL:
+            ret = -1
+            break
+
+    cnp.NpyString_release_allocators(2, allocators)
+    return ret
+
+
+def check_npy_uintp_type_enum():
+    # Regression test for gh-27890: cnp.NPY_UINTP was not defined.
+    # Cython would fail to compile this before gh-27890 was fixed.
+    return cnp.NPY_UINTP > 0
diff --git a/python/numpy/_core/tests/examples/cython/meson.build b/python/numpy/_core/tests/examples/cython/meson.build
new file mode 100644
index 000000000..8362c339a
--- /dev/null
+++ b/python/numpy/_core/tests/examples/cython/meson.build
@@ -0,0 +1,43 @@
+project('checks', 'c', 'cython')
+
+py = import('python').find_installation(pure: false)
+
+cc = meson.get_compiler('c')
+cy = meson.get_compiler('cython')
+
+# Keep synced with pyproject.toml
+if not cy.version().version_compare('>=3.0.6')
+  error('tests require Cython >= 3.0.6')
+endif
+
+cython_args = []
+if cy.version().version_compare('>=3.1.0')
+  cython_args += ['-Xfreethreading_compatible=True']
+endif
+
+npy_include_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))'
+  ], check: true).stdout().strip()
+
+npy_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))'
+  ], check: true).stdout().strip()
+
+# TODO: This is a hack due to gh-25135, where cython may not find the right
+# __init__.pxd file.
+add_project_arguments('-I', npy_path, language : 'cython')
+
+py.extension_module(
+    'checks',
+    'checks.pyx',
+    install: false,
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=0',  # Cython still uses old NumPy C API
+        # Require 1.25+ to test datetime additions
+        '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION',
+    ],
+    include_directories: [npy_include_path],
+    cython_args: cython_args,
+)
diff --git a/python/numpy/_core/tests/examples/cython/setup.py b/python/numpy/_core/tests/examples/cython/setup.py
new file mode 100644
index 000000000..eb57477fc
--- /dev/null
+++ b/python/numpy/_core/tests/examples/cython/setup.py
@@ -0,0 +1,39 @@
+"""
+Provide python-space access to the functions exposed in numpy/__init__.pxd
+for testing.
+""" + +import os +from distutils.core import setup + +import Cython +from Cython.Build import cythonize +from setuptools.extension import Extension + +import numpy as np +from numpy._utils import _pep440 + +macros = [ + ("NPY_NO_DEPRECATED_API", 0), + # Require 1.25+ to test datetime additions + ("NPY_TARGET_VERSION", "NPY_2_0_API_VERSION"), +] + +checks = Extension( + "checks", + sources=[os.path.join('.', "checks.pyx")], + include_dirs=[np.get_include()], + define_macros=macros, +) + +extensions = [checks] + +compiler_directives = {} +if _pep440.parse(Cython.__version__) >= _pep440.parse("3.1.0a0"): + compiler_directives['freethreading_compatible'] = True + +setup( + ext_modules=cythonize( + extensions, + compiler_directives=compiler_directives) +) diff --git a/python/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc b/python/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc new file mode 100644 index 000000000..7c515118b Binary files /dev/null and b/python/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-312.pyc differ diff --git a/python/numpy/_core/tests/examples/limited_api/limited_api1.c b/python/numpy/_core/tests/examples/limited_api/limited_api1.c new file mode 100644 index 000000000..3dbf5698f --- /dev/null +++ b/python/numpy/_core/tests/examples/limited_api/limited_api1.c @@ -0,0 +1,17 @@ +#define Py_LIMITED_API 0x03060000 + +#include +#include +#include + +static PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "limited_api1" +}; + +PyMODINIT_FUNC PyInit_limited_api1(void) +{ + import_array(); + import_umath(); + return PyModule_Create(&moduledef); +} diff --git a/python/numpy/_core/tests/examples/limited_api/limited_api2.pyx b/python/numpy/_core/tests/examples/limited_api/limited_api2.pyx new file mode 100644 index 000000000..327d5b038 --- /dev/null +++ b/python/numpy/_core/tests/examples/limited_api/limited_api2.pyx @@ -0,0 +1,11 @@ +#cython: language_level=3 + +""" +Make sure cython can compile in limited API mode (see meson.build) +""" + +cdef extern from "numpy/arrayobject.h": + pass +cdef extern from "numpy/arrayscalars.h": + pass + diff --git a/python/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/python/numpy/_core/tests/examples/limited_api/limited_api_latest.c new file mode 100644 index 000000000..13668f2f0 --- /dev/null +++ b/python/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -0,0 +1,19 @@ +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + +#include +#include +#include + +static PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "limited_api_latest" +}; + +PyMODINIT_FUNC PyInit_limited_api_latest(void) +{ + import_array(); + import_umath(); + return PyModule_Create(&moduledef); +} diff --git a/python/numpy/_core/tests/examples/limited_api/meson.build b/python/numpy/_core/tests/examples/limited_api/meson.build new file mode 100644 index 000000000..65287d865 --- /dev/null +++ b/python/numpy/_core/tests/examples/limited_api/meson.build @@ -0,0 +1,59 @@ +project('checks', 'c', 'cython') + +py = import('python').find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +# Keep synced with pyproject.toml +if not cy.version().version_compare('>=3.0.6') + error('tests requires Cython >= 3.0.6') +endif + +npy_include_path = run_command(py, [ + '-c', + 'import os; os.chdir(".."); import numpy; 
+
+npy_path = run_command(py, [
+    '-c',
+    'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))'
+  ], check: true).stdout().strip()
+
+# TODO: This is a hack due to https://github.com/cython/cython/issues/5820,
+# where cython may not find the right __init__.pxd file.
+add_project_arguments('-I', npy_path, language : 'cython')
+
+py.extension_module(
+    'limited_api1',
+    'limited_api1.c',
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION',
+    ],
+    include_directories: [npy_include_path],
+    limited_api: '3.6',
+)
+
+py.extension_module(
+    'limited_api_latest',
+    'limited_api_latest.c',
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION',
+    ],
+    include_directories: [npy_include_path],
+    limited_api: py.language_version(),
+)
+
+py.extension_module(
+    'limited_api2',
+    'limited_api2.pyx',
+    install: false,
+    c_args: [
+        '-DNPY_NO_DEPRECATED_API=0',
+        # Require 1.25+ to test datetime additions
+        '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION',
+        '-DCYTHON_LIMITED_API=1',
+    ],
+    include_directories: [npy_include_path],
+    limited_api: '3.7',
+)
diff --git a/python/numpy/_core/tests/examples/limited_api/setup.py b/python/numpy/_core/tests/examples/limited_api/setup.py
new file mode 100644
index 000000000..16adcd123
--- /dev/null
+++ b/python/numpy/_core/tests/examples/limited_api/setup.py
@@ -0,0 +1,24 @@
+"""
+Build an example package using the limited Python C API.
+"""
+
+import os
+
+from setuptools import Extension, setup
+
+import numpy as np
+
+macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")]
+
+limited_api = Extension(
+    "limited_api",
+    sources=[os.path.join('.', "limited_api.c")],
+    include_dirs=[np.get_include()],
+    define_macros=macros,
+)
+
+extensions = [limited_api]
+
+setup(
+    ext_modules=extensions
+)
diff --git a/python/numpy/_core/tests/test__exceptions.py b/python/numpy/_core/tests/test__exceptions.py
new file mode 100644
index 000000000..35782e7a5
--- /dev/null
+++ b/python/numpy/_core/tests/test__exceptions.py
@@ -0,0 +1,90 @@
+"""
+Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
+"""
+
+import pickle
+
+import pytest
+
+import numpy as np
+from numpy.exceptions import AxisError
+
+_ArrayMemoryError = np._core._exceptions._ArrayMemoryError
+_UFuncNoLoopError = np._core._exceptions._UFuncNoLoopError
+
+class TestArrayMemoryError:
+    def test_pickling(self):
+        """ Test that _ArrayMemoryError can be pickled """
+        error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+        res = pickle.loads(pickle.dumps(error))
+        assert res._total_size == error._total_size
+
+    def test_str(self):
+        e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+        str(e)  # not crashing is enough
+
+    # testing these properties is easier than testing the full string repr
+    def test__size_to_string(self):
+        """ Test e._size_to_string """
+        f = _ArrayMemoryError._size_to_string
+        Ki = 1024
+        assert f(0) == '0 bytes'
+        assert f(1) == '1 bytes'
+        assert f(1023) == '1023 bytes'
+        assert f(Ki) == '1.00 KiB'
+        assert f(Ki + 1) == '1.00 KiB'
+        assert f(10 * Ki) == '10.0 KiB'
+        assert f(int(999.4 * Ki)) == '999. KiB'
+        assert f(int(1023.4 * Ki)) == '1023. KiB'
+        assert f(int(1023.5 * Ki)) == '1.00 MiB'
+        assert f(Ki * Ki) == '1.00 MiB'
+
+        # 1023.9999 MiB should round to 1 GiB
+        assert f(int(Ki * Ki * Ki * 0.9999)) == '1.00 GiB'
+        assert f(Ki * Ki * Ki * Ki * Ki * Ki) == '1.00 EiB'
+        # larger than sys.maxsize; adding larger prefixes isn't going to help
+        # anyway.
+        assert f(Ki * Ki * Ki * Ki * Ki * Ki * 123456) == '123456. EiB'
+
+    def test__total_size(self):
+        """ Test e._total_size """
+        e = _ArrayMemoryError((1,), np.dtype(np.uint8))
+        assert e._total_size == 1
+
+        e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
+        assert e._total_size == 1024
+
+
+class TestUFuncNoLoopError:
+    def test_pickling(self):
+        """ Test that _UFuncNoLoopError can be pickled """
+        assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)
+
+
+@pytest.mark.parametrize("args", [
+    (2, 1, None),
+    (2, 1, "test_prefix"),
+    ("test message",),
+])
+class TestAxisError:
+    def test_attr(self, args):
+        """Validate attribute types."""
+        exc = AxisError(*args)
+        if len(args) == 1:
+            assert exc.axis is None
+            assert exc.ndim is None
+        else:
+            axis, ndim, *_ = args
+            assert exc.axis == axis
+            assert exc.ndim == ndim
+
+    def test_pickling(self, args):
+        """Test that `AxisError` can be pickled."""
+        exc = AxisError(*args)
+        exc2 = pickle.loads(pickle.dumps(exc))
+
+        assert type(exc) is type(exc2)
+        for name in ("axis", "ndim", "args"):
+            attr1 = getattr(exc, name)
+            attr2 = getattr(exc2, name)
+            assert attr1 == attr2, name
diff --git a/python/numpy/_core/tests/test_abc.py b/python/numpy/_core/tests/test_abc.py
new file mode 100644
index 000000000..aee1904f1
--- /dev/null
+++ b/python/numpy/_core/tests/test_abc.py
@@ -0,0 +1,54 @@
+import numbers
+
+import numpy as np
+from numpy._core.numerictypes import sctypes
+from numpy.testing import assert_
+
+
+class TestABC:
+    def test_abstract(self):
+        assert_(issubclass(np.number, numbers.Number))
+
+        assert_(issubclass(np.inexact, numbers.Complex))
+        assert_(issubclass(np.complexfloating, numbers.Complex))
+        assert_(issubclass(np.floating, numbers.Real))
+
+        assert_(issubclass(np.integer, numbers.Integral))
+        assert_(issubclass(np.signedinteger, numbers.Integral))
+        assert_(issubclass(np.unsignedinteger, numbers.Integral))
+
+    def test_floats(self):
+        for t in sctypes['float']:
+            assert_(isinstance(t(), numbers.Real),
+                    f"{t.__name__} is not instance of Real")
+            assert_(issubclass(t, numbers.Real),
+                    f"{t.__name__} is not subclass of Real")
+            assert_(not isinstance(t(), numbers.Rational),
+                    f"{t.__name__} is instance of Rational")
+            assert_(not issubclass(t, numbers.Rational),
+                    f"{t.__name__} is subclass of Rational")
+
+    def test_complex(self):
+        for t in sctypes['complex']:
+            assert_(isinstance(t(), numbers.Complex),
+                    f"{t.__name__} is not instance of Complex")
+            assert_(issubclass(t, numbers.Complex),
+                    f"{t.__name__} is not subclass of Complex")
+            assert_(not isinstance(t(), numbers.Real),
+                    f"{t.__name__} is instance of Real")
+            assert_(not issubclass(t, numbers.Real),
+                    f"{t.__name__} is subclass of Real")
+
+    def test_int(self):
+        for t in sctypes['int']:
+            assert_(isinstance(t(), numbers.Integral),
+                    f"{t.__name__} is not instance of Integral")
+            assert_(issubclass(t, numbers.Integral),
+                    f"{t.__name__} is not subclass of Integral")
+
+    def test_uint(self):
+        for t in sctypes['uint']:
+            assert_(isinstance(t(), numbers.Integral),
+                    f"{t.__name__} is not instance of Integral")
+            assert_(issubclass(t, numbers.Integral),
+                    f"{t.__name__} is not subclass of Integral")
diff --git a/python/numpy/_core/tests/test_api.py
b/python/numpy/_core/tests/test_api.py new file mode 100644 index 000000000..5bc88e93d --- /dev/null +++ b/python/numpy/_core/tests/test_api.py @@ -0,0 +1,654 @@ +import sys + +import pytest +from numpy._core._rational_tests import rational + +import numpy as np +import numpy._core.umath as ncu +from numpy.testing import ( + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_warns, +) + + +def test_array_array(): + tobj = type(object) + ones11 = np.ones((1, 1), np.float64) + tndarray = type(ones11) + # Test is_ndarray + assert_equal(np.array(ones11, dtype=np.float64), ones11) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tndarray) + np.array(ones11) + assert_equal(old_refcount, sys.getrefcount(tndarray)) + + # test None + assert_equal(np.array(None, dtype=np.float64), + np.array(np.nan, dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(tobj) + np.array(None, dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(tobj)) + + # test scalar + assert_equal(np.array(1.0, dtype=np.float64), + np.ones((), dtype=np.float64)) + if HAS_REFCOUNT: + old_refcount = sys.getrefcount(np.float64) + np.array(np.array(1.0, dtype=np.float64), dtype=np.float64) + assert_equal(old_refcount, sys.getrefcount(np.float64)) + + # test string + S2 = np.dtype((bytes, 2)) + S3 = np.dtype((bytes, 3)) + S5 = np.dtype((bytes, 5)) + assert_equal(np.array(b"1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array(b"1.0").dtype, S3) + assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3) + assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1.")) + assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5)) + + # test string + U2 = np.dtype((str, 2)) + U3 = np.dtype((str, 3)) + U5 = np.dtype((str, 5)) + assert_equal(np.array("1.0", dtype=np.float64), + np.ones((), dtype=np.float64)) + assert_equal(np.array("1.0").dtype, U3) + assert_equal(np.array("1.0", dtype=str).dtype, U3) + assert_equal(np.array("1.0", dtype=U2), np.array("1.")) + assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5)) + + builtins = getattr(__builtins__, '__dict__', __builtins__) + assert_(hasattr(builtins, 'get')) + + # test memoryview + dat = np.array(memoryview(b'1.0'), dtype=np.float64) + assert_equal(dat, [49.0, 46.0, 48.0]) + assert_(dat.dtype.type is np.float64) + + dat = np.array(memoryview(b'1.0')) + assert_equal(dat, [49, 46, 48]) + assert_(dat.dtype.type is np.uint8) + + # test array interface + a = np.array(100.0, dtype=np.float64) + o = type("o", (object,), + {"__array_interface__": a.__array_interface__}) + assert_equal(np.array(o, dtype=np.float64), a) + + # test array_struct interface + a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')], + dtype=[('f0', int), ('f1', float), ('f2', str)]) + o = type("o", (object,), + {"__array_struct__": a.__array_struct__}) + # wasn't what I expected... is np.array(o) supposed to equal a ? + # instead we get a array([...], dtype=">V18") + assert_equal(bytes(np.array(o).data), bytes(a.data)) + + # test __array__ + def custom__array__(self, dtype=None, copy=None): + return np.array(100.0, dtype=dtype, copy=copy) + + o = type("o", (object,), {"__array__": custom__array__})() + assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64)) + + # test recursion + nested = 1.5 + for i in range(ncu.MAXDIMS): + nested = [nested] + + # no error + np.array(nested) + + # Exceeds recursion limit + assert_raises(ValueError, np.array, [nested], dtype=np.float64) + + # Try with lists... 
+ # float32 + assert_equal(np.array([None] * 10, dtype=np.float32), + np.full((10,), np.nan, dtype=np.float32)) + assert_equal(np.array([[None]] * 10, dtype=np.float32), + np.full((10, 1), np.nan, dtype=np.float32)) + assert_equal(np.array([[None] * 10], dtype=np.float32), + np.full((1, 10), np.nan, dtype=np.float32)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float32), + np.full((10, 10), np.nan, dtype=np.float32)) + # float64 + assert_equal(np.array([None] * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([[None]] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([[None] * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array([1.0] * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([[1.0]] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + # Try with tuples + assert_equal(np.array((None,) * 10, dtype=np.float64), + np.full((10,), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,)] * 10, dtype=np.float64), + np.full((10, 1), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10], dtype=np.float64), + np.full((1, 10), np.nan, dtype=np.float64)) + assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64), + np.full((10, 10), np.nan, dtype=np.float64)) + + assert_equal(np.array((1.0,) * 10, dtype=np.float64), + np.ones((10,), dtype=np.float64)) + assert_equal(np.array([(1.0,)] * 10, dtype=np.float64), + np.ones((10, 1), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10], dtype=np.float64), + np.ones((1, 10), dtype=np.float64)) + assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64), + np.ones((10, 10), dtype=np.float64)) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test___array___refcount(): + class MyArray: + def __init__(self, dtype): + self.val = np.array(-1, dtype=dtype) + + def __array__(self, dtype=None, copy=None): + return self.val.__array__(dtype=dtype, copy=copy) + + # test all possible scenarios: + # dtype(none | same | different) x copy(true | false | none) + dt = np.dtype(np.int32) + old_refcount = sys.getrefcount(dt) + np.array(MyArray(dt)) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + np.array(MyArray(dt), dtype=dt, copy=None) + assert_equal(old_refcount, sys.getrefcount(dt)) + dt2 = np.dtype(np.int16) + old_refcount2 = sys.getrefcount(dt2) + np.array(MyArray(dt), dtype=dt2) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + np.array(MyArray(dt), dtype=dt2, copy=None) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + with pytest.raises(ValueError): + np.array(MyArray(dt), dtype=dt2, copy=False) + assert_equal(old_refcount2, sys.getrefcount(dt2)) + + +@pytest.mark.parametrize("array", [True, False]) +def test_array_impossible_casts(array): + # All builtin types can be forcibly cast, at least theoretically, + # but user dtypes cannot necessarily. 
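+    # rational is a user-defined dtype from numpy's own test helpers; no cast
+    # to datetime64 ("M8") is registered for it, so np.array must raise
+    # TypeError below for both the scalar and the 0-d array case.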
+ rt = rational(1, 2) + if array: + rt = np.array(rt) + with assert_raises(TypeError): + np.array(rt, dtype="M8") + + +def test_array_astype(): + a = np.arange(6, dtype='f4').reshape(2, 3) + # Default behavior: allows unsafe casts, keeps memory layout, + # always copies. + b = a.astype('i4') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.strides, b.strides) + b = a.T.astype('i4') + assert_equal(a.T, b) + assert_equal(b.dtype, np.dtype('i4')) + assert_equal(a.T.strides, b.strides) + b = a.astype('f4') + assert_equal(a, b) + assert_(not (a is b)) + + # copy=False parameter skips a copy + b = a.astype('f4', copy=False) + assert_(a is b) + + # order parameter allows overriding of the memory layout, + # forcing a copy if the layout is wrong + b = a.astype('f4', order='F', copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(b.flags.f_contiguous) + + b = a.astype('f4', order='C', copy=False) + assert_equal(a, b) + assert_(a is b) + assert_(b.flags.c_contiguous) + + # casting parameter allows catching bad casts + b = a.astype('c8', casting='safe') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('c8')) + + assert_raises(TypeError, a.astype, 'i4', casting='safe') + + # subok=False passes through a non-subclassed array + b = a.astype('f4', subok=0, copy=False) + assert_(a is b) + + class MyNDArray(np.ndarray): + pass + + a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray) + + # subok=True passes through a subclass + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), MyNDArray) + + # subok=False never returns a subclass + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not MyNDArray) + + # Make sure converting from string object to fixed length string + # does not truncate. 
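+    # The discovered itemsize must come from the longest element (100
+    # characters here), so the expected result dtypes are S100 and U100.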
+ a = np.array([b'a' * 100], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S100')) + a = np.array(['a' * 100], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U100')) + + # Same test as above but for strings shorter than 64 characters + a = np.array([b'a' * 10], dtype='O') + b = a.astype('S') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('S10')) + a = np.array(['a' * 10], dtype='O') + b = a.astype('U') + assert_equal(a, b) + assert_equal(b.dtype, np.dtype('U10')) + + a = np.array(123456789012345678901234567890, dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='O').astype('U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array([123456789012345678901234567890], dtype='O').astype('S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array([123456789012345678901234567890], dtype='O').astype('U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array(123456789012345678901234567890, dtype='S') + assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30')) + a = np.array(123456789012345678901234567890, dtype='U') + assert_array_equal(a, np.array('1234567890' * 3, dtype='U30')) + + a = np.array('a\u0140', dtype='U') + b = np.ndarray(buffer=a, dtype='uint32', shape=2) + assert_(b.size == 2) + + a = np.array([1000], dtype='i4') + assert_raises(TypeError, a.astype, 'S1', casting='safe') + + a = np.array(1000, dtype='i4') + assert_raises(TypeError, a.astype, 'U1', casting='safe') + + # gh-24023 + assert_raises(TypeError, a.astype) + +@pytest.mark.parametrize("dt", ["S", "U"]) +def test_array_astype_to_string_discovery_empty(dt): + # See also gh-19085 + arr = np.array([""], dtype=object) + # Note, the itemsize is the `0 -> 1` logic, which should change. + # The important part the test is rather that it does not error. + assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize + + # check the same thing for `np.can_cast` (since it accepts arrays) + assert np.can_cast(arr, dt, casting="unsafe") + assert not np.can_cast(arr, dt, casting="same_kind") + # as well as for the object as a descriptor: + assert np.can_cast("O", dt, casting="unsafe") + +@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"]) +def test_array_astype_to_void(dt): + dt = np.dtype(dt) + arr = np.array([], dtype=dt) + assert arr.astype("V").dtype.itemsize == dt.itemsize + +def test_object_array_astype_to_void(): + # This is different to `test_array_astype_to_void` as object arrays + # are inspected. 
The default void is "V8" (8 is the length of double) + arr = np.array([], dtype="O").astype("V") + assert arr.dtype == "V8" + +@pytest.mark.parametrize("t", + np._core.sctypes['uint'] + + np._core.sctypes['int'] + + np._core.sctypes['float'] +) +def test_array_astype_warning(t): + # test ComplexWarning when casting from complex to float or int + a = np.array(10, dtype=np.complex128) + assert_warns(np.exceptions.ComplexWarning, a.astype, t) + +@pytest.mark.parametrize(["dtype", "out_dtype"], + [(np.bytes_, np.bool), + (np.str_, np.bool), + (np.dtype("S10,S9"), np.dtype("?,?")), + # The following also checks unaligned unicode access: + (np.dtype("S7,U9"), np.dtype("?,?"))]) +def test_string_to_boolean_cast(dtype, out_dtype): + # Only the last two (empty) strings are falsy (the `\0` is stripped): + arr = np.array( + ["10", "10\0\0\0", "0\0\0", "0", "False", " ", "", "\0"], + dtype=dtype) + expected = np.array( + [True, True, True, True, True, True, False, False], + dtype=out_dtype) + assert_array_equal(arr.astype(out_dtype), expected) + # As it's similar, check that nonzero behaves the same (structs are + # nonzero if all entries are) + assert_array_equal(np.nonzero(arr), np.nonzero(expected)) + +@pytest.mark.parametrize("str_type", [str, bytes, np.str_]) +@pytest.mark.parametrize("scalar_type", + [np.complex64, np.complex128, np.clongdouble]) +def test_string_to_complex_cast(str_type, scalar_type): + value = scalar_type(b"1+3j") + assert scalar_type(value) == 1 + 3j + assert np.array([value], dtype=object).astype(scalar_type)[()] == 1 + 3j + assert np.array(value).astype(scalar_type)[()] == 1 + 3j + arr = np.zeros(1, dtype=scalar_type) + arr[0] = value + assert arr[0] == 1 + 3j + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_none_to_nan_cast(dtype): + # Note that at the time of writing this test, the scalar constructors + # reject None + arr = np.zeros(1, dtype=dtype) + arr[0] = None + assert np.isnan(arr)[0] + assert np.isnan(np.array(None, dtype=dtype))[()] + assert np.isnan(np.array([None], dtype=dtype))[0] + assert np.isnan(np.array(None).astype(dtype))[()] + +def test_copyto_fromscalar(): + a = np.arange(6, dtype='f4').reshape(2, 3) + + # Simple copy + np.copyto(a, 1.5) + assert_equal(a, 1.5) + np.copyto(a.T, 2.5) + assert_equal(a, 2.5) + + # Where-masked copy + mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?') + np.copyto(a, 3.5, where=mask) + assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]]) + mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?') + np.copyto(a.T, 4.5, where=mask) + assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]]) + +def test_copyto(): + a = np.arange(6, dtype='i4').reshape(2, 3) + + # Simple copy + np.copyto(a, [[3, 1, 5], [6, 2, 1]]) + assert_equal(a, [[3, 1, 5], [6, 2, 1]]) + + # Overlapping copy should work + np.copyto(a[:, :2], a[::-1, 1::-1]) + assert_equal(a, [[2, 6, 5], [1, 3, 1]]) + + # Defaults to 'same_kind' casting + assert_raises(TypeError, np.copyto, a, 1.5) + + # Force a copy with 'unsafe' casting, truncating 1.5 to 1 + np.copyto(a, 1.5, casting='unsafe') + assert_equal(a, 1) + + # Copying with a mask + np.copyto(a, 3, where=[True, False, True]) + assert_equal(a, [[3, 1, 3], [3, 1, 3]]) + + # Casting rule still applies with a mask + assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True]) + + # Lists of integer 0's and 1's is ok too + np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]]) + assert_equal(a, [[3, 4, 4], [4, 1, 3]]) + + # Overlapping copy with mask should work + np.copyto(a[:, :2], 
a[::-1, 1::-1], where=[[0, 1], [1, 1]])
+    assert_equal(a, [[3, 4, 4], [4, 3, 3]])
+
+    # 'dst' must be an array
+    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
+
+
+def test_copyto_cast_safety():
+    with pytest.raises(TypeError):
+        np.copyto(np.arange(3), 3., casting="safe")
+
+    # Can put integer and float scalars safely (and equiv):
+    np.copyto(np.arange(3), 3, casting="equiv")
+    np.copyto(np.arange(3.), 3., casting="equiv")
+    # And also with less precision safely:
+    np.copyto(np.arange(3, dtype="uint8"), 3, casting="safe")
+    np.copyto(np.arange(3., dtype="float32"), 3., casting="safe")
+
+    # But not equiv:
+    with pytest.raises(TypeError):
+        np.copyto(np.arange(3, dtype="uint8"), 3, casting="equiv")
+
+    with pytest.raises(TypeError):
+        np.copyto(np.arange(3., dtype="float32"), 3., casting="equiv")
+
+    # As a special thing, object is equiv currently:
+    np.copyto(np.arange(3, dtype=object), 3, casting="equiv")
+
+    # The following raises an overflow error/gives a warning but not
+    # a type error (due to casting), though:
+    with pytest.raises(OverflowError):
+        np.copyto(np.arange(3), 2**80, casting="safe")
+
+    with pytest.warns(RuntimeWarning):
+        np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe")
+
+
+def test_copyto_permut():
+    # test explicit overflow case
+    pad = 500
+    l = [True] * pad + [True, True, True, True]
+    r = np.zeros(len(l) - pad)
+    d = np.ones(len(l) - pad)
+    mask = np.array(l)[pad:]
+    np.copyto(r, d, where=mask[::-1])
+
+    # test all permutations of possible masks; 9 should be sufficient for
+    # the current 4-byte unrolled code
+    power = 9
+    d = np.ones(power)
+    for i in range(2**power):
+        r = np.zeros(power)
+        l = [(i & x) != 0 for x in range(power)]
+        mask = np.array(l)
+        np.copyto(r, d, where=mask)
+        assert_array_equal(r == 1, l)
+        assert_equal(r.sum(), sum(l))
+
+        r = np.zeros(power)
+        np.copyto(r, d, where=mask[::-1])
+        assert_array_equal(r == 1, l[::-1])
+        assert_equal(r.sum(), sum(l))
+
+        r = np.zeros(power)
+        np.copyto(r[::2], d[::2], where=mask[::2])
+        assert_array_equal(r[::2] == 1, l[::2])
+        assert_equal(r[::2].sum(), sum(l[::2]))
+
+        r = np.zeros(power)
+        np.copyto(r[::2], d[::2], where=mask[::-2])
+        assert_array_equal(r[::2] == 1, l[::-2])
+        assert_equal(r[::2].sum(), sum(l[::-2]))
+
+        for c in [0xFF, 0x7F, 0x02, 0x10]:
+            r = np.zeros(power)
+            mask = np.array(l)
+            imask = np.array(l).view(np.uint8)
+            imask[mask != 0] = c
+            np.copyto(r, d, where=mask)
+            assert_array_equal(r == 1, l)
+            assert_equal(r.sum(), sum(l))
+
+    r = np.zeros(power)
+    np.copyto(r, d, where=True)
+    assert_equal(r.sum(), r.size)
+    r = np.ones(power)
+    d = np.zeros(power)
+    np.copyto(r, d, where=False)
+    assert_equal(r.sum(), r.size)
+
+def test_copy_order():
+    a = np.arange(24).reshape(2, 1, 3, 4)
+    b = a.copy(order='F')
+    c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
+
+    def check_copy_result(x, y, ccontig, fcontig, strides=False):
+        assert_(not (x is y))
+        assert_equal(x, y)
+        assert_equal(x.flags.c_contiguous, ccontig)
+        assert_equal(x.flags.f_contiguous, fcontig)
+
+    # Validate the initial state of a, b, and c
+    assert_(a.flags.c_contiguous)
+    assert_(not a.flags.f_contiguous)
+    assert_(not b.flags.c_contiguous)
+    assert_(b.flags.f_contiguous)
+    assert_(not c.flags.c_contiguous)
+    assert_(not c.flags.f_contiguous)
+
+    # Copy with order='C'
+    res = a.copy(order='C')
+    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+    res = b.copy(order='C')
+    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
+    res = c.copy(order='C')
+    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
+    res = np.copy(a, order='C')
+    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+    res = np.copy(b, order='C')
+    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
+    res = np.copy(c, order='C')
+    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
+
+    # Copy with order='F'
+    res = a.copy(order='F')
+    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
+    res = b.copy(order='F')
+    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+    res = c.copy(order='F')
+    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
+    res = np.copy(a, order='F')
+    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
+    res = np.copy(b, order='F')
+    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+    res = np.copy(c, order='F')
+    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
+
+    # Copy with order='K'
+    res = a.copy(order='K')
+    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+    res = b.copy(order='K')
+    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+    res = c.copy(order='K')
+    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
+    res = np.copy(a, order='K')
+    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
+    res = np.copy(b, order='K')
+    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
+    res = np.copy(c, order='K')
+    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
+
+def test_contiguous_flags():
+    a = np.ones((4, 4, 1))[::2, :, :]
+    a.strides = a.strides[:2] + (-123,)
+    b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
+
+    def check_contig(a, ccontig, fcontig):
+        assert_(a.flags.c_contiguous == ccontig)
+        assert_(a.flags.f_contiguous == fcontig)
+
+    # Check if new arrays are correct:
+    check_contig(a, False, False)
+    check_contig(b, False, False)
+    check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
+    check_contig(np.array([[[1], [2]]], order='F'), True, True)
+    check_contig(np.empty((2, 2)), True, False)
+    check_contig(np.empty((2, 2), order='F'), False, True)
+
+    # Check that np.array creates correct contiguous flags:
+    check_contig(np.array(a, copy=None), False, False)
+    check_contig(np.array(a, copy=None, order='C'), True, False)
+    check_contig(np.array(a, ndmin=4, copy=None, order='F'), False, True)
+
+    # Check that slicing updates the flags:
+    check_contig(a[0], True, True)
+    check_contig(a[None, ::4, ..., None], True, True)
+    check_contig(b[0, 0, ...], False, True)
+    check_contig(b[:, :, 0:0, :, :], True, True)
+
+    # Test ravel and squeeze.
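+    # Both results are 1-D (squeeze drops the length-1 axes of (1, 3, 1)),
+    # and a contiguous 1-D array is both C- and F-contiguous.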
+    check_contig(a.ravel(), True, True)
+    check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
+
+
+def test_broadcast_arrays():
+    # Test user defined dtypes
+    a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
+    b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
+    result = np.broadcast_arrays(a, b)
+    assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)],
+                                     dtype='u4,u4,u4'))
+    assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
+                                     dtype='u4,u4,u4'))
+
+
+@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
+        [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
+         ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
+def test_full_from_list(shape, fill_value, expected_output):
+    output = np.full(shape, fill_value)
+    assert_equal(output, expected_output)
+
+
+def test_astype_copyflag():
+    # test the various copyflag options
+    arr = np.arange(10, dtype=np.intp)
+
+    res_true = arr.astype(np.intp, copy=True)
+    assert not np.shares_memory(arr, res_true)
+
+    res_false = arr.astype(np.intp, copy=False)
+    assert np.shares_memory(arr, res_false)
+
+    res_false_float = arr.astype(np.float64, copy=False)
+    assert not np.shares_memory(arr, res_false_float)
+
+    # _CopyMode enum isn't allowed
+    assert_raises(ValueError, arr.astype, np.float64,
+                  copy=np._CopyMode.NEVER)
diff --git a/python/numpy/_core/tests/test_argparse.py b/python/numpy/_core/tests/test_argparse.py
new file mode 100644
index 000000000..7f949c105
--- /dev/null
+++ b/python/numpy/_core/tests/test_argparse.py
@@ -0,0 +1,92 @@
+"""
+Tests for the private NumPy argument parsing functionality.
+They mainly exist to ensure good test coverage without having to try the
+weirder cases on actual numpy functions, testing them in one place instead.
+
+The test function is defined in C to be equivalent to (errors may not always
+match exactly, and could be adjusted):
+
+    def func(arg1, /, arg2, *, arg3):
+        i = integer(arg1)  # reproducing the 'i' parsing in Python.
+        return None
+"""
+
+import threading
+
+import pytest
+from numpy._core._multiarray_tests import (
+    argparse_example_function as func,
+    threaded_argparse_example_function as thread_func,
+)
+
+import numpy as np
+from numpy.testing import IS_WASM
+
+
+@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads")
+def test_thread_safe_argparse_cache():
+    b = threading.Barrier(8)
+
+    def call_thread_func():
+        b.wait()
+        thread_func(arg1=3, arg2=None)
+
+    tasks = [threading.Thread(target=call_thread_func) for _ in range(8)]
+    [t.start() for t in tasks]
+    [t.join() for t in tasks]
+
+
+def test_invalid_integers():
+    with pytest.raises(TypeError,
+            match="integer argument expected, got float"):
+        func(1.)
+    with pytest.raises(OverflowError):
+        func(2**100)
+
+
+def test_missing_arguments():
+    with pytest.raises(TypeError,
+            match="missing required positional argument 0"):
+        func()
+    with pytest.raises(TypeError,
+            match="missing required positional argument 0"):
+        func(arg2=1, arg3=4)
+    with pytest.raises(TypeError,
+            match=r"missing required argument \'arg2\' \(pos 1\)"):
+        func(1, arg3=5)
+
+
+def test_too_many_positional():
+    # the second argument is positional but can be passed as keyword.
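+    # Illustrative aside (not part of the original checks): passing just the
+    # two required arguments positionally parses fine; the call below fails
+    # only because of the fourth argument.
+    func(1, 2)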
+ with pytest.raises(TypeError, + match="takes from 2 to 3 positional arguments but 4 were given"): + func(1, 2, 3, 4) + + +def test_multiple_values(): + with pytest.raises(TypeError, + match=r"given by name \('arg2'\) and position \(position 1\)"): + func(1, 2, arg2=3) + + +def test_string_fallbacks(): + # We can (currently?) use numpy strings to test the "slow" fallbacks + # that should normally not be taken due to string interning. + arg2 = np.str_("arg2") + missing_arg = np.str_("missing_arg") + func(1, **{arg2: 3}) + with pytest.raises(TypeError, + match="got an unexpected keyword argument 'missing_arg'"): + func(2, **{missing_arg: 3}) + + +def test_too_many_arguments_method_forwarding(): + # Not directly related to the standard argument parsing, but we sometimes + # forward methods to Python: arr.mean() calls np._core._methods._mean() + # This adds code coverage for this `npy_forward_method`. + arr = np.arange(3) + args = range(1000) + with pytest.raises(TypeError): + arr.mean(*args) diff --git a/python/numpy/_core/tests/test_array_api_info.py b/python/numpy/_core/tests/test_array_api_info.py new file mode 100644 index 000000000..4842dbfa9 --- /dev/null +++ b/python/numpy/_core/tests/test_array_api_info.py @@ -0,0 +1,113 @@ +import pytest + +import numpy as np + +info = np.__array_namespace_info__() + + +def test_capabilities(): + caps = info.capabilities() + assert caps["boolean indexing"] is True + assert caps["data-dependent shapes"] is True + + # This will be added in the 2024.12 release of the array API standard. + + # assert caps["max rank"] == 64 + # np.zeros((1,)*64) + # with pytest.raises(ValueError): + # np.zeros((1,)*65) + + +def test_default_device(): + assert info.default_device() == "cpu" == np.asarray(0).device + + +def test_default_dtypes(): + dtypes = info.default_dtypes() + assert dtypes["real floating"] == np.float64 == np.asarray(0.0).dtype + assert dtypes["complex floating"] == np.complex128 == \ + np.asarray(0.0j).dtype + assert dtypes["integral"] == np.intp == np.asarray(0).dtype + assert dtypes["indexing"] == np.intp == np.argmax(np.zeros(10)).dtype + + with pytest.raises(ValueError, match="Device not understood"): + info.default_dtypes(device="gpu") + + +def test_dtypes_all(): + dtypes = info.dtypes() + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + "float32": np.float32, + "float64": np.float64, + "complex64": np.complex64, + "complex128": np.complex128, + } + + +dtype_categories = { + "bool": {"bool": np.bool_}, + "signed integer": { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + }, + "unsigned integer": { + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }, + "integral": ("signed integer", "unsigned integer"), + "real floating": {"float32": np.float32, "float64": np.float64}, + "complex floating": {"complex64": np.complex64, "complex128": + np.complex128}, + "numeric": ("integral", "real floating", "complex floating"), +} + + +@pytest.mark.parametrize("kind", dtype_categories) +def test_dtypes_kind(kind): + expected = dtype_categories[kind] + if isinstance(expected, tuple): + assert info.dtypes(kind=kind) == info.dtypes(kind=expected) + else: + assert info.dtypes(kind=kind) == expected + + +def test_dtypes_tuple(): + dtypes = info.dtypes(kind=("bool", "integral")) + assert dtypes == { + "bool": np.bool_, + "int8": 
np.int8,
+        "int16": np.int16,
+        "int32": np.int32,
+        "int64": np.int64,
+        "uint8": np.uint8,
+        "uint16": np.uint16,
+        "uint32": np.uint32,
+        "uint64": np.uint64,
+    }
+
+
+def test_dtypes_invalid_kind():
+    with pytest.raises(ValueError, match="unsupported kind"):
+        info.dtypes(kind="invalid")
+
+
+def test_dtypes_invalid_device():
+    with pytest.raises(ValueError, match="Device not understood"):
+        info.dtypes(device="gpu")
+
+
+def test_devices():
+    assert info.devices() == ["cpu"]
diff --git a/python/numpy/_core/tests/test_array_coercion.py b/python/numpy/_core/tests/test_array_coercion.py
new file mode 100644
index 000000000..883aee63a
--- /dev/null
+++ b/python/numpy/_core/tests/test_array_coercion.py
@@ -0,0 +1,911 @@
+"""
+Tests for array coercion, mainly through testing `np.array` results directly.
+Note that other such tests exist, e.g., in `test_api.py`, and many
+corner-cases are tested (sometimes indirectly) elsewhere.
+"""
+
+from itertools import permutations, product
+
+import numpy._core._multiarray_umath as ncu
+import pytest
+from numpy._core._rational_tests import rational
+from pytest import param
+
+import numpy as np
+from numpy.testing import IS_64BIT, IS_PYPY, assert_array_equal
+
+
+def arraylikes():
+    """
+    Generator for functions converting an array into various array-likes.
+    Some of the returned array-likes cannot handle all dtypes.
+    """
+    # base array:
+    def ndarray(a):
+        return a
+
+    yield param(ndarray, id="ndarray")
+
+    # subclass:
+    class MyArr(np.ndarray):
+        pass
+
+    def subclass(a):
+        return a.view(MyArr)
+
+    yield subclass
+
+    class _SequenceLike:
+        # Older NumPy versions sometimes cared whether a protocol array was
+        # also _SequenceLike.  This shouldn't matter, but keep it for now
+        # for __array__ and not the others.
+        def __len__(self):
+            raise TypeError
+
+        def __getitem__(self, _, /):
+            raise TypeError
+
+    # __array__ protocol
+    class ArrayDunder(_SequenceLike):
+        def __init__(self, a):
+            self.a = a
+
+        def __array__(self, dtype=None, copy=None):
+            if dtype is None:
+                return self.a
+            return self.a.astype(dtype)
+
+    yield param(ArrayDunder, id="__array__")
+
+    # memory-view
+    yield param(memoryview, id="memoryview")
+
+    # Array-interface
+    class ArrayInterface:
+        def __init__(self, a):
+            self.a = a  # need to hold on to keep interface valid
+            self.__array_interface__ = a.__array_interface__
+
+    yield param(ArrayInterface, id="__array_interface__")
+
+    # Array-Struct
+    class ArrayStruct:
+        def __init__(self, a):
+            self.a = a  # need to hold on to keep struct valid
+            self.__array_struct__ = a.__array_struct__
+
+    yield param(ArrayStruct, id="__array_struct__")
+
+
+def scalar_instances(times=True, extended_precision=True, user_dtype=True):
+    # Hard-coded list of scalar instances.
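+    # Note: np.sqrt(5) and friends are irrational, so each float/complex
+    # entry below carries a value that exercises the type's full precision.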
+ # Floats: + yield param(np.sqrt(np.float16(5)), id="float16") + yield param(np.sqrt(np.float32(5)), id="float32") + yield param(np.sqrt(np.float64(5)), id="float64") + if extended_precision: + yield param(np.sqrt(np.longdouble(5)), id="longdouble") + + # Complex: + yield param(np.sqrt(np.complex64(2 + 3j)), id="complex64") + yield param(np.sqrt(np.complex128(2 + 3j)), id="complex128") + if extended_precision: + yield param(np.sqrt(np.clongdouble(2 + 3j)), id="clongdouble") + + # Bool: + # XFAIL: Bool should be added, but has some bad properties when it + # comes to strings, see also gh-9875 + # yield param(np.bool(0), id="bool") + + # Integers: + yield param(np.int8(2), id="int8") + yield param(np.int16(2), id="int16") + yield param(np.int32(2), id="int32") + yield param(np.int64(2), id="int64") + + yield param(np.uint8(2), id="uint8") + yield param(np.uint16(2), id="uint16") + yield param(np.uint32(2), id="uint32") + yield param(np.uint64(2), id="uint64") + + # Rational: + if user_dtype: + yield param(rational(1, 2), id="rational") + + # Cannot create a structured void scalar directly: + structured = np.array([(1, 3)], "i,i")[0] + assert isinstance(structured, np.void) + assert structured.dtype == np.dtype("i,i") + yield param(structured, id="structured") + + if times: + # Datetimes and timedelta + yield param(np.timedelta64(2), id="timedelta64[generic]") + yield param(np.timedelta64(23, "s"), id="timedelta64[s]") + yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)") + + yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)") + yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]") + + # Strings and unstructured void: + yield param(np.bytes_(b"1234"), id="bytes") + yield param(np.str_("2345"), id="unicode") + yield param(np.void(b"4321"), id="unstructured_void") + + +def is_parametric_dtype(dtype): + """Returns True if the dtype is a parametric legacy dtype (itemsize + is 0, or a datetime without units) + """ + if dtype.itemsize == 0: + return True + if issubclass(dtype.type, (np.datetime64, np.timedelta64)): + if dtype.name.endswith("64"): + # Generic time units + return True + return False + + +class TestStringDiscovery: + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_basic_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + + assert np.array(obj, dtype="S").dtype == expected + assert np.array([obj], dtype="S").dtype == expected + + # A nested array is also discovered correctly + arr = np.array(obj, dtype="O") + assert np.array(arr, dtype="S").dtype == expected + # Also if we use the dtype class + assert np.array(arr, dtype=type(expected)).dtype == expected + # Check that .astype() behaves identical + assert arr.astype("S").dtype == expected + # The DType class is accepted by `.astype()` + assert arr.astype(type(np.dtype("S"))).dtype == expected + + @pytest.mark.parametrize("obj", + [object(), 1.2, 10**43, None, "string"], + ids=["object", "1.2", "10**43", "None", "string"]) + def test_nested_arrays_stringlength(self, obj): + length = len(str(obj)) + expected = np.dtype(f"S{length}") + arr = np.array(obj, dtype="O") + assert np.array([arr, arr], dtype="S").dtype == expected + + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_unpack_first_level(self, arraylike): + # We unpack exactly one level of array likes + obj = np.array([None]) + obj[0] = np.array(1.2) + # the length of the included item, not of 
the float dtype + length = len(str(obj[0])) + expected = np.dtype(f"S{length}") + + obj = arraylike(obj) + # casting to string usually calls str(obj) + arr = np.array([obj], dtype="S") + assert arr.shape == (1, 1) + assert arr.dtype == expected + + +class TestScalarDiscovery: + def test_void_special_case(self): + # Void dtypes with structures discover tuples as elements + arr = np.array((1, 2, 3), dtype="i,i,i") + assert arr.shape == () + arr = np.array([(1, 2, 3)], dtype="i,i,i") + assert arr.shape == (1,) + + def test_char_special_case(self): + arr = np.array("string", dtype="c") + assert arr.shape == (6,) + assert arr.dtype.char == "c" + arr = np.array(["string"], dtype="c") + assert arr.shape == (1, 6) + assert arr.dtype.char == "c" + + def test_char_special_case_deep(self): + # Check that the character special case errors correctly if the + # array is too deep: + nested = ["string"] # 2 dimensions (due to string being sequence) + for i in range(ncu.MAXDIMS - 2): + nested = [nested] + + arr = np.array(nested, dtype='c') + assert arr.shape == (1,) * (ncu.MAXDIMS - 1) + (6,) + with pytest.raises(ValueError): + np.array([nested], dtype="c") + + def test_unknown_object(self): + arr = np.array(object()) + assert arr.shape == () + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar(self, scalar): + arr = np.array(scalar) + assert arr.shape == () + assert arr.dtype == scalar.dtype + + arr = np.array([[scalar, scalar]]) + assert arr.shape == (1, 2) + assert arr.dtype == scalar.dtype + + # Additionally to string this test also runs into a corner case + # with datetime promotion (the difference is the promotion order). + @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning") + def test_scalar_promotion(self): + for sc1, sc2 in product(scalar_instances(), scalar_instances()): + sc1, sc2 = sc1.values[0], sc2.values[0] + # test all combinations: + try: + arr = np.array([sc1, sc2]) + except (TypeError, ValueError): + # The promotion between two times can fail + # XFAIL (ValueError): Some object casts are currently undefined + continue + assert arr.shape == (2,) + try: + dt1, dt2 = sc1.dtype, sc2.dtype + expected_dtype = np.promote_types(dt1, dt2) + assert arr.dtype == expected_dtype + except TypeError as e: + # Will currently always go to object dtype + assert arr.dtype == np.dtype("O") + + @pytest.mark.parametrize("scalar", scalar_instances()) + def test_scalar_coercion(self, scalar): + # This tests various scalar coercion paths, mainly for the numerical + # types. It includes some paths not directly related to `np.array`. + if isinstance(scalar, np.inexact): + # Ensure we have a full-precision number if available + scalar = type(scalar)((scalar * 2)**0.5) + + if type(scalar) is rational: + # Rational generally fails due to a missing cast. In the future + # object casts should automatically be defined based on `setitem`. 
+ pytest.xfail("Rational to object cast is undefined currently.") + + # Use casting from object: + arr = np.array(scalar, dtype=object).astype(scalar.dtype) + + # Test various ways to create an array containing this scalar: + arr1 = np.array(scalar).reshape(1) + arr2 = np.array([scalar]) + arr3 = np.empty(1, dtype=scalar.dtype) + arr3[0] = scalar + arr4 = np.empty(1, dtype=scalar.dtype) + arr4[:] = [scalar] + # All of these methods should yield the same results + assert_array_equal(arr, arr1) + assert_array_equal(arr, arr2) + assert_array_equal(arr, arr3) + assert_array_equal(arr, arr4) + + @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy") + @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") + @pytest.mark.parametrize("cast_to", scalar_instances()) + def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to): + """ + Test that in most cases: + * `np.array(scalar, dtype=dtype)` + * `np.empty((), dtype=dtype)[()] = scalar` + * `np.array(scalar).astype(dtype)` + should behave the same. The only exceptions are parametric dtypes + (mainly datetime/timedelta without unit) and void without fields. + """ + dtype = cast_to.dtype # use to parametrize only the target dtype + + for scalar in scalar_instances(times=False): + scalar = scalar.values[0] + + if dtype.type == np.void: + if scalar.dtype.fields is not None and dtype.fields is None: + # Here, coercion to "V6" works, but the cast fails. + # Since the types are identical, SETITEM takes care of + # this, but has different rules than the cast. + with pytest.raises(TypeError): + np.array(scalar).astype(dtype) + np.array(scalar, dtype=dtype) + np.array([scalar], dtype=dtype) + continue + + # The main test, we first try to use casting and if it succeeds + # continue below testing that things are the same, otherwise + # test that the alternative paths at least also fail. + try: + cast = np.array(scalar).astype(dtype) + except (TypeError, ValueError, RuntimeError): + # coercion should also raise (error type may change) + with pytest.raises(Exception): # noqa: B017 + np.array(scalar, dtype=dtype) + + if (isinstance(scalar, rational) and + np.issubdtype(dtype, np.signedinteger)): + return + + with pytest.raises(Exception): # noqa: B017 + np.array([scalar], dtype=dtype) + # assignment should also raise + res = np.zeros((), dtype=dtype) + with pytest.raises(Exception): # noqa: B017 + res[()] = scalar + + return + + # Non error path: + arr = np.array(scalar, dtype=dtype) + assert_array_equal(arr, cast) + # assignment behaves the same + ass = np.zeros((), dtype=dtype) + ass[()] = scalar + assert_array_equal(ass, cast) + + @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100]) + def test_pyscalar_subclasses(self, pyscalar): + """NumPy arrays are read/write which means that anything but invariant + behaviour is on thin ice. However, we currently are happy to discover + subclasses of Python float, int, complex the same as the base classes. + This should potentially be deprecated. + """ + class MyScalar(type(pyscalar)): + pass + + res = np.array(MyScalar(pyscalar)) + expected = np.array(pyscalar) + assert_array_equal(res, expected) + + @pytest.mark.parametrize("dtype_char", np.typecodes["All"]) + def test_default_dtype_instance(self, dtype_char): + if dtype_char in "SU": + dtype = np.dtype(dtype_char + "1") + elif dtype_char == "V": + # Legacy behaviour was to use V8. The reason was float64 being the + # default dtype and that having 8 bytes. 
+ dtype = np.dtype("V8") + else: + dtype = np.dtype(dtype_char) + + discovered_dtype, _ = ncu._discover_array_parameters([], type(dtype)) + + assert discovered_dtype == dtype + assert discovered_dtype.itemsize == dtype.itemsize + + @pytest.mark.parametrize("dtype", np.typecodes["Integer"]) + @pytest.mark.parametrize(["scalar", "error"], + [(np.float64(np.nan), ValueError), + (np.array(-1).astype(np.ulonglong)[()], OverflowError)]) + def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error): + """ + Signed integers are currently different in that they do not cast other + NumPy scalar, but instead use scalar.__int__(). The hardcoded + exception to this rule is `np.array(scalar, dtype=integer)`. + """ + dtype = np.dtype(dtype) + + # This is a special case using casting logic. It warns for the NaN + # but allows the cast (giving undefined behaviour). + with np.errstate(invalid="ignore"): + coerced = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + assert_array_equal(coerced, cast) + + # However these fail: + with pytest.raises(error): + np.array([scalar], dtype=dtype) + with pytest.raises(error): + cast[()] = scalar + + +class TestTimeScalars: + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("scalar", + [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"), + param(np.timedelta64(123, "s"), id="timedelta64[s]"), + param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"), + param(np.datetime64(1, "D"), id="datetime64[D]")],) + def test_coercion_basic(self, dtype, scalar): + # Note the `[scalar]` is there because np.array(scalar) uses stricter + # `scalar.__int__()` rules for backward compatibility right now. + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + assert_array_equal(arr, cast) + + ass = np.ones((), dtype=dtype) + if issubclass(dtype, np.integer): + with pytest.raises(TypeError): + # raises, as would np.array([scalar], dtype=dtype), this is + # conversion from times, but behaviour of integers. + ass[()] = scalar + else: + ass[()] = scalar + assert_array_equal(ass, cast) + + @pytest.mark.parametrize("dtype", [np.int64, np.float32]) + @pytest.mark.parametrize("scalar", + [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"), + param(np.timedelta64(12, "generic"), id="timedelta64[generic]")]) + def test_coercion_timedelta_convert_to_number(self, dtype, scalar): + # Only "ns" and "generic" timedeltas can be converted to numbers + # so these are slightly special. + arr = np.array(scalar, dtype=dtype) + cast = np.array(scalar).astype(dtype) + ass = np.ones((), dtype=dtype) + ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype) + + assert_array_equal(arr, cast) + assert_array_equal(cast, cast) + + @pytest.mark.parametrize("dtype", ["S6", "U6"]) + @pytest.mark.parametrize(["val", "unit"], + [param(123, "s", id="[s]"), param(123, "D", id="[D]")]) + def test_coercion_assignment_datetime(self, val, unit, dtype): + # String from datetime64 assignment is currently special cased to + # never use casting. This is because casting will error in this + # case, and traditionally in most cases the behaviour is maintained + # like this. (`np.array(scalar, dtype="U6")` would have failed before) + # TODO: This discrepancy _should_ be resolved, either by relaxing the + # cast, or by deprecating the first part. 
+        scalar = np.datetime64(val, unit)
+        dtype = np.dtype(dtype)
+        cut_string = dtype.type(str(scalar)[:6])
+
+        arr = np.array(scalar, dtype=dtype)
+        assert arr[()] == cut_string
+        ass = np.ones((), dtype=dtype)
+        ass[()] = scalar
+        assert ass[()] == cut_string
+
+        with pytest.raises(RuntimeError):
+            # However, unlike the above assignments, which use
+            # `str(scalar)[:6]` because they are handled by the string DType
+            # and not by casting, the explicit cast fails:
+            np.array(scalar).astype(dtype)
+
+    @pytest.mark.parametrize(["val", "unit"],
+            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
+    def test_coercion_assignment_timedelta(self, val, unit):
+        scalar = np.timedelta64(val, unit)
+
+        # Unlike datetime64, timedelta allows the unsafe cast:
+        np.array(scalar, dtype="S6")
+        cast = np.array(scalar).astype("S6")
+        ass = np.ones((), dtype="S6")
+        ass[()] = scalar
+        expected = scalar.astype("S")[:6]
+        assert cast[()] == expected
+        assert ass[()] == expected
+
+class TestNested:
+    def test_nested_simple(self):
+        initial = [1.2]
+        nested = initial
+        for i in range(ncu.MAXDIMS - 1):
+            nested = [nested]
+
+        arr = np.array(nested, dtype="float64")
+        assert arr.shape == (1,) * ncu.MAXDIMS
+        with pytest.raises(ValueError):
+            np.array([nested], dtype="float64")
+
+        with pytest.raises(ValueError, match=".*would exceed the maximum"):
+            np.array([nested])  # user must ask for `object` explicitly
+
+        arr = np.array([nested], dtype=object)
+        assert arr.dtype == np.dtype("O")
+        assert arr.shape == (1,) * ncu.MAXDIMS
+        assert arr.item() is initial
+
+    def test_pathological_self_containing(self):
+        # Test that this also works for two nested sequences
+        l = []
+        l.append(l)
+        arr = np.array([l, l, l], dtype=object)
+        assert arr.shape == (3,) + (1,) * (ncu.MAXDIMS - 1)
+
+        # Also check a ragged case:
+        arr = np.array([l, [None], l], dtype=object)
+        assert arr.shape == (3, 1)
+
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    def test_nested_arraylikes(self, arraylike):
+        # We try storing an array-like into an array, but the array-like
+        # will have too many dimensions.  This means the shape discovery
+        # decides that the array-like must be treated as an object (a special
+        # case of ragged discovery).  The result will be an array with one
+        # dimension less than the maximum dimensions, and the array being
+        # assigned to it (which does work for object or if `float(arraylike)`
+        # works).
+        initial = arraylike(np.ones((1, 1)))
+
+        nested = initial
+        for i in range(ncu.MAXDIMS - 1):
+            nested = [nested]
+
+        with pytest.raises(ValueError, match=".*would exceed the maximum"):
+            # It refuses to assign the over-deep array-like into float64:
+            np.array(nested, dtype="float64")
+
+        # If this is object, we end up assigning a (1, 1) array into (1,)
+        # (due to running out of dimensions); this is currently supported but
+        # a special case which is not ideal.
+        arr = np.array(nested, dtype=object)
+        assert arr.shape == (1,) * ncu.MAXDIMS
+        assert arr.item() == np.array(initial).item()
+
+    @pytest.mark.parametrize("arraylike", arraylikes())
+    def test_uneven_depth_ragged(self, arraylike):
+        arr = np.arange(4).reshape((2, 2))
+        arr = arraylike(arr)
+
+        # Array is ragged in the second dimension already:
+        out = np.array([arr, [arr]], dtype=object)
+        assert out.shape == (2,)
+        assert out[0] is arr
+        assert type(out[1]) is list
+
+        # Array is ragged in the third dimension:
+        with pytest.raises(ValueError):
+            # This is a broadcast error during assignment, because
+            # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
+ np.array([arr, [arr, arr]], dtype=object) + + def test_empty_sequence(self): + arr = np.array([[], [1], [[1]]], dtype=object) + assert arr.shape == (3,) + + # The empty sequence stops further dimension discovery, so the + # result shape will be (0,) which leads to an error during: + with pytest.raises(ValueError): + np.array([[], np.empty((0, 1))], dtype=object) + + def test_array_of_different_depths(self): + # When multiple arrays (or array-likes) are included in a + # sequences and have different depth, we currently discover + # as many dimensions as they share. (see also gh-17224) + arr = np.zeros((3, 2)) + mismatch_first_dim = np.zeros((1, 2)) + mismatch_second_dim = np.zeros((3, 3)) + + dtype, shape = ncu._discover_array_parameters( + [arr, mismatch_second_dim], dtype=np.dtype("O")) + assert shape == (2, 3) + + dtype, shape = ncu._discover_array_parameters( + [arr, mismatch_first_dim], dtype=np.dtype("O")) + assert shape == (2,) + # The second case is currently supported because the arrays + # can be stored as objects: + res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O")) + assert res[0] is arr + assert res[1] is mismatch_first_dim + + +class TestBadSequences: + # These are tests for bad objects passed into `np.array`, in general + # these have undefined behaviour. In the old code they partially worked + # when now they will fail. We could (and maybe should) create a copy + # of all sequences to be safe against bad-actors. + + def test_growing_list(self): + # List to coerce, `mylist` will append to it during coercion + obj = [] + + class mylist(list): + def __len__(self): + obj.append([1, 2]) + return super().__len__() + + obj.append(mylist([1, 2])) + + with pytest.raises(RuntimeError): + np.array(obj) + + # Note: We do not test a shrinking list. These do very evil things + # and the only way to fix them would be to copy all sequences. + # (which may be a real option in the future). + + def test_mutated_list(self): + # List to coerce, `mylist` will mutate the first element + obj = [] + + class mylist(list): + def __len__(self): + obj[0] = [2, 3] # replace with a different list. + return super().__len__() + + obj.append([2, 3]) + obj.append(mylist([1, 2])) + # Does not crash: + np.array(obj) + + def test_replace_0d_array(self): + # List to coerce, `mylist` will mutate the first element + obj = [] + + class baditem: + def __len__(self): + obj[0][0] = 2 # replace with a different list. + raise ValueError("not actually a sequence!") + + def __getitem__(self, _, /): + pass + + # Runs into a corner case in the new code, the `array(2)` is cached + # so replacing it invalidates the cache. + obj.append([np.array(2), baditem()]) + with pytest.raises(RuntimeError): + np.array(obj) + + +class TestArrayLikes: + @pytest.mark.parametrize("arraylike", arraylikes()) + def test_0d_object_special_case(self, arraylike): + arr = np.array(0.) + obj = arraylike(arr) + # A single array-like is always converted: + res = np.array(obj, dtype=object) + assert_array_equal(arr, res) + + # But a single 0-D nested array-like never: + res = np.array([obj], dtype=object) + assert res[0] is obj + + @pytest.mark.parametrize("arraylike", arraylikes()) + @pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)]) + def test_object_assignment_special_case(self, arraylike, arr): + obj = arraylike(arr) + empty = np.arange(1, dtype=object) + empty[:] = [obj] + assert empty[0] is obj + + def test_0d_generic_special_case(self): + class ArraySubclass(np.ndarray): + def __float__(self): + raise TypeError("e.g. 
quantities raise on this") + + arr = np.array(0.) + obj = arr.view(ArraySubclass) + res = np.array(obj) + # The subclass is simply cast: + assert_array_equal(arr, res) + + # If the 0-D array-like is included, __float__ is currently + # guaranteed to be used. We may want to change that, quantities + # and masked arrays half make use of this. + with pytest.raises(TypeError): + np.array([obj]) + + # The same holds for memoryview: + obj = memoryview(arr) + res = np.array(obj) + assert_array_equal(arr, res) + with pytest.raises(ValueError): + # The error type does not matter much here. + np.array([obj]) + + def test_arraylike_classes(self): + # The classes of array-likes should generally be acceptable to be + # stored inside a numpy (object) array. This tests all of the + # special attributes (since all are checked during coercion). + arr = np.array(np.int64) + assert arr[()] is np.int64 + arr = np.array([np.int64]) + assert arr[0] is np.int64 + + # This also works for properties/unbound methods: + class ArrayLike: + @property + def __array_interface__(self): + pass + + @property + def __array_struct__(self): + pass + + def __array__(self, dtype=None, copy=None): + pass + + arr = np.array(ArrayLike) + assert arr[()] is ArrayLike + arr = np.array([ArrayLike]) + assert arr[0] is ArrayLike + + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + def test_too_large_array_error_paths(self): + """Test the error paths, including for memory leaks""" + arr = np.array(0, dtype="uint8") + # Guarantees that a contiguous copy won't work: + arr = np.broadcast_to(arr, 2**62) + + for i in range(5): + # repeat, to ensure caching cannot have an effect: + with pytest.raises(MemoryError): + np.array(arr) + with pytest.raises(MemoryError): + np.array([arr]) + + @pytest.mark.parametrize("attribute", + ["__array_interface__", "__array__", "__array_struct__"]) + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_attributes(self, attribute, error): + # RecursionError and MemoryError are considered fatal. All errors + # (except AttributeError) should probably be raised in the future, + # but shapely made use of it, so it will require a deprecation. + + class BadInterface: + def __getattr__(self, attr): + if attr == attribute: + raise error + super().__getattr__(attr) + + with pytest.raises(error): + np.array(BadInterface()) + + @pytest.mark.parametrize("error", [RecursionError, MemoryError]) + def test_bad_array_like_bad_length(self, error): + # RecursionError and MemoryError are considered "critical" in + # sequences. We could expand this more generally though. (NumPy 1.20) + class BadSequence: + def __len__(self): + raise error + + def __getitem__(self, _, /): + # must have getitem to be a Sequence + return 1 + + with pytest.raises(error): + np.array(BadSequence()) + + def test_array_interface_descr_optional(self): + # The descr should be optional regression test for gh-27249 + arr = np.ones(10, dtype="V10") + iface = arr.__array_interface__ + iface.pop("descr") + + class MyClass: + __array_interface__ = iface + + assert_array_equal(np.asarray(MyClass), arr) + + +class TestAsArray: + """Test expected behaviors of ``asarray``.""" + + def test_dtype_identity(self): + """Confirm the intended behavior for *dtype* kwarg. + + The result of ``asarray()`` should have the dtype provided through the + keyword argument, when used. 
This forces unique array handles to be
+        produced for unique np.dtype objects, but (for equivalent dtypes) the
+        underlying data (the base object) is shared with the original array
+        object.
+
+        Ref https://github.com/numpy/numpy/issues/1468
+        """
+        int_array = np.array([1, 2, 3], dtype='i')
+        assert np.asarray(int_array) is int_array
+
+        # The character code resolves to the singleton dtype object provided
+        # by the numpy package.
+        assert np.asarray(int_array, dtype='i') is int_array
+
+        # Derive a dtype from np.dtype('i'), but add a metadata object to
+        # force the dtype to be distinct.
+        unequal_type = np.dtype('i', metadata={'spam': True})
+        annotated_int_array = np.asarray(int_array, dtype=unequal_type)
+        assert annotated_int_array is not int_array
+        assert annotated_int_array.base is int_array
+        # Create an equivalent descriptor with a new and distinct dtype
+        # instance.
+        equivalent_requirement = np.dtype('i', metadata={'spam': True})
+        annotated_int_array_alt = np.asarray(annotated_int_array,
+                                             dtype=equivalent_requirement)
+        assert unequal_type == equivalent_requirement
+        assert unequal_type is not equivalent_requirement
+        assert annotated_int_array_alt is not annotated_int_array
+        assert annotated_int_array_alt.dtype is equivalent_requirement
+
+        # Check the same logic for a pair of C types whose equivalence may
+        # vary between computing environments.
+        # Find an equivalent pair.
+        integer_type_codes = ('i', 'l', 'q')
+        integer_dtypes = [np.dtype(code) for code in integer_type_codes]
+        typeA = None
+        typeB = None
+        for typeA, typeB in permutations(integer_dtypes, r=2):
+            if typeA == typeB:
+                assert typeA is not typeB
+                break
+        assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype)
+
+        # These ``asarray()`` calls may produce a new view or a copy,
+        # but never the same object.
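+        # Illustrative aside (not part of the original checks): identity is
+        # preserved only when the requested dtype already matches exactly:
+        assert np.asarray(int_array, dtype=int_array.dtype) is int_array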
+        long_int_array = np.asarray(int_array, dtype='l')
+        long_long_int_array = np.asarray(int_array, dtype='q')
+        assert long_int_array is not int_array
+        assert long_long_int_array is not int_array
+        assert np.asarray(long_int_array, dtype='q') is not long_int_array
+        array_a = np.asarray(int_array, dtype=typeA)
+        assert typeA == typeB
+        assert typeA is not typeB
+        assert array_a.dtype is typeA
+        assert array_a is not np.asarray(array_a, dtype=typeB)
+        assert np.asarray(array_a, dtype=typeB).dtype is typeB
+        assert array_a is np.asarray(array_a, dtype=typeB).base
diff --git a/python/numpy/_core/tests/test_array_interface.py b/python/numpy/_core/tests/test_array_interface.py
new file mode 100644
index 000000000..afb19f4e2
--- /dev/null
+++ b/python/numpy/_core/tests/test_array_interface.py
@@ -0,0 +1,222 @@
+import sys
+import sysconfig
+
+import pytest
+
+import numpy as np
+from numpy.testing import IS_EDITABLE, IS_WASM, extbuild
+
+
+@pytest.fixture
+def get_module(tmp_path):
+    """ Some code to generate data and manage the temporary buffers used
+    when sharing data with numpy via the array interface protocol.
+ """ + if sys.platform.startswith('cygwin'): + pytest.skip('link fails on cygwin') + if IS_WASM: + pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") + + prologue = ''' + #include + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + #include + #include + + NPY_NO_EXPORT + void delete_array_struct(PyObject *cap) { + + /* get the array interface structure */ + PyArrayInterface *inter = (PyArrayInterface*) + PyCapsule_GetPointer(cap, NULL); + + /* get the buffer by which data was shared */ + double *ptr = (double*)PyCapsule_GetContext(cap); + + /* for the purposes of the regression test set the elements + to nan */ + for (npy_intp i = 0; i < inter->shape[0]; ++i) + ptr[i] = nan(""); + + /* free the shared buffer */ + free(ptr); + + /* free the array interface structure */ + free(inter->shape); + free(inter); + + fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld" + " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr); + } + ''' + + functions = [ + ("new_array_struct", "METH_VARARGS", """ + + long long n_elem = 0; + double value = 0.0; + + if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) { + Py_RETURN_NONE; + } + + /* allocate and initialize the data to share with numpy */ + long long n_bytes = n_elem*sizeof(double); + double *data = (double*)malloc(n_bytes); + + if (!data) { + PyErr_Format(PyExc_MemoryError, + "Failed to malloc %lld bytes", n_bytes); + + Py_RETURN_NONE; + } + + for (long long i = 0; i < n_elem; ++i) { + data[i] = value; + } + + /* calculate the shape and stride */ + int nd = 1; + + npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp)); + npy_intp *shape = ss; + npy_intp *stride = ss + nd; + + shape[0] = n_elem; + stride[0] = sizeof(double); + + /* construct the array interface */ + PyArrayInterface *inter = (PyArrayInterface*) + malloc(sizeof(PyArrayInterface)); + + memset(inter, 0, sizeof(PyArrayInterface)); + + inter->two = 2; + inter->nd = nd; + inter->typekind = 'f'; + inter->itemsize = sizeof(double); + inter->shape = shape; + inter->strides = stride; + inter->data = data; + inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED | + NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS; + + /* package into a capsule */ + PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct); + + /* save the pointer to the data */ + PyCapsule_SetContext(cap, data); + + fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld" + " ptr = %ld\\n", (long)cap, (long)inter, (long)data); + + return cap; + """) + ] + + more_init = "import_array();" + + try: + import array_interface_testing + return array_interface_testing + except ImportError: + pass + + # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + return extbuild.build_and_import_extension('array_interface_testing', + functions, + prologue=prologue, + include_dirs=[np.get_include()], + build_dir=tmp_path, + more_init=more_init) + + +@pytest.mark.slow +def test_cstruct(get_module): + + class data_source: + """ + This class is for testing the timing of the PyCapsule destructor + invoked when numpy release its reference to the shared data as part of + the numpy array interface protocol. If the PyCapsule destructor is + called early the shared data is freed and invalid memory accesses will + occur. 
+ """ + + def __init__(self, size, value): + self.size = size + self.value = value + + @property + def __array_struct__(self): + return get_module.new_array_struct(self.size, self.value) + + # write to the same stream as the C code + stderr = sys.__stderr__ + + # used to validate the shared data. + expected_value = -3.1415 + multiplier = -10000.0 + + # create some data to share with numpy via the array interface + # assign the data an expected value. + stderr.write(' ---- create an object to share data ---- \n') + buf = data_source(256, expected_value) + stderr.write(' ---- OK!\n\n') + + # share the data + stderr.write(' ---- share data via the array interface protocol ---- \n') + arr = np.array(buf, copy=False) + stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n') + stderr.write(f'arr.base = {str(arr.base)}\n') + stderr.write(' ---- OK!\n\n') + + # release the source of the shared data. this will not release the data + # that was shared with numpy, that is done in the PyCapsule destructor. + stderr.write(' ---- destroy the object that shared data ---- \n') + buf = None + stderr.write(' ---- OK!\n\n') + + # check that we got the expected data. If the PyCapsule destructor we + # defined was prematurely called then this test will fail because our + # destructor sets the elements of the array to NaN before free'ing the + # buffer. Reading the values here may also cause a SEGV + assert np.allclose(arr, expected_value) + + # read the data. If the PyCapsule destructor we defined was prematurely + # called then reading the values here may cause a SEGV and will be reported + # as invalid reads by valgrind + stderr.write(' ---- read shared data ---- \n') + stderr.write(f'arr = {str(arr)}\n') + stderr.write(' ---- OK!\n\n') + + # write to the shared buffer. If the shared data was prematurely deleted + # this will may cause a SEGV and valgrind will report invalid writes + stderr.write(' ---- modify shared data ---- \n') + arr *= multiplier + expected_value *= multiplier + stderr.write(f'arr.__array_interface___ = {str(arr.__array_interface__)}\n') + stderr.write(f'arr.base = {str(arr.base)}\n') + stderr.write(' ---- OK!\n\n') + + # read the data. If the shared data was prematurely deleted this + # will may cause a SEGV and valgrind will report invalid reads + stderr.write(' ---- read modified shared data ---- \n') + stderr.write(f'arr = {str(arr)}\n') + stderr.write(' ---- OK!\n\n') + + # check that we got the expected data. If the PyCapsule destructor we + # defined was prematurely called then this test will fail because our + # destructor sets the elements of the array to NaN before free'ing the + # buffer. Reading the values here may also cause a SEGV + assert np.allclose(arr, expected_value) + + # free the shared data, the PyCapsule destructor should run here + stderr.write(' ---- free shared data ---- \n') + arr = None + stderr.write(' ---- OK!\n\n') diff --git a/python/numpy/_core/tests/test_arraymethod.py b/python/numpy/_core/tests/test_arraymethod.py new file mode 100644 index 000000000..d8baef7e7 --- /dev/null +++ b/python/numpy/_core/tests/test_arraymethod.py @@ -0,0 +1,84 @@ +""" +This file tests the generic aspects of ArrayMethod. At the time of writing +this is private API, but when added, public API may be added here. 
+""" + +import types +from typing import Any + +import pytest +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl + +import numpy as np + + +class TestResolveDescriptors: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize("args", [ + (True,), # Not a tuple. + ((None,)), # Too few elements + ((None, None, None),), # Too many + ((None, None),), # Input dtype is None, which is invalid. + ((np.dtype("d"), True),), # Output dtype is not a dtype + ((np.dtype("f"), None),), # Input dtype does not match method + ]) + def test_invalid_arguments(self, args): + with pytest.raises(TypeError): + self.method._resolve_descriptors(*args) + + +class TestSimpleStridedCall: + # Test mainly error paths of the resolve_descriptors function, + # note that the `casting_unittests` tests exercise this non-error paths. + + # Casting implementations are the main/only current user: + method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f"))) + + @pytest.mark.parametrize(["args", "error"], [ + ((True,), TypeError), # Not a tuple + (((None,),), TypeError), # Too few elements + ((None, None), TypeError), # Inputs are not arrays. + (((None, None, None),), TypeError), # Too many + (((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes + (((np.ones(3, dtype=">d"), np.ones(3, dtype=" None: + """Test `ndarray.__class_getitem__`.""" + alias = cls[Any, Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len in (1, 2): + assert cls[arg_tup] + else: + match = f"Too {'few' if arg_len == 0 else 'many'} arguments" + with pytest.raises(TypeError, match=match): + cls[arg_tup] diff --git a/python/numpy/_core/tests/test_arrayobject.py b/python/numpy/_core/tests/test_arrayobject.py new file mode 100644 index 000000000..ffa1ba001 --- /dev/null +++ b/python/numpy/_core/tests/test_arrayobject.py @@ -0,0 +1,75 @@ +import pytest + +import numpy as np +from numpy.testing import assert_array_equal + + +def test_matrix_transpose_raises_error_for_1d(): + msg = "matrix transpose with ndim < 2 is undefined" + arr = np.arange(48) + with pytest.raises(ValueError, match=msg): + arr.mT + + +def test_matrix_transpose_equals_transpose_2d(): + arr = np.arange(48).reshape((6, 8)) + assert_array_equal(arr.T, arr.mT) + + +ARRAY_SHAPES_TO_TEST = ( + (5, 2), + (5, 2, 3), + (5, 2, 3, 4), +) + + +@pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST) +def test_matrix_transpose_equals_swapaxes(shape): + num_of_axes = len(shape) + vec = np.arange(shape[-1]) + arr = np.broadcast_to(vec, shape) + tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) + mT = arr.mT + assert_array_equal(tgt, mT) + + +class MyArr(np.ndarray): + def __array_wrap__(self, arr, context=None, return_scalar=None): + return super().__array_wrap__(arr, context, return_scalar) + + +class MyArrNoWrap(np.ndarray): + pass + + +@pytest.mark.parametrize("subclass_self", [np.ndarray, MyArr, MyArrNoWrap]) +@pytest.mark.parametrize("subclass_arr", [np.ndarray, MyArr, MyArrNoWrap]) +def test_array_wrap(subclass_self, subclass_arr): + # NumPy should allow `__array_wrap__` to be called on arrays, it's logic + # is 
+    # is designed in a way that:
+    #
+    # * Subclasses never return scalars by default (to preserve their
+    #   information).  They can choose to if they wish.
+    # * NumPy returns scalars, if `return_scalar` is passed as True, to allow
+    #   manual calls to `arr.__array_wrap__` to do the right thing.
+    # * The type of the input should be ignored (it should be a base-class
+    #   array, but I am not sure this is guaranteed).
+
+    arr = np.arange(3).view(subclass_self)
+
+    arr0d = np.array(3, dtype=np.int8).view(subclass_arr)
+    # With third argument True, ndarray allows "decay" to scalar.
+    # (I don't think NumPy would pass `None`, but it seems clear to support)
+    if subclass_self is np.ndarray:
+        assert type(arr.__array_wrap__(arr0d, None, True)) is np.int8
+    else:
+        assert type(arr.__array_wrap__(arr0d, None, True)) is type(arr)
+
+    # Otherwise, result should be viewed as the subclass
+    assert type(arr.__array_wrap__(arr0d)) is type(arr)
+    assert type(arr.__array_wrap__(arr0d, None, None)) is type(arr)
+    assert type(arr.__array_wrap__(arr0d, None, False)) is type(arr)
+
+    # Non 0-D arrays can't be converted to scalars, so we ignore that
+    arr1d = np.array([3], dtype=np.int8).view(subclass_arr)
+    assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr)
diff --git a/python/numpy/_core/tests/test_arrayprint.py b/python/numpy/_core/tests/test_arrayprint.py
new file mode 100644
index 000000000..1fd4ac2fd
--- /dev/null
+++ b/python/numpy/_core/tests/test_arrayprint.py
@@ -0,0 +1,1328 @@
+import gc
+import sys
+import textwrap
+
+import pytest
+from hypothesis import given
+from hypothesis.extra import numpy as hynp
+
+import numpy as np
+from numpy._core.arrayprint import _typelessdata
+from numpy.testing import (
+    HAS_REFCOUNT,
+    IS_WASM,
+    assert_,
+    assert_equal,
+    assert_raises,
+    assert_raises_regex,
+    assert_warns,
+)
+from numpy.testing._private.utils import run_threaded
+
+
+class TestArrayRepr:
+    def test_nan_inf(self):
+        x = np.array([np.nan, np.inf])
+        assert_equal(repr(x), 'array([nan, inf])')
+
+    def test_subclass(self):
+        class sub(np.ndarray):
+            pass
+
+        # one dimensional
+        x1d = np.array([1, 2]).view(sub)
+        assert_equal(repr(x1d), 'sub([1, 2])')
+
+        # two dimensional
+        x2d = np.array([[1, 2], [3, 4]]).view(sub)
+        assert_equal(repr(x2d),
+            'sub([[1, 2],\n'
+            '     [3, 4]])')
+
+        # two dimensional with flexible dtype
+        xstruct = np.ones((2, 2), dtype=[('a', '<i4')]).view(sub)
+        assert_equal(repr(xstruct),
+            "sub([[(1,), (1,)],\n"
+            "     [(1,), (1,)]], dtype=[('a', '<i4')])"
+        )
+
+    def test_0d_object_subclass(self):
+        # make sure that subclasses which return 0d arrays from __getitem__
+        # are printed correctly
+        class sub(np.ndarray):
+            def __new__(cls, inp):
+                obj = np.asarray(inp).view(cls)
+                return obj
+
+            def __getitem__(self, ind):
+                ret = super().__getitem__(ind)
+                return sub(ret)
+
+        # check it works with object arrays
+        x = sub(None)
+        assert_equal(repr(x), 'sub(None, dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # plus recursive object arrays (even depth > 1)
+        y = sub(None)
+        x[()] = y
+        y[()] = x
+        assert_equal(repr(x),
+            'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
+        assert_equal(str(x), '...')
+        x[()] = 0  # resolve circular references for garbage collector
+
+        # nested 0d-subclass-object
+        x = sub(None)
+        x[()] = sub(None)
+        assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
+        assert_equal(str(x), 'None')
+
+        # gh-10663
+        class DuckCounter(np.ndarray):
+            def __getitem__(self, item):
+                result = super().__getitem__(item)
+                if not isinstance(result, DuckCounter):
+                    result = result[...].view(DuckCounter)
+                return result
+
+            def to_string(self):
+                return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
+
+            def __str__(self):
+                if self.shape == ():
+                    return self.to_string()
+                else:
+                    fmt = {'all': lambda x: x.to_string()}
+                    return np.array2string(self, formatter=fmt)
+
+        dc = np.arange(5).view(DuckCounter)
+        assert_equal(str(dc), "[zero one two many many]")
+        assert_equal(str(dc[0]), "zero")
+
+    def test_self_containing(self):
+        arr0d = np.array(None)
+        arr0d[()] = arr0d
+        assert_equal(repr(arr0d),
+                     'array(array(..., dtype=object), dtype=object)')
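+        # Note: the printer detects the self-reference after one level and
+        # substitutes '...' rather than recursing forever.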
arr0d[()] = 0 # resolve recursion for garbage collector + + arr1d = np.array([None, None]) + arr1d[1] = arr1d + assert_equal(repr(arr1d), + 'array([None, array(..., dtype=object)], dtype=object)') + arr1d[1] = 0 # resolve recursion for garbage collector + + first = np.array(None) + second = np.array(None) + first[()] = second + second[()] = first + assert_equal(repr(first), + 'array(array(array(..., dtype=object), dtype=object), dtype=object)') + first[()] = 0 # resolve circular references for garbage collector + + def test_containing_list(self): + # printing square brackets directly would be ambiguous + arr1d = np.array([None, None]) + arr1d[0] = [1, 2] + arr1d[1] = [3] + assert_equal(repr(arr1d), + 'array([list([1, 2]), list([3])], dtype=object)') + + def test_void_scalar_recursion(self): + # gh-9345 + repr(np.void(b'test')) # RecursionError ? + + def test_fieldless_structured(self): + # gh-10366 + no_fields = np.dtype([]) + arr_no_fields = np.empty(4, dtype=no_fields) + assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])') + + +class TestComplexArray: + def test_str(self): + rvals = [0, 1, -1, np.inf, -np.inf, np.nan] + cvals = [complex(rp, ip) for rp in rvals for ip in rvals] + dtypes = [np.complex64, np.cdouble, np.clongdouble] + actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes] + wanted = [ + '[0.+0.j]', '[0.+0.j]', '[0.+0.j]', + '[0.+1.j]', '[0.+1.j]', '[0.+1.j]', + '[0.-1.j]', '[0.-1.j]', '[0.-1.j]', + '[0.+infj]', '[0.+infj]', '[0.+infj]', + '[0.-infj]', '[0.-infj]', '[0.-infj]', + '[0.+nanj]', '[0.+nanj]', '[0.+nanj]', + '[1.+0.j]', '[1.+0.j]', '[1.+0.j]', + '[1.+1.j]', '[1.+1.j]', '[1.+1.j]', + '[1.-1.j]', '[1.-1.j]', '[1.-1.j]', + '[1.+infj]', '[1.+infj]', '[1.+infj]', + '[1.-infj]', '[1.-infj]', '[1.-infj]', + '[1.+nanj]', '[1.+nanj]', '[1.+nanj]', + '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]', + '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]', + '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]', + '[-1.+infj]', '[-1.+infj]', '[-1.+infj]', + '[-1.-infj]', '[-1.-infj]', '[-1.-infj]', + '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]', + '[inf+0.j]', '[inf+0.j]', '[inf+0.j]', + '[inf+1.j]', '[inf+1.j]', '[inf+1.j]', + '[inf-1.j]', '[inf-1.j]', '[inf-1.j]', + '[inf+infj]', '[inf+infj]', '[inf+infj]', + '[inf-infj]', '[inf-infj]', '[inf-infj]', + '[inf+nanj]', '[inf+nanj]', '[inf+nanj]', + '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]', + '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]', + '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]', + '[-inf+infj]', '[-inf+infj]', '[-inf+infj]', + '[-inf-infj]', '[-inf-infj]', '[-inf-infj]', + '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]', + '[nan+0.j]', '[nan+0.j]', '[nan+0.j]', + '[nan+1.j]', '[nan+1.j]', '[nan+1.j]', + '[nan-1.j]', '[nan-1.j]', '[nan-1.j]', + '[nan+infj]', '[nan+infj]', '[nan+infj]', + '[nan-infj]', '[nan-infj]', '[nan-infj]', + '[nan+nanj]', '[nan+nanj]', '[nan+nanj]'] + + for res, val in zip(actual, wanted): + assert_equal(res, val) + +class TestArray2String: + def test_basic(self): + """Basic test of array2string.""" + a = np.arange(3) + assert_(np.array2string(a) == '[0 1 2]') + assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]') + assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]') + + def test_unexpected_kwarg(self): + # ensure than an appropriate TypeError + # is raised when array2string receives + # an unexpected kwarg + + with assert_raises_regex(TypeError, 'nonsense'): + np.array2string(np.array([1, 2, 3]), + nonsense=None) + + def test_format_function(self): + """Test custom format function for each 
element in array.""" + def _format_function(x): + if np.abs(x) < 1: + return '.' + elif np.abs(x) < 2: + return 'o' + else: + return 'O' + + x = np.arange(3) + x_hex = "[0x0 0x1 0x2]" + x_oct = "[0o0 0o1 0o2]" + assert_(np.array2string(x, formatter={'all': _format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'int_kind': _format_function}) == + "[. o O]") + assert_(np.array2string(x, formatter={'all': lambda x: f"{x:.4f}"}) == + "[0.0000 1.0000 2.0000]") + assert_equal(np.array2string(x, formatter={'int': hex}), + x_hex) + assert_equal(np.array2string(x, formatter={'int': oct}), + x_oct) + + x = np.arange(3.) + assert_(np.array2string(x, formatter={'float_kind': lambda x: f"{x:.2f}"}) == + "[0.00 1.00 2.00]") + assert_(np.array2string(x, formatter={'float': lambda x: f"{x:.2f}"}) == + "[0.00 1.00 2.00]") + + s = np.array(['abc', 'def']) + assert_(np.array2string(s, formatter={'numpystr': lambda s: s * 2}) == + '[abcabc defdef]') + + def test_structure_format_mixed(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_equal(np.array2string(x), + "[('Sarah', [8., 7.]) ('John', [6., 7.])]") + + np.set_printoptions(legacy='1.13') + try: + # for issue #5692 + A = np.zeros(shape=10, dtype=[("A", "M8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',) + ('NaT',) ('NaT',) ('NaT',)]""") + ) + finally: + np.set_printoptions(legacy=False) + + # same again, but with non-legacy behavior + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) + ('1970-01-01T00:00:00',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',)]""") + ) + + # and again, with timedeltas + A = np.full(10, 123456, dtype=[("A", "m8[s]")]) + A[5:].fill(np.datetime64('NaT')) + assert_equal( + np.array2string(A), + textwrap.dedent("""\ + [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',) + ( 'NaT',) ( 'NaT',) ( 'NaT',)]""") + ) + + def test_structure_format_int(self): + # See #8160 + struct_int = np.array([([1, -1],), ([123, 1],)], + dtype=[('B', 'i4', 2)]) + assert_equal(np.array2string(struct_int), + "[([ 1, -1],) ([123, 1],)]") + struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)], + dtype=[('B', 'i4', (2, 2))]) + assert_equal(np.array2string(struct_2dint), + "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]") + + def test_structure_format_float(self): + # See #8172 + array_scalar = np.array( + (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8')) + assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") + + def test_unstructured_void_repr(self): + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, 27, 91, 51, 49, 109, 82, 101, 100], + dtype='u1').view('V8') + assert_equal(repr(a[0]), + r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") + assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") + assert_equal(repr(a), + r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," + "\n" + r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')") + + assert_equal(eval(repr(a), vars(np)), a) + assert_equal(eval(repr(a[0]), {'np': np}), a[0]) + + def test_edgeitems_kwarg(self): + # previously the global print options would be taken over the kwarg + arr = np.zeros(3, int) + 
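+        # Note: threshold=0 forces summarization even for this tiny array,
+        # and edgeitems=1 keeps a single element at each end.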
assert_equal( + np.array2string(arr, edgeitems=1, threshold=0), + "[0 ... 0]" + ) + + def test_summarize_1d(self): + A = np.arange(1001) + strA = '[ 0 1 2 ... 998 999 1000]' + assert_equal(str(A), strA) + + reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' + try: + np.set_printoptions(legacy='2.1') + assert_equal(repr(A), reprA) + finally: + np.set_printoptions(legacy=False) + + assert_equal(repr(A), reprA.replace(')', ', shape=(1001,))')) + + def test_summarize_2d(self): + A = np.arange(1002).reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ + ' [ 501, 502, 503, ..., 999, 1000, 1001]])' + try: + np.set_printoptions(legacy='2.1') + assert_equal(repr(A), reprA) + finally: + np.set_printoptions(legacy=False) + + assert_equal(repr(A), reprA.replace(')', ', shape=(2, 501))')) + + def test_summarize_2d_dtype(self): + A = np.arange(1002, dtype='i2').reshape(2, 501) + strA = '[[ 0 1 2 ... 498 499 500]\n' \ + ' [ 501 502 503 ... 999 1000 1001]]' + assert_equal(str(A), strA) + + reprA = ('array([[ 0, 1, 2, ..., 498, 499, 500],\n' + ' [ 501, 502, 503, ..., 999, 1000, 1001]],\n' + ' shape=(2, 501), dtype=int16)') + assert_equal(repr(A), reprA) + + def test_summarize_structure(self): + A = (np.arange(2002, dtype="i8", (2, 1001))]) + strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]" + assert_equal(str(B), strB) + + reprB = ( + "array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n" + " dtype=[('i', '>i8', (2, 1001))])" + ) + assert_equal(repr(B), reprB) + + C = (np.arange(22, dtype=" 1: + # if the type is >1 byte, the non-native endian version + # must show endianness. + assert non_native_repr != native_repr + assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr + + def test_linewidth_repr(self): + a = np.full(7, fill_value=2) + np.set_printoptions(linewidth=17) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2])""") + ) + np.set_printoptions(linewidth=17, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, 2])""") + ) + + a = np.full(8, fill_value=2) + + np.set_printoptions(linewidth=18, legacy=False) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, + 2, 2, 2, + 2, 2])""") + ) + + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + repr(a), + textwrap.dedent("""\ + array([2, 2, 2, 2, + 2, 2, 2, 2])""") + ) + + def test_linewidth_str(self): + a = np.full(18, fill_value=2) + np.set_printoptions(linewidth=18) + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 + 2 2]""") + ) + np.set_printoptions(linewidth=18, legacy='1.13') + assert_equal( + str(a), + textwrap.dedent("""\ + [2 2 2 2 2 2 2 2 2 + 2 2 2 2 2 2 2 2 2]""") + ) + + def test_edgeitems(self): + np.set_printoptions(edgeitems=1, threshold=1) + a = np.arange(27).reshape((3, 3, 3)) + assert_equal( + repr(a), + textwrap.dedent("""\ + array([[[ 0, ..., 2], + ..., + [ 6, ..., 8]], + + ..., + + [[18, ..., 20], + ..., + [24, ..., 26]]], shape=(3, 3, 3))""") + ) + + b = np.zeros((3, 3, 1, 1)) + assert_equal( + repr(b), + textwrap.dedent("""\ + array([[[[0.]], + + ..., + + [[0.]]], + + + ..., + + + [[[0.]], + + ..., + + [[0.]]]], shape=(3, 3, 1, 1))""") + ) + + # 1.13 had extra trailing spaces, and was missing newlines + try: + np.set_printoptions(legacy='1.13') + assert_equal(repr(a), ( + "array([[[ 0, ..., 2],\n" + " ..., \n" + " [ 6, ..., 8]],\n" 
+ "\n" + " ..., \n" + " [[18, ..., 20],\n" + " ..., \n" + " [24, ..., 26]]])") + ) + assert_equal(repr(b), ( + "array([[[[ 0.]],\n" + "\n" + " ..., \n" + " [[ 0.]]],\n" + "\n" + "\n" + " ..., \n" + " [[[ 0.]],\n" + "\n" + " ..., \n" + " [[ 0.]]]])") + ) + finally: + np.set_printoptions(legacy=False) + + def test_edgeitems_structured(self): + np.set_printoptions(edgeitems=1, threshold=1) + A = np.arange(5 * 2 * 3, dtype="f4')])"), + (np.void(b'a'), r"void(b'\x61')", r"np.void(b'\x61')"), + ]) +def test_scalar_repr_special(scalar, legacy_repr, representation): + # Test NEP 51 scalar repr (and legacy option) for numeric types + assert repr(scalar) == representation + + with np.printoptions(legacy="1.25"): + assert repr(scalar) == legacy_repr + +def test_scalar_void_float_str(): + # Note that based on this currently we do not print the same as a tuple + # would, since the tuple would include the repr() inside for floats, but + # we do not do that. + scalar = np.void((1.0, 2.0), dtype=[('f0', 'f4')]) + assert str(scalar) == "(1.0, 2.0)" + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") +@pytest.mark.skipif(sys.version_info < (3, 11), + reason="asyncio.barrier was added in Python 3.11") +def test_printoptions_asyncio_safe(): + asyncio = pytest.importorskip("asyncio") + + b = asyncio.Barrier(2) + + async def legacy_113(): + np.set_printoptions(legacy='1.13', precision=12) + await b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=34, legacy='1.21'): + po = np.get_printoptions() + assert po['legacy'] == '1.21' + assert po['precision'] == 12 + assert po['linewidth'] == 34 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + + async def legacy_125(): + np.set_printoptions(legacy='1.25', precision=7) + await b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=6, legacy='1.13'): + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 7 + assert po['linewidth'] == 6 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + + async def main(): + await asyncio.gather(legacy_125(), legacy_125()) + + loop = asyncio.new_event_loop() + asyncio.run(main()) + loop.close() + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads") +def test_multithreaded_array_printing(): + # the dragon4 implementation uses a static scratch space for performance + # reasons this test makes sure it is set up in a thread-safe manner + + run_threaded(TestPrintOptions().test_floatmode, 500) diff --git a/python/numpy/_core/tests/test_casting_floatingpoint_errors.py b/python/numpy/_core/tests/test_casting_floatingpoint_errors.py new file mode 100644 index 000000000..2f9c01f90 --- /dev/null +++ b/python/numpy/_core/tests/test_casting_floatingpoint_errors.py @@ -0,0 +1,154 @@ +import pytest +from pytest import param + +import numpy as np +from numpy.testing import IS_WASM + + +def values_and_dtypes(): + """ + Generate value+dtype pairs that generate floating point errors during + casts. The invalid casts to integers will generate "invalid" value + warnings, the float casts all generate "overflow". 
+ + (The Python int/float paths don't need to get tested in all the same + situations, but it does not hurt.) + """ + # Casting to float16: + yield param(70000, "float16", id="int-to-f2") + yield param("70000", "float16", id="str-to-f2") + yield param(70000.0, "float16", id="float-to-f2") + yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2") + yield param(np.float64(70000.), "float16", id="double-to-f2") + yield param(np.float32(70000.), "float16", id="float-to-f2") + # Casting to float32: + yield param(10**100, "float32", id="int-to-f4") + yield param(1e100, "float32", id="float-to-f2") + yield param(np.longdouble(1e300), "float32", id="longdouble-to-f2") + yield param(np.float64(1e300), "float32", id="double-to-f2") + # Casting to float64: + # If longdouble is double-double, its max can be rounded down to the double + # max. So we correct the double spacing (a bit weird, admittedly): + max_ld = np.finfo(np.longdouble).max + spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0)) + if max_ld - spacing > np.finfo("f8").max: + yield param(np.finfo(np.longdouble).max, "float64", + id="longdouble-to-f8") + + # Cast to complex32: + yield param(2e300, "complex64", id="float-to-c8") + yield param(2e300 + 0j, "complex64", id="complex-to-c8") + yield param(2e300j, "complex64", id="complex-to-c8") + yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8") + + # Invalid float to integer casts: + with np.errstate(over="ignore"): + for to_dt in np.typecodes["AllInteger"]: + for value in [np.inf, np.nan]: + for from_dt in np.typecodes["AllFloat"]: + from_dt = np.dtype(from_dt) + from_val = from_dt.type(value) + + yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}") + + +def check_operations(dtype, value): + """ + There are many dedicated paths in NumPy which cast and should check for + floating point errors which occurred during those casts. 
+ """ + if dtype.kind != 'i': + # These assignments use the stricter setitem logic: + def assignment(): + arr = np.empty(3, dtype=dtype) + arr[0] = value + + yield assignment + + def fill(): + arr = np.empty(3, dtype=dtype) + arr.fill(value) + + yield fill + + def copyto_scalar(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, value, casting="unsafe") + + yield copyto_scalar + + def copyto(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, np.array([value, value, value]), casting="unsafe") + + yield copyto + + def copyto_scalar_masked(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, value, casting="unsafe", + where=[True, False, True]) + + yield copyto_scalar_masked + + def copyto_masked(): + arr = np.empty(3, dtype=dtype) + np.copyto(arr, np.array([value, value, value]), casting="unsafe", + where=[True, False, True]) + + yield copyto_masked + + def direct_cast(): + np.array([value, value, value]).astype(dtype) + + yield direct_cast + + def direct_cast_nd_strided(): + arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :] + arr.astype(dtype) + + yield direct_cast_nd_strided + + def boolean_array_assignment(): + arr = np.empty(3, dtype=dtype) + arr[[True, False, True]] = np.array([value, value]) + + yield boolean_array_assignment + + def integer_array_assignment(): + arr = np.empty(3, dtype=dtype) + values = np.array([value, value]) + + arr[[0, 1]] = values + + yield integer_array_assignment + + def integer_array_assignment_with_subspace(): + arr = np.empty((5, 3), dtype=dtype) + values = np.array([value, value, value]) + + arr[[0, 2]] = values + + yield integer_array_assignment_with_subspace + + def flat_assignment(): + arr = np.empty((3,), dtype=dtype) + values = np.array([value, value, value]) + arr.flat[:] = values + + yield flat_assignment + +@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") +@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes()) +@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning") +def test_floatingpoint_errors_casting(dtype, value): + dtype = np.dtype(dtype) + for operation in check_operations(dtype, value): + dtype = np.dtype(dtype) + + match = "invalid" if dtype.kind in 'iu' else "overflow" + with pytest.warns(RuntimeWarning, match=match): + operation() + + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError, match=match): + operation() diff --git a/python/numpy/_core/tests/test_casting_unittests.py b/python/numpy/_core/tests/test_casting_unittests.py new file mode 100644 index 000000000..f8441ea9d --- /dev/null +++ b/python/numpy/_core/tests/test_casting_unittests.py @@ -0,0 +1,817 @@ +""" +The tests exercise the casting machinery in a more low-level manner. +The reason is mostly to test a new implementation of the casting machinery. + +Unlike most tests in NumPy, these are closer to unit-tests rather +than integration tests. +""" + +import ctypes +import enum +import random +import textwrap + +import pytest +from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl + +import numpy as np +from numpy.lib.stride_tricks import as_strided +from numpy.testing import assert_array_equal + +# Simple skips object, parametric and long double (unsupported by struct) +simple_dtypes = "?bhilqBHILQefdFD" +if np.dtype("l").itemsize != np.dtype("q").itemsize: + # Remove l and L, the table was generated with 64bit linux in mind. 
+ simple_dtypes = simple_dtypes.replace("l", "").replace("L", "") +simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes] + + +def simple_dtype_instances(): + for dtype_class in simple_dtypes: + dt = dtype_class() + yield pytest.param(dt, id=str(dt)) + if dt.byteorder != "|": + dt = dt.newbyteorder() + yield pytest.param(dt, id=str(dt)) + + +def get_expected_stringlength(dtype): + """Returns the string length when casting the basic dtypes to strings. + """ + if dtype == np.bool: + return 5 + if dtype.kind in "iu": + if dtype.itemsize == 1: + length = 3 + elif dtype.itemsize == 2: + length = 5 + elif dtype.itemsize == 4: + length = 10 + elif dtype.itemsize == 8: + length = 20 + else: + raise AssertionError(f"did not find expected length for {dtype}") + + if dtype.kind == "i": + length += 1 # adds one character for the sign + + return length + + # Note: Can't do dtype comparison for longdouble on windows + if dtype.char == "g": + return 48 + elif dtype.char == "G": + return 48 * 2 + elif dtype.kind == "f": + return 32 # also for half apparently. + elif dtype.kind == "c": + return 32 * 2 + + raise AssertionError(f"did not find expected length for {dtype}") + + +class Casting(enum.IntEnum): + no = 0 + equiv = 1 + safe = 2 + same_kind = 3 + unsafe = 4 + + +def _get_cancast_table(): + table = textwrap.dedent(""" + X ? b h i l q B H I L Q e f d g F D G S U V O M m + ? # = = = = = = = = = = = = = = = = = = = = = . = + b . # = = = = . . . . . = = = = = = = = = = = . = + h . ~ # = = = . . . . . ~ = = = = = = = = = = . = + i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . = + l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = + q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . = + B . ~ = = = = # = = = = = = = = = = = = = = = . = + H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . = + I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . = + L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ + Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~ + e . . . . . . . . . . . # = = = = = = = = = = . . + f . . . . . . . . . . . ~ # = = = = = = = = = . . + d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . . + g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . . + F . . . . . . . . . . . . . . . # = = = = = = . . + D . . . . . . . . . . . . . . . ~ # = = = = = . . + G . . . . . . . . . . . . . . . ~ ~ # = = = = . . + S . . . . . . . . . . . . . . . . . . # = = = . . + U . . . . . . . . . . . . . . . . . . . # = = . . + V . . . . . . . . . . . . . . . . . . . . # = . . + O . . . . . . . . . . . . . . . . . . . . = # . . + M . . . . . . . . . . . . . . . . . . . . = = # . + m . . . . . . . . . . . . . . . . . . . . = = . 
#
+    """).strip().split("\n")
+    dtypes = [type(np.dtype(c)) for c in table[0][2::2]]
+
+    convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
+                    "=": Casting.safe, "#": Casting.equiv,
+                    " ": -1}
+
+    cancast = {}
+    for from_dt, row in zip(dtypes, table[1:]):
+        cancast[from_dt] = {}
+        for to_dt, c in zip(dtypes, row[2::2]):
+            cancast[from_dt][to_dt] = convert_cast[c]
+
+    return cancast
+
+
+CAST_TABLE = _get_cancast_table()
+
+
+class TestChanges:
+    """
+    These test cases exercise some behaviour changes
+    """
+    @pytest.mark.parametrize("string", ["S", "U"])
+    @pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
+    def test_float_to_string(self, floating, string):
+        assert np.can_cast(floating, string)
+        # 100 is long enough to hold any formatted floating
+        assert np.can_cast(floating, f"{string}100")
+
+    def test_to_void(self):
+        # But in general, we do consider these safe:
+        assert np.can_cast("d", "V")
+        assert np.can_cast("S20", "V")
+
+        # Do not consider it a safe cast if the void is too small:
+        assert not np.can_cast("d", "V1")
+        assert not np.can_cast("S20", "V1")
+        assert not np.can_cast("U1", "V1")
+        # Structured to unstructured is just like any other:
+        assert np.can_cast("d,i", "V", casting="same_kind")
+        # Unstructured void to unstructured is actually no cast at all:
+        assert np.can_cast("V3", "V", casting="no")
+        assert np.can_cast("V0", "V", casting="no")
+
+
+class TestCasting:
+    size = 1500  # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
+
+    def get_data(self, dtype1, dtype2):
+        if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
+            length = self.size // dtype1.itemsize
+        else:
+            length = self.size // dtype2.itemsize
+
+        # Assume that the base array is well enough aligned for all inputs.
+        arr1 = np.empty(length, dtype=dtype1)
+        assert arr1.flags.c_contiguous
+        assert arr1.flags.aligned
+
+        values = [random.randrange(-128, 128) for _ in range(length)]
+
+        for i, value in enumerate(values):
+            # Use item assignment to ensure this is not using casting:
+            if value < 0 and dtype1.kind == "u":
+                # Manually rollover unsigned integers (-1 -> int.max)
+                value = value + np.iinfo(dtype1).max + 1
+            arr1[i] = value
+
+        if dtype2 is None:
+            if dtype1.char == "?":
+                values = [bool(v) for v in values]
+            return arr1, values
+
+        if dtype2.char == "?":
+            values = [bool(v) for v in values]
+
+        arr2 = np.empty(length, dtype=dtype2)
+        assert arr2.flags.c_contiguous
+        assert arr2.flags.aligned
+
+        for i, value in enumerate(values):
+            # Use item assignment to ensure this is not using casting:
+            if value < 0 and dtype2.kind == "u":
+                # Manually rollover unsigned integers (-1 -> int.max)
+                value = value + np.iinfo(dtype2).max + 1
+            arr2[i] = value
+
+        return arr1, arr2, values
+
+    def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
+        """
+        Returns a copy of arr1 that may be non-contiguous or unaligned, and a
+        matching array for arr2 (although not a copy).
+        """
+        if contig:
+            stride1 = arr1.dtype.itemsize
+            stride2 = arr2.dtype.itemsize
+        elif aligned:
+            stride1 = 2 * arr1.dtype.itemsize
+            stride2 = 2 * arr2.dtype.itemsize
+        else:
+            stride1 = arr1.dtype.itemsize + 1
+            stride2 = arr2.dtype.itemsize + 1
+
+        max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
+        max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
+        from_bytes = np.zeros(max_size1, dtype=np.uint8)
+        to_bytes = np.zeros(max_size2, dtype=np.uint8)
+
+        # Sanity check that the above is large enough:
+        assert stride1 * len(arr1) <= from_bytes.nbytes
+        assert stride2 * len(arr2) <= to_bytes.nbytes
+
+        if aligned:
+            new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
+                              arr1.shape, (stride1,))
+            new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
+                              arr2.shape, (stride2,))
+        else:
+            new1 = as_strided(from_bytes[1:].view(arr1.dtype),
+                              arr1.shape, (stride1,))
+            new2 = as_strided(to_bytes[1:].view(arr2.dtype),
+                              arr2.shape, (stride2,))
+
+        new1[...] = arr1
+
+        if not contig:
+            # Ensure we did not overwrite bytes that should not be written:
+            offset = arr1.dtype.itemsize if aligned else 0
+            buf = from_bytes[offset::stride1].tobytes()
+            assert buf.count(b"\0") == len(buf)
+
+        if contig:
+            assert new1.flags.c_contiguous
+            assert new2.flags.c_contiguous
+        else:
+            assert not new1.flags.c_contiguous
+            assert not new2.flags.c_contiguous
+
+        if aligned:
+            assert new1.flags.aligned
+            assert new2.flags.aligned
+        else:
+            assert not new1.flags.aligned or new1.dtype.alignment == 1
+            assert not new2.flags.aligned or new2.dtype.alignment == 1
+
+        return new1, new2
+
+    @pytest.mark.parametrize("from_Dt", simple_dtypes)
+    def test_simple_cancast(self, from_Dt):
+        for to_Dt in simple_dtypes:
+            cast = get_castingimpl(from_Dt, to_Dt)
+
+            for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
+                default = cast._resolve_descriptors((from_dt, None))[1][1]
+                assert default == to_Dt()
+                del default
+
+                for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
+                    casting, (from_res, to_res), view_off = (
+                        cast._resolve_descriptors((from_dt, to_dt)))
+                    assert type(from_res) == from_Dt
+                    assert type(to_res) == to_Dt
+                    if view_off is not None:
+                        # If a view is acceptable, this is "no" casting
+                        # and byte order must be matching.
+                        assert casting == Casting.no
+                        # The above table lists this as "equivalent"
+                        assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
+                        # Note that to_res may not be the same as from_dt
+                        assert from_res.isnative == to_res.isnative
+                    else:
+                        if from_Dt == to_Dt:
+                            # Note that to_res may not be the same as from_dt
+                            assert from_res.isnative != to_res.isnative
+                        assert casting == CAST_TABLE[from_Dt][to_Dt]
+
+                    if from_Dt is to_Dt:
+                        assert from_dt is from_res
+                        assert to_dt is to_res
+
+    @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
+    @pytest.mark.parametrize("from_dt", simple_dtype_instances())
+    def test_simple_direct_casts(self, from_dt):
+        """
+        This test checks numeric direct casts for dtypes supported also by the
+        struct module (plus complex).  It tries to test a wide range of
+        inputs, but skips over possibly undefined behaviour (e.g. int rollover).
+        Longdouble and CLongdouble are tested, but only using double precision.
+
+        If this test creates issues, it should possibly just be simplified
+        or even removed (checking whether unaligned/non-contiguous casts give
+        the same results is useful, though).
+        """
+        for to_dt in simple_dtype_instances():
+            to_dt = to_dt.values[0]
+            cast = get_castingimpl(type(from_dt), type(to_dt))
+
+            casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+                (from_dt, to_dt))
+
+            if from_res is not from_dt or to_res is not to_dt:
+                # Do not test this case, it is handled in multiple steps,
+                # each of which should be tested individually.
+                continue
+
+            safe = casting <= Casting.safe
+            del from_res, to_res, casting
+
+            arr1, arr2, values = self.get_data(from_dt, to_dt)
+
+            cast._simple_strided_call((arr1, arr2))
+
+            # Check via python list
+            assert arr2.tolist() == values
+
+            # Check that the same results are achieved for strided loops
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            # Check if alignment makes a difference, but only if supported
+            # and only if the alignment can be wrong
+            if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
+                    not cast._supports_unaligned):
+                continue
+
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+            del arr1_o, arr2_o, cast
+
+    @pytest.mark.parametrize("from_Dt", simple_dtypes)
+    def test_numeric_to_times(self, from_Dt):
+        # We currently only implement contiguous loops, so only need to
+        # test those.
+        from_dt = from_Dt()
+
+        time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
+                       np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
+        for time_dt in time_dtypes:
+            cast = get_castingimpl(type(from_dt), type(time_dt))
+
+            casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+                (from_dt, time_dt))
+
+            assert from_res is from_dt
+            assert to_res is time_dt
+            del from_res, to_res
+
+            assert casting & CAST_TABLE[from_Dt][type(time_dt)]
+            assert view_off is None
+
+            int64_dt = np.dtype(np.int64)
+            arr1, arr2, values = self.get_data(from_dt, int64_dt)
+            arr2 = arr2.view(time_dt)
+            arr2[...] = np.datetime64("NaT")
+
+            if time_dt == np.dtype("M8"):
+                # This is a bit of a strange path, and could probably be removed
+                arr1[-1] = 0  # ensure at least one value is not NaT
+
+                # The cast currently succeeds, but the values are invalid:
+                cast._simple_strided_call((arr1, arr2))
+                with pytest.raises(ValueError):
+                    str(arr2[-1])  # e.g. conversion to string fails
+                continue
+
+            cast._simple_strided_call((arr1, arr2))
+
+            assert [int(v) for v in arr2.tolist()] == values
+
+            # Check that the same results are achieved for strided loops
+            arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
+            cast._simple_strided_call((arr1_o, arr2_o))
+
+            assert_array_equal(arr2_o, arr2)
+            assert arr2_o.tobytes() == arr2.tobytes()
+
+    @pytest.mark.parametrize(
+        ["from_dt", "to_dt", "expected_casting", "expected_view_off",
+         "nom", "denom"],
+        [("M8[ns]", None, Casting.no, 0, 1, 1),
+         (str(np.dtype("M8[ns]").newbyteorder()), None,
+          Casting.equiv, None, 1, 1),
+         ("M8", "M8[ms]", Casting.safe, 0, 1, 1),
+         # should be invalid cast:
+         ("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
+         ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
+         ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
+         ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
+         ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
+         ("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
+          # give full values based on NumPy 1.19.x
+          [-2**63, 0, -1, 1314, -1315, 564442610]),
+         ("m8[ns]", None, Casting.no, 0, 1, 1),
+         (str(np.dtype("m8[ns]").newbyteorder()), None,
+          Casting.equiv, None, 1, 1),
+         ("m8", "m8[ms]", Casting.safe, 0, 1, 1),
+         # should be invalid cast:
+         ("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
+         ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
+         ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
+         ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
+         ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
+         ("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
+          # give full values based on NumPy 1.19.x
+          [-2**63, 0, 0, 1314, -1315, 564442610])])
+    def test_time_to_time(self, from_dt, to_dt,
+                          expected_casting, expected_view_off,
+                          nom, denom):
+        from_dt = np.dtype(from_dt)
+        if to_dt is not None:
+            to_dt = np.dtype(to_dt)
+
+        # Test a few values for casting (results generated with NumPy 1.19)
+        values = np.array([-2**63, 1, 2**63 - 1, 10000, -10000, 2**32])
+        values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
+        assert values.dtype.byteorder == from_dt.byteorder
+        assert np.isnat(values.view(from_dt)[0])
+
+        DType = type(from_dt)
+        cast = get_castingimpl(DType, DType)
+        casting, (from_res, to_res), view_off = cast._resolve_descriptors(
+            (from_dt, to_dt))
+        assert from_res is from_dt
+        assert to_res is to_dt or to_dt is None
+        assert casting == expected_casting
+        assert view_off == expected_view_off
+
+        if nom is not None:
+            expected_out = (values * nom // denom).view(to_res)
+            expected_out[0] = "NaT"
+        else:
+            expected_out = np.empty_like(values)
+            expected_out[...] = denom
+            expected_out = expected_out.view(to_dt)
+
+        orig_arr = values.view(from_dt)
+        orig_out = np.empty_like(expected_out)
+
+        if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):  # noqa: PLR1714
+            # Casting from non-generic to generic units is an error and should
+            # probably be reported as an invalid cast earlier.
+            with pytest.raises(ValueError):
+                cast._simple_strided_call((orig_arr, orig_out))
+            return
+
+        for aligned in [True, False]:
+            for contig in [True, False]:
+                arr, out = self.get_data_variation(
+                    orig_arr, orig_out, aligned, contig)
+                out[...] = 0
+                cast._simple_strided_call((arr, out))
+                assert_array_equal(out.view("int64"), expected_out.view("int64"))
+
+    def string_with_modified_length(self, dtype, change_length):
+        fact = 1 if dtype.char == "S" else 4
+        length = dtype.itemsize // fact + change_length
+        return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
+
+    @pytest.mark.parametrize("other_DT", simple_dtypes)
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    def test_string_cancast(self, other_DT, string_char):
+        fact = 1 if string_char == "S" else 4
+
+        string_DT = type(np.dtype(string_char))
+        cast = get_castingimpl(other_DT, string_DT)
+
+        other_dt = other_DT()
+        expected_length = get_expected_stringlength(other_dt)
+        string_dt = np.dtype(f"{string_char}{expected_length}")
+
+        safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
+            (other_dt, None))
+        assert res_dt.itemsize == expected_length * fact
+        assert safety == Casting.safe  # we consider casts to string "safe"
+        assert view_off is None
+        assert isinstance(res_dt, string_DT)
+
+        # These casts currently implement changing the string length, so
+        # check the cast-safety for too long/fixed string lengths:
+        for change_length in [-1, 0, 1]:
+            if change_length >= 0:
+                expected_safety = Casting.safe
+            else:
+                expected_safety = Casting.same_kind
+
+            to_dt = self.string_with_modified_length(string_dt, change_length)
+            safety, (_, res_dt), view_off = cast._resolve_descriptors(
+                (other_dt, to_dt))
+            assert res_dt is to_dt
+            assert safety == expected_safety
+            assert view_off is None
+
+        # The opposite direction is always considered unsafe:
+        cast = get_castingimpl(string_DT, other_DT)
+
+        safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
+        assert safety == Casting.unsafe
+        assert view_off is None
+
+        cast = get_castingimpl(string_DT, other_DT)
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (string_dt, None))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert other_dt is res_dt  # returns the singleton for simple dtypes
+
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    @pytest.mark.parametrize("other_dt", simple_dtype_instances())
+    def test_simple_string_casts_roundtrip(self, other_dt, string_char):
+        """
+        Tests casts from and to string by checking the roundtripping property.
+
+        The test also covers some string to string casts (but not all).
+
+        If this test creates issues, it should possibly just be simplified
+        or even removed (checking whether unaligned/non-contiguous casts give
+        the same results is useful, though).
+        """
+        string_DT = type(np.dtype(string_char))
+
+        cast = get_castingimpl(type(other_dt), string_DT)
+        cast_back = get_castingimpl(string_DT, type(other_dt))
+        _, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
+            (other_dt, None))
+
+        if res_other_dt is not other_dt:
+            # do not support non-native byteorder, skip test in that case
+            assert other_dt.byteorder != res_other_dt.byteorder
+            return
+
+        orig_arr, values = self.get_data(other_dt, None)
+        str_arr = np.zeros(len(orig_arr), dtype=string_dt)
+        string_dt_short = self.string_with_modified_length(string_dt, -1)
+        str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
+        string_dt_long = self.string_with_modified_length(string_dt, 1)
+        str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
+
+        assert not cast._supports_unaligned  # if support is added, should test
+        assert not cast_back._supports_unaligned
+
+        for contig in [True, False]:
+            other_arr, str_arr = self.get_data_variation(
+                orig_arr, str_arr, True, contig)
+            _, str_arr_short = self.get_data_variation(
+                orig_arr, str_arr_short.copy(), True, contig)
+            _, str_arr_long = self.get_data_variation(
+                orig_arr, str_arr_long, True, contig)
+
+            cast._simple_strided_call((other_arr, str_arr))
+
+            cast._simple_strided_call((other_arr, str_arr_short))
+            assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
+
+            cast._simple_strided_call((other_arr, str_arr_long))
+            assert_array_equal(str_arr, str_arr_long)
+
+            if other_dt.kind == "b":
+                # Booleans do not roundtrip
+                continue
+
+            other_arr[...] = 0
+            cast_back._simple_strided_call((str_arr, other_arr))
+            assert_array_equal(orig_arr, other_arr)
+
+            other_arr[...] = 0
+            cast_back._simple_strided_call((str_arr_long, other_arr))
+            assert_array_equal(orig_arr, other_arr)
+
+    @pytest.mark.parametrize("other_dt", ["S8", "U8"])
+    @pytest.mark.parametrize("string_char", ["S", "U"])
+    def test_string_to_string_cancast(self, other_dt, string_char):
+        other_dt = np.dtype(other_dt)
+
+        fact = 1 if string_char == "S" else 4
+        div = 1 if other_dt.char == "S" else 4
+
+        string_DT = type(np.dtype(string_char))
+        cast = get_castingimpl(type(other_dt), string_DT)
+
+        expected_length = other_dt.itemsize // div
+        string_dt = np.dtype(f"{string_char}{expected_length}")
+
+        safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
+            (other_dt, None))
+        assert res_dt.itemsize == expected_length * fact
+        assert isinstance(res_dt, string_DT)
+
+        expected_view_off = None
+        if other_dt.char == string_char:
+            if other_dt.isnative:
+                expected_safety = Casting.no
+                expected_view_off = 0
+            else:
+                expected_safety = Casting.equiv
+        elif string_char == "U":
+            expected_safety = Casting.safe
+        else:
+            expected_safety = Casting.unsafe
+
+        assert view_off == expected_view_off
+        assert expected_safety == safety
+
+        for change_length in [-1, 0, 1]:
+            to_dt = self.string_with_modified_length(string_dt, change_length)
+            safety, (_, res_dt), view_off = cast._resolve_descriptors(
+                (other_dt, to_dt))
+
+            assert res_dt is to_dt
+            if change_length <= 0:
+                assert view_off == expected_view_off
+            else:
+                assert view_off is None
+            if expected_safety == Casting.unsafe:
+                assert safety == expected_safety
+            elif change_length < 0:
+                assert safety == Casting.same_kind
+            elif change_length == 0:
+                assert safety == expected_safety
+            elif change_length > 0:
+                assert safety == Casting.safe
+
+    @pytest.mark.parametrize("order1", [">", "<"])
+    @pytest.mark.parametrize("order2", [">", "<"])
+    def test_unicode_byteswapped_cast(self, order1, order2):
+        # Very specific tests (not using the castingimpl directly)
+        # that test unicode byteswaps including for unaligned array data.
+        dtype1 = np.dtype(f"{order1}U30")
+        dtype2 = np.dtype(f"{order2}U30")
+        data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
+        data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
+        if dtype1.alignment != 1:
+            # alignment should always be >1, but skip the check if not
+            assert not data1.flags.aligned
+            assert not data2.flags.aligned
+
+        element = "this is a ünicode string‽"
+        data1[()] = element
+        # Test both `data1` and `data1.copy()` (which should be aligned)
+        for data in [data1, data1.copy()]:
+            data2[...] = data
+            assert data2[()] == element
+            assert data2.copy()[()] == element
+
+    def test_void_to_string_special_case(self):
+        # Cover a small special case in void to string casting that could
+        # probably just as well be turned into an error (compare
+        # `test_object_to_parametric_internal_error` below).
+        assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
+        assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
+
+    def test_object_to_parametric_internal_error(self):
+        # We reject casting from object to a parametric type, without
+        # figuring out the correct instance first.
+        object_dtype = type(np.dtype(object))
+        other_dtype = type(np.dtype(str))
+        cast = get_castingimpl(object_dtype, other_dtype)
+        with pytest.raises(TypeError,
+                match="casting from object to the parametric DType"):
+            cast._resolve_descriptors((np.dtype("O"), None))
+
+    @pytest.mark.parametrize("dtype", simple_dtype_instances())
+    def test_object_and_simple_resolution(self, dtype):
+        # Simple test to exercise the cast when no instance is specified
+        object_dtype = type(np.dtype(object))
+        cast = get_castingimpl(object_dtype, type(dtype))
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (np.dtype("O"), dtype))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert res_dt is dtype
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (np.dtype("O"), None))
+        assert safety == Casting.unsafe
+        assert view_off is None
+        assert res_dt == dtype.newbyteorder("=")
+
+    @pytest.mark.parametrize("dtype", simple_dtype_instances())
+    def test_simple_to_object_resolution(self, dtype):
+        # Simple test to exercise the cast when no instance is specified
+        object_dtype = type(np.dtype(object))
+        cast = get_castingimpl(type(dtype), object_dtype)
+
+        safety, (_, res_dt), view_off = cast._resolve_descriptors(
+            (dtype, None))
+        assert safety == Casting.safe
+        assert view_off is None
+        assert res_dt is np.dtype("O")
+
+    @pytest.mark.parametrize("casting", ["no", "unsafe"])
+    def test_void_and_structured_with_subarray(self, casting):
+        # test case corresponding to gh-19325
+        dtype = np.dtype([("foo", "<i8", (1, 1))])
+        expected = casting == "unsafe"
+        assert np.can_cast("V4", dtype, casting=casting) == expected
+        assert np.can_cast(dtype, "V4", casting=casting) == expected
+
+    @pytest.mark.parametrize("dtype", np.typecodes["All"])
+    def test_object_casts_NULL_None_equivalence(self, dtype):
+        # None to <other> casts may succeed or fail, but a NULL'ed array must
+        # behave the same as one filled with None's.
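+        # (Object arrays are normally initialized to None; the ctypes.memset
+        # below rewrites the buffer with NULL pointers, which NumPy's C code
+        # is expected to treat exactly like explicit None entries.)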
+ arr_normal = np.array([None] * 5) + arr_NULLs = np.empty_like(arr_normal) + ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes) + # If the check fails (maybe it should) the test would lose its purpose: + assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes + + try: + expected = arr_normal.astype(dtype) + except TypeError: + with pytest.raises(TypeError): + arr_NULLs.astype(dtype) + else: + assert_array_equal(expected, arr_NULLs.astype(dtype)) + + @pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"]) + def test_nonstandard_bool_to_other(self, dtype): + # simple test for casting bool_ to numeric types, which should not + # expose the detail that NumPy bools can sometimes take values other + # than 0 and 1. See also gh-19514. + nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool) + res = nonstandard_bools.astype(dtype) + expected = [0, 1, 1] + assert_array_equal(res, expected) diff --git a/python/numpy/_core/tests/test_conversion_utils.py b/python/numpy/_core/tests/test_conversion_utils.py new file mode 100644 index 000000000..03ba33957 --- /dev/null +++ b/python/numpy/_core/tests/test_conversion_utils.py @@ -0,0 +1,206 @@ +""" +Tests for numpy/_core/src/multiarray/conversion_utils.c +""" +import re + +import numpy._core._multiarray_tests as mt +import pytest + +from numpy._core.multiarray import CLIP, RAISE, WRAP +from numpy.testing import assert_raises + + +class StringConverterTestCase: + allow_bytes = True + case_insensitive = True + exact_match = False + warn = True + + def _check_value_error(self, val): + pattern = fr'\(got {re.escape(repr(val))}\)' + with pytest.raises(ValueError, match=pattern) as exc: + self.conv(val) + + def _check_conv_assert_warn(self, val, expected): + if self.warn: + with assert_raises(ValueError) as exc: + assert self.conv(val) == expected + else: + assert self.conv(val) == expected + + def _check(self, val, expected): + """Takes valid non-deprecated inputs for converters, + runs converters on inputs, checks correctness of outputs, + warnings and errors""" + assert self.conv(val) == expected + + if self.allow_bytes: + assert self.conv(val.encode('ascii')) == expected + else: + with pytest.raises(TypeError): + self.conv(val.encode('ascii')) + + if len(val) != 1: + if self.exact_match: + self._check_value_error(val[:1]) + self._check_value_error(val + '\0') + else: + self._check_conv_assert_warn(val[:1], expected) + + if self.case_insensitive: + if val != val.lower(): + self._check_conv_assert_warn(val.lower(), expected) + if val != val.upper(): + self._check_conv_assert_warn(val.upper(), expected) + else: + if val != val.lower(): + self._check_value_error(val.lower()) + if val != val.upper(): + self._check_value_error(val.upper()) + + def test_wrong_type(self): + # common cases which apply to all the below + with pytest.raises(TypeError): + self.conv({}) + with pytest.raises(TypeError): + self.conv([]) + + def test_wrong_value(self): + # nonsense strings + self._check_value_error('') + self._check_value_error('\N{greek small letter pi}') + + if self.allow_bytes: + self._check_value_error(b'') + # bytes which can't be converted to strings via utf8 + self._check_value_error(b"\xFF") + if self.exact_match: + self._check_value_error("there's no way this is supported") + + +class TestByteorderConverter(StringConverterTestCase): + """ Tests of PyArray_ByteorderConverter """ + conv = mt.run_byteorder_converter + warn = False + + def test_valid(self): + for s in ['big', '>']: + self._check(s, 'NPY_BIG') + 
for s in ['little', '<']: + self._check(s, 'NPY_LITTLE') + for s in ['native', '=']: + self._check(s, 'NPY_NATIVE') + for s in ['ignore', '|']: + self._check(s, 'NPY_IGNORE') + for s in ['swap']: + self._check(s, 'NPY_SWAP') + + +class TestSortkindConverter(StringConverterTestCase): + """ Tests of PyArray_SortkindConverter """ + conv = mt.run_sortkind_converter + warn = False + + def test_valid(self): + self._check('quicksort', 'NPY_QUICKSORT') + self._check('heapsort', 'NPY_HEAPSORT') + self._check('mergesort', 'NPY_STABLESORT') # alias + self._check('stable', 'NPY_STABLESORT') + + +class TestSelectkindConverter(StringConverterTestCase): + """ Tests of PyArray_SelectkindConverter """ + conv = mt.run_selectkind_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check('introselect', 'NPY_INTROSELECT') + + +class TestSearchsideConverter(StringConverterTestCase): + """ Tests of PyArray_SearchsideConverter """ + conv = mt.run_searchside_converter + + def test_valid(self): + self._check('left', 'NPY_SEARCHLEFT') + self._check('right', 'NPY_SEARCHRIGHT') + + +class TestOrderConverter(StringConverterTestCase): + """ Tests of PyArray_OrderConverter """ + conv = mt.run_order_converter + warn = False + + def test_valid(self): + self._check('c', 'NPY_CORDER') + self._check('f', 'NPY_FORTRANORDER') + self._check('a', 'NPY_ANYORDER') + self._check('k', 'NPY_KEEPORDER') + + def test_flatten_invalid_order(self): + # invalid after gh-14596 + with pytest.raises(ValueError): + self.conv('Z') + for order in [False, True, 0, 8]: + with pytest.raises(TypeError): + self.conv(order) + + +class TestClipmodeConverter(StringConverterTestCase): + """ Tests of PyArray_ClipmodeConverter """ + conv = mt.run_clipmode_converter + + def test_valid(self): + self._check('clip', 'NPY_CLIP') + self._check('wrap', 'NPY_WRAP') + self._check('raise', 'NPY_RAISE') + + # integer values allowed here + assert self.conv(CLIP) == 'NPY_CLIP' + assert self.conv(WRAP) == 'NPY_WRAP' + assert self.conv(RAISE) == 'NPY_RAISE' + + +class TestCastingConverter(StringConverterTestCase): + """ Tests of PyArray_CastingConverter """ + conv = mt.run_casting_converter + case_insensitive = False + exact_match = True + + def test_valid(self): + self._check("no", "NPY_NO_CASTING") + self._check("equiv", "NPY_EQUIV_CASTING") + self._check("safe", "NPY_SAFE_CASTING") + self._check("same_kind", "NPY_SAME_KIND_CASTING") + self._check("unsafe", "NPY_UNSAFE_CASTING") + + +class TestIntpConverter: + """ Tests of PyArray_IntpConverter """ + conv = mt.run_intp_converter + + def test_basic(self): + assert self.conv(1) == (1,) + assert self.conv((1, 2)) == (1, 2) + assert self.conv([1, 2]) == (1, 2) + assert self.conv(()) == () + + def test_none(self): + with pytest.raises(TypeError): + assert self.conv(None) == () + + def test_float(self): + with pytest.raises(TypeError): + self.conv(1.0) + with pytest.raises(TypeError): + self.conv([1, 1.0]) + + def test_too_large(self): + with pytest.raises(ValueError): + self.conv(2**64) + + def test_too_many_dims(self): + assert self.conv([1] * 64) == (1,) * 64 + with pytest.raises(ValueError): + self.conv([1] * 65) diff --git a/python/numpy/_core/tests/test_cpu_dispatcher.py b/python/numpy/_core/tests/test_cpu_dispatcher.py new file mode 100644 index 000000000..0a47685d0 --- /dev/null +++ b/python/numpy/_core/tests/test_cpu_dispatcher.py @@ -0,0 +1,49 @@ +from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, +) + +from numpy._core 
import _umath_tests
+from numpy.testing import assert_equal
+
+
+def test_dispatcher():
+    """
+    Testing the utilities of the CPU dispatcher
+    """
+    targets = (
+        "SSE2", "SSE41", "AVX2",
+        "VSX", "VSX2", "VSX3",
+        "NEON", "ASIMD", "ASIMDHP",
+        "VX", "VXE", "LSX"
+    )
+    highest_sfx = ""  # no suffix for the baseline
+    all_sfx = []
+    for feature in reversed(targets):
+        # Skip baseline features: by default `CCompilerOpt` does not generate
+        # separate objects for the baseline, just one object that combines all
+        # of them via the 'baseline' option within the configuration
+        # statements.
+        if feature in __cpu_baseline__:
+            continue
+        # check compiler and running machine support
+        if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
+            continue
+
+        if not highest_sfx:
+            highest_sfx = "_" + feature
+        all_sfx.append("func" + "_" + feature)
+
+    test = _umath_tests.test_dispatch()
+    assert_equal(test["func"], "func" + highest_sfx)
+    assert_equal(test["var"], "var" + highest_sfx)
+
+    if highest_sfx:
+        assert_equal(test["func_xb"], "func" + highest_sfx)
+        assert_equal(test["var_xb"], "var" + highest_sfx)
+    else:
+        assert_equal(test["func_xb"], "nobase")
+        assert_equal(test["var_xb"], "nobase")
+
+    all_sfx.append("func")  # add the baseline
+    assert_equal(test["all"], all_sfx)
diff --git a/python/numpy/_core/tests/test_cpu_features.py b/python/numpy/_core/tests/test_cpu_features.py
new file mode 100644
index 000000000..d1e3dc610
--- /dev/null
+++ b/python/numpy/_core/tests/test_cpu_features.py
@@ -0,0 +1,432 @@
+import os
+import pathlib
+import platform
+import re
+import subprocess
+import sys
+
+import pytest
+from numpy._core._multiarray_umath import (
+    __cpu_baseline__,
+    __cpu_dispatch__,
+    __cpu_features__,
+)
+
+
+def assert_features_equal(actual, desired, fname):
+    __tracebackhide__ = True  # Hide traceback for py.test
+    actual, desired = str(actual), str(desired)
+    if actual == desired:
+        return
+    detected = str(__cpu_features__).replace("'", "")
+    try:
+        with open("/proc/cpuinfo") as fd:
+            cpuinfo = fd.read(2048)
+    except Exception as err:
+        cpuinfo = str(err)
+
+    try:
+        import subprocess
+        auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"})
+        auxv = auxv.decode()
+    except Exception as err:
+        auxv = str(err)
+
+    import textwrap
+    error_report = textwrap.indent(
+f"""
+###########################################
+### Extra debugging information
+###########################################
+-------------------------------------------
+--- NumPy Detections
+-------------------------------------------
+{detected}
+-------------------------------------------
+--- SYS / CPUINFO
+-------------------------------------------
+{cpuinfo}....
+------------------------------------------- +--- SYS / AUXV +------------------------------------------- +{auxv} +""", prefix='\r') + + raise AssertionError(( + "Failure Detection\n" + " NAME: '%s'\n" + " ACTUAL: %s\n" + " DESIRED: %s\n" + "%s" + ) % (fname, actual, desired, error_report)) + +def _text_to_list(txt): + out = txt.strip("][\n").replace("'", "").split(', ') + return None if out[0] == "" else out + +class AbstractTest: + features = [] + features_groups = {} + features_map = {} + features_flags = set() + + def load_flags(self): + # a hook + pass + + def test_features(self): + self.load_flags() + for gname, features in self.features_groups.items(): + test_features = [self.cpu_have(f) for f in features] + assert_features_equal(__cpu_features__.get(gname), all(test_features), gname) + + for feature_name in self.features: + cpu_have = self.cpu_have(feature_name) + npy_have = __cpu_features__.get(feature_name) + assert_features_equal(npy_have, cpu_have, feature_name) + + def cpu_have(self, feature_name): + map_names = self.features_map.get(feature_name, feature_name) + if isinstance(map_names, str): + return map_names in self.features_flags + return any(f in self.features_flags for f in map_names) + + def load_flags_cpuinfo(self, magic_key): + self.features_flags = self.get_cpuinfo_item(magic_key) + + def get_cpuinfo_item(self, magic_key): + values = set() + with open('/proc/cpuinfo') as fd: + for line in fd: + if not line.startswith(magic_key): + continue + flags_value = [s.strip() for s in line.split(':', 1)] + if len(flags_value) == 2: + values = values.union(flags_value[1].upper().split()) + return values + + def load_flags_auxv(self): + auxv = subprocess.check_output(['/bin/true'], env={"LD_SHOW_AUXV": "1"}) + for at in auxv.split(b'\n'): + if not at.startswith(b"AT_HWCAP"): + continue + hwcap_value = [s.strip() for s in at.split(b':', 1)] + if len(hwcap_value) == 2: + self.features_flags = self.features_flags.union( + hwcap_value[1].upper().decode().split() + ) + +@pytest.mark.skipif( + sys.platform == 'emscripten', + reason=( + "The subprocess module is not available on WASM platforms and" + " therefore this test class cannot be properly executed." 
+ ), +) +class TestEnvPrivation: + cwd = pathlib.Path(__file__).parent.resolve() + env = os.environ.copy() + _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None) + _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None) + SUBPROCESS_ARGS = {"cwd": cwd, "capture_output": True, "text": True, "check": True} + unavailable_feats = [ + feat for feat in __cpu_dispatch__ if not __cpu_features__[feat] + ] + UNAVAILABLE_FEAT = ( + None if len(unavailable_feats) == 0 + else unavailable_feats[0] + ) + BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0] + SCRIPT = """ +def main(): + from numpy._core._multiarray_umath import ( + __cpu_features__, + __cpu_dispatch__ + ) + + detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]] + print(detected) + +if __name__ == "__main__": + main() + """ + + @pytest.fixture(autouse=True) + def setup_class(self, tmp_path_factory): + file = tmp_path_factory.mktemp("runtime_test_script") + file /= "_runtime_detect.py" + file.write_text(self.SCRIPT) + self.file = file + + def _run(self): + return subprocess.run( + [sys.executable, self.file], + env=self.env, + **self.SUBPROCESS_ARGS, + ) + + # Helper function mimicking pytest.raises for subprocess call + def _expect_error( + self, + msg, + err_type, + no_error_msg="Failed to generate error" + ): + try: + self._run() + except subprocess.CalledProcessError as e: + assertion_message = f"Expected: {msg}\nGot: {e.stderr}" + assert re.search(msg, e.stderr), assertion_message + + assertion_message = ( + f"Expected error of type: {err_type}; see full " + f"error:\n{e.stderr}" + ) + assert re.search(err_type, e.stderr), assertion_message + else: + assert False, no_error_msg + + def setup_method(self): + """Ensure that the environment is reset""" + self.env = os.environ.copy() + + def test_runtime_feature_selection(self): + """ + Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the + features exactly specified are dispatched. + """ + + # Capture runtime-enabled features + out = self._run() + non_baseline_features = _text_to_list(out.stdout) + + if non_baseline_features is None: + pytest.skip( + "No dispatchable features outside of baseline detected." 
+ ) + feature = non_baseline_features[0] + + # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is + # specified + self.env['NPY_ENABLE_CPU_FEATURES'] = feature + out = self._run() + enabled_features = _text_to_list(out.stdout) + + # Ensure that only one feature is enabled, and it is exactly the one + # specified by `NPY_ENABLE_CPU_FEATURES` + assert set(enabled_features) == {feature} + + if len(non_baseline_features) < 2: + pytest.skip("Only one non-baseline feature detected.") + # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is + # specified + self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features) + out = self._run() + enabled_features = _text_to_list(out.stdout) + + # Ensure that both features are enabled, and they are exactly the ones + # specified by `NPY_ENABLE_CPU_FEATURES` + assert set(enabled_features) == set(non_baseline_features) + + @pytest.mark.parametrize("enabled, disabled", + [ + ("feature", "feature"), + ("feature", "same"), + ]) + def test_both_enable_disable_set(self, enabled, disabled): + """ + Ensure that when both environment variables are set then an + ImportError is thrown + """ + self.env['NPY_ENABLE_CPU_FEATURES'] = enabled + self.env['NPY_DISABLE_CPU_FEATURES'] = disabled + msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES" + err_type = "ImportError" + self._expect_error(msg, err_type) + + @pytest.mark.skipif( + not __cpu_dispatch__, + reason=( + "NPY_*_CPU_FEATURES only parsed if " + "`__cpu_dispatch__` is non-empty" + ) + ) + @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"]) + def test_variable_too_long(self, action): + """ + Test that an error is thrown if the environment variables are too long + to be processed. Current limit is 1024, but this may change later. + """ + MAX_VAR_LENGTH = 1024 + # Actual length is MAX_VAR_LENGTH + 1 due to null-termination + self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH + msg = ( + f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is " + f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted" + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + @pytest.mark.skipif( + not __cpu_dispatch__, + reason=( + "NPY_*_CPU_FEATURES only parsed if " + "`__cpu_dispatch__` is non-empty" + ) + ) + def test_impossible_feature_disable(self): + """ + Test that a RuntimeError is thrown if an impossible feature-disabling + request is made. This includes disabling a baseline feature. + """ + + if self.BASELINE_FEAT is None: + pytest.skip("There are no unavailable features to test with") + bad_feature = self.BASELINE_FEAT + self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature + msg = ( + f"You cannot disable CPU feature '{bad_feature}', since it is " + "part of the baseline optimizations" + ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + def test_impossible_feature_enable(self): + """ + Test that a RuntimeError is thrown if an impossible feature-enabling + request is made. This includes enabling a feature not supported by the + machine, or disabling a baseline optimization. + """ + + if self.UNAVAILABLE_FEAT is None: + pytest.skip("There are no unavailable features to test with") + bad_feature = self.UNAVAILABLE_FEAT + self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." 
+ ) + err_type = "RuntimeError" + self._expect_error(msg, err_type) + + # Ensure that it fails even when providing garbage in addition + feats = f"{bad_feature}, Foobar" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since they " + "are not supported by your machine." + ) + self._expect_error(msg, err_type) + + if self.BASELINE_FEAT is not None: + # Ensure that only the bad feature gets reported + feats = f"{bad_feature}, {self.BASELINE_FEAT}" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." + ) + self._expect_error(msg, err_type) + + +is_linux = sys.platform.startswith('linux') +is_cygwin = sys.platform.startswith('cygwin') +machine = platform.machine() +is_x86 = re.match(r"^(amd64|x86|i386|i686)", machine, re.IGNORECASE) +@pytest.mark.skipif( + not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86" +) +class Test_X86_Features(AbstractTest): + features = [ + "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42", + "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD", + "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ", + "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA", + "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16", + ] + features_groups = { + "AVX512_KNL": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"], + "AVX512_KNM": ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS", + "AVX5124VNNIW", "AVX512VPOPCNTDQ"], + "AVX512_SKX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"], + "AVX512_CLX": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"], + "AVX512_CNL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", + "AVX512VBMI"], + "AVX512_ICL": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA", + "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"], + "AVX512_SPR": ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", + "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI", + "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ", + "AVX512FP16"], + } + features_map = { + "SSE3": "PNI", "SSE41": "SSE4_1", "SSE42": "SSE4_2", "FMA3": "FMA", + "AVX512VNNI": "AVX512_VNNI", "AVX512BITALG": "AVX512_BITALG", + "AVX512VBMI2": "AVX512_VBMI2", "AVX5124FMAPS": "AVX512_4FMAPS", + "AVX5124VNNIW": "AVX512_4VNNIW", "AVX512VPOPCNTDQ": "AVX512_VPOPCNTDQ", + "AVX512FP16": "AVX512_FP16", + } + + def load_flags(self): + self.load_flags_cpuinfo("flags") + + +is_power = re.match(r"^(powerpc|ppc)64", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power") +class Test_POWER_Features(AbstractTest): + features = ["VSX", "VSX2", "VSX3", "VSX4"] + features_map = {"VSX2": "ARCH_2_07", "VSX3": "ARCH_3_00", "VSX4": "ARCH_3_1"} + + def load_flags(self): + self.load_flags_auxv() + + +is_zarch = re.match(r"^(s390x)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_zarch, + reason="Only for Linux and IBM Z") +class Test_ZARCH_Features(AbstractTest): + features = ["VX", "VXE", "VXE2"] + + def load_flags(self): + self.load_flags_auxv() + + +is_arm = re.match(r"^(arm|aarch64)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM") +class Test_ARM_Features(AbstractTest): + features = [ + "SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", 
"ASIMDDP", "ASIMDFHM" + ] + features_groups = { + "NEON_FP16": ["NEON", "HALF"], + "NEON_VFPV4": ["NEON", "VFPV4"], + } + + def load_flags(self): + self.load_flags_cpuinfo("Features") + arch = self.get_cpuinfo_item("CPU architecture") + # in case of mounting virtual filesystem of aarch64 kernel without linux32 + is_rootfs_v8 = ( + not re.match(r"^armv[0-9]+l$", machine) and + (int('0' + next(iter(arch))) > 7 if arch else 0) + ) + if re.match(r"^(aarch64|AARCH64)", machine) or is_rootfs_v8: + self.features_map = { + "NEON": "ASIMD", "HALF": "ASIMD", "VFPV4": "ASIMD" + } + else: + self.features_map = { + # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32) + # doesn't provide information about ASIMD, so we assume that ASIMD is supported + # if the kernel reports any one of the following ARM8 features. + "ASIMD": ("AES", "SHA1", "SHA2", "PMULL", "CRC32") + } + + +is_loongarch = re.match(r"^(loongarch)", machine, re.IGNORECASE) +@pytest.mark.skipif(not is_linux or not is_loongarch, reason="Only for Linux and LoongArch") +class Test_LOONGARCH_Features(AbstractTest): + features = ["LSX"] + + def load_flags(self): + self.load_flags_cpuinfo("Features") diff --git a/python/numpy/_core/tests/test_custom_dtypes.py b/python/numpy/_core/tests/test_custom_dtypes.py new file mode 100644 index 000000000..66e6de35b --- /dev/null +++ b/python/numpy/_core/tests/test_custom_dtypes.py @@ -0,0 +1,315 @@ +from tempfile import NamedTemporaryFile + +import pytest +from numpy._core._multiarray_umath import ( + _discover_array_parameters as discover_array_params, +) +from numpy._core._multiarray_umath import _get_sfloat_dtype + +import numpy as np +from numpy.testing import assert_array_equal + +SF = _get_sfloat_dtype() + + +class TestSFloat: + def _get_array(self, scaling, aligned=True): + if not aligned: + a = np.empty(3 * 8 + 1, dtype=np.uint8)[1:] + a = a.view(np.float64) + a[:] = [1., 2., 3.] + else: + a = np.array([1., 2., 3.]) + + a *= 1. / scaling # the casting code also uses the reciprocal. + return a.view(SF(scaling)) + + def test_sfloat_rescaled(self): + sf = SF(1.) + sf2 = sf.scaled_by(2.) + assert sf2.get_scaling() == 2. + sf6 = sf2.scaled_by(3.) + assert sf6.get_scaling() == 6. + + def test_class_discovery(self): + # This does not test much, since we always discover the scaling as 1. + # But most of NumPy (when writing) does not understand DType classes + dt, _ = discover_array_params([1., 2., 3.], dtype=SF) + assert dt == SF(1.) 
+ + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_scaled_float_from_floats(self, scaling): + a = np.array([1., 2., 3.], dtype=SF(scaling)) + + assert a.dtype.get_scaling() == scaling + assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.]) + + def test_repr(self): + # Check the repr, mainly to cover the code paths: + assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)" + + def test_dtype_str(self): + assert SF(1.).str == "_ScaledFloatTestDType(scaling=1.0)" + + def test_dtype_name(self): + assert SF(1.).name == "_ScaledFloatTestDType64" + + def test_sfloat_structured_dtype_printing(self): + dt = np.dtype([("id", int), ("value", SF(0.5))]) + # repr of structured dtypes need special handling because the + # implementation bypasses the object repr + assert "('value', '_ScaledFloatTestDType64')" in repr(dt) + + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_sfloat_from_float(self, scaling): + a = np.array([1., 2., 3.]).astype(dtype=SF(scaling)) + + assert a.dtype.get_scaling() == scaling + assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.]) + + @pytest.mark.parametrize("aligned", [True, False]) + @pytest.mark.parametrize("scaling", [1., -1., 2.]) + def test_sfloat_getitem(self, aligned, scaling): + a = self._get_array(1., aligned) + assert a.tolist() == [1., 2., 3.] + + @pytest.mark.parametrize("aligned", [True, False]) + def test_sfloat_casts(self, aligned): + a = self._get_array(1., aligned) + + assert np.can_cast(a, SF(-1.), casting="equiv") + assert not np.can_cast(a, SF(-1.), casting="no") + na = a.astype(SF(-1.)) + assert_array_equal(-1 * na.view(np.float64), a.view(np.float64)) + + assert np.can_cast(a, SF(2.), casting="same_kind") + assert not np.can_cast(a, SF(2.), casting="safe") + a2 = a.astype(SF(2.)) + assert_array_equal(2 * a2.view(np.float64), a.view(np.float64)) + + @pytest.mark.parametrize("aligned", [True, False]) + def test_sfloat_cast_internal_errors(self, aligned): + a = self._get_array(2e300, aligned) + + with pytest.raises(TypeError, + match="error raised inside the core-loop: non-finite factor!"): + a.astype(SF(2e-300)) + + def test_sfloat_promotion(self): + assert np.result_type(SF(2.), SF(3.)) == SF(3.) + assert np.result_type(SF(3.), SF(2.)) == SF(3.) + # Float64 -> SF(1.) and then promotes normally, so both of this work: + assert np.result_type(SF(3.), np.float64) == SF(3.) + assert np.result_type(np.float64, SF(0.5)) == SF(1.) + + # Test an undefined promotion: + with pytest.raises(TypeError): + np.result_type(SF(1.), np.int64) + + def test_basic_multiply(self): + a = self._get_array(2.) + b = self._get_array(4.) + + res = a * b + # multiplies dtype scaling and content separately: + assert res.dtype.get_scaling() == 8. + expected_view = a.view(np.float64) * b.view(np.float64) + assert_array_equal(res.view(np.float64), expected_view) + + def test_possible_and_impossible_reduce(self): + # For reductions to work, the first and last operand must have the + # same dtype. For this parametric DType that is not necessarily true. + a = self._get_array(2.) + # Addition reduction works (as of writing requires to pass initial + # because setting a scaled-float from the default `0` fails). + res = np.add.reduce(a, initial=0.) + assert res == a.astype(np.float64).sum() + + # But each multiplication changes the factor, so a reduction is not + # possible (the relaxed version of the old refusal to handle any + # flexible dtype). 
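+        # (Concretely: every step of a multiply reduction would change the
+        # scaling factor, so the intermediate dtype would have to keep
+        # moving through SF(4.), SF(8.), and so on; no single resolved loop
+        # dtype fits, hence the TypeError below.)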
+ with pytest.raises(TypeError, + match="the resolved dtypes are not compatible"): + np.multiply.reduce(a) + + def test_basic_ufunc_at(self): + float_a = np.array([1., 2., 3.]) + b = self._get_array(2.) + + float_b = b.view(np.float64).copy() + np.multiply.at(float_b, [1, 1, 1], float_a) + np.multiply.at(b, [1, 1, 1], float_a) + + assert_array_equal(b.view(np.float64), float_b) + + def test_basic_multiply_promotion(self): + float_a = np.array([1., 2., 3.]) + b = self._get_array(2.) + + res1 = float_a * b + res2 = b * float_a + + # one factor is one, so we get the factor of b: + assert res1.dtype == res2.dtype == b.dtype + expected_view = float_a * b.view(np.float64) + assert_array_equal(res1.view(np.float64), expected_view) + assert_array_equal(res2.view(np.float64), expected_view) + + # Check that promotion works when `out` is used: + np.multiply(b, float_a, out=res2) + with pytest.raises(TypeError): + # The promoter accepts this (maybe it should not), but the SFloat + # result cannot be cast to integer: + np.multiply(b, float_a, out=np.arange(3)) + + def test_basic_addition(self): + a = self._get_array(2.) + b = self._get_array(4.) + + res = a + b + # addition uses the type promotion rules for the result: + assert res.dtype == np.result_type(a.dtype, b.dtype) + expected_view = (a.astype(res.dtype).view(np.float64) + + b.astype(res.dtype).view(np.float64)) + assert_array_equal(res.view(np.float64), expected_view) + + def test_addition_cast_safety(self): + """The addition method is special for the scaled float, because it + includes the "cast" between different factors, thus cast-safety + is influenced by the implementation. + """ + a = self._get_array(2.) + b = self._get_array(-2.) + c = self._get_array(3.) + + # sign change is "equiv": + np.add(a, b, casting="equiv") + with pytest.raises(TypeError): + np.add(a, b, casting="no") + + # Different factor is "same_kind" (default) so check that "safe" fails + with pytest.raises(TypeError): + np.add(a, c, casting="safe") + + # Check that casting the output fails also (done by the ufunc here) + with pytest.raises(TypeError): + np.add(a, a, out=c, casting="safe") + + @pytest.mark.parametrize("ufunc", + [np.logical_and, np.logical_or, np.logical_xor]) + def test_logical_ufuncs_casts_to_bool(self, ufunc): + a = self._get_array(2.) + a[0] = 0. # make sure first element is considered False. + + float_equiv = a.astype(float) + expected = ufunc(float_equiv, float_equiv) + res = ufunc(a, a) + assert_array_equal(res, expected) + + # also check that the same works for reductions: + expected = ufunc.reduce(float_equiv) + res = ufunc.reduce(a) + assert_array_equal(res, expected) + + # The output casting does not match the bool, bool -> bool loop: + with pytest.raises(TypeError): + ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv") + + def test_wrapped_and_wrapped_reductions(self): + a = self._get_array(2.) + float_equiv = a.astype(float) + + expected = np.hypot(float_equiv, float_equiv) + res = np.hypot(a, a) + assert res.dtype == a.dtype + res_float = res.view(np.float64) * 2 + assert_array_equal(res_float, expected) + + # Also check reduction (keepdims, due to incorrect getitem) + res = np.hypot.reduce(a, keepdims=True) + assert res.dtype == a.dtype + expected = np.hypot.reduce(float_equiv, keepdims=True) + assert res.view(np.float64) * 2 == expected + + def test_astype_class(self): + # Very simple test that we accept `.astype()` also on the class. 
+ # ScaledFloat always returns the default descriptor, but it does + # check the relevant code paths. + arr = np.array([1., 2., 3.], dtype=object) + + res = arr.astype(SF) # passing the class class + expected = arr.astype(SF(1.)) # above will have discovered 1. scaling + assert_array_equal(res.view(np.float64), expected.view(np.float64)) + + def test_creation_class(self): + # passing in a dtype class should return + # the default descriptor + arr1 = np.array([1., 2., 3.], dtype=SF) + assert arr1.dtype == SF(1.) + arr2 = np.array([1., 2., 3.], dtype=SF(1.)) + assert_array_equal(arr1.view(np.float64), arr2.view(np.float64)) + assert arr1.dtype == arr2.dtype + + assert np.empty(3, dtype=SF).dtype == SF(1.) + assert np.empty_like(arr1, dtype=SF).dtype == SF(1.) + assert np.zeros(3, dtype=SF).dtype == SF(1.) + assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.) + + def test_np_save_load(self): + # this monkeypatch is needed because pickle + # uses the repr of a type to reconstruct it + np._ScaledFloatTestDType = SF + + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + + # adapted from RoundtripTest.roundtrip in np.save tests + with NamedTemporaryFile("wb", delete=False, suffix=".npz") as f: + with pytest.warns(UserWarning) as record: + np.savez(f.name, arr) + + assert len(record) == 1 + + with np.load(f.name, allow_pickle=True) as data: + larr = data["arr_0"] + assert_array_equal(arr.view(np.float64), larr.view(np.float64)) + assert larr.dtype == arr.dtype == SF(1.0) + + del np._ScaledFloatTestDType + + def test_flatiter(self): + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + + for i, val in enumerate(arr.flat): + assert arr[i] == val + + @pytest.mark.parametrize( + "index", [ + [1, 2], ..., slice(None, 2, None), + np.array([True, True, False]), np.array([0, 1]) + ], ids=["int_list", "ellipsis", "slice", "bool_array", "int_array"]) + def test_flatiter_index(self, index): + arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0)) + np.testing.assert_array_equal( + arr[index].view(np.float64), arr.flat[index].view(np.float64)) + + arr2 = arr.copy() + arr[index] = 5.0 + arr2.flat[index] = 5.0 + np.testing.assert_array_equal( + arr.view(np.float64), arr2.view(np.float64)) + +def test_type_pickle(): + # can't actually unpickle, but we can pickle (if in namespace) + import pickle + + np._ScaledFloatTestDType = SF + + s = pickle.dumps(SF) + res = pickle.loads(s) + assert res is SF + + del np._ScaledFloatTestDType + + +def test_is_numeric(): + assert SF._is_numeric diff --git a/python/numpy/_core/tests/test_cython.py b/python/numpy/_core/tests/test_cython.py new file mode 100644 index 000000000..fb3839fd2 --- /dev/null +++ b/python/numpy/_core/tests/test_cython.py @@ -0,0 +1,351 @@ +import os +import subprocess +import sys +import sysconfig +from datetime import datetime + +import pytest + +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM, assert_array_equal + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + + # Note: keep in sync with the one in pyproject.toml + required_version = "3.0.6" + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + 
) + + +@pytest.fixture(scope='module') +def install_temp(tmpdir_factory): + # Based in part on test_cython from random.tests.test_extending + if IS_WASM: + pytest.skip("No subprocess") + + srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython') + build_dir = tmpdir_factory.mktemp("cython_test") / "build" + os.makedirs(build_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(srcdir)], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(srcdir)], + cwd=build_dir + ) + try: + subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + except subprocess.CalledProcessError: + print("----------------") + print("meson build failed when doing") + print(f"'meson setup --native-file {native_file} {srcdir}'") + print("'meson compile -vv'") + print(f"in {build_dir}") + print("----------------") + raise + + sys.path.append(str(build_dir)) + + +def test_is_timedelta64_object(install_temp): + import checks + + assert checks.is_td64(np.timedelta64(1234)) + assert checks.is_td64(np.timedelta64(1234, "ns")) + assert checks.is_td64(np.timedelta64("NaT", "ns")) + + assert not checks.is_td64(1) + assert not checks.is_td64(None) + assert not checks.is_td64("foo") + assert not checks.is_td64(np.datetime64("now", "s")) + + +def test_is_datetime64_object(install_temp): + import checks + + assert checks.is_dt64(np.datetime64(1234, "ns")) + assert checks.is_dt64(np.datetime64("NaT", "ns")) + + assert not checks.is_dt64(1) + assert not checks.is_dt64(None) + assert not checks.is_dt64("foo") + assert not checks.is_dt64(np.timedelta64(1234)) + + +def test_get_datetime64_value(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + + result = checks.get_dt64_value(dt64) + expected = dt64.view("i8") + + assert result == expected + + +def test_get_timedelta64_value(install_temp): + import checks + + td64 = np.timedelta64(12345, "h") + + result = checks.get_td64_value(td64) + expected = td64.view("i8") + + assert result == expected + + +def test_get_datetime64_unit(install_temp): + import checks + + dt64 = np.datetime64("2016-01-01", "ns") + result = checks.get_dt64_unit(dt64) + expected = 10 + assert result == expected + + td64 = np.timedelta64(12345, "h") + result = checks.get_dt64_unit(td64) + expected = 5 + assert result == expected + + +def test_abstract_scalars(install_temp): + import checks + + assert checks.is_integer(1) + assert checks.is_integer(np.int8(1)) + assert checks.is_integer(np.uint64(1)) + +def test_default_int(install_temp): + import checks + + assert checks.get_default_integer() is np.dtype(int) + + +def test_ravel_axis(install_temp): + import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + +def test_convert_datetime64_to_datetimestruct(install_temp): + # GH#21199 + import checks + + res = 
checks.convert_datetime64_to_datetimestruct() + + exp = { + "year": 2022, + "month": 3, + "day": 15, + "hour": 20, + "min": 1, + "sec": 55, + "us": 260292, + "ps": 0, + "as": 0, + } + + assert res == exp + + +class TestDatetimeStrings: + def test_make_iso_8601_datetime(self, install_temp): + # GH#21199 + import checks + dt = datetime(2016, 6, 2, 10, 45, 19) + # uses NPY_FR_s + result = checks.make_iso_8601_datetime(dt) + assert result == b"2016-06-02T10:45:19" + + def test_get_datetime_iso_8601_strlen(self, install_temp): + # GH#21199 + import checks + # uses NPY_FR_ns + res = checks.get_datetime_iso_8601_strlen() + assert res == 48 + + +@pytest.mark.parametrize( + "arrays", + [ + [np.random.rand(2)], + [np.random.rand(2), np.random.rand(3, 1)], + [np.random.rand(2), np.random.rand(2, 3, 2), np.random.rand(1, 3, 2)], + [np.random.rand(2, 1)] * 4 + [np.random.rand(1, 1, 1)], + ] +) +def test_multiiter_fields(install_temp, arrays): + import checks + bcast = np.broadcast(*arrays) + + assert bcast.ndim == checks.get_multiiter_number_of_dims(bcast) + assert bcast.size == checks.get_multiiter_size(bcast) + assert bcast.numiter == checks.get_multiiter_num_of_iterators(bcast) + assert bcast.shape == checks.get_multiiter_shape(bcast) + assert bcast.index == checks.get_multiiter_current_index(bcast) + assert all( + x.base is y.base + for x, y in zip(bcast.iters, checks.get_multiiter_iters(bcast)) + ) + + +def test_dtype_flags(install_temp): + import checks + dtype = np.dtype("i,O") # dtype with somewhat interesting flags + assert dtype.flags == checks.get_dtype_flags(dtype) + + +def test_conv_intp(install_temp): + import checks + + class myint: + def __int__(self): + return 3 + + # These conversion passes via `__int__`, not `__index__`: + assert checks.conv_intp(3.) 
== 3 + assert checks.conv_intp(myint()) == 3 + + +def test_npyiter_api(install_temp): + import checks + arr = np.random.rand(3, 2) + + it = np.nditer(arr) + assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) + assert checks.get_npyiter_ndim(it) == it.ndim == 1 + assert checks.npyiter_has_index(it) == it.has_index == False + + it = np.nditer(arr, flags=["c_index"]) + assert checks.npyiter_has_index(it) == it.has_index == True + assert ( + checks.npyiter_has_delayed_bufalloc(it) + == it.has_delayed_bufalloc + == False + ) + + it = np.nditer(arr, flags=["buffered", "delay_bufalloc"]) + assert ( + checks.npyiter_has_delayed_bufalloc(it) + == it.has_delayed_bufalloc + == True + ) + + it = np.nditer(arr, flags=["multi_index"]) + assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape) + assert checks.npyiter_has_multi_index(it) == it.has_multi_index == True + assert checks.get_npyiter_ndim(it) == it.ndim == 2 + assert checks.test_get_multi_index_iter_next(it, arr) + + arr2 = np.random.rand(2, 1, 2) + it = np.nditer([arr, arr2]) + assert checks.get_npyiter_nop(it) == it.nop == 2 + assert checks.get_npyiter_size(it) == it.itersize == 12 + assert checks.get_npyiter_ndim(it) == it.ndim == 3 + assert all( + x is y for x, y in zip(checks.get_npyiter_operands(it), it.operands) + ) + assert all( + np.allclose(x, y) + for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) + ) + + +def test_fillwithbytes(install_temp): + import checks + + arr = checks.compile_fillwithbyte() + assert_array_equal(arr, np.ones((1, 2))) + + +def test_complex(install_temp): + from checks import inc2_cfloat_struct + + arr = np.array([0, 10 + 10j], dtype="F") + inc2_cfloat_struct(arr) + assert arr[1] == (12 + 12j) + + +def test_npystring_pack(install_temp): + """Check that the cython API can write to a vstring array.""" + import checks + + arr = np.array(['a', 'b', 'c'], dtype='T') + assert checks.npystring_pack(arr) == 0 + + # checks.npystring_pack writes to the beginning of the array + assert arr[0] == "Hello world" + +def test_npystring_load(install_temp): + """Check that the cython API can load strings from a vstring array.""" + import checks + + arr = np.array(['abcd', 'b', 'c'], dtype='T') + result = checks.npystring_load(arr) + assert result == 'abcd' + + +def test_npystring_multiple_allocators(install_temp): + """Check that the cython API can acquire/release multiple vstring allocators.""" + import checks + + dt = np.dtypes.StringDType(na_object=None) + arr1 = np.array(['abcd', 'b', 'c'], dtype=dt) + arr2 = np.array(['a', 'b', 'c'], dtype=dt) + + assert checks.npystring_pack_multiple(arr1, arr2) == 0 + assert arr1[0] == "Hello world" + assert arr1[-1] is None + assert arr2[0] == "test this" + + +def test_npystring_allocators_other_dtype(install_temp): + """Check that allocators for non-StringDType arrays is NULL.""" + import checks + + arr1 = np.array([1, 2, 3], dtype='i') + arr2 = np.array([4, 5, 6], dtype='i') + + assert checks.npystring_allocators_other_types(arr1, arr2) == 0 + + +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') +def test_npy_uintp_type_enum(): + import checks + assert checks.check_npy_uintp_type_enum() diff --git a/python/numpy/_core/tests/test_datetime.py b/python/numpy/_core/tests/test_datetime.py new file mode 100644 index 000000000..88b9143a1 --- /dev/null +++ b/python/numpy/_core/tests/test_datetime.py @@ -0,0 +1,2734 @@ +import datetime +import pickle + +import pytest + +import numpy +import numpy as np 
+from numpy.testing import (
+    IS_WASM,
+    assert_,
+    assert_array_equal,
+    assert_equal,
+    assert_raises,
+    assert_raises_regex,
+    assert_warns,
+    suppress_warnings,
+)
+
+# Use pytz to test out various time zones if available
+try:
+    from pytz import timezone as tz
+    _has_pytz = True
+except ImportError:
+    _has_pytz = False
+
+try:
+    RecursionError
+except NameError:
+    RecursionError = RuntimeError  # python < 3.5
+
+
+def _assert_equal_hash(v1, v2):
+    assert v1 == v2
+    assert hash(v1) == hash(v2)
+    assert v2 in {v1}
+
+
+class TestDateTime:
+
+    def test_string(self):
+        msg = "no explicit representation of timezones available for " \
+              "np.datetime64"
+        with pytest.warns(UserWarning, match=msg):
+            np.datetime64('2000-01-01T00+01')
+
+    def test_datetime(self):
+        msg = "no explicit representation of timezones available for " \
+              "np.datetime64"
+        with pytest.warns(UserWarning, match=msg):
+            t0 = np.datetime64('2023-06-09T12:18:40Z', 'ns')
+
+        t0 = np.datetime64('2023-06-09T12:18:40', 'ns')
+
+    def test_datetime_dtype_creation(self):
+        for unit in ['Y', 'M', 'W', 'D',
+                     'h', 'm', 's', 'ms', 'us',
+                     'μs',  # alias for us
+                     'ns', 'ps', 'fs', 'as']:
+            dt1 = np.dtype(f'M8[750{unit}]')
+            assert_(dt1 == np.dtype(f'datetime64[750{unit}]'))
+            dt2 = np.dtype(f'm8[{unit}]')
+            assert_(dt2 == np.dtype(f'timedelta64[{unit}]'))
+
+        # Generic units shouldn't add [] to the end
+        assert_equal(str(np.dtype("M8")), "datetime64")
+
+        # Should be possible to specify the endianness
+        assert_equal(np.dtype("=M8"), np.dtype("M8"))
+        assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
+        assert_(np.dtype(">M8") == np.dtype("M8") or
+                np.dtype("<M8") == np.dtype("M8"))
+        assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
+                np.dtype("<M8[D]") == np.dtype("M8[D]"))
+        assert_(np.dtype(">M8") != np.dtype("<M8"))
+
+        assert_equal(np.dtype("=m8"), np.dtype("m8"))
+        assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
+        assert_(np.dtype(">m8") == np.dtype("m8") or
+                np.dtype("<m8") == np.dtype("m8"))
+        assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
+                np.dtype("<m8[D]") == np.dtype("m8[D]"))
+        assert_(np.dtype(">m8") != np.dtype("<m8"))
+
+    def test_datetime_nat_casting(self):
+        a = np.array('NaT', dtype='M8[D]')
+        b = np.datetime64('NaT', '[D]')
+
+        # Scalars -> Scalars
+        assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
+        assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
+        assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
+        assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
+        assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
+
+        # Arrays -> Scalars
+        assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
+        assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
+        assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
+        assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
+        assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
+
+        # NaN -> NaT
+        nan = np.array([np.nan] * 8 + [0])
+        fnan = nan.astype('f')
+        lnan = nan.astype('g')
+        cnan = nan.astype('D')
+        cfnan = nan.astype('F')
+        clnan = nan.astype('G')
+        hnan = nan.astype(np.half)
+
+        nat = np.array([np.datetime64('NaT')] * 8 + [np.datetime64(0, 'D')])
+        assert_equal(nan.astype('M8[ns]'), nat)
+        assert_equal(fnan.astype('M8[ns]'), nat)
+        assert_equal(lnan.astype('M8[ns]'), nat)
+        assert_equal(cnan.astype('M8[ns]'), nat)
+        assert_equal(cfnan.astype('M8[ns]'), nat)
+        assert_equal(clnan.astype('M8[ns]'), nat)
+        assert_equal(hnan.astype('M8[ns]'), nat)
+
+        nat = np.array([np.timedelta64('NaT')] * 8 + [np.timedelta64(0)])
+        assert_equal(nan.astype('timedelta64[ns]'), nat)
+        assert_equal(fnan.astype('timedelta64[ns]'), nat)
+        assert_equal(lnan.astype('timedelta64[ns]'), nat)
+        assert_equal(cnan.astype('timedelta64[ns]'), nat)
+        assert_equal(cfnan.astype('timedelta64[ns]'), nat)
+        assert_equal(clnan.astype('timedelta64[ns]'), nat)
+        assert_equal(hnan.astype('timedelta64[ns]'), nat)
+
+    
def test_days_creation(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 - 365) + assert_equal(np.array('1600', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3) + assert_equal(np.array('1601', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) / 4 + 3 + 366) + assert_equal(np.array('1900', dtype='M8[D]').astype('i8'), + (1900 - 1970) * 365 - (1970 - 1900) // 4) + assert_equal(np.array('1901', dtype='M8[D]').astype('i8'), + (1900 - 1970) * 365 - (1970 - 1900) // 4 + 365) + assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3 * 365 - 1) + assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2 * 365 - 1) + assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1 * 365) + assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0 * 365) + assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1 * 365) + assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2 * 365) + assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3 * 365 + 1) + assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4 * 365 + 1) + assert_equal(np.array('2000', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4) + assert_equal(np.array('2001', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366) + assert_equal(np.array('2400', dtype='M8[D]').astype('i8'), + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3) + assert_equal(np.array('2401', dtype='M8[D]').astype('i8'), + (2400 - 1970) * 365 + (2400 - 1972) // 4 - 3 + 366) + + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 28) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'), + (1600 - 1970) * 365 - (1972 - 1600) // 4 + 3 + 31 + 29) + assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 28) + assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 31 + 29) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'), + (2000 - 1970) * 365 + (2000 - 1972) // 4 + 366 + 31 + 28 + 21) + + def test_days_to_pydate(self): + assert_equal(np.array('1599', dtype='M8[D]').astype('O'), + datetime.date(1599, 1, 1)) + assert_equal(np.array('1600', dtype='M8[D]').astype('O'), + datetime.date(1600, 1, 1)) + assert_equal(np.array('1601', dtype='M8[D]').astype('O'), + datetime.date(1601, 1, 1)) + assert_equal(np.array('1900', dtype='M8[D]').astype('O'), + datetime.date(1900, 1, 1)) + assert_equal(np.array('1901', dtype='M8[D]').astype('O'), + datetime.date(1901, 1, 1)) + assert_equal(np.array('2000', dtype='M8[D]').astype('O'), + datetime.date(2000, 1, 1)) + assert_equal(np.array('2001', dtype='M8[D]').astype('O'), + datetime.date(2001, 1, 1)) + assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'), + datetime.date(1600, 2, 29)) + assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'), + datetime.date(1600, 3, 1)) + assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'), + datetime.date(2001, 3, 22)) + + def test_dtype_comparison(self): + assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]'))) + assert_(np.dtype('M8[us]') != np.dtype('M8[ms]')) + assert_(np.dtype('M8[2D]') != np.dtype('M8[D]')) + assert_(np.dtype('M8[D]') != np.dtype('M8[2D]')) + + def test_pydatetime_creation(self): + a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]') + 
assert_equal(a[0], a[1]) + a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]') + assert_equal(a[0], a[1]) + a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]') + assert_equal(a[0], a[1]) + # Will fail if the date changes during the exact right moment + a = np.array(['today', datetime.date.today()], dtype='M8[D]') + assert_equal(a[0], a[1]) + # datetime.datetime.now() returns local time, not UTC + #a = np.array(['now', datetime.datetime.now()], dtype='M8[s]') + #assert_equal(a[0], a[1]) + + # we can give a datetime.date time units + assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'), + np.array(np.datetime64('1960-03-12T00:00:00'))) + + def test_datetime_string_conversion(self): + a = ['2011-03-16', '1920-01-01', '2013-05-19'] + str_a = np.array(a, dtype='S') + uni_a = np.array(a, dtype='U') + dt_a = np.array(a, dtype='M') + + # String to datetime + assert_equal(dt_a, str_a.astype('M')) + assert_equal(dt_a.dtype, str_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = str_a + assert_equal(dt_a, dt_b) + + # Datetime to string + assert_equal(str_a, dt_a.astype('S0')) + str_b = np.empty_like(str_a) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + # Unicode to datetime + assert_equal(dt_a, uni_a.astype('M')) + assert_equal(dt_a.dtype, uni_a.astype('M').dtype) + dt_b = np.empty_like(dt_a) + dt_b[...] = uni_a + assert_equal(dt_a, dt_b) + + # Datetime to unicode + assert_equal(uni_a, dt_a.astype('U')) + uni_b = np.empty_like(uni_a) + uni_b[...] = dt_a + assert_equal(uni_a, uni_b) + + # Datetime to long string - gh-9712 + assert_equal(str_a, dt_a.astype((np.bytes_, 128))) + str_b = np.empty(str_a.shape, dtype=(np.bytes_, 128)) + str_b[...] = dt_a + assert_equal(str_a, str_b) + + @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"]) + def test_time_byteswapping(self, time_dtype): + times = np.array(["2017", "NaT"], dtype=time_dtype) + times_swapped = times.astype(times.dtype.newbyteorder()) + assert_array_equal(times, times_swapped) + + unswapped = times_swapped.view(np.dtype("int64").newbyteorder()) + assert_array_equal(unswapped, times.view(np.int64)) + + @pytest.mark.parametrize(["time1", "time2"], + [("M8[s]", "M8[D]"), ("m8[s]", "m8[ns]")]) + def test_time_byteswapped_cast(self, time1, time2): + dtype1 = np.dtype(time1) + dtype2 = np.dtype(time2) + times = np.array(["2017", "NaT"], dtype=dtype1) + expected = times.astype(dtype2) + + # Test that every byte-swapping combination also returns the same + # results (previous tests check that this comparison works fine). 
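+        # (Byte-swapping only changes the storage order, so every
+        # combination below must match the natively computed `expected`.)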
+        res = times.astype(dtype1.newbyteorder()).astype(dtype2)
+        assert_array_equal(res, expected)
+        res = times.astype(dtype2.newbyteorder())
+        assert_array_equal(res, expected)
+        res = times.astype(dtype1.newbyteorder()).astype(dtype2.newbyteorder())
+        assert_array_equal(res, expected)
+
+    @pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
+    @pytest.mark.parametrize("str_dtype", ["U", "S"])
+    def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
+        times = np.array(["2017", "NaT"], dtype=time_dtype)
+        # Unfortunately, timedelta does not roundtrip:
+        from_strings = np.array(["2017", "NaT"], dtype=str_dtype)
+        to_strings = times.astype(str_dtype)  # assume this is correct
+
+        # Check that conversion from times to string works if src is swapped:
+        times_swapped = times.astype(times.dtype.newbyteorder())
+        res = times_swapped.astype(str_dtype)
+        assert_array_equal(res, to_strings)
+        # And also if both are swapped:
+        res = times_swapped.astype(to_strings.dtype.newbyteorder())
+        assert_array_equal(res, to_strings)
+        # only destination is swapped:
+        res = times.astype(to_strings.dtype.newbyteorder())
+        assert_array_equal(res, to_strings)
+
+        # Check that conversion from string to times works if src is swapped:
+        from_strings_swapped = from_strings.astype(
+            from_strings.dtype.newbyteorder())
+        res = from_strings_swapped.astype(time_dtype)
+        assert_array_equal(res, times)
+        # And if both are swapped:
+        res = from_strings_swapped.astype(times.dtype.newbyteorder())
+        assert_array_equal(res, times)
+        # Only destination is swapped:
+        res = from_strings.astype(times.dtype.newbyteorder())
+        assert_array_equal(res, times)
+
+    def test_datetime_array_str(self):
+        a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
+        assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
+
+        a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
+        assert_equal(np.array2string(a, separator=', ',
+                     formatter={'datetime': lambda x:
+                                f"'{np.datetime_as_string(x, timezone='UTC')}'"}),
+                     "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
+
+        # Check that one NaT doesn't corrupt subsequent entries
+        a = np.array(['2010', 'NaT', '2030']).astype('M')
+        assert_equal(str(a), "['2010' 'NaT' '2030']")
+
+    def test_timedelta_array_str(self):
+        a = np.array([-1, 0, 100], dtype='m')
+        assert_equal(str(a), "[ -1   0 100]")
+        a = np.array(['NaT', 'NaT'], dtype='m')
+        assert_equal(str(a), "['NaT' 'NaT']")
+        # Check right-alignment with NaTs
+        a = np.array([-1, 'NaT', 0], dtype='m')
+        assert_equal(str(a), "[   -1 'NaT'     0]")
+        a = np.array([-1, 'NaT', 1234567], dtype='m')
+        assert_equal(str(a), "[     -1   'NaT' 1234567]")
+
+        # Test with other byteorder:
+        a = np.array([-1, 'NaT', 1234567], dtype='>m')
+        assert_equal(str(a), "[     -1   'NaT' 1234567]")
+        a = np.array([-1, 'NaT', 1234567], dtype='<m')
+        assert_equal(str(a), "[     -1   'NaT' 1234567]")
+
+    def test_pickle(self):
+        # Check that loading pickles from 1.6 works
+        pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
+              b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
+              b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
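+        # The bytes above encode a little-endian ('<') microsecond dtype,
+        # so the loaded result must equal np.dtype('<M8[us]').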
+        assert_equal(pickle.loads(pkl), np.dtype('<M8[us]'))
+
+    def test_gh_29555(self):
+        # check that dtype metadata round-trips when none
+        dt = np.dtype('>M8[us]')
+        assert dt.metadata is None
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            res = pickle.loads(pickle.dumps(dt, protocol=proto))
+            assert_equal(res, dt)
+            assert res.metadata is None
+
+    def test_setstate(self):
+        "Verify that datetime dtype __setstate__ can handle bad arguments"
+        dt = np.dtype('>M8[us]')
+        assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
+        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+        assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
+        assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+
+    def test_dtype_promotion(self):
+        # datetime <op> datetime computes the metadata gcd
+        # timedelta <op> timedelta computes the metadata gcd
+        for mM in ['m', 'M']:
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[2Y]'), np.dtype(mM + '8[2Y]')),
+                np.dtype(mM + '8[2Y]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[12Y]'), np.dtype(mM + '8[15Y]')),
+                np.dtype(mM + '8[3Y]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[62M]'), np.dtype(mM + '8[24M]')),
+                np.dtype(mM + '8[2M]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[1W]'), np.dtype(mM + '8[2D]')),
+                np.dtype(mM + '8[1D]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[W]'), np.dtype(mM + '8[13s]')),
+                np.dtype(mM + '8[s]'))
+            assert_equal(
+                np.promote_types(np.dtype(mM + '8[13W]'), np.dtype(mM + '8[49s]')),
+                np.dtype(mM + '8[7s]'))
+        # timedelta <op> timedelta raises when there is no reasonable gcd
+        assert_raises(TypeError, np.promote_types,
+                      np.dtype('m8[Y]'), np.dtype('m8[D]'))
+        assert_raises(TypeError, np.promote_types,
+                      np.dtype('m8[M]'), np.dtype('m8[W]'))
+        # timedelta and float cannot be safely cast with each other
+        assert_raises(TypeError, np.promote_types, "float32", "m8")
+        assert_raises(TypeError, np.promote_types, "m8", "float32")
+        assert_raises(TypeError, np.promote_types, "uint64", "m8")
+        assert_raises(TypeError, np.promote_types, "m8", "uint64")
+
+        # timedelta <op> timedelta may overflow with big unit ranges
+        assert_raises(OverflowError, np.promote_types,
+                      np.dtype('m8[W]'), np.dtype('m8[fs]'))
+        assert_raises(OverflowError, np.promote_types,
+                      np.dtype('m8[s]'), np.dtype('m8[as]'))
+
+    def test_cast_overflow(self):
+        # gh-4486
+        def cast():
+            numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
+        assert_raises(OverflowError, cast)
+
+    @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's',
+                                      'ms', 'us', 'ns', 'ps', 'fs', 'as'])
+    def test_isnat(self, unit):
+        res = np.array([False, False, True])
+        arr = np.array([123, -321, "NaT"], dtype=f'datetime64[{unit}]')
+        assert_equal(np.isnat(arr), res)
+        arr = np.array([123, -321, "NaT"], dtype=f'timedelta64[{unit}]')
+        assert_equal(np.isnat(arr), res)
+
+    def test_isnat_error(self):
+        # Test that only datetime dtype arrays are accepted
+        for t in np.typecodes["All"]:
+            if t in np.typecodes["Datetime"]:
+                continue
+            assert_raises(TypeError, np.isnat, np.zeros(10, t))
+
+    def test_isfinite_scalar(self):
+        assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
+        assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
+        assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
+
+        assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
+        assert_(np.isfinite(np.timedelta64(34, "ms")))
+
+    @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+                                      'us', 'ns', 'ps', 'fs', 'as'])
+    @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
+                                      '<timedelta64[%s]', '>timedelta64[%s]'])
+    def test_isfinite_isinf_isnan_units(self, unit, dstr):
+        '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
+        '''
+        arr_val = [123, 
-321, "NaT"] + arr = np.array(arr_val, dtype=(dstr % unit)) + pos = np.array([True, True, False]) + neg = np.array([False, False, True]) + false = np.array([False, False, False]) + assert_equal(np.isfinite(arr), pos) + assert_equal(np.isinf(arr), false) + assert_equal(np.isnan(arr), neg) + + def test_assert_equal(self): + assert_raises(AssertionError, assert_equal, + np.datetime64('nat'), np.timedelta64('nat')) + + def test_corecursive_input(self): + # construct a co-recursive list + a, b = [], [] + a.append(b) + b.append(a) + obj_arr = np.array([None]) + obj_arr[0] = a + + # At some point this caused a stack overflow (gh-11154). Now raises + # ValueError since the nested list cannot be converted to a datetime. + assert_raises(ValueError, obj_arr.astype, 'M8') + assert_raises(ValueError, obj_arr.astype, 'm8') + + @pytest.mark.parametrize("shape", [(), (1,)]) + def test_discovery_from_object_array(self, shape): + arr = np.array("2020-10-10", dtype=object).reshape(shape) + res = np.array("2020-10-10", dtype="M8").reshape(shape) + assert res.dtype == np.dtype("M8[D]") + assert_equal(arr.astype("M8"), res) + arr[...] = np.bytes_("2020-10-10") # try a numpy string type + assert_equal(arr.astype("M8"), res) + arr = arr.astype("S") + assert_equal(arr.astype("S").astype("M8"), res) + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", + # compound units + "10D", "2M", + ]) + def test_limit_symmetry(self, time_unit): + """ + Dates should have symmetric limits around the unix epoch at +/-np.int64 + """ + epoch = np.datetime64(0, time_unit) + latest = np.datetime64(np.iinfo(np.int64).max, time_unit) + earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) + + # above should not have overflowed + assert earliest < epoch < latest + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", + pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), + "D", "h", "m", + "s", "ms", "us", "ns", "ps", "fs", "as", + pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")), + ]) + @pytest.mark.parametrize("sign", [-1, 1]) + def test_limit_str_roundtrip(self, time_unit, sign): + """ + Limits should roundtrip when converted to strings. + + This tests the conversion to and from npy_datetimestruct. + """ + # TODO: add absolute (gold standard) time span limit strings + limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) + + # Convert to string and back. Explicit unit needed since the day and + # week reprs are not distinguishable. 
+ limit_via_str = np.datetime64(str(limit), time_unit) + assert limit_via_str == limit + + def test_datetime_hash_nat(self): + nat1 = np.datetime64() + nat2 = np.datetime64() + assert nat1 is not nat2 + assert nat1 != nat2 + assert hash(nat1) != hash(nat2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('unit', ('h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_weeks_vs_pydatetime(self, unit): + dt = np.datetime64(2348, 'W') # 2015-01-01 + dt2 = np.datetime64(dt, unit) + pydt = dt2.astype(datetime.datetime) + assert isinstance(pydt, datetime.datetime) + _assert_equal_hash(pydt, dt2) + + @pytest.mark.parametrize('unit', ('Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_negative(self, unit): + dt = np.datetime64(-102894, 'W') # -002-01-01 + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + # can only go down to "fs" before integer overflow + @pytest.mark.parametrize('unit', ('m', 's', 'ms', 'us', 'ns', 'ps', 'fs')) + def test_datetime_hash_minutes(self, unit): + dt = np.datetime64(3, 'm') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + @pytest.mark.parametrize('unit', ('ns', 'ps', 'fs', 'as')) + def test_datetime_hash_ns(self, unit): + dt = np.datetime64(3, 'ns') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + dt3 = np.datetime64(int(dt2.astype(int)) + 1, unit) + assert hash(dt) != hash(dt3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) # 11552-09-04 + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_datetime_hash_big_positive(self, wk, unit): + dt = np.datetime64(wk, 'W') + dt2 = np.datetime64(dt, unit) + _assert_equal_hash(dt, dt2) + + def test_timedelta_hash_generic(self): + assert_raises(ValueError, hash, np.timedelta64(123)) # generic + + @pytest.mark.parametrize('unit', ('Y', 'M')) + def test_timedelta_hash_year_month(self, unit): + td = np.timedelta64(45, 'Y') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_weeks_vs_pydelta(self, unit): + td = np.timedelta64(10, 'W') + td2 = np.timedelta64(td, unit) + pytd = td2.astype(datetime.timedelta) + assert isinstance(pytd, datetime.timedelta) + _assert_equal_hash(pytd, td2) + + @pytest.mark.parametrize('unit', ('ms', 'us', 'ns', 'ps', 'fs', 'as')) + def test_timedelta_hash_ms(self, unit): + td = np.timedelta64(3, 'ms') + td2 = np.timedelta64(td, unit) + _assert_equal_hash(td, td2) + + td3 = np.timedelta64(int(td2.astype(int)) + 1, unit) + assert hash(td) != hash(td3) # doesn't collide + + @pytest.mark.parametrize('wk', range(500000, 500010)) + @pytest.mark.parametrize('unit', ('W', 'D', 'h', 'm', 's', 'ms', 'us')) + def test_timedelta_hash_big_positive(self, wk, unit): + td = np.timedelta64(wk, 'W') + td2 = np.timedelta64(td, unit) + 
_assert_equal_hash(td, td2) + + +class TestDateTimeData: + + def test_basic(self): + a = np.array(['1980-03-23'], dtype=np.datetime64) + assert_equal(np.datetime_data(a.dtype), ('D', 1)) + + def test_bytes(self): + # byte units are converted to unicode + dt = np.datetime64('2000', (b'ms', 5)) + assert np.datetime_data(dt.dtype) == ('ms', 5) + + dt = np.datetime64('2000', b'5ms') + assert np.datetime_data(dt.dtype) == ('ms', 5) + + def test_non_ascii(self): + # μs is normalized to μ + dt = np.datetime64('2000', ('μs', 5)) + assert np.datetime_data(dt.dtype) == ('us', 5) + + dt = np.datetime64('2000', '5μs') + assert np.datetime_data(dt.dtype) == ('us', 5) + + +def test_comparisons_return_not_implemented(): + # GH#17017 + + class custom: + __array_priority__ = 10000 + + obj = custom() + + dt = np.datetime64('2000', 'ns') + td = dt - dt + + for item in [dt, td]: + assert item.__eq__(obj) is NotImplemented + assert item.__ne__(obj) is NotImplemented + assert item.__le__(obj) is NotImplemented + assert item.__lt__(obj) is NotImplemented + assert item.__ge__(obj) is NotImplemented + assert item.__gt__(obj) is NotImplemented diff --git a/python/numpy/_core/tests/test_defchararray.py b/python/numpy/_core/tests/test_defchararray.py new file mode 100644 index 000000000..2607953a9 --- /dev/null +++ b/python/numpy/_core/tests/test_defchararray.py @@ -0,0 +1,825 @@ +import pytest + +import numpy as np +from numpy._core.multiarray import _vec_string +from numpy.testing import ( + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + +kw_unicode_true = {'unicode': True} # make 2to3 work properly +kw_unicode_false = {'unicode': False} + +class TestBasic: + def test_from_object_array(self): + A = np.array([['abc', 2], + ['long ', '0123456789']], dtype='O') + B = np.char.array(A) + assert_equal(B.dtype.itemsize, 10) + assert_array_equal(B, [[b'abc', b'2'], + [b'long', b'0123456789']]) + + def test_from_object_array_unicode(self): + A = np.array([['abc', 'Sigma \u03a3'], + ['long ', '0123456789']], dtype='O') + assert_raises(ValueError, np.char.array, (A,)) + B = np.char.array(A, **kw_unicode_true) + assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize) + assert_array_equal(B, [['abc', 'Sigma \u03a3'], + ['long', '0123456789']]) + + def test_from_string_array(self): + A = np.array([[b'abc', b'foo'], + [b'long ', b'0123456789']]) + assert_equal(A.dtype.type, np.bytes_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B[0, 0] = 'changed' + assert_(B[0, 0] != A[0, 0]) + C = np.char.asarray(A) + assert_array_equal(C, A) + assert_equal(C.dtype, A.dtype) + C[0, 0] = 'changed again' + assert_(C[0, 0] != B[0, 0]) + assert_(C[0, 0] == A[0, 0]) + + def test_from_unicode_array(self): + A = np.array([['abc', 'Sigma \u03a3'], + ['long ', '0123456789']]) + assert_equal(A.dtype.type, np.str_) + B = np.char.array(A) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + B = np.char.array(A, **kw_unicode_true) + assert_array_equal(B, A) + assert_equal(B.dtype, A.dtype) + assert_equal(B.shape, A.shape) + + def fail(): + np.char.array(A, **kw_unicode_false) + + assert_raises(UnicodeEncodeError, fail) + + def test_unicode_upconvert(self): + A = np.char.array(['abc']) + B = np.char.array(['\u03a3']) + assert_(issubclass((A + B).dtype.type, np.str_)) + + def test_from_string(self): + A = np.char.array(b'abc') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 3) + 
assert_(issubclass(A.dtype.type, np.bytes_)) + + def test_from_unicode(self): + A = np.char.array('\u03a3') + assert_equal(len(A), 1) + assert_equal(len(A[0]), 1) + assert_equal(A.itemsize, 4) + assert_(issubclass(A.dtype.type, np.str_)) + +class TestVecString: + def test_non_existent_method(self): + + def fail(): + _vec_string('a', np.bytes_, 'bogus') + + assert_raises(AttributeError, fail) + + def test_non_string_array(self): + + def fail(): + _vec_string(1, np.bytes_, 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_args_tuple(self): + + def fail(): + _vec_string(['a'], np.bytes_, 'strip', 1) + + assert_raises(TypeError, fail) + + def test_invalid_type_descr(self): + + def fail(): + _vec_string(['a'], 'BOGUS', 'strip') + + assert_raises(TypeError, fail) + + def test_invalid_function_args(self): + + def fail(): + _vec_string(['a'], np.bytes_, 'strip', (1,)) + + assert_raises(TypeError, fail) + + def test_invalid_result_type(self): + + def fail(): + _vec_string(['a'], np.int_, 'strip') + + assert_raises(TypeError, fail) + + def test_broadcast_error(self): + + def fail(): + _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],)) + + assert_raises(ValueError, fail) + + +class TestWhitespace: + def setup_method(self): + self.A = np.array([['abc ', '123 '], + ['789 ', 'xyz ']]).view(np.char.chararray) + self.B = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + + def test1(self): + assert_(np.all(self.A == self.B)) + assert_(np.all(self.A >= self.B)) + assert_(np.all(self.A <= self.B)) + assert_(not np.any(self.A > self.B)) + assert_(not np.any(self.A < self.B)) + assert_(not np.any(self.A != self.B)) + +class TestChar: + def setup_method(self): + self.A = np.array('abc1', dtype='c').view(np.char.chararray) + + def test_it(self): + assert_equal(self.A.shape, (4,)) + assert_equal(self.A.upper()[:2].tobytes(), b'AB') + +class TestComparisons: + def setup_method(self): + self.A = np.array([['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']]).view(np.char.chararray) + self.B = np.array([['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']]).view(np.char.chararray) + + def test_not_equal(self): + assert_array_equal((self.A != self.B), + [[True, True, False], [True, True, True]]) + + def test_equal(self): + assert_array_equal((self.A == self.B), + [[False, False, True], [False, False, False]]) + + def test_greater_equal(self): + assert_array_equal((self.A >= self.B), + [[False, False, True], [True, False, True]]) + + def test_less_equal(self): + assert_array_equal((self.A <= self.B), + [[True, True, True], [False, True, False]]) + + def test_greater(self): + assert_array_equal((self.A > self.B), + [[False, False, False], [True, False, True]]) + + def test_less(self): + assert_array_equal((self.A < self.B), + [[True, True, False], [False, True, False]]) + + def test_type(self): + out1 = np.char.equal(self.A, self.B) + out2 = np.char.equal('a', 'a') + assert_(isinstance(out1, np.ndarray)) + assert_(isinstance(out2, np.ndarray)) + +class TestComparisonsMixed1(TestComparisons): + """Ticket #1276""" + + def setup_method(self): + TestComparisons.setup_method(self) + self.B = np.array( + [['efg', 'efg', '123 '], + ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray) + +class TestComparisonsMixed2(TestComparisons): + """Ticket #1276""" + + def setup_method(self): + TestComparisons.setup_method(self) + self.A = np.array( + [['abc', 'abcc', '123'], + ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray) + +class TestInformation: + def setup_method(self): + 
self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + self.B = np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]) \ + .view(np.char.chararray) + # Array with longer strings, > MEMCHR_CUT_OFF in code. + self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ', + '01234567890123456789012345']) + .view(np.char.chararray)) + + def test_len(self): + assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer)) + assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]]) + assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]]) + + def test_count(self): + assert_(issubclass(self.A.count('').dtype.type, np.integer)) + assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]]) + assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]]) + # Python doesn't seem to like counting NULL characters + # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]]) + assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]]) + # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]]) + + def test_endswith(self): + assert_(issubclass(self.A.endswith('').dtype.type, np.bool)) + assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]]) + assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.endswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + @pytest.mark.parametrize( + "dtype, encode", + [("U", str), + ("S", lambda x: x.encode('ascii')), + ]) + def test_find(self, dtype, encode): + A = self.A.astype(dtype) + assert_(issubclass(A.find(encode('a')).dtype.type, np.integer)) + assert_array_equal(A.find(encode('a')), + [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(A.find(encode('3')), + [[-1, -1], [2, -1], [2, -1]]) + assert_array_equal(A.find(encode('a'), 0, 2), + [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(A.find([encode('1'), encode('P')]), + [[-1, -1], [0, -1], [0, 1]]) + C = self.C.astype(dtype) + assert_array_equal(C.find(encode('M')), [12, -1]) + + def test_index(self): + + def fail(): + self.A.index('a') + + assert_raises(ValueError, fail) + assert_(np.char.index('abcba', 'b') == 1) + assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer)) + + def test_isalnum(self): + assert_(issubclass(self.A.isalnum().dtype.type, np.bool)) + assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]]) + + def test_isalpha(self): + assert_(issubclass(self.A.isalpha().dtype.type, np.bool)) + assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]]) + + def test_isdigit(self): + assert_(issubclass(self.A.isdigit().dtype.type, np.bool)) + assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]]) + + def test_islower(self): + assert_(issubclass(self.A.islower().dtype.type, np.bool)) + assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]]) + + def test_isspace(self): + assert_(issubclass(self.A.isspace().dtype.type, np.bool)) + assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]]) + + def test_istitle(self): + assert_(issubclass(self.A.istitle().dtype.type, np.bool)) + assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]]) + + def test_isupper(self): + 
assert_(issubclass(self.A.isupper().dtype.type, np.bool)) + assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]]) + + def test_rfind(self): + assert_(issubclass(self.A.rfind('a').dtype.type, np.integer)) + assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]]) + assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]]) + assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]]) + assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]]) + + def test_rindex(self): + + def fail(): + self.A.rindex('a') + + assert_raises(ValueError, fail) + assert_(np.char.rindex('abcba', 'b') == 3) + assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer)) + + def test_startswith(self): + assert_(issubclass(self.A.startswith('').dtype.type, np.bool)) + assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]]) + assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]]) + + def fail(): + self.A.startswith('3', 'fdjk') + + assert_raises(TypeError, fail) + + +class TestMethods: + def setup_method(self): + self.A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], + dtype='S').view(np.char.chararray) + self.B = np.array([[' \u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']]).view( + np.char.chararray) + + def test_capitalize(self): + tgt = [[b' abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_)) + assert_array_equal(self.A.capitalize(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(self.B.capitalize().dtype.type, np.str_)) + assert_array_equal(self.B.capitalize(), tgt) + + def test_center(self): + assert_(issubclass(self.A.center(10).dtype.type, np.bytes_)) + C = self.A.center([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.center(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.center(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO ', b' FOO '], + [b' FOO ', b' FOO ']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_decode(self): + A = np.char.array([b'\\u03a3']) + assert_(A.decode('unicode-escape')[0] == '\u03a3') + + def test_encode(self): + B = self.B.encode('unicode_escape') + assert_(B[0][0] == ' \\u03a3 '.encode('latin1')) + + def test_expandtabs(self): + T = self.A.expandtabs() + assert_(T[2, 0] == b'123 345 \0') + + def test_join(self): + # NOTE: list(b'123') == [49, 50, 51] + # so that b','.join(b'123') results to an error on Py3 + A0 = self.A.decode('ascii') + + A = np.char.join([',', '#'], A0) + assert_(issubclass(A.dtype.type, np.str_)) + tgt = np.array([[' ,a,b,c, ', ''], + ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'], + ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']]) + assert_array_equal(np.char.join([',', '#'], A0), tgt) + + def test_ljust(self): + assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_)) + + C = self.A.ljust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.ljust(20, b'#') + assert_array_equal(C.startswith(b'#'), [ + [False, True], [False, False], [False, False]]) + assert_(np.all(C.endswith(b'#'))) + + C = np.char.ljust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b'FOO ', b'FOO '], + [b'FOO ', b'FOO ']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, 
tgt) + + def test_lower(self): + tgt = [[b' abc ', b''], + [b'12345', b'mixedcase'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.lower().dtype.type, np.bytes_)) + assert_array_equal(self.A.lower(), tgt) + + tgt = [[' \u03c3 ', ''], + ['12345', 'mixedcase'], + ['123 \t 345 \0 ', 'upper']] + assert_(issubclass(self.B.lower().dtype.type, np.str_)) + assert_array_equal(self.B.lower(), tgt) + + def test_lstrip(self): + tgt = [[b'abc ', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_)) + assert_array_equal(self.A.lstrip(), tgt) + + tgt = [[b' abc', b''], + [b'2345', b'ixedCase'], + [b'23 \t 345 \x00', b'UPPER']] + assert_array_equal(self.A.lstrip([b'1', b'M']), tgt) + + tgt = [['\u03a3 ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(self.B.lstrip().dtype.type, np.str_)) + assert_array_equal(self.B.lstrip(), tgt) + + def test_partition(self): + P = self.A.partition([b'3', b'M']) + tgt = [[(b' abc ', b'', b''), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]] + assert_(issubclass(P.dtype.type, np.bytes_)) + assert_array_equal(P, tgt) + + def test_replace(self): + R = self.A.replace([b'3', b'a'], + [b'##########', b'@']) + tgt = [[b' abc ', b''], + [b'12##########45', b'MixedC@se'], + [b'12########## \t ##########45 \x00 ', b'UPPER']] + assert_(issubclass(R.dtype.type, np.bytes_)) + assert_array_equal(R, tgt) + # Test special cases that should just return the input array, + # since replacements are not possible or do nothing. + S1 = self.A.replace(b'A very long byte string, longer than A', b'') + assert_array_equal(S1, self.A) + S2 = self.A.replace(b'', b'') + assert_array_equal(S2, self.A) + S3 = self.A.replace(b'3', b'3') + assert_array_equal(S3, self.A) + S4 = self.A.replace(b'3', b'', count=0) + assert_array_equal(S4, self.A) + + def test_replace_count_and_size(self): + a = np.array(['0123456789' * i for i in range(4)] + ).view(np.char.chararray) + r1 = a.replace('5', 'ABCDE') + assert r1.dtype.itemsize == (3 * 10 + 3 * 4) * 4 + assert_array_equal(r1, np.array(['01234ABCDE6789' * i + for i in range(4)])) + r2 = a.replace('5', 'ABCDE', count=1) + assert r2.dtype.itemsize == (3 * 10 + 4) * 4 + r3 = a.replace('5', 'ABCDE', count=0) + assert r3.dtype.itemsize == a.dtype.itemsize + assert_array_equal(r3, a) + # Negative values mean to replace all. + r4 = a.replace('5', 'ABCDE', count=-1) + assert r4.dtype.itemsize == (3 * 10 + 3 * 4) * 4 + assert_array_equal(r4, r1) + # We can do count on an element-by-element basis. 
+ r5 = a.replace('5', 'ABCDE', count=[-1, -1, -1, 1]) + assert r5.dtype.itemsize == (3 * 10 + 4) * 4 + assert_array_equal(r5, np.array( + ['01234ABCDE6789' * i for i in range(3)] + + ['01234ABCDE6789' + '0123456789' * 2])) + + def test_replace_broadcasting(self): + a = np.array('0,0,0').view(np.char.chararray) + r1 = a.replace('0', '1', count=np.arange(3)) + assert r1.dtype == a.dtype + assert_array_equal(r1, np.array(['0,0,0', '1,0,0', '1,1,0'])) + r2 = a.replace('0', [['1'], ['2']], count=np.arange(1, 4)) + assert_array_equal(r2, np.array([['1,0,0', '1,1,0', '1,1,1'], + ['2,0,0', '2,2,0', '2,2,2']])) + r3 = a.replace(['0', '0,0', '0,0,0'], 'X') + assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X'])) + + def test_rjust(self): + assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_)) + + C = self.A.rjust([10, 20]) + assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]]) + + C = self.A.rjust(20, b'#') + assert_(np.all(C.startswith(b'#'))) + assert_array_equal(C.endswith(b'#'), + [[False, True], [False, False], [False, False]]) + + C = np.char.rjust(b'FOO', [[10, 20], [15, 8]]) + tgt = [[b' FOO', b' FOO'], + [b' FOO', b' FOO']] + assert_(issubclass(C.dtype.type, np.bytes_)) + assert_array_equal(C, tgt) + + def test_rpartition(self): + P = self.A.rpartition([b'3', b'M']) + tgt = [[(b'', b'', b' abc '), (b'', b'', b'')], + [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')], + [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]] + assert_(issubclass(P.dtype.type, np.bytes_)) + assert_array_equal(P, tgt) + + def test_rsplit(self): + A = self.A.rsplit(b'3') + tgt = [[[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_rstrip(self): + assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_)) + + tgt = [[b' abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_array_equal(self.A.rstrip(), tgt) + + tgt = [[b' abc ', b''], + [b'1234', b'MixedCase'], + [b'123 \t 345 \x00', b'UPP'] + ] + assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt) + + tgt = [[' \u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.rstrip().dtype.type, np.str_)) + assert_array_equal(self.B.rstrip(), tgt) + + def test_strip(self): + tgt = [[b'abc', b''], + [b'12345', b'MixedCase'], + [b'123 \t 345', b'UPPER']] + assert_(issubclass(self.A.strip().dtype.type, np.bytes_)) + assert_array_equal(self.A.strip(), tgt) + + tgt = [[b' abc ', b''], + [b'234', b'ixedCas'], + [b'23 \t 345 \x00', b'UPP']] + assert_array_equal(self.A.strip([b'15', b'EReM']), tgt) + + tgt = [['\u03a3', ''], + ['12345', 'MixedCase'], + ['123 \t 345', 'UPPER']] + assert_(issubclass(self.B.strip().dtype.type, np.str_)) + assert_array_equal(self.B.strip(), tgt) + + def test_split(self): + A = self.A.split(b'3') + tgt = [ + [[b' abc '], [b'']], + [[b'12', b'45'], [b'MixedCase']], + [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]] + assert_(issubclass(A.dtype.type, np.object_)) + assert_equal(A.tolist(), tgt) + + def test_splitlines(self): + A = np.char.array(['abc\nfds\nwer']).splitlines() + assert_(issubclass(A.dtype.type, np.object_)) + assert_(A.shape == (1,)) + assert_(len(A[0]) == 3) + + def test_swapcase(self): + tgt = [[b' ABC ', b''], + [b'12345', b'mIXEDcASE'], + [b'123 \t 345 \0 ', b'upper']] + assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_)) + assert_array_equal(self.A.swapcase(), tgt) + + tgt = [[' \u03c3 ', ''], + 
['12345', 'mIXEDcASE'], + ['123 \t 345 \0 ', 'upper']] + assert_(issubclass(self.B.swapcase().dtype.type, np.str_)) + assert_array_equal(self.B.swapcase(), tgt) + + def test_title(self): + tgt = [[b' Abc ', b''], + [b'12345', b'Mixedcase'], + [b'123 \t 345 \0 ', b'Upper']] + assert_(issubclass(self.A.title().dtype.type, np.bytes_)) + assert_array_equal(self.A.title(), tgt) + + tgt = [[' \u03a3 ', ''], + ['12345', 'Mixedcase'], + ['123 \t 345 \0 ', 'Upper']] + assert_(issubclass(self.B.title().dtype.type, np.str_)) + assert_array_equal(self.B.title(), tgt) + + def test_upper(self): + tgt = [[b' ABC ', b''], + [b'12345', b'MIXEDCASE'], + [b'123 \t 345 \0 ', b'UPPER']] + assert_(issubclass(self.A.upper().dtype.type, np.bytes_)) + assert_array_equal(self.A.upper(), tgt) + + tgt = [[' \u03a3 ', ''], + ['12345', 'MIXEDCASE'], + ['123 \t 345 \0 ', 'UPPER']] + assert_(issubclass(self.B.upper().dtype.type, np.str_)) + assert_array_equal(self.B.upper(), tgt) + + def test_isnumeric(self): + + def fail(): + self.A.isnumeric() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isnumeric().dtype.type, np.bool)) + assert_array_equal(self.B.isnumeric(), [ + [False, False], [True, False], [False, False]]) + + def test_isdecimal(self): + + def fail(): + self.A.isdecimal() + + assert_raises(TypeError, fail) + assert_(issubclass(self.B.isdecimal().dtype.type, np.bool)) + assert_array_equal(self.B.isdecimal(), [ + [False, False], [True, False], [False, False]]) + + +class TestOperations: + def setup_method(self): + self.A = np.array([['abc', '123'], + ['789', 'xyz']]).view(np.char.chararray) + self.B = np.array([['efg', '456'], + ['051', 'tuv']]).view(np.char.chararray) + + def test_add(self): + AB = np.array([['abcefg', '123456'], + ['789051', 'xyztuv']]).view(np.char.chararray) + assert_array_equal(AB, (self.A + self.B)) + assert_(len((self.A + self.B)[0][0]) == 6) + + def test_radd(self): + QA = np.array([['qabc', 'q123'], + ['q789', 'qxyz']]).view(np.char.chararray) + assert_array_equal(QA, ('q' + self.A)) + + def test_mul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) + + assert_array_equal(Ar, (self.A * r)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + A * ob + + def test_rmul(self): + A = self.A + for r in (2, 3, 5, 7, 197): + Ar = np.array([[A[0, 0] * r, A[0, 1] * r], + [A[1, 0] * r, A[1, 1] * r]]).view(np.char.chararray) + assert_array_equal(Ar, (r * self.A)) + + for ob in [object(), 'qrs']: + with assert_raises_regex(ValueError, + 'Can only multiply by integers'): + ob * A + + def test_mod(self): + """Ticket #856""" + F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.char.chararray) + C = np.array([[3, 7], [19, 1]], dtype=np.int64) + FC = np.array([['3', '7.000000'], + ['19', 'np.int64(1)']]).view(np.char.chararray) + assert_array_equal(FC, F % C) + + A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.char.chararray) + A1 = np.array([['1.000', '1'], + ['1', repr(np.array(1)[()])]]).view(np.char.chararray) + assert_array_equal(A1, (A % 1)) + + A2 = np.array([['1.000', '2'], + ['3', repr(np.array(4)[()])]]).view(np.char.chararray) + assert_array_equal(A2, (A % [[1, 2], [3, 4]])) + + def test_rmod(self): + assert_(f"{self.A}" == str(self.A)) + assert_(f"{self.A!r}" == repr(self.A)) + + for ob in [42, object()]: + with assert_raises_regex( + TypeError, "unsupported operand type.* and 'chararray'"): + ob % self.A + + def 
test_slice(self): + """Regression test for https://github.com/numpy/numpy/issues/5982""" + + arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']], + dtype='S4').view(np.char.chararray) + sl1 = arr[:] + assert_array_equal(sl1, arr) + assert_(sl1.base is arr) + assert_(sl1.base.base is arr.base) + + sl2 = arr[:, :] + assert_array_equal(sl2, arr) + assert_(sl2.base is arr) + assert_(sl2.base.base is arr.base) + + assert_(arr[0, 0] == b'abc') + + @pytest.mark.parametrize('data', [['plate', ' ', 'shrimp'], + [b'retro', b' ', b'encabulator']]) + def test_getitem_length_zero_item(self, data): + # Regression test for gh-26375. + a = np.char.array(data) + # a.dtype.type() will be an empty string or bytes instance. + # The equality test will fail if a[1] has the wrong type + # or does not have length 0. + assert_equal(a[1], a.dtype.type()) + + +class TestMethodsEmptyArray: + def setup_method(self): + self.U = np.array([], dtype='U') + self.S = np.array([], dtype='S') + + def test_encode(self): + res = np.char.encode(self.U) + assert_array_equal(res, []) + assert_(res.dtype.char == 'S') + + def test_decode(self): + res = np.char.decode(self.S) + assert_array_equal(res, []) + assert_(res.dtype.char == 'U') + + def test_decode_with_reshape(self): + res = np.char.decode(self.S.reshape((1, 0, 1))) + assert_(res.shape == (1, 0, 1)) + + +class TestMethodsScalarValues: + def test_mod(self): + A = np.array([[' abc ', ''], + ['12345', 'MixedCase'], + ['123 \t 345 \0 ', 'UPPER']], dtype='S') + tgt = [[b'123 abc ', b'123'], + [b'12312345', b'123MixedCase'], + [b'123123 \t 345 \0 ', b'123UPPER']] + assert_array_equal(np.char.mod(b"123%s", A), tgt) + + def test_decode(self): + bytestring = b'\x81\xc1\x81\xc1\x81\xc1' + assert_equal(np.char.decode(bytestring, encoding='cp037'), + 'aAaAaA') + + def test_encode(self): + unicode = 'aAaAaA' + assert_equal(np.char.encode(unicode, encoding='cp037'), + b'\x81\xc1\x81\xc1\x81\xc1') + + def test_expandtabs(self): + s = "\tone level of indentation\n\t\ttwo levels of indentation" + assert_equal( + np.char.expandtabs(s, tabsize=2), + " one level of indentation\n two levels of indentation" + ) + + def test_join(self): + seps = np.array(['-', '_']) + assert_array_equal(np.char.join(seps, 'hello'), + ['h-e-l-l-o', 'h_e_l_l_o']) + + def test_partition(self): + assert_equal(np.char.partition('This string', ' '), + ['This', ' ', 'string']) + + def test_rpartition(self): + assert_equal(np.char.rpartition('This string here', ' '), + ['This string', ' ', 'here']) + + def test_replace(self): + assert_equal(np.char.replace('Python is good', 'good', 'great'), + 'Python is great') + + +def test_empty_indexing(): + """Regression test for ticket 1948.""" + # Check that indexing a chararray with an empty list/array returns an + # empty chararray instead of a chararray with a single empty string in it. + s = np.char.chararray((4,)) + assert_(s[[]].size == 0) diff --git a/python/numpy/_core/tests/test_deprecations.py b/python/numpy/_core/tests/test_deprecations.py new file mode 100644 index 000000000..d90c15565 --- /dev/null +++ b/python/numpy/_core/tests/test_deprecations.py @@ -0,0 +1,454 @@ +""" +Tests related to deprecation warnings. Also a convenient place +to document how deprecations should eventually be turned into errors. 
+ +""" +import contextlib +import warnings + +import numpy._core._struct_ufunc_tests as struct_ufunc +import pytest +from numpy._core._multiarray_tests import fromstring_null_term_c_api # noqa: F401 + +import numpy as np +from numpy.testing import assert_raises, temppath + +try: + import pytz # noqa: F401 + _has_pytz = True +except ImportError: + _has_pytz = False + + +class _DeprecationTestCase: + # Just as warning: warnings uses re.match, so the start of this message + # must match. + message = '' + warning_cls = DeprecationWarning + + def setup_method(self): + self.warn_ctx = warnings.catch_warnings(record=True) + self.log = self.warn_ctx.__enter__() + + # Do *not* ignore other DeprecationWarnings. Ignoring warnings + # can give very confusing results because of + # https://bugs.python.org/issue4180 and it is probably simplest to + # try to keep the tests cleanly giving only the right warning type. + # (While checking them set to "error" those are ignored anyway) + # We still have them show up, because otherwise they would be raised + warnings.filterwarnings("always", category=self.warning_cls) + warnings.filterwarnings("always", message=self.message, + category=self.warning_cls) + + def teardown_method(self): + self.warn_ctx.__exit__() + + def assert_deprecated(self, function, num=1, ignore_others=False, + function_fails=False, + exceptions=np._NoValue, + args=(), kwargs={}): + """Test if DeprecationWarnings are given and raised. + + This first checks if the function when called gives `num` + DeprecationWarnings, after that it tries to raise these + DeprecationWarnings and compares them with `exceptions`. + The exceptions can be different for cases where this code path + is simply not anticipated and the exception is replaced. + + Parameters + ---------- + function : callable + The function to test + num : int + Number of DeprecationWarnings to expect. This should normally be 1. + ignore_others : bool + Whether warnings of the wrong type should be ignored (note that + the message is not checked) + function_fails : bool + If the function would normally fail, setting this will check for + warnings inside a try/except block. + exceptions : Exception or tuple of Exceptions + Exception to expect when turning the warnings into an error. + The default checks for DeprecationWarnings. If exceptions is + empty the function is expected to run successfully. + args : tuple + Arguments for `function` + kwargs : dict + Keyword arguments for `function` + """ + __tracebackhide__ = True # Hide traceback for py.test + + # reset the log + self.log[:] = [] + + if exceptions is np._NoValue: + exceptions = (self.warning_cls,) + + if function_fails: + context_manager = contextlib.suppress(Exception) + else: + context_manager = contextlib.nullcontext() + with context_manager: + function(*args, **kwargs) + + # just in case, clear the registry + num_found = 0 + for warning in self.log: + if warning.category is self.warning_cls: + num_found += 1 + elif not ignore_others: + raise AssertionError( + "expected %s but got: %s" % + (self.warning_cls.__name__, warning.category)) + if num is not None and num_found != num: + msg = f"{len(self.log)} warnings found but {num} expected." 
+ lst = [str(w) for w in self.log] + raise AssertionError("\n".join([msg] + lst)) + + with warnings.catch_warnings(): + warnings.filterwarnings("error", message=self.message, + category=self.warning_cls) + try: + function(*args, **kwargs) + if exceptions != (): + raise AssertionError( + "No error raised during function call") + except exceptions: + if exceptions == (): + raise AssertionError( + "Error raised during function call") + + def assert_not_deprecated(self, function, args=(), kwargs={}): + """Test that warnings are not raised. + + This is just a shorthand for: + + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=tuple(), args=args, kwargs=kwargs) + """ + self.assert_deprecated(function, num=0, ignore_others=True, + exceptions=(), args=args, kwargs=kwargs) + + +class _VisibleDeprecationTestCase(_DeprecationTestCase): + warning_cls = np.exceptions.VisibleDeprecationWarning + + +class TestTestDeprecated: + def test_assert_deprecated(self): + test_case_instance = _DeprecationTestCase() + test_case_instance.setup_method() + assert_raises(AssertionError, + test_case_instance.assert_deprecated, + lambda: None) + + def foo(): + warnings.warn("foo", category=DeprecationWarning, stacklevel=2) + + test_case_instance.assert_deprecated(foo) + test_case_instance.teardown_method() + + +class TestBincount(_DeprecationTestCase): + # 2024-07-29, 2.1.0 + @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], + ['0', '1', '1']]) + def test_bincount_bad_list(self, badlist): + self.assert_deprecated(lambda: np.bincount(badlist)) + + +class TestGeneratorSum(_DeprecationTestCase): + # 2018-02-25, 1.15.0 + def test_generator_sum(self): + self.assert_deprecated(np.sum, args=((i for i in range(5)),)) + + +class BuiltInRoundComplexDType(_DeprecationTestCase): + # 2020-03-31 1.19.0 + deprecated_types = [np.csingle, np.cdouble, np.clongdouble] + not_deprecated_types = [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ] + + def test_deprecated(self): + for scalar_type in self.deprecated_types: + scalar = scalar_type(0) + self.assert_deprecated(round, args=(scalar,)) + self.assert_deprecated(round, args=(scalar, 0)) + self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + def test_not_deprecated(self): + for scalar_type in self.not_deprecated_types: + scalar = scalar_type(0) + self.assert_not_deprecated(round, args=(scalar,)) + self.assert_not_deprecated(round, args=(scalar, 0)) + self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0}) + + +class FlatteningConcatenateUnsafeCast(_DeprecationTestCase): + # NumPy 1.20, 2020-09-03 + message = "concatenate with `axis=None` will use same-kind casting" + + def test_deprecated(self): + self.assert_deprecated(np.concatenate, + args=(([0.], [1.]),), + kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64)}) + + def test_not_deprecated(self): + self.assert_not_deprecated(np.concatenate, + args=(([0.], [1.]),), + kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64), + 'casting': "unsafe"}) + + with assert_raises(TypeError): + # Tests should notice if the deprecation warning is given first... 
+ np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64), + casting="same_kind") + + +class TestCtypesGetter(_DeprecationTestCase): + # Deprecated 2021-05-18, Numpy 1.21.0 + warning_cls = DeprecationWarning + ctypes = np.array([1]).ctypes + + @pytest.mark.parametrize( + "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"] + ) + def test_deprecated(self, name: str) -> None: + func = getattr(self.ctypes, name) + self.assert_deprecated(func) + + @pytest.mark.parametrize( + "name", ["data", "shape", "strides", "_as_parameter_"] + ) + def test_not_deprecated(self, name: str) -> None: + self.assert_not_deprecated(lambda: getattr(self.ctypes, name)) + + +class TestMachAr(_DeprecationTestCase): + # Deprecated 2022-11-22, NumPy 1.25 + warning_cls = DeprecationWarning + + def test_deprecated_module(self): + self.assert_deprecated(lambda: np._core.MachAr) + + +class TestQuantileInterpolationDeprecation(_DeprecationTestCase): + # Deprecated 2021-11-08, NumPy 1.22 + @pytest.mark.parametrize("func", + [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) + def test_deprecated(self, func): + self.assert_deprecated( + lambda: func([0., 1.], 0., interpolation="linear")) + self.assert_deprecated( + lambda: func([0., 1.], 0., interpolation="nearest")) + + @pytest.mark.parametrize("func", + [np.percentile, np.quantile, np.nanpercentile, np.nanquantile]) + def test_both_passed(self, func): + with warnings.catch_warnings(): + # catch the DeprecationWarning so that it does not raise: + warnings.simplefilter("always", DeprecationWarning) + with pytest.raises(TypeError): + func([0., 1.], 0., interpolation="nearest", method="nearest") + + +class TestScalarConversion(_DeprecationTestCase): + # 2023-01-02, 1.25.0 + def test_float_conversion(self): + self.assert_deprecated(float, args=(np.array([3.14]),)) + + def test_behaviour(self): + b = np.array([[3.14]]) + c = np.zeros(5) + with pytest.warns(DeprecationWarning): + c[0] = b + + +class TestPyIntConversion(_DeprecationTestCase): + message = r".*stop allowing conversion of out-of-bound.*" + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_deprecated_scalar(self, dtype): + dtype = np.dtype(dtype) + info = np.iinfo(dtype) + + # Cover the most common creation paths (all end up in the + # same place): + def scalar(value, dtype): + dtype.type(value) + + def assign(value, dtype): + arr = np.array([0, 0, 0], dtype=dtype) + arr[2] = value + + def create(value, dtype): + np.array([value], dtype=dtype) + + for creation_func in [scalar, assign, create]: + try: + self.assert_deprecated( + lambda: creation_func(info.min - 1, dtype)) + except OverflowError: + pass # OverflowErrors also happened before and are OK. + + try: + self.assert_deprecated( + lambda: creation_func(info.max + 1, dtype)) + except OverflowError: + pass # OverflowErrors also happened before and are OK. + + +@pytest.mark.parametrize("name", ["str", "bytes", "object"]) +def test_future_scalar_attributes(name): + # FutureWarning added 2022-11-17, NumPy 1.24. + assert name not in dir(np) # we may want to not add them + with pytest.warns(FutureWarning, + match=f"In the future .*{name}"): + assert not hasattr(np, name) + + # Unfortunately, they are currently still valid via `np.dtype()` + np.dtype(name) + assert name in np._core.sctypeDict + + +# Ignore the above future attribute warning for this test. 
+@pytest.mark.filterwarnings("ignore:In the future:FutureWarning") +class TestRemovedGlobals: + # Removed 2023-01-12, NumPy 1.24.0 + # Not a deprecation, but the large error was added to aid those who missed + # the previous deprecation, and should be removed similarly to one + # (or faster). + @pytest.mark.parametrize("name", + ["object", "float", "complex", "str", "int"]) + def test_attributeerror_includes_info(self, name): + msg = f".*\n`np.{name}` was a deprecated alias for the builtin" + with pytest.raises(AttributeError, match=msg): + getattr(np, name) + + +class TestDeprecatedFinfo(_DeprecationTestCase): + # Deprecated in NumPy 1.25, 2023-01-16 + def test_deprecated_none(self): + self.assert_deprecated(np.finfo, args=(None,)) + + +class TestMathAlias(_DeprecationTestCase): + def test_deprecated_np_lib_math(self): + self.assert_deprecated(lambda: np.lib.math) + + +class TestLibImports(_DeprecationTestCase): + # Deprecated in Numpy 1.26.0, 2023-09 + def test_lib_functions_deprecation_call(self): + from numpy import in1d, row_stack, trapz + from numpy._core.numerictypes import maximum_sctype + from numpy.lib._function_base_impl import disp + from numpy.lib._npyio_impl import recfromcsv, recfromtxt + from numpy.lib._shape_base_impl import get_array_wrap + from numpy.lib._utils_impl import safe_eval + from numpy.lib.tests.test_io import TextIO + + self.assert_deprecated(lambda: safe_eval("None")) + + data_gen = lambda: TextIO('A,B\n0,1\n2,3') + kwargs = {'delimiter': ",", 'missing_values': "N/A", 'names': True} + self.assert_deprecated(lambda: recfromcsv(data_gen())) + self.assert_deprecated(lambda: recfromtxt(data_gen(), **kwargs)) + + self.assert_deprecated(lambda: disp("test")) + self.assert_deprecated(get_array_wrap) + self.assert_deprecated(lambda: maximum_sctype(int)) + + self.assert_deprecated(lambda: in1d([1], [1])) + self.assert_deprecated(lambda: row_stack([[]])) + self.assert_deprecated(lambda: trapz([1], [1])) + self.assert_deprecated(lambda: np.chararray) + + +class TestDeprecatedDTypeAliases(_DeprecationTestCase): + + def _check_for_warning(self, func): + with warnings.catch_warnings(record=True) as caught_warnings: + func() + assert len(caught_warnings) == 1 + w = caught_warnings[0] + assert w.category is DeprecationWarning + assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) + + def test_a_dtype_alias(self): + for dtype in ["a", "a10"]: + f = lambda: np.dtype(dtype) + self._check_for_warning(f) + self.assert_deprecated(f) + f = lambda: np.array(["hello", "world"]).astype("a10") + self._check_for_warning(f) + self.assert_deprecated(f) + + +class TestDeprecatedArrayWrap(_DeprecationTestCase): + message = "__array_wrap__.*" + + def test_deprecated(self): + class Test1: + def __array__(self, dtype=None, copy=None): + return np.arange(4) + + def __array_wrap__(self, arr, context=None): + self.called = True + return 'pass context' + + class Test2(Test1): + def __array_wrap__(self, arr): + self.called = True + return 'pass' + + test1 = Test1() + test2 = Test2() + self.assert_deprecated(lambda: np.negative(test1)) + assert test1.called + self.assert_deprecated(lambda: np.negative(test2)) + assert test2.called + + +class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): + message = "Passing in a parenthesized single number" + + @pytest.mark.parametrize("string", ["(2)i,", "(3)3S,", "f,(2)f"]) + def test_parenthesized_repeat_count(self, string): + self.assert_deprecated(np.dtype, args=(string,)) + + +class 
TestDeprecatedSaveFixImports(_DeprecationTestCase): + # Deprecated in Numpy 2.1, 2024-05 + message = "The 'fix_imports' flag is deprecated and has no effect." + + def test_deprecated(self): + with temppath(suffix='.npy') as path: + sample_args = (path, np.array(np.zeros((1024, 10)))) + self.assert_not_deprecated(np.save, args=sample_args) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'fix_imports': False}) + for allow_pickle in [True, False]: + self.assert_not_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle, + 'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle, + 'fix_imports': False}) + + +class TestAddNewdocUFunc(_DeprecationTestCase): + # Deprecated in Numpy 2.2, 2024-11 + def test_deprecated(self): + self.assert_deprecated( + lambda: np._core.umath._add_newdoc_ufunc( + struct_ufunc.add_triplet, "new docs" + ) + ) diff --git a/python/numpy/_core/tests/test_dlpack.py b/python/numpy/_core/tests/test_dlpack.py new file mode 100644 index 000000000..89c24032b --- /dev/null +++ b/python/numpy/_core/tests/test_dlpack.py @@ -0,0 +1,190 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import IS_PYPY, assert_array_equal + + +def new_and_old_dlpack(): + yield np.arange(5) + + class OldDLPack(np.ndarray): + # Support only the "old" version + def __dlpack__(self, stream=None): + return super().__dlpack__(stream=None) + + yield np.arange(5).view(OldDLPack) + + +class TestDLPack: + @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") + @pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)]) + def test_dunder_dlpack_refcount(self, max_version): + x = np.arange(5) + y = x.__dlpack__(max_version=max_version) + startcount = sys.getrefcount(x) + del y + assert startcount - sys.getrefcount(x) == 1 + + def test_dunder_dlpack_stream(self): + x = np.arange(5) + x.__dlpack__(stream=None) + + with pytest.raises(RuntimeError): + x.__dlpack__(stream=1) + + def test_dunder_dlpack_copy(self): + # Checks the argument parsing of __dlpack__ explicitly. + # Honoring the flag is tested in the from_dlpack round-tripping test. + x = np.arange(5) + x.__dlpack__(copy=True) + x.__dlpack__(copy=None) + x.__dlpack__(copy=False) + + with pytest.raises(ValueError): + # NOTE: The copy converter should be stricter, but not just here. 
+ x.__dlpack__(copy=np.array([1, 2, 3])) + + def test_strides_not_multiple_of_itemsize(self): + dt = np.dtype([('int', np.int32), ('char', np.int8)]) + y = np.zeros((5,), dtype=dt) + z = y['int'] + + with pytest.raises(BufferError): + np.from_dlpack(z) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_from_dlpack_refcount(self, arr): + arr = arr.copy() + y = np.from_dlpack(arr) + startcount = sys.getrefcount(arr) + del y + assert startcount - sys.getrefcount(arr) == 1 + + @pytest.mark.parametrize("dtype", [ + np.bool, + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.complex64, np.complex128 + ]) + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_dtype_passthrough(self, arr, dtype): + x = arr.astype(dtype) + y = np.from_dlpack(x) + + assert y.dtype == x.dtype + assert_array_equal(x, y) + + def test_invalid_dtype(self): + x = np.asarray(np.datetime64('2021-05-27')) + + with pytest.raises(BufferError): + np.from_dlpack(x) + + def test_invalid_byte_swapping(self): + dt = np.dtype('=i8').newbyteorder() + x = np.arange(5, dtype=dt) + + with pytest.raises(BufferError): + np.from_dlpack(x) + + def test_non_contiguous(self): + x = np.arange(25).reshape((5, 5)) + + y1 = x[0] + assert_array_equal(y1, np.from_dlpack(y1)) + + y2 = x[:, 0] + assert_array_equal(y2, np.from_dlpack(y2)) + + y3 = x[1, :] + assert_array_equal(y3, np.from_dlpack(y3)) + + y4 = x[1] + assert_array_equal(y4, np.from_dlpack(y4)) + + y5 = np.diagonal(x).copy() + assert_array_equal(y5, np.from_dlpack(y5)) + + @pytest.mark.parametrize("ndim", range(33)) + def test_higher_dims(self, ndim): + shape = (1,) * ndim + x = np.zeros(shape, dtype=np.float64) + + assert shape == np.from_dlpack(x).shape + + def test_dlpack_device(self): + x = np.arange(5) + assert x.__dlpack_device__() == (1, 0) + y = np.from_dlpack(x) + assert y.__dlpack_device__() == (1, 0) + z = y[::2] + assert z.__dlpack_device__() == (1, 0) + + def dlpack_deleter_exception(self, max_version): + x = np.arange(5) + _ = x.__dlpack__(max_version=max_version) + raise RuntimeError + + @pytest.mark.parametrize("max_version", [None, (1, 0)]) + def test_dlpack_destructor_exception(self, max_version): + with pytest.raises(RuntimeError): + self.dlpack_deleter_exception(max_version=max_version) + + def test_readonly(self): + x = np.arange(5) + x.flags.writeable = False + # Raises without max_version + with pytest.raises(BufferError): + x.__dlpack__() + + # But works fine if we try with version + y = np.from_dlpack(x) + assert not y.flags.writeable + + def test_writeable(self): + x_new, x_old = new_and_old_dlpack() + + # new dlpacks respect writeability + y = np.from_dlpack(x_new) + assert y.flags.writeable + + # old dlpacks are not writeable for backwards compatibility + y = np.from_dlpack(x_old) + assert not y.flags.writeable + + def test_ndim0(self): + x = np.array(1.0) + y = np.from_dlpack(x) + assert_array_equal(x, y) + + def test_size1dims_arrays(self): + x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4), + buffer=np.ones(1000, dtype=np.uint8), order='F') + y = np.from_dlpack(x) + assert_array_equal(x, y) + + def test_copy(self): + x = np.arange(5) + + y = np.from_dlpack(x) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=False) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=True) + assert not np.may_share_memory(x, y) + + def test_device(self): + x = 
np.arange(5) + # requesting (1, 0), i.e. CPU device works in both calls: + x.__dlpack__(dl_device=(1, 0)) + np.from_dlpack(x, device="cpu") + np.from_dlpack(x, device=None) + + with pytest.raises(ValueError): + x.__dlpack__(dl_device=(10, 0)) + with pytest.raises(ValueError): + np.from_dlpack(x, device="gpu") diff --git a/python/numpy/_core/tests/test_dtype.py b/python/numpy/_core/tests/test_dtype.py new file mode 100644 index 000000000..684672a9b --- /dev/null +++ b/python/numpy/_core/tests/test_dtype.py @@ -0,0 +1,1995 @@ +import ctypes +import gc +import operator +import pickle +import random +import sys +import types +from itertools import permutations +from typing import Any + +import hypothesis +import pytest +from hypothesis.extra import numpy as hynp +from numpy._core._multiarray_tests import create_custom_field_dtype +from numpy._core._rational_tests import rational + +import numpy as np +import numpy.dtypes +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYSTON, + IS_WASM, + assert_, + assert_array_equal, + assert_equal, + assert_raises, +) + + +def assert_dtype_equal(a, b): + assert_equal(a, b) + assert_equal(hash(a), hash(b), + "two equivalent types do not hash to the same value !") + +def assert_dtype_not_equal(a, b): + assert_(a != b) + assert_(hash(a) != hash(b), + "two different types hash to the same value !") + +class TestBuiltin: + @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object]) + def test_run(self, t): + """Only test hash runs at all.""" + dt = np.dtype(t) + hash(dt) + + @pytest.mark.parametrize('t', [int, float]) + def test_dtype(self, t): + # Make sure equivalent byte order char hash the same (e.g. < and = on + # little endian) + dt = np.dtype(t) + dt2 = dt.newbyteorder("<") + dt3 = dt.newbyteorder(">") + if dt == dt2: + assert_(dt.byteorder != dt2.byteorder, "bogus test") + assert_dtype_equal(dt, dt2) + else: + assert_(dt.byteorder != dt3.byteorder, "bogus test") + assert_dtype_equal(dt, dt3) + + def test_equivalent_dtype_hashing(self): + # Make sure equivalent dtypes with different type num hash equal + uintp = np.dtype(np.uintp) + if uintp.itemsize == 4: + left = uintp + right = np.dtype(np.uint32) + else: + left = uintp + right = np.dtype(np.ulonglong) + assert_(left == right) + assert_(hash(left) == hash(right)) + + def test_invalid_types(self): + # Make sure invalid type strings raise an error + + assert_raises(TypeError, np.dtype, 'O3') + assert_raises(TypeError, np.dtype, 'O5') + assert_raises(TypeError, np.dtype, 'O7') + assert_raises(TypeError, np.dtype, 'b3') + assert_raises(TypeError, np.dtype, 'h4') + assert_raises(TypeError, np.dtype, 'I5') + assert_raises(TypeError, np.dtype, 'e3') + assert_raises(TypeError, np.dtype, 'f5') + + if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16: + assert_raises(TypeError, np.dtype, 'g12') + elif np.dtype('g').itemsize == 12: + assert_raises(TypeError, np.dtype, 'g16') + + if np.dtype('l').itemsize == 8: + assert_raises(TypeError, np.dtype, 'l4') + assert_raises(TypeError, np.dtype, 'L4') + else: + assert_raises(TypeError, np.dtype, 'l8') + assert_raises(TypeError, np.dtype, 'L8') + + if np.dtype('q').itemsize == 8: + assert_raises(TypeError, np.dtype, 'q4') + assert_raises(TypeError, np.dtype, 'Q4') + else: + assert_raises(TypeError, np.dtype, 'q8') + assert_raises(TypeError, np.dtype, 'Q8') + + # Make sure negative-sized dtype raises an error + assert_raises(TypeError, np.dtype, 'S-1') + assert_raises(TypeError, np.dtype, 'U-1') + assert_raises(TypeError, np.dtype, 'V-1') + + 
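+ # For illustration, a minimal sketch of the comparison semantics the
+ # next two tests exercise (7 stands in for any object that cannot be
+ # converted to a dtype):
+ #   np.dtype(np.int32) == 7  # -> False, no exception
+ #   np.dtype(np.int32) != 7  # -> True, no exception
+ #   np.dtype(np.int32) <= 7  # -> raises TypeError (ordering operators)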
def test_richcompare_invalid_dtype_equality(self): + # Make sure objects that cannot be converted to valid + # dtypes result in False/True when compared to valid dtypes. + # Here 7 cannot be converted to a dtype. No exceptions should be raised. + + assert not np.dtype(np.int32) == 7, "dtype richcompare failed for ==" + assert np.dtype(np.int32) != 7, "dtype richcompare failed for !=" + + @pytest.mark.parametrize( + 'operation', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_richcompare_invalid_dtype_comparison(self, operation): + # Make sure TypeError is raised for comparison operators + # for invalid dtypes. Here 7 is an invalid dtype. + + with pytest.raises(TypeError): + operation(np.dtype(np.int32), 7) + + @pytest.mark.parametrize("dtype", + ['Bool', 'Bytes0', 'Complex32', 'Complex64', + 'Datetime64', 'Float16', 'Float32', 'Float64', + 'Int8', 'Int16', 'Int32', 'Int64', + 'Object0', 'Str0', 'Timedelta64', + 'UInt8', 'UInt16', 'Uint32', 'UInt32', + 'Uint64', 'UInt64', 'Void0', + "Float128", "Complex128"]) + def test_numeric_style_types_are_invalid(self, dtype): + with assert_raises(TypeError): + np.dtype(dtype) + + def test_expired_dtypes_with_bad_bytesize(self): + match: str = r".*removed in NumPy 2.0.*" + with pytest.raises(TypeError, match=match): + np.dtype("int0") + with pytest.raises(TypeError, match=match): + np.dtype("uint0") + with pytest.raises(TypeError, match=match): + np.dtype("bool8") + with pytest.raises(TypeError, match=match): + np.dtype("bytes0") + with pytest.raises(TypeError, match=match): + np.dtype("str0") + with pytest.raises(TypeError, match=match): + np.dtype("object0") + with pytest.raises(TypeError, match=match): + np.dtype("void0") + + @pytest.mark.parametrize( + 'value', + ['m8', 'M8', 'datetime64', 'timedelta64', + 'i4, (2,3)f8, f4', 'S3, 3u8, (3,4)S10', + '>f', '= (3, 12), + reason="Python 3.12 has immortal refcounts, this test will no longer " + "work. See gh-23986" +) +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestStructuredObjectRefcounting: + """These tests cover various uses of complicated structured types which + include objects and thus require reference counting. + """ + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + @pytest.mark.parametrize(["creation_func", "creation_obj"], [ + pytest.param(np.empty, None, + # None is probably used for too many things + marks=pytest.mark.skip("unreliable due to python's behaviour")), + (np.ones, 1), + (np.zeros, 0)]) + def test_structured_object_create_delete(self, dt, pat, count, singleton, + creation_func, creation_obj): + """Structured object reference counting in creation and deletion""" + # The test assumes that 0, 1, and None are singletons. + gc.collect() + before = sys.getrefcount(creation_obj) + arr = creation_func(3, dt) + + now = sys.getrefcount(creation_obj) + assert now - before == count * 3 + del arr + now = sys.getrefcount(creation_obj) + assert now == before + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + def test_structured_object_item_setting(self, dt, pat, count, singleton): + """Structured object reference counting for simple item setting""" + one = 1 + + gc.collect() + before = sys.getrefcount(singleton) + arr = np.array([pat] * 3, dt) + assert sys.getrefcount(singleton) - before == count * 3 + # Fill with `1` and check that it was replaced correctly: + before2 = sys.getrefcount(one) + arr[...] 
= one + after2 = sys.getrefcount(one) + assert after2 - before2 == count * 3 + del arr + gc.collect() + assert sys.getrefcount(one) == before2 + assert sys.getrefcount(singleton) == before + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + @pytest.mark.parametrize( + ['shape', 'index', 'items_changed'], + [((3,), ([0, 2],), 2), + ((3, 2), ([0, 2], slice(None)), 4), + ((3, 2), ([0, 2], [1]), 2), + ((3,), ([True, False, True]), 2)]) + def test_structured_object_indexing(self, shape, index, items_changed, + dt, pat, count, singleton): + """Structured object reference counting for advanced indexing.""" + # Use two small negative values (should be singletons, but less likely + # to run into race-conditions). This failed in some threaded envs + # when using 0 and 1. If it fails again, should remove all explicit + # checks, and rely on `pytest-leaks` reference count checker only. + val0 = -4 + val1 = -5 + + arr = np.full(shape, val0, dt) + + gc.collect() + before_val0 = sys.getrefcount(val0) + before_val1 = sys.getrefcount(val1) + # Test item getting: + part = arr[index] + after_val0 = sys.getrefcount(val0) + assert after_val0 - before_val0 == count * items_changed + del part + # Test item setting: + arr[index] = val1 + gc.collect() + after_val0 = sys.getrefcount(val0) + after_val1 = sys.getrefcount(val1) + assert before_val0 - after_val0 == count * items_changed + assert after_val1 - before_val1 == count * items_changed + + @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'], + iter_struct_object_dtypes()) + def test_structured_object_take_and_repeat(self, dt, pat, count, singleton): + """Structured object reference counting for specialized functions. + The older functions such as take and repeat use different code paths + than item setting (at the time of writing). + """ + indices = [0, 1] + + arr = np.array([pat] * 3, dt) + gc.collect() + before = sys.getrefcount(singleton) + res = arr.take(indices) + after = sys.getrefcount(singleton) + assert after - before == count * 2 + new = res.repeat(10) + gc.collect() + after_repeat = sys.getrefcount(singleton) + assert after_repeat - after == count * 2 * 10 + + +class TestStructuredDtypeSparseFields: + """Tests that subarray fields which contain sparse dtypes (so that + not all of the memory is used by the dtype) work. Such dtypes should + leave the underlying memory unchanged. + """ + dtype = np.dtype([('a', {'names': ['aa', 'ab'], 'formats': ['f', 'f'], + 'offsets': [0, 4]}, (2, 3))]) + sparse_dtype = np.dtype([('a', {'names': ['ab'], 'formats': ['f'], + 'offsets': [4]}, (2, 3))]) + + def test_sparse_field_assignment(self): + arr = np.zeros(3, self.dtype) + sparse_arr = arr.view(self.sparse_dtype) + + sparse_arr[...] 
= np.finfo(np.float32).max + # dtype is reduced when accessing the field, so shape is (3, 2, 3): + assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3))) + + def test_sparse_field_assignment_fancy(self): + # Fancy assignment goes to the copyswap function for complex types: + arr = np.zeros(3, self.dtype) + sparse_arr = arr.view(self.sparse_dtype) + + sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max + # dtype is reduced when accessing the field, so shape is (3, 2, 3): + assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3))) + + +class TestMonsterType: + """Test deeply nested subtypes.""" + + def test1(self): + simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'], + 'titles': ['Red pixel', 'Blue pixel']}) + a = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((int, (3, 2))))]) + b = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((int, (3, 2))))]) + assert_dtype_equal(a, b) + + c = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((a, (3, 2))))]) + d = np.dtype([('yo', int), ('ye', simple1), + ('yi', np.dtype((a, (3, 2))))]) + assert_dtype_equal(c, d) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_list_recursion(self): + l = [] + l.append(('f', l)) + with pytest.raises(RecursionError): + np.dtype(l) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_tuple_recursion(self): + d = np.int32 + for i in range(100000): + d = (d, (1,)) + with pytest.raises(RecursionError): + np.dtype(d) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_dict_recursion(self): + d = {"names": ['self'], "formats": [None], "offsets": [0]} + d['formats'][0] = d + with pytest.raises(RecursionError): + np.dtype(d) + + +class TestMetadata: + def test_no_metadata(self): + d = np.dtype(int) + assert_(d.metadata is None) + + def test_metadata_takes_dict(self): + d = np.dtype(int, metadata={'datum': 1}) + assert_(d.metadata == {'datum': 1}) + + def test_metadata_rejects_nondict(self): + assert_raises(TypeError, np.dtype, int, metadata='datum') + assert_raises(TypeError, np.dtype, int, metadata=1) + assert_raises(TypeError, np.dtype, int, metadata=None) + + def test_nested_metadata(self): + d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))]) + assert_(d['a'].metadata == {'datum': 1}) + + def test_base_metadata_copied(self): + d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1}))) + assert_(d.metadata == {'datum': 1}) + +class TestString: + def test_complex_dtype_str(self): + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(str(dt), + "[('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])]") + + # If the sticky aligned flag is set to True, it makes the + # str() function use a dict representation with an 'aligned' flag + dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], + (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])], + align=True) + assert_equal(str(dt), + "{'names': ['top', 
'bottom']," + " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]]," + " 'offsets': [0, 76800]," + " 'itemsize': 80000," + " 'aligned': True}") + with np.printoptions(legacy='1.21'): + assert_equal(str(dt), + "{'names':['top','bottom'], " + "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,))," + "[('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))]], " + "'offsets':[0,76800], " + "'itemsize':80000, " + "'aligned':True}") + assert_equal(np.dtype(eval(str(dt))), dt) + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}) + assert_equal(str(dt), + "[(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')]") + + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['f4', (64, 64)), (1,)), + ('rtile', '>f4', (64, 36))], (3,)), + ('bottom', [('bleft', ('>f4', (8, 64)), (1,)), + ('bright', '>f4', (8, 36))])]) + assert_equal(repr(dt), + "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), " + "('rtile', '>f4', (64, 36))], (3,)), " + "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), " + "('bright', '>f4', (8, 36))])])") + + dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'], + 'offsets': [0, 1, 2], + 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']}, + align=True) + assert_equal(repr(dt), + "dtype([(('Red pixel', 'r'), 'u1'), " + "(('Green pixel', 'g'), 'u1'), " + "(('Blue pixel', 'b'), 'u1')], align=True)") + + def test_repr_structured_not_packed(self): + dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'], + 'formats': ['i4") + assert np.result_type(dt).isnative + assert np.result_type(dt).num == dt.num + + # dtype with empty space: + struct_dt = np.dtype(">i4,i1,f4', (2, 1)), ('b', 'u4')]) + self.check(BigEndStruct, expected) + + def test_little_endian_structure_packed(self): + class LittleEndStruct(ctypes.LittleEndianStructure): + _fields_ = [ + ('one', ctypes.c_uint8), + ('two', ctypes.c_uint32) + ] + _pack_ = 1 + expected = np.dtype([('one', 'u1'), ('two', 'B'), + ('b', '>H') + ], align=True) + self.check(PaddedStruct, expected) + + def test_simple_endian_types(self): + self.check(ctypes.c_uint16.__ctype_le__, np.dtype('u2')) + self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1')) + self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1')) + + all_types = set(np.typecodes['All']) + all_pairs = permutations(all_types, 2) + + @pytest.mark.parametrize("pair", all_pairs) + def test_pairs(self, pair): + """ + Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')] + Example: np.dtype('d,I') -> dtype([('f0', ' None: + alias = np.dtype[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is np.dtype + + @pytest.mark.parametrize("code", np.typecodes["All"]) + def test_dtype_subclass(self, code: str) -> None: + cls = type(np.dtype(code)) + alias = cls[Any] + assert isinstance(alias, types.GenericAlias) + assert alias.__origin__ is cls + + @pytest.mark.parametrize("arg_len", range(4)) + def test_subscript_tuple(self, arg_len: int) -> None: + arg_tup = (Any,) * arg_len + if arg_len == 1: + assert np.dtype[arg_tup] + else: + with pytest.raises(TypeError): + np.dtype[arg_tup] + + def test_subscript_scalar(self) -> None: + assert np.dtype[Any] + + +def test_result_type_integers_and_unitless_timedelta64(): + # Regression test for gh-20077. 
The following call of `result_type` + # would cause a seg. fault. + td = np.timedelta64(4) + result = np.result_type(0, td) + assert_dtype_equal(result, td.dtype) + + +def test_creating_dtype_with_dtype_class_errors(): + # Regression test for #25031, calling `np.dtype` with itself segfaulted. + with pytest.raises(TypeError, match="Cannot convert np.dtype into a"): + np.array(np.ones(10), dtype=np.dtype) diff --git a/python/numpy/_core/tests/test_einsum.py b/python/numpy/_core/tests/test_einsum.py new file mode 100644 index 000000000..0bd180b5e --- /dev/null +++ b/python/numpy/_core/tests/test_einsum.py @@ -0,0 +1,1317 @@ +import itertools + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) + +# Setup for optimize einsum +chars = 'abcdefghij' +sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) +global_size_dict = dict(zip(chars, sizes)) + + +class TestEinsum: + @pytest.mark.parametrize("do_opt", [True, False]) + @pytest.mark.parametrize("einsum_fn", [np.einsum, np.einsum_path]) + def test_einsum_errors(self, do_opt, einsum_fn): + # Need enough arguments + assert_raises(ValueError, einsum_fn, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "", optimize=do_opt) + + # subscripts must be a string + assert_raises(TypeError, einsum_fn, 0, 0, optimize=do_opt) + + # issue 4528 revealed a segfault with this call + assert_raises(TypeError, einsum_fn, *(None,) * 63, optimize=do_opt) + + # number of operands must match count in subscripts string + assert_raises(ValueError, einsum_fn, "", 0, 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, ",", 0, [0], [0], + optimize=do_opt) + assert_raises(ValueError, einsum_fn, ",", [0], optimize=do_opt) + + # can't have more subscripts than dimensions in the operand + assert_raises(ValueError, einsum_fn, "i", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ij", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "...i", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i...j", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i...", 0, optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ij...", [0, 0], optimize=do_opt) + + # invalid ellipsis + assert_raises(ValueError, einsum_fn, "i..", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, ".i...", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "j->..j", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "j->.j...", [0, 0], + optimize=do_opt) + + # invalid subscript character + assert_raises(ValueError, einsum_fn, "i%...", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "...j$", [0, 0], optimize=do_opt) + assert_raises(ValueError, einsum_fn, "i->&", [0, 0], optimize=do_opt) + + # output subscripts must appear in input + assert_raises(ValueError, einsum_fn, "i->ij", [0, 0], optimize=do_opt) + + # output subscripts may only be specified once + assert_raises(ValueError, einsum_fn, "ij->jij", [[0, 0], [0, 0]], + optimize=do_opt) + + # dimensions must match when being collapsed + assert_raises(ValueError, einsum_fn, "ii", + np.arange(6).reshape(2, 3), optimize=do_opt) + assert_raises(ValueError, einsum_fn, "ii->i", + np.arange(6).reshape(2, 3), optimize=do_opt) + + with assert_raises_regex(ValueError, "'b'"): + # gh-11221 - 'c' erroneously appeared in the error message + a = np.ones((3, 3, 4, 5, 6)) + b = np.ones((3, 4, 5)) + 
einsum_fn('aabcb,abc', a, b) + + def test_einsum_sorting_behavior(self): + # Case 1: 26 dimensions (all indices map to uppercase letters) + n1 = 26 + x1 = np.random.random((1,) * n1) + path1 = np.einsum_path(x1, range(n1))[1] # Get einsum path details + output_indices1 = path1.split("->")[-1].strip() # Extract output indices + # Assert indices are only uppercase letters and sorted correctly + assert all(c.isupper() for c in output_indices1), ( + "Output indices for n=26 should use uppercase letters only: " + f"{output_indices1}" + ) + assert_equal( + output_indices1, + ''.join(sorted(output_indices1)), + err_msg=( + "Output indices for n=26 are not lexicographically sorted: " + f"{output_indices1}" + ) + ) + + # Case 2: 27 dimensions (includes lowercase indices) + n2 = 27 + x2 = np.random.random((1,) * n2) + path2 = np.einsum_path(x2, range(n2))[1] + output_indices2 = path2.split("->")[-1].strip() + # Assert indices include both uppercase and lowercase letters + assert any(c.islower() for c in output_indices2), ( + "Output indices for n=27 should include lowercase letters: " + f"{output_indices2}" + ) + # Assert output indices are sorted uppercase before lowercase + assert_equal( + output_indices2, + ''.join(sorted(output_indices2)), + err_msg=( + "Output indices for n=27 are not lexicographically sorted: " + f"{output_indices2}" + ) + ) + + # Additional check: ensure dimensions correspond correctly to indices + # Generate expected mapping of dimensions to indices + expected_indices = [ + chr(i + ord('A')) if i < 26 else chr(i - 26 + ord('a')) + for i in range(n2) + ] + assert_equal( + output_indices2, + ''.join(expected_indices), + err_msg=( + "Output indices do not map to the correct dimensions. Expected: " + f"{''.join(expected_indices)}, Got: {output_indices2}" + ) + ) + + @pytest.mark.parametrize("do_opt", [True, False]) + def test_einsum_specific_errors(self, do_opt): + # out parameter must be an array + assert_raises(TypeError, np.einsum, "", 0, out='test', + optimize=do_opt) + + # order parameter must be a valid order + assert_raises(ValueError, np.einsum, "", 0, order='W', + optimize=do_opt) + + # casting parameter must be a valid casting + assert_raises(ValueError, np.einsum, "", 0, casting='blah', + optimize=do_opt) + + # dtype parameter must be a valid dtype + assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type', + optimize=do_opt) + + # other keyword arguments are rejected + assert_raises(TypeError, np.einsum, "", 0, bad_arg=0, optimize=do_opt) + + # broadcasting to new dimensions must be enabled explicitly + assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3), + optimize=do_opt) + assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], + out=np.arange(4).reshape(2, 2), optimize=do_opt) + + # Check order kwarg, asanyarray allows 1d to pass through + assert_raises(ValueError, np.einsum, "i->i", + np.arange(6).reshape(-1, 1), optimize=do_opt, order='d') + + def test_einsum_object_errors(self): + # Exceptions created by object arithmetic should + # successfully propagate + + class CustomException(Exception): + pass + + class DestructoBox: + + def __init__(self, value, destruct): + self._val = value + self._destruct = destruct + + def __add__(self, other): + tmp = self._val + other._val + if tmp >= self._destruct: + raise CustomException + else: + self._val = tmp + return self + + def __radd__(self, other): + if other == 0: + return self + else: + return self.__add__(other) + + def __mul__(self, other): + tmp = self._val * other._val + if tmp >= 
self._destruct: + raise CustomException + else: + self._val = tmp + return self + + def __rmul__(self, other): + if other == 0: + return self + else: + return self.__mul__(other) + + a = np.array([DestructoBox(i, 5) for i in range(1, 10)], + dtype='object').reshape(3, 3) + + # raised from unbuffered_loop_nop1_ndim2 + assert_raises(CustomException, np.einsum, "ij->i", a) + + # raised from unbuffered_loop_nop1_ndim3 + b = np.array([DestructoBox(i, 100) for i in range(27)], + dtype='object').reshape(3, 3, 3) + assert_raises(CustomException, np.einsum, "i...k->...", b) + + # raised from unbuffered_loop_nop2_ndim2 + b = np.array([DestructoBox(i, 55) for i in range(1, 4)], + dtype='object') + assert_raises(CustomException, np.einsum, "ij, j", a, b) + + # raised from unbuffered_loop_nop2_ndim3 + assert_raises(CustomException, np.einsum, "ij, jh", a, a) + + # raised from PyArray_EinsteinSum + assert_raises(CustomException, np.einsum, "ij->", a) + + def test_einsum_views(self): + # pass-through + for do_opt in [True, False]: + a = np.arange(6) + a.shape = (2, 3) + + b = np.einsum("...", a, optimize=do_opt) + assert_(b.base is a) + + b = np.einsum(a, [Ellipsis], optimize=do_opt) + assert_(b.base is a) + + b = np.einsum("ij", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a) + + b = np.einsum(a, [0, 1], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a) + + # output is writeable whenever input is writeable + b = np.einsum("...", a, optimize=do_opt) + assert_(b.flags['WRITEABLE']) + a.flags['WRITEABLE'] = False + b = np.einsum("...", a, optimize=do_opt) + assert_(not b.flags['WRITEABLE']) + + # transpose + a = np.arange(6) + a.shape = (2, 3) + + b = np.einsum("ji", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.T) + + b = np.einsum(a, [1, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.T) + + # diagonal + a = np.arange(9) + a.shape = (3, 3) + + b = np.einsum("ii->i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i] for i in range(3)]) + + # diagonal with various ways of broadcasting an additional dimension + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("...ii->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] for x in a]) + + b = np.einsum("ii...->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(2, 0, 1)]) + + b = np.einsum("...ii->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("jii->ij", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[:, i, i] for i in range(3)]) + + b = np.einsum("ii...->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in 
range(3)]) + + b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->i...", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)]) + + b = np.einsum("i...i->...i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [[x[i, i] for i in range(3)] + for x in a.transpose(1, 0, 2)]) + + # triple diagonal + a = np.arange(27) + a.shape = (3, 3, 3) + + b = np.einsum("iii->i", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, [a[i, i, i] for i in range(3)]) + + # swap axes + a = np.arange(24) + a.shape = (2, 3, 4) + + b = np.einsum("ijk->jik", a, optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt) + assert_(b.base is a) + assert_equal(b, a.swapaxes(0, 1)) + + def check_einsum_sums(self, dtype, do_opt=False): + dtype = np.dtype(dtype) + # Check various sums. Does many sizes to exercise unrolled loops. + + # sum(a, axis=-1) + for n in range(1, 17): + a = np.arange(n, dtype=dtype) + b = np.sum(a, axis=-1) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i->", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0], [], optimize=do_opt), b) + + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.sum(a, axis=-1) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("...i->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), b) + + # sum(a, axis=0) + for n in range(1, 17): + a = np.arange(2 * n, dtype=dtype).reshape(2, n) + b = np.sum(a, axis=0) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) + + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.sum(a, axis=0) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("i...->...", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b) + + # trace(a) + for n in range(1, 17): + a = np.arange(n * n, dtype=dtype).reshape(n, n) + b = np.trace(a) + if hasattr(b, 'astype'): + b = b.astype(dtype) + assert_equal(np.einsum("ii", a, optimize=do_opt), b) + assert_equal(np.einsum(a, [0, 0], optimize=do_opt), b) + + # gh-15961: should accept numpy int64 type in subscript list + np_array = np.asarray([0, 0]) + assert_equal(np.einsum(a, np_array, optimize=do_opt), b) + assert_equal(np.einsum(a, list(np_array), optimize=do_opt), b) + + # multiply(a, b) + assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case + for n in range(1, 17): + a = np.arange(3 * n, dtype=dtype).reshape(3, n) + b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + assert_equal(np.einsum("..., ...", a, b, optimize=do_opt), + np.multiply(a, b)) + assert_equal(np.einsum(a, [Ellipsis], b, 
[Ellipsis], optimize=do_opt), + np.multiply(a, b)) + + # inner(a,b) + for n in range(1, 17): + a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b)) + assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt), + np.inner(a, b)) + + for n in range(1, 11): + a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt), + np.inner(a.T, b.T).T) + assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt), + np.inner(a.T, b.T).T) + + # outer(a,b) + for n in range(1, 17): + a = np.arange(3, dtype=dtype) + 1 + b = np.arange(n, dtype=dtype) + 1 + assert_equal(np.einsum("i,j", a, b, optimize=do_opt), + np.outer(a, b)) + assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt), + np.outer(a, b)) + + # Suppress the complex warnings for the 'as f8' tests + with suppress_warnings() as sup: + sup.filter(np.exceptions.ComplexWarning) + + # matvec(a,b) / a.dot(b) where a is matrix, b is vector + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ij, j", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt), + np.dot(a, b)) + + c = np.arange(4, dtype=dtype) + np.einsum("ij,j", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1], b, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n, dtype=dtype) + assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt), + np.dot(b.T, a.T)) + assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt), + np.dot(b.T, a.T)) + + c = np.arange(4, dtype=dtype) + np.einsum("ji,j", a.T, b.T, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + c[...] = 0 + np.einsum(a.T, [1, 0], b.T, [1], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(b.T.astype('f8'), + a.T.astype('f8')).astype(dtype)) + + # matmat(a,b) / a.dot(b) where a is matrix, b is matrix + for n in range(1, 17): + if n < 8 or dtype != 'f2': + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) + assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt), + np.dot(a, b)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt), + np.dot(a, b)) + + for n in range(1, 17): + a = np.arange(4 * n, dtype=dtype).reshape(4, n) + b = np.arange(n * 6, dtype=dtype).reshape(n, 6) + c = np.arange(24, dtype=dtype).reshape(4, 6) + np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe', + optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + c[...] 
= 0 + np.einsum(a, [0, 1], b, [1, 2], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, + np.dot(a.astype('f8'), + b.astype('f8')).astype(dtype)) + + # matrix triple product (note this is not currently an efficient + # way to multiply 3 matrices) + a = np.arange(12, dtype=dtype).reshape(3, 4) + b = np.arange(20, dtype=dtype).reshape(4, 5) + c = np.arange(30, dtype=dtype).reshape(5, 6) + if dtype != 'f2': + assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt), + a.dot(b).dot(c)) + assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], + optimize=do_opt), a.dot(b).dot(c)) + + d = np.arange(18, dtype=dtype).reshape(3, 6) + np.einsum("ij,jk,kl", a, b, c, out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + d[...] = 0 + np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d, + dtype='f8', casting='unsafe', optimize=do_opt) + tgt = a.astype('f8').dot(b.astype('f8')) + tgt = tgt.dot(c.astype('f8')).astype(dtype) + assert_equal(d, tgt) + + # tensordot(a, b) + if np.dtype(dtype) != np.dtype('f2'): + a = np.arange(60, dtype=dtype).reshape(3, 4, 5) + b = np.arange(24, dtype=dtype).reshape(4, 3, 2) + assert_equal(np.einsum("ijk, jil -> kl", a, b), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]), + np.tensordot(a, b, axes=([1, 0], [0, 1]))) + + c = np.arange(10, dtype=dtype).reshape(5, 2) + np.einsum("ijk,jil->kl", a, b, out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + c[...] = 0 + np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c, + dtype='f8', casting='unsafe', optimize=do_opt) + assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'), + axes=([1, 0], [0, 1])).astype(dtype)) + + # logical_and(logical_and(a!=0, b!=0), c!=0) + neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1 + a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype) + b = np.array([0, 3.5, 0., neg_val, 0, 1, 3, 12], dtype=dtype) + c = np.array([True, True, False, True, True, False, True, True]) + + assert_equal(np.einsum("i,i,i->i", a, b, c, + dtype='?', casting='unsafe', optimize=do_opt), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + assert_equal(np.einsum(a, [0], b, [0], c, [0], [0], + dtype='?', casting='unsafe'), + np.logical_and(np.logical_and(a != 0, b != 0), c != 0)) + + a = np.arange(9, dtype=dtype) + assert_equal(np.einsum(",i->", 3, a), 3 * np.sum(a)) + assert_equal(np.einsum(3, [], a, [0], []), 3 * np.sum(a)) + assert_equal(np.einsum("i,->", a, 3), 3 * np.sum(a)) + assert_equal(np.einsum(a, [0], 3, [], []), 3 * np.sum(a)) + + # Various stride0, contiguous, and SSE aligned variants + for n in range(1, 25): + a = np.arange(n, dtype=dtype) + if np.dtype(dtype).itemsize > 1: + assert_equal(np.einsum("...,...", a, a, optimize=do_opt), + np.multiply(a, a)) + assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a)) + assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2 * a) + assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2 * a) + assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2 * np.sum(a)) + assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2 * np.sum(a)) + + assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt), + np.multiply(a[1:], a[:-1])) + assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt), + np.dot(a[1:], a[:-1])) + 
assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2 * a[1:]) + assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2 * a[1:]) + assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt), + 2 * np.sum(a[1:])) + assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt), + 2 * np.sum(a[1:])) + + # An object array, summed as the data type + a = np.arange(9, dtype=object) + + b = np.einsum("i->", a, dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + if hasattr(b, "dtype"): + # Can be a python object when dtype is object + assert_equal(b.dtype, np.dtype(dtype)) + + b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe') + assert_equal(b, np.sum(a)) + if hasattr(b, "dtype"): + # Can be a python object when dtype is object + assert_equal(b.dtype, np.dtype(dtype)) + + # A case which was failing (ticket #1885) + p = np.arange(2) + 1 + q = np.arange(4).reshape(2, 2) + 3 + r = np.arange(4).reshape(2, 2) + 7 + assert_equal(np.einsum('z,mz,zm->', p, q, r), 253) + + # singleton dimensions broadcast (gh-10343) + p = np.ones((10, 2)) + q = np.ones((1, 2)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + np.einsum('ij,ij->j', p, q, optimize=False)) + assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True), + [10.] * 2) + + # a blas-compatible contraction broadcasting case which was failing + # for optimize=True (ticket #10930) + x = np.array([2., 3.]) + y = np.array([4.]) + assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.) + assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.) + + # all-ones array was bypassing bug (ticket #10930) + p = np.ones((1, 5)) / 2 + q = np.ones((5, 5)) / 2 + for optimize in (True, False): + assert_array_equal(np.einsum("...ij,...jk->...ik", p, p, + optimize=optimize), + np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize)) + assert_array_equal(np.einsum("...ij,...jk->...ik", p, q, + optimize=optimize), + np.full((1, 5), 1.25)) + + # Cases which were failing (gh-10899) + x = np.eye(2, dtype=dtype) + y = np.ones(2, dtype=dtype) + assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize), + [2.]) # contig_contig_outstride0_two + assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize), + [2.]) # stride0_contig_outstride0_two + assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize), + [2.]) # contig_stride0_outstride0_two + + def test_einsum_sums_int8(self): + self.check_einsum_sums('i1') + + def test_einsum_sums_uint8(self): + self.check_einsum_sums('u1') + + def test_einsum_sums_int16(self): + self.check_einsum_sums('i2') + + def test_einsum_sums_uint16(self): + self.check_einsum_sums('u2') + + def test_einsum_sums_int32(self): + self.check_einsum_sums('i4') + self.check_einsum_sums('i4', True) + + def test_einsum_sums_uint32(self): + self.check_einsum_sums('u4') + self.check_einsum_sums('u4', True) + + def test_einsum_sums_int64(self): + self.check_einsum_sums('i8') + + def test_einsum_sums_uint64(self): + self.check_einsum_sums('u8') + + def test_einsum_sums_float16(self): + self.check_einsum_sums('f2') + + def test_einsum_sums_float32(self): + self.check_einsum_sums('f4') + + def test_einsum_sums_float64(self): + self.check_einsum_sums('f8') + self.check_einsum_sums('f8', True) + + def test_einsum_sums_longdouble(self): + self.check_einsum_sums(np.longdouble) + + def test_einsum_sums_cfloat64(self): + self.check_einsum_sums('c8') + self.check_einsum_sums('c8', True) + + def test_einsum_sums_cfloat128(self): + self.check_einsum_sums('c16') + + def 
test_einsum_sums_clongdouble(self): + self.check_einsum_sums(np.clongdouble) + + def test_einsum_sums_object(self): + self.check_einsum_sums('object') + self.check_einsum_sums('object', True) + + def test_einsum_misc(self): + # This call used to crash because of a bug in + # PyArray_AssignZero + a = np.ones((1, 2)) + b = np.ones((2, 2, 1)) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]]) + + # Regression test for issue #10369 (test unicode inputs with Python 2) + assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]]) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20) + assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4], + optimize='greedy'), 20) + + # The iterator had an issue with buffering this reduction + a = np.ones((5, 12, 4, 2, 3), np.int64) + b = np.ones((5, 12, 11), np.int64) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b), + np.einsum('ijklm,ijn->', a, b)) + assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True), + np.einsum('ijklm,ijn->', a, b, optimize=True)) + + # Issue #2027, was a problem in the contiguous 3-argument + # inner loop implementation + a = np.arange(1, 3) + b = np.arange(1, 5).reshape(2, 2) + c = np.arange(1, 9).reshape(4, 2) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True), + [[[1, 3], [3, 9], [5, 15], [7, 21]], + [[8, 16], [16, 32], [24, 48], [32, 64]]]) + + # Ensure explicitly setting out=None does not cause an error + # see issue gh-15776 and issue gh-15256 + assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]]) + + def test_object_loop(self): + + class Mult: + def __mul__(self, other): + return 42 + + objMult = np.array([Mult()]) + objNULL = np.ndarray(buffer=b'\0' * np.intp(0).itemsize, shape=1, dtype=object) + + with pytest.raises(TypeError): + np.einsum("i,j", [1], objNULL) + with pytest.raises(TypeError): + np.einsum("i,j", objNULL, [1]) + assert np.einsum("i,j", objMult, objMult) == 42 + + def test_subscript_range(self): + # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used + # when creating a subscript from arrays + a = np.ones((2, 3)) + b = np.ones((3, 4)) + np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False) + np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False) + np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False) + assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False)) + assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False)) + + def test_einsum_broadcast(self): + # Issue #2455 change in handling ellipsis + # remove the 'middle broadcast' error + # only use the 'RIGHT' iteration in prepare_op_axes + # adds auto broadcast on left where it belongs + # broadcast on right has to be explicit + # We need to test the optimized parsing as well + + A = np.arange(2 * 3 * 4).reshape(2, 3, 4) + B = np.arange(3) + ref = np.einsum('ijk,j->ijk', A, B, optimize=False) + for opt in [True, False]: + assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref) + assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error + + A = np.arange(12).reshape((4, 3)) + B = np.arange(6).reshape((3, 2)) + ref = np.einsum('ik,kj->ij', A, B, 
optimize=False)
+        for opt in [True, False]:
+            assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
+            assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
+            assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref)  # used to raise error
+            assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref)  # used to raise error
+
+        dims = [2, 3, 4, 5]
+        a = np.arange(np.prod(dims)).reshape(dims)
+        v = np.arange(dims[2])
+        ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
+        for opt in [True, False]:
+            assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
+            assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref)  # used to raise error
+            assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
+
+        J, K, M = 160, 160, 120
+        A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
+        B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
+        ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
+        for opt in [True, False]:
+            assert_equal(np.einsum('...lmn,lmno->...o', A, B,
+                                   optimize=opt), ref)  # used to raise error
+
+    def test_einsum_fixedstridebug(self):
+        # Issue #4485 obscure einsum bug
+        # This case revealed a bug in nditer where it reported a stride
+        # as 'fixed' (0) when it was in fact not fixed during processing
+        # (0 or 4). The reason for the bug was that the check for a fixed
+        # stride was using the information from the 2D inner loop reuse
+        # to restrict the iteration dimensions it had to validate to be
+        # the same, but that 2D inner loop reuse logic is only triggered
+        # during the buffer copying step, and hence it was invalid to
+        # rely on those values. The fix is to check all the dimensions
+        # of the stride in question, which in the test case reveals that
+        # the stride is not fixed.
+        #
+        # NOTE: This test is triggered by the fact that the default buffersize,
+        #       used by einsum, is 8192, and 3*2731 = 8193 is larger than that,
+        #       which results in a mismatch between the buffering and the
+        #       striding for operand A.
+        A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
+        B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
+        es = np.einsum('cl, cpx->lpx', A, B)
+        tp = np.tensordot(A, B, axes=(0, 0))
+        assert_equal(es, tp)
+        # The following is the original test case from the bug report,
+        # made repeatable by changing random arrays to aranges.
+        A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
+        B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
+        es = np.einsum('cl, cpxy->lpxy', A, B)
+        tp = np.tensordot(A, B, axes=(0, 0))
+        assert_equal(es, tp)
+
+    def test_einsum_fixed_collapsingbug(self):
+        # Issue #5147.
+        # The bug only occurred when the output argument of einsum was used.
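+        # Editorial sketch, not part of the upstream test: 'aabb->ab' takes
+        # two diagonals, so for the (5, 5, 5, 5) input below the expected
+        # result satisfies y[a, b] == x[a, a, b, b]; the collapsing bug only
+        # appeared when that result was written through out= instead of
+        # being returned.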
+ x = np.random.normal(0, 1, (5, 5, 5, 5)) + y1 = np.zeros((5, 5)) + np.einsum('aabb->ab', x, out=y1) + idx = np.arange(5) + y2 = x[idx[:, None], idx[:, None], idx, idx] + assert_equal(y1, y2) + + def test_einsum_failed_on_p9_and_s390x(self): + # Issues gh-14692 and gh-12689 + # Bug with signed vs unsigned char errored on power9 and s390x Linux + tensor = np.random.random_sample((10, 10, 10, 10)) + x = np.einsum('ijij->', tensor) + y = tensor.trace(axis1=0, axis2=2).trace() + assert_allclose(x, y) + + def test_einsum_all_contig_non_contig_output(self): + # Issue gh-5907, tests that the all contiguous special case + # actually checks the contiguity of the output + x = np.ones((5, 5)) + out = np.ones(10)[::2] + correct_base = np.ones(10) + correct_base[::2] = 5 + # Always worked (inner iteration is done with 0-stride): + np.einsum('mi,mi,mi->m', x, x, x, out=out) + assert_array_equal(out.base, correct_base) + # Example 1: + out = np.ones(10)[::2] + np.einsum('im,im,im->m', x, x, x, out=out) + assert_array_equal(out.base, correct_base) + # Example 2, buffering causes x to be contiguous but + # special cases do not catch the operation before: + out = np.ones((2, 2, 2))[..., 0] + correct_base = np.ones((2, 2, 2)) + correct_base[..., 0] = 2 + x = np.ones((2, 2), np.float32) + np.einsum('ij,jk->ik', x, x, out=out) + assert_array_equal(out.base, correct_base) + + @pytest.mark.parametrize("dtype", + np.typecodes["AllFloat"] + np.typecodes["AllInteger"]) + def test_different_paths(self, dtype): + # Test originally added to cover broken float16 path: gh-20305 + # Likely most are covered elsewhere, at least partially. + dtype = np.dtype(dtype) + # Simple test, designed to exercise most specialized code paths, + # note the +0.5 for floats. This makes sure we use a float value + # where the results must be exact. + arr = (np.arange(7) + 0.5).astype(dtype) + scalar = np.array(2, dtype=dtype) + + # contig -> scalar: + res = np.einsum('i->', arr) + assert res == arr.sum() + # contig, contig -> contig: + res = np.einsum('i,i->i', arr, arr) + assert_array_equal(res, arr * arr) + # noncontig, noncontig -> contig: + res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2]) + assert_array_equal(res, arr * arr) + # contig + contig -> scalar + assert np.einsum('i,i->', arr, arr) == (arr * arr).sum() + # contig + scalar -> contig (with out) + out = np.ones(7, dtype=dtype) + res = np.einsum('i,->i', arr, dtype.type(2), out=out) + assert_array_equal(res, arr * dtype.type(2)) + # scalar + contig -> contig (with out) + res = np.einsum(',i->i', scalar, arr) + assert_array_equal(res, arr * dtype.type(2)) + # scalar + contig -> scalar + res = np.einsum(',i->', scalar, arr) + # Use einsum to compare to not have difference due to sum round-offs: + assert res == np.einsum('i->', scalar * arr) + # contig + scalar -> scalar + res = np.einsum('i,->', arr, scalar) + # Use einsum to compare to not have difference due to sum round-offs: + assert res == np.einsum('i->', scalar * arr) + # contig + contig + contig -> scalar + arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype) + res = np.einsum('i,i,i->', arr, arr, arr) + assert_array_equal(res, (arr * arr * arr).sum()) + # four arrays: + res = np.einsum('i,i,i,i->', arr, arr, arr, arr) + assert_array_equal(res, (arr * arr * arr * arr).sum()) + + def test_small_boolean_arrays(self): + # See gh-5946. + # Use array of True embedded in False. + a = np.zeros((16, 1, 1), dtype=np.bool)[:2] + a[...] 
= True + out = np.zeros((16, 1, 1), dtype=np.bool)[:2] + tgt = np.ones((2, 1, 1), dtype=np.bool) + res = np.einsum('...ij,...jk->...ik', a, a, out=out) + assert_equal(res, tgt) + + def test_out_is_res(self): + a = np.arange(9).reshape(3, 3) + res = np.einsum('...ij,...jk->...ik', a, a, out=a) + assert res is a + + def optimize_compare(self, subscripts, operands=None): + # Tests all paths of the optimization function against + # conventional einsum + if operands is None: + args = [subscripts] + terms = subscripts.split('->')[0].split(',') + for term in terms: + dims = [global_size_dict[x] for x in term] + args.append(np.random.rand(*dims)) + else: + args = [subscripts] + operands + + noopt = np.einsum(*args, optimize=False) + opt = np.einsum(*args, optimize='greedy') + assert_almost_equal(opt, noopt) + opt = np.einsum(*args, optimize='optimal') + assert_almost_equal(opt, noopt) + + def test_hadamard_like_products(self): + # Hadamard outer products + self.optimize_compare('a,ab,abc->abc') + self.optimize_compare('a,b,ab->ab') + + def test_index_transformations(self): + # Simple index transformation cases + self.optimize_compare('ea,fb,gc,hd,abcd->efgh') + self.optimize_compare('ea,fb,abcd,gc,hd->efgh') + self.optimize_compare('abcd,ea,fb,gc,hd->efgh') + + def test_complex(self): + # Long test cases + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac') + self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac') + self.optimize_compare('abhe,hidj,jgba,hiab,gab') + self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac') + self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad') + self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb') + self.optimize_compare('bdhe,acad,hiab,agac,hibd') + + def test_collapse(self): + # Inner products + self.optimize_compare('ab,ab,c->') + self.optimize_compare('ab,ab,c->c') + self.optimize_compare('ab,ab,cd,cd->') + self.optimize_compare('ab,ab,cd,cd->ac') + self.optimize_compare('ab,ab,cd,cd->cd') + self.optimize_compare('ab,ab,cd,cd,ef,ef->') + + def test_expand(self): + # Outer products + self.optimize_compare('ab,cd,ef->abcdef') + self.optimize_compare('ab,cd,ef->acdf') + self.optimize_compare('ab,cd,de->abcde') + self.optimize_compare('ab,cd,de->be') + self.optimize_compare('ab,bcd,cd->abcd') + self.optimize_compare('ab,bcd,cd->abd') + + def test_edge_cases(self): + # Difficult edge cases for optimization + self.optimize_compare('eb,cb,fb->cef') + self.optimize_compare('dd,fb,be,cdb->cef') + self.optimize_compare('bca,cdb,dbf,afc->') + self.optimize_compare('dcc,fce,ea,dbf->ab') + self.optimize_compare('fdf,cdd,ccd,afe->ae') + self.optimize_compare('abcd,ad') + self.optimize_compare('ed,fcd,ff,bcf->be') + self.optimize_compare('baa,dcf,af,cde->be') + self.optimize_compare('bd,db,eac->ace') + self.optimize_compare('fff,fae,bef,def->abd') + self.optimize_compare('efc,dbc,acf,fd->abe') + self.optimize_compare('ba,ac,da->bcd') + + def test_inner_product(self): + # Inner products + self.optimize_compare('ab,ab') + self.optimize_compare('ab,ba') + self.optimize_compare('abc,abc') + self.optimize_compare('abc,bac') + self.optimize_compare('abc,cba') + + def test_random_cases(self): + # Randomly built test cases + self.optimize_compare('aab,fa,df,ecc->bde') + self.optimize_compare('ecb,fef,bad,ed->ac') + self.optimize_compare('bcf,bbb,fbf,fc->') + self.optimize_compare('bb,ff,be->e') + self.optimize_compare('bcb,bb,fc,fff->') + self.optimize_compare('fbb,dfd,fc,fc->') + 
self.optimize_compare('afd,ba,cc,dc->bf')
+        self.optimize_compare('adb,bc,fa,cfc->d')
+        self.optimize_compare('bbd,bda,fc,db->acf')
+        self.optimize_compare('dba,ead,cad->bce')
+        self.optimize_compare('aef,fbc,dca->bde')
+
+    def test_combined_views_mapping(self):
+        # gh-10792
+        a = np.arange(9).reshape(1, 1, 3, 1, 3)
+        b = np.einsum('bbcdc->d', a)
+        assert_equal(b, [12])
+
+    def test_broadcasting_dot_cases(self):
+        # Ensures broadcasting cases are not mistaken for GEMM
+
+        a = np.random.rand(1, 5, 4)
+        b = np.random.rand(4, 6)
+        c = np.random.rand(5, 6)
+        d = np.random.rand(10)
+
+        self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
+        self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
+
+        e = np.random.rand(1, 1, 5, 4)
+        f = np.random.rand(7, 7)
+        self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
+        self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
+
+        # Edge case found in gh-11308
+        g = np.arange(64).reshape(2, 4, 8)
+        self.optimize_compare('obk,ijk->ioj', operands=[g, g])
+
+    def test_output_order(self):
+        # Ensure output order is respected for optimize cases; the below
+        # contraction should yield a reshaped tensor view
+        # gh-16415
+
+        a = np.ones((2, 3, 5), order='F')
+        b = np.ones((4, 3), order='F')
+
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
+            assert_(tmp.flags.f_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
+            assert_(tmp.flags.f_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
+            assert_(tmp.flags.c_contiguous is False)
+            assert_(tmp.flags.f_contiguous is False)
+
+            tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
+            assert_(tmp.flags.c_contiguous is False)
+            assert_(tmp.flags.f_contiguous is False)
+
+        c = np.ones((4, 3), order='C')
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
+
+        d = np.ones((2, 3, 5), order='C')
+        for opt in [True, False]:
+            tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
+            assert_(tmp.flags.c_contiguous)
+
+class TestEinsumPath:
+    def build_operands(self, string, size_dict=global_size_dict):
+
+        # Builds views based off initial operands
+        operands = [string]
+        terms = string.split('->')[0].split(',')
+        for term in terms:
+            dims = [size_dict[x] for x in term]
+            operands.append(np.random.rand(*dims))
+
+        return operands
+
+    def assert_path_equal(self, comp, benchmark):
+        # Checks that two lists of tuples are equivalent
+        ret = (len(comp) == len(benchmark))
+        assert_(ret)
+        for pos in range(len(comp) - 1):
+            ret &= isinstance(comp[pos + 1], tuple)
+            ret &= (comp[pos + 1] == benchmark[pos + 1])
+        assert_(ret)
+
+    def test_memory_constraints(self):
+        # Ensure memory constraints are satisfied
+
+        outer_test = self.build_operands('a,b,c->abc')
+
+        path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+        path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
+
+        long_test = self.build_operands('acdf,jbje,gihb,hfac')
+        path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+        path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+    def test_long_paths(self):
+        # Long complex cases
+
+        # Long test 1
+        long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
+        path, path_str = np.einsum_path(*long_test1, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*long_test1, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
+
+        # Long test 2
+        long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
+        path, path_str = np.einsum_path(*long_test2, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*long_test2, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path',
+                                      (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
+
+    def test_edge_paths(self):
+        # Difficult edge cases
+
+        # Edge test1
+        edge_test1 = self.build_operands('eb,cb,fb->cef')
+        path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
+
+        # Edge test2
+        edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
+        path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
+
+        # Edge test3
+        edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
+        path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        # Edge test4
+        edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
+        path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+        path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
+
+        # Edge test5
+        edge_test5 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
+                                         size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
+        path, path_str = np.einsum_path(*edge_test5, optimize='greedy')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+        path, path_str = np.einsum_path(*edge_test5, optimize='optimal')
+        self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
+
+    def test_path_type_input(self):
+        # Test explicit path handling
+        path_test = self.build_operands('dcc,fce,ea,dbf->ab')
+
+        path, path_str = np.einsum_path(*path_test, optimize=False)
+        self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
+
+        path, path_str = np.einsum_path(*path_test, optimize=True)
+        self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
+
+        exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
+        path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+        self.assert_path_equal(path, exp_path)
+
+        # Double check einsum works on the input path
+        noopt = np.einsum(*path_test, optimize=False)
+        opt = np.einsum(*path_test, optimize=exp_path)
+        assert_almost_equal(noopt, opt)
+
+    def test_path_type_input_internal_trace(self):
+        # gh-20962
+        path_test = self.build_operands('cab,cdd->ab')
+        exp_path = ['einsum_path', (1,), (0, 1)]
+
+        path, path_str = np.einsum_path(*path_test, optimize=exp_path)
+        self.assert_path_equal(path, exp_path)
+
+        # Double check einsum works on the input path
+        noopt = np.einsum(*path_test, optimize=False)
+        opt = np.einsum(*path_test, optimize=exp_path)
+        assert_almost_equal(noopt, opt)
+
+    def test_path_type_input_invalid(self):
+        path_test = self.build_operands('ab,bc,cd,de->ae')
+        exp_path = ['einsum_path', (2, 3), (0, 1)]
+        assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
+        assert_raises(
+            RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
+
+        path_test = self.build_operands('a,a,a->a')
+        exp_path = ['einsum_path', (1,), (0, 1)]
+        assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
+        assert_raises(
+            RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
+
+    def test_spaces(self):
+        # gh-10794
+        arr = np.array([[1]])
+        for sp in itertools.product(['', ' '], repeat=4):
+            # no error for any spacing
+            np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
+
+def test_overlap():
+    a = np.arange(9, dtype=int).reshape(3, 3)
+    b = np.arange(9, dtype=int).reshape(3, 3)
+    d = np.dot(a, b)
+    # sanity check
+    c = np.einsum('ij,jk->ik', a, b)
+    assert_equal(c, d)
+    # gh-10080, out overlaps one of the operands
+    c = np.einsum('ij,jk->ik', a, b, out=b)
+    assert_equal(c, d)
+
+def test_einsum_chunking_precision():
+    """Most einsum operations are reductions, and until NumPy 2.3 reductions
+    never (or almost never?) used the `GROWINNER` mechanism to increase the
+    inner loop size when no buffers are needed.
+    Because einsum reductions work roughly like:
+
+        def inner(*inputs, out):
+            accumulate = 0
+            for vals in zip(*inputs):
+                accumulate += prod(vals)
+            out[0] += accumulate
+
+    calling the inner loop more often actually improves accuracy slightly
+    (the same effect as pairwise summation, but much weaker).
+    Without adding pairwise summation to the inner loop it seems best to just
+    not use GROWINNER; a quick test suggests that is maybe a 1% slowdown for
+    the simplest `einsum("i,i->i", x, x)` case.
+
+    (It is not clear that we should guarantee precision to this extent.)
+    """
+    num = 1_000_000
+    value = 1. + np.finfo(np.float64).eps * 8196
+    res = np.einsum("i->", np.broadcast_to(np.array(value), num)) / num
+
+    # With GROWINNER only 11 decimals succeed (larger sizes give fewer)
+    assert_almost_equal(res, value, decimal=15)
diff --git a/python/numpy/_core/tests/test_errstate.py b/python/numpy/_core/tests/test_errstate.py
new file mode 100644
index 000000000..b72fb65a3
--- /dev/null
+++ b/python/numpy/_core/tests/test_errstate.py
@@ -0,0 +1,131 @@
+import sysconfig
+
+import pytest
+
+import numpy as np
+from numpy.testing import IS_WASM, assert_raises
+
+# The floating point emulation on ARM EABI systems lacking a hardware FPU is
+# known to be buggy. This is an attempt to identify these hosts. It may not
+# catch all possible cases, but it catches the known cases of gh-413 and
+# gh-15562.
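+# (Illustrative examples, added editorially: a soft-float host typically
+# reports a HOST_GNU_TYPE such as 'arm-linux-gnueabi', which the
+# endswith('gnueabi') check below matches, while hard-float builds report
+# 'arm-linux-gnueabihf' and are deliberately not matched.)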
+hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
+arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi')
+
+class TestErrstate:
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-413,-15562)')
+    def test_invalid(self):
+        with np.errstate(all='raise', under='ignore'):
+            a = -np.arange(3)
+            # This should work
+            with np.errstate(invalid='ignore'):
+                np.sqrt(a)
+            # While this should fail!
+            with assert_raises(FloatingPointError):
+                np.sqrt(a)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-15562)')
+    def test_divide(self):
+        with np.errstate(all='raise', under='ignore'):
+            a = -np.arange(3)
+            # This should work
+            with np.errstate(divide='ignore'):
+                a // 0
+            # While this should fail!
+            with assert_raises(FloatingPointError):
+                a // 0
+            # As should this, see gh-15562
+            with assert_raises(FloatingPointError):
+                a // a
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.skipif(arm_softfloat,
+                        reason='platform/cpu issue with FPU (gh-15562)')
+    def test_errcall(self):
+        count = 0
+
+        def foo(*args):
+            nonlocal count
+            count += 1
+
+        olderrcall = np.geterrcall()
+        with np.errstate(call=foo):
+            assert np.geterrcall() is foo
+            with np.errstate(call=None):
+                assert np.geterrcall() is None
+        assert np.geterrcall() is olderrcall
+        assert count == 0
+
+        with np.errstate(call=foo, invalid="call"):
+            np.array(np.inf) - np.array(np.inf)
+
+        assert count == 1
+
+    def test_errstate_decorator(self):
+        @np.errstate(all='ignore')
+        def foo():
+            a = -np.arange(3)
+            a // 0
+
+        foo()
+
+    def test_errstate_enter_once(self):
+        errstate = np.errstate(invalid="warn")
+        with errstate:
+            pass
+
+        # The errstate context cannot be entered twice as that would not be
+        # thread-safe
+        with pytest.raises(TypeError,
+                           match="Cannot enter `np.errstate` twice"):
+            with errstate:
+                pass
+
+    @pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio")
+    def test_asyncio_safe(self):
+        # asyncio may not always work; let's assume it's fine if missing.
+        # Pyodide/wasm doesn't support it. If this test causes problems,
+        # it should just be skipped liberally (or run differently).
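+        # Editorial note: the behaviour exercised here is that np.errstate
+        # keeps its state per execution context rather than globally, so the
+        # interleaved coroutines below must each observe only the error mode
+        # they set themselves.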
+ asyncio = pytest.importorskip("asyncio") + + @np.errstate(invalid="ignore") + def decorated(): + # Decorated non-async function (it is not safe to decorate an + # async one) + assert np.geterr()["invalid"] == "ignore" + + async def func1(): + decorated() + await asyncio.sleep(0.1) + decorated() + + async def func2(): + with np.errstate(invalid="raise"): + assert np.geterr()["invalid"] == "raise" + await asyncio.sleep(0.125) + assert np.geterr()["invalid"] == "raise" + + # for good sport, a third one with yet another state: + async def func3(): + with np.errstate(invalid="print"): + assert np.geterr()["invalid"] == "print" + await asyncio.sleep(0.11) + assert np.geterr()["invalid"] == "print" + + async def main(): + # simply run all three function multiple times: + await asyncio.gather( + func1(), func2(), func3(), func1(), func2(), func3(), + func1(), func2(), func3(), func1(), func2(), func3()) + + loop = asyncio.new_event_loop() + with np.errstate(invalid="warn"): + asyncio.run(main()) + assert np.geterr()["invalid"] == "warn" + + assert np.geterr()["invalid"] == "warn" # the default + loop.close() diff --git a/python/numpy/_core/tests/test_extint128.py b/python/numpy/_core/tests/test_extint128.py new file mode 100644 index 000000000..1a05151ac --- /dev/null +++ b/python/numpy/_core/tests/test_extint128.py @@ -0,0 +1,217 @@ +import contextlib +import itertools +import operator + +import numpy._core._multiarray_tests as mt +import pytest + +import numpy as np +from numpy.testing import assert_equal, assert_raises + +INT64_MAX = np.iinfo(np.int64).max +INT64_MIN = np.iinfo(np.int64).min +INT64_MID = 2**32 + +# int128 is not two's complement, the sign bit is separate +INT128_MAX = 2**128 - 1 +INT128_MIN = -INT128_MAX +INT128_MID = 2**64 + +INT64_VALUES = ( + [INT64_MIN + j for j in range(20)] + + [INT64_MAX - j for j in range(20)] + + [INT64_MID + j for j in range(-20, 20)] + + [2 * INT64_MID + j for j in range(-20, 20)] + + [INT64_MID // 2 + j for j in range(-20, 20)] + + list(range(-70, 70)) +) + +INT128_VALUES = ( + [INT128_MIN + j for j in range(20)] + + [INT128_MAX - j for j in range(20)] + + [INT128_MID + j for j in range(-20, 20)] + + [2 * INT128_MID + j for j in range(-20, 20)] + + [INT128_MID // 2 + j for j in range(-20, 20)] + + list(range(-70, 70)) + + [False] # negative zero +) + +INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0] + + +@contextlib.contextmanager +def exc_iter(*args): + """ + Iterate over Cartesian product of *args, and if an exception is raised, + add information of the current iterate. 
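+
+    A minimal usage sketch (an editorial illustration, not from the
+    original):
+
+        with exc_iter([0, 1], [2, 3]) as it:
+            for a, b in it:
+                assert a < b  # a failure names the offending (a, b) pair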
+    """
+
+    value = [None]
+
+    def iterate():
+        for v in itertools.product(*args):
+            value[0] = v
+            yield v
+
+    try:
+        yield iterate()
+    except Exception:
+        import traceback
+        msg = f"At: {value[0]!r}\n{traceback.format_exc()}"
+        raise AssertionError(msg)
+
+
+def test_safe_binop():
+    # Test checked arithmetic routines
+
+    ops = [
+        (operator.add, 1),
+        (operator.sub, 2),
+        (operator.mul, 3)
+    ]
+
+    with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
+        for xop, a, b in it:
+            pyop, op = xop
+            c = pyop(a, b)
+
+            if not (INT64_MIN <= c <= INT64_MAX):
+                assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
+            else:
+                d = mt.extint_safe_binop(a, b, op)
+                if c != d:
+                    # assert_equal is slow
+                    assert_equal(d, c)
+
+
+def test_to_128():
+    with exc_iter(INT64_VALUES) as it:
+        for a, in it:
+            b = mt.extint_to_128(a)
+            if a != b:
+                assert_equal(b, a)
+
+
+def test_to_64():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if not (INT64_MIN <= a <= INT64_MAX):
+                assert_raises(OverflowError, mt.extint_to_64, a)
+            else:
+                b = mt.extint_to_64(a)
+                if a != b:
+                    assert_equal(b, a)
+
+
+def test_mul_64_64():
+    with exc_iter(INT64_VALUES, INT64_VALUES) as it:
+        for a, b in it:
+            c = a * b
+            d = mt.extint_mul_64_64(a, b)
+            if c != d:
+                assert_equal(d, c)
+
+
+def test_add_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a + b
+            if not (INT128_MIN <= c <= INT128_MAX):
+                assert_raises(OverflowError, mt.extint_add_128, a, b)
+            else:
+                d = mt.extint_add_128(a, b)
+                if c != d:
+                    assert_equal(d, c)
+
+
+def test_sub_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a - b
+            if not (INT128_MIN <= c <= INT128_MAX):
+                assert_raises(OverflowError, mt.extint_sub_128, a, b)
+            else:
+                d = mt.extint_sub_128(a, b)
+                if c != d:
+                    assert_equal(d, c)
+
+
+def test_neg_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            b = -a
+            c = mt.extint_neg_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_shl_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if a < 0:
+                b = -(((-a) << 1) & (2**128 - 1))
+            else:
+                b = (a << 1) & (2**128 - 1)
+            c = mt.extint_shl_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_shr_128():
+    with exc_iter(INT128_VALUES) as it:
+        for a, in it:
+            if a < 0:
+                b = -((-a) >> 1)
+            else:
+                b = a >> 1
+            c = mt.extint_shr_128(a)
+            if b != c:
+                assert_equal(c, b)
+
+
+def test_gt_128():
+    with exc_iter(INT128_VALUES, INT128_VALUES) as it:
+        for a, b in it:
+            c = a > b
+            d = mt.extint_gt_128(a, b)
+            if c != d:
+                assert_equal(d, c)
+
+
+@pytest.mark.slow
+def test_divmod_128_64():
+    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+        for a, b in it:
+            if a >= 0:
+                c, cr = divmod(a, b)
+            else:
+                c, cr = divmod(-a, b)
+                c = -c
+                cr = -cr
+
+            d, dr = mt.extint_divmod_128_64(a, b)
+
+            if c != d or cr != dr or b * d + dr != a:
+                assert_equal(d, c)
+                assert_equal(dr, cr)
+                assert_equal(b * d + dr, a)
+
+
+def test_floordiv_128_64():
+    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+        for a, b in it:
+            c = a // b
+            d = mt.extint_floordiv_128_64(a, b)
+
+            if c != d:
+                assert_equal(d, c)
+
+
+def test_ceildiv_128_64():
+    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
+        for a, b in it:
+            c = (a + b - 1) // b
+            d = mt.extint_ceildiv_128_64(a, b)
+
+            if c != d:
+                assert_equal(d, c)
diff --git a/python/numpy/_core/tests/test_function_base.py b/python/numpy/_core/tests/test_function_base.py
new file mode 100644
index 000000000..c925cf1f7
--- /dev/null
+++
b/python/numpy/_core/tests/test_function_base.py @@ -0,0 +1,503 @@ +import platform +import sys + +import pytest + +import numpy as np +from numpy import ( + arange, + array, + dtype, + errstate, + geomspace, + isnan, + linspace, + logspace, + ndarray, + nextafter, + sqrt, + stack, +) +from numpy._core import sctypes +from numpy._core.function_base import add_newdoc +from numpy.testing import ( + IS_PYPY, + assert_, + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) + + +def _is_armhf(): + # Check if the current platform is ARMHF (32-bit ARM architecture) + return platform.machine().startswith('arm') and platform.architecture()[0] == '32bit' + +class PhysicalQuantity(float): + def __new__(cls, value): + return float.__new__(cls, value) + + def __add__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) + float(self)) + __radd__ = __add__ + + def __sub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(self) - float(x)) + + def __rsub__(self, x): + assert_(isinstance(x, PhysicalQuantity)) + return PhysicalQuantity(float(x) - float(self)) + + def __mul__(self, x): + return PhysicalQuantity(float(x) * float(self)) + __rmul__ = __mul__ + + def __truediv__(self, x): + return PhysicalQuantity(float(self) / float(x)) + + def __rtruediv__(self, x): + return PhysicalQuantity(float(x) / float(self)) + + +class PhysicalQuantity2(ndarray): + __array_priority__ = 10 + + +class TestLogspace: + + def test_basic(self): + y = logspace(0, 6) + assert_(len(y) == 50) + y = logspace(0, 6, num=100) + assert_(y[-1] == 10 ** 6) + y = logspace(0, 6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = logspace(0, 6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + def test_start_stop_array(self): + start = array([0., 1.]) + stop = array([6., 7.]) + t1 = logspace(start, stop, 6) + t2 = stack([logspace(_start, _stop, 6) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = logspace(start, stop[0], 6) + t4 = stack([logspace(_start, stop[0], 6) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = logspace(start, stop, 6, axis=-1) + assert_equal(t5, t2.T) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_base_array(self, axis: int): + start = 1 + stop = 2 + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, stop, num=num, base=_base) for _base in base], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + @pytest.mark.parametrize("axis", [0, 1, -1]) + def test_stop_base_array(self, axis: int): + start = 1 + stop = array([2, 3]) + num = 6 + base = array([1, 2]) + t1 = logspace(start, stop, num=num, base=base, axis=axis) + t2 = stack( + [logspace(start, _stop, num=num, base=_base) + for _stop, _base in zip(stop, base)], + axis=(axis + 1) % t1.ndim, + ) + assert_equal(t1, t2) + + def test_dtype(self): + y = logspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = logspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = logspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(logspace(a, b), logspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + ls = logspace(a, b) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0)) + ls = 
logspace(a, b, 1) + assert type(ls) is PhysicalQuantity2 + assert_equal(ls, logspace(1.0, 7.0, 1)) + + +class TestGeomspace: + + def test_basic(self): + y = geomspace(1, 1e6) + assert_(len(y) == 50) + y = geomspace(1, 1e6, num=100) + assert_(y[-1] == 10 ** 6) + y = geomspace(1, 1e6, endpoint=False) + assert_(y[-1] < 10 ** 6) + y = geomspace(1, 1e6, num=7) + assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6]) + + y = geomspace(8, 2, num=3) + assert_allclose(y, [8, 4, 2]) + assert_array_equal(y.imag, 0) + + y = geomspace(-1, -100, num=3) + assert_array_equal(y, [-1, -10, -100]) + assert_array_equal(y.imag, 0) + + y = geomspace(-100, -1, num=3) + assert_array_equal(y, [-100, -10, -1]) + assert_array_equal(y.imag, 0) + + def test_boundaries_match_start_and_stop_exactly(self): + # make sure that the boundaries of the returned array exactly + # equal 'start' and 'stop' - this isn't obvious because + # np.exp(np.log(x)) isn't necessarily exactly equal to x + start = 0.3 + stop = 20.3 + + y = geomspace(start, stop, num=1) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=1, endpoint=False) + assert_equal(y[0], start) + + y = geomspace(start, stop, num=3) + assert_equal(y[0], start) + assert_equal(y[-1], stop) + + y = geomspace(start, stop, num=3, endpoint=False) + assert_equal(y[0], start) + + def test_nan_interior(self): + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:-1]).all()) + assert_equal(y[3], 3.0) + + with errstate(invalid='ignore'): + y = geomspace(-3, 3, num=4, endpoint=False) + + assert_equal(y[0], -3.0) + assert_(isnan(y[1:]).all()) + + def test_complex(self): + # Purely imaginary + y = geomspace(1j, 16j, num=5) + assert_allclose(y, [1j, 2j, 4j, 8j, 16j]) + assert_array_equal(y.real, 0) + + y = geomspace(-4j, -324j, num=5) + assert_allclose(y, [-4j, -12j, -36j, -108j, -324j]) + assert_array_equal(y.real, 0) + + y = geomspace(1 + 1j, 1000 + 1000j, num=4) + assert_allclose(y, [1 + 1j, 10 + 10j, 100 + 100j, 1000 + 1000j]) + + y = geomspace(-1 + 1j, -1000 + 1000j, num=4) + assert_allclose(y, [-1 + 1j, -10 + 10j, -100 + 100j, -1000 + 1000j]) + + # Logarithmic spirals + y = geomspace(-1, 1, num=3, dtype=complex) + assert_allclose(y, [-1, 1j, +1]) + + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(0 + 3j, 3 + 0j, 3) + assert_allclose(y, [0 + 3j, 3 / sqrt(2) + 3j / sqrt(2), 3 + 0j]) + y = geomspace(-3 + 0j, 0 - 3j, 3) + assert_allclose(y, [-3 + 0j, -3 / sqrt(2) - 3j / sqrt(2), 0 - 3j]) + y = geomspace(0 + 3j, -3 + 0j, 3) + assert_allclose(y, [0 + 3j, -3 / sqrt(2) + 3j / sqrt(2), -3 + 0j]) + y = geomspace(-2 - 3j, 5 + 7j, 7) + assert_allclose(y, [-2 - 3j, -0.29058977 - 4.15771027j, + 2.08885354 - 4.34146838j, 4.58345529 - 3.16355218j, + 6.41401745 - 0.55233457j, 6.75707386 + 3.11795092j, + 5 + 7j]) + + # Type promotion should prevent the -5 from becoming a NaN + y = geomspace(3j, -5, 2) + assert_allclose(y, [3j, -5]) + y = geomspace(-5, 3j, 2) + assert_allclose(y, [-5, 3j]) + + def test_complex_shortest_path(self): + # test the shortest logarithmic spiral is used, see gh-25644 + x = 1.2 + 3.4j + y = np.exp(1j * (np.pi - .1)) * x + z = np.geomspace(x, y, 5) + expected = np.array([1.2 + 3.4j, -1.47384 + 3.2905616j, + -3.33577588 + 1.36842949j, -3.36011056 - 1.30753855j, + -1.53343861 - 3.26321406j]) + np.testing.assert_array_almost_equal(z, expected) + + def test_dtype(self): + y = geomspace(1, 1e6, dtype='float32') + assert_equal(y.dtype, 
dtype('float32')) + y = geomspace(1, 1e6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + # Native types + y = geomspace(1, 1e6, dtype=float) + assert_equal(y.dtype, dtype('float64')) + y = geomspace(1, 1e6, dtype=complex) + assert_equal(y.dtype, dtype('complex128')) + + def test_start_stop_array_scalar(self): + lim1 = array([120, 100], dtype="int8") + lim2 = array([-120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = geomspace(lim1[0], lim1[1], 5) + t2 = geomspace(lim2[0], lim2[1], 5) + t3 = geomspace(lim3[0], lim3[1], 5) + t4 = geomspace(120.0, 100.0, 5) + t5 = geomspace(-120.0, -100.0, 5) + t6 = geomspace(1200.0, 1000.0, 5) + + # t3 uses float32, t6 uses float64 + assert_allclose(t1, t4, rtol=1e-2) + assert_allclose(t2, t5, rtol=1e-2) + assert_allclose(t3, t6, rtol=1e-5) + + def test_start_stop_array(self): + # Try to use all special cases. + start = array([1.e0, 32., 1j, -4j, 1 + 1j, -1]) + stop = array([1.e4, 2., 16j, -324j, 10000 + 10000j, 1]) + t1 = geomspace(start, stop, 5) + t2 = stack([geomspace(_start, _stop, 5) + for _start, _stop in zip(start, stop)], axis=1) + assert_equal(t1, t2) + t3 = geomspace(start, stop[0], 5) + t4 = stack([geomspace(_start, stop[0], 5) + for _start in start], axis=1) + assert_equal(t3, t4) + t5 = geomspace(start, stop, 5, axis=-1) + assert_equal(t5, t2.T) + + def test_physical_quantities(self): + a = PhysicalQuantity(1.0) + b = PhysicalQuantity(5.0) + assert_equal(geomspace(a, b), geomspace(1.0, 5.0)) + + def test_subclass(self): + a = array(1).view(PhysicalQuantity2) + b = array(7).view(PhysicalQuantity2) + gs = geomspace(a, b) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0)) + gs = geomspace(a, b, 1) + assert type(gs) is PhysicalQuantity2 + assert_equal(gs, geomspace(1.0, 7.0, 1)) + + def test_bounds(self): + assert_raises(ValueError, geomspace, 0, 10) + assert_raises(ValueError, geomspace, 10, 0) + assert_raises(ValueError, geomspace, 0, 0) + + +class TestLinspace: + + def test_basic(self): + y = linspace(0, 10) + assert_(len(y) == 50) + y = linspace(2, 10, num=100) + assert_(y[-1] == 10) + y = linspace(2, 10, endpoint=False) + assert_(y[-1] < 10) + assert_raises(ValueError, linspace, 0, 10, num=-1) + + def test_corner(self): + y = list(linspace(0, 1, 1)) + assert_(y == [0.0], y) + assert_raises(TypeError, linspace, 0, 1, num=2.5) + + def test_type(self): + t1 = linspace(0, 1, 0).dtype + t2 = linspace(0, 1, 1).dtype + t3 = linspace(0, 1, 2).dtype + assert_equal(t1, t2) + assert_equal(t2, t3) + + def test_dtype(self): + y = linspace(0, 6, dtype='float32') + assert_equal(y.dtype, dtype('float32')) + y = linspace(0, 6, dtype='float64') + assert_equal(y.dtype, dtype('float64')) + y = linspace(0, 6, dtype='int32') + assert_equal(y.dtype, dtype('int32')) + + def test_start_stop_array_scalar(self): + lim1 = array([-120, 100], dtype="int8") + lim2 = array([120, -100], dtype="int8") + lim3 = array([1200, 1000], dtype="uint16") + t1 = linspace(lim1[0], lim1[1], 5) + t2 = linspace(lim2[0], lim2[1], 5) + t3 = linspace(lim3[0], lim3[1], 5) + t4 = linspace(-120.0, 100.0, 5) + t5 = linspace(120.0, -100.0, 5) + t6 = linspace(1200.0, 1000.0, 5) + assert_equal(t1, t4) + assert_equal(t2, t5) + assert_equal(t3, t6) + + def test_start_stop_array(self): + start = array([-120, 120], dtype="int8") + stop = array([100, -100], dtype="int8") + t1 = linspace(start, stop, 5) + t2 = stack([linspace(_start, _stop, 5) + for _start, 
_stop in zip(start, stop)], axis=1)
+        assert_equal(t1, t2)
+        t3 = linspace(start, stop[0], 5)
+        t4 = stack([linspace(_start, stop[0], 5)
+                    for _start in start], axis=1)
+        assert_equal(t3, t4)
+        t5 = linspace(start, stop, 5, axis=-1)
+        assert_equal(t5, t2.T)
+
+    def test_complex(self):
+        lim1 = linspace(1 + 2j, 3 + 4j, 5)
+        t1 = array([1.0 + 2.j, 1.5 + 2.5j, 2.0 + 3j, 2.5 + 3.5j, 3.0 + 4j])
+        lim2 = linspace(1j, 10, 5)
+        t2 = array([0.0 + 1.j, 2.5 + 0.75j, 5.0 + 0.5j, 7.5 + 0.25j, 10.0 + 0j])
+        assert_equal(lim1, t1)
+        assert_equal(lim2, t2)
+
+    def test_physical_quantities(self):
+        a = PhysicalQuantity(0.0)
+        b = PhysicalQuantity(1.0)
+        assert_equal(linspace(a, b), linspace(0.0, 1.0))
+
+    def test_subclass(self):
+        a = array(0).view(PhysicalQuantity2)
+        b = array(1).view(PhysicalQuantity2)
+        ls = linspace(a, b)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0))
+        ls = linspace(a, b, 1)
+        assert type(ls) is PhysicalQuantity2
+        assert_equal(ls, linspace(0.0, 1.0, 1))
+
+    def test_array_interface(self):
+        # Regression test for https://github.com/numpy/numpy/pull/6659
+        # Ensure that start/stop can be objects that implement
+        # __array_interface__ and are convertible to numeric scalars
+
+        class Arrayish:
+            """
+            A generic object that supports the __array_interface__ and hence
+            can in principle be converted to a numeric scalar, but is not
+            otherwise recognized as numeric, but also happens to support
+            multiplication by floats.
+
+            Data should be an object that implements the buffer interface,
+            and contains at least 4 bytes.
+            """
+
+            def __init__(self, data):
+                self._data = data
+
+            @property
+            def __array_interface__(self):
+                return {'shape': (), 'typestr': '<f4',
+                        'data': self._data, 'version': 3}
+
+            def __mul__(self, other):
+                # multiply!
+                return self
+
+        one = Arrayish(array(1, dtype='<f4'))
+        five = Arrayish(array(5, dtype='<f4'))
+
+        assert_equal(linspace(one, five), linspace(1, 5))
+
+class TestAddNewdoc:
+
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+    @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
+    def test_add_doc(self):
+        # test that np.add_newdoc did attach a docstring successfully:
+        tgt = "Current flat index into the array."
+        assert_equal(np._core.flatiter.index.__doc__[:len(tgt)], tgt)
+        assert_(len(np._core.ufunc.identity.__doc__) > 300)
+        assert_(len(np.lib._index_tricks_impl.mgrid.__doc__) > 300)
+
+    @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+    def test_errors_are_ignored(self):
+        prev_doc = np._core.flatiter.index.__doc__
+        # nothing changed, but error ignored, this should probably
+        # give a warning (or even error) in the future.
+        add_newdoc("numpy._core", "flatiter", ("index", "bad docstring"))
+        assert prev_doc == np._core.flatiter.index.__doc__
diff --git a/python/numpy/_core/tests/test_getlimits.py b/python/numpy/_core/tests/test_getlimits.py
new file mode 100644
index 000000000..721c6ac6c
--- /dev/null
+++ b/python/numpy/_core/tests/test_getlimits.py
@@ -0,0 +1,205 @@
+""" Test functions for limits module.
+ +""" +import types +import warnings + +import pytest + +import numpy as np +from numpy import double, half, longdouble, single +from numpy._core import finfo, iinfo +from numpy._core.getlimits import _discovered_machar, _float_ma +from numpy.testing import assert_, assert_equal, assert_raises + +################################################## + +class TestPythonFloat: + def test_singleton(self): + ftype = finfo(float) + ftype2 = finfo(float) + assert_equal(id(ftype), id(ftype2)) + +class TestHalf: + def test_singleton(self): + ftype = finfo(half) + ftype2 = finfo(half) + assert_equal(id(ftype), id(ftype2)) + +class TestSingle: + def test_singleton(self): + ftype = finfo(single) + ftype2 = finfo(single) + assert_equal(id(ftype), id(ftype2)) + +class TestDouble: + def test_singleton(self): + ftype = finfo(double) + ftype2 = finfo(double) + assert_equal(id(ftype), id(ftype2)) + +class TestLongdouble: + def test_singleton(self): + ftype = finfo(longdouble) + ftype2 = finfo(longdouble) + assert_equal(id(ftype), id(ftype2)) + +def assert_finfo_equal(f1, f2): + # assert two finfo instances have the same attributes + for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep', + 'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp', + 'nmant', 'precision', 'resolution', 'tiny', + 'smallest_normal', 'smallest_subnormal'): + assert_equal(getattr(f1, attr), getattr(f2, attr), + f'finfo instances {f1} and {f2} differ on {attr}') + +def assert_iinfo_equal(i1, i2): + # assert two iinfo instances have the same attributes + for attr in ('bits', 'min', 'max'): + assert_equal(getattr(i1, attr), getattr(i2, attr), + f'iinfo instances {i1} and {i2} differ on {attr}') + +class TestFinfo: + def test_basic(self): + dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'], + [np.float16, np.float32, np.float64, np.complex64, + np.complex128])) + for dt1, dt2 in dts: + assert_finfo_equal(finfo(dt1), finfo(dt2)) + + assert_raises(ValueError, finfo, 'i4') + + def test_regression_gh23108(self): + # np.float32(1.0) and np.float64(1.0) have the same hash and are + # equal under the == operator + f1 = np.finfo(np.float32(1.0)) + f2 = np.finfo(np.float64(1.0)) + assert f1 != f2 + + def test_regression_gh23867(self): + class NonHashableWithDtype: + __hash__ = None + dtype = np.dtype('float32') + + x = NonHashableWithDtype() + assert np.finfo(x) == np.finfo(x.dtype) + + +class TestIinfo: + def test_basic(self): + dts = list(zip(['i1', 'i2', 'i4', 'i8', + 'u1', 'u2', 'u4', 'u8'], + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64])) + for dt1, dt2 in dts: + assert_iinfo_equal(iinfo(dt1), iinfo(dt2)) + + assert_raises(ValueError, iinfo, 'f4') + + def test_unsigned_max(self): + types = np._core.sctypes['uint'] + for T in types: + with np.errstate(over="ignore"): + max_calculated = T(0) - T(1) + assert_equal(iinfo(T).max, max_calculated) + +class TestRepr: + def test_iinfo_repr(self): + expected = "iinfo(min=-32768, max=32767, dtype=int16)" + assert_equal(repr(np.iinfo(np.int16)), expected) + + def test_finfo_repr(self): + expected = "finfo(resolution=1e-06, min=-3.4028235e+38,"\ + " max=3.4028235e+38, dtype=float32)" + assert_equal(repr(np.finfo(np.float32)), expected) + + +def test_instances(): + # Test the finfo and iinfo results on numeric instances agree with + # the results on the corresponding types + + for c in [int, np.int16, np.int32, np.int64]: + class_iinfo = iinfo(c) + instance_iinfo = iinfo(c(12)) + + assert_iinfo_equal(class_iinfo, instance_iinfo) + + for c in [float, np.float16, np.float32, 
np.float64]: + class_finfo = finfo(c) + instance_finfo = finfo(c(1.2)) + assert_finfo_equal(class_finfo, instance_finfo) + + with pytest.raises(ValueError): + iinfo(10.) + + with pytest.raises(ValueError): + iinfo('hi') + + with pytest.raises(ValueError): + finfo(np.int64(1)) + + +def assert_ma_equal(discovered, ma_like): + # Check MachAr-like objects same as calculated MachAr instances + for key, value in discovered.__dict__.items(): + assert_equal(value, getattr(ma_like, key)) + if hasattr(value, 'shape'): + assert_equal(value.shape, getattr(ma_like, key).shape) + assert_equal(value.dtype, getattr(ma_like, key).dtype) + + +def test_known_types(): + # Test we are correctly compiling parameters for known types + for ftype, ma_like in ((np.float16, _float_ma[16]), + (np.float32, _float_ma[32]), + (np.float64, _float_ma[64])): + assert_ma_equal(_discovered_machar(ftype), ma_like) + # Suppress warning for broken discovery of double double on PPC + with np.errstate(all='ignore'): + ld_ma = _discovered_machar(np.longdouble) + bytes = np.dtype(np.longdouble).itemsize + if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): + # 80-bit extended precision + assert_ma_equal(ld_ma, _float_ma[80]) + elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: + # IEEE 754 128-bit + assert_ma_equal(ld_ma, _float_ma[128]) + + +def test_subnormal_warning(): + """Test that the 'subnormal is zero' warning is not being raised.""" + with np.errstate(all='ignore'): + ld_ma = _discovered_machar(np.longdouble) + bytes = np.dtype(np.longdouble).itemsize + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16): + # 80-bit extended precision + ld_ma.smallest_subnormal + assert len(w) == 0 + elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16: + # IEEE 754 128-bit + ld_ma.smallest_subnormal + assert len(w) == 0 + else: + # Double double + ld_ma.smallest_subnormal + # This test may fail on some platforms + assert len(w) == 0 + + +def test_plausible_finfo(): + # Assert that finfo returns reasonable results for all types + for ftype in np._core.sctypes['float'] + np._core.sctypes['complex']: + info = np.finfo(ftype) + assert_(info.nmant > 1) + assert_(info.minexp < -1) + assert_(info.maxexp > 1) + + +class TestRuntimeSubscriptable: + def test_finfo_generic(self): + assert isinstance(np.finfo[np.float64], types.GenericAlias) + + def test_iinfo_generic(self): + assert isinstance(np.iinfo[np.int_], types.GenericAlias) diff --git a/python/numpy/_core/tests/test_half.py b/python/numpy/_core/tests/test_half.py new file mode 100644 index 000000000..68f17b2a5 --- /dev/null +++ b/python/numpy/_core/tests/test_half.py @@ -0,0 +1,568 @@ +import platform + +import pytest + +import numpy as np +from numpy import float16, float32, float64, uint16 +from numpy.testing import IS_WASM, assert_, assert_equal + + +def assert_raises_fpe(strmatch, callable, *args, **kwargs): + try: + callable(*args, **kwargs) + except FloatingPointError as exc: + assert_(str(exc).find(strmatch) >= 0, + f"Did not raise floating point {strmatch} error") + else: + assert_(False, + f"Did not raise floating point {strmatch} error") + +class TestHalf: + def setup_method(self): + # An array of all possible float16 values + self.all_f16 = np.arange(0x10000, dtype=uint16) + self.all_f16.dtype = float16 + + # NaN value can cause an invalid FP exception if HW is being used + with np.errstate(invalid='ignore'): + self.all_f32 = np.array(self.all_f16, 
dtype=float32) + self.all_f64 = np.array(self.all_f16, dtype=float64) + + # An array of all non-NaN float16 values, in sorted order + self.nonan_f16 = np.concatenate( + (np.arange(0xfc00, 0x7fff, -1, dtype=uint16), + np.arange(0x0000, 0x7c01, 1, dtype=uint16))) + self.nonan_f16.dtype = float16 + self.nonan_f32 = np.array(self.nonan_f16, dtype=float32) + self.nonan_f64 = np.array(self.nonan_f16, dtype=float64) + + # An array of all finite float16 values, in sorted order + self.finite_f16 = self.nonan_f16[1:-1] + self.finite_f32 = self.nonan_f32[1:-1] + self.finite_f64 = self.nonan_f64[1:-1] + + def test_half_conversions(self): + """Checks that all 16-bit values survive conversion + to/from 32-bit and 64-bit float""" + # Because the underlying routines preserve the NaN bits, every + # value is preserved when converting to/from other floats. + + # Convert from float32 back to float16 + with np.errstate(invalid='ignore'): + b = np.array(self.all_f32, dtype=float16) + # avoid testing NaNs due to differing bit patterns in Q/S NaNs + b_nn = b == b + assert_equal(self.all_f16[b_nn].view(dtype=uint16), + b[b_nn].view(dtype=uint16)) + + # Convert from float64 back to float16 + with np.errstate(invalid='ignore'): + b = np.array(self.all_f64, dtype=float16) + b_nn = b == b + assert_equal(self.all_f16[b_nn].view(dtype=uint16), + b[b_nn].view(dtype=uint16)) + + # Convert float16 to longdouble and back + # This doesn't necessarily preserve the extra NaN bits, + # so exclude NaNs. + a_ld = np.array(self.nonan_f16, dtype=np.longdouble) + b = np.array(a_ld, dtype=float16) + assert_equal(self.nonan_f16.view(dtype=uint16), + b.view(dtype=uint16)) + + # Check the range for which all integers can be represented + i_int = np.arange(-2048, 2049) + i_f16 = np.array(i_int, dtype=float16) + j = np.array(i_f16, dtype=int) + assert_equal(i_int, j) + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_to_string(self, string_dt): + # Currently uses S/U32 (which is sufficient for float32) + expected_dt = np.dtype(f"{string_dt}32") + assert np.promote_types(np.float16, string_dt) == expected_dt + assert np.promote_types(string_dt, np.float16) == expected_dt + + arr = np.ones(3, dtype=np.float16).astype(string_dt) + assert arr.dtype == expected_dt + + @pytest.mark.parametrize("string_dt", ["S", "U"]) + def test_half_conversion_from_string(self, string_dt): + string = np.array("3.1416", dtype=string_dt) + assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16) + + @pytest.mark.parametrize("offset", [None, "up", "down"]) + @pytest.mark.parametrize("shift", [None, "up", "down"]) + @pytest.mark.parametrize("float_t", [np.float32, np.float64]) + def test_half_conversion_rounding(self, float_t, shift, offset): + # Assumes that round to even is used during casting. 
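+ # (Illustrative note, not part of the original test: under round to + # even, a value exactly halfway between two representable float16 + # numbers rounds to the neighbour with an even significand; above + # 2048 the float16 spacing is 2, so np.float64(2049).astype(np.float16) + # gives 2048.0 while np.float64(2051).astype(np.float16) gives 2052.0.)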
+ max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16) + + # Test all (positive) finite numbers, denormals are most interesting + # however: + f16s_patterns = np.arange(0, max_pattern + 1, dtype=np.uint16) + f16s_float = f16s_patterns.view(np.float16).astype(float_t) + + # Shift the values by half a bit up or a down (or do not shift), + if shift == "up": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:] + elif shift == "down": + f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1] + else: + f16s_float = f16s_float[1:-1] + + # Increase the float by a minimal value: + if offset == "up": + f16s_float = np.nextafter(f16s_float, float_t(np.inf)) + elif offset == "down": + f16s_float = np.nextafter(f16s_float, float_t(-np.inf)) + + # Convert back to float16 and its bit pattern: + res_patterns = f16s_float.astype(np.float16).view(np.uint16) + + # The above calculation tries the original values, or the exact + # midpoints between the float16 values. It then further offsets them + # by as little as possible. If no offset occurs, "round to even" + # logic will be necessary, an arbitrarily small offset should cause + # normal up/down rounding always. + + # Calculate the expected pattern: + cmp_patterns = f16s_patterns[1:-1].copy() + + if shift == "down" and offset != "up": + shift_pattern = -1 + elif shift == "up" and offset != "down": + shift_pattern = 1 + else: + # There cannot be a shift, either shift is None, so all rounding + # will go back to original, or shift is reduced by offset too much. + shift_pattern = 0 + + # If rounding occurs, is it normal rounding or round to even? + if offset is None: + # Round to even occurs, modify only non-even, cast to allow + (-1) + cmp_patterns[0::2].view(np.int16)[...] += shift_pattern + else: + cmp_patterns.view(np.int16)[...] += shift_pattern + + assert_equal(res_patterns, cmp_patterns) + + @pytest.mark.parametrize(["float_t", "uint_t", "bits"], + [(np.float32, np.uint32, 23), + (np.float64, np.uint64, 52)]) + def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits): + # Test specifically that all bits are considered when deciding + # whether round to even should occur (i.e. no bits are lost at the + # end. Compare also gh-12721. 
The most bits can get lost for the + # smallest denormal: + smallest_value = np.uint16(1).view(np.float16).astype(float_t) + assert smallest_value == 2**-24 + + # Will be rounded to zero based on round to even rule: + rounded_to_zero = smallest_value / float_t(2) + assert rounded_to_zero.astype(np.float16) == 0 + + # The significand will be all 0 for the float_t, test that we do not + # lose the lower ones of these: + for i in range(bits): + # slightly increasing the value should make it round up: + larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i) + larger_value = larger_pattern.view(float_t) + assert larger_value.astype(np.float16) == smallest_value + + def test_nans_infs(self): + with np.errstate(all='ignore'): + # Check some of the ufuncs + assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32)) + assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32)) + assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32)) + assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32)) + assert_equal(np.spacing(float16(65504)), np.inf) + + # Check comparisons of all values with NaN + nan = float16(np.nan) + + assert_(not (self.all_f16 == nan).any()) + assert_(not (nan == self.all_f16).any()) + + assert_((self.all_f16 != nan).all()) + assert_((nan != self.all_f16).all()) + + assert_(not (self.all_f16 < nan).any()) + assert_(not (nan < self.all_f16).any()) + + assert_(not (self.all_f16 <= nan).any()) + assert_(not (nan <= self.all_f16).any()) + + assert_(not (self.all_f16 > nan).any()) + assert_(not (nan > self.all_f16).any()) + + assert_(not (self.all_f16 >= nan).any()) + assert_(not (nan >= self.all_f16).any()) + + def test_half_values(self): + """Confirms a small number of known half values""" + a = np.array([1.0, -1.0, + 2.0, -2.0, + 0.0999755859375, 0.333251953125, # 1/10, 1/3 + 65504, -65504, # Maximum magnitude + 2.0**(-14), -2.0**(-14), # Minimum normal + 2.0**(-24), -2.0**(-24), # Minimum subnormal + 0, -1 / 1e1000, # Signed zeros + np.inf, -np.inf]) + b = np.array([0x3c00, 0xbc00, + 0x4000, 0xc000, + 0x2e66, 0x3555, + 0x7bff, 0xfbff, + 0x0400, 0x8400, + 0x0001, 0x8001, + 0x0000, 0x8000, + 0x7c00, 0xfc00], dtype=uint16) + b.dtype = float16 + assert_equal(a, b) + + def test_half_rounding(self): + """Checks that rounding when converting to half is correct""" + a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal + 2.0**-25, # Underflows to zero (nearest even mode) + 2.0**-26, # Underflows to zero + 1.0 + 2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10) + 1.0 + 2.0**-11, # rounds to 1.0 (nearest even mode) + 1.0 + 2.0**-12, # rounds to 1.0 + 65519, # rounds to 65504 + 65520], # rounds to inf + dtype=float64) + rounded = [2.0**-24, + 0.0, + 0.0, + 1.0 + 2.0**(-10), + 1.0, + 1.0, + 65504, + np.inf] + + # Check float64->float16 rounding + with np.errstate(over="ignore"): + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + # Check float32->float16 rounding + a = np.array(a, dtype=float32) + with np.errstate(over="ignore"): + b = np.array(a, dtype=float16) + assert_equal(b, rounded) + + def test_half_correctness(self): + """Take every finite float16, and check the casting functions with + a manual conversion.""" + + # Create an array of all finite float16s + a_bits = self.finite_f16.view(dtype=uint16) + + # Convert to 64-bit float manually + a_sgn = (-1.0)**((a_bits & 0x8000) >> 15) + a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15 + a_man = (a_bits & 0x03ff) * 2.0**(-10) + # Implicit bit of normalized floats + a_man[a_exp != 
-15] += 1 + # Denormalized exponent is -14 + a_exp[a_exp == -15] = -14 + + a_manual = a_sgn * a_man * 2.0**a_exp + + a32_fail = np.nonzero(self.finite_f32 != a_manual)[0] + if len(a32_fail) != 0: + bad_index = a32_fail[0] + assert_equal(self.finite_f32, a_manual, + "First non-equal is half value 0x%x -> %g != %g" % + (a_bits[bad_index], + self.finite_f32[bad_index], + a_manual[bad_index])) + + a64_fail = np.nonzero(self.finite_f64 != a_manual)[0] + if len(a64_fail) != 0: + bad_index = a64_fail[0] + assert_equal(self.finite_f64, a_manual, + "First non-equal is half value 0x%x -> %g != %g" % + (a_bits[bad_index], + self.finite_f64[bad_index], + a_manual[bad_index])) + + def test_half_ordering(self): + """Make sure comparisons are working right""" + + # All non-NaN float16 values in reverse order + a = self.nonan_f16[::-1].copy() + + # 32-bit float copy + b = np.array(a, dtype=float32) + + # Should sort the same + a.sort() + b.sort() + assert_equal(a, b) + + # Comparisons should work + assert_((a[:-1] <= a[1:]).all()) + assert_(not (a[:-1] > a[1:]).any()) + assert_((a[1:] >= a[:-1]).all()) + assert_(not (a[1:] < a[:-1]).any()) + # All != except for +/-0 + assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size - 2) + assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size - 2) + + def test_half_funcs(self): + """Test the various ArrFuncs""" + + # fill + assert_equal(np.arange(10, dtype=float16), + np.arange(10, dtype=float32)) + + # fillwithscalar + a = np.zeros((5,), dtype=float16) + a.fill(1) + assert_equal(a, np.ones((5,), dtype=float16)) + + # nonzero and copyswap + a = np.array([0, 0, -1, -1 / 1e20, 0, 2.0**-24, 7.629e-6], dtype=float16) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + a = a.byteswap() + a = a.view(a.dtype.newbyteorder()) + assert_equal(a.nonzero()[0], + [2, 5, 6]) + + # dot + a = np.arange(0, 10, 0.5, dtype=float16) + b = np.ones((20,), dtype=float16) + assert_equal(np.dot(a, b), + 95) + + # argmax + a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 4) + a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16) + assert_equal(a.argmax(), + 5) + + # getitem + a = np.arange(10, dtype=float16) + for i in range(10): + assert_equal(a.item(i), i) + + def test_spacing_nextafter(self): + """Test np.spacing and np.nextafter""" + # All non-negative finite #'s + a = np.arange(0x7c00, dtype=uint16) + hinf = np.array((np.inf,), dtype=float16) + hnan = np.array((np.nan,), dtype=float16) + a_f16 = a.view(dtype=float16) + + assert_equal(np.spacing(a_f16[:-1]), a_f16[1:] - a_f16[:-1]) + + assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:]) + assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1]) + + assert_equal(np.nextafter(hinf, a_f16), a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1]) + + assert_equal(np.nextafter(hinf, hinf), hinf) + assert_equal(np.nextafter(hinf, -hinf), a_f16[-1]) + assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, -hinf), -hinf) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + assert_equal(np.nextafter(hnan, hnan), hnan) + assert_equal(np.nextafter(hinf, hnan), hnan) + assert_equal(np.nextafter(hnan, hinf), hnan) + + # switch to negatives + a |= 0x8000 + + assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1])) + assert_equal(np.spacing(a_f16[1:]), a_f16[:-1] - a_f16[1:]) + + assert_equal(np.nextafter(a_f16[0], hinf), 
-a_f16[1]) + assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1]) + assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:]) + + assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1]) + assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1]) + + assert_equal(np.nextafter(a_f16, hnan), hnan[0]) + assert_equal(np.nextafter(hnan, a_f16), hnan[0]) + + def test_half_ufuncs(self): + """Test the various ufuncs""" + + a = np.array([0, 1, 2, 4, 2], dtype=float16) + b = np.array([-2, 5, 1, 4, 3], dtype=float16) + c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16) + + assert_equal(np.add(a, b), [-2, 6, 3, 8, 5]) + assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1]) + assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6]) + assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625]) + + assert_equal(np.equal(a, b), [False, False, False, True, False]) + assert_equal(np.not_equal(a, b), [True, True, True, False, True]) + assert_equal(np.less(a, b), [False, True, False, False, True]) + assert_equal(np.less_equal(a, b), [False, True, False, True, True]) + assert_equal(np.greater(a, b), [True, False, True, False, False]) + assert_equal(np.greater_equal(a, b), [True, False, True, True, False]) + assert_equal(np.logical_and(a, b), [False, True, True, True, True]) + assert_equal(np.logical_or(a, b), [True, True, True, True, True]) + assert_equal(np.logical_xor(a, b), [True, False, False, False, False]) + assert_equal(np.logical_not(a), [True, False, False, False, False]) + + assert_equal(np.isnan(c), [False, False, False, True, False]) + assert_equal(np.isinf(c), [False, False, True, False, False]) + assert_equal(np.isfinite(c), [True, True, False, False, True]) + assert_equal(np.signbit(b), [True, False, False, False, False]) + + assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3]) + + assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3]) + + x = np.maximum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [0, 5, 1, 0, 6]) + + assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2]) + + x = np.minimum(b, c) + assert_(np.isnan(x[3])) + x[3] = 0 + assert_equal(x, [-2, -1, -np.inf, 0, 3]) + + assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3]) + assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6]) + assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2]) + assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3]) + + assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0]) + assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2]) + assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2])) + assert_equal(np.square(b), [4, 25, 1, 16, 9]) + assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125]) + assert_equal(np.ones_like(b), [1, 1, 1, 1, 1]) + assert_equal(np.conjugate(b), b) + assert_equal(np.absolute(b), [2, 5, 1, 4, 3]) + assert_equal(np.negative(b), [2, -5, -1, -4, -3]) + assert_equal(np.positive(b), b) + assert_equal(np.sign(b), [-1, 1, 1, 1, 1]) + assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b)) + assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2])) + assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12]) + + def test_half_coercion(self): + """Test that half gets coerced properly with the other types""" + a16 = np.array((1,), dtype=float16) + a32 = np.array((1,), dtype=float32) + b16 = float16(1) + b32 = float32(1) + + assert np.power(a16, 2).dtype == float16 + assert np.power(a16, 2.0).dtype == float16 + assert np.power(a16, b16).dtype == float16 + assert np.power(a16, b32).dtype == float32 + assert np.power(a16, a16).dtype == float16 + assert np.power(a16, a32).dtype == float32 + + 
assert np.power(b16, 2).dtype == float16 + assert np.power(b16, 2.0).dtype == float16 + assert np.power(b16, b16).dtype == float16 + assert np.power(b16, b32).dtype == float32 + assert np.power(b16, a16).dtype == float16 + assert np.power(b16, a32).dtype == float32 + + assert np.power(a32, a16).dtype == float32 + assert np.power(a32, b16).dtype == float32 + assert np.power(b32, a16).dtype == float32 + assert np.power(b32, b16).dtype == float32 + + @pytest.mark.skipif(platform.machine() == "armv5tel", + reason="See gh-413.") + @pytest.mark.skipif(IS_WASM, + reason="fp exceptions don't work in wasm.") + def test_half_fpe(self): + with np.errstate(all='raise'): + sx16 = np.array((1e-4,), dtype=float16) + bx16 = np.array((1e4,), dtype=float16) + sy16 = float16(1e-4) + by16 = float16(1e4) + + # Underflow errors + assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sx16) + assert_raises_fpe('underflow', lambda a, b: a * b, sx16, sy16) + assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sx16) + assert_raises_fpe('underflow', lambda a, b: a * b, sy16, sy16) + assert_raises_fpe('underflow', lambda a, b: a / b, sx16, bx16) + assert_raises_fpe('underflow', lambda a, b: a / b, sx16, by16) + assert_raises_fpe('underflow', lambda a, b: a / b, sy16, bx16) + assert_raises_fpe('underflow', lambda a, b: a / b, sy16, by16) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(-2.**-14), float16(2**11)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14 + 2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(-2.**-14 - 2**-24), float16(2)) + assert_raises_fpe('underflow', lambda a, b: a / b, + float16(2.**-14 + 2**-23), float16(4)) + + # Overflow errors + assert_raises_fpe('overflow', lambda a, b: a * b, bx16, bx16) + assert_raises_fpe('overflow', lambda a, b: a * b, bx16, by16) + assert_raises_fpe('overflow', lambda a, b: a * b, by16, bx16) + assert_raises_fpe('overflow', lambda a, b: a * b, by16, by16) + assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sx16) + assert_raises_fpe('overflow', lambda a, b: a / b, bx16, sy16) + assert_raises_fpe('overflow', lambda a, b: a / b, by16, sx16) + assert_raises_fpe('overflow', lambda a, b: a / b, by16, sy16) + assert_raises_fpe('overflow', lambda a, b: a + b, + float16(65504), float16(17)) + assert_raises_fpe('overflow', lambda a, b: a - b, + float16(-65504), float16(17)) + assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf)) + assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf)) + assert_raises_fpe('overflow', np.spacing, float16(65504)) + + # Invalid value errors + assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.inf)) + assert_raises_fpe('invalid', np.spacing, float16(np.nan)) + + # These should not raise + float16(65472) + float16(32) + float16(2**-13) / float16(2) + float16(2**-14) / float16(2**10) + np.spacing(float16(-65504)) + np.nextafter(float16(65504), float16(-np.inf)) + np.nextafter(float16(-65504), float16(np.inf)) + np.nextafter(float16(np.inf), float16(0)) + np.nextafter(float16(-np.inf), float16(0)) + np.nextafter(float16(0), float16(np.nan)) + np.nextafter(float16(np.nan), float16(0)) + float16(2**-14) / float16(2**10) + float16(-2**-14) / float16(2**10) + float16(2**-14 + 2**-23) / float16(2) + float16(-2**-14 - 2**-23) / float16(2) + + def 
test_half_array_interface(self): + """Test that half is compatible with __array_interface__""" + class Dummy: + pass + + a = np.ones((1,), dtype=float16) + b = Dummy() + b.__array_interface__ = a.__array_interface__ + c = np.array(b) + assert_(c.dtype == float16) + assert_equal(a, c) diff --git a/python/numpy/_core/tests/test_hashtable.py b/python/numpy/_core/tests/test_hashtable.py new file mode 100644 index 000000000..74be5219a --- /dev/null +++ b/python/numpy/_core/tests/test_hashtable.py @@ -0,0 +1,35 @@ +import random + +import pytest +from numpy._core._multiarray_tests import identityhash_tester + + +@pytest.mark.parametrize("key_length", [1, 3, 6]) +@pytest.mark.parametrize("length", [1, 16, 2000]) +def test_identity_hashtable(key_length, length): + # use a 30 object pool for everything (duplicates will happen) + pool = [object() for i in range(20)] + keys_vals = [] + for i in range(length): + keys = tuple(random.choices(pool, k=key_length)) + keys_vals.append((keys, random.choice(pool))) + + dictionary = dict(keys_vals) + + # add a random item at the end: + keys_vals.append(random.choice(keys_vals)) + # the expected one could be different with duplicates: + expected = dictionary[keys_vals[-1][0]] + + res = identityhash_tester(key_length, keys_vals, replace=True) + assert res is expected + + if length == 1: + return + + # add a new item with a key that is already used and a new value, this + # should error if replace is False, see gh-26690 + new_key = (keys_vals[1][0], object()) + keys_vals[0] = new_key + with pytest.raises(RuntimeError): + identityhash_tester(key_length, keys_vals) diff --git a/python/numpy/_core/tests/test_indexerrors.py b/python/numpy/_core/tests/test_indexerrors.py new file mode 100644 index 000000000..02110c283 --- /dev/null +++ b/python/numpy/_core/tests/test_indexerrors.py @@ -0,0 +1,125 @@ +import numpy as np +from numpy.testing import ( + assert_raises, + assert_raises_regex, +) + + +class TestIndexErrors: + '''Tests to exercise indexerrors not covered by other tests.''' + + def test_arraytypes_fasttake(self): + 'take from a 0-length dimension' + x = np.empty((2, 3, 0, 4)) + assert_raises(IndexError, x.take, [0], axis=2) + assert_raises(IndexError, x.take, [1], axis=2) + assert_raises(IndexError, x.take, [0], axis=2, mode='wrap') + assert_raises(IndexError, x.take, [0], axis=2, mode='clip') + + def test_take_from_object(self): + # Check exception taking from object array + d = np.zeros(5, dtype=object) + assert_raises(IndexError, d.take, [6]) + + # Check exception taking from 0-d array + d = np.zeros((5, 0), dtype=object) + assert_raises(IndexError, d.take, [1], axis=1) + assert_raises(IndexError, d.take, [0], axis=1) + assert_raises(IndexError, d.take, [0]) + assert_raises(IndexError, d.take, [0], mode='wrap') + assert_raises(IndexError, d.take, [0], mode='clip') + + def test_multiindex_exceptions(self): + a = np.empty(5, dtype=object) + assert_raises(IndexError, a.item, 20) + a = np.empty((5, 0), dtype=object) + assert_raises(IndexError, a.item, (0, 0)) + + def test_put_exceptions(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0)) + assert_raises(IndexError, a.put, 100, 0) + a = np.zeros((5, 5, 0), dtype=object) + assert_raises(IndexError, a.put, 100, 0) + + def test_iterators_exceptions(self): + "cases in iterators.c" + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: 
a[0, 5, None, 2]) + assert_raises(IndexError, lambda: a[0, 5, 0, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1)) + assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1)) + + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a[0, 0, None, 2]) + assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[10]) + assert_raises(IndexError, lambda: assign(a.flat, 10, 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array(10)]) + assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5)) + + a = np.zeros([1, 2, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + a = np.zeros([1, 0, 3]) + assert_raises(IndexError, lambda: a.flat[np.array([10])]) + assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5)) + + def test_mapping(self): + "cases from mapping.c" + + def assign(obj, ind, val): + obj[ind] = val + + a = np.zeros((0, 10)) + assert_raises(IndexError, lambda: a[12]) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(10, 20)]) + assert_raises(IndexError, lambda: assign(a, (10, 20), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, 0)]) + assert_raises(IndexError, lambda: assign(a, (1, 0), 1)) + + a = np.zeros((10,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + a = np.zeros((0,)) + assert_raises(IndexError, lambda: assign(a, 10, 1)) + + a = np.zeros((3, 5)) + assert_raises(IndexError, lambda: a[(1, [1, 20])]) + assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1)) + a = np.zeros((3, 0)) + assert_raises(IndexError, lambda: a[(1, [0, 1])]) + assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1)) + + def test_mapping_error_message(self): + a = np.zeros((3, 5)) + index = (1, 2, 3, 4, 5) + assert_raises_regex( + IndexError, + "too many indices for array: " + "array is 2-dimensional, but 5 were indexed", + lambda: a[index]) + + def test_methods(self): + "cases from methods.c" + + a = np.zeros((3, 3)) + assert_raises(IndexError, lambda: a.item(100)) diff --git a/python/numpy/_core/tests/test_indexing.py b/python/numpy/_core/tests/test_indexing.py new file mode 100644 index 000000000..e722d0c1a --- /dev/null +++ b/python/numpy/_core/tests/test_indexing.py @@ -0,0 +1,1455 @@ +import functools +import operator +import sys +import warnings +from itertools import product + +import pytest +from numpy._core._multiarray_tests import array_indexing + +import numpy as np +from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning +from numpy.testing import ( + HAS_REFCOUNT, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) + + +class TestIndexing: + def test_index_no_floats(self): + a = np.array([[[5]]]) + + assert_raises(IndexError, lambda: a[0.0]) + assert_raises(IndexError, lambda: a[0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0]) + assert_raises(IndexError, lambda: a[0.0, :]) + assert_raises(IndexError, lambda: a[:, 0.0]) + assert_raises(IndexError, lambda: a[:, 0.0, :]) + assert_raises(IndexError, lambda: a[0.0, :, :]) + assert_raises(IndexError, 
lambda: a[0, 0, 0.0]) + assert_raises(IndexError, lambda: a[0.0, 0, 0]) + assert_raises(IndexError, lambda: a[0, 0.0, 0]) + assert_raises(IndexError, lambda: a[-1.4]) + assert_raises(IndexError, lambda: a[0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0]) + assert_raises(IndexError, lambda: a[-1.4, :]) + assert_raises(IndexError, lambda: a[:, -1.4]) + assert_raises(IndexError, lambda: a[:, -1.4, :]) + assert_raises(IndexError, lambda: a[-1.4, :, :]) + assert_raises(IndexError, lambda: a[0, 0, -1.4]) + assert_raises(IndexError, lambda: a[-1.4, 0, 0]) + assert_raises(IndexError, lambda: a[0, -1.4, 0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0]) + assert_raises(IndexError, lambda: a[0.0:, 0.0, :]) + + def test_slicing_no_floats(self): + a = np.array([[5]]) + + # start as float. + assert_raises(TypeError, lambda: a[0.0:]) + assert_raises(TypeError, lambda: a[0:, 0.0:2]) + assert_raises(TypeError, lambda: a[0.0::2, :0]) + assert_raises(TypeError, lambda: a[0.0:1:2, :]) + assert_raises(TypeError, lambda: a[:, 0.0:]) + # stop as float. + assert_raises(TypeError, lambda: a[:0.0]) + assert_raises(TypeError, lambda: a[:0, 1:2.0]) + assert_raises(TypeError, lambda: a[:0.0:2, :0]) + assert_raises(TypeError, lambda: a[:0.0, :]) + assert_raises(TypeError, lambda: a[:, 0:4.0:2]) + # step as float. + assert_raises(TypeError, lambda: a[::1.0]) + assert_raises(TypeError, lambda: a[0:, :2:2.0]) + assert_raises(TypeError, lambda: a[1::4.0, :0]) + assert_raises(TypeError, lambda: a[::5.0, :]) + assert_raises(TypeError, lambda: a[:, 0:4:2.0]) + # mixed. + assert_raises(TypeError, lambda: a[1.0:2:2.0]) + assert_raises(TypeError, lambda: a[1.0::2.0]) + assert_raises(TypeError, lambda: a[0:, :2.0:2.0]) + assert_raises(TypeError, lambda: a[1.0:1:4.0, :0]) + assert_raises(TypeError, lambda: a[1.0:5.0:5.0, :]) + assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0]) + # should still get the DeprecationWarning if step = 0. + assert_raises(TypeError, lambda: a[::0.0]) + + def test_index_no_array_to_index(self): + # No non-scalar arrays. + a = np.array([[[1]]]) + + assert_raises(TypeError, lambda: a[a:a:a]) + + def test_none_index(self): + # `None` index adds newaxis + a = np.array([1, 2, 3]) + assert_equal(a[None], a[np.newaxis]) + assert_equal(a[None].ndim, a.ndim + 1) + + def test_empty_tuple_index(self): + # Empty tuple index creates a view + a = np.array([1, 2, 3]) + assert_equal(a[()], a) + assert_(a[()].base is a) + a = np.array(0) + assert_(isinstance(a[()], np.int_)) + + def test_void_scalar_empty_tuple(self): + s = np.zeros((), dtype='V4') + assert_equal(s[()].dtype, s.dtype) + assert_equal(s[()], s) + assert_equal(type(s[...]), np.ndarray) + + def test_same_kind_index_casting(self): + # Indexes should be cast with same-kind and not safe, even if that + # is somewhat unsafe. So test various different code paths. 
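+ # (Illustrative note, not part of the original test: "same_kind" + # accepts casts that "safe" rejects, e.g. + # np.can_cast(np.float64, np.float32, casting='same_kind') is True + # while np.can_cast(np.float64, np.float32) is False; the intp -> + # uintp index conversions below rely on the same relaxation.)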
+ index = np.arange(5) + u_index = index.astype(np.uintp) + arr = np.arange(10) + + assert_array_equal(arr[index], arr[u_index]) + arr[u_index] = np.arange(5) + assert_array_equal(arr, np.arange(10)) + + arr = np.arange(10).reshape(5, 2) + assert_array_equal(arr[index], arr[u_index]) + + arr[u_index] = np.arange(5)[:, None] + assert_array_equal(arr, np.arange(5)[:, None].repeat(2, axis=1)) + + arr = np.arange(25).reshape(5, 5) + assert_array_equal(arr[u_index, u_index], arr[index, index]) + + def test_empty_fancy_index(self): + # Empty list index creates an empty array + # with the same dtype (but with weird shape) + a = np.array([1, 2, 3]) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([], dtype=np.intp) + assert_equal(a[[]], []) + assert_equal(a[[]].dtype, a.dtype) + + b = np.array([]) + assert_raises(IndexError, a.__getitem__, b) + + def test_gh_26542(self): + a = np.array([0, 1, 2]) + idx = np.array([2, 1, 0]) + a[idx] = a + expected = np.array([2, 1, 0]) + assert_equal(a, expected) + + def test_gh_26542_2d(self): + a = np.array([[0, 1, 2]]) + idx_row = np.zeros(3, dtype=int) + idx_col = np.array([2, 1, 0]) + a[idx_row, idx_col] = a + expected = np.array([[2, 1, 0]]) + assert_equal(a, expected) + + def test_gh_26542_index_overlap(self): + arr = np.arange(100) + expected_vals = np.copy(arr[:-10]) + arr[10:] = arr[:-10] + actual_vals = arr[10:] + assert_equal(actual_vals, expected_vals) + + def test_gh_26844(self): + expected = [0, 1, 3, 3, 3] + a = np.arange(5) + a[2:][a[:-2]] = 3 + assert_equal(a, expected) + + def test_gh_26844_segfault(self): + # check for absence of segfault for: + # https://github.com/numpy/numpy/pull/26958/files#r1854589178 + a = np.arange(5) + expected = [0, 1, 3, 3, 3] + a[2:][None, a[:-2]] = 3 + assert_equal(a, expected) + + def test_ellipsis_index(self): + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + assert_(a[...] is not a) + assert_equal(a[...], a) + # `a[...]` was `a` in numpy <1.9. 
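+ # (Clarifying note, not in the original: since 1.9, a[...] returns a + # fresh view, so identity with a no longer holds, but the view's + # .base attribute still refers to a, as the next assertion checks.)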
+ assert_(a[...].base is a) + + # Slicing with ellipsis can skip an + # arbitrary number of dimensions + assert_equal(a[0, ...], a[0]) + assert_equal(a[0, ...], a[0, :]) + assert_equal(a[..., 0], a[:, 0]) + + # Slicing with ellipsis always results + # in an array, not a scalar + assert_equal(a[0, ..., 1], np.array(2)) + + # Assignment with `(Ellipsis,)` on 0-d arrays + b = np.array(1) + b[(Ellipsis,)] = 2 + assert_equal(b, 2) + + def test_single_int_index(self): + # Single integer index selects one row + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[0], [1, 2, 3]) + assert_equal(a[-1], [7, 8, 9]) + + # Index out of bounds produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 30) + # Index overflow produces IndexError + assert_raises(IndexError, a.__getitem__, 1 << 64) + + def test_single_bool_index(self): + # Single boolean index + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + assert_equal(a[np.array(True)], a[None]) + assert_equal(a[np.array(False)], a[None][0:0]) + + def test_boolean_shape_mismatch(self): + arr = np.ones((5, 4, 3)) + + index = np.array([True]) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.array([False] * 6) + assert_raises(IndexError, arr.__getitem__, index) + + index = np.zeros((4, 4), dtype=bool) + assert_raises(IndexError, arr.__getitem__, index) + + assert_raises(IndexError, arr.__getitem__, (slice(None), index)) + + def test_boolean_indexing_onedim(self): + # Indexing a 2-dimensional array with + # boolean array of length one + a = np.array([[0., 0., 0.]]) + b = np.array([True], dtype=bool) + assert_equal(a[b], a) + # boolean assignment + a[b] = 1. + assert_equal(a, [[1., 1., 1.]]) + + def test_boolean_assignment_value_mismatch(self): + # A boolean assignment should fail when the shape of the values + # cannot be broadcast to the subscription. (see also gh-3458) + a = np.arange(4) + + def f(a, v): + a[a > -1] = v + + assert_raises(ValueError, f, a, []) + assert_raises(ValueError, f, a, [1, 2, 3]) + assert_raises(ValueError, f, a[:1], [1, 2, 3]) + + def test_boolean_assignment_needs_api(self): + # See also gh-7666 + # This caused a segfault on Python 2 due to the GIL not being + # held when the iterator does not need it, but the transfer function + # does + arr = np.zeros(1000) + indx = np.zeros(1000, dtype=bool) + indx[:100] = True + arr[indx] = np.ones(100, dtype=object) + + expected = np.zeros(1000) + expected[:100] = 1 + assert_array_equal(arr, expected) + + def test_boolean_indexing_twodim(self): + # Indexing a 2-dimensional array with + # 2-dimensional boolean array + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = np.array([[ True, False, True], + [False, True, False], + [ True, False, True]]) + assert_equal(a[b], [1, 3, 5, 7, 9]) + assert_equal(a[b[1]], [[4, 5, 6]]) + assert_equal(a[b[0]], a[b[2]]) + + # boolean assignment + a[b] = 0 + assert_equal(a, [[0, 2, 0], + [4, 0, 6], + [0, 8, 0]]) + + def test_boolean_indexing_list(self): + # Regression test for #13715. It's a use-after-free bug which the + # test won't directly catch, but it will show up in valgrind. + a = np.array([1, 2, 3]) + b = [True, False, True] + # Two variants of the test because the first takes a fast path + assert_equal(a[b], [1, 3]) + assert_equal(a[None, b], [[1, 3]]) + + def test_reverse_strides_and_subspace_bufferinit(self): + # This tests that the strides are not reversed for simple and + # subspace fancy indexing. 
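+ # (Illustrative note, not in the original: b is all zeros viewed + # through negative strides and c is [4, 3, 2, 1, 0]; every write + # targets a[0], so forward iteration leaves 0 there, while an + # implementation that reversed the strides would leave 4.)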
+ a = np.ones(5) + b = np.zeros(5, dtype=np.intp)[::-1] + c = np.arange(5)[::-1] + + a[b] = c + # If the strides are not reversed, the 0 in the arange comes last. + assert_equal(a[0], 0) + + # This also tests that the subspace buffer is initialized: + a = np.ones((5, 2)) + c = np.arange(10).reshape(5, 2)[::-1] + a[b, :] = c + assert_equal(a[0], [0, 1]) + + def test_reversed_strides_result_allocation(self): + # Test a bug when calculating the output strides for a result array + # when the subspace size was 1 (and test other cases as well) + a = np.arange(10)[:, None] + i = np.arange(10)[::-1] + assert_array_equal(a[i], a[i.copy('C')]) + + a = np.arange(20).reshape(-1, 2) + + def test_uncontiguous_subspace_assignment(self): + # During development there was a bug activating a skip logic + # based on ndim instead of size. + a = np.full((3, 4, 2), -1) + b = np.full((3, 4, 2), -1) + + a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T + b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy() + + assert_equal(a, b) + + def test_too_many_fancy_indices_special_case(self): + # Just documents behaviour, this is a small limitation. + a = np.ones((1,) * 64) # 64 is NPY_MAXDIMS + assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 64) + + def test_scalar_array_bool(self): + # NumPy bools can be used as boolean index (python ones as of yet not) + a = np.array(1) + assert_equal(a[np.bool(True)], a[np.array(True)]) + assert_equal(a[np.bool(False)], a[np.array(False)]) + + # After deprecating bools as integers: + #a = np.array([0,1,2]) + #assert_equal(a[True, :], a[None, :]) + #assert_equal(a[:, True], a[:, None]) + # + #assert_(not np.may_share_memory(a, a[True, :])) + + def test_everything_returns_views(self): + # Before `...` would return a itself. + a = np.arange(5) + + assert_(a is not a[()]) + assert_(a is not a[...]) + assert_(a is not a[:]) + + def test_broaderrors_indexing(self): + a = np.zeros((5, 5)) + assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2])) + assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0) + + def test_trivial_fancy_out_of_bounds(self): + a = np.zeros(5) + ind = np.ones(20, dtype=np.intp) + ind[-1] = 10 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + ind = np.ones(20, dtype=np.intp) + ind[0] = 11 + assert_raises(IndexError, a.__getitem__, ind) + assert_raises(IndexError, a.__setitem__, ind, 0) + + def test_trivial_fancy_not_possible(self): + # Test that the fast path for trivial assignment is not incorrectly + # used when the index is not contiguous or 1D, see also gh-11467. + a = np.arange(6) + idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0] + assert_array_equal(a[idx], idx) + + # this case must not go into the fast path, note that idx is + # a non-contiguous none 1D array here. + a[idx] = -1 + res = np.arange(6) + res[0] = -1 + res[3] = -1 + assert_array_equal(a, res) + + def test_nonbaseclass_values(self): + class SubClass(np.ndarray): + def __array_finalize__(self, old): + # Have array finalize do funny things + self.fill(99) + + a = np.zeros((5, 5)) + s = a.copy().view(type=SubClass) + s.fill(1) + + a[[0, 1, 2, 3, 4], :] = s + assert_((a == 1).all()) + + # Subspace is last, so transposing might want to finalize + a[:, [0, 1, 2, 3, 4]] = s + assert_((a == 1).all()) + + a.fill(0) + a[...] 
= s + assert_((a == 1).all()) + + def test_array_like_values(self): + # Similar to the above test, but use a memoryview instead + a = np.zeros((5, 5)) + s = np.arange(25, dtype=np.float64).reshape(5, 5) + + a[[0, 1, 2, 3, 4], :] = memoryview(s) + assert_array_equal(a, s) + + a[:, [0, 1, 2, 3, 4]] = memoryview(s) + assert_array_equal(a, s) + + a[...] = memoryview(s) + assert_array_equal(a, s) + + @pytest.mark.parametrize("writeable", [True, False]) + def test_subclass_writeable(self, writeable): + d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)], + dtype=[('target', 'S20'), ('V_mag', '>f4')]) + d.flags.writeable = writeable + # Advanced indexing results are always writeable: + ind = np.array([False, True, True], dtype=bool) + assert d[ind].flags.writeable + ind = np.array([0, 1]) + assert d[ind].flags.writeable + # Views should be writeable if the original array is: + assert d[...].flags.writeable == writeable + assert d[0].flags.writeable == writeable + + def test_memory_order(self): + # This is not necessary to preserve. Memory layouts for + # more complex indices are not as simple. + a = np.arange(10) + b = np.arange(10).reshape(5, 2).T + assert_(a[b].flags.f_contiguous) + + # Takes a different implementation branch: + a = a.reshape(-1, 1) + assert_(a[b, 0].flags.f_contiguous) + + def test_scalar_return_type(self): + # Full scalar indices should return scalars and object + # arrays should not call PyArray_Return on their items + class Zero: + # The most basic valid indexing + def __index__(self): + return 0 + + z = Zero() + + class ArrayLike: + # Simple array, should behave like the array + def __array__(self, dtype=None, copy=None): + return np.array(0) + + a = np.zeros(()) + assert_(isinstance(a[()], np.float64)) + a = np.zeros(1) + assert_(isinstance(a[z], np.float64)) + a = np.zeros((1, 1)) + assert_(isinstance(a[z, np.array(0)], np.float64)) + assert_(isinstance(a[z, ArrayLike()], np.float64)) + + # And object arrays do not call it too often: + b = np.array(0) + a = np.array(0, dtype=object) + a[()] = b + assert_(isinstance(a[()], np.ndarray)) + a = np.array([b, None]) + assert_(isinstance(a[z], np.ndarray)) + a = np.array([[b, None]]) + assert_(isinstance(a[z, np.array(0)], np.ndarray)) + assert_(isinstance(a[z, ArrayLike()], np.ndarray)) + + def test_small_regressions(self): + # Reference count of intp for index checks + a = np.array([0]) + if HAS_REFCOUNT: + refcount = sys.getrefcount(np.dtype(np.intp)) + # item setting always checks indices in separate function: + a[np.array([0], dtype=np.intp)] = 1 + a[np.array([0], dtype=np.uint8)] = 1 + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.intp), 1) + assert_raises(IndexError, a.__setitem__, + np.array([1], dtype=np.uint8), 1) + + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount) + + def test_unaligned(self): + v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7] + d = v.view(np.dtype("S8")) + # unaligned source + x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7] + x = x.view(np.dtype("S8")) + x[...] = np.array("b" * 8, dtype="S") + b = np.arange(d.size) + # trivial + assert_equal(d[b], d) + d[b] = x + # nontrivial + # unaligned index array + b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)] + b = b.view(np.intp)[:d.size] + b[...] 
= np.arange(d.size) + assert_equal(d[b.astype(np.int16)], d) + d[b.astype(np.int16)] = x + # boolean + d[b % 2 == 0] + d[b % 2 == 0] = x[::2] + + def test_tuple_subclass(self): + arr = np.ones((5, 5)) + + # A tuple subclass should also be an nd-index + class TupleSubclass(tuple): + pass + index = ([1], [1]) + index = TupleSubclass(index) + assert_(arr[index].shape == (1,)) + # Unlike the non nd-index: + assert_(arr[index,].shape != (1,)) + + def test_broken_sequence_not_nd_index(self): + # See gh-5063: + # If we have an object which claims to be a sequence, but fails + # on item getting, this should not be converted to an nd-index (tuple) + # If this object happens to be a valid index otherwise, it should work + # This object here is very dubious and probably bad though: + class SequenceLike: + def __index__(self): + return 0 + + def __len__(self): + return 1 + + def __getitem__(self, item): + raise IndexError('Not possible') + + arr = np.arange(10) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + # also test that field indexing does not segfault + # for a similar reason, by indexing a structured array + arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')]) + assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),]) + + def test_indexing_array_weird_strides(self): + # See also gh-6221 + # the shapes used here come from the issue and create the correct + # size for the iterator buffering size. + x = np.ones(10) + x2 = np.ones((10, 2)) + ind = np.arange(10)[:, None, None, None] + ind = np.broadcast_to(ind, (10, 55, 4, 4)) + + # single advanced index case + assert_array_equal(x[ind], x[ind.copy()]) + # higher dimensional advanced index + zind = np.zeros(4, dtype=np.intp) + assert_array_equal(x2[ind, zind], x2[ind.copy(), zind]) + + def test_indexing_array_negative_strides(self): + # From gh-8264, + # core dumps if negative strides are used in iteration + arro = np.zeros((4, 4)) + arr = arro[::-1, ::-1] + + slices = (slice(None), [0, 1, 2, 3]) + arr[slices] = 10 + assert_array_equal(arr, 10.) + + def test_character_assignment(self): + # This is an example of a function going through CopyObject which + # used to have an untested special path for scalars + # (the character special dtype case, should be deprecated probably) + arr = np.zeros((1, 5), dtype="c") + arr[0] = np.str_("asdfg") # must assign as a sequence + assert_array_equal(arr[0], np.array("asdfg", dtype="c")) + assert arr[0, 1] == b"s" # make sure not all were set to "a" for both + + @pytest.mark.parametrize("index", + [True, False, np.array([0])]) + @pytest.mark.parametrize("num", [64, 80]) + @pytest.mark.parametrize("original_ndim", [1, 64]) + def test_too_many_advanced_indices(self, index, num, original_ndim): + # These are limitations based on the number of arguments we can process. + # For `num=64` (and all boolean cases), the result is actually defined; + # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons. + arr = np.ones((1,) * original_ndim) + with pytest.raises(IndexError): + arr[(index,) * num] + with pytest.raises(IndexError): + arr[(index,) * num] = 1. 
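+ # (Clarifying note, not in the original: each advanced index becomes + # one operand of the internal NpyIter, alongside the result array, so + # the hard limit above comes from NPY_MAXARGS rather than from what + # is logically representable.)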
+ + def test_nontuple_ndindex(self): + a = np.arange(25).reshape((5, 5)) + assert_equal(a[[0, 1]], np.array([a[0], a[1]])) + assert_equal(a[[0, 1], [0, 1]], np.array([0, 6])) + assert_raises(IndexError, a.__getitem__, [slice(None)]) + + def test_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([0, 5, 6]) + assert_equal(a.flat[b.flat], np.array([0, 5, 6])) + + def test_empty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array([], dtype="S") + assert_equal(a.flat[b.flat], np.array([])) + + def test_nonempty_string_flat_index_on_flatiter(self): + a = np.arange(9).reshape((3, 3)) + b = np.array(["a"], dtype="S") + with pytest.raises(IndexError, match="unsupported iterator index"): + a.flat[b.flat] + + +class TestFieldIndexing: + def test_scalar_return_type(self): + # Field access on an array should return an array, even if it + # is 0-d. + a = np.zeros((), [('a', 'f8')]) + assert_(isinstance(a['a'], np.ndarray)) + assert_(isinstance(a[['a']], np.ndarray)) + + +class TestBroadcastedAssignments: + def assign(self, a, ind, val): + a[ind] = val + return a + + def test_prepending_ones(self): + a = np.zeros((3, 2)) + + a[...] = np.ones((1, 3, 2)) + # Fancy with subspace with and without transpose + a[[0, 1, 2], :] = np.ones((1, 3, 2)) + a[:, [0, 1]] = np.ones((1, 3, 2)) + # Fancy without subspace (with broadcasting) + a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2)) + + def test_prepend_not_one(self): + assign = self.assign + s_ = np.s_ + a = np.zeros(5) + + # Too large and not only ones. + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2, 2, 1))) + + def test_simple_broadcasting_errors(self): + assign = self.assign + s_ = np.s_ + a = np.zeros((5, 1)) + + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2))) + assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0))) + assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1))) + + @pytest.mark.parametrize("index", [ + (..., [1, 2], slice(None)), + ([0, 1], ..., 0), + (..., [1, 2], [1, 2])]) + def test_broadcast_error_reports_correct_shape(self, index): + values = np.zeros((100, 100)) # will never broadcast below + + arr = np.zeros((3, 4, 5, 6, 7)) + # We currently report without any spaces (could be changed) + shape_str = str(arr[index].shape).replace(" ", "") + + with pytest.raises(ValueError) as e: + arr[index] = values + + assert str(e.value).endswith(shape_str) + + def test_index_is_larger(self): + # Simple case of fancy index broadcasting of the index. + a = np.zeros((5, 5)) + a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4] + + assert_((a[:3, :3] == [2, 3, 4]).all()) + + def test_broadcast_subspace(self): + a = np.zeros((100, 100)) + v = np.arange(100)[:, None] + b = np.arange(100)[::-1] + a[b] = v + assert_((a[::-1] == v).all()) + + +class TestSubclasses: + def test_basic(self): + # Test that indexing in various ways produces SubClass instances, + # and that the base is set up correctly: the original subclass + # instance for views, and a new ndarray for advanced/boolean indexing + # where a copy was made (latter a regression test for gh-11983). 
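+ # (Clarifying note, not in the original: for the sliced view, .base + # is the subclass instance s itself; for advanced and boolean + # indexing the data is first copied into a plain ndarray, so .base + # is that new ndarray rather than s.)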
+ class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s_slice = s[:3] + assert_(type(s_slice) is SubClass) + assert_(s_slice.base is s) + assert_array_equal(s_slice, a[:3]) + + s_fancy = s[[0, 1, 2]] + assert_(type(s_fancy) is SubClass) + assert_(s_fancy.base is not s) + assert_(type(s_fancy.base) is np.ndarray) + assert_array_equal(s_fancy, a[[0, 1, 2]]) + assert_array_equal(s_fancy.base, a[[0, 1, 2]]) + + s_bool = s[s > 0] + assert_(type(s_bool) is SubClass) + assert_(s_bool.base is not s) + assert_(type(s_bool.base) is np.ndarray) + assert_array_equal(s_bool, a[a > 0]) + assert_array_equal(s_bool.base, a[a > 0]) + + def test_fancy_on_read_only(self): + # Test that fancy indexing on read-only SubClass does not make a + # read-only copy (gh-14132) + class SubClass(np.ndarray): + pass + + a = np.arange(5) + s = a.view(SubClass) + s.flags.writeable = False + s_fancy = s[[0, 1, 2]] + assert_(s_fancy.flags.writeable) + + def test_finalize_gets_full_info(self): + # Array finalize should be called on the filled array. + class SubClass(np.ndarray): + def __array_finalize__(self, old): + self.finalize_status = np.array(self) + self.old = old + + s = np.arange(10).view(SubClass) + new_s = s[:3] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[[0, 1, 2, 3]] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + new_s = s[s > 0] + assert_array_equal(new_s.finalize_status, new_s) + assert_array_equal(new_s.old, s) + + +class TestFancyIndexingCast: + def test_boolean_index_cast_assign(self): + # Setup the boolean index and float arrays. + shape = (8, 63) + bool_index = np.zeros(shape).astype(bool) + bool_index[0, 1] = True + zero_array = np.zeros(shape) + + # Assigning float is fine. + zero_array[bool_index] = np.array([1]) + assert_equal(zero_array[0, 1], 1) + + # Fancy indexing works, although we get a cast warning. + assert_warns(ComplexWarning, + zero_array.__setitem__, ([0], [1]), np.array([2 + 1j])) + assert_equal(zero_array[0, 1], 2) # No complex part + + # Cast complex to float, throwing away the imaginary portion. + assert_warns(ComplexWarning, + zero_array.__setitem__, bool_index, np.array([1j])) + assert_equal(zero_array[0, 1], 0) + +class TestFancyIndexingEquivalence: + def test_object_assign(self): + # Check that the field and object special case using copyto is active. + # The right hand side cannot be converted to an array here. + a = np.arange(5, dtype=object) + b = a.copy() + a[:3] = [1, (1, 2), 3] + b[[0, 1, 2]] = [1, (1, 2), 3] + assert_array_equal(a, b) + + # test same for subspace fancy indexing + b = np.arange(5, dtype=object)[None, :] + b[[0], :3] = [[1, (1, 2), 3]] + assert_array_equal(a, b[0]) + + # Check that swapping of axes works. + # There was a bug that made the later assignment throw a ValueError + # do to an incorrectly transposed temporary right hand side (gh-5714) + b = b.T + b[:3, [0]] = [[1], [(1, 2)], [3]] + assert_array_equal(a, b[:, 0]) + + # Another test for the memory order of the subspace + arr = np.ones((3, 4, 5), dtype=object) + # Equivalent slicing assignment for comparison + cmp_arr = arr.copy() + cmp_arr[:1, ...] = [[[1], [2], [3], [4]]] + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + arr = arr.copy('F') + arr[[0], ...] = [[[1], [2], [3], [4]]] + assert_array_equal(arr, cmp_arr) + + def test_cast_equivalence(self): + # Yes, normal slicing uses unsafe casting. 
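+ # (Illustrative note, not in the original: unsafe casting is what + # lets the string values below be parsed into an integer array, e.g. + # np.array(['2']).astype(np.int64) succeeds even though + # np.can_cast(np.dtype('U1'), np.int64) is False.)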
+ a = np.arange(5) + b = a.copy() + + a[:3] = np.array(['2', '-3', '-1']) + b[[0, 2, 1]] = np.array(['2', '-1', '-3']) + assert_array_equal(a, b) + + # test the same for subspace fancy indexing + b = np.arange(5)[None, :] + b[[0], :3] = np.array([['2', '-3', '-1']]) + assert_array_equal(a, b[0]) + + +class TestMultiIndexingAutomated: + """ + These tests use code to mimic the C-Code indexing for selection. + + NOTE: + + * This still lacks tests for complex item setting. + * If you change behavior of indexing, you might want to modify + these tests to try more combinations. + * Behavior was written to match numpy version 1.8. (though a + first version matched 1.7.) + * Only tuple indices are supported by the mimicking code. + (and tested as of writing this) + * Error types should match most of the time as long as there + is only one error. For multiple errors, what gets raised + will usually not be the same one. They are *not* tested. + + Update 2016-11-30: It is probably not worth maintaining this test + indefinitely and it can be dropped if maintenance becomes a burden. + + """ + + def setup_method(self): + self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6) + self.b = np.empty((3, 0, 5, 6)) + self.complex_indices = ['skip', Ellipsis, + 0, + # Boolean indices, up to 3-d for some special cases of eating up + # dimensions, also need to test all False + np.array([True, False, False]), + np.array([[True, False], [False, True]]), + np.array([[[False, False], [False, False]]]), + # Some slices: + slice(-5, 5, 2), + slice(1, 1, 100), + slice(4, -1, -2), + slice(None, None, -3), + # Some Fancy indexes: + np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast + np.array([0, 1, -2]), + np.array([[2], [0], [1]]), + np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()), + np.array([2, -1], dtype=np.int8), + np.zeros([1] * 31, dtype=int), # trigger too large array. + np.array([0., 1.])] # invalid datatype + # Some simpler indices that still cover a bit more + self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), + 'skip'] + # Very simple ones to fill the rest: + self.fill_indices = [slice(None, None), 0] + + def _get_multi_index(self, arr, indices): + """Mimic multi dimensional indexing. + + Parameters + ---------- + arr : ndarray + Array to be indexed. + indices : tuple of index objects + + Returns + ------- + out : ndarray + An array equivalent to the indexing operation (but always a copy). + `arr[indices]` should be identical. + no_copy : bool + Whether the indexing operation requires a copy. If this is `True`, + `np.may_share_memory(arr, arr[indices])` should be `True` (with + some exceptions for scalars and possibly 0-d arrays). + + Notes + ----- + While the function may mostly match the errors of normal indexing this + is generally not the case. + """ + in_indices = list(indices) + indices = [] + # if False, this is a fancy or boolean index + no_copy = True + # number of fancy/scalar indexes that are not consecutive + num_fancy = 0 + # number of dimensions indexed by a "fancy" index + fancy_dim = 0 + # NOTE: This is a funny twist (and probably OK to change). + # The boolean array has illegal indexes, but this is + # allowed if the broadcast fancy-indices are 0-sized. + # This variable is to catch that case. + error_unless_broadcast_to_empty = False + + # We need to handle Ellipsis and make arrays from indices, also + # check if this is fancy indexing (set no_copy). + ndim = 0 + ellipsis_pos = None # define here mostly to replace all but first. 
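+        # First pass: count the dimensions each index consumes (``ndim``),
+        # record where the first Ellipsis sits, and clear ``no_copy`` as
+        # soon as a fancy or boolean index is encountered.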
+        for i, indx in enumerate(in_indices):
+            if indx is None:
+                continue
+            if isinstance(indx, np.ndarray) and indx.dtype == bool:
+                no_copy = False
+                if indx.ndim == 0:
+                    raise IndexError
+                # boolean indices can have higher dimensions
+                ndim += indx.ndim
+                fancy_dim += indx.ndim
+                continue
+            if indx is Ellipsis:
+                if ellipsis_pos is None:
+                    ellipsis_pos = i
+                    continue  # do not increment ndim counter
+                raise IndexError
+            if isinstance(indx, slice):
+                ndim += 1
+                continue
+            if not isinstance(indx, np.ndarray):
+                # This could be open for changes in numpy.
+                # numpy should maybe raise an error if casting to intp
+                # is not safe. It rejects np.array([1., 2.]) but not
+                # [1., 2.] as index (same for e.g. np.take).
+                # (Note the importance of empty lists if changing this here)
+                try:
+                    indx = np.array(indx, dtype=np.intp)
+                except ValueError:
+                    raise IndexError
+                in_indices[i] = indx
+            elif indx.dtype.kind not in 'bi':
+                raise IndexError('arrays used as indices must be of '
+                                 'integer (or boolean) type')
+            if indx.ndim != 0:
+                no_copy = False
+            ndim += 1
+            fancy_dim += 1
+
+        if arr.ndim - ndim < 0:
+            # we can't take more dimensions than we have, not even for 0-d
+            # arrays, since a[()] makes sense, but not a[(),]. We will
+            # raise an error later on, unless a broadcasting error occurs
+            # first.
+            raise IndexError
+
+        if ndim == 0 and None not in in_indices:
+            # Well we have no indexes or one Ellipsis. This is legal.
+            return arr.copy(), no_copy
+
+        if ellipsis_pos is not None:
+            in_indices[ellipsis_pos:ellipsis_pos + 1] = ([slice(None, None)] *
+                                                         (arr.ndim - ndim))
+
+        for ax, indx in enumerate(in_indices):
+            if isinstance(indx, slice):
+                # convert to an index array
+                indx = np.arange(*indx.indices(arr.shape[ax]))
+                indices.append(['s', indx])
+                continue
+            elif indx is None:
+                # this is like taking a slice with one element from a new axis:
+                indices.append(['n', np.array([0], dtype=np.intp)])
+                arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:])
+                continue
+            if isinstance(indx, np.ndarray) and indx.dtype == bool:
+                if indx.shape != arr.shape[ax:ax + indx.ndim]:
+                    raise IndexError
+
+                try:
+                    flat_indx = np.ravel_multi_index(np.nonzero(indx),
+                                    arr.shape[ax:ax + indx.ndim], mode='raise')
+                except Exception:
+                    error_unless_broadcast_to_empty = True
+                    # fill with 0s instead, and raise error later
+                    flat_indx = np.array([0] * indx.sum(), dtype=np.intp)
+                # concatenate axis into a single one:
+                if indx.ndim != 0:
+                    arr = arr.reshape(arr.shape[:ax]
+                                      + (np.prod(arr.shape[ax:ax + indx.ndim]),)
+                                      + arr.shape[ax + indx.ndim:])
+                    indx = flat_indx
+                else:
+                    # This could be changed, a 0-d boolean index can
+                    # make sense (even outside the 0-d indexed array case)
+                    # Note that originally this could be interpreted as an
+                    # integer in the full integer special case.
+                    raise IndexError
+            # If the index is a singleton, the bounds check is done
+            # before the broadcasting. This used to be different in <1.9
+            elif indx.ndim == 0 and not (
+                -arr.shape[ax] <= indx < arr.shape[ax]
+            ):
+                raise IndexError
+            if indx.ndim == 0:
+                # The index is a scalar. This used to be twofold, but if
+                # fancy indexing was active, the check was done later,
+                # possibly after broadcasting it away (1.7. or earlier).
+                # Now it is always done.
+                if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
+                    raise IndexError
+            if (len(indices) > 0 and
+                    indices[-1][0] == 'f' and
+                    ax != ellipsis_pos):
+                # NOTE: There could still have been a 0-sized Ellipsis
+                # between them. Checked that with ellipsis_pos.
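+                # Appending to the previous 'f' entry keeps consecutive
+                # fancy indices grouped into one broadcast set.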
+ indices[-1].append(indx) + else: + # We have a fancy index that is not after an existing one. + # NOTE: A 0-d array triggers this as well, while one may + # expect it to not trigger it, since a scalar would not be + # considered fancy indexing. + num_fancy += 1 + indices.append(['f', indx]) + + if num_fancy > 1 and not no_copy: + # We have to flush the fancy indexes left + new_indices = indices[:] + axes = list(range(arr.ndim)) + fancy_axes = [] + new_indices.insert(0, ['f']) + ni = 0 + ai = 0 + for indx in indices: + ni += 1 + if indx[0] == 'f': + new_indices[0].extend(indx[1:]) + del new_indices[ni] + ni -= 1 + for ax in range(ai, ai + len(indx[1:])): + fancy_axes.append(ax) + axes.remove(ax) + ai += len(indx) - 1 # axis we are at + indices = new_indices + # and now we need to transpose arr: + arr = arr.transpose(*(fancy_axes + axes)) + + # We only have one 'f' index now and arr is transposed accordingly. + # Now handle newaxis by reshaping... + ax = 0 + for indx in indices: + if indx[0] == 'f': + if len(indx) == 1: + continue + # First of all, reshape arr to combine fancy axes into one: + orig_shape = arr.shape + orig_slice = orig_shape[ax:ax + len(indx[1:])] + arr = arr.reshape(arr.shape[:ax] + + (np.prod(orig_slice).astype(int),) + + arr.shape[ax + len(indx[1:]):]) + + # Check if broadcasting works + res = np.broadcast(*indx[1:]) + # unfortunately the indices might be out of bounds. So check + # that first, and use mode='wrap' then. However only if + # there are any indices... + if res.size != 0: + if error_unless_broadcast_to_empty: + raise IndexError + for _indx, _size in zip(indx[1:], orig_slice): + if _indx.size == 0: + continue + if np.any(_indx >= _size) or np.any(_indx < -_size): + raise IndexError + if len(indx[1:]) == len(orig_slice): + if np.prod(orig_slice) == 0: + # Work around for a crash or IndexError with 'wrap' + # in some 0-sized cases. + try: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='raise') + except Exception: + # This happens with 0-sized orig_slice (sometimes?) + # here it is a ValueError, but indexing gives a: + raise IndexError('invalid index into 0-sized') + else: + mi = np.ravel_multi_index(indx[1:], orig_slice, + mode='wrap') + else: + # Maybe never happens... + raise ValueError + arr = arr.take(mi.ravel(), axis=ax) + try: + arr = arr.reshape(arr.shape[:ax] + + mi.shape + + arr.shape[ax + 1:]) + except ValueError: + # too many dimensions, probably + raise IndexError + ax += mi.ndim + continue + + # If we are here, we have a 1D array for take: + arr = arr.take(indx[1], axis=ax) + ax += 1 + + return arr, no_copy + + def _check_multi_index(self, arr, index): + """Check a multi index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be a reshaped arange. + index : tuple of indexing objects + Index being tested. + """ + # Test item getting + try: + mimic_get, no_copy = self._get_multi_index(arr, index) + except Exception as e: + if HAS_REFCOUNT: + prev_refcount = sys.getrefcount(arr) + assert_raises(type(e), arr.__getitem__, index) + assert_raises(type(e), arr.__setitem__, index, 0) + if HAS_REFCOUNT: + assert_equal(prev_refcount, sys.getrefcount(arr)) + return + + self._compare_index_result(arr, index, mimic_get, no_copy) + + def _check_single_index(self, arr, index): + """Check a single index item getting and simple setting. + + Parameters + ---------- + arr : ndarray + Array to be indexed, must be an arange. + index : indexing object + Index being tested. 
Must be a single index and not a tuple
+            of indexing objects (see also `_check_multi_index`).
+        """
+        try:
+            mimic_get, no_copy = self._get_multi_index(arr, (index,))
+        except Exception as e:
+            if HAS_REFCOUNT:
+                prev_refcount = sys.getrefcount(arr)
+            assert_raises(type(e), arr.__getitem__, index)
+            assert_raises(type(e), arr.__setitem__, index, 0)
+            if HAS_REFCOUNT:
+                assert_equal(prev_refcount, sys.getrefcount(arr))
+            return
+
+        self._compare_index_result(arr, index, mimic_get, no_copy)
+
+    def _compare_index_result(self, arr, index, mimic_get, no_copy):
+        """Compare mimicked result to indexing result.
+        """
+        arr = arr.copy()
+        if HAS_REFCOUNT:
+            startcount = sys.getrefcount(arr)
+        indexed_arr = arr[index]
+        assert_array_equal(indexed_arr, mimic_get)
+        # Check if we got a view, unless it's a 0-sized or 0-d array.
+        # (then it's not a view, and that does not matter)
+        if indexed_arr.size != 0 and indexed_arr.ndim != 0:
+            assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
+            # Check reference count of the original array
+            if HAS_REFCOUNT:
+                if no_copy:
+                    # refcount increases by one:
+                    assert_equal(sys.getrefcount(arr), startcount + 1)
+                else:
+                    assert_equal(sys.getrefcount(arr), startcount)
+
+        # Test non-broadcast setitem:
+        b = arr.copy()
+        b[index] = mimic_get + 1000
+        if b.size == 0:
+            return  # nothing to compare here...
+        if no_copy and indexed_arr.ndim != 0:
+            # change indexed_arr in-place to manipulate original:
+            indexed_arr += 1000
+            assert_array_equal(arr, b)
+            return
+        # Use the fact that the array is originally an arange:
+        arr.flat[indexed_arr.ravel()] += 1000
+        assert_array_equal(arr, b)
+
+    def test_boolean(self):
+        a = np.array(5)
+        assert_equal(a[np.array(True)], 5)
+        a[np.array(True)] = 1
+        assert_equal(a, 1)
+        # NOTE: This is different from normal broadcasting, as
+        # arr[boolean_array] works like in a multi index. Which means
+        # it is aligned to the left. This is probably correct for
+        # consistency with arr[boolean_array,]; also, no broadcasting
+        # is done at all.
+        self._check_multi_index(
+            self.a, (np.zeros_like(self.a, dtype=bool),))
+        self._check_multi_index(
+            self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
+        self._check_multi_index(
+            self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
+
+    def test_multidim(self):
+        # Automatically test combinations with complex indexes on 2nd (or 1st)
+        # spot and the simple ones in one other spot.
+        with warnings.catch_warnings():
+            # This is so that np.array(True) is not accepted in a full integer
+            # index, when running the file separately.
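+            # Promote the warnings to errors so that deprecated index
+            # types fail here the same way they would in real indexing.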
+            warnings.filterwarnings('error', '', DeprecationWarning)
+            warnings.filterwarnings('error', '', VisibleDeprecationWarning)
+
+            def isskip(idx):
+                return isinstance(idx, str) and idx == "skip"
+
+            for simple_pos in [0, 2, 3]:
+                tocheck = [self.fill_indices, self.complex_indices,
+                           self.fill_indices, self.fill_indices]
+                tocheck[simple_pos] = self.simple_indices
+                for index in product(*tocheck):
+                    index = tuple(i for i in index if not isskip(i))
+                    self._check_multi_index(self.a, index)
+                    self._check_multi_index(self.b, index)
+
+        # Check very simple item getting:
+        self._check_multi_index(self.a, (0, 0, 0, 0))
+        self._check_multi_index(self.b, (0, 0, 0, 0))
+        # Also check (simple cases of) too many indices:
+        assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
+        assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
+        assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
+        assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
+
+    def test_1d(self):
+        a = np.arange(10)
+        for index in self.complex_indices:
+            self._check_single_index(a, index)
+
+
+class TestFloatNonIntegerArgument:
+    """
+    These test that ``TypeError`` is raised when you try to use
+    non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
+    and ``a[0.5]``, or for other functions like ``array.reshape(1., -1)``.
+
+    """
+    def test_valid_indexing(self):
+        # These should raise no errors.
+        a = np.array([[[5]]])
+
+        a[np.array([0])]
+        a[[0, 0]]
+        a[:, [0, 0]]
+        a[:, 0, :]
+        a[:, :, :]
+
+    def test_valid_slicing(self):
+        # These should raise no errors.
+        a = np.array([[[5]]])
+
+        a[::]
+        a[0:]
+        a[:2]
+        a[0:2]
+        a[::2]
+        a[1::2]
+        a[:2:2]
+        a[1:2:2]
+
+    def test_non_integer_argument_errors(self):
+        a = np.array([[5]])
+
+        assert_raises(TypeError, np.reshape, a, (1., 1., -1))
+        assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
+        assert_raises(TypeError, np.take, a, [0], 1.)
+        assert_raises(TypeError, np.take, a, [0], np.float64(1.))
+
+    def test_non_integer_sequence_multiplication(self):
+        # NumPy scalar sequence multiply should not work with non-integers
+        def mult(a, b):
+            return a * b
+
+        assert_raises(TypeError, mult, [1], np.float64(3))
+        # following should be OK
+        mult([1], np.int_(3))
+
+    def test_reduce_axis_float_index(self):
+        d = np.zeros((3, 3, 3))
+        assert_raises(TypeError, np.min, d, 0.5)
+        assert_raises(TypeError, np.min, d, (0.5, 1))
+        assert_raises(TypeError, np.min, d, (1, 2.2))
+        assert_raises(TypeError, np.min, d, (.2, 1.2))
+
+
+class TestBooleanIndexing:
+    # Using a boolean as integer argument/indexing is an error.
+    def test_bool_as_int_argument_errors(self):
+        a = np.array([[[1]]])
+
+        assert_raises(TypeError, np.reshape, a, (True, -1))
+        assert_raises(TypeError, np.reshape, a, (np.bool(True), -1))
+        # Note that operator.index(np.array(True)) does not work, a boolean
+        # array is thus also deprecated, but not with the same message:
+        assert_raises(TypeError, operator.index, np.array(True))
+        assert_raises(TypeError, operator.index, np.True_)
+        assert_raises(TypeError, np.take, args=(a, [0], False))
+
+    def test_boolean_indexing_weirdness(self):
+        # Weird boolean indexing things
+        a = np.ones((2, 3, 4))
+        assert a[False, True, ...].shape == (0, 2, 3, 4)
+        assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2)
+        assert_raises(IndexError, lambda: a[False, [0, 1], ...])
+
+    def test_boolean_indexing_fast_path(self):
+        # These used to either give the wrong error, or incorrectly give no
+        # error.
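+        # Every case below must raise IndexError, with a message naming
+        # the axis whose boolean length does not match the array.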
+        a = np.ones((3, 3))
+
+        # This used to incorrectly work (and give an array of shape (0,))
+        idx1 = np.array([[False] * 9])
+        assert_raises_regex(IndexError,
+            "boolean index did not match indexed array along axis 0; "
+            "size of axis is 3 but size of corresponding boolean axis is 1",
+            lambda: a[idx1])
+
+        # This used to incorrectly give a ValueError: operands could not be broadcast together
+        idx2 = np.array([[False] * 8 + [True]])
+        assert_raises_regex(IndexError,
+            "boolean index did not match indexed array along axis 0; "
+            "size of axis is 3 but size of corresponding boolean axis is 1",
+            lambda: a[idx2])
+
+        # This is the same as it used to be. The above two should work like this.
+        idx3 = np.array([[False] * 10])
+        assert_raises_regex(IndexError,
+            "boolean index did not match indexed array along axis 0; "
+            "size of axis is 3 but size of corresponding boolean axis is 1",
+            lambda: a[idx3])
+
+        # This used to give ValueError: non-broadcastable operand
+        a = np.ones((1, 1, 2))
+        idx = np.array([[[True], [False]]])
+        assert_raises_regex(IndexError,
+            "boolean index did not match indexed array along axis 1; "
+            "size of axis is 1 but size of corresponding boolean axis is 2",
+            lambda: a[idx])
+
+
+class TestArrayToIndexDeprecation:
+    """Creating an index from a non-0-d array is an error.
+
+    """
+    def test_array_to_index_error(self):
+        # The raising itself is effectively tested above, so no other
+        # exception is expected here.
+        a = np.array([[[1]]])
+
+        assert_raises(TypeError, operator.index, np.array([1]))
+        assert_raises(TypeError, np.reshape, a, (a, -1))
+        assert_raises(TypeError, np.take, a, [0], a)
+
+
+class TestNonIntegerArrayLike:
+    """Tests that array-likes are only valid if they can be safely cast
+    to integer.
+
+    For instance, lists give IndexError when they cannot be safely cast to
+    an integer.
+
+    """
+    def test_basic(self):
+        a = np.arange(10)
+
+        assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
+        assert_raises(IndexError, a.__getitem__, (['1', '2'],))
+
+        # The following is valid
+        a.__getitem__([])
+
+
+class TestMultipleEllipsisError:
+    """An index can only have a single ellipsis.
+ + """ + def test_basic(self): + a = np.arange(10) + assert_raises(IndexError, lambda: a[..., ...]) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,)) + assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,)) + + +class TestCApiAccess: + def test_getitem(self): + subscript = functools.partial(array_indexing, 0) + + # 0-d arrays don't work: + assert_raises(IndexError, subscript, np.ones(()), 0) + # Out of bound values: + assert_raises(IndexError, subscript, np.ones(10), 11) + assert_raises(IndexError, subscript, np.ones(10), -11) + assert_raises(IndexError, subscript, np.ones((10, 10)), 11) + assert_raises(IndexError, subscript, np.ones((10, 10)), -11) + + a = np.arange(10) + assert_array_equal(a[4], subscript(a, 4)) + a = a.reshape(5, 2) + assert_array_equal(a[-4], subscript(a, -4)) + + def test_setitem(self): + assign = functools.partial(array_indexing, 1) + + # Deletion is impossible: + assert_raises(ValueError, assign, np.ones(10), 0) + # 0-d arrays don't work: + assert_raises(IndexError, assign, np.ones(()), 0, 0) + # Out of bound values: + assert_raises(IndexError, assign, np.ones(10), 11, 0) + assert_raises(IndexError, assign, np.ones(10), -11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0) + assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0) + + a = np.arange(10) + assign(a, 4, 10) + assert_(a[4] == 10) + + a = a.reshape(5, 2) + assign(a, 4, 10) + assert_array_equal(a[-1], [10, 10]) diff --git a/python/numpy/_core/tests/test_item_selection.py b/python/numpy/_core/tests/test_item_selection.py new file mode 100644 index 000000000..79fb82dde --- /dev/null +++ b/python/numpy/_core/tests/test_item_selection.py @@ -0,0 +1,167 @@ +import sys + +import pytest + +import numpy as np +from numpy.testing import HAS_REFCOUNT, assert_, assert_array_equal, assert_raises + + +class TestTake: + def test_simple(self): + a = [[1, 2], [3, 4]] + a_str = [[b'1', b'2'], [b'3', b'4']] + modes = ['raise', 'wrap', 'clip'] + indices = [-1, 4] + index_arrays = [np.empty(0, dtype=np.intp), + np.empty((), dtype=np.intp), + np.empty((1, 1), dtype=np.intp)] + real_indices = {'raise': {-1: 1, 4: IndexError}, + 'wrap': {-1: 1, 4: 0}, + 'clip': {-1: 0, 4: 1}} + # Currently all types but object, use the same function generation. + # So it should not be necessary to test all. However test also a non + # refcounted struct on top of object, which has a size that hits the + # default (non-specialized) path. 
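+        # real_indices maps each probe index (-1 and 4) to the expected
+        # outcome per mode: 'raise' keeps -1 as the last element (1) but
+        # raises for 4; 'wrap' folds them to 1 and 0; 'clip' pins them to
+        # the edges 0 and 1.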
+ types = int, object, np.dtype([('', 'i2', 3)]) + for t in types: + # ta works, even if the array may be odd if buffer interface is used + ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t) + tresult = list(ta.T.copy()) + for index_array in index_arrays: + if index_array.size != 0: + tresult[0].shape = (2,) + index_array.shape + tresult[1].shape = (2,) + index_array.shape + for mode in modes: + for index in indices: + real_index = real_indices[mode][index] + if real_index is IndexError and index_array.size != 0: + index_array.put(0, index) + assert_raises(IndexError, ta.take, index_array, + mode=mode, axis=1) + elif index_array.size != 0: + index_array.put(0, index) + res = ta.take(index_array, mode=mode, axis=1) + assert_array_equal(res, tresult[real_index]) + else: + res = ta.take(index_array, mode=mode, axis=1) + assert_(res.shape == (2,) + index_array.shape) + + def test_refcounting(self): + objects = [object() for i in range(10)] + if HAS_REFCOUNT: + orig_rcs = [sys.getrefcount(o) for o in objects] + for mode in ('raise', 'clip', 'wrap'): + a = np.array(objects) + b = np.array([2, 2, 4, 5, 3, 5]) + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) + # not contiguous, example: + a = np.array(objects * 2)[::2] + a.take(b, out=a[:6], mode=mode) + del a + if HAS_REFCOUNT: + assert_(all(sys.getrefcount(o) == rc + 1 + for o, rc in zip(objects, orig_rcs))) + + def test_unicode_mode(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.take, 5, mode=k) + + def test_empty_partition(self): + # In reference to github issue #6530 + a_original = np.array([0, 2, 4, 6, 8, 10]) + a = a_original.copy() + + # An empty partition should be a successful no-op + a.partition(np.array([], dtype=np.int16)) + + assert_array_equal(a, a_original) + + def test_empty_argpartition(self): + # In reference to github issue #6530 + a = np.array([0, 2, 4, 6, 8, 10]) + a = a.argpartition(np.array([], dtype=np.int16)) + + b = np.array([0, 1, 2, 3, 4, 5]) + assert_array_equal(a, b) + + +class TestPutMask: + @pytest.mark.parametrize("dtype", list(np.typecodes["All"]) + ["i,O"]) + def test_simple(self, dtype): + if dtype.lower() == "m": + dtype += "8[ns]" + + # putmask is weird and doesn't care about value length (even shorter) + vals = np.arange(1001).astype(dtype=dtype) + + mask = np.random.randint(2, size=1000).astype(bool) + # Use vals.dtype in case of flexible dtype (i.e. string) + arr = np.zeros(1000, dtype=vals.dtype) + zeros = arr.copy() + + np.putmask(arr, mask, vals) + assert_array_equal(arr[mask], vals[:len(mask)][mask]) + assert_array_equal(arr[~mask], zeros[~mask]) + + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_empty(self, dtype, mode): + arr = np.zeros(1000, dtype=dtype) + arr_copy = arr.copy() + mask = np.random.randint(2, size=1000).astype(bool) + + # Allowing empty values like this is weird... + np.put(arr, mask, []) + assert_array_equal(arr, arr_copy) + + +class TestPut: + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_simple(self, dtype, mode): + if dtype.lower() == "m": + dtype += "8[ns]" + + # put is weird and doesn't care about value length (even shorter) + vals = np.arange(1001).astype(dtype=dtype) + + # Use vals.dtype in case of flexible dtype (i.e. 
string) + arr = np.zeros(1000, dtype=vals.dtype) + zeros = arr.copy() + + if mode == "clip": + # Special because 0 and -1 value are "reserved" for clip test + indx = np.random.permutation(len(arr) - 2)[:-500] + 1 + + indx[-1] = 0 + indx[-2] = len(arr) - 1 + indx_put = indx.copy() + indx_put[-1] = -1389 + indx_put[-2] = 1321 + else: + # Avoid duplicates (for simplicity) and fill half only + indx = np.random.permutation(len(arr) - 3)[:-500] + indx_put = indx + if mode == "wrap": + indx_put = indx_put + len(arr) + + np.put(arr, indx_put, vals, mode=mode) + assert_array_equal(arr[indx], vals[:len(indx)]) + untouched = np.ones(len(arr), dtype=bool) + untouched[indx] = False + assert_array_equal(arr[untouched], zeros[:untouched.sum()]) + + @pytest.mark.parametrize("dtype", list(np.typecodes["All"])[1:] + ["i,O"]) + @pytest.mark.parametrize("mode", ["raise", "wrap", "clip"]) + def test_empty(self, dtype, mode): + arr = np.zeros(1000, dtype=dtype) + arr_copy = arr.copy() + + # Allowing empty values like this is weird... + np.put(arr, [1, 2, 3], []) + assert_array_equal(arr, arr_copy) diff --git a/python/numpy/_core/tests/test_limited_api.py b/python/numpy/_core/tests/test_limited_api.py new file mode 100644 index 000000000..984210e53 --- /dev/null +++ b/python/numpy/_core/tests/test_limited_api.py @@ -0,0 +1,102 @@ +import os +import subprocess +import sys +import sysconfig + +import pytest + +from numpy.testing import IS_EDITABLE, IS_PYPY, IS_WASM, NOGIL_BUILD + +# This import is copied from random.tests.test_extending +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + + # Note: keep in sync with the one in pyproject.toml + required_version = "3.0.6" + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + +pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) + + +@pytest.fixture(scope='module') +def install_temp(tmpdir_factory): + # Based in part on test_cython from random.tests.test_extending + if IS_WASM: + pytest.skip("No subprocess") + + srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'limited_api') + build_dir = tmpdir_factory.mktemp("limited_api") / "build" + os.makedirs(build_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + + try: + subprocess.check_call(["meson", "--version"]) + except FileNotFoundError: + pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--werror", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(srcdir)], + cwd=build_dir, + ) + else: + subprocess.check_call(["meson", "setup", "--werror", + "--native-file", native_file, str(srcdir)], + cwd=build_dir + ) + try: + subprocess.check_call( + ["meson", "compile", "-vv"], cwd=build_dir) + except subprocess.CalledProcessError as p: + print(f"{p.stdout=}") + 
print(f"{p.stderr=}") + raise + + sys.path.append(str(build_dir)) + + +@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") +@pytest.mark.xfail( + sysconfig.get_config_var("Py_DEBUG"), + reason=( + "Py_LIMITED_API is incompatible with Py_DEBUG, Py_TRACE_REFS, " + "and Py_REF_DEBUG" + ), +) +@pytest.mark.xfail( + NOGIL_BUILD, + reason="Py_GIL_DISABLED builds do not currently support the limited API", +) +@pytest.mark.skipif(IS_PYPY, reason="no support for limited API in PyPy") +def test_limited_api(install_temp): + """Test building a third-party C extension with the limited API + and building a cython extension with the limited API + """ + + import limited_api1 # Earliest (3.6) # noqa: F401 + import limited_api2 # cython # noqa: F401 + import limited_api_latest # Latest version (current Python) # noqa: F401 diff --git a/python/numpy/_core/tests/test_longdouble.py b/python/numpy/_core/tests/test_longdouble.py new file mode 100644 index 000000000..f7edd9774 --- /dev/null +++ b/python/numpy/_core/tests/test_longdouble.py @@ -0,0 +1,369 @@ +import platform +import warnings + +import pytest + +import numpy as np +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.testing import ( + IS_MUSL, + assert_, + assert_array_equal, + assert_equal, + assert_raises, + temppath, +) + +LD_INFO = np.finfo(np.longdouble) +longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps) + + +_o = 1 + LD_INFO.eps +string_to_longdouble_inaccurate = (_o != np.longdouble(str(_o))) +del _o + + +def test_scalar_extraction(): + """Confirm that extracting a value doesn't convert to python float""" + o = 1 + LD_INFO.eps + a = np.array([o, o, o]) + assert_equal(a[1], o) + + +# Conversions string -> long double + +# 0.1 not exactly representable in base 2 floating point. +repr_precision = len(repr(np.longdouble(0.1))) +# +2 from macro block starting around line 842 in scalartypes.c.src. + + +@pytest.mark.skipif(IS_MUSL, + reason="test flaky on musllinux") +@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision, + reason="repr precision not enough to show eps") +def test_str_roundtrip(): + # We will only see eps in repr if within printing precision. + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(str(o)), o, f"str was {str(o)}") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_str_roundtrip_bytes(): + o = 1 + LD_INFO.eps + assert_equal(np.longdouble(str(o).encode("ascii")), o) + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes)) +def test_array_and_stringlike_roundtrip(strtype): + """ + Test that string representations of long-double roundtrip both + for array casting and scalar coercion, see also gh-15608. 
+ """ + o = 1 + LD_INFO.eps + + if strtype in (np.bytes_, bytes): + o_str = strtype(str(o).encode("ascii")) + else: + o_str = strtype(str(o)) + + # Test that `o` is correctly coerced from the string-like + assert o == np.longdouble(o_str) + + # Test that arrays also roundtrip correctly: + o_strarr = np.asarray([o] * 3, dtype=strtype) + assert (o == o_strarr.astype(np.longdouble)).all() + + # And array coercion and casting to string give the same as scalar repr: + assert (o_strarr == o_str).all() + assert (np.asarray([o] * 3).astype(strtype) == o_str).all() + + +def test_bogus_string(): + assert_raises(ValueError, np.longdouble, "spam") + assert_raises(ValueError, np.longdouble, "1.0 flub") + + +@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l") +def test_fromstring(): + o = 1 + LD_INFO.eps + s = (" " + str(o)) * 5 + a = np.array([o] * 5) + assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a, + err_msg=f"reading '{s}'") + + +def test_fromstring_complex(): + for ctype in ["complex", "cdouble"]: + # Check spacing between separator + assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype), + np.array([1., 2., 3., 4.])) + # Real component not specified + assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype), + np.array([1.j, -2.j, 3.j, 40.j])) + # Both components specified + assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype), + np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j])) + # Spaces at wrong places + with assert_raises(ValueError): + np.fromstring("1+2 j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+ 2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1 +2j,3", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+j", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1+", dtype=ctype, sep=",") + with assert_raises(ValueError): + np.fromstring("1j+1", dtype=ctype, sep=",") + + +def test_fromstring_bogus(): + with assert_raises(ValueError): + np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" ") + + +def test_fromstring_empty(): + with assert_raises(ValueError): + np.fromstring("xxxxx", sep="x") + + +def test_fromstring_missing(): + with assert_raises(ValueError): + np.fromstring("1xx3x4x5x6", sep="x") + + +class TestFileBased: + + ldbl = 1 + LD_INFO.eps + tgt = np.array([ldbl] * 5) + out = ''.join([str(t) + '\n' for t in tgt]) + + def test_fromfile_bogus(self): + with temppath() as path: + with open(path, 'w') as f: + f.write("1. 2. 3. flop 4.\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=float, sep=" ") + + def test_fromfile_complex(self): + for ctype in ["complex", "cdouble"]: + # Check spacing between separator and only real component specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1, 2 , 3 ,4\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1., 2., 3., 4.])) + + # Real component not specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1j, -2j, 3j, 4e1j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j])) + + # Both components specified + with temppath() as path: + with open(path, 'w') as f: + f.write("1+1j,2-2j, -3+3j, -4e1+4j\n") + + res = np.fromfile(path, dtype=ctype, sep=",") + assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. 
+ 4j])) + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+2 j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1+ 2j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Spaces at wrong places + with temppath() as path: + with open(path, 'w') as f: + f.write("1 +2j,3\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1+j\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1+\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + # Wrong sep + with temppath() as path: + with open(path, 'w') as f: + f.write("1j+1\n") + + with assert_raises(ValueError): + np.fromfile(path, dtype=ctype, sep=",") + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_fromfile(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.fromfile(path, dtype=np.longdouble, sep="\n") + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_genfromtxt(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.genfromtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_loadtxt(self): + with temppath() as path: + with open(path, 'w') as f: + f.write(self.out) + res = np.loadtxt(path, dtype=np.longdouble) + assert_equal(res, self.tgt) + + @pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") + def test_tofile_roundtrip(self): + with temppath() as path: + self.tgt.tofile(path, sep=" ") + res = np.fromfile(path, dtype=np.longdouble, sep=" ") + assert_equal(res, self.tgt) + + +# Conversions long double -> string + + +def test_str_exact(): + o = 1 + LD_INFO.eps + assert_(str(o) != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_format(): + assert_(f"{1 + LD_INFO.eps:.40g}" != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_percent(): + o = 1 + LD_INFO.eps + assert_(f"{o:.40g}" != '1') + + +@pytest.mark.skipif(longdouble_longer_than_double, + reason="array repr problem") +@pytest.mark.skipif(string_to_longdouble_inaccurate, + reason="Need strtold_l") +def test_array_repr(): + o = 1 + LD_INFO.eps + a = np.array([o]) + b = np.array([1], dtype=np.longdouble) + if not np.all(a != b): + raise ValueError("precision loss creating arrays") + assert_(repr(a) != repr(b)) + +# +# Locale tests: scalar types formatting should be independent of the locale +# + +class TestCommaDecimalPointLocale(CommaDecimalPointLocale): + + def test_str_roundtrip_foreign(self): + o = 1.5 + assert_equal(o, np.longdouble(str(o))) + + def test_fromstring_foreign_repr(self): + f = 1.234 + a = np.fromstring(repr(f), dtype=float, sep=" ") + assert_equal(a[0], f) + + def test_fromstring_foreign(self): + s = "1.234" + a = np.fromstring(s, 
dtype=np.longdouble, sep=" ") + assert_equal(a[0], np.longdouble(s)) + + def test_fromstring_foreign_sep(self): + a = np.array([1, 2, 3, 4]) + b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",") + assert_array_equal(a, b) + + def test_fromstring_foreign_value(self): + with assert_raises(ValueError): + np.fromstring("1,234", dtype=np.longdouble, sep=" ") + + +@pytest.mark.parametrize("int_val", [ + # cases discussed in gh-10723 + # and gh-9968 + 2 ** 1024, 0]) +def test_longdouble_from_int(int_val): + # for issue gh-9968 + str_val = str(int_val) + # we'll expect a RuntimeWarning on platforms + # with np.longdouble equivalent to np.double + # for large integer input + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + # can be inf==inf on some platforms + assert np.longdouble(int_val) == np.longdouble(str_val) + # we can't directly compare the int and + # max longdouble value on all platforms + if np.allclose(np.finfo(np.longdouble).max, + np.finfo(np.double).max) and w: + assert w[0].category is RuntimeWarning + +@pytest.mark.parametrize("bool_val", [ + True, False]) +def test_longdouble_from_bool(bool_val): + assert np.longdouble(bool_val) == np.longdouble(int(bool_val)) + + +@pytest.mark.skipif( + not (IS_MUSL and platform.machine() == "x86_64"), + reason="only need to run on musllinux_x86_64" +) +def test_musllinux_x86_64_signature(): + # this test may fail if you're emulating musllinux_x86_64 on a different + # architecture, but should pass natively. + known_sigs = [b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf'] + sig = (np.longdouble(-1.0) / np.longdouble(10.0)) + sig = sig.view(sig.dtype.newbyteorder('<')).tobytes()[:10] + assert sig in known_sigs + + +def test_eps_positive(): + # np.finfo('g').eps should be positive on all platforms. If this isn't true + # then something may have gone wrong with the MachArLike, e.g. if + # np._core.getlimits._discovered_machar didn't work properly + assert np.finfo(np.longdouble).eps > 0. diff --git a/python/numpy/_core/tests/test_machar.py b/python/numpy/_core/tests/test_machar.py new file mode 100644 index 000000000..2d772dd51 --- /dev/null +++ b/python/numpy/_core/tests/test_machar.py @@ -0,0 +1,30 @@ +""" +Test machar. Given recent changes to hardcode type data, we might want to get +rid of both MachAr and this test at some point. + +""" +import numpy._core.numerictypes as ntypes +from numpy import array, errstate +from numpy._core._machar import MachAr + + +class TestMachAr: + def _run_machar_highprec(self): + # Instantiate MachAr instance with high enough precision to cause + # underflow + try: + hiprec = ntypes.float96 + MachAr(lambda v: array(v, hiprec)) + except AttributeError: + # Fixme, this needs to raise a 'skip' exception. + "Skipping test: no ntypes.float96 available on this platform." + + def test_underlow(self): + # Regression test for #759: + # instantiating MachAr for dtype = np.float96 raises spurious warning. + with errstate(all='raise'): + try: + self._run_machar_highprec() + except FloatingPointError as e: + msg = f"Caught {e} exception, should not have been raised." 
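+                # Surface the spurious FloatingPointError as a test
+                # failure instead of letting it propagate.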
+ raise AssertionError(msg) diff --git a/python/numpy/_core/tests/test_mem_overlap.py b/python/numpy/_core/tests/test_mem_overlap.py new file mode 100644 index 000000000..d1735670a --- /dev/null +++ b/python/numpy/_core/tests/test_mem_overlap.py @@ -0,0 +1,930 @@ +import itertools + +import pytest +from numpy._core._multiarray_tests import internal_overlap, solve_diophantine + +import numpy as np +from numpy._core import _umath_tests +from numpy.lib.stride_tricks import as_strided +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises + +ndims = 2 +size = 10 +shape = tuple([size] * ndims) + +MAY_SHARE_BOUNDS = 0 +MAY_SHARE_EXACT = -1 + + +def _indices_for_nelems(nelems): + """Returns slices of length nelems, from start onwards, in direction sign.""" + + if nelems == 0: + return [size // 2] # int index + + res = [] + for step in (1, 2): + for sign in (-1, 1): + start = size // 2 - nelems * step * sign // 2 + stop = start + nelems * step * sign + res.append(slice(start, stop, step * sign)) + + return res + + +def _indices_for_axis(): + """Returns (src, dst) pairs of indices.""" + + res = [] + for nelems in (0, 2, 3): + ind = _indices_for_nelems(nelems) + res.extend(itertools.product(ind, ind)) # all assignments of size "nelems" + + return res + + +def _indices(ndims): + """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs.""" + + ind = _indices_for_axis() + return itertools.product(ind, repeat=ndims) + + +def _check_assignment(srcidx, dstidx): + """Check assignment arr[dstidx] = arr[srcidx] works.""" + + arr = np.arange(np.prod(shape)).reshape(shape) + + cpy = arr.copy() + + cpy[dstidx] = arr[srcidx] + arr[dstidx] = arr[srcidx] + + assert_(np.all(arr == cpy), + f'assigning arr[{dstidx}] = arr[{srcidx}]') + + +def test_overlapping_assignments(): + # Test automatically generated assignments which overlap in memory. 
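+    # Each case first performs the assignment into a copy (reading from
+    # the untouched original) and then checks that the overlapping
+    # in-place assignment produces the same result.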
+ + inds = _indices(ndims) + + for ind in inds: + srcidx = tuple(a[0] for a in ind) + dstidx = tuple(a[1] for a in ind) + + _check_assignment(srcidx, dstidx) + + +@pytest.mark.slow +def test_diophantine_fuzz(): + # Fuzz test the diophantine solver + rng = np.random.RandomState(1234) + + max_int = np.iinfo(np.intp).max + + for ndim in range(10): + feasible_count = 0 + infeasible_count = 0 + + min_count = 500 // (ndim + 1) + + while min(feasible_count, infeasible_count) < min_count: + # Ensure big and small integer problems + A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6 + U_max = rng.randint(0, 11, dtype=np.intp)**6 + + A_max = min(max_int, A_max) + U_max = min(max_int - 1, U_max) + + A = tuple(int(rng.randint(1, A_max + 1, dtype=np.intp)) + for j in range(ndim)) + U = tuple(int(rng.randint(0, U_max + 2, dtype=np.intp)) + for j in range(ndim)) + + b_ub = min(max_int - 2, sum(a * ub for a, ub in zip(A, U))) + b = int(rng.randint(-1, b_ub + 2, dtype=np.intp)) + + if ndim == 0 and feasible_count < min_count: + b = 0 + + X = solve_diophantine(A, U, b) + + if X is None: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is None, (A, U, b, X_simplified)) + + # Check no solution exists (provided the problem is + # small enough so that brute force checking doesn't + # take too long) + ranges = tuple(range(0, a * ub + 1, a) for a, ub in zip(A, U)) + + size = 1 + for r in ranges: + size *= len(r) + if size < 100000: + assert_(not any(sum(w) == b for w in itertools.product(*ranges))) + infeasible_count += 1 + else: + # Check the simplified decision problem agrees + X_simplified = solve_diophantine(A, U, b, simplify=1) + assert_(X_simplified is not None, (A, U, b, X_simplified)) + + # Check validity + assert_(sum(a * x for a, x in zip(A, X)) == b) + assert_(all(0 <= x <= ub for x, ub in zip(X, U))) + feasible_count += 1 + + +def test_diophantine_overflow(): + # Smoke test integer overflow detection + max_intp = np.iinfo(np.intp).max + max_int64 = np.iinfo(np.int64).max + + if max_int64 <= max_intp: + # Check that the algorithm works internally in 128-bit; + # solving this problem requires large intermediate numbers + A = (max_int64 // 2, max_int64 // 2 - 10) + U = (max_int64 // 2, max_int64 // 2 - 10) + b = 2 * (max_int64 // 2) - 10 + + assert_equal(solve_diophantine(A, U, b), (1, 1)) + + +def check_may_share_memory_exact(a, b): + got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + assert_equal(np.may_share_memory(a, b), + np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS)) + + a.fill(0) + b.fill(0) + a.fill(1) + exact = b.any() + + err_msg = "" + if got != exact: + err_msg = " " + "\n ".join([ + f"base_a - base_b = {a.__array_interface__['data'][0] - b.__array_interface__['data'][0]!r}", + f"shape_a = {a.shape!r}", + f"shape_b = {b.shape!r}", + f"strides_a = {a.strides!r}", + f"strides_b = {b.strides!r}", + f"size_a = {a.size!r}", + f"size_b = {b.size!r}" + ]) + + assert_equal(got, exact, err_msg=err_msg) + + +def test_may_share_memory_manual(): + # Manual test cases for may_share_memory + + # Base arrays + xs0 = [ + np.zeros([13, 21, 23, 22], dtype=np.int8), + np.zeros([13, 21, 23 * 2, 22], dtype=np.int8)[:, :, ::2, :] + ] + + # Generate all negative stride combinations + xs = [] + for x in xs0: + for ss in itertools.product(*(([slice(None), slice(None, None, -1)],) * 4)): + xp = x[ss] + xs.append(xp) + + for x in xs: + # The default is a simple extent check + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, 
:])) + assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :], max_work=None)) + + # Exact checks + check_may_share_memory_exact(x[:, 0, :], x[:, 1, :]) + check_may_share_memory_exact(x[:, ::7], x[:, 3::3]) + + try: + xp = x.ravel() + if xp.flags.owndata: + continue + xp = xp.view(np.int16) + except ValueError: + continue + + # 0-size arrays cannot overlap + check_may_share_memory_exact(x.ravel()[6:6], + xp.reshape(13, 21, 23, 11)[:, ::7]) + + # Test itemsize is dealt with + check_may_share_memory_exact(x[:, ::7], + xp.reshape(13, 21, 23, 11)) + check_may_share_memory_exact(x[:, ::7], + xp.reshape(13, 21, 23, 11)[:, 3::3]) + check_may_share_memory_exact(x.ravel()[6:7], + xp.reshape(13, 21, 23, 11)[:, ::7]) + + # Check unit size + x = np.zeros([1], dtype=np.int8) + check_may_share_memory_exact(x, x) + check_may_share_memory_exact(x, x.copy()) + + +def iter_random_view_pairs(x, same_steps=True, equal_size=False): + rng = np.random.RandomState(1234) + + if equal_size and same_steps: + raise ValueError + + def random_slice(n, step): + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + def random_slice_fixed_size(n, step, size): + start = rng.randint(0, n + 1 - size * step) + stop = start + (size - 1) * step + 1 + if rng.randint(0, 2) == 0: + stop, start = start - 1, stop - 1 + if stop < 0: + stop = None + step *= -1 + return slice(start, stop, step) + + # First a few regular views + yield x, x + for j in range(1, 7, 3): + yield x[j:], x[:-j] + yield x[..., j:], x[..., :-j] + + # An array with zero stride internal overlap + strides = list(x.strides) + strides[0] = 0 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # An array with non-zero stride internal overlap + strides = list(x.strides) + if strides[0] > 1: + strides[0] = 1 + xp = as_strided(x, shape=x.shape, strides=strides) + yield x, xp + yield xp, xp + + # Then discontiguous views + while True: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + + t1 = np.arange(x.ndim) + rng.shuffle(t1) + + if equal_size: + t2 = t1 + else: + t2 = np.arange(x.ndim) + rng.shuffle(t2) + + a = x[s1] + + if equal_size: + if a.size == 0: + continue + + steps2 = tuple(rng.randint(1, max(2, p // (1 + pa))) + if rng.randint(0, 5) == 0 else 1 + for p, s, pa in zip(x.shape, s1, a.shape)) + s2 = tuple(random_slice_fixed_size(p, s, pa) + for p, s, pa in zip(x.shape, steps2, a.shape)) + elif same_steps: + steps2 = steps + else: + steps2 = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + + if not equal_size: + s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) + + a = a.transpose(t1) + b = x[s2].transpose(t2) + + yield a, b + + +def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count): + # Check that overlap problems with common strides are solved with + # little work. 
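+    # "Little work" here means the exact overlap answer should already be
+    # reachable within the get_max_work(a, b) budget for every generated
+    # pair, i.e. without needing an unbounded search.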
+ x = np.zeros([17, 34, 71, 97], dtype=np.int16) + + feasible = 0 + infeasible = 0 + + pair_iter = iter_random_view_pairs(x, same_steps) + + while min(feasible, infeasible) < min_count: + a, b = next(pair_iter) + + bounds_overlap = np.may_share_memory(a, b) + may_share_answer = np.may_share_memory(a, b) + easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b)) + exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT) + + if easy_answer != exact_answer: + # assert_equal is slow... + assert_equal(easy_answer, exact_answer) + + if may_share_answer != bounds_overlap: + assert_equal(may_share_answer, bounds_overlap) + + if bounds_overlap: + if exact_answer: + feasible += 1 + else: + infeasible += 1 + + +@pytest.mark.slow +def test_may_share_memory_easy_fuzz(): + # Check that overlap problems with common strides are always + # solved with little work. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: 1, + same_steps=True, + min_count=2000) + + +@pytest.mark.slow +def test_may_share_memory_harder_fuzz(): + # Overlap problems with not necessarily common strides take more + # work. + # + # The work bound below can't be reduced much. Harder problems can + # also exist but not be detected here, as the set of problems + # comes from RNG. + + check_may_share_memory_easy_fuzz(get_max_work=lambda a, b: max(a.size, b.size) // 2, + same_steps=False, + min_count=2000) + + +def test_shares_memory_api(): + x = np.zeros([4, 5, 6], dtype=np.int8) + + assert_equal(np.shares_memory(x, x), True) + assert_equal(np.shares_memory(x, x.copy()), False) + + a = x[:, ::2, ::3] + b = x[:, ::3, ::2] + assert_equal(np.shares_memory(a, b), True) + assert_equal(np.shares_memory(a, b, max_work=None), True) + assert_raises( + np.exceptions.TooHardError, np.shares_memory, a, b, max_work=1 + ) + + +def test_may_share_memory_bad_max_work(): + x = np.zeros([1]) + assert_raises(OverflowError, np.may_share_memory, x, x, max_work=10**100) + assert_raises(OverflowError, np.shares_memory, x, x, max_work=10**100) + + +def test_internal_overlap_diophantine(): + def check(A, U, exists=None): + X = solve_diophantine(A, U, 0, require_ub_nontrivial=1) + + if exists is None: + exists = (X is not None) + + if X is not None: + assert_(sum(a * x for a, x in zip(A, X)) == sum(a * u // 2 for a, u in zip(A, U))) + assert_(all(0 <= x <= u for x, u in zip(X, U))) + assert_(any(x != u // 2 for x, u in zip(X, U))) + + if exists: + assert_(X is not None, repr(X)) + else: + assert_(X is None, repr(X)) + + # Smoke tests + check((3, 2), (2 * 2, 3 * 2), exists=True) + check((3 * 2, 2), (15 * 2, (3 - 1) * 2), exists=False) + + +def test_internal_overlap_slices(): + # Slicing an array never generates internal overlap + + x = np.zeros([17, 34, 71, 97], dtype=np.int16) + + rng = np.random.RandomState(1234) + + def random_slice(n, step): + start = rng.randint(0, n + 1, dtype=np.intp) + stop = rng.randint(start, n + 1, dtype=np.intp) + if rng.randint(0, 2, dtype=np.intp) == 0: + stop, start = start, stop + step *= -1 + return slice(start, stop, step) + + cases = 0 + min_count = 5000 + + while cases < min_count: + steps = tuple(rng.randint(1, 11, dtype=np.intp) + if rng.randint(0, 5, dtype=np.intp) == 0 else 1 + for j in range(x.ndim)) + t1 = np.arange(x.ndim) + rng.shuffle(t1) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + a = x[s1].transpose(t1) + + assert_(not internal_overlap(a)) + cases += 1 + + +def check_internal_overlap(a, manual_expected=None): + got = internal_overlap(a) + + # Brute-force check + m 
= set() + ranges = tuple(range(n) for n in a.shape) + for v in itertools.product(*ranges): + offset = sum(s * w for s, w in zip(a.strides, v)) + if offset in m: + expected = True + break + else: + m.add(offset) + else: + expected = False + + # Compare + if got != expected: + assert_equal(got, expected, err_msg=repr((a.strides, a.shape))) + if manual_expected is not None and expected != manual_expected: + assert_equal(expected, manual_expected) + return got + + +def test_internal_overlap_manual(): + # Stride tricks can construct arrays with internal overlap + + # We don't care about memory bounds, the array is not + # read/write accessed + x = np.arange(1).astype(np.int8) + + # Check low-dimensional special cases + + check_internal_overlap(x, False) # 1-dim + check_internal_overlap(x.reshape([]), False) # 0-dim + + a = as_strided(x, strides=(3, 4), shape=(4, 4)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(3, 4), shape=(5, 4)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0,), shape=(0,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(1,)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0,), shape=(2,)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(87, 22)) + check_internal_overlap(a, True) + + a = as_strided(x, strides=(0, -9993), shape=(1, 22)) + check_internal_overlap(a, False) + + a = as_strided(x, strides=(0, -9993), shape=(0, 22)) + check_internal_overlap(a, False) + + +def test_internal_overlap_fuzz(): + # Fuzz check; the brute-force check is fairly slow + + x = np.arange(1).astype(np.int8) + + overlap = 0 + no_overlap = 0 + min_count = 100 + + rng = np.random.RandomState(1234) + + while min(overlap, no_overlap) < min_count: + ndim = rng.randint(1, 4, dtype=np.intp) + + strides = tuple(rng.randint(-1000, 1000, dtype=np.intp) + for j in range(ndim)) + shape = tuple(rng.randint(1, 30, dtype=np.intp) + for j in range(ndim)) + + a = as_strided(x, strides=strides, shape=shape) + result = check_internal_overlap(a) + + if result: + overlap += 1 + else: + no_overlap += 1 + + +def test_non_ndarray_inputs(): + # Regression check for gh-5604 + + class MyArray: + def __init__(self, data): + self.data = data + + @property + def __array_interface__(self): + return self.data.__array_interface__ + + class MyArray2: + def __init__(self, data): + self.data = data + + def __array__(self, dtype=None, copy=None): + return self.data + + for cls in [MyArray, MyArray2]: + x = np.arange(5) + + assert_(np.may_share_memory(cls(x[::2]), x[1::2])) + assert_(not np.shares_memory(cls(x[::2]), x[1::2])) + + assert_(np.shares_memory(cls(x[1::3]), x[::2])) + assert_(np.may_share_memory(cls(x[1::3]), x[::2])) + + +def view_element_first_byte(x): + """Construct an array viewing the first byte of each element of `x`""" + from numpy.lib._stride_tricks_impl import DummyArray + interface = dict(x.__array_interface__) + interface['typestr'] = '|b1' + interface['descr'] = [('', '|b1')] + return np.asarray(DummyArray(interface, x)) + + +def assert_copy_equivalent(operation, args, out, **kwargs): + """ + Check that operation(*args, out=out) produces results + equivalent to out[...] = operation(*args, out=out.copy()) + """ + + kwargs['out'] = out + kwargs2 = dict(kwargs) + kwargs2['out'] = out.copy() + + out_orig = out.copy() + out[...] = operation(*args, **kwargs2) + expected = out.copy() + out[...] 
= out_orig + + got = operation(*args, **kwargs).copy() + + if (got != expected).any(): + assert_equal(got, expected) + + +class TestUFunc: + """ + Test ufunc call memory overlap handling + """ + + def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16, + count=5000): + shapes = [7, 13, 8, 21, 29, 32] + + rng = np.random.RandomState(1234) + + for ndim in range(1, 6): + x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = count // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + a_orig = a.copy() + b_orig = b.copy() + + if get_out_axis_size is None: + assert_copy_equivalent(operation, [a], out=b) + + if np.shares_memory(a, b): + overlapping += 1 + else: + for axis in itertools.chain(range(ndim), [None]): + a[...] = a_orig + b[...] = b_orig + + # Determine size for reduction axis (None if scalar) + outsize, scalarize = get_out_axis_size(a, b, axis) + if outsize == 'skip': + continue + + # Slice b to get an output array of the correct size + sl = [slice(None)] * ndim + if axis is None: + if outsize is None: + sl = [slice(0, 1)] + [0] * (ndim - 1) + else: + sl = [slice(0, outsize)] + [0] * (ndim - 1) + elif outsize is None: + k = b.shape[axis] // 2 + if ndim == 1: + sl[axis] = slice(k, k + 1) + else: + sl[axis] = k + else: + assert b.shape[axis] >= outsize + sl[axis] = slice(0, outsize) + b_out = b[tuple(sl)] + + if scalarize: + b_out = b_out.reshape([]) + + if np.shares_memory(a, b_out): + overlapping += 1 + + # Check result + assert_copy_equivalent(operation, [a], out=b_out, axis=axis) + + @pytest.mark.slow + def test_unary_ufunc_call_fuzz(self): + self.check_unary_fuzz(np.invert, None, np.int16) + + @pytest.mark.slow + def test_unary_ufunc_call_complex_fuzz(self): + # Complex typically has a smaller alignment than itemsize + self.check_unary_fuzz(np.negative, None, np.complex128, count=500) + + def test_binary_ufunc_accumulate_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # accumulate doesn't support this + else: + return a.shape[axis], False + + self.check_unary_fuzz(np.add.accumulate, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduce_fuzz(self): + def get_out_axis_size(a, b, axis): + return None, (axis is None or a.ndim == 1) + + self.check_unary_fuzz(np.add.reduce, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_fuzz(self): + def get_out_axis_size(a, b, axis): + if axis is None: + if a.ndim == 1: + return a.size, False + else: + return 'skip', False # reduceat doesn't support this + else: + return a.shape[axis], False + + def do_reduceat(a, out, axis): + if axis is None: + size = len(a) + step = size // len(out) + else: + size = a.shape[axis] + step = a.shape[axis] // out.shape[axis] + idx = np.arange(0, size, step) + return np.add.reduceat(a, idx, out=out, axis=axis) + + self.check_unary_fuzz(do_reduceat, get_out_axis_size, + dtype=np.int16, count=500) + + def test_binary_ufunc_reduceat_manual(self): + def check(ufunc, a, ind, out): + c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy()) + c2 = ufunc.reduceat(a, ind, out=out) + assert_array_equal(c1, c2) + + # Exactly same input/output arrays + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1].copy(), a) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.add, a, a[::-1], a) + + 
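+    def _overlap_copy_sketch(self):
+        # Illustrative sketch (not collected as a test): the hazard the fuzz
+        # tests below guard against is a ufunc reading input elements it has
+        # already overwritten through an overlapping `out`. With correct
+        # overlap handling, the in-place call must match the result computed
+        # from a copy of the input:
+        v = np.array([1, 2, 4, 8, 16], dtype=np.int16)
+        expected = np.add(v[:-1].copy(), 1)
+        np.add(v[:-1], 1, out=v[1:])  # input and output share memory
+        assert_array_equal(v[1:], expected)
+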
@pytest.mark.slow + def test_unary_gufunc_fuzz(self): + shapes = [7, 13, 8, 21, 29, 32] + gufunc = _umath_tests.euclidean_pdist + + rng = np.random.RandomState(1234) + + for ndim in range(2, 6): + x = rng.rand(*shapes[:ndim]) + + it = iter_random_view_pairs(x, same_steps=False, equal_size=True) + + min_count = 500 // (ndim + 1)**2 + + overlapping = 0 + while overlapping < min_count: + a, b = next(it) + + if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2: + continue + + # Ensure the shapes are so that euclidean_pdist is happy + if b.shape[-1] > b.shape[-2]: + b = b[..., 0, :] + else: + b = b[..., :, 0] + + n = a.shape[-2] + p = n * (n - 1) // 2 + if p <= b.shape[-1] and p > 0: + b = b[..., :p] + else: + n = max(2, int(np.sqrt(b.shape[-1])) // 2) + p = n * (n - 1) // 2 + a = a[..., :n, :] + b = b[..., :p] + + # Call + if np.shares_memory(a, b): + overlapping += 1 + + with np.errstate(over='ignore', invalid='ignore'): + assert_copy_equivalent(gufunc, [a], out=b) + + def test_ufunc_at_manual(self): + def check(ufunc, a, ind, b=None): + a0 = a.copy() + if b is None: + ufunc.at(a0, ind.copy()) + c1 = a0.copy() + ufunc.at(a, ind) + c2 = a.copy() + else: + ufunc.at(a0, ind.copy(), b.copy()) + c1 = a0.copy() + ufunc.at(a, ind, b) + c2 = a.copy() + assert_array_equal(c1, c2) + + # Overlap with index + a = np.arange(10000, dtype=np.int16) + check(np.invert, a[::-1], a) + + # Overlap with second data array + a = np.arange(100, dtype=np.int16) + ind = np.arange(0, 100, 2, dtype=np.int16) + check(np.add, a, ind, a[25:75]) + + def test_unary_ufunc_1d_manual(self): + # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`) + + def check(a, b): + a_orig = a.copy() + b_orig = b.copy() + + b0 = b.copy() + c1 = ufunc(a, out=b0) + c2 = ufunc(a, out=b) + assert_array_equal(c1, c2) + + # Trigger "fancy ufunc loop" code path + mask = view_element_first_byte(b).view(np.bool) + + a[...] = a_orig + b[...] = b_orig + c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy() + + a[...] = a_orig + b[...] = b_orig + c2 = ufunc(a, out=b, where=mask.copy()).copy() + + # Also, mask overlapping with output + a[...] = a_orig + b[...] 
= b_orig + c3 = ufunc(a, out=b, where=mask).copy() + + assert_array_equal(c1, c2) + assert_array_equal(c1, c3) + + dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32, + np.float64, np.complex64, np.complex128] + dtypes = [np.dtype(x) for x in dtypes] + + for dtype in dtypes: + if np.issubdtype(dtype, np.integer): + ufunc = np.invert + else: + ufunc = np.reciprocal + + n = 1000 + k = 10 + indices = [ + np.index_exp[:n], + np.index_exp[k:k + n], + np.index_exp[n - 1::-1], + np.index_exp[k + n - 1:k - 1:-1], + np.index_exp[:2 * n:2], + np.index_exp[k:k + 2 * n:2], + np.index_exp[2 * n - 1::-2], + np.index_exp[k + 2 * n - 1:k - 1:-2], + ] + + for xi, yi in itertools.product(indices, indices): + v = np.arange(1, 1 + n * 2 + k, dtype=dtype) + x = v[xi] + y = v[yi] + + with np.errstate(all='ignore'): + check(x, y) + + # Scalar cases + check(x[:1], y) + check(x[-1:], y) + check(x[:1].reshape([]), y) + check(x[-1:].reshape([]), y) + + def test_unary_ufunc_where_same(self): + # Check behavior at wheremask overlap + ufunc = np.invert + + def check(a, out, mask): + c1 = ufunc(a, out=out.copy(), where=mask.copy()) + c2 = ufunc(a, out=out, where=mask) + assert_array_equal(c1, c2) + + # Check behavior with same input and output arrays + x = np.arange(100).astype(np.bool) + check(x, x, x) + check(x, x.copy(), x) + check(x, x, x.copy()) + + @pytest.mark.slow + def test_binary_ufunc_1d_manual(self): + ufunc = np.add + + def check(a, b, c): + c0 = c.copy() + c1 = ufunc(a, b, out=c0) + c2 = ufunc(a, b, out=c) + assert_array_equal(c1, c2) + + for dtype in [np.int8, np.int16, np.int32, np.int64, + np.float32, np.float64, np.complex64, np.complex128]: + # Check different data dependency orders + + n = 1000 + k = 10 + + indices = [] + for p in [1, 2]: + indices.extend([ + np.index_exp[:p * n:p], + np.index_exp[k:k + p * n:p], + np.index_exp[p * n - 1::-p], + np.index_exp[k + p * n - 1:k - 1:-p], + ]) + + for x, y, z in itertools.product(indices, indices, indices): + v = np.arange(6 * n).astype(dtype) + x = v[x] + y = v[y] + z = v[z] + + check(x, y, z) + + # Scalar cases + check(x[:1], y, z) + check(x[-1:], y, z) + check(x[:1].reshape([]), y, z) + check(x[-1:].reshape([]), y, z) + check(x, y[:1], z) + check(x, y[-1:], z) + check(x, y[:1].reshape([]), z) + check(x, y[-1:].reshape([]), z) + + def test_inplace_op_simple_manual(self): + rng = np.random.RandomState(1234) + x = rng.rand(200, 200) # bigger than bufsize + + x += x.T + assert_array_equal(x - x.T, 0) diff --git a/python/numpy/_core/tests/test_mem_policy.py b/python/numpy/_core/tests/test_mem_policy.py new file mode 100644 index 000000000..b9f971e73 --- /dev/null +++ b/python/numpy/_core/tests/test_mem_policy.py @@ -0,0 +1,452 @@ +import asyncio +import gc +import os +import sys +import sysconfig +import threading + +import pytest + +import numpy as np +from numpy._core.multiarray import get_handler_name +from numpy.testing import IS_EDITABLE, IS_WASM, assert_warns, extbuild + + +@pytest.fixture +def get_module(tmp_path): + """ Add a memory policy that returns a false pointer 64 bytes into the + actual allocation, and fill the prefix with some text. Then check at each + memory manipulation that the prefix exists, to make sure all alloc/realloc/ + free/calloc go via the functions here. 
+ """ + if sys.platform.startswith('cygwin'): + pytest.skip('link fails on cygwin') + if IS_WASM: + pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") + + functions = [ + ("get_default_policy", "METH_NOARGS", """ + Py_INCREF(PyDataMem_DefaultHandler); + return PyDataMem_DefaultHandler; + """), + ("set_secret_data_policy", "METH_NOARGS", """ + PyObject *secret_data = + PyCapsule_New(&secret_data_handler, "mem_handler", NULL); + if (secret_data == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(secret_data); + Py_DECREF(secret_data); + return old; + """), + ("set_wrong_capsule_name_data_policy", "METH_NOARGS", """ + PyObject *wrong_name_capsule = + PyCapsule_New(&secret_data_handler, "not_mem_handler", NULL); + if (wrong_name_capsule == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(wrong_name_capsule); + Py_DECREF(wrong_name_capsule); + return old; + """), + ("set_old_policy", "METH_O", """ + PyObject *old; + if (args != NULL && PyCapsule_CheckExact(args)) { + old = PyDataMem_SetHandler(args); + } + else { + old = PyDataMem_SetHandler(NULL); + } + return old; + """), + ("get_array", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL, + buf, NPY_ARRAY_WRITEABLE, NULL); + """), + ("set_own", "METH_O", """ + if (!PyArray_Check(args)) { + PyErr_SetString(PyExc_ValueError, + "need an ndarray"); + return NULL; + } + PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA); + // Maybe try this too? + // PyArray_BASE(PyArrayObject *)args) = NULL; + Py_RETURN_NONE; + """), + ("get_array_with_base", "METH_NOARGS", """ + char *buf = (char *)malloc(20); + npy_intp dims[1]; + dims[0] = 20; + PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8); + PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, + NULL, buf, + NPY_ARRAY_WRITEABLE, NULL); + if (arr == NULL) return NULL; + PyObject *obj = PyCapsule_New(buf, "buf capsule", + (PyCapsule_Destructor)&warn_on_free); + if (obj == NULL) { + Py_DECREF(arr); + return NULL; + } + if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) { + Py_DECREF(arr); + Py_DECREF(obj); + return NULL; + } + return arr; + + """), + ] + prologue = ''' + #define NPY_TARGET_VERSION NPY_1_22_API_VERSION + #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION + #include + /* + * This struct allows the dynamic configuration of the allocator funcs + * of the `secret_data_allocator`. It is provided here for + * demonstration purposes, as a valid `ctx` use-case scenario. 
+ */ + typedef struct { + void *(*malloc)(size_t); + void *(*calloc)(size_t, size_t); + void *(*realloc)(void *, size_t); + void (*free)(void *); + } SecretDataAllocatorFuncs; + + NPY_NO_EXPORT void * + shift_alloc(void *ctx, size_t sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->malloc(sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld", (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void * + shift_zero(void *ctx, size_t sz, size_t cnt) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + char *real = (char *)funcs->calloc(sz + 64, cnt); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated %ld via zero", + (unsigned long)sz); + return (void *)(real + 64); + } + NPY_NO_EXPORT void + shift_free(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p == NULL) { + return ; + } + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_free, " + "no appropriate prefix\\n"); + /* Make C runtime crash by calling free on the wrong address */ + funcs->free((char *)p + 10); + /* funcs->free(real); */ + } + else { + npy_uintp i = (npy_uintp)atoi(real +20); + if (i != sz) { + fprintf(stderr, "uh-oh, unmatched shift_free" + "(ptr, %ld) but allocated %ld\\n", sz, i); + /* This happens in some places, only print */ + funcs->free(real); + } + else { + funcs->free(real); + } + } + } + NPY_NO_EXPORT void * + shift_realloc(void *ctx, void * p, npy_uintp sz) { + SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx; + if (p != NULL) { + char *real = (char *)p - 64; + if (strncmp(real, "originally allocated", 20) != 0) { + fprintf(stdout, "uh-oh, unmatched shift_realloc\\n"); + return realloc(p, sz); + } + return (void *)((char *)funcs->realloc(real, sz + 64) + 64); + } + else { + char *real = (char *)funcs->realloc(p, sz + 64); + if (real == NULL) { + return NULL; + } + snprintf(real, 64, "originally allocated " + "%ld via realloc", (unsigned long)sz); + return (void *)(real + 64); + } + } + /* As an example, we use the standard {m|c|re}alloc/free funcs. 
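+       An instrumented or failure-injecting allocator set could be dropped
+       in through this ctx alone, without touching the handler table below.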
*/ + static SecretDataAllocatorFuncs secret_data_handler_ctx = { + malloc, + calloc, + realloc, + free + }; + static PyDataMem_Handler secret_data_handler = { + "secret_data_allocator", + 1, + { + &secret_data_handler_ctx, /* ctx */ + shift_alloc, /* malloc */ + shift_zero, /* calloc */ + shift_realloc, /* realloc */ + shift_free /* free */ + } + }; + void warn_on_free(void *capsule) { + PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1); + void * obj = PyCapsule_GetPointer(capsule, + PyCapsule_GetName(capsule)); + free(obj); + }; + ''' + more_init = "import_array();" + try: + import mem_policy + return mem_policy + except ImportError: + pass + # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") + return extbuild.build_and_import_extension('mem_policy', + functions, + prologue=prologue, + include_dirs=[np.get_include()], + build_dir=tmp_path, + more_init=more_init) + + +def test_set_policy(get_module): + + get_handler_name = np._core.multiarray.get_handler_name + get_handler_version = np._core.multiarray.get_handler_version + orig_policy_name = get_handler_name() + + a = np.arange(10).reshape((2, 5)) # a doesn't own its own data + assert get_handler_name(a) is None + assert get_handler_version(a) is None + assert get_handler_name(a.base) == orig_policy_name + assert get_handler_version(a.base) == 1 + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10).reshape((2, 5)) # b doesn't own its own data + assert get_handler_name(b) is None + assert get_handler_version(b) is None + assert get_handler_name(b.base) == 'secret_data_allocator' + assert get_handler_version(b.base) == 1 + + if orig_policy_name == 'default_allocator': + get_module.set_old_policy(None) # tests PyDataMem_SetHandler(NULL) + assert get_handler_name() == 'default_allocator' + else: + get_module.set_old_policy(orig_policy) + assert get_handler_name() == orig_policy_name + + with pytest.raises(ValueError, + match="Capsule must be named 'mem_handler'"): + get_module.set_wrong_capsule_name_data_policy() + + +def test_default_policy_singleton(get_module): + get_handler_name = np._core.multiarray.get_handler_name + + # set the policy to default + orig_policy = get_module.set_old_policy(None) + + assert get_handler_name() == 'default_allocator' + + # re-set the policy to default + def_policy_1 = get_module.set_old_policy(None) + + assert get_handler_name() == 'default_allocator' + + # set the policy to original + def_policy_2 = get_module.set_old_policy(orig_policy) + + # since default policy is a singleton, + # these should be the same object + assert def_policy_1 is def_policy_2 is get_module.get_default_policy() + + +def test_policy_propagation(get_module): + # The memory policy goes hand-in-hand with flags.owndata + + class MyArr(np.ndarray): + pass + + get_handler_name = np._core.multiarray.get_handler_name + orig_policy_name = get_handler_name() + a = np.arange(10).view(MyArr).reshape((2, 5)) + assert get_handler_name(a) is None + assert a.flags.owndata is False + + assert get_handler_name(a.base) is None + assert a.base.flags.owndata is False + + assert get_handler_name(a.base.base) == orig_policy_name + assert a.base.base.flags.owndata is True + + +async def concurrent_context1(get_module, orig_policy_name, event): + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert 
get_handler_name() == 'default_allocator' + event.set() + + +async def concurrent_context2(get_module, orig_policy_name, event): + await event.wait() + # the policy is not affected by changes in parallel contexts + assert get_handler_name() == orig_policy_name + # change policy in the child context + if orig_policy_name == 'default_allocator': + get_module.set_secret_data_policy() + assert get_handler_name() == 'secret_data_allocator' + else: + get_module.set_old_policy(None) + assert get_handler_name() == 'default_allocator' + + +async def async_test_context_locality(get_module): + orig_policy_name = np._core.multiarray.get_handler_name() + + event = asyncio.Event() + # the child contexts inherit the parent policy + concurrent_task1 = asyncio.create_task( + concurrent_context1(get_module, orig_policy_name, event)) + concurrent_task2 = asyncio.create_task( + concurrent_context2(get_module, orig_policy_name, event)) + await concurrent_task1 + await concurrent_task2 + + # the parent context is not affected by child policy changes + assert np._core.multiarray.get_handler_name() == orig_policy_name + + +def test_context_locality(get_module): + if (sys.implementation.name == 'pypy' + and sys.pypy_version_info[:3] < (7, 3, 6)): + pytest.skip('no context-locality support in PyPy < 7.3.6') + asyncio.run(async_test_context_locality(get_module)) + + +def concurrent_thread1(get_module, event): + get_module.set_secret_data_policy() + assert np._core.multiarray.get_handler_name() == 'secret_data_allocator' + event.set() + + +def concurrent_thread2(get_module, event): + event.wait() + # the policy is not affected by changes in parallel threads + assert np._core.multiarray.get_handler_name() == 'default_allocator' + # change policy in the child thread + get_module.set_secret_data_policy() + + +def test_thread_locality(get_module): + orig_policy_name = np._core.multiarray.get_handler_name() + + event = threading.Event() + # the child threads do not inherit the parent policy + concurrent_task1 = threading.Thread(target=concurrent_thread1, + args=(get_module, event)) + concurrent_task2 = threading.Thread(target=concurrent_thread2, + args=(get_module, event)) + concurrent_task1.start() + concurrent_task2.start() + concurrent_task1.join() + concurrent_task2.join() + + # the parent thread is not affected by child policy changes + assert np._core.multiarray.get_handler_name() == orig_policy_name + + +@pytest.mark.skip(reason="too slow, see gh-23975") +def test_new_policy(get_module): + a = np.arange(10) + orig_policy_name = np._core.multiarray.get_handler_name(a) + + orig_policy = get_module.set_secret_data_policy() + + b = np.arange(10) + assert np._core.multiarray.get_handler_name(b) == 'secret_data_allocator' + + # test array manipulation. 
This is slow + if orig_policy_name == 'default_allocator': + # when the np._core.test tests recurse into this test, the + # policy will be set so this "if" will be false, preventing + # infinite recursion + # + # if needed, debug this by + # - running tests with -- -s (to not capture stdout/stderr + # - setting verbose=2 + # - setting extra_argv=['-vv'] here + assert np._core.test('full', verbose=1, extra_argv=[]) + # also try the ma tests, the pickling test is quite tricky + assert np.ma.test('full', verbose=1, extra_argv=[]) + + get_module.set_old_policy(orig_policy) + + c = np.arange(10) + assert np._core.multiarray.get_handler_name(c) == orig_policy_name + + +@pytest.mark.xfail(sys.implementation.name == "pypy", + reason=("bad interaction between getenv and " + "os.environ inside pytest")) +@pytest.mark.parametrize("policy", ["0", "1", None]) +def test_switch_owner(get_module, policy): + a = get_module.get_array() + assert np._core.multiarray.get_handler_name(a) is None + get_module.set_own(a) + + if policy is None: + # See what we expect to be set based on the env variable + policy = os.getenv("NUMPY_WARN_IF_NO_MEM_POLICY", "0") == "1" + oldval = None + else: + policy = policy == "1" + oldval = np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy( + policy) + try: + # The policy should be NULL, so we have to assume we can call + # "free". A warning is given if the policy == "1" + if policy: + with assert_warns(RuntimeWarning) as w: + del a + gc.collect() + else: + del a + gc.collect() + + finally: + if oldval is not None: + np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval) + + +def test_owner_is_base(get_module): + a = get_module.get_array_with_base() + with pytest.warns(UserWarning, match='warn_on_free'): + del a + gc.collect() + gc.collect() diff --git a/python/numpy/_core/tests/test_memmap.py b/python/numpy/_core/tests/test_memmap.py new file mode 100644 index 000000000..cbd825205 --- /dev/null +++ b/python/numpy/_core/tests/test_memmap.py @@ -0,0 +1,246 @@ +import mmap +import os +import sys +from pathlib import Path +from tempfile import NamedTemporaryFile, TemporaryFile + +import pytest + +from numpy import ( + add, + allclose, + arange, + asarray, + average, + isscalar, + memmap, + multiply, + ndarray, + prod, + subtract, + sum, +) +from numpy.testing import ( + IS_PYPY, + assert_, + assert_array_equal, + assert_equal, + break_cycles, + suppress_warnings, +) + + +class TestMemmap: + def setup_method(self): + self.tmpfp = NamedTemporaryFile(prefix='mmap') + self.shape = (3, 4) + self.dtype = 'float32' + self.data = arange(12, dtype=self.dtype) + self.data.resize(self.shape) + + def teardown_method(self): + self.tmpfp.close() + self.data = None + if IS_PYPY: + break_cycles() + break_cycles() + + def test_roundtrip(self): + # Write data to file + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp # Test __del__ machinery, which handles cleanup + + # Read data back from file + newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r', + shape=self.shape) + assert_(allclose(self.data, newfp)) + assert_array_equal(self.data, newfp) + assert_equal(newfp.flags.writeable, False) + + def test_open_with_filename(self, tmp_path): + tmpname = tmp_path / 'mmap' + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + del fp + + def test_unnamed_file(self): + with TemporaryFile() as f: + fp = memmap(f, dtype=self.dtype, shape=self.shape) + del fp + + def test_attributes(self): + 
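+        # The offset and mode passed to the constructor should be preserved
+        # as attributes on the resulting memmap.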
offset = 1 + mode = "w+" + fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode, + shape=self.shape, offset=offset) + assert_equal(offset, fp.offset) + assert_equal(mode, fp.mode) + del fp + + def test_filename(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(tmpname, dtype=self.dtype, mode='w+', + shape=self.shape) + abspath = Path(os.path.abspath(tmpname)) + fp[:] = self.data[:] + assert_equal(abspath, fp.filename) + b = fp[:1] + assert_equal(abspath, b.filename) + del b + del fp + + def test_path(self, tmp_path): + tmpname = tmp_path / "mmap" + fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+', + shape=self.shape) + # os.path.realpath does not resolve symlinks on Windows + # see: https://bugs.python.org/issue9949 + # use Path.resolve, just as memmap class does internally + abspath = str(Path(tmpname).resolve()) + fp[:] = self.data[:] + assert_equal(abspath, str(fp.filename.resolve())) + b = fp[:1] + assert_equal(abspath, str(b.filename.resolve())) + del b + del fp + + def test_filename_fileobj(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+", + shape=self.shape) + assert_equal(fp.filename, self.tmpfp.name) + + @pytest.mark.skipif(sys.platform == 'gnu0', + reason="Known to fail on hurd") + def test_flush(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp[:] = self.data[:] + assert_equal(fp[0], self.data[0]) + fp.flush() + + def test_del(self): + # Make sure a view does not delete the underlying mmap + fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + fp_base[0] = 5 + fp_view = fp_base[0:1] + assert_equal(fp_view[0], 5) + del fp_view + # Should still be able to access and assign values after + # deleting the view + assert_equal(fp_base[0], 5) + fp_base[0] = 6 + assert_equal(fp_base[0], 6) + + def test_arithmetic_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = (fp + 10) + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_indexing_drops_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + tmp = fp[(1, 2), (2, 3)] + if isinstance(tmp, memmap): + assert_(tmp._mmap is not fp._mmap) + + def test_slicing_keeps_references(self): + fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+', + shape=self.shape) + assert_(fp[:2, :2]._mmap is fp._mmap) + + def test_view(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + new1 = fp.view() + new2 = new1.view() + assert_(new1.base is fp) + assert_(new2.base is fp) + new_array = asarray(fp) + assert_(new_array.base is fp) + + def test_ufunc_return_ndarray(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + with suppress_warnings() as sup: + sup.filter(FutureWarning, "np.average currently does not preserve") + for unary_op in [sum, average, prod]: + result = unary_op(fp) + assert_(isscalar(result)) + assert_(result.__class__ is self.data[0, 0].__class__) + + assert_(unary_op(fp, axis=0).__class__ is ndarray) + assert_(unary_op(fp, axis=1).__class__ is ndarray) + + for binary_op in [add, subtract, multiply]: + assert_(binary_op(fp, self.data).__class__ is ndarray) + assert_(binary_op(self.data, fp).__class__ is ndarray) + assert_(binary_op(fp, fp).__class__ is ndarray) + + fp += 1 + assert fp.__class__ is memmap + add(fp, 1, out=fp) + assert fp.__class__ is memmap + + def test_getitem(self): + fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + 
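+        # Basic slicing returns a view, so the memmap class is preserved: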
assert_(fp[1:, :-1].__class__ is memmap) + # Fancy indexing returns a copy that is not memmapped + assert_(fp[[0, 1]].__class__ is ndarray) + + def test_memmap_subclass(self): + class MemmapSubClass(memmap): + pass + + fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape) + fp[:] = self.data + + # We keep previous behavior for subclasses of memmap, i.e. the + # ufunc and __getitem__ output is never turned into a ndarray + assert_(sum(fp, axis=0).__class__ is MemmapSubClass) + assert_(sum(fp).__class__ is MemmapSubClass) + assert_(fp[1:, :-1].__class__ is MemmapSubClass) + assert fp[[0, 1]].__class__ is MemmapSubClass + + def test_mmap_offset_greater_than_allocation_granularity(self): + size = 5 * mmap.ALLOCATIONGRANULARITY + offset = mmap.ALLOCATIONGRANULARITY + 1 + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_(fp.offset == offset) + + def test_empty_array_with_offset_multiple_of_allocation_granularity(self): + self.tmpfp.write(b'a' * mmap.ALLOCATIONGRANULARITY) + size = 0 + offset = mmap.ALLOCATIONGRANULARITY + fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset) + assert_equal(fp.offset, offset) + + def test_no_shape(self): + self.tmpfp.write(b'a' * 16) + mm = memmap(self.tmpfp, dtype='float64') + assert_equal(mm.shape, (2,)) + + def test_empty_array(self): + # gh-12653 + with pytest.raises(ValueError, match='empty file'): + memmap(self.tmpfp, shape=(0, 4), mode='r') + + # gh-27723 + # empty memmap works with mode in ('w+','r+') + memmap(self.tmpfp, shape=(0, 4), mode='w+') + + # ok now the file is not empty + memmap(self.tmpfp, shape=(0, 4), mode='w+') + + def test_shape_type(self): + memmap(self.tmpfp, shape=3, mode='w+') + memmap(self.tmpfp, shape=self.shape, mode='w+') + memmap(self.tmpfp, shape=list(self.shape), mode='w+') + memmap(self.tmpfp, shape=asarray(self.shape), mode='w+') diff --git a/python/numpy/_core/tests/test_multiarray.py b/python/numpy/_core/tests/test_multiarray.py new file mode 100644 index 000000000..01144c19c --- /dev/null +++ b/python/numpy/_core/tests/test_multiarray.py @@ -0,0 +1,10577 @@ +import builtins +import collections.abc +import ctypes +import functools +import gc +import io +import itertools +import mmap +import operator +import os +import pathlib +import pickle +import re +import sys +import tempfile +import warnings +import weakref +from contextlib import contextmanager + +# Need to test an object that does not fully implement math interface +from datetime import datetime, timedelta +from decimal import Decimal + +import numpy._core._multiarray_tests as _multiarray_tests +import pytest +from numpy._core._rational_tests import rational + +import numpy as np +from numpy._core.multiarray import _get_ndarray_c_version, dot +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib.recfunctions import repack_fields +from numpy.testing import ( + BLAS_SUPPORTS_FPE, + HAS_REFCOUNT, + IS_64BIT, + IS_PYPY, + IS_PYSTON, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, + break_cycles, + check_support_sve, + runstring, + suppress_warnings, + temppath, +) +from numpy.testing._private.utils import _no_tracing, requires_memory + + +def assert_arg_sorted(arr, arg): + # resulting array should be sorted and arg values should be unique + assert_equal(arr[arg], 
np.sort(arr))
+    assert_equal(np.sort(arg), np.arange(len(arg)))
+
+
+def assert_arr_partitioned(kth, k, arr_part):
+    assert_equal(arr_part[k], kth)
+    assert_array_compare(operator.__le__, arr_part[:k], kth)
+    assert_array_compare(operator.__ge__, arr_part[k:], kth)
+
+
+def _aligned_zeros(shape, dtype=float, order="C", align=None):
+    """
+    Allocate a new ndarray with aligned memory.
+
+    The ndarray is guaranteed *not* aligned to twice the requested alignment.
+    Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
+    dtype.alignment."""
+    dtype = np.dtype(dtype)
+    if dtype == np.dtype(object):
+        # Can't do this, fall back to standard allocation (which
+        # should always be sufficiently aligned)
+        if align is not None:
+            raise ValueError("object array alignment not supported")
+        return np.zeros(shape, dtype=dtype, order=order)
+    if align is None:
+        align = dtype.alignment
+    if not hasattr(shape, '__len__'):
+        shape = (shape,)
+    size = functools.reduce(operator.mul, shape) * dtype.itemsize
+    buf = np.empty(size + 2 * align + 1, np.uint8)
+
+    ptr = buf.__array_interface__['data'][0]
+    offset = ptr % align
+    if offset != 0:
+        offset = align - offset
+    if (ptr % (2 * align)) == 0:
+        offset += align
+
+    # Note: slices producing 0-size arrays do not necessarily change
+    # data pointer --- so we use and allocate size+1
+    buf = buf[offset:offset + size + 1][:-1]
+    buf.fill(0)
+    data = np.ndarray(shape, dtype, buf, order=order)
+    return data
+
+
+class TestFlags:
+    def setup_method(self):
+        self.a = np.arange(10)
+
+    def test_writeable(self):
+        mydict = locals()
+        self.a.flags.writeable = False
+        assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
+        self.a.flags.writeable = True
+        self.a[0] = 5
+        self.a[0] = 0
+
+    def test_writeable_any_base(self):
+        # Ensure that any base being writeable is sufficient to change flag;
+        # this is especially interesting for arrays from an array interface.
+        arr = np.arange(10)
+
+        class subclass(np.ndarray):
+            pass
+
+        # Create subclass so base will not be collapsed, this is OK to change
+        view1 = arr.view(subclass)
+        view2 = view1[...]
+        arr.flags.writeable = False
+        view2.flags.writeable = False
+        view2.flags.writeable = True  # Can be set to True again.
+
+        arr = np.arange(10)
+
+        class frominterface:
+            def __init__(self, arr):
+                self.arr = arr
+            self.__array_interface__ = arr.__array_interface__
+
+        # Instantiate the exporter; passing the class itself would create a
+        # 0-d object array rather than a view through the array interface.
+        view1 = np.asarray(frominterface(arr))
+        view2 = view1[...]
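+        # While the exporting array is writeable, the view's flag can be
+        # freely toggled back on: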
+ view2.flags.writeable = False + view2.flags.writeable = True + + view1.flags.writeable = False + view2.flags.writeable = False + with assert_raises(ValueError): + # Must assume not writeable, since only base is not: + view2.flags.writeable = True + + def test_writeable_from_readonly(self): + # gh-9440 - make sure fromstring, from buffer on readonly buffers + # set writeable False + data = b'\x00' * 100 + vals = np.frombuffer(data, 'B') + assert_raises(ValueError, vals.setflags, write=True) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) + values = np._core.records.fromstring(data, types) + vals = values['vals'] + assert_raises(ValueError, vals.setflags, write=True) + + def test_writeable_from_buffer(self): + data = bytearray(b'\x00' * 100) + vals = np.frombuffer(data, 'B') + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + types = np.dtype([('vals', 'u1'), ('res3', 'S4')]) + values = np._core.records.fromstring(data, types) + vals = values['vals'] + assert_(vals.flags.writeable) + vals.setflags(write=False) + assert_(vals.flags.writeable is False) + vals.setflags(write=True) + assert_(vals.flags.writeable) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies") + def test_writeable_pickle(self): + import pickle + # Small arrays will be copied without setting base. + # See condition for using PyArray_SetBaseObject in + # array_setstate. + a = np.arange(1000) + for v in range(pickle.HIGHEST_PROTOCOL): + vals = pickle.loads(pickle.dumps(a, v)) + assert_(vals.flags.writeable) + assert_(isinstance(vals.base, bytes)) + + def test_writeable_from_c_data(self): + # Test that the writeable flag can be changed for an array wrapping + # low level C-data, but not owning its data. + # Also see that this is deprecated to change from python. + from numpy._core._multiarray_tests import get_c_wrapping_array + + arr_writeable = get_c_wrapping_array(True) + assert not arr_writeable.flags.owndata + assert arr_writeable.flags.writeable + view = arr_writeable[...] + + # Toggling the writeable flag works on the view: + view.flags.writeable = False + assert not view.flags.writeable + view.flags.writeable = True + assert view.flags.writeable + # Flag can be unset on the arr_writeable: + arr_writeable.flags.writeable = False + + arr_readonly = get_c_wrapping_array(False) + assert not arr_readonly.flags.owndata + assert not arr_readonly.flags.writeable + + for arr in [arr_writeable, arr_readonly]: + view = arr[...] 
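+            # Once the view and its base are both read-only, writeable
+            # cannot be re-enabled on either of them: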
+ view.flags.writeable = False # make sure it is readonly + arr.flags.writeable = False + assert not arr.flags.writeable + + with assert_raises(ValueError): + view.flags.writeable = True + + with assert_raises(ValueError): + arr.flags.writeable = True + + def test_warnonwrite(self): + a = np.arange(10) + a.flags._warn_on_write = True + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always') + a[1] = 10 + a[2] = 10 + # only warn once + assert_(len(w) == 1) + + @pytest.mark.parametrize(["flag", "flag_value", "writeable"], + [("writeable", True, True), + # Delete _warn_on_write after deprecation and simplify + # the parameterization: + ("_warn_on_write", True, False), + ("writeable", False, False)]) + def test_readonly_flag_protocols(self, flag, flag_value, writeable): + a = np.arange(10) + setattr(a.flags, flag, flag_value) + + class MyArr: + __array_struct__ = a.__array_struct__ + + assert memoryview(a).readonly is not writeable + assert a.__array_interface__['data'][1] is not writeable + assert np.asarray(MyArr()).flags.writeable is writeable + + def test_otherflags(self): + assert_equal(self.a.flags.carray, True) + assert_equal(self.a.flags['C'], True) + assert_equal(self.a.flags.farray, False) + assert_equal(self.a.flags.behaved, True) + assert_equal(self.a.flags.fnc, False) + assert_equal(self.a.flags.forc, True) + assert_equal(self.a.flags.owndata, True) + assert_equal(self.a.flags.writeable, True) + assert_equal(self.a.flags.aligned, True) + assert_equal(self.a.flags.writebackifcopy, False) + assert_equal(self.a.flags['X'], False) + assert_equal(self.a.flags['WRITEBACKIFCOPY'], False) + + def test_string_align(self): + a = np.zeros(4, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + # not power of two are accessed byte-wise and thus considered aligned + a = np.zeros(5, dtype=np.dtype('|S4')) + assert_(a.flags.aligned) + + def test_void_align(self): + a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) + assert_(a.flags.aligned) + + @pytest.mark.parametrize("row_size", [5, 1 << 16]) + @pytest.mark.parametrize("row_count", [1, 5]) + @pytest.mark.parametrize("ndmin", [0, 1, 2]) + def test_xcontiguous_load_txt(self, row_size, row_count, ndmin): + s = io.StringIO('\n'.join(['1.0 ' * row_size] * row_count)) + a = np.loadtxt(s, ndmin=ndmin) + + assert a.flags.c_contiguous + x = [i for i in a.shape if i != 1] + assert a.flags.f_contiguous == (len(x) <= 1) + + +class TestHash: + # see #3793 + def test_int(self): + for st, ut, s in [(np.int8, np.uint8, 8), + (np.int16, np.uint16, 16), + (np.int32, np.uint32, 32), + (np.int64, np.uint64, 64)]: + for i in range(1, s): + assert_equal(hash(st(-2**i)), hash(-2**i), + err_msg="%r: -2**%d" % (st, i)) + assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (st, i - 1)) + assert_equal(hash(st(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (st, i)) + + i = max(i - 1, 1) + assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), + err_msg="%r: 2**%d" % (ut, i - 1)) + assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), + err_msg="%r: 2**%d - 1" % (ut, i)) + + +class TestAttributes: + def setup_method(self): + self.one = np.arange(10) + self.two = np.arange(20).reshape(4, 5) + self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6) + + def test_attributes(self): + assert_equal(self.one.shape, (10,)) + assert_equal(self.two.shape, (4, 5)) + assert_equal(self.three.shape, (2, 5, 6)) + self.three.shape = (10, 3, 2) + assert_equal(self.three.shape, (10, 3, 2)) + self.three.shape = (2, 5, 6) + 
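+        # C-order strides: each stride is the itemsize times the product of
+        # the trailing dimensions.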
assert_equal(self.one.strides, (self.one.itemsize,)) + num = self.two.itemsize + assert_equal(self.two.strides, (5 * num, num)) + num = self.three.itemsize + assert_equal(self.three.strides, (30 * num, 6 * num, num)) + assert_equal(self.one.ndim, 1) + assert_equal(self.two.ndim, 2) + assert_equal(self.three.ndim, 3) + num = self.two.itemsize + assert_equal(self.two.size, 20) + assert_equal(self.two.nbytes, 20 * num) + assert_equal(self.two.itemsize, self.two.dtype.itemsize) + assert_equal(self.two.base, np.arange(20)) + + def test_dtypeattr(self): + assert_equal(self.one.dtype, np.dtype(np.int_)) + assert_equal(self.three.dtype, np.dtype(np.float64)) + assert_equal(self.one.dtype.char, np.dtype(int).char) + assert self.one.dtype.char in "lq" + assert_equal(self.three.dtype.char, 'd') + assert_(self.three.dtype.str[0] in '<>') + assert_equal(self.one.dtype.str[1], 'i') + assert_equal(self.three.dtype.str[1], 'f') + + def test_int_subclassing(self): + # Regression test for https://github.com/numpy/numpy/pull/3526 + + numpy_int = np.int_(0) + + # int_ doesn't inherit from Python int, because it's not fixed-width + assert_(not isinstance(numpy_int, int)) + + def test_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + return np.ndarray(size, buffer=x, dtype=int, + offset=offset * x.itemsize, + strides=strides * x.itemsize) + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(ValueError, make_array, 8, 3, 1) + assert_equal(make_array(8, 3, 0), np.array([3] * 8)) + # Check behavior reported in gh-2503: + assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3])) + make_array(0, 0, 10) + + def test_set_stridesattr(self): + x = self.one + + def make_array(size, offset, strides): + try: + r = np.ndarray([size], dtype=int, buffer=x, + offset=offset * x.itemsize) + except Exception as e: + raise RuntimeError(e) + r.strides = strides = strides * x.itemsize + return r + + assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1])) + assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9])) + assert_raises(ValueError, make_array, 4, 4, -2) + assert_raises(ValueError, make_array, 4, 2, -1) + assert_raises(RuntimeError, make_array, 8, 3, 1) + # Check that the true extent of the array is used. + # Test relies on as_strided base not exposing a buffer. + x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0)) + + def set_strides(arr, strides): + arr.strides = strides + + assert_raises(ValueError, set_strides, x, (10 * x.itemsize, x.itemsize)) + + # Test for offset calculations: + x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], + shape=(10,), strides=(-1,)) + assert_raises(ValueError, set_strides, x[::-1], -1) + a = x[::-1] + a.strides = 1 + a[::2].strides = 2 + + # test 0d + arr_0d = np.array(0) + arr_0d.strides = () + assert_raises(TypeError, set_strides, arr_0d, None) + + def test_fill(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = np.empty((3, 2, 1), t) + y = np.empty((3, 2, 1), t) + x.fill(1) + y[...] = 1 + assert_equal(x, y) + + def test_fill_max_uint64(self): + x = np.empty((3, 2, 1), dtype=np.uint64) + y = np.empty((3, 2, 1), dtype=np.uint64) + value = 2**64 - 1 + y[...] 
= value
+        x.fill(value)
+        assert_array_equal(x, y)
+
+    def test_fill_struct_array(self):
+        # Filling from a scalar
+        x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
+        x.fill(x[0])
+        assert_equal(x['f1'][1], x['f1'][0])
+        # Filling from a tuple that can be converted
+        # to a scalar
+        x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
+        x.fill((3.5, -2))
+        assert_array_equal(x['a'], [3.5, 3.5])
+        assert_array_equal(x['b'], [-2, -2])
+
+    def test_fill_readonly(self):
+        # gh-22922
+        a = np.zeros(11)
+        a.setflags(write=False)
+        with pytest.raises(ValueError, match=".*read-only"):
+            a.fill(0)
+
+    def test_fill_subarrays(self):
+        # NOTE:
+        # This is also a regression test for a crash with PYTHONMALLOC=debug
+
+        dtype = np.dtype("2<i4")
+        data = np.zeros(10, dtype=dtype)
+        data.fill(255)
+        assert_array_equal(data, np.full_like(data, 255))
+
+
+class TestDtypedescr:
+    def test_byteorders(self):
+        assert_(np.dtype('<i4') != np.dtype('>i4'))
+        assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
+
+    def test_structured_non_void(self):
+        fields = [('a', '<i2'), ('b', '<i2')]
+        dt_int = np.dtype(('i4', fields))
+        assert_equal(str(dt_int), "(numpy.int32, [('a', '<i2'), ('b', '<i2')])")
+
+        # gh-9821
+        arr_int = np.zeros(4, dt_int)
+        assert_equal(repr(arr_int),
+                     "array([0, 0, 0, 0], "
+                     "dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
+
+
+class TestStructured:
+    def test_structured_comparisons_with_promotion(self):
+        # Check that structured arrays can be compared as long as their
+        # dtypes promote fine:
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
+        assert_equal(a == b, [False, True])
+        assert_equal(a != b, [True, False])
+
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>f8'), ('b', '<f8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '<f8'), ('b', '>i8')])
+        assert_equal(a == b, [False, True])
+        assert_equal(a != b, [True, False])
+
+        # Including with embedded subarray dtype (although subarray comparison
+        # itself may still be a bit weird and compare the raw data)
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '10>f8'), ('b', '5<i8')])
+        b = np.array([(5, 43), (10, 1)], dtype=[('a', '10<f8'), ('b', '5>i8')])
+        assert_equal(a == b, [False, True])
+        assert_equal(a != b, [True, False])
+
+    @pytest.mark.parametrize("op", [
+        operator.eq, lambda x, y: operator.eq(y, x),
+        operator.ne, lambda x, y: operator.ne(y, x)])
+    def test_void_comparison_failures(self, op):
+        # In principle, one could decide to return an array of False if
+        # comparisons are impossible. But right now we return TypeError
+        # when "void" dtypes are involved.
+        x = np.zeros(3, dtype=[('a', 'i1')])
+        y = np.zeros(3)
+        # Cannot compare non-structured to structured:
+        with pytest.raises(TypeError):
+            op(x, y)
+
+        # Added title prevents promotion, but casts are OK:
+        y = np.zeros(3, dtype=[(('title', 'a'), 'i1')])
+        assert np.can_cast(y.dtype, x.dtype)
+        with pytest.raises(TypeError):
+            op(x, y)
+
+        x = np.zeros(3, dtype="V7")
+        y = np.zeros(3, dtype="V8")
+        with pytest.raises(TypeError):
+            op(x, y)
+
+    def test_casting(self):
+        # Check that casting a structured array to change its byte order
+        # works
+        a = np.array([(1,)], dtype=[('a', '<i4')])
+        assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
+        b = a.astype([('a', '>i4')])
+        a_tmp = a.byteswap()
+        a_tmp = a_tmp.view(a_tmp.dtype.newbyteorder())
+        assert_equal(b, a_tmp)
+        assert_equal(a['a'][0], b['a'][0])
+
+        # Check that equality comparison works on structured arrays if
+        # they are 'equiv'-castable
+        a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
+        b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
+        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+        assert_equal(a == b, [True, True])
+
+        # Check that 'equiv' casting can change byte order
+        assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
+        c = a.astype(b.dtype, casting='equiv')
+        assert_equal(a == c, [True, True])
+
+        # Check that 'safe' casting can change byte order and up-cast
+        # fields
+        t = [('a', '<i8'), ('b', '>f8')]
+        assert_(np.can_cast(a.dtype, t, casting='safe'))
+        c = a.astype(t, casting='safe')
+        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+                     [True, True])
+
+        # Check that 'same_kind' casting can change byte order and
+        # change field widths within a "kind"
+        t = [('a', '<i4'), ('b', '>f4')]
+        assert_(np.can_cast(a.dtype, t, casting='same_kind'))
+        c = a.astype(t, casting='same_kind')
+        assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
+                     [True, True])
+
+        # Check that casting fails if the casting rule should fail on
+        # any of the fields
+        t = [('a', '>i8'), ('b', '<f4')]
+        assert_(not np.can_cast(a.dtype, t, casting='safe'))
+        t = [('a', '>i2'), ('b', '<f8')]
+        assert_(not np.can_cast(a.dtype, t, casting='equiv'))
+        t = [('a', '>i8'), ('b', '<i4')]
+        assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
+
+        # Check that non-'unsafe' casting can't change the set of field names
+        for casting in ['no', 'safe', 'equiv', 'same_kind']:
+            t = [('a', '>i4')]
+            assert_(not np.can_cast(a.dtype, t, casting=casting))
+            t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
+            assert_(not np.can_cast(a.dtype, t, casting=casting))
+
+        # Structured dtypes with different field names do not promote
+        A = ("a", "<i8")
+        B = ("b", ">i8")
+        ab = np.array([(1, 2)], dtype=[A, B])
+        ba = np.array([(1, 2)], dtype=[B, A])
+        assert_raises(TypeError, np.concatenate, ab, ba)
+        assert_raises(TypeError, np.result_type, ab.dtype, ba.dtype)
+        assert_raises(TypeError, np.promote_types, ab.dtype, ba.dtype)
+
+        # dtypes with same field names/order but different memory offsets
+        # and byte-order are promotable to packed nbo.
+        assert_equal(np.promote_types(ab.dtype, ba[['a', 'b']].dtype),
+                     repack_fields(ab.dtype.newbyteorder('N')))
+
+        # gh-13667
+        # dtypes with different fieldnames but castable field types are castable
+        assert_equal(np.can_cast(ab.dtype, ba.dtype), True)
+        assert_equal(ab.astype(ba.dtype).dtype, ba.dtype)
+        assert_equal(np.can_cast('f8,i8', [('f0', 'f8'), ('f1', 'i8')]), True)
+        assert_equal(np.can_cast('f8,i8', [('f1', 'f8'), ('f0', 'i8')]), True)
+        assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')]), False)
+        assert_equal(np.can_cast('f8,i8', [('f1', 'i8'), ('f0', 'f8')],
+                                 casting='unsafe'), True)
+
+        ab[:] = ba  # make sure assignment still works
+
+        # tests of type-promotion of corresponding fields
+        dt1 = np.dtype([("", "i4")])
+        dt2 = np.dtype([("", "i8")])
+        assert_equal(np.promote_types(dt1, dt2), np.dtype([('f0', 'i8')]))
+        assert_equal(np.promote_types(dt2, dt1), np.dtype([('f0', 'i8')]))
+        assert_raises(TypeError, np.promote_types, dt1, np.dtype([("", "V3")]))
+        assert_equal(np.promote_types('i4,f8', 'i8,f4'),
+                     np.dtype([('f0', 'i8'), ('f1', 'f8')]))
+        # test nested case
+        dt1nest = np.dtype([("", dt1)])
+        dt2nest = np.dtype([("", dt2)])
+        assert_equal(np.promote_types(dt1nest, dt2nest),
+                     np.dtype([('f0', np.dtype([('f0', 'i8')]))]))
+
+        # note that offsets are lost when promoting:
+        dt = np.dtype({'names': ['x'], 'formats': ['i4'], 'offsets': [8]})
+        a = np.ones(3, dtype=dt)
+        assert_equal(np.concatenate([a, a]).dtype, np.dtype([('x', 'i4')]))
+
+    @pytest.mark.parametrize("dtype_dict", [
+        {"names": ["a", "b"], "formats": ["i4", "f"], "itemsize": 100},
+        {"names": ["a", "b"], "formats": ["i4", "f"],
+         "offsets": [0, 12]}])
+    @pytest.mark.parametrize("align", [True, False])
+    def test_structured_promotion_packs(self, dtype_dict, align):
+        # Structured dtypes are packed when promoted (we consider the packed
+        # form to be "canonical"), so there is no extra padding.
+        dtype = np.dtype(dtype_dict, align=align)
+        # Remove non "canonical" dtype options:
+        dtype_dict.pop("itemsize", None)
+        dtype_dict.pop("offsets", None)
+        expected = np.dtype(dtype_dict, align=align)
+
+        res = np.promote_types(dtype, dtype)
+        assert res.itemsize == expected.itemsize
+        assert res.fields == expected.fields
+
+        # But the "expected" one, should just be returned unchanged:
+        res = np.promote_types(expected, expected)
+        assert res is expected
+
+    def test_structured_asarray_is_view(self):
+        # A scalar viewing an array preserves its view even when creating a
+        # new array. This test documents behaviour, it may not be the best
+        # desired behaviour.
+        arr = np.array([1], dtype="i,i")
+        scalar = arr[0]
+        assert not scalar.flags.owndata  # view into the array
+        assert np.asarray(scalar).base is scalar
+        # But never when a dtype is passed in:
+        assert np.asarray(scalar, dtype=scalar.dtype).base is None
+        # A scalar which owns its data does not have this property.
+ # It is not easy to create one, one method is to use pickle: + scalar = pickle.loads(pickle.dumps(scalar)) + assert scalar.flags.owndata + assert np.asarray(scalar).base is None + +class TestBool: + def test_test_interning(self): + a0 = np.bool(0) + b0 = np.bool(False) + assert_(a0 is b0) + a1 = np.bool(1) + b1 = np.bool(True) + assert_(a1 is b1) + assert_(np.array([True])[0] is a1) + assert_(np.array(True)[()] is a1) + + def test_sum(self): + d = np.ones(101, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + d = np.frombuffer(b'\xff\xff' * 100, dtype=bool) + assert_equal(d.sum(), d.size) + assert_equal(d[::2].sum(), d[::2].size) + assert_equal(d[::-2].sum(), d[::-2].size) + + def check_count_nonzero(self, power, length): + powers = [2 ** i for i in range(length)] + for i in range(2**power): + l = [(i & x) != 0 for x in powers] + a = np.array(l, dtype=bool) + c = builtins.sum(l) + assert_equal(np.count_nonzero(a), c) + av = a.view(np.uint8) + av *= 3 + assert_equal(np.count_nonzero(a), c) + av *= 4 + assert_equal(np.count_nonzero(a), c) + av[av != 0] = 0xFF + assert_equal(np.count_nonzero(a), c) + + def test_count_nonzero(self): + # check all 12 bit combinations in a length 17 array + # covers most cases of the 16 byte unrolled code + self.check_count_nonzero(12, 17) + + @pytest.mark.slow + def test_count_nonzero_all(self): + # check all combinations in a length 17 array + # covers all cases of the 16 byte unrolled code + self.check_count_nonzero(17, 17) + + def test_count_nonzero_unaligned(self): + # prevent mistakes as e.g. gh-4060 + for o in range(7): + a = np.zeros((18,), dtype=bool)[o + 1:] + a[:o] = True + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + a = np.ones((18,), dtype=bool)[o + 1:] + a[:o] = False + assert_equal(np.count_nonzero(a), builtins.sum(a.tolist())) + + def _test_cast_from_flexible(self, dtype): + # empty string -> false + for n in range(3): + v = np.array(b'', (dtype, n)) + assert_equal(bool(v), False) + assert_equal(bool(v[()]), False) + assert_equal(v.astype(bool), False) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.False_) + + # anything else -> true + for n in range(1, 4): + for val in [b'a', b'0', b' ']: + v = np.array(val, (dtype, n)) + assert_equal(bool(v), True) + assert_equal(bool(v[()]), True) + assert_equal(v.astype(bool), True) + assert_(isinstance(v.astype(bool), np.ndarray)) + assert_(v[()].astype(bool) is np.True_) + + def test_cast_from_void(self): + self._test_cast_from_flexible(np.void) + + @pytest.mark.xfail(reason="See gh-9847") + def test_cast_from_unicode(self): + self._test_cast_from_flexible(np.str_) + + @pytest.mark.xfail(reason="See gh-9847") + def test_cast_from_bytes(self): + self._test_cast_from_flexible(np.bytes_) + + +class TestZeroSizeFlexible: + @staticmethod + def _zeros(shape, dtype=str): + dtype = np.dtype(dtype) + if dtype == np.void: + return np.zeros(shape, dtype=(dtype, 0)) + + # not constructable directly + dtype = np.dtype([('x', dtype, 0)]) + return np.zeros(shape, dtype=dtype)['x'] + + def test_create(self): + zs = self._zeros(10, bytes) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, np.void) + assert_equal(zs.itemsize, 0) + zs = self._zeros(10, str) + assert_equal(zs.itemsize, 0) + + def _test_sort_partition(self, name, kinds, **kwargs): + # Previously, these would all hang + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + sort_method = getattr(zs, name) + 
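+            # Exercise both the ndarray method and the module-level function.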
sort_func = getattr(np, name) + for kind in kinds: + sort_method(kind=kind, **kwargs) + sort_func(zs, kind=kind, **kwargs) + + def test_sort(self): + self._test_sort_partition('sort', kinds='qhs') + + def test_argsort(self): + self._test_sort_partition('argsort', kinds='qhs') + + def test_partition(self): + self._test_sort_partition('partition', kinds=['introselect'], kth=2) + + def test_argpartition(self): + self._test_sort_partition('argpartition', kinds=['introselect'], kth=2) + + def test_resize(self): + # previously an error + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + zs.resize(25) + zs.resize((10, 10)) + + def test_view(self): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + + # viewing as itself should be allowed + assert_equal(zs.view(dt).dtype, np.dtype(dt)) + + # viewing as any non-empty type gives an empty result + assert_equal(zs.view((dt, 1)).shape, (0,)) + + def test_dumps(self): + zs = self._zeros(10, int) + assert_equal(zs, pickle.loads(zs.dumps())) + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + for dt in [bytes, np.void, str]: + zs = self._zeros(10, dt) + p = pickle.dumps(zs, protocol=proto) + zs2 = pickle.loads(p) + + assert_equal(zs.dtype, zs2.dtype) + + def test_pickle_empty(self): + """Checking if an empty array pickled and un-pickled will not cause a + segmentation fault""" + arr = np.array([]).reshape(999999, 0) + pk_dmp = pickle.dumps(arr) + pk_load = pickle.loads(pk_dmp) + + assert pk_load.size == 0 + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_pickle_with_buffercallback(self): + array = np.arange(10) + buffers = [] + bytes_string = pickle.dumps(array, buffer_callback=buffers.append, + protocol=5) + array_from_buffer = pickle.loads(bytes_string, buffers=buffers) + # when using pickle protocol 5 with buffer callbacks, + # array_from_buffer is reconstructed from a buffer holding a view + # to the initial array's data, so modifying an element in array + # should modify it in array_from_buffer too. 
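+        # (PEP 574: the stream holds only buffer placeholders, and the same
+        # buffers must be handed back to loads() via the buffers= argument.)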
+ array[0] = -1 + assert array_from_buffer[0] == -1, array_from_buffer[0] + + +class TestMethods: + + sort_kinds = ['quicksort', 'heapsort', 'stable'] + + def test_all_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[True, False, True], + [False, False, False], + [True, False, True]]) + wh_lower = np.array([[False], + [False], + [True]]) + for _ax in [0, None]: + assert_equal(a.all(axis=_ax, where=wh_lower), + np.all(a[wh_lower[:, 0], :], axis=_ax)) + assert_equal(np.all(a, axis=_ax, where=wh_lower), + a[wh_lower[:, 0], :].all(axis=_ax)) + + assert_equal(a.all(where=wh_full), True) + assert_equal(np.all(a, where=wh_full), True) + assert_equal(a.all(where=False), True) + assert_equal(np.all(a, where=False), True) + + def test_any_where(self): + a = np.array([[True, False, True], + [False, False, False], + [True, True, True]]) + wh_full = np.array([[False, True, False], + [True, True, True], + [False, False, False]]) + wh_middle = np.array([[False], + [True], + [False]]) + for _ax in [0, None]: + assert_equal(a.any(axis=_ax, where=wh_middle), + np.any(a[wh_middle[:, 0], :], axis=_ax)) + assert_equal(np.any(a, axis=_ax, where=wh_middle), + a[wh_middle[:, 0], :].any(axis=_ax)) + assert_equal(a.any(where=wh_full), False) + assert_equal(np.any(a, where=wh_full), False) + assert_equal(a.any(where=False), False) + assert_equal(np.any(a, where=False), False) + + @pytest.mark.parametrize("dtype", + ["i8", "U10", "object", "datetime64[ms]"]) + def test_any_and_all_result_dtype(self, dtype): + arr = np.ones(3, dtype=dtype) + assert arr.any().dtype == np.bool + assert arr.all().dtype == np.bool + + def test_any_and_all_object_dtype(self): + # (seberg) Not sure we should even allow dtype here, but it is. + arr = np.ones(3, dtype=object) + # keepdims to prevent getting a scalar. 
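+        # (a plain scalar result would carry no array dtype to inspect)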
+ assert arr.any(dtype=object, keepdims=True).dtype == object + assert arr.all(dtype=object, keepdims=True).dtype == object + + def test_compress(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = arr.compress([0, 1, 0, 1, 0], axis=1) + assert_equal(out, tgt) + + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1], axis=1) + assert_equal(out, tgt) + + arr = np.arange(10).reshape(2, 5) + out = arr.compress([0, 1]) + assert_equal(out, 1) + + def test_choose(self): + x = 2 * np.ones((3,), dtype=int) + y = 3 * np.ones((3,), dtype=int) + x2 = 2 * np.ones((2, 3), dtype=int) + y2 = 3 * np.ones((2, 3), dtype=int) + ind = np.array([0, 0, 1]) + + A = ind.choose((x, y)) + assert_equal(A, [2, 2, 3]) + + A = ind.choose((x2, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + A = ind.choose((x, y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + oned = np.ones(1) + # gh-12031, caused SEGFAULT + assert_raises(TypeError, oned.choose, np.void(0), [oned]) + + out = np.array(0) + ret = np.choose(np.array(1), [10, 20, 30], out=out) + assert out is ret + assert_equal(out[()], 20) + + # gh-6272 check overlap on out + x = np.arange(5) + y = np.choose([0, 0, 0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') + assert_equal(y, np.array([0, 1, 2])) + + # gh_28206 check fail when out not writeable + x = np.arange(3) + out = np.zeros(3) + out.setflags(write=False) + assert_raises(ValueError, np.choose, [0, 1, 2], [x, x, x], out=out) + + def test_prod(self): + ba = [1, 2, 10, 11, 6, 5, 4] + ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] + + for ctype in [np.int16, np.uint16, np.int32, np.uint32, + np.float32, np.float64, np.complex64, np.complex128]: + a = np.array(ba, ctype) + a2 = np.array(ba2, ctype) + if ctype in ['1', 'b']: + assert_raises(ArithmeticError, a.prod) + assert_raises(ArithmeticError, a2.prod, axis=1) + else: + assert_equal(a.prod(axis=0), 26400) + assert_array_equal(a2.prod(axis=0), + np.array([50, 36, 84, 180], ctype)) + assert_array_equal(a2.prod(axis=-1), + np.array([24, 1890, 600], ctype)) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_repeat(self, dtype): + m = np.array([1, 2, 3, 4, 5, 6], dtype=dtype) + m_rect = m.reshape((2, 3)) + + A = m.repeat([1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + A = m.repeat(2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + A = m_rect.repeat([2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = m_rect.repeat([1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + A = m_rect.repeat(2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = m_rect.repeat(2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + def test_reshape(self): + arr = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) + + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(arr.reshape(2, 6), tgt) + + tgt = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]] + assert_equal(arr.reshape(3, 4), tgt) + + tgt = [[1, 10, 8, 6], [4, 2, 11, 9], [7, 5, 3, 12]] + assert_equal(arr.reshape((3, 4), order='F'), tgt) + + tgt = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]] + assert_equal(arr.T.reshape((3, 4), order='C'), tgt) + + def test_round(self): + def check_round(arr, expected, *round_args): + assert_equal(arr.round(*round_args), expected) + # With output array + out = 
np.zeros_like(arr) + res = arr.round(*round_args, out=out) + assert_equal(out, expected) + assert out is res + + check_round(np.array([1.2, 1.5]), [1, 2]) + check_round(np.array(1.5), 2) + check_round(np.array([12.2, 15.5]), [10, 20], -1) + check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1) + # Complex rounding + check_round(np.array([4.5 + 1.5j]), [4 + 2j]) + check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1) + + def test_squeeze(self): + a = np.array([[[1], [2], [3]]]) + assert_equal(a.squeeze(), [1, 2, 3]) + assert_equal(a.squeeze(axis=(0,)), [[1], [2], [3]]) + assert_raises(ValueError, a.squeeze, axis=(1,)) + assert_equal(a.squeeze(axis=(2,)), [[1, 2, 3]]) + + def test_transpose(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(a.transpose(), [[1, 3], [2, 4]]) + assert_raises(ValueError, lambda: a.transpose(0)) + assert_raises(ValueError, lambda: a.transpose(0, 0)) + assert_raises(ValueError, lambda: a.transpose(0, 1, 2)) + + def test_sort(self): + # test ordering for floats and complex containing nans. It is only + # necessary to check the less-than comparison, so sorts that + # only follow the insertion sort path are sufficient. We only + # test doubles and complex doubles as the logic is the same. + + # check doubles + msg = "Test real sort order with nans" + a = np.array([np.nan, 1, 0]) + b = np.sort(a) + assert_equal(b, a[::-1], msg) + # check complex + msg = "Test complex sort order with nans" + a = np.zeros(9, dtype=np.complex128) + a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] + a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] + b = np.sort(a) + assert_equal(b, a[::-1], msg) + + with assert_raises_regex( + ValueError, + "kind` and `stable` parameters can't be provided at the same time" + ): + np.sort(a, kind="stable", stable=True) + + # all c scalar sorts use the same code with different types + # so it suffices to run a quick check with one type. The number + # of sorted items must be greater than ~50 to check the actual + # algorithm because quick and merge sort fall over to insertion + # sort for small arrays. + + @pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + np.longdouble]) + def test_sort_unsigned(self, dtype): + a = np.arange(101, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"scalar sort, kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', + [np.int8, np.int16, np.int32, np.int64, np.float16, + np.float32, np.float64, np.longdouble]) + def test_sort_signed(self, dtype): + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"scalar sort, kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble]) + @pytest.mark.parametrize('part', ['real', 'imag']) + def test_sort_complex(self, part, dtype): + # test complex sorts. These use the same code as the scalars + # but the compare function differs. 
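+        # NumPy compares complex values lexicographically: by real part
+        # first, then by imaginary part, so e.g. 1+2j < 2+0j and 1+1j < 1+2j.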
+ cdtype = { + np.single: np.csingle, + np.double: np.cdouble, + np.longdouble: np.clongdouble, + }[dtype] + a = np.arange(-50, 51, dtype=dtype) + b = a[::-1].copy() + ai = (a * (1 + 1j)).astype(cdtype) + bi = (b * (1 + 1j)).astype(cdtype) + setattr(ai, part, 1) + setattr(bi, part, 1) + for kind in self.sort_kinds: + msg = f"complex sort, {part} part == 1, kind={kind}" + c = ai.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + c = bi.copy() + c.sort(kind=kind) + assert_equal(c, ai, msg) + + def test_sort_complex_byte_swapping(self): + # test sorting of complex arrays requiring byte-swapping, gh-5441 + for endianness in '<>': + for dt in np.typecodes['Complex']: + arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt) + c = arr.copy() + c.sort() + msg = f'byte-swapped complex sort, dtype={dt}' + assert_equal(c, arr, msg) + + @pytest.mark.parametrize('dtype', [np.bytes_, np.str_]) + def test_sort_string(self, dtype): + # np.array will perform the encoding to bytes for us in the bytes test + a = np.array(['aaaaaaaa' + chr(i) for i in range(101)], dtype=dtype) + b = a[::-1].copy() + for kind in self.sort_kinds: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_object(self): + # test object array sorts. + a = np.empty((101,), dtype=object) + a[:] = list(range(101)) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + @pytest.mark.parametrize("dt", [ + np.dtype([('f', float), ('i', int)]), + np.dtype([('f', float), ('i', object)])]) + @pytest.mark.parametrize("step", [1, 2]) + def test_sort_structured(self, dt, step): + # test record array sorts. + a = np.array([(i, i) for i in range(101 * step)], dtype=dt) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy()[::step] + indx = c.argsort(kind=kind) + c.sort(kind=kind) + assert_equal(c, a[::step], msg) + assert_equal(a[::step][indx], a[::step], msg) + c = b.copy()[::step] + indx = c.argsort(kind=kind) + c.sort(kind=kind) + assert_equal(c, a[step - 1::step], msg) + assert_equal(b[::step][indx], a[step - 1::step], msg) + + @pytest.mark.parametrize('dtype', ['datetime64[D]', 'timedelta64[D]']) + def test_sort_time(self, dtype): + # test datetime64 and timedelta64 sorts. + a = np.arange(0, 101, dtype=dtype) + b = a[::-1] + for kind in ['q', 'h', 'm']: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + c = b.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_sort_axis(self): + # check axis handling. 
This should be the same for all type + # specific sorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 0], [3, 2]]) + c = np.array([[2, 3], [0, 1]]) + d = a.copy() + d.sort(axis=0) + assert_equal(d, b, "test sort with axis=0") + d = a.copy() + d.sort(axis=1) + assert_equal(d, c, "test sort with axis=1") + d = a.copy() + d.sort() + assert_equal(d, c, "test sort with default axis") + + def test_sort_size_0(self): + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = f'test empty array sort with axis={axis}' + assert_equal(np.sort(a, axis=axis), a, msg) + msg = 'test empty array sort with axis=None' + assert_equal(np.sort(a, axis=None), a.ravel(), msg) + + def test_sort_bad_ordering(self): + # test generic class with bogus ordering, + # should not segfault. + class Boom: + def __lt__(self, other): + return True + + a = np.array([Boom()] * 100, dtype=object) + for kind in self.sort_kinds: + msg = f"kind={kind}" + c = a.copy() + c.sort(kind=kind) + assert_equal(c, a, msg) + + def test_void_sort(self): + # gh-8210 - previously segfaulted + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view('V4') + arr[::-1].sort() + + dt = np.dtype([('val', 'i4', (1,))]) + for i in range(4): + rand = np.random.randint(256, size=4000, dtype=np.uint8) + arr = rand.view(dt) + arr[::-1].sort() + + def test_sort_raises(self): + # gh-9404 + arr = np.array([0, datetime.now(), 1], dtype=object) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + # gh-3879 + + class Raiser: + def raises_anything(*args, **kwargs): + raise TypeError("SOMETHING ERRORED") + __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything + arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1) + np.random.shuffle(arr) + for kind in self.sort_kinds: + assert_raises(TypeError, arr.sort, kind=kind) + + def test_sort_degraded(self): + # test degraded dataset would take minutes to run with normal qsort + d = np.arange(1000000) + do = d.copy() + x = d + # create a median of 3 killer where each median is the sorted second + # last element of the quicksort partition + while x.size > 3: + mid = x.size // 2 + x[mid], x[-2] = x[-2], x[mid] + x = x[:-2] + + assert_equal(np.sort(d), do) + assert_equal(d[np.argsort(d)], do) + + def test_copy(self): + def assert_fortran(arr): + assert_(arr.flags.fortran) + assert_(arr.flags.f_contiguous) + assert_(not arr.flags.c_contiguous) + + def assert_c(arr): + assert_(not arr.flags.fortran) + assert_(not arr.flags.f_contiguous) + assert_(arr.flags.c_contiguous) + + a = np.empty((2, 2), order='F') + # Test copying a Fortran array + assert_c(a.copy()) + assert_c(a.copy('C')) + assert_fortran(a.copy('F')) + assert_fortran(a.copy('A')) + + # Now test starting with a C array. 
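+        # order='A' means "match the source": it produced a Fortran-ordered
+        # copy above and must produce a C-ordered copy here.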
+        a = np.empty((2, 2), order='C')
+        assert_c(a.copy())
+        assert_c(a.copy('C'))
+        assert_fortran(a.copy('F'))
+        assert_c(a.copy('A'))
+
+    @pytest.mark.parametrize("dtype", ['O', np.int32, 'i,O'])
+    def test__deepcopy__(self, dtype):
+        # Force the entry of NULLs into array
+        a = np.empty(4, dtype=dtype)
+        ctypes.memset(a.ctypes.data, 0, a.nbytes)
+
+        # Ensure no error is raised, see gh-21833
+        b = a.__deepcopy__({})
+
+        a[0] = 42
+        with pytest.raises(AssertionError):
+            assert_array_equal(a, b)
+
+    def test__deepcopy__catches_failure(self):
+        class MyObj:
+            def __deepcopy__(self, *args, **kwargs):
+                raise RuntimeError
+
+        arr = np.array([1, MyObj(), 3], dtype='O')
+        with pytest.raises(RuntimeError):
+            arr.__deepcopy__({})
+
+    def test_sort_order(self):
+        # Test sorting an array with fields
+        x1 = np.array([21, 32, 14])
+        x2 = np.array(['my', 'first', 'name'])
+        x3 = np.array([3.1, 4.5, 6.2])
+        r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
+
+        r.sort(order=['id'])
+        assert_equal(r.id, np.array([14, 21, 32]))
+        assert_equal(r.word, np.array(['name', 'my', 'first']))
+        assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
+
+        r.sort(order=['word'])
+        assert_equal(r.id, np.array([32, 21, 14]))
+        assert_equal(r.word, np.array(['first', 'my', 'name']))
+        assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
+
+        r.sort(order=['number'])
+        assert_equal(r.id, np.array([21, 32, 14]))
+        assert_equal(r.word, np.array(['my', 'first', 'name']))
+        assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
+
+        assert_raises_regex(ValueError, 'duplicate',
+                            lambda: r.sort(order=['id', 'id']))
+
+        # Test sorting on a field with non-native byte order
+        if sys.byteorder == 'little':
+            strtype = '>i2'
+        else:
+            strtype = '<i2'
+        mydtype = [('name', 'U5'), ('col2', strtype)]
+        r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
+                     dtype=mydtype)
+        r.sort(order='col2')
+        assert_equal(r['col2'], [1, 3, 255, 258])
+        assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
+                                 dtype=mydtype))
+
+    def test_argsort(self):
+        # all c scalar argsorts use the same code with different types
+        # so it suffices to run a quick check with one type. The number
+        # of sorted items must be greater than ~50 to check the actual
+        # algorithm because quick and merge sort fall over to insertion
+        # sort for small arrays.
+
+        for dtype in [np.int32, np.uint32, np.float32]:
+            a = np.arange(101, dtype=dtype)
+            b = a[::-1].copy()
+            r = np.arange(101)
+            rr = r[::-1]
+            for kind in self.sort_kinds:
+                msg = f"scalar argsort, kind={kind}, dtype={dtype}"
+                assert_equal(a.copy().argsort(kind=kind), r, msg)
+                assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test argsort of complex arrays requiring byte-swapping, gh-5441
+        for endianness in '<>':
+            for dt in np.typecodes['Complex']:
+                arr = np.array([1 + 3.j, 2 + 2.j, 3 + 1.j], dtype=endianness + dt)
+                msg = f'byte-swapped complex argsort, dtype={dt}'
+                assert_equal(arr.argsort(),
+                             np.arange(len(arr), dtype=np.intp), msg)
+
+        # test string argsorts.
+        s = 'aaaaaaaa'
+        a = np.array([s + chr(i) for i in range(101)])
+        b = a[::-1].copy()
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in self.sort_kinds:
+            msg = f"string argsort, kind={kind}"
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test unicode argsorts.
+        s = 'aaaaaaaa'
+        a = np.array([s + chr(i) for i in range(101)], dtype=np.str_)
+        b = a[::-1]
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in self.sort_kinds:
+            msg = f"unicode argsort, kind={kind}"
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test object array argsorts.
+        a = np.empty((101,), dtype=object)
+        a[:] = list(range(101))
+        b = a[::-1]
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in self.sort_kinds:
+            msg = f"object argsort, kind={kind}"
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test structured array argsorts.
+        dt = np.dtype([('f', float), ('i', int)])
+        a = np.array([(i, i) for i in range(101)], dtype=dt)
+        b = a[::-1]
+        r = np.arange(101)
+        rr = r[::-1]
+        for kind in self.sort_kinds:
+            msg = f"structured array argsort, kind={kind}"
+            assert_equal(a.copy().argsort(kind=kind), r, msg)
+            assert_equal(b.copy().argsort(kind=kind), rr, msg)
+
+        # test datetime64 argsorts.
+ a = np.arange(0, 101, dtype='datetime64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = f"datetime64 argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # test timedelta64 argsorts. + a = np.arange(0, 101, dtype='timedelta64[D]') + b = a[::-1] + r = np.arange(101) + rr = r[::-1] + for kind in ['q', 'h', 'm']: + msg = f"timedelta64 argsort, kind={kind}" + assert_equal(a.copy().argsort(kind=kind), r, msg) + assert_equal(b.copy().argsort(kind=kind), rr, msg) + + # check axis handling. This should be the same for all type + # specific argsorts, so we only check it for one type and one kind + a = np.array([[3, 2], [1, 0]]) + b = np.array([[1, 1], [0, 0]]) + c = np.array([[1, 0], [1, 0]]) + assert_equal(a.copy().argsort(axis=0), b) + assert_equal(a.copy().argsort(axis=1), c) + assert_equal(a.copy().argsort(), c) + + # check axis handling for multidimensional empty arrays + a = np.array([]) + a.shape = (3, 2, 1, 0) + for axis in range(-a.ndim, a.ndim): + msg = f'test empty array argsort with axis={axis}' + assert_equal(np.argsort(a, axis=axis), + np.zeros_like(a, dtype=np.intp), msg) + msg = 'test empty array argsort with axis=None' + assert_equal(np.argsort(a, axis=None), + np.zeros_like(a.ravel(), dtype=np.intp), msg) + + # check that stable argsorts are stable + r = np.arange(100) + # scalars + a = np.zeros(100) + assert_equal(a.argsort(kind='m'), r) + # complex + a = np.zeros(100, dtype=complex) + assert_equal(a.argsort(kind='m'), r) + # string + a = np.array(['aaaaaaaaa' for i in range(100)]) + assert_equal(a.argsort(kind='m'), r) + # unicode + a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.str_) + assert_equal(a.argsort(kind='m'), r) + + with assert_raises_regex( + ValueError, + "kind` and `stable` parameters can't be provided at the same time" + ): + np.argsort(a, kind="stable", stable=True) + + def test_sort_unicode_kind(self): + d = np.arange(10) + k = b'\xc3\xa4'.decode("UTF8") + assert_raises(ValueError, d.sort, kind=k) + assert_raises(ValueError, d.argsort, kind=k) + + @pytest.mark.parametrize('a', [ + np.array([0, 1, np.nan], dtype=np.float16), + np.array([0, 1, np.nan], dtype=np.float32), + np.array([0, 1, np.nan]), + ]) + def test_searchsorted_floats(self, a): + # test for floats arrays containing nans. Explicitly test + # half, single, and double precision floats to verify that + # the NaN-handling is correct. + msg = f"Test real ({a.dtype}) searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(3), msg) + msg = f"Test real ({a.dtype}) searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 4), msg) + # check keyword arguments + a.searchsorted(v=1) + x = np.array([0, 1, np.nan], dtype='float32') + y = np.searchsorted(x, x[-1]) + assert_equal(y, 2) + + def test_searchsorted_complex(self): + # test for complex arrays containing nans. + # The search sorted routines use the compare functions for the + # array type, so this checks if that is consistent with the sort + # order. 
+ # check double complex + a = np.zeros(9, dtype=np.complex128) + a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] + a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] + msg = "Test complex searchsorted with nans, side='l'" + b = a.searchsorted(a, side='left') + assert_equal(b, np.arange(9), msg) + msg = "Test complex searchsorted with nans, side='r'" + b = a.searchsorted(a, side='right') + assert_equal(b, np.arange(1, 10), msg) + msg = "Test searchsorted with little endian, side='l'" + a = np.array([0, 128], dtype=' p[:, i]).all(), + msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) + for row in range(p.shape[0]): + self.assert_partitioned(p[row], [i]) + self.assert_partitioned(parg[row], [i]) + + p = np.partition(d0, i, axis=0, kind=k) + parg = d0[np.argpartition(d0, i, axis=0, kind=k), + np.arange(d0.shape[1])[None, :]] + aae(p[i, :], np.array([i] * d1.shape[0], dtype=dt)) + # array_less does not seem to work right + at((p[:i, :] <= p[i, :]).all(), + msg="%d: %r <= %r" % (i, p[i, :], p[:i, :])) + at((p[i + 1:, :] > p[i, :]).all(), + msg="%d: %r < %r" % (i, p[i, :], p[:, i + 1:])) + for col in range(p.shape[1]): + self.assert_partitioned(p[:, col], [i]) + self.assert_partitioned(parg[:, col], [i]) + + # check inplace + dc = d.copy() + dc.partition(i, kind=k) + assert_equal(dc, np.partition(d, i, kind=k)) + dc = d0.copy() + dc.partition(i, axis=0, kind=k) + assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) + dc = d1.copy() + dc.partition(i, axis=1, kind=k) + assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) + + def assert_partitioned(self, d, kth): + prev = 0 + for k in np.sort(kth): + assert_array_compare(operator.__le__, d[prev:k], d[k], + err_msg='kth %d' % k) + assert_((d[k:] >= d[k]).all(), + msg="kth %d, %r not greater equal %r" % (k, d[k:], d[k])) + prev = k + 1 + + def test_partition_iterative(self): + d = np.arange(17) + kth = (0, 1, 2, 429, 231) + assert_raises(ValueError, d.partition, kth) + assert_raises(ValueError, d.argpartition, kth) + d = np.arange(10).reshape((2, 5)) + assert_raises(ValueError, d.partition, kth, axis=0) + assert_raises(ValueError, d.partition, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=1) + assert_raises(ValueError, np.partition, d, kth, axis=None) + + d = np.array([3, 4, 2, 1]) + p = np.partition(d, (0, 3)) + self.assert_partitioned(p, (0, 3)) + self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) + + assert_array_equal(p, np.partition(d, (-3, -1))) + assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) + + d = np.arange(17) + np.random.shuffle(d) + d.partition(range(d.size)) + assert_array_equal(np.arange(17), d) + np.random.shuffle(d) + assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) + + # test unsorted kth + d = np.arange(17) + np.random.shuffle(d) + keys = np.array([1, 3, 8, -2]) + np.random.shuffle(d) + p = np.partition(d, keys) + self.assert_partitioned(p, keys) + p = d[np.argpartition(d, keys)] + self.assert_partitioned(p, keys) + np.random.shuffle(keys) + assert_array_equal(np.partition(d, keys), p) + assert_array_equal(d[np.argpartition(d, keys)], p) + + # equal kth + d = np.arange(20)[::-1] + self.assert_partitioned(np.partition(d, [5] * 4), [5]) + self.assert_partitioned(np.partition(d, [5] * 4 + [6, 13]), + [5] * 4 + [6, 13]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4)], [5]) + self.assert_partitioned(d[np.argpartition(d, [5] * 4 + [6, 13])], + [5] * 4 + [6, 13]) + + d = np.arange(12) + np.random.shuffle(d) + d1 = np.tile(np.arange(12), (4, 1)) + 
map(np.random.shuffle, d1) + d0 = np.transpose(d1) + + kth = (1, 6, 7, -1) + p = np.partition(d1, kth, axis=1) + pa = d1[np.arange(d1.shape[0])[:, None], + d1.argpartition(kth, axis=1)] + assert_array_equal(p, pa) + for i in range(d1.shape[0]): + self.assert_partitioned(p[i, :], kth) + p = np.partition(d0, kth, axis=0) + pa = d0[np.argpartition(d0, kth, axis=0), + np.arange(d0.shape[1])[None, :]] + assert_array_equal(p, pa) + for i in range(d0.shape[1]): + self.assert_partitioned(p[:, i], kth) + + def test_partition_cdtype(self): + d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), + ('Lancelot', 1.9, 38)], + dtype=[('name', '|S10'), ('height', ' (numpy ufunc, has_in_place_version, preferred_dtype) + ops = { + 'add': (np.add, True, float), + 'sub': (np.subtract, True, float), + 'mul': (np.multiply, True, float), + 'truediv': (np.true_divide, True, float), + 'floordiv': (np.floor_divide, True, float), + 'mod': (np.remainder, True, float), + 'divmod': (np.divmod, False, float), + 'pow': (np.power, True, int), + 'lshift': (np.left_shift, True, int), + 'rshift': (np.right_shift, True, int), + 'and': (np.bitwise_and, True, int), + 'xor': (np.bitwise_xor, True, int), + 'or': (np.bitwise_or, True, int), + 'matmul': (np.matmul, True, float), + # 'ge': (np.less_equal, False), + # 'gt': (np.less, False), + # 'le': (np.greater_equal, False), + # 'lt': (np.greater, False), + # 'eq': (np.equal, False), + # 'ne': (np.not_equal, False), + } + + class Coerced(Exception): + pass + + def array_impl(self): + raise Coerced + + def op_impl(self, other): + return "forward" + + def rop_impl(self, other): + return "reverse" + + def iop_impl(self, other): + return "in-place" + + def array_ufunc_impl(self, ufunc, method, *args, **kwargs): + return ("__array_ufunc__", ufunc, method, args, kwargs) + + # Create an object with the given base, in the given module, with a + # bunch of placeholder __op__ methods, and optionally a + # __array_ufunc__ and __array_priority__. + def make_obj(base, array_priority=False, array_ufunc=False, + alleged_module="__main__"): + class_namespace = {"__array__": array_impl} + if array_priority is not False: + class_namespace["__array_priority__"] = array_priority + for op in ops: + class_namespace[f"__{op}__"] = op_impl + class_namespace[f"__r{op}__"] = rop_impl + class_namespace[f"__i{op}__"] = iop_impl + if array_ufunc is not False: + class_namespace["__array_ufunc__"] = array_ufunc + eval_namespace = {"base": base, + "class_namespace": class_namespace, + "__name__": alleged_module, + } + MyType = eval("type('MyType', (base,), class_namespace)", + eval_namespace) + if issubclass(MyType, np.ndarray): + # Use this range to avoid special case weirdnesses around + # divide-by-0, pow(x, 2), overflow due to pow(big, big), etc. 
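+            # i.e. the values 3..6: no zeros to divide by, and exponents too
+            # small to overflow any of the integer ops listed above.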
+ return np.arange(3, 7).reshape(2, 2).view(MyType) + else: + return MyType() + + def check(obj, binop_override_expected, ufunc_override_expected, + inplace_override_expected, check_scalar=True): + for op, (ufunc, has_inplace, dtype) in ops.items(): + err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s' + % (op, ufunc, has_inplace, dtype)) + check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)] + if check_scalar: + check_objs.append(check_objs[0][0]) + for arr in check_objs: + arr_method = getattr(arr, f"__{op}__") + + def first_out_arg(result): + if op == "divmod": + assert_(isinstance(result, tuple)) + return result[0] + else: + return result + + # arr __op__ obj + if binop_override_expected: + assert_equal(arr_method(obj), NotImplemented, err_msg) + elif ufunc_override_expected: + assert_equal(arr_method(obj)[0], "__array_ufunc__", + err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_method(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_method, obj, err_msg=err_msg) + # obj __op__ arr + arr_rmethod = getattr(arr, f"__r{op}__") + if ufunc_override_expected: + res = arr_rmethod(obj) + assert_equal(res[0], "__array_ufunc__", + err_msg=err_msg) + assert_equal(res[1], ufunc, err_msg=err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + res = first_out_arg(arr_rmethod(obj)) + assert_(res.__class__ is obj.__class__, err_msg) + else: + # __array_ufunc__ = "asdf" creates a TypeError + assert_raises((TypeError, Coerced), + arr_rmethod, obj, err_msg=err_msg) + + # arr __iop__ obj + # array scalars don't have in-place operators + if has_inplace and isinstance(arr, np.ndarray): + arr_imethod = getattr(arr, f"__i{op}__") + if inplace_override_expected: + assert_equal(arr_method(obj), NotImplemented, + err_msg=err_msg) + elif ufunc_override_expected: + res = arr_imethod(obj) + assert_equal(res[0], "__array_ufunc__", err_msg) + assert_equal(res[1], ufunc, err_msg) + assert_(type(res[-1]["out"]) is tuple, err_msg) + assert_(res[-1]["out"][0] is arr, err_msg) + elif (isinstance(obj, np.ndarray) and + (type(obj).__array_ufunc__ is + np.ndarray.__array_ufunc__)): + # __array__ gets ignored + assert_(arr_imethod(obj) is arr, err_msg) + else: + assert_raises((TypeError, Coerced), + arr_imethod, obj, + err_msg=err_msg) + + op_fn = getattr(operator, op, None) + if op_fn is None: + op_fn = getattr(operator, op + "_", None) + if op_fn is None: + op_fn = getattr(builtins, op) + assert_equal(op_fn(obj, arr), "forward", err_msg) + if not isinstance(obj, np.ndarray): + if binop_override_expected: + assert_equal(op_fn(arr, obj), "reverse", err_msg) + elif ufunc_override_expected: + assert_equal(op_fn(arr, obj)[0], "__array_ufunc__", + err_msg) + if ufunc_override_expected: + assert_equal(ufunc(obj, arr)[0], "__array_ufunc__", + err_msg) + + # No array priority, no array_ufunc -> nothing called + check(make_obj(object), False, False, False) + # Negative array priority, no array_ufunc -> nothing called + # (has to be very negative, because scalar priority is -1000000.0) + check(make_obj(object, array_priority=-2**30), False, False, False) + # Positive array priority, no array_ufunc -> binops and iops only + check(make_obj(object, array_priority=1), True, False, True) + # ndarray ignores array_priority for ndarray subclasses + 
check(make_obj(np.ndarray, array_priority=1), False, False, False, + check_scalar=False) + # Positive array_priority and array_ufunc -> array_ufunc only + check(make_obj(object, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + check(make_obj(np.ndarray, array_priority=1, + array_ufunc=array_ufunc_impl), False, True, False) + # array_ufunc set to None -> defer binops only + check(make_obj(object, array_ufunc=None), True, False, False) + check(make_obj(np.ndarray, array_ufunc=None), True, False, False, + check_scalar=False) + + @pytest.mark.parametrize("priority", [None, "runtime error"]) + def test_ufunc_binop_bad_array_priority(self, priority): + # Mainly checks that this does not crash. The second array has a lower + # priority than -1 ("error value"). If the __radd__ actually exists, + # bad things can happen (I think via the scalar paths). + # In principle both of these can probably just be errors in the future. + class BadPriority: + @property + def __array_priority__(self): + if priority == "runtime error": + raise RuntimeError("RuntimeError in __array_priority__!") + return priority + + def __radd__(self, other): + return "result" + + class LowPriority(np.ndarray): + __array_priority__ = -1000 + + # Priority failure uses the same as scalars (smaller -1000). So the + # LowPriority wins with 'result' for each element (inner operation). + res = np.arange(3).view(LowPriority) + BadPriority() + assert res.shape == (3,) + assert res[0] == 'result' + + @pytest.mark.parametrize("scalar", [ + np.longdouble(1), np.timedelta64(120, 'm')]) + @pytest.mark.parametrize("op", [operator.add, operator.xor]) + def test_scalar_binop_guarantees_ufunc(self, scalar, op): + # Test that __array_ufunc__ will always cause ufunc use even when + # we have to protect some other calls from recursing (see gh-26904). + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return "result" + + assert SomeClass() + scalar == "result" + assert scalar + SomeClass() == "result" + + def test_ufunc_override_normalize_signature(self): + # gh-5674 + class SomeClass: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + return kw + + a = SomeClass() + kw = np.add(a, [1]) + assert_('sig' not in kw and 'signature' not in kw) + kw = np.add(a, [1], sig='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + kw = np.add(a, [1], signature='ii->i') + assert_('sig' not in kw and 'signature' in kw) + assert_equal(kw['signature'], 'ii->i') + + def test_array_ufunc_index(self): + # Check that index is set appropriately, also if only an output + # is passed on (latter is another regression tests for github bug 4753) + # This also checks implicitly that 'out' is always a tuple. + class CheckIndex: + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + for i, a in enumerate(inputs): + if a is self: + return i + # calls below mean we must be in an output. + for j, a in enumerate(kw['out']): + if a is self: + return (j,) + + a = CheckIndex() + dummy = np.arange(2.) 
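+        # CheckIndex returns an int i when it finds itself among the inputs
+        # and a 1-tuple (j,) when it finds itself among the outputs, so each
+        # assertion below pins down exactly how the operand was routed.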
+ # 1 input, 1 output + assert_equal(np.sin(a), 0) + assert_equal(np.sin(dummy, a), (0,)) + assert_equal(np.sin(dummy, out=a), (0,)) + assert_equal(np.sin(dummy, out=(a,)), (0,)) + assert_equal(np.sin(a, a), 0) + assert_equal(np.sin(a, out=a), 0) + assert_equal(np.sin(a, out=(a,)), 0) + # 1 input, 2 outputs + assert_equal(np.modf(dummy, a), (0,)) + assert_equal(np.modf(dummy, None, a), (1,)) + assert_equal(np.modf(dummy, dummy, a), (1,)) + assert_equal(np.modf(dummy, out=(a, None)), (0,)) + assert_equal(np.modf(dummy, out=(a, dummy)), (0,)) + assert_equal(np.modf(dummy, out=(None, a)), (1,)) + assert_equal(np.modf(dummy, out=(dummy, a)), (1,)) + assert_equal(np.modf(a, out=(dummy, a)), 0) + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs + np.modf(dummy, out=a) + + assert_raises(ValueError, np.modf, dummy, out=(a,)) + + # 2 inputs, 1 output + assert_equal(np.add(a, dummy), 0) + assert_equal(np.add(dummy, a), 1) + assert_equal(np.add(dummy, dummy, a), (0,)) + assert_equal(np.add(dummy, a, a), 1) + assert_equal(np.add(dummy, dummy, out=a), (0,)) + assert_equal(np.add(dummy, dummy, out=(a,)), (0,)) + assert_equal(np.add(a, dummy, out=a), 0) + + def test_out_override(self): + # regression test for github bug 4753 + class OutClass(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kw): + if 'out' in kw: + tmp_kw = kw.copy() + tmp_kw.pop('out') + func = getattr(ufunc, method) + kw['out'][0][...] = func(*inputs, **tmp_kw) + + A = np.array([0]).view(OutClass) + B = np.array([5]) + C = np.array([6]) + np.multiply(C, B, A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + A[0] = 0 + np.multiply(C, B, out=A) + assert_equal(A[0], 30) + assert_(isinstance(A, OutClass)) + + def test_pow_array_object_dtype(self): + # test pow on arrays of object dtype + class SomeClass: + def __init__(self, num=None): + self.num = num + + # want to ensure a fast pow path is not taken + def __mul__(self, other): + raise AssertionError('__mul__ should not be called') + + def __truediv__(self, other): + raise AssertionError('__truediv__ should not be called') + + def __pow__(self, exp): + return SomeClass(num=self.num ** exp) + + def __eq__(self, other): + if isinstance(other, SomeClass): + return self.num == other.num + + __rpow__ = __pow__ + + def pow_for(exp, arr): + return np.array([x ** exp for x in arr]) + + obj_arr = np.array([SomeClass(1), SomeClass(2), SomeClass(3)]) + + assert_equal(obj_arr ** 0.5, pow_for(0.5, obj_arr)) + assert_equal(obj_arr ** 0, pow_for(0, obj_arr)) + assert_equal(obj_arr ** 1, pow_for(1, obj_arr)) + assert_equal(obj_arr ** -1, pow_for(-1, obj_arr)) + assert_equal(obj_arr ** 2, pow_for(2, obj_arr)) + + def test_pow_calls_square_structured_dtype(self): + # gh-29388 + dt = np.dtype([('a', 'i4'), ('b', 'i4')]) + a = np.array([(1, 2), (3, 4)], dtype=dt) + with pytest.raises(TypeError, match="ufunc 'square' not supported"): + a ** 2 + + def test_pos_array_ufunc_override(self): + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*[i.view(np.ndarray) for + i in inputs], **kwargs) + tst = np.array('foo').view(A) + with assert_raises(TypeError): + +tst + + +class TestTemporaryElide: + # elision is only triggered on relatively large arrays + + def test_extension_incref_elide(self): + # test extension (e.g. 
cython) calling PyNumber_* slots without + # increasing the reference counts + # + # def incref_elide(a): + # d = input.copy() # refcount 1 + # return d, d + d # PyNumber_Add without increasing refcount + from numpy._core._multiarray_tests import incref_elide + d = np.ones(100000) + orig, res = incref_elide(d) + d + d + # the return original should not be changed to an inplace operation + assert_array_equal(orig, d) + assert_array_equal(res, d + d) + + def test_extension_incref_elide_stack(self): + # scanning if the refcount == 1 object is on the python stack to check + # that we are called directly from python is flawed as object may still + # be above the stack pointer and we have no access to the top of it + # + # def incref_elide_l(d): + # return l[4] + l[4] # PyNumber_Add without increasing refcount + from numpy._core._multiarray_tests import incref_elide_l + # padding with 1 makes sure the object on the stack is not overwritten + l = [1, 1, 1, 1, np.ones(100000)] + res = incref_elide_l(l) + # the return original should not be changed to an inplace operation + assert_array_equal(l[4], np.ones(100000)) + assert_array_equal(res, l[4] + l[4]) + + def test_temporary_with_cast(self): + # check that we don't elide into a temporary which would need casting + d = np.ones(200000, dtype=np.int64) + r = ((d + d) + np.array(2**222, dtype='O')) + assert_equal(r.dtype, np.dtype('O')) + + r = ((d + d) / 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = np.true_divide((d + d), 2) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) / 2.) + assert_equal(r.dtype, np.dtype('f8')) + + r = ((d + d) // 2) + assert_equal(r.dtype, np.dtype(np.int64)) + + # commutative elision into the astype result + f = np.ones(100000, dtype=np.float32) + assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8')) + + # no elision into lower type + d = f.astype(np.float64) + assert_equal(((f + f) + d).dtype, d.dtype) + l = np.ones(100000, dtype=np.longdouble) + assert_equal(((d + d) + l).dtype, l.dtype) + + # test unary abs with different output dtype + for dt in (np.complex64, np.complex128, np.clongdouble): + c = np.ones(100000, dtype=dt) + r = abs(c * 2.0) + assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2))) + + def test_elide_broadcast(self): + # test no elision on broadcast to higher dimension + # only triggers elision code path in debug mode as triggering it in + # normal mode needs 256kb large matching dimension, so a lot of memory + d = np.ones((2000, 1), dtype=int) + b = np.ones((2000), dtype=bool) + r = (1 - d) + b + assert_equal(r, 1) + assert_equal(r.shape, (2000, 2000)) + + def test_elide_scalar(self): + # check inplace op does not create ndarray from scalars + a = np.bool() + assert_(type(~(a & a)) is np.bool) + + def test_elide_scalar_readonly(self): + # The imaginary part of a real array is readonly. This needs to go + # through fast_scalar_power which is only called for powers of + # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for + # elision which can be gotten for the imaginary part of a real + # array. Should not error. 
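+        # The .imag of a real array is a read-only (all-zero) view, so the
+        # power ufunc must allocate a fresh output rather than eliding into
+        # its operand.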
+ a = np.empty(100000, dtype=np.float64) + a.imag ** 2 + + def test_elide_readonly(self): + # don't try to elide readonly temporaries + r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0 + assert_equal(r, 0) + + def test_elide_updateifcopy(self): + a = np.ones(2**20)[::2] + b = a.flat.__array__() + 1 + del b + assert_equal(a, 1) + + +class TestCAPI: + def test_IsPythonScalar(self): + from numpy._core._multiarray_tests import IsPythonScalar + assert_(IsPythonScalar(b'foobar')) + assert_(IsPythonScalar(1)) + assert_(IsPythonScalar(2**80)) + assert_(IsPythonScalar(2.)) + assert_(IsPythonScalar("a")) + + @pytest.mark.parametrize("converter", + [_multiarray_tests.run_scalar_intp_converter, + _multiarray_tests.run_scalar_intp_from_sequence]) + def test_intp_sequence_converters(self, converter): + # Test simple values (-1 is special for error return paths) + assert converter(10) == (10,) + assert converter(-1) == (-1,) + # A 0-D array looks a bit like a sequence but must take the integer + # path: + assert converter(np.array(123)) == (123,) + # Test simple sequences (intp_from_sequence only supports length 1): + assert converter((10,)) == (10,) + assert converter(np.array([11])) == (11,) + + @pytest.mark.parametrize("converter", + [_multiarray_tests.run_scalar_intp_converter, + _multiarray_tests.run_scalar_intp_from_sequence]) + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_intp_sequence_converters_errors(self, converter): + with pytest.raises(TypeError, + match="expected a sequence of integers or a single integer, "): + converter(object()) + with pytest.raises(TypeError, + match="expected a sequence of integers or a single integer, " + "got '32.0'"): + converter(32.) 
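+        # A float inside a sequence takes the sequence path instead, where
+        # Python's own "cannot be interpreted as an integer" error surfaces,
+        # as checked next.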
+ with pytest.raises(TypeError, + match="'float' object cannot be interpreted as an integer"): + converter([32.]) + with pytest.raises(ValueError, + match="Maximum allowed dimension"): + # These converters currently convert overflows to a ValueError + converter(2**64) + + +class TestSubscripting: + def test_test_zero_rank(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x[0], np.int_)) + assert_(type(x[0, ...]) is np.ndarray) + + +class TestPickling: + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5, + reason=('this tests the error messages when trying to' + 'protocol 5 although it is not available')) + def test_correct_protocol5_error_message(self): + array = np.arange(10) + + def test_record_array_with_object_dtype(self): + my_object = object() + + arr_with_object = np.array( + [(my_object, 1, 2.0)], + dtype=[('a', object), ('b', int), ('c', float)]) + arr_without_object = np.array( + [('xxx', 1, 2.0)], + dtype=[('a', str), ('b', int), ('c', float)]) + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_arr_with_object = pickle.loads( + pickle.dumps(arr_with_object, protocol=proto)) + depickled_arr_without_object = pickle.loads( + pickle.dumps(arr_without_object, protocol=proto)) + + assert_equal(arr_with_object.dtype, + depickled_arr_with_object.dtype) + assert_equal(arr_without_object.dtype, + depickled_arr_without_object.dtype) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, + reason="requires pickle protocol 5") + def test_f_contiguous_array(self): + f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F') + buffers = [] + + # When using pickle protocol 5, Fortran-contiguous arrays can be + # serialized using out-of-band buffers + bytes_string = pickle.dumps(f_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_f_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(f_contiguous_array, depickled_f_contiguous_array) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + @pytest.mark.parametrize('transposed_contiguous_array', + [np.random.default_rng(42).random((2, 3, 4)).transpose((1, 0, 2)), + np.random.default_rng(42).random((2, 3, 4, 5)).transpose((1, 3, 0, 2))] + + [np.random.default_rng(42).random(np.arange(2, 7)).transpose(np.random.permutation(5)) for _ in range(3)]) + def test_transposed_contiguous_array(self, transposed_contiguous_array): + buffers = [] + # When using pickle protocol 5, arrays which can be transposed to c_contiguous + # can be serialized using out-of-band buffers + bytes_string = pickle.dumps(transposed_contiguous_array, protocol=5, + buffer_callback=buffers.append) + + assert len(buffers) > 0 + + depickled_transposed_contiguous_array = pickle.loads(bytes_string, + buffers=buffers) + + assert_equal(transposed_contiguous_array, depickled_transposed_contiguous_array) + + @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5, reason="requires pickle protocol 5") + def test_load_legacy_pkl_protocol5(self): + # legacy byte strs are dumped in 2.2.1 + c_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01C\x94t\x94R\x94.' 
# noqa: E501 + f_contiguous_dumped = b'\x80\x05\x95\x90\x00\x00\x00\x00\x00\x00\x00\x8c\x13numpy._core.numeric\x94\x8c\x0b_frombuffer\x94\x93\x94(\x96\x18\x00\x00\x00\x00\x00\x00\x00\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x94\x8c\x05numpy\x94\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94bK\x03K\x04K\x02\x87\x94\x8c\x01F\x94t\x94R\x94.' # noqa: E501 + transposed_contiguous_dumped = b'\x80\x05\x95\xa5\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x04K\x03K\x02\x87\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x18\x00\x01\x08\t\x10\x11\x02\x03\n\x0b\x12\x13\x04\x05\x0c\r\x14\x15\x06\x07\x0e\x0f\x16\x17\x94t\x94b.' # noqa: E501 + no_contiguous_dumped = b'\x80\x05\x95\x91\x00\x00\x00\x00\x00\x00\x00\x8c\x16numpy._core.multiarray\x94\x8c\x0c_reconstruct\x94\x93\x94\x8c\x05numpy\x94\x8c\x07ndarray\x94\x93\x94K\x00\x85\x94C\x01b\x94\x87\x94R\x94(K\x01K\x03K\x02\x86\x94h\x03\x8c\x05dtype\x94\x93\x94\x8c\x02u1\x94\x89\x88\x87\x94R\x94(K\x03\x8c\x01|\x94NNNJ\xff\xff\xff\xffJ\xff\xff\xff\xffK\x00t\x94b\x89C\x06\x00\x01\x04\x05\x08\t\x94t\x94b.' # noqa: E501 + x = np.arange(24, dtype='uint8').reshape(3, 4, 2) + assert_equal(x, pickle.loads(c_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2, order='F') + assert_equal(x, pickle.loads(f_contiguous_dumped)) + x = np.arange(24, dtype='uint8').reshape(3, 4, 2).transpose((1, 0, 2)) + assert_equal(x, pickle.loads(transposed_contiguous_dumped)) + x = np.arange(12, dtype='uint8').reshape(3, 4)[:, :2] + assert_equal(x, pickle.loads(no_contiguous_dumped)) + + def test_non_contiguous_array(self): + non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2] + assert not non_contiguous_array.flags.c_contiguous + assert not non_contiguous_array.flags.f_contiguous + + # make sure non-contiguous arrays can be pickled-depickled + # using any protocol + buffers = [] + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + depickled_non_contiguous_array = pickle.loads( + pickle.dumps(non_contiguous_array, protocol=proto, + buffer_callback=buffers.append if proto >= 5 else None)) + + assert_equal(len(buffers), 0) + assert_equal(non_contiguous_array, depickled_non_contiguous_array) + + def test_roundtrip(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + carray = np.array([[2, 9], [7, 0], [3, 8]]) + DATA = [ + carray, + np.transpose(carray), + np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), + ('c', float)]) + ] + + refs = [weakref.ref(a) for a in DATA] + for a in DATA: + assert_equal( + a, pickle.loads(pickle.dumps(a, protocol=proto)), + err_msg=f"{a!r}") + del a, DATA, carray + break_cycles() + # check for reference leaks (gh-12793) + for ref in refs: + assert ref() is None + + def _loads(self, obj): + return pickle.loads(obj, encoding='latin1') + + # version 0 pickles, using protocol=2 to pickle + # version 0 doesn't have a version field + def test_version0_int8(self): + s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb." 
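+        # The opaque literal above is a protocol-2 pickle written with the
+        # old "version 0" reduce format (note the numpy.core._internal path
+        # and the missing version field); it must still load correctly.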
+ a = np.array([1, 2, 3, 4], dtype=np.int8) + p = self._loads(s) + assert_equal(a, p) + + def test_version0_float32(self): + s = b"\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + + def test_mixed(self): + g1 = np.array(["spam", "spa", "spammer", "and eggs"]) + g2 = "spam" + assert_array_equal(g1 == g2, [x == g2 for x in g1]) + assert_array_equal(g1 != g2, [x != g2 for x in g1]) + assert_array_equal(g1 < g2, [x < g2 for x in g1]) + assert_array_equal(g1 > g2, [x > g2 for x in g1]) + assert_array_equal(g1 <= g2, [x <= g2 for x in g1]) + assert_array_equal(g1 >= g2, [x >= g2 for x in g1]) + + def test_unicode(self): + g1 = np.array(["This", "is", "example"]) + g2 = np.array(["This", "was", "example"]) + assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]]) + assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]]) + +class TestArgmaxArgminCommon: + + sizes = [(), (3,), (3, 2), (2, 3), + (3, 3), (2, 3, 4), (4, 3, 2), + (1, 2, 3, 4), (2, 3, 4, 1), + (3, 4, 1, 2), (4, 1, 2, 3), + (64,), (128,), (256,)] + + @pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis) + for axis in list(range(-len(size), len(size))) + [None]] + for size in sizes])) + @pytest.mark.parametrize('method', [np.argmax, np.argmin]) + def test_np_argmin_argmax_keepdims(self, size, axis, method): + + arr = np.random.normal(size=size) + + # contiguous arrays + if axis is None: + new_shape = [1 for _ in range(len(size))] + else: + new_shape = list(size) + new_shape[axis] = 1 + new_shape = tuple(new_shape) + + _res_orig = method(arr, axis=axis) + res_orig = _res_orig.reshape(new_shape) + res = method(arr, axis=axis, keepdims=True) + assert_equal(res, res_orig) + assert_(res.shape == new_shape) + outarray = np.empty(res.shape, dtype=res.dtype) + res1 = method(arr, axis=axis, out=outarray, + keepdims=True) + assert_(res1 is outarray) + assert_equal(res, outarray) + + if len(size) > 0: + wrong_shape = list(new_shape) + if axis is not None: + wrong_shape[axis] = 2 + else: + wrong_shape[0] = 2 + wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) + with pytest.raises(ValueError): + method(arr.T, axis=axis, + out=wrong_outarray, keepdims=True) + + # non-contiguous arrays + if axis is None: + new_shape = [1 for _ in range(len(size))] + else: + new_shape = list(size)[::-1] + new_shape[axis] = 1 + new_shape = tuple(new_shape) + + _res_orig = method(arr.T, axis=axis) + res_orig = _res_orig.reshape(new_shape) + res = method(arr.T, axis=axis, keepdims=True) + assert_equal(res, res_orig) + assert_(res.shape == new_shape) + outarray = np.empty(new_shape[::-1], dtype=res.dtype) + outarray = outarray.T + res1 = method(arr.T, axis=axis, out=outarray, + keepdims=True) + assert_(res1 is outarray) + assert_equal(res, outarray) + + if len(size) > 0: + # one dimension lesser for non-zero sized + # array should raise an error + with pytest.raises(ValueError): + method(arr[0], axis=axis, + out=outarray, keepdims=True) + + if len(size) > 0: + wrong_shape = 
list(new_shape) + if axis is not None: + wrong_shape[axis] = 2 + else: + wrong_shape[0] = 2 + wrong_outarray = np.empty(wrong_shape, dtype=res.dtype) + with pytest.raises(ValueError): + method(arr.T, axis=axis, + out=wrong_outarray, keepdims=True) + + @pytest.mark.parametrize('method', ['max', 'min']) + def test_all(self, method): + a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) + arg_method = getattr(a, 'arg' + method) + val_method = getattr(a, method) + for i in range(a.ndim): + a_maxmin = val_method(i) + aarg_maxmin = arg_method(i) + axes = list(range(a.ndim)) + axes.remove(i) + assert_(np.all(a_maxmin == aarg_maxmin.choose( + *a.transpose(i, *axes)))) + + @pytest.mark.parametrize('method', ['argmax', 'argmin']) + def test_output_shape(self, method): + # see also gh-616 + a = np.ones((10, 5)) + arg_method = getattr(a, method) + # Check some simple shape mismatches + out = np.ones(11, dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + out = np.ones((2, 5), dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + # these could be relaxed possibly (used to allow even the previous) + out = np.ones((1, 10), dtype=np.int_) + assert_raises(ValueError, arg_method, -1, out) + + out = np.ones(10, dtype=np.int_) + arg_method(-1, out=out) + assert_equal(out, arg_method(-1)) + + @pytest.mark.parametrize('ndim', [0, 1]) + @pytest.mark.parametrize('method', ['argmax', 'argmin']) + def test_ret_is_out(self, ndim, method): + a = np.ones((4,) + (256,) * ndim) + arg_method = getattr(a, method) + out = np.empty((256,) * ndim, dtype=np.intp) + ret = arg_method(axis=0, out=out) + assert ret is out + + @pytest.mark.parametrize('np_array, method, idx, val', + [(np.zeros, 'argmax', 5942, "as"), + (np.ones, 'argmin', 6001, "0")]) + def test_unicode(self, np_array, method, idx, val): + d = np_array(6031, dtype='= cmin)) + assert_(np.all(x <= cmax)) + + def _clip_type(self, type_group, array_max, + clip_min, clip_max, inplace=False, + expected_min=None, expected_max=None): + if expected_min is None: + expected_min = clip_min + if expected_max is None: + expected_max = clip_max + + for T in np._core.sctypes[type_group]: + if sys.byteorder == 'little': + byte_orders = ['=', '>'] + else: + byte_orders = ['<', '='] + + for byteorder in byte_orders: + dtype = np.dtype(T).newbyteorder(byteorder) + + x = (np.random.random(1000) * array_max).astype(dtype) + if inplace: + # The tests that call us pass clip_min and clip_max that + # might not fit in the destination dtype. They were written + # assuming the previous unsafe casting, which now must be + # passed explicitly to avoid a warning. + x.clip(clip_min, clip_max, x, casting='unsafe') + else: + x = x.clip(clip_min, clip_max) + byteorder = '=' + + if x.dtype.byteorder == '|': + byteorder = '|' + assert_equal(x.dtype.byteorder, byteorder) + self._check_range(x, expected_min, expected_max) + return x + + def test_basic(self): + for inplace in [False, True]: + self._clip_type( + 'float', 1024, -12.8, 100.2, inplace=inplace) + self._clip_type( + 'float', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'int', 1024, -120, 100, inplace=inplace) + self._clip_type( + 'int', 1024, 0, 0, inplace=inplace) + + self._clip_type( + 'uint', 1024, 0, 0, inplace=inplace) + self._clip_type( + 'uint', 1024, 10, 100, inplace=inplace) + + @pytest.mark.parametrize("inplace", [False, True]) + def test_int_out_of_range(self, inplace): + # Simple check for out-of-bound integers, also testing the in-place + # path. 
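+        # For uint8 both bounds saturate to the representable range, so
+        # clip(-1, 300) is effectively clip(0, 255), i.e. a no-op here.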
+ x = (np.random.random(1000) * 255).astype("uint8") + out = np.empty_like(x) + res = x.clip(-1, 300, out=out if inplace else None) + assert res is out or not inplace + assert (res == x).all() + + res = x.clip(-1, 50, out=out if inplace else None) + assert res is out or not inplace + assert (res <= 50).all() + assert (res[x <= 50] == x[x <= 50]).all() + + res = x.clip(100, 1000, out=out if inplace else None) + assert res is out or not inplace + assert (res >= 100).all() + assert (res[x >= 100] == x[x >= 100]).all() + + def test_record_array(self): + rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], + dtype=[('x', '= 3)) + x = val.clip(min=3) + assert_(np.all(x >= 3)) + x = val.clip(max=4) + assert_(np.all(x <= 4)) + + def test_nan(self): + input_arr = np.array([-2., np.nan, 0.5, 3., 0.25, np.nan]) + result = input_arr.clip(-1, 1) + expected = np.array([-1., np.nan, 0.5, 1., 0.25, np.nan]) + assert_array_equal(result, expected) + + +class TestCompress: + def test_axis(self): + tgt = [[5, 6, 7, 8, 9]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + tgt = [[1, 3], [6, 8]] + out = np.compress([0, 1, 0, 1, 0], arr, axis=1) + assert_equal(out, tgt) + + def test_truncate(self): + tgt = [[1], [6]] + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr, axis=1) + assert_equal(out, tgt) + + def test_flatten(self): + arr = np.arange(10).reshape(2, 5) + out = np.compress([0, 1], arr) + assert_equal(out, 1) + + +class TestPutmask: + def tst_basic(self, x, T, mask, val): + np.putmask(x, mask, val) + assert_equal(x[mask], np.array(val, T)) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(1000) * 100 + mask = x < 40 + + for val in [-100, 0, 15]: + for types in np._core.sctypes.values(): + for T in types: + if T not in unchecked_types: + if val < 0 and np.dtype(T).kind == "u": + val = np.iinfo(T).max - 99 + self.tst_basic(x.copy().astype(T), T, mask, val) + + # Also test string of a length which uses an untypical length + dt = np.dtype("S3") + self.tst_basic(x.astype(dt), dt.type, mask, dt.type(val)[:3]) + + def test_mask_size(self): + assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) + + @pytest.mark.parametrize('dtype', ('>i4', 'f8'), ('z', '= 2, 3) + + def test_kwargs(self): + x = np.array([0, 0]) + np.putmask(x, [0, 1], [-1, -2]) + assert_array_equal(x, [0, -2]) + + x = np.array([0, 0]) + np.putmask(x, mask=[0, 1], values=[-1, -2]) + assert_array_equal(x, [0, -2]) + + x = np.array([0, 0]) + np.putmask(x, values=[-1, -2], mask=[0, 1]) + assert_array_equal(x, [0, -2]) + + with pytest.raises(TypeError): + np.putmask(a=x, values=[-1, -2], mask=[0, 1]) + + +class TestTake: + def tst_basic(self, x): + ind = list(range(x.shape[0])) + assert_array_equal(x.take(ind, axis=0), x) + + def test_ip_types(self): + unchecked_types = [bytes, str, np.void] + + x = np.random.random(24) * 100 + x.shape = 2, 3, 4 + for types in np._core.sctypes.values(): + for T in types: + if T not in unchecked_types: + self.tst_basic(x.copy().astype(T)) + + # Also test string of a length which uses an untypical length + self.tst_basic(x.astype("S3")) + + def test_raise(self): + x = np.random.random(24) * 100 + x.shape = 2, 3, 4 + assert_raises(IndexError, x.take, [0, 1, 2], axis=0) + assert_raises(IndexError, x.take, [-3], axis=0) + assert_array_equal(x.take([-1], axis=0)[0], x[1]) + + def test_clip(self): + x = np.random.random(24) * 100 + x.shape = 2, 3, 4 + assert_array_equal(x.take([-1], axis=0, 
+        assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
+
+    def test_wrap(self):
+        x = np.random.random(24) * 100
+        x.shape = 2, 3, 4
+        assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
+        assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
+        assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
+
+    @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+    def test_byteorder(self, dtype):
+        x = np.array([1, 2, 3], dtype)
+        assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
+
+    def test_record_array(self):
+        # Note mixed byteorder.
+        rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
+                       dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
+        rec1 = rec.take([1])
+        assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
+
+
+class TestIO:
+    """Test tofile, fromfile, tobytes, and fromstring"""
+
+    @pytest.fixture()
+    def x(self):
+        shape = (2, 4, 3)
+        rand = np.random.random
+        x = rand(shape) + rand(shape).astype(complex) * 1j
+        x[0, :, 1] = [np.nan, np.inf, -np.inf, np.nan]
+        return x
+
+    @pytest.fixture(params=["string", "path_obj"])
+    def tmp_filename(self, request, tmp_path):
+        # This fixture covers two cases:
+        # one where the filename is a string and
+        # another where it is a pathlib object
+        filename = tmp_path / "file"
+        if request.param == "string":
+            filename = str(filename)
+        return filename
+
+    def test_largish_file(self, tmp_filename):
+        # check the fallocate path on files > 16MB
+        d = np.zeros(4 * 1024 ** 2)
+        d.tofile(tmp_filename)
+        assert_equal(os.path.getsize(tmp_filename), d.nbytes)
+        assert_array_equal(d, np.fromfile(tmp_filename))
+        # check offset
+        with open(tmp_filename, "r+b") as f:
+            f.seek(d.nbytes)
+            d.tofile(f)
+            assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
+        # check append mode (gh-8329)
+        open(tmp_filename, "w").close()  # delete file contents
+        with open(tmp_filename, "ab") as f:
+            d.tofile(f)
+        assert_array_equal(d, np.fromfile(tmp_filename))
+        with open(tmp_filename, "ab") as f:
+            d.tofile(f)
+        assert_equal(os.path.getsize(tmp_filename), d.nbytes * 2)
+
+    def test_io_open_buffered_fromfile(self, x, tmp_filename):
+        # gh-6632
+        x.tofile(tmp_filename)
+        with open(tmp_filename, 'rb', buffering=-1) as f:
+            y = np.fromfile(f, dtype=x.dtype)
+        assert_array_equal(y, x.flat)
+
+    def test_file_position_after_fromfile(self, tmp_filename):
+        # gh-4118
+        sizes = [io.DEFAULT_BUFFER_SIZE // 8,
+                 io.DEFAULT_BUFFER_SIZE,
+                 io.DEFAULT_BUFFER_SIZE * 8]
+
+        for size in sizes:
+            with open(tmp_filename, 'wb') as f:
+                f.seek(size - 1)
+                f.write(b'\0')
+
+            for mode in ['rb', 'r+b']:
+                err_msg = "%d %s" % (size, mode)
+
+                with open(tmp_filename, mode) as f:
+                    f.read(2)
+                    np.fromfile(f, dtype=np.float64, count=1)
+                    pos = f.tell()
+                assert_equal(pos, 10, err_msg=err_msg)
+
+    def test_file_position_after_tofile(self, tmp_filename):
+        # gh-4118
+        sizes = [io.DEFAULT_BUFFER_SIZE // 8,
+                 io.DEFAULT_BUFFER_SIZE,
+                 io.DEFAULT_BUFFER_SIZE * 8]
+
+        for size in sizes:
+            err_msg = "%d" % (size,)
+
+            with open(tmp_filename, 'wb') as f:
+                f.seek(size - 1)
+                f.write(b'\0')
+                f.seek(10)
+                f.write(b'12')
+                np.array([0], dtype=np.float64).tofile(f)
+                pos = f.tell()
+            assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
+
+            with open(tmp_filename, 'r+b') as f:
+                f.read(2)
+                f.seek(0, 1)  # seek between read&write required by ANSI C
+                np.array([0], dtype=np.float64).tofile(f)
+                pos = f.tell()
+            assert_equal(pos, 10, err_msg=err_msg)
+
+    def test_load_object_array_fromfile(self, tmp_filename):
+        # gh-12300
+        with open(tmp_filename, 'w') as f:
+            # Ensure we have a file with consistent contents
+            pass
+
+        with open(tmp_filename, 'rb') as f:
+            assert_raises_regex(ValueError, "Cannot read into object array",
+                                np.fromfile, f, dtype=object)
+
+        assert_raises_regex(ValueError, "Cannot read into object array",
+                            np.fromfile, tmp_filename, dtype=object)
+
+    def test_fromfile_offset(self, x, tmp_filename):
+        with open(tmp_filename, 'wb') as f:
+            x.tofile(f)
+
+        with open(tmp_filename, 'rb') as f:
+            y = np.fromfile(f, dtype=x.dtype, offset=0)
+            assert_array_equal(y, x.flat)
+
+        with open(tmp_filename, 'rb') as f:
+            count_items = len(x.flat) // 8
+            offset_items = len(x.flat) // 4
+            offset_bytes = x.dtype.itemsize * offset_items
+            y = np.fromfile(
+                f, dtype=x.dtype, count=count_items, offset=offset_bytes
+            )
+            assert_array_equal(
+                y, x.flat[offset_items:offset_items + count_items]
+            )
+
+            # subsequent seeks should stack
+            offset_bytes = x.dtype.itemsize
+            z = np.fromfile(f, dtype=x.dtype, offset=offset_bytes)
+            assert_array_equal(z, x.flat[offset_items + count_items + 1:])
+
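+        # Text mode (sep != "") goes through a different parser that does
+        # not support `offset`; the block below checks that combining the
+        # two raises TypeError instead of silently skipping data.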
+        with open(tmp_filename, 'wb') as f:
+            x.tofile(f, sep=",")
+
+        with open(tmp_filename, 'rb') as f:
+            assert_raises_regex(
+                TypeError,
+                "'offset' argument only permitted for binary files",
+                np.fromfile, tmp_filename, dtype=x.dtype,
+                sep=",", offset=1)
+
+    @pytest.mark.skipif(IS_PYPY, reason="bug in PyPy's PyNumber_AsSsize_t")
+    def test_fromfile_bad_dup(self, x, tmp_filename):
+        def dup_str(fd):
+            return 'abc'
+
+        def dup_bigint(fd):
+            return 2**68
+
+        old_dup = os.dup
+        try:
+            with open(tmp_filename, 'wb') as f:
+                x.tofile(f)
+                for dup, exc in ((dup_str, TypeError), (dup_bigint, OSError)):
+                    os.dup = dup
+                    assert_raises(exc, np.fromfile, f)
+        finally:
+            os.dup = old_dup
+
+    def _check_from(self, s, value, filename, **kw):
+        if 'sep' not in kw:
+            y = np.frombuffer(s, **kw)
+        else:
+            y = np.fromstring(s, **kw)
+        assert_array_equal(y, value)
+
+        with open(filename, 'wb') as f:
+            f.write(s)
+        y = np.fromfile(filename, **kw)
+        assert_array_equal(y, value)
+
+    @pytest.fixture(params=["period", "comma"])
+    def decimal_sep_localization(self, request):
+        """
+        Including this fixture in a test will automatically
+        execute it with both types of decimal separator.
+
+        So::
+
+            def test_decimal(decimal_sep_localization):
+                pass
+
+        is equivalent to the following two tests::
+
+            def test_decimal_period_separator():
+                pass
+
+            def test_decimal_comma_separator():
+                with CommaDecimalPointLocale():
+                    pass
+        """
+        if request.param == "period":
+            yield
+        elif request.param == "comma":
+            with CommaDecimalPointLocale():
+                yield
+        else:
+            assert False, request.param
+
+    def test_nan(self, tmp_filename, decimal_sep_localization):
+        self._check_from(
+            b"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
+            [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
+            tmp_filename,
+            sep=' ')
+
+    def test_inf(self, tmp_filename, decimal_sep_localization):
+        self._check_from(
+            b"inf +inf -inf infinity -Infinity iNfInItY -inF",
+            [np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
+            tmp_filename,
+            sep=' ')
+
+    def test_numbers(self, tmp_filename, decimal_sep_localization):
+        self._check_from(
+            b"1.234 -1.234 .3 .3e55 -123133.1231e+133",
+            [1.234, -1.234, .3, .3e55, -123133.1231e+133],
+            tmp_filename,
+            sep=' ')
+
+    def test_binary(self, tmp_filename):
+        self._check_from(
+            b'\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
+            np.array([1, 2, 3, 4]),
+            tmp_filename,
+            dtype='<f4')
+
+
+class TestFromBuffer:
+    @pytest.mark.parametrize('byteorder', ['<', '>'])
+    @pytest.mark.parametrize('dtype', [float, int, complex])
+    def test_basic(self, byteorder, dtype):
+        dt = np.dtype(dtype).newbyteorder(byteorder)
+        x = (np.random.random((4, 7)) * 5).astype(dt)
+        buf = x.tobytes()
+        assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
+
+    @pytest.mark.parametrize("obj", [np.arange(10), b"12345678"])
+    def test_array_base(self, obj):
+        # Objects (including NumPy arrays), which do not use the
+        # `release_buffer` slot should be directly used as a base object.
+        # See also gh-21612
+        new = np.frombuffer(obj)
+        assert new.base is obj
+
+    def test_empty(self):
+        assert_array_equal(np.frombuffer(b''), np.array([]))
+
+    @pytest.mark.skipif(IS_PYPY,
+            reason="PyPy's memoryview currently does not track exports. See: "
+                   "https://foss.heptapod.net/pypy/pypy/-/issues/3724")
+    def test_mmap_close(self):
+        # The old buffer protocol was not safe for some things that the new
+        # one is. But `frombuffer` always used the old one for a long time.
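+        # (With memoryviews the buffer's export count is tracked, so closing
+        # an mmap that still backs a live array raises BufferError rather
+        # than invalidating the array's memory.)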
+ # Checks that it is safe with the new one (using memoryviews) + with tempfile.TemporaryFile(mode='wb') as tmp: + tmp.write(b"asdf") + tmp.flush() + mm = mmap.mmap(tmp.fileno(), 0) + arr = np.frombuffer(mm, dtype=np.uint8) + with pytest.raises(BufferError): + mm.close() # cannot close while array uses the buffer + del arr + mm.close() + +class TestFlat: + def setup_method(self): + a0 = np.arange(20.0) + a = a0.reshape(4, 5) + a0.shape = (4, 5) + a.flags.writeable = False + self.a = a + self.b = a[::2, ::2] + self.a0 = a0 + self.b0 = a0[::2, ::2] + + def test_contiguous(self): + testpassed = False + try: + self.a.flat[12] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.a.flat[12] == 12.0) + + def test_discontiguous(self): + testpassed = False + try: + self.b.flat[4] = 100.0 + except ValueError: + testpassed = True + assert_(testpassed) + assert_(self.b.flat[4] == 12.0) + + def test___array__(self): + c = self.a.flat.__array__() + d = self.b.flat.__array__() + e = self.a0.flat.__array__() + f = self.b0.flat.__array__() + + assert_(c.flags.writeable is False) + assert_(d.flags.writeable is False) + assert_(e.flags.writeable is True) + assert_(f.flags.writeable is False) + assert_(c.flags.writebackifcopy is False) + assert_(d.flags.writebackifcopy is False) + assert_(e.flags.writebackifcopy is False) + assert_(f.flags.writebackifcopy is False) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount(self): + # includes regression test for reference count error gh-13165 + inds = [np.intp(0), np.array([True] * self.a.size), np.array([0]), None] + indtype = np.dtype(np.intp) + rc_indtype = sys.getrefcount(indtype) + for ind in inds: + rc_ind = sys.getrefcount(ind) + for _ in range(100): + try: + self.a.flat[ind] + except IndexError: + pass + assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) + assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) + + def test_index_getset(self): + it = np.arange(10).reshape(2, 1, 5).flat + with pytest.raises(AttributeError): + it.index = 10 + + for _ in it: + pass + # Check the value of `.index` is updated correctly (see also gh-19153) + # If the type was incorrect, this would show up on big-endian machines + assert it.index == it.base.size + + def test_maxdims(self): + # The flat iterator and thus attribute is currently unfortunately + # limited to only 32 dimensions (after bumping it to 64 for 2.0) + a = np.ones((1,) * 64) + + with pytest.raises(RuntimeError, + match=".*32 dimensions but the array has 64"): + a.flat + + +class TestResize: + + @_no_tracing + def test_basic(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + if IS_PYPY: + x.resize((5, 5), refcheck=False) + else: + x.resize((5, 5)) + assert_array_equal(x.flat[:9], + np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) + assert_array_equal(x[9:].flat, 0) + + def test_check_reference(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + y = x + assert_raises(ValueError, x.resize, (5, 1)) + + @_no_tracing + def test_int_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, refcheck=False) + else: + x.resize(3) + assert_array_equal(x, np.eye(3)[0, :]) + + def test_none_shape(self): + x = np.eye(3) + x.resize(None) + assert_array_equal(x, np.eye(3)) + x.resize() + assert_array_equal(x, np.eye(3)) + + def test_0d_shape(self): + # to it multiple times to test it does not break alloc cache gh-9216 + for i in range(10): + x = np.empty((1,)) + x.resize(()) + assert_equal(x.shape, ()) + assert_equal(x.size, 1) + x = 
np.empty(()) + x.resize((1,)) + assert_equal(x.shape, (1,)) + assert_equal(x.size, 1) + + def test_invalid_arguments(self): + assert_raises(TypeError, np.eye(3).resize, 'hi') + assert_raises(ValueError, np.eye(3).resize, -1) + assert_raises(TypeError, np.eye(3).resize, order=1) + assert_raises(TypeError, np.eye(3).resize, refcheck='hi') + + @_no_tracing + def test_freeform_shape(self): + x = np.eye(3) + if IS_PYPY: + x.resize(3, 2, 1, refcheck=False) + else: + x.resize(3, 2, 1) + assert_(x.shape == (3, 2, 1)) + + @_no_tracing + def test_zeros_appended(self): + x = np.eye(3) + if IS_PYPY: + x.resize(2, 3, 3, refcheck=False) + else: + x.resize(2, 3, 3) + assert_array_equal(x[0], np.eye(3)) + assert_array_equal(x[1], np.zeros((3, 3))) + + @_no_tracing + def test_obj_obj(self): + # check memory is initialized on resize, gh-4857 + a = np.ones(10, dtype=[('k', object, 2)]) + if IS_PYPY: + a.resize(15, refcheck=False) + else: + a.resize(15,) + assert_equal(a.shape, (15,)) + assert_array_equal(a['k'][-5:], 0) + assert_array_equal(a['k'][:-5], 1) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.parametrize("dtype", ["O", "O,O"]) + def test_obj_obj_shrinking(self, dtype): + # check that memory is freed when shrinking an array. + test_obj = object() + expected = sys.getrefcount(test_obj) + a = np.array([test_obj, test_obj, test_obj], dtype=dtype) + assert a.size == 3 + a.resize((2, 1)) # two elements, not three! + assert a.size == 2 + del a + # if all is well, then we reclaimed all references + assert sys.getrefcount(test_obj) == expected + + def test_empty_view(self): + # check that sizes containing a zero don't trigger a reallocate for + # already empty arrays + x = np.zeros((10, 0), int) + x_view = x[...] + x_view.resize((0, 10)) + x_view.resize((0, 100)) + + def test_check_weakref(self): + x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + xref = weakref.ref(x) + assert_raises(ValueError, x.resize, (5, 1)) + + +class TestRecord: + def test_field_rename(self): + dt = np.dtype([('f', float), ('i', int)]) + dt.names = ['p', 'q'] + assert_equal(dt.names, ['p', 'q']) + + def test_multiple_field_name_occurrence(self): + def test_dtype_init(): + np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")]) + + # Error raised when multiple fields have the same name + assert_raises(ValueError, test_dtype_init) + + def test_bytes_fields(self): + # Bytes are not allowed in field names and not recognized in titles + # on Py3 + assert_raises(TypeError, np.dtype, [(b'a', int)]) + assert_raises(TypeError, np.dtype, [(('b', b'a'), int)]) + + dt = np.dtype([((b'a', 'b'), int)]) + assert_raises(TypeError, dt.__getitem__, b'a') + + x = np.array([(1,), (2,), (3,)], dtype=dt) + assert_raises(IndexError, x.__getitem__, b'a') + + y = x[0] + assert_raises(IndexError, y.__getitem__, b'a') + + def test_multiple_field_name_unicode(self): + def test_dtype_unicode(): + np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")]) + + # Error raised when multiple fields have the same name(unicode included) + assert_raises(ValueError, test_dtype_unicode) + + def test_fromarrays_unicode(self): + # A single name string provided to fromarrays() is allowed to be unicode + x = np._core.records.fromarrays( + [[0], [1]], names='a,b', formats='i4,i4') + assert_equal(x['a'][0], 0) + assert_equal(x['b'][0], 1) + + def test_unicode_order(self): + # Test that we can sort with order as a unicode field name + name = 'b' + x = np.array([1, 3, 2], dtype=[(name, int)]) + x.sort(order=name) + assert_equal(x['b'], 
np.array([1, 2, 3]))
+
+    def test_field_names(self):
+        # Test unicode and 8-bit / byte strings can be used
+        a = np.zeros((1,), dtype=[('f1', 'i4'),
+                                  ('f2', 'i4'),
+                                  ('f3', [('sf1', 'i4')])])
+        # byte string indexing fails gracefully
+        assert_raises(IndexError, a.__setitem__, b'f1', 1)
+        assert_raises(IndexError, a.__getitem__, b'f1')
+        assert_raises(IndexError, a['f1'].__setitem__, b'sf1', 1)
+        assert_raises(IndexError, a['f1'].__getitem__, b'sf1')
+        b = a.copy()
+        fn1 = 'f1'
+        b[fn1] = 1
+        assert_equal(b[fn1], 1)
+        fnn = 'not at all'
+        assert_raises(ValueError, b.__setitem__, fnn, 1)
+        assert_raises(ValueError, b.__getitem__, fnn)
+        b[0][fn1] = 2
+        assert_equal(b[fn1], 2)
+        # Subfield
+        assert_raises(ValueError, b[0].__setitem__, fnn, 1)
+        assert_raises(ValueError, b[0].__getitem__, fnn)
+        # Subfield
+        fn3 = 'f3'
+        sfn1 = 'sf1'
+        b[fn3][sfn1] = 1
+        assert_equal(b[fn3][sfn1], 1)
+        assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
+        assert_raises(ValueError, b[fn3].__getitem__, fnn)
+        # multiple subfields
+        fn2 = 'f2'
+        b[fn2] = 3
+
+        assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+        assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+        assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+
+        # non-ascii unicode field indexing is well behaved
+        assert_raises(ValueError, a.__setitem__, '\u03e0', 1)
+        assert_raises(ValueError, a.__getitem__, '\u03e0')
+
+    def test_record_hash(self):
+        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+        a.flags.writeable = False
+        b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
+        b.flags.writeable = False
+        c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
+        c.flags.writeable = False
+        assert_(hash(a[0]) == hash(a[1]))
+        assert_(hash(a[0]) == hash(b[0]))
+        assert_(hash(a[0]) != hash(b[1]))
+        assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
+
+    def test_record_no_hash(self):
+        a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
+        assert_raises(TypeError, hash, a[0])
+
+    def test_empty_structure_creation(self):
+        # make sure these do not raise errors (gh-5631)
+        np.array([()], dtype={'names': [], 'formats': [],
+                              'offsets': [], 'itemsize': 12})
+        np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
+                                              'offsets': [], 'itemsize': 12})
+
+    def test_multifield_indexing_view(self):
+        a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
+        v = a[['a', 'c']]
+        assert_(v.base is a)
+        assert_(v.dtype == np.dtype({'names': ['a', 'c'],
+                                     'formats': ['i4', 'u4'],
+                                     'offsets': [0, 8]}))
+        v[:] = (4, 5)
+        assert_equal(a[0].item(), (4, 1, 5))
+
+class TestView:
+    def test_basic(self):
+        x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
+                     dtype=[('r', np.int8), ('g', np.int8),
+                            ('b', np.int8), ('a', np.int8)])
+        # We must be specific about the endianness here:
+        y = x.view(dtype='<i4')
+        # ... and again without the keyword.
+        z = x.view('<i4')
+        assert_array_equal(y, z)
+        assert_array_equal(y, [67305985, 134678021])
+
+
+def _mean(a, **args):
+    return a.mean(**args)
+
+
+def _var(a, **args):
+    return a.var(**args)
+
+
+def _std(a, **args):
+    return a.std(**args)
+
+
+class TestStats:
+
+    funcs = [_mean, _var, _std]
+
+    def setup_method(self):
+        np.random.seed(range(3))
+        self.rmat = np.random.random((4, 5))
+        self.cmat = self.rmat + 1j * self.rmat
+        self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
+        self.omat = self.omat.reshape(4, 5)
+
+    def test_ddof_too_big(self):
+        dim = self.rmat.shape[1]
+        for f in [_var, _std]:
+            for ddof in range(dim, dim + 2):
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    res = f(self.rmat, axis=1, ddof=ddof)
+                    assert_(not (res < 0).any())
+                    assert_(len(w) > 0)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+
+    def test_empty(self):
+        A = np.zeros((0, 3))
+        for f in self.funcs:
+            for axis in [0, None]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_(np.isnan(f(A, axis=axis)).all())
+                    assert_(len(w) > 0)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+            for axis in [1]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_equal(f(A, axis=axis), np.zeros([]))
+
+    def test_mean_values(self):
+        for mat in [self.rmat, self.cmat, self.omat]:
+            for axis in [0, 1]:
+                tgt = mat.sum(axis=axis)
+                res = _mean(mat, axis=axis) * mat.shape[axis]
+                assert_almost_equal(res, tgt)
+            for axis in [None]:
+                tgt = mat.sum(axis=axis)
+                res = _mean(mat,
axis=axis) * np.prod(mat.shape) + assert_almost_equal(res, tgt) + + def test_mean_float16(self): + # This fail if the sum inside mean is done in float16 instead + # of float32. + assert_(_mean(np.ones(100000, dtype='float16')) == 1) + + def test_mean_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. + with assert_raises(np.exceptions.AxisError): + np.arange(10).mean(axis=2) + + def test_mean_where(self): + a = np.arange(16).reshape((4, 4)) + wh_full = np.array([[False, True, False, True], + [True, False, True, False], + [True, True, False, False], + [False, False, True, True]]) + wh_partial = np.array([[False], + [True], + [True], + [False]]) + _cases = [(1, True, [1.5, 5.5, 9.5, 13.5]), + (0, wh_full, [6., 5., 10., 9.]), + (1, wh_full, [2., 5., 8.5, 14.5]), + (0, wh_partial, [6., 7., 8., 9.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.mean(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.mean(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[1.5, 5.5], [9.5, 13.5]] + assert_allclose(a3d.mean(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + with pytest.warns(RuntimeWarning) as w: + assert_allclose(a.mean(axis=1, where=wh_partial), + np.array([np.nan, 5.5, 9.5, np.nan])) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.mean(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.mean(a, where=False), np.nan) + + def test_var_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize(('complex_dtype', 'ndec'), ( + ('complex64', 6), + ('complex128', 7), + ('clongdouble', 7), + )) + def test_var_complex_values(self, complex_dtype, ndec): + # Test fast-paths for every builtin complex type + for axis in [0, 1, None]: + mat = self.cmat.copy().astype(complex_dtype) + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt, decimal=ndec) + + def test_var_dimensions(self): + # _var paths for complex number introduce additions on views that + # increase dimensions. Ensure this generalizes to higher dims + mat = np.stack([self.cmat] * 3) + for axis in [0, 1, 2, -1, None]: + msqr = _mean(mat * mat.conj(), axis=axis) + mean = _mean(mat, axis=axis) + tgt = msqr - mean * mean.conjugate() + res = _var(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_var_complex_byteorder(self): + # Test that var fast-path does not cause failures for complex arrays + # with non-native byteorder + cmat = self.cmat.copy().astype('complex128') + cmat_swapped = cmat.astype(cmat.dtype.newbyteorder()) + assert_almost_equal(cmat.var(), cmat_swapped.var()) + + def test_var_axis_error(self): + # Ensure that AxisError is raised instead of IndexError when axis is + # out of bounds, see gh-15817. 
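+        # (np.exceptions.AxisError subclasses both ValueError and
+        # IndexError, so callers catching either of the old exception
+        # types keep working.)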
+ with assert_raises(np.exceptions.AxisError): + np.arange(10).var(axis=2) + + def test_var_where(self): + a = np.arange(25).reshape((5, 5)) + wh_full = np.array([[False, True, False, True, True], + [True, False, True, True, False], + [True, True, False, False, True], + [False, True, True, False, True], + [True, False, True, True, False]]) + wh_partial = np.array([[False], + [True], + [True], + [False], + [True]]) + _cases = [(0, True, [50., 50., 50., 50., 50.]), + (1, True, [2., 2., 2., 2., 2.])] + for _ax, _wh, _res in _cases: + assert_allclose(a.var(axis=_ax, where=_wh), + np.array(_res)) + assert_allclose(np.var(a, axis=_ax, where=_wh), + np.array(_res)) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.25, 0.25], [0.25, 0.25]] + assert_allclose(a3d.var(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.var(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(np.var(a, axis=1, where=wh_full), + np.var(a[wh_full].reshape((5, 3)), axis=1)) + assert_allclose(np.var(a, axis=0, where=wh_partial), + np.var(a[wh_partial[:, 0]], axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.var(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.var(a, where=False), np.nan) + + def test_std_values(self): + for mat in [self.rmat, self.cmat, self.omat]: + for axis in [0, 1, None]: + tgt = np.sqrt(_var(mat, axis=axis)) + res = _std(mat, axis=axis) + assert_almost_equal(res, tgt) + + def test_std_where(self): + a = np.arange(25).reshape((5, 5))[::-1] + whf = np.array([[False, True, False, True, True], + [True, False, True, False, True], + [True, True, False, True, False], + [True, False, True, True, False], + [False, True, False, True, True]]) + whp = np.array([[False], + [False], + [True], + [True], + [False]]) + _cases = [ + (0, True, 7.07106781 * np.ones(5)), + (1, True, 1.41421356 * np.ones(5)), + (0, whf, + np.array([4.0824829, 8.16496581, 5., 7.39509973, 8.49836586])), + (0, whp, 2.5 * np.ones(5)) + ] + for _ax, _wh, _res in _cases: + assert_allclose(a.std(axis=_ax, where=_wh), _res) + assert_allclose(np.std(a, axis=_ax, where=_wh), _res) + + a3d = np.arange(16).reshape((2, 2, 4)) + _wh_partial = np.array([False, True, True, False]) + _res = [[0.5, 0.5], [0.5, 0.5]] + assert_allclose(a3d.std(axis=2, where=_wh_partial), + np.array(_res)) + assert_allclose(np.std(a3d, axis=2, where=_wh_partial), + np.array(_res)) + + assert_allclose(a.std(axis=1, where=whf), + np.std(a[whf].reshape((5, 3)), axis=1)) + assert_allclose(np.std(a, axis=1, where=whf), + (a[whf].reshape((5, 3))).std(axis=1)) + assert_allclose(a.std(axis=0, where=whp), + np.std(a[whp[:, 0]], axis=0)) + assert_allclose(np.std(a, axis=0, where=whp), + (a[whp[:, 0]]).std(axis=0)) + with pytest.warns(RuntimeWarning) as w: + assert_equal(a.std(where=False), np.nan) + with pytest.warns(RuntimeWarning) as w: + assert_equal(np.std(a, where=False), np.nan) + + def test_subclass(self): + class TestArray(np.ndarray): + def __new__(cls, data, info): + result = np.array(data) + result = result.view(cls) + result.info = info + return result + + def __array_finalize__(self, obj): + self.info = getattr(obj, "info", '') + + dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') + res = dat.mean(1) + assert_(res.info == dat.info) + res = dat.std(1) + assert_(res.info == dat.info) + res = dat.var(1) + assert_(res.info == dat.info) + + +class TestVdot: + def test_basic(self): + dt_numeric = np.typecodes['AllFloat'] + 
np.typecodes['AllInteger'] + dt_complex = np.typecodes['Complex'] + + # test real + a = np.eye(3) + for dt in dt_numeric + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test complex + a = np.eye(3) * 1j + for dt in dt_complex + 'O': + b = a.astype(dt) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), 3) + + # test boolean + b = np.eye(3, dtype=bool) + res = np.vdot(b, b) + assert_(np.isscalar(res)) + assert_equal(np.vdot(b, b), True) + + def test_vdot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.vdot(a, a) + + # integer arrays are exact + assert_equal(np.vdot(a, b), res) + assert_equal(np.vdot(b, a), res) + assert_equal(np.vdot(b, b), res) + + def test_vdot_uncontiguous(self): + for size in [2, 1000]: + # Different sizes match different branches in vdot. + a = np.zeros((size, 2, 2)) + b = np.zeros((size, 2, 2)) + a[:, 0, 0] = np.arange(size) + b[:, 0, 0] = np.arange(size) + 1 + # Make a and b uncontiguous: + a = a[..., 0] + b = b[..., 0] + + assert_equal(np.vdot(a, b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy()), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy(), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a.copy('F'), b), + np.vdot(a.flatten(), b.flatten())) + assert_equal(np.vdot(a, b.copy('F')), + np.vdot(a.flatten(), b.flatten())) + + +class TestDot: + def setup_method(self): + np.random.seed(128) + self.A = np.random.rand(4, 2) + self.b1 = np.random.rand(2, 1) + self.b2 = np.random.rand(2) + self.b3 = np.random.rand(1, 2) + self.b4 = np.random.rand(4) + self.N = 7 + + def test_dotmatmat(self): + A = self.A + res = np.dot(A.transpose(), A) + tgt = np.array([[1.45046013, 0.86323640], + [0.86323640, 0.84934569]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec(self): + A, b1 = self.A, self.b1 + res = np.dot(A, b1) + tgt = np.array([[0.32114320], [0.04889721], + [0.15696029], [0.33612621]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotmatvec2(self): + A, b2 = self.A, self.b2 + res = np.dot(A, b2) + tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat(self): + A, b4 = self.A, self.b4 + res = np.dot(b4, A) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat2(self): + b3, A = self.b3, self.A + res = np.dot(b3, A.transpose()) + tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecmat3(self): + A, b4 = self.A, self.b4 + res = np.dot(A.transpose(), b4) + tgt = np.array([1.23495091, 1.12222648]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecouter(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b1, b3) + tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecvecinner(self): + b1, b3 = self.b1, self.b3 + res = np.dot(b3, b1) + tgt = np.array([[0.23129668]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect1(self): + b1 = np.ones((3, 1)) + b2 = [5.3] + res = np.dot(b1, b2) + tgt = np.array([5.3, 5.3, 5.3]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotcolumnvect2(self): + b1 = np.ones((3, 1)).transpose() + b2 = [6.2] + res = np.dot(b2, b1) + tgt = 
np.array([6.2, 6.2, 6.2]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar(self): + np.random.seed(100) + b1 = np.random.rand(1, 1) + b2 = np.random.rand(1, 4) + res = np.dot(b1, b2) + tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_dotvecscalar2(self): + np.random.seed(100) + b1 = np.random.rand(4, 1) + b2 = np.random.rand(1, 1) + res = np.dot(b1, b2) + tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]]) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_all(self): + dims = [(), (1,), (1, 1)] + dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)] + for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)): + b1 = np.zeros(dim1) + b2 = np.zeros(dim2) + res = np.dot(b1, b2) + tgt = np.zeros(dim) + assert_(res.shape == tgt.shape) + assert_almost_equal(res, tgt, decimal=self.N) + + def test_vecobject(self): + class Vec: + def __init__(self, sequence=None): + if sequence is None: + sequence = [] + self.array = np.array(sequence) + + def __add__(self, other): + out = Vec() + out.array = self.array + other.array + return out + + def __sub__(self, other): + out = Vec() + out.array = self.array - other.array + return out + + def __mul__(self, other): # with scalar + out = Vec(self.array.copy()) + out.array *= other + return out + + def __rmul__(self, other): + return self * other + + U_non_cont = np.transpose([[1., 1.], [1., 2.]]) + U_cont = np.ascontiguousarray(U_non_cont) + x = np.array([Vec([1., 0.]), Vec([0., 1.])]) + zeros = np.array([Vec([0., 0.]), Vec([0., 0.])]) + zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x) + assert_equal(zeros[0].array, zeros_test[0].array) + assert_equal(zeros[1].array, zeros_test[1].array) + + def test_dot_2args(self): + + a = np.array([[1, 2], [3, 4]], dtype=float) + b = np.array([[1, 0], [1, 1]], dtype=float) + c = np.array([[3, 2], [7, 4]], dtype=float) + + d = dot(a, b) + assert_allclose(c, d) + + def test_dot_3args(self): + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 32)) + if HAS_REFCOUNT: + orig_refcount = sys.getrefcount(r) + for i in range(12): + dot(f, v, r) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(r), orig_refcount) + r2 = dot(f, v, out=None) + assert_array_equal(r2, r) + assert_(r is dot(f, v, out=r)) + + v = v[:, 0].copy() # v.shape == (16,) + r = r[:, 0].copy() # r.shape == (1024,) + r2 = dot(f, v) + assert_(r is dot(f, v, r)) + assert_array_equal(r2, r) + + def test_dot_3args_errors(self): + + np.random.seed(22) + f = np.random.random_sample((1024, 16)) + v = np.random.random_sample((16, 32)) + + r = np.empty((1024, 31)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32,)) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((32, 1024)) + assert_raises(ValueError, dot, f, v, r) + assert_raises(ValueError, dot, f, v, r.T) + + r = np.empty((1024, 64)) + assert_raises(ValueError, dot, f, v, r[:, ::2]) + assert_raises(ValueError, dot, f, v, r[:, :32]) + + r = np.empty((1024, 32), dtype=np.float32) + assert_raises(ValueError, dot, f, v, r) + + r = np.empty((1024, 32), dtype=int) + assert_raises(ValueError, dot, f, v, r) + + def test_dot_out_result(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + assert np.array_equal(res, y) + 
assert np.array_equal(z, y) + + def test_dot_out_aliasing(self): + x = np.ones((), dtype=np.float16) + y = np.ones((5,), dtype=np.float16) + z = np.zeros((5,), dtype=np.float16) + res = x.dot(y, out=z) + z[0] = 2 + assert np.array_equal(res, z) + + def test_dot_array_order(self): + a = np.array([[1, 2], [3, 4]], order='C') + b = np.array([[1, 2], [3, 4]], order='F') + res = np.dot(a, a) + + # integer arrays are exact + assert_equal(np.dot(a, b), res) + assert_equal(np.dot(b, a), res) + assert_equal(np.dot(b, b), res) + + def test_accelerate_framework_sgemv_fix(self): + + def aligned_array(shape, align, dtype, order='C'): + d = dtype(0) + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset + N * d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7) + + m = aligned_array(100, 15, np.float32) + s = aligned_array((100, 100), 15, np.float32) + np.dot(s, m) # this will always segfault if the bug is present + + testdata = itertools.product((15, 32), (10000,), (200, 89), ('C', 'F')) + for align, m, n, a_order in testdata: + # Calculation in double precision + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + X_f = as_aligned(X_d, align, np.float32) + assert_dot_close(A_f, X_f, desired) + # Strided A rows + A_d_2 = A_d[::2] + desired = np.dot(A_d_2, X_d) + A_f_2 = A_f[::2] + assert_dot_close(A_f_2, X_f, desired) + # Strided A columns, strided X vector + A_d_22 = A_d_2[:, ::2] + X_d_2 = X_d[::2] + desired = np.dot(A_d_22, X_d_2) + A_f_22 = A_f_2[:, ::2] + X_f_2 = X_f[::2] + assert_dot_close(A_f_22, X_f_2, desired) + # Check the strides are as expected + if a_order == 'F': + assert_equal(A_f_22.strides, (8, 8 * m)) + else: + assert_equal(A_f_22.strides, (8 * n, 8)) + assert_equal(X_f_2.strides, (8,)) + # Strides in A rows + cols only + X_f_2c = as_aligned(X_f_2, align, np.float32) + assert_dot_close(A_f_22, X_f_2c, desired) + # Strides just in A cols + A_d_12 = A_d[:, ::2] + desired = np.dot(A_d_12, X_d_2) + A_f_12 = A_f[:, ::2] + assert_dot_close(A_f_12, X_f_2c, desired) + # Strides in A cols and X + assert_dot_close(A_f_12, X_f_2, desired) + + @pytest.mark.slow + @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) + @requires_memory(free_bytes=18e9) # complex case needs 18GiB+ + def test_huge_vectordot(self, dtype): + # Large vector multiplications are chunked with 32bit BLAS + # Test that the chunking does the right thing, see also gh-22262 + data = np.ones(2**30 + 100, dtype=dtype) + res = np.dot(data, data) + assert res == 2**30 + 100 + + def test_dtype_discovery_fails(self): + # See gh-14247, error checking was missing for failed dtype discovery + class BadObject: + def __array__(self, dtype=None, copy=None): + raise TypeError("just this tiny mint leaf") + + with pytest.raises(TypeError): + np.dot(BadObject(), BadObject()) + + with pytest.raises(TypeError): + np.dot(3.0, BadObject()) + + +class MatmulCommon: + """Common tests for '@' operator and numpy.matmul. + + """ + # Should work with these types. 
Will want to add + # "O" at some point + types = "?bhilqBHILQefdgFDGO" + + def test_exceptions(self): + dims = [ + ((1,), (2,)), # mismatched vector vector + ((2, 1,), (2,)), # mismatched matrix vector + ((2,), (1, 2)), # mismatched vector matrix + ((1, 2), (3, 1)), # mismatched matrix matrix + ((1,), ()), # vector scalar + ((), (1)), # scalar vector + ((1, 1), ()), # matrix scalar + ((), (1, 1)), # scalar matrix + ((2, 2, 1), (3, 1, 2)), # cannot broadcast + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + assert_raises(ValueError, self.matmul, a, b) + + def test_shapes(self): + dims = [ + ((1, 1), (2, 1, 1)), # broadcast first argument + ((2, 1, 1), (1, 1)), # broadcast second argument + ((2, 1, 1), (2, 1, 1)), # matrix stack sizes match + ] + + for dt, (dm1, dm2) in itertools.product(self.types, dims): + a = np.ones(dm1, dtype=dt) + b = np.ones(dm2, dtype=dt) + res = self.matmul(a, b) + assert_(res.shape == (2, 1, 1)) + + # vector vector returns scalars. + for dt in self.types: + a = np.ones((2,), dtype=dt) + b = np.ones((2,), dtype=dt) + c = self.matmul(a, b) + assert_(np.array(c).shape == ()) + + def test_result_types(self): + mat = np.ones((1, 1)) + vec = np.ones((1,)) + for dt in self.types: + m = mat.astype(dt) + v = vec.astype(dt) + for arg in [(m, v), (v, m), (m, m)]: + res = self.matmul(*arg) + assert_(res.dtype == dt) + + # vector vector returns scalars + if dt != "O": + res = self.matmul(v, v) + assert_(type(res) is np.dtype(dt).type) + + def test_scalar_output(self): + vec1 = np.array([2]) + vec2 = np.array([3, 4]).reshape(1, -1) + tgt = np.array([6, 8]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt) + res = self.matmul(v2.T, v1) + assert_equal(res, tgt) + + # boolean type + vec = np.array([True, True], dtype='?').reshape(1, -1) + res = self.matmul(vec[:, 0], vec) + assert_equal(res, True) + + def test_vector_vector_values(self): + vec1 = np.array([1, 2]) + vec2 = np.array([3, 4]).reshape(-1, 1) + tgt1 = np.array([11]) + tgt2 = np.array([[3, 6], [4, 8]]) + for dt in self.types[1:]: + v1 = vec1.astype(dt) + v2 = vec2.astype(dt) + res = self.matmul(v1, v2) + assert_equal(res, tgt1) + # no broadcast, we must make v1 into a 2d ndarray + res = self.matmul(v2, v1.reshape(1, -1)) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, True], dtype='?') + res = self.matmul(vec, vec) + assert_equal(res, True) + + def test_vector_matrix_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([7, 10]) + tgt2 = np.stack([tgt1] * 2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + res = self.matmul(v, m1) + assert_equal(res, tgt1) + res = self.matmul(v, m2) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1] * 2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_vector_values(self): + vec = np.array([1, 2]) + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([5, 11]) + tgt2 = np.stack([tgt1] * 2, axis=0) + for dt in self.types[1:]: + v = vec.astype(dt) + m1 = mat1.astype(dt) + m2 = 
mat2.astype(dt) + res = self.matmul(m1, v) + assert_equal(res, tgt1) + res = self.matmul(m2, v) + assert_equal(res, tgt2) + + # boolean type + vec = np.array([True, False]) + mat1 = np.array([[True, False], [False, True]]) + mat2 = np.stack([mat1] * 2, axis=0) + tgt1 = np.array([True, False]) + tgt2 = np.stack([tgt1] * 2, axis=0) + + res = self.matmul(vec, mat1) + assert_equal(res, tgt1) + res = self.matmul(vec, mat2) + assert_equal(res, tgt2) + + def test_matrix_matrix_values(self): + mat1 = np.array([[1, 2], [3, 4]]) + mat2 = np.array([[1, 0], [1, 1]]) + mat12 = np.stack([mat1, mat2], axis=0) + mat21 = np.stack([mat2, mat1], axis=0) + tgt11 = np.array([[7, 10], [15, 22]]) + tgt12 = np.array([[3, 2], [7, 4]]) + tgt21 = np.array([[1, 2], [4, 6]]) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + for dt in self.types[1:]: + m1 = mat1.astype(dt) + m2 = mat2.astype(dt) + m12 = mat12.astype(dt) + m21 = mat21.astype(dt) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + # boolean type + m1 = np.array([[1, 1], [0, 0]], dtype=np.bool) + m2 = np.array([[1, 0], [1, 1]], dtype=np.bool) + m12 = np.stack([m1, m2], axis=0) + m21 = np.stack([m2, m1], axis=0) + tgt11 = m1 + tgt12 = m1 + tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool) + tgt12_21 = np.stack([tgt12, tgt21], axis=0) + tgt11_12 = np.stack((tgt11, tgt12), axis=0) + tgt11_21 = np.stack((tgt11, tgt21), axis=0) + + # matrix @ matrix + res = self.matmul(m1, m2) + assert_equal(res, tgt12) + res = self.matmul(m2, m1) + assert_equal(res, tgt21) + + # stacked @ matrix + res = self.matmul(m12, m1) + assert_equal(res, tgt11_21) + + # matrix @ stacked + res = self.matmul(m1, m12) + assert_equal(res, tgt11_12) + + # stacked @ stacked + res = self.matmul(m12, m21) + assert_equal(res, tgt12_21) + + +class TestMatmul(MatmulCommon): + matmul = np.matmul + + def test_out_arg(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + tgt = np.dot(a, b) + + # test as positional argument + msg = "out positional argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out) + assert_array_equal(out, tgt, err_msg=msg) + + # test as keyword argument + msg = "out keyword argument" + out = np.zeros((5, 2), dtype=float) + self.matmul(a, b, out=out) + assert_array_equal(out, tgt, err_msg=msg) + + # test out with not allowed type cast (safe casting) + msg = "Cannot cast ufunc .* output" + out = np.zeros((5, 2), dtype=np.int32) + assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out) + + # test out with type upcast to complex + out = np.zeros((5, 2), dtype=np.complex128) + c = self.matmul(a, b, out=out) + assert_(c is out) + with suppress_warnings() as sup: + sup.filter(ComplexWarning, '') + c = c.astype(tgt.dtype) + assert_array_equal(c, tgt) + + def test_empty_out(self): + # Check that the output cannot be broadcast, so that it cannot be + # size zero when the outer dimensions (iterator size) has size zero. 
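+        # E.g. a (0, 1, 1) @ (0, 1, 1) stack legitimately yields an empty
+        # (0, 1, 1) result; an out= of shape (1, 1, 1) must not be accepted
+        # by broadcasting it down to size zero.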
+ arr = np.ones((0, 1, 1)) + out = np.ones((1, 1, 1)) + assert self.matmul(arr, arr).shape == (0, 1, 1) + + with pytest.raises(ValueError, match=r"non-broadcastable"): + self.matmul(arr, arr, out=out) + + def test_out_contiguous(self): + a = np.ones((5, 2), dtype=float) + b = np.array([[1, 3], [5, 7]], dtype=float) + v = np.array([1, 3], dtype=float) + tgt = np.dot(a, b) + tgt_mv = np.dot(a, v) + + # test out non-contiguous + out = np.ones((5, 2, 2), dtype=float) + c = self.matmul(a, b, out=out[..., 0]) + assert c.base is out + assert_array_equal(c, tgt) + c = self.matmul(a, v, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + c = self.matmul(v, a.T, out=out[:, 0, 0]) + assert_array_equal(c, tgt_mv) + + # test out contiguous in only last dim + out = np.ones((10, 2), dtype=float) + c = self.matmul(a, b, out=out[::2, :]) + assert_array_equal(c, tgt) + + # test transposes of out, args + out = np.ones((5, 2), dtype=float) + c = self.matmul(b.T, a.T, out=out.T) + assert_array_equal(out, tgt) + + m1 = np.arange(15.).reshape(5, 3) + m2 = np.arange(21.).reshape(3, 7) + m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous + vc = np.arange(10.) + vr = np.arange(6.) + m0 = np.zeros((3, 0)) + + @pytest.mark.parametrize('args', ( + # matrix-matrix + (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()), + # matrix-matrix-transpose, contiguous and non + (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T), + (m3, m3.T), (m3.T, m3), + # matrix-matrix non-contiguous + (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T), + # vector-matrix, matrix-vector, contiguous + (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T), + # vector-matrix, matrix-vector, vector non-contiguous + (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T), + # vector-matrix, matrix-vector, matrix non-contiguous + (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T), + # vector-matrix, matrix-vector, both non-contiguous + (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T), + # size == 0 + (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T), + )) + def test_dot_equivalent(self, args): + r1 = np.matmul(*args) + r2 = np.dot(*args) + assert_equal(r1, r2) + + r3 = np.matmul(args[0].copy(), args[1].copy()) + assert_equal(r1, r3) + + # matrix matrix, issue 29164 + if [len(args[0].shape), len(args[1].shape)] == [2, 2]: + out_f = np.zeros((r2.shape[0] * 2, r2.shape[1] * 2), order='F') + r4 = np.matmul(*args, out=out_f[::2, ::2]) + assert_equal(r2, r4) + + def test_matmul_object(self): + import fractions + + f = np.vectorize(fractions.Fraction) + + def random_ints(): + return np.random.randint(1, 1000, size=(10, 3, 3)) + M1 = f(random_ints(), random_ints()) + M2 = f(random_ints(), random_ints()) + + M3 = self.matmul(M1, M2) + + [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]] + + assert_allclose(N3, self.matmul(N1, N2)) + + def test_matmul_object_type_scalar(self): + from fractions import Fraction as F + v = np.array([F(2, 3), F(5, 7)]) + res = self.matmul(v, v) + assert_(type(res) is F) + + def test_matmul_empty(self): + a = np.empty((3, 0), dtype=object) + b = np.empty((0, 3), dtype=object) + c = np.zeros((3, 3)) + assert_array_equal(np.matmul(a, b), c) + + def test_matmul_exception_multiply(self): + # test that matmul fails if `__mul__` is missing + class add_not_multiply: + def __add__(self, other): + return self + a = np.full((3, 3), add_not_multiply()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_exception_add(self): + # test that matmul fails if `__add__` is missing + 
class multiply_not_add: + def __mul__(self, other): + return self + a = np.full((3, 3), multiply_not_add()) + with assert_raises(TypeError): + b = np.matmul(a, a) + + def test_matmul_bool(self): + # gh-14439 + a = np.array([[1, 0], [1, 1]], dtype=bool) + assert np.max(a.view(np.uint8)) == 1 + b = np.matmul(a, a) + # matmul with boolean output should always be 0, 1 + assert np.max(b.view(np.uint8)) == 1 + + rg = np.random.default_rng(np.random.PCG64(43)) + d = rg.integers(2, size=4 * 5, dtype=np.int8) + d = d.reshape(4, 5) > 0 + out1 = np.matmul(d, d.reshape(5, 4)) + out2 = np.dot(d, d.reshape(5, 4)) + assert_equal(out1, out2) + + c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool)) + assert not np.any(c) + + +class TestMatmulOperator(MatmulCommon): + import operator + matmul = operator.matmul + + def test_array_priority_override(self): + + class A: + __array_priority__ = 1000 + + def __matmul__(self, other): + return "A" + + def __rmatmul__(self, other): + return "A" + + a = A() + b = np.ones(2) + assert_equal(self.matmul(a, b), "A") + assert_equal(self.matmul(b, a), "A") + + def test_matmul_raises(self): + assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5)) + assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc')) + assert_raises(TypeError, self.matmul, np.arange(10), np.void(b'abc')) + + +class TestMatmulInplace: + DTYPES = {} + for i in MatmulCommon.types: + for j in MatmulCommon.types: + if np.can_cast(j, i): + DTYPES[f"{i}-{j}"] = (np.dtype(i), np.dtype(j)) + + @pytest.mark.parametrize("dtype1,dtype2", DTYPES.values(), ids=DTYPES) + def test_basic(self, dtype1: np.dtype, dtype2: np.dtype) -> None: + a = np.arange(10).reshape(5, 2).astype(dtype1) + a_id = id(a) + b = np.ones((2, 2), dtype=dtype2) + + ref = a @ b + a @= b + + assert id(a) == a_id + assert a.dtype == dtype1 + assert a.shape == (5, 2) + if dtype1.kind in "fc": + np.testing.assert_allclose(a, ref) + else: + np.testing.assert_array_equal(a, ref) + + SHAPES = { + "2d_large": ((10**5, 10), (10, 10)), + "3d_large": ((10**4, 10, 10), (1, 10, 10)), + "1d": ((3,), (3,)), + "2d_1d": ((3, 3), (3,)), + "1d_2d": ((3,), (3, 3)), + "2d_broadcast": ((3, 3), (3, 1)), + "2d_broadcast_reverse": ((1, 3), (3, 3)), + "3d_broadcast1": ((3, 3, 3), (1, 3, 1)), + "3d_broadcast2": ((3, 3, 3), (1, 3, 3)), + "3d_broadcast3": ((3, 3, 3), (3, 3, 1)), + "3d_broadcast_reverse1": ((1, 3, 3), (3, 3, 3)), + "3d_broadcast_reverse2": ((3, 1, 3), (3, 3, 3)), + "3d_broadcast_reverse3": ((1, 1, 3), (3, 3, 3)), + } + + @pytest.mark.parametrize("a_shape,b_shape", SHAPES.values(), ids=SHAPES) + def test_shapes(self, a_shape: tuple[int, ...], b_shape: tuple[int, ...]): + a_size = np.prod(a_shape) + a = np.arange(a_size).reshape(a_shape).astype(np.float64) + a_id = id(a) + + b_size = np.prod(b_shape) + b = np.arange(b_size).reshape(b_shape) + + ref = a @ b + if ref.shape != a_shape: + with pytest.raises(ValueError): + a @= b + return + else: + a @= b + + assert id(a) == a_id + assert a.dtype.type == np.float64 + assert a.shape == a_shape + np.testing.assert_allclose(a, ref) + + +def test_matmul_axes(): + a = np.arange(3 * 4 * 5).reshape(3, 4, 5) + c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)]) + assert c.shape == (3, 4, 4) + d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)]) + assert d.shape == (4, 4, 3) + e = np.swapaxes(d, 0, 2) + assert_array_equal(e, c) + f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)]) + assert f.shape == (4, 5) + + +class TestInner: + + def test_inner_type_mismatch(self): + c = 1. 
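+        # np.inner has no promotion rule between a plain Python float and a
+        # structured dtype, so both argument orders below must raise
+        # TypeError.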
+ A = np.array((1, 1), dtype='i,i') + + assert_raises(TypeError, np.inner, c, A) + assert_raises(TypeError, np.inner, A, c) + + def test_inner_scalar_and_vector(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + vec = np.array([1, 2], dtype=dt) + desired = np.array([3, 6], dtype=dt) + assert_equal(np.inner(vec, sca), desired) + assert_equal(np.inner(sca, vec), desired) + + def test_vecself(self): + # Ticket 844. + # Inner product of a vector with itself segfaults or give + # meaningless result + a = np.zeros(shape=(1, 80), dtype=np.float64) + p = np.inner(a, a) + assert_almost_equal(p, 0, decimal=14) + + def test_inner_product_with_various_contiguities(self): + # github issue 6532 + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + # check an inner product involving a matrix transpose + A = np.array([[1, 2], [3, 4]], dtype=dt) + B = np.array([[1, 3], [2, 4]], dtype=dt) + C = np.array([1, 1], dtype=dt) + desired = np.array([4, 6], dtype=dt) + assert_equal(np.inner(A.T, C), desired) + assert_equal(np.inner(C, A.T), desired) + assert_equal(np.inner(B, C), desired) + assert_equal(np.inner(C, B), desired) + # check a matrix product + desired = np.array([[7, 10], [15, 22]], dtype=dt) + assert_equal(np.inner(A, B), desired) + # check the syrk vs. gemm paths + desired = np.array([[5, 11], [11, 25]], dtype=dt) + assert_equal(np.inner(A, A), desired) + assert_equal(np.inner(A, A.copy()), desired) + # check an inner product involving an aliased and reversed view + a = np.arange(5).astype(dt) + b = a[::-1] + desired = np.array(10, dtype=dt).item() + assert_equal(np.inner(b, a), desired) + + def test_3d_tensor(self): + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + a = np.arange(24).reshape(2, 3, 4).astype(dt) + b = np.arange(24, 48).reshape(2, 3, 4).astype(dt) + desired = np.array( + [[[[ 158, 182, 206], + [ 230, 254, 278]], + + [[ 566, 654, 742], + [ 830, 918, 1006]], + + [[ 974, 1126, 1278], + [1430, 1582, 1734]]], + + [[[1382, 1598, 1814], + [2030, 2246, 2462]], + + [[1790, 2070, 2350], + [2630, 2910, 3190]], + + [[2198, 2542, 2886], + [3230, 3574, 3918]]]] + ).astype(dt) + assert_equal(np.inner(a, b), desired) + assert_equal(np.inner(b, a).transpose(2, 3, 0, 1), desired) + + +class TestChoose: + def setup_method(self): + self.x = 2 * np.ones((3,), dtype=int) + self.y = 3 * np.ones((3,), dtype=int) + self.x2 = 2 * np.ones((2, 3), dtype=int) + self.y2 = 3 * np.ones((2, 3), dtype=int) + self.ind = [0, 0, 1] + + def test_basic(self): + A = np.choose(self.ind, (self.x, self.y)) + assert_equal(A, [2, 2, 3]) + + def test_broadcast1(self): + A = np.choose(self.ind, (self.x2, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + def test_broadcast2(self): + A = np.choose(self.ind, (self.x, self.y2)) + assert_equal(A, [[2, 2, 3], [2, 2, 3]]) + + @pytest.mark.parametrize("ops", + [(1000, np.array([1], dtype=np.uint8)), + (-1, np.array([1], dtype=np.uint8)), + (1., np.float32(3)), + (1., np.array([3], dtype=np.float32))],) + def test_output_dtype(self, ops): + expected_dt = np.result_type(*ops) + assert np.choose([0], ops).dtype == expected_dt + + def test_dimension_and_args_limit(self): + # Maxdims for the legacy iterator is 32, but the maximum number + # of arguments is actually larger (a itself also counts here) + a = np.ones((1,) * 32, dtype=np.intp) + res = a.choose([0, a] + [2] * 61) + with pytest.raises(ValueError, + match="Need at least 0 and at most 64 array objects"): + a.choose([0, a] 
+ [2] * 62) + + assert_array_equal(res, a) + # Choose is unfortunately limited to 32 dims as of NumPy 2.0 + a = np.ones((1,) * 60, dtype=np.intp) + with pytest.raises(RuntimeError, + match=".*32 dimensions but the array has 60"): + a.choose([a, a]) + + +class TestRepeat: + def setup_method(self): + self.m = np.array([1, 2, 3, 4, 5, 6]) + self.m_rect = self.m.reshape((2, 3)) + + def test_basic(self): + A = np.repeat(self.m, [1, 3, 2, 1, 1, 2]) + assert_equal(A, [1, 2, 2, 2, 3, + 3, 4, 5, 6, 6]) + + def test_broadcast1(self): + A = np.repeat(self.m, 2) + assert_equal(A, [1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6]) + + def test_axis_spec(self): + A = np.repeat(self.m_rect, [2, 1], axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, [1, 3, 2], axis=1) + assert_equal(A, [[1, 2, 2, 2, 3, 3], + [4, 5, 5, 5, 6, 6]]) + + def test_broadcast2(self): + A = np.repeat(self.m_rect, 2, axis=0) + assert_equal(A, [[1, 2, 3], + [1, 2, 3], + [4, 5, 6], + [4, 5, 6]]) + + A = np.repeat(self.m_rect, 2, axis=1) + assert_equal(A, [[1, 1, 2, 2, 3, 3], + [4, 4, 5, 5, 6, 6]]) + + +# TODO: test for multidimensional +NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} + + +@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object']) +class TestNeighborhoodIter: + # Simple, 2d tests + def test_simple2d(self, dt): + # Test zero and one padding for simple data type + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), + np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), + np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), + np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), + np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) + assert_array_equal(l, r) + + r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), + np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), + np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test with start in the middle + r = [np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), + np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'], 2) + assert_array_equal(l, r) + + def test_mirror2d(self, dt): + x = np.array([[0, 1], [2, 3]], dtype=dt) + r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), + np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), + np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), + np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Simple, 1d tests + def test_simple(self, dt): + # Test padding with constant values + x = np.linspace(1, 5, 5).astype(dt) + r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[0], NEIGH_MODE['one']) + 
assert_array_equal(l, r) + + r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] + l = _multiarray_tests.test_neighborhood_iterator( + x, [-1, 1], x[4], NEIGH_MODE['constant']) + assert_array_equal(l, r) + + # Test mirror modes + def test_mirror(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[1], NEIGH_MODE['mirror']) + assert_([i.dtype == dt for i in l]) + assert_array_equal(l, r) + + # Circular mode + def test_circular(self, dt): + x = np.linspace(1, 5, 5).astype(dt) + r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], + [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) + l = _multiarray_tests.test_neighborhood_iterator( + x, [-2, 2], x[0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + +# Test stacking neighborhood iterators +class TestStackedNeighborhoodIter: + # Simple, 1d test: stacking 2 constant-padded neigh iterators + def test_simple_const(self): + dt = np.float64 + # Test zero and one padding for simple data type + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0], dtype=dt), + np.array([0], dtype=dt), + np.array([1], dtype=dt), + np.array([2], dtype=dt), + np.array([3], dtype=dt), + np.array([0], dtype=dt), + np.array([0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + r = [np.array([1, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) + assert_array_equal(l, r) + + # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # mirror padding + def test_simple_mirror(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 1], dtype=dt), + np.array([1, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 3], dtype=dt), + np.array([3, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], 
NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and + # circular padding + def test_simple_circular(self): + dt = np.float64 + # Stacking zero on top of mirror + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 3, 1], dtype=dt), + np.array([3, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 1], dtype=dt), + np.array([3, 1, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt), + np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 2nd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([0, 1, 2], dtype=dt), + np.array([1, 2, 3], dtype=dt), + np.array([2, 3, 0], dtype=dt), + np.array([3, 0, 0], dtype=dt), + np.array([0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero: 3rd + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([3, 0, 0, 1, 2], dtype=dt), + np.array([0, 0, 1, 2, 3], dtype=dt), + np.array([0, 1, 2, 3, 0], dtype=dt), + np.array([1, 2, 3, 0, 0], dtype=dt), + np.array([2, 3, 0, 0, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + + # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator + # being strictly within the array + def test_simple_strict_within(self): + dt = np.float64 + # Stacking zero on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 0], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 3], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) + assert_array_equal(l, r) + + # Stacking mirror on top of zero, first neighborhood strictly inside the + # array + x = np.array([1, 2, 3], dtype=dt) + r = [np.array([1, 2, 3, 1], dtype=dt)] + l = _multiarray_tests.test_neighborhood_iterator_oob( + x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) + assert_array_equal(l, r) + +class TestWarnings: + + def test_complex_warning(self): + x = np.array([1, 2]) + y = np.array([1 - 2j, 1 + 2j]) + + with warnings.catch_warnings(): + warnings.simplefilter("error", ComplexWarning) + assert_raises(ComplexWarning, x.__setitem__, slice(None), y) + assert_equal(x, [1, 2]) + + +class TestMinScalarType: + + def test_usigned_shortshort(self): + dt = np.min_scalar_type(2**8 - 1) + wanted = np.dtype('uint8') + assert_equal(wanted, dt) + + def test_usigned_short(self): + dt = np.min_scalar_type(2**16 - 1) + wanted = np.dtype('uint16') + assert_equal(wanted, dt) + + def test_usigned_int(self): + dt = np.min_scalar_type(2**32 
- 1) + wanted = np.dtype('uint32') + assert_equal(wanted, dt) + + def test_usigned_longlong(self): + dt = np.min_scalar_type(2**63 - 1) + wanted = np.dtype('uint64') + assert_equal(wanted, dt) + + def test_object(self): + dt = np.min_scalar_type(2**64) + wanted = np.dtype('O') + assert_equal(wanted, dt) + + +from numpy._core._internal import _dtype_from_pep3118 + + +class TestPEP3118Dtype: + def _check(self, spec, wanted): + dt = np.dtype(wanted) + actual = _dtype_from_pep3118(spec) + assert_equal(actual, dt, + err_msg=f"spec {spec!r} != dtype {wanted!r}") + + def test_native_padding(self): + align = np.dtype('i').alignment + for j in range(8): + if j == 0: + s = 'bi' + else: + s = 'b%dxi' % j + self._check('@' + s, {'f0': ('i1', 0), + 'f1': ('i', align * (1 + j // align))}) + self._check('=' + s, {'f0': ('i1', 0), + 'f1': ('i', 1 + j)}) + + def test_native_padding_2(self): + # Native padding should work also for structs and sub-arrays + self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) + self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) + + def test_trailing_padding(self): + # Trailing padding should be included, *and*, the item size + # should match the alignment if in aligned mode + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return align * (1 + (n - 1) // align) + + base = {"formats": ['i'], "names": ['f0']} + + self._check('ix', dict(itemsize=aligned(size + 1), **base)) + self._check('ixx', dict(itemsize=aligned(size + 2), **base)) + self._check('ixxx', dict(itemsize=aligned(size + 3), **base)) + self._check('ixxxx', dict(itemsize=aligned(size + 4), **base)) + self._check('i7x', dict(itemsize=aligned(size + 7), **base)) + + self._check('^ix', dict(itemsize=size + 1, **base)) + self._check('^ixx', dict(itemsize=size + 2, **base)) + self._check('^ixxx', dict(itemsize=size + 3, **base)) + self._check('^ixxxx', dict(itemsize=size + 4, **base)) + self._check('^i7x', dict(itemsize=size + 7, **base)) + + def test_native_padding_3(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), + ('sub', np.dtype('b,i')), ('c', 'i')], + align=True) + self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) + + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), + ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) + self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) + + def test_padding_with_array_inside_struct(self): + dt = np.dtype( + [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), + ('d', 'i')], + align=True) + self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) + + def test_byteorder_inside_struct(self): + # The byte order after @T{=i} should be '=', not '@'. + # Check this by noting the absence of native alignment. 
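For reference, the byte-order prefixes these specs exercise; '^' is the explicit "native, unaligned" prefix used by PEP 3118 (and by these tests), and is not accepted by the stdlib struct module. The padding effect of '@' versus '=' can be seen directly with struct:

```python
import struct

# '@'  native order, native size, native alignment (pads fields)
# '='  native order, standard size, no alignment
# '<' / '>'  little / big endian, standard size, no alignment
print(struct.calcsize('@bi'))  # typically 8: 3 pad bytes before the 'i'
print(struct.calcsize('=bi'))  # 5: no padding inserted
```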
+ self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), + 'f1': ('i', 5)}) + + def test_intra_padding(self): + # Natively aligned sub-arrays may require some internal padding + align = np.dtype('i').alignment + size = np.dtype('i').itemsize + + def aligned(n): + return (align * (1 + (n - 1) // align)) + + self._check('(3)T{ix}', ({ + "names": ['f0'], + "formats": ['i'], + "offsets": [0], + "itemsize": aligned(size + 1) + }, (3,))) + + def test_char_vs_string(self): + dt = np.dtype('c') + self._check('c', dt) + + dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')]) + self._check('4c4s', dt) + + def test_field_order(self): + # gh-9053 - previously, we relied on dictionary key order + self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')]) + self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')]) + + def test_unnamed_fields(self): + self._check('ii', [('f0', 'i'), ('f1', 'i')]) + self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')]) + + self._check('i', 'i') + self._check('i:f0:', [('f0', 'i')]) + + +class TestNewBufferProtocol: + """ Test PEP3118 buffers """ + + def _check_roundtrip(self, obj): + obj = np.asarray(obj) + x = memoryview(obj) + y = np.asarray(x) + y2 = np.array(x) + assert_(not y.flags.owndata) + assert_(y2.flags.owndata) + + assert_equal(y.dtype, obj.dtype) + assert_equal(y.shape, obj.shape) + assert_array_equal(obj, y) + + assert_equal(y2.dtype, obj.dtype) + assert_equal(y2.shape, obj.shape) + assert_array_equal(obj, y2) + + def test_roundtrip(self): + x = np.array([1, 2, 3, 4, 5], dtype='i4') + self._check_roundtrip(x) + + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + self._check_roundtrip(x) + + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :] + self._check_roundtrip(x) + + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + b'aaaa', 'bbbb', b'xxx', True, 1.0)], + dtype=dt) + self._check_roundtrip(x) + + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='>i2') + self._check_roundtrip(x) + + x = np.array([1, 2, 3], dtype='') + x = np.zeros(4, dtype=dt) + self._check_roundtrip(x) + + def test_roundtrip_scalar(self): + # Issue #4015. 
+ self._check_roundtrip(0) + + def test_invalid_buffer_format(self): + # datetime64 cannot be used fully in a buffer yet + # Should be fixed in the next Numpy major release + dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')]) + a = np.empty(3, dt) + assert_raises((ValueError, BufferError), memoryview, a) + assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]')) + + def test_export_simple_1d(self): + x = np.array([1, 2, 3, 4, 5], dtype='i') + y = memoryview(x) + assert_equal(y.format, 'i') + assert_equal(y.shape, (5,)) + assert_equal(y.ndim, 1) + assert_equal(y.strides, (4,)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 4) + + def test_export_simple_nd(self): + x = np.array([[1, 2], [3, 4]], dtype=np.float64) + y = memoryview(x) + assert_equal(y.format, 'd') + assert_equal(y.shape, (2, 2)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (16, 8)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 8) + + def test_export_discontiguous(self): + x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0, :] + y = memoryview(x) + assert_equal(y.format, 'f') + assert_equal(y.shape, (3, 3)) + assert_equal(y.ndim, 2) + assert_equal(y.strides, (36, 4)) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 4) + + def test_export_record(self): + dt = [('a', 'b'), + ('b', 'h'), + ('c', 'i'), + ('d', 'l'), + ('dx', 'q'), + ('e', 'B'), + ('f', 'H'), + ('g', 'I'), + ('h', 'L'), + ('hx', 'Q'), + ('i', np.single), + ('j', np.double), + ('k', np.longdouble), + ('ix', np.csingle), + ('jx', np.cdouble), + ('kx', np.clongdouble), + ('l', 'S4'), + ('m', 'U4'), + ('n', 'V3'), + ('o', '?'), + ('p', np.half), + ] + x = np.array( + [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + b'aaaa', 'bbbb', b' ', True, 1.0)], + dtype=dt) + y = memoryview(x) + assert_equal(y.shape, (1,)) + assert_equal(y.ndim, 1) + assert_equal(y.suboffsets, ()) + + sz = sum(np.dtype(b).itemsize for a, b in dt) + if np.dtype('l').itemsize == 4: + assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + else: + assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') + assert_equal(y.strides, (sz,)) + assert_equal(y.itemsize, sz) + + def test_export_subarray(self): + x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) + y = memoryview(x) + assert_equal(y.format, 'T{(2,2)i:a:}') + assert_equal(y.shape, ()) + assert_equal(y.ndim, 0) + assert_equal(y.strides, ()) + assert_equal(y.suboffsets, ()) + assert_equal(y.itemsize, 16) + + def test_export_endian(self): + x = np.array([1, 2, 3], dtype='>i') + y = memoryview(x) + if sys.byteorder == 'little': + assert_equal(y.format, '>i') + else: + assert_equal(y.format, 'i') + + x = np.array([1, 2, 3], dtype=' np.array(0, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(0, dtype=dt1), f"type {dt1} failed") + + for dt2 in np.typecodes['AllInteger']: + assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2), + f"type {dt1} and {dt2} failed") + + # Unsigned integers + for dt1 in 'BHILQP': + assert_(-1 < np.array(1, dtype=dt1), f"type {dt1} failed") + assert_(not -1 > np.array(1, dtype=dt1), f"type {dt1} failed") + assert_(-1 != np.array(1, dtype=dt1), f"type {dt1} failed") + + # Unsigned vs signed + for dt2 in 'bhilqp': + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + f"type {dt1} and 
{dt2} failed") + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + + # Signed integers and floats + for dt1 in 'bhlqp' + np.typecodes['Float']: + assert_(1 > np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(not 1 < np.array(-1, dtype=dt1), f"type {dt1} failed") + assert_(-1 == np.array(-1, dtype=dt1), f"type {dt1} failed") + + for dt2 in 'bhlqp' + np.typecodes['Float']: + assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2), + f"type {dt1} and {dt2} failed") + + def test_to_bool_scalar(self): + assert_equal(bool(np.array([False])), False) + assert_equal(bool(np.array([True])), True) + assert_equal(bool(np.array([[42]])), True) + + def test_to_bool_scalar_not_convertible(self): + + class NotConvertible: + def __bool__(self): + raise NotImplementedError + + assert_raises(NotImplementedError, bool, np.array(NotConvertible())) + assert_raises(NotImplementedError, bool, np.array([NotConvertible()])) + if IS_PYSTON: + pytest.skip("Pyston disables recursion checking") + if IS_WASM: + pytest.skip("Pyodide/WASM has limited stack size") + + self_containing = np.array([None]) + self_containing[0] = self_containing + + Error = RecursionError + + assert_raises(Error, bool, self_containing) # previously stack overflow + self_containing[0] = None # resolve circular reference + + def test_to_bool_scalar_size_errors(self): + with pytest.raises(ValueError, match=".*one element is ambiguous"): + bool(np.array([1, 2])) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((3, 0))) + + with pytest.raises(ValueError, match=".*empty array is ambiguous"): + bool(np.empty((0,))) + + def test_to_int_scalar(self): + # gh-9972 means that these aren't always the same + int_funcs = (int, lambda x: x.__int__()) + for int_func in int_funcs: + assert_equal(int_func(np.array(0)), 0) + with assert_warns(DeprecationWarning): + assert_equal(int_func(np.array([1])), 1) + with assert_warns(DeprecationWarning): + assert_equal(int_func(np.array([[42]])), 42) + assert_raises(TypeError, int_func, np.array([1, 2])) + + # gh-9972 + assert_equal(4, int_func(np.array('4'))) + assert_equal(5, int_func(np.bytes_(b'5'))) + assert_equal(6, int_func(np.str_('6'))) + + class NotConvertible: + def __int__(self): + raise NotImplementedError + assert_raises(NotImplementedError, + int_func, np.array(NotConvertible())) + with assert_warns(DeprecationWarning): + assert_raises(NotImplementedError, + int_func, np.array([NotConvertible()])) + + +class TestWhere: + def test_basic(self): + dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128, + np.longdouble, np.clongdouble] + for dt in dts: + c = np.ones(53, dtype=bool) + assert_equal(np.where( c, dt(0), dt(1)), dt(0)) + assert_equal(np.where(~c, dt(0), dt(1)), dt(1)) + assert_equal(np.where(True, dt(0), dt(1)), dt(0)) + assert_equal(np.where(False, dt(0), dt(1)), dt(1)) + d = np.ones_like(c).astype(dt) + e = np.zeros_like(d) + r = d.astype(dt) + c[7] = False + r[7] = e[7] + assert_equal(np.where(c, e, e), e) + assert_equal(np.where(c, d, e), r) + assert_equal(np.where(c, d, e[0]), r) + assert_equal(np.where(c, d[0], e), r) + assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2]) + 
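The selection identity these assertions rely on can be stated with a tiny standalone example: np.where(c, d, e) is equivalent to copying e and overwriting the True positions from d, and it behaves the same for strided (non-contiguous) views, which is what the sliced assertions here exercise:

```python
import numpy as np

c = np.array([True, False, True, False])
d = np.array([10, 11, 12, 13])
e = np.array([20, 21, 22, 23])

# np.where(c, d, e) selects from d where c is True, else from e
out = e.copy()
out[c] = d[c]
assert (np.where(c, d, e) == out).all()

# strided operands go through the same selection logic
assert (np.where(c[::2], d[::2], e[::2]) == out[::2]).all()
```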
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2]) + assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3]) + assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3]) + assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2]) + assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3]) + assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3]) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_exotic(self): + # object + assert_array_equal(np.where(True, None, None), np.array(None)) + # zero sized + m = np.array([], dtype=bool).reshape(0, 3) + b = np.array([], dtype=np.float64).reshape(0, 3) + assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3)) + + # object cast + d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313, + 0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013, + 1.267, 0.229, -1.39, 0.487]) + nan = float('NaN') + e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan, + 'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'], + dtype=object) + m = np.array([0, 0, 1, 0, 1, 1, 0, 0, 1, 1, + 0, 1, 1, 0, 1, 1, 0, 1, 0, 0], dtype=bool) + + r = e[:] + r[np.where(m)] = d[np.where(m)] + assert_array_equal(np.where(m, d, e), r) + + r = e[:] + r[np.where(~m)] = d[np.where(~m)] + assert_array_equal(np.where(m, e, d), r) + + assert_array_equal(np.where(m, e, e), e) + + # minimal dtype result with NaN scalar (e.g required by pandas) + d = np.array([1., 2.], dtype=np.float32) + e = float('NaN') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + e = float('-Infinity') + assert_equal(np.where(True, d, e).dtype, np.float32) + # With NEP 50 adopted, the float will overflow here: + e = 1e150 + with pytest.warns(RuntimeWarning, match="overflow"): + res = np.where(True, d, e) + assert res.dtype == np.float32 + + def test_ndim(self): + c = [True, False] + a = np.zeros((2, 25)) + b = np.ones((2, 25)) + r = np.where(np.array(c)[:, np.newaxis], a, b) + assert_array_equal(r[0], a[0]) + assert_array_equal(r[1], b[0]) + + a = a.T + b = b.T + r = np.where(c, a, b) + assert_array_equal(r[:, 0], a[:, 0]) + assert_array_equal(r[:, 1], b[:, 0]) + + def test_dtype_mix(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + a = np.uint32(1) + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + a = a.astype(np.float32) + b = b.astype(np.int64) + assert_equal(np.where(c, a, b), r) + + # non bool mask + c = c.astype(int) + c[c != 0] = 34242324 + assert_equal(np.where(c, a, b), r) + # invert + tmpmask = c != 0 + c[c == 0] = 41247212 + c[tmpmask] = 0 + assert_equal(np.where(c, b, a), r) + + def test_foreign(self): + c = np.array([False, True, False, False, False, False, True, False, + False, False, True, False]) + r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.], + dtype=np.float64) + a = np.ones(1, dtype='>i4') + b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.], + dtype=np.float64) + assert_equal(np.where(c, a, b), r) + + b = b.astype('>f8') + assert_equal(np.where(c, a, b), r) + + a = a.astype('i4') + assert_equal(np.where(c, a, b), r) + + def test_error(self): + c = [True, True] + a = np.ones((4, 5)) + b = np.ones((5, 5)) + assert_raises(ValueError, np.where, c, a, a) + 
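The failure modes checked in test_error come from broadcasting: condition, x and y are broadcast to a common shape before any selection happens, so incompatible shapes raise immediately. A standalone illustration:

```python
import numpy as np

# condition of shape (2,) cannot broadcast against (4, 5), so np.where
# fails the same way the ufunc broadcasting machinery does
c = np.array([True, True])
a = np.ones((4, 5))
try:
    np.where(c, a, a)
except ValueError as exc:
    print(exc)  # operands could not be broadcast together ...
```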
assert_raises(ValueError, np.where, c[0], a, b) + + def test_string(self): + # gh-4778 check strings are properly filled with nulls + a = np.array("abc") + b = np.array("x" * 753) + assert_equal(np.where(True, a, b), "abc") + assert_equal(np.where(False, b, a), "abc") + + # check native datatype sized strings + a = np.array("abcd") + b = np.array("x" * 8) + assert_equal(np.where(True, a, b), "abcd") + assert_equal(np.where(False, b, a), "abcd") + + def test_empty_result(self): + # pass empty where result through an assignment which reads the data of + # empty arrays, error detectable with valgrind, see gh-8922 + x = np.zeros((1, 1)) + ibad = np.vstack(np.where(x == 99.)) + assert_array_equal(ibad, + np.atleast_2d(np.array([[], []], dtype=np.intp))) + + def test_largedim(self): + # invalid read regression gh-9304 + shape = [10, 2, 3, 4, 5, 6] + np.random.seed(2) + array = np.random.rand(*shape) + + for i in range(10): + benchmark = array.nonzero() + result = array.nonzero() + assert_array_equal(benchmark, result) + + def test_kwargs(self): + a = np.zeros(1) + with assert_raises(TypeError): + np.where(a, x=a, y=a) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_empty_array(self): + x = np.array([]) + assert_(sys.getsizeof(x) > 0) + + def check_array(self, dtype): + elem_size = dtype(0).itemsize + + for length in [10, 50, 100, 500]: + x = np.arange(length, dtype=dtype) + assert_(sys.getsizeof(x) > length * elem_size) + + def test_array_int32(self): + self.check_array(np.int32) + + def test_array_int64(self): + self.check_array(np.int64) + + def test_array_float32(self): + self.check_array(np.float32) + + def test_array_float64(self): + self.check_array(np.float64) + + def test_view(self): + d = np.ones(100) + assert_(sys.getsizeof(d[...]) < sys.getsizeof(d)) + + def test_reshape(self): + d = np.ones(100) + assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy())) + + @_no_tracing + def test_resize(self): + d = np.ones(100) + old = sys.getsizeof(d) + d.resize(50) + assert_(old > sys.getsizeof(d)) + d.resize(150) + assert_(old < sys.getsizeof(d)) + + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) + + def test_error(self): + d = np.ones(100) + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestHashing: + + def test_arrays_not_hashable(self): + x = np.ones(3) + assert_raises(TypeError, hash, x) + + def test_collections_hashable(self): + x = np.array([]) + assert_(not isinstance(x, collections.abc.Hashable)) + + +class TestArrayPriority: + # This will go away when __array_priority__ is settled, meanwhile + # it serves to check unintended changes. + op = operator + binary_ops = [ + op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod, + op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt, + op.ge, op.lt, op.le, op.ne, op.eq + ] + + class Foo(np.ndarray): + __array_priority__ = 100. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Bar(np.ndarray): + __array_priority__ = 101. + + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + class Other: + __array_priority__ = 1000. 
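class Other is the non-ndarray side of the priority protocol: every operator method defined just below returns Other(), and its __array_priority__ of 1000 is what makes ndarray yield. In isolation the mechanism looks like this (hypothetical class name Defers, illustration only):

```python
import numpy as np

class Defers:
    # higher than ndarray's default priority of 0.0
    __array_priority__ = 1000.0

    def __radd__(self, other):
        return "Defers wins"

# ndarray.__add__ sees the higher priority plus a __radd__ and returns
# NotImplemented, so Python dispatches to Defers.__radd__:
assert np.arange(3) + Defers() == "Defers wins"
```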
+ + def _all(self, other): + return self.__class__() + + __add__ = __radd__ = _all + __sub__ = __rsub__ = _all + __mul__ = __rmul__ = _all + __pow__ = __rpow__ = _all + __mod__ = __rmod__ = _all + __truediv__ = __rtruediv__ = _all + __floordiv__ = __rfloordiv__ = _all + __and__ = __rand__ = _all + __xor__ = __rxor__ = _all + __or__ = __ror__ = _all + __lshift__ = __rlshift__ = _all + __rshift__ = __rrshift__ = _all + __eq__ = _all + __ne__ = _all + __gt__ = _all + __ge__ = _all + __lt__ = _all + __le__ = _all + + def test_ndarray_subclass(self): + a = np.array([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_ndarray_other(self): + a = np.array([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + def test_subclass_subclass(self): + a = self.Foo([1, 2]) + b = self.Bar([1, 2]) + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Bar), msg) + assert_(isinstance(f(b, a), self.Bar), msg) + + def test_subclass_other(self): + a = self.Foo([1, 2]) + b = self.Other() + for f in self.binary_ops: + msg = repr(f) + assert_(isinstance(f(a, b), self.Other), msg) + assert_(isinstance(f(b, a), self.Other), msg) + + +class TestBytestringArrayNonzero: + + def test_empty_bstring_array_is_falsey(self): + assert_(not np.array([''], dtype=str)) + + def test_whitespace_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0\0' + assert_(a) + + def test_all_null_bstring_array_is_falsey(self): + a = np.array(['spam'], dtype=str) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_bstring_array_is_truthy(self): + a = np.array(['spam'], dtype=str) + a[0] = ' \0 \0' + assert_(a) + + +class TestUnicodeEncoding: + """ + Tests for encoding related bugs, such as UCS2 vs UCS4, round-tripping + issues, etc + """ + def test_round_trip(self): + """ Tests that GETITEM, SETITEM, and PyArray_Scalar roundtrip """ + # gh-15363 + arr = np.zeros(shape=(), dtype="U1") + for i in range(1, sys.maxunicode + 1): + expected = chr(i) + arr[()] = expected + assert arr[()] == expected + assert arr.item() == expected + + def test_assign_scalar(self): + # gh-3258 + l = np.array(['aa', 'bb']) + l[:] = np.str_('cc') + assert_equal(l, ['cc', 'cc']) + + def test_fill_scalar(self): + # gh-7227 + l = np.array(['aa', 'bb']) + l.fill(np.str_('cc')) + assert_equal(l, ['cc', 'cc']) + + +class TestUnicodeArrayNonzero: + + def test_empty_ustring_array_is_falsey(self): + assert_(not np.array([''], dtype=np.str_)) + + def test_whitespace_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = ' \0\0' + assert_(a) + + def test_all_null_ustring_array_is_falsey(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = '\0\0\0\0' + assert_(not a) + + def test_null_inside_ustring_array_is_truthy(self): + a = np.array(['eggs'], dtype=np.str_) + a[0] = ' \0 \0' + assert_(a) + + +class TestFormat: + + def test_0d(self): + a = np.array(np.pi) + assert_equal(f'{a:0.3g}', '3.14') + assert_equal(f'{a[()]:0.3g}', '3.14') + + def test_1d_no_format(self): + a = np.array([np.pi]) + assert_equal(f'{a}', str(a)) + + def test_1d_format(self): + # until gh-5543, ensure that the behaviour matches what it used to be + a = np.array([np.pi]) + assert_raises(TypeError, '{:30}'.format, a) + + +from numpy.testing import IS_PYPY + + +class TestCTypes: + + def 
test_ctypes_is_available(self): + test_arr = np.array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(ctypes, test_arr.ctypes._ctypes) + assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + + def test_ctypes_is_not_available(self): + from numpy._core import _internal + _internal.ctypes = None + try: + test_arr = np.array([[1, 2, 3], [4, 5, 6]]) + + assert_(isinstance(test_arr.ctypes._ctypes, + _internal._missing_ctypes)) + assert_equal(tuple(test_arr.ctypes.shape), (2, 3)) + finally: + _internal.ctypes = ctypes + + def _make_readonly(x): + x.flags.writeable = False + return x + + @pytest.mark.parametrize('arr', [ + np.array([1, 2, 3]), + np.array([['one', 'two'], ['three', 'four']]), + np.array((1, 2), dtype='i4,i4'), + np.zeros((2,), dtype=np.dtype({ + "formats": [' 2, [44, 55]) + assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]])) + # hit one of the failing paths + assert_raises(ValueError, np.place, a, a > 20, []) + + def test_put_noncontiguous(self): + a = np.arange(6).reshape(2, 3).T # force non-c-contiguous + np.put(a, [0, 2], [44, 55]) + assert_equal(a, np.array([[44, 3], [55, 4], [2, 5]])) + + def test_putmask_noncontiguous(self): + a = np.arange(6).reshape(2, 3).T # force non-c-contiguous + # uses arr_putmask + np.putmask(a, a > 2, a**2) + assert_equal(a, np.array([[0, 9], [1, 16], [2, 25]])) + + def test_take_mode_raise(self): + a = np.arange(6, dtype='int') + out = np.empty(2, dtype='int') + np.take(a, [0, 2], out=out, mode='raise') + assert_equal(out, np.array([0, 2])) + + def test_choose_mod_raise(self): + a = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]]) + out = np.empty((3, 3), dtype='int') + choices = [-10, 10] + np.choose(a, choices, out=out, mode='raise') + assert_equal(out, np.array([[ 10, -10, 10], + [-10, 10, -10], + [ 10, -10, 10]])) + + def test_flatiter__array__(self): + a = np.arange(9).reshape(3, 3) + b = a.T.flat + c = b.__array__() + # triggers the WRITEBACKIFCOPY resolution, assuming refcount semantics + del c + + def test_dot_out(self): + # if HAVE_CBLAS, will use WRITEBACKIFCOPY + a = np.arange(9, dtype=float).reshape(3, 3) + b = np.dot(a, a, out=a) + assert_equal(b, np.array([[15, 18, 21], [42, 54, 66], [69, 90, 111]])) + + def test_view_assign(self): + from numpy._core._multiarray_tests import ( + npy_create_writebackifcopy, + npy_resolve, + ) + + arr = np.arange(9).reshape(3, 3).T + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] = -100 + npy_resolve(arr_wb) + # arr changes after resolve, even though we assigned to arr_wb + assert_equal(arr, -100) + # after resolve, the two arrays no longer reference each other + assert_(arr_wb.ctypes.data != 0) + assert_equal(arr_wb.base, None) + # assigning to arr_wb does not get transferred to arr + arr_wb[...] = 100 + assert_equal(arr, -100) + + @pytest.mark.leaks_references( + reason="increments self in dealloc; ignore since deprecated path.") + def test_dealloc_warning(self): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + arr = np.arange(9).reshape(3, 3) + v = arr.T + _multiarray_tests.npy_abuse_writebackifcopy(v) + assert len(sup.log) == 1 + + def test_view_discard_refcount(self): + from numpy._core._multiarray_tests import ( + npy_create_writebackifcopy, + npy_discard, + ) + + arr = np.arange(9).reshape(3, 3).T + orig = arr.copy() + if HAS_REFCOUNT: + arr_cnt = sys.getrefcount(arr) + arr_wb = npy_create_writebackifcopy(arr) + assert_(arr_wb.flags.writebackifcopy) + assert_(arr_wb.base is arr) + arr_wb[...] 
= -100
+        npy_discard(arr_wb)
+        # arr remains unchanged after discard
+        assert_equal(arr, orig)
+        # after discard, the two arrays no longer reference each other
+        assert_(arr_wb.ctypes.data != 0)
+        assert_equal(arr_wb.base, None)
+        if HAS_REFCOUNT:
+            assert_equal(arr_cnt, sys.getrefcount(arr))
+        # assigning to arr_wb does not get transferred to arr
+        arr_wb[...] = 100
+        assert_equal(arr, orig)
+
+
+class TestArange:
+    def test_infinite(self):
+        assert_raises_regex(
+            ValueError, "size exceeded",
+            np.arange, 0, np.inf
+        )
+
+    def test_nan_step(self):
+        assert_raises_regex(
+            ValueError, "cannot compute length",
+            np.arange, 0, 1, np.nan
+        )
+
+    def test_zero_step(self):
+        assert_raises(ZeroDivisionError, np.arange, 0, 10, 0)
+        assert_raises(ZeroDivisionError, np.arange, 0.0, 10.0, 0.0)
+
+        # empty range
+        assert_raises(ZeroDivisionError, np.arange, 0, 0, 0)
+        assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
+
+    def test_require_range(self):
+        assert_raises(TypeError, np.arange)
+        assert_raises(TypeError, np.arange, step=3)
+        assert_raises(TypeError, np.arange, dtype='int64')
+        assert_raises(TypeError, np.arange, start=4)
+
+    def test_start_stop_kwarg(self):
+        keyword_stop = np.arange(stop=3)
+        keyword_zerotostop = np.arange(0, stop=3)
+        keyword_start_stop = np.arange(start=3, stop=9)
+
+        assert len(keyword_stop) == 3
+        assert len(keyword_zerotostop) == 3
+        assert len(keyword_start_stop) == 6
+        assert_array_equal(keyword_stop, keyword_zerotostop)
+
+    def test_arange_booleans(self):
+        # Arange makes some sense for booleans and works up to length 2.
+        # But it is weird since `arange(2, 4, dtype=bool)` works.
+        # Arguably, much or all of this could be deprecated/removed.
+        res = np.arange(False, dtype=bool)
+        assert_array_equal(res, np.array([], dtype="bool"))
+
+        res = np.arange(True, dtype="bool")
+        assert_array_equal(res, [False])
+
+        res = np.arange(2, dtype="bool")
+        assert_array_equal(res, [False, True])
+
+        # This case is especially weird, but drops out without special case:
+        res = np.arange(6, 8, dtype="bool")
+        assert_array_equal(res, [True, True])
+
+        with pytest.raises(TypeError):
+            np.arange(3, dtype="bool")
+
+    @pytest.mark.parametrize("dtype", ["S3", "U", "5i"])
+    def test_rejects_bad_dtypes(self, dtype):
+        dtype = np.dtype(dtype)
+        DType_name = re.escape(str(type(dtype)))
+        with pytest.raises(TypeError,
+                match=rf"arange\(\) not supported for inputs .* {DType_name}"):
+            np.arange(2, dtype=dtype)
+
+    def test_rejects_strings(self):
+        # Explicitly test error for strings which may call "b" - "a":
+        DType_name = re.escape(str(type(np.array("a").dtype)))
+        with pytest.raises(TypeError,
+                match=rf"arange\(\) not supported for inputs .* {DType_name}"):
+            np.arange("a", "b")
+
+    def test_byteswapped(self):
+        res_be = np.arange(1, 1000, dtype=">i4")
+        res_le = np.arange(1, 1000, dtype="<i4")
+        assert res_be.dtype == ">i4"
+        assert res_le.dtype == "<i4"
+        assert_array_equal(res_le, res_be)
+
+
+@pytest.mark.parametrize("op", [
+    operator.eq, operator.ne, operator.le, operator.lt, operator.ge,
+    operator.gt])
+def test_comparisons_forwards_error(op):
+    class NotArray:
+        def __array__(self, dtype=None, copy=None):
+            raise TypeError("run you fools")
+
+    with pytest.raises(TypeError, match="run you fools"):
+        op(np.arange(2), NotArray())
+
+    with pytest.raises(TypeError, match="run you fools"):
+        op(NotArray(), np.arange(2))
+
+
+def test_richcompare_scalar_boolean_singleton_return():
+    # These are currently guaranteed to be the boolean numpy singletons
+    assert (np.array(0) == "a") is np.bool_(False)
+    assert (np.array(0) != "a") is np.bool_(True)
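The identity ("is") checks in this test work because NumPy interns its two boolean scalars; the same singletons are exposed publicly as np.True_ and np.False_:

```python
import numpy as np

# comparisons that return a 0-d boolean scalar hand back the interned
# singletons, so identity checks are stable
assert np.bool_(True) is np.True_
assert np.bool_(False) is np.False_
assert (np.array(0) == "a") is np.False_  # string comparison -> False singleton
```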
+    assert (np.int16(0) == "a") is np.bool_(False)
+    assert (np.int16(0) != "a") is np.bool_(True)
+
+
+@pytest.mark.parametrize("op", [
+    operator.eq, operator.ne, operator.le, operator.lt, operator.ge,
+    operator.gt])
+def test_ragged_comparison_fails(op):
+    # This needs to convert the internal array to True/False, which fails:
+    a = np.array([1, np.array([1, 2, 3])], dtype=object)
+    b = np.array([1, np.array([1, 2, 3])], dtype=object)
+
+    with pytest.raises(ValueError, match="The truth value.*ambiguous"):
+        op(a, b)
+
+
+@pytest.mark.parametrize(
+    ["fun", "npfun"],
+    [
+        (_multiarray_tests.npy_cabs, np.absolute),
+        (_multiarray_tests.npy_carg, np.angle)
+    ]
+)
+@pytest.mark.parametrize("x", [1, np.inf, -np.inf, np.nan])
+@pytest.mark.parametrize("y", [1, np.inf, -np.inf, np.nan])
+@pytest.mark.parametrize("test_dtype", np.complexfloating.__subclasses__())
+def test_npymath_complex(fun, npfun, x, y, test_dtype):
+    # Smoketest npymath functions
+    z = test_dtype(complex(x, y))
+    with np.errstate(invalid='ignore'):
+        # Fallback implementations may emit a warning for +-inf (see gh-24876):
+        # RuntimeWarning: invalid value encountered in absolute
+        got = fun(z)
+        expected = npfun(z)
+        assert_allclose(got, expected)
+
+
+def test_npymath_real():
+    # Smoketest npymath functions
+    from numpy._core._multiarray_tests import (
+        npy_cosh,
+        npy_log10,
+        npy_sinh,
+        npy_tan,
+        npy_tanh,
+    )
+
+    funcs = {npy_log10: np.log10,
+             npy_cosh: np.cosh,
+             npy_sinh: np.sinh,
+             npy_tan: np.tan,
+             npy_tanh: np.tanh}
+    vals = (1, np.inf, -np.inf, np.nan)
+    types = (np.float32, np.float64, np.longdouble)
+
+    with np.errstate(all='ignore'):
+        for fun, npfun in funcs.items():
+            for x, t in itertools.product(vals, types):
+                z = t(x)
+                got = fun(z)
+                expected = npfun(z)
+                assert_allclose(got, expected)
+
+def test_uintalignment_and_alignment():
+    # alignment code needs to satisfy these requirements:
+    #  1. numpy structs match C struct layout
+    #  2. ufuncs/casting is safe w.r.t. aligned access
+    #  3. copy code is safe w.r.t. "uint aligned" access
+    #
+    # Complex types are the main problem: their alignment may not be the
+    # same as their "uint alignment".
+    #
+    # This test might only fail on certain platforms, where uint64 alignment
+    # is not equal to complex64 alignment. Checks 2 and 3 will only fail
+    # when numpy is built with DEBUG=1.
+
+    d1 = np.dtype('u1,c8', align=True)
+    d2 = np.dtype('u4,c8', align=True)
+    d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
+
+    assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
+    assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
+    assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
+
+    # check that C struct matches numpy struct size
+    s = _multiarray_tests.get_struct_alignments()
+    for d, (alignment, size) in zip([d1, d2, d3], s):
+        assert_equal(d.alignment, alignment)
+        assert_equal(d.itemsize, size)
+
+    # check that ufuncs don't complain in debug mode
+    # (this is probably OK if the aligned flag is true above)
+    src = np.zeros((2, 2), dtype=d1)['f1']  # 4-byte aligned, often
+    np.exp(src)  # assert fails?
+
+    # check that copy code doesn't complain in debug mode
+    dst = np.zeros((2, 2), dtype='c8')
+    dst[:, 1] = src[:, 1]  # assert in lowlevel_strided_loops fails?
+
+class TestAlignment:
+    # adapted from scipy._lib.tests.test__util.test__aligned_zeros
+    # Checks that unusual memory alignments don't trip up numpy.
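The reason complex64 is singled out above: it has the itemsize of a uint64 but usually only the alignment of its float32 components, so copy loops that move it as a single 8-byte integer can see under-aligned pointers. A quick look at the relevant numbers (platform-dependent, hence the hedged comments):

```python
import numpy as np

print(np.dtype(np.complex64).itemsize)    # 8
print(np.dtype(np.complex64).alignment)   # commonly 4 (two float32s)
print(np.dtype(np.uint64).alignment)      # commonly 8

# with align=True, padding is inserted so the c8 field starts on its
# alignment boundary, mirroring the C struct layout checked above
print(np.dtype('u1,c8', align=True).itemsize)  # commonly 12
```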
+ + def check(self, shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError + + def test_various_alignments(self): + for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: + if dtype == 'O': + # object dtype can't be misaligned + continue + for shape in [n, (1, 2, 3, n)]: + self.check(shape, np.dtype(dtype), order, align) + + def test_strided_loop_alignments(self): + # particularly test that complex64 and float128 use right alignment + # code-paths, since these are particularly problematic. It is useful to + # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run. + for align in [1, 2, 4, 8, 12, 16, None]: + xf64 = _aligned_zeros(3, np.float64) + + xc64 = _aligned_zeros(3, np.complex64, align=align) + xf128 = _aligned_zeros(3, np.longdouble, align=align) + + # test casting, both to and from misaligned + with suppress_warnings() as sup: + sup.filter(ComplexWarning, "Casting complex values") + xc64.astype('f8') + xf64.astype(np.complex64) + test = xc64 + xf64 + + xf128.astype('f8') + xf64.astype(np.longdouble) + test = xf128 + xf64 + + test = xf128 + xc64 + + # test copy, both to and from misaligned + # contig copy + xf64[:] = xf64.copy() + xc64[:] = xc64.copy() + xf128[:] = xf128.copy() + # strided copy + xf64[::2] = xf64[::2].copy() + xc64[::2] = xc64[::2].copy() + xf128[::2] = xf128[::2].copy() + +def test_getfield(): + a = np.arange(32, dtype='uint16') + if sys.byteorder == 'little': + i = 0 + j = 1 + else: + i = 1 + j = 0 + b = a.getfield('int8', i) + assert_equal(b, a) + b = a.getfield('int8', j) + assert_equal(b, 0) + pytest.raises(ValueError, a.getfield, 'uint8', -1) + pytest.raises(ValueError, a.getfield, 'uint8', 16) + pytest.raises(ValueError, a.getfield, 'uint64', 0) + + +class TestViewDtype: + """ + Verify that making a view of a non-contiguous array works as expected. 
+ """ + def test_smaller_dtype_multiple(self): + # x is non-contiguous + x = np.arange(10, dtype=' rc_a) + assert_(sys.getrefcount(dt) > rc_dt) + # del 'it' + it = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + + # With a copy + a = arange(6, dtype='f4') + dt = np.dtype('f4') + rc_a = sys.getrefcount(a) + rc_dt = sys.getrefcount(dt) + it = nditer(a, [], + [['readwrite']], + op_dtypes=[dt]) + rc2_a = sys.getrefcount(a) + rc2_dt = sys.getrefcount(dt) + it2 = it.copy() + assert_(sys.getrefcount(a) > rc2_a) + if sys.version_info < (3, 13): + # np.dtype('f4') is immortal after Python 3.13 + assert_(sys.getrefcount(dt) > rc2_dt) + it = None + assert_equal(sys.getrefcount(a), rc2_a) + assert_equal(sys.getrefcount(dt), rc2_dt) + it2 = None + assert_equal(sys.getrefcount(a), rc_a) + assert_equal(sys.getrefcount(dt), rc_dt) + +def test_iter_best_order(): + # The iterator should always find the iteration order + # with increasing memory addresses + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, [], [['readonly']]) + assert_equal(list(i), a) + # Fortran-order + i = nditer(aview.T, [], [['readonly']]) + assert_equal(list(i), a) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), [], [['readonly']]) + assert_equal(list(i), a) + +def test_iter_c_order(): + # Test forcing C order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='C') + assert_equal(list(i), aview.ravel(order='C')) + # Fortran-order + i = nditer(aview.T, order='C') + assert_equal(list(i), aview.T.ravel(order='C')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='C') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='C')) + +def test_iter_f_order(): + # Test forcing F order + + # Test the ordering for 1-D to 5-D shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='F') + assert_equal(list(i), aview.ravel(order='F')) + # Fortran-order + i = nditer(aview.T, order='F') + assert_equal(list(i), aview.T.ravel(order='F')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='F') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='F')) + +def test_iter_c_or_f_order(): + # Test forcing any contiguous (C or F) order + + # Test the ordering for 1-D to 5-D 
shapes + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + a = arange(np.prod(shape)) + # Test each combination of positive and negative strides + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, order='A') + assert_equal(list(i), aview.ravel(order='A')) + # Fortran-order + i = nditer(aview.T, order='A') + assert_equal(list(i), aview.T.ravel(order='A')) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), order='A') + assert_equal(list(i), + aview.swapaxes(0, 1).ravel(order='A')) + +def test_nditer_multi_index_set(): + # Test the multi_index set + a = np.arange(6).reshape(2, 3) + it = np.nditer(a, flags=['multi_index']) + + # Removes the iteration on two first elements of a[0] + it.multi_index = (0, 2,) + + assert_equal(list(it), [2, 3, 4, 5]) + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_nditer_multi_index_set_refcount(): + # Test if the reference count on index variable is decreased + + index = 0 + i = np.nditer(np.array([111, 222, 333, 444]), flags=['multi_index']) + + start_count = sys.getrefcount(index) + i.multi_index = (index,) + end_count = sys.getrefcount(index) + + assert_equal(start_count, end_count) + +def test_iter_best_order_multi_index_1d(): + # The multi-indices should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0,), (1,), (2,), (3,)]) + # 1D reversed order + i = nditer(a[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(3,), (2,), (1,), (0,)]) + +def test_iter_best_order_multi_index_2d(): + # The multi-indices should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2)]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (1, 1), (1, 2), (0, 0), (0, 1), (0, 2)]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (0, 1), (0, 0), (1, 2), (1, 1), (1, 0)]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (1, 1), (1, 0), (0, 2), (0, 1), (0, 0)]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 0), (0, 0), (1, 1), (0, 1), (1, 2), (0, 2)]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(0, 2), (1, 2), (0, 1), (1, 1), (0, 0), (1, 0)]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), [(1, 2), (0, 2), (1, 1), (0, 1), (1, 0), (0, 0)]) + +def test_iter_best_order_multi_index_3d(): + # The multi-indices should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['multi_index'], 
[['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1), + (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1)]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0), + (0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1)]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1), (1, 2, 0), (1, 2, 1), + (0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (0, 2, 0), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (0, 2, 1), (0, 1, 0), (0, 1, 1), (0, 0, 0), (0, 0, 1), + (1, 2, 0), (1, 2, 1), (1, 1, 0), (1, 1, 1), (1, 0, 0), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (0, 0, 0), (0, 1, 1), (0, 1, 0), (0, 2, 1), (0, 2, 0), + (1, 0, 1), (1, 0, 0), (1, 1, 1), (1, 1, 0), (1, 2, 1), (1, 2, 0)]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(1, 0, 0), (0, 0, 0), (1, 1, 0), (0, 1, 0), (1, 2, 0), (0, 2, 0), + (1, 0, 1), (0, 0, 1), (1, 1, 1), (0, 1, 1), (1, 2, 1), (0, 2, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 2, 0), (1, 2, 0), (0, 1, 0), (1, 1, 0), (0, 0, 0), (1, 0, 0), + (0, 2, 1), (1, 2, 1), (0, 1, 1), (1, 1, 1), (0, 0, 1), (1, 0, 1)]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['multi_index'], [['readonly']]) + assert_equal(iter_multi_index(i), + [(0, 0, 1), (1, 0, 1), (0, 1, 1), (1, 1, 1), (0, 2, 1), (1, 2, 1), + (0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 2, 0), (1, 2, 0)]) + +def test_iter_best_order_c_index_1d(): + # The C index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_c_index_2d(): + # The C index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 3, 1, 4, 2, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 4, 5, 0, 1, 2]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 1, 0, 5, 4, 3]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 0, 4, 1, 5, 2]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [2, 5, 1, 4, 0, 3]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + 
['c_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 2, 4, 1, 3, 0]) + +def test_iter_best_order_c_index_3d(): + # The C index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D reversed C-order + i = nditer(a.reshape(2, 3, 2)[::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['c_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + +def test_iter_best_order_f_index_1d(): + # The Fortran index should be correct with any reordering + + a = arange(4) + # 1D order + i = nditer(a, ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3]) + # 1D reversed order + i = nditer(a[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [3, 2, 1, 0]) + +def test_iter_best_order_f_index_2d(): + # The Fortran index should be correct with any reordering + + a = arange(6) + # 2D C-order + i = nditer(a.reshape(2, 3), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 2, 4, 1, 3, 5]) + # 2D Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [0, 1, 2, 3, 4, 5]) + # 2D reversed C-order + i = nditer(a.reshape(2, 3)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 3, 5, 0, 2, 4]) + i = nditer(a.reshape(2, 3)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 2, 0, 5, 3, 1]) + i = nditer(a.reshape(2, 3)[::-1, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 3, 1, 4, 2, 0]) + # 2D reversed Fortran-order + i = nditer(a.reshape(2, 3).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [1, 0, 3, 2, 5, 4]) + i = nditer(a.reshape(2, 3).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [4, 5, 2, 3, 0, 1]) + i = nditer(a.reshape(2, 3).copy(order='F')[::-1, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), [5, 4, 3, 2, 1, 0]) + +def test_iter_best_order_f_index_3d(): + # The Fortran index should be correct with any reordering + + a = arange(12) + # 3D C-order + i = nditer(a.reshape(2, 3, 2), ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 6, 2, 8, 4, 10, 1, 7, 3, 9, 5, 11]) + # 3D Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F'), + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + # 3D reversed C-order + i = 
nditer(a.reshape(2, 3, 2)[::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 7, 3, 9, 5, 11, 0, 6, 2, 8, 4, 10]) + i = nditer(a.reshape(2, 3, 2)[:, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 10, 2, 8, 0, 6, 5, 11, 3, 9, 1, 7]) + i = nditer(a.reshape(2, 3, 2)[:, :, ::-1], ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 0, 8, 2, 10, 4, 7, 1, 9, 3, 11, 5]) + # 3D reversed Fortran-order + i = nditer(a.reshape(2, 3, 2).copy(order='F')[::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [4, 5, 2, 3, 0, 1, 10, 11, 8, 9, 6, 7]) + i = nditer(a.reshape(2, 3, 2).copy(order='F')[:, :, ::-1], + ['f_index'], [['readonly']]) + assert_equal(iter_indices(i), + [6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5]) + +def test_iter_no_inner_full_coalesce(): + # Check no_inner iterators which coalesce into a single inner loop + + for shape in [(5,), (3, 4), (2, 3, 4), (2, 3, 4, 3), (2, 3, 2, 2, 3)]: + size = np.prod(shape) + a = arange(size) + # Test each combination of forward and backwards indexing + for dirs in range(2**len(shape)): + dirs_index = [slice(None)] * len(shape) + for bit in range(len(shape)): + if ((2**bit) & dirs): + dirs_index[bit] = slice(None, None, -1) + dirs_index = tuple(dirs_index) + + aview = a.reshape(shape)[dirs_index] + # C-order + i = nditer(aview, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Fortran-order + i = nditer(aview.T, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + # Other order + if len(shape) > 2: + i = nditer(aview.swapaxes(0, 1), + ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (size,)) + +def test_iter_no_inner_dim_coalescing(): + # Check no_inner iterators whose dimensions may not coalesce completely + + # Skipping the last element in a dimension prevents coalescing + # with the next-bigger dimension + a = arange(24).reshape(2, 3, 4)[:, :, :-1] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (3,)) + a = arange(24).reshape(2, 3, 4)[:, :-1, :] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 2) + assert_equal(i[0].shape, (8,)) + a = arange(24).reshape(2, 3, 4)[:-1, :, :] + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (12,)) + + # Even with lots of 1-sized dimensions, should still coalesce + a = arange(24).reshape(1, 1, 2, 1, 1, 3, 1, 1, 4, 1, 1) + i = nditer(a, ['external_loop'], [['readonly']]) + assert_equal(i.ndim, 1) + assert_equal(i[0].shape, (24,)) + +def test_iter_dim_coalescing(): + # Check that the correct number of dimensions are coalesced + + # Tracking a multi-index disables coalescing + a = arange(24).reshape(2, 3, 4) + i = nditer(a, ['multi_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # A tracked index can allow coalescing if it's compatible with the array + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = nditer(a3d.swapaxes(0, 1), ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['c_index'], [['readonly']]) + assert_equal(i.ndim, 3) + i = nditer(a3d.T, ['f_index'], [['readonly']]) + assert_equal(i.ndim, 1) + i = 
nditer(a3d.T.swapaxes(0, 1), ['f_index'], [['readonly']]) + assert_equal(i.ndim, 3) + + # When C or F order is forced, coalescing may still occur + a3d = arange(24).reshape(2, 3, 4) + i = nditer(a3d, order='C') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='C') + assert_equal(i.ndim, 3) + i = nditer(a3d, order='F') + assert_equal(i.ndim, 3) + i = nditer(a3d.T, order='F') + assert_equal(i.ndim, 1) + i = nditer(a3d, order='A') + assert_equal(i.ndim, 1) + i = nditer(a3d.T, order='A') + assert_equal(i.ndim, 1) + +def test_iter_broadcasting(): + # Standard NumPy broadcasting rules + + # 1D with scalar + i = nditer([arange(6), np.int32(2)], ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (6,)) + + # 2D with scalar + i = nditer([arange(6).reshape(2, 3), np.int32(2)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 1D + i = nditer([arange(6).reshape(2, 3), arange(3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + # 2D with 2D + i = nditer([arange(2).reshape(2, 1), arange(3).reshape(1, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 6) + assert_equal(i.shape, (2, 3)) + + # 3D with scalar + i = nditer([np.int32(2), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 1D + i = nditer([arange(3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 2D + i = nditer([arange(6).reshape(2, 3), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(2).reshape(2, 1), arange(24).reshape(4, 2, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(3).reshape(1, 3), arange(8).reshape(4, 2, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + # 3D with 3D + i = nditer([arange(2).reshape(1, 2, 1), arange(3).reshape(1, 1, 3), + arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']] * 3) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(6).reshape(1, 2, 3), arange(4).reshape(4, 1, 1)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + i = nditer([arange(24).reshape(4, 2, 3), arange(12).reshape(4, 1, 3)], + ['multi_index'], [['readonly']] * 2) + assert_equal(i.itersize, 24) + assert_equal(i.shape, (4, 2, 3)) + +def test_iter_itershape(): + # Check that allocated outputs work with a specified shape + a = np.arange(6, dtype='i2').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (2, 3, 4)) + assert_equal(i.operands[1].strides, (24, 8, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + 
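For context, a minimal standalone sketch of the itershape rule exercised here (illustrative only, not part of the patch): -1 entries in itershape are inferred from the existing operands, so only the brand-new axis is pinned to a fixed length.

import numpy as np

a = np.arange(6, dtype='i2').reshape(2, 3)
it = np.nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']],
               op_axes=[[0, 1, None], None], itershape=(-1, -1, 4))
assert it.operands[1].shape == (2, 3, 4)   # the -1 entries came from 'a'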
assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (8, 24, 2)) + + i = nditer([a.T, None], [], [['readonly'], ['writeonly', 'allocate']], + order='F', + op_axes=[[0, 1, None], None], + itershape=(-1, -1, 4)) + assert_equal(i.operands[1].shape, (3, 2, 4)) + assert_equal(i.operands[1].strides, (2, 6, 12)) + + # If we specify 1 in the itershape, it shouldn't allow broadcasting + # of that dimension to a bigger value + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_axes=[[0, 1, None], None], + itershape=(-1, 1, 4)) + # Regression test: with itershape given but no op_axes, the op_axes data is NULLed correctly + i = np.nditer([np.ones(2), None, None], itershape=(2,)) + +def test_iter_broadcasting_errors(): + # Check that errors are thrown for bad broadcasting shapes + + # 1D with 1D + assert_raises(ValueError, nditer, [arange(2), arange(3)], + [], [['readonly']] * 2) + # 2D with 1D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(2)], + [], [['readonly']] * 2) + # 2D with 2D + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(9).reshape(3, 3)], + [], [['readonly']] * 2) + assert_raises(ValueError, nditer, + [arange(6).reshape(2, 3), arange(4).reshape(2, 2)], + [], [['readonly']] * 2) + # 3D with 3D + assert_raises(ValueError, nditer, + [arange(36).reshape(3, 3, 4), arange(24).reshape(2, 3, 4)], + [], [['readonly']] * 2) + assert_raises(ValueError, nditer, + [arange(8).reshape(2, 4, 1), arange(24).reshape(2, 3, 4)], + [], [['readonly']] * 2) + + # Verify that the error message mentions the right shapes + try: + nditer([arange(2).reshape(1, 2, 1), + arange(3).reshape(1, 3), + arange(6).reshape(2, 3)], + [], + [['readonly'], ['readonly'], ['writeonly', 'no_broadcast']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the 3rd operand + assert_(msg.find('(2,3)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,3)') + # The message should contain the broadcast shape + assert_(msg.find('(1,2,3)') >= 0, + f'Message "{msg}" doesn\'t contain broadcast shape (1,2,3)') + + try: + nditer([arange(6).reshape(2, 3), arange(2)], + [], + [['readonly'], ['readonly']], + op_axes=[[0, 1], [0, np.newaxis]], + itershape=(4, 3)) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain "shape->remappedshape" for each operand + assert_(msg.find('(2,3)->(2,3)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,3)->(2,3)') + assert_(msg.find('(2,)->(2,newaxis)') >= 0, + f'Message "{msg}" doesn\'t contain remapped operand shape (2,)->(2,newaxis)') + # The message should contain the itershape parameter + assert_(msg.find('(4,3)') >= 0, + f'Message "{msg}" doesn\'t contain itershape parameter (4,3)') + + try: + nditer([np.zeros((2, 1, 1)), np.zeros((2,))], + [], + [['writeonly', 'no_broadcast'], ['readonly']]) + raise AssertionError('Should have raised a broadcast error') + except ValueError as e: + msg = str(e) + # The message should contain the shape of the bad operand + assert_(msg.find('(2,1,1)') >= 0, + f'Message "{msg}" doesn\'t contain operand shape (2,1,1)') + # The message should contain the broadcast shape + assert_(msg.find('(2,1,2)') >= 0, + f'Message "{msg}" doesn\'t contain the broadcast shape (2,1,2)') + +def test_iter_flags_errors(): + # Check that bad combinations of flags produce errors + + a = 
arange(6) + + # Not enough operands + assert_raises(ValueError, nditer, [], [], []) + # Bad global flag + assert_raises(ValueError, nditer, [a], ['bad flag'], [['readonly']]) + # Bad op flag + assert_raises(ValueError, nditer, [a], [], [['readonly', 'bad flag']]) + # Bad order parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], order='G') + # Bad casting parameter + assert_raises(ValueError, nditer, [a], [], [['readonly']], casting='noon') + # op_flags must match ops + assert_raises(ValueError, nditer, [a] * 3, [], [['readonly']] * 2) + # Cannot track both a C and an F index + assert_raises(ValueError, nditer, a, + ['c_index', 'f_index'], [['readonly']]) + # Inner iteration and multi-indices/indices are incompatible + assert_raises(ValueError, nditer, a, + ['external_loop', 'multi_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'c_index'], [['readonly']]) + assert_raises(ValueError, nditer, a, + ['external_loop', 'f_index'], [['readonly']]) + # Must specify exactly one of readwrite/readonly/writeonly per operand + assert_raises(ValueError, nditer, a, [], [[]]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, [], [['writeonly', 'readwrite']]) + assert_raises(ValueError, nditer, a, + [], [['readonly', 'writeonly', 'readwrite']]) + # Python scalars are always readonly + assert_raises(TypeError, nditer, 1.5, [], [['writeonly']]) + assert_raises(TypeError, nditer, 1.5, [], [['readwrite']]) + # Array scalars are always readonly + assert_raises(TypeError, nditer, np.int32(1), [], [['writeonly']]) + assert_raises(TypeError, nditer, np.int32(1), [], [['readwrite']]) + # Check readonly array + a.flags.writeable = False + assert_raises(ValueError, nditer, a, [], [['writeonly']]) + assert_raises(ValueError, nditer, a, [], [['readwrite']]) + a.flags.writeable = True + # Multi-indices available only with the multi_index flag + i = nditer(arange(6), [], [['readonly']]) + assert_raises(ValueError, lambda i: i.multi_index, i) + # Index available only with an index flag + assert_raises(ValueError, lambda i: i.index, i) + # GotoCoords and GotoIndex incompatible with buffering or no_inner + + def assign_multi_index(i): + i.multi_index = (0,) + + def assign_index(i): + i.index = 0 + + def assign_iterindex(i): + i.iterindex = 0 + + def assign_iterrange(i): + i.iterrange = (0, 1) + i = nditer(arange(6), ['external_loop']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterindex, i) + assert_raises(ValueError, assign_iterrange, i) + i = nditer(arange(6), ['buffered']) + assert_raises(ValueError, assign_multi_index, i) + assert_raises(ValueError, assign_index, i) + assert_raises(ValueError, assign_iterrange, i) + # Can't iterate if size is zero + assert_raises(ValueError, nditer, np.array([])) + +def test_iter_slice(): + a, b, c = np.arange(3), np.arange(3), np.arange(3.) + i = nditer([a, b, c], [], ['readwrite']) + with i: + i[0:2] = (3, 3) + assert_equal(a, [3, 1, 2]) + assert_equal(b, [3, 1, 2]) + assert_equal(c, [0, 1, 2]) + i[1] = 12 + assert_equal(i[0:2], [3, 12]) + +def test_iter_assign_mapping(): + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + it.operands[0][...] = 3 + it.operands[0][...] 
= 14 + assert_equal(a, 14) + it = np.nditer(a, [], [['readwrite', 'updateifcopy']], + casting='same_kind', op_dtypes=[np.dtype('f4')]) + with it: + x = it.operands[0][-1:1] + x[...] = 14 + it.operands[0][...] = -1234 + assert_equal(a, -1234) + # check for no warnings on dealloc + x = None + it = None + +def test_iter_nbo_align_contig(): + # Check that byte order, alignment, and contig changes work + + # Byte order change by requesting a specific dtype + a = np.arange(6, dtype='f4') + au = a.byteswap() + au = au.view(au.dtype.newbyteorder()) + assert_(a.dtype.byteorder != au.dtype.byteorder) + i = nditer(au, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + with i: + # context manager triggers WRITEBACKIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 2 + assert_equal(au, [2] * 6) + del i # should not raise a warning + # Byte order change by requesting NBO + a = np.arange(6, dtype='f4') + au = a.byteswap() + au = au.view(au.dtype.newbyteorder()) + assert_(a.dtype.byteorder != au.dtype.byteorder) + with nditer(au, [], [['readwrite', 'updateifcopy', 'nbo']], + casting='equiv') as i: + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.dtypes[0].byteorder, a.dtype.byteorder) + assert_equal(i.operands[0].dtype.byteorder, a.dtype.byteorder) + assert_equal(i.operands[0], a) + i.operands[0][:] = 12345 + i.operands[0][:] = 2 + assert_equal(au, [2] * 6) + + # Unaligned input + a = np.zeros((6 * 4 + 1,), dtype='i1')[1:] + a.dtype = 'f4' + a[:] = np.arange(6, dtype='f4') + assert_(not a.flags.aligned) + # Without 'aligned', shouldn't copy + i = nditer(a, [], [['readonly']]) + assert_(not i.operands[0].flags.aligned) + assert_equal(i.operands[0], a) + # With 'aligned', should make a copy + with nditer(a, [], [['readwrite', 'updateifcopy', 'aligned']]) as i: + assert_(i.operands[0].flags.aligned) + # context manager triggers UPDATEIFCOPY on i at exit + assert_equal(i.operands[0], a) + i.operands[0][:] = 3 + assert_equal(a, [3] * 6) + + # Discontiguous input + a = arange(12) + # If it is contiguous, shouldn't copy + i = nditer(a[:6], [], [['readonly']]) + assert_(i.operands[0].flags.contiguous) + assert_equal(i.operands[0], a[:6]) + # If it isn't contiguous, should buffer + i = nditer(a[::2], ['buffered', 'external_loop'], + [['readonly', 'contig']], + buffersize=10) + assert_(i[0].flags.contiguous) + assert_equal(i[0], a[::2]) + +def test_iter_array_cast(): + # Check that arrays are cast as requested + + # No cast 'f4' -> 'f4' + a = np.arange(6, dtype='f4').reshape(2, 3) + i = nditer(a, [], [['readwrite']], op_dtypes=[np.dtype('f4')]) + with i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + + # Byte-order cast '<f4' -> '>f4' + a = np.arange(6, dtype='<f4').reshape(2, 3) + with nditer(a, [], [['readwrite', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('>f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('>f4')) +
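A standalone sketch of the 'equiv' casting rule these assertions rely on (illustrative only, not part of the patch): 'equiv' permits byte-order changes but nothing else, so the temporary holds identical values in swapped byte order.

import numpy as np

a = np.arange(6, dtype='<f4')
it = np.nditer(a, [], [['readonly', 'copy']], casting='equiv',
               op_dtypes=[np.dtype('>f4')])
assert it.operands[0].dtype == np.dtype('>f4')
assert (it.operands[0] == a).all()   # same values, different byte order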
+ # Safe case 'f4' -> 'f8' + a = np.arange(24, dtype='f4').reshape(2, 3, 4).swapaxes(1, 2) + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + # The memory layout of the temporary should match a (a is (48,4,16)) + # except negative strides get flipped to positive strides. + assert_equal(i.operands[0].strides, (96, 8, 32)) + a = a[::-1, :, ::-1] + i = nditer(a, [], [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f8')) + assert_equal(i.operands[0].strides, (96, 8, 32)) + + # Same-kind cast 'f8' -> 'f4' -> 'f8' + a = np.arange(24, dtype='f8').reshape(2, 3, 4).T + with nditer(a, [], + [['readwrite', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0], a) + assert_equal(i.operands[0].dtype, np.dtype('f4')) + assert_equal(i.operands[0].strides, (4, 16, 48)) + # Check that WRITEBACKIFCOPY is activated at exit + i.operands[0][2, 1, 1] = -12.5 + assert_(a[2, 1, 1] != -12.5) + assert_equal(a[2, 1, 1], -12.5) + + a = np.arange(6, dtype='i4')[::-2] + with nditer(a, [], + [['writeonly', 'updateifcopy']], + casting='unsafe', + op_dtypes=[np.dtype('f4')]) as i: + assert_equal(i.operands[0].dtype, np.dtype('f4')) + # Even though the stride was negative in 'a', it + # becomes positive in the temporary + assert_equal(i.operands[0].strides, (4,)) + i.operands[0][:] = [1, 2, 3] + assert_equal(a, [1, 2, 3]) + +def test_iter_array_cast_errors(): + # Check that invalid casts are caught + + # Need to enable copying for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly']], op_dtypes=[np.dtype('f8')]) + # Also need to allow casting for casts to occur + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], casting='equiv', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='no', + op_dtypes=[np.dtype('f4')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['writeonly', 'updateifcopy']], + casting='equiv', + op_dtypes=[np.dtype('f4')]) + # '<f4' -> '>f4' should not work with casting='no' + assert_raises(TypeError, nditer, arange(2, dtype='<f4'), [], + [['readonly', 'copy']], casting='no', + op_dtypes=[np.dtype('>f4')]) + # 'f4' -> 'f8' is a safe cast, but 'f8' -> 'f4' isn't + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, arange(2, dtype='f8'), [], + [['readwrite', 'updateifcopy']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, arange(2, dtype='f4'), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + assert_raises(TypeError, nditer, arange(2, dtype='i4'), [], + [['writeonly', 'updateifcopy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + +def test_iter_scalar_cast(): + # Check that scalars are cast as requested + + # No cast 'f4' -> 'f4' + i = nditer(np.float32(2.5), [], [['readonly']], + op_dtypes=[np.dtype('f4')]) + assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Safe cast 'f4' -> 'f8' + i = nditer(np.float32(2.5), [], + [['readonly', 'copy']], + casting='safe', + op_dtypes=[np.dtype('f8')]) + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.value.dtype, np.dtype('f8')) + assert_equal(i.value, 2.5) + # Same-kind cast 'f8' -> 'f4' + i = nditer(np.float64(2.5), [], + [['readonly', 'copy']], + casting='same_kind', + op_dtypes=[np.dtype('f4')]) + 
assert_equal(i.dtypes[0], np.dtype('f4')) + assert_equal(i.value.dtype, np.dtype('f4')) + assert_equal(i.value, 2.5) + # Unsafe cast 'f8' -> 'i4' + i = nditer(np.float64(3.0), [], + [['readonly', 'copy']], + casting='unsafe', + op_dtypes=[np.dtype('i4')]) + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.value.dtype, np.dtype('i4')) + assert_equal(i.value, 3) + # Readonly scalars may be cast even without setting COPY or BUFFERED + i = nditer(3, [], [['readonly']], op_dtypes=[np.dtype('f8')]) + assert_equal(i[0].dtype, np.dtype('f8')) + assert_equal(i[0], 3.) + +def test_iter_scalar_cast_errors(): + # Check that invalid casts are caught + + # Need to allow copying/buffering for write casts of scalars to occur + assert_raises(TypeError, nditer, np.float32(2), [], + [['readwrite']], op_dtypes=[np.dtype('f8')]) + assert_raises(TypeError, nditer, 2.5, [], + [['readwrite']], op_dtypes=[np.dtype('f4')]) + # 'f8' -> 'f4' isn't a safe cast if the value would overflow + assert_raises(TypeError, nditer, np.float64(1e60), [], + [['readonly']], + casting='safe', + op_dtypes=[np.dtype('f4')]) + # 'f4' -> 'i4' is neither a safe nor a same-kind cast + assert_raises(TypeError, nditer, np.float32(2), [], + [['readonly']], + casting='same_kind', + op_dtypes=[np.dtype('i4')]) + +def test_iter_object_arrays_basic(): + # Check that object arrays work + + obj = {'a': 3, 'b': 'd'} + a = np.array([[1, 2, 3], None, obj, None], dtype='O') + if HAS_REFCOUNT: + rc = sys.getrefcount(obj) + + # Need to allow references for object arrays + assert_raises(TypeError, nditer, a) + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a, ['refs_ok'], ['readonly']) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readonly'], order='C') + assert_(i.iterationneedsapi) + vals = [x_[()] for x_ in i] + assert_equal(np.array(vals, dtype='O'), a.reshape(2, 2).ravel(order='F')) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(obj), rc) + + i = nditer(a.reshape(2, 2).T, ['refs_ok', 'buffered'], + ['readwrite'], order='C') + with i: + for x in i: + x[...] = None + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_(sys.getrefcount(obj) == rc - 1) + assert_equal(a, np.array([None] * 4, dtype='O')) + +def test_iter_object_arrays_conversions(): + # Conversions to/from objects + a = np.arange(6, dtype='O') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + a = np.arange(6, dtype='i4') + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + # Non-contiguous object array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'O')]) + a = a['a'] + a[:] = np.arange(6) + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='i4') + with i: + for x in i: + x[...] += 1 + assert_equal(a, np.arange(6) + 1) + + # Non-contiguous value array + a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) + a = a['a'] + a[:] = np.arange(6) + 98172488 + i = nditer(a, ['refs_ok', 'buffered'], ['readwrite'], + casting='unsafe', op_dtypes='O') + with i: + ob = i[0][()] + if HAS_REFCOUNT: + rc = sys.getrefcount(ob) + for x in i: + x[...] 
+= 1 + if HAS_REFCOUNT: + newrc = sys.getrefcount(ob) + assert_(newrc == rc - 1) + assert_equal(a, np.arange(6) + 98172489) + +def test_iter_common_dtype(): + # Check that the iterator finds a common data type correctly + # (some checks are somewhat duplicate after adopting NEP 50) + + i = nditer([array([3], dtype='f4'), array([0], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='same_kind') + assert_equal(i.dtypes[0], np.dtype('f8')) + assert_equal(i.dtypes[1], np.dtype('f8')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4')], + ['common_dtype'], + [['readonly', 'copy']] * 2, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i8')) + assert_equal(i.dtypes[1], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), + array([2j], dtype='c8'), array([9], dtype='f8')], + ['common_dtype'], + [['readonly', 'copy']] * 4, + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + assert_equal(i.dtypes[3], np.dtype('c16')) + assert_equal(i.value, (3, -12, 2j, 9)) + + # When allocating outputs, other outputs aren't factored in + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], [], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('i4')) + assert_equal(i.dtypes[1], np.dtype('i4')) + assert_equal(i.dtypes[2], np.dtype('c16')) + # But, if common data types are requested, they are + i = nditer([array([3], dtype='i4'), None, array([2j], dtype='c16')], + ['common_dtype'], + [['readonly', 'copy'], + ['writeonly', 'allocate'], + ['writeonly']], + casting='safe') + assert_equal(i.dtypes[0], np.dtype('c16')) + assert_equal(i.dtypes[1], np.dtype('c16')) + assert_equal(i.dtypes[2], np.dtype('c16')) + +def test_iter_copy_if_overlap(): + # Ensure the iterator makes copies on read/write overlap, if requested + + # Copy not needed, 1 op + for flag in ['readonly', 'writeonly', 'readwrite']: + a = arange(10) + i = nditer([a], ['copy_if_overlap'], [[flag]]) + with i: + assert_(i.operands[0] is a) + + # Copy needed, 2 ops, read-write overlap + x = arange(10) + a = x[1:] + b = x[:-1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy not needed with elementwise, 2 ops, exactly same arrays + x = arange(10) + a = x + b = x + i = nditer([a, b], ['copy_if_overlap'], [['readonly', 'overlap_assume_elementwise'], + ['readwrite', 'overlap_assume_elementwise']]) + with i: + assert_(i.operands[0] is a and i.operands[1] is b) + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['readwrite']]) as i: + assert_(i.operands[0] is a and not np.shares_memory(i.operands[1], b)) + + # Copy not needed, 2 ops, no overlap + x = arange(10) + a = x[::2] + b = 
x[1::2] + i = nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) + assert_(i.operands[0] is a and i.operands[1] is b) + + # Copy needed, 2 ops, read-write overlap + x = arange(4, dtype=np.int8) + a = x[3:] + b = x.view(np.int32)[:1] + with nditer([a, b], ['copy_if_overlap'], [['readonly'], ['writeonly']]) as i: + assert_(not np.shares_memory(*i.operands)) + + # Copy needed, 3 ops, read-write overlap + for flag in ['writeonly', 'readwrite']: + x = np.ones([10, 10]) + a = x + b = x.T + c = x + with nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], [flag]]) as i: + a2, b2, c2 = i.operands + assert_(not np.shares_memory(a2, c2)) + assert_(not np.shares_memory(b2, c2)) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = x.T + c = x + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['readonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, read-only overlap + x = np.ones([10, 10]) + a = x + b = np.ones([10, 10]) + c = x.T + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['readonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + + # Copy not needed, 3 ops, write-only overlap + x = np.arange(7) + a = x[:3] + b = x[3:6] + c = x[4:7] + i = nditer([a, b, c], ['copy_if_overlap'], + [['readonly'], ['writeonly'], ['writeonly']]) + a2, b2, c2 = i.operands + assert_(a is a2) + assert_(b is b2) + assert_(c is c2) + +def test_iter_op_axes(): + # Check that custom axes work + + # Reverse the axes + a = arange(6).reshape(2, 3) + i = nditer([a, a.T], [], [['readonly']] * 2, op_axes=[[0, 1], [1, 0]]) + assert_(all([x == y for (x, y) in i])) + a = arange(24).reshape(2, 3, 4) + i = nditer([a.T, a], [], [['readonly']] * 2, op_axes=[[2, 1, 0], None]) + assert_(all([x == y for (x, y) in i])) + + # Broadcast 1D to any dimension + a = arange(1, 31).reshape(2, 3, 5) + b = arange(1, 3) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [0, -1, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(2, 1, 1)).ravel()) + b = arange(1, 4) + i = nditer([a, b], [], [['readonly']] * 2, op_axes=[None, [-1, 0, -1]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 3, 1)).ravel()) + b = arange(1, 6) + i = nditer([a, b], [], [['readonly']] * 2, + op_axes=[None, [np.newaxis, np.newaxis, 0]]) + assert_equal([x * y for (x, y) in i], (a * b.reshape(1, 1, 5)).ravel()) + + # Inner product-style broadcasting + a = arange(24).reshape(2, 3, 4) + b = arange(40).reshape(5, 2, 4) + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, + op_axes=[[0, 1, -1, -1], [-1, -1, 0, 1]]) + assert_equal(i.shape, (2, 3, 5, 2)) + + # Matrix product-style broadcasting + a = arange(12).reshape(3, 4) + b = arange(20).reshape(4, 5) + i = nditer([a, b], ['multi_index'], [['readonly']] * 2, + op_axes=[[0, -1], [-1, 1]]) + assert_equal(i.shape, (3, 5)) + +def test_iter_op_axes_errors(): + # Check that custom axes throws errors for bad inputs + + # Wrong number of items in op_axes + a = arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0], [1], [0]]) + # Out of bounds items in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[2, 1], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [2, -1]]) + # Duplicate items in op_axes + assert_raises(ValueError, nditer, [a, a], [], 
[['readonly']] * 2, + op_axes=[[0, 0], [0, 1]]) + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [1, 1]]) + + # Different sized arrays in op_axes + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [0, 1, 0]]) + + # Non-broadcastable dimensions in the result + assert_raises(ValueError, nditer, [a, a], [], [['readonly']] * 2, + op_axes=[[0, 1], [1, 0]]) + +def test_iter_copy(): + # Check that copying the iterator works correctly + a = arange(24).reshape(2, 3, 4) + + # Simple iterator + i = nditer(a) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Buffered iterator + i = nditer(a, ['buffered', 'ranged'], order='F', buffersize=3) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterindex = 3 + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (3, 9) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + i.iterrange = (2, 18) + next(i) + next(i) + j = i.copy() + assert_equal([x[()] for x in i], [x[()] for x in j]) + + # Casting iterator + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + + a = arange(24, dtype='<i4').reshape(2, 3, 4).T + with nditer(a, ['buffered'], order='F', casting='unsafe', + op_dtypes='>f8', buffersize=5) as i: + j = i.copy() + assert_equal([x[()] for x in j], a.ravel(order='F')) + + +def test_iter_copy_casts_structured(): + # This tests a copy of a structured -> unstructured (any to object), and many other + # casts, which cause this to require all steps in the casting machinery + # one level down as well as the iterator copy (which uses NpyAuxData clone) + in_dtype = np.dtype([("a", np.dtype("i,")), + ("b", np.dtype(">i,d,S17,>d,3f,O,i1"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype(">i,>i,S17,>d,>U3,3d,i1,O"))]) + arr = np.ones(1000, dtype=in_dtype) + + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + expected = arr["a"].astype(out_dtype["a"]) + assert_array_equal(res1["a"], expected) + assert_array_equal(res2["a"], expected) + + for field in in_dtype["b"].names: + # Note that the .base avoids the subarray field + expected = arr["b"][field].astype(out_dtype["b"][field].base) + assert_array_equal(res1["b"][field], expected) + assert_array_equal(res2["b"][field], expected) + + +def test_iter_copy_casts_structured2(): + # Similar to the above, this is a fairly arcane test to cover internals + in_dtype = np.dtype([("a", np.dtype("O,O")), + ("b", np.dtype("5O,3O,(1,)O,(1,)i,(1,)O"))]) + out_dtype = np.dtype([("a", np.dtype("O")), + ("b", np.dtype("O,3i,4O,4O,4i"))]) + + arr = np.ones(1, dtype=in_dtype) + it = np.nditer((arr,), ["buffered", "external_loop", "refs_ok"], + op_dtypes=[out_dtype], casting="unsafe") + it_copy = it.copy() + + res1 = next(it) + del it + res2 = next(it_copy) + del it_copy + + # Array of two structured scalars: + for res in res1, res2: + # Cast to tuple by getitem, which may be weird and changeable?: + assert isinstance(res["a"][0], tuple) + assert res["a"][0] == (1, 1) + + for res in res1, res2: + assert_array_equal(res["b"]["f0"][0], np.ones(5, dtype=object)) + assert_array_equal(res["b"]["f1"], np.ones((1, 3), dtype="i")) + assert res["b"]["f2"].shape == (1, 4) + assert_array_equal(res["b"]["f2"][0], np.ones(4, dtype=object)) + assert_array_equal(res["b"]["f3"][0], np.ones(4, dtype=object)) + assert_array_equal(res["b"]["f4"][0], np.ones(4, dtype="i")) + + +def
test_iter_allocate_output_simple(): + # Check that the iterator will properly allocate outputs + + # Simple case + a = arange(6) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_buffered_readwrite(): + # Allocated output with buffering + delay_bufalloc + + a = arange(6) + i = nditer([a, None], ['buffered', 'delay_bufalloc'], + [['readonly'], ['allocate', 'readwrite']]) + with i: + i.operands[1][:] = 1 + i.reset() + for x in i: + x[1][...] += x[0][...] + assert_equal(i.operands[1], a + 1) + +def test_iter_allocate_output_itorder(): + # The allocated output should match the iteration order + + # C-order input, best iteration order + a = arange(6, dtype='i4').reshape(2, 3) + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # F-order input, best iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).T + i = nditer([a, None], [], [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, a.strides) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + # Non-contiguous input, C iteration order + a = arange(24, dtype='i4').reshape(2, 3, 4).swapaxes(0, 1) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']], + order='C', + op_dtypes=[None, np.dtype('f4')]) + assert_equal(i.operands[1].shape, a.shape) + assert_equal(i.operands[1].strides, (32, 16, 4)) + assert_equal(i.operands[1].dtype, np.dtype('f4')) + +def test_iter_allocate_output_opaxes(): + # Specifying op_axes should work + + a = arange(24, dtype='i4').reshape(2, 3, 4) + i = nditer([None, a], [], [['writeonly', 'allocate'], ['readonly']], + op_dtypes=[np.dtype('u4'), None], + op_axes=[[1, 2, 0], None]) + assert_equal(i.operands[0].shape, (4, 2, 3)) + assert_equal(i.operands[0].strides, (4, 48, 16)) + assert_equal(i.operands[0].dtype, np.dtype('u4')) + +def test_iter_allocate_output_types_promotion(): + # Check type promotion of automatic outputs (this was more interesting + # before NEP 50...) 
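A minimal sketch of the promotion rule the following checks cover (illustrative only, not part of the patch): an operand passed as None with 'allocate' is created by the iterator, and when inputs are present its dtype follows the usual NEP 50 promotion.

import numpy as np

it = np.nditer([np.float32([3]), np.float64([0]), None], [],
               [['readonly']] * 2 + [['writeonly', 'allocate']])
assert it.dtypes[2] == np.dtype('f8')   # f4 and f8 promote to f8
assert it.operands[2].shape == (1,)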
+ + i = nditer([array([3], dtype='f4'), array([0], dtype='f8'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='i4'), array([0], dtype='f4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='f4'), array(0, dtype='f8'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('f8')) + i = nditer([array([3], dtype='u4'), array(0, dtype='i4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('i8')) + i = nditer([array([3], dtype='u4'), array(-12, dtype='i4'), None], [], + [['readonly']] * 2 + [['writeonly', 'allocate']]) + assert_equal(i.dtypes[2], np.dtype('i8')) + +def test_iter_allocate_output_types_byte_order(): + # Verify the rules for byte order changes + + # When there's just one input, the output type exactly matches + a = array([3], dtype='u4') + a = a.view(a.dtype.newbyteorder()) + i = nditer([a, None], [], + [['readonly'], ['writeonly', 'allocate']]) + assert_equal(i.dtypes[0], i.dtypes[1]) + # With two or more inputs, the output type is in native byte order + i = nditer([a, a, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(i.dtypes[0] != i.dtypes[2]) + assert_equal(i.dtypes[0].newbyteorder('='), i.dtypes[2]) + +def test_iter_allocate_output_types_scalar(): + # If the inputs are all scalars, the output should be a scalar + + i = nditer([None, 1, 2.3, np.float32(12), np.complex128(3)], [], + [['writeonly', 'allocate']] + [['readonly']] * 4) + assert_equal(i.operands[0].dtype, np.dtype('complex128')) + assert_equal(i.operands[0].ndim, 0) + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + class MyNDArray(np.ndarray): + __array_priority__ = 15 + + # subclass vs ndarray + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + b = np.arange(4).reshape(2, 2).T + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_equal(type(a), type(i.operands[2])) + assert_(type(b) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + + # If subtypes are disabled, we should get back an ndarray. + i = nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_equal(type(b), type(i.operands[2])) + assert_(type(a) is not type(i.operands[2])) + assert_equal(i.operands[2].shape, (2, 2)) + +def test_iter_allocate_output_errors(): + # Check that the iterator will throw errors for bad output allocations + + # Need an input if no output data type is specified + a = arange(6) + assert_raises(TypeError, nditer, [a, None], [], + [['writeonly'], ['writeonly', 'allocate']]) + # Allocated output should be flagged for writing + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['allocate', 'readonly']]) + # Allocated output can't have buffering without delayed bufalloc + assert_raises(ValueError, nditer, [a, None], ['buffered'], + ['allocate', 'readwrite']) + # Must specify dtype if there are no inputs (cannot promote existing ones; + # maybe this should use the 'f4' here, but it does not historically.) 
+ assert_raises(TypeError, nditer, [None, None], [], + [['writeonly', 'allocate'], + ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')]) + # If using op_axes, must specify all the axes + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 1]]) + # If using op_axes, the axes must be within bounds + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 3, 1]]) + # If using op_axes, there can't be duplicates + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['writeonly', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, 2, 1, 0]]) + # Not all axes may be specified if a reduction. If there is a hole + # in op_axes, this is an error. + a = arange(24, dtype='i4').reshape(2, 3, 4) + assert_raises(ValueError, nditer, [a, None], ["reduce_ok"], + [['readonly'], ['readwrite', 'allocate']], + op_dtypes=[None, np.dtype('f4')], + op_axes=[None, [0, np.newaxis, 2]]) + +def test_all_allocated(): + # When no output and no shape is given, `()` is used as shape. + i = np.nditer([None], op_dtypes=["int64"]) + assert i.operands[0].shape == () + assert i.dtypes == (np.dtype("int64"),) + + i = np.nditer([None], op_dtypes=["int64"], itershape=(2, 3, 4)) + assert i.operands[0].shape == (2, 3, 4) + +def test_iter_remove_axis(): + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + i.remove_axis(1) + assert_equal(list(i), a[:, 0, :].ravel()) + + a = a[::-1, :, :] + i = nditer(a, ['multi_index']) + i.remove_axis(0) + assert_equal(list(i), a[0, :, :].ravel()) + +def test_iter_remove_multi_index_inner_loop(): + # Check that removing multi-index support works + + a = arange(24).reshape(2, 3, 4) + + i = nditer(a, ['multi_index']) + assert_equal(i.ndim, 3) + assert_equal(i.shape, (2, 3, 4)) + assert_equal(i.itviews[0].shape, (2, 3, 4)) + + # Removing the multi-index tracking causes all dimensions to coalesce + before = list(i) + i.remove_multi_index() + after = list(i) + + assert_equal(before, after) + assert_equal(i.ndim, 1) + assert_raises(ValueError, lambda i: i.shape, i) + assert_equal(i.itviews[0].shape, (24,)) + + # Removing the inner loop means there's just one iteration + i.reset() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, ()) + i.enable_external_loop() + assert_equal(i.itersize, 24) + assert_equal(i[0].shape, (24,)) + assert_equal(i.value, arange(24)) + +def test_iter_iterindex(): + # Make sure iterindex works + + buffersize = 5 + a = arange(24).reshape(4, 3, 2) + for flags in ([], ['buffered']): + i = nditer(a, flags, buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + + i = nditer(a, flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 5 + assert_equal(iter_iterindices(i), list(range(5, 24))) + + i = nditer(a[::-1], flags, order='F', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 9 + assert_equal(iter_iterindices(i), list(range(9, 24))) + + i = nditer(a[::-1, ::-1], flags, order='C', buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 13 + assert_equal(iter_iterindices(i), list(range(13, 24))) + + i = nditer(a[::1, ::-1], flags, 
buffersize=buffersize) + assert_equal(iter_iterindices(i), list(range(24))) + i.iterindex = 23 + assert_equal(iter_iterindices(i), list(range(23, 24))) + i.reset() + i.iterindex = 2 + assert_equal(iter_iterindices(i), list(range(2, 24))) + +def test_iter_iterrange(): + # Make sure getting and resetting the iterrange works + + buffersize = 5 + a = arange(24, dtype='i4').reshape(4, 3, 2) + a_fort = a.ravel(order='F') + + i = nditer(a, ['ranged'], ['readonly'], order='F', + buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + i = nditer(a, ['ranged', 'buffered'], ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal([x[()] for x in i], a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal([x[()] for x in i], a_fort[r[0]:r[1]]) + + def get_array(i): + val = np.array([], dtype='f8') + for x in i: + val = np.concatenate((val, x)) + return val + + i = nditer(a, ['ranged', 'buffered', 'external_loop'], + ['readonly'], order='F', + op_dtypes='f8', buffersize=buffersize) + assert_equal(i.iterrange, (0, 24)) + assert_equal(get_array(i), a_fort) + for r in [(0, 24), (1, 2), (3, 24), (5, 5), (0, 20), (23, 24)]: + i.iterrange = r + assert_equal(i.iterrange, r) + assert_equal(get_array(i), a_fort[r[0]:r[1]]) + +def test_iter_buffering(): + # Test buffering with several buffer sizes and types + arrays = [] + # F-order swapped array + _tmp = np.arange(24, dtype='c16').reshape(2, 3, 4).T + _tmp = _tmp.view(_tmp.dtype.newbyteorder()).byteswap() + arrays.append(_tmp) + # Contiguous 1-dimensional array + arrays.append(np.arange(10, dtype='f4')) + # Unaligned array + a = np.zeros((4 * 16 + 1,), dtype='i1')[1:] + a.dtype = 'i4' + a[:] = np.arange(16, dtype='i4') + arrays.append(a) + # 4-D F-order array + arrays.append(np.arange(120, dtype='i4').reshape(5, 3, 2, 4).T) + for a in arrays: + for buffersize in (1, 2, 3, 5, 8, 11, 16, 1024): + vals = [] + i = nditer(a, ['buffered', 'external_loop'], + [['readonly', 'nbo', 'aligned']], + order='C', + casting='equiv', + buffersize=buffersize) + while not i.finished: + assert_(i[0].size <= buffersize) + vals.append(i[0].copy()) + i.iternext() + assert_equal(np.concatenate(vals), a.ravel(order='C')) + +def test_iter_write_buffering(): + # Test that buffering of writes is working + + # F-order swapped array + a = np.arange(24).reshape(2, 3, 4).T + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered'], + [['readwrite', 'nbo', 'aligned']], + casting='equiv', + order='C', + buffersize=16) + x = 0 + with i: + while not i.finished: + i[0] = x + x += 1 + i.iternext() + assert_equal(a.ravel(order='C'), np.arange(24)) + +def test_iter_buffering_delayed_alloc(): + # Test that delaying buffer allocation works + + a = np.arange(6) + b = np.arange(1, dtype='f4') + i = nditer([a, b], ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok'], + ['readwrite'], + casting='unsafe', + op_dtypes='f4') + assert_(i.has_delayed_bufalloc) + assert_raises(ValueError, lambda i: i.multi_index, i) + assert_raises(ValueError, lambda i: i[0], i) + assert_raises(ValueError, lambda i: i[0:2], i) + + def assign_iter(i): + i[0] = 0 + assert_raises(ValueError, assign_iter, i) + + i.reset() + assert_(not 
i.has_delayed_bufalloc) + assert_equal(i.multi_index, (0,)) + with i: + assert_equal(i[0], 0) + i[1] = 1 + assert_equal(i[0:2], [0, 1]) + assert_equal([[x[0][()], x[1][()]] for x in i], list(zip(range(6), [1] * 6))) + +def test_iter_buffered_cast_simple(): + # Test that buffering can handle a simple cast + + a = np.arange(10, dtype='f4') + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f4')) + +def test_iter_buffered_cast_byteswapped(): + # Test that buffering can handle a cast which requires swap->cast->swap + + a = np.arange(10, dtype='f4') + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f4')) + + with suppress_warnings() as sup: + sup.filter(np.exceptions.ComplexWarning) + + a = np.arange(10, dtype='f8') + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='unsafe', + op_dtypes=[np.dtype('c8').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + + assert_equal(a, 2 * np.arange(10, dtype='f8')) + +def test_iter_buffered_cast_byteswapped_complex(): + # Test that buffering can handle a cast which requires swap->cast->copy + + a = np.arange(10, dtype='c8') + a = a.view(a.dtype.newbyteorder()).byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype='c8') + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16').newbyteorder()], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype='c8') + 4j) + + a = np.arange(10, dtype=np.clongdouble) + a = a.view(a.dtype.newbyteorder()).byteswap() + a += 2j + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('c16')], + buffersize=3) + with i: + for v in i: + v[...] *= 2 + assert_equal(a, 2 * np.arange(10, dtype=np.clongdouble) + 4j) + + a = np.arange(10, dtype=np.longdouble) + a = a.view(a.dtype.newbyteorder()).byteswap() + i = nditer(a, ['buffered', 'external_loop'], + [['readwrite', 'nbo', 'aligned']], + casting='same_kind', + op_dtypes=[np.dtype('f4')], + buffersize=7) + with i: + for v in i: + v[...] 
*= 2 + assert_equal(a, 2 * np.arange(10, dtype=np.longdouble)) + +def test_iter_buffered_cast_structured_type(): + # Tests buffering of structured types + + # simple -> struct type (duplicates the value) + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.arange(3, dtype='f4') + 0.5 + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [np.array(x) for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + + # object -> struct type + sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2, 3)), ('d', 'O')] + a = np.zeros((3,), dtype='O') + a[0] = (0.5, 0.5, [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]], 0.5) + a[1] = (1.5, 1.5, [[1.5, 1.5, 1.5], [1.5, 1.5, 1.5]], 1.5) + a[2] = (2.5, 2.5, [[2.5, 2.5, 2.5], [2.5, 2.5, 2.5]], 2.5) + if HAS_REFCOUNT: + rc = sys.getrefcount(a[0]) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt) + vals = [x.copy() for x in i] + assert_equal(vals[0]['a'], 0.5) + assert_equal(vals[0]['b'], 0) + assert_equal(vals[0]['c'], [[(0.5)] * 3] * 2) + assert_equal(vals[0]['d'], 0.5) + assert_equal(vals[1]['a'], 1.5) + assert_equal(vals[1]['b'], 1) + assert_equal(vals[1]['c'], [[(1.5)] * 3] * 2) + assert_equal(vals[1]['d'], 1.5) + assert_equal(vals[0].dtype, np.dtype(sdt)) + vals, i, x = [None] * 3 + if HAS_REFCOUNT: + assert_equal(sys.getrefcount(a[0]), rc) + + # single-field struct type -> simple + sdt = [('a', 'f4')] + a = np.array([(5.5,), (8,)], dtype=sdt) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4') + assert_equal([x_[()] for x_ in i], [5, 8]) + + # make sure multi-field struct type -> simple doesn't work + sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt) + assert_raises(TypeError, lambda: ( + nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes='i4'))) + + # struct type -> struct type (field-wise copy) + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + assert_equal([np.array(x_) for x_ in i], + [np.array((1, 2, 3), dtype=sdt2), + np.array((4, 5, 6), dtype=sdt2)]) + + +def test_iter_buffered_cast_structured_type_failure_with_cleanup(): + # make sure struct type -> struct type with different + # number of fields fails + sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')] + sdt2 = [('b', 'O'), ('a', 'f8')] + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1) + + for intent in ["readwrite", "readonly", "writeonly"]: + # This test was initially designed to test an error at a different + # place, but will now raise earlier due to the cast not being possible: + # `assert np.can_cast(a.dtype, sdt2, casting="unsafe")` fails. + # Without a faulty DType, there is probably no reliable + # way to get the initial tested behaviour.
+ simple_arr = np.array([1, 2], dtype="i,i") # requires clean up + with pytest.raises(TypeError): + nditer((simple_arr, a), ['buffered', 'refs_ok'], [intent, intent], + casting='unsafe', op_dtypes=["f,f", sdt2]) + + +def test_buffered_cast_error_paths(): + with pytest.raises(ValueError): + # The input is cast into an `S3` buffer + np.nditer((np.array("a", dtype="S1"),), op_dtypes=["i"], + casting="unsafe", flags=["buffered"]) + + # The `M8[ns]` is cast into the `S3` output + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + with pytest.raises(ValueError): + with it: + buf = next(it) + buf[...] = "a" # cannot be converted to int. + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.skipif(not HAS_REFCOUNT, reason="PyPy seems to not hit this.") +def test_buffered_cast_error_paths_unraisable(): + # The following gives an unraisable error. Pytest sometimes captures that + # (depending on the Python and/or pytest version). So with Python>=3.8 this can + # probably be cleaned out in the future to check for + # pytest.PytestUnraisableExceptionWarning: + code = textwrap.dedent(""" + import numpy as np + + it = np.nditer((np.array(1, dtype="i"),), op_dtypes=["S1"], + op_flags=["writeonly"], casting="unsafe", flags=["buffered"]) + buf = next(it) + buf[...] = "a" + del buf, it # Flushing only happens during deallocate right now. + """) + res = subprocess.check_output([sys.executable, "-c", code], + stderr=subprocess.STDOUT, text=True) + assert "ValueError" in res + + +def test_iter_buffered_cast_subarray(): + # Tests buffering of subarrays + + # one element -> many (copies it to all) + sdt1 = [('a', 'f4')] + sdt2 = [('a', 'f8', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + for x, count in zip(i, list(range(6))): + assert_(np.all(x['a'] == count)) + + # one element -> many -> back (copies it to all) + sdt1 = [('a', 'O', (1, 1))] + sdt2 = [('a', 'O', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_(np.all(x['a'] == count)) + x['a'][0] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1) + 2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'], + casting='unsafe', + op_dtypes=sdt2) + with i: + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + x['a'] += 2 + count += 1 + assert_equal(a['a'], np.arange(6).reshape(6, 1, 1, 1) * np.ones((1, 3, 2, 2)) + 2) + + # many -> one element -> back (copies just element 0) + sdt1 = [('a', 'f8', (3, 2, 2))] + sdt2 = [('a', 'O', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:, 0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> one element (copies just element 0) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (1,))] + a = np.zeros((6,), dtype=sdt1) + a['a'][:,
0, 0, 0] = np.arange(6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], count) + count += 1 + + # many -> matching shape (straightforward copy) + sdt1 = [('a', 'O', (3, 2, 2))] + sdt2 = [('a', 'f4', (3, 2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 3 * 2 * 2).reshape(6, 3, 2, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a']) + count += 1 + + # vector -> smaller vector (truncates) + sdt1 = [('a', 'f8', (6,))] + sdt2 = [('a', 'f4', (2,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 6).reshape(6, 6) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'], a[count]['a'][:2]) + count += 1 + + # vector -> bigger vector (pads with zeros) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (6,))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2], a[count]['a']) + assert_equal(x['a'][2:], [0, 0, 0, 0]) + count += 1 + + # vector -> matrix (broadcasts) + sdt1 = [('a', 'f8', (2,))] + sdt2 = [('a', 'f4', (2, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][0], a[count]['a']) + assert_equal(x['a'][1], a[count]['a']) + count += 1 + + # vector -> matrix (broadcasts and zero-pads) + sdt1 = [('a', 'f8', (2, 1))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2).reshape(6, 2, 1) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 0]) + assert_equal(x['a'][2, :], [0, 0]) + count += 1 + + # matrix -> matrix (truncates and zero-pads) + sdt1 = [('a', 'f8', (2, 3))] + sdt2 = [('a', 'f4', (3, 2))] + a = np.zeros((6,), dtype=sdt1) + a['a'] = np.arange(6 * 2 * 3).reshape(6, 2, 3) + i = nditer(a, ['buffered', 'refs_ok'], ['readonly'], + casting='unsafe', + op_dtypes=sdt2) + assert_equal(i[0].dtype, np.dtype(sdt2)) + count = 0 + for x in i: + assert_equal(x['a'][:2, 0], a[count]['a'][:, 0]) + assert_equal(x['a'][:2, 1], a[count]['a'][:, 1]) + assert_equal(x['a'][2, :], [0, 0]) + count += 1 + +def test_iter_buffering_badwriteback(): + # Writing back from a buffer cannot combine elements + + # a needs write buffering, but had a broadcast dimension + a = np.arange(6).reshape(2, 3, 1) + b = np.arange(12).reshape(2, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + + # But if a is readonly, it's fine + nditer([a, b], ['buffered', 'external_loop'], + [['readonly'], ['writeonly']], + order='C') + + # If a has just one element, it's fine too (constant 0 stride, a reduction) + a = np.arange(1).reshape(1, 1, 1) + nditer([a, b], ['buffered', 
'external_loop', 'reduce_ok'], + [['readwrite'], ['writeonly']], + order='C') + + # check that it fails on other dimensions too + a = np.arange(6).reshape(1, 3, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + a = np.arange(4).reshape(2, 1, 2) + assert_raises(ValueError, nditer, [a, b], + ['buffered', 'external_loop'], + [['readwrite'], ['writeonly']], + order='C') + +def test_iter_buffering_string(): + # Safe casting disallows shrinking strings + a = np.array(['abc', 'a', 'abcd'], dtype=np.bytes_) + assert_equal(a.dtype, np.dtype('S4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='S2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='S6') + assert_equal(i[0], b'abc') + assert_equal(i[0].dtype, np.dtype('S6')) + + a = np.array(['abc', 'a', 'abcd'], dtype=np.str_) + assert_equal(a.dtype, np.dtype('U4')) + assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'], + op_dtypes='U2') + i = nditer(a, ['buffered'], ['readonly'], op_dtypes='U6') + assert_equal(i[0], 'abc') + assert_equal(i[0].dtype, np.dtype('U6')) + +def test_iter_buffering_growinner(): + # Test that the inner loop grows when no buffering is needed + a = np.arange(30) + i = nditer(a, ['buffered', 'growinner', 'external_loop'], + buffersize=5) + # Should end up with just one inner loop here + assert_equal(i[0].size, a.size) + + +@pytest.mark.parametrize("read_or_readwrite", ["readonly", "readwrite"]) +def test_iter_contig_flag_reduce_error(read_or_readwrite): + # Test that a non-contiguous operand is rejected without buffering. + # NOTE: This is true even for a reduction, where we return a 0-stride + # below! + with pytest.raises(TypeError, match="Iterator operand required buffering"): + it = np.nditer( + (np.zeros(()),), flags=["external_loop", "reduce_ok"], + op_flags=[(read_or_readwrite, "contig"),], itershape=(10,)) + + +@pytest.mark.parametrize("arr", [ + lambda: np.zeros(()), + lambda: np.zeros((20, 1))[::20], + lambda: np.zeros((1, 20))[:, ::20] + ]) +def test_iter_contig_flag_single_operand_strides(arr): + """ + Tests the strides with the contig flag for both broadcast and non-broadcast + operands in 3 cases where the logic is needed: + 1. When everything has a zero stride, the broadcast op needs to be repeated + 2. When the reduce axis is the last axis (first to iterate). + 3. When the reduce axis is the first axis (last to iterate). + + NOTE: The semantics of the contig flag are not clearly defined when + it comes to reduction. It is unclear that there are any users. + """ + first_op = np.ones((10, 10)) + broadcast_op = arr() + red_op = arr() + # Add a first operand to ensure no axis-reordering and the result shape.
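+ # ("contig" asks the iterator to hand each operand to the inner loop + # with a contiguous stride, buffering whenever the memory layout does + # not already provide one. A rough illustration, assuming float64 + # (itemsize 8): + # >>> view = np.ones(8)[::2] # stride 16, not contiguous + # >>> it = np.nditer(view, ['buffered', 'external_loop'], + # ... [['readonly', 'contig']]) + # >>> next(it).strides # buffered chunk, now contiguous + # (8,)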
+ iterator = np.nditer( + (first_op, broadcast_op, red_op), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2 + [("readwrite", "contig")]) + + with iterator: + iterator.reset() + for f, b, r in iterator: + # The first operand is contiguous, we should have a view + assert np.shares_memory(f, first_op) + # Although broadcast, the second op always has a contiguous stride + assert b.strides[0] == 8 + assert not np.shares_memory(b, broadcast_op) + # The reduction has a contiguous stride or a 0 stride + if red_op.ndim == 0 or red_op.shape[-1] == 1: + assert r.strides[0] == 0 + else: + # The stride is 8, although it was not originally: + assert r.strides[0] == 8 + # If the reduce stride is 0, buffering makes no difference, but we + # do it anyway right now: + assert not np.shares_memory(r, red_op) + + +@pytest.mark.xfail(reason="The contig flag was always buggy.") +def test_iter_contig_flag_incorrect(): + # This case does the wrong thing... + iterator = np.nditer( + (np.ones((10, 10)).T, np.ones((1, 10))), + flags=["external_loop", "reduce_ok", "buffered", "delay_bufalloc"], + op_flags=[("readonly", "contig")] * 2) + + with iterator: + iterator.reset() + for a, b in iterator: + # Remove a and b from locals (pytest may want to format them) + a, b = a.strides, b.strides + assert a == (8,) + assert b == (8,) # should be 8 but is 0 due to axis reorder + + +@pytest.mark.slow +def test_iter_buffered_reduce_reuse(): + # large enough array for all views, including negative strides. + a = np.arange(2 * 3**5)[3**5:3**5 + 1] + flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok'] + op_flags = [('readonly',), ('readwrite', 'allocate')] + op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]] + # wrong dtype to force buffering + op_dtypes = [float, a.dtype] + + def get_params(): + for xs in range(-3**2, 3**2 + 1): + for ys in range(xs, 3**2 + 1): + for op_axes in op_axes_list: + # the last stride is reduced and therefore not + # important for this test, as it is the inner stride. + strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize) + arr = np.lib.stride_tricks.as_strided(a, (3, 3, 3), strides) + + for skip in [0, 1]: + yield arr, op_axes, skip + + for arr, op_axes, skip in get_params(): + nditer2 = np.nditer([arr.copy(), None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + op_dtypes=op_dtypes) + with nditer2: + nditer2.operands[-1][...] = 0 + nditer2.reset() + nditer2.iterindex = skip + + for (a2_in, b2_in) in nditer2: + b2_in += a2_in.astype(np.int_) + + comp_res = nditer2.operands[-1] + + for bufsize in range(3**3): + nditer1 = np.nditer([arr, None], + op_axes=op_axes, flags=flags, op_flags=op_flags, + buffersize=bufsize, op_dtypes=op_dtypes) + with nditer1: + nditer1.operands[-1][...] = 0 + nditer1.reset() + nditer1.iterindex = skip + + for (a1_in, b1_in) in nditer1: + b1_in += a1_in.astype(np.int_) + + res = nditer1.operands[-1] + assert_array_equal(res, comp_res) + + +def test_iter_buffered_reduce_reuse_core(): + # NumPy re-uses buffers for broadcast operands (as of writing, when reading). + # Test this even if the offset is manually set at some point during + # the iteration. (not a particularly tricky path) + arr = np.empty((1, 6, 4, 1)).reshape(1, 6, 4, 1)[:, ::3, ::2, :] + arr[...] = np.arange(arr.size).reshape(arr.shape) + # First and last dimension are broadcast dimensions.
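+ # (np.broadcast_to returns a read-only view whose broadcast axes have + # stride 0, so all 100 repeats below share one block of memory; roughly: + # >>> np.broadcast_to(np.arange(2.), (3, 2)).strides + # (0, 8) + # which is what makes re-using a single core buffer possible here.)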
+ arr = np.broadcast_to(arr, (100, 2, 2, 2)) + + flags = ['buffered', 'reduce_ok', 'refs_ok', 'multi_index'] + op_flags = [('readonly',)] + + buffersize = 100 # small enough to not fit the whole array + it = np.nditer(arr, flags=flags, op_flags=op_flags, buffersize=buffersize) + + # Iterate a bit (this will cause buffering internally) + expected = [next(it) for _ in range(11)] + # Now, manually advance to inside the core (the +1) + it.iterindex = 10 * (2 * 2 * 2) + 1 + result = [next(it) for _ in range(10)] + + assert expected[1:] == result + + +def test_iter_no_broadcast(): + # Test that the no_broadcast flag works + a = np.arange(24).reshape(2, 3, 4) + b = np.arange(6).reshape(2, 3, 1) + c = np.arange(12).reshape(3, 4) + + nditer([a, b, c], [], + [['readonly', 'no_broadcast'], + ['readonly'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly', 'no_broadcast'], ['readonly']]) + assert_raises(ValueError, nditer, [a, b, c], [], + [['readonly'], ['readonly'], ['readonly', 'no_broadcast']]) + + +class TestIterNested: + + def test_basic(self): + # Test nested iteration basic usage + a = arange(12).reshape(2, 3, 2) + + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_reorder(self): + # Test nested iteration with axis reordering + a = arange(12).reshape(2, 3, 2) + + # In 'K' order (default), it gets reordered + i, j = np.nested_iters(a, [[0], [2, 1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, it doesn't + i, j = np.nested_iters(a, [[0], [2, 1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]]) + + i, j = np.nested_iters(a, [[1, 0], [2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]]) + + i, j = np.nested_iters(a, [[2, 0], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]]) + + def test_flip_axes(self): + # Test nested iteration with negative axes + a = arange(12).reshape(2, 3, 2)[::-1, ::-1, ::-1] + + # In 'K' order (default), the axes all get flipped + i, j = np.nested_iters(a, [[0], [1, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[0, 1], [2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]]) + + i, j = np.nested_iters(a, [[0, 2], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + # In 'C' order, flipping axes is disabled + i, j = np.nested_iters(a, [[0], [1, 2]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]]) + + i, j = np.nested_iters(a, [[0, 1], [2]], order='C') + vals = [list(j) for 
_ in i] + assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]]) + + i, j = np.nested_iters(a, [[0, 2], [1]], order='C') + vals = [list(j) for _ in i] + assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]]) + + def test_broadcast(self): + # Test nested iteration with broadcasting + a = arange(2).reshape(2, 1) + b = arange(3).reshape(1, 3) + + i, j = np.nested_iters([a, b], [[0], [1]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]]) + + i, j = np.nested_iters([a, b], [[1], [0]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]]) + + def test_dtype_copy(self): + # Test nested iteration with a copy to change dtype + + # copy + a = arange(6, dtype='i4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readonly', 'copy'], + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2], [3, 4, 5]]) + vals = None + + # writebackifcopy - using context manager + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + # writebackifcopy - using close() + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + op_flags=['readwrite', 'updateifcopy'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[0, 1, 2], [3, 4, 5]]) + i.close() + j.close() + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] += 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + + def test_0d(self): + a = np.arange(12).reshape(2, 3, 2) + i, j = np.nested_iters(a, [[], [1, 0, 2]]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]) + + i, j = np.nested_iters(a, [[1, 0, 2], []]) + vals = [list(j) for _ in i] + assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]]) + + i, j, k = np.nested_iters(a, [[2, 0], [], [1]]) + vals = [] + for x in i: + for y in j: + vals.append(list(k)) + assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]]) + + def test_iter_nested_iters_dtype_buffered(self): + # Test nested iteration with buffering to change dtype + + a = arange(6, dtype='f4').reshape(2, 3) + i, j = np.nested_iters(a, [[0], [1]], + flags=['buffered'], + op_flags=['readwrite'], + casting='same_kind', + op_dtypes='f8') + with i, j: + assert_equal(j[0].dtype, np.dtype('f8')) + for x in i: + for y in j: + y[...] 
+= 1 + assert_equal(a, [[1, 2, 3], [4, 5, 6]]) + +def test_iter_reduction_error(): + + a = np.arange(6) + assert_raises(ValueError, nditer, [a, None], [], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + + a = np.arange(6).reshape(2, 3) + assert_raises(ValueError, nditer, [a, None], ['external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + +def test_iter_reduction(): + # Test doing reductions with the iterator + + a = np.arange(6) + i = nditer([a, None], ['reduce_ok'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0], [-1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + a = np.arange(6).reshape(2, 3) + i = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[0, 1], [-1, -1]]) + # Need to initialize the output operand to the addition unit + with i: + i.operands[1][...] = 0 + # Reduction shape/strides for the output + assert_equal(i[1].shape, (6,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] = y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + # Since no axes were specified, should have allocated a scalar + assert_equal(i.operands[1].ndim, 0) + assert_equal(i.operands[1], np.sum(a)) + + # This is a tricky reduction case for the buffering double loop + # to handle + a = np.ones((2, 3, 5)) + it1 = nditer([a, None], ['reduce_ok', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]]) + it2 = nditer([a, None], ['reduce_ok', 'external_loop', + 'buffered', 'delay_bufalloc'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[None, [0, -1, 1]], buffersize=10) + with it1, it2: + it1.operands[1].fill(0) + it2.operands[1].fill(0) + it2.reset() + for x in it1: + x[1][...] += x[0] + for x in it2: + x[1][...] += x[0] + assert_equal(it1.operands[1], it2.operands[1]) + assert_equal(it2.operands[1].sum(), a.size) + +def test_iter_buffering_reduction(): + # Test doing buffered reductions with the iterator + + a = np.arange(6) + b = np.array(0., dtype='f8').byteswap() + b = b.view(b.dtype.newbyteorder()) + i = nditer([a, b], ['reduce_ok', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0], [-1]]) + with i: + assert_equal(i[1].dtype, np.dtype('f8')) + assert_(i[1].dtype != b.dtype) + # Do the reduction + for x, y in i: + y[...] += x + # Since no axes were specified, should have allocated a scalar + assert_equal(b, np.sum(a)) + + a = np.arange(6).reshape(2, 3) + b = np.array([0, 0], dtype='f8').byteswap() + b = b.view(b.dtype.newbyteorder()) + i = nditer([a, b], ['reduce_ok', 'external_loop', 'buffered'], + [['readonly'], ['readwrite', 'nbo']], + op_axes=[[0, 1], [0, -1]]) + # Reduction shape/strides for the output + with i: + assert_equal(i[1].shape, (3,)) + assert_equal(i[1].strides, (0,)) + # Do the reduction + for x, y in i: + # Use a for loop instead of ``y[...] += x`` + # (equivalent to ``y[...] 
= y[...].copy() + x``), + # because y has zero strides we use for the reduction + for j in range(len(y)): + y[j] += x[j] + assert_equal(b, np.sum(a, axis=1)) + + # Iterator inner double loop was wrong on this one + p = np.arange(2) + 1 + it = np.nditer([p, None], + ['delay_bufalloc', 'reduce_ok', 'buffered', 'external_loop'], + [['readonly'], ['readwrite', 'allocate']], + op_axes=[[-1, 0], [-1, -1]], + itershape=(2, 2)) + with it: + it.operands[1].fill(0) + it.reset() + assert_equal(it[0], [1, 2, 1, 2]) + + # Iterator inner loop should take argument contiguity into account + x = np.ones((7, 13, 8), np.int8)[4:6, 1:11:6, 1:5].transpose(1, 2, 0) + x[...] = np.arange(x.size).reshape(x.shape) + y_base = np.arange(4 * 4, dtype=np.int8).reshape(4, 4) + y_base_copy = y_base.copy() + y = y_base[::2, :, None] + + it = np.nditer([y, x], + ['buffered', 'external_loop', 'reduce_ok'], + [['readwrite'], ['readonly']]) + with it: + for a, b in it: + a.fill(2) + + assert_equal(y_base[1::2], y_base_copy[1::2]) + assert_equal(y_base[::2], 2) + +def test_iter_buffering_reduction_reuse_reduce_loops(): + # There was a bug triggering reuse of the reduce loop inappropriately, + # which caused processing to happen in unnecessarily small chunks + # and overran the buffer. + + a = np.zeros((2, 7)) + b = np.zeros((1, 7)) + it = np.nditer([a, b], flags=['reduce_ok', 'external_loop', 'buffered'], + op_flags=[['readonly'], ['readwrite']], + buffersize=5) + + with it: + bufsizes = [x.shape[0] for x, y in it] + assert_equal(bufsizes, [5, 2, 5, 2]) + assert_equal(sum(bufsizes), a.size) + +def test_iter_writemasked_badinput(): + a = np.zeros((2, 3)) + b = np.zeros((3,)) + m = np.array([[True, True, False], [False, True, False]]) + m2 = np.array([True, True, False]) + m3 = np.array([0, 1, 1], dtype='u1') + mbad1 = np.array([0, 1, 1], dtype='i1') + mbad2 = np.array([0, 1, 1], dtype='f4') + + # Need an 'arraymask' if any operand is 'writemasked' + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite', 'writemasked'], ['readonly']]) + + # A 'writemasked' operand must not be readonly + assert_raises(ValueError, nditer, [a, m], [], + [['readonly', 'writemasked'], ['readonly', 'arraymask']]) + + # 'writemasked' and 'arraymask' may not be used together + assert_raises(ValueError, nditer, [a, m], [], + [['readonly'], ['readwrite', 'arraymask', 'writemasked']]) + + # 'arraymask' may only be specified once + assert_raises(ValueError, nditer, [a, m, m2], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask'], + ['readonly', 'arraymask']]) + + # An 'arraymask' with nothing 'writemasked' also doesn't make sense + assert_raises(ValueError, nditer, [a, m], [], + [['readwrite'], ['readonly', 'arraymask']]) + + # A writemasked reduction requires a similarly smaller mask + assert_raises(ValueError, nditer, [a, b, m], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # But this should work with a smaller/equal mask to the reduction operand + np.nditer([a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # The arraymask itself cannot be a reduction + assert_raises(ValueError, nditer, [a, b, m2], ['reduce_ok'], + [['readonly'], + ['readwrite', 'writemasked'], + ['readwrite', 'arraymask']]) + + # A uint8 mask is ok too + np.nditer([a, m3], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # An int8 mask isn't ok + assert_raises(TypeError, np.nditer, 
[a, mbad1], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + # A float32 mask isn't ok + assert_raises(TypeError, np.nditer, [a, mbad2], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['f4', None], + casting='same_kind') + + +def _is_buffered(iterator): + try: + iterator.itviews + except ValueError: + return True + return False + +@pytest.mark.parametrize("a", + [np.zeros((3,), dtype='f8'), + np.zeros((9876, 3 * 5), dtype='f8')[::2, :], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, :], + # Also test with the last dimension strided (so it does not fit if + # there is repeated access) + np.zeros((9,), dtype='f8')[::3], + np.zeros((9876, 3 * 10), dtype='f8')[::2, ::5], + np.zeros((4, 312, 124, 3), dtype='f8')[::2, :, ::2, ::-1]]) +def test_iter_writemasked(a): + # Note, the slicing above is to ensure that nditer cannot combine multiple + # axes into one. The repetition is just to make things a bit more + # interesting. + shape = a.shape + reps = shape[-1] // 3 + msk = np.empty(shape, dtype=bool) + msk[...] = [True, True, False] * reps + + # When buffering is unused, 'writemasked' effectively does nothing. + # It's up to the user of the iterator to obey the requested semantics. + it = np.nditer([a, msk], [], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + with it: + for x, m in it: + x[...] = 1 + # Because we violated the semantics, all the values became 1 + assert_equal(a, np.broadcast_to([1, 1, 1] * reps, shape)) + + # Even if buffering is enabled, we may still be accessing the array + # directly. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']]) + # @seberg: I honestly don't currently understand why a "buffered" iterator + # would end up not using a buffer for the small array here at least when + # "writemasked" is used, that seems confusing... Check by testing for + # actual memory overlap! + is_buffered = True + with it: + for x, m in it: + x[...] = 2.5 + if np.may_share_memory(x, a): + is_buffered = False + + if not is_buffered: + # Because we violated the semantics, all the values became 2.5 + assert_equal(a, np.broadcast_to([2.5, 2.5, 2.5] * reps, shape)) + else: + # For large sizes, the iterator may be buffered: + assert_equal(a, np.broadcast_to([2.5, 2.5, 1] * reps, shape)) + a[...] = 2.5 + + # If buffering will definitely happen, for instance because of + # a cast, only the items selected by the mask will be copied back from + # the buffer. + it = np.nditer([a, msk], ['buffered'], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=['i8', None], + casting='unsafe') + with it: + for x, m in it: + x[...] = 3 + # Even though we violated the semantics, only the selected values + # were copied back + assert_equal(a, np.broadcast_to([3, 3, 2.5] * reps, shape)) + + +@pytest.mark.parametrize(["mask", "mask_axes"], [ + # Allocated operand (only broadcasts with -1) + (None, [-1, 0]), + # Reduction along the first dimension (with and without op_axes) + (np.zeros((1, 4), dtype="bool"), [0, 1]), + (np.zeros((1, 4), dtype="bool"), None), + # Test 0-D and -1 op_axes + (np.zeros(4, dtype="bool"), [-1, 0]), + (np.zeros((), dtype="bool"), [-1, -1]), + (np.zeros((), dtype="bool"), None)]) +def test_iter_writemasked_broadcast_error(mask, mask_axes): + # This assumes that a readwrite mask makes sense. This is likely not the + # case and should simply be deprecated.
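+ # (For reference, a valid pairing has exactly one "arraymask" operand + # gating writebacks of the "writemasked" ones; values illustrative only: + # >>> a = np.zeros(3) + # >>> m = np.array([True, False, True]) + # >>> it = np.nditer([a, m], ['buffered'], + # ... [['readwrite', 'writemasked'], ['readonly', 'arraymask']], + # ... op_dtypes=['f4', None], casting='same_kind') + # >>> with it: + # ... for x, mask in it: + # ... x[...] = 9 + # >>> a # the cast forces buffering; only masked items written back + # array([9., 0., 9.]))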
+ arr = np.zeros((3, 4)) + itflags = ["reduce_ok"] + mask_flags = ["arraymask", "readwrite", "allocate"] + a_flags = ["writeonly", "writemasked"] + if mask_axes is None: + op_axes = None + else: + op_axes = [mask_axes, [0, 1]] + + with assert_raises(ValueError): + np.nditer((mask, arr), flags=itflags, op_flags=[mask_flags, a_flags], + op_axes=op_axes) + + +def test_iter_writemasked_decref(): + # force casting (to make it interesting) by using a structured dtype. + arr = np.arange(10000).astype(">i,O") + original = arr.copy() + mask = np.random.randint(0, 2, size=10000).astype(bool) + + it = np.nditer([arr, mask], ['buffered', "refs_ok"], + [['readwrite', 'writemasked'], + ['readonly', 'arraymask']], + op_dtypes=[" string -> longdouble` for the + # conversion. But Python may refuse `str(int)` for huge ints. + # In that case, RuntimeWarning would be correct, but conversion + # fails earlier (seems to happen on 32bit linux, possibly only debug). + if dtype in "gG": + try: + str(too_big_int) + except ValueError: + pytest.skip("`huge_int -> string -> longdouble` failed") + + # Otherwise, we overflow to infinity: + with pytest.warns(RuntimeWarning): + res = scalar_type(1) + too_big_int + assert res.dtype == dtype + assert res == np.inf + + with pytest.warns(RuntimeWarning): + # We force the dtype here, since windows may otherwise pick the + # double instead of the longdouble loop. That leads to slightly + # different results (conversion of the int fails as above). + res = np.add(np.array(1, dtype=dtype), too_big_int, dtype=dtype) + assert res.dtype == dtype + assert res == np.inf + + +@pytest.mark.parametrize("op", [operator.add, operator.pow]) +def test_weak_promotion_scalar_path(op): + # Some additional paths exercising the weak scalars. + + # Integer path: + res = op(np.uint8(3), 5) + assert res == op(3, 5) + assert res.dtype == np.uint8 or res.dtype == bool # noqa: PLR1714 + + with pytest.raises(OverflowError): + op(np.uint8(3), 1000) + + # Float path: + res = op(np.float32(3), 5.) + assert res == op(3., 5.) + assert res.dtype == np.float32 or res.dtype == bool # noqa: PLR1714 + + +def test_nep50_complex_promotion(): + with pytest.warns(RuntimeWarning, match=".*overflow"): + res = np.complex64(3) + complex(2**300) + + assert type(res) == np.complex64 + + +def test_nep50_integer_conversion_errors(): + # Implementation for error paths is mostly missing (as of writing) + with pytest.raises(OverflowError, match=".*uint8"): + np.array([1], np.uint8) + 300 + + with pytest.raises(OverflowError, match=".*uint8"): + np.uint8(1) + 300 + + # Error message depends on platform (maybe unsigned int or unsigned long) + with pytest.raises(OverflowError, + match="Python integer -1 out of bounds for uint8"): + np.uint8(1) + -1 + + +def test_nep50_with_axisconcatenator(): + # Concatenate/r_ does not promote, so this has to error: + with pytest.raises(OverflowError): + np.r_[np.arange(5, dtype=np.int8), 255] + + +@pytest.mark.parametrize("ufunc", [np.add, np.power]) +def test_nep50_huge_integers(ufunc): + # Very large integers are complicated, because they go to uint64 or + # object dtype. This test covers a few possible paths.
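+ # (Background: a Python int that does not fit int64 is converted to + # uint64 when possible and to object dtype otherwise, which drives the + # different cases below: + # >>> np.asarray(2**63).dtype, np.asarray(2**100).dtype + # (dtype('uint64'), dtype('O')))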
+ with pytest.raises(OverflowError): + ufunc(np.int64(0), 2**63) # 2**63 too large for int64 + + with pytest.raises(OverflowError): + ufunc(np.uint64(0), 2**64) # 2**64 cannot be represented by uint64 + + # However, 2**63 can be represented by uint64 (and that is used): + res = ufunc(np.uint64(1), 2**63) + + assert res.dtype == np.uint64 + assert res == ufunc(1, 2**63, dtype=object) + + # The following paths fail to warn correctly about the change: + with pytest.raises(OverflowError): + ufunc(np.int64(1), 2**63) # np.array(2**63) would go to uint + + with pytest.raises(OverflowError): + ufunc(np.int64(1), 2**100) # np.array(2**100) would go to object + + # Under the legacy rules this would go to object (and thus a Python + # float); with the new promotion it stays a NumPy float64: + res = ufunc(1.0, 2**100) + assert isinstance(res, np.float64) + + +def test_nep50_in_concat_and_choose(): + res = np.concatenate([np.float32(1), 1.], axis=None) + assert res.dtype == "float32" + + res = np.choose(1, [np.float32(1), 1.]) + assert res.dtype == "float32" + + +@pytest.mark.parametrize("expected,dtypes,optional_dtypes", [ + (np.float32, [np.float32], + [np.float16, 0.0, np.uint16, np.int16, np.int8, 0]), + (np.complex64, [np.float32, 0j], + [np.float16, 0.0, np.uint16, np.int16, np.int8, 0]), + (np.float32, [np.int16, np.uint16, np.float16], + [np.int8, np.uint8, np.float32, 0., 0]), + (np.int32, [np.int16, np.uint16], + [np.int8, np.uint8, 0, np.bool]), + ]) +@hypothesis.given(data=strategies.data()) +def test_expected_promotion(expected, dtypes, optional_dtypes, data): + # Sample randomly while ensuring "dtypes" is always present: + optional = data.draw(strategies.lists( + strategies.sampled_from(dtypes + optional_dtypes))) + all_dtypes = dtypes + optional + dtypes_sample = data.draw(strategies.permutations(all_dtypes)) + + res = np.result_type(*dtypes_sample) + assert res == expected + + +@pytest.mark.parametrize("sctype", + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) +@pytest.mark.parametrize("other_val", + [-2 * 100, -1, 0, 9, 10, 11, 2**63, 2 * 100]) +@pytest.mark.parametrize("comp", + [operator.eq, operator.ne, operator.le, operator.lt, + operator.ge, operator.gt]) +def test_integer_comparison(sctype, other_val, comp): + # Test that comparisons with integers (especially out-of-bound ones) + # work correctly. + val_obj = 10 + val = sctype(val_obj) + # Check that the scalar behaves the same as the python int: + assert comp(10, other_val) == comp(val, other_val) + assert comp(val, other_val) == comp(10, other_val) + # Except for the result type: + assert type(comp(val, other_val)) is np.bool + + # Check that the integer array and object array behave the same: + val_obj = np.array([10, 10], dtype=object) + val = val_obj.astype(sctype) + assert_array_equal(comp(val_obj, other_val), comp(val, other_val)) + assert_array_equal(comp(other_val, val_obj), comp(other_val, val)) + + +@pytest.mark.parametrize("arr", [ + np.ones((100, 100), dtype=np.uint8)[::2], # not trivially iterable + np.ones(20000, dtype=">u4"), # cast and >buffersize + np.ones(100, dtype=">u4"), # fast path compatible with cast +]) +def test_integer_comparison_with_cast(arr): + # Similar to above, but mainly tests a few cases that cover the slow path; + # the test is limited to unsigned ints and -1 for simplicity.
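+ # (-1 is representable in no unsigned dtype, so the comparison must be + # resolved by value rather than by casting the scalar; e.g.: + # >>> bool((np.arange(3, dtype=np.uint8) >= -1).all()) + # True)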
+ res = arr >= -1 + assert_array_equal(res, np.ones_like(arr, dtype=bool)) + res = arr < -1 + assert_array_equal(res, np.zeros_like(arr, dtype=bool)) + + +@pytest.mark.parametrize("comp", + [np.equal, np.not_equal, np.less_equal, np.less, + np.greater_equal, np.greater]) +def test_integer_integer_comparison(comp): + # Test that the NumPy comparison ufuncs work with large Python integers + assert comp(2**200, -2**200) == comp(2**200, -2**200, dtype=object) + + +def create_with_scalar(sctype, value): + return sctype(value) + + +def create_with_array(sctype, value): + return np.array([value], dtype=sctype) + + +@pytest.mark.parametrize("sctype", + [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) +@pytest.mark.parametrize("create", [create_with_scalar, create_with_array]) +def test_oob_creation(sctype, create): + iinfo = np.iinfo(sctype) + + with pytest.raises(OverflowError): + create(sctype, iinfo.min - 1) + + with pytest.raises(OverflowError): + create(sctype, iinfo.max + 1) + + with pytest.raises(OverflowError): + create(sctype, str(iinfo.min - 1)) + + with pytest.raises(OverflowError): + create(sctype, str(iinfo.max + 1)) + + assert create(sctype, iinfo.min) == iinfo.min + assert create(sctype, iinfo.max) == iinfo.max diff --git a/python/numpy/_core/tests/test_numeric.py b/python/numpy/_core/tests/test_numeric.py new file mode 100644 index 000000000..5b58b3440 --- /dev/null +++ b/python/numpy/_core/tests/test_numeric.py @@ -0,0 +1,4247 @@ +import itertools +import math +import platform +import sys +import warnings +from decimal import Decimal + +import pytest +from hypothesis import given +from hypothesis import strategies as st +from hypothesis.extra import numpy as hynp +from numpy._core._rational_tests import rational + +import numpy as np +from numpy import ma +from numpy._core import sctypes +from numpy._core.numerictypes import obj2sctype +from numpy.exceptions import AxisError +from numpy.random import rand, randint, randn +from numpy.testing import ( + HAS_REFCOUNT, + IS_WASM, + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestResize: + def test_copies(self): + A = np.array([[1, 2], [3, 4]]) + Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + + def test_repeats(self): + A = np.array([1, 2, 3]) + Ar1 = np.array([[1, 2, 3, 1], [2, 3, 1, 2]]) + assert_equal(np.resize(A, (2, 4)), Ar1) + + Ar2 = np.array([[1, 2], [3, 1], [2, 3], [1, 2]]) + assert_equal(np.resize(A, (4, 2)), Ar2) + + Ar3 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]]) + assert_equal(np.resize(A, (4, 3)), Ar3) + + def test_zeroresize(self): + A = np.array([[1, 2], [3, 4]]) + Ar = np.resize(A, (0,)) + assert_array_equal(Ar, np.array([])) + assert_equal(A.dtype, Ar.dtype) + + Ar = np.resize(A, (0, 2)) + assert_equal(Ar.shape, (0, 2)) + + Ar = np.resize(A, (2, 0)) + assert_equal(Ar.shape, (2, 0)) + + def test_reshape_from_zero(self): + # See also gh-6740 + A = np.zeros(0, dtype=[('a', np.float32)]) + Ar = np.resize(A, (2, 1)) + assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype)) + assert_equal(A.dtype, Ar.dtype) + + def test_negative_resize(self): + A = np.arange(0, 10, dtype=np.float32) + new_shape = (-10, -1) + 
with pytest.raises(ValueError, match=r"negative"): + np.resize(A, new_shape=new_shape) + + def test_unsigned_resize(self): + # ensure unsigned integer sizes don't lead to underflows + for dt_pair in [(np.int32, np.uint32), (np.int64, np.uint64)]: + arr = np.array([[23, 95], [66, 37]]) + assert_array_equal(np.resize(arr, dt_pair[0](1)), + np.resize(arr, dt_pair[1](1))) + + def test_subclass(self): + class MyArray(np.ndarray): + __array_priority__ = 1. + + my_arr = np.array([1]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + assert type(np.resize(my_arr, 0)) is MyArray + + my_arr = np.array([]).view(MyArray) + assert type(np.resize(my_arr, 5)) is MyArray + + +class TestNonarrayArgs: + # check that non-array arguments to functions wrap them in arrays + def test_choose(self): + choices = [[0, 1, 2], + [3, 4, 5], + [5, 6, 7]] + tgt = [5, 1, 5] + a = [2, 0, 1] + + out = np.choose(a, choices) + assert_equal(out, tgt) + + def test_clip(self): + arr = [-1, 5, 2, 3, 10, -4, -9] + out = np.clip(arr, 2, 7) + tgt = [2, 5, 2, 3, 7, 2, 2] + assert_equal(out, tgt) + + def test_compress(self): + arr = [[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]] + tgt = [[5, 6, 7, 8, 9]] + out = np.compress([0, 1], arr, axis=0) + assert_equal(out, tgt) + + def test_count_nonzero(self): + arr = [[0, 1, 7, 0, 0], + [3, 0, 0, 2, 19]] + tgt = np.array([2, 3]) + out = np.count_nonzero(arr, axis=1) + assert_equal(out, tgt) + + def test_diagonal(self): + a = [[0, 1, 2, 3], + [4, 5, 6, 7], + [8, 9, 10, 11]] + out = np.diagonal(a) + tgt = [0, 5, 10] + + assert_equal(out, tgt) + + def test_mean(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.mean(A) == 3.5) + assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))) + assert_(np.all(np.mean(A, 1) == np.array([2., 5.]))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.mean([]))) + assert_(w[0].category is RuntimeWarning) + + def test_ptp(self): + a = [3, 4, 5, 10, -3, -5, 6.0] + assert_equal(np.ptp(a, axis=0), 15.0) + + def test_prod(self): + arr = [[1, 2, 3, 4], + [5, 6, 7, 9], + [10, 3, 4, 5]] + tgt = [24, 1890, 600] + + assert_equal(np.prod(arr, axis=-1), tgt) + + def test_ravel(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] + assert_equal(np.ravel(a), tgt) + + def test_repeat(self): + a = [1, 2, 3] + tgt = [1, 1, 2, 2, 3, 3] + + out = np.repeat(a, 2) + assert_equal(out, tgt) + + def test_reshape(self): + arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]] + tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] + assert_equal(np.reshape(arr, (2, 6)), tgt) + + def test_reshape_shape_arg(self): + arr = np.arange(12) + shape = (3, 4) + expected = arr.reshape(shape) + + with pytest.raises( + TypeError, + match="You cannot specify 'newshape' and 'shape' " + "arguments at the same time." 
+ ): + np.reshape(arr, shape=shape, newshape=shape) + with pytest.raises( + TypeError, + match=r"reshape\(\) missing 1 required positional " + "argument: 'shape'" + ): + np.reshape(arr) + + assert_equal(np.reshape(arr, shape), expected) + assert_equal(np.reshape(arr, shape, order="C"), expected) + assert_equal(np.reshape(arr, shape, "C"), expected) + assert_equal(np.reshape(arr, shape=shape), expected) + assert_equal(np.reshape(arr, shape=shape, order="C"), expected) + with pytest.warns(DeprecationWarning): + actual = np.reshape(arr, newshape=shape) + assert_equal(actual, expected) + + def test_reshape_copy_arg(self): + arr = np.arange(24).reshape(2, 3, 4) + arr_f_ord = np.array(arr, order="F") + shape = (12, 2) + + assert np.shares_memory(np.reshape(arr, shape), arr) + assert np.shares_memory(np.reshape(arr, shape, order="C"), arr) + assert np.shares_memory( + np.reshape(arr_f_ord, shape, order="F"), arr_f_ord) + assert np.shares_memory(np.reshape(arr, shape, copy=None), arr) + assert np.shares_memory(np.reshape(arr, shape, copy=False), arr) + assert np.shares_memory(arr.reshape(shape, copy=False), arr) + assert not np.shares_memory(np.reshape(arr, shape, copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="C", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=None), arr) + + err_msg = "Unable to avoid creating a copy while reshaping." + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr, shape, order="F", copy=False) + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr_f_ord, shape, order="C", copy=False) + + def test_round(self): + arr = [1.56, 72.54, 6.35, 3.25] + tgt = [1.6, 72.5, 6.4, 3.2] + assert_equal(np.around(arr, decimals=1), tgt) + s = np.float64(1.) + assert_(isinstance(s.round(), np.float64)) + assert_equal(s.round(), 1.) 
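+ # (np.around scales by 10**decimals and rounds half to even, which is + # why 3.25 -> 3.2 while 6.35 -> 6.4 above (after scaling, 63.5 goes to + # the even 64); the same rule on exact halves: + # >>> np.around([0.5, 1.5, 2.5]) + # array([0., 2., 2.]))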
+ + @pytest.mark.parametrize('dtype', [ + np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64, + np.float16, np.float32, np.float64, + ]) + def test_dunder_round(self, dtype): + s = dtype(1) + assert_(isinstance(round(s), int)) + assert_(isinstance(round(s, None), int)) + assert_(isinstance(round(s, ndigits=None), int)) + assert_equal(round(s), 1) + assert_equal(round(s, None), 1) + assert_equal(round(s, ndigits=None), 1) + + @pytest.mark.parametrize('val, ndigits', [ + pytest.param(2**31 - 1, -1, + marks=pytest.mark.skip(reason="Out of range of int32") + ), + (2**31 - 1, 1 - math.ceil(math.log10(2**31 - 1))), + (2**31 - 1, -math.ceil(math.log10(2**31 - 1))) + ]) + def test_dunder_round_edgecases(self, val, ndigits): + assert_equal(round(val, ndigits), round(np.int32(val), ndigits)) + + def test_dunder_round_accuracy(self): + f = np.float64(5.1 * 10**73) + assert_(isinstance(round(f, -73), np.float64)) + assert_array_max_ulp(round(f, -73), 5.0 * 10**73) + assert_(isinstance(round(f, ndigits=-73), np.float64)) + assert_array_max_ulp(round(f, ndigits=-73), 5.0 * 10**73) + + i = np.int64(501) + assert_(isinstance(round(i, -2), np.int64)) + assert_array_max_ulp(round(i, -2), 500) + assert_(isinstance(round(i, ndigits=-2), np.int64)) + assert_array_max_ulp(round(i, ndigits=-2), 500) + + @pytest.mark.xfail(raises=AssertionError, reason="gh-15896") + def test_round_py_consistency(self): + f = 5.1 * 10**73 + assert_equal(round(np.float64(f), -73), round(f, -73)) + + def test_searchsorted(self): + arr = [-8, -5, -1, 3, 6, 10] + out = np.searchsorted(arr, 0) + assert_equal(out, 3) + + def test_size(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_(np.size(A) == 6) + assert_(np.size(A, 0) == 2) + assert_(np.size(A, 1) == 3) + + def test_squeeze(self): + A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]] + assert_equal(np.squeeze(A).shape, (3, 3)) + assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3)) + assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3)) + assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3)) + assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3)) + + def test_std(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.std(A), 1.707825127659933) + assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5])) + assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.std([]))) + assert_(w[0].category is RuntimeWarning) + + def test_swapaxes(self): + tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]] + a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]] + out = np.swapaxes(a, 0, 2) + assert_equal(out, tgt) + + def test_sum(self): + m = [[1, 2, 3], + [4, 5, 6], + [7, 8, 9]] + tgt = [[6], [15], [24]] + out = np.sum(m, axis=1, keepdims=True) + + assert_equal(tgt, out) + + def test_take(self): + tgt = [2, 3, 5] + indices = [1, 2, 4] + a = [1, 2, 3, 4, 5] + + out = np.take(a, indices) + assert_equal(out, tgt) + + pairs = [ + (np.int32, np.int32), (np.int32, np.int64), + (np.int64, np.int32), (np.int64, np.int64) + ] + for array_type, indices_type in pairs: + x = np.array([1, 2, 3, 4, 5], dtype=array_type) + ind = np.array([0, 2, 2, 3], 
dtype=indices_type) + tgt = np.array([1, 3, 3, 4], dtype=array_type) + out = np.take(x, ind) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) + + def test_trace(self): + c = [[1, 2], [3, 4], [5, 6]] + assert_equal(np.trace(c), 5) + + def test_transpose(self): + arr = [[1, 2], [3, 4], [5, 6]] + tgt = [[1, 3, 5], [2, 4, 6]] + assert_equal(np.transpose(arr, (1, 0)), tgt) + assert_equal(np.transpose(arr, (-1, -2)), tgt) + assert_equal(np.matrix_transpose(arr), tgt) + + def test_var(self): + A = [[1, 2, 3], [4, 5, 6]] + assert_almost_equal(np.var(A), 2.9166666666666665) + assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25])) + assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667])) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.var([]))) + assert_(w[0].category is RuntimeWarning) + + B = np.array([None, 0]) + B[0] = 1j + assert_almost_equal(np.var(B), 0.25) + + def test_std_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean_out = np.zeros((10, 1, 5)) + std_out = np.zeros((10, 1, 5)) + + mean = np.mean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the object specified during calling + assert mean_out is mean + + std = np.std(A, + out=std_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the object specified during calling + assert std_out is std + + # Shape of returned mean and std should be same + assert std.shape == mean.shape + assert std.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, keepdims=True) + + assert std_old.shape == mean.shape + assert_almost_equal(std, std_old) + + def test_var_with_mean_keyword(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean_out = np.zeros((10, 1, 5)) + var_out = np.zeros((10, 1, 5)) + + mean = np.mean(A, + out=mean_out, + axis=1, + keepdims=True) + + # The returned object should be the object specified during calling + assert mean_out is mean + + var = np.var(A, + out=var_out, + axis=1, + keepdims=True, + mean=mean) + + # The returned object should be the object specified during calling + assert var_out is var + + # Shape of returned mean and var should be same + assert var.shape == mean.shape + assert var.shape == (10, 1, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, keepdims=True) + + assert var_old.shape == mean.shape + assert_almost_equal(var, var_old) + + def test_std_with_mean_keyword_keepdims_false(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True) + + std = np.std(A, + axis=1, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == (10, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, keepdims=False) + mean_old = np.mean(A, axis=1, keepdims=False) + + assert std_old.shape == mean_old.shape + assert_equal(std, std_old) + + def test_var_with_mean_keyword_keepdims_false(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True) + + var = np.var(A, + axis=1, + keepdims=False, + mean=mean) + + # Shape of returned mean and var should 
be same + assert var.shape == (10, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, keepdims=False) + mean_old = np.mean(A, axis=1, keepdims=False) + + assert var_old.shape == mean_old.shape + assert_equal(var, var_old) + + def test_std_with_mean_keyword_where_nontrivial(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + where = A > 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True, + where=where) + + std = np.std(A, + axis=1, + keepdims=False, + mean=mean, + where=where) + + # Shape of returned mean and std should be same + assert std.shape == (10, 5) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=1, where=where) + mean_old = np.mean(A, axis=1, where=where) + + assert std_old.shape == mean_old.shape + assert_equal(std, std_old) + + def test_var_with_mean_keyword_where_nontrivial(self): + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + where = A > 0.5 + + mean = np.mean(A, + axis=1, + keepdims=True, + where=where) + + var = np.var(A, + axis=1, + keepdims=False, + mean=mean, + where=where) + + # Shape of returned mean and var should be same + assert var.shape == (10, 5) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=1, where=where) + mean_old = np.mean(A, axis=1, where=where) + + assert var_old.shape == mean_old.shape + assert_equal(var, var_old) + + def test_std_with_mean_keyword_multiple_axis(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + axis = (0, 2) + + mean = np.mean(A, + out=None, + axis=axis, + keepdims=True) + + std = np.std(A, + out=None, + axis=axis, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == (20,) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=False) + + assert_almost_equal(std, std_old) + + def test_std_with_mean_keyword_axis_None(self): + # Setting the seed to make the test reproducible + rng = np.random.RandomState(1234) + A = rng.randn(10, 20, 5) + 0.5 + + axis = None + + mean = np.mean(A, + out=None, + axis=axis, + keepdims=True) + + std = np.std(A, + out=None, + axis=axis, + keepdims=False, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == () + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=False) + + assert_almost_equal(std, std_old) + + def test_std_with_mean_keyword_keepdims_true_masked(self): + + A = ma.array([[2., 3., 4., 5.], + [1., 2., 3., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + B = ma.array([[100., 3., 104., 5.], + [101., 2., 103., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + mean_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + std_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + + axis = 0 + + mean = np.mean(A, out=mean_out, + axis=axis, keepdims=True) + + std = np.std(A, out=std_out, + axis=axis, keepdims=True, + mean=mean) + + # Shape of returned mean and std should be same + assert std.shape == mean.shape + assert std.shape == (1, 4) + + # Output should be the same as from the individual algorithms + std_old = np.std(A, axis=axis, keepdims=True) + mean_old = np.mean(A, axis=axis, keepdims=True) + + assert std_old.shape == mean_old.shape + 
assert_almost_equal(std, std_old) + assert_almost_equal(mean, mean_old) + + assert mean_out is mean + assert std_out is std + + # masked elements should be ignored + mean_b = np.mean(B, axis=axis, keepdims=True) + std_b = np.std(B, axis=axis, keepdims=True, mean=mean_b) + assert_almost_equal(std, std_b) + assert_almost_equal(mean, mean_b) + + def test_var_with_mean_keyword_keepdims_true_masked(self): + + A = ma.array([[2., 3., 4., 5.], + [1., 2., 3., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + B = ma.array([[100., 3., 104., 5.], + [101., 2., 103., 4.]], + mask=[[True, False, True, False], + [True, False, True, False]]) + + mean_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + var_out = ma.array([[0., 0., 0., 0.]], + mask=[[False, False, False, False]]) + + axis = 0 + + mean = np.mean(A, out=mean_out, + axis=axis, keepdims=True) + + var = np.var(A, out=var_out, + axis=axis, keepdims=True, + mean=mean) + + # Shape of returned mean and var should be same + assert var.shape == mean.shape + assert var.shape == (1, 4) + + # Output should be the same as from the individual algorithms + var_old = np.var(A, axis=axis, keepdims=True) + mean_old = np.mean(A, axis=axis, keepdims=True) + + assert var_old.shape == mean_old.shape + assert_almost_equal(var, var_old) + assert_almost_equal(mean, mean_old) + + assert mean_out is mean + assert var_out is var + + # masked elements should be ignored + mean_b = np.mean(B, axis=axis, keepdims=True) + var_b = np.var(B, axis=axis, keepdims=True, mean=mean_b) + assert_almost_equal(var, var_b) + assert_almost_equal(mean, mean_b) + + +class TestIsscalar: + def test_isscalar(self): + assert_(np.isscalar(3.1)) + assert_(np.isscalar(np.int16(12345))) + assert_(np.isscalar(False)) + assert_(np.isscalar('numpy')) + assert_(not np.isscalar([3.1])) + assert_(not np.isscalar(None)) + + # PEP 3141 + from fractions import Fraction + assert_(np.isscalar(Fraction(5, 17))) + from numbers import Number + assert_(np.isscalar(Number())) + + +class TestBoolScalar: + def test_logical(self): + f = np.False_ + t = np.True_ + s = "xyz" + assert_((t and s) is s) + assert_((f and s) is f) + + def test_bitwise_or(self): + f = np.False_ + t = np.True_ + assert_((t | t) is t) + assert_((f | t) is t) + assert_((t | f) is t) + assert_((f | f) is f) + + def test_bitwise_and(self): + f = np.False_ + t = np.True_ + assert_((t & t) is t) + assert_((f & t) is f) + assert_((t & f) is f) + assert_((f & f) is f) + + def test_bitwise_xor(self): + f = np.False_ + t = np.True_ + assert_((t ^ t) is f) + assert_((f ^ t) is t) + assert_((t ^ f) is t) + assert_((f ^ f) is f) + + +class TestBoolArray: + def setup_method(self): + # offset for simd tests + self.t = np.array([True] * 41, dtype=bool)[1::] + self.f = np.array([False] * 41, dtype=bool)[1::] + self.o = np.array([False] * 42, dtype=bool)[2::] + self.nm = self.f.copy() + self.im = self.t.copy() + self.nm[3] = True + self.nm[-2] = True + self.im[3] = False + self.im[-2] = False + + def test_all_any(self): + assert_(self.t.all()) + assert_(self.t.any()) + assert_(not self.f.all()) + assert_(not self.f.any()) + assert_(self.nm.any()) + assert_(self.im.any()) + assert_(not self.nm.all()) + assert_(not self.im.all()) + # check bad element in all positions + for i in range(256 - 7): + d = np.array([False] * 256, dtype=bool)[7::] + d[i] = True + assert_(np.any(d)) + e = np.array([True] * 256, dtype=bool)[7::] + e[i] = False + assert_(not np.all(e)) + assert_array_equal(e, ~d) + # big array test for 
blocked libc loops + for i in list(range(9, 6000, 507)) + [7764, 90021, -10]: + d = np.array([False] * 100043, dtype=bool) + d[i] = True + assert_(np.any(d), msg=f"{i!r}") + e = np.array([True] * 100043, dtype=bool) + e[i] = False + assert_(not np.all(e), msg=f"{i!r}") + + def test_logical_not_abs(self): + assert_array_equal(~self.t, self.f) + assert_array_equal(np.abs(~self.t), self.f) + assert_array_equal(np.abs(~self.f), self.t) + assert_array_equal(np.abs(self.f), self.f) + assert_array_equal(~np.abs(self.f), self.t) + assert_array_equal(~np.abs(self.t), self.f) + assert_array_equal(np.abs(~self.nm), self.im) + np.logical_not(self.t, out=self.o) + assert_array_equal(self.o, self.f) + np.abs(self.t, out=self.o) + assert_array_equal(self.o, self.t) + + def test_logical_and_or_xor(self): + assert_array_equal(self.t | self.t, self.t) + assert_array_equal(self.f | self.f, self.f) + assert_array_equal(self.t | self.f, self.t) + assert_array_equal(self.f | self.t, self.t) + np.logical_or(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t & self.t, self.t) + assert_array_equal(self.f & self.f, self.f) + assert_array_equal(self.t & self.f, self.f) + assert_array_equal(self.f & self.t, self.f) + np.logical_and(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.t) + assert_array_equal(self.t ^ self.t, self.f) + assert_array_equal(self.f ^ self.f, self.f) + assert_array_equal(self.t ^ self.f, self.t) + assert_array_equal(self.f ^ self.t, self.t) + np.logical_xor(self.t, self.t, out=self.o) + assert_array_equal(self.o, self.f) + + assert_array_equal(self.nm & self.t, self.nm) + assert_array_equal(self.im & self.f, False) + assert_array_equal(self.nm & True, self.nm) + assert_array_equal(self.im & False, self.f) + assert_array_equal(self.nm | self.t, self.t) + assert_array_equal(self.im | self.f, self.im) + assert_array_equal(self.nm | True, self.t) + assert_array_equal(self.im | False, self.im) + assert_array_equal(self.nm ^ self.t, self.im) + assert_array_equal(self.im ^ self.f, self.im) + assert_array_equal(self.nm ^ True, self.im) + assert_array_equal(self.im ^ False, self.im) + + +class TestBoolCmp: + def setup_method(self): + self.f = np.ones(256, dtype=np.float32) + self.ef = np.ones(self.f.size, dtype=bool) + self.d = np.ones(128, dtype=np.float64) + self.ed = np.ones(self.d.size, dtype=bool) + # generate values for all permutation of 256bit simd vectors + s = 0 + for i in range(32): + self.f[s:s + 8] = [i & 2**x for x in range(8)] + self.ef[s:s + 8] = [(i & 2**x) != 0 for x in range(8)] + s += 8 + s = 0 + for i in range(16): + self.d[s:s + 4] = [i & 2**x for x in range(4)] + self.ed[s:s + 4] = [(i & 2**x) != 0 for x in range(4)] + s += 4 + + self.nf = self.f.copy() + self.nd = self.d.copy() + self.nf[self.ef] = np.nan + self.nd[self.ed] = np.nan + + self.inff = self.f.copy() + self.infd = self.d.copy() + self.inff[::3][self.ef[::3]] = np.inf + self.infd[::3][self.ed[::3]] = np.inf + self.inff[1::3][self.ef[1::3]] = -np.inf + self.infd[1::3][self.ed[1::3]] = -np.inf + self.inff[2::3][self.ef[2::3]] = np.nan + self.infd[2::3][self.ed[2::3]] = np.nan + self.efnonan = self.ef.copy() + self.efnonan[2::3] = False + self.ednonan = self.ed.copy() + self.ednonan[2::3] = False + + self.signf = self.f.copy() + self.signd = self.d.copy() + self.signf[self.ef] *= -1. + self.signd[self.ed] *= -1. 
+ self.signf[1::6][self.ef[1::6]] = -np.inf + self.signd[1::6][self.ed[1::6]] = -np.inf + # On RISC-V, many operations that produce NaNs, such as converting + # a -NaN from f64 to f32, return a canonical NaN. The canonical + # NaNs are always positive. See section 11.3 NaN Generation and + # Propagation of the RISC-V Unprivileged ISA for more details. + # We disable the float32 sign test on riscv64 for -np.nan as the sign + # of the NaN will be lost when it's converted to a float32. + if platform.machine() != 'riscv64': + self.signf[3::6][self.ef[3::6]] = -np.nan + self.signd[3::6][self.ed[3::6]] = -np.nan + self.signf[4::6][self.ef[4::6]] = -0. + self.signd[4::6][self.ed[4::6]] = -0. + + def test_float(self): + # offset for alignment test + for i in range(4): + assert_array_equal(self.f[i:] > 0, self.ef[i:]) + assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:]) + assert_array_equal(self.f[i:] == 0, ~self.ef[i:]) + assert_array_equal(-self.f[i:] < 0, self.ef[i:]) + assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:]) + r = self.f[i:] != 0 + assert_array_equal(r, self.ef[i:]) + r2 = self.f[i:] != np.zeros_like(self.f[i:]) + r3 = 0 != self.f[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:]) + assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:]) + assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:]) + assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:]) + assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:]) + + def test_double(self): + # offset for alignment test + for i in range(2): + assert_array_equal(self.d[i:] > 0, self.ed[i:]) + assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:]) + assert_array_equal(self.d[i:] == 0, ~self.ed[i:]) + assert_array_equal(-self.d[i:] < 0, self.ed[i:]) + assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:]) + r = self.d[i:] != 0 + assert_array_equal(r, self.ed[i:]) + r2 = self.d[i:] != np.zeros_like(self.d[i:]) + r3 = 0 != self.d[i:] + assert_array_equal(r, r2) + assert_array_equal(r, r3) + # check bool == 0x1 + assert_array_equal(r.view(np.int8), r.astype(np.int8)) + assert_array_equal(r2.view(np.int8), r2.astype(np.int8)) + assert_array_equal(r3.view(np.int8), r3.astype(np.int8)) + + # isnan on amd64 takes the same code path + assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:]) + assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:]) + assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:]) + assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:]) + assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:]) + + +class TestSeterr: + def test_default(self): + err = np.geterr() + assert_equal(err, + {'divide': 'warn', + 'invalid': 'warn', + 'over': 'warn', + 'under': 'ignore'} + ) + + def test_set(self): + with np.errstate(): + err = np.seterr() + old = np.seterr(divide='print') + assert_(err == old) + new = np.seterr() + assert_(new['divide'] == 'print') + np.seterr(over='raise') + assert_(np.geterr()['over'] == 'raise') + assert_(new['divide'] == 'print') + np.seterr(**old) + assert_(np.geterr() == old) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_divide_err(self): + with 
np.errstate(divide='raise'): + with assert_raises(FloatingPointError): + np.array([1.]) / np.array([0.]) + + np.seterr(divide='ignore') + np.array([1.]) / np.array([0.]) + + +class TestFloatExceptions: + def assert_raises_fpe(self, fpeerr, flop, x, y): + ftype = type(x) + try: + flop(x, y) + assert_(False, + f"Type {ftype} did not raise fpe error '{fpeerr}'.") + except FloatingPointError as exc: + assert_(str(exc).find(fpeerr) >= 0, + f"Type {ftype} raised wrong fpe error '{exc}'.") + + def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2): + # Check that fpe exception is raised. + # + # Given a floating operation `flop` and two scalar values, check that + # the operation raises the floating point exception specified by + # `fpeerr`. Tests all variants with 0-d array scalars as well. + + self.assert_raises_fpe(fpeerr, flop, sc1, sc2) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2) + self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()]) + self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()]) + + # Test for all real and complex float types + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + @pytest.mark.parametrize("typecode", np.typecodes["AllFloat"]) + def test_floating_exceptions(self, typecode): + if 'bsd' in sys.platform and typecode in 'gG': + pytest.skip(reason="Fallback impl for (c)longdouble may not raise " + "FPE errors as expected on BSD OSes, " + "see gh-24876, gh-23379") + + # Test basic arithmetic function errors + with np.errstate(all='raise'): + ftype = obj2sctype(typecode) + if np.dtype(ftype).kind == 'f': + # Get some extreme values for the type + fi = np.finfo(ftype) + ft_tiny = fi._machar.tiny + ft_max = fi.max + ft_eps = fi.eps + underflow = 'underflow' + divbyzero = 'divide by zero' + else: + # 'c', complex, corresponding real dtype + rtype = type(ftype(0).real) + fi = np.finfo(rtype) + ft_tiny = ftype(fi._machar.tiny) + ft_max = ftype(fi.max) + ft_eps = ftype(fi.eps) + # The complex types raise different exceptions + underflow = '' + divbyzero = '' + overflow = 'overflow' + invalid = 'invalid' + + # The value of tiny for double double is NaN, so we need to + # pass the assert + if not np.isnan(ft_tiny): + self.assert_raises_fpe(underflow, + lambda a, b: a / b, ft_tiny, ft_max) + self.assert_raises_fpe(underflow, + lambda a, b: a * b, ft_tiny, ft_tiny) + self.assert_raises_fpe(overflow, + lambda a, b: a * b, ft_max, ftype(2)) + self.assert_raises_fpe(overflow, + lambda a, b: a / b, ft_max, ftype(0.5)) + self.assert_raises_fpe(overflow, + lambda a, b: a + b, ft_max, ft_max * ft_eps) + self.assert_raises_fpe(overflow, + lambda a, b: a - b, -ft_max, ft_max * ft_eps) + self.assert_raises_fpe(overflow, + np.power, ftype(2), ftype(2**fi.nexp)) + self.assert_raises_fpe(divbyzero, + lambda a, b: a / b, ftype(1), ftype(0)) + self.assert_raises_fpe( + invalid, lambda a, b: a / b, ftype(np.inf), ftype(np.inf) + ) + self.assert_raises_fpe(invalid, + lambda a, b: a / b, ftype(0), ftype(0)) + self.assert_raises_fpe( + invalid, lambda a, b: a - b, ftype(np.inf), ftype(np.inf) + ) + self.assert_raises_fpe( + invalid, lambda a, b: a + b, ftype(np.inf), ftype(-np.inf) + ) + self.assert_raises_fpe(invalid, + lambda a, b: a * b, ftype(0), ftype(np.inf)) + + @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support") + def test_warnings(self): + # test warning code path + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + with np.errstate(all="warn"): + np.divide(1, 0.) 
+ assert_equal(len(w), 1) + assert_("divide by zero" in str(w[0].message)) + np.array(1e300) * np.array(1e300) + assert_equal(len(w), 2) + assert_("overflow" in str(w[-1].message)) + np.array(np.inf) - np.array(np.inf) + assert_equal(len(w), 3) + assert_("invalid value" in str(w[-1].message)) + np.array(1e-300) * np.array(1e-300) + assert_equal(len(w), 4) + assert_("underflow" in str(w[-1].message)) + + +class TestTypes: + def check_promotion_cases(self, promote_func): + # tests that the scalars get coerced correctly. + b = np.bool(0) + i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0) + u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0) + f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0) + c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0) + + # coercion within the same kind + assert_equal(promote_func(i8, i16), np.dtype(np.int16)) + assert_equal(promote_func(i32, i8), np.dtype(np.int32)) + assert_equal(promote_func(i16, i64), np.dtype(np.int64)) + assert_equal(promote_func(u8, u32), np.dtype(np.uint32)) + assert_equal(promote_func(f32, f64), np.dtype(np.float64)) + assert_equal(promote_func(fld, f32), np.dtype(np.longdouble)) + assert_equal(promote_func(f64, fld), np.dtype(np.longdouble)) + assert_equal(promote_func(c128, c64), np.dtype(np.complex128)) + assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble)) + + # coercion between kinds + assert_equal(promote_func(b, i32), np.dtype(np.int32)) + assert_equal(promote_func(b, u8), np.dtype(np.uint8)) + assert_equal(promote_func(i8, u8), np.dtype(np.int16)) + assert_equal(promote_func(u8, i32), np.dtype(np.int32)) + assert_equal(promote_func(i64, u32), np.dtype(np.int64)) + assert_equal(promote_func(u64, i32), np.dtype(np.float64)) + assert_equal(promote_func(i32, f32), np.dtype(np.float64)) + assert_equal(promote_func(i64, f32), np.dtype(np.float64)) + assert_equal(promote_func(f32, i16), np.dtype(np.float32)) + assert_equal(promote_func(f32, u32), np.dtype(np.float64)) + assert_equal(promote_func(f32, c64), np.dtype(np.complex64)) + assert_equal(promote_func(c128, f32), np.dtype(np.complex128)) + assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble)) + + # coercion between scalars and 1-D arrays + assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8)) + assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8)) + assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32)) + assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int64)) + # unsigned and signed unfortunately tend to promote to float64: + assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.float64)) + assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.int64)) + assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.int32)) + assert_equal(promote_func(np.int32(-1), np.array([u64])), + np.dtype(np.float64)) + assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float64)) + assert_equal(promote_func(fld, np.array([f32])), + np.dtype(np.longdouble)) + assert_equal(promote_func(np.array([f64]), fld), + np.dtype(np.longdouble)) + assert_equal(promote_func(fld, np.array([c64])), + np.dtype(np.clongdouble)) + assert_equal(promote_func(c64, np.array([f64])), + np.dtype(np.complex128)) + assert_equal(promote_func(np.complex64(3j), np.array([f64])), + np.dtype(np.complex128)) + 
assert_equal(promote_func(np.array([f32]), c128), + np.dtype(np.complex128)) + + # coercion between scalars and 1-D arrays, where + # the scalar has greater kind than the array + assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64)) + assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64)) + assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64)) + assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64)) + + def test_coercion(self): + def res_type(a, b): + return np.add(a, b).dtype + + self.check_promotion_cases(res_type) + + # Use-case: float/complex scalar * bool/int8 array + # shouldn't narrow the float/complex type + for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]: + b = 1.234 * a + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") + b = np.longdouble(1.234) * a + assert_equal(b.dtype, np.dtype(np.longdouble), + f"array type {a.dtype}") + b = np.float64(1.234) * a + assert_equal(b.dtype, np.dtype('f8'), f"array type {a.dtype}") + b = np.float32(1.234) * a + assert_equal(b.dtype, np.dtype('f4'), f"array type {a.dtype}") + b = np.float16(1.234) * a + assert_equal(b.dtype, np.dtype('f2'), f"array type {a.dtype}") + + b = 1.234j * a + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") + b = np.clongdouble(1.234j) * a + assert_equal(b.dtype, np.dtype(np.clongdouble), + f"array type {a.dtype}") + b = np.complex128(1.234j) * a + assert_equal(b.dtype, np.dtype('c16'), f"array type {a.dtype}") + b = np.complex64(1.234j) * a + assert_equal(b.dtype, np.dtype('c8'), f"array type {a.dtype}") + + # The following use-case is problematic, and to resolve its + # tricky side-effects requires more changes. + # + # Use-case: (1-t)*a, where 't' is a boolean array and 'a' is + # a float32, shouldn't promote to float64 + # + # a = np.array([1.0, 1.5], dtype=np.float32) + # t = np.array([True, False]) + # b = t*a + # assert_equal(b, [1.0, 0.0]) + # assert_equal(b.dtype, np.dtype('f4')) + # b = (1-t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + # + # Probably ~t (bitwise negation) is more proper to use here, + # but this is arguably less intuitive to understand at a glance, and + # would fail if 't' is actually an integer array instead of boolean: + # + # b = (~t)*a + # assert_equal(b, [0.0, 1.5]) + # assert_equal(b.dtype, np.dtype('f4')) + + def test_result_type(self): + self.check_promotion_cases(np.result_type) + assert_(np.result_type(None) == np.dtype(None)) + + def test_promote_types_endian(self): + # promote_types should always return native-endian types + assert_equal(np.promote_types('i8', '>i8'), np.dtype('i8')) + + assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21')) + assert_equal(np.promote_types('U16', '>i8'), np.dtype('U21')) + assert_equal(np.promote_types('S5', '>U8'), np.dtype('U8')) + assert_equal(np.promote_types('U8', '>S5'), np.dtype('U8')) + assert_equal(np.promote_types('U8', '>U5'), np.dtype('U8')) + + assert_equal(np.promote_types('M8', '>M8'), np.dtype('M8')) + assert_equal(np.promote_types('m8', '>m8'), np.dtype('m8')) + + def test_can_cast_and_promote_usertypes(self): + # The rational type defines safe casting for signed integers, + # boolean. Rational itself *does* cast safely to double. + # (rational does not actually cast to all signed integers, e.g. 
+ # int64 can be both long and longlong and it registers only the first) + valid_types = ["int8", "int16", "int32", "int64", "bool"] + invalid_types = "BHILQP" + "FDG" + "mM" + "f" + "V" + + rational_dt = np.dtype(rational) + for numpy_dtype in valid_types: + numpy_dtype = np.dtype(numpy_dtype) + assert np.can_cast(numpy_dtype, rational_dt) + assert np.promote_types(numpy_dtype, rational_dt) is rational_dt + + for numpy_dtype in invalid_types: + numpy_dtype = np.dtype(numpy_dtype) + assert not np.can_cast(numpy_dtype, rational_dt) + with pytest.raises(TypeError): + np.promote_types(numpy_dtype, rational_dt) + + double_dt = np.dtype("double") + assert np.can_cast(rational_dt, double_dt) + assert np.promote_types(double_dt, rational_dt) is double_dt + + @pytest.mark.parametrize("swap", ["", "swap"]) + @pytest.mark.parametrize("string_dtype", ["U", "S"]) + def test_promote_types_strings(self, swap, string_dtype): + if swap == "swap": + promote_types = lambda a, b: np.promote_types(b, a) + else: + promote_types = np.promote_types + + S = string_dtype + + # Promote numeric with unsized string: + assert_equal(promote_types('bool', S), np.dtype(S + '5')) + assert_equal(promote_types('b', S), np.dtype(S + '4')) + assert_equal(promote_types('u1', S), np.dtype(S + '3')) + assert_equal(promote_types('u2', S), np.dtype(S + '5')) + assert_equal(promote_types('u4', S), np.dtype(S + '10')) + assert_equal(promote_types('u8', S), np.dtype(S + '20')) + assert_equal(promote_types('i1', S), np.dtype(S + '4')) + assert_equal(promote_types('i2', S), np.dtype(S + '6')) + assert_equal(promote_types('i4', S), np.dtype(S + '11')) + assert_equal(promote_types('i8', S), np.dtype(S + '21')) + # Promote numeric with sized string: + assert_equal(promote_types('bool', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('bool', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('b', S + '1'), np.dtype(S + '4')) + assert_equal(promote_types('b', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u1', S + '1'), np.dtype(S + '3')) + assert_equal(promote_types('u1', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u2', S + '1'), np.dtype(S + '5')) + assert_equal(promote_types('u2', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u4', S + '1'), np.dtype(S + '10')) + assert_equal(promote_types('u4', S + '30'), np.dtype(S + '30')) + assert_equal(promote_types('u8', S + '1'), np.dtype(S + '20')) + assert_equal(promote_types('u8', S + '30'), np.dtype(S + '30')) + # Promote with object: + assert_equal(promote_types('O', S + '30'), np.dtype('O')) + + @pytest.mark.parametrize(["dtype1", "dtype2"], + [[np.dtype("V6"), np.dtype("V10")], # mismatch shape + # Mismatching names: + [np.dtype([("name1", "i8")]), np.dtype([("name2", "i8")])], + ]) + def test_invalid_void_promotion(self, dtype1, dtype2): + with pytest.raises(TypeError): + np.promote_types(dtype1, dtype2) + + @pytest.mark.parametrize(["dtype1", "dtype2"], + [[np.dtype("V10"), np.dtype("V10")], + [np.dtype([("name1", "i8")]), + np.dtype([("name1", np.dtype("i8").newbyteorder())])], + [np.dtype("i8,i8"), np.dtype("i8,>i8")], + [np.dtype("i8,i8"), np.dtype("i4,i4")], + ]) + def test_valid_void_promotion(self, dtype1, dtype2): + assert np.promote_types(dtype1, dtype2) == dtype1 + + @pytest.mark.parametrize("dtype", + list(np.typecodes["All"]) + + ["i,i", "10i", "S3", "S100", "U3", "U100", rational]) + def test_promote_identical_types_metadata(self, dtype): + # The same type passed in twice to promote types always + # preserves 
metadata
+        metadata = {1: 1}
+        dtype = np.dtype(dtype, metadata=metadata)
+
+        res = np.promote_types(dtype, dtype)
+        assert res.metadata == dtype.metadata
+
+        # byte-swapping preserves and makes the dtype native:
+        dtype = dtype.newbyteorder()
+        if dtype.isnative:
+            # The type does not have byte swapping
+            return
+
+        res = np.promote_types(dtype, dtype)
+
+        # Metadata is (currently) generally lost on byte-swapping (except
+        # for unicode).
+        if dtype.char != "U":
+            assert res.metadata is None
+        else:
+            assert res.metadata == metadata
+        assert res.isnative
+
+    @pytest.mark.slow
+    @pytest.mark.filterwarnings('ignore:Promotion of numbers:FutureWarning')
+    @pytest.mark.parametrize(["dtype1", "dtype2"],
+            itertools.product(
+                list(np.typecodes["All"]) +
+                ["i,i", "S3", "S100", "U3", "U100", rational],
+                repeat=2))
+    def test_promote_types_metadata(self, dtype1, dtype2):
+        """Metadata handling in promotion does not appear formalized
+        right now in NumPy. This test should thus be considered to
+        document behaviour, rather than test the correct definition of it.
+
+        This test is very ugly; it was useful for rewriting part of the
+        promotion, but probably should eventually be replaced/deleted
+        (i.e. when metadata handling in promotion is better defined).
+        """
+        metadata1 = {1: 1}
+        metadata2 = {2: 2}
+        dtype1 = np.dtype(dtype1, metadata=metadata1)
+        dtype2 = np.dtype(dtype2, metadata=metadata2)
+
+        try:
+            res = np.promote_types(dtype1, dtype2)
+        except TypeError:
+            # Promotion failed; this test only checks metadata
+            return
+
+        if res.char not in "USV" or res.names is not None or res.shape != ():
+            # All except string dtypes (and unstructured void) lose metadata
+            # on promotion (unless both dtypes are identical).
+            # At some point structured ones did not, but were restrictive.
+            assert res.metadata is None
+        elif res == dtype1:
+            # If the result equals one of the inputs, it is usually returned
+            # unchanged:
+            assert res is dtype1
+        elif res == dtype2:
+            # dtype1 may have been cast to the same type/kind as dtype2.
+            # If the resulting dtype is identical we currently pick the cast
+            # version of dtype1, which lost the metadata:
+            if np.promote_types(dtype1, dtype2.kind) == dtype2:
+                assert res.metadata is None
+            else:
+                assert res.metadata == metadata2
+        else:
+            assert res.metadata is None
+
+        # Try again for byteswapped version
+        dtype1 = dtype1.newbyteorder()
+        assert dtype1.metadata == metadata1
+        res_bs = np.promote_types(dtype1, dtype2)
+        assert res_bs == res
+        assert res_bs.metadata == res.metadata
+
+    def test_can_cast(self):
+        assert_(np.can_cast(np.int32, np.int64))
+        assert_(np.can_cast(np.float64, complex))
+        assert_(not np.can_cast(complex, float))
+
+        assert_(np.can_cast('i8', 'f8'))
+        assert_(not np.can_cast('i8', 'f4'))
+        assert_(np.can_cast('i4', 'S11'))
+
+        assert_(np.can_cast('i8', 'i8', 'no'))
+        assert_(not np.can_cast('<i8', '>i8', 'no'))
+
+        assert_(np.can_cast('<i8', '>i8', 'equiv'))
+        assert_(not np.can_cast('<i4', '>i8', 'equiv'))
+
+        assert_(np.can_cast('<i4', '>i8', 'safe'))
+        assert_(not np.can_cast('<i8', '>i4', 'safe'))
+
+        assert_(np.can_cast('<i8', '>i4', 'same_kind'))
+        assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
+
+        assert_(np.can_cast('<i8', '>u4', 'unsafe'))
+
+        assert_(np.can_cast('bool', 'S5'))
+        assert_(not np.can_cast('bool', 'S4'))
+
+        assert_(np.can_cast('b', 'S4'))
+        assert_(not np.can_cast('b', 'S3'))
+
+        assert_(np.can_cast('u1', 'S3'))
+        assert_(not np.can_cast('u1', 'S2'))
+        assert_(np.can_cast('u2', 'S5'))
+        assert_(not np.can_cast('u2', 'S4'))
+        assert_(np.can_cast('u4', 'S10'))
+        assert_(not np.can_cast('u4', 'S9'))
+        assert_(np.can_cast('u8', 'S20'))
+        assert_(not np.can_cast('u8', 'S19'))
+
+        assert_(np.can_cast('i1', 'S4'))
+        assert_(not np.can_cast('i1', 'S3'))
+        assert_(np.can_cast('i2', 'S6'))
+        assert_(not np.can_cast('i2', 'S5'))
+        assert_(np.can_cast('i4', 'S11'))
+        assert_(not np.can_cast('i4', 'S10'))
+        assert_(np.can_cast('i8', 'S21'))
+        assert_(not np.can_cast('i8', 'S20'))
+
+        assert_(np.can_cast('bool', 'S5'))
+        assert_(not np.can_cast('bool', 'S4'))
+
+        assert_(np.can_cast('b', 'U4'))
+        assert_(not np.can_cast('b', 'U3'))
+
+        assert_(np.can_cast('u1', 'U3'))
+        assert_(not np.can_cast('u1', 'U2'))
+        assert_(np.can_cast('u2', 'U5'))
+        assert_(not np.can_cast('u2', 'U4'))
+        assert_(np.can_cast('u4', 'U10'))
+        assert_(not np.can_cast('u4', 'U9'))
+        assert_(np.can_cast('u8', 'U20'))
+        assert_(not np.can_cast('u8', 'U19'))
+
+        assert_(np.can_cast('i1', 'U4'))
+        assert_(not np.can_cast('i1', 'U3'))
+        assert_(np.can_cast('i2', 'U6'))
+        assert_(not np.can_cast('i2', 'U5'))
+        assert_(np.can_cast('i4', 'U11'))
+        assert_(not np.can_cast('i4', 'U10'))
+        assert_(np.can_cast('i8', 'U21'))
+        assert_(not np.can_cast('i8', 'U20'))
+
+        assert_raises(TypeError, np.can_cast, 'i4', None)
+        assert_raises(TypeError, np.can_cast, None, 'i4')
+
+        # Also test keyword arguments
+        assert_(np.can_cast(from_=np.int32, to=np.int64))
+
+    def test_can_cast_simple_to_structured(self):
+        # Non-structured can only be cast to structured in 'unsafe' mode.
+        assert_(not np.can_cast('i4', 'i4,i4'))
+        assert_(not np.can_cast('i4', 'i4,i2'))
+        assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
+        assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
+        # Even if there is just a single field which is OK.
+        assert_(not np.can_cast('i2', [('f1', 'i4')]))
+        assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
+        assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
+        # It should be the same for recursive structured or subarrays.
+ assert_(not np.can_cast('i2', [('f1', 'i4,i4')])) + assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe')) + assert_(not np.can_cast('i2', [('f1', '(2,3)i4')])) + assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe')) + + def test_can_cast_structured_to_simple(self): + # Need unsafe casting for structured to simple. + assert_(not np.can_cast([('f1', 'i4')], 'i4')) + assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe')) + assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe')) + # Since it is unclear what is being cast, multiple fields to + # single should not work even for unsafe casting. + assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe')) + # But a single field inside a single field is OK. + assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4')) + assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe')) + # And a subarray is fine too - it will just take the first element + # (arguably not very consistently; might also take the first field). + assert_(not np.can_cast([('f0', '(3,)i4')], 'i4')) + assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe')) + # But a structured subarray with multiple fields should fail. + assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4', + casting='unsafe')) + + def test_can_cast_values(self): + # With NumPy 2 and NEP 50, can_cast errors on Python scalars. We could + # define this as (usually safe) at some point, and already do so + # in `copyto` and ufuncs (but there an error is raised if the integer + # is out of bounds and a warning for out-of-bound floats). + # Raises even for unsafe, previously checked within range (for floats + # that was approximately whether it would overflow to inf). + with pytest.raises(TypeError): + np.can_cast(4, "int8", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4.0, "float64", casting="unsafe") + + with pytest.raises(TypeError): + np.can_cast(4j, "complex128", casting="unsafe") + + @pytest.mark.parametrize("dtype", + list("?bhilqBHILQefdgFDG") + [rational]) + def test_can_cast_scalars(self, dtype): + # Basic test to ensure that scalars are supported in can-cast + # (does not check behavior exhaustively). + dtype = np.dtype(dtype) + scalar = dtype.type(0) + + assert np.can_cast(scalar, "int64") == np.can_cast(dtype, "int64") + assert np.can_cast(scalar, "float32", casting="unsafe") + + +# Custom exception class to test exception propagation in fromiter +class NIterError(Exception): + pass + + +class TestFromiter: + def makegen(self): + return (x**2 for x in range(24)) + + def test_types(self): + ai32 = np.fromiter(self.makegen(), np.int32) + ai64 = np.fromiter(self.makegen(), np.int64) + af = np.fromiter(self.makegen(), float) + assert_(ai32.dtype == np.dtype(np.int32)) + assert_(ai64.dtype == np.dtype(np.int64)) + assert_(af.dtype == np.dtype(float)) + + def test_lengths(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(len(a) == len(expected)) + assert_(len(a20) == 20) + assert_raises(ValueError, np.fromiter, + self.makegen(), int, len(expected) + 10) + + def test_values(self): + expected = np.array(list(self.makegen())) + a = np.fromiter(self.makegen(), int) + a20 = np.fromiter(self.makegen(), int, 20) + assert_(np.all(a == expected, axis=0)) + assert_(np.all(a20 == expected[:20], axis=0)) + + def load_data(self, n, eindex): + # Utility method for the issue 2592 tests. + # Raise an exception at the desired index in the iterator. 
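+        # (This is used by test_2592 below to check that np.fromiter
+        # propagates the iterator's exception rather than swallowing it.)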
+            for e in range(n):
+                if e == eindex:
+                    raise NIterError(f'error at index {eindex}')
+                yield e
+
+    @pytest.mark.parametrize("dtype", [int, object])
+    @pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)])
+    def test_2592(self, count, error_index, dtype):
+        # Test that iteration exceptions are correctly raised. The
+        # data/generator has `count` elements but errors at `error_index`.
+        iterable = self.load_data(count, error_index)
+        with pytest.raises(NIterError):
+            np.fromiter(iterable, dtype=dtype, count=count)
+
+    @pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"])
+    def test_empty_not_structured(self, dtype):
+        # Note: "S0" could be allowed at some point, as long as "S" (without
+        # any length) is rejected.
+        with pytest.raises(ValueError, match="Must specify length"):
+            np.fromiter([], dtype=dtype)
+
+    @pytest.mark.parametrize(["dtype", "data"],
+            [("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
+             ("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
+             ("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
+             # subarray dtypes (important because their dimensions end up
+             # in the result array's dimensions):
+             ("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
+             (np.dtype(("O", (2, 3))),
+              [((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])])
+    @pytest.mark.parametrize("length_hint", [0, 1])
+    def test_growth_and_complicated_dtypes(self, dtype, data, length_hint):
+        dtype = np.dtype(dtype)
+
+        data = data * 100  # make sure we realloc a bit
+
+        class MyIter:
+            # Class/example from gh-15789
+            def __length_hint__(self):
+                # only required to be an estimate; this is legal
+                return length_hint  # 0 or 1
+
+            def __iter__(self):
+                return iter(data)
+
+        res = np.fromiter(MyIter(), dtype=dtype)
+        expected = np.array(data, dtype=dtype)
+
+        assert_array_equal(res, expected)
+
+    def test_empty_result(self):
+        class MyIter:
+            def __length_hint__(self):
+                return 10
+
+            def __iter__(self):
+                return iter([])  # actual iterator is empty.
+
+        res = np.fromiter(MyIter(), dtype="d")
+        assert res.shape == (0,)
+        assert res.dtype == "d"
+
+    def test_too_few_items(self):
+        msg = "iterator too short: Expected 10 but iterator had only 3 items."
+ with pytest.raises(ValueError, match=msg): + np.fromiter([1, 2, 3], count=10, dtype=int) + + def test_failed_itemsetting(self): + with pytest.raises(TypeError): + np.fromiter([1, None, 3], dtype=int) + + # The following manages to hit somewhat trickier code paths: + iterable = ((2, 3, 4) for i in range(5)) + with pytest.raises(ValueError): + np.fromiter(iterable, dtype=np.dtype((int, 2))) + +class TestNonzero: + def test_nonzero_trivial(self): + assert_equal(np.count_nonzero(np.array([])), 0) + assert_equal(np.count_nonzero(np.array([], dtype='?')), 0) + assert_equal(np.nonzero(np.array([])), ([],)) + + assert_equal(np.count_nonzero(np.array([0])), 0) + assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0) + assert_equal(np.nonzero(np.array([0])), ([],)) + + assert_equal(np.count_nonzero(np.array([1])), 1) + assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1) + assert_equal(np.nonzero(np.array([1])), ([0],)) + + def test_nonzero_zerodim(self): + err_msg = "Calling nonzero on 0d arrays is not allowed" + with assert_raises_regex(ValueError, err_msg): + np.nonzero(np.array(0)) + with assert_raises_regex(ValueError, err_msg): + np.array(1).nonzero() + + def test_nonzero_onedim(self): + x = np.array([1, 0, 2, -1, 0, 0, 8]) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.count_nonzero(x), 4) + assert_equal(np.nonzero(x), ([0, 2, 3, 6],)) + + # x = np.array([(1, 2), (0, 0), (1, 1), (-1, 3), (0, 7)], + # dtype=[('a', 'i4'), ('b', 'i2')]) + x = np.array([(1, 2, -5, -3), (0, 0, 2, 7), (1, 1, 0, 1), (-1, 3, 1, 0), (0, 7, 0, 4)], + dtype=[('a', 'i4'), ('b', 'i2'), ('c', 'i1'), ('d', 'i8')]) + assert_equal(np.count_nonzero(x['a']), 3) + assert_equal(np.count_nonzero(x['b']), 4) + assert_equal(np.count_nonzero(x['c']), 3) + assert_equal(np.count_nonzero(x['d']), 4) + assert_equal(np.nonzero(x['a']), ([0, 2, 3],)) + assert_equal(np.nonzero(x['b']), ([0, 2, 3, 4],)) + + def test_nonzero_twodim(self): + x = np.array([[0, 1, 0], [2, 0, 3]]) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2])) + + x = np.eye(3) + assert_equal(np.count_nonzero(x.astype('i1')), 3) + assert_equal(np.count_nonzero(x.astype('i2')), 3) + assert_equal(np.count_nonzero(x.astype('i4')), 3) + assert_equal(np.count_nonzero(x.astype('i8')), 3) + assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2])) + + x = np.array([[(0, 1), (0, 0), (1, 11)], + [(1, 1), (1, 0), (0, 0)], + [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')]) + assert_equal(np.count_nonzero(x['a']), 4) + assert_equal(np.count_nonzero(x['b']), 5) + assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1])) + assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2])) + + assert_(not x['a'].T.flags.aligned) + assert_equal(np.count_nonzero(x['a'].T), 4) + assert_equal(np.count_nonzero(x['b'].T), 5) + assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0])) + assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2])) + + def test_sparse(self): + # test special sparse condition boolean code path + for i in range(20): + c = np.zeros(200, dtype=bool) + c[i::20] = True + assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20)) + + c = np.zeros(400, dtype=bool) + c[10 + i:20 + i] = True + c[20 + i * 2] = True + assert_equal(np.nonzero(c)[0], + np.concatenate((np.arange(10 + i, 20 + i), [20 + i * 2]))) + + 
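+    # Illustrative sketch (added for clarity, not part of the original
+    # suite): the sparse code path exercised above handles boolean masks
+    # whose True entries are far apart, e.g.:
+    #
+    #     c = np.zeros(200, dtype=bool)
+    #     c[3::20] = True
+    #     np.nonzero(c)[0]
+    #     # -> array([  3,  23,  43,  63,  83, 103, 123, 143, 163, 183])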
@pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_nonzero_float_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = ((2**33) * rng.normal(size=100)).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + + @pytest.mark.parametrize('dtype', [bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) + def test_nonzero_integer_dtypes(self, dtype): + rng = np.random.default_rng(seed=10) + x = rng.integers(0, 255, size=100).astype(dtype) + x[rng.choice(50, size=100)] = 0 + idxs = np.nonzero(x)[0] + assert_equal(np.array_equal(np.where(x != 0)[0], idxs), True) + + def test_return_type(self): + class C(np.ndarray): + pass + + for view in (C, np.ndarray): + for nd in range(1, 4): + shape = tuple(range(2, 2 + nd)) + x = np.arange(np.prod(shape)).reshape(shape).view(view) + for nzx in (np.nonzero(x), x.nonzero()): + for nzx_i in nzx: + assert_(type(nzx_i) is np.ndarray) + assert_(nzx_i.flags.writeable) + + def test_count_nonzero_axis(self): + # Basic check of functionality + m = np.array([[0, 1, 7, 0, 0], [3, 0, 0, 2, 19]]) + + expected = np.array([1, 1, 1, 1, 1]) + assert_equal(np.count_nonzero(m, axis=0), expected) + + expected = np.array([2, 3]) + assert_equal(np.count_nonzero(m, axis=1), expected) + + assert_raises(ValueError, np.count_nonzero, m, axis=(1, 1)) + assert_raises(TypeError, np.count_nonzero, m, axis='foo') + assert_raises(AxisError, np.count_nonzero, m, axis=3) + assert_raises(TypeError, np.count_nonzero, + m, axis=np.array([[1], [2]])) + + def test_count_nonzero_axis_all_dtypes(self): + # More thorough test that the axis argument is respected + # for all dtypes and responds correctly when presented with + # either integer or tuple arguments for axis + msg = "Mismatch for dtype: %s" + + def assert_equal_w_dt(a, b, err_msg): + assert_equal(a.dtype, b.dtype, err_msg=err_msg) + assert_equal(a, b, err_msg=err_msg) + + for dt in np.typecodes['All']: + err_msg = msg % (np.dtype(dt).name,) + + if dt != 'V': + if dt != 'M': + m = np.zeros((3, 3), dtype=dt) + n = np.ones(1, dtype=dt) + + m[0, 0] = n[0] + m[1, 0] = n[0] + + else: # np.zeros doesn't work for np.datetime64 + m = np.array(['1970-01-01'] * 9) + m = m.reshape((3, 3)) + + m[0, 0] = '1970-01-12' + m[1, 0] = '1970-01-12' + m = m.astype(dt) + + expected = np.array([2, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([1, 1, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(2) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, err_msg=err_msg) + + if dt == 'V': + # There are no 'nonzero' objects for np.void, so the testing + # setup is slightly different for this dtype + m = np.array([np.void(1)] * 6).reshape((2, 3)) + + expected = np.array([0, 0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=0), + expected, err_msg=err_msg) + + expected = np.array([0, 0], dtype=np.intp) + assert_equal_w_dt(np.count_nonzero(m, axis=1), + expected, err_msg=err_msg) + + expected = np.array(0) + assert_equal(np.count_nonzero(m, axis=(0, 1)), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m, axis=None), + expected, err_msg=err_msg) + assert_equal(np.count_nonzero(m), + expected, 
err_msg=err_msg) + + def test_count_nonzero_axis_consistent(self): + # Check that the axis behaviour for valid axes in + # non-special cases is consistent (and therefore + # correct) by checking it against an integer array + # that is then casted to the generic object dtype + from itertools import combinations, permutations + + axis = (0, 1, 2, 3) + size = (5, 5, 5, 5) + msg = "Mismatch for axis: %s" + + rng = np.random.RandomState(1234) + m = rng.randint(-100, 100, size=size) + n = m.astype(object) + + for length in range(len(axis)): + for combo in combinations(axis, length): + for perm in permutations(combo): + assert_equal( + np.count_nonzero(m, axis=perm), + np.count_nonzero(n, axis=perm), + err_msg=msg % (perm,)) + + def test_countnonzero_axis_empty(self): + a = np.array([[0, 0, 1], [1, 0, 1]]) + assert_equal(np.count_nonzero(a, axis=()), a.astype(bool)) + + def test_countnonzero_keepdims(self): + a = np.array([[0, 0, 1, 0], + [0, 3, 5, 0], + [7, 9, 2, 0]]) + assert_equal(np.count_nonzero(a, axis=0, keepdims=True), + [[1, 2, 3, 0]]) + assert_equal(np.count_nonzero(a, axis=1, keepdims=True), + [[1], [2], [3]]) + assert_equal(np.count_nonzero(a, keepdims=True), + [[6]]) + + def test_array_method(self): + # Tests that the array method + # call to nonzero works + m = np.array([[1, 0, 0], [4, 0, 6]]) + tgt = [[0, 1, 1], [0, 0, 2]] + + assert_equal(m.nonzero(), tgt) + + def test_nonzero_invalid_object(self): + # gh-9295 + a = np.array([np.array([1, 2]), 3], dtype=object) + assert_raises(ValueError, np.nonzero, a) + + class BoolErrors: + def __bool__(self): + raise ValueError("Not allowed") + + assert_raises(ValueError, np.nonzero, np.array([BoolErrors()])) + + def test_nonzero_sideeffect_safety(self): + # gh-13631 + class FalseThenTrue: + _val = False + + def __bool__(self): + try: + return self._val + finally: + self._val = True + + class TrueThenFalse: + _val = True + + def __bool__(self): + try: + return self._val + finally: + self._val = False + + # result grows on the second pass + a = np.array([True, FalseThenTrue()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[True], [FalseThenTrue()]]) + assert_raises(RuntimeError, np.nonzero, a) + + # result shrinks on the second pass + a = np.array([False, TrueThenFalse()]) + assert_raises(RuntimeError, np.nonzero, a) + + a = np.array([[False], [TrueThenFalse()]]) + assert_raises(RuntimeError, np.nonzero, a) + + def test_nonzero_sideffects_structured_void(self): + # Checks that structured void does not mutate alignment flag of + # original array. + arr = np.zeros(5, dtype="i1,i8,i8") # `ones` may short-circuit + assert arr.flags.aligned # structs are considered "aligned" + assert not arr["f2"].flags.aligned + # make sure that nonzero/count_nonzero do not flip the flag: + np.nonzero(arr) + assert arr.flags.aligned + np.count_nonzero(arr) + assert arr.flags.aligned + + def test_nonzero_exception_safe(self): + # gh-13930 + + class ThrowsAfter: + def __init__(self, iters): + self.iters_left = iters + + def __bool__(self): + if self.iters_left == 0: + raise ValueError("called `iters` times") + + self.iters_left -= 1 + return True + + """ + Test that a ValueError is raised instead of a SystemError + + If the __bool__ function is called after the error state is set, + Python (cpython) will raise a SystemError. 
+ """ + + # assert that an exception in first pass is handled correctly + a = np.array([ThrowsAfter(5)] * 10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for 1-dimensional loop + a = np.array([ThrowsAfter(15)] * 10) + assert_raises(ValueError, np.nonzero, a) + + # raise exception in second pass for n-dimensional loop + a = np.array([[ThrowsAfter(15)]] * 10) + assert_raises(ValueError, np.nonzero, a) + + def test_nonzero_byteorder(self): + values = [0., -0., 1, float('nan'), 0, 1, + np.float16(0), np.float16(12.3)] + expected_values = [0, 0, 1, 1, 0, 1, 0, 1] + + for value, expected in zip(values, expected_values): + A = np.array([value]) + A_byteswapped = (A.view(A.dtype.newbyteorder()).byteswap()).copy() + + assert np.count_nonzero(A) == expected + assert np.count_nonzero(A_byteswapped) == expected + + def test_count_nonzero_non_aligned_array(self): + # gh-27523 + b = np.zeros(64 + 1, dtype=np.int8)[1:] + b = b.view(int) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 + + b = np.zeros(64 + 1, dtype=np.float16)[1:] + b = b.view(float) + b[:] = np.arange(b.size) + b[::2] = 0 + assert b.flags.aligned is False + assert np.count_nonzero(b) == b.size / 2 + + +class TestIndex: + def test_boolean(self): + a = rand(3, 5, 8) + V = rand(5, 8) + g1 = randint(0, 5, size=15) + g2 = randint(0, 8, size=15) + V[g1, g2] = -V[g1, g2] + assert_((np.array([a[0][V > 0], a[1][V > 0], a[2][V > 0]]) == a[:, V > 0]).all()) + + def test_boolean_edgecase(self): + a = np.array([], dtype='int32') + b = np.array([], dtype='bool') + c = a[b] + assert_equal(c, []) + assert_equal(c.dtype, np.dtype('int32')) + + +class TestBinaryRepr: + def test_zero(self): + assert_equal(np.binary_repr(0), '0') + + def test_positive(self): + assert_equal(np.binary_repr(10), '1010') + assert_equal(np.binary_repr(12522), + '11000011101010') + assert_equal(np.binary_repr(10736848), + '101000111101010011010000') + + def test_negative(self): + assert_equal(np.binary_repr(-1), '-1') + assert_equal(np.binary_repr(-10), '-1010') + assert_equal(np.binary_repr(-12522), + '-11000011101010') + assert_equal(np.binary_repr(-10736848), + '-101000111101010011010000') + + def test_sufficient_width(self): + assert_equal(np.binary_repr(0, width=5), '00000') + assert_equal(np.binary_repr(10, width=7), '0001010') + assert_equal(np.binary_repr(-5, width=7), '1111011') + + def test_neg_width_boundaries(self): + # see gh-8670 + + # Ensure that the example in the issue does not + # break before proceeding to a more thorough test. + assert_equal(np.binary_repr(-128, width=8), '10000000') + + for width in range(1, 11): + num = -2**(width - 1) + exp = '1' + (width - 1) * '0' + assert_equal(np.binary_repr(num, width=width), exp) + + def test_large_neg_int64(self): + # See gh-14289. 
+        assert_equal(np.binary_repr(np.int64(-2**62), width=64),
+                     '11' + '0' * 62)
+
+
+class TestBaseRepr:
+    def test_base3(self):
+        assert_equal(np.base_repr(3**5, 3), '100000')
+
+    def test_positive(self):
+        assert_equal(np.base_repr(12, 10), '12')
+        assert_equal(np.base_repr(12, 10, 4), '000012')
+        assert_equal(np.base_repr(12, 4), '30')
+        assert_equal(np.base_repr(3731624803700888, 36), '10QR0ROFCEW')
+
+    def test_negative(self):
+        assert_equal(np.base_repr(-12, 10), '-12')
+        assert_equal(np.base_repr(-12, 10, 4), '-000012')
+        assert_equal(np.base_repr(-12, 4), '-30')
+
+    def test_base_range(self):
+        with assert_raises(ValueError):
+            np.base_repr(1, 1)
+        with assert_raises(ValueError):
+            np.base_repr(1, 37)
+
+    def test_minimal_signed_int(self):
+        assert_equal(np.base_repr(np.int8(-128)), '-10000000')
+
+
+def _test_array_equal_parametrizations():
+    """
+    We pre-create arrays, as we sometimes want to pass the same instance
+    and sometimes not. Passing the same instance may not mean the arrays
+    are equal, especially when they contain NaNs.
+    """
+    # these are 0-d arrays; they used to be a special case
+    # where (e0 == e0).all() would raise
+    e0 = np.array(0, dtype="int")
+    e1 = np.array(1, dtype="float")
+    # x, y, equal_nan, expected_result
+    yield (e0, e0.copy(), None, True)
+    yield (e0, e0.copy(), False, True)
+    yield (e0, e0.copy(), True, True)
+
+    #
+    yield (e1, e1.copy(), None, True)
+    yield (e1, e1.copy(), False, True)
+    yield (e1, e1.copy(), True, True)
+
+    # Non-nanable - those cannot hold NaNs
+    a12 = np.array([1, 2])
+    a12b = a12.copy()
+    a123 = np.array([1, 2, 3])
+    a13 = np.array([1, 3])
+    a34 = np.array([3, 4])
+
+    aS1 = np.array(["a"], dtype="S1")
+    aS1b = aS1.copy()
+    aS1u4 = np.array([("a", 1)], dtype="S1,u4")
+    aS1u4b = aS1u4.copy()
+
+    yield (a12, a12b, None, True)
+    yield (a12, a12, None, True)
+    yield (a12, a123, None, False)
+    yield (a12, a34, None, False)
+    yield (a12, a13, None, False)
+    yield (aS1, aS1b, None, True)
+    yield (aS1, aS1, None, True)
+
+    # Non-float dtype - equal_nan should have no effect.
+    yield (a123, a123, None, True)
+    yield (a123, a123, False, True)
+    yield (a123, a123, True, True)
+    yield (a123, a123.copy(), None, True)
+    yield (a123, a123.copy(), False, True)
+    yield (a123, a123.copy(), True, True)
+    yield (a123.astype("float"), a123.astype("float"), None, True)
+    yield (a123.astype("float"), a123.astype("float"), False, True)
+    yield (a123.astype("float"), a123.astype("float"), True, True)
+
+    # these can hold NaNs
+    b1 = np.array([1, 2, np.nan])
+    b2 = np.array([1, np.nan, 2])
+    b3 = np.array([1, 2, np.inf])
+    b4 = np.array(np.nan)
+
+    # instances are the same
+    yield (b1, b1, None, False)
+    yield (b1, b1, False, False)
+    yield (b1, b1, True, True)
+
+    # equal but not same instance
+    yield (b1, b1.copy(), None, False)
+    yield (b1, b1.copy(), False, False)
+    yield (b1, b1.copy(), True, True)
+
+    # same once stripped of NaNs
+    yield (b1, b2, None, False)
+    yield (b1, b2, False, False)
+    yield (b1, b2, True, False)
+
+    # NaNs are not conflated with infs
+    yield (b1, b3, None, False)
+    yield (b1, b3, False, False)
+    yield (b1, b3, True, False)
+
+    # all NaN
+    yield (b4, b4, None, False)
+    yield (b4, b4, False, False)
+    yield (b4, b4, True, True)
+    yield (b4, b4.copy(), None, False)
+    yield (b4, b4.copy(), False, False)
+    yield (b4, b4.copy(), True, True)
+
+    t1 = b1.astype("timedelta64")
+    t2 = b2.astype("timedelta64")
+
+    # Timedeltas are particular
+    yield (t1, t1, None, False)
+    yield (t1, t1, False, False)
+    yield (t1, t1, True, True)
+
+    yield (t1, t1.copy(), None, False)
+    yield (t1, t1.copy(), False, False)
+    yield (t1, t1.copy(), True, True)
+
+    yield (t1, t2, None, False)
+    yield (t1, t2, False, False)
+    yield (t1, t2, True, False)
+
+    # Multi-dimensional array
+    md1 = np.array([[0, 1], [np.nan, 1]])
+
+    yield (md1, md1, None, False)
+    yield (md1, md1, False, False)
+    yield (md1, md1, True, True)
+    yield (md1, md1.copy(), None, False)
+    yield (md1, md1.copy(), False, False)
+    yield (md1, md1.copy(), True, True)
+    # both complexes are nan+nan*1j but the same instance
+    cplx1, cplx2 = [np.array([np.nan + np.nan * 1j])] * 2
+
+    # only the real or the imaginary part is NaN
+    cplx3, cplx4 = np.complex64(1, np.nan), np.complex64(np.nan, 1)
+
+    # Complex values
+    yield (cplx1, cplx2, None, False)
+    yield (cplx1, cplx2, False, False)
+    yield (cplx1, cplx2, True, True)
+
+    # Complex values, 1+nan, nan+1j
+    yield (cplx3, cplx4, None, False)
+    yield (cplx3, cplx4, False, False)
+    yield (cplx3, cplx4, True, True)
+
+
+class TestArrayComparisons:
+    @pytest.mark.parametrize(
+        "bx,by,equal_nan,expected", _test_array_equal_parametrizations()
+    )
+    def test_array_equal_equal_nan(self, bx, by, equal_nan, expected):
+        """
+        This tests array_equal for a few combinations:
+
+        - whether the two inputs are the same object or not (the same
+          object may not compare equal if it contains NaNs)
+        - whether NaNs should be considered equal or not
+
+        """
+        if equal_nan is None:
+            res = np.array_equal(bx, by)
+        else:
+            res = np.array_equal(bx, by, equal_nan=equal_nan)
+        assert_(res is expected)
+        assert_(type(res) is bool)
+
+    def test_array_equal_different_scalar_types(self):
+        # https://github.com/numpy/numpy/issues/27271
+        a = np.array("foo")
+        b = np.array(1)
+        assert not np.array_equal(a, b)
+        assert not np.array_equiv(a, b)
+
+    def test_none_compares_elementwise(self):
+        a = np.array([None, 1, None], dtype=object)
+        assert_equal(a == None, [True, False, True])  # noqa: E711
+        assert_equal(a != None, [False, True, False])  # noqa: E711
+
+        a = np.ones(3)
+        assert_equal(a == None, [False, False, False])  # noqa: E711
+        assert_equal(a != None, [True, True, True])  # noqa: E711
+
+    def test_array_equiv(self):
+        res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
+        assert_(res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([1, 2, 3]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([3, 4]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([1, 3]))
+        assert_(not res)
+        assert_(type(res) is bool)
+
+        res = np.array_equiv(np.array([1, 1]), np.array([1]))
+        assert_(res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 1]), np.array([[1], [1]]))
+        assert_(res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([2]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]), np.array([[1], [2]]))
+        assert_(not res)
+        assert_(type(res) is bool)
+        res = np.array_equiv(np.array([1, 2]),
+                             np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
+        assert_(not res)
+        assert_(type(res) is bool)
+
+    @pytest.mark.parametrize("dtype", ["V0", "V3", "V10"])
+    def test_compare_unstructured_voids(self, dtype):
+        zeros = np.zeros(3, dtype=dtype)
+
+        assert_array_equal(zeros, zeros)
+        assert not (zeros != zeros).any()
+
+        if dtype == "V0":
+            # Can't test != of actually different data
+            return
+
+        nonzeros = np.array([b"1", b"2", b"3"], dtype=dtype)
+
+        assert not (zeros == nonzeros).any()
+        assert (zeros !=
nonzeros).all() + + +def assert_array_strict_equal(x, y): + assert_array_equal(x, y) + # Check flags, 32 bit arches typically don't provide 16 byte alignment + if ((x.dtype.alignment <= 8 or + np.intp().dtype.itemsize != 4) and + sys.platform != 'win32'): + assert_(x.flags == y.flags) + else: + assert_(x.flags.owndata == y.flags.owndata) + assert_(x.flags.writeable == y.flags.writeable) + assert_(x.flags.c_contiguous == y.flags.c_contiguous) + assert_(x.flags.f_contiguous == y.flags.f_contiguous) + assert_(x.flags.writebackifcopy == y.flags.writebackifcopy) + # check endianness + assert_(x.dtype.isnative == y.dtype.isnative) + + +class TestClip: + def setup_method(self): + self.nr = 5 + self.nc = 3 + + def fastclip(self, a, m, M, out=None, **kwargs): + return a.clip(m, M, out=out, **kwargs) + + def clip(self, a, m, M, out=None): + # use a.choose to verify fastclip result + selector = np.less(a, m) + 2 * np.greater(a, M) + return selector.choose((a, m, M), out=out) + + # Handy functions + def _generate_data(self, n, m): + return randn(n, m) + + def _generate_data_complex(self, n, m): + return randn(n, m) + 1.j * rand(n, m) + + def _generate_flt_data(self, n, m): + return (randn(n, m)).astype(np.float32) + + def _neg_byteorder(self, a): + a = np.asarray(a) + if sys.byteorder == 'little': + a = a.astype(a.dtype.newbyteorder('>')) + else: + a = a.astype(a.dtype.newbyteorder('<')) + return a + + def _generate_non_native_data(self, n, m): + data = randn(n, m) + data = self._neg_byteorder(data) + assert_(not data.dtype.isnative) + return data + + def _generate_int_data(self, n, m): + return (10 * rand(n, m)).astype(np.int64) + + def _generate_int32_data(self, n, m): + return (10 * rand(n, m)).astype(np.int32) + + # Now the real test cases + + @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO') + def test_ones_pathological(self, dtype): + # for preservation of behavior described in + # gh-12519; amin > amax behavior may still change + # in the future + arr = np.ones(10, dtype=dtype) + expected = np.zeros(10, dtype=dtype) + actual = np.clip(arr, 1, 0) + if dtype == 'O': + assert actual.tolist() == expected.tolist() + else: + assert_equal(actual, expected) + + def test_simple_double(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.1 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_int(self): + # Test native int input with scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(int) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_array_double(self): + # Test native double input with array min/max. + a = self._generate_data(self.nr, self.nc) + m = np.zeros(a.shape) + M = m + 0.5 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_simple_nonnative(self): + # Test non native double input with scalar min/max. + # Test native double input with non native double scalar min/max. + a = self._generate_non_native_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + # Test native double input with non native double scalar min/max. 
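+        # (the non-native scalar below comes from self._neg_byteorder,
+        # which byte-swaps a 0-d array)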
+ a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = self._neg_byteorder(0.6) + assert_(not M.dtype.isnative) + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_equal(ac, act) + + def test_simple_complex(self): + # Test native complex input with native double scalar min/max. + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data_complex(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + # Test native input with complex double scalar min/max. + a = 3 * self._generate_data(self.nr, self.nc) + m = -0.5 + 1.j + M = 1. + 2.j + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_clip_complex(self): + # Address Issue gh-5354 for clipping complex arrays + # Test native complex input without explicit min/max + # ie, either min=None or max=None + a = np.ones(10, dtype=complex) + m = a.min() + M = a.max() + am = self.fastclip(a, m, None) + aM = self.fastclip(a, None, M) + assert_array_strict_equal(am, a) + assert_array_strict_equal(aM, a) + + def test_clip_non_contig(self): + # Test clip for non contiguous native input and native scalar min/max. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = self.fastclip(a, -1.6, 1.7) + act = self.clip(a, -1.6, 1.7) + assert_array_strict_equal(ac, act) + + def test_simple_out(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + @pytest.mark.parametrize("casting", [None, "unsafe"]) + def test_simple_int32_inout(self, casting): + # Test native int32 input with double min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + if casting is None: + with pytest.raises(TypeError): + self.fastclip(a, m, M, ac, casting=casting) + else: + # explicitly passing "unsafe" will silence warning + self.fastclip(a, m, M, ac, casting=casting) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_out(self): + # Test native int32 input with int32 scalar min/max and int64 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int64_inout(self): + # Test native int32 input with double array min/max and int32 out. + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_int32_out(self): + # Test native double input with scalar min/max and int out. + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_simple_inplace_01(self): + # Test native double input with array min/max in-place. 
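+        # (in-place: `a` is passed as its own out= argument, so clip must
+        # handle the input and output aliasing)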
+ a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_simple_inplace_02(self): + # Test native double input with scalar min/max in-place. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_noncontig_inplace(self): + # Test non contiguous double input with double scalar min/max in-place. + a = self._generate_data(self.nr * 2, self.nc * 3) + a = a[::2, ::3] + assert_(not a.flags['F_CONTIGUOUS']) + assert_(not a.flags['C_CONTIGUOUS']) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(ac, m, M, ac) + assert_array_equal(a, ac) + + def test_type_cast_01(self): + # Test native double input with scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_02(self): + # Test native int32 input with int32 scalar min/max. + a = self._generate_int_data(self.nr, self.nc) + a = a.astype(np.int32) + m = -2 + M = 4 + ac = self.fastclip(a, m, M) + act = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_03(self): + # Test native int32 input with float64 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = -2 + M = 4 + ac = self.fastclip(a, np.float64(m), np.float64(M)) + act = self.clip(a, np.float64(m), np.float64(M)) + assert_array_strict_equal(ac, act) + + def test_type_cast_04(self): + # Test native int32 input with float32 scalar min/max. + a = self._generate_int32_data(self.nr, self.nc) + m = np.float32(-2) + M = np.float32(4) + act = self.fastclip(a, m, M) + ac = self.clip(a, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_05(self): + # Test native int32 with double arrays min/max. + a = self._generate_int_data(self.nr, self.nc) + m = -0.5 + M = 1. + ac = self.fastclip(a, m * np.zeros(a.shape), M) + act = self.clip(a, m * np.zeros(a.shape), M) + assert_array_strict_equal(ac, act) + + def test_type_cast_06(self): + # Test native with NON native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = 0.5 + m_s = self._neg_byteorder(m) + M = 1. + act = self.clip(a, m_s, M) + ac = self.fastclip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_07(self): + # Test NON native with native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + act = a_s.clip(m, M) + ac = self.fastclip(a_s, m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_08(self): + # Test NON native with native scalar min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 1. + a_s = self._neg_byteorder(a) + assert_(not a_s.dtype.isnative) + ac = self.fastclip(a_s, m, M) + act = a_s.clip(m, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_09(self): + # Test native with NON native array min/max. + a = self._generate_data(self.nr, self.nc) + m = -0.5 * np.ones(a.shape) + M = 1. + m_s = self._neg_byteorder(m) + assert_(not m_s.dtype.isnative) + ac = self.fastclip(a, m_s, M) + act = self.clip(a, m_s, M) + assert_array_strict_equal(ac, act) + + def test_type_cast_10(self): + # Test native int32 with float min/max and float out for output argument. 
+ a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.float32(-0.5) + M = np.float32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_type_cast_11(self): + # Test non native with native scalar, min/max, out non native + a = self._generate_non_native_data(self.nr, self.nc) + b = a.copy() + b = b.astype(b.dtype.newbyteorder('>')) + bt = b.copy() + m = -0.5 + M = 1. + self.fastclip(a, m, M, out=b) + self.clip(a, m, M, out=bt) + assert_array_strict_equal(b, bt) + + def test_type_cast_12(self): + # Test native int32 input and min/max and float out + a = self._generate_int_data(self.nr, self.nc) + b = np.zeros(a.shape, dtype=np.float32) + m = np.int32(0) + M = np.int32(1) + act = self.clip(a, m, M, out=b) + ac = self.fastclip(a, m, M, out=b) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple(self): + # Test native double input with scalar min/max + a = self._generate_data(self.nr, self.nc) + m = -0.5 + M = 0.6 + ac = np.zeros(a.shape) + act = np.zeros(a.shape) + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple2(self): + # Test native int32 input with double min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.float64(0) + M = np.float64(2) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_simple_int32(self): + # Test native int32 input with int32 scalar min/max and int64 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.int32(-1) + M = np.int32(1) + ac = np.zeros(a.shape, dtype=np.int64) + act = ac.copy() + self.fastclip(a, m, M, ac) + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_int32(self): + # Test native int32 input with double array min/max and int32 out + a = self._generate_int32_data(self.nr, self.nc) + m = np.zeros(a.shape, np.float64) + M = np.float64(1) + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_array_outint32(self): + # Test native double input with scalar min/max and int out + a = self._generate_data(self.nr, self.nc) + m = -1.0 + M = 2.0 + ac = np.zeros(a.shape, dtype=np.int32) + act = ac.copy() + self.fastclip(a, m, M, out=ac, casting="unsafe") + self.clip(a, m, M, act) + assert_array_strict_equal(ac, act) + + def test_clip_with_out_transposed(self): + # Test that the out argument works when transposed + a = np.arange(16).reshape(4, 4) + out = np.empty_like(a).T + a.clip(4, 10, out=out) + expected = self.clip(a, 4, 10) + assert_array_equal(out, expected) + + def test_clip_with_out_memory_overlap(self): + # Test that the out argument works when it has memory overlap + a = np.arange(16).reshape(4, 4) + ac = a.copy() + a[:-1].clip(4, 10, out=a[1:]) + expected = self.clip(ac[:-1], 4, 10) + assert_array_equal(a[1:], expected) + + def test_clip_inplace_array(self): + # Test native double input with array min/max + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = np.zeros(a.shape) + M = 1.0 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_inplace_simple(self): + # Test native double input with scalar min/max + a = 
self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + self.fastclip(a, m, M, a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a, ac) + + def test_clip_func_takes_out(self): + # Ensure that the clip() function takes an out=argument. + a = self._generate_data(self.nr, self.nc) + ac = a.copy() + m = -0.5 + M = 0.6 + a2 = np.clip(a, m, M, out=a) + self.clip(a, m, M, ac) + assert_array_strict_equal(a2, ac) + assert_(a2 is a) + + def test_clip_nan(self): + d = np.arange(7.) + assert_equal(d.clip(min=np.nan), np.nan) + assert_equal(d.clip(max=np.nan), np.nan) + assert_equal(d.clip(min=np.nan, max=np.nan), np.nan) + assert_equal(d.clip(min=-2, max=np.nan), np.nan) + assert_equal(d.clip(min=np.nan, max=10), np.nan) + + def test_object_clip(self): + a = np.arange(10, dtype=object) + actual = np.clip(a, 1, 5) + expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5]) + assert actual.tolist() == expected.tolist() + + def test_clip_all_none(self): + arr = np.arange(10, dtype=object) + assert_equal(np.clip(arr, None, None), arr) + assert_equal(np.clip(arr), arr) + + def test_clip_invalid_casting(self): + a = np.arange(10, dtype=object) + with assert_raises_regex(ValueError, + 'casting must be one of'): + self.fastclip(a, 1, 8, casting="garbage") + + @pytest.mark.parametrize("amin, amax", [ + # two scalars + (1, 0), + # mix scalar and array + (1, np.zeros(10)), + # two arrays + (np.ones(10), np.zeros(10)), + ]) + def test_clip_value_min_max_flip(self, amin, amax): + a = np.arange(10, dtype=np.int64) + # requirement from ufunc_docstrings.py + expected = np.minimum(np.maximum(a, amin), amax) + actual = np.clip(a, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.parametrize("arr, amin, amax, exp", [ + # for a bug in npy_ObjectClip, based on a + # case produced by hypothesis + (np.zeros(10, dtype=object), + 0, + -2**64 + 1, + np.full(10, -2**64 + 1, dtype=object)), + # for bugs in NPY_TIMEDELTA_MAX, based on a case + # produced by hypothesis + (np.zeros(10, dtype='m8') - 1, + 0, + 0, + np.zeros(10, dtype='m8')), + ]) + def test_clip_problem_cases(self, arr, amin, amax, exp): + actual = np.clip(arr, amin, amax) + assert_equal(actual, exp) + + @pytest.mark.parametrize("arr, amin, amax", [ + # problematic scalar nan case from hypothesis + (np.zeros(10, dtype=np.int64), + np.array(np.nan), + np.zeros(10, dtype=np.int32)), + ]) + def test_clip_scalar_nan_propagation(self, arr, amin, amax): + # enforcement of scalar nan propagation for comparisons + # called through clip() + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @pytest.mark.xfail(reason="propagation doesn't match spec") + @pytest.mark.parametrize("arr, amin, amax", [ + (np.array([1] * 10, dtype='m8'), + np.timedelta64('NaT'), + np.zeros(10, dtype=np.int32)), + ]) + @pytest.mark.filterwarnings("ignore::DeprecationWarning") + def test_NaT_propagation(self, arr, amin, amax): + # NOTE: the expected function spec doesn't + # propagate NaT, but clip() now does + expected = np.minimum(np.maximum(arr, amin), amax) + actual = np.clip(arr, amin, amax) + assert_equal(actual, expected) + + @given( + data=st.data(), + arr=hynp.arrays( + dtype=hynp.integer_dtypes() | hynp.floating_dtypes(), + shape=hynp.array_shapes() + ) + ) + def test_clip_property(self, data, arr): + """A property-based test using Hypothesis. 
+
+ This aims for maximum generality: it could in principle generate *any*
+ valid inputs to np.clip, and in practice generates much more varied
+ inputs than human testers come up with.
+
+ Because many of the inputs have tricky dependencies - compatible dtypes
+ and mutually-broadcastable shapes - we use the `st.data()` strategy to
+ draw values *inside* the test function, from strategies we construct
+ based on previous values. An alternative would be to define a custom
+ strategy with `@st.composite`, but until we have duplicated code,
+ keeping this inline is fine.
+
+ That accounts for most of the function; the actual test is just three
+ lines to calculate and compare actual vs expected results!
+ """
+ numeric_dtypes = hynp.integer_dtypes() | hynp.floating_dtypes()
+ # Generate shapes for the bounds which can be broadcast with each other
+ # and with the base shape. Below, we might decide to use scalar bounds,
+ # but it's clearer to generate these shapes unconditionally in advance.
+ in_shapes, result_shape = data.draw(
+ hynp.mutually_broadcastable_shapes(
+ num_shapes=2, base_shape=arr.shape
+ )
+ )
+ # Scalar `nan` is deprecated due to the differing behaviour it shows.
+ s = numeric_dtypes.flatmap(
+ lambda x: hynp.from_dtype(x, allow_nan=False))
+ amin = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
+ shape=in_shapes[0], elements={"allow_nan": False}))
+ amax = data.draw(s | hynp.arrays(dtype=numeric_dtypes,
+ shape=in_shapes[1], elements={"allow_nan": False}))
+
+ # Then calculate our result and expected result and check that they're
+ # equal! See gh-12519 and gh-19457 for discussion deciding on this
+ # property and the result_type argument.
+ result = np.clip(arr, amin, amax)
+ t = np.result_type(arr, amin, amax)
+ expected = np.minimum(amax, np.maximum(arr, amin, dtype=t), dtype=t)
+ assert result.dtype == t
+ assert_array_equal(result, expected)
+
+ def test_clip_min_max_args(self):
+ arr = np.arange(5)
+
+ assert_array_equal(np.clip(arr), arr)
+ assert_array_equal(np.clip(arr, min=2, max=3), np.clip(arr, 2, 3))
+ assert_array_equal(np.clip(arr, min=None, max=2),
+ np.clip(arr, None, 2))
+
+ with assert_raises_regex(TypeError, "missing 1 required positional "
+ "argument: 'a_max'"):
+ np.clip(arr, 2)
+ with assert_raises_regex(TypeError, "missing 1 required positional "
+ "argument: 'a_min'"):
+ np.clip(arr, a_max=2)
+ msg = ("Passing `min` or `max` keyword argument when `a_min` and "
+ "`a_max` are provided is forbidden.")
+ with assert_raises_regex(ValueError, msg):
+ np.clip(arr, 2, 3, max=3)
+ with assert_raises_regex(ValueError, msg):
+ np.clip(arr, 2, 3, min=2)
+
+ @pytest.mark.parametrize("dtype,min,max", [
+ ("int32", -2**32 - 1, 2**32),
+ ("int32", -2**320, None),
+ ("int32", None, 2**300),
+ ("int32", -1000, 2**32),
+ ("int32", -2**32 - 1, 1000),
+ ("uint8", -1, 129),
+ ])
+ def test_out_of_bound_pyints(self, dtype, min, max):
+ a = np.arange(10000).astype(dtype)
+ # The result must be a fresh copy that keeps the input dtype
+ c = np.clip(a, min=min, max=max)
+ assert not np.may_share_memory(a, c)
+ assert c.dtype == a.dtype
+ if min is not None:
+ assert (c >= min).all()
+ if max is not None:
+ assert (c <= max).all()
+
+class TestAllclose:
+ rtol = 1e-5
+ atol = 1e-8
+
+ def setup_method(self):
+ self.olderr = np.seterr(invalid='ignore')
+
+ def teardown_method(self):
+ np.seterr(**self.olderr)
+
+ def tst_allclose(self, x, y):
+ assert_(np.allclose(x, y), f"{x} and {y} not close")
+
+ def tst_not_allclose(self, x, y):
+ assert_(not np.allclose(x, y), f"{x} and {y} shouldn't be close")
+
+ def test_ip_allclose(self):
+ #
Parametric test factory. + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol * 2), + (aran, aran + aran * rtol), + (np.inf, np.inf), + (np.inf, [np.inf])] + + for (x, y) in data: + self.tst_allclose(x, y) + + def test_ip_not_allclose(self): + # Parametric test factory. + aran = np.arange(125).reshape((5, 5, 5)) + + atol = self.atol + rtol = self.rtol + + data = [([np.inf, 0], [1, np.inf]), + ([np.inf, 0], [1, 0]), + ([np.inf, np.inf], [1, np.inf]), + ([np.inf, np.inf], [1, 0]), + ([-np.inf, 0], [np.inf, 0]), + ([np.nan, 0], [np.nan, 0]), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + aran * atol + atol * 2), + (np.array([np.inf, 1]), np.array([0, np.inf]))] + + for (x, y) in data: + self.tst_not_allclose(x, y) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.allclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_min_int(self): + # Could make problems because of abs(min_int) == min_int + min_int = np.iinfo(np.int_).min + a = np.array([min_int], dtype=np.int_) + assert_(np.allclose(a, a)) + + def test_equalnan(self): + x = np.array([1.0, np.nan]) + assert_(np.allclose(x, x, equal_nan=True)) + + def test_return_class_is_ndarray(self): + # Issue gh-6475 + # Check that allclose does not preserve subtypes + class Foo(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, **kwargs).view(cls) + + a = Foo([1]) + assert_(type(np.allclose(a, a)) is bool) + + +class TestIsclose: + rtol = 1e-5 + atol = 1e-8 + + def _setup(self): + atol = self.atol + rtol = self.rtol + arr = np.array([100, 1000]) + aran = np.arange(125).reshape((5, 5, 5)) + + self.all_close_tests = [ + ([1, 0], [1, 0]), + ([atol], [0]), + ([1], [1 + rtol + atol]), + (arr, arr + arr * rtol), + (arr, arr + arr * rtol + atol), + (aran, aran + aran * rtol), + (np.inf, np.inf), + (np.inf, [np.inf]), + ([np.inf, -np.inf], [np.inf, -np.inf]), + ] + self.none_close_tests = [ + ([np.inf, 0], [1, np.inf]), + ([np.inf, -np.inf], [1, 0]), + ([np.inf, np.inf], [1, -np.inf]), + ([np.inf, np.inf], [1, 0]), + ([np.nan, 0], [np.nan, -np.inf]), + ([atol * 2], [0]), + ([1], [1 + rtol + atol * 2]), + (aran, aran + rtol * 1.1 * aran + atol * 1.1), + (np.array([np.inf, 1]), np.array([0, np.inf])), + ] + self.some_close_tests = [ + ([np.inf, 0], [np.inf, atol * 2]), + ([atol, 1, 1e6 * (1 + 2 * rtol) + atol], [0, np.nan, 1e6]), + (np.arange(3), [0, 1, 2.1]), + (np.nan, [np.nan, np.nan, np.nan]), + ([0], [atol, np.inf, -np.inf, np.nan]), + (0, [atol, np.inf, -np.inf, np.nan]), + ] + self.some_close_results = [ + [True, False], + [True, False, False], + [True, True, False], + [False, False, False], + [True, False, False, False], + [True, False, False, False], + ] + + def test_ip_isclose(self): + self._setup() + tests = self.some_close_tests + results = self.some_close_results + for (x, y), result in zip(tests, results): + assert_array_equal(np.isclose(x, y), result) + + x = np.array([2.1, 2.1, 2.1, 2.1, 5, np.nan]) + y = np.array([2, 2, 2, 2, np.nan, 5]) + atol = [0.11, 0.09, 1e-8, 1e-8, 1, 1] + rtol = [1e-8, 1e-8, 0.06, 0.04, 1, 1] + expected = np.array([True, False, True, False, False, False]) + assert_array_equal(np.isclose(x, y, rtol=rtol, atol=atol), expected) + + message = "operands could not be 
broadcast together..." + atol = np.array([1e-8, 1e-8]) + with assert_raises(ValueError, msg=message): + np.isclose(x, y, atol=atol) + + rtol = np.array([1e-5, 1e-5]) + with assert_raises(ValueError, msg=message): + np.isclose(x, y, rtol=rtol) + + def test_nep50_isclose(self): + below_one = float(1. - np.finfo('f8').eps) + f32 = np.array(below_one, 'f4') # This is just 1 at float32 precision + assert f32 > np.array(below_one) + # NEP 50 broadcasting of python scalars + assert f32 == below_one + # Test that it works for isclose arguments too (and that those fail if + # one uses a numpy float64). + assert np.isclose(f32, below_one, atol=0, rtol=0) + assert np.isclose(f32, np.float32(0), atol=below_one) + assert np.isclose(f32, 2, atol=0, rtol=below_one / 2) + assert not np.isclose(f32, np.float64(below_one), atol=0, rtol=0) + assert not np.isclose(f32, np.float32(0), atol=np.float64(below_one)) + assert not np.isclose(f32, 2, atol=0, rtol=np.float64(below_one / 2)) + + def tst_all_isclose(self, x, y): + assert_(np.all(np.isclose(x, y)), f"{x} and {y} not close") + + def tst_none_isclose(self, x, y): + msg = "%s and %s shouldn't be close" + assert_(not np.any(np.isclose(x, y)), msg % (x, y)) + + def tst_isclose_allclose(self, x, y): + msg = "isclose.all() and allclose aren't same for %s and %s" + msg2 = "isclose and allclose aren't same for %s and %s" + if np.isscalar(x) and np.isscalar(y): + assert_(np.isclose(x, y) == np.allclose(x, y), msg=msg2 % (x, y)) + else: + assert_array_equal(np.isclose(x, y).all(), np.allclose(x, y), msg % (x, y)) + + def test_ip_all_isclose(self): + self._setup() + for (x, y) in self.all_close_tests: + self.tst_all_isclose(x, y) + + x = np.array([2.3, 3.6, 4.4, np.nan]) + y = np.array([2, 3, 4, np.nan]) + atol = [0.31, 0, 0, 1] + rtol = [0, 0.21, 0.11, 1] + assert np.allclose(x, y, atol=atol, rtol=rtol, equal_nan=True) + assert not np.allclose(x, y, atol=0.1, rtol=0.1, equal_nan=True) + + # Show that gh-14330 is resolved + assert np.allclose([1, 2, float('nan')], [1, 2, float('nan')], + atol=[1, 1, 1], equal_nan=True) + + def test_ip_none_isclose(self): + self._setup() + for (x, y) in self.none_close_tests: + self.tst_none_isclose(x, y) + + def test_ip_isclose_allclose(self): + self._setup() + tests = (self.all_close_tests + self.none_close_tests + + self.some_close_tests) + for (x, y) in tests: + self.tst_isclose_allclose(x, y) + + def test_equal_nan(self): + assert_array_equal(np.isclose(np.nan, np.nan, equal_nan=True), [True]) + arr = np.array([1.0, np.nan]) + assert_array_equal(np.isclose(arr, arr, equal_nan=True), [True, True]) + + def test_masked_arrays(self): + # Make sure to test the output type when arguments are interchanged. + + x = np.ma.masked_where([True, True, False], np.arange(3)) + assert_(type(x) is type(np.isclose(2, x))) + assert_(type(x) is type(np.isclose(x, 2))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.inf, np.nan]) + assert_(type(x) is type(np.isclose(np.inf, x))) + assert_(type(x) is type(np.isclose(x, np.inf))) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(np.nan, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + y = np.isclose(x, np.nan, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... 
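+ # (the input mask [True, True, False] must come through unchanged)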
+ assert_array_equal([True, True, False], y.mask) + + x = np.ma.masked_where([True, True, False], [np.nan, np.nan, np.nan]) + y = np.isclose(x, x, equal_nan=True) + assert_(type(x) is type(y)) + # Ensure that the mask isn't modified... + assert_array_equal([True, True, False], y.mask) + + def test_scalar_return(self): + assert_(np.isscalar(np.isclose(1, 1))) + + def test_no_parameter_modification(self): + x = np.array([np.inf, 1]) + y = np.array([0, np.inf]) + np.isclose(x, y) + assert_array_equal(x, np.array([np.inf, 1])) + assert_array_equal(y, np.array([0, np.inf])) + + def test_non_finite_scalar(self): + # GH7014, when two scalars are compared the output should also be a + # scalar + assert_(np.isclose(np.inf, -np.inf) is np.False_) + assert_(np.isclose(0, np.inf) is np.False_) + assert_(type(np.isclose(0, np.inf)) is np.bool) + + def test_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert np.isclose(a, a, atol=0, equal_nan=True).all() + assert np.isclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True).all() + assert np.allclose(a, a, atol=0, equal_nan=True) + assert np.allclose(a, a, atol=np.timedelta64(1, "ns"), equal_nan=True) + + def test_tol_warnings(self): + a = np.array([1, 2, 3]) + b = np.array([np.inf, np.nan, 1]) + + for i in b: + for j in b: + # Making sure that i and j are not both numbers, because that won't create a warning + if (i == 1) and (j == 1): + continue + + with warnings.catch_warnings(record=True) as w: + + warnings.simplefilter("always") + c = np.isclose(a, a, atol=i, rtol=j) + assert len(w) == 1 + assert issubclass(w[-1].category, RuntimeWarning) + assert f"One of rtol or atol is not valid, atol: {i}, rtol: {j}" in str(w[-1].message) + + +class TestStdVar: + def setup_method(self): + self.A = np.array([1, -1, 1, -1]) + self.real_var = 1 + + def test_basic(self): + assert_almost_equal(np.var(self.A), self.real_var) + assert_almost_equal(np.std(self.A)**2, self.real_var) + + def test_scalars(self): + assert_equal(np.var(1), 0) + assert_equal(np.std(1), 0) + + def test_ddof1(self): + assert_almost_equal(np.var(self.A, ddof=1), + self.real_var * len(self.A) / (len(self.A) - 1)) + assert_almost_equal(np.std(self.A, ddof=1)**2, + self.real_var * len(self.A) / (len(self.A) - 1)) + + def test_ddof2(self): + assert_almost_equal(np.var(self.A, ddof=2), + self.real_var * len(self.A) / (len(self.A) - 2)) + assert_almost_equal(np.std(self.A, ddof=2)**2, + self.real_var * len(self.A) / (len(self.A) - 2)) + + def test_correction(self): + assert_almost_equal( + np.var(self.A, correction=1), np.var(self.A, ddof=1) + ) + assert_almost_equal( + np.std(self.A, correction=1), np.std(self.A, ddof=1) + ) + + err_msg = "ddof and correction can't be provided simultaneously." + + with assert_raises_regex(ValueError, err_msg): + np.var(self.A, ddof=1, correction=0) + + with assert_raises_regex(ValueError, err_msg): + np.std(self.A, ddof=1, correction=1) + + def test_out_scalar(self): + d = np.arange(10) + out = np.array(0.) 
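+ # std, var and mean must each write into the 0-d out array and return it.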
+ r = np.std(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.var(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + r = np.mean(d, out=out) + assert_(r is out) + assert_array_equal(r, out) + + +class TestStdVarComplex: + def test_basic(self): + A = np.array([1, 1.j, -1, -1.j]) + real_var = 1 + assert_almost_equal(np.var(A), real_var) + assert_almost_equal(np.std(A)**2, real_var) + + def test_scalars(self): + assert_equal(np.var(1j), 0) + assert_equal(np.std(1j), 0) + + +class TestCreationFuncs: + # Test ones, zeros, empty and full. + + def setup_method(self): + dtypes = {np.dtype(tp) for tp in itertools.chain(*sctypes.values())} + # void, bytes, str + variable_sized = {tp for tp in dtypes if tp.str.endswith('0')} + keyfunc = lambda dtype: dtype.str + self.dtypes = sorted(dtypes - variable_sized | + {np.dtype(tp.str.replace("0", str(i))) + for tp in variable_sized for i in range(1, 10)}, + key=keyfunc) + self.dtypes += [type(dt) for dt in sorted(dtypes, key=keyfunc)] + self.orders = {'C': 'c_contiguous', 'F': 'f_contiguous'} + self.ndims = 10 + + def check_function(self, func, fill_value=None): + par = ((0, 1, 2), + range(self.ndims), + self.orders, + self.dtypes) + fill_kwarg = {} + if fill_value is not None: + fill_kwarg = {'fill_value': fill_value} + + for size, ndims, order, dtype in itertools.product(*par): + shape = ndims * [size] + + is_void = dtype is np.dtypes.VoidDType or ( + isinstance(dtype, np.dtype) and dtype.str.startswith('|V')) + + # do not fill void type + if fill_kwarg and is_void: + continue + + arr = func(shape, order=order, dtype=dtype, + **fill_kwarg) + + if isinstance(dtype, np.dtype): + assert_equal(arr.dtype, dtype) + elif isinstance(dtype, type(np.dtype)): + if dtype in (np.dtypes.StrDType, np.dtypes.BytesDType): + dtype_str = np.dtype(dtype.type).str.replace('0', '1') + assert_equal(arr.dtype, np.dtype(dtype_str)) + else: + assert_equal(arr.dtype, np.dtype(dtype.type)) + assert_(getattr(arr.flags, self.orders[order])) + + if fill_value is not None: + if arr.dtype.str.startswith('|S'): + val = str(fill_value) + else: + val = fill_value + assert_equal(arr, dtype.type(val)) + + def test_zeros(self): + self.check_function(np.zeros) + + def test_ones(self): + self.check_function(np.ones) + + def test_empty(self): + self.check_function(np.empty) + + def test_full(self): + self.check_function(np.full, 0) + self.check_function(np.full, 1) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_for_reference_leak(self): + # Make sure we have an object for reference + dim = 1 + beg = sys.getrefcount(dim) + np.zeros([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.ones([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.empty([dim] * 10) + assert_(sys.getrefcount(dim) == beg) + np.full([dim] * 10, 0) + assert_(sys.getrefcount(dim) == beg) + + +class TestLikeFuncs: + '''Test ones_like, zeros_like, empty_like and full_like''' + + def setup_method(self): + self.data = [ + # Array scalars + (np.array(3.), None), + (np.array(3), 'f8'), + # 1D arrays + (np.arange(6, dtype='f4'), None), + (np.arange(6), 'c16'), + # 2D C-layout arrays + (np.arange(6).reshape(2, 3), None), + (np.arange(6).reshape(3, 2), 'i1'), + # 2D F-layout arrays + (np.arange(6).reshape((2, 3), order='F'), None), + (np.arange(6).reshape((3, 2), order='F'), 'i1'), + # 3D C-layout arrays + (np.arange(24).reshape(2, 3, 4), None), + (np.arange(24).reshape(4, 3, 2), 'f4'), + # 3D F-layout arrays + (np.arange(24).reshape((2, 3, 4), order='F'), 
None), + (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'), + # 3D non-C/F-layout arrays + (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None), + (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'), + ] + self.shapes = [(), (5,), (5, 6,), (5, 6, 7,)] + + def compare_array_value(self, dz, value, fill_value): + if value is not None: + if fill_value: + # Conversion is close to what np.full_like uses + # but we may want to convert directly in the future + # which may result in errors (where this does not). + z = np.array(value).astype(dz.dtype) + assert_(np.all(dz == z)) + else: + assert_(np.all(dz == value)) + + def check_like_function(self, like_function, value, fill_value=False): + if fill_value: + fill_kwarg = {'fill_value': value} + else: + fill_kwarg = {} + for d, dtype in self.data: + # default (K) order, dtype + dz = like_function(d, dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_equal(np.array(dz.strides) * d.dtype.itemsize, + np.array(d.strides) * dz.dtype.itemsize) + assert_equal(d.flags.c_contiguous, dz.flags.c_contiguous) + assert_equal(d.flags.f_contiguous, dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # C order, default dtype + dz = like_function(d, order='C', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # F order, default dtype + dz = like_function(d, order='F', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + assert_(dz.flags.f_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # A order + dz = like_function(d, order='A', dtype=dtype, **fill_kwarg) + assert_equal(dz.shape, d.shape) + if d.flags.f_contiguous: + assert_(dz.flags.f_contiguous) + else: + assert_(dz.flags.c_contiguous) + if dtype is None: + assert_equal(dz.dtype, d.dtype) + else: + assert_equal(dz.dtype, np.dtype(dtype)) + self.compare_array_value(dz, value, fill_value) + + # Test the 'shape' parameter + for s in self.shapes: + for o in 'CFA': + sz = like_function(d, dtype=dtype, shape=s, order=o, + **fill_kwarg) + assert_equal(sz.shape, s) + if dtype is None: + assert_equal(sz.dtype, d.dtype) + else: + assert_equal(sz.dtype, np.dtype(dtype)) + if o == 'C' or (o == 'A' and d.flags.c_contiguous): + assert_(sz.flags.c_contiguous) + elif o == 'F' or (o == 'A' and d.flags.f_contiguous): + assert_(sz.flags.f_contiguous) + self.compare_array_value(sz, value, fill_value) + + if (d.ndim != len(s)): + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(np.empty(s, dtype=dtype, + order='C').strides)) + else: + assert_equal(np.argsort(like_function(d, dtype=dtype, + shape=s, order='K', + **fill_kwarg).strides), + np.argsort(d.strides)) + + # Test the 'subok' parameter + class MyNDArray(np.ndarray): + pass + + a = np.array([[1, 2], [3, 4]]).view(MyNDArray) + + b = like_function(a, **fill_kwarg) + assert_(type(b) is MyNDArray) + + b = like_function(a, subok=False, **fill_kwarg) + assert_(type(b) is not MyNDArray) + + # Test invalid dtype + with assert_raises(TypeError): + a = np.array(b"abc") + like_function(a, dtype="S-1", **fill_kwarg) + + def test_ones_like(self): + 
self.check_like_function(np.ones_like, 1) + + def test_zeros_like(self): + self.check_like_function(np.zeros_like, 0) + + def test_empty_like(self): + self.check_like_function(np.empty_like, None) + + def test_filled_like(self): + self.check_like_function(np.full_like, 0, True) + self.check_like_function(np.full_like, 1, True) + # Large integers may overflow, but using int64 is OK (casts) + # see also gh-27075 + with pytest.raises(OverflowError): + np.full_like(np.ones(3, dtype=np.int8), 1000) + self.check_like_function(np.full_like, np.int64(1000), True) + self.check_like_function(np.full_like, 123.456, True) + # Inf to integer casts cause invalid-value errors: ignore them. + with np.errstate(invalid="ignore"): + self.check_like_function(np.full_like, np.inf, True) + + @pytest.mark.parametrize('likefunc', [np.empty_like, np.full_like, + np.zeros_like, np.ones_like]) + @pytest.mark.parametrize('dtype', [str, bytes]) + def test_dtype_str_bytes(self, likefunc, dtype): + # Regression test for gh-19860 + a = np.arange(16).reshape(2, 8) + b = a[:, ::2] # Ensure b is not contiguous. + kwargs = {'fill_value': ''} if likefunc == np.full_like else {} + result = likefunc(b, dtype=dtype, **kwargs) + if dtype == str: + assert result.strides == (16, 4) + else: + # dtype is bytes + assert result.strides == (4, 1) + + +class TestCorrelate: + def _setup(self, dt): + self.x = np.array([1, 2, 3, 4, 5], dtype=dt) + self.xs = np.arange(1, 20)[::3] + self.y = np.array([-1, -2, -3], dtype=dt) + self.z1 = np.array([-3., -8., -14., -20., -26., -14., -5.], dtype=dt) + self.z1_4 = np.array([-2., -5., -8., -11., -14., -5.], dtype=dt) + self.z1r = np.array([-15., -22., -22., -16., -10., -4., -1.], dtype=dt) + self.z2 = np.array([-5., -14., -26., -20., -14., -8., -3.], dtype=dt) + self.z2r = np.array([-1., -4., -10., -16., -22., -22., -15.], dtype=dt) + self.zs = np.array([-3., -14., -30., -48., -66., -84., + -102., -54., -19.], dtype=dt) + + def test_float(self): + self._setup(float) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.x, self.y[:-1], 'full') + assert_array_almost_equal(z, self.z1_4) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + z = np.correlate(self.x[::-1], self.y, 'full') + assert_array_almost_equal(z, self.z1r) + z = np.correlate(self.y, self.x[::-1], 'full') + assert_array_almost_equal(z, self.z2r) + z = np.correlate(self.xs, self.y, 'full') + assert_array_almost_equal(z, self.zs) + + def test_object(self): + self._setup(Decimal) + z = np.correlate(self.x, self.y, 'full') + assert_array_almost_equal(z, self.z1) + z = np.correlate(self.y, self.x, 'full') + assert_array_almost_equal(z, self.z2) + + def test_no_overwrite(self): + d = np.ones(100) + k = np.ones(3) + np.correlate(d, k) + assert_array_equal(d, np.ones(100)) + assert_array_equal(k, np.ones(3)) + + def test_complex(self): + x = np.array([1, 2, 3, 4 + 1j], dtype=complex) + y = np.array([-1, -2j, 3 + 1j], dtype=complex) + r_z = np.array([3 - 1j, 6, 8 + 1j, 11 + 5j, -5 + 8j, -4 - 1j], dtype=complex) + r_z = r_z[::-1].conjugate() + z = np.correlate(y, x, mode='full') + assert_array_almost_equal(z, r_z) + + def test_zero_size(self): + with pytest.raises(ValueError): + np.correlate(np.array([]), np.ones(1000), mode='full') + with pytest.raises(ValueError): + np.correlate(np.ones(1000), np.array([]), mode='full') + + def test_mode(self): + d = np.ones(100) + k = np.ones(3) + default_mode = np.correlate(d, k, mode='valid') + with 
assert_raises(ValueError):
+ np.correlate(d, k, mode='v')
+ # integer mode
+ with assert_raises(ValueError):
+ np.correlate(d, k, mode=-1)
+ # assert_array_equal(np.correlate(d, k, mode=0), default_mode)
+ # illegal arguments
+ with assert_raises(TypeError):
+ np.correlate(d, k, mode=None)
+
+
+class TestConvolve:
+ def test_object(self):
+ d = [1.] * 100
+ k = [1.] * 3
+ assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))
+
+ def test_no_overwrite(self):
+ d = np.ones(100)
+ k = np.ones(3)
+ np.convolve(d, k)
+ assert_array_equal(d, np.ones(100))
+ assert_array_equal(k, np.ones(3))
+
+ def test_mode(self):
+ d = np.ones(100)
+ k = np.ones(3)
+ default_mode = np.convolve(d, k, mode='full')
+ with assert_raises(ValueError):
+ np.convolve(d, k, mode='f')
+ # integer mode
+ with assert_raises(ValueError):
+ np.convolve(d, k, mode=-1)
+ assert_array_equal(np.convolve(d, k, mode=2), default_mode)
+ # illegal arguments
+ with assert_raises(TypeError):
+ np.convolve(d, k, mode=None)
+
+
+class TestArgwhere:
+
+ @pytest.mark.parametrize('nd', [0, 1, 2])
+ def test_nd(self, nd):
+ # get an nd array with multiple elements in every dimension
+ x = np.empty((2,) * nd, bool)
+
+ # none
+ x[...] = False
+ assert_equal(np.argwhere(x).shape, (0, nd))
+
+ # only one
+ x[...] = False
+ x.flat[0] = True
+ assert_equal(np.argwhere(x).shape, (1, nd))
+
+ # all but one
+ x[...] = True
+ x.flat[0] = False
+ assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
+
+ # all
+ x[...] = True
+ assert_equal(np.argwhere(x).shape, (x.size, nd))
+
+ def test_2D(self):
+ x = np.arange(6).reshape((2, 3))
+ assert_array_equal(np.argwhere(x > 1),
+ [[0, 2],
+ [1, 0],
+ [1, 1],
+ [1, 2]])
+
+ def test_list(self):
+ assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
+
+
+class TestRoll:
+ def test_roll1d(self):
+ x = np.arange(10)
+ xr = np.roll(x, 2)
+ assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
+
+ def test_roll2d(self):
+ x2 = np.reshape(np.arange(10), (2, 5))
+ x2r = np.roll(x2, 1)
+ assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, 1, axis=0)
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, 1, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ # Roll multiple axes at once.
+ x2r = np.roll(x2, 1, axis=(0, 1))
+ assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+ x2r = np.roll(x2, (1, 0), axis=(0, 1))
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, (-1, 0), axis=(0, 1))
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, (0, 1), axis=(0, 1))
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, (0, -1), axis=(0, 1))
+ assert_equal(x2r, np.array([[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]))
+
+ x2r = np.roll(x2, (1, 1), axis=(0, 1))
+ assert_equal(x2r, np.array([[9, 5, 6, 7, 8], [4, 0, 1, 2, 3]]))
+
+ x2r = np.roll(x2, (-1, -1), axis=(0, 1))
+ assert_equal(x2r, np.array([[6, 7, 8, 9, 5], [1, 2, 3, 4, 0]]))
+
+ # Roll the same axis multiple times.
+ x2r = np.roll(x2, 1, axis=(0, 0))
+ assert_equal(x2r, np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]))
+
+ x2r = np.roll(x2, 1, axis=(1, 1))
+ assert_equal(x2r, np.array([[3, 4, 0, 1, 2], [8, 9, 5, 6, 7]]))
+
+ # Roll more than one turn in either direction.
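+ # (axis 1 has length 5, so shifts of 6 and -4 both reduce to a shift of 1)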
+ x2r = np.roll(x2, 6, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, -4, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ def test_roll_empty(self):
+ x = np.array([])
+ assert_equal(np.roll(x, 1), np.array([]))
+
+ def test_roll_unsigned_shift(self):
+ x = np.arange(4)
+ shift = np.uint16(2)
+ assert_equal(np.roll(x, shift), np.roll(x, 2))
+
+ shift = np.uint64(2**63 + 2)
+ assert_equal(np.roll(x, shift), np.roll(x, 2))
+
+ def test_roll_big_int(self):
+ x = np.arange(4)
+ assert_equal(np.roll(x, 2**100), x)
+
+
+class TestRollaxis:
+
+ # expected shape indexed by (axis, start) for array of
+ # shape (1, 2, 3, 4)
+ tgtshape = {(0, 0): (1, 2, 3, 4), (0, 1): (1, 2, 3, 4),
+ (0, 2): (2, 1, 3, 4), (0, 3): (2, 3, 1, 4),
+ (0, 4): (2, 3, 4, 1),
+ (1, 0): (2, 1, 3, 4), (1, 1): (1, 2, 3, 4),
+ (1, 2): (1, 2, 3, 4), (1, 3): (1, 3, 2, 4),
+ (1, 4): (1, 3, 4, 2),
+ (2, 0): (3, 1, 2, 4), (2, 1): (1, 3, 2, 4),
+ (2, 2): (1, 2, 3, 4), (2, 3): (1, 2, 3, 4),
+ (2, 4): (1, 2, 4, 3),
+ (3, 0): (4, 1, 2, 3), (3, 1): (1, 4, 2, 3),
+ (3, 2): (1, 2, 4, 3), (3, 3): (1, 2, 3, 4),
+ (3, 4): (1, 2, 3, 4)}
+
+ def test_exceptions(self):
+ a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4)
+ assert_raises(AxisError, np.rollaxis, a, -5, 0)
+ assert_raises(AxisError, np.rollaxis, a, 0, -5)
+ assert_raises(AxisError, np.rollaxis, a, 4, 0)
+ assert_raises(AxisError, np.rollaxis, a, 0, 5)
+
+ def test_results(self):
+ a = np.arange(1 * 2 * 3 * 4).reshape(1, 2, 3, 4).copy()
+ aind = np.indices(a.shape)
+ assert_(a.flags['OWNDATA'])
+ for (i, j) in self.tgtshape:
+ # positive axis, positive start
+ res = np.rollaxis(a, axis=i, start=j)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(i, j)], str((i, j)))
+ assert_(not res.flags['OWNDATA'])
+
+ # negative axis, positive start
+ ip = i + 1
+ res = np.rollaxis(a, axis=-ip, start=j)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(4 - ip, j)])
+ assert_(not res.flags['OWNDATA'])
+
+ # positive axis, negative start
+ jp = j + 1 if j < 4 else j
+ res = np.rollaxis(a, axis=i, start=-jp)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(i, 4 - jp)])
+ assert_(not res.flags['OWNDATA'])
+
+ # negative axis, negative start
+ ip = i + 1
+ jp = j + 1 if j < 4 else j
+ res = np.rollaxis(a, axis=-ip, start=-jp)
+ i0, i1, i2, i3 = aind[np.array(res.shape) - 1]
+ assert_(np.all(res[i0, i1, i2, i3] == a))
+ assert_(res.shape == self.tgtshape[(4 - ip, 4 - jp)])
+ assert_(not res.flags['OWNDATA'])
+
+
+class TestMoveaxis:
+ def test_move_to_end(self):
+ x = np.random.randn(5, 6, 7)
+ for source, expected in [(0, (6, 7, 5)),
+ (1, (5, 7, 6)),
+ (2, (5, 6, 7)),
+ (-1, (5, 6, 7))]:
+ actual = np.moveaxis(x, source, -1).shape
+ assert_equal(actual, expected)
+
+ def test_move_new_position(self):
+ x = np.random.randn(1, 2, 3, 4)
+ for source, destination, expected in [
+ (0, 1, (2, 1, 3, 4)),
+ (1, 2, (1, 3, 2, 4)),
+ (1, -1, (1, 3, 4, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, expected)
+
+ def test_preserve_order(self):
+ x = np.zeros((1, 2, 3, 4))
+ for source, destination in [
+ (0, 0),
+ (3, -1),
+ (-1, 3),
+ ([0, -1], [0, -1]),
+ ([2, 0], [2, 0]),
+ (range(4), range(4)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, (1, 2, 3, 4))
+
+ def test_move_multiples(self):
+ x = np.zeros((0, 1, 2, 3))
+ for source, destination, expected in [
+ ([0, 1], [2, 3], (2, 3, 0, 1)),
+ ([2, 3], [0, 1], (2, 3, 0, 1)),
+ ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
+ ([3, 0], [1, 0], (0, 3, 1, 2)),
+ ([0, 3], [0, 1], (0, 3, 1, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, expected)
+
+ def test_errors(self):
+ x = np.random.randn(1, 2, 3)
+ assert_raises_regex(AxisError, 'source.*out of bounds',
+ np.moveaxis, x, 3, 0)
+ assert_raises_regex(AxisError, 'source.*out of bounds',
+ np.moveaxis, x, -4, 0)
+ assert_raises_regex(AxisError, 'destination.*out of bounds',
+ np.moveaxis, x, 0, 5)
+ assert_raises_regex(ValueError, 'repeated axis in `source`',
+ np.moveaxis, x, [0, 0], [0, 1])
+ assert_raises_regex(ValueError, 'repeated axis in `destination`',
+ np.moveaxis, x, [0, 1], [1, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, 0, [0, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, [0, 1], [0])
+
+ def test_array_likes(self):
+ x = np.ma.zeros((1, 2, 3))
+ result = np.moveaxis(x, 0, 0)
+ assert_equal(x.shape, result.shape)
+ assert_(isinstance(result, np.ma.MaskedArray))
+
+ x = [1, 2, 3]
+ result = np.moveaxis(x, 0, 0)
+ assert_equal(x, list(result))
+ assert_(isinstance(result, np.ndarray))
+
+
+class TestCross:
+ @pytest.mark.filterwarnings(
+ "ignore:.*2-dimensional vectors.*:DeprecationWarning"
+ )
+ def test_2x2(self):
+ u = [1, 2]
+ v = [3, 4]
+ z = -2
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ @pytest.mark.filterwarnings(
+ "ignore:.*2-dimensional vectors.*:DeprecationWarning"
+ )
+ def test_2x3(self):
+ u = [1, 2]
+ v = [3, 4, 5]
+ z = np.array([10, -5, -2])
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ def test_3x3(self):
+ u = [1, 2, 3]
+ v = [4, 5, 6]
+ z = np.array([-3, 6, -3])
+ cp = np.cross(u, v)
+ assert_equal(cp, z)
+ cp = np.cross(v, u)
+ assert_equal(cp, -z)
+
+ @pytest.mark.filterwarnings(
+ "ignore:.*2-dimensional vectors.*:DeprecationWarning"
+ )
+ def test_broadcasting(self):
+ # Ticket #2624 (Trac #2032)
+ u = np.tile([1, 2], (11, 1))
+ v = np.tile([3, 4], (11, 1))
+ z = -2
+ assert_equal(np.cross(u, v), z)
+ assert_equal(np.cross(v, u), -z)
+ assert_equal(np.cross(u, u), 0)
+
+ u = np.tile([1, 2], (11, 1)).T
+ v = np.tile([3, 4, 5], (11, 1))
+ z = np.tile([10, -5, -2], (11, 1))
+ assert_equal(np.cross(u, v, axisa=0), z)
+ assert_equal(np.cross(v, u.T), -z)
+ assert_equal(np.cross(v, v), 0)
+
+ u = np.tile([1, 2, 3], (11, 1)).T
+ v = np.tile([3, 4], (11, 1)).T
+ z = np.tile([-12, 9, -2], (11, 1))
+ assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
+ assert_equal(np.cross(v.T, u.T), -z)
+ assert_equal(np.cross(u.T, u.T), 0)
+
+ u = np.tile([1, 2, 3], (5, 1))
+ v = np.tile([4, 5, 6], (5, 1)).T
+ z = np.tile([-3, 6, -3], (5, 1))
+ assert_equal(np.cross(u, v, axisb=0), z)
+ assert_equal(np.cross(v.T, u), -z)
+ assert_equal(np.cross(u, u), 0)
+
+ @pytest.mark.filterwarnings(
+ "ignore:.*2-dimensional vectors.*:DeprecationWarning"
+ )
+ def test_broadcasting_shapes(self):
+ u = np.ones((2, 1, 3))
+ v = np.ones((5, 3))
+ assert_equal(np.cross(u, v).shape, (2, 5, 3))
+ u = np.ones((10, 3, 5))
+ v = np.ones((2, 5))
+ assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
+ assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=2)
+ assert_raises(AxisError, np.cross, u, v, axisa=3, axisb=0)
+ u =
np.ones((10, 3, 5, 7)) + v = np.ones((5, 7, 2)) + assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7)) + assert_raises(AxisError, np.cross, u, v, axisa=-5, axisb=2) + assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=-4) + # gh-5885 + u = np.ones((3, 4, 2)) + for axisc in range(-2, 2): + assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4)) + + def test_uint8_int32_mixed_dtypes(self): + # regression test for gh-19138 + u = np.array([[195, 8, 9]], np.uint8) + v = np.array([250, 166, 68], np.int32) + z = np.array([[950, 11010, -30370]], dtype=np.int32) + assert_equal(np.cross(v, u), z) + assert_equal(np.cross(u, v), -z) + + @pytest.mark.parametrize("a, b", [(0, [1, 2]), ([1, 2], 3)]) + def test_zero_dimension(self, a, b): + with pytest.raises(ValueError) as exc: + np.cross(a, b) + assert "At least one array has zero dimension" in str(exc.value) + + +def test_outer_out_param(): + arr1 = np.ones((5,)) + arr2 = np.ones((2,)) + arr3 = np.linspace(-2, 2, 5) + out1 = np.ndarray(shape=(5, 5)) + out2 = np.ndarray(shape=(2, 5)) + res1 = np.outer(arr1, arr3, out1) + assert_equal(res1, out1) + assert_equal(np.outer(arr2, arr3, out2), out2) + + +class TestIndices: + + def test_simple(self): + [x, y] = np.indices((4, 3)) + assert_array_equal(x, np.array([[0, 0, 0], + [1, 1, 1], + [2, 2, 2], + [3, 3, 3]])) + assert_array_equal(y, np.array([[0, 1, 2], + [0, 1, 2], + [0, 1, 2], + [0, 1, 2]])) + + def test_single_input(self): + [x] = np.indices((4,)) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + [x] = np.indices((4,), sparse=True) + assert_array_equal(x, np.array([0, 1, 2, 3])) + + def test_scalar_input(self): + assert_array_equal([], np.indices(())) + assert_array_equal([], np.indices((), sparse=True)) + assert_array_equal([[]], np.indices((0,))) + assert_array_equal([[]], np.indices((0,), sparse=True)) + + def test_sparse(self): + [x, y] = np.indices((4, 3), sparse=True) + assert_array_equal(x, np.array([[0], [1], [2], [3]])) + assert_array_equal(y, np.array([[0, 1, 2]])) + + @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) + @pytest.mark.parametrize("dims", [(), (0,), (4, 3)]) + def test_return_type(self, dtype, dims): + inds = np.indices(dims, dtype=dtype) + assert_(inds.dtype == dtype) + + for arr in np.indices(dims, dtype=dtype, sparse=True): + assert_(arr.dtype == dtype) + + +class TestRequire: + flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS', + 'F', 'F_CONTIGUOUS', 'FORTRAN', + 'A', 'ALIGNED', + 'W', 'WRITEABLE', + 'O', 'OWNDATA'] + + def generate_all_false(self, dtype): + arr = np.zeros((2, 2), [('junk', 'i1'), ('a', dtype)]) + arr.setflags(write=False) + a = arr['a'] + assert_(not a.flags['C']) + assert_(not a.flags['F']) + assert_(not a.flags['O']) + assert_(not a.flags['W']) + assert_(not a.flags['A']) + return a + + def set_and_check_flag(self, flag, dtype, arr): + if dtype is None: + dtype = arr.dtype + b = np.require(arr, dtype, [flag]) + assert_(b.flags[flag]) + assert_(b.dtype == dtype) + + # a further call to np.require ought to return the same array + # unless OWNDATA is specified. 
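+ # (the check below on flag[0] != 'O' covers both the 'O' and
+ # 'OWNDATA' spellings of the flag)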
+ c = np.require(b, None, [flag]) + if flag[0] != 'O': + assert_(c is b) + else: + assert_(c.flags[flag]) + + def test_require_each(self): + + id = ['f8', 'i4'] + fd = [None, 'f8', 'c16'] + for idtype, fdtype, flag in itertools.product(id, fd, self.flag_names): + a = self.generate_all_false(idtype) + self.set_and_check_flag(flag, fdtype, a) + + def test_unknown_requirement(self): + a = self.generate_all_false('f8') + assert_raises(KeyError, np.require, a, None, 'Q') + + def test_non_array_input(self): + a = np.require([1, 2, 3, 4], 'i4', ['C', 'A', 'O']) + assert_(a.flags['O']) + assert_(a.flags['C']) + assert_(a.flags['A']) + assert_(a.dtype == 'i4') + assert_equal(a, [1, 2, 3, 4]) + + def test_C_and_F_simul(self): + a = self.generate_all_false('f8') + assert_raises(ValueError, np.require, a, None, ['C', 'F']) + + def test_ensure_array(self): + class ArraySubclass(np.ndarray): + pass + + a = ArraySubclass((2, 2)) + b = np.require(a, None, ['E']) + assert_(type(b) is np.ndarray) + + def test_preserve_subtype(self): + class ArraySubclass(np.ndarray): + pass + + for flag in self.flag_names: + a = ArraySubclass((2, 2)) + self.set_and_check_flag(flag, None, a) + + +class TestBroadcast: + def test_broadcast_in_args(self): + # gh-5881 + arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)), + np.empty((5, 1, 7))] + mits = [np.broadcast(*arrs), + np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])), + np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])), + np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])), + np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])] + for mit in mits: + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 4) + for a, ia in zip(arrs, mit.iters): + assert_(a is ia.base) + + def test_broadcast_single_arg(self): + # gh-6899 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + assert_equal(mit.shape, (5, 6, 7)) + assert_equal(mit.ndim, 3) + assert_equal(mit.nd, 3) + assert_equal(mit.numiter, 1) + assert_(arrs[0] is mit.iters[0].base) + + def test_number_of_arguments(self): + arr = np.empty((5,)) + for j in range(70): + arrs = [arr] * j + if j > 64: + assert_raises(ValueError, np.broadcast, *arrs) + else: + mit = np.broadcast(*arrs) + assert_equal(mit.numiter, j) + + def test_broadcast_error_kwargs(self): + # gh-13455 + arrs = [np.empty((5, 6, 7))] + mit = np.broadcast(*arrs) + mit2 = np.broadcast(*arrs, **{}) # noqa: PIE804 + assert_equal(mit.shape, mit2.shape) + assert_equal(mit.ndim, mit2.ndim) + assert_equal(mit.nd, mit2.nd) + assert_equal(mit.numiter, mit2.numiter) + assert_(mit.iters[0].base is mit2.iters[0].base) + + assert_raises(ValueError, np.broadcast, 1, x=1) + + def test_shape_mismatch_error_message(self): + with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and " + r"arg 2 with shape \(2,\)"): + np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7]) + + +class TestKeepdims: + + class sub_array(np.ndarray): + def sum(self, axis=None, dtype=None, out=None): + return np.ndarray.sum(self, axis, dtype, out, keepdims=True) + + def test_raise(self): + sub_class = self.sub_array + x = np.arange(30).view(sub_class) + assert_raises(TypeError, np.sum, x, keepdims=True) + + +class TestTensordot: + + def test_zero_dimension(self): + # Test resolution to issue #5663 + a = np.ndarray((3, 0)) + b = np.ndarray((0, 4)) + td = np.tensordot(a, b, (1, 0)) + assert_array_equal(td, np.dot(a, b)) + assert_array_equal(td, np.einsum('ij,jk', a, b)) + + def 
test_zero_dimensional(self): + # gh-12130 + arr_0d = np.array(1) + ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined + assert_array_equal(ret, arr_0d) + + +class TestAsType: + + def test_astype(self): + data = [[1, 2], [3, 4]] + actual = np.astype( + np.array(data, dtype=np.int64), np.uint32 + ) + expected = np.array(data, dtype=np.uint32) + + assert_array_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + assert np.shares_memory( + actual, np.astype(actual, actual.dtype, copy=False) + ) + + actual = np.astype(np.int64(10), np.float64) + expected = np.float64(10) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + with pytest.raises(TypeError, match="Input should be a NumPy array"): + np.astype(data, np.float64) diff --git a/python/numpy/_core/tests/test_numerictypes.py b/python/numpy/_core/tests/test_numerictypes.py new file mode 100644 index 000000000..6d7372284 --- /dev/null +++ b/python/numpy/_core/tests/test_numerictypes.py @@ -0,0 +1,651 @@ +import itertools +import sys + +import pytest + +import numpy as np +import numpy._core.numerictypes as nt +from numpy._core.numerictypes import issctype, maximum_sctype, sctype2char, sctypes +from numpy.testing import ( + IS_PYPY, + assert_, + assert_equal, + assert_raises, + assert_raises_regex, +) + +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True), + b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False), + b'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + + +byteorder = {'little': '<', 'big': '>'}[sys.byteorder] + +def normalize_descr(descr): + "Normalize a description adding the platform byteorder." 
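+ # Types of size one and the 'S', 'V', 'b' kinds are byteorder-agnostic
+ # ('|'); everything else gets the platform prefix computed above.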
+ + out = [] + for item in descr: + dtype = item[1] + if isinstance(dtype, str): + if dtype[0] not in ['|', '<', '>']: + onebyte = dtype[1:] == "1" + if onebyte or dtype[0] in ['S', 'V', 'b']: + dtype = "|" + dtype + else: + dtype = byteorder + dtype + if len(item) > 2 and np.prod(item[2]) > 1: + nitem = (item[0], dtype, item[2]) + else: + nitem = (item[0], dtype) + out.append(nitem) + elif isinstance(dtype, list): + l = normalize_descr(dtype) + out.append((item[0], l)) + else: + raise ValueError(f"Expected a str or list and got {type(item)}") + return out + + +############################################################ +# Creation tests +############################################################ + +class CreateZeros: + """Check the creation of heterogeneous arrays zero-valued""" + + def test_zeros0D(self): + """Check creation of 0-dimensional objects""" + h = np.zeros((), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype.fields['x'][0].name[:4] == 'void') + assert_(h.dtype.fields['x'][0].char == 'V') + assert_(h.dtype.fields['x'][0].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((), dtype='u1')) + + def test_zerosSD(self): + """Check creation of single-dimensional objects""" + h = np.zeros((2,), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['y'].name[:4] == 'void') + assert_(h.dtype['y'].char == 'V') + assert_(h.dtype['y'].type == np.void) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2,), dtype='u1')) + + def test_zerosMD(self): + """Check creation of multi-dimensional objects""" + h = np.zeros((2, 3), dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + assert_(h.dtype['z'].name == 'uint8') + assert_(h.dtype['z'].char == 'B') + assert_(h.dtype['z'].type == np.uint8) + # A small check that data is ok + assert_equal(h['z'], np.zeros((2, 3), dtype='u1')) + + +class TestCreateZerosPlain(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (plain)""" + _descr = Pdescr + +class TestCreateZerosNested(CreateZeros): + """Check the creation of heterogeneous arrays zero-valued (nested)""" + _descr = Ndescr + + +class CreateValues: + """Check the creation of heterogeneous arrays with values""" + + def test_tuple(self): + """Check creation from tuples""" + h = np.array(self._buffer, dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (2,)) + else: + assert_(h.shape == ()) + + def test_list_of_tuple(self): + """Check creation from list of tuples""" + h = np.array([self._buffer], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 2)) + else: + assert_(h.shape == (1,)) + + def test_list_of_list_of_tuple(self): + """Check creation from list of list of tuples""" + h = np.array([[self._buffer]], dtype=self._descr) + assert_(normalize_descr(self._descr) == h.dtype.descr) + if self.multiple_rows: + assert_(h.shape == (1, 1, 2)) + else: + assert_(h.shape == (1, 1)) + + +class TestCreateValuesPlainSingle(CreateValues): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestCreateValuesPlainMultiple(CreateValues): + """Check the creation of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class 
TestCreateValuesNestedSingle(CreateValues): + """Check the creation of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = 0 + _buffer = NbufferT[0] + +class TestCreateValuesNestedMultiple(CreateValues): + """Check the creation of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = 1 + _buffer = NbufferT + + +############################################################ +# Reading tests +############################################################ + +class ReadValuesPlain: + """Check the reading of values in heterogeneous arrays (plain)""" + + def test_access_fields(self): + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[1], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[2], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][1], + self._buffer[1][1]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][2], + self._buffer[1][2]], dtype='u1')) + + +class TestReadValuesPlainSingle(ReadValuesPlain): + """Check the creation of heterogeneous arrays (plain, single row)""" + _descr = Pdescr + multiple_rows = 0 + _buffer = PbufferT[0] + +class TestReadValuesPlainMultiple(ReadValuesPlain): + """Check the values of heterogeneous arrays (plain, multiple rows)""" + _descr = Pdescr + multiple_rows = 1 + _buffer = PbufferT + +class ReadValuesNested: + """Check the reading of values in heterogeneous arrays (nested)""" + + def test_access_top_fields(self): + """Check reading the top fields of a nested array""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_(h.shape == ()) + assert_equal(h['x'], np.array(self._buffer[0], dtype='i4')) + assert_equal(h['y'], np.array(self._buffer[4], dtype='f8')) + assert_equal(h['z'], np.array(self._buffer[5], dtype='u1')) + else: + assert_(len(h) == 2) + assert_equal(h['x'], np.array([self._buffer[0][0], + self._buffer[1][0]], dtype='i4')) + assert_equal(h['y'], np.array([self._buffer[0][4], + self._buffer[1][4]], dtype='f8')) + assert_equal(h['z'], np.array([self._buffer[0][5], + self._buffer[1][5]], dtype='u1')) + + def test_nested1_acessors(self): + """Check reading the nested fields of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['value'], + np.array(self._buffer[1][0], dtype='c16')) + assert_equal(h['Info']['y2'], + np.array(self._buffer[1][1], dtype='f8')) + assert_equal(h['info']['Name'], + np.array(self._buffer[3][0], dtype='U2')) + assert_equal(h['info']['Value'], + np.array(self._buffer[3][1], dtype='c16')) + else: + assert_equal(h['Info']['value'], + np.array([self._buffer[0][1][0], + self._buffer[1][1][0]], + dtype='c16')) + assert_equal(h['Info']['y2'], + np.array([self._buffer[0][1][1], + self._buffer[1][1][1]], + dtype='f8')) + assert_equal(h['info']['Name'], + np.array([self._buffer[0][3][0], + self._buffer[1][3][0]], + dtype='U2')) + assert_equal(h['info']['Value'], + np.array([self._buffer[0][3][1], + self._buffer[1][3][1]], + dtype='c16')) + + def test_nested2_acessors(self): + """Check reading the nested fields of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + if not self.multiple_rows: + assert_equal(h['Info']['Info2']['value'], + 
np.array(self._buffer[1][2][1], dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array(self._buffer[1][2][3], dtype='u4')) + else: + assert_equal(h['Info']['Info2']['value'], + np.array([self._buffer[0][1][2][1], + self._buffer[1][1][2][1]], + dtype='c16')) + assert_equal(h['Info']['Info2']['z3'], + np.array([self._buffer[0][1][2][3], + self._buffer[1][1][2][3]], + dtype='u4')) + + def test_nested1_descriptor(self): + """Check access nested descriptors of a nested array (1st level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['value'].name == 'complex128') + assert_(h.dtype['Info']['y2'].name == 'float64') + assert_(h.dtype['info']['Name'].name == 'str256') + assert_(h.dtype['info']['Value'].name == 'complex128') + + def test_nested2_descriptor(self): + """Check access nested descriptors of a nested array (2nd level)""" + h = np.array(self._buffer, dtype=self._descr) + assert_(h.dtype['Info']['Info2']['value'].name == 'void256') + assert_(h.dtype['Info']['Info2']['z3'].name == 'void64') + + +class TestReadValuesNestedSingle(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, single row)""" + _descr = Ndescr + multiple_rows = False + _buffer = NbufferT[0] + +class TestReadValuesNestedMultiple(ReadValuesNested): + """Check the values of heterogeneous arrays (nested, multiple rows)""" + _descr = Ndescr + multiple_rows = True + _buffer = NbufferT + +class TestEmptyField: + def test_assign(self): + a = np.arange(10, dtype=np.float32) + a.dtype = [("int", "<0i4"), ("float", "<2f4")] + assert_(a['int'].shape == (5, 0)) + assert_(a['float'].shape == (5, 2)) + + +class TestMultipleFields: + def setup_method(self): + self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8') + + def _bad_call(self): + return self.ary['f0', 'f1'] + + def test_no_tuple(self): + assert_raises(IndexError, self._bad_call) + + def test_return(self): + res = self.ary[['f0', 'f2']].tolist() + assert_(res == [(1, 3), (5, 7)]) + + +class TestIsSubDType: + # scalar types can be promoted into dtypes + wrappers = [np.dtype, lambda x: x] + + def test_both_abstract(self): + assert_(np.issubdtype(np.floating, np.inexact)) + assert_(not np.issubdtype(np.inexact, np.floating)) + + def test_same(self): + for cls in (np.float32, np.int32): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(np.issubdtype(w1(cls), w2(cls))) + + def test_subclass(self): + # note we cannot promote floating to a dtype, as it would turn into a + # concrete type + for w in self.wrappers: + assert_(np.issubdtype(w(np.float32), np.floating)) + assert_(np.issubdtype(w(np.float64), np.floating)) + + def test_subclass_backwards(self): + for w in self.wrappers: + assert_(not np.issubdtype(np.floating, w(np.float32))) + assert_(not np.issubdtype(np.floating, w(np.float64))) + + def test_sibling_class(self): + for w1, w2 in itertools.product(self.wrappers, repeat=2): + assert_(not np.issubdtype(w1(np.float32), w2(np.float64))) + assert_(not np.issubdtype(w1(np.float64), w2(np.float32))) + + def test_nondtype_nonscalartype(self): + # See gh-14619 and gh-9505 which introduced the deprecation to fix + # this. 
These tests are directly taken from gh-9505 + assert not np.issubdtype(np.float32, 'float64') + assert not np.issubdtype(np.float32, 'f8') + assert not np.issubdtype(np.int32, str) + assert not np.issubdtype(np.int32, 'int64') + assert not np.issubdtype(np.str_, 'void') + # for the following the correct spellings are + # np.integer, np.floating, or np.complexfloating respectively: + assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_ + assert not np.issubdtype(np.float32, float) + assert not np.issubdtype(np.complex64, complex) + assert not np.issubdtype(np.float32, "float") + assert not np.issubdtype(np.float64, "f") + + # Test the same for the correct first datatype and abstract one + # in the case of int, float, complex: + assert np.issubdtype(np.float64, 'float64') + assert np.issubdtype(np.float64, 'f8') + assert np.issubdtype(np.str_, str) + assert np.issubdtype(np.int64, 'int64') + assert np.issubdtype(np.void, 'void') + assert np.issubdtype(np.int8, np.integer) + assert np.issubdtype(np.float32, np.floating) + assert np.issubdtype(np.complex64, np.complexfloating) + assert np.issubdtype(np.float64, "float") + assert np.issubdtype(np.float32, "f") + + +class TestIsDType: + """ + Check correctness of `np.isdtype`. The test considers different argument + configurations: `np.isdtype(dtype, k1)` and `np.isdtype(dtype, (k1, k2))` + with concrete dtypes and dtype groups. + """ + dtype_group_dict = { + "signed integer": sctypes["int"], + "unsigned integer": sctypes["uint"], + "integral": sctypes["int"] + sctypes["uint"], + "real floating": sctypes["float"], + "complex floating": sctypes["complex"], + "numeric": ( + sctypes["int"] + sctypes["uint"] + sctypes["float"] + + sctypes["complex"] + ) + } + + @pytest.mark.parametrize( + "dtype,close_dtype", + [ + (np.int64, np.int32), (np.uint64, np.uint32), + (np.float64, np.float32), (np.complex128, np.complex64) + ] + ) + @pytest.mark.parametrize( + "dtype_group", + [ + None, "signed integer", "unsigned integer", "integral", + "real floating", "complex floating", "numeric" + ] + ) + def test_isdtype(self, dtype, close_dtype, dtype_group): + # First check if same dtypes return `true` and different ones + # give `false` (even if they're close in the dtype hierarchy!) + if dtype_group is None: + assert np.isdtype(dtype, dtype) + assert not np.isdtype(dtype, close_dtype) + assert np.isdtype(dtype, (dtype, close_dtype)) + + # Check that dtype and a dtype group that it belongs to + # return `true`, and `false` otherwise. 
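The rule these assertions pin down: an abstract class like `np.floating` matches every float width, while a Python builtin such as `float` names exactly one concrete type (`np.float64`). A minimal self-contained sketch of that distinction, consistent with the assertions above:

```python
import numpy as np

# Abstract classes match any width; Python builtins name one concrete type.
assert np.issubdtype(np.float32, np.floating)   # abstract: True
assert not np.issubdtype(np.float32, float)     # float means np.float64
assert np.issubdtype(np.float64, float)         # the one width that matches
```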
+ elif dtype in self.dtype_group_dict[dtype_group]: + assert np.isdtype(dtype, dtype_group) + assert np.isdtype(dtype, (close_dtype, dtype_group)) + else: + assert not np.isdtype(dtype, dtype_group) + + def test_isdtype_invalid_args(self): + with assert_raises_regex(TypeError, r".*must be a NumPy dtype.*"): + np.isdtype("int64", np.int64) + with assert_raises_regex(TypeError, r".*kind argument must.*"): + np.isdtype(np.int64, 1) + with assert_raises_regex(ValueError, r".*not a known kind name.*"): + np.isdtype(np.int64, "int64") + + def test_sctypes_complete(self): + # issue 26439: int32/intc were masking each other on 32-bit builds + assert np.int32 in sctypes['int'] + assert np.intc in sctypes['int'] + assert np.int64 in sctypes['int'] + assert np.uint32 in sctypes['uint'] + assert np.uintc in sctypes['uint'] + assert np.uint64 in sctypes['uint'] + +class TestSctypeDict: + def test_longdouble(self): + assert_(np._core.sctypeDict['float64'] is not np.longdouble) + assert_(np._core.sctypeDict['complex128'] is not np.clongdouble) + + def test_ulong(self): + assert np._core.sctypeDict['ulong'] is np.ulong + assert np.dtype(np.ulong) is np.dtype("ulong") + assert np.dtype(np.ulong).itemsize == np.dtype(np.long).itemsize + + +@pytest.mark.filterwarnings("ignore:.*maximum_sctype.*:DeprecationWarning") +class TestMaximumSctype: + + # note that parametrizing with sctype['int'] and similar would skip types + # with the same size (gh-11923) + + @pytest.mark.parametrize( + 't', [np.byte, np.short, np.intc, np.long, np.longlong] + ) + def test_int(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['int'][-1]) + + @pytest.mark.parametrize( + 't', [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong] + ) + def test_uint(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['uint'][-1]) + + @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble]) + def test_float(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['float'][-1]) + + @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble]) + def test_complex(self, t): + assert_equal(maximum_sctype(t), np._core.sctypes['complex'][-1]) + + @pytest.mark.parametrize('t', [np.bool, np.object_, np.str_, np.bytes_, + np.void]) + def test_other(self, t): + assert_equal(maximum_sctype(t), t) + + +class Test_sctype2char: + # This function is old enough that we're really just documenting the quirks + # at this point. 
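`np.isdtype` (added in NumPy 2.0) takes a dtype plus a kind string, a concrete dtype, or a tuple of either; a tuple matches if any entry does. A short sketch with arbitrary example dtypes:

```python
import numpy as np

dt = np.dtype(np.int32)
assert np.isdtype(dt, "signed integer")
assert np.isdtype(dt, ("real floating", np.int32))  # tuples match any entry
assert not np.isdtype(dt, "real floating")
```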
+ + def test_scalar_type(self): + assert_equal(sctype2char(np.double), 'd') + assert_equal(sctype2char(np.long), 'l') + assert_equal(sctype2char(np.int_), np.array(0).dtype.char) + assert_equal(sctype2char(np.str_), 'U') + assert_equal(sctype2char(np.bytes_), 'S') + + def test_other_type(self): + assert_equal(sctype2char(float), 'd') + assert_equal(sctype2char(list), 'O') + assert_equal(sctype2char(np.ndarray), 'O') + + def test_third_party_scalar_type(self): + from numpy._core._rational_tests import rational + assert_raises(KeyError, sctype2char, rational) + assert_raises(KeyError, sctype2char, rational(1)) + + def test_array_instance(self): + assert_equal(sctype2char(np.array([1.0, 2.0])), 'd') + + def test_abstract_type(self): + assert_raises(KeyError, sctype2char, np.floating) + + def test_non_type(self): + assert_raises(ValueError, sctype2char, 1) + +@pytest.mark.parametrize("rep, expected", [ + (np.int32, True), + (list, False), + (1.1, False), + (str, True), + (np.dtype(np.float64), True), + (np.dtype((np.int16, (3, 4))), True), + (np.dtype([('a', np.int8)]), True), + ]) +def test_issctype(rep, expected): + # ensure proper identification of scalar + # data-types by issctype() + actual = issctype(rep) + assert type(actual) is bool + assert_equal(actual, expected) + + +@pytest.mark.skipif(sys.flags.optimize > 1, + reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1") +@pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") +class TestDocStrings: + def test_platform_dependent_aliases(self): + if np.int64 is np.int_: + assert_('int64' in np.int_.__doc__) + elif np.int64 is np.longlong: + assert_('int64' in np.longlong.__doc__) + + +class TestScalarTypeNames: + # gh-9799 + + numeric_types = [ + np.byte, np.short, np.intc, np.long, np.longlong, + np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong, + np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble, + ] + + def test_names_are_unique(self): + # none of the above may be aliases for each other + assert len(set(self.numeric_types)) == len(self.numeric_types) + + # names must be unique + names = [t.__name__ for t in self.numeric_types] + assert len(set(names)) == len(names) + + @pytest.mark.parametrize('t', numeric_types) + def test_names_reflect_attributes(self, t): + """ Test that names correspond to where the type is under ``np.`` """ + assert getattr(np, t.__name__) is t + + @pytest.mark.parametrize('t', numeric_types) + def test_names_are_undersood_by_dtype(self, t): + """ Test the dtype constructor maps names back to the type """ + assert np.dtype(t.__name__).type is t + + +class TestScalarTypeOrder: + @pytest.mark.parametrize(('a', 'b'), [ + # signedinteger + (np.byte, np.short), + (np.short, np.intc), + (np.intc, np.long), + (np.long, np.longlong), + # unsignedinteger + (np.ubyte, np.ushort), + (np.ushort, np.uintc), + (np.uintc, np.ulong), + (np.ulong, np.ulonglong), + # floating + (np.half, np.single), + (np.single, np.double), + (np.double, np.longdouble), + # complexfloating + (np.csingle, np.cdouble), + (np.cdouble, np.clongdouble), + # flexible + (np.bytes_, np.str_), + (np.str_, np.void), + # bouncy castles + (np.datetime64, np.timedelta64), + ]) + def test_stable_ordering(self, a: type[np.generic], b: type[np.generic]): + assert np.ScalarType.index(a) <= np.ScalarType.index(b) + + +class TestBoolDefinition: + def test_bool_definition(self): + assert nt.bool is np.bool diff --git a/python/numpy/_core/tests/test_overrides.py 
b/python/numpy/_core/tests/test_overrides.py new file mode 100644 index 000000000..b0d73375e --- /dev/null +++ b/python/numpy/_core/tests/test_overrides.py @@ -0,0 +1,791 @@ +import inspect +import os +import pickle +import sys +import tempfile +from io import StringIO +from unittest import mock + +import pytest + +import numpy as np +from numpy._core.overrides import ( + _get_implementing_args, + array_function_dispatch, + verify_matching_signatures, +) +from numpy.testing import assert_, assert_equal, assert_raises, assert_raises_regex +from numpy.testing.overrides import get_overridable_numpy_array_functions + + +def _return_not_implemented(self, *args, **kwargs): + return NotImplemented + + +# need to define this at the top level to test pickling +@array_function_dispatch(lambda array: (array,)) +def dispatched_one_arg(array): + """Docstring.""" + return 'original' + + +@array_function_dispatch(lambda array1, array2: (array1, array2)) +def dispatched_two_arg(array1, array2): + """Docstring.""" + return 'original' + + +class TestGetImplementingArgs: + + def test_ndarray(self): + array = np.array(1) + + args = _get_implementing_args([array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, array]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([array, 1]) + assert_equal(list(args), [array]) + + args = _get_implementing_args([1, array]) + assert_equal(list(args), [array]) + + def test_ndarray_subclasses(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + array = np.array(1).view(np.ndarray) + override_sub = np.array(1).view(OverrideSub) + no_override_sub = np.array(1).view(NoOverrideSub) + + args = _get_implementing_args([array, override_sub]) + assert_equal(list(args), [override_sub, array]) + + args = _get_implementing_args([array, no_override_sub]) + assert_equal(list(args), [no_override_sub, array]) + + args = _get_implementing_args( + [override_sub, no_override_sub]) + assert_equal(list(args), [override_sub, no_override_sub]) + + def test_ndarray_and_duck_array(self): + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + other = Other() + + args = _get_implementing_args([other, array]) + assert_equal(list(args), [other, array]) + + args = _get_implementing_args([array, other]) + assert_equal(list(args), [array, other]) + + def test_ndarray_subclass_and_duck_array(self): + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + class Other: + __array_function__ = _return_not_implemented + + array = np.array(1) + subarray = np.array(1).view(OverrideSub) + other = Other() + + assert_equal(_get_implementing_args([array, subarray, other]), + [subarray, array, other]) + assert_equal(_get_implementing_args([array, other, subarray]), + [subarray, array, other]) + + def test_many_duck_arrays(self): + + class A: + __array_function__ = _return_not_implemented + + class B(A): + __array_function__ = _return_not_implemented + + class C(A): + __array_function__ = _return_not_implemented + + class D: + __array_function__ = _return_not_implemented + + a = A() + b = B() + c = C() + d = D() + + assert_equal(_get_implementing_args([1]), []) + assert_equal(_get_implementing_args([a]), [a]) + assert_equal(_get_implementing_args([a, 1]), [a]) + assert_equal(_get_implementing_args([a, a, a]), [a]) + assert_equal(_get_implementing_args([a, d, a]), [a, d]) + assert_equal(_get_implementing_args([a, b]), [b, a]) + 
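The ordering contract verified in these tests is that subclasses are consulted before their superclasses, and arguments otherwise keep left-to-right order. A sketch using the same private helper the test file imports (`numpy._core.overrides` is not public API):

```python
import numpy as np
from numpy._core.overrides import _get_implementing_args  # private helper

def _not_implemented(self, *args, **kwargs):
    return NotImplemented

class Base:
    __array_function__ = _not_implemented

class Sub(Base):
    __array_function__ = _not_implemented

base, sub = Base(), Sub()
# Subclasses are consulted before their superclasses:
assert _get_implementing_args([base, sub]) == [sub, base]
```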
assert_equal(_get_implementing_args([b, a]), [b, a]) + assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) + assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) + + def test_too_many_duck_arrays(self): + namespace = {'__array_function__': _return_not_implemented} + types = [type('A' + str(i), (object,), namespace) for i in range(65)] + relevant_args = [t() for t in types] + + actual = _get_implementing_args(relevant_args[:64]) + assert_equal(actual, relevant_args[:64]) + + with assert_raises_regex(TypeError, 'distinct argument types'): + _get_implementing_args(relevant_args) + + +class TestNDArrayArrayFunction: + + def test_method(self): + + class Other: + __array_function__ = _return_not_implemented + + class NoOverrideSub(np.ndarray): + pass + + class OverrideSub(np.ndarray): + __array_function__ = _return_not_implemented + + array = np.array([1]) + other = Other() + no_override_sub = array.view(NoOverrideSub) + override_sub = array.view(OverrideSub) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray,), + args=(array, 1.), kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, Other), + args=(array, other), kwargs={}) + assert_(result is NotImplemented) + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, NoOverrideSub), + args=(array, no_override_sub), + kwargs={}) + assert_equal(result, 'original') + + result = array.__array_function__(func=dispatched_two_arg, + types=(np.ndarray, OverrideSub), + args=(array, override_sub), + kwargs={}) + assert_equal(result, 'original') + + with assert_raises_regex(TypeError, 'no implementation found'): + np.concatenate((array, other)) + + expected = np.concatenate((array, array)) + result = np.concatenate((array, no_override_sub)) + assert_equal(result, expected.view(NoOverrideSub)) + result = np.concatenate((array, override_sub)) + assert_equal(result, expected.view(OverrideSub)) + + def test_no_wrapper(self): + # Regular numpy functions have wrappers, but do not presume + # all functions do (array creation ones do not): check that + # we just call the function in that case. + array = np.array(1) + func = lambda x: x * 2 + result = array.__array_function__(func=func, types=(np.ndarray,), + args=(array,), kwargs={}) + assert_equal(result, array * 2) + + def test_wrong_arguments(self): + # Check our implementation guards against wrong arguments. 
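For reference, the smallest useful `__array_function__` implementation claims one function and returns `NotImplemented` for everything else, which is exactly what produces the "no implementation found" `TypeError` exercised here. A hedged sketch:

```python
import numpy as np

class Duck:
    # Claim np.concatenate only; anything else falls through.
    def __array_function__(self, func, types, args, kwargs):
        if func is np.concatenate:
            return "duck-concatenated"
        return NotImplemented

assert np.concatenate([Duck()]) == "duck-concatenated"
# np.stack([Duck()]) would raise TypeError: no implementation found
```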
+ a = np.array([1, 2]) + with pytest.raises(TypeError, match="args must be a tuple"): + a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) + with pytest.raises(TypeError, match="kwargs must be a dict"): + a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) + + +class TestArrayFunctionDispatch: + + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + roundtripped = pickle.loads( + pickle.dumps(dispatched_one_arg, protocol=proto)) + assert_(roundtripped is dispatched_one_arg) + + def test_name_and_docstring(self): + assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg') + if sys.flags.optimize < 2: + assert_equal(dispatched_one_arg.__doc__, 'Docstring.') + + def test_interface(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return (self, func, types, args, kwargs) + + original = MyArray() + (obj, func, types, args, kwargs) = dispatched_one_arg(original) + assert_(obj is original) + assert_(func is dispatched_one_arg) + assert_equal(set(types), {MyArray}) + # assert_equal uses the overloaded np.iscomplexobj() internally + assert_(args == (original,)) + assert_equal(kwargs, {}) + + def test_not_implemented(self): + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + return NotImplemented + + array = MyArray() + with assert_raises_regex(TypeError, 'no implementation found'): + dispatched_one_arg(array) + + def test_where_dispatch(self): + + class DuckArray: + def __array_function__(self, ufunc, method, *inputs, **kwargs): + return "overridden" + + array = np.array(1) + duck_array = DuckArray() + + result = np.std(array, where=duck_array) + + assert_equal(result, "overridden") + + +class TestVerifyMatchingSignatures: + + def test_verify_matching_signatures(self): + + verify_matching_signatures(lambda x: 0, lambda x: 0) + verify_matching_signatures(lambda x=None: 0, lambda x=None: 0) + verify_matching_signatures(lambda x=1: 0, lambda x=None: 0) + + with assert_raises(RuntimeError): + verify_matching_signatures(lambda a: 0, lambda b: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x: 0, lambda x=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=None: 0, lambda y=None: 0) + with assert_raises(RuntimeError): + verify_matching_signatures(lambda x=1: 0, lambda y=1: 0) + + def test_array_function_dispatch(self): + + with assert_raises(RuntimeError): + @array_function_dispatch(lambda x: (x,)) + def f(y): + pass + + # should not raise + @array_function_dispatch(lambda x: (x,), verify=False) + def f(y): + pass + + +def _new_duck_type_and_implements(): + """Create a duck array type and implements functions.""" + HANDLED_FUNCTIONS = {} + + class MyArray: + def __array_function__(self, func, types, args, kwargs): + if func not in HANDLED_FUNCTIONS: + return NotImplemented + if not all(issubclass(t, MyArray) for t in types): + return NotImplemented + return HANDLED_FUNCTIONS[func](*args, **kwargs) + + def implements(numpy_function): + """Register an __array_function__ implementations.""" + def decorator(func): + HANDLED_FUNCTIONS[numpy_function] = func + return func + return decorator + + return (MyArray, implements) + + +class TestArrayFunctionImplementation: + + def test_one_arg(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(dispatched_one_arg) + def _(array): + return 'myarray' + + assert_equal(dispatched_one_arg(1), 'original') + assert_equal(dispatched_one_arg(MyArray()), 'myarray') + + def 
test_optional_args(self):
+        MyArray, implements = _new_duck_type_and_implements()
+
+        @array_function_dispatch(lambda array, option=None: (array,))
+        def func_with_option(array, option='default'):
+            return option
+
+        @implements(func_with_option)
+        def my_array_func_with_option(array, new_option='myarray'):
+            return new_option
+
+        # we don't need to implement every option on __array_function__
+        # implementations
+        assert_equal(func_with_option(1), 'default')
+        assert_equal(func_with_option(1, option='extra'), 'extra')
+        assert_equal(func_with_option(MyArray()), 'myarray')
+        with assert_raises(TypeError):
+            func_with_option(MyArray(), option='extra')
+
+        # but new options on implementations can't be used
+        result = my_array_func_with_option(MyArray(), new_option='yes')
+        assert_equal(result, 'yes')
+        with assert_raises(TypeError):
+            func_with_option(MyArray(), new_option='no')
+
+    def test_not_implemented(self):
+        MyArray, implements = _new_duck_type_and_implements()
+
+        @array_function_dispatch(lambda array: (array,), module='my')
+        def func(array):
+            return array
+
+        array = np.array(1)
+        assert_(func(array) is array)
+        assert_equal(func.__module__, 'my')
+
+        with assert_raises_regex(
+                TypeError, "no implementation found for 'my.func'"):
+            func(MyArray())
+
+    @pytest.mark.parametrize("name", ["concatenate", "mean", "asarray"])
+    def test_signature_error_message_simple(self, name):
+        func = getattr(np, name)
+        try:
+            # all of these functions need an argument:
+            func()
+        except TypeError as e:
+            exc = e
+
+        assert exc.args[0].startswith(f"{name}()")
+
+    def test_signature_error_message(self):
+        # The lambda function will be named "<lambda>", but the TypeError
+        # should show the name as "func"
+        def _dispatcher():
+            return ()
+
+        @array_function_dispatch(_dispatcher)
+        def func():
+            pass
+
+        try:
+            func._implementation(bad_arg=3)
+        except TypeError as e:
+            expected_exception = e
+
+        try:
+            func(bad_arg=3)
+            raise AssertionError("must fail")
+        except TypeError as exc:
+            if exc.args[0].startswith("_dispatcher"):
+                # We replace the qualname currently, but it used `__name__`
+                # (relevant functions have the same name and qualname anyway)
+                pytest.skip("Python version is not using __qualname__ for "
+                            "TypeError formatting.")
+
+            assert exc.args == expected_exception.args
+
+    @pytest.mark.parametrize("value", [234, "this func is not replaced"])
+    def test_dispatcher_error(self, value):
+        # If the dispatcher raises an error, we must not attempt to mutate it
+        error = TypeError(value)
+
+        def dispatcher():
+            raise error
+
+        @array_function_dispatch(dispatcher)
+        def func():
+            return 3
+
+        try:
+            func()
+            raise AssertionError("must fail")
+        except TypeError as exc:
+            assert exc is error  # unmodified exception
+
+    def test_properties(self):
+        # Check that str and repr are sensible
+        func = dispatched_two_arg
+        assert str(func) == str(func._implementation)
+        repr_no_id = repr(func).split("at ")[0]
+        repr_no_id_impl = repr(func._implementation).split("at ")[0]
+        assert repr_no_id == repr_no_id_impl
+
+    @pytest.mark.parametrize("func", [
+        lambda x, y: 0,  # no like argument
+        lambda like=None: 0,  # not keyword only
+        lambda *, like=None, a=3: 0,  # not last (not that it matters)
+    ])
+    def test_bad_like_sig(self, func):
+        # We sanity check the signature, and these should fail.
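The decorator under test pairs an implementation with a dispatcher whose parameter names must match and whose defaults must all be `None`. A sketch of typical usage, importing from the same private `numpy._core.overrides` module the test file itself uses:

```python
import numpy as np
from numpy._core.overrides import array_function_dispatch  # private helper

def _total_dispatcher(array, weights=None):
    # Yield the arguments whose __array_function__ should be consulted;
    # all defaults in a dispatcher must be None.
    return (array, weights)

@array_function_dispatch(_total_dispatcher)
def total(array, weights=None):
    return np.sum(array) if weights is None else np.sum(array * weights)

assert total(np.arange(4)) == 6
```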
+ with pytest.raises(RuntimeError): + array_function_dispatch()(func) + + def test_bad_like_passing(self): + # Cover internal sanity check for passing like as first positional arg + def func(*, like=None): + pass + + func_with_like = array_function_dispatch()(func) + with pytest.raises(TypeError): + func_with_like() + with pytest.raises(TypeError): + func_with_like(like=234) + + def test_too_many_args(self): + # Mainly a unit-test to increase coverage + objs = [] + for i in range(80): + class MyArr: + def __array_function__(self, *args, **kwargs): + return NotImplemented + + objs.append(MyArr()) + + def _dispatch(*args): + return args + + @array_function_dispatch(_dispatch) + def func(*args): + pass + + with pytest.raises(TypeError, match="maximum number"): + func(*objs) + + +class TestNDArrayMethods: + + def test_repr(self): + # gh-12162: should still be defined even if __array_function__ doesn't + # implement np.array_repr() + + class MyArray(np.ndarray): + def __array_function__(*args, **kwargs): + return NotImplemented + + array = np.array(1).view(MyArray) + assert_equal(repr(array), 'MyArray(1)') + assert_equal(str(array), '1') + + +class TestNumPyFunctions: + + def test_set_module(self): + assert_equal(np.sum.__module__, 'numpy') + assert_equal(np.char.equal.__module__, 'numpy.char') + assert_equal(np.fft.fft.__module__, 'numpy.fft') + assert_equal(np.linalg.solve.__module__, 'numpy.linalg') + + def test_inspect_sum(self): + signature = inspect.signature(np.sum) + assert_('axis' in signature.parameters) + + def test_override_sum(self): + MyArray, implements = _new_duck_type_and_implements() + + @implements(np.sum) + def _(array): + return 'yes' + + assert_equal(np.sum(MyArray()), 'yes') + + def test_sum_on_mock_array(self): + + # We need a proxy for mocks because __array_function__ is only looked + # up in the class dict + class ArrayProxy: + def __init__(self, value): + self.value = value + + def __array_function__(self, *args, **kwargs): + return self.value.__array_function__(*args, **kwargs) + + def __array__(self, *args, **kwargs): + return self.value.__array__(*args, **kwargs) + + proxy = ArrayProxy(mock.Mock(spec=ArrayProxy)) + proxy.value.__array_function__.return_value = 1 + result = np.sum(proxy) + assert_equal(result, 1) + proxy.value.__array_function__.assert_called_once_with( + np.sum, (ArrayProxy,), (proxy,), {}) + proxy.value.__array__.assert_not_called() + + def test_sum_forwarding_implementation(self): + + class MyArray(np.ndarray): + + def sum(self, axis, out): + return 'summed' + + def __array_function__(self, func, types, args, kwargs): + return super().__array_function__(func, types, args, kwargs) + + # note: the internal implementation of np.sum() calls the .sum() method + array = np.array(1).view(MyArray) + assert_equal(np.sum(array), 'summed') + + +class TestArrayLike: + def setup_method(self): + class MyArray: + def __init__(self, function=None): + self.function = function + + def __array_function__(self, func, types, args, kwargs): + assert func is getattr(np, func.__name__) + try: + my_func = getattr(self, func.__name__) + except AttributeError: + return NotImplemented + return my_func(*args, **kwargs) + + self.MyArray = MyArray + + class MyNoArrayFunctionArray: + def __init__(self, function=None): + self.function = function + + self.MyNoArrayFunctionArray = MyNoArrayFunctionArray + + class MySubclass(np.ndarray): + def __array_function__(self, func, types, args, kwargs): + result = super().__array_function__(func, types, args, kwargs) + return 
result.view(self.__class__) + + self.MySubclass = MySubclass + + def add_method(self, name, arr_class, enable_value_error=False): + def _definition(*args, **kwargs): + # Check that `like=` isn't propagated downstream + assert 'like' not in kwargs + + if enable_value_error and 'value_error' in kwargs: + raise ValueError + + return arr_class(getattr(arr_class, name)) + setattr(arr_class, name, _definition) + + def func_args(*args, **kwargs): + return args, kwargs + + def test_array_like_not_implemented(self): + self.add_method('array', self.MyArray) + + ref = self.MyArray.array() + + with assert_raises_regex(TypeError, 'no implementation found'): + array_like = np.asarray(1, like=ref) + + _array_tests = [ + ('array', *func_args((1,))), + ('asarray', *func_args((1,))), + ('asanyarray', *func_args((1,))), + ('ascontiguousarray', *func_args((2, 3))), + ('asfortranarray', *func_args((2, 3))), + ('require', *func_args((np.arange(6).reshape(2, 3),), + requirements=['A', 'F'])), + ('empty', *func_args((1,))), + ('full', *func_args((1,), 2)), + ('ones', *func_args((1,))), + ('zeros', *func_args((1,))), + ('arange', *func_args(3)), + ('frombuffer', *func_args(b'\x00' * 8, dtype=int)), + ('fromiter', *func_args(range(3), dtype=int)), + ('fromstring', *func_args('1,2', dtype=int, sep=',')), + ('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))), + ('genfromtxt', *func_args(lambda: StringIO('1,2.1'), + dtype=[('int', 'i8'), ('float', 'f8')], + delimiter=',')), + ] + + def test_nep35_functions_as_array_functions(self,): + all_array_functions = get_overridable_numpy_array_functions() + like_array_functions_subset = { + getattr(np, func_name) for func_name, *_ in self.__class__._array_tests + } + assert like_array_functions_subset.issubset(all_array_functions) + + nep35_python_functions = { + np.eye, np.fromfunction, np.full, np.genfromtxt, + np.identity, np.loadtxt, np.ones, np.require, np.tri, + } + assert nep35_python_functions.issubset(all_array_functions) + + nep35_C_functions = { + np.arange, np.array, np.asanyarray, np.asarray, + np.ascontiguousarray, np.asfortranarray, np.empty, + np.frombuffer, np.fromfile, np.fromiter, np.fromstring, + np.zeros, + } + assert nep35_C_functions.issubset(all_array_functions) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like(self, function, args, kwargs, numpy_ref): + self.add_method('array', self.MyArray) + self.add_method(function, self.MyArray) + np_func = getattr(np, function) + my_func = getattr(self.MyArray, function) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = self.MyArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + + if numpy_ref is True: + assert type(array_like) is np.ndarray + + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + + # Special-case np.empty to ensure values match + if function == "empty": + np_arr.fill(1) + array_like.fill(1) + + assert_equal(array_like, np_arr) + else: + assert type(array_like) is self.MyArray + assert array_like.function is my_func + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + @pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"]) + def test_no_array_function_like(self, function, args, kwargs, ref): + self.add_method('array', self.MyNoArrayFunctionArray) + self.add_method(function, self.MyNoArrayFunctionArray) + np_func = getattr(np, function) + 
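The MySubclass fixture above shows the standard NEP 35 pattern: `like=` routes an array-creation call through the reference object's `__array_function__`. A condensed, self-contained version of that pattern:

```python
import numpy as np

class Tagged(np.ndarray):
    def __array_function__(self, func, types, args, kwargs):
        result = super().__array_function__(func, types, args, kwargs)
        if isinstance(result, np.ndarray):
            result = result.view(type(self))
        return result

ref = np.array([0.0]).view(Tagged)
out = np.zeros(3, like=ref)   # creation is routed through ref's override
assert type(out) is Tagged and out.shape == (3,)
```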
+ # Instantiate ref if it's the MyNoArrayFunctionArray class + if ref == "MyNoArrayFunctionArray": + ref = self.MyNoArrayFunctionArray.array() + + like_args = tuple(a() if callable(a) else a for a in args) + + with assert_raises_regex(TypeError, + 'The `like` argument must be an array-like that implements'): + np_func(*like_args, **kwargs, like=ref) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_subclass(self, function, args, kwargs): + ref = np.array(1).view(self.MySubclass) + np_func = getattr(np, function) + like_args = tuple(a() if callable(a) else a for a in args) + array_like = np_func(*like_args, **kwargs, like=ref) + assert type(array_like) is self.MySubclass + if np_func is np.empty: + return + np_args = tuple(a() if callable(a) else a for a in args) + np_arr = np_func(*np_args, **kwargs) + assert_equal(array_like.view(np.ndarray), np_arr) + + @pytest.mark.parametrize('numpy_ref', [True, False]) + def test_array_like_fromfile(self, numpy_ref): + self.add_method('array', self.MyArray) + self.add_method("fromfile", self.MyArray) + + if numpy_ref is True: + ref = np.array(1) + else: + ref = self.MyArray.array() + + data = np.random.random(5) + + with tempfile.TemporaryDirectory() as tmpdir: + fname = os.path.join(tmpdir, "testfile") + data.tofile(fname) + + array_like = np.fromfile(fname, like=ref) + if numpy_ref is True: + assert type(array_like) is np.ndarray + np_res = np.fromfile(fname, like=ref) + assert_equal(np_res, data) + assert_equal(array_like, np_res) + else: + assert type(array_like) is self.MyArray + assert array_like.function is self.MyArray.fromfile + + def test_exception_handling(self): + self.add_method('array', self.MyArray, enable_value_error=True) + + ref = self.MyArray.array() + + with assert_raises(TypeError): + # Raises the error about `value_error` being invalid first + np.array(1, value_error=True, like=ref) + + @pytest.mark.parametrize('function, args, kwargs', _array_tests) + def test_like_as_none(self, function, args, kwargs): + self.add_method('array', self.MyArray) + self.add_method(function, self.MyArray) + np_func = getattr(np, function) + + like_args = tuple(a() if callable(a) else a for a in args) + # required for loadtxt and genfromtxt to init w/o error. 
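`like=None` is explicitly allowed and simply takes the default ndarray path, which is what test_like_as_none checks; a minimal sketch:

```python
import numpy as np

# Passing like=None is a no-op; the default ndarray path is taken:
a = np.arange(3, like=None)
assert type(a) is np.ndarray
assert (a == np.arange(3)).all()
```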
+ like_args_exp = tuple(a() if callable(a) else a for a in args) + + array_like = np_func(*like_args, **kwargs, like=None) + expected = np_func(*like_args_exp, **kwargs) + # Special-case np.empty to ensure values match + if function == "empty": + array_like.fill(1) + expected.fill(1) + assert_equal(array_like, expected) + + +def test_function_like(): + # We provide a `__get__` implementation, make sure it works + assert type(np.mean) is np._core._multiarray_umath._ArrayFunctionDispatcher + + class MyClass: + def __array__(self, dtype=None, copy=None): + # valid argument to mean: + return np.arange(3) + + func1 = staticmethod(np.mean) + func2 = np.mean + func3 = classmethod(np.mean) + + m = MyClass() + assert m.func1([10]) == 10 + assert m.func2() == 1 # mean of the arange + with pytest.raises(TypeError, match="unsupported operand type"): + # Tries to operate on the class + m.func3() + + # Manual binding also works (the above may shortcut): + bound = np.mean.__get__(m, MyClass) + assert bound() == 1 + + bound = np.mean.__get__(None, MyClass) # unbound actually + assert bound([10]) == 10 + + bound = np.mean.__get__(MyClass) # classmethod + with pytest.raises(TypeError, match="unsupported operand type"): + bound() diff --git a/python/numpy/_core/tests/test_print.py b/python/numpy/_core/tests/test_print.py new file mode 100644 index 000000000..d99b2794d --- /dev/null +++ b/python/numpy/_core/tests/test_print.py @@ -0,0 +1,200 @@ +import sys +from io import StringIO + +import pytest + +import numpy as np +from numpy._core.tests._locales import CommaDecimalPointLocale +from numpy.testing import IS_MUSL, assert_, assert_equal + +_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'} + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_types(tp): + """ Check formatting. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(float(x)), + err_msg=f'Failed str formatting for type {tp}') + + if tp(1e16).itemsize > 4: + assert_equal(str(tp(1e16)), str(float('1e16')), + err_msg=f'Failed str formatting for type {tp}') + else: + ref = '1e+16' + assert_equal(str(tp(1e16)), ref, + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_nan_inf_float(tp): + """ Check formatting of nan & inf. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. + + """ + for x in [np.inf, -np.inf, np.nan]: + assert_equal(str(tp(x)), _REF[x], + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_types(tp): + """Check formatting of complex types. + + This is only for the str function, and only for simple types. + The precision of np.float32 and np.longdouble aren't the same as the + python float precision. 
+ + """ + for x in [0, 1, -1, 1e20]: + assert_equal(str(tp(x)), str(complex(x)), + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x * 1j)), str(complex(x * 1j)), + err_msg=f'Failed str formatting for type {tp}') + assert_equal(str(tp(x + x * 1j)), str(complex(x + x * 1j)), + err_msg=f'Failed str formatting for type {tp}') + + if tp(1e16).itemsize > 8: + assert_equal(str(tp(1e16)), str(complex(1e16)), + err_msg=f'Failed str formatting for type {tp}') + else: + ref = '(1e+16+0j)' + assert_equal(str(tp(1e16)), ref, + err_msg=f'Failed str formatting for type {tp}') + + +@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_inf_nan(dtype): + """Check inf/nan formatting of complex types.""" + TESTS = { + complex(np.inf, 0): "(inf+0j)", + complex(0, np.inf): "infj", + complex(-np.inf, 0): "(-inf+0j)", + complex(0, -np.inf): "-infj", + complex(np.inf, 1): "(inf+1j)", + complex(1, np.inf): "(1+infj)", + complex(-np.inf, 1): "(-inf+1j)", + complex(1, -np.inf): "(1-infj)", + complex(np.nan, 0): "(nan+0j)", + complex(0, np.nan): "nanj", + complex(-np.nan, 0): "(nan+0j)", + complex(0, -np.nan): "nanj", + complex(np.nan, 1): "(nan+1j)", + complex(1, np.nan): "(1+nanj)", + complex(-np.nan, 1): "(nan+1j)", + complex(1, -np.nan): "(1+nanj)", + } + for c, s in TESTS.items(): + assert_equal(str(dtype(c)), s) + + +# print tests +def _test_redirected_print(x, tp, ref=None): + file = StringIO() + file_tp = StringIO() + stdout = sys.stdout + try: + sys.stdout = file_tp + print(tp(x)) + sys.stdout = file + if ref: + print(ref) + else: + print(x) + finally: + sys.stdout = stdout + + assert_equal(file.getvalue(), file_tp.getvalue(), + err_msg=f'print failed for type{tp}') + + +@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble]) +def test_float_type_print(tp): + """Check formatting when using print """ + for x in [0, 1, -1, 1e20]: + _test_redirected_print(float(x), tp) + + for x in [np.inf, -np.inf, np.nan]: + _test_redirected_print(float(x), tp, _REF[x]) + + if tp(1e16).itemsize > 4: + _test_redirected_print(1e16, tp) + else: + ref = '1e+16' + _test_redirected_print(1e16, tp, ref) + + +@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble]) +def test_complex_type_print(tp): + """Check formatting when using print """ + # We do not create complex with inf/nan directly because the feature is + # missing in python < 2.6 + for x in [0, 1, -1, 1e20]: + _test_redirected_print(complex(x), tp) + + if tp(1e16).itemsize > 8: + _test_redirected_print(complex(1e16), tp) + else: + ref = '(1e+16+0j)' + _test_redirected_print(complex(1e16), tp, ref) + + _test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)') + _test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)') + _test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)') + + +def test_scalar_format(): + """Test the str.format method with NumPy scalar types""" + tests = [('{0}', True, np.bool), + ('{0}', False, np.bool), + ('{0:d}', 130, np.uint8), + ('{0:d}', 50000, np.uint16), + ('{0:d}', 3000000000, np.uint32), + ('{0:d}', 15000000000000000000, np.uint64), + ('{0:d}', -120, np.int8), + ('{0:d}', -30000, np.int16), + ('{0:d}', -2000000000, np.int32), + ('{0:d}', -7000000000000000000, np.int64), + ('{0:g}', 1.5, np.float16), + ('{0:g}', 1.5, np.float32), + ('{0:g}', 1.5, np.float64), + ('{0:g}', 1.5, np.longdouble), + ('{0:g}', 1.5 + 0.5j, np.complex64), + ('{0:g}', 1.5 + 0.5j, np.complex128), + ('{0:g}', 1.5 + 0.5j, np.clongdouble)] + + for (fmat, val, valtype) in 
tests:
+        try:
+            assert_equal(fmat.format(val), fmat.format(valtype(val)),
+                         f"failed with val {val}, type {valtype}")
+        except ValueError as e:
+            assert_(False,
+                    "format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
+                    (fmat, repr(val), repr(valtype), str(e)))
+
+
+#
+# Locale tests: scalar types formatting should be independent of the locale
+#
+
+class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
+
+    def test_locale_single(self):
+        assert_equal(str(np.float32(1.2)), str(1.2))
+
+    def test_locale_double(self):
+        assert_equal(str(np.double(1.2)), str(1.2))
+
+    @pytest.mark.skipif(IS_MUSL,
+                        reason="test flaky on musllinux")
+    def test_locale_longdouble(self):
+        assert_equal(str(np.longdouble('1.2')), str(1.2))
diff --git a/python/numpy/_core/tests/test_protocols.py b/python/numpy/_core/tests/test_protocols.py
new file mode 100644
index 000000000..96bb60084
--- /dev/null
+++ b/python/numpy/_core/tests/test_protocols.py
@@ -0,0 +1,46 @@
+import warnings
+
+import pytest
+
+import numpy as np
+
+
+@pytest.mark.filterwarnings("error")
+def test_getattr_warning():
+    # issue gh-14735: make sure we clear only getattr errors, and let warnings
+    # through
+    class Wrapper:
+        def __init__(self, array):
+            self.array = array
+
+        def __len__(self):
+            return len(self.array)
+
+        def __getitem__(self, item):
+            return type(self)(self.array[item])
+
+        def __getattr__(self, name):
+            if name.startswith("__array_"):
+                warnings.warn("object got converted", UserWarning, stacklevel=1)
+
+            return getattr(self.array, name)
+
+        def __repr__(self):
+            return f"<Wrapper({self.array})>"
+
+    array = Wrapper(np.arange(10))
+    with pytest.raises(UserWarning, match="object got converted"):
+        np.asarray(array)
+
+
+def test_array_called():
+    class Wrapper:
+        val = '0' * 100
+
+        def __array__(self, dtype=None, copy=None):
+            return np.array([self.val], dtype=dtype, copy=copy)
+
+    wrapped = Wrapper()
+    arr = np.array(wrapped, dtype=str)
+    assert arr.dtype == 'U100'
+    assert arr[0] == Wrapper.val
diff --git a/python/numpy/_core/tests/test_records.py b/python/numpy/_core/tests/test_records.py
new file mode 100644
index 000000000..b4b93aee4
--- /dev/null
+++ b/python/numpy/_core/tests/test_records.py
@@ -0,0 +1,544 @@
+import collections.abc
+import pickle
+import textwrap
+from io import BytesIO
+from os import path
+from pathlib import Path
+
+import pytest
+
+import numpy as np
+from numpy.testing import (
+    assert_,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_equal,
+    assert_raises,
+    temppath,
+)
+
+
+class TestFromrecords:
+    def test_fromrecords(self):
+        r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
+                               names='col1,col2,col3')
+        assert_equal(r[0].item(), (456, 'dbe', 1.2))
+        assert_equal(r['col1'].dtype.kind, 'i')
+        assert_equal(r['col2'].dtype.kind, 'U')
+        assert_equal(r['col2'].dtype.itemsize, 12)
+        assert_equal(r['col3'].dtype.kind, 'f')
+
+    def test_fromrecords_0len(self):
+        """ Verify fromrecords works with a 0-length input """
+        dtype = [('a', float), ('b', float)]
+        r = np.rec.fromrecords([], dtype=dtype)
+        assert_equal(r.shape, (0,))
+
+    def test_fromrecords_2d(self):
+        data = [
+            [(1, 2), (3, 4), (5, 6)],
+            [(6, 5), (4, 3), (2, 1)]
+        ]
+        expected_a = [[1, 3, 5], [6, 4, 2]]
+        expected_b = [[2, 4, 6], [5, 3, 1]]
+
+        # try with dtype
+        r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
+        assert_equal(r1['a'], expected_a)
+        assert_equal(r1['b'], expected_b)
+
+        # try with names
+        r2 = np.rec.fromrecords(data, names=['a', 'b'])
+        assert_equal(r2['a'], expected_a)
+        assert_equal(r2['b'], expected_b)
+
+        assert_equal(r1, r2)
+
+    def test_method_array(self):
+        r = np.rec.array(
+            b'abcdefg' * 100, formats='i2,S3,i4', shape=3, byteorder='big'
+        )
+        assert_equal(r[1].item(), (25444, b'efg', 1633837924))
+
+    def test_method_array2(self):
+        r = np.rec.array(
+            [
+                (1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'),
+                (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g')
+            ],
+            formats='u1,f4,S1'
+        )
+        assert_equal(r[1].item(), (2, 22.0, b'b'))
+
+    def test_recarray_slices(self):
+        r = np.rec.array(
+            [
+                (1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'),
+                (5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g')
+            ],
+            formats='u1,f4,S1'
+        )
+        assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
+
+    def test_recarray_fromarrays(self):
+        x1 = np.array([1, 2, 3, 4])
+        x2 = np.array(['a', 'dd', 'xyz', '12'])
+        x3 = np.array([1.1, 2, 3, 4])
+        r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
+        assert_equal(r[1].item(), (2, 'dd', 2.0))
+        x1[1] = 34
+        assert_equal(r.a, np.array([1, 2, 3, 4]))
+
+    def test_recarray_fromfile(self):
+        data_dir = path.join(path.dirname(__file__), 'data')
+        filename = path.join(data_dir, 'recarray_from_file.fits')
+        fd = open(filename, 'rb')
+        fd.seek(2880 * 2)
+        r1 = np.rec.fromfile(fd, formats='f8,i4,S5', shape=3, byteorder='big')
+        fd.seek(2880 * 2)
+        r2 = np.rec.array(fd, formats='f8,i4,S5', shape=3, byteorder='big')
+        fd.seek(2880 * 2)
+        bytes_array = BytesIO()
+        bytes_array.write(fd.read())
+        bytes_array.seek(0)
+        r3 = np.rec.fromfile(
+            bytes_array, formats='f8,i4,S5', shape=3, byteorder='big'
+        )
+        fd.close()
+        assert_equal(r1, r2)
+        assert_equal(r2, r3)
+
+    def test_recarray_from_obj(self):
+        count = 10
+        a = np.zeros(count, dtype='O')
+        b = np.zeros(count, dtype='f8')
+        c = np.zeros(count, dtype='f8')
+        for i in range(len(a)):
+            a[i] = list(range(1, 10))
+
+        mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
+        for i in range(len(a)):
+            assert_(mine.date[i] == list(range(1, 10)))
+            assert_(mine.data1[i] == 0.0)
+            assert_(mine.data2[i] == 0.0)
+
+    def test_recarray_repr(self):
+        a = np.array([(1, 0.1), (2, 0.2)],
+                     dtype=[('foo', '<i4'), ('bar', '<f8')])
diff --git a/python/numpy/_core/tests/test_regression.py b/python/numpy/_core/tests/test_regression.py
new file mode 100644
--- /dev/null
+++ b/python/numpy/_core/tests/test_regression.py
+class TestRegression:
+    def test_endian_bool_indexing(self):
+        # Ticket #105
+        a = np.arange(10., dtype='>f8')
+        b = np.arange(10., dtype='<f8')
+        xa = np.where((a > 2) & (a < 6))
+        xb = np.where((b > 2) & (b < 6))
+        ya = ((a > 2) & (a < 6))
+        yb = ((b > 2) & (b < 6))
+        assert_array_almost_equal(xa, ya.nonzero())
+        assert_array_almost_equal(xb, yb.nonzero())
+        assert_(np.all(a[ya] > 0.5))
+        assert_(np.all(b[yb] > 0.5))
+
+    def test_endian_where(self):
+        # GitHub issue #369
+        net = np.zeros(3, dtype='>f4')
+        net[1] = 0.00458849
+        net[2] = 0.605202
+        max_net = net.max()
+        test = np.where(net <= 0., max_net, net)
+        correct = np.array([0.60520202, 0.00458849, 0.60520202])
+        assert_array_almost_equal(test, correct)
+
+    def test_endian_recarray(self):
+        # Ticket #2185
+        dt = np.dtype([
+            ('head', '>u4'),
+            ('data', '>u4', 2),
+        ])
+        buf = np.recarray(1, dtype=dt)
+        buf[0]['head'] = 1
+        buf[0]['data'][:] = [1, 1]
+
+        h = buf[0]['head']
+        d = buf[0]['data'][0]
+        buf[0]['head'] = h
+        buf[0]['data'][0] = d
+        assert_(buf[0]['head'] == 1)
+
+    def test_mem_dot(self):
+        # Ticket #106
+        x = np.random.randn(0, 1)
+        y = np.random.randn(10, 1)
+        # Dummy array to detect bad memory access:
+        _z = np.ones(10)
+        _dummy = np.empty((0, 10))
+        z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
+        np.dot(x, np.transpose(y), out=z)
+        assert_equal(_z, np.ones(10))
+        # Do the same for the built-in dot:
+        np._core.multiarray.dot(x, np.transpose(y), out=z)
+        assert_equal(_z, np.ones(10))
+
+    def test_arange_endian(self):
+        # Ticket #111
+        ref = np.arange(10)
+        x = np.arange(10, dtype='<f8')
+        assert_array_equal(ref, x)
+
+        assert_(x['one'] > 1 and x['two'] > 2)
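As a quick usage sketch of `np.rec.fromarrays` (the API exercised in test_recarray_fromarrays above), including the copy semantics that the `x1[1] = 34` assertion relies on:

```python
import numpy as np

x1 = np.array([1, 2, 3, 4])
x2 = np.array([1.1, 2.2, 3.3, 4.4])
r = np.rec.fromarrays([x1, x2], names='a,b')
assert r[1].item() == (2, 2.2)
r.a[0] = 10          # fields are attributes on recarrays

x1[1] = 34           # fromarrays copies its inputs...
assert r.a[1] == 2   # ...so the record array is unaffected
```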
+ + def test_method_args(self): + # Make sure methods and functions have same default axis + # keyword and arguments + funcs1 = ['argmax', 'argmin', 'sum', 'any', 'all', 'cumsum', + 'cumprod', 'prod', 'std', 'var', 'mean', + 'round', 'min', 'max', 'argsort', 'sort'] + funcs2 = ['compress', 'take', 'repeat'] + + for func in funcs1: + arr = np.random.rand(8, 7) + arr2 = arr.copy() + res1 = getattr(arr, func)() + res2 = getattr(np, func)(arr2) + if res1 is None: + res1 = arr + + if res1.dtype.kind in 'uib': + assert_((res1 == res2).all(), func) + else: + assert_(abs(res1 - res2).max() < 1e-8, func) + + for func in funcs2: + arr1 = np.random.rand(8, 7) + arr2 = np.random.rand(8, 7) + res1 = None + if func == 'compress': + arr1 = arr1.ravel() + res1 = getattr(arr2, func)(arr1) + else: + arr2 = (15 * arr2).astype(int).ravel() + if res1 is None: + res1 = getattr(arr1, func)(arr2) + res2 = getattr(np, func)(arr1, arr2) + assert_(abs(res1 - res2).max() < 1e-8, func) + + def test_mem_lexsort_strings(self): + # Ticket #298 + lst = ['abc', 'cde', 'fgh'] + np.lexsort((lst,)) + + def test_fancy_index(self): + # Ticket #302 + x = np.array([1, 2])[np.array([0])] + assert_equal(x.shape, (1,)) + + def test_recarray_copy(self): + # Ticket #312 + dt = [('x', np.int16), ('y', np.float64)] + ra = np.array([(1, 2.3)], dtype=dt) + rb = np.rec.array(ra, dtype=dt) + rb['x'] = 2. + assert_(ra['x'] != rb['x']) + + def test_rec_fromarray(self): + # Ticket #322 + x1 = np.array([[1, 2], [3, 4], [5, 6]]) + x2 = np.array(['a', 'dd', 'xyz']) + x3 = np.array([1.1, 2, 3]) + np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,S3,f8") + + def test_object_array_assign(self): + x = np.empty((2, 2), object) + x.flat[2] = (1, 2, 3) + assert_equal(x.flat[2], (1, 2, 3)) + + def test_ndmin_float64(self): + # Ticket #324 + x = np.array([1, 2, 3], dtype=np.float64) + assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2) + assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2) + + def test_ndmin_order(self): + # Issue #465 and related checks + assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous) + assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous) + assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous) + + def test_mem_axis_minimization(self): + # Ticket #327 + data = np.arange(5) + data = np.add.outer(data, data) + + def test_mem_float_imag(self): + # Ticket #330 + np.float64(1.0).imag + + def test_dtype_tuple(self): + # Ticket #334 + assert_(np.dtype('i4') == np.dtype(('i4', ()))) + + def test_dtype_posttuple(self): + # Ticket #335 + np.dtype([('col1', '()i4')]) + + def test_numeric_carray_compare(self): + # Ticket #341 + assert_equal(np.array(['X'], 'c'), b'X') + + def test_string_array_size(self): + # Ticket #342 + assert_raises(ValueError, + np.array, [['X'], ['X', 'X', 'X']], '|S1') + + def test_dtype_repr(self): + # Ticket #344 + dt1 = np.dtype(('uint32', 2)) + dt2 = np.dtype(('uint32', (2,))) + assert_equal(dt1.__repr__(), dt2.__repr__()) + + def test_reshape_order(self): + # Make sure reshape order works. 
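The ndmin/order interplay checked in test_ndmin_order, as a standalone sketch:

```python
import numpy as np

# ndmin pads the shape on the left; `order` controls the result's layout:
a = np.array([1, 2], ndmin=3)
assert a.shape == (1, 1, 2)
assert np.array([1, 2], order='F', ndmin=3).flags.f_contiguous
```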
+ a = np.arange(6).reshape(2, 3, order='F') + assert_equal(a, [[0, 2, 4], [1, 3, 5]]) + a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) + b = a[:, 1] + assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]]) + + def test_reshape_zero_strides(self): + # Issue #380, test reshaping of zero strided arrays + a = np.ones(1) + a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,)) + assert_(a.reshape(5, 1).strides[0] == 0) + + def test_reshape_zero_size(self): + # GitHub Issue #2700, setting shape failed for 0-sized arrays + a = np.ones((0, 2)) + a.shape = (-1, 2) + + def test_reshape_trailing_ones_strides(self): + # GitHub issue gh-2949, bad strides for trailing ones of new shape + a = np.zeros(12, dtype=np.int32)[::2] # not contiguous + strides_c = (16, 8, 8, 8) + strides_f = (8, 24, 48, 48) + assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c) + assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f) + assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4)) + + def test_repeat_discont(self): + # Ticket #352 + a = np.arange(12).reshape(4, 3)[:, 2] + assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11]) + + def test_array_index(self): + # Make sure optimization is not called in this case. + a = np.array([1, 2, 3]) + a2 = np.array([[1, 2, 3]]) + assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)]) + + def test_object_argmax(self): + a = np.array([1, 2, 3], dtype=object) + assert_(a.argmax() == 2) + + def test_recarray_fields(self): + # Ticket #372 + dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')]) + dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')]) + for a in [np.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)], "i4,i4"), + np.rec.array([(1, 2), (3, 4)]), + np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"), + np.rec.fromarrays([(1, 2), (3, 4)])]: + assert_(a.dtype in [dt0, dt1]) + + def test_random_shuffle(self): + # Ticket #374 + a = np.arange(5).reshape((5, 1)) + b = a.copy() + np.random.shuffle(b) + assert_equal(np.sort(b, axis=0), a) + + def test_refcount_vdot(self): + # Changeset #3443 + _assert_valid_refcount(np.vdot) + + def test_startswith(self): + ca = np.char.array(['Hi', 'There']) + assert_equal(ca.startswith('H'), [True, False]) + + def test_noncommutative_reduce_accumulate(self): + # Ticket #413 + tosubtract = np.arange(5) + todivide = np.array([2.0, 0.5, 0.25]) + assert_equal(np.subtract.reduce(tosubtract), -10) + assert_equal(np.divide.reduce(todivide), 16.0) + assert_array_equal(np.subtract.accumulate(tosubtract), + np.array([0, -1, -3, -6, -10])) + assert_array_equal(np.divide.accumulate(todivide), + np.array([2., 4., 16.])) + + def test_convolve_empty(self): + # Convolve should raise an error for empty input array. + assert_raises(ValueError, np.convolve, [], [1]) + assert_raises(ValueError, np.convolve, [1], []) + + def test_multidim_byteswap(self): + # Ticket #449 + r = np.array([(1, (0, 1, 2))], dtype="i2,3i2") + assert_array_equal(r.byteswap(), + np.array([(256, (0, 256, 512))], r.dtype)) + + def test_string_NULL(self): + # Changeset 3557 + assert_equal(np.array("a\x00\x0b\x0c\x00").item(), + 'a\x00\x0b\x0c') + + def test_junk_in_string_fields_of_recarray(self): + # Ticket #483 + r = np.array([[b'abc']], dtype=[('var1', '|S20')]) + assert_(asbytes(r['var1'][0][0]) == b'abc') + + def test_take_output(self): + # Ensure that 'take' honours output parameter. 
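The noncommutative reduce/accumulate behaviour asserted above follows from a strict left-to-right fold; a compact illustration with the same values:

```python
import numpy as np

# reduce/accumulate fold strictly left to right, which is observable
# with non-commutative operations:
assert np.subtract.reduce(np.arange(5)) == -10          # ((((0-1)-2)-3)-4)
acc = np.divide.accumulate(np.array([2.0, 0.5, 0.25]))
assert (acc == [2.0, 4.0, 16.0]).all()                  # 2, 2/0.5, 4/0.25
```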
+        x = np.arange(12).reshape((3, 4))
+        a = np.take(x, [0, 2], axis=1)
+        b = np.zeros_like(a)
+        np.take(x, [0, 2], axis=1, out=b)
+        assert_array_equal(a, b)
+
+    def test_take_object_fail(self):
+        # Issue gh-3001
+        d = 123.
+        a = np.array([d, 1], dtype=object)
+        if HAS_REFCOUNT:
+            ref_d = sys.getrefcount(d)
+        try:
+            a.take([0, 100])
+        except IndexError:
+            pass
+        if HAS_REFCOUNT:
+            assert_(ref_d == sys.getrefcount(d))
+
+    def test_array_str_64bit(self):
+        # Ticket #501
+        s = np.array([1, np.nan], dtype=np.float64)
+        with np.errstate(all='raise'):
+            np.array_str(s)  # Should succeed
+
+    def test_frompyfunc_endian(self):
+        # Ticket #503
+        from math import radians
+        uradians = np.frompyfunc(radians, 1, 1)
+        big_endian = np.array([83.4, 83.5], dtype='>f8')
+        little_endian = np.array([83.4, 83.5], dtype='<f8')
+        assert_almost_equal(uradians(big_endian).astype(float),
+                            uradians(little_endian).astype(float))
+
+    def test_object_casting(self):
+        # This used to trigger the object-type version of
+        # the bitwise_or operation, because float64 -> object
+        # casting succeeds
+        def rs():
+            x = np.ones([484, 286])
+            y = np.zeros([484, 286])
+            x |= y
+
+        assert_raises(TypeError, rs)
+
+    def test_unicode_scalar(self):
+        # Ticket #600
+        x = np.array(["DROND", "DROND1"], dtype="U6")
+        el = x[1]
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            new = pickle.loads(pickle.dumps(el, protocol=proto))
+            assert_equal(new, el)
+
+    def test_arange_non_native_dtype(self):
+        # Ticket #616
+        for T in ('>f4', '<f4'):
+            dt = np.dtype(T)
+            assert_equal(np.arange(0, dtype=dt).dtype, dt)
+            assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
+            assert_equal(np.arange(5, dtype=dt).dtype, dt)
+
+    def test_bool_flat_indexing_invalid_nr_elements(self):
+        s = np.ones(10, dtype=float)
+        x = np.array((15,), dtype=float)
+
+        def ia(x, s, v):
+            x[(s > 0)] = v
+
+        assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+        assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+
+        # Old special case (different code path):
+        assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+        assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+
+    def test_mem_scalar_indexing(self):
+        # Ticket #603
+        x = np.array([0], dtype=float)
+        index = np.array(0, dtype=np.int32)
+        x[index]
+
+    def test_binary_repr_0_width(self):
+        assert_equal(np.binary_repr(0, width=3), '000')
+
+    def test_fromstring(self):
+        assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
+                     [12, 9, 9])
+
+    def test_searchsorted_variable_length(self):
+        x = np.array(['a', 'aa', 'b'])
+        y = np.array(['d', 'e'])
+        assert_equal(x.searchsorted(y), [3, 3])
+
+    def test_string_argsort_with_zeros(self):
+        # Check argsort for strings containing zeros.
+        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+        assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
+        assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
+
+    def test_string_sort_with_zeros(self):
+        # Check sort for strings containing zeros.
+        x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
+        y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
+        assert_array_equal(np.sort(x, kind="q"), y)
+
+    def test_copy_detection_zero_dim(self):
+        # Ticket #658
+        np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+    def test_flat_byteorder(self):
+        # Ticket #657
+        x = np.arange(10)
+        assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
+        assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
+
+    def test_flat_index_byteswap(self):
+        for dt in (np.dtype('<i4'), np.dtype('>i4')):
+            x = np.array([-1, 0, 1], dtype=dt)
+            assert_equal(x.flat[0].dtype, x[0].dtype)
+
+    def test_copy_detection_corner_case(self):
+        # Ticket #658
+        np.indices((0, 3, 4)).T.reshape(-1, 3)
+
+    def test_object_array_refcounting(self):
+        # Ticket #633
+        if not hasattr(sys, 'getrefcount'):
+            return
+
+        # NB.
this is probably CPython-specific + + cnt = sys.getrefcount + + a = object() + b = object() + c = object() + + cnt0_a = cnt(a) + cnt0_b = cnt(b) + cnt0_c = cnt(c) + + # -- 0d -> 1-d broadcast slice assignment + + arr = np.zeros(5, dtype=np.object_) + + arr[:] = a + assert_equal(cnt(a), cnt0_a + 5) + + arr[:] = b + assert_equal(cnt(a), cnt0_a) + assert_equal(cnt(b), cnt0_b + 5) + + arr[:2] = c + assert_equal(cnt(b), cnt0_b + 3) + assert_equal(cnt(c), cnt0_c + 2) + + del arr + + # -- 1-d -> 2-d broadcast slice assignment + + arr = np.zeros((5, 2), dtype=np.object_) + arr0 = np.zeros(2, dtype=np.object_) + + arr0[0] = a + assert_(cnt(a) == cnt0_a + 1) + arr0[1] = b + assert_(cnt(b) == cnt0_b + 1) + + arr[:, :] = arr0 + assert_(cnt(a) == cnt0_a + 6) + assert_(cnt(b) == cnt0_b + 6) + + arr[:, 0] = None + assert_(cnt(a) == cnt0_a + 1) + + del arr, arr0 + + # -- 2-d copying + flattening + + arr = np.zeros((5, 2), dtype=np.object_) + + arr[:, 0] = a + arr[:, 1] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + arr2 = arr[:, 0].copy() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 5) + + arr2 = arr.flatten() + assert_(cnt(a) == cnt0_a + 10) + assert_(cnt(b) == cnt0_b + 10) + + del arr, arr2 + + # -- concatenate, repeat, take, choose + + arr1 = np.zeros((5, 1), dtype=np.object_) + arr2 = np.zeros((5, 1), dtype=np.object_) + + arr1[...] = a + arr2[...] = b + assert_(cnt(a) == cnt0_a + 5) + assert_(cnt(b) == cnt0_b + 5) + + tmp = np.concatenate((arr1, arr2)) + assert_(cnt(a) == cnt0_a + 5 + 5) + assert_(cnt(b) == cnt0_b + 5 + 5) + + tmp = arr1.repeat(3, axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3 * 5) + + tmp = arr1.take([1, 2, 3], axis=0) + assert_(cnt(a) == cnt0_a + 5 + 3) + + x = np.array([[0], [1], [0], [1], [1]], int) + tmp = x.choose(arr1, arr2) + assert_(cnt(a) == cnt0_a + 5 + 2) + assert_(cnt(b) == cnt0_b + 5 + 3) + + def test_mem_custom_float_to_array(self): + # Ticket 702 + class MyFloat: + def __float__(self): + return 1.0 + + tmp = np.atleast_1d([MyFloat()]) + tmp.astype(float) # Should succeed + + def test_object_array_refcount_self_assign(self): + # Ticket #711 + class VictimObject: + deleted = False + + def __del__(self): + self.deleted = True + + d = VictimObject() + arr = np.zeros(5, dtype=np.object_) + arr[:] = d + del d + arr[:] = arr # refcount of 'd' might hit zero here + assert_(not arr[0].deleted) + arr[:] = arr # trying to induce a segfault by doing it again... + assert_(not arr[0].deleted) + + def test_mem_fromiter_invalid_dtype_string(self): + x = [1, 2, 3] + assert_raises(ValueError, + np.fromiter, list(x), dtype='S') + + def test_reduce_big_object_array(self): + # Ticket #713 + oldsize = np.setbufsize(10 * 16) + a = np.array([None] * 161, object) + assert_(not np.any(a)) + np.setbufsize(oldsize) + + def test_mem_0d_array_index(self): + # Ticket #714 + np.zeros(10)[np.array(0)] + + def test_nonnative_endian_fill(self): + # Non-native endian arrays were incorrectly filled with scalars + # before r5034. 
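The refcount bookkeeping these assertions track can be seen in isolation (CPython-specific, since it relies on `sys.getrefcount`):

```python
import sys
import numpy as np

# CPython-specific: each filled slot of an object array holds a reference.
obj = object()
baseline = sys.getrefcount(obj)
arr = np.empty(4, dtype=object)
arr[:] = obj
assert sys.getrefcount(obj) == baseline + 4
del arr
assert sys.getrefcount(obj) == baseline
```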
+        if sys.byteorder == 'little':
+            dtype = np.dtype('>i4')
+        else:
+            dtype = np.dtype('<i4')
+        x = np.empty([1], dtype=dtype)
+        x.fill(1)
+        assert_equal(x, np.array([1], dtype=dtype))
+
+    def test_blasdot_uninitialized_memory(self):
+        # Ticket #750
+        for m in [0, 1, 2]:
+            for n in [0, 1, 2]:
+                for k in range(3):
+                    # Try to ensure that x->data contains non-zero floats
+                    x = np.array([123456789e199], dtype=np.float64)
+                    if IS_PYPY:
+                        x.resize((m, 0), refcheck=False)
+                    else:
+                        x.resize((m, 0))
+                    y = np.array([123456789e199], dtype=np.float64)
+                    if IS_PYPY:
+                        y.resize((0, n), refcheck=False)
+                    else:
+                        y.resize((0, n))
+
+                    # `dot` should just return zero (m, n) matrix
+                    z = np.dot(x, y)
+                    assert_(np.all(z == 0))
+                    assert_(z.shape == (m, n))
+
+    def test_zeros(self):
+        # Regression test for #1061.
+        # Set a size which cannot fit into a 64 bits signed integer
+        sz = 2 ** 64
+        with assert_raises_regex(ValueError,
+                                 'Maximum allowed dimension exceeded'):
+            np.empty(sz)
+
+    def test_huge_arange(self):
+        # Regression test for #1062.
+        # Set a size which cannot fit into a 64 bits signed integer
+        sz = 2 ** 64
+        with assert_raises_regex(ValueError,
+                                 'Maximum allowed size exceeded'):
+            np.arange(sz)
+            assert_(np.size == sz)
+
+    def test_fromiter_bytes(self):
+        # Ticket #1058
+        a = np.fromiter(list(range(10)), dtype='b')
+        b = np.fromiter(list(range(10)), dtype='B')
+        assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+        assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+    def test_array_from_sequence_scalar_array(self):
+        # Ticket #1078: segfaults when creating an array with a sequence of
+        # 0d arrays.
+        a = np.array((np.ones(2), np.array(2)), dtype=object)
+        assert_equal(a.shape, (2,))
+        assert_equal(a.dtype, np.dtype(object))
+        assert_equal(a[0], np.ones(2))
+        assert_equal(a[1], np.array(2))
+
+        a = np.array(((1,), np.array(1)), dtype=object)
+        assert_equal(a.shape, (2,))
+        assert_equal(a.dtype, np.dtype(object))
+        assert_equal(a[0], (1,))
+        assert_equal(a[1], np.array(1))
+
+    def test_array_from_sequence_scalar_array2(self):
+        # Ticket #1081: weird array with strange input...
+        t = np.array([np.array([]), np.array(0, object)], dtype=object)
+        assert_equal(t.shape, (2,))
+        assert_equal(t.dtype, np.dtype(object))
+
+    def test_array_too_big(self):
+        # Ticket #1080.
+        assert_raises(ValueError, np.zeros, [975] * 7, np.int8)
+        assert_raises(ValueError, np.zeros, [26244] * 5, np.int8)
+
+    def test_dtype_keyerrors_(self):
+        # Ticket #1106.
+        dt = np.dtype([('f1', np.uint)])
+        assert_raises(KeyError, dt.__getitem__, "f2")
+        assert_raises(IndexError, dt.__getitem__, 1)
+        assert_raises(TypeError, dt.__getitem__, 0.0)
+
+    def test_lexsort_buffer_length(self):
+        # Ticket #1217, don't segfault.
+        a = np.ones(100, dtype=np.int8)
+        b = np.ones(100, dtype=np.int32)
+        i = np.lexsort((a[::-1], b))
+        assert_equal(i, np.arange(100, dtype=int))
+
+    def test_object_array_to_fixed_string(self):
+        # Ticket #1235.
+        a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
+        b = np.array(a, dtype=(np.str_, 8))
+        assert_equal(a, b)
+        c = np.array(a, dtype=(np.str_, 5))
+        assert_equal(c, np.array(['abcde', 'ijklm']))
+        d = np.array(a, dtype=(np.str_, 12))
+        assert_equal(a, d)
+        e = np.empty((2, ), dtype=(np.str_, 8))
+        e[:] = a[:]
+        assert_equal(a, e)
+
+    def test_unicode_to_string_cast(self):
+        # Ticket #1240.
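+        # Aside for illustration (not part of the original test): ASCII-only
+        # unicode data casts to bytes cleanly; only non-ASCII code points
+        # such as '\u03a3' below must raise UnicodeEncodeError.
+        assert np.array(['abc'], dtype='U3').astype('S3')[0] == b'abc'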
+ a = np.array([['abc', '\u03a3'], + ['asdf', 'erw']], + dtype='U') + assert_raises(UnicodeEncodeError, np.array, a, 'S4') + + def test_unicode_to_string_cast_error(self): + # gh-15790 + a = np.array(['\x80'] * 129, dtype='U3') + assert_raises(UnicodeEncodeError, np.array, a, 'S') + b = a.reshape(3, 43)[:-1, :-1] + assert_raises(UnicodeEncodeError, np.array, b, 'S') + + def test_mixed_string_byte_array_creation(self): + a = np.array(['1234', b'123']) + assert_(a.itemsize == 16) + a = np.array([b'123', '1234']) + assert_(a.itemsize == 16) + a = np.array(['1234', b'123', '12345']) + assert_(a.itemsize == 20) + a = np.array([b'123', '1234', b'12345']) + assert_(a.itemsize == 20) + a = np.array([b'123', '1234', b'1234']) + assert_(a.itemsize == 16) + + def test_misaligned_objects_segfault(self): + # Ticket #1198 and #1267 + a1 = np.zeros((10,), dtype='O,c') + a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10') + a1['f0'] = a2 + repr(a1) + np.argmax(a1['f0']) + a1['f0'][1] = "FOO" + a1['f0'] = "FOO" + np.array(a1['f0'], dtype='S') + np.nonzero(a1['f0']) + a1.sort() + copy.deepcopy(a1) + + def test_misaligned_scalars_segfault(self): + # Ticket #1267 + s1 = np.array(('a', 'Foo'), dtype='c,O') + s2 = np.array(('b', 'Bar'), dtype='c,O') + s1['f1'] = s2['f1'] + s1['f1'] = 'Baz' + + def test_misaligned_dot_product_objects(self): + # Ticket #1267 + # This didn't require a fix, but it's worth testing anyway, because + # it may fail if .dot stops enforcing the arrays to be BEHAVED + a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c') + b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c') + np.dot(a['f0'], b['f0']) + + def test_byteswap_complex_scalar(self): + # Ticket #1259 and gh-441 + for dtype in [np.dtype('<' + t) for t in np.typecodes['Complex']]: + z = np.array([2.2 - 1.1j], dtype) + x = z[0] # always native-endian + y = x.byteswap() + if x.dtype.byteorder == z.dtype.byteorder: + # little-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder())) + else: + # big-endian machine + assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype)) + # double check real and imaginary parts: + assert_equal(x.real, y.real.byteswap()) + assert_equal(x.imag, y.imag.byteswap()) + + def test_structured_arrays_with_objects1(self): + # Ticket #1299 + stra = 'aaaa' + strb = 'bbbb' + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(x[0, 1] == x[0, 0]) + + @pytest.mark.skipif( + sys.version_info >= (3, 12), + reason="Python 3.12 has immortal refcounts, this test no longer works." + ) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_structured_arrays_with_objects2(self): + # Ticket #1299 second test + stra = 'aaaa' + strb = 'bbbb' + numb = sys.getrefcount(strb) + numa = sys.getrefcount(stra) + x = np.array([[(0, stra), (1, strb)]], 'i8,O') + x[x.nonzero()] = x.ravel()[:1] + assert_(sys.getrefcount(strb) == numb) + assert_(sys.getrefcount(stra) == numa + 2) + + def test_duplicate_title_and_name(self): + # Ticket #1254 + dtspec = [(('a', 'a'), 'i'), ('b', 'i')] + assert_raises(ValueError, np.dtype, dtspec) + + def test_signed_integer_division_overflow(self): + # Ticket #1317. 
+        def test_type(t):
+            min = np.array([np.iinfo(t).min])
+            min //= -1
+
+        with np.errstate(over="ignore"):
+            for t in (np.int8, np.int16, np.int32, np.int64, int):
+                test_type(t)
+
+    def test_buffer_hashlib(self):
+        from hashlib import sha256
+
+        x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
+        sha256(x).hexdigest()  # smoke test: hashing the buffer should succeed
+
+    def test_0d_string_scalar(self):
+        # the following should succeed
+        np.asarray('x', '>c')
+
+    def test_log1p_compiler_shenanigans(self):
+        # Check if log1p is behaving on 32 bit intel systems.
+        assert_(np.isfinite(np.log1p(np.exp2(-53))))
+
+    def test_fromiter_comparison(self):
+        a = np.fromiter(list(range(10)), dtype='b')
+        b = np.fromiter(list(range(10)), dtype='B')
+        assert_(np.all(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+        assert_(np.all(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
+
+    def test_fromstring_crash(self):
+        with assert_raises(ValueError):
+            np.fromstring(b'aa, aa, 1.0', sep=',')
+
+    def test_ticket_1539(self):
+        dtypes = [x for x in np._core.sctypeDict.values()
+                  if (issubclass(x, np.number)
+                      and not issubclass(x, np.timedelta64))]
+        a = np.array([], np.bool)  # not x[0] because it is unordered
+        failures = []
+
+        for x in dtypes:
+            b = a.astype(x)
+            for y in dtypes:
+                c = a.astype(y)
+                try:
+                    d = np.dot(b, c)
+                except TypeError:
+                    failures.append((x, y))
+                else:
+                    if d != 0:
+                        failures.append((x, y))
+        if failures:
+            raise AssertionError(f"Failures: {failures!r}")
+
+    def test_ticket_1538(self):
+        x = np.finfo(np.float32)
+        for name in ('eps', 'epsneg', 'max', 'min', 'resolution', 'tiny'):
+            assert_equal(type(getattr(x, name)), np.float32,
+                         err_msg=name)
+
+    def test_ticket_1434(self):
+        # Check that the out= argument in var and std has an effect
+        data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
+        out = np.zeros((3,))
+
+        ret = data.var(axis=1, out=out)
+        assert_(ret is out)
+        assert_array_equal(ret, data.var(axis=1))
+
+        ret = data.std(axis=1, out=out)
+        assert_(ret is out)
+        assert_array_equal(ret, data.std(axis=1))
+
+    def test_complex_nan_maximum(self):
+        cnan = complex(0, np.nan)
+        assert_equal(np.maximum(1, cnan), cnan)
+
+    def test_subclass_int_tuple_assignment(self):
+        # ticket #1563
+        class Subclass(np.ndarray):
+            def __new__(cls, i):
+                return np.ones((i,)).view(cls)
+
+        x = Subclass(5)
+        x[(0,)] = 2  # shouldn't raise an exception
+        assert_equal(x[0], 2)
+
+    def test_ufunc_no_unnecessary_views(self):
+        # ticket #1548
+        class Subclass(np.ndarray):
+            pass
+        x = np.array([1, 2, 3]).view(Subclass)
+        y = np.add(x, x, x)
+        assert_equal(id(x), id(y))
+
+    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+    def test_take_refcount(self):
+        # ticket #939
+        a = np.arange(16, dtype=float)
+        a.shape = (4, 4)
+        lut = np.ones((5 + 3, 4), float)
+        rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
+        c1 = sys.getrefcount(rgba)
+        try:
+            lut.take(a, axis=0, mode='clip', out=rgba)
+        except TypeError:
+            pass
+        c2 = sys.getrefcount(rgba)
+        assert_equal(c1, c2)
+
+    def test_fromfile_tofile_seeks(self):
+        # tofile/fromfile used to get (#1610) the Python file handle out of sync
+        with tempfile.NamedTemporaryFile() as f:
+            f.write(np.arange(255, dtype='u1').tobytes())
+
+            f.seek(20)
+            ret = np.fromfile(f, count=4, dtype='u1')
+            assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
+            assert_equal(f.tell(), 24)
+
+            f.seek(40)
+            np.array([1, 2, 3], dtype='u1').tofile(f)
+            assert_equal(f.tell(), 43)
+
+            f.seek(40)
+            data = f.read(3)
+            assert_equal(data, b"\x01\x02\x03")
+
+            f.seek(80)
+            f.read(4)
+            data = np.fromfile(f, dtype='u1', count=4)
+            assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))
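+    # Aside (illustration only, not from the original patch): float() keeps
+    # just the real part of a complex scalar, which is what makes
+    # ComplexWarning fire below, e.g. float(np.cdouble(1 + 2j)) == 1.0 once
+    # the warning is suppressed.
+    def test_complex_scalar_warning(self):
+        for tp in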
[np.csingle, np.cdouble, np.clongdouble]: + x = tp(1 + 2j) + assert_warns(ComplexWarning, float, x) + with suppress_warnings() as sup: + sup.filter(ComplexWarning) + assert_equal(float(x), float(x.real)) + + def test_complex_scalar_complex_cast(self): + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = tp(1 + 2j) + assert_equal(complex(x), 1 + 2j) + + def test_complex_boolean_cast(self): + # Ticket #2218 + for tp in [np.csingle, np.cdouble, np.clongdouble]: + x = np.array([0, 0 + 0.5j, 0.5 + 0j], dtype=tp) + assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool)) + assert_(np.any(x)) + assert_(np.all(x[1:])) + + def test_uint_int_conversion(self): + x = 2**64 - 1 + assert_equal(int(np.uint64(x)), x) + + def test_duplicate_field_names_assign(self): + ra = np.fromiter(((i * 3, i * 2) for i in range(10)), dtype='i8,f8') + ra.dtype.names = ('f1', 'f2') + repr(ra) # should not cause a segmentation fault + assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1')) + + def test_eq_string_and_object_array(self): + # From e-mail thread "__eq__ with str and object" (Keith Goodman) + a1 = np.array(['a', 'b'], dtype=object) + a2 = np.array(['a', 'c']) + assert_array_equal(a1 == a2, [True, False]) + assert_array_equal(a2 == a1, [True, False]) + + def test_nonzero_byteswap(self): + a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32) + a.dtype = np.float32 + assert_equal(a.nonzero()[0], [1]) + a = a.byteswap() + a = a.view(a.dtype.newbyteorder()) + assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap + + def test_empty_mul(self): + a = np.array([1.]) + a[1:1] *= 2 + assert_equal(a, [1.]) + + def test_array_side_effect(self): + # The second use of itemsize was throwing an exception because in + # ctors.c, discover_itemsize was calling PyObject_Length without + # checking the return code. This failed to get the length of the + # number 2, and the exception hung around until something checked + # PyErr_Occurred() and returned an error. 
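+        # Aside added for illustration (not in the original test): itemsize
+        # discovery scans every element, so a 10-char entry forces S10:
+        assert np.array([b'abc', b'0123456789']).dtype == np.dtype('S10')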
+ assert_equal(np.dtype('S10').itemsize, 10) + np.array([['abc', 2], ['long ', '0123456789']], dtype=np.bytes_) + assert_equal(np.dtype('S10').itemsize, 10) + + def test_any_float(self): + # all and any for floats + a = np.array([0.1, 0.9]) + assert_(np.any(a)) + assert_(np.all(a)) + + def test_large_float_sum(self): + a = np.arange(10000, dtype='f') + assert_equal(a.sum(dtype='d'), a.astype('d').sum()) + + def test_ufunc_casting_out(self): + a = np.array(1.0, dtype=np.float32) + b = np.array(1.0, dtype=np.float64) + c = np.array(1.0, dtype=np.float32) + np.add(a, b, out=c) + assert_equal(c, 2.0) + + def test_array_scalar_contiguous(self): + # Array scalars are both C and Fortran contiguous + assert_(np.array(1.0).flags.c_contiguous) + assert_(np.array(1.0).flags.f_contiguous) + assert_(np.array(np.float32(1.0)).flags.c_contiguous) + assert_(np.array(np.float32(1.0)).flags.f_contiguous) + + def test_squeeze_contiguous(self): + # Similar to GitHub issue #387 + a = np.zeros((1, 2)).squeeze() + b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze() + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.f_contiguous) + + def test_squeeze_axis_handling(self): + # Issue #10779 + # Ensure proper handling of objects + # that don't support axis specification + # when squeezing + + class OldSqueeze(np.ndarray): + + def __new__(cls, + input_array): + obj = np.asarray(input_array).view(cls) + return obj + + # it is perfectly reasonable that prior + # to numpy version 1.7.0 a subclass of ndarray + # might have been created that did not expect + # squeeze to have an axis argument + # NOTE: this example is somewhat artificial; + # it is designed to simulate an old API + # expectation to guard against regression + def squeeze(self): + return super().squeeze() + + oldsqueeze = OldSqueeze(np.array([[1], [2], [3]])) + + # if no axis argument is specified the old API + # expectation should give the correct result + assert_equal(np.squeeze(oldsqueeze), + np.array([1, 2, 3])) + + # likewise, axis=None should work perfectly well + # with the old API expectation + assert_equal(np.squeeze(oldsqueeze, axis=None), + np.array([1, 2, 3])) + + # however, specification of any particular axis + # should raise a TypeError in the context of the + # old API specification, even when using a valid + # axis specification like 1 for this array + with assert_raises(TypeError): + # this would silently succeed for array + # subclasses / objects that did not support + # squeeze axis argument handling before fixing + # Issue #10779 + np.squeeze(oldsqueeze, axis=1) + + # check for the same behavior when using an invalid + # axis specification -- in this case axis=0 does not + # have size 1, but the priority should be to raise + # a TypeError for the axis argument and NOT a + # ValueError for squeezing a non-empty dimension + with assert_raises(TypeError): + np.squeeze(oldsqueeze, axis=0) + + # the new API knows how to handle the axis + # argument and will return a ValueError if + # attempting to squeeze an axis that is not + # of length 1 + with assert_raises(ValueError): + np.squeeze(np.array([[1], [2], [3]]), axis=0) + + def test_reduce_contiguous(self): + # GitHub issue #387 + a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1)) + b = np.add.reduce(np.zeros((2, 1, 2)), 1) + assert_(a.flags.c_contiguous) + assert_(a.flags.f_contiguous) + assert_(b.flags.c_contiguous) + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited 
stack size") + def test_object_array_self_reference(self): + # Object arrays with references to themselves can cause problems + a = np.array(0, dtype=object) + a[()] = a + assert_raises(RecursionError, int, a) + assert_raises(RecursionError, float, a) + a[()] = None + + @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking") + @pytest.mark.skipif(IS_WASM, reason="Pyodide/WASM has limited stack size") + def test_object_array_circular_reference(self): + # Test the same for a circular reference. + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + b[()] = a + assert_raises(RecursionError, int, a) + # NumPy has no tp_traverse currently, so circular references + # cannot be detected. So resolve it: + a[()] = None + + # This was causing a to become like the above + a = np.array(0, dtype=object) + a[...] += 1 + assert_equal(a, 1) + + def test_object_array_nested(self): + # but is fine with a reference to a different array + a = np.array(0, dtype=object) + b = np.array(0, dtype=object) + a[()] = b + assert_equal(int(a), int(0)) # noqa: UP018 + assert_equal(float(a), float(0)) + + def test_object_array_self_copy(self): + # An object array being copied into itself DECREF'ed before INCREF'ing + # causing segmentation faults (gh-3787) + a = np.array(object(), dtype=object) + np.copyto(a, a) + if HAS_REFCOUNT: + assert_(sys.getrefcount(a[()]) == 2) + a[()].__class__ # will segfault if object was deleted + + def test_zerosize_accumulate(self): + "Ticket #1733" + x = np.array([[42, 0]], dtype=np.uint32) + assert_equal(np.add.accumulate(x[:-1, 0]), []) + + def test_objectarray_setfield(self): + # Setfield should not overwrite Object fields with non-Object data + x = np.array([1, 2, 3], dtype=object) + assert_raises(TypeError, x.setfield, 4, np.int32, 0) + + def test_setting_rank0_string(self): + "Ticket #1736" + s1 = b"hello1" + s2 = b"hello2" + a = np.zeros((), dtype="S10") + a[()] = s1 + assert_equal(a, np.array(s1)) + a[()] = np.array(s2) + assert_equal(a, np.array(s2)) + + a = np.zeros((), dtype='f4') + a[()] = 3 + assert_equal(a, np.array(3)) + a[()] = np.array(4) + assert_equal(a, np.array(4)) + + def test_string_astype(self): + "Ticket #1748" + s1 = b'black' + s2 = b'white' + s3 = b'other' + a = np.array([[s1], [s2], [s3]]) + assert_equal(a.dtype, np.dtype('S5')) + b = a.astype(np.dtype('S0')) + assert_equal(b.dtype, np.dtype('S5')) + + def test_ticket_1756(self): + # Ticket #1756 + s = b'0123456789abcdef' + a = np.array([s] * 5) + for i in range(1, 17): + a1 = np.array(a, "|S%d" % i) + a2 = np.array([s[:i]] * 5) + assert_equal(a1, a2) + + def test_fields_strides(self): + "gh-2355" + r = np.frombuffer(b'abcdefghijklmnop' * 4 * 3, dtype='i4,(2,3)u2') + assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2]) + assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1']) + assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()]) + assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides) + + def test_alignment_update(self): + # Check that alignment flag is updated on stride setting + a = np.arange(10) + assert_(a.flags.aligned) + a.strides = 3 + assert_(not a.flags.aligned) + + def test_ticket_1770(self): + "Should not segfault on python 3k" + import numpy as np + try: + a = np.zeros((1,), dtype=[('f1', 'f')]) + a['f1'] = 1 + a['f2'] = 1 + except ValueError: + pass + except Exception: + raise AssertionError + + def test_ticket_1608(self): + "x.flat shouldn't modify data" + x = np.array([[1, 2], [3, 4]]).T + np.array(x.flat) + assert_equal(x, [[1, 3], [2, 4]]) + + def 
test_pickle_string_overwrite(self): + import re + + data = np.array([1], dtype='b') + blob = pickle.dumps(data, protocol=1) + data = pickle.loads(blob) + + # Check that loads does not clobber interned strings + s = re.sub(r"a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + data[0] = 0x6a + s = re.sub(r"a(.)", "\x01\\1", "a_") + assert_equal(s[0], "\x01") + + def test_pickle_bytes_overwrite(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + data = np.array([1], dtype='b') + data = pickle.loads(pickle.dumps(data, protocol=proto)) + data[0] = 0x7d + bytestring = "\x01 ".encode('ascii') + assert_equal(bytestring[0:1], '\x01'.encode('ascii')) + + def test_pickle_py2_array_latin1_hack(self): + # Check that unpickling hacks in Py3 that support + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(numpy.array([129], dtype='b')) + data = b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\np13\ntp14\nb." + # This should work: + result = pickle.loads(data, encoding='latin1') + assert_array_equal(result, np.array([129]).astype('b')) + # Should not segfault: + assert_raises(Exception, pickle.loads, data, encoding='koi8-r') + + def test_pickle_py2_scalar_latin1_hack(self): + # Check that scalar unpickling hack in Py3 that supports + # encoding='latin1' work correctly. + + # Python2 output for pickle.dumps(...) + datas = [ + # (original, python2_pickle, koi8r_validity) + (np.str_('\u6bd2'), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\ntp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n.", + 'invalid'), + + (np.float64(9e123), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\nbS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n.", + 'invalid'), + + # different 8-bit code point in KOI8-R vs latin1 + (np.bytes_(b'\x9c'), + b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\ntp8\nRp9\n.", + 'different'), + ] + for original, data, koi8r_validity in datas: + result = pickle.loads(data, encoding='latin1') + assert_equal(result, original) + + # Decoding under non-latin1 encoding (e.g.) KOI8-R can + # produce bad results, but should not segfault. + if koi8r_validity == 'different': + # Unicode code points happen to lie within latin1, + # but are different in koi8-r, resulting to silent + # bogus results + result = pickle.loads(data, encoding='koi8-r') + assert_(result != original) + elif koi8r_validity == 'invalid': + # Unicode code points outside latin1, so results + # to an encoding exception + assert_raises( + ValueError, pickle.loads, data, encoding='koi8-r' + ) + else: + raise ValueError(koi8r_validity) + + def test_structured_type_to_object(self): + a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8') + a_obj = np.empty((2,), dtype=object) + a_obj[0] = (0, 1) + a_obj[1] = (3, 2) + # astype records -> object + assert_equal(a_rec.astype(object), a_obj) + # '=' records -> object + b = np.empty_like(a_obj) + b[...] = a_rec + assert_equal(b, a_obj) + # '=' object -> records + b = np.empty_like(a_rec) + b[...] 
= a_obj + assert_equal(b, a_rec) + + def test_assign_obj_listoflists(self): + # Ticket # 1870 + # The inner list should get assigned to the object elements + a = np.zeros(4, dtype=object) + b = a.copy() + a[0] = [1] + a[1] = [2] + a[2] = [3] + a[3] = [4] + b[...] = [[1], [2], [3], [4]] + assert_equal(a, b) + # The first dimension should get broadcast + a = np.zeros((2, 2), dtype=object) + a[...] = [[1, 2]] + assert_equal(a, [[1, 2], [1, 2]]) + + @pytest.mark.slow_pypy + def test_memoryleak(self): + # Ticket #1917 - ensure that array data doesn't leak + for i in range(1000): + # 100MB times 1000 would give 100GB of memory usage if it leaks + a = np.empty((100000000,), dtype='i1') + del a + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_ufunc_reduce_memoryleak(self): + a = np.arange(6) + acnt = sys.getrefcount(a) + np.add.reduce(a) + assert_equal(sys.getrefcount(a), acnt) + + def test_search_sorted_invalid_arguments(self): + # Ticket #2021, should not segfault. + x = np.arange(0, 4, dtype='datetime64[D]') + assert_raises(TypeError, x.searchsorted, 1) + + def test_string_truncation(self): + # Ticket #1990 - Data can be truncated in creation of an array from a + # mixed sequence of numeric values and strings (gh-2583) + for val in [True, 1234, 123.4, complex(1, 234)]: + for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]: + b = np.array([val, tostr('xx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + # test also with longer strings + b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype) + assert_equal(tostr(b[0]), tostr(val)) + b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype) + assert_equal(tostr(b[1]), tostr(val)) + + def test_string_truncation_ucs2(self): + # Ticket #2081. Python compiled with two byte unicode + # can lead to truncation if itemsize is not properly + # adjusted for NumPy's four byte unicode. + a = np.array(['abcd']) + assert_equal(a.dtype.itemsize, 16) + + def test_unique_stable(self): + # Ticket #2063 must always choose stable sort for argsort to + # get consistent results + v = np.array(([0] * 5 + [1] * 6 + [2] * 6) * 4) + res = np.unique(v, return_index=True) + tgt = (np.array([0, 1, 2]), np.array([0, 5, 11])) + assert_equal(res, tgt) + + def test_unicode_alloc_dealloc_match(self): + # Ticket #1578, the mismatch only showed up when running + # python-debug for python versions >= 2.7, and then as + # a core dump and error message. + a = np.array(['abc'], dtype=np.str_)[0] + del a + + def test_refcount_error_in_clip(self): + # Ticket #1588 + a = np.zeros((2,), dtype='>i2').clip(min=0) + x = a + a + # This used to segfault: + y = str(x) + # Check the final string: + assert_(y == "[0 0]") + + def test_searchsorted_wrong_dtype(self): + # Ticket #2189, it used to segfault, so we check that it raises the + # proper exception. + a = np.array([('a', 1)], dtype='S1, int') + assert_raises(TypeError, np.searchsorted, a, 1.2) + # Ticket #2066, similar problem: + dtype = np.rec.format_parser(['i4', 'i4'], [], []) + a = np.recarray((2,), dtype) + a[...] 
= [(1, 2), (3, 4)]
+        assert_raises(TypeError, np.searchsorted, a, 1)
+
+    def test_complex64_alignment(self):
+        # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
+        dtt = np.complex64
+        arr = np.arange(10, dtype=dtt)
+        # 2D array
+        arr2 = np.reshape(arr, (2, 5))
+        # Fortran write followed by (C or F) read caused bus error
+        data_str = arr2.tobytes('F')
+        data_back = np.ndarray(arr2.shape,
+                               arr2.dtype,
+                               buffer=data_str,
+                               order='F')
+        assert_array_equal(arr2, data_back)
+
+    def test_structured_count_nonzero(self):
+        arr = np.array([0, 1]).astype('i4, 2i4')[:1]
+        count = np.count_nonzero(arr)
+        assert_equal(count, 0)
+
+    def test_copymodule_preserves_f_contiguity(self):
+        a = np.empty((2, 2), order='F')
+        b = copy.copy(a)
+        c = copy.deepcopy(a)
+        assert_(b.flags.fortran)
+        assert_(b.flags.f_contiguous)
+        assert_(c.flags.fortran)
+        assert_(c.flags.f_contiguous)
+
+    def test_fortran_order_buffer(self):
+        import numpy as np
+        a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
+        arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
+        arr2 = np.array([[['H', 'e', 'l', 'l', 'o'],
+                          ['F', 'o', 'o', 'b', '']]])
+        assert_array_equal(arr, arr2)
+
+    def test_assign_from_sequence_error(self):
+        # Ticket #4024.
+        arr = np.array([1, 2, 3])
+        assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
+        arr.__setitem__(slice(None), [9])
+        assert_equal(arr, [9, 9, 9])
+
+    def test_format_on_flex_array_element(self):
+        # Ticket #4369.
+        dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
+        arr = np.array([('2000-01-01', 1)], dt)
+        formatted = f'{arr[0]}'
+        assert_equal(formatted, str(arr[0]))
+
+    def test_pickle_module(self):
+        # gh-12837
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            s = pickle.dumps(np.arange(10), protocol=proto)
+            if proto >= 5:
+                # unpickling ndarray goes through _frombuffer for protocol 5
+                assert b'numpy._core.numeric' in s
+            else:
+                assert b'numpy._core.multiarray' in s
+
+    def test_object_casting_errors(self):
+        # gh-11993 update to ValueError (see gh-16909), since strings can in
+        # principle be converted to complex, but this string cannot.
+        arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
+        assert_raises(ValueError, arr.astype, 'c8')
+
+    def test_eff1d_casting(self):
+        # gh-12711
+        x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+        res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+        assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
+
+        # The use of safe casting means that 1<<20 is cast unsafely, an
+        # error may be better, but currently there is no mechanism for it.
+        res = np.ediff1d(x, to_begin=(1 << 20), to_end=(1 << 20))
+        assert_equal(res, [0, 1, 2, 3, -7, 0])
+
+    def test_pickle_datetime64_array(self):
+        # gh-12745 (would fail with pickle5 installed)
+        d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+        arr = np.array([d])
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            dumped = pickle.dumps(arr, protocol=proto)
+            assert_equal(pickle.loads(dumped), arr)
+
+    def test_bad_array_interface(self):
+        class T:
+            __array_interface__ = {}
+
+        with assert_raises(ValueError):
+            np.array([T()])
+
+    def test_2d__array__shape(self):
+        class T:
+            def __array__(self, dtype=None, copy=None):
+                return np.ndarray(shape=(0, 0))
+
+            # Make sure __array__ is used instead of Sequence methods.
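+            # (Aside, not part of the original test: np.array() prefers
+            # __array__() over the Sequence protocol, so the methods below
+            # should never be reached.)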
+            def __iter__(self):
+                return iter([])
+
+            def __getitem__(self, idx):
+                raise AssertionError("__getitem__ was called")
+
+            def __len__(self):
+                return 0
+
+        t = T()
+        # gh-13659, would raise in broadcasting [x=t for x in result]
+        arr = np.array([t])
+        assert arr.shape == (1, 0, 0)
+
+    @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+    def test_to_ctypes(self):
+        # gh-14214
+        arr = np.zeros((2 ** 31 + 1,), 'b')
+        assert arr.size * arr.itemsize > 2 ** 31
+        c_arr = np.ctypeslib.as_ctypes(arr)
+        assert_equal(c_arr._length_, arr.size)
+
+    def test_complex_conversion_error(self):
+        # gh-17068
+        with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
+            complex(np.array("now", np.datetime64))
+
+    def test__array_interface__descr(self):
+        # gh-17068
+        dt = np.dtype({'names': ['a', 'b'],
+                       'offsets': [0, 0],
+                       'formats': [np.int64, np.int64]})
+        descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
+        assert descr == [('', '|V8')]  # instead of [(b'', '|V8')]
+
+    @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+    @requires_memory(free_bytes=9e9)
+    def test_dot_big_stride(self):
+        # gh-17111
+        # blas stride = stride//itemsize > int32 max
+        int32_max = np.iinfo(np.int32).max
+        n = int32_max + 3
+        a = np.empty([n], dtype=np.float32)
+        b = a[::n - 1]
+        b[...] = 1
+        assert b.strides[0] > int32_max * b.dtype.itemsize
+        assert np.dot(b, b) == 2.0
+
+    def test_frompyfunc_name(self):
+        # name conversion was failing for python 3 strings
+        # resulting in the default '?' name. Also test utf-8
+        # encoding using non-ascii name.
+        def cassé(x):
+            return x
+
+        f = np.frompyfunc(cassé, 1, 1)
+        assert str(f) == "<ufunc 'cassé (vectorized)'>"
+
+    @pytest.mark.parametrize("operation", [
+        'add', 'subtract', 'multiply', 'floor_divide',
+        'conjugate', 'fmod', 'square', 'reciprocal',
+        'power', 'absolute', 'negative', 'positive',
+        'greater', 'greater_equal', 'less',
+        'less_equal', 'equal', 'not_equal', 'logical_and',
+        'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or',
+        'bitwise_xor', 'invert', 'left_shift', 'right_shift',
+        'gcd', 'lcm'
+        ]
+    )
+    @pytest.mark.parametrize("order", [
+        ('b->', 'B->'),
+        ('h->', 'H->'),
+        ('i->', 'I->'),
+        ('l->', 'L->'),
+        ('q->', 'Q->'),
+        ]
+    )
+    def test_ufunc_order(self, operation, order):
+        # gh-18075
+        # Ensure signed types before unsigned
+        def get_idx(string, str_lst):
+            for i, s in enumerate(str_lst):
+                if string in s:
+                    return i
+            raise ValueError(f"{string} not in list")
+        types = getattr(np, operation).types
+        assert get_idx(order[0], types) < get_idx(order[1], types), (
+            f"Unexpected types order of ufunc in {operation} "
+            f"for {order}. Possible fix: Use signed before unsigned "
+            "in generate_umath.py")
+
+    def test_nonbool_logical(self):
+        # gh-22845
+        # create two arrays with bit patterns that do not overlap.
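+        # (Aside: 0x01 and 0x80 share no set bits, so a bytewise AND of the
+        # two buffers is all zero, while logical_and must still be True
+        # elementwise because both inputs are truthy.)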
+ # needs to be large enough to test both SIMD and scalar paths + size = 100 + a = np.frombuffer(b'\x01' * size, dtype=np.bool) + b = np.frombuffer(b'\x80' * size, dtype=np.bool) + expected = np.ones(size, dtype=np.bool) + assert_array_equal(np.logical_and(a, b), expected) + + @pytest.mark.skipif(IS_PYPY, reason="PyPy issue 2742") + def test_gh_23737(self): + with pytest.raises(TypeError, match="not an acceptable base type"): + class Y(np.flexible): + pass + + with pytest.raises(TypeError, match="not an acceptable base type"): + class X(np.flexible, np.ma.core.MaskedArray): + pass + + def test_load_ufunc_pickle(self): + # ufuncs are pickled with a semi-private path in + # numpy.core._multiarray_umath and must be loadable without warning + # despite np.core being deprecated. + test_data = b'\x80\x04\x95(\x00\x00\x00\x00\x00\x00\x00\x8c\x1cnumpy.core._multiarray_umath\x94\x8c\x03add\x94\x93\x94.' + result = pickle.loads(test_data, encoding='bytes') + assert result is np.add + + def test__array_namespace__(self): + arr = np.arange(2) + + xp = arr.__array_namespace__() + assert xp is np + xp = arr.__array_namespace__(api_version="2021.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2022.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2023.12") + assert xp is np + xp = arr.__array_namespace__(api_version="2024.12") + assert xp is np + xp = arr.__array_namespace__(api_version=None) + assert xp is np + + with pytest.raises( + ValueError, + match="Version \"2025.12\" of the Array API Standard " + "is not supported." + ): + arr.__array_namespace__(api_version="2025.12") + + with pytest.raises( + ValueError, + match="Only None and strings are allowed as the Array API version" + ): + arr.__array_namespace__(api_version=2024) + + def test_isin_refcnt_bug(self): + # gh-25295 + for _ in range(1000): + np.isclose(np.int64(2), np.int64(2), atol=1e-15, rtol=1e-300) + + def test_replace_regression(self): + # gh-25513 segfault + carr = np.char.chararray((2,), itemsize=25) + test_strings = [b' 4.52173913043478315E+00', + b' 4.95652173913043548E+00'] + carr[:] = test_strings + out = carr.replace(b"E", b"D") + expected = np.char.chararray((2,), itemsize=25) + expected[:] = [s.replace(b"E", b"D") for s in test_strings] + assert_array_equal(out, expected) + + def test_logspace_base_does_not_determine_dtype(self): + # gh-24957 and cupy/cupy/issues/7946 + start = np.array([0, 2], dtype=np.float16) + stop = np.array([2, 0], dtype=np.float16) + out = np.logspace(start, stop, num=5, axis=1, dtype=np.float32) + expected = np.array([[1., 3.1621094, 10., 31.625, 100.], + [100., 31.625, 10., 3.1621094, 1.]], + dtype=np.float32) + assert_almost_equal(out, expected) + # Check test fails if the calculation is done in float64, as happened + # before when a python float base incorrectly influenced the dtype. 
+        out2 = np.logspace(start, stop, num=5, axis=1, dtype=np.float32,
+                           base=np.array([10.0]))
+        with pytest.raises(AssertionError, match="not almost equal"):
+            assert_almost_equal(out2, expected)
+
+    def test_vectorize_fixed_width_string(self):
+        arr = np.array(["SOme wOrd DŽ ß ᾛ ΣΣ ffi⁵Å Ç Ⅰ"]).astype(np.str_)
+        f = str.casefold
+        res = np.vectorize(f, otypes=[arr.dtype])(arr)
+        assert res.dtype == "U30"
+
+    def test_repeated_square_consistency(self):
+        # gh-26940
+        buf = np.array([-5.171866611150749e-07 + 2.5618634555957426e-07j,
+                        0, 0, 0, 0, 0])
+        # Test buffer with regular and reverse strides
+        for in_vec in [buf[:3], buf[:3][::-1]]:
+            expected_res = np.square(in_vec)
+            # Output vector immediately follows input vector
+            # to reproduce off-by-one in nomemoverlap check.
+            for res in [buf[3:], buf[3:][::-1]]:
+                res = buf[3:]
+                np.square(in_vec, out=res)
+                assert_equal(res, expected_res)
+
+    def test_sort_unique_crash(self):
+        # gh-27037
+        for _ in range(4):
+            vals = np.linspace(0, 1, num=128)
+            data = np.broadcast_to(vals, (128, 128, 128))
+            data = data.transpose(0, 2, 1).copy()
+            np.unique(data)
+
+    def test_sort_overlap(self):
+        # gh-27273
+        size = 100
+        inp = np.linspace(0, size, num=size, dtype=np.intc)
+        out = np.sort(inp)
+        assert_equal(inp, out)
+
+    def test_searchsorted_structured(self):
+        # gh-28190
+        x = np.array([(0, 1.)], dtype=[('time', '<i8'), ('value', '<f8')])
+        # smoke test: searchsorted on a structured array should not crash
+        x.searchsorted(x[0])
diff --git a/python/numpy/_core/tests/test_scalar_methods.py b/python/numpy/_core/tests/test_scalar_methods.py
new file mode 100644
--- /dev/null
+++ b/python/numpy/_core/tests/test_scalar_methods.py
+"""
+Test the methods defined on NumPy scalar types.
+"""
+import types
+from typing import Any
+
+import pytest
+
+import numpy as np
+from numpy._core import sctypes
+
+
+class TestIsInteger:
+    @pytest.mark.parametrize("str_value", ["inf", "nan"])
+    @pytest.mark.parametrize("code", np.typecodes["Float"])
+    def test_special(self, code: str, str_value: str) -> None:
+        cls = np.dtype(code).type
+        value = cls(str_value)
+        assert not value.is_integer()
+
+    @pytest.mark.parametrize(
+        "code", np.typecodes["Float"] + np.typecodes["AllInteger"]
+    )
+    def test_true(self, code: str) -> None:
+        float_array = np.arange(-5, 5).astype(code)
+        for value in float_array:
+            assert value.is_integer()
+
+    @pytest.mark.parametrize("code", np.typecodes["Float"])
+    def test_false(self, code: str) -> None:
+        float_array = np.arange(-5, 5).astype(code)
+        float_array *= 1.1
+        for value in float_array:
+            if value == 0:
+                continue
+            assert not value.is_integer()
+
+
+class TestClassGetItem:
+    @pytest.mark.parametrize("cls", [
+        np.number,
+        np.integer,
+        np.inexact,
+        np.unsignedinteger,
+        np.signedinteger,
+        np.floating,
+    ])
+    def test_abc(self, cls: type[np.number]) -> None:
+        alias = cls[Any]
+        assert isinstance(alias, types.GenericAlias)
+        assert alias.__origin__ is cls
+
+    def test_abc_complexfloating(self) -> None:
+        alias = np.complexfloating[Any, Any]
+        assert isinstance(alias, types.GenericAlias)
+        assert alias.__origin__ is np.complexfloating
+
+    @pytest.mark.parametrize("arg_len", range(4))
+    def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
+        arg_tup = (Any,) * arg_len
+        if arg_len in (1, 2):
+            assert np.complexfloating[arg_tup]
+        else:
+            match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
+            with pytest.raises(TypeError, match=match):
+                np.complexfloating[arg_tup]
+
+    @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character])
+    def test_abc_non_numeric(self, cls: type[np.generic]) -> None:
+        with pytest.raises(TypeError):
+            cls[Any]
+
+    @pytest.mark.parametrize("code", np.typecodes["All"])
+    def test_concrete(self, code: str) -> None:
+        cls = np.dtype(code).type
+        with pytest.raises(TypeError):
+            cls[Any]
+
+    @pytest.mark.parametrize("arg_len", range(4))
+    def test_subscript_tuple(self, arg_len: int) -> None:
+        arg_tup = (Any,) * arg_len
+        if arg_len == 1:
+            assert np.number[arg_tup]
+        else:
+            with pytest.raises(TypeError):
+                np.number[arg_tup]
+
+    def test_subscript_scalar(self) -> None:
+        assert np.number[Any]
+
+
+class TestBitCount:
+    # derived in part from the
cpython test "test_bit_count" + + @pytest.mark.parametrize("itype", sctypes['int'] + sctypes['uint']) + def test_small(self, itype): + for a in range(max(np.iinfo(itype).min, 0), 128): + msg = f"Smoke test for {itype}({a}).bit_count()" + assert itype(a).bit_count() == a.bit_count(), msg + + def test_bit_count(self): + for exp in [10, 17, 63]: + a = 2**exp + assert np.uint64(a).bit_count() == 1 + assert np.uint64(a - 1).bit_count() == exp + assert np.uint64(a ^ 63).bit_count() == 7 + assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8 + + +class TestDevice: + """ + Test scalar.device attribute and scalar.to_device() method. + """ + scalars = [np.bool(True), np.int64(1), np.uint64(1), np.float64(1.0), + np.complex128(1 + 1j)] + + @pytest.mark.parametrize("scalar", scalars) + def test_device(self, scalar): + assert scalar.device == "cpu" + + @pytest.mark.parametrize("scalar", scalars) + def test_to_device(self, scalar): + assert scalar.to_device("cpu") is scalar + + @pytest.mark.parametrize("scalar", scalars) + def test___array_namespace__(self, scalar): + assert scalar.__array_namespace__() is np + + +@pytest.mark.parametrize("scalar", [np.bool(True), np.int8(1), np.float64(1)]) +def test_array_wrap(scalar): + # Test scalars array wrap as long as it exists. NumPy itself should + # probably not use it, so it may not be necessary to keep it around. + + arr0d = np.array(3, dtype=np.int8) + # Third argument not passed, None, or True "decays" to scalar. + # (I don't think NumPy would pass `None`, but it seems clear to support) + assert type(scalar.__array_wrap__(arr0d)) is np.int8 + assert type(scalar.__array_wrap__(arr0d, None, None)) is np.int8 + assert type(scalar.__array_wrap__(arr0d, None, True)) is np.int8 + + # Otherwise, result should be the input + assert scalar.__array_wrap__(arr0d, None, False) is arr0d + + # An old bug. 
A non 0-d array cannot be converted to scalar: + arr1d = np.array([3], dtype=np.int8) + assert scalar.__array_wrap__(arr1d) is arr1d + assert scalar.__array_wrap__(arr1d, None, True) is arr1d diff --git a/python/numpy/_core/tests/test_scalarbuffer.py b/python/numpy/_core/tests/test_scalarbuffer.py new file mode 100644 index 000000000..4d6b5bdd7 --- /dev/null +++ b/python/numpy/_core/tests/test_scalarbuffer.py @@ -0,0 +1,153 @@ +""" +Test scalar buffer interface adheres to PEP 3118 +""" +import pytest +from numpy._core._multiarray_tests import get_buffer_info +from numpy._core._rational_tests import rational + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + +# PEP3118 format strings for native (standard alignment and byteorder) types +scalars_and_codes = [ + (np.bool, '?'), + (np.byte, 'b'), + (np.short, 'h'), + (np.intc, 'i'), + (np.long, 'l'), + (np.longlong, 'q'), + (np.ubyte, 'B'), + (np.ushort, 'H'), + (np.uintc, 'I'), + (np.ulong, 'L'), + (np.ulonglong, 'Q'), + (np.half, 'e'), + (np.single, 'f'), + (np.double, 'd'), + (np.longdouble, 'g'), + (np.csingle, 'Zf'), + (np.cdouble, 'Zd'), + (np.clongdouble, 'Zg'), +] +scalars_only, codes_only = zip(*scalars_and_codes) + + +class TestScalarPEP3118: + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_match_array(self, scalar): + x = scalar() + a = np.array([], dtype=np.dtype(scalar)) + mv_x = memoryview(x) + mv_a = memoryview(a) + assert_equal(mv_x.format, mv_a.format) + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_dim(self, scalar): + x = scalar() + mv_x = memoryview(x) + assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only) + def test_scalar_code_and_properties(self, scalar, code): + x = scalar() + expected = {'strides': (), 'itemsize': x.dtype.itemsize, 'ndim': 0, + 'shape': (), 'format': code, 'readonly': True} + + mv_x = memoryview(x) + assert self._as_dict(mv_x) == expected + + @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only) + def test_scalar_buffers_readonly(self, scalar): + x = scalar() + with pytest.raises(BufferError, match="scalar buffer is readonly"): + get_buffer_info(x, ["WRITABLE"]) + + def test_void_scalar_structured_data(self): + dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()] + assert_(isinstance(x, np.void)) + mv_x = memoryview(x) + expected_size = 16 * np.dtype((np.str_, 1)).itemsize + expected_size += 2 * np.dtype(np.float64).itemsize + assert_equal(mv_x.itemsize, expected_size) + assert_equal(mv_x.ndim, 0) + assert_equal(mv_x.shape, ()) + assert_equal(mv_x.strides, ()) + assert_equal(mv_x.suboffsets, ()) + + # check scalar format string against ndarray format string + a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + assert_(isinstance(a, np.ndarray)) + mv_a = memoryview(a) + assert_equal(mv_x.itemsize, mv_a.itemsize) + assert_equal(mv_x.format, mv_a.format) + + # Check that we do not allow writeable buffer export (technically + # we could allow it sometimes here...) 
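+        # (Aside, illustration only: PEP 3118 scalar buffers are exported
+        # read-only, so memoryview(x).readonly is True and any WRITABLE
+        # request must raise BufferError.)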
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(x, ["WRITABLE"])
+
+    def _as_dict(self, m):
+        return {'strides': m.strides, 'shape': m.shape, 'itemsize': m.itemsize,
+                'ndim': m.ndim, 'format': m.format, 'readonly': m.readonly}
+
+    def test_datetime_memoryview(self):
+        # gh-11656
+        # Values verified with v1.13.3, shape is not () as in test_scalar_dim
+
+        dt1 = np.datetime64('2016-01-01')
+        dt2 = np.datetime64('2017-01-01')
+        expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, 'shape': (8,),
+                    'format': 'B', 'readonly': True}
+        v = memoryview(dt1)
+        assert self._as_dict(v) == expected
+
+        v = memoryview(dt2 - dt1)
+        assert self._as_dict(v) == expected
+
+        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+        a = np.empty(1, dt)
+        # Fails to create a PEP 3118 valid buffer
+        assert_raises((ValueError, BufferError), memoryview, a[0])
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(dt1, ["WRITABLE"])
+
+    @pytest.mark.parametrize('s', [
+        pytest.param("\x32\x32", id="ascii"),
+        pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
+        pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
+        ])
+    def test_str_ucs4(self, s):
+        s = np.str_(s)  # only our subclass implements the buffer protocol
+
+        # all the same, characters always encode as ucs4
+        expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (),
+                    'format': '2w', 'readonly': True}
+
+        v = memoryview(s)
+        assert self._as_dict(v) == expected
+
+        # integers of the platform-appropriate endianness
+        code_points = np.frombuffer(v, dtype='i4')
+
+        assert_equal(code_points, [ord(c) for c in s])
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(s, ["WRITABLE"])
+
+    def test_user_scalar_fails_buffer(self):
+        r = rational(1)
+        with assert_raises(TypeError):
+            memoryview(r)
+
+        # Check that we do not allow writeable buffer export
+        with pytest.raises(BufferError, match="scalar buffer is readonly"):
+            get_buffer_info(r, ["WRITABLE"])
diff --git a/python/numpy/_core/tests/test_scalarinherit.py b/python/numpy/_core/tests/test_scalarinherit.py
new file mode 100644
index 000000000..746a15747
--- /dev/null
+++ b/python/numpy/_core/tests/test_scalarinherit.py
@@ -0,0 +1,105 @@
+""" Test printing of scalar types.
+
+"""
+import pytest
+
+import numpy as np
+from numpy.testing import assert_, assert_raises
+
+
+class A:
+    pass
+class B(A, np.float64):
+    pass
+
+class C(B):
+    pass
+class D(C, B):
+    pass
+
+class B0(np.float64, A):
+    pass
+class C0(B0):
+    pass
+
+class HasNew:
+    def __new__(cls, *args, **kwargs):
+        return cls, args, kwargs
+
+class B1(np.float64, HasNew):
+    pass
+
+
+class TestInherit:
+    def test_init(self):
+        x = B(1.0)
+        assert_(str(x) == '1.0')
+        y = C(2.0)
+        assert_(str(y) == '2.0')
+        z = D(3.0)
+        assert_(str(z) == '3.0')
+
+    def test_init2(self):
+        x = B0(1.0)
+        assert_(str(x) == '1.0')
+        y = C0(2.0)
+        assert_(str(y) == '2.0')
+
+    def test_gh_15395(self):
+        # HasNew is the second base, so `np.float64` should have priority
+        x = B1(1.0)
+        assert_(str(x) == '1.0')
+
+        # previously caused RecursionError!?
+ with pytest.raises(TypeError): + B1(1.0, 2.0) + + def test_int_repr(self): + # Test that integer repr works correctly for subclasses (gh-27106) + class my_int16(np.int16): + pass + + s = repr(my_int16(3)) + assert s == "my_int16(3)" + +class TestCharacter: + def test_char_radd(self): + # GH issue 9620, reached gentype_add and raise TypeError + np_s = np.bytes_('abc') + np_u = np.str_('abc') + s = b'def' + u = 'def' + assert_(np_s.__radd__(np_s) is NotImplemented) + assert_(np_s.__radd__(np_u) is NotImplemented) + assert_(np_s.__radd__(s) is NotImplemented) + assert_(np_s.__radd__(u) is NotImplemented) + assert_(np_u.__radd__(np_s) is NotImplemented) + assert_(np_u.__radd__(np_u) is NotImplemented) + assert_(np_u.__radd__(s) is NotImplemented) + assert_(np_u.__radd__(u) is NotImplemented) + assert_(s + np_s == b'defabc') + assert_(u + np_u == 'defabc') + + class MyStr(str, np.generic): + # would segfault + pass + + with assert_raises(TypeError): + # Previously worked, but gave completely wrong result + ret = s + MyStr('abc') + + class MyBytes(bytes, np.generic): + # would segfault + pass + + ret = s + MyBytes(b'abc') + assert type(ret) is type(s) + assert ret == b"defabc" + + def test_char_repeat(self): + np_s = np.bytes_('abc') + np_u = np.str_('abc') + res_s = b'abc' * 5 + res_u = 'abc' * 5 + assert_(np_s * 5 == res_s) + assert_(np_u * 5 == res_u) diff --git a/python/numpy/_core/tests/test_scalarmath.py b/python/numpy/_core/tests/test_scalarmath.py new file mode 100644 index 000000000..fc37897bb --- /dev/null +++ b/python/numpy/_core/tests/test_scalarmath.py @@ -0,0 +1,1176 @@ +import contextlib +import itertools +import operator +import platform +import sys +import warnings + +import pytest +from hypothesis import given, settings +from hypothesis.extra import numpy as hynp +from hypothesis.strategies import sampled_from +from numpy._core._rational_tests import rational + +import numpy as np +from numpy._utils import _pep440 +from numpy.exceptions import ComplexWarning +from numpy.testing import ( + IS_PYPY, + _gen_alignment_data, + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + check_support_sve, + suppress_warnings, +) + +types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, + np.int_, np.uint, np.longlong, np.ulonglong, + np.single, np.double, np.longdouble, np.csingle, + np.cdouble, np.clongdouble] + +floating_types = np.floating.__subclasses__() +complex_floating_types = np.complexfloating.__subclasses__() + +objecty_things = [object(), None, np.array(None, dtype=object)] + +binary_operators_for_scalars = [ + operator.lt, operator.le, operator.eq, operator.ne, operator.ge, + operator.gt, operator.add, operator.floordiv, operator.mod, + operator.mul, operator.pow, operator.sub, operator.truediv +] +binary_operators_for_scalar_ints = binary_operators_for_scalars + [ + operator.xor, operator.or_, operator.and_ +] + + +# This compares scalarmath against ufuncs. 
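+# A minimal sketch of the equivalence exercised below (illustration only,
+# using nothing beyond public ufunc APIs): for matching scalar types the
+# operator and its ufunc should agree in both value and result dtype, e.g.
+#     assert np.add(np.float32(2), np.float32(3)) == np.float32(2) + np.float32(3)
+#     assert np.add(np.float32(2), np.float32(3)).dtype == np.float32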
+ +class TestTypes: + def test_types(self): + for atype in types: + a = atype(1) + assert_(a == 1, f"error with {atype!r}: got {a!r}") + + def test_type_add(self): + # list of types + for k, atype in enumerate(types): + a_scalar = atype(3) + a_array = np.array([3], dtype=atype) + for l, btype in enumerate(types): + b_scalar = btype(1) + b_array = np.array([1], dtype=btype) + c_scalar = a_scalar + b_scalar + c_array = a_array + b_array + # It was comparing the type numbers, but the new ufunc + # function-finding mechanism finds the lowest function + # to which both inputs can be cast - which produces 'l' + # when you do 'q' + 'b'. The old function finding mechanism + # skipped ahead based on the first argument, but that + # does not produce properly symmetric results... + assert_equal(c_scalar.dtype, c_array.dtype, + "error with types (%d/'%c' + %d/'%c')" % + (k, np.dtype(atype).char, l, np.dtype(btype).char)) + + def test_type_create(self): + for atype in types: + a = np.array([1, 2, 3], atype) + b = atype([1, 2, 3]) + assert_equal(a, b) + + def test_leak(self): + # test leak of scalar objects + # a leak would show up in valgrind as still-reachable of ~2.6MB + for i in range(200000): + np.add(1, 1) + + +def check_ufunc_scalar_equivalence(op, arr1, arr2): + scalar1 = arr1[()] + scalar2 = arr2[()] + assert isinstance(scalar1, np.generic) + assert isinstance(scalar2, np.generic) + + if arr1.dtype.kind == "c" or arr2.dtype.kind == "c": + comp_ops = {operator.ge, operator.gt, operator.le, operator.lt} + if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)): + pytest.xfail("complex comp ufuncs use sort-order, scalars do not.") + if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]: + # array**scalar special case can have different result dtype + # (Other powers may have issues also, but are not hit here.) + # TODO: It would be nice to resolve this issue. + pytest.skip("array**2 can have incorrect/weird result dtype") + + # ignore fpe's since they may just mismatch for integers anyway. + with warnings.catch_warnings(), np.errstate(all="ignore"): + # Comparisons DeprecationWarnings replacing errors (2022-03): + warnings.simplefilter("error", DeprecationWarning) + try: + res = op(arr1, arr2) + except Exception as e: + with pytest.raises(type(e)): + op(scalar1, scalar2) + else: + scalar_res = op(scalar1, scalar2) + assert_array_equal(scalar_res, res, strict=True) + + +@pytest.mark.slow +@settings(max_examples=10000, deadline=2000) +@given(sampled_from(binary_operators_for_scalars), + hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()), + hynp.arrays(dtype=hynp.scalar_dtypes(), shape=())) +def test_array_scalar_ufunc_equivalence(op, arr1, arr2): + """ + This is a thorough test attempting to cover important promotion paths + and ensuring that arrays and scalars stay as aligned as possible. + However, if it creates troubles, it should maybe just be removed. + """ + check_ufunc_scalar_equivalence(op, arr1, arr2) + + +@pytest.mark.slow +@given(sampled_from(binary_operators_for_scalars), + hynp.scalar_dtypes(), hynp.scalar_dtypes()) +def test_array_scalar_ufunc_dtypes(op, dt1, dt2): + # Same as above, but don't worry about sampling weird values so that we + # do not have to sample as much + arr1 = np.array(2, dtype=dt1) + arr2 = np.array(3, dtype=dt2) # some power do weird things. 
+
+    check_ufunc_scalar_equivalence(op, arr1, arr2)
+
+
+@pytest.mark.parametrize("fscalar", [np.float16, np.float32])
+def test_int_float_promotion_truediv(fscalar):
+    # Promotion for mixed int and float32/float16 must not go to float64
+    i = np.int8(1)
+    f = fscalar(1)
+    expected = np.result_type(i, f)
+    assert (i / f).dtype == expected
+    assert (f / i).dtype == expected
+    # But normal int / int true division goes to float64:
+    assert (i / i).dtype == np.dtype("float64")
+    # For int16, result has to be at least float32 (takes ufunc path):
+    assert (np.int16(1) / f).dtype == np.dtype("float32")
+
+
+class TestBaseMath:
+    @pytest.mark.xfail(check_support_sve(), reason="gh-22982")
+    def test_blocked(self):
+        # test alignment offsets for simd instructions
+        # alignments for vz + 2 * (vs - 1) + 1
+        for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
+            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
+                                                            type='binary',
+                                                            max_size=sz):
+                exp1 = np.ones_like(inp1)
+                inp1[...] = np.ones_like(inp1)
+                inp2[...] = np.zeros_like(inp2)
+                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
+                assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
+                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
+
+                np.add(inp1, inp2, out=out)
+                assert_almost_equal(out, exp1, err_msg=msg)
+
+                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
+                assert_almost_equal(np.square(inp2),
+                                    np.multiply(inp2, inp2), err_msg=msg)
+                # skip true divide for ints
+                if dt != np.int32:
+                    assert_almost_equal(np.reciprocal(inp2),
+                                        np.divide(1, inp2), err_msg=msg)
+
+                inp1[...] = np.ones_like(inp1)
+                np.add(inp1, 2, out=out)
+                assert_almost_equal(out, exp1 + 2, err_msg=msg)
+                inp2[...] = np.ones_like(inp2)
+                np.add(2, inp2, out=out)
+                assert_almost_equal(out, exp1 + 2, err_msg=msg)
+
+    def test_lower_align(self):
+        # check data that is not aligned to element size
+        # i.e. doubles are aligned to 4 bytes on i386
+        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
+        assert_almost_equal(d + d, d * 2)
+        np.add(d, d, out=o)
+        np.add(np.ones_like(d), d, out=o)
+        np.add(d, np.ones_like(d), out=o)
+        np.add(np.ones_like(d), d)
+        np.add(d, np.ones_like(d))
+
+
+class TestPower:
+    def test_small_types(self):
+        for t in [np.int8, np.int16, np.float16]:
+            a = t(3)
+            b = a ** 4
+            assert_(b == 81, f"error with {t!r}: got {b!r}")
+
+    def test_large_types(self):
+        for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
+            a = t(51)
+            b = a ** 4
+            msg = f"error with {t!r}: got {b!r}"
+            if np.issubdtype(t, np.integer):
+                assert_(b == 6765201, msg)
+            else:
+                assert_almost_equal(b, 6765201, err_msg=msg)
+
+    def test_integers_to_negative_integer_power(self):
+        # Note that the combination of uint64 with a signed integer
+        # has common type np.float64. The other combinations should all
+        # raise a ValueError for integer ** negative integer.
+        exp = [np.array(-1, dt)[()] for dt in 'bhilq']
+
+        # 1 ** -1 possible special case
+        base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
+        for i1, i2 in itertools.product(base, exp):
+            if i1.dtype != np.uint64:
+                assert_raises(ValueError, operator.pow, i1, i2)
+            else:
+                res = operator.pow(i1, i2)
+                assert_(res.dtype.type is np.float64)
+                assert_almost_equal(res, 1.)
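+        # (Aside: only uint64 escapes the ValueError because uint64 mixed
+        # with a signed exponent promotes to float64, where 1.0 ** -1.0 is
+        # well defined.)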
+ + # -1 ** -1 possible special case + base = [np.array(-1, dt)[()] for dt in 'bhilq'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, -1.) + + # 2 ** -1 perhaps generic + base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ'] + for i1, i2 in itertools.product(base, exp): + if i1.dtype != np.uint64: + assert_raises(ValueError, operator.pow, i1, i2) + else: + res = operator.pow(i1, i2) + assert_(res.dtype.type is np.float64) + assert_almost_equal(res, .5) + + def test_mixed_types(self): + typelist = [np.int8, np.int16, np.float16, + np.float32, np.float64, np.int8, + np.int16, np.int32, np.int64] + for t1 in typelist: + for t2 in typelist: + a = t1(3) + b = t2(2) + result = a**b + msg = f"error with {t1!r} and {t2!r}:got {result!r}, expected {9!r}" + if np.issubdtype(np.dtype(result), np.integer): + assert_(result == 9, msg) + else: + assert_almost_equal(result, 9, err_msg=msg) + + def test_modular_power(self): + # modular power is not implemented, so ensure it errors + a = 5 + b = 4 + c = 10 + expected = pow(a, b, c) # noqa: F841 + for t in (np.int32, np.float32, np.complex64): + # note that 3-operand power only dispatches on the first argument + assert_raises(TypeError, operator.pow, t(a), b, c) + assert_raises(TypeError, operator.pow, np.array(t(a)), b, c) + + +def floordiv_and_mod(x, y): + return (x // y, x % y) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestModulus: + + def test_modulus_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floordiv_and_mod, divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1 * 71, dtype=dt1)[()] + b = np.array(sg2 * 19, dtype=dt2)[()] + div, rem = op(a, b) + assert_equal(div * b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_modulus_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = [divmod(*t) for t in arg] + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. 
+        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+        for op in [floordiv_and_mod, divmod]:
+            for dt in np.typecodes['Float']:
+                msg = f'op: {op.__name__}, dtype: {dt}'
+                fa = a.astype(dt)
+                fb = b.astype(dt)
+                # use list comprehension so a_ and b_ are scalars
+                div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
+                assert_equal(div, tgtdiv, err_msg=msg)
+                assert_equal(rem, tgtrem, err_msg=msg)
+
+    def test_float_modulus_roundoff(self):
+        # gh-6127
+        dt = np.typecodes['Float']
+        for op in [floordiv_and_mod, divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1 * 78 * 6e-8, dtype=dt1)[()]
+                    b = np.array(sg2 * 6e-8, dtype=dt2)[()]
+                    div, rem = op(a, b)
+                    # Equal assertion should hold when fmod is used
+                    assert_equal(div * b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
+
+    def test_float_modulus_corner_cases(self):
+        # Check remainder magnitude.
+        for dt in np.typecodes['Float']:
+            b = np.array(1.0, dtype=dt)
+            a = np.nextafter(np.array(0.0, dtype=dt), -b)
+            rem = operator.mod(a, b)
+            assert_(rem <= b, f'dt: {dt}')
+            rem = operator.mod(-a, -b)
+            assert_(rem >= -b, f'dt: {dt}')
+
+        # Check nans, inf
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+            sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
+            sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
+            sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+            sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+            for dt in np.typecodes['Float']:
+                fone = np.array(1.0, dtype=dt)
+                fzer = np.array(0.0, dtype=dt)
+                finf = np.array(np.inf, dtype=dt)
+                fnan = np.array(np.nan, dtype=dt)
+                rem = operator.mod(fone, fzer)
+                assert_(np.isnan(rem), f'dt: {dt}')
+                # MSVC 2008 returns NaN here, so disable the check.
+                #rem = operator.mod(fone, finf)
+                #assert_(rem == fone, 'dt: %s' % dt)
+                rem = operator.mod(fone, fnan)
+                assert_(np.isnan(rem), f'dt: {dt}')
+                rem = operator.mod(finf, fone)
+                assert_(np.isnan(rem), f'dt: {dt}')
+                for op in [floordiv_and_mod, divmod]:
+                    div, mod = op(fone, fzer)
+                    # `and`-chaining the asserts would short-circuit after the
+                    # first call (assert_ returns None), so assert separately:
+                    assert_(np.isinf(div))
+                    assert_(np.isnan(mod))
+
+    def test_inplace_floordiv_handling(self):
+        # issue gh-12927
+        # this only applies to in-place floordiv //=, because the output type
+        # promotes to float which does not fit
+        a = np.array([1, 2], np.int64)
+        b = np.array([1, 2], np.uint64)
+        with pytest.raises(TypeError,
+                           match=r"Cannot cast ufunc 'floor_divide' output from"):
+            a //= b
+
+
+class TestComparison:
+    def test_comparison_different_types(self):
+        x = np.array(1)
+        y = np.array('s')
+        eq = x == y
+        neq = x != y
+        assert eq is np.bool_(False)
+        assert neq is np.bool_(True)
+
+
+class TestComplexDivision:
+    def test_zero_division(self):
+        with np.errstate(all="ignore"):
+            for t in [np.complex64, np.complex128]:
+                a = t(0.0)
+                b = t(1.0)
+                assert_(np.isinf(b / a))
+                b = t(complex(np.inf, np.inf))
+                assert_(np.isinf(b / a))
+                b = t(complex(np.inf, np.nan))
+                assert_(np.isinf(b / a))
+                b = t(complex(np.nan, np.inf))
+                assert_(np.isinf(b / a))
+                b = t(complex(np.nan, np.nan))
+                assert_(np.isnan(b / a))
+                b = t(0.)
+ assert_(np.isnan(b / a)) + + def test_signed_zeros(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = ( + (( 0.0, -1.0), ( 0.0, 1.0), (-1.0, -0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + (( 0.0, -1.0), (-0.0, -1.0), ( 1.0, 0.0)), + (( 0.0, -1.0), (-0.0, 1.0), (-1.0, 0.0)), + (( 0.0, 1.0), ( 0.0, -1.0), (-1.0, 0.0)), + (( 0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, -1.0), ( 0.0, -1.0), ( 1.0, -0.0)), + ((-0.0, 1.0), ( 0.0, -1.0), (-1.0, -0.0)) + ) + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + def test_branches(self): + with np.errstate(all="ignore"): + for t in [np.complex64, np.complex128]: + # tupled (numerator, denominator, expected) + # for testing as expected == numerator/denominator + data = [] + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by else condition as neither are == 0 + data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0))) + + # trigger branch: real(fabs(denom)) > imag(fabs(denom)) + # followed by if condition as both are == 0 + # is performed in test_zero_division(), so this is skipped + + # trigger else if branch: real(fabs(denom)) < imag(fabs(denom)) + data.append(((1.0, 2.0), (1.0, 2.0), (1.0, 0.0))) + + for cases in data: + n = cases[0] + d = cases[1] + ex = cases[2] + result = t(complex(n[0], n[1])) / t(complex(d[0], d[1])) + # check real and imag parts separately to avoid comparison + # in array context, which does not account for signed zeros + assert_equal(result.real, ex[0]) + assert_equal(result.imag, ex[1]) + + +class TestConversion: + def test_int_from_long(self): + l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18] + li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18] + for T in [None, np.float64, np.int64]: + a = np.array(l, dtype=T) + assert_equal([int(_m) for _m in a], li) + + a = np.array(l[:3], dtype=np.uint64) + assert_equal([int(_m) for _m in a], li[:3]) + + def test_iinfo_long_values(self): + for code in 'bBhH': + with pytest.raises(OverflowError): + np.array(np.iinfo(code).max + 1, dtype=code) + + for code in np.typecodes['AllInteger']: + res = np.array(np.iinfo(code).max, dtype=code) + tgt = np.iinfo(code).max + assert_(res == tgt) + + for code in np.typecodes['AllInteger']: + res = np.dtype(code).type(np.iinfo(code).max) + tgt = np.iinfo(code).max + assert_(res == tgt) + + def test_int_raise_behaviour(self): + def overflow_error_func(dtype): + dtype(np.iinfo(dtype).max + 1) + + for code in [np.int_, np.uint, np.longlong, np.ulonglong]: + assert_raises(OverflowError, overflow_error_func, code) + + def test_int_from_infinite_longdouble(self): + # gh-627 + x = np.longdouble(np.inf) + assert_raises(OverflowError, int, x) + with suppress_warnings() as sup: + sup.record(ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, int, x) + assert_equal(len(sup.log), 1) + + @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)") + def test_int_from_infinite_longdouble___int__(self): + x = np.longdouble(np.inf) + assert_raises(OverflowError, x.__int__) + with suppress_warnings() as sup: + sup.record(ComplexWarning) + x = np.clongdouble(np.inf) + assert_raises(OverflowError, 
x.__int__)
+        assert_equal(len(sup.log), 1)
+
+    @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
+                        reason="long double is same as double")
+    @pytest.mark.skipif(platform.machine().startswith("ppc"),
+                        reason="IBM double double")
+    def test_int_from_huge_longdouble(self):
+        # Produce a longdouble that would overflow a double,
+        # use exponent that avoids bug in Darwin pow function.
+        exp = np.finfo(np.double).maxexp - 1
+        huge_ld = 2 * 1234 * np.longdouble(2) ** exp
+        huge_i = 2 * 1234 * 2 ** exp
+        assert_(huge_ld != np.inf)
+        assert_equal(int(huge_ld), huge_i)
+
+    def test_int_from_longdouble(self):
+        x = np.longdouble(1.5)
+        assert_equal(int(x), 1)
+        x = np.longdouble(-10.5)
+        assert_equal(int(x), -10)
+
+    def test_numpy_scalar_relational_operators(self):
+        # All integer
+        for dt1 in np.typecodes['AllInteger']:
+            assert_(1 > np.array(0, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(not 1 < np.array(0, dtype=dt1)[()], f"type {dt1} failed")
+
+            for dt2 in np.typecodes['AllInteger']:
+                assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+
+        # Unsigned integers
+        for dt1 in 'BHILQP':
+            assert_(-1 < np.array(1, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(not -1 > np.array(1, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(-1 != np.array(1, dtype=dt1)[()], f"type {dt1} failed")
+
+            # unsigned vs signed
+            for dt2 in 'bhilqp':
+                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+
+        # Signed integers and floats
+        for dt1 in 'bhlqp' + np.typecodes['Float']:
+            assert_(1 > np.array(-1, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(not 1 < np.array(-1, dtype=dt1)[()], f"type {dt1} failed")
+            assert_(-1 == np.array(-1, dtype=dt1)[()], f"type {dt1} failed")
+
+            for dt2 in 'bhlqp' + np.typecodes['Float']:
+                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+                assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
+                        f"type {dt1} and {dt2} failed")
+
+    def test_scalar_comparison_to_none(self):
+        # Scalars should just return False and not give a warning.
+        # The comparisons are flagged by pep8, ignore that.
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', FutureWarning)
+            assert_(not np.float32(1) == None)  # noqa: E711
+            assert_(not np.str_('test') == None)  # noqa: E711
+            # This is dubious (see below):
+            assert_(not np.datetime64('NaT') == None)  # noqa: E711
+
+            assert_(np.float32(1) != None)  # noqa: E711
+            assert_(np.str_('test') != None)  # noqa: E711
+            # This is dubious (see below):
+            assert_(np.datetime64('NaT') != None)  # noqa: E711
+        assert_(len(w) == 0)
+
+        # For documentation purposes, this is why the datetime is dubious.
+        # At the time of deprecation this was no behaviour change, but
+        # it has to be considered when the deprecations are done.
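+        # Hedged aside (illustrative, not part of the test): the special
+        # casing above applies to the operators only; the np.equal ufunc
+        # below does not special-case None, hence "dubious":
+        #
+        #     >>> np.datetime64('NaT') == None
+        #     False
+        #     >>> bool(np.equal(np.datetime64('NaT'), None))
+        #     True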
+ assert_(np.equal(np.datetime64('NaT'), None)) + + +#class TestRepr: +# def test_repr(self): +# for t in types: +# val = t(1197346475.0137341) +# val_repr = repr(val) +# val2 = eval(val_repr) +# assert_equal( val, val2 ) + + +class TestRepr: + def _test_type_repr(self, t): + finfo = np.finfo(t) + last_fraction_bit_idx = finfo.nexp + finfo.nmant + last_exponent_bit_idx = finfo.nexp + storage_bytes = np.dtype(t).itemsize * 8 + # could add some more types to the list below + for which in ['small denorm', 'small norm']: + # Values from https://en.wikipedia.org/wiki/IEEE_754 + constr = np.array([0x00] * storage_bytes, dtype=np.uint8) + if which == 'small denorm': + byte = last_fraction_bit_idx // 8 + bytebit = 7 - (last_fraction_bit_idx % 8) + constr[byte] = 1 << bytebit + elif which == 'small norm': + byte = last_exponent_bit_idx // 8 + bytebit = 7 - (last_exponent_bit_idx % 8) + constr[byte] = 1 << bytebit + else: + raise ValueError('hmm') + val = constr.view(t)[0] + val_repr = repr(val) + val2 = t(eval(val_repr)) + if not (val2 == 0 and val < 1e-100): + assert_equal(val, val2) + + def test_float_repr(self): + # long double test cannot work, because eval goes through a python + # float + for t in [np.float32, np.float64]: + self._test_type_repr(t) + + +if not IS_PYPY: + # sys.getsizeof() is not valid on PyPy + class TestSizeOf: + + def test_equal_nbytes(self): + for type in types: + x = type(0) + assert_(sys.getsizeof(x) > x.nbytes) + + def test_error(self): + d = np.float32() + assert_raises(TypeError, d.__sizeof__, "a") + + +class TestMultiply: + def test_seq_repeat(self): + # Test that basic sequences get repeated when multiplied with + # numpy integers. And errors are raised when multiplied with others. + # Some of this behaviour may be controversial and could be open for + # change. + accepted_types = set(np.typecodes["AllInteger"]) + deprecated_types = {'?'} + forbidden_types = ( + set(np.typecodes["All"]) - accepted_types - deprecated_types) + forbidden_types -= {'V'} # can't default-construct void scalars + + for seq_type in (list, tuple): + seq = seq_type([1, 2, 3]) + for numpy_type in accepted_types: + i = np.dtype(numpy_type).type(2) + assert_equal(seq * i, seq * int(i)) + assert_equal(i * seq, int(i) * seq) + + for numpy_type in deprecated_types: + i = np.dtype(numpy_type).type() + with assert_raises(TypeError): + operator.mul(seq, i) + + for numpy_type in forbidden_types: + i = np.dtype(numpy_type).type() + assert_raises(TypeError, operator.mul, seq, i) + assert_raises(TypeError, operator.mul, i, seq) + + def test_no_seq_repeat_basic_array_like(self): + # Test that an array-like which does not know how to be multiplied + # does not attempt sequence repeat (raise TypeError). + # See also gh-7428. + class ArrayLike: + def __init__(self, arr): + self.arr = arr + + def __array__(self, dtype=None, copy=None): + return self.arr + + # Test for simple ArrayLike above and memoryviews (original report) + for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))): + assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.)) + assert_array_equal(np.float32(3.) 
* arr_like, np.full(3, 3.)) + assert_array_equal(arr_like * np.int_(3), np.full(3, 3)) + assert_array_equal(np.int_(3) * arr_like, np.full(3, 3)) + + +class TestNegative: + def test_exceptions(self): + a = np.ones((), dtype=np.bool)[()] + assert_raises(TypeError, operator.neg, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + if dt in np.typecodes['UnsignedInteger']: + st = np.dtype(dt).type + max = st(np.iinfo(dt).max) + assert_equal(operator.neg(a), max) + else: + assert_equal(operator.neg(a) + a, 0) + +class TestSubtract: + def test_exceptions(self): + a = np.ones((), dtype=np.bool)[()] + assert_raises(TypeError, operator.sub, a, a) + + def test_result(self): + types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for dt in types: + a = np.ones((), dtype=dt)[()] + assert_equal(operator.sub(a, a), 0) + + +class TestAbs: + def _test_abs_func(self, absfunc, test_dtype): + x = test_dtype(-1.5) + assert_equal(absfunc(x), 1.5) + x = test_dtype(0.0) + res = absfunc(x) + # assert_equal() checks zero signedness + assert_equal(res, 0.0) + x = test_dtype(-0.0) + res = absfunc(x) + assert_equal(res, 0.0) + + x = test_dtype(np.finfo(test_dtype).max) + assert_equal(absfunc(x), x.real) + + with suppress_warnings() as sup: + sup.filter(UserWarning) + x = test_dtype(np.finfo(test_dtype).tiny) + assert_equal(absfunc(x), x.real) + + x = test_dtype(np.finfo(test_dtype).min) + assert_equal(absfunc(x), -x.real) + + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) + def test_builtin_abs(self, dtype): + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _pep440.parse(platform.release().split("-")[0]) + < _pep440.Version("3.3.0") + ) + ): + pytest.xfail( + reason="absl is computed in double precision on cygwin < 3.3" + ) + self._test_abs_func(abs, dtype) + + @pytest.mark.parametrize("dtype", floating_types + complex_floating_types) + def test_numpy_abs(self, dtype): + if ( + sys.platform == "cygwin" and dtype == np.clongdouble and + ( + _pep440.parse(platform.release().split("-")[0]) + < _pep440.Version("3.3.0") + ) + ): + pytest.xfail( + reason="absl is computed in double precision on cygwin < 3.3" + ) + self._test_abs_func(np.abs, dtype) + +class TestBitShifts: + + @pytest.mark.parametrize('type_code', np.typecodes['AllInteger']) + @pytest.mark.parametrize('op', + [operator.rshift, operator.lshift], ids=['>>', '<<']) + def test_shift_all_bits(self, type_code, op): + """Shifts where the shift amount is the width of the type or wider """ + # gh-2449 + dt = np.dtype(type_code) + nbits = dt.itemsize * 8 + for val in [5, -5]: + for shift in [nbits, nbits + 4]: + val_scl = np.array(val).astype(dt)[()] + shift_scl = dt.type(shift) + res_scl = op(val_scl, shift_scl) + if val_scl < 0 and op is operator.rshift: + # sign bit is preserved + assert_equal(res_scl, -1) + else: + assert_equal(res_scl, 0) + + # Result on scalars should be the same as on arrays + val_arr = np.array([val_scl] * 32, dtype=dt) + shift_arr = np.array([shift] * 32, dtype=dt) + res_arr = op(val_arr, shift_arr) + assert_equal(res_arr, res_scl) + + +class TestHash: + @pytest.mark.parametrize("type_code", np.typecodes['AllInteger']) + def test_integer_hashes(self, type_code): + scalar = np.dtype(type_code).type + for i in range(128): + assert hash(i) == hash(scalar(i)) + + 
@pytest.mark.parametrize("type_code", np.typecodes['AllFloat']) + def test_float_and_complex_hashes(self, type_code): + scalar = np.dtype(type_code).type + for val in [np.pi, np.inf, 3, 6.]: + numpy_val = scalar(val) + # Cast back to Python, in case the NumPy scalar has less precision + if numpy_val.dtype.kind == 'c': + val = complex(numpy_val) + else: + val = float(numpy_val) + assert val == numpy_val + assert hash(val) == hash(numpy_val) + + if hash(float(np.nan)) != hash(float(np.nan)): + # If Python distinguishes different NaNs we do so too (gh-18833) + assert hash(scalar(np.nan)) != hash(scalar(np.nan)) + + @pytest.mark.parametrize("type_code", np.typecodes['Complex']) + def test_complex_hashes(self, type_code): + # Test some complex valued hashes specifically: + scalar = np.dtype(type_code).type + for val in [np.pi + 1j, np.inf - 3j, 3j, 6. + 1j]: + numpy_val = scalar(val) + assert hash(complex(numpy_val)) == hash(numpy_val) + + +@contextlib.contextmanager +def recursionlimit(n): + o = sys.getrecursionlimit() + try: + sys.setrecursionlimit(n) + yield + finally: + sys.setrecursionlimit(o) + + +@given(sampled_from(objecty_things), + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) +def test_operator_object_left(o, op, type_): + try: + with recursionlimit(200): + op(o, type_(1)) + except TypeError: + pass + + +@given(sampled_from(objecty_things), + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) +def test_operator_object_right(o, op, type_): + try: + with recursionlimit(200): + op(type_(1), o) + except TypeError: + pass + + +@given(sampled_from(binary_operators_for_scalars), + sampled_from(types), + sampled_from(types)) +def test_operator_scalars(op, type1, type2): + try: + op(type1(1), type2(1)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", binary_operators_for_scalars) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_operators_with_obj(sctype, op): + # This is/used to be tricky, because NumPy generally falls back to + # using the ufunc via `np.asarray()`, this effectively might do: + # longdouble + None + # -> asarray(longdouble) + np.array(None, dtype=object) + # -> asarray(longdouble).astype(object) + np.array(None, dtype=object) + # And after getting the scalars in the inner loop: + # -> longdouble + None + # + # That would recurse infinitely. Other scalars return the python object + # on cast, so this type of things works OK. + # + # As of NumPy 2.1, this has been consolidated into the np.generic binops + # and now checks `.item()`. That also allows the below path to work now. + try: + op(sctype(3), None) + except TypeError: + pass + try: + op(None, sctype(3)) + except TypeError: + pass + + +@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.sub]) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_with_arrlike(sctype, op): + # As of NumPy 2.1, longdouble behaves like other types and can coerce + # e.g. lists. (Not necessarily better, but consistent.) 
+
+    assert_array_equal(op(sctype(3), [1, 2]), op(3, np.array([1, 2])))
+    assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3))
+
+
+@pytest.mark.parametrize("op", binary_operators_for_scalars)
+@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
+@np.errstate(all="ignore")
+def test_longdouble_operators_with_large_int(sctype, op):
+    # (See `test_longdouble_operators_with_obj` for why longdouble is special)
+    # NEP 50 means that the result is clearly a (c)longdouble here:
+    if sctype == np.clongdouble and op in [operator.mod, operator.floordiv]:
+        # The above operators are not supported for complex though...
+        with pytest.raises(TypeError):
+            op(sctype(3), 2**64)
+        with pytest.raises(TypeError):
+            op(2**64, sctype(3))
+    else:
+        assert op(sctype(3), -2**64) == op(sctype(3), sctype(-2**64))
+        assert op(2**64, sctype(3)) == op(sctype(2**64), sctype(3))
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.parametrize("operation", [
+    lambda min, max: max + max,
+    lambda min, max: min - max,
+    lambda min, max: max * max], ids=["+", "-", "*"])
+def test_scalar_integer_operation_overflow(dtype, operation):
+    st = np.dtype(dtype).type
+    min = st(np.iinfo(dtype).min)
+    max = st(np.iinfo(dtype).max)
+
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        operation(min, max)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+@pytest.mark.parametrize("operation", [
+    lambda min, neg_1: -min,
+    lambda min, neg_1: abs(min),
+    lambda min, neg_1: min * neg_1,
+    pytest.param(lambda min, neg_1: min // neg_1,
+                 marks=pytest.mark.skip(reason="broken on some platforms"))],
+    ids=["neg", "abs", "*", "//"])
+def test_scalar_signed_integer_overflow(dtype, operation):
+    # The minimum signed integer can "overflow" for some additional operations
+    st = np.dtype(dtype).type
+    min = st(np.iinfo(dtype).min)
+    neg_1 = st(-1)
+
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        operation(min, neg_1)
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"])
+def test_scalar_unsigned_integer_overflow(dtype):
+    val = np.dtype(dtype).type(8)
+    with pytest.warns(RuntimeWarning, match="overflow encountered"):
+        -val
+
+    zero = np.dtype(dtype).type(0)
+    -zero  # does not warn
+
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+@pytest.mark.parametrize("operation", [
+    lambda val, zero: val // zero,
+    lambda val, zero: val % zero, ], ids=["//", "%"])
+def test_scalar_integer_operation_divbyzero(dtype, operation):
+    st = np.dtype(dtype).type
+    val = st(100)
+    zero = st(0)
+
+    with pytest.warns(RuntimeWarning, match="divide by zero"):
+        operation(val, zero)
+
+
+ops_with_names = [
+    ("__lt__", "__gt__", operator.lt, True),
+    ("__le__", "__ge__", operator.le, True),
+    ("__eq__", "__eq__", operator.eq, True),
+    # Note __op__ and __rop__ may be identical here:
+    ("__ne__", "__ne__", operator.ne, True),
+    ("__gt__", "__lt__", operator.gt, True),
+    ("__ge__", "__le__", operator.ge, True),
+    ("__floordiv__", "__rfloordiv__", operator.floordiv, False),
+    ("__truediv__", "__rtruediv__", operator.truediv, False),
+    ("__add__", "__radd__", operator.add, False),
+    ("__mod__", "__rmod__", operator.mod, False),
+    ("__mul__", "__rmul__", operator.mul, False),
+    ("__pow__", "__rpow__", operator.pow, False),
+    ("__sub__", "__rsub__", operator.sub, False),
+]
+
+
+@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
+@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble])
+def 
test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
+    """
+    This test covers scalar subclass deferral. Note that this is exceedingly
+    complicated, especially since it tends to fall back to the array paths and
+    these additionally add the "array priority" mechanism.
+
+    The behaviour was modified subtly in 1.22 (to make it closer to how Python
+    scalars work). Due to its complexity, and the fact that subclassing NumPy
+    scalars is probably a bad idea to begin with, there is probably room
+    for adjustments here.
+    """
+    class myf_simple1(sctype):
+        pass
+
+    class myf_simple2(sctype):
+        pass
+
+    def op_func(self, other):
+        return __op__
+
+    def rop_func(self, other):
+        return __rop__
+
+    myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func})
+
+    # inheritance has to override, or this is correctly lost:
+    res = op(myf_simple1(1), myf_simple2(2))
+    assert type(res) == sctype or type(res) == np.bool
+    assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2)  # inherited
+
+    # Two independent subclasses do not really define an order. This could
+    # be attempted, but we do not since Python's `int` does neither:
+    assert op(myf_op(1), myf_simple1(2)) == __op__
+    assert op(myf_simple1(1), myf_op(2)) == op(1, 2)  # inherited
+
+
+def test_longdouble_complex():
+    # Simple test to check longdouble and complex combinations, since these
+    # need to go through promotion, which longdouble needs to be careful about.
+    x = np.longdouble(1)
+    assert x + 1j == 1 + 1j
+    assert 1j + x == 1 + 1j
+
+
+@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
+@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
+def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
+    # This tests that python scalar subclasses behave like a float64 (if they
+    # don't override it).
+    # In an earlier version of NEP 50, they behaved like the Python builtins.
+    def op_func(self, other):
+        return __op__
+
+    def rop_func(self, other):
+        return __rop__
+
+    # Check that deferring is indicated using `__array_ufunc__`:
+    myt = type("myt", (subtype,),
+               {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
+
+    # Just like normally, we should never presume we can modify the float.
+    assert op(myt(1), np.float64(2)) == __op__
+    assert op(np.float64(1), myt(2)) == __rop__
+
+    if op in {operator.mod, operator.floordiv} and subtype == complex:
+        return  # modulo is not supported for complex. Do not test.
+
+    if __rop__ == __op__:
+        return
+
+    # When no deferring is indicated, subclasses are handled normally.
+    myt = type("myt", (subtype,), {__rop__: rop_func})
+    behaves_like = lambda x: np.array(subtype(x))[()]
+
+    # Check for float32, as a float subclass float64 may behave differently
+    res = op(myt(1), np.float16(2))
+    expected = op(behaves_like(1), np.float16(2))
+    assert res == expected
+    assert type(res) == type(expected)
+    res = op(np.float32(2), myt(1))
+    expected = op(np.float32(2), behaves_like(1))
+    assert res == expected
+    assert type(res) == type(expected)
+
+    # Same check for longdouble (compare via dtype to accept float64 when
+    # longdouble has the identical size), which is currently not perfectly
+    # consistent.
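+    # Hedged aside: comparing via np.dtype(type(...)) accepts the case where
+    # two distinct scalar types share one dtype, e.g. on platforms where
+    # long double is just double (illustrative, platform-dependent):
+    #
+    #     >>> np.dtype(np.longdouble) == np.dtype(np.float64)
+    #     True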
+    res = op(myt(1), np.longdouble(2))
+    expected = op(behaves_like(1), np.longdouble(2))
+    assert res == expected
+    assert np.dtype(type(res)) == np.dtype(type(expected))
+    res = op(np.float32(2), myt(1))
+    expected = op(np.float32(2), behaves_like(1))
+    assert res == expected
+    assert np.dtype(type(res)) == np.dtype(type(expected))
+
+
+def test_truediv_int():
+    # This should work, as the result is float:
+    assert np.uint8(3) / 123454 == np.float64(3) / 123454
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("op",
+    # TODO: Power is a bit special, but here mostly bools seem to behave oddly
+    [op for op in binary_operators_for_scalars if op is not operator.pow])
+@pytest.mark.parametrize("sctype", types)
+@pytest.mark.parametrize("other_type", [float, int, complex])
+@pytest.mark.parametrize("rop", [True, False])
+def test_scalar_matches_array_op_with_pyscalar(op, sctype, other_type, rop):
+    # Check that the ufunc path matches by coercing to an array explicitly
+    val1 = sctype(2)
+    val2 = other_type(2)
+
+    if rop:
+        _op = op
+        op = lambda x, y: _op(y, x)
+
+    try:
+        res = op(val1, val2)
+    except TypeError:
+        try:
+            expected = op(np.asarray(val1), val2)
+            raise AssertionError("ufunc didn't raise.")
+        except TypeError:
+            return
+    else:
+        expected = op(np.asarray(val1), val2)
+
+    # Note that we only check dtype equivalency, as ufuncs may pick the lower
+    # dtype if they are equivalent.
+    assert res == expected
+    if isinstance(val1, float) and other_type is complex and rop:
+        # Python complex accepts float subclasses, so we don't get a chance
+        # and the result may be a Python complex (thus, the `np.array()`)
+        assert np.array(res).dtype == expected.dtype
+    else:
+        assert res.dtype == expected.dtype
diff --git a/python/numpy/_core/tests/test_scalarprint.py b/python/numpy/_core/tests/test_scalarprint.py
new file mode 100644
index 000000000..38ed7780f
--- /dev/null
+++ b/python/numpy/_core/tests/test_scalarprint.py
@@ -0,0 +1,403 @@
+""" Test printing of scalar types.
+
+"""
+import platform
+
+import pytest
+
+import numpy as np
+from numpy.testing import IS_MUSL, assert_, assert_equal, assert_raises
+
+
+class TestRealScalars:
+    def test_str(self):
+        svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
+        styps = [np.float16, np.float32, np.float64, np.longdouble]
+        wanted = [
+            ['0.0', '0.0', '0.0', '0.0' ],  # noqa: E202
+            ['-0.0', '-0.0', '-0.0', '-0.0'],
+            ['1.0', '1.0', '1.0', '1.0' ],  # noqa: E202
+            ['-1.0', '-1.0', '-1.0', '-1.0'],
+            ['inf', 'inf', 'inf', 'inf' ],  # noqa: E202
+            ['-inf', '-inf', '-inf', '-inf'],
+            ['nan', 'nan', 'nan', 'nan' ]]  # noqa: E202
+
+        for wants, val in zip(wanted, svals):
+            for want, styp in zip(wants, styps):
+                msg = f'for str({np.dtype(styp).name}({val!r}))'
+                assert_equal(str(styp(val)), want, err_msg=msg)
+
+    def test_scalar_cutoffs(self):
+        # test that both the str and repr of np.float64 behave
+        # like python floats in python3.
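+        # Hedged illustration of the cutoffs checked below (CPython's own
+        # float formatting, which np.float64 is expected to match):
+        #
+        #     >>> str(1e15), str(1e16)
+        #     ('1000000000000000.0', '1e+16')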
+ def check(v): + assert_equal(str(np.float64(v)), str(v)) + assert_equal(str(np.float64(v)), repr(v)) + assert_equal(repr(np.float64(v)), f"np.float64({v!r})") + assert_equal(repr(np.float64(v)), f"np.float64({v})") + + # check we use the same number of significant digits + check(1.12345678901234567890) + check(0.0112345678901234567890) + + # check switch from scientific output to positional and back + check(1e-5) + check(1e-4) + check(1e15) + check(1e16) + + test_cases_gh_28679 = [ + (np.half, -0.000099, "-9.9e-05"), + (np.half, 0.0001, "0.0001"), + (np.half, 999, "999.0"), + (np.half, -1000, "-1e+03"), + (np.single, 0.000099, "9.9e-05"), + (np.single, -0.000100001, "-0.000100001"), + (np.single, 999999, "999999.0"), + (np.single, -1000000, "-1e+06") + ] + + @pytest.mark.parametrize("dtype, input_val, expected_str", test_cases_gh_28679) + def test_gh_28679(self, dtype, input_val, expected_str): + # test cutoff to exponent notation for half and single + assert_equal(str(dtype(input_val)), expected_str) + + test_cases_legacy_2_2 = [ + (np.half(65504), "65500.0"), + (np.single(1.e15), "1000000000000000.0"), + (np.single(1.e16), "1e+16"), + ] + + @pytest.mark.parametrize("input_val, expected_str", test_cases_legacy_2_2) + def test_legacy_2_2_mode(self, input_val, expected_str): + # test legacy cutoff to exponent notation for half and single + with np.printoptions(legacy='2.2'): + assert_equal(str(input_val), expected_str) + + def test_dragon4(self): + # these tests are adapted from Ryan Juckett's dragon4 implementation, + # see dragon4.c for details. + + fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k) + fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k) + fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k) + fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k) + + preckwd = lambda prec: {'unique': False, 'precision': prec} + + assert_equal(fpos32('1.0'), "1.") + assert_equal(fsci32('1.0'), "1.e+00") + assert_equal(fpos32('10.234'), "10.234") + assert_equal(fpos32('-10.234'), "-10.234") + assert_equal(fsci32('10.234'), "1.0234e+01") + assert_equal(fsci32('-10.234'), "-1.0234e+01") + assert_equal(fpos32('1000.0'), "1000.") + assert_equal(fpos32('1.0', precision=0), "1.") + assert_equal(fsci32('1.0', precision=0), "1.e+00") + assert_equal(fpos32('10.234', precision=0), "10.") + assert_equal(fpos32('-10.234', precision=0), "-10.") + assert_equal(fsci32('10.234', precision=0), "1.e+01") + assert_equal(fsci32('-10.234', precision=0), "-1.e+01") + assert_equal(fpos32('10.234', precision=2), "10.23") + assert_equal(fsci32('-10.234', precision=2), "-1.02e+01") + assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)), + '9.9999999999999995e-08') + assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)), + '9.8813129168249309e-324') + assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)), + '9.9999999999999694e-311') + + # test rounding + # 3.1415927410 is closest float32 to np.pi + assert_equal(fpos32('3.14159265358979323846', **preckwd(10)), + "3.1415927410") + assert_equal(fsci32('3.14159265358979323846', **preckwd(10)), + "3.1415927410e+00") + assert_equal(fpos64('3.14159265358979323846', **preckwd(10)), + "3.1415926536") + assert_equal(fsci64('3.14159265358979323846', **preckwd(10)), + "3.1415926536e+00") + # 299792448 is closest float32 to 299792458 + assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000") + assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08") + 
assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000") + assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08") + + assert_equal(fpos32('3.14159265358979323846', **preckwd(25)), + "3.1415927410125732421875000") + assert_equal(fpos64('3.14159265358979323846', **preckwd(50)), + "3.14159265358979311599796346854418516159057617187500") + assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793") + + # smallest numbers + assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149), + "0.00000000000000000000000000000000000000000000140129846432" + "4817070923729583289916131280261941876515771757068283889791" + "08268586060148663818836212158203125") + + assert_equal(fpos64(5e-324, unique=False, precision=1074), + "0.00000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000000000000000000000000000" + "0000000000000000000000000000000000049406564584124654417656" + "8792868221372365059802614324764425585682500675507270208751" + "8652998363616359923797965646954457177309266567103559397963" + "9877479601078187812630071319031140452784581716784898210368" + "8718636056998730723050006387409153564984387312473397273169" + "6151400317153853980741262385655911710266585566867681870395" + "6031062493194527159149245532930545654440112748012970999954" + "1931989409080416563324524757147869014726780159355238611550" + "1348035264934720193790268107107491703332226844753335720832" + "4319360923828934583680601060115061698097530783422773183292" + "4790498252473077637592724787465608477820373446969953364701" + "7972677717585125660551199131504891101451037862738167250955" + "8373897335989936648099411642057026370902792427675445652290" + "87538682506419718265533447265625") + + # largest numbers + f32x = np.finfo(np.float32).max + assert_equal(fpos32(f32x, **preckwd(0)), + "340282346638528859811704183484516925440.") + assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)), + "1797693134862315708145274237317043567980705675258449965989" + "1747680315726078002853876058955863276687817154045895351438" + "2464234321326889464182768467546703537516986049910576551282" + "0762454900903893289440758685084551339423045832369032229481" + "6580855933212334827479782620414472316873817718091929988125" + "0404026184124858368.") + # Warning: In unique mode only the integer digits necessary for + # uniqueness are computed, the rest are 0. + assert_equal(fpos32(f32x), + "340282350000000000000000000000000000000.") + + # Further tests of zero-padding vs rounding in different combinations + # of unique, fractional, precision, min_digits + # precision can only reduce digits, not add them. + # min_digits can only extend digits, not reduce them. 
+        assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0),
+                     "340282350000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4),
+                     "340282350000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0),
+                     "340282346638528859811704183484516925440.")
+        assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4),
+                     "340282346638528859811704183484516925440.0000")
+        assert_equal(fpos32(f32x, unique=True, fractional=True,
+                            min_digits=4, precision=4),
+                     "340282346638528859811704183484516925440.0000")
+        assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False,
+                      precision=0)
+        assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4),
+                     "340300000000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20),
+                     "340282350000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4),
+                     "340282350000000000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=False,
+                            min_digits=20),
+                     "340282346638528859810000000000000000000.")
+        assert_equal(fpos32(f32x, unique=True, fractional=False,
+                            min_digits=15),
+                     "340282346638529000000000000000000000000.")
+        assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4),
+                     "340300000000000000000000000000000000000.")
+        # test that unique rounding is preserved when precision is supplied
+        # but no extra digits need to be printed (gh-18609)
+        a = np.float64.fromhex('-1p-97')
+        assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30')
+        assert_equal(fsci64(a, unique=False, precision=15),
+                     '-6.310887241768094e-30')
+        assert_equal(fsci64(a, unique=True, precision=15),
+                     '-6.310887241768095e-30')
+        assert_equal(fsci64(a, unique=True, min_digits=15),
+                     '-6.310887241768095e-30')
+        assert_equal(fsci64(a, unique=True, precision=15, min_digits=15),
+                     '-6.310887241768095e-30')
+        # adds/removes digits in unique mode with unbiased rounding
+        assert_equal(fsci64(a, unique=True, precision=14),
+                     '-6.31088724176809e-30')
+        assert_equal(fsci64(a, unique=True, min_digits=16),
+                     '-6.3108872417680944e-30')
+        assert_equal(fsci64(a, unique=True, precision=16),
+                     '-6.310887241768095e-30')
+        assert_equal(fsci64(a, unique=True, min_digits=14),
+                     '-6.310887241768095e-30')
+        # test min_digits in unique mode with different rounding cases
+        assert_equal(fsci64('1e120', min_digits=3), '1.000e+120')
+        assert_equal(fsci64('1e100', min_digits=3), '1.000e+100')
+
+        # test trailing zeros
+        assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
+        assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
+        assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
+        assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
+        assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
+        assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
+        assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
+        assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
+        # gh-10713
+        assert_equal(fpos64('324', unique=False, precision=5,
+                            fractional=False), "324.00")
+
+    available_float_dtypes = [np.float16, np.float32, np.float64, np.float128]\
+        if hasattr(np, 'float128') else [np.float16, np.float32, np.float64]
+
+    @pytest.mark.parametrize("tp", available_float_dtypes)
+    def test_dragon4_positional_interface(self, tp):
+        # test is flaky for musllinux on np.float128
+        if IS_MUSL 
and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # test padding + assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ") + assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ") + assert_equal(fpos(tp('-10.2'), + pad_left=4, pad_right=4), " -10.2 ") + + # test fixed (non-unique) mode + assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000") + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_positional_interface_trim(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + # test trimming + # trim of 'k' or '.' only affects non-unique mode, since unique + # mode will not output trailing 0s. + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'), + "1.0000") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'), + "1.") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'), + "1.2" if tp != np.float16 else "1.2002") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'), + "1.0") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='0'), "1.0") + + assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'), + "1") + assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'), + "1.2" if tp != np.float16 else "1.2002") + assert_equal(fpos(tp('1.'), trim='-'), "1") + assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1") + + @pytest.mark.parametrize("tp", available_float_dtypes) + @pytest.mark.parametrize("pad_val", [10**5, np.iinfo("int32").max]) + def test_dragon4_positional_interface_overflow(self, tp, pad_val): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fpos = np.format_float_positional + + # gh-28068 + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), unique=False, precision=pad_val) + + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), precision=2, pad_left=pad_val) + + with pytest.raises(RuntimeError, + match="Float formatting result too large"): + fpos(tp('1.047'), precision=2, pad_right=pad_val) + + @pytest.mark.parametrize("tp", available_float_dtypes) + def test_dragon4_scientific_interface(self, tp): + # test is flaky for musllinux on np.float128 + if IS_MUSL and tp == np.float128: + pytest.skip("Skipping flaky test of float128 on musllinux") + + fsci = np.format_float_scientific + + # test exp_digits + assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001") + + # test fixed (non-unique) mode + assert_equal(fsci(tp('1.0'), unique=False, precision=4), + "1.0000e+00") + + @pytest.mark.skipif(not platform.machine().startswith("ppc64"), + reason="only applies to ppc float128 values") + def test_ppc64_ibm_double_double128(self): + # check that the precision decreases once we get into the subnormal + # range. Unlike float64, this starts around 1e-292 instead of 1e-308, + # which happens when the first double is normal and the second is + # subnormal. 
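+        # Hedged aside on the format (illustrative): IBM double-double
+        # represents a value as an unevaluated sum hi + lo of two float64s,
+        # so e.g. the pair (1.0, 2**-100) encodes 1 + 2**-100 exactly; once
+        # lo goes subnormal (around 1e-292 overall, per the comment above)
+        # the extra precision degrades, which is what the list below records.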
+        x = np.float128('2.123123123123123123123123123123123e-286')
+        got = [str(x / np.float128('2e' + str(i))) for i in range(40)]
+        expected = [
+            "1.06156156156156156156156156156157e-286",
+            "1.06156156156156156156156156156158e-287",
+            "1.06156156156156156156156156156159e-288",
+            "1.0615615615615615615615615615616e-289",
+            "1.06156156156156156156156156156157e-290",
+            "1.06156156156156156156156156156156e-291",
+            "1.0615615615615615615615615615616e-292",
+            "1.0615615615615615615615615615615e-293",
+            "1.061561561561561561561561561562e-294",
+            "1.06156156156156156156156156155e-295",
+            "1.0615615615615615615615615616e-296",
+            "1.06156156156156156156156156e-297",
+            "1.06156156156156156156156157e-298",
+            "1.0615615615615615615615616e-299",
+            "1.06156156156156156156156e-300",
+            "1.06156156156156156156155e-301",
+            "1.0615615615615615615616e-302",
+            "1.061561561561561561562e-303",
+            "1.06156156156156156156e-304",
+            "1.0615615615615615618e-305",
+            "1.06156156156156156e-306",
+            "1.06156156156156157e-307",
+            "1.0615615615615616e-308",
+            "1.06156156156156e-309",
+            "1.06156156156157e-310",
+            "1.0615615615616e-311",
+            "1.06156156156e-312",
+            "1.06156156154e-313",
+            "1.0615615616e-314",
+            "1.06156156e-315",
+            "1.06156155e-316",
+            "1.061562e-317",
+            "1.06156e-318",
+            "1.06155e-319",
+            "1.0617e-320",
+            "1.06e-321",
+            "1.04e-322",
+            "1e-323",
+            "0.0",
+            "0.0"]
+        assert_equal(got, expected)
+
+        # Note: we follow glibc behavior, but it (or gcc) might not be right.
+        # In particular we can get two values that print the same but are not
+        # equal:
+        a = np.float128('2') / np.float128('3')
+        b = np.float128(str(a))
+        assert_equal(str(a), str(b))
+        assert_(a != b)
+
+    def test_float32_roundtrip(self):
+        # gh-9360
+        x = np.float32(1024 - 2**-14)
+        y = np.float32(1024 - 2**-13)
+        assert_(repr(x) != repr(y))
+        assert_equal(np.float32(repr(x)), x)
+        assert_equal(np.float32(repr(y)), y)
+
+    def test_float64_vs_python(self):
+        # gh-2643, gh-6136, gh-6908
+        assert_equal(repr(np.float64(0.1)), repr(0.1))
+        assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
diff --git a/python/numpy/_core/tests/test_shape_base.py b/python/numpy/_core/tests/test_shape_base.py
new file mode 100644
index 000000000..8de24278f
--- /dev/null
+++ b/python/numpy/_core/tests/test_shape_base.py
@@ -0,0 +1,891 @@
+import sys
+
+import pytest
+
+import numpy as np
+from numpy._core import (
+    arange,
+    array,
+    atleast_1d,
+    atleast_2d,
+    atleast_3d,
+    block,
+    concatenate,
+    hstack,
+    newaxis,
+    stack,
+    vstack,
+)
+from numpy._core.shape_base import (
+    _block_concatenate,
+    _block_dispatcher,
+    _block_setup,
+    _block_slicing,
+)
+from numpy.exceptions import AxisError
+from numpy.testing import (
+    IS_PYPY,
+    assert_,
+    assert_array_equal,
+    assert_equal,
+    assert_raises,
+    assert_raises_regex,
+)
+from numpy.testing._private.utils import requires_memory
+
+
+class TestAtleast1d:
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = [atleast_1d(a), atleast_1d(b)]
+        desired = [array([1]), array([2])]
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1, 2])
+        b = array([2, 3])
+        res = [atleast_1d(a), atleast_1d(b)]
+        desired = [array([1, 2]), array([2, 3])]
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        res = [atleast_1d(a), atleast_1d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_3D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = 
[atleast_1d(a), atleast_1d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_r1array(self):
+        """ Test to make sure this is equivalent to Travis O's r1array
+        function.
+        """
+        assert_(atleast_1d(3).shape == (1,))
+        assert_(atleast_1d(3j).shape == (1,))
+        assert_(atleast_1d(3.0).shape == (1,))
+        assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
+
+
+class TestAtleast2d:
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [array([[1]]), array([[2]])]
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1, 2])
+        b = array([2, 3])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [array([[1, 2]]), array([[2, 3]])]
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_3D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = [atleast_2d(a), atleast_2d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+    def test_r2array(self):
+        """ Test to make sure this is equivalent to Travis O's r2array
+        function.
+        """
+        assert_(atleast_2d(3).shape == (1, 1))
+        assert_(atleast_2d([3j, 1]).shape == (1, 2))
+        assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
+
+
+class TestAtleast3d:
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [array([[[1]]]), array([[[2]]])]
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1, 2])
+        b = array([2, 3])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [array([[[1], [2]]]), array([[[2], [3]]])]
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [a[:, :, newaxis], b[:, :, newaxis]]
+        assert_array_equal(res, desired)
+
+    def test_3D_array(self):
+        a = array([[1, 2], [1, 2]])
+        b = array([[2, 3], [2, 3]])
+        a = array([a, a])
+        b = array([b, b])
+        res = [atleast_3d(a), atleast_3d(b)]
+        desired = [a, b]
+        assert_array_equal(res, desired)
+
+
+class TestHstack:
+    def test_non_iterable(self):
+        assert_raises(TypeError, hstack, 1)
+
+    def test_empty_input(self):
+        assert_raises(ValueError, hstack, ())
+
+    def test_0D_array(self):
+        a = array(1)
+        b = array(2)
+        res = hstack([a, b])
+        desired = array([1, 2])
+        assert_array_equal(res, desired)
+
+    def test_1D_array(self):
+        a = array([1])
+        b = array([2])
+        res = hstack([a, b])
+        desired = array([1, 2])
+        assert_array_equal(res, desired)
+
+    def test_2D_array(self):
+        a = array([[1], [2]])
+        b = array([[1], [2]])
+        res = hstack([a, b])
+        desired = array([[1, 1], [2, 2]])
+        assert_array_equal(res, desired)
+
+    def test_generator(self):
+        with pytest.raises(TypeError, match="arrays to stack must be"):
+            hstack(np.arange(3) for _ in range(2))
+        with pytest.raises(TypeError, match="arrays to stack must be"):
+            hstack(x for x in np.ones((3, 2)))
+
+    def test_casting_and_dtype(self):
+        a = np.array([1, 2, 3])
+        b = np.array([2.5, 3.5, 4.5])
+        res = np.hstack((a, b), casting="unsafe", dtype=np.int64)
+        expected_res = np.array([1, 2, 3, 2, 3, 4])
+        assert_array_equal(res, expected_res)
+
+    def test_casting_and_dtype_type_error(self):
+        a = np.array([1, 2, 3])
+        b = np.array([2.5, 3.5, 4.5])
+        with pytest.raises(TypeError):
+            hstack((a, b), casting="safe", dtype=np.int64)
+
+
+class 
TestVstack: + def test_non_iterable(self): + assert_raises(TypeError, vstack, 1) + + def test_empty_input(self): + assert_raises(ValueError, vstack, ()) + + def test_0D_array(self): + a = array(1) + b = array(2) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = array([1]) + b = array([2]) + res = vstack([a, b]) + desired = array([[1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = array([[1], [2]]) + b = array([[1], [2]]) + res = vstack([a, b]) + desired = array([[1], [2], [1], [2]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = array([1, 2]) + b = array([1, 2]) + res = vstack([a, b]) + desired = array([[1, 2], [1, 2]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + vstack(np.arange(3) for _ in range(2)) + + def test_casting_and_dtype(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.vstack((a, b), casting="unsafe", dtype=np.int64) + expected_res = np.array([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(res, expected_res) + + def test_casting_and_dtype_type_error(self): + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + with pytest.raises(TypeError): + vstack((a, b), casting="safe", dtype=np.int64) + + +class TestConcatenate: + def test_returns_copy(self): + a = np.eye(3) + b = np.concatenate([a]) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_exceptions(self): + # test axis must be in bounds + for ndim in [1, 2, 3]: + a = np.ones((1,) * ndim) + np.concatenate((a, a), axis=0) # OK + assert_raises(AxisError, np.concatenate, (a, a), axis=ndim) + assert_raises(AxisError, np.concatenate, (a, a), axis=-(ndim + 1)) + + # Scalars cannot be concatenated + assert_raises(ValueError, concatenate, (0,)) + assert_raises(ValueError, concatenate, (np.array(0),)) + + # dimensionality must match + assert_raises_regex( + ValueError, + r"all the input arrays must have same number of dimensions, but " + r"the array at index 0 has 1 dimension\(s\) and the array at " + r"index 1 has 2 dimension\(s\)", + np.concatenate, (np.zeros(1), np.zeros((1, 1)))) + + # test shapes must match except for concatenation axis + a = np.ones((1, 2, 3)) + b = np.ones((2, 2, 3)) + axis = list(range(3)) + for i in range(3): + np.concatenate((a, b), axis=axis[0]) # OK + assert_raises_regex( + ValueError, + "all the input array dimensions except for the concatenation axis " + f"must match exactly, but along dimension {i}, the array at " + "index 0 has size 1 and the array at index 1 has size 2", + np.concatenate, (a, b), axis=axis[1]) + assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2]) + a = np.moveaxis(a, -1, 0) + b = np.moveaxis(b, -1, 0) + axis.append(axis.pop(0)) + + # No arrays to concatenate raises ValueError + assert_raises(ValueError, concatenate, ()) + + @pytest.mark.slow + @pytest.mark.skipif(sys.maxsize < 2**32, reason="only problematic on 64bit platforms") + @requires_memory(2 * np.iinfo(np.intc).max) + def test_huge_list_error(self): + a = np.array([1]) + max_int = np.iinfo(np.intc).max + arrs = (a,) * (max_int + 1) + msg = fr"concatenate\(\) only supports up to {max_int} arrays but got {max_int + 1}." 
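+        # Hedged aside (illustrative): pytest's `match` is applied with
+        # re.search, which is why the parentheses in `msg` are escaped:
+        #
+        #     >>> import re
+        #     >>> bool(re.search(r"concatenate\(\) only supports",
+        #     ...                "concatenate() only supports up to 2 arrays"))
+        #     True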
+        with pytest.raises(ValueError, match=msg):
+            np.concatenate(arrs)
+
+    def test_concatenate_axis_None(self):
+        a = np.arange(4, dtype=np.float64).reshape((2, 2))
+        b = list(range(3))
+        c = ['x']
+        r = np.concatenate((a, a), axis=None)
+        assert_equal(r.dtype, a.dtype)
+        assert_equal(r.ndim, 1)
+        r = np.concatenate((a, b), axis=None)
+        assert_equal(r.size, a.size + len(b))
+        assert_equal(r.dtype, a.dtype)
+        r = np.concatenate((a, b, c), axis=None, dtype="U")
+        d = array(['0.0', '1.0', '2.0', '3.0',
+                   '0', '1', '2', 'x'])
+        assert_array_equal(r, d)
+
+        out = np.zeros(a.size + len(b))
+        r = np.concatenate((a, b), axis=None)
+        rout = np.concatenate((a, b), axis=None, out=out)
+        assert_(out is rout)
+        assert_equal(r, rout)
+
+    def test_large_concatenate_axis_None(self):
+        # When no axis is given, concatenate uses flattened versions.
+        # This also had a bug with many arrays (see gh-5979).
+        x = np.arange(1, 100)
+        r = np.concatenate(x, None)
+        assert_array_equal(x, r)
+
+        # Once upon a time, this was the same as `axis=None`; now it fails
+        # (with an unspecified error, as multiple things are wrong here)
+        with pytest.raises(ValueError):
+            np.concatenate(x, 100)
+
+    def test_concatenate(self):
+        # Test concatenate function
+        # One sequence returns unmodified (but as array)
+        r4 = list(range(4))
+        assert_array_equal(concatenate((r4,)), r4)
+        # Any sequence
+        assert_array_equal(concatenate((tuple(r4),)), r4)
+        assert_array_equal(concatenate((array(r4),)), r4)
+        # 1D default concatenation
+        r3 = list(range(3))
+        assert_array_equal(concatenate((r4, r3)), r4 + r3)
+        # Mixed sequence types
+        assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
+        assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
+        # Explicit axis specification
+        assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
+        # Including negative
+        assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
+        # 2D
+        a23 = array([[10, 11, 12], [13, 14, 15]])
+        a13 = array([[0, 1, 2]])
+        res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
+        assert_array_equal(concatenate((a23, a13)), res)
+        assert_array_equal(concatenate((a23, a13), 0), res)
+        assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
+        assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
+        # Arrays must match shape
+        assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
+        # 3D
+        res = arange(2 * 3 * 7).reshape((2, 3, 7))
+        a0 = res[..., :4]
+        a1 = res[..., 4:6]
+        a2 = res[..., 6:]
+        assert_array_equal(concatenate((a0, a1, a2), 2), res)
+        assert_array_equal(concatenate((a0, a1, a2), -1), res)
+        assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+
+        out = res.copy()
+        rout = concatenate((a0, a1, a2), 2, out=out)
+        assert_(out is rout)
+        assert_equal(res, rout)
+
+    @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
+    def test_operator_concat(self):
+        import operator
+        a = array([1, 2])
+        b = array([3, 4])
+        n = [1, 2]
+        res = array([1, 2, 3, 4])
+        assert_raises(TypeError, operator.concat, a, b)
+        assert_raises(TypeError, operator.concat, a, n)
+        assert_raises(TypeError, operator.concat, n, a)
+        assert_raises(TypeError, operator.concat, a, 1)
+        assert_raises(TypeError, operator.concat, 1, a)
+
+    def test_bad_out_shape(self):
+        a = array([1, 2])
+        b = array([3, 4])
+
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty((4, 1)))
+        assert_raises(ValueError, concatenate, (a, b), out=np.empty((1, 4)))
+        concatenate((a, b), out=np.empty(4))
+
+    
@pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"]) + @pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) + def test_out_and_dtype(self, axis, out_dtype, casting): + # Compare usage of `out=out` with `dtype=out.dtype` + out = np.empty(4, dtype=out_dtype) + to_concat = (array([1.1, 2.2]), array([3.3, 4.4])) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + concatenate(to_concat, out=out, axis=axis, casting=casting) + with assert_raises(TypeError): + concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + else: + res_out = concatenate(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = concatenate(to_concat, dtype=out.dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with assert_raises(TypeError): + concatenate(to_concat, out=out, dtype=out_dtype, axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"]) + @pytest.mark.parametrize("arrs", + [([0.],), ([0.], [1]), ([0], ["string"], [1.])]) + def test_dtype_with_promotion(self, arrs, string_dt, axis): + # Note that U0 and S0 should be deprecated eventually and changed to + # actually give the empty string result (together with `np.array`) + res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe") + # The actual dtype should be identical to a cast (of a double array): + assert res.dtype == np.array(1.).astype(string_dt).dtype + + @pytest.mark.parametrize("axis", [None, 0]) + def test_string_dtype_does_not_inspect(self, axis): + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="S", axis=axis) + with pytest.raises(TypeError): + np.concatenate(([None], [1]), dtype="U", axis=axis) + + @pytest.mark.parametrize("axis", [None, 0]) + def test_subarray_error(self, axis): + with pytest.raises(TypeError, match=".*subarray dtype"): + np.concatenate(([1], [1]), dtype="(2,)i", axis=axis) + + +def test_stack(): + # non-iterable input + assert_raises(TypeError, stack, 1) + + # 0d input + for input_ in [(1, 2, 3), + [np.int32(1), np.int32(2), np.int32(3)], + [np.array(1), np.array(2), np.array(3)]]: + assert_array_equal(stack(input_), [1, 2, 3]) + # 1d input examples + a = np.array([1, 2, 3]) + b = np.array([4, 5, 6]) + r1 = array([[1, 2, 3], [4, 5, 6]]) + assert_array_equal(np.stack((a, b)), r1) + assert_array_equal(np.stack((a, b), axis=1), r1.T) + # all input types + assert_array_equal(np.stack([a, b]), r1) + assert_array_equal(np.stack(array([a, b])), r1) + # all shapes for 1d input + arrays = [np.random.randn(3) for _ in range(10)] + axes = [0, 1, -1, -2] + expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + assert_raises_regex(AxisError, 'out of bounds', stack, arrays, axis=2) + assert_raises_regex(AxisError, 'out of bounds', stack, arrays, axis=-3) + # all shapes for 2d input + arrays = [np.random.randn(3, 4) for _ in range(10)] + axes = [0, 1, 2, -1, -2, -3] + expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10), + (3, 4, 10), (3, 10, 4), (10, 3, 4)] + for axis, expected_shape in zip(axes, expected_shapes): + assert_equal(np.stack(arrays, axis).shape, expected_shape) + # empty arrays + assert_(stack([[], [], []]).shape == (3, 0)) + assert_(stack([[], [], []], 
axis=1).shape == (0, 3)) + # out + out = np.zeros_like(r1) + np.stack((a, b), out=out) + assert_array_equal(out, r1) + # edge cases + assert_raises_regex(ValueError, 'need at least one array', stack, []) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [1, np.arange(3)]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1]) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(3), 1], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.zeros((3, 3)), np.zeros(3)], axis=1) + assert_raises_regex(ValueError, 'must have the same shape', + stack, [np.arange(2), np.arange(3)]) + + # do not accept generators + with pytest.raises(TypeError, match="arrays to stack must be"): + stack(x for x in range(3)) + + # casting and dtype test + a = np.array([1, 2, 3]) + b = np.array([2.5, 3.5, 4.5]) + res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64) + expected_res = np.array([[1, 2], [2, 3], [3, 4]]) + assert_array_equal(res, expected_res) + # casting and dtype with TypeError + with assert_raises(TypeError): + stack((a, b), dtype=np.int64, axis=1, casting="safe") + + +def test_unstack(): + a = np.arange(24).reshape((2, 3, 4)) + + for stacks in [np.unstack(a), + np.unstack(a, axis=0), + np.unstack(a, axis=-3)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 2 + assert_array_equal(stacks[0], a[0]) + assert_array_equal(stacks[1], a[1]) + + for stacks in [np.unstack(a, axis=1), + np.unstack(a, axis=-2)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 3 + assert_array_equal(stacks[0], a[:, 0]) + assert_array_equal(stacks[1], a[:, 1]) + assert_array_equal(stacks[2], a[:, 2]) + + for stacks in [np.unstack(a, axis=2), + np.unstack(a, axis=-1)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 4 + assert_array_equal(stacks[0], a[:, :, 0]) + assert_array_equal(stacks[1], a[:, :, 1]) + assert_array_equal(stacks[2], a[:, :, 2]) + assert_array_equal(stacks[3], a[:, :, 3]) + + assert_raises(ValueError, np.unstack, a, axis=3) + assert_raises(ValueError, np.unstack, a, axis=-4) + assert_raises(ValueError, np.unstack, np.array(0), axis=0) + + +@pytest.mark.parametrize("axis", [0]) +@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"]) +@pytest.mark.parametrize("casting", + ['no', 'equiv', 'safe', 'same_kind', 'unsafe']) +def test_stack_out_and_dtype(axis, out_dtype, casting): + to_concat = (array([1, 2]), array([3, 4])) + res = array([[1, 2], [3, 4]]) + out = np.zeros_like(res) + + if not np.can_cast(to_concat[0], out_dtype, casting=casting): + with assert_raises(TypeError): + stack(to_concat, dtype=out_dtype, + axis=axis, casting=casting) + else: + res_out = stack(to_concat, out=out, + axis=axis, casting=casting) + res_dtype = stack(to_concat, dtype=out_dtype, + axis=axis, casting=casting) + assert res_out is out + assert_array_equal(out, res_dtype) + assert res_dtype.dtype == out_dtype + + with assert_raises(TypeError): + stack(to_concat, out=out, dtype=out_dtype, axis=axis) + + +class TestBlock: + @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing']) + def block(self, request): + # blocking small arrays and large arrays go through different paths. + # the algorithm is triggered depending on the number of element + # copies required. + # We define a test fixture that forces most tests to go through + # both code paths. 
+ # Ultimately, this should be removed if a single algorithm is found + # to be faster for both small and large arrays. + def _block_force_concatenate(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_concatenate(arrays, list_ndim, result_ndim) + + def _block_force_slicing(arrays): + arrays, list_ndim, result_ndim, _ = _block_setup(arrays) + return _block_slicing(arrays, list_ndim, result_ndim) + + if request.param == 'force_concatenate': + return _block_force_concatenate + elif request.param == 'force_slicing': + return _block_force_slicing + elif request.param == 'block': + return block + else: + raise ValueError('Unknown blocking request. There is a typo in the tests.') + + def test_returns_copy(self, block): + a = np.eye(3) + b = block(a) + b[0, 0] = 2 + assert b[0, 0] != a[0, 0] + + def test_block_total_size_estimate(self, block): + _, _, _, total_size = _block_setup([1]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1]]) + assert total_size == 1 + + _, _, _, total_size = _block_setup([[1, 1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1], [1]]) + assert total_size == 2 + + _, _, _, total_size = _block_setup([[1, 2], [3, 4]]) + assert total_size == 4 + + def test_block_simple_row_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + desired = np.array([[1, 1, 2, 2], + [1, 1, 2, 2]]) + result = block([a_2d, b_2d]) + assert_equal(desired, result) + + def test_block_simple_column_wise(self, block): + a_2d = np.ones((2, 2)) + b_2d = 2 * a_2d + expected = np.array([[1, 1], + [1, 1], + [2, 2], + [2, 2]]) + result = block([[a_2d], [b_2d]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_row_wise(self, block): + # # # 1-D vectors are treated as row arrays + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([1, 2, 3, 2, 3, 4]) + result = block([a, b]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_multiple_rows(self, block): + a = np.array([1, 2, 3]) + b = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3, 2, 3, 4], + [1, 2, 3, 2, 3, 4]]) + result = block([[a, b], [a, b]]) + assert_equal(expected, result) + + def test_block_with_1d_arrays_column_wise(self, block): + # # # 1-D vectors are treated as row arrays + a_1d = np.array([1, 2, 3]) + b_1d = np.array([2, 3, 4]) + expected = np.array([[1, 2, 3], + [2, 3, 4]]) + result = block([[a_1d], [b_1d]]) + assert_equal(expected, result) + + def test_block_mixed_1d_and_2d(self, block): + a_2d = np.ones((2, 2)) + b_1d = np.array([2, 2]) + result = block([[a_2d], [b_1d]]) + expected = np.array([[1, 1], + [1, 1], + [2, 2]]) + assert_equal(expected, result) + + def test_block_complicated(self, block): + # a bit more complicated + one_2d = np.array([[1, 1, 1]]) + two_2d = np.array([[2, 2, 2]]) + three_2d = np.array([[3, 3, 3, 3, 3, 3]]) + four_1d = np.array([4, 4, 4, 4, 4, 4]) + five_0d = np.array(5) + six_1d = np.array([6, 6, 6, 6, 6]) + zero_2d = np.zeros((2, 6)) + + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 3, 3, 3], + [4, 4, 4, 4, 4, 4], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + result = block([[one_2d, two_2d], + [three_2d], + [four_1d], + [five_0d, six_1d], + [zero_2d]]) + assert_equal(result, expected) + + def test_nested(self, block): + one = np.array([1, 1, 1]) + two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]) + three = np.array([3, 3, 3]) + four = np.array([4, 4, 4]) + five = np.array(5) + six = np.array([6, 6, 6, 6, 6]) + zero = np.zeros((2, 6)) + + result 
= block([ + [ + block([ + [one], + [three], + [four] + ]), + two + ], + [five, six], + [zero] + ]) + expected = np.array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 2, 2, 2], + [4, 4, 4, 2, 2, 2], + [5, 6, 6, 6, 6, 6], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0]]) + + assert_equal(result, expected) + + def test_3d(self, block): + a000 = np.ones((2, 2, 2), int) * 1 + + a100 = np.ones((3, 2, 2), int) * 2 + a010 = np.ones((2, 3, 2), int) * 3 + a001 = np.ones((2, 2, 3), int) * 4 + + a011 = np.ones((2, 3, 3), int) * 5 + a101 = np.ones((3, 2, 3), int) * 6 + a110 = np.ones((3, 3, 2), int) * 7 + + a111 = np.ones((3, 3, 3), int) * 8 + + result = block([ + [ + [a000, a001], + [a010, a011], + ], + [ + [a100, a101], + [a110, a111], + ] + ]) + expected = array([[[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[1, 1, 4, 4, 4], + [1, 1, 4, 4, 4], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5], + [3, 3, 5, 5, 5]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]], + + [[2, 2, 6, 6, 6], + [2, 2, 6, 6, 6], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8], + [7, 7, 8, 8, 8]]]) + + assert_array_equal(result, expected) + + def test_block_with_mismatched_shape(self, block): + a = np.array([0, 0]) + b = np.eye(2) + assert_raises(ValueError, block, [a, b]) + assert_raises(ValueError, block, [b, a]) + + to_block = [[np.ones((2, 3)), np.ones((2, 2))], + [np.ones((2, 2)), np.ones((2, 2))]] + assert_raises(ValueError, block, to_block) + + def test_no_lists(self, block): + assert_equal(block(1), np.array(1)) + assert_equal(block(np.eye(3)), np.eye(3)) + + def test_invalid_nesting(self, block): + msg = 'depths are mismatched' + assert_raises_regex(ValueError, msg, block, [1, [2]]) + assert_raises_regex(ValueError, msg, block, [1, []]) + assert_raises_regex(ValueError, msg, block, [[1], 2]) + assert_raises_regex(ValueError, msg, block, [[], 2]) + assert_raises_regex(ValueError, msg, block, [ + [[1], [2]], + [[3, 4]], + [5] # missing brackets + ]) + + def test_empty_lists(self, block): + assert_raises_regex(ValueError, 'empty', block, []) + assert_raises_regex(ValueError, 'empty', block, [[]]) + assert_raises_regex(ValueError, 'empty', block, [[1], []]) + + def test_tuple(self, block): + assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4])) + assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)]) + + def test_different_ndims(self, block): + a = 1. + b = 2 * np.ones((1, 2)) + c = 3 * np.ones((1, 1, 3)) + + result = block([a, b, c]) + expected = np.array([[[1., 2., 2., 3., 3., 3.]]]) + + assert_equal(result, expected) + + def test_different_ndims_depths(self, block): + a = 1. 
+        b = 2 * np.ones((1, 2))
+        c = 3 * np.ones((1, 2, 3))
+
+        result = block([[a, b], [c]])
+        expected = np.array([[[1., 2., 2.],
+                              [3., 3., 3.],
+                              [3., 3., 3.]]])
+
+        assert_equal(result, expected)
+
+    def test_block_memory_order(self, block):
+        # 3D
+        arr_c = np.zeros((3,) * 3, order='C')
+        arr_f = np.zeros((3,) * 3, order='F')
+
+        b_c = [[[arr_c, arr_c],
+                [arr_c, arr_c]],
+               [[arr_c, arr_c],
+                [arr_c, arr_c]]]
+
+        b_f = [[[arr_f, arr_f],
+                [arr_f, arr_f]],
+               [[arr_f, arr_f],
+                [arr_f, arr_f]]]
+
+        assert block(b_c).flags['C_CONTIGUOUS']
+        assert block(b_f).flags['F_CONTIGUOUS']
+
+        arr_c = np.zeros((3, 3), order='C')
+        arr_f = np.zeros((3, 3), order='F')
+        # 2D
+        b_c = [[arr_c, arr_c],
+               [arr_c, arr_c]]
+
+        b_f = [[arr_f, arr_f],
+               [arr_f, arr_f]]
+
+        assert block(b_c).flags['C_CONTIGUOUS']
+        assert block(b_f).flags['F_CONTIGUOUS']
+
+
+def test_block_dispatcher():
+    class ArrayLike:
+        pass
+    a = ArrayLike()
+    b = ArrayLike()
+    c = ArrayLike()
+    assert_equal(list(_block_dispatcher(a)), [a])
+    assert_equal(list(_block_dispatcher([a])), [a])
+    assert_equal(list(_block_dispatcher([a, b])), [a, b])
+    assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
+    # don't recurse into non-lists
+    assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
diff --git a/python/numpy/_core/tests/test_simd.py b/python/numpy/_core/tests/test_simd.py
new file mode 100644
index 000000000..697d89bcc
--- /dev/null
+++ b/python/numpy/_core/tests/test_simd.py
@@ -0,0 +1,1341 @@
+# NOTE: Please avoid the use of numpy.testing since NPYV intrinsics
+# may be involved in their functionality.
+import itertools
+import math
+import operator
+import re
+
+import pytest
+from numpy._core._multiarray_umath import __cpu_baseline__
+
+from numpy._core._simd import clear_floatstatus, get_floatstatus, targets
+
+
+def check_floatstatus(divbyzero=False, overflow=False,
+                      underflow=False, invalid=False,
+                      all=False):
+    #define NPY_FPE_DIVIDEBYZERO  1
+    #define NPY_FPE_OVERFLOW      2
+    #define NPY_FPE_UNDERFLOW     4
+    #define NPY_FPE_INVALID       8
+    err = get_floatstatus()
+    ret = (all or divbyzero) and (err & 1) != 0
+    ret |= (all or overflow) and (err & 2) != 0
+    ret |= (all or underflow) and (err & 4) != 0
+    ret |= (all or invalid) and (err & 8) != 0
+    return ret
+
+class _Test_Utility:
+    # submodule of the desired SIMD extension, e.g. targets["AVX512F"]
+    npyv = None
+    # the current data type suffix e.g. 's8'
+    sfx = None
+    # target name can be 'baseline' or one or more of CPU features
+    target_name = None
+
+    def __getattr__(self, attr):
+        """
+        To call NPYV intrinsics without the attribute 'npyv', while
+        auto-suffixing intrinsics according to the class attribute 'sfx'
+        """
+        return getattr(self.npyv, attr + "_" + self.sfx)
+
+    def _x2(self, intrin_name):
+        return getattr(self.npyv, f"{intrin_name}_{self.sfx}x2")
+
+    def _data(self, start=None, count=None, reverse=False):
+        """
+        Create a list of consecutive numbers according to the number of vector lanes.
+ """ + if start is None: + start = 1 + if count is None: + count = self.nlanes + rng = range(start, start + count) + if reverse: + rng = reversed(rng) + if self._is_fp(): + return [x / 1.0 for x in rng] + return list(rng) + + def _is_unsigned(self): + return self.sfx[0] == 'u' + + def _is_signed(self): + return self.sfx[0] == 's' + + def _is_fp(self): + return self.sfx[0] == 'f' + + def _scalar_size(self): + return int(self.sfx[1:]) + + def _int_clip(self, seq): + if self._is_fp(): + return seq + max_int = self._int_max() + min_int = self._int_min() + return [min(max(v, min_int), max_int) for v in seq] + + def _int_max(self): + if self._is_fp(): + return None + max_u = self._to_unsigned(self.setall(-1))[0] + if self._is_signed(): + return max_u // 2 + return max_u + + def _int_min(self): + if self._is_fp(): + return None + if self._is_unsigned(): + return 0 + return -(self._int_max() + 1) + + def _true_mask(self): + max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1) + return max_unsig[0] + + def _to_unsigned(self, vector): + if isinstance(vector, (list, tuple)): + return getattr(self.npyv, "load_u" + self.sfx[1:])(vector) + else: + sfx = vector.__name__.replace("npyv_", "") + if sfx[0] == "b": + cvt_intrin = "cvt_u{0}_b{0}" + else: + cvt_intrin = "reinterpret_u{0}_{1}" + return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector) + + def _pinfinity(self): + return float("inf") + + def _ninfinity(self): + return -float("inf") + + def _nan(self): + return float("nan") + + def _cpu_features(self): + target = self.target_name + if target == "baseline": + target = __cpu_baseline__ + else: + target = target.split('__') # multi-target separator + return ' '.join(target) + +class _SIMD_BOOL(_Test_Utility): + """ + To test all boolean vector types at once + """ + def _nlanes(self): + return getattr(self.npyv, "nlanes_u" + self.sfx[1:]) + + def _data(self, start=None, count=None, reverse=False): + true_mask = self._true_mask() + rng = range(self._nlanes()) + if reverse: + rng = reversed(rng) + return [true_mask if x % 2 else 0 for x in rng] + + def _load_b(self, data): + len_str = self.sfx[1:] + load = getattr(self.npyv, "load_u" + len_str) + cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}") + return cvt(load(data)) + + def test_operators_logical(self): + """ + Logical operations for boolean types. 
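+        (Each boolean lane is either the all-ones true mask or zero, so the
+        lane-wise results can be checked against Python's integer bitwise
+        operators.)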
+        Test intrinsics:
+            npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX,
+            npyv_andc_b8, npyv_orc_b8, npyv_xnor_b8
+        """
+        data_a = self._data()
+        data_b = self._data(reverse=True)
+        vdata_a = self._load_b(data_a)
+        vdata_b = self._load_b(data_b)
+
+        data_and = [a & b for a, b in zip(data_a, data_b)]
+        vand = getattr(self, "and")(vdata_a, vdata_b)
+        assert vand == data_and
+
+        data_or = [a | b for a, b in zip(data_a, data_b)]
+        vor = getattr(self, "or")(vdata_a, vdata_b)
+        assert vor == data_or
+
+        data_xor = [a ^ b for a, b in zip(data_a, data_b)]
+        vxor = self.xor(vdata_a, vdata_b)
+        assert vxor == data_xor
+
+        vnot = getattr(self, "not")(vdata_a)
+        assert vnot == data_b
+
+        # among the boolean types, andc, orc and xnor only support b8
+        if self.sfx not in ("b8",):
+            return
+
+        data_andc = [(a & ~b) & 0xFF for a, b in zip(data_a, data_b)]
+        vandc = self.andc(vdata_a, vdata_b)
+        assert data_andc == vandc
+
+        data_orc = [(a | ~b) & 0xFF for a, b in zip(data_a, data_b)]
+        vorc = self.orc(vdata_a, vdata_b)
+        assert data_orc == vorc
+
+        data_xnor = [~(a ^ b) & 0xFF for a, b in zip(data_a, data_b)]
+        vxnor = self.xnor(vdata_a, vdata_b)
+        assert data_xnor == vxnor
+
+    def test_tobits(self):
+        data2bits = lambda data: sum(int(x != 0) << i for i, x in enumerate(data, 0))
+        for data in (self._data(), self._data(reverse=True)):
+            vdata = self._load_b(data)
+            data_bits = data2bits(data)
+            tobits = self.tobits(vdata)
+            bin_tobits = bin(tobits)
+            assert bin_tobits == bin(data_bits)
+
+    def test_pack(self):
+        """
+        Pack multiple vectors into one
+        Test intrinsics:
+            npyv_pack_b8_b16
+            npyv_pack_b8_b32
+            npyv_pack_b8_b64
+        """
+        if self.sfx not in ("b16", "b32", "b64"):
+            return
+        # create the vectors
+        data = self._data()
+        rdata = self._data(reverse=True)
+        vdata = self._load_b(data)
+        vrdata = self._load_b(rdata)
+        pack_simd = getattr(self.npyv, f"pack_b8_{self.sfx}")
+        # For scalar execution, concatenate the elements of the source lists
+        # into a single list (spack) and then iterate over its elements,
+        # applying a mask to keep only the first byte of each of them.
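+        # For example, assuming 128-bit registers: two b16 vectors of 8 lanes
+        # each pack into a single b8 vector of 16 lanes, with rdata's lanes in
+        # the low half and data's lanes in the high half.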
+ if self.sfx == "b16": + spack = [(i & 0xFF) for i in (list(rdata) + list(data))] + vpack = pack_simd(vrdata, vdata) + elif self.sfx == "b32": + spack = [(i & 0xFF) for i in (2 * list(rdata) + 2 * list(data))] + vpack = pack_simd(vrdata, vrdata, vdata, vdata) + elif self.sfx == "b64": + spack = [(i & 0xFF) for i in (4 * list(rdata) + 4 * list(data))] + vpack = pack_simd(vrdata, vrdata, vrdata, vrdata, + vdata, vdata, vdata, vdata) + assert vpack == spack + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [-1, 0], + [0, -1], + [-1], + [0] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self._load_b(data * self._nlanes()) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + +class _SIMD_INT(_Test_Utility): + """ + To test all integer vector types at once + """ + def test_operators_shift(self): + if self.sfx in ("u8", "s8"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + for count in range(self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift + shl = self.shl(vdata_a, count) + assert shl == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift + shr = self.shr(vdata_a, count) + assert shr == data_shr_a + + # shift by zero or max or out-range immediate constant is not applicable and illogical + for count in range(1, self._scalar_size()): + # load to cast + data_shl_a = self.load([a << count for a in data_a]) + # left shift by an immediate constant + shli = self.shli(vdata_a, count) + assert shli == data_shl_a + # load to cast + data_shr_a = self.load([a >> count for a in data_a]) + # right shift by an immediate constant + shri = self.shri(vdata_a, count) + assert shri == data_shr_a + + def test_arithmetic_subadd_saturated(self): + if self.sfx in ("u32", "s32", "u64", "s64"): + return + + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)]) + adds = self.adds(vdata_a, vdata_b) + assert adds == data_adds + + data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)]) + subs = self.subs(vdata_a, vdata_b) + assert subs == data_subs + + def test_math_max_min(self): + data_a = self._data() + data_b = self._data(self.nlanes) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_max = [max(a, b) for a, b in zip(data_a, data_b)] + simd_max = self.max(vdata_a, vdata_b) + assert simd_max == data_max + + data_min = [min(a, b) for a, b in zip(data_a, data_b)] + simd_min = self.min(vdata_a, vdata_b) + assert simd_min == data_min + + @pytest.mark.parametrize("start", [-100, -10000, 0, 100, 10000]) + def test_reduce_max_min(self, start): + """ + Test intrinsics: + npyv_reduce_max_##sfx + npyv_reduce_min_##sfx + """ + vdata_a = self.load(self._data(start)) + assert self.reduce_max(vdata_a) == max(vdata_a) + assert self.reduce_min(vdata_a) == min(vdata_a) + + +class _SIMD_FP32(_Test_Utility): + """ + To only test single precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. 
+ Test intrinsics: + npyv_round_s32_##SFX + """ + features = self._cpu_features() + if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features): + # very costly to emulate nearest even on Armv7 + # instead we round halves to up. e.g. 0.5 -> 1, -0.5 -> -1 + _round = lambda v: int(v + (0.5 if v >= 0 else -0.5)) + else: + _round = round + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + data_round = [_round(x) for x in vdata_a] + vround = self.round_s32(vdata_a) + assert vround == data_round + +class _SIMD_FP64(_Test_Utility): + """ + To only test double precision + """ + def test_conversions(self): + """ + Round to nearest even integer, assume CPU control register is set to rounding. + Test intrinsics: + npyv_round_s32_##SFX + """ + vdata_a = self.load(self._data()) + vdata_a = self.sub(vdata_a, self.setall(0.5)) + vdata_b = self.mul(vdata_a, self.setall(-1.5)) + data_round = [round(x) for x in list(vdata_a) + list(vdata_b)] + vround = self.round_s32(vdata_a, vdata_b) + assert vround == data_round + +class _SIMD_FP(_Test_Utility): + """ + To test all float vector types at once + """ + def test_arithmetic_fused(self): + vdata_a, vdata_b, vdata_c = [self.load(self._data())] * 3 + vdata_cx2 = self.add(vdata_c, vdata_c) + # multiply and add, a*b + c + data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)]) + fma = self.muladd(vdata_a, vdata_b, vdata_c) + assert fma == data_fma + # multiply and subtract, a*b - c + fms = self.mulsub(vdata_a, vdata_b, vdata_c) + data_fms = self.sub(data_fma, vdata_cx2) + assert fms == data_fms + # negate multiply and add, -(a*b) + c + nfma = self.nmuladd(vdata_a, vdata_b, vdata_c) + data_nfma = self.sub(vdata_cx2, data_fma) + assert nfma == data_nfma + # negate multiply and subtract, -(a*b) - c + nfms = self.nmulsub(vdata_a, vdata_b, vdata_c) + data_nfms = self.mul(data_fma, self.setall(-1)) + assert nfms == data_nfms + # multiply, add for odd elements and subtract even elements. 
+ # (a * b) -+ c + fmas = list(self.muladdsub(vdata_a, vdata_b, vdata_c)) + assert fmas[0::2] == list(data_fms)[0::2] + assert fmas[1::2] == list(data_fma)[1::2] + + def test_abs(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan)) + for case, desired in abs_cases: + data_abs = [desired] * self.nlanes + vabs = self.abs(self.setall(case)) + assert vabs == pytest.approx(data_abs, nan_ok=True) + + vabs = self.abs(self.mul(vdata, self.setall(-1))) + assert vabs == data + + def test_sqrt(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf)) + for case, desired in sqrt_cases: + data_sqrt = [desired] * self.nlanes + sqrt = self.sqrt(self.setall(case)) + assert sqrt == pytest.approx(data_sqrt, nan_ok=True) + + data_sqrt = self.load([math.sqrt(x) for x in data]) # load to truncate precision + sqrt = self.sqrt(vdata) + assert sqrt == data_sqrt + + def test_square(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + # square + square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf)) + for case, desired in square_cases: + data_square = [desired] * self.nlanes + square = self.square(self.setall(case)) + assert square == pytest.approx(data_square, nan_ok=True) + + data_square = [x * x for x in data] + square = self.square(vdata) + assert square == data_square + + @pytest.mark.parametrize("intrin, func", [("ceil", math.ceil), + ("trunc", math.trunc), ("floor", math.floor), ("rint", round)]) + def test_rounding(self, intrin, func): + """ + Test intrinsics: + npyv_rint_##SFX + npyv_ceil_##SFX + npyv_trunc_##SFX + npyv_floor##SFX + """ + intrin_name = intrin + intrin = getattr(self, intrin) + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + # special cases + round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf)) + for case, desired in round_cases: + data_round = [desired] * self.nlanes + _round = intrin(self.setall(case)) + assert _round == pytest.approx(data_round, nan_ok=True) + + for x in range(0, 2**20, 256**2): + for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15): + data = self.load([(x + a) * w for a in range(self.nlanes)]) + data_round = [func(x) for x in data] + _round = intrin(data) + assert _round == data_round + + # test large numbers + for i in ( + 1.1529215045988576e+18, 4.6116860183954304e+18, + 5.902958103546122e+20, 2.3611832414184488e+21 + ): + x = self.setall(i) + y = intrin(x) + data_round = [func(n) for n in x] + assert y == data_round + + # signed zero + if intrin_name == "floor": + data_szero = (-0.0,) + else: + data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5) + + for w in data_szero: + _round = self._to_unsigned(intrin(self.setall(w))) + data_round = self._to_unsigned(self.setall(-0.0)) + assert _round == data_round + + @pytest.mark.parametrize("intrin", [ + "max", "maxp", "maxn", "min", "minp", "minn" + ]) + def test_max_min(self, intrin): + """ + Test intrinsics: + npyv_max_##sfx + npyv_maxp_##sfx + npyv_maxn_##sfx + npyv_min_##sfx + npyv_minp_##sfx + npyv_minn_##sfx + npyv_reduce_max_##sfx + npyv_reduce_maxp_##sfx + npyv_reduce_maxn_##sfx + npyv_reduce_min_##sfx + npyv_reduce_minp_##sfx + npyv_reduce_minn_##sfx + """ + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), 
self._nan() + chk_nan = {"xp": 1, "np": 1, "nn": 2, "xn": 2}.get(intrin[-2:], 0) + func = eval(intrin[:3]) + reduce_intrin = getattr(self, "reduce_" + intrin) + intrin = getattr(self, intrin) + hf_nlanes = self.nlanes // 2 + + cases = ( + ([0.0, -0.0], [-0.0, 0.0]), + ([10, -10], [10, -10]), + ([pinf, 10], [10, ninf]), + ([10, pinf], [ninf, 10]), + ([10, -10], [10, -10]), + ([-10, 10], [-10, 10]) + ) + for op1, op2 in cases: + vdata_a = self.load(op1 * hf_nlanes) + vdata_b = self.load(op2 * hf_nlanes) + data = func(vdata_a, vdata_b) + simd = intrin(vdata_a, vdata_b) + assert simd == data + data = func(vdata_a) + simd = reduce_intrin(vdata_a) + assert simd == data + + if not chk_nan: + return + if chk_nan == 1: + test_nan = lambda a, b: ( + b if math.isnan(a) else a if math.isnan(b) else b + ) + else: + test_nan = lambda a, b: ( + nan if math.isnan(a) or math.isnan(b) else b + ) + cases = ( + (nan, 10), + (10, nan), + (nan, pinf), + (pinf, nan), + (nan, nan) + ) + for op1, op2 in cases: + vdata_ab = self.load([op1, op2] * hf_nlanes) + data = test_nan(op1, op2) + simd = reduce_intrin(vdata_ab) + assert simd == pytest.approx(data, nan_ok=True) + vdata_a = self.setall(op1) + vdata_b = self.setall(op2) + data = [data] * self.nlanes + simd = intrin(vdata_a, vdata_b) + assert simd == pytest.approx(data, nan_ok=True) + + def test_reciprocal(self): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + data = self._data() + vdata = self.load(self._data()) + + recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf)) + for case, desired in recip_cases: + data_recip = [desired] * self.nlanes + recip = self.recip(self.setall(case)) + assert recip == pytest.approx(data_recip, nan_ok=True) + + data_recip = self.load([1 / x for x in data]) # load to truncate precision + recip = self.recip(vdata) + assert recip == data_recip + + def test_special_cases(self): + """ + Compare Not NaN. 
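+        (For a NaN input every lane of the returned mask is false, i.e. zero,
+        which is what the assertion below checks.)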
Test intrinsics: + npyv_notnan_##SFX + """ + nnan = self.notnan(self.setall(self._nan())) + assert nnan == [0] * self.nlanes + + @pytest.mark.parametrize("intrin_name", [ + "rint", "trunc", "ceil", "floor" + ]) + def test_unary_invalid_fpexception(self, intrin_name): + intrin = getattr(self, intrin_name) + for d in [float("nan"), float("inf"), -float("inf")]: + v = self.setall(d) + clear_floatstatus() + intrin(v) + assert check_floatstatus(invalid=True) is False + + @pytest.mark.parametrize('py_comp,np_comp', [ + (operator.lt, "cmplt"), + (operator.le, "cmple"), + (operator.gt, "cmpgt"), + (operator.ge, "cmpge"), + (operator.eq, "cmpeq"), + (operator.ne, "cmpneq") + ]) + def test_comparison_with_nan(self, py_comp, np_comp): + pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan() + mask_true = self._true_mask() + + def to_bool(vector): + return [lane == mask_true for lane in vector] + + intrin = getattr(self, np_comp) + cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan), + (ninf, nan), (-0.0, +0.0)) + for case_operand1, case_operand2 in cmp_cases: + data_a = [case_operand1] * self.nlanes + data_b = [case_operand2] * self.nlanes + vdata_a = self.setall(case_operand1) + vdata_b = self.setall(case_operand2) + vcmp = to_bool(intrin(vdata_a, vdata_b)) + data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)] + assert vcmp == data_cmp + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [float("nan"), 0], + [0, float("nan")], + [float("nan"), 1], + [1, float("nan")], + [float("nan"), float("nan")], + [0.0, -0.0], + [-0.0, 0.0], + [1.0, -0.0] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self.load(data * self.nlanes) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + +class _SIMD_ALL(_Test_Utility): + """ + To test all vector types at once + """ + def test_memory_load(self): + data = self._data() + # unaligned load + load_data = self.load(data) + assert load_data == data + # aligned load + loada_data = self.loada(data) + assert loada_data == data + # stream load + loads_data = self.loads(data) + assert loads_data == data + # load lower part + loadl = self.loadl(data) + loadl_half = list(loadl)[:self.nlanes // 2] + data_half = data[:self.nlanes // 2] + assert loadl_half == data_half + assert loadl != data # detect overflow + + def test_memory_store(self): + data = self._data() + vdata = self.load(data) + # unaligned store + store = [0] * self.nlanes + self.store(store, vdata) + assert store == data + # aligned store + store_a = [0] * self.nlanes + self.storea(store_a, vdata) + assert store_a == data + # stream store + store_s = [0] * self.nlanes + self.stores(store_s, vdata) + assert store_s == data + # store lower part + store_l = [0] * self.nlanes + self.storel(store_l, vdata) + assert store_l[:self.nlanes // 2] == data[:self.nlanes // 2] + assert store_l != vdata # detect overflow + # store higher part + store_h = [0] * self.nlanes + self.storeh(store_h, vdata) + assert store_h[:self.nlanes // 2] == data[self.nlanes // 2:] + assert store_h != vdata # detect overflow + + @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ + ("self.load_tillz, self.load_till", (32, 64), 1, [0xffff]), + ("self.load2_tillz, self.load2_till", (32, 64), 2, [0xffff, 0x7fff]), + ]) + def test_memory_partial_load(self, intrin, elsizes, scale, fill): + if self._scalar_size() not 
in elsizes: + return + npyv_load_tillz, npyv_load_till = eval(intrin) + data = self._data() + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] # test out of range + for n in lanes: + load_till = npyv_load_till(data, n, *fill) + load_tillz = npyv_load_tillz(data, n) + n *= scale + data_till = data[:n] + fill * ((self.nlanes - n) // scale) + assert load_till == data_till + data_tillz = data[:n] + [0] * (self.nlanes - n) + assert load_tillz == data_tillz + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.store_till", (32, 64), 1), + ("self.store2_till", (32, 64), 2), + ]) + def test_memory_partial_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_store_till = eval(intrin) + data = self._data() + data_rev = self._data(reverse=True) + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for n in lanes: + data_till = data_rev.copy() + data_till[:n * scale] = data[:n * scale] + store_till = self._data(reverse=True) + npyv_store_till(store_till, n, vdata) + assert store_till == data_till + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.loadn", (32, 64), 1), + ("self.loadn2", (32, 64), 2), + ]) + def test_memory_noncont_load(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_loadn = eval(intrin) + for stride in range(-64, 64): + if stride < 0: + data = self._data(stride, -stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) + )) + elif stride == 0: + data = self._data() + data_stride = data[0:scale] * (self.nlanes // scale) + else: + data = self._data(count=stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[i::stride] for i in range(scale)])) + ) + data_stride = self.load(data_stride) # cast unsigned + loadn = npyv_loadn(data, stride) + assert loadn == data_stride + + @pytest.mark.parametrize("intrin, elsizes, scale, fill", [ + ("self.loadn_tillz, self.loadn_till", (32, 64), 1, [0xffff]), + ("self.loadn2_tillz, self.loadn2_till", (32, 64), 2, [0xffff, 0x7fff]), + ]) + def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill): + if self._scalar_size() not in elsizes: + return + npyv_loadn_tillz, npyv_loadn_till = eval(intrin) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + for stride in range(-64, 64): + if stride < 0: + data = self._data(stride, -stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[-i::stride] for i in range(scale, 0, -1)]) + )) + elif stride == 0: + data = self._data() + data_stride = data[0:scale] * (self.nlanes // scale) + else: + data = self._data(count=stride * self.nlanes) + data_stride = list(itertools.chain( + *zip(*[data[i::stride] for i in range(scale)]) + )) + data_stride = list(self.load(data_stride)) # cast unsigned + for n in lanes: + nscale = n * scale + llanes = self.nlanes - nscale + data_stride_till = ( + data_stride[:nscale] + fill * (llanes // scale) + ) + loadn_till = npyv_loadn_till(data, stride, n, *fill) + assert loadn_till == data_stride_till + data_stride_tillz = data_stride[:nscale] + [0] * llanes + loadn_tillz = npyv_loadn_tillz(data, stride, n) + assert loadn_tillz == data_stride_tillz + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.storen", (32, 64), 1), + ("self.storen2", (32, 64), 2), + ]) + def test_memory_noncont_store(self, intrin, elsizes, scale): + if 
self._scalar_size() not in elsizes: + return + npyv_storen = eval(intrin) + data = self._data() + vdata = self.load(data) + hlanes = self.nlanes // scale + for stride in range(1, 64): + data_storen = [0xff] * stride * self.nlanes + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s:s + scale] = data[i:i + scale] + storen = [0xff] * stride * self.nlanes + storen += [0x7f] * 64 + npyv_storen(storen, stride, vdata) + assert storen[:-64] == data_storen + assert storen[-64:] == [0x7f] * 64 # detect overflow + + for stride in range(-64, 0): + data_storen = [0xff] * -stride * self.nlanes + for s in range(0, hlanes * stride, stride): + i = (s // stride) * scale + data_storen[s - scale:s or None] = data[i:i + scale] + storen = [0x7f] * 64 + storen += [0xff] * -stride * self.nlanes + npyv_storen(storen, stride, vdata) + assert storen[64:] == data_storen + assert storen[:64] == [0x7f] * 64 # detect overflow + # stride 0 + data_storen = [0x7f] * self.nlanes + storen = data_storen.copy() + data_storen[0:scale] = data[-scale:] + npyv_storen(storen, 0, vdata) + assert storen == data_storen + + @pytest.mark.parametrize("intrin, elsizes, scale", [ + ("self.storen_till", (32, 64), 1), + ("self.storen2_till", (32, 64), 2), + ]) + def test_memory_noncont_partial_store(self, intrin, elsizes, scale): + if self._scalar_size() not in elsizes: + return + npyv_storen_till = eval(intrin) + data = self._data() + vdata = self.load(data) + lanes = list(range(1, self.nlanes + 1)) + lanes += [self.nlanes**2, self.nlanes**4] + hlanes = self.nlanes // scale + for stride in range(1, 64): + for n in lanes: + data_till = [0xff] * stride * self.nlanes + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s:s + scale] = tdata[i:i + scale] + storen_till = [0xff] * stride * self.nlanes + storen_till += [0x7f] * 64 + npyv_storen_till(storen_till, stride, n, vdata) + assert storen_till[:-64] == data_till + assert storen_till[-64:] == [0x7f] * 64 # detect overflow + + for stride in range(-64, 0): + for n in lanes: + data_till = [0xff] * -stride * self.nlanes + tdata = data[:n * scale] + [0xff] * (self.nlanes - n * scale) + for s in range(0, hlanes * stride, stride)[:n]: + i = (s // stride) * scale + data_till[s - scale:s or None] = tdata[i:i + scale] + storen_till = [0x7f] * 64 + storen_till += [0xff] * -stride * self.nlanes + npyv_storen_till(storen_till, stride, n, vdata) + assert storen_till[64:] == data_till + assert storen_till[:64] == [0x7f] * 64 # detect overflow + + # stride 0 + for n in lanes: + data_till = [0x7f] * self.nlanes + storen_till = data_till.copy() + data_till[0:scale] = data[:n * scale][-scale:] + npyv_storen_till(storen_till, 0, n, vdata) + assert storen_till == data_till + + @pytest.mark.parametrize("intrin, table_size, elsize", [ + ("self.lut32", 32, 32), + ("self.lut16", 16, 64) + ]) + def test_lut(self, intrin, table_size, elsize): + """ + Test lookup table intrinsics: + npyv_lut32_##sfx + npyv_lut16_##sfx + """ + if elsize != self._scalar_size(): + return + intrin = eval(intrin) + idx_itrin = getattr(self.npyv, f"setall_u{elsize}") + table = range(table_size) + for i in table: + broadi = self.setall(i) + idx = idx_itrin(i) + lut = intrin(table, idx) + assert lut == broadi + + def test_misc(self): + broadcast_zero = self.zero() + assert broadcast_zero == [0] * self.nlanes + for i in range(1, 10): + broadcasti = self.setall(i) + assert broadcasti == [i] * self.nlanes + + 
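+        # A brief sketch of the two constructors below (comment only): `set`
+        # builds a vector from positional scalars, while `setf` additionally
+        # takes a leading fill scalar for lanes left unspecified at the C
+        # level; the Python wrappers require all lanes to be passed.
+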
+        data_a, data_b = self._data(), self._data(reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+        # The Python level of npyv_set_* doesn't support ignoring extra
+        # specified lanes or filling non-specified lanes with zero.
+        vset = self.set(*data_a)
+        assert vset == data_a
+        # The Python level of npyv_setf_* doesn't support ignoring extra
+        # specified lanes or filling non-specified lanes with the specified scalar.
+        vsetf = self.setf(10, *data_a)
+        assert vsetf == data_a
+
+        # We're testing the sanity of _simd's type-vector; the reinterpret*
+        # intrinsics themselves are tested via the compiler during the build
+        # of the _simd module
+        sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64"]
+        if self.npyv.simd_f64:
+            sfxes.append("f64")
+        if self.npyv.simd_f32:
+            sfxes.append("f32")
+        for sfx in sfxes:
+            vec_name = getattr(self, "reinterpret_" + sfx)(vdata_a).__name__
+            assert vec_name == "npyv_" + sfx
+
+        # select & mask operations
+        select_a = self.select(self.cmpeq(self.zero(), self.zero()), vdata_a, vdata_b)
+        assert select_a == data_a
+        select_b = self.select(self.cmpneq(self.zero(), self.zero()), vdata_a, vdata_b)
+        assert select_b == data_b
+
+        # test extract elements
+        assert self.extract0(vdata_b) == vdata_b[0]
+
+        # The cleanup intrinsic is only used with AVX for zeroing registers
+        # to avoid the AVX-SSE transition penalty, so there is nothing to
+        # test here beyond calling it
+        self.npyv.cleanup()
+
+    def test_reorder(self):
+        data_a, data_b = self._data(), self._data(reverse=True)
+        vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+        # lower half part
+        data_a_lo = data_a[:self.nlanes // 2]
+        data_b_lo = data_b[:self.nlanes // 2]
+        # higher half part
+        data_a_hi = data_a[self.nlanes // 2:]
+        data_b_hi = data_b[self.nlanes // 2:]
+        # combine two lower parts
+        combinel = self.combinel(vdata_a, vdata_b)
+        assert combinel == data_a_lo + data_b_lo
+        # combine two higher parts
+        combineh = self.combineh(vdata_a, vdata_b)
+        assert combineh == data_a_hi + data_b_hi
+        # combine x2
+        combine = self.combine(vdata_a, vdata_b)
+        assert combine == (data_a_lo + data_b_lo, data_a_hi + data_b_hi)
+
+        # zip(interleave)
+        data_zipl = self.load([
+            v for p in zip(data_a_lo, data_b_lo) for v in p
+        ])
+        data_ziph = self.load([
+            v for p in zip(data_a_hi, data_b_hi) for v in p
+        ])
+        vzip = self.zip(vdata_a, vdata_b)
+        assert vzip == (data_zipl, data_ziph)
+        vzip = [0] * self.nlanes * 2
+        self._x2("store")(vzip, (vdata_a, vdata_b))
+        assert vzip == list(data_zipl) + list(data_ziph)
+
+        # unzip(deinterleave)
+        unzip = self.unzip(data_zipl, data_ziph)
+        assert unzip == (data_a, data_b)
+        unzip = self._x2("load")(list(data_zipl) + list(data_ziph))
+        assert unzip == (data_a, data_b)
+
+    def test_reorder_rev64(self):
+        # Reverse elements of each 64-bit lane
+        ssize = self._scalar_size()
+        if ssize == 64:
+            return
+        data_rev64 = [
+            y for x in range(0, self.nlanes, 64 // ssize)
+            for y in reversed(range(x, x + 64 // ssize))
+        ]
+        rev64 = self.rev64(self.load(range(self.nlanes)))
+        assert rev64 == data_rev64
+
+    def test_reorder_permi128(self):
+        """
+        Test permuting elements for each 128-bit lane.
+ npyv_permi128_##sfx + """ + ssize = self._scalar_size() + if ssize < 32: + return + data = self.load(self._data()) + permn = 128 // ssize + permd = permn - 1 + nlane128 = self.nlanes // permn + shfl = [0, 1] if ssize == 64 else [0, 2, 4, 6] + for i in range(permn): + indices = [(i >> shf) & permd for shf in shfl] + vperm = self.permi128(data, *indices) + data_vperm = [ + data[j + (e & -permn)] + for e, j in enumerate(indices * nlane128) + ] + assert vperm == data_vperm + + @pytest.mark.parametrize('func, intrin', [ + (operator.lt, "cmplt"), + (operator.le, "cmple"), + (operator.gt, "cmpgt"), + (operator.ge, "cmpge"), + (operator.eq, "cmpeq") + ]) + def test_operators_comparison(self, func, intrin): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + intrin = getattr(self, intrin) + + mask_true = self._true_mask() + + def to_bool(vector): + return [lane == mask_true for lane in vector] + + data_cmp = [func(a, b) for a, b in zip(data_a, data_b)] + cmp = to_bool(intrin(vdata_a, vdata_b)) + assert cmp == data_cmp + + def test_operators_logical(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + if self._is_fp(): + data_cast_a = self._to_unsigned(vdata_a) + data_cast_b = self._to_unsigned(vdata_b) + cast, cast_data = self._to_unsigned, self._to_unsigned + else: + data_cast_a, data_cast_b = data_a, data_b + cast, cast_data = lambda a: a, self.load + + data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)]) + vxor = cast(self.xor(vdata_a, vdata_b)) + assert vxor == data_xor + + data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)]) + vor = cast(getattr(self, "or")(vdata_a, vdata_b)) + assert vor == data_or + + data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)]) + vand = cast(getattr(self, "and")(vdata_a, vdata_b)) + assert vand == data_and + + data_not = cast_data([~a for a in data_cast_a]) + vnot = cast(getattr(self, "not")(vdata_a)) + assert vnot == data_not + + if self.sfx not in ("u8"): + return + data_andc = [a & ~b for a, b in zip(data_cast_a, data_cast_b)] + vandc = cast(self.andc(vdata_a, vdata_b)) + assert vandc == data_andc + + @pytest.mark.parametrize("intrin", ["any", "all"]) + @pytest.mark.parametrize("data", ( + [1, 2, 3, 4], + [-1, -2, -3, -4], + [0, 1, 2, 3, 4], + [0x7f, 0x7fff, 0x7fffffff, 0x7fffffffffffffff], + [0, -1, -2, -3, 4], + [0], + [1], + [-1] + )) + def test_operators_crosstest(self, intrin, data): + """ + Test intrinsics: + npyv_any_##SFX + npyv_all_##SFX + """ + data_a = self.load(data * self.nlanes) + func = eval(intrin) + intrin = getattr(self, intrin) + desired = func(data_a) + simd = intrin(data_a) + assert not not simd == desired + + def test_conversion_boolean(self): + bsfx = "b" + self.sfx[1:] + to_boolean = getattr(self.npyv, f"cvt_{bsfx}_{self.sfx}") + from_boolean = getattr(self.npyv, f"cvt_{self.sfx}_{bsfx}") + + false_vb = to_boolean(self.setall(0)) + true_vb = self.cmpeq(self.setall(0), self.setall(0)) + assert false_vb != true_vb + + false_vsfx = from_boolean(false_vb) + true_vsfx = from_boolean(true_vb) + assert false_vsfx != true_vsfx + + def test_conversion_expand(self): + """ + Test expand intrinsics: + npyv_expand_u16_u8 + npyv_expand_u32_u16 + """ + if self.sfx 
not in ("u8", "u16"): + return + totype = self.sfx[0] + str(int(self.sfx[1:]) * 2) + expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}") + # close enough from the edge to detect any deviation + data = self._data(self._int_max() - self.nlanes) + vdata = self.load(data) + edata = expand(vdata) + # lower half part + data_lo = data[:self.nlanes // 2] + # higher half part + data_hi = data[self.nlanes // 2:] + assert edata == (data_lo, data_hi) + + def test_arithmetic_subadd(self): + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # non-saturated + data_add = self.load([a + b for a, b in zip(data_a, data_b)]) # load to cast + add = self.add(vdata_a, vdata_b) + assert add == data_add + data_sub = self.load([a - b for a, b in zip(data_a, data_b)]) + sub = self.sub(vdata_a, vdata_b) + assert sub == data_sub + + def test_arithmetic_mul(self): + if self.sfx in ("u64", "s64"): + return + + if self._is_fp(): + data_a = self._data() + else: + data_a = self._data(self._int_max() - self.nlanes) + data_b = self._data(self._int_min(), reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + data_mul = self.load([a * b for a, b in zip(data_a, data_b)]) + mul = self.mul(vdata_a, vdata_b) + assert mul == data_mul + + def test_arithmetic_div(self): + if not self._is_fp(): + return + + data_a, data_b = self._data(), self._data(reverse=True) + vdata_a, vdata_b = self.load(data_a), self.load(data_b) + + # load to truncate f64 to precision of f32 + data_div = self.load([a / b for a, b in zip(data_a, data_b)]) + div = self.div(vdata_a, vdata_b) + assert div == data_div + + def test_arithmetic_intdiv(self): + """ + Test integer division intrinsics: + npyv_divisor_##sfx + npyv_divc_##sfx + """ + if self._is_fp(): + return + + int_min = self._int_min() + + def trunc_div(a, d): + """ + Divide towards zero works with large integers > 2^53, + and wrap around overflow similar to what C does. 
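+            For example, trunc_div(-7, 2) == -3 (rounding towards zero),
+            whereas Python's floor division gives -7 // 2 == -4.
+            """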
+ """ + if d == -1 and a == int_min: + return a + sign_a, sign_d = a < 0, d < 0 + if a == 0 or sign_a == sign_d: + return a // d + return (a + sign_d - sign_a) // d + 1 + + data = [1, -int_min] # to test overflow + data += range(0, 2**8, 2**5) + data += range(0, 2**8, 2**5 - 1) + bsize = self._scalar_size() + if bsize > 8: + data += range(2**8, 2**16, 2**13) + data += range(2**8, 2**16, 2**13 - 1) + if bsize > 16: + data += range(2**16, 2**32, 2**29) + data += range(2**16, 2**32, 2**29 - 1) + if bsize > 32: + data += range(2**32, 2**64, 2**61) + data += range(2**32, 2**64, 2**61 - 1) + # negate + data += [-x for x in data] + for dividend, divisor in itertools.product(data, data): + divisor = self.setall(divisor)[0] # cast + if divisor == 0: + continue + dividend = self.load(self._data(dividend)) + data_divc = [trunc_div(a, divisor) for a in dividend] + divisor_parms = self.divisor(divisor) + divc = self.divc(dividend, divisor_parms) + assert divc == data_divc + + def test_arithmetic_reduce_sum(self): + """ + Test reduce sum intrinsics: + npyv_sum_##sfx + """ + if self.sfx not in ("u32", "u64", "f32", "f64"): + return + # reduce sum + data = self._data() + vdata = self.load(data) + + data_sum = sum(data) + vsum = self.sum(vdata) + assert vsum == data_sum + + def test_arithmetic_reduce_sumup(self): + """ + Test extend reduce sum intrinsics: + npyv_sumup_##sfx + """ + if self.sfx not in ("u8", "u16"): + return + rdata = (0, self.nlanes, self._int_min(), self._int_max() - self.nlanes) + for r in rdata: + data = self._data(r) + vdata = self.load(data) + data_sum = sum(data) + vsum = self.sumup(vdata) + assert vsum == data_sum + + def test_mask_conditional(self): + """ + Conditional addition and subtraction for all supported data types. + Test intrinsics: + npyv_ifadd_##SFX, npyv_ifsub_##SFX + """ + vdata_a = self.load(self._data()) + vdata_b = self.load(self._data(reverse=True)) + true_mask = self.cmpeq(self.zero(), self.zero()) + false_mask = self.cmpneq(self.zero(), self.zero()) + + data_sub = self.sub(vdata_b, vdata_a) + ifsub = self.ifsub(true_mask, vdata_b, vdata_a, vdata_b) + assert ifsub == data_sub + ifsub = self.ifsub(false_mask, vdata_a, vdata_b, vdata_b) + assert ifsub == vdata_b + + data_add = self.add(vdata_b, vdata_a) + ifadd = self.ifadd(true_mask, vdata_b, vdata_a, vdata_b) + assert ifadd == data_add + ifadd = self.ifadd(false_mask, vdata_a, vdata_b, vdata_b) + assert ifadd == vdata_b + + if not self._is_fp(): + return + data_div = self.div(vdata_b, vdata_a) + ifdiv = self.ifdiv(true_mask, vdata_b, vdata_a, vdata_b) + assert ifdiv == data_div + ifdivz = self.ifdivz(true_mask, vdata_b, vdata_a) + assert ifdivz == data_div + ifdiv = self.ifdiv(false_mask, vdata_a, vdata_b, vdata_b) + assert ifdiv == vdata_b + ifdivz = self.ifdivz(false_mask, vdata_a, vdata_b) + assert ifdivz == self.zero() + + +bool_sfx = ("b8", "b16", "b32", "b64") +int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64") +fp_sfx = ("f32", "f64") +all_sfx = int_sfx + fp_sfx +tests_registry = { + bool_sfx: _SIMD_BOOL, + int_sfx: _SIMD_INT, + fp_sfx: _SIMD_FP, + ("f32",): _SIMD_FP32, + ("f64",): _SIMD_FP64, + all_sfx: _SIMD_ALL +} +for target_name, npyv in targets.items(): + simd_width = npyv.simd if npyv else '' + pretty_name = target_name.split('__') # multi-target separator + if len(pretty_name) > 1: + # multi-target + pretty_name = f"({' '.join(pretty_name)})" + else: + pretty_name = pretty_name[0] + + skip = "" + skip_sfx = {} + if not npyv: + skip = f"target '{pretty_name}' isn't supported by current 
machine"
+    elif not npyv.simd:
+        skip = f"target '{pretty_name}' isn't supported by NPYV"
+    else:
+        if not npyv.simd_f32:
+            skip_sfx["f32"] = f"target '{pretty_name}' " \
+                              "doesn't support single-precision"
+        if not npyv.simd_f64:
+            skip_sfx["f64"] = f"target '{pretty_name}' " \
+                              "doesn't support double-precision"
+
+    for sfxes, cls in tests_registry.items():
+        for sfx in sfxes:
+            skip_m = skip_sfx.get(sfx, skip)
+            inhr = (cls,)
+            attr = {"npyv": targets[target_name], "sfx": sfx, "target_name": target_name}
+            tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr)
+            if skip_m:
+                pytest.mark.skip(reason=skip_m)(tcls)
+            globals()[tcls.__name__] = tcls
diff --git a/python/numpy/_core/tests/test_simd_module.py b/python/numpy/_core/tests/test_simd_module.py
new file mode 100644
index 000000000..dca83fd42
--- /dev/null
+++ b/python/numpy/_core/tests/test_simd_module.py
@@ -0,0 +1,103 @@
+import pytest
+
+from numpy._core._simd import targets
+
+"""
+This testing unit is only for checking the sanity of common functionality;
+therefore, all we need is to take one submodule that represents any of the
+enabled SIMD extensions and run most of the tests on it, plus a second
+submodule, required only for the single check related to mixing data types
+between submodules.
+"""
+npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd]
+npyv, npyv2 = (npyvs + [None, None])[:2]
+
+unsigned_sfx = ["u8", "u16", "u32", "u64"]
+signed_sfx = ["s8", "s16", "s32", "s64"]
+fp_sfx = []
+if npyv and npyv.simd_f32:
+    fp_sfx.append("f32")
+if npyv and npyv.simd_f64:
+    fp_sfx.append("f64")
+
+int_sfx = unsigned_sfx + signed_sfx
+all_sfx = int_sfx + fp_sfx
+
+@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support")
+class Test_SIMD_MODULE:
+
+    @pytest.mark.parametrize('sfx', all_sfx)
+    def test_num_lanes(self, sfx):
+        nlanes = getattr(npyv, "nlanes_" + sfx)
+        vector = getattr(npyv, "setall_" + sfx)(1)
+        assert len(vector) == nlanes
+
+    @pytest.mark.parametrize('sfx', all_sfx)
+    def test_type_name(self, sfx):
+        vector = getattr(npyv, "setall_" + sfx)(1)
+        assert vector.__name__ == "npyv_" + sfx
+
+    def test_raises(self):
+        a, b = [npyv.setall_u32(1)] * 2
+        for sfx in all_sfx:
+            vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}")
+            pytest.raises(TypeError, vcb("add"), a)
+            pytest.raises(TypeError, vcb("add"), a, b, a)
+            pytest.raises(TypeError, vcb("setall"))
+            pytest.raises(TypeError, vcb("setall"), [1])
+            pytest.raises(TypeError, vcb("load"), 1)
+            pytest.raises(ValueError, vcb("load"), [1])
+            pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a))
+
+    @pytest.mark.skipif(not npyv2, reason=(
+        "could not find a second SIMD extension with NPYV support"
+    ))
+    def test_nomix(self):
+        # mixing among submodules isn't allowed
+        a = npyv.setall_u32(1)
+        a2 = npyv2.setall_u32(1)
+        pytest.raises(TypeError, npyv.add_u32, a2, a2)
+        pytest.raises(TypeError, npyv2.add_u32, a, a)
+
+    @pytest.mark.parametrize('sfx', unsigned_sfx)
+    def test_unsigned_overflow(self, sfx):
+        nlanes = getattr(npyv, "nlanes_" + sfx)
+        maxu = (1 << int(sfx[1:])) - 1
+        maxu_72 = (1 << 72) - 1
+        lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0]
+        assert lane == maxu
+        lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes)
+        assert lanes == [maxu] * nlanes
+        lane = getattr(npyv, "setall_" + sfx)(-1)[0]
+        assert lane == maxu
+        lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes)
+        assert lanes == [maxu] * nlanes
+
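+    # A worked example of the wrap-around checked below (comment only): just
+    # the low `width` bits of the Python int survive the conversion and are
+    # interpreted as two's complement, e.g. for s8:
+    #     (1 << 71) - 1  ->  low byte 0xff  ->  -1
+    #     -1 << 71       ->  low byte 0x00  ->   0
+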
@pytest.mark.parametrize('sfx', signed_sfx) + def test_signed_overflow(self, sfx): + nlanes = getattr(npyv, "nlanes_" + sfx) + maxs_72 = (1 << 71) - 1 + lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0] + assert lane == -1 + lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes) + assert lanes == [-1] * nlanes + mins_72 = -1 << 71 + lane = getattr(npyv, "setall_" + sfx)(mins_72)[0] + assert lane == 0 + lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes) + assert lanes == [0] * nlanes + + def test_truncate_f32(self): + if not npyv.simd_f32: + pytest.skip("F32 isn't supported by the SIMD extension") + f32 = npyv.setall_f32(0.1)[0] + assert f32 != 0.1 + assert round(f32, 1) == 0.1 + + def test_compare(self): + data_range = range(npyv.nlanes_u32) + vdata = npyv.load_u32(data_range) + assert vdata == list(data_range) + assert vdata == tuple(data_range) + for i in data_range: + assert vdata[i] == data_range[i] diff --git a/python/numpy/_core/tests/test_stringdtype.py b/python/numpy/_core/tests/test_stringdtype.py new file mode 100644 index 000000000..139f86c3f --- /dev/null +++ b/python/numpy/_core/tests/test_stringdtype.py @@ -0,0 +1,1814 @@ +import copy +import itertools +import os +import pickle +import sys +import tempfile + +import pytest + +import numpy as np +from numpy._core.tests._natype import get_stringdtype_dtype as get_dtype +from numpy._core.tests._natype import pd_NA +from numpy.dtypes import StringDType +from numpy.testing import IS_PYPY, assert_array_equal + + +@pytest.fixture +def string_list(): + return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"] + + +# second copy for cast tests to do a cartesian product over dtypes +@pytest.fixture(params=[True, False]) +def coerce2(request): + return request.param + + +@pytest.fixture( + params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"], + ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"], +) +def na_object2(request): + return request.param + + +@pytest.fixture() +def dtype2(na_object2, coerce2): + # explicit `is` check for pd_NA because != with pd_NA returns pd_NA + if na_object2 is pd_NA or na_object2 != "unset": + return StringDType(na_object=na_object2, coerce=coerce2) + else: + return StringDType(coerce=coerce2) + + +def test_dtype_creation(): + hashes = set() + dt = StringDType() + assert not hasattr(dt, "na_object") and dt.coerce is True + hashes.add(hash(dt)) + + dt = StringDType(na_object=None) + assert dt.na_object is None and dt.coerce is True + hashes.add(hash(dt)) + + dt = StringDType(coerce=False) + assert not hasattr(dt, "na_object") and dt.coerce is False + hashes.add(hash(dt)) + + dt = StringDType(na_object=None, coerce=False) + assert dt.na_object is None and dt.coerce is False + hashes.add(hash(dt)) + + assert len(hashes) == 4 + + dt = np.dtype("T") + assert dt == StringDType() + assert dt.kind == "T" + assert dt.char == "T" + + hashes.add(hash(dt)) + assert len(hashes) == 4 + + +def test_dtype_equality(dtype): + assert dtype == dtype + for ch in "SU": + assert dtype != np.dtype(ch) + assert dtype != np.dtype(f"{ch}8") + + +def test_dtype_repr(dtype): + if not hasattr(dtype, "na_object") and dtype.coerce: + assert repr(dtype) == "StringDType()" + elif dtype.coerce: + assert repr(dtype) == f"StringDType(na_object={dtype.na_object!r})" + elif not hasattr(dtype, "na_object"): + assert repr(dtype) == "StringDType(coerce=False)" + else: + assert ( + repr(dtype) + == f"StringDType(na_object={dtype.na_object!r}, coerce=False)" + ) + + +def test_create_with_na(dtype): + 
if not hasattr(dtype, "na_object"): + pytest.skip("does not have an na object") + na_val = dtype.na_object + string_list = ["hello", na_val, "world"] + arr = np.array(string_list, dtype=dtype) + assert str(arr) == "[" + " ".join([repr(s) for s in string_list]) + "]" + assert arr[1] is dtype.na_object + + +@pytest.mark.parametrize("i", list(range(5))) +def test_set_replace_na(i): + # Test strings of various lengths can be set to NaN and then replaced. + s_empty = "" + s_short = "0123456789" + s_medium = "abcdefghijklmnopqrstuvwxyz" + s_long = "-=+" * 100 + strings = [s_medium, s_empty, s_short, s_medium, s_long] + a = np.array(strings, StringDType(na_object=np.nan)) + for s in [a[i], s_medium + s_short, s_short, s_empty, s_long]: + a[i] = np.nan + assert np.isnan(a[i]) + a[i] = s + assert a[i] == s + assert_array_equal(a, strings[:i] + [s] + strings[i + 1:]) + + +def test_null_roundtripping(): + data = ["hello\0world", "ABC\0DEF\0\0"] + arr = np.array(data, dtype="T") + assert data[0] == arr[0] + assert data[1] == arr[1] + + +def test_string_too_large_error(): + arr = np.array(["a", "b", "c"], dtype=StringDType()) + with pytest.raises(OverflowError): + arr * (sys.maxsize + 1) + + +@pytest.mark.parametrize( + "data", + [ + ["abc", "def", "ghi"], + ["🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ], +) +def test_array_creation_utf8(dtype, data): + arr = np.array(data, dtype=dtype) + assert str(arr) == "[" + " ".join(["'" + str(d) + "'" for d in data]) + "]" + assert arr.dtype == dtype + + +@pytest.mark.parametrize( + "data", + [ + [1, 2, 3], + [b"abc", b"def", b"ghi"], + [object, object, object], + ], +) +def test_scalars_string_conversion(data, dtype): + try: + str_vals = [str(d.decode('utf-8')) for d in data] + except AttributeError: + str_vals = [str(d) for d in data] + if dtype.coerce: + assert_array_equal( + np.array(data, dtype=dtype), + np.array(str_vals, dtype=dtype), + ) + else: + with pytest.raises(ValueError): + np.array(data, dtype=dtype) + + +@pytest.mark.parametrize( + ("strings"), + [ + ["this", "is", "an", "array"], + ["€", "", "😊"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_self_casts(dtype, dtype2, strings): + if hasattr(dtype, "na_object"): + strings = strings + [dtype.na_object] + elif hasattr(dtype2, "na_object"): + strings = strings + [""] + arr = np.array(strings, dtype=dtype) + newarr = arr.astype(dtype2) + + if hasattr(dtype, "na_object") and not hasattr(dtype2, "na_object"): + assert newarr[-1] == str(dtype.na_object) + with pytest.raises(TypeError): + arr.astype(dtype2, casting="safe") + elif hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): + assert newarr[-1] is dtype2.na_object + arr.astype(dtype2, casting="safe") + elif hasattr(dtype2, "na_object"): + assert newarr[-1] == "" + arr.astype(dtype2, casting="safe") + else: + arr.astype(dtype2, casting="safe") + + if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): + na1 = dtype.na_object + na2 = dtype2.na_object + if (na1 is not na2 and + # check for pd_NA first because bool(pd_NA) is an error + ((na1 is pd_NA or na2 is pd_NA) or + # the second check is a NaN check, spelled this way + # to avoid errors from math.isnan and np.isnan + (na1 != na2 and not (na1 != na1 and na2 != na2)))): + with pytest.raises(TypeError): + arr[:-1] == newarr[:-1] + return + assert_array_equal(arr[:-1], newarr[:-1]) + + +@pytest.mark.parametrize( + ("strings"), + [ + ["this", "is", "an", "array"], + ["€", "", "😊"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +class 
TestStringLikeCasts: + def test_unicode_casts(self, dtype, strings): + arr = np.array(strings, dtype=np.str_).astype(dtype) + expected = np.array(strings, dtype=dtype) + assert_array_equal(arr, expected) + + arr_as_U8 = expected.astype("U8") + assert_array_equal(arr_as_U8, np.array(strings, dtype="U8")) + assert_array_equal(arr_as_U8.astype(dtype), arr) + arr_as_U3 = expected.astype("U3") + assert_array_equal(arr_as_U3, np.array(strings, dtype="U3")) + assert_array_equal( + arr_as_U3.astype(dtype), + np.array([s[:3] for s in strings], dtype=dtype), + ) + + def test_void_casts(self, dtype, strings): + sarr = np.array(strings, dtype=dtype) + utf8_bytes = [s.encode("utf-8") for s in strings] + void_dtype = f"V{max(len(s) for s in utf8_bytes)}" + varr = np.array(utf8_bytes, dtype=void_dtype) + assert_array_equal(varr, sarr.astype(void_dtype)) + assert_array_equal(varr.astype(dtype), sarr) + + def test_bytes_casts(self, dtype, strings): + sarr = np.array(strings, dtype=dtype) + try: + utf8_bytes = [s.encode("ascii") for s in strings] + bytes_dtype = f"S{max(len(s) for s in utf8_bytes)}" + barr = np.array(utf8_bytes, dtype=bytes_dtype) + assert_array_equal(barr, sarr.astype(bytes_dtype)) + assert_array_equal(barr.astype(dtype), sarr) + if dtype.coerce: + barr = np.array(utf8_bytes, dtype=dtype) + assert_array_equal(barr, sarr) + barr = np.array(utf8_bytes, dtype="O") + assert_array_equal(barr.astype(dtype), sarr) + else: + with pytest.raises(ValueError): + np.array(utf8_bytes, dtype=dtype) + except UnicodeEncodeError: + with pytest.raises(UnicodeEncodeError): + sarr.astype("S20") + + +def test_additional_unicode_cast(random_string_list, dtype): + arr = np.array(random_string_list, dtype=dtype) + # test that this short-circuits correctly + assert_array_equal(arr, arr.astype(arr.dtype)) + # tests the casts via the comparison promoter + assert_array_equal(arr, arr.astype(random_string_list.dtype)) + + +def test_insert_scalar(dtype, string_list): + """Test that inserting a scalar works.""" + arr = np.array(string_list, dtype=dtype) + scalar_instance = "what" + arr[1] = scalar_instance + assert_array_equal( + arr, + np.array(string_list[:1] + ["what"] + string_list[2:], dtype=dtype), + ) + + +comparison_operators = [ + np.equal, + np.not_equal, + np.greater, + np.greater_equal, + np.less, + np.less_equal, +] + + +@pytest.mark.parametrize("op", comparison_operators) +@pytest.mark.parametrize("o_dtype", [np.str_, object, StringDType()]) +def test_comparisons(string_list, dtype, op, o_dtype): + sarr = np.array(string_list, dtype=dtype) + oarr = np.array(string_list, dtype=o_dtype) + + # test that comparison operators work + res = op(sarr, sarr) + ores = op(oarr, oarr) + # test that promotion works as well + orres = op(sarr, oarr) + olres = op(oarr, sarr) + + assert_array_equal(res, ores) + assert_array_equal(res, orres) + assert_array_equal(res, olres) + + # test we get the correct answer for unequal length strings + sarr2 = np.array([s + "2" for s in string_list], dtype=dtype) + oarr2 = np.array([s + "2" for s in string_list], dtype=o_dtype) + + res = op(sarr, sarr2) + ores = op(oarr, oarr2) + olres = op(oarr, sarr2) + orres = op(sarr, oarr2) + + assert_array_equal(res, ores) + assert_array_equal(res, olres) + assert_array_equal(res, orres) + + res = op(sarr2, sarr) + ores = op(oarr2, oarr) + olres = op(oarr2, sarr) + orres = op(sarr2, oarr) + + assert_array_equal(res, ores) + assert_array_equal(res, olres) + assert_array_equal(res, orres) + + +def test_isnan(dtype, string_list): + if not 
hasattr(dtype, "na_object"): + pytest.skip("no na support") + sarr = np.array(string_list + [dtype.na_object], dtype=dtype) + is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object) + bool_errors = 0 + try: + bool(dtype.na_object) + except TypeError: + bool_errors = 1 + if is_nan or bool_errors: + # isnan is only true when na_object is a NaN + assert_array_equal( + np.isnan(sarr), + np.array([0] * len(string_list) + [1], dtype=np.bool), + ) + else: + assert not np.any(np.isnan(sarr)) + + +def test_pickle(dtype, string_list): + arr = np.array(string_list, dtype=dtype) + + with tempfile.NamedTemporaryFile("wb", delete=False) as f: + pickle.dump([arr, dtype], f) + + with open(f.name, "rb") as f: + res = pickle.load(f) + + assert_array_equal(res[0], arr) + assert res[1] == dtype + + os.remove(f.name) + + +def test_stdlib_copy(dtype, string_list): + arr = np.array(string_list, dtype=dtype) + + assert_array_equal(copy.copy(arr), arr) + assert_array_equal(copy.deepcopy(arr), arr) + + +@pytest.mark.parametrize( + "strings", + [ + ["left", "right", "leftovers", "righty", "up", "down"], + [ + "left" * 10, + "right" * 10, + "leftovers" * 10, + "righty" * 10, + "up" * 10, + ], + ["🤣🤣", "🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_sort(dtype, strings): + """Test that sorting matches python's internal sorting.""" + + def test_sort(strings, arr_sorted): + arr = np.array(strings, dtype=dtype) + na_object = getattr(arr.dtype, "na_object", "") + if na_object is None and None in strings: + with pytest.raises( + ValueError, + match="Cannot compare null that is not a nan-like value", + ): + np.argsort(arr) + argsorted = None + elif na_object is pd_NA or na_object != '': + argsorted = None + else: + argsorted = np.argsort(arr) + np.random.default_rng().shuffle(arr) + if na_object is None and None in strings: + with pytest.raises( + ValueError, + match="Cannot compare null that is not a nan-like value", + ): + arr.sort() + else: + arr.sort() + assert np.array_equal(arr, arr_sorted, equal_nan=True) + if argsorted is not None: + assert np.array_equal(argsorted, np.argsort(strings)) + + # make a copy so we don't mutate the lists in the fixture + strings = strings.copy() + arr_sorted = np.array(sorted(strings), dtype=dtype) + test_sort(strings, arr_sorted) + + if not hasattr(dtype, "na_object"): + return + + # make sure NAs get sorted to the end of the array and string NAs get + # sorted like normal strings + strings.insert(0, dtype.na_object) + strings.insert(2, dtype.na_object) + # can't use append because doing that with NA converts + # the result to object dtype + if not isinstance(dtype.na_object, str): + arr_sorted = np.array( + arr_sorted.tolist() + [dtype.na_object, dtype.na_object], + dtype=dtype, + ) + else: + arr_sorted = np.array(sorted(strings), dtype=dtype) + + test_sort(strings, arr_sorted) + + +@pytest.mark.parametrize( + "strings", + [ + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ["A¢☃€ 😊", "", " ", " "], + ["", "a", "😸", "ááðfáíóåéë"], + ], +) +def test_nonzero(strings, na_object): + dtype = get_dtype(na_object) + arr = np.array(strings, dtype=dtype) + is_nonzero = np.array( + [i for i, item in enumerate(strings) if len(item) != 0]) + assert_array_equal(arr.nonzero()[0], is_nonzero) + + if na_object is not pd_NA and na_object == 'unset': + return + + strings_with_na = np.array(strings + [na_object], dtype=dtype) + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + + if is_nan: + assert 
strings_with_na.nonzero()[0][-1] == 4 + else: + assert strings_with_na.nonzero()[0][-1] == 3 + + # check that the casting to bool and nonzero give consistent results + assert_array_equal(strings_with_na[strings_with_na.nonzero()], + strings_with_na[strings_with_na.astype(bool)]) + + +def test_where(string_list, na_object): + dtype = get_dtype(na_object) + a = np.array(string_list, dtype=dtype) + b = a[::-1] + res = np.where([True, False, True, False, True, False], a, b) + assert_array_equal(res, [a[0], b[1], a[2], b[3], a[4], b[5]]) + + +def test_fancy_indexing(string_list): + sarr = np.array(string_list, dtype="T") + assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) + + inds = [ + [True, True], + [0, 1], + ..., + np.array([0, 1], dtype='uint8'), + ] + + lops = [ + ['a' * 25, 'b' * 25], + ['', ''], + ['hello', 'world'], + ['hello', 'world' * 25], + ] + + # see gh-27003 and gh-27053 + for ind in inds: + for lop in lops: + a = np.array(lop, dtype="T") + assert_array_equal(a[ind], a) + rop = ['d' * 25, 'e' * 25] + for b in [rop, np.array(rop, dtype="T")]: + a[ind] = b + assert_array_equal(a, b) + assert a[0] == 'd' * 25 + + # see gh-29279 + data = [ + ["AAAAAAAAAAAAAAAAA"], + ["BBBBBBBBBBBBBBBBBBBBBBBBBBBBB"], + ["CCCCCCCCCCCCCCCCC"], + ["DDDDDDDDDDDDDDDDD"], + ] + sarr = np.array(data, dtype=np.dtypes.StringDType()) + uarr = np.array(data, dtype="U30") + for ind in [[0], [1], [2], [3], [[0, 0]], [[1, 1, 3]], [[1, 1]]]: + assert_array_equal(sarr[ind], uarr[ind]) + + +def test_flatiter_indexing(): + # see gh-29659 + arr = np.array(['hello', 'world'], dtype='T') + arr.flat[:] = 9223372036854775 + assert_array_equal(arr, np.array([9223372036854775] * 2, dtype='T')) + + +def test_creation_functions(): + assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) + assert_array_equal(np.empty(3, dtype="T"), ["", "", ""]) + + assert np.zeros(3, dtype="T")[0] == "" + assert np.empty(3, dtype="T")[0] == "" + + +def test_concatenate(string_list): + sarr = np.array(string_list, dtype="T") + sarr_cat = np.array(string_list + string_list, dtype="T") + + assert_array_equal(np.concatenate([sarr], axis=0), sarr) + + +def test_resize_method(string_list): + sarr = np.array(string_list, dtype="T") + if IS_PYPY: + sarr.resize(len(string_list) + 3, refcheck=False) + else: + sarr.resize(len(string_list) + 3) + assert_array_equal(sarr, np.array(string_list + [''] * 3, dtype="T")) + + +def test_create_with_copy_none(string_list): + arr = np.array(string_list, dtype=StringDType()) + # create another stringdtype array with an arena that has a different + # in-memory layout than the first array + arr_rev = np.array(string_list[::-1], dtype=StringDType()) + + # this should create a copy and the resulting array + # shouldn't share an allocator or arena with arr_rev, despite + # explicitly passing arr_rev.dtype + arr_copy = np.array(arr, copy=None, dtype=arr_rev.dtype) + np.testing.assert_array_equal(arr, arr_copy) + assert arr_copy.base is None + + with pytest.raises(ValueError, match="Unable to avoid copy"): + np.array(arr, copy=False, dtype=arr_rev.dtype) + + # because we're using arr's dtype instance, the view is safe + arr_view = np.array(arr, copy=None, dtype=arr.dtype) + np.testing.assert_array_equal(arr, arr) + np.testing.assert_array_equal(arr_view[::-1], arr_rev) + assert arr_view is arr + + +def test_astype_copy_false(): + orig_dt = StringDType() + arr = np.array(["hello", "world"], dtype=StringDType()) + assert not arr.astype(StringDType(coerce=False), copy=False).dtype.coerce + + assert 
arr.astype(orig_dt, copy=False).dtype is orig_dt + +@pytest.mark.parametrize( + "strings", + [ + ["left", "right", "leftovers", "righty", "up", "down"], + ["🤣🤣", "🤣", "📵", "😰"], + ["🚜", "🙃", "😾"], + ["😹", "🚠", "🚌"], + ["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"], + ], +) +def test_argmax(strings): + """Test that argmax/argmin matches what python calculates.""" + arr = np.array(strings, dtype="T") + assert np.argmax(arr) == strings.index(max(strings)) + assert np.argmin(arr) == strings.index(min(strings)) + + +@pytest.mark.parametrize( + "arrfunc,expected", + [ + [np.sort, None], + [np.nonzero, (np.array([], dtype=np.int_),)], + [np.argmax, 0], + [np.argmin, 0], + ], +) +def test_arrfuncs_zeros(arrfunc, expected): + arr = np.zeros(10, dtype="T") + result = arrfunc(arr) + if expected is None: + expected = arr + assert_array_equal(result, expected, strict=True) + + +@pytest.mark.parametrize( + ("strings", "cast_answer", "any_answer", "all_answer"), + [ + [["hello", "world"], [True, True], True, True], + [["", ""], [False, False], False, False], + [["hello", ""], [True, False], True, False], + [["", "world"], [False, True], True, False], + ], +) +def test_cast_to_bool(strings, cast_answer, any_answer, all_answer): + sarr = np.array(strings, dtype="T") + assert_array_equal(sarr.astype("bool"), cast_answer) + + assert np.any(sarr) == any_answer + assert np.all(sarr) == all_answer + + +@pytest.mark.parametrize( + ("strings", "cast_answer"), + [ + [[True, True], ["True", "True"]], + [[False, False], ["False", "False"]], + [[True, False], ["True", "False"]], + [[False, True], ["False", "True"]], + ], +) +def test_cast_from_bool(strings, cast_answer): + barr = np.array(strings, dtype=bool) + assert_array_equal(barr.astype("T"), np.array(cast_answer, dtype="T")) + + +@pytest.mark.parametrize("bitsize", [8, 16, 32, 64]) +@pytest.mark.parametrize("signed", [True, False]) +def test_sized_integer_casts(bitsize, signed): + idtype = f"int{bitsize}" + if signed: + inp = [-(2**p - 1) for p in reversed(range(bitsize - 1))] + inp += [2**p - 1 for p in range(1, bitsize - 1)] + else: + idtype = "u" + idtype + inp = [2**p - 1 for p in range(bitsize)] + ainp = np.array(inp, dtype=idtype) + assert_array_equal(ainp, ainp.astype("T").astype(idtype)) + + # safe casting works + ainp.astype("T", casting="safe") + + with pytest.raises(TypeError): + ainp.astype("T").astype(idtype, casting="safe") + + oob = [str(2**bitsize), str(-(2**bitsize))] + with pytest.raises(OverflowError): + np.array(oob, dtype="T").astype(idtype) + + with pytest.raises(ValueError): + np.array(["1", np.nan, "3"], + dtype=StringDType(na_object=np.nan)).astype(idtype) + + +@pytest.mark.parametrize("typename", ["byte", "short", "int", "longlong"]) +@pytest.mark.parametrize("signed", ["", "u"]) +def test_unsized_integer_casts(typename, signed): + idtype = f"{signed}{typename}" + + inp = [1, 2, 3, 4] + ainp = np.array(inp, dtype=idtype) + assert_array_equal(ainp, ainp.astype("T").astype(idtype)) + + +@pytest.mark.parametrize( + "typename", + [ + pytest.param( + "longdouble", + marks=pytest.mark.xfail( + np.dtypes.LongDoubleDType() != np.dtypes.Float64DType(), + reason="numpy lacks an ld2a implementation", + strict=True, + ), + ), + "float64", + "float32", + "float16", + ], +) +def test_float_casts(typename): + inp = [1.1, 2.8, -3.2, 2.7e4] + ainp = np.array(inp, dtype=typename) + assert_array_equal(ainp, ainp.astype("T").astype(typename)) + + inp = [0.1] + sres = np.array(inp, dtype=typename).astype("T") + res = sres.astype(typename) + 
assert_array_equal(np.array(inp, dtype=typename), res) + assert sres[0] == "0.1" + + if typename == "longdouble": + # let's not worry about platform-dependent rounding of longdouble + return + + fi = np.finfo(typename) + + inp = [1e-324, fi.smallest_subnormal, -1e-324, -fi.smallest_subnormal] + eres = [0, fi.smallest_subnormal, -0, -fi.smallest_subnormal] + res = np.array(inp, dtype=typename).astype("T").astype(typename) + assert_array_equal(eres, res) + + inp = [2e308, fi.max, -2e308, fi.min] + eres = [np.inf, fi.max, -np.inf, fi.min] + res = np.array(inp, dtype=typename).astype("T").astype(typename) + assert_array_equal(eres, res) + + +def test_float_nan_cast_na_object(): + # gh-28157 + dt = np.dtypes.StringDType(na_object=np.nan) + arr1 = np.full((1,), fill_value=np.nan, dtype=dt) + arr2 = np.full_like(arr1, fill_value=np.nan) + + assert arr1.item() is np.nan + assert arr2.item() is np.nan + + inp = [1.2, 2.3, np.nan] + arr = np.array(inp).astype(dt) + assert arr[2] is np.nan + assert arr[0] == '1.2' + + +@pytest.mark.parametrize( + "typename", + [ + "csingle", + "cdouble", + pytest.param( + "clongdouble", + marks=pytest.mark.xfail( + np.dtypes.CLongDoubleDType() != np.dtypes.Complex128DType(), + reason="numpy lacks an ld2a implementation", + strict=True, + ), + ), + ], +) +def test_cfloat_casts(typename): + inp = [1.1 + 1.1j, 2.8 + 2.8j, -3.2 - 3.2j, 2.7e4 + 2.7e4j] + ainp = np.array(inp, dtype=typename) + assert_array_equal(ainp, ainp.astype("T").astype(typename)) + + inp = [0.1 + 0.1j] + sres = np.array(inp, dtype=typename).astype("T") + res = sres.astype(typename) + assert_array_equal(np.array(inp, dtype=typename), res) + assert sres[0] == "(0.1+0.1j)" + + +def test_take(string_list): + sarr = np.array(string_list, dtype="T") + res = sarr.take(np.arange(len(string_list))) + assert_array_equal(sarr, res) + + # make sure it also works for out + out = np.empty(len(string_list), dtype="T") + out[0] = "hello" + res = sarr.take(np.arange(len(string_list)), out=out) + assert res is out + assert_array_equal(sarr, res) + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize( + "ufunc_name,func", + [ + ("min", min), + ("max", max), + ], +) +def test_ufuncs_minmax(string_list, ufunc_name, func, use_out): + """Test that the min/max ufuncs match Python builtin min/max behavior.""" + arr = np.array(string_list, dtype="T") + uarr = np.array(string_list, dtype=str) + res = np.array(func(string_list), dtype="T") + assert_array_equal(getattr(arr, ufunc_name)(), res) + + ufunc = getattr(np, ufunc_name + "imum") + + if use_out: + res = ufunc(arr, arr, out=arr) + else: + res = ufunc(arr, arr) + + assert_array_equal(uarr, res) + assert_array_equal(getattr(arr, ufunc_name)(), func(string_list)) + + +def test_max_regression(): + arr = np.array(['y', 'y', 'z'], dtype="T") + assert arr.max() == 'z' + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize( + "other_strings", + [ + ["abc", "def" * 500, "ghi" * 16, "🤣" * 100, "📵", "😰"], + ["🚜", "🙃", "😾", "😹", "🚠", "🚌"], + ["🥦", "¨", "⨯", "∰ ", "⨌ ", "⎶ "], + ], +) +def test_ufunc_add(dtype, string_list, other_strings, use_out): + arr1 = np.array(string_list, dtype=dtype) + arr2 = np.array(other_strings, dtype=dtype) + result = np.array([a + b for a, b in zip(arr1, arr2)], dtype=dtype) + + if use_out: + res = np.add(arr1, arr2, out=arr1) + else: + res = np.add(arr1, arr2) + + assert_array_equal(res, result) + + if not hasattr(dtype, "na_object"): + return + + is_nan = isinstance(dtype.na_object, float) and 
np.isnan(dtype.na_object) + is_str = isinstance(dtype.na_object, str) + bool_errors = 0 + try: + bool(dtype.na_object) + except TypeError: + bool_errors = 1 + + arr1 = np.array([dtype.na_object] + string_list, dtype=dtype) + arr2 = np.array(other_strings + [dtype.na_object], dtype=dtype) + + if is_nan or bool_errors or is_str: + res = np.add(arr1, arr2) + assert_array_equal(res[1:-1], arr1[1:-1] + arr2[1:-1]) + if not is_str: + assert res[0] is dtype.na_object and res[-1] is dtype.na_object + else: + assert res[0] == dtype.na_object + arr2[0] + assert res[-1] == arr1[-1] + dtype.na_object + else: + with pytest.raises(ValueError): + np.add(arr1, arr2) + + +def test_ufunc_add_reduce(dtype): + values = ["a", "this is a long string", "c"] + arr = np.array(values, dtype=dtype) + out = np.empty((), dtype=dtype) + + expected = np.array("".join(values), dtype=dtype) + assert_array_equal(np.add.reduce(arr), expected) + + np.add.reduce(arr, out=out) + assert_array_equal(out, expected) + + +def test_add_promoter(string_list): + arr = np.array(string_list, dtype=StringDType()) + lresult = np.array(["hello" + s for s in string_list], dtype=StringDType()) + rresult = np.array([s + "hello" for s in string_list], dtype=StringDType()) + + for op in ["hello", np.str_("hello"), np.array(["hello"])]: + assert_array_equal(op + arr, lresult) + assert_array_equal(arr + op, rresult) + + # The promoter should be able to handle things if users pass `dtype=` + res = np.add("hello", string_list, dtype=StringDType) + assert res.dtype == StringDType() + + # The promoter should not kick in if users override the input, + # which means arr is cast, this fails because of the unknown length. + with pytest.raises(TypeError, match="cannot cast dtype"): + np.add(arr, "add", signature=("U", "U", None), casting="unsafe") + + # But it must simply reject the following: + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add(arr, "add", signature=(None, "U", None)) + + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("a", "b", signature=("U", "U", StringDType)) + + +def test_add_no_legacy_promote_with_signature(): + # Possibly misplaced, but useful to test with string DType. We check that + # if there is clearly no loop found, a stray `dtype=` doesn't break things + # Regression test for the bad error in gh-26735 + # (If legacy promotion is gone, this can be deleted...) + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("3", 6, dtype=StringDType) + + +def test_add_promoter_reduce(): + # Exact TypeError could change, but ensure StringDtype doesn't match + with pytest.raises(TypeError, match="the resolved dtypes are not"): + np.add.reduce(np.array(["a", "b"], dtype="U")) + + # On the other hand, using `dtype=T` in the *ufunc* should work. + np.add.reduce(np.array(["a", "b"], dtype="U"), dtype=np.dtypes.StringDType) + + +def test_multiply_reduce(): + # At the time of writing (NumPy 2.0) this is very limited (and rather + # ridiculous anyway). But it works and actually makes some sense... 
+ # (NumPy does not allow non-scalar initial values) + repeats = np.array([2, 3, 4]) + val = "school-🚌" + res = np.multiply.reduce(repeats, initial=val, dtype=np.dtypes.StringDType) + assert res == val * np.prod(repeats) + + +def test_multiply_two_string_raises(): + arr = np.array(["hello", "world"], dtype="T") + with pytest.raises(np._core._exceptions._UFuncNoLoopError): + np.multiply(arr, arr) + + +@pytest.mark.parametrize("use_out", [True, False]) +@pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]]) +@pytest.mark.parametrize( + "other_dtype", + [ + None, + "int8", + "int16", + "int32", + "int64", + "uint8", + "uint16", + "uint32", + "uint64", + "short", + "int", + "intp", + "long", + "longlong", + "ushort", + "uint", + "uintp", + "ulong", + "ulonglong", + ], +) +def test_ufunc_multiply(dtype, string_list, other, other_dtype, use_out): + """Test the two-argument ufuncs match python builtin behavior.""" + arr = np.array(string_list, dtype=dtype) + if other_dtype is not None: + other_dtype = np.dtype(other_dtype) + try: + len(other) + result = [s * o for s, o in zip(string_list, other)] + other = np.array(other) + if other_dtype is not None: + other = other.astype(other_dtype) + except TypeError: + if other_dtype is not None: + other = other_dtype.type(other) + result = [s * other for s in string_list] + + if use_out: + arr_cache = arr.copy() + lres = np.multiply(arr, other, out=arr) + assert_array_equal(lres, result) + arr[:] = arr_cache + assert lres is arr + arr *= other + assert_array_equal(arr, result) + arr[:] = arr_cache + rres = np.multiply(other, arr, out=arr) + assert rres is arr + assert_array_equal(rres, result) + else: + lres = arr * other + assert_array_equal(lres, result) + rres = other * arr + assert_array_equal(rres, result) + + if not hasattr(dtype, "na_object"): + return + + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + bool_errors = 0 + try: + bool(dtype.na_object) + except TypeError: + bool_errors = 1 + + arr = np.array(string_list + [dtype.na_object], dtype=dtype) + + try: + len(other) + other = np.append(other, 3) + if other_dtype is not None: + other = other.astype(other_dtype) + except TypeError: + pass + + if is_nan or bool_errors or is_str: + for res in [arr * other, other * arr]: + assert_array_equal(res[:-1], result) + if not is_str: + assert res[-1] is dtype.na_object + else: + try: + assert res[-1] == dtype.na_object * other[-1] + except (IndexError, TypeError): + assert res[-1] == dtype.na_object * other + else: + with pytest.raises(TypeError): + arr * other + with pytest.raises(TypeError): + other * arr + + +def test_findlike_promoters(): + r = "Wally" + l = "Where's Wally?" + s = np.int32(3) + e = np.int8(13) + for dtypes in [("T", "U"), ("U", "T")]: + for function, answer in [ + (np.strings.index, 8), + (np.strings.endswith, True), + ]: + assert answer == function( + np.array(l, dtype=dtypes[0]), np.array(r, dtype=dtypes[1]), s, e + ) + + +def test_strip_promoter(): + arg = ["Hello!!!!", "Hello??!!"] + strip_char = "!" 
+ answer = ["Hello", "Hello??"] + for dtypes in [("T", "U"), ("U", "T")]: + result = np.strings.strip( + np.array(arg, dtype=dtypes[0]), + np.array(strip_char, dtype=dtypes[1]) + ) + assert_array_equal(result, answer) + assert result.dtype.char == "T" + + +def test_replace_promoter(): + arg = ["Hello, planet!", "planet, Hello!"] + old = "planet" + new = "world" + answer = ["Hello, world!", "world, Hello!"] + for dtypes in itertools.product("TU", repeat=3): + if dtypes == ("U", "U", "U"): + continue + answer_arr = np.strings.replace( + np.array(arg, dtype=dtypes[0]), + np.array(old, dtype=dtypes[1]), + np.array(new, dtype=dtypes[2]), + ) + assert_array_equal(answer_arr, answer) + assert answer_arr.dtype.char == "T" + + +def test_center_promoter(): + arg = ["Hello", "planet!"] + fillchar = "/" + for dtypes in [("T", "U"), ("U", "T")]: + answer = np.strings.center( + np.array(arg, dtype=dtypes[0]), 9, np.array(fillchar, dtype=dtypes[1]) + ) + assert_array_equal(answer, ["//Hello//", "/planet!/"]) + assert answer.dtype.char == "T" + + +DATETIME_INPUT = [ + np.datetime64("1923-04-14T12:43:12"), + np.datetime64("1994-06-21T14:43:15"), + np.datetime64("2001-10-15T04:10:32"), + np.datetime64("NaT"), + np.datetime64("1995-11-25T16:02:16"), + np.datetime64("2005-01-04T03:14:12"), + np.datetime64("2041-12-03T14:05:03"), +] + + +TIMEDELTA_INPUT = [ + np.timedelta64(12358, "s"), + np.timedelta64(23, "s"), + np.timedelta64(74, "s"), + np.timedelta64("NaT"), + np.timedelta64(23, "s"), + np.timedelta64(73, "s"), + np.timedelta64(7, "s"), +] + + +@pytest.mark.parametrize( + "input_data, input_dtype", + [ + (DATETIME_INPUT, "M8[s]"), + (TIMEDELTA_INPUT, "m8[s]") + ] +) +def test_datetime_timedelta_cast(dtype, input_data, input_dtype): + + a = np.array(input_data, dtype=input_dtype) + + has_na = hasattr(dtype, "na_object") + is_str = isinstance(getattr(dtype, "na_object", None), str) + + if not has_na or is_str: + a = np.delete(a, 3) + + sa = a.astype(dtype) + ra = sa.astype(a.dtype) + + if has_na and not is_str: + assert sa[3] is dtype.na_object + assert np.isnat(ra[3]) + + assert_array_equal(a, ra) + + if has_na and not is_str: + # don't worry about comparing how NaT is converted + sa = np.delete(sa, 3) + a = np.delete(a, 3) + + if input_dtype.startswith("M"): + assert_array_equal(sa, a.astype("U")) + else: + # The timedelta to unicode cast produces strings + # that aren't round-trippable and we don't want to + # reproduce that behavior in stringdtype + assert_array_equal(sa, a.astype("int64").astype("U")) + + +def test_nat_casts(): + s = 'nat' + all_nats = itertools.product(*zip(s.upper(), s.lower())) + all_nats = list(map(''.join, all_nats)) + NaT_dt = np.datetime64('NaT') + NaT_td = np.timedelta64('NaT') + for na_object in [np._NoValue, None, np.nan, 'nat', '']: + # numpy treats empty string and all case combinations of 'nat' as NaT + dtype = StringDType(na_object=na_object) + arr = np.array([''] + all_nats, dtype=dtype) + dt_array = arr.astype('M8[s]') + td_array = arr.astype('m8[s]') + assert_array_equal(dt_array, NaT_dt) + assert_array_equal(td_array, NaT_td) + + if na_object is np._NoValue: + output_object = 'NaT' + else: + output_object = na_object + + for arr in [dt_array, td_array]: + assert_array_equal( + arr.astype(dtype), + np.array([output_object] * arr.size, dtype=dtype)) + + +def test_nat_conversion(): + for nat in [np.datetime64("NaT", "s"), np.timedelta64("NaT", "s")]: + with pytest.raises(ValueError, match="string coercion is disabled"): + np.array(["a", nat], 
dtype=StringDType(coerce=False)) + + +def test_growing_strings(dtype): + # growing a string leads to a heap allocation, this tests to make sure + # we do that bookkeeping correctly for all possible starting cases + data = [ + "hello", # a short string + "abcdefghijklmnopqestuvwxyz", # a medium heap-allocated string + "hello" * 200, # a long heap-allocated string + ] + + arr = np.array(data, dtype=dtype) + uarr = np.array(data, dtype=str) + + for _ in range(5): + arr = arr + arr + uarr = uarr + uarr + + assert_array_equal(arr, uarr) + + +def test_assign_medium_strings(): + # see gh-29261 + N = 9 + src = np.array( + ( + ['0' * 256] * 3 + ['0' * 255] + ['0' * 256] + ['0' * 255] + + ['0' * 256] * 2 + ['0' * 255] + ), dtype='T') + dst = np.array( + ( + ['0' * 255] + ['0' * 256] * 2 + ['0' * 255] + ['0' * 256] + + ['0' * 255] + [''] * 5 + ), dtype='T') + + dst[1:N + 1] = src + assert_array_equal(dst[1:N + 1], src) + + +UFUNC_TEST_DATA = [ + "hello" * 10, + "Ae¢☃€ 😊" * 20, + "entry\nwith\nnewlines", + "entry\twith\ttabs", +] + + +@pytest.fixture +def string_array(dtype): + return np.array(UFUNC_TEST_DATA, dtype=dtype) + + +@pytest.fixture +def unicode_array(): + return np.array(UFUNC_TEST_DATA, dtype=np.str_) + + +NAN_PRESERVING_FUNCTIONS = [ + "capitalize", + "expandtabs", + "lower", + "lstrip", + "rstrip", + "splitlines", + "strip", + "swapcase", + "title", + "upper", +] + +BOOL_OUTPUT_FUNCTIONS = [ + "isalnum", + "isalpha", + "isdigit", + "islower", + "isspace", + "istitle", + "isupper", + "isnumeric", + "isdecimal", +] + +UNARY_FUNCTIONS = [ + "str_len", + "capitalize", + "expandtabs", + "isalnum", + "isalpha", + "isdigit", + "islower", + "isspace", + "istitle", + "isupper", + "lower", + "lstrip", + "rstrip", + "splitlines", + "strip", + "swapcase", + "title", + "upper", + "isnumeric", + "isdecimal", + "isalnum", + "islower", + "istitle", + "isupper", +] + +UNIMPLEMENTED_VEC_STRING_FUNCTIONS = [ + "capitalize", + "expandtabs", + "lower", + "splitlines", + "swapcase", + "title", + "upper", +] + +ONLY_IN_NP_CHAR = [ + "join", + "split", + "rsplit", + "splitlines" +] + + +@pytest.mark.parametrize("function_name", UNARY_FUNCTIONS) +def test_unary(string_array, unicode_array, function_name): + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) + dtype = string_array.dtype + sres = func(string_array) + ures = func(unicode_array) + if sres.dtype == StringDType(): + ures = ures.astype(StringDType()) + assert_array_equal(sres, ures) + + if not hasattr(dtype, "na_object"): + return + + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + na_arr = np.insert(string_array, 0, dtype.na_object) + + if function_name in UNIMPLEMENTED_VEC_STRING_FUNCTIONS: + if not is_str: + # to avoid these errors we'd need to add NA support to _vec_string + with pytest.raises((ValueError, TypeError)): + func(na_arr) + elif function_name == "splitlines": + assert func(na_arr)[0] == func(dtype.na_object)[()] + else: + assert func(na_arr)[0] == func(dtype.na_object) + return + if function_name == "str_len" and not is_str: + # str_len always errors for any non-string null, even NA ones because + # it has an integer result + with pytest.raises(ValueError): + func(na_arr) + return + if function_name in BOOL_OUTPUT_FUNCTIONS: + if is_nan: + assert func(na_arr)[0] is np.False_ + elif is_str: + assert func(na_arr)[0] == func(dtype.na_object) + else: + with pytest.raises(ValueError): + func(na_arr) + 
return + if not (is_nan or is_str): + with pytest.raises(ValueError): + func(na_arr) + return + res = func(na_arr) + if is_nan and function_name in NAN_PRESERVING_FUNCTIONS: + assert res[0] is dtype.na_object + elif is_str: + assert res[0] == func(dtype.na_object) + + +unicode_bug_fail = pytest.mark.xfail( + reason="unicode output width is buggy", strict=True +) + +# None means that the argument is a string array +BINARY_FUNCTIONS = [ + ("add", (None, None)), + ("multiply", (None, 2)), + ("mod", ("format: %s", None)), + ("center", (None, 25)), + ("count", (None, "A")), + ("encode", (None, "UTF-8")), + ("endswith", (None, "lo")), + ("find", (None, "A")), + ("index", (None, "e")), + ("join", ("-", None)), + ("ljust", (None, 12)), + ("lstrip", (None, "A")), + ("partition", (None, "A")), + ("replace", (None, "A", "B")), + ("rfind", (None, "A")), + ("rindex", (None, "e")), + ("rjust", (None, 12)), + ("rsplit", (None, "A")), + ("rstrip", (None, "A")), + ("rpartition", (None, "A")), + ("split", (None, "A")), + ("strip", (None, "A")), + ("startswith", (None, "A")), + ("zfill", (None, 12)), +] + +PASSES_THROUGH_NAN_NULLS = [ + "add", + "center", + "ljust", + "multiply", + "replace", + "rjust", + "strip", + "lstrip", + "rstrip", + "zfill", +] + +NULLS_ARE_FALSEY = [ + "startswith", + "endswith", +] + +NULLS_ALWAYS_ERROR = [ + "count", + "find", + "rfind", +] + +SUPPORTS_NULLS = ( + PASSES_THROUGH_NAN_NULLS + + NULLS_ARE_FALSEY + + NULLS_ALWAYS_ERROR +) + + +def call_func(func, args, array, sanitize=True): + if args == (None, None): + return func(array, array) + if args[0] is None: + if sanitize: + san_args = tuple( + np.array(arg, dtype=array.dtype) if isinstance(arg, str) else + arg for arg in args[1:] + ) + else: + san_args = args[1:] + return func(array, *san_args) + if args[1] is None: + return func(args[0], array) + # shouldn't ever happen + assert 0 + + +@pytest.mark.parametrize("function_name, args", BINARY_FUNCTIONS) +def test_binary(string_array, unicode_array, function_name, args): + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) + sres = call_func(func, args, string_array) + ures = call_func(func, args, unicode_array, sanitize=False) + if not isinstance(sres, tuple) and sres.dtype == StringDType(): + ures = ures.astype(StringDType()) + assert_array_equal(sres, ures) + + dtype = string_array.dtype + if function_name not in SUPPORTS_NULLS or not hasattr(dtype, "na_object"): + return + + na_arr = np.insert(string_array, 0, dtype.na_object) + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + is_str = isinstance(dtype.na_object, str) + should_error = not (is_nan or is_str) + + if ( + (function_name in NULLS_ALWAYS_ERROR and not is_str) + or (function_name in PASSES_THROUGH_NAN_NULLS and should_error) + or (function_name in NULLS_ARE_FALSEY and should_error) + ): + with pytest.raises((ValueError, TypeError)): + call_func(func, args, na_arr) + return + + res = call_func(func, args, na_arr) + + if is_str: + assert res[0] == call_func(func, args, na_arr[:1]) + elif function_name in NULLS_ARE_FALSEY: + assert res[0] is np.False_ + elif function_name in PASSES_THROUGH_NAN_NULLS: + assert res[0] is dtype.na_object + else: + # shouldn't ever get here + assert 0 + + +@pytest.mark.parametrize("function, expected", [ + (np.strings.find, [[2, -1], [1, -1]]), + (np.strings.startswith, [[False, False], [True, False]])]) +@pytest.mark.parametrize("start, stop", [ + (1, 4), + (np.int8(1), np.int8(4)), 
+ (np.array([1, 1], dtype='u2'), np.array([4, 4], dtype='u2'))]) +def test_non_default_start_stop(function, start, stop, expected): + a = np.array([["--🐍--", "--🦜--"], + ["-🐍---", "-🦜---"]], "T") + indx = function(a, "🐍", start, stop) + assert_array_equal(indx, expected) + + +@pytest.mark.parametrize("count", [2, np.int8(2), np.array([2, 2], 'u2')]) +def test_replace_non_default_repeat(count): + a = np.array(["🐍--", "🦜-🦜-"], "T") + result = np.strings.replace(a, "🦜-", "🦜†", count) + assert_array_equal(result, np.array(["🐍--", "🦜†🦜†"], "T")) + + +def test_strip_ljust_rjust_consistency(string_array, unicode_array): + rjs = np.char.rjust(string_array, 1000) + rju = np.char.rjust(unicode_array, 1000) + + ljs = np.char.ljust(string_array, 1000) + lju = np.char.ljust(unicode_array, 1000) + + assert_array_equal( + np.char.lstrip(rjs), + np.char.lstrip(rju).astype(StringDType()), + ) + + assert_array_equal( + np.char.rstrip(ljs), + np.char.rstrip(lju).astype(StringDType()), + ) + + assert_array_equal( + np.char.strip(ljs), + np.char.strip(lju).astype(StringDType()), + ) + + assert_array_equal( + np.char.strip(rjs), + np.char.strip(rju).astype(StringDType()), + ) + + +def test_unset_na_coercion(): + # a dtype instance with an unset na object is compatible + # with a dtype that has one set + + # this test uses the "add" and "equal" ufunc but all ufuncs that + # accept more than one string argument and produce a string should + # behave this way + # TODO: generalize to more ufuncs + inp = ["hello", "world"] + arr = np.array(inp, dtype=StringDType(na_object=None)) + for op_dtype in [None, StringDType(), StringDType(coerce=False), + StringDType(na_object=None)]: + if op_dtype is None: + op = "2" + else: + op = np.array("2", dtype=op_dtype) + res = arr + op + assert_array_equal(res, ["hello2", "world2"]) + + # dtype instances with distinct explicitly set NA objects are incompatible + for op_dtype in [StringDType(na_object=pd_NA), StringDType(na_object="")]: + op = np.array("2", dtype=op_dtype) + with pytest.raises(TypeError): + arr + op + + # comparisons only consider the na_object + for op_dtype in [None, StringDType(), StringDType(coerce=True), + StringDType(na_object=None)]: + if op_dtype is None: + op = inp + else: + op = np.array(inp, dtype=op_dtype) + assert_array_equal(arr, op) + + for op_dtype in [StringDType(na_object=pd_NA), + StringDType(na_object=np.nan)]: + op = np.array(inp, dtype=op_dtype) + with pytest.raises(TypeError): + arr == op + + +def test_repeat(string_array): + res = string_array.repeat(1000) + # Create an empty array with expanded dimension, and fill it. Then, + # reshape it to the expected result. + expected = np.empty_like(string_array, shape=string_array.shape + (1000,)) + expected[...] = string_array[:, np.newaxis] + expected = expected.reshape(-1) + + assert_array_equal(res, expected, strict=True) + + +@pytest.mark.parametrize("tile", [1, 6, (2, 5)]) +def test_accumulation(string_array, tile): + """Accumulation is odd for StringDType but tests dtypes with references. + """ + # Fill with mostly empty strings to not create absurdly big strings + arr = np.zeros_like(string_array, shape=(100,)) + arr[:len(string_array)] = string_array + arr[-len(string_array):] = string_array + + # Bloat size a bit (get above thresholds and test >1 ndim). 
+ arr = np.tile(arr, tile) + + res = np.add.accumulate(arr, axis=0) + res_obj = np.add.accumulate(arr.astype(object), axis=0) + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + if arr.ndim > 1: + res = np.add.accumulate(arr, axis=-1) + res_obj = np.add.accumulate(arr.astype(object), axis=-1) + + assert_array_equal(res, res_obj.astype(arr.dtype), strict=True) + + +class TestImplementation: + """Check that strings are stored in the arena when possible. + + This tests implementation details, so should be adjusted if + the implementation changes. + """ + + @classmethod + def setup_class(cls): + cls.MISSING = 0x80 + cls.INITIALIZED = 0x40 + cls.OUTSIDE_ARENA = 0x20 + cls.LONG = 0x10 + cls.dtype = StringDType(na_object=np.nan) + cls.sizeofstr = cls.dtype.itemsize + sp = cls.dtype.itemsize // 2 # pointer size = sizeof(size_t) + # Below, size is not strictly correct, since it really uses + # 7 (or 3) bytes, but good enough for the tests here. + cls.view_dtype = np.dtype([ + ('offset', f'u{sp}'), + ('size', f'u{sp // 2}'), + ('xsiz', f'V{sp // 2 - 1}'), + ('size_and_flags', 'u1'), + ] if sys.byteorder == 'little' else [ + ('size_and_flags', 'u1'), + ('xsiz', f'V{sp // 2 - 1}'), + ('size', f'u{sp // 2}'), + ('offset', f'u{sp}'), + ]) + cls.s_empty = "" + cls.s_short = "01234" + cls.s_medium = "abcdefghijklmnopqrstuvwxyz" + cls.s_long = "-=+" * 100 + cls.a = np.array( + [cls.s_empty, cls.s_short, cls.s_medium, cls.s_long], + cls.dtype) + + def get_view(self, a): + # Cannot view a StringDType as anything else directly, since + # it has references. So, we use a stride trick hack. + from numpy.lib._stride_tricks_impl import DummyArray + interface = dict(a.__array_interface__) + interface['descr'] = self.view_dtype.descr + interface['typestr'] = self.view_dtype.str + return np.asarray(DummyArray(interface, base=a)) + + def get_flags(self, a): + return self.get_view(a)['size_and_flags'] & 0xf0 + + def is_short(self, a): + return self.get_flags(a) == self.INITIALIZED | self.OUTSIDE_ARENA + + def is_on_heap(self, a): + return self.get_flags(a) == (self.INITIALIZED + | self.OUTSIDE_ARENA + | self.LONG) + + def is_missing(self, a): + return self.get_flags(a) & self.MISSING == self.MISSING + + def in_arena(self, a): + return (self.get_flags(a) & (self.INITIALIZED | self.OUTSIDE_ARENA) + == self.INITIALIZED) + + def test_setup(self): + is_short = self.is_short(self.a) + length = np.strings.str_len(self.a) + assert_array_equal(is_short, (length > 0) & (length <= 15)) + assert_array_equal(self.in_arena(self.a), [False, False, True, True]) + assert_array_equal(self.is_on_heap(self.a), False) + assert_array_equal(self.is_missing(self.a), False) + view = self.get_view(self.a) + sizes = np.where(is_short, view['size_and_flags'] & 0xf, + view['size']) + assert_array_equal(sizes, np.strings.str_len(self.a)) + assert_array_equal(view['xsiz'][2:], + np.void(b'\x00' * (self.sizeofstr // 4 - 1))) + # Check that the medium string uses only 1 byte for its length + # in the arena, while the long string takes 8 (or 4). 
+ offsets = view['offset'] + assert offsets[2] == 1 + assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 + + def test_empty(self): + e = np.empty((3,), self.dtype) + assert_array_equal(self.get_flags(e), 0) + assert_array_equal(e, "") + + def test_zeros(self): + z = np.zeros((2,), self.dtype) + assert_array_equal(self.get_flags(z), 0) + assert_array_equal(z, "") + + def test_copy(self): + for c in [self.a.copy(), copy.copy(self.a), copy.deepcopy(self.a)]: + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + offsets = self.get_view(c)['offset'] + assert offsets[2] == 1 + assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2 + + def test_arena_use_with_setting(self): + c = np.zeros_like(self.a) + assert_array_equal(self.get_flags(c), 0) + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_with_setting(self): + c = self.a.copy() + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_after_missing(self): + c = self.a.copy() + c[:] = np.nan + assert np.all(self.is_missing(c)) + # Replacing with the original strings, the arena should be reused. + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_after_empty(self): + c = self.a.copy() + c[:] = "" + assert_array_equal(c, "") + # Replacing with the original strings, the arena should be reused. + c[:] = self.a + assert_array_equal(self.get_flags(c), self.get_flags(self.a)) + assert_array_equal(c, self.a) + + def test_arena_reuse_for_shorter(self): + c = self.a.copy() + # A string slightly shorter than the shortest in the arena + # should be used for all strings in the arena. + c[:] = self.s_medium[:-1] + assert_array_equal(c, self.s_medium[:-1]) + # first empty string in original was never initialized, so + # filling it in now leaves it initialized inside the arena. + # second string started as a short string so it can never live + # in the arena. + in_arena = np.array([True, False, True, True]) + assert_array_equal(self.in_arena(c), in_arena) + # But when a short string is replaced, it will go on the heap. + assert_array_equal(self.is_short(c), False) + assert_array_equal(self.is_on_heap(c), ~in_arena) + # We can put the originals back, and they'll still fit, + # and short strings are back as short strings + c[:] = self.a + assert_array_equal(c, self.a) + assert_array_equal(self.in_arena(c), in_arena) + assert_array_equal(self.is_short(c), self.is_short(self.a)) + assert_array_equal(self.is_on_heap(c), False) + + def test_arena_reuse_if_possible(self): + c = self.a.copy() + # A slightly longer string will not fit in the arena for + # the medium string, but will fit for the longer one. + c[:] = self.s_medium + "±" + assert_array_equal(c, self.s_medium + "±") + in_arena_exp = np.strings.str_len(self.a) >= len(self.s_medium) + 1 + # first entry started uninitialized and empty, so filling it leaves + # it in the arena + in_arena_exp[0] = True + assert not np.all(in_arena_exp == self.in_arena(self.a)) + assert_array_equal(self.in_arena(c), in_arena_exp) + assert_array_equal(self.is_short(c), False) + assert_array_equal(self.is_on_heap(c), ~in_arena_exp) + # And once outside arena, it stays outside, since offset is lost. + # But short strings are used again. 
+ c[:] = self.a + is_short_exp = self.is_short(self.a) + assert_array_equal(c, self.a) + assert_array_equal(self.in_arena(c), in_arena_exp) + assert_array_equal(self.is_short(c), is_short_exp) + assert_array_equal(self.is_on_heap(c), ~in_arena_exp & ~is_short_exp) + + def test_arena_no_reuse_after_short(self): + c = self.a.copy() + # If we replace a string with a short string, it cannot + # go into the arena after because the offset is lost. + c[:] = self.s_short + assert_array_equal(c, self.s_short) + assert_array_equal(self.in_arena(c), False) + c[:] = self.a + assert_array_equal(c, self.a) + assert_array_equal(self.in_arena(c), False) + assert_array_equal(self.is_on_heap(c), self.in_arena(self.a)) diff --git a/python/numpy/_core/tests/test_strings.py b/python/numpy/_core/tests/test_strings.py new file mode 100644 index 000000000..78c4b19c6 --- /dev/null +++ b/python/numpy/_core/tests/test_strings.py @@ -0,0 +1,1499 @@ +import operator +import sys + +import pytest + +import numpy as np +from numpy._core._exceptions import _UFuncNoLoopError +from numpy.testing import IS_PYPY, assert_array_equal, assert_raises +from numpy.testing._private.utils import requires_memory + +COMPARISONS = [ + (operator.eq, np.equal, "=="), + (operator.ne, np.not_equal, "!="), + (operator.lt, np.less, "<"), + (operator.le, np.less_equal, "<="), + (operator.gt, np.greater, ">"), + (operator.ge, np.greater_equal, ">="), +] + +MAX = np.iinfo(np.int64).max + +IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): + arr_string = np.array(["a", "b"], dtype="S") + arr_unicode = np.array(["a", "c"], dtype="U") + + with pytest.raises(TypeError, match="did not contain a loop"): + ufunc(arr_string, arr_unicode) + + with pytest.raises(TypeError, match="did not contain a loop"): + ufunc(arr_unicode, arr_string) + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +def test_mixed_string_comparisons_ufuncs_with_cast(op, ufunc, sym): + arr_string = np.array(["a", "b"], dtype="S") + arr_unicode = np.array(["a", "c"], dtype="U") + + # While there is no loop, manual casting is acceptable: + res1 = ufunc(arr_string, arr_unicode, signature="UU->?", casting="unsafe") + res2 = ufunc(arr_string, arr_unicode, signature="SS->?", casting="unsafe") + + expected = op(arr_string.astype("U"), arr_unicode) + assert_array_equal(res1, expected) + assert_array_equal(res2, expected) + + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +@pytest.mark.parametrize("dtypes", [ + ("S2", "S2"), ("S2", "S10"), + ("<U1", "<U1"), ("<U1", ">U1"), (">U1", ">U1"), + ("<U1", "<U10"), ("<U1", ">U10")]) +@pytest.mark.parametrize("aligned", [True, False]) +def test_string_comparisons(op, ufunc, sym, dtypes, aligned): + # ensure native byte-order for the first view to stay within unicode range + native_dt = np.dtype(dtypes[0]).newbyteorder("=") + arr = np.arange(2**15).view(native_dt).astype(dtypes[0]) + if not aligned: + # Make `arr` unaligned: + new = np.zeros(arr.nbytes + 1, dtype=np.uint8)[1:].view(dtypes[0]) + new[...] 
= arr + arr = new + + arr2 = arr.astype(dtypes[1], copy=True) + np.random.shuffle(arr2) + arr[0] = arr2[0] # make sure one matches + + expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())] + assert_array_equal(op(arr, arr2), expected) + assert_array_equal(ufunc(arr, arr2), expected) + assert_array_equal( + np.char.compare_chararrays(arr, arr2, sym, False), expected + ) + + expected = [op(d2, d1) for d1, d2 in zip(arr.tolist(), arr2.tolist())] + assert_array_equal(op(arr2, arr), expected) + assert_array_equal(ufunc(arr2, arr), expected) + assert_array_equal( + np.char.compare_chararrays(arr2, arr, sym, False), expected + ) + + +@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) +@pytest.mark.parametrize("dtypes", [ + ("S2", "S2"), ("S2", "S10"), ("<U1", "<U1"), ("<U1", ">U10")]) +def test_string_comparisons_empty(op, ufunc, sym, dtypes): + arr = np.empty((1, 0, 1, 5), dtype=dtypes[0]) + arr2 = np.empty((100, 1, 0, 1), dtype=dtypes[1]) + + expected = np.empty(np.broadcast_shapes(arr.shape, arr2.shape), dtype=bool) + assert_array_equal(op(arr, arr2), expected) + assert_array_equal(ufunc(arr, arr2), expected) + assert_array_equal( + np.char.compare_chararrays(arr, arr2, sym, False), expected + ) + + +@pytest.mark.parametrize("str_dt", ["S", "U"]) +@pytest.mark.parametrize("float_dt", np.typecodes["AllFloat"]) +def test_float_to_string_cast(str_dt, float_dt): + float_dt = np.dtype(float_dt) + fi = np.finfo(float_dt) + arr = np.array([np.nan, np.inf, -np.inf, fi.max, fi.min], dtype=float_dt) + expected = ["nan", "inf", "-inf", str(fi.max), str(fi.min)] + if float_dt.kind == "c": + expected = [f"({r}+0j)" for r in expected] + + res = arr.astype(str_dt) + assert_array_equal(res, np.array(expected, dtype=str_dt)) + + +@pytest.mark.parametrize("str_dt", "US") +@pytest.mark.parametrize("size", [-1, np.iinfo(np.intc).max]) +def test_string_size_dtype_errors(str_dt, size): + if size > 0: + size = size // np.dtype(f"{str_dt}1").itemsize + 1 + + with pytest.raises(ValueError): + np.dtype((str_dt, size)) + with pytest.raises(TypeError): + np.dtype(f"{str_dt}{size}") + + +@pytest.mark.parametrize("str_dt", "US") +def test_string_size_dtype_large_repr(str_dt): + size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + size_str = str(size) + + dtype = np.dtype((str_dt, size)) + assert size_str in dtype.str + assert size_str in str(dtype) + assert size_str in repr(dtype) + + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_coercion_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + try: + large_string = "A" * (very_large + 1) + except Exception: + # We may not be able to create this Python string on 32bit. + pytest.skip("python failed to create huge string") + + class MyStr: + def __str__(self): + return large_string + + try: + # TypeError from NumPy, or OverflowError from 32bit Python. + with pytest.raises((TypeError, OverflowError)): + np.array([large_string], dtype=str_dt) + + # Same as above, but input has to be converted to a string. + with pytest.raises((TypeError, OverflowError)): + np.array([MyStr()], dtype=str_dt) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. 
+ raise AssertionError("Ops should raise before any large allocation.") + +@pytest.mark.slow +@requires_memory(2 * np.iinfo(np.intc).max) +@pytest.mark.parametrize("str_dt", "US") +def test_large_string_addition_error(str_dt): + very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize + + a = np.array(["A" * very_large], dtype=str_dt) + b = np.array("B", dtype=str_dt) + try: + with pytest.raises(TypeError): + np.add(a, b) + with pytest.raises(TypeError): + np.add(a, a) + except MemoryError: + # Catch memory errors, because `requires_memory` would do so. + raise AssertionError("Ops should raise before any large allocation.") + + +def test_large_string_cast(): + very_large = np.iinfo(np.intc).max // 4 + # Could be nice to test very large path, but it makes too many huge + # allocations right now (need non-legacy cast loops for this). + # a = np.array([], dtype=np.dtype(("S", very_large))) + # assert a.astype("U").dtype.itemsize == very_large * 4 + + a = np.array([], dtype=np.dtype(("S", very_large + 1))) + # It is not perfect but OK if this raises a MemoryError during setup + # (this happens due clunky code and/or buffer setup.) + with pytest.raises((TypeError, MemoryError)): + a.astype("U") + + +@pytest.mark.parametrize("dt", ["S", "U", "T"]) +class TestMethods: + + @pytest.mark.parametrize("in1,in2,out", [ + ("", "", ""), + ("abc", "abc", "abcabc"), + ("12345", "12345", "1234512345"), + ("MixedCase", "MixedCase", "MixedCaseMixedCase"), + ("12345 \0 ", "12345 \0 ", "12345 \0 12345 \0 "), + ("UPPER", "UPPER", "UPPERUPPER"), + (["abc", "def"], ["hello", "world"], ["abchello", "defworld"]), + ]) + def test_add(self, in1, in2, out, dt): + in1 = np.array(in1, dtype=dt) + in2 = np.array(in2, dtype=dt) + out = np.array(out, dtype=dt) + assert_array_equal(np.strings.add(in1, in2), out) + + @pytest.mark.parametrize("in1,in2,out", [ + ("abc", 3, "abcabcabc"), + ("abc", 0, ""), + ("abc", -1, ""), + (["abc", "def"], [1, 4], ["abc", "defdefdefdef"]), + ]) + def test_multiply(self, in1, in2, out, dt): + in1 = np.array(in1, dtype=dt) + out = np.array(out, dtype=dt) + assert_array_equal(np.strings.multiply(in1, in2), out) + + def test_multiply_raises(self, dt): + with pytest.raises(TypeError, match="unsupported type"): + np.strings.multiply(np.array("abc", dtype=dt), 3.14) + + with pytest.raises(OverflowError): + np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize) + + def test_inplace_multiply(self, dt): + arr = np.array(['foo ', 'bar'], dtype=dt) + arr *= 2 + if dt != "T": + assert_array_equal(arr, np.array(['foo ', 'barb'], dtype=dt)) + else: + assert_array_equal(arr, ['foo foo ', 'barbar']) + + with pytest.raises(OverflowError): + arr *= sys.maxsize + + @pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32, + np.int64, np.int_]) + def test_multiply_integer_dtypes(self, i_dt, dt): + a = np.array("abc", dtype=dt) + i = np.array(3, dtype=i_dt) + res = np.array("abcabcabc", dtype=dt) + assert_array_equal(np.strings.multiply(a, i), res) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", True), + ("A", True), + ("\n", False), + ("abc", True), + ("aBc123", False), + ("abc\n", False), + (["abc", "aBc123"], [True, False]), + ]) + def test_isalpha(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isalpha(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', True), + ('A', True), + ('\n', False), + ('123abc456', True), + ('a1b3c', True), + ('aBc000 ', False), + ('abc\n', False), + ]) + def test_isalnum(self, in_, 
out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isalnum(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("012345", True), + ("012345a", False), + (["a", "012345"], [False, True]), + ]) + def test_isdigit(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isdigit(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("1", False), + (" ", True), + ("\t", True), + ("\r", True), + ("\n", True), + (" \t\r \n", True), + (" \t\r\na", False), + (["\t1", " \t\r \n"], [False, True]) + ]) + def test_isspace(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isspace(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', True), + ('A', False), + ('\n', False), + ('abc', True), + ('aBc', False), + ('abc\n', True), + ]) + def test_islower(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.islower(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', False), + ('A', True), + ('\n', False), + ('ABC', True), + ('AbC', False), + ('ABC\n', True), + ]) + def test_isupper(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isupper(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('', False), + ('a', False), + ('A', True), + ('\n', False), + ('A Titlecased Line', True), + ('A\nTitlecased Line', True), + ('A Titlecased, Line', True), + ('Not a capitalized String', False), + ('Not\ta Titlecase String', False), + ('Not--a Titlecase String', False), + ('NOT', False), + ]) + def test_istitle(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.istitle(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ("", 0), + ("abc", 3), + ("12345", 5), + ("MixedCase", 9), + ("12345 \x00 ", 8), + ("UPPER", 5), + (["abc", "12345 \x00 "], [3, 8]), + ]) + def test_str_len(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.str_len(in_), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("abcdefghiabc", "abc", 0, None, 0), + ("abcdefghiabc", "abc", 1, None, 9), + ("abcdefghiabc", "def", 4, None, -1), + ("abc", "", 0, None, 0), + ("abc", "", 3, None, 3), + ("abc", "", 4, None, -1), + ("rrarrrrrrrrra", "a", 0, None, 2), + ("rrarrrrrrrrra", "a", 4, None, 12), + ("rrarrrrrrrrra", "a", 4, 6, -1), + ("", "", 0, None, 0), + ("", "", 1, 1, -1), + ("", "", MAX, 0, -1), + ("", "xx", 0, None, -1), + ("", "xx", 1, 1, -1), + ("", "xx", MAX, 0, -1), + pytest.param(99 * "a" + "b", "b", 0, None, 99, + id="99*a+b-b-0-None-99"), + pytest.param(98 * "a" + "ba", "ba", 0, None, 98, + id="98*a+ba-ba-0-None-98"), + pytest.param(100 * "a", "b", 0, None, -1, + id="100*a-b-0-None--1"), + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 30000, + id="30000*a+100*b-100*b-0-None-30000"), + pytest.param(30000 * "a", 100 * "b", 0, None, -1, + id="30000*a-100*b-0-None--1"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 15000, + id="15000*a+15000*b-15000*b-0-None-15000"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, -1, + id="15000*a+15000*b-15000*c-0-None--1"), + (["abcdefghiabc", "rrarrrrrrrrra"], ["def", "arr"], [0, 3], + None, [3, -1]), + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), + ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + pytest.param("A" * (2 ** 17), r"[\w]+\Z", 0, None, -1, + id=r"A*2**17-[\w]+\Z-0-None--1"), + ]) + def test_find(self, a, sub, start, end, out, 
dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.find(a, sub, start, end), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("abcdefghiabc", "abc", 0, None, 9), + ("abcdefghiabc", "", 0, None, 12), + ("abcdefghiabc", "abcd", 0, None, 0), + ("abcdefghiabc", "abcz", 0, None, -1), + ("abc", "", 0, None, 3), + ("abc", "", 3, None, 3), + ("abc", "", 4, None, -1), + ("rrarrrrrrrrra", "a", 0, None, 12), + ("rrarrrrrrrrra", "a", 4, None, 12), + ("rrarrrrrrrrra", "a", 4, 6, -1), + (["abcdefghiabc", "rrarrrrrrrrra"], ["abc", "a"], [0, 0], + None, [9, 12]), + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 13), + ("Ae¢☃€ 😊" * 2, "😊", 0, 7, 6), + ]) + def test_rfind(self, a, sub, start, end, out, dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.rfind(a, sub, start, end), out) + + @pytest.mark.parametrize("a,sub,start,end,out", [ + ("aaa", "a", 0, None, 3), + ("aaa", "b", 0, None, 0), + ("aaa", "a", 1, None, 2), + ("aaa", "a", 10, None, 0), + ("aaa", "a", -1, None, 1), + ("aaa", "a", -10, None, 3), + ("aaa", "a", 0, 1, 1), + ("aaa", "a", 0, 10, 3), + ("aaa", "a", 0, -1, 2), + ("aaa", "a", 0, -10, 0), + ("aaa", "", 1, None, 3), + ("aaa", "", 3, None, 1), + ("aaa", "", 10, None, 0), + ("aaa", "", -1, None, 2), + ("aaa", "", -10, None, 4), + ("aaa", "aaaa", 0, None, 0), + pytest.param(98 * "a" + "ba", "ba", 0, None, 1, + id="98*a+ba-ba-0-None-1"), + pytest.param(30000 * "a" + 100 * "b", 100 * "b", 0, None, 1, + id="30000*a+100*b-100*b-0-None-1"), + pytest.param(30000 * "a", 100 * "b", 0, None, 0, + id="30000*a-100*b-0-None-0"), + pytest.param(30000 * "a" + 100 * "ab", "ab", 0, None, 100, + id="30000*a+100*ab-ab-0-None-100"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "b", 0, None, 1, + id="15000*a+15000*b-15000*b-0-None-1"), + pytest.param(15000 * "a" + 15000 * "b", 15000 * "c", 0, None, 0, + id="15000*a+15000*b-15000*c-0-None-0"), + ("", "", 0, None, 1), + ("", "", 1, 1, 0), + ("", "", MAX, 0, 0), + ("", "xx", 0, None, 0), + ("", "xx", 1, 1, 0), + ("", "xx", MAX, 0, 0), + (["aaa", ""], ["a", ""], [0, 0], None, [3, 1]), + ("Ae¢☃€ 😊" * 100, "😊", 0, None, 100), + ]) + def test_count(self, a, sub, start, end, out, dt): + if "😊" in a and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + a = np.array(a, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.count(a, sub, start, end), out) + + @pytest.mark.parametrize("a,prefix,start,end,out", [ + ("hello", "he", 0, None, True), + ("hello", "hello", 0, None, True), + ("hello", "hello world", 0, None, False), + ("hello", "", 0, None, True), + ("hello", "ello", 0, None, False), + ("hello", "ello", 1, None, True), + ("hello", "o", 4, None, True), + ("hello", "o", 5, None, False), + ("hello", "", 5, None, True), + ("hello", "lo", 6, None, False), + ("helloworld", "lowo", 3, None, True), + ("helloworld", "lowo", 3, 7, True), + ("helloworld", "lowo", 3, 6, False), + ("", "", 0, 1, True), + ("", "", 0, 0, True), + ("", "", 1, 0, False), + ("hello", "he", 0, -1, True), + ("hello", "he", -53, -1, True), + ("hello", "hello", 0, -1, False), + ("hello", "hello world", -1, -10, False), + ("hello", "ello", -5, None, False), + ("hello", "ello", -4, None, True), + ("hello", "o", -2, None, False), + ("hello", "o", -1, None, True), + ("hello", "", -3, -3, True), 
+ ("hello", "lo", -9, None, False), + (["hello", ""], ["he", ""], [0, 0], None, [True, True]), + ]) + def test_startswith(self, a, prefix, start, end, out, dt): + a = np.array(a, dtype=dt) + prefix = np.array(prefix, dtype=dt) + assert_array_equal(np.strings.startswith(a, prefix, start, end), out) + + @pytest.mark.parametrize("a,suffix,start,end,out", [ + ("hello", "lo", 0, None, True), + ("hello", "he", 0, None, False), + ("hello", "", 0, None, True), + ("hello", "hello world", 0, None, False), + ("helloworld", "worl", 0, None, False), + ("helloworld", "worl", 3, 9, True), + ("helloworld", "world", 3, 12, True), + ("helloworld", "lowo", 1, 7, True), + ("helloworld", "lowo", 2, 7, True), + ("helloworld", "lowo", 3, 7, True), + ("helloworld", "lowo", 4, 7, False), + ("helloworld", "lowo", 3, 8, False), + ("ab", "ab", 0, 1, False), + ("ab", "ab", 0, 0, False), + ("", "", 0, 1, True), + ("", "", 0, 0, True), + ("", "", 1, 0, False), + ("hello", "lo", -2, None, True), + ("hello", "he", -2, None, False), + ("hello", "", -3, -3, True), + ("hello", "hello world", -10, -2, False), + ("helloworld", "worl", -6, None, False), + ("helloworld", "worl", -5, -1, True), + ("helloworld", "worl", -5, 9, True), + ("helloworld", "world", -7, 12, True), + ("helloworld", "lowo", -99, -3, True), + ("helloworld", "lowo", -8, -3, True), + ("helloworld", "lowo", -7, -3, True), + ("helloworld", "lowo", 3, -4, False), + ("helloworld", "lowo", -8, -2, False), + (["hello", "helloworld"], ["lo", "worl"], [0, -6], None, + [True, False]), + ]) + def test_endswith(self, a, suffix, start, end, out, dt): + a = np.array(a, dtype=dt) + suffix = np.array(suffix, dtype=dt) + assert_array_equal(np.strings.endswith(a, suffix, start, end), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, ""), + (" hello ", None, "hello "), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, "abc \t\n\r\f\v"), + ([" hello ", "hello"], None, ["hello ", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + ("xyzzyhelloxyzzy", "xyz", "helloxyzzy"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + ("xyxzx", "x", "yxzx"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["helloxyzzy", "hello"]), + (["ba", "ac", "baa", "bba"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_lstrip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + assert_array_equal(np.strings.lstrip(a, chars), out) + else: + assert_array_equal(np.strings.lstrip(a), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, ""), + (" hello ", None, " hello"), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, " \t\n\r\f\vabc"), + ([" hello ", "hello"], None, [" hello", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + (["hello ", "abcdefghijklmnop"], None, + ["hello", "abcdefghijklmnop"]), + ("xyzzyhelloxyzzy", "xyz", "xyzzyhello"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + (" ", None, ""), + ("xyxzx", "x", "xyxz"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["xyzzyhello", "hello"]), + (["ab", "ac", "aab", "abb"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_rstrip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + assert_array_equal(np.strings.rstrip(a, chars), out) + else: + assert_array_equal(np.strings.rstrip(a), out) + + @pytest.mark.parametrize("a,chars,out", [ + ("", None, 
""), + (" hello ", None, "hello"), + ("hello", None, "hello"), + (" \t\n\r\f\vabc \t\n\r\f\v", None, "abc"), + ([" hello ", "hello"], None, ["hello", "hello"]), + ("", "", ""), + ("", "xyz", ""), + ("hello", "", "hello"), + ("xyzzyhelloxyzzy", "xyz", "hello"), + ("hello", "xyz", "hello"), + ("xyxz", "xyxz", ""), + ("xyxzx", "x", "yxz"), + (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], + ["hello", "hello"]), + (["bab", "ac", "baab", "bbabb"], "b", ["a", "ac", "aa", "a"]), + ]) + def test_strip(self, a, chars, out, dt): + a = np.array(a, dtype=dt) + if chars is not None: + chars = np.array(chars, dtype=dt) + out = np.array(out, dtype=dt) + assert_array_equal(np.strings.strip(a, chars), out) + + @pytest.mark.parametrize("buf,old,new,count,res", [ + ("", "", "", -1, ""), + ("", "", "A", -1, "A"), + ("", "A", "", -1, ""), + ("", "A", "A", -1, ""), + ("", "", "", 100, ""), + ("", "", "A", 100, "A"), + ("A", "", "", -1, "A"), + ("A", "", "*", -1, "*A*"), + ("A", "", "*1", -1, "*1A*1"), + ("A", "", "*-#", -1, "*-#A*-#"), + ("AA", "", "*-", -1, "*-A*-A*-"), + ("AA", "", "*-", -1, "*-A*-A*-"), + ("AA", "", "*-", 4, "*-A*-A*-"), + ("AA", "", "*-", 3, "*-A*-A*-"), + ("AA", "", "*-", 2, "*-A*-A"), + ("AA", "", "*-", 1, "*-AA"), + ("AA", "", "*-", 0, "AA"), + ("A", "A", "", -1, ""), + ("AAA", "A", "", -1, ""), + ("AAA", "A", "", -1, ""), + ("AAA", "A", "", 4, ""), + ("AAA", "A", "", 3, ""), + ("AAA", "A", "", 2, "A"), + ("AAA", "A", "", 1, "AA"), + ("AAA", "A", "", 0, "AAA"), + ("AAAAAAAAAA", "A", "", -1, ""), + ("ABACADA", "A", "", -1, "BCD"), + ("ABACADA", "A", "", -1, "BCD"), + ("ABACADA", "A", "", 5, "BCD"), + ("ABACADA", "A", "", 4, "BCD"), + ("ABACADA", "A", "", 3, "BCDA"), + ("ABACADA", "A", "", 2, "BCADA"), + ("ABACADA", "A", "", 1, "BACADA"), + ("ABACADA", "A", "", 0, "ABACADA"), + ("ABCAD", "A", "", -1, "BCD"), + ("ABCADAA", "A", "", -1, "BCD"), + ("BCD", "A", "", -1, "BCD"), + ("*************", "A", "", -1, "*************"), + ("^" + "A" * 1000 + "^", "A", "", 999, "^A^"), + ("the", "the", "", -1, ""), + ("theater", "the", "", -1, "ater"), + ("thethe", "the", "", -1, ""), + ("thethethethe", "the", "", -1, ""), + ("theatheatheathea", "the", "", -1, "aaaa"), + ("that", "the", "", -1, "that"), + ("thaet", "the", "", -1, "thaet"), + ("here and there", "the", "", -1, "here and re"), + ("here and there and there", "the", "", -1, "here and re and re"), + ("here and there and there", "the", "", 3, "here and re and re"), + ("here and there and there", "the", "", 2, "here and re and re"), + ("here and there and there", "the", "", 1, "here and re and there"), + ("here and there and there", "the", "", 0, "here and there and there"), + ("here and there and there", "the", "", -1, "here and re and re"), + ("abc", "the", "", -1, "abc"), + ("abcdefg", "the", "", -1, "abcdefg"), + ("bbobob", "bob", "", -1, "bob"), + ("bbobobXbbobob", "bob", "", -1, "bobXbob"), + ("aaaaaaabob", "bob", "", -1, "aaaaaaa"), + ("aaaaaaa", "bob", "", -1, "aaaaaaa"), + ("Who goes there?", "o", "o", -1, "Who goes there?"), + ("Who goes there?", "o", "O", -1, "WhO gOes there?"), + ("Who goes there?", "o", "O", -1, "WhO gOes there?"), + ("Who goes there?", "o", "O", 3, "WhO gOes there?"), + ("Who goes there?", "o", "O", 2, "WhO gOes there?"), + ("Who goes there?", "o", "O", 1, "WhO goes there?"), + ("Who goes there?", "o", "O", 0, "Who goes there?"), + ("Who goes there?", "a", "q", -1, "Who goes there?"), + ("Who goes there?", "W", "w", -1, "who goes there?"), + ("WWho goes there?WW", "W", "w", -1, "wwho goes there?ww"), + ("Who goes 
there?", "?", "!", -1, "Who goes there!"), + ("Who goes there??", "?", "!", -1, "Who goes there!!"), + ("Who goes there?", ".", "!", -1, "Who goes there?"), + ("This is a tissue", "is", "**", -1, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", -1, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 4, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 3, "Th** ** a t**sue"), + ("This is a tissue", "is", "**", 2, "Th** ** a tissue"), + ("This is a tissue", "is", "**", 1, "Th** is a tissue"), + ("This is a tissue", "is", "**", 0, "This is a tissue"), + ("bobob", "bob", "cob", -1, "cobob"), + ("bobobXbobobob", "bob", "cob", -1, "cobobXcobocob"), + ("bobob", "bot", "bot", -1, "bobob"), + ("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", 2, "ReyKKjaviKK"), + ("Reykjavik", "k", "KK", 1, "ReyKKjavik"), + ("Reykjavik", "k", "KK", 0, "Reykjavik"), + ("A.B.C.", ".", "----", -1, "A----B----C----"), + ("Reykjavik", "q", "KK", -1, "Reykjavik"), + ("spam, spam, eggs and spam", "spam", "ham", -1, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", -1, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 4, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 3, + "ham, ham, eggs and ham"), + ("spam, spam, eggs and spam", "spam", "ham", 2, + "ham, ham, eggs and spam"), + ("spam, spam, eggs and spam", "spam", "ham", 1, + "ham, spam, eggs and spam"), + ("spam, spam, eggs and spam", "spam", "ham", 0, + "spam, spam, eggs and spam"), + ("bobobob", "bobob", "bob", -1, "bobob"), + ("bobobobXbobobob", "bobob", "bob", -1, "bobobXbobob"), + ("BOBOBOB", "bob", "bobby", -1, "BOBOBOB"), + ("one!two!three!", "!", "@", 1, "one@two!three!"), + ("one!two!three!", "!", "", -1, "onetwothree"), + ("one!two!three!", "!", "@", 2, "one@two@three!"), + ("one!two!three!", "!", "@", 3, "one@two@three@"), + ("one!two!three!", "!", "@", 4, "one@two@three@"), + ("one!two!three!", "!", "@", 0, "one!two!three!"), + ("one!two!three!", "!", "@", -1, "one@two@three@"), + ("one!two!three!", "x", "@", -1, "one!two!three!"), + ("one!two!three!", "x", "@", 2, "one!two!three!"), + ("abc", "", "-", -1, "-a-b-c-"), + ("abc", "", "-", 3, "-a-b-c"), + ("abc", "", "-", 0, "abc"), + ("abc", "ab", "--", 0, "abc"), + ("abc", "xy", "--", -1, "abc"), + (["abbc", "abbd"], "b", "z", [1, 2], ["azbc", "azzd"]), + ]) + def test_replace(self, buf, old, new, count, res, dt): + if "😊" in buf and dt == "S": + pytest.skip("Bytes dtype does not support non-ascii input") + buf = np.array(buf, dtype=dt) + old = np.array(old, dtype=dt) + new = np.array(new, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.replace(buf, old, new, count), res) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("abcdefghiabc", "", 0, None, 0), + ("abcdefghiabc", "def", 0, None, 3), + ("abcdefghiabc", "abc", 0, None, 0), + ("abcdefghiabc", "abc", 1, None, 9), + ]) + def test_index(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.index(buf, sub, start, end), res) + + @pytest.mark.parametrize("buf,sub,start,end", [ + ("abcdefghiabc", "hib", 0, None), + ("abcdefghiab", "abc", 1, None), + ("abcdefghi", "ghi", 8, None), + ("abcdefghi", "ghi", -1, None), + ("rrarrrrrrrrra", "a", 4, 6), + ]) + def test_index_raises(self, buf, sub, start, end, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + with 
pytest.raises(ValueError, match="substring not found"): + np.strings.index(buf, sub, start, end) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("abcdefghiabc", "", 0, None, 12), + ("abcdefghiabc", "def", 0, None, 3), + ("abcdefghiabc", "abc", 0, None, 9), + ("abcdefghiabc", "abc", 0, -1, 0), + ]) + def test_rindex(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.rindex(buf, sub, start, end), res) + + @pytest.mark.parametrize("buf,sub,start,end", [ + ("abcdefghiabc", "hib", 0, None), + ("defghiabc", "def", 1, None), + ("defghiabc", "abc", 0, -1), + ("abcdefghi", "ghi", 0, 8), + ("abcdefghi", "ghi", 0, -1), + ("rrarrrrrrrrra", "a", 4, 6), + ]) + def test_rindex_raises(self, buf, sub, start, end, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + with pytest.raises(ValueError, match="substring not found"): + np.strings.rindex(buf, sub, start, end) + + @pytest.mark.parametrize("buf,tabsize,res", [ + ("abc\rab\tdef\ng\thi", 8, "abc\rab def\ng hi"), + ("abc\rab\tdef\ng\thi", 4, "abc\rab def\ng hi"), + ("abc\r\nab\tdef\ng\thi", 8, "abc\r\nab def\ng hi"), + ("abc\r\nab\tdef\ng\thi", 4, "abc\r\nab def\ng hi"), + ("abc\r\nab\r\ndef\ng\r\nhi", 4, "abc\r\nab\r\ndef\ng\r\nhi"), + (" \ta\n\tb", 1, " a\n b"), + ]) + def test_expandtabs(self, buf, tabsize, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.expandtabs(buf, tabsize), res) + + def test_expandtabs_raises_overflow(self, dt): + with pytest.raises(OverflowError, match="new string is too long"): + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) + + def test_expandtabs_length_not_cause_segfault(self, dt): + # see gh-28829 + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np._core.strings._expandtabs_length.reduce(np.zeros(200)) + + with pytest.raises( + _UFuncNoLoopError, + match="did not contain a loop with signature matching types", + ): + np.strings.expandtabs(np.zeros(200)) + + FILL_ERROR = "The fill character must be exactly one character long" + + def test_center_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.center(buf, 10, fill) + + def test_ljust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.ljust(buf, 10, fill) + + def test_rjust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.rjust(buf, 10, fill) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', ' abc '), + ('abc', 6, ' ', ' abc '), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), + ('abc', 10, '*', '***abc****'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', 'abc '), + ('abc', 6, ' ', 'abc '), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), 
+ ('abc', 10, '*', 'abc*******'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', ' abc'), + ('abc', 6, ' ', ' abc'), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', -2, ' ', 'abc'), + ('abc', 10, '*', '*******abc'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,res", [ + ('123', 2, '123'), + ('123', 3, '123'), + ('0123', 4, '0123'), + ('+123', 3, '+123'), + ('+123', 4, '+123'), + ('+123', 5, '+0123'), + ('+0123', 5, '+0123'), + ('-123', 3, '-123'), + ('-123', 4, '-123'), + ('-0123', 5, '-0123'), + ('000', 3, '000'), + ('34', 1, '34'), + ('34', -1, '34'), + ('0034', 4, '0034'), + ]) + def test_zfill(self, buf, width, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.zfill(buf, width), res) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("this is the partition method", "ti", "this is the par", + "ti", "tion method"), + ("http://www.python.org", "://", "http", "://", "www.python.org"), + ("http://www.python.org", "?", "http://www.python.org", "", ""), + ("http://www.python.org", "http://", "", "http://", "www.python.org"), + ("http://www.python.org", "org", "http://www.python.", "org", ""), + ("http://www.python.org", ["://", "?", "http://", "org"], + ["http", "http://www.python.org", "", "http://www.python."], + ["://", "", "http://", "org"], + ["www.python.org", "", "www.python.org", ""]), + ("mississippi", "ss", "mi", "ss", "issippi"), + ("mississippi", "i", "m", "i", "ssissippi"), + ("mississippi", "w", "mississippi", "", ""), + ]) + def test_partition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.partition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("this is the partition method", "ti", "this is the parti", + "ti", "on method"), + ("http://www.python.org", "://", "http", "://", "www.python.org"), + ("http://www.python.org", "?", "", "", "http://www.python.org"), + ("http://www.python.org", "http://", "", "http://", "www.python.org"), + ("http://www.python.org", "org", "http://www.python.", "org", ""), + ("http://www.python.org", ["://", "?", "http://", "org"], + ["http", "", "", "http://www.python."], + ["://", "", "http://", "org"], + ["www.python.org", "http://www.python.org", "www.python.org", ""]), + ("mississippi", "ss", "missi", "ss", "ippi"), + ("mississippi", "i", "mississipp", "i", ""), + ("mississippi", "w", "", "", "mississippi"), + ]) + def test_rpartition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.rpartition(buf, sep) + assert_array_equal(act1, res1) + 
assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("args", [ + (None,), + (None, None), + (None, None, -1), + (0,), + (0, None), + (0, None, -1), + (1,), + (1, None), + (1, None, -1), + (3,), + (3, None), + (5,), + (5, None), + (5, 5), + (5, 5, -1), + (6,), # test index past the end + (6, None), + (6, None, -1), + (6, 7), # test start and stop index past the end + (4, 3), # test start > stop index + (-1,), + (-1, None), + (-1, None, -1), + (-3,), + (-3, None), + ([3, 4],), + ([3, 4], None), + ([2, 4],), + ([-3, 5],), + ([-3, 5], None), + ([-3, 5], None, -1), + ([0, -5],), + ([0, -5], None), + ([0, -5], None, -1), + (1, 4), + (-3, 5), + (None, -1), + (0, [4, 2]), + ([1, 2], [-1, -2]), + (1, 5, 2), + (None, None, -1), + ([0, 6], [-1, 0], [2, -1]), + ]) + @pytest.mark.parametrize("buf", [ + ["hello", "world"], + ['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'], + ]) + def test_slice(self, args, buf, dt): + if dt == "S" and "你好世界" in buf: + pytest.skip("Bytes dtype does not support non-ascii input") + if len(buf) == 4: + args = tuple(s * 2 if isinstance(s, list) else s for s in args) + buf = np.array(buf, dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + + def test_slice_unsupported(self, dt): + with pytest.raises(TypeError, match="did not contain a loop"): + np.strings.slice(np.array([1, 2, 3]), 4) + + with pytest.raises(TypeError, match=r"Cannot cast ufunc '_slice' input .* from .* to dtype\('int(64|32)'\)"): + np.strings.slice(np.array(['foo', 'bar'], dtype=dt), np.array(['foo', 'bar'], dtype=dt)) + + @pytest.mark.parametrize("int_dt", [np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64]) + def test_slice_int_type_promotion(self, int_dt, dt): + buf = np.array(["hello", "world"], dtype=dt) + + assert_array_equal(np.strings.slice(buf, int_dt(4)), np.array(["hell", "worl"], dtype=dt)) + assert_array_equal(np.strings.slice(buf, np.array([4, 4], dtype=int_dt)), np.array(["hell", "worl"], dtype=dt)) + + assert_array_equal(np.strings.slice(buf, int_dt(2), int_dt(4)), np.array(["ll", "rl"], dtype=dt)) + assert_array_equal(np.strings.slice(buf, np.array([2, 2], dtype=int_dt), np.array([4, 4], dtype=int_dt)), np.array(["ll", "rl"], dtype=dt)) + + assert_array_equal(np.strings.slice(buf, int_dt(0), int_dt(4), int_dt(2)), np.array(["hl", "wr"], dtype=dt)) + assert_array_equal(np.strings.slice(buf, np.array([0, 0], dtype=int_dt), np.array([4, 4], dtype=int_dt), np.array([2, 2], dtype=int_dt)), np.array(["hl", "wr"], dtype=dt)) + +@pytest.mark.parametrize("dt", ["U", "T"]) +class TestMethodsWithUnicode: + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("\u2460", False), # CIRCLED DIGIT 1 + ("\xbc", False), # VULGAR FRACTION ONE QUARTER + ("\u0660", True), # ARABIC_INDIC DIGIT ZERO + ("012345", True), + ("012345a", False), + (["0", "a"], [True, False]), + ]) + def test_isdecimal_unicode(self, in_, out, dt): + buf = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isdecimal(buf), out) + + @pytest.mark.parametrize("in_,out", [ + ("", False), + ("a", False), + ("0", True), + ("\u2460", True), # CIRCLED DIGIT 1 + ("\xbc", True), # VULGAR FRACTION ONE QUARTER + ("\u0660", True), # ARABIC_INDIC DIGIT ZERO + ("012345", True), + ("012345a", False), + (["0", "a"], 
[True, False]), + ]) + def test_isnumeric_unicode(self, in_, out, dt): + buf = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isnumeric(buf), out) + + @pytest.mark.parametrize("buf,old,new,count,res", [ + ("...\u043c......<", "<", "<", -1, "...\u043c......<"), + ("Ae¢☃€ 😊" * 2, "A", "B", -1, "Be¢☃€ 😊Be¢☃€ 😊"), + ("Ae¢☃€ 😊" * 2, "😊", "B", -1, "Ae¢☃€ BAe¢☃€ B"), + ]) + def test_replace_unicode(self, buf, old, new, count, res, dt): + buf = np.array(buf, dtype=dt) + old = np.array(old, dtype=dt) + new = np.array(new, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.replace(buf, old, new, count), res) + + @pytest.mark.parametrize("in_", [ + '\U00010401', + '\U00010427', + '\U00010429', + '\U0001044E', + '\U0001D7F6', + '\U00011066', + '\U000104A0', + pytest.param('\U0001F107', marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISALNUM", + strict=True)), + ]) + def test_isalnum_unicode(self, in_, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isalnum(in_), True) + + @pytest.mark.parametrize("in_,out", [ + ('\u1FFc', False), + ('\u2167', False), + ('\U00010401', False), + ('\U00010427', False), + ('\U0001F40D', False), + ('\U0001F46F', False), + ('\u2177', True), + pytest.param('\U00010429', True, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISLOWER", + strict=True)), + ('\U0001044E', True), + ]) + def test_islower_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.islower(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('\u1FFc', False), + ('\u2167', True), + ('\U00010401', True), + ('\U00010427', True), + ('\U0001F40D', False), + ('\U0001F46F', False), + ('\u2177', False), + pytest.param('\U00010429', False, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISUPPER", + strict=True)), + ('\U0001044E', False), + ]) + def test_isupper_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.isupper(in_), out) + + @pytest.mark.parametrize("in_,out", [ + ('\u1FFc', True), + ('Greek \u1FFcitlecases ...', True), + pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISISTITLE", + strict=True)), + ('\U00010427\U0001044E', True), + pytest.param('\U00010429', False, marks=pytest.mark.xfail( + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, + reason="PYPY bug in Py_UNICODE_ISISTITLE", + strict=True)), + ('\U0001044E', False), + ('\U0001F40D', False), + ('\U0001F46F', False), + ]) + def test_istitle_unicode(self, in_, out, dt): + in_ = np.array(in_, dtype=dt) + assert_array_equal(np.strings.istitle(in_), out) + + @pytest.mark.parametrize("buf,sub,start,end,res", [ + ("Ae¢☃€ 😊" * 2, "😊", 0, None, 6), + ("Ae¢☃€ 😊" * 2, "😊", 7, None, 13), + ]) + def test_index_unicode(self, buf, sub, start, end, res, dt): + buf = np.array(buf, dtype=dt) + sub = np.array(sub, dtype=dt) + assert_array_equal(np.strings.index(buf, sub, start, end), res) + + def test_index_raises_unicode(self, dt): + with pytest.raises(ValueError, match="substring not found"): + np.strings.index("Ae¢☃€ 😊", "😀") + + @pytest.mark.parametrize("buf,res", [ + ("Ae¢☃€ \t 😊", "Ae¢☃€ 😊"), + ("\t\U0001044E", " \U0001044E"), + ]) + def test_expandtabs(self, buf, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + 
assert_array_equal(np.strings.expandtabs(buf), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', '\U0001044Ex\U0001044E'), + ('x', 4, '\U0001044E', '\U0001044Ex\U0001044E\U0001044E'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', 'x\U0001044E\U0001044E'), + ('x', 4, '\U0001044E', 'x\U0001044E\U0001044E\U0001044E'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', '\U0001044Ex'), + ('x', 3, '\U0001044E', '\U0001044E\U0001044Ex'), + ('x', 4, '\U0001044E', '\U0001044E\U0001044E\U0001044Ex'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("āāāāĀĀĀĀ", "Ă", "āāāāĀĀĀĀ", "", ""), + ("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"), + ("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"), + ("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "", ""), + ("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ]) + def test_partition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.partition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("āāāāĀĀĀĀ", "Ă", "", "", "āāāāĀĀĀĀ"), + ("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"), + ("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"), + ("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "", "", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ]) + def test_rpartition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.rpartition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("method", ["strip", "lstrip", "rstrip"]) + @pytest.mark.parametrize( + "source,strip", + [ + ("λμ", "μ"), + ("λμ", "λ"), + ("λ" * 5 + "μ" * 2, "μ"), + ("λ" * 5 + "μ" * 2, "λ"), + ("λ" * 5 + "A" + "μ" * 2, "μλ"), + ("λμ" * 5, "μ"), + ("λμ" * 5, "λ"), + ]) + def test_strip_functions_unicode(self, source, strip, method, dt): + src_array = np.array([source], dtype=dt) + + npy_func = getattr(np.strings, method) + py_func = getattr(str, method) + + expected = np.array([py_func(source, strip)], dtype=dt) + actual = npy_func(src_array, strip) + + assert_array_equal(actual, expected) + + 
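+
+    # Illustrative sketch (not part of the original patch): start/stop/step
+    # broadcast element-wise against the array, so every string can get its
+    # own slice; the parametrized test below checks this against Python's
+    # per-element s[start:stop:step].
+    def _example_slice_broadcast(self):
+        buf = np.array(["hello", "world"])
+        # "hello"[1:4] -> "ell", "world"[2:5] -> "rld"
+        return np.strings.slice(buf, [1, 2], [4, 5])
+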
@pytest.mark.parametrize("args", [ + (None,), + (0,), + (1,), + (5,), + (15,), + (22,), + (-1,), + (-3,), + ([3, 4],), + ([-5, 5],), + ([0, -8],), + (1, 12), + (-12, 15), + (None, -1), + (0, [17, 6]), + ([1, 2], [-1, -2]), + (1, 11, 2), + (None, None, -1), + ([0, 10], [-1, 0], [2, -1]), + ]) + def test_slice(self, args, dt): + buf = np.array(["Приве́т नमस्ते שָׁלוֹם", "😀😃😄😁😆😅🤣😂🙂🙃"], + dtype=dt) + act = np.strings.slice(buf, *args) + bcast_args = tuple(np.broadcast_to(arg, buf.shape) for arg in args) + res = np.array([s[slice(*arg)] + for s, arg in zip(buf, zip(*bcast_args))], + dtype=dt) + assert_array_equal(act, res) + + +class TestMixedTypeMethods: + def test_center(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("*😊*", dtype="U") + assert_array_equal(np.strings.center(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("*s*", dtype="S") + assert_array_equal(np.strings.center(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.center(buf, 3, fill) + + def test_ljust(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("😊**", dtype="U") + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("s**", dtype="S") + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.ljust(buf, 3, fill) + + def test_rjust(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("**😊", dtype="U") + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("**s", dtype="S") + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.rjust(buf, 3, fill) + + +class TestUnicodeOnlyMethodsRaiseWithBytes: + def test_isdecimal_raises(self): + in_ = np.array(b"1") + with assert_raises(TypeError): + np.strings.isdecimal(in_) + + def test_isnumeric_bytes(self): + in_ = np.array(b"1") + with assert_raises(TypeError): + np.strings.isnumeric(in_) + + +def check_itemsize(n_elem, dt): + if dt == "T": + return np.dtype(dt).itemsize + if dt == "S": + return n_elem + if dt == "U": + return n_elem * 4 + +@pytest.mark.parametrize("dt", ["S", "U", "T"]) +class TestReplaceOnArrays: + + def test_replace_count_and_size(self, dt): + a = np.array(["0123456789" * i for i in range(4)], dtype=dt) + r1 = np.strings.replace(a, "5", "ABCDE") + assert r1.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) + r1_res = np.array(["01234ABCDE6789" * i for i in range(4)], dtype=dt) + assert_array_equal(r1, r1_res) + r2 = np.strings.replace(a, "5", "ABCDE", 1) + assert r2.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) + r3 = np.strings.replace(a, "5", "ABCDE", 0) + assert r3.dtype.itemsize == a.dtype.itemsize + assert_array_equal(r3, a) + # Negative values mean to replace all. + r4 = np.strings.replace(a, "5", "ABCDE", -1) + assert r4.dtype.itemsize == check_itemsize(3 * 10 + 3 * 4, dt) + assert_array_equal(r4, r1) + # We can do count on an element-by-element basis. 
+ r5 = np.strings.replace(a, "5", "ABCDE", [-1, -1, -1, 1]) + assert r5.dtype.itemsize == check_itemsize(3 * 10 + 4, dt) + assert_array_equal(r5, np.array( + ["01234ABCDE6789" * i for i in range(3)] + + ["01234ABCDE6789" + "0123456789" * 2], dtype=dt)) + + def test_replace_broadcasting(self, dt): + a = np.array("0,0,0", dtype=dt) + r1 = np.strings.replace(a, "0", "1", np.arange(3)) + assert r1.dtype == a.dtype + assert_array_equal(r1, np.array(["0,0,0", "1,0,0", "1,1,0"], dtype=dt)) + r2 = np.strings.replace(a, "0", [["1"], ["2"]], np.arange(1, 4)) + assert_array_equal(r2, np.array([["1,0,0", "1,1,0", "1,1,1"], + ["2,0,0", "2,2,0", "2,2,2"]], + dtype=dt)) + r3 = np.strings.replace(a, ["0", "0,0", "0,0,0"], "X") + assert_array_equal(r3, np.array(["X,X,X", "X,0", "X"], dtype=dt)) + + +class TestOverride: + @classmethod + def setup_class(cls): + class Override: + + def __array_function__(self, *args, **kwargs): + return "function" + + def __array_ufunc__(self, *args, **kwargs): + return "ufunc" + + cls.override = Override() + + @pytest.mark.parametrize("func, kwargs", [ + (np.strings.center, dict(width=10)), + (np.strings.capitalize, {}), + (np.strings.decode, {}), + (np.strings.encode, {}), + (np.strings.expandtabs, {}), + (np.strings.ljust, dict(width=10)), + (np.strings.lower, {}), + (np.strings.mod, dict(values=2)), + (np.strings.multiply, dict(i=2)), + (np.strings.partition, dict(sep="foo")), + (np.strings.rjust, dict(width=10)), + (np.strings.rpartition, dict(sep="foo")), + (np.strings.swapcase, {}), + (np.strings.title, {}), + (np.strings.translate, dict(table=None)), + (np.strings.upper, {}), + (np.strings.zfill, dict(width=10)), + ]) + def test_override_function(self, func, kwargs): + assert func(self.override, **kwargs) == "function" + + @pytest.mark.parametrize("func, args, kwargs", [ + (np.strings.add, (None, ), {}), + (np.strings.lstrip, (), {}), + (np.strings.rstrip, (), {}), + (np.strings.strip, (), {}), + (np.strings.equal, (None, ), {}), + (np.strings.not_equal, (None, ), {}), + (np.strings.greater_equal, (None, ), {}), + (np.strings.less_equal, (None, ), {}), + (np.strings.greater, (None, ), {}), + (np.strings.less, (None, ), {}), + (np.strings.count, ("foo", ), {}), + (np.strings.endswith, ("foo", ), {}), + (np.strings.find, ("foo", ), {}), + (np.strings.index, ("foo", ), {}), + (np.strings.isalnum, (), {}), + (np.strings.isalpha, (), {}), + (np.strings.isdecimal, (), {}), + (np.strings.isdigit, (), {}), + (np.strings.islower, (), {}), + (np.strings.isnumeric, (), {}), + (np.strings.isspace, (), {}), + (np.strings.istitle, (), {}), + (np.strings.isupper, (), {}), + (np.strings.rfind, ("foo", ), {}), + (np.strings.rindex, ("foo", ), {}), + (np.strings.startswith, ("foo", ), {}), + (np.strings.str_len, (), {}), + ]) + def test_override_ufunc(self, func, args, kwargs): + assert func(self.override, *args, **kwargs) == "ufunc" diff --git a/python/numpy/_core/tests/test_ufunc.py b/python/numpy/_core/tests/test_ufunc.py new file mode 100644 index 000000000..af22dcef2 --- /dev/null +++ b/python/numpy/_core/tests/test_ufunc.py @@ -0,0 +1,3313 @@ +import ctypes as ct +import itertools +import pickle +import sys +import warnings + +import numpy._core._operand_flag_tests as opflag_tests +import numpy._core._rational_tests as _rational_tests +import numpy._core._umath_tests as umt +import pytest +from pytest import param + +import numpy as np +import numpy._core.umath as ncu +import numpy.linalg._umath_linalg as uml +from numpy.exceptions import AxisError +from numpy.testing import ( + 
HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + suppress_warnings, +) +from numpy.testing._private.utils import requires_memory + +UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] + +# Remove functions that do not support `floats` +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) + + +class TestUfuncKwargs: + def test_kwarg_exact(self): + assert_raises(TypeError, np.add, 1, 2, castingx='safe') + assert_raises(TypeError, np.add, 1, 2, dtypex=int) + assert_raises(TypeError, np.add, 1, 2, extobjx=[4096]) + assert_raises(TypeError, np.add, 1, 2, outx=None) + assert_raises(TypeError, np.add, 1, 2, sigx='ii->i') + assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i') + assert_raises(TypeError, np.add, 1, 2, subokx=False) + assert_raises(TypeError, np.add, 1, 2, wherex=[True]) + + def test_sig_signature(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + signature='ii->i') + + def test_sig_dtype(self): + assert_raises(TypeError, np.add, 1, 2, sig='ii->i', + dtype=int) + assert_raises(TypeError, np.add, 1, 2, signature='ii->i', + dtype=int) + + def test_extobj_removed(self): + assert_raises(TypeError, np.add, 1, 2, extobj=[4096]) + + +class TestUfuncGenericLoops: + """Test generic loops. + + The loops to be tested are: + + PyUFunc_ff_f_As_dd_d + PyUFunc_ff_f + PyUFunc_dd_d + PyUFunc_gg_g + PyUFunc_FF_F_As_DD_D + PyUFunc_DD_D + PyUFunc_FF_F + PyUFunc_GG_G + PyUFunc_OO_O + PyUFunc_OO_O_method + PyUFunc_f_f_As_d_d + PyUFunc_d_d + PyUFunc_f_f + PyUFunc_g_g + PyUFunc_F_F_As_D_D + PyUFunc_F_F + PyUFunc_D_D + PyUFunc_G_G + PyUFunc_O_O + PyUFunc_O_O_method + PyUFunc_On_Om + + Where: + + f -- float + d -- double + g -- long double + F -- complex float + D -- complex double + G -- complex long double + O -- python object + + It is difficult to assure that each of these loops is entered from the + Python level as the special cased loops are a moving target and the + corresponding types are architecture dependent. We probably need to + define C level testing ufuncs to get at them. For the time being, I've + just looked at the signatures registered in the build directory to find + relevant functions. 
+ + """ + np_dtypes = [ + (np.single, np.single), (np.single, np.double), + (np.csingle, np.csingle), (np.csingle, np.cdouble), + (np.double, np.double), (np.longdouble, np.longdouble), + (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)] + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + def f2(x, y): + return x**y + + @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes) + def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1): + xs = np.full(10, input_dtype(x), dtype=output_dtype) + ys = f(xs, xs)[::2] + assert_allclose(ys, y) + assert_equal(ys.dtype, output_dtype) + + # class to use in testing object method loops + class foo: + def conjugate(self): + return np.bool(1) + + def logical_xor(self, obj): + return np.bool(1) + + def test_unary_PyUFunc_O_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.abs(x) == 1)) + + def test_unary_PyUFunc_O_O_method_simple(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.conjugate(x) == True)) + + def test_binary_PyUFunc_OO_O(self): + x = np.ones(10, dtype=object) + assert_(np.all(np.add(x, x) == 2)) + + def test_binary_PyUFunc_OO_O_method(self, foo=foo): + x = np.full(10, foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_binary_PyUFunc_On_Om_method(self, foo=foo): + x = np.full((10, 2, 3), foo(), dtype=object) + assert_(np.all(np.logical_xor(x, x))) + + def test_python_complex_conjugate(self): + # The conjugate ufunc should fall back to calling the method: + arr = np.array([1 + 2j, 3 - 4j], dtype="O") + assert isinstance(arr[0], complex) + res = np.conjugate(arr) + assert res.dtype == np.dtype("O") + assert_array_equal(res, np.array([1 - 2j, 3 + 4j], dtype="O")) + + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_unary_PyUFunc_O_O_method_full(self, ufunc): + """Compare the result of the object loop with non-object one""" + val = np.float64(np.pi / 4) + + class MyFloat(np.float64): + def __getattr__(self, attr): + try: + return super().__getattr__(attr) + except AttributeError: + return lambda: getattr(np._core.umath, attr)(val) + + # Use 0-D arrays, to ensure the same element call + num_arr = np.array(val, dtype=np.float64) + obj_arr = np.array(MyFloat(val), dtype="O") + + with np.errstate(all="raise"): + try: + res_num = ufunc(num_arr) + except Exception as exc: + with assert_raises(type(exc)): + ufunc(obj_arr) + else: + res_obj = ufunc(obj_arr) + assert_array_almost_equal(res_num.astype("O"), res_obj) + + +def _pickleable_module_global(): + pass + + +class TestUfunc: + def test_pickle(self): + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + assert_(pickle.loads(pickle.dumps(np.sin, + protocol=proto)) is np.sin) + + # Check that ufunc not defined in the top level numpy namespace + # such as numpy._core._rational_tests.test_add can also be pickled + res = pickle.loads(pickle.dumps(_rational_tests.test_add, + protocol=proto)) + assert_(res is _rational_tests.test_add) + + def test_pickle_withstring(self): + astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n" + b"(S'numpy._core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.") + assert_(pickle.loads(astring) is np.cos) + + @pytest.mark.skipif(IS_PYPY, reason="'is' check does not work on PyPy") + def test_pickle_name_is_qualname(self): + # This tests that a 
simplification of our ufunc pickle code will + # lead to allowing qualnames as names. Future ufuncs should + # possible add a specific qualname, or a hook into pickling instead + # (dask+numba may benefit). + _pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc + + obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc)) + assert obj is umt._pickleable_module_global_ufunc + + def test_reduceat_shifting_sum(self): + L = 6 + x = np.arange(L) + idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel() + assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7]) + + def test_all_ufunc(self): + """Try to check presence and results of all ufuncs. + + The list of ufuncs comes from generate_umath.py and is as follows: + + ===== ==== ============= =============== ======================== + done args function types notes + ===== ==== ============= =============== ======================== + n 1 conjugate nums + O + n 1 absolute nums + O complex -> real + n 1 negative nums + O + n 1 sign nums + O -> int + n 1 invert bool + ints + O flts raise an error + n 1 degrees real + M cmplx raise an error + n 1 radians real + M cmplx raise an error + n 1 arccos flts + M + n 1 arccosh flts + M + n 1 arcsin flts + M + n 1 arcsinh flts + M + n 1 arctan flts + M + n 1 arctanh flts + M + n 1 cos flts + M + n 1 sin flts + M + n 1 tan flts + M + n 1 cosh flts + M + n 1 sinh flts + M + n 1 tanh flts + M + n 1 exp flts + M + n 1 expm1 flts + M + n 1 log flts + M + n 1 log10 flts + M + n 1 log1p flts + M + n 1 sqrt flts + M real x < 0 raises error + n 1 ceil real + M + n 1 trunc real + M + n 1 floor real + M + n 1 fabs real + M + n 1 rint flts + M + n 1 isnan flts -> bool + n 1 isinf flts -> bool + n 1 isfinite flts -> bool + n 1 signbit real -> bool + n 1 modf real -> (frac, int) + n 1 logical_not bool + nums + M -> bool + n 2 left_shift ints + O flts raise an error + n 2 right_shift ints + O flts raise an error + n 2 add bool + nums + O boolean + is || + n 2 subtract bool + nums + O boolean - is ^ + n 2 multiply bool + nums + O boolean * is & + n 2 divide nums + O + n 2 floor_divide nums + O + n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d + n 2 fmod nums + M + n 2 power nums + O + n 2 greater bool + nums + O -> bool + n 2 greater_equal bool + nums + O -> bool + n 2 less bool + nums + O -> bool + n 2 less_equal bool + nums + O -> bool + n 2 equal bool + nums + O -> bool + n 2 not_equal bool + nums + O -> bool + n 2 logical_and bool + nums + M -> bool + n 2 logical_or bool + nums + M -> bool + n 2 logical_xor bool + nums + M -> bool + n 2 maximum bool + nums + O + n 2 minimum bool + nums + O + n 2 bitwise_and bool + ints + O flts raise an error + n 2 bitwise_or bool + ints + O flts raise an error + n 2 bitwise_xor bool + ints + O flts raise an error + n 2 arctan2 real + M + n 2 remainder ints + real + O + n 2 hypot real + M + ===== ==== ============= =============== ======================== + + Types other than those listed will be accepted, but they are cast to + the smallest compatible type for which the function is defined. 
The + casting rules are: + + bool -> int8 -> float32 + ints -> double + + """ + pass + + # from include/numpy/ufuncobject.h + size_inferred = 2 + can_ignore = 4 + + def test_signature0(self): + # the arguments to test_signature are: nin, nout, core_signature + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i),(i)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 1, 0)) + assert_equal(ixs, (0, 0)) + assert_equal(flags, (self.size_inferred,)) + assert_equal(sizes, (-1,)) + + def test_signature1(self): + # empty core signature; treat as plain ufunc (with trivial core) + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(),()->()") + assert_equal(enabled, 0) + assert_equal(num_dims, (0, 0, 0)) + assert_equal(ixs, ()) + assert_equal(flags, ()) + assert_equal(sizes, ()) + + def test_signature2(self): + # more complicated names for variables + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1,i2),(J_1)->(_kAB)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 1)) + assert_equal(ixs, (0, 1, 2, 3)) + assert_equal(flags, (self.size_inferred,) * 4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature3(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(i1, i12), (J_1)->(i12, i2)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 1, 2)) + assert_equal(ixs, (0, 1, 2, 1, 3)) + assert_equal(flags, (self.size_inferred,) * 4) + assert_equal(sizes, (-1, -1, -1, -1)) + + def test_signature4(self): + # matrix_multiply signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n,k),(k,m)->(n,m)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred,) * 3) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature5(self): + # matmul signature from _umath_tests + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 2, 1, "(n?,k),(k,m?)->(n?,m?)") + assert_equal(enabled, 1) + assert_equal(num_dims, (2, 2, 2)) + assert_equal(ixs, (0, 1, 1, 2, 0, 2)) + assert_equal(flags, (self.size_inferred | self.can_ignore, + self.size_inferred, + self.size_inferred | self.can_ignore)) + assert_equal(sizes, (-1, -1, -1)) + + def test_signature6(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "(3)->()") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature7(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3),(03,3),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (0, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature8(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "(3?),(3?,3?),(n)->(9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature9(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 1, 1, "( 3) -> ( )") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 0)) + assert_equal(ixs, (0,)) + assert_equal(flags, (0,)) + assert_equal(sizes, (3,)) + + def test_signature10(self): + enabled, num_dims, ixs, flags, sizes = umt.test_signature( + 3, 1, "( 3? ) , (3? , 3?) 
,(n )-> ( 9)") + assert_equal(enabled, 1) + assert_equal(num_dims, (1, 2, 1, 1)) + assert_equal(ixs, (0, 0, 0, 1, 2)) + assert_equal(flags, (self.can_ignore, self.size_inferred, 0)) + assert_equal(sizes, (3, -1, 9)) + + def test_signature_failure_extra_parenthesis(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "((i)),(i)->()") + + def test_signature_failure_mismatching_parenthesis(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "(i),)i(->()") + + def test_signature_failure_signature_missing_input_arg(self): + with assert_raises(ValueError): + umt.test_signature(2, 1, "(i),->()") + + def test_signature_failure_signature_missing_output_arg(self): + with assert_raises(ValueError): + umt.test_signature(2, 2, "(i),(i)->()") + + def test_get_signature(self): + assert_equal(np.vecdot.signature, "(n),(n)->()") + + def test_forced_sig(self): + a = 0.5 * np.arange(3, dtype='f8') + assert_equal(np.add(a, 0.5), [0.5, 1, 1.5]) + with assert_raises(TypeError): + np.add(a, 0.5, sig='i', casting='unsafe') + assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1]) + with assert_raises(TypeError): + np.add(a, 0.5, sig=('i4',), casting='unsafe') + assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'), + casting='unsafe'), [0, 0, 1]) + + b = np.zeros((3,), dtype='f8') + np.add(a, 0.5, out=b) + assert_equal(b, [0.5, 1, 1.5]) + b[:] = 0 + with assert_raises(TypeError): + np.add(a, 0.5, sig='i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 0]) + np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + b[:] = 0 + with assert_raises(TypeError): + np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe') + assert_equal(b, [0, 0, 0]) + np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe') + assert_equal(b, [0, 0, 1]) + + def test_signature_all_None(self): + # signature all None, is an acceptable alternative (since 1.21) + # to not providing a signature. + res1 = np.add([3], [4], sig=(None, None, None)) + res2 = np.add([3], [4]) + assert_array_equal(res1, res2) + res1 = np.maximum([3], [4], sig=(None, None, None)) + res2 = np.maximum([3], [4]) + assert_array_equal(res1, res2) + + with pytest.raises(TypeError): + # special case, that would be deprecated anyway, so errors: + np.add(3, 4, signature=(None,)) + + def test_signature_dtype_type(self): + # Since that will be the normal behaviour (past NumPy 1.21) + # we do support the types already: + float_dtype = type(np.dtype(np.float64)) + np.add(3, 4, signature=(float_dtype, float_dtype, None)) + + @pytest.mark.parametrize("get_kwarg", [ + param(lambda dt: {"dtype": dt}, id="dtype"), + param(lambda dt: {"signature": (dt, None, None)}, id="signature")]) + def test_signature_dtype_instances_allowed(self, get_kwarg): + # We allow certain dtype instances when there is a clear singleton + # and the given one is equivalent; mainly for backcompat. + int64 = np.dtype("int64") + int64_2 = pickle.loads(pickle.dumps(int64)) + # Relies on pickling behavior, if assert fails just remove test... 
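+        # (i.e. unpickling should return a new object that is equivalent to, but not identical with, the singleton)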
+ assert int64 is not int64_2 + + assert np.add(1, 2, **get_kwarg(int64_2)).dtype == int64 + td = np.timedelta64(2, "s") + assert np.add(td, td, **get_kwarg("m8")).dtype == "m8[s]" + + msg = "The `dtype` and `signature` arguments to ufuncs" + + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg(np.dtype("int64").newbyteorder())) + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg(np.dtype("m8[ns]"))) + with pytest.raises(TypeError, match=msg): + np.add(3, 5, **get_kwarg("m8[ns]")) + + @pytest.mark.parametrize("casting", ["unsafe", "same_kind", "safe"]) + def test_partial_signature_mismatch(self, casting): + # If the second argument matches already, no need to specify it: + res = np.ldexp(np.float32(1.), np.int_(2), dtype="d") + assert res.dtype == "d" + res = np.ldexp(np.float32(1.), np.int_(2), signature=(None, None, "d")) + assert res.dtype == "d" + + # ldexp only has a loop for long input as second argument, overriding + # the output cannot help with that (no matter the casting) + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), dtype="d") + with pytest.raises(TypeError): + np.ldexp(1., np.uint64(3), signature=(None, None, "d")) + + def test_partial_signature_mismatch_with_cache(self): + with pytest.raises(TypeError): + np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) + # Ensure e,d->None is in the dispatching cache (double loop) + np.add(np.float16(1), np.float64(2)) + # The error must still be raised: + with pytest.raises(TypeError): + np.add(np.float16(1), np.uint64(2), sig=("e", "d", None)) + + def test_use_output_signature_for_all_arguments(self): + # Test that providing only `dtype=` or `signature=(None, None, dtype)` + # is sufficient if falling back to a homogeneous signature works. + # In this case, the `intp, intp -> intp` loop is chosen. + res = np.power(1.5, 2.8, dtype=np.intp, casting="unsafe") + assert res == 1 # the cast happens first. + res = np.power(1.5, 2.8, signature=(None, None, np.intp), + casting="unsafe") + assert res == 1 + with pytest.raises(TypeError): + # the unsafe casting would normally cause errors though: + np.power(1.5, 2.8, dtype=np.intp) + + def test_signature_errors(self): + with pytest.raises(TypeError, + match="the signature object to ufunc must be a string or"): + np.add(3, 4, signature=123.) # neither a string nor a tuple + + with pytest.raises(ValueError): + # bad symbols that do not translate to dtypes + np.add(3, 4, signature="%^->#") + + with pytest.raises(ValueError): + np.add(3, 4, signature=b"ii-i") # incomplete and byte string + + with pytest.raises(ValueError): + np.add(3, 4, signature="ii>i") # incomplete string + + with pytest.raises(ValueError): + np.add(3, 4, signature=(None, "f8")) # bad length + + with pytest.raises(UnicodeDecodeError): + np.add(3, 4, signature=b"\xff\xff->i") + + def test_forced_dtype_times(self): + # Signatures only set the type numbers (not the actual loop dtypes) + # so using `M` in a signature/dtype should generally work: + a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='>M8[D]') + np.maximum(a, a, dtype="M") + np.maximum.reduce(a, dtype="M") + + arr = np.arange(10, dtype="m8[s]") + np.add(arr, arr, dtype="m") + np.maximum(arr, arr, dtype="m") + + @pytest.mark.parametrize("ufunc", [np.add, np.sqrt]) + def test_cast_safety(self, ufunc): + """Basic test for the safest casts, because ufuncs inner loops can + indicate a cast-safety as well (which is normally always "no"). 
+ """ + def call_ufunc(arr, **kwargs): + return ufunc(*(arr,) * ufunc.nin, **kwargs) + + arr = np.array([1., 2., 3.], dtype=np.float32) + arr_bs = arr.astype(arr.dtype.newbyteorder()) + expected = call_ufunc(arr) + # Normally, a "no" cast: + res = call_ufunc(arr, casting="no") + assert_array_equal(expected, res) + # Byte-swapping is not allowed with "no" though: + with pytest.raises(TypeError): + call_ufunc(arr_bs, casting="no") + + # But is allowed with "equiv": + res = call_ufunc(arr_bs, casting="equiv") + assert_array_equal(expected, res) + + # Casting to float64 is safe, but not equiv: + with pytest.raises(TypeError): + call_ufunc(arr_bs, dtype=np.float64, casting="equiv") + + # but it is safe cast: + res = call_ufunc(arr_bs, dtype=np.float64, casting="safe") + expected = call_ufunc(arr.astype(np.float64)) # upcast + assert_array_equal(expected, res) + + @pytest.mark.parametrize("ufunc", [np.add, np.equal]) + def test_cast_safety_scalar(self, ufunc): + # We test add and equal, because equal has special scalar handling + # Note that the "equiv" casting behavior should maybe be considered + # a current implementation detail. + with pytest.raises(TypeError): + # this picks an integer loop, which is not safe + ufunc(3., 4., dtype=int, casting="safe") + + with pytest.raises(TypeError): + # We accept python float as float64 but not float32 for equiv. + ufunc(3., 4., dtype="float32", casting="equiv") + + # Special case for object and equal (note that equiv implies safe) + ufunc(3, 4, dtype=object, casting="equiv") + # Picks a double loop for both, first is equiv, second safe: + ufunc(np.array([3.]), 3., casting="equiv") + ufunc(np.array([3.]), 3, casting="safe") + ufunc(np.array([3]), 3, casting="equiv") + + def test_cast_safety_scalar_special(self): + # We allow this (and it succeeds) via object, although the equiv + # part may not be important. + np.equal(np.array([3]), 2**300, casting="equiv") + + def test_true_divide(self): + a = np.array(10) + b = np.array(20) + tgt = np.array(0.5) + + for tc in 'bhilqBHILQefdgFDG': + dt = np.dtype(tc) + aa = a.astype(dt) + bb = b.astype(dt) + + # Check result value and dtype. + for x, y in itertools.product([aa, -aa], [bb, -bb]): + + # Check with no output type specified + if tc in 'FDG': + tgt = complex(x) / complex(y) + else: + tgt = float(x) / float(y) + + res = np.true_divide(x, y) + rtol = max(np.finfo(res).resolution, 1e-15) + assert_allclose(res, tgt, rtol=rtol) + + if tc in 'bhilqBHILQ': + assert_(res.dtype.name == 'float64') + else: + assert_(res.dtype.name == dt.name) + + # Check with output type specified. This also checks for the + # incorrect casts in issue gh-3484 because the unary '-' does + # not change types, even for unsigned types, Hence casts in the + # ufunc from signed to unsigned and vice versa will lead to + # errors in the values. + for tcout in 'bhilqBHILQ': + dtout = np.dtype(tcout) + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + + for tcout in 'efdg': + dtout = np.dtype(tcout) + if tc in 'FDG': + # Casting complex to float is not allowed + assert_raises(TypeError, np.true_divide, x, y, dtype=dtout) + else: + tgt = float(x) / float(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + # The value of tiny for double double is NaN + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(dtout).tiny): + atol = max(np.finfo(dtout).tiny, 3e-308) + else: + atol = 3e-308 + # Some test values result in invalid for float16 + # and the cast to it may overflow to inf. 
+ with np.errstate(invalid='ignore', over='ignore'): + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res) and tcout == 'e': + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + for tcout in 'FDG': + dtout = np.dtype(tcout) + tgt = complex(x) / complex(y) + rtol = max(np.finfo(dtout).resolution, 1e-15) + # The value of tiny for double double is NaN + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(dtout).tiny): + atol = max(np.finfo(dtout).tiny, 3e-308) + else: + atol = 3e-308 + res = np.true_divide(x, y, dtype=dtout) + if not np.isfinite(res): + continue + assert_allclose(res, tgt, rtol=rtol, atol=atol) + assert_(res.dtype.name == dtout.name) + + # Check booleans + a = np.ones((), dtype=np.bool) + res = np.true_divide(a, a) + assert_(res == 1.0) + assert_(res.dtype.name == 'float64') + res = np.true_divide(~a, a) + assert_(res == 0.0) + assert_(res.dtype.name == 'float64') + + def test_sum_stability(self): + a = np.ones(500, dtype=np.float32) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4) + + a = np.ones(500, dtype=np.float64) + assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_sum(self): + for dt in (int, np.float16, np.float32, np.float64, np.longdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + # warning if sum overflows, which it does in float16 + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always", RuntimeWarning) + + tgt = dt(v * (v + 1) / 2) + overflow = not np.isfinite(tgt) + assert_equal(len(w), 1 * overflow) + + d = np.arange(1, v + 1, dtype=dt) + + assert_almost_equal(np.sum(d), tgt) + assert_equal(len(w), 2 * overflow) + + assert_almost_equal(np.sum(d[::-1]), tgt) + assert_equal(len(w), 3 * overflow) + + d = np.ones(500, dtype=dt) + assert_almost_equal(np.sum(d[::2]), 250.) + assert_almost_equal(np.sum(d[1::2]), 250.) + assert_almost_equal(np.sum(d[::3]), 167.) + assert_almost_equal(np.sum(d[1::3]), 167.) + assert_almost_equal(np.sum(d[::-2]), 250.) + assert_almost_equal(np.sum(d[-1::-2]), 250.) + assert_almost_equal(np.sum(d[::-3]), 167.) + assert_almost_equal(np.sum(d[-1::-3]), 167.) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + d += d + assert_almost_equal(d, 2.) + + def test_sum_complex(self): + for dt in (np.complex64, np.complex128, np.clongdouble): + for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127, + 128, 1024, 1235): + tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) * 1j) + d = np.empty(v, dtype=dt) + d.real = np.arange(1, v + 1) + d.imag = -np.arange(1, v + 1) + assert_almost_equal(np.sum(d), tgt) + assert_almost_equal(np.sum(d[::-1]), tgt) + + d = np.ones(500, dtype=dt) + 1j + assert_almost_equal(np.sum(d[::2]), 250. + 250j) + assert_almost_equal(np.sum(d[1::2]), 250. + 250j) + assert_almost_equal(np.sum(d[::3]), 167. + 167j) + assert_almost_equal(np.sum(d[1::3]), 167. + 167j) + assert_almost_equal(np.sum(d[::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j) + assert_almost_equal(np.sum(d[::-3]), 167. + 167j) + assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j) + # sum with first reduction entry != 0 + d = np.ones((1,), dtype=dt) + 1j + d += d + assert_almost_equal(d, 2. 
+ 2j) + + def test_sum_initial(self): + # Integer, single axis + assert_equal(np.sum([3], initial=2), 5) + + # Floating point + assert_almost_equal(np.sum([0.2], initial=0.1), 0.3) + + # Multiple non-adjacent axes + assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2), + [12, 12, 12]) + + def test_sum_where(self): + # More extensive tests done in test_reduction_with_where. + assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.) + assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5., + where=[True, False]), [9., 5.]) + + def test_vecdot(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.vecdot(arr1, arr2) + expected = np.array([5, 14]) + + assert_array_equal(actual, expected) + + actual2 = np.vecdot(arr1.T, arr2.T, axis=-2) + assert_array_equal(actual2, expected) + + actual3 = np.vecdot(arr1.astype("object"), arr2) + assert_array_equal(actual3, expected.astype("object")) + + def test_matvec(self): + arr1 = np.arange(6).reshape((2, 3)) + arr2 = np.arange(3).reshape((1, 3)) + + actual = np.matvec(arr1, arr2) + expected = np.array([[5, 14]]) + + assert_array_equal(actual, expected) + + actual2 = np.matvec(arr1.T, arr2.T, axes=[(-1, -2), -2, -1]) + assert_array_equal(actual2, expected) + + actual3 = np.matvec(arr1.astype("object"), arr2) + assert_array_equal(actual3, expected.astype("object")) + + @pytest.mark.parametrize("vec", [ + np.array([[1., 2., 3.], [4., 5., 6.]]), + np.array([[1., 2j, 3.], [4., 5., 6j]]), + np.array([[1., 2., 3.], [4., 5., 6.]], dtype=object), + np.array([[1., 2j, 3.], [4., 5., 6j]], dtype=object)]) + @pytest.mark.parametrize("matrix", [ + None, + np.array([[1. + 1j, 0.5, -0.5j], + [0.25, 2j, 0.], + [4., 0., -1j]])]) + def test_vecmatvec_identity(self, matrix, vec): + """Check that (x†A)x equals x†(Ax).""" + mat = matrix if matrix is not None else np.eye(3) + matvec = np.matvec(mat, vec) # Ax + vecmat = np.vecmat(vec, mat) # x†A + if matrix is None: + assert_array_equal(matvec, vec) + assert_array_equal(vecmat.conj(), vec) + assert_array_equal(matvec, (mat @ vec[..., np.newaxis]).squeeze(-1)) + assert_array_equal(vecmat, (vec[..., np.newaxis].mT.conj() + @ mat).squeeze(-2)) + expected = np.einsum('...i,ij,...j', vec.conj(), mat, vec) + vec_matvec = (vec.conj() * matvec).sum(-1) + vecmat_vec = (vecmat * vec).sum(-1) + assert_array_equal(vec_matvec, expected) + assert_array_equal(vecmat_vec, expected) + + @pytest.mark.parametrize("ufunc, shape1, shape2, conj", [ + (np.vecdot, (3,), (3,), True), + (np.vecmat, (3,), (3, 1), True), + (np.matvec, (1, 3), (3,), False), + (np.matmul, (1, 3), (3, 1), False), + ]) + def test_vecdot_matvec_vecmat_complex(self, ufunc, shape1, shape2, conj): + arr1 = np.array([1, 2j, 3]) + arr2 = np.array([1, 2, 3]) + + actual1 = ufunc(arr1.reshape(shape1), arr2.reshape(shape2)) + expected1 = np.array(((arr1.conj() if conj else arr1) * arr2).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual1, expected1) + # This would fail for conj=True, since matmul omits the conjugate. 
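+        # (matmul computes plain sums of products and never conjugates its arguments)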
+ if not conj: + assert_array_equal(arr1.reshape(shape1) @ arr2.reshape(shape2), + expected1) + + actual2 = ufunc(arr2.reshape(shape1), arr1.reshape(shape2)) + expected2 = np.array(((arr2.conj() if conj else arr2) * arr1).sum(), + ndmin=min(len(shape1), len(shape2))) + assert_array_equal(actual2, expected2) + + actual3 = ufunc(arr1.reshape(shape1).astype("object"), + arr2.reshape(shape2).astype("object")) + expected3 = expected1.astype(object) + assert_array_equal(actual3, expected3) + + def test_vecdot_subclass(self): + class MySubclass(np.ndarray): + pass + + arr1 = np.arange(6).reshape((2, 3)).view(MySubclass) + arr2 = np.arange(3).reshape((1, 3)).view(MySubclass) + result = np.vecdot(arr1, arr2) + assert isinstance(result, MySubclass) + + def test_vecdot_object_no_conjugate(self): + arr = np.array(["1", "2"], dtype=object) + with pytest.raises(AttributeError, match="conjugate"): + np.vecdot(arr, arr) + + def test_vecdot_object_breaks_outer_loop_on_error(self): + arr1 = np.ones((3, 3)).astype(object) + arr2 = arr1.copy() + arr2[1, 1] = None + out = np.zeros(3).astype(object) + with pytest.raises(TypeError, match=r"\*: 'float' and 'NoneType'"): + np.vecdot(arr1, arr2, out=out) + assert out[0] == 3 + assert out[1] == out[2] == 0 + + def test_broadcast(self): + msg = "broadcast" + a = np.arange(4).reshape((2, 1, 2)) + b = np.arange(4).reshape((1, 2, 2)) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) + msg = "extend & broadcast loop dimensions" + b = np.arange(4).reshape((2, 2)) + assert_array_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), err_msg=msg) + # Broadcast in core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.arange(4).reshape((4, 1)) + assert_raises(ValueError, np.vecdot, a, b) + # Extend core dimensions should fail + a = np.arange(8).reshape((4, 2)) + b = np.array(7) + assert_raises(ValueError, np.vecdot, a, b) + # Broadcast should fail + a = np.arange(2).reshape((2, 1, 1)) + b = np.arange(3).reshape((3, 1, 1)) + assert_raises(ValueError, np.vecdot, a, b) + + # Writing to a broadcasted array with overlap should warn, gh-2705 + a = np.arange(2) + b = np.arange(4).reshape((2, 2)) + u, v = np.broadcast_arrays(a, b) + assert_equal(u.strides[0], 0) + x = u + v + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + u += v + assert_equal(len(w), 1) + assert_(x[0, 0] != u[0, 0]) + + # Output reduction should not be allowed. + # See gh-15139 + a = np.arange(6).reshape(3, 2) + b = np.ones(2) + out = np.empty(()) + assert_raises(ValueError, np.vecdot, a, b, out) + out2 = np.empty(3) + c = np.vecdot(a, b, out2) + assert_(c is out2) + + def test_out_broadcasts(self): + # For ufuncs and gufuncs (not for reductions), we currently allow + # the output to cause broadcasting of the input arrays. + # both along dimensions with shape 1 and dimensions which do not + # exist at all in the inputs. 
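+        # e.g. below, a (1, 3) input is broadcast up to the (5, 4, 3) out array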
+ arr = np.arange(3).reshape(1, 3) + out = np.empty((5, 4, 3)) + np.add(arr, arr, out=out) + assert (out == np.arange(3) * 2).all() + + # The same holds for gufuncs (gh-16484) + np.vecdot(arr, arr, out=out) + # the result would be just a scalar `5`, but is broadcast fully: + assert (out == 5).all() + + @pytest.mark.parametrize(["arr", "out"], [ + ([2], np.empty(())), + ([1, 2], np.empty(1)), + (np.ones((4, 3)), np.empty((4, 1)))], + ids=["(1,)->()", "(2,)->(1,)", "(4, 3)->(4, 1)"]) + def test_out_broadcast_errors(self, arr, out): + # Output is (currently) allowed to broadcast inputs, but it cannot be + # smaller than the actual result. + with pytest.raises(ValueError, match="non-broadcastable"): + np.positive(arr, out=out) + + with pytest.raises(ValueError, match="non-broadcastable"): + np.add(np.ones(()), arr, out=out) + + def test_type_cast(self): + msg = "type cast" + a = np.arange(6, dtype='short').reshape((2, 3)) + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), + err_msg=msg) + msg = "type cast on one argument" + a = np.arange(6).reshape((2, 3)) + b = a + 0.1 + assert_array_almost_equal(np.vecdot(a, b), np.sum(a * b, axis=-1), + err_msg=msg) + + def test_endian(self): + msg = "big endian" + a = np.arange(6, dtype='>i4').reshape((2, 3)) + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), + err_msg=msg) + msg = "little endian" + a = np.arange(6, dtype='<i4').reshape((2, 3)) + assert_array_equal(np.vecdot(a, a), np.sum(a * a, axis=-1), + err_msg=msg) + + def test_axes_argument(self): + # vecdot signature: '(n),(n)->()' + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + # basic tests on inputs (outputs tested below with matrix_multiply). + c = np.vecdot(a, b) + assert_array_equal(c, (a * b).sum(-1)) + # default + c = np.vecdot(a, b, axes=[(-1,), (-1,), ()]) + assert_array_equal(c, (a * b).sum(-1)) + # integers ok for single axis. + c = np.vecdot(a, b, axes=[-1, -1, ()]) + assert_array_equal(c, (a * b).sum(-1)) + # mix fine + c = np.vecdot(a, b, axes=[(-1,), -1, ()]) + assert_array_equal(c, (a * b).sum(-1)) + # can omit last axis. + c = np.vecdot(a, b, axes=[-1, -1]) + assert_array_equal(c, (a * b).sum(-1)) + # can pass in other types of integer (with __index__ protocol) + c = np.vecdot(a, b, axes=[np.int8(-1), np.array(-1, dtype=np.int32)]) + assert_array_equal(c, (a * b).sum(-1)) + # swap some axes + c = np.vecdot(a, b, axes=[0, 0]) + assert_array_equal(c, (a * b).sum(0)) + c = np.vecdot(a, b, axes=[0, 2]) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + # Check errors for improperly constructed axes arguments. + # should have list. + assert_raises(TypeError, np.vecdot, a, b, axes=-1) + # needs enough elements + assert_raises(ValueError, np.vecdot, a, b, axes=[-1]) + # should pass in indices. + assert_raises(TypeError, np.vecdot, a, b, axes=[-1.0, -1.0]) + assert_raises(TypeError, np.vecdot, a, b, axes=[(-1.0,), -1]) + assert_raises(TypeError, np.vecdot, a, b, axes=[None, 1]) + # cannot pass an index unless there is only one dimension + # (output is wrong in this case) + assert_raises(AxisError, np.vecdot, a, b, axes=[-1, -1, -1]) + # or pass in generally the wrong number of axes + assert_raises(AxisError, np.vecdot, a, b, axes=[-1, -1, (-1,)]) + assert_raises(AxisError, np.vecdot, a, b, axes=[-1, (-2, -1), ()]) + # axes need to have same length. + assert_raises(ValueError, np.vecdot, a, b, axes=[0, 1]) + + # matrix_multiply signature: '(m,n),(n,p)->(m,p)' + mm = umt.matrix_multiply + a = np.arange(12).reshape((2, 3, 2)) + b = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # Sanity check. + c = mm(a, b) + assert_array_equal(c, np.matmul(a, b)) + # Default axes.
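+        # (spelling out the default axes explicitly should give the same result)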
+ c = mm(a, b, axes=[(-2, -1), (-2, -1), (-2, -1)]) + assert_array_equal(c, np.matmul(a, b)) + # Default with explicit axes. + c = mm(a, b, axes=[(1, 2), (2, 3), (2, 3)]) + assert_array_equal(c, np.matmul(a, b)) + # swap some axes. + c = mm(a, b, axes=[(0, -1), (1, 2), (-2, -1)]) + assert_array_equal(c, np.matmul(a.transpose(1, 0, 2), + b.transpose(0, 3, 1, 2))) + # Default with output array. + c = np.empty((2, 2, 3, 1)) + d = mm(a, b, out=c, axes=[(1, 2), (2, 3), (2, 3)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b)) + # Transposed output array + c = np.empty((1, 2, 2, 3)) + d = mm(a, b, out=c, axes=[(-2, -1), (-2, -1), (3, 0)]) + assert_(c is d) + assert_array_equal(c, np.matmul(a, b).transpose(3, 0, 1, 2)) + # Check errors for improperly constructed axes arguments. + # wrong argument + assert_raises(TypeError, mm, a, b, axis=1) + # axes should be list + assert_raises(TypeError, mm, a, b, axes=1) + assert_raises(TypeError, mm, a, b, axes=((-2, -1), (-2, -1), (-2, -1))) + # list needs to have right length + assert_raises(ValueError, mm, a, b, axes=[]) + assert_raises(ValueError, mm, a, b, axes=[(-2, -1)]) + # list should not contain None, or lists + assert_raises(TypeError, mm, a, b, axes=[None, None, None]) + assert_raises(TypeError, + mm, a, b, axes=[[-2, -1], [-2, -1], [-2, -1]]) + assert_raises(TypeError, + mm, a, b, axes=[(-2, -1), (-2, -1), [-2, -1]]) + assert_raises(TypeError, mm, a, b, axes=[(-2, -1), (-2, -1), None]) + # single integers are AxisErrors if more are required + assert_raises(AxisError, mm, a, b, axes=[-1, -1, -1]) + assert_raises(AxisError, mm, a, b, axes=[(-2, -1), (-2, -1), -1]) + # tuples should not have duplicated values + assert_raises(ValueError, mm, a, b, axes=[(-2, -1), (-2, -1), (-2, -2)]) + # arrays should have enough axes. + z = np.zeros((2, 2)) + assert_raises(ValueError, mm, z, z[0]) + assert_raises(ValueError, mm, z, z, out=z[:, 0]) + assert_raises(ValueError, mm, z[1], z, axes=[0, 1]) + assert_raises(ValueError, mm, z, z, out=z[0], axes=[0, 1]) + # Regular ufuncs should not accept axes. + assert_raises(TypeError, np.add, 1., 1., axes=[0]) + # should be able to deal with bad unrelated kwargs. + assert_raises(TypeError, mm, z, z, axes=[0, 1], parrot=True) + + def test_axis_argument(self): + # vecdot signature: '(n),(n)->()' + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = np.vecdot(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, axis=-1) + assert_array_equal(c, (a * b).sum(-1)) + out = np.zeros_like(c) + d = np.vecdot(a, b, axis=-1, out=out) + assert_(d is out) + assert_array_equal(d, c) + c = np.vecdot(a, b, axis=0) + assert_array_equal(c, (a * b).sum(0)) + # Sanity checks on innerwt and cumsum. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, axis=0), + np.sum(a * b * w, axis=0)) + assert_array_equal(umt.cumsum(a, axis=0), np.cumsum(a, axis=0)) + assert_array_equal(umt.cumsum(a, axis=-1), np.cumsum(a, axis=-1)) + out = np.empty_like(a) + b = umt.cumsum(a, out=out, axis=0) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=0)) + b = umt.cumsum(a, out=out, axis=1) + assert_(out is b) + assert_array_equal(b, np.cumsum(a, axis=-1)) + # Check errors. + # Cannot pass in both axis and axes. + assert_raises(TypeError, np.vecdot, a, b, axis=0, axes=[0, 0]) + # Not an integer. + assert_raises(TypeError, np.vecdot, a, b, axis=[0]) + # more than 1 core dimensions. 
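+        # (axis is a shortcut only for gufuncs that operate over a single, shared core dimension)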
+ mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, axis=1) + # Output wrong size in axis. + out = np.empty((1, 2, 3), dtype=a.dtype) + assert_raises(ValueError, umt.cumsum, a, out=out, axis=0) + # Regular ufuncs should not accept axis. + assert_raises(TypeError, np.add, 1., 1., axis=0) + + def test_keepdims_argument(self): + # vecdot signature: '(n),(n)->()' + a = np.arange(27.).reshape((3, 3, 3)) + b = np.arange(10., 19.).reshape((3, 1, 3)) + c = np.vecdot(a, b) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + out = np.zeros_like(c) + d = np.vecdot(a, b, keepdims=True, out=out) + assert_(d is out) + assert_array_equal(d, c) + # Now combined with axis and axes. + c = np.vecdot(a, b, axis=-1, keepdims=False) + assert_array_equal(c, (a * b).sum(-1, keepdims=False)) + c = np.vecdot(a, b, axis=-1, keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = np.vecdot(a, b, axis=0, keepdims=False) + assert_array_equal(c, (a * b).sum(0, keepdims=False)) + c = np.vecdot(a, b, axis=0, keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = np.vecdot(a, b, axes=[(-1,), (-1,), ()], keepdims=False) + assert_array_equal(c, (a * b).sum(-1)) + c = np.vecdot(a, b, axes=[(-1,), (-1,), (-1,)], keepdims=True) + assert_array_equal(c, (a * b).sum(-1, keepdims=True)) + c = np.vecdot(a, b, axes=[0, 0], keepdims=False) + assert_array_equal(c, (a * b).sum(0)) + c = np.vecdot(a, b, axes=[0, 0, 0], keepdims=True) + assert_array_equal(c, (a * b).sum(0, keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2], keepdims=False) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1)) + c = np.vecdot(a, b, axes=[0, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2, 2], keepdims=True) + assert_array_equal(c, (a.transpose(1, 2, 0) * b).sum(-1, + keepdims=True)) + c = np.vecdot(a, b, axes=[0, 2, 0], keepdims=True) + assert_array_equal(c, (a * b.transpose(2, 0, 1)).sum(0, keepdims=True)) + # Hardly useful, but should work. + c = np.vecdot(a, b, axes=[0, 2, 1], keepdims=True) + assert_array_equal(c, (a.transpose(1, 0, 2) * b.transpose(0, 2, 1)) + .sum(1, keepdims=True)) + # Check with two core dimensions. + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected = uml.det(a) + c = uml.det(a, keepdims=False) + assert_array_equal(c, expected) + c = uml.det(a, keepdims=True) + assert_array_equal(c, expected[:, np.newaxis, np.newaxis]) + a = np.eye(3) * np.arange(4.)[:, np.newaxis, np.newaxis] + expected_s, expected_l = uml.slogdet(a) + cs, cl = uml.slogdet(a, keepdims=False) + assert_array_equal(cs, expected_s) + assert_array_equal(cl, expected_l) + cs, cl = uml.slogdet(a, keepdims=True) + assert_array_equal(cs, expected_s[:, np.newaxis, np.newaxis]) + assert_array_equal(cl, expected_l[:, np.newaxis, np.newaxis]) + # Sanity check on innerwt. + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w, keepdims=True), + np.sum(a * b * w, axis=-1, keepdims=True)) + assert_array_equal(umt.innerwt(a, b, w, axis=0, keepdims=True), + np.sum(a * b * w, axis=0, keepdims=True)) + # Check errors. + # Not a boolean + assert_raises(TypeError, np.vecdot, a, b, keepdims='true') + # More than 1 core dimension, and core output dimensions. 
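+        # (keepdims is likewise only accepted when the input core dimensions match and the outputs have none)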
+ mm = umt.matrix_multiply + assert_raises(TypeError, mm, a, b, keepdims=True) + assert_raises(TypeError, mm, a, b, keepdims=False) + # Regular ufuncs should not accept keepdims. + assert_raises(TypeError, np.add, 1., 1., keepdims=False) + + def test_innerwt(self): + a = np.arange(6).reshape((2, 3)) + b = np.arange(10, 16).reshape((2, 3)) + w = np.arange(20, 26).reshape((2, 3)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + a = np.arange(100, 124).reshape((2, 3, 4)) + b = np.arange(200, 224).reshape((2, 3, 4)) + w = np.arange(300, 324).reshape((2, 3, 4)) + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + + def test_innerwt_empty(self): + """Test generalized ufunc with zero-sized operands""" + a = np.array([], dtype='f8') + b = np.array([], dtype='f8') + w = np.array([], dtype='f8') + assert_array_equal(umt.innerwt(a, b, w), np.sum(a * b * w, axis=-1)) + + def test_cross1d(self): + """Test with fixed-sized signature.""" + a = np.eye(3) + assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3))) + out = np.zeros((3, 3)) + result = umt.cross1d(a[0], a, out) + assert_(result is out) + assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1]))) + assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4)) + assert_raises(ValueError, umt.cross1d, a, np.arange(4.)) + # Wrong output core dimension. + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4))) + # Wrong output broadcast dimension (see gh-15139). + assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros(3)) + + def test_can_ignore_signature(self): + # Comparing the effects of ? in signature: + # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there. + # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p. + mat = np.arange(12).reshape((2, 3, 2)) + single_vec = np.arange(2) + col_vec = single_vec[:, np.newaxis] + col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1 + # matrix @ single column vector with proper dimension + mm_col_vec = umt.matrix_multiply(mat, col_vec) + # matmul does the same thing + matmul_col_vec = umt.matmul(mat, col_vec) + assert_array_equal(matmul_col_vec, mm_col_vec) + # matrix @ vector without dimension making it a column vector. + # matrix multiply fails -> missing core dim. + assert_raises(ValueError, umt.matrix_multiply, mat, single_vec) + # matmul mimicker passes, and returns a vector. + matmul_col = umt.matmul(mat, single_vec) + assert_array_equal(matmul_col, mm_col_vec.squeeze()) + # Now with a column array: same as for column vector, + # broadcasting sensibly. 
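+        # (loop dimensions (2,) and (2, 2) broadcast against each other here)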
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array) + matmul_col_vec = umt.matmul(mat, col_vec_array) + assert_array_equal(matmul_col_vec, mm_col_vec) + # As above, but for row vector + single_vec = np.arange(3) + row_vec = single_vec[np.newaxis, :] + row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1 + # row vector @ matrix + mm_row_vec = umt.matrix_multiply(row_vec, mat) + matmul_row_vec = umt.matmul(row_vec, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # single row vector @ matrix + assert_raises(ValueError, umt.matrix_multiply, single_vec, mat) + matmul_row = umt.matmul(single_vec, mat) + assert_array_equal(matmul_row, mm_row_vec.squeeze()) + # row vector array @ matrix + mm_row_vec = umt.matrix_multiply(row_vec_array, mat) + matmul_row_vec = umt.matmul(row_vec_array, mat) + assert_array_equal(matmul_row_vec, mm_row_vec) + # Now for vector combinations + # row vector @ column vector + col_vec = row_vec.T + col_vec_array = row_vec_array.swapaxes(-2, -1) + mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec) + matmul_row_col_vec = umt.matmul(row_vec, col_vec) + assert_array_equal(matmul_row_col_vec, mm_row_col_vec) + # single row vector @ single col vector + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec) + matmul_row_col = umt.matmul(single_vec, single_vec) + assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze()) + # row vector array @ matrix + mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array) + matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array) + assert_array_equal(matmul_row_col_array, mm_row_col_array) + # Finally, check that things are *not* squeezed if one gives an + # output. + out = np.zeros_like(mm_row_col_array) + out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + out[:] = 0 + out = umt.matmul(row_vec_array, col_vec_array, out=out) + assert_array_equal(out, mm_row_col_array) + # And check one cannot put missing dimensions back. + out = np.zeros_like(mm_row_col_vec) + assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec, + out) + # But fine for matmul, since it is just a broadcast. 
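+        # (the missing m and p dimensions are simply absent, so the scalar result broadcasts into out)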
+ out = umt.matmul(single_vec, single_vec, out) + assert_array_equal(out, mm_row_col_vec.squeeze()) + + def test_matrix_multiply(self): + self.compare_matrix_multiply_results(np.int64) + self.compare_matrix_multiply_results(np.double) + + def test_matrix_multiply_umath_empty(self): + res = umt.matrix_multiply(np.ones((0, 10)), np.ones((10, 0))) + assert_array_equal(res, np.zeros((0, 0))) + res = umt.matrix_multiply(np.ones((10, 0)), np.ones((0, 10))) + assert_array_equal(res, np.zeros((10, 10))) + + def compare_matrix_multiply_results(self, tp): + d1 = np.array(np.random.rand(2, 3, 4), dtype=tp) + d2 = np.array(np.random.rand(2, 3, 4), dtype=tp) + msg = f"matrix multiply on type {d1.dtype.name}" + + def permute_n(n): + if n == 1: + return ([0],) + ret = () + base = permute_n(n - 1) + for perm in base: + for i in range(n): + new = perm + [n - 1] + new[n - 1] = new[i] + new[i] = n - 1 + ret += (new,) + return ret + + def slice_n(n): + if n == 0: + return ((),) + ret = () + base = slice_n(n - 1) + for sl in base: + ret += (sl + (slice(None),),) + ret += (sl + (slice(0, 1),),) + return ret + + def broadcastable(s1, s2): + return s1 == s2 or 1 in {s1, s2} + + permute_3 = permute_n(3) + slice_3 = slice_n(3) + ((slice(None, None, -1),) * 3,) + + ref = True + for p1 in permute_3: + for p2 in permute_3: + for s1 in slice_3: + for s2 in slice_3: + a1 = d1.transpose(p1)[s1] + a2 = d2.transpose(p2)[s2] + ref = ref and a1.base is not None + ref = ref and a2.base is not None + if (a1.shape[-1] == a2.shape[-2] and + broadcastable(a1.shape[0], a2.shape[0])): + assert_array_almost_equal( + umt.matrix_multiply(a1, a2), + np.sum(a2[..., np.newaxis].swapaxes(-3, -1) * + a1[..., np.newaxis, :], axis=-1), + err_msg=msg + f' {str(a1.shape)} {str(a2.shape)}') + + assert_equal(ref, True, err_msg="reference check") + + def test_euclidean_pdist(self): + a = np.arange(12, dtype=float).reshape(4, 3) + out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype) + umt.euclidean_pdist(a, out) + b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1)) + b = b[~np.tri(a.shape[0], dtype=bool)] + assert_almost_equal(out, b) + # An output array is required to determine p with signature (n,d)->(p) + assert_raises(ValueError, umt.euclidean_pdist, a) + + def test_cumsum(self): + a = np.arange(10) + result = umt.cumsum(a) + assert_array_equal(result, a.cumsum()) + + def test_object_logical(self): + a = np.array([3, None, True, False, "test", ""], dtype=object) + assert_equal(np.logical_or(a, None), + np.array([x or None for x in a], dtype=object)) + assert_equal(np.logical_or(a, True), + np.array([x or True for x in a], dtype=object)) + assert_equal(np.logical_or(a, 12), + np.array([x or 12 for x in a], dtype=object)) + assert_equal(np.logical_or(a, "blah"), + np.array([x or "blah" for x in a], dtype=object)) + + assert_equal(np.logical_and(a, None), + np.array([x and None for x in a], dtype=object)) + assert_equal(np.logical_and(a, True), + np.array([x and True for x in a], dtype=object)) + assert_equal(np.logical_and(a, 12), + np.array([x and 12 for x in a], dtype=object)) + assert_equal(np.logical_and(a, "blah"), + np.array([x and "blah" for x in a], dtype=object)) + + assert_equal(np.logical_not(a), + np.array([not x for x in a], dtype=object)) + + assert_equal(np.logical_or.reduce(a), 3) + assert_equal(np.logical_and.reduce(a), None) + + def test_object_comparison(self): + class HasComparisons: + def __eq__(self, other): + return '==' + + arr0d = np.array(HasComparisons()) + assert_equal(arr0d == arr0d, True) + 
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast + + arr1d = np.array([HasComparisons()]) + assert_equal(arr1d == arr1d, np.array([True])) + assert_equal(np.equal(arr1d, arr1d), np.array([True])) # normal behavior is a cast + assert_equal(np.equal(arr1d, arr1d, dtype=object), np.array(['=='])) + + def test_object_array_reduction(self): + # Reductions on object arrays + a = np.array(['a', 'b', 'c'], dtype=object) + assert_equal(np.sum(a), 'abc') + assert_equal(np.max(a), 'c') + assert_equal(np.min(a), 'a') + a = np.array([True, False, True], dtype=object) + assert_equal(np.sum(a), 2) + assert_equal(np.prod(a), 0) + assert_equal(np.any(a), True) + assert_equal(np.all(a), False) + assert_equal(np.max(a), True) + assert_equal(np.min(a), False) + assert_equal(np.array([[1]], dtype=object).sum(), 1) + assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2]) + assert_equal(np.array([1], dtype=object).sum(initial=1), 2) + assert_equal(np.array([[1], [2, 3]], dtype=object) + .sum(initial=[0], where=[False, True]), [0, 2, 3]) + + def test_object_array_accumulate_inplace(self): + # Checks that in-place accumulates work, see also gh-7402 + arr = np.ones(4, dtype=object) + arr[:] = [[1] for i in range(4)] + # Twice reproduced also for tuples: + np.add.accumulate(arr, out=arr) + np.add.accumulate(arr, out=arr) + assert_array_equal(arr, + np.array([[1] * i for i in [1, 3, 6, 10]], dtype=object), + ) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + np.add.accumulate(arr, out=arr, axis=-1) + np.add.accumulate(arr, out=arr, axis=-1) + assert_array_equal(arr[0, :], + np.array([[2] * i for i in [1, 3, 6, 10]], dtype=object), + ) + + def test_object_array_accumulate_failure(self): + # Typical accumulation on object works as expected: + res = np.add.accumulate(np.array([1, 0, 2], dtype=object)) + assert_array_equal(res, np.array([1, 1, 3], dtype=object)) + # But errors are propagated from the inner-loop if they occur: + with pytest.raises(TypeError): + np.add.accumulate([1, None, 2]) + + def test_object_array_reduceat_inplace(self): + # Checks that in-place reduceats work, see also gh-7465 + arr = np.empty(4, dtype=object) + arr[:] = [[1] for i in range(4)] + out = np.empty(4, dtype=object) + out[:] = [[1] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr) + np.add.reduceat(arr, np.arange(4), out=arr) + assert_array_equal(arr, out) + + # And the same if the axis argument is used + arr = np.ones((2, 4), dtype=object) + arr[0, :] = [[2] for i in range(4)] + out = np.ones((2, 4), dtype=object) + out[0, :] = [[2] for i in range(4)] + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + np.add.reduceat(arr, np.arange(4), out=arr, axis=-1) + assert_array_equal(arr, out) + + def test_object_array_reduceat_failure(self): + # Reduceat works as expected when no invalid operation occurs (None is + # not involved in an operation here) + res = np.add.reduceat(np.array([1, None, 2], dtype=object), [1, 2]) + assert_array_equal(res, np.array([None, 2], dtype=object)) + # But errors when None would be involved in an operation: + with pytest.raises(TypeError): + np.add.reduceat([1, None, 2], [0, 2]) + + def test_zerosize_reduction(self): + # Test with default dtype and object dtype + for a in [[], np.array([], dtype=object)]: + assert_equal(np.sum(a), 0) + assert_equal(np.prod(a), 1) + assert_equal(np.any(a), False) + assert_equal(np.all(a), True) + assert_raises(ValueError, np.max, a) + 
assert_raises(ValueError, np.min, a) + + def test_axis_out_of_bounds(self): + a = np.array([False, False]) + assert_raises(AxisError, a.all, axis=1) + a = np.array([False, False]) + assert_raises(AxisError, a.all, axis=-2) + + a = np.array([False, False]) + assert_raises(AxisError, a.any, axis=1) + a = np.array([False, False]) + assert_raises(AxisError, a.any, axis=-2) + + def test_scalar_reduction(self): + # The functions 'sum', 'prod', etc allow specifying axis=0 + # even for scalars + assert_equal(np.sum(3, axis=0), 3) + assert_equal(np.prod(3.5, axis=0), 3.5) + assert_equal(np.any(True, axis=0), True) + assert_equal(np.all(False, axis=0), False) + assert_equal(np.max(3, axis=0), 3) + assert_equal(np.min(2.5, axis=0), 2.5) + + # Check scalar behaviour for ufuncs without an identity + assert_equal(np.power.reduce(3), 3) + + # Make sure that scalars are coming out from this operation + assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32) + assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32) + + # check if scalars/0-d arrays get cast + assert_(type(np.any(0, axis=0)) is np.bool) + + # assert that 0-d arrays get wrapped + class MyArray(np.ndarray): + pass + a = np.array(1).view(MyArray) + assert_(type(np.any(a)) is MyArray) + + def test_casting_out_param(self): + # Test that it's possible to do casts on output + a = np.ones((200, 100), np.int64) + b = np.ones((200, 100), np.int64) + c = np.ones((200, 100), np.float64) + np.add(a, b, out=c) + assert_equal(c, 2) + + a = np.zeros(65536) + b = np.zeros(65536, dtype=np.float32) + np.subtract(a, 0, out=b) + assert_equal(b, 0) + + def test_where_param(self): + # Test that the where= ufunc parameter works with regular arrays + a = np.arange(7) + b = np.ones(7) + c = np.zeros(7) + np.add(a, b, out=c, where=(a % 2 == 1)) + assert_equal(c, [0, 2, 0, 4, 0, 6, 0]) + + a = np.arange(4).reshape(2, 2) + 2 + np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]]) + assert_equal(a, [[2, 27], [16, 5]]) + # Broadcasting the where= parameter + np.subtract(a, 2, out=a, where=[True, False]) + assert_equal(a, [[0, 27], [14, 5]]) + + def test_where_param_buffer_output(self): + # This test is temporarily skipped because it requires + # adding masking features to the nditer to work properly + + # With casting on output + a = np.ones(10, np.int64) + b = np.ones(10, np.int64) + c = 1.5 * np.ones(10, np.float64) + np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0]) + assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5]) + + def test_where_param_alloc(self): + # With casting and allocated output + a = np.array([1], dtype=np.int64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + # No casting and allocated output + a = np.array([1], dtype=np.float64) + m = np.array([True], dtype=bool) + assert_equal(np.sqrt(a, where=m), [1]) + + def test_where_with_broadcasting(self): + # See gh-17198 + a = np.random.random((5000, 4)) + b = np.random.random((5000, 1)) + + where = a > 0.3 + out = np.full_like(a, 0) + np.less(a, b, where=where, out=out) + b_where = np.broadcast_to(b, a.shape)[where] + assert_array_equal((a[where] < b_where), out[where].astype(bool)) + assert not out[~where].any() # outside mask, out remains all 0 + + @staticmethod + def identityless_reduce_arrs(): + yield np.empty((2, 3, 4), order='C') + yield np.empty((2, 3, 4), order='F') + # Mixed order (reduce order differs outer) + yield 
np.empty((2, 4, 3), order='C').swapaxes(1, 2) + # Reversed order + yield np.empty((2, 3, 4), order='C')[::-1, ::-1, ::-1] + # Not contiguous + yield np.empty((3, 5, 4), order='C').swapaxes(1, 2)[1:, 1:, 1:] + # Not contiguous and not aligned + a = np.empty((3 * 4 * 5 * 8 + 1,), dtype='i1') + a = a[1:].view(dtype='f8') + a.shape = (3, 4, 5) + a = a[1:, 1:, 1:] + yield a + + @pytest.mark.parametrize("a", identityless_reduce_arrs()) + @pytest.mark.parametrize("pos", [(1, 0, 0), (0, 1, 0), (0, 0, 1)]) + def test_identityless_reduction(self, a, pos): + # np.minimum.reduce is an identityless reduction + a[...] = 1 + a[pos] = 0 + + for axis in [None, (0, 1), (0, 2), (1, 2), 0, 1, 2, ()]: + if axis is None: + axes = np.array([], dtype=np.intp) + else: + axes = np.delete(np.arange(a.ndim), axis) + + expected_pos = tuple(np.array(pos)[axes]) + expected = np.ones(np.array(a.shape)[axes]) + expected[expected_pos] = 0 + + res = np.minimum.reduce(a, axis=axis) + assert_equal(res, expected, strict=True) + + res = np.full_like(res, np.nan) + np.minimum.reduce(a, axis=axis, out=res) + assert_equal(res, expected, strict=True) + + @requires_memory(6 * 1024**3) + @pytest.mark.skipif(sys.maxsize < 2**32, + reason="test array too large for 32bit platform") + def test_identityless_reduction_huge_array(self): + # Regression test for gh-20921 (copying identity incorrectly failed) + arr = np.zeros((2, 2**31), 'uint8') + arr[:, 0] = [1, 3] + arr[:, -1] = [4, 1] + res = np.maximum.reduce(arr, axis=0) + del arr + assert res[0] == 3 + assert res[-1] == 4 + + def test_reduce_identity_depends_on_loop(self): + """ + The type of the result should always depend on the selected loop, not + necessarily the output (only relevant for object arrays). + """ + # For an object loop, the default value 0 with type int is used: + assert type(np.add.reduce([], dtype=object)) is int + out = np.array(None, dtype=object) + # When the loop is float64 but `out` is object this does not happen, + # the result is float64 cast to object (which gives Python `float`). + np.add.reduce([], out=out, dtype=np.float64) + assert type(out[()]) is float + + def test_initial_reduction(self): + # np.minimum.reduce is an identityless reduction + + # For cases like np.maximum(np.abs(...), initial=0) + # More generally, a supremum over non-negative numbers. + assert_equal(np.maximum.reduce([], initial=0), 0) + + # For cases like reduction of an empty array over the reals. + assert_equal(np.minimum.reduce([], initial=np.inf), np.inf) + assert_equal(np.maximum.reduce([], initial=-np.inf), -np.inf) + + # Random tests + assert_equal(np.minimum.reduce([5], initial=4), 4) + assert_equal(np.maximum.reduce([4], initial=5), 5) + assert_equal(np.maximum.reduce([5], initial=4), 5) + assert_equal(np.minimum.reduce([4], initial=5), 4) + + # Check initial=None raises ValueError for both types of ufunc reductions + assert_raises(ValueError, np.minimum.reduce, [], initial=None) + assert_raises(ValueError, np.add.reduce, [], initial=None) + # Also in the somewhat special object case: + with pytest.raises(ValueError): + np.add.reduce([], initial=None, dtype=object) + + # Check that np._NoValue gives default behavior. 
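+        # (np._NoValue is numpy's internal sentinel for "no initial argument was given")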
+ assert_equal(np.add.reduce([], initial=np._NoValue), 0) + + # Check that initial kwarg behaves as intended for dtype=object + a = np.array([10], dtype=object) + res = np.add.reduce(a, initial=5) + assert_equal(res, 15) + + def test_empty_reduction_and_identity(self): + arr = np.zeros((0, 5)) + # OK, since the reduction itself is *not* empty, the result is + assert np.true_divide.reduce(arr, axis=1).shape == (0,) + # Not OK, the reduction itself is empty and we have no identity + with pytest.raises(ValueError): + np.true_divide.reduce(arr, axis=0) + + # Test that an empty reduction fails also if the result is empty + arr = np.zeros((0, 0, 5)) + with pytest.raises(ValueError): + np.true_divide.reduce(arr, axis=1) + + # Division reduction makes sense with `initial=1` (empty or not): + res = np.true_divide.reduce(arr, axis=1, initial=1) + assert_array_equal(res, np.ones((0, 5))) + + @pytest.mark.parametrize('axis', (0, 1, None)) + @pytest.mark.parametrize('where', (np.array([False, True, True]), + np.array([[True], [False], [True]]), + np.array([[True, False, False], + [False, True, False], + [False, True, True]]))) + def test_reduction_with_where(self, axis, where): + a = np.arange(9.).reshape(3, 3) + a_copy = a.copy() + a_check = np.zeros_like(a) + np.positive(a, out=a_check, where=where) + + res = np.add.reduce(a, axis=axis, where=where) + check = a_check.sum(axis) + assert_equal(res, check) + # Check we do not overwrite elements of a internally. + assert_array_equal(a, a_copy) + + @pytest.mark.parametrize(('axis', 'where'), + ((0, np.array([True, False, True])), + (1, [True, True, False]), + (None, True))) + @pytest.mark.parametrize('initial', (-np.inf, 5.)) + def test_reduction_with_where_and_initial(self, axis, where, initial): + a = np.arange(9.).reshape(3, 3) + a_copy = a.copy() + a_check = np.full(a.shape, -np.inf) + np.positive(a, out=a_check, where=where) + + res = np.maximum.reduce(a, axis=axis, where=where, initial=initial) + check = a_check.max(axis, initial=initial) + assert_equal(res, check) + + def test_reduction_where_initial_needed(self): + a = np.arange(9.).reshape(3, 3) + m = [False, True, False] + assert_raises(ValueError, np.maximum.reduce, a, where=m) + + def test_identityless_reduction_nonreorderable(self): + a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]]) + + res = np.divide.reduce(a, axis=0) + assert_equal(res, [8.0, 4.0, 8.0]) + + res = np.divide.reduce(a, axis=1) + assert_equal(res, [2.0, 8.0]) + + res = np.divide.reduce(a, axis=()) + assert_equal(res, a) + + assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1)) + + def test_reduce_zero_axis(self): + # If we have an n x m array and do a reduction with axis=1, then we are + # doing n reductions, and each reduction takes an m-element array. For + # a reduction operation without an identity, then: + # n > 0, m > 0: fine + # n = 0, m > 0: fine, doing 0 reductions of m-element arrays + # n > 0, m = 0: can't reduce a 0-element array, ValueError + # n = 0, m = 0: can't reduce a 0-element array, ValueError (for + # consistency with the above case) + # This test doesn't actually look at return values, it just checks to + # make sure that we get an error in exactly those cases where we + # expect one, and assumes the calculations themselves are done + # correctly.
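+        # Helpers: ok() just performs the call, err() expects it to raise ValueError.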
+ + def ok(f, *args, **kwargs): + f(*args, **kwargs) + + def err(f, *args, **kwargs): + assert_raises(ValueError, f, *args, **kwargs) + + def t(expect, func, n, m): + expect(func, np.zeros((n, m)), axis=1) + expect(func, np.zeros((m, n)), axis=0) + expect(func, np.zeros((n // 2, n // 2, m)), axis=2) + expect(func, np.zeros((n // 2, m, n // 2)), axis=1) + expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2)) + expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2)) + expect(func, np.zeros((m // 3, m // 3, m // 3, + n // 2, n // 2)), + axis=(0, 1, 2)) + # Check what happens if the inner (resp. outer) dimensions are a + # mix of zero and non-zero: + expect(func, np.zeros((10, m, n)), axis=(0, 1)) + expect(func, np.zeros((10, n, m)), axis=(0, 2)) + expect(func, np.zeros((m, 10, n)), axis=0) + expect(func, np.zeros((10, m, n)), axis=1) + expect(func, np.zeros((10, n, m)), axis=2) + + # np.maximum is just an arbitrary ufunc with no reduction identity + assert_equal(np.maximum.identity, None) + t(ok, np.maximum.reduce, 30, 30) + t(ok, np.maximum.reduce, 0, 30) + t(err, np.maximum.reduce, 30, 0) + t(err, np.maximum.reduce, 0, 0) + err(np.maximum.reduce, []) + np.maximum.reduce(np.zeros((0, 0)), axis=()) + + # all of the combinations are fine for a reduction that has an + # identity + t(ok, np.add.reduce, 30, 30) + t(ok, np.add.reduce, 0, 30) + t(ok, np.add.reduce, 30, 0) + t(ok, np.add.reduce, 0, 0) + np.add.reduce([]) + np.add.reduce(np.zeros((0, 0)), axis=()) + + # OTOH, accumulate always makes sense for any combination of n and m, + # because it maps an m-element array to an m-element array. These + # tests are simpler because accumulate doesn't accept multiple axes. + for uf in (np.maximum, np.add): + uf.accumulate(np.zeros((30, 0)), axis=0) + uf.accumulate(np.zeros((0, 30)), axis=0) + uf.accumulate(np.zeros((30, 30)), axis=0) + uf.accumulate(np.zeros((0, 0)), axis=0) + + def test_safe_casting(self): + # In old versions of numpy, in-place operations used the 'unsafe' + # casting rules. In versions >= 1.10, 'same_kind' is the + # default and an exception is raised instead of a warning + # when 'same_kind' is not satisfied.
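+        # (so the float -> int in-place additions below must raise TypeError)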
+ a = np.array([1, 2, 3], dtype=int) + # Non-in-place addition is fine + assert_array_equal(assert_no_warnings(np.add, a, 1.1), + [2.1, 3.1, 4.1]) + assert_raises(TypeError, np.add, a, 1.1, out=a) + + def add_inplace(a, b): + a += b + + assert_raises(TypeError, add_inplace, a, 1.1) + # Make sure that explicitly overriding the exception is allowed: + assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe") + assert_array_equal(a, [2, 3, 4]) + + def test_ufunc_custom_out(self): + # Test ufunc with built in input types and custom output type + + a = np.array([0, 1, 2], dtype='i8') + b = np.array([0, 1, 2], dtype='i8') + c = np.empty(3, dtype=_rational_tests.rational) + + # Output must be specified so numpy knows what + # ufunc signature to look for + result = _rational_tests.test_add(a, b, c) + target = np.array([0, 2, 4], dtype=_rational_tests.rational) + assert_equal(result, target) + + # The new resolution means that we can (usually) find custom loops + # as long as they match exactly: + result = _rational_tests.test_add(a, b) + assert_equal(result, target) + + # This works even more generally, so long the default common-dtype + # promoter works out: + result = _rational_tests.test_add(a, b.astype(np.uint16), out=c) + assert_equal(result, target) + + # This scalar path used to go into legacy promotion, but doesn't now: + result = _rational_tests.test_add(a, np.uint16(2)) + target = np.array([2, 3, 4], dtype=_rational_tests.rational) + assert_equal(result, target) + + def test_operand_flags(self): + a = np.arange(16, dtype=int).reshape(4, 4) + b = np.arange(9, dtype=int).reshape(3, 3) + opflag_tests.inplace_add(a[:-1, :-1], b) + assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7], + [14, 16, 18, 11], [12, 13, 14, 15]])) + + a = np.array(0) + opflag_tests.inplace_add(a, 3) + assert_equal(a, 3) + opflag_tests.inplace_add(a, [3, 4]) + assert_equal(a, 10) + + def test_struct_ufunc(self): + import numpy._core._struct_ufunc_tests as struct_ufunc + + a = np.array([(1, 2, 3)], dtype='u8,u8,u8') + b = np.array([(1, 2, 3)], dtype='u8,u8,u8') + + result = struct_ufunc.add_triplet(a, b) + assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8')) + assert_raises(RuntimeError, struct_ufunc.register_fail) + + def test_custom_ufunc(self): + a = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + b = np.array( + [_rational_tests.rational(1, 2), + _rational_tests.rational(1, 3), + _rational_tests.rational(1, 4)], + dtype=_rational_tests.rational) + + result = _rational_tests.test_add_rationals(a, b) + expected = np.array( + [_rational_tests.rational(1), + _rational_tests.rational(2, 3), + _rational_tests.rational(1, 2)], + dtype=_rational_tests.rational) + assert_equal(result, expected) + + def test_custom_ufunc_forced_sig(self): + # gh-9351 - looking for a non-first userloop would previously hang + with assert_raises(TypeError): + np.multiply(_rational_tests.rational(1), 1, + signature=(_rational_tests.rational, int, None)) + + def test_custom_array_like(self): + + class MyThing: + __array_priority__ = 1000 + + rmul_count = 0 + getitem_count = 0 + + def __init__(self, shape): + self.shape = shape + + def __len__(self): + return self.shape[0] + + def __getitem__(self, i): + MyThing.getitem_count += 1 + if not isinstance(i, tuple): + i = (i,) + if len(i) > self.ndim: + raise IndexError("boo") + + return MyThing(self.shape[len(i):]) + + def __rmul__(self, other): + MyThing.rmul_count += 1 + return self + 
+ np.float64(5) * MyThing((3, 3)) + assert_(MyThing.rmul_count == 1, MyThing.rmul_count) + assert_(MyThing.getitem_count <= 2, MyThing.getitem_count) + + def test_array_wrap_array_priority(self): + class ArrayPriorityBase(np.ndarray): + @classmethod + def __array_wrap__(cls, array, context=None, return_scalar=False): + return cls + + class ArrayPriorityMinus0(ArrayPriorityBase): + __array_priority__ = 0 + + class ArrayPriorityMinus1000(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus1000b(ArrayPriorityBase): + __array_priority__ = -1000 + + class ArrayPriorityMinus2000(ArrayPriorityBase): + __array_priority__ = -2000 + + x = np.ones(2).view(ArrayPriorityMinus1000) + xb = np.ones(2).view(ArrayPriorityMinus1000b) + y = np.ones(2).view(ArrayPriorityMinus2000) + + assert np.add(x, y) is ArrayPriorityMinus1000 + assert np.add(y, x) is ArrayPriorityMinus1000 + assert np.add(x, xb) is ArrayPriorityMinus1000 + assert np.add(xb, x) is ArrayPriorityMinus1000b + y_minus0 = np.zeros(2).view(ArrayPriorityMinus0) + assert np.add(np.zeros(2), y_minus0) is ArrayPriorityMinus0 + assert type(np.add(xb, x, np.zeros(2))) is np.ndarray + + @pytest.mark.parametrize("a", ( + np.arange(10, dtype=int), + np.arange(10, dtype=_rational_tests.rational), + )) + def test_ufunc_at_basic(self, a): + + aa = a.copy() + np.add.at(aa, [2, 5, 2], 1) + assert_equal(aa, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9]) + + with pytest.raises(ValueError): + # missing second operand + np.add.at(aa, [2, 5, 3]) + + aa = a.copy() + np.negative.at(aa, [2, 5, 3]) + assert_equal(aa, [0, 1, -2, -3, 4, -5, 6, 7, 8, 9]) + + aa = a.copy() + b = np.array([100, 100, 100]) + np.add.at(aa, [2, 5, 2], b) + assert_equal(aa, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9]) + + with pytest.raises(ValueError): + # extraneous second operand + np.negative.at(a, [2, 5, 3], [1, 2, 3]) + + with pytest.raises(ValueError): + # second operand cannot be converted to an array + np.add.at(a, [2, 5, 3], [[1, 2], 1]) + + # ufuncs with indexed loops for performance in ufunc.at + indexed_ufuncs = [np.add, np.subtract, np.multiply, np.floor_divide, + np.maximum, np.minimum, np.fmax, np.fmin] + + @pytest.mark.parametrize( + "typecode", np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", indexed_ufuncs) + def test_ufunc_at_inner_loops(self, typecode, ufunc): + if ufunc is np.divide and typecode in np.typecodes['AllInteger']: + # Avoid divide-by-zero and inf for integer divide + a = np.ones(100, dtype=typecode) + indx = np.random.randint(100, size=30, dtype=np.intp) + vals = np.arange(1, 31, dtype=typecode) + else: + a = np.ones(1000, dtype=typecode) + indx = np.random.randint(1000, size=3000, dtype=np.intp) + vals = np.arange(3000, dtype=typecode) + atag = a.copy() + # Do the calculation twice and compare the answers + with warnings.catch_warnings(record=True) as w_at: + warnings.simplefilter('always') + ufunc.at(a, indx, vals) + with warnings.catch_warnings(record=True) as w_loop: + warnings.simplefilter('always') + for i, v in zip(indx, vals): + # Make sure all the work happens inside the ufunc + # in order to duplicate error/warning handling + ufunc(atag[i], v, out=atag[i:i + 1], casting="unsafe") + assert_equal(atag, a) + # If w_loop warned, make sure w_at warned as well + if len(w_loop) > 0: + # + assert len(w_at) > 0 + assert w_at[0].category == w_loop[0].category + assert str(w_at[0].message)[:10] == str(w_loop[0].message)[:10] + + @pytest.mark.parametrize("typecode", np.typecodes['Complex']) + 
@pytest.mark.parametrize("ufunc", [np.add, np.subtract, np.multiply]) + def test_ufunc_at_inner_loops_complex(self, typecode, ufunc): + a = np.ones(10, dtype=typecode) + indx = np.concatenate([np.ones(6, dtype=np.intp), + np.full(18, 4, dtype=np.intp)]) + value = a.dtype.type(1j) + ufunc.at(a, indx, value) + expected = np.ones_like(a) + if ufunc is np.multiply: + expected[1] = expected[4] = -1 + else: + expected[1] += 6 * (value if ufunc is np.add else -value) + expected[4] += 18 * (value if ufunc is np.add else -value) + + assert_array_equal(a, expected) + + def test_ufunc_at_ellipsis(self): + # Make sure the indexed loop check does not choke on iters + # with subspaces + arr = np.zeros(5) + np.add.at(arr, slice(None), np.ones(5)) + assert_array_equal(arr, np.ones(5)) + + def test_ufunc_at_negative(self): + arr = np.ones(5, dtype=np.int32) + indx = np.arange(5) + umt.indexed_negative.at(arr, indx) + # If it is [-1, -1, -1, -100, 0] then the regular strided loop was used + assert np.all(arr == [-1, -1, -1, -200, -1]) + + def test_ufunc_at_large(self): + # issue gh-23457 + indices = np.zeros(8195, dtype=np.int16) + b = np.zeros(8195, dtype=float) + b[0] = 10 + b[1] = 5 + b[8192:] = 100 + a = np.zeros(1, dtype=float) + np.add.at(a, indices, b) + assert a[0] == b.sum() + + def test_cast_index_fastpath(self): + arr = np.zeros(10) + values = np.ones(100000) + # index must be cast, which may be buffered in chunks: + index = np.zeros(len(values), dtype=np.uint8) + np.add.at(arr, index, values) + assert arr[0] == len(values) + + @pytest.mark.parametrize("value", [ + np.ones(1), np.ones(()), np.float64(1.), 1.]) + def test_ufunc_at_scalar_value_fastpath(self, value): + arr = np.zeros(1000) + # index must be cast, which may be buffered in chunks: + index = np.repeat(np.arange(1000), 2) + np.add.at(arr, index, value) + assert_array_equal(arr, np.full_like(arr, 2 * value)) + + def test_ufunc_at_multiD(self): + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, (slice(None), [1, 2, 1]), b) + assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b) + assert_equal(a, + [[[0, 401, 202], + [3, 404, 205], + [6, 407, 208]], + + [[9, 410, 211], + [12, 413, 214], + [15, 416, 217]], + + [[18, 419, 220], + [21, 422, 223], + [24, 425, 226]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]]) + np.add.at(a, ([1, 2, 1], slice(None)), b) + assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [203, 404, 605], + [106, 207, 308]], + + [[9, 10, 11], + [212, 413, 614], + [115, 216, 317]], + + [[18, 19, 20], + [221, 422, 623], + [124, 225, 326]]]) + + a = np.arange(9).reshape(3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (0, [1, 2, 1]), b) + assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, ([1, 2, 1], 0, slice(None)), b) + assert_equal(a, + [[[0, 1, 2], + [3, 4, 5], + [6, 7, 8]], + + [[209, 410, 611], + [12, 13, 14], + [15, 16, 17]], + + [[118, 219, 320], + [21, 22, 23], + [24, 25, 26]]]) + + a = np.arange(27).reshape(3, 3, 3) + b = np.array([100, 200, 300]) + np.add.at(a, (slice(None), slice(None), 
slice(None)), b) + assert_equal(a, + [[[100, 201, 302], + [103, 204, 305], + [106, 207, 308]], + + [[109, 210, 311], + [112, 213, 314], + [115, 216, 317]], + + [[118, 219, 320], + [121, 222, 323], + [124, 225, 326]]]) + + def test_ufunc_at_0D(self): + a = np.array(0) + np.add.at(a, (), 1) + assert_equal(a, 1) + + assert_raises(IndexError, np.add.at, a, 0, 1) + assert_raises(IndexError, np.add.at, a, [], 1) + + def test_ufunc_at_dtypes(self): + # Test mixed dtypes + a = np.arange(10) + np.power.at(a, [1, 2, 3, 2], 3.5) + assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9])) + + def test_ufunc_at_boolean(self): + # Test boolean indexing and boolean ufuncs + a = np.arange(10) + index = a % 2 == 0 + np.equal.at(a, index, [0, 2, 4, 6, 8]) + assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9]) + + # Test unary operator + a = np.arange(10, dtype='u4') + np.invert.at(a, [2, 5, 2]) + assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9]) + + def test_ufunc_at_advanced(self): + # Test empty subspace + orig = np.arange(4) + a = orig[:, None][:, 0:0] + np.add.at(a, [0, 1], 3) + assert_array_equal(orig, np.arange(4)) + + # Test with swapped byte order + index = np.array([1, 2, 1], np.dtype('i').newbyteorder()) + values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder()) + np.add.at(values, index, 3) + assert_array_equal(values, [1, 8, 6, 4]) + + # Test exception thrown + values = np.array(['a', 1], dtype=object) + assert_raises(TypeError, np.add.at, values, [0, 1], 1) + assert_array_equal(values, np.array(['a', 1], dtype=object)) + + # Test multiple output ufuncs raise error, gh-5665 + assert_raises(ValueError, np.modf.at, np.arange(10), [1]) + + # Test maximum + a = np.array([1, 2, 3]) + np.maximum.at(a, [0], 0) + assert_equal(a, np.array([1, 2, 3])) + + @pytest.mark.parametrize("dtype", + np.typecodes['AllInteger'] + np.typecodes['Float']) + @pytest.mark.parametrize("ufunc", + [np.add, np.subtract, np.divide, np.minimum, np.maximum]) + def test_at_negative_indexes(self, dtype, ufunc): + a = np.arange(0, 10).astype(dtype) + indxs = np.array([-1, 1, -1, 2]).astype(np.intp) + vals = np.array([1, 5, 2, 10], dtype=a.dtype) + + expected = a.copy() + for i, v in zip(indxs, vals): + expected[i] = ufunc(expected[i], v) + + ufunc.at(a, indxs, vals) + assert_array_equal(a, expected) + assert np.all(indxs == [-1, 1, -1, 2]) + + def test_at_not_none_signature(self): + # Test ufuncs with non-trivial signature raise a TypeError + a = np.ones((2, 2, 2)) + b = np.ones((1, 2, 2)) + assert_raises(TypeError, np.matmul.at, a, [0], b) + + a = np.array([[[1, 2], [3, 4]]]) + assert_raises(TypeError, np.linalg._umath_linalg.det.at, a, [0]) + + def test_at_no_loop_for_op(self): + # str dtype does not have a ufunc loop for np.add + arr = np.ones(10, dtype=str) + with pytest.raises(np._core._exceptions._UFuncNoLoopError): + np.add.at(arr, [0, 1], [0, 1]) + + def test_at_output_casting(self): + arr = np.array([-1]) + np.equal.at(arr, [0], [0]) + assert arr[0] == 0 + + def test_at_broadcast_failure(self): + arr = np.arange(5) + with pytest.raises(ValueError): + np.add.at(arr, [0, 1], [1, 2, 3]) + + def test_reduce_arguments(self): + f = np.add.reduce + d = np.ones((5, 2), dtype=int) + o = np.ones((2,), dtype=d.dtype) + r = o * 5 + assert_equal(f(d), r) + # a, axis=0, dtype=None, out=None, keepdims=False + assert_equal(f(d, axis=0), r) + assert_equal(f(d, 0), r) + assert_equal(f(d, 0, dtype=None), r) + assert_equal(f(d, 0, dtype='i'), r) + assert_equal(f(d, 0, 'i'), r) + assert_equal(f(d, 0, None), r) + 
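+        # Hedged aside (not upstream): keepdims keeps the reduced axis as
+        # length one, which the reshape comparison further below relies on:
+        assert f(d, 0, None, None, True).shape == (1, 2)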
assert_equal(f(d, 0, None, out=None), r) + assert_equal(f(d, 0, None, out=o), r) + assert_equal(f(d, 0, None, o), r) + assert_equal(f(d, 0, None, None), r) + assert_equal(f(d, 0, None, None, keepdims=False), r) + assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape)) + assert_equal(f(d, 0, None, None, False, 0), r) + assert_equal(f(d, 0, None, None, False, initial=0), r) + assert_equal(f(d, 0, None, None, False, 0, True), r) + assert_equal(f(d, 0, None, None, False, 0, where=True), r) + # multiple keywords + assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False), r) + assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0, + where=True), r) + + # too little + assert_raises(TypeError, f) + # too much + assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1) + # invalid axis + assert_raises(TypeError, f, d, "invalid") + assert_raises(TypeError, f, d, axis="invalid") + assert_raises(TypeError, f, d, axis="invalid", dtype=None, + keepdims=True) + # invalid dtype + assert_raises(TypeError, f, d, 0, "invalid") + assert_raises(TypeError, f, d, dtype="invalid") + assert_raises(TypeError, f, d, dtype="invalid", out=None) + # invalid out + assert_raises(TypeError, f, d, 0, None, "invalid") + assert_raises(TypeError, f, d, out="invalid") + assert_raises(TypeError, f, d, out="invalid", dtype=None) + # keepdims boolean, no invalid value + # assert_raises(TypeError, f, d, 0, None, None, "invalid") + # assert_raises(TypeError, f, d, keepdims="invalid", axis=0, dtype=None) + # invalid mix + assert_raises(TypeError, f, d, 0, keepdims="invalid", dtype="invalid", + out=None) + + # invalid keyword + assert_raises(TypeError, f, d, axis=0, dtype=None, invalid=0) + assert_raises(TypeError, f, d, invalid=0) + assert_raises(TypeError, f, d, 0, keepdims=True, invalid="invalid", + out=None) + assert_raises(TypeError, f, d, axis=0, dtype=None, keepdims=True, + out=None, invalid=0) + assert_raises(TypeError, f, d, axis=0, dtype=None, + out=None, invalid=0) + + def test_structured_equal(self): + # https://github.com/numpy/numpy/issues/4855 + + class MyA(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return getattr(ufunc, method)(*(input.view(np.ndarray) + for input in inputs), **kwargs) + a = np.arange(12.).reshape(4, 3) + ra = a.view(dtype=('f8,f8,f8')).squeeze() + mra = ra.view(MyA) + + target = np.array([True, False, False, False], dtype=bool) + assert_equal(np.all(target == (mra == ra[0])), True) + + def test_scalar_equal(self): + # Scalar comparisons should always work, without deprecation warnings. + # even when the ufunc fails. + a = np.array(0.) + b = np.array('a') + assert_(a != b) + assert_(b != a) + assert_(not (a == b)) + assert_(not (b == a)) + + def test_NotImplemented_not_returned(self): + # See gh-5964 and gh-2091. Some of these functions are not operator + # related and were fixed for other reasons in the past. 
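+        # (Illustrative note, not from the original suite: a leaked
+        # NotImplemented would mean np.add(np.array('1'), 1) evaluates to
+        # the NotImplemented singleton instead of raising TypeError, which
+        # is what the loop below guards against.)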
+        binary_funcs = [
+            np.power, np.add, np.subtract, np.multiply, np.divide,
+            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
+            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
+            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
+            np.maximum, np.minimum, np.mod,
+            np.greater, np.greater_equal, np.less, np.less_equal,
+            np.equal, np.not_equal]
+
+        a = np.array('1')
+        b = 1
+        c = np.array([1., 2.])
+        for f in binary_funcs:
+            assert_raises(TypeError, f, a, b)
+            assert_raises(TypeError, f, c, a)
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or])  # logical_xor object loop is bad
+    @pytest.mark.parametrize("signature",
+            [(None, None, object), (object, None, None),
+             (None, object, None)])
+    def test_logical_ufuncs_object_signatures(self, ufunc, signature):
+        a = np.array([True, None, False], dtype=object)
+        res = ufunc(a, a, signature=signature)
+        assert res.dtype == object
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    @pytest.mark.parametrize("signature",
+            [(bool, None, object), (object, None, bool),
+             (None, object, bool)])
+    def test_logical_ufuncs_mixed_object_signatures(self, ufunc, signature):
+        # Most mixed signatures fail (except those with bool out, e.g. `OO->?`)
+        a = np.array([True, None, False])
+        with pytest.raises(TypeError):
+            ufunc(a, a, signature=signature)
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    def test_logical_ufuncs_support_anything(self, ufunc):
+        # The logical ufuncs support even input that can't be promoted:
+        a = np.array(b'1', dtype="V3")
+        c = np.array([1., 2.])
+        assert_array_equal(ufunc(a, c), ufunc([True, True], True))
+        assert ufunc.reduce(a) == True
+        # check that the output has no effect:
+        out = np.zeros(2, dtype=np.int32)
+        expected = ufunc([True, True], True).astype(out.dtype)
+        assert_array_equal(ufunc(a, c, out=out), expected)
+        out = np.zeros((), dtype=np.int32)
+        assert ufunc.reduce(a, out=out) == True
+        # Last check, test reduction when out and a match (the complexity here
+        # is that the "i,i->?" may seem right, but should not match).
+        a = np.array([3], dtype="i")
+        out = np.zeros((), dtype=a.dtype)
+        assert ufunc.reduce(a, out=out) == 1
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    @pytest.mark.parametrize("dtype", ["S", "U"])
+    @pytest.mark.parametrize("values", [["1", "hi", "0"], ["", ""]])
+    def test_logical_ufuncs_supports_string(self, ufunc, dtype, values):
+        # note that values are either all true or all false
+        arr = np.array(values, dtype=dtype)
+        obj_arr = np.array(values, dtype=object)
+        res = ufunc(arr, arr)
+        expected = ufunc(obj_arr, obj_arr, dtype=bool)
+
+        assert_array_equal(res, expected)
+
+        res = ufunc.reduce(arr)
+        expected = ufunc.reduce(obj_arr, dtype=bool)
+        assert_array_equal(res, expected)
+
+    @pytest.mark.parametrize("ufunc",
+            [np.logical_and, np.logical_or, np.logical_xor])
+    def test_logical_ufuncs_out_cast_check(self, ufunc):
+        a = np.array('1')
+        c = np.array([1., 2.])
+        out = a.copy()
+        with pytest.raises(TypeError):
+            # It would be safe, but not equiv casting:
+            ufunc(a, c, out=out, casting="equiv")
+
+    def test_reducelike_byteorder_resolution(self):
+        # See gh-20699, byte-order changes need some extra care in the type
+        # resolution to make the following succeed:
+        arr_be = np.arange(10, dtype=">i8")
+        arr_le = np.arange(10, dtype="<i8")
+
+        assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
+        assert_array_equal(np.add.accumulate(arr_be),
+                           np.add.accumulate(arr_le))
+        assert_array_equal(np.add.reduceat(arr_be, [1]),
+                           np.add.reduceat(arr_le, [1]))
+
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+                                   if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_types(ufunc):
+    '''
+    Check that the output dtypes of a ufunc match the ones promised by
+    its type signatures
+    '''
+    for typ in ufunc.types:
+        # types is a list of strings like ii->i
+        if 'O' in typ or '?'
in typ: + continue + inp, out = typ.split('->') + args = [np.ones((3, 3), t) for t in inp] + with warnings.catch_warnings(record=True): + warnings.filterwarnings("always") + res = ufunc(*args) + if isinstance(res, tuple): + outs = tuple(out) + assert len(res) == len(outs) + for r, t in zip(res, outs): + assert r.dtype == np.dtype(t) + else: + assert res.dtype == np.dtype(out) + +@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) + if isinstance(getattr(np, x), np.ufunc)]) +def test_ufunc_noncontiguous(ufunc): + ''' + Check that contiguous and non-contiguous calls to ufuncs + have the same results for values in range(9) + ''' + for typ in ufunc.types: + # types is a list of strings like ii->i + if any(set('O?mM') & set(typ)): + # bool, object, datetime are too irregular for this simple test + continue + inp, out = typ.split('->') + args_c = [np.empty((6, 6), t) for t in inp] + # non contiguous (2, 3 step on the two dimensions) + args_n = [np.empty((12, 18), t)[::2, ::3] for t in inp] + # alignment != itemsize is possible. So create an array with such + # an odd step manually. + args_o = [] + for t in inp: + orig_dt = np.dtype(t) + off_dt = f"S{orig_dt.alignment}" # offset by alignment + dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) + args_o.append(np.empty((6, 6), dtype=dtype)["t"]) + for a in args_c + args_n + args_o: + a.flat = range(1, 37) + + with warnings.catch_warnings(record=True): + warnings.filterwarnings("always") + res_c = ufunc(*args_c) + res_n = ufunc(*args_n) + res_o = ufunc(*args_o) + if len(out) == 1: + res_c = (res_c,) + res_n = (res_n,) + res_o = (res_o,) + for c_ar, n_ar, o_ar in zip(res_c, res_n, res_o): + dt = c_ar.dtype + if np.issubdtype(dt, np.floating): + # for floating point results allow a small fuss in comparisons + # since different algorithms (libm vs. intrinsics) can be used + # for different input strides + res_eps = np.finfo(dt).eps + tol = 3 * res_eps + assert_allclose(res_c, res_n, atol=tol, rtol=tol) + assert_allclose(res_c, res_o, atol=tol, rtol=tol) + else: + assert_equal(c_ar, n_ar) + assert_equal(c_ar, o_ar) + + +@pytest.mark.parametrize('ufunc', [np.sign, np.equal]) +def test_ufunc_warn_with_nan(ufunc): + # issue gh-15127 + # test that calling certain ufuncs with a non-standard `nan` value does not + # emit a warning + # `b` holds a 64 bit signaling nan: the most significant bit of the + # significand is zero. + b = np.array([0x7ff0000000000001], 'i8').view('f8') + assert np.isnan(b) + if ufunc.nin == 1: + ufunc(b) + elif ufunc.nin == 2: + ufunc(b, b.copy()) + else: + raise ValueError('ufunc with more than 2 inputs') + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_ufunc_out_casterrors(): + # Tests that casting errors are correctly reported and buffers are + # cleared. 
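+    # (Hedged aside, not upstream: the core failure mode is the buffered
+    # cast, essentially np.array(["string"], dtype=object).astype(np.intp),
+    # which raises ValueError partway through a chunk.)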
+ # The following array can be added to itself as an object array, but + # the result cannot be cast to an integer output: + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * int(ncu.BUFSIZE * 1.5) + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + out = np.ones(len(arr), dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError): + # Output casting failure: + np.add(arr, arr, out=out, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + with pytest.raises(ValueError): + # Input casting failure: + np.add(arr, arr, out=out, dtype=np.intp, casting="unsafe") + + assert count == sys.getrefcount(value) + # output is unchanged after the error, this shows that the iteration + # was aborted (this is not necessarily defined behaviour) + assert out[-1] == 1 + + +@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)]) +def test_ufunc_input_casterrors(bad_offset): + value = 123 + arr = np.array([value] * bad_offset + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + with pytest.raises(ValueError): + # Force cast inputs, but the buffered cast of `arr` to intp fails: + np.add(arr, arr, dtype=np.intp, casting="unsafe") + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +@pytest.mark.parametrize("bad_offset", [0, int(ncu.BUFSIZE * 1.5)]) +def test_ufunc_input_floatingpoint_error(bad_offset): + value = 123 + arr = np.array([value] * bad_offset + + [np.nan] + + [value] * int(1.5 * ncu.BUFSIZE)) + with np.errstate(invalid="raise"), pytest.raises(FloatingPointError): + # Force cast inputs, but the buffered cast of `arr` to intp fails: + np.add(arr, arr, dtype=np.intp, casting="unsafe") + + +def test_trivial_loop_invalid_cast(): + # This tests the fast-path "invalid cast", see gh-19904. + with pytest.raises(TypeError, + match="cast ufunc 'add' input 0"): + # the void dtype definitely cannot cast to double: + np.add(np.array(1, "i,i"), 3, signature="dd->d") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +@pytest.mark.parametrize("offset", + [0, ncu.BUFSIZE // 2, int(1.5 * ncu.BUFSIZE)]) +def test_reduce_casterrors(offset): + # Test reporting of casting errors in reductions, we test various + # offsets to where the casting error will occur, since these may occur + # at different places during the reduction procedure. For example + # the first item may be special. + value = 123 # relies on python cache (leak-check will still find it) + arr = np.array([value] * offset + + ["string"] + + [value] * int(1.5 * ncu.BUFSIZE), dtype=object) + out = np.array(-1, dtype=np.intp) + + count = sys.getrefcount(value) + with pytest.raises(ValueError, match="invalid literal"): + # This is an unsafe cast, but we currently always allow that. + # Note that the double loop is picked, but the cast fails. + # `initial=None` disables the use of an identity here to test failures + # while copying the first values path (not used when identity exists). + np.add.reduce(arr, dtype=np.intp, out=out, initial=None) + assert count == sys.getrefcount(value) + # If an error occurred during casting, the operation is done at most until + # the error occurs (the result of which would be `value * offset`) and -1 + # if the error happened immediately. 
+ # This does not define behaviour, the output is invalid and thus undefined + assert out[()] < value * offset + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_reduction_no_reference_leak(): + # Test that the generic reduction does not leak references. + # gh-29358 + arr = np.array([1, 2, 3], dtype=np.int32) + count = sys.getrefcount(arr) + + np.add.reduce(arr, dtype=np.int32, initial=0) + assert count == sys.getrefcount(arr) + + np.add.accumulate(arr, dtype=np.int32) + assert count == sys.getrefcount(arr) + + np.add.reduceat(arr, [0, 1], dtype=np.int32) + assert count == sys.getrefcount(arr) + + # with `out=` the reference count is not changed + out = np.empty((), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduce(arr, dtype=np.int32, out=out, initial=0) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty(arr.shape, dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.accumulate(arr, dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + out = np.empty((2,), dtype=np.int32) + out_count = sys.getrefcount(out) + + np.add.reduceat(arr, [0, 1], dtype=np.int32, out=out) + assert count == sys.getrefcount(arr) + assert out_count == sys.getrefcount(out) + + +def test_object_reduce_cleanup_on_failure(): + # Test cleanup, including of the initial value (manually provided or not) + with pytest.raises(TypeError): + np.add.reduce([1, 2, None], initial=4) + + with pytest.raises(TypeError): + np.add.reduce([1, 2, None]) + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +@pytest.mark.parametrize("method", + [np.add.accumulate, np.add.reduce, + pytest.param(lambda x: np.add.reduceat(x, [0]), id="reduceat"), + pytest.param(lambda x: np.log.at(x, [2]), id="at")]) +def test_ufunc_methods_floaterrors(method): + # adding inf and -inf (or log(-inf) creates an invalid float and warns + arr = np.array([np.inf, 0, -np.inf]) + with np.errstate(all="warn"): + with pytest.warns(RuntimeWarning, match="invalid value"): + method(arr) + + arr = np.array([np.inf, 0, -np.inf]) + with np.errstate(all="raise"): + with pytest.raises(FloatingPointError): + method(arr) + + +def _check_neg_zero(value): + if value != 0.0: + return False + if not np.signbit(value.real): + return False + if value.dtype.kind == "c": + return np.signbit(value.imag) + return True + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +def test_addition_negative_zero(dtype): + dtype = np.dtype(dtype) + if dtype.kind == "c": + neg_zero = dtype.type(complex(-0.0, -0.0)) + else: + neg_zero = dtype.type(-0.0) + + arr = np.array(neg_zero) + arr2 = np.array(neg_zero) + + assert _check_neg_zero(arr + arr2) + # In-place ops may end up on a different path (reduce path) see gh-21211 + arr += arr2 + assert _check_neg_zero(arr) + + +@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) +@pytest.mark.parametrize("use_initial", [True, False]) +def test_addition_reduce_negative_zero(dtype, use_initial): + dtype = np.dtype(dtype) + if dtype.kind == "c": + neg_zero = dtype.type(complex(-0.0, -0.0)) + else: + neg_zero = dtype.type(-0.0) + + kwargs = {} + if use_initial: + kwargs["initial"] = neg_zero + else: + pytest.xfail("-0. propagation in sum currently requires initial") + + # Test various length, in case SIMD paths or chunking play a role. + # 150 extends beyond the pairwise blocksize; probably not important. 
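+    # Hedged aside (not upstream): the invariant under test is the IEEE-754
+    # rule that (-0.0) + (-0.0) == -0.0, e.g.:
+    assert np.signbit(np.float64(-0.0) + np.float64(-0.0))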
+ for i in range(150): + arr = np.array([neg_zero] * i, dtype=dtype) + res = np.sum(arr, **kwargs) + if i > 0 or use_initial: + assert _check_neg_zero(res) + else: + # `sum([])` should probably be 0.0 and not -0.0 like `sum([-0.0])` + assert not np.signbit(res.real) + assert not np.signbit(res.imag) + + +@pytest.mark.parametrize(["dt1", "dt2"], + [("S", "U"), ("U", "S"), ("S", "d"), ("S", "V"), ("U", "l")]) +def test_addition_string_types(dt1, dt2): + arr1 = np.array([1234234], dtype=dt1) + arr2 = np.array([b"423"], dtype=dt2) + with pytest.raises(np._core._exceptions.UFuncTypeError) as exc: + np.add(arr1, arr2) + + +@pytest.mark.parametrize("order1,order2", + [(">", ">"), ("<", "<"), (">", "<"), ("<", ">")]) +def test_addition_unicode_inverse_byte_order(order1, order2): + element = 'abcd' + arr1 = np.array([element], dtype=f"{order1}U4") + arr2 = np.array([element], dtype=f"{order2}U4") + result = arr1 + arr2 + assert result == 2 * element + + +@pytest.mark.parametrize("dtype", [np.int8, np.int16, np.int32, np.int64]) +def test_find_non_long_args(dtype): + element = 'abcd' + start = dtype(0) + end = dtype(len(element)) + arr = np.array([element]) + result = np._core.umath.find(arr, "a", start, end) + assert result.dtype == np.dtype("intp") + assert result == 0 + + +def test_find_access_past_buffer(): + # This checks that no read past the string buffer occurs in + # string_fastsearch.h. The buffer class makes sure this is checked. + # To see it in action, you can remove the checks in the buffer and + # this test will produce an 'Invalid read' if run under valgrind. + arr = np.array([b'abcd', b'ebcd']) + result = np._core.umath.find(arr, b'cde', 0, np.iinfo(np.int64).max) + assert np.all(result == -1) + + +class TestLowlevelAPIAccess: + def test_resolve_dtypes_basic(self): + # Basic test for dtype resolution: + i4 = np.dtype("i4") + f4 = np.dtype("f4") + f8 = np.dtype("f8") + + r = np.add.resolve_dtypes((i4, f4, None)) + assert r == (f8, f8, f8) + + # Signature uses the same logic to parse as ufunc (less strict) + # the following is "same-kind" casting so works: + r = np.add.resolve_dtypes(( + i4, i4, None), signature=(None, None, "f4")) + assert r == (f4, f4, f4) + + # Check NEP 50 "weak" promotion also: + r = np.add.resolve_dtypes((f4, int, None)) + assert r == (f4, f4, f4) + + with pytest.raises(TypeError): + np.add.resolve_dtypes((i4, f4, None), casting="no") + + def test_resolve_dtypes_comparison(self): + i4 = np.dtype("i4") + i8 = np.dtype("i8") + b = np.dtype("?") + r = np.equal.resolve_dtypes((i4, i8, None)) + assert r == (i8, i8, b) + + def test_weird_dtypes(self): + S0 = np.dtype("S0") + # S0 is often converted by NumPy to S1, but not here: + r = np.equal.resolve_dtypes((S0, S0, None)) + assert r == (S0, S0, np.dtype(bool)) + + # Subarray dtypes are weird and may not work fully, we preserve them + # leading to a TypeError (currently no equal loop for void/structured) + dts = np.dtype("10i") + with pytest.raises(TypeError): + np.equal.resolve_dtypes((dts, dts, None)) + + def test_resolve_dtypes_reduction(self): + i2 = np.dtype("i2") + default_int_ = np.dtype(np.int_) + # Check special addition resolution: + res = np.add.resolve_dtypes((None, i2, None), reduction=True) + assert res == (default_int_, default_int_, default_int_) + + def test_resolve_dtypes_reduction_no_output(self): + i4 = np.dtype("i4") + with pytest.raises(TypeError): + # May be allowable at some point? 
+ np.add.resolve_dtypes((i4, i4, i4), reduction=True) + + @pytest.mark.parametrize("dtypes", [ + (np.dtype("i"), np.dtype("i")), + (None, np.dtype("i"), np.dtype("f")), + (np.dtype("i"), None, np.dtype("f")), + ("i4", "i4", None)]) + def test_resolve_dtypes_errors(self, dtypes): + with pytest.raises(TypeError): + np.add.resolve_dtypes(dtypes) + + def test_resolve_dtypes_reduction_errors(self): + i2 = np.dtype("i2") + + with pytest.raises(TypeError): + np.add.resolve_dtypes((None, i2, i2)) + + with pytest.raises(TypeError): + np.add.signature((None, None, "i4")) + + @pytest.mark.skipif(not hasattr(ct, "pythonapi"), + reason="`ctypes.pythonapi` required for capsule unpacking.") + def test_loop_access(self): + # This is a basic test for the full strided loop access + data_t = ct.c_char_p * 2 + dim_t = ct.c_ssize_t * 1 + strides_t = ct.c_ssize_t * 2 + strided_loop_t = ct.CFUNCTYPE( + ct.c_int, ct.c_void_p, data_t, dim_t, strides_t, ct.c_void_p) + + class call_info_t(ct.Structure): + _fields_ = [ + ("strided_loop", strided_loop_t), + ("context", ct.c_void_p), + ("auxdata", ct.c_void_p), + ("requires_pyapi", ct.c_byte), + ("no_floatingpoint_errors", ct.c_byte), + ] + + i4 = np.dtype("i4") + dt, call_info_obj = np.negative._resolve_dtypes_and_context((i4, i4)) + assert dt == (i4, i4) # can be used without casting + + # Fill in the rest of the information: + np.negative._get_strided_loop(call_info_obj) + + ct.pythonapi.PyCapsule_GetPointer.restype = ct.c_void_p + call_info = ct.pythonapi.PyCapsule_GetPointer( + ct.py_object(call_info_obj), + ct.c_char_p(b"numpy_1.24_ufunc_call_info")) + + call_info = ct.cast(call_info, ct.POINTER(call_info_t)).contents + + arr = np.arange(10, dtype=i4) + call_info.strided_loop( + call_info.context, + data_t(arr.ctypes.data, arr.ctypes.data), + arr.ctypes.shape, # is a C-array with 10 here + strides_t(arr.ctypes.strides[0], arr.ctypes.strides[0]), + call_info.auxdata) + + # We just directly called the negative inner-loop in-place: + assert_array_equal(arr, -np.arange(10, dtype=i4)) + + @pytest.mark.parametrize("strides", [1, (1, 2, 3), (1, "2")]) + def test__get_strided_loop_errors_bad_strides(self, strides): + i4 = np.dtype("i4") + dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4)) + + with pytest.raises(TypeError, match="fixed_strides.*tuple.*or None"): + np.negative._get_strided_loop(call_info, fixed_strides=strides) + + def test__get_strided_loop_errors_bad_call_info(self): + i4 = np.dtype("i4") + dt, call_info = np.negative._resolve_dtypes_and_context((i4, i4)) + + with pytest.raises(ValueError, match="PyCapsule"): + np.negative._get_strided_loop("not the capsule!") + + with pytest.raises(TypeError, match=".*incompatible context"): + np.add._get_strided_loop(call_info) + + np.negative._get_strided_loop(call_info) + with pytest.raises(TypeError): + # cannot call it a second time: + np.negative._get_strided_loop(call_info) + + def test_long_arrays(self): + t = np.zeros((1029, 917), dtype=np.single) + t[0][0] = 1 + t[28][414] = 1 + tc = np.cos(t) + assert_equal(tc[0][0], tc[28][414]) diff --git a/python/numpy/_core/tests/test_umath.py b/python/numpy/_core/tests/test_umath.py new file mode 100644 index 000000000..8eac23641 --- /dev/null +++ b/python/numpy/_core/tests/test_umath.py @@ -0,0 +1,4928 @@ +import fnmatch +import itertools +import operator +import platform +import sys +import warnings +from collections import namedtuple +from fractions import Fraction +from functools import reduce + +import pytest + +import numpy as np +import 
numpy._core.umath as ncu +from numpy._core import _umath_tests as ncu_tests +from numpy._core import sctypes +from numpy.testing import ( + HAS_REFCOUNT, + IS_MUSL, + IS_PYPY, + IS_WASM, + _gen_alignment_data, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_no_warnings, + assert_raises, + assert_raises_regex, + suppress_warnings, +) +from numpy.testing._private.utils import _glibc_older_than + +UFUNCS = [obj for obj in np._core.umath.__dict__.values() + if isinstance(obj, np.ufunc)] + +UFUNCS_UNARY = [ + uf for uf in UFUNCS if uf.nin == 1 +] +UFUNCS_UNARY_FP = [ + uf for uf in UFUNCS_UNARY if 'f->f' in uf.types +] + +UFUNCS_BINARY = [ + uf for uf in UFUNCS if uf.nin == 2 +] +UFUNCS_BINARY_ACC = [ + uf for uf in UFUNCS_BINARY if hasattr(uf, "accumulate") and uf.nout == 1 +] + +def interesting_binop_operands(val1, val2, dtype): + """ + Helper to create "interesting" operands to cover common code paths: + * scalar inputs + * only first "values" is an array (e.g. scalar division fast-paths) + * Longer array (SIMD) placing the value of interest at different positions + * Oddly strided arrays which may not be SIMD compatible + + It does not attempt to cover unaligned access or mixed dtypes. + These are normally handled by the casting/buffering machinery. + + This is not a fixture (currently), since I believe a fixture normally + only yields once? + """ + fill_value = 1 # could be a parameter, but maybe not an optional one? + + arr1 = np.full(10003, dtype=dtype, fill_value=fill_value) + arr2 = np.full(10003, dtype=dtype, fill_value=fill_value) + + arr1[0] = val1 + arr2[0] = val2 + + extractor = lambda res: res + yield arr1[0], arr2[0], extractor, "scalars" + + extractor = lambda res: res + yield arr1[0, ...], arr2[0, ...], extractor, "scalar-arrays" + + # reset array values to fill_value: + arr1[0] = fill_value + arr2[0] = fill_value + + for pos in [0, 1, 2, 3, 4, 5, -1, -2, -3, -4]: + arr1[pos] = val1 + arr2[pos] = val2 + + extractor = lambda res: res[pos] + yield arr1, arr2, extractor, f"off-{pos}" + yield arr1, arr2[pos], extractor, f"off-{pos}-with-scalar" + + arr1[pos] = fill_value + arr2[pos] = fill_value + + for stride in [-1, 113]: + op1 = arr1[::stride] + op2 = arr2[::stride] + op1[10] = val1 + op2[10] = val2 + + extractor = lambda res: res[10] + yield op1, op2, extractor, f"stride-{stride}" + + op1[10] = fill_value + op2[10] = fill_value + + +def on_powerpc(): + """ True if we are running on a Power PC platform.""" + return platform.processor() == 'powerpc' or \ + platform.machine().startswith('ppc') + + +def bad_arcsinh(): + """The blocklisted trig functions are not accurate on aarch64/PPC for + complex256. Rather than dig through the actual problem skip the + test. 
This should be fixed when we can move past glibc2.17 + which is the version in manylinux2014 + """ + if platform.machine() == 'aarch64': + x = 1.78e-10 + elif on_powerpc(): + x = 2.16e-10 + else: + return False + v1 = np.arcsinh(np.float128(x)) + v2 = np.arcsinh(np.complex256(x)).real + # The eps for float128 is 1-e33, so this is way bigger + return abs((v1 / v2) - 1.0) > 1e-23 + + +class _FilterInvalids: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + +class TestConstants: + def test_pi(self): + assert_allclose(ncu.pi, 3.141592653589793, 1e-15) + + def test_e(self): + assert_allclose(ncu.e, 2.718281828459045, 1e-15) + + def test_euler_gamma(self): + assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) + + +class TestOut: + def test_out_subok(self): + for subok in (True, False): + a = np.array(0.5) + o = np.empty(()) + + r = np.add(a, 2, o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=o, subok=subok) + assert_(r is o) + r = np.add(a, 2, out=(o,), subok=subok) + assert_(r is o) + + d = np.array(5.7) + o1 = np.empty(()) + o2 = np.empty((), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, None, subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, None, o2, subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, o1, o2, subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + assert_(r1 is o1) + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + assert_(r2 is o2) + r1, r2 = np.frexp(d, out=(o1, o2), subok=subok) + assert_(r1 is o1) + assert_(r2 is o2) + + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + r1, r2 = np.frexp(d, out=o1, subok=subok) + + assert_raises(TypeError, np.add, a, 2, o, o, subok=subok) + assert_raises(TypeError, np.add, a, 2, o, out=o, subok=subok) + assert_raises(TypeError, np.add, a, 2, None, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(), subok=subok) + assert_raises(TypeError, np.add, a, 2, [], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=[], subok=subok) + assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok) + o.flags.writeable = False + assert_raises(ValueError, np.add, a, 2, o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=o, subok=subok) + assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok) + + def test_out_wrap_subok(self): + class ArrayWrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls, arr): + return np.asarray(arr).view(cls).copy() + + def __array_wrap__(self, arr, context=None, return_scalar=False): + return arr.view(type(self)) + + for subok in (True, False): + a = ArrayWrap([0.5]) + + r = np.add(a, 2, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=None, subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + r = np.add(a, 2, out=(None,), subok=subok) + if subok: + assert_(isinstance(r, ArrayWrap)) + else: + assert_(type(r) == np.ndarray) + + d = ArrayWrap([5.7]) + o1 = np.empty((1,)) + o2 = np.empty((1,), dtype=np.int32) + + r1, r2 = np.frexp(d, o1, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, 
r2 = np.frexp(d, o1, None, subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, None, o2, subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + r1, r2 = np.frexp(d, out=(o1, None), subok=subok) + if subok: + assert_(isinstance(r2, ArrayWrap)) + else: + assert_(type(r2) == np.ndarray) + + r1, r2 = np.frexp(d, out=(None, o2), subok=subok) + if subok: + assert_(isinstance(r1, ArrayWrap)) + else: + assert_(type(r1) == np.ndarray) + + with assert_raises(TypeError): + # Out argument must be tuple, since there are multiple outputs. + r1, r2 = np.frexp(d, out=o1, subok=subok) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_out_wrap_no_leak(self): + # Regression test for gh-26545 + class ArrSubclass(np.ndarray): + pass + + arr = np.arange(10).view(ArrSubclass) + orig_refcount = sys.getrefcount(arr) + arr *= 1 + assert sys.getrefcount(arr) == orig_refcount + + +class TestComparisons: + import operator + + @pytest.mark.parametrize('dtype', sctypes['uint'] + sctypes['int'] + + sctypes['float'] + [np.bool]) + @pytest.mark.parametrize('py_comp,np_comp', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + def test_comparison_functions(self, dtype, py_comp, np_comp): + # Initialize input arrays + if dtype == np.bool: + a = np.random.choice(a=[False, True], size=1000) + b = np.random.choice(a=[False, True], size=1000) + scalar = True + else: + a = np.random.randint(low=1, high=10, size=1000).astype(dtype) + b = np.random.randint(low=1, high=10, size=1000).astype(dtype) + scalar = 5 + np_scalar = np.dtype(dtype).type(scalar) + a_lst = a.tolist() + b_lst = b.tolist() + + # (Binary) Comparison (x1=array, x2=array) + comp_b = np_comp(a, b).view(np.uint8) + comp_b_list = [int(py_comp(x, y)) for x, y in zip(a_lst, b_lst)] + + # (Scalar1) Comparison (x1=scalar, x2=array) + comp_s1 = np_comp(np_scalar, b).view(np.uint8) + comp_s1_list = [int(py_comp(scalar, x)) for x in b_lst] + + # (Scalar2) Comparison (x1=array, x2=scalar) + comp_s2 = np_comp(a, np_scalar).view(np.uint8) + comp_s2_list = [int(py_comp(x, scalar)) for x in a_lst] + + # Sequence: Binary, Scalar1 and Scalar2 + assert_(comp_b.tolist() == comp_b_list, + f"Failed comparison ({py_comp.__name__})") + assert_(comp_s1.tolist() == comp_s1_list, + f"Failed comparison ({py_comp.__name__})") + assert_(comp_s2.tolist() == comp_s2_list, + f"Failed comparison ({py_comp.__name__})") + + def test_ignore_object_identity_in_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. + a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType: + def __eq__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.equal(a, a), [False]) + + def test_ignore_object_identity_in_not_equal(self): + # Check comparing identical objects whose comparison + # is not a simple boolean, e.g., arrays that are compared elementwise. 
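+        # (Illustrative, not upstream: the first element of `a` compares
+        # elementwise into a length-3 boolean array, so the object loop
+        # cannot collapse the result to a single bool - hence the
+        # ValueError below.)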
+ a = np.array([np.array([1, 2, 3]), None], dtype=object) + assert_raises(ValueError, np.not_equal, a, a) + + # Check error raised when comparing identical non-comparable objects. + class FunkyType: + def __ne__(self, other): + raise TypeError("I won't compare") + + a = np.array([FunkyType()]) + assert_raises(TypeError, np.not_equal, a, a) + + # Check identity doesn't override comparison mismatch. + a = np.array([np.nan], dtype=object) + assert_equal(np.not_equal(a, a), [True]) + + def test_error_in_equal_reduce(self): + # gh-20929 + # make sure np.equal.reduce raises a TypeError if an array is passed + # without specifying the dtype + a = np.array([0, 0]) + assert_equal(np.equal.reduce(a, dtype=bool), True) + assert_raises(TypeError, np.equal.reduce, a) + + def test_object_dtype(self): + assert np.equal(1, [1], dtype=object).dtype == object + assert np.equal(1, [1], signature=(None, None, "O")).dtype == object + + def test_object_nonbool_dtype_error(self): + # bool output dtype is fine of course: + assert np.equal(1, [1], dtype=bool).dtype == bool + + # but the following are examples do not have a loop: + with pytest.raises(TypeError, match="No loop matching"): + np.equal(1, 1, dtype=np.int64) + + with pytest.raises(TypeError, match="No loop matching"): + np.equal(1, 1, sig=(None, None, "l")) + + @pytest.mark.parametrize("dtypes", ["qQ", "Qq"]) + @pytest.mark.parametrize('py_comp, np_comp', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + @pytest.mark.parametrize("vals", [(2**60, 2**60 + 1), (2**60 + 1, 2**60)]) + def test_large_integer_direct_comparison( + self, dtypes, py_comp, np_comp, vals): + # Note that float(2**60) + 1 == float(2**60). + a1 = np.array([2**60], dtype=dtypes[0]) + a2 = np.array([2**60 + 1], dtype=dtypes[1]) + expected = py_comp(2**60, 2**60 + 1) + + assert py_comp(a1, a2) == expected + assert np_comp(a1, a2) == expected + # Also check the scalars: + s1 = a1[0] + s2 = a2[0] + assert isinstance(s1, np.integer) + assert isinstance(s2, np.integer) + # The Python operator here is mainly interesting: + assert py_comp(s1, s2) == expected + assert np_comp(s1, s2) == expected + + @pytest.mark.parametrize("dtype", np.typecodes['UnsignedInteger']) + @pytest.mark.parametrize('py_comp_func, np_comp_func', [ + (operator.lt, np.less), + (operator.le, np.less_equal), + (operator.gt, np.greater), + (operator.ge, np.greater_equal), + (operator.eq, np.equal), + (operator.ne, np.not_equal) + ]) + @pytest.mark.parametrize("flip", [True, False]) + def test_unsigned_signed_direct_comparison( + self, dtype, py_comp_func, np_comp_func, flip): + if flip: + py_comp = lambda x, y: py_comp_func(y, x) + np_comp = lambda x, y: np_comp_func(y, x) + else: + py_comp = py_comp_func + np_comp = np_comp_func + + arr = np.array([np.iinfo(dtype).max], dtype=dtype) + expected = py_comp(int(arr[0]), -1) + + assert py_comp(arr, -1) == expected + assert np_comp(arr, -1) == expected + + scalar = arr[0] + assert isinstance(scalar, np.integer) + # The Python operator here is mainly interesting: + assert py_comp(scalar, -1) == expected + assert np_comp(scalar, -1) == expected + + +class TestAdd: + def test_reduce_alignment(self): + # gh-9876 + # make sure arrays with weird strides work with the optimizations in + # pairwise_sum_@TYPE@. On x86, the 'b' field will count as aligned at a + # 4 byte offset, even though its itemsize is 8. 
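+        # Hedged sketch (not upstream; demo_dt is a name invented here):
+        # with the default packed layout the float64 field really does sit
+        # at a 4-byte offset:
+        demo_dt = np.dtype([('a', np.int32), ('b', np.float64)])
+        assert demo_dt.itemsize == 12 and demo_dt.fields['b'][1] == 4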
+ a = np.zeros(2, dtype=[('a', np.int32), ('b', np.float64)]) + a['a'] = -1 + assert_equal(a['b'].sum(), 0) + + +class TestDivision: + def test_division_int(self): + # int division should follow Python + x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120]) + if 5 / 10 == 0.5: + assert_equal(x / 100, [0.05, 0.1, 0.9, 1, + -0.05, -0.1, -0.9, -1, -1.2]) + else: + assert_equal(x / 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2]) + assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80]) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + sctypes['int'] + sctypes['uint'], ( + ( + # dividend + "np.array(range(fo.max-lsize, fo.max)).astype(dtype)," + # divisors + "np.arange(lsize).astype(dtype)," + # scalar divisors + "range(15)" + ), + ( + # dividend + "np.arange(fo.min, fo.min+lsize).astype(dtype)," + # divisors + "np.arange(lsize//-2, lsize//2).astype(dtype)," + # scalar divisors + "range(fo.min, fo.min + 15)" + ), ( + # dividend + "np.array(range(fo.max-lsize, fo.max)).astype(dtype)," + # divisors + "np.arange(lsize).astype(dtype)," + # scalar divisors + "[1,3,9,13,neg, fo.min+1, fo.min//2, fo.max//3, fo.max//4]" + ) + ) + )) + def test_division_int_boundary(self, dtype, ex_val): + fo = np.iinfo(dtype) + neg = -1 if fo.min < 0 else 1 + # Large enough to test SIMD loops and remainder elements + lsize = 512 + 7 + a, b, divisors = eval(ex_val) + a_lst, b_lst = a.tolist(), b.tolist() + + c_div = lambda n, d: ( + 0 if d == 0 else ( + fo.min if (n and n == fo.min and d == -1) else n // d + ) + ) + with np.errstate(divide='ignore'): + ac = a.copy() + ac //= b + div_ab = a // b + div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)] + + msg = "Integer arrays floor division check (//)" + assert all(div_ab == div_lst), msg + msg_eq = "Integer arrays floor division check (//=)" + assert all(ac == div_lst), msg_eq + + for divisor in divisors: + ac = a.copy() + with np.errstate(divide='ignore', over='ignore'): + div_a = a // divisor + ac //= divisor + div_lst = [c_div(i, divisor) for i in a_lst] + + assert all(div_a == div_lst), msg + assert all(ac == div_lst), msg_eq + + with np.errstate(divide='raise', over='raise'): + if 0 in b: + # Verify overflow case + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + a // b + else: + a // b + if fo.min and fo.min in a: + with pytest.raises(FloatingPointError, + match='overflow encountered in floor_divide'): + a // -1 + elif fo.min: + a // -1 + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + a // 0 + with pytest.raises(FloatingPointError, + match="divide by zero encountered in floor_divide"): + ac = a.copy() + ac //= 0 + + np.array([], dtype=dtype) // 0 + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("dtype,ex_val", itertools.product( + sctypes['int'] + sctypes['uint'], ( + "np.array([fo.max, 1, 2, 1, 1, 2, 3], dtype=dtype)", + "np.array([fo.min, 1, -2, 1, 1, 2, -3]).astype(dtype)", + "np.arange(fo.min, fo.min+(100*10), 10, dtype=dtype)", + "np.array(range(fo.max-(100*7), fo.max, 7)).astype(dtype)", + ) + )) + def test_division_int_reduce(self, dtype, ex_val): + fo = np.iinfo(dtype) + a = eval(ex_val) + lst = a.tolist() + c_div = lambda n, d: ( + 0 if d == 0 or (n and n == fo.min and d == -1) else n // d + ) + + with np.errstate(divide='ignore'): + div_a = np.floor_divide.reduce(a) + 
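+            # Hedged aside (not upstream): reduce folds left to right, e.g.
+            # (100 // 3) // 2 == 16:
+            assert np.floor_divide.reduce(np.array([100, 3, 2])) == 16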
div_lst = reduce(c_div, lst) + msg = "Reduce floor integer division check" + assert div_a == div_lst, msg + + with np.errstate(divide='raise', over='raise'): + with pytest.raises(FloatingPointError, + match="divide by zero encountered in reduce"): + np.floor_divide.reduce(np.arange(-100, 100).astype(dtype)) + if fo.min: + with pytest.raises(FloatingPointError, + match='overflow encountered in reduce'): + np.floor_divide.reduce( + np.array([fo.min, 1, -1], dtype=dtype) + ) + + @pytest.mark.parametrize( + "dividend,divisor,quotient", + [(np.timedelta64(2, 'Y'), np.timedelta64(2, 'M'), 12), + (np.timedelta64(2, 'Y'), np.timedelta64(-2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(2, 'M'), -12), + (np.timedelta64(-2, 'Y'), np.timedelta64(-2, 'M'), 12), + (np.timedelta64(2, 'M'), np.timedelta64(-2, 'Y'), -1), + (np.timedelta64(2, 'Y'), np.timedelta64(0, 'M'), 0), + (np.timedelta64(2, 'Y'), 2, np.timedelta64(1, 'Y')), + (np.timedelta64(2, 'Y'), -2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), 2, np.timedelta64(-1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -2, np.timedelta64(1, 'Y')), + (np.timedelta64(-2, 'Y'), -3, np.timedelta64(0, 'Y')), + (np.timedelta64(-2, 'Y'), 0, np.timedelta64('Nat', 'Y')), + ]) + def test_division_int_timedelta(self, dividend, divisor, quotient): + # If either divisor is 0 or quotient is Nat, check for division by 0 + if divisor and (isinstance(quotient, int) or not np.isnat(quotient)): + msg = "Timedelta floor division check" + assert dividend // divisor == quotient, msg + + # Test for arrays as well + msg = "Timedelta arrays floor division check" + dividend_array = np.array([dividend] * 5) + quotient_array = np.array([quotient] * 5) + assert all(dividend_array // divisor == quotient_array), msg + else: + if IS_WASM: + pytest.skip("fp errors don't work in wasm") + with np.errstate(divide='raise', invalid='raise'): + with pytest.raises(FloatingPointError): + dividend // divisor + + def test_division_complex(self): + # check that implementation is correct + msg = "Complex division implementation check" + x = np.array([1. + 1. * 1j, 1. + .5 * 1j, 1. + 2. * 1j], dtype=np.complex128) + assert_almost_equal(x**2 / x, x, err_msg=msg) + # check overflow, underflow + msg = "Complex division overflow/underflow check" + x = np.array([1.e+110, 1.e-110], dtype=np.complex128) + y = x**2 / x + assert_almost_equal(y / x, [1, 1], err_msg=msg) + + def test_zero_division_complex(self): + with np.errstate(invalid="ignore", divide="ignore"): + x = np.array([0.0], dtype=np.complex128) + y = 1.0 / x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.nan) / x + assert_(np.isinf(y)[0]) + y = complex(np.nan, np.inf) / x + assert_(np.isinf(y)[0]) + y = complex(np.inf, np.inf) / x + assert_(np.isinf(y)[0]) + y = 0.0 / x + assert_(np.isnan(y)[0]) + + def test_floor_division_complex(self): + # check that floor division, divmod and remainder raises type errors + x = np.array([.9 + 1j, -.1 + 1j, .9 + .5 * 1j, .9 + 2. * 1j], dtype=np.complex128) + with pytest.raises(TypeError): + x // 7 + with pytest.raises(TypeError): + np.divmod(x, 7) + with pytest.raises(TypeError): + np.remainder(x, 7) + + def test_floor_division_signed_zero(self): + # Check that the sign bit is correctly set when dividing positive and + # negative zero by one. 
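+        # Hedged aside (not upstream): the scalar case shows the rule, e.g.
+        # floor dividing a negative zero keeps the sign bit set:
+        assert np.signbit(np.floor_divide(np.float64(-0.0), 1.0))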
+ x = np.zeros(10) + assert_equal(np.signbit(x // 1), 0) + assert_equal(np.signbit((-x) // 1), 1) + + @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"), + reason="gh-22982") + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_floor_division_errors(self, dtype): + fnan = np.array(np.nan, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + fzer = np.array(0.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + # divide by zero error check + with np.errstate(divide='raise', invalid='ignore'): + assert_raises(FloatingPointError, np.floor_divide, fone, fzer) + with np.errstate(divide='ignore', invalid='raise'): + np.floor_divide(fone, fzer) + + # The following already contain a NaN and should not warn + with np.errstate(all='raise'): + np.floor_divide(fnan, fone) + np.floor_divide(fone, fnan) + np.floor_divide(fnan, fzer) + np.floor_divide(fzer, fnan) + + @pytest.mark.parametrize('dtype', np.typecodes['Float']) + def test_floor_division_corner_cases(self, dtype): + # test corner cases like 1.0//0.0 for errors and return vals + x = np.zeros(10, dtype=dtype) + y = np.ones(10, dtype=dtype) + fnan = np.array(np.nan, dtype=dtype) + fone = np.array(1.0, dtype=dtype) + fzer = np.array(0.0, dtype=dtype) + finf = np.array(np.inf, dtype=dtype) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in floor_divide") + div = np.floor_divide(fnan, fone) + assert np.isnan(div), f"div: {div}" + div = np.floor_divide(fone, fnan) + assert np.isnan(div), f"div: {div}" + div = np.floor_divide(fnan, fzer) + assert np.isnan(div), f"div: {div}" + # verify 1.0//0.0 computations return inf + with np.errstate(divide='ignore'): + z = np.floor_divide(y, x) + assert_(np.isinf(z).all()) + +def floor_divide_and_remainder(x, y): + return (np.floor_divide(x, y), np.remainder(x, y)) + + +def _signs(dt): + if dt in np.typecodes['UnsignedInteger']: + return (+1,) + else: + return (+1, -1) + + +class TestRemainder: + + def test_remainder_basic(self): + dt = np.typecodes['AllInteger'] + np.typecodes['Float'] + for op in [floor_divide_and_remainder, np.divmod]: + for dt1, dt2 in itertools.product(dt, dt): + for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)): + fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s' + msg = fmt % (op.__name__, dt1, dt2, sg1, sg2) + a = np.array(sg1 * 71, dtype=dt1) + b = np.array(sg2 * 19, dtype=dt2) + div, rem = op(a, b) + assert_equal(div * b + rem, a, err_msg=msg) + if sg2 == -1: + assert_(b < rem <= 0, msg) + else: + assert_(b > rem >= 0, msg) + + def test_float_remainder_exact(self): + # test that float results are exact for small integers. This also + # holds for the same integers scaled by powers of two. + nlst = list(range(-127, 0)) + plst = list(range(1, 128)) + dividend = nlst + [0] + plst + divisor = nlst + plst + arg = list(itertools.product(dividend, divisor)) + tgt = [divmod(*t) for t in arg] + + a, b = np.array(arg, dtype=int).T + # convert exact integer results from Python to float so that + # signed zero can be used, it is checked. 
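+        # (Illustrative, not upstream: plain Python produces the signed
+        # zeros being checked here, e.g. divmod(1.0, -1.0) -> (-1.0, -0.0).)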
+        tgtdiv, tgtrem = np.array(tgt, dtype=float).T
+        tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
+        tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
+
+        for op in [floor_divide_and_remainder, np.divmod]:
+            for dt in np.typecodes['Float']:
+                msg = f'op: {op.__name__}, dtype: {dt}'
+                fa = a.astype(dt)
+                fb = b.astype(dt)
+                div, rem = op(fa, fb)
+                assert_equal(div, tgtdiv, err_msg=msg)
+                assert_equal(rem, tgtrem, err_msg=msg)
+
+    def test_float_remainder_roundoff(self):
+        # gh-6127
+        dt = np.typecodes['Float']
+        for op in [floor_divide_and_remainder, np.divmod]:
+            for dt1, dt2 in itertools.product(dt, dt):
+                for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
+                    fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
+                    msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
+                    a = np.array(sg1 * 78 * 6e-8, dtype=dt1)
+                    b = np.array(sg2 * 6e-8, dtype=dt2)
+                    div, rem = op(a, b)
+                    # The equality div * b + rem == a should hold exactly
+                    # when fmod is used underneath
+                    assert_equal(div * b + rem, a, err_msg=msg)
+                    if sg2 == -1:
+                        assert_(b < rem <= 0, msg)
+                    else:
+                        assert_(b > rem >= 0, msg)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.xfail(sys.platform.startswith("darwin"),
+            reason="MacOS seems to not give the correct 'invalid' warning for "
+                   "`fmod`. Hopefully, others always do.")
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    def test_float_divmod_errors(self, dtype):
+        # Check that the expected errors are raised for divmod and remainder
+        fzero = np.array(0.0, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        fnan = np.array(np.nan, dtype=dtype)
+        # since divmod is a combination of both the remainder and divide
+        # ops, it sets both the divide-by-zero and the invalid flags
+        with np.errstate(divide='raise', invalid='ignore'):
+            assert_raises(FloatingPointError, np.divmod, fone, fzero)
+        with np.errstate(divide='ignore', invalid='raise'):
+            assert_raises(FloatingPointError, np.divmod, fone, fzero)
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, np.divmod, fzero, fzero)
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, np.divmod, finf, finf)
+        with np.errstate(divide='ignore', invalid='raise'):
+            assert_raises(FloatingPointError, np.divmod, finf, fzero)
+        with np.errstate(divide='raise', invalid='ignore'):
+            # inf / 0 does not set any flags, only the modulo creates a NaN
+            np.divmod(finf, fzero)
+
+    @pytest.mark.skipif(hasattr(np.__config__, "blas_ssl2_info"),
+                        reason="gh-22982")
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.xfail(sys.platform.startswith("darwin"),
+            reason="MacOS seems to not give the correct 'invalid' warning for "
+                   "`fmod`. Hopefully, others always do.")
+    @pytest.mark.parametrize('dtype', np.typecodes['Float'])
+    @pytest.mark.parametrize('fn', [np.fmod, np.remainder])
+    def test_float_remainder_errors(self, dtype, fn):
+        fzero = np.array(0.0, dtype=dtype)
+        fone = np.array(1.0, dtype=dtype)
+        finf = np.array(np.inf, dtype=dtype)
+        fnan = np.array(np.nan, dtype=dtype)
+
+        # fn(fone, fzero) raises 'invalid'; the NaN inputs that follow
+        # should pass through without warning.
+        with np.errstate(all='raise'):
+            with pytest.raises(FloatingPointError,
+                               match="invalid value"):
+                fn(fone, fzero)
+            fn(fnan, fzero)
+            fn(fzero, fnan)
+            fn(fone, fnan)
+            fn(fnan, fone)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_float_remainder_overflow(self):
+        a = np.finfo(np.float64).tiny
+        with np.errstate(over='ignore', invalid='ignore'):
+            div, mod = np.divmod(4, a)
+            assert_(np.isinf(div))
+            assert_(mod == 0)
+        with np.errstate(over='raise', invalid='ignore'):
+            assert_raises(FloatingPointError, np.divmod, 4, a)
+        with np.errstate(invalid='raise', over='ignore'):
+            assert_raises(FloatingPointError, np.divmod, 4, a)
+
+    def test_float_divmod_corner_cases(self):
+        # check nan cases
+        for dt in np.typecodes['Float']:
+            fnan = np.array(np.nan, dtype=dt)
+            fone = np.array(1.0, dtype=dt)
+            fzer = np.array(0.0, dtype=dt)
+            finf = np.array(np.inf, dtype=dt)
+            with suppress_warnings() as sup:
+                sup.filter(RuntimeWarning, "invalid value encountered in divmod")
+                sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
+                div, rem = np.divmod(fone, fzer)
+                assert np.isinf(div), f'dt: {dt}, div: {div}'
+                assert np.isnan(rem), f'dt: {dt}, rem: {rem}'
+                div, rem = np.divmod(fzer, fzer)
+                assert np.isnan(rem), f'dt: {dt}, rem: {rem}'
+                assert np.isnan(div), f'dt: {dt}, div: {div}'
+                div, rem = np.divmod(finf, finf)
+                assert np.isnan(div), f'dt: {dt}, div: {div}'
+                assert np.isnan(rem), f'dt: {dt}, rem: {rem}'
+                div, rem = np.divmod(finf, fzer)
+                assert np.isinf(div), f'dt: {dt}, div: {div}'
+                assert np.isnan(rem), f'dt: {dt}, rem: {rem}'
+                div, rem = np.divmod(fnan, fone)
+                assert np.isnan(rem), f"dt: {dt}, rem: {rem}"
+                assert np.isnan(div), f"dt: {dt}, div: {div}"
+                div, rem = np.divmod(fone, fnan)
+                assert np.isnan(rem), f"dt: {dt}, rem: {rem}"
+                assert np.isnan(div), f"dt: {dt}, div: {div}"
+                div, rem = np.divmod(fnan, fzer)
+                assert np.isnan(rem), f"dt: {dt}, rem: {rem}"
+                assert np.isnan(div), f"dt: {dt}, div: {div}"
+
+    def test_float_remainder_corner_cases(self):
+        # Check remainder magnitude.
+        for dt in np.typecodes['Float']:
+            fone = np.array(1.0, dtype=dt)
+            fzer = np.array(0.0, dtype=dt)
+            fnan = np.array(np.nan, dtype=dt)
+            b = np.array(1.0, dtype=dt)
+            a = np.nextafter(np.array(0.0, dtype=dt), -b)
+            rem = np.remainder(a, b)
+            assert_(rem <= b, f'dt: {dt}')
+            rem = np.remainder(-a, -b)
+            assert_(rem >= -b, f'dt: {dt}')
+
+        # Check nans, inf
+        with suppress_warnings() as sup:
+            sup.filter(RuntimeWarning, "invalid value encountered in remainder")
+            sup.filter(RuntimeWarning, "invalid value encountered in fmod")
+            for dt in np.typecodes['Float']:
+                fone = np.array(1.0, dtype=dt)
+                fzer = np.array(0.0, dtype=dt)
+                finf = np.array(np.inf, dtype=dt)
+                fnan = np.array(np.nan, dtype=dt)
+                rem = np.remainder(fone, fzer)
+                assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+                # MSVC 2008 returns NaN here, so disable the check.
+                # rem = np.remainder(fone, finf)
+                # assert_(rem == fone, 'dt: %s, rem: %s' % (dt, rem))
+                rem = np.remainder(finf, fone)
+                fmod = np.fmod(finf, fone)
+                assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+                assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+                rem = np.remainder(finf, finf)
+                fmod = np.fmod(finf, finf)
+                assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+                assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+                rem = np.remainder(finf, fzer)
+                fmod = np.fmod(finf, fzer)
+                assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+                assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+                rem = np.remainder(fone, fnan)
+                fmod = np.fmod(fone, fnan)
+                assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+                assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+                rem = np.remainder(fnan, fzer)
+                fmod = np.fmod(fnan, fzer)
+                assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+                assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+                rem = np.remainder(fnan, fone)
+                fmod = np.fmod(fnan, fone)
+                assert_(np.isnan(rem), f'dt: {dt}, rem: {rem}')
+                assert_(np.isnan(fmod), f'dt: {dt}, fmod: {fmod}')
+
+
+class TestDivisionIntegerOverflowsAndDivideByZero:
+    result_type = namedtuple('result_type',
+                             ['nocast', 'casted'])
+    helper_lambdas = {
+        'zero': lambda dtype: 0,
+        'min': lambda dtype: np.iinfo(dtype).min,
+        'neg_min': lambda dtype: -np.iinfo(dtype).min,
+        'min-zero': lambda dtype: (np.iinfo(dtype).min, 0),
+        'neg_min-zero': lambda dtype: (-np.iinfo(dtype).min, 0),
+    }
+    overflow_results = {
+        np.remainder: result_type(
+            helper_lambdas['zero'], helper_lambdas['zero']),
+        np.fmod: result_type(
+            helper_lambdas['zero'], helper_lambdas['zero']),
+        operator.mod: result_type(
+            helper_lambdas['zero'], helper_lambdas['zero']),
+        operator.floordiv: result_type(
+            helper_lambdas['min'], helper_lambdas['neg_min']),
+        np.floor_divide: result_type(
+            helper_lambdas['min'], helper_lambdas['neg_min']),
+        np.divmod: result_type(
+            helper_lambdas['min-zero'], helper_lambdas['neg_min-zero'])
+    }
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
+    def test_signed_division_overflow(self, dtype):
+        to_check = interesting_binop_operands(np.iinfo(dtype).min, -1, dtype)
+        for op1, op2, extractor, operand_identifier in to_check:
+            with pytest.warns(RuntimeWarning, match="overflow encountered"):
+                res = op1 // op2
+
+            assert res.dtype == op1.dtype
+            assert extractor(res) == np.iinfo(op1.dtype).min
+
+            # Remainder is well defined though, and does not warn:
+            res = op1 % op2
+            assert res.dtype == op1.dtype
+            assert extractor(res) == 0
+            # Check fmod as well:
+            res = np.fmod(op1, op2)
+            assert extractor(res) == 0
+
+            # Divmod warns for the division part:
+            with pytest.warns(RuntimeWarning, match="overflow encountered"):
+                res1, res2 = np.divmod(op1, op2)
+
+            assert res1.dtype == res2.dtype == op1.dtype
+            assert extractor(res1) == np.iinfo(op1.dtype).min
+            assert extractor(res2) == 0
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
+    def test_divide_by_zero(self, dtype):
+        # Note that the return value cannot be well defined here, but NumPy
+        # currently uses 0 consistently. This could be changed.
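+        # Concretely (editor's illustrative note): np.floor_divide(
+        # np.int32(1), np.int32(0)) emits a "divide by zero" RuntimeWarning
+        # and returns 0.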
+        to_check = interesting_binop_operands(1, 0, dtype)
+        for op1, op2, extractor, operand_identifier in to_check:
+            with pytest.warns(RuntimeWarning, match="divide by zero"):
+                res = op1 // op2
+
+            assert res.dtype == op1.dtype
+            assert extractor(res) == 0
+
+            with pytest.warns(RuntimeWarning, match="divide by zero"):
+                res1, res2 = np.divmod(op1, op2)
+
+            assert res1.dtype == res2.dtype == op1.dtype
+            assert extractor(res1) == 0
+            assert extractor(res2) == 0
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize("dividend_dtype", sctypes['int'])
+    @pytest.mark.parametrize("divisor_dtype", sctypes['int'])
+    @pytest.mark.parametrize("operation",
+                             [np.remainder, np.fmod, np.divmod,
+                              np.floor_divide, operator.mod,
+                              operator.floordiv])
+    @np.errstate(divide='warn', over='warn')
+    def test_overflows(self, dividend_dtype, divisor_dtype, operation):
+        # SIMD tries to perform the operation on as many elements as possible
+        # that is a multiple of the register's size. We resort to the
+        # default implementation for the leftover elements.
+        # We try to cover all paths here.
+        arrays = [np.array([np.iinfo(dividend_dtype).min] * i,
+                           dtype=dividend_dtype) for i in range(1, 129)]
+        divisor = np.array([-1], dtype=divisor_dtype)
+        # If the divisor is a larger type than the dividend (the `else`
+        # case), the result is promoted to the larger type and the
+        # division does not overflow for `divmod` and `floor_divide`.
+        if np.dtype(dividend_dtype).itemsize >= np.dtype(
+                divisor_dtype).itemsize and operation in (
+                    np.divmod, np.floor_divide, operator.floordiv):
+            with pytest.warns(
+                    RuntimeWarning,
+                    match="overflow encountered in"):
+                result = operation(
+                    dividend_dtype(np.iinfo(dividend_dtype).min),
+                    divisor_dtype(-1)
+                )
+                assert result == self.overflow_results[operation].nocast(
+                    dividend_dtype)
+
+            # Arrays
+            for a in arrays:
+                # For divmod the result is a stacked pair of quotient and
+                # remainder arrays, so flatten it column-wise ('f') to line
+                # up with the flattened expected values.
+                with pytest.warns(
+                        RuntimeWarning,
+                        match="overflow encountered in"):
+                    result = np.array(operation(a, divisor)).flatten('f')
+                    expected_array = np.array(
+                        [self.overflow_results[operation].nocast(
+                            dividend_dtype)] * len(a)).flatten()
+                    assert_array_equal(result, expected_array)
+        else:
+            # Scalars
+            result = operation(
+                dividend_dtype(np.iinfo(dividend_dtype).min),
+                divisor_dtype(-1)
+            )
+            assert result == self.overflow_results[operation].casted(
+                dividend_dtype)
+
+            # Arrays
+            for a in arrays:
+                # See above comment on flattening
+                result = np.array(operation(a, divisor)).flatten('f')
+                expected_array = np.array(
+                    [self.overflow_results[operation].casted(
+                        dividend_dtype)] * len(a)).flatten()
+                assert_array_equal(result, expected_array)
+
+
+class TestCbrt:
+    def test_cbrt_scalar(self):
+        assert_almost_equal(np.cbrt(np.float32(-2.5)**3), -2.5)
+
+    def test_cbrt(self):
+        x = np.array([1., 2., -3., np.inf, -np.inf])
+        assert_almost_equal(np.cbrt(x**3), x)
+
+        assert_(np.isnan(np.cbrt(np.nan)))
+        assert_equal(np.cbrt(np.inf), np.inf)
+        assert_equal(np.cbrt(-np.inf), -np.inf)
+
+
+class TestPower:
+    def test_power_float(self):
+        x = np.array([1., 2., 3.])
+        assert_equal(x**0, [1., 1., 1.])
+        assert_equal(x**1, x)
+        assert_equal(x**2, [1., 4., 9.])
+        y = x.copy()
+        y **= 2
+        assert_equal(y, [1., 4., 9.])
+        assert_almost_equal(x**(-1), [1., 0.5, 1. / 3])
+        assert_almost_equal(x**(0.5), [1., ncu.sqrt(2), ncu.sqrt(3)])
+
+        for out, inp, msg in _gen_alignment_data(dtype=np.float32,
+                                                 type='unary',
+                                                 max_size=11):
+            exp = [ncu.sqrt(i) for i in inp]
+            assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+            np.sqrt(inp, out=out)
+            assert_equal(out, exp, err_msg=msg)
+
+        for out, inp, msg in _gen_alignment_data(dtype=np.float64,
+                                                 type='unary',
+                                                 max_size=7):
+            exp = [ncu.sqrt(i) for i in inp]
+            assert_almost_equal(inp**(0.5), exp, err_msg=msg)
+            np.sqrt(inp, out=out)
+            assert_equal(out, exp, err_msg=msg)
+
+    def test_power_complex(self):
+        x = np.array([1 + 2j, 2 + 3j, 3 + 4j])
+        assert_equal(x**0, [1., 1., 1.])
+        assert_equal(x**1, x)
+        assert_almost_equal(x**2, [-3 + 4j, -5 + 12j, -7 + 24j])
+        assert_almost_equal(x**3, [(1 + 2j)**3, (2 + 3j)**3, (3 + 4j)**3])
+        assert_almost_equal(x**4, [(1 + 2j)**4, (2 + 3j)**4, (3 + 4j)**4])
+        assert_almost_equal(x**(-1), [1 / (1 + 2j), 1 / (2 + 3j), 1 / (3 + 4j)])
+        assert_almost_equal(x**(-2), [1 / (1 + 2j)**2, 1 / (2 + 3j)**2, 1 / (3 + 4j)**2])
+        assert_almost_equal(x**(-3), [(-11 + 2j) / 125, (-46 - 9j) / 2197,
+                                      (-117 - 44j) / 15625])
+        assert_almost_equal(x**(0.5), [ncu.sqrt(1 + 2j), ncu.sqrt(2 + 3j),
+                                       ncu.sqrt(3 + 4j)])
+        norm = 1. / ((x**14)[0])
+        assert_almost_equal(x**14 * norm,
+                [i * norm for i in [-76443 + 16124j, 23161315 + 58317492j,
+                                    5583548873 + 2465133864j]])
+
+        # Ticket #836
+        def assert_complex_equal(x, y):
+            assert_array_equal(x.real, y.real)
+            assert_array_equal(x.imag, y.imag)
+
+        for z in [complex(0, np.inf), complex(1, np.inf)]:
+            z = np.array([z], dtype=np.complex128)
+            with np.errstate(invalid="ignore"):
+                assert_complex_equal(z**1, z)
+                assert_complex_equal(z**2, z * z)
+                assert_complex_equal(z**3, z * z * z)
+
+    def test_power_zero(self):
+        # ticket #1271
+        zero = np.array([0j])
+        one = np.array([1 + 0j])
+        cnan = np.array([complex(np.nan, np.nan)])
+        # FIXME cinf not tested.
+        # cinf = np.array([complex(np.inf, 0)])
+
+        def assert_complex_equal(x, y):
+            x, y = np.asarray(x), np.asarray(y)
+            assert_array_equal(x.real, y.real)
+            assert_array_equal(x.imag, y.imag)
+
+        # positive powers
+        for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+            assert_complex_equal(np.power(zero, p), zero)
+
+        # zero power
+        assert_complex_equal(np.power(zero, 0), one)
+        with np.errstate(invalid="ignore"):
+            assert_complex_equal(np.power(zero, 0 + 1j), cnan)
+
+        # negative power
+        for p in [0.33, 0.5, 1, 1.5, 2, 3, 4, 5, 6.6]:
+            assert_complex_equal(np.power(zero, -p), cnan)
+        assert_complex_equal(np.power(zero, -1 + 0.2j), cnan)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    def test_zero_power_nonzero(self):
+        # Testing 0^{Non-zero} issue 18378
+        zero = np.array([0.0 + 0.0j])
+        cnan = np.array([complex(np.nan, np.nan)])
+
+        def assert_complex_equal(x, y):
+            assert_array_equal(x.real, y.real)
+            assert_array_equal(x.imag, y.imag)
+
+        # Complex powers with positive real part will not generate a warning
+        assert_complex_equal(np.power(zero, 1 + 4j), zero)
+        assert_complex_equal(np.power(zero, 2 - 3j), zero)
+        # Testing zero values when real part is greater than zero
+        assert_complex_equal(np.power(zero, 1 + 1j), zero)
+        assert_complex_equal(np.power(zero, 1 + 0j), zero)
+        assert_complex_equal(np.power(zero, 1 - 1j), zero)
+        # Complex powers with negative or zero real part (provided the
+        # imaginary part is not zero) will generate a NaN and hence a
+        # RuntimeWarning
+        with pytest.warns(expected_warning=RuntimeWarning) as r:
+            assert_complex_equal(np.power(zero, -1 + 1j), cnan)
+            assert_complex_equal(np.power(zero, -2 - 3j), cnan)
+            assert_complex_equal(np.power(zero, -7 + 0j), cnan)
+            assert_complex_equal(np.power(zero, 0 + 1j), cnan)
+            assert_complex_equal(np.power(zero, 0 - 1j), cnan)
+        assert len(r) == 5
+
+    def test_fast_power(self):
+        x = np.array([1, 2, 3], np.int16)
+        res = x**2.0
+        assert_((x**2.00001).dtype is res.dtype)
+        assert_array_equal(res, [1, 4, 9])
+        # check the inplace operation on the casted copy doesn't mess with x
+        assert_(not np.may_share_memory(res, x))
+        assert_array_equal(x, [1, 2, 3])
+
+        # Check that the fast path ignores 1-element not 0-d arrays
+        res = x ** np.array([[[2]]])
+        assert_equal(res.shape, (1, 1, 3))
+
+    def test_integer_power(self):
+        a = np.array([15, 15], 'i8')
+        b = np.power(a, a)
+        assert_equal(b, [437893890380859375, 437893890380859375])
+
+    def test_integer_power_with_integer_zero_exponent(self):
+        dtypes = np.typecodes['Integer']
+        for dt in dtypes:
+            arr = np.arange(-10, 10, dtype=dt)
+            assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+        dtypes = np.typecodes['UnsignedInteger']
+        for dt in dtypes:
+            arr = np.arange(10, dtype=dt)
+            assert_equal(np.power(arr, 0), np.ones_like(arr))
+
+    def test_integer_power_of_1(self):
+        dtypes = np.typecodes['AllInteger']
+        for dt in dtypes:
+            arr = np.arange(10, dtype=dt)
+            assert_equal(np.power(1, arr), np.ones_like(arr))
+
+    def test_integer_power_of_zero(self):
+        dtypes = np.typecodes['AllInteger']
+        for dt in dtypes:
+            arr = np.arange(1, 10, dtype=dt)
+            assert_equal(np.power(0, arr), np.zeros_like(arr))
+
+    def test_integer_to_negative_power(self):
+        dtypes = np.typecodes['Integer']
+        for dt in dtypes:
+            a = np.array([0, 1, 2, 3], dtype=dt)
+            b = np.array([0, 1, 2, -3], dtype=dt)
+            one = np.array(1, dtype=dt)
+            minusone = np.array(-1, dtype=dt)
+            assert_raises(ValueError, np.power, a, b)
+            assert_raises(ValueError, np.power, a, minusone)
+            assert_raises(ValueError, np.power, one, b)
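+            # (editor's illustrative note) the scalar path behaves the same,
+            # e.g. np.power(np.int64(2), -1) raises ValueError because the
+            # exact result 0.5 is not representable as an integer.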
+ assert_raises(ValueError, np.power, one, minusone) + + def test_float_to_inf_power(self): + for dt in [np.float32, np.float64]: + a = np.array([1, 1, 2, 2, -2, -2, np.inf, -np.inf], dt) + b = np.array([np.inf, -np.inf, np.inf, -np.inf, + np.inf, -np.inf, np.inf, -np.inf], dt) + r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt) + assert_equal(np.power(a, b), r) + + def test_power_fast_paths(self): + # gh-26055 + for dt in [np.float32, np.float64]: + a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt) + expected = np.array([0.0, 1.21, 4., 1.44e+26, 100, np.inf, np.inf]) + result = np.power(a, 2.) + assert_array_max_ulp(result, expected.astype(dt), maxulp=1) + + a = np.array([0, 1.1, 2, 12e12], dt) + expected = np.sqrt(a).astype(dt) + result = np.power(a, 0.5) + assert_array_max_ulp(result, expected, maxulp=1) + + +class TestFloat_power: + def test_type_conversion(self): + arg_type = '?bhilBHILefdgFDG' + res_type = 'ddddddddddddgDDG' + for dtin, dtout in zip(arg_type, res_type): + msg = f"dtin: {dtin}, dtout: {dtout}" + arg = np.ones(1, dtype=dtin) + res = np.float_power(arg, arg) + assert_(res.dtype.name == np.dtype(dtout).name, msg) + + +class TestLog2: + @pytest.mark.parametrize('dt', ['f', 'd', 'g']) + def test_log2_values(self, dt): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.log2(xf), yf) + + @pytest.mark.parametrize("i", range(1, 65)) + def test_log2_ints(self, i): + # a good log2 implementation should provide this, + # might fail on OS with bad libm + v = np.log2(2.**i) + assert_equal(v, float(i), err_msg='at exponent %d' % i) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_log2_special(self): + assert_equal(np.log2(1.), 0.) 
+ assert_equal(np.log2(np.inf), np.inf) + assert_(np.isnan(np.log2(np.nan))) + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_(np.isnan(np.log2(-1.))) + assert_(np.isnan(np.log2(-np.inf))) + assert_equal(np.log2(0.), -np.inf) + assert_(w[0].category is RuntimeWarning) + assert_(w[1].category is RuntimeWarning) + assert_(w[2].category is RuntimeWarning) + + +class TestExp2: + def test_exp2_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_almost_equal(np.exp2(yf), xf) + + +class TestLogAddExp2(_FilterInvalids): + # Need test for intermediate precisions + def test_logaddexp2_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log2(np.array(x, dtype=dt)) + yf = np.log2(np.array(y, dtype=dt)) + zf = np.log2(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp2(xf, yf), zf, decimal=dec_) + + def test_logaddexp2_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp2(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp2(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp2(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, 0))) + assert_(np.isnan(np.logaddexp2(0, np.nan))) + assert_(np.isnan(np.logaddexp2(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp2.identity, -np.inf) + assert_equal(np.logaddexp2.reduce([]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf]), -np.inf) + assert_equal(np.logaddexp2.reduce([-np.inf, 0]), 0) + + +class TestLog: + def test_log_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) * log2_ + assert_almost_equal(np.log(xf), yf) + + # test aliasing(issue #17761) + x = np.array([2, 0.937500, 3, 0.947500, 1.054697]) + xf = np.log(x) + assert_almost_equal(np.log(x, out=x), xf) + + def test_log_values_maxofdtype(self): + # test log() of max for dtype does not raise + dtypes = [np.float32, np.float64] + # This is failing at least on linux aarch64 (see gh-25460), and on most + # other non x86-64 platforms checking `longdouble` isn't too useful as + # it's an alias for float64. 
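+        # (editor's illustrative note) np.log(np.finfo(np.float64).max) is
+        # about 709.78, comfortably finite, so no FP flag should be set.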
+ if platform.machine() == 'x86_64': + dtypes += [np.longdouble] + + for dt in dtypes: + with np.errstate(all='raise'): + x = np.finfo(dt).max + np.log(x) + + def test_log_strides(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=100.0, size=ii)) + x_special = x_f64.copy() + x_special[3:-1:4] = 1.0 + y_true = np.log(x_f64) + y_special = np.log(x_special) + for jj in strides: + assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(1 + 1e-12j, 5e-25 + 1e-12j), + (1.000000000000001 + 3e-08j, + 1.5602230246251546e-15 + 2.999999999999996e-08j), + (0.9999995000000417 + 0.0009999998333333417j, + 7.831475869017683e-18 + 0.001j), + (0.9999999999999996 + 2.999999999999999e-08j, + 5.9107901499372034e-18 + 3e-08j), + (0.99995000042 - 0.009999833j, + -7.015159763822903e-15 - 0.009999999665816696j)], + ) + def test_log_precision_float64(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-15) + + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12 + 3e-06j)), + (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)), + (np.complex64(0.9999999 + 1e-06j), + np.complex64(-1.192088e-07 + 1.0000001e-06j))], + ) + def test_log_precision_float32(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-6) + + +class TestExp: + def test_exp_values(self): + x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] + y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + for dt in ['f', 'd', 'g']: + log2_ = 0.69314718055994530943 + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) * log2_ + assert_almost_equal(np.exp(yf), xf) + + def test_exp_strides(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f64 = np.float64(np.random.uniform(low=0.01, high=709.1, size=ii)) + y_true = np.exp(x_f64) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f64[::jj]), y_true[::jj], nulp=2) + +class TestSpecialFloats: + def test_exp_values(self): + with np.errstate(under='raise', over='raise'): + x = [np.nan, np.nan, np.inf, 0.] 
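+            # (editor's note) element-wise this expects exp(nan) -> nan,
+            # exp(-nan) -> nan, exp(inf) -> inf and exp(-inf) -> 0, none of
+            # which should trip the underflow/overflow traps enabled above.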
+ y = [np.nan, -np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.exp(yf), xf) + + # See: https://github.com/numpy/numpy/issues/19192 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) + def test_exp_exceptions(self): + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.exp, np.float16(11.0899)) + assert_raises(FloatingPointError, np.exp, np.float32(100.)) + assert_raises(FloatingPointError, np.exp, np.float32(1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(800.)) + assert_raises(FloatingPointError, np.exp, np.float64(1E19)) + + with np.errstate(under='raise'): + assert_raises(FloatingPointError, np.exp, np.float16(-17.5)) + assert_raises(FloatingPointError, np.exp, np.float32(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float32(-1E19)) + assert_raises(FloatingPointError, np.exp, np.float64(-1000.)) + assert_raises(FloatingPointError, np.exp, np.float64(-1E19)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_log_values(self): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf, 0.0, -1.0] + y1p = [np.nan, -np.nan, np.inf, -np.inf, -1.0, -2.0] + for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + yf1p = np.array(y1p, dtype=dt) + assert_equal(np.log(yf), xf) + assert_equal(np.log2(yf), xf) + assert_equal(np.log10(yf), xf) + assert_equal(np.log1p(yf1p), xf) + + with np.errstate(divide='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.log, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(0.0, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-1.0, dtype=dt)) + + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.log, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log2, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log10, + np.array(-1.0, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.log1p, + np.array(-2.0, dtype=dt)) + + # See https://github.com/numpy/numpy/issues/18005 + with assert_no_warnings(): + a = np.array(1e9, dtype='float32') + np.log(a) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize('dtype', ['e', 'f', 'd', 'g']) + def test_sincos_values(self, dtype): + with np.errstate(all='ignore'): + x = [np.nan, np.nan, np.nan, np.nan] + y = [np.nan, -np.nan, np.inf, -np.inf] + xf = np.array(x, dtype=dtype) + yf = np.array(y, dtype=dtype) + assert_equal(np.sin(yf), xf) + assert_equal(np.cos(yf), xf) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.xfail( + sys.platform.startswith("darwin"), + reason="underflow is triggered for scalar 'sin'" + ) + def test_sincos_underflow(self): + with np.errstate(under='raise'): + underflow_trigger = np.array( + float.fromhex("0x1.f37f47a03f82ap-511"), + dtype=np.float64 + ) + 
+            # (editor's note) this constant is roughly 2.9e-154: sin(x) ~= x
+            # here, but naive evaluation of the x**3 polynomial term
+            # underflows, so this guards against a spurious underflow trap.
+            np.sin(underflow_trigger)
+            np.cos(underflow_trigger)
+
+    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
+    @pytest.mark.parametrize('callable', [np.sin, np.cos])
+    @pytest.mark.parametrize('dtype', ['e', 'f', 'd'])
+    @pytest.mark.parametrize('value', [np.inf, -np.inf])
+    def test_sincos_errors(self, callable, dtype, value):
+        with np.errstate(invalid='raise'):
+            assert_raises(FloatingPointError, callable,
+                          np.array([value], dtype=dtype))
+
+    @pytest.mark.parametrize('callable', [np.sin, np.cos])
+    @pytest.mark.parametrize('dtype', ['f', 'd'])
+    @pytest.mark.parametrize('stride', [-1, 1, 2, 4, 5])
+    def test_sincos_overlaps(self, callable, dtype, stride):
+        N = 100
+        M = N // abs(stride)
+        rng = np.random.default_rng(42)
+        x = rng.standard_normal(N, dtype)
+        y = callable(x[::stride])
+        callable(x[::stride], out=x[:M])
+        assert_equal(x[:M], y)
+
+    @pytest.mark.parametrize('dt', ['e', 'f', 'd', 'g'])
+    def test_sqrt_values(self, dt):
+        with np.errstate(all='ignore'):
+            x = [np.nan, np.nan, np.inf, np.nan, 0., -0.]
+            y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]
+            # (editor's note) the trailing -0. pair checks that sqrt(-0.)
+            # keeps the sign of zero, per IEEE-754; x was padded to match
+            # the length of y.
+ for dt in ['e', 'f', 'd', 'g']: + xf = np.array(x, dtype=dt) + yf = np.array(y, dtype=dt) + assert_equal(np.reciprocal(yf), xf) + + with np.errstate(divide='raise'): + for dt in ['e', 'f', 'd', 'g']: + assert_raises(FloatingPointError, np.reciprocal, + np.array(-0.0, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_tan(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, 0.0, -0.0, np.inf, -np.inf] + out = [np.nan, np.nan, 0.0, -0.0, np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.tan(in_arr), out_arr) + + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.tan, + np.array(np.inf, dtype=dt)) + assert_raises(FloatingPointError, np.tan, + np.array(-np.inf, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arcsincos(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arcsin(in_arr), out_arr) + assert_equal(np.arccos(in_arr), out_arr) + + for callable in [np.arcsin, np.arccos]: + for value in [np.inf, -np.inf, 2.0, -2.0]: + for dt in ['e', 'f', 'd']: + with np.errstate(invalid='raise'): + assert_raises(FloatingPointError, callable, + np.array(value, dtype=dt)) + + def test_arctan(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan] + out = [np.nan, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arctan(in_arr), out_arr) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_sinh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.sinh(in_arr), out_arr) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.sinh, + np.array(12.0, dtype='e')) + assert_raises(FloatingPointError, np.sinh, + np.array(120.0, dtype='f')) + assert_raises(FloatingPointError, np.sinh, + np.array(1200.0, dtype='d')) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.skipif('bsd' in sys.platform, + reason="fallback implementation may not raise, see gh-2487") + def test_cosh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.cosh(in_arr), out_arr) + + with np.errstate(over='raise'): + assert_raises(FloatingPointError, np.cosh, + np.array(12.0, dtype='e')) + assert_raises(FloatingPointError, np.cosh, + np.array(120.0, dtype='f')) + assert_raises(FloatingPointError, np.cosh, + np.array(1200.0, dtype='d')) + + def test_tanh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, 1.0, -1.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_array_max_ulp(np.tanh(in_arr), out_arr, 3) + + def test_arcsinh(self): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -np.inf] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arcsinh(in_arr), out_arr) + + 
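+    # Editor's sketch (hypothetical addition, not part of the original
+    # suite): arcsinh is the inverse of sinh on finite values, so a round
+    # trip should reproduce the input to within tight tolerance for float64.
+    def test_arcsinh_roundtrip_sketch(self):
+        x = np.linspace(-5.0, 5.0, 11)
+        assert_allclose(np.arcsinh(np.sinh(x)), x, rtol=1e-12)
+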
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arccosh(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, 0.0] + out = [np.nan, np.nan, np.inf, np.nan, 0.0, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arccosh(in_arr), out_arr) + + for value in [0.0, -np.inf]: + with np.errstate(invalid='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.arccosh, + np.array(value, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_arctanh(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf, 1.0, -1.0, 2.0] + out = [np.nan, np.nan, np.nan, np.nan, np.inf, -np.inf, np.nan] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.arctanh(in_arr), out_arr) + + for value in [1.01, np.inf, -np.inf, 1.0, -1.0]: + with np.errstate(invalid='raise', divide='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.arctanh, + np.array(value, dtype=dt)) + + # Make sure glibc < 2.18 atanh is not used, issue 25087 + assert np.signbit(np.arctanh(-1j).real) + + # See: https://github.com/numpy/numpy/issues/20448 + @pytest.mark.xfail( + _glibc_older_than("2.17"), + reason="Older glibc versions may not raise appropriate FP exceptions" + ) + def test_exp2(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, 0.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.exp2(in_arr), out_arr) + + for value in [2000.0, -2000.0]: + with np.errstate(over='raise', under='raise'): + for dt in ['e', 'f', 'd']: + assert_raises(FloatingPointError, np.exp2, + np.array(value, dtype=dt)) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_expm1(self): + with np.errstate(all='ignore'): + in_ = [np.nan, -np.nan, np.inf, -np.inf] + out = [np.nan, np.nan, np.inf, -1.0] + for dt in ['e', 'f', 'd']: + in_arr = np.array(in_, dtype=dt) + out_arr = np.array(out, dtype=dt) + assert_equal(np.expm1(in_arr), out_arr) + + for value in [200.0, 2000.0]: + with np.errstate(over='raise'): + for dt in ['e', 'f']: + assert_raises(FloatingPointError, np.expm1, + np.array(value, dtype=dt)) + + # test to ensure no spurious FP exceptions are raised due to SIMD + INF_INVALID_ERR = [ + np.cos, np.sin, np.tan, np.arccos, np.arcsin, np.spacing, np.arctanh + ] + NEG_INVALID_ERR = [ + np.log, np.log2, np.log10, np.log1p, np.sqrt, np.arccosh, + np.arctanh + ] + ONE_INVALID_ERR = [ + np.arctanh, + ] + LTONE_INVALID_ERR = [ + np.arccosh, + ] + BYZERO_ERR = [ + np.log, np.log2, np.log10, np.reciprocal, np.arccosh + ] + + @pytest.mark.parametrize("ufunc", UFUNCS_UNARY_FP) + @pytest.mark.parametrize("dtype", ('e', 'f', 'd')) + @pytest.mark.parametrize("data, escape", ( + ([0.03], LTONE_INVALID_ERR), + ([0.03] * 32, LTONE_INVALID_ERR), + # neg + ([-1.0], NEG_INVALID_ERR), + ([-1.0] * 32, NEG_INVALID_ERR), + # flat + ([1.0], ONE_INVALID_ERR), + ([1.0] * 32, ONE_INVALID_ERR), + # zero + ([0.0], BYZERO_ERR), + ([0.0] * 32, BYZERO_ERR), + ([-0.0], BYZERO_ERR), + ([-0.0] * 32, BYZERO_ERR), + # nan + ([0.5, 0.5, 0.5, np.nan], LTONE_INVALID_ERR), + ([0.5, 0.5, 0.5, np.nan] * 32, LTONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0], ONE_INVALID_ERR), + ([np.nan, 1.0, 1.0, 1.0] * 32, ONE_INVALID_ERR), + 
([np.nan], []),
+        ([np.nan] * 32, []),
+        # inf
+        ([0.5, 0.5, 0.5, np.inf], INF_INVALID_ERR + LTONE_INVALID_ERR),
+        ([0.5, 0.5, 0.5, np.inf] * 32, INF_INVALID_ERR + LTONE_INVALID_ERR),
+        ([np.inf, 1.0, 1.0, 1.0], INF_INVALID_ERR),
+        ([np.inf, 1.0, 1.0, 1.0] * 32, INF_INVALID_ERR),
+        ([np.inf], INF_INVALID_ERR),
+        ([np.inf] * 32, INF_INVALID_ERR),
+        # ninf
+        ([0.5, 0.5, 0.5, -np.inf],
+         NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR),
+        ([0.5, 0.5, 0.5, -np.inf] * 32,
+         NEG_INVALID_ERR + INF_INVALID_ERR + LTONE_INVALID_ERR),
+        ([-np.inf, 1.0, 1.0, 1.0], NEG_INVALID_ERR + INF_INVALID_ERR),
+        ([-np.inf, 1.0, 1.0, 1.0] * 32, NEG_INVALID_ERR + INF_INVALID_ERR),
+        ([-np.inf], NEG_INVALID_ERR + INF_INVALID_ERR),
+        ([-np.inf] * 32, NEG_INVALID_ERR + INF_INVALID_ERR),
+    ))
+    def test_unary_spurious_fpexception(self, ufunc, dtype, data, escape):
+        if escape and ufunc in escape:
+            return
+        # FIXME: NAN raises FP invalid exception:
+        # - ceil/float16 on MSVC:32-bit
+        # - spacing/float16 on almost all platforms
+        # - spacing all floats on MSVC vs2022
+        if ufunc == np.spacing:
+            return
+        if ufunc == np.ceil and dtype == 'e':
+            return
+        array = np.array(data, dtype=dtype)
+        with assert_no_warnings():
+            ufunc(array)
+
+    @pytest.mark.parametrize("dtype", ('e', 'f', 'd'))
+    def test_divide_spurious_fpexception(self, dtype):
+        dt = np.dtype(dtype)
+        dt_info = np.finfo(dt)
+        subnorm = dt_info.smallest_subnormal
+        # Verify the fix for a bug where the remaining lanes of a partially
+        # loaded dividend SIMD vector were filled with ones, which raised a
+        # spurious overflow warning when the divisor was subnormal.
+        # see https://github.com/numpy/numpy/issues/25097
+        with assert_no_warnings():
+            np.zeros(128 + 1, dtype=dt) / subnorm
+
+class TestFPClass:
+    @pytest.mark.parametrize("stride", [-5, -4, -3, -2, -1, 1,
+                                        2, 4, 5, 6, 7, 8, 9, 10])
+    def test_fpclass(self, stride):
+        arr_f64 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.2251e-308], dtype='d')
+        arr_f32 = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, 1.0, -0.0, 0.0, 1.4013e-045, -1.4013e-045], dtype='f')
+        nan = np.array([True, True, False, False, False, False, False, False, False, False])  # noqa: E221
+        inf = np.array([False, False, True, True, False, False, False, False, False, False])  # noqa: E221
+        sign = np.array([False, True, False, True, True, False, True, False, False, True])  # noqa: E221
+        finite = np.array([False, False, False, False, True, True, True, True, True, True])  # noqa: E221
+        assert_equal(np.isnan(arr_f32[::stride]), nan[::stride])
+        assert_equal(np.isnan(arr_f64[::stride]), nan[::stride])
+        assert_equal(np.isinf(arr_f32[::stride]), inf[::stride])
+        assert_equal(np.isinf(arr_f64[::stride]), inf[::stride])
+        if platform.machine() == 'riscv64':
+            # On RISC-V, many operations that produce NaNs, such as converting
+            # a -NaN from f64 to f32, return a canonical NaN. The canonical
+            # NaNs are always positive. See section 11.3 NaN Generation and
+            # Propagation of the RISC-V Unprivileged ISA for more details.
+            # We disable the sign test on riscv64 for -np.nan as we
+            # cannot assume that its sign will be honoured in these tests.
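+            # (editor's illustrative note) e.g. np.float32(np.float64(-np.nan))
+            # may legitimately come back as the positive canonical NaN there,
+            # so np.signbit on it would be False.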
+ arr_f64_rv = np.copy(arr_f64) + arr_f32_rv = np.copy(arr_f32) + arr_f64_rv[1] = -1.0 + arr_f32_rv[1] = -1.0 + assert_equal(np.signbit(arr_f32_rv[::stride]), sign[::stride]) + assert_equal(np.signbit(arr_f64_rv[::stride]), sign[::stride]) + else: + assert_equal(np.signbit(arr_f32[::stride]), sign[::stride]) + assert_equal(np.signbit(arr_f64[::stride]), sign[::stride]) + assert_equal(np.isfinite(arr_f32[::stride]), finite[::stride]) + assert_equal(np.isfinite(arr_f64[::stride]), finite[::stride]) + + @pytest.mark.parametrize("dtype", ['d', 'f']) + def test_fp_noncontiguous(self, dtype): + data = np.array([np.nan, -np.nan, np.inf, -np.inf, -1.0, + 1.0, -0.0, 0.0, 2.2251e-308, + -2.2251e-308], dtype=dtype) + nan = np.array([True, True, False, False, False, False, + False, False, False, False]) + inf = np.array([False, False, True, True, False, False, + False, False, False, False]) + sign = np.array([False, True, False, True, True, False, + True, False, False, True]) + finite = np.array([False, False, False, False, True, True, + True, True, True, True]) + out = np.ndarray(data.shape, dtype='bool') + ncontig_in = data[1::3] + ncontig_out = out[1::3] + contig_in = np.array(ncontig_in) + + if platform.machine() == 'riscv64': + # Disable the -np.nan signbit tests on riscv64. See comments in + # test_fpclass for more details. + data_rv = np.copy(data) + data_rv[1] = -1.0 + ncontig_sign_in = data_rv[1::3] + contig_sign_in = np.array(ncontig_sign_in) + else: + ncontig_sign_in = ncontig_in + contig_sign_in = contig_in + + assert_equal(ncontig_in.flags.c_contiguous, False) + assert_equal(ncontig_out.flags.c_contiguous, False) + assert_equal(contig_in.flags.c_contiguous, True) + assert_equal(ncontig_sign_in.flags.c_contiguous, False) + assert_equal(contig_sign_in.flags.c_contiguous, True) + # ncontig in, ncontig out + assert_equal(np.isnan(ncontig_in, out=ncontig_out), nan[1::3]) + assert_equal(np.isinf(ncontig_in, out=ncontig_out), inf[1::3]) + assert_equal(np.signbit(ncontig_sign_in, out=ncontig_out), sign[1::3]) + assert_equal(np.isfinite(ncontig_in, out=ncontig_out), finite[1::3]) + # contig in, ncontig out + assert_equal(np.isnan(contig_in, out=ncontig_out), nan[1::3]) + assert_equal(np.isinf(contig_in, out=ncontig_out), inf[1::3]) + assert_equal(np.signbit(contig_sign_in, out=ncontig_out), sign[1::3]) + assert_equal(np.isfinite(contig_in, out=ncontig_out), finite[1::3]) + # ncontig in, contig out + assert_equal(np.isnan(ncontig_in), nan[1::3]) + assert_equal(np.isinf(ncontig_in), inf[1::3]) + assert_equal(np.signbit(ncontig_sign_in), sign[1::3]) + assert_equal(np.isfinite(ncontig_in), finite[1::3]) + # contig in, contig out, nd stride + data_split = np.array(np.array_split(data, 2)) + nan_split = np.array(np.array_split(nan, 2)) + inf_split = np.array(np.array_split(inf, 2)) + sign_split = np.array(np.array_split(sign, 2)) + finite_split = np.array(np.array_split(finite, 2)) + assert_equal(np.isnan(data_split), nan_split) + assert_equal(np.isinf(data_split), inf_split) + if platform.machine() == 'riscv64': + data_split_rv = np.array(np.array_split(data_rv, 2)) + assert_equal(np.signbit(data_split_rv), sign_split) + else: + assert_equal(np.signbit(data_split), sign_split) + assert_equal(np.isfinite(data_split), finite_split) + +class TestLDExp: + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + def test_ldexp(self, dtype, stride): + mant = np.array([0.125, 0.25, 0.5, 1., 1., 2., 4., 8.], dtype=dtype) + exp = np.array([3, 2, 1, 0, 0, -1, 
-2, -3], dtype='i') + out = np.zeros(8, dtype=dtype) + assert_equal(np.ldexp(mant[::stride], exp[::stride], out=out[::stride]), np.ones(8, dtype=dtype)[::stride]) + assert_equal(out[::stride], np.ones(8, dtype=dtype)[::stride]) + +class TestFRExp: + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) + @pytest.mark.parametrize("dtype", ['f', 'd']) + @pytest.mark.skipif(not sys.platform.startswith('linux'), + reason="np.frexp gives different answers for NAN/INF on windows and linux") + @pytest.mark.xfail(IS_MUSL, reason="gh23049") + def test_frexp(self, dtype, stride): + arr = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 1.0, -1.0], dtype=dtype) + mant_true = np.array([np.nan, np.nan, np.inf, -np.inf, 0.0, -0.0, 0.5, -0.5], dtype=dtype) + exp_true = np.array([0, 0, 0, 0, 0, 0, 1, 1], dtype='i') + out_mant = np.ones(8, dtype=dtype) + out_exp = 2 * np.ones(8, dtype='i') + mant, exp = np.frexp(arr[::stride], out=(out_mant[::stride], out_exp[::stride])) + assert_equal(mant_true[::stride], mant) + assert_equal(exp_true[::stride], exp) + assert_equal(out_mant[::stride], mant_true[::stride]) + assert_equal(out_exp[::stride], exp_true[::stride]) + + +# func : [maxulperror, low, high] +avx_ufuncs = {'sqrt' : [1, 0., 100.], # noqa: E203 + 'absolute' : [0, -100., 100.], # noqa: E203 + 'reciprocal' : [1, 1., 100.], # noqa: E203 + 'square' : [1, -100., 100.], # noqa: E203 + 'rint' : [0, -100., 100.], # noqa: E203 + 'floor' : [0, -100., 100.], # noqa: E203 + 'ceil' : [0, -100., 100.], # noqa: E203 + 'trunc' : [0, -100., 100.]} # noqa: E203 + +class TestAVXUfuncs: + def test_avx_based_ufunc(self): + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + np.random.seed(42) + for func, prop in avx_ufuncs.items(): + maxulperr = prop[0] + minval = prop[1] + maxval = prop[2] + # various array sizes to ensure masking in AVX is tested + for size in range(1, 32): + myfunc = getattr(np, func) + x_f32 = np.random.uniform(low=minval, high=maxval, + size=size).astype(np.float32) + x_f64 = x_f32.astype(np.float64) + x_f128 = x_f32.astype(np.longdouble) + y_true128 = myfunc(x_f128) + if maxulperr == 0: + assert_equal(myfunc(x_f32), y_true128.astype(np.float32)) + assert_equal(myfunc(x_f64), y_true128.astype(np.float64)) + else: + assert_array_max_ulp(myfunc(x_f32), + y_true128.astype(np.float32), + maxulp=maxulperr) + assert_array_max_ulp(myfunc(x_f64), + y_true128.astype(np.float64), + maxulp=maxulperr) + # various strides to test gather instruction + if size > 1: + y_true32 = myfunc(x_f32) + y_true64 = myfunc(x_f64) + for jj in strides: + assert_equal(myfunc(x_f64[::jj]), y_true64[::jj]) + assert_equal(myfunc(x_f32[::jj]), y_true32[::jj]) + +class TestAVXFloat32Transcendental: + def test_exp_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0, high=88.1, size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3) + + def test_log_float32(self): + np.random.seed(42) + x_f32 = np.float32(np.random.uniform(low=0.0, high=1000, size=1000000)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4) + + def test_sincos_float32(self): + np.random.seed(42) + N = 1000000 + M = np.int_(N / 20) + index = np.random.randint(low=0, high=N, size=M) + x_f32 = np.float32(np.random.uniform(low=-100., high=100., size=N)) + if not _glibc_older_than("2.17"): + # test coverage for elements > 117435.992f for which glibc is used + # this is known to be problematic on old glibc, so skip it there + 
x_f32[index] = np.float32(10E+10 * np.random.rand(M)) + x_f64 = np.float64(x_f32) + assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2) + # test aliasing(issue #17761) + tx_f32 = x_f32.copy() + assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2) + assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2) + + def test_strided_float32(self): + np.random.seed(42) + strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4]) + sizes = np.arange(2, 100) + for ii in sizes: + x_f32 = np.float32(np.random.uniform(low=0.01, high=88.1, size=ii)) + x_f32_large = x_f32.copy() + x_f32_large[3:-1:4] = 120000.0 + exp_true = np.exp(x_f32) + log_true = np.log(x_f32) + sin_true = np.sin(x_f32_large) + cos_true = np.cos(x_f32_large) + for jj in strides: + assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2) + assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2) + +class TestLogAddExp(_FilterInvalids): + def test_logaddexp_values(self): + x = [1, 2, 3, 4, 5] + y = [5, 4, 3, 2, 1] + z = [6, 6, 6, 6, 6] + for dt, dec_ in zip(['f', 'd', 'g'], [6, 15, 15]): + xf = np.log(np.array(x, dtype=dt)) + yf = np.log(np.array(y, dtype=dt)) + zf = np.log(np.array(z, dtype=dt)) + assert_almost_equal(np.logaddexp(xf, yf), zf, decimal=dec_) + + def test_logaddexp_range(self): + x = [1000000, -1000000, 1000200, -1000200] + y = [1000200, -1000200, 1000000, -1000000] + z = [1000200, -1000000, 1000200, -1000000] + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_almost_equal(np.logaddexp(logxf, logyf), logzf) + + def test_inf(self): + inf = np.inf + x = [inf, -inf, inf, -inf, inf, 1, -inf, 1] # noqa: E221 + y = [inf, inf, -inf, -inf, 1, inf, 1, -inf] # noqa: E221 + z = [inf, inf, inf, -inf, inf, inf, 1, 1] + with np.errstate(invalid='raise'): + for dt in ['f', 'd', 'g']: + logxf = np.array(x, dtype=dt) + logyf = np.array(y, dtype=dt) + logzf = np.array(z, dtype=dt) + assert_equal(np.logaddexp(logxf, logyf), logzf) + + def test_nan(self): + assert_(np.isnan(np.logaddexp(np.nan, np.inf))) + assert_(np.isnan(np.logaddexp(np.inf, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, 0))) + assert_(np.isnan(np.logaddexp(0, np.nan))) + assert_(np.isnan(np.logaddexp(np.nan, np.nan))) + + def test_reduce(self): + assert_equal(np.logaddexp.identity, -np.inf) + assert_equal(np.logaddexp.reduce([]), -np.inf) + + +class TestLog1p: + def test_log1p(self): + assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2)) + assert_almost_equal(ncu.log1p(1e-6), ncu.log(1 + 1e-6)) + + def test_special(self): + with np.errstate(invalid="ignore", divide="ignore"): + assert_equal(ncu.log1p(np.nan), np.nan) + assert_equal(ncu.log1p(np.inf), np.inf) + assert_equal(ncu.log1p(-1.), -np.inf) + assert_equal(ncu.log1p(-2.), np.nan) + assert_equal(ncu.log1p(-np.inf), np.nan) + + +class TestExpm1: + def test_expm1(self): + assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2) - 1) + assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6) - 1) + + def test_special(self): + assert_equal(ncu.expm1(np.inf), np.inf) + assert_equal(ncu.expm1(0.), 0.) + assert_equal(ncu.expm1(-0.), -0.) 
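+        # (editor's note) expm1(-np.inf) is exactly -1, since e**-inf == 0.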
+        assert_equal(ncu.expm1(-np.inf), -1.)
+
+    def test_complex(self):
+        x = np.asarray(1e-12)
+        assert_allclose(x, ncu.expm1(x))
+        x = x.astype(np.complex128)
+        assert_allclose(x, ncu.expm1(x))
+
+
+class TestHypot:
+    def test_simple(self):
+        assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
+        assert_almost_equal(ncu.hypot(0, 0), 0)
+
+    def test_reduce(self):
+        assert_almost_equal(ncu.hypot.reduce([3.0, 4.0]), 5.0)
+        assert_almost_equal(ncu.hypot.reduce([3.0, 4.0, 0]), 5.0)
+        assert_almost_equal(ncu.hypot.reduce([9.0, 12.0, 20.0]), 25.0)
+        assert_equal(ncu.hypot.reduce([]), 0.0)
+
+
+def assert_hypot_isnan(x, y):
+    with np.errstate(invalid='ignore'):
+        assert_(np.isnan(ncu.hypot(x, y)),
+                f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not nan")
+
+
+def assert_hypot_isinf(x, y):
+    with np.errstate(invalid='ignore'):
+        assert_(np.isinf(ncu.hypot(x, y)),
+                f"hypot({x}, {y}) is {ncu.hypot(x, y)}, not inf")
+
+
+class TestHypotSpecialValues:
+    def test_nan_outputs(self):
+        assert_hypot_isnan(np.nan, np.nan)
+        assert_hypot_isnan(np.nan, 1)
+
+    def test_nan_outputs2(self):
+        assert_hypot_isinf(np.nan, np.inf)
+        assert_hypot_isinf(np.inf, np.nan)
+        assert_hypot_isinf(np.inf, 0)
+        assert_hypot_isinf(0, np.inf)
+        assert_hypot_isinf(np.inf, np.inf)
+        assert_hypot_isinf(np.inf, 23.0)
+
+    def test_no_fpe(self):
+        assert_no_warnings(ncu.hypot, np.inf, 0)
+
+
+def assert_arctan2_isnan(x, y):
+    assert_(np.isnan(ncu.arctan2(x, y)),
+            f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not nan")
+
+
+def assert_arctan2_ispinf(x, y):
+    assert_(np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0,
+            f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +inf")
+
+
+def assert_arctan2_isninf(x, y):
+    assert_(np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0,
+            f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -inf")
+
+
+def assert_arctan2_ispzero(x, y):
+    assert_(ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y)),
+            f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not +0")
+
+
+def assert_arctan2_isnzero(x, y):
+    assert_(ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y)),
+            f"arctan({x}, {y}) is {ncu.arctan2(x, y)}, not -0")
+
+
+class TestArctan2SpecialValues:
+    def test_one_one(self):
+        # atan2(1, 1) returns pi/4.
+        assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
+        assert_almost_equal(ncu.arctan2(-1, 1), -0.25 * np.pi)
+        assert_almost_equal(ncu.arctan2(1, -1), 0.75 * np.pi)
+
+    def test_zero_nzero(self):
+        # atan2(+-0, -0) returns +-pi.
+        assert_almost_equal(ncu.arctan2(ncu.PZERO, ncu.NZERO), np.pi)
+        assert_almost_equal(ncu.arctan2(ncu.NZERO, ncu.NZERO), -np.pi)
+
+    def test_zero_pzero(self):
+        # atan2(+-0, +0) returns +-0.
+        assert_arctan2_ispzero(ncu.PZERO, ncu.PZERO)
+        assert_arctan2_isnzero(ncu.NZERO, ncu.PZERO)
+
+    def test_zero_negative(self):
+        # atan2(+-0, x) returns +-pi for x < 0.
+        assert_almost_equal(ncu.arctan2(ncu.PZERO, -1), np.pi)
+        assert_almost_equal(ncu.arctan2(ncu.NZERO, -1), -np.pi)
+
+    def test_zero_positive(self):
+        # atan2(+-0, x) returns +-0 for x > 0.
+        assert_arctan2_ispzero(ncu.PZERO, 1)
+        assert_arctan2_isnzero(ncu.NZERO, 1)
+
+    def test_positive_zero(self):
+        # atan2(y, +-0) returns +pi/2 for y > 0.
+        assert_almost_equal(ncu.arctan2(1, ncu.PZERO), 0.5 * np.pi)
+        assert_almost_equal(ncu.arctan2(1, ncu.NZERO), 0.5 * np.pi)
+
+    def test_negative_zero(self):
+        # atan2(y, +-0) returns -pi/2 for y < 0.
+ assert_almost_equal(ncu.arctan2(-1, ncu.PZERO), -0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-1, ncu.NZERO), -0.5 * np.pi) + + def test_any_ninf(self): + # atan2(+-y, -infinity) returns +-pi for finite y > 0. + assert_almost_equal(ncu.arctan2(1, -np.inf), np.pi) + assert_almost_equal(ncu.arctan2(-1, -np.inf), -np.pi) + + def test_any_pinf(self): + # atan2(+-y, +infinity) returns +-0 for finite y > 0. + assert_arctan2_ispzero(1, np.inf) + assert_arctan2_isnzero(-1, np.inf) + + def test_inf_any(self): + # atan2(+-infinity, x) returns +-pi/2 for finite x. + assert_almost_equal(ncu.arctan2( np.inf, 1), 0.5 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, 1), -0.5 * np.pi) + + def test_inf_ninf(self): + # atan2(+-infinity, -infinity) returns +-3*pi/4. + assert_almost_equal(ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi) + + def test_inf_pinf(self): + # atan2(+-infinity, +infinity) returns +-pi/4. + assert_almost_equal(ncu.arctan2( np.inf, np.inf), 0.25 * np.pi) + assert_almost_equal(ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi) + + def test_nan_any(self): + # atan2(nan, x) returns nan for any x, including inf + assert_arctan2_isnan(np.nan, np.inf) + assert_arctan2_isnan(np.inf, np.nan) + assert_arctan2_isnan(np.nan, np.nan) + + +class TestLdexp: + def _check_ldexp(self, tp): + assert_almost_equal(ncu.ldexp(np.array(2., np.float32), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.float64), + np.array(3, tp)), 16.) + assert_almost_equal(ncu.ldexp(np.array(2., np.longdouble), + np.array(3, tp)), 16.) + + def test_ldexp(self): + # The default Python int type should work + assert_almost_equal(ncu.ldexp(2., 3), 16.) + # The following int types should all be accepted + self._check_ldexp(np.int8) + self._check_ldexp(np.int16) + self._check_ldexp(np.int32) + self._check_ldexp('i') + self._check_ldexp('l') + + def test_ldexp_overflow(self): + # silence warning emitted on overflow + with np.errstate(over="ignore"): + imax = np.iinfo(np.dtype('l')).max + imin = np.iinfo(np.dtype('l')).min + assert_equal(ncu.ldexp(2., imax), np.inf) + assert_equal(ncu.ldexp(2., imin), 0) + + +class TestMaximum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.maximum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.maximum.reduce([1, 2j]), 1) + assert_equal(np.maximum.reduce([1 + 3j, 2j]), 1 + 3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.maximum(x, y) == 1.0) + assert_(np.maximum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.maximum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.maximum(arg1, arg2), arg2) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) # noqa: E221 + maxtrue = np.array([-2.0, 1.0, np.nan, 1.0, np.nan, np.nan, np.inf, -3.0]) + out = np.ones(8) + out_maxtrue = np.array([-2.0, 1.0, 1.0, 10.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.maximum(arr1, arr2), maxtrue) + assert_equal(np.maximum(arr1[::2], arr2[::2]), maxtrue[::2]) + assert_equal(np.maximum(arr1[:4:], arr2[::2]), np.array([-2.0, np.nan, 10.0, 1.0])) + assert_equal(np.maximum(arr1[::3], arr2[:3:]), np.array([-2.0, 0.0, np.nan])) + assert_equal(np.maximum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-2.0, 10., np.nan])) + assert_equal(out, out_maxtrue) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, -np.inf, dtmin), + (dtmax, -np.inf, dtmax), + (d1, d1_next, d1_next), + (dtmax, np.nan, np.nan), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.maximum([v1], [v2]), [expected]) + assert_equal(np.maximum.reduce([v1, v2]), expected) + + +class TestMinimum(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.minimum.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), np.nan) + assert_equal(func(tmp2), np.nan) + + def test_reduce_complex(self): + assert_equal(np.minimum.reduce([1, 2j]), 2j) + assert_equal(np.minimum.reduce([1 + 3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([nan, nan, nan]) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_nans(self): + # Multiple checks to give this a chance to + # fail if cmp is used instead of rich compare. + # Failure cannot be guaranteed. 
+ for i in range(1): + x = np.array(float('nan'), object) + y = 1.0 + z = np.array(float('nan'), object) + assert_(np.minimum(x, y) == 1.0) + assert_(np.minimum(z, y) == 1.0) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([nan, nan, nan], dtype=complex) + assert_equal(np.minimum(arg1, arg2), out) + + def test_object_array(self): + arg1 = np.arange(5, dtype=object) + arg2 = arg1 + 1 + assert_equal(np.minimum(arg1, arg2), arg1) + + def test_strided_array(self): + arr1 = np.array([-4.0, 1.0, 10.0, 0.0, np.nan, -np.nan, np.inf, -np.inf]) + arr2 = np.array([-2.0, -1.0, np.nan, 1.0, 0.0, np.nan, 1.0, -3.0]) + mintrue = np.array([-4.0, -1.0, np.nan, 0.0, np.nan, np.nan, 1.0, -np.inf]) + out = np.ones(8) + out_mintrue = np.array([-4.0, 1.0, 1.0, 1.0, 1.0, 1.0, np.nan, 1.0]) + assert_equal(np.minimum(arr1, arr2), mintrue) + assert_equal(np.minimum(arr1[::2], arr2[::2]), mintrue[::2]) + assert_equal(np.minimum(arr1[:4:], arr2[::2]), np.array([-4.0, np.nan, 0.0, 0.0])) + assert_equal(np.minimum(arr1[::3], arr2[:3:]), np.array([-4.0, -1.0, np.nan])) + assert_equal(np.minimum(arr1[:6:2], arr2[::3], out=out[::3]), np.array([-4.0, 1.0, np.nan])) + assert_equal(out, out_mintrue) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, np.inf, dtmin), + (dtmax, np.inf, dtmax), + (d1, d1_next, d1), + (dtmin, np.nan, np.nan), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.minimum([v1], [v2]), [expected]) + assert_equal(np.minimum.reduce([v1, v2]), expected) + + +class TestFmax(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmax.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 10) + assert_equal(func(tmp2), 10) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 9) + assert_equal(func(tmp2), 9) + + def test_reduce_complex(self): + assert_equal(np.fmax.reduce([1, 2j]), 1) + assert_equal(np.fmax.reduce([1 + 3j, 2j]), 1 + 3j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmax(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmax(arg1, arg2), out) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, -np.inf, dtmin), + (dtmax, -np.inf, dtmax), + (d1, d1_next, d1_next), + (dtmax, np.nan, dtmax), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.fmax([v1], [v2]), [expected]) + assert_equal(np.fmax.reduce([v1, v2]), 
expected) + + +class TestFmin(_FilterInvalids): + def test_reduce(self): + dflt = np.typecodes['AllFloat'] + dint = np.typecodes['AllInteger'] + seq1 = np.arange(11) + seq2 = seq1[::-1] + func = np.fmin.reduce + for dt in dint: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + for dt in dflt: + tmp1 = seq1.astype(dt) + tmp2 = seq2.astype(dt) + assert_equal(func(tmp1), 0) + assert_equal(func(tmp2), 0) + tmp1[::2] = np.nan + tmp2[::2] = np.nan + assert_equal(func(tmp1), 1) + assert_equal(func(tmp2), 1) + + def test_reduce_complex(self): + assert_equal(np.fmin.reduce([1, 2j]), 2j) + assert_equal(np.fmin.reduce([1 + 3j, 2j]), 2j) + + def test_float_nans(self): + nan = np.nan + arg1 = np.array([0, nan, nan]) + arg2 = np.array([nan, 0, nan]) + out = np.array([0, 0, nan]) + assert_equal(np.fmin(arg1, arg2), out) + + def test_complex_nans(self): + nan = np.nan + for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]: + arg1 = np.array([0, cnan, cnan], dtype=complex) + arg2 = np.array([cnan, 0, cnan], dtype=complex) + out = np.array([0, 0, nan], dtype=complex) + assert_equal(np.fmin(arg1, arg2), out) + + def test_precision(self): + dtypes = [np.float16, np.float32, np.float64, np.longdouble] + + for dt in dtypes: + dtmin = np.finfo(dt).min + dtmax = np.finfo(dt).max + d1 = dt(0.1) + d1_next = np.nextafter(d1, np.inf) + + test_cases = [ + # v1 v2 expected + (dtmin, np.inf, dtmin), + (dtmax, np.inf, dtmax), + (d1, d1_next, d1), + (dtmin, np.nan, dtmin), + ] + + for v1, v2, expected in test_cases: + assert_equal(np.fmin([v1], [v2]), [expected]) + assert_equal(np.fmin.reduce([v1, v2]), expected) + + +class TestBool: + def test_exceptions(self): + a = np.ones(1, dtype=np.bool) + assert_raises(TypeError, np.negative, a) + assert_raises(TypeError, np.positive, a) + assert_raises(TypeError, np.subtract, a, a) + + def test_truth_table_logical(self): + # 2, 3 and 4 serves as true values + input1 = [0, 0, 3, 2] + input2 = [0, 4, 0, 2] + + typecodes = (np.typecodes['AllFloat'] + + np.typecodes['AllInteger'] + + '?') # boolean + for dtype in map(np.dtype, typecodes): + arg1 = np.asarray(input1, dtype=dtype) + arg2 = np.asarray(input2, dtype=dtype) + + # OR + out = [False, True, True, True] + for func in (np.logical_or, np.maximum): + assert_equal(func(arg1, arg2).astype(bool), out) + # AND + out = [False, False, False, True] + for func in (np.logical_and, np.minimum): + assert_equal(func(arg1, arg2).astype(bool), out) + # XOR + out = [False, True, True, False] + for func in (np.logical_xor, np.not_equal): + assert_equal(func(arg1, arg2).astype(bool), out) + + def test_truth_table_bitwise(self): + arg1 = [False, False, True, True] + arg2 = [False, True, False, True] + + out = [False, True, True, True] + assert_equal(np.bitwise_or(arg1, arg2), out) + + out = [False, False, False, True] + assert_equal(np.bitwise_and(arg1, arg2), out) + + out = [False, True, True, False] + assert_equal(np.bitwise_xor(arg1, arg2), out) + + def test_reduce(self): + none = np.array([0, 0, 0, 0], bool) + some = np.array([1, 0, 1, 1], bool) + every = np.array([1, 1, 1, 1], bool) + empty = np.array([], bool) + + arrs = [none, some, every, empty] + + for arr in arrs: + assert_equal(np.logical_and.reduce(arr), all(arr)) + + for arr in arrs: + assert_equal(np.logical_or.reduce(arr), any(arr)) + + for arr in arrs: + assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1) + + +class TestBitwiseUFuncs: + + _all_ints_bits = [ + np.dtype(c).itemsize * 8 for c in 
np.typecodes["AllInteger"]] + bitwise_types = [ + np.dtype(c) for c in '?' + np.typecodes["AllInteger"] + 'O'] + bitwise_bits = [ + 2, # boolean type + *_all_ints_bits, # All integers + max(_all_ints_bits) + 1, # Object_ type + ] + + def test_values(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + msg = f"dt = '{dt.char}'" + + assert_equal(np.bitwise_not(zeros), ones, err_msg=msg) + assert_equal(np.bitwise_not(ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_or(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_or(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_or(ones, ones), ones, err_msg=msg) + + assert_equal(np.bitwise_xor(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_xor(zeros, ones), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, zeros), ones, err_msg=msg) + assert_equal(np.bitwise_xor(ones, ones), zeros, err_msg=msg) + + assert_equal(np.bitwise_and(zeros, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(zeros, ones), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, zeros), zeros, err_msg=msg) + assert_equal(np.bitwise_and(ones, ones), ones, err_msg=msg) + + def test_types(self): + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + msg = f"dt = '{dt.char}'" + + assert_(np.bitwise_not(zeros).dtype == dt, msg) + assert_(np.bitwise_or(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg) + assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg) + + def test_identity(self): + assert_(np.bitwise_or.identity == 0, 'bitwise_or') + assert_(np.bitwise_xor.identity == 0, 'bitwise_xor') + assert_(np.bitwise_and.identity == -1, 'bitwise_and') + + def test_reduction(self): + binary_funcs = (np.bitwise_or, np.bitwise_xor, np.bitwise_and) + + for dt in self.bitwise_types: + zeros = np.array([0], dtype=dt) + ones = np.array([-1]).astype(dt) + for f in binary_funcs: + msg = f"dt: '{dt}', f: '{f}'" + assert_equal(f.reduce(zeros), zeros, err_msg=msg) + assert_equal(f.reduce(ones), ones, err_msg=msg) + + # Test empty reduction, no object dtype + for dt in self.bitwise_types[:-1]: + # No object array types + empty = np.array([], dtype=dt) + for f in binary_funcs: + msg = f"dt: '{dt}', f: '{f}'" + tgt = np.array(f.identity).astype(dt) + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + assert_(res.dtype == tgt.dtype, msg) + + # Empty object arrays use the identity. Note that the types may + # differ, the actual type used is determined by the assign_identity + # function and is not the same as the type returned by the identity + # method. 
+ for f in binary_funcs: + msg = f"dt: '{f}'" + empty = np.array([], dtype=object) + tgt = f.identity + res = f.reduce(empty) + assert_equal(res, tgt, err_msg=msg) + + # Non-empty object arrays do not use the identity + for f in binary_funcs: + msg = f"dt: '{f}'" + btype = np.array([True], dtype=object) + assert_(type(f.reduce(btype)) is bool, msg) + + @pytest.mark.parametrize("input_dtype_obj, bitsize", + zip(bitwise_types, bitwise_bits)) + def test_bitwise_count(self, input_dtype_obj, bitsize): + input_dtype = input_dtype_obj.type + + for i in range(1, bitsize): + num = 2**i - 1 + msg = f"bitwise_count for {num}" + assert i == np.bitwise_count(input_dtype(num)), msg + if np.issubdtype( + input_dtype, np.signedinteger) or input_dtype == np.object_: + assert i == np.bitwise_count(input_dtype(-num)), msg + + a = np.array([2**i - 1 for i in range(1, bitsize)], dtype=input_dtype) + bitwise_count_a = np.bitwise_count(a) + expected = np.arange(1, bitsize, dtype=input_dtype) + + msg = f"array bitwise_count for {input_dtype}" + assert all(bitwise_count_a == expected), msg + + +class TestInt: + def test_logical_not(self): + x = np.ones(10, dtype=np.int16) + o = np.ones(10 * 2, dtype=bool) + tgt = o.copy() + tgt[::2] = False + os = o[::2] + assert_array_equal(np.logical_not(x, out=os), False) + assert_array_equal(o, tgt) + + +class TestFloatingPoint: + def test_floating_point(self): + assert_equal(ncu.FLOATING_POINT_SUPPORT, 1) + + +class TestDegrees: + def test_degrees(self): + assert_almost_equal(ncu.degrees(np.pi), 180.0) + assert_almost_equal(ncu.degrees(-0.5 * np.pi), -90.0) + + +class TestRadians: + def test_radians(self): + assert_almost_equal(ncu.radians(180.0), np.pi) + assert_almost_equal(ncu.radians(-90.0), -0.5 * np.pi) + + +class TestHeavside: + def test_heaviside(self): + x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]]) + expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]]) + expected1 = expectedhalf.copy() + expected1[0, 2] = 1 + + h = ncu.heaviside(x, 0.5) + assert_equal(h, expectedhalf) + + h = ncu.heaviside(x, 1.0) + assert_equal(h, expected1) + + x = x.astype(np.float32) + + h = ncu.heaviside(x, np.float32(0.5)) + assert_equal(h, expectedhalf.astype(np.float32)) + + h = ncu.heaviside(x, np.float32(1.0)) + assert_equal(h, expected1.astype(np.float32)) + + +class TestSign: + def test_sign(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape) + tgt = np.array([1., -1., np.nan, 0.0, 1.0, -1.0]) + + with np.errstate(invalid='ignore'): + res = ncu.sign(a) + assert_equal(res, tgt) + res = ncu.sign(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + def test_sign_complex(self): + a = np.array([ + np.inf, -np.inf, complex(0, np.inf), complex(0, -np.inf), + complex(np.inf, np.inf), complex(np.inf, -np.inf), # nan + np.nan, complex(0, np.nan), complex(np.nan, np.nan), # nan + 0.0, # 0. 
+ 3.0, -3.0, -2j, 3.0 + 4.0j, -8.0 + 6.0j + ]) + out = np.zeros(a.shape, a.dtype) + tgt = np.array([ + 1., -1., 1j, -1j, + ] + [complex(np.nan, np.nan)] * 5 + [ + 0.0, + 1.0, -1.0, -1j, 0.6 + 0.8j, -0.8 + 0.6j]) + + with np.errstate(invalid='ignore'): + res = ncu.sign(a) + assert_equal(res, tgt) + res = ncu.sign(a, out) + assert_(res is out) + assert_equal(res, tgt) + + def test_sign_dtype_object(self): + # In reference to github issue #6229 + + foo = np.array([-.1, 0, .1]) + a = np.sign(foo.astype(object)) + b = np.sign(foo) + + assert_array_equal(a, b) + + def test_sign_dtype_nan_object(self): + # In reference to github issue #6229 + def test_nan(): + foo = np.array([np.nan]) + # FIXME: a not used + a = np.sign(foo.astype(object)) + + assert_raises(TypeError, test_nan) + +class TestMinMax: + def test_minmax_blocked(self): + # simd tests on max/min, test all alignments, slow but important + # for 2 * vz + 2 * (vs - 1) + 1 (unrolled once) + for dt, sz in [(np.float32, 15), (np.float64, 7)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + for i in range(inp.size): + inp[:] = np.arange(inp.size, dtype=dt) + inp[i] = np.nan + emsg = lambda: f'{inp!r}\n{msg}' + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, + "invalid value encountered in reduce") + assert_(np.isnan(inp.max()), msg=emsg) + assert_(np.isnan(inp.min()), msg=emsg) + + inp[i] = 1e10 + assert_equal(inp.max(), 1e10, err_msg=msg) + inp[i] = -1e10 + assert_equal(inp.min(), -1e10, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(d.max(), d[0]) + assert_equal(d.min(), d[0]) + + def test_reduce_reorder(self): + # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus + # and put it before the call to an intrinsic function that causes + # invalid status to be set. 
Also make sure warnings are not emitted + for n in (2, 4, 8, 16, 32): + for dt in (np.float32, np.float16, np.complex64): + for r in np.diagflat(np.array([np.nan] * n, dtype=dt)): + assert_equal(np.min(r), np.nan) + + def test_minimize_no_warns(self): + a = np.minimum(np.nan, 1) + assert_equal(a, np.nan) + + +class TestAbsoluteNegative: + def test_abs_neg_blocked(self): + # simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1 + for dt, sz in [(np.float32, 11), (np.float64, 5)]: + for out, inp, msg in _gen_alignment_data(dtype=dt, type='unary', + max_size=sz): + tgt = [ncu.absolute(i) for i in inp] + np.absolute(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + assert_((out >= 0).all()) + + tgt = [-1 * (i) for i in inp] + np.negative(inp, out=out) + assert_equal(out, tgt, err_msg=msg) + + for v in [np.nan, -np.inf, np.inf]: + for i in range(inp.size): + d = np.arange(inp.size, dtype=dt) + inp[:] = -d + inp[i] = v + d[i] = -v if v == -np.inf else v + assert_array_equal(np.abs(inp), d, err_msg=msg) + np.abs(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + assert_array_equal(-inp, -1 * inp, err_msg=msg) + d = -1 * inp + np.negative(inp, out=out) + assert_array_equal(out, d, err_msg=msg) + + def test_lower_align(self): + # check data that is not aligned to element size + # i.e doubles are aligned to 4 bytes on i386 + d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64) + assert_equal(np.abs(d), d) + assert_equal(np.negative(d), -d) + np.negative(d, out=d) + np.negative(np.ones_like(d), out=d) + np.abs(d, out=d) + np.abs(np.ones_like(d), out=d) + + @pytest.mark.parametrize("dtype", ['d', 'f', 'int32', 'int64']) + @pytest.mark.parametrize("big", [True, False]) + def test_noncontiguous(self, dtype, big): + data = np.array([-1.0, 1.0, -0.0, 0.0, 2.2251e-308, -2.5, 2.5, -6, + 6, -2.2251e-308, -8, 10], dtype=dtype) + expect = np.array([1.0, -1.0, 0.0, -0.0, -2.2251e-308, 2.5, -2.5, 6, + -6, 2.2251e-308, 8, -10], dtype=dtype) + if big: + data = np.repeat(data, 10) + expect = np.repeat(expect, 10) + out = np.ndarray(data.shape, dtype=dtype) + ncontig_in = data[1::2] + ncontig_out = out[1::2] + contig_in = np.array(ncontig_in) + # contig in, contig out + assert_array_equal(np.negative(contig_in), expect[1::2]) + # contig in, ncontig out + assert_array_equal(np.negative(contig_in, out=ncontig_out), + expect[1::2]) + # ncontig in, contig out + assert_array_equal(np.negative(ncontig_in), expect[1::2]) + # ncontig in, ncontig out + assert_array_equal(np.negative(ncontig_in, out=ncontig_out), + expect[1::2]) + # contig in, contig out, nd stride + data_split = np.array(np.array_split(data, 2)) + expect_split = np.array(np.array_split(expect, 2)) + assert_equal(np.negative(data_split), expect_split) + + +class TestPositive: + def test_valid(self): + valid_dtypes = [int, float, complex, object] + for dtype in valid_dtypes: + x = np.arange(5, dtype=dtype) + result = np.positive(x) + assert_equal(x, result, err_msg=str(dtype)) + + def test_invalid(self): + with assert_raises(TypeError): + np.positive(True) + with assert_raises(TypeError): + np.positive(np.datetime64('2000-01-01')) + with assert_raises(TypeError): + np.positive(np.array(['foo'], dtype=str)) + with assert_raises(TypeError): + np.positive(np.array(['bar'], dtype=object)) + + +class TestSpecialMethods: + def test_wrap(self): + + class with_wrap: + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context, return_scalar): + r = with_wrap() + r.arr = arr + r.context = 
context + return r + + a = with_wrap() + x = ncu.minimum(a, a) + assert_equal(x.arr, np.zeros(1)) + func, args, i = x.context + assert_(func is ncu.minimum) + assert_equal(len(args), 2) + assert_equal(args[0], a) + assert_equal(args[1], a) + assert_equal(i, 0) + + def test_wrap_out(self): + # Calling convention for out should not affect how special methods are + # called + + class StoreArrayPrepareWrap(np.ndarray): + _wrap_args = None + _prepare_args = None + + def __new__(cls): + return np.zeros(()).view(cls) + + def __array_wrap__(self, obj, context, return_scalar): + self._wrap_args = context[1] + return obj + + @property + def args(self): + # We need to ensure these are fetched at the same time, before + # any other ufuncs are called by the assertions + return self._wrap_args + + def __repr__(self): + return "a" # for short test output + + def do_test(f_call, f_expected): + a = StoreArrayPrepareWrap() + + f_call(a) + + w = a.args + expected = f_expected(a) + try: + assert w == expected + except AssertionError as e: + # assert_equal produces truly useless error messages + raise AssertionError("\n".join([ + "Bad arguments passed in ufunc call", + f" expected: {expected}", + f" __array_wrap__ got: {w}" + ])) + + # method not on the out argument + do_test(lambda a: np.add(a, 0), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=None), lambda a: (a, 0)) + do_test(lambda a: np.add(a, 0, out=(None,)), lambda a: (a, 0)) + + # method on the out argument + do_test(lambda a: np.add(0, 0, a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=a), lambda a: (0, 0, a)) + do_test(lambda a: np.add(0, 0, out=(a,)), lambda a: (0, 0, a)) + + # Also check the where mask handling: + do_test(lambda a: np.add(a, 0, where=False), lambda a: (a, 0)) + do_test(lambda a: np.add(0, 0, a, where=False), lambda a: (0, 0, a)) + + def test_wrap_with_iterable(self): + # test fix for bug #1026: + + class with_wrap(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1).view(cls).copy() + + def __array_wrap__(self, arr, context, return_scalar): + return arr.view(type(self)) + + a = with_wrap() + x = ncu.multiply(a, (1, 2, 3)) + assert_(isinstance(x, with_wrap)) + assert_array_equal(x, np.array((1, 2, 3))) + + def test_priority_with_scalar(self): + # test fix for bug #826: + + class A(np.ndarray): + __array_priority__ = 10 + + def __new__(cls): + return np.asarray(1.0, 'float64').view(cls).copy() + + a = A() + x = np.float64(1) * a + assert_(isinstance(x, A)) + assert_array_equal(x, np.array(1)) + + def test_priority(self): + + class A: + def __array__(self, dtype=None, copy=None): + return np.zeros(1) + + def __array_wrap__(self, arr, context, return_scalar): + r = type(self)() + r.arr = arr + r.context = context + return r + + class B(A): + __array_priority__ = 20. + + class C(A): + __array_priority__ = 40. 
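+
+ # The result type below comes from the input that provides __array_wrap__
+ # with the highest __array_priority__; a plain ndarray loses to any override.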
+
+ x = np.zeros(1)
+ a = A()
+ b = B()
+ c = C()
+ f = ncu.minimum
+ assert_(type(f(x, x)) is np.ndarray)
+ assert_(type(f(x, a)) is A)
+ assert_(type(f(x, b)) is B)
+ assert_(type(f(x, c)) is C)
+ assert_(type(f(a, x)) is A)
+ assert_(type(f(b, x)) is B)
+ assert_(type(f(c, x)) is C)
+
+ assert_(type(f(a, a)) is A)
+ assert_(type(f(a, b)) is B)
+ assert_(type(f(b, a)) is B)
+ assert_(type(f(b, b)) is B)
+ assert_(type(f(b, c)) is C)
+ assert_(type(f(c, b)) is C)
+ assert_(type(f(c, c)) is C)
+
+ assert_(type(ncu.exp(a)) is A)
+ assert_(type(ncu.exp(b)) is B)
+ assert_(type(ncu.exp(c)) is C)
+
+ def test_failing_wrap(self):
+
+ class A:
+ def __array__(self, dtype=None, copy=None):
+ return np.zeros(2)
+
+ def __array_wrap__(self, arr, context, return_scalar):
+ raise RuntimeError
+
+ a = A()
+ assert_raises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum.reduce, a)
+
+ def test_failing_out_wrap(self):
+
+ singleton = np.array([1.0])
+
+ class Ok(np.ndarray):
+ def __array_wrap__(self, obj, context, return_scalar):
+ return singleton
+
+ class Bad(np.ndarray):
+ def __array_wrap__(self, obj, context, return_scalar):
+ raise RuntimeError
+
+ ok = np.empty(1).view(Ok)
+ bad = np.empty(1).view(Bad)
+ # double-free (segfault) of "ok" if "bad" raises an exception
+ for i in range(10):
+ assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
+
+ def test_none_wrap(self):
+ # Tests that issue #8507 is resolved. Previously, this would segfault
+
+ class A:
+ def __array__(self, dtype=None, copy=None):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context=None, return_scalar=False):
+ return None
+
+ a = A()
+ assert_equal(ncu.maximum(a, a), None)
+
+ def test_default_prepare(self):
+
+ class with_wrap:
+ __array_priority__ = 10
+
+ def __array__(self, dtype=None, copy=None):
+ return np.zeros(1)
+
+ def __array_wrap__(self, arr, context, return_scalar):
+ return arr
+
+ a = with_wrap()
+ x = ncu.minimum(a, a)
+ assert_equal(x, np.zeros(1))
+ assert_equal(type(x), np.ndarray)
+
+ def test_array_too_many_args(self):
+
+ class A:
+ def __array__(self, dtype, context, copy=None):
+ return np.zeros(1)
+
+ a = A()
+ assert_raises_regex(TypeError, '2 required positional', np.sum, a)
+
+ def test_ufunc_override(self):
+ # check override works even with instance with high priority.
+ class A:
+ def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ return self, func, method, inputs, kwargs
+
+ class MyNDArray(np.ndarray):
+ __array_priority__ = 100
+
+ a = A()
+ b = np.array([1]).view(MyNDArray)
+ res0 = np.multiply(a, b)
+ res1 = np.multiply(b, b, out=a)
+
+ # self
+ assert_equal(res0[0], a)
+ assert_equal(res1[0], a)
+ assert_equal(res0[1], np.multiply)
+ assert_equal(res1[1], np.multiply)
+ assert_equal(res0[2], '__call__')
+ assert_equal(res1[2], '__call__')
+ assert_equal(res0[3], (a, b))
+ assert_equal(res1[3], (b, b))
+ assert_equal(res0[4], {})
+ assert_equal(res1[4], {'out': (a,)})
+
+ def test_ufunc_override_mro(self):
+
+ # Some multi arg functions for testing.
+ def tres_mul(a, b, c):
+ return a * b * c
+
+ def quatro_mul(a, b, c, d):
+ return a * b * c * d
+
+ # Make these into ufuncs.
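+ # (np.frompyfunc returns true np.ufunc instances, so they participate
+ # in __array_ufunc__ dispatch exactly like the builtin ufuncs.)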
+ three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1) + four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1) + + class A: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "A" + + class ASub(A): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "ASub" + + class B: + def __array_ufunc__(self, func, method, *inputs, **kwargs): + return "B" + + class C: + def __init__(self): + self.count = 0 + + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + class CSub(C): + def __array_ufunc__(self, func, method, *inputs, **kwargs): + self.count += 1 + return NotImplemented + + a = A() + a_sub = ASub() + b = B() + c = C() + + # Standard + res = np.multiply(a, a_sub) + assert_equal(res, "ASub") + res = np.multiply(a_sub, b) + assert_equal(res, "ASub") + + # With 1 NotImplemented + res = np.multiply(c, a) + assert_equal(res, "A") + assert_equal(c.count, 1) + # Check our counter works, so we can trust tests below. + res = np.multiply(c, a) + assert_equal(c.count, 2) + + # Both NotImplemented. + c = C() + c_sub = CSub() + assert_raises(TypeError, np.multiply, c, c_sub) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = c_sub.count = 0 + assert_raises(TypeError, np.multiply, c_sub, c) + assert_equal(c.count, 1) + assert_equal(c_sub.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, c, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, np.multiply, 2, c) + assert_equal(c.count, 1) + + # Ternary testing. + assert_equal(three_mul_ufunc(a, 1, 2), "A") + assert_equal(three_mul_ufunc(1, a, 2), "A") + assert_equal(three_mul_ufunc(1, 2, a), "A") + + assert_equal(three_mul_ufunc(a, a, 6), "A") + assert_equal(three_mul_ufunc(a, 2, a), "A") + assert_equal(three_mul_ufunc(a, 2, b), "A") + assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub") + assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub") + c.count = 0 + assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub") + assert_equal(c.count, 1) + c.count = 0 + assert_equal(three_mul_ufunc(1, a_sub, c), "ASub") + assert_equal(c.count, 0) + + c.count = 0 + assert_equal(three_mul_ufunc(a, b, c), "A") + assert_equal(c.count, 0) + c_sub.count = 0 + assert_equal(three_mul_ufunc(a, b, c_sub), "A") + assert_equal(c_sub.count, 0) + assert_equal(three_mul_ufunc(1, 2, b), "B") + + assert_raises(TypeError, three_mul_ufunc, 1, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, c) + assert_raises(TypeError, three_mul_ufunc, c_sub, 2, 3) + + # Quaternary testing. 
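+ # (Same dispatch rule as the ternary cases: subclasses are consulted
+ # before their parents, otherwise operands are tried left to right
+ # until one does not return NotImplemented.)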
+ assert_equal(four_mul_ufunc(a, 1, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, 3), "A") + assert_equal(four_mul_ufunc(1, 1, a, 3), "A") + assert_equal(four_mul_ufunc(1, 1, 2, a), "A") + + assert_equal(four_mul_ufunc(a, b, 2, 3), "A") + assert_equal(four_mul_ufunc(1, a, 2, b), "A") + assert_equal(four_mul_ufunc(b, 1, a, 3), "B") + assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub") + assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub") + + c = C() + c_sub = CSub() + assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c) + assert_equal(c.count, 1) + c.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + c2 = C() + c.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 1) + assert_equal(c2.count, 0) + c.count = c2.count = c_sub.count = 0 + assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c) + assert_equal(c_sub.count, 1) + assert_equal(c.count, 0) + assert_equal(c2.count, 1) + + def test_ufunc_override_methods(self): + + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + # __call__ + a = A() + with assert_raises(TypeError): + np.multiply.__call__(1, a, foo='bar', answer=42) + res = np.multiply.__call__(1, a, subok='bar', where=42) + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, a)) + assert_equal(res[4], {'subok': 'bar', 'where': 42}) + + # __call__, wrong args + assert_raises(TypeError, np.multiply, a) + assert_raises(TypeError, np.multiply, a, a, a, a) + assert_raises(TypeError, np.multiply, a, a, sig='a', signature='a') + assert_raises(TypeError, ncu_tests.inner1d, a, a, axis=0, axes=[0, 0]) + + # reduce, positional args + res = np.multiply.reduce(a, 'axis0', 'dtype0', 'out0', 'keep0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0'}) + + # reduce, kwargs + res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0', + keepdims='keep0', initial='init0', + where='where0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduce') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'keepdims': 'keep0', + 'axis': 'axis0', + 'initial': 'init0', + 'where': 'where0'}) + + # reduce, output equal to None removed, but not other explicit ones, + # even if they are at their default value. + res = np.multiply.reduce(a, 0, None, None, False) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False}) + res = np.multiply.reduce(a, out=None, axis=0, keepdims=True) + assert_equal(res[4], {'axis': 0, 'keepdims': True}) + res = np.multiply.reduce(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + res = np.multiply.reduce(a, 0, None, None, False, 2, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': 2, 'where': True}) + # np._NoValue ignored for initial + res = np.multiply.reduce(a, 0, None, None, False, + np._NoValue, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'where': True}) + # None kept for initial, True for where. 
+ res = np.multiply.reduce(a, 0, None, None, False, None, True) + assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, + 'initial': None, 'where': True}) + + # reduce, wrong args + assert_raises(ValueError, np.multiply.reduce, a, out=()) + assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0') + + # accumulate, pos args + res = np.multiply.accumulate(a, 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, kwargs + res = np.multiply.accumulate(a, axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'accumulate') + assert_equal(res[3], (a,)) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # accumulate, output equal to None removed. + res = np.multiply.accumulate(a, 0, None, None) + assert_equal(res[4], {'axis': 0, 'dtype': None}) + res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1') + assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'}) + res = np.multiply.accumulate(a, None, out=(None,), dtype=None) + assert_equal(res[4], {'axis': None, 'dtype': None}) + + # accumulate, wrong args + assert_raises(ValueError, np.multiply.accumulate, a, out=()) + assert_raises(ValueError, np.multiply.accumulate, a, + out=('out0', 'out1')) + assert_raises(TypeError, np.multiply.accumulate, a, + 'axis0', axis='axis0') + + # reduceat, pos args + res = np.multiply.reduceat(a, [4, 2], 'axis0', 'dtype0', 'out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, kwargs + res = np.multiply.reduceat(a, [4, 2], axis='axis0', dtype='dtype0', + out='out0') + assert_equal(res[0], a) + assert_equal(res[1], np.multiply) + assert_equal(res[2], 'reduceat') + assert_equal(res[3], (a, [4, 2])) + assert_equal(res[4], {'dtype': 'dtype0', + 'out': ('out0',), + 'axis': 'axis0'}) + + # reduceat, output equal to None removed. 
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
+ assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
+ res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
+ assert_equal(res[4], {'axis': None, 'dtype': None})
+
+ # reduceat, wrong args
+ assert_raises(ValueError, np.multiply.reduceat, a, [4, 2], out=())
+ assert_raises(ValueError, np.multiply.reduceat, a, [4, 2],
+ out=('out0', 'out1'))
+ assert_raises(TypeError, np.multiply.reduceat, a, [4, 2],
+ 'axis0', axis='axis0')
+
+ # outer
+ res = np.multiply.outer(a, 42)
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'outer')
+ assert_equal(res[3], (a, 42))
+ assert_equal(res[4], {})
+
+ # outer, wrong args
+ assert_raises(TypeError, np.multiply.outer, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, a, a)
+ assert_raises(TypeError, np.multiply.outer, a, a, sig='a', signature='a')
+
+ # at
+ res = np.multiply.at(a, [4, 2], 'b0')
+ assert_equal(res[0], a)
+ assert_equal(res[1], np.multiply)
+ assert_equal(res[2], 'at')
+ assert_equal(res[3], (a, [4, 2], 'b0'))
+
+ # at, wrong args
+ assert_raises(TypeError, np.multiply.at, a)
+ assert_raises(TypeError, np.multiply.at, a, a, a, a)
+
+ def test_ufunc_override_out(self):
+
+ class A:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return kwargs
+
+ class B:
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return kwargs
+
+ a = A()
+ b = B()
+ res0 = np.multiply(a, b, 'out_arg')
+ res1 = np.multiply(a, b, out='out_arg')
+ res2 = np.multiply(2, b, 'out_arg')
+ res3 = np.multiply(3, b, out='out_arg')
+ res4 = np.multiply(a, 4, 'out_arg')
+ res5 = np.multiply(a, 5, out='out_arg')
+
+ assert_equal(res0['out'][0], 'out_arg')
+ assert_equal(res1['out'][0], 'out_arg')
+ assert_equal(res2['out'][0], 'out_arg')
+ assert_equal(res3['out'][0], 'out_arg')
+ assert_equal(res4['out'][0], 'out_arg')
+ assert_equal(res5['out'][0], 'out_arg')
+
+ # ufuncs with multiple output modf and frexp.
+ res6 = np.modf(a, 'out0', 'out1')
+ res7 = np.frexp(a, 'out0', 'out1')
+ assert_equal(res6['out'][0], 'out0')
+ assert_equal(res6['out'][1], 'out1')
+ assert_equal(res7['out'][0], 'out0')
+ assert_equal(res7['out'][1], 'out1')
+
+ # While we're at it, check that default output is never passed on.
+ assert_(np.sin(a, None) == {})
+ assert_(np.sin(a, out=None) == {})
+ assert_(np.sin(a, out=(None,)) == {})
+ assert_(np.modf(a, None) == {})
+ assert_(np.modf(a, None, None) == {})
+ assert_(np.modf(a, out=(None, None)) == {})
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
+ np.modf(a, out=None)
+
+ # don't give positional and output argument, or too many arguments.
+ # wrong number of arguments in the tuple is an error too.
+ assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
+ assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
+ assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+ assert_raises(TypeError, np.multiply, a, out=())
+ assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
+ assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
+ assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+ assert_raises(ValueError, np.modf, a, out=('one',))
+
+ def test_ufunc_override_where(self):
+
+ class OverriddenArrayOld(np.ndarray):
+
+ def _unwrap(self, objs):
+ cls = type(self)
+ result = []
+ for obj in objs:
+ if isinstance(obj, cls):
+ obj = np.array(obj)
+ elif type(obj) != np.ndarray:
+ return NotImplemented
+ result.append(obj)
+ return result
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+ inputs = self._unwrap(inputs)
+ if inputs is NotImplemented:
+ return NotImplemented
+
+ kwargs = kwargs.copy()
+ if "out" in kwargs:
+ kwargs["out"] = self._unwrap(kwargs["out"])
+ if kwargs["out"] is NotImplemented:
+ return NotImplemented
+
+ r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
+ if r is not NotImplemented:
+ r = r.view(type(self))
+
+ return r
+
+ class OverriddenArrayNew(OverriddenArrayOld):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+
+ kwargs = kwargs.copy()
+ if "where" in kwargs:
+ kwargs["where"] = self._unwrap((kwargs["where"], ))
+ if kwargs["where"] is NotImplemented:
+ return NotImplemented
+ else:
+ kwargs["where"] = kwargs["where"][0]
+
+ r = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
+ if r is not NotImplemented:
+ r = r.view(type(self))
+
+ return r
+
+ ufunc = np.negative
+
+ array = np.array([1, 2, 3])
+ where = np.array([True, False, True])
+ expected = ufunc(array, where=where)
+
+ with pytest.raises(TypeError):
+ ufunc(array, where=where.view(OverriddenArrayOld))
+
+ result_1 = ufunc(
+ array,
+ where=where.view(OverriddenArrayNew)
+ )
+ assert isinstance(result_1, OverriddenArrayNew)
+ assert np.all(np.array(result_1) == expected, where=where)
+
+ result_2 = ufunc(
+ array.view(OverriddenArrayNew),
+ where=where.view(OverriddenArrayNew)
+ )
+ assert isinstance(result_2, OverriddenArrayNew)
+ assert np.all(np.array(result_2) == expected, where=where)
+
+ def test_ufunc_override_exception(self):
+
+ class A:
+ def __array_ufunc__(self, *a, **kwargs):
+ raise ValueError("oops")
+
+ a = A()
+ assert_raises(ValueError, np.negative, 1, out=a)
+ assert_raises(ValueError, np.negative, a)
+ assert_raises(ValueError, np.divide, 1., a)
+
+ def test_ufunc_override_not_implemented(self):
+
+ class A:
+ def __array_ufunc__(self, *args, **kwargs):
+ return NotImplemented
+
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
+ with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+ np.negative(A())
+
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
+ "out=(1,)): 'A', 'object', 'int'")
+ with assert_raises_regex(TypeError, fnmatch.translate(msg)):
+ np.add(A(), object(), out=1)
+
+ def test_ufunc_override_disabled(self):
+
+ class OptOut:
+ __array_ufunc__ = None
+
+ opt_out = OptOut()
+
+ # ufuncs always raise
+ msg = "operand 'OptOut' does not support ufuncs"
+ with assert_raises_regex(TypeError, msg):
+ np.add(opt_out, 1)
+ with assert_raises_regex(TypeError, msg):
+ np.add(1, opt_out)
+ with assert_raises_regex(TypeError, msg):
+ np.negative(opt_out)
+
+ # opt-outs still hold even when other arguments have pathological + # __array_ufunc__ implementations + + class GreedyArray: + def __array_ufunc__(self, *args, **kwargs): + return self + + greedy = GreedyArray() + assert_(np.negative(greedy) is greedy) + with assert_raises_regex(TypeError, msg): + np.add(greedy, opt_out) + with assert_raises_regex(TypeError, msg): + np.add(greedy, 1, out=opt_out) + + def test_gufunc_override(self): + # gufunc are just ufunc instances, but follow a different path, + # so check __array_ufunc__ overrides them properly. + class A: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return self, ufunc, method, inputs, kwargs + + inner1d = ncu_tests.inner1d + a = A() + res = inner1d(a, a) + assert_equal(res[0], a) + assert_equal(res[1], inner1d) + assert_equal(res[2], '__call__') + assert_equal(res[3], (a, a)) + assert_equal(res[4], {}) + + res = inner1d(1, 1, out=a) + assert_equal(res[0], a) + assert_equal(res[1], inner1d) + assert_equal(res[2], '__call__') + assert_equal(res[3], (1, 1)) + assert_equal(res[4], {'out': (a,)}) + + # wrong number of arguments in the tuple is an error too. + assert_raises(TypeError, inner1d, a, out='two') + assert_raises(TypeError, inner1d, a, a, 'one', out='two') + assert_raises(TypeError, inner1d, a, a, 'one', 'two') + assert_raises(ValueError, inner1d, a, a, out=('one', 'two')) + assert_raises(ValueError, inner1d, a, a, out=()) + + def test_ufunc_override_with_super(self): + # NOTE: this class is used in doc/source/user/basics.subclassing.rst + # if you make any changes here, do update it there too. + class A(np.ndarray): + def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs): + args = [] + in_no = [] + for i, input_ in enumerate(inputs): + if isinstance(input_, A): + in_no.append(i) + args.append(input_.view(np.ndarray)) + else: + args.append(input_) + + outputs = out + out_no = [] + if outputs: + out_args = [] + for j, output in enumerate(outputs): + if isinstance(output, A): + out_no.append(j) + out_args.append(output.view(np.ndarray)) + else: + out_args.append(output) + kwargs['out'] = tuple(out_args) + else: + outputs = (None,) * ufunc.nout + + info = {} + if in_no: + info['inputs'] = in_no + if out_no: + info['outputs'] = out_no + + results = super().__array_ufunc__(ufunc, method, + *args, **kwargs) + if results is NotImplemented: + return NotImplemented + + if method == 'at': + if isinstance(inputs[0], A): + inputs[0].info = info + return + + if ufunc.nout == 1: + results = (results,) + + results = tuple((np.asarray(result).view(A) + if output is None else output) + for result, output in zip(results, outputs)) + if results and isinstance(results[0], A): + results[0].info = info + + return results[0] if len(results) == 1 else results + + class B: + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if any(isinstance(input_, A) for input_ in inputs): + return "A!" + else: + return NotImplemented + + d = np.arange(5.) 
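+ # (The A subclass above records which argument positions held A
+ # instances in an `info` dict; `d` provides the plain-ndarray
+ # reference results for all the checks below.)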
+ # 1 input, 1 output + a = np.arange(5.).view(A) + b = np.sin(a) + check = np.sin(d) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0]}) + b = np.sin(d, out=(a,)) + assert_(np.all(check == b)) + assert_equal(b.info, {'outputs': [0]}) + assert_(b is a) + a = np.arange(5.).view(A) + b = np.sin(a, out=a) + assert_(np.all(check == b)) + assert_equal(b.info, {'inputs': [0], 'outputs': [0]}) + + # 1 input, 2 outputs + a = np.arange(5.).view(A) + b1, b2 = np.modf(a) + assert_equal(b1.info, {'inputs': [0]}) + b1, b2 = np.modf(d, out=(None, a)) + assert_(b2 is a) + assert_equal(b1.info, {'outputs': [1]}) + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c1, c2 = np.modf(a, out=(a, b)) + assert_(c1 is a) + assert_(c2 is b) + assert_equal(c1.info, {'inputs': [0], 'outputs': [0, 1]}) + + # 2 input, 1 output + a = np.arange(5.).view(A) + b = np.arange(5.).view(A) + c = np.add(a, b, out=a) + assert_(c is a) + assert_equal(c.info, {'inputs': [0, 1], 'outputs': [0]}) + # some tests with a non-ndarray subclass + a = np.arange(5.) + b = B() + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_raises(TypeError, np.add, a, b) + a = a.view(A) + assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented) + assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!") + assert_(np.add(a, b) == "A!") + # regression check for gh-9102 -- tests ufunc.reduce implicitly. + d = np.array([[1, 2, 3], [1, 2, 3]]) + a = d.view(A) + c = a.any() + check = d.any() + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + c = a.max() + check = d.max() + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.array(0).view(A) + c = a.max(out=b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + check = a.max(axis=0) + b = np.zeros_like(check).view(A) + c = a.max(axis=0, out=b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + # simple explicit tests of reduce, accumulate, reduceat + check = np.add.reduce(d, axis=1) + c = np.add.reduce(a, axis=1) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.reduce(a, 1, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + check = np.add.accumulate(d, axis=0) + c = np.add.accumulate(a, axis=0) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.accumulate(a, 0, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + indices = [0, 2, 1] + check = np.add.reduceat(d, indices, axis=1) + c = np.add.reduceat(a, indices, axis=1) + assert_equal(c, check) + assert_(c.info, {'inputs': [0]}) + b = np.zeros_like(c) + c = np.add.reduceat(a, indices, 1, None, b) + assert_equal(c, check) + assert_(c is b) + assert_(c.info, {'inputs': [0], 'outputs': [0]}) + # and a few tests for at + d = np.array([[1, 2, 3], [1, 2, 3]]) + check = d.copy() + a = d.copy().view(A) + np.add.at(check, ([0, 1], [0, 2]), 1.) + np.add.at(a, ([0, 1], [0, 2]), 1.) 
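+ # in-place `at` should update the subclass and the plain copy identically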
+ assert_equal(a, check)
+ assert_(a.info, {'inputs': [0]})
+ b = np.array(1.).view(A)
+ a = d.copy().view(A)
+ np.add.at(a, ([0, 1], [0, 2]), b)
+ assert_equal(a, check)
+ assert_(a.info, {'inputs': [0, 2]})
+
+ def test_array_ufunc_direct_call(self):
+ # This is mainly a regression test for gh-24023 (shouldn't segfault)
+ a = np.array(1)
+ with pytest.raises(TypeError):
+ a.__array_ufunc__()
+
+ # No kwargs means kwargs may be NULL on the C-level
+ with pytest.raises(TypeError):
+ a.__array_ufunc__(1, 2)
+
+ # And the same with a valid call:
+ res = a.__array_ufunc__(np.add, "__call__", a, a)
+ assert_array_equal(res, a + a)
+
+ def test_ufunc_docstring(self):
+ original_doc = np.add.__doc__
+ new_doc = "new docs"
+ expected_dict = (
+ {} if IS_PYPY else {"__module__": "numpy", "__qualname__": "add"}
+ )
+
+ np.add.__doc__ = new_doc
+ assert np.add.__doc__ == new_doc
+ assert np.add.__dict__["__doc__"] == new_doc
+
+ del np.add.__doc__
+ assert np.add.__doc__ == original_doc
+ assert np.add.__dict__ == expected_dict
+
+ np.add.__dict__["other"] = 1
+ np.add.__dict__["__doc__"] = new_doc
+ assert np.add.__doc__ == new_doc
+
+ del np.add.__dict__["__doc__"]
+ assert np.add.__doc__ == original_doc
+ del np.add.__dict__["other"]
+ assert np.add.__dict__ == expected_dict
+
+
+class TestChoose:
+ def test_mixed(self):
+ c = np.array([True, True])
+ a = np.array([True, True])
+ assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
+
+
+class TestRationalFunctions:
+ def test_lcm(self):
+ self._test_lcm_inner(np.int16)
+ self._test_lcm_inner(np.uint16)
+
+ def test_lcm_object(self):
+ self._test_lcm_inner(np.object_)
+
+ def test_gcd(self):
+ self._test_gcd_inner(np.int16)
+ self._test_gcd_inner(np.uint16)
+
+ def test_gcd_object(self):
+ self._test_gcd_inner(np.object_)
+
+ def _test_lcm_inner(self, dtype):
+ # basic use
+ a = np.array([12, 120], dtype=dtype)
+ b = np.array([20, 200], dtype=dtype)
+ assert_equal(np.lcm(a, b), [60, 600])
+
+ if not issubclass(dtype, np.unsignedinteger):
+ # negatives are ignored
+ a = np.array([12, -12, 12, -12], dtype=dtype)
+ b = np.array([20, 20, -20, -20], dtype=dtype)
+ assert_equal(np.lcm(a, b), [60] * 4)
+
+ # reduce
+ a = np.array([3, 12, 20], dtype=dtype)
+ assert_equal(np.lcm.reduce(a), 60)
+
+ # broadcasting, and a test including 0
+ a = np.arange(6).astype(dtype)
+ b = 20
+ assert_equal(np.lcm(a, b), [0, 20, 20, 60, 20, 20])
+
+ def _test_gcd_inner(self, dtype):
+ # basic use
+ a = np.array([12, 120], dtype=dtype)
+ b = np.array([20, 200], dtype=dtype)
+ assert_equal(np.gcd(a, b), [4, 40])
+
+ if not issubclass(dtype, np.unsignedinteger):
+ # negatives are ignored
+ a = np.array([12, -12, 12, -12], dtype=dtype)
+ b = np.array([20, 20, -20, -20], dtype=dtype)
+ assert_equal(np.gcd(a, b), [4] * 4)
+
+ # reduce
+ a = np.array([15, 25, 35], dtype=dtype)
+ assert_equal(np.gcd.reduce(a), 5)
+
+ # broadcasting, and a test including 0
+ a = np.arange(6).astype(dtype)
+ b = 20
+ assert_equal(np.gcd(a, b), [20, 1, 2, 1, 4, 5])
+
+ def test_lcm_overflow(self):
+ # verify that we don't overflow when a*b does overflow
+ big = np.int32(np.iinfo(np.int32).max // 11)
+ a = 2 * big
+ b = 5 * big
+ assert_equal(np.lcm(a, b), 10 * big)
+
+ def test_gcd_overflow(self):
+ for dtype in (np.int32, np.int64):
+ # verify that we don't overflow when taking abs(x)
+ # not relevant for lcm, where the result is unrepresentable anyway
+ a = dtype(np.iinfo(dtype).min) # negative power of two
+ q = -(a // 4)
+ assert_equal(np.gcd(a, q * 3), q)
+ assert_equal(np.gcd(a, -q *
3), q) + + def test_decimal(self): + from decimal import Decimal + a = np.array([1, 1, -1, -1]) * Decimal('0.20') + b = np.array([1, -1, 1, -1]) * Decimal('0.12') + + assert_equal(np.gcd(a, b), 4 * [Decimal('0.04')]) + assert_equal(np.lcm(a, b), 4 * [Decimal('0.60')]) + + def test_float(self): + # not well-defined on float due to rounding errors + assert_raises(TypeError, np.gcd, 0.3, 0.4) + assert_raises(TypeError, np.lcm, 0.3, 0.4) + + def test_huge_integers(self): + # Converting to an array first is a bit different as it means we + # have an explicit object dtype: + assert_equal(np.array(2**200), 2**200) + # Special promotion rules should ensure that this also works for + # two Python integers (even if slow). + # (We do this for comparisons, as the result is always bool and + # we also special case array comparisons with Python integers) + np.equal(2**200, 2**200) + + # But, we cannot do this when it would affect the result dtype: + with pytest.raises(OverflowError): + np.gcd(2**100, 3**100) + + # Asking for `object` explicitly is fine, though: + assert np.gcd(2**100, 3**100, dtype=object) == 1 + + # As of now, the below work, because it is using arrays (which + # will be object arrays) + a = np.array(2**100 * 3**5) + b = np.array([2**100 * 5**7, 2**50 * 3**10]) + assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) + assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + + def test_inf_and_nan(self): + inf = np.array([np.inf], dtype=np.object_) + assert_raises(ValueError, np.gcd, inf, 1) + assert_raises(ValueError, np.gcd, 1, inf) + assert_raises(ValueError, np.gcd, np.nan, inf) + assert_raises(TypeError, np.gcd, 4, float(np.inf)) + + +class TestRoundingFunctions: + + def test_object_direct(self): + """ test direct implementation of these magic methods """ + class C: + def __floor__(self): + return 1 + + def __ceil__(self): + return 2 + + def __trunc__(self): + return 3 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [1, 1]) + assert_equal(np.ceil(arr), [2, 2]) + assert_equal(np.trunc(arr), [3, 3]) + + def test_object_indirect(self): + """ test implementations via __float__ """ + class C: + def __float__(self): + return -2.5 + + arr = np.array([C(), C()]) + assert_equal(np.floor(arr), [-3, -3]) + assert_equal(np.ceil(arr), [-2, -2]) + with pytest.raises(TypeError): + np.trunc(arr) # consistent with math.trunc + + def test_fraction(self): + f = Fraction(-4, 3) + assert_equal(np.floor(f), -2) + assert_equal(np.ceil(f), -1) + assert_equal(np.trunc(f), -1) + + @pytest.mark.parametrize('func', [np.floor, np.ceil, np.trunc]) + @pytest.mark.parametrize('dtype', [np.bool, np.float64, np.float32, + np.int64, np.uint32]) + def test_output_dtype(self, func, dtype): + arr = np.array([-2, 0, 4, 8]).astype(dtype) + result = func(arr) + assert_equal(arr, result) + assert result.dtype == dtype + + +class TestComplexFunctions: + funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, + np.arctanh, np.sin, np.cos, np.tan, np.exp, + np.exp2, np.log, np.sqrt, np.log10, np.log2, + np.log1p] + + def test_it(self): + for f in self.funcs: + if f is np.arccosh: + x = 1.5 + else: + x = .5 + fr = f(x) + fz = f(complex(x)) + assert_almost_equal(fz.real, fr, err_msg=f'real part {f}') + assert_almost_equal(fz.imag, 0., err_msg=f'imag part {f}') + + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_precisions_consistent(self): + z = 1 + 1j + for f in self.funcs: + fcf = f(np.csingle(z)) + fcd = f(np.cdouble(z)) + fcl = f(np.clongdouble(z)) + assert_almost_equal(fcf, 
fcd, decimal=6, err_msg=f'fch-fcd {f}') + assert_almost_equal(fcl, fcd, decimal=15, err_msg=f'fch-fcl {f}') + + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_branch_cuts(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True) # noqa: E221 + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True) # noqa: E221 + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True) # noqa: E221 + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True) + + _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1) + _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1) + _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1) + + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + def test_branch_cuts_complex64(self): + # check branch cuts and continuity on them + _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 + _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 + _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64) + _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64) # noqa: E221 + + _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctan, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) + + _check_branch_cut(np.arcsinh, [0 - 2j, 2j], [1, 1], -1, 1, True, np.complex64) + _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64) + _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64) + + # check against bogus branch cuts: assert continuity between quadrants + _check_branch_cut(np.arcsin, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccos, [0 - 2j, 2j], [ 1, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64) + + _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64) + _check_branch_cut(np.arccosh, [0 - 2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64) + _check_branch_cut(np.arctanh, [0 - 2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64) + + def test_against_cmath(self): + import cmath + + points = [-1 - 1j, -1 + 1j, +1 - 1j, +1 + 1j] + name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan', + 'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'} + atol = 4 * np.finfo(complex).eps + for func in self.funcs: + fname = func.__name__.split('.')[-1] + cname = name_map.get(fname, fname) + try: + cfunc = getattr(cmath, cname) + except AttributeError: + continue + for p in points: + a = complex(func(np.complex128(p))) + b = cfunc(p) + assert_( + abs(a - b) < atol, + f"{fname} {p}: {a}; cmath: {b}" + ) + + 
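+ # (The complex arc* implementations switch to series expansions for
+ # small arguments; the switchover noted below is around |z| ~ 1e-3,
+ # which the next test probes for precision loss.)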
@pytest.mark.xfail( + # manylinux2014 uses glibc2.17 + _glibc_older_than("2.18"), + reason="Older glibc versions are imprecise (maybe passes with SIMD?)" + ) + @pytest.mark.xfail(IS_WASM, reason="doesn't work") + @pytest.mark.parametrize('dtype', [ + np.complex64, np.complex128, np.clongdouble + ]) + def test_loss_of_precision(self, dtype): + """Check loss of precision in complex arc* functions""" + if dtype is np.clongdouble and platform.machine() != 'x86_64': + # Failures on musllinux, aarch64, s390x, ppc64le (see gh-17554) + pytest.skip('Only works reliably for x86-64 and recent glibc') + + # Check against known-good functions + + info = np.finfo(dtype) + real_dtype = dtype(0.).real.dtype + eps = info.eps + + def check(x, rtol): + x = x.astype(real_dtype) + + z = x.astype(dtype) + d = np.absolute(np.arcsinh(x) / np.arcsinh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsinh')) + + z = (1j * x).astype(dtype) + d = np.absolute(np.arcsinh(x) / np.arcsin(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arcsin')) + + z = x.astype(dtype) + d = np.absolute(np.arctanh(x) / np.arctanh(z).real - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctanh')) + + z = (1j * x).astype(dtype) + d = np.absolute(np.arctanh(x) / np.arctan(z).imag - 1) + assert_(np.all(d < rtol), (np.argmax(d), x[np.argmax(d)], d.max(), + 'arctan')) + + # The switchover was chosen as 1e-3; hence there can be up to + # ~eps/1e-3 of relative cancellation error before it + + x_series = np.logspace(-20, -3.001, 200) + x_basic = np.logspace(-2.999, 0, 10, endpoint=False) + + if dtype is np.clongdouble: + if bad_arcsinh(): + pytest.skip("Trig functions of np.clongdouble values known " + "to be inaccurate on aarch64 and PPC for some " + "compilation configurations.") + # It's not guaranteed that the system-provided arc functions + # are accurate down to a few epsilons. (Eg. 
on Linux 64-bit) + # So, give more leeway for long complex tests here: + check(x_series, 50.0 * eps) + else: + check(x_series, 2.1 * eps) + check(x_basic, 2.0 * eps / 1e-3) + + # Check a few points + + z = np.array([1e-5 * (1 + 1j)], dtype=dtype) + p = 9.999999999333333333e-6 + 1.000000000066666666e-5j + d = np.absolute(1 - np.arctanh(z) / p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5 + 9.999999999666666667e-6j + d = np.absolute(1 - np.arcsinh(z) / p) + assert_(np.all(d < 1e-15)) + + p = 9.999999999333333333e-6j + 1.000000000066666666e-5 + d = np.absolute(1 - np.arctan(z) / p) + assert_(np.all(d < 1e-15)) + + p = 1.0000000000333333333e-5j + 9.999999999666666667e-6 + d = np.absolute(1 - np.arcsin(z) / p) + assert_(np.all(d < 1e-15)) + + # Check continuity across switchover points + + def check(func, z0, d=1): + z0 = np.asarray(z0, dtype=dtype) + zp = z0 + abs(z0) * d * eps * 2 + zm = z0 - abs(z0) * d * eps * 2 + assert_(np.all(zp != zm), (zp, zm)) + + # NB: the cancellation error at the switchover is at least eps + good = (abs(func(zp) - func(zm)) < 2 * eps) + assert_(np.all(good), (func, z0[~good])) + + for func in (np.arcsinh, np.arcsinh, np.arcsin, np.arctanh, np.arctan): + pts = [rp + 1j * ip for rp in (-1e-3, 0, 1e-3) for ip in (-1e-3, 0, 1e-3) + if rp != 0 or ip != 0] + check(func, pts, 1) + check(func, pts, 1j) + check(func, pts, 1 + 1j) + + @np.errstate(all="ignore") + def test_promotion_corner_cases(self): + for func in self.funcs: + assert func(np.float16(1)).dtype == np.float16 + # Integer to low precision float promotion is a dubious choice: + assert func(np.uint8(1)).dtype == np.float16 + assert func(np.int16(1)).dtype == np.float32 + + +class TestAttributes: + def test_attributes(self): + add = ncu.add + assert_equal(add.__name__, 'add') + assert_(add.ntypes >= 18) # don't fail if types added + assert_('ii->i' in add.types) + assert_equal(add.nin, 2) + assert_equal(add.nout, 1) + assert_equal(add.identity, 0) + + def test_doc(self): + # don't bother checking the long list of kwargs, which are likely to + # change + assert_(ncu.add.__doc__.startswith( + "add(x1, x2, /, out=None, *, where=True")) + assert_(ncu.frexp.__doc__.startswith( + "frexp(x[, out1, out2], / [, out=(None, None)], *, where=True")) + + +class TestSubclass: + + def test_subclass_op(self): + + class simple(np.ndarray): + def __new__(subtype, shape): + self = np.ndarray.__new__(subtype, shape, dtype=object) + self.fill(0) + return self + + a = simple((3, 4)) + assert_equal(a + a, a) + + +class TestFrompyfunc: + + def test_identity(self): + def mul(a, b): + return a * b + + # with identity=value + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_equal(mul_ufunc.reduce([]), 1) + + # with identity=None (reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1, identity=None) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_equal(mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1)), 1) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + # with no identity (not reorderable) + mul_ufunc = np.frompyfunc(mul, nin=2, nout=1) + assert_equal(mul_ufunc.reduce([2, 3, 4]), 24) + assert_raises(ValueError, lambda: mul_ufunc.reduce(np.ones((2, 2)), axis=(0, 1))) + assert_raises(ValueError, lambda: mul_ufunc.reduce([])) + + +def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False, + dtype=complex): + """ + Check for a branch cut in a function. 
+ + Assert that `x0` lies on a branch cut of function `f` and `f` is + continuous from the direction `dx`. + + Parameters + ---------- + f : func + Function to check + x0 : array-like + Point on branch cut + dx : array-like + Direction to check continuity in + re_sign, im_sign : {1, -1} + Change of sign of the real or imaginary part expected + sig_zero_ok : bool + Whether to check if the branch cut respects signed zero (if applicable) + dtype : dtype + Dtype to check (should be complex) + + """ + x0 = np.atleast_1d(x0).astype(dtype) + dx = np.atleast_1d(dx).astype(dtype) + + if np.dtype(dtype).char == 'F': + scale = np.finfo(dtype).eps * 1e2 + atol = np.float32(1e-2) + else: + scale = np.finfo(dtype).eps * 1e3 + atol = 1e-4 + + y0 = f(x0) + yp = f(x0 + dx * scale * np.absolute(x0) / np.absolute(dx)) + ym = f(x0 - dx * scale * np.absolute(x0) / np.absolute(dx)) + + assert_(np.all(np.absolute(y0.real - yp.real) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.imag - yp.imag) < atol), (y0, yp)) + assert_(np.all(np.absolute(y0.real - ym.real * re_sign) < atol), (y0, ym)) + assert_(np.all(np.absolute(y0.imag - ym.imag * im_sign) < atol), (y0, ym)) + + if sig_zero_ok: + # check that signed zeros also work as a displacement + jr = (x0.real == 0) & (dx.real != 0) + ji = (x0.imag == 0) & (dx.imag != 0) + if np.any(jr): + x = x0[jr] + x.real = ncu.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[jr].real - ym.real * re_sign) < atol), (y0[jr], ym)) + assert_(np.all(np.absolute(y0[jr].imag - ym.imag * im_sign) < atol), (y0[jr], ym)) + + if np.any(ji): + x = x0[ji] + x.imag = ncu.NZERO + ym = f(x) + assert_(np.all(np.absolute(y0[ji].real - ym.real * re_sign) < atol), (y0[ji], ym)) + assert_(np.all(np.absolute(y0[ji].imag - ym.imag * im_sign) < atol), (y0[ji], ym)) + +def test_copysign(): + assert_(np.copysign(1, -1) == -1) + with np.errstate(divide="ignore"): + assert_(1 / np.copysign(0, -1) < 0) + assert_(1 / np.copysign(0, 1) > 0) + assert_(np.signbit(np.copysign(np.nan, -1))) + assert_(not np.signbit(np.copysign(np.nan, 1))) + +def _test_nextafter(t): + one = t(1) + two = t(2) + zero = t(0) + eps = np.finfo(t).eps + assert_(np.nextafter(one, two) - one == eps) + assert_(np.nextafter(one, zero) - one < 0) + assert_(np.isnan(np.nextafter(np.nan, one))) + assert_(np.isnan(np.nextafter(one, np.nan))) + assert_(np.nextafter(one, one) == one) + +def test_nextafter(): + return _test_nextafter(np.float64) + + +def test_nextafterf(): + return _test_nextafter(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_nextafterl(): + return _test_nextafter(np.longdouble) + + +def test_nextafter_0(): + for t, direction in itertools.product(np._core.sctypes['float'], (1, -1)): + # The value of tiny for double double is NaN, so we need to pass the + # assert + with suppress_warnings() as sup: + sup.filter(UserWarning) + if not np.isnan(np.finfo(t).tiny): + tiny = np.finfo(t).tiny + assert_( + 0. 
< direction * np.nextafter(t(0), t(direction)) < tiny) + assert_equal(np.nextafter(t(0), t(direction)) / t(2.1), direction * 0.0) + +def _test_spacing(t): + one = t(1) + eps = np.finfo(t).eps + nan = t(np.nan) + inf = t(np.inf) + with np.errstate(invalid='ignore'): + assert_equal(np.spacing(one), eps) + assert_(np.isnan(np.spacing(nan))) + assert_(np.isnan(np.spacing(inf))) + assert_(np.isnan(np.spacing(-inf))) + assert_(np.spacing(t(1e30)) != 0) + +def test_spacing(): + return _test_spacing(np.float64) + +def test_spacingf(): + return _test_spacing(np.float32) + + +@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), + reason="long double is same as double") +@pytest.mark.xfail(condition=platform.machine().startswith("ppc64"), + reason="IBM double double") +def test_spacingl(): + return _test_spacing(np.longdouble) + +def test_spacing_gfortran(): + # Reference from this fortran file, built with gfortran 4.3.3 on linux + # 32bits: + # PROGRAM test_spacing + # INTEGER, PARAMETER :: SGL = SELECTED_REAL_KIND(p=6, r=37) + # INTEGER, PARAMETER :: DBL = SELECTED_REAL_KIND(p=13, r=200) + # + # WRITE(*,*) spacing(0.00001_DBL) + # WRITE(*,*) spacing(1.0_DBL) + # WRITE(*,*) spacing(1000._DBL) + # WRITE(*,*) spacing(10500._DBL) + # + # WRITE(*,*) spacing(0.00001_SGL) + # WRITE(*,*) spacing(1.0_SGL) + # WRITE(*,*) spacing(1000._SGL) + # WRITE(*,*) spacing(10500._SGL) + # END PROGRAM + ref = {np.float64: [1.69406589450860068E-021, + 2.22044604925031308E-016, + 1.13686837721616030E-013, + 1.81898940354585648E-012], + np.float32: [9.09494702E-13, + 1.19209290E-07, + 6.10351563E-05, + 9.76562500E-04]} + + for dt, dec_ in zip([np.float32, np.float64], (10, 20)): + x = np.array([1e-5, 1, 1000, 10500], dtype=dt) + assert_array_almost_equal(np.spacing(x), ref[dt], decimal=dec_) + +def test_nextafter_vs_spacing(): + # XXX: spacing does not handle long double yet + for t in [np.float32, np.float64]: + for _f in [1, 1e-5, 1000]: + f = t(_f) + f1 = t(_f + 1) + assert_(np.nextafter(f, f1) - f == np.spacing(f)) + +def test_pos_nan(): + """Check np.nan is a positive nan.""" + assert_(np.signbit(np.nan) == 0) + +def test_reduceat(): + """Test bug in reduceat when structured arrays are not copied.""" + db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)]) + a = np.empty([100], dtype=db) + a['name'] = 'Simple' + a['time'] = 10 + a['value'] = 100 + indx = [0, 7, 15, 25] + + h2 = [] + val1 = indx[0] + for val2 in indx[1:]: + h2.append(np.add.reduce(a['value'][val1:val2])) + val1 = val2 + h2.append(np.add.reduce(a['value'][val1:])) + h2 = np.array(h2) + + # test buffered -- this should work + h1 = np.add.reduceat(a['value'], indx) + assert_array_almost_equal(h1, h2) + + # This is when the error occurs. 
+ # test no buffer + np.setbufsize(32) + h1 = np.add.reduceat(a['value'], indx) + np.setbufsize(ncu.UFUNC_BUFSIZE_DEFAULT) + assert_array_almost_equal(h1, h2) + +def test_negative_value_raises(): + with pytest.raises(ValueError, match="buffer size must be non-negative"): + np.setbufsize(-5) + + old = np.getbufsize() + try: + prev = np.setbufsize(4096) + assert prev == old + assert np.getbufsize() == 4096 + finally: + np.setbufsize(old) + +def test_reduceat_empty(): + """Reduceat should work with empty arrays""" + indices = np.array([], 'i4') + x = np.array([], 'f8') + result = np.add.reduceat(x, indices) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0,)) + # Another case with a slightly different zero-sized shape + x = np.ones((5, 2)) + result = np.add.reduceat(x, [], axis=0) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (0, 2)) + result = np.add.reduceat(x, [], axis=1) + assert_equal(result.dtype, x.dtype) + assert_equal(result.shape, (5, 0)) + +def test_complex_nan_comparisons(): + nans = [complex(np.nan, 0), complex(0, np.nan), complex(np.nan, np.nan)] + fins = [complex(1, 0), complex(-1, 0), complex(0, 1), complex(0, -1), + complex(1, 1), complex(-1, -1), complex(0, 0)] + + with np.errstate(invalid='ignore'): + for x in nans + fins: + x = np.array([x]) + for y in nans + fins: + y = np.array([y]) + + if np.isfinite(x) and np.isfinite(y): + continue + + assert_equal(x < y, False, err_msg=f"{x!r} < {y!r}") + assert_equal(x > y, False, err_msg=f"{x!r} > {y!r}") + assert_equal(x <= y, False, err_msg=f"{x!r} <= {y!r}") + assert_equal(x >= y, False, err_msg=f"{x!r} >= {y!r}") + assert_equal(x == y, False, err_msg=f"{x!r} == {y!r}") + + +def test_rint_big_int(): + # np.rint bug for large integer values on Windows 32-bit and MKL + # https://github.com/numpy/numpy/issues/6685 + val = 4607998452777363968 + # This is exactly representable in floating point + assert_equal(val, int(float(val))) + # Rint should not change the value + assert_equal(val, np.rint(val)) + + +@pytest.mark.parametrize('ftype', [np.float32, np.float64]) +def test_memoverlap_accumulate(ftype): + # Reproduces bug https://github.com/numpy/numpy/issues/15597 + arr = np.array([0.61, 0.60, 0.77, 0.41, 0.19], dtype=ftype) + out_max = np.array([0.61, 0.61, 0.77, 0.77, 0.77], dtype=ftype) + out_min = np.array([0.61, 0.60, 0.60, 0.41, 0.19], dtype=ftype) + assert_equal(np.maximum.accumulate(arr), out_max) + assert_equal(np.minimum.accumulate(arr), out_min) + +@pytest.mark.parametrize("ufunc, dtype", [ + (ufunc, t[0]) + for ufunc in UFUNCS_BINARY_ACC + for t in ufunc.types + if t[-1] == '?' and t[0] not in 'DFGMmO' +]) +def test_memoverlap_accumulate_cmp(ufunc, dtype): + if ufunc.signature: + pytest.skip('For generic signatures only') + for size in (2, 8, 32, 64, 128, 256): + arr = np.array([0, 1, 1] * size, dtype=dtype) + acc = ufunc.accumulate(arr, dtype='?') + acc_u8 = acc.view(np.uint8) + exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=np.uint8) + assert_equal(exp, acc_u8) + +@pytest.mark.parametrize("ufunc, dtype", [ + (ufunc, t[0]) + for ufunc in UFUNCS_BINARY_ACC + for t in ufunc.types + if t[0] == t[1] and t[0] == t[-1] and t[0] not in 'DFGMmO?' 
+]) +def test_memoverlap_accumulate_symmetric(ufunc, dtype): + if ufunc.signature: + pytest.skip('For generic signatures only') + with np.errstate(all='ignore'): + for size in (2, 8, 32, 64, 128, 256): + arr = np.array([0, 1, 2] * size).astype(dtype) + acc = ufunc.accumulate(arr, dtype=dtype) + exp = np.array(list(itertools.accumulate(arr, ufunc)), dtype=dtype) + assert_equal(exp, acc) + +def test_signaling_nan_exceptions(): + with assert_no_warnings(): + a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff') + np.isnan(a) + +@pytest.mark.parametrize("arr", [ + np.arange(2), + np.matrix([0, 1]), + np.matrix([[0, 1], [2, 5]]), + ]) +def test_outer_subclass_preserve(arr): + # for gh-8661 + class foo(np.ndarray): + pass + actual = np.multiply.outer(arr.view(foo), arr.view(foo)) + assert actual.__class__.__name__ == 'foo' + +def test_outer_bad_subclass(): + class BadArr1(np.ndarray): + def __array_finalize__(self, obj): + # The outer call reshapes to 3 dims, try to do a bad reshape. + if self.ndim == 3: + self.shape = self.shape + (1,) + + class BadArr2(np.ndarray): + def __array_finalize__(self, obj): + if isinstance(obj, BadArr2): + # outer inserts 1-sized dims. In that case disturb them. + if self.shape[-1] == 1: + self.shape = self.shape[::-1] + + for cls in [BadArr1, BadArr2]: + arr = np.ones((2, 3)).view(cls) + with assert_raises(TypeError) as a: + # The first array gets reshaped (not the second one) + np.add.outer(arr, [1, 2]) + + # This actually works, since we only see the reshaping error: + arr = np.ones((2, 3)).view(cls) + assert type(np.add.outer([1, 2], arr)) is cls + +def test_outer_exceeds_maxdims(): + deep = np.ones((1,) * 33) + with assert_raises(ValueError): + np.add.outer(deep, deep) + +def test_bad_legacy_ufunc_silent_errors(): + # legacy ufuncs can't report errors and NumPy can't check if the GIL + # is released. So NumPy has to check after the GIL is released just to + # cover all bases. `np.power` uses/used to use this. + arr = np.arange(3).astype(np.float64) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + # not contiguous means the fast-path cannot be taken + non_contig = arr.repeat(20).reshape(-1, 6)[:, ::2] + ncu_tests.always_error(non_contig, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.outer(arr, arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduce(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.reduceat(arr, [0, 1]) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.accumulate(arr) + + with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error.at(arr, [0, 1, 2], arr) + + +@pytest.mark.parametrize('x1', [np.arange(3.0), [0.0, 1.0, 2.0]]) +def test_bad_legacy_gufunc_silent_errors(x1): + # Verify that an exception raised in a gufunc loop propagates correctly. + # The signature of always_error_gufunc is '(i),()->()'. 
+ with pytest.raises(RuntimeError, match=r"How unexpected :\)!"): + ncu_tests.always_error_gufunc(x1, 0.0) + + +class TestAddDocstring: + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + @pytest.mark.skipif(IS_PYPY, reason="PyPy does not modify tp_doc") + def test_add_same_docstring(self): + # test for attributes (which are C-level defined) + ncu.add_docstring(np.ndarray.flat, np.ndarray.flat.__doc__) + + # And typical functions: + def func(): + """docstring""" + return + + ncu.add_docstring(func, func.__doc__) + + @pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO") + def test_different_docstring_fails(self): + # test for attributes (which are C-level defined) + with assert_raises(RuntimeError): + ncu.add_docstring(np.ndarray.flat, "different docstring") + + # And typical functions: + def func(): + """docstring""" + return + + with assert_raises(RuntimeError): + ncu.add_docstring(func, "different docstring") + + +class TestAdd_newdoc_ufunc: + @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") + def test_ufunc_arg(self): + assert_raises(TypeError, ncu._add_newdoc_ufunc, 2, "blah") + assert_raises(ValueError, ncu._add_newdoc_ufunc, np.add, "blah") + + @pytest.mark.filterwarnings("ignore:_add_newdoc_ufunc:DeprecationWarning") + def test_string_arg(self): + assert_raises(TypeError, ncu._add_newdoc_ufunc, np.add, 3) diff --git a/python/numpy/_core/tests/test_umath_accuracy.py b/python/numpy/_core/tests/test_umath_accuracy.py new file mode 100644 index 000000000..5707e9279 --- /dev/null +++ b/python/numpy/_core/tests/test_umath_accuracy.py @@ -0,0 +1,124 @@ +import os +import sys +from ctypes import POINTER, c_double, c_float, c_int, c_longlong, cast, pointer +from os import path + +import pytest +from numpy._core._multiarray_umath import __cpu_features__ + +import numpy as np +from numpy.testing import assert_array_max_ulp +from numpy.testing._private.utils import _glibc_older_than + +UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if + isinstance(obj, np.ufunc)] +UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types] + +# Remove functions that do not support `floats` +UNARY_OBJECT_UFUNCS.remove(np.invert) +UNARY_OBJECT_UFUNCS.remove(np.bitwise_count) + +IS_AVX = __cpu_features__.get('AVX512F', False) or \ + (__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False)) + +IS_AVX512FP16 = __cpu_features__.get('AVX512FP16', False) + +# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448). 
+runtest = (sys.platform.startswith('linux') + and IS_AVX and not _glibc_older_than("2.17")) +platform_skip = pytest.mark.skipif(not runtest, + reason="avoid testing inconsistent platform " + "library implementations") + +# hex string to float conversion, adapted from: +# https://stackoverflow.com/questions/1592158/convert-hex-to-float # +def convert(s, datatype="np.float32"): + i = int(s, 16) # convert from hex to a Python int + if (datatype == "np.float64"): + cp = pointer(c_longlong(i)) # make this into a c long long integer + fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer + else: + cp = pointer(c_int(i)) # make this into a c integer + fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer + + return fp.contents.value # dereference the pointer, get the float + + +str_to_float = np.vectorize(convert) + +class TestAccuracy: + @platform_skip + def test_validate_transcendentals(self): + with np.errstate(all='ignore'): + data_dir = path.join(path.dirname(__file__), 'data') + files = os.listdir(data_dir) + files = list(filter(lambda f: f.endswith('.csv'), files)) + for filename in files: + filepath = path.join(data_dir, filename) + with open(filepath) as fid: + file_without_comments = ( + r for r in fid if r[0] not in ('$', '#') + ) + data = np.genfromtxt(file_without_comments, + dtype=('|S39', '|S39', '|S39', int), + names=('type', 'input', 'output', 'ulperr'), + delimiter=',', + skip_header=1) + npname = path.splitext(filename)[0].split('-')[3] + npfunc = getattr(np, npname) + for datatype in np.unique(data['type']): + data_subset = data[data['type'] == datatype] + inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype)) + perm = np.random.permutation(len(inval)) + inval = inval[perm] + outval = outval[perm] + maxulperr = data_subset['ulperr'].max() + assert_array_max_ulp(npfunc(inval), outval, maxulperr) + + @pytest.mark.skipif(IS_AVX512FP16, + reason="SVML FP16 has slightly higher ULP errors") + @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) + def test_validate_fp16_transcendentals(self, ufunc): + with np.errstate(all='ignore'): + arr = np.arange(65536, dtype=np.int16) + datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) + datafp32 = datafp16.astype(np.float32) + assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), + maxulp=1, dtype=np.float16) + + @pytest.mark.skipif(not IS_AVX512FP16, + reason="lower ULP bounds only apply for SVML FP16") + def test_validate_svml_fp16(self): + max_ulp_err = { + "arccos": 2.54, + "arccosh": 2.09, + "arcsin": 3.06, + "arcsinh": 1.51, + "arctan": 2.61, + "arctanh": 1.88, + "cbrt": 1.57, + "cos": 1.43, + "cosh": 1.33, + "exp2": 1.33, + "exp": 1.27, + "expm1": 0.53, + "log": 1.80, + "log10": 1.27, + "log1p": 1.88, + "log2": 1.80, + "sin": 1.88, + "sinh": 2.05, + "tan": 2.26, + "tanh": 3.00, + } + + with np.errstate(all='ignore'): + arr = np.arange(65536, dtype=np.int16) + datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16) + datafp32 = datafp16.astype(np.float32) + for func in max_ulp_err: + ufunc = getattr(np, func) + ulp = np.ceil(max_ulp_err[func]) + assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32), + maxulp=ulp, dtype=np.float16) diff --git a/python/numpy/_core/tests/test_umath_complex.py new file mode 100644 index 000000000..a97af475d --- /dev/null +++ 
b/python/numpy/_core/tests/test_umath_complex.py @@ -0,0 +1,626 @@ +import platform +import sys + +# import the c-extension module directly since _arg is not exported via umath +import numpy._core._multiarray_umath as ncu +import pytest + +import numpy as np +from numpy.testing import ( + assert_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) + +# TODO: branch cuts (use Pauli code) +# TODO: conj 'symmetry' +# TODO: FPU exceptions + +# At least on Windows the results of many complex functions are not conforming +# to the C99 standard. See ticket 1574. +# Ditto for Solaris (ticket 1642) and OS X on PowerPC. +# FIXME: this will probably change when we require full C99 compatibility +with np.errstate(all='ignore'): + functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) + or (np.log(complex(ncu.NZERO, 0)).imag != np.pi)) +# TODO: replace with a check on whether platform-provided C99 funcs are used +xfail_complex_tests = (not sys.platform.startswith('linux') or functions_seem_flaky) + +# TODO This can be xfail when the generator functions are got rid of. +platform_skip = pytest.mark.skipif(xfail_complex_tests, + reason="Inadequate C99 complex support") + + +class TestCexp: + def test_simple(self): + check = check_complex_value + f = np.exp + + check(f, 1, 0, np.exp(1), 0, False) + check(f, 0, 1, np.cos(1), np.sin(1), False) + + ref = np.exp(1) * complex(np.cos(1), np.sin(1)) + check(f, 1, 1, ref.real, ref.imag, False) + + @platform_skip + def test_special_values(self): + # C99: Section G 6.3.1 + + check = check_complex_value + f = np.exp + + # cexp(+-0 + 0i) is 1 + 0i + check(f, ncu.PZERO, 0, 1, 0, False) + check(f, ncu.NZERO, 0, 1, 0, False) + + # cexp(x + infi) is nan + nani for finite x and raises 'invalid' FPU + # exception + check(f, 1, np.inf, np.nan, np.nan) + check(f, -1, np.inf, np.nan, np.nan) + check(f, 0, np.inf, np.nan, np.nan) + + # cexp(inf + 0i) is inf + 0i + check(f, np.inf, 0, np.inf, 0) + + # cexp(-inf + yi) is +0 * (cos(y) + i sin(y)) for finite y + check(f, -np.inf, 1, ncu.PZERO, ncu.PZERO) + check(f, -np.inf, 0.75 * np.pi, ncu.NZERO, ncu.PZERO) + + # cexp(inf + yi) is +inf * (cos(y) + i sin(y)) for finite y + check(f, np.inf, 1, np.inf, np.inf) + check(f, np.inf, 0.75 * np.pi, -np.inf, np.inf) + + # cexp(-inf + inf i) is +-0 +- 0i (signs unspecified) + def _check_ninf_inf(dummy): + msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(-np.inf, np.inf))) + if z.real != 0 or z.imag != 0: + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_inf(None) + + # cexp(inf + inf i) is +-inf + NaNi and raised invalid FPU ex. 
+ def _check_inf_inf(dummy): + msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(np.inf, np.inf))) + if not np.isinf(z.real) or not np.isnan(z.imag): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_inf_inf(None) + + # cexp(-inf + nan i) is +-0 +- 0i + def _check_ninf_nan(dummy): + msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(-np.inf, np.nan))) + if z.real != 0 or z.imag != 0: + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_nan(None) + + # cexp(inf + nan i) is +-inf + nani + def _check_inf_nan(dummy): + msgform = "cexp(inf, nan) is (%f, %f), expected (+-inf, nan)" + with np.errstate(invalid='ignore'): + z = f(np.array(complex(np.inf, np.nan))) + if not np.isinf(z.real) or not np.isnan(z.imag): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_inf_nan(None) + + # cexp(nan + yi) is nan + nani for y != 0 (optional: raises invalid FPU + # ex) + check(f, np.nan, 1, np.nan, np.nan) + check(f, np.nan, -1, np.nan, np.nan) + + check(f, np.nan, np.inf, np.nan, np.nan) + check(f, np.nan, -np.inf, np.nan, np.nan) + + # cexp(nan + nani) is nan + nani + check(f, np.nan, np.nan, np.nan, np.nan) + + # TODO This can be xfail when the generator functions are got rid of. + @pytest.mark.skip(reason="cexp(nan + 0I) is wrong on most platforms") + def test_special_values2(self): + # XXX: most implementations get it wrong here (including glibc <= 2.10) + # cexp(nan + 0i) is nan + 0i + check = check_complex_value + f = np.exp + + check(f, np.nan, 0, np.nan, 0) + +class TestClog: + def test_simple(self): + x = np.array([1 + 0j, 1 + 2j]) + y_r = np.log(np.abs(x)) + 1j * np.angle(x) + y = np.log(x) + assert_almost_equal(y, y_r) + + @platform_skip + @pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.") + def test_special_values(self): + xl = [] + yl = [] + + # From C99 std (Sec 6.3.2) + # XXX: check exceptions raised + # --- raise for invalid fails. + + # clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero' + # floating-point exception. + with np.errstate(divide='raise'): + x = np.array([ncu.NZERO], dtype=complex) + y = complex(-np.inf, np.pi) + assert_raises(FloatingPointError, np.log, x) + with np.errstate(divide='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero' + # floating-point exception. + with np.errstate(divide='raise'): + x = np.array([0], dtype=complex) + y = complex(-np.inf, 0) + assert_raises(FloatingPointError, np.log, x) + with np.errstate(divide='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(x + i inf) returns +inf + i pi/2, for finite x. + x = np.array([complex(1, np.inf)], dtype=complex) + y = complex(np.inf, 0.5 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-1, np.inf)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(x + iNaN) returns NaN + iNaN and optionally raises the + # 'invalid' floating-point exception, for finite x. 
+ with np.errstate(invalid='raise'): + x = np.array([complex(1., np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + with np.errstate(invalid='raise'): + x = np.array([np.inf + 1j * np.nan], dtype=complex) + #assert_raises(FloatingPointError, np.log, x) + with np.errstate(invalid='ignore'): + assert_almost_equal(np.log(x), y) + + xl.append(x) + yl.append(y) + + # clog(- inf + iy) returns +inf + ipi , for finite positive-signed y. + x = np.array([-np.inf + 1j], dtype=complex) + y = complex(np.inf, np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + iy) returns +inf + i0, for finite positive-signed y. + x = np.array([np.inf + 1j], dtype=complex) + y = complex(np.inf, 0) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(- inf + i inf) returns +inf + i3pi /4. + x = np.array([complex(-np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.75 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+ inf + i inf) returns +inf + ipi /4. + x = np.array([complex(np.inf, np.inf)], dtype=complex) + y = complex(np.inf, 0.25 * np.pi) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(+/- inf + iNaN) returns +inf + iNaN. + x = np.array([complex(np.inf, np.nan)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + x = np.array([complex(-np.inf, np.nan)], dtype=complex) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iy) returns NaN + iNaN and optionally raises the + # 'invalid' floating-point exception, for finite y. + x = np.array([complex(np.nan, 1)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + i inf) returns +inf + iNaN. + x = np.array([complex(np.nan, np.inf)], dtype=complex) + y = complex(np.inf, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(NaN + iNaN) returns NaN + iNaN. + x = np.array([complex(np.nan, np.nan)], dtype=complex) + y = complex(np.nan, np.nan) + assert_almost_equal(np.log(x), y) + xl.append(x) + yl.append(y) + + # clog(conj(z)) = conj(clog(z)). 
+ xa = np.array(xl, dtype=complex) + ya = np.array(yl, dtype=complex) + with np.errstate(divide='ignore'): + for i in range(len(xa)): + assert_almost_equal(np.log(xa[i].conj()), ya[i].conj()) + + +class TestCsqrt: + + def test_simple(self): + # sqrt(1) + check_complex_value(np.sqrt, 1, 0, 1, 0) + + # sqrt(1i) + rres = 0.5 * np.sqrt(2) + ires = rres + check_complex_value(np.sqrt, 0, 1, rres, ires, False) + + # sqrt(-1) + check_complex_value(np.sqrt, -1, 0, 0, 1) + + def test_simple_conjugate(self): + ref = np.conj(np.sqrt(complex(1, 1))) + + def f(z): + return np.sqrt(np.conj(z)) + + check_complex_value(f, 1, 1, ref.real, ref.imag, False) + + #def test_branch_cut(self): + # _check_branch_cut(f, -1, 0, 1, -1) + + @platform_skip + def test_special_values(self): + # C99: Sec G 6.4.2 + + check = check_complex_value + f = np.sqrt + + # csqrt(+-0 + 0i) is 0 + 0i + check(f, ncu.PZERO, 0, 0, 0) + check(f, ncu.NZERO, 0, 0, 0) + + # csqrt(x + infi) is inf + infi for any x (including NaN) + check(f, 1, np.inf, np.inf, np.inf) + check(f, -1, np.inf, np.inf, np.inf) + + check(f, ncu.PZERO, np.inf, np.inf, np.inf) + check(f, ncu.NZERO, np.inf, np.inf, np.inf) + check(f, np.inf, np.inf, np.inf, np.inf) + check(f, -np.inf, np.inf, np.inf, np.inf) # noqa: E221 + check(f, -np.nan, np.inf, np.inf, np.inf) # noqa: E221 + + # csqrt(x + nani) is nan + nani for any finite x + check(f, 1, np.nan, np.nan, np.nan) + check(f, -1, np.nan, np.nan, np.nan) + check(f, 0, np.nan, np.nan, np.nan) + + # csqrt(-inf + yi) is +0 + infi for any finite y > 0 + check(f, -np.inf, 1, ncu.PZERO, np.inf) + + # csqrt(inf + yi) is +inf + 0i for any finite y > 0 + check(f, np.inf, 1, np.inf, ncu.PZERO) + + # csqrt(-inf + nani) is nan +- infi (both +i infi are valid) + def _check_ninf_nan(dummy): + msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" + z = np.sqrt(np.array(complex(-np.inf, np.nan))) + # FIXME: ugly workaround for isinf bug. 
+ with np.errstate(invalid='ignore'): + if not (np.isnan(z.real) and np.isinf(z.imag)): + raise AssertionError(msgform % (z.real, z.imag)) + + _check_ninf_nan(None) + + # csqrt(+inf + nani) is inf + nani + check(f, np.inf, np.nan, np.inf, np.nan) + + # csqrt(nan + yi) is nan + nani for any finite y (infinite handled in x + # + nani) + check(f, np.nan, 0, np.nan, np.nan) + check(f, np.nan, 1, np.nan, np.nan) + check(f, np.nan, np.nan, np.nan, np.nan) + + # XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch + # cuts first) + +class TestCpow: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) + y_r = x ** 2 + y = np.power(x, 2) + assert_almost_equal(y, y_r) + + def test_scalar(self): + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) + lx = list(range(len(x))) + + # Hardcode the expected `builtins.complex` values, + # as complex exponentiation is broken as of bpo-44698 + p_r = [ + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, + complex(np.inf, np.nan), + complex(np.nan, np.nan), + ] + + n_r = [x[i] ** y[i] for i in lx] + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + + def test_array(self): + x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan]) + y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3]) + lx = list(range(len(x))) + + # Hardcode the expected `builtins.complex` values, + # as complex exponentiation is broken as of bpo-44698 + p_r = [ + 1 + 0j, + 0.20787957635076193 + 0j, + 0.35812203996480685 + 0.6097119028618724j, + 0.12659112128185032 + 0.48847676699581527j, + complex(np.inf, np.nan), + complex(np.nan, np.nan), + ] + + n_r = x ** y + for i in lx: + assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i) + +class TestCabs: + def setup_method(self): + self.olderr = np.seterr(invalid='ignore') + + def teardown_method(self): + np.seterr(**self.olderr) + + def test_simple(self): + x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan]) + y_r = np.array([np.sqrt(2.), 2, np.sqrt(5), np.inf, np.nan]) + y = np.abs(x) + assert_almost_equal(y, y_r) + + def test_fabs(self): + # Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs) + x = np.array([1 + 0j], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(1, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.inf, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + x = np.array([complex(np.nan, ncu.NZERO)], dtype=complex) + assert_array_equal(np.abs(x), np.real(x)) + + def test_cabs_inf_nan(self): + x, y = [], [] + + # cabs(+-nan + nani) returns nan + x.append(np.nan) + y.append(np.nan) + check_real_value(np.abs, np.nan, np.nan, np.nan) + + x.append(np.nan) + y.append(-np.nan) + check_real_value(np.abs, -np.nan, np.nan, np.nan) + + # According to C99 standard, if exactly one of the real/part is inf and + # the other nan, then cabs should return inf + x.append(np.inf) + y.append(np.nan) + check_real_value(np.abs, np.inf, np.nan, np.inf) + + x.append(-np.inf) + y.append(np.nan) + check_real_value(np.abs, -np.inf, np.nan, np.inf) + + # cabs(conj(z)) == conj(cabs(z)) (= cabs(z)) + def f(a): + return np.abs(np.conj(a)) + + def g(a, b): + return np.abs(complex(a, b)) + + xa = 
np.array(x, dtype=complex) + assert len(xa) == len(x) == len(y) + for xi, yi in zip(x, y): + ref = g(xi, yi) + check_real_value(f, xi, yi, ref) + +class TestCarg: + def test_simple(self): + check_real_value(ncu._arg, 1, 0, 0, False) + check_real_value(ncu._arg, 0, 1, 0.5 * np.pi, False) + + check_real_value(ncu._arg, 1, 1, 0.25 * np.pi, False) + check_real_value(ncu._arg, ncu.PZERO, ncu.PZERO, ncu.PZERO) + + # TODO This can be xfail when the generator functions are got rid of. + @pytest.mark.skip( + reason="Complex arithmetic with signed zero fails on most platforms") + def test_zero(self): + # carg(-0 +- 0i) returns +- pi + check_real_value(ncu._arg, ncu.NZERO, ncu.PZERO, np.pi, False) + check_real_value(ncu._arg, ncu.NZERO, ncu.NZERO, -np.pi, False) + + # carg(+0 +- 0i) returns +- 0 + check_real_value(ncu._arg, ncu.PZERO, ncu.PZERO, ncu.PZERO) + check_real_value(ncu._arg, ncu.PZERO, ncu.NZERO, ncu.NZERO) + + # carg(x +- 0i) returns +- 0 for x > 0 + check_real_value(ncu._arg, 1, ncu.PZERO, ncu.PZERO, False) + check_real_value(ncu._arg, 1, ncu.NZERO, ncu.NZERO, False) + + # carg(x +- 0i) returns +- pi for x < 0 + check_real_value(ncu._arg, -1, ncu.PZERO, np.pi, False) + check_real_value(ncu._arg, -1, ncu.NZERO, -np.pi, False) + + # carg(+- 0 + yi) returns pi/2 for y > 0 + check_real_value(ncu._arg, ncu.PZERO, 1, 0.5 * np.pi, False) + check_real_value(ncu._arg, ncu.NZERO, 1, 0.5 * np.pi, False) + + # carg(+- 0 + yi) returns -pi/2 for y < 0 + check_real_value(ncu._arg, ncu.PZERO, -1, -0.5 * np.pi, False) + check_real_value(ncu._arg, ncu.NZERO, -1, -0.5 * np.pi, False) + + #def test_branch_cuts(self): + # _check_branch_cut(ncu._arg, -1, 1j, -1, 1) + + def test_special_values(self): + # carg(-np.inf +- yi) returns +-pi for finite y > 0 + check_real_value(ncu._arg, -np.inf, 1, np.pi, False) + check_real_value(ncu._arg, -np.inf, -1, -np.pi, False) + + # carg(np.inf +- yi) returns +-0 for finite y > 0 + check_real_value(ncu._arg, np.inf, 1, ncu.PZERO, False) + check_real_value(ncu._arg, np.inf, -1, ncu.NZERO, False) + + # carg(x +- np.infi) returns +-pi/2 for finite x + check_real_value(ncu._arg, 1, np.inf, 0.5 * np.pi, False) + check_real_value(ncu._arg, 1, -np.inf, -0.5 * np.pi, False) + + # carg(-np.inf +- np.infi) returns +-3pi/4 + check_real_value(ncu._arg, -np.inf, np.inf, 0.75 * np.pi, False) + check_real_value(ncu._arg, -np.inf, -np.inf, -0.75 * np.pi, False) + + # carg(np.inf +- np.infi) returns +-pi/4 + check_real_value(ncu._arg, np.inf, np.inf, 0.25 * np.pi, False) + check_real_value(ncu._arg, np.inf, -np.inf, -0.25 * np.pi, False) + + # carg(x + yi) returns np.nan if x or y is nan + check_real_value(ncu._arg, np.nan, 0, np.nan, False) + check_real_value(ncu._arg, 0, np.nan, np.nan, False) + + check_real_value(ncu._arg, np.nan, np.inf, np.nan, False) + check_real_value(ncu._arg, np.inf, np.nan, np.nan, False) + + +def check_real_value(f, x1, y1, x, exact=True): + z1 = np.array([complex(x1, y1)]) + if exact: + assert_equal(f(z1), x) + else: + assert_almost_equal(f(z1), x) + + +def check_complex_value(f, x1, y1, x2, y2, exact=True): + z1 = np.array([complex(x1, y1)]) + z2 = complex(x2, y2) + with np.errstate(invalid='ignore'): + if exact: + assert_equal(f(z1), z2) + else: + assert_almost_equal(f(z1), z2) + +class TestSpecialComplexAVX: + @pytest.mark.parametrize("stride", [-4, -2, -1, 1, 2, 4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + def test_array(self, stride, astype): + arr = np.array([complex(np.nan, np.nan), + complex(np.nan, np.inf), + complex(np.inf, 
np.nan), + complex(np.inf, np.inf), + complex(0., np.inf), + complex(np.inf, 0.), + complex(0., 0.), + complex(0., np.nan), + complex(np.nan, 0.)], dtype=astype) + abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype) + sq_true = np.array([complex(np.nan, np.nan), + complex(np.nan, np.nan), + complex(np.nan, np.nan), + complex(np.nan, np.inf), + complex(-np.inf, np.nan), + complex(np.inf, np.nan), + complex(0., 0.), + complex(np.nan, np.nan), + complex(np.nan, np.nan)], dtype=astype) + with np.errstate(invalid='ignore'): + assert_equal(np.abs(arr[::stride]), abs_true[::stride]) + assert_equal(np.square(arr[::stride]), sq_true[::stride]) + +class TestComplexAbsoluteAVX: + @pytest.mark.parametrize("arraysize", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19]) + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + # test to ensure masking and strides work as intended in the AVX implementation + def test_array(self, arraysize, stride, astype): + arr = np.ones(arraysize, dtype=astype) + abs_true = np.ones(arraysize, dtype=arr.real.dtype) + assert_equal(np.abs(arr[::stride]), abs_true[::stride]) + +# Testcase taken as is from https://github.com/numpy/numpy/issues/16660 +class TestComplexAbsoluteMixedDTypes: + @pytest.mark.parametrize("stride", [-4, -3, -2, -1, 1, 2, 3, 4]) + @pytest.mark.parametrize("astype", [np.complex64, np.complex128]) + @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate']) + def test_array(self, stride, astype, func): + dtype = [('template_id', 'U') + uni_arr2 = str_arr.astype(').itemsize` instead.", + "byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`", + "compare_chararrays": + "It's still available as `np.char.compare_chararrays`.", + "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", +} diff --git a/python/numpy/_expired_attrs_2_0.pyi b/python/numpy/_expired_attrs_2_0.pyi new file mode 100644 index 000000000..14524689c --- /dev/null +++ b/python/numpy/_expired_attrs_2_0.pyi @@ -0,0 +1,62 @@ +from typing import Final, TypedDict, final, type_check_only + +@final +@type_check_only +class _ExpiredAttributesType(TypedDict): + geterrobj: str + seterrobj: str + cast: str + source: str + lookfor: str + who: str + fastCopyAndTranspose: str + set_numeric_ops: str + NINF: str + PINF: str + NZERO: str + PZERO: str + add_newdoc: str + add_docstring: str + add_newdoc_ufunc: str + safe_eval: str + float_: str + complex_: str + longfloat: str + singlecomplex: str + cfloat: str + longcomplex: str + clongfloat: str + string_: str + unicode_: str + Inf: str + Infinity: str + NaN: str + infty: str + issctype: str + maximum_sctype: str + obj2sctype: str + sctype2char: str + sctypes: str + issubsctype: str + set_string_function: str + asfarray: str + issubclass_: str + tracemalloc_domain: str + mat: str + recfromcsv: str + recfromtxt: str + deprecate: str + deprecate_with_doc: str + disp: str + find_common_type: str + round_: str + get_array_wrap: str + DataSource: str + nbytes: str + byte_bounds: str + compare_chararrays: str + format_parser: str + alltrue: str + sometrue: str + +__expired_attributes__: Final[_ExpiredAttributesType] = ... 
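The `__expired_attributes__` table in the two new `_expired_attrs_2_0` files above only stores the migration hints; the code that raises on access lives elsewhere in the package. As a minimal sketch of how such a table is typically consumed (the wiring below is an assumption for illustration, not the exact NumPy implementation), a PEP 562 module-level `__getattr__` can turn any lookup of a removed name into an informative AttributeError:

# Sketch only: assumes the table is importable as shown; the message
# format mirrors the replacement hints stored in __expired_attributes__.
from numpy._expired_attrs_2_0 import __expired_attributes__

def __getattr__(attr):  # module-level hook, e.g. in the package __init__.py
    if attr in __expired_attributes__:
        # Point the user at the replacement recorded in the table.
        raise AttributeError(
            f"`np.{attr}` was removed in the NumPy 2.0 release. "
            f"{__expired_attributes__[attr]}"
        )
    raise AttributeError(f"module 'numpy' has no attribute {attr!r}")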
diff --git a/python/numpy/_globals.py new file mode 100644 index 000000000..5f838ba91 --- /dev/null +++ b/python/numpy/_globals.py @@ -0,0 +1,96 @@ +""" +Module defining global singleton classes. + +This module raises a RuntimeError if an attempt to reload it is made. In that +way the identities of the classes defined here are fixed and will remain so +even if numpy itself is reloaded. In particular, a function like the following +will still work correctly after numpy is reloaded:: + + def foo(arg=np._NoValue): + if arg is np._NoValue: + ... + +That was not the case when the singleton classes were defined in the numpy +``__init__.py`` file. See gh-7844 for a discussion of the reload problem that +motivated this module. + +""" +import enum + +from ._utils import set_module as _set_module + +__all__ = ['_NoValue', '_CopyMode'] + + +# Disallow reloading this module so as to preserve the identities of the +# classes defined here. +if '_is_loaded' in globals(): + raise RuntimeError('Reloading numpy._globals is not allowed') +_is_loaded = True + + +class _NoValueType: + """Special keyword value. + + The instance of this class may be used as the default value assigned to a + keyword if no other obvious default (e.g., `None`) is suitable. + + Common reasons for using this keyword are: + + - A new keyword is added to a function, and that function forwards its + inputs to another function or method which can be defined outside of + NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims`` + keyword was added that could only be forwarded if the user explicitly + specified ``keepdims``; downstream array libraries may not have added + the same keyword, so adding ``x.std(..., keepdims=keepdims)`` + unconditionally could have broken previously working code. + - A keyword is being deprecated, and a deprecation warning must only be + emitted when the keyword is used. + + """ + __instance = None + + def __new__(cls): + # ensure that only one instance exists + if not cls.__instance: + cls.__instance = super().__new__(cls) + return cls.__instance + + def __repr__(self): + return "<no value>" + + +_NoValue = _NoValueType() + + +@_set_module("numpy") +class _CopyMode(enum.Enum): + """ + An enumeration for the copy modes supported + by numpy.copy() and numpy.array(). The following three modes are supported: + + - ALWAYS: This means that a deep copy of the input + array will always be taken. + - IF_NEEDED: This means that a deep copy of the input + array will be taken only if necessary. + - NEVER: This means that the deep copy will never be taken. + If a copy cannot be avoided then a `ValueError` will be + raised. + + Note that the buffer-protocol could in theory do copies. NumPy currently + assumes an object exporting the buffer protocol will never do this. + """ + + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self): + # For backwards compatibility + if self == _CopyMode.ALWAYS: + return True + + if self == _CopyMode.NEVER: + return False + + raise ValueError(f"{self} is neither True nor False.") diff --git a/python/numpy/_globals.pyi new file mode 100644 index 000000000..b2231a963 --- /dev/null +++ b/python/numpy/_globals.pyi @@ -0,0 +1,17 @@ +__all__ = ["_CopyMode", "_NoValue"] + +import enum +from typing import Final, final + +@final +class _CopyMode(enum.Enum): + ALWAYS = True + NEVER = False + IF_NEEDED = 2 + + def __bool__(self, /) -> bool: ... + +@final +class _NoValueType: ... + +_NoValue: Final[_NoValueType] = ... 
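A short usage sketch for the two singletons added above (hedged: `_NoValue` and `_CopyMode` are private names, and the `copy=` handling shown assumes the NumPy 2.x copy semantics described in the `_CopyMode` docstring):

import numpy as np

a = np.arange(3)
b = np.array(a, copy=np._CopyMode.ALWAYS)     # always a fresh buffer
c = np.array(a, copy=np._CopyMode.IF_NEEDED)  # may alias `a`
d = np.array(a, copy=np._CopyMode.NEVER)      # ValueError if a copy were required

# _NoValue distinguishes "argument omitted" from every real value, None included:
def my_std(x, keepdims=np._NoValue):
    kwargs = {} if keepdims is np._NoValue else {"keepdims": keepdims}
    return x.std(**kwargs)

Note the design choice visible in `_CopyMode.__bool__`: `IF_NEEDED` deliberately raises on truthiness tests, so code that naively does `if copy:` fails loudly instead of silently picking one of the other two behaviors.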
diff --git a/python/numpy/_pyinstaller/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/numpy/_pyinstaller/__init__.pyi new file mode 100644 index 000000000..e69de29bb diff --git a/python/numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..137d77687 Binary files /dev/null and b/python/numpy/_pyinstaller/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc new file mode 100644 index 000000000..426ab7fc5 Binary files /dev/null and b/python/numpy/_pyinstaller/__pycache__/hook-numpy.cpython-312.pyc differ diff --git a/python/numpy/_pyinstaller/hook-numpy.py new file mode 100644 index 000000000..61c224b33 --- /dev/null +++ b/python/numpy/_pyinstaller/hook-numpy.py @@ -0,0 +1,36 @@ +"""This hook should collect all binary files and any hidden modules that numpy +needs. + +Our (somewhat inadequate) docs for writing PyInstaller hooks are kept here: +https://pyinstaller.readthedocs.io/en/stable/hooks.html + +""" +from PyInstaller.compat import is_pure_conda +from PyInstaller.utils.hooks import collect_dynamic_libs + +# Collect all DLLs inside numpy's installation folder, dump them into built +# app's root. +binaries = collect_dynamic_libs("numpy", ".") + +# If using Conda without any non-conda virtual environment manager: +if is_pure_conda: + # Assume NumPy is running from Conda-forge and collect its DLLs from the + # communal Conda bin directory. DLLs from NumPy's dependencies must also be + # collected to capture MKL, OpenBLAS, OpenMP, etc. + from PyInstaller.utils.hooks import conda_support + datas = conda_support.collect_dynamic_libs("numpy", dependencies=True) + +# Submodules PyInstaller cannot detect. `_dtype_ctypes` is only imported +# from C and `_multiarray_tests` is used in tests (which are not packed). +hiddenimports = ['numpy._core._dtype_ctypes', 'numpy._core._multiarray_tests'] + +# Remove testing and building code and packages that are referenced throughout +# NumPy but are not really dependencies. +excludedimports = [ + "scipy", + "pytest", + "f2py", + "setuptools", + "distutils", + "numpy.distutils", +] diff --git a/python/numpy/_pyinstaller/hook-numpy.pyi new file mode 100644 index 000000000..2642996da --- /dev/null +++ b/python/numpy/_pyinstaller/hook-numpy.pyi @@ -0,0 +1,13 @@ +from typing import Final + +# from `PyInstaller.compat` +is_conda: Final[bool] +is_pure_conda: Final[bool] + +# from `PyInstaller.utils.hooks` +def is_module_satisfies(requirements: str, version: None = None, version_attr: None = None) -> bool: ... 
+ +binaries: Final[list[tuple[str, str]]] + +hiddenimports: Final[list[str]] +excludedimports: Final[list[str]] diff --git a/python/numpy/_pyinstaller/tests/__init__.py b/python/numpy/_pyinstaller/tests/__init__.py new file mode 100644 index 000000000..4ed8fdd53 --- /dev/null +++ b/python/numpy/_pyinstaller/tests/__init__.py @@ -0,0 +1,16 @@ +import pytest + +from numpy.testing import IS_EDITABLE, IS_WASM + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/python/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc b/python/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..0b42ea3dd Binary files /dev/null and b/python/numpy/_pyinstaller/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc b/python/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc new file mode 100644 index 000000000..f53317cc0 Binary files /dev/null and b/python/numpy/_pyinstaller/tests/__pycache__/pyinstaller-smoke.cpython-312.pyc differ diff --git a/python/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc b/python/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc new file mode 100644 index 000000000..1ad1faf06 Binary files /dev/null and b/python/numpy/_pyinstaller/tests/__pycache__/test_pyinstaller.cpython-312.pyc differ diff --git a/python/numpy/_pyinstaller/tests/pyinstaller-smoke.py b/python/numpy/_pyinstaller/tests/pyinstaller-smoke.py new file mode 100644 index 000000000..eb28070e3 --- /dev/null +++ b/python/numpy/_pyinstaller/tests/pyinstaller-smoke.py @@ -0,0 +1,32 @@ +"""A crude *bit of everything* smoke test to verify PyInstaller compatibility. + +PyInstaller typically goes wrong by forgetting to package modules, extension +modules or shared libraries. This script should aim to touch as many of those +as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure +due to an uncollected resource. Missing resources are unlikely to lead to +arithmetic errors so there's generally no need to verify any calculation's +output - merely that it made it to the end OK. This script should not +explicitly import any of numpy's submodules as that gives PyInstaller undue +hints that those submodules exist and should be collected (accessing implicitly +loaded submodules is OK). + +""" +import numpy as np + +a = np.arange(1., 10.).reshape((3, 3)) % 5 +np.linalg.det(a) +a @ a +a @ a.T +np.linalg.inv(a) +np.sin(np.exp(a)) +np.linalg.svd(a) +np.linalg.eigh(a) + +np.unique(np.random.randint(0, 10, 100)) +np.sort(np.random.uniform(0, 10, 100)) + +np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) +np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum() +np.polynomial.Legendre([7, 8, 9]).roots() + +print("I made it!") diff --git a/python/numpy/_pyinstaller/tests/test_pyinstaller.py b/python/numpy/_pyinstaller/tests/test_pyinstaller.py new file mode 100644 index 000000000..a9061da19 --- /dev/null +++ b/python/numpy/_pyinstaller/tests/test_pyinstaller.py @@ -0,0 +1,35 @@ +import subprocess +from pathlib import Path + +import pytest + + +# PyInstaller has been very unproactive about replacing 'imp' with 'importlib'. +@pytest.mark.filterwarnings('ignore::DeprecationWarning') +# It also leaks io.BytesIO()s. 
+@pytest.mark.filterwarnings('ignore::ResourceWarning') +@pytest.mark.parametrize("mode", ["--onedir", "--onefile"]) +@pytest.mark.slow +def test_pyinstaller(mode, tmp_path): + """Compile and run pyinstaller-smoke.py using PyInstaller.""" + + pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run + + source = Path(__file__).with_name("pyinstaller-smoke.py").resolve() + args = [ + # Place all generated files in ``tmp_path``. + '--workpath', str(tmp_path / "build"), + '--distpath', str(tmp_path / "dist"), + '--specpath', str(tmp_path), + mode, + str(source), + ] + pyinstaller_cli(args) + + if mode == "--onefile": + exe = tmp_path / "dist" / source.stem + else: + exe = tmp_path / "dist" / source.stem / source.stem + + p = subprocess.run([str(exe)], check=True, stdout=subprocess.PIPE) + assert p.stdout.strip() == b"I made it!" diff --git a/python/numpy/_pytesttester.py new file mode 100644 index 000000000..77342e44a --- /dev/null +++ b/python/numpy/_pytesttester.py @@ -0,0 +1,201 @@ +""" +Pytest test running. + +This module implements the ``test()`` function for NumPy modules. The usual +boilerplate for doing that is to put the following in the module +``__init__.py`` file:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + +Warnings filtering and other runtime settings should be dealt with in the +``pytest.ini`` file in the numpy repo root. The behavior of the test depends on +whether or not that file is found as follows: + +* ``pytest.ini`` is present (develop mode) + All warnings except those explicitly filtered out are raised as errors. +* ``pytest.ini`` is absent (release mode) + DeprecationWarnings and PendingDeprecationWarnings are ignored, other + warnings are passed through. + +In practice, tests run from the numpy repo are run in development mode with +``spin``, through the standard ``spin test`` invocation or from an inplace +build with ``pytest numpy``. + +This module is imported by every numpy subpackage, so lies at the top level to +simplify circular import issues. For the same reason, it contains no numpy +imports at module scope, instead importing numpy within function calls. +""" +import os +import sys + +__all__ = ['PytestTester'] + + +def _show_numpy_info(): + import numpy as np + + print(f"NumPy version {np.__version__}") + info = np.lib._utils_impl._opt_info() + print("NumPy CPU features: ", (info or 'nothing enabled')) + + +class PytestTester: + """ + Pytest test runner. + + A test function is typically added to a package's __init__.py like so:: + + from numpy._pytesttester import PytestTester + test = PytestTester(__name__) + del PytestTester + + Calling this test function finds and runs all tests associated with the + module and all its sub-modules. + + Attributes + ---------- + module_name : str + Full path to the package to test. + + Parameters + ---------- + module_name : module name + The name of the module to test. + + Notes + ----- + Unlike the previous ``nose``-based implementation, this class is not + publicly exposed as it performs some ``numpy``-specific warning + suppression. + + """ + def __init__(self, module_name): + self.module_name = module_name + self.__module__ = module_name + + def __call__(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, durations=-1, tests=None): + """ + Run tests for module using pytest. + + Parameters + ---------- + label : {'fast', 'full'}, optional + Identifies the tests to run. 
When set to 'fast', tests decorated + with `pytest.mark.slow` are skipped, when 'full', the slow marker + is ignored. + verbose : int, optional + Verbosity value for test outputs, in the range 1-3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to pytests. + doctests : bool, optional + .. note:: Not supported + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + Requires installation of (pip) pytest-cov. + durations : int, optional + If < 0, do nothing, If 0, report time of all tests, if > 0, + report the time of the slowest `timer` tests. Default is -1. + tests : test or list of tests + Tests to be executed with pytest '--pyargs' + + Returns + ------- + result : bool + Return True on success, false otherwise. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for + it. For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + ... + 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds + >>> result + True + + """ + import warnings + + import pytest + + module = sys.modules[self.module_name] + module_path = os.path.abspath(module.__path__[0]) + + # setup the pytest arguments + pytest_args = ["-l"] + + # offset verbosity. The "-q" cancels a "-v". + pytest_args += ["-q"] + + if sys.version_info < (3, 12): + with warnings.catch_warnings(): + warnings.simplefilter("always") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + from numpy.distutils import cpuinfo # noqa: F401 + + # Filter out annoying import messages. Want these in both develop and + # release mode. + pytest_args += [ + "-W ignore:Not importing directory", + "-W ignore:numpy.dtype size changed", + "-W ignore:numpy.ufunc size changed", + "-W ignore::UserWarning:cpuinfo", + ] + + # When testing matrices, ignore their PendingDeprecationWarnings + pytest_args += [ + "-W ignore:the matrix subclass is not", + "-W ignore:Importing from numpy.matlib is", + ] + + if doctests: + pytest_args += ["--doctest-modules"] + + if extra_argv: + pytest_args += list(extra_argv) + + if verbose > 1: + pytest_args += ["-" + "v" * (verbose - 1)] + + if coverage: + pytest_args += ["--cov=" + module_path] + + if label == "fast": + # not importing at the top level to avoid circular import of module + from numpy.testing import IS_PYPY + if IS_PYPY: + pytest_args += ["-m", "not slow and not slow_pypy"] + else: + pytest_args += ["-m", "not slow"] + + elif label != "full": + pytest_args += ["-m", label] + + if durations >= 0: + pytest_args += [f"--durations={durations}"] + + if tests is None: + tests = [self.module_name] + + pytest_args += ["--pyargs"] + list(tests) + + # run tests. + _show_numpy_info() + + try: + code = pytest.main(pytest_args) + except SystemExit as exc: + code = exc.code + + return code == 0 diff --git a/python/numpy/_pytesttester.pyi b/python/numpy/_pytesttester.pyi new file mode 100644 index 000000000..a12abb1c1 --- /dev/null +++ b/python/numpy/_pytesttester.pyi @@ -0,0 +1,18 @@ +from collections.abc import Iterable +from typing import Literal as L + +__all__ = ["PytestTester"] + +class PytestTester: + module_name: str + def __init__(self, module_name: str) -> None: ... 
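+    # Illustrative usage sketch (not part of the upstream stub): assuming a
+    # package wires the tester up as shown in ``_pytesttester.py``:
+    #
+    #   from numpy._pytesttester import PytestTester
+    #   test = PytestTester("numpy.fft")
+    #   ok = test(label="fast", verbose=2)  # True if the pytest run passed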
+ def __call__( + self, + label: L["fast", "full"] = ..., + verbose: int = ..., + extra_argv: Iterable[str] | None = ..., + doctests: L[False] = ..., + coverage: bool = ..., + durations: int = ..., + tests: Iterable[str] | None = ..., + ) -> bool: ... diff --git a/python/numpy/_typing/__init__.py b/python/numpy/_typing/__init__.py new file mode 100644 index 000000000..16a7eee66 --- /dev/null +++ b/python/numpy/_typing/__init__.py @@ -0,0 +1,148 @@ +"""Private counterpart of ``numpy.typing``.""" + +from ._array_like import ArrayLike as ArrayLike +from ._array_like import NDArray as NDArray +from ._array_like import _ArrayLike as _ArrayLike +from ._array_like import _ArrayLikeAnyString_co as _ArrayLikeAnyString_co +from ._array_like import _ArrayLikeBool_co as _ArrayLikeBool_co +from ._array_like import _ArrayLikeBytes_co as _ArrayLikeBytes_co +from ._array_like import _ArrayLikeComplex128_co as _ArrayLikeComplex128_co +from ._array_like import _ArrayLikeComplex_co as _ArrayLikeComplex_co +from ._array_like import _ArrayLikeDT64_co as _ArrayLikeDT64_co +from ._array_like import _ArrayLikeFloat64_co as _ArrayLikeFloat64_co +from ._array_like import _ArrayLikeFloat_co as _ArrayLikeFloat_co +from ._array_like import _ArrayLikeInt as _ArrayLikeInt +from ._array_like import _ArrayLikeInt_co as _ArrayLikeInt_co +from ._array_like import _ArrayLikeNumber_co as _ArrayLikeNumber_co +from ._array_like import _ArrayLikeObject_co as _ArrayLikeObject_co +from ._array_like import _ArrayLikeStr_co as _ArrayLikeStr_co +from ._array_like import _ArrayLikeString_co as _ArrayLikeString_co +from ._array_like import _ArrayLikeTD64_co as _ArrayLikeTD64_co +from ._array_like import _ArrayLikeUInt_co as _ArrayLikeUInt_co +from ._array_like import _ArrayLikeVoid_co as _ArrayLikeVoid_co +from ._array_like import _FiniteNestedSequence as _FiniteNestedSequence +from ._array_like import _SupportsArray as _SupportsArray +from ._array_like import _SupportsArrayFunc as _SupportsArrayFunc + +# +from ._char_codes import _BoolCodes as _BoolCodes +from ._char_codes import _ByteCodes as _ByteCodes +from ._char_codes import _BytesCodes as _BytesCodes +from ._char_codes import _CDoubleCodes as _CDoubleCodes +from ._char_codes import _CharacterCodes as _CharacterCodes +from ._char_codes import _CLongDoubleCodes as _CLongDoubleCodes +from ._char_codes import _Complex64Codes as _Complex64Codes +from ._char_codes import _Complex128Codes as _Complex128Codes +from ._char_codes import _ComplexFloatingCodes as _ComplexFloatingCodes +from ._char_codes import _CSingleCodes as _CSingleCodes +from ._char_codes import _DoubleCodes as _DoubleCodes +from ._char_codes import _DT64Codes as _DT64Codes +from ._char_codes import _FlexibleCodes as _FlexibleCodes +from ._char_codes import _Float16Codes as _Float16Codes +from ._char_codes import _Float32Codes as _Float32Codes +from ._char_codes import _Float64Codes as _Float64Codes +from ._char_codes import _FloatingCodes as _FloatingCodes +from ._char_codes import _GenericCodes as _GenericCodes +from ._char_codes import _HalfCodes as _HalfCodes +from ._char_codes import _InexactCodes as _InexactCodes +from ._char_codes import _Int8Codes as _Int8Codes +from ._char_codes import _Int16Codes as _Int16Codes +from ._char_codes import _Int32Codes as _Int32Codes +from ._char_codes import _Int64Codes as _Int64Codes +from ._char_codes import _IntCCodes as _IntCCodes +from ._char_codes import _IntCodes as _IntCodes +from ._char_codes import _IntegerCodes as _IntegerCodes +from ._char_codes import _IntPCodes as 
_IntPCodes +from ._char_codes import _LongCodes as _LongCodes +from ._char_codes import _LongDoubleCodes as _LongDoubleCodes +from ._char_codes import _LongLongCodes as _LongLongCodes +from ._char_codes import _NumberCodes as _NumberCodes +from ._char_codes import _ObjectCodes as _ObjectCodes +from ._char_codes import _ShortCodes as _ShortCodes +from ._char_codes import _SignedIntegerCodes as _SignedIntegerCodes +from ._char_codes import _SingleCodes as _SingleCodes +from ._char_codes import _StrCodes as _StrCodes +from ._char_codes import _StringCodes as _StringCodes +from ._char_codes import _TD64Codes as _TD64Codes +from ._char_codes import _UByteCodes as _UByteCodes +from ._char_codes import _UInt8Codes as _UInt8Codes +from ._char_codes import _UInt16Codes as _UInt16Codes +from ._char_codes import _UInt32Codes as _UInt32Codes +from ._char_codes import _UInt64Codes as _UInt64Codes +from ._char_codes import _UIntCCodes as _UIntCCodes +from ._char_codes import _UIntCodes as _UIntCodes +from ._char_codes import _UIntPCodes as _UIntPCodes +from ._char_codes import _ULongCodes as _ULongCodes +from ._char_codes import _ULongLongCodes as _ULongLongCodes +from ._char_codes import _UnsignedIntegerCodes as _UnsignedIntegerCodes +from ._char_codes import _UShortCodes as _UShortCodes +from ._char_codes import _VoidCodes as _VoidCodes + +# +from ._dtype_like import DTypeLike as DTypeLike +from ._dtype_like import _DTypeLike as _DTypeLike +from ._dtype_like import _DTypeLikeBool as _DTypeLikeBool +from ._dtype_like import _DTypeLikeBytes as _DTypeLikeBytes +from ._dtype_like import _DTypeLikeComplex as _DTypeLikeComplex +from ._dtype_like import _DTypeLikeComplex_co as _DTypeLikeComplex_co +from ._dtype_like import _DTypeLikeDT64 as _DTypeLikeDT64 +from ._dtype_like import _DTypeLikeFloat as _DTypeLikeFloat +from ._dtype_like import _DTypeLikeInt as _DTypeLikeInt +from ._dtype_like import _DTypeLikeObject as _DTypeLikeObject +from ._dtype_like import _DTypeLikeStr as _DTypeLikeStr +from ._dtype_like import _DTypeLikeTD64 as _DTypeLikeTD64 +from ._dtype_like import _DTypeLikeUInt as _DTypeLikeUInt +from ._dtype_like import _DTypeLikeVoid as _DTypeLikeVoid +from ._dtype_like import _SupportsDType as _SupportsDType +from ._dtype_like import _VoidDTypeLike as _VoidDTypeLike + +# +from ._nbit import _NBitByte as _NBitByte +from ._nbit import _NBitDouble as _NBitDouble +from ._nbit import _NBitHalf as _NBitHalf +from ._nbit import _NBitInt as _NBitInt +from ._nbit import _NBitIntC as _NBitIntC +from ._nbit import _NBitIntP as _NBitIntP +from ._nbit import _NBitLong as _NBitLong +from ._nbit import _NBitLongDouble as _NBitLongDouble +from ._nbit import _NBitLongLong as _NBitLongLong +from ._nbit import _NBitShort as _NBitShort +from ._nbit import _NBitSingle as _NBitSingle + +# +from ._nbit_base import ( + NBitBase as NBitBase, # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +) +from ._nbit_base import _8Bit as _8Bit +from ._nbit_base import _16Bit as _16Bit +from ._nbit_base import _32Bit as _32Bit +from ._nbit_base import _64Bit as _64Bit +from ._nbit_base import _96Bit as _96Bit +from ._nbit_base import _128Bit as _128Bit + +# +from ._nested_sequence import _NestedSequence as _NestedSequence + +# +from ._scalars import _BoolLike_co as _BoolLike_co +from ._scalars import _CharLike_co as _CharLike_co +from ._scalars import _ComplexLike_co as _ComplexLike_co +from ._scalars import _FloatLike_co as _FloatLike_co +from ._scalars import _IntLike_co as _IntLike_co +from ._scalars import 
_NumberLike_co as _NumberLike_co
+from ._scalars import _ScalarLike_co as _ScalarLike_co
+from ._scalars import _TD64Like_co as _TD64Like_co
+from ._scalars import _UIntLike_co as _UIntLike_co
+from ._scalars import _VoidLike_co as _VoidLike_co
+
+#
+from ._shape import _AnyShape as _AnyShape
+from ._shape import _Shape as _Shape
+from ._shape import _ShapeLike as _ShapeLike
+
+#
+from ._ufunc import _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1
+from ._ufunc import _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1
+from ._ufunc import _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2
+from ._ufunc import _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1
+from ._ufunc import _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2
diff --git a/python/numpy/_typing/_add_docstring.py b/python/numpy/_typing/_add_docstring.py
new file mode 100644
index 000000000..5330a6b3b
--- /dev/null
+++ b/python/numpy/_typing/_add_docstring.py
@@ -0,0 +1,153 @@
+"""A module for creating docstrings for sphinx ``data`` domains."""
+
+import re
+import textwrap
+
+from ._array_like import NDArray
+
+_docstrings_list = []
+
+
+def add_newdoc(name: str, value: str, doc: str) -> None:
+    """Append ``_docstrings_list`` with a docstring for `name`.
+
+    Parameters
+    ----------
+    name : str
+        The name of the object.
+    value : str
+        A string-representation of the object.
+    doc : str
+        The docstring of the object.
+
+    """
+    _docstrings_list.append((name, value, doc))
+
+
+def _parse_docstrings() -> str:
+    """Convert all docstrings in ``_docstrings_list`` into a single
+    sphinx-legible text block.
+
+    """
+    type_list_ret = []
+    for name, value, doc in _docstrings_list:
+        s = textwrap.dedent(doc).replace("\n", "\n    ")
+
+        # Replace sections by rubrics
+        lines = s.split("\n")
+        new_lines = []
+        indent = ""
+        for line in lines:
+            m = re.match(r'^(\s+)[-=]+\s*$', line)
+            if m and new_lines:
+                prev = textwrap.dedent(new_lines.pop())
+                if prev == "Examples":
+                    indent = ""
+                    new_lines.append(f'{m.group(1)}.. rubric:: {prev}')
+                else:
+                    indent = 4 * " "
+                    new_lines.append(f'{m.group(1)}.. admonition:: {prev}')
+                new_lines.append("")
+            else:
+                new_lines.append(f"{indent}{line}")
+
+        s = "\n".join(new_lines)
+        s_block = f""".. data:: {name}\n    :value: {value}\n    {s}"""
+        type_list_ret.append(s_block)
+    return "\n".join(type_list_ret)
+
+
+add_newdoc('ArrayLike', 'typing.Union[...]',
+    """
+    A `~typing.Union` representing objects that can be coerced
+    into an `~numpy.ndarray`.
+
+    Among others this includes the likes of:
+
+    * Scalars.
+    * (Nested) sequences.
+    * Objects implementing the `~class.__array__` protocol.
+
+    .. versionadded:: 1.20
+
+    See Also
+    --------
+    :term:`array_like`:
+        Any scalar or sequence that can be interpreted as an ndarray.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        >>> import numpy as np
+        >>> import numpy.typing as npt
+
+        >>> def as_array(a: npt.ArrayLike) -> np.ndarray:
+        ...     return np.array(a)
+
+    """)
+
+add_newdoc('DTypeLike', 'typing.Union[...]',
+    """
+    A `~typing.Union` representing objects that can be coerced
+    into a `~numpy.dtype`.
+
+    Among others this includes the likes of:
+
+    * :class:`type` objects.
+    * Character codes or the names of :class:`type` objects.
+    * Objects with the ``.dtype`` attribute.
+
+    .. versionadded:: 1.20
+
+    See Also
+    --------
+    :ref:`Specifying and constructing data types <arrays.dtypes>`
+        A comprehensive overview of all objects that can be coerced
+        into data types.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        >>> import numpy as np
+        >>> import numpy.typing as npt
+
+        >>> def as_dtype(d: npt.DTypeLike) -> np.dtype:
+        ...     return np.dtype(d)
+
+    """)
+
+add_newdoc('NDArray', repr(NDArray),
+    """
+    A `np.ndarray[tuple[Any, ...], np.dtype[ScalarT]] <numpy.ndarray>`
+    type alias :term:`generic <generic type>` w.r.t. its
+    `dtype.type <numpy.dtype.type>`.
+
+    Can be used during runtime for typing arrays with a given dtype
+    and unspecified shape.
+
+    .. versionadded:: 1.21
+
+    Examples
+    --------
+    .. code-block:: python
+
+        >>> import numpy as np
+        >>> import numpy.typing as npt
+
+        >>> print(npt.NDArray)
+        numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[~_ScalarT]]
+
+        >>> print(npt.NDArray[np.float64])
+        numpy.ndarray[tuple[typing.Any, ...], numpy.dtype[numpy.float64]]
+
+        >>> NDArrayInt = npt.NDArray[np.int_]
+        >>> a: NDArrayInt = np.arange(10)
+
+        >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]:
+        ...     return np.array(a)
+
+    """)
+
+_docstrings = _parse_docstrings()
diff --git a/python/numpy/_typing/_array_like.py b/python/numpy/_typing/_array_like.py
new file mode 100644
index 000000000..6b071f4a0
--- /dev/null
+++ b/python/numpy/_typing/_array_like.py
@@ -0,0 +1,106 @@
+import sys
+from collections.abc import Callable, Collection, Sequence
+from typing import TYPE_CHECKING, Any, Protocol, TypeAlias, TypeVar, runtime_checkable
+
+import numpy as np
+from numpy import dtype
+
+from ._nbit_base import _32Bit, _64Bit
+from ._nested_sequence import _NestedSequence
+from ._shape import _AnyShape
+
+if TYPE_CHECKING:
+    StringDType = np.dtypes.StringDType
+else:
+    # at runtime outside of type checking importing this from numpy.dtypes
+    # would lead to a circular import
+    from numpy._core.multiarray import StringDType
+
+_T = TypeVar("_T")
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_DTypeT = TypeVar("_DTypeT", bound=dtype[Any])
+_DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any])
+
+NDArray: TypeAlias = np.ndarray[_AnyShape, dtype[_ScalarT]]
+
+# The `_SupportsArray` protocol only cares about the default dtype
+# (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned
+# array.
+# Concrete implementations of the protocol are responsible for adding
+# any and all remaining overloads
+@runtime_checkable
+class _SupportsArray(Protocol[_DTypeT_co]):
+    def __array__(self) -> np.ndarray[Any, _DTypeT_co]: ...
+
+
+@runtime_checkable
+class _SupportsArrayFunc(Protocol):
+    """A protocol class representing `~class.__array_function__`."""
+    def __array_function__(
+        self,
+        func: Callable[..., Any],
+        types: Collection[type[Any]],
+        args: tuple[Any, ...],
+        kwargs: dict[str, Any],
+    ) -> object: ...
+
+
+# TODO: Wait until mypy supports recursive objects in combination with typevars
+_FiniteNestedSequence: TypeAlias = (
+    _T
+    | Sequence[_T]
+    | Sequence[Sequence[_T]]
+    | Sequence[Sequence[Sequence[_T]]]
+    | Sequence[Sequence[Sequence[Sequence[_T]]]]
+)
+
+# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic`
+_ArrayLike: TypeAlias = (
+    _SupportsArray[dtype[_ScalarT]]
+    | _NestedSequence[_SupportsArray[dtype[_ScalarT]]]
+)
+
+# A union representing array-like objects; consists of two typevars:
+# One representing types that can be parametrized w.r.t. `np.dtype`
+# and another one for the rest
+_DualArrayLike: TypeAlias = (
+    _SupportsArray[_DTypeT]
+    | _NestedSequence[_SupportsArray[_DTypeT]]
+    | _T
+    | _NestedSequence[_T]
+)
+
+if sys.version_info >= (3, 12):
+    from collections.abc import Buffer as _Buffer
+else:
+    @runtime_checkable
+    class _Buffer(Protocol):
+        def __buffer__(self, flags: int, /) -> memoryview: ...
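+
+# Illustrative aside (not part of the upstream file): `_SupportsArray` is a
+# runtime-checkable protocol, so a structural isinstance() check works:
+#
+#   import numpy as np
+#   isinstance(np.zeros(3), _SupportsArray)  # True: ndarray defines __array__
+#   isinstance([1.0, 2.0], _SupportsArray)   # False: a plain list does not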
+
+ArrayLike: TypeAlias = _Buffer | _DualArrayLike[dtype[Any], complex | bytes | str]
+
+# `_ArrayLike<X>_co`: array-like objects that can be coerced into `X`
+# given the casting rules `same_kind`
+_ArrayLikeBool_co: TypeAlias = _DualArrayLike[dtype[np.bool], bool]
+_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.unsignedinteger], bool]
+_ArrayLikeInt_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer], int]
+_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.floating], float]
+_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.number], complex]
+_ArrayLikeNumber_co: TypeAlias = _ArrayLikeComplex_co
+_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[dtype[np.bool | np.integer | np.timedelta64], int]
+_ArrayLikeDT64_co: TypeAlias = _ArrayLike[np.datetime64]
+_ArrayLikeObject_co: TypeAlias = _ArrayLike[np.object_]
+
+_ArrayLikeVoid_co: TypeAlias = _ArrayLike[np.void]
+_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[dtype[np.bytes_], bytes]
+_ArrayLikeStr_co: TypeAlias = _DualArrayLike[dtype[np.str_], str]
+_ArrayLikeString_co: TypeAlias = _DualArrayLike[StringDType, str]
+_ArrayLikeAnyString_co: TypeAlias = _DualArrayLike[dtype[np.character] | StringDType, bytes | str]
+
+__Float64_co: TypeAlias = np.floating[_64Bit] | np.float32 | np.float16 | np.integer | np.bool
+__Complex128_co: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.float16 | np.integer | np.bool
+_ArrayLikeFloat64_co: TypeAlias = _DualArrayLike[dtype[__Float64_co], float]
+_ArrayLikeComplex128_co: TypeAlias = _DualArrayLike[dtype[__Complex128_co], complex]
+
+# NOTE: This includes `builtins.bool`, but not `numpy.bool`.
+_ArrayLikeInt: TypeAlias = _DualArrayLike[dtype[np.integer], int]
diff --git a/python/numpy/_typing/_char_codes.py b/python/numpy/_typing/_char_codes.py
new file mode 100644
index 000000000..7b6fad228
--- /dev/null
+++ b/python/numpy/_typing/_char_codes.py
@@ -0,0 +1,213 @@
+from typing import Literal
+
+_BoolCodes = Literal[
+    "bool", "bool_",
+    "?", "|?", "=?", "<?", ">?",
+    "b1", "|b1", "=b1", "<b1", ">b1",
+]  # fmt: skip
+
+_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "<u1", ">u1"]
+_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "<u2", ">u2"]
+_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "<u4", ">u4"]
+_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "<u8", ">u8"]
+
+_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "<i1", ">i1"]
+_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "<i2", ">i2"]
+_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "<i4", ">i4"]
+_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "<i8", ">i8"]
+
+_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "<f2", ">f2"]
+_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "<f4", ">f4"]
+_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "<f8", ">f8"]
+
+_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "<c8", ">c8"]
+_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "<c16", ">c16"]
+
+_ByteCodes = Literal["byte", "b", "|b", "=b", "<b", ">b"]
+_ShortCodes = Literal["short", "h", "|h", "=h", "<h", ">h"]
+_IntCCodes = Literal["intc", "i", "|i", "=i", "<i", ">i"]
+_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "<n", ">n"]
+_LongCodes = Literal["long", "l", "|l", "=l", "<l", ">l"]
+_IntCodes = _IntPCodes
+_LongLongCodes = Literal["longlong", "q", "|q", "=q", "<q", ">q"]
+
+_UByteCodes = Literal["ubyte", "B", "|B", "=B", "<B", ">B"]
+_UShortCodes = Literal["ushort", "H", "|H", "=H", "<H", ">H"]
+_UIntCCodes = Literal["uintc", "I", "|I", "=I", "<I", ">I"]
+_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "<N", ">N"]
+_ULongCodes = Literal["ulong", "L", "|L", "=L", "<L", ">L"]
+_UIntCodes = _UIntPCodes
+_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "<Q", ">Q"]
+
+_HalfCodes = Literal["half", "e", "|e", "=e", "<e", ">e"]
+_SingleCodes = Literal["single", "f", "|f", "=f", "<f", ">f"]
+_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "<d", ">d"]
+_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "<g", ">g"]
+
+_CSingleCodes = Literal["csingle", "F", "|F", "=F", "<F", ">F"]
+_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "<D", ">D"]
+_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "<G", ">G"]
+
+_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "<U", ">U"]
+_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "<S", ">S"]
+_VoidCodes = Literal["void", "V", "|V", "=V", "<V", ">V"]
+_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "<O", ">O"]
+
+_DT64Codes = Literal[
+    "datetime64", "|datetime64", "=datetime64",
+    "<datetime64", ">datetime64",
+    "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]",
+    "<datetime64[Y]", ">datetime64[Y]",
+    "datetime64[M]", "|datetime64[M]", "=datetime64[M]",
+    "<datetime64[M]", ">datetime64[M]",
+    "datetime64[W]", "|datetime64[W]", "=datetime64[W]",
+    "<datetime64[W]", ">datetime64[W]",
+    "datetime64[D]", "|datetime64[D]", "=datetime64[D]",
+    "<datetime64[D]", ">datetime64[D]",
+    "datetime64[h]", "|datetime64[h]", "=datetime64[h]",
+    "<datetime64[h]", ">datetime64[h]",
+    "datetime64[m]", "|datetime64[m]", "=datetime64[m]",
+    "<datetime64[m]", ">datetime64[m]",
+    "datetime64[s]", "|datetime64[s]", "=datetime64[s]",
+    "<datetime64[s]", ">datetime64[s]",
+    "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]",
+    "<datetime64[ms]", ">datetime64[ms]",
+    "datetime64[us]", "|datetime64[us]", "=datetime64[us]",
+    "<datetime64[us]", ">datetime64[us]",
+    "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]",
+    "<datetime64[ns]", ">datetime64[ns]",
+    "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]",
+    "<datetime64[ps]", ">datetime64[ps]",
+    "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]",
+    "<datetime64[fs]", ">datetime64[fs]",
+    "datetime64[as]", "|datetime64[as]", "=datetime64[as]",
+    "<datetime64[as]", ">datetime64[as]",
+    "M", "|M", "=M", "<M", ">M",
+    "M8", "|M8", "=M8", "<M8", ">M8",
+    "M8[Y]", "|M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
+    "M8[M]", "|M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
+    "M8[W]", "|M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
+    "M8[D]", "|M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
+    "M8[h]", "|M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
+    "M8[m]", "|M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
+    "M8[s]", "|M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
+    "M8[ms]", "|M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
+    "M8[us]", "|M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
+    "M8[ns]", "|M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
+    "M8[ps]", "|M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
+    "M8[fs]", "|M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
+    "M8[as]", "|M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+]
+_TD64Codes = Literal[
+    "timedelta64", "|timedelta64", "=timedelta64",
+    "<timedelta64", ">timedelta64",
+    "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]",
+    "<timedelta64[Y]", ">timedelta64[Y]",
+    "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]",
+    "<timedelta64[M]", ">timedelta64[M]",
+    "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]",
+    "<timedelta64[W]", ">timedelta64[W]",
+    "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]",
+    "<timedelta64[D]", ">timedelta64[D]",
+    "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]",
+    "<timedelta64[h]", ">timedelta64[h]",
+    "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]",
+    "<timedelta64[m]", ">timedelta64[m]",
+    "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]",
+    "<timedelta64[s]", ">timedelta64[s]",
+    "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]",
+    "<timedelta64[ms]", ">timedelta64[ms]",
+    "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]",
+    "<timedelta64[us]", ">timedelta64[us]",
+    "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]",
+    "<timedelta64[ns]", ">timedelta64[ns]",
+    "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]",
+    "<timedelta64[ps]", ">timedelta64[ps]",
+    "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]",
+    "<timedelta64[fs]", ">timedelta64[fs]",
+    "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]",
+    "<timedelta64[as]", ">timedelta64[as]",
+    "m", "|m", "=m", "<m", ">m",
+    "m8", "|m8", "=m8", "<m8", ">m8",
+    "m8[Y]", "|m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
+    "m8[M]", "|m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
+    "m8[W]", "|m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
+    "m8[D]", "|m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
+    "m8[h]", "|m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
+    "m8[m]", "|m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
+    "m8[s]", "|m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
+    "m8[ms]", "|m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
+    "m8[us]", "|m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
+    "m8[ns]", "|m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
+    "m8[ps]", "|m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
+    "m8[fs]", "|m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
+    "m8[as]", "|m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+]
+
+# NOTE: `StringDType` has no scalar type, and therefore has no name that can
+# be passed to the `dtype` constructor
+_StringCodes = Literal["T", "|T", "=T", "<T", ">T"]
+
+# NOTE: Nested literals get flattened and de-duplicated at runtime, which isn't
+# the case for a `Union` of `Literal`s.
+# So even though they're equivalent when type-checking, they differ at runtime.
+# Another advantage of nesting is that they always have a "flat"
+# `Literal.__args__`, which is a tuple of *literally* all its literal values.
+
+_UnsignedIntegerCodes = Literal[
+    _UInt8Codes,
+    _UInt16Codes,
+    _UInt32Codes,
+    _UInt64Codes,
+    _UIntCodes,
+    _UByteCodes,
+    _UShortCodes,
+    _UIntCCodes,
+    _ULongCodes,
+    _ULongLongCodes,
+]
+_SignedIntegerCodes = Literal[
+    _Int8Codes,
+    _Int16Codes,
+    _Int32Codes,
+    _Int64Codes,
+    _IntCodes,
+    _ByteCodes,
+    _ShortCodes,
+    _IntCCodes,
+    _LongCodes,
+    _LongLongCodes,
+]
+_FloatingCodes = Literal[
+    _Float16Codes,
+    _Float32Codes,
+    _Float64Codes,
+    _HalfCodes,
+    _SingleCodes,
+    _DoubleCodes,
+    _LongDoubleCodes
+]
+_ComplexFloatingCodes = Literal[
+    _Complex64Codes,
+    _Complex128Codes,
+    _CSingleCodes,
+    _CDoubleCodes,
+    _CLongDoubleCodes,
+]
+_IntegerCodes = Literal[_UnsignedIntegerCodes, _SignedIntegerCodes]
+_InexactCodes = Literal[_FloatingCodes, _ComplexFloatingCodes]
+_NumberCodes = Literal[_IntegerCodes, _InexactCodes]
+
+_CharacterCodes = Literal[_StrCodes, _BytesCodes]
+_FlexibleCodes = Literal[_VoidCodes, _CharacterCodes]
+
+_GenericCodes = Literal[
+    _BoolCodes,
+    _NumberCodes,
+    _FlexibleCodes,
+    _DT64Codes,
+    _TD64Codes,
+    _ObjectCodes,
+    # TODO: add `_StringCodes` once it has a scalar type
+    # _StringCodes,
+]
diff --git a/python/numpy/_typing/_dtype_like.py b/python/numpy/_typing/_dtype_like.py
new file mode 100644
index 000000000..c406b3098
--- /dev/null
+++ b/python/numpy/_typing/_dtype_like.py
@@ -0,0 +1,114 @@
+from collections.abc import Sequence  # noqa: F811
+from typing import (
+    Any,
+    Protocol,
+    TypeAlias,
+    TypedDict,
+    TypeVar,
+    runtime_checkable,
+)
+
+import numpy as np
+
+from ._char_codes import (
+    _BoolCodes,
+    _BytesCodes,
+    _ComplexFloatingCodes,
+    _DT64Codes,
+    _FloatingCodes,
+    _NumberCodes,
+    _ObjectCodes,
+    _SignedIntegerCodes,
+    _StrCodes,
+    _TD64Codes,
+    _UnsignedIntegerCodes,
+    _VoidCodes,
+)
+
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, covariant=True)
+
+_DTypeLikeNested: TypeAlias = Any  # TODO: wait for support for recursive types
+
+
+# Mandatory keys
+class _DTypeDictBase(TypedDict):
+    names: Sequence[str]
+    formats: Sequence[_DTypeLikeNested]
+
+
+# Mandatory + optional keys
+class _DTypeDict(_DTypeDictBase, total=False):
+    # Only `str` elements are usable as indexing aliases,
+    # but `titles` can in principle accept any object
+    offsets: Sequence[int]
+    titles: Sequence[Any]
+    itemsize: int
+    aligned: bool
+
+
+# A protocol for anything with the dtype attribute
+@runtime_checkable
+class _SupportsDType(Protocol[_DTypeT_co]):
+    @property
+    def dtype(self) -> _DTypeT_co: ...
+
+
+# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic`
+_DTypeLike: TypeAlias = type[_ScalarT] | np.dtype[_ScalarT] | _SupportsDType[np.dtype[_ScalarT]]
+
+
+# Would create a dtype[np.void]
+_VoidDTypeLike: TypeAlias = (
+    # If a tuple, then it can be either:
+    # - (flexible_dtype, itemsize)
+    # - (fixed_dtype, shape)
+    # - (base_dtype, new_dtype)
+    # But because `_DTypeLikeNested = Any`, the first two cases are redundant
+    # tuple[_DTypeLikeNested, int] | tuple[_DTypeLikeNested, _ShapeLike] |
+    tuple[_DTypeLikeNested, _DTypeLikeNested]
+    # [(field_name, field_dtype, field_shape), ...]
+    # The type here is quite broad because NumPy accepts quite a wide
+    # range of inputs inside the list; see the tests for some examples.
+    | list[Any]
+    # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., 'itemsize': ...}
+    | _DTypeDict
+)
+
+# Aliases for commonly used dtype-like objects.
+# Note that the precision of `np.number` subclasses is ignored herein.
+_DTypeLikeBool: TypeAlias = type[bool] | _DTypeLike[np.bool] | _BoolCodes
+_DTypeLikeInt: TypeAlias = (
+    type[int] | _DTypeLike[np.signedinteger] | _SignedIntegerCodes
+)
+_DTypeLikeUInt: TypeAlias = _DTypeLike[np.unsignedinteger] | _UnsignedIntegerCodes
+_DTypeLikeFloat: TypeAlias = type[float] | _DTypeLike[np.floating] | _FloatingCodes
+_DTypeLikeComplex: TypeAlias = (
+    type[complex] | _DTypeLike[np.complexfloating] | _ComplexFloatingCodes
+)
+_DTypeLikeComplex_co: TypeAlias = (
+    type[complex] | _DTypeLike[np.bool | np.number] | _BoolCodes | _NumberCodes
+)
+_DTypeLikeDT64: TypeAlias = _DTypeLike[np.datetime64] | _DT64Codes
+_DTypeLikeTD64: TypeAlias = _DTypeLike[np.timedelta64] | _TD64Codes
+_DTypeLikeBytes: TypeAlias = type[bytes] | _DTypeLike[np.bytes_] | _BytesCodes
+_DTypeLikeStr: TypeAlias = type[str] | _DTypeLike[np.str_] | _StrCodes
+_DTypeLikeVoid: TypeAlias = (
+    type[memoryview] | _DTypeLike[np.void] | _VoidDTypeLike | _VoidCodes
+)
+_DTypeLikeObject: TypeAlias = type[object] | _DTypeLike[np.object_] | _ObjectCodes
+
+
+# Anything that can be coerced into numpy.dtype.
+# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
+DTypeLike: TypeAlias = _DTypeLike[Any] | _VoidDTypeLike | str | None
+
+# NOTE: while it is possible to provide the dtype as a dict of
+# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`),
+# this syntax is officially discouraged and
+# therefore not included in the type-union defining `DTypeLike`.
+#
+# See https://github.com/numpy/numpy/issues/16891 for more details.
diff --git a/python/numpy/_typing/_extended_precision.py b/python/numpy/_typing/_extended_precision.py
new file mode 100644
index 000000000..c707e726a
--- /dev/null
+++ b/python/numpy/_typing/_extended_precision.py
@@ -0,0 +1,15 @@
+"""A module with platform-specific extended precision
+`numpy.number` subclasses.
+
+The subclasses are defined here (instead of ``__init__.pyi``) such
+that they can be imported conditionally via numpy's mypy plugin.
+"""
+
+import numpy as np
+
+from .
import _96Bit, _128Bit + +float96 = np.floating[_96Bit] +float128 = np.floating[_128Bit] +complex192 = np.complexfloating[_96Bit, _96Bit] +complex256 = np.complexfloating[_128Bit, _128Bit] diff --git a/python/numpy/_typing/_nbit.py b/python/numpy/_typing/_nbit.py new file mode 100644 index 000000000..60bce3245 --- /dev/null +++ b/python/numpy/_typing/_nbit.py @@ -0,0 +1,19 @@ +"""A module with the precisions of platform-specific `~numpy.number`s.""" + +from typing import TypeAlias + +from ._nbit_base import _8Bit, _16Bit, _32Bit, _64Bit, _96Bit, _128Bit + +# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin +_NBitByte: TypeAlias = _8Bit +_NBitShort: TypeAlias = _16Bit +_NBitIntC: TypeAlias = _32Bit +_NBitIntP: TypeAlias = _32Bit | _64Bit +_NBitInt: TypeAlias = _NBitIntP +_NBitLong: TypeAlias = _32Bit | _64Bit +_NBitLongLong: TypeAlias = _64Bit + +_NBitHalf: TypeAlias = _16Bit +_NBitSingle: TypeAlias = _32Bit +_NBitDouble: TypeAlias = _64Bit +_NBitLongDouble: TypeAlias = _64Bit | _96Bit | _128Bit diff --git a/python/numpy/_typing/_nbit_base.py b/python/numpy/_typing/_nbit_base.py new file mode 100644 index 000000000..28d3e63c1 --- /dev/null +++ b/python/numpy/_typing/_nbit_base.py @@ -0,0 +1,94 @@ +"""A module with the precisions of generic `~numpy.number` types.""" +from typing import final + +from numpy._utils import set_module + + +@final # Disallow the creation of arbitrary `NBitBase` subclasses +@set_module("numpy.typing") +class NBitBase: + """ + A type representing `numpy.number` precision during static type checking. + + Used exclusively for the purpose of static type checking, `NBitBase` + represents the base of a hierarchical set of subclasses. + Each subsequent subclass is herein used for representing a lower level + of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. + + .. versionadded:: 1.20 + + .. deprecated:: 2.3 + Use ``@typing.overload`` or a ``TypeVar`` with a scalar-type as upper + bound, instead. + + Examples + -------- + Below is a typical usage example: `NBitBase` is herein used for annotating + a function that takes a float and integer of arbitrary precision + as arguments and returns a new float of whichever precision is largest + (*e.g.* ``np.float16 + np.int64 -> np.float64``). + + .. code-block:: python + + >>> from typing import TypeVar, TYPE_CHECKING + >>> import numpy as np + >>> import numpy.typing as npt + + >>> S = TypeVar("S", bound=npt.NBitBase) + >>> T = TypeVar("T", bound=npt.NBitBase) + + >>> def add(a: np.floating[S], b: np.integer[T]) -> np.floating[S | T]: + ... return a + b + + >>> a = np.float16() + >>> b = np.int64() + >>> out = add(a, b) + + >>> if TYPE_CHECKING: + ... reveal_locals() + ... # note: Revealed local types are: + ... # note: a: numpy.floating[numpy.typing._16Bit*] + ... # note: b: numpy.signedinteger[numpy.typing._64Bit*] + ... 
# note: out: numpy.floating[numpy.typing._64Bit*]
+
+    """
+    # Deprecated in NumPy 2.3, 2025-05-01
+
+    def __init_subclass__(cls) -> None:
+        allowed_names = {
+            "NBitBase", "_128Bit", "_96Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit"
+        }
+        if cls.__name__ not in allowed_names:
+            raise TypeError('cannot inherit from final class "NBitBase"')
+        super().__init_subclass__()
+
+@final
+@set_module("numpy._typing")
+# Silence errors about subclassing a `@final`-decorated class
+class _128Bit(NBitBase):  # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
+    pass
+
+@final
+@set_module("numpy._typing")
+class _96Bit(_128Bit):  # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
+    pass
+
+@final
+@set_module("numpy._typing")
+class _64Bit(_96Bit):  # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
+    pass
+
+@final
+@set_module("numpy._typing")
+class _32Bit(_64Bit):  # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
+    pass
+
+@final
+@set_module("numpy._typing")
+class _16Bit(_32Bit):  # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
+    pass
+
+@final
+@set_module("numpy._typing")
+class _8Bit(_16Bit):  # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
+    pass
diff --git a/python/numpy/_typing/_nbit_base.pyi b/python/numpy/_typing/_nbit_base.pyi
new file mode 100644
index 000000000..ccf8f5cea
--- /dev/null
+++ b/python/numpy/_typing/_nbit_base.pyi
@@ -0,0 +1,40 @@
+# pyright: reportDeprecated=false
+# pyright: reportGeneralTypeIssues=false
+# mypy: disable-error-code=misc
+
+from typing import final
+
+from typing_extensions import deprecated
+
+# Deprecated in NumPy 2.3, 2025-05-01
+@deprecated(
+    "`NBitBase` is deprecated and will be removed from numpy.typing in the "
+    "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper "
+    "bound, instead. (deprecated in NumPy 2.3)",
+)
+@final
+class NBitBase: ...
+
+@final
+class _256Bit(NBitBase): ...
+
+@final
+class _128Bit(_256Bit): ...
+
+@final
+class _96Bit(_128Bit): ...
+
+@final
+class _80Bit(_96Bit): ...
+
+@final
+class _64Bit(_80Bit): ...
+
+@final
+class _32Bit(_64Bit): ...
+
+@final
+class _16Bit(_32Bit): ...
+
+@final
+class _8Bit(_16Bit): ...
diff --git a/python/numpy/_typing/_nested_sequence.py b/python/numpy/_typing/_nested_sequence.py
new file mode 100644
index 000000000..e3362a9f2
--- /dev/null
+++ b/python/numpy/_typing/_nested_sequence.py
@@ -0,0 +1,79 @@
+"""A module containing the `_NestedSequence` protocol."""
+
+from typing import TYPE_CHECKING, Any, Protocol, TypeVar, runtime_checkable
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+__all__ = ["_NestedSequence"]
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+@runtime_checkable
+class _NestedSequence(Protocol[_T_co]):
+    """A protocol for representing nested sequences.
+
+    Warning
+    -------
+    `_NestedSequence` currently does not work in combination with typevars,
+    *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``.
+
+    See Also
+    --------
+    collections.abc.Sequence
+        ABCs for read-only and mutable :term:`sequences`.
+
+    Examples
+    --------
+    .. code-block:: python
+
+        >>> from typing import TYPE_CHECKING
+        >>> import numpy as np
+        >>> from numpy._typing import _NestedSequence
+
+        >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
+        ...     return np.asarray(seq).dtype
+
+        >>> a = get_dtype([1.0])
+        >>> b = get_dtype([[1.0]])
+        >>> c = get_dtype([[[1.0]]])
+        >>> d = get_dtype([[[[1.0]]]])
+
+        >>> if TYPE_CHECKING:
+        ...     reveal_locals()
+        ...     # note: Revealed local types are:
+        ...     # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+        ...     # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+        ...     # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+        ...     # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+
+    """
+
+    def __len__(self, /) -> int:
+        """Implement ``len(self)``."""
+        raise NotImplementedError
+
+    def __getitem__(self, index: int, /) -> "_T_co | _NestedSequence[_T_co]":
+        """Implement ``self[x]``."""
+        raise NotImplementedError
+
+    def __contains__(self, x: object, /) -> bool:
+        """Implement ``x in self``."""
+        raise NotImplementedError
+
+    def __iter__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]":
+        """Implement ``iter(self)``."""
+        raise NotImplementedError
+
+    def __reversed__(self, /) -> "Iterator[_T_co | _NestedSequence[_T_co]]":
+        """Implement ``reversed(self)``."""
+        raise NotImplementedError
+
+    def count(self, value: Any, /) -> int:
+        """Return the number of occurrences of `value`."""
+        raise NotImplementedError
+
+    def index(self, value: Any, /) -> int:
+        """Return the first index of `value`."""
+        raise NotImplementedError
diff --git a/python/numpy/_typing/_scalars.py b/python/numpy/_typing/_scalars.py
new file mode 100644
index 000000000..b0de66d89
--- /dev/null
+++ b/python/numpy/_typing/_scalars.py
@@ -0,0 +1,20 @@
+from typing import Any, TypeAlias
+
+import numpy as np
+
+# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
+# `np.bytes_` are already subclasses of their builtin counterparts
+_CharLike_co: TypeAlias = str | bytes
+
+# The `_<X>Like_co` type-aliases below represent all scalars that can be
+# coerced into `<X>` (with the casting rule `same_kind`)
+_BoolLike_co: TypeAlias = bool | np.bool
+_UIntLike_co: TypeAlias = bool | np.unsignedinteger | np.bool
+_IntLike_co: TypeAlias = int | np.integer | np.bool
+_FloatLike_co: TypeAlias = float | np.floating | np.integer | np.bool
+_ComplexLike_co: TypeAlias = complex | np.number | np.bool
+_NumberLike_co: TypeAlias = _ComplexLike_co
+_TD64Like_co: TypeAlias = int | np.timedelta64 | np.integer | np.bool
+# `_VoidLike_co` is technically not a scalar, but it's close enough
+_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void
+_ScalarLike_co: TypeAlias = complex | str | bytes | np.generic
diff --git a/python/numpy/_typing/_shape.py b/python/numpy/_typing/_shape.py
new file mode 100644
index 000000000..e297aef2f
--- /dev/null
+++ b/python/numpy/_typing/_shape.py
@@ -0,0 +1,8 @@
+from collections.abc import Sequence
+from typing import Any, SupportsIndex, TypeAlias
+
+_Shape: TypeAlias = tuple[int, ...]
+_AnyShape: TypeAlias = tuple[Any, ...]
+
+# Anything that can be coerced to a shape tuple
+_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex]
diff --git a/python/numpy/_typing/_ufunc.py b/python/numpy/_typing/_ufunc.py
new file mode 100644
index 000000000..db52a1fdb
--- /dev/null
+++ b/python/numpy/_typing/_ufunc.py
@@ -0,0 +1,7 @@
+from numpy import ufunc
+
+_UFunc_Nin1_Nout1 = ufunc
+_UFunc_Nin2_Nout1 = ufunc
+_UFunc_Nin1_Nout2 = ufunc
+_UFunc_Nin2_Nout2 = ufunc
+_GUFunc_Nin2_Nout1 = ufunc
diff --git a/python/numpy/_typing/_ufunc.pyi b/python/numpy/_typing/_ufunc.pyi
new file mode 100644
index 000000000..766cde1ad
--- /dev/null
+++ b/python/numpy/_typing/_ufunc.pyi
@@ -0,0 +1,941 @@
+"""A module with private type-check-only `numpy.ufunc` subclasses.
+
+The signatures of the ufuncs are too varied to reasonably type
So instead, `ufunc` has been expanded into +four private subclasses, one for each combination of +`~ufunc.nin` and `~ufunc.nout`. +""" + +from typing import ( + Any, + Generic, + Literal, + LiteralString, + NoReturn, + Protocol, + SupportsIndex, + TypeAlias, + TypedDict, + TypeVar, + Unpack, + overload, + type_check_only, +) + +import numpy as np +from numpy import _CastingKind, _OrderKACF, ufunc +from numpy.typing import NDArray + +from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co +from ._dtype_like import DTypeLike +from ._scalars import _ScalarLike_co +from ._shape import _ShapeLike + +_T = TypeVar("_T") +_2Tuple: TypeAlias = tuple[_T, _T] +_3Tuple: TypeAlias = tuple[_T, _T, _T] +_4Tuple: TypeAlias = tuple[_T, _T, _T, _T] + +_2PTuple: TypeAlias = tuple[_T, _T, *tuple[_T, ...]] +_3PTuple: TypeAlias = tuple[_T, _T, _T, *tuple[_T, ...]] +_4PTuple: TypeAlias = tuple[_T, _T, _T, _T, *tuple[_T, ...]] + +_NTypes = TypeVar("_NTypes", bound=int, covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) + +_NIn = TypeVar("_NIn", bound=int, covariant=True) +_NOut = TypeVar("_NOut", bound=int, covariant=True) +_ReturnType_co = TypeVar("_ReturnType_co", covariant=True) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any]) + +@type_check_only +class _SupportsArrayUFunc(Protocol): + def __array_ufunc__( + self, + ufunc: ufunc, + method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + +@type_check_only +class _UFunc3Kwargs(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + subok: bool + signature: _3Tuple[str | None] | str | None + +# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for +# ufuncs that don't accept two input arguments and return one output argument. +# In such cases the respective methods return `NoReturn` + +# NOTE: Similarly, `at` won't be defined for ufuncs that return +# multiple outputs; in such cases `at` is typed to return `NoReturn` + +# NOTE: If 2 output types are returned then `out` must be a +# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable + +# pyright: reportIncompatibleMethodOverride=false + +@type_check_only +class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + out: None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> NDArray[Any]: ... 
+ @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _2Tuple[str | None] = ..., + ) -> Any: ... + + def at( + self, + a: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + /, + ) -> None: ... + + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload # (scalar, scalar) -> scalar + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Any: ... + @overload # (array-like, array) -> array + def __call__( + self, + x1: ArrayLike, + x2: NDArray[np.generic], + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def __call__( + self, + x1: NDArray[np.generic], + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + *, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... + + def at( + self, + a: NDArray[Any], + indices: _ArrayLikeInt_co, + b: ArrayLike, + /, + ) -> None: ... + + def reduce( + self, + array: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: NDArray[Any] | None = ..., + keepdims: bool = ..., + initial: Any = ..., + where: _ArrayLikeBool_co = ..., + ) -> Any: ... + + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: NDArray[Any] | None = ..., + ) -> NDArray[Any]: ... + + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: NDArray[Any] | None = ..., + ) -> NDArray[Any]: ... + + @overload # (scalar, scalar) -> scalar + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, + *, + out: None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> Any: ... 
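+    # Illustrative note (not upstream): for a nin=2/nout=1 ufunc such as
+    # ``np.add``, ``np.add.outer(np.arange(3), np.arange(4))`` returns an
+    # array of shape (3, 4).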
+ @overload # (array-like, array) -> array + def outer( + self, + A: ArrayLike, + B: NDArray[np.generic], + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array, array-like) -> array + def outer( + self, + A: NDArray[np.generic], + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like, out=array) -> array + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]], + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any]: ... + @overload # (array-like, array-like) -> array | scalar + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, + *, + out: NDArray[np.generic] | tuple[NDArray[np.generic]] | None = None, + dtype: DTypeLike | None = None, + **kwds: Unpack[_UFunc3Kwargs], + ) -> NDArray[Any] | Any: ... + +@type_check_only +class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __out1: NDArray[Any] | None = ..., + __out2: NDArray[Any] | None = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + @overload + def __call__( + self, + __x1: _SupportsArrayUFunc, + __out1: NDArray[Any] | None = ..., + __out2: NDArray[Any] | None = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + ) -> _2Tuple[Any]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[2]: ... + @property + def nargs(self) -> Literal[4]: ... + @property + def signature(self) -> None: ... 
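+    # Illustrative note (not upstream): ``np.divmod`` is a nin=2/nout=2
+    # ufunc; ``np.divmod(7, 3)`` returns the 2-tuple ``(2, 1)``, matching
+    # the ``_2Tuple`` return annotations below.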
+ + @overload + def __call__( + self, + __x1: _ScalarLike_co, + __x2: _ScalarLike_co, + __out1: None = ..., + __out2: None = ..., + *, + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[Any]: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + __out1: NDArray[Any] | None = ..., + __out2: NDArray[Any] | None = ..., + *, + out: _2Tuple[NDArray[Any]] = ..., + where: _ArrayLikeBool_co | None = ..., + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _4Tuple[str | None] = ..., + ) -> _2Tuple[NDArray[Any]]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +@type_check_only +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] + @property + def __name__(self) -> _NameType: ... + @property + def __qualname__(self) -> _NameType: ... + @property + def ntypes(self) -> _NTypes: ... + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def signature(self) -> _Signature: ... + + # Scalar for 1D array-likes; ndarray otherwise + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: None = ..., + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> Any: ... + @overload + def __call__( + self, + __x1: ArrayLike, + __x2: ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]], + *, + casting: _CastingKind = ..., + order: _OrderKACF = ..., + dtype: DTypeLike = ..., + subok: bool = ..., + signature: str | _3Tuple[str | None] = ..., + axes: list[_2Tuple[SupportsIndex]] = ..., + ) -> NDArray[Any]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... 
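+
+# Illustrative note (not upstream): ``np.matmul`` is a gufunc typed with
+# ``_GUFunc_Nin2_Nout1``; its core signature is "(n?,k),(k,m?)->(n?,m?)",
+# which is why ``signature`` above is a ``LiteralString`` rather than ``None``.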
+ +@type_check_only +class _PyFunc_Kwargs_Nargs2(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | tuple[DTypeLike, DTypeLike, DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs3P(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _3PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Kwargs_Nargs4P(TypedDict, total=False): + where: _ArrayLikeBool_co | None + casting: _CastingKind + order: _OrderKACF + dtype: DTypeLike + subok: bool + signature: str | _4PTuple[DTypeLike] + +@type_check_only +class _PyFunc_Nin1_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[1]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[2]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> _ArrayT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs2], + ) -> Any: ... + + def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, /) -> None: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + +@type_check_only +class _PyFunc_Nin2_Nout1(ufunc, Generic[_ReturnType_co, _IDType]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> Literal[2]: ... + @property + def nout(self) -> Literal[1]: ... + @property + def nargs(self) -> Literal[3]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + /, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc, + x2: _SupportsArrayUFunc | ArrayLike, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... 
+ @overload + def __call__( + self, + x1: ArrayLike, + x2: _SupportsArrayUFunc, + /, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + + def at(self, a: _SupportsArrayUFunc, ixs: _ArrayLikeInt_co, b: ArrayLike, /) -> None: ... + + @overload + def reduce( + self, + array: ArrayLike, + axis: _ShapeLike | None, + dtype: DTypeLike, + out: _ArrayT, + /, + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ArrayT: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT | tuple[_ArrayT], + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ArrayT: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + *, + keepdims: Literal[True], + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> NDArray[np.object_]: ... + @overload + def reduce( + self, + /, + array: ArrayLike, + axis: _ShapeLike | None = ..., + dtype: DTypeLike = ..., + out: None = ..., + keepdims: bool = ..., + initial: _ScalarLike_co = ..., + where: _ArrayLikeBool_co = ..., + ) -> _ReturnType_co | NDArray[np.object_]: ... + + @overload + def reduceat( + self, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, + /, + ) -> _ArrayT: ... + @overload + def reduceat( + self, + /, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT | tuple[_ArrayT], + ) -> _ArrayT: ... + @overload + def reduceat( + self, + /, + array: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[np.object_]: ... + @overload + def reduceat( + self, + /, + array: _SupportsArrayUFunc, + indices: _ArrayLikeInt_co, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + ) -> Any: ... + + @overload + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex, + dtype: DTypeLike, + out: _ArrayT, + /, + ) -> _ArrayT: ... + @overload + def accumulate( + self, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + *, + out: _ArrayT | tuple[_ArrayT], + ) -> _ArrayT: ... + @overload + def accumulate( + self, + /, + array: ArrayLike, + axis: SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + ) -> NDArray[np.object_]: ... + + @overload + def outer( + self, + A: _ScalarLike_co, + B: _ScalarLike_co, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def outer( + self, + A: ArrayLike, + B: ArrayLike, + /, *, + out: _ArrayT, + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> _ArrayT: ... + @overload + def outer( + self, + A: _SupportsArrayUFunc, + B: _SupportsArrayUFunc | ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... + @overload + def outer( + self, + A: _ScalarLike_co, + B: _SupportsArrayUFunc | ArrayLike, + /, *, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3], + ) -> Any: ... 
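+
+# Editorial runtime sketch (assuming these stubs model np.frompyfunc results):
+#
+#     import numpy as np
+#     add2 = np.frompyfunc(lambda a, b: a + b, 2, 1)  # nin=2, nout=1
+#     add2(1, 2)                 # scalar args -> _ReturnType_co
+#     add2([1, 2], [3, 4])       # array-likes -> NDArray[np.object_]
+#     add2.reduce([1, 2, 3, 4])  # reduce/accumulate/reduceat/outer stay usable
+#     add2.outer([1, 2], [3, 4]) # 2x2 object array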
+ +@type_check_only +class _PyFunc_Nin3P_Nout1(ufunc, Generic[_ReturnType_co, _IDType, _NIn]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> Literal[1]: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + x2: _ScalarLike_co, + x3: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ReturnType_co | NDArray[np.object_]: ... + @overload + def __call__( + self, + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *xs: ArrayLike, + out: _ArrayT | tuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> _ArrayT: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + x2: _SupportsArrayUFunc | ArrayLike, + x3: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: NDArray[Any] | tuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs4P], + ) -> Any: ... + + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + +@type_check_only +class _PyFunc_Nin1P_Nout2P(ufunc, Generic[_ReturnType_co, _IDType, _NIn, _NOut]): # type: ignore[misc] + @property + def identity(self) -> _IDType: ... + @property + def nin(self) -> _NIn: ... + @property + def nout(self) -> _NOut: ... + @property + def ntypes(self) -> Literal[1]: ... + @property + def signature(self) -> None: ... + + @overload + def __call__( + self, + x1: _ScalarLike_co, + /, + *xs: _ScalarLike_co, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ReturnType_co | NDArray[np.object_]]: ... + @overload + def __call__( + self, + x1: ArrayLike, + /, + *xs: ArrayLike, + out: _2PTuple[_ArrayT], + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> _2PTuple[_ArrayT]: ... + @overload + def __call__( + self, + x1: _SupportsArrayUFunc | ArrayLike, + /, + *xs: _SupportsArrayUFunc | ArrayLike, + out: _2PTuple[NDArray[Any]] | None = ..., + **kwargs: Unpack[_PyFunc_Kwargs_Nargs3P], + ) -> Any: ... + + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... + def outer(self, /, *args: Any, **kwargs: Any) -> NoReturn: ... diff --git a/python/numpy/_utils/__init__.py b/python/numpy/_utils/__init__.py new file mode 100644 index 000000000..84ee99db1 --- /dev/null +++ b/python/numpy/_utils/__init__.py @@ -0,0 +1,95 @@ +""" +This is a module for defining private helpers which do not depend on the +rest of NumPy. + +Everything in here must be self-contained so that it can be +imported anywhere else without creating circular imports. 
+If a utility requires the import of NumPy, it probably belongs
+in ``numpy._core``.
+"""
+
+import functools
+import warnings
+
+from ._convertions import asbytes, asunicode
+
+
+def set_module(module):
+    """Private decorator for overriding __module__ on a function or class.
+
+    Example usage::
+
+        @set_module('numpy')
+        def example():
+            pass
+
+        assert example.__module__ == 'numpy'
+    """
+    def decorator(func):
+        if module is not None:
+            if isinstance(func, type):
+                try:
+                    func._module_source = func.__module__
+                except AttributeError:
+                    pass
+
+            func.__module__ = module
+        return func
+    return decorator
+
+
+def _rename_parameter(old_names, new_names, dep_version=None):
+    """
+    Generate a decorator for backward-compatible keyword renaming.
+
+    Apply the decorator generated by `_rename_parameter` to functions with a
+    renamed parameter to maintain backward-compatibility.
+
+    After decoration, the function behaves as follows:
+    If only the new parameter is passed into the function, behave as usual.
+    If only the old parameter is passed into the function (as a keyword), raise
+    a DeprecationWarning if `dep_version` is provided, and behave as usual
+    otherwise.
+    If both old and new parameters are passed into the function, raise a
+    DeprecationWarning if `dep_version` is provided, and raise the appropriate
+    TypeError (function got multiple values for argument).
+
+    Parameters
+    ----------
+    old_names : list of str
+        Old names of parameters
+    new_names : list of str
+        New names of parameters
+    dep_version : str, optional
+        Version of NumPy in which the old parameter was deprecated, in the
+        format 'X.Y.Z'. If supplied, the deprecation message will indicate
+        that support for the old parameter will be removed in version
+        'X.Y+2.Z'.
+
+    Notes
+    -----
+    Untested with functions that accept *args. Probably won't work as written.
+
+    """
+    def decorator(fun):
+        @functools.wraps(fun)
+        def wrapper(*args, **kwargs):
+            __tracebackhide__ = True  # Hide traceback for py.test
+            for old_name, new_name in zip(old_names, new_names):
+                if old_name in kwargs:
+                    if dep_version:
+                        end_version = dep_version.split('.')
+                        end_version[1] = str(int(end_version[1]) + 2)
+                        end_version = '.'.join(end_version)
+                        msg = (f"Use of keyword argument `{old_name}` is "
+                               f"deprecated and replaced by `{new_name}`. "
+                               f"Support for `{old_name}` will be removed "
+                               f"in NumPy {end_version}.")
+                        warnings.warn(msg, DeprecationWarning, stacklevel=2)
+                    if new_name in kwargs:
+                        msg = (f"{fun.__name__}() got multiple values for "
+                               f"argument now known as `{new_name}`")
+                        raise TypeError(msg)
+                    kwargs[new_name] = kwargs.pop(old_name)
+            return fun(*args, **kwargs)
+        return wrapper
+    return decorator
diff --git a/python/numpy/_utils/__init__.pyi b/python/numpy/_utils/__init__.pyi
new file mode 100644
index 000000000..f3472df9a
--- /dev/null
+++ b/python/numpy/_utils/__init__.pyi
@@ -0,0 +1,30 @@
+from collections.abc import Callable, Iterable
+from typing import Protocol, TypeVar, overload, type_check_only
+
+from _typeshed import IdentityFunction
+
+from ._convertions import asbytes as asbytes
+from ._convertions import asunicode as asunicode
+
+###
+
+_T = TypeVar("_T")
+_HasModuleT = TypeVar("_HasModuleT", bound=_HasModule)
+
+@type_check_only
+class _HasModule(Protocol):
+    __module__: str
+
+###
+
+@overload
+def set_module(module: None) -> IdentityFunction: ...
+@overload
+def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ...
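+
+# Editorial usage sketch for the helpers typed in this stub; the runtime
+# definitions live in __init__.py above:
+#
+#     @set_module('numpy')
+#     def example(): ...
+#     assert example.__module__ == 'numpy'
+#
+#     @_rename_parameter(['old'], ['new'], dep_version='2.1.0')
+#     def f(new=0): return new
+#     f(old=3)  # DeprecationWarning: support for `old` ends in NumPy 2.3.0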
+
+#
+def _rename_parameter(
+    old_names: Iterable[str],
+    new_names: Iterable[str],
+    dep_version: str | None = None,
+) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ...
diff --git a/python/numpy/_utils/__pycache__/__init__.cpython-312.pyc b/python/numpy/_utils/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..68a0a049c
Binary files /dev/null and b/python/numpy/_utils/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/_utils/__pycache__/_convertions.cpython-312.pyc b/python/numpy/_utils/__pycache__/_convertions.cpython-312.pyc
new file mode 100644
index 000000000..0d3e40adf
Binary files /dev/null and b/python/numpy/_utils/__pycache__/_convertions.cpython-312.pyc differ
diff --git a/python/numpy/_utils/__pycache__/_inspect.cpython-312.pyc b/python/numpy/_utils/__pycache__/_inspect.cpython-312.pyc
new file mode 100644
index 000000000..6d6dc3efb
Binary files /dev/null and b/python/numpy/_utils/__pycache__/_inspect.cpython-312.pyc differ
diff --git a/python/numpy/_utils/__pycache__/_pep440.cpython-312.pyc b/python/numpy/_utils/__pycache__/_pep440.cpython-312.pyc
new file mode 100644
index 000000000..2677a585c
Binary files /dev/null and b/python/numpy/_utils/__pycache__/_pep440.cpython-312.pyc differ
diff --git a/python/numpy/_utils/_convertions.py b/python/numpy/_utils/_convertions.py
new file mode 100644
index 000000000..ab15a8ba0
--- /dev/null
+++ b/python/numpy/_utils/_convertions.py
@@ -0,0 +1,18 @@
+"""
+A set of methods retained from the np.compat module that
+are still used across the codebase.
+"""
+
+__all__ = ["asunicode", "asbytes"]
+
+
+def asunicode(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+
+def asbytes(s):
+    if isinstance(s, bytes):
+        return s
+    return str(s).encode('latin1')
diff --git a/python/numpy/_utils/_convertions.pyi b/python/numpy/_utils/_convertions.pyi
new file mode 100644
index 000000000..6cc599acc
--- /dev/null
+++ b/python/numpy/_utils/_convertions.pyi
@@ -0,0 +1,4 @@
+__all__ = ["asbytes", "asunicode"]
+
+def asunicode(s: bytes | str) -> str: ...
+def asbytes(s: bytes | str) -> bytes: ...
diff --git a/python/numpy/_utils/_inspect.py b/python/numpy/_utils/_inspect.py
new file mode 100644
index 000000000..b499f5837
--- /dev/null
+++ b/python/numpy/_utils/_inspect.py
@@ -0,0 +1,192 @@
+"""Subset of the inspect module from upstream Python
+
+We use this instead of upstream because upstream inspect is slow to import, and
+significantly contributes to numpy import times. Importing this copy has almost
+no overhead.
+
+"""
+import types
+
+__all__ = ['getargspec', 'formatargspec']
+
+# ----------------------------------------------------------- type-checking
+def ismethod(object):
+    """Return true if the object is an instance method.
+
+    Instance method objects provide these attributes:
+        __doc__          documentation string
+        __name__         name with which this method was defined
+        im_class         class object in which this method belongs
+        im_func          function object containing implementation of method
+        im_self          instance to which this method is bound, or None
+
+    """
+    return isinstance(object, types.MethodType)
+
+def isfunction(object):
+    """Return true if the object is a user-defined function.
+
+    Function objects provide these attributes:
+        __doc__          documentation string
+        __name__         name with which this function was defined
+        func_code        code object containing compiled function bytecode
+        func_defaults    tuple of any default values for arguments
+        func_doc         (same as __doc__)
+        func_globals     global namespace in which this function was defined
+        func_name        (same as __name__)
+
+    """
+    return isinstance(object, types.FunctionType)
+
+def iscode(object):
+    """Return true if the object is a code object.
+
+    Code objects provide these attributes:
+        co_argcount      number of arguments (not including * or ** args)
+        co_code          string of raw compiled bytecode
+        co_consts        tuple of constants used in the bytecode
+        co_filename      name of file in which this code object was created
+        co_firstlineno   number of first line in Python source code
+        co_flags         bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+        co_lnotab        encoded mapping of line numbers to bytecode indices
+        co_name          name with which this code object was defined
+        co_names         tuple of names other than arguments and function locals
+        co_nlocals       number of local variables
+        co_stacksize     virtual machine stack space required
+        co_varnames      tuple of names of arguments and local variables
+
+    """
+    return isinstance(object, types.CodeType)
+
+
+# ------------------------------------------------ argument list extraction
+# These constants are from Python's compile.h.
+CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
+
+def getargs(co):
+    """Get information about the arguments accepted by a code object.
+
+    Three things are returned: (args, varargs, varkw), where 'args' is
+    a list of argument names (possibly containing nested lists), and
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+
+    """
+
+    if not iscode(co):
+        raise TypeError('arg is not a code object')
+
+    nargs = co.co_argcount
+    names = co.co_varnames
+    args = list(names[:nargs])
+
+    # The following check rejects anonymous (tuple) arguments, which we do
+    # not need to support; raising here avoids importing the dis module.
+    for i in range(nargs):
+        if args[i][:1] in ['', '.']:
+            raise TypeError("tuple function arguments are not supported")
+    varargs = None
+    if co.co_flags & CO_VARARGS:
+        varargs = co.co_varnames[nargs]
+        nargs = nargs + 1
+    varkw = None
+    if co.co_flags & CO_VARKEYWORDS:
+        varkw = co.co_varnames[nargs]
+    return args, varargs, varkw
+
+def getargspec(func):
+    """Get the names and default values of a function's arguments.
+
+    A tuple of four things is returned: (args, varargs, varkw, defaults).
+    'args' is a list of the argument names (it may contain nested lists).
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+    'defaults' is an n-tuple of the default values of the last n arguments.
+
+    """
+
+    if ismethod(func):
+        func = func.__func__
+    if not isfunction(func):
+        raise TypeError('arg is not a Python function')
+    args, varargs, varkw = getargs(func.__code__)
+    return args, varargs, varkw, func.__defaults__
+
+def getargvalues(frame):
+    """Get information about arguments passed into a particular frame.
+
+    A tuple of four things is returned: (args, varargs, varkw, locals).
+    'args' is a list of the argument names (it may contain nested lists).
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+    'locals' is the locals dictionary of the given frame.
+ + """ + args, varargs, varkw = getargs(frame.f_code) + return args, varargs, varkw, frame.f_locals + +def joinseq(seq): + if len(seq) == 1: + return '(' + seq[0] + ',)' + else: + return '(' + ', '.join(seq) + ')' + +def strseq(object, convert, join=joinseq): + """Recursively walk a sequence, stringifying each element. + + """ + if type(object) in [list, tuple]: + return join([strseq(_o, convert, join) for _o in object]) + else: + return convert(object) + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargspec. + + The first four arguments are (args, varargs, varkw, defaults). The + other four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i in range(len(args)): + spec = strseq(args[i], formatarg, join) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(varargs)) + if varkw is not None: + specs.append(formatvarkw(varkw)) + return '(' + ', '.join(specs) + ')' + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + join=joinseq): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments. + + """ + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [strseq(arg, convert, join) for arg in args] + + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' diff --git a/python/numpy/_utils/_inspect.pyi b/python/numpy/_utils/_inspect.pyi new file mode 100644 index 000000000..d53c3c40f --- /dev/null +++ b/python/numpy/_utils/_inspect.pyi @@ -0,0 +1,71 @@ +import types +from collections.abc import Callable, Mapping +from typing import Any, Final, TypeAlias, TypeVar, overload + +from _typeshed import SupportsLenAndGetItem +from typing_extensions import TypeIs + +__all__ = ["formatargspec", "getargspec"] + +### + +_T = TypeVar("_T") +_RT = TypeVar("_RT") + +_StrSeq: TypeAlias = SupportsLenAndGetItem[str] +_NestedSeq: TypeAlias = list[_T | _NestedSeq[_T]] | tuple[_T | _NestedSeq[_T], ...] + +_JoinFunc: TypeAlias = Callable[[list[_T]], _T] +_FormatFunc: TypeAlias = Callable[[_T], str] + +### + +CO_OPTIMIZED: Final = 1 +CO_NEWLOCALS: Final = 2 +CO_VARARGS: Final = 4 +CO_VARKEYWORDS: Final = 8 + +### + +def ismethod(object: object) -> TypeIs[types.MethodType]: ... +def isfunction(object: object) -> TypeIs[types.FunctionType]: ... +def iscode(object: object) -> TypeIs[types.CodeType]: ... 
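+
+# Editorial round-trip sketch for getargspec/formatargspec typed below:
+#
+#     def f(a, b=1, *args, **kwargs): ...
+#     spec = getargspec(f)  # (['a', 'b'], 'args', 'kwargs', (1,))
+#     formatargspec(*spec)  # '(a, b=1, *args, **kwargs)'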
+ +### + +def getargs(co: types.CodeType) -> tuple[list[str], str | None, str | None]: ... +def getargspec(func: types.MethodType | types.FunctionType) -> tuple[list[str], str | None, str | None, tuple[Any, ...]]: ... +def getargvalues(frame: types.FrameType) -> tuple[list[str], str | None, str | None, dict[str, Any]]: ... + +# +def joinseq(seq: _StrSeq) -> str: ... + +# +@overload +def strseq(object: _NestedSeq[str], convert: Callable[[Any], Any], join: _JoinFunc[str] = ...) -> str: ... +@overload +def strseq(object: _NestedSeq[_T], convert: Callable[[_T], _RT], join: _JoinFunc[_RT]) -> _RT: ... + +# +def formatargspec( + args: _StrSeq, + varargs: str | None = None, + varkw: str | None = None, + defaults: SupportsLenAndGetItem[object] | None = None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... +def formatargvalues( + args: _StrSeq, + varargs: str | None, + varkw: str | None, + locals: Mapping[str, object] | None, + formatarg: _FormatFunc[str] = ..., # str + formatvarargs: _FormatFunc[str] = ..., # "*{}".format + formatvarkw: _FormatFunc[str] = ..., # "**{}".format + formatvalue: _FormatFunc[object] = ..., # "={!r}".format + join: _JoinFunc[str] = ..., # joinseq +) -> str: ... diff --git a/python/numpy/_utils/_pep440.py b/python/numpy/_utils/_pep440.py new file mode 100644 index 000000000..035a0695e --- /dev/null +++ b/python/numpy/_utils/_pep440.py @@ -0,0 +1,486 @@ +"""Utility to compare pep440 compatible version strings. + +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. +""" + +# Copyright (c) Donald Stufft and individual contributors. +# All rights reserved. + +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: + +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. + +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+ +import collections +import itertools +import re + +__all__ = [ + "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN", +] + + +# BEGIN packaging/_structures.py + + +class Infinity: + def __repr__(self): + return "Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return False + + def __le__(self, other): + return False + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return True + + def __ge__(self, other): + return True + + def __neg__(self): + return NegativeInfinity + + +Infinity = Infinity() + + +class NegativeInfinity: + def __repr__(self): + return "-Infinity" + + def __hash__(self): + return hash(repr(self)) + + def __lt__(self, other): + return True + + def __le__(self, other): + return True + + def __eq__(self, other): + return isinstance(other, self.__class__) + + def __ne__(self, other): + return not isinstance(other, self.__class__) + + def __gt__(self, other): + return False + + def __ge__(self, other): + return False + + def __neg__(self): + return Infinity + + +# BEGIN packaging/version.py + + +NegativeInfinity = NegativeInfinity() + +_Version = collections.namedtuple( + "_Version", + ["epoch", "release", "dev", "pre", "post", "local"], +) + + +def parse(version): + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. 
+    """
+
+
+class _BaseVersion:
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return f"<LegacyVersion('{self}')>"
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This effectively sorts every LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version of setuptools,
+    # prior to its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
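+# Editorial sanity sketch: the named groups above line up with the
+# match.group(...) calls in Version.__init__ below. For example:
+#
+#     import re
+#     _vre = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$",
+#                       re.VERBOSE | re.IGNORECASE)
+#     m = _vre.search("1!2.3.4rc1.post2.dev3+abc.5")
+#     assert m.group("epoch") == "1" and m.group("release") == "2.3.4"
+#     assert m.group("pre_l") == "rc" and m.group("post_n2") == "2"
+#     assert m.group("dev_n") == "3" and m.group("local") == "abc.5"
+
+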
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return f"<Version('{self}')>"
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append(f"{self._version.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(f".post{self._version.post[1]}")
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(f".dev{self._version.dev[1]}")
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                f"+{'.'.join(str(x) for x in self._version.local)}"
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append(f"{self._version.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
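+# Editorial sketch of the normalization implemented above:
+#
+#     _parse_letter_version("ALPHA", None)  # -> ("a", 0)
+#     _parse_letter_version("rev", "3")     # -> ("post", 3)
+#     _parse_letter_version(None, "1")      # -> ("post", 1), the "1.0-1" form
+
+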
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-
+    # leading zeros until we come to something non-zero, then take the rest,
+    # re-reverse it back into the correct order, and make it a tuple and use
+    # that for our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre-segment, but we _only_ want to do this
+    # if there is neither a pre- nor a post-segment. If we have one of those, then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post-segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
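+
+
+# Editorial ordering sketch implied by the key above:
+#
+#     assert parse("1.0.dev0") < parse("1.0a0") < parse("1.0") < parse("1.0.post1")
+#     assert parse("1.0") == parse("1.0.0")     # trailing release zeros drop out
+#     assert parse("1.0") < parse("1.0+local")  # absent local segment sorts first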
diff --git a/python/numpy/_utils/_pep440.pyi b/python/numpy/_utils/_pep440.pyi
new file mode 100644
index 000000000..29dd4c912
--- /dev/null
+++ b/python/numpy/_utils/_pep440.pyi
@@ -0,0 +1,121 @@
+import re
+from collections.abc import Callable
+from typing import (
+    Any,
+    ClassVar,
+    Final,
+    Generic,
+    NamedTuple,
+    TypeVar,
+    final,
+    type_check_only,
+)
+from typing import (
+    Literal as L,
+)
+
+from typing_extensions import TypeIs
+
+__all__ = ["VERSION_PATTERN", "InvalidVersion", "LegacyVersion", "Version", "parse"]
+
+###
+
+_CmpKeyT = TypeVar("_CmpKeyT", bound=tuple[object, ...])
+_CmpKeyT_co = TypeVar("_CmpKeyT_co", bound=tuple[object, ...], default=tuple[Any, ...], covariant=True)
+
+###
+
+VERSION_PATTERN: Final[str] = ...
+
+class InvalidVersion(ValueError): ...
+
+@type_check_only
+@final
+class _InfinityType:
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: object, /) -> TypeIs[_InfinityType]: ...
+    def __ne__(self, other: object, /) -> bool: ...
+    def __lt__(self, other: object, /) -> L[False]: ...
+    def __le__(self, other: object, /) -> L[False]: ...
+    def __gt__(self, other: object, /) -> L[True]: ...
+    def __ge__(self, other: object, /) -> L[True]: ...
+    def __neg__(self) -> _NegativeInfinityType: ...
+
+Infinity: Final[_InfinityType] = ...
+
+@type_check_only
+@final
+class _NegativeInfinityType:
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: object, /) -> TypeIs[_NegativeInfinityType]: ...
+    def __ne__(self, other: object, /) -> bool: ...
+    def __lt__(self, other: object, /) -> L[True]: ...
+    def __le__(self, other: object, /) -> L[True]: ...
+    def __gt__(self, other: object, /) -> L[False]: ...
+    def __ge__(self, other: object, /) -> L[False]: ...
+    def __neg__(self) -> _InfinityType: ...
+
+NegativeInfinity: Final[_NegativeInfinityType] = ...
+
+class _Version(NamedTuple):
+    epoch: int
+    release: tuple[int, ...]
+    dev: tuple[str, int] | None
+    pre: tuple[str, int] | None
+    post: tuple[str, int] | None
+    local: tuple[str | int, ...] | None
+
+class _BaseVersion(Generic[_CmpKeyT_co]):
+    _key: _CmpKeyT_co
+    def __hash__(self) -> int: ...
+    def __eq__(self, other: _BaseVersion, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __ne__(self, other: _BaseVersion, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    def __lt__(self, other: _BaseVersion, /) -> bool: ...
+    def __le__(self, other: _BaseVersion, /) -> bool: ...
+    def __ge__(self, other: _BaseVersion, /) -> bool: ...
+    def __gt__(self, other: _BaseVersion, /) -> bool: ...
+    def _compare(self, /, other: _BaseVersion[_CmpKeyT], method: Callable[[_CmpKeyT_co, _CmpKeyT], bool]) -> bool: ...
+
+class LegacyVersion(_BaseVersion[tuple[L[-1], tuple[str, ...]]]):
+    _version: Final[str]
+    def __init__(self, /, version: str) -> None: ...
+    @property
+    def public(self) -> str: ...
+    @property
+    def base_version(self) -> str: ...
+    @property
+    def local(self) -> None: ...
+    @property
+    def is_prerelease(self) -> L[False]: ...
+    @property
+    def is_postrelease(self) -> L[False]: ...
+
+class Version(
+    _BaseVersion[
+        tuple[
+            int,  # epoch
+            tuple[int, ...],  # release
+            tuple[str, int] | _InfinityType | _NegativeInfinityType,  # pre
+            tuple[str, int] | _NegativeInfinityType,  # post
+            tuple[str, int] | _InfinityType,  # dev
+            tuple[tuple[int, L[""]] | tuple[_NegativeInfinityType, str], ...] | _NegativeInfinityType,  # local
+        ],
+    ],
+):
+    _regex: ClassVar[re.Pattern[str]] = ...
+    _version: Final[str]
+
+    def __init__(self, /, version: str) -> None: ...
+    @property
+    def public(self) -> str: ...
+    @property
+    def base_version(self) -> str: ...
+    @property
+    def local(self) -> str | None: ...
+    @property
+    def is_prerelease(self) -> bool: ...
+    @property
+    def is_postrelease(self) -> bool: ...
+
+#
+def parse(version: str) -> Version | LegacyVersion: ...
diff --git a/python/numpy/char/__init__.py b/python/numpy/char/__init__.py
new file mode 100644
index 000000000..d98d38c1d
--- /dev/null
+++ b/python/numpy/char/__init__.py
@@ -0,0 +1,2 @@
+from numpy._core.defchararray import *
+from numpy._core.defchararray import __all__, __doc__
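+
+# Editorial usage sketch: the shim keeps the public np.char namespace working:
+#
+#     >>> import numpy as np
+#     >>> np.char.upper(np.array(["a", "b"]))
+#     array(['A', 'B'], dtype='<U1')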
diff --git a/python/numpy/char/__init__.pyi b/python/numpy/char/__init__.pyi
new file mode 100644
index 000000000..e151f20e5
--- /dev/null
+++ b/python/numpy/char/__init__.pyi
@@ -0,0 +1,111 @@
+from numpy._core.defchararray import (
+    add,
+    array,
+    asarray,
+    capitalize,
+    center,
+    chararray,
+    compare_chararrays,
+    count,
+    decode,
+    encode,
+    endswith,
+    equal,
+    expandtabs,
+    find,
+    greater,
+    greater_equal,
+    index,
+    isalnum,
+    isalpha,
+    isdecimal,
+    isdigit,
+    islower,
+    isnumeric,
+    isspace,
+    istitle,
+    isupper,
+    join,
+    less,
+    less_equal,
+    ljust,
+    lower,
+    lstrip,
+    mod,
+    multiply,
+    not_equal,
+    partition,
+    replace,
+    rfind,
+    rindex,
+    rjust,
+    rpartition,
+    rsplit,
+    rstrip,
+    split,
+    splitlines,
+    startswith,
+    str_len,
+    strip,
+    swapcase,
+    title,
+    translate,
+    upper,
+    zfill,
+)
+
+__all__ = [
+    "equal",
+    "not_equal",
+    "greater_equal",
+    "less_equal",
+    "greater",
+    "less",
+    "str_len",
+    "add",
+    "multiply",
+    "mod",
+    "capitalize",
+    "center",
+    "count",
+    "decode",
+    "encode",
+    "endswith",
+    "expandtabs",
+    "find",
+    "index",
+    "isalnum",
+    "isalpha",
+    "isdigit",
+    "islower",
+    "isspace",
+    "istitle",
+    "isupper",
+    "join",
+    "ljust",
+    "lower",
+    "lstrip",
+    "partition",
+    "replace",
+    "rfind",
+    "rindex",
+    "rjust",
+    "rpartition",
+    "rsplit",
+    "rstrip",
+    "split",
+    "splitlines",
+    "startswith",
+    "strip",
+    "swapcase",
+    "title",
+    "translate",
+    "upper",
+    "zfill",
+    "isnumeric",
+    "isdecimal",
+    "array",
+    "asarray",
+    "compare_chararrays",
+    "chararray",
+]
diff --git a/python/numpy/char/__pycache__/__init__.cpython-312.pyc b/python/numpy/char/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..01a6e54d1
Binary files /dev/null and b/python/numpy/char/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/conftest.py b/python/numpy/conftest.py
new file mode 100644
index 000000000..fde4defc9
--- /dev/null
+++ b/python/numpy/conftest.py
@@ -0,0 +1,258 @@
+"""
+Pytest configuration and fixtures for the NumPy test suite.
+"""
+import os
+import string
+import sys
+import tempfile
+import warnings
+from contextlib import contextmanager
+
+import hypothesis
+import pytest
+
+import numpy
+import numpy as np
+from numpy._core._multiarray_tests import get_fpu_mode
+from numpy._core.tests._natype import get_stringdtype_dtype, pd_NA
+from numpy.testing._private.utils import NOGIL_BUILD
+
+try:
+    from scipy_doctest.conftest import dt_config
+    HAVE_SCPDT = True
+except ModuleNotFoundError:
+    HAVE_SCPDT = False
+
+
+_old_fpu_mode = None
+_collect_results = {}
+
+# Use a known and persistent tmpdir for hypothesis' caches, which
+# can be automatically cleared by the OS or user.
+hypothesis.configuration.set_hypothesis_home_dir(
+    os.path.join(tempfile.gettempdir(), ".hypothesis")
+)
+
+# We register two custom profiles for NumPy - for details see
+# https://hypothesis.readthedocs.io/en/latest/settings.html
+# The first is designed for our own CI runs; the second also
+# forces determinism and is designed for use via np.test()
+hypothesis.settings.register_profile(
+    name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.register_profile(
+    name="np.test() profile",
+    deadline=None, print_blob=True, database=None, derandomize=True,
+    suppress_health_check=list(hypothesis.HealthCheck),
+)
+# Note that the default profile is chosen based on the presence
+# of pytest.ini, but can be overridden by passing the
+# --hypothesis-profile=NAME argument to pytest.
+_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
+hypothesis.settings.load_profile(
+    "numpy-profile" if os.path.isfile(_pytest_ini) else "np.test() profile"
+)
+
+# The experimental DType API is used in _umath_tests
+os.environ["NUMPY_EXPERIMENTAL_DTYPE_API"] = "1"
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "valgrind_error: Tests that are known to error under valgrind.")
+    config.addinivalue_line("markers",
+        "leaks_references: Tests that are known to leak references.")
+    config.addinivalue_line("markers",
+        "slow: Tests that are very slow.")
+    config.addinivalue_line("markers",
+        "slow_pypy: Tests that are very slow on pypy.")
+
+
+def pytest_addoption(parser):
+    parser.addoption("--available-memory", action="store", default=None,
+                     help=("Set amount of memory available for running the "
+                           "test suite. This can result to tests requiring "
+                           "especially large amounts of memory to be skipped. "
+                           "Equivalent to setting environment variable "
+                           "NPY_AVAILABLE_MEM. Default: determined"
+                           "automatically."))
+
+
+gil_enabled_at_start = True
+if NOGIL_BUILD:
+    gil_enabled_at_start = sys._is_gil_enabled()
+
+
+def pytest_sessionstart(session):
+    available_mem = session.config.getoption('available_memory')
+    if available_mem is not None:
+        os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled():
+        tr = terminalreporter
+        tr.ensure_newline()
+        tr.section("GIL re-enabled", sep="=", red=True, bold=True)
+        tr.line("The GIL was re-enabled at runtime during the tests.")
+        tr.line("This can happen with no test failures if the RuntimeWarning")
+        tr.line("raised by Python when this happens is filtered by a test.")
+        tr.line("")
+        tr.line("Please ensure all new C modules declare support for running")
+        tr.line("without the GIL. Any new tests that intentionally imports ")
+        tr.line("code that re-enables the GIL should do so in a subprocess.")
+        pytest.exit("GIL re-enabled during tests", returncode=1)
+
+# FIXME when yield tests are gone.
+@pytest.hookimpl()
+def pytest_itemcollected(item):
+    """
+    Check FPU precision mode was not changed during test collection.
+
+    The clumsy way we do it here is mainly necessary because numpy
+    still uses yield tests, which can execute code at test collection
+    time.
+    """
+    global _old_fpu_mode
+
+    mode = get_fpu_mode()
+
+    if _old_fpu_mode is None:
+        _old_fpu_mode = mode
+    elif mode != _old_fpu_mode:
+        _collect_results[item] = (_old_fpu_mode, mode)
+        _old_fpu_mode = mode
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+    """
+    Check FPU precision mode was not changed during the test.
+    """
+    old_mode = get_fpu_mode()
+    yield
+    new_mode = get_fpu_mode()
+
+    if old_mode != new_mode:
+        raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to "
+                             f"{new_mode:#x} during the test")
+
+    collect_result = _collect_results.get(request.node)
+    if collect_result is not None:
+        old_mode, new_mode = collect_result
+        raise AssertionError(f"FPU precision mode changed from {old_mode:#x} to "
+                             f"{new_mode:#x} when collecting the test")
+
+
+@pytest.fixture(autouse=True)
+def add_np(doctest_namespace):
+    doctest_namespace['np'] = numpy
+
+@pytest.fixture(autouse=True)
+def env_setup(monkeypatch):
+    monkeypatch.setenv('PYTHONHASHSEED', '0')
+
+
+if HAVE_SCPDT:
+
+    @contextmanager
+    def warnings_errors_and_rng(test=None):
+        """Filter out the wall of DeprecationWarnings.
+        """
+        msgs = ["The numpy.linalg.linalg",
+                "The numpy.fft.helper",
+                "dep_util",
+                "pkg_resources",
+                "numpy.core.umath",
+                "msvccompiler",
+                "Deprecated call",
+                "numpy.core",
+                "Importing from numpy.matlib",
+                "This function is deprecated.",    # random_integers
+                "Data type alias 'a'",     # numpy.rec.fromfile
+                "Arrays of 2-dimensional vectors",   # matlib.cross
+                "`in1d` is deprecated", ]
+        msg = "|".join(msgs)
+
+        msgs_r = [
+            "invalid value encountered",
+            "divide by zero encountered"
+        ]
+        msg_r = "|".join(msgs_r)
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                'ignore', category=DeprecationWarning, message=msg
+            )
+            warnings.filterwarnings(
+                'ignore', category=RuntimeWarning, message=msg_r
+            )
+            yield
+
+    # find and check doctests under this context manager
+    dt_config.user_context_mgr = warnings_errors_and_rng
+
+    # numpy specific tweaks from refguide-check
+    dt_config.rndm_markers.add('#uninitialized')
+    dt_config.rndm_markers.add('# uninitialized')
+
+    # make the checker pick on mismatched dtypes
+    dt_config.strict_check = True
+
+    import doctest
+    dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+    # recognize the StringDType repr
+    dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType
+
+    # temporary skips
+    dt_config.skiplist = {
+        'numpy.savez',    # unclosed file
+        'numpy.matlib.savez',
+        'numpy.__array_namespace_info__',
+        'numpy.matlib.__array_namespace_info__',
+    }
+
+    # xfail problematic tutorials
+    dt_config.pytest_extra_xfail = {
+        'how-to-verify-bug.rst': '',
+        'c-info.ufunc-tutorial.rst': '',
+        'basics.interoperability.rst': 'needs pandas',
+        'basics.dispatch.rst': 'errors out in /testing/overrides.py',
+        'basics.subclassing.rst': '.. testcode:: admonitions not understood',
+        'misc.rst': 'manipulates warnings',
+    }
+
+    # ignores are for things that fail doctest collection (optionals, etc.)
+    dt_config.pytest_extra_ignore = [
+        'numpy/distutils',
+        'numpy/_core/cversions.py',
+        'numpy/_pyinstaller',
+        'numpy/random/_examples',
+        'numpy/f2py/_backends/_distutils.py',
+    ]
+
+
+@pytest.fixture
+def random_string_list():
+    chars = list(string.ascii_letters + string.digits)
+    chars = np.array(chars, dtype="U1")
+    ret = np.random.choice(chars, size=100 * 10, replace=True)
+    return ret.view("U100")
+
+
+@pytest.fixture(params=[True, False])
+def coerce(request):
+    return request.param
+
+
+@pytest.fixture(
+    params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
+    ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
+)
+def na_object(request):
+    return request.param
+
+
+@pytest.fixture()
+def dtype(na_object, coerce):
+    return get_stringdtype_dtype(na_object, coerce)
diff --git a/python/numpy/core/__init__.py b/python/numpy/core/__init__.py
new file mode 100644
index 000000000..cfd96ede6
--- /dev/null
+++ b/python/numpy/core/__init__.py
@@ -0,0 +1,33 @@
+"""
+The `numpy.core` submodule exists solely for backward compatibility
+purposes. The original `core` was renamed to `_core` and made private.
+`numpy.core` will be removed in the future.
+"""
+from numpy import _core
+
+from ._utils import _raise_warning
+
+
+# We used to use `np.core._ufunc_reconstruct` to unpickle.
+# This is unnecessary, but old pickles saved before 1.20 will be using it,
+# and there is no reason to break loading them.
+def _ufunc_reconstruct(module, name):
+    # The `fromlist` kwarg is required to ensure that `mod` points to the
+    # inner-most module rather than the parent package when the module name
+    # is nested. This makes it possible to pickle non-toplevel ufuncs such as
+    # scipy.special.expit for instance.
+    mod = __import__(module, fromlist=[name])
+    return getattr(mod, name)
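+
+# Editorial sketch of why `fromlist` matters for nested module names:
+#
+#     __import__('scipy.special')                      # -> the scipy package
+#     __import__('scipy.special', fromlist=['expit'])  # -> scipy.special itself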
+
+
+# force lazy-loading of submodules to ensure a warning is printed
+
+__all__ = ["arrayprint", "defchararray", "_dtype_ctypes", "_dtype",  # noqa: F822
+           "einsumfunc", "fromnumeric", "function_base", "getlimits",
+           "_internal", "multiarray", "_multiarray_umath", "numeric",
+           "numerictypes", "overrides", "records", "shape_base", "umath"]
+
+def __getattr__(attr_name):
+    attr = getattr(_core, attr_name)
+    _raise_warning(attr_name)
+    return attr
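+
+# Editorial usage sketch: attribute access is forwarded to numpy._core and
+# warns, e.g.:
+#
+#     import numpy.core as core
+#     core.multiarray  # resolved via __getattr__ above; emits the warning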
diff --git a/python/numpy/core/__init__.pyi b/python/numpy/core/__init__.pyi
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/core/__pycache__/__init__.cpython-312.pyc b/python/numpy/core/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..6d83a34ce
Binary files /dev/null and b/python/numpy/core/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/_dtype.cpython-312.pyc b/python/numpy/core/__pycache__/_dtype.cpython-312.pyc
new file mode 100644
index 000000000..bfcb143fc
Binary files /dev/null and b/python/numpy/core/__pycache__/_dtype.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc b/python/numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc
new file mode 100644
index 000000000..c101768c3
Binary files /dev/null and b/python/numpy/core/__pycache__/_dtype_ctypes.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/_internal.cpython-312.pyc b/python/numpy/core/__pycache__/_internal.cpython-312.pyc
new file mode 100644
index 000000000..578797ae5
Binary files /dev/null and b/python/numpy/core/__pycache__/_internal.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc b/python/numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc
new file mode 100644
index 000000000..774f5d0ce
Binary files /dev/null and b/python/numpy/core/__pycache__/_multiarray_umath.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/_utils.cpython-312.pyc b/python/numpy/core/__pycache__/_utils.cpython-312.pyc
new file mode 100644
index 000000000..d5aad2c55
Binary files /dev/null and b/python/numpy/core/__pycache__/_utils.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/arrayprint.cpython-312.pyc b/python/numpy/core/__pycache__/arrayprint.cpython-312.pyc
new file mode 100644
index 000000000..91b464ea9
Binary files /dev/null and b/python/numpy/core/__pycache__/arrayprint.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/defchararray.cpython-312.pyc b/python/numpy/core/__pycache__/defchararray.cpython-312.pyc
new file mode 100644
index 000000000..77dba0cd3
Binary files /dev/null and b/python/numpy/core/__pycache__/defchararray.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/einsumfunc.cpython-312.pyc b/python/numpy/core/__pycache__/einsumfunc.cpython-312.pyc
new file mode 100644
index 000000000..fb0e900ae
Binary files /dev/null and b/python/numpy/core/__pycache__/einsumfunc.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/fromnumeric.cpython-312.pyc b/python/numpy/core/__pycache__/fromnumeric.cpython-312.pyc
new file mode 100644
index 000000000..c67dc23bc
Binary files /dev/null and b/python/numpy/core/__pycache__/fromnumeric.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/function_base.cpython-312.pyc b/python/numpy/core/__pycache__/function_base.cpython-312.pyc
new file mode 100644
index 000000000..d816f3d39
Binary files /dev/null and b/python/numpy/core/__pycache__/function_base.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/getlimits.cpython-312.pyc b/python/numpy/core/__pycache__/getlimits.cpython-312.pyc
new file mode 100644
index 000000000..7af05782c
Binary files /dev/null and b/python/numpy/core/__pycache__/getlimits.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/multiarray.cpython-312.pyc b/python/numpy/core/__pycache__/multiarray.cpython-312.pyc
new file mode 100644
index 000000000..c6dd3561b
Binary files /dev/null and b/python/numpy/core/__pycache__/multiarray.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/numeric.cpython-312.pyc b/python/numpy/core/__pycache__/numeric.cpython-312.pyc
new file mode 100644
index 000000000..de2d03943
Binary files /dev/null and b/python/numpy/core/__pycache__/numeric.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/numerictypes.cpython-312.pyc b/python/numpy/core/__pycache__/numerictypes.cpython-312.pyc
new file mode 100644
index 000000000..942b26ba0
Binary files /dev/null and b/python/numpy/core/__pycache__/numerictypes.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/overrides.cpython-312.pyc b/python/numpy/core/__pycache__/overrides.cpython-312.pyc
new file mode 100644
index 000000000..ca8a7b730
Binary files /dev/null and b/python/numpy/core/__pycache__/overrides.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/records.cpython-312.pyc b/python/numpy/core/__pycache__/records.cpython-312.pyc
new file mode 100644
index 000000000..b27aa830d
Binary files /dev/null and b/python/numpy/core/__pycache__/records.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/shape_base.cpython-312.pyc b/python/numpy/core/__pycache__/shape_base.cpython-312.pyc
new file mode 100644
index 000000000..d5f1f8182
Binary files /dev/null and b/python/numpy/core/__pycache__/shape_base.cpython-312.pyc differ
diff --git a/python/numpy/core/__pycache__/umath.cpython-312.pyc b/python/numpy/core/__pycache__/umath.cpython-312.pyc
new file mode 100644
index 000000000..d46f92e16
Binary files /dev/null and b/python/numpy/core/__pycache__/umath.cpython-312.pyc differ
diff --git a/python/numpy/core/_dtype.py b/python/numpy/core/_dtype.py
new file mode 100644
index 000000000..544607909
--- /dev/null
+++ b/python/numpy/core/_dtype.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import _dtype
+
+    from ._utils import _raise_warning
+    ret = getattr(_dtype, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._dtype' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_dtype")
+    return ret
diff --git a/python/numpy/core/_dtype.pyi b/python/numpy/core/_dtype.pyi
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/core/_dtype_ctypes.py b/python/numpy/core/_dtype_ctypes.py
new file mode 100644
index 000000000..10cfba25e
--- /dev/null
+++ b/python/numpy/core/_dtype_ctypes.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import _dtype_ctypes
+
+    from ._utils import _raise_warning
+    ret = getattr(_dtype_ctypes, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._dtype_ctypes' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_dtype_ctypes")
+    return ret
diff --git a/python/numpy/core/_dtype_ctypes.pyi b/python/numpy/core/_dtype_ctypes.pyi
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/core/_internal.py b/python/numpy/core/_internal.py
new file mode 100644
index 000000000..63a6ccc75
--- /dev/null
+++ b/python/numpy/core/_internal.py
@@ -0,0 +1,27 @@
+from numpy._core import _internal
+
+
+# Build a new array from the information in a pickle.
+# Note that the name numpy.core._internal._reconstruct is embedded in
+# pickles of ndarrays made with NumPy before release 1.0
+# so don't remove the name here, or you'll
+# break backward compatibility.
+def _reconstruct(subtype, shape, dtype):
+    from numpy import ndarray
+    return ndarray.__new__(subtype, shape, dtype)
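+
+# For illustration, the call an old pickle payload makes (a sketch):
+#
+#     >>> import numpy as np
+#     >>> _reconstruct(np.ndarray, (0,), 'b')
+#     array([], dtype=int8)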
+
+
+# Pybind11 (in versions <= 2.11.1) imports _dtype_from_pep3118 from the
+# _internal submodule, therefore it must be importable without a warning.
+_dtype_from_pep3118 = _internal._dtype_from_pep3118
+
+def __getattr__(attr_name):
+    from numpy._core import _internal
+
+    from ._utils import _raise_warning
+    ret = getattr(_internal, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core._internal' has no attribute {attr_name}")
+    _raise_warning(attr_name, "_internal")
+    return ret
diff --git a/python/numpy/core/_multiarray_umath.py b/python/numpy/core/_multiarray_umath.py
new file mode 100644
index 000000000..c1e6b4e8c
--- /dev/null
+++ b/python/numpy/core/_multiarray_umath.py
@@ -0,0 +1,57 @@
+from numpy import ufunc
+from numpy._core import _multiarray_umath
+
+for item in _multiarray_umath.__dir__():
+    # ufuncs appear in pickles with a path in numpy.core._multiarray_umath
+    # and so must import from this namespace without warning or error
+    attr = getattr(_multiarray_umath, item)
+    if isinstance(attr, ufunc):
+        globals()[item] = attr
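+
+# For illustration (a sketch): ufunc names therefore resolve without going
+# through the deprecation machinery below, e.g.
+#
+#     >>> from numpy.core._multiarray_umath import add  # no DeprecationWarning
+#     >>> int(add(1, 2))
+#     3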
+
+
+def __getattr__(attr_name):
+    from numpy._core import _multiarray_umath
+
+    from ._utils import _raise_warning
+
+    if attr_name in {"_ARRAY_API", "_UFUNC_API"}:
+        import sys
+        import textwrap
+        import traceback
+
+        from numpy.version import short_version
+
+        msg = textwrap.dedent(f"""
+            A module that was compiled using NumPy 1.x cannot be run in
+            NumPy {short_version} as it may crash. To support both 1.x and 2.x
+            versions of NumPy, modules must be compiled with NumPy 2.0.
+            Some modules may need to be rebuilt instead, e.g. with 'pybind11>=2.12'.
+
+            If you are a user of the module, the easiest solution will be to
+            downgrade to 'numpy<2' or try to upgrade the affected module.
+            We expect that some modules will need time to support NumPy 2.
+
+            """)
+        tb_msg = "Traceback (most recent call last):"
+        for line in traceback.format_stack()[:-1]:
+            if "frozen importlib" in line:
+                continue
+            tb_msg += line
+
+        # Also print the message (with traceback).  This is because old versions
+        # of NumPy unfortunately set up the import to replace (and hide) the
+        # error.  The traceback shouldn't be needed, but e.g. pytest plugins
+        # seem to swallow it and we should be failing anyway...
+        sys.stderr.write(msg + tb_msg)
+        raise ImportError(msg)
+
+    ret = getattr(_multiarray_umath, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            "module 'numpy.core._multiarray_umath' has no attribute "
+            f"{attr_name}")
+    _raise_warning(attr_name, "_multiarray_umath")
+    return ret
+
+
+del _multiarray_umath, ufunc
diff --git a/python/numpy/core/_utils.py b/python/numpy/core/_utils.py
new file mode 100644
index 000000000..5f47f4ba4
--- /dev/null
+++ b/python/numpy/core/_utils.py
@@ -0,0 +1,21 @@
+import warnings
+
+
+def _raise_warning(attr: str, submodule: str | None = None) -> None:
+    new_module = "numpy._core"
+    old_module = "numpy.core"
+    if submodule is not None:
+        new_module = f"{new_module}.{submodule}"
+        old_module = f"{old_module}.{submodule}"
+    warnings.warn(
+        f"{old_module} is deprecated and has been renamed to {new_module}. "
+        "The numpy._core namespace contains private NumPy internals and its "
+        "use is discouraged, as NumPy internals can change without warning in "
+        "any release. In practice, most real-world usage of numpy.core is to "
+        "access functionality in the public NumPy API. If that is the case, "
+        "use the public NumPy API. If not, you are using NumPy internals. "
+        "If you would still like to access an internal attribute, "
+        f"use {new_module}.{attr}.",
+        DeprecationWarning,
+        stacklevel=3
+    )
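+
+
+# For illustration (a sketch of the message, abbreviated): accessing
+# numpy.core.umath.add would emit
+#
+#     DeprecationWarning: numpy.core.umath is deprecated and has been renamed
+#     to numpy._core.umath. ... If you would still like to access an internal
+#     attribute, use numpy._core.umath.add.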
diff --git a/python/numpy/core/arrayprint.py b/python/numpy/core/arrayprint.py
new file mode 100644
index 000000000..8be5c5c7c
--- /dev/null
+++ b/python/numpy/core/arrayprint.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import arrayprint
+
+    from ._utils import _raise_warning
+    ret = getattr(arrayprint, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.arrayprint' has no attribute {attr_name}")
+    _raise_warning(attr_name, "arrayprint")
+    return ret
diff --git a/python/numpy/core/defchararray.py b/python/numpy/core/defchararray.py
new file mode 100644
index 000000000..1c8706875
--- /dev/null
+++ b/python/numpy/core/defchararray.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import defchararray
+
+    from ._utils import _raise_warning
+    ret = getattr(defchararray, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.defchararray' has no attribute {attr_name}")
+    _raise_warning(attr_name, "defchararray")
+    return ret
diff --git a/python/numpy/core/einsumfunc.py b/python/numpy/core/einsumfunc.py
new file mode 100644
index 000000000..fe5aa399f
--- /dev/null
+++ b/python/numpy/core/einsumfunc.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import einsumfunc
+
+    from ._utils import _raise_warning
+    ret = getattr(einsumfunc, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.einsumfunc' has no attribute {attr_name}")
+    _raise_warning(attr_name, "einsumfunc")
+    return ret
diff --git a/python/numpy/core/fromnumeric.py b/python/numpy/core/fromnumeric.py
new file mode 100644
index 000000000..fae7a0399
--- /dev/null
+++ b/python/numpy/core/fromnumeric.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import fromnumeric
+
+    from ._utils import _raise_warning
+    ret = getattr(fromnumeric, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.fromnumeric' has no attribute {attr_name}")
+    _raise_warning(attr_name, "fromnumeric")
+    return ret
diff --git a/python/numpy/core/function_base.py b/python/numpy/core/function_base.py
new file mode 100644
index 000000000..e15c97141
--- /dev/null
+++ b/python/numpy/core/function_base.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import function_base
+
+    from ._utils import _raise_warning
+    ret = getattr(function_base, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.function_base' has no attribute {attr_name}")
+    _raise_warning(attr_name, "function_base")
+    return ret
diff --git a/python/numpy/core/getlimits.py b/python/numpy/core/getlimits.py
new file mode 100644
index 000000000..dc009cbd9
--- /dev/null
+++ b/python/numpy/core/getlimits.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import getlimits
+
+    from ._utils import _raise_warning
+    ret = getattr(getlimits, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.getlimits' has no attribute {attr_name}")
+    _raise_warning(attr_name, "getlimits")
+    return ret
diff --git a/python/numpy/core/multiarray.py b/python/numpy/core/multiarray.py
new file mode 100644
index 000000000..b22670942
--- /dev/null
+++ b/python/numpy/core/multiarray.py
@@ -0,0 +1,25 @@
+from numpy._core import multiarray
+
+# these must import without warning or error from numpy.core.multiarray to
+# support old pickle files
+for item in ["_reconstruct", "scalar"]:
+    globals()[item] = getattr(multiarray, item)
+
+# Pybind11 (in versions <= 2.11.1) imports _ARRAY_API from the multiarray
+# submodule as a part of NumPy initialization, therefore it must be importable
+# without a warning.
+_ARRAY_API = multiarray._ARRAY_API
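+
+# For illustration (a sketch): the re-exports above keep e.g.
+#
+#     >>> from numpy.core.multiarray import _reconstruct  # no DeprecationWarning
+#
+# working, which old pickle payloads rely on.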
+
+def __getattr__(attr_name):
+    from numpy._core import multiarray
+
+    from ._utils import _raise_warning
+    ret = getattr(multiarray, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.multiarray' has no attribute {attr_name}")
+    _raise_warning(attr_name, "multiarray")
+    return ret
+
+
+del multiarray
diff --git a/python/numpy/core/numeric.py b/python/numpy/core/numeric.py
new file mode 100644
index 000000000..ddd70b363
--- /dev/null
+++ b/python/numpy/core/numeric.py
@@ -0,0 +1,12 @@
+def __getattr__(attr_name):
+    from numpy._core import numeric
+
+    from ._utils import _raise_warning
+
+    sentinel = object()
+    ret = getattr(numeric, attr_name, sentinel)
+    if ret is sentinel:
+        raise AttributeError(
+            f"module 'numpy.core.numeric' has no attribute {attr_name}")
+    _raise_warning(attr_name, "numeric")
+    return ret
diff --git a/python/numpy/core/numerictypes.py b/python/numpy/core/numerictypes.py
new file mode 100644
index 000000000..cf2ad99f9
--- /dev/null
+++ b/python/numpy/core/numerictypes.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import numerictypes
+
+    from ._utils import _raise_warning
+    ret = getattr(numerictypes, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.numerictypes' has no attribute {attr_name}")
+    _raise_warning(attr_name, "numerictypes")
+    return ret
diff --git a/python/numpy/core/overrides.py b/python/numpy/core/overrides.py
new file mode 100644
index 000000000..17830ed41
--- /dev/null
+++ b/python/numpy/core/overrides.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import overrides
+
+    from ._utils import _raise_warning
+    ret = getattr(overrides, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.overrides' has no attribute {attr_name}")
+    _raise_warning(attr_name, "overrides")
+    return ret
diff --git a/python/numpy/core/overrides.pyi b/python/numpy/core/overrides.pyi
new file mode 100644
index 000000000..fab351262
--- /dev/null
+++ b/python/numpy/core/overrides.pyi
@@ -0,0 +1,7 @@
+# NOTE: At runtime, this submodule dynamically re-exports any `numpy._core.overrides`
+# member, and issues a `DeprecationWarning` when accessed. But since there is no
+# `__dir__` or `__all__` present, these annotations would be unverifiable. Because
+# this module is also deprecated in favor of `numpy._core`, and therefore not part of
+# the public API, we omit the "re-exports", which in practice would require literal
+# duplication of the stubs in order for the `@deprecated` decorator to be understood
+# by type-checkers.
diff --git a/python/numpy/core/records.py b/python/numpy/core/records.py
new file mode 100644
index 000000000..0cc45037d
--- /dev/null
+++ b/python/numpy/core/records.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import records
+
+    from ._utils import _raise_warning
+    ret = getattr(records, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.records' has no attribute {attr_name}")
+    _raise_warning(attr_name, "records")
+    return ret
diff --git a/python/numpy/core/shape_base.py b/python/numpy/core/shape_base.py
new file mode 100644
index 000000000..9cffce705
--- /dev/null
+++ b/python/numpy/core/shape_base.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import shape_base
+
+    from ._utils import _raise_warning
+    ret = getattr(shape_base, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.shape_base' has no attribute {attr_name}")
+    _raise_warning(attr_name, "shape_base")
+    return ret
diff --git a/python/numpy/core/umath.py b/python/numpy/core/umath.py
new file mode 100644
index 000000000..25a60cc9d
--- /dev/null
+++ b/python/numpy/core/umath.py
@@ -0,0 +1,10 @@
+def __getattr__(attr_name):
+    from numpy._core import umath
+
+    from ._utils import _raise_warning
+    ret = getattr(umath, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.core.umath' has no attribute {attr_name}")
+    _raise_warning(attr_name, "umath")
+    return ret
diff --git a/python/numpy/ctypeslib/__init__.py b/python/numpy/ctypeslib/__init__.py
new file mode 100644
index 000000000..fd3c773e4
--- /dev/null
+++ b/python/numpy/ctypeslib/__init__.py
@@ -0,0 +1,13 @@
+from ._ctypeslib import (
+    __all__,
+    __doc__,
+    _concrete_ndptr,
+    _ndptr,
+    as_array,
+    as_ctypes,
+    as_ctypes_type,
+    c_intp,
+    ctypes,
+    load_library,
+    ndpointer,
+)
diff --git a/python/numpy/ctypeslib/__init__.pyi b/python/numpy/ctypeslib/__init__.pyi
new file mode 100644
index 000000000..adc51da26
--- /dev/null
+++ b/python/numpy/ctypeslib/__init__.pyi
@@ -0,0 +1,33 @@
+import ctypes
+from ctypes import c_int64 as _c_intp
+
+from ._ctypeslib import (
+    __all__ as __all__,
+)
+from ._ctypeslib import (
+    __doc__ as __doc__,
+)
+from ._ctypeslib import (
+    _concrete_ndptr as _concrete_ndptr,
+)
+from ._ctypeslib import (
+    _ndptr as _ndptr,
+)
+from ._ctypeslib import (
+    as_array as as_array,
+)
+from ._ctypeslib import (
+    as_ctypes as as_ctypes,
+)
+from ._ctypeslib import (
+    as_ctypes_type as as_ctypes_type,
+)
+from ._ctypeslib import (
+    c_intp as c_intp,
+)
+from ._ctypeslib import (
+    load_library as load_library,
+)
+from ._ctypeslib import (
+    ndpointer as ndpointer,
+)
diff --git a/python/numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc b/python/numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..509fb6193
Binary files /dev/null and b/python/numpy/ctypeslib/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc b/python/numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc
new file mode 100644
index 000000000..2b2ec47b3
Binary files /dev/null and b/python/numpy/ctypeslib/__pycache__/_ctypeslib.cpython-312.pyc differ
diff --git a/python/numpy/ctypeslib/_ctypeslib.py b/python/numpy/ctypeslib/_ctypeslib.py
new file mode 100644
index 000000000..9255603cd
--- /dev/null
+++ b/python/numpy/ctypeslib/_ctypeslib.py
@@ -0,0 +1,603 @@
+"""
+============================
+``ctypes`` Utility Functions
+============================
+
+See Also
+--------
+load_library : Load a C library.
+ndpointer : Array restype/argtype with verification.
+as_ctypes : Create a ctypes array from an ndarray.
+as_array : Create an ndarray from a ctypes array.
+
+References
+----------
+.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html
+
+Examples
+--------
+Load the C library:
+
+>>> _lib = np.ctypeslib.load_library('libmystuff', '.')     #doctest: +SKIP
+
+Our result type, an ndarray that must be of type double, 1-dimensional,
+and C-contiguous in memory:
+
+>>> array_1d_double = np.ctypeslib.ndpointer(
+...                          dtype=np.double,
+...                          ndim=1, flags='CONTIGUOUS')    #doctest: +SKIP
+
+Our C-function typically takes an array and updates its values
+in-place.  For example::
+
+    void foo_func(double* x, int length)
+    {
+        int i;
+        for (i = 0; i < length; i++) {
+            x[i] = i*i;
+        }
+    }
+
+We wrap it using:
+
+>>> _lib.foo_func.restype = None                      #doctest: +SKIP
+>>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
+
+Then, we're ready to call ``foo_func``:
+
+>>> out = np.empty(15, dtype=np.double)
+>>> _lib.foo_func(out, len(out))                #doctest: +SKIP
+
+"""
+__all__ = ['load_library', 'ndpointer', 'c_intp', 'as_ctypes', 'as_array',
+           'as_ctypes_type']
+
+import os
+
+import numpy as np
+import numpy._core.multiarray as mu
+from numpy._utils import set_module
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+
+if ctypes is None:
+    @set_module("numpy.ctypeslib")
+    def _dummy(*args, **kwds):
+        """
+        Dummy object that raises an ImportError if ctypes is not available.
+
+        Raises
+        ------
+        ImportError
+            If ctypes is not available.
+
+        """
+        raise ImportError("ctypes is not available.")
+    load_library = _dummy
+    as_ctypes = _dummy
+    as_ctypes_type = _dummy
+    as_array = _dummy
+    ndpointer = _dummy
+    from numpy import intp as c_intp
+    _ndptr_base = object
+else:
+    import numpy._core._internal as nic
+    c_intp = nic._getintp_ctype()
+    del nic
+    _ndptr_base = ctypes.c_void_p
+
+    # Adapted from Albert Strasheim
+    @set_module("numpy.ctypeslib")
+    def load_library(libname, loader_path):
+        """
+        It is possible to load a library using
+
+        >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
+
+        But there are cross-platform considerations, such as library file extensions,
+        plus the fact that Windows will just load the first library it finds with that name.
+        NumPy supplies the load_library function as a convenience.
+
+        .. versionchanged:: 1.20.0
+            Allow libname and loader_path to take any
+            :term:`python:path-like object`.
+
+        Parameters
+        ----------
+        libname : path-like
+            Name of the library, which can have 'lib' as a prefix,
+            but without an extension.
+        loader_path : path-like
+            Where the library can be found.
+
+        Returns
+        -------
+        ctypes.cdll[libpath] : library object
+           A ctypes library object
+
+        Raises
+        ------
+        OSError
+            If there is no library with the expected extension, or the
+            library is defective and cannot be loaded.
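+
+        Examples
+        --------
+        A sketch; ``mylib`` and ``my_func`` are hypothetical names:
+
+        >>> lib = np.ctypeslib.load_library('mylib', '.')  # doctest: +SKIP
+        >>> lib.my_func.restype = None  # doctest: +SKIP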
+        """
+        # Convert path-like objects into strings
+        libname = os.fsdecode(libname)
+        loader_path = os.fsdecode(loader_path)
+
+        ext = os.path.splitext(libname)[1]
+        if not ext:
+            import sys
+            import sysconfig
+            # Try to load library with platform-specific name, otherwise
+            # default to libname.[so|dll|dylib].  Sometimes, these files are
+            # built erroneously on non-linux platforms.
+            base_ext = ".so"
+            if sys.platform.startswith("darwin"):
+                base_ext = ".dylib"
+            elif sys.platform.startswith("win"):
+                base_ext = ".dll"
+            libname_ext = [libname + base_ext]
+            so_ext = sysconfig.get_config_var("EXT_SUFFIX")
+            if not so_ext == base_ext:
+                libname_ext.insert(0, libname + so_ext)
+        else:
+            libname_ext = [libname]
+
+        loader_path = os.path.abspath(loader_path)
+        if not os.path.isdir(loader_path):
+            libdir = os.path.dirname(loader_path)
+        else:
+            libdir = loader_path
+
+        for ln in libname_ext:
+            libpath = os.path.join(libdir, ln)
+            if os.path.exists(libpath):
+                try:
+                    return ctypes.cdll[libpath]
+                except OSError:
+                    # defective lib file
+                    raise
+        # if no successful return in the libname_ext loop:
+        raise OSError("no file with expected extension")
+
+
+def _num_fromflags(flaglist):
+    num = 0
+    for val in flaglist:
+        num += mu._flagdict[val]
+    return num
+
+
+_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
+              'OWNDATA', 'WRITEBACKIFCOPY']
+def _flags_fromnum(num):
+    res = []
+    for key in _flagnames:
+        value = mu._flagdict[key]
+        if (num & value):
+            res.append(key)
+    return res
+
+
+class _ndptr(_ndptr_base):
+    @classmethod
+    def from_param(cls, obj):
+        if not isinstance(obj, np.ndarray):
+            raise TypeError("argument must be an ndarray")
+        if cls._dtype_ is not None \
+               and obj.dtype != cls._dtype_:
+            raise TypeError(f"array must have data type {cls._dtype_}")
+        if cls._ndim_ is not None \
+               and obj.ndim != cls._ndim_:
+            raise TypeError("array must have %d dimension(s)" % cls._ndim_)
+        if cls._shape_ is not None \
+               and obj.shape != cls._shape_:
+            raise TypeError(f"array must have shape {str(cls._shape_)}")
+        if cls._flags_ is not None \
+               and ((obj.flags.num & cls._flags_) != cls._flags_):
+            raise TypeError(f"array must have flags {_flags_fromnum(cls._flags_)}")
+        return obj.ctypes
+
+
+class _concrete_ndptr(_ndptr):
+    """
+    Like _ndptr, but with `_shape_` and `_dtype_` specified.
+
+    Notably, this means the pointer has enough information to reconstruct
+    the array, which is not generally true.
+    """
+    def _check_retval_(self):
+        """
+        This method is called when this class is used as the .restype
+        attribute for a shared-library function, to automatically wrap the
+        pointer into an array.
+        """
+        return self.contents
+
+    @property
+    def contents(self):
+        """
+        Get an ndarray viewing the data pointed to by this pointer.
+
+        This mirrors the `contents` attribute of a normal ctypes pointer
+        """
+        full_dtype = np.dtype((self._dtype_, self._shape_))
+        full_ctype = ctypes.c_char * full_dtype.itemsize
+        buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
+        return np.frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
+
+
+# Factory for an array-checking class with from_param defined for
+# use with ctypes argtypes mechanism
+_pointer_type_cache = {}
+
+@set_module("numpy.ctypeslib")
+def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
+    """
+    Array-checking restype/argtypes.
+
+    An ndpointer instance is used to describe an ndarray in restypes
+    and argtypes specifications.  This approach is more flexible than
+    using, for example, ``POINTER(c_double)``, since several restrictions
+    can be specified, which are verified upon calling the ctypes function.
+    These include data type, number of dimensions, shape and flags.  If a
+    given array does not satisfy the specified restrictions,
+    a ``TypeError`` is raised.
+
+    Parameters
+    ----------
+    dtype : data-type, optional
+        Array data-type.
+    ndim : int, optional
+        Number of array dimensions.
+    shape : tuple of ints, optional
+        Array shape.
+    flags : str or tuple of str
+        Array flags; may be one or more of:
+
+        - C_CONTIGUOUS / C / CONTIGUOUS
+        - F_CONTIGUOUS / F / FORTRAN
+        - OWNDATA / O
+        - WRITEABLE / W
+        - ALIGNED / A
+        - WRITEBACKIFCOPY / X
+
+    Returns
+    -------
+    klass : ndpointer type object
+        A type object, which is an ``_ndptr`` instance containing
+        dtype, ndim, shape and flags information.
+
+    Raises
+    ------
+    TypeError
+        If a given array does not satisfy the specified restrictions.
+
+    Examples
+    --------
+    >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
+    ...                                                  ndim=1,
+    ...                                                  flags='C_CONTIGUOUS')]
+    ... #doctest: +SKIP
+    >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
+    ... #doctest: +SKIP
+
+    """
+
+    # normalize dtype to dtype | None
+    if dtype is not None:
+        dtype = np.dtype(dtype)
+
+    # normalize flags to int | None
+    num = None
+    if flags is not None:
+        if isinstance(flags, str):
+            flags = flags.split(',')
+        elif isinstance(flags, (int, np.integer)):
+            num = flags
+            flags = _flags_fromnum(num)
+        elif isinstance(flags, mu.flagsobj):
+            num = flags.num
+            flags = _flags_fromnum(num)
+        if num is None:
+            try:
+                flags = [x.strip().upper() for x in flags]
+            except Exception as e:
+                raise TypeError("invalid flags specification") from e
+            num = _num_fromflags(flags)
+
+    # normalize shape to tuple | None
+    if shape is not None:
+        try:
+            shape = tuple(shape)
+        except TypeError:
+            # single integer -> 1-tuple
+            shape = (shape,)
+
+    cache_key = (dtype, ndim, shape, num)
+
+    try:
+        return _pointer_type_cache[cache_key]
+    except KeyError:
+        pass
+
+    # produce a name for the new type
+    if dtype is None:
+        name = 'any'
+    elif dtype.names is not None:
+        name = str(id(dtype))
+    else:
+        name = dtype.str
+    if ndim is not None:
+        name += "_%dd" % ndim
+    if shape is not None:
+        name += "_" + "x".join(str(x) for x in shape)
+    if flags is not None:
+        name += "_" + "_".join(flags)
+
+    if dtype is not None and shape is not None:
+        base = _concrete_ndptr
+    else:
+        base = _ndptr
+
+    klass = type(f"ndpointer_{name}", (base,),
+                 {"_dtype_": dtype,
+                  "_shape_": shape,
+                  "_ndim_": ndim,
+                  "_flags_": num})
+    _pointer_type_cache[cache_key] = klass
+    return klass
+
+
+if ctypes is not None:
+    def _ctype_ndarray(element_type, shape):
+        """ Create an ndarray of the given element type and shape """
+        for dim in shape[::-1]:
+            element_type = dim * element_type
+            # prevent the type name from including np.ctypeslib
+            element_type.__module__ = None
+        return element_type
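+
+    # A quick sketch of what _ctype_ndarray builds:
+    #
+    #     >>> t = _ctype_ndarray(ctypes.c_int, (2, 3))   # (c_int * 3) * 2
+    #     >>> ctypes.sizeof(t) == 6 * ctypes.sizeof(ctypes.c_int)
+    #     True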
+
+    def _get_scalar_type_map():
+        """
+        Return a dictionary mapping native endian scalar dtype to ctypes types
+        """
+        ct = ctypes
+        simple_types = [
+            ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+            ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+            ct.c_float, ct.c_double,
+            ct.c_bool,
+        ]
+        return {np.dtype(ctype): ctype for ctype in simple_types}
+
+    _scalar_type_map = _get_scalar_type_map()
+
+    def _ctype_from_dtype_scalar(dtype):
+        # swapping twice ensures that `=` is promoted to <, >, or |
+        dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
+        dtype_native = dtype.newbyteorder('=')
+        try:
+            ctype = _scalar_type_map[dtype_native]
+        except KeyError:
+            raise NotImplementedError(
+                f"Converting {dtype!r} to a ctypes type"
+            ) from None
+
+        if dtype_with_endian.byteorder == '>':
+            ctype = ctype.__ctype_be__
+        elif dtype_with_endian.byteorder == '<':
+            ctype = ctype.__ctype_le__
+
+        return ctype
+
+    def _ctype_from_dtype_subarray(dtype):
+        element_dtype, shape = dtype.subdtype
+        ctype = _ctype_from_dtype(element_dtype)
+        return _ctype_ndarray(ctype, shape)
+
+    def _ctype_from_dtype_structured(dtype):
+        # extract offsets of each field
+        field_data = []
+        for name in dtype.names:
+            field_dtype, offset = dtype.fields[name][:2]
+            field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
+
+        # ctypes doesn't care about field order
+        field_data = sorted(field_data, key=lambda f: f[0])
+
+        if len(field_data) > 1 and all(offset == 0 for offset, _, _ in field_data):
+            # union, if multiple fields all at address 0
+            size = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                _fields_.append((name, ctype))
+                size = max(size, ctypes.sizeof(ctype))
+
+            # pad to the right size
+            if dtype.itemsize != size:
+                _fields_.append(('', ctypes.c_char * dtype.itemsize))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('union', (ctypes.Union,), {
+                '_fields_': _fields_,
+                '_pack_': 1,
+                '__module__': None,
+            })
+        else:
+            last_offset = 0
+            _fields_ = []
+            for offset, name, ctype in field_data:
+                padding = offset - last_offset
+                if padding < 0:
+                    raise NotImplementedError("Overlapping fields")
+                if padding > 0:
+                    _fields_.append(('', ctypes.c_char * padding))
+
+                _fields_.append((name, ctype))
+                last_offset = offset + ctypes.sizeof(ctype)
+
+            padding = dtype.itemsize - last_offset
+            if padding > 0:
+                _fields_.append(('', ctypes.c_char * padding))
+
+            # we inserted manual padding, so always `_pack_`
+            return type('struct', (ctypes.Structure,), {
+                '_fields_': _fields_,
+                '_pack_': 1,
+                '__module__': None,
+            })
+
+    def _ctype_from_dtype(dtype):
+        if dtype.fields is not None:
+            return _ctype_from_dtype_structured(dtype)
+        elif dtype.subdtype is not None:
+            return _ctype_from_dtype_subarray(dtype)
+        else:
+            return _ctype_from_dtype_scalar(dtype)
+
+    @set_module("numpy.ctypeslib")
+    def as_ctypes_type(dtype):
+        r"""
+        Convert a dtype into a ctypes type.
+
+        Parameters
+        ----------
+        dtype : dtype
+            The dtype to convert
+
+        Returns
+        -------
+        ctype
+            A ctype scalar, union, array, or struct
+
+        Raises
+        ------
+        NotImplementedError
+            If the conversion is not possible
+
+        Notes
+        -----
+        This function does not losslessly round-trip in either direction.
+
+        ``np.dtype(as_ctypes_type(dt))`` will:
+
+        - insert padding fields
+        - reorder fields to be sorted by offset
+        - discard field titles
+
+        ``as_ctypes_type(np.dtype(ctype))`` will:
+
+        - discard the class names of `ctypes.Structure`\ s and
+          `ctypes.Union`\ s
+        - convert single-element `ctypes.Union`\ s into single-element
+          `ctypes.Structure`\ s
+        - insert padding fields
+
+        Examples
+        --------
+        Converting a simple dtype:
+
+        >>> dt = np.dtype('int8')
+        >>> ctype = np.ctypeslib.as_ctypes_type(dt)
+        >>> ctype
+        <class 'ctypes.c_byte'>
+
+        Converting a structured dtype:
+
+        >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')])
+        >>> ctype = np.ctypeslib.as_ctypes_type(dt)
+        >>> ctype
+        <class 'struct'>
+
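+        Converting a subarray dtype (a sketch; the generated class name may vary):
+
+        >>> dt = np.dtype(('f4', (2, 3)))
+        >>> np.ctypeslib.as_ctypes_type(dt)  # doctest: +SKIP
+        <class 'c_float_Array_3_Array_2'>
+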
+        """
+        return _ctype_from_dtype(np.dtype(dtype))
+
+    @set_module("numpy.ctypeslib")
+    def as_array(obj, shape=None):
+        """
+        Create a numpy array from a ctypes array or POINTER.
+
+        The numpy array shares the memory with the ctypes object.
+
+        The shape parameter must be given if converting from a ctypes POINTER.
+        The shape parameter is ignored if converting from a ctypes array
+
+        Examples
+        --------
+        Converting a ctypes integer array:
+
+        >>> import ctypes
+        >>> ctypes_array = (ctypes.c_int * 5)(0, 1, 2, 3, 4)
+        >>> np_array = np.ctypeslib.as_array(ctypes_array)
+        >>> np_array
+        array([0, 1, 2, 3, 4], dtype=int32)
+
+        Converting a ctypes POINTER:
+
+        >>> import ctypes
+        >>> buffer = (ctypes.c_int * 5)(0, 1, 2, 3, 4)
+        >>> pointer = ctypes.cast(buffer, ctypes.POINTER(ctypes.c_int))
+        >>> np_array = np.ctypeslib.as_array(pointer, (5,))
+        >>> np_array
+        array([0, 1, 2, 3, 4], dtype=int32)
+
+        """
+        if isinstance(obj, ctypes._Pointer):
+            # convert pointers to an array of the desired shape
+            if shape is None:
+                raise TypeError(
+                    'as_array() requires a shape argument when called on a '
+                    'pointer')
+            p_arr_type = ctypes.POINTER(_ctype_ndarray(obj._type_, shape))
+            obj = ctypes.cast(obj, p_arr_type).contents
+
+        return np.asarray(obj)
+
+    @set_module("numpy.ctypeslib")
+    def as_ctypes(obj):
+        """
+        Create and return a ctypes object from a numpy array.  Actually
+        anything that exposes the __array_interface__ is accepted.
+
+        Examples
+        --------
+        Create ctypes object from inferred int ``np.array``:
+
+        >>> inferred_int_array = np.array([1, 2, 3])
+        >>> c_int_array = np.ctypeslib.as_ctypes(inferred_int_array)
+        >>> type(c_int_array)
+        <class 'c_long_Array_3'>
+        >>> c_int_array[:]
+        [1, 2, 3]
+
+        Create ctypes object from explicit 8 bit unsigned int ``np.array``:
+
+        >>> exp_int_array = np.array([1, 2, 3], dtype=np.uint8)
+        >>> c_int_array = np.ctypeslib.as_ctypes(exp_int_array)
+        >>> type(c_int_array)
+        <class 'c_ubyte_Array_3'>
+        >>> c_int_array[:]
+        [1, 2, 3]
+
+        """
+        ai = obj.__array_interface__
+        if ai["strides"]:
+            raise TypeError("strided arrays not supported")
+        if ai["version"] != 3:
+            raise TypeError("only __array_interface__ version 3 supported")
+        addr, readonly = ai["data"]
+        if readonly:
+            raise TypeError("readonly arrays unsupported")
+
+        # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
+        # dtype.itemsize (gh-14214)
+        ctype_scalar = as_ctypes_type(ai["typestr"])
+        result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
+        result = result_type.from_address(addr)
+        result.__keep = obj
+        return result
diff --git a/python/numpy/ctypeslib/_ctypeslib.pyi b/python/numpy/ctypeslib/_ctypeslib.pyi
new file mode 100644
index 000000000..e26d6052e
--- /dev/null
+++ b/python/numpy/ctypeslib/_ctypeslib.pyi
@@ -0,0 +1,245 @@
+# NOTE: Numpy's mypy plugin is used for importing the correct
+# platform-specific `ctypes._SimpleCData[int]` sub-type
+import ctypes
+from collections.abc import Iterable, Sequence
+from ctypes import c_int64 as _c_intp
+from typing import (
+    Any,
+    ClassVar,
+    Generic,
+    TypeAlias,
+    TypeVar,
+    overload,
+)
+from typing import Literal as L
+
+from _typeshed import StrOrBytesPath
+
+import numpy as np
+from numpy import (
+    byte,
+    double,
+    dtype,
+    generic,
+    intc,
+    long,
+    longdouble,
+    longlong,
+    ndarray,
+    short,
+    single,
+    ubyte,
+    uintc,
+    ulong,
+    ulonglong,
+    ushort,
+    void,
+)
+from numpy._core._internal import _ctypes
+from numpy._core.multiarray import flagsobj
+from numpy._typing import (
+    DTypeLike,
+    NDArray,
+    _AnyShape,
+    _ArrayLike,
+    _BoolCodes,
+    _ByteCodes,
+    _DoubleCodes,
+    _DTypeLike,
+    _IntCCodes,
+    _LongCodes,
+    _LongDoubleCodes,
+    _LongLongCodes,
+    _ShapeLike,
+    _ShortCodes,
+    _SingleCodes,
+    _UByteCodes,
+    _UIntCCodes,
+    _ULongCodes,
+    _ULongLongCodes,
+    _UShortCodes,
+    _VoidDTypeLike,
+)
+
+__all__ = ["load_library", "ndpointer", "c_intp", "as_ctypes", "as_array", "as_ctypes_type"]
+
+# TODO: Add a proper `_Shape` bound once we've got variadic typevars
+_DTypeT = TypeVar("_DTypeT", bound=dtype)
+_DTypeOptionalT = TypeVar("_DTypeOptionalT", bound=dtype | None)
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+
+_FlagsKind: TypeAlias = L[
+    'C_CONTIGUOUS', 'CONTIGUOUS', 'C',
+    'F_CONTIGUOUS', 'FORTRAN', 'F',
+    'ALIGNED', 'A',
+    'WRITEABLE', 'W',
+    'OWNDATA', 'O',
+    'WRITEBACKIFCOPY', 'X',
+]
+
+# TODO: Add a shape typevar once we have variadic typevars (PEP 646)
+class _ndptr(ctypes.c_void_p, Generic[_DTypeOptionalT]):
+    # In practice these 4 classvars are defined in the dynamic class
+    # returned by `ndpointer`
+    _dtype_: ClassVar[_DTypeOptionalT]
+    _shape_: ClassVar[None]
+    _ndim_: ClassVar[int | None]
+    _flags_: ClassVar[list[_FlagsKind] | None]
+
+    @overload
+    @classmethod
+    def from_param(cls: type[_ndptr[None]], obj: NDArray[Any]) -> _ctypes[Any]: ...
+    @overload
+    @classmethod
+    def from_param(cls: type[_ndptr[_DTypeT]], obj: ndarray[Any, _DTypeT]) -> _ctypes[Any]: ...
+
+class _concrete_ndptr(_ndptr[_DTypeT]):
+    _dtype_: ClassVar[_DTypeT]
+    _shape_: ClassVar[_AnyShape]
+    @property
+    def contents(self) -> ndarray[_AnyShape, _DTypeT]: ...
+
+def load_library(libname: StrOrBytesPath, loader_path: StrOrBytesPath) -> ctypes.CDLL: ...
+
+c_intp = _c_intp
+
+@overload
+def ndpointer(
+    dtype: None = ...,
+    ndim: int = ...,
+    shape: _ShapeLike | None = ...,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_ndptr[None]]: ...
+@overload
+def ndpointer(
+    dtype: _DTypeLike[_ScalarT],
+    ndim: int = ...,
+    *,
+    shape: _ShapeLike,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_concrete_ndptr[dtype[_ScalarT]]]: ...
+@overload
+def ndpointer(
+    dtype: DTypeLike,
+    ndim: int = ...,
+    *,
+    shape: _ShapeLike,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_concrete_ndptr[dtype]]: ...
+@overload
+def ndpointer(
+    dtype: _DTypeLike[_ScalarT],
+    ndim: int = ...,
+    shape: None = ...,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_ndptr[dtype[_ScalarT]]]: ...
+@overload
+def ndpointer(
+    dtype: DTypeLike,
+    ndim: int = ...,
+    shape: None = ...,
+    flags: _FlagsKind | Iterable[_FlagsKind] | int | flagsobj | None = ...,
+) -> type[_ndptr[dtype]]: ...
+
+@overload
+def as_ctypes_type(dtype: _BoolCodes | _DTypeLike[np.bool] | type[ctypes.c_bool]) -> type[ctypes.c_bool]: ...
+@overload
+def as_ctypes_type(dtype: _ByteCodes | _DTypeLike[byte] | type[ctypes.c_byte]) -> type[ctypes.c_byte]: ...
+@overload
+def as_ctypes_type(dtype: _ShortCodes | _DTypeLike[short] | type[ctypes.c_short]) -> type[ctypes.c_short]: ...
+@overload
+def as_ctypes_type(dtype: _IntCCodes | _DTypeLike[intc] | type[ctypes.c_int]) -> type[ctypes.c_int]: ...
+@overload
+def as_ctypes_type(dtype: _LongCodes | _DTypeLike[long] | type[ctypes.c_long]) -> type[ctypes.c_long]: ...
+@overload
+def as_ctypes_type(dtype: type[int]) -> type[c_intp]: ...
+@overload
+def as_ctypes_type(dtype: _LongLongCodes | _DTypeLike[longlong] | type[ctypes.c_longlong]) -> type[ctypes.c_longlong]: ...
+@overload
+def as_ctypes_type(dtype: _UByteCodes | _DTypeLike[ubyte] | type[ctypes.c_ubyte]) -> type[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes_type(dtype: _UShortCodes | _DTypeLike[ushort] | type[ctypes.c_ushort]) -> type[ctypes.c_ushort]: ...
+@overload
+def as_ctypes_type(dtype: _UIntCCodes | _DTypeLike[uintc] | type[ctypes.c_uint]) -> type[ctypes.c_uint]: ...
+@overload
+def as_ctypes_type(dtype: _ULongCodes | _DTypeLike[ulong] | type[ctypes.c_ulong]) -> type[ctypes.c_ulong]: ...
+@overload
+def as_ctypes_type(dtype: _ULongLongCodes | _DTypeLike[ulonglong] | type[ctypes.c_ulonglong]) -> type[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes_type(dtype: _SingleCodes | _DTypeLike[single] | type[ctypes.c_float]) -> type[ctypes.c_float]: ...
+@overload
+def as_ctypes_type(dtype: _DoubleCodes | _DTypeLike[double] | type[float | ctypes.c_double]) -> type[ctypes.c_double]: ...
+@overload
+def as_ctypes_type(dtype: _LongDoubleCodes | _DTypeLike[longdouble] | type[ctypes.c_longdouble]) -> type[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes_type(dtype: _VoidDTypeLike) -> type[Any]: ...  # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes_type(dtype: str) -> type[Any]: ...
+
+@overload
+def as_array(obj: ctypes._PointerLike, shape: Sequence[int]) -> NDArray[Any]: ...
+@overload
+def as_array(obj: _ArrayLike[_ScalarT], shape: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ...
+@overload
+def as_array(obj: object, shape: _ShapeLike | None = ...) -> NDArray[Any]: ...
+
+@overload
+def as_ctypes(obj: np.bool) -> ctypes.c_bool: ...
+@overload
+def as_ctypes(obj: byte) -> ctypes.c_byte: ...
+@overload
+def as_ctypes(obj: short) -> ctypes.c_short: ...
+@overload
+def as_ctypes(obj: intc) -> ctypes.c_int: ...
+@overload
+def as_ctypes(obj: long) -> ctypes.c_long: ...
+@overload
+def as_ctypes(obj: longlong) -> ctypes.c_longlong: ...
+@overload
+def as_ctypes(obj: ubyte) -> ctypes.c_ubyte: ...
+@overload
+def as_ctypes(obj: ushort) -> ctypes.c_ushort: ...
+@overload
+def as_ctypes(obj: uintc) -> ctypes.c_uint: ...
+@overload
+def as_ctypes(obj: ulong) -> ctypes.c_ulong: ...
+@overload
+def as_ctypes(obj: ulonglong) -> ctypes.c_ulonglong: ...
+@overload
+def as_ctypes(obj: single) -> ctypes.c_float: ...
+@overload
+def as_ctypes(obj: double) -> ctypes.c_double: ...
+@overload
+def as_ctypes(obj: longdouble) -> ctypes.c_longdouble: ...
+@overload
+def as_ctypes(obj: void) -> Any: ...  # `ctypes.Union` or `ctypes.Structure`
+@overload
+def as_ctypes(obj: NDArray[np.bool]) -> ctypes.Array[ctypes.c_bool]: ...
+@overload
+def as_ctypes(obj: NDArray[byte]) -> ctypes.Array[ctypes.c_byte]: ...
+@overload
+def as_ctypes(obj: NDArray[short]) -> ctypes.Array[ctypes.c_short]: ...
+@overload
+def as_ctypes(obj: NDArray[intc]) -> ctypes.Array[ctypes.c_int]: ...
+@overload
+def as_ctypes(obj: NDArray[long]) -> ctypes.Array[ctypes.c_long]: ...
+@overload
+def as_ctypes(obj: NDArray[longlong]) -> ctypes.Array[ctypes.c_longlong]: ...
+@overload
+def as_ctypes(obj: NDArray[ubyte]) -> ctypes.Array[ctypes.c_ubyte]: ...
+@overload
+def as_ctypes(obj: NDArray[ushort]) -> ctypes.Array[ctypes.c_ushort]: ...
+@overload
+def as_ctypes(obj: NDArray[uintc]) -> ctypes.Array[ctypes.c_uint]: ...
+@overload
+def as_ctypes(obj: NDArray[ulong]) -> ctypes.Array[ctypes.c_ulong]: ...
+@overload
+def as_ctypes(obj: NDArray[ulonglong]) -> ctypes.Array[ctypes.c_ulonglong]: ...
+@overload
+def as_ctypes(obj: NDArray[single]) -> ctypes.Array[ctypes.c_float]: ...
+@overload
+def as_ctypes(obj: NDArray[double]) -> ctypes.Array[ctypes.c_double]: ...
+@overload
+def as_ctypes(obj: NDArray[longdouble]) -> ctypes.Array[ctypes.c_longdouble]: ...
+@overload
+def as_ctypes(obj: NDArray[void]) -> ctypes.Array[Any]: ...  # `ctypes.Union` or `ctypes.Structure`
diff --git a/python/numpy/doc/__pycache__/ufuncs.cpython-312.pyc b/python/numpy/doc/__pycache__/ufuncs.cpython-312.pyc
new file mode 100644
index 000000000..1a6743047
Binary files /dev/null and b/python/numpy/doc/__pycache__/ufuncs.cpython-312.pyc differ
diff --git a/python/numpy/doc/ufuncs.py b/python/numpy/doc/ufuncs.py
new file mode 100644
index 000000000..7324168e1
--- /dev/null
+++ b/python/numpy/doc/ufuncs.py
@@ -0,0 +1,138 @@
+"""
+===================
+Universal Functions
+===================
+
+Ufuncs are, generally speaking, mathematical functions or operations that are
+applied element-by-element to the contents of an array. That is, the result
+in each output array element only depends on the value in the corresponding
+input array (or arrays) and on no other array elements. NumPy comes with a
+large suite of ufuncs, and scipy extends that suite substantially. The simplest
+example is the addition operator: ::
+
+ >>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
+ array([1, 3, 2, 6])
+
+The ufunc module lists all the available ufuncs in numpy. Documentation on
+the specific ufuncs may be found in those modules. This documentation is
+intended to address the more general aspects of ufuncs common to most of
+them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
+have equivalent functions defined (e.g. add() for +).
+
+Type coercion
+=============
+
+What happens when a binary operator (e.g., +,-,\\*,/, etc) deals with arrays of
+two different types? What is the type of the result? Typically, the result is
+the higher of the two types. For example: ::
+
+ float32 + float64 -> float64
+ int8 + int32 -> int32
+ int16 + float32 -> float32
+ float32 + complex64 -> complex64
+
+There are some less obvious cases, generally involving mixes of types
+(e.g. uints, ints and floats), where equal bit sizes for each are not
+capable of preserving all the information in a different type of equivalent
+bit size. Some examples are int32 vs float32 or uint32 vs int32.
+Generally, the result is the higher type of larger size than both
+(if available). So: ::
+
+ int32 + float32 -> float64
+ uint32 + int32 -> int64
+
+Finally, the type coercion behavior when expressions involve Python
+scalars is different than that seen for arrays. Since Python has a
+limited number of types, combining a Python int with a dtype=np.int8
+array does not coerce to the higher type but instead, the type of the
+array prevails. So the rule for Python scalars combined with arrays is
+that the result will be that of the array equivalent of the Python scalar
+if the Python scalar is of a higher 'kind' than the array (e.g., float
+vs. int); otherwise the resultant type will be that of the array.
+For example: ::
+
+  Python int + int8 -> int8
+  Python float + int8 -> float64
+
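+A quick check (a sketch; exact dtypes assume NumPy 2's promotion rules,
+NEP 50): ::
+
+ >>> (np.int8(1) + 2).dtype
+ dtype('int8')
+ >>> (np.array([1], dtype=np.int8) + 2.0).dtype
+ dtype('float64')
+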
+ufunc methods
+=============
+
+Binary ufuncs support 4 methods.
+
+**.reduce(arr)** applies the binary operator to elements of the array in
+  sequence. For example: ::
+
+ >>> np.add.reduce(np.arange(10))  # adds all elements of array
+ 45
+
+For multidimensional arrays, the first dimension is reduced by default: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5))
+     array([ 5,  7,  9, 11, 13])
+
+The axis keyword can be used to specify different axes to reduce: ::
+
+ >>> np.add.reduce(np.arange(10).reshape(2,5),axis=1)
+ array([10, 35])
+
+**.accumulate(arr)** applies the binary operator and generates an
+equivalently shaped array that includes the accumulated amount for each
+element of the array. A couple examples: ::
+
+ >>> np.add.accumulate(np.arange(10))
+ array([ 0,  1,  3,  6, 10, 15, 21, 28, 36, 45])
+ >>> np.multiply.accumulate(np.arange(1,9))
+ array([    1,     2,     6,    24,   120,   720,  5040, 40320])
+
+The behavior for multidimensional arrays is the same as for .reduce(),
+as is the use of the axis keyword.
+
+**.reduceat(arr,indices)** allows one to apply reduce to selected parts
+  of an array. It is a difficult method to understand. A small
+  example: ::
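+
+ >>> np.add.reduceat(np.arange(8), [0, 4])  # sums arr[0:4] and arr[4:8]
+ array([ 6, 22])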
+
+**.outer(arr1,arr2)** generates an outer operation on the two arrays arr1 and
+  arr2. It will work on multidimensional arrays (the shape of the result is
+  the concatenation of the two input shapes). For example: ::
+
+ >>> np.multiply.outer(np.arange(3),np.arange(4))
+ array([[0, 0, 0, 0],
+        [0, 1, 2, 3],
+        [0, 2, 4, 6]])
+
+Output arguments
+================
+
+All ufuncs accept an optional output array. The array must be of the expected
+output shape. Beware that if the type of the output array is of a different
+(and lower) type than the output result, the results may be silently truncated
+or otherwise corrupted in the downcast to the lower type. This usage is useful
+when one wants to avoid creating large temporary arrays, since it allows one
+to reuse the same array memory repeatedly (at the expense of not being able to
+use more convenient operator notation in expressions). Note that when the
+output argument is used, the ufunc still returns a reference to the result.
+For example: ::
+
+ >>> x = np.arange(2)
+ >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x,
+ ...        casting='unsafe')
+ array([0, 2])
+ >>> x
+ array([0, 2])
+
+and & or as ufuncs
+==================
+
+Invariably people try to use the python 'and' and 'or' as logical operators
+(and quite understandably). But these operators do not behave as normal
+operators since Python treats these quite differently. They cannot be
+overloaded with array equivalents. Thus using 'and' or 'or' with an array
+results in an error. There are two alternatives:
+
+ 1) use the ufunc functions logical_and() and logical_or().
+ 2) use the bitwise operators & and \\|. The drawback of these is that if
+    the arguments to these operators are not boolean arrays, the result is
+    likely incorrect. On the other hand, most usages of logical_and and
+    logical_or are with boolean arrays. As long as one is careful, this is
+    a convenient way to apply these operators.
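+
+For example: ::
+
+ >>> a = np.array([True, False, True])
+ >>> b = np.array([True, True, False])
+ >>> np.logical_and(a, b)
+ array([ True, False, False])
+ >>> a & b
+ array([ True, False, False])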
+
+"""
diff --git a/python/numpy/dtypes.py b/python/numpy/dtypes.py
new file mode 100644
index 000000000..550a29e18
--- /dev/null
+++ b/python/numpy/dtypes.py
@@ -0,0 +1,41 @@
+"""
+This module is home to specific dtypes related functionality and their classes.
+For more general information about dtypes, also see `numpy.dtype` and
+:ref:`arrays.dtypes`.
+
+Similar to the builtin ``types`` module, this submodule defines types (classes)
+that are not widely used directly.
+
+.. versionadded:: NumPy 1.25
+
+    The dtypes module is new in NumPy 1.25.  Previously DType classes were
+    only accessible indirectly.
+
+
+DType classes
+-------------
+
+The following are the classes of the corresponding NumPy dtype instances and
+NumPy scalar types.  The classes can be used in ``isinstance`` checks and can
+also be instantiated or used directly.  Direct use of these classes is not
+typical, since their scalar counterparts (e.g. ``np.float64``) or strings
+like ``"float64"`` can be used.
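+
+For example::
+
+    >>> import numpy as np
+    >>> isinstance(np.dtype("float64"), np.dtypes.Float64DType)
+    True
+    >>> np.dtypes.Float64DType()
+    dtype('float64')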
+"""
+
+# See doc/source/reference/routines.dtypes.rst for module-level docs
+
+__all__ = []
+
+
+def _add_dtype_helper(DType, alias):
+    # Function to add DTypes a bit more conveniently without channeling them
+    # through `numpy._core._multiarray_umath` namespace or similar.
+    from numpy import dtypes
+
+    setattr(dtypes, DType.__name__, DType)
+    __all__.append(DType.__name__)
+
+    if alias:
+        alias = alias.removeprefix("numpy.dtypes.")
+        setattr(dtypes, alias, DType)
+        __all__.append(alias)
diff --git a/python/numpy/dtypes.pyi b/python/numpy/dtypes.pyi
new file mode 100644
index 000000000..007dc643c
--- /dev/null
+++ b/python/numpy/dtypes.pyi
@@ -0,0 +1,631 @@
+# ruff: noqa: ANN401
+from typing import (
+    Any,
+    Generic,
+    LiteralString,
+    Never,
+    NoReturn,
+    Self,
+    TypeAlias,
+    final,
+    overload,
+    type_check_only,
+)
+from typing import Literal as L
+
+from typing_extensions import TypeVar
+
+import numpy as np
+
+__all__ = [  # noqa: RUF022
+    'BoolDType',
+    'Int8DType',
+    'ByteDType',
+    'UInt8DType',
+    'UByteDType',
+    'Int16DType',
+    'ShortDType',
+    'UInt16DType',
+    'UShortDType',
+    'Int32DType',
+    'IntDType',
+    'UInt32DType',
+    'UIntDType',
+    'Int64DType',
+    'LongDType',
+    'UInt64DType',
+    'ULongDType',
+    'LongLongDType',
+    'ULongLongDType',
+    'Float16DType',
+    'Float32DType',
+    'Float64DType',
+    'LongDoubleDType',
+    'Complex64DType',
+    'Complex128DType',
+    'CLongDoubleDType',
+    'ObjectDType',
+    'BytesDType',
+    'StrDType',
+    'VoidDType',
+    'DateTime64DType',
+    'TimeDelta64DType',
+    'StringDType',
+]
+
+# Helper base classes (typing-only)
+
+_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)
+
+@type_check_only
+class _SimpleDType(np.dtype[_ScalarT_co], Generic[_ScalarT_co]):  # type: ignore[misc]  # pyright: ignore[reportGeneralTypeIssues]
+    names: None  # pyright: ignore[reportIncompatibleVariableOverride]
+    def __new__(cls, /) -> Self: ...
+    def __getitem__(self, key: Any, /) -> NoReturn: ...
+    @property
+    def base(self) -> np.dtype[_ScalarT_co]: ...
+    @property
+    def fields(self) -> None: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+    @property
+    def subdtype(self) -> None: ...
+
+@type_check_only
+class _LiteralDType(_SimpleDType[_ScalarT_co], Generic[_ScalarT_co]):  # type: ignore[misc]
+    @property
+    def flags(self) -> L[0]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+
+# Helper mixins (typing-only):
+
+_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True)
+_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True)
+_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True)
+
+@type_check_only
+class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]):
+    @final
+    @property
+    def kind(self) -> _KindT_co: ...
+    @final
+    @property
+    def char(self) -> _CharT_co: ...
+    @final
+    @property
+    def num(self) -> _NumT_co: ...
+
+@type_check_only
+class _NoOrder:
+    @final
+    @property
+    def byteorder(self) -> L["|"]: ...
+
+@type_check_only
+class _NativeOrder:
+    @final
+    @property
+    def byteorder(self) -> L["="]: ...
+
+_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True)
+_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True, default=int)
+
+@type_check_only
+class _NBit(Generic[_DataSize_co, _ItemSize_co]):
+    @final
+    @property
+    def alignment(self) -> _DataSize_co: ...
+    @final
+    @property
+    def itemsize(self) -> _ItemSize_co: ...
+
+@type_check_only
+class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ...
+
+# Boolean:
+
+@final
+class BoolDType(  # type: ignore[misc]
+    _TypeCodes[L["b"], L["?"], L[0]],
+    _8Bit,
+    _LiteralDType[np.bool],
+):
+    @property
+    def name(self) -> L["bool"]: ...
+    @property
+    def str(self) -> L["|b1"]: ...
+
+# Sized integers:
+
+@final
+class Int8DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["b"], L[1]],
+    _8Bit,
+    _LiteralDType[np.int8],
+):
+    @property
+    def name(self) -> L["int8"]: ...
+    @property
+    def str(self) -> L["|i1"]: ...
+
+@final
+class UInt8DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["B"], L[2]],
+    _8Bit,
+    _LiteralDType[np.uint8],
+):
+    @property
+    def name(self) -> L["uint8"]: ...
+    @property
+    def str(self) -> L["|u1"]: ...
+
+@final
+class Int16DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["h"], L[3]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.int16],
+):
+    @property
+    def name(self) -> L["int16"]: ...
+    @property
+    def str(self) -> L["i2"]: ...
+
+@final
+class UInt16DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["H"], L[4]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.uint16],
+):
+    @property
+    def name(self) -> L["uint16"]: ...
+    @property
+    def str(self) -> L["u2"]: ...
+
+@final
+class Int32DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["i", "l"], L[5, 7]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.int32],
+):
+    @property
+    def name(self) -> L["int32"]: ...
+    @property
+    def str(self) -> L["i4"]: ...
+
+@final
+class UInt32DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["I", "L"], L[6, 8]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.uint32],
+):
+    @property
+    def name(self) -> L["uint32"]: ...
+    @property
+    def str(self) -> L["u4"]: ...
+
+@final
+class Int64DType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["l", "q"], L[7, 9]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.int64],
+):
+    @property
+    def name(self) -> L["int64"]: ...
+    @property
+    def str(self) -> L["i8"]: ...
+
+@final
+class UInt64DType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["L", "Q"], L[8, 10]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.uint64],
+):
+    @property
+    def name(self) -> L["uint64"]: ...
+    @property
+    def str(self) -> L["u8"]: ...
+
+# Standard C-named version/alias:
+# NOTE: Don't make these `Final`: it will break stubtest
+ByteDType = Int8DType
+UByteDType = UInt8DType
+ShortDType = Int16DType
+UShortDType = UInt16DType
+
+@final
+class IntDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["i"], L[5]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.intc],
+):
+    @property
+    def name(self) -> L["int32"]: ...
+    @property
+    def str(self) -> L["i4"]: ...
+
+@final
+class UIntDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["I"], L[6]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.uintc],
+):
+    @property
+    def name(self) -> L["uint32"]: ...
+    @property
+    def str(self) -> L["u4"]: ...
+
+@final
+class LongDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["l"], L[7]],
+    _NativeOrder,
+    _NBit[L[4, 8], L[4, 8]],
+    _LiteralDType[np.long],
+):
+    @property
+    def name(self) -> L["int32", "int64"]: ...
+    @property
+    def str(self) -> L["i4", "i8"]: ...
+
+@final
+class ULongDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["L"], L[8]],
+    _NativeOrder,
+    _NBit[L[4, 8], L[4, 8]],
+    _LiteralDType[np.ulong],
+):
+    @property
+    def name(self) -> L["uint32", "uint64"]: ...
+    @property
+    def str(self) -> L["u4", "u8"]: ...
+
+@final
+class LongLongDType(  # type: ignore[misc]
+    _TypeCodes[L["i"], L["q"], L[9]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.longlong],
+):
+    @property
+    def name(self) -> L["int64"]: ...
+    @property
+    def str(self) -> L["i8"]: ...
+
+@final
+class ULongLongDType(  # type: ignore[misc]
+    _TypeCodes[L["u"], L["Q"], L[10]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.ulonglong],
+):
+    @property
+    def name(self) -> L["uint64"]: ...
+    @property
+    def str(self) -> L["u8"]: ...
+
+# Floats:
+
+@final
+class Float16DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["e"], L[23]],
+    _NativeOrder,
+    _NBit[L[2], L[2]],
+    _LiteralDType[np.float16],
+):
+    @property
+    def name(self) -> L["float16"]: ...
+    @property
+    def str(self) -> L["f2"]: ...
+
+@final
+class Float32DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["f"], L[11]],
+    _NativeOrder,
+    _NBit[L[4], L[4]],
+    _LiteralDType[np.float32],
+):
+    @property
+    def name(self) -> L["float32"]: ...
+    @property
+    def str(self) -> L["f4"]: ...
+
+@final
+class Float64DType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["d"], L[12]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.float64],
+):
+    @property
+    def name(self) -> L["float64"]: ...
+    @property
+    def str(self) -> L["f8"]: ...
+
+@final
+class LongDoubleDType(  # type: ignore[misc]
+    _TypeCodes[L["f"], L["g"], L[13]],
+    _NativeOrder,
+    _NBit[L[8, 12, 16], L[8, 12, 16]],
+    _LiteralDType[np.longdouble],
+):
+    @property
+    def name(self) -> L["float64", "float96", "float128"]: ...
+    @property
+    def str(self) -> L["f8", "f12", "f16"]: ...
+
+# Complex:
+
+@final
+class Complex64DType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["F"], L[14]],
+    _NativeOrder,
+    _NBit[L[4], L[8]],
+    _LiteralDType[np.complex64],
+):
+    @property
+    def name(self) -> L["complex64"]: ...
+    @property
+    def str(self) -> L["c8"]: ...
+
+@final
+class Complex128DType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["D"], L[15]],
+    _NativeOrder,
+    _NBit[L[8], L[16]],
+    _LiteralDType[np.complex128],
+):
+    @property
+    def name(self) -> L["complex128"]: ...
+    @property
+    def str(self) -> L["c16"]: ...
+
+@final
+class CLongDoubleDType(  # type: ignore[misc]
+    _TypeCodes[L["c"], L["G"], L[16]],
+    _NativeOrder,
+    _NBit[L[8, 12, 16], L[16, 24, 32]],
+    _LiteralDType[np.clongdouble],
+):
+    @property
+    def name(self) -> L["complex128", "complex192", "complex256"]: ...
+    @property
+    def str(self) -> L["c16", "c24", "c32"]: ...
+
+# Python objects:
+
+@final
+class ObjectDType(  # type: ignore[misc]
+    _TypeCodes[L["O"], L["O"], L[17]],
+    _NoOrder,
+    _NBit[L[8], L[8]],
+    _SimpleDType[np.object_],
+):
+    @property
+    def hasobject(self) -> L[True]: ...
+    @property
+    def name(self) -> L["object"]: ...
+    @property
+    def str(self) -> L["|O"]: ...
+
+# Flexible:
+
+@final
+class BytesDType(  # type: ignore[misc]
+    _TypeCodes[L["S"], L["S"], L[18]],
+    _NoOrder,
+    _NBit[L[1], _ItemSize_co],
+    _SimpleDType[np.bytes_],
+    Generic[_ItemSize_co],
+):
+    def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+@final
+class StrDType(  # type: ignore[misc]
+    _TypeCodes[L["U"], L["U"], L[19]],
+    _NativeOrder,
+    _NBit[L[4], _ItemSize_co],
+    _SimpleDType[np.str_],
+    Generic[_ItemSize_co],
+):
+    def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ...
+    @property
+    def hasobject(self) -> L[False]: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+@final
+class VoidDType(  # type: ignore[misc]
+    _TypeCodes[L["V"], L["V"], L[20]],
+    _NoOrder,
+    _NBit[L[1], _ItemSize_co],
+    np.dtype[np.void],  # pyright: ignore[reportGeneralTypeIssues]
+    Generic[_ItemSize_co],
+):
+    # NOTE: `VoidDType(...)` raises a `TypeError` at the moment
+    def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ...
+    @property
+    def base(self) -> Self: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+    @property
+    def subdtype(self) -> None: ...
+    @property
+    def name(self) -> LiteralString: ...
+    @property
+    def str(self) -> LiteralString: ...
+
+# Other:
+
+_DateUnit: TypeAlias = L["Y", "M", "W", "D"]
+_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"]
+_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit
+
+@final
+class DateTime64DType(  # type: ignore[misc]
+    _TypeCodes[L["M"], L["M"], L[21]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.datetime64],
+):
+    # NOTE: `DateTime64DType(...)` raises a `TypeError` at the moment
+    # TODO: Once implemented, don't forget the `unit: L["μs"]` overload.
+    def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ...
+    @property
+    def name(self) -> L[
+        "datetime64",
+        "datetime64[Y]",
+        "datetime64[M]",
+        "datetime64[W]",
+        "datetime64[D]",
+        "datetime64[h]",
+        "datetime64[m]",
+        "datetime64[s]",
+        "datetime64[ms]",
+        "datetime64[us]",
+        "datetime64[ns]",
+        "datetime64[ps]",
+        "datetime64[fs]",
+        "datetime64[as]",
+    ]: ...
+    @property
+    def str(self) -> L[
+        "M8",
+        "M8[Y]",
+        "M8[M]",
+        "M8[W]",
+        "M8[D]",
+        "M8[h]",
+        "M8[m]",
+        "M8[s]",
+        "M8[ms]",
+        "M8[us]",
+        "M8[ns]",
+        "M8[ps]",
+        "M8[fs]",
+        "M8[as]",
+    ]: ...
+
+@final
+class TimeDelta64DType(  # type: ignore[misc]
+    _TypeCodes[L["m"], L["m"], L[22]],
+    _NativeOrder,
+    _NBit[L[8], L[8]],
+    _LiteralDType[np.timedelta64],
+):
+    # NOTE: `TimeDelta64DType(...)` raises a `TypeError` at the moment
+    # TODO: Once implemented, don't forget to overload on `unit: L["μs"]`.
+    def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ...
+    @property
+    def name(self) -> L[
+        "timedelta64",
+        "timedelta64[Y]",
+        "timedelta64[M]",
+        "timedelta64[W]",
+        "timedelta64[D]",
+        "timedelta64[h]",
+        "timedelta64[m]",
+        "timedelta64[s]",
+        "timedelta64[ms]",
+        "timedelta64[us]",
+        "timedelta64[ns]",
+        "timedelta64[ps]",
+        "timedelta64[fs]",
+        "timedelta64[as]",
+    ]: ...
+    @property
+    def str(self) -> L[
+        "m8",
+        "m8[Y]",
+        "m8[M]",
+        "m8[W]",
+        "m8[D]",
+        "m8[h]",
+        "m8[m]",
+        "m8[s]",
+        "m8[ms]",
+        "m8[us]",
+        "m8[ns]",
+        "m8[ps]",
+        "m8[fs]",
+        "m8[as]",
+    ]: ...
+
+_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True)
+
+@final
+class StringDType(  # type: ignore[misc]
+    _TypeCodes[L["T"], L["T"], L[2056]],
+    _NativeOrder,
+    _NBit[L[8], L[16]],
+    # TODO(jorenham): change once we have a string scalar type:
+    # https://github.com/numpy/numpy/issues/28165
+    np.dtype[str],  # type: ignore[type-var]  # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments]
+    Generic[_NaObjectT_co],
+):
+    @property
+    def na_object(self) -> _NaObjectT_co: ...
+    @property
+    def coerce(self) -> L[True]: ...
+
+    #
+    @overload
+    def __new__(cls, /, *, coerce: bool = True) -> Self: ...
+    @overload
+    def __new__(cls, /, *, na_object: _NaObjectT_co, coerce: bool = True) -> Self: ...
+
+    #
+    def __getitem__(self, key: Never, /) -> NoReturn: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
+    @property
+    def fields(self) -> None: ...
+    @property
+    def base(self) -> Self: ...
+    @property
+    def ndim(self) -> L[0]: ...
+    @property
+    def shape(self) -> tuple[()]: ...
+
+    #
+    @property
+    def name(self) -> L["StringDType64", "StringDType128"]: ...
+    @property
+    def subdtype(self) -> None: ...
+    @property
+    def type(self) -> type[str]: ...
+    @property
+    def str(self) -> L["|T8", "|T16"]: ...
+
+    #
+    @property
+    def hasobject(self) -> L[True]: ...
+    @property
+    def isalignedstruct(self) -> L[False]: ...
+    @property
+    def isnative(self) -> L[True]: ...
diff --git a/python/numpy/exceptions.py b/python/numpy/exceptions.py
new file mode 100644
index 000000000..0e8688ae9
--- /dev/null
+++ b/python/numpy/exceptions.py
@@ -0,0 +1,247 @@
+"""
+Exceptions and Warnings
+=======================
+
+General exceptions used by NumPy.  Note that some exceptions may be module
+specific, such as linear algebra errors.
+
+.. versionadded:: NumPy 1.25
+
+    The exceptions module is new in NumPy 1.25.  Older exceptions remain
+    available through the main NumPy namespace for compatibility.
+
+.. currentmodule:: numpy.exceptions
+
+Warnings
+--------
+.. autosummary::
+   :toctree: generated/
+
+   ComplexWarning             Given when converting complex to real.
+   VisibleDeprecationWarning  Same as a DeprecationWarning, but more visible.
+   RankWarning                Issued when the design matrix is rank deficient.
+
+Exceptions
+----------
+.. autosummary::
+   :toctree: generated/
+
+   AxisError             Given when an axis was invalid.
+   DTypePromotionError   Given when no common dtype could be found.
+   TooHardError          Error specific to `numpy.shares_memory`.
+
+"""
+
+
+__all__ = [
+    "ComplexWarning", "VisibleDeprecationWarning", "ModuleDeprecationWarning",
+    "TooHardError", "AxisError", "DTypePromotionError"]
+
+
+# Disallow reloading this module so as to preserve the identities of the
+# classes defined here.
+if '_is_loaded' in globals():
+    raise RuntimeError('Reloading numpy.exceptions is not allowed')
+_is_loaded = True
+
+
+class ComplexWarning(RuntimeWarning):
+    """
+    The warning raised when casting a complex dtype to a real dtype.
+
+    As implemented, casting a complex number to a real discards its imaginary
+    part, but this behavior may not be what the user actually wants.
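+
+    A small sketch of when this fires (illustrative; the exact warning text
+    may vary between versions):
+
+    >>> import warnings
+    >>> import numpy as np
+    >>> with warnings.catch_warnings(record=True) as w:
+    ...     warnings.simplefilter("always")
+    ...     _ = np.array([1 + 2j]).astype(np.float64)
+    >>> w[0].category.__name__
+    'ComplexWarning'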
+
+    """
+    pass
+
+
+class ModuleDeprecationWarning(DeprecationWarning):
+    """Module deprecation warning.
+
+    .. warning::
+
+        This warning should not be used, since nose testing is not relevant
+        anymore.
+
+    The nose tester turns ordinary Deprecation warnings into test failures.
+    That makes it hard to deprecate whole modules, because they get
+    imported by default. So this is a special Deprecation warning that the
+    nose tester will let pass without making tests fail.
+
+    """
+    pass
+
+
+class VisibleDeprecationWarning(UserWarning):
+    """Visible deprecation warning.
+
+    By default, python will not show deprecation warnings, so this class
+    can be used when a very visible warning is helpful, for example because
+    the usage is most likely a user bug.
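+
+    A sketch of how a library might emit it (the message is illustrative):
+
+    >>> import warnings
+    >>> import numpy as np
+    >>> warnings.warn("this usage is a bug",
+    ...               np.exceptions.VisibleDeprecationWarning)  # doctest: +SKIP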
+
+    """
+    pass
+
+
+class RankWarning(RuntimeWarning):
+    """Matrix rank warning.
+
+    Issued by polynomial functions when the design matrix is rank deficient.
+
+    """
+    pass
+
+
+# Exception used in shares_memory()
+class TooHardError(RuntimeError):
+    """``max_work`` was exceeded.
+
+    This is raised whenever the maximum number of candidate solutions
+    to consider specified by the ``max_work`` parameter is exceeded.
+    Assigning a finite number to ``max_work`` may have caused the operation
+    to fail.
+
+    """
+    pass
+
+
+class AxisError(ValueError, IndexError):
+    """Axis supplied was invalid.
+
+    This is raised whenever an ``axis`` parameter is specified that is larger
+    than the number of array dimensions.
+    For compatibility with code written against older numpy versions, which
+    raised a mixture of :exc:`ValueError` and :exc:`IndexError` for this
+    situation, this exception subclasses both to ensure that
+    ``except ValueError`` and ``except IndexError`` statements continue
+    to catch ``AxisError``.
+
+    Parameters
+    ----------
+    axis : int or str
+        The out of bounds axis or a custom exception message.
+        If an axis is provided, then `ndim` should be specified as well.
+    ndim : int, optional
+        The number of array dimensions.
+    msg_prefix : str, optional
+        A prefix for the exception message.
+
+    Attributes
+    ----------
+    axis : int, optional
+        The out of bounds axis or ``None`` if a custom exception
+        message was provided. This should be the axis as passed by
+        the user, before any normalization to resolve negative indices.
+
+        .. versionadded:: 1.22
+    ndim : int, optional
+        The number of array dimensions or ``None`` if a custom exception
+        message was provided.
+
+        .. versionadded:: 1.22
+
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> array_1d = np.arange(10)
+    >>> np.cumsum(array_1d, axis=1)
+    Traceback (most recent call last):
+      ...
+    numpy.exceptions.AxisError: axis 1 is out of bounds for array of dimension 1
+
+    Negative axes are preserved:
+
+    >>> np.cumsum(array_1d, axis=-2)
+    Traceback (most recent call last):
+      ...
+    numpy.exceptions.AxisError: axis -2 is out of bounds for array of dimension 1
+
+    The class constructor generally takes the axis and arrays'
+    dimensionality as arguments:
+
+    >>> print(np.exceptions.AxisError(2, 1, msg_prefix='error'))
+    error: axis 2 is out of bounds for array of dimension 1
+
+    Alternatively, a custom exception message can be passed:
+
+    >>> print(np.exceptions.AxisError('Custom error message'))
+    Custom error message
+
+    """
+
+    __slots__ = ("_msg", "axis", "ndim")
+
+    def __init__(self, axis, ndim=None, msg_prefix=None):
+        if ndim is msg_prefix is None:
+            # single-argument form: directly set the error message
+            self._msg = axis
+            self.axis = None
+            self.ndim = None
+        else:
+            self._msg = msg_prefix
+            self.axis = axis
+            self.ndim = ndim
+
+    def __str__(self):
+        axis = self.axis
+        ndim = self.ndim
+
+        if axis is ndim is None:
+            return self._msg
+        else:
+            msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
+            if self._msg is not None:
+                msg = f"{self._msg}: {msg}"
+            return msg
+
+
+class DTypePromotionError(TypeError):
+    """Multiple DTypes could not be converted to a common one.
+
+    This exception derives from ``TypeError`` and is raised whenever dtypes
+    cannot be converted to a single common one.  This can be because they
+    are of a different category/class or incompatible instances of the same
+    one (see Examples).
+
+    Notes
+    -----
+    Many functions will use promotion to find the correct result and
+    implementation.  For these functions the error will typically be chained
+    with a more specific error indicating that no implementation was found
+    for the input dtypes.
+
+    Typically promotion should be considered "invalid" between the dtypes of
+    two arrays when `arr1 == arr2` can safely return all ``False`` because the
+    dtypes are fundamentally different.
+
+    Examples
+    --------
+    Datetimes and complex numbers are incompatible classes and cannot be
+    promoted:
+
+    >>> import numpy as np
+    >>> np.result_type(np.dtype("M8[s]"), np.complex128)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+     ...
+    DTypePromotionError: The DType <class 'numpy.dtype[datetime64]'> could not
+    be promoted by <class 'numpy.dtype[complex128]'>. This means that no common
+    DType exists for the given inputs. For example they cannot be stored in a
+    single array unless the dtype is `object`. The full list of DTypes is:
+    (<class 'numpy.dtype[datetime64]'>, <class 'numpy.dtype[complex128]'>)
+
+    Structured dtypes can mismatch as well, and the same
+    ``DTypePromotionError`` is raised, for example when two structured
+    dtypes differ in their number of fields:
+
+    >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)])
+    >>> dtype2 = np.dtype([("field1", np.float64)])
+    >>> np.promote_types(dtype1, dtype2)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+     ...
+    DTypePromotionError: field names `('field1', 'field2')` and `('field1',)`
+    mismatch.
+
+    """  # noqa: E501
+    pass
diff --git a/python/numpy/exceptions.pyi b/python/numpy/exceptions.pyi
new file mode 100644
index 000000000..9bcc097df
--- /dev/null
+++ b/python/numpy/exceptions.pyi
@@ -0,0 +1,27 @@
+from typing import overload
+
+__all__ = [
+    "ComplexWarning",
+    "VisibleDeprecationWarning",
+    "ModuleDeprecationWarning",
+    "TooHardError",
+    "AxisError",
+    "DTypePromotionError",
+]
+
+class ComplexWarning(RuntimeWarning): ...
+class ModuleDeprecationWarning(DeprecationWarning): ...
+class VisibleDeprecationWarning(UserWarning): ...
+class RankWarning(RuntimeWarning): ...
+class TooHardError(RuntimeError): ...
+class DTypePromotionError(TypeError): ...
+
+class AxisError(ValueError, IndexError):
+    __slots__ = "_msg", "axis", "ndim"
+
+    axis: int | None
+    ndim: int | None
+    @overload
+    def __init__(self, axis: str, ndim: None = ..., msg_prefix: None = ...) -> None: ...
+    @overload
+    def __init__(self, axis: int, ndim: int, msg_prefix: str | None = ...) -> None: ...
diff --git a/python/numpy/f2py/__init__.py b/python/numpy/f2py/__init__.py
new file mode 100644
index 000000000..e34dd99ae
--- /dev/null
+++ b/python/numpy/f2py/__init__.py
@@ -0,0 +1,86 @@
+"""Fortran to Python Interface Generator.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the terms
+of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
+"""
+__all__ = ['run_main', 'get_include']
+
+import os
+import subprocess
+import sys
+import warnings
+
+from numpy.exceptions import VisibleDeprecationWarning
+
+from . import diagnose, f2py2e
+
+run_main = f2py2e.run_main
+main = f2py2e.main
+
+
+def get_include():
+    """
+    Return the directory that contains the ``fortranobject.c`` and ``.h`` files.
+
+    .. note::
+
+        This function is not needed when building an extension with
+        `numpy.distutils` directly from ``.f`` and/or ``.pyf`` files
+        in one go.
+
+    Python extension modules built with f2py-generated code need to use
+    ``fortranobject.c`` as a source file, and include the ``fortranobject.h``
+    header. This function can be used to obtain the directory containing
+    both of these files.
+
+    Returns
+    -------
+    include_path : str
+        Absolute path to the directory containing ``fortranobject.c`` and
+        ``fortranobject.h``.
+
+    Notes
+    -----
+    .. versionadded:: 1.21.1
+
+    Unless the build system you are using has specific support for f2py,
+    building a Python extension using a ``.pyf`` signature file is a two-step
+    process. For a module ``mymod``:
+
+    * Step 1: run ``python -m numpy.f2py mymod.pyf --quiet``. This
+      generates ``mymodmodule.c`` and (if needed)
+      ``mymod-f2pywrappers.f`` files next to ``mymod.pyf``.
+    * Step 2: build your Python extension module. This requires the
+      following source files:
+
+      * ``mymodmodule.c``
+      * ``mymod-f2pywrappers.f`` (if it was generated in Step 1)
+      * ``fortranobject.c``
+
+    See Also
+    --------
+    numpy.get_include : function that returns the numpy include directory
+
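+    A minimal sketch of its use in a hand-rolled build script, with
+    ``mymod`` as in the steps above:
+
+    >>> import numpy.f2py
+    >>> incdir = numpy.f2py.get_include()
+    >>> sources = ['mymodmodule.c', incdir + '/fortranobject.c']  # doctest: +SKIP
+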
+    """
+    return os.path.join(os.path.dirname(__file__), 'src')
+
+
+def __getattr__(attr):
+
+    # Avoid importing things that aren't needed for building
+    # which might import the main numpy module
+    if attr == "test":
+        from numpy._pytesttester import PytestTester
+        test = PytestTester(__name__)
+        return test
+
+    else:
+        raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")
+
+
+def __dir__():
+    return list(globals().keys() | {"test"})
diff --git a/python/numpy/f2py/__init__.pyi b/python/numpy/f2py/__init__.pyi
new file mode 100644
index 000000000..d12f47e80
--- /dev/null
+++ b/python/numpy/f2py/__init__.pyi
@@ -0,0 +1,6 @@
+from .f2py2e import main as main
+from .f2py2e import run_main
+
+__all__ = ["get_include", "run_main"]
+
+def get_include() -> str: ...
diff --git a/python/numpy/f2py/__main__.py b/python/numpy/f2py/__main__.py
new file mode 100644
index 000000000..936a753a2
--- /dev/null
+++ b/python/numpy/f2py/__main__.py
@@ -0,0 +1,5 @@
+# See:
+# https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e
+from numpy.f2py.f2py2e import main
+
+main()
diff --git a/python/numpy/f2py/__pycache__/__init__.cpython-312.pyc b/python/numpy/f2py/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..00516b0a0
Binary files /dev/null and b/python/numpy/f2py/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/__main__.cpython-312.pyc b/python/numpy/f2py/__pycache__/__main__.cpython-312.pyc
new file mode 100644
index 000000000..7a4921ab7
Binary files /dev/null and b/python/numpy/f2py/__pycache__/__main__.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/__version__.cpython-312.pyc b/python/numpy/f2py/__pycache__/__version__.cpython-312.pyc
new file mode 100644
index 000000000..90753e4c9
Binary files /dev/null and b/python/numpy/f2py/__pycache__/__version__.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/_isocbind.cpython-312.pyc b/python/numpy/f2py/__pycache__/_isocbind.cpython-312.pyc
new file mode 100644
index 000000000..8dcff6970
Binary files /dev/null and b/python/numpy/f2py/__pycache__/_isocbind.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc b/python/numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc
new file mode 100644
index 000000000..816ad3a4b
Binary files /dev/null and b/python/numpy/f2py/__pycache__/_src_pyf.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc b/python/numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc
new file mode 100644
index 000000000..b1a357999
Binary files /dev/null and b/python/numpy/f2py/__pycache__/auxfuncs.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/capi_maps.cpython-312.pyc b/python/numpy/f2py/__pycache__/capi_maps.cpython-312.pyc
new file mode 100644
index 000000000..277170ac6
Binary files /dev/null and b/python/numpy/f2py/__pycache__/capi_maps.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/cb_rules.cpython-312.pyc b/python/numpy/f2py/__pycache__/cb_rules.cpython-312.pyc
new file mode 100644
index 000000000..30177702c
Binary files /dev/null and b/python/numpy/f2py/__pycache__/cb_rules.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/cfuncs.cpython-312.pyc b/python/numpy/f2py/__pycache__/cfuncs.cpython-312.pyc
new file mode 100644
index 000000000..59dfebe4f
Binary files /dev/null and b/python/numpy/f2py/__pycache__/cfuncs.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/common_rules.cpython-312.pyc b/python/numpy/f2py/__pycache__/common_rules.cpython-312.pyc
new file mode 100644
index 000000000..ffedf7ed2
Binary files /dev/null and b/python/numpy/f2py/__pycache__/common_rules.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/crackfortran.cpython-312.pyc b/python/numpy/f2py/__pycache__/crackfortran.cpython-312.pyc
new file mode 100644
index 000000000..8d6d51b93
Binary files /dev/null and b/python/numpy/f2py/__pycache__/crackfortran.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/diagnose.cpython-312.pyc b/python/numpy/f2py/__pycache__/diagnose.cpython-312.pyc
new file mode 100644
index 000000000..e60c4c1df
Binary files /dev/null and b/python/numpy/f2py/__pycache__/diagnose.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/f2py2e.cpython-312.pyc b/python/numpy/f2py/__pycache__/f2py2e.cpython-312.pyc
new file mode 100644
index 000000000..fb971cefe
Binary files /dev/null and b/python/numpy/f2py/__pycache__/f2py2e.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc b/python/numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc
new file mode 100644
index 000000000..bfc97cb83
Binary files /dev/null and b/python/numpy/f2py/__pycache__/f90mod_rules.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/func2subr.cpython-312.pyc b/python/numpy/f2py/__pycache__/func2subr.cpython-312.pyc
new file mode 100644
index 000000000..605f97afe
Binary files /dev/null and b/python/numpy/f2py/__pycache__/func2subr.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/rules.cpython-312.pyc b/python/numpy/f2py/__pycache__/rules.cpython-312.pyc
new file mode 100644
index 000000000..d20885f65
Binary files /dev/null and b/python/numpy/f2py/__pycache__/rules.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/symbolic.cpython-312.pyc b/python/numpy/f2py/__pycache__/symbolic.cpython-312.pyc
new file mode 100644
index 000000000..44c8ed09e
Binary files /dev/null and b/python/numpy/f2py/__pycache__/symbolic.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__pycache__/use_rules.cpython-312.pyc b/python/numpy/f2py/__pycache__/use_rules.cpython-312.pyc
new file mode 100644
index 000000000..13301230a
Binary files /dev/null and b/python/numpy/f2py/__pycache__/use_rules.cpython-312.pyc differ
diff --git a/python/numpy/f2py/__version__.py b/python/numpy/f2py/__version__.py
new file mode 100644
index 000000000..8d12d955a
--- /dev/null
+++ b/python/numpy/f2py/__version__.py
@@ -0,0 +1 @@
+from numpy.version import version  # noqa: F401
diff --git a/python/numpy/f2py/__version__.pyi b/python/numpy/f2py/__version__.pyi
new file mode 100644
index 000000000..85b422529
--- /dev/null
+++ b/python/numpy/f2py/__version__.pyi
@@ -0,0 +1 @@
+from numpy.version import version as version
diff --git a/python/numpy/f2py/_backends/__init__.py b/python/numpy/f2py/_backends/__init__.py
new file mode 100644
index 000000000..e91393c14
--- /dev/null
+++ b/python/numpy/f2py/_backends/__init__.py
@@ -0,0 +1,9 @@
+def f2py_build_generator(name):
+    if name == "meson":
+        from ._meson import MesonBackend
+        return MesonBackend
+    elif name == "distutils":
+        from ._distutils import DistutilsBackend
+        return DistutilsBackend
+    else:
+        raise ValueError(f"Unknown backend: {name}")
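+
+
+# A minimal sketch of intended use (arguments elided; see Backend.__init__
+# in `_backend.py` for the full parameter list):
+#
+#   backend_cls = f2py_build_generator("meson")  # -> MesonBackend
+#   backend = backend_cls(modulename, sources, ...)
+#   backend.compile()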
diff --git a/python/numpy/f2py/_backends/__init__.pyi b/python/numpy/f2py/_backends/__init__.pyi
new file mode 100644
index 000000000..43625c680
--- /dev/null
+++ b/python/numpy/f2py/_backends/__init__.pyi
@@ -0,0 +1,5 @@
+from typing import Literal as L
+
+from ._backend import Backend
+
+def f2py_build_generator(name: L["distutils", "meson"]) -> Backend: ...
diff --git a/python/numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc b/python/numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..e7e261555
Binary files /dev/null and b/python/numpy/f2py/_backends/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc b/python/numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc
new file mode 100644
index 000000000..d594374e0
Binary files /dev/null and b/python/numpy/f2py/_backends/__pycache__/_backend.cpython-312.pyc differ
diff --git a/python/numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc b/python/numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc
new file mode 100644
index 000000000..550084a7f
Binary files /dev/null and b/python/numpy/f2py/_backends/__pycache__/_distutils.cpython-312.pyc differ
diff --git a/python/numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc b/python/numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc
new file mode 100644
index 000000000..b68035cd7
Binary files /dev/null and b/python/numpy/f2py/_backends/__pycache__/_meson.cpython-312.pyc differ
diff --git a/python/numpy/f2py/_backends/_backend.py b/python/numpy/f2py/_backends/_backend.py
new file mode 100644
index 000000000..5dda40043
--- /dev/null
+++ b/python/numpy/f2py/_backends/_backend.py
@@ -0,0 +1,44 @@
+from abc import ABC, abstractmethod
+
+
+class Backend(ABC):
+    def __init__(
+        self,
+        modulename,
+        sources,
+        extra_objects,
+        build_dir,
+        include_dirs,
+        library_dirs,
+        libraries,
+        define_macros,
+        undef_macros,
+        f2py_flags,
+        sysinfo_flags,
+        fc_flags,
+        flib_flags,
+        setup_flags,
+        remove_build_dir,
+        extra_dat,
+    ):
+        self.modulename = modulename
+        self.sources = sources
+        self.extra_objects = extra_objects
+        self.build_dir = build_dir
+        self.include_dirs = include_dirs
+        self.library_dirs = library_dirs
+        self.libraries = libraries
+        self.define_macros = define_macros
+        self.undef_macros = undef_macros
+        self.f2py_flags = f2py_flags
+        self.sysinfo_flags = sysinfo_flags
+        self.fc_flags = fc_flags
+        self.flib_flags = flib_flags
+        self.setup_flags = setup_flags
+        self.remove_build_dir = remove_build_dir
+        self.extra_dat = extra_dat
+
+    @abstractmethod
+    def compile(self) -> None:
+        """Compile the wrapper."""
+        pass
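+
+
+# A minimal sketch of a concrete backend (hypothetical, for illustration only):
+#
+#   class EchoBackend(Backend):
+#       def compile(self) -> None:
+#           print(f"would build {self.modulename} from {self.sources}")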
diff --git a/python/numpy/f2py/_backends/_backend.pyi b/python/numpy/f2py/_backends/_backend.pyi
new file mode 100644
index 000000000..ed24519ab
--- /dev/null
+++ b/python/numpy/f2py/_backends/_backend.pyi
@@ -0,0 +1,46 @@
+import abc
+from pathlib import Path
+from typing import Any, Final
+
+class Backend(abc.ABC):
+    modulename: Final[str]
+    sources: Final[list[str | Path]]
+    extra_objects: Final[list[str]]
+    build_dir: Final[str | Path]
+    include_dirs: Final[list[str | Path]]
+    library_dirs: Final[list[str | Path]]
+    libraries: Final[list[str]]
+    define_macros: Final[list[tuple[str, str | None]]]
+    undef_macros: Final[list[str]]
+    f2py_flags: Final[list[str]]
+    sysinfo_flags: Final[list[str]]
+    fc_flags: Final[list[str]]
+    flib_flags: Final[list[str]]
+    setup_flags: Final[list[str]]
+    remove_build_dir: Final[bool]
+    extra_dat: Final[dict[str, Any]]
+
+    def __init__(
+        self,
+        /,
+        modulename: str,
+        sources: list[str | Path],
+        extra_objects: list[str],
+        build_dir: str | Path,
+        include_dirs: list[str | Path],
+        library_dirs: list[str | Path],
+        libraries: list[str],
+        define_macros: list[tuple[str, str | None]],
+        undef_macros: list[str],
+        f2py_flags: list[str],
+        sysinfo_flags: list[str],
+        fc_flags: list[str],
+        flib_flags: list[str],
+        setup_flags: list[str],
+        remove_build_dir: bool,
+        extra_dat: dict[str, Any],
+    ) -> None: ...
+
+    #
+    @abc.abstractmethod
+    def compile(self) -> None: ...
diff --git a/python/numpy/f2py/_backends/_distutils.py b/python/numpy/f2py/_backends/_distutils.py
new file mode 100644
index 000000000..5c8f1092b
--- /dev/null
+++ b/python/numpy/f2py/_backends/_distutils.py
@@ -0,0 +1,76 @@
+import os
+import shutil
+import sys
+import warnings
+
+from numpy.distutils.core import Extension, setup
+from numpy.distutils.misc_util import dict_append
+from numpy.distutils.system_info import get_info
+from numpy.exceptions import VisibleDeprecationWarning
+
+from ._backend import Backend
+
+
+class DistutilsBackend(Backend):
+    def __init__(sef, *args, **kwargs):
+        warnings.warn(
+            "\ndistutils has been deprecated since NumPy 1.26.x\n"
+            "Use the Meson backend instead, or generate wrappers"
+            " without -c and use a custom build script",
+            VisibleDeprecationWarning,
+            stacklevel=2,
+        )
+        super().__init__(*args, **kwargs)
+
+    def compile(self):
+        num_info = {}
+        if num_info:
+            self.include_dirs.extend(num_info.get("include_dirs", []))
+        ext_args = {
+            "name": self.modulename,
+            "sources": self.sources,
+            "include_dirs": self.include_dirs,
+            "library_dirs": self.library_dirs,
+            "libraries": self.libraries,
+            "define_macros": self.define_macros,
+            "undef_macros": self.undef_macros,
+            "extra_objects": self.extra_objects,
+            "f2py_options": self.f2py_flags,
+        }
+
+        if self.sysinfo_flags:
+            for n in self.sysinfo_flags:
+                i = get_info(n)
+                if not i:
+                    print(
+                        f"No {n!r} resources found"
+                        "in system (try `f2py --help-link`)"
+                    )
+                dict_append(ext_args, **i)
+
+        ext = Extension(**ext_args)
+
+        sys.argv = [sys.argv[0]] + self.setup_flags
+        sys.argv.extend(
+            [
+                "build",
+                "--build-temp",
+                self.build_dir,
+                "--build-base",
+                self.build_dir,
+                "--build-platlib",
+                ".",
+                "--disable-optimization",
+            ]
+        )
+
+        if self.fc_flags:
+            sys.argv.extend(["config_fc"] + self.fc_flags)
+        if self.flib_flags:
+            sys.argv.extend(["build_ext"] + self.flib_flags)
+
+        setup(ext_modules=[ext])
+
+        if self.remove_build_dir and os.path.exists(self.build_dir):
+            print(f"Removing build directory {self.build_dir}")
+            shutil.rmtree(self.build_dir)
diff --git a/python/numpy/f2py/_backends/_distutils.pyi b/python/numpy/f2py/_backends/_distutils.pyi
new file mode 100644
index 000000000..56bbf7e5b
--- /dev/null
+++ b/python/numpy/f2py/_backends/_distutils.pyi
@@ -0,0 +1,13 @@
+from typing_extensions import deprecated, override
+
+from ._backend import Backend
+
+class DistutilsBackend(Backend):
+    @deprecated(
+        "distutils has been deprecated since NumPy 1.26.x. Use the Meson backend instead, or generate wrappers without -c and "
+        "use a custom build script"
+    )
+    # NOTE: the `sef` typo matches runtime
+    def __init__(sef, *args: object, **kwargs: object) -> None: ...
+    @override
+    def compile(self) -> None: ...
diff --git a/python/numpy/f2py/_backends/_meson.py b/python/numpy/f2py/_backends/_meson.py
new file mode 100644
index 000000000..cbd9b0e32
--- /dev/null
+++ b/python/numpy/f2py/_backends/_meson.py
@@ -0,0 +1,231 @@
+import errno
+import os
+import re
+import shutil
+import subprocess
+import sys
+from itertools import chain
+from pathlib import Path
+from string import Template
+
+from ._backend import Backend
+
+
+class MesonTemplate:
+    """Template meson build file generation class."""
+
+    def __init__(
+        self,
+        modulename: str,
+        sources: list[Path],
+        deps: list[str],
+        libraries: list[str],
+        library_dirs: list[Path],
+        include_dirs: list[Path],
+        object_files: list[Path],
+        linker_args: list[str],
+        fortran_args: list[str],
+        build_type: str,
+        python_exe: str,
+    ):
+        self.modulename = modulename
+        self.build_template_path = (
+            Path(__file__).parent.absolute() / "meson.build.template"
+        )
+        self.sources = sources
+        self.deps = deps
+        self.libraries = libraries
+        self.library_dirs = library_dirs
+        if include_dirs is not None:
+            self.include_dirs = include_dirs
+        else:
+            self.include_dirs = []
+        self.substitutions = {}
+        self.objects = object_files
+        # Convert args to '' wrapped variant for meson
+        self.fortran_args = [
+            f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x
+            for x in fortran_args
+        ]
+        self.pipeline = [
+            self.initialize_template,
+            self.sources_substitution,
+            self.deps_substitution,
+            self.include_substitution,
+            self.libraries_substitution,
+            self.fortran_args_substitution,
+        ]
+        self.build_type = build_type
+        self.python_exe = python_exe
+        self.indent = " " * 21
+
+    def meson_build_template(self) -> str:
+        if not self.build_template_path.is_file():
+            raise FileNotFoundError(
+                errno.ENOENT,
+                "Meson build template"
+                f" {self.build_template_path.absolute()}"
+                " does not exist.",
+            )
+        return self.build_template_path.read_text()
+
+    def initialize_template(self) -> None:
+        self.substitutions["modulename"] = self.modulename
+        self.substitutions["buildtype"] = self.build_type
+        self.substitutions["python"] = self.python_exe
+
+    def sources_substitution(self) -> None:
+        self.substitutions["source_list"] = ",\n".join(
+            [f"{self.indent}'''{source}'''," for source in self.sources]
+        )
+
+    def deps_substitution(self) -> None:
+        self.substitutions["dep_list"] = f",\n{self.indent}".join(
+            [f"{self.indent}dependency('{dep}')," for dep in self.deps]
+        )
+
+    def libraries_substitution(self) -> None:
+        self.substitutions["lib_dir_declarations"] = "\n".join(
+            [
+                f"lib_dir_{i} = declare_dependency(link_args : ['''-L{lib_dir}'''])"
+                for i, lib_dir in enumerate(self.library_dirs)
+            ]
+        )
+
+        self.substitutions["lib_declarations"] = "\n".join(
+            [
+                f"{lib.replace('.', '_')} = declare_dependency(link_args : ['-l{lib}'])"
+                for lib in self.libraries
+            ]
+        )
+
+        self.substitutions["lib_list"] = f"\n{self.indent}".join(
+            [f"{self.indent}{lib.replace('.', '_')}," for lib in self.libraries]
+        )
+        self.substitutions["lib_dir_list"] = f"\n{self.indent}".join(
+            [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))]
+        )
+
+    def include_substitution(self) -> None:
+        self.substitutions["inc_list"] = f",\n{self.indent}".join(
+            [f"{self.indent}'''{inc}'''," for inc in self.include_dirs]
+        )
+
+    def fortran_args_substitution(self) -> None:
+        if self.fortran_args:
+            self.substitutions["fortran_args"] = (
+                f"{self.indent}fortran_args: [{', '.join(list(self.fortran_args))}],"
+            )
+        else:
+            self.substitutions["fortran_args"] = ""
+
+    def generate_meson_build(self):
+        for node in self.pipeline:
+            node()
+        template = Template(self.meson_build_template())
+        meson_build = template.substitute(self.substitutions)
+        meson_build = meson_build.replace(",,", ",")
+        return meson_build
+
+
+class MesonBackend(Backend):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.dependencies = self.extra_dat.get("dependencies", [])
+        self.meson_build_dir = "bbdir"
+        self.build_type = (
+            "debug" if any("debug" in flag for flag in self.fc_flags) else "release"
+        )
+        self.fc_flags = _get_flags(self.fc_flags)
+
+    def _move_exec_to_root(self, build_dir: Path):
+        walk_dir = Path(build_dir) / self.meson_build_dir
+        path_objects = chain(
+            walk_dir.glob(f"{self.modulename}*.so"),
+            walk_dir.glob(f"{self.modulename}*.pyd"),
+            walk_dir.glob(f"{self.modulename}*.dll"),
+        )
+        # Same behavior as distutils
+        # https://github.com/numpy/numpy/issues/24874#issuecomment-1835632293
+        for path_object in path_objects:
+            dest_path = Path.cwd() / path_object.name
+            if dest_path.exists():
+                dest_path.unlink()
+            shutil.copy2(path_object, dest_path)
+            os.remove(path_object)
+
+    def write_meson_build(self, build_dir: Path) -> Path:
+        """Write the meson.build file at the specified location and return its path."""
+        meson_template = MesonTemplate(
+            self.modulename,
+            self.sources,
+            self.dependencies,
+            self.libraries,
+            self.library_dirs,
+            self.include_dirs,
+            self.extra_objects,
+            self.flib_flags,
+            self.fc_flags,
+            self.build_type,
+            sys.executable,
+        )
+        src = meson_template.generate_meson_build()
+        Path(build_dir).mkdir(parents=True, exist_ok=True)
+        meson_build_file = Path(build_dir) / "meson.build"
+        meson_build_file.write_text(src)
+        return meson_build_file
+
+    def _run_subprocess_command(self, command, cwd):
+        subprocess.run(command, cwd=cwd, check=True)
+
+    def run_meson(self, build_dir: Path):
+        setup_command = ["meson", "setup", self.meson_build_dir]
+        self._run_subprocess_command(setup_command, build_dir)
+        compile_command = ["meson", "compile", "-C", self.meson_build_dir]
+        self._run_subprocess_command(compile_command, build_dir)
+
+    def compile(self) -> None:
+        self.sources = _prepare_sources(self.modulename, self.sources, self.build_dir)
+        self.write_meson_build(self.build_dir)
+        self.run_meson(self.build_dir)
+        self._move_exec_to_root(self.build_dir)
+
+
+def _prepare_sources(mname, sources, bdir):
+    extended_sources = sources.copy()
+    Path(bdir).mkdir(parents=True, exist_ok=True)
+    # Copy sources
+    for source in sources:
+        if Path(source).exists() and Path(source).is_file():
+            shutil.copy(source, bdir)
+    generated_sources = [
+        Path(f"{mname}module.c"),
+        Path(f"{mname}-f2pywrappers2.f90"),
+        Path(f"{mname}-f2pywrappers.f"),
+    ]
+    bdir = Path(bdir)
+    for generated_source in generated_sources:
+        if generated_source.exists():
+            shutil.copy(generated_source, bdir / generated_source.name)
+            extended_sources.append(generated_source.name)
+            generated_source.unlink()
+    extended_sources = [
+        Path(source).name
+        for source in extended_sources
+        if not Path(source).suffix == ".pyf"
+    ]
+    return extended_sources
+
+
+def _get_flags(fc_flags):
+    flag_values = []
+    flag_pattern = re.compile(r"--f(77|90)flags=(.*)")
+    for flag in fc_flags:
+        match_result = flag_pattern.match(flag)
+        if match_result:
+            values = match_result.group(2).strip().split()
+            values = [val.strip("'\"") for val in values]
+            flag_values.extend(values)
+    # Hacky way to preserve order of flags
+    unique_flags = list(dict.fromkeys(flag_values))
+    return unique_flags
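+
+
+# A small sketch of the parsing above (values are illustrative):
+#
+#   _get_flags(["--f90flags='-O3 -fopenmp'"]) -> ['-O3', '-fopenmp']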
diff --git a/python/numpy/f2py/_backends/_meson.pyi b/python/numpy/f2py/_backends/_meson.pyi
new file mode 100644
index 000000000..b9f959537
--- /dev/null
+++ b/python/numpy/f2py/_backends/_meson.pyi
@@ -0,0 +1,63 @@
+from collections.abc import Callable
+from pathlib import Path
+from typing import Final
+from typing import Literal as L
+
+from typing_extensions import override
+
+from ._backend import Backend
+
+class MesonTemplate:
+    modulename: Final[str]
+    build_template_path: Final[Path]
+    sources: Final[list[str | Path]]
+    deps: Final[list[str]]
+    libraries: Final[list[str]]
+    library_dirs: Final[list[str | Path]]
+    include_dirs: Final[list[str | Path]]
+    substitutions: Final[dict[str, str]]
+    objects: Final[list[str | Path]]
+    fortran_args: Final[list[str]]
+    pipeline: Final[list[Callable[[], None]]]
+    build_type: Final[str]
+    python_exe: Final[str]
+    indent: Final[str]
+
+    def __init__(
+        self,
+        /,
+        modulename: str,
+        sources: list[Path],
+        deps: list[str],
+        libraries: list[str],
+        library_dirs: list[str | Path],
+        include_dirs: list[str | Path],
+        object_files: list[str | Path],
+        linker_args: list[str],
+        fortran_args: list[str],
+        build_type: str,
+        python_exe: str,
+    ) -> None: ...
+
+    #
+    def initialize_template(self) -> None: ...
+    def sources_substitution(self) -> None: ...
+    def deps_substitution(self) -> None: ...
+    def libraries_substitution(self) -> None: ...
+    def include_substitution(self) -> None: ...
+    def fortran_args_substitution(self) -> None: ...
+
+    #
+    def meson_build_template(self) -> str: ...
+    def generate_meson_build(self) -> str: ...
+
+class MesonBackend(Backend):
+    dependencies: list[str]
+    meson_build_dir: L["bbdir"]
+    build_type: L["debug", "release"]
+
+    def __init__(self, /, *args: object, **kwargs: object) -> None: ...
+    def write_meson_build(self, /, build_dir: Path) -> Path: ...
+    def run_meson(self, /, build_dir: Path) -> None: ...
+    @override
+    def compile(self) -> None: ...
diff --git a/python/numpy/f2py/_backends/meson.build.template b/python/numpy/f2py/_backends/meson.build.template
new file mode 100644
index 000000000..fdcc1b17c
--- /dev/null
+++ b/python/numpy/f2py/_backends/meson.build.template
@@ -0,0 +1,55 @@
+project('${modulename}',
+        ['c', 'fortran'],
+        version : '0.1',
+        meson_version: '>= 1.1.0',
+        default_options : [
+                            'warning_level=1',
+                            'buildtype=${buildtype}'
+                          ])
+fc = meson.get_compiler('fortran')
+
+py = import('python').find_installation('''${python}''', pure: false)
+py_dep = py.dependency()
+
+incdir_numpy = run_command(py,
+  ['-c', 'import os; os.chdir(".."); import numpy; print(numpy.get_include())'],
+  check : true
+).stdout().strip()
+
+incdir_f2py = run_command(py,
+    ['-c', 'import os; os.chdir(".."); import numpy.f2py; print(numpy.f2py.get_include())'],
+    check : true
+).stdout().strip()
+
+inc_np = include_directories(incdir_numpy)
+np_dep = declare_dependency(include_directories: inc_np)
+
+incdir_f2py = incdir_numpy / '..' / '..' / 'f2py' / 'src'
+inc_f2py = include_directories(incdir_f2py)
+fortranobject_c = incdir_f2py / 'fortranobject.c'
+
+inc_np = include_directories(incdir_numpy, incdir_f2py)
+# gh-25000
+quadmath_dep = fc.find_library('quadmath', required: false)
+
+${lib_declarations}
+${lib_dir_declarations}
+
+py.extension_module('${modulename}',
+                     [
+${source_list},
+                     fortranobject_c
+                     ],
+                     include_directories: [
+                     inc_np,
+${inc_list}
+                     ],
+                     dependencies : [
+                     py_dep,
+                     quadmath_dep,
+${dep_list}
+${lib_list}
+${lib_dir_list}
+                     ],
+${fortran_args}
+                     install : true)
diff --git a/python/numpy/f2py/_isocbind.py b/python/numpy/f2py/_isocbind.py
new file mode 100644
index 000000000..3043c5d91
--- /dev/null
+++ b/python/numpy/f2py/_isocbind.py
@@ -0,0 +1,62 @@
+"""
+ISO_C_BINDING maps for f2py2e.
+Only required declarations/macros/functions will be used.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED.  USE AT YOUR OWN RISK.
+"""
+# These map to keys in c2py_map, via forced casting for now, see gh-25229
+iso_c_binding_map = {
+    'integer': {
+        'c_int': 'int',
+        'c_short': 'short',  # 'short' <=> 'int' for now
+        'c_long': 'long',  # 'long' <=> 'int' for now
+        'c_long_long': 'long_long',
+        'c_signed_char': 'signed_char',
+        'c_size_t': 'unsigned',  # size_t <=> 'unsigned' for now
+        'c_int8_t': 'signed_char',  # int8_t <=> 'signed_char' for now
+        'c_int16_t': 'short',  # int16_t <=> 'short' for now
+        'c_int32_t': 'int',  # int32_t <=> 'int' for now
+        'c_int64_t': 'long_long',
+        'c_int_least8_t': 'signed_char',  # int_least8_t <=> 'signed_char' for now
+        'c_int_least16_t': 'short',  # int_least16_t <=> 'short' for now
+        'c_int_least32_t': 'int',  # int_least32_t <=> 'int' for now
+        'c_int_least64_t': 'long_long',
+        'c_int_fast8_t': 'signed_char',  # int_fast8_t <=> 'signed_char' for now
+        'c_int_fast16_t': 'short',  # int_fast16_t <=> 'short' for now
+        'c_int_fast32_t': 'int',  # int_fast32_t <=> 'int' for now
+        'c_int_fast64_t': 'long_long',
+        'c_intmax_t': 'long_long',  # intmax_t <=> 'long_long' for now
+        'c_intptr_t': 'long',  # intptr_t <=> 'long' for now
+        'c_ptrdiff_t': 'long',  # ptrdiff_t <=> 'long' for now
+    },
+    'real': {
+        'c_float': 'float',
+        'c_double': 'double',
+        'c_long_double': 'long_double'
+    },
+    'complex': {
+        'c_float_complex': 'complex_float',
+        'c_double_complex': 'complex_double',
+        'c_long_double_complex': 'complex_long_double'
+    },
+    'logical': {
+        'c_bool': 'unsigned_char'  # _Bool <=> 'unsigned_char' for now
+    },
+    'character': {
+        'c_char': 'char'
+    }
+}
+
+# TODO: See gh-25229
+isoc_c2pycode_map = {}
+iso_c2py_map = {}
+
+isoc_kindmap = {}
+for fortran_type, c_type_dict in iso_c_binding_map.items():
+    for c_type in c_type_dict.keys():
+        isoc_kindmap[c_type] = fortran_type
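
A quick illustration of how these tables compose: `isoc_kindmap` (built by the inversion loop above) answers "which Fortran type class does this ISO_C kind belong to", and `iso_c_binding_map` then yields the C type f2py will emit. A minimal usage sketch, assuming the module is importable as `numpy.f2py._isocbind` in the tree this diff targets:

```python
# Minimal usage sketch of the two tables defined above.
from numpy.f2py._isocbind import iso_c_binding_map, isoc_kindmap

kind = 'c_int32_t'
ftype = isoc_kindmap[kind]              # 'integer'
ctype = iso_c_binding_map[ftype][kind]  # 'int' (forced casting, see gh-25229)
print(f'integer(kind={kind}) maps to C {ctype!r}')
```
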
diff --git a/python/numpy/f2py/_isocbind.pyi b/python/numpy/f2py/_isocbind.pyi
new file mode 100644
index 000000000..b972f5603
--- /dev/null
+++ b/python/numpy/f2py/_isocbind.pyi
@@ -0,0 +1,13 @@
+from typing import Any, Final
+
+iso_c_binding_map: Final[dict[str, dict[str, str]]] = ...
+
+isoc_c2pycode_map: Final[dict[str, Any]] = {}  # not implemented
+iso_c2py_map: Final[dict[str, Any]] = {}  # not implemented
+
+isoc_kindmap: Final[dict[str, str]] = ...
+
+# namespace pollution
+c_type: str
+c_type_dict: dict[str, str]
+fortran_type: str
diff --git a/python/numpy/f2py/_src_pyf.py b/python/numpy/f2py/_src_pyf.py
new file mode 100644
index 000000000..b5c424f99
--- /dev/null
+++ b/python/numpy/f2py/_src_pyf.py
@@ -0,0 +1,247 @@
+import os
+import re
+
+# START OF CODE VENDORED FROM `numpy.distutils.from_template`
+#############################################################
+"""
+process_file(filename)
+
+  takes templated file .xxx.src and produces .xxx file where .xxx
+  is .pyf .f90 or .f using the following template rules:
+
+  '<..>' denotes a template.
+
+  All function and subroutine blocks in a source file with names that
+  contain '<..>' will be replicated according to the rules in '<..>'.
+
+  The number of comma-separated words in '<..>' will determine the number of
+  replicates.
+
+  '<..>' may have two different forms, named and short. For example,
+
+  named:
+    <p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
+    'd', 's', 'z', and 'c' for each replicate of the block.
+
+    <_c>  is already defined: <_c=s,d,c,z>
+    <_t>  is already defined: <_t=real,double precision,complex,double complex>
+
+  short:
+    <s,d,c,z>, a short form of the named, useful when no <p> appears inside
+    a block.
+
+  In general, '<..>' contains a comma separated list of arbitrary
+  expressions. If these expression must contain a comma|leftarrow|rightarrow,
+  then prepend the comma|leftarrow|rightarrow with a backslash.
+
+  If an expression matches '\\<index>' then it will be replaced
+  by <index>-th expression.
+
+  Note that all '<..>' forms in a block must have the same number of
+  comma-separated entries.
+
+  Predefined named template rules:
+   <prefix=s,d,c,z>
+   <ftype=real,double precision,complex,double complex>
+   <ftypereal=real,double precision>
+   <ctype=float,double,complex_float,complex_double>
+   <ctypereal=float,double>
+"""
+
+routine_start_re = re.compile(r'(\n|\A)((     (\$|\*))|)\s*(subroutine|function)\b', re.I)
+routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
+function_start_re = re.compile(r'\n     (\$|\*)\s*function\b', re.I)
+
+def parse_structure(astr):
+    """ Return a list of tuples for each function or subroutine each
+    tuple is the start and end of a subroutine or function to be
+    expanded.
+    """
+
+    spanlist = []
+    ind = 0
+    while True:
+        m = routine_start_re.search(astr, ind)
+        if m is None:
+            break
+        start = m.start()
+        if function_start_re.match(astr, start, m.end()):
+            while True:
+                i = astr.rfind('\n', ind, start)
+                if i == -1:
+                    break
+                start = i
+                if astr[i:i + 7] != '\n     $':
+                    break
+        start += 1
+        m = routine_end_re.search(astr, m.end())
+        ind = end = (m and m.end() - 1) or len(astr)
+        spanlist.append((start, end))
+    return spanlist
+
+
+template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
+named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
+list_re = re.compile(r"<\s*((.*?))\s*>")
+
+def find_repl_patterns(astr):
+    reps = named_re.findall(astr)
+    names = {}
+    for rep in reps:
+        name = rep[0].strip() or unique_key(names)
+        repl = rep[1].replace(r'\,', '@comma@')
+        thelist = conv(repl)
+        names[name] = thelist
+    return names
+
+def find_and_remove_repl_patterns(astr):
+    names = find_repl_patterns(astr)
+    astr = re.subn(named_re, '', astr)[0]
+    return astr, names
+
+
+item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
+def conv(astr):
+    b = astr.split(',')
+    l = [x.strip() for x in b]
+    for i in range(len(l)):
+        m = item_re.match(l[i])
+        if m:
+            j = int(m.group('index'))
+            l[i] = l[j]
+    return ','.join(l)
+
+def unique_key(adict):
+    """ Obtain a unique key given a dictionary."""
+    allkeys = list(adict.keys())
+    done = False
+    n = 1
+    while not done:
+        newkey = f'__l{n}'
+        if newkey in allkeys:
+            n += 1
+        else:
+            done = True
+    return newkey
+
+
+template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
+def expand_sub(substr, names):
+    substr = substr.replace(r'\>', '@rightarrow@')
+    substr = substr.replace(r'\<', '@leftarrow@')
+    lnames = find_repl_patterns(substr)
+    substr = named_re.sub(r"<\1>", substr)  # get rid of definition templates
+
+    def listrepl(mobj):
+        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
+        if template_name_re.match(thelist):
+            return f"<{thelist}>"
+        name = None
+        for key in lnames.keys():  # see if list is already in dictionary
+            if lnames[key] == thelist:
+                name = key
+        if name is None:  # this list is not in the dictionary yet
+            name = unique_key(lnames)
+            lnames[name] = thelist
+        return f"<{name}>"
+
+    # convert all lists to named templates
+    # new names are constructed as needed
+    substr = list_re.sub(listrepl, substr)
+
+    numsubs = None
+    base_rule = None
+    rules = {}
+    for r in template_re.findall(substr):
+        if r not in rules:
+            thelist = lnames.get(r, names.get(r, None))
+            if thelist is None:
+                raise ValueError(f'No replicates found for <{r}>')
+            if r not in names and not thelist.startswith('_'):
+                names[r] = thelist
+            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
+            num = len(rule)
+
+            if numsubs is None:
+                numsubs = num
+                rules[r] = rule
+                base_rule = r
+            elif num == numsubs:
+                rules[r] = rule
+            else:
+                rules_base_rule = ','.join(rules[base_rule])
+                print("Mismatch in number of replacements "
+                      f"(base <{base_rule}={rules_base_rule}>) "
+                      f"for <{r}={thelist}>. Ignoring.")
+    if not rules:
+        return substr
+
+    def namerepl(mobj):
+        name = mobj.group(1)
+        return rules.get(name, (k + 1) * [name])[k]
+
+    newstr = ''
+    for k in range(numsubs):
+        newstr += template_re.sub(namerepl, substr) + '\n\n'
+
+    newstr = newstr.replace('@rightarrow@', '>')
+    newstr = newstr.replace('@leftarrow@', '<')
+    return newstr
+
+def process_str(allstr):
+    newstr = allstr
+    writestr = ''
+
+    struct = parse_structure(newstr)
+
+    oldend = 0
+    names = {}
+    names.update(_special_names)
+    for sub in struct:
+        cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
+        writestr += cleanedstr
+        names.update(defs)
+        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
+        oldend = sub[1]
+    writestr += newstr[oldend:]
+
+    return writestr
+
+
+include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
+
+def resolve_includes(source):
+    d = os.path.dirname(source)
+    with open(source) as fid:
+        lines = []
+        for line in fid:
+            m = include_src_re.match(line)
+            if m:
+                fn = m.group('name')
+                if not os.path.isabs(fn):
+                    fn = os.path.join(d, fn)
+                if os.path.isfile(fn):
+                    lines.extend(resolve_includes(fn))
+                else:
+                    lines.append(line)
+            else:
+                lines.append(line)
+    return lines
+
+def process_file(source):
+    lines = resolve_includes(source)
+    return process_str(''.join(lines))
+
+
+_special_names = find_repl_patterns('''
+<_c=s,d,c,z>
+<_t=real,double precision,complex,double complex>
+<prefix=s,d,c,z>
+<ftype=real,double precision,complex,double complex>
+<ftypereal=real,double precision>
+<ctype=float,double,complex_float,complex_double>
+<ctypereal=float,double>
+''')
+
+# END OF CODE VENDORED FROM `numpy.distutils.from_template`
+###########################################################
diff --git a/python/numpy/f2py/_src_pyf.pyi b/python/numpy/f2py/_src_pyf.pyi
new file mode 100644
index 000000000..f5aecbf1d
--- /dev/null
+++ b/python/numpy/f2py/_src_pyf.pyi
@@ -0,0 +1,29 @@
+import re
+from collections.abc import Mapping
+from typing import Final
+
+from _typeshed import StrOrBytesPath
+
+routine_start_re: Final[re.Pattern[str]] = ...
+routine_end_re: Final[re.Pattern[str]] = ...
+function_start_re: Final[re.Pattern[str]] = ...
+template_re: Final[re.Pattern[str]] = ...
+named_re: Final[re.Pattern[str]] = ...
+list_re: Final[re.Pattern[str]] = ...
+item_re: Final[re.Pattern[str]] = ...
+template_name_re: Final[re.Pattern[str]] = ...
+include_src_re: Final[re.Pattern[str]] = ...
+
+def parse_structure(astr: str) -> list[tuple[int, int]]: ...
+def find_repl_patterns(astr: str) -> dict[str, str]: ...
+def find_and_remove_repl_patterns(astr: str) -> tuple[str, dict[str, str]]: ...
+def conv(astr: str) -> str: ...
+
+#
+def unique_key(adict: Mapping[str, object]) -> str: ...
+def expand_sub(substr: str, names: dict[str, str]) -> str: ...
+def process_str(allstr: str) -> str: ...
+
+#
+def resolve_includes(source: StrOrBytesPath) -> list[str]: ...
+def process_file(source: StrOrBytesPath) -> str: ...
diff --git a/python/numpy/f2py/auxfuncs.py b/python/numpy/f2py/auxfuncs.py
new file mode 100644
index 000000000..a5af31d97
--- /dev/null
+++ b/python/numpy/f2py/auxfuncs.py
@@ -0,0 +1,1004 @@
+"""
+Auxiliary functions for f2py2e.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) LICENSE. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import pprint +import re +import sys +import types +from functools import reduce + +from . import __version__, cfuncs +from .cfuncs import errmess + +__all__ = [ + 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', + 'getargs2', 'getcallprotoargument', 'getcallstatement', + 'getfortranname', 'getpymethoddef', 'getrestdoc', 'getusercode', + 'getusercode1', 'getdimension', 'hasbody', 'hascallstatement', 'hascommon', + 'hasexternals', 'hasinitvalue', 'hasnote', 'hasresultnote', + 'isallocatable', 'isarray', 'isarrayofstrings', + 'ischaracter', 'ischaracterarray', 'ischaracter_or_characterarray', + 'iscomplex', 'iscstyledirective', + 'iscomplexarray', 'iscomplexfunction', 'iscomplexfunction_warn', + 'isdouble', 'isdummyroutine', 'isexternal', 'isfunction', + 'isfunction_wrap', 'isint1', 'isint1array', 'isinteger', 'isintent_aux', + 'isintent_c', 'isintent_callback', 'isintent_copy', 'isintent_dict', + 'isintent_hide', 'isintent_in', 'isintent_inout', 'isintent_inplace', + 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', + 'islogicalfunction', 'islong_complex', 'islong_double', + 'islong_doublefunction', 'islong_long', 'islong_longfunction', + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable', + 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', + 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', + 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', + 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', + 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', + 'process_f2cmap_dict', 'containscommon', 'containsderivedtypes' +] + + +f2py_version = __version__.version + + +show = pprint.pprint + +options = {} +debugoptions = [] +wrapfuncs = 1 + + +def outmess(t): + if options.get('verbose', 1): + sys.stdout.write(t) + + +def debugcapi(var): + return 'capi' in debugoptions + + +def _ischaracter(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def _isstring(var): + return 'typespec' in var and var['typespec'] == 'character' and \ + not isexternal(var) + + +def ischaracter_or_characterarray(var): + return _ischaracter(var) and 'charselector' not in var + + +def ischaracter(var): + return ischaracter_or_characterarray(var) and not isarray(var) + + +def ischaracterarray(var): + return ischaracter_or_characterarray(var) and isarray(var) + + +def isstring_or_stringarray(var): + return _ischaracter(var) and 'charselector' in var + + +def isstring(var): + return isstring_or_stringarray(var) and not isarray(var) + + +def isstringarray(var): + return isstring_or_stringarray(var) and isarray(var) + + +def isarrayofstrings(var): # obsolete? + # leaving out '*' for now so that `character*(*) a(m)` and `character + # a(m,*)` are treated differently. Luckily `character**` is illegal. 
+ return isstringarray(var) and var['dimension'][-1] == '(*)' + + +def isarray(var): + return 'dimension' in var and not isexternal(var) + + +def isscalar(var): + return not (isarray(var) or isstring(var) or isexternal(var)) + + +def iscomplex(var): + return isscalar(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def islogical(var): + return isscalar(var) and var.get('typespec') == 'logical' + + +def isinteger(var): + return isscalar(var) and var.get('typespec') == 'integer' + + +def isreal(var): + return isscalar(var) and var.get('typespec') == 'real' + + +def get_kind(var): + try: + return var['kindselector']['*'] + except KeyError: + try: + return var['kindselector']['kind'] + except KeyError: + pass + + +def isint1(var): + return var.get('typespec') == 'integer' \ + and get_kind(var) == '1' and not isarray(var) + + +def islong_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') not in ['integer', 'logical']: + return 0 + return get_kind(var) == '8' + + +def isunsigned_char(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-1' + + +def isunsigned_short(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-2' + + +def isunsigned(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-4' + + +def isunsigned_long_long(var): + if not isscalar(var): + return 0 + if var.get('typespec') != 'integer': + return 0 + return get_kind(var) == '-8' + + +def isdouble(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '8' + + +def islong_double(var): + if not isscalar(var): + return 0 + if not var.get('typespec') == 'real': + return 0 + return get_kind(var) == '16' + + +def islong_complex(var): + if not iscomplex(var): + return 0 + return get_kind(var) == '32' + + +def iscomplexarray(var): + return isarray(var) and \ + var.get('typespec') in ['complex', 'double complex'] + + +def isint1array(var): + return isarray(var) and var.get('typespec') == 'integer' \ + and get_kind(var) == '1' + + +def isunsigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-1' + + +def isunsigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-2' + + +def isunsignedarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-4' + + +def isunsigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '-8' + + +def issigned_chararray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '1' + + +def issigned_shortarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '2' + + +def issigned_array(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '4' + + +def issigned_long_longarray(var): + return isarray(var) and var.get('typespec') in ['integer', 'logical']\ + and get_kind(var) == '8' + + +def isallocatable(var): + return 'attrspec' in var and 'allocatable' in var['attrspec'] + + +def ismutable(var): + return not ('dimension' not in var or isstring(var)) + + +def ismoduleroutine(rout): + return 'modulename' in rout + + +def 
ismodule(rout): + return 'block' in rout and 'module' == rout['block'] + + +def isfunction(rout): + return 'block' in rout and 'function' == rout['block'] + + +def isfunction_wrap(rout): + if isintent_c(rout): + return 0 + return wrapfuncs and isfunction(rout) and (not isexternal(rout)) + + +def issubroutine(rout): + return 'block' in rout and 'subroutine' == rout['block'] + + +def issubroutine_wrap(rout): + if isintent_c(rout): + return 0 + return issubroutine(rout) and hasassumedshape(rout) + +def isattr_value(var): + return 'value' in var.get('attrspec', []) + + +def hasassumedshape(rout): + if rout.get('hasassumedshape'): + return True + for a in rout['args']: + for d in rout['vars'].get(a, {}).get('dimension', []): + if d == ':': + rout['hasassumedshape'] = True + return True + return False + + +def requiresf90wrapper(rout): + return ismoduleroutine(rout) or hasassumedshape(rout) + + +def isroutine(rout): + return isfunction(rout) or issubroutine(rout) + + +def islogicalfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islogical(rout['vars'][a]) + return 0 + + +def islong_longfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_long(rout['vars'][a]) + return 0 + + +def islong_doublefunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return islong_double(rout['vars'][a]) + return 0 + + +def iscomplexfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return iscomplex(rout['vars'][a]) + return 0 + + +def iscomplexfunction_warn(rout): + if iscomplexfunction(rout): + outmess("""\ + ************************************************************** + Warning: code with a function returning complex value + may not work correctly with your Fortran compiler. 
+ When using GNU gcc/g77 compilers, codes should work + correctly for callbacks with: + f2py -c -DF2PY_CB_RETURNCOMPLEX + **************************************************************\n""") + return 1 + return 0 + + +def isstringfunction(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return isstring(rout['vars'][a]) + return 0 + + +def hasexternals(rout): + return 'externals' in rout and rout['externals'] + + +def isthreadsafe(rout): + return 'f2pyenhancements' in rout and \ + 'threadsafe' in rout['f2pyenhancements'] + + +def hasvariables(rout): + return 'vars' in rout and rout['vars'] + + +def isoptional(var): + return ('attrspec' in var and 'optional' in var['attrspec'] and + 'required' not in var['attrspec']) and isintent_nothide(var) + + +def isexternal(var): + return 'attrspec' in var and 'external' in var['attrspec'] + + +def getdimension(var): + dimpattern = r"\((.*?)\)" + if 'attrspec' in var.keys(): + if any('dimension' in s for s in var['attrspec']): + return next(re.findall(dimpattern, v) for v in var['attrspec']) + + +def isrequired(var): + return not isoptional(var) and isintent_nothide(var) + + +def iscstyledirective(f2py_line): + directives = {"callstatement", "callprotoargument", "pymethoddef"} + return any(directive in f2py_line.lower() for directive in directives) + + +def isintent_in(var): + if 'intent' not in var: + return 1 + if 'hide' in var['intent']: + return 0 + if 'inplace' in var['intent']: + return 0 + if 'in' in var['intent']: + return 1 + if 'out' in var['intent']: + return 0 + if 'inout' in var['intent']: + return 0 + if 'outin' in var['intent']: + return 0 + return 1 + + +def isintent_inout(var): + return ('intent' in var and ('inout' in var['intent'] or + 'outin' in var['intent']) and 'in' not in var['intent'] and + 'hide' not in var['intent'] and 'inplace' not in var['intent']) + + +def isintent_out(var): + return 'out' in var.get('intent', []) + + +def isintent_hide(var): + return ('intent' in var and ('hide' in var['intent'] or + ('out' in var['intent'] and 'in' not in var['intent'] and + (not l_or(isintent_inout, isintent_inplace)(var))))) + + +def isintent_nothide(var): + return not isintent_hide(var) + + +def isintent_c(var): + return 'c' in var.get('intent', []) + + +def isintent_cache(var): + return 'cache' in var.get('intent', []) + + +def isintent_copy(var): + return 'copy' in var.get('intent', []) + + +def isintent_overwrite(var): + return 'overwrite' in var.get('intent', []) + + +def isintent_callback(var): + return 'callback' in var.get('intent', []) + + +def isintent_inplace(var): + return 'inplace' in var.get('intent', []) + + +def isintent_aux(var): + return 'aux' in var.get('intent', []) + + +def isintent_aligned4(var): + return 'aligned4' in var.get('intent', []) + + +def isintent_aligned8(var): + return 'aligned8' in var.get('intent', []) + + +def isintent_aligned16(var): + return 'aligned16' in var.get('intent', []) + + +isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT', + isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE', + isintent_cache: 'INTENT_CACHE', + isintent_c: 'INTENT_C', isoptional: 'OPTIONAL', + isintent_inplace: 'INTENT_INPLACE', + isintent_aligned4: 'INTENT_ALIGNED4', + isintent_aligned8: 'INTENT_ALIGNED8', + isintent_aligned16: 'INTENT_ALIGNED16', + } + + +def isprivate(var): + return 'attrspec' in var and 'private' in var['attrspec'] + + +def isvariable(var): + # heuristic to find public/private 
declarations of filtered subroutines + if len(var) == 1 and 'attrspec' in var and \ + var['attrspec'][0] in ('public', 'private'): + is_var = False + else: + is_var = True + return is_var + +def hasinitvalue(var): + return '=' in var + + +def hasinitvalueasstring(var): + if not hasinitvalue(var): + return 0 + return var['='][0] in ['"', "'"] + + +def hasnote(var): + return 'note' in var + + +def hasresultnote(rout): + if not isfunction(rout): + return 0 + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if a in rout['vars']: + return hasnote(rout['vars'][a]) + return 0 + + +def hascommon(rout): + return 'common' in rout + + +def containscommon(rout): + if hascommon(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if containscommon(b): + return 1 + return 0 + + +def hasderivedtypes(rout): + return ('block' in rout) and rout['block'] == 'type' + + +def containsderivedtypes(rout): + if hasderivedtypes(rout): + return 1 + if hasbody(rout): + for b in rout['body']: + if hasderivedtypes(b): + return 1 + return 0 + + +def containsmodule(block): + if ismodule(block): + return 1 + if not hasbody(block): + return 0 + for b in block['body']: + if containsmodule(b): + return 1 + return 0 + + +def hasbody(rout): + return 'body' in rout + + +def hascallstatement(rout): + return getcallstatement(rout) is not None + + +def istrue(var): + return 1 + + +def isfalse(var): + return 0 + + +class F2PYError(Exception): + pass + + +class throw_error: + + def __init__(self, mess): + self.mess = mess + + def __call__(self, var): + mess = f'\n\n var = {var}\n Message: {self.mess}\n' + raise F2PYError(mess) + + +def l_and(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval(f"{l1}:{' and '.join(l2)}") + + +def l_or(*f): + l1, l2 = 'lambda v', [] + for i in range(len(f)): + l1 = '%s,f%d=f[%d]' % (l1, i, i) + l2.append('f%d(v)' % (i)) + return eval(f"{l1}:{' or '.join(l2)}") + + +def l_not(f): + return eval('lambda v,f=f:not f(v)') + + +def isdummyroutine(rout): + try: + return rout['f2pyenhancements']['fortranname'] == '' + except KeyError: + return 0 + + +def getfortranname(rout): + try: + name = rout['f2pyenhancements']['fortranname'] + if name == '': + raise KeyError + if not name: + errmess(f"Failed to use fortranname from {rout['f2pyenhancements']}\n") + raise KeyError + except KeyError: + name = rout['name'] + return name + + +def getmultilineblock(rout, blockname, comment=1, counter=0): + try: + r = rout['f2pyenhancements'].get(blockname) + except KeyError: + return + if not r: + return + if counter > 0 and isinstance(r, str): + return + if isinstance(r, list): + if counter >= len(r): + return + r = r[counter] + if r[:3] == "'''": + if comment: + r = '\t/* start ' + blockname + \ + ' multiline (' + repr(counter) + ') */\n' + r[3:] + else: + r = r[3:] + if r[-3:] == "'''": + if comment: + r = r[:-3] + '\n\t/* end multiline (' + repr(counter) + ')*/' + else: + r = r[:-3] + else: + errmess(f"{blockname} multiline block should end with `'''`: {repr(r)}\n") + return r + + +def getcallstatement(rout): + return getmultilineblock(rout, 'callstatement') + + +def getcallprotoargument(rout, cb_map={}): + r = getmultilineblock(rout, 'callprotoargument', comment=0) + if r: + return r + if hascallstatement(rout): + outmess( + 'warning: callstatement is defined without callprotoargument\n') + return + from .capi_maps import getctype + arg_types, arg_types2 = [], [] + if l_and(isstringfunction, 
l_not(isfunction_wrap))(rout): + arg_types.extend(['char*', 'size_t']) + for n in rout['args']: + var = rout['vars'][n] + if isintent_callback(var): + continue + if n in cb_map: + ctype = cb_map[n] + '_typedef' + else: + ctype = getctype(var) + if l_and(isintent_c, l_or(isscalar, iscomplex))(var): + pass + elif isstring(var): + pass + elif not isattr_value(var): + ctype = ctype + '*' + if (isstring(var) + or isarrayofstrings(var) # obsolete? + or isstringarray(var)): + arg_types2.append('size_t') + arg_types.append(ctype) + + proto_args = ','.join(arg_types + arg_types2) + if not proto_args: + proto_args = 'void' + return proto_args + + +def getusercode(rout): + return getmultilineblock(rout, 'usercode') + + +def getusercode1(rout): + return getmultilineblock(rout, 'usercode', counter=1) + + +def getpymethoddef(rout): + return getmultilineblock(rout, 'pymethoddef') + + +def getargs(rout): + sortargs, args = [], [] + if 'args' in rout: + args = rout['args'] + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = rout['args'] + return args, sortargs + + +def getargs2(rout): + sortargs, args = [], rout.get('args', []) + auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a]) + and a not in args] + args = auxvars + args + if 'sortvars' in rout: + for a in rout['sortvars']: + if a in args: + sortargs.append(a) + for a in args: + if a not in sortargs: + sortargs.append(a) + else: + sortargs = auxvars + rout['args'] + return args, sortargs + + +def getrestdoc(rout): + if 'f2pymultilines' not in rout: + return None + k = None + if rout['block'] == 'python module': + k = rout['block'], rout['name'] + return rout['f2pymultilines'].get(k, None) + + +def gentitle(name): + ln = (80 - len(name) - 6) // 2 + return f"/*{ln * '*'} {name} {ln * '*'}*/" + + +def flatlist(lst): + if isinstance(lst, list): + return reduce(lambda x, y, f=flatlist: x + f(y), lst, []) + return [lst] + + +def stripcomma(s): + if s and s[-1] == ',': + return s[:-1] + return s + + +def replace(str, d, defaultsep=''): + if isinstance(d, list): + return [replace(str, _m, defaultsep) for _m in d] + if isinstance(str, list): + return [replace(_m, d, defaultsep) for _m in str] + for k in 2 * list(d.keys()): + if k == 'separatorsfor': + continue + if 'separatorsfor' in d and k in d['separatorsfor']: + sep = d['separatorsfor'][k] + else: + sep = defaultsep + if isinstance(d[k], list): + str = str.replace(f'#{k}#', sep.join(flatlist(d[k]))) + else: + str = str.replace(f'#{k}#', d[k]) + return str + + +def dictappend(rd, ar): + if isinstance(ar, list): + for a in ar: + rd = dictappend(rd, a) + return rd + for k in ar.keys(): + if k[0] == '_': + continue + if k in rd: + if isinstance(rd[k], str): + rd[k] = [rd[k]] + if isinstance(rd[k], list): + if isinstance(ar[k], list): + rd[k] = rd[k] + ar[k] + else: + rd[k].append(ar[k]) + elif isinstance(rd[k], dict): + if isinstance(ar[k], dict): + if k == 'separatorsfor': + for k1 in ar[k].keys(): + if k1 not in rd[k]: + rd[k][k1] = ar[k][k1] + else: + rd[k] = dictappend(rd[k], ar[k]) + else: + rd[k] = ar[k] + return rd + + +def applyrules(rules, d, var={}): + ret = {} + if isinstance(rules, list): + for r in rules: + rr = applyrules(r, d, var) + ret = dictappend(ret, rr) + if '_break' in rr: + break + return ret + if '_check' in rules and (not rules['_check'](var)): + return ret + if 'need' in rules: + res = applyrules({'needs': rules['need']}, d, var) + if 'needs' 
in res: + cfuncs.append_needs(res['needs']) + + for k in rules.keys(): + if k == 'separatorsfor': + ret[k] = rules[k] + continue + if isinstance(rules[k], str): + ret[k] = replace(rules[k], d) + elif isinstance(rules[k], list): + ret[k] = [] + for i in rules[k]: + ar = applyrules({k: i}, d, var) + if k in ar: + ret[k].append(ar[k]) + elif k[0] == '_': + continue + elif isinstance(rules[k], dict): + ret[k] = [] + for k1 in rules[k].keys(): + if isinstance(k1, types.FunctionType) and k1(var): + if isinstance(rules[k][k1], list): + for i in rules[k][k1]: + if isinstance(i, dict): + res = applyrules({'supertext': i}, d, var) + i = res.get('supertext', '') + ret[k].append(replace(i, d)) + else: + i = rules[k][k1] + if isinstance(i, dict): + res = applyrules({'supertext': i}, d) + i = res.get('supertext', '') + ret[k].append(replace(i, d)) + else: + errmess(f'applyrules: ignoring rule {repr(rules[k])}.\n') + if isinstance(ret[k], list): + if len(ret[k]) == 1: + ret[k] = ret[k][0] + if ret[k] == []: + del ret[k] + return ret + + +_f2py_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]+)', + re.I).match +_f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P[\w_]*?' + r'__user__[\w_]*)', re.I).match + +def get_f2py_modulename(source): + name = None + with open(source) as f: + for line in f: + m = _f2py_module_name_match(line) + if m: + if _f2py_user_module_name_match(line): # skip *__user__* names + continue + name = m.group('name') + break + return name + +def getuseblocks(pymod): + all_uses = [] + for inner in pymod['body']: + for modblock in inner['body']: + if modblock.get('use'): + all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x]) + return all_uses + +def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): + """ + Update the Fortran-to-C type mapping dictionary with new mappings and + return a list of successfully mapped C types. + + This function integrates a new mapping dictionary into an existing + Fortran-to-C type mapping dictionary. It ensures that all keys are in + lowercase and validates new entries against a given C-to-Python mapping + dictionary. Redefinitions and invalid entries are reported with a warning. + + Parameters + ---------- + f2cmap_all : dict + The existing Fortran-to-C type mapping dictionary that will be updated. + It should be a dictionary of dictionaries where the main keys represent + Fortran types and the nested dictionaries map Fortran type specifiers + to corresponding C types. + + new_map : dict + A dictionary containing new type mappings to be added to `f2cmap_all`. + The structure should be similar to `f2cmap_all`, with keys representing + Fortran types and values being dictionaries of type specifiers and their + C type equivalents. + + c2py_map : dict + A dictionary used for validating the C types in `new_map`. It maps C + types to corresponding Python types and is used to ensure that the C + types specified in `new_map` are valid. + + verbose : boolean + A flag used to provide information about the types mapped + + Returns + ------- + tuple of (dict, list) + The updated Fortran-to-C type mapping dictionary and a list of + successfully mapped C types. 
+ """ + f2cmap_mapped = [] + + new_map_lower = {} + for k, d1 in new_map.items(): + d1_lower = {k1.lower(): v1 for k1, v1 in d1.items()} + new_map_lower[k.lower()] = d1_lower + + for k, d1 in new_map_lower.items(): + if k not in f2cmap_all: + f2cmap_all[k] = {} + + for k1, v1 in d1.items(): + if v1 in c2py_map: + if k1 in f2cmap_all[k]: + outmess( + "\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n" + % (k, k1, f2cmap_all[k][k1], v1) + ) + f2cmap_all[k][k1] = v1 + if verbose: + outmess(f'\tMapping "{k}(kind={k1})" to "{v1}\"\n') + f2cmap_mapped.append(v1) + elif verbose: + errmess( + "\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" + % (k, k1, v1, v1, list(c2py_map.keys())) + ) + + return f2cmap_all, f2cmap_mapped diff --git a/python/numpy/f2py/auxfuncs.pyi b/python/numpy/f2py/auxfuncs.pyi new file mode 100644 index 000000000..f2ff09faf --- /dev/null +++ b/python/numpy/f2py/auxfuncs.pyi @@ -0,0 +1,264 @@ +from collections.abc import Callable, Mapping +from pprint import pprint as show +from typing import Any, Final, Never, TypeAlias, TypeVar, overload +from typing import Literal as L + +from _typeshed import FileDescriptorOrPath + +from .cfuncs import errmess + +__all__ = [ + "applyrules", + "containscommon", + "containsderivedtypes", + "debugcapi", + "dictappend", + "errmess", + "gentitle", + "get_f2py_modulename", + "getargs2", + "getcallprotoargument", + "getcallstatement", + "getdimension", + "getfortranname", + "getpymethoddef", + "getrestdoc", + "getuseblocks", + "getusercode", + "getusercode1", + "hasbody", + "hascallstatement", + "hascommon", + "hasexternals", + "hasinitvalue", + "hasnote", + "hasresultnote", + "isallocatable", + "isarray", + "isarrayofstrings", + "isattr_value", + "ischaracter", + "ischaracter_or_characterarray", + "ischaracterarray", + "iscomplex", + "iscomplexarray", + "iscomplexfunction", + "iscomplexfunction_warn", + "iscstyledirective", + "isdouble", + "isdummyroutine", + "isexternal", + "isfunction", + "isfunction_wrap", + "isint1", + "isint1array", + "isinteger", + "isintent_aux", + "isintent_c", + "isintent_callback", + "isintent_copy", + "isintent_dict", + "isintent_hide", + "isintent_in", + "isintent_inout", + "isintent_inplace", + "isintent_nothide", + "isintent_out", + "isintent_overwrite", + "islogical", + "islogicalfunction", + "islong_complex", + "islong_double", + "islong_doublefunction", + "islong_long", + "islong_longfunction", + "ismodule", + "ismoduleroutine", + "isoptional", + "isprivate", + "isrequired", + "isroutine", + "isscalar", + "issigned_long_longarray", + "isstring", + "isstring_or_stringarray", + "isstringarray", + "isstringfunction", + "issubroutine", + "issubroutine_wrap", + "isthreadsafe", + "isunsigned", + "isunsigned_char", + "isunsigned_chararray", + "isunsigned_long_long", + "isunsigned_long_longarray", + "isunsigned_short", + "isunsigned_shortarray", + "isvariable", + "l_and", + "l_not", + "l_or", + "outmess", + "process_f2cmap_dict", + "replace", + "show", + "stripcomma", + "throw_error", +] + +### + +_VT = TypeVar("_VT") +_RT = TypeVar("_RT") + +_Var: TypeAlias = Mapping[str, list[str]] +_ROut: TypeAlias = Mapping[str, str] +_F2CMap: TypeAlias = Mapping[str, Mapping[str, str]] + +_Bool: TypeAlias = bool | L[0, 1] +_Intent: TypeAlias = L[ + "INTENT_IN", + "INTENT_OUT", + "INTENT_INOUT", + "INTENT_C", + "INTENT_CACHE", + "INTENT_HIDE", + "INTENT_INPLACE", + "INTENT_ALIGNED4", + "INTENT_ALIGNED8", + "INTENT_ALIGNED16", + "OPTIONAL", +] + +### + +isintent_dict: dict[Callable[[_Var], _Bool], _Intent] + +class 
F2PYError(Exception): ... + +class throw_error: + mess: Final[str] + def __init__(self, /, mess: str) -> None: ... + def __call__(self, /, var: _Var) -> Never: ... # raises F2PYError + +# +def l_and(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_or(*f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... +def l_not(f: tuple[str, Callable[[_VT], _RT]]) -> Callable[[_VT], _RT]: ... + +# +def outmess(t: str) -> None: ... +def debugcapi(var: _Var) -> bool: ... + +# +def hasinitvalue(var: _Var | str) -> bool: ... +def hasnote(var: _Var | str) -> bool: ... +def ischaracter(var: _Var) -> bool: ... +def ischaracterarray(var: _Var) -> bool: ... +def ischaracter_or_characterarray(var: _Var) -> bool: ... +def isstring(var: _Var) -> bool: ... +def isstringarray(var: _Var) -> bool: ... +def isstring_or_stringarray(var: _Var) -> bool: ... +def isarray(var: _Var) -> bool: ... +def isarrayofstrings(var: _Var) -> bool: ... +def isscalar(var: _Var) -> bool: ... +def iscomplex(var: _Var) -> bool: ... +def islogical(var: _Var) -> bool: ... +def isinteger(var: _Var) -> bool: ... +def isint1(var: _Var) -> bool: ... +def isint1array(var: _Var) -> bool: ... +def islong_long(var: _Var) -> _Bool: ... +def isunsigned(var: _Var) -> _Bool: ... +def isunsigned_char(var: _Var) -> _Bool: ... +def isunsigned_chararray(var: _Var) -> bool: ... +def isunsigned_short(var: _Var) -> _Bool: ... +def isunsigned_shortarray(var: _Var) -> bool: ... +def isunsigned_long_long(var: _Var) -> _Bool: ... +def isunsigned_long_longarray(var: _Var) -> bool: ... +def issigned_long_longarray(var: _Var) -> bool: ... +def isdouble(var: _Var) -> _Bool: ... +def islong_double(var: _Var) -> _Bool: ... +def islong_complex(var: _Var) -> _Bool: ... +def iscomplexarray(var: _Var) -> bool: ... +def isallocatable(var: _Var) -> bool: ... +def isattr_value(var: _Var) -> bool: ... +def isoptional(var: _Var) -> bool: ... +def isexternal(var: _Var) -> bool: ... +def isrequired(var: _Var) -> bool: ... +def isprivate(var: _Var) -> bool: ... +def isvariable(var: _Var) -> bool: ... +def isintent_in(var: _Var) -> _Bool: ... +def isintent_inout(var: _Var) -> bool: ... +def isintent_out(var: _Var) -> bool: ... +def isintent_hide(var: _Var) -> bool: ... +def isintent_nothide(var: _Var) -> bool: ... +def isintent_c(var: _Var) -> bool: ... +def isintent_cache(var: _Var) -> bool: ... +def isintent_copy(var: _Var) -> bool: ... +def isintent_overwrite(var: _Var) -> bool: ... +def isintent_callback(var: _Var) -> bool: ... +def isintent_inplace(var: _Var) -> bool: ... +def isintent_aux(var: _Var) -> bool: ... + +# +def containsderivedtypes(rout: _ROut) -> L[0, 1]: ... +def containscommon(rout: _ROut) -> _Bool: ... +def hasexternals(rout: _ROut) -> bool: ... +def hasresultnote(rout: _ROut) -> _Bool: ... +def hasbody(rout: _ROut) -> _Bool: ... +def hascommon(rout: _ROut) -> bool: ... +def hasderivedtypes(rout: _ROut) -> bool: ... +def hascallstatement(rout: _ROut) -> bool: ... +def isroutine(rout: _ROut) -> bool: ... +def ismodule(rout: _ROut) -> bool: ... +def ismoduleroutine(rout: _ROut) -> bool: ... +def issubroutine(rout: _ROut) -> bool: ... +def issubroutine_wrap(rout: _ROut) -> _Bool: ... +def isfunction(rout: _ROut) -> bool: ... +def isfunction_wrap(rout: _ROut) -> _Bool: ... +def islogicalfunction(rout: _ROut) -> _Bool: ... +def islong_longfunction(rout: _ROut) -> _Bool: ... +def islong_doublefunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction(rout: _ROut) -> _Bool: ... +def iscomplexfunction_warn(rout: _ROut) -> _Bool: ... 
+def isstringfunction(rout: _ROut) -> _Bool: ... +def isthreadsafe(rout: _ROut) -> bool: ... +def isdummyroutine(rout: _ROut) -> _Bool: ... +def iscstyledirective(f2py_line: str) -> bool: ... + +# . +def getdimension(var: _Var) -> list[Any] | None: ... +def getfortranname(rout: _ROut) -> str: ... +def getmultilineblock(rout: _ROut, blockname: str, comment: _Bool = 1, counter: int = 0) -> str | None: ... +def getcallstatement(rout: _ROut) -> str | None: ... +def getcallprotoargument(rout: _ROut, cb_map: dict[str, str] = {}) -> str: ... +def getusercode(rout: _ROut) -> str | None: ... +def getusercode1(rout: _ROut) -> str | None: ... +def getpymethoddef(rout: _ROut) -> str | None: ... +def getargs(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getargs2(rout: _ROut) -> tuple[list[str], list[str]]: ... +def getrestdoc(rout: _ROut) -> str | None: ... + +# +def gentitle(name: str) -> str: ... +def stripcomma(s: str) -> str: ... +@overload +def replace(str: str, d: list[str], defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: list[str], d: str, defaultsep: str = "") -> list[str]: ... +@overload +def replace(str: str, d: str, defaultsep: str = "") -> str: ... + +# +def dictappend(rd: Mapping[str, object], ar: Mapping[str, object] | list[Mapping[str, object]]) -> dict[str, Any]: ... +def applyrules(rules: Mapping[str, object], d: Mapping[str, object], var: _Var = {}) -> dict[str, Any]: ... + +# +def get_f2py_modulename(source: FileDescriptorOrPath) -> str: ... +def getuseblocks(pymod: Mapping[str, Mapping[str, Mapping[str, str]]]) -> list[str]: ... +def process_f2cmap_dict( + f2cmap_all: _F2CMap, + new_map: _F2CMap, + c2py_map: _F2CMap, + verbose: bool = False, +) -> tuple[dict[str, dict[str, str]], list[str]]: ... diff --git a/python/numpy/f2py/capi_maps.py b/python/numpy/f2py/capi_maps.py new file mode 100644 index 000000000..290ac2f46 --- /dev/null +++ b/python/numpy/f2py/capi_maps.py @@ -0,0 +1,811 @@ +""" +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ + +f2py_version = __version__.version + +import copy +import os +import re + +from . import cb_rules +from ._isocbind import iso_c2py_map, iso_c_binding_map, isoc_c2pycode_map + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * +from .crackfortran import markoutercomma + +__all__ = [ + 'getctype', 'getstrlength', 'getarrdims', 'getpydocsign', + 'getarrdocsign', 'getinit', 'sign2map', 'routsign2map', 'modsign2map', + 'cb_sign2map', 'cb_routsign2map', 'common_sign2map', 'process_f2cmap_dict' +] + + +depargs = [] +lcb_map = {} +lcb2_map = {} +# forced casting: mainly caused by the fact that Python or Numeric +# C/APIs do not support the corresponding C types. 
+c2py_map = {'double': 'float', + 'float': 'float', # forced casting + 'long_double': 'float', # forced casting + 'char': 'int', # forced casting + 'signed_char': 'int', # forced casting + 'unsigned_char': 'int', # forced casting + 'short': 'int', # forced casting + 'unsigned_short': 'int', # forced casting + 'int': 'int', # forced casting + 'long': 'int', + 'long_long': 'long', + 'unsigned': 'int', # forced casting + 'complex_float': 'complex', # forced casting + 'complex_double': 'complex', + 'complex_long_double': 'complex', # forced casting + 'string': 'string', + 'character': 'bytes', + } + +c2capi_map = {'double': 'NPY_DOUBLE', + 'float': 'NPY_FLOAT', + 'long_double': 'NPY_LONGDOUBLE', + 'char': 'NPY_BYTE', + 'unsigned_char': 'NPY_UBYTE', + 'signed_char': 'NPY_BYTE', + 'short': 'NPY_SHORT', + 'unsigned_short': 'NPY_USHORT', + 'int': 'NPY_INT', + 'unsigned': 'NPY_UINT', + 'long': 'NPY_LONG', + 'unsigned_long': 'NPY_ULONG', + 'long_long': 'NPY_LONGLONG', + 'unsigned_long_long': 'NPY_ULONGLONG', + 'complex_float': 'NPY_CFLOAT', + 'complex_double': 'NPY_CDOUBLE', + 'complex_long_double': 'NPY_CDOUBLE', + 'string': 'NPY_STRING', + 'character': 'NPY_STRING'} + +c2pycode_map = {'double': 'd', + 'float': 'f', + 'long_double': 'g', + 'char': 'b', + 'unsigned_char': 'B', + 'signed_char': 'b', + 'short': 'h', + 'unsigned_short': 'H', + 'int': 'i', + 'unsigned': 'I', + 'long': 'l', + 'unsigned_long': 'L', + 'long_long': 'q', + 'unsigned_long_long': 'Q', + 'complex_float': 'F', + 'complex_double': 'D', + 'complex_long_double': 'G', + 'string': 'S', + 'character': 'c'} + +# https://docs.python.org/3/c-api/arg.html#building-values +c2buildvalue_map = {'double': 'd', + 'float': 'f', + 'char': 'b', + 'signed_char': 'b', + 'short': 'h', + 'int': 'i', + 'long': 'l', + 'long_long': 'L', + 'complex_float': 'N', + 'complex_double': 'N', + 'complex_long_double': 'N', + 'string': 'y', + 'character': 'c'} + +f2cmap_all = {'real': {'': 'float', '4': 'float', '8': 'double', + '12': 'long_double', '16': 'long_double'}, + 'integer': {'': 'int', '1': 'signed_char', '2': 'short', + '4': 'int', '8': 'long_long', + '-1': 'unsigned_char', '-2': 'unsigned_short', + '-4': 'unsigned', '-8': 'unsigned_long_long'}, + 'complex': {'': 'complex_float', '8': 'complex_float', + '16': 'complex_double', '24': 'complex_long_double', + '32': 'complex_long_double'}, + 'complexkind': {'': 'complex_float', '4': 'complex_float', + '8': 'complex_double', '12': 'complex_long_double', + '16': 'complex_long_double'}, + 'logical': {'': 'int', '1': 'char', '2': 'short', '4': 'int', + '8': 'long_long'}, + 'double complex': {'': 'complex_double'}, + 'double precision': {'': 'double'}, + 'byte': {'': 'char'}, + } + +# Add ISO_C handling +c2pycode_map.update(isoc_c2pycode_map) +c2py_map.update(iso_c2py_map) +f2cmap_all, _ = process_f2cmap_dict(f2cmap_all, iso_c_binding_map, c2py_map) +# End ISO_C handling +f2cmap_default = copy.deepcopy(f2cmap_all) + +f2cmap_mapped = [] + +def load_f2cmap_file(f2cmap_file): + global f2cmap_all, f2cmap_mapped + + f2cmap_all = copy.deepcopy(f2cmap_default) + + if f2cmap_file is None: + # Default value + f2cmap_file = '.f2py_f2cmap' + if not os.path.isfile(f2cmap_file): + return + + # User defined additions to f2cmap_all. + # f2cmap_file must contain a dictionary of dictionaries, only. For + # example, {'real':{'low':'float'}} means that Fortran 'real(low)' is + # interpreted as C 'float'. This feature is useful for F90/95 users if + # they use PARAMETERS in type specifications. 
+ try: + outmess(f'Reading f2cmap from {f2cmap_file!r} ...\n') + with open(f2cmap_file) as f: + d = eval(f.read().lower(), {}, {}) + f2cmap_all, f2cmap_mapped = process_f2cmap_dict(f2cmap_all, d, c2py_map, True) + outmess('Successfully applied user defined f2cmap changes\n') + except Exception as msg: + errmess(f'Failed to apply user defined f2cmap changes: {msg}. Skipping.\n') + + +cformat_map = {'double': '%g', + 'float': '%g', + 'long_double': '%Lg', + 'char': '%d', + 'signed_char': '%d', + 'unsigned_char': '%hhu', + 'short': '%hd', + 'unsigned_short': '%hu', + 'int': '%d', + 'unsigned': '%u', + 'long': '%ld', + 'unsigned_long': '%lu', + 'long_long': '%ld', + 'complex_float': '(%g,%g)', + 'complex_double': '(%g,%g)', + 'complex_long_double': '(%Lg,%Lg)', + 'string': '\\"%s\\"', + 'character': "'%c'", + } + +# Auxiliary functions + + +def getctype(var): + """ + Determines C type + """ + ctype = 'void' + if isfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getctype(var['vars'][a]) + else: + errmess(f'getctype: function {a} has no return value?!\n') + elif issubroutine(var): + return ctype + elif ischaracter_or_characterarray(var): + return 'character' + elif isstring_or_stringarray(var): + return 'string' + elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: + typespec = var['typespec'].lower() + f2cmap = f2cmap_all[typespec] + ctype = f2cmap[''] # default type + if 'kindselector' in var: + if '*' in var['kindselector']: + try: + ctype = f2cmap[var['kindselector']['*']] + except KeyError: + errmess('getctype: "%s %s %s" not supported.\n' % + (var['typespec'], '*', var['kindselector']['*'])) + elif 'kind' in var['kindselector']: + if typespec + 'kind' in f2cmap_all: + f2cmap = f2cmap_all[typespec + 'kind'] + try: + ctype = f2cmap[var['kindselector']['kind']] + except KeyError: + if typespec in f2cmap_all: + f2cmap = f2cmap_all[typespec] + try: + ctype = f2cmap[str(var['kindselector']['kind'])] + except KeyError: + errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="")) in %s/.f2py_f2cmap file).\n' + % (typespec, var['kindselector']['kind'], ctype, + typespec, var['kindselector']['kind'], os.getcwd())) + elif not isexternal(var): + errmess(f'getctype: No C-type found in "{var}", assuming void.\n') + return ctype + + +def f2cexpr(expr): + """Rewrite Fortran expression as f2py supported C expression. + + Due to the lack of a proper expression parser in f2py, this + function uses a heuristic approach that assumes that Fortran + arithmetic expressions are valid C arithmetic expressions when + mapping Fortran function calls to the corresponding C function/CPP + macros calls. 
+ + """ + # TODO: support Fortran `len` function with optional kind parameter + expr = re.sub(r'\blen\b', 'f2py_slen', expr) + return expr + + +def getstrlength(var): + if isstringfunction(var): + if 'result' in var: + a = var['result'] + else: + a = var['name'] + if a in var['vars']: + return getstrlength(var['vars'][a]) + else: + errmess(f'getstrlength: function {a} has no return value?!\n') + if not isstring(var): + errmess( + f'getstrlength: expected a signature of a string but got: {repr(var)}\n') + len = '1' + if 'charselector' in var: + a = var['charselector'] + if '*' in a: + len = a['*'] + elif 'len' in a: + len = f2cexpr(a['len']) + if re.match(r'\(\s*(\*|:)\s*\)', len) or re.match(r'(\*|:)', len): + if isintent_hide(var): + errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n' % ( + repr(var))) + len = '-1' + return len + + +def getarrdims(a, var, verbose=0): + ret = {} + if isstring(var) and not isarray(var): + ret['size'] = getstrlength(var) + ret['rank'] = '0' + ret['dims'] = '' + elif isscalar(var): + ret['size'] = '1' + ret['rank'] = '0' + ret['dims'] = '' + elif isarray(var): + dim = copy.copy(var['dimension']) + ret['size'] = '*'.join(dim) + try: + ret['size'] = repr(eval(ret['size'])) + except Exception: + pass + ret['dims'] = ','.join(dim) + ret['rank'] = repr(len(dim)) + ret['rank*[-1]'] = repr(len(dim) * [-1])[1:-1] + for i in range(len(dim)): # solve dim for dependencies + v = [] + if dim[i] in depargs: + v = [dim[i]] + else: + for va in depargs: + if re.match(r'.*?\b%s\b.*' % va, dim[i]): + v.append(va) + for va in v: + if depargs.index(va) > depargs.index(a): + dim[i] = '*' + break + ret['setdims'], i = '', -1 + for d in dim: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['setdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['setdims'], i, d) + if ret['setdims']: + ret['setdims'] = ret['setdims'][:-1] + ret['cbsetdims'], i = '', -1 + for d in var['dimension']: + i = i + 1 + if d not in ['*', ':', '(*)', '(:)']: + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, d) + elif isintent_in(var): + outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' + % (d)) + ret['cbsetdims'] = '%s#varname#_Dims[%d]=%s,' % ( + ret['cbsetdims'], i, 0) + elif verbose: + errmess( + f'getarrdims: If in call-back function: array argument {repr(a)} must have bounded dimensions: got {repr(d)}\n') + if ret['cbsetdims']: + ret['cbsetdims'] = ret['cbsetdims'][:-1] +# if not isintent_c(var): +# var['dimension'].reverse() + return ret + + +def getpydocsign(a, var): + global lcb_map + if isfunction(var): + if 'result' in var: + af = var['result'] + else: + af = var['name'] + if af in var['vars']: + return getpydocsign(af, var['vars'][af]) + else: + errmess(f'getctype: function {af} has no return value?!\n') + return '', '' + sig, sigout = a, a + opt = '' + if isintent_in(var): + opt = 'input' + elif isintent_inout(var): + opt = 'in/output' + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + init = '' + ctype = getctype(var) + + if hasinitvalue(var): + init, showinit = getinit(a, var) + init = f', optional\\n Default: {showinit}' + if isscalar(var): + if isintent_inout(var): + sig = '%s : %s rank-0 array(%s,\'%s\')%s' % (a, opt, c2py_map[ctype], + c2pycode_map[ctype], init) + else: + sig = f'{a} : {opt} {c2py_map[ctype]}{init}' + sigout = f'{out_a} : {c2py_map[ctype]}' + elif isstring(var): + if isintent_inout(var): + sig = '%s : %s rank-0 
array(string(len=%s),\'c\')%s' % ( + a, opt, getstrlength(var), init) + else: + sig = f'{a} : {opt} string(len={getstrlength(var)}){init}' + sigout = f'{out_a} : string(len={getstrlength(var)})' + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : %s rank-%s array(\'%s\') with bounds (%s)%s' % (a, opt, rank, + c2pycode_map[ + ctype], + ','.join(dim), init) + if a == out_a: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s)'\ + % (a, rank, c2pycode_map[ctype], ','.join(dim)) + else: + sigout = '%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ + % (out_a, rank, c2pycode_map[ctype], ','.join(dim), a) + elif isexternal(var): + ua = '' + if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: + ua = lcb2_map[lcb_map[a]]['argname'] + if not ua == a: + ua = f' => {ua}' + else: + ua = '' + sig = f'{a} : call-back function{ua}' + sigout = sig + else: + errmess( + f'getpydocsign: Could not resolve docsignature for "{a}".\n') + return sig, sigout + + +def getarrdocsign(a, var): + ctype = getctype(var) + if isstring(var) and (not isarray(var)): + sig = f'{a} : rank-0 array(string(len={getstrlength(var)}),\'c\')' + elif isscalar(var): + sig = f'{a} : rank-0 array({c2py_map[ctype]},\'{c2pycode_map[ctype]}\')' + elif isarray(var): + dim = var['dimension'] + rank = repr(len(dim)) + sig = '%s : rank-%s array(\'%s\') with bounds (%s)' % (a, rank, + c2pycode_map[ + ctype], + ','.join(dim)) + return sig + + +def getinit(a, var): + if isstring(var): + init, showinit = '""', "''" + else: + init, showinit = '', '' + if hasinitvalue(var): + init = var['='] + showinit = init + if iscomplex(var) or iscomplexarray(var): + ret = {} + + try: + v = var["="] + if ',' in v: + ret['init.r'], ret['init.i'] = markoutercomma( + v[1:-1]).split('@,@') + else: + v = eval(v, {}, {}) + ret['init.r'], ret['init.i'] = str(v.real), str(v.imag) + except Exception: + raise ValueError( + f'getinit: expected complex number `(r,i)\' but got `{init}\' as initial value of {a!r}.') + if isarray(var): + init = f"(capi_c.r={ret['init.r']},capi_c.i={ret['init.i']},capi_c)" + elif isstring(var): + if not init: + init, showinit = '""', "''" + if init[0] == "'": + init = '"%s"' % (init[1:-1].replace('"', '\\"')) + if init[0] == '"': + showinit = f"'{init[1:-1]}'" + return init, showinit + + +def get_elsize(var): + if isstring(var) or isstringarray(var): + elsize = getstrlength(var) + # override with user-specified length when available: + elsize = var['charselector'].get('f2py_len', elsize) + return elsize + if ischaracter(var) or ischaracterarray(var): + return '1' + # for numerical types, PyArray_New* functions ignore specified + # elsize, so we just return 1 and let elsize be determined at + # runtime, see fortranobject.c + return '1' + + +def sign2map(a, var): + """ + varname,ctype,atype + init,init.r,init.i,pytype + vardebuginfo,vardebugshowvalue,varshowvalue + varrformat + + intent + """ + out_a = a + if isintent_out(var): + for k in var['intent']: + if k[:4] == 'out=': + out_a = k[4:] + break + ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)} + intent_flags = [] + for f, s in isintent_dict.items(): + if f(var): + intent_flags.append(f'F2PY_{s}') + if intent_flags: + # TODO: Evaluate intent_flags here. 
+ ret['intent'] = '|'.join(intent_flags) + else: + ret['intent'] = 'F2PY_INTENT_IN' + if isarray(var): + ret['varrformat'] = 'N' + elif ret['ctype'] in c2buildvalue_map: + ret['varrformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['varrformat'] = 'O' + ret['init'], ret['showinit'] = getinit(a, var) + if hasinitvalue(var) and iscomplex(var) and not isarray(var): + ret['init.r'], ret['init.i'] = markoutercomma( + ret['init'][1:-1]).split('@,@') + if isexternal(var): + ret['cbnamekey'] = a + if a in lcb_map: + ret['cbname'] = lcb_map[a] + ret['maxnofargs'] = lcb2_map[lcb_map[a]]['maxnofargs'] + ret['nofoptargs'] = lcb2_map[lcb_map[a]]['nofoptargs'] + ret['cbdocstr'] = lcb2_map[lcb_map[a]]['docstr'] + ret['cblatexdocstr'] = lcb2_map[lcb_map[a]]['latexdocstr'] + else: + ret['cbname'] = a + errmess('sign2map: Confused: external %s is not in lcb_map%s.\n' % ( + a, list(lcb_map.keys()))) + if isstring(var): + ret['length'] = getstrlength(var) + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + dim = copy.copy(var['dimension']) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + # Debug info + if debugcapi(var): + il = [isintent_in, 'input', isintent_out, 'output', + isintent_inout, 'inoutput', isrequired, 'required', + isoptional, 'optional', isintent_hide, 'hidden', + iscomplex, 'complex scalar', + l_and(isscalar, l_not(iscomplex)), 'scalar', + isstring, 'string', isarray, 'array', + iscomplexarray, 'complex array', isstringarray, 'string array', + iscomplexfunction, 'complex function', + l_and(isfunction, l_not(iscomplexfunction)), 'function', + isexternal, 'callback', + isintent_callback, 'callback', + isintent_aux, 'auxiliary', + ] + rl = [] + for i in range(0, len(il), 2): + if il[i](var): + rl.append(il[i + 1]) + if isstring(var): + rl.append(f"slen({a})={ret['length']}") + if isarray(var): + ddim = ','.join( + map(lambda x, y: f'{x}|{y}', var['dimension'], dim)) + rl.append(f'dims({ddim})') + if isexternal(var): + ret['vardebuginfo'] = f"debug-capi:{a}=>{ret['cbname']}:{','.join(rl)}" + else: + ret['vardebuginfo'] = 'debug-capi:%s %s=%s:%s' % ( + ret['ctype'], a, ret['showinit'], ','.join(rl)) + if isscalar(var): + if ret['ctype'] in cformat_map: + ret['vardebugshowvalue'] = f"debug-capi:{a}={cformat_map[ret['ctype']]}" + if isstring(var): + ret['vardebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isexternal(var): + ret['vardebugshowvalue'] = f'debug-capi:{a}=%p' + if ret['ctype'] in cformat_map: + ret['varshowvalue'] = f"#name#:{a}={cformat_map[ret['ctype']]}" + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isstring(var): + ret['varshowvalue'] = '#name#:slen(%s)=%%d %s=\\"%%s\\"' % (a, a) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + return ret + + +def routsign2map(rout): + """ + name,NAME,begintitle,endtitle + rname,ctype,rformat + routdebugshowvalue + """ + global lcb_map + name = rout['name'] + fname = getfortranname(rout) + ret = {'name': name, + 'texname': name.replace('_', '\\_'), + 'name_lower': name.lower(), + 'NAME': name.upper(), + 'begintitle': gentitle(name), + 'endtitle': gentitle(f'end of {name}'), + 'fortranname': fname, + 'FORTRANNAME': fname.upper(), + 'callstatement': getcallstatement(rout) or '', + 'usercode': getusercode(rout) or '', + 'usercode1': getusercode1(rout) or '', + } + if '_' in fname: + ret['F_FUNC'] = 'F_FUNC_US' + else: + ret['F_FUNC'] = 'F_FUNC' + if '_' in name: + 
ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' + else: + ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' + lcb_map = {} + if 'use' in rout: + for u in rout['use'].keys(): + if u in cb_rules.cb_map: + for un in cb_rules.cb_map[u]: + ln = un[0] + if 'map' in rout['use'][u]: + for k in rout['use'][u]['map'].keys(): + if rout['use'][u]['map'][k] == un[0]: + ln = k + break + lcb_map[ln] = un[1] + elif rout.get('externals'): + errmess('routsign2map: Confused: function %s has externals %s but no "use" statement.\n' % ( + ret['name'], repr(rout['externals']))) + ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + ret['ctype'] = getctype(rout['vars'][a]) + if hasresultnote(rout): + ret['resultnote'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + if ret['ctype'] in c2buildvalue_map: + ret['rformat'] = c2buildvalue_map[ret['ctype']] + else: + ret['rformat'] = 'O' + errmess('routsign2map: no c2buildvalue key for type %s\n' % + (repr(ret['ctype']))) + if debugcapi(rout): + if ret['ctype'] in cformat_map: + ret['routdebugshowvalue'] = 'debug-capi:%s=%s' % ( + a, cformat_map[ret['ctype']]) + if isstringfunction(rout): + ret['routdebugshowvalue'] = 'debug-capi:slen(%s)=%%d %s=\\"%%s\\"' % ( + a, a) + if isstringfunction(rout): + ret['rlength'] = getstrlength(rout['vars'][a]) + if ret['rlength'] == '-1': + errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n' % ( + repr(rout['name']))) + ret['rlength'] = '10' + if hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def modsign2map(m): + """ + modulename + """ + if ismodule(m): + ret = {'f90modulename': m['name'], + 'F90MODULENAME': m['name'].upper(), + 'texf90modulename': m['name'].replace('_', '\\_')} + else: + ret = {'modulename': m['name'], + 'MODULENAME': m['name'].upper(), + 'texmodulename': m['name'].replace('_', '\\_')} + ret['restdoc'] = getrestdoc(m) or [] + if hasnote(m): + ret['note'] = m['note'] + ret['usercode'] = getusercode(m) or '' + ret['usercode1'] = getusercode1(m) or '' + if m['body']: + ret['interface_usercode'] = getusercode(m['body'][0]) or '' + else: + ret['interface_usercode'] = '' + ret['pymethoddef'] = getpymethoddef(m) or '' + if 'gil_used' in m: + ret['gil_used'] = m['gil_used'] + if 'coutput' in m: + ret['coutput'] = m['coutput'] + if 'f2py_wrapper_output' in m: + ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] + return ret + + +def cb_sign2map(a, var, index=None): + ret = {'varname': a} + ret['varname_i'] = ret['varname'] + ret['ctype'] = getctype(var) + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + return ret + + +def cb_routsign2map(rout, um): + """ + name,begintitle,endtitle,argname + ctype,rctype,maxnofargs,nofoptargs,returncptr + """ + ret = {'name': f"cb_{rout['name']}_in_{um}", + 'returncptr': ''} + if isintent_callback(rout): + if '_' in rout['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + ret['callbackname'] = 
f"{F_FUNC}({rout['name'].lower()},{rout['name'].upper()})" + ret['static'] = 'extern' + else: + ret['callbackname'] = ret['name'] + ret['static'] = 'static' + ret['argname'] = rout['name'] + ret['begintitle'] = gentitle(ret['name']) + ret['endtitle'] = gentitle(f"end of {ret['name']}") + ret['ctype'] = getctype(rout) + ret['rctype'] = 'void' + if ret['ctype'] == 'string': + ret['rctype'] = 'void' + else: + ret['rctype'] = ret['ctype'] + if ret['rctype'] != 'void': + if iscomplexfunction(rout): + ret['returncptr'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +return_value= +#endif +""" + else: + ret['returncptr'] = 'return_value=' + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isstringfunction(rout): + ret['strlength'] = getstrlength(rout) + if isfunction(rout): + if 'result' in rout: + a = rout['result'] + else: + a = rout['name'] + if hasnote(rout['vars'][a]): + ret['note'] = rout['vars'][a]['note'] + rout['vars'][a]['note'] = ['See elsewhere.'] + ret['rname'] = a + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, rout) + if iscomplexfunction(rout): + ret['rctype'] = """ +#ifdef F2PY_CB_RETURNCOMPLEX +#ctype# +#else +void +#endif +""" + elif hasnote(rout): + ret['note'] = rout['note'] + rout['note'] = ['See elsewhere.'] + nofargs = 0 + nofoptargs = 0 + if 'args' in rout and 'vars' in rout: + for a in rout['args']: + var = rout['vars'][a] + if l_or(isintent_in, isintent_inout)(var): + nofargs = nofargs + 1 + if isoptional(var): + nofoptargs = nofoptargs + 1 + ret['maxnofargs'] = repr(nofargs) + ret['nofoptargs'] = repr(nofoptargs) + if hasnote(rout) and isfunction(rout) and 'result' in rout: + ret['routnote'] = rout['note'] + rout['note'] = ['See elsewhere.'] + return ret + + +def common_sign2map(a, var): # obsolete + ret = {'varname': a, 'ctype': getctype(var)} + if isstringarray(var): + ret['ctype'] = 'char' + if ret['ctype'] in c2capi_map: + ret['atype'] = c2capi_map[ret['ctype']] + ret['elsize'] = get_elsize(var) + if ret['ctype'] in cformat_map: + ret['showvalueformat'] = f"{cformat_map[ret['ctype']]}" + if isarray(var): + ret = dictappend(ret, getarrdims(a, var)) + elif isstring(var): + ret['size'] = getstrlength(var) + ret['rank'] = '1' + ret['pydocsign'], ret['pydocsignout'] = getpydocsign(a, var) + if hasnote(var): + ret['note'] = var['note'] + var['note'] = ['See elsewhere.'] + # for strings this returns 0-rank but actually is 1-rank + ret['arrdocstr'] = getarrdocsign(a, var) + return ret diff --git a/python/numpy/f2py/capi_maps.pyi b/python/numpy/f2py/capi_maps.pyi new file mode 100644 index 000000000..926600365 --- /dev/null +++ b/python/numpy/f2py/capi_maps.pyi @@ -0,0 +1,33 @@ +from .auxfuncs import _ROut, _Var, process_f2cmap_dict + +__all__ = [ + "cb_routsign2map", + "cb_sign2map", + "common_sign2map", + "getarrdims", + "getarrdocsign", + "getctype", + "getinit", + "getpydocsign", + "getstrlength", + "modsign2map", + "process_f2cmap_dict", + "routsign2map", + "sign2map", +] + +### + +def getctype(var: _Var) -> str: ... +def f2cexpr(expr: str) -> str: ... +def getstrlength(var: _Var) -> str: ... +def getarrdims(a: str, var: _Var, verbose: int = 0) -> dict[str, str]: ... +def getpydocsign(a: str, var: _Var) -> tuple[str, str]: ... +def getarrdocsign(a: str, var: _Var) -> str: ... +def getinit(a: str, var: _Var) -> tuple[str, str]: ... +def sign2map(a: str, var: _Var) -> dict[str, str]: ... +def routsign2map(rout: _ROut) -> dict[str, str]: ... +def modsign2map(m: _ROut) -> dict[str, str]: ... 
+def cb_sign2map(a: str, var: _Var, index: object | None = None) -> dict[str, str]: ... +def cb_routsign2map(rout: _ROut, um: str) -> dict[str, str]: ... +def common_sign2map(a: str, var: _Var) -> dict[str, str]: ... # obsolete diff --git a/python/numpy/f2py/cb_rules.py b/python/numpy/f2py/cb_rules.py new file mode 100644 index 000000000..238d47311 --- /dev/null +++ b/python/numpy/f2py/cb_rules.py @@ -0,0 +1,665 @@ +""" +Build call-back mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__, cfuncs +from .auxfuncs import ( + applyrules, + debugcapi, + dictappend, + errmess, + getargs, + hasnote, + isarray, + iscomplex, + iscomplexarray, + iscomplexfunction, + isfunction, + isintent_c, + isintent_hide, + isintent_in, + isintent_inout, + isintent_nothide, + isintent_out, + isoptional, + isrequired, + isscalar, + isstring, + isstringfunction, + issubroutine, + l_and, + l_not, + l_or, + outmess, + replace, + stripcomma, + throw_error, +) + +f2py_version = __version__.version + + +################## Rules for callback function ############## + +cb_routine_rules = { + 'cbtypedefs': 'typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);', + 'body': """ +#begintitle# +typedef struct { + PyObject *capi; + PyTupleObject *args_capi; + int nofargs; + jmp_buf jmpbuf; +} #name#_t; + +#if defined(F2PY_THREAD_LOCAL_DECL) && !defined(F2PY_USE_PYTHON_TLS) + +static F2PY_THREAD_LOCAL_DECL #name#_t *_active_#name# = NULL; + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + #name#_t *prev = _active_#name#; + _active_#name# = ptr; + return prev; +} + +static #name#_t *get_active_#name#(void) { + return _active_#name#; +} + +#else + +static #name#_t *swap_active_#name#(#name#_t *ptr) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PySwapThreadLocalCallbackPtr(key, ptr); +} + +static #name#_t *get_active_#name#(void) { + char *key = "__f2py_cb_#name#"; + return (#name#_t *)F2PyGetThreadLocalCallbackPtr(key); +} + +#endif + +/*typedef #rctype#(*#name#_typedef)(#optargs_td##args_td##strarglens_td##noargs#);*/ +#static# #rctype# #callbackname# (#optargs##args##strarglens##noargs#) { + #name#_t cb_local = { NULL, NULL, 0 }; + #name#_t *cb = NULL; + PyTupleObject *capi_arglist = NULL; + PyObject *capi_return = NULL; + PyObject *capi_tmp = NULL; + PyObject *capi_arglist_list = NULL; + int capi_j,capi_i = 0; + int capi_longjmp_ok = 1; +#decl# +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_clock(); +#endif + cb = get_active_#name#(); + if (cb == NULL) { + capi_longjmp_ok = 0; + cb = &cb_local; + } + capi_arglist = cb->args_capi; + CFUNCSMESS(\"cb:Call-back function #name# (maxnofargs=#maxnofargs#(-#nofoptargs#))\\n\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + if (cb->capi==NULL) { + capi_longjmp_ok = 0; + cb->capi = PyObject_GetAttrString(#modulename#_module,\"#argname#\"); + CFUNCSMESSPY(\"cb:#name#_capi=\",cb->capi); + } + if (cb->capi==NULL) { + PyErr_SetString(#modulename#_error,\"cb: Callback #argname# not defined (as an argument or module #modulename# attribute).\\n\"); + goto capi_fail; + } + if (F2PyCapsule_Check(cb->capi)) { + #name#_typedef #name#_cptr; + #name#_cptr = F2PyCapsule_AsVoidPtr(cb->capi); + #returncptr#(*#name#_cptr)(#optargs_nm##args_nm##strarglens_nm#); + #return# + } + if 
(capi_arglist==NULL) { + capi_longjmp_ok = 0; + capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#argname#_extra_args\"); + if (capi_tmp) { + capi_arglist = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + if (capi_arglist==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#argname#_extra_args to tuple.\\n\"); + goto capi_fail; + } + } else { + PyErr_Clear(); + capi_arglist = (PyTupleObject *)Py_BuildValue(\"()\"); + } + } + if (capi_arglist == NULL) { + PyErr_SetString(#modulename#_error,\"Callback #argname# argument list is not set.\\n\"); + goto capi_fail; + } +#setdims# +#ifdef PYPY_VERSION +#define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) + capi_arglist_list = PySequence_List((PyObject *)capi_arglist); + if (capi_arglist_list == NULL) goto capi_fail; +#else +#define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) +#endif +#pyobjfrom# +#undef CAPI_ARGLIST_SETITEM +#ifdef PYPY_VERSION + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist_list); +#else + CFUNCSMESSPY(\"cb:capi_arglist=\",capi_arglist); +#endif + CFUNCSMESS(\"cb:Call-back calling Python function #argname#.\\n\"); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_start_call_clock(); +#endif +#ifdef PYPY_VERSION + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist_list); + Py_DECREF(capi_arglist_list); + capi_arglist_list = NULL; +#else + capi_return = PyObject_CallObject(cb->capi,(PyObject *)capi_arglist); +#endif +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_call_clock(); +#endif + CFUNCSMESSPY(\"cb:capi_return=\",capi_return); + if (capi_return == NULL) { + fprintf(stderr,\"capi_return is NULL\\n\"); + goto capi_fail; + } + if (capi_return == Py_None) { + Py_DECREF(capi_return); + capi_return = Py_BuildValue(\"()\"); + } + else if (!PyTuple_Check(capi_return)) { + capi_return = Py_BuildValue(\"(N)\",capi_return); + } + capi_j = PyTuple_Size(capi_return); + capi_i = 0; +#frompyobj# + CFUNCSMESS(\"cb:#name#:successful\\n\"); + Py_DECREF(capi_return); +#ifdef F2PY_REPORT_ATEXIT +f2py_cb_stop_clock(); +#endif + goto capi_return_pt; +capi_fail: + fprintf(stderr,\"Call-back #name# failed.\\n\"); + Py_XDECREF(capi_return); + Py_XDECREF(capi_arglist_list); + if (capi_longjmp_ok) { + longjmp(cb->jmpbuf,-1); + } +capi_return_pt: + ; +#return# +} +#endtitle# +""", + 'need': ['setjmp.h', 'CFUNCSMESS', 'F2PY_THREAD_LOCAL_DECL'], + 'maxnofargs': '#maxnofargs#', + 'nofoptargs': '#nofoptargs#', + 'docstr': """\ + def #argname#(#docsignature#): return #docreturn#\\n\\ +#docstrsigns#""", + 'latexdocstr': """ +{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}} +#routnote# + +#latexdocstrsigns#""", + 'docstrshort': 'def #argname#(#docsignature#): return #docreturn#' +} +cb_rout_rules = [ + { # Init + 'separatorsfor': {'decl': '\n', + 'args': ',', 'optargs': '', 'pyobjfrom': '\n', 'freemem': '\n', + 'args_td': ',', 'optargs_td': '', + 'args_nm': ',', 'optargs_nm': '', + 'frompyobj': '\n', 'setdims': '\n', + 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': '\n', + }, + 'decl': '/*decl*/', 'pyobjfrom': '/*pyobjfrom*/', 'frompyobj': '/*frompyobj*/', + 'args': [], 'optargs': '', 'return': '', 'strarglens': '', 'freemem': '/*freemem*/', + 'args_td': [], 'optargs_td': '', 'strarglens_td': '', + 'args_nm': [], 'optargs_nm': '', 'strarglens_nm': '', + 'noargs': '', + 'setdims': '/*setdims*/', + 
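+        # Note: the Init entries above and below only seed separators and
+        # empty placeholders; the subsequent rule dicts in cb_rout_rules
+        # supply the real content, merged in via dictappend().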
'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': ' Required arguments:', + 'docstropt': ' Optional arguments:', + 'docstrout': ' Return objects:', + 'docstrcbs': ' Call-back functions:', + 'docreturn': '', 'docsign': '', 'docsignopt': '', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { # Function + 'decl': ' #ctype# return_value = 0;', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + '''\ + if (capi_j>capi_i) { + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + "#ctype#_from_pyobj failed in converting return_value of" + " call-back function #name# to C #ctype#\\n"); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }''', + {debugcapi: + ' fprintf(stderr,"#showvalueformat#.\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, 'GETSCALARFROMPYTUPLE'], + 'return': ' return return_value;', + '_check': l_and(isfunction, l_not(isstringfunction), l_not(iscomplexfunction)) + }, + { # String function + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"debug-capi:cb:#name#:%d:\\n",return_value_len);'}, + 'args': '#ctype# return_value,int return_value_len', + 'args_nm': 'return_value,&return_value_len', + 'args_td': '#ctype# ,int', + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->\\"");'}, + """\ + if (capi_j>capi_i) { + GETSTRFROMPYTUPLE(capi_return,capi_i++,return_value,return_value_len); + } else { + fprintf(stderr,"Warning: call-back function #name# did not provide" + " return value (index=%d, type=#ctype#)\\n",capi_i); + }""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\".\\n",return_value);'} + ], + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSTRFROMPYTUPLE'], + 'return': 'return;', + '_check': isstringfunction + }, + { # Complex function + 'optargs': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# *return_value +#endif +""", + 'optargs_nm': """ +#ifndef F2PY_CB_RETURNCOMPLEX +return_value +#endif +""", + 'optargs_td': """ +#ifndef F2PY_CB_RETURNCOMPLEX +#ctype# * +#endif +""", + 'decl': """ +#ifdef F2PY_CB_RETURNCOMPLEX + #ctype# return_value = {0, 0}; +#endif +""", + 'frompyobj': [ + {debugcapi: ' CFUNCSMESS("cb:Getting return_value->");'}, + """\ + if (capi_j>capi_i) { +#ifdef F2PY_CB_RETURNCOMPLEX + GETSCALARFROMPYTUPLE(capi_return,capi_i++,&return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#else + GETSCALARFROMPYTUPLE(capi_return,capi_i++,return_value,#ctype#, + \"#ctype#_from_pyobj failed in converting return_value of call-back\" + \" function #name# to C #ctype#\\n\"); +#endif + } else { + fprintf(stderr, + \"Warning: call-back function #name# did not provide\" + \" return value (index=%d, type=#ctype#)\\n\",capi_i); + }""", + {debugcapi: """\ +#ifdef F2PY_CB_RETURNCOMPLEX + fprintf(stderr,\"#showvalueformat#.\\n\",(return_value).r,(return_value).i); +#else + fprintf(stderr,\"#showvalueformat#.\\n\",(*return_value).r,(*return_value).i); +#endif +"""} + ], + 'return': """ +#ifdef F2PY_CB_RETURNCOMPLEX + return return_value; +#else + return; +#endif +""", + 'need': ['#ctype#_from_pyobj', {debugcapi: 'CFUNCSMESS'}, + 'string.h', 'GETSCALARFROMPYTUPLE', 
'#ctype#'], + '_check': iscomplexfunction + }, + {'docstrout': ' #pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasnote: '--- #note#'}], + 'docreturn': '#rname#,', + '_check': isfunction}, + {'_check': issubroutine, 'return': 'return;'} +] + +cb_arg_rules = [ + { # Doc + 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'}, + 'docstrout': {isintent_out: ' #pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'docsign': {l_and(isrequired, isintent_nothide): '#varname#,'}, + 'docsignopt': {l_and(isoptional, isintent_nothide): '#varname#,'}, + 'depend': '' + }, + { + 'args': { + l_and(isscalar, isintent_c): '#ctype# #varname_i#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *#varname_i#_cb_capi', + isarray: '#ctype# *#varname_i#', + isstring: '#ctype# #varname_i#' + }, + 'args_nm': { + l_and(isscalar, isintent_c): '#varname_i#', + l_and(isscalar, l_not(isintent_c)): '#varname_i#_cb_capi', + isarray: '#varname_i#', + isstring: '#varname_i#' + }, + 'args_td': { + l_and(isscalar, isintent_c): '#ctype#', + l_and(isscalar, l_not(isintent_c)): '#ctype# *', + isarray: '#ctype# *', + isstring: '#ctype#' + }, + 'need': {l_or(isscalar, isarray, isstring): '#ctype#'}, + # untested with multiple args + 'strarglens': {isstring: ',int #varname_i#_cb_len'}, + 'strarglens_td': {isstring: ',int'}, # untested with multiple args + # untested with multiple args + 'strarglens_nm': {isstring: ',#varname_i#_cb_len'}, + }, + { # Scalars + 'decl': {l_not(isintent_c): ' #ctype# #varname_i#=(*#varname_i#_cb_capi);'}, + 'error': {l_and(isintent_c, isintent_out, + throw_error('intent(c,out) is forbidden for callback scalar arguments')): + ''}, + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + {isintent_out: + ' if (capi_j>capi_i)\n GETSCALARFROMPYTUPLE(capi_return,capi_i++,#varname_i#_cb_capi,#ctype#,"#ctype#_from_pyobj failed in converting argument #varname# of call-back function #name# to C #ctype#\\n");'}, + {l_and(debugcapi, l_and(l_not(iscomplex), isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",#varname_i#);'}, + {l_and(debugcapi, l_and(l_not(iscomplex), l_not(isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",*#varname_i#_cb_capi);'}, + {l_and(debugcapi, l_and(iscomplex, isintent_c)): + ' fprintf(stderr,"#showvalueformat#.\\n",(#varname_i#).r,(#varname_i#).i);'}, + {l_and(debugcapi, l_and(iscomplex, l_not(isintent_c))): + ' fprintf(stderr,"#showvalueformat#.\\n",(*#varname_i#_cb_capi).r,(*#varname_i#_cb_capi).i);'}, + ], + 'need': [{isintent_out: ['#ctype#_from_pyobj', 'GETSCALARFROMPYTUPLE']}, + {debugcapi: 'CFUNCSMESS'}], + '_check': isscalar + }, { + 'pyobjfrom': [{isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1(#varname_i#))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#_cb_capi))) + goto capi_fail;"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}, + {iscomplex: 
'#ctype#'}], + '_check': l_and(isscalar, isintent_nothide), + '_optional': '' + }, { # String + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->\\"");'}, + """ if (capi_j>capi_i) + GETSTRFROMPYTUPLE(capi_return,capi_i++,#varname_i#,#varname_i#_cb_len);""", + {debugcapi: + ' fprintf(stderr,"#showvalueformat#\\":%d:.\\n",#varname_i#,#varname_i#_cb_len);'}, + ], + 'need': ['#ctype#', 'GETSTRFROMPYTUPLE', + {debugcapi: 'CFUNCSMESS'}, 'string.h'], + '_check': l_and(isstring, isintent_out) + }, { + 'pyobjfrom': [ + {debugcapi: + (' fprintf(stderr,"debug-capi:cb:#varname#=#showvalueformat#:' + '%d:\\n",#varname_i#,#varname_i#_cb_len);')}, + {isintent_in: """\ + if (cb->nofargs>capi_i) + if (CAPI_ARGLIST_SETITEM(capi_i++,pyobj_from_#ctype#1size(#varname_i#,#varname_i#_cb_len))) + goto capi_fail;"""}, + {isintent_inout: """\ + if (cb->nofargs>capi_i) { + int #varname_i#_cb_dims[] = {#varname_i#_cb_len}; + if (CAPI_ARGLIST_SETITEM(capi_i++,pyarr_from_p_#ctype#1(#varname_i#,#varname_i#_cb_dims))) + goto capi_fail; + }"""}], + 'need': [{isintent_in: 'pyobj_from_#ctype#1size'}, + {isintent_inout: 'pyarr_from_p_#ctype#1'}], + '_check': l_and(isstring, isintent_nothide), + '_optional': '' + }, + # Array ... + { + 'decl': ' npy_intp #varname_i#_Dims[#rank#] = {#rank*[-1]#};', + 'setdims': ' #cbsetdims#;', + '_check': isarray, + '_depend': '' + }, + { + 'pyobjfrom': [{debugcapi: ' fprintf(stderr,"debug-capi:cb:#varname#\\n");'}, + {isintent_c: """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. */ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_CARRAY,NULL); +""", + l_not(isintent_c): """\ + if (cb->nofargs>capi_i) { + /* tmp_arr will be inserted to capi_arglist_list that will be + destroyed when leaving callback function wrapper together + with tmp_arr. 
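+           (This non-intent(c) branch passes NPY_ARRAY_FARRAY below, i.e. a
+           writable Fortran-contiguous view, whereas the intent(c) branch
+           above used NPY_ARRAY_CARRAY.)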
*/ + PyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type, + #rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,#elsize#, + NPY_ARRAY_FARRAY,NULL); +""", + }, + """ + if (tmp_arr==NULL) + goto capi_fail; + if (CAPI_ARGLIST_SETITEM(capi_i++,(PyObject *)tmp_arr)) + goto capi_fail; +}"""], + '_check': l_and(isarray, isintent_nothide, l_or(isintent_in, isintent_inout)), + '_optional': '', + }, { + 'frompyobj': [{debugcapi: ' CFUNCSMESS("cb:Getting #varname#->");'}, + """ if (capi_j>capi_i) { + PyArrayObject *rv_cb_arr = NULL; + if ((capi_tmp = PyTuple_GetItem(capi_return,capi_i++))==NULL) goto capi_fail; + rv_cb_arr = array_from_pyobj(#atype#,#varname_i#_Dims,#rank#,F2PY_INTENT_IN""", + {isintent_c: '|F2PY_INTENT_C'}, + """,capi_tmp); + if (rv_cb_arr == NULL) { + fprintf(stderr,\"rv_cb_arr is NULL\\n\"); + goto capi_fail; + } + MEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr)); + if (capi_tmp != (PyObject *)rv_cb_arr) { + Py_DECREF(rv_cb_arr); + } + }""", + {debugcapi: ' fprintf(stderr,"<-.\\n");'}, + ], + 'need': ['MEMCOPY', {iscomplexarray: '#ctype#'}], + '_check': l_and(isarray, isintent_out) + }, { + 'docreturn': '#varname#,', + '_check': isintent_out + } +] + +################## Build call-back module ############# +cb_map = {} + + +def buildcallbacks(m): + cb_map[m['name']] = [] + for bi in m['body']: + if bi['block'] == 'interface': + for b in bi['body']: + if b: + buildcallback(b, m['name']) + else: + errmess(f"warning: empty body for {m['name']}\n") + + +def buildcallback(rout, um): + from . import capi_maps + + outmess(f" Constructing call-back function \"cb_{rout['name']}_in_{um}\"\n") + args, depargs = getargs(rout) + capi_maps.depargs = depargs + var = rout['vars'] + vrd = capi_maps.cb_routsign2map(rout, um) + rd = dictappend({}, vrd) + cb_map[um].append([rout['name'], rd['name']]) + for r in cb_rout_rules: + if ('_check' in r and r['_check'](rout)) or ('_check' not in r): + ar = applyrules(r, vrd, rout) + rd = dictappend(rd, ar) + savevrd = {} + for i, a in enumerate(args): + vrd = capi_maps.cb_sign2map(a, var[a], index=i) + savevrd[a] = vrd + for r in cb_arg_rules: + if '_depend' in r: + continue + if '_optional' in r and isoptional(var[a]): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in args: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' in r: + continue + if ('_optional' not in r) or ('_optional' in r and isrequired(var[a])): + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + for a in depargs: + vrd = savevrd[a] + for r in cb_arg_rules: + if '_depend' not in r: + continue + if '_optional' in r: + continue + if ('_check' in r and r['_check'](var[a])) or ('_check' not in r): + ar = applyrules(r, vrd, var[a]) + rd = dictappend(rd, ar) + if '_break' in r: + break + if 'args' in rd and 'optargs' in rd: + if isinstance(rd['optargs'], list): + rd['optargs'] = rd['optargs'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_nm'] = rd['optargs_nm'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + rd['optargs_td'] = rd['optargs_td'] + [""" +#ifndef F2PY_CB_RETURNCOMPLEX +, +#endif +"""] + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + optargs = stripcomma(replace('#docsignopt#', + 
{'docsignopt': rd['docsignopt']} + )) + if optargs == '': + rd['docsignature'] = stripcomma( + replace('#docsign#', {'docsign': rd['docsign']})) + else: + rd['docsignature'] = replace('#docsign#[#docsignopt#]', + {'docsign': rd['docsign'], + 'docsignopt': optargs, + }) + rd['latexdocsignature'] = rd['docsignature'].replace('_', '\\_') + rd['latexdocsignature'] = rd['latexdocsignature'].replace(',', ', ') + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + if 'args' not in rd: + rd['args'] = '' + rd['args_td'] = '' + rd['args_nm'] = '' + if not (rd.get('args') or rd.get('optargs') or rd.get('strarglens')): + rd['noargs'] = 'void' + + ar = applyrules(cb_routine_rules, rd) + cfuncs.callbacks[rd['name']] = ar['body'] + if isinstance(ar['need'], str): + ar['need'] = [ar['need']] + + if 'need' in rd: + for t in cfuncs.typedefs.keys(): + if t in rd['need']: + ar['need'].append(t) + + cfuncs.typedefs_generated[rd['name'] + '_typedef'] = ar['cbtypedefs'] + ar['need'].append(rd['name'] + '_typedef') + cfuncs.needs[rd['name']] = ar['need'] + + capi_maps.lcb2_map[rd['name']] = {'maxnofargs': ar['maxnofargs'], + 'nofoptargs': ar['nofoptargs'], + 'docstr': ar['docstr'], + 'latexdocstr': ar['latexdocstr'], + 'argname': rd['argname'] + } + outmess(f" {ar['docstrshort']}\n") +################## Build call-back function ############# diff --git a/python/numpy/f2py/cb_rules.pyi b/python/numpy/f2py/cb_rules.pyi new file mode 100644 index 000000000..b22f5448a --- /dev/null +++ b/python/numpy/f2py/cb_rules.pyi @@ -0,0 +1,17 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +## + +f2py_version: Final = version + +cb_routine_rules: Final[dict[str, str | list[str]]] = ... +cb_rout_rules: Final[list[dict[str, str | Any]]] = ... +cb_arg_rules: Final[list[dict[str, str | Any]]] = ... + +cb_map: Final[dict[str, list[list[str]]]] = ... + +def buildcallbacks(m: Mapping[str, object]) -> None: ... +def buildcallback(rout: Mapping[str, object], um: Mapping[str, object]) -> None: ... diff --git a/python/numpy/f2py/cfuncs.py b/python/numpy/f2py/cfuncs.py new file mode 100644 index 000000000..b2b1cad3d --- /dev/null +++ b/python/numpy/f2py/cfuncs.py @@ -0,0 +1,1563 @@ +""" +C declarations, CPP macros, and C functions for f2py2e. +Only required declarations/macros/functions will be used. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy +import sys + +from . import __version__ + +f2py_version = __version__.version + + +def errmess(s: str) -> None: + """ + Write an error message to stderr. + + This indirection is needed because sys.stderr might not always be available (see #26862). 
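+    (sys.stderr can legitimately be None, e.g. when running under
+    pythonw on Windows.)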
+ """ + if sys.stderr is not None: + sys.stderr.write(s) + +##################### Definitions ################## + + +outneeds = {'includes0': [], 'includes': [], 'typedefs': [], 'typedefs_generated': [], + 'userincludes': [], + 'cppmacros': [], 'cfuncs': [], 'callbacks': [], 'f90modhooks': [], + 'commonhooks': []} +needs = {} +includes0 = {'includes0': '/*need_includes0*/'} +includes = {'includes': '/*need_includes*/'} +userincludes = {'userincludes': '/*need_userincludes*/'} +typedefs = {'typedefs': '/*need_typedefs*/'} +typedefs_generated = {'typedefs_generated': '/*need_typedefs_generated*/'} +cppmacros = {'cppmacros': '/*need_cppmacros*/'} +cfuncs = {'cfuncs': '/*need_cfuncs*/'} +callbacks = {'callbacks': '/*need_callbacks*/'} +f90modhooks = {'f90modhooks': '/*need_f90modhooks*/', + 'initf90modhooksstatic': '/*initf90modhooksstatic*/', + 'initf90modhooksdynamic': '/*initf90modhooksdynamic*/', + } +commonhooks = {'commonhooks': '/*need_commonhooks*/', + 'initcommonhooks': '/*need_initcommonhooks*/', + } + +############ Includes ################### + +includes0['math.h'] = '#include ' +includes0['string.h'] = '#include ' +includes0['setjmp.h'] = '#include ' + +includes['arrayobject.h'] = '''#define PY_ARRAY_UNIQUE_SYMBOL PyArray_API +#include "arrayobject.h"''' +includes['npy_math.h'] = '#include "numpy/npy_math.h"' + +includes['arrayobject.h'] = '#include "fortranobject.h"' +includes['stdarg.h'] = '#include ' + +############# Type definitions ############### + +typedefs['unsigned_char'] = 'typedef unsigned char unsigned_char;' +typedefs['unsigned_short'] = 'typedef unsigned short unsigned_short;' +typedefs['unsigned_long'] = 'typedef unsigned long unsigned_long;' +typedefs['signed_char'] = 'typedef signed char signed_char;' +typedefs['long_long'] = """ +#if defined(NPY_OS_WIN32) +typedef __int64 long_long; +#else +typedef long long long_long; +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['unsigned_long_long'] = """ +#if defined(NPY_OS_WIN32) +typedef __uint64 long_long; +#else +typedef unsigned long long unsigned_long_long; +#endif +""" +typedefs['long_double'] = """ +#ifndef _LONG_DOUBLE +typedef long double long_double; +#endif +""" +typedefs[ + 'complex_long_double'] = 'typedef struct {long double r,i;} complex_long_double;' +typedefs['complex_float'] = 'typedef struct {float r,i;} complex_float;' +typedefs['complex_double'] = 'typedef struct {double r,i;} complex_double;' +typedefs['string'] = """typedef char * string;""" +typedefs['character'] = """typedef char character;""" + + +############### CPP macros #################### +cppmacros['CFUNCSMESS'] = """ +#ifdef DEBUGCFUNCS +#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess); +#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +#else +#define CFUNCSMESS(mess) +#define CFUNCSMESSPY(mess,obj) +#endif +""" +cppmacros['F_FUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F +#else +#define F_FUNC(f,F) _##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) _##F##_ +#else +#define F_FUNC(f,F) _##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F +#else +#define F_FUNC(f,F) f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_FUNC(f,F) F##_ +#else +#define F_FUNC(f,F) f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_FUNC_US(f,F) 
F_FUNC(f##_,F##_) +#else +#define F_FUNC_US(f,F) F_FUNC(f,F) +#endif +""" +cppmacros['F_WRAPPEDFUNC'] = """ +#if defined(PREPEND_FORTRAN) +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) _F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) _f2pywrap##f##_ +#endif +#endif +#else +#if defined(NO_APPEND_FORTRAN) +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f +#endif +#else +#if defined(UPPERCASE_FORTRAN) +#define F_WRAPPEDFUNC(f,F) F2PYWRAP##F##_ +#else +#define F_WRAPPEDFUNC(f,F) f2pywrap##f##_ +#endif +#endif +#endif +#if defined(UNDERSCORE_G77) +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f##_,F##_) +#else +#define F_WRAPPEDFUNC_US(f,F) F_WRAPPEDFUNC(f,F) +#endif +""" +cppmacros['F_MODFUNC'] = """ +#if defined(F90MOD2CCONV1) /*E.g. Compaq Fortran */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f +#else +#define F_MODFUNCNAME(m,f) $ ## m ## $ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV2) /*E.g. IBM XL Fortran, not tested though */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f +#else +#define F_MODFUNCNAME(m,f) __ ## m ## _MOD_ ## f ## _ +#endif +#endif + +#if defined(F90MOD2CCONV3) /*E.g. MIPSPro Compilers */ +#if defined(NO_APPEND_FORTRAN) +#define F_MODFUNCNAME(m,f) f ## .in. ## m +#else +#define F_MODFUNCNAME(m,f) f ## .in. ## m ## _ +#endif +#endif +/* +#if defined(UPPERCASE_FORTRAN) +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(M,F) +#else +#define F_MODFUNC(m,M,f,F) F_MODFUNCNAME(m,f) +#endif +*/ + +#define F_MODFUNC(m,f) (*(f2pymodstruct##m##.##f)) +""" +cppmacros['SWAPUNSAFE'] = """ +#define SWAP(a,b) (size_t)(a) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(b) = ((size_t)(a) ^ (size_t)(b));\\ + (size_t)(a) = ((size_t)(a) ^ (size_t)(b)) +""" +cppmacros['SWAP'] = """ +#define SWAP(a,b,t) {\\ + t *c;\\ + c = a;\\ + a = b;\\ + b = c;} +""" +# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & +# NPY_ARRAY_C_CONTIGUOUS)' +cppmacros['PRINTPYOBJERR'] = """ +#define PRINTPYOBJERR(obj)\\ + fprintf(stderr,\"#modulename#.error is related to \");\\ + PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\ + fprintf(stderr,\"\\n\"); +""" +cppmacros['MINMAX'] = """ +#ifndef max +#define max(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef min +#define min(a,b) ((a < b) ? (a) : (b)) +#endif +#ifndef MAX +#define MAX(a,b) ((a > b) ? (a) : (b)) +#endif +#ifndef MIN +#define MIN(a,b) ((a < b) ? (a) : (b)) +#endif +""" +cppmacros['len..'] = """ +/* See fortranobject.h for definitions. The macros here are provided for BC. 
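+   In practice they let expressions such as len(x) or shape(x,0), written
+   in .pyf signatures, expand to the f2py_* helpers from fortranobject.h.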
*/ +#define rank f2py_rank +#define shape f2py_shape +#define fshape f2py_shape +#define len f2py_len +#define flen f2py_flen +#define slen f2py_slen +#define size f2py_size +""" +cppmacros['pyobj_from_char1'] = r""" +#define pyobj_from_char1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_short1'] = r""" +#define pyobj_from_short1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_int1'] = ['signed_char'] +cppmacros['pyobj_from_int1'] = r""" +#define pyobj_from_int1(v) (PyLong_FromLong(v)) +""" +cppmacros['pyobj_from_long1'] = r""" +#define pyobj_from_long1(v) (PyLong_FromLong(v)) +""" +needs['pyobj_from_long_long1'] = ['long_long'] +cppmacros['pyobj_from_long_long1'] = """ +#ifdef HAVE_LONG_LONG +#define pyobj_from_long_long1(v) (PyLong_FromLongLong(v)) +#else +#warning HAVE_LONG_LONG is not available. Redefining pyobj_from_long_long. +#define pyobj_from_long_long1(v) (PyLong_FromLong(v)) +#endif +""" +needs['pyobj_from_long_double1'] = ['long_double'] +cppmacros['pyobj_from_long_double1'] = """ +#define pyobj_from_long_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_double1'] = """ +#define pyobj_from_double1(v) (PyFloat_FromDouble(v))""" +cppmacros['pyobj_from_float1'] = """ +#define pyobj_from_float1(v) (PyFloat_FromDouble(v))""" +needs['pyobj_from_complex_long_double1'] = ['complex_long_double'] +cppmacros['pyobj_from_complex_long_double1'] = """ +#define pyobj_from_complex_long_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_double1'] = ['complex_double'] +cppmacros['pyobj_from_complex_double1'] = """ +#define pyobj_from_complex_double1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_complex_float1'] = ['complex_float'] +cppmacros['pyobj_from_complex_float1'] = """ +#define pyobj_from_complex_float1(v) (PyComplex_FromDoubles(v.r,v.i))""" +needs['pyobj_from_string1'] = ['string'] +cppmacros['pyobj_from_string1'] = """ +#define pyobj_from_string1(v) (PyUnicode_FromString((char *)v))""" +needs['pyobj_from_string1size'] = ['string'] +cppmacros['pyobj_from_string1size'] = """ +#define pyobj_from_string1size(v,len) (PyUnicode_FromStringAndSize((char *)v, len))""" +needs['TRYPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYPYARRAYTEMPLATE'] = """ +/* New SciPy */ +#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break; +#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr,PyArray_DATA(arr),pyobj_from_ ## ctype ## 1(*v)); break; + +#define TRYPYARRAYTEMPLATE(ctype,typecode) \\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_SHORT: *(npy_short 
*)(PyArray_DATA(arr))=*v; break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_ ## ctype ## 1(*v)); break;\\ + default: return -2;\\ + };\\ + return 1 +""" + +needs['TRYCOMPLEXPYARRAYTEMPLATE'] = ['PRINTPYOBJERR'] +cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """ +#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break; +#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\ + PyArrayObject *arr = NULL;\\ + if (!obj) return -2;\\ + if (!PyArray_Check(obj)) return -1;\\ + if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\ + if (PyArray_DESCR(arr)->type==typecode) {\\ + *(ctype *)(PyArray_DATA(arr))=(*v).r;\\ + *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\ + return 1;\\ + }\\ + switch (PyArray_TYPE(arr)) {\\ + case NPY_CDOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_double *)(PyArray_DATA(arr)+sizeof(npy_double))=(*v).i;\\ + break;\\ + case NPY_CFLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_float *)(PyArray_DATA(arr)+sizeof(npy_float))=(*v).i;\\ + break;\\ + case NPY_DOUBLE: *(npy_double *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONG: *(npy_long *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_FLOAT: *(npy_float *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_INT: *(npy_int *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_SHORT: *(npy_short *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UBYTE: *(npy_ubyte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BYTE: *(npy_byte *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\ + case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\ + case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;\\ + *(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;\\ + break;\\ + case NPY_OBJECT: PyArray_SETITEM(arr, PyArray_DATA(arr), pyobj_from_complex_ ## ctype ## 1((*v))); break;\\ + default: return -2;\\ + };\\ + return -1; +""" +# cppmacros['NUMFROMARROBJ']=""" +# define NUMFROMARROBJ(typenum,ctype) \\ +# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\ +# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\ +# if (arr) {\\ +# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\ +# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\ +# goto capi_fail;\\ +# } else {\\ +# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\ +# }\\ +# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\ +# return 1;\\ +# } +# """ +# XXX: Note 
that CNUMFROMARROBJ is identical with NUMFROMARROBJ
+# cppmacros['CNUMFROMARROBJ']="""
+# define CNUMFROMARROBJ(typenum,ctype) \\
+#     if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+#     else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+#     if (arr) {\\
+#         if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+#             if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
+#                 goto capi_fail;\\
+#         } else {\\
+#             (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
+#         }\\
+#         if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+#         return 1;\\
+#     }
+# """
+
+
+needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR']
+cppmacros['GETSTRFROMPYTUPLE'] = """
+#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
+        PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
+        if (rv_cb_str == NULL)\\
+            goto capi_fail;\\
+        if (PyBytes_Check(rv_cb_str)) {\\
+            str[len-1]='\\0';\\
+            STRINGCOPYN((str),PyBytes_AS_STRING((PyBytesObject*)rv_cb_str),(len));\\
+        } else {\\
+            PRINTPYOBJERR(rv_cb_str);\\
+            PyErr_SetString(#modulename#_error,\"string object expected\");\\
+            goto capi_fail;\\
+        }\\
+    }
+"""
+cppmacros['GETSCALARFROMPYTUPLE'] = """
+#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
+        if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
+        if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
+            goto capi_fail;\\
+    }
+"""
+
+cppmacros['FAILNULL'] = """\
+#define FAILNULL(p) do {                                            \\
+    if ((p) == NULL) {                                              \\
+        PyErr_SetString(PyExc_MemoryError, "NULL pointer found");   \\
+        goto capi_fail;                                             \\
+    }                                                               \\
+} while (0)
+"""
+needs['MEMCOPY'] = ['string.h', 'FAILNULL']
+cppmacros['MEMCOPY'] = """
+#define MEMCOPY(to,from,n)\\
+    do { FAILNULL(to); FAILNULL(from); (void)memcpy(to,from,n); } while (0)
+"""
+cppmacros['STRINGMALLOC'] = """
+#define STRINGMALLOC(str,len)\\
+    if ((str = (string)malloc(len+1)) == NULL) {\\
+        PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
+        goto capi_fail;\\
+    } else {\\
+        (str)[len] = '\\0';\\
+    }
+"""
+cppmacros['STRINGFREE'] = """
+#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
+"""
+needs['STRINGPADN'] = ['string.h']
+cppmacros['STRINGPADN'] = """
+/*
+STRINGPADN replaces null values with padding values from the right.
+
+`to` must have size of at least N bytes.
+
+If the `to[N-1]` has null value, then replace it and all the
+preceding nulls with the given padding.
+
+STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation.
+*/
+#define STRINGPADN(to, N, NULLVALUE, PADDING)                   \\
+    do {                                                        \\
+        int _m = (N);                                           \\
+        char *_to = (to);                                       \\
+        for (_m -= 1; _m >= 0 && _to[_m] == NULLVALUE; _m--) {  \\
+             _to[_m] = PADDING;                                 \\
+        }                                                       \\
+    } while (0)
+"""
+needs['STRINGCOPYN'] = ['string.h', 'FAILNULL']
+cppmacros['STRINGCOPYN'] = """
+/*
+STRINGCOPYN copies N bytes.
+
+`to` and `from` buffers must have sizes of at least N bytes.
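+
+Unlike a bare strncpy, the FAILNULL checks below make a NULL `to` or
+`from` jump to capi_fail instead of invoking undefined behavior.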
+*/ +#define STRINGCOPYN(to,from,N) \\ + do { \\ + int _m = (N); \\ + char *_to = (to); \\ + char *_from = (from); \\ + FAILNULL(_to); FAILNULL(_from); \\ + (void)strncpy(_to, _from, _m); \\ + } while (0) +""" +needs['STRINGCOPY'] = ['string.h', 'FAILNULL'] +cppmacros['STRINGCOPY'] = """ +#define STRINGCOPY(to,from)\\ + do { FAILNULL(to); FAILNULL(from); (void)strcpy(to,from); } while (0) +""" +cppmacros['CHECKGENERIC'] = """ +#define CHECKGENERIC(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKARRAY'] = """ +#define CHECKARRAY(check,tcheck,name) \\ + if (!(check)) {\\ + PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSTRING'] = """ +#define CHECKSTRING(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\ + PyErr_SetString(#modulename#_error, errstring);\\ + /*goto capi_fail;*/\\ + } else """ +cppmacros['CHECKSCALAR'] = """ +#define CHECKSCALAR(check,tcheck,name,show,var)\\ + if (!(check)) {\\ + char errstring[256];\\ + sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\ + PyErr_SetString(#modulename#_error,errstring);\\ + /*goto capi_fail;*/\\ + } else """ +# cppmacros['CHECKDIMS']=""" +# define CHECKDIMS(dims,rank) \\ +# for (int i=0;i<(rank);i++)\\ +# if (dims[i]<0) {\\ +# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\ +# goto capi_fail;\\ +# } +# """ +cppmacros[ + 'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))' +cppmacros['OLDPYNUM'] = """ +#ifdef OLDPYNUM +#error You need to install NumPy version 0.13 or higher. See https://scipy.org/install.html +#endif +""" + +# Defining the correct value to indicate thread-local storage in C without +# running a compile-time check (which we have no control over in generated +# code used outside of NumPy) is hard. Therefore we support overriding this +# via an external define - the f2py-using package can then use the same +# compile-time checks as we use for `NPY_TLS` when building NumPy (see +# scipy#21860 for an example of that). +# +# __STDC_NO_THREADS__ should not be coupled to the availability of _Thread_local. +# In case we get a bug report, guard it with __STDC_NO_THREADS__ after all. +# +# `thread_local` has become a keyword in C23, but don't try to use that yet +# (too new, doing so while C23 support is preliminary will likely cause more +# problems than it solves). +# +# Note: do not try to use `threads.h`, its availability is very low +# *and* threads.h isn't actually used where `F2PY_THREAD_LOCAL_DECL` is +# in the generated code. See gh-27718 for more details. 
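+# For example, a consuming build can pre-empt the fallback chain below
+# with an external define such as:
+#     cc -DF2PY_THREAD_LOCAL_DECL=_Thread_local ...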
+cppmacros["F2PY_THREAD_LOCAL_DECL"] = """ +#ifndef F2PY_THREAD_LOCAL_DECL +#if defined(_MSC_VER) +#define F2PY_THREAD_LOCAL_DECL __declspec(thread) +#elif defined(NPY_OS_MINGW) +#define F2PY_THREAD_LOCAL_DECL __thread +#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) +#define F2PY_THREAD_LOCAL_DECL _Thread_local +#elif defined(__GNUC__) \\ + && (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 4))) +#define F2PY_THREAD_LOCAL_DECL __thread +#endif +#endif +""" +################# C functions ############### + +cfuncs['calcarrindex'] = """ +static int calcarrindex(int *i,PyArrayObject *arr) { + int k,ii = i[0]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['calcarrindextr'] = """ +static int calcarrindextr(int *i,PyArrayObject *arr) { + int k,ii = i[PyArray_NDIM(arr)-1]; + for (k=1; k < PyArray_NDIM(arr); k++) + ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */ + return ii; +}""" +cfuncs['forcomb'] = """ +struct ForcombCache { int nd;npy_intp *d;int *i,*i_tr,tr; }; +static int initforcomb(struct ForcombCache *cache, npy_intp *dims,int nd,int tr) { + int k; + if (dims==NULL) return 0; + if (nd<0) return 0; + cache->nd = nd; + cache->d = dims; + cache->tr = tr; + + cache->i = (int *)malloc(sizeof(int)*nd); + if (cache->i==NULL) return 0; + cache->i_tr = (int *)malloc(sizeof(int)*nd); + if (cache->i_tr==NULL) {free(cache->i); return 0;}; + + for (k=1;ki[k] = cache->i_tr[nd-k-1] = 0; + } + cache->i[0] = cache->i_tr[nd-1] = -1; + return 1; +} +static int *nextforcomb(struct ForcombCache *cache) { + if (cache==NULL) return NULL; + int j,*i,*i_tr,k; + int nd=cache->nd; + if ((i=cache->i) == NULL) return NULL; + if ((i_tr=cache->i_tr) == NULL) return NULL; + if (cache->d == NULL) return NULL; + i[0]++; + if (i[0]==cache->d[0]) { + j=1; + while ((jd[j]-1)) j++; + if (j==nd) { + free(i); + free(i_tr); + return NULL; + } + for (k=0;ktr) return i_tr; + return i; +}""" +needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string'] +cfuncs['try_pyarr_from_string'] = """ +/* + try_pyarr_from_string copies str[:len(obj)] to the data of an `ndarray`. + + If obj is an `ndarray`, it is assumed to be contiguous. + + If the specified len==-1, str must be null-terminated. +*/ +static int try_pyarr_from_string(PyObject *obj, + const string str, const int len) { +#ifdef DEBUGCFUNCS +fprintf(stderr, "try_pyarr_from_string(str='%s', len=%d, obj=%p)\\n", + (char*)str,len, obj); +#endif + if (!obj) return -2; /* Object missing */ + if (obj == Py_None) return -1; /* None */ + if (!PyArray_Check(obj)) goto capi_fail; /* not an ndarray */ + if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + assert(ISCONTIGUOUS(arr)); + string buf = PyArray_DATA(arr); + npy_intp n = len; + if (n == -1) { + /* Assuming null-terminated str. */ + n = strlen(str); + } + if (n > PyArray_NBYTES(arr)) { + n = PyArray_NBYTES(arr); + } + STRINGCOPYN(buf, str, n); + return 1; + } +capi_fail: + PRINTPYOBJERR(obj); + PyErr_SetString(#modulename#_error, \"try_pyarr_from_string failed\"); + return 0; +} +""" +needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN'] +cfuncs['string_from_pyobj'] = """ +/* + Create a new string buffer `str` of at most length `len` from a + Python string-like object `obj`. + + The string buffer has given size (len) or the size of inistr when len==-1. 
+ + The string buffer is padded with blanks: in Fortran, trailing blanks + are insignificant contrary to C nulls. + */ +static int +string_from_pyobj(string *str, int *len, const string inistr, PyObject *obj, + const char *errmess) +{ + PyObject *tmp = NULL; + string buf = NULL; + npy_intp n = -1; +#ifdef DEBUGCFUNCS +fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\", + (char*)str, *len, (char *)inistr, obj); +#endif + if (obj == Py_None) { + n = strlen(inistr); + buf = inistr; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (!ISCONTIGUOUS(arr)) { + PyErr_SetString(PyExc_ValueError, + \"array object is non-contiguous.\"); + goto capi_fail; + } + n = PyArray_NBYTES(arr); + buf = PyArray_DATA(arr); + n = strnlen(buf, n); + } + else { + if (PyBytes_Check(obj)) { + tmp = obj; + Py_INCREF(tmp); + } + else if (PyUnicode_Check(obj)) { + tmp = PyUnicode_AsASCIIString(obj); + } + else { + PyObject *tmp2; + tmp2 = PyObject_Str(obj); + if (tmp2) { + tmp = PyUnicode_AsASCIIString(tmp2); + Py_DECREF(tmp2); + } + else { + tmp = NULL; + } + } + if (tmp == NULL) goto capi_fail; + n = PyBytes_GET_SIZE(tmp); + buf = PyBytes_AS_STRING(tmp); + } + if (*len == -1) { + /* TODO: change the type of `len` so that we can remove this */ + if (n > NPY_MAX_INT) { + PyErr_SetString(PyExc_OverflowError, + "object too large for a 32-bit int"); + goto capi_fail; + } + *len = n; + } + else if (*len < n) { + /* discard the last (len-n) bytes of input buf */ + n = *len; + } + if (n < 0 || *len < 0 || buf == NULL) { + goto capi_fail; + } + STRINGMALLOC(*str, *len); // *str is allocated with size (*len + 1) + if (n < *len) { + /* + Pad fixed-width string with nulls. The caller will replace + nulls with blanks when the corresponding argument is not + intent(c). + */ + memset(*str + n, '\\0', *len - n); + } + STRINGCOPYN(*str, buf, n); + Py_XDECREF(tmp); + return 1; +capi_fail: + Py_XDECREF(tmp); + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + +cfuncs['character_from_pyobj'] = """ +static int +character_from_pyobj(character* v, PyObject *obj, const char *errmess) { + if (PyBytes_Check(obj)) { + /* empty bytes has trailing null, so dereferencing is always safe */ + *v = PyBytes_AS_STRING(obj)[0]; + return 1; + } else if (PyUnicode_Check(obj)) { + PyObject* tmp = PyUnicode_AsASCIIString(obj); + if (tmp != NULL) { + *v = PyBytes_AS_STRING(tmp)[0]; + Py_DECREF(tmp); + return 1; + } + } else if (PyArray_Check(obj)) { + PyArrayObject* arr = (PyArrayObject*)obj; + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *v = PyArray_BYTES(arr)[0]; + return 1; + } else if (F2PY_IS_UNICODE_ARRAY(arr)) { + // TODO: update when numpy will support 1-byte and + // 2-byte unicode dtypes + PyObject* tmp = PyUnicode_FromKindAndData( + PyUnicode_4BYTE_KIND, + PyArray_BYTES(arr), + (PyArray_NBYTES(arr)>0?1:0)); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + } else if (PySequence_Check(obj)) { + PyObject* tmp = PySequence_GetItem(obj,0); + if (tmp != NULL) { + if (character_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + /* TODO: This error (and most other) error handling needs cleaning. 
*/ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + strcpy(mess, errmess); + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_TypeError; + Py_INCREF(err); + } + else { + Py_INCREF(err); + PyErr_Clear(); + } + sprintf(mess + strlen(mess), + " -- expected str|bytes|sequence-of-str-or-bytes, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + Py_DECREF(err); + } + return 0; +} +""" + +# TODO: These should be dynamically generated, too many mapped to int things, +# see note in _isocbind.py +needs['char_from_pyobj'] = ['int_from_pyobj'] +cfuncs['char_from_pyobj'] = """ +static int +char_from_pyobj(char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (char)i; + return 1; + } + return 0; +} +""" + + +needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char'] +cfuncs['signed_char_from_pyobj'] = """ +static int +signed_char_from_pyobj(signed_char* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (signed_char)i; + return 1; + } + return 0; +} +""" + + +needs['short_from_pyobj'] = ['int_from_pyobj'] +cfuncs['short_from_pyobj'] = """ +static int +short_from_pyobj(short* v, PyObject *obj, const char *errmess) { + int i = 0; + if (int_from_pyobj(&i, obj, errmess)) { + *v = (short)i; + return 1; + } + return 0; +} +""" + + +cfuncs['int_from_pyobj'] = """ +static int +int_from_pyobj(int* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = Npy__PyLong_AsInt(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = Npy__PyLong_AsInt(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (int_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +cfuncs['long_from_pyobj'] = """ +static int +long_from_pyobj(long* v, PyObject *obj, const char *errmess) { + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = PyLong_AsLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err, errmess); + } + return 0; +} +""" + + +needs['long_long_from_pyobj'] = ['long_long'] +cfuncs['long_long_from_pyobj'] = """ +static int +long_long_from_pyobj(long_long* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + + if (PyLong_Check(obj)) { + *v = PyLong_AsLongLong(obj); + return !(*v == -1 && PyErr_Occurred()); + } + + tmp = PyNumber_Long(obj); + if (tmp) { + *v = 
PyLong_AsLongLong(tmp); + Py_DECREF(tmp); + return !(*v == -1 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (long_long_from_pyobj(v, tmp, errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = #modulename#_error; + } + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double'] +cfuncs['long_double_from_pyobj'] = """ +static int +long_double_from_pyobj(long_double* v, PyObject *obj, const char *errmess) +{ + double d=0; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, LongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr) == NPY_LONGDOUBLE) { + (*v) = *((npy_longdouble *)PyArray_DATA(arr)); + return 1; + } + } + } + if (double_from_pyobj(&d, obj, errmess)) { + *v = (long_double)d; + return 1; + } + return 0; +} +""" + + +cfuncs['double_from_pyobj'] = """ +static int +double_from_pyobj(double* v, PyObject *obj, const char *errmess) +{ + PyObject* tmp = NULL; + if (PyFloat_Check(obj)) { + *v = PyFloat_AsDouble(obj); + return !(*v == -1.0 && PyErr_Occurred()); + } + + tmp = PyNumber_Float(obj); + if (tmp) { + *v = PyFloat_AsDouble(tmp); + Py_DECREF(tmp); + return !(*v == -1.0 && PyErr_Occurred()); + } + + if (PyComplex_Check(obj)) { + PyErr_Clear(); + tmp = PyObject_GetAttrString(obj,\"real\"); + } + else if (PyBytes_Check(obj) || PyUnicode_Check(obj)) { + /*pass*/; + } + else if (PySequence_Check(obj)) { + PyErr_Clear(); + tmp = PySequence_GetItem(obj, 0); + } + + if (tmp) { + if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;} + Py_DECREF(tmp); + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) err = #modulename#_error; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['float_from_pyobj'] = ['double_from_pyobj'] +cfuncs['float_from_pyobj'] = """ +static int +float_from_pyobj(float* v, PyObject *obj, const char *errmess) +{ + double d=0.0; + if (double_from_pyobj(&d,obj,errmess)) { + *v = (float)d; + return 1; + } + return 0; +} +""" + + +needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double', + 'complex_double_from_pyobj', 'npy_math.h'] +cfuncs['complex_long_double_from_pyobj'] = """ +static int +complex_long_double_from_pyobj(complex_long_double* v, PyObject *obj, const char *errmess) +{ + complex_double cd = {0.0,0.0}; + if (PyArray_CheckScalar(obj)){ + if PyArray_IsScalar(obj, CLongDouble) { + PyArray_ScalarAsCtype(obj, v); + return 1; + } + else if (PyArray_Check(obj)) { + PyArrayObject *arr = (PyArrayObject *)obj; + if (PyArray_TYPE(arr)==NPY_CLONGDOUBLE) { + (*v).r = npy_creall(*(((npy_clongdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimagl(*(((npy_clongdouble *)PyArray_DATA(arr)))); + return 1; + } + } + } + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (long_double)cd.r; + (*v).i = (long_double)cd.i; + return 1; + } + return 0; +} +""" + + +needs['complex_double_from_pyobj'] = ['complex_double', 'npy_math.h'] +cfuncs['complex_double_from_pyobj'] = """ +static int +complex_double_from_pyobj(complex_double* v, PyObject *obj, const char *errmess) { + Py_complex c; + if 
(PyComplex_Check(obj)) { + c = PyComplex_AsCComplex(obj); + (*v).r = c.real; + (*v).i = c.imag; + return 1; + } + if (PyArray_IsScalar(obj, ComplexFloating)) { + if (PyArray_IsScalar(obj, CFloat)) { + npy_cfloat new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_crealf(new); + (*v).i = (double)npy_cimagf(new); + } + else if (PyArray_IsScalar(obj, CLongDouble)) { + npy_clongdouble new; + PyArray_ScalarAsCtype(obj, &new); + (*v).r = (double)npy_creall(new); + (*v).i = (double)npy_cimagl(new); + } + else { /* if (PyArray_IsScalar(obj, CDouble)) */ + PyArray_ScalarAsCtype(obj, v); + } + return 1; + } + if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */ + PyArrayObject *arr; + if (PyArray_Check(obj)) { + arr = (PyArrayObject *)PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE); + } + else { + arr = (PyArrayObject *)PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE)); + } + if (arr == NULL) { + return 0; + } + (*v).r = npy_creal(*(((npy_cdouble *)PyArray_DATA(arr)))); + (*v).i = npy_cimag(*(((npy_cdouble *)PyArray_DATA(arr)))); + Py_DECREF(arr); + return 1; + } + /* Python does not provide PyNumber_Complex function :-( */ + (*v).i = 0.0; + if (PyFloat_Check(obj)) { + (*v).r = PyFloat_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PyLong_Check(obj)) { + (*v).r = PyLong_AsDouble(obj); + return !((*v).r == -1.0 && PyErr_Occurred()); + } + if (PySequence_Check(obj) && !(PyBytes_Check(obj) || PyUnicode_Check(obj))) { + PyObject *tmp = PySequence_GetItem(obj,0); + if (tmp) { + if (complex_double_from_pyobj(v,tmp,errmess)) { + Py_DECREF(tmp); + return 1; + } + Py_DECREF(tmp); + } + } + { + PyObject* err = PyErr_Occurred(); + if (err==NULL) + err = PyExc_TypeError; + PyErr_SetString(err,errmess); + } + return 0; +} +""" + + +needs['complex_float_from_pyobj'] = [ + 'complex_float', 'complex_double_from_pyobj'] +cfuncs['complex_float_from_pyobj'] = """ +static int +complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) +{ + complex_double cd={0.0,0.0}; + if (complex_double_from_pyobj(&cd,obj,errmess)) { + (*v).r = (float)cd.r; + (*v).i = (float)cd.i; + return 1; + } + return 0; +} +""" + + +cfuncs['try_pyarr_from_character'] = """ +static int try_pyarr_from_character(PyObject* obj, character* v) { + PyArrayObject *arr = (PyArrayObject*)obj; + if (!obj) return -2; + if (PyArray_Check(obj)) { + if (F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr)) { + *(character *)(PyArray_DATA(arr)) = *v; + return 1; + } + } + { + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + PyObject* err = PyErr_Occurred(); + if (err == NULL) { + err = PyExc_ValueError; + strcpy(mess, "try_pyarr_from_character failed" + " -- expected bytes array-scalar|array, got "); + f2py_describe(obj, mess + strlen(mess)); + PyErr_SetString(err, mess); + } + } + return 0; +} +""" + +needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char'] +cfuncs[ + 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n' +needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char'] +cfuncs[ + 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n' 
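All of the scalar `*_from_pyobj` converters above share the same acceptance cascade: take the value directly if it is already of the right Python kind, try the generic number protocol, fall back to the `.real` part of a complex, and finally recurse into the first element of a sequence. The sketch below is a pure-Python paraphrase of that order, for orientation only; `coerce_scalar` is a hypothetical name and is not part of this patch:

```python
# Illustrative only: mirrors the lookup order of the *_from_pyobj converters.
def coerce_scalar(obj, target=float):
    if not isinstance(obj, (str, bytes)):
        try:
            # Fast path, mirroring PyLong_Check / PyNumber_Long etc.
            return target(obj)
        except (TypeError, ValueError):
            pass
    if isinstance(obj, complex):
        # Mirrors the PyObject_GetAttrString(obj, "real") branch.
        return coerce_scalar(obj.real, target)
    if not isinstance(obj, (str, bytes)):
        try:
            # Mirrors the PySequence_GetItem(obj, 0) branch.
            return coerce_scalar(obj[0], target)
        except (TypeError, IndexError, KeyError):
            pass
    raise TypeError(f"cannot coerce {type(obj).__name__} to {target.__name__}")

assert coerce_scalar(3 + 0j, int) == 3       # complex -> real part
assert coerce_scalar([7, 8], float) == 7.0   # sequence -> first element
```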
+needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n' +needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n' +needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n' +needs['try_pyarr_from_long_long'] = [ + 'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long'] +cfuncs[ + 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n' +needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n' +needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE'] +cfuncs[ + 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n' +needs['try_pyarr_from_complex_float'] = [ + 'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float'] +cfuncs[ + 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n' +needs['try_pyarr_from_complex_double'] = [ + 'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double'] +cfuncs[ + 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n' + + +needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX'] +# create the list of arguments to be used when calling back to python +cfuncs['create_cb_arglist'] = """ +static int +create_cb_arglist(PyObject* fun, PyTupleObject* xa , const int maxnofargs, + const int nofoptargs, int *nofargs, PyTupleObject **args, + const char *errmess) +{ + PyObject *tmp = NULL; + PyObject *tmp_fun = NULL; + Py_ssize_t tot, opt, ext, siz, i, di = 0; + CFUNCSMESS(\"create_cb_arglist\\n\"); + tot=opt=ext=siz=0; + /* Get the total number of arguments */ + if (PyFunction_Check(fun)) { + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else { + di = 1; + if (PyObject_HasAttrString(fun,\"im_func\")) { + tmp_fun = PyObject_GetAttrString(fun,\"im_func\"); + } + else if (PyObject_HasAttrString(fun,\"__call__\")) { + tmp = PyObject_GetAttrString(fun,\"__call__\"); + if (PyObject_HasAttrString(tmp,\"im_func\")) + tmp_fun = PyObject_GetAttrString(tmp,\"im_func\"); + else { + tmp_fun = fun; /* built-in function */ + Py_INCREF(tmp_fun); + tot = maxnofargs; + if (PyCFunction_Check(fun)) { + /* In case the function has a co_argcount (like on PyPy) */ + di = 0; + } + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + } + Py_XDECREF(tmp); + } + else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) { + tot = maxnofargs; + if (xa != NULL) + tot += PyTuple_Size((PyObject *)xa); + tmp_fun = fun; + Py_INCREF(tmp_fun); + } + else if (F2PyCapsule_Check(fun)) { + tot = maxnofargs; + if (xa != NULL) + ext = PyTuple_Size((PyObject *)xa); + if(ext>0) { + fprintf(stderr,\"extra arguments tuple cannot be used with PyCapsule call-back\\n\"); + goto capi_fail; + } 
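+        /* A PyCapsule call-back is a bare C function pointer: there is
+           no Python frame that could receive extra_args, which is why
+           the mismatch above is a hard error. */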
+        tmp_fun = fun;
+        Py_INCREF(tmp_fun);
+    }
+    }
+
+    if (tmp_fun == NULL) {
+        fprintf(stderr,
+                \"Call-back argument must be function|instance|instance.__call__|f2py-function \"
+                \"but got %s.\\n\",
+                ((fun == NULL) ? \"NULL\" : Py_TYPE(fun)->tp_name));
+        goto capi_fail;
+    }
+
+    if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
+        if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
+            PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
+            Py_DECREF(tmp);
+            if (tmp_argcount == NULL) {
+                goto capi_fail;
+            }
+            tot = PyLong_AsSsize_t(tmp_argcount) - di;
+            Py_DECREF(tmp_argcount);
+        }
+    }
+    /* Get the number of optional arguments */
+    if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
+        if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
+            opt = PyTuple_Size(tmp);
+        Py_XDECREF(tmp);
+    }
+    /* Get the number of extra arguments */
+    if (xa != NULL)
+        ext = PyTuple_Size((PyObject *)xa);
+    /* Calculate the size of call-backs argument list */
+    siz = MIN(maxnofargs+ext,tot);
+    *nofargs = MAX(0,siz-ext);
+
+#ifdef DEBUGCFUNCS
+    fprintf(stderr,
+            \"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),\"
+            \"tot,opt,ext,siz,nofargs = %d(-%d), %zd, %zd, %zd, %zd, %d\\n\",
+            maxnofargs, nofoptargs, tot, opt, ext, siz, *nofargs);
+#endif
+
+    if (siz < tot-opt) {
+        fprintf(stderr,
+                \"create_cb_arglist: Failed to build argument list \"
+                \"(siz) with enough arguments (tot-opt) required by \"
+                \"user-supplied function (siz,tot,opt=%zd, %zd, %zd).\\n\",
+                siz, tot, opt);
+        goto capi_fail;
+    }
+
+    /* Initialize argument list */
+    *args = (PyTupleObject *)PyTuple_New(siz);
+    for (i=0;i<*nofargs;i++) {
+        Py_INCREF(Py_None);
+        PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
+    }
+    if (xa != NULL)
+        for (i=(*nofargs);i<siz;i++) {
+            tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
+            Py_INCREF(tmp);
+            PyTuple_SET_ITEM(*args,i,tmp);
+        }
+    CFUNCSMESS(\"create_cb_arglist-end\\n\");
+    Py_DECREF(tmp_fun);
+    return 1;
+
+capi_fail:
+    if (PyErr_Occurred() == NULL)
+        PyErr_SetString(#modulename#_error, errmess);
+    Py_XDECREF(tmp_fun);
+    return 0;
+}
+"""
+
+
+def buildcfuncs():
+    from .capi_maps import c2capi_map
+    for k in c2capi_map.keys():
+        m = 'pyarr_from_p_%s1' % k
+        cppmacros[
+            m] = '#define %s(v) (PyArray_SimpleNewFromData(0,NULL,%s,(char *)v))' % (m, c2capi_map[k])
+    k = 'string'
+    m = 'pyarr_from_p_%s1' % k
+    cppmacros[
+        m] = '#define %s(v,dims) (PyArray_New(&PyArray_Type, 1, dims, NPY_STRING, NULL, v, 1, NPY_ARRAY_CARRAY, NULL))' % m
+
+
+############ Auxiliary functions for sorting needs ###################
+
+def append_needs(need, flag=1):
+    # This function modifies the contents of the global `outneeds` dict.
+    if isinstance(need, list):
+        for n in need:
+            append_needs(n, flag)
+    elif isinstance(need, str):
+        if not need:
+            return
+        if need in includes0:
+            n = 'includes0'
+        elif need in includes:
+            n = 'includes'
+        elif need in typedefs:
+            n = 'typedefs'
+        elif need in typedefs_generated:
+            n = 'typedefs_generated'
+        elif need in cppmacros:
+            n = 'cppmacros'
+        elif need in cfuncs:
+            n = 'cfuncs'
+        elif need in callbacks:
+            n = 'callbacks'
+        elif need in f90modhooks:
+            n = 'f90modhooks'
+        elif need in commonhooks:
+            n = 'commonhooks'
+        else:
+            errmess(f'append_needs: unknown need {need!r}\n')
+            return
+        if need in outneeds[n]:
+            return
+        if flag:
+            tmp = {}
+            if need in needs:
+                for nn in needs[need]:
+                    t = append_needs(nn, 0)
+                    if isinstance(t, dict):
+                        for nnn in t.keys():
+                            if nnn in tmp:
+                                tmp[nnn] = tmp[nnn] + t[nnn]
+                            else:
+                                tmp[nnn] = t[nnn]
+            for nn in tmp.keys():
+                for nnn in tmp[nn]:
+                    if nnn not in outneeds[nn]:
+                        outneeds[nn] = [nnn] + outneeds[nn]
+            outneeds[n].append(need)
+        else:
+            tmp = {}
+            if need in needs:
+                for nn in needs[need]:
+                    t = append_needs(nn, flag)
+                    if isinstance(t, dict):
+                        for nnn in t.keys():
+                            if nnn not in tmp:
+                                tmp[nnn] = t[nnn]
+                            else:
+                                tmp[nnn] = tmp[nnn] + t[nnn]
+            if n not in tmp:
+                tmp[n] = []
+            tmp[n].append(need)
+            return tmp
+    else:
+        errmess(f'append_needs: expected list or string but got :{need!r}\n')
+
+
+def get_needs():
+    # This function modifies the contents of the global `outneeds` dict.
+    res = {}
+    for n in outneeds.keys():
+        out = []
+        saveout = copy.copy(outneeds[n])
+        while len(outneeds[n]) > 0:
+            if outneeds[n][0] not in needs:
+                out.append(outneeds[n][0])
+                del outneeds[n][0]
+            else:
+                flag = 0
+                for k in outneeds[n][1:]:
+                    if k in needs[outneeds[n][0]]:
+                        flag = 1
+                        break
+                if flag:
+                    outneeds[n] = outneeds[n][1:] + [outneeds[n][0]]
+                else:
+                    out.append(outneeds[n][0])
+                    del outneeds[n][0]
+            if saveout and (0 not in map(lambda x, y: x == y, saveout, outneeds[n])) \
+                    and outneeds[n] != []:
+                print(n, saveout)
+                errmess(
+                    'get_needs: no progress in sorting needs, probably circular dependence, skipping.\n')
+                out = out + saveout
+                break
+            saveout = copy.copy(outneeds[n])
+        if out == []:
+            out = [n]
+        res[n] = out
+    return res
diff --git a/python/numpy/f2py/cfuncs.pyi b/python/numpy/f2py/cfuncs.pyi
new file mode 100644
index 000000000..588717775
--- /dev/null
+++ b/python/numpy/f2py/cfuncs.pyi
@@ -0,0 +1,31 @@
+from typing import Final, TypeAlias
+
+from .__version__ import version
+
+###
+
+_NeedListDict: TypeAlias = dict[str, list[str]]
+_NeedDict: TypeAlias = dict[str, str]
+
+###
+
+f2py_version: Final = version
+
+outneeds: Final[_NeedListDict] = ...
+needs: Final[_NeedListDict] = ...
+
+includes0: Final[_NeedDict] = ...
+includes: Final[_NeedDict] = ...
+userincludes: Final[_NeedDict] = ...
+typedefs: Final[_NeedDict] = ...
+typedefs_generated: Final[_NeedDict] = ...
+cppmacros: Final[_NeedDict] = ...
+cfuncs: Final[_NeedDict] = ...
+callbacks: Final[_NeedDict] = ...
+f90modhooks: Final[_NeedDict] = ...
+commonhooks: Final[_NeedDict] = ...
+
+def errmess(s: str) -> None: ...
+def buildcfuncs() -> None: ...
+def get_needs() -> _NeedListDict: ...
+def append_needs(need: str | list[str], flag: int = 1) -> _NeedListDict: ...
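`append_needs`/`get_needs` above maintain a small dependency graph between the generated C snippets: a snippet is emitted only after everything it `needs`. `get_needs` does this by rotating the head of each queue behind its unmet dependencies until no progress is made. Below is a standalone toy version of that loop, with the no-progress check simplified to a plain list comparison:

```python
# Standalone toy version of the get_needs() rotation: emit an item only
# once nothing later in the queue depends on it; rotate it back otherwise.
import copy

needs = {'b': ['a'], 'c': ['b']}          # c needs b, b needs a
queue = ['c', 'b', 'a']                   # arbitrary initial order

out, saveout = [], copy.copy(queue)
while queue:
    head = queue[0]
    if head not in needs or not any(k in needs[head] for k in queue[1:]):
        out.append(head)                  # no pending dependency: emit
        del queue[0]
    else:
        queue = queue[1:] + [queue[0]]    # rotate head behind its deps
    if saveout == queue and queue:        # no progress => cycle, bail out
        out += saveout
        break
    saveout = copy.copy(queue)

print(out)  # ['a', 'b', 'c'] -- dependencies come first
```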
diff --git a/python/numpy/f2py/common_rules.py b/python/numpy/f2py/common_rules.py new file mode 100644 index 000000000..cef757b6c --- /dev/null +++ b/python/numpy/f2py/common_rules.py @@ -0,0 +1,143 @@ +""" +Build common block mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +from . import __version__ + +f2py_version = __version__.version + +from . import capi_maps, func2subr +from .auxfuncs import getuseblocks, hasbody, hascommon, hasnote, isintent_hide, outmess +from .crackfortran import rmbadname + + +def findcommonblocks(block, top=1): + ret = [] + if hascommon(block): + for key, value in block['common'].items(): + vars_ = {v: block['vars'][v] for v in value} + ret.append((key, value, vars_)) + elif hasbody(block): + for b in block['body']: + ret = ret + findcommonblocks(b, 0) + if top: + tret = [] + names = [] + for t in ret: + if t[0] not in names: + names.append(t[0]) + tret.append(t) + return tret + return ret + + +def buildhooks(m): + ret = {'commonhooks': [], 'initcommonhooks': [], + 'docs': ['"COMMON blocks:\\n"']} + fwrap = [''] + + def fadd(line, s=fwrap): + s[0] = f'{s[0]}\n {line}' + chooks = [''] + + def cadd(line, s=chooks): + s[0] = f'{s[0]}\n{line}' + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = f'{s[0]}\n{line}' + doc = [''] + + def dadd(line, s=doc): + s[0] = f'{s[0]}\n{line}' + for (name, vnames, vars) in findcommonblocks(m): + lower_name = name.lower() + hnames, inames = [], [] + for n in vnames: + if isintent_hide(vars[n]): + hnames.append(n) + else: + inames.append(n) + if hnames: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n\t\t Hidden: %s\n' % ( + name, ','.join(inames), ','.join(hnames))) + else: + outmess('\t\tConstructing COMMON block support for "%s"...\n\t\t %s\n' % ( + name, ','.join(inames))) + fadd(f'subroutine f2pyinit{name}(setupfunc)') + for usename in getuseblocks(m): + fadd(f'use {usename}') + fadd('external setupfunc') + for n in vnames: + fadd(func2subr.var2fixfortran(vars, n)) + if name == '_BLNK_': + fadd(f"common {','.join(vnames)}") + else: + fadd(f"common /{name}/ {','.join(vnames)}") + fadd(f"call setupfunc({','.join(inames)})") + fadd('end\n') + cadd('static FortranDataDef f2py_%s_def[] = {' % (name)) + idims = [] + for n in inames: + ct = capi_maps.getctype(vars[n]) + elsize = capi_maps.get_elsize(vars[n]) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, vars[n]) + if dm['dims']: + idims.append(f"({dm['dims']})") + else: + idims.append('') + dms = dm['dims'].strip() + if not dms: + dms = '-1' + cadd('\t{\"%s\",%s,{{%s}},%s, %s},' + % (n, dm['rank'], dms, at, elsize)) + cadd('\t{NULL}\n};') + inames1 = rmbadname(inames) + inames1_tps = ','.join(['char *' + s for s in inames1]) + cadd('static void f2py_setup_%s(%s) {' % (name, inames1_tps)) + cadd('\tint i_f2py=0;') + for n in inames1: + cadd(f'\tf2py_{name}_def[i_f2py++].data = {n};') + cadd('}') + if '_' in lower_name: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + cadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void(*)(%s));' + % (F_FUNC, lower_name, name.upper(), + ','.join(['char*'] * len(inames1)))) + cadd('static void f2py_init_%s(void) {' % name) + cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, lower_name, name.upper(), name)) + cadd('}\n') + iadd(f'\ttmp = 
PyFortranObject_New(f2py_{name}_def,f2py_init_{name});') + iadd('\tif (tmp == NULL) return NULL;') + iadd(f'\tif (F2PyDict_SetItemString(d, "{name}", tmp) == -1) return NULL;') + iadd('\tPy_DECREF(tmp);') + tname = name.replace('_', '\\_') + dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname)) + dadd('\\begin{description}') + for n in inames: + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, vars[n]))) + if hasnote(vars[n]): + note = vars[n]['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(f'--- {note}') + dadd('\\end{description}') + ret['docs'].append( + f"\"\t/{name}/ {','.join(map(lambda v, d: v + d, inames, idims))}\\n\"") + ret['commonhooks'] = chooks + ret['initcommonhooks'] = ihooks + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fwrap[0] diff --git a/python/numpy/f2py/common_rules.pyi b/python/numpy/f2py/common_rules.pyi new file mode 100644 index 000000000..d840de000 --- /dev/null +++ b/python/numpy/f2py/common_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .__version__ import version + +f2py_version: Final = version + +def findcommonblocks(block: Mapping[str, object], top: int = 1) -> list[tuple[str, list[str], dict[str, Any]]]: ... +def buildhooks(m: Mapping[str, object]) -> tuple[dict[str, Any], str]: ... diff --git a/python/numpy/f2py/crackfortran.py b/python/numpy/f2py/crackfortran.py new file mode 100644 index 000000000..22d804389 --- /dev/null +++ b/python/numpy/f2py/crackfortran.py @@ -0,0 +1,3725 @@ +""" +crackfortran --- read fortran (77,90) code and extract declaration information. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + + +Usage of crackfortran: +====================== +Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h + -m ,--ignore-contains +Functions: crackfortran, crack2fortran +The following Fortran statements/constructions are supported +(or will be if needed): + block data,byte,call,character,common,complex,contains,data, + dimension,double complex,double precision,end,external,function, + implicit,integer,intent,interface,intrinsic, + logical,module,optional,parameter,private,public, + program,real,(sequence?),subroutine,type,use,virtual, + include,pythonmodule +Note: 'virtual' is mapped to 'dimension'. +Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is minor bug). +Note: code after 'contains' will be ignored until its scope ends. +Note: 'common' statement is extended: dimensions are moved to variable definitions +Note: f2py directive: f2py is read as +Note: pythonmodule is introduced to represent Python module + +Usage: + `postlist=crackfortran(files)` + `postlist` contains declaration information read from the list of files `files`. 
+ `crack2fortran(postlist)` returns a fortran code to be saved to pyf-file + + `postlist` has the following structure: + *** it is a list of dictionaries containing `blocks': + B = {'block','body','vars','parent_block'[,'name','prefix','args','result', + 'implicit','externals','interfaced','common','sortvars', + 'commonvars','note']} + B['block'] = 'interface' | 'function' | 'subroutine' | 'module' | + 'program' | 'block data' | 'type' | 'pythonmodule' | + 'abstract interface' + B['body'] --- list containing `subblocks' with the same structure as `blocks' + B['parent_block'] --- dictionary of a parent block: + C['body'][]['parent_block'] is C + B['vars'] --- dictionary of variable definitions + B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first) + B['name'] --- name of the block (not if B['block']=='interface') + B['prefix'] --- prefix string (only if B['block']=='function') + B['args'] --- list of argument names if B['block']== 'function' | 'subroutine' + B['result'] --- name of the return value (only if B['block']=='function') + B['implicit'] --- dictionary {'a':,'b':...} | None + B['externals'] --- list of variables being external + B['interfaced'] --- list of variables being external and defined + B['common'] --- dictionary of common blocks (list of objects) + B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions) + B['from'] --- string showing the 'parents' of the current block + B['use'] --- dictionary of modules used in current block: + {:{['only':<0|1>],['map':{:,...}]}} + B['note'] --- list of LaTeX comments on the block + B['f2pyenhancements'] --- optional dictionary + {'threadsafe':'','fortranname':, + 'callstatement':|, + 'callprotoargument':, + 'usercode':|, + 'pymethoddef:' + } + B['entry'] --- dictionary {entryname:argslist,..} + B['varnames'] --- list of variable names given in the order of reading the + Fortran code, useful for derived types. + B['saved_interface'] --- a string of scanned routine signature, defines explicit interface + *** Variable definition is a dictionary + D = B['vars'][] = + {'typespec'[,'attrspec','kindselector','charselector','=','typename']} + D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' | + 'double precision' | 'integer' | 'logical' | 'real' | 'type' + D['attrspec'] --- list of attributes (e.g. 
'dimension()', + 'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)', + 'optional','required', etc) + K = D['kindselector'] = {['*','kind']} (only if D['typespec'] = + 'complex' | 'integer' | 'logical' | 'real' ) + C = D['charselector'] = {['*','len','kind','f2py_len']} + (only if D['typespec']=='character') + D['='] --- initialization expression string + D['typename'] --- name of the type if D['typespec']=='type' + D['dimension'] --- list of dimension bounds + D['intent'] --- list of intent specifications + D['depend'] --- list of variable names on which current variable depends on + D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised + D['note'] --- list of LaTeX comments on the variable + *** Meaning of kind/char selectors (few examples): + D['typespec>']*K['*'] + D['typespec'](kind=K['kind']) + character*C['*'] + character(len=C['len'],kind=C['kind'], f2py_len=C['f2py_len']) + (see also fortran type declaration statement formats below) + +Fortran 90 type declaration statement format (F77 is subset of F90) +==================================================================== +(Main source: IBM XL Fortran 5.1 Language Reference Manual) +type declaration = [[]::] + = byte | + character[] | + complex[] | + double complex | + double precision | + integer[] | + logical[] | + real[] | + type() + = * | + ([len=][,[kind=]]) | + (kind=[,len=]) + = * | + ([kind=]) + = comma separated list of attributes. + Only the following attributes are used in + building up the interface: + external + (parameter --- affects '=' key) + optional + intent + Other attributes are ignored. + = in | out | inout + = comma separated list of dimension bounds. + = [[*][()] | [()]*] + [// | =] [,] + +In addition, the following attributes are used: check,depend,note + +TODO: + * Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)' + -> 'real x(2)') + The above may be solved by creating appropriate preprocessor program, for example. + +""" +import codecs +import copy +import fileinput +import os +import platform +import re +import string +import sys +from pathlib import Path + +try: + import charset_normalizer +except ImportError: + charset_normalizer = None + +from . import __version__, symbolic + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * + +f2py_version = __version__.version + +# Global flags: +strictf77 = 1 # Ignore `!' comments unless line[0]=='!' +sourcecodeform = 'fix' # 'fix','free' +quiet = 0 # Be verbose if 0 (Obsolete: not used any more) +verbose = 1 # Be quiet if 0, extra verbose if > 1. 
+tabchar = 4 * ' ' +pyffilename = '' +f77modulename = '' +skipemptyends = 0 # for old F77 programs without 'program' statement +ignorecontains = 1 +dolowercase = 1 +debug = [] + +# Global variables +beginpattern = '' +currentfilename = '' +expectbegin = 1 +f90modulevars = {} +filepositiontext = '' +gotnextfile = 1 +groupcache = None +groupcounter = 0 +grouplist = {groupcounter: []} +groupname = '' +include_paths = [] +neededmodule = -1 +onlyfuncs = [] +previous_context = None +skipblocksuntil = -1 +skipfuncs = [] +skipfunctions = [] +usermodules = [] + + +def reset_global_f2py_vars(): + global groupcounter, grouplist, neededmodule, expectbegin + global skipblocksuntil, usermodules, f90modulevars, gotnextfile + global filepositiontext, currentfilename, skipfunctions, skipfuncs + global onlyfuncs, include_paths, previous_context + global strictf77, sourcecodeform, quiet, verbose, tabchar, pyffilename + global f77modulename, skipemptyends, ignorecontains, dolowercase, debug + + # flags + strictf77 = 1 + sourcecodeform = 'fix' + quiet = 0 + verbose = 1 + tabchar = 4 * ' ' + pyffilename = '' + f77modulename = '' + skipemptyends = 0 + ignorecontains = 1 + dolowercase = 1 + debug = [] + # variables + groupcounter = 0 + grouplist = {groupcounter: []} + neededmodule = -1 + expectbegin = 1 + skipblocksuntil = -1 + usermodules = [] + f90modulevars = {} + gotnextfile = 1 + filepositiontext = '' + currentfilename = '' + skipfunctions = [] + skipfuncs = [] + onlyfuncs = [] + include_paths = [] + previous_context = None + + +def outmess(line, flag=1): + global filepositiontext + + if not verbose: + return + if not quiet: + if flag: + sys.stdout.write(filepositiontext) + sys.stdout.write(line) + + +re._MAXCACHE = 50 +defaultimplicitrules = {} +for c in "abcdefghopqrstuvwxyz$_": + defaultimplicitrules[c] = {'typespec': 'real'} +for c in "ijklmn": + defaultimplicitrules[c] = {'typespec': 'integer'} +badnames = {} +invbadnames = {} +for n in ['int', 'double', 'float', 'char', 'short', 'long', 'void', 'case', 'while', + 'return', 'signed', 'unsigned', 'if', 'for', 'typedef', 'sizeof', 'union', + 'struct', 'static', 'register', 'new', 'break', 'do', 'goto', 'switch', + 'continue', 'else', 'inline', 'extern', 'delete', 'const', 'auto', + 'len', 'rank', 'shape', 'index', 'slen', 'size', '_i', + 'max', 'min', + 'flen', 'fshape', + 'string', 'complex_double', 'float_double', 'stdin', 'stderr', 'stdout', + 'type', 'default']: + badnames[n] = n + '_bn' + invbadnames[n + '_bn'] = n + + +def rmbadname1(name): + if name in badnames: + errmess(f'rmbadname1: Replacing "{name}" with "{badnames[name]}".\n') + return badnames[name] + return name + + +def rmbadname(names): + return [rmbadname1(_m) for _m in names] + + +def undo_rmbadname1(name): + if name in invbadnames: + errmess(f'undo_rmbadname1: Replacing "{name}" with "{invbadnames[name]}".\n') + return invbadnames[name] + return name + + +def undo_rmbadname(names): + return [undo_rmbadname1(_m) for _m in names] + + +_has_f_header = re.compile(r'-\*-\s*fortran\s*-\*-', re.I).search +_has_f90_header = re.compile(r'-\*-\s*f90\s*-\*-', re.I).search +_has_fix_header = re.compile(r'-\*-\s*fix\s*-\*-', re.I).search +_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]', re.I).match + +# Extensions +COMMON_FREE_EXTENSIONS = ['.f90', '.f95', '.f03', '.f08'] +COMMON_FIXED_EXTENSIONS = ['.for', '.ftn', '.f77', '.f'] + + +def openhook(filename, mode): + """Ensures that filename is opened with correct encoding parameter. 
+ + This function uses charset_normalizer package, when available, for + determining the encoding of the file to be opened. When charset_normalizer + is not available, the function detects only UTF encodings, otherwise, ASCII + encoding is used as fallback. + """ + # Reads in the entire file. Robust detection of encoding. + # Correctly handles comments or late stage unicode characters + # gh-22871 + if charset_normalizer is not None: + encoding = charset_normalizer.from_path(filename).best().encoding + else: + # hint: install charset_normalizer for correct encoding handling + # No need to read the whole file for trying with startswith + nbytes = min(32, os.path.getsize(filename)) + with open(filename, 'rb') as fhandle: + raw = fhandle.read(nbytes) + if raw.startswith(codecs.BOM_UTF8): + encoding = 'UTF-8-SIG' + elif raw.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)): + encoding = 'UTF-32' + elif raw.startswith((codecs.BOM_LE, codecs.BOM_BE)): + encoding = 'UTF-16' + else: + # Fallback, without charset_normalizer + encoding = 'ascii' + return open(filename, mode, encoding=encoding) + + +def is_free_format(fname): + """Check if file is in free format Fortran.""" + # f90 allows both fixed and free format, assuming fixed unless + # signs of free format are detected. + result = False + if Path(fname).suffix.lower() in COMMON_FREE_EXTENSIONS: + result = True + with openhook(fname, 'r') as fhandle: + line = fhandle.readline() + n = 15 # the number of non-comment lines to scan for hints + if _has_f_header(line): + n = 0 + elif _has_f90_header(line): + n = 0 + result = True + while n > 0 and line: + if line[0] != '!' and line.strip(): + n -= 1 + if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&': + result = True + break + line = fhandle.readline() + return result + + +# Read fortran (77,90) code +def readfortrancode(ffile, dowithline=show, istop=1): + """ + Read fortran codes from files and + 1) Get rid of comments, line continuations, and empty lines; lower cases. + 2) Call dowithline(line) on every line. + 3) Recursively call itself when statement \"include ''\" is met. 
+ """ + global gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77 + global beginpattern, quiet, verbose, dolowercase, include_paths + + if not istop: + saveglobals = gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\ + beginpattern, quiet, verbose, dolowercase + if ffile == []: + return + localdolowercase = dolowercase + # cont: set to True when the content of the last line read + # indicates statement continuation + cont = False + finalline = '' + ll = '' + includeline = re.compile( + r'\s*include\s*(\'|")(?P[^\'"]*)(\'|")', re.I) + cont1 = re.compile(r'(?P.*)&\s*\Z') + cont2 = re.compile(r'(\s*&|)(?P.*)') + mline_mark = re.compile(r".*?'''") + if istop: + dowithline('', -1) + ll, l1 = '', '' + spacedigits = [' '] + [str(_m) for _m in range(10)] + filepositiontext = '' + fin = fileinput.FileInput(ffile, openhook=openhook) + while True: + try: + l = fin.readline() + except UnicodeDecodeError as msg: + raise Exception( + f'readfortrancode: reading {fin.filename()}#{fin.lineno()}' + f' failed with\n{msg}.\nIt is likely that installing charset_normalizer' + ' package will help f2py determine the input file encoding' + ' correctly.') + if not l: + break + if fin.isfirstline(): + filepositiontext = '' + currentfilename = fin.filename() + gotnextfile = 1 + l1 = l + strictf77 = 0 + sourcecodeform = 'fix' + ext = os.path.splitext(currentfilename)[1] + if Path(currentfilename).suffix.lower() in COMMON_FIXED_EXTENSIONS and \ + not (_has_f90_header(l) or _has_fix_header(l)): + strictf77 = 1 + elif is_free_format(currentfilename) and not _has_fix_header(l): + sourcecodeform = 'free' + if strictf77: + beginpattern = beginpattern77 + else: + beginpattern = beginpattern90 + outmess('\tReading file %s (format:%s%s)\n' + % (repr(currentfilename), sourcecodeform, + (strictf77 and ',strict') or '')) + + l = l.expandtabs().replace('\xa0', ' ') + # Get rid of newline characters + while not l == '': + if l[-1] not in "\n\r\f": + break + l = l[:-1] + # Do not lower for directives, gh-2547, gh-27697, gh-26681 + is_f2py_directive = False + # Unconditionally remove comments + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + is_f2py_directive = True + if l.strip() == '': # Skip empty line + if sourcecodeform == 'free': + # In free form, a statement continues in the next line + # that is not a comment line [3.3.2.4^1], lines with + # blanks are comment lines [3.3.2.3^1]. Hence, the + # line continuation flag must retain its state. + pass + else: + # In fixed form, statement continuation is determined + # by a non-blank character at the 6-th position. Empty + # line indicates a start of a new statement + # [3.3.3.3^1]. Hence, the line continuation flag must + # be reset. 
+ cont = False + continue + if sourcecodeform == 'fix': + if l[0] in ['*', 'c', '!', 'C', '#']: + if l[1:5].lower() == 'f2py': # f2py directive + l = ' ' + l[5:] + is_f2py_directive = True + else: # Skip comment line + cont = False + is_f2py_directive = False + continue + elif strictf77: + if len(l) > 72: + l = l[:72] + if l[0] not in spacedigits: + raise Exception('readfortrancode: Found non-(space,digit) char ' + 'in the first column.\n\tAre you sure that ' + 'this code is in fix form?\n\tline=%s' % repr(l)) + + if (not cont or strictf77) and (len(l) > 5 and not l[5] == ' '): + # Continuation of a previous line + ll = ll + l[6:] + finalline = '' + origfinalline = '' + else: + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + # clean up line beginning from possible digits. + l = ' ' + l[5:] + # f2py directives are already stripped by this point + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + ll = l + + elif sourcecodeform == 'free': + if not cont and ext == '.pyf' and mline_mark.match(l): + l = l + '\n' + while True: + lc = fin.readline() + if not lc: + errmess( + 'Unexpected end of file when reading multiline\n') + break + l = l + lc + if mline_mark.match(lc): + break + l = l.rstrip() + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' + else: + if localdolowercase: + # only skip lowering for C style constructs + # gh-2547, gh-27697, gh-26681, gh-28014 + finalline = ll.lower() if not (is_f2py_directive and iscstyledirective(ll)) else ll + else: + finalline = ll + origfinalline = ll + ll = l + cont = (r is not None) + else: + raise ValueError( + f"Flag sourcecodeform must be either 'fix' or 'free': {repr(sourcecodeform)}") + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [ + os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n' % ( + repr(fn), os.pathsep.join(include_dirs))) + else: + dowithline(finalline) + l1 = ll + # Last line should never have an f2py directive anyway + if localdolowercase: + finalline = ll.lower() + else: + finalline = ll + origfinalline = ll + filepositiontext = 'Line #%d in %s:"%s"\n\t' % ( + fin.filelineno() - 1, currentfilename, l1) + m = includeline.match(origfinalline) + if m: + fn = m.group('name') + if os.path.isfile(fn): + readfortrancode(fn, dowithline=dowithline, istop=0) + else: + include_dirs = [os.path.dirname(currentfilename)] + include_paths + foundfile = 0 + for inc_dir in include_dirs: + fn1 = os.path.join(inc_dir, fn) + if os.path.isfile(fn1): + foundfile = 1 + readfortrancode(fn1, dowithline=dowithline, istop=0) + break + if not foundfile: + outmess('readfortrancode: could not find include file %s in %s. 
Ignoring.\n' % (
+                    repr(fn), os.pathsep.join(include_dirs)))
+    else:
+        dowithline(finalline)
+    filepositiontext = ''
+    fin.close()
+    if istop:
+        dowithline('', 1)
+    else:
+        gotnextfile, filepositiontext, currentfilename, sourcecodeform, strictf77,\
+            beginpattern, quiet, verbose, dolowercase = saveglobals
+
+
+# Crack line
+beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'\
+                  r'\s*(?P<this>(\b(%s)\b))'\
+                  r'\s*(?P<after>%s)\s*\Z'
+##
+fortrantypes = r'character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
+typespattern = re.compile(
+    beforethisafter % ('', fortrantypes, fortrantypes, '.*'), re.I), 'type'
+typespattern4implicit = re.compile(beforethisafter % (
+    '', fortrantypes + '|static|automatic|undefined', fortrantypes + '|static|automatic|undefined', '.*'), re.I)
+#
+functionpattern = re.compile(beforethisafter % (
+    r'([a-z]+[\w\s(=*+-/)]*?|)', 'function', 'function', '.*'), re.I), 'begin'
+subroutinepattern = re.compile(beforethisafter % (
+    r'[a-z\s]*?', 'subroutine', 'subroutine', '.*'), re.I), 'begin'
+# modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
+#
+groupbegins77 = r'program|block\s*data'
+beginpattern77 = re.compile(
+    beforethisafter % ('', groupbegins77, groupbegins77, '.*'), re.I), 'begin'
+groupbegins90 = groupbegins77 + \
+    r'|module(?!\s*procedure)|python\s*module|(abstract|)\s*interface|'\
+    r'type(?!\s*\()'
+beginpattern90 = re.compile(
+    beforethisafter % ('', groupbegins90, groupbegins90, '.*'), re.I), 'begin'
+groupends = (r'end|endprogram|endblockdata|endmodule|endpythonmodule|'
+             r'endinterface|endsubroutine|endfunction')
+endpattern = re.compile(
+    beforethisafter % ('', groupends, groupends, '.*'), re.I), 'end'
+# block, the Fortran 2008 construct needs special handling in the rest of the file
+endifs = r'end\s*(if|do|where|select|while|forall|associate|'\
+         r'critical|enum|team)'
+endifpattern = re.compile(
+    beforethisafter % (r'[\w]*?', endifs, endifs, '.*'), re.I), 'endif'
+#
+moduleprocedures = r'module\s*procedure'
+moduleprocedurepattern = re.compile(
+    beforethisafter % ('', moduleprocedures, moduleprocedures, '.*'), re.I), \
+    'moduleprocedure'
+implicitpattern = re.compile(
+    beforethisafter % ('', 'implicit', 'implicit', '.*'), re.I), 'implicit'
+dimensionpattern = re.compile(beforethisafter % (
+    '', 'dimension|virtual', 'dimension|virtual', '.*'), re.I), 'dimension'
+externalpattern = re.compile(
+    beforethisafter % ('', 'external', 'external', '.*'), re.I), 'external'
+optionalpattern = re.compile(
+    beforethisafter % ('', 'optional', 'optional', '.*'), re.I), 'optional'
+requiredpattern = re.compile(
+    beforethisafter % ('', 'required', 'required', '.*'), re.I), 'required'
+publicpattern = re.compile(
+    beforethisafter % ('', 'public', 'public', '.*'), re.I), 'public'
+privatepattern = re.compile(
+    beforethisafter % ('', 'private', 'private', '.*'), re.I), 'private'
+intrinsicpattern = re.compile(
+    beforethisafter % ('', 'intrinsic', 'intrinsic', '.*'), re.I), 'intrinsic'
+intentpattern = re.compile(beforethisafter % (
+    '', 'intent|depend|note|check', 'intent|depend|note|check', r'\s*\(.*?\).*'), re.I), 'intent'
+parameterpattern = re.compile(
+    beforethisafter % ('', 'parameter', 'parameter', r'\s*\(.*'), re.I), 'parameter'
+datapattern = re.compile(
+    beforethisafter % ('', 'data', 'data', '.*'), re.I), 'data'
+callpattern = re.compile(
+    beforethisafter % ('', 'call', 'call', '.*'), re.I), 'call'
+entrypattern = re.compile(
+    beforethisafter % ('', 'entry', 'entry', '.*'), re.I), 'entry'
+callfunpattern = re.compile(
+    beforethisafter % ('', 'callfun', 'callfun', '.*'), re.I), 'callfun'
+commonpattern = re.compile(
+    beforethisafter % ('', 'common', 'common', '.*'), re.I), 'common'
+usepattern = re.compile(
+    beforethisafter % ('', 'use', 'use', '.*'), re.I), 'use'
+containspattern = re.compile(
+    beforethisafter % ('', 'contains', 'contains', ''), re.I), 'contains'
+formatpattern = re.compile(
+    beforethisafter % ('', 'format', 'format', '.*'), re.I), 'format'
+# Non-fortran and f2py-specific statements
+f2pyenhancementspattern = re.compile(beforethisafter % ('', 'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef',
+                                                        'threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef', '.*'), re.I | re.S), 'f2pyenhancements'
+multilinepattern = re.compile(
+    r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
+##
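Every `*pattern` above is a `(compiled_regex, tag)` pair built from the same three-group `beforethisafter` template, which is what lets `crackline` both classify a statement and split it into the keyword and its payload. A standalone illustration with one keyword:

```python
# Standalone illustration of the before/this/after template used above.
import re

beforethisafter = r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))' \
                  r'\s*(?P<this>(\b(%s)\b))' \
                  r'\s*(?P<after>%s)\s*\Z'
dimensionpattern = re.compile(beforethisafter % ('', 'dimension', 'dimension', '.*'), re.I)

m = dimensionpattern.match('dimension a(10), b(n, m)')
print(m.group('this'))    # 'dimension'       -> used to tag the statement
print(m.group('after'))   # 'a(10), b(n, m)'  -> handed on to analyzeline
```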
+
+def split_by_unquoted(line, characters):
+    """
+    Splits the line into (line[:i], line[i:]),
+    where i is the index of first occurrence of one of the characters
+    not within quotes, or len(line) if no such index exists
+    """
+    assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
+    r = re.compile(
+        r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
+        r"(?P<after>{char}.*)\Z".format(
+            not_quoted=f"[^\"'{re.escape(characters)}]",
+            char=f"[{re.escape(characters)}]",
+            single_quoted=r"('([^'\\]|(\\.))*')",
+            double_quoted=r'("([^"\\]|(\\.))*")'))
+    m = r.match(line)
+    if m:
+        d = m.groupdict()
+        return (d["before"], d["after"])
+    return (line, "")
+
+def _simplifyargs(argsline):
+    a = []
+    for n in markoutercomma(argsline).split('@,@'):
+        for r in '(),':
+            n = n.replace(r, '_')
+        a.append(n)
+    return ','.join(a)
+
+
+crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+\w*\b)\s*=.*', re.I)
+crackline_bind_1 = re.compile(r'\s*(?P<bind>\b[a-z]+\w*\b)\s*=.*', re.I)
+crackline_bindlang = re.compile(r'\s*bind\(\s*(?P<lang>[^,]+)\s*,\s*name\s*=\s*"(?P<lang_name>[^"]+)"\s*\)', re.I)
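`split_by_unquoted` is what keeps comment stripping and semicolon splitting from firing inside Fortran string literals. A REPL-style illustration, assuming the function defined above is in scope:

```python
# How split_by_unquoted behaves: the split character is honoured only
# outside of single- or double-quoted strings.
print(split_by_unquoted("print *, 'a!b' ! trailing comment", '!'))
# ("print *, 'a!b' ", '! trailing comment')  -- the '!' inside quotes is kept
print(split_by_unquoted("x = 1; y = 2", ';'))
# ('x = 1', '; y = 2')
```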
+
+def crackline(line, reset=0):
+    """
+    reset=-1  --- initialize
+    reset=0   --- crack the line
+    reset=1   --- final check if mismatch of blocks occurred
+
+    Cracked data is saved in grouplist[0].
+    """
+    global beginpattern, groupcounter, groupname, groupcache, grouplist
+    global filepositiontext, currentfilename, neededmodule, expectbegin
+    global skipblocksuntil, skipemptyends, previous_context, gotnextfile
+
+    _, has_semicolon = split_by_unquoted(line, ";")
+    if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
+                              multilinepattern[0].match(line)):
+        # XXX: non-zero reset values need testing
+        assert reset == 0, repr(reset)
+        # split line on unquoted semicolons
+        line, semicolon_line = split_by_unquoted(line, ";")
+        while semicolon_line:
+            crackline(line, reset)
+            line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
+        crackline(line, reset)
+        return
+    if reset < 0:
+        groupcounter = 0
+        groupname = {groupcounter: ''}
+        groupcache = {groupcounter: {}}
+        grouplist = {groupcounter: []}
+        groupcache[groupcounter]['body'] = []
+        groupcache[groupcounter]['vars'] = {}
+        groupcache[groupcounter]['block'] = ''
+        groupcache[groupcounter]['name'] = ''
+        neededmodule = -1
+        skipblocksuntil = -1
+        return
+    if reset > 0:
+        fl = 0
+        if f77modulename and neededmodule == groupcounter:
+            fl = 2
+        while groupcounter > fl:
+            outmess('crackline: groupcounter=%s groupname=%s\n' %
+                    (repr(groupcounter), repr(groupname)))
+            outmess(
+                'crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1
+        if f77modulename and neededmodule == groupcounter:
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1  # end interface
+            grouplist[groupcounter - 1].append(groupcache[groupcounter])
+            grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+            del grouplist[groupcounter]
+            groupcounter = groupcounter - 1  # end module
+            neededmodule = -1
+        return
+    if line == '':
+        return
+    flag = 0
+    for pat in [dimensionpattern, externalpattern, intentpattern, optionalpattern,
+                requiredpattern,
+                parameterpattern, datapattern, publicpattern, privatepattern,
+                intrinsicpattern,
+                endifpattern, endpattern,
+                formatpattern,
+                beginpattern, functionpattern, subroutinepattern,
+                implicitpattern, typespattern, commonpattern,
+                callpattern, usepattern, containspattern,
+                entrypattern,
+                f2pyenhancementspattern,
+                multilinepattern,
+                moduleprocedurepattern
+                ]:
+        m = pat[0].match(line)
+        if m:
+            break
+        flag = flag + 1
+    if not m:
+        re_1 = crackline_re_1
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        if 'externals' in groupcache[groupcounter]:
+            for name in groupcache[groupcounter]['externals']:
+                if name in invbadnames:
+                    name = invbadnames[name]
+                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
+                    continue
+                m1 = re.match(
+                    r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z' % name, markouterparen(line), re.I)
+                if m1:
+                    m2 = re_1.match(m1.group('before'))
+                    a = _simplifyargs(m1.group('args'))
+                    if m2:
+                        line = f"callfun {name}({a}) result ({m2.group('result')})"
+                    else:
+                        line = f'callfun {name}({a})'
+                    m = callfunpattern[0].match(line)
+                    if not m:
+                        outmess(
+                            f'crackline: could not resolve function call for line={repr(line)}.\n')
+                        return
+                    analyzeline(m, 'callfun', line)
+                    return
+        if verbose > 1 or (verbose == 1 and currentfilename.lower().endswith('.pyf')):
+            previous_context = None
+            outmess('crackline:%d: No pattern for line\n' % (groupcounter))
+        return
+    elif pat[1] == 'end':
+        if 0 <= skipblocksuntil < groupcounter:
+            groupcounter = groupcounter - 1
+            if skipblocksuntil <= groupcounter:
+                return
+        if groupcounter <= 0:
+            raise Exception('crackline: groupcounter(=%s) is nonpositive. '
+                            'Check the blocks.'
+                            % (groupcounter))
+        m1 = beginpattern[0].match(line)
+        if (m1) and (not m1.group('this') == groupname[groupcounter]):
+            raise Exception('crackline: End group %s does not match with '
+                            'previous Begin group %s\n\t%s' %
+                            (repr(m1.group('this')), repr(groupname[groupcounter]),
+                             filepositiontext)
+                            )
+        if skipblocksuntil == groupcounter:
+            skipblocksuntil = -1
+        grouplist[groupcounter - 1].append(groupcache[groupcounter])
+        grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter]
+        del grouplist[groupcounter]
+        groupcounter = groupcounter - 1
+        if not skipemptyends:
+            expectbegin = 1
+    elif pat[1] == 'begin':
+        if 0 <= skipblocksuntil <= groupcounter:
+            groupcounter = groupcounter + 1
+            return
+        gotnextfile = 0
+        analyzeline(m, pat[1], line)
+        expectbegin = 0
+    elif pat[1] == 'endif':
+        pass
+    elif pat[1] == 'moduleprocedure':
+        analyzeline(m, pat[1], line)
+    elif pat[1] == 'contains':
+        if ignorecontains:
+            return
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        skipblocksuntil = groupcounter
+    else:
+        if 0 <= skipblocksuntil <= groupcounter:
+            return
+        analyzeline(m, pat[1], line)
+
+
+def markouterparen(line):
+    l = ''
+    f = 0
+    for c in line:
+        if c == '(':
+            f = f + 1
+            if f == 1:
+                l = l + '@(@'
+                continue
+        elif c == ')':
+            f = f - 1
+            if f == 0:
+                l = l + '@)@'
+                continue
+        l = l + c
+    return l
+
+
+def markoutercomma(line, comma=','):
+    l = ''
+    f = 0
+    before, after = split_by_unquoted(line, comma + '()')
+    l += before
+    while after:
+        if (after[0] == comma) and (f == 0):
+            l += '@' + comma + '@'
+        else:
+            l += after[0]
+            if after[0] == '(':
+                f += 1
+            elif after[0] == ')':
+                f -= 1
+        before, after = split_by_unquoted(after[1:], comma + '()')
+        l += before
+    assert not f, repr((f, line, l))
+    return l
+
+def unmarkouterparen(line):
+    r = line.replace('@(@', '(').replace('@)@', ')')
+    return r
+
+
+def appenddecl(decl, decl2, force=1):
+    if not decl:
+        decl = {}
+    if not decl2:
+        return decl
+    if decl is decl2:
+        return decl
+    for k in list(decl2.keys()):
+        if k == 'typespec':
+            if force or k not in decl:
+                decl[k] = decl2[k]
+        elif k == 'attrspec':
+            for l in decl2[k]:
+                decl = setattrspec(decl, l, force)
+        elif k == 'kindselector':
+            decl = setkindselector(decl, decl2[k], force)
+        elif k == 'charselector':
+            decl = setcharselector(decl, decl2[k], force)
+        elif k in ['=', 'typename']:
+            if force or k not in decl:
+                decl[k] = decl2[k]
+        elif k == 'note':
+            pass
+        elif k in ['intent', 'check', 'dimension', 'optional',
+                   'required', 'depend']:
+            errmess(f'appenddecl: "{k}" not implemented.\n')
+        else:
+            raise Exception('appenddecl: Unknown variable definition key: ' +
+                            str(k))
+    return decl
+
+
+selectpattern = re.compile(
+    r'\s*(?P<this>(@\(@.*?@\)@|\*[\d*]+|\*\s*@\(@.*?@\)@|))(?P<after>.*)\Z', re.I)
+typedefpattern = re.compile(
+    r'(?:,(?P<attributes>[\w(),]+))?(::)?(?P<name>\b[a-z$_][\w$]*\b)'
+    r'(?:\((?P<params>[\w,]*)\))?\Z', re.I)
+nameargspattern = re.compile(
+    r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>(?:(?!@\)@).)*)\s*@\)@))*\s*\Z', re.I)
+operatorpattern = re.compile(
+    r'\s*(?P<scheme>(operator|assignment))'
+    r'@\(@\s*(?P<name>[^)]+)\s*@\)@\s*\Z', re.I)
+callnameargspattern = re.compile(
+    r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z', re.I)
+real16pattern = re.compile(
+    r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
+real8pattern = re.compile(
+    r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')
+
+_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b', re.I)
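The `@(@`/`@,@` markers exist so that later `.split('@,@')` calls only cut at top-level commas and parentheses. A REPL-style illustration, assuming `markouterparen` and `markoutercomma` from above are in scope:

```python
# Only the outermost delimiters get marked; nested ones are left alone.
print(markouterparen('f(a, g(b, c), d)'))
# 'f@(@a, g(b, c), d@)@'
print(markoutercomma('a, g(b, c), d'))
# 'a@,@ g(b, c)@,@ d'   -- the comma inside g(...) stays untouched
```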
+
+
+def _is_intent_callback(vdecl):
+    for a in vdecl.get('attrspec', []):
+        if _intentcallbackpattern.match(a):
+            return 1
+    return 0
+
+
+def _resolvetypedefpattern(line):
+    line = ''.join(line.split())  # removes whitespace
+    m1 = typedefpattern.match(line)
+    print(line, m1)
+    if m1:
+        attrs = m1.group('attributes')
+        attrs = [a.lower() for a in attrs.split(',')] if attrs else []
+        return m1.group('name'), attrs, m1.group('params')
+    return None, [], None
+
+def parse_name_for_bind(line):
+    pattern = re.compile(r'bind\(\s*(?P<lang>[^,]+)(?:\s*,\s*name\s*=\s*["\'](?P<name>[^"\']+)["\']\s*)?\)', re.I)
+    match = pattern.search(line)
+    bind_statement = None
+    if match:
+        bind_statement = match.group(0)
+        # Remove the 'bind' construct from the line.
+        line = line[:match.start()] + line[match.end():]
+    return line, bind_statement
+
+def _resolvenameargspattern(line):
+    line, bind_cname = parse_name_for_bind(line)
+    line = markouterparen(line)
+    m1 = nameargspattern.match(line)
+    if m1:
+        return m1.group('name'), m1.group('args'), m1.group('result'), bind_cname
+    m1 = operatorpattern.match(line)
+    if m1:
+        name = m1.group('scheme') + '(' + m1.group('name') + ')'
+        return name, [], None, None
+    m1 = callnameargspattern.match(line)
+    if m1:
+        return m1.group('name'), m1.group('args'), None, None
+    return None, [], None, None
+
+
+def analyzeline(m, case, line):
+    """
+    Reads each line in the input file in sequence and updates global vars.
+
+    Effectively reads and collects information from the input file to the
+    global variable groupcache, a dictionary containing info about each part
+    of the fortran module.
+
+    At the end of analyzeline, information is filtered into the correct dict
+    keys, but parameter values and dimensions are not yet interpreted.
+    """
+    global groupcounter, groupname, groupcache, grouplist, filepositiontext
+    global currentfilename, f77modulename, neededinterface, neededmodule
+    global expectbegin, gotnextfile, previous_context
+
+    block = m.group('this')
+    if case != 'multiline':
+        previous_context = None
+    if expectbegin and case not in ['begin', 'call', 'callfun', 'type'] \
+            and not skipemptyends and groupcounter < 1:
+        newname = os.path.basename(currentfilename).split('.')[0]
+        outmess(
+            f'analyzeline: no group yet. 
Creating program group with name "{newname}".\n') + gotnextfile = 0 + groupcounter = groupcounter + 1 + groupname[groupcounter] = 'program' + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['block'] = 'program' + groupcache[groupcounter]['name'] = newname + groupcache[groupcounter]['from'] = 'fromsky' + expectbegin = 0 + if case in ['begin', 'call', 'callfun']: + # Crack line => block,name,args,result + block = block.lower() + if re.match(r'block\s*data', block, re.I): + block = 'block data' + elif re.match(r'python\s*module', block, re.I): + block = 'python module' + elif re.match(r'abstract\s*interface', block, re.I): + block = 'abstract interface' + if block == 'type': + name, attrs, _ = _resolvetypedefpattern(m.group('after')) + groupcache[groupcounter]['vars'][name] = {'attrspec': attrs} + args = [] + result = None + else: + name, args, result, bindcline = _resolvenameargspattern(m.group('after')) + if name is None: + if block == 'block data': + name = '_BLOCK_DATA_' + else: + name = '' + if block not in ['interface', 'block data', 'abstract interface']: + outmess('analyzeline: No name/args pattern found for line.\n') + + previous_context = (block, name, groupcounter) + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + if '' in args: + while '' in args: + args.remove('') + outmess( + 'analyzeline: argument list is malformed (missing argument).\n') + + # end of crack line => block,name,args,result + needmodule = 0 + needinterface = 0 + + if case in ['call', 'callfun']: + needinterface = 1 + if 'args' not in groupcache[groupcounter]: + return + if name not in groupcache[groupcounter]['args']: + return + for it in grouplist[groupcounter]: + if it['name'] == name: + return + if name in groupcache[groupcounter]['interfaced']: + return + block = {'call': 'subroutine', 'callfun': 'function'}[case] + if f77modulename and neededmodule == -1 and groupcounter <= 1: + neededmodule = groupcounter + 2 + needmodule = 1 + if block not in ['interface', 'abstract interface']: + needinterface = 1 + # Create new block(s) + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needmodule: + if verbose > 1: + outmess('analyzeline: Creating module block %s\n' % + repr(f77modulename), 0) + groupname[groupcounter] = 'module' + groupcache[groupcounter]['block'] = 'python module' + groupcache[groupcounter]['name'] = f77modulename + groupcache[groupcounter]['from'] = '' + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + grouplist[groupcounter] = [] + if needinterface: + if verbose > 1: + outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % ( + groupcounter), 0) + groupname[groupcounter] = 'interface' + groupcache[groupcounter]['block'] = 'interface' + groupcache[groupcounter]['name'] = 'unknown_interface' + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcounter = groupcounter + 1 + groupcache[groupcounter] = {} + 
grouplist[groupcounter] = [] + groupname[groupcounter] = block + groupcache[groupcounter]['block'] = block + if not name: + name = 'unknown_' + block.replace(' ', '_') + groupcache[groupcounter]['prefix'] = m.group('before') + groupcache[groupcounter]['name'] = rmbadname1(name) + groupcache[groupcounter]['result'] = result + if groupcounter == 1: + groupcache[groupcounter]['from'] = currentfilename + elif f77modulename and groupcounter == 3: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], currentfilename) + else: + groupcache[groupcounter]['from'] = '%s:%s' % ( + groupcache[groupcounter - 1]['from'], groupcache[groupcounter - 1]['name']) + for k in list(groupcache[groupcounter].keys()): + if not groupcache[groupcounter][k]: + del groupcache[groupcounter][k] + + groupcache[groupcounter]['args'] = args + groupcache[groupcounter]['body'] = [] + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['interfaced'] = [] + groupcache[groupcounter]['vars'] = {} + groupcache[groupcounter]['entry'] = {} + # end of creation + if block == 'type': + groupcache[groupcounter]['varnames'] = [] + + if case in ['call', 'callfun']: # set parents variables + if name not in groupcache[groupcounter - 2]['externals']: + groupcache[groupcounter - 2]['externals'].append(name) + groupcache[groupcounter]['vars'] = copy.deepcopy( + groupcache[groupcounter - 2]['vars']) + try: + del groupcache[groupcounter]['vars'][name][ + groupcache[groupcounter]['vars'][name]['attrspec'].index('external')] + except Exception: + pass + if block in ['function', 'subroutine']: # set global attributes + # name is fortran name + if bindcline: + bindcdat = re.search(crackline_bindlang, bindcline) + if bindcdat: + groupcache[groupcounter]['bindlang'] = {name: {}} + groupcache[groupcounter]['bindlang'][name]["lang"] = bindcdat.group('lang') + if bindcdat.group('lang_name'): + groupcache[groupcounter]['bindlang'][name]["name"] = bindcdat.group('lang_name') + try: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars']['']) + except Exception: + pass + if case == 'callfun': # return type + if result and result in groupcache[groupcounter]['vars']: + if not name == result: + groupcache[groupcounter]['vars'][name] = appenddecl( + groupcache[groupcounter]['vars'][name], groupcache[groupcounter]['vars'][result]) + # if groupcounter>1: # name is interfaced + try: + groupcache[groupcounter - 2]['interfaced'].append(name) + except Exception: + pass + if block == 'function': + t = typespattern[0].match(m.group('before') + ' ' + name) + if t: + typespec, selector, attr, edecl = cracktypespec0( + t.group('this'), t.group('after')) + updatevars(typespec, selector, attr, edecl) + + if case in ['call', 'callfun']: + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end routine + grouplist[groupcounter - 1].append(groupcache[groupcounter]) + grouplist[groupcounter - 1][-1]['body'] = grouplist[groupcounter] + del grouplist[groupcounter] + groupcounter = groupcounter - 1 # end interface + + elif case == 'entry': + name, args, result, _ = _resolvenameargspattern(m.group('after')) + if name is not None: + if args: + args = rmbadname([x.strip() + for x in markoutercomma(args).split('@,@')]) + else: + args = [] + assert result is None, repr(result) + 
groupcache[groupcounter]['entry'][name] = args + previous_context = ('entry', name, groupcounter) + elif case == 'type': + typespec, selector, attr, edecl = cracktypespec0( + block, m.group('after')) + last_name = updatevars(typespec, selector, attr, edecl) + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case in ['dimension', 'intent', 'optional', 'required', 'external', 'public', 'private', 'intrinsic']: + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip() + i = ll.find('::') + if i < 0 and case == 'intent': + i = markouterparen(ll).find('@)@') - 2 + ll = ll[:i + 1] + '::' + ll[i + 1:] + i = ll.find('::') + if ll[i:] == '::' and 'args' in groupcache[groupcounter]: + outmess('All arguments will have attribute %s%s\n' % + (m.group('this'), ll[:i])) + ll = ll + ','.join(groupcache[groupcounter]['args']) + if i < 0: + i = 0 + pl = '' + else: + pl = ll[:i].strip() + ll = ll[i + 2:] + ch = markoutercomma(pl).split('@,@') + if len(ch) > 1: + pl = ch[0] + outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % ( + ','.join(ch[1:]))) + last_name = None + + for e in [x.strip() for x in markoutercomma(ll).split('@,@')]: + m1 = namepattern.match(e) + if not m1: + if case in ['public', 'private']: + k = '' + else: + print(m.groupdict()) + outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n' % ( + case, repr(e))) + continue + else: + k = rmbadname1(m1.group('name')) + if case in ['public', 'private'] and k in {'operator', 'assignment'}: + k += m1.group('after') + if k not in edecl: + edecl[k] = {} + if case == 'dimension': + ap = case + m1.group('after') + if case == 'intent': + ap = m.group('this') + pl + if _intentcallbackpattern.match(ap): + if k not in groupcache[groupcounter]['args']: + if groupcounter > 1: + if '__user__' not in groupcache[groupcounter - 2]['name']: + outmess( + 'analyzeline: missing __user__ module (could be nothing)\n') + # fixes ticket 1693 + if k != groupcache[groupcounter]['name']: + outmess('analyzeline: appending intent(callback) %s' + ' to %s arguments\n' % (k, groupcache[groupcounter]['name'])) + groupcache[groupcounter]['args'].append(k) + else: + errmess( + f'analyzeline: intent(callback) {k} is ignored\n') + else: + errmess('analyzeline: intent(callback) %s is already' + ' in argument list\n' % (k)) + if case in ['optional', 'required', 'public', 'external', 'private', 'intrinsic']: + ap = case + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append(ap) + else: + edecl[k]['attrspec'] = [ap] + if case == 'external': + if groupcache[groupcounter]['block'] == 'program': + outmess('analyzeline: ignoring program arguments\n') + continue + if k not in groupcache[groupcounter]['args']: + continue + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(k) + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'moduleprocedure': + groupcache[groupcounter]['implementedby'] = \ + [x.strip() for x in m.group('after').split(',')] + elif case == 'parameter': + edecl = groupcache[groupcounter]['vars'] + ll = m.group('after').strip()[1:-1] + last_name = None + for e in markoutercomma(ll).split('@,@'): + try: + k, initexpr = [x.strip() for x in e.split('=')] + except Exception: + outmess( + f'analyzeline: could not extract name,expr in parameter 
statement "{e}" of "{ll}\"\n') + continue + params = get_parameters(edecl) + k = rmbadname1(k) + if k not in edecl: + edecl[k] = {} + if '=' in edecl[k] and (not edecl[k]['='] == initexpr): + outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n' % ( + k, edecl[k]['='], initexpr)) + t = determineexprtype(initexpr, params) + if t: + if t.get('typespec') == 'real': + tt = list(initexpr) + for m in real16pattern.finditer(initexpr): + tt[m.start():m.end()] = list( + initexpr[m.start():m.end()].lower().replace('d', 'e')) + initexpr = ''.join(tt) + elif t.get('typespec') == 'complex': + initexpr = initexpr[1:].lower().replace('d', 'e').\ + replace(',', '+1j*(') + try: + v = eval(initexpr, {}, params) + except (SyntaxError, NameError, TypeError) as msg: + errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n' + % (initexpr, msg)) + continue + edecl[k]['='] = repr(v) + if 'attrspec' in edecl[k]: + edecl[k]['attrspec'].append('parameter') + else: + edecl[k]['attrspec'] = ['parameter'] + last_name = k + groupcache[groupcounter]['vars'] = edecl + if last_name is not None: + previous_context = ('variable', last_name, groupcounter) + elif case == 'implicit': + if m.group('after').strip().lower() == 'none': + groupcache[groupcounter]['implicit'] = None + elif m.group('after'): + impl = groupcache[groupcounter].get('implicit', {}) + if impl is None: + outmess( + 'analyzeline: Overwriting earlier "implicit none" statement.\n') + impl = {} + for e in markoutercomma(m.group('after')).split('@,@'): + decl = {} + m1 = re.match( + r'\s*(?P.*?)\s*(\(\s*(?P[a-z-, ]+)\s*\)\s*|)\Z', e, re.I) + if not m1: + outmess( + f'analyzeline: could not extract info of implicit statement part "{e}\"\n') + continue + m2 = typespattern4implicit.match(m1.group('this')) + if not m2: + outmess( + f'analyzeline: could not extract types pattern of implicit statement part "{e}\"\n') + continue + typespec, selector, attr, edecl = cracktypespec0( + m2.group('this'), m2.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + decl['typespec'] = typespec + decl['kindselector'] = kindselect + decl['charselector'] = charselect + decl['typename'] = typename + for k in list(decl.keys()): + if not decl[k]: + del decl[k] + for r in markoutercomma(m1.group('after')).split('@,@'): + if '-' in r: + try: + begc, endc = [x.strip() for x in r.split('-')] + except Exception: + outmess( + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement\n') + continue + else: + begc = endc = r.strip() + if not len(begc) == len(endc) == 1: + outmess( + f'analyzeline: expected "-" instead of "{r}" in range list of implicit statement (2)\n') + continue + for o in range(ord(begc), ord(endc) + 1): + impl[chr(o)] = decl + groupcache[groupcounter]['implicit'] = impl + elif case == 'data': + ll = [] + dl = '' + il = '' + f = 0 + fc = 1 + inp = 0 + for c in m.group('after'): + if not inp: + if c == "'": + fc = not fc + if c == '/' and fc: + f = f + 1 + continue + if c == '(': + inp = inp + 1 + elif c == ')': + inp = inp - 1 + if f == 0: + dl = dl + c + elif f == 1: + il = il + c + elif f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + dl = c + il = '' + f = 0 + if f == 2: + dl = dl.strip() + if dl.startswith(','): + dl = dl[1:].strip() + ll.append([dl, il]) + vars = groupcache[groupcounter].get('vars', {}) + last_name = None + for l in ll: + l[0], l[1] = l[0].strip().removeprefix(','), l[1].strip() + if l[0].startswith('('): + 
+    elif case == 'data':
+        ll = []
+        dl = ''
+        il = ''
+        f = 0
+        fc = 1
+        inp = 0
+        for c in m.group('after'):
+            if not inp:
+                if c == "'":
+                    fc = not fc
+                if c == '/' and fc:
+                    f = f + 1
+                    continue
+            if c == '(':
+                inp = inp + 1
+            elif c == ')':
+                inp = inp - 1
+            if f == 0:
+                dl = dl + c
+            elif f == 1:
+                il = il + c
+            elif f == 2:
+                dl = dl.strip()
+                if dl.startswith(','):
+                    dl = dl[1:].strip()
+                ll.append([dl, il])
+                dl = c
+                il = ''
+                f = 0
+        if f == 2:
+            dl = dl.strip()
+            if dl.startswith(','):
+                dl = dl[1:].strip()
+            ll.append([dl, il])
+        vars = groupcache[groupcounter].get('vars', {})
+        last_name = None
+        for l in ll:
+            l[0], l[1] = l[0].strip().removeprefix(','), l[1].strip()
+            if l[0].startswith('('):
+                outmess(f'analyzeline: implied-DO list "{l[0]}" is not supported. Skipping.\n')
+                continue
+            for idx, v in enumerate(rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')])):
+                if v.startswith('('):
+                    outmess(f'analyzeline: implied-DO list "{v}" is not supported. Skipping.\n')
+                    # XXX: subsequent init expressions may get wrong values.
+                    # Ignoring since data statements are irrelevant for
+                    # wrapping.
+                    continue
+                if '!' in l[1]:
+                    # Fixes gh-24746 pyf generation
+                    # XXX: This essentially ignores the value for generating the pyf which is fine:
+                    # integer dimension(3) :: mytab
+                    # common /mycom/ mytab
+                    # Since in any case it is initialized in the Fortran code
+                    outmess(f'Comment line in declaration "{l[1]}" is not supported. Skipping.\n')
+                    continue
+                vars.setdefault(v, {})
+                vtype = vars[v].get('typespec')
+                vdim = getdimension(vars[v])
+                matches = re.findall(r"\(.*?\)", l[1]) if vtype == 'complex' else l[1].split(',')
+                try:
+                    new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx]
+                except IndexError:
+                    # gh-24746
+                    # Runs only if above code fails. Fixes the line
+                    # DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /4*0,0.0D0/
+                    # by expanding to ['0', '0', '0', '0', '0.0d0']
+                    if any("*" in m for m in matches):
+                        expanded_list = []
+                        for match in matches:
+                            if "*" in match:
+                                try:
+                                    multiplier, value = match.split("*")
+                                    expanded_list.extend([value.strip()] * int(multiplier))
+                                except ValueError:  # if int(multiplier) fails
+                                    expanded_list.append(match.strip())
+                            else:
+                                expanded_list.append(match.strip())
+                        matches = expanded_list
+                    new_val = f"(/{', '.join(matches)}/)" if vdim else matches[idx]
+                current_val = vars[v].get('=')
+                if current_val and (current_val != new_val):
+                    outmess(f'analyzeline: changing init expression of "{v}" ("{current_val}") to "{new_val}\"\n')
+                vars[v]['='] = new_val
+                last_name = v
+        groupcache[groupcounter]['vars'] = vars
+        if last_name:
+            previous_context = ('variable', last_name, groupcounter)
+    elif case == 'common':
+        line = m.group('after').strip()
+        if not line[0] == '/':
+            line = '//' + line
+
+        cl = []
+        [_, bn, ol] = re.split('/', line, maxsplit=2)  # noqa: RUF039
+        bn = bn.strip()
+        if not bn:
+            bn = '_BLNK_'
+        cl.append([bn, ol])
+        commonkey = {}
+        if 'common' in groupcache[groupcounter]:
+            commonkey = groupcache[groupcounter]['common']
+        for c in cl:
+            if c[0] not in commonkey:
+                commonkey[c[0]] = []
+            for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
+                if i:
+                    commonkey[c[0]].append(i)
+        groupcache[groupcounter]['common'] = commonkey
+        previous_context = ('common', bn, groupcounter)
+    elif case == 'use':
+        m1 = re.match(
+            r'\A\s*(?P<name>\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z', m.group('after'), re.I)
+        if m1:
+            mm = m1.groupdict()
+            if 'use' not in groupcache[groupcounter]:
+                groupcache[groupcounter]['use'] = {}
+            name = m1.group('name')
+            groupcache[groupcounter]['use'][name] = {}
+            isonly = 0
+            if 'list' in mm and mm['list'] is not None:
+                if 'notonly' in mm and mm['notonly'] is None:
+                    isonly = 1
+                groupcache[groupcounter]['use'][name]['only'] = isonly
+                ll = [x.strip() for x in mm['list'].split(',')]
+                rl = {}
+                for l in ll:
+                    if '=' in l:
+                        m2 = re.match(
+                            r'\A\s*(?P<local>\b\w+\b)\s*=\s*>\s*(?P<use>\b\w+\b)\s*\Z', l, re.I)
+                        if m2:
+                            rl[m2.group('local').strip()] = m2.group(
+                                'use').strip()
+                        else:
+                            outmess(
+                                f'analyzeline: Not local=>use pattern found in {repr(l)}\n')
+                    else:
+                        rl[l] = l
+                groupcache[groupcounter]['use'][name]['map'] = rl
+        else:
+            print(m.groupdict())
+            outmess('analyzeline: Could not crack the use statement.\n')
+    elif case in ['f2pyenhancements']:
+        if 'f2pyenhancements' not in groupcache[groupcounter]:
+            groupcache[groupcounter]['f2pyenhancements'] = {}
+        d = groupcache[groupcounter]['f2pyenhancements']
+        if m.group('this') == 'usercode' and 'usercode' in d:
+            if isinstance(d['usercode'], str):
+                d['usercode'] = [d['usercode']]
+            d['usercode'].append(m.group('after'))
+        else:
+            d[m.group('this')] = m.group('after')
+    elif case == 'multiline':
+        if previous_context is None:
+            if verbose:
+                outmess('analyzeline: No context for multiline block.\n')
+            return
+        gc = groupcounter
+        appendmultiline(groupcache[gc],
+                        previous_context[:2],
+                        m.group('this'))
+    elif verbose > 1:
+        print(m.groupdict())
+        outmess('analyzeline: No code implemented for line.\n')
+
+
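The `use` branch above leans entirely on named groups: `name` captures the module, the empty `notonly` group participates only when the `only:` keyword is absent, and `list` carries the import list. A minimal sketch of how those groups come back out of `re` (the pattern is the one from the branch above; the sample module name `mymod` is made up):

```python
import re

use_pattern = re.compile(
    r'\A\s*(?P<name>\b\w+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z',
    re.I)

m = use_pattern.match('mymod, only: a, b => c')
mm = m.groupdict()
print(mm['name'])     # 'mymod'
print(mm['notonly'])  # None, so this is an 'only:' import
print(mm['list'])     # 'a, b => c'
```

This is exactly the `isonly` test in the branch: `mm['notonly'] is None` marks an `only:` import, and each `local => use` entry in `list` is then split into the rename map `rl`.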
+def appendmultiline(group, context_name, ml):
+    if 'f2pymultilines' not in group:
+        group['f2pymultilines'] = {}
+    d = group['f2pymultilines']
+    if context_name not in d:
+        d[context_name] = []
+    d[context_name].append(ml)
+
+
+def cracktypespec0(typespec, ll):
+    selector = None
+    attr = None
+    if re.match(r'double\s*complex', typespec, re.I):
+        typespec = 'double complex'
+    elif re.match(r'double\s*precision', typespec, re.I):
+        typespec = 'double precision'
+    else:
+        typespec = typespec.strip().lower()
+    m1 = selectpattern.match(markouterparen(ll))
+    if not m1:
+        outmess(
+            'cracktypespec0: no kind/char_selector pattern found for line.\n')
+        return
+    d = m1.groupdict()
+    for k in list(d.keys()):
+        d[k] = unmarkouterparen(d[k])
+    if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
+        selector = d['this']
+        ll = d['after']
+    i = ll.find('::')
+    if i >= 0:
+        attr = ll[:i].strip()
+        ll = ll[i + 2:]
+    return typespec, selector, attr, ll
+
+
+#####
+namepattern = re.compile(r'\s*(?P<name>\b\w+\b)\s*(?P<after>.*)\s*\Z', re.I)
+kindselector = re.compile(
+    r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|\*\s*(?P<kind2>.*?))\s*\Z', re.I)
+charselector = re.compile(
+    r'\s*(\((?P<lenkind>.*)\)|\*\s*(?P<charlen>.*))\s*\Z', re.I)
+lenkindpattern = re.compile(
+    r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)'
+    r'|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)'
+    r'|(f2py_len\s*=\s*(?P<f2py_len>.*))|))\s*\Z', re.I)
+lenarraypattern = re.compile(
+    r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*\*\s*(?P<len>.*?)|(\*\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z', re.I)
+
+
+def removespaces(expr):
+    expr = expr.strip()
+    if len(expr) <= 1:
+        return expr
+    expr2 = expr[0]
+    for i in range(1, len(expr) - 1):
+        if (expr[i] == ' ' and
+                ((expr[i + 1] in "()[]{}=+-/* ") or
+                 (expr[i - 1] in "()[]{}=+-/* "))):
+            continue
+        expr2 = expr2 + expr[i]
+    expr2 = expr2 + expr[-1]
+    return expr2
+
+
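The `@,@`, `@(@` and `@)@` tokens in `lenkindpattern` and `lenarraypattern` above are not literal Fortran: selector text is first run through f2py's `markoutercomma`/`markouterparen`, which tag only zero-depth punctuation so the patterns can split on it without tripping over nested parentheses. A rough stand-in for the comma-marking step (simplified sketch, not f2py's implementation):

```python
def mark_outer_commas(line):
    # Tag commas at parenthesis depth 0 with '@,@'; nested commas are
    # left alone, mirroring what the crack patterns expect to see.
    depth = 0
    out = []
    for c in line:
        if c == '(':
            depth += 1
        elif c == ')':
            depth -= 1
        elif c == ',' and depth == 0:
            out.append('@,@')
            continue
        out.append(c)
    return ''.join(out)

marked = mark_outer_commas('dimension(n,m), intent(in)')
print(marked)               # dimension(n,m)@,@ intent(in)
print(marked.split('@,@'))  # ['dimension(n,m)', ' intent(in)']
```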
+def markinnerspaces(line):
+    """
+    The function replaces all spaces in the input variable line that are
+    surrounded with quotation marks with the triplet "@_@".
+
+    For instance, for the input "a 'b c'" the function returns "a 'b@_@c'"
+
+    Parameters
+    ----------
+    line : str
+
+    Returns
+    -------
+    str
+
+    """
+    fragment = ''
+    inside = False
+    current_quote = None
+    escaped = ''
+    for c in line:
+        if escaped == '\\' and c in ['\\', '\'', '"']:
+            fragment += c
+            escaped = c
+            continue
+        if not inside and c in ['\'', '"']:
+            current_quote = c
+        if c == current_quote:
+            inside = not inside
+        elif c == ' ' and inside:
+            fragment += '@_@'
+            continue
+        fragment += c
+        escaped = c  # reset to non-backslash
+    return fragment
+
+
+def updatevars(typespec, selector, attrspec, entitydecl):
+    """
+    Returns last_name, the variable name without special chars, parenthesis
+    or dimension specifiers.
+
+    Alters groupcache to add the name, typespec, attrspec (and possibly value)
+    of current variable.
+    """
+    global groupcache, groupcounter
+
+    last_name = None
+    kindselect, charselect, typename = cracktypespec(typespec, selector)
+    # Clean up outer commas, whitespace and undesired chars from attrspec
+    if attrspec:
+        attrspec = [x.strip() for x in markoutercomma(attrspec).split('@,@')]
+        l = []
+        c = re.compile(r'(?P<start>[a-zA-Z]+)')
+        for a in attrspec:
+            if not a:
+                continue
+            m = c.match(a)
+            if m:
+                s = m.group('start').lower()
+                a = s + a[len(s):]
+            l.append(a)
+        attrspec = l
+    el = [x.strip() for x in markoutercomma(entitydecl).split('@,@')]
+    el1 = []
+    for e in el:
+        for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)), comma=' ').split('@ @')]:
+            if e1:
+                el1.append(e1.replace('@_@', ' '))
+    for e in el1:
+        m = namepattern.match(e)
+        if not m:
+            outmess(
+                f'updatevars: no name pattern found for entity={repr(e)}. Skipping.\n')
+            continue
+        ename = rmbadname1(m.group('name'))
+        edecl = {}
+        if ename in groupcache[groupcounter]['vars']:
+            edecl = groupcache[groupcounter]['vars'][ename].copy()
+            not_has_typespec = 'typespec' not in edecl
+            if not_has_typespec:
+                edecl['typespec'] = typespec
+            elif typespec and (not typespec == edecl['typespec']):
+                outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (
+                    ename, edecl['typespec'], typespec))
+            if 'kindselector' not in edecl:
+                edecl['kindselector'] = copy.copy(kindselect)
+            elif kindselect:
+                for k in list(kindselect.keys()):
+                    if k in edecl['kindselector'] and (not kindselect[k] == edecl['kindselector'][k]):
+                        outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
+                            k, ename, edecl['kindselector'][k], kindselect[k]))
+                    else:
+                        edecl['kindselector'][k] = copy.copy(kindselect[k])
+            if 'charselector' not in edecl and charselect:
+                if not_has_typespec:
+                    edecl['charselector'] = charselect
+                else:
+                    errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n'
+                            % (ename, charselect))
+            elif charselect:
+                for k in list(charselect.keys()):
+                    if k in edecl['charselector'] and (not charselect[k] == edecl['charselector'][k]):
+                        outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (
+                            k, ename, edecl['charselector'][k], charselect[k]))
+                    else:
+                        edecl['charselector'][k] = copy.copy(charselect[k])
+            if 'typename' not in edecl:
+                edecl['typename'] = typename
+            elif typename and (not edecl['typename'] == typename):
+                outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". 
Ignoring.\n' % ( + ename, edecl['typename'], typename)) + if 'attrspec' not in edecl: + edecl['attrspec'] = copy.copy(attrspec) + elif attrspec: + for a in attrspec: + if a not in edecl['attrspec']: + edecl['attrspec'].append(a) + else: + edecl['typespec'] = copy.copy(typespec) + edecl['kindselector'] = copy.copy(kindselect) + edecl['charselector'] = copy.copy(charselect) + edecl['typename'] = typename + edecl['attrspec'] = copy.copy(attrspec) + if 'external' in (edecl.get('attrspec') or []) and e in groupcache[groupcounter]['args']: + if 'externals' not in groupcache[groupcounter]: + groupcache[groupcounter]['externals'] = [] + groupcache[groupcounter]['externals'].append(e) + if m.group('after'): + m1 = lenarraypattern.match(markouterparen(m.group('after'))) + if m1: + d1 = m1.groupdict() + for lk in ['len', 'array', 'init']: + if d1[lk + '2'] is not None: + d1[lk] = d1[lk + '2'] + del d1[lk + '2'] + for k in list(d1.keys()): + if d1[k] is not None: + d1[k] = unmarkouterparen(d1[k]) + else: + del d1[k] + + if 'len' in d1 and 'array' in d1: + if d1['len'] == '': + d1['len'] = d1['array'] + del d1['array'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + else: + d1['array'] = d1['array'] + ',' + d1['len'] + del d1['len'] + errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n' % ( + typespec, e, typespec, ename, d1['array'])) + + if 'len' in d1: + if typespec in ['complex', 'integer', 'logical', 'real']: + if ('kindselector' not in edecl) or (not edecl['kindselector']): + edecl['kindselector'] = {} + edecl['kindselector']['*'] = d1['len'] + del d1['len'] + elif typespec == 'character': + if ('charselector' not in edecl) or (not edecl['charselector']): + edecl['charselector'] = {} + if 'len' in edecl['charselector']: + del edecl['charselector']['len'] + edecl['charselector']['*'] = d1['len'] + del d1['len'] + + if 'init' in d1: + if '=' in edecl and (not edecl['='] == d1['init']): + outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % ( + ename, edecl['='], d1['init'])) + else: + edecl['='] = d1['init'] + + if 'array' in d1: + dm = f"dimension({d1['array']})" + if 'attrspec' not in edecl or (not edecl['attrspec']): + edecl['attrspec'] = [dm] + else: + edecl['attrspec'].append(dm) + for dm1 in edecl['attrspec']: + if dm1[:9] == 'dimension' and dm1 != dm: + del edecl['attrspec'][-1] + errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' + % (ename, dm1, dm)) + break + + else: + outmess('updatevars: could not crack entity declaration "%s". 
Ignoring.\n' % (
+                    ename + m.group('after')))
+        for k in list(edecl.keys()):
+            if not edecl[k]:
+                del edecl[k]
+        groupcache[groupcounter]['vars'][ename] = edecl
+        if 'varnames' in groupcache[groupcounter]:
+            groupcache[groupcounter]['varnames'].append(ename)
+        last_name = ename
+    return last_name
+
+
+def cracktypespec(typespec, selector):
+    kindselect = None
+    charselect = None
+    typename = None
+    if selector:
+        if typespec in ['complex', 'integer', 'logical', 'real']:
+            kindselect = kindselector.match(selector)
+            if not kindselect:
+                outmess(
+                    f'cracktypespec: no kindselector pattern found for {repr(selector)}\n')
+                return
+            kindselect = kindselect.groupdict()
+            kindselect['*'] = kindselect['kind2']
+            del kindselect['kind2']
+            for k in list(kindselect.keys()):
+                if not kindselect[k]:
+                    del kindselect[k]
+            for k, i in list(kindselect.items()):
+                kindselect[k] = rmbadname1(i)
+        elif typespec == 'character':
+            charselect = charselector.match(selector)
+            if not charselect:
+                outmess(
+                    f'cracktypespec: no charselector pattern found for {repr(selector)}\n')
+                return
+            charselect = charselect.groupdict()
+            charselect['*'] = charselect['charlen']
+            del charselect['charlen']
+            if charselect['lenkind']:
+                lenkind = lenkindpattern.match(
+                    markoutercomma(charselect['lenkind']))
+                lenkind = lenkind.groupdict()
+                for lk in ['len', 'kind']:
+                    if lenkind[lk + '2']:
+                        lenkind[lk] = lenkind[lk + '2']
+                    charselect[lk] = lenkind[lk]
+                    del lenkind[lk + '2']
+                if lenkind['f2py_len'] is not None:
+                    # used to specify the length of assumed length strings
+                    charselect['f2py_len'] = lenkind['f2py_len']
+            del charselect['lenkind']
+            for k in list(charselect.keys()):
+                if not charselect[k]:
+                    del charselect[k]
+            for k, i in list(charselect.items()):
+                charselect[k] = rmbadname1(i)
+        elif typespec == 'type':
+            typename = re.match(r'\s*\(\s*(?P<name>\w+)\s*\)', selector, re.I)
+            if typename:
+                typename = typename.group('name')
+            else:
+                outmess('cracktypespec: no typename found in %s\n' %
+                        (repr(typespec + selector)))
+        else:
+            outmess(f'cracktypespec: no selector used for {repr(selector)}\n')
+    return kindselect, charselect, typename
+######
+
+
+def setattrspec(decl, attr, force=0):
+    if not decl:
+        decl = {}
+    if not attr:
+        return decl
+    if 'attrspec' not in decl:
+        decl['attrspec'] = [attr]
+        return decl
+    if force:
+        decl['attrspec'].append(attr)
+    if attr in decl['attrspec']:
+        return decl
+    if attr == 'static' and 'automatic' not in decl['attrspec']:
+        decl['attrspec'].append(attr)
+    elif attr == 'automatic' and 'static' not in decl['attrspec']:
+        decl['attrspec'].append(attr)
+    elif attr == 'public':
+        if 'private' not in decl['attrspec']:
+            decl['attrspec'].append(attr)
+    elif attr == 'private':
+        if 'public' not in decl['attrspec']:
+            decl['attrspec'].append(attr)
+    else:
+        decl['attrspec'].append(attr)
+    return decl
+
+
+def setkindselector(decl, sel, force=0):
+    if not decl:
+        decl = {}
+    if not sel:
+        return decl
+    if 'kindselector' not in decl:
+        decl['kindselector'] = sel
+        return decl
+    for k in list(sel.keys()):
+        if force or k not in decl['kindselector']:
+            decl['kindselector'][k] = sel[k]
+    return decl
+
+
+def setcharselector(decl, sel, force=0):
+    if not decl:
+        decl = {}
+    if not sel:
+        return decl
+    if 'charselector' not in decl:
+        decl['charselector'] = sel
+        return decl
+
+    for k in list(sel.keys()):
+        if force or k not in decl['charselector']:
+            decl['charselector'][k] = sel[k]
+    return decl
+
+
+def getblockname(block, unknown='unknown'):
+    if 'name' in block:
+        return block['name']
+    return 
unknown + +# post processing + + +def setmesstext(block): + global filepositiontext + + try: + filepositiontext = f"In: {block['from']}:{block['name']}\n" + except Exception: + pass + + +def get_usedict(block): + usedict = {} + if 'parent_block' in block: + usedict = get_usedict(block['parent_block']) + if 'use' in block: + usedict.update(block['use']) + return usedict + + +def get_useparameters(block, param_map=None): + global f90modulevars + + if param_map is None: + param_map = {} + usedict = get_usedict(block) + if not usedict: + return param_map + for usename, mapping in list(usedict.items()): + usename = usename.lower() + if usename not in f90modulevars: + outmess('get_useparameters: no module %s info used by %s\n' % + (usename, block.get('name'))) + continue + mvars = f90modulevars[usename] + params = get_parameters(mvars) + if not params: + continue + # XXX: apply mapping + if mapping: + errmess(f'get_useparameters: mapping for {mapping} not impl.\n') + for k, v in list(params.items()): + if k in param_map: + outmess('get_useparameters: overriding parameter %s with' + ' value from module %s\n' % (repr(k), repr(usename))) + param_map[k] = v + + return param_map + + +def postcrack2(block, tab='', param_map=None): + global f90modulevars + + if not f90modulevars: + return block + if isinstance(block, list): + ret = [postcrack2(g, tab=tab + '\t', param_map=param_map) + for g in block] + return ret + setmesstext(block) + outmess(f"{tab}Block: {block['name']}\n", 0) + + if param_map is None: + param_map = get_useparameters(block) + + if param_map is not None and 'vars' in block: + vars = block['vars'] + for n in list(vars.keys()): + var = vars[n] + if 'kindselector' in var: + kind = var['kindselector'] + if 'kind' in kind: + val = kind['kind'] + if val in param_map: + kind['kind'] = param_map[val] + new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map) + for b in block['body']] + block['body'] = new_body + + return block + + +def postcrack(block, args=None, tab=''): + """ + TODO: + function return values + determine expression types if in argument list + """ + global usermodules, onlyfunctions + + if isinstance(block, list): + gret = [] + uret = [] + for g in block: + setmesstext(g) + g = postcrack(g, tab=tab + '\t') + # sort user routines to appear first + if 'name' in g and '__user__' in g['name']: + uret.append(g) + else: + gret.append(g) + return uret + gret + setmesstext(block) + if not isinstance(block, dict) and 'block' not in block: + raise Exception('postcrack: Expected block dictionary instead of ' + + str(block)) + if 'name' in block and not block['name'] == 'unknown_interface': + outmess(f"{tab}Block: {block['name']}\n", 0) + block = analyzeargs(block) + block = analyzecommon(block) + block['vars'] = analyzevars(block) + block['sortvars'] = sortvarnames(block['vars']) + if block.get('args'): + args = block['args'] + block['body'] = analyzebody(block, args, tab=tab) + + userisdefined = [] + if 'use' in block: + useblock = block['use'] + for k in list(useblock.keys()): + if '__user__' in k: + userisdefined.append(k) + else: + useblock = {} + name = '' + if 'name' in block: + name = block['name'] + # and not userisdefined: # Build a __user__ module + if block.get('externals'): + interfaced = [] + if 'interfaced' in block: + interfaced = block['interfaced'] + mvars = copy.copy(block['vars']) + if name: + mname = name + '__user__routines' + else: + mname = 'unknown__user__routines' + if mname in userisdefined: + i = 1 + while f"{mname}_{i}" in userisdefined: + i = i + 1 + 
mname = f"{mname}_{i}" + interface = {'block': 'interface', 'body': [], + 'vars': {}, 'name': name + '_user_interface'} + for e in block['externals']: + if e in interfaced: + edef = [] + j = -1 + for b in block['body']: + j = j + 1 + if b['block'] == 'interface': + i = -1 + for bb in b['body']: + i = i + 1 + if 'name' in bb and bb['name'] == e: + edef = copy.copy(bb) + del b['body'][i] + break + if edef: + if not b['body']: + del block['body'][j] + del interfaced[interfaced.index(e)] + break + interface['body'].append(edef) + elif e in mvars and not isexternal(mvars[e]): + interface['vars'][e] = mvars[e] + if interface['vars'] or interface['body']: + block['interfaced'] = interfaced + mblock = {'block': 'python module', 'body': [ + interface], 'vars': {}, 'name': mname, 'interfaced': block['externals']} + useblock[mname] = {} + usermodules.append(mblock) + if useblock: + block['use'] = useblock + return block + + +def sortvarnames(vars): + indep = [] + dep = [] + for v in list(vars.keys()): + if 'depend' in vars[v] and vars[v]['depend']: + dep.append(v) + else: + indep.append(v) + n = len(dep) + i = 0 + while dep: # XXX: How to catch dependence cycles correctly? + v = dep[0] + fl = 0 + for w in dep[1:]: + if w in vars[v]['depend']: + fl = 1 + break + if fl: + dep = dep[1:] + [v] + i = i + 1 + if i > n: + errmess('sortvarnames: failed to compute dependencies because' + ' of cyclic dependencies between ' + + ', '.join(dep) + '\n') + indep = indep + dep + break + else: + indep.append(v) + dep = dep[1:] + n = len(dep) + i = 0 + return indep + + +def analyzecommon(block): + if not hascommon(block): + return block + commonvars = [] + for k in list(block['common'].keys()): + comvars = [] + for e in block['common'][k]: + m = re.match( + r'\A\s*\b(?P.*?)\b\s*(\((?P.*?)\)|)\s*\Z', e, re.I) + if m: + dims = [] + if m.group('dims'): + dims = [x.strip() + for x in markoutercomma(m.group('dims')).split('@,@')] + n = rmbadname1(m.group('name').strip()) + if n in block['vars']: + if 'attrspec' in block['vars'][n]: + block['vars'][n]['attrspec'].append( + f"dimension({','.join(dims)})") + else: + block['vars'][n]['attrspec'] = [ + f"dimension({','.join(dims)})"] + elif dims: + block['vars'][n] = { + 'attrspec': [f"dimension({','.join(dims)})"]} + else: + block['vars'][n] = {} + if n not in commonvars: + commonvars.append(n) + else: + n = e + errmess( + f'analyzecommon: failed to extract "[()]" from "{e}" in common /{k}/.\n') + comvars.append(n) + block['common'][k] = comvars + if 'commonvars' not in block: + block['commonvars'] = commonvars + else: + block['commonvars'] = block['commonvars'] + commonvars + return block + + +def analyzebody(block, args, tab=''): + global usermodules, skipfuncs, onlyfuncs, f90modulevars + + setmesstext(block) + + maybe_private = { + key: value + for key, value in block['vars'].items() + if 'attrspec' not in value or 'public' not in value['attrspec'] + } + + body = [] + for b in block['body']: + b['parent_block'] = block + if b['block'] in ['function', 'subroutine']: + if args is not None and b['name'] not in args: + continue + else: + as_ = b['args'] + # Add private members to skipfuncs for gh-23879 + if b['name'] in maybe_private.keys(): + skipfuncs.append(b['name']) + if b['name'] in skipfuncs: + continue + if onlyfuncs and b['name'] not in onlyfuncs: + continue + b['saved_interface'] = crack2fortrangen( + b, '\n' + ' ' * 6, as_interface=True) + + else: + as_ = args + b = postcrack(b, as_, tab=tab + '\t') + if b['block'] in ['interface', 'abstract interface'] and \ + not 
+def analyzebody(block, args, tab=''):
+    global usermodules, skipfuncs, onlyfuncs, f90modulevars
+
+    setmesstext(block)
+
+    maybe_private = {
+        key: value
+        for key, value in block['vars'].items()
+        if 'attrspec' not in value or 'public' not in value['attrspec']
+    }
+
+    body = []
+    for b in block['body']:
+        b['parent_block'] = block
+        if b['block'] in ['function', 'subroutine']:
+            if args is not None and b['name'] not in args:
+                continue
+            else:
+                as_ = b['args']
+            # Add private members to skipfuncs for gh-23879
+            if b['name'] in maybe_private.keys():
+                skipfuncs.append(b['name'])
+            if b['name'] in skipfuncs:
+                continue
+            if onlyfuncs and b['name'] not in onlyfuncs:
+                continue
+            b['saved_interface'] = crack2fortrangen(
+                b, '\n' + ' ' * 6, as_interface=True)
+
+        else:
+            as_ = args
+        b = postcrack(b, as_, tab=tab + '\t')
+        if b['block'] in ['interface', 'abstract interface'] and \
+                not b['body'] and not b.get('implementedby'):
+            if 'f2pyenhancements' not in b:
+                continue
+        if b['block'].replace(' ', '') == 'pythonmodule':
+            usermodules.append(b)
+        else:
+            if b['block'] == 'module':
+                f90modulevars[b['name']] = b['vars']
+            body.append(b)
+    return body
+
+
+def buildimplicitrules(block):
+    setmesstext(block)
+    implicitrules = defaultimplicitrules
+    attrrules = {}
+    if 'implicit' in block:
+        if block['implicit'] is None:
+            implicitrules = None
+            if verbose > 1:
+                outmess(
+                    f"buildimplicitrules: no implicit rules for routine {repr(block['name'])}.\n")
+        else:
+            for k in list(block['implicit'].keys()):
+                if block['implicit'][k].get('typespec') not in ['static', 'automatic']:
+                    implicitrules[k] = block['implicit'][k]
+                else:
+                    attrrules[k] = block['implicit'][k]['typespec']
+    return implicitrules, attrrules
+
+
+def myeval(e, g=None, l=None):
+    """ Like `eval` but returns only integers and floats """
+    r = eval(e, g, l)
+    if type(r) in [int, float]:
+        return r
+    raise ValueError(f'r={r!r}')
+
+
+getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z', re.I)
+
+
+def getlincoef(e, xset):  # e = a*x+b ; x in xset
+    """
+    Obtain ``a`` and ``b`` when ``e == "a*x+b"``, where ``x`` is a symbol in
+    xset.
+
+    >>> getlincoef('2*x + 1', {'x'})
+    (2, 1, 'x')
+    >>> getlincoef('3*x + x*2 + 2 + 1', {'x'})
+    (5, 3, 'x')
+    >>> getlincoef('0', {'x'})
+    (0, 0, None)
+    >>> getlincoef('0*x', {'x'})
+    (0, 0, 'x')
+    >>> getlincoef('x*x', {'x'})
+    (None, None, None)
+
+    This can be tricked by sufficiently complex expressions
+
+    >>> getlincoef('(x - 0.5)*(x - 1.5)*(x - 1)*x + 2*x + 3', {'x'})
+    (2.0, 3.0, 'x')
+    """
+    try:
+        c = int(myeval(e, {}, {}))
+        return 0, c, None
+    except Exception:
+        pass
+    if getlincoef_re_1.match(e):
+        return 1, 0, e
+    len_e = len(e)
+    for x in xset:
+        if len(x) > len_e:
+            continue
+        if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
+            # skip function calls having x as an argument, e.g max(1, x)
+            continue
+        re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
+        m = re_1.match(e)
+        if m:
+            try:
+                m1 = re_1.match(e)
+                while m1:
+                    ee = f"{m1.group('before')}({0}){m1.group('after')}"
+                    m1 = re_1.match(ee)
+                b = myeval(ee, {}, {})
+                m1 = re_1.match(e)
+                while m1:
+                    ee = f"{m1.group('before')}({1}){m1.group('after')}"
+                    m1 = re_1.match(ee)
+                a = myeval(ee, {}, {}) - b
+                m1 = re_1.match(e)
+                while m1:
+                    ee = f"{m1.group('before')}({0.5}){m1.group('after')}"
+                    m1 = re_1.match(ee)
+                c = myeval(ee, {}, {})
+                # computing another point to be sure that expression is linear
+                m1 = re_1.match(e)
+                while m1:
+                    ee = f"{m1.group('before')}({1.5}){m1.group('after')}"
+                    m1 = re_1.match(ee)
+                c2 = myeval(ee, {}, {})
+                if (a * 0.5 + b == c and a * 1.5 + b == c2):
+                    return a, b, x
+            except Exception:
+                pass
+            break
+    return None, None, None
+
+
+word_pattern = re.compile(r'\b[a-z][\w$]*\b', re.I)
+
+
+def _get_depend_dict(name, vars, deps):
+    if name in vars:
+        words = vars[name].get('depend', [])
+
+        if '=' in vars[name] and not isstring(vars[name]):
+            for word in word_pattern.findall(vars[name]['=']):
+                # The word_pattern may return values that are not
+                # only variables, they can be string content for instance
+                if word not in words and word in vars and word != name:
+                    words.append(word)
+        for word in words[:]:
+            for w in deps.get(word, []) \
+                    or _get_depend_dict(word, vars, deps):
+                if w not in words:
+                    words.append(w)
+    else:
+        outmess(f'_get_depend_dict: no dependence info for {repr(name)}\n')
+        words = []
+    deps[name] = words
+    return words
+
+
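`getlincoef` above does no symbolic algebra: it substitutes probe values for the symbol and evaluates. Substituting 0 yields `b`, substituting 1 yields `a + b`, and two further probes at 0.5 and 1.5 reject expressions that merely pass through the first two points. A condensed version of the same idea (sketch only; the real code substitutes through a word-boundary regex, not `str.replace`):

```python
def probe_linear(expr, name):
    # Evaluate expr with `name` replaced by a literal value. Plain
    # str.replace is fine for this toy single-letter symbol; getlincoef
    # uses \b-anchored regexes to avoid touching other identifiers.
    def at(value):
        return eval(expr.replace(name, f'({value})'), {}, {})

    b = at(0)
    a = at(1) - b
    if at(0.5) == a * 0.5 + b and at(1.5) == a * 1.5 + b:
        return a, b
    return None

print(probe_linear('2*x + 1', 'x'))  # (2, 1)
print(probe_linear('x*x', 'x'))      # None: not linear
```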
+def _calc_depend_dict(vars):
+    names = list(vars.keys())
+    depend_dict = {}
+    for n in names:
+        _get_depend_dict(n, vars, depend_dict)
+    return depend_dict
+
+
+def get_sorted_names(vars):
+    depend_dict = _calc_depend_dict(vars)
+    names = []
+    for name in list(depend_dict.keys()):
+        if not depend_dict[name]:
+            names.append(name)
+            del depend_dict[name]
+    while depend_dict:
+        for name, lst in list(depend_dict.items()):
+            new_lst = [n for n in lst if n in depend_dict]
+            if not new_lst:
+                names.append(name)
+                del depend_dict[name]
+            else:
+                depend_dict[name] = new_lst
+    return [name for name in names if name in vars]
+
+
+def _kind_func(string):
+    # XXX: return something sensible.
+    if string[0] in "'\"":
+        string = string[1:-1]
+    if real16pattern.match(string):
+        return 8
+    elif real8pattern.match(string):
+        return 4
+    return 'kind(' + string + ')'
+
+
+def _selected_int_kind_func(r):
+    # XXX: This should be processor dependent
+    m = 10 ** r
+    if m <= 2 ** 8:
+        return 1
+    if m <= 2 ** 16:
+        return 2
+    if m <= 2 ** 32:
+        return 4
+    if m <= 2 ** 63:
+        return 8
+    if m <= 2 ** 128:
+        return 16
+    return -1
+
+
+def _selected_real_kind_func(p, r=0, radix=0):
+    # XXX: This should be processor dependent
+    # This is only verified for 0 <= p <= 20, possibly good for p <= 33 and above
+    if p < 7:
+        return 4
+    if p < 16:
+        return 8
+    machine = platform.machine().lower()
+    if machine.startswith(('aarch64', 'alpha', 'arm64', 'loongarch', 'mips', 'power', 'ppc', 'riscv', 's390x', 'sparc')):
+        if p <= 33:
+            return 16
+    elif p < 19:
+        return 10
+    elif p <= 33:
+        return 16
+    return -1
+
+
+def get_parameters(vars, global_params={}):
+    params = copy.copy(global_params)
+    g_params = copy.copy(global_params)
+    for name, func in [('kind', _kind_func),
+                       ('selected_int_kind', _selected_int_kind_func),
+                       ('selected_real_kind', _selected_real_kind_func), ]:
+        if name not in g_params:
+            g_params[name] = func
+    param_names = []
+    for n in get_sorted_names(vars):
+        if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
+            param_names.append(n)
+    kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+    selected_int_kind_re = re.compile(
+        r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+    selected_kind_re = re.compile(
+        r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)', re.I)
+    for n in param_names:
+        if '=' in vars[n]:
+            v = vars[n]['=']
+            if islogical(vars[n]):
+                v = v.lower()
+                for repl in [
+                        ('.false.', 'False'),
+                        ('.true.', 'True'),
+                        # TODO: test .eq., .neq., etc replacements.
+                ]:
+                    v = v.replace(*repl)
+
+            v = kind_re.sub(r'kind("\1")', v)
+            v = selected_int_kind_re.sub(r'selected_int_kind(\1)', v)
+
+            # We need to act according to the data.
+            # The easy case is if the data has a kind-specifier,
+            # then we may easily remove those specifiers.
+            # However, it may be that the user uses other specifiers...(!)
+            is_replaced = False
+
+            if 'kindselector' in vars[n]:
+                # Remove kind specifier (including those defined
+                # by parameters)
+                if 'kind' in vars[n]['kindselector']:
+                    orig_v_len = len(v)
+                    v = v.replace('_' + vars[n]['kindselector']['kind'], '')
+                    # Again, this will be true if even a single specifier
+                    # has been replaced, see comment above.
+                    is_replaced = len(v) < orig_v_len
+
+            if not is_replaced:
+                if not selected_kind_re.match(v):
+                    v_ = v.split('_')
+                    # In case there are additive parameters
+                    if len(v_) > 1:
+                        v = ''.join(v_[:-1]).lower().replace(v_[-1].lower(), '')
+
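+            # NOTE (editor's illustration, not from the original sources):
+            # for a declaration like
+            #     real(dp), parameter :: half = 0.5_dp
+            # vars[n]['kindselector']['kind'] == 'dp', so the replace() above
+            # rewrites the stored value '0.5_dp' to plain '0.5', which the
+            # evaluation in param_eval() below can digest; is_replaced
+            # records whether such a kind suffix was actually removed.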
+            # Currently this will not work for complex numbers.
+            # There is missing code for extracting a complex number,
+            # which may be defined in either of these:
+            #  a) (Re, Im)
+            #  b) cmplx(Re, Im)
+            #  c) dcmplx(Re, Im)
+            #  d) cmplx(Re, Im, <prec>)
+
+            if isdouble(vars[n]):
+                tt = list(v)
+                for m in real16pattern.finditer(v):
+                    tt[m.start():m.end()] = list(
+                        v[m.start():m.end()].lower().replace('d', 'e'))
+                v = ''.join(tt)
+
+            elif iscomplex(vars[n]):
+                outmess(f'get_parameters[TODO]: '
+                        f'implement evaluation of complex expression {v}\n')
+
+            dimspec = ([s.removeprefix('dimension').strip()
+                        for s in vars[n]['attrspec']
+                        if s.startswith('dimension')] or [None])[0]
+
+            # Handle _dp for gh-6624
+            # Also fixes gh-20460
+            if real16pattern.search(v):
+                v = 8
+            elif real8pattern.search(v):
+                v = 4
+            try:
+                params[n] = param_eval(v, g_params, params, dimspec=dimspec)
+            except Exception as msg:
+                params[n] = v
+                outmess(f'get_parameters: got "{msg}" on {n!r}\n')
+
+            if isstring(vars[n]) and isinstance(params[n], int):
+                params[n] = chr(params[n])
+            nl = n.lower()
+            if nl != n:
+                params[nl] = params[n]
+        else:
+            print(vars[n])
+            outmess(f'get_parameters:parameter {n!r} does not have value?!\n')
+    return params
+
+
+def _eval_length(length, params):
+    if length in ['(:)', '(*)', '*']:
+        return '(*)'
+    return _eval_scalar(length, params)
+
+
+_is_kind_number = re.compile(r'\d+_').match
+
+
+def _eval_scalar(value, params):
+    if _is_kind_number(value):
+        value = value.split('_')[0]
+    try:
+        # TODO: use symbolic from PR #19805
+        value = eval(value, {}, params)
+        value = (repr if isinstance(value, str) else str)(value)
+    except (NameError, SyntaxError, TypeError):
+        return value
+    except Exception as msg:
+        errmess('"%s" in evaluating %r '
+                '(available names: %s)\n'
+                % (msg, value, list(params.keys())))
+    return value
+
+
+def analyzevars(block):
+    """
+    Sets correct dimension information for each variable/parameter
+    """
+
+    global f90modulevars
+
+    setmesstext(block)
+    implicitrules, attrrules = buildimplicitrules(block)
+    vars = copy.copy(block['vars'])
+    if block['block'] == 'function' and block['name'] not in vars:
+        vars[block['name']] = {}
+    if '' in block['vars']:
+        del vars['']
+        if 'attrspec' in block['vars']['']:
+            gen = block['vars']['']['attrspec']
+            for n in set(vars) | {b['name'] for b in block['body']}:
+                for k in ['public', 'private']:
+                    if k in gen:
+                        vars[n] = setattrspec(vars.get(n, {}), k)
+    svars = []
+    args = block['args']
+    for a in args:
+        try:
+            vars[a]
+            svars.append(a)
+        except KeyError:
+            pass
+    for n in list(vars.keys()):
+        if n not in args:
+            svars.append(n)
+
+    params = get_parameters(vars, get_useparameters(block))
+    # At this point, params are read and interpreted, but
+    # the params used to define vars are not yet parsed
+    dep_matches = {}
+    name_match = re.compile(r'[A-Za-z][\w$]*').match
+    for v in list(vars.keys()):
+        m = name_match(v)
+        if m:
+            n = v[m.start():m.end()]
+            try:
+                dep_matches[n]
+            except KeyError:
+                dep_matches[n] = re.compile(r'.*\b%s\b' % (v), re.I).match
+    for n in svars:
+        if n[0] in list(attrrules.keys()):
+            vars[n] = setattrspec(vars[n], attrrules[n[0]])
+        if 'typespec' not in vars[n]:
+            if not ('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
+                if implicitrules:
+                    ln0 = n[0].lower()
+                    for k in list(implicitrules[ln0].keys()):
+                        if k == 'typespec' and implicitrules[ln0][k] == 'undefined':
+                            continue
+                        if k not in vars[n]:
+                            vars[n][k] = implicitrules[ln0][k]
+                        elif k == 'attrspec':
+                            for l in implicitrules[ln0][k]:
+                                vars[n] = setattrspec(vars[n], l)
+                elif n in block['args']:
+                    
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n' % ( + repr(n), block['name'])) + if 'charselector' in vars[n]: + if 'len' in vars[n]['charselector']: + l = vars[n]['charselector']['len'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['charselector']['len'] = l + + if 'kindselector' in vars[n]: + if 'kind' in vars[n]['kindselector']: + l = vars[n]['kindselector']['kind'] + try: + l = str(eval(l, {}, params)) + except Exception: + pass + vars[n]['kindselector']['kind'] = l + + dimension_exprs = {} + if 'attrspec' in vars[n]: + attr = vars[n]['attrspec'] + attr.reverse() + vars[n]['attrspec'] = [] + dim, intent, depend, check, note = None, None, None, None, None + for a in attr: + if a[:9] == 'dimension': + dim = (a[9:].strip())[1:-1] + elif a[:6] == 'intent': + intent = (a[6:].strip())[1:-1] + elif a[:6] == 'depend': + depend = (a[6:].strip())[1:-1] + elif a[:5] == 'check': + check = (a[5:].strip())[1:-1] + elif a[:4] == 'note': + note = (a[4:].strip())[1:-1] + else: + vars[n] = setattrspec(vars[n], a) + if intent: + if 'intent' not in vars[n]: + vars[n]['intent'] = [] + for c in [x.strip() for x in markoutercomma(intent).split('@,@')]: + # Remove spaces so that 'in out' becomes 'inout' + tmp = c.replace(' ', '') + if tmp not in vars[n]['intent']: + vars[n]['intent'].append(tmp) + intent = None + if note: + note = note.replace('\\n\\n', '\n\n') + note = note.replace('\\n ', '\n') + if 'note' not in vars[n]: + vars[n]['note'] = [note] + else: + vars[n]['note'].append(note) + note = None + if depend is not None: + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]): + if c not in vars[n]['depend']: + vars[n]['depend'].append(c) + depend = None + if check is not None: + if 'check' not in vars[n]: + vars[n]['check'] = [] + for c in [x.strip() for x in markoutercomma(check).split('@,@')]: + if c not in vars[n]['check']: + vars[n]['check'].append(c) + check = None + if dim and 'dimension' not in vars[n]: + vars[n]['dimension'] = [] + for d in rmbadname( + [x.strip() for x in markoutercomma(dim).split('@,@')] + ): + # d is the expression inside the dimension declaration + # Evaluate `d` with respect to params + try: + # the dimension for this variable depends on a + # previously defined parameter + d = param_parse(d, params) + except (ValueError, IndexError, KeyError): + outmess( + 'analyzevars: could not parse dimension for ' + f'variable {d!r}\n' + ) + + dim_char = ':' if d == ':' else '*' + if d == dim_char: + dl = [dim_char] + else: + dl = markoutercomma(d, ':').split('@:@') + if len(dl) == 2 and '*' in dl: # e.g. dimension(5:*) + dl = ['*'] + d = '*' + if len(dl) == 1 and dl[0] != dim_char: + dl = ['1', dl[0]] + if len(dl) == 2: + d1, d2 = map(symbolic.Expr.parse, dl) + dsize = d2 - d1 + 1 + d = dsize.tostring(language=symbolic.Language.C) + # find variables v that define d as a linear + # function, `d == a * v + b`, and store + # coefficients a and b for further analysis. + solver_and_deps = {} + for v in block['vars']: + s = symbolic.as_symbol(v) + if dsize.contains(s): + try: + a, b = dsize.linear_solve(s) + + def solve_v(s, a=a, b=b): + return (s - b) / a + + all_symbols = set(a.symbols()) + all_symbols.update(b.symbols()) + except RuntimeError as msg: + # d is not a linear function of v, + # however, if v can be determined + # from d using other means, + # implement the corresponding + # solve_v function here. 
+ solve_v = None + all_symbols = set(dsize.symbols()) + v_deps = { + s.data for s in all_symbols + if s.data in vars} + solver_and_deps[v] = solve_v, list(v_deps) + # Note that dsize may contain symbols that are + # not defined in block['vars']. Here we assume + # these correspond to Fortran/C intrinsic + # functions or that are defined by other + # means. We'll let the compiler validate the + # definiteness of such symbols. + dimension_exprs[d] = solver_and_deps + vars[n]['dimension'].append(d) + + if 'check' not in vars[n] and 'args' in block and n in block['args']: + # n is an argument that has no checks defined. Here we + # generate some consistency checks for n, and when n is an + # array, generate checks for its dimensions and construct + # initialization expressions. + n_deps = vars[n].get('depend', []) + n_checks = [] + n_is_input = l_or(isintent_in, isintent_inout, + isintent_inplace)(vars[n]) + if isarray(vars[n]): # n is array + for i, d in enumerate(vars[n]['dimension']): + coeffs_and_deps = dimension_exprs.get(d) + if coeffs_and_deps is None: + # d is `:` or `*` or a constant expression + pass + elif n_is_input: + # n is an input array argument and its shape + # may define variables used in dimension + # specifications. + for v, (solver, deps) in coeffs_and_deps.items(): + def compute_deps(v, deps): + for v1 in coeffs_and_deps.get(v, [None, []])[1]: + if v1 not in deps: + deps.add(v1) + compute_deps(v1, deps) + all_deps = set() + compute_deps(v, all_deps) + if (v in n_deps + or '=' in vars[v] + or 'depend' in vars[v]): + # Skip a variable that + # - n depends on + # - has user-defined initialization expression + # - has user-defined dependencies + continue + if solver is not None and v not in all_deps: + # v can be solved from d, hence, we + # make it an optional argument with + # initialization expression: + is_required = False + init = solver(symbolic.as_symbol( + f'shape({n}, {i})')) + init = init.tostring( + language=symbolic.Language.C) + vars[v]['='] = init + # n needs to be initialized before v. So, + # making v dependent on n and on any + # variables in solver or d. 
+ vars[v]['depend'] = [n] + deps + if 'check' not in vars[v]: + # add check only when no + # user-specified checks exist + vars[v]['check'] = [ + f'shape({n}, {i}) == {d}'] + else: + # d is a non-linear function on v, + # hence, v must be a required input + # argument that n will depend on + is_required = True + if 'intent' not in vars[v]: + vars[v]['intent'] = [] + if 'in' not in vars[v]['intent']: + vars[v]['intent'].append('in') + # v needs to be initialized before n + n_deps.append(v) + n_checks.append( + f'shape({n}, {i}) == {d}') + v_attr = vars[v].get('attrspec', []) + if not ('optional' in v_attr + or 'required' in v_attr): + v_attr.append( + 'required' if is_required else 'optional') + if v_attr: + vars[v]['attrspec'] = v_attr + if coeffs_and_deps is not None: + # extend v dependencies with ones specified in attrspec + for v, (solver, deps) in coeffs_and_deps.items(): + v_deps = vars[v].get('depend', []) + for aa in vars[v].get('attrspec', []): + if aa.startswith('depend'): + aa = ''.join(aa.split()) + v_deps.extend(aa[7:-1].split(',')) + if v_deps: + vars[v]['depend'] = list(set(v_deps)) + if n not in v_deps: + n_deps.append(v) + elif isstring(vars[n]): + if 'charselector' in vars[n]: + if '*' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['*'], + params) + vars[n]['charselector']['*'] = length + elif 'len' in vars[n]['charselector']: + length = _eval_length(vars[n]['charselector']['len'], + params) + del vars[n]['charselector']['len'] + vars[n]['charselector']['*'] = length + if n_checks: + vars[n]['check'] = n_checks + if n_deps: + vars[n]['depend'] = list(set(n_deps)) + + if '=' in vars[n]: + if 'attrspec' not in vars[n]: + vars[n]['attrspec'] = [] + if ('optional' not in vars[n]['attrspec']) and \ + ('required' not in vars[n]['attrspec']): + vars[n]['attrspec'].append('optional') + if 'depend' not in vars[n]: + vars[n]['depend'] = [] + for v, m in list(dep_matches.items()): + if m(vars[n]['=']): + vars[n]['depend'].append(v) + if not vars[n]['depend']: + del vars[n]['depend'] + if isscalar(vars[n]): + vars[n]['='] = _eval_scalar(vars[n]['='], params) + + for n in list(vars.keys()): + if n == block['name']: # n is block name + if 'note' in vars[n]: + block['note'] = vars[n]['note'] + if block['block'] == 'function': + if 'result' in block and block['result'] in vars: + vars[n] = appenddecl(vars[n], vars[block['result']]) + if 'prefix' in block: + pr = block['prefix'] + pr1 = pr.replace('pure', '') + ispure = (not pr == pr1) + pr = pr1.replace('recursive', '') + isrec = (not pr == pr1) + m = typespattern[0].match(pr) + if m: + typespec, selector, attr, edecl = cracktypespec0( + m.group('this'), m.group('after')) + kindselect, charselect, typename = cracktypespec( + typespec, selector) + vars[n]['typespec'] = typespec + try: + if block['result']: + vars[block['result']]['typespec'] = typespec + except Exception: + pass + if kindselect: + if 'kind' in kindselect: + try: + kindselect['kind'] = eval( + kindselect['kind'], {}, params) + except Exception: + pass + vars[n]['kindselector'] = kindselect + if charselect: + vars[n]['charselector'] = charselect + if typename: + vars[n]['typename'] = typename + if ispure: + vars[n] = setattrspec(vars[n], 'pure') + if isrec: + vars[n] = setattrspec(vars[n], 'recursive') + else: + outmess( + f"analyzevars: prefix ({repr(block['prefix'])}) were not used\n") + if block['block'] not in ['module', 'pythonmodule', 'python module', 'block data']: + if 'commonvars' in block: + neededvars = copy.copy(block['args'] + 
block['commonvars']) + else: + neededvars = copy.copy(block['args']) + for n in list(vars.keys()): + if l_or(isintent_callback, isintent_aux)(vars[n]): + neededvars.append(n) + if 'entry' in block: + neededvars.extend(list(block['entry'].keys())) + for k in list(block['entry'].keys()): + for n in block['entry'][k]: + if n not in neededvars: + neededvars.append(n) + if block['block'] == 'function': + if 'result' in block: + neededvars.append(block['result']) + else: + neededvars.append(block['name']) + if block['block'] in ['subroutine', 'function']: + name = block['name'] + if name in vars and 'intent' in vars[name]: + block['intent'] = vars[name]['intent'] + if block['block'] == 'type': + neededvars.extend(list(vars.keys())) + for n in list(vars.keys()): + if n not in neededvars: + del vars[n] + return vars + + +analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z', re.I) + + +def param_eval(v, g_params, params, dimspec=None): + """ + Creates a dictionary of indices and values for each parameter in a + parameter array to be evaluated later. + + WARNING: It is not possible to initialize multidimensional array + parameters e.g. dimension(-3:1, 4, 3:5) at this point. This is because in + Fortran initialization through array constructor requires the RESHAPE + intrinsic function. Since the right-hand side of the parameter declaration + is not executed in f2py, but rather at the compiled c/fortran extension, + later, it is not possible to execute a reshape of a parameter array. + One issue remains: if the user wants to access the array parameter from + python, we should either + 1) allow them to access the parameter array using python standard indexing + (which is often incompatible with the original fortran indexing) + 2) allow the parameter array to be accessed in python as a dictionary with + fortran indices as keys + We are choosing 2 for now. + """ + if dimspec is None: + try: + p = eval(v, g_params, params) + except Exception as msg: + p = v + outmess(f'param_eval: got "{msg}" on {v!r}\n') + return p + + # This is an array parameter. + # First, we parse the dimension information + if len(dimspec) < 2 or dimspec[::len(dimspec) - 1] != "()": + raise ValueError(f'param_eval: dimension {dimspec} can\'t be parsed') + dimrange = dimspec[1:-1].split(',') + if len(dimrange) == 1: + # e.g. dimension(2) or dimension(-1:1) + dimrange = dimrange[0].split(':') + # now, dimrange is a list of 1 or 2 elements + if len(dimrange) == 1: + bound = param_parse(dimrange[0], params) + dimrange = range(1, int(bound) + 1) + else: + lbound = param_parse(dimrange[0], params) + ubound = param_parse(dimrange[1], params) + dimrange = range(int(lbound), int(ubound) + 1) + else: + raise ValueError('param_eval: multidimensional array parameters ' + f'{dimspec} not supported') + + # Parse parameter value + v = (v[2:-2] if v.startswith('(/') else v).split(',') + v_eval = [] + for item in v: + try: + item = eval(item, g_params, params) + except Exception as msg: + outmess(f'param_eval: got "{msg}" on {item!r}\n') + v_eval.append(item) + + p = dict(zip(dimrange, v_eval)) + + return p + + +def param_parse(d, params): + """Recursively parse array dimensions. + + Parses the declaration of an array variable or parameter + `dimension` keyword, and is called recursively if the + dimension for this array is a previously defined parameter + (found in `params`). + + Parameters + ---------- + d : str + Fortran expression describing the dimension of an array. 
+    params : dict
+        Previously parsed parameters declared in the Fortran source file.
+
+    Returns
+    -------
+    out : str
+        Parsed dimension expression.
+
+    Examples
+    --------
+
+    * If the line being analyzed is
+
+      `integer, parameter, dimension(2) :: pa = (/ 3, 5 /)`
+
+      then `d = 2` and we return immediately, with
+
+    >>> d = '2'
+    >>> param_parse(d, params)
+    2
+
+    * If the line being analyzed is
+
+      `integer, parameter, dimension(pa) :: pb = (/1, 2, 3/)`
+
+      then `d = 'pa'`; since `pa` is a previously parsed parameter,
+      and `pa = 3`, we call `param_parse` recursively, to obtain
+
+    >>> d = 'pa'
+    >>> params = {'pa': 3}
+    >>> param_parse(d, params)
+    3
+
+    * If the line being analyzed is
+
+      `integer, parameter, dimension(pa(1)) :: pb = (/1, 2, 3/)`
+
+      then `d = 'pa(1)'`; since `pa` is a previously parsed parameter,
+      and `pa(1) = 3`, we call `param_parse` recursively, to obtain
+
+    >>> d = 'pa(1)'
+    >>> params = dict(pa={1: 3, 2: 5})
+    >>> param_parse(d, params)
+    3
+    """
+    if "(" in d:
+        # this dimension expression is an array
+        dname = d[:d.find("(")]
+        ddims = d[d.find("(") + 1:d.rfind(")")]
+        # this dimension expression is also a parameter;
+        # parse it recursively
+        index = int(param_parse(ddims, params))
+        return str(params[dname][index])
+    elif d in params:
+        return str(params[d])
+    else:
+        for p in params:
+            re_1 = re.compile(
+                r'(?P<before>.*?)\b' + p + r'\b(?P<after>.*)', re.I
+            )
+            m = re_1.match(d)
+            while m:
+                d = m.group('before') + \
+                    str(params[p]) + m.group('after')
+                m = re_1.match(d)
+        return d
+
+
+def expr2name(a, block, args=[]):
+    orig_a = a
+    a_is_expr = not analyzeargs_re_1.match(a)
+    if a_is_expr:  # `a` is an expression
+        implicitrules, attrrules = buildimplicitrules(block)
+        at = determineexprtype(a, block['vars'], implicitrules)
+        na = 'e_'
+        for c in a:
+            c = c.lower()
+            if c not in string.ascii_lowercase + string.digits:
+                c = '_'
+            na = na + c
+        if na[-1] == '_':
+            na = na + 'e'
+        else:
+            na = na + '_e'
+        a = na
+        while a in block['vars'] or a in block['args']:
+            a = a + 'r'
+    if a in args:
+        k = 1
+        while a + str(k) in args:
+            k = k + 1
+        a = a + str(k)
+    if a_is_expr:
+        block['vars'][a] = at
+    else:
+        if a not in block['vars']:
+            block['vars'][a] = block['vars'].get(orig_a, {})
+        if 'externals' in block and orig_a in block['externals'] + block['interfaced']:
+            block['vars'][a] = setattrspec(block['vars'][a], 'external')
+    return a
+
+
+def analyzeargs(block):
+    setmesstext(block)
+    implicitrules, _ = buildimplicitrules(block)
+    if 'args' not in block:
+        block['args'] = []
+    args = []
+    for a in block['args']:
+        a = expr2name(a, block, args)
+        args.append(a)
+    block['args'] = args
+    if 'entry' in block:
+        for k, args1 in list(block['entry'].items()):
+            for a in args1:
+                if a not in block['vars']:
+                    block['vars'][a] = {}
+
+    for b in block['body']:
+        if b['name'] in args:
+            if 'externals' not in block:
+                block['externals'] = []
+            if b['name'] not in block['externals']:
+                block['externals'].append(b['name'])
+    if 'result' in block and block['result'] not in block['vars']:
+        block['vars'][block['result']] = {}
+    return block
+
+
+determineexprtype_re_1 = re.compile(r'\A\(.+?,.+?\)\Z', re.I)
+determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(?P<name>\w+)|)\Z', re.I)
+determineexprtype_re_3 = re.compile(
+    r'\A[+-]?[\d.]+[-\d+de.]*(_(?P<name>\w+)|)\Z', re.I)
+determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z', re.I)
+determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z', re.I)
+
+
+def _ensure_exprdict(r):
+    if isinstance(r, int):
+        return {'typespec': 'integer'}
+    if 
isinstance(r, float): + return {'typespec': 'real'} + if isinstance(r, complex): + return {'typespec': 'complex'} + if isinstance(r, dict): + return r + raise AssertionError(repr(r)) + + +def determineexprtype(expr, vars, rules={}): + if expr in vars: + return _ensure_exprdict(vars[expr]) + expr = expr.strip() + if determineexprtype_re_1.match(expr): + return {'typespec': 'complex'} + m = determineexprtype_re_2.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') + return {'typespec': 'integer'} + m = determineexprtype_re_3.match(expr) + if m: + if 'name' in m.groupdict() and m.group('name'): + outmess( + f'determineexprtype: selected kind types not supported ({repr(expr)})\n') + return {'typespec': 'real'} + for op in ['+', '-', '*', '/']: + for e in [x.strip() for x in markoutercomma(expr, comma=op).split('@' + op + '@')]: + if e in vars: + return _ensure_exprdict(vars[e]) + t = {} + if determineexprtype_re_4.match(expr): # in parenthesis + t = determineexprtype(expr[1:-1], vars, rules) + else: + m = determineexprtype_re_5.match(expr) + if m: + rn = m.group('name') + t = determineexprtype(m.group('name'), vars, rules) + if t and 'attrspec' in t: + del t['attrspec'] + if not t: + if rn[0] in rules: + return _ensure_exprdict(rules[rn[0]]) + if expr[0] in '\'"': + return {'typespec': 'character', 'charselector': {'*': '*'}} + if not t: + outmess( + f'determineexprtype: could not determine expressions ({repr(expr)}) type.\n') + return t + +###### + + +def crack2fortrangen(block, tab='\n', as_interface=False): + global skipfuncs, onlyfuncs + + setmesstext(block) + ret = '' + if isinstance(block, list): + for g in block: + if g and g['block'] in ['function', 'subroutine']: + if g['name'] in skipfuncs: + continue + if onlyfuncs and g['name'] not in onlyfuncs: + continue + ret = ret + crack2fortrangen(g, tab, as_interface=as_interface) + return ret + prefix = '' + name = '' + args = '' + blocktype = block['block'] + if blocktype == 'program': + return '' + argsl = [] + if 'name' in block: + name = block['name'] + if 'args' in block: + vars = block['vars'] + for a in block['args']: + a = expr2name(a, block, argsl) + if not isintent_callback(vars[a]): + argsl.append(a) + if block['block'] == 'function' or argsl: + args = f"({','.join(argsl)})" + f2pyenhancements = '' + if 'f2pyenhancements' in block: + for k in list(block['f2pyenhancements'].keys()): + f2pyenhancements = '%s%s%s %s' % ( + f2pyenhancements, tab + tabchar, k, block['f2pyenhancements'][k]) + intent_lst = block.get('intent', [])[:] + if blocktype == 'function' and 'callback' in intent_lst: + intent_lst.remove('callback') + if intent_lst: + f2pyenhancements = '%s%sintent(%s) %s' %\ + (f2pyenhancements, tab + tabchar, + ','.join(intent_lst), name) + use = '' + if 'use' in block: + use = use2fortran(block['use'], tab + tabchar) + common = '' + if 'common' in block: + common = common2fortran(block['common'], tab + tabchar) + if name == 'unknown_interface': + name = '' + result = '' + if 'result' in block: + result = f" result ({block['result']})" + if block['result'] not in argsl: + argsl.append(block['result']) + body = crack2fortrangen(block['body'], tab + tabchar, as_interface=as_interface) + vars = vars2fortran( + block, block['vars'], argsl, tab + tabchar, as_interface=as_interface) + mess = '' + if 'from' in block and not as_interface: + mess = f"! 
in {block['from']}" + if 'entry' in block: + entry_stmts = '' + for k, i in list(block['entry'].items()): + entry_stmts = f"{entry_stmts}{tab + tabchar}entry {k}({','.join(i)})" + body = body + entry_stmts + if blocktype == 'block data' and name == '_BLOCK_DATA_': + name = '' + ret = '%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s' % ( + tab, prefix, blocktype, name, args, result, mess, f2pyenhancements, use, vars, common, body, tab, blocktype, name) + return ret + + +def common2fortran(common, tab=''): + ret = '' + for k in list(common.keys()): + if k == '_BLNK_': + ret = f"{ret}{tab}common {','.join(common[k])}" + else: + ret = f"{ret}{tab}common /{k}/ {','.join(common[k])}" + return ret + + +def use2fortran(use, tab=''): + ret = '' + for m in list(use.keys()): + ret = f'{ret}{tab}use {m},' + if use[m] == {}: + if ret and ret[-1] == ',': + ret = ret[:-1] + continue + if 'only' in use[m] and use[m]['only']: + ret = f'{ret} only:' + if 'map' in use[m] and use[m]['map']: + c = ' ' + for k in list(use[m]['map'].keys()): + if k == use[m]['map'][k]: + ret = f'{ret}{c}{k}' + c = ',' + else: + ret = f"{ret}{c}{k}=>{use[m]['map'][k]}" + c = ',' + if ret and ret[-1] == ',': + ret = ret[:-1] + return ret + + +def true_intent_list(var): + lst = var['intent'] + ret = [] + for intent in lst: + try: + f = globals()[f'isintent_{intent}'] + except KeyError: + pass + else: + if f(var): + ret.append(intent) + return ret + + +def vars2fortran(block, vars, args, tab='', as_interface=False): + setmesstext(block) + ret = '' + nout = [] + for a in args: + if a in block['vars']: + nout.append(a) + if 'commonvars' in block: + for a in block['commonvars']: + if a in vars: + if a not in nout: + nout.append(a) + else: + errmess( + f'vars2fortran: Confused?!: "{a}" is not defined in vars.\n') + if 'varnames' in block: + nout.extend(block['varnames']) + if not as_interface: + for a in list(vars.keys()): + if a not in nout: + nout.append(a) + for a in nout: + if 'depend' in vars[a]: + for d in vars[a]['depend']: + if d in vars and 'depend' in vars[d] and a in vars[d]['depend']: + errmess( + f'vars2fortran: Warning: cross-dependence between variables "{a}" and "{d}\"\n') + if 'externals' in block and a in block['externals']: + if isintent_callback(vars[a]): + ret = f'{ret}{tab}intent(callback) {a}' + ret = f'{ret}{tab}external {a}' + if isoptional(vars[a]): + ret = f'{ret}{tab}optional {a}' + if a in vars and 'typespec' not in vars[a]: + continue + cont = 1 + for b in block['body']: + if a == b['name'] and b['block'] == 'function': + cont = 0 + break + if cont: + continue + if a not in vars: + show(vars) + outmess(f'vars2fortran: No definition for argument "{a}".\n') + continue + if a == block['name']: + if block['block'] != 'function' or block.get('result'): + # 1) skip declaring a variable that name matches with + # subroutine name + # 2) skip declaring function when its type is + # declared via `result` construction + continue + if 'typespec' not in vars[a]: + if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']: + if a in args: + ret = f'{ret}{tab}external {a}' + continue + show(vars[a]) + outmess(f'vars2fortran: No typespec for argument "{a}".\n') + continue + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = f"{vardef}({vars[a]['typename']})" + selector = {} + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + if '*' in selector: + if selector['*'] in ['*', ':']: + vardef = 
f"{vardef}*({selector['*']})" + else: + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" + c = ' ' + if 'attrspec' in vars[a]: + attr = [l for l in vars[a]['attrspec'] + if l not in ['external']] + if as_interface and 'intent(in)' in attr and 'intent(out)' in attr: + # In Fortran, intent(in, out) are conflicting while + # intent(in, out) can be specified only via + # `!f2py intent(out) ..`. + # So, for the Fortran interface, we'll drop + # intent(out) to resolve the conflict. + attr.remove('intent(out)') + if attr: + vardef = f"{vardef}, {','.join(attr)}" + c = ',' + if 'dimension' in vars[a]: + vardef = f"{vardef}{c}dimension({','.join(vars[a]['dimension'])})" + c = ',' + if 'intent' in vars[a]: + lst = true_intent_list(vars[a]) + if lst: + vardef = f"{vardef}{c}intent({','.join(lst)})" + c = ',' + if 'check' in vars[a]: + vardef = f"{vardef}{c}check({','.join(vars[a]['check'])})" + c = ',' + if 'depend' in vars[a]: + vardef = f"{vardef}{c}depend({','.join(vars[a]['depend'])})" + c = ',' + if '=' in vars[a]: + v = vars[a]['='] + if vars[a]['typespec'] in ['complex', 'double complex']: + try: + v = eval(v) + v = f'({v.real},{v.imag})' + except Exception: + pass + vardef = f'{vardef} :: {a}={v}' + else: + vardef = f'{vardef} :: {a}' + ret = f'{ret}{tab}{vardef}' + return ret +###### + + +# We expose post_processing_hooks as global variable so that +# user-libraries could register their own hooks to f2py. +post_processing_hooks = [] + + +def crackfortran(files): + global usermodules, post_processing_hooks + + outmess('Reading fortran codes...\n', 0) + readfortrancode(files, crackline) + outmess('Post-processing...\n', 0) + usermodules = [] + postlist = postcrack(grouplist[0]) + outmess('Applying post-processing hooks...\n', 0) + for hook in post_processing_hooks: + outmess(f' {hook.__name__}\n', 0) + postlist = traverse(postlist, hook) + outmess('Post-processing (stage 2)...\n', 0) + postlist = postcrack2(postlist) + return usermodules + postlist + + +def crack2fortran(block): + global f2py_version + + pyf = crack2fortrangen(block) + '\n' + header = """! -*- f90 -*- +! Note: the context of this file is case sensitive. +""" + footer = """ +! This file was auto-generated with f2py (version:%s). +! See: +! https://web.archive.org/web/20140822061353/http://cens.ioc.ee/projects/f2py2e +""" % (f2py_version) + return header + pyf + footer + + +def _is_visit_pair(obj): + return (isinstance(obj, tuple) + and len(obj) == 2 + and isinstance(obj[0], (int, str))) + + +def traverse(obj, visit, parents=[], result=None, *args, **kwargs): + '''Traverse f2py data structure with the following visit function: + + def visit(item, parents, result, *args, **kwargs): + """ + + parents is a list of key-"f2py data structure" pairs from which + items are taken from. + + result is a f2py data structure that is filled with the + return value of the visit function. + + item is 2-tuple (index, value) if parents[-1][1] is a list + item is 2-tuple (key, value) if parents[-1][1] is a dict + + The return value of visit must be None, or of the same kind as + item, that is, if parents[-1] is a list, the return value must + be 2-tuple (new_index, new_value), or if parents[-1] is a + dict, the return value must be 2-tuple (new_key, new_value). 
+
+        If new_index or new_value is None, the return value of visit
+        is ignored, that is, it will not be added to the result.
+
+        If the return value is None, the content of obj will be
+        traversed, otherwise not.
+        """
+    '''
+
+    if _is_visit_pair(obj):
+        if obj[0] == 'parent_block':
+            # avoid infinite recursion
+            return obj
+        new_result = visit(obj, parents, result, *args, **kwargs)
+        if new_result is not None:
+            assert _is_visit_pair(new_result)
+            return new_result
+        parent = obj
+        result_key, obj = obj
+    else:
+        parent = (None, obj)
+        result_key = None
+
+    if isinstance(obj, list):
+        new_result = []
+        for index, value in enumerate(obj):
+            new_index, new_item = traverse((index, value), visit,
+                                           parents + [parent], result,
+                                           *args, **kwargs)
+            if new_index is not None:
+                new_result.append(new_item)
+    elif isinstance(obj, dict):
+        new_result = {}
+        for key, value in obj.items():
+            new_key, new_value = traverse((key, value), visit,
+                                          parents + [parent], result,
+                                          *args, **kwargs)
+            if new_key is not None:
+                new_result[new_key] = new_value
+    else:
+        new_result = obj
+
+    if result_key is None:
+        return new_result
+    return result_key, new_result
+
+
+def character_backward_compatibility_hook(item, parents, result,
+                                          *args, **kwargs):
+    """Previously, Fortran character was incorrectly treated as
+    character*1. This hook fixes the usage of the corresponding
+    variables in `check`, `dimension`, `=`, and `callstatement`
+    expressions.
+
+    The usage of `char*` in `callprotoargument` expression can be left
+    unchanged because C `character` is C typedef of `char`, although,
+    new implementations should use `character*` in the corresponding
+    expressions.
+
+    See https://github.com/numpy/numpy/pull/19388 for more information.
+
+    """
+    parent_key, parent_value = parents[-1]
+    key, value = item
+
+    def fix_usage(varname, value):
+        value = re.sub(r'[*]\s*\b' + varname + r'\b', varname, value)
+        value = re.sub(r'\b' + varname + r'\b\s*[\[]\s*0\s*[\]]',
+                       varname, value)
+        return value
+
+    if parent_key in ['dimension', 'check']:
+        assert parents[-3][0] == 'vars'
+        vars_dict = parents[-3][1]
+    elif key == '=':
+        assert parents[-2][0] == 'vars'
+        vars_dict = parents[-2][1]
+    else:
+        vars_dict = None
+
+    new_value = None
+    if vars_dict is not None:
+        new_value = value
+        for varname, vd in vars_dict.items():
+            if ischaracter(vd):
+                new_value = fix_usage(varname, new_value)
+    elif key == 'callstatement':
+        vars_dict = parents[-2][1]['vars']
+        new_value = value
+        for varname, vd in vars_dict.items():
+            if ischaracter(vd):
+                # replace all occurrences of `<varname>` with
+                # `&<varname>` in argument passing
+                new_value = re.sub(
+                    r'(?<![&])\b' + varname + r'\b', '&' + varname, new_value)
+
+    if new_value is not None:
+        if new_value != value:
+            # We report the replacements here so that downstream
+            # software could update their source codes
+            # accordingly. However, such updates are recommended only
+            # when BC with numpy 1.21 or older is not required.
+            outmess(f'character_bc_hook[{parent_key}.{key}]:'
+                    f' replaced `{value}` -> `{new_value}`\n', 1)
+    return (key, new_value)
+
+
+post_processing_hooks.append(character_backward_compatibility_hook)
+
+
+if __name__ == "__main__":
+    files = []
+    funcs = []
+    f = 1
+    f2 = 0
+    f3 = 0
+    showblocklist = 0
+    for l in sys.argv[1:]:
+        if l == '':
+            pass
+        elif l[0] == ':':
+            f = 0
+        elif l == '-quiet':
+            quiet = 1
+            verbose = 0
+        elif l == '-verbose':
+            verbose = 2
+            quiet = 0
+        elif l == '-fix':
+            if strictf77:
+                outmess(
+                    'Use option -f90 before -fix if Fortran 90 code is in fix form.\n', 0)
+            skipemptyends = 1
+            sourcecodeform = 'fix'
+        elif l == '-skipemptyends':
+            skipemptyends = 1
+        elif l == '--ignore-contains':
+            ignorecontains = 1
+        elif l == '-f77':
+            strictf77 = 1
+            sourcecodeform = 'fix'
+        elif l == '-f90':
+            strictf77 = 0
+            sourcecodeform = 'free'
+            skipemptyends = 1
+        elif l == '-h':
+            f2 = 1
+        elif l == '-show':
+            showblocklist = 1
+        elif l == '-m':
+            f3 = 1
+        elif l[0] == '-':
+            errmess(f'Unknown option {repr(l)}\n')
+        elif f2:
+            f2 = 0
+            pyffilename = l
+        elif f3:
+            f3 = 0
+            f77modulename = l
+        elif f:
+            try:
+                open(l).close()
+                files.append(l)
+            except OSError as detail:
+                errmess(f'OSError: {detail!s}\n')
+        else:
+            funcs.append(l)
+    if not strictf77 and f77modulename and not skipemptyends:
+        outmess("""\
+  Warning: You have specified module name for non Fortran 77 code that
+  should not need one (except if you are scanning F90 code for non
+  module blocks but then you should use flag -skipemptyends and also
+  be sure that the files do not contain programs without program
+  statement).
+""", 0)
+
+    postlist = crackfortran(files)
+    if pyffilename:
+        outmess(f'Writing fortran code to file {repr(pyffilename)}\n', 0)
+        pyf = crack2fortran(postlist)
+        with open(pyffilename, 'w') as f:
+            f.write(pyf)
+    if showblocklist:
+        show(postlist)
diff --git a/python/numpy/f2py/crackfortran.pyi b/python/numpy/f2py/crackfortran.pyi
new file mode 100644
index 000000000..6b08f8784
--- /dev/null
+++ b/python/numpy/f2py/crackfortran.pyi
@@ -0,0 +1,258 @@
+import re
+from collections.abc import Callable, Iterable, Mapping
+from typing import IO, Any, Concatenate, Final, Never, ParamSpec, TypeAlias, overload
+from typing import Literal as L
+
+from _typeshed import StrOrBytesPath, StrPath
+
+from .__version__ import version
+from .auxfuncs import isintent_dict as isintent_dict
+
+###
+
+_Tss = ParamSpec("_Tss")
+
+_VisitResult: TypeAlias = list[Any] | dict[str, Any] | None
+_VisitItem: TypeAlias = tuple[str | None, _VisitResult]
+_VisitFunc: TypeAlias = Callable[Concatenate[_VisitItem, list[_VisitItem], _VisitResult, _Tss], _VisitItem | None]
+
+###
+
+COMMON_FREE_EXTENSIONS: Final[list[str]] = ...
+COMMON_FIXED_EXTENSIONS: Final[list[str]] = ...
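As a usage sketch (not part of this patch): the `crackfortran`/`crack2fortran`
pair wired up in the `__main__` block above can also be driven directly from
Python. This assumes the standard `numpy.f2py` import path; `example.f90` is a
hypothetical input file.

    # Minimal sketch: parse Fortran sources, then emit signature text,
    # mirroring what the -h path of the __main__ block above does.
    from numpy.f2py import crackfortran

    blocks = crackfortran.crackfortran(['example.f90'])  # list of block dicts
    pyf = crackfortran.crack2fortran(blocks)             # .pyf-style text
    print(pyf)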
+
+f2py_version: Final = version
+tabchar: Final[str] = " "
+
+f77modulename: str
+pyffilename: str
+sourcecodeform: L["fix", "free"]
+strictf77: L[0, 1]
+quiet: L[0, 1]
+verbose: L[0, 1, 2]
+skipemptyends: L[0, 1]
+ignorecontains: L[1]
+dolowercase: L[1]
+
+beginpattern: str | re.Pattern[str]
+currentfilename: str
+filepositiontext: str
+expectbegin: L[0, 1]
+gotnextfile: L[0, 1]
+neededmodule: int
+skipblocksuntil: int
+groupcounter: int
+groupname: dict[int, str] | str
+groupcache: dict[int, dict[str, Any]] | None
+grouplist: dict[int, list[dict[str, Any]]] | None
+previous_context: tuple[str, str, int] | None
+
+f90modulevars: dict[str, dict[str, Any]] = {}
+debug: list[Never] = []
+include_paths: list[str] = []
+onlyfuncs: list[str] = []
+skipfuncs: list[str] = []
+skipfunctions: Final[list[str]] = []
+usermodules: Final[list[dict[str, Any]]] = []
+
+defaultimplicitrules: Final[dict[str, dict[str, str]]] = {}
+badnames: Final[dict[str, str]] = {}
+invbadnames: Final[dict[str, str]] = {}
+
+beforethisafter: Final[str] = ...
+fortrantypes: Final[str] = ...
+groupbegins77: Final[str] = ...
+groupbegins90: Final[str] = ...
+groupends: Final[str] = ...
+endifs: Final[str] = ...
+moduleprocedures: Final[str] = ...
+
+beginpattern77: Final[tuple[re.Pattern[str], L["begin"]]] = ...
+beginpattern90: Final[tuple[re.Pattern[str], L["begin"]]] = ...
+callpattern: Final[tuple[re.Pattern[str], L["call"]]] = ...
+callfunpattern: Final[tuple[re.Pattern[str], L["callfun"]]] = ...
+commonpattern: Final[tuple[re.Pattern[str], L["common"]]] = ...
+containspattern: Final[tuple[re.Pattern[str], L["contains"]]] = ...
+datapattern: Final[tuple[re.Pattern[str], L["data"]]] = ...
+dimensionpattern: Final[tuple[re.Pattern[str], L["dimension"]]] = ...
+endifpattern: Final[tuple[re.Pattern[str], L["endif"]]] = ...
+endpattern: Final[tuple[re.Pattern[str], L["end"]]] = ...
+entrypattern: Final[tuple[re.Pattern[str], L["entry"]]] = ...
+externalpattern: Final[tuple[re.Pattern[str], L["external"]]] = ...
+f2pyenhancementspattern: Final[tuple[re.Pattern[str], L["f2pyenhancements"]]] = ...
+formatpattern: Final[tuple[re.Pattern[str], L["format"]]] = ...
+functionpattern: Final[tuple[re.Pattern[str], L["begin"]]] = ...
+implicitpattern: Final[tuple[re.Pattern[str], L["implicit"]]] = ...
+intentpattern: Final[tuple[re.Pattern[str], L["intent"]]] = ...
+intrinsicpattern: Final[tuple[re.Pattern[str], L["intrinsic"]]] = ...
+optionalpattern: Final[tuple[re.Pattern[str], L["optional"]]] = ...
+moduleprocedurepattern: Final[tuple[re.Pattern[str], L["moduleprocedure"]]] = ...
+multilinepattern: Final[tuple[re.Pattern[str], L["multiline"]]] = ...
+parameterpattern: Final[tuple[re.Pattern[str], L["parameter"]]] = ...
+privatepattern: Final[tuple[re.Pattern[str], L["private"]]] = ...
+publicpattern: Final[tuple[re.Pattern[str], L["public"]]] = ...
+requiredpattern: Final[tuple[re.Pattern[str], L["required"]]] = ...
+subroutinepattern: Final[tuple[re.Pattern[str], L["begin"]]] = ...
+typespattern: Final[tuple[re.Pattern[str], L["type"]]] = ...
+usepattern: Final[tuple[re.Pattern[str], L["use"]]] = ...
+
+analyzeargs_re_1: Final[re.Pattern[str]] = ...
+callnameargspattern: Final[re.Pattern[str]] = ...
+charselector: Final[re.Pattern[str]] = ...
+crackline_bind_1: Final[re.Pattern[str]] = ...
+crackline_bindlang: Final[re.Pattern[str]] = ...
+crackline_re_1: Final[re.Pattern[str]] = ...
+determineexprtype_re_1: Final[re.Pattern[str]] = ...
+determineexprtype_re_2: Final[re.Pattern[str]] = ...
+determineexprtype_re_3: Final[re.Pattern[str]] = ... +determineexprtype_re_4: Final[re.Pattern[str]] = ... +determineexprtype_re_5: Final[re.Pattern[str]] = ... +getlincoef_re_1: Final[re.Pattern[str]] = ... +kindselector: Final[re.Pattern[str]] = ... +lenarraypattern: Final[re.Pattern[str]] = ... +lenkindpattern: Final[re.Pattern[str]] = ... +namepattern: Final[re.Pattern[str]] = ... +nameargspattern: Final[re.Pattern[str]] = ... +operatorpattern: Final[re.Pattern[str]] = ... +real16pattern: Final[re.Pattern[str]] = ... +real8pattern: Final[re.Pattern[str]] = ... +selectpattern: Final[re.Pattern[str]] = ... +typedefpattern: Final[re.Pattern[str]] = ... +typespattern4implicit: Final[re.Pattern[str]] = ... +word_pattern: Final[re.Pattern[str]] = ... + +post_processing_hooks: Final[list[_VisitFunc[...]]] = [] + +# +def outmess(line: str, flag: int = 1) -> None: ... +def reset_global_f2py_vars() -> None: ... + +# +def rmbadname1(name: str) -> str: ... +def undo_rmbadname1(name: str) -> str: ... +def rmbadname(names: Iterable[str]) -> list[str]: ... +def undo_rmbadname(names: Iterable[str]) -> list[str]: ... + +# +def openhook(filename: StrPath, mode: str) -> IO[Any]: ... +def is_free_format(fname: StrPath) -> bool: ... +def readfortrancode( + ffile: StrOrBytesPath | Iterable[StrOrBytesPath], + dowithline: Callable[[str, int], object] = ..., + istop: int = 1, +) -> None: ... + +# +def split_by_unquoted(line: str, characters: str) -> tuple[str, str]: ... + +# +def crackline(line: str, reset: int = 0) -> None: ... +def markouterparen(line: str) -> str: ... +def markoutercomma(line: str, comma: str = ",") -> str: ... +def unmarkouterparen(line: str) -> str: ... +def appenddecl(decl: Mapping[str, object] | None, decl2: Mapping[str, object] | None, force: int = 1) -> dict[str, Any]: ... + +# +def parse_name_for_bind(line: str) -> tuple[str, str | None]: ... +def analyzeline(m: re.Match[str], case: str, line: str) -> None: ... +def appendmultiline(group: dict[str, Any], context_name: str, ml: str) -> None: ... +def cracktypespec0(typespec: str, ll: str | None) -> tuple[str, str | None, str | None, str | None]: ... + +# +def removespaces(expr: str) -> str: ... +def markinnerspaces(line: str) -> str: ... +def updatevars(typespec: str, selector: str | None, attrspec: str, entitydecl: str) -> str: ... +def cracktypespec(typespec: str, selector: str | None) -> tuple[dict[str, str] | None, dict[str, str] | None, str | None]: ... + +# +def setattrspec(decl: dict[str, list[str]], attr: str | None, force: int = 0) -> dict[str, list[str]]: ... +def setkindselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def setcharselector(decl: dict[str, dict[str, str]], sel: dict[str, str], force: int = 0) -> dict[str, dict[str, str]]: ... +def getblockname(block: Mapping[str, object], unknown: str = "unknown") -> str: ... +def setmesstext(block: Mapping[str, object]) -> None: ... +def get_usedict(block: Mapping[str, object]) -> dict[str, str]: ... +def get_useparameters(block: Mapping[str, object], param_map: Mapping[str, str] | None = None) -> dict[str, str]: ... + +# +@overload +def postcrack2( + block: dict[str, Any], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> dict[str, str | Any]: ... +@overload +def postcrack2( + block: list[dict[str, Any]], + tab: str = "", + param_map: Mapping[str, str] | None = None, +) -> list[dict[str, str | Any]]: ... 
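The expression helpers stubbed above work together: `determineexprtype` (see
the implementation earlier in this patch) calls `markoutercomma` and then
splits on the `@<op>@` markers it inserts. A rough sketch of both, with the
expected results inferred from those regexes and from the
`.split('@' + op + '@')` call in `determineexprtype`; treat the outputs as
illustrative rather than authoritative:

    # Sketch: top-level commas are marked, nested ones are protected.
    from numpy.f2py.crackfortran import determineexprtype, markoutercomma

    markoutercomma('a,b(c,d),e')    # -> 'a@,@b(c,d)@,@e'

    determineexprtype('3', {})      # -> {'typespec': 'integer'}
    determineexprtype('3.0d0', {})  # -> {'typespec': 'real'}
    determineexprtype("'abc'", {})  # -> {'typespec': 'character',
                                    #     'charselector': {'*': '*'}}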
+ +# +@overload +def postcrack(block: dict[str, Any], args: Mapping[str, str] | None = None, tab: str = "") -> dict[str, Any]: ... +@overload +def postcrack(block: list[dict[str, str]], args: Mapping[str, str] | None = None, tab: str = "") -> list[dict[str, Any]]: ... + +# +def sortvarnames(vars: Mapping[str, object]) -> list[str]: ... +def analyzecommon(block: Mapping[str, object]) -> dict[str, Any]: ... +def analyzebody(block: Mapping[str, object], args: Mapping[str, str], tab: str = "") -> list[dict[str, Any]]: ... +def buildimplicitrules(block: Mapping[str, object]) -> tuple[dict[str, dict[str, str]], dict[str, str]]: ... +def myeval(e: str, g: object | None = None, l: object | None = None) -> float: ... + +# +def getlincoef(e: str, xset: set[str]) -> tuple[float | None, float | None, str | None]: ... + +# +def get_sorted_names(vars: Mapping[str, Mapping[str, str]]) -> list[str]: ... +def get_parameters(vars: Mapping[str, Mapping[str, str]], global_params: dict[str, str] = {}) -> dict[str, str]: ... + +# +def analyzevars(block: Mapping[str, Any]) -> dict[str, dict[str, str]]: ... + +# +def param_eval(v: str, g_params: dict[str, Any], params: Mapping[str, object], dimspec: str | None = None) -> dict[str, Any]: ... +def param_parse(d: str, params: Mapping[str, str]) -> str: ... +def expr2name(a: str, block: Mapping[str, object], args: list[str] = []) -> str: ... +def analyzeargs(block: Mapping[str, object]) -> dict[str, Any]: ... + +# +def determineexprtype(expr: str, vars: Mapping[str, object], rules: dict[str, Any] = {}) -> dict[str, Any]: ... +def crack2fortrangen(block: Mapping[str, object], tab: str = "\n", as_interface: bool = False) -> str: ... +def common2fortran(common: Mapping[str, object], tab: str = "") -> str: ... +def use2fortran(use: Mapping[str, object], tab: str = "") -> str: ... +def true_intent_list(var: dict[str, list[str]]) -> list[str]: ... +def vars2fortran( + block: Mapping[str, Mapping[str, object]], + vars: Mapping[str, object], + args: Mapping[str, str], + tab: str = "", + as_interface: bool = False, +) -> str: ... + +# +def crackfortran(files: StrOrBytesPath | Iterable[StrOrBytesPath]) -> list[dict[str, Any]]: ... +def crack2fortran(block: Mapping[str, Any]) -> str: ... + +# +def traverse( + obj: tuple[str | None, _VisitResult], + visit: _VisitFunc[_Tss], + parents: list[tuple[str | None, _VisitResult]] = [], + result: list[Any] | dict[str, Any] | None = None, + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> _VisitItem | _VisitResult: ... + +# +def character_backward_compatibility_hook( + item: _VisitItem, + parents: list[_VisitItem], + result: object, # ignored + *args: object, # ignored + **kwargs: object, # ignored +) -> _VisitItem | None: ... 
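Because `post_processing_hooks` is a module-level list (see crackfortran.py
above), downstream code can register its own visitor alongside
`character_backward_compatibility_hook`. A minimal sketch of a conforming
`_VisitFunc`; the `'note'` key it rewrites is only an illustrative target:

    # Sketch: a visit function usable with traverse()/post_processing_hooks.
    from numpy.f2py import crackfortran

    def strip_notes_hook(item, parents, result, *args, **kwargs):
        key, value = item
        if key == 'note' and isinstance(value, str):
            return (key, value.strip())  # replace this (key, value) pair
        return None  # None tells traverse() to recurse into `value`

    crackfortran.post_processing_hooks.append(strip_notes_hook)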
+ +# namespace pollution +c: str +n: str diff --git a/python/numpy/f2py/diagnose.py b/python/numpy/f2py/diagnose.py new file mode 100644 index 000000000..7eb1697cc --- /dev/null +++ b/python/numpy/f2py/diagnose.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python3 +import os +import sys +import tempfile + + +def run(): + _path = os.getcwd() + os.chdir(tempfile.gettempdir()) + print('------') + print(f'os.name={os.name!r}') + print('------') + print(f'sys.platform={sys.platform!r}') + print('------') + print('sys.version:') + print(sys.version) + print('------') + print('sys.prefix:') + print(sys.prefix) + print('------') + print(f"sys.path={':'.join(sys.path)!r}") + print('------') + + try: + import numpy + has_newnumpy = 1 + except ImportError as e: + print('Failed to import new numpy:', e) + has_newnumpy = 0 + + try: + from numpy.f2py import f2py2e + has_f2py2e = 1 + except ImportError as e: + print('Failed to import f2py2e:', e) + has_f2py2e = 0 + + try: + import numpy.distutils + has_numpy_distutils = 2 + except ImportError: + try: + import numpy_distutils + has_numpy_distutils = 1 + except ImportError as e: + print('Failed to import numpy_distutils:', e) + has_numpy_distutils = 0 + + if has_newnumpy: + try: + print(f'Found new numpy version {numpy.__version__!r} in {numpy.__file__}') + except Exception as msg: + print('error:', msg) + print('------') + + if has_f2py2e: + try: + print('Found f2py2e version %r in %s' % + (f2py2e.__version__.version, f2py2e.__file__)) + except Exception as msg: + print('error:', msg) + print('------') + + if has_numpy_distutils: + try: + if has_numpy_distutils == 2: + print('Found numpy.distutils version %r in %r' % ( + numpy.distutils.__version__, + numpy.distutils.__file__)) + else: + print('Found numpy_distutils version %r in %r' % ( + numpy_distutils.numpy_distutils_version.numpy_distutils_version, + numpy_distutils.__file__)) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 1: + print( + 'Importing numpy_distutils.command.build_flib ...', end=' ') + import numpy_distutils.command.build_flib as build_flib + print('ok') + print('------') + try: + print( + 'Checking availability of supported Fortran compilers:') + for compiler_class in build_flib.all_compilers: + compiler_class(verbose=1).is_available() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print( + 'error:', msg, '(ignore it, build_flib is obsolete for numpy.distutils 0.2.2 and up)') + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.fcompiler ...', end=' ') + import numpy.distutils.fcompiler as fcompiler + else: + print('Importing numpy_distutils.fcompiler ...', end=' ') + import numpy_distutils.fcompiler as fcompiler + print('ok') + print('------') + try: + print('Checking availability of supported Fortran compilers:') + fcompiler.show_fcompilers() + print('------') + except Exception as msg: + print('error:', msg) + print('------') + except Exception as msg: + print('error:', msg) + print('------') + try: + if has_numpy_distutils == 2: + print('Importing numpy.distutils.cpuinfo ...', end=' ') + from numpy.distutils.cpuinfo import cpuinfo + print('ok') + print('------') + else: + try: + print( + 'Importing numpy_distutils.command.cpuinfo ...', end=' ') + from numpy_distutils.command.cpuinfo import cpuinfo + print('ok') + print('------') + except Exception as msg: + print('error:', msg, '(ignore it)') + print('Importing 
numpy_distutils.cpuinfo ...', end=' ')
+                from numpy_distutils.cpuinfo import cpuinfo
+                print('ok')
+                print('------')
+            cpu = cpuinfo()
+            print('CPU information:', end=' ')
+            for name in dir(cpuinfo):
+                if name[0] == '_' and name[1] != '_' and getattr(cpu, name[1:])():
+                    print(name[1:], end=' ')
+            print('------')
+    except Exception as msg:
+        print('error:', msg)
+        print('------')
+    os.chdir(_path)
+
+
+if __name__ == "__main__":
+    run()
diff --git a/python/numpy/f2py/diagnose.pyi b/python/numpy/f2py/diagnose.pyi
new file mode 100644
index 000000000..b88194ac6
--- /dev/null
+++ b/python/numpy/f2py/diagnose.pyi
@@ -0,0 +1 @@
+def run() -> None: ...
diff --git a/python/numpy/f2py/f2py2e.py b/python/numpy/f2py/f2py2e.py
new file mode 100644
index 000000000..459299f8e
--- /dev/null
+++ b/python/numpy/f2py/f2py2e.py
@@ -0,0 +1,786 @@
+"""
+
+f2py2e - Fortran to Python C/API generator. 2nd Edition.
+         See __usage__ below.
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+"""
+import argparse
+import os
+import pprint
+import re
+import sys
+
+from numpy.f2py._backends import f2py_build_generator
+
+from . import (
+    __version__,
+    auxfuncs,
+    capi_maps,
+    cb_rules,
+    cfuncs,
+    crackfortran,
+    f90mod_rules,
+    rules,
+)
+from .cfuncs import errmess
+
+f2py_version = __version__.version
+numpy_version = __version__.version
+
+# outmess=sys.stdout.write
+show = pprint.pprint
+outmess = auxfuncs.outmess
+MESON_ONLY_VER = (sys.version_info >= (3, 12))
+
+__usage__ =\
+f"""Usage:
+
+1) To construct extension module sources:
+
+      f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
+                                        <fortran functions> ] \\
+                                       [: <fortran files> ...]
+
+2) To compile fortran files and build extension modules:
+
+      f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
+
+3) To generate signature files:
+
+      f2py -h <filename.pyf> ...< same options as in (1) >
+
+Description: This program generates a Python C/API file (<modulename>module.c)
+             that contains wrappers for given fortran functions so that they
+             can be called from Python. With the -c option the corresponding
+             extension modules are built.
+
+Options:
+
+  -h <filename>    Write signatures of the fortran routines to file <filename>
+                   and exit. You can then edit <filename> and use it instead
+                   of <fortran files>. If <filename>==stdout then the
+                   signatures are printed to stdout.
+  <fortran functions>  Names of fortran routines for which Python C/API
+                   functions will be generated. Default is all that are found
+                   in <fortran files>.
+  <fortran files>  Paths to fortran/signature files that will be scanned for
+                   <fortran functions> in order to determine their signatures.
+  skip:            Ignore fortran functions that follow until `:'.
+  only:            Use only fortran functions that follow until `:'.
+  :                Get back to <fortran files> mode.
+
+  -m <modulename>  Name of the module; f2py generates a Python/C API
+                   file <modulename>module.c or extension module <modulename>.
+                   Default is 'untitled'.
+
+  '-include<header>'  Writes additional headers in the C wrapper, can be passed
+                   multiple times, generates #include <header> each time.
+
+  --[no-]lower     Do [not] lower the cases in <fortran files>. By default,
+                   --lower is assumed with -h key, and --no-lower without -h key.
+
+  --build-dir <dirname>  All f2py generated files are created in <dirname>.
+                   Default is tempfile.mkdtemp().
+
+  --overwrite-signature  Overwrite existing signature file.
+
+  --[no-]latex-doc Create (or not) <modulename>module.tex.
+                   Default is --no-latex-doc.
+  --short-latex    Create 'incomplete' LaTeX document (without commands
+                   \\documentclass, \\tableofcontents, and \\begin{{document}},
+                   \\end{{document}}).
+
+  --[no-]rest-doc Create (or not) <modulename>module.rst.
+                  Default is --no-rest-doc.
+
+  --debug-capi     Create C/API code that reports the state of the wrappers
+                   during runtime. Useful for debugging.
+
+  --[no-]wrap-functions    Create Fortran subroutine wrappers to Fortran 77
+                   functions. --wrap-functions is default because it ensures
+                   maximum portability/compiler independence.
+
+  --[no-]freethreading-compatible    Create a module that declares it does or
+                   doesn't require the GIL. The default is
+                   --freethreading-compatible for backward
+                   compatibility. Inspect the Fortran code you are wrapping for
+                   thread safety issues before passing
+                   --no-freethreading-compatible, as f2py does not analyze
+                   fortran code for thread safety issues.
+
+  --include-paths <path1>:<path2>:...   Search include files from the given
+                   directories.
+
+  --help-link [..] List system resources found by system_info.py. See also
+                   --link-<resource> switch below. [..] is optional list
+                   of resources names. E.g. try 'f2py --help-link lapack_opt'.
+
+  --f2cmap <filename>  Load Fortran-to-Python KIND specification from the given
+                   file. Default: .f2py_f2cmap in current directory.
+
+  --quiet          Run quietly.
+  --verbose        Run with extra verbosity.
+  --skip-empty-wrappers   Only generate wrapper files when needed.
+  -v               Print f2py version ID and exit.
+
+
+build backend options (only effective with -c)
+[NO_MESON] is used to indicate an option not meant to be used
+with the meson backend or above Python 3.12:
+
+  --fcompiler=<Vendor>     Specify Fortran compiler type by vendor [NO_MESON]
+  --compiler=<Vendor>      Specify distutils C compiler type [NO_MESON]
+
+  --help-fcompiler         List available Fortran compilers and exit [NO_MESON]
+  --f77exec=<path>         Specify the path to F77 compiler [NO_MESON]
+  --f90exec=<path>         Specify the path to F90 compiler [NO_MESON]
+  --f77flags=<string>      Specify F77 compiler flags
+  --f90flags=<string>      Specify F90 compiler flags
+  --opt=<string>           Specify optimization flags [NO_MESON]
+  --arch=<string>          Specify architecture specific optimization flags [NO_MESON]
+  --noopt                  Compile without optimization [NO_MESON]
+  --noarch                 Compile without arch-dependent optimization [NO_MESON]
+  --debug                  Compile with debugging information
+
+  --dep                    <dependency>
+                           Specify a meson dependency for the module. This may
+                           be passed multiple times for multiple dependencies.
+                           Dependencies are stored in a list for further processing.
+
+                           Example: --dep lapack --dep scalapack
+                           This will identify "lapack" and "scalapack" as dependencies
+                           and remove them from argv, leaving a dependencies list
+                           containing ["lapack", "scalapack"].
+
+  --backend                <backend_type>
+                           Specify the build backend for the compilation process.
+                           The supported backends are 'meson' and 'distutils'.
+                           If not specified, defaults to 'distutils'. On
+                           Python 3.12 or higher, the default is 'meson'.
+
+Extra options (only effective with -c):
+
+  --link-<resource>        Link extension module with <resource> as defined
+                           by numpy.distutils/system_info.py. E.g. to link
+                           with optimized LAPACK libraries (vecLib on MacOSX,
+                           ATLAS elsewhere), use --link-lapack_opt.
+                           See also --help-link switch. [NO_MESON]
+
+  -L/path/to/lib/ -l<libname>
+  -D<define> -U<name>
+  -I/path/to/include/
+  <filename>.o <filename>.so <filename>.a
+
+  Using the following macros may be required with non-gcc Fortran
+  compilers:
+    -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
+
+  When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
+  interface is printed out at exit (platforms: Linux).
+
+  When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
+  sent to stderr whenever F2PY interface makes a copy of an
+  array. Integer <int> sets the threshold for array sizes when
+  a message should be shown.
+
+Version:       {f2py_version}
+numpy Version: {numpy_version}
+License:       NumPy license (see LICENSE.txt in the NumPy source code)
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+https://numpy.org/doc/stable/f2py/index.html\n"""
+
+
+def scaninputline(inputline):
+    files, skipfuncs, onlyfuncs, debug = [], [], [], []
+    f, f2, f3, f5, f6, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0
+    verbose = 1
+    emptygen = True
+    dolc = -1
+    dolatexdoc = 0
+    dorestdoc = 0
+    wrapfuncs = 1
+    buildpath = '.'
+    include_paths, freethreading_compatible, inputline = get_newer_options(inputline)
+    signsfile, modulename = None, None
+    options = {'buildpath': buildpath,
+               'coutput': None,
+               'f2py_wrapper_output': None}
+    for l in inputline:
+        if l == '':
+            pass
+        elif l == 'only:':
+            f = 0
+        elif l == 'skip:':
+            f = -1
+        elif l == ':':
+            f = 1
+        elif l[:8] == '--debug-':
+            debug.append(l[8:])
+        elif l == '--lower':
+            dolc = 1
+        elif l == '--build-dir':
+            f6 = 1
+        elif l == '--no-lower':
+            dolc = 0
+        elif l == '--quiet':
+            verbose = 0
+        elif l == '--verbose':
+            verbose += 1
+        elif l == '--latex-doc':
+            dolatexdoc = 1
+        elif l == '--no-latex-doc':
+            dolatexdoc = 0
+        elif l == '--rest-doc':
+            dorestdoc = 1
+        elif l == '--no-rest-doc':
+            dorestdoc = 0
+        elif l == '--wrap-functions':
+            wrapfuncs = 1
+        elif l == '--no-wrap-functions':
+            wrapfuncs = 0
+        elif l == '--short-latex':
+            options['shortlatex'] = 1
+        elif l == '--coutput':
+            f8 = 1
+        elif l == '--f2py-wrapper-output':
+            f9 = 1
+        elif l == '--f2cmap':
+            f10 = 1
+        elif l == '--overwrite-signature':
+            options['h-overwrite'] = 1
+        elif l == '-h':
+            f2 = 1
+        elif l == '-m':
+            f3 = 1
+        elif l[:2] == '-v':
+            print(f2py_version)
+            sys.exit()
+        elif l == '--show-compilers':
+            f5 = 1
+        elif l[:8] == '-include':
+            cfuncs.outneeds['userincludes'].append(l[9:-1])
+            cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
+        elif l == '--skip-empty-wrappers':
+            emptygen = False
+        elif l[0] == '-':
+            errmess(f'Unknown option {repr(l)}\n')
+            sys.exit()
+        elif f2:
+            f2 = 0
+            signsfile = l
+        elif f3:
+            f3 = 0
+            modulename = l
+        elif f6:
+            f6 = 0
+            buildpath = l
+        elif f8:
+            f8 = 0
+            options["coutput"] = l
+        elif f9:
+            f9 = 0
+            options["f2py_wrapper_output"] = l
+        elif f10:
+            f10 = 0
+            options["f2cmap_file"] = l
+        elif f == 1:
+            try:
+                with open(l):
+                    pass
+                files.append(l)
+            except OSError as detail:
+                errmess(f'OSError: {detail!s}. Skipping file "{l!s}".\n')
+        elif f == -1:
+            skipfuncs.append(l)
+        elif f == 0:
+            onlyfuncs.append(l)
+    if not f5 and not files and not modulename:
+        print(__usage__)
+        sys.exit()
+    if not os.path.isdir(buildpath):
+        if not verbose:
+            outmess(f'Creating build directory {buildpath}\n')
+        os.mkdir(buildpath)
+    if signsfile:
+        signsfile = os.path.join(buildpath, signsfile)
+    if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
+        errmess(
+            f'Signature file "{signsfile}" exists!!! Use --overwrite-signature to overwrite.\n')
+        sys.exit()
+
+    options['emptygen'] = emptygen
+    options['debug'] = debug
+    options['verbose'] = verbose
+    if dolc == -1 and not signsfile:
+        options['do-lower'] = 0
+    else:
+        options['do-lower'] = dolc
+    if modulename:
+        options['module'] = modulename
+    if signsfile:
+        options['signsfile'] = signsfile
+    if onlyfuncs:
+        options['onlyfuncs'] = onlyfuncs
+    if skipfuncs:
+        options['skipfuncs'] = skipfuncs
+    options['dolatexdoc'] = dolatexdoc
+    options['dorestdoc'] = dorestdoc
+    options['wrapfuncs'] = wrapfuncs
+    options['buildpath'] = buildpath
+    options['include_paths'] = include_paths
+    options['requires_gil'] = not freethreading_compatible
+    options.setdefault('f2cmap_file', None)
+    return files, options
+
+
+def callcrackfortran(files, options):
+    rules.options = options
+    crackfortran.debug = options['debug']
+    crackfortran.verbose = options['verbose']
+    if 'module' in options:
+        crackfortran.f77modulename = options['module']
+    if 'skipfuncs' in options:
+        crackfortran.skipfuncs = options['skipfuncs']
+    if 'onlyfuncs' in options:
+        crackfortran.onlyfuncs = options['onlyfuncs']
+    crackfortran.include_paths[:] = options['include_paths']
+    crackfortran.dolowercase = options['do-lower']
+    postlist = crackfortran.crackfortran(files)
+    if 'signsfile' in options:
+        outmess(f"Saving signatures to file \"{options['signsfile']}\"\n")
+        pyf = crackfortran.crack2fortran(postlist)
+        if options['signsfile'][-6:] == 'stdout':
+            sys.stdout.write(pyf)
+        else:
+            with open(options['signsfile'], 'w') as f:
+                f.write(pyf)
+    if options["coutput"] is None:
+        for mod in postlist:
+            mod["coutput"] = f"{mod['name']}module.c"
+    else:
+        for mod in postlist:
+            mod["coutput"] = options["coutput"]
+    if options["f2py_wrapper_output"] is None:
+        for mod in postlist:
+            mod["f2py_wrapper_output"] = f"{mod['name']}-f2pywrappers.f"
+    else:
+        for mod in postlist:
+            mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
+    for mod in postlist:
+        if options["requires_gil"]:
+            mod['gil_used'] = 'Py_MOD_GIL_USED'
+        else:
+            mod['gil_used'] = 'Py_MOD_GIL_NOT_USED'
+    return postlist
+
+
+def buildmodules(lst):
+    cfuncs.buildcfuncs()
+    outmess('Building modules...\n')
+    modules, mnames, isusedby = [], [], {}
+    for item in lst:
+        if '__user__' in item['name']:
+            cb_rules.buildcallbacks(item)
+        else:
+            if 'use' in item:
+                for u in item['use'].keys():
+                    if u not in isusedby:
+                        isusedby[u] = []
+                    isusedby[u].append(item['name'])
+            modules.append(item)
+            mnames.append(item['name'])
+    ret = {}
+    for module, name in zip(modules, mnames):
+        if name in isusedby:
+            outmess('\tSkipping module "%s" which is used by %s.\n' % (
+                name, ','.join('"%s"' % s for s in isusedby[name])))
+        else:
+            um = []
+            if 'use' in module:
+                for u in module['use'].keys():
+                    if u in isusedby and u in mnames:
+                        um.append(modules[mnames.index(u)])
+                    else:
+                        outmess(
+                            f'\tModule "{name}" uses nonexisting "{u}" '
+                            'which will be ignored.\n')
+            ret[name] = {}
+            dict_append(ret[name], rules.buildmodule(module, um))
+    return ret
+
+
+def dict_append(d_out, d_in):
+    for (k, v) in d_in.items():
+        if k not in d_out:
+            d_out[k] = []
+        if isinstance(v, list):
+            d_out[k] = d_out[k] + v
+        else:
+            d_out[k].append(v)
+
+
+def run_main(comline_list):
+    """
+    Equivalent to running::
+
+        f2py <args>
+
+    where ``<args>=string.join(<list>,' ')``, but in Python. Unless
+    ``-h`` is used, this function returns a dictionary containing
+    information on generated modules and their dependencies on source
+    files.
+ + You cannot build extension modules with this function, that is, + using ``-c`` is not allowed. Use the ``compile`` command instead. + + Examples + -------- + The command ``f2py -m scalar scalar.f`` can be executed from Python as + follows. + + .. literalinclude:: ../../source/f2py/code/results/run_main_session.dat + :language: python + + """ + crackfortran.reset_global_f2py_vars() + f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__)) + fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h') + fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c') + # gh-22819 -- begin + parser = make_f2py_compile_parser() + args, comline_list = parser.parse_known_args(comline_list) + pyf_files, _ = filter_files("", "[.]pyf([.]src|)", comline_list) + # Checks that no existing modulename is defined in a pyf file + # TODO: Remove all this when scaninputline is replaced + if args.module_name: + if "-h" in comline_list: + modname = ( + args.module_name + ) # Directly use from args when -h is present + else: + modname = validate_modulename( + pyf_files, args.module_name + ) # Validate modname when -h is not present + comline_list += ['-m', modname] # needed for the rest of scaninputline + # gh-22819 -- end + files, options = scaninputline(comline_list) + auxfuncs.options = options + capi_maps.load_f2cmap_file(options['f2cmap_file']) + postlist = callcrackfortran(files, options) + isusedby = {} + for plist in postlist: + if 'use' in plist: + for u in plist['use'].keys(): + if u not in isusedby: + isusedby[u] = [] + isusedby[u].append(plist['name']) + for plist in postlist: + module_name = plist['name'] + if plist['block'] == 'python module' and '__user__' in module_name: + if module_name in isusedby: + # if not quiet: + usedby = ','.join(f'"{s}"' for s in isusedby[module_name]) + outmess( + f'Skipping Makefile build for module "{module_name}" ' + f'which is used by {usedby}\n') + if 'signsfile' in options: + if options['verbose'] > 1: + outmess( + 'Stopping. Edit the signature file and then run f2py on the signature file: ') + outmess(f"{os.path.basename(sys.argv[0])} {options['signsfile']}\n") + return + for plist in postlist: + if plist['block'] != 'python module': + if 'python module' not in options: + errmess( + 'Tip: If your original code is Fortran source then you must use -m option.\n') + raise TypeError('All blocks must be python module blocks but got %s' % ( + repr(plist['block']))) + auxfuncs.debugoptions = options['debug'] + f90mod_rules.options = options + auxfuncs.wrapfuncs = options['wrapfuncs'] + + ret = buildmodules(postlist) + + for mn in ret.keys(): + dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc}) + return ret + + +def filter_files(prefix, suffix, files, remove_prefix=None): + """ + Filter files by prefix and suffix. 
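+
+    For example (illustrative values),
+    ``filter_files('-L', '', ['-L/usr/lib', 'a.f'], remove_prefix=1)``
+    returns ``(['/usr/lib'], ['a.f'])``.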
+ """ + filtered, rest = [], [] + match = re.compile(prefix + r'.*' + suffix + r'\Z').match + if remove_prefix: + ind = len(prefix) + else: + ind = 0 + for file in [x.strip() for x in files]: + if match(file): + filtered.append(file[ind:]) + else: + rest.append(file) + return filtered, rest + + +def get_prefix(module): + p = os.path.dirname(os.path.dirname(module.__file__)) + return p + + +class CombineIncludePaths(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + include_paths_set = set(getattr(namespace, 'include_paths', []) or []) + if option_string == "--include_paths": + outmess("Use --include-paths or -I instead of --include_paths which will be removed") + if option_string in {"--include-paths", "--include_paths"}: + include_paths_set.update(values.split(':')) + else: + include_paths_set.add(values) + namespace.include_paths = list(include_paths_set) + +def f2py_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("-I", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include-paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--include_paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--freethreading-compatible", dest="ftcompat", action=argparse.BooleanOptionalAction) + return parser + +def get_newer_options(iline): + iline = (' '.join(iline)).split() + parser = f2py_parser() + args, remain = parser.parse_known_args(iline) + ipaths = args.include_paths + if args.include_paths is None: + ipaths = [] + return ipaths, args.ftcompat, remain + +def make_f2py_compile_parser(): + parser = argparse.ArgumentParser(add_help=False) + parser.add_argument("--dep", action="append", dest="dependencies") + parser.add_argument("--backend", choices=['meson', 'distutils'], default='distutils') + parser.add_argument("-m", dest="module_name") + return parser + +def preparse_sysargv(): + # To keep backwards bug compatibility, newer flags are handled by argparse, + # and `sys.argv` is passed to the rest of `f2py` as is. + parser = make_f2py_compile_parser() + + args, remaining_argv = parser.parse_known_args() + sys.argv = [sys.argv[0]] + remaining_argv + + backend_key = args.backend + if MESON_ONLY_VER and backend_key == 'distutils': + outmess("Cannot use distutils backend with Python>=3.12," + " using meson backend instead.\n") + backend_key = "meson" + + return { + "dependencies": args.dependencies or [], + "backend": backend_key, + "modulename": args.module_name, + } + +def run_compile(): + """ + Do it all in one call! 
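+
+    Roughly: pre-parse the backend flags, split sys.argv into f2py,
+    compiler, and linker option groups, run run_main() on the sources,
+    then hand the results to the selected build backend.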
+ """ + import tempfile + + # Collect dependency flags, preprocess sys.argv + argy = preparse_sysargv() + modulename = argy["modulename"] + if modulename is None: + modulename = 'untitled' + dependencies = argy["dependencies"] + backend_key = argy["backend"] + build_backend = f2py_build_generator(backend_key) + + i = sys.argv.index('-c') + del sys.argv[i] + + remove_build_dir = 0 + try: + i = sys.argv.index('--build-dir') + except ValueError: + i = None + if i is not None: + build_dir = sys.argv[i + 1] + del sys.argv[i + 1] + del sys.argv[i] + else: + remove_build_dir = 1 + build_dir = tempfile.mkdtemp() + + _reg1 = re.compile(r'--link-') + sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags] + if sysinfo_flags: + sysinfo_flags = [f[7:] for f in sysinfo_flags] + + _reg2 = re.compile( + r'--((no-|)(wrap-functions|lower|freethreading-compatible)|debug-capi|quiet|skip-empty-wrappers)|-include') + f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] + f2py_flags2 = [] + fl = 0 + for a in sys.argv[1:]: + if a in ['only:', 'skip:']: + fl = 1 + elif a == ':': + fl = 0 + if fl or a == ':': + f2py_flags2.append(a) + if f2py_flags2 and f2py_flags2[-1] != ':': + f2py_flags2.append(':') + f2py_flags.extend(f2py_flags2) + sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2] + _reg3 = re.compile( + r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') + flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in flib_flags] + # TODO: Once distutils is dropped completely, i.e. min_ver >= 3.12, unify into --fflags + reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') + reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + if not (MESON_ONLY_VER or backend_key == 'meson'): + fc_flags.extend(distutils_flags) + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] + + del_list = [] + for s in flib_flags: + v = '--fcompiler=' + if s[:len(v)] == v: + if MESON_ONLY_VER or backend_key == 'meson': + outmess( + "--fcompiler cannot be used with meson," + "set compiler with the FC environment variable\n" + ) + else: + from numpy.distutils import fcompiler + fcompiler.load_all_fcompiler_classes() + allowed_keys = list(fcompiler.fcompiler_class.keys()) + nv = ov = s[len(v):].lower() + if ov not in allowed_keys: + vmap = {} # XXX + try: + nv = vmap[ov] + except KeyError: + if ov not in vmap.values(): + print(f'Unknown vendor: "{s[len(v):]}"') + nv = ov + i = flib_flags.index(s) + flib_flags[i] = '--fcompiler=' + nv # noqa: B909 + continue + for s in del_list: + i = flib_flags.index(s) + del flib_flags[i] + assert len(flib_flags) <= 2, repr(flib_flags) + + _reg5 = re.compile(r'--(verbose)') + setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)] + sys.argv = [_m for _m in sys.argv if _m not in setup_flags] + + if '--quiet' in f2py_flags: + setup_flags.append('--quiet') + + # Ugly filter to remove everything but sources + sources = sys.argv[1:] + f2cmapopt = '--f2cmap' + if f2cmapopt in sys.argv: + i = sys.argv.index(f2cmapopt) + f2py_flags.extend(sys.argv[i:i + 2]) + del sys.argv[i + 1], sys.argv[i] + sources = sys.argv[1:] + + pyf_files, _sources = filter_files("", "[.]pyf([.]src|)", sources) 
+ sources = pyf_files + _sources + modulename = validate_modulename(pyf_files, modulename) + extra_objects, sources = filter_files('', '[.](o|a|so|dylib)', sources) + library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1) + libraries, sources = filter_files('-l', '', sources, remove_prefix=1) + undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1) + define_macros, sources = filter_files('-D', '', sources, remove_prefix=1) + for i in range(len(define_macros)): + name_value = define_macros[i].split('=', 1) + if len(name_value) == 1: + name_value.append(None) + if len(name_value) == 2: + define_macros[i] = tuple(name_value) + else: + print('Invalid use of -D:', name_value) + + # Construct wrappers / signatures / things + if backend_key == 'meson': + if not pyf_files: + outmess('Using meson backend\nWill pass --lower to f2py\nSee https://numpy.org/doc/stable/f2py/buildtools/meson.html\n') + f2py_flags.append('--lower') + run_main(f" {' '.join(f2py_flags)} -m {modulename} {' '.join(sources)}".split()) + else: + run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) + + # Order matters here, includes are needed for run_main above + include_dirs, _, sources = get_newer_options(sources) + # Now use the builder + builder = build_backend( + modulename, + sources, + extra_objects, + build_dir, + include_dirs, + library_dirs, + libraries, + define_macros, + undef_macros, + f2py_flags, + sysinfo_flags, + fc_flags, + flib_flags, + setup_flags, + remove_build_dir, + {"dependencies": dependencies}, + ) + + builder.compile() + + +def validate_modulename(pyf_files, modulename='untitled'): + if len(pyf_files) > 1: + raise ValueError("Only one .pyf file per call") + if pyf_files: + pyff = pyf_files[0] + pyf_modname = auxfuncs.get_f2py_modulename(pyff) + if modulename != pyf_modname: + outmess( + f"Ignoring -m {modulename}.\n" + f"{pyff} defines {pyf_modname} to be the modulename.\n" + ) + modulename = pyf_modname + return modulename + +def main(): + if '--help-link' in sys.argv[1:]: + sys.argv.remove('--help-link') + if MESON_ONLY_VER: + outmess("Use --dep for meson builds\n") + else: + from numpy.distutils.system_info import show_all + show_all() + return + + if '-c' in sys.argv[1:]: + run_compile() + else: + run_main(sys.argv[1:]) diff --git a/python/numpy/f2py/f2py2e.pyi b/python/numpy/f2py/f2py2e.pyi new file mode 100644 index 000000000..dd1d0c39e --- /dev/null +++ b/python/numpy/f2py/f2py2e.pyi @@ -0,0 +1,76 @@ +import argparse +import pprint +from collections.abc import Hashable, Iterable, Mapping, MutableMapping, Sequence +from types import ModuleType +from typing import Any, Final, NotRequired, TypedDict, type_check_only + +from typing_extensions import TypeVar, override + +from .__version__ import version +from .auxfuncs import _Bool +from .auxfuncs import outmess as outmess + +### + +_KT = TypeVar("_KT", bound=Hashable) +_VT = TypeVar("_VT") + +@type_check_only +class _F2PyDict(TypedDict): + csrc: list[str] + h: list[str] + fsrc: NotRequired[list[str]] + ltx: NotRequired[list[str]] + +@type_check_only +class _PreparseResult(TypedDict): + dependencies: list[str] + backend: str + modulename: str + +### + +MESON_ONLY_VER: Final[bool] +f2py_version: Final = version +numpy_version: Final = version +__usage__: Final[str] + +show = pprint.pprint + +class CombineIncludePaths(argparse.Action): + @override + def __call__( + self, + /, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[str] | None, + option_string: str | 
None = None, + ) -> None: ... + +# +def run_main(comline_list: Iterable[str]) -> dict[str, _F2PyDict]: ... +def run_compile() -> None: ... +def main() -> None: ... + +# +def scaninputline(inputline: Iterable[str]) -> tuple[list[str], dict[str, _Bool]]: ... +def callcrackfortran(files: list[str], options: dict[str, bool]) -> list[dict[str, Any]]: ... +def buildmodules(lst: Iterable[Mapping[str, object]]) -> dict[str, dict[str, Any]]: ... +def dict_append(d_out: MutableMapping[_KT, _VT], d_in: Mapping[_KT, _VT]) -> None: ... +def filter_files( + prefix: str, + suffix: str, + files: Iterable[str], + remove_prefix: _Bool | None = None, +) -> tuple[list[str], list[str]]: ... +def get_prefix(module: ModuleType) -> str: ... +def get_newer_options(iline: Iterable[str]) -> tuple[list[str], Any, list[str]]: ... + +# +def f2py_parser() -> argparse.ArgumentParser: ... +def make_f2py_compile_parser() -> argparse.ArgumentParser: ... + +# +def preparse_sysargv() -> _PreparseResult: ... +def validate_modulename(pyf_files: Sequence[str], modulename: str = "untitled") -> str: ... diff --git a/python/numpy/f2py/f90mod_rules.py b/python/numpy/f2py/f90mod_rules.py new file mode 100644 index 000000000..d13a42a9d --- /dev/null +++ b/python/numpy/f2py/f90mod_rules.py @@ -0,0 +1,269 @@ +""" +Build F90 module support for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.27 $"[10:-1] + +f2py_version = 'See `f2py -v`' + +import numpy as np + +from . import capi_maps, func2subr + +# The environment provided by auxfuncs.py is needed for some calls to eval. +# As the needed functions cannot be determined by static inspection of the +# code, it is safest to use import * pending a major refactoring of f2py. +from .auxfuncs import * +from .crackfortran import undo_rmbadname, undo_rmbadname1 + +options = {} + + +def findf90modules(m): + if ismodule(m): + return [m] + if not hasbody(m): + return [] + ret = [] + for b in m['body']: + if ismodule(b): + ret.append(b) + else: + ret = ret + findf90modules(b) + return ret + + +fgetdims1 = """\ + external f2pysetdata + logical ns + integer r,i + integer(%d) s(*) + ns = .FALSE. + if (allocated(d)) then + do i=1,r + if ((size(d,i).ne.s(i)).and.(s(i).ge.0)) then + ns = .TRUE. + end if + end do + if (ns) then + deallocate(d) + end if + end if + if ((.not.allocated(d)).and.(s(1).ge.1)) then""" % np.intp().itemsize + +fgetdims2 = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + end if + flag = 1 + call f2pysetdata(d,allocated(d))""" + +fgetdims2_sa = """\ + end if + if (allocated(d)) then + do i=1,r + s(i) = size(d,i) + end do + !s(r) must be equal to len(d(1)) + end if + flag = 2 + call f2pysetdata(d,allocated(d))""" + + +def buildhooks(pymod): + from . 
import rules + ret = {'f90modhooks': [], 'initf90modhooks': [], 'body': [], + 'need': ['F_FUNC', 'arrayobject.h'], + 'separatorsfor': {'includes0': '\n', 'includes': '\n'}, + 'docs': ['"Fortran 90/95 modules:\\n"'], + 'latexdoc': []} + fhooks = [''] + + def fadd(line, s=fhooks): + s[0] = f'{s[0]}\n {line}' + doc = [''] + + def dadd(line, s=doc): + s[0] = f'{s[0]}\n{line}' + + usenames = getuseblocks(pymod) + for m in findf90modules(pymod): + sargs, fargs, efargs, modobjs, notvars, onlyvars = [], [], [], [], [ + m['name']], [] + sargsp = [] + ifargs = [] + mfargs = [] + if hasbody(m): + for b in m['body']: + notvars.append(b['name']) + for n in m['vars'].keys(): + var = m['vars'][n] + + if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): + onlyvars.append(n) + mfargs.append(n) + outmess(f"\t\tConstructing F90 module support for \"{m['name']}\"...\n") + if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: + outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") + continue + + # gh-25186 + if m['name'] in usenames and containscommon(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a common block...\n") + continue + # skip modules with derived types + if m['name'] in usenames and containsderivedtypes(m): + outmess(f"\t\t\tSkipping {m['name']} since it is in 'use' and contains a derived type...\n") + continue + if onlyvars: + outmess(f"\t\t Variables: {' '.join(onlyvars)}\n") + chooks = [''] + + def cadd(line, s=chooks): + s[0] = f'{s[0]}\n{line}' + ihooks = [''] + + def iadd(line, s=ihooks): + s[0] = f'{s[0]}\n{line}' + + vrd = capi_maps.modsign2map(m) + cadd('static FortranDataDef f2py_%s_def[] = {' % (m['name'])) + dadd('\\subsection{Fortran 90/95 module \\texttt{%s}}\n' % (m['name'])) + if hasnote(m): + note = m['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(note) + if onlyvars: + dadd('\\begin{description}') + for n in onlyvars: + var = m['vars'][n] + modobjs.append(n) + ct = capi_maps.getctype(var) + at = capi_maps.c2capi_map[ct] + dm = capi_maps.getarrdims(n, var) + dms = dm['dims'].replace('*', '-1').strip() + dms = dms.replace(':', '-1').strip() + if not dms: + dms = '-1' + use_fgetdims2 = fgetdims2 + cadd('\t{"%s",%s,{{%s}},%s, %s},' % + (undo_rmbadname1(n), dm['rank'], dms, at, + capi_maps.get_elsize(var))) + dadd('\\item[]{{}\\verb@%s@{}}' % + (capi_maps.getarrdocsign(n, var))) + if hasnote(var): + note = var['note'] + if isinstance(note, list): + note = '\n'.join(note) + dadd(f'--- {note}') + if isallocatable(var): + fargs.append(f"f2py_{m['name']}_getdims_{n}") + efargs.append(fargs[-1]) + sargs.append( + f'void (*{n})(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') + sargsp.append('void (*)(int*,npy_intp*,void(*)(char*,npy_intp*),int*)') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].func = {n};") + fadd(f'subroutine {fargs[-1]}(r,s,f2pysetdata,flag)') + fadd(f"use {m['name']}, only: d => {undo_rmbadname1(n)}\n") + fadd('integer flag\n') + fhooks[0] = fhooks[0] + fgetdims1 + dms = range(1, int(dm['rank']) + 1) + fadd(' allocate(d(%s))\n' % + (','.join(['s(%s)' % i for i in dms]))) + fhooks[0] = fhooks[0] + use_fgetdims2 + fadd(f'end subroutine {fargs[-1]}') + else: + fargs.append(n) + sargs.append(f'char *{n}') + sargsp.append('char*') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {n};") + if onlyvars: + dadd('\\end{description}') + if hasbody(m): + for b in m['body']: + if not isroutine(b): + outmess("f90mod_rules.buildhooks:" + f" 
skipping {b['block']} {b['name']}\n") + continue + modobjs.append(f"{b['name']}()") + b['modulename'] = m['name'] + api, wrap = rules.buildapi(b) + if isfunction(b): + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append(func2subr.createfuncwrapper(b, signature=1)) + elif wrap: + fhooks[0] = fhooks[0] + wrap + fargs.append(f"f2pywrap_{m['name']}_{b['name']}") + ifargs.append( + func2subr.createsubrwrapper(b, signature=1)) + else: + fargs.append(b['name']) + mfargs.append(fargs[-1]) + api['externroutines'] = [] + ar = applyrules(api, vrd) + ar['docs'] = [] + ar['docshort'] = [] + ret = dictappend(ret, ar) + cadd(('\t{"%s",-1,{{-1}},0,0,NULL,(void *)' + 'f2py_rout_#modulename#_%s_%s,' + 'doc_f2py_rout_#modulename#_%s_%s},') + % (b['name'], m['name'], b['name'], m['name'], b['name'])) + sargs.append(f"char *{b['name']}") + sargsp.append('char *') + iadd(f"\tf2py_{m['name']}_def[i_f2py++].data = {b['name']};") + cadd('\t{NULL}\n};\n') + iadd('}') + ihooks[0] = 'static void f2py_setup_%s(%s) {\n\tint i_f2py=0;%s' % ( + m['name'], ','.join(sargs), ihooks[0]) + if '_' in m['name']: + F_FUNC = 'F_FUNC_US' + else: + F_FUNC = 'F_FUNC' + iadd('extern void %s(f2pyinit%s,F2PYINIT%s)(void (*)(%s));' + % (F_FUNC, m['name'], m['name'].upper(), ','.join(sargsp))) + iadd('static void f2py_init_%s(void) {' % (m['name'])) + iadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);' + % (F_FUNC, m['name'], m['name'].upper(), m['name'])) + iadd('}\n') + ret['f90modhooks'] = ret['f90modhooks'] + chooks + ihooks + ret['initf90modhooks'] = ['\tPyDict_SetItemString(d, "%s", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % ( + m['name'], m['name'], m['name'])] + ret['initf90modhooks'] + fadd('') + fadd(f"subroutine f2pyinit{m['name']}(f2pysetupfunc)") + if mfargs: + for a in undo_rmbadname(mfargs): + fadd(f"use {m['name']}, only : {a}") + if ifargs: + fadd(' '.join(['interface'] + ifargs)) + fadd('end interface') + fadd('external f2pysetupfunc') + if efargs: + for a in undo_rmbadname(efargs): + fadd(f'external {a}') + fadd(f"call f2pysetupfunc({','.join(undo_rmbadname(fargs))})") + fadd(f"end subroutine f2pyinit{m['name']}\n") + + dadd('\n'.join(ret['latexdoc']).replace( + r'\subsection{', r'\subsubsection{')) + + ret['latexdoc'] = [] + ret['docs'].append(f"\"\t{m['name']} --- {','.join(undo_rmbadname(modobjs))}\"") + + ret['routine_defs'] = '' + ret['doc'] = [] + ret['docshort'] = [] + ret['latexdoc'] = doc[0] + if len(ret['docs']) <= 1: + ret['docs'] = '' + return ret, fhooks[0] diff --git a/python/numpy/f2py/f90mod_rules.pyi b/python/numpy/f2py/f90mod_rules.pyi new file mode 100644 index 000000000..4df004eef --- /dev/null +++ b/python/numpy/f2py/f90mod_rules.pyi @@ -0,0 +1,16 @@ +from collections.abc import Mapping +from typing import Any, Final + +from .auxfuncs import isintent_dict as isintent_dict + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" + +options: Final[dict[str, bool]] + +fgetdims1: Final[str] = ... +fgetdims2: Final[str] = ... +fgetdims2_sa: Final[str] = ... + +def findf90modules(m: Mapping[str, object]) -> list[dict[str, Any]]: ... +def buildhooks(pymod: Mapping[str, object]) -> dict[str, Any]: ... diff --git a/python/numpy/f2py/func2subr.py b/python/numpy/f2py/func2subr.py new file mode 100644 index 000000000..0a875006e --- /dev/null +++ b/python/numpy/f2py/func2subr.py @@ -0,0 +1,329 @@ +""" + +Rules for building C/API module with f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. 
+Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy + +from ._isocbind import isoc_kindmap +from .auxfuncs import ( + getfortranname, + isexternal, + isfunction, + isfunction_wrap, + isintent_in, + isintent_out, + islogicalfunction, + ismoduleroutine, + isscalar, + issubroutine, + issubroutine_wrap, + outmess, + show, +) + + +def var2fixfortran(vars, a, fa=None, f90mode=None): + if fa is None: + fa = a + if a not in vars: + show(vars) + outmess(f'var2fixfortran: No definition for argument "{a}".\n') + return '' + if 'typespec' not in vars[a]: + show(vars[a]) + outmess(f'var2fixfortran: No typespec for argument "{a}".\n') + return '' + vardef = vars[a]['typespec'] + if vardef == 'type' and 'typename' in vars[a]: + vardef = f"{vardef}({vars[a]['typename']})" + selector = {} + lk = '' + if 'kindselector' in vars[a]: + selector = vars[a]['kindselector'] + lk = 'kind' + elif 'charselector' in vars[a]: + selector = vars[a]['charselector'] + lk = 'len' + if '*' in selector: + if f90mode: + if selector['*'] in ['*', ':', '(*)']: + vardef = f'{vardef}(len=*)' + else: + vardef = f"{vardef}({lk}={selector['*']})" + elif selector['*'] in ['*', ':']: + vardef = f"{vardef}*({selector['*']})" + else: + vardef = f"{vardef}*{selector['*']}" + elif 'len' in selector: + vardef = f"{vardef}(len={selector['len']}" + if 'kind' in selector: + vardef = f"{vardef},kind={selector['kind']})" + else: + vardef = f'{vardef})' + elif 'kind' in selector: + vardef = f"{vardef}(kind={selector['kind']})" + + vardef = f'{vardef} {fa}' + if 'dimension' in vars[a]: + vardef = f"{vardef}({','.join(vars[a]['dimension'])})" + return vardef + +def useiso_c_binding(rout): + useisoc = False + for key, value in rout['vars'].items(): + kind_value = value.get('kindselector', {}).get('kind') + if kind_value in isoc_kindmap: + return True + return useisoc + +def createfuncwrapper(rout, signature=0): + assert isfunction(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = f'{ret[0]}\n {line}' + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + newname = f'{name}f2pywrap' + + if newname not in vars: + vars[newname] = vars[name] + args = [newname] + rout['args'][1:] + else: + args = [newname] + rout['args'] + + l_tmpl = var2fixfortran(vars, name, '@@@NAME@@@', f90mode) + if l_tmpl[:13] == 'character*(*)': + if f90mode: + l_tmpl = 'character(len=10)' + l_tmpl[13:] + else: + l_tmpl = 'character*10' + l_tmpl[13:] + charselect = vars[name]['charselector'] + if charselect.get('*', '') == '(*)': + charselect['*'] = '10' + + l1 = l_tmpl.replace('@@@NAME@@@', newname) + rl = None + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + # gh-23598 fix warning + # Essentially, this gets called again with modules where the name of the + # function is added to the arguments, which is not required, and removed + sargs = sargs.replace(f"{name}, ", '') + args = [arg for arg in args if arg != name] + rout['args'] = args + 
add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") + if not signature: + add(f"use {rout['modulename']}, only : {fortranname}") + if useisoc: + add('use iso_c_binding') + else: + add(f'subroutine f2pywrap{name} ({sargs})') + if useisoc: + add('use iso_c_binding') + if not need_interface: + add(f'external {fortranname}') + rl = l_tmpl.replace('@@@NAME@@@', '') + ' ' + fortranname + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + args = args[1:] + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add(f'external {a}') + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isintent_in(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + add(l1) + if rl is not None: + add(rl) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + add(rout['saved_interface'].lstrip()) + add('end interface') + + sargs = ', '.join([a for a in args if a not in extra_args]) + + if not signature: + if islogicalfunction(rout): + add(f'{newname} = .not.(.not.{fortranname}({sargs}))') + else: + add(f'{newname} = {fortranname}({sargs})') + if f90mode: + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") + else: + add('end') + return ret[0] + + +def createsubrwrapper(rout, signature=0): + assert issubroutine(rout) + + extra_args = [] + vars = rout['vars'] + for a in rout['args']: + v = rout['vars'][a] + for i, d in enumerate(v.get('dimension', [])): + if d == ':': + dn = f'f2py_{a}_d{i}' + dv = {'typespec': 'integer', 'intent': ['hide']} + dv['='] = f'shape({a}, {i})' + extra_args.append(dn) + vars[dn] = dv + v['dimension'][i] = dn + rout['args'].extend(extra_args) + need_interface = bool(extra_args) + + ret = [''] + + def add(line, ret=ret): + ret[0] = f'{ret[0]}\n {line}' + name = rout['name'] + fortranname = getfortranname(rout) + f90mode = ismoduleroutine(rout) + + args = rout['args'] + + useisoc = useiso_c_binding(rout) + sargs = ', '.join(args) + if f90mode: + add(f"subroutine f2pywrap_{rout['modulename']}_{name} ({sargs})") + if useisoc: + add('use iso_c_binding') + if not signature: + add(f"use {rout['modulename']}, only : {fortranname}") + else: + add(f'subroutine f2pywrap{name} ({sargs})') + if useisoc: + add('use iso_c_binding') + if not need_interface: + add(f'external {fortranname}') + + if need_interface: + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' not in line: + add(line) + + dumped_args = [] + for a in args: + if isexternal(vars[a]): + add(f'external {a}') + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + if isscalar(vars[a]): + add(var2fixfortran(vars, a, f90mode=f90mode)) + dumped_args.append(a) + for a in args: + if a in dumped_args: + continue + add(var2fixfortran(vars, a, f90mode=f90mode)) + + if need_interface: + if f90mode: + # f90 module already defines needed interface + pass + else: + add('interface') + for line in rout['saved_interface'].split('\n'): + if line.lstrip().startswith('use ') and '__user__' in line: + continue + add(line) + add('end interface') + + sargs = ', '.join([a for a in args if a not in 
extra_args]) + + if not signature: + add(f'call {fortranname}({sargs})') + if f90mode: + add(f"end subroutine f2pywrap_{rout['modulename']}_{name}") + else: + add('end') + return ret[0] + + +def assubr(rout): + if isfunction_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran function "%s"("%s")...\n' % ( + name, fortranname)) + rout = copy.copy(rout) + fname = name + rname = fname + if 'result' in rout: + rname = rout['result'] + rout['vars'][fname] = rout['vars'][rname] + fvar = rout['vars'][fname] + if not isintent_out(fvar): + if 'intent' not in fvar: + fvar['intent'] = [] + fvar['intent'].append('out') + flag = 1 + for i in fvar['intent']: + if i.startswith('out='): + flag = 0 + break + if flag: + fvar['intent'].append(f'out={rname}') + rout['args'][:] = [fname] + rout['args'] + return rout, createfuncwrapper(rout) + if issubroutine_wrap(rout): + fortranname = getfortranname(rout) + name = rout['name'] + outmess('\t\tCreating wrapper for Fortran subroutine "%s"("%s")...\n' + % (name, fortranname)) + rout = copy.copy(rout) + return rout, createsubrwrapper(rout) + return rout, '' diff --git a/python/numpy/f2py/func2subr.pyi b/python/numpy/f2py/func2subr.pyi new file mode 100644 index 000000000..8d2b3dbaa --- /dev/null +++ b/python/numpy/f2py/func2subr.pyi @@ -0,0 +1,7 @@ +from .auxfuncs import _Bool, _ROut, _Var + +def var2fixfortran(vars: _Var, a: str, fa: str | None = None, f90mode: _Bool | None = None) -> str: ... +def useiso_c_binding(rout: _ROut) -> bool: ... +def createfuncwrapper(rout: _ROut, signature: int = 0) -> str: ... +def createsubrwrapper(rout: _ROut, signature: int = 0) -> str: ... +def assubr(rout: _ROut) -> tuple[dict[str, str], str]: ... diff --git a/python/numpy/f2py/rules.py b/python/numpy/f2py/rules.py new file mode 100644 index 000000000..667ef287f --- /dev/null +++ b/python/numpy/f2py/rules.py @@ -0,0 +1,1629 @@ +""" + +Rules for building C/API module with f2py2e. + +Here is a skeleton of a new wrapper function (13Dec2001): + +wrapper_function(args) + declarations + get_python_arguments, say, `a' and `b' + + get_a_from_python + if (successful) { + + get_b_from_python + if (successful) { + + callfortran + if (successful) { + + put_a_to_python + if (successful) { + + put_b_to_python + if (successful) { + + buildvalue = ... + + } + + } + + } + + } + cleanup_b + + } + cleanup_a + + return buildvalue + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. +Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +import copy +import os +import sys +import time +from pathlib import Path + +# __version__.version is now the same as the NumPy version +from . 
import (
+    __version__,
+    capi_maps,
+    cfuncs,
+    common_rules,
+    f90mod_rules,
+    func2subr,
+    use_rules,
+)
+from .auxfuncs import (
+    applyrules,
+    debugcapi,
+    dictappend,
+    errmess,
+    gentitle,
+    getargs2,
+    hascallstatement,
+    hasexternals,
+    hasinitvalue,
+    hasnote,
+    hasresultnote,
+    isarray,
+    isarrayofstrings,
+    isattr_value,
+    ischaracter,
+    ischaracter_or_characterarray,
+    ischaracterarray,
+    iscomplex,
+    iscomplexarray,
+    iscomplexfunction,
+    iscomplexfunction_warn,
+    isdummyroutine,
+    isexternal,
+    isfunction,
+    isfunction_wrap,
+    isint1,
+    isint1array,
+    isintent_aux,
+    isintent_c,
+    isintent_callback,
+    isintent_copy,
+    isintent_hide,
+    isintent_inout,
+    isintent_nothide,
+    isintent_out,
+    isintent_overwrite,
+    islogical,
+    islong_complex,
+    islong_double,
+    islong_doublefunction,
+    islong_long,
+    islong_longfunction,
+    ismoduleroutine,
+    isoptional,
+    isrequired,
+    isscalar,
+    issigned_long_longarray,
+    isstring,
+    isstringarray,
+    isstringfunction,
+    issubroutine,
+    issubroutine_wrap,
+    isthreadsafe,
+    isunsigned,
+    isunsigned_char,
+    isunsigned_chararray,
+    isunsigned_long_long,
+    isunsigned_long_longarray,
+    isunsigned_short,
+    isunsigned_shortarray,
+    l_and,
+    l_not,
+    l_or,
+    outmess,
+    replace,
+    requiresf90wrapper,
+    stripcomma,
+)
+
+f2py_version = __version__.version
+numpy_version = __version__.version
+
+options = {}
+sepdict = {}
+# for k in ['need_cfuncs']: sepdict[k]=','
+for k in ['decl',
+          'frompyobj',
+          'cleanupfrompyobj',
+          'topyarr', 'method',
+          'pyobjfrom', 'closepyobjfrom',
+          'freemem',
+          'userincludes',
+          'includes0', 'includes', 'typedefs', 'typedefs_generated',
+          'cppmacros', 'cfuncs', 'callbacks',
+          'latexdoc',
+          'restdoc',
+          'routine_defs', 'externroutines',
+          'initf2pywraphooks',
+          'commonhooks', 'initcommonhooks',
+          'f90modhooks', 'initf90modhooks']:
+    sepdict[k] = '\n'
+
+#################### Rules for C/API module #################
+
+generationtime = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
+module_rules = {
+    'modulebody': """\
+/* File: #modulename#module.c
+ * This file is auto-generated with f2py (version:#f2py_version#).
+ * f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
+ * written by Pearu Peterson <pearu@cens.ioc.ee>.
+ * Generation date: """ + time.asctime(time.gmtime(generationtime)) + """
+ * Do not edit this file directly unless you know what you are doing!!!
+ */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#ifndef PY_SSIZE_T_CLEAN
+#define PY_SSIZE_T_CLEAN
+#endif /* PY_SSIZE_T_CLEAN */
+
+/* Unconditionally included */
+#include <Python.h>
+#include <numpy/npy_os.h>
+
+""" + gentitle("See f2py2e/cfuncs.py: includes") + """
+#includes#
+#includes0#
+
+""" + gentitle("See f2py2e/rules.py: mod_rules['modulebody']") + """
+static PyObject *#modulename#_error;
+static PyObject *#modulename#_module;
+
+""" + gentitle("See f2py2e/cfuncs.py: typedefs") + """
+#typedefs#
+
+""" + gentitle("See f2py2e/cfuncs.py: typedefs_generated") + """
+#typedefs_generated#
+
+""" + gentitle("See f2py2e/cfuncs.py: cppmacros") + """
+#cppmacros#
+
+""" + gentitle("See f2py2e/cfuncs.py: cfuncs") + """
+#cfuncs#
+
+""" + gentitle("See f2py2e/cfuncs.py: userincludes") + """
+#userincludes#
+
+""" + gentitle("See f2py2e/capi_rules.py: usercode") + """
+#usercode#
+
+/* See f2py2e/rules.py */
+#externroutines#
+
+""" + gentitle("See f2py2e/capi_rules.py: usercode1") + """
+#usercode1#
+
+""" + gentitle("See f2py2e/cb_rules.py: buildcallback") + """
+#callbacks#
+
+""" + gentitle("See f2py2e/rules.py: buildapi") + """
+#body#
+
+""" + gentitle("See f2py2e/f90mod_rules.py: buildhooks") + """
+#f90modhooks#
+
+""" + gentitle("See f2py2e/rules.py: module_rules['modulebody']") + """
+
+""" + gentitle("See f2py2e/common_rules.py: buildhooks") + """
+#commonhooks#
+
+""" + gentitle("See f2py2e/rules.py") + """
+
+static FortranDataDef f2py_routine_defs[] = {
+#routine_defs#
+    {NULL}
+};
+
+static PyMethodDef f2py_module_methods[] = {
+#pymethoddef#
+    {NULL,NULL}
+};
+
+static struct PyModuleDef moduledef = {
+    PyModuleDef_HEAD_INIT,
+    "#modulename#",
+    NULL,
+    -1,
+    f2py_module_methods,
+    NULL,
+    NULL,
+    NULL,
+    NULL
+};
+
+PyMODINIT_FUNC PyInit_#modulename#(void) {
+    int i;
+    PyObject *m,*d, *s, *tmp;
+    m = #modulename#_module = PyModule_Create(&moduledef);
+    Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
+    import_array();
+    if (PyErr_Occurred())
+        {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
+    d = PyModule_GetDict(m);
+    s = PyUnicode_FromString(\"#f2py_version#\");
+    PyDict_SetItemString(d, \"__version__\", s);
+    Py_DECREF(s);
+    s = PyUnicode_FromString(
+        \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
+    PyDict_SetItemString(d, \"__doc__\", s);
+    Py_DECREF(s);
+    s = PyUnicode_FromString(\"""" + numpy_version + """\");
+    PyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
+    Py_DECREF(s);
+    #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
+    /*
+     * Store the error object inside the dict, so that it could get deallocated.
+     * (in practice, this is a module, so it likely will not and cannot.)
+     */
+    PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
+    Py_DECREF(#modulename#_error);
+    for(i=0;f2py_routine_defs[i].name!=NULL;i++) {
+        tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
+        PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
+        Py_DECREF(tmp);
+    }
+#initf2pywraphooks#
+#initf90modhooks#
+#initcommonhooks#
+#interface_usercode#
+
+#if Py_GIL_DISABLED
+    // signal whether this module supports running with the GIL disabled
+    PyUnstable_Module_SetGIL(m , #gil_used#);
+#endif
+
+#ifdef F2PY_REPORT_ATEXIT
+    if (!
PyErr_Occurred()) + on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); +#endif + + if (PyType_Ready(&PyFortran_Type) < 0) { + return NULL; + } + + return m; +} +#ifdef __cplusplus +} +#endif +""", + 'separatorsfor': {'latexdoc': '\n\n', + 'restdoc': '\n\n'}, + 'latexdoc': ['\\section{Module \\texttt{#texmodulename#}}\n', + '#modnote#\n', + '#latexdoc#'], + 'restdoc': ['Module #modulename#\n' + '=' * 80, + '\n#restdoc#'] +} + +defmod_rules = [ + {'body': '/*eof body*/', + 'method': '/*eof method*/', + 'externroutines': '/*eof externroutines*/', + 'routine_defs': '/*eof routine_defs*/', + 'initf90modhooks': '/*eof initf90modhooks*/', + 'initf2pywraphooks': '/*eof initf2pywraphooks*/', + 'initcommonhooks': '/*eof initcommonhooks*/', + 'latexdoc': '', + 'restdoc': '', + 'modnote': {hasnote: '#note#', l_not(hasnote): ''}, + } +] + +routine_rules = { + 'separatorsfor': sepdict, + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\"; +/* #declfortranroutine# */ +static PyObject *#apiname#(const PyObject *capi_self, + PyObject *capi_args, + PyObject *capi_keywds, + #functype# (*f2py_func)(#callprotoargument#)) { + PyObject * volatile capi_buildvalue = NULL; + volatile int f2py_success = 1; +#decl# + static char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL}; +#usercode# +#routdebugenter# +#ifdef F2PY_REPORT_ATEXIT +f2py_start_clock(); +#endif + if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\ + \"#argformat#|#keyformat##xaformat#:#pyname#\",\\ + capi_kwlist#args_capi##keys_capi##keys_xa#))\n return NULL; +#frompyobj# +/*end of frompyobj*/ +#ifdef F2PY_REPORT_ATEXIT +f2py_start_call_clock(); +#endif +#callfortranroutine# +if (PyErr_Occurred()) + f2py_success = 0; +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_call_clock(); +#endif +/*end of callfortranroutine*/ + if (f2py_success) { +#pyobjfrom# +/*end of pyobjfrom*/ + CFUNCSMESS(\"Building return value.\\n\"); + capi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#); +/*closepyobjfrom*/ +#closepyobjfrom# + } /*if (f2py_success) after callfortranroutine*/ +/*cleanupfrompyobj*/ +#cleanupfrompyobj# + if (capi_buildvalue == NULL) { +#routdebugfailure# + } else { +#routdebugleave# + } + CFUNCSMESS(\"Freeing memory.\\n\"); +#freemem# +#ifdef F2PY_REPORT_ATEXIT +f2py_stop_clock(); +#endif + return capi_buildvalue; +} +#endtitle# +""", + 'routine_defs': '#routine_def#', + 'initf2pywraphooks': '#initf2pywraphook#', + 'externroutines': '#declfortranroutine#', + 'doc': '#docreturn##name#(#docsignature#)', + 'docshort': '#docreturn##name#(#docsignatureshort#)', + 'docs': '" #docreturn##name#(#docsignature#)\\n"\n', + 'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'], + 'cppmacros': {debugcapi: '#define DEBUGCFUNCS'}, + 'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n', + """ +\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)} +#routnote# + +#latexdocstrsigns# +"""], + 'restdoc': ['Wrapped function ``#name#``\n' + '-' * 80, + + ] +} + +################## Rules for C/API function ############## + +rout_rules = [ + { # Init + 'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n', + 'routdebugleave': '\n', 'routdebugfailure': '\n', + 'setjmpbuf': ' || ', + 'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n', + 'docstrcbs': '\n', 'docstrsigns': '\\n"\n"', + 'latexdocstrsigns': '\n', + 'latexdocstrreq': '\n', 'latexdocstropt': '\n', + 'latexdocstrout': '\n', 'latexdocstrcbs': 
'\n', + }, + 'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '', + 'docsign': '', 'docsignopt': '', 'decl': '/*decl*/', + 'freemem': '/*freemem*/', + 'docsignshort': '', 'docsignoptshort': '', + 'docstrsigns': '', 'latexdocstrsigns': '', + 'docstrreq': '\\nParameters\\n----------', + 'docstropt': '\\nOther Parameters\\n----------------', + 'docstrout': '\\nReturns\\n-------', + 'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n', + 'latexdocstrreq': '\\noindent Required arguments:', + 'latexdocstropt': '\\noindent Optional arguments:', + 'latexdocstrout': '\\noindent Return objects:', + 'latexdocstrcbs': '\\noindent Call-back functions:', + 'args_capi': '', 'keys_capi': '', 'functype': '', + 'frompyobj': '/*frompyobj*/', + # this list will be reversed + 'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], + 'pyobjfrom': '/*pyobjfrom*/', + # this list will be reversed + 'closepyobjfrom': ['/*end of closepyobjfrom*/'], + 'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/', + 'routdebugenter': '/*routdebugenter*/', + 'routdebugfailure': '/*routdebugfailure*/', + 'callfortranroutine': '/*callfortranroutine*/', + 'argformat': '', 'keyformat': '', 'need_cfuncs': '', + 'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '', + 'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '', + 'initf2pywraphook': '', + 'routnote': {hasnote: '--- #note#', l_not(hasnote): ''}, + }, { + 'apiname': 'f2py_rout_#modulename#_#name#', + 'pyname': '#modulename#.#name#', + 'decl': '', + '_check': l_not(ismoduleroutine) + }, { + 'apiname': 'f2py_rout_#modulename#_#f90modulename#_#name#', + 'pyname': '#modulename#.#f90modulename#.#name#', + 'decl': '', + '_check': ismoduleroutine + }, { # Subroutine + 'functype': 'void', + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern void #fortranname#(#callprotoargument#);', + ismoduleroutine: '', + isdummyroutine: '' + }, + 'routine_def': { + l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isdummyroutine): + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; + /*(*f2py_func)(#callfortran#);*/'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: """ }"""} + ], + '_check': l_and(issubroutine, l_not(issubroutine_wrap)), + }, { # Wrapped function + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 
'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': isfunction_wrap, + }, { # Wrapped subroutine + 'functype': 'void', + 'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);', + isdummyroutine: '', + }, + + 'routine_def': { + l_not(l_or(ismoduleroutine, isdummyroutine)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_WRAPPEDFUNC#(#name_lower#,#NAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + ' (f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): ''' + { + extern void #F_FUNC#(#name_lower#,#NAME#)(void); + PyObject* o = PyDict_GetItemString(d,"#name#"); + tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL); + PyObject_SetAttrString(o,"_cpointer", tmp); + Py_DECREF(tmp); + s = PyUnicode_FromString("#name#"); + PyObject_SetAttrString(o,"__name__", s); + Py_DECREF(s); + } + '''}, + 'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']}, + 'callfortranroutine': [ + {debugcapi: [ + """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]}, + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' (*f2py_func)(#callfortran#);'}, + {hascallstatement: + ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'} + ], + '_check': issubroutine_wrap, + }, { # Function + 'functype': '#ctype#', + 'docreturn': {l_not(isintent_hide): '#rname#,'}, + 'docstrout': '#pydocsignout#', + 'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {hasresultnote: '--- #resultnote#'}], + 'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\ +#ifdef USESCOMPAQFORTRAN + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\"); +#else + fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +#endif +"""}, + {l_and(debugcapi, l_not(isstringfunction)): """\ + 
fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\"); +"""} + ], + '_check': l_and(isfunction, l_not(isfunction_wrap)) + }, { # Scalar function + 'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);', + isdummyroutine: '' + }, + 'routine_def': { + l_and(l_not(l_or(ismoduleroutine, isintent_c)), + l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)' + ' #F_FUNC#(#fortranname#,#FORTRANNAME#),' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): + (' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,' + ' (f2py_init_func)#apiname#,doc_#apiname#},'), + isdummyroutine: + ' {\"#name#\",-1,{{-1}},0,0,NULL,' + '(f2py_init_func)#apiname#,doc_#apiname#},', + }, + 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};', + l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'}, + {iscomplexfunction: + ' PyObject *#name#_return_value_capi = Py_None;'} + ], + 'callfortranroutine': [ + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'}, + {hascallstatement: ''' #callstatement#; +/* #name#_return_value = (*f2py_func)(#callfortran#);*/ +'''}, + {l_not(l_or(hascallstatement, isdummyroutine)) + : ' #name#_return_value = (*f2py_func)(#callfortran#);'}, + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {l_and(debugcapi, iscomplexfunction) + : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'}, + {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}], + 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'}, + 'need': [{l_not(isdummyroutine): 'F_FUNC'}, + {iscomplexfunction: 'pyobj_from_#ctype#1'}, + {islong_longfunction: 'long_long'}, + {islong_doublefunction: 'long_double'}], + 'returnformat': {l_not(isintent_hide): '#rformat#'}, + 'return': {iscomplexfunction: ',#name#_return_value_capi', + l_not(l_or(iscomplexfunction, isintent_hide)): ',#name#_return_value'}, + '_check': l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap)) + }, { # String function # in use for --no-wrap + 'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);', + 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},', + l_and(l_not(ismoduleroutine), isintent_c): + ' {\"#name#\",-1,{{-1}},0,0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},' + }, + 'decl': [' #ctype# #name#_return_value = NULL;', + ' int #name#_return_value_len = 0;'], + 'callfortran': '#name#_return_value,#name#_return_value_len,', + 'callfortranroutine': [' #name#_return_value_len = #rlength#;', + ' if ((#name#_return_value = (string)malloc(#name#_return_value_len+1) == NULL) {', + ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");', + ' f2py_success = 0;', + ' } else {', + " (#name#_return_value)[#name#_return_value_len] = '\\0';", + ' }', + ' if (f2py_success) {', + {hasexternals: """\ + if (#setjmpbuf#) { + f2py_success = 0; + } else {"""}, + {isthreadsafe: ' 
Py_BEGIN_ALLOW_THREADS'}, + """\ +#ifdef USESCOMPAQFORTRAN + (*f2py_func)(#callcompaqfortran#); +#else + (*f2py_func)(#callfortran#); +#endif +""", + {isthreadsafe: ' Py_END_ALLOW_THREADS'}, + {hasexternals: ' }'}, + {debugcapi: + ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'}, + ' } /* if (f2py_success) after (string)malloc */', + ], + 'returnformat': '#rformat#', + 'return': ',#name#_return_value', + 'freemem': ' STRINGFREE(#name#_return_value);', + 'need': ['F_FUNC', '#ctype#', 'STRINGFREE'], + '_check': l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete + }, + { # Debugging + 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");', + 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");', + 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");', + '_check': debugcapi + } +] + +################ Rules for arguments ################## + +typedef_need_dict = {islong_long: 'long_long', + islong_double: 'long_double', + islong_complex: 'complex_long_double', + isunsigned_char: 'unsigned_char', + isunsigned_short: 'unsigned_short', + isunsigned: 'unsigned', + isunsigned_long_long: 'unsigned_long_long', + isunsigned_chararray: 'unsigned_char', + isunsigned_shortarray: 'unsigned_short', + isunsigned_long_longarray: 'unsigned_long_long', + issigned_long_longarray: 'long_long', + isint1: 'signed_char', + ischaracter_or_characterarray: 'character', + } + +aux_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing auxiliary variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + 'need': typedef_need_dict, + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'need': {hasinitvalue: 'math.h'}, + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, + { + 'return': ',#varname#', + 'docstrout': '#pydocsignout#', + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': l_and(isscalar, l_not(iscomplex), isintent_out), + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': iscomplex + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ], + 'need': ['len..'], + '_check': isstring + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ], + 'need': ['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': l_or(isunsigned_chararray, isunsigned_char), + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # 
Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +arg_rules = [ + { + 'separatorsfor': sepdict + }, + { # Common + 'frompyobj': [' /* Processing variable #varname# */', + {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ], + 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */', + '_depend': '', + 'need': typedef_need_dict, + }, + # Doc signatures + { + 'docstropt': {l_and(isoptional, isintent_nothide): '#pydocsign#'}, + 'docstrreq': {l_and(isrequired, isintent_nothide): '#pydocsign#'}, + 'docstrout': {isintent_out: '#pydocsignout#'}, + 'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}', + {hasnote: '--- #note#'}]}, + 'latexdocstrout': {isintent_out: ['\\item[]{{}\\verb@#pydocsignout#@{}}', + {l_and(hasnote, isintent_hide): '--- #note#', + l_and(hasnote, isintent_nothide): '--- See above.'}]}, + 'depend': '' + }, + # Required/Optional arguments + { + 'kwlist': '"#varname#",', + 'docsign': '#varname#,', + '_check': l_and(isintent_nothide, l_not(isoptional)) + }, + { + 'kwlistopt': '"#varname#",', + 'docsignopt': '#varname#=#showinit#,', + 'docsignoptshort': '#varname#,', + '_check': l_and(isintent_nothide, isoptional) + }, + # Docstring/BuildValue + { + 'docreturn': '#outvarname#,', + 'returnformat': '#varrformat#', + '_check': isintent_out + }, + # Externals (call-back functions) + { # Common + 'docsignxa': {isintent_nothide: '#varname#_extra_args=(),'}, + 'docsignxashort': {isintent_nothide: '#varname#_extra_args,'}, + 'docstropt': {isintent_nothide: '#varname#_extra_args : input tuple, optional\\n Default: ()'}, + 'docstrcbs': '#cbdocstr#', + 'latexdocstrcbs': '\\item[] #cblatexdocstr#', + 'latexdocstropt': {isintent_nothide: '\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'}, + 'decl': [' #cbname#_t #varname#_cb = { Py_None, NULL, 0 };', + ' #cbname#_t *#varname#_cb_ptr = &#varname#_cb;', + ' PyTupleObject *#varname#_xa_capi = NULL;', + {l_not(isintent_callback): + ' #cbname#_typedef #varname#_cptr;'} + ], + 'kwlistxa': {isintent_nothide: '"#varname#_extra_args",'}, + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'xaformat': {isintent_nothide: 'O!'}, + 'args_capi': {isrequired: ',&#varname#_cb.capi'}, + 'keys_capi': {isoptional: ',&#varname#_cb.capi'}, + 'keys_xa': ',&PyTuple_Type,&#varname#_xa_capi', + 'setjmpbuf': '(setjmp(#varname#_cb.jmpbuf))', + 'callfortran': {l_not(isintent_callback): '#varname#_cptr,'}, + 'need': ['#cbname#', 'setjmp.h'], + '_check': isexternal + }, + { + 'frompyobj': [{l_not(isintent_callback): """\ +if(F2PyCapsule_Check(#varname#_cb.capi)) { + #varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_cb.capi); +} else { + #varname#_cptr = #cbname#; +} +"""}, {isintent_callback: """\ +if (#varname#_cb.capi==Py_None) { + #varname#_cb.capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\"); + if (#varname#_cb.capi) { + if (#varname#_xa_capi==NULL) { + if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) { + PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\"); + if (capi_tmp) { + #varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp); + Py_DECREF(capi_tmp); + } + else { + #varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\"); + } 
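+            /* PySequence_Tuple/Py_BuildValue above may still have failed;
+               the check below reports that case as a #modulename# error. */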
+ if (#varname#_xa_capi==NULL) { + PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\"); + return NULL; + } + } + } + } + if (#varname#_cb.capi==NULL) { + PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\"); + return NULL; + } +} +"""}, + """\ + if (create_cb_arglist(#varname#_cb.capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#varname#_cb.nofargs,&#varname#_cb.args_capi,\"failed in processing argument list for call-back #varname#.\")) { +""", + {debugcapi: ["""\ + fprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#varname#_cb.nofargs); + CFUNCSMESSPY(\"for #varname#=\",#varname#_cb.capi);""", + {l_not(isintent_callback): """ fprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]}, + """\ + CFUNCSMESS(\"Saving callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr);""", + ], + 'cleanupfrompyobj': + """\ + CFUNCSMESS(\"Restoring callback variables for `#varname#`.\\n\"); + #varname#_cb_ptr = swap_active_#cbname#(#varname#_cb_ptr); + Py_DECREF(#varname#_cb.args_capi); + }""", + 'need': ['SWAP', 'create_cb_arglist'], + '_check': isexternal, + '_depend': '' + }, + # Scalars (not complex) + { # Common + 'decl': ' #ctype# #varname# = 0;', + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + 'callfortran': {l_or(isintent_c, isattr_value): '#varname#,', l_not(l_or(isintent_c, isattr_value)): '&#varname#,'}, + 'return': {isintent_out: ',#varname#'}, + '_check': l_and(isscalar, l_not(iscomplex)) + }, { + 'need': {hasinitvalue: 'math.h'}, + '_check': l_and(isscalar, l_not(iscomplex)), + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + '_check': l_and(isscalar, l_not(iscomplex), l_not(isstring), + isintent_nothide) + }, { + 'frompyobj': [ + # hasinitvalue... + # if pyobj is None: + # varname = init + # else + # from_pyobj(varname) + # + # isoptional and noinitvalue... + # if pyobj is not None: + # from_pyobj(varname) + # else: + # varname is uninitialized + # + # ... 
+ # from_pyobj(varname) + # + {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else', + '_depend': ''}, + {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)', + '_depend': ''}, + {l_not(islogical): '''\ + f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#"); + if (f2py_success) {'''}, + {islogical: '''\ + #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi); + f2py_success = 1; + if (f2py_success) {'''}, + ], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/', + 'need': {l_not(islogical): '#ctype#_from_pyobj'}, + '_check': l_and(isscalar, l_not(iscomplex), isintent_nothide), + '_depend': '' + }, { # Hidden + 'frompyobj': {hasinitvalue: ' #varname# = #init#;'}, + 'need': typedef_need_dict, + '_check': l_and(isscalar, l_not(iscomplex), isintent_hide), + '_depend': '' + }, { # Common + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'}, + '_check': l_and(isscalar, l_not(iscomplex)), + '_depend': '' + }, + # Complex scalars + { # Common + 'decl': ' #ctype# #varname#;', + 'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'}, + 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + 'return': {isintent_out: ',#varname#_capi'}, + '_check': iscomplex + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi = Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#'}, + 'pyobjfrom': {isintent_inout: """\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#); + if (f2py_success) {"""}, + 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"}, + '_check': l_and(iscomplex, isintent_nothide) + }, { + 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'}, + {l_and(isoptional, l_not(hasinitvalue)) + : ' if (#varname#_capi != Py_None)'}, + ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");' + '\n if (f2py_success) {'], + 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/', + 'need': ['#ctype#_from_pyobj'], + '_check': l_and(iscomplex, isintent_nothide), + '_depend': '' + }, { # Hidden + 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'}, + '_check': l_and(iscomplex, isintent_hide) + }, { + 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'}, + '_check': l_and(iscomplex, isintent_hide), + '_depend': '' + }, { # Common + 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'}, + 'need': ['pyobj_from_#ctype#1'], + '_check': iscomplex + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'}, + '_check': iscomplex, + '_depend': '' + }, + # String + { # Common + 'decl': [' #ctype# #varname# = NULL;', + ' int slen(#varname#);', + ' PyObject *#varname#_capi = Py_None;'], + 'callfortran': '#varname#,', + 'callfortranappend': 'slen(#varname#),', + 'pyobjfrom': [ + {debugcapi: + ' fprintf(stderr,' + '"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + # The trailing null value for Fortran is blank. 
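+            # For intent(out) strings this replaces the trailing blanks that
+            # Fortran wrote with NULs so the buffer reads as a C string
+            # (e.g. STRINGPADN(s, slen(s), ' ', '\0') for a variable s);
+            # the frompyobj rule further below applies the inverse padding.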
+ {l_and(isintent_out, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + ], + 'return': {isintent_out: ',#varname#'}, + 'need': ['len..', + {l_and(isintent_out, l_not(isintent_c)): 'STRINGPADN'}], + '_check': isstring + }, { # Common + 'frompyobj': [ + """\ + slen(#varname#) = #elsize#; + f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,""" +"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#""" +"""`#varname#\' of #pyname# to C #ctype#\"); + if (f2py_success) {""", + # The trailing null value for Fortran is blank. + {l_not(isintent_c): + " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"}, + ], + 'cleanupfrompyobj': """\ + STRINGFREE(#varname#); + } /*if (f2py_success) of #varname#*/""", + 'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE', + {l_not(isintent_c): 'STRINGPADN'}], + '_check': isstring, + '_depend': '' + }, { # Not hidden + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + 'pyobjfrom': [ + {l_and(isintent_inout, l_not(isintent_c)): + " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"}, + {isintent_inout: '''\ + f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#, + slen(#varname#)); + if (f2py_success) {'''}], + 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'}, + 'need': {isintent_inout: 'try_pyarr_from_#ctype#', + l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'}, + '_check': l_and(isstring, isintent_nothide) + }, { # Hidden + '_check': l_and(isstring, isintent_hide) + }, { + 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'}, + '_check': isstring, + '_depend': '' + }, + # Array + { # Common + 'decl': [' #ctype# *#varname# = NULL;', + ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};', + ' const int #varname#_Rank = #rank#;', + ' PyArrayObject *capi_#varname#_as_array = NULL;', + ' int capi_#varname#_intent = 0;', + {isstringarray: ' int slen(#varname#) = 0;'}, + ], + 'callfortran': '#varname#,', + 'callfortranappend': {isstringarray: 'slen(#varname#),'}, + 'return': {isintent_out: ',capi_#varname#_as_array'}, + 'need': 'len..', + '_check': isarray + }, { # intent(overwrite) array + 'decl': ' int capi_overwrite_#varname# = 1;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=1,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1', + '_check': l_and(isarray, isintent_overwrite), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_overwrite), + '_depend': '', + }, + { # intent(copy) array + 'decl': ' int capi_overwrite_#varname# = 0;', + 'kwlistxa': '"overwrite_#varname#",', + 'xaformat': 'i', + 'keys_xa': ',&capi_overwrite_#varname#', + 'docsignxa': 'overwrite_#varname#=0,', + 'docsignxashort': 'overwrite_#varname#,', + 'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0', + '_check': l_and(isarray, isintent_copy), + }, { + 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);', + '_check': l_and(isarray, isintent_copy), + '_depend': '', + }, { + 'need': [{hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}], + '_check': isarray, + '_depend': '' + }, { # Not hidden + 'decl': ' PyObject *#varname#_capi 
= Py_None;', + 'argformat': {isrequired: 'O'}, + 'keyformat': {isoptional: 'O'}, + 'args_capi': {isrequired: ',&#varname#_capi'}, + 'keys_capi': {isoptional: ',&#varname#_capi'}, + '_check': l_and(isarray, isintent_nothide) + }, { + 'frompyobj': [ + ' #setdims#;', + ' capi_#varname#_intent |= #intent#;', + (' const char capi_errmess[] = "#modulename#.#pyname#:' + ' failed to create array from the #nth# `#varname#`";'), + {isintent_hide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,Py_None,capi_errmess);'}, + {isintent_nothide: + ' capi_#varname#_as_array = ndarray_from_pyobj(' + ' #atype#,#elsize#,#varname#_Dims,#varname#_Rank,' + ' capi_#varname#_intent,#varname#_capi,capi_errmess);'}, + """\ + if (capi_#varname#_as_array == NULL) { + PyObject* capi_err = PyErr_Occurred(); + if (capi_err == NULL) { + capi_err = #modulename#_error; + PyErr_SetString(capi_err, capi_errmess); + } + } else { + #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_as_array)); +""", + {isstringarray: + ' slen(#varname#) = f2py_itemsize(#varname#);'}, + {hasinitvalue: [ + {isintent_nothide: + ' if (#varname#_capi == Py_None) {'}, + {isintent_hide: ' {'}, + {iscomplexarray: ' #ctype# capi_c;'}, + """\ + int *_i,capi_i=0; + CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\"); + struct ForcombCache cache; + if (initforcomb(&cache, PyArray_DIMS(capi_#varname#_as_array), + PyArray_NDIM(capi_#varname#_as_array),1)) { + while ((_i = nextforcomb(&cache))) + #varname#[capi_i++] = #init#; /* fortran way */ + } else { + PyObject *exc, *val, *tb; + PyErr_Fetch(&exc, &val, &tb); + PyErr_SetString(exc ? exc : #modulename#_error, + \"Initialization of #nth# #varname# failed (initforcomb).\"); + npy_PyErr_ChainExceptionsCause(exc, val, tb); + f2py_success = 0; + } + } + if (f2py_success) {"""]}, + ], + 'cleanupfrompyobj': [ # note that this list will be reversed + ' } ' + '/* if (capi_#varname#_as_array == NULL) ... else of #varname# */', + {l_not(l_or(isintent_out, isintent_hide)): """\ + if((PyObject *)capi_#varname#_as_array!=#varname#_capi) { + Py_XDECREF(capi_#varname#_as_array); }"""}, + {l_and(isintent_hide, l_not(isintent_out)) + : """ Py_XDECREF(capi_#varname#_as_array);"""}, + {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'}, + ], + '_check': isarray, + '_depend': '' + }, + # Scalararray + { # Common + '_check': l_and(isarray, l_not(iscomplexarray)) + }, { # Not hidden + '_check': l_and(isarray, l_not(iscomplexarray), isintent_nothide) + }, + # Integer*1 array + {'need': '#ctype#', + '_check': isint1array, + '_depend': '' + }, + # Integer*-1 array + {'need': '#ctype#', + '_check': isunsigned_chararray, + '_depend': '' + }, + # Integer*-2 array + {'need': '#ctype#', + '_check': isunsigned_shortarray, + '_depend': '' + }, + # Integer*-8 array + {'need': '#ctype#', + '_check': isunsigned_long_longarray, + '_depend': '' + }, + # Complexarray + {'need': '#ctype#', + '_check': iscomplexarray, + '_depend': '' + }, + # Character + { + 'need': 'string', + '_check': ischaracter, + }, + # Character array + { + 'need': 'string', + '_check': ischaracterarray, + }, + # Stringarray + { + 'callfortranappend': {isarrayofstrings: 'flen(#varname#),'}, + 'need': 'string', + '_check': isstringarray + } +] + +################# Rules for checking ############### + +check_rules = [ + { + 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'}, + 'need': 'len..' 
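+        # Each CHECK* rule below opens a brace in 'frompyobj' that its
+        # 'cleanupfrompyobj' closes again; '_break' stops the rule scan
+        # after the first matching check.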
+    }, {
+        'frompyobj': '    CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
+        'cleanupfrompyobj': '    } /*CHECKSCALAR(#check#)*/',
+        'need': 'CHECKSCALAR',
+        '_check': l_and(isscalar, l_not(iscomplex)),
+        '_break': ''
+    }, {
+        'frompyobj': '    CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
+        'cleanupfrompyobj': '    } /*CHECKSTRING(#check#)*/',
+        'need': 'CHECKSTRING',
+        '_check': isstring,
+        '_break': ''
+    }, {
+        'need': 'CHECKARRAY',
+        'frompyobj': '    CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
+        'cleanupfrompyobj': '    } /*CHECKARRAY(#check#)*/',
+        '_check': isarray,
+        '_break': ''
+    }, {
+        'need': 'CHECKGENERIC',
+        'frompyobj': '    CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
+        'cleanupfrompyobj': '    } /*CHECKGENERIC(#check#)*/',
+    }
+]
+
+########## Applying the rules. No need to modify what follows #############
+
+#################### Build C/API module #######################
+
+
+def buildmodule(m, um):
+    """
+    Build the C/API module for the module block `m` and return a dict
+    mapping output kinds ('csrc', 'fsrc', 'ltx') to generated file paths.
+    """
+    outmess(f"  Building module \"{m['name']}\"...\n")
+    ret = {}
+    mod_rules = defmod_rules[:]
+    vrd = capi_maps.modsign2map(m)
+    rd = dictappend({'f2py_version': f2py_version}, vrd)
+    funcwrappers = []
+    funcwrappers2 = []  # F90 codes
+    for n in m['interfaced']:
+        nb = None
+        for bi in m['body']:
+            if bi['block'] not in ['interface', 'abstract interface']:
+                errmess('buildmodule: Expected interface block. Skipping.\n')
+                continue
+            for b in bi['body']:
+                if b['name'] == n:
+                    nb = b
+                    break
+
+        if not nb:
+            print(
+                f'buildmodule: Could not find the body of interfaced routine "{n}". Skipping.\n', file=sys.stderr)
+            continue
+        nb_list = [nb]
+        if 'entry' in nb:
+            for k, a in nb['entry'].items():
+                nb1 = copy.deepcopy(nb)
+                del nb1['entry']
+                nb1['name'] = k
+                nb1['args'] = a
+                nb_list.append(nb1)
+        for nb in nb_list:
+            # requiresf90wrapper must be called before buildapi as it
+            # rewrites assumed shape arrays as automatic arrays.
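+            # (Assumed-shape dummies carry array descriptors that cannot
+            # cross the plain F77/C call interface, so such routines get a
+            # generated F90 wrapper that receives explicit shapes instead.)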
+            isf90 = requiresf90wrapper(nb)
+            # options is in scope here
+            if options['emptygen']:
+                b_path = options['buildpath']
+                m_name = vrd['modulename']
+                outmess('    Generating possibly empty wrappers\n')
+                Path(f"{b_path}/{vrd['coutput']}").touch()
+                if isf90:
+                    # f77 + f90 wrappers
+                    outmess(f'    Maybe empty "{m_name}-f2pywrappers2.f90"\n')
+                    Path(f'{b_path}/{m_name}-f2pywrappers2.f90').touch()
+                    outmess(f'    Maybe empty "{m_name}-f2pywrappers.f"\n')
+                    Path(f'{b_path}/{m_name}-f2pywrappers.f').touch()
+                else:
+                    # only f77 wrappers
+                    outmess(f'    Maybe empty "{m_name}-f2pywrappers.f"\n')
+                    Path(f'{b_path}/{m_name}-f2pywrappers.f').touch()
+            api, wrap = buildapi(nb)
+            if wrap:
+                if isf90:
+                    funcwrappers2.append(wrap)
+                else:
+                    funcwrappers.append(wrap)
+            ar = applyrules(api, vrd)
+            rd = dictappend(rd, ar)
+
+    # Construct COMMON block support
+    cr, wrap = common_rules.buildhooks(m)
+    if wrap:
+        funcwrappers.append(wrap)
+    ar = applyrules(cr, vrd)
+    rd = dictappend(rd, ar)
+
+    # Construct F90 module support
+    mr, wrap = f90mod_rules.buildhooks(m)
+    if wrap:
+        funcwrappers2.append(wrap)
+    ar = applyrules(mr, vrd)
+    rd = dictappend(rd, ar)
+
+    for u in um:
+        ar = use_rules.buildusevars(u, m['use'][u['name']])
+        rd = dictappend(rd, ar)
+
+    needs = cfuncs.get_needs()
+    # Add mapped definitions
+    needs['typedefs'] += [cvar for cvar in capi_maps.f2cmap_mapped  #
+                          if cvar in typedef_need_dict.values()]
+    code = {}
+    for n in needs.keys():
+        code[n] = []
+        for k in needs[n]:
+            c = ''
+            if k in cfuncs.includes0:
+                c = cfuncs.includes0[k]
+            elif k in cfuncs.includes:
+                c = cfuncs.includes[k]
+            elif k in cfuncs.userincludes:
+                c = cfuncs.userincludes[k]
+            elif k in cfuncs.typedefs:
+                c = cfuncs.typedefs[k]
+            elif k in cfuncs.typedefs_generated:
+                c = cfuncs.typedefs_generated[k]
+            elif k in cfuncs.cppmacros:
+                c = cfuncs.cppmacros[k]
+            elif k in cfuncs.cfuncs:
+                c = cfuncs.cfuncs[k]
+            elif k in cfuncs.callbacks:
+                c = cfuncs.callbacks[k]
+            elif k in cfuncs.f90modhooks:
+                c = cfuncs.f90modhooks[k]
+            elif k in cfuncs.commonhooks:
+                c = cfuncs.commonhooks[k]
+            else:
+                errmess(f'buildmodule: unknown need {repr(k)}.\n')
+                continue
+            code[n].append(c)
+    mod_rules.append(code)
+    for r in mod_rules:
+        if ('_check' in r and r['_check'](m)) or ('_check' not in r):
+            ar = applyrules(r, vrd, m)
+            rd = dictappend(rd, ar)
+    ar = applyrules(module_rules, rd)
+
+    fn = os.path.join(options['buildpath'], vrd['coutput'])
+    ret['csrc'] = fn
+    with open(fn, 'w') as f:
+        f.write(ar['modulebody'].replace('\t', 2 * ' '))
+    outmess(f"  Wrote C/API module \"{m['name']}\" to file \"{fn}\"\n")
+
+    if options['dorestdoc']:
+        fn = os.path.join(
+            options['buildpath'], vrd['modulename'] + 'module.rest')
+        with open(fn, 'w') as f:
+            f.write('.. -*- rest -*-\n')
+            f.write('\n'.join(ar['restdoc']))
+        outmess('    ReST Documentation is saved to file "%s/%smodule.rest"\n' %
+                (options['buildpath'], vrd['modulename']))
+    if options['dolatexdoc']:
+        fn = os.path.join(
+            options['buildpath'], vrd['modulename'] + 'module.tex')
+        ret['ltx'] = fn
+        with open(fn, 'w') as f:
+            f.write(
+                f'% This file is auto-generated with f2py (version:{f2py_version})\n')
+            if 'shortlatex' not in options:
+                f.write(
+                    '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
+            f.write('\n'.join(ar['latexdoc']))
+            if 'shortlatex' not in options:
+                f.write('\\end{document}')
+        outmess('    Documentation is saved to file "%s/%smodule.tex"\n' %
+                (options['buildpath'], vrd['modulename']))
+    if funcwrappers:
+        wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
+        ret['fsrc'] = wn
+        with open(wn, 'w') as f:
+            f.write('C     -*- fortran -*-\n')
+            f.write(
+                f'C     This file is autogenerated with f2py (version:{f2py_version})\n')
+            f.write(
+                'C     It contains Fortran 77 wrappers to fortran functions.\n')
+            lines = []
+            for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
+                if 0 <= l.find('!') < 66:
+                    # don't split comment lines
+                    lines.append(l + '\n')
+                elif l and l[0] == ' ':
+                    # fixed-form continuation: the '&' must land in column 6
+                    while len(l) >= 66:
+                        lines.append(l[:66] + '\n     &')
+                        l = l[66:]
+                    lines.append(l + '\n')
+                else:
+                    lines.append(l + '\n')
+            lines = ''.join(lines).replace('\n     &\n', '\n')
+            f.write(lines)
+        outmess(f'    Fortran 77 wrappers are saved to "{wn}"\n')
+    if funcwrappers2:
+        wn = os.path.join(
+            options['buildpath'], f"{vrd['modulename']}-f2pywrappers2.f90")
+        ret['fsrc'] = wn
+        with open(wn, 'w') as f:
+            f.write('! -*- f90 -*-\n')
+            f.write(
+                f'! This file is autogenerated with f2py (version:{f2py_version})\n')
+            f.write(
+                '!     It contains Fortran 90 wrappers to fortran functions.\n')
+            lines = []
+            for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
+                if 0 <= l.find('!') < 72:
+                    # don't split comment lines
+                    lines.append(l + '\n')
+                elif len(l) > 72 and l[0] == ' ':
+                    # free-form continuation: trailing '&', then leading '&'
+                    lines.append(l[:72] + '&\n     &')
+                    l = l[72:]
+                    while len(l) > 66:
+                        lines.append(l[:66] + '&\n     &')
+                        l = l[66:]
+                    lines.append(l + '\n')
+                else:
+                    lines.append(l + '\n')
+            lines = ''.join(lines).replace('\n     &\n', '\n')
+            f.write(lines)
+        outmess(f'    Fortran 90 wrappers are saved to "{wn}"\n')
+    return ret
+
+################## Build C/API function #############
+
+
+stnd = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th',
+        6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'}
+
+
+def buildapi(rout):
+    rout, wrap = func2subr.assubr(rout)
+    args, depargs = getargs2(rout)
+    capi_maps.depargs = depargs
+    var = rout['vars']
+
+    if ismoduleroutine(rout):
+        outmess('        Constructing wrapper function "%s.%s"...\n' %
+                (rout['modulename'], rout['name']))
+    else:
+        outmess(f"    Constructing wrapper function \"{rout['name']}\"...\n")
+    # Routine
+    vrd = capi_maps.routsign2map(rout)
+    rd = dictappend({}, vrd)
+    for r in rout_rules:
+        if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
+            ar = applyrules(r, vrd, rout)
+            rd = dictappend(rd, ar)
+
+    # Args
+    nth, nthk = 0, 0
+    savevrd = {}
+    for a in args:
+        vrd = capi_maps.sign2map(a, var[a])
+        if isintent_aux(var[a]):
+            _rules = aux_rules
+        else:
+            _rules = arg_rules
+        if not isintent_hide(var[a]):
+            if not isoptional(var[a]):
+                nth = nth + 1
+                vrd['nth'] = repr(nth) + stnd[nth % 10] + ' argument'
+            else:
+                nthk = nthk + 1
+                vrd['nth'] = repr(nthk) + stnd[nthk % 10] + ' keyword'
+        else:
+            vrd['nth'] = 'hidden'
+        savevrd[a] = vrd
+        for r in _rules:
+            if '_depend' in r:
+                continue
+            if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
+                ar = applyrules(r, vrd, var[a])
+                rd = dictappend(rd, ar)
+            if '_break' in r:
+                break
+    for a in depargs:
+        if isintent_aux(var[a]):
+            _rules = aux_rules
+        else:
+            _rules = arg_rules
+        vrd = savevrd[a]
+        for r in _rules:
+            if '_depend' not in r:
+                continue
+            if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
+                ar = applyrules(r, vrd, var[a])
+                rd = dictappend(rd, ar)
+            if '_break' in r:
+                break
+        if 'check' in var[a]:
+            for c in var[a]['check']:
+                vrd['check'] = c
+                ar = applyrules(check_rules, vrd, var[a])
+                rd = dictappend(rd, ar)
+    if isinstance(rd['cleanupfrompyobj'], list):
+        rd['cleanupfrompyobj'].reverse()
+    if isinstance(rd['closepyobjfrom'], list):
+        rd['closepyobjfrom'].reverse()
+    rd['docsignature'] = stripcomma(replace('#docsign##docsignopt##docsignxa#',
+                                            {'docsign': rd['docsign'],
+                                             'docsignopt': rd['docsignopt'],
+                                             'docsignxa': rd['docsignxa']}))
+    optargs = stripcomma(replace('#docsignopt##docsignxa#',
+                                 {'docsignxa': rd['docsignxashort'],
+                                  'docsignopt': rd['docsignoptshort']}
+                                 ))
+    if optargs == '':
+        rd['docsignatureshort'] = stripcomma(
+            replace('#docsign#', {'docsign': rd['docsign']}))
+    else:
+        rd['docsignatureshort'] = replace('#docsign#[#docsignopt#]',
+                                          {'docsign': rd['docsign'],
+                                           'docsignopt': optargs,
+                                           })
+    rd['latexdocsignatureshort'] = rd['docsignatureshort'].replace('_', '\\_')
+    rd['latexdocsignatureshort'] = rd[
+        'latexdocsignatureshort'].replace(',', ', ')
+    cfs = stripcomma(replace('#callfortran##callfortranappend#', {
+        'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']}))
+    if len(rd['callfortranappend']) > 1:
+        rd['callcompaqfortran'] = stripcomma(replace('#callfortran#
0,#callfortranappend#', { + 'callfortran': rd['callfortran'], 'callfortranappend': rd['callfortranappend']})) + else: + rd['callcompaqfortran'] = cfs + rd['callfortran'] = cfs + if isinstance(rd['docreturn'], list): + rd['docreturn'] = stripcomma( + replace('#docreturn#', {'docreturn': rd['docreturn']})) + ' = ' + rd['docstrsigns'] = [] + rd['latexdocstrsigns'] = [] + for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']: + if k in rd and isinstance(rd[k], list): + rd['docstrsigns'] = rd['docstrsigns'] + rd[k] + k = 'latex' + k + if k in rd and isinstance(rd[k], list): + rd['latexdocstrsigns'] = rd['latexdocstrsigns'] + rd[k][0:1] +\ + ['\\begin{description}'] + rd[k][1:] +\ + ['\\end{description}'] + + ar = applyrules(routine_rules, rd) + if ismoduleroutine(rout): + outmess(f" {ar['docshort']}\n") + else: + outmess(f" {ar['docshort']}\n") + return ar, wrap + + +#################### EOF rules.py ####################### diff --git a/python/numpy/f2py/rules.pyi b/python/numpy/f2py/rules.pyi new file mode 100644 index 000000000..aa91e9426 --- /dev/null +++ b/python/numpy/f2py/rules.pyi @@ -0,0 +1,43 @@ +from collections.abc import Callable, Iterable, Mapping +from typing import Any, Final, TypeAlias +from typing import Literal as L + +from typing_extensions import TypeVar + +from .__version__ import version +from .auxfuncs import _Bool, _Var + +### + +_VT = TypeVar("_VT", default=str) + +_Predicate: TypeAlias = Callable[[_Var], _Bool] +_RuleDict: TypeAlias = dict[str, _VT] +_DefDict: TypeAlias = dict[_Predicate, _VT] + +### + +f2py_version: Final = version +numpy_version: Final = version + +options: Final[dict[str, bool]] = ... +sepdict: Final[dict[str, str]] = ... + +generationtime: Final[int] = ... +typedef_need_dict: Final[_DefDict[str]] = ... + +module_rules: Final[_RuleDict[str | list[str] | _RuleDict]] = ... +routine_rules: Final[_RuleDict[str | list[str] | _DefDict | _RuleDict]] = ... +defmod_rules: Final[list[_RuleDict[str | _DefDict]]] = ... +rout_rules: Final[list[_RuleDict[str | Any]]] = ... +aux_rules: Final[list[_RuleDict[str | Any]]] = ... +arg_rules: Final[list[_RuleDict[str | Any]]] = ... +check_rules: Final[list[_RuleDict[str | Any]]] = ... + +stnd: Final[dict[L[1, 2, 3, 4, 5, 6, 7, 8, 9, 0], L["st", "nd", "rd", "th"]]] = ... + +def buildmodule(m: Mapping[str, str | Any], um: Iterable[Mapping[str, str | Any]]) -> _RuleDict: ... +def buildapi(rout: Mapping[str, str]) -> tuple[_RuleDict, str]: ... 
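+
+# Note: buildapi() returns a (rule-dict, fortran-wrapper-source) pair; the
+# wrapper source (empty when no wrapper is needed) is collected by
+# buildmodule() into the generated f2py wrapper files.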
+
+# namespace pollution
+k: str
diff --git a/python/numpy/f2py/setup.cfg b/python/numpy/f2py/setup.cfg
new file mode 100644
index 000000000..14669544c
--- /dev/null
+++ b/python/numpy/f2py/setup.cfg
@@ -0,0 +1,3 @@
+[bdist_rpm]
+doc_files = docs/
+ tests/
\ No newline at end of file
diff --git a/python/numpy/f2py/src/fortranobject.c b/python/numpy/f2py/src/fortranobject.c
new file mode 100644
index 000000000..5c2b4bdf0
--- /dev/null
+++ b/python/numpy/f2py/src/fortranobject.c
@@ -0,0 +1,1436 @@
+#define FORTRANOBJECT_C
+#include "fortranobject.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+  This file implements: FortranObject, array_from_pyobj, copy_ND_array
+
+  Author: Pearu Peterson <pearu@cens.ioc.ee>
+  $Revision: 1.52 $
+  $Date: 2005/07/11 07:44:20 $
+*/
+
+int
+F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj)
+{
+    if (obj == NULL) {
+        fprintf(stderr, "Error loading %s\n", name);
+        if (PyErr_Occurred()) {
+            PyErr_Print();
+            PyErr_Clear();
+        }
+        return -1;
+    }
+    return PyDict_SetItemString(dict, name, obj);
+}
+
+/*
+ * Python-only fallback for thread-local callback pointers
+ */
+void *
+F2PySwapThreadLocalCallbackPtr(char *key, void *ptr)
+{
+    PyObject *local_dict, *value;
+    void *prev;
+
+    local_dict = PyThreadState_GetDict();
+    if (local_dict == NULL) {
+        Py_FatalError(
+                "F2PySwapThreadLocalCallbackPtr: PyThreadState_GetDict "
+                "failed");
+    }
+
+    value = PyDict_GetItemString(local_dict, key);
+    if (value != NULL) {
+        prev = PyLong_AsVoidPtr(value);
+        if (PyErr_Occurred()) {
+            Py_FatalError(
+                    "F2PySwapThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+        }
+    }
+    else {
+        prev = NULL;
+    }
+
+    value = PyLong_FromVoidPtr((void *)ptr);
+    if (value == NULL) {
+        Py_FatalError(
+                "F2PySwapThreadLocalCallbackPtr: PyLong_FromVoidPtr failed");
+    }
+
+    if (PyDict_SetItemString(local_dict, key, value) != 0) {
+        Py_FatalError(
+                "F2PySwapThreadLocalCallbackPtr: PyDict_SetItemString failed");
+    }
+
+    Py_DECREF(value);
+
+    return prev;
+}
+
+void *
+F2PyGetThreadLocalCallbackPtr(char *key)
+{
+    PyObject *local_dict, *value;
+    void *prev;
+
+    local_dict = PyThreadState_GetDict();
+    if (local_dict == NULL) {
+        Py_FatalError(
+                "F2PyGetThreadLocalCallbackPtr: PyThreadState_GetDict failed");
+    }
+
+    value = PyDict_GetItemString(local_dict, key);
+    if (value != NULL) {
+        prev = PyLong_AsVoidPtr(value);
+        if (PyErr_Occurred()) {
+            Py_FatalError(
+                    "F2PyGetThreadLocalCallbackPtr: PyLong_AsVoidPtr failed");
+        }
+    }
+    else {
+        prev = NULL;
+    }
+
+    return prev;
+}
+
+static PyArray_Descr *
+get_descr_from_type_and_elsize(const int type_num, const int elsize) {
+    PyArray_Descr * descr = PyArray_DescrFromType(type_num);
+    if (type_num == NPY_STRING) {
+        // PyArray_DescrFromType returns descr with elsize = 0.
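+        // (A fresh descriptor copy is made below before setting the item
+        // size: the builtin NPY_STRING descr is shared and must keep
+        // elsize == 0.)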
+ PyArray_DESCR_REPLACE(descr); + if (descr == NULL) { + return NULL; + } + PyDataType_SET_ELSIZE(descr, elsize); + } + return descr; +} + +/************************* FortranObject *******************************/ + +typedef PyObject *(*fortranfunc)(PyObject *, PyObject *, PyObject *, void *); + +PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init) +{ + int i; + PyFortranObject *fp = NULL; + PyObject *v = NULL; + if (init != NULL) { /* Initialize F90 module objects */ + (*(init))(); + } + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) { + return NULL; + } + if ((fp->dict = PyDict_New()) == NULL) { + Py_DECREF(fp); + return NULL; + } + fp->len = 0; + while (defs[fp->len].name != NULL) { + fp->len++; + } + if (fp->len == 0) { + goto fail; + } + fp->defs = defs; + for (i = 0; i < fp->len; i++) { + if (fp->defs[i].rank == -1) { /* Is Fortran routine */ + v = PyFortranObject_NewAsAttr(&(fp->defs[i])); + if (v == NULL) { + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + else if ((fp->defs[i].data) != + NULL) { /* Is Fortran variable or array (not allocatable) */ + PyArray_Descr * + descr = get_descr_from_type_and_elsize(fp->defs[i].type, + fp->defs[i].elsize); + if (descr == NULL) { + goto fail; + } + v = PyArray_NewFromDescr(&PyArray_Type, descr, fp->defs[i].rank, + fp->defs[i].dims.d, NULL, fp->defs[i].data, + NPY_ARRAY_FARRAY, NULL); + if (v == NULL) { + Py_DECREF(descr); + goto fail; + } + PyDict_SetItemString(fp->dict, fp->defs[i].name, v); + Py_XDECREF(v); + } + } + return (PyObject *)fp; +fail: + Py_XDECREF(fp); + return NULL; +} + +PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs) +{ /* used for calling F90 module routines */ + PyFortranObject *fp = NULL; + fp = PyObject_New(PyFortranObject, &PyFortran_Type); + if (fp == NULL) + return NULL; + if ((fp->dict = PyDict_New()) == NULL) { + PyObject_Del(fp); + return NULL; + } + fp->len = 1; + fp->defs = defs; + if (defs->rank == -1) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("function %s", defs->name)); + } else if (defs->rank == 0) { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("scalar %s", defs->name)); + } else { + PyDict_SetItemString(fp->dict, "__name__", PyUnicode_FromFormat("array %s", defs->name)); + } + return (PyObject *)fp; +} + +/* Fortran methods */ + +static void +fortran_dealloc(PyFortranObject *fp) +{ + Py_XDECREF(fp->dict); + PyObject_Del(fp); +} + +/* Returns number of bytes consumed from buf, or -1 on error. 
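+   On success, buf holds a description of the form "array(d1,...,dn)",
+   with ", not allocated" appended when def.data is NULL.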
*/ +static Py_ssize_t +format_def(char *buf, Py_ssize_t size, FortranDataDef def) +{ + char *p = buf; + int i; + npy_intp n; + + n = PyOS_snprintf(p, size, "array(%" NPY_INTP_FMT, def.dims.d[0]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + + for (i = 1; i < def.rank; i++) { + n = PyOS_snprintf(p, size, ",%" NPY_INTP_FMT, def.dims.d[i]); + if (n < 0 || n >= size) { + return -1; + } + p += n; + size -= n; + } + + if (size <= 0) { + return -1; + } + + *p++ = ')'; + size--; + + if (def.data == NULL) { + static const char notalloc[] = ", not allocated"; + if ((size_t)size < sizeof(notalloc)) { + return -1; + } + memcpy(p, notalloc, sizeof(notalloc)); + p += sizeof(notalloc); + size -= sizeof(notalloc); + } + + return p - buf; +} + +static PyObject * +fortran_doc(FortranDataDef def) +{ + char *buf, *p; + PyObject *s = NULL; + Py_ssize_t n, origsize, size = 100; + + if (def.doc != NULL) { + size += strlen(def.doc); + } + origsize = size; + buf = p = (char *)PyMem_Malloc(size); + if (buf == NULL) { + return PyErr_NoMemory(); + } + + if (def.rank == -1) { + if (def.doc) { + n = strlen(def.doc); + if (n > size) { + goto fail; + } + memcpy(p, def.doc, n); + p += n; + size -= n; + } + else { + n = PyOS_snprintf(p, size, "%s - no docs available", def.name); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + } + } + else { + PyArray_Descr *d = PyArray_DescrFromType(def.type); + n = PyOS_snprintf(p, size, "%s : '%c'-", def.name, d->type); + Py_DECREF(d); + if (n < 0 || n >= size) { + goto fail; + } + p += n; + size -= n; + + if (def.data == NULL) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else if (def.rank > 0) { + n = format_def(p, size, def); + if (n < 0) { + goto fail; + } + p += n; + size -= n; + } + else { + n = strlen("scalar"); + if (size < n) { + goto fail; + } + memcpy(p, "scalar", n); + p += n; + size -= n; + } + } + if (size <= 1) { + goto fail; + } + *p++ = '\n'; + size--; + + /* p now points one beyond the last character of the string in buf */ + s = PyUnicode_FromStringAndSize(buf, p - buf); + + PyMem_Free(buf); + return s; + +fail: + fprintf(stderr, + "fortranobject.c: fortran_doc: len(p)=%zd>%zd=size:" + " too long docstring required, increase size\n", + p - buf, origsize); + PyMem_Free(buf); + return NULL; +} + +static FortranDataDef *save_def; /* save pointer of an allocatable array */ +static void +set_data(char *d, npy_intp *f) +{ /* callback from Fortran */ + if (*f) /* In fortran f=allocated(d) */ + save_def->data = d; + else + save_def->data = NULL; + /* printf("set_data: d=%p,f=%d\n",d,*f); */ +} + +static PyObject * +fortran_getattr(PyFortranObject *fp, char *name) +{ + int i, j, k, flag; + if (fp->dict != NULL) { + // python 3.13 added PyDict_GetItemRef +#if PY_VERSION_HEX < 0x030D0000 + PyObject *v = _PyDict_GetItemStringWithError(fp->dict, name); + if (v == NULL && PyErr_Occurred()) { + return NULL; + } + else if (v != NULL) { + Py_INCREF(v); + return v; + } +#else + PyObject *v; + int result = PyDict_GetItemStringRef(fp->dict, name, &v); + if (result == -1) { + return NULL; + } + else if (result == 1) { + return v; + } +#endif + + } + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) + if (fp->defs[i].rank != -1) { /* F90 allocatable array */ + if (fp->defs[i].func == NULL) + return NULL; + for (k = 0; k < fp->defs[i].rank; ++k) fp->defs[i].dims.d[k] = -1; + save_def = &fp->defs[i]; + (*(fp->defs[i].func))(&fp->defs[i].rank, 
fp->defs[i].dims.d, + set_data, &flag); + if (flag == 2) + k = fp->defs[i].rank + 1; + else + k = fp->defs[i].rank; + if (fp->defs[i].data != NULL) { /* array is allocated */ + PyObject *v = PyArray_New( + &PyArray_Type, k, fp->defs[i].dims.d, fp->defs[i].type, + NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY, NULL); + if (v == NULL) + return NULL; + /* Py_INCREF(v); */ + return v; + } + else { /* array is not allocated */ + Py_RETURN_NONE; + } + } + if (strcmp(name, "__dict__") == 0) { + Py_INCREF(fp->dict); + return fp->dict; + } + if (strcmp(name, "__doc__") == 0) { + PyObject *s = PyUnicode_FromString(""), *s2, *s3; + for (i = 0; i < fp->len; i++) { + s2 = fortran_doc(fp->defs[i]); + s3 = PyUnicode_Concat(s, s2); + Py_DECREF(s2); + Py_DECREF(s); + s = s3; + } + if (PyDict_SetItemString(fp->dict, name, s)) + return NULL; + return s; + } + if ((strcmp(name, "_cpointer") == 0) && (fp->len == 1)) { + PyObject *cobj = + F2PyCapsule_FromVoidPtr((void *)(fp->defs[0].data), NULL); + if (PyDict_SetItemString(fp->dict, name, cobj)) + return NULL; + return cobj; + } + PyObject *str, *ret; + str = PyUnicode_FromString(name); + ret = PyObject_GenericGetAttr((PyObject *)fp, str); + Py_DECREF(str); + return ret; +} + +static int +fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) +{ + int i, j, flag; + PyArrayObject *arr = NULL; + for (i = 0, j = 1; i < fp->len && (j = strcmp(name, fp->defs[i].name)); + i++) + ; + if (j == 0) { + if (fp->defs[i].rank == -1) { + PyErr_SetString(PyExc_AttributeError, + "over-writing fortran routine"); + return -1; + } + if (fp->defs[i].func != NULL) { /* is allocatable array */ + npy_intp dims[F2PY_MAX_DIMS]; + int k; + save_def = &fp->defs[i]; + if (v != Py_None) { /* set new value (reallocate if needed -- + see f2py generated code for more + details ) */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + if ((arr = array_from_pyobj(fp->defs[i].type, dims, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + (*(fp->defs[i].func))(&fp->defs[i].rank, PyArray_DIMS(arr), + set_data, &flag); + } + else { /* deallocate */ + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = 0; + (*(fp->defs[i].func))(&fp->defs[i].rank, dims, set_data, + &flag); + for (k = 0; k < fp->defs[i].rank; k++) dims[k] = -1; + } + memcpy(fp->defs[i].dims.d, dims, + fp->defs[i].rank * sizeof(npy_intp)); + } + else { /* not allocatable array */ + if ((arr = array_from_pyobj(fp->defs[i].type, fp->defs[i].dims.d, + fp->defs[i].rank, F2PY_INTENT_IN, + v)) == NULL) + return -1; + } + if (fp->defs[i].data != + NULL) { /* copy Python object to Fortran array */ + npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d, + PyArray_NDIM(arr)); + if (s == -1) + s = PyArray_MultiplyList(PyArray_DIMS(arr), PyArray_NDIM(arr)); + if (s < 0 || (memcpy(fp->defs[i].data, PyArray_DATA(arr), + s * PyArray_ITEMSIZE(arr))) == NULL) { + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + return -1; + } + if ((PyObject *)arr != v) { + Py_DECREF(arr); + } + } + else + return (fp->defs[i].func == NULL ? 
-1 : 0);
+        return 0; /* successful */
+    }
+    if (fp->dict == NULL) {
+        fp->dict = PyDict_New();
+        if (fp->dict == NULL)
+            return -1;
+    }
+    if (v == NULL) {
+        int rv = PyDict_DelItemString(fp->dict, name);
+        if (rv < 0)
+            PyErr_SetString(PyExc_AttributeError,
+                            "delete non-existing fortran attribute");
+        return rv;
+    }
+    else
+        return PyDict_SetItemString(fp->dict, name, v);
+}
+
+static PyObject *
+fortran_call(PyFortranObject *fp, PyObject *arg, PyObject *kw)
+{
+    int i = 0;
+    /* printf("fortran call
+       name=%s,func=%p,data=%p,%p\n",fp->defs[i].name,
+       fp->defs[i].func,fp->defs[i].data,&fp->defs[i].data); */
+    if (fp->defs[i].rank == -1) { /* is Fortran routine */
+        if (fp->defs[i].func == NULL) {
+            PyErr_Format(PyExc_RuntimeError, "no function to call");
+            return NULL;
+        }
+        else if (fp->defs[i].data == NULL)
+            /* dummy routine */
+            return (*((fortranfunc)(fp->defs[i].func)))((PyObject *)fp, arg,
+                                                        kw, NULL);
+        else
+            return (*((fortranfunc)(fp->defs[i].func)))(
+                    (PyObject *)fp, arg, kw, (void *)fp->defs[i].data);
+    }
+    PyErr_Format(PyExc_TypeError, "this fortran object is not callable");
+    return NULL;
+}
+
+static PyObject *
+fortran_repr(PyFortranObject *fp)
+{
+    PyObject *name = NULL, *repr = NULL;
+    name = PyObject_GetAttrString((PyObject *)fp, "__name__");
+    PyErr_Clear();
+    if (name != NULL && PyUnicode_Check(name)) {
+        repr = PyUnicode_FromFormat("<fortran %U>", name);
+    }
+    else {
+        repr = PyUnicode_FromString("<fortran object>");
+    }
+    Py_XDECREF(name);
+    return repr;
+}
+
+PyTypeObject PyFortran_Type = {
+        PyVarObject_HEAD_INIT(NULL, 0)
+        .tp_name = "fortran",
+        .tp_basicsize = sizeof(PyFortranObject),
+        .tp_dealloc = (destructor)fortran_dealloc,
+        .tp_getattr = (getattrfunc)fortran_getattr,
+        .tp_setattr = (setattrfunc)fortran_setattr,
+        .tp_repr = (reprfunc)fortran_repr,
+        .tp_call = (ternaryfunc)fortran_call,
+};
+
+/************************* f2py_report_atexit *******************************/
+
+#ifdef F2PY_REPORT_ATEXIT
+static int passed_time = 0;
+static int passed_counter = 0;
+static int passed_call_time = 0;
+static struct timeb start_time;
+static struct timeb stop_time;
+static struct timeb start_call_time;
+static struct timeb stop_call_time;
+static int cb_passed_time = 0;
+static int cb_passed_counter = 0;
+static int cb_passed_call_time = 0;
+static struct timeb cb_start_time;
+static struct timeb cb_stop_time;
+static struct timeb cb_start_call_time;
+static struct timeb cb_stop_call_time;
+
+extern void
+f2py_start_clock(void)
+{
+    ftime(&start_time);
+}
+extern void
+f2py_start_call_clock(void)
+{
+    f2py_stop_clock();
+    ftime(&start_call_time);
+}
+extern void
+f2py_stop_clock(void)
+{
+    ftime(&stop_time);
+    passed_time += 1000 * (stop_time.time - start_time.time);
+    passed_time += stop_time.millitm - start_time.millitm;
+}
+extern void
+f2py_stop_call_clock(void)
+{
+    ftime(&stop_call_time);
+    passed_call_time += 1000 * (stop_call_time.time - start_call_time.time);
+    passed_call_time += stop_call_time.millitm - start_call_time.millitm;
+    passed_counter += 1;
+    f2py_start_clock();
+}
+
+extern void
+f2py_cb_start_clock(void)
+{
+    ftime(&cb_start_time);
+}
+extern void
+f2py_cb_start_call_clock(void)
+{
+    f2py_cb_stop_clock();
+    ftime(&cb_start_call_time);
+}
+extern void
+f2py_cb_stop_clock(void)
+{
+    ftime(&cb_stop_time);
+    cb_passed_time += 1000 * (cb_stop_time.time - cb_start_time.time);
+    cb_passed_time += cb_stop_time.millitm - cb_start_time.millitm;
+}
+extern void
+f2py_cb_stop_call_clock(void)
+{
+    ftime(&cb_stop_call_time);
+    cb_passed_call_time +=
+            1000 * (cb_stop_call_time.time
- cb_start_call_time.time); + cb_passed_call_time += + cb_stop_call_time.millitm - cb_start_call_time.millitm; + cb_passed_counter += 1; + f2py_cb_start_clock(); +} + +static int f2py_report_on_exit_been_here = 0; +extern void +f2py_report_on_exit(int exit_flag, void *name) +{ + if (f2py_report_on_exit_been_here) { + fprintf(stderr, " %s\n", (char *)name); + return; + } + f2py_report_on_exit_been_here = 1; + fprintf(stderr, " /-----------------------\\\n"); + fprintf(stderr, " < F2PY performance report >\n"); + fprintf(stderr, " \\-----------------------/\n"); + fprintf(stderr, "Overall time spent in ...\n"); + fprintf(stderr, "(a) wrapped (Fortran/C) functions : %8d msec\n", + passed_call_time); + fprintf(stderr, "(b) f2py interface, %6d calls : %8d msec\n", + passed_counter, passed_time); + fprintf(stderr, "(c) call-back (Python) functions : %8d msec\n", + cb_passed_call_time); + fprintf(stderr, "(d) f2py call-back interface, %6d calls : %8d msec\n", + cb_passed_counter, cb_passed_time); + + fprintf(stderr, + "(e) wrapped (Fortran/C) functions (actual) : %8d msec\n\n", + passed_call_time - cb_passed_call_time - cb_passed_time); + fprintf(stderr, + "Use -DF2PY_REPORT_ATEXIT_DISABLE to disable this message.\n"); + fprintf(stderr, "Exit status: %d\n", exit_flag); + fprintf(stderr, "Modules : %s\n", (char *)name); +} +#endif + +/********************** report on array copy ****************************/ + +#ifdef F2PY_REPORT_ON_ARRAY_COPY +static void +f2py_report_on_array_copy(PyArrayObject *arr) +{ + const npy_intp arr_size = PyArray_Size((PyObject *)arr); + if (arr_size > F2PY_REPORT_ON_ARRAY_COPY) { + fprintf(stderr, + "copied an array: size=%ld, elsize=%" NPY_INTP_FMT "\n", + arr_size, (npy_intp)PyArray_ITEMSIZE(arr)); + } +} +static void +f2py_report_on_array_copy_fromany(void) +{ + fprintf(stderr, "created an array from object\n"); +} + +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR \ + f2py_report_on_array_copy((PyArrayObject *)arr) +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY f2py_report_on_array_copy_fromany() +#else +#define F2PY_REPORT_ON_ARRAY_COPY_FROMARR +#define F2PY_REPORT_ON_ARRAY_COPY_FROMANY +#endif + +/************************* array_from_obj *******************************/ + +/* + * File: array_from_pyobj.c + * + * Description: + * ------------ + * Provides array_from_pyobj function that returns a contiguous array + * object with the given dimensions and required storage order, either + * in row-major (C) or column-major (Fortran) order. The function + * array_from_pyobj is very flexible about its Python object argument + * that can be any number, list, tuple, or array. + * + * array_from_pyobj is used in f2py generated Python extension + * modules. 
+ *
+ * Author: Pearu Peterson <pearu@cens.ioc.ee>
+ * Created: 13-16 January 2002
+ * $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
+ */
+
+static int check_and_fix_dimensions(const PyArrayObject* arr,
+                                    const int rank,
+                                    npy_intp *dims,
+                                    const char *errmess);
+
+static int
+find_first_negative_dimension(const int rank, const npy_intp *dims)
+{
+    int i;
+    for (i = 0; i < rank; ++i) {
+        if (dims[i] < 0) {
+            return i;
+        }
+    }
+    return -1;
+}
+
+#ifdef DEBUG_COPY_ND_ARRAY
+void
+dump_dims(int rank, npy_intp const *dims)
+{
+    int i;
+    printf("[");
+    for (i = 0; i < rank; ++i) {
+        printf("%3" NPY_INTP_FMT, dims[i]);
+    }
+    printf("]\n");
+}
+void
+dump_attrs(const PyArrayObject *obj)
+{
+    const PyArrayObject_fields *arr = (const PyArrayObject_fields *)obj;
+    int rank = PyArray_NDIM(arr);
+    npy_intp size = PyArray_Size((PyObject *)arr);
+    printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n", rank,
+           arr->flags, size);
+    printf("\tstrides = ");
+    dump_dims(rank, arr->strides);
+    printf("\tdimensions = ");
+    dump_dims(rank, arr->dimensions);
+}
+#endif
+
+#define SWAPTYPE(a, b, t) \
+    {                     \
+        t c;              \
+        c = (a);          \
+        (a) = (b);        \
+        (b) = c;          \
+    }
+
+static int
+swap_arrays(PyArrayObject *obj1, PyArrayObject *obj2)
+{
+    PyArrayObject_fields *arr1 = (PyArrayObject_fields *)obj1,
+                         *arr2 = (PyArrayObject_fields *)obj2;
+    SWAPTYPE(arr1->data, arr2->data, char *);
+    SWAPTYPE(arr1->nd, arr2->nd, int);
+    SWAPTYPE(arr1->dimensions, arr2->dimensions, npy_intp *);
+    SWAPTYPE(arr1->strides, arr2->strides, npy_intp *);
+    SWAPTYPE(arr1->base, arr2->base, PyObject *);
+    SWAPTYPE(arr1->descr, arr2->descr, PyArray_Descr *);
+    SWAPTYPE(arr1->flags, arr2->flags, int);
+    /* SWAPTYPE(arr1->weakreflist,arr2->weakreflist,PyObject*); */
+    return 0;
+}
+
+#define ARRAY_ISCOMPATIBLE(arr,type_num)                             \
+    ((PyArray_ISINTEGER(arr) && PyTypeNum_ISINTEGER(type_num)) ||    \
+     (PyArray_ISFLOAT(arr) && PyTypeNum_ISFLOAT(type_num)) ||        \
+     (PyArray_ISCOMPLEX(arr) && PyTypeNum_ISCOMPLEX(type_num)) ||    \
+     (PyArray_ISBOOL(arr) && PyTypeNum_ISBOOL(type_num)) ||          \
+     (PyArray_ISSTRING(arr) && PyTypeNum_ISSTRING(type_num)))
+
+static int
+get_elsize(PyObject *obj) {
+    /*
+    get_elsize determines array itemsize from a Python object. Returns
+    elsize if successful, -1 otherwise.
+
+    Supported types of the input are: numpy.ndarray, bytes, str, tuple,
+    list.
+    */
+
+    if (PyArray_Check(obj)) {
+        return PyArray_ITEMSIZE((PyArrayObject *)obj);
+    } else if (PyBytes_Check(obj)) {
+        return PyBytes_GET_SIZE(obj);
+    } else if (PyUnicode_Check(obj)) {
+        return PyUnicode_GET_LENGTH(obj);
+    } else if (PySequence_Check(obj)) {
+        PyObject* fast = PySequence_Fast(obj, "f2py:fortranobject.c:get_elsize");
+        if (fast != NULL) {
+            Py_ssize_t i, n = PySequence_Fast_GET_SIZE(fast);
+            int sz, elsize = 0;
+            for (i=0; i<n; i++) {
+                sz = get_elsize(PySequence_Fast_GET_ITEM(fast, i));
+                if (sz > elsize) {
+                    elsize = sz;
+                }
+            }
+            Py_DECREF(fast);
+            return elsize;
+        }
+    }
+    return -1;
+}
+
+extern PyArrayObject *
+ndarray_from_pyobj(const int type_num,
+                   const int elsize_,
+                   npy_intp *dims,
+                   const int rank,
+                   const int intent,
+                   PyObject *obj,
+                   const char *errmess) {
+    /*
+     * Return an array with given element type and shape from a Python
+     * object while taking into account the usage intent of the array.
+     *
+     * - element type is defined by type_num and elsize
+     * - shape is defined by dims and rank
+     *
+     * ndarray_from_pyobj is used to convert Python object arguments
+     * to numpy ndarrays with a given type and shape, whose data is
+     * then passed to interfaced Fortran or C functions.
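+ *
+ * A sketch of a typical call from generated wrapper code (the variable
+ * names here are hypothetical, not from this file):
+ *
+ *   npy_intp dims[1] = {-1};
+ *   PyArrayObject *a = ndarray_from_pyobj(NPY_DOUBLE, sizeof(double),
+ *                                         dims, 1, F2PY_INTENT_IN,
+ *                                         pyobj, "mymod.f: 1st argument x");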
+ *
+ * errmess (if not NULL), contains a prefix of an error message
+ * for an exception to be triggered within this function.
+ *
+ * Negative elsize value means that elsize is to be determined
+ * from the Python object in runtime.
+ *
+ * Note on strings
+ * ---------------
+ *
+ * String type (type_num == NPY_STRING) does not have fixed
+ * element size and, by default, the type object sets it to
+ * 0. Therefore, for string types, one has to use elsize
+ * argument. For other types, elsize value is ignored.
+ *
+ * NumPy defines the type of a fixed-width string as
+ * dtype('S<width>'). In addition, there is also dtype('c'), that
+ * appears as dtype('S1') (these have the same type_num value),
+ * but is actually different (.char attribute is either 'S' or
+ * 'c', respectively).
+ *
+ * In Fortran, character arrays and strings are different
+ * concepts. The relation between Fortran types, NumPy dtypes,
+ * and type_num-elsize pairs, is defined as follows:
+ *
+ * character*5 foo     | dtype('S5') | elsize=5, shape=()
+ * character(5) foo    | dtype('S1') | elsize=1, shape=(5)
+ * character*5 foo(n)  | dtype('S5') | elsize=5, shape=(n,)
+ * character(5) foo(n) | dtype('S1') | elsize=1, shape=(5, n)
+ * character*(*) foo   | dtype('S')  | elsize=-1, shape=()
+ *
+ * Note about reference counting
+ * -----------------------------
+ *
+ * If the caller returns the array to Python, it must be done with
+ * Py_BuildValue("N",arr). Otherwise, if obj!=arr then the caller
+ * must call Py_DECREF(arr).
+ *
+ * Note on intent(cache,out,..)
+ * ----------------------------
+ * Don't expect correct data when returning intent(cache) array.
+ *
+ */
+    char mess[F2PY_MESSAGE_BUFFER_SIZE];
+    PyArrayObject *arr = NULL;
+    int elsize = (elsize_ < 0 ? get_elsize(obj) : elsize_);
+    if (elsize < 0) {
+        if (errmess != NULL) {
+            strcpy(mess, errmess);
+        }
+        sprintf(mess + strlen(mess),
+                " -- failed to determine element size from %s",
+                Py_TYPE(obj)->tp_name);
+        PyErr_SetString(PyExc_SystemError, mess);
+        return NULL;
+    }
+    PyArray_Descr * descr = get_descr_from_type_and_elsize(type_num, elsize);  // new reference
+    if (descr == NULL) {
+        return NULL;
+    }
+    elsize = PyDataType_ELSIZE(descr);
+    if ((intent & F2PY_INTENT_HIDE)
+        || ((intent & F2PY_INTENT_CACHE) && (obj == Py_None))
+        || ((intent & F2PY_OPTIONAL) && (obj == Py_None))
+        ) {
+        /* intent(cache), optional, intent(hide) */
+        int ineg = find_first_negative_dimension(rank, dims);
+        if (ineg >= 0) {
+            int i;
+            strcpy(mess, "failed to create intent(cache|hide)|optional array"
+                   "-- must have defined dimensions but got (");
+            for(i = 0; i < rank; ++i)
+                sprintf(mess + strlen(mess), "%" NPY_INTP_FMT ",", dims[i]);
+            strcat(mess, ")");
+            PyErr_SetString(PyExc_ValueError, mess);
+            Py_DECREF(descr);
+            return NULL;
+        }
+        arr = (PyArrayObject *) \
+            PyArray_NewFromDescr(&PyArray_Type, descr, rank, dims,
+                                 NULL, NULL, !(intent & F2PY_INTENT_C), NULL);
+        if (arr == NULL) {
+            Py_DECREF(descr);
+            return NULL;
+        }
+        if (PyArray_ITEMSIZE(arr) != elsize) {
+            strcpy(mess, "failed to create intent(cache|hide)|optional array");
+            sprintf(mess+strlen(mess), " -- expected elsize=%d got %" NPY_INTP_FMT, elsize, (npy_intp)PyArray_ITEMSIZE(arr));
+            PyErr_SetString(PyExc_ValueError,mess);
+            Py_DECREF(arr);
+            return NULL;
+        }
+        if (!(intent & F2PY_INTENT_CACHE)) {
+            PyArray_FILLWBYTE(arr, 0);
+        }
+        return arr;
+    }
+
+    if (PyArray_Check(obj)) {
+        arr = (PyArrayObject *)obj;
+        if (intent & F2PY_INTENT_CACHE) {
+            /* intent(cache) */
+            if (PyArray_ISONESEGMENT(arr)
+                && PyArray_ITEMSIZE(arr) >=
elsize) { + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + Py_DECREF(descr); + return arr; + } + strcpy(mess, "failed to initialize intent(cache) array"); + if (!PyArray_ISONESEGMENT(arr)) + strcat(mess, " -- input must be in one segment"); + if (PyArray_ITEMSIZE(arr) < elsize) + sprintf(mess + strlen(mess), + " -- expected at least elsize=%d but got " + "%" NPY_INTP_FMT, + elsize, (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inout) or intent(inplace) + */ + + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(descr); + return NULL; + } + /* + printf("intent alignment=%d\n", F2PY_GET_ALIGNMENT(intent)); + printf("alignment check=%d\n", F2PY_CHECK_ALIGNMENT(arr, intent)); + int i; + for (i=1;i<=16;i++) + printf("i=%d isaligned=%d\n", i, ARRAY_ISALIGNED(arr, i)); + */ + if ((! (intent & F2PY_INTENT_COPY)) && + PyArray_ITEMSIZE(arr) == elsize && + ARRAY_ISCOMPATIBLE(arr,type_num) && + F2PY_CHECK_ALIGNMENT(arr, intent)) { + if ((intent & F2PY_INTENT_INOUT || intent & F2PY_INTENT_INPLACE) + ? ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY(arr) : PyArray_ISFARRAY(arr)) + : ((intent & F2PY_INTENT_C) ? PyArray_ISCARRAY_RO(arr) : PyArray_ISFARRAY_RO(arr))) { + if ((intent & F2PY_INTENT_OUT)) { + Py_INCREF(arr); + } + /* Returning input array */ + Py_DECREF(descr); + return arr; + } + } + if (intent & F2PY_INTENT_INOUT) { + strcpy(mess, "failed to initialize intent(inout) array"); + /* Must use PyArray_IS*ARRAY because intent(inout) requires + * writable input */ + if ((intent & F2PY_INTENT_C) && !PyArray_ISCARRAY(arr)) + strcat(mess, " -- input not contiguous"); + if (!(intent & F2PY_INTENT_C) && !PyArray_ISFARRAY(arr)) + strcat(mess, " -- input not fortran contiguous"); + if (PyArray_ITEMSIZE(arr) != elsize) + sprintf(mess + strlen(mess), + " -- expected elsize=%d but got %" NPY_INTP_FMT, + elsize, + (npy_intp)PyArray_ITEMSIZE(arr) + ); + if (!(ARRAY_ISCOMPATIBLE(arr, type_num))) { + sprintf(mess + strlen(mess), + " -- input '%c' not compatible to '%c'", + PyArray_DESCR(arr)->type, descr->type); + } + if (!(F2PY_CHECK_ALIGNMENT(arr, intent))) + sprintf(mess + strlen(mess), " -- input not %d-aligned", + F2PY_GET_ALIGNMENT(intent)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(descr); + return NULL; + } + + /* here we have always intent(in) or intent(inplace) */ + + { + PyArrayObject * retarr = (PyArrayObject *) \ + PyArray_NewFromDescr(&PyArray_Type, descr, PyArray_NDIM(arr), PyArray_DIMS(arr), + NULL, NULL, !(intent & F2PY_INTENT_C), NULL); + if (retarr==NULL) { + Py_DECREF(descr); + return NULL; + } + F2PY_REPORT_ON_ARRAY_COPY_FROMARR; + if (PyArray_CopyInto(retarr, arr)) { + Py_DECREF(retarr); + return NULL; + } + if (intent & F2PY_INTENT_INPLACE) { + if (swap_arrays(arr,retarr)) { + Py_DECREF(retarr); + return NULL; /* XXX: set exception */ + } + Py_XDECREF(retarr); + if (intent & F2PY_INTENT_OUT) + Py_INCREF(arr); + } else { + arr = retarr; + } + } + return arr; + } + + if ((intent & F2PY_INTENT_INOUT) || (intent & F2PY_INTENT_INPLACE) || + (intent & F2PY_INTENT_CACHE)) { + PyErr_Format(PyExc_TypeError, + "failed to initialize intent(inout|inplace|cache) " + "array, input '%s' object is not an array", + Py_TYPE(obj)->tp_name); + Py_DECREF(descr); + return NULL; + } + + { + F2PY_REPORT_ON_ARRAY_COPY_FROMANY; + arr = (PyArrayObject *)PyArray_FromAny( + 
obj, descr, 0, 0, + ((intent & F2PY_INTENT_C) ? NPY_ARRAY_CARRAY + : NPY_ARRAY_FARRAY) | + NPY_ARRAY_FORCECAST, + NULL); + // Warning: in the case of NPY_STRING, PyArray_FromAny may + // reset descr->elsize, e.g. dtype('S0') becomes dtype('S1'). + if (arr == NULL) { + Py_DECREF(descr); + return NULL; + } + if (type_num != NPY_STRING && PyArray_ITEMSIZE(arr) != elsize) { + // This is internal sanity tests: elsize has been set to + // descr->elsize in the beginning of this function. + strcpy(mess, "failed to initialize intent(in) array"); + sprintf(mess + strlen(mess), + " -- expected elsize=%d got %" NPY_INTP_FMT, elsize, + (npy_intp)PyArray_ITEMSIZE(arr)); + PyErr_SetString(PyExc_ValueError, mess); + Py_DECREF(arr); + return NULL; + } + if (check_and_fix_dimensions(arr, rank, dims, errmess)) { + Py_DECREF(arr); + return NULL; + } + return arr; + } +} + +extern PyArrayObject * +array_from_pyobj(const int type_num, + npy_intp *dims, + const int rank, + const int intent, + PyObject *obj) { + /* + Same as ndarray_from_pyobj but with elsize determined from type, + if possible. Provided for backward compatibility. + */ + PyArray_Descr* descr = PyArray_DescrFromType(type_num); + int elsize = PyDataType_ELSIZE(descr); + Py_DECREF(descr); + return ndarray_from_pyobj(type_num, elsize, dims, rank, intent, obj, NULL); +} + +/*****************************************/ +/* Helper functions for array_from_pyobj */ +/*****************************************/ + +static int +check_and_fix_dimensions(const PyArrayObject* arr, const int rank, + npy_intp *dims, const char *errmess) +{ + /* + * This function fills in blanks (that are -1's) in dims list using + * the dimensions from arr. It also checks that non-blank dims will + * match with the corresponding values in arr dimensions. + * + * Returns 0 if the function is successful. + * + * If an error condition is detected, an exception is set and 1 is + * returned. + */ + char mess[F2PY_MESSAGE_BUFFER_SIZE]; + const npy_intp arr_size = + (PyArray_NDIM(arr)) ? PyArray_Size((PyObject *)arr) : 1; +#ifdef DEBUG_COPY_ND_ARRAY + dump_attrs(arr); + printf("check_and_fix_dimensions:init: dims="); + dump_dims(rank, dims); +#endif + if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */ + npy_intp new_size = 1; + int free_axe = -1; + int i; + npy_intp d; + /* Fill dims where -1 or 0; check dimensions; calc new_size; */ + for (i = 0; i < PyArray_NDIM(arr); ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && dims[i] != d) { + PyErr_Format( + PyExc_ValueError, + "%d-th dimension must be fixed to %" NPY_INTP_FMT + " but got %" NPY_INTP_FMT "\n", + i, dims[i], d); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else { + dims[i] = d ? 
d : 1; + } + new_size *= dims[i]; + } + for (i = PyArray_NDIM(arr); i < rank; ++i) + if (dims[i] > 1) { + PyErr_Format(PyExc_ValueError, + "%d-th dimension must be %" NPY_INTP_FMT + " but got 0 (not defined).\n", + i, dims[i]); + return 1; + } + else if (free_axe < 0) + free_axe = i; + else + dims[i] = 1; + if (free_axe >= 0) { + dims[free_axe] = arr_size / new_size; + new_size *= dims[free_axe]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT + " (maybe too many free indices)\n", + new_size, arr_size); + return 1; + } + } + else if (rank == PyArray_NDIM(arr)) { + npy_intp new_size = 1; + int i; + npy_intp d; + for (i = 0; i < rank; ++i) { + d = PyArray_DIM(arr, i); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT, + i, dims[i], d); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + new_size *= dims[i]; + } + if (new_size != arr_size) { + PyErr_Format(PyExc_ValueError, + "unexpected array size: new_size=%" NPY_INTP_FMT + ", got array with arr_size=%" NPY_INTP_FMT "\n", + new_size, arr_size); + return 1; + } + } + else { /* [[1,2]] -> [[1],[2]] */ + int i, j; + npy_intp d; + int effrank; + npy_intp size; + for (i = 0, effrank = 0; i < PyArray_NDIM(arr); ++i) + if (PyArray_DIM(arr, i) > 1) + ++effrank; + if (dims[rank - 1] >= 0) + if (effrank > rank) { + PyErr_Format(PyExc_ValueError, + "too many axes: %d (effrank=%d), " + "expected rank=%d\n", + PyArray_NDIM(arr), effrank, rank); + return 1; + } + + for (i = 0, j = 0; i < rank; ++i) { + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + if (dims[i] >= 0) { + if (d > 1 && d != dims[i]) { + if (errmess != NULL) { + strcpy(mess, errmess); + } + sprintf(mess + strlen(mess), + " -- %d-th dimension must be fixed to %" + NPY_INTP_FMT " but got %" NPY_INTP_FMT + " (real index=%d)\n", + i, dims[i], d, j-1); + PyErr_SetString(PyExc_ValueError, mess); + return 1; + } + if (!dims[i]) + dims[i] = 1; + } + else + dims[i] = d; + } + + for (i = rank; i < PyArray_NDIM(arr); + ++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */ + while (j < PyArray_NDIM(arr) && PyArray_DIM(arr, j) < 2) ++j; + if (j >= PyArray_NDIM(arr)) + d = 1; + else + d = PyArray_DIM(arr, j++); + dims[rank - 1] *= d; + } + for (i = 0, size = 1; i < rank; ++i) size *= dims[i]; + if (size != arr_size) { + char msg[200]; + int len; + snprintf(msg, sizeof(msg), + "unexpected array size: size=%" NPY_INTP_FMT + ", arr_size=%" NPY_INTP_FMT + ", rank=%d, effrank=%d, arr.nd=%d, dims=[", + size, arr_size, rank, effrank, PyArray_NDIM(arr)); + for (i = 0; i < rank; ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + dims[i]); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=["); + for (i = 0; i < PyArray_NDIM(arr); ++i) { + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " %" NPY_INTP_FMT, + PyArray_DIM(arr, i)); + } + len = strlen(msg); + snprintf(msg + len, sizeof(msg) - len, " ]\n"); + PyErr_SetString(PyExc_ValueError, msg); + return 1; + } + } +#ifdef DEBUG_COPY_ND_ARRAY + printf("check_and_fix_dimensions:end: dims="); + dump_dims(rank, dims); +#endif + return 0; +} + +/* End of file: 
array_from_pyobj.c */
+
+/************************* copy_ND_array *******************************/
+
+extern int
+copy_ND_array(const PyArrayObject *arr, PyArrayObject *out)
+{
+    F2PY_REPORT_ON_ARRAY_COPY_FROMARR;
+    return PyArray_CopyInto(out, (PyArrayObject *)arr);
+}
+
+/********************* Various utility functions ***********************/
+
+extern int
+f2py_describe(PyObject *obj, char *buf) {
+    /*
+      Write the description of a Python object to buf. The caller must
+      provide buffer with size sufficient to write the description.
+
+      Return 1 on success.
+    */
+    char localbuf[F2PY_MESSAGE_BUFFER_SIZE];
+    if (PyBytes_Check(obj)) {
+        sprintf(localbuf, "%d-%s", (npy_int)PyBytes_GET_SIZE(obj), Py_TYPE(obj)->tp_name);
+    } else if (PyUnicode_Check(obj)) {
+        sprintf(localbuf, "%d-%s", (npy_int)PyUnicode_GET_LENGTH(obj), Py_TYPE(obj)->tp_name);
+    } else if (PyArray_CheckScalar(obj)) {
+        PyArrayObject* arr = (PyArrayObject*)obj;
+        sprintf(localbuf, "%c%" NPY_INTP_FMT "-%s-scalar", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name);
+    } else if (PyArray_Check(obj)) {
+        int i;
+        PyArrayObject* arr = (PyArrayObject*)obj;
+        strcpy(localbuf, "(");
+        for (i=0; i<PyArray_NDIM(arr); i++) {
+            sprintf(localbuf + strlen(localbuf), "%" NPY_INTP_FMT ",", PyArray_DIM(arr, i));
+        }
+        strcpy(localbuf + strlen(localbuf), ")-");
+        sprintf(localbuf + strlen(localbuf), "%c%" NPY_INTP_FMT "-%s-array", PyArray_DESCR(arr)->kind, PyArray_ITEMSIZE(arr), Py_TYPE(obj)->tp_name);
+    } else if (PySequence_Check(obj)) {
+        sprintf(localbuf, "%d-%s", (npy_int)PySequence_Length(obj), Py_TYPE(obj)->tp_name);
+    } else {
+        sprintf(localbuf, "%s instance", Py_TYPE(obj)->tp_name);
+    }
+    // TODO: detect the size of buf and make sure that size(buf) >= size(localbuf).
+    strcpy(buf, localbuf);
+    return 1;
+}
+
+extern npy_intp
+f2py_size_impl(PyArrayObject* var, ...)
+{
+    npy_intp sz = 0;
+    npy_intp dim;
+    npy_intp rank;
+    va_list argp;
+    va_start(argp, var);
+    dim = va_arg(argp, npy_int);
+    if (dim==-1)
+    {
+        sz = PyArray_SIZE(var);
+    }
+    else
+    {
+        rank = PyArray_NDIM(var);
+        if (dim>=1 && dim<=rank)
+            sz = PyArray_DIM(var, dim-1);
+        else
+            fprintf(stderr, "f2py_size: 2nd argument value=%" NPY_INTP_FMT
+                    " fails to satisfy 1<=value<=%" NPY_INTP_FMT
+                    ". Result will be 0.\n", dim, rank);
+    }
+    va_end(argp);
+    return sz;
+}
+
+/*********************************************/
+/* Compatibility functions for Python >= 3.0 */
+/*********************************************/
+
+PyObject *
+F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
+{
+    PyObject *ret = PyCapsule_New(ptr, NULL, dtor);
+    if (ret == NULL) {
+        PyErr_Clear();
+    }
+    return ret;
+}
+
+void *
+F2PyCapsule_AsVoidPtr(PyObject *obj)
+{
+    void *ret = PyCapsule_GetPointer(obj, NULL);
+    if (ret == NULL) {
+        PyErr_Clear();
+    }
+    return ret;
+}
+
+int
+F2PyCapsule_Check(PyObject *ptr)
+{
+    return PyCapsule_CheckExact(ptr);
+}
+
+#ifdef __cplusplus
+}
+#endif
+/************************* EOF fortranobject.c *******************************/
diff --git a/python/numpy/f2py/src/fortranobject.h b/python/numpy/f2py/src/fortranobject.h
new file mode 100644
index 000000000..4aed2f608
--- /dev/null
+++ b/python/numpy/f2py/src/fortranobject.h
@@ -0,0 +1,173 @@
+#ifndef Py_FORTRANOBJECT_H
+#define Py_FORTRANOBJECT_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <Python.h>
+
+#ifndef NPY_NO_DEPRECATED_API
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#endif
+#ifdef FORTRANOBJECT_C
+#define NO_IMPORT_ARRAY
+#endif
+#define PY_ARRAY_UNIQUE_SYMBOL _npy_f2py_ARRAY_API
+#include "numpy/arrayobject.h"
+#include "numpy/npy_3kcompat.h"
+
+#ifdef F2PY_REPORT_ATEXIT
+#include <sys/timeb.h>
+// clang-format off
+extern void f2py_start_clock(void);
+extern void f2py_stop_clock(void);
+extern void f2py_start_call_clock(void);
+extern void f2py_stop_call_clock(void);
+extern void f2py_cb_start_clock(void);
+extern void f2py_cb_stop_clock(void);
+extern void f2py_cb_start_call_clock(void);
+extern void f2py_cb_stop_call_clock(void);
+extern void f2py_report_on_exit(int, void *);
+// clang-format on
+#endif
+
+#ifdef DMALLOC
+#include "dmalloc.h"
+#endif
+
+/* Fortran object interface */
+
+/*
+123456789-123456789-123456789-123456789-123456789-123456789-123456789-12
+
+PyFortranObject represents various Fortran objects:
+Fortran (module) routines, COMMON blocks, module data.
+
+Author: Pearu Peterson <pearu@cens.ioc.ee>
+*/
+
+#define F2PY_MAX_DIMS 40
+#define F2PY_MESSAGE_BUFFER_SIZE 300  // Increase on "stack smashing detected"
+
+typedef void (*f2py_set_data_func)(char *, npy_intp *);
+typedef void (*f2py_void_func)(void);
+typedef void (*f2py_init_func)(int *, npy_intp *, f2py_set_data_func, int *);
+
+/*typedef void* (*f2py_c_func)(void*,...);*/
+
+typedef void *(*f2pycfunc)(void);
+
+typedef struct {
+    char *name;          /* attribute (array||routine) name */
+    int rank;            /* array rank, 0 for scalar, max is F2PY_MAX_DIMS,
+                            || rank=-1 for Fortran routine */
+    struct {
+        npy_intp d[F2PY_MAX_DIMS];
+    } dims;              /* dimensions of the array, || not used */
+    int type;            /* PyArray_<type> || not used */
+    int elsize;          /* Element size || not used */
+    char *data;          /* pointer to array || Fortran routine */
+    f2py_init_func func; /* initialization function for
+                            allocatable arrays:
+                            func(&rank,dims,set_ptr_func,name,len(name))
+                            || C/API wrapper for Fortran routine */
+    char *doc;           /* documentation string; only recommended
+                            for routines.
*/ +} FortranDataDef; + +typedef struct { + PyObject_HEAD + int len; /* Number of attributes */ + FortranDataDef *defs; /* An array of FortranDataDef's */ + PyObject *dict; /* Fortran object attribute dictionary */ +} PyFortranObject; + +#define PyFortran_Check(op) (Py_TYPE(op) == &PyFortran_Type) +#define PyFortran_Check1(op) (0 == strcmp(Py_TYPE(op)->tp_name, "fortran")) + +extern PyTypeObject PyFortran_Type; +extern int +F2PyDict_SetItemString(PyObject *dict, char *name, PyObject *obj); +extern PyObject * +PyFortranObject_New(FortranDataDef *defs, f2py_void_func init); +extern PyObject * +PyFortranObject_NewAsAttr(FortranDataDef *defs); + +PyObject * +F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *)); +void * +F2PyCapsule_AsVoidPtr(PyObject *obj); +int +F2PyCapsule_Check(PyObject *ptr); + +extern void * +F2PySwapThreadLocalCallbackPtr(char *key, void *ptr); +extern void * +F2PyGetThreadLocalCallbackPtr(char *key); + +#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS) +#define F2PY_INTENT_IN 1 +#define F2PY_INTENT_INOUT 2 +#define F2PY_INTENT_OUT 4 +#define F2PY_INTENT_HIDE 8 +#define F2PY_INTENT_CACHE 16 +#define F2PY_INTENT_COPY 32 +#define F2PY_INTENT_C 64 +#define F2PY_OPTIONAL 128 +#define F2PY_INTENT_INPLACE 256 +#define F2PY_INTENT_ALIGNED4 512 +#define F2PY_INTENT_ALIGNED8 1024 +#define F2PY_INTENT_ALIGNED16 2048 + +#define ARRAY_ISALIGNED(ARR, SIZE) ((size_t)(PyArray_DATA(ARR)) % (SIZE) == 0) +#define F2PY_ALIGN4(intent) (intent & F2PY_INTENT_ALIGNED4) +#define F2PY_ALIGN8(intent) (intent & F2PY_INTENT_ALIGNED8) +#define F2PY_ALIGN16(intent) (intent & F2PY_INTENT_ALIGNED16) + +#define F2PY_GET_ALIGNMENT(intent) \ + (F2PY_ALIGN4(intent) \ + ? 4 \ + : (F2PY_ALIGN8(intent) ? 8 : (F2PY_ALIGN16(intent) ? 16 : 1))) +#define F2PY_CHECK_ALIGNMENT(arr, intent) \ + ARRAY_ISALIGNED(arr, F2PY_GET_ALIGNMENT(intent)) +#define F2PY_ARRAY_IS_CHARACTER_COMPATIBLE(arr) ((PyArray_DESCR(arr)->type_num == NPY_STRING && PyArray_ITEMSIZE(arr) >= 1) \ + || PyArray_DESCR(arr)->type_num == NPY_UINT8) +#define F2PY_IS_UNICODE_ARRAY(arr) (PyArray_DESCR(arr)->type_num == NPY_UNICODE) + +extern PyArrayObject * +ndarray_from_pyobj(const int type_num, const int elsize_, npy_intp *dims, + const int rank, const int intent, PyObject *obj, + const char *errmess); + +extern PyArrayObject * +array_from_pyobj(const int type_num, npy_intp *dims, const int rank, + const int intent, PyObject *obj); +extern int +copy_ND_array(const PyArrayObject *in, PyArrayObject *out); + +#ifdef DEBUG_COPY_ND_ARRAY +extern void +dump_attrs(const PyArrayObject *arr); +#endif + + extern int f2py_describe(PyObject *obj, char *buf); + + /* Utility CPP macros and functions that can be used in signature file + expressions. See signature-file.rst for documentation. + */ + +#define f2py_itemsize(var) (PyArray_ITEMSIZE(capi_ ## var ## _as_array)) +#define f2py_size(var, ...) 
f2py_size_impl((PyArrayObject *)(capi_ ## var ## _as_array), ## __VA_ARGS__, -1)
+#define f2py_rank(var) var ## _Rank
+#define f2py_shape(var,dim) var ## _Dims[dim]
+#define f2py_len(var) f2py_shape(var,0)
+#define f2py_fshape(var,dim) f2py_shape(var,rank(var)-dim-1)
+#define f2py_flen(var) f2py_fshape(var,0)
+#define f2py_slen(var) capi_ ## var ## _len
+
+    extern npy_intp f2py_size_impl(PyArrayObject* var, ...);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* !Py_FORTRANOBJECT_H */
diff --git a/python/numpy/f2py/symbolic.py b/python/numpy/f2py/symbolic.py
new file mode 100644
index 000000000..11645172f
--- /dev/null
+++ b/python/numpy/f2py/symbolic.py
@@ -0,0 +1,1516 @@
+"""Fortran/C symbolic expressions
+
+References:
+- J3/21-007: Draft Fortran 202x. https://j3-fortran.org/doc/year/21/21-007.pdf
+
+Copyright 1999 -- 2011 Pearu Peterson all rights reserved.
+Copyright 2011 -- present NumPy Developers.
+Permission to use, modify, and distribute this software is given under the
+terms of the NumPy License.
+
+NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
+"""
+
+# To analyze Fortran expressions and solve dimension specifications,
+# for instance, we implement a minimal symbolic engine that parses
+# expressions into a tree of expression instances. As a first step,
+# we care only about arithmetic expressions involving integers and
+# operations like addition (+), subtraction (-), multiplication (*),
+# division (Fortran / is Python //, Fortran // is concatenate), and
+# exponentiation (**). In addition, .pyf files may contain C
+# expressions; support for those is implemented here as well.
+#
+# TODO: support logical constants (Op.BOOLEAN)
+# TODO: support logical operators (.AND., ...)
+# TODO: support defined operators (.MYOP., ...)
+#
+__all__ = ['Expr']
+
+
+import re
+import warnings
+from enum import Enum
+from math import gcd
+
+
+class Language(Enum):
+    """
+    Used as Expr.tostring language argument.
+    """
+    Python = 0
+    Fortran = 1
+    C = 2
+
+
+class Op(Enum):
+    """
+    Used as Expr op attribute.
+    """
+    INTEGER = 10
+    REAL = 12
+    COMPLEX = 15
+    STRING = 20
+    ARRAY = 30
+    SYMBOL = 40
+    TERNARY = 100
+    APPLY = 200
+    INDEXING = 210
+    CONCAT = 220
+    RELATIONAL = 300
+    TERMS = 1000
+    FACTORS = 2000
+    REF = 3000
+    DEREF = 3001
+
+
+class RelOp(Enum):
+    """
+    Used in Op.RELATIONAL expression to specify the function part.
+    """
+    EQ = 1
+    NE = 2
+    LT = 3
+    LE = 4
+    GT = 5
+    GE = 6
+
+    @classmethod
+    def fromstring(cls, s, language=Language.C):
+        if language is Language.Fortran:
+            return {'.eq.': RelOp.EQ, '.ne.': RelOp.NE,
+                    '.lt.': RelOp.LT, '.le.': RelOp.LE,
+                    '.gt.': RelOp.GT, '.ge.': RelOp.GE}[s.lower()]
+        return {'==': RelOp.EQ, '!=': RelOp.NE, '<': RelOp.LT,
+                '<=': RelOp.LE, '>': RelOp.GT, '>=': RelOp.GE}[s]
+
+    def tostring(self, language=Language.C):
+        if language is Language.Fortran:
+            return {RelOp.EQ: '.eq.', RelOp.NE: '.ne.',
+                    RelOp.LT: '.lt.', RelOp.LE: '.le.',
+                    RelOp.GT: '.gt.', RelOp.GE: '.ge.'}[self]
+        return {RelOp.EQ: '==', RelOp.NE: '!=',
+                RelOp.LT: '<', RelOp.LE: '<=',
+                RelOp.GT: '>', RelOp.GE: '>='}[self]
+
+
+class ArithOp(Enum):
+    """
+    Used in Op.APPLY expression to specify the function part.
+    """
+    POS = 1
+    NEG = 2
+    ADD = 3
+    SUB = 4
+    MUL = 5
+    DIV = 6
+    POW = 7
+
+
+class OpError(Exception):
+    pass
+
+
+class Precedence(Enum):
+    """
+    Used as Expr.tostring precedence argument.
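+
+    Lower values bind more tightly (ATOM binds tightest).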
+ """ + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + + +integer_types = (int,) +number_types = (int, float) + + +def _pairs_add(d, k, v): + # Internal utility method for updating terms and factors data. + c = d.get(k) + if c is None: + d[k] = v + else: + c = c + v + if c: + d[k] = c + else: + del d[k] + + +class ExprWarning(UserWarning): + pass + + +def ewarn(message): + warnings.warn(message, ExprWarning, stacklevel=2) + + +class Expr: + """Represents a Fortran expression as a op-data pair. + + Expr instances are hashable and sortable. + """ + + @staticmethod + def parse(s, language=Language.C): + """Parse a Fortran expression to a Expr. + """ + return fromstring(s, language=language) + + def __init__(self, op, data): + assert isinstance(op, Op) + + # sanity checks + if op is Op.INTEGER: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], int) + assert isinstance(data[1], (int, str)), data + elif op is Op.REAL: + # data is a 2-tuple of numeric object and a kind value + # (default is 4) + assert isinstance(data, tuple) and len(data) == 2 + assert isinstance(data[0], float) + assert isinstance(data[1], (int, str)), data + elif op is Op.COMPLEX: + # data is a 2-tuple of constant expressions + assert isinstance(data, tuple) and len(data) == 2 + elif op is Op.STRING: + # data is a 2-tuple of quoted string and a kind value + # (default is 1) + assert isinstance(data, tuple) and len(data) == 2 + assert (isinstance(data[0], str) + and data[0][::len(data[0]) - 1] in ('""', "''", '@@')) + assert isinstance(data[1], (int, str)), data + elif op is Op.SYMBOL: + # data is any hashable object + assert hash(data) is not None + elif op in (Op.ARRAY, Op.CONCAT): + # data is a tuple of expressions + assert isinstance(data, tuple) + assert all(isinstance(item, Expr) for item in data), data + elif op in (Op.TERMS, Op.FACTORS): + # data is {:} where dict values + # are nonzero Python integers + assert isinstance(data, dict) + elif op is Op.APPLY: + # data is (, , ) where + # operands are Expr instances + assert isinstance(data, tuple) and len(data) == 3 + # function is any hashable object + assert hash(data[0]) is not None + assert isinstance(data[1], tuple) + assert isinstance(data[2], dict) + elif op is Op.INDEXING: + # data is (, ) + assert isinstance(data, tuple) and len(data) == 2 + # function is any hashable object + assert hash(data[0]) is not None + elif op is Op.TERNARY: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + elif op in (Op.REF, Op.DEREF): + # data is Expr instance + assert isinstance(data, Expr) + elif op is Op.RELATIONAL: + # data is (, , ) + assert isinstance(data, tuple) and len(data) == 3 + else: + raise NotImplementedError( + f'unknown op or missing sanity check: {op}') + + self.op = op + self.data = data + + def __eq__(self, other): + return (isinstance(other, Expr) + and self.op is other.op + and self.data == other.data) + + def __hash__(self): + if self.op in (Op.TERMS, Op.FACTORS): + data = tuple(sorted(self.data.items())) + elif self.op is Op.APPLY: + data = self.data[:2] + tuple(sorted(self.data[2].items())) + else: + data = self.data + return hash((self.op, data)) + + def __lt__(self, other): + if isinstance(other, Expr): + if self.op is not other.op: + return self.op.value < other.op.value + if self.op in (Op.TERMS, Op.FACTORS): + return 
(tuple(sorted(self.data.items())) + < tuple(sorted(other.data.items()))) + if self.op is Op.APPLY: + if self.data[:2] != other.data[:2]: + return self.data[:2] < other.data[:2] + return tuple(sorted(self.data[2].items())) < tuple( + sorted(other.data[2].items())) + return self.data < other.data + return NotImplemented + + def __le__(self, other): return self == other or self < other + + def __gt__(self, other): return not (self <= other) + + def __ge__(self, other): return not (self < other) + + def __repr__(self): + return f'{type(self).__name__}({self.op}, {self.data!r})' + + def __str__(self): + return self.tostring() + + def tostring(self, parent_precedence=Precedence.NONE, + language=Language.Fortran): + """Return a string representation of Expr. + """ + if self.op in (Op.INTEGER, Op.REAL): + precedence = (Precedence.SUM if self.data[0] < 0 + else Precedence.ATOM) + r = str(self.data[0]) + (f'_{self.data[1]}' + if self.data[1] != 4 else '') + elif self.op is Op.COMPLEX: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '(' + r + ')' + precedence = Precedence.ATOM + elif self.op is Op.SYMBOL: + precedence = Precedence.ATOM + r = str(self.data) + elif self.op is Op.STRING: + r = self.data[0] + if self.data[1] != 1: + r = self.data[1] + '_' + r + precedence = Precedence.ATOM + elif self.op is Op.ARRAY: + r = ', '.join(item.tostring(Precedence.TUPLE, language=language) + for item in self.data) + r = '[' + r + ']' + precedence = Precedence.ATOM + elif self.op is Op.TERMS: + terms = [] + for term, coeff in sorted(self.data.items()): + if coeff < 0: + op = ' - ' + coeff = -coeff + else: + op = ' + ' + if coeff == 1: + term = term.tostring(Precedence.SUM, language=language) + elif term == as_number(1): + term = str(coeff) + else: + term = f'{coeff} * ' + term.tostring( + Precedence.PRODUCT, language=language) + if terms: + terms.append(op) + elif op == ' - ': + terms.append('-') + terms.append(term) + r = ''.join(terms) or '0' + precedence = Precedence.SUM if terms else Precedence.ATOM + elif self.op is Op.FACTORS: + factors = [] + tail = [] + for base, exp in sorted(self.data.items()): + op = ' * ' + if exp == 1: + factor = base.tostring(Precedence.PRODUCT, + language=language) + elif language is Language.C: + if exp in range(2, 10): + factor = base.tostring(Precedence.PRODUCT, + language=language) + factor = ' * '.join([factor] * exp) + elif exp in range(-10, 0): + factor = base.tostring(Precedence.PRODUCT, + language=language) + tail += [factor] * -exp + continue + else: + factor = base.tostring(Precedence.TUPLE, + language=language) + factor = f'pow({factor}, {exp})' + else: + factor = base.tostring(Precedence.POWER, + language=language) + f' ** {exp}' + if factors: + factors.append(op) + factors.append(factor) + if tail: + if not factors: + factors += ['1'] + factors += ['/', '(', ' * '.join(tail), ')'] + r = ''.join(factors) or '1' + precedence = Precedence.PRODUCT if factors else Precedence.ATOM + elif self.op is Op.APPLY: + name, args, kwargs = self.data + if name is ArithOp.DIV and language is Language.C: + numer, denom = [arg.tostring(Precedence.PRODUCT, + language=language) + for arg in args] + r = f'{numer} / {denom}' + precedence = Precedence.PRODUCT + else: + args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in args] + args += [k + '=' + v.tostring(Precedence.NONE) + for k, v in kwargs.items()] + r = f'{name}({", ".join(args)})' + precedence = Precedence.ATOM + elif self.op is Op.INDEXING: + name = self.data[0] + 
args = [arg.tostring(Precedence.TUPLE, language=language) + for arg in self.data[1:]] + r = f'{name}[{", ".join(args)}]' + precedence = Precedence.ATOM + elif self.op is Op.CONCAT: + args = [arg.tostring(Precedence.PRODUCT, language=language) + for arg in self.data] + r = " // ".join(args) + precedence = Precedence.PRODUCT + elif self.op is Op.TERNARY: + cond, expr1, expr2 = [a.tostring(Precedence.TUPLE, + language=language) + for a in self.data] + if language is Language.C: + r = f'({cond}?{expr1}:{expr2})' + elif language is Language.Python: + r = f'({expr1} if {cond} else {expr2})' + elif language is Language.Fortran: + r = f'merge({expr1}, {expr2}, {cond})' + else: + raise NotImplementedError( + f'tostring for {self.op} and {language}') + precedence = Precedence.ATOM + elif self.op is Op.REF: + r = '&' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.DEREF: + r = '*' + self.data.tostring(Precedence.UNARY, language=language) + precedence = Precedence.UNARY + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + precedence = (Precedence.EQ if rop in (RelOp.EQ, RelOp.NE) + else Precedence.LT) + left = left.tostring(precedence, language=language) + right = right.tostring(precedence, language=language) + rop = rop.tostring(language=language) + r = f'{left} {rop} {right}' + else: + raise NotImplementedError(f'tostring for op {self.op}') + if parent_precedence.value < precedence.value: + # If parent precedence is higher than operand precedence, + # operand will be enclosed in parenthesis. + return '(' + r + ')' + return r + + def __pos__(self): + return self + + def __neg__(self): + return self * -1 + + def __add__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number( + self.data[0] + other.data[0], + max(self.data[1], other.data[1])) + if self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 + r2, i1 + i2) + if self.op is Op.TERMS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self + as_complex(other) + elif self.op in (Op.INTEGER, Op.REAL) and other.op is Op.COMPLEX: + return as_complex(self) + other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self + as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) + other + return as_terms(self) + as_terms(other) + return NotImplemented + + def __radd__(self, other): + if isinstance(other, number_types): + return as_number(other) + self + return NotImplemented + + def __sub__(self, other): + return self + (-other) + + def __rsub__(self, other): + if isinstance(other, number_types): + return as_number(other) - self + return NotImplemented + + def __mul__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if self.op is other.op: + if self.op in (Op.INTEGER, Op.REAL): + return as_number(self.data[0] * other.data[0], + max(self.data[1], other.data[1])) + elif self.op is Op.COMPLEX: + r1, i1 = self.data + r2, i2 = other.data + return as_complex(r1 * r2 - i1 * i2, r1 * i2 + r2 * i1) + + if self.op is Op.FACTORS: + r = Expr(self.op, dict(self.data)) + for k, v in other.data.items(): + _pairs_add(r.data, k, v) + return normalize(r) + elif self.op is Op.TERMS: + r = Expr(self.op, {}) + for t1, c1 
in self.data.items(): + for t2, c2 in other.data.items(): + _pairs_add(r.data, t1 * t2, c1 * c2) + return normalize(r) + + if self.op is Op.COMPLEX and other.op in (Op.INTEGER, Op.REAL): + return self * as_complex(other) + elif other.op is Op.COMPLEX and self.op in (Op.INTEGER, Op.REAL): + return as_complex(self) * other + elif self.op is Op.REAL and other.op is Op.INTEGER: + return self * as_real(other, kind=self.data[1]) + elif self.op is Op.INTEGER and other.op is Op.REAL: + return as_real(self, kind=other.data[1]) * other + + if self.op is Op.TERMS: + return self * as_terms(other) + elif other.op is Op.TERMS: + return as_terms(self) * other + + return as_factors(self) * as_factors(other) + return NotImplemented + + def __rmul__(self, other): + if isinstance(other, number_types): + return as_number(other) * self + return NotImplemented + + def __pow__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + if other.op is Op.INTEGER: + exponent = other.data[0] + # TODO: other kind not used + if exponent == 0: + return as_number(1) + if exponent == 1: + return self + if exponent > 0: + if self.op is Op.FACTORS: + r = Expr(self.op, {}) + for k, v in self.data.items(): + r.data[k] = v * exponent + return normalize(r) + return self * (self ** (exponent - 1)) + elif exponent != -1: + return (self ** (-exponent)) ** -1 + return Expr(Op.FACTORS, {self: exponent}) + return as_apply(ArithOp.POW, self, other) + return NotImplemented + + def __truediv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + # Fortran / is different from Python /: + # - `/` is a truncate operation for integer operands + return normalize(as_apply(ArithOp.DIV, self, other)) + return NotImplemented + + def __rtruediv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + return other / self + return NotImplemented + + def __floordiv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + # Fortran // is different from Python //: + # - `//` is a concatenate operation for string operands + return normalize(Expr(Op.CONCAT, (self, other))) + return NotImplemented + + def __rfloordiv__(self, other): + other = as_expr(other) + if isinstance(other, Expr): + return other // self + return NotImplemented + + def __call__(self, *args, **kwargs): + # In Fortran, parenthesis () are use for both function call as + # well as indexing operations. + # + # TODO: implement a method for deciding when __call__ should + # return an INDEXING expression. + return as_apply(self, *map(as_expr, args), + **{k: as_expr(v) for k, v in kwargs.items()}) + + def __getitem__(self, index): + # Provided to support C indexing operations that .pyf files + # may contain. + index = as_expr(index) + if not isinstance(index, tuple): + index = index, + if len(index) > 1: + ewarn(f'C-index should be a single expression but got `{index}`') + return Expr(Op.INDEXING, (self,) + index) + + def substitute(self, symbols_map): + """Recursively substitute symbols with values in symbols map. + + Symbols map is a dictionary of symbol-expression pairs. 
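+
+        A minimal usage sketch (illustrative only; the names are
+        hypothetical and the exact normal form of the result is an
+        internal detail):
+
+            x, y = as_symbol('x'), as_symbol('y')
+            e = x + 2 * y
+            e.substitute({x: as_number(3)})  # an Expr for 3 + 2*y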
+ """ + if self.op is Op.SYMBOL: + value = symbols_map.get(self) + if value is None: + return self + m = re.match(r'\A(@__f2py_PARENTHESIS_(\w+)_\d+@)\Z', self.data) + if m: + # complement to fromstring method + items, paren = m.groups() + if paren in ['ROUNDDIV', 'SQUARE']: + return as_array(value) + assert paren == 'ROUND', (paren, value) + return value + if self.op in (Op.INTEGER, Op.REAL, Op.STRING): + return self + if self.op in (Op.ARRAY, Op.COMPLEX): + return Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data)) + if self.op is Op.CONCAT: + return normalize(Expr(self.op, tuple(item.substitute(symbols_map) + for item in self.data))) + if self.op is Op.TERMS: + r = None + for term, coeff in self.data.items(): + if r is None: + r = term.substitute(symbols_map) * coeff + else: + r += term.substitute(symbols_map) * coeff + if r is None: + ewarn('substitute: empty TERMS expression interpreted as' + ' int-literal 0') + return as_number(0) + return r + if self.op is Op.FACTORS: + r = None + for base, exponent in self.data.items(): + if r is None: + r = base.substitute(symbols_map) ** exponent + else: + r *= base.substitute(symbols_map) ** exponent + if r is None: + ewarn('substitute: empty FACTORS expression interpreted' + ' as int-literal 1') + return as_number(1) + return r + if self.op is Op.APPLY: + target, args, kwargs = self.data + if isinstance(target, Expr): + target = target.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in args) + kwargs = {k: v.substitute(symbols_map) + for k, v in kwargs.items()} + return normalize(Expr(self.op, (target, args, kwargs))) + if self.op is Op.INDEXING: + func = self.data[0] + if isinstance(func, Expr): + func = func.substitute(symbols_map) + args = tuple(a.substitute(symbols_map) for a in self.data[1:]) + return normalize(Expr(self.op, (func,) + args)) + if self.op is Op.TERNARY: + operands = tuple(a.substitute(symbols_map) for a in self.data) + return normalize(Expr(self.op, operands)) + if self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, self.data.substitute(symbols_map))) + if self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.substitute(symbols_map) + right = right.substitute(symbols_map) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'substitute method for {self.op}: {self!r}') + + def traverse(self, visit, *args, **kwargs): + """Traverse expression tree with visit function. + + The visit function is applied to an expression with given args + and kwargs. + + Traverse call returns an expression returned by visit when not + None, otherwise return a new normalized expression with + traverse-visit sub-expressions. 
+ """ + result = visit(self, *args, **kwargs) + if result is not None: + return result + + if self.op in (Op.INTEGER, Op.REAL, Op.STRING, Op.SYMBOL): + return self + elif self.op in (Op.COMPLEX, Op.ARRAY, Op.CONCAT, Op.TERNARY): + return normalize(Expr(self.op, tuple( + item.traverse(visit, *args, **kwargs) + for item in self.data))) + elif self.op in (Op.TERMS, Op.FACTORS): + data = {} + for k, v in self.data.items(): + k = k.traverse(visit, *args, **kwargs) + v = (v.traverse(visit, *args, **kwargs) + if isinstance(v, Expr) else v) + if k in data: + v = data[k] + v + data[k] = v + return normalize(Expr(self.op, data)) + elif self.op is Op.APPLY: + obj = self.data[0] + func = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + operands = tuple(operand.traverse(visit, *args, **kwargs) + for operand in self.data[1]) + kwoperands = {k: v.traverse(visit, *args, **kwargs) + for k, v in self.data[2].items()} + return normalize(Expr(self.op, (func, operands, kwoperands))) + elif self.op is Op.INDEXING: + obj = self.data[0] + obj = (obj.traverse(visit, *args, **kwargs) + if isinstance(obj, Expr) else obj) + indices = tuple(index.traverse(visit, *args, **kwargs) + for index in self.data[1:]) + return normalize(Expr(self.op, (obj,) + indices)) + elif self.op in (Op.REF, Op.DEREF): + return normalize(Expr(self.op, + self.data.traverse(visit, *args, **kwargs))) + elif self.op is Op.RELATIONAL: + rop, left, right = self.data + left = left.traverse(visit, *args, **kwargs) + right = right.traverse(visit, *args, **kwargs) + return normalize(Expr(self.op, (rop, left, right))) + raise NotImplementedError(f'traverse method for {self.op}') + + def contains(self, other): + """Check if self contains other. + """ + found = [] + + def visit(expr, found=found): + if found: + return expr + elif expr == other: + found.append(1) + return expr + + self.traverse(visit) + + return len(found) != 0 + + def symbols(self): + """Return a set of symbols contained in self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.SYMBOL: + found.add(expr) + + self.traverse(visit) + + return found + + def polynomial_atoms(self): + """Return a set of expressions used as atoms in polynomial self. + """ + found = set() + + def visit(expr, found=found): + if expr.op is Op.FACTORS: + for b in expr.data: + b.traverse(visit) + return expr + if expr.op in (Op.TERMS, Op.COMPLEX): + return + if expr.op is Op.APPLY and isinstance(expr.data[0], ArithOp): + if expr.data[0] is ArithOp.POW: + expr.data[1][0].traverse(visit) + return expr + return + if expr.op in (Op.INTEGER, Op.REAL): + return expr + + found.add(expr) + + if expr.op in (Op.INDEXING, Op.APPLY): + return expr + + self.traverse(visit) + + return found + + def linear_solve(self, symbol): + """Return a, b such that a * symbol + b == self. + + If self is not linear with respect to symbol, raise RuntimeError. + """ + b = self.substitute({symbol: as_number(0)}) + ax = self - b + a = ax.substitute({symbol: as_number(1)}) + + zero, _ = as_numer_denom(a * symbol - ax) + + if zero != as_number(0): + raise RuntimeError(f'not a {symbol}-linear equation:' + f' {a} * {symbol} + {b} == {self}') + return a, b + + +def normalize(obj): + """Normalize Expr and apply basic evaluation methods. 
+ """ + if not isinstance(obj, Expr): + return obj + + if obj.op is Op.TERMS: + d = {} + for t, c in obj.data.items(): + if c == 0: + continue + if t.op is Op.COMPLEX and c != 1: + t = t * c + c = 1 + if t.op is Op.TERMS: + for t1, c1 in t.data.items(): + _pairs_add(d, t1, c1 * c) + else: + _pairs_add(d, t, c) + if len(d) == 0: + # TODO: determine correct kind + return as_number(0) + elif len(d) == 1: + (t, c), = d.items() + if c == 1: + return t + return Expr(Op.TERMS, d) + + if obj.op is Op.FACTORS: + coeff = 1 + d = {} + for b, e in obj.data.items(): + if e == 0: + continue + if b.op is Op.TERMS and isinstance(e, integer_types) and e > 1: + # expand integer powers of sums + b = b * (b ** (e - 1)) + e = 1 + + if b.op in (Op.INTEGER, Op.REAL): + if e == 1: + coeff *= b.data[0] + elif e > 0: + coeff *= b.data[0] ** e + else: + _pairs_add(d, b, e) + elif b.op is Op.FACTORS: + if e > 0 and isinstance(e, integer_types): + for b1, e1 in b.data.items(): + _pairs_add(d, b1, e1 * e) + else: + _pairs_add(d, b, e) + else: + _pairs_add(d, b, e) + if len(d) == 0 or coeff == 0: + # TODO: determine correct kind + assert isinstance(coeff, number_types) + return as_number(coeff) + elif len(d) == 1: + (b, e), = d.items() + if e == 1: + t = b + else: + t = Expr(Op.FACTORS, d) + if coeff == 1: + return t + return Expr(Op.TERMS, {t: coeff}) + elif coeff == 1: + return Expr(Op.FACTORS, d) + else: + return Expr(Op.TERMS, {Expr(Op.FACTORS, d): coeff}) + + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + dividend, divisor = obj.data[1] + t1, c1 = as_term_coeff(dividend) + t2, c2 = as_term_coeff(divisor) + if isinstance(c1, integer_types) and isinstance(c2, integer_types): + g = gcd(c1, c2) + c1, c2 = c1 // g, c2 // g + else: + c1, c2 = c1 / c2, 1 + + if t1.op is Op.APPLY and t1.data[0] is ArithOp.DIV: + numer = t1.data[1][0] * c1 + denom = t1.data[1][1] * t2 * c2 + return as_apply(ArithOp.DIV, numer, denom) + + if t2.op is Op.APPLY and t2.data[0] is ArithOp.DIV: + numer = t2.data[1][1] * t1 * c1 + denom = t2.data[1][0] * c2 + return as_apply(ArithOp.DIV, numer, denom) + + d = dict(as_factors(t1).data) + for b, e in as_factors(t2).data.items(): + _pairs_add(d, b, -e) + numer, denom = {}, {} + for b, e in d.items(): + if e > 0: + numer[b] = e + else: + denom[b] = -e + numer = normalize(Expr(Op.FACTORS, numer)) * c1 + denom = normalize(Expr(Op.FACTORS, denom)) * c2 + + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] == 1: + # TODO: denom kind not used + return numer + return as_apply(ArithOp.DIV, numer, denom) + + if obj.op is Op.CONCAT: + lst = [obj.data[0]] + for s in obj.data[1:]: + last = lst[-1] + if ( + last.op is Op.STRING + and s.op is Op.STRING + and last.data[0][0] in '"\'' + and s.data[0][0] == last.data[0][-1] + ): + new_last = as_string(last.data[0][:-1] + s.data[0][1:], + max(last.data[1], s.data[1])) + lst[-1] = new_last + else: + lst.append(s) + if len(lst) == 1: + return lst[0] + return Expr(Op.CONCAT, tuple(lst)) + + if obj.op is Op.TERNARY: + cond, expr1, expr2 = map(normalize, obj.data) + if cond.op is Op.INTEGER: + return expr1 if cond.data[0] else expr2 + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + return obj + + +def as_expr(obj): + """Convert non-Expr objects to Expr objects. 
+ """ + if isinstance(obj, complex): + return as_complex(obj.real, obj.imag) + if isinstance(obj, number_types): + return as_number(obj) + if isinstance(obj, str): + # STRING expression holds string with boundary quotes, hence + # applying repr: + return as_string(repr(obj)) + if isinstance(obj, tuple): + return tuple(map(as_expr, obj)) + return obj + + +def as_symbol(obj): + """Return object as SYMBOL expression (variable or unparsed expression). + """ + return Expr(Op.SYMBOL, obj) + + +def as_number(obj, kind=4): + """Return object as INTEGER or REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op in (Op.INTEGER, Op.REAL): + return obj + raise OpError(f'cannot convert {obj} to INTEGER or REAL constant') + + +def as_integer(obj, kind=4): + """Return object as INTEGER constant. + """ + if isinstance(obj, int): + return Expr(Op.INTEGER, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.INTEGER: + return obj + raise OpError(f'cannot convert {obj} to INTEGER constant') + + +def as_real(obj, kind=4): + """Return object as REAL constant. + """ + if isinstance(obj, int): + return Expr(Op.REAL, (float(obj), kind)) + if isinstance(obj, float): + return Expr(Op.REAL, (obj, kind)) + if isinstance(obj, Expr): + if obj.op is Op.REAL: + return obj + elif obj.op is Op.INTEGER: + return Expr(Op.REAL, (float(obj.data[0]), kind)) + raise OpError(f'cannot convert {obj} to REAL constant') + + +def as_string(obj, kind=1): + """Return object as STRING expression (string literal constant). + """ + return Expr(Op.STRING, (obj, kind)) + + +def as_array(obj): + """Return object as ARRAY expression (array constant). + """ + if isinstance(obj, Expr): + obj = obj, + return Expr(Op.ARRAY, obj) + + +def as_complex(real, imag=0): + """Return object as COMPLEX expression (complex literal constant). + """ + return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag))) + + +def as_apply(func, *args, **kwargs): + """Return object as APPLY expression (function call, constructor, etc.) + """ + return Expr(Op.APPLY, + (func, tuple(map(as_expr, args)), + {k: as_expr(v) for k, v in kwargs.items()})) + + +def as_ternary(cond, expr1, expr2): + """Return object as TERNARY expression (cond?expr1:expr2). + """ + return Expr(Op.TERNARY, (cond, expr1, expr2)) + + +def as_ref(expr): + """Return object as referencing expression. + """ + return Expr(Op.REF, expr) + + +def as_deref(expr): + """Return object as dereferencing expression. + """ + return Expr(Op.DEREF, expr) + + +def as_eq(left, right): + return Expr(Op.RELATIONAL, (RelOp.EQ, left, right)) + + +def as_ne(left, right): + return Expr(Op.RELATIONAL, (RelOp.NE, left, right)) + + +def as_lt(left, right): + return Expr(Op.RELATIONAL, (RelOp.LT, left, right)) + + +def as_le(left, right): + return Expr(Op.RELATIONAL, (RelOp.LE, left, right)) + + +def as_gt(left, right): + return Expr(Op.RELATIONAL, (RelOp.GT, left, right)) + + +def as_ge(left, right): + return Expr(Op.RELATIONAL, (RelOp.GE, left, right)) + + +def as_terms(obj): + """Return expression as TERMS expression. 
+ """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.TERMS: + return obj + if obj.op is Op.INTEGER: + return Expr(Op.TERMS, {as_integer(1, obj.data[1]): obj.data[0]}) + if obj.op is Op.REAL: + return Expr(Op.TERMS, {as_real(1, obj.data[1]): obj.data[0]}) + return Expr(Op.TERMS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_factors(obj): + """Return expression as FACTORS expression. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.FACTORS: + return obj + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + if coeff == 1: + return Expr(Op.FACTORS, {term: 1}) + return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) + if (obj.op is Op.APPLY + and obj.data[0] is ArithOp.DIV + and not obj.data[2]): + return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) + return Expr(Op.FACTORS, {obj: 1}) + raise OpError(f'cannot convert {type(obj)} to terms Expr') + + +def as_term_coeff(obj): + """Return expression as term-coefficient pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op is Op.INTEGER: + return as_integer(1, obj.data[1]), obj.data[0] + if obj.op is Op.REAL: + return as_real(1, obj.data[1]), obj.data[0] + if obj.op is Op.TERMS: + if len(obj.data) == 1: + (term, coeff), = obj.data.items() + return term, coeff + # TODO: find common divisor of coefficients + if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV: + t, c = as_term_coeff(obj.data[1][0]) + return as_apply(ArithOp.DIV, t, obj.data[1][1]), c + return obj, 1 + raise OpError(f'cannot convert {type(obj)} to term and coeff') + + +def as_numer_denom(obj): + """Return expression as numer-denom pair. + """ + if isinstance(obj, Expr): + obj = normalize(obj) + if obj.op in (Op.INTEGER, Op.REAL, Op.COMPLEX, Op.SYMBOL, + Op.INDEXING, Op.TERNARY): + return obj, as_number(1) + elif obj.op is Op.APPLY: + if obj.data[0] is ArithOp.DIV and not obj.data[2]: + numers, denoms = map(as_numer_denom, obj.data[1]) + return numers[0] * denoms[1], numers[1] * denoms[0] + return obj, as_number(1) + elif obj.op is Op.TERMS: + numers, denoms = [], [] + for term, coeff in obj.data.items(): + n, d = as_numer_denom(term) + n = n * coeff + numers.append(n) + denoms.append(d) + numer, denom = as_number(0), as_number(1) + for i in range(len(numers)): + n = numers[i] + for j in range(len(numers)): + if i != j: + n *= denoms[j] + numer += n + denom *= denoms[i] + if denom.op in (Op.INTEGER, Op.REAL) and denom.data[0] < 0: + numer, denom = -numer, -denom + return numer, denom + elif obj.op is Op.FACTORS: + numer, denom = as_number(1), as_number(1) + for b, e in obj.data.items(): + bnumer, bdenom = as_numer_denom(b) + if e > 0: + numer *= bnumer ** e + denom *= bdenom ** e + elif e < 0: + numer *= bdenom ** (-e) + denom *= bnumer ** (-e) + return numer, denom + raise OpError(f'cannot convert {type(obj)} to numer and denom') + + +def _counter(): + # Used internally to generate unique dummy symbols + counter = 0 + while True: + counter += 1 + yield counter + + +COUNTER = _counter() + + +def eliminate_quotes(s): + """Replace quoted substrings of input string. + + Return a new string and a mapping of replacements. 
+ """ + d = {} + + def repl(m): + kind, value = m.groups()[:2] + if kind: + # remove trailing underscore + kind = kind[:-1] + p = {"'": "SINGLE", '"': "DOUBLE"}[value[0]] + k = f'{kind}@__f2py_QUOTES_{p}_{COUNTER.__next__()}@' + d[k] = value + return k + + new_s = re.sub(r'({kind}_|)({single_quoted}|{double_quoted})'.format( + kind=r'\w[\w\d_]*', + single_quoted=r"('([^'\\]|(\\.))*')", + double_quoted=r'("([^"\\]|(\\.))*")'), + repl, s) + + assert '"' not in new_s + assert "'" not in new_s + + return new_s, d + + +def insert_quotes(s, d): + """Inverse of eliminate_quotes. + """ + for k, v in d.items(): + kind = k[:k.find('@')] + if kind: + kind += '_' + s = s.replace(k, kind + v) + return s + + +def replace_parenthesis(s): + """Replace substrings of input that are enclosed in parenthesis. + + Return a new string and a mapping of replacements. + """ + # Find a parenthesis pair that appears first. + + # Fortran deliminator are `(`, `)`, `[`, `]`, `(/', '/)`, `/`. + # We don't handle `/` deliminator because it is not a part of an + # expression. + left, right = None, None + mn_i = len(s) + for left_, right_ in (('(/', '/)'), + '()', + '{}', # to support C literal structs + '[]'): + i = s.find(left_) + if i == -1: + continue + if i < mn_i: + mn_i = i + left, right = left_, right_ + + if left is None: + return s, {} + + i = mn_i + j = s.find(right, i) + + while s.count(left, i + 1, j) != s.count(right, i + 1, j): + j = s.find(right, j + 1) + if j == -1: + raise ValueError(f'Mismatch of {left + right} parenthesis in {s!r}') + + p = {'(': 'ROUND', '[': 'SQUARE', '{': 'CURLY', '(/': 'ROUNDDIV'}[left] + + k = f'@__f2py_PARENTHESIS_{p}_{COUNTER.__next__()}@' + v = s[i + len(left):j] + r, d = replace_parenthesis(s[j + len(right):]) + d[k] = v + return s[:i] + k + r, d + + +def _get_parenthesis_kind(s): + assert s.startswith('@__f2py_PARENTHESIS_'), s + return s.split('_')[4] + + +def unreplace_parenthesis(s, d): + """Inverse of replace_parenthesis. + """ + for k, v in d.items(): + p = _get_parenthesis_kind(k) + left = {'ROUND': '(', 'SQUARE': '[', 'CURLY': '{', 'ROUNDDIV': '(/'}[p] + right = {'ROUND': ')', 'SQUARE': ']', 'CURLY': '}', 'ROUNDDIV': '/)'}[p] + s = s.replace(k, left + v + right) + return s + + +def fromstring(s, language=Language.C): + """Create an expression from a string. + + This is a "lazy" parser, that is, only arithmetic operations are + resolved, non-arithmetic operations are treated as symbols. + """ + r = _FromStringWorker(language=language).parse(s) + if isinstance(r, Expr): + return r + raise ValueError(f'failed to parse `{s}` to Expr instance: got `{r}`') + + +class _Pair: + # Internal class to represent a pair of expressions + + def __init__(self, left, right): + self.left = left + self.right = right + + def substitute(self, symbols_map): + left, right = self.left, self.right + if isinstance(left, Expr): + left = left.substitute(symbols_map) + if isinstance(right, Expr): + right = right.substitute(symbols_map) + return _Pair(left, right) + + def __repr__(self): + return f'{type(self).__name__}({self.left}, {self.right})' + + +class _FromStringWorker: + + def __init__(self, language=Language.C): + self.original = None + self.quotes_map = None + self.language = language + + def finalize_string(self, s): + return insert_quotes(s, self.quotes_map) + + def parse(self, inp): + self.original = inp + unquoted, self.quotes_map = eliminate_quotes(inp) + return self.process(unquoted) + + def process(self, s, context='expr'): + """Parse string within the given context. 
+
+        The context may define the result in case of ambiguous
+        expressions. For instance, consider expressions `f(x, y)` and
+        `(x, y) + (a, b)` where `f` is a function and pair `(x, y)`
+        denotes complex number. Specifying context as "args" or
+        "expr", the subexpression `(x, y)` will be parsed to an
+        argument list or to a complex number, respectively.
+        """
+        if isinstance(s, (list, tuple)):
+            return type(s)(self.process(s_, context) for s_ in s)
+
+        assert isinstance(s, str), (type(s), s)
+
+        # replace subexpressions in parenthesis with f2py @-names
+        r, raw_symbols_map = replace_parenthesis(s)
+        r = r.strip()
+
+        def restore(r):
+            # restores subexpressions marked with f2py @-names
+            if isinstance(r, (list, tuple)):
+                return type(r)(map(restore, r))
+            return unreplace_parenthesis(r, raw_symbols_map)
+
+        # comma-separated tuple
+        if ',' in r:
+            operands = restore(r.split(','))
+            if context == 'args':
+                return tuple(self.process(operands))
+            if context == 'expr':
+                if len(operands) == 2:
+                    # complex number literal
+                    return as_complex(*self.process(operands))
+            raise NotImplementedError(
+                f'parsing comma-separated list (context={context}): {r}')
+
+        # ternary operation
+        m = re.match(r'\A([^?]+)[?]([^:]+)[:](.+)\Z', r)
+        if m:
+            assert context == 'expr', context
+            oper, expr1, expr2 = restore(m.groups())
+            oper = self.process(oper)
+            expr1 = self.process(expr1)
+            expr2 = self.process(expr2)
+            return as_ternary(oper, expr1, expr2)
+
+        # relational expression
+        if self.language is Language.Fortran:
+            m = re.match(
+                r'\A(.+)\s*[.](eq|ne|lt|le|gt|ge)[.]\s*(.+)\Z', r, re.I)
+        else:
+            m = re.match(
+                r'\A(.+)\s*([=][=]|[!][=]|[<][=]|[<]|[>][=]|[>])\s*(.+)\Z', r)
+        if m:
+            left, rop, right = m.groups()
+            if self.language is Language.Fortran:
+                rop = '.' + rop + '.'
+            left, right = self.process(restore((left, right)))
+            rop = RelOp.fromstring(rop, language=self.language)
+            return Expr(Op.RELATIONAL, (rop, left, right))
+
+        # keyword argument
+        m = re.match(r'\A(\w[\w\d_]*)\s*[=](.*)\Z', r)
+        if m:
+            keyname, value = m.groups()
+            value = restore(value)
+            return _Pair(keyname, self.process(value))
+
+        # addition/subtraction operations
+        operands = re.split(r'((?<!\d[edED])[+-])', r)
+        if len(operands) > 1:
+            result = self.process(restore(operands[0] or '0'))
+            for op, operand in zip(operands[1::2], operands[2::2]):
+                operand = self.process(restore(operand))
+                op = op.strip()
+                if op == '+':
+                    result += operand
+                else:
+                    assert op == '-'
+                    result -= operand
+            return result
+
+        # string concatenate operation
+        if self.language is Language.Fortran and '//' in r:
+            operands = restore(r.split('//'))
+            return Expr(Op.CONCAT,
+                        tuple(self.process(operands)))
+
+        # multiplication/division operations
+        operands = re.split(r'(?<=[@\w\d_])\s*([*]|/)',
+                            (r if self.language is Language.C
+                             else r.replace('**', '@__f2py_DOUBLE_STAR@')))
+        if len(operands) > 1:
+            operands = restore(operands)
+            if self.language is not Language.C:
+                operands = [operand.replace('@__f2py_DOUBLE_STAR@', '**')
+                            for operand in operands]
+            # Expression is an arithmetic product
+            result = self.process(operands[0])
+            for op, operand in zip(operands[1::2], operands[2::2]):
+                operand = self.process(operand)
+                op = op.strip()
+                if op == '*':
+                    result *= operand
+                else:
+                    assert op == '/'
+                    result /= operand
+            return result
+
+        # referencing/dereferencing
+        if r.startswith(('*', '&')):
+            op = {'*': Op.DEREF, '&': Op.REF}[r[0]]
+            operand = self.process(restore(r[1:]))
+            return Expr(op, operand)
+
+        # exponentiation operations
+        if self.language is not Language.C and '**' in r:
+            operands = list(reversed(restore(r.split('**'))))
+            result = self.process(operands[0])
+            for operand in operands[1:]:
+                operand = self.process(operand)
+                result = operand ** result
+            return result
+
+        # int-literal-constant
+        m = re.match(r'\A({digit_string})({kind}|)\Z'.format(
+            digit_string=r'\d+',
+            kind=r'_(\d+|\w[\w\d_]*)'), r)
+        if m:
+            value, _, kind = m.groups()
+            if kind and kind.isdigit():
+                kind = int(kind)
+            return as_integer(int(value), kind or 4)
+
+        # real-literal-constant
+        m = re.match(r'\A({significant}({exponent}|)|\d+{exponent})({kind}|)\Z'
+                     .format(
+                         significant=r'[.]\d+|\d+[.]\d*',
+                         exponent=r'[edED][+-]?\d+',
+                         kind=r'_(\d+|\w[\w\d_]*)'), r)
+        if m:
+            value, _, _, kind = m.groups()
+            if kind and kind.isdigit():
+                kind = int(kind)
+            value = value.lower()
+            if 'd' in value:
+                return as_real(float(value.replace('d', 'e')), kind or 8)
+            return as_real(float(value), kind or 4)
+
+        # string-literal-constant with kind parameter specification
+        if r in self.quotes_map:
+            kind = r[:r.find('@')]
+            return as_string(self.quotes_map[r], kind or 1)
+
+        # array constructor or literal complex constant or
+        # parenthesized expression
+        if r in raw_symbols_map:
+            paren = _get_parenthesis_kind(r)
+            items = self.process(restore(raw_symbols_map[r]),
+                                 'expr' if paren == 'ROUND' else 'args')
+            if paren == 'ROUND':
+                if isinstance(items, Expr):
+                    return items
+            if paren in ['ROUNDDIV', 'SQUARE']:
+                # Expression is an array constructor
+                if isinstance(items, Expr):
+                    items = (items,)
+                return as_array(items)
+
+        # function call/indexing
+        m = re.match(r'\A(.+)\s*(@__f2py_PARENTHESIS_(ROUND|SQUARE)_\d+@)\Z',
+                     r)
+        if m:
+            target, args, paren = m.groups()
+            target = self.process(restore(target))
+            args = self.process(restore(args)[1:-1], 'args')
+            if not isinstance(args, tuple):
+                args = args,
+            if paren == 'ROUND':
+                kwargs = {a.left: a.right for a in args
+                          if isinstance(a, _Pair)}
+                args = tuple(a for a in args if not isinstance(a, _Pair))
+                # Warning: this could also be a Fortran indexing operation.
+                return as_apply(target, *args, **kwargs)
+            else:
+                # Expression is a C/Python indexing operation
+                # (e.g.
used in .pyf files) + assert paren == 'SQUARE' + return target[args] + + # Fortran standard conforming identifier + m = re.match(r'\A\w[\w\d_]*\Z', r) + if m: + return as_symbol(r) + + # fall-back to symbol + r = self.finalize_string(restore(r)) + ewarn( + f'fromstring: treating {r!r} as symbol (original={self.original})') + return as_symbol(r) diff --git a/python/numpy/f2py/symbolic.pyi b/python/numpy/f2py/symbolic.pyi new file mode 100644 index 000000000..74e7a48ab --- /dev/null +++ b/python/numpy/f2py/symbolic.pyi @@ -0,0 +1,221 @@ +from collections.abc import Callable, Mapping +from enum import Enum +from typing import Any, Generic, ParamSpec, Self, TypeAlias, overload +from typing import Literal as L + +from typing_extensions import TypeVar + +__all__ = ["Expr"] + +### + +_Tss = ParamSpec("_Tss") +_ExprT = TypeVar("_ExprT", bound=Expr) +_ExprT1 = TypeVar("_ExprT1", bound=Expr) +_ExprT2 = TypeVar("_ExprT2", bound=Expr) +_OpT_co = TypeVar("_OpT_co", bound=Op, default=Op, covariant=True) +_LanguageT_co = TypeVar("_LanguageT_co", bound=Language, default=Language, covariant=True) +_DataT_co = TypeVar("_DataT_co", default=Any, covariant=True) +_LeftT_co = TypeVar("_LeftT_co", default=Any, covariant=True) +_RightT_co = TypeVar("_RightT_co", default=Any, covariant=True) + +_RelCOrPy: TypeAlias = L["==", "!=", "<", "<=", ">", ">="] +_RelFortran: TypeAlias = L[".eq.", ".ne.", ".lt.", ".le.", ".gt.", ".ge."] + +_ToExpr: TypeAlias = Expr | complex | str +_ToExprN: TypeAlias = _ToExpr | tuple[_ToExprN, ...] +_NestedString: TypeAlias = str | tuple[_NestedString, ...] | list[_NestedString] + +### + +class OpError(Exception): ... +class ExprWarning(UserWarning): ... + +class Language(Enum): + Python = 0 + Fortran = 1 + C = 2 + +class Op(Enum): + INTEGER = 10 + REAL = 12 + COMPLEX = 15 + STRING = 20 + ARRAY = 30 + SYMBOL = 40 + TERNARY = 100 + APPLY = 200 + INDEXING = 210 + CONCAT = 220 + RELATIONAL = 300 + TERMS = 1_000 + FACTORS = 2_000 + REF = 3_000 + DEREF = 3_001 + +class RelOp(Enum): + EQ = 1 + NE = 2 + LT = 3 + LE = 4 + GT = 5 + GE = 6 + + @overload + @classmethod + def fromstring(cls, s: _RelCOrPy, language: L[Language.C, Language.Python] = ...) -> RelOp: ... + @overload + @classmethod + def fromstring(cls, s: _RelFortran, language: L[Language.Fortran]) -> RelOp: ... + + # + @overload + def tostring(self, /, language: L[Language.C, Language.Python] = ...) -> _RelCOrPy: ... + @overload + def tostring(self, /, language: L[Language.Fortran]) -> _RelFortran: ... + +class ArithOp(Enum): + POS = 1 + NEG = 2 + ADD = 3 + SUB = 4 + MUL = 5 + DIV = 6 + POW = 7 + +class Precedence(Enum): + ATOM = 0 + POWER = 1 + UNARY = 2 + PRODUCT = 3 + SUM = 4 + LT = 6 + EQ = 7 + LAND = 11 + LOR = 12 + TERNARY = 13 + ASSIGN = 14 + TUPLE = 15 + NONE = 100 + +class Expr(Generic[_OpT_co, _DataT_co]): + op: _OpT_co + data: _DataT_co + + @staticmethod + def parse(s: str, language: Language = ...) -> Expr: ... + + # + def __init__(self, /, op: Op, data: _DataT_co) -> None: ... + + # + def __lt__(self, other: Expr, /) -> bool: ... + def __le__(self, other: Expr, /) -> bool: ... + def __gt__(self, other: Expr, /) -> bool: ... + def __ge__(self, other: Expr, /) -> bool: ... + + # + def __pos__(self, /) -> Self: ... + def __neg__(self, /) -> Expr: ... + + # + def __add__(self, other: Expr, /) -> Expr: ... + def __radd__(self, other: Expr, /) -> Expr: ... + + # + def __sub__(self, other: Expr, /) -> Expr: ... + def __rsub__(self, other: Expr, /) -> Expr: ... + + # + def __mul__(self, other: Expr, /) -> Expr: ... 
+ def __rmul__(self, other: Expr, /) -> Expr: ... + + # + def __pow__(self, other: Expr, /) -> Expr: ... + + # + def __truediv__(self, other: Expr, /) -> Expr: ... + def __rtruediv__(self, other: Expr, /) -> Expr: ... + + # + def __floordiv__(self, other: Expr, /) -> Expr: ... + def __rfloordiv__(self, other: Expr, /) -> Expr: ... + + # + def __call__( + self, + /, + *args: _ToExprN, + **kwargs: _ToExprN, + ) -> Expr[L[Op.APPLY], tuple[Self, tuple[Expr, ...], dict[str, Expr]]]: ... + + # + @overload + def __getitem__(self, index: _ExprT | tuple[_ExprT], /) -> Expr[L[Op.INDEXING], tuple[Self, _ExprT]]: ... + @overload + def __getitem__(self, index: _ToExpr | tuple[_ToExpr], /) -> Expr[L[Op.INDEXING], tuple[Self, Expr]]: ... + + # + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> Expr: ... + + # + @overload + def traverse(self, /, visit: Callable[_Tss, None], *args: _Tss.args, **kwargs: _Tss.kwargs) -> Expr: ... + @overload + def traverse(self, /, visit: Callable[_Tss, _ExprT], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _ExprT: ... + + # + def contains(self, /, other: Expr) -> bool: ... + + # + def symbols(self, /) -> set[Expr]: ... + def polynomial_atoms(self, /) -> set[Expr]: ... + + # + def linear_solve(self, /, symbol: Expr) -> tuple[Expr, Expr]: ... + + # + def tostring(self, /, parent_precedence: Precedence = ..., language: Language = ...) -> str: ... + +class _Pair(Generic[_LeftT_co, _RightT_co]): + left: _LeftT_co + right: _RightT_co + + def __init__(self, /, left: _LeftT_co, right: _RightT_co) -> None: ... + + # + @overload + def substitute(self: _Pair[_ExprT1, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Expr]: ... + @overload + def substitute(self: _Pair[_ExprT1, object], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Expr, Any]: ... + @overload + def substitute(self: _Pair[object, _ExprT2], /, symbols_map: Mapping[Expr, Expr]) -> _Pair[Any, Expr]: ... + @overload + def substitute(self, /, symbols_map: Mapping[Expr, Expr]) -> _Pair: ... + +class _FromStringWorker(Generic[_LanguageT_co]): + language: _LanguageT_co + + original: str | None + quotes_map: dict[str, str] + + @overload + def __init__(self: _FromStringWorker[L[Language.C]], /, language: L[Language.C] = ...) -> None: ... + @overload + def __init__(self, /, language: _LanguageT_co) -> None: ... + + # + def finalize_string(self, /, s: str) -> str: ... + + # + def parse(self, /, inp: str) -> Expr | _Pair: ... + + # + @overload + def process(self, /, s: str, context: str = "expr") -> Expr | _Pair: ... + @overload + def process(self, /, s: list[str], context: str = "expr") -> list[Expr | _Pair]: ... + @overload + def process(self, /, s: tuple[str, ...], context: str = "expr") -> tuple[Expr | _Pair, ...]: ... + @overload + def process(self, /, s: _NestedString, context: str = "expr") -> Any: ... 
# noqa: ANN401
diff --git a/python/numpy/f2py/tests/__init__.py b/python/numpy/f2py/tests/__init__.py
new file mode 100644
index 000000000..4ed8fdd53
--- /dev/null
+++ b/python/numpy/f2py/tests/__init__.py
@@ -0,0 +1,16 @@
+import pytest
+
+from numpy.testing import IS_EDITABLE, IS_WASM
+
+if IS_WASM:
+    pytest.skip(
+        "WASM/Pyodide does not use or support Fortran",
+        allow_module_level=True
+    )
+
+
+if IS_EDITABLE:
+    pytest.skip(
+        "Editable install doesn't support tests with a compile step",
+        allow_module_level=True
+    )
diff --git a/python/numpy/f2py/tests/src/abstract_interface/foo.f90 b/python/numpy/f2py/tests/src/abstract_interface/foo.f90
new file mode 100644
index 000000000..76d16aae2 --- /dev/null +++ b/python/numpy/f2py/tests/src/abstract_interface/foo.f90 @@ -0,0 +1,34 @@ +module ops_module + + abstract interface + subroutine op(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + end subroutine + end interface + +contains + + subroutine foo(x, y, r1, r2) + integer, intent(in) :: x, y + integer, intent(out) :: r1, r2 + procedure (op) add1, add2 + procedure (op), pointer::p + p=>add1 + call p(x, y, r1) + p=>add2 + call p(x, y, r2) + end subroutine +end module + +subroutine add1(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + y +end subroutine + +subroutine add2(x, y, z) + integer, intent(in) :: x, y + integer, intent(out) :: z + z = x + 2 * y +end subroutine diff --git a/python/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 b/python/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 new file mode 100644 index 000000000..36791e469 --- /dev/null +++ b/python/numpy/f2py/tests/src/abstract_interface/gh18403_mod.f90 @@ -0,0 +1,6 @@ +module test + abstract interface + subroutine foo() + end subroutine + end interface +end module test diff --git a/python/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/python/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c new file mode 100644 index 000000000..b66672a43 --- /dev/null +++ b/python/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -0,0 +1,235 @@ +/* + * This file was auto-generated with f2py (version:2_1330) and hand edited by + * Pearu for testing purposes. Do not edit this file unless you know what you + * are doing!!! + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** See f2py2e/cfuncs.py: includes ***********************/ + +#define PY_SSIZE_T_CLEAN +#include +#include "fortranobject.h" +#include + +static PyObject *wrap_error; +static PyObject *wrap_module; + +/************************************ call ************************************/ +static char doc_f2py_rout_wrap_call[] = "\ +Function signature:\n\ + arr = call(type_num,dims,intent,obj)\n\ +Required arguments:\n" +" type_num : input int\n" +" dims : input int-sequence\n" +" intent : input int\n" +" obj : input python object\n" +"Return objects:\n" +" arr : array"; +static PyObject *f2py_rout_wrap_call(PyObject *capi_self, + PyObject *capi_args) { + PyObject * volatile capi_buildvalue = NULL; + int type_num = 0; + int elsize = 0; + npy_intp *dims = NULL; + PyObject *dims_capi = Py_None; + int rank = 0; + int intent = 0; + PyArrayObject *capi_arr_tmp = NULL; + PyObject *arr_capi = Py_None; + int i; + + if (!PyArg_ParseTuple(capi_args,"iiOiO|:wrap.call",\ + &type_num,&elsize,&dims_capi,&intent,&arr_capi)) + return NULL; + rank = PySequence_Length(dims_capi); + dims = malloc(rank*sizeof(npy_intp)); + for (i=0;ikind, + PyArray_DESCR(arr)->type, + PyArray_TYPE(arr), + PyArray_ITEMSIZE(arr), + PyDataType_ALIGNMENT(PyArray_DESCR(arr)), + PyArray_FLAGS(arr), + PyArray_ITEMSIZE(arr)); +} + +static PyMethodDef f2py_module_methods[] = { + + {"call",f2py_rout_wrap_call,METH_VARARGS,doc_f2py_rout_wrap_call}, + {"array_attrs",f2py_rout_wrap_attrs,METH_VARARGS,doc_f2py_rout_wrap_attrs}, + {NULL,NULL} +}; + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "test_array_from_pyobj_ext", + NULL, + -1, + f2py_module_methods, + NULL, + NULL, + NULL, + NULL +}; + +PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { + PyObject *m,*d, *s; + m = wrap_module = PyModule_Create(&moduledef); + Py_SET_TYPE(&PyFortran_Type, &PyType_Type); + 
import_array(); + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap (failed to import numpy)"); + d = PyModule_GetDict(m); + s = PyUnicode_FromString("This module 'wrap' is auto-generated with f2py (version:2_1330).\nFunctions:\n" + " arr = call(type_num,dims,intent,obj)\n" + "."); + PyDict_SetItemString(d, "__doc__", s); + wrap_error = PyErr_NewException ("wrap.error", NULL, NULL); + Py_DECREF(s); + +#define ADDCONST(NAME, CONST) \ + s = PyLong_FromLong(CONST); \ + PyDict_SetItemString(d, NAME, s); \ + Py_DECREF(s) + + ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN); + ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT); + ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT); + ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE); + ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE); + ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY); + ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C); + ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL); + ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE); + ADDCONST("NPY_BOOL", NPY_BOOL); + ADDCONST("NPY_BYTE", NPY_BYTE); + ADDCONST("NPY_UBYTE", NPY_UBYTE); + ADDCONST("NPY_SHORT", NPY_SHORT); + ADDCONST("NPY_USHORT", NPY_USHORT); + ADDCONST("NPY_INT", NPY_INT); + ADDCONST("NPY_UINT", NPY_UINT); + ADDCONST("NPY_INTP", NPY_INTP); + ADDCONST("NPY_UINTP", NPY_UINTP); + ADDCONST("NPY_LONG", NPY_LONG); + ADDCONST("NPY_ULONG", NPY_ULONG); + ADDCONST("NPY_LONGLONG", NPY_LONGLONG); + ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG); + ADDCONST("NPY_FLOAT", NPY_FLOAT); + ADDCONST("NPY_DOUBLE", NPY_DOUBLE); + ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE); + ADDCONST("NPY_CFLOAT", NPY_CFLOAT); + ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE); + ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE); + ADDCONST("NPY_OBJECT", NPY_OBJECT); + ADDCONST("NPY_STRING", NPY_STRING); + ADDCONST("NPY_UNICODE", NPY_UNICODE); + ADDCONST("NPY_VOID", NPY_VOID); + ADDCONST("NPY_NTYPES_LEGACY", NPY_NTYPES_LEGACY); + ADDCONST("NPY_NOTYPE", NPY_NOTYPE); + ADDCONST("NPY_USERDEF", NPY_USERDEF); + + ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS); + ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS); + ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA); + ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST); + ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY); + ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY); + ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED); + ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE); + ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY); + + ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED); + ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS); + ADDCONST("CARRAY", NPY_ARRAY_CARRAY); + ADDCONST("FARRAY", NPY_ARRAY_FARRAY); + ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO); + ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO); + ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT); + ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); + +#undef ADDCONST + + if (PyErr_Occurred()) + Py_FatalError("can't initialize module wrap"); + +#ifdef F2PY_REPORT_ATEXIT + on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); +#endif + +#if Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + + return m; +} +#ifdef __cplusplus +} +#endif diff --git a/python/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap b/python/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap new file mode 100644 index 000000000..2665f89b5 --- /dev/null +++ b/python/numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(rk="double")) diff --git a/python/numpy/f2py/tests/src/assumed_shape/foo_free.f90 
b/python/numpy/f2py/tests/src/assumed_shape/foo_free.f90 new file mode 100644 index 000000000..b301710f5 --- /dev/null +++ b/python/numpy/f2py/tests/src/assumed_shape/foo_free.f90 @@ -0,0 +1,34 @@ + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum diff --git a/python/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 b/python/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 new file mode 100644 index 000000000..cbe6317ed --- /dev/null +++ b/python/numpy/f2py/tests/src/assumed_shape/foo_mod.f90 @@ -0,0 +1,41 @@ + +module mod + +contains + +subroutine sum(x, res) + implicit none + real, intent(in) :: x(:) + real, intent(out) :: res + + integer :: i + + !print *, "sum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end subroutine sum + +function fsum(x) result (res) + implicit none + real, intent(in) :: x(:) + real :: res + + integer :: i + + !print *, "fsum: size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + +end function fsum + + +end module mod diff --git a/python/numpy/f2py/tests/src/assumed_shape/foo_use.f90 b/python/numpy/f2py/tests/src/assumed_shape/foo_use.f90 new file mode 100644 index 000000000..337465ac5 --- /dev/null +++ b/python/numpy/f2py/tests/src/assumed_shape/foo_use.f90 @@ -0,0 +1,19 @@ +subroutine sum_with_use(x, res) + use precision + + implicit none + + real(kind=rk), intent(in) :: x(:) + real(kind=rk), intent(out) :: res + + integer :: i + + !print *, "size(x) = ", size(x) + + res = 0.0 + + do i = 1, size(x) + res = res + x(i) + enddo + + end subroutine diff --git a/python/numpy/f2py/tests/src/assumed_shape/precision.f90 b/python/numpy/f2py/tests/src/assumed_shape/precision.f90 new file mode 100644 index 000000000..ed6c70cbb --- /dev/null +++ b/python/numpy/f2py/tests/src/assumed_shape/precision.f90 @@ -0,0 +1,4 @@ +module precision + integer, parameter :: rk = selected_real_kind(8) + integer, parameter :: ik = selected_real_kind(4) +end module diff --git a/python/numpy/f2py/tests/src/block_docstring/foo.f b/python/numpy/f2py/tests/src/block_docstring/foo.f new file mode 100644 index 000000000..c8315f12c --- /dev/null +++ b/python/numpy/f2py/tests/src/block_docstring/foo.f @@ -0,0 +1,6 @@ + SUBROUTINE FOO() + INTEGER BAR(2, 3) + + COMMON /BLOCK/ BAR + RETURN + END diff --git a/python/numpy/f2py/tests/src/callback/foo.f b/python/numpy/f2py/tests/src/callback/foo.f new file mode 100644 index 000000000..ba397bb38 --- /dev/null +++ b/python/numpy/f2py/tests/src/callback/foo.f @@ -0,0 +1,62 @@ + subroutine t(fun,a) + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine func(a) +cf2py intent(in,out) a + integer a + a = a + 11 + end + + subroutine func0(a) +cf2py intent(out) a + integer a + a = 11 + end + + subroutine t2(a) +cf2py intent(callback) fun + integer a +cf2py intent(out) a + external fun + call fun(a) + end + + subroutine string_callback(callback, a) + external callback + double precision callback + double precision a + character*1 r +cf2py intent(out) a + r = 'r' + a = callback(r) + end + + subroutine string_callback_array(callback, cu, lencu, a) + external 
callback + integer callback + integer lencu + character*8 cu(lencu) + integer a +cf2py intent(out) a + + a = callback(cu, lencu) + end + + subroutine hidden_callback(a, r) + external global_f +cf2py intent(callback, hide) global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end + + subroutine hidden_callback2(a, r) + external global_f + integer a, r, global_f +cf2py intent(out) r + r = global_f(a) + end diff --git a/python/numpy/f2py/tests/src/callback/gh17797.f90 b/python/numpy/f2py/tests/src/callback/gh17797.f90 new file mode 100644 index 000000000..49853afd7 --- /dev/null +++ b/python/numpy/f2py/tests/src/callback/gh17797.f90 @@ -0,0 +1,7 @@ +function gh17797(f, y) result(r) + external f + integer(8) :: r, f + integer(8), dimension(:) :: y + r = f(0) + r = r + sum(y) +end function gh17797 diff --git a/python/numpy/f2py/tests/src/callback/gh18335.f90 b/python/numpy/f2py/tests/src/callback/gh18335.f90 new file mode 100644 index 000000000..92b6d7540 --- /dev/null +++ b/python/numpy/f2py/tests/src/callback/gh18335.f90 @@ -0,0 +1,17 @@ + ! When gh18335_workaround is defined as an extension, + ! the issue cannot be reproduced. + !subroutine gh18335_workaround(f, y) + ! implicit none + ! external f + ! integer(kind=1) :: y(1) + ! call f(y) + !end subroutine gh18335_workaround + + function gh18335(f) result (r) + implicit none + external f + integer(kind=1) :: y(1), r + y(1) = 123 + call f(y) + r = y(1) + end function gh18335 diff --git a/python/numpy/f2py/tests/src/callback/gh25211.f b/python/numpy/f2py/tests/src/callback/gh25211.f new file mode 100644 index 000000000..ba727a10a --- /dev/null +++ b/python/numpy/f2py/tests/src/callback/gh25211.f @@ -0,0 +1,10 @@ + SUBROUTINE FOO(FUN,R) + EXTERNAL FUN + INTEGER I + REAL*8 R, FUN +Cf2py intent(out) r + R = 0D0 + DO I=-5,5 + R = R + FUN(I) + ENDDO + END diff --git a/python/numpy/f2py/tests/src/callback/gh25211.pyf b/python/numpy/f2py/tests/src/callback/gh25211.pyf new file mode 100644 index 000000000..f12011153 --- /dev/null +++ b/python/numpy/f2py/tests/src/callback/gh25211.pyf @@ -0,0 +1,18 @@ +python module __user__routines + interface + function fun(i) result (r) + integer :: i + real*8 :: r + end function fun + end interface +end python module __user__routines + +python module callback2 + interface + subroutine foo(f,r) + use __user__routines, f=>fun + external f + real*8 intent(out) :: r + end subroutine foo + end interface +end python module callback2 diff --git a/python/numpy/f2py/tests/src/callback/gh26681.f90 b/python/numpy/f2py/tests/src/callback/gh26681.f90 new file mode 100644 index 000000000..00c0ec93d --- /dev/null +++ b/python/numpy/f2py/tests/src/callback/gh26681.f90 @@ -0,0 +1,18 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort + + subroutine do_something(message) + !f2py intent(callback, hide) mypy_abort + character(len=*), intent(in) :: message + call mypy_abort(message) + end subroutine do_something +end module utils diff --git a/python/numpy/f2py/tests/src/cli/gh_22819.pyf b/python/numpy/f2py/tests/src/cli/gh_22819.pyf new file mode 100644 index 000000000..8eb5bb106 --- /dev/null +++ b/python/numpy/f2py/tests/src/cli/gh_22819.pyf @@ -0,0 +1,6 @@ +python module test_22819 + interface + subroutine hello() + end subroutine hello + end 
interface +end python module test_22819 diff --git a/python/numpy/f2py/tests/src/cli/hi77.f b/python/numpy/f2py/tests/src/cli/hi77.f new file mode 100644 index 000000000..8b916ebe0 --- /dev/null +++ b/python/numpy/f2py/tests/src/cli/hi77.f @@ -0,0 +1,3 @@ + SUBROUTINE HI + PRINT*, "HELLO WORLD" + END SUBROUTINE diff --git a/python/numpy/f2py/tests/src/cli/hiworld.f90 b/python/numpy/f2py/tests/src/cli/hiworld.f90 new file mode 100644 index 000000000..981f87754 --- /dev/null +++ b/python/numpy/f2py/tests/src/cli/hiworld.f90 @@ -0,0 +1,3 @@ +function hi() + print*, "Hello World" +end function diff --git a/python/numpy/f2py/tests/src/common/block.f b/python/numpy/f2py/tests/src/common/block.f new file mode 100644 index 000000000..7ea7968fe --- /dev/null +++ b/python/numpy/f2py/tests/src/common/block.f @@ -0,0 +1,11 @@ + SUBROUTINE INITCB + DOUBLE PRECISION LONG + CHARACTER STRING + INTEGER OK + + COMMON /BLOCK/ LONG, STRING, OK + LONG = 1.0 + STRING = '2' + OK = 3 + RETURN + END diff --git a/python/numpy/f2py/tests/src/common/gh19161.f90 b/python/numpy/f2py/tests/src/common/gh19161.f90 new file mode 100644 index 000000000..a2f40735a --- /dev/null +++ b/python/numpy/f2py/tests/src/common/gh19161.f90 @@ -0,0 +1,10 @@ +module typedefmod + use iso_fortran_env, only: real32 +end module typedefmod + +module data + use typedefmod, only: real32 + implicit none + real(kind=real32) :: x + common/test/x +end module data diff --git a/python/numpy/f2py/tests/src/crackfortran/accesstype.f90 b/python/numpy/f2py/tests/src/crackfortran/accesstype.f90 new file mode 100644 index 000000000..e2cbd445d --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/accesstype.f90 @@ -0,0 +1,13 @@ +module foo + public + type, private, bind(c) :: a + integer :: i + end type a + type, bind(c) :: b_ + integer :: j + end type b_ + public :: b_ + type :: c + integer :: k + end type c +end module foo diff --git a/python/numpy/f2py/tests/src/crackfortran/common_with_division.f b/python/numpy/f2py/tests/src/crackfortran/common_with_division.f new file mode 100644 index 000000000..4aa12cf6d --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/common_with_division.f @@ -0,0 +1,17 @@ + subroutine common_with_division + integer lmu,lb,lub,lpmin + parameter (lmu=1) + parameter (lb=20) +c crackfortran fails to parse this +c parameter (lub=(lb-1)*lmu+1) +c crackfortran can successfully parse this though + parameter (lub=lb*lmu-lmu+1) + parameter (lpmin=2) + +c crackfortran fails to parse this correctly +c common /mortmp/ ctmp((lub*(lub+1)*(lub+1))/lpmin+1) + + common /mortmp/ ctmp(lub/lpmin+1) + + return + end diff --git a/python/numpy/f2py/tests/src/crackfortran/data_common.f b/python/numpy/f2py/tests/src/crackfortran/data_common.f new file mode 100644 index 000000000..5ffd865c8 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/data_common.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYDATA + DATA MYDATA /0/ + END + SUBROUTINE SUB1 + COMMON /MYCOM/ MYDATA + MYDATA = MYDATA + 1 + END diff --git a/python/numpy/f2py/tests/src/crackfortran/data_multiplier.f b/python/numpy/f2py/tests/src/crackfortran/data_multiplier.f new file mode 100644 index 000000000..19ff8a83e --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/data_multiplier.f @@ -0,0 +1,5 @@ + BLOCK DATA MYBLK + IMPLICIT DOUBLE PRECISION (A-H,O-Z) + COMMON /MYCOM/ IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 + DATA IVAR1, IVAR2, IVAR3, IVAR4, EVAR5 /2*3,2*2,0.0D0/ + END diff --git a/python/numpy/f2py/tests/src/crackfortran/data_stmts.f90 
b/python/numpy/f2py/tests/src/crackfortran/data_stmts.f90 new file mode 100644 index 000000000..576c5e485 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/data_stmts.f90 @@ -0,0 +1,20 @@ +! gh-23276 +module cmplxdat + implicit none + integer :: i, j + real :: x, y + real, dimension(2) :: z + real(kind=8) :: pi + complex(kind=8), target :: medium_ref_index + complex(kind=8), target :: ref_index_one, ref_index_two + complex(kind=8), dimension(2) :: my_array + real(kind=8), dimension(3) :: my_real_array = (/1.0d0, 2.0d0, 3.0d0/) + + data i, j / 2, 3 / + data x, y / 1.5, 2.0 / + data z / 3.5, 7.0 / + data medium_ref_index / (1.d0, 0.d0) / + data ref_index_one, ref_index_two / (13.0d0, 21.0d0), (-30.0d0, 43.0d0) / + data my_array / (1.0d0, 2.0d0), (-3.0d0, 4.0d0) / + data pi / 3.1415926535897932384626433832795028841971693993751058209749445923078164062d0 / +end module cmplxdat diff --git a/python/numpy/f2py/tests/src/crackfortran/data_with_comments.f b/python/numpy/f2py/tests/src/crackfortran/data_with_comments.f new file mode 100644 index 000000000..4128f004e --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/data_with_comments.f @@ -0,0 +1,8 @@ + BLOCK DATA PARAM_INI + COMMON /MYCOM/ MYTAB + INTEGER MYTAB(3) + DATA MYTAB/ + * 0, ! 1 and more commenty stuff + * 4, ! 2 + * 0 / + END diff --git a/python/numpy/f2py/tests/src/crackfortran/foo_deps.f90 b/python/numpy/f2py/tests/src/crackfortran/foo_deps.f90 new file mode 100644 index 000000000..e327b25c8 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/foo_deps.f90 @@ -0,0 +1,6 @@ +module foo + type bar + character(len = 4) :: text + end type bar + type(bar), parameter :: abar = bar('abar') +end module foo diff --git a/python/numpy/f2py/tests/src/crackfortran/gh15035.f b/python/numpy/f2py/tests/src/crackfortran/gh15035.f new file mode 100644 index 000000000..1bb2e6745 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh15035.f @@ -0,0 +1,16 @@ + subroutine subb(k) + real(8), intent(inout) :: k(:) + k=k+1 + endsubroutine + + subroutine subc(w,k) + real(8), intent(in) :: w(:) + real(8), intent(out) :: k(size(w)) + k=w+1 + endsubroutine + + function t0(value) + character value + character t0 + t0 = value + endfunction diff --git a/python/numpy/f2py/tests/src/crackfortran/gh17859.f b/python/numpy/f2py/tests/src/crackfortran/gh17859.f new file mode 100644 index 000000000..995953845 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh17859.f @@ -0,0 +1,12 @@ + integer(8) function external_as_statement(fcn) + implicit none + external fcn + integer(8) :: fcn + external_as_statement = fcn(0) + end + + integer(8) function external_as_attribute(fcn) + implicit none + integer(8), external :: fcn + external_as_attribute = fcn(0) + end diff --git a/python/numpy/f2py/tests/src/crackfortran/gh22648.pyf b/python/numpy/f2py/tests/src/crackfortran/gh22648.pyf new file mode 100644 index 000000000..b3454f186 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh22648.pyf @@ -0,0 +1,7 @@ +python module iri16py ! in + interface ! in :iri16py + block data ! in :iri16py:iridreg_modified.for + COMMON /fircom/ eden,tabhe,tabla,tabmo,tabza,tabfl + end block data + end interface +end python module iri16py diff --git a/python/numpy/f2py/tests/src/crackfortran/gh23533.f b/python/numpy/f2py/tests/src/crackfortran/gh23533.f new file mode 100644 index 000000000..db522afa7 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh23533.f @@ -0,0 +1,5 @@ + SUBROUTINE EXAMPLE( ) + IF( .TRUE. 
) THEN + CALL DO_SOMETHING() + END IF ! ** .TRUE. ** + END diff --git a/python/numpy/f2py/tests/src/crackfortran/gh23598.f90 b/python/numpy/f2py/tests/src/crackfortran/gh23598.f90 new file mode 100644 index 000000000..e0dffb5ef --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh23598.f90 @@ -0,0 +1,4 @@ +integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b +end function diff --git a/python/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 b/python/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 new file mode 100644 index 000000000..3b44efc5e --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh23598Warn.f90 @@ -0,0 +1,11 @@ +module test_bug + implicit none + private + public :: intproduct + +contains + integer function intproduct(a, b) result(res) + integer, intent(in) :: a, b + res = a*b + end function +end module diff --git a/python/numpy/f2py/tests/src/crackfortran/gh23879.f90 b/python/numpy/f2py/tests/src/crackfortran/gh23879.f90 new file mode 100644 index 000000000..fac262d53 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh23879.f90 @@ -0,0 +1,20 @@ +module gh23879 + implicit none + private + public :: foo + + contains + + subroutine foo(a, b) + integer, intent(in) :: a + integer, intent(out) :: b + b = a + call bar(b) + end subroutine + + subroutine bar(x) + integer, intent(inout) :: x + x = 2*x + end subroutine + + end module gh23879 diff --git a/python/numpy/f2py/tests/src/crackfortran/gh27697.f90 b/python/numpy/f2py/tests/src/crackfortran/gh27697.f90 new file mode 100644 index 000000000..a5eae4e79 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh27697.f90 @@ -0,0 +1,12 @@ +module utils + implicit none + contains + subroutine my_abort(message) + implicit none + character(len=*), intent(in) :: message + !f2py callstatement PyErr_SetString(PyExc_ValueError, message);f2py_success = 0; + !f2py callprotoargument char* + write(0,*) "THIS SHOULD NOT APPEAR" + stop 1 + end subroutine my_abort +end module utils diff --git a/python/numpy/f2py/tests/src/crackfortran/gh2848.f90 b/python/numpy/f2py/tests/src/crackfortran/gh2848.f90 new file mode 100644 index 000000000..31ea9327a --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/gh2848.f90 @@ -0,0 +1,13 @@ + subroutine gh2848( & + ! first 2 parameters + par1, par2,& + ! last 2 parameters + par3, par4) + + integer, intent(in) :: par1, par2 + integer, intent(out) :: par3, par4 + + par3 = par1 + par4 = par2 + + end subroutine gh2848 diff --git a/python/numpy/f2py/tests/src/crackfortran/operators.f90 b/python/numpy/f2py/tests/src/crackfortran/operators.f90 new file mode 100644 index 000000000..1d060a3d2 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/operators.f90 @@ -0,0 +1,49 @@ +module foo + type bar + character(len = 32) :: item + end type bar + interface operator(.item.) + module procedure item_int, item_real + end interface operator(.item.) 
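+ ! The interfaces below additionally overload (==) and assignment(=) for type(bar), backed by the module procedures that follow.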
+ interface operator(==) + module procedure items_are_equal + end interface operator(==) + interface assignment(=) + module procedure get_int, get_real + end interface assignment(=) +contains + function item_int(val) result(elem) + integer, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(I32)") val + end function item_int + + function item_real(val) result(elem) + real, intent(in) :: val + type(bar) :: elem + + write(elem%item, "(1PE32.12)") val + end function item_real + + function items_are_equal(val1, val2) result(equal) + type(bar), intent(in) :: val1, val2 + logical :: equal + + equal = (val1%item == val2%item) + end function items_are_equal + + subroutine get_real(rval, item) + real, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_real + + subroutine get_int(rval, item) + integer, intent(out) :: rval + type(bar), intent(in) :: item + + read(item%item, *) rval + end subroutine get_int +end module foo diff --git a/python/numpy/f2py/tests/src/crackfortran/privatemod.f90 b/python/numpy/f2py/tests/src/crackfortran/privatemod.f90 new file mode 100644 index 000000000..2674c2147 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/privatemod.f90 @@ -0,0 +1,11 @@ +module foo + private + integer :: a + public :: setA + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/python/numpy/f2py/tests/src/crackfortran/publicmod.f90 b/python/numpy/f2py/tests/src/crackfortran/publicmod.f90 new file mode 100644 index 000000000..1db76e3fe --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/publicmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + public :: setA +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/python/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 b/python/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 new file mode 100644 index 000000000..46bef7cb9 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/pubprivmod.f90 @@ -0,0 +1,10 @@ +module foo + public + integer, private :: a + integer :: b +contains + subroutine setA(v) + integer, intent(in) :: v + a = v + end subroutine setA +end module foo diff --git a/python/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 b/python/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 new file mode 100644 index 000000000..13515ce98 --- /dev/null +++ b/python/numpy/f2py/tests/src/crackfortran/unicode_comment.f90 @@ -0,0 +1,4 @@ +subroutine foo(x) + real(8), intent(in) :: x + ! 
Écrit à l'écran la valeur de x +end subroutine diff --git a/python/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap b/python/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap new file mode 100644 index 000000000..a4425f887 --- /dev/null +++ b/python/numpy/f2py/tests/src/f2cmap/.f2py_f2cmap @@ -0,0 +1 @@ +dict(real=dict(real32='float', real64='double'), integer=dict(int64='long_long')) diff --git a/python/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 b/python/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 new file mode 100644 index 000000000..1e1dc1d40 --- /dev/null +++ b/python/numpy/f2py/tests/src/f2cmap/isoFortranEnvMap.f90 @@ -0,0 +1,9 @@ + subroutine func1(n, x, res) + use, intrinsic :: iso_fortran_env, only: int64, real64 + implicit none + integer(int64), intent(in) :: n + real(real64), intent(in) :: x(n) + real(real64), intent(out) :: res +!f2py intent(hide) :: n + res = sum(x) + end diff --git a/python/numpy/f2py/tests/src/isocintrin/isoCtests.f90 b/python/numpy/f2py/tests/src/isocintrin/isoCtests.f90 new file mode 100644 index 000000000..765f7c1ce --- /dev/null +++ b/python/numpy/f2py/tests/src/isocintrin/isoCtests.f90 @@ -0,0 +1,34 @@ + module coddity + use iso_c_binding, only: c_double, c_int, c_int64_t + implicit none + contains + subroutine c_add(a, b, c) bind(c, name="c_add") + real(c_double), intent(in) :: a, b + real(c_double), intent(out) :: c + c = a + b + end subroutine c_add + ! gh-9693 + function wat(x, y) result(z) bind(c) + integer(c_int), intent(in) :: x, y + integer(c_int) :: z + + z = x + 7 + end function wat + ! gh-25207 + subroutine c_add_int64(a, b, c) bind(c) + integer(c_int64_t), intent(in) :: a, b + integer(c_int64_t), intent(out) :: c + c = a + b + end subroutine c_add_int64 + ! gh-25207 + subroutine add_arr(A, B, C) + integer(c_int64_t), intent(in) :: A(3) + integer(c_int64_t), intent(in) :: B(3) + integer(c_int64_t), intent(out) :: C(3) + integer :: j + + do j = 1, 3 + C(j) = A(j)+B(j) + end do + end subroutine + end module coddity diff --git a/python/numpy/f2py/tests/src/kind/foo.f90 b/python/numpy/f2py/tests/src/kind/foo.f90 new file mode 100644 index 000000000..d3d15cfb2 --- /dev/null +++ b/python/numpy/f2py/tests/src/kind/foo.f90 @@ -0,0 +1,20 @@ + + +subroutine selectedrealkind(p, r, res) + implicit none + + integer, intent(in) :: p, r + !f2py integer :: r=0 + integer, intent(out) :: res + res = selected_real_kind(p, r) + +end subroutine + +subroutine selectedintkind(p, res) + implicit none + + integer, intent(in) :: p + integer, intent(out) :: res + res = selected_int_kind(p) + +end subroutine diff --git a/python/numpy/f2py/tests/src/mixed/foo.f b/python/numpy/f2py/tests/src/mixed/foo.f new file mode 100644 index 000000000..c34742578 --- /dev/null +++ b/python/numpy/f2py/tests/src/mixed/foo.f @@ -0,0 +1,5 @@ + subroutine bar11(a) +cf2py intent(out) a + integer a + a = 11 + end diff --git a/python/numpy/f2py/tests/src/mixed/foo_fixed.f90 b/python/numpy/f2py/tests/src/mixed/foo_fixed.f90 new file mode 100644 index 000000000..7543a6acb --- /dev/null +++ b/python/numpy/f2py/tests/src/mixed/foo_fixed.f90 @@ -0,0 +1,8 @@ + module foo_fixed + contains + subroutine bar12(a) +!f2py intent(out) a + integer a + a = 12 + end subroutine bar12 + end module foo_fixed diff --git a/python/numpy/f2py/tests/src/mixed/foo_free.f90 b/python/numpy/f2py/tests/src/mixed/foo_free.f90 new file mode 100644 index 000000000..c1b641f13 --- /dev/null +++ b/python/numpy/f2py/tests/src/mixed/foo_free.f90 @@ -0,0 +1,8 @@ +module foo_free +contains + subroutine bar13(a) + !f2py intent(out) a + 
integer a + a = 13 + end subroutine bar13 +end module foo_free diff --git a/python/numpy/f2py/tests/src/modules/gh25337/data.f90 b/python/numpy/f2py/tests/src/modules/gh25337/data.f90 new file mode 100644 index 000000000..483d13ceb --- /dev/null +++ b/python/numpy/f2py/tests/src/modules/gh25337/data.f90 @@ -0,0 +1,8 @@ +module data + real(8) :: shift +contains + subroutine set_shift(in_shift) + real(8), intent(in) :: in_shift + shift = in_shift + end subroutine set_shift +end module data diff --git a/python/numpy/f2py/tests/src/modules/gh25337/use_data.f90 b/python/numpy/f2py/tests/src/modules/gh25337/use_data.f90 new file mode 100644 index 000000000..b3fae8b87 --- /dev/null +++ b/python/numpy/f2py/tests/src/modules/gh25337/use_data.f90 @@ -0,0 +1,6 @@ +subroutine shift_a(dim_a, a) + use data, only: shift + integer, intent(in) :: dim_a + real(8), intent(inout), dimension(dim_a) :: a + a = a + shift +end subroutine shift_a diff --git a/python/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 b/python/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 new file mode 100644 index 000000000..07adce591 --- /dev/null +++ b/python/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + private mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + private :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/python/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 b/python/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 new file mode 100644 index 000000000..b7fb95b01 --- /dev/null +++ b/python/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + PUBLIC :: mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + PUBLIC :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/python/numpy/f2py/tests/src/modules/module_data_docstring.f90 b/python/numpy/f2py/tests/src/modules/module_data_docstring.f90 new file mode 100644 index 000000000..4505e0cbc --- /dev/null +++ b/python/numpy/f2py/tests/src/modules/module_data_docstring.f90 @@ -0,0 +1,12 @@ +module mod + integer :: i + integer :: x(4) + real, dimension(2,3) :: a + real, allocatable, dimension(:,:) :: b +contains + subroutine foo + integer :: k + k = 1 + a(1,2) = a(1,2)+3 + end subroutine foo +end module mod diff --git a/python/numpy/f2py/tests/src/modules/use_modules.f90 b/python/numpy/f2py/tests/src/modules/use_modules.f90 new file mode 100644 index 000000000..aa40c86ca --- /dev/null +++ b/python/numpy/f2py/tests/src/modules/use_modules.f90 @@ -0,0 +1,20 @@ +module mathops + implicit none +contains + function add(a, b) result(c) + integer, intent(in) :: a, b + integer :: c + c = a + b + end function add +end module mathops + +module useops + use mathops, only: add + implicit none +contains + function sum_and_double(a, b) result(d) + integer, intent(in) :: a, b + integer :: d + d = 2 * add(a, b) + end function sum_and_double +end module useops diff --git a/python/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 
b/python/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 new file mode 100644 index 000000000..bf1fa9285 --- /dev/null +++ b/python/numpy/f2py/tests/src/negative_bounds/issue_20853.f90 @@ -0,0 +1,7 @@ +subroutine foo(is_, ie_, arr, tout) + implicit none + integer :: is_,ie_ + real, intent(in) :: arr(is_:ie_) + real, intent(out) :: tout(is_:ie_) + tout = arr +end diff --git a/python/numpy/f2py/tests/src/parameter/constant_array.f90 b/python/numpy/f2py/tests/src/parameter/constant_array.f90 new file mode 100644 index 000000000..9a6bf8161 --- /dev/null +++ b/python/numpy/f2py/tests/src/parameter/constant_array.f90 @@ -0,0 +1,45 @@ +! Check that parameter arrays are correctly intercepted. +subroutine foo_array(x, y, z) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: pa = 2 + integer, parameter :: intparamarray(2) = (/ 3, 5 /) + integer, dimension(pa), parameter :: pb = (/ 2, 10 /) + integer, parameter, dimension(intparamarray(1)) :: pc = (/ 2, 10, 20 /) + real(dp), parameter :: doubleparamarray(3) = (/ 3.14_dp, 4._dp, 6.44_dp /) + real(dp), intent(inout) :: x(intparamarray(1)) + real(dp), intent(inout) :: y(intparamarray(2)) + real(dp), intent(out) :: z + + x = x/pb(2) + y = y*pc(2) + z = doubleparamarray(1)*doubleparamarray(2) + doubleparamarray(3) + + return +end subroutine + +subroutine foo_array_any_index(x, y) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter, dimension(-1:1) :: myparamarray = (/ 6, 3, 1 /) + integer, parameter, dimension(2) :: nested = (/ 2, 0 /) + integer, parameter :: dim = 2 + real(dp), intent(in) :: x(myparamarray(-1)) + real(dp), intent(out) :: y(nested(1), myparamarray(nested(dim))) + + y = reshape(x, (/nested(1), myparamarray(nested(2))/)) + + return +end subroutine + +subroutine foo_array_delims(x) + implicit none + integer, parameter :: dp = selected_real_kind(15) + integer, parameter, dimension(2) :: myparamarray = (/ (6), 1 /) + integer, parameter, dimension(3) :: test = (/2, 1, (3)/) + real(dp), intent(out) :: x + + x = myparamarray(1)+test(3) + + return +end subroutine diff --git a/python/numpy/f2py/tests/src/parameter/constant_both.f90 b/python/numpy/f2py/tests/src/parameter/constant_both.f90 new file mode 100644 index 000000000..ac90cedc5 --- /dev/null +++ b/python/numpy/f2py/tests/src/parameter/constant_both.f90 @@ -0,0 +1,57 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3._sp + real(dp), parameter :: three_d = 3._dp + integer(ii), parameter :: three_i = 3_ii + integer(il), parameter :: three_l = 3_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + + +subroutine foo_no(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 3. + real(dp), parameter :: three_d = 3. 
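+ ! Same arithmetic as foo above, but the parameter literals here deliberately omit the kind suffix (3. instead of 3._sp/3._dp).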
+ integer(ii), parameter :: three_i = 3 + integer(il), parameter :: three_l = 3 + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine + +subroutine foo_sum(x) + implicit none + integer, parameter :: sp = selected_real_kind(6) + integer, parameter :: dp = selected_real_kind(15) + integer, parameter :: ii = selected_int_kind(9) + integer, parameter :: il = selected_int_kind(18) + real(dp), intent(inout) :: x + dimension x(3) + real(sp), parameter :: three_s = 2._sp + 1._sp + real(dp), parameter :: three_d = 1._dp + 2._dp + integer(ii), parameter :: three_i = 2_ii + 1_ii + integer(il), parameter :: three_l = 1_il + 2_il + x(1) = x(1) + x(2) * three_s * three_i + x(3) * three_d * three_l + x(2) = x(2) * three_s + x(3) = x(3) * three_l + return +end subroutine diff --git a/python/numpy/f2py/tests/src/parameter/constant_compound.f90 b/python/numpy/f2py/tests/src/parameter/constant_compound.f90 new file mode 100644 index 000000000..e51f5e9b2 --- /dev/null +++ b/python/numpy/f2py/tests/src/parameter/constant_compound.f90 @@ -0,0 +1,15 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + integer(ii), parameter :: two = 2_ii + integer(ii), parameter :: six = three * 1_ii * two + + x(1) = x(1) + x(2) + x(3) * six + return +end subroutine diff --git a/python/numpy/f2py/tests/src/parameter/constant_integer.f90 b/python/numpy/f2py/tests/src/parameter/constant_integer.f90 new file mode 100644 index 000000000..aaa83d2eb --- /dev/null +++ b/python/numpy/f2py/tests/src/parameter/constant_integer.f90 @@ -0,0 +1,22 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_long(x) + implicit none + integer, parameter :: ii = selected_int_kind(18) + integer(ii), intent(inout) :: x + dimension x(3) + integer(ii), parameter :: three = 3_ii + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine diff --git a/python/numpy/f2py/tests/src/parameter/constant_non_compound.f90 b/python/numpy/f2py/tests/src/parameter/constant_non_compound.f90 new file mode 100644 index 000000000..62c9a5b94 --- /dev/null +++ b/python/numpy/f2py/tests/src/parameter/constant_non_compound.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correctly intercepted. +! Specifically that types of constants without +! compound kind specs are correctly inferred +! adapted Gibbs iteration code from pymc +! 
for this test case +subroutine foo_non_compound_int(x) + implicit none + integer, parameter :: ii = selected_int_kind(9) + + integer(ii) maxiterates + parameter (maxiterates=2) + + integer(ii) maxseries + parameter (maxseries=2) + + integer(ii) wasize + parameter (wasize=maxiterates*maxseries) + integer(ii), intent(inout) :: x + dimension x(wasize) + + x(1) = x(1) + x(2) + x(3) + x(4) * wasize + return +end subroutine diff --git a/python/numpy/f2py/tests/src/parameter/constant_real.f90 b/python/numpy/f2py/tests/src/parameter/constant_real.f90 new file mode 100644 index 000000000..02ac9dd99 --- /dev/null +++ b/python/numpy/f2py/tests/src/parameter/constant_real.f90 @@ -0,0 +1,23 @@ +! Check that parameters are correctly intercepted. +! Constants with comma separations are commonly +! used, for instance Pi = 3._dp +subroutine foo_single(x) + implicit none + integer, parameter :: rp = selected_real_kind(6) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + +subroutine foo_double(x) + implicit none + integer, parameter :: rp = selected_real_kind(15) + real(rp), intent(inout) :: x + dimension x(3) + real(rp), parameter :: three = 3._rp + x(1) = x(1) + x(2) + x(3) * three + return +end subroutine + diff --git a/python/numpy/f2py/tests/src/quoted_character/foo.f b/python/numpy/f2py/tests/src/quoted_character/foo.f new file mode 100644 index 000000000..9dc1cfa44 --- /dev/null +++ b/python/numpy/f2py/tests/src/quoted_character/foo.f @@ -0,0 +1,14 @@ + SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6) + CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR + PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!", + 1 OPENPAR="(", CLOSEPAR=")") + CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 +Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6 + OUT1 = SINGLE + OUT2 = DOUBLE + OUT3 = SEMICOL + OUT4 = EXCLA + OUT5 = OPENPAR + OUT6 = CLOSEPAR + RETURN + END diff --git a/python/numpy/f2py/tests/src/regression/AB.inc b/python/numpy/f2py/tests/src/regression/AB.inc new file mode 100644 index 000000000..8a02f631f --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/AB.inc @@ -0,0 +1 @@ +real(8) b, n, m diff --git a/python/numpy/f2py/tests/src/regression/assignOnlyModule.f90 b/python/numpy/f2py/tests/src/regression/assignOnlyModule.f90 new file mode 100644 index 000000000..479ac7980 --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/assignOnlyModule.f90 @@ -0,0 +1,25 @@ + MODULE MOD_TYPES + INTEGER, PARAMETER :: SP = SELECTED_REAL_KIND(6, 37) + INTEGER, PARAMETER :: DP = SELECTED_REAL_KIND(15, 307) + END MODULE +! + MODULE F_GLOBALS + USE MOD_TYPES + IMPLICIT NONE + INTEGER, PARAMETER :: N_MAX = 16 + INTEGER, PARAMETER :: I_MAX = 18 + INTEGER, PARAMETER :: J_MAX = 72 + REAL(SP) :: XREF + END MODULE F_GLOBALS +! + SUBROUTINE DUMMY () +! + USE F_GLOBALS + USE MOD_TYPES + IMPLICIT NONE +! + REAL(SP) :: MINIMAL + MINIMAL = 0.01*XREF + RETURN +! 
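+! MINIMAL only exercises read access to the module-level XREF; the computed value is discarded.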
+ END SUBROUTINE DUMMY diff --git a/python/numpy/f2py/tests/src/regression/datonly.f90 b/python/numpy/f2py/tests/src/regression/datonly.f90 new file mode 100644 index 000000000..67fc4aca8 --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/datonly.f90 @@ -0,0 +1,17 @@ +module datonly + implicit none + integer, parameter :: max_value = 100 + real, dimension(:), allocatable :: data_array +end module datonly + +module dat + implicit none + integer, parameter :: max_= 1009 +end module dat + +subroutine simple_subroutine(ain, aout) + use dat, only: max_ + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + max_ +end subroutine simple_subroutine diff --git a/python/numpy/f2py/tests/src/regression/f77comments.f b/python/numpy/f2py/tests/src/regression/f77comments.f new file mode 100644 index 000000000..452a01a14 --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/f77comments.f @@ -0,0 +1,26 @@ + SUBROUTINE TESTSUB( + & INPUT1, INPUT2, !Input + & OUTPUT1, OUTPUT2) !Output + + IMPLICIT NONE + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + + OUTPUT1 = INPUT1 + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 + + RETURN + END SUBROUTINE TESTSUB + + SUBROUTINE TESTSUB2(OUTPUT) + IMPLICIT NONE + INTEGER, PARAMETER :: N = 10 ! Array dimension + REAL, INTENT(OUT) :: OUTPUT(N) + INTEGER :: I + + DO I = 1, N + OUTPUT(I) = I * 2.0 + END DO + + RETURN + END diff --git a/python/numpy/f2py/tests/src/regression/f77fixedform.f95 b/python/numpy/f2py/tests/src/regression/f77fixedform.f95 new file mode 100644 index 000000000..e47a13f7e --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/f77fixedform.f95 @@ -0,0 +1,5 @@ +C This is an invalid file, but it does compile with -ffixed-form + subroutine mwe( + & x) + real x + end subroutine mwe diff --git a/python/numpy/f2py/tests/src/regression/f90continuation.f90 b/python/numpy/f2py/tests/src/regression/f90continuation.f90 new file mode 100644 index 000000000..879e716bb --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/f90continuation.f90 @@ -0,0 +1,9 @@ +SUBROUTINE TESTSUB(INPUT1, & ! Hello +! commenty +INPUT2, OUTPUT1, OUTPUT2) ! more comments + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + OUTPUT1 = INPUT1 + & + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 +END SUBROUTINE TESTSUB diff --git a/python/numpy/f2py/tests/src/regression/incfile.f90 b/python/numpy/f2py/tests/src/regression/incfile.f90 new file mode 100644 index 000000000..276ef3a67 --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/incfile.f90 @@ -0,0 +1,5 @@ +function add(n,m) result(b) + implicit none + include 'AB.inc' + b = n + m +end function add diff --git a/python/numpy/f2py/tests/src/regression/inout.f90 b/python/numpy/f2py/tests/src/regression/inout.f90 new file mode 100644 index 000000000..80cdad90c --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/inout.f90 @@ -0,0 +1,9 @@ +! Check that intent(in out) translates as intent(inout). +! The separation seems to be a common usage. 
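+! Either spelling should yield an argument that f2py treats as modifiable in place.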
+ subroutine foo(x) + implicit none + real(4), intent(in out) :: x + dimension x(3) + x(1) = x(1) + x(2) + x(3) + return + end diff --git a/python/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 b/python/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 new file mode 100644 index 000000000..1c4b8c192 --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/lower_f2py_fortran.f90 @@ -0,0 +1,5 @@ +subroutine inquire_next(IU) + IMPLICIT NONE + integer :: IU + !f2py intent(in) IU +end subroutine diff --git a/python/numpy/f2py/tests/src/regression/mod_derived_types.f90 b/python/numpy/f2py/tests/src/regression/mod_derived_types.f90 new file mode 100644 index 000000000..7692c82cf --- /dev/null +++ b/python/numpy/f2py/tests/src/regression/mod_derived_types.f90 @@ -0,0 +1,23 @@ +module mtypes + implicit none + integer, parameter :: value1 = 100 + type :: master_data + integer :: idat = 200 + end type master_data + type(master_data) :: masterdata +end module mtypes + + +subroutine no_type_subroutine(ain, aout) + use mtypes, only: value1 + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + value1 +end subroutine no_type_subroutine + +subroutine type_subroutine(ain, aout) + use mtypes, only: masterdata + integer, intent(in) :: ain + integer, intent(out) :: aout + aout = ain + masterdata%idat +end subroutine type_subroutine \ No newline at end of file diff --git a/python/numpy/f2py/tests/src/return_character/foo77.f b/python/numpy/f2py/tests/src/return_character/foo77.f new file mode 100644 index 000000000..facae1016 --- /dev/null +++ b/python/numpy/f2py/tests/src/return_character/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + character value + character t0 + t0 = value + end + function t1(value) + character*1 value + character*1 t1 + t1 = value + end + function t5(value) + character*5 value + character*5 t5 + t5 = value + end + function ts(value) + character*(*) value + character*(*) ts + ts = value + end + + subroutine s0(t0,value) + character value + character t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + character*1 value + character*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s5(t5,value) + character*5 value + character*5 t5 +cf2py intent(out) t5 + t5 = value + end + subroutine ss(ts,value) + character*(*) value + character*10 ts +cf2py intent(out) ts + ts = value + end diff --git a/python/numpy/f2py/tests/src/return_character/foo90.f90 b/python/numpy/f2py/tests/src/return_character/foo90.f90 new file mode 100644 index 000000000..36182bcf2 --- /dev/null +++ b/python/numpy/f2py/tests/src/return_character/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_char + contains + function t0(value) + character :: value + character :: t0 + t0 = value + end function t0 + function t1(value) + character(len=1) :: value + character(len=1) :: t1 + t1 = value + end function t1 + function t5(value) + character(len=5) :: value + character(len=5) :: t5 + t5 = value + end function t5 + function ts(value) + character(len=*) :: value + character(len=10) :: ts + ts = value + end function ts + + subroutine s0(t0,value) + character :: value + character :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + character(len=1) :: value + character(len=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s5(t5,value) + character(len=5) :: value + character(len=5) :: t5 +!f2py intent(out) t5 + t5 = value + end subroutine s5 + subroutine ss(ts,value) + character(len=*) :: value + character(len=10) :: 
ts +!f2py intent(out) ts + ts = value + end subroutine ss +end module f90_return_char diff --git a/python/numpy/f2py/tests/src/return_complex/foo77.f b/python/numpy/f2py/tests/src/return_complex/foo77.f new file mode 100644 index 000000000..37a1ec845 --- /dev/null +++ b/python/numpy/f2py/tests/src/return_complex/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + complex value + complex t0 + t0 = value + end + function t8(value) + complex*8 value + complex*8 t8 + t8 = value + end + function t16(value) + complex*16 value + complex*16 t16 + t16 = value + end + function td(value) + double complex value + double complex td + td = value + end + + subroutine s0(t0,value) + complex value + complex t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s8(t8,value) + complex*8 value + complex*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine s16(t16,value) + complex*16 value + complex*16 t16 +cf2py intent(out) t16 + t16 = value + end + subroutine sd(td,value) + double complex value + double complex td +cf2py intent(out) td + td = value + end diff --git a/python/numpy/f2py/tests/src/return_complex/foo90.f90 b/python/numpy/f2py/tests/src/return_complex/foo90.f90 new file mode 100644 index 000000000..adc27b470 --- /dev/null +++ b/python/numpy/f2py/tests/src/return_complex/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_complex + contains + function t0(value) + complex :: value + complex :: t0 + t0 = value + end function t0 + function t8(value) + complex(kind=4) :: value + complex(kind=4) :: t8 + t8 = value + end function t8 + function t16(value) + complex(kind=8) :: value + complex(kind=8) :: t16 + t16 = value + end function t16 + function td(value) + double complex :: value + double complex :: td + td = value + end function td + + subroutine s0(t0,value) + complex :: value + complex :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s8(t8,value) + complex(kind=4) :: value + complex(kind=4) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine s16(t16,value) + complex(kind=8) :: value + complex(kind=8) :: t16 +!f2py intent(out) t16 + t16 = value + end subroutine s16 + subroutine sd(td,value) + double complex :: value + double complex :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_complex diff --git a/python/numpy/f2py/tests/src/return_integer/foo77.f b/python/numpy/f2py/tests/src/return_integer/foo77.f new file mode 100644 index 000000000..1ab895b9a --- /dev/null +++ b/python/numpy/f2py/tests/src/return_integer/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + integer value + integer t0 + t0 = value + end + function t1(value) + integer*1 value + integer*1 t1 + t1 = value + end + function t2(value) + integer*2 value + integer*2 t2 + t2 = value + end + function t4(value) + integer*4 value + integer*4 t4 + t4 = value + end + function t8(value) + integer*8 value + integer*8 t8 + t8 = value + end + + subroutine s0(t0,value) + integer value + integer t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + integer*1 value + integer*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + integer*2 value + integer*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + integer*4 value + integer*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + integer*8 value + integer*8 t8 +cf2py intent(out) t8 + t8 = value + end diff --git a/python/numpy/f2py/tests/src/return_integer/foo90.f90 b/python/numpy/f2py/tests/src/return_integer/foo90.f90 new file mode 100644 
index 000000000..ba9249aa2 --- /dev/null +++ b/python/numpy/f2py/tests/src/return_integer/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_integer + contains + function t0(value) + integer :: value + integer :: t0 + t0 = value + end function t0 + function t1(value) + integer(kind=1) :: value + integer(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + integer(kind=2) :: value + integer(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + integer(kind=4) :: value + integer(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + integer(kind=8) :: value + integer(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + integer :: value + integer :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + integer(kind=1) :: value + integer(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + integer(kind=2) :: value + integer(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + integer(kind=4) :: value + integer(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + integer(kind=8) :: value + integer(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_integer diff --git a/python/numpy/f2py/tests/src/return_logical/foo77.f b/python/numpy/f2py/tests/src/return_logical/foo77.f new file mode 100644 index 000000000..ef530145f --- /dev/null +++ b/python/numpy/f2py/tests/src/return_logical/foo77.f @@ -0,0 +1,56 @@ + function t0(value) + logical value + logical t0 + t0 = value + end + function t1(value) + logical*1 value + logical*1 t1 + t1 = value + end + function t2(value) + logical*2 value + logical*2 t2 + t2 = value + end + function t4(value) + logical*4 value + logical*4 t4 + t4 = value + end +c function t8(value) +c logical*8 value +c logical*8 t8 +c t8 = value +c end + + subroutine s0(t0,value) + logical value + logical t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s1(t1,value) + logical*1 value + logical*1 t1 +cf2py intent(out) t1 + t1 = value + end + subroutine s2(t2,value) + logical*2 value + logical*2 t2 +cf2py intent(out) t2 + t2 = value + end + subroutine s4(t4,value) + logical*4 value + logical*4 t4 +cf2py intent(out) t4 + t4 = value + end +c subroutine s8(t8,value) +c logical*8 value +c logical*8 t8 +cf2py intent(out) t8 +c t8 = value +c end diff --git a/python/numpy/f2py/tests/src/return_logical/foo90.f90 b/python/numpy/f2py/tests/src/return_logical/foo90.f90 new file mode 100644 index 000000000..a4526468e --- /dev/null +++ b/python/numpy/f2py/tests/src/return_logical/foo90.f90 @@ -0,0 +1,59 @@ +module f90_return_logical + contains + function t0(value) + logical :: value + logical :: t0 + t0 = value + end function t0 + function t1(value) + logical(kind=1) :: value + logical(kind=1) :: t1 + t1 = value + end function t1 + function t2(value) + logical(kind=2) :: value + logical(kind=2) :: t2 + t2 = value + end function t2 + function t4(value) + logical(kind=4) :: value + logical(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + logical(kind=8) :: value + logical(kind=8) :: t8 + t8 = value + end function t8 + + subroutine s0(t0,value) + logical :: value + logical :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s1(t1,value) + logical(kind=1) :: value + logical(kind=1) :: t1 +!f2py intent(out) t1 + t1 = value + end subroutine s1 + subroutine s2(t2,value) + logical(kind=2) :: value + 
logical(kind=2) :: t2 +!f2py intent(out) t2 + t2 = value + end subroutine s2 + subroutine s4(t4,value) + logical(kind=4) :: value + logical(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + logical(kind=8) :: value + logical(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 +end module f90_return_logical diff --git a/python/numpy/f2py/tests/src/return_real/foo77.f b/python/numpy/f2py/tests/src/return_real/foo77.f new file mode 100644 index 000000000..bf43dbf11 --- /dev/null +++ b/python/numpy/f2py/tests/src/return_real/foo77.f @@ -0,0 +1,45 @@ + function t0(value) + real value + real t0 + t0 = value + end + function t4(value) + real*4 value + real*4 t4 + t4 = value + end + function t8(value) + real*8 value + real*8 t8 + t8 = value + end + function td(value) + double precision value + double precision td + td = value + end + + subroutine s0(t0,value) + real value + real t0 +cf2py intent(out) t0 + t0 = value + end + subroutine s4(t4,value) + real*4 value + real*4 t4 +cf2py intent(out) t4 + t4 = value + end + subroutine s8(t8,value) + real*8 value + real*8 t8 +cf2py intent(out) t8 + t8 = value + end + subroutine sd(td,value) + double precision value + double precision td +cf2py intent(out) td + td = value + end diff --git a/python/numpy/f2py/tests/src/return_real/foo90.f90 b/python/numpy/f2py/tests/src/return_real/foo90.f90 new file mode 100644 index 000000000..df9719980 --- /dev/null +++ b/python/numpy/f2py/tests/src/return_real/foo90.f90 @@ -0,0 +1,48 @@ +module f90_return_real + contains + function t0(value) + real :: value + real :: t0 + t0 = value + end function t0 + function t4(value) + real(kind=4) :: value + real(kind=4) :: t4 + t4 = value + end function t4 + function t8(value) + real(kind=8) :: value + real(kind=8) :: t8 + t8 = value + end function t8 + function td(value) + double precision :: value + double precision :: td + td = value + end function td + + subroutine s0(t0,value) + real :: value + real :: t0 +!f2py intent(out) t0 + t0 = value + end subroutine s0 + subroutine s4(t4,value) + real(kind=4) :: value + real(kind=4) :: t4 +!f2py intent(out) t4 + t4 = value + end subroutine s4 + subroutine s8(t8,value) + real(kind=8) :: value + real(kind=8) :: t8 +!f2py intent(out) t8 + t8 = value + end subroutine s8 + subroutine sd(td,value) + double precision :: value + double precision :: td +!f2py intent(out) td + td = value + end subroutine sd +end module f90_return_real diff --git a/python/numpy/f2py/tests/src/routines/funcfortranname.f b/python/numpy/f2py/tests/src/routines/funcfortranname.f new file mode 100644 index 000000000..89be972d3 --- /dev/null +++ b/python/numpy/f2py/tests/src/routines/funcfortranname.f @@ -0,0 +1,5 @@ + REAL*8 FUNCTION FUNCFORTRANNAME(A,B) + REAL*8 A, B + FUNCFORTRANNAME = A + B + RETURN + END FUNCTION diff --git a/python/numpy/f2py/tests/src/routines/funcfortranname.pyf b/python/numpy/f2py/tests/src/routines/funcfortranname.pyf new file mode 100644 index 000000000..8730ca6a6 --- /dev/null +++ b/python/numpy/f2py/tests/src/routines/funcfortranname.pyf @@ -0,0 +1,11 @@ +python module funcfortranname ! in + interface ! in :funcfortranname + function funcfortranname_default(a,b) ! 
in :funcfortranname:funcfortranname.f + fortranname funcfortranname + real*8 :: a + real*8 :: b + real*8 :: funcfortranname_default + real*8, intent(out) :: funcfortranname + end function funcfortranname_default + end interface +end python module funcfortranname diff --git a/python/numpy/f2py/tests/src/routines/subrout.f b/python/numpy/f2py/tests/src/routines/subrout.f new file mode 100644 index 000000000..1d1eeaeb5 --- /dev/null +++ b/python/numpy/f2py/tests/src/routines/subrout.f @@ -0,0 +1,4 @@ + SUBROUTINE SUBROUT(A,B,C) + REAL*8 A, B, C + C = A + B + END SUBROUTINE diff --git a/python/numpy/f2py/tests/src/routines/subrout.pyf b/python/numpy/f2py/tests/src/routines/subrout.pyf new file mode 100644 index 000000000..e27cbe1c7 --- /dev/null +++ b/python/numpy/f2py/tests/src/routines/subrout.pyf @@ -0,0 +1,10 @@ +python module subrout ! in + interface ! in :subrout + subroutine subrout_default(a,b,c) ! in :subrout:subrout.f + fortranname subrout + real*8 :: a + real*8 :: b + real*8, intent(out) :: c + end subroutine subrout_default + end interface +end python module subrout diff --git a/python/numpy/f2py/tests/src/size/foo.f90 b/python/numpy/f2py/tests/src/size/foo.f90 new file mode 100644 index 000000000..5b66f8c43 --- /dev/null +++ b/python/numpy/f2py/tests/src/size/foo.f90 @@ -0,0 +1,44 @@ + +subroutine foo(a, n, m, b) + implicit none + + real, intent(in) :: a(n, m) + integer, intent(in) :: n, m + real, intent(out) :: b(size(a, 1)) + + integer :: i + + do i = 1, size(b) + b(i) = sum(a(i,:)) + enddo +end subroutine + +subroutine trans(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x,2), size(x,1) ) :: y + integer :: N, M, i, j + N = size(x,1) + M = size(x,2) + DO i=1,N + do j=1,M + y(j,i) = x(i,j) + END DO + END DO +end subroutine trans + +subroutine flatten(x,y) + implicit none + real, intent(in), dimension(:,:) :: x + real, intent(out), dimension( size(x) ) :: y + integer :: N, M, i, j, k + N = size(x,1) + M = size(x,2) + k = 1 + DO i=1,N + do j=1,M + y(k) = x(i,j) + k = k + 1 + END DO + END DO +end subroutine flatten diff --git a/python/numpy/f2py/tests/src/string/char.f90 b/python/numpy/f2py/tests/src/string/char.f90 new file mode 100644 index 000000000..bb7985ce5 --- /dev/null +++ b/python/numpy/f2py/tests/src/string/char.f90 @@ -0,0 +1,29 @@ +MODULE char_test + +CONTAINS + +SUBROUTINE change_strings(strings, n_strs, out_strings) + IMPLICIT NONE + + ! Inputs + INTEGER, INTENT(IN) :: n_strs + CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings + CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: out_strings + +!f2py INTEGER, INTENT(IN) :: n_strs +!f2py CHARACTER, INTENT(IN), DIMENSION(2,n_strs) :: strings +!f2py CHARACTER, INTENT(OUT), DIMENSION(2,n_strs) :: strings + + ! Misc. + INTEGER*4 :: j + + + DO j=1, n_strs + out_strings(1,j) = strings(1,j) + out_strings(2,j) = 'A' + END DO + +END SUBROUTINE change_strings + +END MODULE char_test + diff --git a/python/numpy/f2py/tests/src/string/fixed_string.f90 b/python/numpy/f2py/tests/src/string/fixed_string.f90 new file mode 100644 index 000000000..7fd158543 --- /dev/null +++ b/python/numpy/f2py/tests/src/string/fixed_string.f90 @@ -0,0 +1,34 @@ +function sint(s) result(i) + implicit none + character(len=*) :: s + integer :: j, i + i = 0 + do j=len(s), 1, -1 + if (.not.((i.eq.0).and.(s(j:j).eq.' 
'))) then + i = i + ichar(s(j:j)) * 10 ** (j - 1) + endif + end do + return + end function sint + + function test_in_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4) :: a + integer :: i + i = sint(a) + a(1:1) = 'A' + return + end function test_in_bytes4 + + function test_inout_bytes4(a) result (i) + implicit none + integer :: sint + character(len=4), intent(inout) :: a + integer :: i + if (a(1:1).ne.' ') then + a(1:1) = 'E' + endif + i = sint(a) + return + end function test_inout_bytes4 diff --git a/python/numpy/f2py/tests/src/string/gh24008.f b/python/numpy/f2py/tests/src/string/gh24008.f new file mode 100644 index 000000000..ab64cf771 --- /dev/null +++ b/python/numpy/f2py/tests/src/string/gh24008.f @@ -0,0 +1,8 @@ + SUBROUTINE GREET(NAME, GREETING) + CHARACTER NAME*(*), GREETING*(*) + CHARACTER*(50) MESSAGE + + MESSAGE = 'Hello, ' // NAME // ', ' // GREETING +c$$$ PRINT *, MESSAGE + + END SUBROUTINE GREET diff --git a/python/numpy/f2py/tests/src/string/gh24662.f90 b/python/numpy/f2py/tests/src/string/gh24662.f90 new file mode 100644 index 000000000..ca53413cc --- /dev/null +++ b/python/numpy/f2py/tests/src/string/gh24662.f90 @@ -0,0 +1,7 @@ +subroutine string_inout_optional(output) + implicit none + character*(32), optional, intent(inout) :: output + if (present(output)) then + output="output string" + endif +end subroutine diff --git a/python/numpy/f2py/tests/src/string/gh25286.f90 b/python/numpy/f2py/tests/src/string/gh25286.f90 new file mode 100644 index 000000000..db1c7100d --- /dev/null +++ b/python/numpy/f2py/tests/src/string/gh25286.f90 @@ -0,0 +1,14 @@ +subroutine charint(trans, info) + character, intent(in) :: trans + integer, intent(out) :: info + if (trans == 'N') then + info = 1 + else if (trans == 'T') then + info = 2 + else if (trans == 'C') then + info = 3 + else + info = -1 + end if + +end subroutine charint diff --git a/python/numpy/f2py/tests/src/string/gh25286.pyf b/python/numpy/f2py/tests/src/string/gh25286.pyf new file mode 100644 index 000000000..7b9609071 --- /dev/null +++ b/python/numpy/f2py/tests/src/string/gh25286.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(trans=='N'||trans=='T'||trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/python/numpy/f2py/tests/src/string/gh25286_bc.pyf b/python/numpy/f2py/tests/src/string/gh25286_bc.pyf new file mode 100644 index 000000000..e7b10fa92 --- /dev/null +++ b/python/numpy/f2py/tests/src/string/gh25286_bc.pyf @@ -0,0 +1,12 @@ +python module _char_handling_test + interface + subroutine charint(trans, info) + callstatement (*f2py_func)(&trans, &info) + callprotoargument char*, int* + + character, intent(in), check(*trans=='N'||*trans=='T'||*trans=='C') :: trans = 'N' + integer intent(out) :: info + + end subroutine charint + end interface +end python module _char_handling_test diff --git a/python/numpy/f2py/tests/src/string/scalar_string.f90 b/python/numpy/f2py/tests/src/string/scalar_string.f90 new file mode 100644 index 000000000..f8f076172 --- /dev/null +++ b/python/numpy/f2py/tests/src/string/scalar_string.f90 @@ -0,0 +1,9 @@ +MODULE string_test + + character(len=8) :: string + character string77 * 8 + + character(len=12), dimension(5,7) :: strarr + character strarr77(5,7) * 12 + +END MODULE string_test diff --git 
a/python/numpy/f2py/tests/src/string/string.f b/python/numpy/f2py/tests/src/string/string.f new file mode 100644 index 000000000..5210ca4dc --- /dev/null +++ b/python/numpy/f2py/tests/src/string/string.f @@ -0,0 +1,12 @@ +C FILE: STRING.F + SUBROUTINE FOO(A,B,C,D) + CHARACTER*5 A, B + CHARACTER*(*) C,D +Cf2py intent(in) a,c +Cf2py intent(inout) b,d + A(1:1) = 'A' + B(1:1) = 'B' + C(1:1) = 'C' + D(1:1) = 'D' + END +C END OF FILE STRING.F diff --git a/python/numpy/f2py/tests/src/value_attrspec/gh21665.f90 b/python/numpy/f2py/tests/src/value_attrspec/gh21665.f90 new file mode 100644 index 000000000..7d9dc0fd4 --- /dev/null +++ b/python/numpy/f2py/tests/src/value_attrspec/gh21665.f90 @@ -0,0 +1,9 @@ +module fortfuncs + implicit none +contains + subroutine square(x,y) + integer, intent(in), value :: x + integer, intent(out) :: y + y = x*x + end subroutine square +end module fortfuncs diff --git a/python/numpy/f2py/tests/test_abstract_interface.py b/python/numpy/f2py/tests/test_abstract_interface.py new file mode 100644 index 000000000..21e77db3e --- /dev/null +++ b/python/numpy/f2py/tests/test_abstract_interface.py @@ -0,0 +1,26 @@ +import pytest + +from numpy.f2py import crackfortran +from numpy.testing import IS_WASM + +from . import util + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +class TestAbstractInterface(util.F2PyTest): + sources = [util.getpath("tests", "src", "abstract_interface", "foo.f90")] + + skip = ["add1", "add2"] + + def test_abstract_interface(self): + assert self.module.ops_module.foo(3, 5) == (8, 13) + + def test_parse_abstract_interface(self): + # Test gh18403 + fpath = util.getpath("tests", "src", "abstract_interface", + "gh18403_mod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + assert len(mod[0]["body"]) == 1 + assert mod[0]["body"][0]["block"] == "abstract interface" diff --git a/python/numpy/f2py/tests/test_array_from_pyobj.py b/python/numpy/f2py/tests/test_array_from_pyobj.py new file mode 100644 index 000000000..a8f952752 --- /dev/null +++ b/python/numpy/f2py/tests/test_array_from_pyobj.py @@ -0,0 +1,678 @@ +import copy +import platform +import sys +from pathlib import Path + +import pytest + +import numpy as np +from numpy._core._type_aliases import c_names_dict as _c_names_dict + +from . 
import util + +wrap = None + +# Extend core typeinfo with CHARACTER to test dtype('c') +c_names_dict = dict( + CHARACTER=np.dtype("c"), + **_c_names_dict +) + + +def get_testdir(): + testroot = Path(__file__).resolve().parent / "src" + return testroot / "array_from_pyobj" + +def setup_module(): + """ + Build the required testing extension module + + """ + global wrap + + if wrap is None: + src = [ + get_testdir() / "wrapmodule.c", + ] + wrap = util.build_meson(src, module_name="test_array_from_pyobj_ext") + + +def flags_info(arr): + flags = wrap.array_attrs(arr)[6] + return flags2names(flags) + + +def flags2names(flags): + info = [] + for flagname in [ + "CONTIGUOUS", + "FORTRAN", + "OWNDATA", + "ENSURECOPY", + "ENSUREARRAY", + "ALIGNED", + "NOTSWAPPED", + "WRITEABLE", + "WRITEBACKIFCOPY", + "UPDATEIFCOPY", + "BEHAVED", + "BEHAVED_RO", + "CARRAY", + "FARRAY", + ]: + if abs(flags) & getattr(wrap, flagname, 0): + info.append(flagname) + return info + + +class Intent: + def __init__(self, intent_list=[]): + self.intent_list = intent_list[:] + flags = 0 + for i in intent_list: + if i == "optional": + flags |= wrap.F2PY_OPTIONAL + else: + flags |= getattr(wrap, "F2PY_INTENT_" + i.upper()) + self.flags = flags + + def __getattr__(self, name): + name = name.lower() + if name == "in_": + name = "in" + return self.__class__(self.intent_list + [name]) + + def __str__(self): + return f"intent({','.join(self.intent_list)})" + + def __repr__(self): + return f"Intent({self.intent_list!r})" + + def is_intent(self, *names): + return all(name in self.intent_list for name in names) + + def is_intent_exact(self, *names): + return len(self.intent_list) == len(names) and self.is_intent(*names) + + +intent = Intent() + +_type_names = [ + "BOOL", + "BYTE", + "UBYTE", + "SHORT", + "USHORT", + "INT", + "UINT", + "LONG", + "ULONG", + "LONGLONG", + "ULONGLONG", + "FLOAT", + "DOUBLE", + "CFLOAT", + "STRING1", + "STRING5", + "CHARACTER", +] + +_cast_dict = {"BOOL": ["BOOL"]} +_cast_dict["BYTE"] = _cast_dict["BOOL"] + ["BYTE"] +_cast_dict["UBYTE"] = _cast_dict["BOOL"] + ["UBYTE"] +_cast_dict["BYTE"] = ["BYTE"] +_cast_dict["UBYTE"] = ["UBYTE"] +_cast_dict["SHORT"] = _cast_dict["BYTE"] + ["UBYTE", "SHORT"] +_cast_dict["USHORT"] = _cast_dict["UBYTE"] + ["BYTE", "USHORT"] +_cast_dict["INT"] = _cast_dict["SHORT"] + ["USHORT", "INT"] +_cast_dict["UINT"] = _cast_dict["USHORT"] + ["SHORT", "UINT"] + +_cast_dict["LONG"] = _cast_dict["INT"] + ["LONG"] +_cast_dict["ULONG"] = _cast_dict["UINT"] + ["ULONG"] + +_cast_dict["LONGLONG"] = _cast_dict["LONG"] + ["LONGLONG"] +_cast_dict["ULONGLONG"] = _cast_dict["ULONG"] + ["ULONGLONG"] + +_cast_dict["FLOAT"] = _cast_dict["SHORT"] + ["USHORT", "FLOAT"] +_cast_dict["DOUBLE"] = _cast_dict["INT"] + ["UINT", "FLOAT", "DOUBLE"] + +_cast_dict["CFLOAT"] = _cast_dict["FLOAT"] + ["CFLOAT"] + +_cast_dict['STRING1'] = ['STRING1'] +_cast_dict['STRING5'] = ['STRING5'] +_cast_dict['CHARACTER'] = ['CHARACTER'] + +# 32 bit system malloc typically does not provide the alignment required by +# 16 byte long double types this means the inout intent cannot be satisfied +# and several tests fail as the alignment flag can be randomly true or false +# when numpy gains an aligned allocator the tests could be enabled again +# +# Furthermore, on macOS ARM64, LONGDOUBLE is an alias for DOUBLE. 
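+# Editorial aside (not part of the original change): the platform gate below +# can be probed interactively; e.g. on a typical 64-bit x86-64 Linux build: +# >>> import numpy as np +# >>> np.intp().dtype.itemsize # 8 (4 on 32-bit builds) +# >>> np.clongdouble().dtype.alignment # 16, so the long double types are kept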
+if ((np.intp().dtype.itemsize != 4 or np.clongdouble().dtype.alignment <= 8) + and sys.platform != "win32" + and (platform.system(), platform.processor()) != ("Darwin", "arm")): + _type_names.extend(["LONGDOUBLE", "CDOUBLE", "CLONGDOUBLE"]) + _cast_dict["LONGDOUBLE"] = _cast_dict["LONG"] + [ + "ULONG", + "FLOAT", + "DOUBLE", + "LONGDOUBLE", + ] + _cast_dict["CLONGDOUBLE"] = _cast_dict["LONGDOUBLE"] + [ + "CFLOAT", + "CDOUBLE", + "CLONGDOUBLE", + ] + _cast_dict["CDOUBLE"] = _cast_dict["DOUBLE"] + ["CFLOAT", "CDOUBLE"] + + +class Type: + _type_cache = {} + + def __new__(cls, name): + if isinstance(name, np.dtype): + dtype0 = name + name = None + for n, i in c_names_dict.items(): + if not isinstance(i, type) and dtype0.type is i.type: + name = n + break + obj = cls._type_cache.get(name.upper(), None) + if obj is not None: + return obj + obj = object.__new__(cls) + obj._init(name) + cls._type_cache[name.upper()] = obj + return obj + + def _init(self, name): + self.NAME = name.upper() + + if self.NAME == 'CHARACTER': + info = c_names_dict[self.NAME] + self.type_num = wrap.NPY_STRING + self.elsize = 1 + self.dtype = np.dtype('c') + elif self.NAME.startswith('STRING'): + info = c_names_dict[self.NAME[:6]] + self.type_num = wrap.NPY_STRING + self.elsize = int(self.NAME[6:] or 0) + self.dtype = np.dtype(f'S{self.elsize}') + else: + info = c_names_dict[self.NAME] + self.type_num = getattr(wrap, 'NPY_' + self.NAME) + self.elsize = info.itemsize + self.dtype = np.dtype(info.type) + + assert self.type_num == info.num + self.type = info.type + self.dtypechar = info.char + + def __repr__(self): + return (f"Type({self.NAME})|type_num={self.type_num}," + f" dtype={self.dtype}," + f" type={self.type}, elsize={self.elsize}," + f" dtypechar={self.dtypechar}") + + def cast_types(self): + return [self.__class__(_m) for _m in _cast_dict[self.NAME]] + + def all_types(self): + return [self.__class__(_m) for _m in _type_names] + + def smaller_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if c_names_dict[name].alignment < bits: + types.append(Type(name)) + return types + + def equal_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if name == self.NAME: + continue + if c_names_dict[name].alignment == bits: + types.append(Type(name)) + return types + + def larger_types(self): + bits = c_names_dict[self.NAME].alignment + types = [] + for name in _type_names: + if c_names_dict[name].alignment > bits: + types.append(Type(name)) + return types + + +class Array: + + def __repr__(self): + return (f'Array({self.type}, {self.dims}, {self.intent},' + f' {self.obj})|arr={self.arr}') + + def __init__(self, typ, dims, intent, obj): + self.type = typ + self.dims = dims + self.intent = intent + self.obj_copy = copy.deepcopy(obj) + self.obj = obj + + # arr.dtypechar may be different from typ.dtypechar + self.arr = wrap.call(typ.type_num, + typ.elsize, + dims, intent.flags, obj) + + assert isinstance(self.arr, np.ndarray) + + self.arr_attr = wrap.array_attrs(self.arr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert (intent.flags & wrap.F2PY_INTENT_C) + assert not self.arr.flags["FORTRAN"] + assert self.arr.flags["CONTIGUOUS"] + assert (not self.arr_attr[6] & wrap.FORTRAN) + else: + assert (not intent.flags & wrap.F2PY_INTENT_C) + assert self.arr.flags["FORTRAN"] + assert not self.arr.flags["CONTIGUOUS"] + assert (self.arr_attr[6] & wrap.FORTRAN) + + if obj is None: + self.pyarr = None + self.pyarr_attr = None + return 
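+ + # Editorial note: everything below builds ``pyarr``, a reference array + # constructed from the same ``obj`` with plain NumPy, and then asserts + # that ``wrap.call`` produced an array whose rank, dimensions, strides, + # descr and flags all match it.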
+ + if intent.is_intent("cache"): + assert isinstance(obj, np.ndarray), repr(type(obj)) + self.pyarr = np.array(obj).reshape(*dims).copy() + else: + self.pyarr = np.array( + np.array(obj, dtype=typ.dtypechar).reshape(*dims), + order=(self.intent.is_intent("c") and "C") or "F", + ) + assert self.pyarr.dtype == typ + self.pyarr.setflags(write=self.arr.flags["WRITEABLE"]) + assert self.pyarr.flags["OWNDATA"], (obj, intent) + self.pyarr_attr = wrap.array_attrs(self.pyarr) + + if len(dims) > 1: + if self.intent.is_intent("c"): + assert not self.pyarr.flags["FORTRAN"] + assert self.pyarr.flags["CONTIGUOUS"] + assert (not self.pyarr_attr[6] & wrap.FORTRAN) + else: + assert self.pyarr.flags["FORTRAN"] + assert not self.pyarr.flags["CONTIGUOUS"] + assert (self.pyarr_attr[6] & wrap.FORTRAN) + + assert self.arr_attr[1] == self.pyarr_attr[1] # nd + assert self.arr_attr[2] == self.pyarr_attr[2] # dimensions + if self.arr_attr[1] <= 1: + assert self.arr_attr[3] == self.pyarr_attr[3], repr(( + self.arr_attr[3], + self.pyarr_attr[3], + self.arr.tobytes(), + self.pyarr.tobytes(), + )) # strides + assert self.arr_attr[5][-2:] == self.pyarr_attr[5][-2:], repr(( + self.arr_attr[5], self.pyarr_attr[5] + )) # descr + assert self.arr_attr[6] == self.pyarr_attr[6], repr(( + self.arr_attr[6], + self.pyarr_attr[6], + flags2names(0 * self.arr_attr[6] - self.pyarr_attr[6]), + flags2names(self.arr_attr[6]), + intent, + )) # flags + + if intent.is_intent("cache"): + assert self.arr_attr[5][3] >= self.type.elsize + else: + assert self.arr_attr[5][3] == self.type.elsize + assert (self.arr_equal(self.pyarr, self.arr)) + + if isinstance(self.obj, np.ndarray): + if typ.elsize == Type(obj.dtype).elsize: + if not intent.is_intent("copy") and self.arr_attr[1] <= 1: + assert self.has_shared_memory() + + def arr_equal(self, arr1, arr2): + if arr1.shape != arr2.shape: + return False + return (arr1 == arr2).all() + + def __str__(self): + return str(self.arr) + + def has_shared_memory(self): + """Check that created array shares data with input array.""" + if self.obj is self.arr: + return True + if not isinstance(self.obj, np.ndarray): + return False + obj_attr = wrap.array_attrs(self.obj) + return obj_attr[0] == self.arr_attr[0] + + +class TestIntent: + def test_in_out(self): + assert str(intent.in_.out) == "intent(in,out)" + assert intent.in_.c.is_intent("c") + assert not intent.in_.c.is_intent_exact("c") + assert intent.in_.c.is_intent_exact("c", "in") + assert intent.in_.c.is_intent_exact("in", "c") + assert not intent.in_.is_intent("c") + + +class TestSharedMemory: + + @pytest.fixture(autouse=True, scope="class", params=_type_names) + def setup_type(self, request): + request.cls.type = Type(request.param) + request.cls.array = lambda self, dims, intent, obj: Array( + Type(request.param), dims, intent, obj) + + @property + def num2seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return ['1' * elsize, '2' * elsize] + return [1, 2] + + @property + def num23seq(self): + if self.type.NAME.startswith('STRING'): + elsize = self.type.elsize + return [['1' * elsize, '2' * elsize, '3' * elsize], + ['4' * elsize, '5' * elsize, '6' * elsize]] + return [[1, 2, 3], [4, 5, 6]] + + def test_in_from_2seq(self): + a = self.array([2], intent.in_, self.num2seq) + assert not a.has_shared_memory() + + def test_in_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_, obj) + if t.elsize == self.type.elsize: + assert 
a.has_shared_memory(), repr((self.type.dtype, t.dtype)) + else: + assert not a.has_shared_memory() + + @pytest.mark.parametrize("write", ["w", "ro"]) + @pytest.mark.parametrize("order", ["C", "F"]) + @pytest.mark.parametrize("inp", ["2seq", "23seq"]) + def test_in_nocopy(self, write, order, inp): + """Test if intent(in) array can be passed without copies""" + seq = getattr(self, "num" + inp) + obj = np.array(seq, dtype=self.type.dtype, order=order) + obj.setflags(write=(write == 'w')) + a = self.array(obj.shape, + ((order == 'C' and intent.in_.c) or intent.in_), obj) + assert a.has_shared_memory() + + def test_inout_2seq(self): + obj = np.array(self.num2seq, dtype=self.type.dtype) + a = self.array([len(self.num2seq)], intent.inout, obj) + assert a.has_shared_memory() + + try: + a = self.array([2], intent.in_.inout, self.num2seq) + except TypeError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout|inplace|cache) array"): + raise + else: + raise SystemError("intent(inout) should have failed on sequence") + + def test_f_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype, order="F") + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.inout, obj) + assert a.has_shared_memory() + + obj = np.array(self.num23seq, dtype=self.type.dtype, order="C") + shape = (len(self.num23seq), len(self.num23seq[0])) + try: + a = self.array(shape, intent.in_.inout, obj) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(inout) array"): + raise + else: + raise SystemError( + "intent(inout) should have failed on improper array") + + def test_c_inout_23seq(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + shape = (len(self.num23seq), len(self.num23seq[0])) + a = self.array(shape, intent.in_.c.inout, obj) + assert a.has_shared_memory() + + def test_in_copy_from_2casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num2seq, dtype=t.dtype) + a = self.array([len(self.num2seq)], intent.in_.copy, obj) + assert not a.has_shared_memory() + + def test_c_in_from_23seq(self): + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, + self.num23seq) + assert not a.has_shared_memory() + + def test_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + assert not a.has_shared_memory() + + def test_f_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_c_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.c, obj) + if t.elsize == self.type.elsize: + assert a.has_shared_memory() + else: + assert not a.has_shared_memory() + + def test_f_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype, order="F") + a = self.array( + [len(self.num23seq), len(self.num23seq[0])], intent.in_.copy, + obj) + assert not a.has_shared_memory() + + def test_c_copy_in_from_23casttype(self): + for t in self.type.cast_types(): + obj = np.array(self.num23seq, dtype=t.dtype) + a = self.array( + 
[len(self.num23seq), len(self.num23seq[0])], intent.in_.c.copy, + obj) + assert not a.has_shared_memory() + + def test_in_cache_from_2casttype(self): + for t in self.type.all_types(): + if t.elsize != self.type.elsize: + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory() + + obj = np.array(self.num2seq, dtype=t.dtype, order="F") + a = self.array(shape, intent.in_.c.cache, obj) + assert a.has_shared_memory() + + a = self.array(shape, intent.in_.cache, obj) + assert a.has_shared_memory(), repr(t.dtype) + + try: + a = self.array(shape, intent.in_.cache, obj[::-1]) + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on multisegmented array") + + def test_in_cache_from_2casttype_failure(self): + for t in self.type.all_types(): + if t.NAME == 'STRING': + # string elsize is 0, so skipping the test + continue + if t.elsize >= self.type.elsize: + continue + is_int = np.issubdtype(t.dtype, np.integer) + if is_int and int(self.num2seq[0]) > np.iinfo(t.dtype).max: + # skip test if num2seq would trigger an overflow error + continue + obj = np.array(self.num2seq, dtype=t.dtype) + shape = (len(self.num2seq), ) + try: + self.array(shape, intent.in_.cache, obj) # Should succeed + except ValueError as msg: + if not str(msg).startswith( + "failed to initialize intent(cache) array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on smaller array") + + def test_cache_hidden(self): + shape = (2, ) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (2, 3) + a = self.array(shape, intent.cache.hide, None) + assert a.arr.shape == shape + + shape = (-1, 3) + try: + a = self.array(shape, intent.cache.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(cache) should have failed on undefined dimensions") + + def test_hidden(self): + shape = (2, ) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.hide, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + shape = (-1, 3) + try: + a = self.array(shape, intent.hide, None) + except ValueError as msg: + if not str(msg).startswith( + "failed to create intent(cache|hide)|optional array"): + raise + else: + raise SystemError( + "intent(hide) should have failed on undefined dimensions") + + def test_optional_none(self): + shape = (2, ) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + + shape = (2, 3) + a = self.array(shape, intent.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert a.arr.flags["FORTRAN"] and not 
a.arr.flags["CONTIGUOUS"] + + shape = (2, 3) + a = self.array(shape, intent.c.optional, None) + assert a.arr.shape == shape + assert a.arr_equal(a.arr, np.zeros(shape, dtype=self.type.dtype)) + assert not a.arr.flags["FORTRAN"] and a.arr.flags["CONTIGUOUS"] + + def test_optional_from_2seq(self): + obj = self.num2seq + shape = (len(obj), ) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_optional_from_23seq(self): + obj = self.num23seq + shape = (len(obj), len(obj[0])) + a = self.array(shape, intent.optional, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + a = self.array(shape, intent.optional.c, obj) + assert a.arr.shape == shape + assert not a.has_shared_memory() + + def test_inplace(self): + obj = np.array(self.num23seq, dtype=self.type.dtype) + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes are changed inplace! + assert not obj.flags["CONTIGUOUS"] + + def test_inplace_from_casttype(self): + for t in self.type.cast_types(): + if t is self.type: + continue + obj = np.array(self.num23seq, dtype=t.dtype) + assert obj.dtype.type == t.type + assert obj.dtype.type is not self.type.type + assert not obj.flags["FORTRAN"] and obj.flags["CONTIGUOUS"] + shape = obj.shape + a = self.array(shape, intent.inplace, obj) + assert obj[1][2] == a.arr[1][2], repr((obj, a.arr)) + a.arr[1][2] = 54 + assert obj[1][2] == a.arr[1][2] == np.array(54, + dtype=self.type.dtype) + assert a.arr is obj + assert obj.flags["FORTRAN"] # obj attributes changed inplace! + assert not obj.flags["CONTIGUOUS"] + assert obj.dtype.type is self.type.type # obj changed inplace! diff --git a/python/numpy/f2py/tests/test_assumed_shape.py b/python/numpy/f2py/tests/test_assumed_shape.py new file mode 100644 index 000000000..cf75644d4 --- /dev/null +++ b/python/numpy/f2py/tests/test_assumed_shape.py @@ -0,0 +1,50 @@ +import os +import tempfile + +import pytest + +from . 
import util + + +class TestAssumedShapeSumExample(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "assumed_shape", "foo_free.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_use.f90"), + util.getpath("tests", "src", "assumed_shape", "precision.f90"), + util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"), + util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"), + ] + + @pytest.mark.slow + def test_all(self): + r = self.module.fsum([1, 2]) + assert r == 3 + r = self.module.sum([1, 2]) + assert r == 3 + r = self.module.sum_with_use([1, 2]) + assert r == 3 + + r = self.module.mod.sum([1, 2]) + assert r == 3 + r = self.module.mod.fsum([1, 2]) + assert r == 3 + + +class TestF2cmapOption(TestAssumedShapeSumExample): + def setup_method(self): + # Use a custom file name for .f2py_f2cmap + self.sources = list(self.sources) + f2cmap_src = self.sources.pop(-1) + + self.f2cmap_file = tempfile.NamedTemporaryFile(delete=False) + with open(f2cmap_src, "rb") as f: + self.f2cmap_file.write(f.read()) + self.f2cmap_file.close() + + self.sources.append(self.f2cmap_file.name) + self.options = ["--f2cmap", self.f2cmap_file.name] + + super().setup_method() + + def teardown_method(self): + os.unlink(self.f2cmap_file.name) diff --git a/python/numpy/f2py/tests/test_block_docstring.py b/python/numpy/f2py/tests/test_block_docstring.py new file mode 100644 index 000000000..ba255a1b4 --- /dev/null +++ b/python/numpy/f2py/tests/test_block_docstring.py @@ -0,0 +1,20 @@ +import sys + +import pytest + +from numpy.testing import IS_PYPY + +from . import util + + +@pytest.mark.slow +class TestBlockDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "block_docstring", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_block_docstring(self): + expected = "bar : 'i'-array(2,3)\n" + assert self.module.block.__doc__ == expected diff --git a/python/numpy/f2py/tests/test_callback.py b/python/numpy/f2py/tests/test_callback.py new file mode 100644 index 000000000..6614efb16 --- /dev/null +++ b/python/numpy/f2py/tests/test_callback.py @@ -0,0 +1,263 @@ +import math +import platform +import sys +import textwrap +import threading +import time +import traceback + +import pytest + +import numpy as np +from numpy.testing import IS_PYPY + +from . import util + + +class TestF77Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "foo.f")] + + @pytest.mark.parametrize("name", ["t", "t2"]) + @pytest.mark.slow + def test_all(self, name): + self.check_function(name) + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = t(fun,[fun_extra_args]) + + Wrapper for ``t``. 
+ + Parameters + ---------- + fun : call-back function + + Other Parameters + ---------------- + fun_extra_args : input tuple, optional + Default: () + + Returns + ------- + a : int + + Notes + ----- + Call-back functions:: + + def fun(): return a + Return objects: + a : int + """) + assert self.module.t.__doc__ == expected + + def check_function(self, name): + t = getattr(self.module, name) + r = t(lambda: 4) + assert r == 4 + r = t(lambda a: 5, fun_extra_args=(6, )) + assert r == 5 + r = t(lambda a: a, fun_extra_args=(6, )) + assert r == 6 + r = t(lambda a: 5 + a, fun_extra_args=(7, )) + assert r == 12 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + r = t(math.degrees, fun_extra_args=(math.pi, )) + assert r == 180 + + r = t(self.module.func, fun_extra_args=(6, )) + assert r == 17 + r = t(self.module.func0) + assert r == 11 + r = t(self.module.func0._cpointer) + assert r == 11 + + class A: + def __call__(self): + return 7 + + def mth(self): + return 9 + + a = A() + r = t(a) + assert r == 7 + r = t(a.mth) + assert r == 9 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback(self): + def callback(code): + if code == "r": + return 0 + else: + return 1 + + f = self.module.string_callback + r = f(callback) + assert r == 0 + + @pytest.mark.skipif(sys.platform == 'win32', + reason='Fails with MinGW64 Gfortran (Issue #9673)') + def test_string_callback_array(self): + # See gh-10027 + cu1 = np.zeros((1, ), "S8") + cu2 = np.zeros((1, 8), "c") + cu3 = np.array([""], "S8") + + def callback(cu, lencu): + if cu.shape != (lencu,): + return 1 + if cu.dtype != "S8": + return 2 + if not np.all(cu == b""): + return 3 + return 0 + + f = self.module.string_callback_array + for cu in [cu1, cu2, cu3]: + res = f(callback, cu, cu.size) + assert res == 0 + + def test_threadsafety(self): + # Segfaults if the callback handling is not threadsafe + + errors = [] + + def cb(): + # Sleep here to make it more likely for another thread + # to call their callback at the same time. 
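+ # (Editorial note: ~1 ms is long enough to make overlapping callback + # entry across the 40 runner threads likely, while keeping the test fast.)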
+ time.sleep(1e-3) + + # Check reentrancy + r = self.module.t(lambda: 123) + assert r == 123 + + return 42 + + def runner(name): + try: + for j in range(50): + r = self.module.t(cb) + assert r == 42 + self.check_function(name) + except Exception: + errors.append(traceback.format_exc()) + + threads = [ + threading.Thread(target=runner, args=(arg, )) + for arg in ("t", "t2") for n in range(20) + ] + + for t in threads: + t.start() + + for t in threads: + t.join() + + errors = "\n\n".join(errors) + if errors: + raise AssertionError(errors) + + def test_hidden_callback(self): + try: + self.module.hidden_callback(2) + except Exception as msg: + assert str(msg).startswith("Callback global_f not defined") + + try: + self.module.hidden_callback2(2) + except Exception as msg: + assert str(msg).startswith("cb: Callback global_f not defined") + + self.module.global_f = lambda x: x + 1 + r = self.module.hidden_callback(2) + assert r == 3 + + self.module.global_f = lambda x: x + 2 + r = self.module.hidden_callback(2) + assert r == 4 + + del self.module.global_f + try: + self.module.hidden_callback(2) + except Exception as msg: + assert str(msg).startswith("Callback global_f not defined") + + self.module.global_f = lambda x=0: x + 3 + r = self.module.hidden_callback(2) + assert r == 5 + + # reproducer of gh18341 + r = self.module.hidden_callback2(2) + assert r == 3 + + +class TestF77CallbackPythonTLS(TestF77Callback): + """ + Callback tests using Python thread-local storage instead of the + compiler-provided thread-local storage. + """ + + options = ["-DF2PY_USE_PYTHON_TLS"] + + +class TestF90Callback(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh17797.f90")] + + @pytest.mark.slow + def test_gh17797(self): + def incr(x): + return x + 123 + + y = np.array([1, 2, 3], dtype=np.int64) + r = self.module.gh17797(incr, y) + assert r == 123 + 1 + 2 + 3 + + +class TestGH18335(util.F2PyTest): + """Reproducing the reported issue requires a specific input; extending + this class with other tests may break the conditions under which the + issue appears, so the reproducer is implemented as a separate test + class. Do not extend this test class with other tests!
+ """ + sources = [util.getpath("tests", "src", "callback", "gh18335.f90")] + + @pytest.mark.slow + def test_gh18335(self): + def foo(x): + x[0] += 1 + + r = self.module.gh18335(foo) + assert r == 123 + 1 + + +class TestGH25211(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh25211.f"), + util.getpath("tests", "src", "callback", "gh25211.pyf")] + module_name = "callback2" + + def test_gh25211(self): + def bar(x): + return x * x + + res = self.module.foo(bar) + assert res == 110 + + +@pytest.mark.slow +@pytest.mark.xfail(condition=(platform.system().lower() == 'darwin'), + run=False, + reason="Callback aborts cause CI failures on macOS") +class TestCBFortranCallstatement(util.F2PyTest): + sources = [util.getpath("tests", "src", "callback", "gh26681.f90")] + options = ['--lower'] + + def test_callstatement_fortran(self): + with pytest.raises(ValueError, match='helpme') as exc: + self.module.mypy_abort = self.module.utils.my_abort + self.module.utils.do_something('helpme') diff --git a/python/numpy/f2py/tests/test_character.py b/python/numpy/f2py/tests/test_character.py new file mode 100644 index 000000000..74868a6f0 --- /dev/null +++ b/python/numpy/f2py/tests/test_character.py @@ -0,0 +1,641 @@ +import textwrap + +import pytest + +import numpy as np +from numpy.f2py.tests import util +from numpy.testing import assert_array_equal, assert_equal, assert_raises + + +@pytest.mark.slow +class TestCharacterString(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character_string' + length_list = ['1', '3', 'star'] + + code = '' + for length in length_list: + fsuffix = length + clength = {'star': '(*)'}.get(length, length) + + code += textwrap.dedent(f""" + + subroutine {fprefix}_input_{fsuffix}(c, o, n) + character*{clength}, intent(in) :: c + integer n + !f2py integer, depend(c), intent(hide) :: n = slen(c) + integer*1, dimension(n) :: o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input_{fsuffix} + + subroutine {fprefix}_output_{fsuffix}(c, o, n) + character*{clength}, intent(out) :: c + integer n + integer*1, dimension(n), intent(in) :: o + !f2py integer, depend(o), intent(hide) :: n = len(o) + c = transfer(o, c) + end subroutine {fprefix}_output_{fsuffix} + + subroutine {fprefix}_array_input_{fsuffix}(c, o, m, n) + integer m, i, n + character*{clength}, intent(in), dimension(m) :: c + !f2py integer, depend(c), intent(hide) :: m = len(c) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m, n), intent(out) :: o + do i=1,m + o(i, :) = transfer(c(i), o(i, :)) + end do + end subroutine {fprefix}_array_input_{fsuffix} + + subroutine {fprefix}_array_output_{fsuffix}(c, o, m, n) + character*{clength}, intent(out), dimension(m) :: c + integer n + integer*1, dimension(m, n), intent(in) :: o + !f2py character(f2py_len=n) :: c + !f2py integer, depend(o), intent(hide) :: m = len(o) + !f2py integer, depend(o), intent(hide) :: n = shape(o, 1) + do i=1,m + c(i) = transfer(o(i, :), c(i)) + end do + end subroutine {fprefix}_array_output_{fsuffix} + + subroutine {fprefix}_2d_array_input_{fsuffix}(c, o, m1, m2, n) + integer m1, m2, i, j, n + character*{clength}, intent(in), dimension(m1, m2) :: c + !f2py integer, depend(c), intent(hide) :: m1 = len(c) + !f2py integer, depend(c), intent(hide) :: m2 = shape(c, 1) + !f2py integer, depend(c), intent(hide) :: n = f2py_itemsize(c) + integer*1, dimension(m1, m2, n), intent(out) :: o + do i=1,m1 + do j=1,m2 + o(i, 
j, :) = transfer(c(i, j), o(i, j, :)) + end do + end do + end subroutine {fprefix}_2d_array_input_{fsuffix} + """) + + @pytest.mark.parametrize("length", length_list) + def test_input(self, length): + fsuffix = {'(*)': 'star'}.get(length, length) + f = getattr(self.module, self.fprefix + '_input_' + fsuffix) + + a = {'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length] + + assert_array_equal(f(a), np.array(list(map(ord, a)), dtype='u1')) + + @pytest.mark.parametrize("length", length_list[:-1]) + def test_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_output_' + fsuffix) + + a = {'1': 'a', '3': 'abc'}[length] + + assert_array_equal(f(np.array(list(map(ord, a)), dtype='u1')), + a.encode()) + + @pytest.mark.parametrize("length", length_list) + def test_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_input_' + fsuffix) + + a = np.array([{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length], + ], dtype='S') + + expected = np.array([list(s) for s in a], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_array_output(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_array_output_' + fsuffix) + + expected = np.array( + [{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], dtype='S') + + a = np.array([list(s) for s in expected], dtype='u1') + assert_array_equal(f(a), expected) + + @pytest.mark.parametrize("length", length_list) + def test_2d_array_input(self, length): + fsuffix = length + f = getattr(self.module, self.fprefix + '_2d_array_input_' + fsuffix) + + a = np.array([[{'1': 'a', '3': 'abc', 'star': 'abcde' * 3}[length], + {'1': 'A', '3': 'ABC', 'star': 'ABCDE' * 3}[length]], + [{'1': 'f', '3': 'fgh', 'star': 'fghij' * 3}[length], + {'1': 'F', '3': 'FGH', 'star': 'FGHIJ' * 3}[length]]], + dtype='S') + expected = np.array([[list(item) for item in row] for row in a], + dtype='u1', order='F') + assert_array_equal(f(a), expected) + + +class TestCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_input(c, o) + character, intent(in) :: c + integer*1 o + !f2py intent(out) o + o = transfer(c, o) + end subroutine {fprefix}_input + + subroutine {fprefix}_output(c, o) + character :: c + integer*1, intent(in) :: o + !f2py intent(out) c + c = transfer(o, c) + end subroutine {fprefix}_output + + subroutine {fprefix}_input_output(c, o) + character, intent(in) :: c + character o + !f2py intent(out) o + o = c + end subroutine {fprefix}_input_output + + subroutine {fprefix}_inout(c, n) + character :: c, n + !f2py intent(in) n + !f2py intent(inout) c + c = n + end subroutine {fprefix}_inout + + function {fprefix}_return(o) result (c) + character :: c + character, intent(in) :: o + c = transfer(o, c) + end function {fprefix}_return + + subroutine {fprefix}_array_input(c, o) + character, intent(in) :: c(3) + integer*1 o(3) + !f2py intent(out) o + integer i + do i=1,3 + o(i) = transfer(c(i), o(i)) + end do + end subroutine {fprefix}_array_input + + subroutine {fprefix}_2d_array_input(c, o) + character, intent(in) :: c(2, 3) + integer*1 o(2, 3) + !f2py intent(out) o + integer i, j + do i=1,2 + do j=1,3 + o(i, j) = transfer(c(i, j), o(i, j)) + end do + end do + end subroutine 
{fprefix}_2d_array_input + + subroutine {fprefix}_array_output(c, o) + character :: c(3) + integer*1, intent(in) :: o(3) + !f2py intent(out) c + do i=1,3 + c(i) = transfer(o(i), c(i)) + end do + end subroutine {fprefix}_array_output + + subroutine {fprefix}_array_inout(c, n) + character :: c(3), n(3) + !f2py intent(in) n(3) + !f2py intent(inout) c(3) + do i=1,3 + c(i) = n(i) + end do + end subroutine {fprefix}_array_inout + + subroutine {fprefix}_2d_array_inout(c, n) + character :: c(2, 3), n(2, 3) + !f2py intent(in) n(2, 3) + !f2py intent(inout) c(2, 3) + integer i, j + do i=1,2 + do j=1,3 + c(i, j) = n(i, j) + end do + end do + end subroutine {fprefix}_2d_array_inout + + function {fprefix}_array_return(o) result (c) + character, dimension(3) :: c + character, intent(in) :: o(3) + do i=1,3 + c(i) = o(i) + end do + end function {fprefix}_array_return + + function {fprefix}_optional(o) result (c) + character, intent(in) :: o + !f2py character o = "a" + character :: c + c = o + end function {fprefix}_optional + """) + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_input(self, dtype): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f(np.array('a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(b'a', dtype=dtype)), ord('a')) + assert_equal(f(np.array(['a'], dtype=dtype)), ord('a')) + assert_equal(f(np.array('abc', dtype=dtype)), ord('a')) + assert_equal(f(np.array([['a']], dtype=dtype)), ord('a')) + + def test_input_varia(self): + f = getattr(self.module, self.fprefix + '_input') + + assert_equal(f('a'), ord('a')) + assert_equal(f(b'a'), ord(b'a')) + assert_equal(f(''), 0) + assert_equal(f(b''), 0) + assert_equal(f(b'\0'), 0) + assert_equal(f('ab'), ord('a')) + assert_equal(f(b'ab'), ord('a')) + assert_equal(f(['a']), ord('a')) + + assert_equal(f(np.array(b'a')), ord('a')) + assert_equal(f(np.array([b'a'])), ord('a')) + a = np.array('a') + assert_equal(f(a), ord('a')) + a = np.array(['a']) + assert_equal(f(a), ord('a')) + + try: + f([]) + except IndexError as msg: + if not str(msg).endswith(' got 0-list'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on empty list') + + try: + f(97) + except TypeError as msg: + if not str(msg).endswith(' got int instance'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on int value') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_array_input') + + assert_array_equal(f(np.array(['a', 'b', 'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f(np.array([b'a', b'b', b'c'], dtype=dtype)), + np.array(list(map(ord, 'abc')), dtype='i1')) + + def test_array_input_varia(self): + f = getattr(self.module, self.fprefix + '_array_input') + assert_array_equal(f(['a', 'b', 'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + assert_array_equal(f([b'a', b'b', b'c']), + np.array(list(map(ord, 'abc')), dtype='i1')) + + try: + f(['a', 'b', 'c', 'd']) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1', 'U1']) + def test_2d_array_input(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_input') + + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], dtype=dtype, order='F') + expected = a.view(np.uint32 if dtype == 'U1' else np.uint8) + assert_array_equal(f(a),
expected) + + def test_output(self): + f = getattr(self.module, self.fprefix + '_output') + + assert_equal(f(ord(b'a')), b'a') + assert_equal(f(0), b'\0') + + def test_array_output(self): + f = getattr(self.module, self.fprefix + '_array_output') + + assert_array_equal(f(list(map(ord, 'abc'))), + np.array(list('abc'), dtype='S1')) + + def test_input_output(self): + f = getattr(self.module, self.fprefix + '_input_output') + + assert_equal(f(b'a'), b'a') + assert_equal(f('a'), b'a') + assert_equal(f(''), b'\0') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_inout') + + a = np.array(list('abc'), dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(list('Abc'), dtype=a.dtype)) + f(a[1:], 'B') + assert_array_equal(a, np.array(list('ABc'), dtype=a.dtype)) + + a = np.array(['abc'], dtype=dtype) + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + def test_inout_varia(self): + f = getattr(self.module, self.fprefix + '_inout') + a = np.array('abc', dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array('Abc', dtype=a.dtype)) + + a = np.array(['abc'], dtype='S3') + f(a, 'A') + assert_array_equal(a, np.array(['Abc'], dtype=a.dtype)) + + try: + f('abc', 'A') + except ValueError as msg: + if not str(msg).endswith(' got 3-str'): + raise + else: + raise SystemError(f'{f.__name__} should have failed on str value') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_array_inout') + n = np.array(['A', 'B', 'C'], dtype=dtype, order='F') + + a = np.array(['a', 'b', 'c'], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype) + f(a[1:], n) + assert_array_equal(a, np.array(['a', 'A', 'B', 'C'], dtype=dtype)) + + a = np.array([['a', 'b', 'c']], dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, np.array([['A', 'B', 'C']], dtype=dtype)) + + a = np.array(['a', 'b', 'c', 'd'], dtype=dtype, order='F') + try: + f(a, n) + except ValueError as msg: + if not str(msg).endswith( + 'th dimension must be fixed to 3 but got 4'): + raise + else: + raise SystemError( + f'{f.__name__} should have failed on wrong input') + + @pytest.mark.parametrize("dtype", ['c', 'S1']) + def test_2d_array_inout(self, dtype): + f = getattr(self.module, self.fprefix + '_2d_array_inout') + n = np.array([['A', 'B', 'C'], + ['D', 'E', 'F']], + dtype=dtype, order='F') + a = np.array([['a', 'b', 'c'], + ['d', 'e', 'f']], + dtype=dtype, order='F') + f(a, n) + assert_array_equal(a, n) + + def test_return(self): + f = getattr(self.module, self.fprefix + '_return') + + assert_equal(f('a'), b'a') + + @pytest.mark.skip('fortran function returning array segfaults') + def test_array_return(self): + f = getattr(self.module, self.fprefix + '_array_return') + + a = np.array(list('abc'), dtype='S1') + assert_array_equal(f(a), a) + + def test_optional(self): + f = getattr(self.module, self.fprefix + '_optional') + + assert_equal(f(), b"a") + assert_equal(f(b'B'), b"B") + + +class TestMiscCharacter(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/test-build-f2py'] + suffix = '.f90' + fprefix = 'test_misc_character' + + code = textwrap.dedent(f""" + subroutine {fprefix}_gh18684(x, y, m) + character(len=5), dimension(m), intent(in) :: x + character*5, dimension(m), intent(out) :: y + integer i, m + !f2py integer, intent(hide), depend(x) :: m = f2py_len(x) + do i=1,m + y(i) = x(i) + end do + end 
subroutine {fprefix}_gh18684 + + subroutine {fprefix}_gh6308(x, i) + integer i + !f2py check(i>=0 && i<12) i + character*5 name, x + common name(12) + name(i + 1) = x + end subroutine {fprefix}_gh6308 + + subroutine {fprefix}_gh4519(x) + character(len=*), intent(in) :: x(:) + !f2py intent(out) x + integer :: i + ! Uncomment for debug printing: + !do i=1, size(x) + ! print*, "x(",i,")=", x(i) + !end do + end subroutine {fprefix}_gh4519 + + pure function {fprefix}_gh3425(x) result (y) + character(len=*), intent(in) :: x + character(len=len(x)) :: y + integer :: i + do i = 1, len(x) + j = iachar(x(i:i)) + if (j>=iachar("a") .and. j<=iachar("z") ) then + y(i:i) = achar(j-32) + else + y(i:i) = x(i:i) + endif + end do + end function {fprefix}_gh3425 + + subroutine {fprefix}_character_bc_new(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x + !f2py character, dimension((x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(x == 'a' || x == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(&x, &y, z) + !f2py callprotoargument character*, character*, character* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_new + + subroutine {fprefix}_character_bc_old(x, y, z) + character, intent(in) :: x + character, intent(out) :: y + !f2py character, depend(x) :: y = x[0] + !f2py character, dimension((*x=='a'?1:2)), depend(x), intent(out) :: z + character, dimension(*) :: z + !f2py character, optional, check(*x == 'a' || x[0] == 'b') :: x = 'a' + !f2py callstatement (*f2py_func)(x, y, z) + !f2py callprotoargument char*, char*, char* + if (y.eq.x) then + y = x + else + y = 'e' + endif + z(1) = 'c' + end subroutine {fprefix}_character_bc_old + """) + + @pytest.mark.slow + def test_gh18684(self): + # Test character(len=5) and character*5 usages + f = getattr(self.module, self.fprefix + '_gh18684') + x = np.array(["abcde", "fghij"], dtype='S5') + y = f(x) + + assert_array_equal(x, y) + + def test_gh6308(self): + # Test character string array in a common block + f = getattr(self.module, self.fprefix + '_gh6308') + + assert_equal(self.module._BLNK_.name.dtype, np.dtype('S5')) + assert_equal(len(self.module._BLNK_.name), 12) + f("abcde", 0) + assert_equal(self.module._BLNK_.name[0], b"abcde") + f("12345", 5) + assert_equal(self.module._BLNK_.name[5], b"12345") + + def test_gh4519(self): + # Test array of assumed length strings + f = getattr(self.module, self.fprefix + '_gh4519') + + for x, expected in [ + ('a', {'shape': (), 'dtype': np.dtype('S1')}), + ('text', {'shape': (), 'dtype': np.dtype('S4')}), + (np.array(['1', '2', '3'], dtype='S1'), + {'shape': (3,), 'dtype': np.dtype('S1')}), + (['1', '2', '34'], + {'shape': (3,), 'dtype': np.dtype('S2')}), + (['', ''], {'shape': (2,), 'dtype': np.dtype('S1')})]: + r = f(x) + for k, v in expected.items(): + assert_equal(getattr(r, k), v) + + def test_gh3425(self): + # Test returning a copy of assumed length string + f = getattr(self.module, self.fprefix + '_gh3425') + # f is equivalent to bytes.upper + + assert_equal(f('abC'), b'ABC') + assert_equal(f(''), b'') + assert_equal(f('abC12d'), b'ABC12D') + + @pytest.mark.parametrize("state", ['new', 'old']) + def test_character_bc(self, state): + f = getattr(self.module, self.fprefix + '_character_bc_' + state) + + c, a = f() + assert_equal(c, b'a') + assert_equal(len(a), 1) + + c, a = f(b'b') + assert_equal(c, b'b') + assert_equal(len(a), 2) + + 
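# Editorial note: b'c' violates the pyf-level constraint + # check(x == 'a' || x == 'b') declared above, so the wrapper should + # raise before the Fortran routine is entered. +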
assert_raises(Exception, lambda: f(b'c')) + + +class TestStringScalarArr(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "scalar_string.f90")] + + def test_char(self): + for out in (self.module.string_test.string, + self.module.string_test.string77): + expected = () + assert out.shape == expected + expected = '|S8' + assert out.dtype == expected + + def test_char_arr(self): + for out in (self.module.string_test.strarr, + self.module.string_test.strarr77): + expected = (5, 7) + assert out.shape == expected + expected = '|S12' + assert out.dtype == expected + +class TestStringAssumedLength(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24008.f")] + + def test_gh24008(self): + self.module.greet("joe", "bob") + +@pytest.mark.slow +class TestStringOptionalInOut(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "gh24662.f90")] + + def test_gh24662(self): + self.module.string_inout_optional() + a = np.array('hi', dtype='S32') + self.module.string_inout_optional(a) + assert "output string" in a.tobytes().decode() + with pytest.raises(Exception): # noqa: B017 + aa = "Hi" + self.module.string_inout_optional(aa) + + +@pytest.mark.slow +class TestNewCharHandling(util.F2PyTest): + # from v1.24 onwards, gh-19388 + sources = [ + util.getpath("tests", "src", "string", "gh25286.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 + +@pytest.mark.slow +class TestBCCharHandling(util.F2PyTest): + # SciPy style, "incorrect" bindings with a hook + sources = [ + util.getpath("tests", "src", "string", "gh25286_bc.pyf"), + util.getpath("tests", "src", "string", "gh25286.f90") + ] + module_name = "_char_handling_test" + + def test_gh25286(self): + info = self.module.charint('T') + assert info == 2 diff --git a/python/numpy/f2py/tests/test_common.py b/python/numpy/f2py/tests/test_common.py new file mode 100644 index 000000000..b9fbd84d5 --- /dev/null +++ b/python/numpy/f2py/tests/test_common.py @@ -0,0 +1,23 @@ +import pytest + +import numpy as np + +from . import util + + +@pytest.mark.slow +class TestCommonBlock(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "block.f")] + + def test_common_block(self): + self.module.initcb() + assert self.module.block.long_bn == np.array(1.0, dtype=np.float64) + assert self.module.block.string_bn == np.array("2", dtype="|S1") + assert self.module.block.ok == np.array(3, dtype=np.int32) + + +class TestCommonWithUse(util.F2PyTest): + sources = [util.getpath("tests", "src", "common", "gh19161.f90")] + + def test_common_gh19161(self): + assert self.module.data.x == 0 diff --git a/python/numpy/f2py/tests/test_crackfortran.py b/python/numpy/f2py/tests/test_crackfortran.py new file mode 100644 index 000000000..c3967cfb9 --- /dev/null +++ b/python/numpy/f2py/tests/test_crackfortran.py @@ -0,0 +1,421 @@ +import contextlib +import importlib +import io +import textwrap +import time + +import pytest + +import numpy as np +from numpy.f2py import crackfortran +from numpy.f2py.crackfortran import markinnerspaces, nameargspattern + +from . 
import util + + +class TestNoSpace(util.F2PyTest): + # issue gh-15035: add handling for endsubroutine, endfunction with no space + # between "end" and the block name + sources = [util.getpath("tests", "src", "crackfortran", "gh15035.f")] + + def test_module(self): + k = np.array([1, 2, 3], dtype=np.float64) + w = np.array([1, 2, 3], dtype=np.float64) + self.module.subb(k) + assert np.allclose(k, w + 1) + self.module.subc([w, k]) + assert np.allclose(k, w + 1) + assert self.module.t0("23") == b"2" + + +class TestPublicPrivate: + def test_defaultPrivate(self): + fpath = util.getpath("tests", "src", "crackfortran", "privatemod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" in mod["vars"]["b"]["attrspec"] + assert "public" not in mod["vars"]["b"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + def test_defaultPublic(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "publicmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "private" in mod["vars"]["a"]["attrspec"] + assert "public" not in mod["vars"]["a"]["attrspec"] + assert "private" not in mod["vars"]["seta"]["attrspec"] + assert "public" in mod["vars"]["seta"]["attrspec"] + + def test_access_type(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "accesstype.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + tt = mod[0]['vars'] + assert set(tt['a']['attrspec']) == {'private', 'bind(c)'} + assert set(tt['b_']['attrspec']) == {'public', 'bind(c)'} + assert set(tt['c']['attrspec']) == {'public'} + + def test_nowrap_private_proceedures(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh23879.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + pyf = crackfortran.crack2fortran(mod) + assert 'bar' not in pyf + +class TestModuleProcedure: + def test_moduleOperators(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert "body" in mod and len(mod["body"]) == 9 + assert mod["body"][1]["name"] == "operator(.item.)" + assert "implementedby" in mod["body"][1] + assert mod["body"][1]["implementedby"] == \ + ["item_int", "item_real"] + assert mod["body"][2]["name"] == "operator(==)" + assert "implementedby" in mod["body"][2] + assert mod["body"][2]["implementedby"] == ["items_are_equal"] + assert mod["body"][3]["name"] == "assignment(=)" + assert "implementedby" in mod["body"][3] + assert mod["body"][3]["implementedby"] == \ + ["get_int", "get_real"] + + def test_notPublicPrivate(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "pubprivmod.f90") + mod = crackfortran.crackfortran([str(fpath)]) + assert len(mod) == 1 + mod = mod[0] + assert mod['vars']['a']['attrspec'] == ['private', ] + assert mod['vars']['b']['attrspec'] == ['public', ] + assert mod['vars']['seta']['attrspec'] == ['public', ] + + +class TestExternal(util.F2PyTest): + # issue gh-17859: add external attribute support + sources = [util.getpath("tests", "src", "crackfortran", "gh17859.f")] + + def test_external_as_statement(self): + def incr(x): + return x + 123 + + r = self.module.external_as_statement(incr) + assert r == 123 + 
+    def test_external_as_attribute(self):
+        def incr(x):
+            return x + 123
+
+        r = self.module.external_as_attribute(incr)
+        assert r == 123
+
+
+class TestCrackFortran(util.F2PyTest):
+    # gh-2848: commented lines between parameters in subroutine parameter lists
+    sources = [util.getpath("tests", "src", "crackfortran", "gh2848.f90"),
+               util.getpath("tests", "src", "crackfortran", "common_with_division.f")
+               ]
+
+    def test_gh2848(self):
+        r = self.module.gh2848(1, 2)
+        assert r == (1, 2)
+
+    def test_common_with_division(self):
+        assert len(self.module.mortmp.ctmp) == 11
+
+
+class TestMarkinnerspaces:
+    # gh-14118: markinnerspaces does not handle multiple quotations
+
+    def test_do_not_touch_normal_spaces(self):
+        test_list = ["a ", " a", "a b c", "'abcdefghij'"]
+        for i in test_list:
+            assert markinnerspaces(i) == i
+
+    def test_one_relevant_space(self):
+        assert markinnerspaces("a 'b c' \\' \\'") == "a 'b@_@c' \\' \\'"
+        assert markinnerspaces(r'a "b c" \" \"') == r'a "b@_@c" \" \"'
+
+    def test_ignore_inner_quotes(self):
+        assert markinnerspaces("a 'b c\" \" d' e") == "a 'b@_@c\"@_@\"@_@d' e"
+        assert markinnerspaces("a \"b c' ' d\" e") == "a \"b@_@c'@_@'@_@d\" e"
+
+    def test_multiple_relevant_spaces(self):
+        assert markinnerspaces("a 'b c' 'd e'") == "a 'b@_@c' 'd@_@e'"
+        assert markinnerspaces(r'a "b c" "d e"') == r'a "b@_@c" "d@_@e"'
+
+
+class TestDimSpec(util.F2PyTest):
+    """This test suite tests various expressions that are used as dimension
+    specifications.
+
+    There exist two use cases where analyzing dimension
+    specifications is important.
+
+    In the first case, the size of output arrays must be defined based
+    on the inputs to a Fortran function. Because Fortran supports
+    arbitrary bases for indexing, for instance, `arr(lower:upper)`,
+    f2py has to evaluate an expression `upper - lower + 1` where
+    `lower` and `upper` are arbitrary expressions of input parameters.
+    The evaluation is performed in C, so f2py has to translate Fortran
+    expressions to valid C expressions (an alternative approach is
+    that a developer specifies the corresponding C expressions in a
+    .pyf file).
+
+    In the second case, a user provides an input array with a given
+    size, but some hidden parameters used in the dimension
+    specifications need to be determined from the input array size.
+    This is a harder problem because f2py has to solve the inverse
+    problem: find a parameter `p` such that `upper(p) - lower(p) + 1`
+    equals the size of the input array. When this equation cannot be
+    solved (e.g. because the input array size is wrong), an error is
+    raised before calling the Fortran function (which otherwise would
+    likely crash the Python process). f2py currently supports this
+    case only when the equation is linear with respect to the unknown
+    parameter.
+
+    """
+
+    suffix = ".f90"
+
+    code_template = textwrap.dedent("""
+          function get_arr_size_{count}(a, n) result (length)
+            integer, intent(in) :: n
+            integer, dimension({dimspec}), intent(out) :: a
+            integer length
+            length = size(a)
+          end function
+
+          subroutine get_inv_arr_size_{count}(a, n)
+            integer :: n
+            ! the value of n is computed in f2py wrapper
+            !f2py intent(out) n
+            integer, dimension({dimspec}), intent(in) :: a
+            if (a({first}).gt.0) then
+              ! print*, "a=", a
+            endif
+          end subroutine
+    """)
+
+    linear_dimspecs = [
+        "n", "2*n", "2:n", "n/2", "5 - n/2", "3*n:20", "n*(n+1):n*(n+5)",
+        "2*n, n"
+    ]
+    nonlinear_dimspecs = ["2*n:3*n*n+2*n"]
+    all_dimspecs = linear_dimspecs + nonlinear_dimspecs
+
+    code = ""
+    for count, dimspec in enumerate(all_dimspecs):
+        lst = [(d.split(":")[0] if ":" in d else "1") for d in dimspec.split(',')]
+        code += code_template.format(
+            count=count,
+            dimspec=dimspec,
+            first=", ".join(lst),
+        )
+
+    @pytest.mark.parametrize("dimspec", all_dimspecs)
+    @pytest.mark.slow
+    def test_array_size(self, dimspec):
+
+        count = self.all_dimspecs.index(dimspec)
+        get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+
+        for n in [1, 2, 3, 4, 5]:
+            sz, a = get_arr_size(n)
+            assert a.size == sz
+
+    @pytest.mark.parametrize("dimspec", all_dimspecs)
+    def test_inv_array_size(self, dimspec):
+
+        count = self.all_dimspecs.index(dimspec)
+        get_arr_size = getattr(self.module, f"get_arr_size_{count}")
+        get_inv_arr_size = getattr(self.module, f"get_inv_arr_size_{count}")
+
+        for n in [1, 2, 3, 4, 5]:
+            sz, a = get_arr_size(n)
+            if dimspec in self.nonlinear_dimspecs:
+                # one must specify n as input, the call will ensure
+                # that a and n are compatible:
+                n1 = get_inv_arr_size(a, n)
+            else:
+                # in case of linear dependence, n can be determined
+                # from the shape of a:
+                n1 = get_inv_arr_size(a)
+            # n1 may be different from n (for instance, when `a` size
+            # is a function of some `n` fraction) but it must produce
+            # the same sized array
+            sz1, _ = get_arr_size(n1)
+            assert sz == sz1, (n, n1, sz, sz1)
+
+
+class TestModuleDeclaration:
+    def test_dependencies(self, tmp_path):
+        fpath = util.getpath("tests", "src", "crackfortran", "foo_deps.f90")
+        mod = crackfortran.crackfortran([str(fpath)])
+        assert len(mod) == 1
+        assert mod[0]["vars"]["abar"]["="] == "bar('abar')"
+
+
+class TestEval(util.F2PyTest):
+    def test_eval_scalar(self):
+        eval_scalar = crackfortran._eval_scalar
+
+        assert eval_scalar('123', {}) == '123'
+        assert eval_scalar('12 + 3', {}) == '15'
+        assert eval_scalar('a + b', {"a": 1, "b": 2}) == '3'
+        assert eval_scalar('"123"', {}) == "'123'"
+
+
+class TestFortranReader(util.F2PyTest):
+    @pytest.mark.parametrize("encoding",
+                             ['ascii', 'utf-8', 'utf-16', 'utf-32'])
+    def test_input_encoding(self, tmp_path, encoding):
+        # gh-635
+        f_path = tmp_path / f"input_with_{encoding}_encoding.f90"
+        with f_path.open('w', encoding=encoding) as ff:
+            ff.write("""
+                     subroutine foo()
+                     end subroutine foo
+                     """)
+        mod = crackfortran.crackfortran([str(f_path)])
+        assert mod[0]['name'] == 'foo'
+
+
+@pytest.mark.slow
+class TestUnicodeComment(util.F2PyTest):
+    sources = [util.getpath("tests", "src", "crackfortran", "unicode_comment.f90")]
+
+    @pytest.mark.skipif(
+        (importlib.util.find_spec("charset_normalizer") is None),
+        reason="test requires charset_normalizer which is not installed",
+    )
+    def test_encoding_comment(self):
+        self.module.foo(3)
+
+
+class TestNameArgsPatternBacktracking:
+    @pytest.mark.parametrize(
+        ['adversary'],
+        [
+            ('@)@bind@(@',),
+            ('@)@bind @(@',),
+            ('@)@bind foo bar baz@(@',)
+        ]
+    )
+    def test_nameargspattern_backtracking(self, adversary):
+        '''address ReDOS vulnerability:
+        https://github.com/numpy/numpy/issues/23338'''
+        trials_per_batch = 12
+        batches_per_regex = 4
+        start_reps, end_reps = 15, 25
+        for ii in range(start_reps, end_reps):
+            repeated_adversary = adversary * ii
+            # test times in small batches.
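+            # (12 trials per batch x 4 batches gives 48 timings per adversary
+            #  length; a catastrophically backtracking regex would far exceed
+            #  the 0.2 s median threshold asserted below.)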
+ # this gives us more chances to catch a bad regex + # while still catching it before too long if it is bad + for _ in range(batches_per_regex): + times = [] + for _ in range(trials_per_batch): + t0 = time.perf_counter() + mtch = nameargspattern.search(repeated_adversary) + times.append(time.perf_counter() - t0) + # our pattern should be much faster than 0.2s per search + # it's unlikely that a bad regex will pass even on fast CPUs + assert np.median(times) < 0.2 + assert not mtch + # if the adversary is capped with @)@, it becomes acceptable + # according to the old version of the regex. + # that should still be true. + good_version_of_adversary = repeated_adversary + '@)@' + assert nameargspattern.search(good_version_of_adversary) + +class TestFunctionReturn(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh23598.f90")] + + @pytest.mark.slow + def test_function_rettype(self): + # gh-23598 + assert self.module.intproduct(3, 4) == 12 + + +class TestFortranGroupCounters(util.F2PyTest): + def test_end_if_comment(self): + # gh-23533 + fpath = util.getpath("tests", "src", "crackfortran", "gh23533.f") + try: + crackfortran.crackfortran([str(fpath)]) + except Exception as exc: + assert False, f"'crackfortran.crackfortran' raised an exception {exc}" + + +class TestF77CommonBlockReader: + def test_gh22648(self, tmp_path): + fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") + with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: + mod = crackfortran.crackfortran([str(fpath)]) + assert "Mismatch" not in stdout_f2py.getvalue() + +class TestParamEval: + # issue gh-11612, array parameter parsing + def test_param_eval_nested(self): + v = '(/3.14, 4./)' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {'dp': 8, 'intparamarray': {1: 3, 2: 5}, + 'nested': {1: 1, 2: 2, 3: 3}} + dimspec = '(2)' + ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) + assert ret == {1: 3.14, 2: 4.0} + + def test_param_eval_nonstandard_range(self): + v = '(/ 6, 3, 1 /)' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '(-1:1)' + ret = crackfortran.param_eval(v, g_params, params, dimspec=dimspec) + assert ret == {-1: 6, 0: 3, 1: 1} + + def test_param_eval_empty_range(self): + v = '6' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '' + pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, + dimspec=dimspec) + + def test_param_eval_non_array_param(self): + v = '3.14_dp' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + ret = crackfortran.param_eval(v, g_params, params, dimspec=None) + assert ret == '3.14_dp' + + def test_param_eval_too_many_dims(self): + v = 'reshape((/ (i, i=1, 250) /), (/5, 10, 5/))' + g_params = {"kind": crackfortran._kind_func, + "selected_int_kind": crackfortran._selected_int_kind_func, + "selected_real_kind": crackfortran._selected_real_kind_func} + params = {} + dimspec = '(0:4, 3:12, 5)' + pytest.raises(ValueError, crackfortran.param_eval, v, g_params, params, + 
dimspec=dimspec) + +@pytest.mark.slow +class TestLowerF2PYDirective(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "gh27697.f90")] + options = ['--lower'] + + def test_no_lower_fail(self): + with pytest.raises(ValueError, match='aborting directly') as exc: + self.module.utils.my_abort('aborting directly') diff --git a/python/numpy/f2py/tests/test_data.py b/python/numpy/f2py/tests/test_data.py new file mode 100644 index 000000000..0cea5561b --- /dev/null +++ b/python/numpy/f2py/tests/test_data.py @@ -0,0 +1,71 @@ +import pytest + +import numpy as np +from numpy.f2py.crackfortran import crackfortran + +from . import util + + +class TestData(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_stmts.f90")] + + # For gh-23276 + @pytest.mark.slow + def test_data_stmts(self): + assert self.module.cmplxdat.i == 2 + assert self.module.cmplxdat.j == 3 + assert self.module.cmplxdat.x == 1.5 + assert self.module.cmplxdat.y == 2.0 + assert self.module.cmplxdat.pi == 3.1415926535897932384626433832795028841971693993751058209749445923078164062 + assert self.module.cmplxdat.medium_ref_index == np.array(1. + 0.j) + assert np.all(self.module.cmplxdat.z == np.array([3.5, 7.0])) + assert np.all(self.module.cmplxdat.my_array == np.array([ 1. + 2.j, -3. + 4.j])) + assert np.all(self.module.cmplxdat.my_real_array == np.array([ 1., 2., 3.])) + assert np.all(self.module.cmplxdat.ref_index_one == np.array([13.0 + 21.0j])) + assert np.all(self.module.cmplxdat.ref_index_two == np.array([-30.0 + 43.0j])) + + def test_crackedlines(self): + mod = crackfortran(self.sources) + assert mod[0]['vars']['x']['='] == '1.5' + assert mod[0]['vars']['y']['='] == '2.0' + assert mod[0]['vars']['pi']['='] == '3.1415926535897932384626433832795028841971693993751058209749445923078164062d0' + assert mod[0]['vars']['my_real_array']['='] == '(/1.0d0, 2.0d0, 3.0d0/)' + assert mod[0]['vars']['ref_index_one']['='] == '(13.0d0, 21.0d0)' + assert mod[0]['vars']['ref_index_two']['='] == '(-30.0d0, 43.0d0)' + assert mod[0]['vars']['my_array']['='] == '(/(1.0d0, 2.0d0), (-3.0d0, 4.0d0)/)' + assert mod[0]['vars']['z']['='] == '(/3.5, 7.0/)' + +class TestDataF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_common.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.mydata == 0 + + def test_crackedlines(self): + mod = crackfortran(str(self.sources[0])) + print(mod[0]['vars']) + assert mod[0]['vars']['mydata']['='] == '0' + + +class TestDataMultiplierF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_multiplier.f")] + + # For gh-23276 + def test_data_stmts(self): + assert self.module.mycom.ivar1 == 3 + assert self.module.mycom.ivar2 == 3 + assert self.module.mycom.ivar3 == 2 + assert self.module.mycom.ivar4 == 2 + assert self.module.mycom.evar5 == 0 + + +class TestDataWithCommentsF77(util.F2PyTest): + sources = [util.getpath("tests", "src", "crackfortran", "data_with_comments.f")] + + # For gh-23276 + def test_data_stmts(self): + assert len(self.module.mycom.mytab) == 3 + assert self.module.mycom.mytab[0] == 0 + assert self.module.mycom.mytab[1] == 4 + assert self.module.mycom.mytab[2] == 0 diff --git a/python/numpy/f2py/tests/test_docs.py b/python/numpy/f2py/tests/test_docs.py new file mode 100644 index 000000000..5d9aaac9f --- /dev/null +++ b/python/numpy/f2py/tests/test_docs.py @@ -0,0 +1,64 @@ +from pathlib import Path + +import pytest + +import numpy as np +from numpy.testing import 
assert_array_equal, assert_equal + +from . import util + + +def get_docdir(): + parents = Path(__file__).resolve().parents + try: + # Assumes that spin is used to run tests + nproot = parents[8] + except IndexError: + docdir = None + else: + docdir = nproot / "doc" / "source" / "f2py" / "code" + if docdir and docdir.is_dir(): + return docdir + # Assumes that an editable install is used to run tests + return parents[3] / "doc" / "source" / "f2py" / "code" + + +pytestmark = pytest.mark.skipif( + not get_docdir().is_dir(), + reason=f"Could not find f2py documentation sources" + f"({get_docdir()} does not exist)", +) + +def _path(*args): + return get_docdir().joinpath(*args) + +@pytest.mark.slow +class TestDocAdvanced(util.F2PyTest): + # options = ['--debug-capi', '--build-dir', '/tmp/build-f2py'] + sources = [_path('asterisk1.f90'), _path('asterisk2.f90'), + _path('ftype.f')] + + def test_asterisk1(self): + foo = self.module.foo1 + assert_equal(foo(), b'123456789A12') + + def test_asterisk2(self): + foo = self.module.foo2 + assert_equal(foo(2), b'12') + assert_equal(foo(12), b'123456789A12') + assert_equal(foo(20), b'123456789A123456789B') + + def test_ftype(self): + ftype = self.module + ftype.foo() + assert_equal(ftype.data.a, 0) + ftype.data.a = 3 + ftype.data.x = [1, 2, 3] + assert_equal(ftype.data.a, 3) + assert_array_equal(ftype.data.x, + np.array([1, 2, 3], dtype=np.float32)) + ftype.data.x[1] = 45 + assert_array_equal(ftype.data.x, + np.array([1, 45, 3], dtype=np.float32)) + + # TODO: implement test methods for other example Fortran codes diff --git a/python/numpy/f2py/tests/test_f2cmap.py b/python/numpy/f2py/tests/test_f2cmap.py new file mode 100644 index 000000000..a35320ccc --- /dev/null +++ b/python/numpy/f2py/tests/test_f2cmap.py @@ -0,0 +1,17 @@ +import numpy as np + +from . import util + + +class TestF2Cmap(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90"), + util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap") + ] + + # gh-15095 + def test_gh15095(self): + inp = np.ones(3) + out = self.module.func1(inp) + exp_out = 3 + assert out == exp_out diff --git a/python/numpy/f2py/tests/test_f2py2e.py b/python/numpy/f2py/tests/test_f2py2e.py new file mode 100644 index 000000000..2f91eb77c --- /dev/null +++ b/python/numpy/f2py/tests/test_f2py2e.py @@ -0,0 +1,964 @@ +import platform +import re +import shlex +import subprocess +import sys +import textwrap +from collections import namedtuple +from pathlib import Path + +import pytest + +from numpy.f2py.f2py2e import main as f2pycli +from numpy.testing._private.utils import NOGIL_BUILD + +from . import util + +####################### +# F2PY Test utilities # +###################### + +# Tests for CLI commands which call meson will fail if no compilers are present, these are to be skipped + +def compiler_check_f2pycli(): + if not util.has_fortran_compiler(): + pytest.skip("CLI command needs a Fortran compiler") + else: + f2pycli() + +######################### +# CLI utils and classes # +######################### + + +PPaths = namedtuple("PPaths", "finp, f90inp, pyf, wrap77, wrap90, cmodf") + + +def get_io_paths(fname_inp, mname="untitled"): + """Takes in a temporary file for testing and returns the expected output and input paths + + Here expected output is essentially one of any of the possible generated + files. 
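+    For example, ``hello.f90`` with ``mname="blah"`` maps to ``cmodf`` being
+    ``blahmodule.c`` and ``wrap77`` being ``blah-f2pywrappers.f``.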
+
+    .. note::
+
+        Since this does not actually run f2py, none of these are guaranteed to
+        exist, and module names are typically incorrect
+
+    Parameters
+    ----------
+    fname_inp : str
+        The input filename
+    mname : str, optional
+        The name of the module, untitled by default
+
+    Returns
+    -------
+    genp : NamedTuple PPaths
+        The possible paths which are generated, not all of which exist
+    """
+    bpath = Path(fname_inp)
+    return PPaths(
+        finp=bpath.with_suffix(".f"),
+        f90inp=bpath.with_suffix(".f90"),
+        pyf=bpath.with_suffix(".pyf"),
+        wrap77=bpath.with_name(f"{mname}-f2pywrappers.f"),
+        wrap90=bpath.with_name(f"{mname}-f2pywrappers2.f90"),
+        cmodf=bpath.with_name(f"{mname}module.c"),
+    )
+
+
+################
+# CLI Fixtures #
+################
+
+
+@pytest.fixture(scope="session")
+def hello_world_f90(tmpdir_factory):
+    """Generates a single f90 file for testing"""
+    fdat = util.getpath("tests", "src", "cli", "hiworld.f90").read_text()
+    fn = tmpdir_factory.getbasetemp() / "hello.f90"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def gh23598_warn(tmpdir_factory):
+    """F90 file for testing warnings in gh23598"""
+    fdat = util.getpath("tests", "src", "crackfortran", "gh23598Warn.f90").read_text()
+    fn = tmpdir_factory.getbasetemp() / "gh23598Warn.f90"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def gh22819_cli(tmpdir_factory):
+    """Signature (.pyf) file for testing disallowed CLI arguments in gh-22819"""
+    fdat = util.getpath("tests", "src", "cli", "gh_22819.pyf").read_text()
+    fn = tmpdir_factory.getbasetemp() / "gh_22819.pyf"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def hello_world_f77(tmpdir_factory):
+    """Generates a single f77 file for testing"""
+    fdat = util.getpath("tests", "src", "cli", "hi77.f").read_text()
+    fn = tmpdir_factory.getbasetemp() / "hello.f"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def retreal_f77(tmpdir_factory):
+    """Generates a single f77 file for testing"""
+    fdat = util.getpath("tests", "src", "return_real", "foo77.f").read_text()
+    fn = tmpdir_factory.getbasetemp() / "foo.f"
+    fn.write_text(fdat, encoding="ascii")
+    return fn
+
+
+@pytest.fixture(scope="session")
+def f2cmap_f90(tmpdir_factory):
+    """Generates a single f90 file for testing"""
+    fdat = util.getpath("tests", "src", "f2cmap", "isoFortranEnvMap.f90").read_text()
+    f2cmap = util.getpath("tests", "src", "f2cmap", ".f2py_f2cmap").read_text()
+    fn = tmpdir_factory.getbasetemp() / "f2cmap.f90"
+    fmap = tmpdir_factory.getbasetemp() / "mapfile"
+    fn.write_text(fdat, encoding="ascii")
+    fmap.write_text(f2cmap, encoding="ascii")
+    return fn
+
+
+#########
+# Tests #
+#########
+
+def test_gh22819_cli(capfd, gh22819_cli, monkeypatch):
+    """Check that module names are handled correctly
+    gh-22819
+    Essentially, the -m name cannot be used to import the module, so the module
+    named in the .pyf needs to be used instead
+
+    CLI :: -m and a .pyf file
+    """
+    ipath = Path(gh22819_cli)
+    monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath}".split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        gen_paths = [item.name for item in ipath.parent.rglob("*") if item.is_file()]
+        assert "blahmodule.c" not in gen_paths  # shouldn't be generated
+        assert "blah-f2pywrappers.f" not in gen_paths
+        assert "test_22819-f2pywrappers.f" in gen_paths
+        assert "test_22819module.c" in gen_paths
+
+
+def test_gh22819_many_pyf(capfd, gh22819_cli, monkeypatch):
+ """Only one .pyf file allowed + gh-22819 + CLI :: .pyf files + """ + ipath = Path(gh22819_cli) + monkeypatch.setattr(sys, "argv", f"f2py -m blah {ipath} hello.pyf".split()) + with util.switchdir(ipath.parent): + with pytest.raises(ValueError, match="Only one .pyf file per call"): + f2pycli() + + +def test_gh23598_warn(capfd, gh23598_warn, monkeypatch): + foutl = get_io_paths(gh23598_warn, mname="test") + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate files + wrapper = foutl.wrap90.read_text() + assert "intproductf2pywrap, intpr" not in wrapper + + +def test_gen_pyf(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file is generated via the CLI + CLI :: -h + """ + ipath = Path(hello_world_f90) + opath = Path(hello_world_f90).stem + ".pyf" + monkeypatch.setattr(sys, "argv", f'f2py -h {opath} {ipath}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() # Generate wrappers + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert Path(f'{opath}').exists() + + +def test_gen_pyf_stdout(capfd, hello_world_f90, monkeypatch): + """Ensures that a signature file can be dumped to stdout + CLI :: -h + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h stdout {ipath}'.split()) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + assert "function hi() ! in " in out + + +def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): + """Ensures that the CLI refuses to overwrite signature files + CLI :: -h without --overwrite-signature + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -h faker.pyf {ipath}'.split()) + + with util.switchdir(ipath.parent): + Path("faker.pyf").write_text("Fake news", encoding="ascii") + with pytest.raises(SystemExit): + f2pycli() # Refuse to overwrite + _, err = capfd.readouterr() + assert "Use --overwrite-signature to overwrite" in err + + +@pytest.mark.skipif(sys.version_info <= (3, 12), reason="Python 3.12 required") +def test_untitled_cli(capfd, hello_world_f90, monkeypatch): + """Check that modules are named correctly + + CLI :: defaults + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f"f2py --backend meson -c {ipath}".split()) + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + out, _ = capfd.readouterr() + assert "untitledmodule.c" in out + + +@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), reason='Compiler and 3.12 required') +def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): + """Check that no distutils imports are performed on 3.12 + CLI :: --fcompiler --help-link --backend distutils + """ + MNAME = "hi" + foutl = get_io_paths(hello_world_f90, mname=MNAME) + ipath = foutl.f90inp + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() + ) + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + out, _ = capfd.readouterr() + assert "--fcompiler cannot be used with meson" in out + monkeypatch.setattr( + sys, "argv", ["f2py", "--help-link"] + ) + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Use --dep for meson builds" in out + MNAME = "hi2" # Needs to be different for a new -c + monkeypatch.setattr( + sys, "argv", f"f2py {ipath} -c -m {MNAME} --backend distutils".split() + ) + with 
util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Cannot use distutils backend with Python>=3.12" in out + + +@pytest.mark.xfail +def test_f2py_skip(capfd, retreal_f77, monkeypatch): + """Tests that functions can be skipped + CLI :: skip: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + remaining = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test skip: {toskip}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not found the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in remaining.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_f2py_only(capfd, retreal_f77, monkeypatch): + """Test that functions can be kept by only: + CLI :: only: + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + tokeep = "td s0" + monkeypatch.setattr( + sys, "argv", + f'f2py {ipath} -m test only: {tokeep}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' + in err) + for rkey in tokeep.split(): + assert f'Constructing wrapper function "{rkey}"' in out + + +def test_file_processing_switch(capfd, hello_world_f90, retreal_f77, + monkeypatch): + """Tests that it is possible to return to file processing mode + CLI :: : + BUG: numpy-gh #20520 + """ + foutl = get_io_paths(retreal_f77, mname="test") + ipath = foutl.finp + toskip = "t0 t4 t8 sd s8 s4" + ipath2 = Path(hello_world_f90) + tokeep = "td s0 hi" # hi is in ipath2 + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py {ipath} -m {mname} only: {tokeep} : {ipath2}'.split( + ), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, err = capfd.readouterr() + for skey in toskip.split(): + assert ( + f'buildmodule: Could not find the body of interfaced routine "{skey}". Skipping.' 
+                in err)
+        for rkey in tokeep.split():
+            assert f'Constructing wrapper function "{rkey}"' in out
+
+
+def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):
+    """Checks the generation of files based on a module name
+    CLI :: -m
+    """
+    MNAME = "hi"
+    foutl = get_io_paths(hello_world_f90, mname=MNAME)
+    ipath = foutl.f90inp
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME}'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+
+    # Always generate C module
+    assert Path.exists(foutl.cmodf)
+    # File contains a function, check for F77 wrappers
+    assert Path.exists(foutl.wrap77)
+
+
+def test_mod_gen_gh25263(capfd, hello_world_f77, monkeypatch):
+    """Check that pyf files are correctly generated with module structure
+    CLI :: -m -h pyf_file
+    BUG: numpy-gh #20520
+    """
+    MNAME = "hi"
+    foutl = get_io_paths(hello_world_f77, mname=MNAME)
+    ipath = foutl.finp
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m {MNAME} -h hi.pyf'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        with Path('hi.pyf').open() as hipyf:
+            pyfdat = hipyf.read()
+            assert "python module hi" in pyfdat
+
+
+def test_lower_cmod(capfd, hello_world_f77, monkeypatch):
+    """Lowers cases by flag or when -h is present
+
+    CLI :: --[no-]lower
+    """
+    foutl = get_io_paths(hello_world_f77, mname="test")
+    ipath = foutl.finp
+    capshi = re.compile(r"HI\(\)")
+    capslo = re.compile(r"hi\(\)")
+    # Case I: --lower is passed
+    monkeypatch.setattr(sys, "argv", f'f2py {ipath} -m test --lower'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is not None
+        assert capshi.search(out) is None
+    # Case II: --no-lower is passed
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py {ipath} -m test --no-lower'.split())
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is None
+        assert capshi.search(out) is not None
+
+
+def test_lower_sig(capfd, hello_world_f77, monkeypatch):
+    """Lowers cases in signature files by flag or when -h is present
+
+    CLI :: --[no-]lower -h
+    """
+    foutl = get_io_paths(hello_world_f77, mname="test")
+    ipath = foutl.finp
+    # Signature files
+    capshi = re.compile(r"Block: HI")
+    capslo = re.compile(r"Block: hi")
+    # Case I: --lower is implied by -h
+    # TODO: Clean up to prevent passing --overwrite-signature
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature'.split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is not None
+        assert capshi.search(out) is None
+
+    # Case II: --no-lower overrides -h
+    monkeypatch.setattr(
+        sys,
+        "argv",
+        f'f2py {ipath} -h {foutl.pyf} -m test --overwrite-signature --no-lower'
+        .split(),
+    )
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert capslo.search(out) is None
+        assert capshi.search(out) is not None
+
+
+def test_build_dir(capfd, hello_world_f90, monkeypatch):
+    """Ensures that the build directory can be specified
+
+    CLI :: --build-dir
+    """
+    ipath = Path(hello_world_f90)
+    mname = "blah"
+    odir = "tttmp"
+    monkeypatch.setattr(sys, "argv",
+                        f'f2py -m {mname} {ipath} --build-dir {odir}'.split())
+
+    with util.switchdir(ipath.parent):
+        f2pycli()
+        out, _ = capfd.readouterr()
+        assert f"Wrote C/API module \"{mname}\"" in out
+
+
+def test_overwrite(capfd, hello_world_f90, monkeypatch):
+    """Ensures that signature files can be overwritten
+
+    CLI :: --overwrite-signature
+    """
+    ipath =
Path(hello_world_f90) + monkeypatch.setattr( + sys, "argv", + f'f2py -h faker.pyf {ipath} --overwrite-signature'.split()) + + with util.switchdir(ipath.parent): + Path("faker.pyf").write_text("Fake news", encoding="ascii") + f2pycli() + out, _ = capfd.readouterr() + assert "Saving signatures to file" in out + + +def test_latexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that TeX documentation is written out + + CLI :: --latex-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --latex-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" in out + with Path(f"{mname}module.tex").open() as otex: + assert "\\documentclass" in otex.read() + + +def test_nolatexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that TeX documentation is written out + + CLI :: --no-latex-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-latex-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" not in out + + +def test_shortlatex(capfd, hello_world_f90, monkeypatch): + """Ensures that truncated documentation is written out + + TODO: Test to ensure this has no effect without --latex-doc + CLI :: --latex-doc --short-latex + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Documentation is saved to file" in out + with Path(f"./{mname}module.tex").open() as otex: + assert "\\documentclass" not in otex.read() + + +def test_restdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that RsT documentation is written out + + CLI :: --rest-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --rest-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "ReST Documentation is saved to file" in out + with Path(f"./{mname}module.rest").open() as orst: + assert r".. 
-*- rest -*-" in orst.read() + + +def test_norestexdoc(capfd, hello_world_f90, monkeypatch): + """Ensures that TeX documentation is written out + + CLI :: --no-rest-doc + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-rest-doc'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "ReST Documentation is saved to file" not in out + + +def test_debugcapi(capfd, hello_world_f90, monkeypatch): + """Ensures that debugging wrappers are written + + CLI :: --debug-capi + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --debug-capi'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + with Path(f"./{mname}module.c").open() as ocmod: + assert r"#define DEBUGCFUNCS" in ocmod.read() + + +@pytest.mark.skip(reason="Consistently fails on CI; noisy so skip not xfail.") +def test_debugcapi_bld(hello_world_f90, monkeypatch): + """Ensures that debugging wrappers work + + CLI :: --debug-capi -c + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} -c --debug-capi'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + eerr = textwrap.dedent("""\ +debug-capi:Python C/API function blah.hi() +debug-capi:float hi=:output,hidden,scalar +debug-capi:hi=0 +debug-capi:Fortran subroutine `f2pywraphi(&hi)' +debug-capi:hi=0 +debug-capi:Building return value. +debug-capi:Python C/API function blah.hi: successful. +debug-capi:Freeing memory. + """) + assert rout.stdout == eout + assert rout.stderr == eerr + + +def test_wrapfunc_def(capfd, hello_world_f90, monkeypatch): + """Ensures that fortran subroutine wrappers for F77 are included by default + + CLI :: --[no]-wrap-functions + """ + # Implied + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath}'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" in out + + # Explicit + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --wrap-functions'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" in out + + +def test_nowrapfunc(capfd, hello_world_f90, monkeypatch): + """Ensures that fortran subroutine wrappers for F77 can be disabled + + CLI :: --no-wrap-functions + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr(sys, "argv", + f'f2py -m {mname} {ipath} --no-wrap-functions'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert r"Fortran 77 wrappers are saved to" not in out + + +def test_inclheader(capfd, hello_world_f90, monkeypatch): + """Add to the include directories + + CLI :: -include + TODO: Document this in the help string + """ + ipath = Path(hello_world_f90) + mname = "blah" + monkeypatch.setattr( + sys, + "argv", + f'f2py -m {mname} {ipath} -include -include '. 
+ split(), + ) + + with util.switchdir(ipath.parent): + f2pycli() + with Path(f"./{mname}module.c").open() as ocmod: + ocmr = ocmod.read() + assert "#include " in ocmr + assert "#include " in ocmr + + +def test_inclpath(): + """Add to the include directories + + CLI :: --include-paths + """ + # TODO: populate + pass + + +def test_hlink(): + """Add to the include directories + + CLI :: --help-link + """ + # TODO: populate + pass + + +def test_f2cmap(capfd, f2cmap_f90, monkeypatch): + """Check that Fortran-to-Python KIND specs can be passed + + CLI :: --f2cmap + """ + ipath = Path(f2cmap_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --f2cmap mapfile'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "Reading f2cmap from 'mapfile' ..." in out + assert "Mapping \"real(kind=real32)\" to \"float\"" in out + assert "Mapping \"real(kind=real64)\" to \"double\"" in out + assert "Mapping \"integer(kind=int64)\" to \"long_long\"" in out + assert "Successfully applied user defined f2cmap changes" in out + + +def test_quiet(capfd, hello_world_f90, monkeypatch): + """Reduce verbosity + + CLI :: --quiet + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --quiet'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert len(out) == 0 + + +def test_verbose(capfd, hello_world_f90, monkeypatch): + """Increase verbosity + + CLI :: --verbose + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} --verbose'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + out, _ = capfd.readouterr() + assert "analyzeline" in out + + +def test_version(capfd, monkeypatch): + """Ensure version + + CLI :: -v + """ + monkeypatch.setattr(sys, "argv", ["f2py", "-v"]) + # TODO: f2py2e should not call sys.exit() after printing the version + with pytest.raises(SystemExit): + f2pycli() + out, _ = capfd.readouterr() + import numpy as np + assert np.__version__ == out.strip() + + +@pytest.mark.skip(reason="Consistently fails on CI; noisy so skip not xfail.") +def test_npdistop(hello_world_f90, monkeypatch): + """ + CLI :: -c + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c'.split()) + + with util.switchdir(ipath.parent): + f2pycli() + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + + +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_no_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --no-freethreading-compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is True\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + if NOGIL_BUILD: + assert "The global interpreter lock (GIL) has been enabled to load module 'blah'" in rout.stderr + assert rout.returncode == 0 + + +@pytest.mark.skipif((platform.system() != 
'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --freethreading_compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is False\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + assert rout.stderr == "" + assert rout.returncode == 0 + + +# Numpy distutils flags +# TODO: These should be tested separately + +def test_npd_fcompiler(): + """ + CLI :: -c --fcompiler + """ + # TODO: populate + pass + + +def test_npd_compiler(): + """ + CLI :: -c --compiler + """ + # TODO: populate + pass + + +def test_npd_help_fcompiler(): + """ + CLI :: -c --help-fcompiler + """ + # TODO: populate + pass + + +def test_npd_f77exec(): + """ + CLI :: -c --f77exec + """ + # TODO: populate + pass + + +def test_npd_f90exec(): + """ + CLI :: -c --f90exec + """ + # TODO: populate + pass + + +def test_npd_f77flags(): + """ + CLI :: -c --f77flags + """ + # TODO: populate + pass + + +def test_npd_f90flags(): + """ + CLI :: -c --f90flags + """ + # TODO: populate + pass + + +def test_npd_opt(): + """ + CLI :: -c --opt + """ + # TODO: populate + pass + + +def test_npd_arch(): + """ + CLI :: -c --arch + """ + # TODO: populate + pass + + +def test_npd_noopt(): + """ + CLI :: -c --noopt + """ + # TODO: populate + pass + + +def test_npd_noarch(): + """ + CLI :: -c --noarch + """ + # TODO: populate + pass + + +def test_npd_debug(): + """ + CLI :: -c --debug + """ + # TODO: populate + pass + + +def test_npd_link_auto(): + """ + CLI :: -c --link- + """ + # TODO: populate + pass + + +def test_npd_lib(): + """ + CLI :: -c -L/path/to/lib/ -l + """ + # TODO: populate + pass + + +def test_npd_define(): + """ + CLI :: -D + """ + # TODO: populate + pass + + +def test_npd_undefine(): + """ + CLI :: -U + """ + # TODO: populate + pass + + +def test_npd_incl(): + """ + CLI :: -I/path/to/include/ + """ + # TODO: populate + pass + + +def test_npd_linker(): + """ + CLI :: .o .so .a + """ + # TODO: populate + pass diff --git a/python/numpy/f2py/tests/test_isoc.py b/python/numpy/f2py/tests/test_isoc.py new file mode 100644 index 000000000..f3450f15f --- /dev/null +++ b/python/numpy/f2py/tests/test_isoc.py @@ -0,0 +1,56 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose + +from . 
import util + + +class TestISOC(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "isocintrin", "isoCtests.f90"), + ] + + # gh-24553 + @pytest.mark.slow + def test_c_double(self): + out = self.module.coddity.c_add(1, 2) + exp_out = 3 + assert out == exp_out + + # gh-9693 + def test_bindc_function(self): + out = self.module.coddity.wat(1, 20) + exp_out = 8 + assert out == exp_out + + # gh-25207 + def test_bindc_kinds(self): + out = self.module.coddity.c_add_int64(1, 20) + exp_out = 21 + assert out == exp_out + + # gh-25207 + def test_bindc_add_arr(self): + a = np.array([1, 2, 3]) + b = np.array([1, 2, 3]) + out = self.module.coddity.add_arr(a, b) + exp_out = a * 2 + assert_allclose(out, exp_out) + + +def test_process_f2cmap_dict(): + from numpy.f2py.auxfuncs import process_f2cmap_dict + + f2cmap_all = {"integer": {"8": "rubbish_type"}} + new_map = {"INTEGER": {"4": "int"}} + c2py_map = {"int": "int", "rubbish_type": "long"} + + exp_map, exp_maptyp = ({"integer": {"8": "rubbish_type", "4": "int"}}, ["int"]) + + # Call the function + res_map, res_maptyp = process_f2cmap_dict(f2cmap_all, new_map, c2py_map) + + # Assert the result is as expected + assert res_map == exp_map + assert res_maptyp == exp_maptyp diff --git a/python/numpy/f2py/tests/test_kind.py b/python/numpy/f2py/tests/test_kind.py new file mode 100644 index 000000000..ce223a555 --- /dev/null +++ b/python/numpy/f2py/tests/test_kind.py @@ -0,0 +1,53 @@ +import platform +import sys + +import pytest + +from numpy.f2py.crackfortran import ( + _selected_int_kind_func as selected_int_kind, +) +from numpy.f2py.crackfortran import ( + _selected_real_kind_func as selected_real_kind, +) + +from . import util + + +class TestKind(util.F2PyTest): + sources = [util.getpath("tests", "src", "kind", "foo.f90")] + + @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, + reason="Fails for 32 bit machines") + def test_int(self): + """Test `int` kind_func for integers up to 10**40.""" + selectedintkind = self.module.selectedintkind + + for i in range(40): + assert selectedintkind(i) == selected_int_kind( + i + ), f"selectedintkind({i}): expected {selected_int_kind(i)!r} but got {selectedintkind(i)!r}" + + def test_real(self): + """ + Test (processor-dependent) `real` kind_func for real numbers + of up to 31 digits precision (extended/quadruple). + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" + + @pytest.mark.xfail(platform.machine().lower().startswith("ppc"), + reason="Some PowerPC may not support full IEEE 754 precision") + def test_quad_precision(self): + """ + Test kind_func for quadruple precision [`real(16)`] of 32+ digits . + """ + selectedrealkind = self.module.selectedrealkind + + for i in range(32, 40): + assert selectedrealkind(i) == selected_real_kind( + i + ), f"selectedrealkind({i}): expected {selected_real_kind(i)!r} but got {selectedrealkind(i)!r}" diff --git a/python/numpy/f2py/tests/test_mixed.py b/python/numpy/f2py/tests/test_mixed.py new file mode 100644 index 000000000..07f43e2bc --- /dev/null +++ b/python/numpy/f2py/tests/test_mixed.py @@ -0,0 +1,35 @@ +import textwrap + +import pytest + +from numpy.testing import IS_PYPY + +from . 
import util + + +class TestMixed(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "mixed", "foo.f"), + util.getpath("tests", "src", "mixed", "foo_fixed.f90"), + util.getpath("tests", "src", "mixed", "foo_free.f90"), + ] + + @pytest.mark.slow + def test_all(self): + assert self.module.bar11() == 11 + assert self.module.foo_fixed.bar12() == 12 + assert self.module.foo_free.bar13() == 13 + + @pytest.mark.xfail(IS_PYPY, + reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_docstring(self): + expected = textwrap.dedent("""\ + a = bar11() + + Wrapper for ``bar11``. + + Returns + ------- + a : int + """) + assert self.module.bar11.__doc__ == expected diff --git a/python/numpy/f2py/tests/test_modules.py b/python/numpy/f2py/tests/test_modules.py new file mode 100644 index 000000000..96d5ffc66 --- /dev/null +++ b/python/numpy/f2py/tests/test_modules.py @@ -0,0 +1,83 @@ +import textwrap + +import pytest + +from numpy.testing import IS_PYPY + +from . import util + + +@pytest.mark.slow +class TestModuleFilterPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_one_public_routine.f90" + ) + ] + # we filter the only public function mod2 + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleWithoutPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_no_public_entities.f90" + ) + ] + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleDocString(util.F2PyTest): + sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] + + @pytest.mark.xfail(IS_PYPY, reason="PyPy cannot modify tp_doc after PyType_Ready") + def test_module_docstring(self): + assert self.module.mod.__doc__ == textwrap.dedent( + """\ + i : 'i'-scalar + x : 'i'-array(4) + a : 'f'-array(2,3) + b : 'f'-array(-1,-1), not allocated\x00 + foo()\n + Wrapper for ``foo``.\n\n""" + ) + + +@pytest.mark.slow +class TestModuleAndSubroutine(util.F2PyTest): + module_name = "example" + sources = [ + util.getpath("tests", "src", "modules", "gh25337", "data.f90"), + util.getpath("tests", "src", "modules", "gh25337", "use_data.f90"), + ] + + def test_gh25337(self): + self.module.data.set_shift(3) + assert "data" in dir(self.module) + + +@pytest.mark.slow +class TestUsedModule(util.F2PyTest): + module_name = "fmath" + sources = [ + util.getpath("tests", "src", "modules", "use_modules.f90"), + ] + + def test_gh25867(self): + compiled_mods = [x for x in dir(self.module) if "__" not in x] + assert "useops" in compiled_mods + assert self.module.useops.sum_and_double(3, 7) == 20 + assert "mathops" in compiled_mods + assert self.module.mathops.add(3, 7) == 10 diff --git a/python/numpy/f2py/tests/test_parameter.py b/python/numpy/f2py/tests/test_parameter.py new file mode 100644 index 000000000..513d02100 --- /dev/null +++ b/python/numpy/f2py/tests/test_parameter.py @@ -0,0 +1,129 @@ +import pytest + +import numpy as np + +from . 
import util + + +class TestParameters(util.F2PyTest): + # Check that intent(in out) translates as intent(inout) + sources = [ + util.getpath("tests", "src", "parameter", "constant_real.f90"), + util.getpath("tests", "src", "parameter", "constant_integer.f90"), + util.getpath("tests", "src", "parameter", "constant_both.f90"), + util.getpath("tests", "src", "parameter", "constant_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_non_compound.f90"), + util.getpath("tests", "src", "parameter", "constant_array.f90"), + ] + + @pytest.mark.slow + def test_constant_real_single(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float32)[::2] + pytest.raises(ValueError, self.module.foo_single, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float32) + self.module.foo_single(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_real_double(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_double, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_double(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_compound_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_compound_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 * 6, 1, 2]) + + @pytest.mark.slow + def test_constant_non_compound_int(self): + # check values + x = np.arange(4, dtype=np.int32) + self.module.foo_non_compound_int(x) + assert np.allclose(x, [0 + 1 + 2 + 3 * 4, 1, 2, 3]) + + @pytest.mark.slow + def test_constant_integer_int(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int32)[::2] + pytest.raises(ValueError, self.module.foo_int, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int32) + self.module.foo_int(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_integer_long(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.int64)[::2] + pytest.raises(ValueError, self.module.foo_long, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.int64) + self.module.foo_long(x) + assert np.allclose(x, [0 + 1 + 2 * 3, 1, 2]) + + @pytest.mark.slow + def test_constant_both(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_no(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_no, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_no(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + @pytest.mark.slow + def test_constant_sum(self): + # non-contiguous should raise error + x = np.arange(6, dtype=np.float64)[::2] + pytest.raises(ValueError, self.module.foo_sum, x) + + # check values with contiguous array + x = np.arange(3, dtype=np.float64) + self.module.foo_sum(x) + assert np.allclose(x, [0 + 1 * 3 * 3 + 2 * 3 * 3, 1 * 3, 2 * 3]) + + def 
test_constant_array(self): + x = np.arange(3, dtype=np.float64) + y = np.arange(5, dtype=np.float64) + z = self.module.foo_array(x, y) + assert np.allclose(x, [0.0, 1. / 10, 2. / 10]) + assert np.allclose(y, [0.0, 1. * 10, 2. * 10, 3. * 10, 4. * 10]) + assert np.allclose(z, 19.0) + + def test_constant_array_any_index(self): + x = np.arange(6, dtype=np.float64) + y = self.module.foo_array_any_index(x) + assert np.allclose(y, x.reshape((2, 3), order='F')) + + def test_constant_array_delims(self): + x = self.module.foo_array_delims() + assert x == 9 diff --git a/python/numpy/f2py/tests/test_pyf_src.py b/python/numpy/f2py/tests/test_pyf_src.py new file mode 100644 index 000000000..2ecb0fbeb --- /dev/null +++ b/python/numpy/f2py/tests/test_pyf_src.py @@ -0,0 +1,43 @@ +# This test is ported from numpy.distutils +from numpy.f2py._src_pyf import process_str +from numpy.testing import assert_equal + +pyf_src = """ +python module foo + <_rd=real,double precision> + interface + subroutine foosub(tol) + <_rd>, intent(in,out) :: tol + end subroutine foosub + end interface +end python module foo +""" + +expected_pyf = """ +python module foo + interface + subroutine sfoosub(tol) + real, intent(in,out) :: tol + end subroutine sfoosub + subroutine dfoosub(tol) + double precision, intent(in,out) :: tol + end subroutine dfoosub + end interface +end python module foo +""" + + +def normalize_whitespace(s): + """ + Remove leading and trailing whitespace, and convert internal + stretches of whitespace to a single space. + """ + return ' '.join(s.split()) + + +def test_from_template(): + """Regression test for gh-10712.""" + pyf = process_str(pyf_src) + normalized_pyf = normalize_whitespace(pyf) + normalized_expected_pyf = normalize_whitespace(expected_pyf) + assert_equal(normalized_pyf, normalized_expected_pyf) diff --git a/python/numpy/f2py/tests/test_quoted_character.py b/python/numpy/f2py/tests/test_quoted_character.py new file mode 100644 index 000000000..3cbcb3c55 --- /dev/null +++ b/python/numpy/f2py/tests/test_quoted_character.py @@ -0,0 +1,18 @@ +"""See https://github.com/numpy/numpy/pull/10676. + +""" +import sys + +import pytest + +from . import util + + +class TestQuotedCharacter(util.F2PyTest): + sources = [util.getpath("tests", "src", "quoted_character", "foo.f")] + + @pytest.mark.skipif(sys.platform == "win32", + reason="Fails with MinGW64 Gfortran (Issue #9673)") + @pytest.mark.slow + def test_quoted_character(self): + assert self.module.foo() == (b"'", b'"', b";", b"!", b"(", b")") diff --git a/python/numpy/f2py/tests/test_regression.py b/python/numpy/f2py/tests/test_regression.py new file mode 100644 index 000000000..93eb29e8e --- /dev/null +++ b/python/numpy/f2py/tests/test_regression.py @@ -0,0 +1,187 @@ +import os +import platform + +import pytest + +import numpy as np +import numpy.testing as npt + +from . 
import util


class TestIntentInOut(util.F2PyTest):
    # Check that intent(in out) translates as intent(inout)
    sources = [util.getpath("tests", "src", "regression", "inout.f90")]

    @pytest.mark.slow
    def test_inout(self):
        # non-contiguous should raise error
        x = np.arange(6, dtype=np.float32)[::2]
        pytest.raises(ValueError, self.module.foo, x)

        # check values with contiguous array
        x = np.arange(3, dtype=np.float32)
        self.module.foo(x)
        assert np.allclose(x, [3, 1, 2])


class TestDataOnlyMultiModule(util.F2PyTest):
    # Check that modules without subroutines work
    sources = [util.getpath("tests", "src", "regression", "datonly.f90")]

    @pytest.mark.slow
    def test_mdat(self):
        assert self.module.datonly.max_value == 100
        assert self.module.dat.max_ == 1009
        int_in = 5
        assert self.module.simple_subroutine(int_in) == 1014


class TestModuleWithDerivedType(util.F2PyTest):
    # Check that modules with derived types work
    sources = [util.getpath("tests", "src", "regression", "mod_derived_types.f90")]

    @pytest.mark.slow
    def test_mtypes(self):
        assert self.module.no_type_subroutine(10) == 110
        assert self.module.type_subroutine(10) == 210


class TestNegativeBounds(util.F2PyTest):
    # Check that negative bounds work correctly
    sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")]

    @pytest.mark.slow
    def test_negbound(self):
        xvec = np.arange(12)
        xlow = -6
        xhigh = 4

        # Calculate the upper bound,
        # keeping the 1-based indexing in mind

        def ubound(xl, xh):
            return xh - xl + 1
        rval = self.module.foo(is_=xlow, ie_=xhigh,
                               arr=xvec[:ubound(xlow, xhigh)])
        expval = np.arange(11, dtype=np.float32)
        assert np.allclose(rval, expval)


class TestNumpyVersionAttribute(util.F2PyTest):
    # Check that the attribute __f2py_numpy_version__ is present
    # in the compiled module and that it has the value np.__version__.
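+    # With a module built as e.g. `blah` (name assumed for illustration),
+    # the test below amounts to:
+    #
+    #   import blah
+    #   assert blah.__f2py_numpy_version__ == np.__version__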
+ sources = [util.getpath("tests", "src", "regression", "inout.f90")] + + @pytest.mark.slow + def test_numpy_version_attribute(self): + + # Check that self.module has an attribute named "__f2py_numpy_version__" + assert hasattr(self.module, "__f2py_numpy_version__") + + # Check that the attribute __f2py_numpy_version__ is a string + assert isinstance(self.module.__f2py_numpy_version__, str) + + # Check that __f2py_numpy_version__ has the value numpy.__version__ + assert np.__version__ == self.module.__f2py_numpy_version__ + + +def test_include_path(): + incdir = np.f2py.get_include() + fnames_in_dir = os.listdir(incdir) + for fname in ("fortranobject.c", "fortranobject.h"): + assert fname in fnames_in_dir + + +class TestIncludeFiles(util.F2PyTest): + sources = [util.getpath("tests", "src", "regression", "incfile.f90")] + options = [f"-I{util.getpath('tests', 'src', 'regression')}", + f"--include-paths {util.getpath('tests', 'src', 'regression')}"] + + @pytest.mark.slow + def test_gh25344(self): + exp = 7.0 + res = self.module.add(3.0, 4.0) + assert exp == res + +class TestF77Comments(util.F2PyTest): + # Check that comments are stripped from F77 continuation lines + sources = [util.getpath("tests", "src", "regression", "f77comments.f")] + + @pytest.mark.slow + def test_gh26148(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 + + @pytest.mark.slow + def test_gh26466(self): + # Check that comments after PARAMETER directions are stripped + expected = np.arange(1, 11, dtype=np.float32) * 2 + res = self.module.testsub2() + npt.assert_allclose(expected, res) + +class TestF90Contiuation(util.F2PyTest): + # Check that comments are stripped from F90 continuation lines + sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")] + + @pytest.mark.slow + def test_gh26148b(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res = self.module.testsub(x1, x2) + assert res[0] == 8 + assert res[1] == 15 + +class TestLowerF2PYDirectives(util.F2PyTest): + # Check variables are cased correctly + sources = [util.getpath("tests", "src", "regression", "lower_f2py_fortran.f90")] + + @pytest.mark.slow + def test_gh28014(self): + self.module.inquire_next(3) + assert True + +@pytest.mark.slow +def test_gh26623(): + # Including libraries with . 
should not generate an incorrect meson.build + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f90continuation.f90")], + ["-lfoo.bar"], + module_name="Blah", + ) + except RuntimeError as rerr: + assert "lparen got assign" not in str(rerr) + + +@pytest.mark.slow +@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') +def test_gh25784(): + # Compile dubious file using passed flags + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f77fixedform.f95")], + options=[ + # Meson will collect and dedup these to pass to fortran_args: + "--f77flags='-ffixed-form -O2'", + "--f90flags=\"-ffixed-form -Og\"", + ], + module_name="Blah", + ) + except ImportError as rerr: + assert "unknown_subroutine_" in str(rerr) + + +@pytest.mark.slow +class TestAssignmentOnlyModules(util.F2PyTest): + # Ensure that variables are exposed without functions or subroutines in a module + sources = [util.getpath("tests", "src", "regression", "assignOnlyModule.f90")] + + @pytest.mark.slow + def test_gh27167(self): + assert (self.module.f_globals.n_max == 16) + assert (self.module.f_globals.i_max == 18) + assert (self.module.f_globals.j_max == 72) diff --git a/python/numpy/f2py/tests/test_return_character.py b/python/numpy/f2py/tests/test_return_character.py new file mode 100644 index 000000000..aae3f0f91 --- /dev/null +++ b/python/numpy/f2py/tests/test_return_character.py @@ -0,0 +1,48 @@ +import platform + +import pytest + +from numpy import array + +from . import util + +IS_S390X = platform.machine() == "s390x" + + +@pytest.mark.slow +class TestReturnCharacter(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t1", "s0", "s1"]: + assert t("23") == b"2" + r = t("ab") + assert r == b"a" + r = t(array("ab")) + assert r == b"a" + r = t(array(77, "u1")) + assert r == b"M" + elif tname in ["ts", "ss"]: + assert t(23) == b"23" + assert t("123456789abcdef") == b"123456789a" + elif tname in ["t5", "s5"]: + assert t(23) == b"23" + assert t("ab") == b"ab" + assert t("123456789abcdef") == b"12345" + else: + raise NotImplementedError + + +class TestFReturnCharacter(TestReturnCharacter): + sources = [ + util.getpath("tests", "src", "return_character", "foo77.f"), + util.getpath("tests", "src", "return_character", "foo90.f90"), + ] + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "s0", "s1", "s5", "ss"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.xfail(IS_S390X, reason="callback returns ' '") + @pytest.mark.parametrize("name", ["t0", "t1", "t5", "ts", "s0", "s1", "s5", "ss"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_char, name), name) diff --git a/python/numpy/f2py/tests/test_return_complex.py b/python/numpy/f2py/tests/test_return_complex.py new file mode 100644 index 000000000..aa3f28e67 --- /dev/null +++ b/python/numpy/f2py/tests/test_return_complex.py @@ -0,0 +1,67 @@ +import pytest + +from numpy import array + +from . 
import util + + +@pytest.mark.slow +class TestReturnComplex(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t8", "s0", "s8"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234j) - 234.0j) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6 + 3j) - (234.6 + 3j)) <= err + # assert abs(t('234')-234.)<=err + # assert abs(t('234.6')-234.6)<=err + assert abs(t(-234) + 234.0) <= err + assert abs(t([234]) - 234.0) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(23 + 4j, "F")) - (23 + 4j)) <= err + assert abs(t(array([234])) - 234.0) <= err + assert abs(t(array([[234]])) - 234.0) <= err + assert abs(t(array([234]).astype("b")) + 22.0) <= err + assert abs(t(array([234], "h")) - 234.0) <= err + assert abs(t(array([234], "i")) - 234.0) <= err + assert abs(t(array([234], "l")) - 234.0) <= err + assert abs(t(array([234], "q")) - 234.0) <= err + assert abs(t(array([234], "f")) - 234.0) <= err + assert abs(t(array([234], "d")) - 234.0) <= err + assert abs(t(array([234 + 3j], "F")) - (234 + 3j)) <= err + assert abs(t(array([234], "D")) - 234.0) <= err + + # pytest.raises(TypeError, t, array([234], 'S1')) + pytest.raises(TypeError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(TypeError, t, t) + pytest.raises(TypeError, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["(inf+0j)", "(Infinity+0j)"] + except OverflowError: + pass + + +class TestFReturnComplex(TestReturnComplex): + sources = [ + util.getpath("tests", "src", "return_complex", "foo77.f"), + util.getpath("tests", "src", "return_complex", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", ["t0", "t8", "t16", "td", "s0", "s8", "s16", "sd"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_complex, name), + name) diff --git a/python/numpy/f2py/tests/test_return_integer.py b/python/numpy/f2py/tests/test_return_integer.py new file mode 100644 index 000000000..13a9f862f --- /dev/null +++ b/python/numpy/f2py/tests/test_return_integer.py @@ -0,0 +1,55 @@ +import pytest + +from numpy import array + +from . 
import util + + +@pytest.mark.slow +class TestReturnInteger(util.F2PyTest): + def check_function(self, t, tname): + assert t(123) == 123 + assert t(123.6) == 123 + assert t("123") == 123 + assert t(-123) == -123 + assert t([123]) == 123 + assert t((123, )) == 123 + assert t(array(123)) == 123 + assert t(array(123, "b")) == 123 + assert t(array(123, "h")) == 123 + assert t(array(123, "i")) == 123 + assert t(array(123, "l")) == 123 + assert t(array(123, "B")) == 123 + assert t(array(123, "f")) == 123 + assert t(array(123, "d")) == 123 + + # pytest.raises(ValueError, t, array([123],'S3')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + if tname in ["t8", "s8"]: + pytest.raises(OverflowError, t, 100000000000000000000000) + pytest.raises(OverflowError, t, 10000000011111111111111.23) + + +class TestFReturnInteger(TestReturnInteger): + sources = [ + util.getpath("tests", "src", "return_integer", "foo77.f"), + util.getpath("tests", "src", "return_integer", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_integer, name), + name) diff --git a/python/numpy/f2py/tests/test_return_logical.py b/python/numpy/f2py/tests/test_return_logical.py new file mode 100644 index 000000000..a4a339572 --- /dev/null +++ b/python/numpy/f2py/tests/test_return_logical.py @@ -0,0 +1,65 @@ +import pytest + +from numpy import array + +from . 
import util + + +class TestReturnLogical(util.F2PyTest): + def check_function(self, t): + assert t(True) == 1 + assert t(False) == 0 + assert t(0) == 0 + assert t(None) == 0 + assert t(0.0) == 0 + assert t(0j) == 0 + assert t(1j) == 1 + assert t(234) == 1 + assert t(234.6) == 1 + assert t(234.6 + 3j) == 1 + assert t("234") == 1 + assert t("aaa") == 1 + assert t("") == 0 + assert t([]) == 0 + assert t(()) == 0 + assert t({}) == 0 + assert t(t) == 1 + assert t(-234) == 1 + assert t(10**100) == 1 + assert t([234]) == 1 + assert t((234, )) == 1 + assert t(array(234)) == 1 + assert t(array([234])) == 1 + assert t(array([[234]])) == 1 + assert t(array([127], "b")) == 1 + assert t(array([234], "h")) == 1 + assert t(array([234], "i")) == 1 + assert t(array([234], "l")) == 1 + assert t(array([234], "f")) == 1 + assert t(array([234], "d")) == 1 + assert t(array([234 + 3j], "F")) == 1 + assert t(array([234], "D")) == 1 + assert t(array(0)) == 0 + assert t(array([0])) == 0 + assert t(array([[0]])) == 0 + assert t(array([0j])) == 0 + assert t(array([1])) == 1 + pytest.raises(ValueError, t, array([0, 0])) + + +class TestFReturnLogical(TestReturnLogical): + sources = [ + util.getpath("tests", "src", "return_logical", "foo77.f"), + util.getpath("tests", "src", "return_logical", "foo90.f90"), + ] + + @pytest.mark.slow + @pytest.mark.parametrize("name", ["t0", "t1", "t2", "t4", "s0", "s1", "s2", "s4"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name)) + + @pytest.mark.slow + @pytest.mark.parametrize("name", + ["t0", "t1", "t2", "t4", "t8", "s0", "s1", "s2", "s4", "s8"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_logical, name)) diff --git a/python/numpy/f2py/tests/test_return_real.py b/python/numpy/f2py/tests/test_return_real.py new file mode 100644 index 000000000..c871ed3d4 --- /dev/null +++ b/python/numpy/f2py/tests/test_return_real.py @@ -0,0 +1,109 @@ +import platform + +import pytest + +from numpy import array +from numpy.testing import IS_64BIT + +from . 
import util + + +@pytest.mark.slow +class TestReturnReal(util.F2PyTest): + def check_function(self, t, tname): + if tname in ["t0", "t4", "s0", "s4"]: + err = 1e-5 + else: + err = 0.0 + assert abs(t(234) - 234.0) <= err + assert abs(t(234.6) - 234.6) <= err + assert abs(t("234") - 234) <= err + assert abs(t("234.6") - 234.6) <= err + assert abs(t(-234) + 234) <= err + assert abs(t([234]) - 234) <= err + assert abs(t((234, )) - 234.0) <= err + assert abs(t(array(234)) - 234.0) <= err + assert abs(t(array(234).astype("b")) + 22) <= err + assert abs(t(array(234, "h")) - 234.0) <= err + assert abs(t(array(234, "i")) - 234.0) <= err + assert abs(t(array(234, "l")) - 234.0) <= err + assert abs(t(array(234, "B")) - 234.0) <= err + assert abs(t(array(234, "f")) - 234.0) <= err + assert abs(t(array(234, "d")) - 234.0) <= err + if tname in ["t0", "t4", "s0", "s4"]: + assert t(1e200) == t(1e300) # inf + + # pytest.raises(ValueError, t, array([234], 'S1')) + pytest.raises(ValueError, t, "abc") + + pytest.raises(IndexError, t, []) + pytest.raises(IndexError, t, ()) + + pytest.raises(Exception, t, t) + pytest.raises(Exception, t, {}) + + try: + r = t(10**400) + assert repr(r) in ["inf", "Infinity"] + except OverflowError: + pass + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +class TestCReturnReal(TestReturnReal): + suffix = ".pyf" + module_name = "c_ext_return_real" + code = """ +python module c_ext_return_real +usercode \'\'\' +float t4(float value) { return value; } +void s4(float *t4, float value) { *t4 = value; } +double t8(double value) { return value; } +void s8(double *t8, double value) { *t8 = value; } +\'\'\' +interface + function t4(value) + real*4 intent(c) :: t4,value + end + function t8(value) + real*8 intent(c) :: t8,value + end + subroutine s4(t4,value) + intent(c) s4 + real*4 intent(out) :: t4 + real*4 intent(c) :: value + end + subroutine s8(t8,value) + intent(c) s8 + real*8 intent(out) :: t8 + real*8 intent(c) :: value + end +end interface +end python module c_ext_return_real + """ + + @pytest.mark.parametrize("name", ["t4", "t8", "s4", "s8"]) + def test_all(self, name): + self.check_function(getattr(self.module, name), name) + + +class TestFReturnReal(TestReturnReal): + sources = [ + util.getpath("tests", "src", "return_real", "foo77.f"), + util.getpath("tests", "src", "return_real", "foo90.f90"), + ] + + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) + def test_all_f77(self, name): + self.check_function(getattr(self.module, name), name) + + @pytest.mark.parametrize("name", ["t0", "t4", "t8", "td", "s0", "s4", "s8", "sd"]) + def test_all_f90(self, name): + self.check_function(getattr(self.module.f90_return_real, name), name) diff --git a/python/numpy/f2py/tests/test_routines.py b/python/numpy/f2py/tests/test_routines.py new file mode 100644 index 000000000..01135dd69 --- /dev/null +++ b/python/numpy/f2py/tests/test_routines.py @@ -0,0 +1,29 @@ +import pytest + +from . 
import util + + +@pytest.mark.slow +class TestRenamedFunc(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "funcfortranname.f"), + util.getpath("tests", "src", "routines", "funcfortranname.pyf"), + ] + module_name = "funcfortranname" + + def test_gh25799(self): + assert dir(self.module) + assert self.module.funcfortranname_default(200, 12) == 212 + + +@pytest.mark.slow +class TestRenamedSubroutine(util.F2PyTest): + sources = [ + util.getpath("tests", "src", "routines", "subrout.f"), + util.getpath("tests", "src", "routines", "subrout.pyf"), + ] + module_name = "subrout" + + def test_renamed_subroutine(self): + assert dir(self.module) + assert self.module.subrout_default(200, 12) == 212 diff --git a/python/numpy/f2py/tests/test_semicolon_split.py b/python/numpy/f2py/tests/test_semicolon_split.py new file mode 100644 index 000000000..2a16b191b --- /dev/null +++ b/python/numpy/f2py/tests/test_semicolon_split.py @@ -0,0 +1,75 @@ +import platform + +import pytest + +from numpy.testing import IS_64BIT + +from . import util + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +class TestMultiline(util.F2PyTest): + suffix = ".pyf" + module_name = "multiline" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ + char dummy = ';'; + *x = 42; +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + end subroutine foo + end interface +end python module {module_name} + """ + + def test_multiline(self): + assert self.module.foo() == 42 + + +@pytest.mark.skipif( + platform.system() == "Darwin", + reason="Prone to error when run with numpy/f2py/tests on mac os, " + "but not when run in isolation", +) +@pytest.mark.skipif( + not IS_64BIT, reason="32-bit builds are buggy" +) +@pytest.mark.slow +class TestCallstatement(util.F2PyTest): + suffix = ".pyf" + module_name = "callstatement" + code = f""" +python module {module_name} + usercode ''' +void foo(int* x) {{ +}} +''' + interface + subroutine foo(x) + intent(c) foo + integer intent(out) :: x + callprotoargument int* + callstatement {{ & + ; & + x = 42; & + }} + end subroutine foo + end interface +end python module {module_name} + """ + + def test_callstatement(self): + assert self.module.foo() == 42 diff --git a/python/numpy/f2py/tests/test_size.py b/python/numpy/f2py/tests/test_size.py new file mode 100644 index 000000000..ac2eaf141 --- /dev/null +++ b/python/numpy/f2py/tests/test_size.py @@ -0,0 +1,45 @@ +import pytest + +import numpy as np + +from . 
import util + + +class TestSizeSumExample(util.F2PyTest): + sources = [util.getpath("tests", "src", "size", "foo.f90")] + + @pytest.mark.slow + def test_all(self): + r = self.module.foo([[]]) + assert r == [0] + + r = self.module.foo([[1, 2]]) + assert r == [3] + + r = self.module.foo([[1, 2], [3, 4]]) + assert np.allclose(r, [3, 7]) + + r = self.module.foo([[1, 2], [3, 4], [5, 6]]) + assert np.allclose(r, [3, 7, 11]) + + @pytest.mark.slow + def test_transpose(self): + r = self.module.trans([[]]) + assert np.allclose(r.T, np.array([[]])) + + r = self.module.trans([[1, 2]]) + assert np.allclose(r, [[1.], [2.]]) + + r = self.module.trans([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [[1, 4], [2, 5], [3, 6]]) + + @pytest.mark.slow + def test_flatten(self): + r = self.module.flatten([[]]) + assert np.allclose(r, []) + + r = self.module.flatten([[1, 2]]) + assert np.allclose(r, [1, 2]) + + r = self.module.flatten([[1, 2, 3], [4, 5, 6]]) + assert np.allclose(r, [1, 2, 3, 4, 5, 6]) diff --git a/python/numpy/f2py/tests/test_string.py b/python/numpy/f2py/tests/test_string.py new file mode 100644 index 000000000..f484ea3f1 --- /dev/null +++ b/python/numpy/f2py/tests/test_string.py @@ -0,0 +1,100 @@ +import pytest + +import numpy as np + +from . import util + + +class TestString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "char.f90")] + + @pytest.mark.slow + def test_char(self): + strings = np.array(["ab", "cd", "ef"], dtype="c").T + inp, out = self.module.char_test.change_strings( + strings, strings.shape[1]) + assert inp == pytest.approx(strings) + expected = strings.copy() + expected[1, :] = "AAA" + assert out == pytest.approx(expected) + + +class TestDocStringArguments(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "string.f")] + + def test_example(self): + a = np.array(b"123\0\0") + b = np.array(b"123\0\0") + c = np.array(b"123") + d = np.array(b"123") + + self.module.foo(a, b, c, d) + + assert a.tobytes() == b"123\0\0" + assert b.tobytes() == b"B23\0\0" + assert c.tobytes() == b"123" + assert d.tobytes() == b"D23" + + +class TestFixedString(util.F2PyTest): + sources = [util.getpath("tests", "src", "string", "fixed_string.f90")] + + @staticmethod + def _sint(s, start=0, end=None): + """Return the content of a string buffer as integer value. 
+ + For example: + _sint('1234') -> 4321 + _sint('123A') -> 17321 + """ + if isinstance(s, np.ndarray): + s = s.tobytes() + elif isinstance(s, str): + s = s.encode() + assert isinstance(s, bytes) + if end is None: + end = len(s) + i = 0 + for j in range(start, min(end, len(s))): + i += s[j] * 10**j + return i + + def _get_input(self, intent="in"): + if intent in ["in"]: + yield "" + yield "1" + yield "1234" + yield "12345" + yield b"" + yield b"\0" + yield b"1" + yield b"\01" + yield b"1\0" + yield b"1234" + yield b"12345" + yield np.ndarray((), np.bytes_, buffer=b"") # array(b'', dtype='|S0') + yield np.array(b"") # array(b'', dtype='|S1') + yield np.array(b"\0") + yield np.array(b"1") + yield np.array(b"1\0") + yield np.array(b"\01") + yield np.array(b"1234") + yield np.array(b"123\0") + yield np.array(b"12345") + + def test_intent_in(self): + for s in self._get_input(): + r = self.module.test_in_bytes4(s) + # also checks that s is not changed inplace + expected = self._sint(s, end=4) + assert r == expected, s + + def test_intent_inout(self): + for s in self._get_input(intent="inout"): + rest = self._sint(s, start=4) + r = self.module.test_inout_bytes4(s) + expected = self._sint(s, end=4) + assert r == expected + + # check that the rest of input string is preserved + assert rest == self._sint(s, start=4) diff --git a/python/numpy/f2py/tests/test_symbolic.py b/python/numpy/f2py/tests/test_symbolic.py new file mode 100644 index 000000000..ec23f5221 --- /dev/null +++ b/python/numpy/f2py/tests/test_symbolic.py @@ -0,0 +1,495 @@ +import pytest + +from numpy.f2py.symbolic import ( + ArithOp, + Expr, + Language, + Op, + as_apply, + as_array, + as_complex, + as_deref, + as_eq, + as_expr, + as_factors, + as_ge, + as_gt, + as_le, + as_lt, + as_ne, + as_number, + as_numer_denom, + as_ref, + as_string, + as_symbol, + as_terms, + as_ternary, + eliminate_quotes, + fromstring, + insert_quotes, + normalize, +) + +from . 
import util + + +class TestSymbolic(util.F2PyTest): + def test_eliminate_quotes(self): + def worker(s): + r, d = eliminate_quotes(s) + s1 = insert_quotes(r, d) + assert s1 == s + + for kind in ["", "mykind_"]: + worker(kind + '"1234" // "ABCD"') + worker(kind + '"1234" // ' + kind + '"ABCD"') + worker(kind + "\"1234\" // 'ABCD'") + worker(kind + '"1234" // ' + kind + "'ABCD'") + worker(kind + '"1\\"2\'AB\'34"') + worker("a = " + kind + "'1\\'2\"AB\"34'") + + def test_sanity(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.op == Op.SYMBOL + assert repr(x) == "Expr(Op.SYMBOL, 'x')" + assert x == x + assert x != y + assert hash(x) is not None + + n = as_number(123) + m = as_number(456) + assert n.op == Op.INTEGER + assert repr(n) == "Expr(Op.INTEGER, (123, 4))" + assert n == n + assert n != m + assert hash(n) is not None + + fn = as_number(12.3) + fm = as_number(45.6) + assert fn.op == Op.REAL + assert repr(fn) == "Expr(Op.REAL, (12.3, 4))" + assert fn == fn + assert fn != fm + assert hash(fn) is not None + + c = as_complex(1, 2) + c2 = as_complex(3, 4) + assert c.op == Op.COMPLEX + assert repr(c) == ("Expr(Op.COMPLEX, (Expr(Op.INTEGER, (1, 4))," + " Expr(Op.INTEGER, (2, 4))))") + assert c == c + assert c != c2 + assert hash(c) is not None + + s = as_string("'123'") + s2 = as_string('"ABC"') + assert s.op == Op.STRING + assert repr(s) == "Expr(Op.STRING, (\"'123'\", 1))", repr(s) + assert s == s + assert s != s2 + + a = as_array((n, m)) + b = as_array((n, )) + assert a.op == Op.ARRAY + assert repr(a) == ("Expr(Op.ARRAY, (Expr(Op.INTEGER, (123, 4))," + " Expr(Op.INTEGER, (456, 4))))") + assert a == a + assert a != b + + t = as_terms(x) + u = as_terms(y) + assert t.op == Op.TERMS + assert repr(t) == "Expr(Op.TERMS, {Expr(Op.SYMBOL, 'x'): 1})" + assert t == t + assert t != u + assert hash(t) is not None + + v = as_factors(x) + w = as_factors(y) + assert v.op == Op.FACTORS + assert repr(v) == "Expr(Op.FACTORS, {Expr(Op.SYMBOL, 'x'): 1})" + assert v == v + assert w != v + assert hash(v) is not None + + t = as_ternary(x, y, z) + u = as_ternary(x, z, y) + assert t.op == Op.TERNARY + assert t == t + assert t != u + assert hash(t) is not None + + e = as_eq(x, y) + f = as_lt(x, y) + assert e.op == Op.RELATIONAL + assert e == e + assert e != f + assert hash(e) is not None + + def test_tostring_fortran(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + m = as_number(456) + a = as_array((n, m)) + c = as_complex(n, m) + + assert str(x) == "x" + assert str(n) == "123" + assert str(a) == "[123, 456]" + assert str(c) == "(123, 456)" + + assert str(Expr(Op.TERMS, {x: 1})) == "x" + assert str(Expr(Op.TERMS, {x: 2})) == "2 * x" + assert str(Expr(Op.TERMS, {x: -1})) == "-x" + assert str(Expr(Op.TERMS, {x: -2})) == "-2 * x" + assert str(Expr(Op.TERMS, {x: 1, y: 1})) == "x + y" + assert str(Expr(Op.TERMS, {x: -1, y: -1})) == "-x - y" + assert str(Expr(Op.TERMS, {x: 2, y: 3})) == "2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: -2, y: 3})) == "-2 * x + 3 * y" + assert str(Expr(Op.TERMS, {x: 2, y: -3})) == "2 * x - 3 * y" + + assert str(Expr(Op.FACTORS, {x: 1})) == "x" + assert str(Expr(Op.FACTORS, {x: 2})) == "x ** 2" + assert str(Expr(Op.FACTORS, {x: -1})) == "x ** -1" + assert str(Expr(Op.FACTORS, {x: -2})) == "x ** -2" + assert str(Expr(Op.FACTORS, {x: 1, y: 1})) == "x * y" + assert str(Expr(Op.FACTORS, {x: 2, y: 3})) == "x ** 2 * y ** 3" + + v = Expr(Op.FACTORS, {x: 2, Expr(Op.TERMS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x + y) 
** 3", str(v) + v = Expr(Op.FACTORS, {x: 2, Expr(Op.FACTORS, {x: 1, y: 1}): 3}) + assert str(v) == "x ** 2 * (x * y) ** 3", str(v) + + assert str(Expr(Op.APPLY, ("f", (), {}))) == "f()" + assert str(Expr(Op.APPLY, ("f", (x, ), {}))) == "f(x)" + assert str(Expr(Op.APPLY, ("f", (x, y), {}))) == "f(x, y)" + assert str(Expr(Op.INDEXING, ("f", x))) == "f[x]" + + assert str(as_ternary(x, y, z)) == "merge(y, z, x)" + assert str(as_eq(x, y)) == "x .eq. y" + assert str(as_ne(x, y)) == "x .ne. y" + assert str(as_lt(x, y)) == "x .lt. y" + assert str(as_le(x, y)) == "x .le. y" + assert str(as_gt(x, y)) == "x .gt. y" + assert str(as_ge(x, y)) == "x .ge. y" + + def test_tostring_c(self): + language = Language.C + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + n = as_number(123) + + assert Expr(Op.FACTORS, {x: 2}).tostring(language=language) == "x * x" + assert (Expr(Op.FACTORS, { + x + y: 2 + }).tostring(language=language) == "(x + y) * (x + y)") + assert Expr(Op.FACTORS, { + x: 12 + }).tostring(language=language) == "pow(x, 12)" + + assert as_apply(ArithOp.DIV, x, + y).tostring(language=language) == "x / y" + assert (as_apply(ArithOp.DIV, x, + x + y).tostring(language=language) == "x / (x + y)") + assert (as_apply(ArithOp.DIV, x - y, x + + y).tostring(language=language) == "(x - y) / (x + y)") + assert (x + (x - y) / (x + y) + + n).tostring(language=language) == "123 + x + (x - y) / (x + y)" + + assert as_ternary(x, y, z).tostring(language=language) == "(x?y:z)" + assert as_eq(x, y).tostring(language=language) == "x == y" + assert as_ne(x, y).tostring(language=language) == "x != y" + assert as_lt(x, y).tostring(language=language) == "x < y" + assert as_le(x, y).tostring(language=language) == "x <= y" + assert as_gt(x, y).tostring(language=language) == "x > y" + assert as_ge(x, y).tostring(language=language) == "x >= y" + + def test_operations(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x + x == Expr(Op.TERMS, {x: 2}) + assert x - x == Expr(Op.INTEGER, (0, 4)) + assert x + y == Expr(Op.TERMS, {x: 1, y: 1}) + assert x - y == Expr(Op.TERMS, {x: 1, y: -1}) + assert x * x == Expr(Op.FACTORS, {x: 2}) + assert x * y == Expr(Op.FACTORS, {x: 1, y: 1}) + + assert +x == x + assert -x == Expr(Op.TERMS, {x: -1}), repr(-x) + assert 2 * x == Expr(Op.TERMS, {x: 2}) + assert 2 + x == Expr(Op.TERMS, {x: 1, as_number(1): 2}) + assert 2 * x + 3 * y == Expr(Op.TERMS, {x: 2, y: 3}) + assert (x + y) * 2 == Expr(Op.TERMS, {x: 2, y: 2}) + + assert x**2 == Expr(Op.FACTORS, {x: 2}) + assert (x + y)**2 == Expr( + Op.TERMS, + { + Expr(Op.FACTORS, {x: 2}): 1, + Expr(Op.FACTORS, {y: 2}): 1, + Expr(Op.FACTORS, { + x: 1, + y: 1 + }): 2, + }, + ) + assert (x + y) * x == x**2 + x * y + assert (x + y)**2 == x**2 + 2 * x * y + y**2 + assert (x + y)**2 + (x - y)**2 == 2 * x**2 + 2 * y**2 + assert (x + y) * z == x * z + y * z + assert z * (x + y) == x * z + y * z + + assert (x / 2) == as_apply(ArithOp.DIV, x, as_number(2)) + assert (2 * x / 2) == x + assert (3 * x / 2) == as_apply(ArithOp.DIV, 3 * x, as_number(2)) + assert (4 * x / 2) == 2 * x + assert (5 * x / 2) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (6 * x / 2) == 3 * x + assert ((3 * 5) * x / 6) == as_apply(ArithOp.DIV, 5 * x, as_number(2)) + assert (30 * x**2 * y**4 / (24 * x**3 * y**3)) == as_apply( + ArithOp.DIV, 5 * y, 4 * x) + assert ((15 * x / 6) / 5) == as_apply(ArithOp.DIV, x, + as_number(2)), (15 * x / 6) / 5 + assert (x / (5 / x)) == as_apply(ArithOp.DIV, x**2, as_number(5)) + + assert (x / 2.0) == 
Expr(Op.TERMS, {x: 0.5}) + + s = as_string('"ABC"') + t = as_string('"123"') + + assert s // t == Expr(Op.STRING, ('"ABC123"', 1)) + assert s // x == Expr(Op.CONCAT, (s, x)) + assert x // s == Expr(Op.CONCAT, (x, s)) + + c = as_complex(1.0, 2.0) + assert -c == as_complex(-1.0, -2.0) + assert c + c == as_expr((1 + 2j) * 2) + assert c * c == as_expr((1 + 2j)**2) + + def test_substitute(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + a = as_array((x, y)) + + assert x.substitute({x: y}) == y + assert (x + y).substitute({x: z}) == y + z + assert (x * y).substitute({x: z}) == y * z + assert (x**4).substitute({x: z}) == z**4 + assert (x / y).substitute({x: z}) == z / y + assert x.substitute({x: y + z}) == y + z + assert a.substitute({x: y + z}) == as_array((y + z, y)) + + assert as_ternary(x, y, + z).substitute({x: y + z}) == as_ternary(y + z, y, z) + assert as_eq(x, y).substitute({x: y + z}) == as_eq(y + z, y) + + def test_fromstring(self): + + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + s = as_string('"ABC"') + t = as_string('"123"') + a = as_array((x, y)) + + assert fromstring("x") == x + assert fromstring("+ x") == x + assert fromstring("- x") == -x + assert fromstring("x + y") == x + y + assert fromstring("x + 1") == x + 1 + assert fromstring("x * y") == x * y + assert fromstring("x * 2") == x * 2 + assert fromstring("x / y") == x / y + assert fromstring("x ** 2", language=Language.Python) == x**2 + assert fromstring("x ** 2 ** 3", language=Language.Python) == x**2**3 + assert fromstring("(x + y) * z") == (x + y) * z + + assert fromstring("f(x)") == f(x) + assert fromstring("f(x,y)") == f(x, y) + assert fromstring("f[x]") == f[x] + assert fromstring("f[x][y]") == f[x][y] + + assert fromstring('"ABC"') == s + assert (normalize( + fromstring('"ABC" // "123" ', + language=Language.Fortran)) == s // t) + assert fromstring('f("ABC")') == f(s) + assert fromstring('MYSTRKIND_"ABC"') == as_string('"ABC"', "MYSTRKIND") + + assert fromstring("(/x, y/)") == a, fromstring("(/x, y/)") + assert fromstring("f((/x, y/))") == f(a) + assert fromstring("(/(x+y)*z/)") == as_array(((x + y) * z, )) + + assert fromstring("123") == as_number(123) + assert fromstring("123_2") == as_number(123, 2) + assert fromstring("123_myintkind") == as_number(123, "myintkind") + + assert fromstring("123.0") == as_number(123.0, 4) + assert fromstring("123.0_4") == as_number(123.0, 4) + assert fromstring("123.0_8") == as_number(123.0, 8) + assert fromstring("123.0e0") == as_number(123.0, 4) + assert fromstring("123.0d0") == as_number(123.0, 8) + assert fromstring("123d0") == as_number(123.0, 8) + assert fromstring("123e-0") == as_number(123.0, 4) + assert fromstring("123d+0") == as_number(123.0, 8) + assert fromstring("123.0_myrealkind") == as_number(123.0, "myrealkind") + assert fromstring("3E4") == as_number(30000.0, 4) + + assert fromstring("(1, 2)") == as_complex(1, 2) + assert fromstring("(1e2, PI)") == as_complex(as_number(100.0), + as_symbol("PI")) + + assert fromstring("[1, 2]") == as_array((as_number(1), as_number(2))) + + assert fromstring("POINT(x, y=1)") == as_apply(as_symbol("POINT"), + x, + y=as_number(1)) + assert fromstring( + 'PERSON(name="John", age=50, shape=(/34, 23/))') == as_apply( + as_symbol("PERSON"), + name=as_string('"John"'), + age=as_number(50), + shape=as_array((as_number(34), as_number(23))), + ) + + assert fromstring("x?y:z") == as_ternary(x, y, z) + + assert fromstring("*x") == as_deref(x) + assert fromstring("**x") == 
as_deref(as_deref(x)) + assert fromstring("&x") == as_ref(x) + assert fromstring("(*x) * (*y)") == as_deref(x) * as_deref(y) + assert fromstring("(*x) * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x * *y") == as_deref(x) * as_deref(y) + assert fromstring("*x**y") == as_deref(x) * as_deref(y) + + assert fromstring("x == y") == as_eq(x, y) + assert fromstring("x != y") == as_ne(x, y) + assert fromstring("x < y") == as_lt(x, y) + assert fromstring("x > y") == as_gt(x, y) + assert fromstring("x <= y") == as_le(x, y) + assert fromstring("x >= y") == as_ge(x, y) + + assert fromstring("x .eq. y", language=Language.Fortran) == as_eq(x, y) + assert fromstring("x .ne. y", language=Language.Fortran) == as_ne(x, y) + assert fromstring("x .lt. y", language=Language.Fortran) == as_lt(x, y) + assert fromstring("x .gt. y", language=Language.Fortran) == as_gt(x, y) + assert fromstring("x .le. y", language=Language.Fortran) == as_le(x, y) + assert fromstring("x .ge. y", language=Language.Fortran) == as_ge(x, y) + + def test_traverse(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + f = as_symbol("f") + + # Use traverse to substitute a symbol + def replace_visit(s, r=z): + if s == x: + return r + + assert x.traverse(replace_visit) == z + assert y.traverse(replace_visit) == y + assert z.traverse(replace_visit) == z + assert (f(y)).traverse(replace_visit) == f(y) + assert (f(x)).traverse(replace_visit) == f(z) + assert (f[y]).traverse(replace_visit) == f[y] + assert (f[z]).traverse(replace_visit) == f[z] + assert (x + y + z).traverse(replace_visit) == (2 * z + y) + assert (x + + f(y, x - z)).traverse(replace_visit) == (z + + f(y, as_number(0))) + assert as_eq(x, y).traverse(replace_visit) == as_eq(z, y) + + # Use traverse to collect symbols, method 1 + function_symbols = set() + symbols = set() + + def collect_symbols(s): + if s.op is Op.APPLY: + oper = s.data[0] + function_symbols.add(oper) + if oper in symbols: + symbols.remove(oper) + elif s.op is Op.SYMBOL and s not in function_symbols: + symbols.add(s) + + (x + f(y, x - z)).traverse(collect_symbols) + assert function_symbols == {f} + assert symbols == {x, y, z} + + # Use traverse to collect symbols, method 2 + def collect_symbols2(expr, symbols): + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols2, symbols) + assert symbols == {x, y, z, f} + + # Use traverse to partially collect symbols + def collect_symbols3(expr, symbols): + if expr.op is Op.APPLY: + # skip traversing function calls + return expr + if expr.op is Op.SYMBOL: + symbols.add(expr) + + symbols = set() + (x + f(y, x - z)).traverse(collect_symbols3, symbols) + assert symbols == {x} + + def test_linear_solve(self): + x = as_symbol("x") + y = as_symbol("y") + z = as_symbol("z") + + assert x.linear_solve(x) == (as_number(1), as_number(0)) + assert (x + 1).linear_solve(x) == (as_number(1), as_number(1)) + assert (2 * x).linear_solve(x) == (as_number(2), as_number(0)) + assert (2 * x + 3).linear_solve(x) == (as_number(2), as_number(3)) + assert as_number(3).linear_solve(x) == (as_number(0), as_number(3)) + assert y.linear_solve(x) == (as_number(0), y) + assert (y * z).linear_solve(x) == (as_number(0), y * z) + + assert (x + y).linear_solve(x) == (as_number(1), y) + assert (z * x + y).linear_solve(x) == (z, y) + assert ((z + y) * x + y).linear_solve(x) == (z + y, y) + assert (z * y * x + y).linear_solve(x) == (z * y, y) + + pytest.raises(RuntimeError, lambda: (x * x).linear_solve(x)) + + def 
test_as_numer_denom(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert as_numer_denom(x) == (x, as_number(1)) + assert as_numer_denom(x / n) == (x, n) + assert as_numer_denom(n / x) == (n, x) + assert as_numer_denom(x / y) == (x, y) + assert as_numer_denom(x * y) == (x * y, as_number(1)) + assert as_numer_denom(n + x / y) == (x + n * y, y) + assert as_numer_denom(n + x / (y - x / n)) == (y * n**2, y * n - x) + + def test_polynomial_atoms(self): + x = as_symbol("x") + y = as_symbol("y") + n = as_number(123) + + assert x.polynomial_atoms() == {x} + assert n.polynomial_atoms() == set() + assert (y[x]).polynomial_atoms() == {y[x]} + assert (y(x)).polynomial_atoms() == {y(x)} + assert (y(x) + x).polynomial_atoms() == {y(x), x} + assert (y(x) * x[y]).polynomial_atoms() == {y(x), x[y]} + assert (y(x)**x).polynomial_atoms() == {y(x)} diff --git a/python/numpy/f2py/tests/test_value_attrspec.py b/python/numpy/f2py/tests/test_value_attrspec.py new file mode 100644 index 000000000..1afae08bf --- /dev/null +++ b/python/numpy/f2py/tests/test_value_attrspec.py @@ -0,0 +1,15 @@ +import pytest + +from . import util + + +class TestValueAttr(util.F2PyTest): + sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")] + + # gh-21665 + @pytest.mark.slow + def test_gh21665(self): + inp = 2 + out = self.module.fortfuncs.square(inp) + exp_out = 4 + assert out == exp_out diff --git a/python/numpy/f2py/tests/util.py b/python/numpy/f2py/tests/util.py new file mode 100644 index 000000000..35e5d3bd8 --- /dev/null +++ b/python/numpy/f2py/tests/util.py @@ -0,0 +1,442 @@ +""" +Utility functions for + +- building and importing modules on test time, using a temporary location +- detecting if compilers are present +- determining paths to tests + +""" +import atexit +import concurrent.futures +import contextlib +import glob +import os +import shutil +import subprocess +import sys +import tempfile +from importlib import import_module +from pathlib import Path + +import pytest + +import numpy +from numpy._utils import asunicode +from numpy.f2py._backends._meson import MesonBackend +from numpy.testing import IS_WASM, temppath + +# +# Check if compilers are available at all... +# + +def check_language(lang, code_snippet=None): + if sys.platform == "win32": + pytest.skip("No Fortran tests on Windows (Issue #25134)", allow_module_level=True) + tmpdir = tempfile.mkdtemp() + try: + meson_file = os.path.join(tmpdir, "meson.build") + with open(meson_file, "w") as f: + f.write("project('check_compilers')\n") + f.write(f"add_languages('{lang}')\n") + if code_snippet: + f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") + f.write(f"{lang}_code = '''{code_snippet}'''\n") + f.write( + f"_have_{lang}_feature =" + f"{lang}_compiler.compiles({lang}_code," + f" name: '{lang} feature check')\n" + ) + try: + runmeson = subprocess.run( + ["meson", "setup", "btmp"], + check=False, + cwd=tmpdir, + capture_output=True, + ) + except subprocess.CalledProcessError: + pytest.skip("meson not present, skipping compiler dependent test", allow_module_level=True) + return runmeson.returncode == 0 + finally: + shutil.rmtree(tmpdir) + + +fortran77_code = ''' +C Example Fortran 77 code + PROGRAM HELLO + PRINT *, 'Hello, Fortran 77!' + END +''' + +fortran90_code = ''' +! Example Fortran 90 code +program hello90 + type :: greeting + character(len=20) :: text + end type greeting + + type(greeting) :: greet + greet%text = 'hello, fortran 90!' 
+ print *, greet%text +end program hello90 +''' + +# Dummy class for caching relevant checks +class CompilerChecker: + def __init__(self): + self.compilers_checked = False + self.has_c = False + self.has_f77 = False + self.has_f90 = False + + def check_compilers(self): + if (not self.compilers_checked) and (not sys.platform == "cygwin"): + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(check_language, "c"), + executor.submit(check_language, "fortran", fortran77_code), + executor.submit(check_language, "fortran", fortran90_code) + ] + + self.has_c = futures[0].result() + self.has_f77 = futures[1].result() + self.has_f90 = futures[2].result() + + self.compilers_checked = True + + +if not IS_WASM: + checker = CompilerChecker() + checker.check_compilers() + +def has_c_compiler(): + return checker.has_c + +def has_f77_compiler(): + return checker.has_f77 + +def has_f90_compiler(): + return checker.has_f90 + +def has_fortran_compiler(): + return (checker.has_f90 and checker.has_f77) + + +# +# Maintaining a temporary module directory +# + +_module_dir = None +_module_num = 5403 + +if sys.platform == "cygwin": + NUMPY_INSTALL_ROOT = Path(__file__).parent.parent.parent + _module_list = list(NUMPY_INSTALL_ROOT.glob("**/*.dll")) + + +def _cleanup(): + global _module_dir + if _module_dir is not None: + try: + sys.path.remove(_module_dir) + except ValueError: + pass + try: + shutil.rmtree(_module_dir) + except OSError: + pass + _module_dir = None + + +def get_module_dir(): + global _module_dir + if _module_dir is None: + _module_dir = tempfile.mkdtemp() + atexit.register(_cleanup) + if _module_dir not in sys.path: + sys.path.insert(0, _module_dir) + return _module_dir + + +def get_temp_module_name(): + # Assume single-threaded, and the module dir usable only by this thread + global _module_num + get_module_dir() + name = "_test_ext_module_%d" % _module_num + _module_num += 1 + if name in sys.modules: + # this should not be possible, but check anyway + raise RuntimeError("Temporary module name already in use.") + return name + + +def _memoize(func): + memo = {} + + def wrapper(*a, **kw): + key = repr((a, kw)) + if key not in memo: + try: + memo[key] = func(*a, **kw) + except Exception as e: + memo[key] = e + raise + ret = memo[key] + if isinstance(ret, Exception): + raise ret + return ret + + wrapper.__name__ = func.__name__ + return wrapper + + +# +# Building modules +# + + +@_memoize +def build_module(source_files, options=[], skip=[], only=[], module_name=None): + """ + Compile and import a f2py module, built from the given files. 
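+
+    Results are memoized; source files are copied into a temporary
+    module directory, f2py is run in a subprocess with the meson
+    backend, and the newly built extension module is imported.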
+ + """ + + code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" + + d = get_module_dir() + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + + # Copy files + dst_sources = [] + f2py_sources = [] + for fn in source_files: + if not os.path.isfile(fn): + raise RuntimeError(f"{fn} is not a file") + dst = os.path.join(d, os.path.basename(fn)) + shutil.copyfile(fn, dst) + dst_sources.append(dst) + + base, ext = os.path.splitext(dst) + if ext in (".f90", ".f95", ".f", ".c", ".pyf"): + f2py_sources.append(dst) + + assert f2py_sources + + # Prepare options + if module_name is None: + module_name = get_temp_module_name() + gil_options = [] + if '--freethreading-compatible' not in options and '--no-freethreading-compatible' not in options: + # default to disabling the GIL if unset in options + gil_options = ['--freethreading-compatible'] + f2py_opts = ["-c", "-m", module_name] + options + gil_options + f2py_sources + f2py_opts += ["--backend", "meson"] + if skip: + f2py_opts += ["skip:"] + skip + if only: + f2py_opts += ["only:"] + only + + # Build + cwd = os.getcwd() + try: + os.chdir(d) + cmd = [sys.executable, "-c", code] + f2py_opts + p = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + out, err = p.communicate() + if p.returncode != 0: + raise RuntimeError(f"Running f2py failed: {cmd[4:]}\n{asunicode(out)}") + finally: + os.chdir(cwd) + + # Partial cleanup + for fn in dst_sources: + os.unlink(fn) + + # Rebase (Cygwin-only) + if sys.platform == "cygwin": + # If someone starts deleting modules after import, this will + # need to change to record how big each module is, rather than + # relying on rebase being able to find that from the files. + _module_list.extend( + glob.glob(os.path.join(d, f"{module_name:s}*")) + ) + subprocess.check_call( + ["/usr/bin/rebase", "--database", "--oblivious", "--verbose"] + + _module_list + ) + + # Import + return import_module(module_name) + + +@_memoize +def build_code(source_code, + options=[], + skip=[], + only=[], + suffix=None, + module_name=None): + """ + Compile and import Fortran code using f2py. + + """ + if suffix is None: + suffix = ".f" + with temppath(suffix=suffix) as path: + with open(path, "w") as f: + f.write(source_code) + return build_module([path], + options=options, + skip=skip, + only=only, + module_name=module_name) + + +# +# Building with meson +# + + +class SimplifiedMesonBackend(MesonBackend): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def compile(self): + self.write_meson_build(self.build_dir) + self.run_meson(self.build_dir) + + +def build_meson(source_files, module_name=None, **kwargs): + """ + Build a module via Meson and import it. 
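+
+    Skips when no Fortran compiler is available, then writes and runs
+    the meson build via SimplifiedMesonBackend before importing the
+    compiled module from the build directory.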
+ """ + + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + + build_dir = get_module_dir() + if module_name is None: + module_name = get_temp_module_name() + + # Initialize the MesonBackend + backend = SimplifiedMesonBackend( + modulename=module_name, + sources=source_files, + extra_objects=kwargs.get("extra_objects", []), + build_dir=build_dir, + include_dirs=kwargs.get("include_dirs", []), + library_dirs=kwargs.get("library_dirs", []), + libraries=kwargs.get("libraries", []), + define_macros=kwargs.get("define_macros", []), + undef_macros=kwargs.get("undef_macros", []), + f2py_flags=kwargs.get("f2py_flags", []), + sysinfo_flags=kwargs.get("sysinfo_flags", []), + fc_flags=kwargs.get("fc_flags", []), + flib_flags=kwargs.get("flib_flags", []), + setup_flags=kwargs.get("setup_flags", []), + remove_build_dir=kwargs.get("remove_build_dir", False), + extra_dat=kwargs.get("extra_dat", {}), + ) + + backend.compile() + + # Import the compiled module + sys.path.insert(0, f"{build_dir}/{backend.meson_build_dir}") + return import_module(module_name) + + +# +# Unittest convenience +# + + +class F2PyTest: + code = None + sources = None + options = [] + skip = [] + only = [] + suffix = ".f" + module = None + _has_c_compiler = None + _has_f77_compiler = None + _has_f90_compiler = None + + @property + def module_name(self): + cls = type(self) + return f'_{cls.__module__.rsplit(".", 1)[-1]}_{cls.__name__}_ext_module' + + @classmethod + def setup_class(cls): + if sys.platform == "win32": + pytest.skip("Fails with MinGW64 Gfortran (Issue #9673)") + F2PyTest._has_c_compiler = has_c_compiler() + F2PyTest._has_f77_compiler = has_f77_compiler() + F2PyTest._has_f90_compiler = has_f90_compiler() + F2PyTest._has_fortran_compiler = has_fortran_compiler() + + def setup_method(self): + if self.module is not None: + return + + codes = self.sources or [] + if self.code: + codes.append(self.suffix) + + needs_f77 = any(str(fn).endswith(".f") for fn in codes) + needs_f90 = any(str(fn).endswith(".f90") for fn in codes) + needs_pyf = any(str(fn).endswith(".pyf") for fn in codes) + + if needs_f77 and not self._has_f77_compiler: + pytest.skip("No Fortran 77 compiler available") + if needs_f90 and not self._has_f90_compiler: + pytest.skip("No Fortran 90 compiler available") + if needs_pyf and not self._has_fortran_compiler: + pytest.skip("No Fortran compiler available") + + # Build the module + if self.code is not None: + self.module = build_code( + self.code, + options=self.options, + skip=self.skip, + only=self.only, + suffix=self.suffix, + module_name=self.module_name, + ) + + if self.sources is not None: + self.module = build_module( + self.sources, + options=self.options, + skip=self.skip, + only=self.only, + module_name=self.module_name, + ) + + +# +# Helper functions +# + + +def getpath(*a): + # Package root + d = Path(numpy.f2py.__file__).parent.resolve() + return d.joinpath(*a) + + +@contextlib.contextmanager +def switchdir(path): + curpath = Path.cwd() + os.chdir(path) + try: + yield + finally: + os.chdir(curpath) diff --git a/python/numpy/f2py/use_rules.py b/python/numpy/f2py/use_rules.py new file mode 100644 index 000000000..1e06f6c01 --- /dev/null +++ b/python/numpy/f2py/use_rules.py @@ -0,0 +1,99 @@ +""" +Build 'use others module data' mechanism for f2py2e. + +Copyright 1999 -- 2011 Pearu Peterson all rights reserved. +Copyright 2011 -- present NumPy Developers. 
+Permission to use, modify, and distribute this software is given under the +terms of the NumPy License. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +""" +__version__ = "$Revision: 1.3 $"[10:-1] + +f2py_version = 'See `f2py -v`' + + +from .auxfuncs import applyrules, dictappend, gentitle, hasnote, outmess + +usemodule_rules = { + 'body': """ +#begintitle# +static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\ +\t #name# = get_#name#()\\n\\ +Arguments:\\n\\ +#docstr#\"; +extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#); +static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) { +/*#decl#*/ +\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail; +printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#)); +\treturn Py_BuildValue(\"\"); +capi_fail: +\treturn NULL; +} +""", + 'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},', + 'need': ['F_MODFUNC'] +} + +################ + + +def buildusevars(m, r): + ret = {} + outmess( + f"\t\tBuilding use variable hooks for module \"{m['name']}\" (feature only for F90/F95)...\n") + varsmap = {} + revmap = {} + if 'map' in r: + for k in r['map'].keys(): + if r['map'][k] in revmap: + outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % ( + r['map'][k], k, revmap[r['map'][k]])) + else: + revmap[r['map'][k]] = k + if r.get('only'): + for v in r['map'].keys(): + if r['map'][v] in m['vars']: + + if revmap[r['map'][v]] == v: + varsmap[v] = r['map'][v] + else: + outmess(f"\t\t\tIgnoring map \"{v}=>{r['map'][v]}\". See above.\n") + else: + outmess( + f"\t\t\tNo definition for variable \"{v}=>{r['map'][v]}\". Skipping.\n") + else: + for v in m['vars'].keys(): + varsmap[v] = revmap.get(v, v) + for v in varsmap.keys(): + ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name'])) + return ret + + +def buildusevar(name, realname, vars, usemodulename): + outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % ( + name, realname)) + ret = {} + vrd = {'name': name, + 'realname': realname, + 'REALNAME': realname.upper(), + 'usemodulename': usemodulename, + 'USEMODULENAME': usemodulename.upper(), + 'texname': name.replace('_', '\\_'), + 'begintitle': gentitle(f'{name}=>{realname}'), + 'endtitle': gentitle(f'end of {name}=>{realname}'), + 'apiname': f'#modulename#_use_{realname}_from_{usemodulename}' + } + nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv', + 5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'} + vrd['texnamename'] = name + for i in nummap.keys(): + vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i]) + if hasnote(vars[realname]): + vrd['note'] = vars[realname]['note'] + rd = dictappend({}, vrd) + + print(name, realname, vars[realname]) + ret = applyrules(usemodule_rules, rd) + return ret diff --git a/python/numpy/f2py/use_rules.pyi b/python/numpy/f2py/use_rules.pyi new file mode 100644 index 000000000..58c7f9b5f --- /dev/null +++ b/python/numpy/f2py/use_rules.pyi @@ -0,0 +1,9 @@ +from collections.abc import Mapping +from typing import Any, Final + +__version__: Final[str] = ... +f2py_version: Final = "See `f2py -v`" +usemodule_rules: Final[dict[str, str | list[str]]] = ... + +def buildusevars(m: Mapping[str, object], r: Mapping[str, Mapping[str, object]]) -> dict[str, Any]: ... +def buildusevar(name: str, realname: str, vars: Mapping[str, Mapping[str, object]], usemodulename: str) -> dict[str, Any]: ... 
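The `#name#`-style placeholders in `usemodule_rules` above are expanded by `applyrules` from `numpy.f2py.auxfuncs`, using dictionaries like the `vrd` built in `buildusevar`. A minimal sketch of that substitution mechanism, with hypothetical entry names (simplified; the real `applyrules` also handles lists, conditional rules, and `need` dependencies):

    import re

    def apply_placeholders(template: str, vrd: dict) -> str:
        # Replace each #key# with the corresponding vrd entry,
        # leaving unknown placeholders untouched.
        return re.sub(r"#(\w+)#",
                      lambda m: str(vrd.get(m.group(1), m.group(0))),
                      template)

    vrd = {"name": "max_value", "apiname": "mod_use_max_value_from_constants"}
    line = '\t{"get_#name#",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},'
    print(apply_placeholders(line, vrd))
    # -> {"get_max_value",mod_use_max_value_from_constants,...,doc_mod_use_max_value_from_constants},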
diff --git a/python/numpy/fft/__init__.py b/python/numpy/fft/__init__.py new file mode 100644 index 000000000..55f7320f6 --- /dev/null +++ b/python/numpy/fft/__init__.py @@ -0,0 +1,215 @@ +""" +Discrete Fourier Transform +========================== + +.. currentmodule:: numpy.fft + +The SciPy module `scipy.fft` is a more comprehensive superset +of `numpy.fft`, which includes only a basic set of routines. + +Standard FFTs +------------- + +.. autosummary:: + :toctree: generated/ + + fft Discrete Fourier transform. + ifft Inverse discrete Fourier transform. + fft2 Discrete Fourier transform in two dimensions. + ifft2 Inverse discrete Fourier transform in two dimensions. + fftn Discrete Fourier transform in N-dimensions. + ifftn Inverse discrete Fourier transform in N dimensions. + +Real FFTs +--------- + +.. autosummary:: + :toctree: generated/ + + rfft Real discrete Fourier transform. + irfft Inverse real discrete Fourier transform. + rfft2 Real discrete Fourier transform in two dimensions. + irfft2 Inverse real discrete Fourier transform in two dimensions. + rfftn Real discrete Fourier transform in N dimensions. + irfftn Inverse real discrete Fourier transform in N dimensions. + +Hermitian FFTs +-------------- + +.. autosummary:: + :toctree: generated/ + + hfft Hermitian discrete Fourier transform. + ihfft Inverse Hermitian discrete Fourier transform. + +Helper routines +--------------- + +.. autosummary:: + :toctree: generated/ + + fftfreq Discrete Fourier Transform sample frequencies. + rfftfreq DFT sample frequencies (for usage with rfft, irfft). + fftshift Shift zero-frequency component to center of spectrum. + ifftshift Inverse of fftshift. + + +Background information +---------------------- + +Fourier analysis is fundamentally a method for expressing a function as a +sum of periodic components, and for recovering the function from those +components. When both the function and its Fourier transform are +replaced with discretized counterparts, it is called the discrete Fourier +transform (DFT). The DFT has become a mainstay of numerical computing in +part because of a very fast algorithm for computing it, called the Fast +Fourier Transform (FFT), which was known to Gauss (1805) and was brought +to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_ +provide an accessible introduction to Fourier analysis and its +applications. + +Because the discrete Fourier transform separates its input into +components that contribute at discrete frequencies, it has a great number +of applications in digital signal processing, e.g., for filtering, and in +this context the discretized input to the transform is customarily +referred to as a *signal*, which exists in the *time domain*. The output +is called a *spectrum* or *transform* and exists in the *frequency +domain*. + +Implementation details +---------------------- + +There are many ways to define the DFT, varying in the sign of the +exponent, normalization, etc. In this implementation, the DFT is defined +as + +.. math:: + A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\} + \\qquad k = 0,\\ldots,n-1. + +The DFT is in general defined for complex inputs and outputs, and a +single-frequency component at linear frequency :math:`f` is +represented by a complex exponential +:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t` +is the sampling interval. 
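As a quick sanity check of the definition above, the naive O(n^2) evaluation of the sum can be compared against `np.fft.fft` (an illustrative sketch, not part of the module):

    import numpy as np

    a = np.random.rand(8)
    n = a.size
    m = np.arange(n)
    # Naive evaluation of A_k = sum_m a_m * exp(-2*pi*i*m*k/n)
    A = np.array([np.sum(a * np.exp(-2j * np.pi * m * k / n)) for k in range(n)])
    assert np.allclose(A, np.fft.fft(a))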
+ +The values in the result follow so-called "standard" order: If ``A = +fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of +the signal), which is always purely real for real inputs. Then ``A[1:n/2]`` +contains the positive-frequency terms, and ``A[n/2+1:]`` contains the +negative-frequency terms, in order of decreasingly negative frequency. +For an even number of input points, ``A[n/2]`` represents both positive and +negative Nyquist frequency, and is also purely real for real input. For +an odd number of input points, ``A[(n-1)/2]`` contains the largest positive +frequency, while ``A[(n+1)/2]`` contains the largest negative frequency. +The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies +of corresponding elements in the output. The routine +``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the +zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes +that shift. + +When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)`` +is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum. +The phase spectrum is obtained by ``np.angle(A)``. + +The inverse DFT is defined as + +.. math:: + a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\} + \\qquad m = 0,\\ldots,n-1. + +It differs from the forward transform by the sign of the exponential +argument and the default normalization by :math:`1/n`. + +Type Promotion +-------------- + +`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and +``complex128`` arrays respectively. For an FFT implementation that does not +promote input arrays, see `scipy.fftpack`. + +Normalization +------------- + +The argument ``norm`` indicates which direction of the pair of direct/inverse +transforms is scaled and with what normalization factor. +The default normalization (``"backward"``) has the direct (forward) transforms +unscaled and the inverse (backward) transforms scaled by :math:`1/n`. It is +possible to obtain unitary transforms by setting the keyword argument ``norm`` +to ``"ortho"`` so that both direct and inverse transforms are scaled by +:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to +``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse +transforms unscaled (i.e. exactly opposite to the default ``"backward"``). +`None` is an alias of the default option ``"backward"`` for backward +compatibility. + +Real and Hermitian transforms +----------------------------- + +When the input is purely real, its transform is Hermitian, i.e., the +component at frequency :math:`f_k` is the complex conjugate of the +component at frequency :math:`-f_k`, which means that for real +inputs there is no information in the negative frequency components that +is not already available from the positive frequency components. +The family of `rfft` functions is +designed to operate on real inputs, and exploits this symmetry by +computing only the positive frequency components, up to and including the +Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex +output points. The inverses of this family assumes the same symmetry of +its input, and for an output of ``n`` points uses ``n/2+1`` input points. + +Correspondingly, when the spectrum is purely real, the signal is +Hermitian. The `hfft` family of functions exploits this symmetry by +using ``n/2+1`` complex points in the input (time) domain for ``n`` real +points in the frequency domain. 
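The size bookkeeping described above is easy to verify directly (a short sketch):

    import numpy as np

    x = np.random.rand(10)               # real input, n = 10
    X = np.fft.rfft(x)
    assert X.shape == (10 // 2 + 1,)     # n/2 + 1 complex outputs
    # Hermitian symmetry of the full transform: A[n-k] == conj(A[k])
    F = np.fft.fft(x)
    assert np.allclose(F[1:5], np.conj(F[-1:-5:-1]))
    # irfft recovers the n real points from the n/2 + 1 complex ones
    assert np.allclose(np.fft.irfft(X, n=10), x)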
+ +In higher dimensions, FFTs are used, e.g., for image analysis and +filtering. The computational efficiency of the FFT means that it can +also be a faster way to compute large convolutions, using the property +that a convolution in the time domain is equivalent to a point-by-point +multiplication in the frequency domain. + +Higher dimensions +----------------- + +In two dimensions, the DFT is defined as + +.. math:: + A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1} + a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\} + \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1, + +which extends in the obvious way to higher dimensions, and the inverses +in higher dimensions also extend in the same way. + +References +---------- + +.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + +.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P., + 2007, *Numerical Recipes: The Art of Scientific Computing*, ch. + 12-13. Cambridge Univ. Press, Cambridge, UK. + +Examples +-------- + +For examples, see the various functions. + +""" + +# TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should +# be deleted once downstream libraries move to `numpy.fft`. +from . import _helper, _pocketfft, helper +from ._helper import * +from ._pocketfft import * + +__all__ = _pocketfft.__all__.copy() # noqa: PLE0605 +__all__ += _helper.__all__ + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/python/numpy/fft/__init__.pyi b/python/numpy/fft/__init__.pyi new file mode 100644 index 000000000..54d0ea8c7 --- /dev/null +++ b/python/numpy/fft/__init__.pyi @@ -0,0 +1,43 @@ +from ._helper import ( + fftfreq, + fftshift, + ifftshift, + rfftfreq, +) +from ._pocketfft import ( + fft, + fft2, + fftn, + hfft, + ifft, + ifft2, + ifftn, + ihfft, + irfft, + irfft2, + irfftn, + rfft, + rfft2, + rfftn, +) + +__all__ = [ + "fft", + "ifft", + "rfft", + "irfft", + "hfft", + "ihfft", + "rfftn", + "irfftn", + "rfft2", + "irfft2", + "fft2", + "ifft2", + "fftn", + "ifftn", + "fftshift", + "ifftshift", + "fftfreq", + "rfftfreq", +] diff --git a/python/numpy/fft/__pycache__/__init__.cpython-312.pyc b/python/numpy/fft/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..dbfdbf612 Binary files /dev/null and b/python/numpy/fft/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/fft/__pycache__/_helper.cpython-312.pyc b/python/numpy/fft/__pycache__/_helper.cpython-312.pyc new file mode 100644 index 000000000..8d0feb850 Binary files /dev/null and b/python/numpy/fft/__pycache__/_helper.cpython-312.pyc differ diff --git a/python/numpy/fft/__pycache__/_pocketfft.cpython-312.pyc b/python/numpy/fft/__pycache__/_pocketfft.cpython-312.pyc new file mode 100644 index 000000000..4f068ad38 Binary files /dev/null and b/python/numpy/fft/__pycache__/_pocketfft.cpython-312.pyc differ diff --git a/python/numpy/fft/__pycache__/helper.cpython-312.pyc b/python/numpy/fft/__pycache__/helper.cpython-312.pyc new file mode 100644 index 000000000..04545ff5d Binary files /dev/null and b/python/numpy/fft/__pycache__/helper.cpython-312.pyc differ diff --git a/python/numpy/fft/_helper.py b/python/numpy/fft/_helper.py new file mode 100644 index 000000000..77adeac92 --- /dev/null +++ b/python/numpy/fft/_helper.py @@ -0,0 +1,235 @@ +""" +Discrete Fourier Transforms - _helper.py + +""" +from numpy._core import arange, asarray, empty, 
integer, roll +from numpy._core.overrides import array_function_dispatch, set_module + +# Created by Pearu Peterson, September 2002 + +__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] + +integer_types = (int, integer) + + +def _fftshift_dispatcher(x, axes=None): + return (x,) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def fftshift(x, axes=None): + """ + Shift the zero-frequency component to the center of the spectrum. + + This function swaps half-spaces for all axes listed (defaults to all). + Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to shift. Default is None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + ifftshift : The inverse of `fftshift`. + + Examples + -------- + >>> import numpy as np + >>> freqs = np.fft.fftfreq(10, 0.1) + >>> freqs + array([ 0., 1., 2., ..., -3., -2., -1.]) + >>> np.fft.fftshift(freqs) + array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) + + Shift the zero-frequency component only along the second axis: + + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.fftshift(freqs, axes=(1,)) + array([[ 2., 0., 1.], + [-4., 3., 4.], + [-1., -3., -2.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [dim // 2 for dim in x.shape] + elif isinstance(axes, integer_types): + shift = x.shape[axes] // 2 + else: + shift = [x.shape[ax] // 2 for ax in axes] + + return roll(x, shift, axes) + + +@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft') +def ifftshift(x, axes=None): + """ + The inverse of `fftshift`. Although identical for even-length `x`, the + functions differ by one sample for odd-length `x`. + + Parameters + ---------- + x : array_like + Input array. + axes : int or shape tuple, optional + Axes over which to calculate. Defaults to None, which shifts all axes. + + Returns + ------- + y : ndarray + The shifted array. + + See Also + -------- + fftshift : Shift zero-frequency component to the center of the spectrum. + + Examples + -------- + >>> import numpy as np + >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) + >>> freqs + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + >>> np.fft.ifftshift(np.fft.fftshift(freqs)) + array([[ 0., 1., 2.], + [ 3., 4., -4.], + [-3., -2., -1.]]) + + """ + x = asarray(x) + if axes is None: + axes = tuple(range(x.ndim)) + shift = [-(dim // 2) for dim in x.shape] + elif isinstance(axes, integer_types): + shift = -(x.shape[axes] // 2) + else: + shift = [-(x.shape[ax] // 2) for ax in axes] + + return roll(x, shift, axes) + + +@set_module('numpy.fft') +def fftfreq(n, d=1.0, device=None): + """ + Return the Discrete Fourier Transform sample frequencies. + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. 
+ device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + f : ndarray + Array of length `n` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) + >>> fourier = np.fft.fft(signal) + >>> n = signal.size + >>> timestep = 0.1 + >>> freq = np.fft.fftfreq(n, d=timestep) + >>> freq + array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + results = empty(n, int, device=device) + N = (n - 1) // 2 + 1 + p1 = arange(0, N, dtype=int, device=device) + results[:N] = p1 + p2 = arange(-(n // 2), 0, dtype=int, device=device) + results[N:] = p2 + return results * val + + +@set_module('numpy.fft') +def rfftfreq(n, d=1.0, device=None): + """ + Return the Discrete Fourier Transform sample frequencies + (for usage with rfft, irfft). + + The returned float array `f` contains the frequency bin centers in cycles + per unit of the sample spacing (with zero at the start). For instance, if + the sample spacing is in seconds, then the frequency unit is cycles/second. + + Given a window length `n` and a sample spacing `d`:: + + f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even + f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd + + Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`) + the Nyquist frequency component is considered to be positive. + + Parameters + ---------- + n : int + Window length. + d : scalar, optional + Sample spacing (inverse of the sampling rate). Defaults to 1. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + + Returns + ------- + f : ndarray + Array of length ``n//2 + 1`` containing the sample frequencies. + + Examples + -------- + >>> import numpy as np + >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) + >>> fourier = np.fft.rfft(signal) + >>> n = signal.size + >>> sample_rate = 100 + >>> freq = np.fft.fftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., ..., -30., -20., -10.]) + >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) + >>> freq + array([ 0., 10., 20., 30., 40., 50.]) + + """ + if not isinstance(n, integer_types): + raise ValueError("n should be an integer") + val = 1.0 / (n * d) + N = n // 2 + 1 + results = arange(0, N, dtype=int, device=device) + return results * val diff --git a/python/numpy/fft/_helper.pyi b/python/numpy/fft/_helper.pyi new file mode 100644 index 000000000..d06bda7ad --- /dev/null +++ b/python/numpy/fft/_helper.pyi @@ -0,0 +1,45 @@ +from typing import Any, Final, TypeVar, overload +from typing import Literal as L + +from numpy import complexfloating, floating, generic, integer +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ShapeLike, +) + +__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +### + +integer_types: Final[tuple[type[int], type[integer]]] = ... + +### + +@overload +def fftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ... +@overload +def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ... 
+
+#
+@overload
+def ifftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ...
+@overload
+def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...
+
+#
+@overload
+def fftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ...
+@overload
+def fftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ...
+
+#
+@overload
+def rfftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ...
+@overload
+def rfftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ...
diff --git a/python/numpy/fft/_pocketfft.py b/python/numpy/fft/_pocketfft.py
new file mode 100644
index 000000000..c7f2f6a8b
--- /dev/null
+++ b/python/numpy/fft/_pocketfft.py
@@ -0,0 +1,1693 @@
+"""
+Discrete Fourier Transforms
+
+Routines in this module:
+
+fft(a, n=None, axis=-1, norm="backward")
+ifft(a, n=None, axis=-1, norm="backward")
+rfft(a, n=None, axis=-1, norm="backward")
+irfft(a, n=None, axis=-1, norm="backward")
+hfft(a, n=None, axis=-1, norm="backward")
+ihfft(a, n=None, axis=-1, norm="backward")
+fftn(a, s=None, axes=None, norm="backward")
+ifftn(a, s=None, axes=None, norm="backward")
+rfftn(a, s=None, axes=None, norm="backward")
+irfftn(a, s=None, axes=None, norm="backward")
+fft2(a, s=None, axes=(-2,-1), norm="backward")
+ifft2(a, s=None, axes=(-2, -1), norm="backward")
+rfft2(a, s=None, axes=(-2,-1), norm="backward")
+irfft2(a, s=None, axes=(-2, -1), norm="backward")
+
+i = inverse transform
+r = transform of purely real data
+h = Hermite transform
+n = n-dimensional transform
+2 = 2-dimensional transform
+(Note: 2D routines are just nD routines with different default
+behavior.)
+
+"""
+__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
+           'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
+
+import functools
+import warnings
+
+from numpy._core import (
+    asarray,
+    conjugate,
+    empty_like,
+    overrides,
+    reciprocal,
+    result_type,
+    sqrt,
+    take,
+)
+from numpy.lib.array_utils import normalize_axis_index
+
+from . import _pocketfft_umath as pfu
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy.fft')
+
+
+# `inv_norm` is a float by which the result of the transform needs to be
+# divided. This replaces the original, more intuitive `fct` parameter to avoid
+# divisions by zero (or alternatively additional checks) in the case of
+# zero-length axes during its computation.
+def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None):
+    if n < 1:
+        raise ValueError(f"Invalid number of FFT data points ({n}) specified.")
+
+    # Calculate the normalization factor, passing in the array dtype to
+    # avoid precision loss in the possible sqrt or reciprocal. 
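+    # The norm is swapped below for inverse transforms (via
+    # _swap_direction), so the scaling branches that follow can be written
+    # once in terms of the forward convention: e.g. an ifft with the
+    # default "backward" norm maps to "forward" here and so picks up the
+    # 1/n factor.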
+    if not is_forward:
+        norm = _swap_direction(norm)
+
+    real_dtype = result_type(a.real.dtype, 1.0)
+    if norm is None or norm == "backward":
+        fct = 1
+    elif norm == "ortho":
+        fct = reciprocal(sqrt(n, dtype=real_dtype))
+    elif norm == "forward":
+        fct = reciprocal(n, dtype=real_dtype)
+    else:
+        raise ValueError(f'Invalid norm value {norm}; should be "backward", '
+                         '"ortho" or "forward".')
+
+    n_out = n
+    if is_real:
+        if is_forward:
+            ufunc = pfu.rfft_n_even if n % 2 == 0 else pfu.rfft_n_odd
+            n_out = n // 2 + 1
+        else:
+            ufunc = pfu.irfft
+    else:
+        ufunc = pfu.fft if is_forward else pfu.ifft
+
+    axis = normalize_axis_index(axis, a.ndim)
+
+    if out is None:
+        if is_real and not is_forward:  # irfft, complex in, real output.
+            out_dtype = real_dtype
+        else:  # Others, complex output.
+            out_dtype = result_type(a.dtype, 1j)
+        out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis + 1:],
+                         dtype=out_dtype)
+    elif ((shape := getattr(out, "shape", None)) is not None
+          and (len(shape) != a.ndim or shape[axis] != n_out)):
+        raise ValueError("output array has wrong shape.")
+
+    return ufunc(a, fct, axes=[(axis,), (), (axis,)], out=out)
+
+
+_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward",
+                       "ortho": "ortho", "forward": "backward"}
+
+
+def _swap_direction(norm):
+    try:
+        return _SWAP_DIRECTION_MAP[norm]
+    except KeyError:
+        raise ValueError(f'Invalid norm value {norm}; should be "backward", '
+                         '"ortho" or "forward".') from None
+
+
+def _fft_dispatcher(a, n=None, axis=None, norm=None, out=None):
+    return (a, out)
+
+
+@array_function_dispatch(_fft_dispatcher)
+def fft(a, n=None, axis=-1, norm=None, out=None):
+    """
+    Compute the one-dimensional discrete Fourier Transform.
+
+    This function computes the one-dimensional *n*-point discrete Fourier
+    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
+    algorithm [CT].
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    n : int, optional
+        Length of the transformed axis of the output.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+           The "backward", "forward" values were added.
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : for definition of the DFT and conventions used.
+    ifft : The inverse of `fft`.
+    fft2 : The two-dimensional FFT.
+    fftn : The *n*-dimensional FFT.
+    rfftn : The *n*-dimensional FFT of real input.
+    fftfreq : Frequency bins for given FFT parameters.
+
+    Notes
+    -----
+    FFT (Fast Fourier Transform) refers to a way the discrete Fourier
+    Transform (DFT) can be calculated efficiently, by using symmetries in the
+    calculated terms. 
The symmetry is highest when `n` is a power of 2, and + the transform is therefore most efficient for these sizes. + + The DFT is defined, with the conventions used in this implementation, in + the documentation for the `numpy.fft` module. + + References + ---------- + .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the + machine calculation of complex Fourier series," *Math. Comput.* + 19: 297-301. + + Examples + -------- + >>> import numpy as np + >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) + array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, + 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, + -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, + 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) + + In this example, real input has an FFT which is Hermitian, i.e., symmetric + in the real part and anti-symmetric in the imaginary part, as described in + the `numpy.fft` documentation: + + >>> import matplotlib.pyplot as plt + >>> t = np.arange(256) + >>> sp = np.fft.fft(np.sin(t)) + >>> freq = np.fft.fftfreq(t.shape[-1]) + >>> _ = plt.plot(freq, sp.real, freq, sp.imag) + >>> plt.show() + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, False, True, norm, out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def ifft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the one-dimensional inverse discrete Fourier Transform. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier transform computed by `fft`. In other words, + ``ifft(fft(a)) == a`` to within numerical accuracy. + For a general description of the algorithm and definitions, + see `numpy.fft`. + + The input should be ordered in the same way as is returned by `fft`, + i.e., + + * ``a[0]`` should contain the zero frequency term, + * ``a[1:n//2]`` should contain the positive-frequency terms, + * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in + increasing order starting from the most negative frequency. + + For an even number of input points, ``A[n//2]`` represents the sum of + the values at the positive and negative Nyquist frequencies, as the two + are aliased together. See `numpy.fft` for details. + + Parameters + ---------- + a : array_like + Input array, can be complex. + n : int, optional + Length of the transformed axis of the output. + If `n` is smaller than the length of the input, the input is cropped. + If it is larger, the input is padded with zeros. If `n` is not given, + the length of the input along the axis specified by `axis` is used. + See notes about padding issues. + axis : int, optional + Axis over which to compute the inverse DFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : complex ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. 
+
+    See Also
+    --------
+    numpy.fft : An introduction, with definitions and general explanations.
+    fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse.
+    ifft2 : The two-dimensional inverse FFT.
+    ifftn : The n-dimensional inverse FFT.
+
+    Notes
+    -----
+    If the input parameter `n` is larger than the size of the input, the input
+    is padded by appending zeros at the end. Even though this is the common
+    approach, it might lead to surprising results. If a different padding is
+    desired, it must be performed before calling `ifft`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.fft.ifft([0, 4, 0, 0])
+    array([ 1.+0.j,  0.+1.j, -1.+0.j,  0.-1.j]) # may vary
+
+    Create and plot a band-limited signal with random phases:
+
+    >>> import matplotlib.pyplot as plt
+    >>> t = np.arange(400)
+    >>> n = np.zeros((400,), dtype=complex)
+    >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
+    >>> s = np.fft.ifft(n)
+    >>> plt.plot(t, s.real, label='real')
+    [<matplotlib.lines.Line2D object at ...>]
+    >>> plt.plot(t, s.imag, '--', label='imaginary')
+    [<matplotlib.lines.Line2D object at ...>]
+    >>> plt.legend()
+    <matplotlib.legend.Legend object at ...>
+    >>> plt.show()
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = a.shape[axis]
+    output = _raw_fft(a, n, axis, False, False, norm, out=out)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def rfft(a, n=None, axis=-1, norm=None, out=None):
+    """
+    Compute the one-dimensional discrete Fourier Transform for real input.
+
+    This function computes the one-dimensional *n*-point discrete Fourier
+    Transform (DFT) of a real-valued array by means of an efficient algorithm
+    called the Fast Fourier Transform (FFT).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array
+    n : int, optional
+        Number of points along transformation axis in the input to use.
+        If `n` is smaller than the length of the input, the input is cropped.
+        If it is larger, the input is padded with zeros. If `n` is not given,
+        the length of the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the FFT. If not given, the last axis is
+        used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+           The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        If `n` is even, the length of the transformed axis is ``(n/2)+1``.
+        If `n` is odd, the length is ``(n+1)/2``.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See Also
+    --------
+    numpy.fft : For definition of the DFT and conventions used.
+    irfft : The inverse of `rfft`.
+    fft : The one-dimensional FFT of general (complex) input.
+    fftn : The *n*-dimensional FFT.
+    rfftn : The *n*-dimensional FFT of real input.
+
+    Notes
+    -----
+    When the DFT is computed for purely real input, the output is
+    Hermitian-symmetric, i.e. the negative frequency terms are just the complex
+    conjugates of the corresponding positive-frequency terms, and the
+    negative-frequency terms are therefore redundant. 
This function does not + compute the negative frequency terms, and the length of the transformed + axis of the output is therefore ``n//2 + 1``. + + When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains + the zero-frequency term 0*fs, which is real due to Hermitian symmetry. + + If `n` is even, ``A[-1]`` contains the term representing both positive + and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely + real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains + the largest positive frequency (fs/2*(n-1)/n), and is complex in the + general case. + + If the input `a` contains an imaginary part, it is silently discarded. + + Examples + -------- + >>> import numpy as np + >>> np.fft.fft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary + >>> np.fft.rfft([0, 1, 0, 0]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary + + Notice how the final element of the `fft` output is the complex conjugate + of the second element, for real input. For `rfft`, this symmetry is + exploited to compute only the non-negative frequency terms. + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + output = _raw_fft(a, n, axis, True, True, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def irfft(a, n=None, axis=-1, norm=None, out=None): + """ + Computes the inverse of `rfft`. + + This function computes the inverse of the one-dimensional *n*-point + discrete Fourier Transform of real input computed by `rfft`. + In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical + accuracy. (See Notes below for why ``len(a)`` is necessary here.) + + The input is expected to be in the form returned by `rfft`, i.e. the + real zero-frequency term followed by the complex positive frequency terms + in order of increasing frequency. Since the discrete Fourier Transform of + real input is Hermitian-symmetric, the negative frequency terms are taken + to be the complex conjugates of the corresponding positive frequency terms. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. + For `n` output points, ``n//2+1`` input points are necessary. If the + input is longer than this, it is cropped. If it is shorter than this, + it is padded with zeros. If `n` is not given, it is taken to be + ``2*(m-1)`` where ``m`` is the length of the input along the axis + specified by `axis`. + axis : int, optional + Axis over which to compute the inverse FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*(m-1)`` where ``m`` is the length of the transformed axis of the + input. To get an odd number of output points, `n` must be specified. + + Raises + ------ + IndexError + If `axis` is not a valid axis of `a`. 
+ + See Also + -------- + numpy.fft : For definition of the DFT and conventions used. + rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. + fft : The one-dimensional FFT. + irfft2 : The inverse of the two-dimensional FFT of real input. + irfftn : The inverse of the *n*-dimensional FFT of real input. + + Notes + ----- + Returns the real valued `n`-point inverse discrete Fourier transform + of `a`, where `a` contains the non-negative frequency terms of a + Hermitian-symmetric sequence. `n` is the length of the result, not the + input. + + If you specify an `n` such that `a` must be zero-padded or truncated, the + extra/removed values will be added/removed at high frequencies. One can + thus resample a series to `m` points via Fourier interpolation by: + ``a_resamp = irfft(rfft(a), m)``. + + The correct interpretation of the hermitian input depends on the length of + the original data, as given by `n`. This is because each input shape could + correspond to either an odd or even length signal. By default, `irfft` + assumes an even output length which puts the last entry at the Nyquist + frequency; aliasing with its symmetric counterpart. By Hermitian symmetry, + the value is thus treated as purely real. To avoid losing information, the + correct length of the real input **must** be given. + + Examples + -------- + >>> import numpy as np + >>> np.fft.ifft([1, -1j, -1, 1j]) + array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary + >>> np.fft.irfft([1, -1j, -1]) + array([0., 1., 0., 0.]) + + Notice how the last term in the input to the ordinary `ifft` is the + complex conjugate of the second term, and the output has zero imaginary + part everywhere. When calling `irfft`, the negative frequencies are not + specified, and the output array is purely real. + + """ + a = asarray(a) + if n is None: + n = (a.shape[axis] - 1) * 2 + output = _raw_fft(a, n, axis, True, False, norm, out=out) + return output + + +@array_function_dispatch(_fft_dispatcher) +def hfft(a, n=None, axis=-1, norm=None, out=None): + """ + Compute the FFT of a signal that has Hermitian symmetry, i.e., a real + spectrum. + + Parameters + ---------- + a : array_like + The input array. + n : int, optional + Length of the transformed axis of the output. For `n` output + points, ``n//2 + 1`` input points are necessary. If the input is + longer than this, it is cropped. If it is shorter than this, it is + padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)`` + where ``m`` is the length of the input along the axis specified by + `axis`. + axis : int, optional + Axis over which to compute the FFT. If not given, the last + axis is used. + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype. + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The truncated or zero-padded input, transformed along the axis + indicated by `axis`, or the last one if `axis` is not specified. + The length of the transformed axis is `n`, or, if `n` is not given, + ``2*m - 2`` where ``m`` is the length of the transformed axis of + the input. 
To get an odd number of output points, `n` must be
+        specified, for instance as ``2*m - 1`` in the typical case.
+
+    Raises
+    ------
+    IndexError
+        If `axis` is not a valid axis of `a`.
+
+    See also
+    --------
+    rfft : Compute the one-dimensional FFT for real input.
+    ihfft : The inverse of `hfft`.
+
+    Notes
+    -----
+    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
+    opposite case: here the signal has Hermitian symmetry in the time
+    domain and is real in the frequency domain. So here it's `hfft` for
+    which you must supply the length of the result if it is to be odd.
+
+    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
+    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
+
+    The correct interpretation of the hermitian input depends on the length of
+    the original data, as given by `n`. This is because each input shape could
+    correspond to either an odd or even length signal. By default, `hfft`
+    assumes an even output length which puts the last entry at the Nyquist
+    frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
+    the value is thus treated as purely real. To avoid losing information, the
+    shape of the full signal **must** be given.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> signal = np.array([1, 2, 3, 4, 3, 2])
+    >>> np.fft.fft(signal)
+    array([15.+0.j, -4.+0.j,  0.+0.j, -1.-0.j,  0.+0.j, -4.+0.j]) # may vary
+    >>> np.fft.hfft(signal[:4]) # Input first half of signal
+    array([15., -4.,  0., -1.,  0., -4.])
+    >>> np.fft.hfft(signal, 6)  # Input entire signal and truncate
+    array([15., -4.,  0., -1.,  0., -4.])
+
+    >>> signal = np.array([[1, 1.j], [-1.j, 2]])
+    >>> np.conj(signal.T) - signal   # check Hermitian symmetry
+    array([[ 0.-0.j, -0.+0.j], # may vary
+           [ 0.+0.j,  0.-0.j]])
+    >>> freq_spectrum = np.fft.hfft(signal)
+    >>> freq_spectrum
+    array([[ 1.,  1.],
+           [ 2., -2.]])
+
+    """
+    a = asarray(a)
+    if n is None:
+        n = (a.shape[axis] - 1) * 2
+    new_norm = _swap_direction(norm)
+    output = irfft(conjugate(a), n, axis, norm=new_norm, out=out)
+    return output
+
+
+@array_function_dispatch(_fft_dispatcher)
+def ihfft(a, n=None, axis=-1, norm=None, out=None):
+    """
+    Compute the inverse FFT of a signal that has Hermitian symmetry.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    n : int, optional
+        Length of the inverse FFT, the number of points along
+        transformation axis in the input to use. If `n` is smaller than
+        the length of the input, the input is cropped. If it is larger,
+        the input is padded with zeros. If `n` is not given, the length of
+        the input along the axis specified by `axis` is used.
+    axis : int, optional
+        Axis over which to compute the inverse FFT. If not given, the last
+        axis is used.
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+           The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axis
+        indicated by `axis`, or the last one if `axis` is not specified.
+        The length of the transformed axis is ``n//2 + 1``. 
+ + See also + -------- + hfft, irfft + + Notes + ----- + `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the + opposite case: here the signal has Hermitian symmetry in the time + domain and is real in the frequency domain. So here it's `hfft` for + which you must supply the length of the result if it is to be odd: + + * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error, + * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error. + + Examples + -------- + >>> import numpy as np + >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) + >>> np.fft.ifft(spectrum) + array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary + >>> np.fft.ihfft(spectrum) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary + + """ + a = asarray(a) + if n is None: + n = a.shape[axis] + new_norm = _swap_direction(norm) + out = rfft(a, n, axis, norm=new_norm, out=out) + return conjugate(out, out=out) + + +def _cook_nd_args(a, s=None, axes=None, invreal=0): + if s is None: + shapeless = True + if axes is None: + s = list(a.shape) + else: + s = take(a.shape, axes) + else: + shapeless = False + s = list(s) + if axes is None: + if not shapeless: + msg = ("`axes` should not be `None` if `s` is not `None` " + "(Deprecated in NumPy 2.0). In a future version of NumPy, " + "this will raise an error and `s[i]` will correspond to " + "the size along the transformed axis specified by " + "`axes[i]`. To retain current behaviour, pass a sequence " + "[0, ..., k-1] to `axes` for an array of dimension k.") + warnings.warn(msg, DeprecationWarning, stacklevel=3) + axes = list(range(-len(s), 0)) + if len(s) != len(axes): + raise ValueError("Shape and axes have different lengths.") + if invreal and shapeless: + s[-1] = (a.shape[axes[-1]] - 1) * 2 + if None in s: + msg = ("Passing an array containing `None` values to `s` is " + "deprecated in NumPy 2.0 and will raise an error in " + "a future version of NumPy. To use the default behaviour " + "of the corresponding 1-D transform, pass the value matching " + "the default for its `n` parameter. To use the default " + "behaviour for every axis, the `s` argument can be omitted.") + warnings.warn(msg, DeprecationWarning, stacklevel=3) + # use the whole input array along axis `i` if `s[i] == -1` + s = [a.shape[_a] if _s == -1 else _s for _s, _a in zip(s, axes)] + return s, axes + + +def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None, out=None): + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + itl = list(range(len(axes))) + itl.reverse() + for ii in itl: + a = function(a, n=s[ii], axis=axes[ii], norm=norm, out=out) + return a + + +def _fftn_dispatcher(a, s=None, axes=None, norm=None, out=None): + return (a, out) + + +@array_function_dispatch(_fftn_dispatcher) +def fftn(a, s=None, axes=None, norm=None, out=None): + """ + Compute the N-dimensional discrete Fourier Transform. + + This function computes the *N*-dimensional discrete Fourier Transform over + any number of axes in an *M*-dimensional array by means of the Fast Fourier + Transform (FFT). + + Parameters + ---------- + a : array_like + Input array, can be complex. + s : sequence of ints, optional + Shape (length of each transformed axis) of the output + (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). + This corresponds to ``n`` for ``fft(x, n)``. + Along any axis, if the given shape is smaller than that of the input, + the input is cropped. If it is larger, the input is padded with zeros. + + .. 
versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` means that the transform over that axis is
+        performed multiple times.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+           The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    rfftn : The *n*-dimensional FFT of real input.
+    fft2 : The two-dimensional FFT.
+    fftshift : Shifts zero-frequency terms to centre of array
+
+    Notes
+    -----
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of all axes, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    See `numpy.fft` for details, definitions and conventions used.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.mgrid[:3, :3, :3][0]
+    >>> np.fft.fftn(a, axes=(1, 2))
+    array([[[ 0.+0.j,   0.+0.j,   0.+0.j], # may vary
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]],
+           [[ 9.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]],
+           [[18.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j],
+            [ 0.+0.j,   0.+0.j,   0.+0.j]]])
+    >>> np.fft.fftn(a, (2, 2), axes=(0, 1))
+    array([[[ 2.+0.j,  2.+0.j,  2.+0.j], # may vary
+            [ 0.+0.j,  0.+0.j,  0.+0.j]],
+           [[-2.+0.j, -2.+0.j, -2.+0.j],
+            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
+
+    >>> import matplotlib.pyplot as plt
+    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
+    ...                      2 * np.pi * np.arange(200) / 34)
+    >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
+    >>> FS = np.fft.fftn(S)
+    >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, fft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifftn(a, s=None, axes=None, norm=None, out=None):
+    """
+    Compute the N-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the N-dimensional discrete
+    Fourier Transform over any number of axes in an M-dimensional array by
+    means of the Fast Fourier Transform (FFT). In other words,
+    ``ifftn(fftn(a)) == a`` to within numerical accuracy.
+    For a description of the definitions and conventions used, see `numpy.fft`.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fftn`, i.e. it should have the term for zero frequency
+    in all axes in the low-order corner, the positive frequency terms in the
+    first half of all axes, the term for the Nyquist frequency in the middle
+    of all axes and the negative frequency terms in the second half of all
+    axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``ifft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used. See notes for issue on `ifft` zero padding.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the IFFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` means that the inverse transform over that
+        axis is performed multiple times.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+           The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` or `a`,
+        as explained in the parameters section above.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
+    ifft : The one-dimensional inverse FFT.
+    ifft2 : The two-dimensional inverse FFT.
+    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
+        of array.
+
+    Notes
+    -----
+    See `numpy.fft` for definitions and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension. Although this is the common
+    approach, it might lead to surprising results. If another form of zero
+    padding is desired, it must be performed before `ifftn` is called.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.eye(4)
+    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
+
+    Create and plot an image with band-limited frequency content:
+
+    >>> import matplotlib.pyplot as plt
+    >>> n = np.zeros((200,200), dtype=complex)
+    >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
+    >>> im = np.fft.ifftn(n).real
+    >>> plt.imshow(im)
+    <matplotlib.image.AxesImage object at 0x...>
+    >>> plt.show()
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def fft2(a, s=None, axes=(-2, -1), norm=None, out=None):
+    """
+    Compute the 2-dimensional discrete Fourier Transform.
+
+    This function computes the *n*-dimensional discrete Fourier Transform
+    over any axes in an *M*-dimensional array by means of the
+    Fast Fourier Transform (FFT). By default, the transform is computed over
+    the last two axes of the input array, i.e., a 2-dimensional FFT.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        This corresponds to ``n`` for ``fft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last two
+        axes are used. A repeated index in `axes` means the transform over
+        that axis is performed multiple times. A one-element sequence means
+        that a one-dimensional FFT is performed. Default: ``(-2, -1)``.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+           The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence only the
+        last axis can have ``s`` not equal to the shape at that axis).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    ifft2 : The inverse two-dimensional FFT.
+    fft : The one-dimensional FFT.
+    fftn : The *n*-dimensional FFT.
+    fftshift : Shifts zero-frequency terms to the center of the array.
+        For two-dimensional input, swaps first and third quadrants, and second
+        and fourth quadrants.
+
+    Notes
+    -----
+    `fft2` is just `fftn` with a different default for `axes`.
+
+    The output, analogously to `fft`, contains the term for zero frequency in
+    the low-order corner of the transformed axes, the positive frequency terms
+    in the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    the axes, in order of decreasingly negative frequency.
+
+    See `fftn` for details and a plotting example, and `numpy.fft` for
+    definitions and conventions used.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.mgrid[:5, :5][0]
+    >>> np.fft.fft2(a)
+    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        , # may vary
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ],
+           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
+              0.  +0.j        ,   0.  +0.j        ]])
+
+    """
+    return _raw_fftnd(a, s, axes, fft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None):
+    """
+    Compute the 2-dimensional inverse discrete Fourier Transform.
+
+    This function computes the inverse of the 2-dimensional discrete Fourier
+    Transform over any number of axes in an M-dimensional array by means of
+    the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
+    to within numerical accuracy. By default, the inverse transform is
+    computed over the last two axes of the input array.
+
+    The input, analogously to `ifft`, should be ordered in the same way as is
+    returned by `fft2`, i.e. it should have the term for zero frequency
+    in the low-order corner of the two axes, the positive frequency terms in
+    the first half of these axes, the term for the Nyquist frequency in the
+    middle of the axes and the negative frequency terms in the second half of
+    both axes, in order of decreasingly negative frequency.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, can be complex.
+    s : sequence of ints, optional
+        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
+        ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
+        Along each axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used. See notes for issue on `ifft` zero padding.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last two
+        axes are used. A repeated index in `axes` means the transform over
+        that axis is performed multiple times. A one-element sequence means
+        that a one-dimensional FFT is performed. Default: ``(-2, -1)``.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+           The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or the last two axes if `axes` is not given.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length, or `axes` not given and
+        ``len(s) != 2``.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    numpy.fft : Overall view of discrete Fourier transforms, with definitions
+        and conventions used.
+    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
+    ifftn : The inverse of the *n*-dimensional FFT.
+    fft : The one-dimensional FFT.
+    ifft : The one-dimensional inverse FFT.
+
+    Notes
+    -----
+    `ifft2` is just `ifftn` with a different default for `axes`.
+
+    See `ifftn` for details and a plotting example, and `numpy.fft` for
+    definition and conventions used.
+
+    Zero-padding, analogously with `ifft`, is performed by appending zeros to
+    the input along the specified dimension. Although this is the common
+    approach, it might lead to surprising results. If another form of zero
+    padding is desired, it must be performed before `ifft2` is called.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = 4 * np.eye(4)
+    >>> np.fft.ifft2(a)
+    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
+           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
+           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
+           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])
+
+    """
+    return _raw_fftnd(a, s, axes, ifft, norm, out=out)
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def rfftn(a, s=None, axes=None, norm=None, out=None):
+    """
+    Compute the N-dimensional discrete Fourier Transform for real input.
+
+    This function computes the N-dimensional discrete Fourier Transform over
+    any number of axes in an M-dimensional real array by means of the Fast
+    Fourier Transform (FFT). By default, all axes are transformed, with the
+    real transform performed over the last axis, while the remaining
+    transforms are complex.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, taken to be real.
+    s : sequence of ints, optional
+        Shape (length along each transformed axis) to use from the input.
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
+        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
+        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
+        Along any axis, if the given shape is smaller than that of the input,
+        the input is cropped. If it is larger, the input is padded with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes specified
+        by `axes` is used.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the FFT. If not given, the last ``len(s)``
+        axes are used, or all axes if `s` is also not specified.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+           The "backward", "forward" values were added.
+
+    out : complex ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for all axes (and hence is
+        incompatible with passing in all but the trivial ``s``).
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : complex ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+        The length of the last axis transformed will be ``s[-1]//2+1``,
+        while the remaining transformed axes will have lengths according to
+        `s`, or unchanged from the input.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different length.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
+        of real input.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    rfft : The one-dimensional FFT of real input.
+    fftn : The n-dimensional FFT.
+    rfft2 : The two-dimensional FFT of real input.
+
+    Notes
+    -----
+    The transform for real input is performed over the last transformation
+    axis, as by `rfft`, then the transform over the remaining axes is
+    performed as by `fftn`. The order of the output is as for `rfft` for the
+    final transformation axis, and as for `fftn` for the remaining
+    transformation axes.
+
+    See `fft` for details, definitions and conventions used.
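+
+    For instance, this decomposition implies that for a real 3-D array
+    ``x``, ``rfftn(x)`` should match ``fftn(rfft(x, axis=-1), axes=(0, 1))``
+    up to rounding error.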
+ + Examples + -------- + >>> import numpy as np + >>> a = np.ones((2, 2, 2)) + >>> np.fft.rfftn(a) + array([[[8.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + >>> np.fft.rfftn(a, axes=(2, 0)) + array([[[4.+0.j, 0.+0.j], # may vary + [4.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) + + """ + a = asarray(a) + s, axes = _cook_nd_args(a, s, axes) + a = rfft(a, s[-1], axes[-1], norm, out=out) + for ii in range(len(axes) - 2, -1, -1): + a = fft(a, s[ii], axes[ii], norm, out=out) + return a + + +@array_function_dispatch(_fftn_dispatcher) +def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None): + """ + Compute the 2-dimensional FFT of a real array. + + Parameters + ---------- + a : array + Input array, taken to be real. + s : sequence of ints, optional + Shape of the FFT. + + .. versionchanged:: 2.0 + + If it is ``-1``, the whole input is used (no padding/trimming). + + .. deprecated:: 2.0 + + If `s` is not ``None``, `axes` must not be ``None`` either. + + .. deprecated:: 2.0 + + `s` must contain only ``int`` s, not ``None`` values. ``None`` + values currently mean that the default value for ``n`` is used + in the corresponding 1-D transform, but this behaviour is + deprecated. + + axes : sequence of ints, optional + Axes over which to compute the FFT. Default: ``(-2, -1)``. + + .. deprecated:: 2.0 + + If `s` is specified, the corresponding `axes` to be transformed + must not be ``None``. + + norm : {"backward", "ortho", "forward"}, optional + Normalization mode (see `numpy.fft`). Default is "backward". + Indicates which direction of the forward/backward pair of transforms + is scaled and with what normalization factor. + + .. versionadded:: 1.20.0 + + The "backward", "forward" values were added. + + out : complex ndarray, optional + If provided, the result will be placed in this array. It should be + of the appropriate shape and dtype for the last inverse transform. + incompatible with passing in all but the trivial ``s``). + + .. versionadded:: 2.0.0 + + Returns + ------- + out : ndarray + The result of the real 2-D FFT. + + See Also + -------- + rfftn : Compute the N-dimensional discrete Fourier Transform for real + input. + + Notes + ----- + This is really just `rfftn` with different default behavior. + For more details see `rfftn`. + + Examples + -------- + >>> import numpy as np + >>> a = np.mgrid[:5, :5][0] + >>> np.fft.rfft2(a) + array([[ 50. +0.j , 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]]) + """ + return rfftn(a, s, axes, norm, out=out) + + +@array_function_dispatch(_fftn_dispatcher) +def irfftn(a, s=None, axes=None, norm=None, out=None): + """ + Computes the inverse of `rfftn`. + + This function computes the inverse of the N-dimensional discrete + Fourier Transform for real input over any number of axes in an + M-dimensional array by means of the Fast Fourier Transform (FFT). In + other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical + accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, + and for the same reason.) + + The input should be ordered in the same way as is returned by `rfftn`, + i.e. as for `irfft` for the final transformation axis, and as for `ifftn` + along all the other axes. + + Parameters + ---------- + a : array_like + Input array. 
+    s : sequence of ints, optional
+        Shape (length of each transformed axis) of the output
+        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
+        number of input points used along this axis, except for the last axis,
+        where ``s[-1]//2+1`` points of the input are used.
+        Along any axis, if the shape indicated by `s` is smaller than that of
+        the input, the input is cropped. If it is larger, the input is padded
+        with zeros.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        If `s` is not given, the shape of the input along the axes
+        specified by `axes` is used, except for the last axis, which is taken
+        to be ``2*(m-1)`` where ``m`` is the length of the input along that
+        axis.
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        Axes over which to compute the inverse FFT. If not given, the last
+        `len(s)` axes are used, or all axes if `s` is also not specified.
+        Repeated indices in `axes` mean that the inverse transform over that
+        axis is performed multiple times.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must be explicitly specified too.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for the last transformation.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : ndarray
+        The truncated or zero-padded input, transformed along the axes
+        indicated by `axes`, or by a combination of `s` and `a`,
+        as explained in the parameters section above.
+        The length of each transformed axis is as given by the corresponding
+        element of `s`, or the length of the input in every axis except for the
+        last one if `s` is not given. In the final transformed axis the length
+        of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
+        length of the final transformed axis of the input. To get an odd
+        number of output points in the final axis, `s` must be specified.
+
+    Raises
+    ------
+    ValueError
+        If `s` and `axes` have different lengths.
+    IndexError
+        If an element of `axes` is larger than the number of axes of `a`.
+
+    See Also
+    --------
+    rfftn : The forward n-dimensional FFT of real input,
+            of which `irfftn` is the inverse.
+    fft : The one-dimensional FFT, with definitions and conventions used.
+    irfft : The inverse of the one-dimensional FFT of real input.
+    irfft2 : The inverse of the two-dimensional FFT of real input.
+
+    Notes
+    -----
+    See `fft` for definitions and conventions used.
+
+    See `rfft` for definitions and conventions used for real input.
+
+    The correct interpretation of the hermitian input depends on the shape of
+    the original data, as given by `s`. This is because each input shape could
+    correspond to either an odd or even length signal. 
By default, `irfftn`
+    assumes an even output length, which puts the last entry at the Nyquist
+    frequency, aliased with its symmetric counterpart. When performing the
+    final complex to real transform, the last value is thus treated as purely
+    real. To avoid losing information, the correct shape of the real input
+    **must** be given.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.zeros((3, 2, 2))
+    >>> a[0, 0, 0] = 3 * 2 * 2
+    >>> np.fft.irfftn(a)
+    array([[[1., 1.],
+            [1., 1.]],
+           [[1., 1.],
+            [1., 1.]],
+           [[1., 1.],
+            [1., 1.]]])
+
+    """
+    a = asarray(a)
+    s, axes = _cook_nd_args(a, s, axes, invreal=1)
+    for ii in range(len(axes) - 1):
+        a = ifft(a, s[ii], axes[ii], norm)
+    a = irfft(a, s[-1], axes[-1], norm, out=out)
+    return a
+
+
+@array_function_dispatch(_fftn_dispatcher)
+def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None):
+    """
+    Computes the inverse of `rfft2`.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
+    s : sequence of ints, optional
+        Shape of the real output to the inverse FFT.
+
+        .. versionchanged:: 2.0
+
+            If it is ``-1``, the whole input is used (no padding/trimming).
+
+        .. deprecated:: 2.0
+
+            If `s` is not ``None``, `axes` must not be ``None`` either.
+
+        .. deprecated:: 2.0
+
+            `s` must contain only ``int`` s, not ``None`` values. ``None``
+            values currently mean that the default value for ``n`` is used
+            in the corresponding 1-D transform, but this behaviour is
+            deprecated.
+
+    axes : sequence of ints, optional
+        The axes over which to compute the inverse FFT.
+        Default: ``(-2, -1)``, the last two axes.
+
+        .. deprecated:: 2.0
+
+            If `s` is specified, the corresponding `axes` to be transformed
+            must not be ``None``.
+
+    norm : {"backward", "ortho", "forward"}, optional
+        Normalization mode (see `numpy.fft`). Default is "backward".
+        Indicates which direction of the forward/backward pair of transforms
+        is scaled and with what normalization factor.
+
+        .. versionadded:: 1.20.0
+
+            The "backward", "forward" values were added.
+
+    out : ndarray, optional
+        If provided, the result will be placed in this array. It should be
+        of the appropriate shape and dtype for the last transformation.
+
+        .. versionadded:: 2.0.0
+
+    Returns
+    -------
+    out : ndarray
+        The result of the inverse real 2-D FFT.
+
+    See Also
+    --------
+    rfft2 : The forward two-dimensional FFT of real input,
+            of which `irfft2` is the inverse.
+    rfft : The one-dimensional FFT for real input.
+    irfft : The inverse of the one-dimensional FFT of real input.
+    irfftn : Compute the inverse of the N-dimensional FFT of real input.
+
+    Notes
+    -----
+    This is really `irfftn` with different defaults.
+    For more details see `irfftn`.
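The shape ambiguity described for `irfftn` applies here as well; a small sketch of why passing `s` matters for odd-length data:

    import numpy as np

    x = np.random.rand(5, 5)
    X = np.fft.rfft2(x)                       # shape (5, 3)
    # Without `s`, an even last axis of length 2 * (3 - 1) == 4 is assumed.
    print(np.fft.irfft2(X).shape)             # (5, 4) -- odd length lost
    # Supplying the original shape recovers the odd-length signal.
    print(np.fft.irfft2(X, s=x.shape).shape)  # (5, 5)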
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.mgrid[:5, :5][0]
+    >>> A = np.fft.rfft2(a)
+    >>> np.fft.irfft2(A, s=a.shape)
+    array([[0., 0., 0., 0., 0.],
+           [1., 1., 1., 1., 1.],
+           [2., 2., 2., 2., 2.],
+           [3., 3., 3., 3., 3.],
+           [4., 4., 4., 4., 4.]])
+    """
+    return irfftn(a, s, axes, norm, out=out)
diff --git a/python/numpy/fft/_pocketfft.pyi b/python/numpy/fft/_pocketfft.pyi
new file mode 100644
index 000000000..215cf14d1
--- /dev/null
+++ b/python/numpy/fft/_pocketfft.pyi
@@ -0,0 +1,138 @@
+from collections.abc import Sequence
+from typing import Literal as L
+from typing import TypeAlias
+
+from numpy import complex128, float64
+from numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co
+
+__all__ = [
+    "fft",
+    "ifft",
+    "rfft",
+    "irfft",
+    "hfft",
+    "ihfft",
+    "rfftn",
+    "irfftn",
+    "rfft2",
+    "irfft2",
+    "fft2",
+    "ifft2",
+    "fftn",
+    "ifftn",
+]
+
+_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None
+
+def fft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def ifft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def rfft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def irfft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[float64] | None = ...,
+) -> NDArray[float64]: ...
+
+# Input array must be compatible with `np.conjugate`
+def hfft(
+    a: _ArrayLikeNumber_co,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[float64] | None = ...,
+) -> NDArray[float64]: ...
+
+def ihfft(
+    a: ArrayLike,
+    n: int | None = ...,
+    axis: int = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def fftn(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def ifftn(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def rfftn(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def irfftn(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[float64] | None = ...,
+) -> NDArray[float64]: ...
+
+def fft2(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def ifft2(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
+
+def rfft2(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[complex128] | None = ...,
+) -> NDArray[complex128]: ...
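# Editorial sketch of how a type checker reads the stubs above: every
# transform is annotated to return double precision (complex128/float64),
# so a checker infers, e.g.:
#
#     import numpy as np
#     x = np.ones(8, dtype=np.float32)
#     reveal_type(np.fft.rfft(x))  # ndarray[Any, dtype[complex128]]
#
# This is a simplification at the stub level: at runtime the pocketfft
# backend generally preserves single precision (complex64 for this input).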
+
+def irfft2(
+    a: ArrayLike,
+    s: Sequence[int] | None = ...,
+    axes: Sequence[int] | None = ...,
+    norm: _NormKind = ...,
+    out: NDArray[float64] | None = ...,
+) -> NDArray[float64]: ...
diff --git a/python/numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so b/python/numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so
new file mode 100644
index 000000000..df57850de
Binary files /dev/null and b/python/numpy/fft/_pocketfft_umath.cpython-312-x86_64-linux-gnu.so differ
diff --git a/python/numpy/fft/helper.py b/python/numpy/fft/helper.py
new file mode 100644
index 000000000..08d5662c6
--- /dev/null
+++ b/python/numpy/fft/helper.py
@@ -0,0 +1,17 @@
+def __getattr__(attr_name):
+    import warnings
+
+    from numpy.fft import _helper
+    ret = getattr(_helper, attr_name, None)
+    if ret is None:
+        raise AttributeError(
+            f"module 'numpy.fft.helper' has no attribute {attr_name}")
+    warnings.warn(
+        "The numpy.fft.helper module has been made private and renamed to "
+        "numpy.fft._helper. All four functions exported by it (i.e. fftshift, "
+        "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. "
+        f"Please use numpy.fft.{attr_name} instead.",
+        DeprecationWarning,
+        stacklevel=3
+    )
+    return ret
diff --git a/python/numpy/fft/helper.pyi b/python/numpy/fft/helper.pyi
new file mode 100644
index 000000000..887cbe7e2
--- /dev/null
+++ b/python/numpy/fft/helper.pyi
@@ -0,0 +1,22 @@
+from typing import Any
+from typing import Literal as L
+
+from typing_extensions import deprecated
+
+import numpy as np
+from numpy._typing import ArrayLike, NDArray, _ShapeLike
+
+from ._helper import integer_types as integer_types
+
+__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"]
+
+###
+
+@deprecated("Please use `numpy.fft.fftshift` instead.")
+def fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...
+@deprecated("Please use `numpy.fft.ifftshift` instead.")
+def ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...
+@deprecated("Please use `numpy.fft.fftfreq` instead.")
+def fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ...
+@deprecated("Please use `numpy.fft.rfftfreq` instead.")
+def rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ...
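A quick usage sketch of the `numpy.fft.helper` shim defined above: importing the module itself is silent, while attribute access falls through to `__getattr__`, which warns and forwards to the private `_helper` implementation.

    import warnings

    import numpy.fft.helper  # importing the shim module is silent

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        shifted = numpy.fft.helper.fftshift([0, 1, 2, 3])  # warns, then works
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
    print(shifted)  # [2 3 0 1]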
diff --git a/python/numpy/fft/tests/__init__.py b/python/numpy/fft/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/numpy/fft/tests/__pycache__/__init__.cpython-312.pyc b/python/numpy/fft/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..3bfad9008 Binary files /dev/null and b/python/numpy/fft/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc b/python/numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc new file mode 100644 index 000000000..e31790757 Binary files /dev/null and b/python/numpy/fft/tests/__pycache__/test_helper.cpython-312.pyc differ diff --git a/python/numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc b/python/numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc new file mode 100644 index 000000000..2fbee4b9a Binary files /dev/null and b/python/numpy/fft/tests/__pycache__/test_pocketfft.cpython-312.pyc differ diff --git a/python/numpy/fft/tests/test_helper.py b/python/numpy/fft/tests/test_helper.py new file mode 100644 index 000000000..c02a73639 --- /dev/null +++ b/python/numpy/fft/tests/test_helper.py @@ -0,0 +1,167 @@ +"""Test functions for fftpack.helper module + +Copied from fftpack.helper by Pearu Peterson, October 2005 + +""" +import numpy as np +from numpy import fft, pi +from numpy.testing import assert_array_almost_equal + + +class TestFFTShift: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + y = [-4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4] + assert_array_almost_equal(fft.fftshift(x), y) + assert_array_almost_equal(fft.ifftshift(y), x) + + def test_inverse(self): + for n in [1, 4, 9, 100, 211]: + x = np.random.random((n,)) + assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x) + + def test_axes_keyword(self): + freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]] + shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted) + assert_array_almost_equal(fft.fftshift(freqs, axes=0), + fft.fftshift(freqs, axes=(0,))) + assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs) + assert_array_almost_equal(fft.ifftshift(shifted, axes=0), + fft.ifftshift(shifted, axes=(0,))) + + assert_array_almost_equal(fft.fftshift(freqs), shifted) + assert_array_almost_equal(fft.ifftshift(shifted), freqs) + + def test_uneven_dims(self): + """ Test 2D input, which has uneven dimension sizes """ + freqs = [ + [0, 1], + [2, 3], + [4, 5] + ] + + # shift in dimension 0 + shift_dim0 = [ + [4, 5], + [0, 1], + [2, 3] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs) + assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0) + assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs) + + # shift in dimension 1 + shift_dim1 = [ + [1, 0], + [3, 2], + [5, 4] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1) + assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs) + + # shift in both dimensions + shift_dim_both = [ + [5, 4], + [1, 0], + [3, 2] + ] + assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs) + 
assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs) + + # axes=None (default) shift in all dimensions + assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs) + assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both) + assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs) + + def test_equal_to_original(self): + """ Test the new (>=v1.15) and old implementations are equal (see #10073) """ + from numpy._core import arange, asarray, concatenate, take + + def original_fftshift(x, axes=None): + """ How fftshift was implemented in v1.14""" + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + def original_ifftshift(x, axes=None): + """ How ifftshift was implemented in v1.14 """ + tmp = asarray(x) + ndim = tmp.ndim + if axes is None: + axes = list(range(ndim)) + elif isinstance(axes, int): + axes = (axes,) + y = tmp + for k in axes: + n = tmp.shape[k] + p2 = n - (n + 1) // 2 + mylist = concatenate((arange(p2, n), arange(p2))) + y = take(y, mylist, k) + return y + + # create possible 2d array combinations and try all possible keywords + # compare output to original functions + for i in range(16): + for j in range(16): + for axes_keyword in [0, 1, None, (0,), (0, 1)]: + inp = np.random.rand(i, j) + + assert_array_almost_equal(fft.fftshift(inp, axes_keyword), + original_fftshift(inp, axes_keyword)) + + assert_array_almost_equal(fft.ifftshift(inp, axes_keyword), + original_ifftshift(inp, axes_keyword)) + + +class TestFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4, -4, -3, -2, -1] + assert_array_almost_equal(9 * fft.fftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.fftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1] + assert_array_almost_equal(10 * fft.fftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.fftfreq(10, pi), x) + + +class TestRFFTFreq: + + def test_definition(self): + x = [0, 1, 2, 3, 4] + assert_array_almost_equal(9 * fft.rfftfreq(9), x) + assert_array_almost_equal(9 * pi * fft.rfftfreq(9, pi), x) + x = [0, 1, 2, 3, 4, 5] + assert_array_almost_equal(10 * fft.rfftfreq(10), x) + assert_array_almost_equal(10 * pi * fft.rfftfreq(10, pi), x) + + +class TestIRFFTN: + + def test_not_last_axis_success(self): + ar, ai = np.random.random((2, 16, 8, 32)) + a = ar + 1j * ai + + axes = (-2,) + + # Should not raise error + fft.irfftn(a, axes=axes) diff --git a/python/numpy/fft/tests/test_pocketfft.py b/python/numpy/fft/tests/test_pocketfft.py new file mode 100644 index 000000000..021181845 --- /dev/null +++ b/python/numpy/fft/tests/test_pocketfft.py @@ -0,0 +1,589 @@ +import queue +import threading + +import pytest + +import numpy as np +from numpy.random import random +from numpy.testing import IS_WASM, assert_allclose, assert_array_equal, assert_raises + + +def fft1(x): + L = len(x) + phase = -2j * np.pi * (np.arange(L) / L) + phase = np.arange(L).reshape(-1, 1) * phase + return np.sum(x * np.exp(phase), axis=1) + + +class TestFFTShift: + + def test_fft_n(self): + assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0) + + +class TestFFT1D: + + def test_identity(self): + maxlen = 512 + x = random(maxlen) + 1j * random(maxlen) + 
xr = random(maxlen) + for i in range(1, maxlen): + assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], + atol=1e-12) + assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i), + xr[0:i], atol=1e-12) + + @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble]) + def test_identity_long_short(self, dtype): + # Test with explicitly given number of points, both for n + # smaller and for n larger than the input size. + maxlen = 16 + atol = 5 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) + xx = np.concatenate([x, np.zeros_like(x)]) + xr = random(maxlen).astype(dtype) + xxr = np.concatenate([xr, np.zeros_like(xr)]) + for i in range(1, maxlen * 2): + check_c = np.fft.ifft(np.fft.fft(x, n=i), n=i) + assert check_c.real.dtype == dtype + assert_allclose(check_c, xx[0:i], atol=atol, rtol=0) + check_r = np.fft.irfft(np.fft.rfft(xr, n=i), n=i) + assert check_r.dtype == dtype + assert_allclose(check_r, xxr[0:i], atol=atol, rtol=0) + + @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble]) + def test_identity_long_short_reversed(self, dtype): + # Also test explicitly given number of points in reversed order. + maxlen = 16 + atol = 5 * np.spacing(np.array(1., dtype=dtype)) + x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype) + xx = np.concatenate([x, np.zeros_like(x)]) + for i in range(1, maxlen * 2): + check_via_c = np.fft.fft(np.fft.ifft(x, n=i), n=i) + assert check_via_c.dtype == x.dtype + assert_allclose(check_via_c, xx[0:i], atol=atol, rtol=0) + # For irfft, we can neither recover the imaginary part of + # the first element, nor the imaginary part of the last + # element if npts is even. So, set to 0 for the comparison. + y = x.copy() + n = i // 2 + 1 + y.imag[0] = 0 + if i % 2 == 0: + y.imag[n - 1:] = 0 + yy = np.concatenate([y, np.zeros_like(y)]) + check_via_r = np.fft.rfft(np.fft.irfft(x, n=i), n=i) + assert check_via_r.dtype == x.dtype + assert_allclose(check_via_r, yy[0:n], atol=atol, rtol=0) + + def test_fft(self): + x = random(30) + 1j * random(30) + assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6) + assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6) + assert_allclose(fft1(x) / np.sqrt(30), + np.fft.fft(x, norm="ortho"), atol=1e-6) + assert_allclose(fft1(x) / 30., + np.fft.fft(x, norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("axis", (0, 1)) + @pytest.mark.parametrize("dtype", (complex, float)) + @pytest.mark.parametrize("transpose", (True, False)) + def test_fft_out_argument(self, dtype, transpose, axis): + def zeros_like(x): + if transpose: + return np.zeros_like(x.T).T + else: + return np.zeros_like(x) + + # tests below only test the out parameter + if dtype is complex: + y = random((10, 20)) + 1j * random((10, 20)) + fft, ifft = np.fft.fft, np.fft.ifft + else: + y = random((10, 20)) + fft, ifft = np.fft.rfft, np.fft.irfft + + expected = fft(y, axis=axis) + out = zeros_like(expected) + result = fft(y, out=out, axis=axis) + assert result is out + assert_array_equal(result, expected) + + expected2 = ifft(expected, axis=axis) + out2 = out if dtype is complex else zeros_like(expected2) + result2 = ifft(out, out=out2, axis=axis) + assert result2 is out2 + assert_array_equal(result2, expected2) + + @pytest.mark.parametrize("axis", [0, 1]) + def test_fft_inplace_out(self, axis): + # Test some weirder in-place combinations + y = random((20, 20)) + 1j * random((20, 20)) + # Fully in-place. 
+ y1 = y.copy() + expected1 = np.fft.fft(y1, axis=axis) + result1 = np.fft.fft(y1, axis=axis, out=y1) + assert result1 is y1 + assert_array_equal(result1, expected1) + # In-place of part of the array; rest should be unchanged. + y2 = y.copy() + out2 = y2[:10] if axis == 0 else y2[:, :10] + expected2 = np.fft.fft(y2, n=10, axis=axis) + result2 = np.fft.fft(y2, n=10, axis=axis, out=out2) + assert result2 is out2 + assert_array_equal(result2, expected2) + if axis == 0: + assert_array_equal(y2[10:], y[10:]) + else: + assert_array_equal(y2[:, 10:], y[:, 10:]) + # In-place of another part of the array. + y3 = y.copy() + y3_sel = y3[5:] if axis == 0 else y3[:, 5:] + out3 = y3[5:15] if axis == 0 else y3[:, 5:15] + expected3 = np.fft.fft(y3_sel, n=10, axis=axis) + result3 = np.fft.fft(y3_sel, n=10, axis=axis, out=out3) + assert result3 is out3 + assert_array_equal(result3, expected3) + if axis == 0: + assert_array_equal(y3[:5], y[:5]) + assert_array_equal(y3[15:], y[15:]) + else: + assert_array_equal(y3[:, :5], y[:, :5]) + assert_array_equal(y3[:, 15:], y[:, 15:]) + # In-place with n > nin; rest should be unchanged. + y4 = y.copy() + y4_sel = y4[:10] if axis == 0 else y4[:, :10] + out4 = y4[:15] if axis == 0 else y4[:, :15] + expected4 = np.fft.fft(y4_sel, n=15, axis=axis) + result4 = np.fft.fft(y4_sel, n=15, axis=axis, out=out4) + assert result4 is out4 + assert_array_equal(result4, expected4) + if axis == 0: + assert_array_equal(y4[15:], y[15:]) + else: + assert_array_equal(y4[:, 15:], y[:, 15:]) + # Overwrite in a transpose. + y5 = y.copy() + out5 = y5.T + result5 = np.fft.fft(y5, axis=axis, out=out5) + assert result5 is out5 + assert_array_equal(result5, expected1) + # Reverse strides. + y6 = y.copy() + out6 = y6[::-1] if axis == 0 else y6[:, ::-1] + result6 = np.fft.fft(y6, axis=axis, out=out6) + assert result6 is out6 + assert_array_equal(result6, expected1) + + def test_fft_bad_out(self): + x = np.arange(30.) + with pytest.raises(TypeError, match="must be of ArrayType"): + np.fft.fft(x, out="") + with pytest.raises(ValueError, match="has wrong shape"): + np.fft.fft(x, out=np.zeros_like(x).reshape(5, -1)) + with pytest.raises(TypeError, match="Cannot cast"): + np.fft.fft(x, out=np.zeros_like(x, dtype=float)) + + @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) + def test_ifft(self, norm): + x = random(30) + 1j * random(30) + assert_allclose( + x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm), + atol=1e-6) + # Ensure we get the correct error message + with pytest.raises(ValueError, + match='Invalid number of FFT data points'): + np.fft.ifft([], norm=norm) + + def test_fft2(self): + x = random((30, 20)) + 1j * random((30, 20)) + assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0), + np.fft.fft2(x), atol=1e-6) + assert_allclose(np.fft.fft2(x), + np.fft.fft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20), + np.fft.fft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fft2(x) / (30. * 20.), + np.fft.fft2(x, norm="forward"), atol=1e-6) + + def test_ifft2(self): + x = random((30, 20)) + 1j * random((30, 20)) + assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0), + np.fft.ifft2(x), atol=1e-6) + assert_allclose(np.fft.ifft2(x), + np.fft.ifft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20), + np.fft.ifft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifft2(x) * (30. 
* 20.), + np.fft.ifft2(x, norm="forward"), atol=1e-6) + + def test_fftn(self): + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) + assert_allclose( + np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0), + np.fft.fftn(x), atol=1e-6) + assert_allclose(np.fft.fftn(x), + np.fft.fftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10), + np.fft.fftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.), + np.fft.fftn(x, norm="forward"), atol=1e-6) + + def test_ifftn(self): + x = random((30, 20, 10)) + 1j * random((30, 20, 10)) + assert_allclose( + np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0), + np.fft.ifftn(x), atol=1e-6) + assert_allclose(np.fft.ifftn(x), + np.fft.ifftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10), + np.fft.ifftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.), + np.fft.ifftn(x, norm="forward"), atol=1e-6) + + def test_rfft(self): + x = random(30) + for n in [x.size, 2 * x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + assert_allclose( + np.fft.fft(x, n=n, norm=norm)[:(n // 2 + 1)], + np.fft.rfft(x, n=n, norm=norm), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n), + np.fft.rfft(x, n=n, norm="backward"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / np.sqrt(n), + np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6) + assert_allclose( + np.fft.rfft(x, n=n) / n, + np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + + def test_rfft_even(self): + x = np.arange(8) + n = 4 + y = np.fft.rfft(x, n) + assert_allclose(y, np.fft.fft(x[:n])[:n // 2 + 1], rtol=1e-14) + + def test_rfft_odd(self): + x = np.array([1, 0, 2, 3, -3]) + y = np.fft.rfft(x) + assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14) + + def test_irfft(self): + x = random(30) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfft2(self): + x = random((30, 20)) + assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6) + assert_allclose(np.fft.rfft2(x), + np.fft.rfft2(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20), + np.fft.rfft2(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfft2(x) / (30. * 20.), + np.fft.rfft2(x, norm="forward"), atol=1e-6) + + def test_irfft2(self): + x = random((30, 20)) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_rfftn(self): + x = random((30, 20, 10)) + assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6) + assert_allclose(np.fft.rfftn(x), + np.fft.rfftn(x, norm="backward"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10), + np.fft.rfftn(x, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.rfftn(x) / (30. * 20. 
* 10.), + np.fft.rfftn(x, norm="forward"), atol=1e-6) + # Regression test for gh-27159 + x = np.ones((2, 3)) + result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40)) + assert result.shape == (10, 21) + expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40), + axis=0, n=20), axis=0, n=10) + assert expected.shape == (10, 21) + assert_allclose(result, expected, atol=1e-6) + + def test_irfftn(self): + x = random((30, 20, 10)) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"), + norm="backward"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), + norm="ortho"), atol=1e-6) + assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"), + norm="forward"), atol=1e-6) + + def test_hfft(self): + x = random(14) + 1j * random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm), + np.fft.hfft(x_herm, norm="backward"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30), + np.fft.hfft(x_herm, norm="ortho"), atol=1e-6) + assert_allclose(np.fft.hfft(x_herm) / 30., + np.fft.hfft(x_herm, norm="forward"), atol=1e-6) + + def test_ihfft(self): + x = random(14) + 1j * random(14) + x_herm = np.concatenate((random(1), x, random(1))) + x = np.concatenate((x_herm, x[::-1].conj())) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="backward"), norm="backward"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="ortho"), norm="ortho"), atol=1e-6) + assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm, + norm="forward"), norm="forward"), atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_axes(self, op): + x = random((30, 20, 10)) + axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)] + for a in axes: + op_tr = op(np.transpose(x, a)) + tr_op = np.transpose(op(x, axes=a), a) + assert_allclose(op_tr, tr_op, atol=1e-6) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.fft2, np.fft.ifft2]) + def test_s_negative_1(self, op): + x = np.arange(100).reshape(10, 10) + # should use the whole input array along the first axis + assert op(x, s=(-1, 5), axes=(0, 1)).shape == (10, 5) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn]) + def test_s_axes_none(self, op): + x = np.arange(100).reshape(10, 10) + with pytest.warns(match='`axes` should not be `None` if `s`'): + op(x, s=(-1, 5)) + + @pytest.mark.parametrize("op", [np.fft.fft2, np.fft.ifft2]) + def test_s_axes_none_2D(self, op): + x = np.arange(100).reshape(10, 10) + with pytest.warns(match='`axes` should not be `None` if `s`'): + op(x, s=(-1, 5), axes=None) + + @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn, + np.fft.rfftn, np.fft.irfftn, + np.fft.fft2, np.fft.ifft2]) + def test_s_contains_none(self, op): + x = random((30, 20, 10)) + with pytest.warns(match='array containing `None` values to `s`'): + op(x, s=(10, None, 10), axes=(0, 1, 2)) + + def test_all_1d_norm_preserving(self): + # verify that round-trip transforms are norm-preserving + x = random(30) + x_norm = np.linalg.norm(x) + n = x.size * 2 + func_pairs = [(np.fft.fft, np.fft.ifft), + (np.fft.rfft, np.fft.irfft), + # hfft: order so the first function takes 
x.size samples + # (necessary for comparison to x_norm above) + (np.fft.ihfft, np.fft.hfft), + ] + for forw, back in func_pairs: + for n in [x.size, 2 * x.size]: + for norm in [None, 'backward', 'ortho', 'forward']: + tmp = forw(x, n=n, norm=norm) + tmp = back(tmp, n=n, norm=norm) + assert_allclose(x_norm, + np.linalg.norm(tmp), atol=1e-6) + + @pytest.mark.parametrize("axes", [(0, 1), (0, 2), None]) + @pytest.mark.parametrize("dtype", (complex, float)) + @pytest.mark.parametrize("transpose", (True, False)) + def test_fftn_out_argument(self, dtype, transpose, axes): + def zeros_like(x): + if transpose: + return np.zeros_like(x.T).T + else: + return np.zeros_like(x) + + # tests below only test the out parameter + if dtype is complex: + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) + fft, ifft = np.fft.fftn, np.fft.ifftn + else: + x = random((10, 5, 6)) + fft, ifft = np.fft.rfftn, np.fft.irfftn + + expected = fft(x, axes=axes) + out = zeros_like(expected) + result = fft(x, out=out, axes=axes) + assert result is out + assert_array_equal(result, expected) + + expected2 = ifft(expected, axes=axes) + out2 = out if dtype is complex else zeros_like(expected2) + result2 = ifft(out, out=out2, axes=axes) + assert result2 is out2 + assert_array_equal(result2, expected2) + + @pytest.mark.parametrize("fft", [np.fft.fftn, np.fft.ifftn, np.fft.rfftn]) + def test_fftn_out_and_s_interaction(self, fft): + # With s, shape varies, so generally one cannot pass in out. + if fft is np.fft.rfftn: + x = random((10, 5, 6)) + else: + x = random((10, 5, 6)) + 1j * random((10, 5, 6)) + with pytest.raises(ValueError, match="has wrong shape"): + fft(x, out=np.zeros_like(x), s=(3, 3, 3), axes=(0, 1, 2)) + # Except on the first axis done (which is the last of axes). + s = (10, 5, 5) + expected = fft(x, s=s, axes=(0, 1, 2)) + out = np.zeros_like(expected) + result = fft(x, s=s, axes=(0, 1, 2), out=out) + assert result is out + assert_array_equal(result, expected) + + @pytest.mark.parametrize("s", [(9, 5, 5), (3, 3, 3)]) + def test_irfftn_out_and_s_interaction(self, s): + # Since for irfftn, the output is real and thus cannot be used for + # intermediate steps, it should always work. 
+ x = random((9, 5, 6, 2)) + 1j * random((9, 5, 6, 2)) + expected = np.fft.irfftn(x, s=s, axes=(0, 1, 2)) + out = np.zeros_like(expected) + result = np.fft.irfftn(x, s=s, axes=(0, 1, 2), out=out) + assert result is out + assert_array_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [np.float32, np.float64, np.complex64, np.complex128]) +@pytest.mark.parametrize("order", ["F", 'non-contiguous']) +@pytest.mark.parametrize( + "fft", + [np.fft.fft, np.fft.fft2, np.fft.fftn, + np.fft.ifft, np.fft.ifft2, np.fft.ifftn]) +def test_fft_with_order(dtype, order, fft): + # Check that FFT/IFFT produces identical results for C, Fortran and + # non contiguous arrays + rng = np.random.RandomState(42) + X = rng.rand(8, 7, 13).astype(dtype, copy=False) + # See discussion in pull/14178 + _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps + if order == 'F': + Y = np.asfortranarray(X) + else: + # Make a non contiguous array + Y = X[::-1] + X = np.ascontiguousarray(X[::-1]) + + if fft.__name__.endswith('fft'): + for axis in range(3): + X_res = fft(X, axis=axis) + Y_res = fft(Y, axis=axis) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + elif fft.__name__.endswith(('fft2', 'fftn')): + axes = [(0, 1), (1, 2), (0, 2)] + if fft.__name__.endswith('fftn'): + axes.extend([(0,), (1,), (2,), None]) + for ax in axes: + X_res = fft(X, axes=ax) + Y_res = fft(Y, axes=ax) + assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol) + else: + raise ValueError + + +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("n", [None, 7, 12]) +def test_fft_output_order(order, n): + rng = np.random.RandomState(42) + x = rng.rand(10) + x = np.asarray(x, dtype=np.complex64, order=order) + res = np.fft.fft(x, n=n) + assert res.flags.c_contiguous == x.flags.c_contiguous + assert res.flags.f_contiguous == x.flags.f_contiguous + +@pytest.mark.skipif(IS_WASM, reason="Cannot start thread") +class TestFFTThreadSafe: + threads = 16 + input_shape = (800, 200) + + def _test_mtsame(self, func, *args): + def worker(args, q): + q.put(func(*args)) + + q = queue.Queue() + expected = func(*args) + + # Spin off a bunch of threads to call the same function simultaneously + t = [threading.Thread(target=worker, args=(args, q)) + for i in range(self.threads)] + [x.start() for x in t] + + [x.join() for x in t] + # Make sure all threads returned the correct value + for i in range(self.threads): + assert_array_equal(q.get(timeout=5), expected, + 'Function returned wrong value in multithreaded context') + + def test_fft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.fft, a) + + def test_ifft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.ifft, a) + + def test_rfft(self): + a = np.ones(self.input_shape) + self._test_mtsame(np.fft.rfft, a) + + def test_irfft(self): + a = np.ones(self.input_shape) * 1 + 0j + self._test_mtsame(np.fft.irfft, a) + + +def test_irfft_with_n_1_regression(): + # Regression test for gh-25661 + x = np.arange(10) + np.fft.irfft(x, n=1) + np.fft.hfft(x, n=1) + np.fft.irfft(np.array([0], complex), n=10) + + +def test_irfft_with_n_large_regression(): + # Regression test for gh-25679 + x = np.arange(5) * (1 + 1j) + result = np.fft.hfft(x, n=10) + expected = np.array([20., 9.91628173, -11.8819096, 7.1048486, + -6.62459848, 4., -3.37540152, -0.16057669, + 1.8819096, -20.86055364]) + assert_allclose(result, expected) + + +@pytest.mark.parametrize("fft", [ + np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft +]) 
+@pytest.mark.parametrize("data", [ + np.array([False, True, False]), + np.arange(10, dtype=np.uint8), + np.arange(5, dtype=np.int16), +]) +def test_fft_with_integer_or_bool_input(data, fft): + # Regression test for gh-25819 + result = fft(data) + float_data = data.astype(np.result_type(data, 1.)) + expected = fft(float_data) + assert_array_equal(result, expected) diff --git a/python/numpy/lib/__init__.py b/python/numpy/lib/__init__.py new file mode 100644 index 000000000..a248d048f --- /dev/null +++ b/python/numpy/lib/__init__.py @@ -0,0 +1,97 @@ +""" +``numpy.lib`` is mostly a space for implementing functions that don't +belong in core or in another NumPy submodule with a clear purpose +(e.g. ``random``, ``fft``, ``linalg``, ``ma``). + +``numpy.lib``'s private submodules contain basic functions that are used by +other public modules and are useful to have in the main name-space. + +""" + +# Public submodules +# Note: recfunctions is public, but not imported +from numpy._core._multiarray_umath import add_docstring, tracemalloc_domain +from numpy._core.function_base import add_newdoc + +# Private submodules +# load module names. See https://github.com/networkx/networkx/issues/5838 +from . import ( + _arraypad_impl, + _arraysetops_impl, + _arrayterator_impl, + _function_base_impl, + _histograms_impl, + _index_tricks_impl, + _nanfunctions_impl, + _npyio_impl, + _polynomial_impl, + _shape_base_impl, + _stride_tricks_impl, + _twodim_base_impl, + _type_check_impl, + _ufunclike_impl, + _utils_impl, + _version, + array_utils, + format, + introspect, + mixins, + npyio, + scimath, + stride_tricks, +) + +# numpy.lib namespace members +from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion + +__all__ = [ + "Arrayterator", "add_docstring", "add_newdoc", "array_utils", + "format", "introspect", "mixins", "NumpyVersion", "npyio", "scimath", + "stride_tricks", "tracemalloc_domain", +] + +add_newdoc.__module__ = "numpy.lib" + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester + +def __getattr__(attr): + # Warn for deprecated/removed aliases + import math + import warnings + + if attr == "math": + warnings.warn( + "`np.lib.math` is a deprecated alias for the standard library " + "`math` module (Deprecated Numpy 1.25). Replace usages of " + "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2) + return math + elif attr == "emath": + raise AttributeError( + "numpy.lib.emath was an alias for emath module that was removed " + "in NumPy 2.0. Replace usages of numpy.lib.emath with " + "numpy.emath.", + name=None + ) + elif attr in ( + "histograms", "type_check", "nanfunctions", "function_base", + "arraypad", "arraysetops", "ufunclike", "utils", "twodim_base", + "shape_base", "polynomial", "index_tricks", + ): + raise AttributeError( + f"numpy.lib.{attr} is now private. If you are using a public " + "function, it should be available in the main numpy namespace, " + "otherwise check the NumPy 2.0 migration guide.", + name=None + ) + elif attr == "arrayterator": + raise AttributeError( + "numpy.lib.arrayterator submodule is now private. 
To access " + "Arrayterator class use numpy.lib.Arrayterator.", + name=None + ) + else: + raise AttributeError(f"module {__name__!r} has no attribute {attr!r}") diff --git a/python/numpy/lib/__init__.pyi b/python/numpy/lib/__init__.pyi new file mode 100644 index 000000000..6185a494d --- /dev/null +++ b/python/numpy/lib/__init__.pyi @@ -0,0 +1,44 @@ +from numpy._core.function_base import add_newdoc +from numpy._core.multiarray import add_docstring, tracemalloc_domain + +# all submodules of `lib` are accessible at runtime through `__getattr__`, +# so we implicitly re-export them here +from . import _array_utils_impl as _array_utils_impl +from . import _arraypad_impl as _arraypad_impl +from . import _arraysetops_impl as _arraysetops_impl +from . import _arrayterator_impl as _arrayterator_impl +from . import _datasource as _datasource +from . import _format_impl as _format_impl +from . import _function_base_impl as _function_base_impl +from . import _histograms_impl as _histograms_impl +from . import _index_tricks_impl as _index_tricks_impl +from . import _iotools as _iotools +from . import _nanfunctions_impl as _nanfunctions_impl +from . import _npyio_impl as _npyio_impl +from . import _polynomial_impl as _polynomial_impl +from . import _scimath_impl as _scimath_impl +from . import _shape_base_impl as _shape_base_impl +from . import _stride_tricks_impl as _stride_tricks_impl +from . import _twodim_base_impl as _twodim_base_impl +from . import _type_check_impl as _type_check_impl +from . import _ufunclike_impl as _ufunclike_impl +from . import _utils_impl as _utils_impl +from . import _version as _version +from . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks +from ._arrayterator_impl import Arrayterator +from ._version import NumpyVersion + +__all__ = [ + "Arrayterator", + "add_docstring", + "add_newdoc", + "array_utils", + "format", + "introspect", + "mixins", + "NumpyVersion", + "npyio", + "scimath", + "stride_tricks", + "tracemalloc_domain", +] diff --git a/python/numpy/lib/__pycache__/__init__.cpython-312.pyc b/python/numpy/lib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..5b54dc6f2 Binary files /dev/null and b/python/numpy/lib/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc new file mode 100644 index 000000000..90ca981ac Binary files /dev/null and b/python/numpy/lib/__pycache__/_array_utils_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc new file mode 100644 index 000000000..447d06df1 Binary files /dev/null and b/python/numpy/lib/__pycache__/_arraypad_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc new file mode 100644 index 000000000..95b3d3f08 Binary files /dev/null and b/python/numpy/lib/__pycache__/_arraysetops_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc new file mode 100644 index 000000000..3712ad803 Binary files /dev/null and b/python/numpy/lib/__pycache__/_arrayterator_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_datasource.cpython-312.pyc b/python/numpy/lib/__pycache__/_datasource.cpython-312.pyc new file 
mode 100644 index 000000000..11246ed72 Binary files /dev/null and b/python/numpy/lib/__pycache__/_datasource.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_format_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_format_impl.cpython-312.pyc new file mode 100644 index 000000000..fc34387e3 Binary files /dev/null and b/python/numpy/lib/__pycache__/_format_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc new file mode 100644 index 000000000..856c5f810 Binary files /dev/null and b/python/numpy/lib/__pycache__/_function_base_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc new file mode 100644 index 000000000..9efdb3f2c Binary files /dev/null and b/python/numpy/lib/__pycache__/_histograms_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc new file mode 100644 index 000000000..66ff81ac5 Binary files /dev/null and b/python/numpy/lib/__pycache__/_index_tricks_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_iotools.cpython-312.pyc b/python/numpy/lib/__pycache__/_iotools.cpython-312.pyc new file mode 100644 index 000000000..5fac4aa29 Binary files /dev/null and b/python/numpy/lib/__pycache__/_iotools.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc new file mode 100644 index 000000000..d187d981b Binary files /dev/null and b/python/numpy/lib/__pycache__/_nanfunctions_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc new file mode 100644 index 000000000..bc8cdd105 Binary files /dev/null and b/python/numpy/lib/__pycache__/_npyio_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc new file mode 100644 index 000000000..1ba3e0b71 Binary files /dev/null and b/python/numpy/lib/__pycache__/_polynomial_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc new file mode 100644 index 000000000..6c4be23f4 Binary files /dev/null and b/python/numpy/lib/__pycache__/_scimath_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc new file mode 100644 index 000000000..b1ebc17fd Binary files /dev/null and b/python/numpy/lib/__pycache__/_shape_base_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc new file mode 100644 index 000000000..847ea1d0b Binary files /dev/null and b/python/numpy/lib/__pycache__/_stride_tricks_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc new file mode 100644 index 000000000..7c5007603 Binary files /dev/null and b/python/numpy/lib/__pycache__/_twodim_base_impl.cpython-312.pyc differ diff --git 
a/python/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc new file mode 100644 index 000000000..3b6641ed8 Binary files /dev/null and b/python/numpy/lib/__pycache__/_type_check_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc new file mode 100644 index 000000000..1478d1bf1 Binary files /dev/null and b/python/numpy/lib/__pycache__/_ufunclike_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc new file mode 100644 index 000000000..e2429c9be Binary files /dev/null and b/python/numpy/lib/__pycache__/_user_array_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc b/python/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc new file mode 100644 index 000000000..c53ce5442 Binary files /dev/null and b/python/numpy/lib/__pycache__/_utils_impl.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/_version.cpython-312.pyc b/python/numpy/lib/__pycache__/_version.cpython-312.pyc new file mode 100644 index 000000000..af0a798fc Binary files /dev/null and b/python/numpy/lib/__pycache__/_version.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/array_utils.cpython-312.pyc b/python/numpy/lib/__pycache__/array_utils.cpython-312.pyc new file mode 100644 index 000000000..2c66a1d1f Binary files /dev/null and b/python/numpy/lib/__pycache__/array_utils.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/format.cpython-312.pyc b/python/numpy/lib/__pycache__/format.cpython-312.pyc new file mode 100644 index 000000000..a453319f0 Binary files /dev/null and b/python/numpy/lib/__pycache__/format.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/introspect.cpython-312.pyc b/python/numpy/lib/__pycache__/introspect.cpython-312.pyc new file mode 100644 index 000000000..669ebf490 Binary files /dev/null and b/python/numpy/lib/__pycache__/introspect.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/mixins.cpython-312.pyc b/python/numpy/lib/__pycache__/mixins.cpython-312.pyc new file mode 100644 index 000000000..0bb6a6cd7 Binary files /dev/null and b/python/numpy/lib/__pycache__/mixins.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/npyio.cpython-312.pyc b/python/numpy/lib/__pycache__/npyio.cpython-312.pyc new file mode 100644 index 000000000..4c21054b7 Binary files /dev/null and b/python/numpy/lib/__pycache__/npyio.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/recfunctions.cpython-312.pyc b/python/numpy/lib/__pycache__/recfunctions.cpython-312.pyc new file mode 100644 index 000000000..5b17fcf96 Binary files /dev/null and b/python/numpy/lib/__pycache__/recfunctions.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/scimath.cpython-312.pyc b/python/numpy/lib/__pycache__/scimath.cpython-312.pyc new file mode 100644 index 000000000..9caac3e8b Binary files /dev/null and b/python/numpy/lib/__pycache__/scimath.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc b/python/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc new file mode 100644 index 000000000..b6dda2c2e Binary files /dev/null and b/python/numpy/lib/__pycache__/stride_tricks.cpython-312.pyc differ diff --git a/python/numpy/lib/__pycache__/user_array.cpython-312.pyc 
b/python/numpy/lib/__pycache__/user_array.cpython-312.pyc new file mode 100644 index 000000000..8ff83ec02 Binary files /dev/null and b/python/numpy/lib/__pycache__/user_array.cpython-312.pyc differ diff --git a/python/numpy/lib/_array_utils_impl.py b/python/numpy/lib/_array_utils_impl.py new file mode 100644 index 000000000..c3996e1f2 --- /dev/null +++ b/python/numpy/lib/_array_utils_impl.py @@ -0,0 +1,62 @@ +""" +Miscellaneous utils. +""" +from numpy._core import asarray +from numpy._core.numeric import normalize_axis_index, normalize_axis_tuple +from numpy._utils import set_module + +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] + + +@set_module("numpy.lib.array_utils") +def byte_bounds(a): + """ + Returns pointers to the end-points of an array. + + Parameters + ---------- + a : ndarray + Input array. It must conform to the Python-side of the array + interface. + + Returns + ------- + (low, high) : tuple of 2 integers + The first integer is the first byte of the array, the second + integer is just past the last byte of the array. If `a` is not + contiguous it will not use every byte between the (`low`, `high`) + values. + + Examples + -------- + >>> import numpy as np + >>> I = np.eye(2, dtype='f'); I.dtype + dtype('float32') + >>> low, high = np.lib.array_utils.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + >>> I = np.eye(2); I.dtype + dtype('float64') + >>> low, high = np.lib.array_utils.byte_bounds(I) + >>> high - low == I.size*I.itemsize + True + + """ + ai = a.__array_interface__ + a_data = ai['data'][0] + astrides = ai['strides'] + ashape = ai['shape'] + bytes_a = asarray(a).dtype.itemsize + + a_low = a_high = a_data + if astrides is None: + # contiguous case + a_high += a.size * bytes_a + else: + for shape, stride in zip(ashape, astrides): + if stride < 0: + a_low += (shape - 1) * stride + else: + a_high += (shape - 1) * stride + a_high += bytes_a + return a_low, a_high diff --git a/python/numpy/lib/_array_utils_impl.pyi b/python/numpy/lib/_array_utils_impl.pyi new file mode 100644 index 000000000..d3e071477 --- /dev/null +++ b/python/numpy/lib/_array_utils_impl.pyi @@ -0,0 +1,26 @@ +from collections.abc import Iterable +from typing import Any + +from numpy import generic +from numpy.typing import NDArray + +__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"] + +# NOTE: In practice `byte_bounds` can (potentially) take any object +# implementing the `__array_interface__` protocol. The caveat is +# that certain keys, marked as optional in the spec, must be present for +# `byte_bounds`. This concerns `"strides"` and `"data"`. +def byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ... + +def normalize_axis_tuple( + axis: int | Iterable[int], + ndim: int = ..., + argname: str | None = ..., + allow_duplicate: bool | None = ..., +) -> tuple[int, int]: ... + +def normalize_axis_index( + axis: int = ..., + ndim: int = ..., + msg_prefix: str | None = ..., +) -> int: ... diff --git a/python/numpy/lib/_arraypad_impl.py b/python/numpy/lib/_arraypad_impl.py new file mode 100644 index 000000000..507a0ab51 --- /dev/null +++ b/python/numpy/lib/_arraypad_impl.py @@ -0,0 +1,890 @@ +""" +The arraypad module contains a group of functions to pad values onto the edges +of an n-dimensional array. 
+
+"""
+import numpy as np
+from numpy._core.overrides import array_function_dispatch
+from numpy.lib._index_tricks_impl import ndindex
+
+__all__ = ['pad']
+
+
+###############################################################################
+# Private utility functions.
+
+
+def _round_if_needed(arr, dtype):
+    """
+    Round `arr` in place if the destination dtype is integer.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+    dtype : dtype
+        The dtype of the destination array.
+    """
+    if np.issubdtype(dtype, np.integer):
+        arr.round(out=arr)
+
+
+def _slice_at_axis(sl, axis):
+    """
+    Construct tuple of slices to slice an array in the given dimension.
+
+    Parameters
+    ----------
+    sl : slice
+        The slice for the given dimension.
+    axis : int
+        The axis to which `sl` is applied. All other dimensions are left
+        "unsliced".
+
+    Returns
+    -------
+    sl : tuple of slices
+        A tuple selecting `sl` along `axis`, with ``Ellipsis`` covering all
+        remaining dimensions.
+
+    Examples
+    --------
+    >>> _slice_at_axis(slice(None, 3, -1), 1)
+    (slice(None, None, None), slice(None, 3, -1), Ellipsis)
+    """
+    return (slice(None),) * axis + (sl,) + (...,)
+
+
+def _view_roi(array, original_area_slice, axis):
+    """
+    Get a view of the current region of interest during iterative padding.
+
+    When padding multiple dimensions iteratively, corner values are
+    unnecessarily overwritten multiple times. This function reduces the
+    working area for the first dimensions so that corners are excluded.
+
+    Parameters
+    ----------
+    array : ndarray
+        The array with the region of interest.
+    original_area_slice : tuple of slices
+        Denotes the area with original values of the unpadded array.
+    axis : int
+        The currently padded dimension, assuming that `axis` is padded before
+        `axis` + 1.
+
+    Returns
+    -------
+    roi : ndarray
+        The region of interest of the original `array`.
+    """
+    axis += 1
+    sl = (slice(None),) * axis + original_area_slice[axis:]
+    return array[sl]
+
+
+def _pad_simple(array, pad_width, fill_value=None):
+    """
+    Pad array on all sides with either a single value or undefined values.
+
+    Parameters
+    ----------
+    array : ndarray
+        Array to grow.
+    pad_width : sequence of tuple[int, int]
+        Pad width on both sides for each dimension in `array`.
+    fill_value : scalar, optional
+        If provided the padded area is filled with this value, otherwise
+        the pad area is left undefined.
+
+    Returns
+    -------
+    padded : ndarray
+        The padded array with the same dtype as `array`. Its order will
+        default to C-style if `array` is not F-contiguous.
+    original_area_slice : tuple
+        A tuple of slices pointing to the area of the original array.
+    """
+    # Allocate grown array
+    new_shape = tuple(
+        left + size + right
+        for size, (left, right) in zip(array.shape, pad_width)
+    )
+    order = 'F' if array.flags.fnc else 'C'  # Fortran and not also C-order
+    padded = np.empty(new_shape, dtype=array.dtype, order=order)
+
+    if fill_value is not None:
+        padded.fill(fill_value)
+
+    # Copy old array into correct space
+    original_area_slice = tuple(
+        slice(left, left + size)
+        for size, (left, right) in zip(array.shape, pad_width)
+    )
+    padded[original_area_slice] = array
+
+    return padded, original_area_slice
+
+
+def _set_pad_area(padded, axis, width_pair, value_pair):
+    """
+    Set empty-padded area in given dimension.
+
+    Parameters
+    ----------
+    padded : ndarray
+        Array with the pad area, which is modified in place.
+    axis : int
+        Dimension with the pad area to set.
+    width_pair : (int, int)
+        Pair of widths that mark the pad area on both sides in the given
+        dimension.
+ value_pair : tuple of scalars or ndarrays + Values inserted into the pad area on each side. It must match or be + broadcastable to the shape of `arr`. + """ + left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) + padded[left_slice] = value_pair[0] + + right_slice = _slice_at_axis( + slice(padded.shape[axis] - width_pair[1], None), axis) + padded[right_slice] = value_pair[1] + + +def _get_edges(padded, axis, width_pair): + """ + Retrieve edge values from empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the edges are considered. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + + Returns + ------- + left_edge, right_edge : ndarray + Edge values of the valid area in `padded` in the given dimension. Its + shape will always match `padded` except for the dimension given by + `axis` which will have a length of 1. + """ + left_index = width_pair[0] + left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) + left_edge = padded[left_slice] + + right_index = padded.shape[axis] - width_pair[1] + right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) + right_edge = padded[right_slice] + + return left_edge, right_edge + + +def _get_linear_ramps(padded, axis, width_pair, end_value_pair): + """ + Construct linear ramps for empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the ramps are constructed. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + end_value_pair : (scalar, scalar) + End values for the linear ramps which form the edge of the fully padded + array. These values are included in the linear ramps. + + Returns + ------- + left_ramp, right_ramp : ndarray + Linear ramps to set on both sides of `padded`. + """ + edge_pair = _get_edges(padded, axis, width_pair) + + left_ramp, right_ramp = ( + np.linspace( + start=end_value, + stop=edge.squeeze(axis), # Dimension is replaced by linspace + num=width, + endpoint=False, + dtype=padded.dtype, + axis=axis + ) + for end_value, edge, width in zip( + end_value_pair, edge_pair, width_pair + ) + ) + + # Reverse linear space in appropriate dimension + right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)] + + return left_ramp, right_ramp + + +def _get_stats(padded, axis, width_pair, length_pair, stat_func): + """ + Calculate statistic for the empty-padded array in given dimension. + + Parameters + ---------- + padded : ndarray + Empty-padded array. + axis : int + Dimension in which the statistic is calculated. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + length_pair : 2-element sequence of None or int + Gives the number of values in valid area from each side that is + taken into account when calculating the statistic. If None the entire + valid area in `padded` is considered. + stat_func : function + Function to compute statistic. The expected signature is + ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. + + Returns + ------- + left_stat, right_stat : ndarray + Calculated statistic for both sides of `padded`. 
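+
+    Examples
+    --------
+    A minimal sketch, assuming a float array empty-padded by one value on
+    each side:
+
+    >>> a, _ = _pad_simple(np.array([1., 2., 3.]), [(1, 1)])
+    >>> _get_stats(a, 0, (1, 1), (None, None), np.mean)
+    (array([2.]), array([2.]))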
+ """ + # Calculate indices of the edges of the area with original values + left_index = width_pair[0] + right_index = padded.shape[axis] - width_pair[1] + # as well as its length + max_length = right_index - left_index + + # Limit stat_lengths to max_length + left_length, right_length = length_pair + if left_length is None or max_length < left_length: + left_length = max_length + if right_length is None or max_length < right_length: + right_length = max_length + + if (left_length == 0 or right_length == 0) \ + and stat_func in {np.amax, np.amin}: + # amax and amin can't operate on an empty array, + # raise a more descriptive warning here instead of the default one + raise ValueError("stat_length of 0 yields no value for padding") + + # Calculate statistic for the left side + left_slice = _slice_at_axis( + slice(left_index, left_index + left_length), axis) + left_chunk = padded[left_slice] + left_stat = stat_func(left_chunk, axis=axis, keepdims=True) + _round_if_needed(left_stat, padded.dtype) + + if left_length == right_length == max_length: + # return early as right_stat must be identical to left_stat + return left_stat, left_stat + + # Calculate statistic for the right side + right_slice = _slice_at_axis( + slice(right_index - right_length, right_index), axis) + right_chunk = padded[right_slice] + right_stat = stat_func(right_chunk, axis=axis, keepdims=True) + _round_if_needed(right_stat, padded.dtype) + + return left_stat, right_stat + + +def _set_reflect_both(padded, axis, width_pair, method, + original_period, include_edge=False): + """ + Pad `axis` of `arr` with reflection. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + method : str + Controls method of reflection; options are 'even' or 'odd'. + original_period : int + Original length of data on `axis` of `arr`. + include_edge : bool + If true, edge value is included in reflection, otherwise the edge + value forms the symmetric axis to the reflection. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + old_length = padded.shape[axis] - right_pad - left_pad + + if include_edge: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = old_length // original_period * original_period + # Edge is included, we need to offset the pad amount by 1 + edge_offset = 1 + else: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. 
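+        # (Illustrative arithmetic, for exposition: with original_period = 4
+        # and old_length = 10 the next line gives (10 - 1) // 3 * 3 + 1 = 10,
+        # i.e. a whole number of mirrored spans; with old_length = 9 the
+        # usable length shrinks to 7.)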
+ old_length = ((old_length - 1) // (original_period - 1) + * (original_period - 1) + 1) + edge_offset = 0 # Edge is not included, no need to offset pad amount + old_length -= 1 # but must be omitted from the chunk + + if left_pad > 0: + # Pad with reflected values on left side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, left_pad) + # Slice right to left, stop on or next to edge, start relative to stop + stop = left_pad - edge_offset + start = stop + chunk_length + left_slice = _slice_at_axis(slice(start, stop, -1), axis) + left_chunk = padded[left_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) + left_chunk = 2 * padded[edge_slice] - left_chunk + + # Insert chunk into padded area + start = left_pad - chunk_length + stop = left_pad + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = left_chunk + # Adjust pointer to left edge for next iteration + left_pad -= chunk_length + + if right_pad > 0: + # Pad with reflected values on right side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, right_pad) + # Slice right to left, start on or next to edge, stop relative to start + start = -right_pad + edge_offset - 2 + stop = start - chunk_length + right_slice = _slice_at_axis(slice(start, stop, -1), axis) + right_chunk = padded[right_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis( + slice(-right_pad - 1, -right_pad), axis) + right_chunk = 2 * padded[edge_slice] - right_chunk + + # Insert chunk into padded area + start = padded.shape[axis] - right_pad + stop = start + chunk_length + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = right_chunk + # Adjust pointer to right edge for next iteration + right_pad -= chunk_length + + return left_pad, right_pad + + +def _set_wrap_both(padded, axis, width_pair, original_period): + """ + Pad `axis` of `arr` with wrapped values. + + Parameters + ---------- + padded : ndarray + Input array of arbitrary shape. + axis : int + Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + original_period : int + Original length of data on `axis` of `arr`. + + Returns + ------- + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + period = padded.shape[axis] - right_pad - left_pad + # Avoid wrapping with only a subset of the original area by ensuring period + # can only be a multiple of the original area's length. + period = period // original_period * original_period + + # If the current dimension of `arr` doesn't contain enough valid values + # (not part of the undefined pad area) we need to pad multiple times. + # Each time the pad area shrinks on both sides which is communicated with + # these variables. + new_left_pad = 0 + new_right_pad = 0 + + if left_pad > 0: + # Pad with wrapped values on left side + # First slice chunk from left side of the non-pad area. + # Use min(period, left_pad) to ensure that chunk is not larger than + # pad area. 
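+        # (Worked example, for exposition: with left_pad = 5 and period = 3
+        # the chunk below is padded[5:8], i.e. the first three valid values,
+        # and the remaining two pad positions are filled on the next
+        # iteration.)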
+ slice_end = left_pad + period + slice_start = slice_end - min(period, left_pad) + right_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + right_chunk = padded[right_slice] + + if left_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) + new_left_pad = left_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(None, left_pad), axis) + padded[pad_area] = right_chunk + + if right_pad > 0: + # Pad with wrapped values on right side + # First slice chunk from right side of the non-pad area. + # Use min(period, right_pad) to ensure that chunk is not larger than + # pad area. + slice_start = -right_pad - period + slice_end = slice_start + min(period, right_pad) + left_slice = _slice_at_axis(slice(slice_start, slice_end), axis) + left_chunk = padded[left_slice] + + if right_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis( + slice(-right_pad, -right_pad + period), axis) + new_right_pad = right_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(-right_pad, None), axis) + padded[pad_area] = left_chunk + + return new_left_pad, new_right_pad + + +def _as_pairs(x, ndim, as_index=False): + """ + Broadcast `x` to an array with the shape (`ndim`, 2). + + A helper function for `pad` that prepares and validates arguments like + `pad_width` for iteration in pairs. + + Parameters + ---------- + x : {None, scalar, array-like} + The object to broadcast to the shape (`ndim`, 2). + ndim : int + Number of pairs the broadcasted `x` will have. + as_index : bool, optional + If `x` is not None, try to round each element of `x` to an integer + (dtype `np.intp`) and ensure every element is positive. + + Returns + ------- + pairs : nested iterables, shape (`ndim`, 2) + The broadcasted version of `x`. + + Raises + ------ + ValueError + If `as_index` is True and `x` contains negative elements. + Or if `x` is not broadcastable to the shape (`ndim`, 2). + """ + if x is None: + # Pass through None as a special case, otherwise np.round(x) fails + # with an AttributeError + return ((None, None),) * ndim + + x = np.array(x) + if as_index: + x = np.round(x).astype(np.intp, copy=False) + + if x.ndim < 3: + # Optimization: Possibly use faster paths for cases where `x` has + # only 1 or 2 elements. `np.broadcast_to` could handle these as well + # but is currently slower + + if x.size == 1: + # x was supplied as a single value + x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2 + if as_index and x < 0: + raise ValueError("index can't contain negative values") + return ((x[0], x[0]),) * ndim + + if x.size == 2 and x.shape != (2, 1): + # x was supplied with a single value for each side + # but except case when each dimension has a single value + # which should be broadcasted to a pair, + # e.g. 
[[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
+            x = x.ravel()  # Ensure x[0], x[1] works
+            if as_index and (x[0] < 0 or x[1] < 0):
+                raise ValueError("index can't contain negative values")
+            return ((x[0], x[1]),) * ndim
+
+    if as_index and x.min() < 0:
+        raise ValueError("index can't contain negative values")
+
+    # Converting the array with `tolist` seems to improve performance
+    # when iterating and indexing the result (see usage in `pad`)
+    return np.broadcast_to(x, (ndim, 2)).tolist()
+
+
+def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
+    return (array,)
+
+
+###############################################################################
+# Public functions
+
+
+@array_function_dispatch(_pad_dispatcher, module='numpy')
+def pad(array, pad_width, mode='constant', **kwargs):
+    """
+    Pad an array.
+
+    Parameters
+    ----------
+    array : array_like of rank N
+        The array to pad.
+    pad_width : {sequence, array_like, int}
+        Number of values padded to the edges of each axis.
+        ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
+        for each axis.
+        ``(before, after)`` or ``((before, after),)`` yields same before
+        and after pad for each axis.
+        ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
+        for all axes.
+    mode : str or function, optional
+        One of the following string values or a user supplied function.
+
+        'constant' (default)
+            Pads with a constant value.
+        'edge'
+            Pads with the edge values of array.
+        'linear_ramp'
+            Pads with the linear ramp between end_value and the
+            array edge value.
+        'maximum'
+            Pads with the maximum value of all or part of the
+            vector along each axis.
+        'mean'
+            Pads with the mean value of all or part of the
+            vector along each axis.
+        'median'
+            Pads with the median value of all or part of the
+            vector along each axis.
+        'minimum'
+            Pads with the minimum value of all or part of the
+            vector along each axis.
+        'reflect'
+            Pads with the reflection of the vector mirrored on
+            the first and last values of the vector along each
+            axis.
+        'symmetric'
+            Pads with the reflection of the vector mirrored
+            along the edge of the array.
+        'wrap'
+            Pads with the wrap of the vector along the axis.
+            The first values are used to pad the end and the
+            end values are used to pad the beginning.
+        'empty'
+            Pads with undefined values.
+
+        <function>
+            Padding function, see Notes.
+    stat_length : sequence or int, optional
+        Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
+        values at edge of each axis used to calculate the statistic value.
+
+        ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
+        lengths for each axis.
+
+        ``(before, after)`` or ``((before, after),)`` yields same before
+        and after statistic lengths for each axis.
+
+        ``(stat_length,)`` or ``int`` is a shortcut for
+        ``before = after = statistic`` length for all axes.
+
+        Default is ``None``, to use the entire axis.
+    constant_values : sequence or scalar, optional
+        Used in 'constant'. The values to set the padded values for each
+        axis.
+
+        ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
+        for each axis.
+
+        ``(before, after)`` or ``((before, after),)`` yields same before
+        and after constants for each axis.
+
+        ``(constant,)`` or ``constant`` is a shortcut for
+        ``before = after = constant`` for all axes.
+
+        Default is 0.
+    end_values : sequence or scalar, optional
+        Used in 'linear_ramp'. The values used for the ending value of the
+        linear_ramp and that will form the edge of the padded array.
+ + ``((before_1, after_1), ... (before_N, after_N))`` unique end values + for each axis. + + ``(before, after)`` or ``((before, after),)`` yields same before + and after end values for each axis. + + ``(constant,)`` or ``constant`` is a shortcut for + ``before = after = constant`` for all axes. + + Default is 0. + reflect_type : {'even', 'odd'}, optional + Used in 'reflect', and 'symmetric'. The 'even' style is the + default with an unaltered reflection around the edge value. For + the 'odd' style, the extended part of the array is created by + subtracting the reflected values from two times the edge value. + + Returns + ------- + pad : ndarray + Padded array of rank equal to `array` with shape increased + according to `pad_width`. + + Notes + ----- + For an array with rank greater than 1, some of the padding of later + axes is calculated from padding of previous axes. This is easiest to + think about with a rank 2 array where the corners of the padded array + are calculated by using padded values from the first axis. + + The padding function, if used, should modify a rank 1 array in-place. It + has the following signature:: + + padding_func(vector, iaxis_pad_width, iaxis, kwargs) + + where + + vector : ndarray + A rank 1 array already padded with zeros. Padded values are + vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. + iaxis_pad_width : tuple + A 2-tuple of ints, iaxis_pad_width[0] represents the number of + values padded at the beginning of vector where + iaxis_pad_width[1] represents the number of values padded at + the end of vector. + iaxis : int + The axis currently being calculated. + kwargs : dict + Any keyword arguments the function requires. + + Examples + -------- + >>> import numpy as np + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) + array([4, 4, 1, ..., 6, 6, 6]) + + >>> np.pad(a, (2, 3), 'edge') + array([1, 1, 1, ..., 5, 5, 5]) + + >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) + array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) + + >>> np.pad(a, (2,), 'maximum') + array([5, 5, 1, 2, 3, 4, 5, 5, 5]) + + >>> np.pad(a, (2,), 'mean') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> np.pad(a, (2,), 'median') + array([3, 3, 1, 2, 3, 4, 5, 3, 3]) + + >>> a = [[1, 2], [3, 4]] + >>> np.pad(a, ((3, 2), (2, 3)), 'minimum') + array([[1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1], + [3, 3, 3, 4, 3, 3, 3], + [1, 1, 1, 2, 1, 1, 1], + [1, 1, 1, 2, 1, 1, 1]]) + + >>> a = [1, 2, 3, 4, 5] + >>> np.pad(a, (2, 3), 'reflect') + array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2]) + + >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd') + array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + + >>> np.pad(a, (2, 3), 'symmetric') + array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3]) + + >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd') + array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7]) + + >>> np.pad(a, (2, 3), 'wrap') + array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3]) + + >>> def pad_with(vector, pad_width, iaxis, kwargs): + ... pad_value = kwargs.get('padder', 10) + ... vector[:pad_width[0]] = pad_value + ... 
vector[-pad_width[1]:] = pad_value + >>> a = np.arange(6) + >>> a = a.reshape((2, 3)) + >>> np.pad(a, 2, pad_with) + array([[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]]) + >>> np.pad(a, 2, pad_with, padder=100) + array([[100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 0, 1, 2, 100, 100], + [100, 100, 3, 4, 5, 100, 100], + [100, 100, 100, 100, 100, 100, 100], + [100, 100, 100, 100, 100, 100, 100]]) + """ + array = np.asarray(array) + pad_width = np.asarray(pad_width) + + if not pad_width.dtype.kind == 'i': + raise TypeError('`pad_width` must be of integral type.') + + # Broadcast to shape (array.ndim, 2) + pad_width = _as_pairs(pad_width, array.ndim, as_index=True) + + if callable(mode): + # Old behavior: Use user-supplied function with np.apply_along_axis + function = mode + # Create a new zero padded array + padded, _ = _pad_simple(array, pad_width, fill_value=0) + # And apply along each axis + + for axis in range(padded.ndim): + # Iterate using ndindex as in apply_along_axis, but assuming that + # function operates inplace on the padded array. + + # view with the iteration axis at the end + view = np.moveaxis(padded, axis, -1) + + # compute indices for the iteration axes, and append a trailing + # ellipsis to prevent 0d arrays decaying to scalars (gh-8642) + inds = ndindex(view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + for ind in inds: + function(view[ind], pad_width[axis], axis, kwargs) + + return padded + + # Make sure that no unsupported keywords were passed for the current mode + allowed_kwargs = { + 'empty': [], 'edge': [], 'wrap': [], + 'constant': ['constant_values'], + 'linear_ramp': ['end_values'], + 'maximum': ['stat_length'], + 'mean': ['stat_length'], + 'median': ['stat_length'], + 'minimum': ['stat_length'], + 'reflect': ['reflect_type'], + 'symmetric': ['reflect_type'], + } + try: + unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) + except KeyError: + raise ValueError(f"mode '{mode}' is not supported") from None + if unsupported_kwargs: + raise ValueError("unsupported keyword arguments for mode " + f"'{mode}': {unsupported_kwargs}") + + stat_functions = {"maximum": np.amax, "minimum": np.amin, + "mean": np.mean, "median": np.median} + + # Create array with final shape and original values + # (padded area is undefined) + padded, original_area_slice = _pad_simple(array, pad_width) + # And prepare iteration over all dimensions + # (zipping may be more readable than using enumerate) + axes = range(padded.ndim) + + if mode == "constant": + values = kwargs.get("constant_values", 0) + values = _as_pairs(values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, values): + roi = _view_roi(padded, original_area_slice, axis) + _set_pad_area(roi, axis, width_pair, value_pair) + + elif mode == "empty": + pass # Do nothing as _pad_simple already returned the correct result + + elif array.size == 0: + # Only modes "constant" and "empty" can extend empty axes, all other + # modes depend on `array` not being empty + # -> ensure every empty axis is only "padded with 0" + for axis, width_pair in zip(axes, pad_width): + if array.shape[axis] == 0 and any(width_pair): + raise ValueError( + f"can't extend empty axis {axis} using modes other than " + "'constant' or 'empty'" + ) + # passed, don't need to do anything more as _pad_simple already + # returned the correct 
result + + elif mode == "edge": + for axis, width_pair in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + edge_pair = _get_edges(roi, axis, width_pair) + _set_pad_area(roi, axis, width_pair, edge_pair) + + elif mode == "linear_ramp": + end_values = kwargs.get("end_values", 0) + end_values = _as_pairs(end_values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, end_values): + roi = _view_roi(padded, original_area_slice, axis) + ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) + _set_pad_area(roi, axis, width_pair, ramp_pair) + + elif mode in stat_functions: + func = stat_functions[mode] + length = kwargs.get("stat_length") + length = _as_pairs(length, padded.ndim, as_index=True) + for axis, width_pair, length_pair in zip(axes, pad_width, length): + roi = _view_roi(padded, original_area_slice, axis) + stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) + _set_pad_area(roi, axis, width_pair, stat_pair) + + elif mode in {"reflect", "symmetric"}: + method = kwargs.get("reflect_type", "even") + include_edge = mode == "symmetric" + for axis, (left_index, right_index) in zip(axes, pad_width): + if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): + # Extending singleton dimension for 'reflect' is legacy + # behavior; it really should raise an error. + edge_pair = _get_edges(padded, axis, (left_index, right_index)) + _set_pad_area( + padded, axis, (left_index, right_index), edge_pair) + continue + + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with reflected + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_reflect_both( + roi, axis, (left_index, right_index), + method, array.shape[axis], include_edge + ) + + elif mode == "wrap": + for axis, (left_index, right_index) in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + original_period = padded.shape[axis] - right_index - left_index + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with wrapped + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_wrap_both( + roi, axis, (left_index, right_index), original_period) + + return padded diff --git a/python/numpy/lib/_arraypad_impl.pyi b/python/numpy/lib/_arraypad_impl.pyi new file mode 100644 index 000000000..46b43762b --- /dev/null +++ b/python/numpy/lib/_arraypad_impl.pyi @@ -0,0 +1,89 @@ +from typing import ( + Any, + Protocol, + TypeAlias, + TypeVar, + overload, + type_check_only, +) +from typing import ( + Literal as L, +) + +from numpy import generic +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeInt, +) + +__all__ = ["pad"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +@type_check_only +class _ModeFunc(Protocol): + def __call__( + self, + vector: NDArray[Any], + iaxis_pad_width: tuple[int, int], + iaxis: int, + kwargs: dict[str, Any], + /, + ) -> None: ... + +_ModeKind: TypeAlias = L[ + "constant", + "edge", + "linear_ramp", + "maximum", + "mean", + "median", + "minimum", + "reflect", + "symmetric", + "wrap", + "empty", +] + +# TODO: In practice each keyword argument is exclusive to one or more +# specific modes. Consider adding more overloads to express this in the future. 
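+#
+# As an illustrative sketch of that idea (not active code), such an overload
+# would pin `mode` to a single literal and accept only the keyword argument
+# that mode uses, e.g.:
+#
+#     @overload
+#     def pad(
+#         array: _ArrayLike[_ScalarT],
+#         pad_width: _ArrayLikeInt,
+#         mode: L["constant"] = ...,
+#         *,
+#         constant_values: ArrayLike = ...,
+#     ) -> NDArray[_ScalarT]: ...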
+ +# Expand `**kwargs` into explicit keyword-only arguments +@overload +def pad( + array: _ArrayLike[_ScalarT], + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: _ArrayLikeInt | None = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[_ScalarT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeKind = ..., + *, + stat_length: _ArrayLikeInt | None = ..., + constant_values: ArrayLike = ..., + end_values: ArrayLike = ..., + reflect_type: L["odd", "even"] = ..., +) -> NDArray[Any]: ... +@overload +def pad( + array: _ArrayLike[_ScalarT], + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[_ScalarT]: ... +@overload +def pad( + array: ArrayLike, + pad_width: _ArrayLikeInt, + mode: _ModeFunc, + **kwargs: Any, +) -> NDArray[Any]: ... diff --git a/python/numpy/lib/_arraysetops_impl.py b/python/numpy/lib/_arraysetops_impl.py new file mode 100644 index 000000000..ef0739ba4 --- /dev/null +++ b/python/numpy/lib/_arraysetops_impl.py @@ -0,0 +1,1260 @@ +""" +Set operations for arrays based on sorting. + +Notes +----- + +For floating point arrays, inaccurate results may appear due to usual round-off +and floating point comparison issues. + +Speed could be gained in some operations by an implementation of +`numpy.sort`, that can provide directly the permutation vectors, thus avoiding +calls to `numpy.argsort`. + +Original author: Robert Cimrman + +""" +import functools +import warnings +from typing import NamedTuple + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _array_converter, _unique_hash + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d", + "union1d", "unique", "unique_all", "unique_counts", "unique_inverse", + "unique_values" +] + + +def _ediff1d_dispatcher(ary, to_end=None, to_begin=None): + return (ary, to_end, to_begin) + + +@array_function_dispatch(_ediff1d_dispatcher) +def ediff1d(ary, to_end=None, to_begin=None): + """ + The differences between consecutive elements of an array. + + Parameters + ---------- + ary : array_like + If necessary, will be flattened before the differences are taken. + to_end : array_like, optional + Number(s) to append at the end of the returned differences. + to_begin : array_like, optional + Number(s) to prepend at the beginning of the returned differences. + + Returns + ------- + ediff1d : ndarray + The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``. + + See Also + -------- + diff, gradient + + Notes + ----- + When applied to masked arrays, this function drops the mask information + if the `to_begin` and/or `to_end` parameters are used. + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.ediff1d(x) + array([ 1, 2, 3, -7]) + + >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) + array([-99, 1, 2, ..., -7, 88, 99]) + + The returned array is always 1D. 
+ + >>> y = [[1, 2, 4], [1, 6, 24]] + >>> np.ediff1d(y) + array([ 1, 2, -3, 5, 18]) + + """ + conv = _array_converter(ary) + # Convert to (any) array and ravel: + ary = conv[0].ravel() + + # enforce that the dtype of `ary` is used for the output + dtype_req = ary.dtype + + # fast track default case + if to_begin is None and to_end is None: + return ary[1:] - ary[:-1] + + if to_begin is None: + l_begin = 0 + else: + to_begin = np.asanyarray(to_begin) + if not np.can_cast(to_begin, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_begin` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_begin = to_begin.ravel() + l_begin = len(to_begin) + + if to_end is None: + l_end = 0 + else: + to_end = np.asanyarray(to_end) + if not np.can_cast(to_end, dtype_req, casting="same_kind"): + raise TypeError("dtype of `to_end` must be compatible " + "with input `ary` under the `same_kind` rule.") + + to_end = to_end.ravel() + l_end = len(to_end) + + # do the calculation in place and copy to_begin and to_end + l_diff = max(len(ary) - 1, 0) + result = np.empty_like(ary, shape=l_diff + l_begin + l_end) + + if l_begin > 0: + result[:l_begin] = to_begin + if l_end > 0: + result[l_begin + l_diff:] = to_end + np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff]) + + return conv.wrap(result) + + +def _unpack_tuple(x): + """ Unpacks one-element tuples for use as return values """ + if len(x) == 1: + return x[0] + else: + return x + + +def _unique_dispatcher(ar, return_index=None, return_inverse=None, + return_counts=None, axis=None, *, equal_nan=None, + sorted=True): + return (ar,) + + +@array_function_dispatch(_unique_dispatcher) +def unique(ar, return_index=False, return_inverse=False, + return_counts=False, axis=None, *, equal_nan=True, + sorted=True): + """ + Find the unique elements of an array. + + Returns the sorted unique elements of an array. There are three optional + outputs in addition to the unique elements: + + * the indices of the input array that give the unique values + * the indices of the unique array that reconstruct the input array + * the number of times each unique value comes up in the input array + + Parameters + ---------- + ar : array_like + Input array. Unless `axis` is specified, this will be flattened if it + is not already 1-D. + return_index : bool, optional + If True, also return the indices of `ar` (along the specified axis, + if provided, or in the flattened array) that result in the unique array. + return_inverse : bool, optional + If True, also return the indices of the unique array (for the specified + axis, if provided) that can be used to reconstruct `ar`. + return_counts : bool, optional + If True, also return the number of times each unique item appears + in `ar`. + axis : int or None, optional + The axis to operate on. If None, `ar` will be flattened. If an integer, + the subarrays indexed by the given axis will be flattened and treated + as the elements of a 1-D array with the dimension of the given axis, + see the notes for more details. Object arrays or structured arrays + that contain objects are not supported if the `axis` kwarg is used. The + default is None. + + equal_nan : bool, optional + If True, collapses multiple NaN values in the return array into one. + + .. versionadded:: 1.24 + + sorted : bool, optional + If True, the unique elements are sorted. Elements may be sorted in + practice even if ``sorted=False``, but this could change without + notice. + + .. 
versionadded:: 2.3 + + Returns + ------- + unique : ndarray + The sorted unique values. + unique_indices : ndarray, optional + The indices of the first occurrences of the unique values in the + original array. Only provided if `return_index` is True. + unique_inverse : ndarray, optional + The indices to reconstruct the original array from the + unique array. Only provided if `return_inverse` is True. + unique_counts : ndarray, optional + The number of times each of the unique values comes up in the + original array. Only provided if `return_counts` is True. + + See Also + -------- + repeat : Repeat elements of an array. + sort : Return a sorted copy of an array. + + Notes + ----- + When an axis is specified the subarrays indexed by the axis are sorted. + This is done by making the specified axis the first dimension of the array + (move the axis to the first dimension to keep the order of the other axes) + and then flattening the subarrays in C order. The flattened subarrays are + then viewed as a structured type with each element given a label, with the + effect that we end up with a 1-D array of structured types that can be + treated in the same way as any other 1-D array. The result is that the + flattened subarrays are sorted in lexicographic order starting with the + first element. + + .. versionchanged:: 1.21 + Like np.sort, NaN will sort to the end of the values. + For complex arrays all NaN values are considered equivalent + (no matter whether the NaN is in the real or imaginary part). + As the representant for the returned array the smallest one in the + lexicographical order is chosen - see np.sort for how the lexicographical + order is defined for complex arrays. + + .. versionchanged:: 2.0 + For multi-dimensional inputs, ``unique_inverse`` is reshaped + such that the input can be reconstructed using + ``np.take(unique, unique_inverse, axis=axis)``. The result is + now not 1-dimensional when ``axis=None``. + + Note that in NumPy 2.0.0 a higher dimensional array was returned also + when ``axis`` was not ``None``. This was reverted, but + ``inverse.reshape(-1)`` can be used to ensure compatibility with both + versions. 
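+
+    For example, with this version the inverse of a 2-D input (and
+    ``axis=None``) has the input's shape:
+
+    >>> _, inv = np.unique([[1, 1], [2, 3]], return_inverse=True)
+    >>> inv.shape
+    (2, 2)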
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.unique([1, 1, 2, 2, 3, 3])
+    array([1, 2, 3])
+    >>> a = np.array([[1, 1], [2, 3]])
+    >>> np.unique(a)
+    array([1, 2, 3])
+
+    Return the unique rows of a 2D array
+
+    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
+    >>> np.unique(a, axis=0)
+    array([[1, 0, 0],
+           [2, 3, 4]])
+
+    Return the indices of the original array that give the unique values:
+
+    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+    >>> u, indices = np.unique(a, return_index=True)
+    >>> u
+    array(['a', 'b', 'c'], dtype='<U1')
+    >>> indices
+    array([0, 1, 3])
+    >>> a[indices]
+    array(['a', 'b', 'c'], dtype='<U1')
+
+    Reconstruct the input array from the unique values and inverse:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> u, indices = np.unique(a, return_inverse=True)
+    >>> u
+    array([1, 2, 3, 4, 6])
+    >>> indices
+    array([0, 1, 4, 3, 1, 2, 1])
+    >>> u[indices]
+    array([1, 2, 6, 4, 2, 3, 2])
+
+    Reconstruct the input values from the unique values and counts:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> values, counts = np.unique(a, return_counts=True)
+    >>> values
+    array([1, 2, 3, 4, 6])
+    >>> counts
+    array([1, 3, 1, 1, 1])
+    >>> np.repeat(values, counts)
+    array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
+
+    """
+    ar = np.asanyarray(ar)
+    if axis is None:
+        ret = _unique1d(ar, return_index, return_inverse, return_counts,
+                        equal_nan=equal_nan, inverse_shape=ar.shape, axis=None,
+                        sorted=sorted)
+        return _unpack_tuple(ret)
+
+    # axis was specified and not None
+    try:
+        ar = np.moveaxis(ar, axis, 0)
+    except np.exceptions.AxisError:
+        # this removes the "axis1" or "axis2" prefix from the error message
+        raise np.exceptions.AxisError(axis, ar.ndim) from None
+    inverse_shape = [1] * ar.ndim
+    inverse_shape[axis] = ar.shape[0]
+
+    # Must reshape to a contiguous 2D array for this to work...
+    orig_shape, orig_dtype = ar.shape, ar.dtype
+    ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
+    ar = np.ascontiguousarray(ar)
+    dtype = [(f'f{i}', ar.dtype) for i in range(ar.shape[1])]
+
+    # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
+    # data type with `m` fields where each field has the data type of `ar`.
+    # In the following, we create the array `consolidated`, which has
+    # shape `(n,)` with data type `dtype`.
+    try:
+        if ar.shape[1] > 0:
+            consolidated = ar.view(dtype)
+        else:
+            # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
+            # a data type with itemsize 0, and the call `ar.view(dtype)` will
+            # fail. Instead, we'll use `np.empty` to explicitly create the
+            # array with shape `(len(ar),)`. Because `dtype` in this case has
+            # itemsize 0, the total size of the result is still 0 bytes.
+            consolidated = np.empty(len(ar), dtype=dtype)
+    except TypeError as e:
+        # There's no good way to do this for object arrays, etc...
+        msg = 'The axis argument to unique is not supported for dtype {dt}'
+        raise TypeError(msg.format(dt=ar.dtype)) from e
+
+    def reshape_uniq(uniq):
+        n = len(uniq)
+        uniq = uniq.view(orig_dtype)
+        uniq = uniq.reshape(n, *orig_shape[1:])
+        uniq = np.moveaxis(uniq, 0, axis)
+        return uniq
+
+    output = _unique1d(consolidated, return_index,
+                       return_inverse, return_counts,
+                       equal_nan=equal_nan, inverse_shape=inverse_shape,
+                       axis=axis, sorted=sorted)
+    output = (reshape_uniq(output[0]),) + output[1:]
+    return _unpack_tuple(output)
+
+
+def _unique1d(ar, return_index=False, return_inverse=False,
+              return_counts=False, *, equal_nan=True, inverse_shape=None,
+              axis=None, sorted=True):
+    """
+    Find the unique elements of an array, ignoring shape.
+ + Uses a hash table to find the unique elements if possible. + """ + ar = np.asanyarray(ar).flatten() + if len(ar.shape) != 1: + # np.matrix, and maybe some other array subclasses, insist on keeping + # two dimensions for all operations. Coerce to an ndarray in such cases. + ar = np.asarray(ar).flatten() + + optional_indices = return_index or return_inverse + + # masked arrays are not supported yet. + if not optional_indices and not return_counts and not np.ma.is_masked(ar): + # First we convert the array to a numpy array, later we wrap it back + # in case it was a subclass of numpy.ndarray. + conv = _array_converter(ar) + ar_, = conv + + if (hash_unique := _unique_hash(ar_)) is not NotImplemented: + if sorted: + hash_unique.sort() + # We wrap the result back in case it was a subclass of numpy.ndarray. + return (conv.wrap(hash_unique),) + + # If we don't use the hash map, we use the slower sorting method. + if optional_indices: + perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') + aux = ar[perm] + else: + ar.sort() + aux = ar + mask = np.empty(aux.shape, dtype=np.bool) + mask[:1] = True + if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and + np.isnan(aux[-1])): + if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent + aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left') + else: + aux_firstnan = np.searchsorted(aux, aux[-1], side='left') + if aux_firstnan > 0: + mask[1:aux_firstnan] = ( + aux[1:aux_firstnan] != aux[:aux_firstnan - 1]) + mask[aux_firstnan] = True + mask[aux_firstnan + 1:] = False + else: + mask[1:] = aux[1:] != aux[:-1] + + ret = (aux[mask],) + if return_index: + ret += (perm[mask],) + if return_inverse: + imask = np.cumsum(mask) - 1 + inv_idx = np.empty(mask.shape, dtype=np.intp) + inv_idx[perm] = imask + ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,) + if return_counts: + idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) + ret += (np.diff(idx),) + return ret + + +# Array API set functions + +class UniqueAllResult(NamedTuple): + values: np.ndarray + indices: np.ndarray + inverse_indices: np.ndarray + counts: np.ndarray + + +class UniqueCountsResult(NamedTuple): + values: np.ndarray + counts: np.ndarray + + +class UniqueInverseResult(NamedTuple): + values: np.ndarray + inverse_indices: np.ndarray + + +def _unique_all_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_all_dispatcher) +def unique_all(x): + """ + Find the unique elements of an array, and counts, inverse, and indices. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_index=True, return_inverse=True, + return_counts=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * indices - The first occurring indices for each unique element. + * inverse_indices - The indices from the set of unique elements + that reconstruct `x`. + * counts - The corresponding counts for each unique element. + + See Also + -------- + unique : Find the unique elements of an array. 
+ + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_all(x) + >>> uniq.values + array([1, 2]) + >>> uniq.indices + array([0, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) + >>> uniq.counts + array([2, 1]) + """ + result = unique( + x, + return_index=True, + return_inverse=True, + return_counts=True, + equal_nan=False, + ) + return UniqueAllResult(*result) + + +def _unique_counts_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_counts_dispatcher) +def unique_counts(x): + """ + Find the unique elements and counts of an input array `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_counts=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * counts - The corresponding counts for each unique element. + + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_counts(x) + >>> uniq.values + array([1, 2]) + >>> uniq.counts + array([2, 1]) + """ + result = unique( + x, + return_index=False, + return_inverse=False, + return_counts=True, + equal_nan=False, + ) + return UniqueCountsResult(*result) + + +def _unique_inverse_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_inverse_dispatcher) +def unique_inverse(x): + """ + Find the unique elements of `x` and indices to reconstruct `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, return_inverse=True, equal_nan=False, sorted=False) + + but returns a namedtuple for easier access to each output. + + .. note:: + This function currently always returns a sorted result, however, + this could change in any NumPy minor release. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : namedtuple + The result containing: + + * values - The unique elements of an input array. + * inverse_indices - The indices from the set of unique elements + that reconstruct `x`. + + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> x = [1, 1, 2] + >>> uniq = np.unique_inverse(x) + >>> uniq.values + array([1, 2]) + >>> uniq.inverse_indices + array([0, 0, 1]) + """ + result = unique( + x, + return_index=False, + return_inverse=True, + return_counts=False, + equal_nan=False, + ) + return UniqueInverseResult(*result) + + +def _unique_values_dispatcher(x, /): + return (x,) + + +@array_function_dispatch(_unique_values_dispatcher) +def unique_values(x): + """ + Returns the unique elements of an input array `x`. + + This function is an Array API compatible alternative to:: + + np.unique(x, equal_nan=False, sorted=False) + + .. versionchanged:: 2.3 + The algorithm was changed to a faster one that does not rely on + sorting, and hence the results are no longer implicitly sorted. + + Parameters + ---------- + x : array_like + Input array. It will be flattened if it is not already 1-D. + + Returns + ------- + out : ndarray + The unique elements of an input array. 
+ + See Also + -------- + unique : Find the unique elements of an array. + + Examples + -------- + >>> import numpy as np + >>> np.unique_values([1, 1, 2]) + array([1, 2]) # may vary + + """ + return unique( + x, + return_index=False, + return_inverse=False, + return_counts=False, + equal_nan=False, + sorted=False, + ) + + +def _intersect1d_dispatcher( + ar1, ar2, assume_unique=None, return_indices=None): + return (ar1, ar2) + + +@array_function_dispatch(_intersect1d_dispatcher) +def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): + """ + Find the intersection of two arrays. + + Return the sorted, unique values that are in both of the input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. Will be flattened if not already 1D. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. If True but ``ar1`` or ``ar2`` are not + unique, incorrect results and out-of-bounds indices could result. + Default is False. + return_indices : bool + If True, the indices which correspond to the intersection of the two + arrays are returned. The first instance of a value is used if there are + multiple. Default is False. + + Returns + ------- + intersect1d : ndarray + Sorted 1D array of common and unique elements. + comm1 : ndarray + The indices of the first occurrences of the common values in `ar1`. + Only provided if `return_indices` is True. + comm2 : ndarray + The indices of the first occurrences of the common values in `ar2`. + Only provided if `return_indices` is True. + + Examples + -------- + >>> import numpy as np + >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) + array([1, 3]) + + To intersect more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([3]) + + To return the indices of the values common to the input arrays + along with the intersected values: + + >>> x = np.array([1, 1, 2, 3, 4]) + >>> y = np.array([2, 1, 4, 6]) + >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True) + >>> x_ind, y_ind + (array([0, 2, 4]), array([1, 0, 2])) + >>> xy, x[x_ind], y[y_ind] + (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4])) + + """ + ar1 = np.asanyarray(ar1) + ar2 = np.asanyarray(ar2) + + if not assume_unique: + if return_indices: + ar1, ind1 = unique(ar1, return_index=True) + ar2, ind2 = unique(ar2, return_index=True) + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + else: + ar1 = ar1.ravel() + ar2 = ar2.ravel() + + aux = np.concatenate((ar1, ar2)) + if return_indices: + aux_sort_indices = np.argsort(aux, kind='mergesort') + aux = aux[aux_sort_indices] + else: + aux.sort() + + mask = aux[1:] == aux[:-1] + int1d = aux[:-1][mask] + + if return_indices: + ar1_indices = aux_sort_indices[:-1][mask] + ar2_indices = aux_sort_indices[1:][mask] - ar1.size + if not assume_unique: + ar1_indices = ind1[ar1_indices] + ar2_indices = ind2[ar2_indices] + + return int1d, ar1_indices, ar2_indices + else: + return int1d + + +def _setxor1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setxor1d_dispatcher) +def setxor1d(ar1, ar2, assume_unique=False): + """ + Find the set exclusive-or of two arrays. + + Return the sorted, unique values that are in only one (not both) of the + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. 
+ assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setxor1d : ndarray + Sorted 1D array of unique values that are in only one of the input + arrays. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3, 2, 4]) + >>> b = np.array([2, 3, 5, 7, 5]) + >>> np.setxor1d(a,b) + array([1, 4, 5, 7]) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = np.concatenate((ar1, ar2), axis=None) + if aux.size == 0: + return aux + + aux.sort() + flag = np.concatenate(([True], aux[1:] != aux[:-1], [True])) + return aux[flag[1:] & flag[:-1]] + + +def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *, + kind=None): + return (ar1, ar2) + + +@array_function_dispatch(_in1d_dispatcher) +def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): + """ + Test whether each element of a 1-D array is also present in a second array. + + .. deprecated:: 2.0 + Use :func:`isin` instead of `in1d` for new code. + + Returns a boolean array the same length as `ar1` that is True + where an element of `ar1` is in `ar2` and False otherwise. + + Parameters + ---------- + ar1 : (M,) array_like + Input array. + ar2 : array_like + The values against which to test each value of `ar1`. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted (that is, + False where an element of `ar1` is in `ar2` and True otherwise). + Default is False. ``np.in1d(a, b, invert=True)`` is equivalent + to (but is faster than) ``np.invert(in1d(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. This will have + a memory usage of roughly 6 times the sum of the sizes of + `ar1` and `ar2`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `ar1` plus the max-min value of `ar2`. `assume_unique` + has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `ar1` and `ar2`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. If 'table' is chosen, + `assume_unique` will have no effect. + + Returns + ------- + in1d : (M,) ndarray, bool + The values `ar1[in1d]` are in `ar2`. + + See Also + -------- + isin : Version of this function that preserves the + shape of ar1. + + Notes + ----- + `in1d` can be considered as an element-wise function version of the + python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly + equivalent to ``np.array([item in b for item in a])``. + However, this idea fails if `ar2` is a set, or similar (non-sequence) + container: As ``ar2`` is converted to an array, in those cases + ``asarray(ar2)`` is an object array rather than the expected array of + contained values. 
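+
+    For example, a set is wrapped as a single object element rather than
+    being converted element-wise:
+
+    >>> np.asarray({1, 2}).dtype
+    dtype('O')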
+ + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + Examples + -------- + >>> import numpy as np + >>> test = np.array([0, 1, 2, 5, 0]) + >>> states = [0, 2] + >>> mask = np.in1d(test, states) + >>> mask + array([ True, False, True, False, True]) + >>> test[mask] + array([0, 2, 0]) + >>> mask = np.in1d(test, states, invert=True) + >>> mask + array([False, True, False, True, False]) + >>> test[mask] + array([1, 5]) + """ + + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`in1d` is deprecated. Use `np.isin` instead.", + DeprecationWarning, + stacklevel=2 + ) + + return _in1d(ar1, ar2, assume_unique, invert, kind=kind) + + +def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): + # Ravel both arrays, behavior for the first array could be different + ar1 = np.asarray(ar1).ravel() + ar2 = np.asarray(ar2).ravel() + + # Ensure that iteration through object arrays yields size-1 arrays + if ar2.dtype == object: + ar2 = ar2.reshape(-1, 1) + + if kind not in {None, 'sort', 'table'}: + raise ValueError( + f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.") + + # Can use the table method if all arrays are integers or boolean: + is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2)) + use_table_method = is_int_arrays and kind in {None, 'table'} + + if use_table_method: + if ar2.size == 0: + if invert: + return np.ones_like(ar1, dtype=bool) + else: + return np.zeros_like(ar1, dtype=bool) + + # Convert booleans to uint8 so we can use the fast integer algorithm + if ar1.dtype == bool: + ar1 = ar1.astype(np.uint8) + if ar2.dtype == bool: + ar2 = ar2.astype(np.uint8) + + ar2_min = int(np.min(ar2)) + ar2_max = int(np.max(ar2)) + + ar2_range = ar2_max - ar2_min + + # Constraints on whether we can actually use the table method: + # 1. Assert memory usage is not too large + below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) + # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype + range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max + + # Optimal performance is for approximately + # log10(size) > (log10(range) - 2.27) / 0.927. + # However, here we set the requirement that by default + # the intermediate array can only be 6x + # the combined memory allocation of the original + # arrays. See discussion on + # https://github.com/numpy/numpy/pull/12065. + + if ( + range_safe_from_overflow and + (below_memory_constraint or kind == 'table') + ): + + if invert: + outgoing_array = np.ones_like(ar1, dtype=bool) + else: + outgoing_array = np.zeros_like(ar1, dtype=bool) + + # Make elements 1 where the integer exists in ar2 + if invert: + isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 0 + else: + isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) + isin_helper_ar[ar2 - ar2_min] = 1 + + # Mask out elements we know won't work + basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) + in_range_ar1 = ar1[basic_mask] + if in_range_ar1.size == 0: + # Nothing more to do, since all values are out of range. + return outgoing_array + + # Unfortunately, ar2_min can be out of range for `intp` even + # if the calculation result must fit in range (and be positive). 
+ # In that case, use ar2.dtype which must work for all unmasked + # values. + try: + ar2_min = np.array(ar2_min, dtype=np.intp) + dtype = np.intp + except OverflowError: + dtype = ar2.dtype + + out = np.empty_like(in_range_ar1, dtype=np.intp) + outgoing_array[basic_mask] = isin_helper_ar[ + np.subtract(in_range_ar1, ar2_min, dtype=dtype, + out=out, casting="unsafe")] + + return outgoing_array + elif kind == 'table': # not range_safe_from_overflow + raise RuntimeError( + "You have specified kind='table', " + "but the range of values in `ar2` or `ar1` exceed the " + "maximum integer of the datatype. " + "Please set `kind` to None or 'sort'." + ) + elif kind == 'table': + raise ValueError( + "The 'table' method is only " + "supported for boolean or integer arrays. " + "Please select 'sort' or None for kind." + ) + + # Check if one of the arrays may contain arbitrary objects + contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject + + # This code is run when + # a) the first condition is true, making the code significantly faster + # b) the second condition is true (i.e. `ar1` or `ar2` may contain + # arbitrary objects), since then sorting is not guaranteed to work + if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: + if invert: + mask = np.ones(len(ar1), dtype=bool) + for a in ar2: + mask &= (ar1 != a) + else: + mask = np.zeros(len(ar1), dtype=bool) + for a in ar2: + mask |= (ar1 == a) + return mask + + # Otherwise use sorting + if not assume_unique: + ar1, rev_idx = np.unique(ar1, return_inverse=True) + ar2 = np.unique(ar2) + + ar = np.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = np.concatenate((bool_ar, [invert])) + ret = np.empty(ar.shape, dtype=bool) + ret[order] = flag + + if assume_unique: + return ret[:len(ar1)] + else: + return ret[rev_idx] + + +def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None, + *, kind=None): + return (element, test_elements) + + +@array_function_dispatch(_isin_dispatcher) +def isin(element, test_elements, assume_unique=False, invert=False, *, + kind=None): + """ + Calculates ``element in test_elements``, broadcasting over `element` only. + Returns a boolean array of the same shape as `element` that is True + where an element of `element` is in `test_elements` and False otherwise. + + Parameters + ---------- + element : array_like + Input array. + test_elements : array_like + The values against which to test each value of `element`. + This argument is flattened if it is an array or array_like. + See notes for behavior with non-array-like parameters. + assume_unique : bool, optional + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + invert : bool, optional + If True, the values in the returned array are inverted, as if + calculating `element not in test_elements`. Default is False. + ``np.isin(a, b, invert=True)`` is equivalent to (but faster + than) ``np.invert(np.isin(a, b))``. + kind : {None, 'sort', 'table'}, optional + The algorithm to use. This will not affect the final result, + but will affect the speed and memory use. The default, None, + will select automatically based on memory considerations. + + * If 'sort', will use a mergesort-based approach. 
This will have + a memory usage of roughly 6 times the sum of the sizes of + `element` and `test_elements`, not accounting for size of dtypes. + * If 'table', will use a lookup table approach similar + to a counting sort. This is only available for boolean and + integer arrays. This will have a memory usage of the + size of `element` plus the max-min value of `test_elements`. + `assume_unique` has no effect when the 'table' option is used. + * If None, will automatically choose 'table' if + the required memory allocation is less than or equal to + 6 times the sum of the sizes of `element` and `test_elements`, + otherwise will use 'sort'. This is done to not use + a large amount of memory by default, even though + 'table' may be faster in most cases. If 'table' is chosen, + `assume_unique` will have no effect. + + + Returns + ------- + isin : ndarray, bool + Has the same shape as `element`. The values `element[isin]` + are in `test_elements`. + + Notes + ----- + `isin` is an element-wise function version of the python keyword `in`. + ``isin(a, b)`` is roughly equivalent to + ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences. + + `element` and `test_elements` are converted to arrays if they are not + already. If `test_elements` is a set (or other non-sequence collection) + it will be converted to an object array with one element, rather than an + array of the values contained in `test_elements`. This is a consequence + of the `array` constructor's way of handling non-sequence collections. + Converting the set to a list usually gives the desired behavior. + + Using ``kind='table'`` tends to be faster than `kind='sort'` if the + following relationship is true: + ``log10(len(test_elements)) > + (log10(max(test_elements)-min(test_elements)) - 2.27) / 0.927``, + but may use greater memory. The default value for `kind` will + be automatically selected based only on memory usage, so one may + manually set ``kind='table'`` if memory constraints can be relaxed. + + Examples + -------- + >>> import numpy as np + >>> element = 2*np.arange(4).reshape((2, 2)) + >>> element + array([[0, 2], + [4, 6]]) + >>> test_elements = [1, 2, 4, 8] + >>> mask = np.isin(element, test_elements) + >>> mask + array([[False, True], + [ True, False]]) + >>> element[mask] + array([2, 4]) + + The indices of the matched values can be obtained with `nonzero`: + + >>> np.nonzero(mask) + (array([0, 1]), array([1, 0])) + + The test can also be inverted: + + >>> mask = np.isin(element, test_elements, invert=True) + >>> mask + array([[ True, False], + [False, True]]) + >>> element[mask] + array([0, 6]) + + Because of how `array` handles sets, the following does not + work as expected: + + >>> test_set = {1, 2, 4, 8} + >>> np.isin(element, test_set) + array([[False, False], + [False, False]]) + + Casting the set to a list gives the expected result: + + >>> np.isin(element, list(test_set)) + array([[False, True], + [ True, False]]) + """ + element = np.asarray(element) + return _in1d(element, test_elements, assume_unique=assume_unique, + invert=invert, kind=kind).reshape(element.shape) + + +def _union1d_dispatcher(ar1, ar2): + return (ar1, ar2) + + +@array_function_dispatch(_union1d_dispatcher) +def union1d(ar1, ar2): + """ + Find the union of two arrays. + + Return the unique, sorted array of values that are in either of the two + input arrays. + + Parameters + ---------- + ar1, ar2 : array_like + Input arrays. They are flattened if they are not already 1D. 
+ + Returns + ------- + union1d : ndarray + Unique, sorted union of the input arrays. + + Examples + -------- + >>> import numpy as np + >>> np.union1d([-1, 0, 1], [-2, 0, 2]) + array([-2, -1, 0, 1, 2]) + + To find the union of more than two arrays, use functools.reduce: + + >>> from functools import reduce + >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2])) + array([1, 2, 3, 4, 6]) + """ + return unique(np.concatenate((ar1, ar2), axis=None)) + + +def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None): + return (ar1, ar2) + + +@array_function_dispatch(_setdiff1d_dispatcher) +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Find the set difference of two arrays. + + Return the unique values in `ar1` that are not in `ar2`. + + Parameters + ---------- + ar1 : array_like + Input array. + ar2 : array_like + Input comparison array. + assume_unique : bool + If True, the input arrays are both assumed to be unique, which + can speed up the calculation. Default is False. + + Returns + ------- + setdiff1d : ndarray + 1D array of values in `ar1` that are not in `ar2`. The result + is sorted when `assume_unique=False`, but otherwise only sorted + if the input is sorted. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3, 2, 4, 1]) + >>> b = np.array([3, 4, 5, 6]) + >>> np.setdiff1d(a, b) + array([1, 2]) + + """ + if assume_unique: + ar1 = np.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)] diff --git a/python/numpy/lib/_arraysetops_impl.pyi b/python/numpy/lib/_arraysetops_impl.pyi new file mode 100644 index 000000000..a2cb04a9c --- /dev/null +++ b/python/numpy/lib/_arraysetops_impl.pyi @@ -0,0 +1,468 @@ +from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload +from typing import Literal as L + +from typing_extensions import TypeVar, deprecated + +import numpy as np +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeNumber_co, +) + +__all__ = [ + "ediff1d", + "in1d", + "intersect1d", + "isin", + "setdiff1d", + "setxor1d", + "union1d", + "unique", + "unique_all", + "unique_counts", + "unique_inverse", + "unique_values", +] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_) + +# Explicitly set all allowed values to prevent accidental castings to +# abstract dtypes (their common super-type). +# Only relevant if two or more arguments are parametrized, (e.g. 
`setdiff1d`)
+# which could result in, for example, `int64` and `float64` producing a
+# `number[_64Bit]` array
+_EitherSCT = TypeVar(
+    "_EitherSCT",
+    np.bool,
+    np.int8, np.int16, np.int32, np.int64, np.intp,
+    np.uint8, np.uint16, np.uint32, np.uint64, np.uintp,
+    np.float16, np.float32, np.float64, np.longdouble,
+    np.complex64, np.complex128, np.clongdouble,
+    np.timedelta64, np.datetime64,
+    np.bytes_, np.str_, np.void, np.object_,
+    np.integer, np.floating, np.complexfloating, np.character,
+) # fmt: skip
+
+_AnyArray: TypeAlias = NDArray[Any]
+_IntArray: TypeAlias = NDArray[np.intp]
+
+###
+
+class UniqueAllResult(NamedTuple, Generic[_ScalarT]):
+    values: NDArray[_ScalarT]
+    indices: _IntArray
+    inverse_indices: _IntArray
+    counts: _IntArray
+
+class UniqueCountsResult(NamedTuple, Generic[_ScalarT]):
+    values: NDArray[_ScalarT]
+    counts: _IntArray
+
+class UniqueInverseResult(NamedTuple, Generic[_ScalarT]):
+    values: NDArray[_ScalarT]
+    inverse_indices: _IntArray
+
+#
+@overload
+def ediff1d(
+    ary: _ArrayLikeBool_co,
+    to_end: ArrayLike | None = None,
+    to_begin: ArrayLike | None = None,
+) -> NDArray[np.int8]: ...
+@overload
+def ediff1d(
+    ary: _ArrayLike[_NumericT],
+    to_end: ArrayLike | None = None,
+    to_begin: ArrayLike | None = None,
+) -> NDArray[_NumericT]: ...
+@overload
+def ediff1d(
+    ary: _ArrayLike[np.datetime64[Any]],
+    to_end: ArrayLike | None = None,
+    to_begin: ArrayLike | None = None,
+) -> NDArray[np.timedelta64]: ...
+@overload
+def ediff1d(
+    ary: _ArrayLikeNumber_co,
+    to_end: ArrayLike | None = None,
+    to_begin: ArrayLike | None = None,
+) -> _AnyArray: ...
+
+#
+@overload # known scalar-type, FFF
+def unique(
+    ar: _ArrayLike[_ScalarT],
+    return_index: L[False] = False,
+    return_inverse: L[False] = False,
+    return_counts: L[False] = False,
+    axis: SupportsIndex | None = None,
+    *,
+    equal_nan: bool = True,
+    sorted: bool = True,
+) -> NDArray[_ScalarT]: ...
+@overload # unknown scalar-type, FFF
+def unique(
+    ar: ArrayLike,
+    return_index: L[False] = False,
+    return_inverse: L[False] = False,
+    return_counts: L[False] = False,
+    axis: SupportsIndex | None = None,
+    *,
+    equal_nan: bool = True,
+    sorted: bool = True,
+) -> _AnyArray: ...
+@overload # known scalar-type, TFF
+def unique(
+    ar: _ArrayLike[_ScalarT],
+    return_index: L[True],
+    return_inverse: L[False] = False,
+    return_counts: L[False] = False,
+    axis: SupportsIndex | None = None,
+    *,
+    equal_nan: bool = True,
+    sorted: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray]: ...
+@overload # unknown scalar-type, TFF
+def unique(
+    ar: ArrayLike,
+    return_index: L[True],
+    return_inverse: L[False] = False,
+    return_counts: L[False] = False,
+    axis: SupportsIndex | None = None,
+    *,
+    equal_nan: bool = True,
+    sorted: bool = True,
+) -> tuple[_AnyArray, _IntArray]: ...
+@overload # known scalar-type, FTF (positional)
+def unique(
+    ar: _ArrayLike[_ScalarT],
+    return_index: L[False],
+    return_inverse: L[True],
+    return_counts: L[False] = False,
+    axis: SupportsIndex | None = None,
+    *,
+    equal_nan: bool = True,
+    sorted: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray]: ...
+@overload # known scalar-type, FTF (keyword)
+def unique(
+    ar: _ArrayLike[_ScalarT],
+    return_index: L[False] = False,
+    *,
+    return_inverse: L[True],
+    return_counts: L[False] = False,
+    axis: SupportsIndex | None = None,
+    equal_nan: bool = True,
+    sorted: bool = True,
+) -> tuple[NDArray[_ScalarT], _IntArray]: ...
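+
+# A minimal runtime sketch (an editorial illustration, not part of the
+# upstream stub) of the `kind` parameter typed for `isin`/`in1d` below and
+# implemented in `_in1d` earlier in this patch. Both paths return identical
+# masks; only speed and memory use differ:
+#
+#     import numpy as np
+#     rng = np.random.default_rng(0)
+#     a = rng.integers(0, 1_000, size=100_000)
+#     b = rng.integers(0, 1_000, size=1_000)
+#     m_sort = np.isin(a, b, kind='sort')    # mergesort-based path
+#     m_table = np.isin(a, b, kind='table')  # lookup-table path (small range)
+#     assert (m_sort == m_table).all()
+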
+@overload # unknown scalar-type, FTF (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # unknown scalar-type, FTF (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # known scalar-type, FFT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # known scalar-type, FFT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray]: ... +@overload # unknown scalar-type, FFT (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # unknown scalar-type, FFT (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray]: ... +@overload # known scalar-type, TTF +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TTF +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[True], + return_counts: L[False] = False, + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # known scalar-type, TFT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # known scalar-type, TFT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TFT (positional) +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... 
+@overload # unknown scalar-type, TFT (keyword) +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[False] = False, + *, + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # known scalar-type, FTT (positional) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # known scalar-type, FTT (keyword) +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, FTT (positional) +def unique( + ar: ArrayLike, + return_index: L[False], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, FTT (keyword) +def unique( + ar: ArrayLike, + return_index: L[False] = False, + *, + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # known scalar-type, TTT +def unique( + ar: _ArrayLike[_ScalarT], + return_index: L[True], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, TTT +def unique( + ar: ArrayLike, + return_index: L[True], + return_inverse: L[True], + return_counts: L[True], + axis: SupportsIndex | None = None, + *, + equal_nan: bool = True, + sorted: bool = True, +) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ... + +# +@overload +def unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ... +@overload +def unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ... + +# +@overload +def unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ... +@overload +def unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ... + +# +@overload +def unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ... +@overload +def unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ... + +# +@overload +def unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def unique_values(x: ArrayLike) -> _AnyArray: ... + +# +@overload # known scalar-type, return_indices=False (default) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool = False, + return_indices: L[False] = False, +) -> NDArray[_EitherSCT]: ... +@overload # known scalar-type, return_indices=True (positional) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool, + return_indices: L[True], +) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... 
+@overload # known scalar-type, return_indices=True (keyword) +def intersect1d( + ar1: _ArrayLike[_EitherSCT], + ar2: _ArrayLike[_EitherSCT], + assume_unique: bool = False, + *, + return_indices: L[True], +) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ... +@overload # unknown scalar-type, return_indices=False (default) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = False, + return_indices: L[False] = False, +) -> _AnyArray: ... +@overload # unknown scalar-type, return_indices=True (positional) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool, + return_indices: L[True], +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... +@overload # unknown scalar-type, return_indices=True (keyword) +def intersect1d( + ar1: ArrayLike, + ar2: ArrayLike, + assume_unique: bool = False, + *, + return_indices: L[True], +) -> tuple[_AnyArray, _IntArray, _IntArray]: ... + +# +@overload +def setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +@overload +def setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... + +# +@overload +def union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ... +@overload +def union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ... + +# +@overload +def setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ... +@overload +def setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ... + +# +def isin( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = False, + invert: bool = False, + *, + kind: L["sort", "table"] | None = None, +) -> NDArray[np.bool]: ... + +# +@deprecated("Use 'isin' instead") +def in1d( + element: ArrayLike, + test_elements: ArrayLike, + assume_unique: bool = False, + invert: bool = False, + *, + kind: L["sort", "table"] | None = None, +) -> NDArray[np.bool]: ... diff --git a/python/numpy/lib/_arrayterator_impl.py b/python/numpy/lib/_arrayterator_impl.py new file mode 100644 index 000000000..5f7c5fc4f --- /dev/null +++ b/python/numpy/lib/_arrayterator_impl.py @@ -0,0 +1,224 @@ +""" +A buffered iterator for big arrays. + +This module solves the problem of iterating over a big file-based array +without having to read it into memory. The `Arrayterator` class wraps +an array object, and when iterated it will return sub-arrays with at most +a user-specified number of elements. + +""" +from functools import reduce +from operator import mul + +__all__ = ['Arrayterator'] + + +class Arrayterator: + """ + Buffered iterator for big arrays. + + `Arrayterator` creates a buffered iterator for reading big arrays in small + contiguous blocks. The class is useful for objects stored in the + file system. It allows iteration over the object *without* reading + everything in memory; instead, small blocks are read and iterated over. + + `Arrayterator` can be used with any object that supports multidimensional + slices. This includes NumPy arrays, but also variables from + Scientific.IO.NetCDF or pynetcdf for example. + + Parameters + ---------- + var : array_like + The object to iterate over. + buf_size : int, optional + The buffer size. If `buf_size` is supplied, the maximum amount of + data that will be read into memory is `buf_size` elements. + Default is None, which will read as many element as possible + into memory. 
+
+    Attributes
+    ----------
+    var
+    buf_size
+    start
+    stop
+    step
+    shape
+    flat
+
+    See Also
+    --------
+    numpy.ndenumerate : Multidimensional array iterator.
+    numpy.flatiter : Flat array iterator.
+    numpy.memmap : Create a memory-map to an array stored
+                   in a binary file on disk.
+
+    Notes
+    -----
+    The algorithm works by first finding a "running dimension", along which
+    the blocks will be extracted. Given an array of dimensions
+    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
+    first dimension will be used. If, on the other hand,
+    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
+    Blocks are extracted along this dimension, and when the last block is
+    returned the process continues from the next dimension, until all
+    elements have been read.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+    >>> a_itor = np.lib.Arrayterator(a, 2)
+    >>> a_itor.shape
+    (3, 4, 5, 6)
+
+    Now we can iterate over ``a_itor``, and it will return arrays of size
+    two. Since `buf_size` was smaller than any dimension, the first
+    dimension will be iterated over first:
+
+    >>> for subarr in a_itor:
+    ...     if not subarr.all():
+    ...         print(subarr, subarr.shape) # doctest: +SKIP
+    >>> # [[[[0 1]]]] (1, 1, 1, 2)
+
+    """
+
+    __module__ = "numpy.lib"
+
+    def __init__(self, var, buf_size=None):
+        self.var = var
+        self.buf_size = buf_size
+
+        self.start = [0 for dim in var.shape]
+        self.stop = list(var.shape)
+        self.step = [1 for dim in var.shape]
+
+    def __getattr__(self, attr):
+        return getattr(self.var, attr)
+
+    def __getitem__(self, index):
+        """
+        Return a new arrayterator.
+
+        """
+        # Fix index, handling ellipsis and incomplete slices.
+        if not isinstance(index, tuple):
+            index = (index,)
+        fixed = []
+        length, dims = len(index), self.ndim
+        for slice_ in index:
+            if slice_ is Ellipsis:
+                fixed.extend([slice(None)] * (dims - length + 1))
+                length = len(fixed)
+            elif isinstance(slice_, int):
+                fixed.append(slice(slice_, slice_ + 1, 1))
+            else:
+                fixed.append(slice_)
+        index = tuple(fixed)
+        if len(index) < dims:
+            index += (slice(None),) * (dims - len(index))
+
+        # Return a new arrayterator object.
+        out = self.__class__(self.var, self.buf_size)
+        for i, (start, stop, step, slice_) in enumerate(
+                zip(self.start, self.stop, self.step, index)):
+            out.start[i] = start + (slice_.start or 0)
+            out.step[i] = step * (slice_.step or 1)
+            out.stop[i] = start + (slice_.stop or stop - start)
+            out.stop[i] = min(stop, out.stop[i])
+        return out
+
+    def __array__(self, dtype=None, copy=None):
+        """
+        Return corresponding data.
+
+        """
+        slice_ = tuple(slice(*t) for t in zip(
+            self.start, self.stop, self.step))
+        return self.var[slice_]
+
+    @property
+    def flat(self):
+        """
+        A 1-D flat iterator for Arrayterator objects.
+
+        This iterator returns elements of the array to be iterated over in
+        `~lib.Arrayterator` one by one.
+        It is similar to `flatiter`.
+
+        See Also
+        --------
+        lib.Arrayterator
+        flatiter
+
+        Examples
+        --------
+        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        >>> a_itor = np.lib.Arrayterator(a, 2)
+
+        >>> for subarr in a_itor.flat:
+        ...     if not subarr:
+        ...         print(subarr, type(subarr))
+        ...
+        0 <class 'numpy.int64'>
+
+        """
+        for block in self:
+            yield from block.flat
+
+    @property
+    def shape(self):
+        """
+        The shape of the array to be iterated over.
+
+        For an example, see `Arrayterator`.
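+
+        A minimal illustration (assuming the slicing semantics of
+        ``__getitem__`` above):
+
+        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        >>> a_itor = np.lib.Arrayterator(a, 2)
+        >>> a_itor.shape
+        (3, 4, 5, 6)
+        >>> a_itor[1:, ..., ::2].shape
+        (2, 4, 5, 3)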
+ + """ + return tuple(((stop - start - 1) // step + 1) for start, stop, step in + zip(self.start, self.stop, self.step)) + + def __iter__(self): + # Skip arrays with degenerate dimensions + if [dim for dim in self.shape if dim <= 0]: + return + + start = self.start[:] + stop = self.stop[:] + step = self.step[:] + ndims = self.var.ndim + + while True: + count = self.buf_size or reduce(mul, self.shape) + + # iterate over each dimension, looking for the + # running dimension (ie, the dimension along which + # the blocks will be built from) + rundim = 0 + for i in range(ndims - 1, -1, -1): + # if count is zero we ran out of elements to read + # along higher dimensions, so we read only a single position + if count == 0: + stop[i] = start[i] + 1 + elif count <= self.shape[i]: + # limit along this dimension + stop[i] = start[i] + count * step[i] + rundim = i + else: + # read everything along this dimension + stop[i] = self.stop[i] + stop[i] = min(self.stop[i], stop[i]) + count = count // self.shape[i] + + # yield a block + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + yield self.var[slice_] + + # Update start position, taking care of overflow to + # other dimensions + start[rundim] = stop[rundim] # start where we stopped + for i in range(ndims - 1, 0, -1): + if start[i] >= self.stop[i]: + start[i] = self.start[i] + start[i - 1] += self.step[i - 1] + if start[0] >= self.stop[0]: + return diff --git a/python/numpy/lib/_arrayterator_impl.pyi b/python/numpy/lib/_arrayterator_impl.pyi new file mode 100644 index 000000000..e1a9e056a --- /dev/null +++ b/python/numpy/lib/_arrayterator_impl.pyi @@ -0,0 +1,46 @@ +# pyright: reportIncompatibleMethodOverride=false + +from collections.abc import Generator +from types import EllipsisType +from typing import Any, Final, TypeAlias, overload + +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import _AnyShape, _Shape + +__all__ = ["Arrayterator"] + +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...] + +# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`, +# but its ``__getattr__` method does wrap around the former and thus has +# access to all its methods + +class Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]): + var: np.ndarray[_ShapeT_co, _DTypeT_co] # type: ignore[assignment] + buf_size: Final[int | None] + start: Final[list[int]] + stop: Final[list[int]] + step: Final[list[int]] + + @property # type: ignore[misc] + def shape(self) -> _ShapeT_co: ... + @property + def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override] + + # + def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ... + def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... # type: ignore[override] + def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ... + + # + @overload # type: ignore[override] + def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ... 
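
A quick sketch of the "running dimension" rule described in the
`Arrayterator` notes above; this is an illustration of the implementation as
shown, not part of the patch. With a 24-element buffer over a ``(3, 4, 5, 6)``
array, the full last axis (length 6) fits and four rows of the length-5 axis
fit as well (4 * 6 = 24), so that axis becomes the running dimension:

    import numpy as np

    a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
    blocks = list(np.lib.Arrayterator(a, buf_size=24))

    # Block shapes alternate between (1, 1, 4, 6) and the (1, 1, 1, 6)
    # remainder, never exceeding 24 elements, and the blocks concatenate
    # back to the original data in C order.
    assert all(block.size <= 24 for block in blocks)
    flat = np.concatenate([block.ravel() for block in blocks])
    assert (flat == a.ravel()).all()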
diff --git a/python/numpy/lib/_datasource.py b/python/numpy/lib/_datasource.py new file mode 100644 index 000000000..72398c547 --- /dev/null +++ b/python/numpy/lib/_datasource.py @@ -0,0 +1,700 @@ +"""A file interface for handling local and remote data files. + +The goal of datasource is to abstract some of the file system operations +when dealing with data files so the researcher doesn't have to know all the +low-level details. Through datasource, a researcher can obtain and use a +file with one function call, regardless of location of the file. + +DataSource is meant to augment standard python libraries, not replace them. +It should work seamlessly with standard file IO operations and the os +module. + +DataSource files can originate locally or remotely: + +- local files : '/home/guido/src/local/data.txt' +- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt' + +DataSource files can also be compressed or uncompressed. Currently only +gzip, bz2 and xz are supported. + +Example:: + + >>> # Create a DataSource, use os.curdir (default) for local storage. + >>> from numpy import DataSource + >>> ds = DataSource() + >>> + >>> # Open a remote file. + >>> # DataSource downloads the file, stores it locally in: + >>> # './www.google.com/index.html' + >>> # opens the file and returns a file object. + >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP + >>> + >>> # Use the file as you normally would + >>> fp.read() # doctest: +SKIP + >>> fp.close() # doctest: +SKIP + +""" +import os + +from numpy._utils import set_module + +_open = open + + +def _check_mode(mode, encoding, newline): + """Check mode and that encoding and newline are compatible. + + Parameters + ---------- + mode : str + File open mode. + encoding : str + File encoding. + newline : str + Newline for text files. + + """ + if "t" in mode: + if "b" in mode: + raise ValueError(f"Invalid mode: {mode!r}") + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + +# Using a class instead of a module-level dictionary +# to reduce the initial 'import numpy' overhead by +# deferring the import of lzma, bz2 and gzip until needed + +# TODO: .zip support, .tar support? +class _FileOpeners: + """ + Container for different methods to open (un-)compressed files. + + `_FileOpeners` contains a dictionary that holds one method for each + supported file format. Attribute lookup is implemented in such a way + that an instance of `_FileOpeners` itself can be indexed with the keys + of that dictionary. Currently uncompressed files as well as files + compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported. + + Notes + ----- + `_file_openers`, an instance of `_FileOpeners`, is made available for + use in the `_datasource` module. 
+ + Examples + -------- + >>> import gzip + >>> np.lib._datasource._file_openers.keys() + [None, '.bz2', '.gz', '.xz', '.lzma'] + >>> np.lib._datasource._file_openers['.gz'] is gzip.open + True + + """ + + def __init__(self): + self._loaded = False + self._file_openers = {None: open} + + def _load(self): + if self._loaded: + return + + try: + import bz2 + self._file_openers[".bz2"] = bz2.open + except ImportError: + pass + + try: + import gzip + self._file_openers[".gz"] = gzip.open + except ImportError: + pass + + try: + import lzma + self._file_openers[".xz"] = lzma.open + self._file_openers[".lzma"] = lzma.open + except (ImportError, AttributeError): + # There are incompatible backports of lzma that do not have the + # lzma.open attribute, so catch that as well as ImportError. + pass + + self._loaded = True + + def keys(self): + """ + Return the keys of currently supported file openers. + + Parameters + ---------- + None + + Returns + ------- + keys : list + The keys are None for uncompressed files and the file extension + strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression + methods. + + """ + self._load() + return list(self._file_openers.keys()) + + def __getitem__(self, key): + self._load() + return self._file_openers[key] + + +_file_openers = _FileOpeners() + +def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None): + """ + Open `path` with `mode` and return the file object. + + If ``path`` is an URL, it will be downloaded, stored in the + `DataSource` `destpath` directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. + mode : str, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to + append. Available modes depend on the type of object specified by + path. Default is 'r'. + destpath : str, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + The opened file. + + Notes + ----- + This is a convenience function that instantiates a `DataSource` and + returns the file object from ``DataSource.open(path)``. + + """ + + ds = DataSource(destpath) + return ds.open(path, mode, encoding=encoding, newline=newline) + + +@set_module('numpy.lib.npyio') +class DataSource: + """ + DataSource(destpath='.') + + A generic data source file (file, http, ftp, ...). + + DataSources can be local files or remote files/URLs. The files may + also be compressed or uncompressed. DataSource hides some of the + low-level details of downloading the file, allowing you to simply pass + in a valid file path (or URL) and obtain a file object. + + Parameters + ---------- + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Notes + ----- + URLs require a scheme string (``http://``) to be used, without it they + will fail:: + + >>> repos = np.lib.npyio.DataSource() + >>> repos.exists('www.google.com/index.html') + False + >>> repos.exists('http://www.google.com/index.html') + True + + Temporary directories are deleted when the DataSource is deleted. 
+ + Examples + -------- + :: + + >>> ds = np.lib.npyio.DataSource('/home/guido') + >>> urlname = 'http://www.google.com/' + >>> gfile = ds.open('http://www.google.com/') + >>> ds.abspath(urlname) + '/home/guido/www.google.com/index.html' + + >>> ds = np.lib.npyio.DataSource(None) # use with temporary file + >>> ds.open('/home/guido/foobar.txt') + + >>> ds.abspath('/home/guido/foobar.txt') + '/tmp/.../home/guido/foobar.txt' + + """ + + def __init__(self, destpath=os.curdir): + """Create a DataSource with a local path at destpath.""" + if destpath: + self._destpath = os.path.abspath(destpath) + self._istmpdest = False + else: + import tempfile # deferring import to improve startup time + self._destpath = tempfile.mkdtemp() + self._istmpdest = True + + def __del__(self): + # Remove temp directories + if hasattr(self, '_istmpdest') and self._istmpdest: + import shutil + + shutil.rmtree(self._destpath) + + def _iszip(self, filename): + """Test if the filename is a zip file by looking at the file extension. + + """ + fname, ext = os.path.splitext(filename) + return ext in _file_openers.keys() + + def _iswritemode(self, mode): + """Test if the given mode will open a file for writing.""" + + # Currently only used to test the bz2 files. + _writemodes = ("w", "+") + return any(c in _writemodes for c in mode) + + def _splitzipext(self, filename): + """Split zip extension from filename and return filename. + + Returns + ------- + base, zip_ext : {tuple} + + """ + + if self._iszip(filename): + return os.path.splitext(filename) + else: + return filename, None + + def _possible_names(self, filename): + """Return a tuple containing compressed filename variations.""" + names = [filename] + if not self._iszip(filename): + for zipext in _file_openers.keys(): + if zipext: + names.append(filename + zipext) + return names + + def _isurl(self, path): + """Test if path is a net location. Tests the scheme and netloc.""" + + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # BUG : URLs require a scheme string ('http://') to be used. + # www.google.com will fail. + # Should we prepend the scheme for those that don't have it and + # test that also? Similar to the way we append .gz and test for + # for compressed versions of files. + + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + return bool(scheme and netloc) + + def _cache(self, path): + """Cache the file specified by path. + + Creates a copy of the file in the datasource cache. + + """ + # We import these here because importing them is slow and + # a significant fraction of numpy's total import time. + import shutil + from urllib.request import urlopen + + upath = self.abspath(path) + + # ensure directory exists + if not os.path.exists(os.path.dirname(upath)): + os.makedirs(os.path.dirname(upath)) + + # TODO: Doesn't handle compressed files! + if self._isurl(path): + with urlopen(path) as openedurl: + with _open(upath, 'wb') as f: + shutil.copyfileobj(openedurl, f) + else: + shutil.copyfile(path, upath) + return upath + + def _findfile(self, path): + """Searches for ``path`` and returns full path if found. + + If path is an URL, _findfile will cache a local copy and return the + path to the cached file. If path is a local file, _findfile will + return a path to that local file. + + The search will include possible compressed versions of the file + and return the first occurrence found. 
+ + """ + + # Build list of possible local file paths + if not self._isurl(path): + # Valid local paths + filelist = self._possible_names(path) + # Paths in self._destpath + filelist += self._possible_names(self.abspath(path)) + else: + # Cached URLs in self._destpath + filelist = self._possible_names(self.abspath(path)) + # Remote URLs + filelist = filelist + self._possible_names(path) + + for name in filelist: + if self.exists(name): + if self._isurl(name): + name = self._cache(name) + return name + return None + + def abspath(self, path): + """ + Return absolute path of file in the DataSource directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + Notes + ----- + The functionality is based on `os.path.abspath`. + + """ + # We do this here to reduce the 'import numpy' initial import time. + from urllib.parse import urlparse + + # TODO: This should be more robust. Handles case where path includes + # the destpath, but not other sub-paths. Failing case: + # path = /home/guido/datafile.txt + # destpath = /home/alex/ + # upath = self.abspath(path) + # upath == '/home/alex/home/guido/datafile.txt' + + # handle case where path includes self._destpath + splitpath = path.split(self._destpath, 2) + if len(splitpath) > 1: + path = splitpath[1] + scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path) + netloc = self._sanitize_relative_path(netloc) + upath = self._sanitize_relative_path(upath) + return os.path.join(self._destpath, netloc, upath) + + def _sanitize_relative_path(self, path): + """Return a sanitised relative path for which + os.path.abspath(os.path.join(base, path)).startswith(base) + """ + last = None + path = os.path.normpath(path) + while path != last: + last = path + # Note: os.path.join treats '/' as os.sep on Windows + path = path.lstrip(os.sep).lstrip('/') + path = path.lstrip(os.pardir).removeprefix('..') + drive, path = os.path.splitdrive(path) # for Windows + return path + + def exists(self, path): + """ + Test if path exists. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + + # First test for local path + if os.path.exists(path): + return True + + # We import this here because importing urllib is slow and + # a significant fraction of numpy's total import time. 
+ from urllib.error import URLError + from urllib.request import urlopen + + # Test cached url + upath = self.abspath(path) + if os.path.exists(upath): + return True + + # Test remote url + if self._isurl(path): + try: + netfile = urlopen(path) + netfile.close() + del netfile + return True + except URLError: + return False + return False + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object. + + If `path` is an URL, it will be downloaded, stored in the + `DataSource` directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + + # TODO: There is no support for opening a file for writing which + # doesn't exist yet (creating a file). Should there be? + + # TODO: Add a ``subdir`` parameter for specifying the subdirectory + # used to store URLs in self._destpath. + + if self._isurl(path) and self._iswritemode(mode): + raise ValueError("URLs are not writeable") + + # NOTE: _findfile will fail on a new file opened for writing. + found = self._findfile(path) + if found: + _fname, ext = self._splitzipext(found) + if ext == 'bz2': + mode.replace("+", "") + return _file_openers[ext](found, mode=mode, + encoding=encoding, newline=newline) + else: + raise FileNotFoundError(f"{path} not found.") + + +class Repository (DataSource): + """ + Repository(baseurl, destpath='.') + + A data repository where multiple DataSource's share a base + URL/directory. + + `Repository` extends `DataSource` by prepending a base URL (or + directory) to all the files it handles. Use `Repository` when you will + be working with multiple files from one base URL. Initialize + `Repository` with the base URL, then refer to each file by its filename + only. + + Parameters + ---------- + baseurl : str + Path to the local directory or remote location that contains the + data files. + destpath : str or None, optional + Path to the directory where the source file gets downloaded to for + use. If `destpath` is None, a temporary directory will be created. + The default path is the current directory. + + Examples + -------- + To analyze all files in the repository, do something like this + (note: this is not self-contained code):: + + >>> repos = np.lib._datasource.Repository('/home/user/data/dir/') + >>> for filename in filelist: + ... fp = repos.open(filename) + ... fp.analyze() + ... fp.close() + + Similarly you could use a URL for a repository:: + + >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data') + + """ + + def __init__(self, baseurl, destpath=os.curdir): + """Create a Repository with a shared url or directory of baseurl.""" + DataSource.__init__(self, destpath=destpath) + self._baseurl = baseurl + + def __del__(self): + DataSource.__del__(self) + + def _fullpath(self, path): + """Return complete path for path. 
Prepends baseurl if necessary.""" + splitpath = path.split(self._baseurl, 2) + if len(splitpath) == 1: + result = os.path.join(self._baseurl, path) + else: + result = path # path contains baseurl already + return result + + def _findfile(self, path): + """Extend DataSource method to prepend baseurl to ``path``.""" + return DataSource._findfile(self, self._fullpath(path)) + + def abspath(self, path): + """ + Return absolute path of file in the Repository directory. + + If `path` is an URL, then `abspath` will return either the location + the file exists locally or the location it would exist when opened + using the `open` method. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : str + Complete path, including the `DataSource` destination directory. + + """ + return DataSource.abspath(self, self._fullpath(path)) + + def exists(self, path): + """ + Test if path exists prepending Repository base URL to path. + + Test if `path` exists as (and in this order): + + - a local file. + - a remote URL that has been downloaded and stored locally in the + `DataSource` directory. + - a remote URL that has not been downloaded, but is valid and + accessible. + + Parameters + ---------- + path : str or pathlib.Path + Can be a local file or a remote URL. This may, but does not + have to, include the `baseurl` with which the `Repository` was + initialized. + + Returns + ------- + out : bool + True if `path` exists. + + Notes + ----- + When `path` is an URL, `exists` will return True if it's either + stored locally in the `DataSource` directory, or is a valid remote + URL. `DataSource` does not discriminate between the two, the file + is accessible if it exists in either location. + + """ + return DataSource.exists(self, self._fullpath(path)) + + def open(self, path, mode='r', encoding=None, newline=None): + """ + Open and return file-like object prepending Repository base URL. + + If `path` is an URL, it will be downloaded, stored in the + DataSource directory and opened from there. + + Parameters + ---------- + path : str or pathlib.Path + Local file path or URL to open. This may, but does not have to, + include the `baseurl` with which the `Repository` was + initialized. + mode : {'r', 'w', 'a'}, optional + Mode to open `path`. Mode 'r' for reading, 'w' for writing, + 'a' to append. Available modes depend on the type of object + specified by `path`. Default is 'r'. + encoding : {None, str}, optional + Open text file with given encoding. The default encoding will be + what `open` uses. + newline : {None, str}, optional + Newline to use when reading text file. + + Returns + ------- + out : file object + File object. + + """ + return DataSource.open(self, self._fullpath(path), mode, + encoding=encoding, newline=newline) + + def listdir(self): + """ + List files in the source Repository. + + Returns + ------- + files : list of str or pathlib.Path + List of file names (not containing a directory part). + + Notes + ----- + Does not currently work for remote repositories. 
+ + """ + if self._isurl(self._baseurl): + raise NotImplementedError( + "Directory listing of URLs, not supported yet.") + else: + return os.listdir(self._baseurl) diff --git a/python/numpy/lib/_datasource.pyi b/python/numpy/lib/_datasource.pyi new file mode 100644 index 000000000..9f91fdf89 --- /dev/null +++ b/python/numpy/lib/_datasource.pyi @@ -0,0 +1,31 @@ +from pathlib import Path +from typing import IO, Any, TypeAlias + +from _typeshed import OpenBinaryMode, OpenTextMode + +_Mode: TypeAlias = OpenBinaryMode | OpenTextMode + +### + +# exported in numpy.lib.nppyio +class DataSource: + def __init__(self, /, destpath: Path | str | None = ...) -> None: ... + def __del__(self, /) -> None: ... + def abspath(self, /, path: str) -> str: ... + def exists(self, /, path: str) -> bool: ... + + # Whether the file-object is opened in string or bytes mode (by default) + # depends on the file-extension of `path` + def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ... + +class Repository(DataSource): + def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ... + def listdir(self, /) -> list[str]: ... + +def open( + path: str, + mode: _Mode = "r", + destpath: str | None = ..., + encoding: str | None = None, + newline: str | None = None, +) -> IO[Any]: ... diff --git a/python/numpy/lib/_format_impl.py b/python/numpy/lib/_format_impl.py new file mode 100644 index 000000000..7378ba554 --- /dev/null +++ b/python/numpy/lib/_format_impl.py @@ -0,0 +1,1036 @@ +""" +Binary serialization + +NPY format +========== + +A simple format for saving numpy arrays to disk with the full +information about them. + +The ``.npy`` format is the standard binary file format in NumPy for +persisting a *single* arbitrary NumPy array on disk. The format stores all +of the shape and dtype information necessary to reconstruct the array +correctly even on another machine with a different architecture. +The format is designed to be as simple as possible while achieving +its limited goals. + +The ``.npz`` format is the standard format for persisting *multiple* NumPy +arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` +files, one for each array. + +Capabilities +------------ + +- Can represent all NumPy arrays including nested record arrays and + object arrays. + +- Represents the data in its native binary form. + +- Supports Fortran-contiguous arrays directly. + +- Stores all of the necessary information to reconstruct the array + including shape and dtype on a machine of a different + architecture. Both little-endian and big-endian arrays are + supported, and a file with little-endian numbers will yield + a little-endian array on any machine reading the file. The + types are described in terms of their actual sizes. For example, + if a machine with a 64-bit C "long int" writes out an array with + "long ints", a reading machine with 32-bit C "long ints" will yield + an array with 64-bit integers. + +- Is straightforward to reverse engineer. Datasets often live longer than + the programs that created them. A competent developer should be + able to create a solution in their preferred programming language to + read most ``.npy`` files that they have been given without much + documentation. + +- Allows memory-mapping of the data. See `open_memmap`. + +- Can be read from a filelike stream object instead of an actual file. + +- Stores object arrays, i.e. arrays containing elements that are arbitrary + Python objects. 
Files with object arrays are not to be mmapable, but + can be read and written to disk. + +Limitations +----------- + +- Arbitrary subclasses of numpy.ndarray are not completely preserved. + Subclasses will be accepted for writing, but only the array data will + be written out. A regular numpy.ndarray object will be created + upon reading the file. + +.. warning:: + + Due to limitations in the interpretation of structured dtypes, dtypes + with fields with empty names will have the names replaced by 'f0', 'f1', + etc. Such arrays will not round-trip through the format entirely + accurately. The data is intact; only the field names will differ. We are + working on a fix for this. This fix will not require a change in the + file format. The arrays with such structures can still be saved and + restored, and the correct dtype may be restored by using the + ``loadedarray.view(correct_dtype)`` method. + +File extensions +--------------- + +We recommend using the ``.npy`` and ``.npz`` extensions for files saved +in this format. This is by no means a requirement; applications may wish +to use these file formats but use an extension specific to the +application. In the absence of an obvious alternative, however, +we suggest using ``.npy`` and ``.npz``. + +Version numbering +----------------- + +The version numbering of these formats is independent of NumPy version +numbering. If the format is upgraded, the code in `numpy.io` will still +be able to read and write Version 1.0 files. + +Format Version 1.0 +------------------ + +The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. + +The next 1 byte is an unsigned byte: the major version number of the file +format, e.g. ``\\x01``. + +The next 1 byte is an unsigned byte: the minor version number of the file +format, e.g. ``\\x00``. Note: the version of the file format is not tied +to the version of the numpy package. + +The next 2 bytes form a little-endian unsigned short int: the length of +the header data HEADER_LEN. + +The next HEADER_LEN bytes form the header data describing the array's +format. It is an ASCII string which contains a Python literal expression +of a dictionary. It is terminated by a newline (``\\n``) and padded with +spaces (``\\x20``) to make the total of +``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible +by 64 for alignment purposes. + +The dictionary contains three keys: + + "descr" : dtype.descr + An object that can be passed as an argument to the `numpy.dtype` + constructor to create the array's dtype. + "fortran_order" : bool + Whether the array data is Fortran-contiguous or not. Since + Fortran-contiguous arrays are a common form of non-C-contiguity, + we allow them to be written directly to disk for efficiency. + "shape" : tuple of int + The shape of the array. + +For repeatability and readability, the dictionary keys are sorted in +alphabetic order. This is for convenience only. A writer SHOULD implement +this if possible. A reader MUST NOT depend on this. + +Following the header comes the array data. If the dtype contains Python +objects (i.e. ``dtype.hasobject is True``), then the data is a Python +pickle of the array. Otherwise the data is the contiguous (either C- +or Fortran-, depending on ``fortran_order``) bytes of the array. +Consumers can figure out the number of bytes by multiplying the number +of elements given by the shape (noting that ``shape=()`` means there is +1 element) by ``dtype.itemsize``. 
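+
+As a purely illustrative example (not a normative part of the format
+description), the version 1.0 header for a little-endian, C-order ``(2, 3)``
+array of 4-byte integers would consist of the magic string, the version
+bytes ``\\x01\\x00``, the little-endian header length, and header data of
+the form::
+
+    {'descr': '<i4', 'fortran_order': False, 'shape': (2, 3), }
+
+followed by space padding and a terminating newline bringing the total
+header size up to a multiple of 64 bytes.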
+
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes. This can be exceeded by structured arrays with a large number of
+columns. The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the
+:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
+evolved with time and this document is more current.
+
+"""
+import io
+import os
+import pickle
+import warnings
+
+import numpy
+from numpy._utils import set_module
+from numpy.lib._utils_impl import drop_metadata
+
+__all__ = []
+
+drop_metadata.__module__ = "numpy.lib.format"
+
+EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
+MAGIC_PREFIX = b'\x93NUMPY'
+MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64  # plausible values are powers of 2 between 16 and 4096
+BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
+# allow growth within the address space of a 64 bit machine along one axis
+GROWTH_AXIS_MAX_DIGITS = 21  # = len(str(8*2**64-1)) hypothetical int1 dtype
+
+# difference between version 1.0 and 2.0 is a 4 byte (I) header length
+# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+    (1, 0): ('<H', 'latin1'),
+    (2, 0): ('<I', 'latin1'),
+    (3, 0): ('<I', 'utf8'),
+}
+
+# Python's literal_eval is not actually safe for large inputs, since parsing
+# may become slow or even cause interpreter crashes.
+# This is an arbitrary, low limit which should make it safe in practice.
+_MAX_HEADER_SIZE = 10000
+
+
+def _check_version(version):
+    if version not in [(1, 0), (2, 0), (3, 0), None]:
+        msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
+        raise ValueError(msg % (version,))
+
+
+@set_module("numpy.lib.format")
+def magic(major, minor):
+    """ Return the magic string for the given file format version.
+
+    Parameters
+    ----------
+    major : int in [0, 255]
+    minor : int in [0, 255]
+
+    Returns
+    -------
+    magic : str
+
+    Raises
+    ------
+    ValueError if the version cannot be formatted.
+    """
+    if major < 0 or major > 255:
+        raise ValueError("major version must be 0 <= major < 256")
+    if minor < 0 or minor > 255:
+        raise ValueError("minor version must be 0 <= minor < 256")
+    return MAGIC_PREFIX + bytes([major, minor])
+
+
+@set_module("numpy.lib.format")
+def read_magic(fp):
+    """ Read the magic string to get the version of the file format.
+
+    Parameters
+    ----------
+    fp : filelike object
+
+    Returns
+    -------
+    major : int
+    minor : int
+    """
+    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
+    if magic_str[:-2] != MAGIC_PREFIX:
+        msg = "the magic string is not correct; expected %r, got %r"
+        raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
+    major, minor = magic_str[-2:]
+    return major, minor
+
+
+@set_module("numpy.lib.format")
+def dtype_to_descr(dtype):
+    """
+    Get a serializable descriptor from the dtype.
+
+    The .descr attribute of a dtype object cannot be round-tripped through
+    the dtype() constructor. Simple types, like dtype('float32'), have
+    a descr which looks like a record array with one field with '' as
+    a name. The dtype() constructor interprets this as a request to give
+    a default name. Instead, we construct a descriptor that can be passed to
+    dtype().
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype of the array that will be written to disk.
+
+    Returns
+    -------
+    descr : object
+        An object that can be passed to `numpy.dtype()` in order to
+        replicate the input dtype.
+
+    """
+    # NOTE: drop_metadata may not return the right dtype e.g. for user
+    # dtypes. In that case our code below would fail the same, though.
+    new_dtype = drop_metadata(dtype)
+    if new_dtype is not dtype:
+        warnings.warn("metadata on a dtype is not saved to an npy/npz. 
" + "Use another format (such as pickle) to store it.", + UserWarning, stacklevel=2) + dtype = new_dtype + + if dtype.names is not None: + # This is a record array. The .descr is fine. XXX: parts of the + # record array with an empty name, like padding bytes, still get + # fiddled with. This needs to be fixed in the C implementation of + # dtype(). + return dtype.descr + elif not type(dtype)._legacy: + # this must be a user-defined dtype since numpy does not yet expose any + # non-legacy dtypes in the public API + # + # non-legacy dtypes don't yet have __array_interface__ + # support. Instead, as a hack, we use pickle to save the array, and lie + # that the dtype is object. When the array is loaded, the descriptor is + # unpickled with the array and the object dtype in the header is + # discarded. + # + # a future NEP should define a way to serialize user-defined + # descriptors and ideally work out the possible security implications + warnings.warn("Custom dtypes are saved as python objects using the " + "pickle protocol. Loading this file requires " + "allow_pickle=True to be set.", + UserWarning, stacklevel=2) + return "|O" + else: + return dtype.str + + +@set_module("numpy.lib.format") +def descr_to_dtype(descr): + """ + Returns a dtype based off the given description. + + This is essentially the reverse of `~lib.format.dtype_to_descr`. It will + remove the valueless padding fields created by, i.e. simple fields like + dtype('float32'), and then convert the description to its corresponding + dtype. + + Parameters + ---------- + descr : object + The object retrieved by dtype.descr. Can be passed to + `numpy.dtype` in order to replicate the input dtype. + + Returns + ------- + dtype : dtype + The dtype constructed by the description. + + """ + if isinstance(descr, str): + # No padding removal needed + return numpy.dtype(descr) + elif isinstance(descr, tuple): + # subtype, will always have a shape descr[1] + dt = descr_to_dtype(descr[0]) + return numpy.dtype((dt, descr[1])) + + titles = [] + names = [] + formats = [] + offsets = [] + offset = 0 + for field in descr: + if len(field) == 2: + name, descr_str = field + dt = descr_to_dtype(descr_str) + else: + name, descr_str, shape = field + dt = numpy.dtype((descr_to_dtype(descr_str), shape)) + + # Ignore padding bytes, which will be void bytes with '' as name + # Once support for blank names is removed, only "if name == ''" needed) + is_pad = (name == '' and dt.type is numpy.void and dt.names is None) + if not is_pad: + title, name = name if isinstance(name, tuple) else (None, name) + titles.append(title) + names.append(name) + formats.append(dt) + offsets.append(offset) + offset += dt.itemsize + + return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, + 'offsets': offsets, 'itemsize': offset}) + + +@set_module("numpy.lib.format") +def header_data_from_array_1_0(array): + """ Get the dictionary of header metadata from a numpy.ndarray. + + Parameters + ---------- + array : numpy.ndarray + + Returns + ------- + d : dict + This has the appropriate entries for writing its string representation + to the header of the file. + """ + d = {'shape': array.shape} + if array.flags.c_contiguous: + d['fortran_order'] = False + elif array.flags.f_contiguous: + d['fortran_order'] = True + else: + # Totally non-contiguous data. We will have to make it C-contiguous + # before writing. Note that we need to test for C_CONTIGUOUS first + # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. 
+            d['fortran_order'] = False
+
+    d['descr'] = dtype_to_descr(array.dtype)
+    return d
+
+
+def _wrap_header(header, version):
+    """
+    Takes a stringified header and attaches the prefix and padding to it
+    """
+    import struct
+    assert version is not None
+    fmt, encoding = _header_size_info[version]
+    header = header.encode(encoding)
+    hlen = len(header) + 1
+    padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
+    try:
+        header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
+    except struct.error:
+        msg = f"Header length {hlen} too big for version={version}"
+        raise ValueError(msg) from None
+
+    # Pad the header with spaces and a final newline such that the magic
+    # string, the header-length short and the header are aligned on a
+    # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+    # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+    # offset must be page-aligned (i.e. the beginning of the file).
+    return header_prefix + header + b' ' * padlen + b'\n'
+
+
+def _wrap_header_guess_version(header):
+    """
+    Like `_wrap_header`, but chooses an appropriate version given the contents
+    """
+    try:
+        return _wrap_header(header, (1, 0))
+    except ValueError:
+        pass
+
+    try:
+        ret = _wrap_header(header, (2, 0))
+    except UnicodeEncodeError:
+        pass
+    else:
+        warnings.warn("Stored array in format 2.0. It can only be "
+                      "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+        return ret
+
+    header = _wrap_header(header, (3, 0))
+    warnings.warn("Stored array in format 3.0. It can only be "
+                  "read by NumPy >= 1.17", UserWarning, stacklevel=2)
+    return header
+
+
+def _write_array_header(fp, d, version=None):
+    """ Write the header for an array.
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string representation
+        to the header of the file.
+    version : tuple or None
+        None means use oldest that works. Providing an explicit version will
+        raise a ValueError if the format does not allow saving this data.
+        Default: None
+    """
+    header = ["{"]
+    for key, value in sorted(d.items()):
+        # Need to use repr here, since we eval these when reading
+        header.append(f"'{key}': {repr(value)}, ")
+    header.append("}")
+    header = "".join(header)
+
+    # Add some spare space so that the array header can be modified in-place
+    # when changing the array size, e.g. when growing it by appending data at
+    # the end.
+    shape = d['shape']
+    header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
+        shape[-1 if d['fortran_order'] else 0]
+    ))) if len(shape) > 0 else 0)
+
+    if version is None:
+        header = _wrap_header_guess_version(header)
+    else:
+        header = _wrap_header(header, version)
+    fp.write(header)
+
+
+@set_module("numpy.lib.format")
+def write_array_header_1_0(fp, d):
+    """ Write the header for an array using the 1.0 format.
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string
+        representation to the header of the file.
+    """
+    _write_array_header(fp, d, (1, 0))
+
+
+@set_module("numpy.lib.format")
+def write_array_header_2_0(fp, d):
+    """ Write the header for an array using the 2.0 format.
+        The 2.0 format allows storing very large structured arrays.
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string
+        representation to the header of the file.
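+
+    Examples
+    --------
+    A minimal sketch using an in-memory buffer; the first 8 bytes written
+    are the magic prefix plus the (2, 0) version bytes::
+
+        import io
+        import numpy as np
+
+        fp = io.BytesIO()
+        d = np.lib.format.header_data_from_array_1_0(np.zeros((3, 4)))
+        np.lib.format.write_array_header_2_0(fp, d)
+        assert fp.getvalue()[:8] == b'\x93NUMPY\x02\x00'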
+    """
+    _write_array_header(fp, d, (2, 0))
+
+
+@set_module("numpy.lib.format")
+def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
+    """
+    Read an array header from a filelike object using the 1.0 file format
+    version.
+
+    This will leave the file object located just after the header.
+
+    Parameters
+    ----------
+    fp : filelike object
+        A file object or something with a `.read()` method like a file.
+    max_header_size : int, optional
+        Maximum allowed size of the header. Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape of the array.
+    fortran_order : bool
+        The array data will be written out directly if it is either
+        C-contiguous or Fortran-contiguous. Otherwise, it will be made
+        contiguous before writing it out.
+    dtype : dtype
+        The dtype of the file's data.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid.
+
+    """
+    return _read_array_header(
+            fp, version=(1, 0), max_header_size=max_header_size)
+
+
+@set_module("numpy.lib.format")
+def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
+    """
+    Read an array header from a filelike object using the 2.0 file format
+    version.
+
+    This will leave the file object located just after the header.
+
+    Parameters
+    ----------
+    fp : filelike object
+        A file object or something with a `.read()` method like a file.
+    max_header_size : int, optional
+        Maximum allowed size of the header. Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape of the array.
+    fortran_order : bool
+        The array data will be written out directly if it is either
+        C-contiguous or Fortran-contiguous. Otherwise, it will be made
+        contiguous before writing it out.
+    dtype : dtype
+        The dtype of the file's data.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid.
+
+    """
+    return _read_array_header(
+            fp, version=(2, 0), max_header_size=max_header_size)
+
+
+def _filter_header(s):
+    """Clean up 'L' in npz header ints.
+
+    Cleans up the 'L' in strings representing integers. Needed to allow npz
+    headers produced in Python2 to be read in Python3.
+
+    Parameters
+    ----------
+    s : string
+        Npy file header.
+
+    Returns
+    -------
+    header : str
+        Cleaned up header.
+
+    """
+    import tokenize
+    from io import StringIO
+
+    tokens = []
+    last_token_was_number = False
+    for token in tokenize.generate_tokens(StringIO(s).readline):
+        token_type = token[0]
+        token_string = token[1]
+        if (last_token_was_number and
+                token_type == tokenize.NAME and
+                token_string == "L"):
+            continue
+        else:
+            tokens.append(token)
+        last_token_was_number = (token_type == tokenize.NUMBER)
+    return tokenize.untokenize(tokens)
+
+
+def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
+    """
+    see read_array_header_1_0
+    """
+    # Read an unsigned, little-endian short int which has the length of the
+    # header.
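+    # For a (1, 0) file this is a little-endian uint16 ('<H'); for (2, 0)
+    # and (3, 0) it is a little-endian uint32 ('<I') -- see _header_size_info.
+    # As an illustration, struct.unpack('<H', b'\x76\x00') == (118,).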
+ import ast + import struct + hinfo = _header_size_info.get(version) + if hinfo is None: + raise ValueError(f"Invalid version {version!r}") + hlength_type, encoding = hinfo + + hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") + header_length = struct.unpack(hlength_type, hlength_str)[0] + header = _read_bytes(fp, header_length, "array header") + header = header.decode(encoding) + if len(header) > max_header_size: + raise ValueError( + f"Header info length ({len(header)}) is large and may not be safe " + "to load securely.\n" + "To allow loading, adjust `max_header_size` or fully trust " + "the `.npy` file using `allow_pickle=True`.\n" + "For safety against large resource use or crashes, sandboxing " + "may be necessary.") + + # The header is a pretty-printed string representation of a literal + # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte + # boundary. The keys are strings. + # "shape" : tuple of int + # "fortran_order" : bool + # "descr" : dtype.descr + # Versions (2, 0) and (1, 0) could have been created by a Python 2 + # implementation before header filtering was implemented. + # + # For performance reasons, we try without _filter_header first though + try: + d = ast.literal_eval(header) + except SyntaxError as e: + if version <= (2, 0): + header = _filter_header(header) + try: + d = ast.literal_eval(header) + except SyntaxError as e2: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e2 + else: + warnings.warn( + "Reading `.npy` or `.npz` file required additional " + "header parsing as it was created on Python 2. Save the " + "file again to speed up loading and avoid this warning.", + UserWarning, stacklevel=4) + else: + msg = "Cannot parse header: {!r}" + raise ValueError(msg.format(header)) from e + if not isinstance(d, dict): + msg = "Header is not a dictionary: {!r}" + raise ValueError(msg.format(d)) + + if EXPECTED_KEYS != d.keys(): + keys = sorted(d.keys()) + msg = "Header does not contain the correct keys: {!r}" + raise ValueError(msg.format(keys)) + + # Sanity-check the values. + if (not isinstance(d['shape'], tuple) or + not all(isinstance(x, int) for x in d['shape'])): + msg = "shape is not valid: {!r}" + raise ValueError(msg.format(d['shape'])) + if not isinstance(d['fortran_order'], bool): + msg = "fortran_order is not a valid bool: {!r}" + raise ValueError(msg.format(d['fortran_order'])) + try: + dtype = descr_to_dtype(d['descr']) + except TypeError as e: + msg = "descr is not a valid dtype descriptor: {!r}" + raise ValueError(msg.format(d['descr'])) from e + + return d['shape'], d['fortran_order'], dtype + + +@set_module("numpy.lib.format") +def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): + """ + Write an array to an NPY file, including a header. + + If the array is neither C-contiguous nor Fortran-contiguous AND the + file_like object is not a real file object, this function will have to + copy data in memory. + + Parameters + ---------- + fp : file_like object + An open, writable file object, or similar object with a + ``.write()`` method. + array : ndarray + The array to write to disk. + version : (int, int) or None, optional + The version number of the format. None means use the oldest + supported version that is able to store the data. Default: None + allow_pickle : bool, optional + Whether to allow writing pickled data. 
Default: True
+    pickle_kwargs : dict, optional
+        Additional keyword arguments to pass to pickle.dump, excluding
+        'protocol'. These are only useful when pickling objects in object
+        arrays to a Python 2 compatible format.
+
+    Raises
+    ------
+    ValueError
+        If the array cannot be persisted. This includes the case of
+        allow_pickle=False and array being an object array.
+    Various other errors
+        If the array contains Python objects as part of its dtype, the
+        process of pickling them may raise various errors if the objects
+        are not picklable.
+
+    """
+    _check_version(version)
+    _write_array_header(fp, header_data_from_array_1_0(array), version)
+
+    if array.itemsize == 0:
+        buffersize = 0
+    else:
+        # Set buffer size to 16 MiB to hide the Python loop overhead.
+        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+    dtype_class = type(array.dtype)
+
+    if array.dtype.hasobject or not dtype_class._legacy:
+        # We contain Python objects so we cannot write out the data
+        # directly. Instead, we will pickle it out
+        if not allow_pickle:
+            if array.dtype.hasobject:
+                raise ValueError("Object arrays cannot be saved when "
+                                 "allow_pickle=False")
+            if not dtype_class._legacy:
+                raise ValueError("User-defined dtypes cannot be saved "
+                                 "when allow_pickle=False")
+        if pickle_kwargs is None:
+            pickle_kwargs = {}
+        pickle.dump(array, fp, protocol=4, **pickle_kwargs)
+    elif array.flags.f_contiguous and not array.flags.c_contiguous:
+        if isfileobj(fp):
+            array.T.tofile(fp)
+        else:
+            for chunk in numpy.nditer(
+                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                    buffersize=buffersize, order='F'):
+                fp.write(chunk.tobytes('C'))
+    elif isfileobj(fp):
+        array.tofile(fp)
+    else:
+        for chunk in numpy.nditer(
+                array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                buffersize=buffersize, order='C'):
+            fp.write(chunk.tobytes('C'))
+
+
+@set_module("numpy.lib.format")
+def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
+               max_header_size=_MAX_HEADER_SIZE):
+    """
+    Read an array from an NPY file.
+
+    Parameters
+    ----------
+    fp : file_like object
+        If this is not a real file object, then this may take extra memory
+        and time.
+    allow_pickle : bool, optional
+        Whether to allow reading pickled data. Default: False
+    pickle_kwargs : dict
+        Additional keyword arguments to pass to pickle.load. These are only
+        useful when loading object arrays saved on Python 2.
+    max_header_size : int, optional
+        Maximum allowed size of the header. Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+        This option is ignored when `allow_pickle` is passed. In that case
+        the file is by definition trusted and the limit is unnecessary.
+
+    Returns
+    -------
+    array : ndarray
+        The array from the data on disk.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid, or allow_pickle=False and the file contains
+        an object array.
+
+    """
+    if allow_pickle:
+        # Effectively ignore max_header_size, since `allow_pickle` indicates
+        # that the input is fully trusted.
+        max_header_size = 2**64
+
+    version = read_magic(fp)
+    _check_version(version)
+    shape, fortran_order, dtype = _read_array_header(
+            fp, version, max_header_size=max_header_size)
+    if len(shape) == 0:
+        count = 1
+    else:
+        count = numpy.multiply.reduce(shape, dtype=numpy.int64)
+
+    # Now read the actual data.
+    if dtype.hasobject:
+        # The array contained Python objects. We need to unpickle the data.
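+        # (pickle.load can execute arbitrary code embedded in the file,
+        # which is why allow_pickle defaults to False for untrusted input.)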
+ if not allow_pickle: + raise ValueError("Object arrays cannot be loaded when " + "allow_pickle=False") + if pickle_kwargs is None: + pickle_kwargs = {} + try: + array = pickle.load(fp, **pickle_kwargs) + except UnicodeError as err: + # Friendlier error message + raise UnicodeError("Unpickling a python object failed: %r\n" + "You may need to pass the encoding= option " + "to numpy.load" % (err,)) from err + else: + if isfileobj(fp): + # We can use the fast fromfile() function. + array = numpy.fromfile(fp, dtype=dtype, count=count) + else: + # This is not a real file. We have to read it the + # memory-intensive way. + # crc32 module fails on reads greater than 2 ** 32 bytes, + # breaking large reads from gzip streams. Chunk reads to + # BUFFER_SIZE bytes to avoid issue and reduce memory overhead + # of the read. In non-chunked case count < max_read_count, so + # only one read is performed. + + # Use np.ndarray instead of np.empty since the latter does + # not correctly instantiate zero-width string dtypes; see + # https://github.com/numpy/numpy/pull/6430 + array = numpy.ndarray(count, dtype=dtype) + + if dtype.itemsize > 0: + # If dtype.itemsize == 0 then there's nothing more to read + max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) + + for i in range(0, count, max_read_count): + read_count = min(max_read_count, count - i) + read_size = int(read_count * dtype.itemsize) + data = _read_bytes(fp, read_size, "array data") + array[i:i + read_count] = numpy.frombuffer(data, dtype=dtype, + count=read_count) + + if array.size != count: + raise ValueError( + "Failed to read all data for array. " + f"Expected {shape} = {count} elements, " + f"could only read {array.size} elements. " + "(file seems not fully written?)" + ) + + if fortran_order: + array.shape = shape[::-1] + array = array.transpose() + else: + array.shape = shape + + return array + + +@set_module("numpy.lib.format") +def open_memmap(filename, mode='r+', dtype=None, shape=None, + fortran_order=False, version=None, *, + max_header_size=_MAX_HEADER_SIZE): + """ + Open a .npy file as a memory-mapped array. + + This may be used to read an existing file or create a new one. + + Parameters + ---------- + filename : str or path-like + The name of the file on disk. This may *not* be a file-like + object. + mode : str, optional + The mode in which to open the file; the default is 'r+'. In + addition to the standard file modes, 'c' is also accepted to mean + "copy on write." See `memmap` for the available mode strings. + dtype : data-type, optional + The data type of the array if we are creating a new file in "write" + mode, if not, `dtype` is ignored. The default value is None, which + results in a data-type of `float64`. + shape : tuple of int + The shape of the array if we are creating a new file in "write" + mode, in which case this parameter is required. Otherwise, this + parameter is ignored and is thus optional. + fortran_order : bool, optional + Whether the array should be Fortran-contiguous (True) or + C-contiguous (False, the default) if we are creating a new file in + "write" mode. + version : tuple of int (major, minor) or None + If the mode is a "write" mode, then this is the version of the file + format used to create the file. None means use the oldest + supported version that is able to store the data. Default: None + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. 
+        See :py:func:`ast.literal_eval()` for details.
+
+    Returns
+    -------
+    marray : memmap
+        The memory-mapped array.
+
+    Raises
+    ------
+    ValueError
+        If the data or the mode is invalid.
+    OSError
+        If the file is not found or cannot be opened correctly.
+
+    See Also
+    --------
+    numpy.memmap
+
+    """
+    if isfileobj(filename):
+        raise ValueError("Filename must be a string or a path-like object."
+                         " Memmap cannot use existing file handles.")
+
+    if 'w' in mode:
+        # We are creating the file, not reading it.
+        # Check if we ought to create the file.
+        _check_version(version)
+        # Ensure that the given dtype is an authentic dtype object rather
+        # than just something that can be interpreted as a dtype object.
+        dtype = numpy.dtype(dtype)
+        if dtype.hasobject:
+            msg = "Array can't be memory-mapped: Python objects in dtype."
+            raise ValueError(msg)
+        d = {
+            "descr": dtype_to_descr(dtype),
+            "fortran_order": fortran_order,
+            "shape": shape,
+        }
+        # If we got here, then it should be safe to create the file.
+        with open(os.fspath(filename), mode + 'b') as fp:
+            _write_array_header(fp, d, version)
+            offset = fp.tell()
+    else:
+        # Read the header of the file first.
+        with open(os.fspath(filename), 'rb') as fp:
+            version = read_magic(fp)
+            _check_version(version)
+
+            shape, fortran_order, dtype = _read_array_header(
+                    fp, version, max_header_size=max_header_size)
+            if dtype.hasobject:
+                msg = "Array can't be memory-mapped: Python objects in dtype."
+                raise ValueError(msg)
+            offset = fp.tell()
+
+    if fortran_order:
+        order = 'F'
+    else:
+        order = 'C'
+
+    # We need to change a write-only mode to a read-write mode since we've
+    # already written data to the file.
+    if mode == 'w+':
+        mode = 'r+'
+
+    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+                          mode=mode, offset=offset)
+
+    return marray
+
+
+def _read_bytes(fp, size, error_template="ran out of data"):
+    """
+    Read from file-like object until size bytes are read.
+    Raises ValueError if EOF is encountered before size bytes are read.
+    Non-blocking objects are only supported if they derive from io objects.
+
+    Required as e.g. ZipExtFile in python 2.6 can return less data than
+    requested.
+    """
+    data = b""
+    while True:
+        # io files (default in python3) return None or raise on
+        # would-block, python2 file will truncate, probably nothing can be
+        # done about that. note that regular files can't be non-blocking
+        try:
+            r = fp.read(size - len(data))
+            data += r
+            if len(r) == 0 or len(data) == size:
+                break
+        except BlockingIOError:
+            pass
+    if len(data) != size:
+        msg = "EOF: reading %s, expected %d bytes got %d"
+        raise ValueError(msg % (error_template, size, len(data)))
+    else:
+        return data
+
+
+@set_module("numpy.lib.format")
+def isfileobj(f):
+    if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):
+        return False
+    try:
+        # BufferedReader/Writer may raise OSError when
+        # fetching `fileno()` (e.g. when wrapping BytesIO).
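+        # For instance, io.BufferedReader(io.BytesIO(b"")).fileno() raises
+        # io.UnsupportedOperation, an OSError subclass.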
+        f.fileno()
+        return True
+    except OSError:
+        return False
diff --git a/python/numpy/lib/_format_impl.pyi b/python/numpy/lib/_format_impl.pyi
new file mode 100644
index 000000000..f4898d9ae
--- /dev/null
+++ b/python/numpy/lib/_format_impl.pyi
@@ -0,0 +1,26 @@
+from typing import Final, Literal
+
+from numpy.lib._utils_impl import drop_metadata  # noqa: F401
+
+__all__: list[str] = []
+
+EXPECTED_KEYS: Final[set[str]]
+MAGIC_PREFIX: Final[bytes]
+MAGIC_LEN: Literal[8]
+ARRAY_ALIGN: Literal[64]
+BUFFER_SIZE: Literal[262144]  # 2**18
+GROWTH_AXIS_MAX_DIGITS: Literal[21]
+
+def magic(major, minor): ...
+def read_magic(fp): ...
+def dtype_to_descr(dtype): ...
+def descr_to_dtype(descr): ...
+def header_data_from_array_1_0(array): ...
+def write_array_header_1_0(fp, d): ...
+def write_array_header_2_0(fp, d): ...
+def read_array_header_1_0(fp): ...
+def read_array_header_2_0(fp): ...
+def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
+def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
+def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
+def isfileobj(f): ...
diff --git a/python/numpy/lib/_function_base_impl.py b/python/numpy/lib/_function_base_impl.py
new file mode 100644
index 000000000..9ee59449e
--- /dev/null
+++ b/python/numpy/lib/_function_base_impl.py
@@ -0,0 +1,5844 @@
+import builtins
+import collections.abc
+import functools
+import re
+import sys
+import warnings
+
+import numpy as np
+import numpy._core.numeric as _nx
+from numpy._core import overrides, transpose
+from numpy._core._multiarray_umath import _array_converter
+from numpy._core.fromnumeric import any, mean, nonzero, partition, ravel, sum
+from numpy._core.multiarray import _monotonicity, _place, bincount, normalize_axis_index
+from numpy._core.multiarray import interp as compiled_interp
+from numpy._core.multiarray import interp_complex as compiled_interp_complex
+from numpy._core.numeric import (
+    absolute,
+    arange,
+    array,
+    asanyarray,
+    asarray,
+    concatenate,
+    dot,
+    empty,
+    integer,
+    intp,
+    isscalar,
+    ndarray,
+    ones,
+    take,
+    where,
+    zeros_like,
+)
+from numpy._core.numerictypes import typecodes
+from numpy._core.umath import (
+    add,
+    arctan2,
+    cos,
+    exp,
+    frompyfunc,
+    less_equal,
+    minimum,
+    mod,
+    not_equal,
+    pi,
+    sin,
+    sqrt,
+    subtract,
+)
+from numpy._utils import set_module
+
+# needed in this module for compatibility
+from numpy.lib._histograms_impl import histogram, histogramdd  # noqa: F401
+from numpy.lib._twodim_base_impl import diag
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+    'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
+    'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'flip',
+    'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
+    'bincount', 'digitize', 'cov', 'corrcoef',
+    'median', 'sinc', 'hamming', 'hanning', 'bartlett',
+    'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0',
+    'meshgrid', 'delete', 'insert', 'append', 'interp',
+    'quantile'
+    ]
+
+# _QuantileMethods is a dictionary listing all the supported methods to
+# compute quantile/percentile.
+#
+# Below virtual_index refers to the index of the element where the percentile
+# would be found in the sorted sample.
+# When the sample contains exactly the percentile wanted, the virtual_index is
+# an integer equal to the index of this element.
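+# (For example, under the 'linear' method with n = 5 samples, quantile 0.5
+# gives virtual_index = (5 - 1) * 0.5 = 2.0, exactly the middle element.)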
+# When the percentile wanted is in between two elements, the virtual_index
+# is made of an integer part (a.k.a 'i' or 'left') and a fractional part
+# (a.k.a 'g' or 'gamma')
+#
+# Each method in _QuantileMethods has two properties:
+# get_virtual_index : Callable
+#     The function used to compute the virtual_index.
+# fix_gamma : Callable
+#     A function used for discrete methods to force the index to a specific value.
+_QuantileMethods = {
+    # --- HYNDMAN and FAN METHODS
+    # Discrete methods
+    'inverted_cdf': {
+        'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles),  # noqa: PLW0108
+        'fix_gamma': None,  # should never be called
+    },
+    'averaged_inverted_cdf': {
+        'get_virtual_index': lambda n, quantiles: (n * quantiles) - 1,
+        'fix_gamma': lambda gamma, _: _get_gamma_mask(
+            shape=gamma.shape,
+            default_value=1.,
+            conditioned_value=0.5,
+            where=gamma == 0),
+    },
+    'closest_observation': {
+        'get_virtual_index': lambda n, quantiles: _closest_observation(n, quantiles),  # noqa: PLW0108
+        'fix_gamma': None,  # should never be called
+    },
+    # Continuous methods
+    'interpolated_inverted_cdf': {
+        'get_virtual_index': lambda n, quantiles:
+            _compute_virtual_index(n, quantiles, 0, 1),
+        'fix_gamma': lambda gamma, _: gamma,
+    },
+    'hazen': {
+        'get_virtual_index': lambda n, quantiles:
+            _compute_virtual_index(n, quantiles, 0.5, 0.5),
+        'fix_gamma': lambda gamma, _: gamma,
+    },
+    'weibull': {
+        'get_virtual_index': lambda n, quantiles:
+            _compute_virtual_index(n, quantiles, 0, 0),
+        'fix_gamma': lambda gamma, _: gamma,
+    },
+    # Default method.
+    # To avoid some rounding issues, `(n-1) * quantiles` is preferred to
+    # `_compute_virtual_index(n, quantiles, 1, 1)`.
+    # They are mathematically equivalent.
+    'linear': {
+        'get_virtual_index': lambda n, quantiles: (n - 1) * quantiles,
+        'fix_gamma': lambda gamma, _: gamma,
+    },
+    'median_unbiased': {
+        'get_virtual_index': lambda n, quantiles:
+            _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0),
+        'fix_gamma': lambda gamma, _: gamma,
+    },
+    'normal_unbiased': {
+        'get_virtual_index': lambda n, quantiles:
+            _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0),
+        'fix_gamma': lambda gamma, _: gamma,
+    },
+    # --- OTHER METHODS
+    'lower': {
+        'get_virtual_index': lambda n, quantiles: np.floor(
+            (n - 1) * quantiles).astype(np.intp),
+        'fix_gamma': None,  # should never be called, index dtype is int
+    },
+    'higher': {
+        'get_virtual_index': lambda n, quantiles: np.ceil(
+            (n - 1) * quantiles).astype(np.intp),
+        'fix_gamma': None,  # should never be called, index dtype is int
+    },
+    'midpoint': {
+        'get_virtual_index': lambda n, quantiles: 0.5 * (
+            np.floor((n - 1) * quantiles)
+            + np.ceil((n - 1) * quantiles)),
+        'fix_gamma': lambda gamma, index: _get_gamma_mask(
+            shape=gamma.shape,
+            default_value=0.5,
+            conditioned_value=0.,
+            where=index % 1 == 0),
+    },
+    'nearest': {
+        'get_virtual_index': lambda n, quantiles: np.around(
+            (n - 1) * quantiles).astype(np.intp),
+        'fix_gamma': None,
+        # should never be called, index dtype is int
+    }}
+
+
+def _rot90_dispatcher(m, k=None, axes=None):
+    return (m,)
+
+
+@array_function_dispatch(_rot90_dispatcher)
+def rot90(m, k=1, axes=(0, 1)):
+    """
+    Rotate an array by 90 degrees in the plane specified by axes.
+
+    Rotation direction is from the first towards the second axis.
+    This means for a 2D array with the default `k` and `axes`, the
+    rotation will be counterclockwise.
+
+    Parameters
+    ----------
+    m : array_like
+        Array of two or more dimensions.
+ k : integer + Number of times the array is rotated by 90 degrees. + axes : (2,) array_like + The array is rotated in the plane defined by the axes. + Axes must be different. + + Returns + ------- + y : ndarray + A rotated view of `m`. + + See Also + -------- + flip : Reverse the order of elements in an array along the given axis. + fliplr : Flip an array horizontally. + flipud : Flip an array vertically. + + Notes + ----- + ``rot90(m, k=1, axes=(1,0))`` is the reverse of + ``rot90(m, k=1, axes=(0,1))`` + + ``rot90(m, k=1, axes=(1,0))`` is equivalent to + ``rot90(m, k=-1, axes=(0,1))`` + + Examples + -------- + >>> import numpy as np + >>> m = np.array([[1,2],[3,4]], int) + >>> m + array([[1, 2], + [3, 4]]) + >>> np.rot90(m) + array([[2, 4], + [1, 3]]) + >>> np.rot90(m, 2) + array([[4, 3], + [2, 1]]) + >>> m = np.arange(8).reshape((2,2,2)) + >>> np.rot90(m, 1, (1,2)) + array([[[1, 3], + [0, 2]], + [[5, 7], + [4, 6]]]) + + """ + axes = tuple(axes) + if len(axes) != 2: + raise ValueError("len(axes) must be 2.") + + m = asanyarray(m) + + if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: + raise ValueError("Axes must be different.") + + if (axes[0] >= m.ndim or axes[0] < -m.ndim + or axes[1] >= m.ndim or axes[1] < -m.ndim): + raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.") + + k %= 4 + + if k == 0: + return m[:] + if k == 2: + return flip(flip(m, axes[0]), axes[1]) + + axes_list = arange(0, m.ndim) + (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], + axes_list[axes[0]]) + + if k == 1: + return transpose(flip(m, axes[1]), axes_list) + else: + # k == 3 + return flip(transpose(m, axes_list), axes[1]) + + +def _flip_dispatcher(m, axis=None): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def flip(m, axis=None): + """ + Reverse the order of elements in an array along the given axis. + + The shape of the array is preserved, but the elements are reordered. + + Parameters + ---------- + m : array_like + Input array. + axis : None or int or tuple of ints, optional + Axis or axes along which to flip over. The default, + axis=None, will flip over all of the axes of the input array. + If axis is negative it counts from the last to the first axis. + + If axis is a tuple of ints, flipping is performed on all of the axes + specified in the tuple. + + Returns + ------- + out : array_like + A view of `m` with the entries of axis reversed. Since a view is + returned, this operation is done in constant time. + + See Also + -------- + flipud : Flip an array vertically (axis=0). + fliplr : Flip an array horizontally (axis=1). + + Notes + ----- + flip(m, 0) is equivalent to flipud(m). + + flip(m, 1) is equivalent to fliplr(m). + + flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. + + flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all + positions. + + flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at + position 0 and position 1. 
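+
+    As a quick sketch of the view behaviour, writing through the result
+    writes into `m`::
+
+        >>> a = np.arange(3)
+        >>> np.flip(a)[0] = 9  # assigns to a[2]
+        >>> a
+        array([0, 1, 9])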
+ + Examples + -------- + >>> import numpy as np + >>> A = np.arange(8).reshape((2,2,2)) + >>> A + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.flip(A, 0) + array([[[4, 5], + [6, 7]], + [[0, 1], + [2, 3]]]) + >>> np.flip(A, 1) + array([[[2, 3], + [0, 1]], + [[6, 7], + [4, 5]]]) + >>> np.flip(A) + array([[[7, 6], + [5, 4]], + [[3, 2], + [1, 0]]]) + >>> np.flip(A, (0, 2)) + array([[[5, 4], + [7, 6]], + [[1, 0], + [3, 2]]]) + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(3,4,5)) + >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) + True + """ + if not hasattr(m, 'ndim'): + m = asarray(m) + if axis is None: + indexer = (np.s_[::-1],) * m.ndim + else: + axis = _nx.normalize_axis_tuple(axis, m.ndim) + indexer = [np.s_[:]] * m.ndim + for ax in axis: + indexer[ax] = np.s_[::-1] + indexer = tuple(indexer) + return m[indexer] + + +@set_module('numpy') +def iterable(y): + """ + Check whether or not an object can be iterated over. + + Parameters + ---------- + y : object + Input object. + + Returns + ------- + b : bool + Return ``True`` if the object has an iterator method or is a + sequence and ``False`` otherwise. + + + Examples + -------- + >>> import numpy as np + >>> np.iterable([1, 2, 3]) + True + >>> np.iterable(2) + False + + Notes + ----- + In most cases, the results of ``np.iterable(obj)`` are consistent with + ``isinstance(obj, collections.abc.Iterable)``. One notable exception is + the treatment of 0-dimensional arrays:: + + >>> from collections.abc import Iterable + >>> a = np.array(1.0) # 0-dimensional numpy array + >>> isinstance(a, Iterable) + True + >>> np.iterable(a) + False + + """ + try: + iter(y) + except TypeError: + return False + return True + + +def _weights_are_valid(weights, a, axis): + """Validate weights array. + + We assume, weights is not None. + """ + wgt = np.asanyarray(weights) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.shape != tuple(a.shape[ax] for ax in axis): + raise ValueError( + "Shape of weights must be consistent with " + "shape of a along specified axis.") + + # setup wgt to broadcast along axis + wgt = wgt.transpose(np.argsort(axis)) + wgt = wgt.reshape(tuple((s if ax in axis else 1) + for ax, s in enumerate(a.shape))) + return wgt + + +def _average_dispatcher(a, axis=None, weights=None, returned=None, *, + keepdims=None): + return (a, weights) + + +@array_function_dispatch(_average_dispatcher) +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Compute the weighted average along the specified axis. + + Parameters + ---------- + a : array_like + Array containing data to be averaged. If `a` is not an array, a + conversion is attempted. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. The default, + `axis=None`, will average over all of the elements of the input array. + If axis is negative it counts from the last to the first axis. + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The array of weights must be the same shape as `a` if no axis is + specified, otherwise the weights must have dimensions and shape + consistent with `a` along the specified axis. 
+        If `weights=None`, then all data in `a` are assumed to have a
+        weight equal to one.
+        The calculation is::
+
+            avg = sum(a * weights) / sum(weights)
+
+        where the sum is over all included elements.
+        The only constraint on the values of `weights` is that `sum(weights)`
+        must not be 0.
+    returned : bool, optional
+        Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
+        is returned, otherwise only the average is returned.
+        If `weights=None`, `sum_of_weights` is equivalent to the number of
+        elements over which the average is taken.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+        *Note:* `keepdims` will not work with instances of `numpy.matrix`
+        or other classes whose methods do not support `keepdims`.
+
+        .. versionadded:: 1.23.0
+
+    Returns
+    -------
+    retval, [sum_of_weights] : array_type or double
+        Return the average along the specified axis. When `returned` is `True`,
+        return a tuple with the average as the first element and the sum
+        of the weights as the second element. `sum_of_weights` is of the
+        same type as `retval`. The result dtype follows a general pattern.
+        If `weights` is None, the result dtype will be that of `a`, or
+        ``float64`` if `a` is integral. Otherwise, if `weights` is not None
+        and `a` is non-integral, the result type will be the type of lowest
+        precision capable of representing values of both `a` and `weights`.
+        If `a` happens to be integral, the previous rules still apply but
+        the result dtype will at least be ``float64``.
+
+    Raises
+    ------
+    ZeroDivisionError
+        When all weights along axis are zero. See `numpy.ma.average` for a
+        version robust to this type of error.
+    TypeError
+        When `weights` does not have the same shape as `a`, and `axis=None`.
+    ValueError
+        When `weights` does not have dimensions and shape consistent with `a`
+        along specified `axis`.
+
+    See Also
+    --------
+    mean
+
+    ma.average : average for masked arrays -- useful if your data contains
+                 "missing" values
+    numpy.result_type : Returns the type that results from applying the
+                        numpy type promotion rules to the arguments.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> data = np.arange(1, 5)
+    >>> data
+    array([1, 2, 3, 4])
+    >>> np.average(data)
+    2.5
+    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
+    4.0
+
+    >>> data = np.arange(6).reshape((3, 2))
+    >>> data
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+    >>> np.average(data, axis=1, weights=[1./4, 3./4])
+    array([0.75, 2.75, 4.75])
+    >>> np.average(data, weights=[1./4, 3./4])
+    Traceback (most recent call last):
+        ...
+    TypeError: Axis must be specified when shapes of a and weights differ.
+
+    With ``keepdims=True``, the following result has shape (3, 1).
+
+    >>> np.average(data, axis=1, keepdims=True)
+    array([[0.5],
+           [2.5],
+           [4.5]])
+
+    >>> data = np.arange(8).reshape((2, 2, 2))
+    >>> data
+    array([[[0, 1],
+            [2, 3]],
+           [[4, 5],
+            [6, 7]]])
+    >>> np.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]])
+    array([3.4, 4.4])
+    >>> np.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]])
+    Traceback (most recent call last):
+        ...
+    ValueError: Shape of weights must be consistent
+    with shape of a along specified axis.
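+
+    With ``returned=True`` the sum of the weights is returned as well
+    (a small illustrative sketch):
+
+    >>> avg, sw = np.average(np.arange(1, 5), weights=[4, 3, 2, 1],
+    ...                      returned=True)
+    >>> print(avg, sw)
+    2.0 10.0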
+    """
+    a = np.asanyarray(a)
+
+    if axis is not None:
+        axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
+
+    if keepdims is np._NoValue:
+        # Don't pass on the keepdims argument if one wasn't given.
+        keepdims_kw = {}
+    else:
+        keepdims_kw = {'keepdims': keepdims}
+
+    if weights is None:
+        avg = a.mean(axis, **keepdims_kw)
+        avg_as_array = np.asanyarray(avg)
+        scl = avg_as_array.dtype.type(a.size / avg_as_array.size)
+    else:
+        wgt = _weights_are_valid(weights=weights, a=a, axis=axis)
+
+        if issubclass(a.dtype.type, (np.integer, np.bool)):
+            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
+        else:
+            result_dtype = np.result_type(a.dtype, wgt.dtype)
+
+        scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
+        if np.any(scl == 0.0):
+            raise ZeroDivisionError(
+                "Weights sum to zero, can't be normalized")
+
+        avg = avg_as_array = np.multiply(a, wgt,
+                          dtype=result_dtype).sum(axis, **keepdims_kw) / scl
+
+    if returned:
+        if scl.shape != avg_as_array.shape:
+            scl = np.broadcast_to(scl, avg_as_array.shape).copy()
+        return avg, scl
+    else:
+        return avg
+
+
+@set_module('numpy')
+def asarray_chkfinite(a, dtype=None, order=None):
+    """Convert the input to an array, checking for NaNs or Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays. Success requires no NaNs or Infs.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F', 'A', 'K'}, optional
+        Memory layout. 'A' and 'K' depend on the order of input array a.
+        'C' row-major (C-style),
+        'F' column-major (Fortran-style) memory representation.
+        'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise.
+        'K' (keep) preserves input order.
+        Defaults to 'C'.
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of `a`. No copy is performed if the input
+        is already an ndarray. If `a` is a subclass of ndarray, a base
+        class ndarray is returned.
+
+    Raises
+    ------
+    ValueError
+        Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
+
+    See Also
+    --------
+    asarray : Create an array.
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Convert a list into an array. If all elements are finite, then
+    ``asarray_chkfinite`` is identical to ``asarray``.
+
+    >>> a = [1, 2]
+    >>> np.asarray_chkfinite(a, dtype=float)
+    array([1., 2.])
+
+    Raises ValueError if array_like contains NaNs or Infs.
+
+    >>> a = [1, 2, np.inf]
+    >>> try:
+    ...     np.asarray_chkfinite(a)
+    ... except ValueError:
+    ...     print('ValueError')
+    ...
+    ValueError
+
+    """
+    a = asarray(a, dtype=dtype, order=order)
+    if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
+        raise ValueError(
+            "array must not contain infs or NaNs")
+    return a
+
+
+def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
+    yield x
+    # support the undocumented behavior of allowing scalars
+    if np.iterable(condlist):
+        yield from condlist
+
+
+@array_function_dispatch(_piecewise_dispatcher)
+def piecewise(x, condlist, funclist, *args, **kw):
+    """
+    Evaluate a piecewise-defined function.
+
+    Given a set of conditions and corresponding functions, evaluate each
+    function on the input data wherever its condition is true.
+
+    Parameters
+    ----------
+    x : ndarray or scalar
+        The input domain.
+    condlist : list of bool arrays or bool scalars
+        Each boolean array corresponds to a function in `funclist`. Wherever
+        `condlist[i]` is True, `funclist[i](x)` is used as the output value.
+
+        Each boolean array in `condlist` selects a piece of `x`,
+        and should therefore be of the same shape as `x`.
+
+        The length of `condlist` must correspond to that of `funclist`.
+        If one extra function is given, i.e. if
+        ``len(funclist) == len(condlist) + 1``, then that extra function
+        is the default value, used wherever all conditions are false.
+    funclist : list of callables, f(x,*args,**kw), or scalars
+        Each function is evaluated over `x` wherever its corresponding
+        condition is True. It should take a 1d array as input and give a 1d
+        array or a scalar value as output. If, instead of a callable,
+        a scalar is provided then a constant function (``lambda x: scalar``) is
+        assumed.
+    args : tuple, optional
+        Any further arguments given to `piecewise` are passed to the functions
+        upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
+        each function is called as ``f(x, 1, 'a')``.
+    kw : dict, optional
+        Keyword arguments used in calling `piecewise` are passed to the
+        functions upon execution, i.e., if called
+        ``piecewise(..., ..., alpha=1)``, then each function is called as
+        ``f(x, alpha=1)``.
+
+    Returns
+    -------
+    out : ndarray
+        The output is the same shape and type as x and is found by
+        calling the functions in `funclist` on the appropriate portions of `x`,
+        as defined by the boolean arrays in `condlist`. Portions not covered
+        by any condition have a default value of 0.
+
+
+    See Also
+    --------
+    choose, select, where
+
+    Notes
+    -----
+    This is similar to choose or select, except that functions are
+    evaluated on elements of `x` that satisfy the corresponding condition from
+    `condlist`.
+
+    The result is::
+
+            |--
+            |funclist[0](x[condlist[0]])
+      out = |funclist[1](x[condlist[1]])
+            |...
+            |funclist[n2](x[condlist[n2]])
+            |--
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
+
+    >>> x = np.linspace(-2.5, 2.5, 6)
+    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
+    array([-1., -1., -1., 1., 1., 1.])
+
+    Define the absolute value, which is ``-x`` for ``x < 0`` and ``x`` for
+    ``x >= 0``.
+
+    >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
+    array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
+
+    Apply the same function to a scalar value.
+
+    >>> y = -2
+    >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x])
+    array(2)
+
+    """
+    x = asanyarray(x)
+    n2 = len(funclist)
+
+    # undocumented: single condition is promoted to a list of one condition
+    if isscalar(condlist) or (
+            not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):
+        condlist = [condlist]
+
+    condlist = asarray(condlist, dtype=bool)
+    n = len(condlist)
+
+    if n == n2 - 1:  # compute the "otherwise" condition.
+ condelse = ~np.any(condlist, axis=0, keepdims=True) + condlist = np.concatenate([condlist, condelse], axis=0) + n += 1 + elif n != n2: + raise ValueError( + f"with {n} condition(s), either {n} or {n + 1} functions are expected" + ) + + y = zeros_like(x) + for cond, func in zip(condlist, funclist): + if not isinstance(func, collections.abc.Callable): + y[cond] = func + else: + vals = x[cond] + if vals.size > 0: + y[cond] = func(vals, *args, **kw) + + return y + + +def _select_dispatcher(condlist, choicelist, default=None): + yield from condlist + yield from choicelist + + +@array_function_dispatch(_select_dispatcher) +def select(condlist, choicelist, default=0): + """ + Return an array drawn from elements in choicelist, depending on conditions. + + Parameters + ---------- + condlist : list of bool ndarrays + The list of conditions which determine from which array in `choicelist` + the output elements are taken. When multiple conditions are satisfied, + the first one encountered in `condlist` is used. + choicelist : list of ndarrays + The list of arrays from which the output elements are taken. It has + to be of the same length as `condlist`. + default : scalar, optional + The element inserted in `output` when all conditions evaluate to False. + + Returns + ------- + output : ndarray + The output at position m is the m-th element of the array in + `choicelist` where the m-th element of the corresponding array in + `condlist` is True. + + See Also + -------- + where : Return elements from one of two arrays depending on condition. + take, choose, compress, diag, diagonal + + Examples + -------- + >>> import numpy as np + + Beginning with an array of integers from 0 to 5 (inclusive), + elements less than ``3`` are negated, elements greater than ``3`` + are squared, and elements not meeting either of these conditions + (exactly ``3``) are replaced with a `default` value of ``42``. + + >>> x = np.arange(6) + >>> condlist = [x<3, x>3] + >>> choicelist = [-x, x**2] + >>> np.select(condlist, choicelist, 42) + array([ 0, -1, -2, 42, 16, 25]) + + When multiple conditions are satisfied, the first one encountered in + `condlist` is used. + + >>> condlist = [x<=4, x>3] + >>> choicelist = [x, x**2] + >>> np.select(condlist, choicelist, 55) + array([ 0, 1, 2, 3, 4, 25]) + + """ + # Check the size of condlist and choicelist are the same, or abort. + if len(condlist) != len(choicelist): + raise ValueError( + 'list of cases must be same length as list of conditions') + + # Now that the dtype is known, handle the deprecated select([], []) case + if len(condlist) == 0: + raise ValueError("select with an empty condition list is not possible") + + # TODO: This preserves the Python int, float, complex manually to get the + # right `result_type` with NEP 50. Most likely we will grow a better + # way to spell this (and this can be replaced). + choicelist = [ + choice if type(choice) in (int, float, complex) else np.asarray(choice) + for choice in choicelist] + choicelist.append(default if type(default) in (int, float, complex) + else np.asarray(default)) + + try: + dtype = np.result_type(*choicelist) + except TypeError as e: + msg = f'Choicelist and default value do not have a common dtype: {e}' + raise TypeError(msg) from None + + # Convert conditions to arrays and broadcast conditions and choices + # as the shape is needed for the result. Doing it separately optimizes + # for example when all choices are scalars. 
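+    # (Illustrative note: a scalar default such as 42 ends up as the fill
+    # value of np.full below; the choices are then burned in via np.copyto.)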
+ condlist = np.broadcast_arrays(*condlist) + choicelist = np.broadcast_arrays(*choicelist) + + # If cond array is not an ndarray in boolean format or scalar bool, abort. + for i, cond in enumerate(condlist): + if cond.dtype.type is not np.bool: + raise TypeError( + f'invalid entry {i} in condlist: should be boolean ndarray') + + if choicelist[0].ndim == 0: + # This may be common, so avoid the call. + result_shape = condlist[0].shape + else: + result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape + + result = np.full(result_shape, choicelist[-1], dtype) + + # Use np.copyto to burn each choicelist array onto result, using the + # corresponding condlist as a boolean mask. This is done in reverse + # order since the first choice should take precedence. + choicelist = choicelist[-2::-1] + condlist = condlist[::-1] + for choice, cond in zip(choicelist, condlist): + np.copyto(result, choice, where=cond) + + return result + + +def _copy_dispatcher(a, order=None, subok=None): + return (a,) + + +@array_function_dispatch(_copy_dispatcher) +def copy(a, order='K', subok=False): + """ + Return an array copy of the given object. + + Parameters + ---------- + a : array_like + Input data. + order : {'C', 'F', 'A', 'K'}, optional + Controls the memory layout of the copy. 'C' means C-order, + 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, + 'C' otherwise. 'K' means match the layout of `a` as closely + as possible. (Note that this function and :meth:`ndarray.copy` are very + similar, but have different default values for their order= + arguments.) + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise the + returned array will be forced to be a base-class array (defaults to False). + + Returns + ------- + arr : ndarray + Array interpretation of `a`. + + See Also + -------- + ndarray.copy : Preferred method for creating an array copy + + Notes + ----- + This is equivalent to: + + >>> np.array(a, copy=True) #doctest: +SKIP + + The copy made of the data is shallow, i.e., for arrays with object dtype, + the new array will point to the same objects. + See Examples from `ndarray.copy`. + + Examples + -------- + >>> import numpy as np + + Create an array x, with a reference y and a copy z: + + >>> x = np.array([1, 2, 3]) + >>> y = x + >>> z = np.copy(x) + + Note that, when we modify x, y changes, but not z: + + >>> x[0] = 10 + >>> x[0] == y[0] + True + >>> x[0] == z[0] + False + + Note that, np.copy clears previously set WRITEABLE=False flag. + + >>> a = np.array([1, 2, 3]) + >>> a.flags["WRITEABLE"] = False + >>> b = np.copy(a) + >>> b.flags["WRITEABLE"] + True + >>> b[0] = 3 + >>> b + array([3, 2, 3]) + """ + return array(a, order=order, subok=subok, copy=True) + +# Basic operations + + +def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None): + yield f + yield from varargs + + +@array_function_dispatch(_gradient_dispatcher) +def gradient(f, *varargs, axis=None, edge_order=1): + """ + Return the gradient of an N-dimensional array. + + The gradient is computed using second order accurate central differences + in the interior points and either first or second order accurate one-sides + (forward or backwards) differences at the boundaries. + The returned gradient hence has the same shape as the input array. + + Parameters + ---------- + f : array_like + An N-dimensional array containing samples of a scalar function. + varargs : list of scalar or array, optional + Spacing between f values. Default unitary spacing for all dimensions. 
+ Spacing can be specified using: + + 1. single scalar to specify a sample distance for all dimensions. + 2. N scalars to specify a constant sample distance for each dimension. + i.e. `dx`, `dy`, `dz`, ... + 3. N arrays to specify the coordinates of the values along each + dimension of F. The length of the array must match the size of + the corresponding dimension + 4. Any combination of N scalars/arrays with the meaning of 2. and 3. + + If `axis` is given, the number of varargs must equal the number of axes + specified in the axis parameter. + Default: 1. (see Examples below). + + edge_order : {1, 2}, optional + Gradient is calculated using N-th order accurate differences + at the boundaries. Default: 1. + axis : None or int or tuple of ints, optional + Gradient is calculated only along the given axis or axes + The default (axis = None) is to calculate the gradient for all the axes + of the input array. axis may be negative, in which case it counts from + the last to the first axis. + + Returns + ------- + gradient : ndarray or tuple of ndarray + A tuple of ndarrays (or a single ndarray if there is only one + dimension) corresponding to the derivatives of f with respect + to each dimension. Each derivative has the same shape as f. + + Examples + -------- + >>> import numpy as np + >>> f = np.array([1, 2, 4, 7, 11, 16]) + >>> np.gradient(f) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + >>> np.gradient(f, 2) + array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + + Spacing can be also specified with an array that represents the coordinates + of the values F along the dimensions. + For instance a uniform spacing: + + >>> x = np.arange(f.size) + >>> np.gradient(f, x) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + + Or a non uniform one: + + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.]) + >>> np.gradient(f, x) + array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) + + For two dimensional arrays, the return will be two arrays ordered by + axis. In this example the first array stands for the gradient in + rows and the second one in columns direction: + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]])) + (array([[ 2., 2., -1.], + [ 2., 2., -1.]]), + array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])) + + In this example the spacing is also specified: + uniform for axis=0 and non uniform for axis=1 + + >>> dx = 2. + >>> y = [1., 1.5, 3.5] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), dx, y) + (array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), + array([[2. , 2. , 2. ], + [2. , 1.7, 0.5]])) + + It is possible to specify how boundaries are treated using `edge_order` + + >>> x = np.array([0, 1, 2, 3, 4]) + >>> f = x**2 + >>> np.gradient(f, edge_order=1) + array([1., 2., 4., 6., 7.]) + >>> np.gradient(f, edge_order=2) + array([0., 2., 4., 6., 8.]) + + The `axis` keyword can be used to specify a subset of axes of which the + gradient is calculated + + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), axis=0) + array([[ 2., 2., -1.], + [ 2., 2., -1.]]) + + The `varargs` argument defines the spacing between sample points in the + input array. It can take two forms: + + 1. An array, specifying coordinates, which may be unevenly spaced: + + >>> x = np.array([0., 2., 3., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, x, edge_order=2) + array([ 0., 4., 6., 12., 16.]) + + 2. A scalar, representing the fixed sample distance: + + >>> dx = 2 + >>> x = np.array([0., 2., 4., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, dx, edge_order=2) + array([ 0., 4., 8., 12., 16.]) + + It's possible to provide different data for spacing along each dimension. 
+    The number of arguments must match the number of dimensions in the input
+    data.
+
+    >>> dx = 2
+    >>> dy = 3
+    >>> x = np.arange(0, 6, dx)
+    >>> y = np.arange(0, 9, dy)
+    >>> xs, ys = np.meshgrid(x, y)
+    >>> zs = xs + 2 * ys
+    >>> np.gradient(zs, dy, dx)  # Passing two scalars
+    (array([[2., 2., 2.],
+            [2., 2., 2.],
+            [2., 2., 2.]]),
+     array([[1., 1., 1.],
+            [1., 1., 1.],
+            [1., 1., 1.]]))
+
+    Mixing scalars and arrays is also allowed:
+
+    >>> np.gradient(zs, y, dx)  # Passing one array and one scalar
+    (array([[2., 2., 2.],
+            [2., 2., 2.],
+            [2., 2., 2.]]),
+     array([[1., 1., 1.],
+            [1., 1., 1.],
+            [1., 1., 1.]]))
+
+    Notes
+    -----
+    Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous
+    derivatives) and letting :math:`h_{*}` be a non-homogeneous stepsize, we
+    minimize the "consistency error" :math:`\\eta_{i}` between the true gradient
+    and its estimate from a linear combination of the neighboring grid-points:
+
+    .. math::
+
+        \\eta_{i} = f_{i}^{\\left(1\\right)} -
+                    \\left[ \\alpha f\\left(x_{i}\\right) +
+                            \\beta f\\left(x_{i} + h_{d}\\right) +
+                            \\gamma f\\left(x_{i}-h_{s}\\right)
+                    \\right]
+
+    By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
+    with their Taylor series expansion, this translates into solving
+    the following linear system:
+
+    .. math::
+
+        \\left\\{
+            \\begin{array}{r}
+                \\alpha+\\beta+\\gamma=0 \\\\
+                \\beta h_{d}-\\gamma h_{s}=1 \\\\
+                \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
+            \\end{array}
+        \\right.
+
+    The resulting approximation of :math:`f_{i}^{(1)}` is the following:
+
+    .. math::
+
+        \\hat f_{i}^{(1)} =
+            \\frac{
+                h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+                + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
+                - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
+                { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+            + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+                                + h_{s}h_{d}^{2}}{h_{d}
+                                + h_{s}}\\right)
+
+    It is worth noting that if :math:`h_{s}=h_{d}`
+    (i.e., data are evenly spaced)
+    we find the standard second order approximation:
+
+    .. math::
+
+        \\hat f_{i}^{(1)}=
+            \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+            + \\mathcal{O}\\left(h^{2}\\right)
+
+    With a similar procedure the forward/backward approximations used for
+    boundaries can be derived.
+
+    References
+    ----------
+    .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
+           (Texts in Applied Mathematics). New York: Springer.
+    .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
+           in Geophysical Fluid Dynamics. New York: Springer.
+    .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
+           Arbitrarily Spaced Grids,
+           Mathematics of Computation 51, no. 184 : 699-706.
+           `PDF <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
+    """
+    f = np.asanyarray(f)
+    N = f.ndim  # number of dimensions
+
+    if axis is None:
+        axes = tuple(range(N))
+    else:
+        axes = _nx.normalize_axis_tuple(axis, N)
+
+    len_axes = len(axes)
+    n = len(varargs)
+    if n == 0:
+        # no spacing argument - use 1 in all axes
+        dx = [1.0] * len_axes
+    elif n == 1 and np.ndim(varargs[0]) == 0:
+        # single scalar for all axes
+        dx = varargs * len_axes
+    elif n == len_axes:
+        # scalar or 1d array for each axis
+        dx = list(varargs)
+        for i, distances in enumerate(dx):
+            distances = np.asanyarray(distances)
+            if distances.ndim == 0:
+                continue
+            elif distances.ndim != 1:
+                raise ValueError("distances must be either scalars or 1d")
+            if len(distances) != f.shape[axes[i]]:
+                raise ValueError("when 1d, distances must match "
+                                 "the length of the corresponding dimension")
+            if np.issubdtype(distances.dtype, np.integer):
+                # Convert numpy integer types to float64 to avoid modular
+                # arithmetic in np.diff(distances).
+                distances = distances.astype(np.float64)
+            diffx = np.diff(distances)
+            # if distances are constant reduce to the scalar case
+            # since it brings a consistent speedup
+            if (diffx == diffx[0]).all():
+                diffx = diffx[0]
+            dx[i] = diffx
+    else:
+        raise TypeError("invalid number of arguments")
+
+    if edge_order > 2:
+        raise ValueError("'edge_order' greater than 2 not supported")
+
+    # use central differences on interior and one-sided differences on the
+    # endpoints. This preserves second-order accuracy over the full domain.
+
+    outvals = []
+
+    # create slice objects --- initially all are [:, :, ..., :]
+    slice1 = [slice(None)] * N
+    slice2 = [slice(None)] * N
+    slice3 = [slice(None)] * N
+    slice4 = [slice(None)] * N
+
+    otype = f.dtype
+    if otype.type is np.datetime64:
+        # the timedelta dtype with the same unit information
+        otype = np.dtype(otype.name.replace('datetime', 'timedelta'))
+        # view as timedelta to allow addition
+        f = f.view(otype)
+    elif otype.type is np.timedelta64:
+        pass
+    elif np.issubdtype(otype, np.inexact):
+        pass
+    else:
+        # All other types convert to floating point.
+        # First check if f is a numpy integer type; if so, convert f to float64
+        # to avoid modular arithmetic when computing the changes in f.
+        if np.issubdtype(otype, np.integer):
+            f = f.astype(np.float64)
+        otype = np.float64
+
+    for axis, ax_dx in zip(axes, dx):
+        if f.shape[axis] < edge_order + 1:
+            raise ValueError(
+                "Shape of array too small to calculate a numerical gradient, "
+                "at least (edge_order + 1) elements are required.")
+        # result allocation
+        out = np.empty_like(f, dtype=otype)
+
+        # spacing for the current axis
+        uniform_spacing = np.ndim(ax_dx) == 0
+
+        # Numerical differentiation: 2nd order interior
+        slice1[axis] = slice(1, -1)
+        slice2[axis] = slice(None, -2)
+        slice3[axis] = slice(1, -1)
+        slice4[axis] = slice(2, None)
+
+        if uniform_spacing:
+            out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2.
+                                                                          * ax_dx)
+        else:
+            dx1 = ax_dx[0:-1]
+            dx2 = ax_dx[1:]
+            a = -(dx2) / (dx1 * (dx1 + dx2))
+            b = (dx2 - dx1) / (dx1 * dx2)
+            c = dx1 / (dx2 * (dx1 + dx2))
+            # fix the shape for broadcasting
+            shape = np.ones(N, dtype=int)
+            shape[axis] = -1
+            a.shape = b.shape = c.shape = shape
+            # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
+            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \
+                + c * f[tuple(slice4)]
+
+        # Numerical differentiation: 1st order edges
+        if edge_order == 1:
+            slice1[axis] = 0
+            slice2[axis] = 1
+            slice3[axis] = 0
+            dx_0 = ax_dx if uniform_spacing else ax_dx[0]
+            # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
+            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0
+
+            slice1[axis] = -1
+            slice2[axis] = -1
+            slice3[axis] = -2
+            dx_n = ax_dx if uniform_spacing else ax_dx[-1]
+            # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
+            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n
+
+        # Numerical differentiation: 2nd order edges
+        else:
+            slice1[axis] = 0
+            slice2[axis] = 0
+            slice3[axis] = 1
+            slice4[axis] = 2
+            if uniform_spacing:
+                a = -1.5 / ax_dx
+                b = 2. / ax_dx
+                c = -0.5 / ax_dx
+            else:
+                dx1 = ax_dx[0]
+                dx2 = ax_dx[1]
+                a = -(2. * dx1 + dx2) / (dx1 * (dx1 + dx2))
+                b = (dx1 + dx2) / (dx1 * dx2)
+                c = - dx1 / (dx2 * (dx1 + dx2))
+            # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
+            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \
+                + c * f[tuple(slice4)]
+
+            slice1[axis] = -1
+            slice2[axis] = -3
+            slice3[axis] = -2
+            slice4[axis] = -1
+            if uniform_spacing:
+                a = 0.5 / ax_dx
+                b = -2. / ax_dx
+                c = 1.5 / ax_dx
+            else:
+                dx1 = ax_dx[-2]
+                dx2 = ax_dx[-1]
+                a = (dx2) / (dx1 * (dx1 + dx2))
+                b = - (dx2 + dx1) / (dx1 * dx2)
+                c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
+            # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
+            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] \
+                + c * f[tuple(slice4)]
+
+        outvals.append(out)
+
+        # reset the slice object in this dimension to ":"
+        slice1[axis] = slice(None)
+        slice2[axis] = slice(None)
+        slice3[axis] = slice(None)
+        slice4[axis] = slice(None)
+
+    if len_axes == 1:
+        return outvals[0]
+    return tuple(outvals)
+
+
+def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None):
+    return (a, prepend, append)
+
+
+@array_function_dispatch(_diff_dispatcher)
+def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
+    """
+    Calculate the n-th discrete difference along the given axis.
+
+    The first difference is given by ``out[i] = a[i+1] - a[i]`` along
+    the given axis; higher differences are calculated by using `diff`
+    recursively.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array
+    n : int, optional
+        The number of times values are differenced. If zero, the input
+        is returned as-is.
+    axis : int, optional
+        The axis along which the difference is taken, default is the
+        last axis.
+    prepend, append : array_like, optional
+        Values to prepend or append to `a` along axis prior to
+        performing the difference. Scalar values are expanded to
+        arrays with length 1 in the direction of axis and the shape
+        of the input array along all other axes. Otherwise the
+        dimension and shape must match `a` except along axis.
+
+    Returns
+    -------
+    diff : ndarray
+        The n-th differences. The shape of the output is the same as `a`
+        except along `axis` where the dimension is smaller by `n`.
The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + gradient, ediff1d, cumsum + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.diff(u8_arr) + array([255], dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] + np.uint8(255) + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.diff(i16_arr) + array([-1], dtype=int16) + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 4, 7, 0]) + >>> np.diff(x) + array([ 1, 2, 3, -7]) + >>> np.diff(x, n=2) + array([ 1, 1, -10]) + + >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) + >>> np.diff(x) + array([[2, 3, 4], + [5, 1, 2]]) + >>> np.diff(x, axis=0) + array([[-1, 2, 0, -2]]) + + >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) + >>> np.diff(x) + array([1, 1], dtype='timedelta64[D]') + + """ + if n == 0: + return a + if n < 0: + raise ValueError( + "order must be non-negative but got " + repr(n)) + + a = asanyarray(a) + nd = a.ndim + if nd == 0: + raise ValueError("diff requires input that is at least one dimensional") + axis = normalize_axis_index(axis, nd) + + combined = [] + if prepend is not np._NoValue: + prepend = np.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.concatenate(combined, axis) + + slice1 = [slice(None)] * nd + slice2 = [slice(None)] * nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + slice1 = tuple(slice1) + slice2 = tuple(slice2) + + op = not_equal if a.dtype == np.bool else subtract + for _ in range(n): + a = op(a[slice1], a[slice2]) + + return a + + +def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): + return (x, xp, fp) + + +@array_function_dispatch(_interp_dispatcher) +def interp(x, xp, fp, left=None, right=None, period=None): + """ + One-dimensional linear interpolation for monotonically increasing sample points. + + Returns the one-dimensional piecewise linear interpolant to a function + with given discrete data points (`xp`, `fp`), evaluated at `x`. + + Parameters + ---------- + x : array_like + The x-coordinates at which to evaluate the interpolated values. + + xp : 1-D sequence of floats + The x-coordinates of the data points, must be increasing if argument + `period` is not specified. Otherwise, `xp` is internally sorted after + normalizing the periodic boundaries with ``xp = xp % period``. + + fp : 1-D sequence of float or complex + The y-coordinates of the data points, same length as `xp`. + + left : optional float or complex corresponding to fp + Value to return for `x < xp[0]`, default is `fp[0]`. 
+
+    right : optional float or complex corresponding to fp
+        Value to return for `x > xp[-1]`, default is `fp[-1]`.
+
+    period : None or float, optional
+        A period for the x-coordinates. This parameter allows the proper
+        interpolation of angular x-coordinates. Parameters `left` and `right`
+        are ignored if `period` is specified.
+
+    Returns
+    -------
+    y : float or complex (corresponding to fp) or ndarray
+        The interpolated values, same shape as `x`.
+
+    Raises
+    ------
+    ValueError
+        If `xp` and `fp` have different lengths
+        If `xp` or `fp` are not 1-D sequences
+        If `period == 0`
+
+    See Also
+    --------
+    scipy.interpolate
+
+    Warnings
+    --------
+    The x-coordinate sequence is expected to be increasing, but this is not
+    explicitly enforced. However, if the sequence `xp` is non-increasing,
+    interpolation results are meaningless.
+
+    Note that, since NaN is unsortable, `xp` also cannot contain NaNs.
+
+    A simple check for `xp` being strictly increasing is::
+
+        np.all(np.diff(xp) > 0)
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> xp = [1, 2, 3]
+    >>> fp = [3, 2, 0]
+    >>> np.interp(2.5, xp, fp)
+    1.0
+    >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
+    array([3.  , 3.  , 2.5 , 0.56, 0.  ])
+    >>> UNDEF = -99.0
+    >>> np.interp(3.14, xp, fp, right=UNDEF)
+    -99.0
+
+    Plot an interpolant to the sine function:
+
+    >>> x = np.linspace(0, 2*np.pi, 10)
+    >>> y = np.sin(x)
+    >>> xvals = np.linspace(0, 2*np.pi, 50)
+    >>> yinterp = np.interp(xvals, x, y)
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(x, y, 'o')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.plot(xvals, yinterp, '-x')
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.show()
+
+    Interpolation with periodic x-coordinates:
+
+    >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
+    >>> xp = [190, -190, 350, -350]
+    >>> fp = [5, 10, 3, 4]
+    >>> np.interp(x, xp, fp, period=360)
+    array([7.5 , 5.  , 8.75, 6.25, 3.  , 3.25, 3.5 , 3.75])
+
+    Complex interpolation:
+
+    >>> x = [1.5, 4.0]
+    >>> xp = [2,3,5]
+    >>> fp = [1.0j, 0, 2+3j]
+    >>> np.interp(x, xp, fp)
+    array([0.+1.j , 1.+1.5j])
+
+    """
+
+    fp = np.asarray(fp)
+
+    if np.iscomplexobj(fp):
+        interp_func = compiled_interp_complex
+        input_dtype = np.complex128
+    else:
+        interp_func = compiled_interp
+        input_dtype = np.float64
+
+    if period is not None:
+        if period == 0:
+            raise ValueError("period must be a non-zero value")
+        period = abs(period)
+        left = None
+        right = None
+
+        x = np.asarray(x, dtype=np.float64)
+        xp = np.asarray(xp, dtype=np.float64)
+        fp = np.asarray(fp, dtype=input_dtype)
+
+        if xp.ndim != 1 or fp.ndim != 1:
+            raise ValueError("Data points must be 1-D sequences")
+        if xp.shape[0] != fp.shape[0]:
+            raise ValueError("fp and xp are not of the same length")
+        # normalizing periodic boundaries
+        x = x % period
+        xp = xp % period
+        asort_xp = np.argsort(xp)
+        xp = xp[asort_xp]
+        fp = fp[asort_xp]
+        xp = np.concatenate((xp[-1:] - period, xp, xp[0:1] + period))
+        fp = np.concatenate((fp[-1:], fp, fp[0:1]))
+
+    return interp_func(x, xp, fp, left, right)
+
+
+def _angle_dispatcher(z, deg=None):
+    return (z,)
+
+
+@array_function_dispatch(_angle_dispatcher)
+def angle(z, deg=False):
+    """
+    Return the angle of the complex argument.
+
+    Parameters
+    ----------
+    z : array_like
+        A complex number or sequence of complex numbers.
+    deg : bool, optional
+        Return angle in degrees if True, radians if False (default).
+
+    Returns
+    -------
+    angle : ndarray or scalar
+        The counterclockwise angle from the positive real axis on the complex
+        plane in the range ``(-pi, pi]``, with dtype as numpy.float64.
+ + See Also + -------- + arctan2 + absolute + + Notes + ----- + This function passes the imaginary and real parts of the argument to + `arctan2` to compute the result; consequently, it follows the convention + of `arctan2` when the magnitude of the argument is zero. See example. + + Examples + -------- + >>> import numpy as np + >>> np.angle([1.0, 1.0j, 1+1j]) # in radians + array([ 0. , 1.57079633, 0.78539816]) # may vary + >>> np.angle(1+1j, deg=True) # in degrees + 45.0 + >>> np.angle([0., -0., complex(0., -0.), complex(-0., -0.)]) # convention + array([ 0. , 3.14159265, -0. , -3.14159265]) + + """ + z = asanyarray(z) + if issubclass(z.dtype.type, _nx.complexfloating): + zimag = z.imag + zreal = z.real + else: + zimag = 0 + zreal = z + + a = arctan2(zimag, zreal) + if deg: + a *= 180 / pi + return a + + +def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): + return (p,) + + +@array_function_dispatch(_unwrap_dispatcher) +def unwrap(p, discont=None, axis=-1, *, period=2 * pi): + r""" + Unwrap by taking the complement of large deltas with respect to the period. + + This unwraps a signal `p` by changing elements which have an absolute + difference from their predecessor of more than ``max(discont, period/2)`` + to their `period`-complementary values. + + For the default case where `period` is :math:`2\pi` and `discont` is + :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences + are never greater than :math:`\pi` by adding :math:`2k\pi` for some + integer :math:`k`. + + Parameters + ---------- + p : array_like + Input array. + discont : float, optional + Maximum discontinuity between values, default is ``period/2``. + Values below ``period/2`` are treated as if they were ``period/2``. + To have an effect different from the default, `discont` should be + larger than ``period/2``. + axis : int, optional + Axis along which unwrap will operate, default is the last axis. + period : float, optional + Size of the range over which the input wraps. By default, it is + ``2 pi``. + + .. versionadded:: 1.21.0 + + Returns + ------- + out : ndarray + Output array. + + See Also + -------- + rad2deg, deg2rad + + Notes + ----- + If the discontinuity in `p` is smaller than ``period/2``, + but larger than `discont`, no unwrapping is done because taking + the complement would only make the discontinuity larger. + + Examples + -------- + >>> import numpy as np + >>> phase = np.linspace(0, np.pi, num=5) + >>> phase[3:] += np.pi + >>> phase + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary + >>> np.unwrap(phase) + array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. 
]) # may vary
+    >>> np.unwrap([0, 1, 2, -1, 0], period=4)
+    array([0, 1, 2, 3, 4])
+    >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6)
+    array([1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4)
+    array([2, 3, 4, 5, 6, 7, 8, 9])
+    >>> phase_deg = np.mod(np.linspace(0, 720, 19), 360) - 180
+    >>> np.unwrap(phase_deg, period=360)
+    array([-180., -140., -100.,  -60.,  -20.,   20.,   60.,  100.,  140.,
+            180.,  220.,  260.,  300.,  340.,  380.,  420.,  460.,  500.,
+            540.])
+    """
+    p = asarray(p)
+    nd = p.ndim
+    dd = diff(p, axis=axis)
+    if discont is None:
+        discont = period / 2
+    slice1 = [slice(None, None)] * nd  # full slices
+    slice1[axis] = slice(1, None)
+    slice1 = tuple(slice1)
+    dtype = np.result_type(dd, period)
+    if _nx.issubdtype(dtype, _nx.integer):
+        interval_high, rem = divmod(period, 2)
+        boundary_ambiguous = rem == 0
+    else:
+        interval_high = period / 2
+        boundary_ambiguous = True
+    interval_low = -interval_high
+    ddmod = mod(dd - interval_low, period) + interval_low
+    if boundary_ambiguous:
+        # for `mask = (abs(dd) == period/2)`, the above line made
+        # `ddmod[mask] == -period/2`. correct these such that
+        # `ddmod[mask] == sign(dd[mask])*period/2`.
+        _nx.copyto(ddmod, interval_high,
+                   where=(ddmod == interval_low) & (dd > 0))
+    ph_correct = ddmod - dd
+    _nx.copyto(ph_correct, 0, where=abs(dd) < discont)
+    up = array(p, copy=True, dtype=dtype)
+    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
+    return up
+
+
+def _sort_complex(a):
+    return (a,)
+
+
+@array_function_dispatch(_sort_complex)
+def sort_complex(a):
+    """
+    Sort a complex array using the real part first, then the imaginary part.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array
+
+    Returns
+    -------
+    out : complex ndarray
+        Always returns a sorted complex array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.sort_complex([5, 3, 6, 2, 1])
+    array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
+
+    >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
+    array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
+
+    """
+    b = array(a, copy=True)
+    b.sort()
+    if not issubclass(b.dtype.type, _nx.complexfloating):
+        if b.dtype.char in 'bhBH':
+            return b.astype('F')
+        elif b.dtype.char == 'g':
+            return b.astype('G')
+        else:
+            return b.astype('D')
+    else:
+        return b
+
+
+def _arg_trim_zeros(filt):
+    """Return indices of the first and last non-zero element.
+
+    Parameters
+    ----------
+    filt : array_like
+        Input array.
+
+    Returns
+    -------
+    start, stop : ndarray
+        Two arrays containing the indices of the first and last non-zero
+        element in each dimension.
+
+    See also
+    --------
+    trim_zeros
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0]))
+    (array([2]), array([3]))
+    """
+    nonzero = (
+        np.argwhere(filt)
+        if filt.dtype != np.object_
+        # Historically, `trim_zeros` treats `None` in an object array
+        # as non-zero while argwhere doesn't, account for that
+        else np.argwhere(filt != 0)
+    )
+    if nonzero.size == 0:
+        start = stop = np.array([], dtype=np.intp)
+    else:
+        start = nonzero.min(axis=0)
+        stop = nonzero.max(axis=0)
+    return start, stop
+
+
+def _trim_zeros(filt, trim=None, axis=None):
+    return (filt,)
+
+
+@array_function_dispatch(_trim_zeros)
+def trim_zeros(filt, trim='fb', axis=None):
+    """Remove values along a dimension which are zero along all other
+    dimensions.
+
+    Parameters
+    ----------
+    filt : array_like
+        Input array.
+    trim : {"fb", "f", "b"}, optional
+        A string with 'f' representing trim from front and 'b' to trim from
+        back. By default, zeros are trimmed on both sides.
+        Front and back refer to the edges of a dimension, with "front" referring
+        to the side with the lowest index 0, and "back" referring to the highest
+        index (or index -1).
+    axis : int or sequence, optional
+        If None, `filt` is cropped such that the smallest bounding box is
+        returned that still contains all values which are not zero.
+        If an axis is specified, `filt` will be sliced in that dimension only
+        on the sides specified by `trim`. The remaining area will be the
+        smallest that still contains all values which are not zero.
+
+        .. versionadded:: 2.2.0
+
+    Returns
+    -------
+    trimmed : ndarray or sequence
+        The result of trimming the input. The number of dimensions and the
+        input data type are preserved.
+
+    Notes
+    -----
+    For all-zero arrays, the first axis is trimmed first.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
+    >>> np.trim_zeros(a)
+    array([1, 2, 3, 0, 2, 1])
+
+    >>> np.trim_zeros(a, trim='b')
+    array([0, 0, 0, ..., 0, 2, 1])
+
+    Multiple dimensions are supported.
+
+    >>> b = np.array([[0, 0, 2, 3, 0, 0],
+    ...               [0, 1, 0, 3, 0, 0],
+    ...               [0, 0, 0, 0, 0, 0]])
+    >>> np.trim_zeros(b)
+    array([[0, 2, 3],
+           [1, 0, 3]])
+
+    >>> np.trim_zeros(b, axis=-1)
+    array([[0, 2, 3],
+           [1, 0, 3],
+           [0, 0, 0]])
+
+    The input data type is preserved, list/tuple in means list/tuple out.
+
+    >>> np.trim_zeros([0, 1, 2, 0])
+    [1, 2]
+
+    """
+    filt_ = np.asarray(filt)
+
+    trim = trim.lower()
+    if trim not in {"fb", "bf", "f", "b"}:
+        raise ValueError(f"unexpected character(s) in `trim`: {trim!r}")
+
+    start, stop = _arg_trim_zeros(filt_)
+    stop += 1  # Adjust for slicing
+
+    if start.size == 0:
+        # filt is all-zero -> assign same values to start and stop so that
+        # resulting slice will be empty
+        start = stop = np.zeros(filt_.ndim, dtype=np.intp)
+    else:
+        if 'f' not in trim:
+            start = (None,) * filt_.ndim
+        if 'b' not in trim:
+            stop = (None,) * filt_.ndim
+
+    if len(start) == 1:
+        # filt is 1D -> don't use multi-dimensional slicing to preserve
+        # non-array input types
+        sl = slice(start[0], stop[0])
+    elif axis is None:
+        # trim all axes
+        sl = tuple(slice(*x) for x in zip(start, stop))
+    else:
+        # only trim single axis
+        axis = normalize_axis_index(axis, filt_.ndim)
+        sl = (slice(None),) * axis + (slice(start[axis], stop[axis]),) + (...,)
+
+    trimmed = filt[sl]
+    return trimmed
+
+
+def _extract_dispatcher(condition, arr):
+    return (condition, arr)
+
+
+@array_function_dispatch(_extract_dispatcher)
+def extract(condition, arr):
+    """
+    Return the elements of an array that satisfy some condition.
+
+    This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
+    `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
+
+    Note that `place` does the exact opposite of `extract`.
+
+    Parameters
+    ----------
+    condition : array_like
+        An array whose nonzero or True entries indicate the elements of `arr`
+        to extract.
+    arr : array_like
+        Input array of the same size as `condition`.
+
+    Returns
+    -------
+    extract : ndarray
+        Rank 1 array of values from `arr` where `condition` is True.
+
+    See Also
+    --------
+    take, put, copyto, compress, place
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> arr = np.arange(12).reshape((3, 4))
+    >>> arr
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11]])
+    >>> condition = np.mod(arr, 3)==0
+    >>> condition
+    array([[ True, False, False,  True],
+           [False, False,  True, False],
+           [False,  True, False, False]])
+    >>> np.extract(condition, arr)
+    array([0, 3, 6, 9])
+
+
+    If `condition` is boolean:
+
+    >>> arr[condition]
+    array([0, 3, 6, 9])
+
+    """
+    return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
+
+
+def _place_dispatcher(arr, mask, vals):
+    return (arr, mask, vals)
+
+
+@array_function_dispatch(_place_dispatcher)
+def place(arr, mask, vals):
+    """
+    Change elements of an array based on conditional and input values.
+
+    Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
+    `place` uses the first N elements of `vals`, where N is the number of
+    True values in `mask`, while `copyto` uses the elements where `mask`
+    is True.
+
+    Note that `extract` does the exact opposite of `place`.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Array to put data into.
+    mask : array_like
+        Boolean mask array. Must have the same size as `arr`.
+    vals : 1-D sequence
+        Values to put into `arr`. Only the first N elements are used, where
+        N is the number of True values in `mask`. If `vals` is smaller
+        than N, it will be repeated, and if elements of `arr` are to be
+        masked, this sequence must be non-empty.
+
+    See Also
+    --------
+    copyto, put, take, extract
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> arr = np.arange(6).reshape(2, 3)
+    >>> np.place(arr, arr>2, [44, 55])
+    >>> arr
+    array([[ 0,  1,  2],
+           [44, 55, 44]])
+
+    """
+    return _place(arr, mask, vals)
+
+
+def disp(mesg, device=None, linefeed=True):
+    """
+    Display a message on a device.
+
+    .. deprecated:: 2.0
+        Use your own printing function instead.
+
+    Parameters
+    ----------
+    mesg : str
+        Message to display.
+    device : object
+        Device to write message. If None, defaults to ``sys.stdout`` which is
+        very similar to ``print``. `device` needs to have ``write()`` and
+        ``flush()`` methods.
+    linefeed : bool, optional
+        Option whether to print a line feed or not. Defaults to True.
+
+    Raises
+    ------
+    AttributeError
+        If `device` does not have a ``write()`` or ``flush()`` method.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Besides ``sys.stdout``, a file-like object can also be used as it has
+    both required methods:
+
+    >>> from io import StringIO
+    >>> buf = StringIO()
+    >>> np.disp('"Display" in a file', device=buf)
+    >>> buf.getvalue()
+    '"Display" in a file\\n'
+
+    """
+
+    # Deprecated in NumPy 2.0, 2023-07-11
+    warnings.warn(
+        "`disp` is deprecated, "
+        "use your own printing function instead. "
+        "(deprecated in NumPy 2.0)",
+        DeprecationWarning,
+        stacklevel=2
+    )
+
+    if device is None:
+        device = sys.stdout
+    if linefeed:
+        device.write(f'{mesg}\n')
+    else:
+        device.write(f'{mesg}')
+    device.flush()
+
+
+# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
+_DIMENSION_NAME = r'\w+'
+_CORE_DIMENSION_LIST = f'(?:{_DIMENSION_NAME}(?:,{_DIMENSION_NAME})*)?'
+_ARGUMENT = fr'\({_CORE_DIMENSION_LIST}\)'
+_ARGUMENT_LIST = f'{_ARGUMENT}(?:,{_ARGUMENT})*'
+_SIGNATURE = f'^{_ARGUMENT_LIST}->{_ARGUMENT_LIST}$'
+
+
+def _parse_gufunc_signature(signature):
+    """
+    Parse string signatures for a generalized universal function.
+ + Arguments + --------- + signature : string + Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` + for ``np.matmul``. + + Returns + ------- + Tuple of input and output core dimensions parsed from the signature, each + of the form List[Tuple[str, ...]]. + """ + signature = re.sub(r'\s+', '', signature) + + if not re.match(_SIGNATURE, signature): + raise ValueError( + f'not a valid gufunc signature: {signature}') + return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) + for arg in re.findall(_ARGUMENT, arg_list)] + for arg_list in signature.split('->')) + + +def _update_dim_sizes(dim_sizes, arg, core_dims): + """ + Incrementally check and update core dimension sizes for a single argument. + + Arguments + --------- + dim_sizes : Dict[str, int] + Sizes of existing core dimensions. Will be updated in-place. + arg : ndarray + Argument to examine. + core_dims : Tuple[str, ...] + Core dimensions for this argument. + """ + if not core_dims: + return + + num_core_dims = len(core_dims) + if arg.ndim < num_core_dims: + raise ValueError( + '%d-dimensional argument does not have enough ' + 'dimensions for all core dimensions %r' + % (arg.ndim, core_dims)) + + core_shape = arg.shape[-num_core_dims:] + for dim, size in zip(core_dims, core_shape): + if dim in dim_sizes: + if size != dim_sizes[dim]: + raise ValueError( + 'inconsistent size for core dimension %r: %r vs %r' + % (dim, size, dim_sizes[dim])) + else: + dim_sizes[dim] = size + + +def _parse_input_dimensions(args, input_core_dims): + """ + Parse broadcast and core dimensions for vectorize with a signature. + + Arguments + --------- + args : Tuple[ndarray, ...] + Tuple of input arguments to examine. + input_core_dims : List[Tuple[str, ...]] + List of core dimensions corresponding to each input. + + Returns + ------- + broadcast_shape : Tuple[int, ...] + Common shape to broadcast all non-core dimensions to. + dim_sizes : Dict[str, int] + Common sizes for named core dimensions. + """ + broadcast_args = [] + dim_sizes = {} + for arg, core_dims in zip(args, input_core_dims): + _update_dim_sizes(dim_sizes, arg, core_dims) + ndim = arg.ndim - len(core_dims) + dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) + broadcast_args.append(dummy_array) + broadcast_shape = np.lib._stride_tricks_impl._broadcast_shape( + *broadcast_args + ) + return broadcast_shape, dim_sizes + + +def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): + """Helper for calculating broadcast shapes with core dimensions.""" + return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) + for core_dims in list_of_core_dims] + + +def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, + results=None): + """Helper for creating output arrays in vectorize.""" + shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) + if dtypes is None: + dtypes = [None] * len(shapes) + if results is None: + arrays = tuple(np.empty(shape=shape, dtype=dtype) + for shape, dtype in zip(shapes, dtypes)) + else: + arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype) + for result, shape, dtype + in zip(results, shapes, dtypes)) + return arrays + + +def _get_vectorize_dtype(dtype): + if dtype.char in "SU": + return dtype.char + return dtype + + +@set_module('numpy') +class vectorize: + """ + vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, + cache=False, signature=None) + + Returns an object that acts like pyfunc, but takes arrays as input. 
+
+    Define a vectorized function which takes a nested sequence of objects or
+    numpy arrays as inputs and returns a single numpy array or a tuple of numpy
+    arrays. The vectorized function evaluates `pyfunc` over successive tuples
+    of the input arrays like the python map function, except it uses the
+    broadcasting rules of numpy.
+
+    The data type of the output of `vectorize` is determined by calling
+    the function with the first element of the input. This can be avoided
+    by specifying the `otypes` argument.
+
+    Parameters
+    ----------
+    pyfunc : callable, optional
+        A python function or method.
+        Can be omitted to produce a decorator with keyword arguments.
+    otypes : str or list of dtypes, optional
+        The output data type. It must be specified as either a string of
+        typecode characters or a list of data type specifiers. There should
+        be one data type specifier for each output.
+    doc : str, optional
+        The docstring for the function. If None, the docstring will be the
+        ``pyfunc.__doc__``.
+    excluded : set, optional
+        Set of strings or integers representing the positional or keyword
+        arguments for which the function will not be vectorized. These will be
+        passed directly to `pyfunc` unmodified.
+
+    cache : bool, optional
+        If `True`, then cache the first function call that determines the number
+        of outputs if `otypes` is not provided.
+
+    signature : string, optional
+        Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
+        vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
+        be called with (and expected to return) arrays with shapes given by the
+        size of corresponding core dimensions. By default, ``pyfunc`` is
+        assumed to take scalars as input and output.
+
+    Returns
+    -------
+    out : callable
+        A vectorized function if ``pyfunc`` was provided,
+        a decorator otherwise.
+
+    See Also
+    --------
+    frompyfunc : Takes an arbitrary Python function and returns a ufunc
+
+    Notes
+    -----
+    The `vectorize` function is provided primarily for convenience, not for
+    performance. The implementation is essentially a for loop.
+
+    If `otypes` is not specified, then a call to the function with the
+    first argument will be used to determine the number of outputs. The
+    results of this call will be cached if `cache` is `True` to prevent
+    calling the function twice. However, to implement the cache, the
+    original function must be wrapped which will slow down subsequent
+    calls, so only do this if your function is expensive.
+
+    The new keyword-argument interface and the `excluded`-argument support
+    further degrade performance.
+
+    References
+    ----------
+    .. [1] :doc:`/reference/c-api/generalized-ufuncs`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> def myfunc(a, b):
+    ...     "Return a-b if a>b, otherwise return a+b"
+    ...     if a > b:
+    ...         return a - b
+    ...     else:
+    ...         return a + b
+
+    >>> vfunc = np.vectorize(myfunc)
+    >>> vfunc([1, 2, 3, 4], 2)
+    array([3, 4, 1, 2])
+
+    The docstring is taken from the input function to `vectorize` unless it
+    is specified:
+
+    >>> vfunc.__doc__
+    'Return a-b if a>b, otherwise return a+b'
+    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
+    >>> vfunc.__doc__
+    'Vectorized `myfunc`'
+
+    The output type is determined by evaluating the first element of the input,
+    unless it is specified:
+
+    >>> out = vfunc([1, 2, 3, 4], 2)
+    >>> type(out[0])
+    <class 'numpy.int64'>
+    >>> vfunc = np.vectorize(myfunc, otypes=[float])
+    >>> out = vfunc([1, 2, 3, 4], 2)
+    >>> type(out[0])
+    <class 'numpy.float64'>
+
+    The `excluded` argument can be used to prevent vectorizing over certain
+    arguments. This can be useful for array-like arguments of a fixed length
+    such as the coefficients for a polynomial as in `polyval`:
+
+    >>> def mypolyval(p, x):
+    ...     _p = list(p)
+    ...     res = _p.pop(0)
+    ...     while _p:
+    ...         res = res*x + _p.pop(0)
+    ...     return res
+
+    Here, we exclude the zeroth argument from vectorization whether it is
+    passed by position or keyword.
+
+    >>> vpolyval = np.vectorize(mypolyval, excluded={0, 'p'})
+    >>> vpolyval([1, 2, 3], x=[0, 1])
+    array([3, 6])
+    >>> vpolyval(p=[1, 2, 3], x=[0, 1])
+    array([3, 6])
+
+    The `signature` argument allows for vectorizing functions that act on
+    non-scalar arrays of fixed length. For example, you can use it for a
+    vectorized calculation of Pearson correlation coefficient and its p-value:
+
+    >>> import scipy.stats
+    >>> pearsonr = np.vectorize(scipy.stats.pearsonr,
+    ...                         signature='(n),(n)->(),()')
+    >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
+    (array([ 1., -1.]), array([ 0.,  0.]))
+
+    Or for a vectorized convolution:
+
+    >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
+    >>> convolve(np.eye(4), [1, 2, 1])
+    array([[1., 2., 1., 0., 0., 0.],
+           [0., 1., 2., 1., 0., 0.],
+           [0., 0., 1., 2., 1., 0.],
+           [0., 0., 0., 1., 2., 1.]])
+
+    Decorator syntax is supported. The decorator can be called as
+    a function to provide keyword arguments:
+
+    >>> @np.vectorize
+    ... def identity(x):
+    ...     return x
+    ...
+    >>> identity([0, 1, 2])
+    array([0, 1, 2])
+    >>> @np.vectorize(otypes=[float])
+    ... def as_float(x):
+    ...     return x
+    ...
+    >>> as_float([0, 1, 2])
+    array([0., 1., 2.])
+    """
+    def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None,
+                 excluded=None, cache=False, signature=None):
+
+        if (pyfunc != np._NoValue) and (not callable(pyfunc)):
+            # Splitting the error message to keep
+            # the length below 79 characters.
+            part1 = "When used as a decorator, "
+            part2 = "only accepts keyword arguments."
+ raise TypeError(part1 + part2) + + self.pyfunc = pyfunc + self.cache = cache + self.signature = signature + if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'): + self.__name__ = pyfunc.__name__ + + self._ufunc = {} # Caching to improve default performance + self._doc = None + self.__doc__ = doc + if doc is None and hasattr(pyfunc, '__doc__'): + self.__doc__ = pyfunc.__doc__ + else: + self._doc = doc + + if isinstance(otypes, str): + for char in otypes: + if char not in typecodes['All']: + raise ValueError(f"Invalid otype specified: {char}") + elif iterable(otypes): + otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes] + elif otypes is not None: + raise ValueError("Invalid otype specification") + self.otypes = otypes + + # Excluded variable support + if excluded is None: + excluded = set() + self.excluded = set(excluded) + + if signature is not None: + self._in_and_out_core_dims = _parse_gufunc_signature(signature) + else: + self._in_and_out_core_dims = None + + def _init_stage_2(self, pyfunc, *args, **kwargs): + self.__name__ = pyfunc.__name__ + self.pyfunc = pyfunc + if self._doc is None: + self.__doc__ = pyfunc.__doc__ + else: + self.__doc__ = self._doc + + def _call_as_normal(self, *args, **kwargs): + """ + Return arrays with the results of `pyfunc` broadcast (vectorized) over + `args` and `kwargs` not in `excluded`. + """ + excluded = self.excluded + if not kwargs and not excluded: + func = self.pyfunc + vargs = args + else: + # The wrapper accepts only positional arguments: we use `names` and + # `inds` to mutate `the_args` and `kwargs` to pass to the original + # function. + nargs = len(args) + + names = [_n for _n in kwargs if _n not in excluded] + inds = [_i for _i in range(nargs) if _i not in excluded] + the_args = list(args) + + def func(*vargs): + for _n, _i in enumerate(inds): + the_args[_i] = vargs[_n] + kwargs.update(zip(names, vargs[len(inds):])) + return self.pyfunc(*the_args, **kwargs) + + vargs = [args[_i] for _i in inds] + vargs.extend([kwargs[_n] for _n in names]) + + return self._vectorize_call(func=func, args=vargs) + + def __call__(self, *args, **kwargs): + if self.pyfunc is np._NoValue: + self._init_stage_2(*args, **kwargs) + return self + + return self._call_as_normal(*args, **kwargs) + + def _get_ufunc_and_otypes(self, func, args): + """Return (ufunc, otypes).""" + # frompyfunc will fail if args is empty + if not args: + raise ValueError('args can not be empty') + + if self.otypes is not None: + otypes = self.otypes + + # self._ufunc is a dictionary whose keys are the number of + # arguments (i.e. len(args)) and whose values are ufuncs created + # by frompyfunc. len(args) can be different for different calls if + # self.pyfunc has parameters with default values. We only use the + # cache when func is self.pyfunc, which occurs when the call uses + # only positional arguments and no arguments are excluded. + + nin = len(args) + nout = len(self.otypes) + if func is not self.pyfunc or nin not in self._ufunc: + ufunc = frompyfunc(func, nin, nout) + else: + ufunc = None # We'll get it from self._ufunc + if func is self.pyfunc: + ufunc = self._ufunc.setdefault(nin, ufunc) + else: + # Get number of outputs and output types by calling the function on + # the first entries of args. We also cache the result to prevent + # the subsequent call when the ufunc is evaluated. 
+ # Assumes that ufunc first evaluates the 0th elements in the input + # arrays (the input values are not checked to ensure this) + args = [asarray(a) for a in args] + if builtins.any(arg.size == 0 for arg in args): + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + + inputs = [arg.flat[0] for arg in args] + outputs = func(*inputs) + + # Performance note: profiling indicates that -- for simple + # functions at least -- this wrapping can almost double the + # execution time. + # Hence we make it optional. + if self.cache: + _cache = [outputs] + + def _func(*vargs): + if _cache: + return _cache.pop() + else: + return func(*vargs) + else: + _func = func + + if isinstance(outputs, tuple): + nout = len(outputs) + else: + nout = 1 + outputs = (outputs,) + + otypes = ''.join([asarray(outputs[_k]).dtype.char + for _k in range(nout)]) + + # Performance note: profiling indicates that creating the ufunc is + # not a significant cost compared with wrapping so it seems not + # worth trying to cache this. + ufunc = frompyfunc(_func, len(args), nout) + + return ufunc, otypes + + def _vectorize_call(self, func, args): + """Vectorized call to `func` over positional `args`.""" + if self.signature is not None: + res = self._vectorize_call_with_signature(func, args) + elif not args: + res = func() + else: + ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) + # gh-29196: `dtype=object` should eventually be removed + args = [asanyarray(a, dtype=object) for a in args] + outputs = ufunc(*args, out=...) + + if ufunc.nout == 1: + res = asanyarray(outputs, dtype=otypes[0]) + else: + res = tuple(asanyarray(x, dtype=t) + for x, t in zip(outputs, otypes)) + return res + + def _vectorize_call_with_signature(self, func, args): + """Vectorized call over positional arguments with a signature.""" + input_core_dims, output_core_dims = self._in_and_out_core_dims + + if len(args) != len(input_core_dims): + raise TypeError('wrong number of positional arguments: ' + 'expected %r, got %r' + % (len(input_core_dims), len(args))) + args = tuple(asanyarray(arg) for arg in args) + + broadcast_shape, dim_sizes = _parse_input_dimensions( + args, input_core_dims) + input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, + input_core_dims) + args = [np.broadcast_to(arg, shape, subok=True) + for arg, shape in zip(args, input_shapes)] + + outputs = None + otypes = self.otypes + nout = len(output_core_dims) + + for index in np.ndindex(*broadcast_shape): + results = func(*(arg[index] for arg in args)) + + n_results = len(results) if isinstance(results, tuple) else 1 + + if nout != n_results: + raise ValueError( + 'wrong number of outputs from pyfunc: expected %r, got %r' + % (nout, n_results)) + + if nout == 1: + results = (results,) + + if outputs is None: + for result, core_dims in zip(results, output_core_dims): + _update_dim_sizes(dim_sizes, result, core_dims) + + outputs = _create_arrays(broadcast_shape, dim_sizes, + output_core_dims, otypes, results) + + for output, result in zip(outputs, results): + output[index] = result + + if outputs is None: + # did not call the function even once + if otypes is None: + raise ValueError('cannot call `vectorize` on size 0 inputs ' + 'unless `otypes` is set') + if builtins.any(dim not in dim_sizes + for dims in output_core_dims + for dim in dims): + raise ValueError('cannot call `vectorize` with a signature ' + 'including new output dimensions on size 0 ' + 'inputs') + outputs = _create_arrays(broadcast_shape, dim_sizes, + 
output_core_dims, otypes) + + return outputs[0] if nout == 1 else outputs + + +def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, + fweights=None, aweights=None, *, dtype=None): + return (m, y, fweights, aweights) + + +@array_function_dispatch(_cov_dispatcher) +def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, + aweights=None, *, dtype=None): + """ + Estimate a covariance matrix, given data and weights. + + Covariance indicates the level to which two variables vary together. + If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, + then the covariance matrix element :math:`C_{ij}` is the covariance of + :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance + of :math:`x_i`. + + See the notes for an outline of the algorithm. + + Parameters + ---------- + m : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `m` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same form + as that of `m`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N - 1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. These values can be overridden by using + the keyword ``ddof`` in numpy versions >= 1.5. + ddof : int, optional + If not ``None`` the default value implied by `bias` is overridden. + Note that ``ddof=1`` will return the unbiased estimate, even if both + `fweights` and `aweights` are specified, and ``ddof=0`` will return + the simple average. See the notes for the details. The default value + is ``None``. + fweights : array_like, int, optional + 1-D array of integer frequency weights; the number of times each + observation vector should be repeated. + aweights : array_like, optional + 1-D array of observation vector weights. These relative weights are + typically large for observations considered "important" and smaller for + observations considered less "important". If ``ddof=0`` the array of + weights can be used to assign probabilities to observation vectors. + dtype : data-type, optional + Data-type of the result. By default, the return data-type will have + at least `numpy.float64` precision. + + .. versionadded:: 1.20 + + Returns + ------- + out : ndarray + The covariance matrix of the variables. + + See Also + -------- + corrcoef : Normalized covariance matrix + + Notes + ----- + Assume that the observations are in the columns of the observation + array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The + steps to compute the weighted covariance are as follows:: + + >>> m = np.arange(10, dtype=np.float64) + >>> f = np.arange(10) * 2 + >>> a = np.arange(10) ** 2. + >>> ddof = 1 + >>> w = f * a + >>> v1 = np.sum(w) + >>> v2 = np.sum(w * a) + >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 + >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) + + Note that when ``a == 1``, the normalization factor + ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` + as it should. 
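+
+    As a quick, purely illustrative consistency check, the value computed by
+    the snippet above matches what `cov` itself returns for the same data
+    and weights:
+
+    >>> np.allclose(cov, np.cov(np.arange(10, dtype=np.float64),
+    ...                         fweights=f, aweights=a))
+    True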
+ + Examples + -------- + >>> import numpy as np + + Consider two variables, :math:`x_0` and :math:`x_1`, which + correlate perfectly, but in opposite directions: + + >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T + >>> x + array([[0, 1, 2], + [2, 1, 0]]) + + Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance + matrix shows this clearly: + + >>> np.cov(x) + array([[ 1., -1.], + [-1., 1.]]) + + Note that element :math:`C_{0,1}`, which shows the correlation between + :math:`x_0` and :math:`x_1`, is negative. + + Further, note how `x` and `y` are combined: + + >>> x = [-2.1, -1, 4.3] + >>> y = [3, 1.1, 0.12] + >>> X = np.stack((x, y), axis=0) + >>> np.cov(X) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x, y) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x) + array(11.71) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError( + "ddof must be integer") + + # Handles complex arrays too + m = np.asarray(m) + if m.ndim > 2: + raise ValueError("m has more than 2 dimensions") + + if y is not None: + y = np.asarray(y) + if y.ndim > 2: + raise ValueError("y has more than 2 dimensions") + + if dtype is None: + if y is None: + dtype = np.result_type(m, np.float64) + else: + dtype = np.result_type(m, y, np.float64) + + X = array(m, ndmin=2, dtype=dtype) + if not rowvar and m.ndim != 1: + X = X.T + if X.shape[0] == 0: + return np.array([]).reshape(0, 0) + if y is not None: + y = array(y, copy=None, ndmin=2, dtype=dtype) + if not rowvar and y.shape[0] != 1: + y = y.T + X = np.concatenate((X, y), axis=0) + + if ddof is None: + if bias == 0: + ddof = 1 + else: + ddof = 0 + + # Get the product of frequencies and weights + w = None + if fweights is not None: + fweights = np.asarray(fweights, dtype=float) + if not np.all(fweights == np.around(fweights)): + raise TypeError( + "fweights must be integer") + if fweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional fweights") + if fweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and fweights") + if any(fweights < 0): + raise ValueError( + "fweights cannot be negative") + w = fweights + if aweights is not None: + aweights = np.asarray(aweights, dtype=float) + if aweights.ndim > 1: + raise RuntimeError( + "cannot handle multidimensional aweights") + if aweights.shape[0] != X.shape[1]: + raise RuntimeError( + "incompatible numbers of samples and aweights") + if any(aweights < 0): + raise ValueError( + "aweights cannot be negative") + if w is None: + w = aweights + else: + w *= aweights + + avg, w_sum = average(X, axis=1, weights=w, returned=True) + w_sum = w_sum[0] + + # Determine the normalization + if w is None: + fact = X.shape[1] - ddof + elif ddof == 0: + fact = w_sum + elif aweights is None: + fact = w_sum - ddof + else: + fact = w_sum - ddof * sum(w * aweights) / w_sum + + if fact <= 0: + warnings.warn("Degrees of freedom <= 0 for slice", + RuntimeWarning, stacklevel=2) + fact = 0.0 + + X -= avg[:, None] + if w is None: + X_T = X.T + else: + X_T = (X * w).T + c = dot(X, X_T.conj()) + c *= np.true_divide(1, fact) + return c.squeeze() + + +def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, + dtype=None): + return (x, y) + + +@array_function_dispatch(_corrcoef_dispatcher) +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, + dtype=None): + """ + Return Pearson product-moment correlation coefficients. 
+
+    Please refer to the documentation for `cov` for more detail. The
+    relationship between the correlation coefficient matrix, `R`, and the
+    covariance matrix, `C`, is
+
+    .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } }
+
+    The values of `R` are between -1 and 1, inclusive.
+
+    Parameters
+    ----------
+    x : array_like
+        A 1-D or 2-D array containing multiple variables and observations.
+        Each row of `x` represents a variable, and each column a single
+        observation of all those variables. Also see `rowvar` below.
+    y : array_like, optional
+        An additional set of variables and observations. `y` has the same
+        shape as `x`.
+    rowvar : bool, optional
+        If `rowvar` is True (default), then each row represents a
+        variable, with observations in the columns. Otherwise, the relationship
+        is transposed: each column represents a variable, while the rows
+        contain observations.
+    bias : _NoValue, optional
+        Has no effect, do not use.
+
+        .. deprecated:: 1.10.0
+    ddof : _NoValue, optional
+        Has no effect, do not use.
+
+        .. deprecated:: 1.10.0
+    dtype : data-type, optional
+        Data-type of the result. By default, the return data-type will have
+        at least `numpy.float64` precision.
+
+        .. versionadded:: 1.20
+
+    Returns
+    -------
+    R : ndarray
+        The correlation coefficient matrix of the variables.
+
+    See Also
+    --------
+    cov : Covariance matrix
+
+    Notes
+    -----
+    Due to floating point rounding the resulting array may not be Hermitian,
+    the diagonal elements may not be 1, and the elements may not satisfy the
+    inequality abs(a) <= 1. The real and imaginary parts are clipped to the
+    interval [-1, 1] in an attempt to improve on that situation, but this
+    does not help much in the complex case.
+
+    This function accepts but discards arguments `bias` and `ddof`. This is
+    for backwards compatibility with previous versions of this function. These
+    arguments had no effect on the return values of the function and can be
+    safely ignored in this and previous versions of numpy.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    In this example we generate two random arrays, ``xarr`` and ``yarr``, and
+    compute the row-wise and column-wise Pearson correlation coefficients,
+    ``R``. Since ``rowvar`` is true by default, we first find the row-wise
+    Pearson correlation coefficients between the variables of ``xarr``.
+
+    >>> rng = np.random.default_rng(seed=42)
+    >>> xarr = rng.random((3, 3))
+    >>> xarr
+    array([[0.77395605, 0.43887844, 0.85859792],
+           [0.69736803, 0.09417735, 0.97562235],
+           [0.7611397 , 0.78606431, 0.12811363]])
+    >>> R1 = np.corrcoef(xarr)
+    >>> R1
+    array([[ 1.        ,  0.99256089, -0.68080986],
+           [ 0.99256089,  1.        , -0.76492172],
+           [-0.68080986, -0.76492172,  1.        ]])
+
+    If we add another set of variables and observations ``yarr``, we can
+    compute the row-wise Pearson correlation coefficients between the
+    variables in ``xarr`` and ``yarr``.
+
+    >>> yarr = rng.random((3, 3))
+    >>> yarr
+    array([[0.45038594, 0.37079802, 0.92676499],
+           [0.64386512, 0.82276161, 0.4434142 ],
+           [0.22723872, 0.55458479, 0.06381726]])
+    >>> R2 = np.corrcoef(xarr, yarr)
+    >>> R2
+    array([[ 1.        ,  0.99256089, -0.68080986,  0.75008178, -0.934284  ,
+            -0.99004057],
+           [ 0.99256089,  1.        , -0.76492172,  0.82502011, -0.97074098,
+            -0.99981569],
+           [-0.68080986, -0.76492172,  1.        , -0.99507202,  0.89721355,
+             0.77714685],
+           [ 0.75008178,  0.82502011, -0.99507202,  1.        , -0.93657855,
+            -0.83571711],
+           [-0.934284  , -0.97074098,  0.89721355, -0.93657855,  1.
, + 0.97517215], + [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215, + 1. ]]) + + Finally if we use the option ``rowvar=False``, the columns are now + being treated as the variables and we will find the column-wise Pearson + correlation coefficients between variables in ``xarr`` and ``yarr``. + + >>> R3 = np.corrcoef(xarr, yarr, rowvar=False) + >>> R3 + array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 , + 0.22423734], + [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587, + -0.44069024], + [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648, + 0.75137473], + [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469, + 0.47536961], + [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. , + -0.46666491], + [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491, + 1. ]]) + + """ + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn('bias and ddof have no effect and are deprecated', + DeprecationWarning, stacklevel=2) + c = cov(x, y, rowvar, dtype=dtype) + try: + d = diag(c) + except ValueError: + # scalar covariance + # nan if incorrect value (nan, inf, 0), 1 otherwise + return c / c + stddev = sqrt(d.real) + c /= stddev[:, None] + c /= stddev[None, :] + + # Clip real and imaginary parts to [-1, 1]. This does not guarantee + # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without + # excessive work. + np.clip(c.real, -1, 1, out=c.real) + if np.iscomplexobj(c): + np.clip(c.imag, -1, 1, out=c.imag) + + return c + + +@set_module('numpy') +def blackman(M): + """ + Return the Blackman window. + + The Blackman window is a taper formed by using the first three + terms of a summation of cosines. It was designed to have close to the + minimal leakage possible. It is close to optimal, only slightly worse + than a Kaiser window. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an empty + array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value one + appears only if the number of samples is odd). + + See Also + -------- + bartlett, hamming, hanning, kaiser + + Notes + ----- + The Blackman window is defined as + + .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) + + Most references to the Blackman window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. It is known as a + "near optimal" tapering function, almost as good (by some measures) + as the kaiser window. + + References + ---------- + Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, + Dover Publications, New York. + + Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. + Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> np.blackman(12) + array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary + 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, + 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, + 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) + + Plot the window and the frequency response. + + .. 
plot::
+        :include-source:
+
+        import matplotlib.pyplot as plt
+        from numpy.fft import fft, fftshift
+        window = np.blackman(51)
+        plt.plot(window)
+        plt.title("Blackman window")
+        plt.ylabel("Amplitude")
+        plt.xlabel("Sample")
+        plt.show()  # doctest: +SKIP
+
+        plt.figure()
+        A = fft(window, 2048) / 25.5
+        mag = np.abs(fftshift(A))
+        freq = np.linspace(-0.5, 0.5, len(A))
+        with np.errstate(divide='ignore', invalid='ignore'):
+            response = 20 * np.log10(mag)
+        response = np.clip(response, -100, 100)
+        plt.plot(freq, response)
+        plt.title("Frequency response of Blackman window")
+        plt.ylabel("Magnitude [dB]")
+        plt.xlabel("Normalized frequency [cycles per sample]")
+        plt.axis('tight')
+        plt.show()
+
+    """
+    # Ensures at least float64 via 0.0. M should be an integer, but conversion
+    # to double is safe for a range.
+    values = np.array([0.0, M])
+    M = values[1]
+
+    if M < 1:
+        return array([], dtype=values.dtype)
+    if M == 1:
+        return ones(1, dtype=values.dtype)
+    n = arange(1 - M, M, 2)
+    return 0.42 + 0.5 * cos(pi * n / (M - 1)) + 0.08 * cos(2.0 * pi * n / (M - 1))
+
+
+@set_module('numpy')
+def bartlett(M):
+    """
+    Return the Bartlett window.
+
+    The Bartlett window is very similar to a triangular window, except
+    that the end points are at zero. It is often used in signal
+    processing for tapering a signal, without generating too much
+    ripple in the frequency domain.
+
+    Parameters
+    ----------
+    M : int
+        Number of points in the output window. If zero or less, an
+        empty array is returned.
+
+    Returns
+    -------
+    out : array
+        The triangular window, with the maximum value normalized to one
+        (the value one appears only if the number of samples is odd), with
+        the first and last samples equal to zero.
+
+    See Also
+    --------
+    blackman, hamming, hanning, kaiser
+
+    Notes
+    -----
+    The Bartlett window is defined as
+
+    .. math:: w(n) = \\frac{2}{M-1} \\left(
+              \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
+              \\right)
+
+    Most references to the Bartlett window come from the signal processing
+    literature, where it is used as one of many windowing functions for
+    smoothing values. Note that convolution with this window produces linear
+    interpolation. It is also known as an apodization (which means "removing
+    the foot", i.e. smoothing discontinuities at the beginning and end of the
+    sampled signal) or tapering function. The Fourier transform of the
+    Bartlett window is the product of two sinc functions. Note the excellent
+    discussion in Kanasewich [2]_.
+
+    References
+    ----------
+    .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
+           Biometrika 37, 1-16, 1950.
+    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
+           The University of Alberta Press, 1975, pp. 109-110.
+    .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
+           Processing", Prentice-Hall, 1999, pp. 468-471.
+    .. [4] Wikipedia, "Window function",
+           https://en.wikipedia.org/wiki/Window_function
+    .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
+           "Numerical Recipes", Cambridge University Press, 1986, page 429.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> np.bartlett(12)
+    array([ 0.        ,  0.18181818,  0.36363636,  0.54545455,  0.72727273, # may vary
+            0.90909091,  0.90909091,  0.72727273,  0.54545455,  0.36363636,
+            0.18181818,  0.        ])
+
+    Plot the window and its frequency response (requires matplotlib).
+
+    .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.bartlett(51) + plt.plot(window) + plt.title("Bartlett window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Bartlett window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return where(less_equal(n, 0), 1 + n / (M - 1), 1 - n / (M - 1)) + + +@set_module('numpy') +def hanning(M): + """ + Return the Hanning window. + + The Hanning window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray, shape(M,) + The window, with the maximum value normalized to one (the value + one appears only if `M` is odd). + + See Also + -------- + bartlett, blackman, hamming, kaiser + + Notes + ----- + The Hanning window is defined as + + .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hanning was named for Julius von Hann, an Austrian meteorologist. + It is also known as the Cosine Bell. Some authors prefer that it be + called a Hann window, to help avoid confusion with the very similar + Hamming window. + + Most references to the Hanning window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", + The University of Alberta Press, 1975, pp. 106-108. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> import numpy as np + >>> np.hanning(12) + array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) + + Plot the window and its frequency response. + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.hanning(51) + plt.plot(window) + plt.title("Hann window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + with np.errstate(divide='ignore', invalid='ignore'): + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of the Hann window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return 0.5 + 0.5 * cos(pi * n / (M - 1)) + + +@set_module('numpy') +def hamming(M): + """ + Return the Hamming window. + + The Hamming window is a taper formed by using a weighted cosine. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + + Returns + ------- + out : ndarray + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hanning, kaiser + + Notes + ----- + The Hamming window is defined as + + .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) + \\qquad 0 \\leq n \\leq M-1 + + The Hamming was named for R. W. Hamming, an associate of J. W. Tukey + and is described in Blackman and Tukey. It was recommended for + smoothing the truncated autocovariance function in the time domain. + Most references to the Hamming window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power + spectra, Dover Publications, New York. + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 109-110. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, + "Numerical Recipes", Cambridge University Press, 1986, page 425. + + Examples + -------- + >>> import numpy as np + >>> np.hamming(12) + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary + 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, + 0.15302337, 0.08 ]) + + Plot the window and the frequency response. + + .. 
plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.hamming(51) + plt.plot(window) + plt.title("Hamming window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Hamming window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. + values = np.array([0.0, M]) + M = values[1] + + if M < 1: + return array([], dtype=values.dtype) + if M == 1: + return ones(1, dtype=values.dtype) + n = arange(1 - M, M, 2) + return 0.54 + 0.46 * cos(pi * n / (M - 1)) + + +## Code from cephes for i0 + +_i0A = [ + -4.41534164647933937950E-18, + 3.33079451882223809783E-17, + -2.43127984654795469359E-16, + 1.71539128555513303061E-15, + -1.16853328779934516808E-14, + 7.67618549860493561688E-14, + -4.85644678311192946090E-13, + 2.95505266312963983461E-12, + -1.72682629144155570723E-11, + 9.67580903537323691224E-11, + -5.18979560163526290666E-10, + 2.65982372468238665035E-9, + -1.30002500998624804212E-8, + 6.04699502254191894932E-8, + -2.67079385394061173391E-7, + 1.11738753912010371815E-6, + -4.41673835845875056359E-6, + 1.64484480707288970893E-5, + -5.75419501008210370398E-5, + 1.88502885095841655729E-4, + -5.76375574538582365885E-4, + 1.63947561694133579842E-3, + -4.32430999505057594430E-3, + 1.05464603945949983183E-2, + -2.37374148058994688156E-2, + 4.93052842396707084878E-2, + -9.49010970480476444210E-2, + 1.71620901522208775349E-1, + -3.04682672343198398683E-1, + 6.76795274409476084995E-1 + ] + +_i0B = [ + -7.23318048787475395456E-18, + -4.83050448594418207126E-18, + 4.46562142029675999901E-17, + 3.46122286769746109310E-17, + -2.82762398051658348494E-16, + -3.42548561967721913462E-16, + 1.77256013305652638360E-15, + 3.81168066935262242075E-15, + -9.55484669882830764870E-15, + -4.15056934728722208663E-14, + 1.54008621752140982691E-14, + 3.85277838274214270114E-13, + 7.18012445138366623367E-13, + -1.79417853150680611778E-12, + -1.32158118404477131188E-11, + -3.14991652796324136454E-11, + 1.18891471078464383424E-11, + 4.94060238822496958910E-10, + 3.39623202570838634515E-9, + 2.26666899049817806459E-8, + 2.04891858946906374183E-7, + 2.89137052083475648297E-6, + 6.88975834691682398426E-5, + 3.36911647825569408990E-3, + 8.04490411014108831608E-1 + ] + + +def _chbevl(x, vals): + b0 = vals[0] + b1 = 0.0 + + for i in range(1, len(vals)): + b2 = b1 + b1 = b0 + b0 = x * b1 - b2 + vals[i] + + return 0.5 * (b0 - b2) + + +def _i0_1(x): + return exp(x) * _chbevl(x / 2.0 - 2, _i0A) + + +def _i0_2(x): + return exp(x) * _chbevl(32.0 / x - 2.0, _i0B) / sqrt(x) + + +def _i0_dispatcher(x): + return (x,) + + +@array_function_dispatch(_i0_dispatcher) +def i0(x): + """ + Modified Bessel function of the first kind, order 0. + + Usually denoted :math:`I_0`. + + Parameters + ---------- + x : array_like of float + Argument of the Bessel function. + + Returns + ------- + out : ndarray, shape = x.shape, dtype = float + The modified Bessel function evaluated at each of the elements of `x`. 
+ + See Also + -------- + scipy.special.i0, scipy.special.iv, scipy.special.ive + + Notes + ----- + The scipy implementation is recommended over this function: it is a + proper ufunc written in C, and more than an order of magnitude faster. + + We use the algorithm published by Clenshaw [1]_ and referenced by + Abramowitz and Stegun [2]_, for which the function domain is + partitioned into the two intervals [0,8] and (8,inf), and Chebyshev + polynomial expansions are employed in each interval. Relative error on + the domain [0,30] using IEEE arithmetic is documented [3]_ as having a + peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). + + References + ---------- + .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in + *National Physical Laboratory Mathematical Tables*, vol. 5, London: + Her Majesty's Stationery Office, 1962. + .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical + Functions*, 10th printing, New York: Dover, 1964, pp. 379. + https://personal.math.ubc.ca/~cbm/aands/page_379.htm + .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero + + Examples + -------- + >>> import numpy as np + >>> np.i0(0.) + array(1.0) + >>> np.i0([0, 1, 2, 3]) + array([1. , 1.26606588, 2.2795853 , 4.88079259]) + + """ + x = np.asanyarray(x) + if x.dtype.kind == 'c': + raise TypeError("i0 not supported for complex values") + if x.dtype.kind != 'f': + x = x.astype(float) + x = np.abs(x) + return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) + +## End of cephes code for i0 + + +@set_module('numpy') +def kaiser(M, beta): + """ + Return the Kaiser window. + + The Kaiser window is a taper formed by using a Bessel function. + + Parameters + ---------- + M : int + Number of points in the output window. If zero or less, an + empty array is returned. + beta : float + Shape parameter for window. + + Returns + ------- + out : array + The window, with the maximum value normalized to one (the value + one appears only if the number of samples is odd). + + See Also + -------- + bartlett, blackman, hamming, hanning + + Notes + ----- + The Kaiser window is defined as + + .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} + \\right)/I_0(\\beta) + + with + + .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, + + where :math:`I_0` is the modified zeroth-order Bessel function. + + The Kaiser was named for Jim Kaiser, who discovered a simple + approximation to the DPSS window based on Bessel functions. The Kaiser + window is a very good approximation to the Digital Prolate Spheroidal + Sequence, or Slepian window, which is the transform which maximizes the + energy in the main lobe of the window relative to total energy. + + The Kaiser can approximate many other windows by varying the beta + parameter. + + ==== ======================= + beta Window shape + ==== ======================= + 0 Rectangular + 5 Similar to a Hamming + 6 Similar to a Hanning + 8.6 Similar to a Blackman + ==== ======================= + + A beta value of 14 is probably a good starting point. Note that as beta + gets large, the window narrows, and so the number of samples needs to be + large enough to sample the increasingly narrow spike, otherwise NaNs will + get returned. + + Most references to the Kaiser window come from the signal processing + literature, where it is used as one of many windowing functions for + smoothing values. It is also known as an apodization (which means + "removing the foot", i.e. 
smoothing discontinuities at the beginning + and end of the sampled signal) or tapering function. + + References + ---------- + .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by + digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. + John Wiley and Sons, New York, (1966). + .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The + University of Alberta Press, 1975, pp. 177-178. + .. [3] Wikipedia, "Window function", + https://en.wikipedia.org/wiki/Window_function + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> np.kaiser(12, 14) + array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary + 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, + 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, + 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) + + + Plot the window and the frequency response. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + from numpy.fft import fft, fftshift + window = np.kaiser(51, 14) + plt.plot(window) + plt.title("Kaiser window") + plt.ylabel("Amplitude") + plt.xlabel("Sample") + plt.show() + + plt.figure() + A = fft(window, 2048) / 25.5 + mag = np.abs(fftshift(A)) + freq = np.linspace(-0.5, 0.5, len(A)) + response = 20 * np.log10(mag) + response = np.clip(response, -100, 100) + plt.plot(freq, response) + plt.title("Frequency response of Kaiser window") + plt.ylabel("Magnitude [dB]") + plt.xlabel("Normalized frequency [cycles per sample]") + plt.axis('tight') + plt.show() + + """ + # Ensures at least float64 via 0.0. M should be an integer, but conversion + # to double is safe for a range. (Simplified result_type with 0.0 + # strongly typed. result-type is not/less order sensitive, but that mainly + # matters for integers anyway.) + values = np.array([0.0, M, beta]) + M = values[1] + beta = values[2] + + if M == 1: + return np.ones(1, dtype=values.dtype) + n = arange(0, M) + alpha = (M - 1) / 2.0 + return i0(beta * sqrt(1 - ((n - alpha) / alpha)**2.0)) / i0(beta) + + +def _sinc_dispatcher(x): + return (x,) + + +@array_function_dispatch(_sinc_dispatcher) +def sinc(x): + r""" + Return the normalized sinc function. + + The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument + :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not + only everywhere continuous but also infinitely differentiable. + + .. note:: + + Note the normalization factor of ``pi`` used in the definition. + This is the most commonly used definition in signal processing. + Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function + :math:`\sin(x)/x` that is more common in mathematics. + + Parameters + ---------- + x : ndarray + Array (possibly multi-dimensional) of values for which to calculate + ``sinc(x)``. + + Returns + ------- + out : ndarray + ``sinc(x)``, which has the same shape as the input. + + Notes + ----- + The name sinc is short for "sine cardinal" or "sinus cardinalis". + + The sinc function is used in various signal processing applications, + including in anti-aliasing, in the construction of a Lanczos resampling + filter, and in interpolation. + + For bandlimited interpolation of discrete-time signals, the ideal + interpolation kernel is proportional to the sinc function. + + References + ---------- + .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web + Resource. https://mathworld.wolfram.com/SincFunction.html + .. 
[2] Wikipedia, "Sinc function",
+           https://en.wikipedia.org/wiki/Sinc_function
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import matplotlib.pyplot as plt
+    >>> x = np.linspace(-4, 4, 41)
+    >>> np.sinc(x)
+    array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary
+           -8.90384387e-02, -5.84680802e-02,  3.89804309e-17,
+            6.68206631e-02,  1.16434881e-01,  1.26137788e-01,
+            8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
+           -1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
+            3.89804309e-17,  2.33872321e-01,  5.04551152e-01,
+            7.56826729e-01,  9.35489284e-01,  1.00000000e+00,
+            9.35489284e-01,  7.56826729e-01,  5.04551152e-01,
+            2.33872321e-01,  3.89804309e-17, -1.55914881e-01,
+           -2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
+           -3.89804309e-17,  8.50444803e-02,  1.26137788e-01,
+            1.16434881e-01,  6.68206631e-02,  3.89804309e-17,
+           -5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
+           -4.92362781e-02, -3.89804309e-17])
+
+    >>> plt.plot(x, np.sinc(x))
+    [<matplotlib.lines.Line2D object at 0x...>]
+    >>> plt.title("Sinc Function")
+    Text(0.5, 1.0, 'Sinc Function')
+    >>> plt.ylabel("Amplitude")
+    Text(0, 0.5, 'Amplitude')
+    >>> plt.xlabel("X")
+    Text(0.5, 0, 'X')
+    >>> plt.show()
+
+    """
+    x = np.asanyarray(x)
+    x = pi * x
+    # Hope that 1e-20 is sufficient for objects...
+    eps = np.finfo(x.dtype).eps if x.dtype.kind == "f" else 1e-20
+    y = where(x, x, eps)
+    return sin(y) / y
+
+
+def _ureduce(a, func, keepdims=False, **kwargs):
+    """
+    Internal function.
+    Call `func` with `a` as first argument, swapping the axes to use an
+    extended axis on functions that don't support it natively.
+
+    Returns result and a.shape with axis dims set to 1.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    func : callable
+        Reduction function capable of receiving a single axis argument.
+        It is called with `a` as first argument followed by `kwargs`.
+    kwargs : keyword arguments
+        additional keyword arguments to pass to `func`.
+
+    Returns
+    -------
+    result : tuple
+        Result of func(a, **kwargs) and a.shape with axis dims set to 1
+        which can be used to reshape the result to the same shape a ufunc with
+        keepdims=True would produce.
+
+    """
+    a = np.asanyarray(a)
+    axis = kwargs.get('axis')
+    out = kwargs.get('out')
+
+    if keepdims is np._NoValue:
+        keepdims = False
+
+    nd = a.ndim
+    if axis is not None:
+        axis = _nx.normalize_axis_tuple(axis, nd)
+
+        if keepdims and out is not None:
+            index_out = tuple(
+                0 if i in axis else slice(None) for i in range(nd))
+            kwargs['out'] = out[(Ellipsis, ) + index_out]
+
+        if len(axis) == 1:
+            kwargs['axis'] = axis[0]
+        else:
+            keep = set(range(nd)) - set(axis)
+            nkeep = len(keep)
+            # swap axis that should not be reduced to front
+            for i, s in enumerate(sorted(keep)):
+                a = a.swapaxes(i, s)
+            # merge reduced axis
+            a = a.reshape(a.shape[:nkeep] + (-1,))
+            kwargs['axis'] = -1
+    elif keepdims and out is not None:
+        index_out = (0, ) * nd
+        kwargs['out'] = out[(Ellipsis, ) + index_out]
+
+    r = func(a, **kwargs)
+
+    if out is not None:
+        return out
+
+    if keepdims:
+        if axis is None:
+            index_r = (np.newaxis, ) * nd
+        else:
+            index_r = tuple(
+                np.newaxis if i in axis else slice(None)
+                for i in range(nd))
+        r = r[(Ellipsis, ) + index_r]
+
+    return r
+
+
+def _median_dispatcher(
+        a, axis=None, out=None, overwrite_input=None, keepdims=None):
+    return (a, out)
+
+
+@array_function_dispatch(_median_dispatcher)
+def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
+    """
+    Compute the median along the specified axis.
+
+    Returns the median of the array elements.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : {int, sequence of int, None}, optional
+        Axis or axes along which the medians are computed. The default,
+        axis=None, will compute the median along a flattened version of
+        the array. If a sequence of axes, the array is first flattened
+        along the given axes, then the median is computed along the
+        resulting flattened axis.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type (of the output) will be cast if necessary.
+    overwrite_input : bool, optional
+        If True, then allow use of memory of input array `a` for
+        calculations. The input array will be modified by the call to
+        `median`. This will save memory when you do not need to preserve
+        the contents of the input array. Treat the input as undefined,
+        but it will probably be fully or partially sorted. Default is
+        False. If `overwrite_input` is ``True`` and `a` is not already an
+        `ndarray`, an error will be raised.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `arr`.
+
+    Returns
+    -------
+    median : ndarray
+        A new array holding the result. If the input contains integers
+        or floats smaller than ``float64``, then the output data-type is
+        ``np.float64``. Otherwise, the data-type of the output is the
+        same as that of the input. If `out` is specified, that array is
+        returned instead.
+
+    See Also
+    --------
+    mean, percentile
+
+    Notes
+    -----
+    Given a vector ``V`` of length ``N``, the median of ``V`` is the
+    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+    ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
+    two middle values of ``V_sorted`` when ``N`` is even.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
+    >>> a
+    array([[10,  7,  4],
+           [ 3,  2,  1]])
+    >>> np.median(a)
+    np.float64(3.5)
+    >>> np.median(a, axis=0)
+    array([6.5, 4.5, 2.5])
+    >>> np.median(a, axis=1)
+    array([7., 2.])
+    >>> np.median(a, axis=(0, 1))
+    np.float64(3.5)
+    >>> m = np.median(a, axis=0)
+    >>> out = np.zeros_like(m)
+    >>> np.median(a, axis=0, out=m)
+    array([6.5, 4.5, 2.5])
+    >>> m
+    array([6.5, 4.5, 2.5])
+    >>> b = a.copy()
+    >>> np.median(b, axis=1, overwrite_input=True)
+    array([7., 2.])
+    >>> assert not np.all(a==b)
+    >>> b = a.copy()
+    >>> np.median(b, axis=None, overwrite_input=True)
+    np.float64(3.5)
+    >>> assert not np.all(a==b)
+
+    """
+    return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
+                    overwrite_input=overwrite_input)
+
+
+def _median(a, axis=None, out=None, overwrite_input=False):
+    # can't reasonably be implemented in terms of percentile as we have to
+    # call mean to not break astropy
+    a = np.asanyarray(a)
+
+    # Set the partition indexes
+    if axis is None:
+        sz = a.size
+    else:
+        sz = a.shape[axis]
+    if sz % 2 == 0:
+        szh = sz // 2
+        kth = [szh - 1, szh]
+    else:
+        kth = [(sz - 1) // 2]
+
+    # We have to check for NaNs (as of writing 'M' doesn't actually work).
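+    # Worked example for the partition indexes above (explanatory comment
+    # added here, not in the original source): for sz == 4 the median is the
+    # mean of the two middle order statistics, so kth == [1, 2]; for sz == 5
+    # only kth == [2] is needed. For inexact and datetime-like dtypes, -1 is
+    # appended below so that the largest element (where NaNs sort to) is
+    # also moved into place by the partition.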
+ supports_nans = np.issubdtype(a.dtype, np.inexact) or a.dtype.kind in 'Mm' + if supports_nans: + kth.append(-1) + + if overwrite_input: + if axis is None: + part = a.ravel() + part.partition(kth) + else: + a.partition(kth, axis=axis) + part = a + else: + part = partition(a, kth, axis=axis) + + if part.shape == (): + # make 0-D arrays work + return part.item() + if axis is None: + axis = 0 + + indexer = [slice(None)] * part.ndim + index = part.shape[axis] // 2 + if part.shape[axis] % 2 == 1: + # index with slice to allow mean (below) to work + indexer[axis] = slice(index, index + 1) + else: + indexer[axis] = slice(index - 1, index + 1) + indexer = tuple(indexer) + + # Use mean in both odd and even case to coerce data type, + # using out array if needed. + rout = mean(part[indexer], axis=axis, out=out) + if supports_nans and sz > 0: + # If nans are possible, warn and replace by nans like mean would. + rout = np.lib._utils_impl._median_nancheck(part, rout, axis) + + return rout + + +def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_percentile_dispatcher) +def percentile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + weights=None, + interpolation=None): + """ + Compute the q-th percentile of the data along the specified axis. + + Returns the q-th percentile(s) of the array elements. + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Percentage or sequence of percentages for the percentiles to compute. + Values must be between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The + default is to compute the percentile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. 
+ + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the percentile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + See the notes for more details. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + median : equivalent to ``percentile(..., 50)`` + nanpercentile + quantile : equivalent to percentile, except q in the range [0, 1]. + + Notes + ----- + The behavior of `numpy.percentile` with percentage `q` is + that of `numpy.quantile` with argument ``q/100``. + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.percentile(a, 50) + 3.5 + >>> np.percentile(a, 50, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.percentile(a, 50, axis=1) + array([7., 2.]) + >>> np.percentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + + >>> m = np.percentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.percentile(a, 50, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + + >>> b = a.copy() + >>> np.percentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + The different methods can be visualized graphically: + + .. plot:: + + import matplotlib.pyplot as plt + + a = np.arange(4) + p = np.linspace(0, 100, 6001) + ax = plt.gca() + lines = [ + ('linear', '-', 'C0'), + ('inverted_cdf', ':', 'C1'), + # Almost the same as `inverted_cdf`: + ('averaged_inverted_cdf', '-.', 'C1'), + ('closest_observation', ':', 'C2'), + ('interpolated_inverted_cdf', '--', 'C1'), + ('hazen', '--', 'C3'), + ('weibull', '-.', 'C4'), + ('median_unbiased', '--', 'C5'), + ('normal_unbiased', '-.', 'C6'), + ] + for method, style, color in lines: + ax.plot( + p, np.percentile(a, p, method=method), + label=method, linestyle=style, color=color) + ax.set( + title='Percentiles for different methods and data: ' + str(a), + xlabel='Percentile', + ylabel='Estimated percentile value', + yticks=a) + ax.legend(bbox_to_anchor=(1.03, 1)) + plt.tight_layout() + plt.show() + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 
361-365, 1996 + + """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "percentile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float) + # by making the divisor have the dtype of the data array. + q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + if not _quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_quantile_dispatcher) +def quantile(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + *, + weights=None, + interpolation=None): + """ + Compute the q-th quantile of the data along the specified axis. + + Parameters + ---------- + a : array_like of real numbers + Input array or object that can be converted to an array. + q : array_like of float + Probability or sequence of probabilities of the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The default is + to compute the quantile(s) along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + The recommended options, numbered as they appear in [1]_, are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. For backward compatibility + with previous versions of NumPy, the following discontinuous variations + of the default 'linear' (7.) option are available: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + See Notes for details. + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. 
+ + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the quantile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + See the notes for more details. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis + of the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean + percentile : equivalent to quantile, but with q in the range [0, 100]. + median : equivalent to ``quantile(..., 0.5)`` + nanquantile + + Notes + ----- + Given a sample `a` from an underlying distribution, `quantile` provides a + nonparametric estimate of the inverse cumulative distribution function. + + By default, this is done by interpolating between adjacent elements in + ``y``, a sorted copy of `a`:: + + (1-g)*y[j] + g*y[j+1] + + where the index ``j`` and coefficient ``g`` are the integral and + fractional components of ``q * (n-1)``, and ``n`` is the number of + elements in the sample. + + This is a special case of Equation 1 of H&F [1]_. More generally, + + - ``j = (q*n + m - 1) // 1``, and + - ``g = (q*n + m - 1) % 1``, + + where ``m`` may be defined according to several different conventions. + The preferred convention may be selected using the ``method`` parameter: + + =============================== =============== =============== + ``method`` number in H&F ``m`` + =============================== =============== =============== + ``interpolated_inverted_cdf`` 4 ``0`` + ``hazen`` 5 ``1/2`` + ``weibull`` 6 ``q`` + ``linear`` (default) 7 ``1 - q`` + ``median_unbiased`` 8 ``q/3 + 1/3`` + ``normal_unbiased`` 9 ``q/4 + 3/8`` + =============================== =============== =============== + + Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to + ``n - 1`` when the results of the formula would be outside the allowed + range of non-negative indices. The ``- 1`` in the formulas for ``j`` and + ``g`` accounts for Python's 0-based indexing. + + The table above includes only the estimators from H&F that are continuous + functions of probability `q` (estimators 4-9). NumPy also provides the + three discontinuous estimators from H&F (estimators 1-3), where ``j`` is + defined as above, ``m`` is defined as follows, and ``g`` is a function + of the real-valued ``index = q*n + m - 1`` and ``j``. + + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and + ``g = (1 + int(index - j > 0)) / 2`` + 3. ``closest_observation``: ``m = -1/2`` and + ``g = 1 - int((index == j) & (j%2 == 1))`` + + For backward compatibility with previous versions of NumPy, `quantile` + provides four additional discontinuous estimators. 
Like + ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``, + but ``g`` is defined as follows. + + - ``lower``: ``g = 0`` + - ``midpoint``: ``g = 0.5`` + - ``higher``: ``g = 1`` + - ``nearest``: ``g = (q*(n-1) % 1) > 0.5`` + + **Weighted quantiles:** + More formally, the quantile at probability level :math:`q` of a cumulative + distribution function :math:`F(y)=P(Y \\leq y)` with probability measure + :math:`P` is defined as any number :math:`x` that fulfills the + *coverage conditions* + + .. math:: P(Y < x) \\leq q \\quad\\text{and}\\quad P(Y \\leq x) \\geq q + + with random variable :math:`Y\\sim P`. + Sample quantiles, the result of `quantile`, provide nonparametric + estimation of the underlying population counterparts, represented by the + unknown :math:`F`, given a data vector `a` of length ``n``. + + Some of the estimators above arise when one considers :math:`F` as the + empirical distribution function of the data, i.e. + :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. + Then, different methods correspond to different choices of :math:`x` that + fulfill the above coverage conditions. Methods that follow this approach + are ``inverted_cdf`` and ``averaged_inverted_cdf``. + + For weighted quantiles, the coverage conditions still hold. The + empirical cumulative distribution is simply replaced by its weighted + version, i.e. + :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. + Only ``method="inverted_cdf"`` supports weights. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10, 7, 4], [3, 2, 1]]) + >>> a + array([[10, 7, 4], + [ 3, 2, 1]]) + >>> np.quantile(a, 0.5) + 3.5 + >>> np.quantile(a, 0.5, axis=0) + array([6.5, 4.5, 2.5]) + >>> np.quantile(a, 0.5, axis=1) + array([7., 2.]) + >>> np.quantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.quantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.quantile(a, 0.5, axis=0, out=out) + array([6.5, 4.5, 2.5]) + >>> m + array([6.5, 4.5, 2.5]) + >>> b = a.copy() + >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a == b) + + See also `numpy.percentile` for a visualization of most methods. + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + if interpolation is not None: + method = _check_interpolation_as_method( + method, interpolation, "quantile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float). + if isinstance(q, (int, float)) and a.dtype.kind == "f": + q = np.asanyarray(q, dtype=a.dtype) + else: + q = np.asanyarray(q) + + if not _quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. 
" + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _quantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _quantile_unchecked(a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=False, + weights=None): + """Assumes that q is in [0, 1], and is an ndarray""" + return _ureduce(a, + func=_quantile_ureduce_func, + q=q, + weights=weights, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method) + + +def _quantile_is_valid(q): + # avoid expensive reductions, relevant for arrays with < O(1000) elements + if q.ndim == 1 and q.size < 10: + for i in range(q.size): + if not (0.0 <= q[i] <= 1.0): + return False + elif not (q.min() >= 0 and q.max() <= 1): + return False + return True + + +def _check_interpolation_as_method(method, interpolation, fname): + # Deprecated NumPy 1.22, 2021-11-08 + warnings.warn( + f"the `interpolation=` argument to {fname} was renamed to " + "`method=`, which has additional options.\n" + "Users of the modes 'nearest', 'lower', 'higher', or " + "'midpoint' are encouraged to review the method they used. " + "(Deprecated NumPy 1.22)", + DeprecationWarning, stacklevel=4) + if method != "linear": + # sanity check, we assume this basically never happens + raise TypeError( + "You shall not pass both `method` and `interpolation`!\n" + "(`interpolation` is Deprecated in favor of `method`)") + return interpolation + + +def _compute_virtual_index(n, quantiles, alpha: float, beta: float): + """ + Compute the floating point indexes of an array for the linear + interpolation of quantiles. + n : array_like + The sample sizes. + quantiles : array_like + The quantiles values. + alpha : float + A constant used to correct the index computed. + beta : float + A constant used to correct the index computed. + + alpha and beta values depend on the chosen method + (see quantile documentation) + + Reference: + Hyndman&Fan paper "Sample Quantiles in Statistical Packages", + DOI: 10.1080/00031305.1996.10473566 + """ + return n * quantiles + ( + alpha + quantiles * (1 - alpha - beta) + ) - 1 + + +def _get_gamma(virtual_indexes, previous_indexes, method): + """ + Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation + of quantiles. + + virtual_indexes : array_like + The indexes where the percentile is supposed to be found in the sorted + sample. + previous_indexes : array_like + The floor values of virtual_indexes. + interpolation : dict + The interpolation method chosen, which may have a specific rule + modifying gamma. + + gamma is usually the fractional part of virtual_indexes but can be modified + by the interpolation method. + """ + gamma = np.asanyarray(virtual_indexes - previous_indexes) + gamma = method["fix_gamma"](gamma, virtual_indexes) + # Ensure both that we have an array, and that we keep the dtype + # (which may have been matched to the input array). + return np.asanyarray(gamma, dtype=virtual_indexes.dtype) + + +def _lerp(a, b, t, out=None): + """ + Compute the linear interpolation weighted by gamma on each point of + two same shape array. + + a : array_like + Left bound. + b : array_like + Right bound. + t : array_like + The interpolation weight. + out : array_like + Output array. 
+ """ + diff_b_a = subtract(b, a) + # asanyarray is a stop-gap until gh-13105 + lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) + subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5, + casting='unsafe', dtype=type(lerp_interpolation.dtype)) + if lerp_interpolation.ndim == 0 and out is None: + lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays + return lerp_interpolation + + +def _get_gamma_mask(shape, default_value, conditioned_value, where): + out = np.full(shape, default_value) + np.copyto(out, conditioned_value, where=where, casting="unsafe") + return out + + +def _discrete_interpolation_to_boundaries(index, gamma_condition_fun): + previous = np.floor(index) + next = previous + 1 + gamma = index - previous + res = _get_gamma_mask(shape=index.shape, + default_value=next, + conditioned_value=previous, + where=gamma_condition_fun(gamma, index) + ).astype(np.intp) + # Some methods can lead to out-of-bound integers, clip them: + res[res < 0] = 0 + return res + + +def _closest_observation(n, quantiles): + # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). + # Order is 1-based so for zero-based indexing round to nearest odd index. + gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, + gamma_fun) + + +def _inverted_cdf(n, quantiles): + gamma_fun = lambda gamma, _: (gamma == 0) + return _discrete_interpolation_to_boundaries((n * quantiles) - 1, + gamma_fun) + + +def _quantile_ureduce_func( + a: np.array, + q: np.array, + weights: np.array, + axis: int | None = None, + out=None, + overwrite_input: bool = False, + method="linear", +) -> np.array: + if q.ndim > 2: + # The code below works fine for nd, but it might not have useful + # semantics. For now, keep the supported dimensions the same as it was + # before. + raise ValueError("q must be a scalar or 1d") + if overwrite_input: + if axis is None: + axis = 0 + arr = a.ravel() + wgt = None if weights is None else weights.ravel() + else: + arr = a + wgt = weights + elif axis is None: + axis = 0 + arr = a.flatten() + wgt = None if weights is None else weights.flatten() + else: + arr = a.copy() + wgt = weights + result = _quantile(arr, + quantiles=q, + axis=axis, + method=method, + out=out, + weights=wgt) + return result + + +def _get_indexes(arr, virtual_indexes, valid_values_count): + """ + Get the valid indexes of arr neighbouring virtual_indexes. 
+    Note
+    ----
+    This is a companion function to the linear interpolation of
+    quantiles.
+
+    Returns
+    -------
+    (previous_indexes, next_indexes): Tuple
+        A tuple of the indexes neighbouring `virtual_indexes`.
+    """
+    previous_indexes = np.asanyarray(np.floor(virtual_indexes))
+    next_indexes = np.asanyarray(previous_indexes + 1)
+    indexes_above_bounds = virtual_indexes >= valid_values_count - 1
+    # When indexes is above max index, take the max value of the array
+    if indexes_above_bounds.any():
+        previous_indexes[indexes_above_bounds] = -1
+        next_indexes[indexes_above_bounds] = -1
+    # When indexes is below min index, take the min value of the array
+    indexes_below_bounds = virtual_indexes < 0
+    if indexes_below_bounds.any():
+        previous_indexes[indexes_below_bounds] = 0
+        next_indexes[indexes_below_bounds] = 0
+    if np.issubdtype(arr.dtype, np.inexact):
+        # After the sort, slices having NaNs will have for last element a NaN
+        virtual_indexes_nans = np.isnan(virtual_indexes)
+        if virtual_indexes_nans.any():
+            previous_indexes[virtual_indexes_nans] = -1
+            next_indexes[virtual_indexes_nans] = -1
+    previous_indexes = previous_indexes.astype(np.intp)
+    next_indexes = next_indexes.astype(np.intp)
+    return previous_indexes, next_indexes
+
+
+def _quantile(
+    arr: np.array,
+    quantiles: np.array,
+    axis: int = -1,
+    method="linear",
+    out=None,
+    weights=None,
+):
+    """
+    Private function that doesn't support extended axis or keepdims.
+    These methods are extended to this function using _ureduce.
+    See nanpercentile for parameter usage.
+    It computes the quantiles of the array for the given axis.
+    A linear interpolation is performed based on the `interpolation`.
+
+    By default, the method is "linear" where alpha == beta == 1 which
+    performs the 7th method of Hyndman&Fan.
+    With "median_unbiased" we get alpha == beta == 1/3
+    thus the 8th method of Hyndman&Fan.
+    """
+    # --- Setup
+    arr = np.asanyarray(arr)
+    values_count = arr.shape[axis]
+    # The dimensions of `q` are prepended to the output shape, so we need the
+    # axis being sampled from `arr` to be last.
+    if axis != 0:  # But moveaxis is slow, so only call it if necessary.
+        arr = np.moveaxis(arr, axis, destination=0)
+    supports_nans = (
+        np.issubdtype(arr.dtype, np.inexact) or arr.dtype.kind in 'Mm'
+    )
+
+    if weights is None:
+        # --- Computation of indexes
+        # Index where to find the value in the sorted array.
+        # Virtual because it is a floating point value, not a valid index.
+        # The nearest neighbours are used for interpolation
+        try:
+            method_props = _QuantileMethods[method]
+        except KeyError:
+            raise ValueError(
+                f"{method!r} is not a valid method.
Use one of: " + f"{_QuantileMethods.keys()}") from None + virtual_indexes = method_props["get_virtual_index"](values_count, + quantiles) + virtual_indexes = np.asanyarray(virtual_indexes) + + if method_props["fix_gamma"] is None: + supports_integers = True + else: + int_virtual_indices = np.issubdtype(virtual_indexes.dtype, + np.integer) + supports_integers = method == 'linear' and int_virtual_indices + + if supports_integers: + # No interpolation needed, take the points along axis + if supports_nans: + # may contain nan, which would sort to the end + arr.partition( + concatenate((virtual_indexes.ravel(), [-1])), axis=0, + ) + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + arr.partition(virtual_indexes.ravel(), axis=0) + slices_having_nans = np.array(False, dtype=bool) + result = take(arr, virtual_indexes, axis=0, out=out) + else: + previous_indexes, next_indexes = _get_indexes(arr, + virtual_indexes, + values_count) + # --- Sorting + arr.partition( + np.unique(np.concatenate(([0, -1], + previous_indexes.ravel(), + next_indexes.ravel(), + ))), + axis=0) + if supports_nans: + slices_having_nans = np.isnan(arr[-1, ...]) + else: + slices_having_nans = None + # --- Get values from indexes + previous = arr[previous_indexes] + next = arr[next_indexes] + # --- Linear interpolation + gamma = _get_gamma(virtual_indexes, previous_indexes, method_props) + result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) + gamma = gamma.reshape(result_shape) + result = _lerp(previous, + next, + gamma, + out=out) + else: + # Weighted case + # This implements method="inverted_cdf", the only supported weighted + # method, which needs to sort anyway. + weights = np.asanyarray(weights) + if axis != 0: + weights = np.moveaxis(weights, axis, destination=0) + index_array = np.argsort(arr, axis=0, kind="stable") + + # arr = arr[index_array, ...] # but this adds trailing dimensions of + # 1. + arr = np.take_along_axis(arr, index_array, axis=0) + if weights.shape == arr.shape: + weights = np.take_along_axis(weights, index_array, axis=0) + else: + # weights is 1d + weights = weights.reshape(-1)[index_array, ...] + + if supports_nans: + # may contain nan, which would sort to the end + slices_having_nans = np.isnan(arr[-1, ...]) + else: + # cannot contain nan + slices_having_nans = np.array(False, dtype=bool) + + # We use the weights to calculate the empirical cumulative + # distribution function cdf + cdf = weights.cumsum(axis=0, dtype=np.float64) + cdf /= cdf[-1, ...] # normalization to 1 + # Search index i such that + # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i) + # is then equivalent to + # cdf[i-1] < quantile <= cdf[i] + # Unfortunately, searchsorted only accepts 1-d arrays as first + # argument, so we will need to iterate over dimensions. + + # Without the following cast, searchsorted can return surprising + # results, e.g. + # np.searchsorted(np.array([0.2, 0.4, 0.6, 0.8, 1.]), + # np.array(0.4, dtype=np.float32), side="left") + # returns 2 instead of 1 because 0.4 is not binary representable. + if quantiles.dtype.kind == "f": + cdf = cdf.astype(quantiles.dtype) + # Weights must be non-negative, so we might have zero weights at the + # beginning leading to some leading zeros in cdf. The call to + # np.searchsorted for quantiles=0 will then pick the first element, + # but should pick the first one larger than zero. We + # therefore simply set 0 values in cdf to -1. + if np.any(cdf[0, ...] 
== 0): + cdf[cdf == 0] = -1 + + def find_cdf_1d(arr, cdf): + indices = np.searchsorted(cdf, quantiles, side="left") + # We might have reached the maximum with i = len(arr), e.g. for + # quantiles = 1, and need to cut it to len(arr) - 1. + indices = minimum(indices, values_count - 1) + result = take(arr, indices, axis=0) + return result + + r_shape = arr.shape[1:] + if quantiles.ndim > 0: + r_shape = quantiles.shape + r_shape + if out is None: + result = np.empty_like(arr, shape=r_shape) + else: + if out.shape != r_shape: + msg = (f"Wrong shape of argument 'out', shape={r_shape} is " + f"required; got shape={out.shape}.") + raise ValueError(msg) + result = out + + # See apply_along_axis, which we do for axis=0. Note that Ni = (,) + # always, so we remove it here. + Nk = arr.shape[1:] + for kk in np.ndindex(Nk): + result[(...,) + kk] = find_cdf_1d( + arr[np.s_[:, ] + kk], cdf[np.s_[:, ] + kk] + ) + + # Make result the same as in unweighted inverted_cdf. + if result.shape == () and result.dtype == np.dtype("O"): + result = result.item() + + if np.any(slices_having_nans): + if result.ndim == 0 and out is None: + # can't write to a scalar, but indexing will be correct + result = arr[-1] + else: + np.copyto(result, arr[-1, ...], where=slices_having_nans) + return result + + +def _trapezoid_dispatcher(y, x=None, dx=None, axis=None): + return (y, x) + + +@array_function_dispatch(_trapezoid_dispatcher) +def trapezoid(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + .. versionadded:: 2.0.0 + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapezoid : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + sum, cumsum + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. 
[2] Illustration image:
+           https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Use the trapezoidal rule on evenly spaced points:
+
+    >>> np.trapezoid([1, 2, 3])
+    4.0
+
+    The spacing between sample points can be selected by either the
+    ``x`` or ``dx`` arguments:
+
+    >>> np.trapezoid([1, 2, 3], x=[4, 6, 8])
+    8.0
+    >>> np.trapezoid([1, 2, 3], dx=2)
+    8.0
+
+    Using a decreasing ``x`` corresponds to integrating in reverse:
+
+    >>> np.trapezoid([1, 2, 3], x=[8, 6, 4])
+    -8.0
+
+    More generally ``x`` is used to integrate along a parametric curve. We can
+    estimate the integral :math:`\int_0^1 x^2\,dx = 1/3` using:
+
+    >>> x = np.linspace(0, 1, num=50)
+    >>> y = x**2
+    >>> np.trapezoid(y, x)
+    0.33340274885464394
+
+    Or estimate the area of a circle, noting we repeat the sample which closes
+    the curve:
+
+    >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True)
+    >>> np.trapezoid(np.cos(theta), x=np.sin(theta))
+    3.141571941375841
+
+    ``np.trapezoid`` can be applied along a specified axis to do multiple
+    computations in one call:
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> a
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.trapezoid(a, axis=0)
+    array([1.5, 2.5, 3.5])
+    >>> np.trapezoid(a, axis=1)
+    array([2., 8.])
+    """
+
+    y = asanyarray(y)
+    if x is None:
+        d = dx
+    else:
+        x = asanyarray(x)
+        if x.ndim == 1:
+            d = diff(x)
+            # reshape to correct shape
+            shape = [1] * y.ndim
+            shape[axis] = d.shape[0]
+            d = d.reshape(shape)
+        else:
+            d = diff(x, axis=axis)
+    nd = y.ndim
+    slice1 = [slice(None)] * nd
+    slice2 = [slice(None)] * nd
+    slice1[axis] = slice(1, None)
+    slice2[axis] = slice(None, -1)
+    try:
+        ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
+    except ValueError:
+        # Operations didn't work, cast to ndarray
+        d = np.asarray(d)
+        y = np.asarray(y)
+        ret = add.reduce(d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0, axis)
+    return ret
+
+
+@set_module('numpy')
+def trapz(y, x=None, dx=1.0, axis=-1):
+    """
+    `trapz` is deprecated in NumPy 2.0.
+
+    Please use `trapezoid` instead, or one of the numerical integration
+    functions in `scipy.integrate`.
+    """
+    # Deprecated in NumPy 2.0, 2023-08-18
+    warnings.warn(
+        "`trapz` is deprecated. Use `trapezoid` instead, or one of the "
+        "numerical integration functions in `scipy.integrate`.",
+        DeprecationWarning,
+        stacklevel=2
+    )
+    return trapezoid(y, x=x, dx=dx, axis=axis)
+
+
+def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
+    return xi
+
+
+# Based on scitools meshgrid
+@array_function_dispatch(_meshgrid_dispatcher)
+def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
+    """
+    Return a tuple of coordinate matrices from coordinate vectors.
+
+    Make N-D coordinate arrays for vectorized evaluations of
+    N-D scalar/vector fields over N-D grids, given
+    one-dimensional coordinate arrays x1, x2,..., xn.
+
+    Parameters
+    ----------
+    x1, x2,..., xn : array_like
+        1-D arrays representing the coordinates of a grid.
+    indexing : {'xy', 'ij'}, optional
+        Cartesian ('xy', default) or matrix ('ij') indexing of output.
+        See Notes for more details.
+    sparse : bool, optional
+        If True the shape of the returned coordinate array for dimension *i*
+        is reduced from ``(N1, ..., Ni, ... Nn)`` to
+        ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are
+        intended to be used with :ref:`basics.broadcasting`. When all
+        coordinates are used in an expression, broadcasting still leads to a
+        fully-dimensional result array.
+
+        Default is False.
+
+    copy : bool, optional
+        If False, views into the original arrays are returned in order to
+        conserve memory. Default is True. Please note that
+        ``sparse=False, copy=False`` will likely return non-contiguous
+        arrays. Furthermore, more than one element of a broadcast array
+        may refer to a single memory location. If you need to write to the
+        arrays, make copies first.
+
+    Returns
+    -------
+    X1, X2,..., XN : tuple of ndarrays
+        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
+        returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij'
+        or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy'
+        with the elements of `xi` repeated to fill the matrix along
+        the first dimension for `x1`, the second for `x2` and so on.
+
+    Notes
+    -----
+    This function supports both indexing conventions through the indexing
+    keyword argument. Giving the string 'ij' returns a meshgrid with
+    matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
+    In the 2-D case with inputs of length M and N, the outputs are of shape
+    (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
+    with inputs of length M, N and P, outputs are of shape (N, M, P) for
+    'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
+    illustrated by the following code snippet::
+
+        xv, yv = np.meshgrid(x, y, indexing='ij')
+        for i in range(nx):
+            for j in range(ny):
+                # treat xv[i,j], yv[i,j]
+
+        xv, yv = np.meshgrid(x, y, indexing='xy')
+        for i in range(nx):
+            for j in range(ny):
+                # treat xv[j,i], yv[j,i]
+
+    In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
+
+    See Also
+    --------
+    mgrid : Construct a multi-dimensional "meshgrid" using indexing notation.
+    ogrid : Construct an open multi-dimensional "meshgrid" using indexing
+            notation.
+    :ref:`how-to-index`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> nx, ny = (3, 2)
+    >>> x = np.linspace(0, 1, nx)
+    >>> y = np.linspace(0, 1, ny)
+    >>> xv, yv = np.meshgrid(x, y)
+    >>> xv
+    array([[0. , 0.5, 1. ],
+           [0. , 0.5, 1. ]])
+    >>> yv
+    array([[0., 0., 0.],
+           [1., 1., 1.]])
+
+    The result of `meshgrid` is a coordinate grid:
+
+    >>> import matplotlib.pyplot as plt
+    >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none')
+    >>> plt.show()
+
+    You can create sparse output arrays to save memory and computation time.
+
+    >>> xv, yv = np.meshgrid(x, y, sparse=True)
+    >>> xv
+    array([[0. , 0.5, 1. ]])
+    >>> yv
+    array([[0.],
+           [1.]])
+
+    `meshgrid` is very useful to evaluate functions on a grid. If the
+    function depends on all coordinates, both dense and sparse outputs can be
+    used.
+ + >>> x = np.linspace(-5, 5, 101) + >>> y = np.linspace(-5, 5, 101) + >>> # full coordinate arrays + >>> xx, yy = np.meshgrid(x, y) + >>> zz = np.sqrt(xx**2 + yy**2) + >>> xx.shape, yy.shape, zz.shape + ((101, 101), (101, 101), (101, 101)) + >>> # sparse coordinate arrays + >>> xs, ys = np.meshgrid(x, y, sparse=True) + >>> zs = np.sqrt(xs**2 + ys**2) + >>> xs.shape, ys.shape, zs.shape + ((1, 101), (101, 1), (101, 101)) + >>> np.array_equal(zz, zs) + True + + >>> h = plt.contourf(x, y, zs) + >>> plt.axis('scaled') + >>> plt.colorbar() + >>> plt.show() + """ + ndim = len(xi) + + if indexing not in ['xy', 'ij']: + raise ValueError( + "Valid values for `indexing` are 'xy' and 'ij'.") + + s0 = (1,) * ndim + output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) + for i, x in enumerate(xi)] + + if indexing == 'xy' and ndim > 1: + # switch first and second axis + output[0].shape = (1, -1) + s0[2:] + output[1].shape = (-1, 1) + s0[2:] + + if not sparse: + # Return the full N-D matrix (not only the 1-D vector) + output = np.broadcast_arrays(*output, subok=True) + + if copy: + output = tuple(x.copy() for x in output) + + return output + + +def _delete_dispatcher(arr, obj, axis=None): + return (arr, obj) + + +@array_function_dispatch(_delete_dispatcher) +def delete(arr, obj, axis=None): + """ + Return a new array with sub-arrays along an axis deleted. For a one + dimensional array, this returns those entries not returned by + `arr[obj]`. + + Parameters + ---------- + arr : array_like + Input array. + obj : slice, int, array-like of ints or bools + Indicate indices of sub-arrays to remove along the specified axis. + + .. versionchanged:: 1.19.0 + Boolean indices are now treated as a mask of elements to remove, + rather than being cast to the integers 0 and 1. + + axis : int, optional + The axis along which to delete the subarray defined by `obj`. + If `axis` is None, `obj` is applied to the flattened array. + + Returns + ------- + out : ndarray + A copy of `arr` with the elements specified by `obj` removed. Note + that `delete` does not occur in-place. If `axis` is None, `out` is + a flattened array. + + See Also + -------- + insert : Insert elements into an array. + append : Append elements at the end of an array. + + Notes + ----- + Often it is preferable to use a boolean mask. For example: + + >>> arr = np.arange(12) + 1 + >>> mask = np.ones(len(arr), dtype=bool) + >>> mask[[0,2,4]] = False + >>> result = arr[mask,...] + + Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further + use of `mask`. 
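+
+    For example, the equivalence can be checked directly (continuing the
+    snippet above):
+
+    >>> np.array_equal(result, np.delete(arr, [0, 2, 4], axis=0))
+    True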
+ + Examples + -------- + >>> import numpy as np + >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) + >>> arr + array([[ 1, 2, 3, 4], + [ 5, 6, 7, 8], + [ 9, 10, 11, 12]]) + >>> np.delete(arr, 1, 0) + array([[ 1, 2, 3, 4], + [ 9, 10, 11, 12]]) + + >>> np.delete(arr, np.s_[::2], 1) + array([[ 2, 4], + [ 6, 8], + [10, 12]]) + >>> np.delete(arr, [1,3,5], None) + array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) + + """ + conv = _array_converter(arr) + arr, = conv.as_arrays(subok=False) + + ndim = arr.ndim + arrorder = 'F' if arr.flags.fnc else 'C' + if axis is None: + if ndim != 1: + arr = arr.ravel() + # needed for np.matrix, which is still not 1d after being ravelled + ndim = arr.ndim + axis = ndim - 1 + else: + axis = normalize_axis_index(axis, ndim) + + slobj = [slice(None)] * ndim + N = arr.shape[axis] + newshape = list(arr.shape) + + if isinstance(obj, slice): + start, stop, step = obj.indices(N) + xr = range(start, stop, step) + numtodel = len(xr) + + if numtodel <= 0: + return conv.wrap(arr.copy(order=arrorder), to_scalar=False) + + # Invert if step is negative: + if step < 0: + step = -step + start = xr[-1] + stop = xr[0] + 1 + + newshape[axis] -= numtodel + new = empty(newshape, arr.dtype, arrorder) + # copy initial chunk + if start == 0: + pass + else: + slobj[axis] = slice(None, start) + new[tuple(slobj)] = arr[tuple(slobj)] + # copy end chunk + if stop == N: + pass + else: + slobj[axis] = slice(stop - numtodel, None) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(stop, None) + new[tuple(slobj)] = arr[tuple(slobj2)] + # copy middle pieces + if step == 1: + pass + else: # use array indexing. + keep = ones(stop - start, dtype=bool) + keep[:stop - start:step] = False + slobj[axis] = slice(start, stop - numtodel) + slobj2 = [slice(None)] * ndim + slobj2[axis] = slice(start, stop) + arr = arr[tuple(slobj2)] + slobj2[axis] = keep + new[tuple(slobj)] = arr[tuple(slobj2)] + + return conv.wrap(new, to_scalar=False) + + if isinstance(obj, (int, integer)) and not isinstance(obj, bool): + single_value = True + else: + single_value = False + _obj = obj + obj = np.asarray(obj) + # `size == 0` to allow empty lists similar to indexing, but (as there) + # is really too generic: + if obj.size == 0 and not isinstance(_obj, np.ndarray): + obj = obj.astype(intp) + elif obj.size == 1 and obj.dtype.kind in "ui": + # For a size 1 integer array we can use the single-value path + # (most dtypes, except boolean, should just fail later). 
obj = obj.item()
+        single_value = True
+
+    if single_value:
+        # optimization for a single value
+        if (obj < -N or obj >= N):
+            raise IndexError(
+                f"index {obj} is out of bounds for axis {axis} with "
+                f"size {N}")
+        if (obj < 0):
+            obj += N
+        newshape[axis] -= 1
+        new = empty(newshape, arr.dtype, arrorder)
+        slobj[axis] = slice(None, obj)
+        new[tuple(slobj)] = arr[tuple(slobj)]
+        slobj[axis] = slice(obj, None)
+        slobj2 = [slice(None)] * ndim
+        slobj2[axis] = slice(obj + 1, None)
+        new[tuple(slobj)] = arr[tuple(slobj2)]
+    else:
+        if obj.dtype == bool:
+            if obj.shape != (N,):
+                raise ValueError('boolean array argument obj to delete '
+                                 'must be one dimensional and match the axis '
+                                 f'length of {N}')
+
+            # optimization, the other branch is slower
+            keep = ~obj
+        else:
+            keep = ones(N, dtype=bool)
+            keep[obj,] = False
+
+        slobj[axis] = keep
+        new = arr[tuple(slobj)]
+
+    return conv.wrap(new, to_scalar=False)
+
+
+def _insert_dispatcher(arr, obj, values, axis=None):
+    return (arr, obj, values)
+
+
+@array_function_dispatch(_insert_dispatcher)
+def insert(arr, obj, values, axis=None):
+    """
+    Insert values along the given axis before the given indices.
+
+    Parameters
+    ----------
+    arr : array_like
+        Input array.
+    obj : slice, int, array-like of ints or bools
+        Object that defines the index or indices before which `values` is
+        inserted.
+
+        .. versionchanged:: 2.1.2
+            Boolean indices are now treated as a mask of elements to insert,
+            rather than being cast to the integers 0 and 1.
+
+        Multiple insertions are supported when `obj` is a single scalar or a
+        sequence with one element (similar to calling insert multiple
+        times).
+    values : array_like
+        Values to insert into `arr`. If the type of `values` is different
+        from that of `arr`, `values` is converted to the type of `arr`.
+        `values` should be shaped so that ``arr[...,obj,...] = values``
+        is legal.
+    axis : int, optional
+        Axis along which to insert `values`. If `axis` is None then `arr`
+        is flattened first.
+
+    Returns
+    -------
+    out : ndarray
+        A copy of `arr` with `values` inserted. Note that `insert`
+        does not occur in-place: a new array is returned. If
+        `axis` is None, `out` is a flattened array.
+
+    See Also
+    --------
+    append : Append elements at the end of an array.
+    concatenate : Join a sequence of arrays along an existing axis.
+    delete : Delete elements from an array.
+
+    Notes
+    -----
+    Note that for higher dimensional inserts ``obj=0`` behaves very
+    differently from ``obj=[0]``, just like ``arr[:,0,:] = values`` is
+    different from ``arr[:,[0],:] = values``. This is because of the
+    difference between basic and advanced :ref:`indexing <basics.indexing>`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> a = np.arange(6).reshape(3, 2)
+    >>> a
+    array([[0, 1],
+           [2, 3],
+           [4, 5]])
+    >>> np.insert(a, 1, 6)
+    array([0, 6, 1, 2, 3, 4, 5])
+    >>> np.insert(a, 1, 6, axis=1)
+    array([[0, 6, 1],
+           [2, 6, 3],
+           [4, 6, 5]])
+
+    Difference between sequence and scalars,
+    showing how ``obj=[1]`` behaves differently from ``obj=1``:
+
+    >>> np.insert(a, [1], [[7],[8],[9]], axis=1)
+    array([[0, 7, 1],
+           [2, 8, 3],
+           [4, 9, 5]])
+    >>> np.insert(a, 1, [[7],[8],[9]], axis=1)
+    array([[0, 7, 8, 9, 1],
+           [2, 7, 8, 9, 3],
+           [4, 7, 8, 9, 5]])
+    >>> np.array_equal(np.insert(a, 1, [7, 8, 9], axis=1),
+    ...
np.insert(a, [1], [[7],[8],[9]], axis=1))
+    True
+
+    >>> b = a.flatten()
+    >>> b
+    array([0, 1, 2, 3, 4, 5])
+    >>> np.insert(b, [2, 2], [6, 7])
+    array([0, 1, 6, 7, 2, 3, 4, 5])
+
+    >>> np.insert(b, slice(2, 4), [7, 8])
+    array([0, 1, 7, 2, 8, 3, 4, 5])
+
+    >>> np.insert(b, [2, 2], [7.13, False])  # type casting
+    array([0, 1, 7, 0, 2, 3, 4, 5])
+
+    >>> x = np.arange(8).reshape(2, 4)
+    >>> idx = (1, 3)
+    >>> np.insert(x, idx, 999, axis=1)
+    array([[  0, 999,   1,   2, 999,   3],
+           [  4, 999,   5,   6, 999,   7]])
+
+    """
+    conv = _array_converter(arr)
+    arr, = conv.as_arrays(subok=False)
+
+    ndim = arr.ndim
+    arrorder = 'F' if arr.flags.fnc else 'C'
+    if axis is None:
+        if ndim != 1:
+            arr = arr.ravel()
+        # needed for np.matrix, which is still not 1d after being ravelled
+        ndim = arr.ndim
+        axis = ndim - 1
+    else:
+        axis = normalize_axis_index(axis, ndim)
+    slobj = [slice(None)] * ndim
+    N = arr.shape[axis]
+    newshape = list(arr.shape)
+
+    if isinstance(obj, slice):
+        # turn it into a range object
+        indices = arange(*obj.indices(N), dtype=intp)
+    else:
+        # need to copy obj, because indices will be changed in-place
+        indices = np.array(obj)
+        if indices.dtype == bool:
+            if obj.ndim != 1:
+                raise ValueError('boolean array argument obj to insert '
+                                 'must be one dimensional')
+            indices = np.flatnonzero(obj)
+        elif indices.ndim > 1:
+            raise ValueError(
+                "index array argument obj to insert must be one dimensional "
+                "or scalar")
+    if indices.size == 1:
+        index = indices.item()
+        if index < -N or index > N:
+            raise IndexError(f"index {obj} is out of bounds for axis {axis} "
+                             f"with size {N}")
+        if (index < 0):
+            index += N
+
+        # There are some object array corner cases here, but we cannot avoid
+        # that:
+        values = array(values, copy=None, ndmin=arr.ndim, dtype=arr.dtype)
+        if indices.ndim == 0:
+            # broadcasting is very different here, since a[:,0,:] = ... behaves
+            # very differently from a[:,[0],:] = ...! This changes values so
+            # that it works like the second case. (here a[:,0:1,:])
+            values = np.moveaxis(values, 0, axis)
+        numnew = values.shape[axis]
+        newshape[axis] += numnew
+        new = empty(newshape, arr.dtype, arrorder)
+        slobj[axis] = slice(None, index)
+        new[tuple(slobj)] = arr[tuple(slobj)]
+        slobj[axis] = slice(index, index + numnew)
+        new[tuple(slobj)] = values
+        slobj[axis] = slice(index + numnew, None)
+        slobj2 = [slice(None)] * ndim
+        slobj2[axis] = slice(index, None)
+        new[tuple(slobj)] = arr[tuple(slobj2)]
+
+        return conv.wrap(new, to_scalar=False)
+
+    elif indices.size == 0 and not isinstance(obj, np.ndarray):
+        # Can safely cast the empty list to intp
+        indices = indices.astype(intp)
+
+    indices[indices < 0] += N
+
+    numnew = len(indices)
+    order = indices.argsort(kind='mergesort')   # stable sort
+    indices[order] += np.arange(numnew)
+
+    newshape[axis] += numnew
+    old_mask = ones(newshape[axis], dtype=bool)
+    old_mask[indices] = False
+
+    new = empty(newshape, arr.dtype, arrorder)
+    slobj2 = [slice(None)] * ndim
+    slobj[axis] = indices
+    slobj2[axis] = old_mask
+    new[tuple(slobj)] = values
+    new[tuple(slobj2)] = arr
+
+    return conv.wrap(new, to_scalar=False)
+
+
+def _append_dispatcher(arr, values, axis=None):
+    return (arr, values)
+
+
+@array_function_dispatch(_append_dispatcher)
+def append(arr, values, axis=None):
+    """
+    Append values to the end of an array.
+
+    Parameters
+    ----------
+    arr : array_like
+        Values are appended to a copy of this array.
+    values : array_like
+        These values are appended to a copy of `arr`. 
It must be of the
+        correct shape (the same shape as `arr`, excluding `axis`). If
+        `axis` is not specified, `values` can be any shape and will be
+        flattened before use.
+    axis : int, optional
+        The axis along which `values` are appended. If `axis` is not
+        given, both `arr` and `values` are flattened before use.
+
+    Returns
+    -------
+    append : ndarray
+        A copy of `arr` with `values` appended to `axis`. Note that
+        `append` does not occur in-place: a new array is allocated and
+        filled. If `axis` is None, `out` is a flattened array.
+
+    See Also
+    --------
+    insert : Insert elements into an array.
+    delete : Delete elements from an array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
+    array([1, 2, 3, ..., 7, 8, 9])
+
+    When `axis` is specified, `values` must have the correct shape.
+
+    >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
+    array([[1, 2, 3],
+           [4, 5, 6],
+           [7, 8, 9]])
+
+    >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
+    Traceback (most recent call last):
+        ...
+    ValueError: all the input arrays must have same number of dimensions, but
+    the array at index 0 has 2 dimension(s) and the array at index 1 has 1
+    dimension(s)
+
+    >>> a = np.array([1, 2], dtype=int)
+    >>> c = np.append(a, [])
+    >>> c
+    array([1., 2.])
+    >>> c.dtype
+    float64
+
+    The default dtype of an empty ndarray is `float64`, so appending an
+    empty array promotes the output to dtype `float64` even though `a`
+    has dtype `int64`.
+
+    """
+    arr = asanyarray(arr)
+    if axis is None:
+        if arr.ndim != 1:
+            arr = arr.ravel()
+        values = ravel(values)
+        axis = arr.ndim - 1
+    return concatenate((arr, values), axis=axis)
+
+
+def _digitize_dispatcher(x, bins, right=None):
+    return (x, bins)
+
+
+@array_function_dispatch(_digitize_dispatcher)
+def digitize(x, bins, right=False):
+    """
+    Return the indices of the bins to which each value in input array belongs.
+
+    =========  =============  ============================
+    `right`    order of bins  returned index `i` satisfies
+    =========  =============  ============================
+    ``False``  increasing     ``bins[i-1] <= x < bins[i]``
+    ``True``   increasing     ``bins[i-1] < x <= bins[i]``
+    ``False``  decreasing     ``bins[i-1] > x >= bins[i]``
+    ``True``   decreasing     ``bins[i-1] >= x > bins[i]``
+    =========  =============  ============================
+
+    If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
+    returned as appropriate.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array to be binned. Prior to NumPy 1.10.0, this array had to
+        be 1-dimensional, but can now have any shape.
+    bins : array_like
+        Array of bins. It has to be 1-dimensional and monotonic.
+    right : bool, optional
+        Indicates whether the intervals include the right or the left bin
+        edge. Default behavior is (right==False) indicating that the interval
+        does not include the right edge. The left bin end is open in this
+        case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
+        monotonically increasing bins.
+
+    Returns
+    -------
+    indices : ndarray of ints
+        Output array of indices, of same shape as `x`.
+
+    Raises
+    ------
+    ValueError
+        If `bins` is not monotonic.
+    TypeError
+        If the type of the input is complex.
+
+    See Also
+    --------
+    bincount, histogram, unique, searchsorted
+
+    Notes
+    -----
+    If values in `x` are such that they fall outside the bin range,
+    attempting to index `bins` with the indices that `digitize` returns
+    will result in an IndexError.
+
+    .. 
versionadded:: 1.10.0
+
+    `numpy.digitize` is implemented in terms of `numpy.searchsorted`.
+    This means that a binary search is used to bin the values, which scales
+    much better for a larger number of bins than the previous linear search.
+    It also removes the requirement for the input array to be 1-dimensional.
+
+    For monotonically *increasing* `bins`, the following are equivalent::
+
+        np.digitize(x, bins, right=True)
+        np.searchsorted(bins, x, side='left')
+
+    Note that as the order of the arguments is reversed, the side must be too.
+    The `searchsorted` call is marginally faster, as it does not do any
+    monotonicity checks. Perhaps more importantly, it supports all dtypes.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+    >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+    >>> inds = np.digitize(x, bins)
+    >>> inds
+    array([1, 4, 3, 2])
+    >>> for n in range(x.size):
+    ...     print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
+    ...
+    0.0 <= 0.2 < 1.0
+    4.0 <= 6.4 < 10.0
+    2.5 <= 3.0 < 4.0
+    1.0 <= 1.6 < 2.5
+
+    >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
+    >>> bins = np.array([0, 5, 10, 15, 20])
+    >>> np.digitize(x,bins,right=True)
+    array([1, 2, 3, 4, 4])
+    >>> np.digitize(x,bins,right=False)
+    array([1, 3, 3, 4, 5])
+    """
+    x = _nx.asarray(x)
+    bins = _nx.asarray(bins)
+
+    # here for compatibility, searchsorted below is happy to take this
+    if np.issubdtype(x.dtype, _nx.complexfloating):
+        raise TypeError("x may not be complex")
+
+    mono = _monotonicity(bins)
+    if mono == 0:
+        raise ValueError("bins must be monotonically increasing or decreasing")
+
+    # this is backwards because the arguments below are swapped
+    side = 'left' if right else 'right'
+    if mono == -1:
+        # reverse the bins, and invert the results
+        return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
+    else:
+        return _nx.searchsorted(bins, x, side=side)
diff --git a/python/numpy/lib/_function_base_impl.pyi b/python/numpy/lib/_function_base_impl.pyi
new file mode 100644
index 000000000..c2eaf1b5a
--- /dev/null
+++ b/python/numpy/lib/_function_base_impl.pyi
@@ -0,0 +1,1164 @@
+# ruff: noqa: ANN401
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+    Any,
+    Concatenate,
+    ParamSpec,
+    Protocol,
+    SupportsIndex,
+    SupportsInt,
+    TypeAlias,
+    TypeVar,
+    overload,
+    type_check_only,
+)
+from typing import Literal as L
+
+from _typeshed import Incomplete
+from typing_extensions import TypeIs, deprecated
+
+import numpy as np
+from numpy import (
+    _OrderKACF,
+    bool_,
+    complex128,
+    complexfloating,
+    datetime64,
+    float64,
+    floating,
+    generic,
+    integer,
+    intp,
+    object_,
+    timedelta64,
+    vectorize,
+)
+from numpy._core.multiarray import bincount
+from numpy._globals import _NoValueType
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NDArray,
+    _ArrayLike,
+    _ArrayLikeBool_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeDT64_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeNumber_co,
+    _ArrayLikeObject_co,
+    _ArrayLikeTD64_co,
+    _ComplexLike_co,
+    _DTypeLike,
+    _FloatLike_co,
+    _NestedSequence,
+    _NumberLike_co,
+    _ScalarLike_co,
+    _ShapeLike,
+)
+
+__all__ = [
+    "select",
+    "piecewise",
+    "trim_zeros",
+    "copy",
+    "iterable",
+    "percentile",
+    "diff",
+    "gradient",
+    "angle",
+    "unwrap",
+    "sort_complex",
+    "flip",
+    "rot90",
+    "extract",
+    "place",
+    "vectorize",
+    "asarray_chkfinite",
+    "average",
+    "bincount",
+    "digitize",
+    "cov",
+    "corrcoef",
+    "median",
+    "sinc",
+    "hamming",
+    "hanning",
+    "bartlett",
+    "blackman",
+    
"kaiser", + "trapezoid", + "trapz", + "i0", + "meshgrid", + "delete", + "insert", + "append", + "interp", + "quantile", +] + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) +# The `{}ss` suffix refers to the Python 3.12 syntax: `**P` +_Pss = ParamSpec("_Pss") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT1 = TypeVar("_ScalarT1", bound=generic) +_ScalarT2 = TypeVar("_ScalarT2", bound=generic) +_ArrayT = TypeVar("_ArrayT", bound=np.ndarray) + +_2Tuple: TypeAlias = tuple[_T, _T] +_MeshgridIdx: TypeAlias = L['ij', 'xy'] + +@type_check_only +class _TrimZerosSequence(Protocol[_T_co]): + def __len__(self, /) -> int: ... + @overload + def __getitem__(self, key: int, /) -> object: ... + @overload + def __getitem__(self, key: slice, /) -> _T_co: ... + +### + +@overload +def rot90( + m: _ArrayLike[_ScalarT], + k: int = ..., + axes: tuple[int, int] = ..., +) -> NDArray[_ScalarT]: ... +@overload +def rot90( + m: ArrayLike, + k: int = ..., + axes: tuple[int, int] = ..., +) -> NDArray[Any]: ... + +@overload +def flip(m: _ScalarT, axis: None = ...) -> _ScalarT: ... +@overload +def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ... +@overload +def flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ... +@overload +def flip(m: ArrayLike, axis: _ShapeLike | None = ...) -> NDArray[Any]: ... + +def iterable(y: object) -> TypeIs[Iterable[Any]]: ... + +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> floating: ... +@overload +def average( + a: _ArrayLikeFloat_co, + axis: None = None, + weights: _ArrayLikeFloat_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _2Tuple[floating]: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + returned: L[False] = False, + *, + keepdims: L[False] | _NoValueType = ..., +) -> complexfloating: ... +@overload +def average( + a: _ArrayLikeComplex_co, + axis: None = None, + weights: _ArrayLikeComplex_co | None = None, + *, + returned: L[True], + keepdims: L[False] | _NoValueType = ..., +) -> _2Tuple[complexfloating]: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + weights: object | None = None, + *, + returned: L[True], + keepdims: bool | bool_ | _NoValueType = ..., +) -> _2Tuple[Incomplete]: ... +@overload +def average( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = None, + weights: object | None = None, + returned: bool | bool_ = False, + *, + keepdims: bool | bool_ | _NoValueType = ..., +) -> Incomplete: ... + +@overload +def asarray_chkfinite( + a: _ArrayLike[_ScalarT], + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray_chkfinite( + a: object, + dtype: None = ..., + order: _OrderKACF = ..., +) -> NDArray[Any]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: _DTypeLike[_ScalarT], + order: _OrderKACF = ..., +) -> NDArray[_ScalarT]: ... +@overload +def asarray_chkfinite( + a: Any, + dtype: DTypeLike, + order: _OrderKACF = ..., +) -> NDArray[Any]: ... 
+ +@overload +def piecewise( + x: _ArrayLike[_ScalarT], + condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], + funclist: Sequence[ + Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]] + | _ScalarT | object + ], + *args: _Pss.args, + **kw: _Pss.kwargs, +) -> NDArray[_ScalarT]: ... +@overload +def piecewise( + x: ArrayLike, + condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co], + funclist: Sequence[ + Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]] + | object + ], + *args: _Pss.args, + **kw: _Pss.kwargs, +) -> NDArray[Any]: ... + +def select( + condlist: Sequence[ArrayLike], + choicelist: Sequence[ArrayLike], + default: ArrayLike = ..., +) -> NDArray[Any]: ... + +@overload +def copy( + a: _ArrayT, + order: _OrderKACF, + subok: L[True], +) -> _ArrayT: ... +@overload +def copy( + a: _ArrayT, + order: _OrderKACF = ..., + *, + subok: L[True], +) -> _ArrayT: ... +@overload +def copy( + a: _ArrayLike[_ScalarT], + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[_ScalarT]: ... +@overload +def copy( + a: ArrayLike, + order: _OrderKACF = ..., + subok: L[False] = ..., +) -> NDArray[Any]: ... + +def gradient( + f: ArrayLike, + *varargs: ArrayLike, + axis: _ShapeLike | None = ..., + edge_order: L[1, 2] = ..., +) -> Any: ... + +@overload +def diff( # type: ignore[overload-overlap] + a: _T, + n: L[0], + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue +) -> _T: ... +@overload +def diff( + a: ArrayLike, + n: int = 1, + axis: SupportsIndex = -1, + prepend: ArrayLike | _NoValueType = ..., # = _NoValue + append: ArrayLike | _NoValueType = ..., # = _NoValue +) -> NDArray[Incomplete]: ... + +@overload # float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> float64: ... +@overload # float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[float64]: ... +@overload # float scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeFloat_co, + left: _FloatLike_co | None = None, + right: _FloatLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[float64] | float64: ... +@overload # complex scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128: ... +@overload # complex or float scalar +def interp( + x: _FloatLike_co, + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> complex128 | float64: ... +@overload # complex array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128]: ... 
+@overload # complex or float array +def interp( + x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co], + xp: _ArrayLikeFloat_co, + fp: Sequence[complex | complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64]: ... +@overload # complex scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLike[complexfloating], + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128] | complex128: ... +@overload # complex or float scalar or array +def interp( + x: _ArrayLikeFloat_co, + xp: _ArrayLikeFloat_co, + fp: _ArrayLikeNumber_co, + left: _NumberLike_co | None = None, + right: _NumberLike_co | None = None, + period: _FloatLike_co | None = None, +) -> NDArray[complex128 | float64] | complex128 | float64: ... + +@overload +def angle(z: _ComplexLike_co, deg: bool = ...) -> floating: ... +@overload +def angle(z: object_, deg: bool = ...) -> Any: ... +@overload +def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating]: ... +@overload +def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ... + +@overload +def unwrap( + p: _ArrayLikeFloat_co, + discont: float | None = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[floating]: ... +@overload +def unwrap( + p: _ArrayLikeObject_co, + discont: float | None = ..., + axis: int = ..., + *, + period: float = ..., +) -> NDArray[object_]: ... + +def sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ... + +def trim_zeros( + filt: _TrimZerosSequence[_T], + trim: L["f", "b", "fb", "bf"] = "fb", + axis: _ShapeLike | None = None, +) -> _T: ... + +@overload +def extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... + +def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... + +@overload +def cov( + m: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co | None = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: SupportsIndex | SupportsInt | None = ..., + fweights: ArrayLike | None = ..., + aweights: ArrayLike | None = ..., + *, + dtype: None = ..., +) -> NDArray[floating]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: SupportsIndex | SupportsInt | None = ..., + fweights: ArrayLike | None = ..., + aweights: ArrayLike | None = ..., + *, + dtype: None = ..., +) -> NDArray[complexfloating]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: SupportsIndex | SupportsInt | None = ..., + fweights: ArrayLike | None = ..., + aweights: ArrayLike | None = ..., + *, + dtype: _DTypeLike[_ScalarT], +) -> NDArray[_ScalarT]: ... +@overload +def cov( + m: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = ..., + rowvar: bool = ..., + bias: bool = ..., + ddof: SupportsIndex | SupportsInt | None = ..., + fweights: ArrayLike | None = ..., + aweights: ArrayLike | None = ..., + *, + dtype: DTypeLike, +) -> NDArray[Any]: ... 
+ +# NOTE `bias` and `ddof` are deprecated and ignored +@overload +def corrcoef( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., + *, + dtype: None = None, +) -> NDArray[floating]: ... +@overload +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., + *, + dtype: None = None, +) -> NDArray[complexfloating]: ... +@overload +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., + *, + dtype: _DTypeLike[_ScalarT], +) -> NDArray[_ScalarT]: ... +@overload +def corrcoef( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co | None = None, + rowvar: bool = True, + bias: _NoValueType = ..., + ddof: _NoValueType = ..., + *, + dtype: DTypeLike | None = None, +) -> NDArray[Any]: ... + +def blackman(M: _FloatLike_co) -> NDArray[floating]: ... + +def bartlett(M: _FloatLike_co) -> NDArray[floating]: ... + +def hanning(M: _FloatLike_co) -> NDArray[floating]: ... + +def hamming(M: _FloatLike_co) -> NDArray[floating]: ... + +def i0(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... + +def kaiser( + M: _FloatLike_co, + beta: _FloatLike_co, +) -> NDArray[floating]: ... + +@overload +def sinc(x: _FloatLike_co) -> floating: ... +@overload +def sinc(x: _ComplexLike_co) -> complexfloating: ... +@overload +def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating]: ... +@overload +def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def median( + a: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> floating: ... +@overload +def median( + a: _ArrayLikeComplex_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> complexfloating: ... +@overload +def median( + a: _ArrayLikeTD64_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> timedelta64: ... +@overload +def median( + a: _ArrayLikeObject_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: L[False] = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + out: None = ..., + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> Any: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayT: ... +@overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayT: ... 
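+
+# NOTE: the literal values below are the valid `method` arguments accepted
+# by `percentile` and `quantile`.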
+ +_MethodKind = L[ + "inverted_cdf", + "averaged_inverted_cdf", + "closest_observation", + "interpolated_inverted_cdf", + "hazen", + "weibull", + "linear", + "median_unbiased", + "normal_unbiased", + "lower", + "higher", + "midpoint", + "nearest", +] + +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> floating: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> complexfloating: ... +@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> timedelta64: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> datetime64: ... +@overload +def percentile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> Any: ... +@overload +def percentile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[floating]: ... +@overload +def percentile( + a: _ArrayLikeComplex_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[complexfloating]: ... +@overload +def percentile( + a: _ArrayLikeTD64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[timedelta64]: ... +@overload +def percentile( + a: _ArrayLikeDT64_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[datetime64]: ... +@overload +def percentile( + a: _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: L[False] = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[object_]: ... 
+@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + out: None = ..., + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> Any: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> _ArrayT: ... +@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = ..., + *, + out: _ArrayT, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> _ArrayT: ... + +# NOTE: keep in sync with `percentile` +@overload +def quantile( + a: _ArrayLikeFloat_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> floating: ... +@overload +def quantile( + a: _ArrayLikeComplex_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> complexfloating: ... +@overload +def quantile( + a: _ArrayLikeTD64_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> timedelta64: ... +@overload +def quantile( + a: _ArrayLikeDT64_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> datetime64: ... +@overload +def quantile( + a: _ArrayLikeObject_co, + q: _FloatLike_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> Any: ... +@overload +def quantile( + a: _ArrayLikeFloat_co, + q: _ArrayLikeFloat_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[floating]: ... +@overload +def quantile( + a: _ArrayLikeComplex_co, + q: _ArrayLikeFloat_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[complexfloating]: ... 
+@overload +def quantile( + a: _ArrayLikeTD64_co, + q: _ArrayLikeFloat_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[timedelta64]: ... +@overload +def quantile( + a: _ArrayLikeDT64_co, + q: _ArrayLikeFloat_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[datetime64]: ... +@overload +def quantile( + a: _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: L[False] = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> NDArray[object_]: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + out: None = None, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> Any: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None, + out: _ArrayT, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, + *, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> _ArrayT: ... +@overload +def quantile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + overwrite_input: bool = False, + method: _MethodKind = "linear", + keepdims: bool = False, + weights: _ArrayLikeFloat_co | None = None, + interpolation: None = None, # deprecated +) -> _ArrayT: ... + +_ScalarT_fm = TypeVar( + "_ScalarT_fm", + bound=floating | complexfloating | timedelta64, +) + +class _SupportsRMulFloat(Protocol[_T_co]): + def __rmul__(self, other: float, /) -> _T_co: ... + +@overload +def trapezoid( # type: ignore[overload-overlap] + y: Sequence[_FloatLike_co], + x: Sequence[_FloatLike_co] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float64: ... +@overload +def trapezoid( + y: Sequence[_ComplexLike_co], + x: Sequence[_ComplexLike_co] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> complex128: ... +@overload +def trapezoid( + y: _ArrayLike[bool_ | integer], + x: _ArrayLike[bool_ | integer] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float64 | NDArray[float64]: ... +@overload +def trapezoid( # type: ignore[overload-overlap] + y: _ArrayLikeObject_co, + x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float | NDArray[object_]: ... +@overload +def trapezoid( + y: _ArrayLike[_ScalarT_fm], + x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ... 
+@overload +def trapezoid( + y: Sequence[_SupportsRMulFloat[_T]], + x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> _T: ... +@overload +def trapezoid( + y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> ( + floating | complexfloating | timedelta64 + | NDArray[floating | complexfloating | timedelta64 | object_] +): ... + +@deprecated("Use 'trapezoid' instead") +def trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ... + +@overload +def meshgrid( + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[()]: ... +@overload +def meshgrid( + x1: _ArrayLike[_ScalarT], + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[_ScalarT]]: ... +@overload +def meshgrid( + x1: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any]]: ... +@overload +def meshgrid( + x1: _ArrayLike[_ScalarT1], + x2: _ArrayLike[_ScalarT2], + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ... +@overload +def meshgrid( + x1: ArrayLike, + x2: _ArrayLike[_ScalarT], + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], NDArray[_ScalarT]]: ... +@overload +def meshgrid( + x1: _ArrayLike[_ScalarT], + x2: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ... +@overload +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], NDArray[Any]]: ... +@overload +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ... +@overload +def meshgrid( + x1: ArrayLike, + x2: ArrayLike, + x3: ArrayLike, + x4: ArrayLike, + /, + *, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ... +@overload +def meshgrid( + *xi: ArrayLike, + copy: bool = ..., + sparse: bool = ..., + indexing: _MeshgridIdx = ..., +) -> tuple[NDArray[Any], ...]: ... + +@overload +def delete( + arr: _ArrayLike[_ScalarT], + obj: slice | _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def delete( + arr: ArrayLike, + obj: slice | _ArrayLikeInt_co, + axis: SupportsIndex | None = ..., +) -> NDArray[Any]: ... + +@overload +def insert( + arr: _ArrayLike[_ScalarT], + obj: slice | _ArrayLikeInt_co, + values: ArrayLike, + axis: SupportsIndex | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def insert( + arr: ArrayLike, + obj: slice | _ArrayLikeInt_co, + values: ArrayLike, + axis: SupportsIndex | None = ..., +) -> NDArray[Any]: ... + +def append( + arr: ArrayLike, + values: ArrayLike, + axis: SupportsIndex | None = ..., +) -> NDArray[Any]: ... + +@overload +def digitize( + x: _FloatLike_co, + bins: _ArrayLikeFloat_co, + right: bool = ..., +) -> intp: ... 
+@overload +def digitize( + x: _ArrayLikeFloat_co, + bins: _ArrayLikeFloat_co, + right: bool = ..., +) -> NDArray[intp]: ... diff --git a/python/numpy/lib/_histograms_impl.py b/python/numpy/lib/_histograms_impl.py new file mode 100644 index 000000000..b4aacd057 --- /dev/null +++ b/python/numpy/lib/_histograms_impl.py @@ -0,0 +1,1085 @@ +""" +Histogram-related functions +""" +import contextlib +import functools +import operator +import warnings + +import numpy as np +from numpy._core import overrides + +__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges'] + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + +# range is a keyword argument to many functions, so save the builtin so they can +# use it. +_range = range + + +def _ptp(x): + """Peak-to-peak value of x. + + This implementation avoids the problem of signed integer arrays having a + peak-to-peak value that cannot be represented with the array's data type. + This function returns an unsigned value for signed integer arrays. + """ + return _unsigned_subtract(x.max(), x.min()) + + +def _hist_bin_sqrt(x, range): + """ + Square root histogram bin estimator. + + Bin width is inversely proportional to the data size. Used by many + programs for its simplicity. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / np.sqrt(x.size) + + +def _hist_bin_sturges(x, range): + """ + Sturges histogram bin estimator. + + A very simplistic estimator based on the assumption of normality of + the data. This estimator has poor performance for non-normal data, + which becomes especially obvious for large data sets. The estimate + depends only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (np.log2(x.size) + 1.0) + + +def _hist_bin_rice(x, range): + """ + Rice histogram bin estimator. + + Another simple estimator with no normality assumption. It has better + performance for large data than Sturges, but tends to overestimate + the number of bins. The number of bins is proportional to the cube + root of data size (asymptotically optimal). The estimate depends + only on size of the data. + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return _ptp(x) / (2.0 * x.size ** (1.0 / 3)) + + +def _hist_bin_scott(x, range): + """ + Scott histogram bin estimator. + + The binwidth is proportional to the standard deviation of the data + and inversely proportional to the cube root of data size + (asymptotically optimal). + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + """ + del range # unused + return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x) + + +def _hist_bin_stone(x, range): + """ + Histogram bin estimator based on minimizing the estimated integrated squared error (ISE). 
+
+    The number of bins is chosen by minimizing the estimated ISE against the unknown
+    true distribution. The ISE is estimated using cross-validation and can be regarded
+    as a generalization of Scott's rule.
+    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
+
+    This paper by Stone appears to be the origin of this rule.
+    https://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+    range : (float, float)
+        The lower and upper range of the bins.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """  # noqa: E501
+
+    n = x.size
+    ptp_x = _ptp(x)
+    if n <= 1 or ptp_x == 0:
+        return 0
+
+    def jhat(nbins):
+        hh = ptp_x / nbins
+        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
+        return (2 - (n + 1) * p_k.dot(p_k)) / hh
+
+    nbins_upper_bound = max(100, int(np.sqrt(n)))
+    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
+    if nbins == nbins_upper_bound:
+        warnings.warn("The number of bins estimated may be suboptimal.",
+                      RuntimeWarning, stacklevel=3)
+    return ptp_x / nbins
+
+
+def _hist_bin_doane(x, range):
+    """
+    Doane's histogram bin estimator.
+
+    Improved version of Sturges' formula which works better for
+    non-normal data. See
+    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    if x.size > 2:
+        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
+        sigma = np.std(x)
+        if sigma > 0.0:
+            # These three operations add up to
+            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
+            # but use only one temp array instead of three
+            temp = x - np.mean(x)
+            np.true_divide(temp, sigma, temp)
+            np.power(temp, 3, temp)
+            g1 = np.mean(temp)
+            return _ptp(x) / (1.0 + np.log2(x.size) +
+                              np.log2(1.0 + np.absolute(g1) / sg1))
+    return 0.0
+
+
+def _hist_bin_fd(x, range):
+    """
+    The Freedman-Diaconis histogram bin estimator.
+
+    The Freedman-Diaconis rule uses interquartile range (IQR) to
+    estimate binwidth. It is considered a variation of the Scott rule
+    with more robustness as the IQR is less affected by outliers than
+    the standard deviation. However, the IQR depends on fewer points
+    than the standard deviation, so it is less accurate, especially for
+    long tailed distributions.
+
+    If the IQR is 0, this function returns 0 for the bin width.
+    Binwidth is inversely proportional to the cube root of data size
+    (asymptotically optimal).
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    iqr = np.subtract(*np.percentile(x, [75, 25]))
+    return 2.0 * iqr * x.size ** (-1.0 / 3.0)
+
+
+def _hist_bin_auto(x, range):
+    """
+    Histogram bin estimator that uses the minimum width of the relaxed
+    Freedman-Diaconis and Sturges estimators, provided the FD bin width
+    does not result in a large number of bins. The relaxed Freedman-Diaconis
+    estimator bounds the bin width from below by half the width given by the
+    square-root estimator, to avoid overly small bins.
+
+    The FD estimator is usually the most robust method, but its width
+    estimate tends to be too large for small `x` and bad for data with limited
+    variance. 
The Sturges estimator is quite good for small (<1000) datasets + and is the default in the R language. This method gives good off-the-shelf + behaviour. + + + Parameters + ---------- + x : array_like + Input data that is to be histogrammed, trimmed to range. May not + be empty. + range : Tuple with range for the histogram + + Returns + ------- + h : An estimate of the optimal bin width for the given data. + + See Also + -------- + _hist_bin_fd, _hist_bin_sturges + """ + fd_bw = _hist_bin_fd(x, range) + sturges_bw = _hist_bin_sturges(x, range) + sqrt_bw = _hist_bin_sqrt(x, range) + # heuristic to limit the maximal number of bins + fd_bw_corrected = max(fd_bw, sqrt_bw / 2) + return min(fd_bw_corrected, sturges_bw) + + +# Private dict initialized at module load time +_hist_bin_selectors = {'stone': _hist_bin_stone, + 'auto': _hist_bin_auto, + 'doane': _hist_bin_doane, + 'fd': _hist_bin_fd, + 'rice': _hist_bin_rice, + 'scott': _hist_bin_scott, + 'sqrt': _hist_bin_sqrt, + 'sturges': _hist_bin_sturges} + + +def _ravel_and_check_weights(a, weights): + """ Check a and weights have matching shapes, and ravel both """ + a = np.asarray(a) + + # Ensure that the array is a "subtractable" dtype + if a.dtype == np.bool: + msg = f"Converting input from {a.dtype} to {np.uint8} for compatibility." + warnings.warn(msg, RuntimeWarning, stacklevel=3) + a = a.astype(np.uint8) + + if weights is not None: + weights = np.asarray(weights) + if weights.shape != a.shape: + raise ValueError( + 'weights should have the same shape as a.') + weights = weights.ravel() + a = a.ravel() + return a, weights + + +def _get_outer_edges(a, range): + """ + Determine the outer bin edges to use, from either the data or the range + argument + """ + if range is not None: + first_edge, last_edge = range + if first_edge > last_edge: + raise ValueError( + 'max must be larger than min in range parameter.') + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + f"supplied range of [{first_edge}, {last_edge}] is not finite") + elif a.size == 0: + # handle empty arrays. Can't determine range, so use 0-1. + first_edge, last_edge = 0, 1 + else: + first_edge, last_edge = a.min(), a.max() + if not (np.isfinite(first_edge) and np.isfinite(last_edge)): + raise ValueError( + f"autodetected range of [{first_edge}, {last_edge}] is not finite") + + # expand empty range to avoid divide by zero + if first_edge == last_edge: + first_edge = first_edge - 0.5 + last_edge = last_edge + 0.5 + + return first_edge, last_edge + + +def _unsigned_subtract(a, b): + """ + Subtract two values where a >= b, and produce an unsigned result + + This is needed when finding the difference between the upper and lower + bound of an int16 histogram + """ + # coerce to a single type + signed_to_unsigned = { + np.byte: np.ubyte, + np.short: np.ushort, + np.intc: np.uintc, + np.int_: np.uint, + np.longlong: np.ulonglong + } + dt = np.result_type(a, b) + try: + unsigned_dt = signed_to_unsigned[dt.type] + except KeyError: + return np.subtract(a, b, dtype=dt) + else: + # we know the inputs are integers, and we are deliberately casting + # signed to unsigned. The input may be negative python integers so + # ensure we pass in arrays with the initial dtype (related to NEP 50). + return np.subtract(np.asarray(a, dtype=dt), np.asarray(b, dtype=dt), + casting='unsafe', dtype=unsigned_dt) + + +def _get_bin_edges(a, bins, range, weights): + """ + Computes the bins used internally by `histogram`. 
+
+    Parameters
+    ==========
+    a : ndarray
+        Ravelled data array
+    bins, range
+        Forwarded arguments from `histogram`.
+    weights : ndarray, optional
+        Ravelled weights array, or None
+
+    Returns
+    =======
+    bin_edges : ndarray
+        Array of bin edges
+    uniform_bins : (Number, Number, int):
+        The lower bound, upper bound, and number of bins, used in the
+        optimized implementation of `histogram` that works on uniform bins.
+    """
+    # parse the overloaded bins argument
+    n_equal_bins = None
+    bin_edges = None
+
+    if isinstance(bins, str):
+        bin_name = bins
+        # if `bins` is a string for an automatic method,
+        # this will replace it with the number of bins calculated
+        if bin_name not in _hist_bin_selectors:
+            raise ValueError(
+                f"{bin_name!r} is not a valid estimator for `bins`")
+        if weights is not None:
+            raise TypeError("Automated estimation of the number of "
+                            "bins is not supported for weighted data")
+
+        first_edge, last_edge = _get_outer_edges(a, range)
+
+        # truncate the range if needed
+        if range is not None:
+            keep = (a >= first_edge)
+            keep &= (a <= last_edge)
+            if not np.logical_and.reduce(keep):
+                a = a[keep]
+
+        if a.size == 0:
+            n_equal_bins = 1
+        else:
+            # Do not call selectors on empty arrays
+            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
+            if width:
+                if np.issubdtype(a.dtype, np.integer) and width < 1:
+                    width = 1
+                delta = _unsigned_subtract(last_edge, first_edge)
+                n_equal_bins = int(np.ceil(delta / width))
+            else:
+                # Width can be zero for some estimators, e.g. FD when
+                # the IQR of the data is zero.
+                n_equal_bins = 1
+
+    elif np.ndim(bins) == 0:
+        try:
+            n_equal_bins = operator.index(bins)
+        except TypeError as e:
+            raise TypeError(
+                '`bins` must be an integer, a string, or an array') from e
+        if n_equal_bins < 1:
+            raise ValueError('`bins` must be positive, when an integer')
+
+        first_edge, last_edge = _get_outer_edges(a, range)
+
+    elif np.ndim(bins) == 1:
+        bin_edges = np.asarray(bins)
+        if np.any(bin_edges[:-1] > bin_edges[1:]):
+            raise ValueError(
+                '`bins` must increase monotonically, when an array')
+
+    else:
+        raise ValueError('`bins` must be 1d, when an array')
+
+    if n_equal_bins is not None:
+        # gh-10322 means that type resolution rules are dependent on array
+        # shapes. To avoid this causing problems, we pick a type now and stick
+        # with it throughout.
+        bin_type = np.result_type(first_edge, last_edge, a)
+        if np.issubdtype(bin_type, np.integer):
+            bin_type = np.result_type(bin_type, float)
+
+        # bin edges must be computed
+        bin_edges = np.linspace(
+            first_edge, last_edge, n_equal_bins + 1,
+            endpoint=True, dtype=bin_type)
+        if np.any(bin_edges[:-1] >= bin_edges[1:]):
+            raise ValueError(
+                f'Too many bins for data range. Cannot create {n_equal_bins} '
+                f'finite-sized bins.')
+        return bin_edges, (first_edge, last_edge, n_equal_bins)
+    else:
+        return bin_edges, None
+
+
+def _search_sorted_inclusive(a, v):
+    """
+    Like `searchsorted`, but where the last item in `v` is placed on the right.
+
+    In the context of a histogram, this makes the last bin edge inclusive
+    """
+    return np.concatenate((
+        a.searchsorted(v[:-1], 'left'),
+        a.searchsorted(v[-1:], 'right')
+    ))
+
+
+def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
+    return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_bin_edges_dispatcher)
+def histogram_bin_edges(a, bins=10, range=None, weights=None):
+    r"""
+    Function to calculate only the edges of the bins used by the `histogram`
+    function. 
+ + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines the bin edges, including the rightmost + edge, allowing for non-uniform bin widths. + + If `bins` is a string from the list below, `histogram_bin_edges` will + use the method chosen to calculate the optimal bin width and + consequently the number of bins (see the Notes section for more detail + on the estimators) from the data that falls within the requested range. + While the bin width will be optimal for the actual data + in the range, the number of bins will be computed to fill the + entire range, including the empty portions. For visualisation, + using the 'auto' option is suggested. Weighted data is not + supported for automated bin size selection. + + 'auto' + Minimum bin width between the 'sturges' and 'fd' estimators. + Provides good all-around performance. + + 'fd' (Freedman Diaconis Estimator) + Robust (resilient to outliers) estimator that takes into + account data variability and data size. + + 'doane' + An improved version of Sturges' estimator that works better + with non-normal datasets. + + 'scott' + Less robust estimator that takes into account data variability + and data size. + + 'stone' + Estimator based on leave-one-out cross-validation estimate of + the integrated squared error. Can be regarded as a generalization + of Scott's rule. + + 'rice' + Estimator does not take variability into account, only data + size. Commonly overestimates number of bins required. + + 'sturges' + R's default method, only accounts for data size. Only + optimal for gaussian data and underestimates number of bins + for large non-gaussian datasets. + + 'sqrt' + Square root (of data size) estimator, used by Excel and + other programs for its speed and simplicity. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). This is currently not used by any of the bin estimators, + but may be in the future. + + Returns + ------- + bin_edges : array of dtype float + The edges to pass into `histogram` + + See Also + -------- + histogram + + Notes + ----- + The methods to estimate the optimal number of bins are well founded + in literature, and are inspired by the choices R provides for + histogram visualisation. Note that having the number of bins + proportional to :math:`n^{1/3}` is asymptotically optimal, which is + why it appears in most estimators. These are simply plug-in methods + that give good starting points for number of bins. In the equations + below, :math:`h` is the binwidth and :math:`n_h` is the number of + bins. All estimators that compute bin counts are recast to bin width + using the `ptp` of the data. The final bin count is obtained from + ``np.round(np.ceil(range / h))``. 
The final bin width is often less + than what is returned by the estimators below. + + 'auto' (minimum bin width of the 'sturges' and 'fd' estimators) + A compromise to get a good value. For small datasets the Sturges + value will usually be chosen, while larger datasets will usually + default to FD. Avoids the overly conservative behaviour of FD + and Sturges for small and large datasets respectively. + Switchover point is usually :math:`a.size \approx 1000`. + + 'fd' (Freedman Diaconis Estimator) + .. math:: h = 2 \frac{IQR}{n^{1/3}} + + The binwidth is proportional to the interquartile range (IQR) + and inversely proportional to cube root of a.size. Can be too + conservative for small datasets, but is quite good for large + datasets. The IQR is very robust to outliers. + + 'scott' + .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}} + + The binwidth is proportional to the standard deviation of the + data and inversely proportional to cube root of ``x.size``. Can + be too conservative for small datasets, but is quite good for + large datasets. The standard deviation is not very robust to + outliers. Values are very similar to the Freedman-Diaconis + estimator in the absence of outliers. + + 'rice' + .. math:: n_h = 2n^{1/3} + + The number of bins is only proportional to cube root of + ``a.size``. It tends to overestimate the number of bins and it + does not take into account data variability. + + 'sturges' + .. math:: n_h = \log _{2}(n) + 1 + + The number of bins is the base 2 log of ``a.size``. This + estimator assumes normality of data and is too conservative for + larger, non-normal datasets. This is the default method in R's + ``hist`` method. + + 'doane' + .. math:: n_h = 1 + \log_{2}(n) + + \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right) + + g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right] + + \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}} + + An improved version of Sturges' formula that produces better + estimates for non-normal datasets. This estimator attempts to + account for the skew of the data. + + 'sqrt' + .. math:: n_h = \sqrt n + + The simplest and fastest estimator. Only takes into account the + data size. + + Additionally, if the data is of integer dtype, then the binwidth will never + be less than 1. + + Examples + -------- + >>> import numpy as np + >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) + >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) + array([0. , 0.25, 0.5 , 0.75, 1. ]) + >>> np.histogram_bin_edges(arr, bins=2) + array([0. , 2.5, 5. ]) + + For consistency with histogram, an array of pre-computed bins is + passed through unmodified: + + >>> np.histogram_bin_edges(arr, [1, 2]) + array([1, 2]) + + This function allows one set of bins to be computed, and reused across + multiple histograms: + + >>> shared_bins = np.histogram_bin_edges(arr, bins='auto') + >>> shared_bins + array([0., 1., 2., 3., 4., 5.]) + + >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1]) + >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins) + >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins) + + >>> hist_0; hist_1 + array([1, 1, 0, 1, 0]) + array([2, 0, 1, 1, 2]) + + Which gives more easily comparable results than using separate bins for + each histogram: + + >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') + >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') + >>> hist_0; hist_1 + array([1, 1, 1]) + array([2, 1, 1, 2]) + >>> bins_0; bins_1 + array([0., 1., 2., 3.]) + array([0. 
, 1.25, 2.5 , 3.75, 5. ]) + + """ + a, weights = _ravel_and_check_weights(a, weights) + bin_edges, _ = _get_bin_edges(a, bins, range, weights) + return bin_edges + + +def _histogram_dispatcher( + a, bins=None, range=None, density=None, weights=None): + return (a, bins, weights) + + +@array_function_dispatch(_histogram_dispatcher) +def histogram(a, bins=10, range=None, density=None, weights=None): + r""" + Compute the histogram of a dataset. + + Parameters + ---------- + a : array_like + Input data. The histogram is computed over the flattened array. + bins : int or sequence of scalars or str, optional + If `bins` is an int, it defines the number of equal-width + bins in the given range (10, by default). If `bins` is a + sequence, it defines a monotonically increasing array of bin edges, + including the rightmost edge, allowing for non-uniform bin widths. + + If `bins` is a string, it defines the method used to calculate the + optimal bin width, as defined by `histogram_bin_edges`. + + range : (float, float), optional + The lower and upper range of the bins. If not provided, range + is simply ``(a.min(), a.max())``. Values outside the range are + ignored. The first element of the range must be less than or + equal to the second. `range` affects the automatic bin + computation as well. While bin width is computed to be optimal + based on the actual data within `range`, the bin count will fill + the entire range including portions containing no data. + weights : array_like, optional + An array of weights, of the same shape as `a`. Each value in + `a` only contributes its associated weight towards the bin count + (instead of 1). If `density` is True, the weights are + normalized, so that the integral of the density over the range + remains 1. + Please note that the ``dtype`` of `weights` will also become the + ``dtype`` of the returned accumulator (`hist`), so it must be + large enough to hold accumulated values as well. + density : bool, optional + If ``False``, the result will contain the number of samples in + each bin. If ``True``, the result is the value of the + probability *density* function at the bin, normalized such that + the *integral* over the range is 1. Note that the sum of the + histogram values will not be equal to 1 unless bins of unity + width are chosen; it is not a probability *mass* function. + + Returns + ------- + hist : array + The values of the histogram. See `density` and `weights` for a + description of the possible semantics. If `weights` are given, + ``hist.dtype`` will be taken from `weights`. + bin_edges : array of dtype float + Return the bin edges ``(length(hist)+1)``. + + + See Also + -------- + histogramdd, bincount, searchsorted, digitize, histogram_bin_edges + + Notes + ----- + All but the last (righthand-most) bin is half-open. In other words, + if `bins` is:: + + [1, 2, 3, 4] + + then the first bin is ``[1, 2)`` (including 1, but excluding 2) and + the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which + *includes* 4. + + + Examples + -------- + >>> import numpy as np + >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) + (array([0, 2, 1]), array([0, 1, 2, 3])) + >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) + (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) + (array([1, 4, 1]), array([0, 1, 2, 3])) + + >>> a = np.arange(5) + >>> hist, bin_edges = np.histogram(a, density=True) + >>> hist + array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. 
, 0.5]) + >>> hist.sum() + 2.4999999999999996 + >>> np.sum(hist * np.diff(bin_edges)) + 1.0 + + Automated Bin Selection Methods example, using 2 peak random data + with 2000 points. + + .. plot:: + :include-source: + + import matplotlib.pyplot as plt + import numpy as np + + rng = np.random.RandomState(10) # deterministic random data + a = np.hstack((rng.normal(size=1000), + rng.normal(loc=5, scale=2, size=1000))) + plt.hist(a, bins='auto') # arguments are passed to np.histogram + plt.title("Histogram with 'auto' bins") + plt.show() + + """ + a, weights = _ravel_and_check_weights(a, weights) + + bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights) + + # Histogram is an integer or a float array depending on the weights. + if weights is None: + ntype = np.dtype(np.intp) + else: + ntype = weights.dtype + + # We set a block size, as this allows us to iterate over chunks when + # computing histograms, to minimize memory usage. + BLOCK = 65536 + + # The fast path uses bincount, but that only works for certain types + # of weight + simple_weights = ( + weights is None or + np.can_cast(weights.dtype, np.double) or + np.can_cast(weights.dtype, complex) + ) + + if uniform_bins is not None and simple_weights: + # Fast algorithm for equal bins + # We now convert values of a to bin indices, under the assumption of + # equal bin widths (which is valid here). + first_edge, last_edge, n_equal_bins = uniform_bins + + # Initialize empty histogram + n = np.zeros(n_equal_bins, ntype) + + # Pre-compute histogram scaling factor + norm_numerator = n_equal_bins + norm_denom = _unsigned_subtract(last_edge, first_edge) + + # We iterate over blocks here for two reasons: the first is that for + # large arrays, it is actually faster (for example for a 10^8 array it + # is 2x as fast) and it results in a memory footprint 3x lower in the + # limit of large arrays. + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i + BLOCK] + if weights is None: + tmp_w = None + else: + tmp_w = weights[i:i + BLOCK] + + # Only include values in the right range + keep = (tmp_a >= first_edge) + keep &= (tmp_a <= last_edge) + if not np.logical_and.reduce(keep): + tmp_a = tmp_a[keep] + if tmp_w is not None: + tmp_w = tmp_w[keep] + + # This cast ensures no type promotions occur below, which gh-10322 + # make unpredictable. Getting it wrong leads to precision errors + # like gh-8123. + tmp_a = tmp_a.astype(bin_edges.dtype, copy=False) + + # Compute the bin indices, and for values that lie exactly on + # last_edge we need to subtract one + f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom) + * norm_numerator) + indices = f_indices.astype(np.intp) + indices[indices == n_equal_bins] -= 1 + + # The index computation is not guaranteed to give exactly + # consistent results within ~1 ULP of the bin edges. + decrement = tmp_a < bin_edges[indices] + indices[decrement] -= 1 + # The last bin includes the right edge. The other bins do not. 
+ increment = ((tmp_a >= bin_edges[indices + 1]) + & (indices != n_equal_bins - 1)) + indices[increment] += 1 + + # We now compute the histogram using bincount + if ntype.kind == 'c': + n.real += np.bincount(indices, weights=tmp_w.real, + minlength=n_equal_bins) + n.imag += np.bincount(indices, weights=tmp_w.imag, + minlength=n_equal_bins) + else: + n += np.bincount(indices, weights=tmp_w, + minlength=n_equal_bins).astype(ntype) + else: + # Compute via cumulative histogram + cum_n = np.zeros(bin_edges.shape, ntype) + if weights is None: + for i in _range(0, len(a), BLOCK): + sa = np.sort(a[i:i + BLOCK]) + cum_n += _search_sorted_inclusive(sa, bin_edges) + else: + zero = np.zeros(1, dtype=ntype) + for i in _range(0, len(a), BLOCK): + tmp_a = a[i:i + BLOCK] + tmp_w = weights[i:i + BLOCK] + sorting_index = np.argsort(tmp_a) + sa = tmp_a[sorting_index] + sw = tmp_w[sorting_index] + cw = np.concatenate((zero, sw.cumsum())) + bin_index = _search_sorted_inclusive(sa, bin_edges) + cum_n += cw[bin_index] + + n = np.diff(cum_n) + + if density: + db = np.array(np.diff(bin_edges), float) + return n / db / n.sum(), bin_edges + + return n, bin_edges + + +def _histogramdd_dispatcher(sample, bins=None, range=None, density=None, + weights=None): + if hasattr(sample, 'shape'): # same condition as used in histogramdd + yield sample + else: + yield from sample + with contextlib.suppress(TypeError): + yield from bins + yield weights + + +@array_function_dispatch(_histogramdd_dispatcher) +def histogramdd(sample, bins=10, range=None, density=None, weights=None): + """ + Compute the multidimensional histogram of some data. + + Parameters + ---------- + sample : (N, D) array, or (N, D) array_like + The data to be histogrammed. + + Note the unusual interpretation of sample when an array_like: + + * When an array, each row is a coordinate in a D-dimensional space - + such as ``histogramdd(np.array([p1, p2, p3]))``. + * When an array_like, each element is the list of values for single + coordinate - such as ``histogramdd((X, Y, Z))``. + + The first form should be preferred. + + bins : sequence or int, optional + The bin specification: + + * A sequence of arrays describing the monotonically increasing bin + edges along each dimension. + * The number of bins for each dimension (nx, ny, ... =bins) + * The number of bins for all dimensions (nx=ny=...=bins). + + range : sequence, optional + A sequence of length D, each an optional (lower, upper) tuple giving + the outer bin edges to be used if the edges are not given explicitly in + `bins`. + An entry of None in the sequence results in the minimum and maximum + values being used for the corresponding dimension. + The default, None, is equivalent to passing a tuple of D None values. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_volume``. + weights : (N,) array_like, optional + An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`. + Weights are normalized to 1 if density is True. If density is False, + the values of the returned histogram are equal to the sum of the + weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray + The multidimensional histogram of sample x. See density and weights + for the different possible semantics. + edges : tuple of ndarrays + A tuple of D arrays describing the bin edges for each dimension. 
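The uniform-bin fast path of `histogram` above reduces to a few lines of index arithmetic; this sketch strips out the block loop, weights, and the ULP corrections (sample values are illustrative):

import numpy as np

a = np.array([0.0, 0.2, 0.5, 0.9, 1.0])
first_edge, last_edge, n_bins = 0.0, 1.0, 4

# Scale each value into [0, n_bins] and truncate to an integer bin index.
f_indices = (a - first_edge) / (last_edge - first_edge) * n_bins
indices = f_indices.astype(np.intp)
indices[indices == n_bins] -= 1   # the right edge belongs to the last bin

counts = np.bincount(indices, minlength=n_bins)   # array([2, 0, 1, 2])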
+ + See Also + -------- + histogram: 1-D histogram + histogram2d: 2-D histogram + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> r = rng.normal(size=(100,3)) + >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) + >>> H.shape, edges[0].size, edges[1].size, edges[2].size + ((5, 8, 4), 6, 9, 5) + + """ + + try: + # Sample is an ND-array. + N, D = sample.shape + except (AttributeError, ValueError): + # Sample is a sequence of 1D arrays. + sample = np.atleast_2d(sample).T + N, D = sample.shape + + nbin = np.empty(D, np.intp) + edges = D * [None] + dedges = D * [None] + if weights is not None: + weights = np.asarray(weights) + + try: + M = len(bins) + if M != D: + raise ValueError( + 'The dimension of bins must be equal to the dimension of the ' + 'sample x.') + except TypeError: + # bins is an integer + bins = D * [bins] + + # normalize the range argument + if range is None: + range = (None,) * D + elif len(range) != D: + raise ValueError('range argument must have one entry per dimension') + + # Create edge arrays + for i in _range(D): + if np.ndim(bins[i]) == 0: + if bins[i] < 1: + raise ValueError( + f'`bins[{i}]` must be positive, when an integer') + smin, smax = _get_outer_edges(sample[:, i], range[i]) + try: + n = operator.index(bins[i]) + + except TypeError as e: + raise TypeError( + f"`bins[{i}]` must be an integer, when a scalar" + ) from e + + edges[i] = np.linspace(smin, smax, n + 1) + elif np.ndim(bins[i]) == 1: + edges[i] = np.asarray(bins[i]) + if np.any(edges[i][:-1] > edges[i][1:]): + raise ValueError( + f'`bins[{i}]` must be monotonically increasing, when an array') + else: + raise ValueError( + f'`bins[{i}]` must be a scalar or 1d array') + + nbin[i] = len(edges[i]) + 1 # includes an outlier on each end + dedges[i] = np.diff(edges[i]) + + # Compute the bin number each sample falls into. + Ncount = tuple( + # avoid np.digitize to work around gh-11022 + np.searchsorted(edges[i], sample[:, i], side='right') + for i in _range(D) + ) + + # Using digitize, values that fall on an edge are put in the right bin. + # For the rightmost bin, we want values equal to the right edge to be + # counted in the last bin, and not as an outlier. + for i in _range(D): + # Find which points are on the rightmost edge. + on_edge = (sample[:, i] == edges[i][-1]) + # Shift these points one bin to the left. + Ncount[i][on_edge] -= 1 + + # Compute the sample indices in the flattened histogram matrix. + # This raises an error if the array is too large. + xy = np.ravel_multi_index(Ncount, nbin) + + # Compute the number of repetitions in xy and assign it to the + # flattened histmat. + hist = np.bincount(xy, weights, minlength=nbin.prod()) + + # Shape into a proper matrix + hist = hist.reshape(nbin) + + # This preserves the (bad) behavior observed in gh-7845, for now. + hist = hist.astype(float, casting='safe') + + # Remove outliers (indices 0 and -1 for each dimension). 
+ core = D * (slice(1, -1),) + hist = hist[core] + + if density: + # calculate the probability density function + s = hist.sum() + for i in _range(D): + shape = np.ones(D, int) + shape[i] = nbin[i] - 2 + hist = hist / dedges[i].reshape(shape) + hist /= s + + if (hist.shape != nbin - 2).any(): + raise RuntimeError( + "Internal Shape Error") + return hist, edges diff --git a/python/numpy/lib/_histograms_impl.pyi b/python/numpy/lib/_histograms_impl.pyi new file mode 100644 index 000000000..4a65988e4 --- /dev/null +++ b/python/numpy/lib/_histograms_impl.pyi @@ -0,0 +1,50 @@ +from collections.abc import Sequence +from typing import ( + Any, + SupportsIndex, + TypeAlias, +) +from typing import ( + Literal as L, +) + +from numpy._typing import ( + ArrayLike, + NDArray, +) + +__all__ = ["histogram", "histogramdd", "histogram_bin_edges"] + +_BinKind: TypeAlias = L[ + "stone", + "auto", + "doane", + "fd", + "rice", + "scott", + "sqrt", + "sturges", +] + +def histogram_bin_edges( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = ..., + range: tuple[float, float] | None = ..., + weights: ArrayLike | None = ..., +) -> NDArray[Any]: ... + +def histogram( + a: ArrayLike, + bins: _BinKind | SupportsIndex | ArrayLike = 10, + range: tuple[float, float] | None = None, + density: bool | None = None, + weights: ArrayLike | None = None, +) -> tuple[NDArray[Any], NDArray[Any]]: ... + +def histogramdd( + sample: ArrayLike, + bins: SupportsIndex | ArrayLike = 10, + range: Sequence[tuple[float, float]] | None = None, + density: bool | None = None, + weights: ArrayLike | None = None, +) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ... diff --git a/python/numpy/lib/_index_tricks_impl.py b/python/numpy/lib/_index_tricks_impl.py new file mode 100644 index 000000000..131bbae5d --- /dev/null +++ b/python/numpy/lib/_index_tricks_impl.py @@ -0,0 +1,1067 @@ +import functools +import math +import sys +import warnings + +import numpy as np +import numpy._core.numeric as _nx +import numpy.matrixlib as matrixlib +from numpy._core import linspace, overrides +from numpy._core.multiarray import ravel_multi_index, unravel_index +from numpy._core.numeric import ScalarType, array +from numpy._core.numerictypes import issubdtype +from numpy._utils import set_module +from numpy.lib._function_base_impl import diff +from numpy.lib.stride_tricks import as_strided + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_', + 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal', + 'diag_indices', 'diag_indices_from' +] + + +def _ix__dispatcher(*args): + return args + + +@array_function_dispatch(_ix__dispatcher) +def ix_(*args): + """ + Construct an open mesh from multiple sequences. + + This function takes N 1-D sequences and returns N outputs with N + dimensions each, such that the shape is 1 in all but one dimension + and the dimension with the non-unit shape value cycles through all + N dimensions. + + Using `ix_` one can quickly construct index arrays that will index + the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array + ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``. + + Parameters + ---------- + args : 1-D sequences + Each sequence should be of integer or boolean type. + Boolean sequences will be interpreted as boolean masks for the + corresponding dimension (equivalent to passing in + ``np.nonzero(boolean_sequence)``). 
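A one-line check of the boolean-mask equivalence just described (mask chosen arbitrarily):

import numpy as np

mask = np.array([True, False, True])
np.array_equal(np.ix_(mask)[0], np.ix_(np.nonzero(mask)[0])[0])   # True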
+ + Returns + ------- + out : tuple of ndarrays + N arrays with N dimensions each, with N the number of input + sequences. Together these arrays form an open mesh. + + See Also + -------- + ogrid, mgrid, meshgrid + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(10).reshape(2, 5) + >>> a + array([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> ixgrid = np.ix_([0, 1], [2, 4]) + >>> ixgrid + (array([[0], + [1]]), array([[2, 4]])) + >>> ixgrid[0].shape, ixgrid[1].shape + ((2, 1), (1, 2)) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + >>> ixgrid = np.ix_([True, True], [2, 4]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + >>> ixgrid = np.ix_([True, True], [False, False, True, False, True]) + >>> a[ixgrid] + array([[2, 4], + [7, 9]]) + + """ + out = [] + nd = len(args) + for k, new in enumerate(args): + if not isinstance(new, _nx.ndarray): + new = np.asarray(new) + if new.size == 0: + # Explicitly type empty arrays to avoid float default + new = new.astype(_nx.intp) + if new.ndim != 1: + raise ValueError("Cross index must be 1 dimensional") + if issubdtype(new.dtype, _nx.bool): + new, = new.nonzero() + new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1)) + out.append(new) + return tuple(out) + + +class nd_grid: + """ + Construct a multi-dimensional "meshgrid". + + ``grid = nd_grid()`` creates an instance which will return a mesh-grid + when indexed. The dimension and number of the output arrays are equal + to the number of indexing dimensions. If the step length is not a + complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then the + integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + If instantiated with an argument of ``sparse=True``, the mesh-grid is + open (or not fleshed out) so that only one-dimension of each returned + argument is greater than 1. + + Parameters + ---------- + sparse : bool, optional + Whether the grid is sparse or not. Default is False. + + Notes + ----- + Two instances of `nd_grid` are made available in the NumPy namespace, + `mgrid` and `ogrid`, approximately defined as:: + + mgrid = nd_grid(sparse=False) + ogrid = nd_grid(sparse=True) + + Users should use these pre-defined instances instead of using `nd_grid` + directly. 
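A short sketch of the two step conventions described above, plus the dense/sparse contrast (shapes only):

import numpy as np

np.mgrid[0:5:1]    # real step, stop excluded: array([0, 1, 2, 3, 4])
np.mgrid[0:5:3j]   # complex step = point count, stop included:
                   # array([0. , 2.5, 5. ])

np.mgrid[0:2, 0:3].shape                 # dense: (2, 2, 3)
[g.shape for g in np.ogrid[0:2, 0:3]]    # sparse: [(2, 1), (1, 3)]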
+ """ + __slots__ = ('sparse',) + + def __init__(self, sparse=False): + self.sparse = sparse + + def __getitem__(self, key): + try: + size = [] + # Mimic the behavior of `np.arange` and use a data type + # which is at least as large as `np.int_` + num_list = [0] + for k in range(len(key)): + step = key[k].step + start = key[k].start + stop = key[k].stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = abs(step) + size.append(int(step)) + else: + size.append( + math.ceil((stop - start) / step)) + num_list += [start, stop, step] + typ = _nx.result_type(*num_list) + if self.sparse: + nn = [_nx.arange(_x, dtype=_t) + for _x, _t in zip(size, (typ,) * len(size))] + else: + nn = _nx.indices(size, typ) + for k, kk in enumerate(key): + step = kk.step + start = kk.start + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + step = int(abs(step)) + if step != 1: + step = (kk.stop - start) / float(step - 1) + nn[k] = (nn[k] * step + start) + if self.sparse: + slobj = [_nx.newaxis] * len(size) + for k in range(len(size)): + slobj[k] = slice(None, None) + nn[k] = nn[k][tuple(slobj)] + slobj[k] = _nx.newaxis + return tuple(nn) # ogrid -> tuple of arrays + return nn # mgrid -> ndarray + except (IndexError, TypeError): + step = key.step + stop = key.stop + start = key.start + if start is None: + start = 0 + if isinstance(step, (_nx.complexfloating, complex)): + # Prevent the (potential) creation of integer arrays + step_float = abs(step) + step = length = int(step_float) + if step != 1: + step = (key.stop - start) / float(step - 1) + typ = _nx.result_type(start, stop, step_float) + return _nx.arange(0, length, 1, dtype=typ) * step + start + else: + return _nx.arange(start, stop, step) + + +class MGridClass(nd_grid): + """ + An instance which returns a dense multi-dimensional "meshgrid". + + An instance which returns a dense (or fleshed out) mesh-grid + when indexed, so that each returned argument has the same shape. + The dimensions and number of the output arrays are equal to the + number of indexing dimensions. If the step length is not a complex + number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid : ndarray + A single array, containing a set of `ndarray`\\ s all of the same + dimensions. stacked along the first axis. + + See Also + -------- + ogrid : like `mgrid` but returns open (not fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> import numpy as np + >>> np.mgrid[0:5, 0:5] + array([[[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4]], + [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]]]) + >>> np.mgrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + + >>> np.mgrid[0:4].shape + (4,) + >>> np.mgrid[0:4, 0:5].shape + (2, 4, 5) + >>> np.mgrid[0:4, 0:5, 0:6].shape + (3, 4, 5, 6) + + """ + __slots__ = () + + def __init__(self): + super().__init__(sparse=False) + + +mgrid = MGridClass() + + +class OGridClass(nd_grid): + """ + An instance which returns an open multi-dimensional "meshgrid". 
+ + An instance which returns an open (i.e. not fleshed out) mesh-grid + when indexed, so that only one dimension of each returned array is + greater than 1. The dimension and number of the output arrays are + equal to the number of indexing dimensions. If the step length is + not a complex number, then the stop is not inclusive. + + However, if the step length is a **complex number** (e.g. 5j), then + the integer part of its magnitude is interpreted as specifying the + number of points to create between the start and stop values, where + the stop value **is inclusive**. + + Returns + ------- + mesh-grid : ndarray or tuple of ndarrays + If the input is a single slice, returns an array. + If the input is multiple slices, returns a tuple of arrays, with + only one dimension not equal to 1. + + See Also + -------- + mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids + meshgrid: return coordinate matrices from coordinate vectors + r_ : array concatenator + :ref:`how-to-partition` + + Examples + -------- + >>> from numpy import ogrid + >>> ogrid[-1:1:5j] + array([-1. , -0.5, 0. , 0.5, 1. ]) + >>> ogrid[0:5, 0:5] + (array([[0], + [1], + [2], + [3], + [4]]), + array([[0, 1, 2, 3, 4]])) + + """ + __slots__ = () + + def __init__(self): + super().__init__(sparse=True) + + +ogrid = OGridClass() + + +class AxisConcatenator: + """ + Translates slice objects to concatenation along an axis. + + For detailed documentation on usage, see `r_`. + """ + __slots__ = ('axis', 'matrix', 'ndmin', 'trans1d') + + # allow ma.mr_ to override this + concatenate = staticmethod(_nx.concatenate) + makemat = staticmethod(matrixlib.matrix) + + def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1): + self.axis = axis + self.matrix = matrix + self.trans1d = trans1d + self.ndmin = ndmin + + def __getitem__(self, key): + # handle matrix builder syntax + if isinstance(key, str): + frame = sys._getframe().f_back + mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals) + return mymat + + if not isinstance(key, tuple): + key = (key,) + + # copy attributes, since they can be overridden in the first argument + trans1d = self.trans1d + ndmin = self.ndmin + matrix = self.matrix + axis = self.axis + + objs = [] + # dtypes or scalars for weak scalar handling in result_type + result_type_objs = [] + + for k, item in enumerate(key): + scalar = False + if isinstance(item, slice): + step = item.step + start = item.start + stop = item.stop + if start is None: + start = 0 + if step is None: + step = 1 + if isinstance(step, (_nx.complexfloating, complex)): + size = int(abs(step)) + newobj = linspace(start, stop, num=size) + else: + newobj = _nx.arange(start, stop, step) + if ndmin > 1: + newobj = array(newobj, copy=None, ndmin=ndmin) + if trans1d != -1: + newobj = newobj.swapaxes(-1, trans1d) + elif isinstance(item, str): + if k != 0: + raise ValueError("special directives must be the " + "first entry.") + if item in ('r', 'c'): + matrix = True + col = (item == 'c') + continue + if ',' in item: + vec = item.split(',') + try: + axis, ndmin = [int(x) for x in vec[:2]] + if len(vec) == 3: + trans1d = int(vec[2]) + continue + except Exception as e: + raise ValueError( + f"unknown special directive {item!r}" + ) from e + try: + axis = int(item) + continue + except (ValueError, TypeError) as e: + raise ValueError("unknown special directive") from e + elif type(item) in ScalarType: + scalar = True + newobj = item + else: + item_ndim = np.ndim(item) + newobj = array(item, copy=None, subok=True, ndmin=ndmin) + if trans1d 
!= -1 and item_ndim < ndmin: + k2 = ndmin - item_ndim + k1 = trans1d + if k1 < 0: + k1 += k2 + 1 + defaxes = list(range(ndmin)) + axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2] + newobj = newobj.transpose(axes) + + objs.append(newobj) + if scalar: + result_type_objs.append(item) + else: + result_type_objs.append(newobj.dtype) + + # Ensure that scalars won't up-cast unless warranted, for 0, drops + # through to error in concatenate. + if len(result_type_objs) != 0: + final_dtype = _nx.result_type(*result_type_objs) + # concatenate could do cast, but that can be overridden: + objs = [array(obj, copy=None, subok=True, + ndmin=ndmin, dtype=final_dtype) for obj in objs] + + res = self.concatenate(tuple(objs), axis=axis) + + if matrix: + oldndim = res.ndim + res = self.makemat(res) + if oldndim == 1 and col: + res = res.T + return res + + def __len__(self): + return 0 + +# separate classes are used here instead of just making r_ = concatenator(0), +# etc. because otherwise we couldn't get the doc string to come out right +# in help(r_) + + +class RClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the first axis. + + This is a simple way to build up arrays quickly. There are two use cases. + + 1. If the index expression contains comma separated arrays, then stack + them along their first axis. + 2. If the index expression contains slice notation or scalars then create + a 1-D array with a range indicated by the slice notation. + + If slice notation is used, the syntax ``start:stop:step`` is equivalent + to ``np.arange(start, stop, step)`` inside of the brackets. However, if + ``step`` is an imaginary number (i.e. 100j) then its integer portion is + interpreted as a number-of-points desired and the start and stop are + inclusive. In other words ``start:stop:stepj`` is interpreted as + ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets. + After expansion of slice notation, all comma separated sequences are + concatenated together. + + Optional character strings placed as the first element of the index + expression can be used to change the output. The strings 'r' or 'c' result + in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row) + matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1 + (column) matrix is produced. If the result is 2-D then both provide the + same matrix result. + + A string integer specifies which axis to stack multiple comma separated + arrays along. A string of two comma-separated integers allows indication + of the minimum number of dimensions to force each entry into as the + second integer (the axis to concatenate along is still the first integer). + + A string with three comma-separated integers allows specification of the + axis to concatenate along, the minimum number of dimensions to force the + entries to, and which axis should contain the start of the arrays which + are less than the specified number of dimensions. In other words the third + integer allows you to specify where the 1's should be placed in the shape + of the arrays that have their shapes upgraded. By default, they are placed + in the front of the shape tuple. The third argument allows you to specify + where the start of the array should be instead. Thus, a third argument of + '0' would place the 1's at the end of the array shape. Negative integers + specify where in the new shape tuple the last dimension of upgraded arrays + should be placed, so the default is '-1'. 
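The third integer is easiest to see in shapes; this sketch restates the rule just described with a single 1-D entry:

import numpy as np

np.r_['0,2,-1', [1, 2, 3]].shape   # (1, 3): padding 1 in front (default)
np.r_['0,2,0', [1, 2, 3]].shape    # (3, 1): padding 1 at the end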
+ + Parameters + ---------- + Not a function, so takes no parameters + + + Returns + ------- + A concatenated ndarray or matrix. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + c_ : Translates slice objects to concatenation along the second axis. + + Examples + -------- + >>> import numpy as np + >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] + array([1, 2, 3, ..., 4, 5, 6]) + >>> np.r_[-1:1:6j, [0]*3, 5, 6] + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ]) + + String integers specify the axis to concatenate along or the minimum + number of dimensions to force entries into. + + >>> a = np.array([[0, 1, 2], [3, 4, 5]]) + >>> np.r_['-1', a, a] # concatenate along last axis + array([[0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5]]) + >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2 + array([[1, 2, 3], + [4, 5, 6]]) + + >>> np.r_['0,2,0', [1,2,3], [4,5,6]] + array([[1], + [2], + [3], + [4], + [5], + [6]]) + >>> np.r_['1,2,0', [1,2,3], [4,5,6]] + array([[1, 4], + [2, 5], + [3, 6]]) + + Using 'r' or 'c' as a first string argument creates a matrix. + + >>> np.r_['r',[1,2,3], [4,5,6]] + matrix([[1, 2, 3, 4, 5, 6]]) + + """ + __slots__ = () + + def __init__(self): + AxisConcatenator.__init__(self, 0) + + +r_ = RClass() + + +class CClass(AxisConcatenator): + """ + Translates slice objects to concatenation along the second axis. + + This is short-hand for ``np.r_['-1,2,0', index expression]``, which is + useful because of its common occurrence. In particular, arrays will be + stacked along their last axis after being upgraded to at least 2-D with + 1's post-pended to the shape (column vectors made out of 1-D arrays). + + See Also + -------- + column_stack : Stack 1-D arrays as columns into a 2-D array. + r_ : For more detailed documentation. + + Examples + -------- + >>> import numpy as np + >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] + array([[1, 4], + [2, 5], + [3, 6]]) + >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] + array([[1, 2, 3, ..., 4, 5, 6]]) + + """ + __slots__ = () + + def __init__(self): + AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) + + +c_ = CClass() + + +@set_module('numpy') +class ndenumerate: + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values. + + Parameters + ---------- + arr : ndarray + Input array. + + See Also + -------- + ndindex, flatiter + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> for index, x in np.ndenumerate(a): + ... print(index, x) + (0, 0) 1 + (0, 1) 2 + (1, 0) 3 + (1, 1) 4 + + """ + + def __init__(self, arr): + self.iter = np.asarray(arr).flat + + def __next__(self): + """ + Standard iterator method, returns the index tuple and array value. + + Returns + ------- + coords : tuple of ints + The indices of the current iteration. + val : scalar + The array element of the current iteration. + + """ + return self.iter.coords, next(self.iter) + + def __iter__(self): + return self + + +@set_module('numpy') +class ndindex: + """ + An N-dimensional iterator object to index arrays. + + Given the shape of an array, an `ndindex` instance iterates over + the N-dimensional index of the array. At each iteration a tuple + of indices is returned, the last dimension is iterated over first. 
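Concretely, "the last dimension is iterated over first" means C order; compare (tiny shape for brevity):

import numpy as np

list(np.ndindex(2, 2))   # [(0, 0), (0, 1), (1, 0), (1, 1)]
# — the same order in which np.arange(4).reshape(2, 2) is laid out.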
+ + Parameters + ---------- + shape : ints, or a single tuple of ints + The size of each dimension of the array can be passed as + individual parameters or as the elements of a tuple. + + See Also + -------- + ndenumerate, flatiter + + Examples + -------- + >>> import numpy as np + + Dimensions as individual arguments + + >>> for index in np.ndindex(3, 2, 1): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + Same dimensions - but in a tuple ``(3, 2, 1)`` + + >>> for index in np.ndindex((3, 2, 1)): + ... print(index) + (0, 0, 0) + (0, 1, 0) + (1, 0, 0) + (1, 1, 0) + (2, 0, 0) + (2, 1, 0) + + """ + + def __init__(self, *shape): + if len(shape) == 1 and isinstance(shape[0], tuple): + shape = shape[0] + x = as_strided(_nx.zeros(1), shape=shape, + strides=_nx.zeros_like(shape)) + self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'], + order='C') + + def __iter__(self): + return self + + def ndincr(self): + """ + Increment the multi-dimensional index by one. + + This method is for backward compatibility only: do not use. + + .. deprecated:: 1.20.0 + This method has been advised against since numpy 1.8.0, but only + started emitting DeprecationWarning as of this version. + """ + # NumPy 1.20.0, 2020-09-08 + warnings.warn( + "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead", + DeprecationWarning, stacklevel=2) + next(self) + + def __next__(self): + """ + Standard iterator method, updates the index and returns the index + tuple. + + Returns + ------- + val : tuple of ints + Returns a tuple containing the indices of the current + iteration. + + """ + next(self._it) + return self._it.multi_index + + +# You can do all this with slice() plus a few special objects, +# but there's a lot to remember. This version is simpler because +# it uses the standard array indexing syntax. +# +# Written by Konrad Hinsen +# last revision: 1999-7-23 +# +# Cosmetic changes by T. Oliphant 2001 +# +# + +class IndexExpression: + """ + A nicer way to build up index tuples for arrays. + + .. note:: + Use one of the two predefined instances ``index_exp`` or `s_` + rather than directly using `IndexExpression`. + + For any index combination, including slicing and axis insertion, + ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any + array `a`. However, ``np.index_exp[indices]`` can be used anywhere + in Python code and returns a tuple of slice objects that can be + used in the construction of complex index expressions. + + Parameters + ---------- + maketuple : bool + If True, always returns a tuple. + + See Also + -------- + s_ : Predefined instance without tuple conversion: + `s_ = IndexExpression(maketuple=False)`. + The ``index_exp`` is another predefined instance that + always returns a tuple: + `index_exp = IndexExpression(maketuple=True)`. + + Notes + ----- + You can do all this with :class:`slice` plus a few special objects, + but there's a lot to remember and this version is simpler because + it uses the standard array indexing syntax. 
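The "usable anywhere in Python code" point in practice: a slice expression stored once and applied to arrays of different sizes (names are illustrative):

import numpy as np

window = np.s_[1:-1, 1:-1]           # interior of a 2-D grid
a, b = np.zeros((4, 4)), np.ones((5, 5))
a[window].shape, b[window].shape     # ((2, 2), (3, 3))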
+ + Examples + -------- + >>> import numpy as np + >>> np.s_[2::2] + slice(2, None, 2) + >>> np.index_exp[2::2] + (slice(2, None, 2),) + + >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]] + array([2, 4]) + + """ + __slots__ = ('maketuple',) + + def __init__(self, maketuple): + self.maketuple = maketuple + + def __getitem__(self, item): + if self.maketuple and not isinstance(item, tuple): + return (item,) + else: + return item + + +index_exp = IndexExpression(maketuple=True) +s_ = IndexExpression(maketuple=False) + +# End contribution from Konrad. + + +# The following functions complement those in twodim_base, but are +# applicable to N-dimensions. + + +def _fill_diagonal_dispatcher(a, val, wrap=None): + return (a,) + + +@array_function_dispatch(_fill_diagonal_dispatcher) +def fill_diagonal(a, val, wrap=False): + """Fill the main diagonal of the given array of any dimensionality. + + For an array `a` with ``a.ndim >= 2``, the diagonal is the list of + values ``a[i, ..., i]`` with indices ``i`` all identical. This function + modifies the input array in-place without returning a value. + + Parameters + ---------- + a : array, at least 2-D. + Array whose diagonal is to be filled in-place. + val : scalar or array_like + Value(s) to write on the diagonal. If `val` is scalar, the value is + written along the diagonal. If array-like, the flattened `val` is + written along the diagonal, repeating if necessary to fill all + diagonal entries. + + wrap : bool + For tall matrices in NumPy version up to 1.6.2, the + diagonal "wrapped" after N columns. You can have this behavior + with this option. This affects only tall matrices. + + See also + -------- + diag_indices, diag_indices_from + + Notes + ----- + This functionality can be obtained via `diag_indices`, but internally + this version uses a much faster implementation that never constructs the + indices and uses simple slicing. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), int) + >>> np.fill_diagonal(a, 5) + >>> a + array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + + The same function can operate on a 4-D array: + + >>> a = np.zeros((3, 3, 3, 3), int) + >>> np.fill_diagonal(a, 4) + + We only show a few blocks for clarity: + + >>> a[0, 0] + array([[4, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + >>> a[1, 1] + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 0]]) + >>> a[2, 2] + array([[0, 0, 0], + [0, 0, 0], + [0, 0, 4]]) + + The wrap option affects only tall matrices: + + >>> # tall matrices no wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [0, 0, 0]]) + + >>> # tall matrices wrap + >>> a = np.zeros((5, 3), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0], + [0, 4, 0], + [0, 0, 4], + [0, 0, 0], + [4, 0, 0]]) + + >>> # wide matrices + >>> a = np.zeros((3, 5), int) + >>> np.fill_diagonal(a, 4, wrap=True) + >>> a + array([[4, 0, 0, 0, 0], + [0, 4, 0, 0, 0], + [0, 0, 4, 0, 0]]) + + The anti-diagonal can be filled by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.zeros((3, 3), int); + >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip + >>> a + array([[0, 0, 1], + [0, 2, 0], + [3, 0, 0]]) + >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip + >>> a + array([[0, 0, 3], + [0, 2, 0], + [1, 0, 0]]) + + Note that the order in which the diagonal is filled varies depending + on the flip function. 
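One behaviour worth seeing once: an array `val` shorter than the diagonal repeats, per the parameter description above:

import numpy as np

a = np.zeros((4, 4), int)
np.fill_diagonal(a, [1, 2])   # val cycles along the diagonal
a.diagonal()                  # array([1, 2, 1, 2])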
+ """ + if a.ndim < 2: + raise ValueError("array must be at least 2-d") + end = None + if a.ndim == 2: + # Explicit, fast formula for the common case. For 2-d arrays, we + # accept rectangular ones. + step = a.shape[1] + 1 + # This is needed to don't have tall matrix have the diagonal wrap. + if not wrap: + end = a.shape[1] * a.shape[1] + else: + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. + if not np.all(diff(a.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + step = 1 + (np.cumprod(a.shape[:-1])).sum() + + # Write the value out into the diagonal. + a.flat[:end:step] = val + + +@set_module('numpy') +def diag_indices(n, ndim=2): + """ + Return the indices to access the main diagonal of an array. + + This returns a tuple of indices that can be used to access the main + diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape + (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for + ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]`` + for ``i = [0..n-1]``. + + Parameters + ---------- + n : int + The size, along each dimension, of the arrays for which the returned + indices can be used. + + ndim : int, optional + The number of dimensions. + + See Also + -------- + diag_indices_from + + Examples + -------- + >>> import numpy as np + + Create a set of indices to access the diagonal of a (4, 4) array: + + >>> di = np.diag_indices(4) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + >>> a[di] = 100 + >>> a + array([[100, 1, 2, 3], + [ 4, 100, 6, 7], + [ 8, 9, 100, 11], + [ 12, 13, 14, 100]]) + + Now, we create indices to manipulate a 3-D array: + + >>> d3 = np.diag_indices(2, 3) + >>> d3 + (array([0, 1]), array([0, 1]), array([0, 1])) + + And use it to set the diagonal of an array of zeros to 1: + + >>> a = np.zeros((2, 2, 2), dtype=int) + >>> a[d3] = 1 + >>> a + array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + + """ + idx = np.arange(n) + return (idx,) * ndim + + +def _diag_indices_from(arr): + return (arr,) + + +@array_function_dispatch(_diag_indices_from) +def diag_indices_from(arr): + """ + Return the indices to access the main diagonal of an n-dimensional array. + + See `diag_indices` for full details. + + Parameters + ---------- + arr : array, at least 2-D + + See Also + -------- + diag_indices + + Examples + -------- + >>> import numpy as np + + Create a 4 by 4 array. + + >>> a = np.arange(16).reshape(4, 4) + >>> a + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11], + [12, 13, 14, 15]]) + + Get the indices of the diagonal elements. + + >>> di = np.diag_indices_from(a) + >>> di + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + >>> a[di] + array([ 0, 5, 10, 15]) + + This is simply syntactic sugar for diag_indices. + + >>> np.diag_indices(a.shape[0]) + (array([0, 1, 2, 3]), array([0, 1, 2, 3])) + + """ + + if not arr.ndim >= 2: + raise ValueError("input array must be at least 2-d") + # For more than d=2, the strided formula is only valid for arrays with + # all dimensions equal, so we check first. 
+ if not np.all(diff(arr.shape) == 0): + raise ValueError("All dimensions of input must be of equal length") + + return diag_indices(arr.shape[0], arr.ndim) diff --git a/python/numpy/lib/_index_tricks_impl.pyi b/python/numpy/lib/_index_tricks_impl.pyi new file mode 100644 index 000000000..c6b06ddb8 --- /dev/null +++ b/python/numpy/lib/_index_tricks_impl.pyi @@ -0,0 +1,208 @@ +from collections.abc import Sequence +from typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload +from typing import Literal as L + +from _typeshed import Incomplete +from typing_extensions import TypeVar, deprecated + +import numpy as np +from numpy._core.multiarray import ravel_multi_index, unravel_index +from numpy._typing import ( + ArrayLike, + NDArray, + _AnyShape, + _FiniteNestedSequence, + _NestedSequence, + _SupportsArray, + _SupportsDType, +) + +__all__ = [ # noqa: RUF022 + "ravel_multi_index", + "unravel_index", + "mgrid", + "ogrid", + "r_", + "c_", + "s_", + "index_exp", + "ix_", + "ndenumerate", + "ndindex", + "fill_diagonal", + "diag_indices", + "diag_indices_from", +] + +### + +_T = TypeVar("_T") +_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...]) +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) +_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True) + +_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True) +_MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True) +_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True) +_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True) + +### + +class ndenumerate(Generic[_ScalarT_co]): + @overload + def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ... + @overload + def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ... + @overload + def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ... + @overload + def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ... + @overload + def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ... + @overload + def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ... + @overload + def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ... + @overload + def __new__(cls, arr: object) -> ndenumerate[Any]: ... + + # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) + @overload + def __next__( + self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64], + /, + ) -> tuple[_AnyShape, _ScalarT_co]: ... + @overload + def __next__(self: ndenumerate[np.object_], /) -> tuple[_AnyShape, Incomplete]: ... + @overload + def __next__(self, /) -> tuple[_AnyShape, _ScalarT_co]: ... + + # + def __iter__(self) -> Self: ... + +class ndindex: + @overload + def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ... + @overload + def __init__(self, /, *shape: SupportsIndex) -> None: ... + + # + def __iter__(self) -> Self: ... + def __next__(self) -> _AnyShape: ... + + # + @deprecated("Deprecated since 1.20.0.") + def ndincr(self, /) -> None: ... 
+ +class nd_grid(Generic[_BoolT_co]): + __slots__ = ("sparse",) + + sparse: _BoolT_co + def __init__(self, sparse: _BoolT_co = ...) -> None: ... + @overload + def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ... + @overload + def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Incomplete], ...]: ... + +@final +class MGridClass(nd_grid[L[False]]): + __slots__ = () + + def __init__(self) -> None: ... + +@final +class OGridClass(nd_grid[L[True]]): + __slots__ = () + + def __init__(self) -> None: ... + +class AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]): + __slots__ = "axis", "matrix", "ndmin", "trans1d" + + makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype]]] + + axis: _AxisT_co + matrix: _MatrixT_co + ndmin: _NDMinT_co + trans1d: _Trans1DT_co + + # + def __init__( + self, + /, + axis: _AxisT_co = ..., + matrix: _MatrixT_co = ..., + ndmin: _NDMinT_co = ..., + trans1d: _Trans1DT_co = ..., + ) -> None: ... + + # TODO(jorenham): annotate this + def __getitem__(self, key: Incomplete, /) -> Incomplete: ... + def __len__(self, /) -> L[0]: ... + + # + @staticmethod + @overload + def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ... + @staticmethod + @overload + def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ... + +@final +class RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]): + __slots__ = () + + def __init__(self, /) -> None: ... + +@final +class CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]): + __slots__ = () + + def __init__(self, /) -> None: ... + +class IndexExpression(Generic[_BoolT_co]): + __slots__ = ("maketuple",) + + maketuple: _BoolT_co + def __init__(self, maketuple: _BoolT_co) -> None: ... + @overload + def __getitem__(self, item: _TupleT) -> _TupleT: ... + @overload + def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ... + @overload + def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ... + +@overload +def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ... +@overload +def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ... +@overload +def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ... +@overload +def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ... +@overload +def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ... +@overload +def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ... +@overload +def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ... + +# +def fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ... + +# +def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ... +def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ... + +# +mgrid: Final[MGridClass] = ... +ogrid: Final[OGridClass] = ... + +r_: Final[RClass] = ... +c_: Final[CClass] = ... + +index_exp: Final[IndexExpression[L[True]]] = ... +s_: Final[IndexExpression[L[False]]] = ... diff --git a/python/numpy/lib/_iotools.py b/python/numpy/lib/_iotools.py new file mode 100644 index 000000000..3586b41de --- /dev/null +++ b/python/numpy/lib/_iotools.py @@ -0,0 +1,900 @@ +"""A collection of functions designed to help I/O with ascii files. 
+ +""" +__docformat__ = "restructuredtext en" + +import itertools + +import numpy as np +import numpy._core.numeric as nx +from numpy._utils import asbytes, asunicode + + +def _decode_line(line, encoding=None): + """Decode bytes from binary input streams. + + Defaults to decoding from 'latin1'. + + Parameters + ---------- + line : str or bytes + Line to be decoded. + encoding : str + Encoding used to decode `line`. + + Returns + ------- + decoded_line : str + + """ + if type(line) is bytes: + if encoding is None: + encoding = "latin1" + line = line.decode(encoding) + + return line + + +def _is_string_like(obj): + """ + Check whether obj behaves like a string. + """ + try: + obj + '' + except (TypeError, ValueError): + return False + return True + + +def _is_bytes_like(obj): + """ + Check whether obj behaves like a bytes object. + """ + try: + obj + b'' + except (TypeError, ValueError): + return False + return True + + +def has_nested_fields(ndtype): + """ + Returns whether one or several fields of a dtype are nested. + + Parameters + ---------- + ndtype : dtype + Data-type of a structured array. + + Raises + ------ + AttributeError + If `ndtype` does not have a `names` attribute. + + Examples + -------- + >>> import numpy as np + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) + >>> np.lib._iotools.has_nested_fields(dt) + False + + """ + return any(ndtype[name].names is not None for name in ndtype.names or ()) + + +def flatten_dtype(ndtype, flatten_base=False): + """ + Unpack a structured data-type by collapsing nested fields and/or fields + with a shape. + + Note that the field names are lost. + + Parameters + ---------- + ndtype : dtype + The datatype to collapse + flatten_base : bool, optional + If True, transform a field with a shape into several fields. Default is + False. + + Examples + -------- + >>> import numpy as np + >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ... ('block', int, (2, 3))]) + >>> np.lib._iotools.flatten_dtype(dt) + [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] + >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) + [dtype('S4'), + dtype('float64'), + dtype('float64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64')] + + """ + names = ndtype.names + if names is None: + if flatten_base: + return [ndtype.base] * int(np.prod(ndtype.shape)) + return [ndtype.base] + else: + types = [] + for field in names: + info = ndtype.fields[field] + flat_dt = flatten_dtype(info[0], flatten_base) + types.extend(flat_dt) + return types + + +class LineSplitter: + """ + Object to split a string at a given delimiter or at given places. + + Parameters + ---------- + delimiter : str, int, or sequence of ints, optional + If a string, character used to delimit consecutive fields. + If an integer or a sequence of integers, width(s) of each field. + comments : str, optional + Character used to mark the beginning of a comment. Default is '#'. + autostrip : bool, optional + Whether to strip each individual field. Default is True. + + """ + + def autostrip(self, method): + """ + Wrapper to strip each member of the output of `method`. + + Parameters + ---------- + method : function + Function that takes a single argument and returns a sequence of + strings. + + Returns + ------- + wrapped : function + The result of wrapping `method`. `wrapped` takes a single input + argument and returns a list of strings that are stripped of + white-space. 
+ + """ + return lambda input: [_.strip() for _ in method(input)] + + def __init__(self, delimiter=None, comments='#', autostrip=True, + encoding=None): + delimiter = _decode_line(delimiter) + comments = _decode_line(comments) + + self.comments = comments + + # Delimiter is a character + if (delimiter is None) or isinstance(delimiter, str): + delimiter = delimiter or None + _handyman = self._delimited_splitter + # Delimiter is a list of field widths + elif hasattr(delimiter, '__iter__'): + _handyman = self._variablewidth_splitter + idx = np.cumsum([0] + list(delimiter)) + delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)] + # Delimiter is a single integer + elif int(delimiter): + (_handyman, delimiter) = ( + self._fixedwidth_splitter, int(delimiter)) + else: + (_handyman, delimiter) = (self._delimited_splitter, None) + self.delimiter = delimiter + if autostrip: + self._handyman = self.autostrip(_handyman) + else: + self._handyman = _handyman + self.encoding = encoding + + def _delimited_splitter(self, line): + """Chop off comments, strip, and split at delimiter. """ + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip(" \r\n") + if not line: + return [] + return line.split(self.delimiter) + + def _fixedwidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + line = line.strip("\r\n") + if not line: + return [] + fixed = self.delimiter + slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)] + return [line[s] for s in slices] + + def _variablewidth_splitter(self, line): + if self.comments is not None: + line = line.split(self.comments)[0] + if not line: + return [] + slices = self.delimiter + return [line[s] for s in slices] + + def __call__(self, line): + return self._handyman(_decode_line(line, self.encoding)) + + +class NameValidator: + """ + Object to validate a list of strings to use as field names. + + The strings are stripped of any non alphanumeric character, and spaces + are replaced by '_'. During instantiation, the user can define a list + of names to exclude, as well as a list of invalid characters. Names in + the exclusion list are appended a '_' character. + + Once an instance has been created, it can be called with a list of + names, and a list of valid names will be created. The `__call__` + method accepts an optional keyword "default" that sets the default name + in case of ambiguity. By default this is 'f', so that names will + default to `f0`, `f1`, etc. + + Parameters + ---------- + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default + list ['return', 'file', 'print']. Excluded names are appended an + underscore: for example, `file` becomes `file_` if supplied. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + case_sensitive : {True, False, 'upper', 'lower'}, optional + * If True, field names are case-sensitive. + * If False or 'upper', field names are converted to upper case. + * If 'lower', field names are converted to lower case. + + The default value is True. + replace_space : '_', optional + Character(s) used in replacement of white spaces. + + Notes + ----- + Calling an instance of `NameValidator` is the same as calling its + method `validate`. 
+ + Examples + -------- + >>> import numpy as np + >>> validator = np.lib._iotools.NameValidator() + >>> validator(['file', 'field2', 'with space', 'CaSe']) + ('file_', 'field2', 'with_space', 'CaSe') + + >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], + ... deletechars='q', + ... case_sensitive=False) + >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) + ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') + + """ + + defaultexcludelist = 'return', 'file', 'print' + defaultdeletechars = frozenset(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + + def __init__(self, excludelist=None, deletechars=None, + case_sensitive=None, replace_space='_'): + # Process the exclusion list .. + if excludelist is None: + excludelist = [] + excludelist.extend(self.defaultexcludelist) + self.excludelist = excludelist + # Process the list of characters to delete + if deletechars is None: + delete = set(self.defaultdeletechars) + else: + delete = set(deletechars) + delete.add('"') + self.deletechars = delete + # Process the case option ..... + if (case_sensitive is None) or (case_sensitive is True): + self.case_converter = lambda x: x + elif (case_sensitive is False) or case_sensitive.startswith('u'): + self.case_converter = lambda x: x.upper() + elif case_sensitive.startswith('l'): + self.case_converter = lambda x: x.lower() + else: + msg = f'unrecognized case_sensitive value {case_sensitive}.' + raise ValueError(msg) + + self.replace_space = replace_space + + def validate(self, names, defaultfmt="f%i", nbfields=None): + """ + Validate a list of strings as field names for a structured array. + + Parameters + ---------- + names : sequence of str + Strings to be validated. + defaultfmt : str, optional + Default format string, used if validating a given string + reduces its length to zero. + nbfields : integer, optional + Final number of validated names, used to expand or shrink the + initial list of names. + + Returns + ------- + validatednames : list of str + The list of validated field names. + + Notes + ----- + A `NameValidator` instance can be called directly, which is the + same as calling `validate`. For examples, see `NameValidator`. + + """ + # Initial checks .............. + if (names is None): + if (nbfields is None): + return None + names = [] + if isinstance(names, str): + names = [names, ] + if nbfields is not None: + nbnames = len(names) + if (nbnames < nbfields): + names = list(names) + [''] * (nbfields - nbnames) + elif (nbnames > nbfields): + names = names[:nbfields] + # Set some shortcuts ........... + deletechars = self.deletechars + excludelist = self.excludelist + case_converter = self.case_converter + replace_space = self.replace_space + # Initializes some variables ... 
+ validatednames = [] + seen = {} + nbempty = 0 + + for item in names: + item = case_converter(item).strip() + if replace_space: + item = item.replace(' ', replace_space) + item = ''.join([c for c in item if c not in deletechars]) + if item == '': + item = defaultfmt % nbempty + while item in names: + nbempty += 1 + item = defaultfmt % nbempty + nbempty += 1 + elif item in excludelist: + item += '_' + cnt = seen.get(item, 0) + if cnt > 0: + validatednames.append(item + '_%d' % cnt) + else: + validatednames.append(item) + seen[item] = cnt + 1 + return tuple(validatednames) + + def __call__(self, names, defaultfmt="f%i", nbfields=None): + return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields) + + +def str2bool(value): + """ + Tries to transform a string supposed to represent a boolean to a boolean. + + Parameters + ---------- + value : str + The string that is transformed to a boolean. + + Returns + ------- + boolval : bool + The boolean representation of `value`. + + Raises + ------ + ValueError + If the string is not 'True' or 'False' (case independent) + + Examples + -------- + >>> import numpy as np + >>> np.lib._iotools.str2bool('TRUE') + True + >>> np.lib._iotools.str2bool('false') + False + + """ + value = value.upper() + if value == 'TRUE': + return True + elif value == 'FALSE': + return False + else: + raise ValueError("Invalid boolean") + + +class ConverterError(Exception): + """ + Exception raised when an error occurs in a converter for string values. + + """ + pass + + +class ConverterLockError(ConverterError): + """ + Exception raised when an attempt is made to upgrade a locked converter. + + """ + pass + + +class ConversionWarning(UserWarning): + """ + Warning issued when a string converter has a problem. + + Notes + ----- + In `genfromtxt` a `ConversionWarning` is issued if raising exceptions + is explicitly suppressed with the "invalid_raise" keyword. + + """ + pass + + +class StringConverter: + """ + Factory class for function transforming a string into another object + (int, float). + + After initialization, an instance can be called to transform a string + into another object. If the string is recognized as representing a + missing value, a default value is returned. + + Attributes + ---------- + func : function + Function used for the conversion. + default : any + Default value to return when the input corresponds to a missing + value. + type : type + Type of the output. + _status : int + Integer representing the order of the conversion. + _mapper : sequence of tuples + Sequence of tuples (dtype, function, default value) to evaluate in + order. + _locked : bool + Holds `locked` parameter. + + Parameters + ---------- + dtype_or_func : {None, dtype, function}, optional + If a `dtype`, specifies the input data type, used to define a basic + function and a default value for missing data. For example, when + `dtype` is float, the `func` attribute is set to `float` and the + default value to `np.nan`. If a function, this function is used to + convert a string to another object. In this case, it is recommended + to give an associated default value as input. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, `StringConverter` + tries to supply a reasonable default value. + missing_values : {None, sequence of str}, optional + ``None`` or sequence of strings indicating a missing value. If ``None`` + then missing values are indicated by empty entries. The default is + ``None``. 
+ locked : bool, optional + Whether the StringConverter should be locked to prevent automatic + upgrade or not. Default is False. + + """ + _mapper = [(nx.bool, str2bool, False), + (nx.int_, int, -1),] + + # On 32-bit systems, we need to make sure that we explicitly include + # nx.int64 since ns.int_ is nx.int32. + if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize: + _mapper.append((nx.int64, int, -1)) + + _mapper.extend([(nx.float64, float, nx.nan), + (nx.complex128, complex, nx.nan + 0j), + (nx.longdouble, nx.longdouble, nx.nan), + # If a non-default dtype is passed, fall back to generic + # ones (should only be used for the converter) + (nx.integer, int, -1), + (nx.floating, float, nx.nan), + (nx.complexfloating, complex, nx.nan + 0j), + # Last, try with the string types (must be last, because + # `_mapper[-1]` is used as default in some cases) + (nx.str_, asunicode, '???'), + (nx.bytes_, asbytes, '???'), + ]) + + @classmethod + def _getdtype(cls, val): + """Returns the dtype of the input variable.""" + return np.array(val).dtype + + @classmethod + def _getsubdtype(cls, val): + """Returns the type of the dtype of the input variable.""" + return np.array(val).dtype.type + + @classmethod + def _dtypeortype(cls, dtype): + """Returns dtype for datetime64 and type of dtype otherwise.""" + + # This is a bit annoying. We want to return the "general" type in most + # cases (ie. "string" rather than "S10"), but we want to return the + # specific type for datetime64 (ie. "datetime64[us]" rather than + # "datetime64"). + if dtype.type == np.datetime64: + return dtype + return dtype.type + + @classmethod + def upgrade_mapper(cls, func, default=None): + """ + Upgrade the mapper of a StringConverter by adding a new function and + its corresponding default. + + The input function (or sequence of functions) and its associated + default value (if any) is inserted in penultimate position of the + mapper. The corresponding type is estimated from the dtype of the + default value. 
+ + Parameters + ---------- + func : var + Function, or sequence of functions + + Examples + -------- + >>> import dateutil.parser + >>> import datetime + >>> dateparser = dateutil.parser.parse + >>> defaultdate = datetime.date(2000, 1, 1) + >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) + """ + # Func is a single functions + if callable(func): + cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) + return + elif hasattr(func, '__iter__'): + if isinstance(func[0], (tuple, list)): + for _ in func: + cls._mapper.insert(-1, _) + return + if default is None: + default = [None] * len(func) + else: + default = list(default) + default.append([None] * (len(func) - len(default))) + for fct, dft in zip(func, default): + cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft)) + + @classmethod + def _find_map_entry(cls, dtype): + # if a converter for the specific dtype is available use that + for i, (deftype, func, default_def) in enumerate(cls._mapper): + if dtype.type == deftype: + return i, (deftype, func, default_def) + + # otherwise find an inexact match + for i, (deftype, func, default_def) in enumerate(cls._mapper): + if np.issubdtype(dtype.type, deftype): + return i, (deftype, func, default_def) + + raise LookupError + + def __init__(self, dtype_or_func=None, default=None, missing_values=None, + locked=False): + # Defines a lock for upgrade + self._locked = bool(locked) + # No input dtype: minimal initialization + if dtype_or_func is None: + self.func = str2bool + self._status = 0 + self.default = default or False + dtype = np.dtype('bool') + else: + # Is the input a np.dtype ? + try: + self.func = None + dtype = np.dtype(dtype_or_func) + except TypeError: + # dtype_or_func must be a function, then + if not callable(dtype_or_func): + errmsg = ("The input argument `dtype` is neither a" + " function nor a dtype (got '%s' instead)") + raise TypeError(errmsg % type(dtype_or_func)) + # Set the function + self.func = dtype_or_func + # If we don't have a default, try to guess it or set it to + # None + if default is None: + try: + default = self.func('0') + except ValueError: + default = None + dtype = self._getdtype(default) + + # find the best match in our mapper + try: + self._status, (_, func, default_def) = self._find_map_entry(dtype) + except LookupError: + # no match + self.default = default + _, func, _ = self._mapper[-1] + self._status = 0 + else: + # use the found default only if we did not already have one + if default is None: + self.default = default_def + else: + self.default = default + + # If the input was a dtype, set the function to the last we saw + if self.func is None: + self.func = func + + # If the status is 1 (int), change the function to + # something more robust. + if self.func == self._mapper[1][1]: + if issubclass(dtype.type, np.uint64): + self.func = np.uint64 + elif issubclass(dtype.type, np.int64): + self.func = np.int64 + else: + self.func = lambda x: int(float(x)) + # Store the list of strings corresponding to missing values. 
+ if missing_values is None: + self.missing_values = {''} + else: + if isinstance(missing_values, str): + missing_values = missing_values.split(",") + self.missing_values = set(list(missing_values) + ['']) + + self._callingfunction = self._strict_call + self.type = self._dtypeortype(dtype) + self._checked = False + self._initial_default = default + + def _loose_call(self, value): + try: + return self.func(value) + except ValueError: + return self.default + + def _strict_call(self, value): + try: + + # We check if we can convert the value using the current function + new_value = self.func(value) + + # In addition to having to check whether func can convert the + # value, we also have to make sure that we don't get overflow + # errors for integers. + if self.func is int: + try: + np.array(value, dtype=self.type) + except OverflowError: + raise ValueError + + # We're still here so we can now return the new value + return new_value + + except ValueError: + if value.strip() in self.missing_values: + if not self._status: + self._checked = False + return self.default + raise ValueError(f"Cannot convert string '{value}'") + + def __call__(self, value): + return self._callingfunction(value) + + def _do_upgrade(self): + # Raise an exception if we locked the converter... + if self._locked: + errmsg = "Converter is locked and cannot be upgraded" + raise ConverterLockError(errmsg) + _statusmax = len(self._mapper) + # Complains if we try to upgrade by the maximum + _status = self._status + if _status == _statusmax: + errmsg = "Could not find a valid conversion function" + raise ConverterError(errmsg) + elif _status < _statusmax - 1: + _status += 1 + self.type, self.func, default = self._mapper[_status] + self._status = _status + if self._initial_default is not None: + self.default = self._initial_default + else: + self.default = default + + def upgrade(self, value): + """ + Find the best converter for a given string, and return the result. + + The supplied string `value` is converted by testing different + converters in order. First the `func` method of the + `StringConverter` instance is tried, if this fails other available + converters are tried. The order in which these other converters + are tried is determined by the `_status` attribute of the instance. + + Parameters + ---------- + value : str + The string to convert. + + Returns + ------- + out : any + The result of converting `value` with the appropriate converter. + + """ + self._checked = True + try: + return self._strict_call(value) + except ValueError: + self._do_upgrade() + return self.upgrade(value) + + def iterupgrade(self, value): + self._checked = True + if not hasattr(value, '__iter__'): + value = (value,) + _strict_call = self._strict_call + try: + for _m in value: + _strict_call(_m) + except ValueError: + self._do_upgrade() + self.iterupgrade(value) + + def update(self, func, default=None, testing_value=None, + missing_values='', locked=False): + """ + Set StringConverter attributes directly. + + Parameters + ---------- + func : function + Conversion function. + default : any, optional + Value to return by default, that is, when the string to be + converted is flagged as missing. If not given, + `StringConverter` tries to supply a reasonable default value. + testing_value : str, optional + A string representing a standard input value of the converter. + This string is used to help defining a reasonable default + value. + missing_values : {sequence of str, None}, optional + Sequence of strings indicating a missing value. 
If ``None``, then
+            the existing `missing_values` are cleared. The default is ``''``.
+        locked : bool, optional
+            Whether the StringConverter should be locked to prevent
+            automatic upgrade or not. Default is False.
+
+        Notes
+        -----
+        `update` takes the same parameters as the constructor of
+        `StringConverter`, except that `func` does not accept a `dtype`
+        whereas `dtype_or_func` in the constructor does.
+
+        """
+        self.func = func
+        self._locked = locked
+
+        # Don't reset the default to None if we can avoid it
+        if default is not None:
+            self.default = default
+            self.type = self._dtypeortype(self._getdtype(default))
+        else:
+            try:
+                tester = func(testing_value or '1')
+            except (TypeError, ValueError):
+                tester = None
+            self.type = self._dtypeortype(self._getdtype(tester))
+
+        # Add the missing values to the existing set or clear it.
+        if missing_values is None:
+            # Clear all missing values even though the ctor initializes it to
+            # set(['']) when the argument is None.
+            self.missing_values = set()
+        else:
+            if not np.iterable(missing_values):
+                missing_values = [missing_values]
+            if not all(isinstance(v, str) for v in missing_values):
+                raise TypeError("missing_values must be strings or unicode")
+            self.missing_values.update(missing_values)
+
+
+def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
+    """
+    Convenience function to create a `np.dtype` object.
+
+    The function processes the input `dtype` and matches it with the given
+    names.
+
+    Parameters
+    ----------
+    ndtype : var
+        Definition of the dtype. Can be any string or dictionary recognized
+        by the `np.dtype` function, or a sequence of types.
+    names : str or sequence, optional
+        Sequence of strings to use as field names for a structured dtype.
+        For convenience, `names` can be a string of a comma-separated list
+        of names.
+    defaultfmt : str, optional
+        Format string used to define missing names, such as ``"f%i"``
+        (default) or ``"fields_%02i"``.
+    validationargs : optional
+        A series of optional arguments used to initialize a
+        `NameValidator`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.lib._iotools.easy_dtype(float)
+    dtype('float64')
+    >>> np.lib._iotools.easy_dtype("i4, f8")
+    dtype([('f0', '<i4'), ('f1', '<f8')])
+    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+    dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+
+    """
+    def __call__(self, /, line: str | bytes) -> list[str]: ...
+    def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ...
+
+class NameValidator:
+    defaultexcludelist: ClassVar[Sequence[str]]
+    defaultdeletechars: ClassVar[Sequence[str]]
+    excludelist: list[str]
+    deletechars: set[str]
+    case_converter: Callable[[str], str]
+    replace_space: str
+
+    def __init__(
+        self,
+        /,
+        excludelist: Iterable[str] | None = None,
+        deletechars: Iterable[str] | None = None,
+        case_sensitive: Literal["upper", "lower"] | bool | None = None,
+        replace_space: str = "_",
+    ) -> None: ...
+    def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...
+    def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...
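+
+# How these pieces fit together at runtime (a sketch for orientation only;
+# `np.lib._iotools` is a private module, so none of this is public API):
+#
+#     from numpy.lib._iotools import LineSplitter, NameValidator, StringConverter
+#
+#     split = LineSplitter(delimiter=',')        # autostrip=True by default
+#     tokens = split("A, B with space, return")  # ['A', 'B with space', 'return']
+#     names = NameValidator(case_sensitive='lower')(tokens)
+#     # -> ('a', 'b_with_space', 'return_')
+#
+#     conv = StringConverter()                   # starts as a bool converter
+#     for value in split("1, 2.5, "):
+#         conv.upgrade(value)                    # upgrades bool -> int -> float;
+#                                                # the empty field counts as missing
+#     conv.type                                  # <class 'numpy.float64'>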
+ +class StringConverter: + func: Callable[[str], Any] | None + default: Any + missing_values: set[str] + type: np.dtype[np.datetime64] | np.generic + + def __init__( + self, + /, + dtype_or_func: npt.DTypeLike | None = None, + default: None = None, + missing_values: Iterable[str] | None = None, + locked: bool = False, + ) -> None: ... + def update( + self, + /, + func: Callable[[str], Any], + default: object | None = None, + testing_value: str | None = None, + missing_values: str = "", + locked: bool = False, + ) -> None: ... + # + def __call__(self, /, value: str) -> Any: ... + def upgrade(self, /, value: str) -> Any: ... + def iterupgrade(self, /, value: Iterable[str] | str) -> None: ... + + # + @classmethod + def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ... + +@overload +def str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ... +@overload +def str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ... + +# +def has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ... +def flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ... +def easy_dtype( + ndtype: npt.DTypeLike, + names: Iterable[str] | None = None, + defaultfmt: str = "f%i", + **validationargs: Unpack[_ValidationKwargs], +) -> np.dtype[np.void]: ... diff --git a/python/numpy/lib/_nanfunctions_impl.py b/python/numpy/lib/_nanfunctions_impl.py new file mode 100644 index 000000000..4a0149030 --- /dev/null +++ b/python/numpy/lib/_nanfunctions_impl.py @@ -0,0 +1,2024 @@ +""" +Functions that ignore NaN. + +Functions +--------- + +- `nanmin` -- minimum non-NaN value +- `nanmax` -- maximum non-NaN value +- `nanargmin` -- index of minimum non-NaN value +- `nanargmax` -- index of maximum non-NaN value +- `nansum` -- sum of non-NaN values +- `nanprod` -- product of non-NaN values +- `nancumsum` -- cumulative sum of non-NaN values +- `nancumprod` -- cumulative product of non-NaN values +- `nanmean` -- mean of non-NaN values +- `nanvar` -- variance of non-NaN values +- `nanstd` -- standard deviation of non-NaN values +- `nanmedian` -- median of non-NaN values +- `nanquantile` -- qth quantile of non-NaN values +- `nanpercentile` -- qth percentile of non-NaN values + +""" +import functools +import warnings + +import numpy as np +import numpy._core.numeric as _nx +from numpy._core import overrides +from numpy.lib import _function_base_impl as fnb +from numpy.lib._function_base_impl import _weights_are_valid + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +__all__ = [ + 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean', + 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod', + 'nancumsum', 'nancumprod', 'nanquantile' + ] + + +def _nan_mask(a, out=None): + """ + Parameters + ---------- + a : array-like + Input array with at least 1 dimension. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output and will prevent the allocation of a new array. + + Returns + ------- + y : bool ndarray or True + A bool array where ``np.nan`` positions are marked with ``False`` + and other positions are marked with ``True``. If the type of ``a`` + is such that it can't possibly contain ``np.nan``, returns ``True``. 
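+
+    Examples
+    --------
+    A quick illustration (this helper is private and expects a real
+    ndarray, not an arbitrary array-like):
+
+    >>> _nan_mask(np.array([1.0, np.nan, 3.0]))
+    array([ True, False,  True])
+    >>> _nan_mask(np.array([1, 2, 3]))  # integer dtypes cannot hold NaN
+    True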
+ """ + # we assume that a is an array for this private function + + if a.dtype.kind not in 'fc': + return True + + y = np.isnan(a, out=out) + y = np.invert(y, out=y) + return y + +def _replace_nan(a, val): + """ + If `a` is of inexact type, make a copy of `a`, replace NaNs with + the `val` value, and return the copy together with a boolean mask + marking the locations where NaNs were present. If `a` is not of + inexact type, do nothing and return `a` together with a mask of None. + + Note that scalars will end up as array scalars, which is important + for using the result as the value of the out argument in some + operations. + + Parameters + ---------- + a : array-like + Input array. + val : float + NaN values are set to val before doing the operation. + + Returns + ------- + y : ndarray + If `a` is of inexact type, return a copy of `a` with the NaNs + replaced by the fill value, otherwise return `a`. + mask: {bool, None} + If `a` is of inexact type, return a boolean mask marking locations of + NaNs, otherwise return None. + + """ + a = np.asanyarray(a) + + if a.dtype == np.object_: + # object arrays do not support `isnan` (gh-9009), so make a guess + mask = np.not_equal(a, a, dtype=bool) + elif issubclass(a.dtype.type, np.inexact): + mask = np.isnan(a) + else: + mask = None + + if mask is not None: + a = np.array(a, subok=True, copy=True) + np.copyto(a, val, where=mask) + + return a, mask + + +def _copyto(a, val, mask): + """ + Replace values in `a` with NaN where `mask` is True. This differs from + copyto in that it will deal with the case where `a` is a numpy scalar. + + Parameters + ---------- + a : ndarray or numpy scalar + Array or numpy scalar some of whose values are to be replaced + by val. + val : numpy scalar + Value used a replacement. + mask : ndarray, scalar + Boolean array. Where True the corresponding element of `a` is + replaced by `val`. Broadcasts. + + Returns + ------- + res : ndarray, scalar + Array with elements replaced or scalar `val`. + + """ + if isinstance(a, np.ndarray): + np.copyto(a, val, where=mask, casting='unsafe') + else: + a = a.dtype.type(val) + return a + + +def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False): + """ + Equivalent to arr1d[~arr1d.isnan()], but in a different order + + Presumably faster as it incurs fewer copies + + Parameters + ---------- + arr1d : ndarray + Array to remove nans from + second_arr1d : ndarray or None + A second array which will have the same positions removed as arr1d. + overwrite_input : bool + True if `arr1d` can be modified in place + + Returns + ------- + res : ndarray + Array with nan elements removed + second_res : ndarray or None + Second array with nan element positions of first array removed. 
+ overwrite_input : bool + True if `res` can be modified in place, given the constraint on the + input + """ + if arr1d.dtype == object: + # object arrays do not support `isnan` (gh-9009), so make a guess + c = np.not_equal(arr1d, arr1d, dtype=bool) + else: + c = np.isnan(arr1d) + + s = np.nonzero(c)[0] + if s.size == arr1d.size: + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=6) + if second_arr1d is None: + return arr1d[:0], None, True + else: + return arr1d[:0], second_arr1d[:0], True + elif s.size == 0: + return arr1d, second_arr1d, overwrite_input + else: + if not overwrite_input: + arr1d = arr1d.copy() + # select non-nans at end of array + enonan = arr1d[-s.size:][~c[-s.size:]] + # fill nans in beginning of array with non-nans of end + arr1d[s[:enonan.size]] = enonan + + if second_arr1d is None: + return arr1d[:-s.size], None, True + else: + if not overwrite_input: + second_arr1d = second_arr1d.copy() + enonan = second_arr1d[-s.size:][~c[-s.size:]] + second_arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], second_arr1d[:-s.size], True + + +def _divide_by_count(a, b, out=None): + """ + Compute a/b ignoring invalid results. If `a` is an array the division + is done in place. If `a` is a scalar, then its type is preserved in the + output. If out is None, then a is used instead so that the division + is in place. Note that this is only called with `a` an inexact type. + + Parameters + ---------- + a : {ndarray, numpy scalar} + Numerator. Expected to be of inexact type but not checked. + b : {ndarray, numpy scalar} + Denominator. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + + Returns + ------- + ret : {ndarray, numpy scalar} + The return value is a/b. If `a` was an ndarray the division is done + in place. If `a` is a numpy scalar, the division preserves its type. + + """ + with np.errstate(invalid='ignore', divide='ignore'): + if isinstance(a, np.ndarray): + if out is None: + return np.divide(a, b, out=a, casting='unsafe') + else: + return np.divide(a, b, out=out, casting='unsafe') + elif out is None: + # Precaution against reduced object arrays + try: + return a.dtype.type(a / b) + except AttributeError: + return a / b + else: + # This is questionable, but currently a numpy scalar can + # be output to a zero dimensional array. + return np.divide(a, b, out=out, casting='unsafe') + + +def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanmin_dispatcher) +def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return minimum of an array or minimum along an axis, ignoring any NaNs. + When all-NaN slices are encountered a ``RuntimeWarning`` is raised and + Nan is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose minimum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the minimum is computed. The default is to compute + the minimum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. 
See
+        :ref:`ufuncs-output-type` for more details.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then `keepdims` will
+        be passed through to the `min` method of sub-classes of
+        `ndarray`. If the sub-class's method does not implement
+        `keepdims`, any exceptions will be raised.
+    initial : scalar, optional
+        The maximum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nanmin : ndarray
+        An array with the same shape as `a`, with the specified axis
+        removed. If `a` is a 0-d array, or if axis is None, an ndarray
+        scalar is returned. The same dtype as `a` is returned.
+
+    See Also
+    --------
+    nanmax :
+        The maximum value of an array along a given axis, ignoring any NaNs.
+    amin :
+        The minimum value of an array along a given axis, propagating any NaNs.
+    fmin :
+        Element-wise minimum of two arrays, ignoring any NaNs.
+    minimum :
+        Element-wise minimum of two arrays, propagating any NaNs.
+    isnan :
+        Shows which elements are Not a Number (NaN).
+    isfinite :
+        Shows which elements are neither NaN nor infinity.
+
+    amax, fmax, maximum
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Positive infinity is treated as a very large number and negative
+    infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type, the function is equivalent to ``np.min``.
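+
+    The ``initial`` value takes part in the reduction like an extra element
+    (so it also covers otherwise empty slices), and ``where`` masks elements
+    out of the comparison; for instance:
+
+    >>> np.nanmin([2.0, np.nan], initial=1)
+    1.0
+    >>> np.nanmin([1.0, 2.0, 3.0], where=[False, True, True], initial=10)
+    2.0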
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmin(a) + 1.0 + >>> np.nanmin(a, axis=0) + array([1., 2.]) + >>> np.nanmin(a, axis=1) + array([1., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmin([1, 2, np.nan, np.inf]) + 1.0 + >>> np.nanmin([1, 2, np.nan, -np.inf]) + -inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if initial is not np._NoValue: + kwargs['initial'] = initial + if where is not np._NoValue: + kwargs['where'] = where + + if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmin correctly (gh-8975) + res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=2) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, +np.inf) + res = np.amin(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + kwargs.pop("initial", None) + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, + stacklevel=2) + return res + + +def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nanmax_dispatcher) +def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, + where=np._NoValue): + """ + Return the maximum of an array or maximum along an axis, ignoring any + NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is + raised and NaN is returned for that slice. + + Parameters + ---------- + a : array_like + Array containing numbers whose maximum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the maximum is computed. The default is to compute + the maximum of the flattened array. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. See + :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + If the value is anything but the default, then + `keepdims` will be passed through to the `max` method + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + initial : scalar, optional + The minimum value of an output element. Must be present to allow + computation on empty slice. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to compare for the maximum. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nanmax : ndarray + An array with the same shape as `a`, with the specified axis removed. + If `a` is a 0-d array, or if axis is None, an ndarray scalar is + returned. The same dtype as `a` is returned. + + See Also + -------- + nanmin : + The minimum value of an array along a given axis, ignoring any NaNs. 
+ amax : + The maximum value of an array along a given axis, propagating any NaNs. + fmax : + Element-wise maximum of two arrays, ignoring any NaNs. + maximum : + Element-wise maximum of two arrays, propagating any NaNs. + isnan : + Shows which elements are Not a Number (NaN). + isfinite: + Shows which elements are neither NaN nor infinity. + + amin, fmin, minimum + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). This means that Not a Number is not equivalent to infinity. + Positive infinity is treated as a very large number and negative + infinity is treated as a very small (i.e. negative) number. + + If the input has a integer type the function is equivalent to np.max. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanmax(a) + 3.0 + >>> np.nanmax(a, axis=0) + array([3., 2.]) + >>> np.nanmax(a, axis=1) + array([2., 3.]) + + When positive infinity and negative infinity are present: + + >>> np.nanmax([1, 2, np.nan, -np.inf]) + 2.0 + >>> np.nanmax([1, 2, np.nan, np.inf]) + inf + + """ + kwargs = {} + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + if initial is not np._NoValue: + kwargs['initial'] = initial + if where is not np._NoValue: + kwargs['where'] = where + + if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_: + # Fast, but not safe for subclasses of ndarray, or object arrays, + # which do not implement isnan (gh-9009), or fmax correctly (gh-8975) + res = np.fmax.reduce(a, axis=axis, out=out, **kwargs) + if np.isnan(res).any(): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=2) + else: + # Slow, but safe for subclasses of ndarray + a, mask = _replace_nan(a, -np.inf) + res = np.amax(a, axis=axis, out=out, **kwargs) + if mask is None: + return res + + # Check for all-NaN axis + kwargs.pop("initial", None) + mask = np.all(mask, axis=axis, **kwargs) + if np.any(mask): + res = _copyto(res, np.nan, mask) + warnings.warn("All-NaN axis encountered", RuntimeWarning, + stacklevel=2) + return res + + +def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_nanargmin_dispatcher) +def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Return the indices of the minimum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results + cannot be trusted if a slice contains only NaNs and Infs. + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + + .. versionadded:: 1.22.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. 
+ + See Also + -------- + argmin, nanargmax + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmin(a) + 0 + >>> np.nanargmin(a) + 2 + >>> np.nanargmin(a, axis=0) + array([1, 1]) + >>> np.nanargmin(a, axis=1) + array([1, 0]) + + """ + a, mask = _replace_nan(a, np.inf) + if mask is not None and mask.size: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmin(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None): + return (a,) + + +@array_function_dispatch(_nanargmax_dispatcher) +def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): + """ + Return the indices of the maximum values in the specified axis ignoring + NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the + results cannot be trusted if a slice contains only NaNs and -Infs. + + + Parameters + ---------- + a : array_like + Input data. + axis : int, optional + Axis along which to operate. By default flattened input is used. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and dtype. + + .. versionadded:: 1.22.0 + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + .. versionadded:: 1.22.0 + + Returns + ------- + index_array : ndarray + An array of indices or a single index value. + + See Also + -------- + argmax, nanargmin + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[np.nan, 4], [2, 3]]) + >>> np.argmax(a) + 0 + >>> np.nanargmax(a) + 1 + >>> np.nanargmax(a, axis=0) + array([1, 0]) + >>> np.nanargmax(a, axis=1) + array([1, 1]) + + """ + a, mask = _replace_nan(a, -np.inf) + if mask is not None and mask.size: + mask = np.all(mask, axis=axis) + if np.any(mask): + raise ValueError("All-NaN slice encountered") + res = np.argmax(a, axis=axis, out=out, keepdims=keepdims) + return res + + +def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + initial=None, where=None): + return (a, out) + + +@array_function_dispatch(_nansum_dispatcher) +def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + initial=np._NoValue, where=np._NoValue): + """ + Return the sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. + + In NumPy versions <= 1.9.0 Nan is returned for slices that are all-NaN or + empty. In later versions zero is returned. + + Parameters + ---------- + a : array_like + Array containing numbers whose sum is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the sum is computed. The default is to compute the + sum of the flattened array. + dtype : data-type, optional + The type of the returned array and of the accumulator in which the + elements are summed. By default, the dtype of `a` is used. An + exception is when `a` has an integer type with less precision than + the platform (u)intp. In that case, the default will be either + (u)int32 or (u)int64 depending on whether the platform is 32 or 64 + bits. For inexact inputs, dtype must be inexact. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``. 
If provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        :ref:`ufuncs-output-type` for more details. The casting of NaN to
+        integer can yield unexpected results.
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then `keepdims` will
+        be passed through to the `mean` or `sum` methods of sub-classes
+        of `ndarray`. If the sub-class's method does not implement
+        `keepdims`, any exceptions will be raised.
+    initial : scalar, optional
+        Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nansum : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+        size as `a`, and the same shape as `a` if `axis` is not None
+        or `a` is a 1-d array.
+
+    See Also
+    --------
+    numpy.sum : Sum across array propagating NaNs.
+    isnan : Show which elements are NaN.
+    isfinite : Show which elements are not NaN or +/-inf.
+
+    Notes
+    -----
+    If both positive and negative infinity are present, the sum will be Not
+    A Number (NaN).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.nansum(1)
+    1
+    >>> np.nansum([1])
+    1
+    >>> np.nansum([1, np.nan])
+    1.0
+    >>> a = np.array([[1, 1], [1, np.nan]])
+    >>> np.nansum(a)
+    3.0
+    >>> np.nansum(a, axis=0)
+    array([2., 1.])
+    >>> np.nansum([1, np.nan, np.inf])
+    inf
+    >>> np.nansum([1, np.nan, -np.inf])
+    -inf
+    >>> with np.errstate(invalid="ignore"):
+    ...     np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+    np.float64(nan)
+
+    """
+    a, mask = _replace_nan(a, 0)
+    return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
+                  initial=initial, where=where)
+
+
+def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+                        initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanprod_dispatcher)
+def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+            initial=np._NoValue, where=np._NoValue):
+    """
+    Return the product of array elements over a given axis treating Not a
+    Numbers (NaNs) as ones.
+
+    One is returned for slices that are all-NaN or empty.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose product is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the product is computed. The default is to
+        compute the product of the flattened array.
+    dtype : data-type, optional
+        The type of the returned array and of the accumulator in which the
+        elements are multiplied. By default, the dtype of `a` is used. An
+        exception is when `a` has an integer type with less precision than
+        the platform (u)intp. In that case, the default will be either
+        (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+        bits. For inexact inputs, dtype must be inexact.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``. If provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. 
See + :ref:`ufuncs-output-type` for more details. The casting of NaN to integer + can yield unexpected results. + keepdims : bool, optional + If True, the axes which are reduced are left in the result as + dimensions with size one. With this option, the result will + broadcast correctly against the original `arr`. + initial : scalar, optional + The starting value for this product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + where : array_like of bool, optional + Elements to include in the product. See `~numpy.ufunc.reduce` + for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + nanprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.prod : Product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> import numpy as np + >>> np.nanprod(1) + 1 + >>> np.nanprod([1]) + 1 + >>> np.nanprod([1, np.nan]) + 1.0 + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nanprod(a) + 6.0 + >>> np.nanprod(a, axis=0) + array([3., 2.]) + + """ + a, mask = _replace_nan(a, 1) + return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + initial=initial, where=where) + + +def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumsum_dispatcher) +def nancumsum(a, axis=None, dtype=None, out=None): + """ + Return the cumulative sum of array elements over a given axis treating Not a + Numbers (NaNs) as zero. The cumulative sum does not change when NaNs are + encountered and leading NaNs are replaced by zeros. + + Zeros are returned for slices that are all-NaN or empty. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is to compute the cumsum over the flattened array. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If `dtype` is not specified, it defaults + to the dtype of `a`, unless `a` has an integer dtype with a + precision less than that of the default platform integer. In + that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` for + more details. + + Returns + ------- + nancumsum : ndarray. + A new array holding the result is returned unless `out` is + specified, in which it is returned. The result has the same + size as `a`, and the same shape as `a` if `axis` is not None + or `a` is a 1-d array. + + See Also + -------- + numpy.cumsum : Cumulative sum across array propagating NaNs. + isnan : Show which elements are NaN. 
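+
+    Notes
+    -----
+    Unlike `numpy.cumsum`, NaNs do not poison the running total; a quick
+    comparison on the same data:
+
+    >>> np.cumsum([1., np.nan, 2.])
+    array([ 1., nan, nan])
+    >>> np.nancumsum([1., np.nan, 2.])
+    array([1., 1., 3.])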
+ + Examples + -------- + >>> import numpy as np + >>> np.nancumsum(1) + array([1]) + >>> np.nancumsum([1]) + array([1]) + >>> np.nancumsum([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumsum(a) + array([1., 3., 6., 6.]) + >>> np.nancumsum(a, axis=0) + array([[1., 2.], + [4., 2.]]) + >>> np.nancumsum(a, axis=1) + array([[1., 3.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 0) + return np.cumsum(a, axis=axis, dtype=dtype, out=out) + + +def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None): + return (a, out) + + +@array_function_dispatch(_nancumprod_dispatcher) +def nancumprod(a, axis=None, dtype=None, out=None): + """ + Return the cumulative product of array elements over a given axis treating Not a + Numbers (NaNs) as one. The cumulative product does not change when NaNs are + encountered and leading NaNs are replaced by ones. + + Ones are returned for slices that are all-NaN or empty. + + Parameters + ---------- + a : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. By default + the input is flattened. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If *dtype* is not specified, it + defaults to the dtype of `a`, unless `a` has an integer dtype with + a precision less than that of the default platform integer. In + that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + + Returns + ------- + nancumprod : ndarray + A new array holding the result is returned unless `out` is + specified, in which case it is returned. + + See Also + -------- + numpy.cumprod : Cumulative product across array propagating NaNs. + isnan : Show which elements are NaN. + + Examples + -------- + >>> import numpy as np + >>> np.nancumprod(1) + array([1]) + >>> np.nancumprod([1]) + array([1]) + >>> np.nancumprod([1, np.nan]) + array([1., 1.]) + >>> a = np.array([[1, 2], [3, np.nan]]) + >>> np.nancumprod(a) + array([1., 2., 6., 6.]) + >>> np.nancumprod(a, axis=0) + array([[1., 2.], + [3., 2.]]) + >>> np.nancumprod(a, axis=1) + array([[1., 2.], + [3., 3.]]) + + """ + a, mask = _replace_nan(a, 1) + return np.cumprod(a, axis=axis, dtype=dtype, out=out) + + +def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, + *, where=None): + return (a, out) + + +@array_function_dispatch(_nanmean_dispatcher) +def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, + *, where=np._NoValue): + """ + Compute the arithmetic mean along the specified axis, ignoring NaNs. + + Returns the average of the array elements. The average is taken over + the flattened array by default, otherwise over the specified axis. + `float64` intermediate and return values are used for integer inputs. + + For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Array containing numbers whose mean is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the means are computed. The default is to compute + the mean of the flattened array. + dtype : data-type, optional + Type to use in computing the mean. 
For integer inputs, the default + is `float64`; for inexact inputs, it is the same as the input + dtype. + out : ndarray, optional + Alternate output array in which to place the result. The default + is ``None``; if provided, it must have the same shape as the + expected output, but the type will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If the value is anything but the default, then + `keepdims` will be passed through to the `mean` or `sum` methods + of sub-classes of `ndarray`. If the sub-classes methods + does not implement `keepdims` any exceptions will be raised. + where : array_like of bool, optional + Elements to include in the mean. See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + Returns + ------- + m : ndarray, see dtype parameter above + If `out=None`, returns a new array containing the mean values, + otherwise a reference to the output array is returned. Nan is + returned for slices that contain only NaNs. + + See Also + -------- + average : Weighted average + mean : Arithmetic mean taken while not ignoring NaNs + var, nanvar + + Notes + ----- + The arithmetic mean is the sum of the non-NaN elements along the axis + divided by the number of non-NaN elements. + + Note that for floating-point input, the mean is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32`. Specifying a + higher-precision accumulator using the `dtype` keyword can alleviate + this issue. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanmean(a) + 2.6666666666666665 + >>> np.nanmean(a, axis=0) + array([2., 4.]) + >>> np.nanmean(a, axis=1) + array([1., 3.5]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims, + where=where) + tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + avg = _divide_by_count(tot, cnt, out=out) + + isbad = (cnt == 0) + if isbad.any(): + warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2) + # NaN is the only possible bad value, so no further + # action is needed to handle bad results. + return avg + + +def _nanmedian1d(arr1d, overwrite_input=False): + """ + Private function for rank 1 arrays. Compute the median ignoring NaNs. 
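+
+    For example:
+
+    >>> _nanmedian1d(np.array([1., np.nan, 3.]))
+    2.0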
+ See nanmedian for parameter usage + """ + arr1d_parsed, _, overwrite_input = _remove_nan_1d( + arr1d, overwrite_input=overwrite_input, + ) + + if arr1d_parsed.size == 0: + # Ensure that a nan-esque scalar of the appropriate type (and unit) + # is returned for `timedelta64` and `complexfloating` + return arr1d[-1] + + return np.median(arr1d_parsed, overwrite_input=overwrite_input) + + +def _nanmedian(a, axis=None, out=None, overwrite_input=False): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanmedian for parameter usage + + """ + if axis is None or a.ndim == 1: + part = a.ravel() + if out is None: + return _nanmedian1d(part, overwrite_input) + else: + out[...] = _nanmedian1d(part, overwrite_input) + return out + else: + # for small medians use sort + indexing which is still faster than + # apply_along_axis + # benchmarked with shuffled (50, 50, x) containing a few NaN + if a.shape[axis] < 600: + return _nanmedian_small(a, axis, out, overwrite_input) + result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input) + if out is not None: + out[...] = result + return result + + +def _nanmedian_small(a, axis=None, out=None, overwrite_input=False): + """ + sort + indexing median, faster for small medians along multiple + dimensions due to the high overhead of apply_along_axis + + see nanmedian for parameter usage + """ + a = np.ma.masked_array(a, np.isnan(a)) + m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input) + for i in range(np.count_nonzero(m.mask.ravel())): + warnings.warn("All-NaN slice encountered", RuntimeWarning, + stacklevel=5) + + fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan + if out is not None: + out[...] = m.filled(fill_value) + return out + return m.filled(fill_value) + + +def _nanmedian_dispatcher( + a, axis=None, out=None, overwrite_input=None, keepdims=None): + return (a, out) + + +@array_function_dispatch(_nanmedian_dispatcher) +def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue): + """ + Compute the median along the specified axis, while ignoring NaNs. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : {int, sequence of int, None}, optional + Axis or axes along which the medians are computed. The default + is to compute the median along a flattened version of the array. + A sequence of axes is supported since version 1.9.0. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array `a` for + calculations. The input array will be modified by the call to + `median`. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. If `overwrite_input` is ``True`` and `a` is not already an + `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. 
+ + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + Returns + ------- + median : ndarray + A new array holding the result. If the input contains integers + or floats smaller than ``float64``, then the output data-type is + ``np.float64``. Otherwise, the data-type of the output is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + mean, median, percentile + + Notes + ----- + Given a vector ``V`` of length ``N``, the median of ``V`` is the + middle value of a sorted copy of ``V``, ``V_sorted`` - i.e., + ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two + middle values of ``V_sorted`` when ``N`` is even. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) + >>> a[0, 1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.median(a) + np.float64(nan) + >>> np.nanmedian(a) + 3.0 + >>> np.nanmedian(a, axis=0) + array([6.5, 2. , 2.5]) + >>> np.median(a, axis=1) + array([nan, 2.]) + >>> b = a.copy() + >>> np.nanmedian(b, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + >>> b = a.copy() + >>> np.nanmedian(b, axis=None, overwrite_input=True) + 3.0 + >>> assert not np.all(a==b) + + """ + a = np.asanyarray(a) + # apply_along_axis in _nanmedian doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + + return fnb._ureduce(a, func=_nanmedian, keepdims=keepdims, + axis=axis, out=out, + overwrite_input=overwrite_input) + + +def _nanpercentile_dispatcher( + a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_nanpercentile_dispatcher) +def nanpercentile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + weights=None, + interpolation=None, +): + """ + Compute the qth percentile of the data along the specified axis, + while ignoring nan values. + + Returns the qth percentile(s) of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored. + q : array_like of float + Percentile or sequence of percentiles to compute, which must be + between 0 and 100 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the percentiles are computed. The default + is to compute the percentile(s) along a flattened version of the + array. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape and buffer length as the expected output, but the + type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by + intermediate calculations, to save memory. In this case, the + contents of the input `a` after this function completes is + undefined. + method : str, optional + This parameter specifies the method to use for estimating the + percentile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 
'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. + + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the percentile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + percentile : scalar or ndarray + If `q` is a single percentile and `axis=None`, then the result + is a scalar. If multiple percentiles are given, first axis of + the result corresponds to the percentiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + nanmean + nanmedian : equivalent to ``nanpercentile(..., 50)`` + percentile, median, mean + nanquantile : equivalent to nanpercentile, except q in range [0, 1]. + + Notes + ----- + The behavior of `numpy.nanpercentile` with percentage `q` is that of + `numpy.quantile` with argument ``q/100`` (ignoring nan values). + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.percentile(a, 50) + np.float64(nan) + >>> np.nanpercentile(a, 50) + 3.0 + >>> np.nanpercentile(a, 50, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanpercentile(a, 50, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanpercentile(a, 50, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanpercentile(a, 50, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + + >>> b = a.copy() + >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 
361-365, 1996 + + """ + if interpolation is not None: + method = fnb._check_interpolation_as_method( + method, interpolation, "nanpercentile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + q = np.true_divide(q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...) + if not fnb._quantile_is_valid(q): + raise ValueError("Percentiles must be in the range [0, 100]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. " + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, + method=None, keepdims=None, *, weights=None, + interpolation=None): + return (a, q, out, weights) + + +@array_function_dispatch(_nanquantile_dispatcher) +def nanquantile( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + *, + weights=None, + interpolation=None, +): + """ + Compute the qth quantile of the data along the specified axis, + while ignoring nan values. + Returns the qth quantile(s) of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array, containing + nan values to be ignored + q : array_like of float + Probability or sequence of probabilities for the quantiles to compute. + Values must be between 0 and 1 inclusive. + axis : {int, tuple of int, None}, optional + Axis or axes along which the quantiles are computed. The + default is to compute the quantile(s) along a flattened + version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output, + but the type (of the output) will be cast if necessary. + overwrite_input : bool, optional + If True, then allow the input array `a` to be modified by intermediate + calculations, to save memory. In this case, the contents of the input + `a` after this function completes is undefined. + method : str, optional + This parameter specifies the method to use for estimating the + quantile. There are many different methods, some unique to NumPy. + See the notes for explanation. The options sorted by their R type + as summarized in the H&F paper [1]_ are: + + 1. 'inverted_cdf' + 2. 'averaged_inverted_cdf' + 3. 'closest_observation' + 4. 'interpolated_inverted_cdf' + 5. 'hazen' + 6. 'weibull' + 7. 'linear' (default) + 8. 'median_unbiased' + 9. 'normal_unbiased' + + The first three methods are discontinuous. NumPy further defines the + following discontinuous variations of the default 'linear' (7.) option: + + * 'lower' + * 'higher', + * 'midpoint' + * 'nearest' + + .. versionchanged:: 1.22.0 + This argument was previously called "interpolation" and only + offered the "linear" default and last four options. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left in + the result as dimensions with size one. With this option, the + result will broadcast correctly against the original array `a`. 
+ + If this is anything but the default value it will be passed + through (in the special case of an empty array) to the + `mean` function of the underlying array. If the array is + a sub-class and `mean` does not have the kwarg `keepdims` this + will raise a RuntimeError. + + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the quantile according to its associated weight. + The weights array can either be 1-D (in which case its length must be + the size of `a` along the given axis) or of the same shape as `a`. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + Only `method="inverted_cdf"` supports weights. + + .. versionadded:: 2.0.0 + + interpolation : str, optional + Deprecated name for the method keyword argument. + + .. deprecated:: 1.22.0 + + Returns + ------- + quantile : scalar or ndarray + If `q` is a single probability and `axis=None`, then the result + is a scalar. If multiple probability levels are given, first axis of + the result corresponds to the quantiles. The other axes are + the axes that remain after the reduction of `a`. If the input + contains integers or floats smaller than ``float64``, the output + data-type is ``float64``. Otherwise, the output data-type is the + same as that of the input. If `out` is specified, that array is + returned instead. + + See Also + -------- + quantile + nanmean, nanmedian + nanmedian : equivalent to ``nanquantile(..., 0.5)`` + nanpercentile : same as nanquantile, but with q in the range [0, 100]. + + Notes + ----- + The behavior of `numpy.nanquantile` is the same as that of + `numpy.quantile` (ignoring nan values). + For more information, please see `numpy.quantile`. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) + >>> a[0][1] = np.nan + >>> a + array([[10., nan, 4.], + [ 3., 2., 1.]]) + >>> np.quantile(a, 0.5) + np.float64(nan) + >>> np.nanquantile(a, 0.5) + 3.0 + >>> np.nanquantile(a, 0.5, axis=0) + array([6.5, 2. , 2.5]) + >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) + array([[7.], + [2.]]) + >>> m = np.nanquantile(a, 0.5, axis=0) + >>> out = np.zeros_like(m) + >>> np.nanquantile(a, 0.5, axis=0, out=out) + array([6.5, 2. , 2.5]) + >>> m + array([6.5, 2. , 2.5]) + >>> b = a.copy() + >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) + array([7., 2.]) + >>> assert not np.all(a==b) + + References + ---------- + .. [1] R. J. Hyndman and Y. Fan, + "Sample quantiles in statistical packages," + The American Statistician, 50(4), pp. 361-365, 1996 + + """ + + if interpolation is not None: + method = fnb._check_interpolation_as_method( + method, interpolation, "nanquantile") + + a = np.asanyarray(a) + if a.dtype.kind == "c": + raise TypeError("a must be an array of real numbers") + + # Use dtype of array if possible (e.g., if q is a python int or float). + if isinstance(q, (int, float)) and a.dtype.kind == "f": + q = np.asanyarray(q, dtype=a.dtype) + else: + q = np.asanyarray(q) + + if not fnb._quantile_is_valid(q): + raise ValueError("Quantiles must be in the range [0, 1]") + + if weights is not None: + if method != "inverted_cdf": + msg = ("Only method 'inverted_cdf' supports weights. 
" + f"Got: {method}.") + raise ValueError(msg) + if axis is not None: + axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis") + weights = _weights_are_valid(weights=weights, a=a, axis=axis) + if np.any(weights < 0): + raise ValueError("Weights must be non-negative.") + + return _nanquantile_unchecked( + a, q, axis, out, overwrite_input, method, keepdims, weights) + + +def _nanquantile_unchecked( + a, + q, + axis=None, + out=None, + overwrite_input=False, + method="linear", + keepdims=np._NoValue, + weights=None, +): + """Assumes that q is in [0, 1], and is an ndarray""" + # apply_along_axis in _nanpercentile doesn't handle empty arrays well, + # so deal them upfront + if a.size == 0: + return np.nanmean(a, axis, out=out, keepdims=keepdims) + return fnb._ureduce(a, + func=_nanquantile_ureduce_func, + q=q, + weights=weights, + keepdims=keepdims, + axis=axis, + out=out, + overwrite_input=overwrite_input, + method=method) + + +def _nanquantile_ureduce_func( + a: np.array, + q: np.array, + weights: np.array, + axis: int | None = None, + out=None, + overwrite_input: bool = False, + method="linear", +): + """ + Private function that doesn't support extended axis or keepdims. + These methods are extended to this function using _ureduce + See nanpercentile for parameter usage + """ + if axis is None or a.ndim == 1: + part = a.ravel() + wgt = None if weights is None else weights.ravel() + result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) + # Note that this code could try to fill in `out` right away + elif weights is None: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method, weights) + # apply_along_axis fills in collapsed axis with results. + # Move those axes to the beginning to match percentile's + # convention. + if q.ndim != 0: + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) + else: + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out + else: + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + ) + # This path dealt with `out` already... + return result + + if out is not None: + out[...] = result + return result + + +def _nanquantile_1d( + arr1d, q, overwrite_input=False, method="linear", weights=None, +): + """ + Private function for rank 1 arrays. Compute quantile ignoring NaNs. + See nanpercentile for parameter usage + """ + # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]? 
+ arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, + second_arr1d=weights, overwrite_input=overwrite_input) + if arr1d.size == 0: + # convert to scalar + return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] + + return fnb._quantile_unchecked( + arr1d, + q, + overwrite_input=overwrite_input, + method=method, + weights=weights, + ) + + +def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, + correction=None): + return (a, out) + + +@array_function_dispatch(_nanvar_dispatcher) +def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + """ + Compute the variance along the specified axis, while ignoring NaNs. + + Returns the variance of the array elements, a measure of the spread of + a distribution. The variance is computed for the flattened array by + default, otherwise over the specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Array containing numbers whose variance is desired. If `a` is not an + array, a conversion is attempted. + axis : {int, tuple of int, None}, optional + Axis or axes along which the variance is computed. The default is to compute + the variance of the flattened array. + dtype : data-type, optional + Type to use in computing the variance. For arrays of integer type + the default is `float64`; for arrays of float types it is the same as + the array type. + out : ndarray, optional + Alternate output array in which to place the result. It must have + the same shape as the expected output, but the type is cast if + necessary. + ddof : {int, float}, optional + "Delta Degrees of Freedom": the divisor used in the calculation is + ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + where : array_like of bool, optional + Elements to include in the variance. See `~numpy.ufunc.reduce` for + details. + + .. versionadded:: 1.22.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this var function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + variance : ndarray, see dtype parameter above + If `out` is None, return a new array containing the variance, + otherwise return a reference to the output array. If ddof is >= the + number of non-NaN elements in a slice or the slice contains only + NaNs, then the result for that slice is NaN. + + See Also + -------- + std : Standard deviation + mean : Average + var : Variance while not ignoring NaNs + nanstd, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The variance is the average of the squared deviations from the mean, + i.e., ``var = mean(abs(x - x.mean())**2)``. + + The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``. + If, however, `ddof` is specified, the divisor ``N - ddof`` is used + instead. 
In standard statistical practice, ``ddof=1`` provides an + unbiased estimator of the variance of a hypothetical infinite + population. ``ddof=0`` provides a maximum likelihood estimate of the + variance for normally distributed variables. + + Note that for complex numbers, the absolute value is taken before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the variance is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for `float32` (see example + below). Specifying a higher-accuracy accumulator using the ``dtype`` + keyword can alleviate this issue. + + For this function to work on sub-classes of ndarray, they must define + `sum` with the kwarg `keepdims` + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanvar(a) + 1.5555555555555554 + >>> np.nanvar(a, axis=0) + array([1., 0.]) + >>> np.nanvar(a, axis=1) + array([0., 0.25]) # may vary + + """ + arr, mask = _replace_nan(a, 0) + if mask is None: + return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean, + correction=correction) + + if dtype is not None: + dtype = np.dtype(dtype) + if dtype is not None and not issubclass(dtype.type, np.inexact): + raise TypeError("If a is inexact, then dtype must be inexact") + if out is not None and not issubclass(out.dtype.type, np.inexact): + raise TypeError("If a is inexact, then out must be inexact") + + if correction != np._NoValue: + if ddof != 0: + raise ValueError( + "ddof and correction can't be provided simultaneously." + ) + else: + ddof = correction + + # Compute mean + if type(arr) is np.matrix: + _keepdims = np._NoValue + else: + _keepdims = True + + cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims, + where=where) + + if mean is not np._NoValue: + avg = mean + else: + # we need to special case matrix for reverse compatibility + # in order for this to work, these sums need to be called with + # keepdims=True, however matrix now raises an error in this case, but + # the reason that it drops the keepdims kwarg is to force keepdims=True + # so this used to work by serendipity. + avg = np.sum(arr, axis=axis, dtype=dtype, + keepdims=_keepdims, where=where) + avg = _divide_by_count(avg, cnt) + + # Compute squared deviation from mean. + np.subtract(arr, avg, out=arr, casting='unsafe', where=where) + arr = _copyto(arr, 0, mask) + if issubclass(arr.dtype.type, np.complexfloating): + sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real + else: + sqr = np.multiply(arr, arr, out=arr, where=where) + + # Compute variance. + var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims, + where=where) + + # Precaution against reduced object arrays + try: + var_ndim = var.ndim + except AttributeError: + var_ndim = np.ndim(var) + if var_ndim < cnt.ndim: + # Subclasses of ndarray may ignore keepdims, so check here. + cnt = cnt.squeeze(axis) + dof = cnt - ddof + var = _divide_by_count(var, dof) + + isbad = (dof <= 0) + if np.any(isbad): + warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, + stacklevel=2) + # NaN, inf, or negative numbers are all possible bad + # values, so explicitly replace them with NaN. 
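+        # Illustrative case (hypothetical): a slice with two non-NaN
+        # values and ddof=2 gives dof == 0, so the division above can
+        # yield nan, inf, or a negative value; the copy below maps all
+        # of these to NaN.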
+ var = _copyto(var, np.nan, isbad) + return var + + +def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None, + keepdims=None, *, where=None, mean=None, + correction=None): + return (a, out) + + +@array_function_dispatch(_nanstd_dispatcher) +def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, + *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue): + """ + Compute the standard deviation along the specified axis, while + ignoring NaNs. + + Returns the standard deviation, a measure of the spread of a + distribution, of the non-NaN array elements. The standard deviation is + computed for the flattened array by default, otherwise over the + specified axis. + + For all-NaN slices or slices with zero degrees of freedom, NaN is + returned and a `RuntimeWarning` is raised. + + Parameters + ---------- + a : array_like + Calculate the standard deviation of the non-NaN values. + axis : {int, tuple of int, None}, optional + Axis or axes along which the standard deviation is computed. The default is + to compute the standard deviation of the flattened array. + dtype : dtype, optional + Type to use in computing the standard deviation. For arrays of + integer type the default is float64, for arrays of float types it + is the same as the array type. + out : ndarray, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type (of the + calculated values) will be cast if necessary. + ddof : {int, float}, optional + Means Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of non-NaN + elements. By default `ddof` is zero. + + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + + If this value is anything but the default it is passed through + as-is to the relevant functions of the sub-classes. If these + functions do not have a `keepdims` kwarg, a RuntimeError will + be raised. + where : array_like of bool, optional + Elements to include in the standard deviation. + See `~numpy.ufunc.reduce` for details. + + .. versionadded:: 1.22.0 + + mean : array_like, optional + Provide the mean to prevent its recalculation. The mean should have + a shape as if it was calculated with ``keepdims=True``. + The axis for the calculation of the mean should be the same as used in + the call to this std function. + + .. versionadded:: 2.0.0 + + correction : {int, float}, optional + Array API compatible name for the ``ddof`` parameter. Only one of them + can be provided at the same time. + + .. versionadded:: 2.0.0 + + Returns + ------- + standard_deviation : ndarray, see dtype parameter above. + If `out` is None, return a new array containing the standard + deviation, otherwise return a reference to the output array. If + ddof is >= the number of non-NaN elements in a slice or the slice + contains only NaNs, then the result for that slice is NaN. + + See Also + -------- + var, mean, std + nanvar, nanmean + :ref:`ufuncs-output-type` + + Notes + ----- + The standard deviation is the square root of the average of the squared + deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``. + + The average squared deviation is normally calculated as + ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is + specified, the divisor ``N - ddof`` is used instead. 
In standard + statistical practice, ``ddof=1`` provides an unbiased estimator of the + variance of the infinite population. ``ddof=0`` provides a maximum + likelihood estimate of the variance for normally distributed variables. + The standard deviation computed in this function is the square root of + the estimated variance, so even with ``ddof=1``, it will not be an + unbiased estimate of the standard deviation per se. + + Note that, for complex numbers, `std` takes the absolute value before + squaring, so that the result is always real and nonnegative. + + For floating-point input, the *std* is computed using the same + precision the input has. Depending on the input data, this can cause + the results to be inaccurate, especially for float32 (see example + below). Specifying a higher-accuracy accumulator using the `dtype` + keyword can alleviate this issue. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([[1, np.nan], [3, 4]]) + >>> np.nanstd(a) + 1.247219128924647 + >>> np.nanstd(a, axis=0) + array([1., 0.]) + >>> np.nanstd(a, axis=1) + array([0., 0.5]) # may vary + + """ + var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, + keepdims=keepdims, where=where, mean=mean, + correction=correction) + if isinstance(var, np.ndarray): + std = np.sqrt(var, out=var) + elif hasattr(var, 'dtype'): + std = var.dtype.type(np.sqrt(var)) + else: + std = np.sqrt(var) + return std diff --git a/python/numpy/lib/_nanfunctions_impl.pyi b/python/numpy/lib/_nanfunctions_impl.pyi new file mode 100644 index 000000000..f39800d58 --- /dev/null +++ b/python/numpy/lib/_nanfunctions_impl.pyi @@ -0,0 +1,52 @@ +from numpy._core.fromnumeric import ( + amax, + amin, + argmax, + argmin, + cumprod, + cumsum, + mean, + prod, + std, + sum, + var, +) +from numpy.lib._function_base_impl import ( + median, + percentile, + quantile, +) + +__all__ = [ + "nansum", + "nanmax", + "nanmin", + "nanargmax", + "nanargmin", + "nanmean", + "nanmedian", + "nanpercentile", + "nanvar", + "nanstd", + "nanprod", + "nancumsum", + "nancumprod", + "nanquantile", +] + +# NOTE: In reality these functions are not aliases but distinct functions +# with identical signatures. +nanmin = amin +nanmax = amax +nanargmin = argmin +nanargmax = argmax +nansum = sum +nanprod = prod +nancumsum = cumsum +nancumprod = cumprod +nanmean = mean +nanvar = var +nanstd = std +nanmedian = median +nanpercentile = percentile +nanquantile = quantile diff --git a/python/numpy/lib/_npyio_impl.py b/python/numpy/lib/_npyio_impl.py new file mode 100644 index 000000000..6aea56703 --- /dev/null +++ b/python/numpy/lib/_npyio_impl.py @@ -0,0 +1,2596 @@ +""" +IO related functions. +""" +import contextlib +import functools +import itertools +import operator +import os +import pickle +import re +import warnings +import weakref +from collections.abc import Mapping +from operator import itemgetter + +import numpy as np +from numpy._core import overrides +from numpy._core._multiarray_umath import _load_from_filelike +from numpy._core.multiarray import packbits, unpackbits +from numpy._core.overrides import finalize_array_function_like, set_module +from numpy._utils import asbytes, asunicode + +from . 
import format +from ._datasource import DataSource # noqa: F401 +from ._format_impl import _MAX_HEADER_SIZE +from ._iotools import ( + ConversionWarning, + ConverterError, + ConverterLockError, + LineSplitter, + NameValidator, + StringConverter, + _decode_line, + _is_string_like, + easy_dtype, + flatten_dtype, + has_nested_fields, +) + +__all__ = [ + 'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez', + 'savez_compressed', 'packbits', 'unpackbits', 'fromregex' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +class BagObj: + """ + BagObj(obj) + + Convert attribute look-ups to getitems on the object passed in. + + Parameters + ---------- + obj : class instance + Object on which attribute look-up is performed. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib._npyio_impl import BagObj as BO + >>> class BagDemo: + ... def __getitem__(self, key): # An instance of BagObj(BagDemo) + ... # will call this method when any + ... # attribute look-up is required + ... result = "Doesn't matter what you want, " + ... return result + "you're gonna get this" + ... + >>> demo_obj = BagDemo() + >>> bagobj = BO(demo_obj) + >>> bagobj.hello_there + "Doesn't matter what you want, you're gonna get this" + >>> bagobj.I_can_be_anything + "Doesn't matter what you want, you're gonna get this" + + """ + + def __init__(self, obj): + # Use weakref to make NpzFile objects collectable by refcount + self._obj = weakref.proxy(obj) + + def __getattribute__(self, key): + try: + return object.__getattribute__(self, '_obj')[key] + except KeyError: + raise AttributeError(key) from None + + def __dir__(self): + """ + Enables dir(bagobj) to list the files in an NpzFile. + + This also enables tab-completion in an interpreter or IPython. + """ + return list(object.__getattribute__(self, '_obj').keys()) + + +def zipfile_factory(file, *args, **kwargs): + """ + Create a ZipFile. + + Allows for Zip64, and the `file` argument can accept file, str, or + pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile + constructor. + """ + if not hasattr(file, 'read'): + file = os.fspath(file) + import zipfile + kwargs['allowZip64'] = True + return zipfile.ZipFile(file, *args, **kwargs) + + +@set_module('numpy.lib.npyio') +class NpzFile(Mapping): + """ + NpzFile(fid) + + A dictionary-like object with lazy-loading of files in the zipped + archive provided on construction. + + `NpzFile` is used to load files in the NumPy ``.npz`` data archive + format. It assumes that files in the archive have a ``.npy`` extension, + other files are ignored. + + The arrays and file strings are lazily loaded on either + getitem access using ``obj['key']`` or attribute lookup using + ``obj.f.key``. A list of all files (without ``.npy`` extensions) can + be obtained with ``obj.files`` and the ZipFile object itself using + ``obj.zip``. + + Attributes + ---------- + files : list of str + List of all files in the archive with a ``.npy`` extension. + zip : ZipFile instance + The ZipFile object initialized with the zipped archive. + f : BagObj instance + An object on which attribute can be performed as an alternative + to getitem access on the `NpzFile` instance itself. + allow_pickle : bool, optional + Allow loading pickled data. Default: False + pickle_kwargs : dict, optional + Additional keyword arguments to pass on to pickle.load. + These are only useful when loading object arrays saved on + Python 2. 
+ max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Parameters + ---------- + fid : file, str, or pathlib.Path + The zipped archive to open. This is either a file-like object + or a string containing the path to the archive. + own_fid : bool, optional + Whether NpzFile should close the file handle. + Requires that `fid` is a file-like object. + + Examples + -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + + >>> npz = np.load(outfile) + >>> isinstance(npz, np.lib.npyio.NpzFile) + True + >>> npz + NpzFile 'object' with keys: x, y + >>> sorted(npz.files) + ['x', 'y'] + >>> npz['x'] # getitem access + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> npz.f.x # attribute lookup + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + # Make __exit__ safe if zipfile_factory raises an exception + zip = None + fid = None + _MAX_REPR_ARRAY_COUNT = 5 + + def __init__(self, fid, own_fid=False, allow_pickle=False, + pickle_kwargs=None, *, + max_header_size=_MAX_HEADER_SIZE): + # Import is postponed to here since zipfile depends on gzip, an + # optional component of the so-called standard library. + _zip = zipfile_factory(fid) + _files = _zip.namelist() + self.files = [name.removesuffix(".npy") for name in _files] + self._files = dict(zip(self.files, _files)) + self._files.update(zip(_files, _files)) + self.allow_pickle = allow_pickle + self.max_header_size = max_header_size + self.pickle_kwargs = pickle_kwargs + self.zip = _zip + self.f = BagObj(self) + if own_fid: + self.fid = fid + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def close(self): + """ + Close the file. + + """ + if self.zip is not None: + self.zip.close() + self.zip = None + if self.fid is not None: + self.fid.close() + self.fid = None + self.f = None # break reference cycle + + def __del__(self): + self.close() + + # Implement the Mapping ABC + def __iter__(self): + return iter(self.files) + + def __len__(self): + return len(self.files) + + def __getitem__(self, key): + try: + key = self._files[key] + except KeyError: + raise KeyError(f"{key} is not a file in the archive") from None + else: + with self.zip.open(key) as bytes: + magic = bytes.read(len(format.MAGIC_PREFIX)) + bytes.seek(0) + if magic == format.MAGIC_PREFIX: + # FIXME: This seems like it will copy strings around + # more than is strictly necessary. The zipfile + # will read the string and then + # the format.read_array will copy the string + # to another place in memory. + # It would be better if the zipfile could read + # (or at least uncompress) the data + # directly into the array memory. 
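+                    # Rough sketch of this branch (illustrative): for
+                    # npz['x'], the archive member 'x.npy' is opened, the
+                    # magic prefix identifies a .npy payload, and
+                    # read_array deserialises it; members without the
+                    # prefix fall through to plain bytes below.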
+ return format.read_array( + bytes, + allow_pickle=self.allow_pickle, + pickle_kwargs=self.pickle_kwargs, + max_header_size=self.max_header_size + ) + else: + return bytes.read() + + def __contains__(self, key): + return (key in self._files) + + def __repr__(self): + # Get filename or default to `object` + if isinstance(self.fid, str): + filename = self.fid + else: + filename = getattr(self.fid, "name", "object") + + # Get the name of arrays + array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT]) + if len(self.files) > self._MAX_REPR_ARRAY_COUNT: + array_names += "..." + return f"NpzFile {filename!r} with keys: {array_names}" + + # Work around problems with the docstrings in the Mapping methods + # They contain a `->`, which confuses the type annotation interpretations + # of sphinx-docs. See gh-25964 + + def get(self, key, default=None, /): + """ + D.get(k,[,d]) returns D[k] if k in D, else d. d defaults to None. + """ + return Mapping.get(self, key, default) + + def items(self): + """ + D.items() returns a set-like object providing a view on the items + """ + return Mapping.items(self) + + def keys(self): + """ + D.keys() returns a set-like object providing a view on the keys + """ + return Mapping.keys(self) + + def values(self): + """ + D.values() returns a set-like object providing a view on the values + """ + return Mapping.values(self) + + +@set_module('numpy') +def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, + encoding='ASCII', *, max_header_size=_MAX_HEADER_SIZE): + """ + Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. + + .. warning:: Loading files that contain object arrays uses the ``pickle`` + module, which is not secure against erroneous or maliciously + constructed data. Consider passing ``allow_pickle=False`` to + load data that is known not to contain object arrays for the + safer handling of untrusted sources. + + Parameters + ---------- + file : file-like object, string, or pathlib.Path + The file to read. File-like objects must support the + ``seek()`` and ``read()`` methods and must always + be opened in binary mode. Pickled files require that the + file-like object support the ``readline()`` method as well. + mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional + If not None, then memory-map the file, using the given mode (see + `numpy.memmap` for a detailed description of the modes). A + memory-mapped array is kept on disk. However, it can be accessed + and sliced like any ndarray. Memory mapping is especially useful + for accessing small fragments of large files without reading the + entire file into memory. + allow_pickle : bool, optional + Allow loading pickled object arrays stored in npy files. Reasons for + disallowing pickles include security, as loading pickled data can + execute arbitrary code. If pickles are disallowed, loading object + arrays will fail. Default: False + fix_imports : bool, optional + Only useful when loading Python 2 generated pickled files, + which includes npy/npz files containing object arrays. If `fix_imports` + is True, pickle will try to map the old Python 2 names to the new names + used in Python 3. + encoding : str, optional + What encoding to use when reading Python 2 strings. Only useful when + loading Python 2 generated pickled files, which includes + npy/npz files containing object arrays. Values other than 'latin1', + 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical + data. 
Default: 'ASCII' + max_header_size : int, optional + Maximum allowed size of the header. Large headers may not be safe + to load securely and thus require explicitly passing a larger value. + See :py:func:`ast.literal_eval()` for details. + This option is ignored when `allow_pickle` is passed. In that case + the file is by definition trusted and the limit is unnecessary. + + Returns + ------- + result : array, tuple, dict, etc. + Data stored in the file. For ``.npz`` files, the returned instance + of NpzFile class must be closed to avoid leaking file descriptors. + + Raises + ------ + OSError + If the input file does not exist or cannot be read. + UnpicklingError + If ``allow_pickle=True``, but the file cannot be loaded as a pickle. + ValueError + The file contains an object array, but ``allow_pickle=False`` given. + EOFError + When calling ``np.load`` multiple times on the same file handle, + if all data has already been read + + See Also + -------- + save, savez, savez_compressed, loadtxt + memmap : Create a memory-map to an array stored in a file on disk. + lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file. + + Notes + ----- + - If the file contains pickle data, then whatever object is stored + in the pickle is returned. + - If the file is a ``.npy`` file, then a single array is returned. + - If the file is a ``.npz`` file, then a dictionary-like object is + returned, containing ``{filename: array}`` key-value pairs, one for + each file in the archive. + - If the file is a ``.npz`` file, the returned value supports the + context manager protocol in a similar fashion to the open function:: + + with load('foo.npz') as data: + a = data['a'] + + The underlying file descriptor is closed when exiting the 'with' + block. + + Examples + -------- + >>> import numpy as np + + Store data to disk, and load it again: + + >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) + >>> np.load('/tmp/123.npy') + array([[1, 2, 3], + [4, 5, 6]]) + + Store compressed data to disk, and load it again: + + >>> a=np.array([[1, 2, 3], [4, 5, 6]]) + >>> b=np.array([1, 2]) + >>> np.savez('/tmp/123.npz', a=a, b=b) + >>> data = np.load('/tmp/123.npz') + >>> data['a'] + array([[1, 2, 3], + [4, 5, 6]]) + >>> data['b'] + array([1, 2]) + >>> data.close() + + Mem-map the stored array, and then access the second row + directly from disk: + + >>> X = np.load('/tmp/123.npy', mmap_mode='r') + >>> X[1, :] + memmap([4, 5, 6]) + + """ + if encoding not in ('ASCII', 'latin1', 'bytes'): + # The 'encoding' value for pickle also affects what encoding + # the serialized binary data of NumPy arrays is loaded + # in. Pickle does not pass on the encoding information to + # NumPy. The unpickling code in numpy._core.multiarray is + # written to assume that unicode data appearing where binary + # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. + # + # Other encoding values can corrupt binary data, and we + # purposefully disallow them. For the same reason, the errors= + # argument is not exposed, as values other than 'strict' + # result can similarly silently corrupt numerical data. + raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") + + pickle_kwargs = {'encoding': encoding, 'fix_imports': fix_imports} + + with contextlib.ExitStack() as stack: + if hasattr(file, 'read'): + fid = file + own_fid = False + else: + fid = stack.enter_context(open(os.fspath(file), "rb")) + own_fid = True + + # Code to distinguish from NumPy binary files and pickles. 
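+        # Sketch of the dispatch that follows (per the constants used
+        # here): b'PK\x03\x04' or b'PK\x05\x06' marks a zip archive
+        # (.npz), format.MAGIC_PREFIX marks a single .npy array, and
+        # anything else is attempted as a pickle when allow_pickle=True.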
+ _ZIP_PREFIX = b'PK\x03\x04' + _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this + N = len(format.MAGIC_PREFIX) + magic = fid.read(N) + if not magic: + raise EOFError("No data left in file") + # If the file size is less than N, we need to make sure not + # to seek past the beginning of the file + fid.seek(-min(N, len(magic)), 1) # back-up + if magic.startswith((_ZIP_PREFIX, _ZIP_SUFFIX)): + # zip-file (assume .npz) + # Potentially transfer file ownership to NpzFile + stack.pop_all() + ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + return ret + elif magic == format.MAGIC_PREFIX: + # .npy file + if mmap_mode: + if allow_pickle: + max_header_size = 2**64 + return format.open_memmap(file, mode=mmap_mode, + max_header_size=max_header_size) + else: + return format.read_array(fid, allow_pickle=allow_pickle, + pickle_kwargs=pickle_kwargs, + max_header_size=max_header_size) + else: + # Try a pickle + if not allow_pickle: + raise ValueError( + "This file contains pickled (object) data. If you trust " + "the file you can load it unsafely using the " + "`allow_pickle=` keyword argument or `pickle.load()`.") + try: + return pickle.load(fid, **pickle_kwargs) + except Exception as e: + raise pickle.UnpicklingError( + f"Failed to interpret file {file!r} as a pickle") from e + + +def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): + return (arr,) + + +@array_function_dispatch(_save_dispatcher) +def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): + """ + Save an array to a binary file in NumPy ``.npy`` format. + + Parameters + ---------- + file : file, str, or pathlib.Path + File or filename to which the data is saved. If file is a file-object, + then the filename is unchanged. If file is a string or Path, + a ``.npy`` extension will be appended to the filename if it does not + already have one. + arr : array_like + Array data to be saved. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + fix_imports : bool, optional + The `fix_imports` flag is deprecated and has no effect. + + .. deprecated:: 2.1 + This flag is ignored since NumPy 1.17 and was only needed to + support loading in Python 2 some files written in Python 3. + + See Also + -------- + savez : Save several arrays into a ``.npz`` archive + savetxt, load + + Notes + ----- + For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + Any data saved to the file is appended to the end of the file. + + Examples + -------- + >>> import numpy as np + + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + + >>> x = np.arange(10) + >>> np.save(outfile, x) + + >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file + >>> np.load(outfile) + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + + >>> with open('test.npy', 'wb') as f: + ... np.save(f, np.array([1, 2])) + ... np.save(f, np.array([1, 3])) + >>> with open('test.npy', 'rb') as f: + ... a = np.load(f) + ... 
b = np.load(f) + >>> print(a, b) + # [1 2] [1 3] + """ + if fix_imports is not np._NoValue: + # Deprecated 2024-05-16, NumPy 2.1 + warnings.warn( + "The 'fix_imports' flag is deprecated and has no effect. " + "(Deprecated in NumPy 2.1)", + DeprecationWarning, stacklevel=2) + if hasattr(file, 'write'): + file_ctx = contextlib.nullcontext(file) + else: + file = os.fspath(file) + if not file.endswith('.npy'): + file = file + '.npy' + file_ctx = open(file, "wb") + + with file_ctx as fid: + arr = np.asanyarray(arr) + format.write_array(fid, arr, allow_pickle=allow_pickle, + pickle_kwargs={'fix_imports': fix_imports}) + + +def _savez_dispatcher(file, *args, allow_pickle=True, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_dispatcher) +def savez(file, *args, allow_pickle=True, **kwds): + """Save several arrays into a single file in uncompressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., ``savez(fn, + x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : file, str, or pathlib.Path + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + save : Save a single array to a binary file in NumPy format. + savetxt : Save an array to a file as plain text. + savez_compressed : Save several arrays into a compressed ``.npz`` archive + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is not compressed and each file + in the archive contains one variable in ``.npy`` format. For a + description of the ``.npy`` format, see :py:mod:`numpy.lib.format`. + + When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile` + object is returned. This is a dictionary-like object which can be queried + for its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. + + Keys passed in `kwds` are used as filenames inside the ZIP archive. + Therefore, keys should be valid filenames; e.g., avoid keys that begin with + ``/`` or contain ``.``. + + When naming variables with keyword arguments, it is not possible to name a + variable ``file``, as this would cause the ``file`` argument to be defined + twice in the call to ``savez``. 
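+
+    For instance (illustrative), ``np.savez(outfile, file=x)`` raises a
+    ``TypeError``, because ``file`` is already bound to the first
+    positional argument.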
+ + Examples + -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile + >>> outfile = TemporaryFile() + >>> x = np.arange(10) + >>> y = np.sin(x) + + Using `savez` with \\*args, the arrays are saved with default names. + + >>> np.savez(outfile, x, y) + >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file + >>> npzfile = np.load(outfile) + >>> npzfile.files + ['arr_0', 'arr_1'] + >>> npzfile['arr_0'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + Using `savez` with \\**kwds, the arrays are saved with the keyword names. + + >>> outfile = TemporaryFile() + >>> np.savez(outfile, x=x, y=y) + >>> _ = outfile.seek(0) + >>> npzfile = np.load(outfile) + >>> sorted(npzfile.files) + ['x', 'y'] + >>> npzfile['x'] + array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + _savez(file, args, kwds, False, allow_pickle=allow_pickle) + + +def _savez_compressed_dispatcher(file, *args, allow_pickle=True, **kwds): + yield from args + yield from kwds.values() + + +@array_function_dispatch(_savez_compressed_dispatcher) +def savez_compressed(file, *args, allow_pickle=True, **kwds): + """ + Save several arrays into a single file in compressed ``.npz`` format. + + Provide arrays as keyword arguments to store them under the + corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``. + + If arrays are specified as positional arguments, i.e., + ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc. + + Parameters + ---------- + file : file, str, or pathlib.Path + Either the filename (string) or an open file (file-like object) + where the data will be saved. If file is a string or a Path, the + ``.npz`` extension will be appended to the filename if it is not + already there. + args : Arguments, optional + Arrays to save to the file. Please use keyword arguments (see + `kwds` below) to assign names to arrays. Arrays specified as + args will be named "arr_0", "arr_1", and so on. + allow_pickle : bool, optional + Allow saving object arrays using Python pickles. Reasons for + disallowing pickles include security (loading pickled data can execute + arbitrary code) and portability (pickled objects may not be loadable + on different Python installations, for example if the stored objects + require libraries that are not available, and not all pickled data is + compatible between different versions of Python). + Default: True + kwds : Keyword arguments, optional + Arrays to save to the file. Each array will be saved to the + output file with its corresponding keyword name. + + Returns + ------- + None + + See Also + -------- + numpy.save : Save a single array to a binary file in NumPy format. + numpy.savetxt : Save an array to a file as plain text. + numpy.savez : Save several arrays into an uncompressed ``.npz`` file format + numpy.load : Load the files created by savez_compressed. + + Notes + ----- + The ``.npz`` file format is a zipped archive of files named after the + variables they contain. The archive is compressed with + ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable + in ``.npy`` format. For a description of the ``.npy`` format, see + :py:mod:`numpy.lib.format`. + + + When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile` + object is returned. This is a dictionary-like object which can be queried + for its list of arrays (with the ``.files`` attribute), and for the arrays + themselves. 
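+
+    As an illustrative check (actual sizes vary with the data), saving
+    the same arrays once with ``np.savez`` and once with
+    ``np.savez_compressed`` and comparing the results via
+    ``os.path.getsize`` shows the effect of ``zipfile.ZIP_DEFLATED``.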
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> test_array = np.random.rand(3, 2)
+    >>> test_vector = np.random.rand(4)
+    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+    >>> loaded = np.load('/tmp/123.npz')
+    >>> print(np.array_equal(test_array, loaded['a']))
+    True
+    >>> print(np.array_equal(test_vector, loaded['b']))
+    True
+
+    """
+    _savez(file, args, kwds, True, allow_pickle=allow_pickle)
+
+
+def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
+    # Import is postponed to here since zipfile depends on gzip, an optional
+    # component of the so-called standard library.
+    import zipfile
+
+    if not hasattr(file, 'write'):
+        file = os.fspath(file)
+        if not file.endswith('.npz'):
+            file = file + '.npz'
+
+    namedict = kwds
+    for i, val in enumerate(args):
+        key = 'arr_%d' % i
+        if key in namedict.keys():
+            raise ValueError(
+                f"Cannot use un-named variables and keyword {key}")
+        namedict[key] = val
+
+    if compress:
+        compression = zipfile.ZIP_DEFLATED
+    else:
+        compression = zipfile.ZIP_STORED
+
+    zipf = zipfile_factory(file, mode="w", compression=compression)
+    try:
+        for key, val in namedict.items():
+            fname = key + '.npy'
+            val = np.asanyarray(val)
+            # always force zip64, gh-10776
+            with zipf.open(fname, 'w', force_zip64=True) as fid:
+                format.write_array(fid, val,
+                                   allow_pickle=allow_pickle,
+                                   pickle_kwargs=pickle_kwargs)
+    finally:
+        zipf.close()
+
+
+def _ensure_ndmin_ndarray_check_param(ndmin):
+    """Just checks if the param ndmin is supported by
+    _ensure_ndmin_ndarray. It is intended to be used as
+    verification before running anything expensive.
+    e.g. loadtxt, genfromtxt
+    """
+    # Check correctness of the values of `ndmin`
+    if ndmin not in [0, 1, 2]:
+        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+def _ensure_ndmin_ndarray(a, *, ndmin: int):
+    """This is a helper function of loadtxt and genfromtxt to ensure
+    proper minimum dimension as requested
+
+    ndmin : int. Supported values 0, 1, 2
+        ^^ whenever this changes, keep in sync with
+        _ensure_ndmin_ndarray_check_param
+    """
+    # Verify that the array has at least dimensions `ndmin`.
+    # Tweak the size and shape of the arrays - remove extraneous dimensions
+    if a.ndim > ndmin:
+        a = np.squeeze(a)
+    # and ensure we have the minimum number of dimensions asked for
+    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+    if a.ndim < ndmin:
+        if ndmin == 1:
+            a = np.atleast_1d(a)
+        elif ndmin == 2:
+            a = np.atleast_2d(a).T
+
+    return a
+
+
+# amount of lines loadtxt reads in one chunk, can be overridden for testing
+_loadtxt_chunksize = 50000
+
+
+def _check_nonneg_int(value, name="argument"):
+    try:
+        operator.index(value)
+    except TypeError:
+        raise TypeError(f"{name} must be an integer") from None
+    if value < 0:
+        raise ValueError(f"{name} must be nonnegative")
+
+
+def _preprocess_comments(iterable, comments, encoding):
+    """
+    Generator that consumes an iterable of lines and strips multiple
+    (or multi-character) comments from each line.
+    This is a pre-processing step to achieve feature parity with loadtxt
+    (we assume that this feature is a niche feature).
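+
+    For example (illustrative): with ``comments=('#', '//')`` the line
+    ``"1, 2 // three # four"`` is yielded as ``"1, 2 "``.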
+ """ + for line in iterable: + if isinstance(line, bytes): + # Need to handle conversion here, or the splitting would fail + line = line.decode(encoding) + + for c in comments: + line = line.split(c, 1)[0] + + yield line + + +# The number of rows we read in one go if confronted with a parametric dtype +_loadtxt_chunksize = 50000 + + +def _read(fname, *, delimiter=',', comment='#', quote='"', + imaginary_unit='j', usecols=None, skiplines=0, + max_rows=None, converters=None, ndmin=None, unpack=False, + dtype=np.float64, encoding=None): + r""" + Read a NumPy array from a text file. + This is a helper function for loadtxt. + + Parameters + ---------- + fname : file, str, or pathlib.Path + The filename or the file to be read. + delimiter : str, optional + Field delimiter of the fields in line of the file. + Default is a comma, ','. If None any sequence of whitespace is + considered a delimiter. + comment : str or sequence of str or None, optional + Character that begins a comment. All text from the comment + character to the end of the line is ignored. + Multiple comments or multiple-character comment strings are supported, + but may be slower and `quote` must be empty if used. + Use None to disable all use of comments. + quote : str or None, optional + Character that is used to quote string fields. Default is '"' + (a double quote). Use None to disable quote support. + imaginary_unit : str, optional + Character that represent the imaginary unit `sqrt(-1)`. + Default is 'j'. + usecols : array_like, optional + A one-dimensional array of integer column numbers. These are the + columns from the file to be included in the array. If this value + is not given, all the columns are used. + skiplines : int, optional + Number of lines to skip before interpreting the data in the file. + max_rows : int, optional + Maximum number of rows of data to read. Default is to read the + entire file. + converters : dict or callable, optional + A function to parse all columns strings into the desired value, or + a dictionary mapping column number to a parser function. + E.g. if column 0 is a date string: ``converters = {0: datestr2num}``. + Converters can also be used to provide a default value for missing + data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will + convert empty fields to 0. + Default: None + ndmin : int, optional + Minimum dimension of the array returned. + Allowed values are 0, 1 or 2. Default is 0. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = read(...)``. When used with a structured + data-type, arrays are returned for each field. Default is False. + dtype : numpy data type + A NumPy dtype instance, can be a structured dtype to map to the + columns of the file. + encoding : str, optional + Encoding used to decode the inputfile. The special value 'bytes' + (the default) enables backwards-compatible behavior for `converters`, + ensuring that inputs to the converter functions are encoded + bytes objects. The special value 'bytes' has no additional effect if + ``converters=None``. If encoding is ``'bytes'`` or ``None``, the + default system encoding is used. + + Returns + ------- + ndarray + NumPy array. 
+ """ + # Handle special 'bytes' keyword for encoding + byte_converters = False + if encoding == 'bytes': + encoding = None + byte_converters = True + + if dtype is None: + raise TypeError("a dtype must be provided.") + dtype = np.dtype(dtype) + + read_dtype_via_object_chunks = None + if dtype.kind in 'SUM' and dtype in { + np.dtype("S0"), np.dtype("U0"), np.dtype("M8"), np.dtype("m8")}: + # This is a legacy "flexible" dtype. We do not truly support + # parametric dtypes currently (no dtype discovery step in the core), + # but have to support these for backward compatibility. + read_dtype_via_object_chunks = dtype + dtype = np.dtype(object) + + if usecols is not None: + # Allow usecols to be a single int or a sequence of ints, the C-code + # handles the rest + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols] + + _ensure_ndmin_ndarray_check_param(ndmin) + + if comment is None: + comments = None + else: + # assume comments are a sequence of strings + if "" in comment: + raise ValueError( + "comments cannot be an empty string. Use comments=None to " + "disable comments." + ) + comments = tuple(comment) + comment = None + if len(comments) == 0: + comments = None # No comments at all + elif len(comments) == 1: + # If there is only one comment, and that comment has one character, + # the normal parsing can deal with it just fine. + if isinstance(comments[0], str) and len(comments[0]) == 1: + comment = comments[0] + comments = None + # Input validation if there are multiple comment characters + elif delimiter in comments: + raise TypeError( + f"Comment characters '{comments}' cannot include the " + f"delimiter '{delimiter}'" + ) + + # comment is now either a 1 or 0 character string or a tuple: + if comments is not None: + # Note: An earlier version support two character comments (and could + # have been extended to multiple characters, we assume this is + # rare enough to not optimize for. + if quote is not None: + raise ValueError( + "when multiple comments or a multi-character comment is " + "given, quotes are not supported. In this case quotechar " + "must be set to None.") + + if len(imaginary_unit) != 1: + raise ValueError('len(imaginary_unit) must be 1.') + + _check_nonneg_int(skiplines) + if max_rows is not None: + _check_nonneg_int(max_rows) + else: + # Passing -1 to the C code means "read the entire file". + max_rows = -1 + + fh_closing_ctx = contextlib.nullcontext() + filelike = False + try: + if isinstance(fname, os.PathLike): + fname = os.fspath(fname) + if isinstance(fname, str): + fh = np.lib._datasource.open(fname, 'rt', encoding=encoding) + if encoding is None: + encoding = getattr(fh, 'encoding', 'latin1') + + fh_closing_ctx = contextlib.closing(fh) + data = fh + filelike = True + else: + if encoding is None: + encoding = getattr(fname, 'encoding', 'latin1') + data = iter(fname) + except TypeError as e: + raise ValueError( + f"fname must be a string, filehandle, list of strings,\n" + f"or generator. 
Got {type(fname)} instead.") from e + + with fh_closing_ctx: + if comments is not None: + if filelike: + data = iter(data) + filelike = False + data = _preprocess_comments(data, comments, encoding) + + if read_dtype_via_object_chunks is None: + arr = _load_from_filelike( + data, delimiter=delimiter, comment=comment, quote=quote, + imaginary_unit=imaginary_unit, + usecols=usecols, skiplines=skiplines, max_rows=max_rows, + converters=converters, dtype=dtype, + encoding=encoding, filelike=filelike, + byte_converters=byte_converters) + + else: + # This branch reads the file into chunks of object arrays and then + # casts them to the desired actual dtype. This ensures correct + # string-length and datetime-unit discovery (like `arr.astype()`). + # Due to chunking, certain error reports are less clear, currently. + if filelike: + data = iter(data) # cannot chunk when reading from file + filelike = False + + c_byte_converters = False + if read_dtype_via_object_chunks == "S": + c_byte_converters = True # Use latin1 rather than ascii + + chunks = [] + while max_rows != 0: + if max_rows < 0: + chunk_size = _loadtxt_chunksize + else: + chunk_size = min(_loadtxt_chunksize, max_rows) + + next_arr = _load_from_filelike( + data, delimiter=delimiter, comment=comment, quote=quote, + imaginary_unit=imaginary_unit, + usecols=usecols, skiplines=skiplines, max_rows=chunk_size, + converters=converters, dtype=dtype, + encoding=encoding, filelike=filelike, + byte_converters=byte_converters, + c_byte_converters=c_byte_converters) + # Cast here already. We hope that this is better even for + # large files because the storage is more compact. It could + # be adapted (in principle the concatenate could cast). + chunks.append(next_arr.astype(read_dtype_via_object_chunks)) + + skiplines = 0 # Only have to skip for first chunk + if max_rows >= 0: + max_rows -= chunk_size + if len(next_arr) < chunk_size: + # There was less data than requested, so we are done. + break + + # Need at least one chunk, but if empty, the last one may have + # the wrong shape. + if len(chunks) > 1 and len(chunks[-1]) == 0: + del chunks[-1] + if len(chunks) == 1: + arr = chunks[0] + else: + arr = np.concatenate(chunks, axis=0) + + # NOTE: ndmin works as advertised for structured dtypes, but normally + # these would return a 1D result plus the structured dimension, + # so ndmin=2 adds a third dimension even when no squeezing occurs. + # A `squeeze=False` could be a better solution (pandas uses squeeze). + arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin) + + if arr.shape: + if arr.shape[0] == 0: + warnings.warn( + f'loadtxt: input contained no data: "{fname}"', + category=UserWarning, + stacklevel=3 + ) + + if unpack: + # Unpack structured dtypes if requested: + dt = arr.dtype + if dt.names is not None: + # For structured arrays, return an array for each field. + return [arr[field] for field in dt.names] + else: + return arr.T + else: + return arr + + +@finalize_array_function_like +@set_module('numpy') +def loadtxt(fname, dtype=float, comments='#', delimiter=None, + converters=None, skiprows=0, usecols=None, unpack=False, + ndmin=0, encoding=None, max_rows=None, *, quotechar=None, + like=None): + r""" + Load data from a text file. + + Parameters + ---------- + fname : file, str, pathlib.Path, list of str, generator + File, filename, list, or generator to read. If the filename + extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note + that generators must return bytes or strings. 
The strings + in a list or produced by a generator are treated as lines. + dtype : data-type, optional + Data-type of the resulting array; default: float. If this is a + structured data-type, the resulting array will be 1-dimensional, and + each row will be interpreted as an element of the array. In this + case, the number of columns used must match the number of fields in + the data-type. + comments : str or sequence of str or None, optional + The characters or list of characters used to indicate the start of a + comment. None implies no comments. For backwards compatibility, byte + strings will be decoded as 'latin1'. The default is '#'. + delimiter : str, optional + The character used to separate the values. For backwards compatibility, + byte strings will be decoded as 'latin1'. The default is whitespace. + + .. versionchanged:: 1.23.0 + Only single character delimiters are supported. Newline characters + cannot be used as the delimiter. + + converters : dict or callable, optional + Converter functions to customize value parsing. If `converters` is + callable, the function is applied to all columns, else it must be a + dict that maps column number to a parser function. + See examples for further details. + Default: None. + + .. versionchanged:: 1.23.0 + The ability to pass a single callable to be applied to all columns + was added. + + skiprows : int, optional + Skip the first `skiprows` lines, including comments; default: 0. + usecols : int or sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. + The default, None, results in all columns being read. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = loadtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + ndmin : int, optional + The returned array will have at least `ndmin` dimensions. + Otherwise mono-dimensional axes will be squeezed. + Legal values: 0 (default), 1 or 2. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + The special value 'bytes' enables backward compatibility workarounds + that ensures you receive byte arrays as results if possible and passes + 'latin1' encoded strings to converters. Override this value to receive + unicode arrays and pass strings as input to converters. If set to None + the system default is used. The default value is None. + + .. versionchanged:: 2.0 + Before NumPy 2, the default was ``'bytes'`` for Python 2 + compatibility. The default is now ``None``. + + max_rows : int, optional + Read `max_rows` rows of content after `skiprows` lines. The default is + to read all the rows. Note that empty rows containing no data such as + empty lines and comment lines are not counted towards `max_rows`, + while such lines are counted in `skiprows`. + + .. versionchanged:: 1.23.0 + Lines containing no data, including comment lines (e.g., lines + starting with '#' or as specified via `comments`) are not counted + towards `max_rows`. + quotechar : unicode character or None, optional + The character used to denote the start and end of a quoted item. + Occurrences of the delimiter or comment characters are ignored within + a quoted item. The default value is ``quotechar=None``, which means + quoting support is disabled. 
+
+        If two consecutive instances of `quotechar` are found within a quoted
+        field, the first is treated as an escape character. See examples.
+
+        .. versionadded:: 1.23.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file.
+
+    See Also
+    --------
+    load, fromstring, fromregex
+    genfromtxt : Load data with missing values handled as specified.
+    scipy.io.loadmat : reads MATLAB data files
+
+    Notes
+    -----
+    This function aims to be a fast reader for simply formatted files.  The
+    `genfromtxt` function provides more sophisticated handling of, e.g.,
+    lines with missing values.
+
+    Each row in the input text file must have the same number of values to be
+    able to read all values. If all rows do not have the same number of
+    values, a subset of up to n columns (where n is the least number of
+    values present in all rows) can be read by specifying the columns via
+    `usecols`.
+
+    The strings produced by the Python float.hex method can be used as
+    input for floats.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from io import StringIO   # StringIO behaves like a file object
+    >>> c = StringIO("0 1\n2 3")
+    >>> np.loadtxt(c)
+    array([[0., 1.],
+           [2., 3.]])
+
+    >>> d = StringIO("M 21 72\nF 35 58")
+    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+    ...                      'formats': ('S1', 'i4', 'f4')})
+    array([(b'M', 21, 72.), (b'F', 35, 58.)],
+          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+    >>> c = StringIO("1,0,2\n3,0,4")
+    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+    >>> x
+    array([1., 3.])
+    >>> y
+    array([2., 4.])
+
+    The `converters` argument is used to specify functions to preprocess the
+    text prior to parsing. `converters` can be a dictionary that maps
+    preprocessing functions to each column:
+
+    >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+    >>> conv = {
+    ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
+    ...     1: lambda x: np.ceil(float(x)),   # conversion fn for column 1
+    ... }
+    >>> np.loadtxt(s, delimiter=",", converters=conv)
+    array([[1., 3.],
+           [3., 5.]])
+
+    `converters` can be a callable instead of a dictionary, in which case it
+    is applied to all columns:
+
+    >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+    >>> import functools
+    >>> conv = functools.partial(int, base=16)
+    >>> np.loadtxt(s, converters=conv)
+    array([[222., 173.],
+           [192., 222.]])
+
+    This example shows how `converters` can be used to convert a field
+    with a trailing minus sign into a negative number.
+
+    >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
+    >>> def conv(fld):
+    ...     return -float(fld[:-1]) if fld.endswith("-") else float(fld)
+    ...
+    >>> np.loadtxt(s, converters=conv)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Using a callable as the converter can be particularly useful for handling
+    values with different formatting, e.g. floats with underscores:
+
+    >>> s = StringIO("1 2.7 100_000")
+    >>> np.loadtxt(s, converters=float)
+    array([1.e+00, 2.7e+00, 1.e+05])
+
+    This idea can be extended to automatically handle values specified in
+    many different formats, such as hex values:
+
+    >>> def conv(val):
+    ...     try:
+    ...         return float(val)
+    ...     except ValueError:
+    ...         return float.fromhex(val)
+    >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+    >>> np.loadtxt(s, delimiter=",", converters=conv)
+    array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+    Or a format where the ``-`` sign comes after the number:
+
+    >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
+    >>> conv = lambda x: -float(x[:-1]) if x.endswith("-") else float(x)
+    >>> np.loadtxt(s, converters=conv)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Support for quoted fields is enabled with the `quotechar` parameter.
+    Comment and delimiter characters are ignored when they appear within a
+    quoted item delineated by `quotechar`:
+
+    >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64', 2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Quoted fields can be separated by multiple whitespace characters:
+
+    >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64', 2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Two consecutive quote characters within a quoted field are treated as a
+    single escaped character:
+
+    >>> s = StringIO('"Hello, my name is ""Monty""!"')
+    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+    array('Hello, my name is "Monty"!', dtype='<U26')
+
+    Read subset of columns when all rows do not contain equal number of
+    values:
+
+    >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+    >>> np.loadtxt(d, usecols=(0, 1))
+    array([[ 1.,  2.],
+           [ 2.,  4.],
+           [ 3.,  9.],
+           [ 4., 16.]])
+
+    """
+
+    if like is not None:
+        return _loadtxt_with_like(
+            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+            converters=converters, skiprows=skiprows, usecols=usecols,
+            unpack=unpack, ndmin=ndmin, encoding=encoding,
+            max_rows=max_rows
+        )
+
+    if isinstance(delimiter, bytes):
+        delimiter.decode("latin1")
+
+    if dtype is None:
+        dtype = np.float64
+
+    comment = comments
+    # Control character type conversions for Py3 convenience
+    if comment is not None:
+        if isinstance(comment, (str, bytes)):
+            comment = [comment]
+        comment = [
+            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+    if isinstance(delimiter, bytes):
+        delimiter = delimiter.decode('latin1')
+
+    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+                converters=converters, skiplines=skiprows, usecols=usecols,
+                unpack=unpack, ndmin=ndmin, encoding=encoding,
+                max_rows=max_rows, quote=quotechar)
+
+    return arr
+
+
+_loadtxt_with_like = array_function_dispatch()(loadtxt)
+
+
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+                        header=None, footer=None, comments=None,
+                        encoding=None):
+    return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
+def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+            footer='', comments='# ', encoding=None):
+    """
+    Save an array to a text file.
+
+    Parameters
+    ----------
+    fname : filename, file handle or pathlib.Path
+        If the filename ends in ``.gz``, the file is automatically saved in
+        compressed gzip format.  `loadtxt` understands gzipped files
+        transparently.
+    X : 1D or 2D array_like
+        Data to be saved to a text file.
+    fmt : str or sequence of strs, optional
+        A single format (%10.5f), a sequence of formats, or a
+        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+        case `delimiter` is ignored. For complex `X`, the legal options
+        for `fmt` are:
+
+        * a single specifier, ``fmt='%.4e'``, resulting in numbers formatted
+          like ``' (%s+%sj)' % (fmt, fmt)``
+        * a full string specifying every real and imaginary part, e.g.
+          ``' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'`` for 3 columns
+        * a list of specifiers, one per column - in this case, the real
+          and imaginary part must have separate specifiers,
+          e.g. ``['%.3e + %.3ej', '(%.15e%+.15ej)']`` for 2 columns
+    delimiter : str, optional
+        String or character separating columns.
+    newline : str, optional
+        String or character separating lines.
+    header : str, optional
+        String that will be written at the beginning of the file.
+    footer : str, optional
+        String that will be written at the end of the file.
+    comments : str, optional
+        String that will be prepended to the ``header`` and ``footer``
+        strings, to mark them as comments. Default: '# ', as expected by e.g.
+        ``numpy.loadtxt``.
+    encoding : {None, str}, optional
+        Encoding used to encode the outputfile. Does not apply to output
+        streams. If the encoding is something other than 'bytes' or 'latin1'
+        you will not be able to load the file in NumPy versions < 1.14.
+        Default is 'latin1'.
+
+    See Also
+    --------
+    save : Save an array to a binary file in NumPy ``.npy`` format
+    savez : Save several arrays into an uncompressed ``.npz`` archive
+    savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+    Notes
+    -----
+    Further explanation of the `fmt` parameter
+    (``%[flag]width[.precision]specifier``):
+
+    flags:
+        ``-`` : left justify
+
+        ``+`` : Forces to precede result with + or -.
+
+        ``0`` : Left pad the number with zeros instead of space (see width).
+
+    width:
+        Minimum number of characters to be printed. The value is not truncated
+        if it has more characters.
+
+    precision:
+        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+          digits.
+        - For ``e, E`` and ``f`` specifiers, the number of digits to print
+          after the decimal point.
+        - For ``g`` and ``G``, the maximum number of significant digits.
+        - For ``s``, the maximum number of characters.
+
+    specifiers:
+        ``c`` : character
+
+        ``d`` or ``i`` : signed decimal integer
+
+        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+        ``f`` : decimal floating point
+
+        ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+        ``o`` : signed octal
+
+        ``s`` : string of characters
+
+        ``u`` : unsigned decimal integer
+
+        ``x,X`` : unsigned hexadecimal integer
+
+    This explanation of ``fmt`` is not complete, for an exhaustive
+    specification see [1]_.
+
+    References
+    ----------
+    .. [1] `Format Specification Mini-Language
+           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+           Python Documentation.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = y = z = np.arange(0.0,5.0,1.0)
+    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
+    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
+    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
+
+    """
+
+    class WriteWrap:
+        """Convert to bytes on bytestream inputs.
+ + """ + def __init__(self, fh, encoding): + self.fh = fh + self.encoding = encoding + self.do_write = self.first_write + + def close(self): + self.fh.close() + + def write(self, v): + self.do_write(v) + + def write_bytes(self, v): + if isinstance(v, bytes): + self.fh.write(v) + else: + self.fh.write(v.encode(self.encoding)) + + def write_normal(self, v): + self.fh.write(asunicode(v)) + + def first_write(self, v): + try: + self.write_normal(v) + self.write = self.write_normal + except TypeError: + # input is probably a bytestream + self.write_bytes(v) + self.write = self.write_bytes + + own_fh = False + if isinstance(fname, os.PathLike): + fname = os.fspath(fname) + if _is_string_like(fname): + # datasource doesn't support creating a new file ... + open(fname, 'wt').close() + fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) + own_fh = True + elif hasattr(fname, 'write'): + # wrap to handle byte output streams + fh = WriteWrap(fname, encoding or 'latin1') + else: + raise ValueError('fname must be a string or file handle') + + try: + X = np.asarray(X) + + # Handle 1-dimensional arrays + if X.ndim == 0 or X.ndim > 2: + raise ValueError( + "Expected 1D or 2D array, got %dD array instead" % X.ndim) + elif X.ndim == 1: + # Common case -- 1d array of numbers + if X.dtype.names is None: + X = np.atleast_2d(X).T + ncol = 1 + + # Complex dtype -- each field indicates a separate column + else: + ncol = len(X.dtype.names) + else: + ncol = X.shape[1] + + iscomplex_X = np.iscomplexobj(X) + # `fmt` can be a string with multiple insertion points or a + # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') + if type(fmt) in (list, tuple): + if len(fmt) != ncol: + raise AttributeError(f'fmt has wrong shape. {str(fmt)}') + format = delimiter.join(fmt) + elif isinstance(fmt, str): + n_fmt_chars = fmt.count('%') + error = ValueError(f'fmt has wrong number of % formats: {fmt}') + if n_fmt_chars == 1: + if iscomplex_X: + fmt = [f' ({fmt}+{fmt}j)', ] * ncol + else: + fmt = [fmt, ] * ncol + format = delimiter.join(fmt) + elif iscomplex_X and n_fmt_chars != (2 * ncol): + raise error + elif ((not iscomplex_X) and n_fmt_chars != ncol): + raise error + else: + format = fmt + else: + raise ValueError(f'invalid fmt: {fmt!r}') + + if len(header) > 0: + header = header.replace('\n', '\n' + comments) + fh.write(comments + header + newline) + if iscomplex_X: + for row in X: + row2 = [] + for number in row: + row2.extend((number.real, number.imag)) + s = format % tuple(row2) + newline + fh.write(s.replace('+-', '-')) + else: + for row in X: + try: + v = format % tuple(row) + newline + except TypeError as e: + raise TypeError("Mismatch between array dtype ('%s') and " + "format specifier ('%s')" + % (str(X.dtype), format)) from e + fh.write(v) + + if len(footer) > 0: + footer = footer.replace('\n', '\n' + comments) + fh.write(comments + footer + newline) + finally: + if own_fh: + fh.close() + + +@set_module('numpy') +def fromregex(file, regexp, dtype, encoding=None): + r""" + Construct an array from a text file, using regular expression parsing. + + The returned array is always a structured array, and is constructed from + all matches of the regular expression in the file. Groups in the regular + expression are converted to fields of the structured array. + + Parameters + ---------- + file : file, str, or pathlib.Path + Filename or file object to read. + + .. versionchanged:: 1.22.0 + Now accepts `os.PathLike` implementations. + + regexp : str or regexp + Regular expression used to parse the file. 
+ Groups in the regular expression correspond to fields in the dtype. + dtype : dtype or list of dtypes + Dtype for the structured array; must be a structured datatype. + encoding : str, optional + Encoding used to decode the inputfile. Does not apply to input streams. + + Returns + ------- + output : ndarray + The output array, containing the part of the content of `file` that + was matched by `regexp`. `output` is always a structured array. + + Raises + ------ + TypeError + When `dtype` is not a valid dtype for a structured array. + + See Also + -------- + fromstring, loadtxt + + Notes + ----- + Dtypes for structured arrays can be specified in several forms, but all + forms specify at least the data type and field name. For details see + `basics.rec`. + + Examples + -------- + >>> import numpy as np + >>> from io import StringIO + >>> text = StringIO("1312 foo\n1534 bar\n444 qux") + + >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything] + >>> output = np.fromregex(text, regexp, + ... [('num', np.int64), ('key', 'S3')]) + >>> output + array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], + dtype=[('num', '>> output['num'] + array([1312, 1534, 444]) + + """ + own_fh = False + if not hasattr(file, "read"): + file = os.fspath(file) + file = np.lib._datasource.open(file, 'rt', encoding=encoding) + own_fh = True + + try: + if not isinstance(dtype, np.dtype): + dtype = np.dtype(dtype) + if dtype.names is None: + raise TypeError('dtype must be a structured datatype.') + + content = file.read() + if isinstance(content, bytes) and isinstance(regexp, str): + regexp = asbytes(regexp) + + if not hasattr(regexp, 'match'): + regexp = re.compile(regexp) + seq = regexp.findall(content) + if seq and not isinstance(seq[0], tuple): + # Only one group is in the regexp. + # Create the new array as a single data-type and then + # re-interpret as a single-field structured array. + newdtype = np.dtype(dtype[dtype.names[0]]) + output = np.array(seq, dtype=newdtype) + output.dtype = dtype + else: + output = np.array(seq, dtype=dtype) + + return output + finally: + if own_fh: + file.close() + + +#####-------------------------------------------------------------------------- +#---- --- ASCII functions --- +#####-------------------------------------------------------------------------- + + +@finalize_array_function_like +@set_module('numpy') +def genfromtxt(fname, dtype=float, comments='#', delimiter=None, + skip_header=0, skip_footer=0, converters=None, + missing_values=None, filling_values=None, usecols=None, + names=None, excludelist=None, + deletechars=''.join(sorted(NameValidator.defaultdeletechars)), # noqa: B008 + replace_space='_', autostrip=False, case_sensitive=True, + defaultfmt="f%i", unpack=None, usemask=False, loose=True, + invalid_raise=True, max_rows=None, encoding=None, + *, ndmin=0, like=None): + """ + Load data from a text file, with missing values handled as specified. + + Each line past the first `skip_header` lines is split at the `delimiter` + character, and characters following the `comments` character are discarded. + + Parameters + ---------- + fname : file, str, pathlib.Path, list of str, generator + File, filename, list, or generator to read. If the filename + extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note + that generators must return bytes or strings. The strings + in a list or produced by a generator are treated as lines. + dtype : dtype, optional + Data type of the resulting array. 
+ If None, the dtypes will be determined by the contents of each + column, individually. + comments : str, optional + The character used to indicate the start of a comment. + All the characters occurring on a line after a comment are discarded. + delimiter : str, int, or sequence, optional + The string used to separate values. By default, any consecutive + whitespaces act as delimiter. An integer or sequence of integers + can also be provided as width(s) of each field. + skiprows : int, optional + `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. + skip_header : int, optional + The number of lines to skip at the beginning of the file. + skip_footer : int, optional + The number of lines to skip at the end of the file. + converters : variable, optional + The set of functions that convert the data of a column to a value. + The converters can also be used to provide a default value + for missing data: ``converters = {3: lambda s: float(s or 0)}``. + missing : variable, optional + `missing` was removed in numpy 1.10. Please use `missing_values` + instead. + missing_values : variable, optional + The set of strings corresponding to missing data. + filling_values : variable, optional + The set of values to be used as default when the data are missing. + usecols : sequence, optional + Which columns to read, with 0 being the first. For example, + ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. + names : {None, True, str, sequence}, optional + If `names` is True, the field names are read from the first line after + the first `skip_header` lines. This line can optionally be preceded + by a comment delimiter. Any content before the comment delimiter is + discarded. If `names` is a sequence or a single-string of + comma-separated names, the names will be used to define the field + names in a structured dtype. If `names` is None, the names of the + dtype fields will be used, if any. + excludelist : sequence, optional + A list of names to exclude. This list is appended to the default list + ['return','file','print']. Excluded names are appended with an + underscore: for example, `file` would become `file_`. + deletechars : str, optional + A string combining invalid characters that must be deleted from the + names. + defaultfmt : str, optional + A format used to define default field names, such as "f%i" or "f_%02i". + autostrip : bool, optional + Whether to automatically strip white spaces from the variables. + replace_space : char, optional + Character(s) used in replacement of white spaces in the variable + names. By default, use a '_'. + case_sensitive : {True, False, 'upper', 'lower'}, optional + If True, field names are case sensitive. + If False or 'upper', field names are converted to upper case. + If 'lower', field names are converted to lower case. + unpack : bool, optional + If True, the returned array is transposed, so that arguments may be + unpacked using ``x, y, z = genfromtxt(...)``. When used with a + structured data-type, arrays are returned for each field. + Default is False. + usemask : bool, optional + If True, return a masked array. + If False, return a regular array. + loose : bool, optional + If True, do not raise errors for invalid values. + invalid_raise : bool, optional + If True, an exception is raised if an inconsistency is detected in the + number of columns. + If False, a warning is emitted and the offending lines are skipped. + max_rows : int, optional + The maximum number of rows to read. 
Must not be used with skip_footer
+        at the same time.  If given, the value must be at least 1. Default is
+        to read the entire file.
+    encoding : str, optional
+        Encoding used to decode the inputfile. Does not apply when `fname`
+        is a file object. The special value 'bytes' enables backward
+        compatibility workarounds that ensure that you receive byte arrays
+        when possible and passes latin1 encoded strings to converters.
+        Override this value to receive unicode arrays and pass strings
+        as input to converters. If set to None the system default is used.
+        The default value is None.
+
+        .. versionchanged:: 2.0
+            Before NumPy 2, the default was ``'bytes'`` for Python 2
+            compatibility. The default is now ``None``.
+
+    ndmin : int, optional
+        Same parameter as `loadtxt`
+
+        .. versionadded:: 1.23.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file. If `usemask` is True, this is a
+        masked array.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When variables are named (either by a flexible dtype or with a `names`
+      sequence), there must not be any header in the file (else a ValueError
+      exception is raised).
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove
+      spaces.
+    * Custom converters may receive unexpected values due to dtype
+      discovery.
+
+    References
+    ----------
+    .. [1] NumPy User Guide, section `I/O with NumPy
+           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> import numpy as np
+
+    Comma delimited file with mixed dtype
+
+    >>> s = StringIO("1,1.3,abcde")
+    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+    ... ('mystring','S5')], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Using dtype = None
+
+    >>> _ = s.seek(0) # needed for StringIO example only
+    >>> data = np.genfromtxt(s, dtype=None,
+    ... names = ['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '<U5')])
+
+    Specifying dtype and names
+
+    >>> _ = s.seek(0)
+    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+    ... names=['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    An example with fixed-width columns
+
+    >>> s = StringIO("11.3abcde")
+    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+    ...     delimiter=[1,3,5])
+    >>> data
+    array((1, 1.3, 'abcde'),
+          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '<U5')])
+
+    An example to show comments
+
+    >>> f = StringIO('''
+    ... text,# of chars
+    ... hello world,11
+    ...
numpy,5''') + >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',') + array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')], + dtype=[('f0', 'S12'), ('f1', 'S12')]) + + """ + + if like is not None: + return _genfromtxt_with_like( + like, fname, dtype=dtype, comments=comments, delimiter=delimiter, + skip_header=skip_header, skip_footer=skip_footer, + converters=converters, missing_values=missing_values, + filling_values=filling_values, usecols=usecols, names=names, + excludelist=excludelist, deletechars=deletechars, + replace_space=replace_space, autostrip=autostrip, + case_sensitive=case_sensitive, defaultfmt=defaultfmt, + unpack=unpack, usemask=usemask, loose=loose, + invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding, + ndmin=ndmin, + ) + + _ensure_ndmin_ndarray_check_param(ndmin) + + if max_rows is not None: + if skip_footer: + raise ValueError( + "The keywords 'skip_footer' and 'max_rows' can not be " + "specified at the same time.") + if max_rows < 1: + raise ValueError("'max_rows' must be at least 1.") + + if usemask: + from numpy.ma import MaskedArray, make_mask_descr + # Check the input dictionary of converters + user_converters = converters or {} + if not isinstance(user_converters, dict): + raise TypeError( + "The input argument 'converter' should be a valid dictionary " + "(got '%s' instead)" % type(user_converters)) + + if encoding == 'bytes': + encoding = None + byte_converters = True + else: + byte_converters = False + + # Initialize the filehandle, the LineSplitter and the NameValidator + if isinstance(fname, os.PathLike): + fname = os.fspath(fname) + if isinstance(fname, str): + fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) + fid_ctx = contextlib.closing(fid) + else: + fid = fname + fid_ctx = contextlib.nullcontext(fid) + try: + fhd = iter(fid) + except TypeError as e: + raise TypeError( + "fname must be a string, a filehandle, a sequence of strings,\n" + f"or an iterator of strings. Got {type(fname)} instead." + ) from e + with fid_ctx: + split_line = LineSplitter(delimiter=delimiter, comments=comments, + autostrip=autostrip, encoding=encoding) + validate_names = NameValidator(excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + + # Skip the first `skip_header` rows + try: + for i in range(skip_header): + next(fhd) + + # Keep on until we find the first valid values + first_values = None + + while not first_values: + first_line = _decode_line(next(fhd), encoding) + if (names is True) and (comments is not None): + if comments in first_line: + first_line = ( + ''.join(first_line.split(comments)[1:])) + first_values = split_line(first_line) + except StopIteration: + # return an empty array if the datafile is empty + first_line = '' + first_values = [] + warnings.warn( + f'genfromtxt: Empty input file: "{fname}"', stacklevel=2 + ) + + # Should we take the first values as names ? 
+ if names is True: + fval = first_values[0].strip() + if comments is not None: + if fval in comments: + del first_values[0] + + # Check the columns to use: make sure `usecols` is a list + if usecols is not None: + try: + usecols = [_.strip() for _ in usecols.split(",")] + except AttributeError: + try: + usecols = list(usecols) + except TypeError: + usecols = [usecols, ] + nbcols = len(usecols or first_values) + + # Check the names and overwrite the dtype.names if needed + if names is True: + names = validate_names([str(_.strip()) for _ in first_values]) + first_line = '' + elif _is_string_like(names): + names = validate_names([_.strip() for _ in names.split(',')]) + elif names: + names = validate_names(names) + # Get the dtype + if dtype is not None: + dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, + excludelist=excludelist, + deletechars=deletechars, + case_sensitive=case_sensitive, + replace_space=replace_space) + # Make sure the names is a list (for 2.5) + if names is not None: + names = list(names) + + if usecols: + for (i, current) in enumerate(usecols): + # if usecols is a list of names, convert to a list of indices + if _is_string_like(current): + usecols[i] = names.index(current) + elif current < 0: + usecols[i] = current + len(first_values) + # If the dtype is not None, make sure we update it + if (dtype is not None) and (len(dtype) > nbcols): + descr = dtype.descr + dtype = np.dtype([descr[_] for _ in usecols]) + names = list(dtype.names) + # If `names` is not None, update the names + elif (names is not None) and (len(names) > nbcols): + names = [names[_] for _ in usecols] + elif (names is not None) and (dtype is not None): + names = list(dtype.names) + + # Process the missing values ............................... + # Rename missing_values for convenience + user_missing_values = missing_values or () + if isinstance(user_missing_values, bytes): + user_missing_values = user_missing_values.decode('latin1') + + # Define the list of missing_values (one column: one list) + missing_values = [[''] for _ in range(nbcols)] + + # We have a dictionary: process it field by field + if isinstance(user_missing_values, dict): + # Loop on the items + for (key, val) in user_missing_values.items(): + # Is the key a string ? + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key as needed if it's a column number + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Transform the value as a list of string + if isinstance(val, (list, tuple)): + val = [str(_) for _ in val] + else: + val = [str(val), ] + # Add the value(s) to the current list of missing + if key is None: + # None acts as default + for miss in missing_values: + miss.extend(val) + else: + missing_values[key].extend(val) + # We have a sequence : each item matches a column + elif isinstance(user_missing_values, (list, tuple)): + for (value, entry) in zip(user_missing_values, missing_values): + value = str(value) + if value not in entry: + entry.append(value) + # We have a string : apply it to all entries + elif isinstance(user_missing_values, str): + user_value = user_missing_values.split(",") + for entry in missing_values: + entry.extend(user_value) + # We have something else: apply it to all entries + else: + for entry in missing_values: + entry.extend([str(user_missing_values)]) + + # Process the filling_values ............................... 
+ # Rename the input for convenience + user_filling_values = filling_values + if user_filling_values is None: + user_filling_values = [] + # Define the default + filling_values = [None] * nbcols + # We have a dictionary : update each entry individually + if isinstance(user_filling_values, dict): + for (key, val) in user_filling_values.items(): + if _is_string_like(key): + try: + # Transform it into an integer + key = names.index(key) + except ValueError: + # We couldn't find it: the name must have been dropped + continue + # Redefine the key if it's a column number + # and usecols is defined + if usecols: + try: + key = usecols.index(key) + except ValueError: + pass + # Add the value to the list + filling_values[key] = val + # We have a sequence : update on a one-to-one basis + elif isinstance(user_filling_values, (list, tuple)): + n = len(user_filling_values) + if (n <= nbcols): + filling_values[:n] = user_filling_values + else: + filling_values = user_filling_values[:nbcols] + # We have something else : use it for all entries + else: + filling_values = [user_filling_values] * nbcols + + # Initialize the converters ................................ + if dtype is None: + # Note: we can't use a [...]*nbcols, as we would have 3 times + # the same converter, instead of 3 different converters. + converters = [ + StringConverter(None, missing_values=miss, default=fill) + for (miss, fill) in zip(missing_values, filling_values) + ] + else: + dtype_flat = flatten_dtype(dtype, flatten_base=True) + # Initialize the converters + if len(dtype_flat) > 1: + # Flexible type : get a converter from each dtype + zipit = zip(dtype_flat, missing_values, filling_values) + converters = [StringConverter(dt, + locked=True, + missing_values=miss, + default=fill) + for (dt, miss, fill) in zipit] + else: + # Set to a default converter (but w/ different missing values) + zipit = zip(missing_values, filling_values) + converters = [StringConverter(dtype, + locked=True, + missing_values=miss, + default=fill) + for (miss, fill) in zipit] + # Update the converters to use the user-defined ones + uc_update = [] + for (j, conv) in user_converters.items(): + # If the converter is specified by column names, + # use the index instead + if _is_string_like(j): + try: + j = names.index(j) + i = j + except ValueError: + continue + elif usecols: + try: + i = usecols.index(j) + except ValueError: + # Unused converter specified + continue + else: + i = j + # Find the value to test - first_line is not filtered by usecols: + if len(first_line): + testing_value = first_values[j] + else: + testing_value = None + if conv is bytes: + user_conv = asbytes + elif byte_converters: + # Converters may use decode to workaround numpy's old + # behavior, so encode the string again before passing + # to the user converter. + def tobytes_first(x, conv): + if type(x) is bytes: + return conv(x) + return conv(x.encode("latin1")) + user_conv = functools.partial(tobytes_first, conv=conv) + else: + user_conv = conv + converters[i].update(user_conv, locked=True, + testing_value=testing_value, + default=filling_values[i], + missing_values=missing_values[i],) + uc_update.append((i, user_conv)) + # Make sure we have the corrected keys in user_converters... + user_converters.update(uc_update) + + # Fixme: possible error as following variable never used. + # miss_chars = [_.missing_values for _ in converters] + + # Initialize the output lists ... + # ... rows + rows = [] + append_to_rows = rows.append + # ... 
masks + if usemask: + masks = [] + append_to_masks = masks.append + # ... invalid + invalid = [] + append_to_invalid = invalid.append + + # Parse each line + for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): + values = split_line(line) + nbvalues = len(values) + # Skip an empty line + if nbvalues == 0: + continue + if usecols: + # Select only the columns we need + try: + values = [values[_] for _ in usecols] + except IndexError: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + elif nbvalues != nbcols: + append_to_invalid((i + skip_header + 1, nbvalues)) + continue + # Store the values + append_to_rows(tuple(values)) + if usemask: + append_to_masks(tuple(v.strip() in m + for (v, m) in zip(values, + missing_values))) + if len(rows) == max_rows: + break + + # Upgrade the converters (if needed) + if dtype is None: + for (i, converter) in enumerate(converters): + current_column = [itemgetter(i)(_m) for _m in rows] + try: + converter.iterupgrade(current_column) + except ConverterLockError: + errmsg = f"Converter #{i} is locked and cannot be upgraded: " + current_column = map(itemgetter(i), rows) + for (j, value) in enumerate(current_column): + try: + converter.upgrade(value) + except (ConverterError, ValueError): + line_number = j + 1 + skip_header + errmsg += f"(occurred line #{line_number} for value '{value}')" + raise ConverterError(errmsg) + + # Check that we don't have invalid values + nbinvalid = len(invalid) + if nbinvalid > 0: + nbrows = len(rows) + nbinvalid - skip_footer + # Construct the error message + template = f" Line #%i (got %i columns instead of {nbcols})" + if skip_footer > 0: + nbinvalid_skipped = len([_ for _ in invalid + if _[0] > nbrows + skip_header]) + invalid = invalid[:nbinvalid - nbinvalid_skipped] + skip_footer -= nbinvalid_skipped +# +# nbrows -= skip_footer +# errmsg = [template % (i, nb) +# for (i, nb) in invalid if i < nbrows] +# else: + errmsg = [template % (i, nb) + for (i, nb) in invalid] + if len(errmsg): + errmsg.insert(0, "Some errors were detected !") + errmsg = "\n".join(errmsg) + # Raise an exception ? + if invalid_raise: + raise ValueError(errmsg) + # Issue a warning ? + else: + warnings.warn(errmsg, ConversionWarning, stacklevel=2) + + # Strip the last skip_footer data + if skip_footer > 0: + rows = rows[:-skip_footer] + if usemask: + masks = masks[:-skip_footer] + + # Convert each value according to the converter: + # We want to modify the list in place to avoid creating a new one... + if loose: + rows = list( + zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + else: + rows = list( + zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] + for (i, conv) in enumerate(converters)])) + + # Reset the dtype + data = rows + if dtype is None: + # Get the dtypes from the types of the converters + column_types = [conv.type for conv in converters] + # Find the columns with strings... + strcolidx = [i for (i, v) in enumerate(column_types) + if v == np.str_] + + if byte_converters and strcolidx: + # convert strings back to bytes for backward compatibility + warnings.warn( + "Reading unicode strings without specifying the encoding " + "argument is deprecated. 
Set the encoding, use None for the " + "system default.", + np.exceptions.VisibleDeprecationWarning, stacklevel=2) + + def encode_unicode_cols(row_tup): + row = list(row_tup) + for i in strcolidx: + row[i] = row[i].encode('latin1') + return tuple(row) + + try: + data = [encode_unicode_cols(r) for r in data] + except UnicodeEncodeError: + pass + else: + for i in strcolidx: + column_types[i] = np.bytes_ + + # Update string types to be the right length + sized_column_types = column_types.copy() + for i, col_type in enumerate(column_types): + if np.issubdtype(col_type, np.character): + n_chars = max(len(row[i]) for row in data) + sized_column_types[i] = (col_type, n_chars) + + if names is None: + # If the dtype is uniform (before sizing strings) + base = { + c_type + for c, c_type in zip(converters, column_types) + if c._checked} + if len(base) == 1: + uniform_type, = base + (ddtype, mdtype) = (uniform_type, bool) + else: + ddtype = [(defaultfmt % i, dt) + for (i, dt) in enumerate(sized_column_types)] + if usemask: + mdtype = [(defaultfmt % i, bool) + for (i, dt) in enumerate(sized_column_types)] + else: + ddtype = list(zip(names, sized_column_types)) + mdtype = list(zip(names, [bool] * len(sized_column_types))) + output = np.array(data, dtype=ddtype) + if usemask: + outputmask = np.array(masks, dtype=mdtype) + else: + # Overwrite the initial dtype names if needed + if names and dtype.names is not None: + dtype.names = names + # Case 1. We have a structured type + if len(dtype_flat) > 1: + # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] + # First, create the array using a flattened dtype: + # [('a', int), ('b1', int), ('b2', float)] + # Then, view the array using the specified dtype. + if 'O' in (_.char for _ in dtype_flat): + if has_nested_fields(dtype): + raise NotImplementedError( + "Nested fields involving objects are not supported...") + else: + output = np.array(data, dtype=dtype) + else: + rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) + output = rows.view(dtype) + # Now, process the rowmasks the same way + if usemask: + rowmasks = np.array( + masks, dtype=np.dtype([('', bool) for t in dtype_flat])) + # Construct the new dtype + mdtype = make_mask_descr(dtype) + outputmask = rowmasks.view(mdtype) + # Case #2. We have a basic dtype + else: + # We used some user-defined converters + if user_converters: + ishomogeneous = True + descr = [] + for i, ttype in enumerate([conv.type for conv in converters]): + # Keep the dtype of the current converter + if i in user_converters: + ishomogeneous &= (ttype == dtype.type) + if np.issubdtype(ttype, np.character): + ttype = (ttype, max(len(row[i]) for row in data)) + descr.append(('', ttype)) + else: + descr.append(('', dtype)) + # So we changed the dtype ? + if not ishomogeneous: + # We have more than one field + if len(descr) > 1: + dtype = np.dtype(descr) + # We have only one field: drop the name if not needed. 
+ else: + dtype = np.dtype(ttype) + # + output = np.array(data, dtype) + if usemask: + if dtype.names is not None: + mdtype = [(_, bool) for _ in dtype.names] + else: + mdtype = bool + outputmask = np.array(masks, dtype=mdtype) + # Try to take care of the missing data we missed + names = output.dtype.names + if usemask and names: + for (name, conv) in zip(names, converters): + missing_values = [conv(_) for _ in conv.missing_values + if _ != ''] + for mval in missing_values: + outputmask[name] |= (output[name] == mval) + # Construct the final array + if usemask: + output = output.view(MaskedArray) + output._mask = outputmask + + output = _ensure_ndmin_ndarray(output, ndmin=ndmin) + + if unpack: + if names is None: + return output.T + elif len(names) == 1: + # squeeze single-name dtypes too + return output[names[0]] + else: + # For structured arrays with multiple fields, + # return an array for each field. + return [output[field] for field in names] + return output + + +_genfromtxt_with_like = array_function_dispatch()(genfromtxt) + + +def recfromtxt(fname, **kwargs): + """ + Load ASCII data from a file and return it in a record array. + + If ``usemask=False`` a standard `recarray` is returned, + if ``usemask=True`` a MaskedRecords array is returned. + + .. deprecated:: 2.0 + Use `numpy.genfromtxt` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`recfromtxt` is deprecated, " + "use `numpy.genfromtxt` instead." + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + kwargs.setdefault("dtype", None) + usemask = kwargs.get('usemask', False) + output = genfromtxt(fname, **kwargs) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output + + +def recfromcsv(fname, **kwargs): + """ + Load ASCII data stored in a comma-separated file. + + The returned array is a record array (if ``usemask=False``, see + `recarray`) or a masked record array (if ``usemask=True``, + see `ma.mrecords.MaskedRecords`). + + .. deprecated:: 2.0 + Use `numpy.genfromtxt` with comma as `delimiter` instead. + + Parameters + ---------- + fname, kwargs : For a description of input parameters, see `genfromtxt`. + + See Also + -------- + numpy.genfromtxt : generic function to load ASCII data. + + Notes + ----- + By default, `dtype` is None, which means that the data-type of the output + array will be determined from the data. + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`recfromcsv` is deprecated, " + "use `numpy.genfromtxt` with comma as `delimiter` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + # Set default kwargs for genfromtxt as relevant to csv import. 
+ kwargs.setdefault("case_sensitive", "lower") + kwargs.setdefault("names", True) + kwargs.setdefault("delimiter", ",") + kwargs.setdefault("dtype", None) + output = genfromtxt(fname, **kwargs) + + usemask = kwargs.get("usemask", False) + if usemask: + from numpy.ma.mrecords import MaskedRecords + output = output.view(MaskedRecords) + else: + output = output.view(np.recarray) + return output diff --git a/python/numpy/lib/_npyio_impl.pyi b/python/numpy/lib/_npyio_impl.pyi new file mode 100644 index 000000000..40369c55f --- /dev/null +++ b/python/numpy/lib/_npyio_impl.pyi @@ -0,0 +1,301 @@ +import types +import zipfile +from collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence +from re import Pattern +from typing import ( + IO, + Any, + ClassVar, + Generic, + Protocol, + Self, + TypeAlias, + overload, + type_check_only, +) +from typing import Literal as L + +from _typeshed import ( + StrOrBytesPath, + StrPath, + SupportsKeysAndGetItem, + SupportsRead, + SupportsWrite, +) +from typing_extensions import TypeVar, deprecated, override + +import numpy as np +from numpy._core.multiarray import packbits, unpackbits +from numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc +from numpy.ma.mrecords import MaskedRecords + +from ._datasource import DataSource as DataSource + +__all__ = [ + "fromregex", + "genfromtxt", + "load", + "loadtxt", + "packbits", + "save", + "savetxt", + "savez", + "savez_compressed", + "unpackbits", +] + +_T_co = TypeVar("_T_co", covariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True) + +_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes] +_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes] +_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes] +_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str] + +@type_check_only +class _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]): + def seek(self, offset: int, whence: int, /) -> object: ... + +class BagObj(Generic[_T_co]): + def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ... + def __getattribute__(self, key: str, /) -> _T_co: ... + def __dir__(self) -> list[str]: ... + +class NpzFile(Mapping[str, NDArray[_ScalarT_co]]): + _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5 + + zip: zipfile.ZipFile + fid: IO[str] | None + files: list[str] + allow_pickle: bool + pickle_kwargs: Mapping[str, Any] | None + f: BagObj[NpzFile[_ScalarT_co]] + + # + def __init__( + self, + /, + fid: IO[Any], + own_fid: bool = False, + allow_pickle: bool = False, + pickle_kwargs: Mapping[str, object] | None = None, + *, + max_header_size: int = 10_000, + ) -> None: ... + def __del__(self) -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + @override + def __len__(self) -> int: ... + @override + def __iter__(self) -> Iterator[str]: ... + @override + def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ... + def close(self) -> None: ... + +# NOTE: Returns a `NpzFile` if file is a zip file; +# returns an `ndarray`/`memmap` otherwise +def load( + file: StrOrBytesPath | _SupportsReadSeek[bytes], + mmap_mode: L["r+", "r", "w+", "c"] | None = None, + allow_pickle: bool = False, + fix_imports: bool = True, + encoding: L["ASCII", "latin1", "bytes"] = "ASCII", + *, + max_header_size: int = 10_000, +) -> Any: ... 
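Because `load` may hand back an `NpzFile`, an `ndarray`, or a `memmap`, the stub above can only type it as returning ``Any``. A minimal caller-side sketch of narrowing that ``Any`` (illustrative only, not part of the stub; the file name ``archive.npz`` is made up):

    import numpy as np
    from numpy.lib.npyio import NpzFile

    obj = np.load("archive.npz")
    if isinstance(obj, NpzFile):
        # Zip archive: a mapping from member name to array.
        arrays = {name: obj[name] for name in obj.files}
        obj.close()
    else:
        # Plain .npy input: already an ndarray (or memmap).
        arr = np.asarray(obj)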
+ +@overload +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ... +@overload +@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ... +@overload +@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.") +def save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ... + +# +def savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... + +# +def savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ... + +# File-like objects only have to implement `__iter__` and, +# optionally, `encoding` +@overload +def loadtxt( + fname: _FName, + dtype: None = None, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[np.float64]: ... +@overload +def loadtxt( + fname: _FName, + dtype: _DTypeLike[_ScalarT], + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def loadtxt( + fname: _FName, + dtype: DTypeLike, + comments: str | Sequence[str] | None = "#", + delimiter: str | None = None, + converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None, + skiprows: int = 0, + usecols: int | Sequence[int] | None = None, + unpack: bool = False, + ndmin: L[0, 1, 2] = 0, + encoding: str | None = None, + max_rows: int | None = None, + *, + quotechar: str | None = None, + like: _SupportsArrayFunc | None = None, +) -> NDArray[Any]: ... + +def savetxt( + fname: _FNameWrite, + X: ArrayLike, + fmt: str | Sequence[str] = "%.18e", + delimiter: str = " ", + newline: str = "\n", + header: str = "", + footer: str = "", + comments: str = "# ", + encoding: str | None = None, +) -> None: ... + +@overload +def fromregex( + file: _FNameRead, + regexp: str | bytes | Pattern[Any], + dtype: _DTypeLike[_ScalarT], + encoding: str | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def fromregex( + file: _FNameRead, + regexp: str | bytes | Pattern[Any], + dtype: DTypeLike, + encoding: str | None = None, +) -> NDArray[Any]: ... 
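+
+# Illustrative note (added comment): the `fromregex` overloads above encode
+# how the dtype argument drives the result type; e.g., under these stubs,
+#
+#     dt = np.dtype([("num", np.int64), ("key", "S3")])
+#     np.fromregex("test.dat", r"(\d+)\s+(...)", dt)  # NDArray[np.void]
+#
+# a concrete ``np.dtype`` hits the `_DTypeLike[_ScalarT]` overload, while a
+# loose spec such as ``"i8,S3"`` falls back to the `DTypeLike` one
+# (NDArray[Any]).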
+ +@overload +def genfromtxt( + fname: _FName, + dtype: None = None, + comments: str = ..., + delimiter: str | int | Iterable[int] | None = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L["upper", "lower"] = ..., + defaultfmt: str = ..., + unpack: bool | None = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: int | None = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... +@overload +def genfromtxt( + fname: _FName, + dtype: _DTypeLike[_ScalarT], + comments: str = ..., + delimiter: str | int | Iterable[int] | None = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L["upper", "lower"] = ..., + defaultfmt: str = ..., + unpack: bool | None = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: int | None = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def genfromtxt( + fname: _FName, + dtype: DTypeLike, + comments: str = ..., + delimiter: str | int | Iterable[int] | None = ..., + skip_header: int = ..., + skip_footer: int = ..., + converters: Mapping[int | str, Callable[[str], Any]] | None = ..., + missing_values: Any = ..., + filling_values: Any = ..., + usecols: Sequence[int] | None = ..., + names: L[True] | str | Collection[str] | None = ..., + excludelist: Sequence[str] | None = ..., + deletechars: str = ..., + replace_space: str = ..., + autostrip: bool = ..., + case_sensitive: bool | L["upper", "lower"] = ..., + defaultfmt: str = ..., + unpack: bool | None = ..., + usemask: bool = ..., + loose: bool = ..., + invalid_raise: bool = ..., + max_rows: int | None = ..., + encoding: str = ..., + *, + ndmin: L[0, 1, 2] = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... +@overload +def recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... + +@overload +def recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ... +@overload +def recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ... diff --git a/python/numpy/lib/_polynomial_impl.py b/python/numpy/lib/_polynomial_impl.py new file mode 100644 index 000000000..a58ca76ec --- /dev/null +++ b/python/numpy/lib/_polynomial_impl.py @@ -0,0 +1,1465 @@ +""" +Functions to operate on polynomials. 
+ +""" +__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd', + 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d', + 'polyfit'] + +import functools +import re +import warnings + +import numpy._core.numeric as NX +from numpy._core import ( + abs, + array, + atleast_1d, + dot, + finfo, + hstack, + isscalar, + ones, + overrides, +) +from numpy._utils import set_module +from numpy.exceptions import RankWarning +from numpy.lib._function_base_impl import trim_zeros +from numpy.lib._twodim_base_impl import diag, vander +from numpy.lib._type_check_impl import imag, iscomplex, mintypecode, real +from numpy.linalg import eigvals, inv, lstsq + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _poly_dispatcher(seq_of_zeros): + return seq_of_zeros + + +@array_function_dispatch(_poly_dispatcher) +def poly(seq_of_zeros): + """ + Find the coefficients of a polynomial with the given sequence of roots. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the coefficients of the polynomial whose leading coefficient + is one for the given sequence of zeros (multiple roots must be included + in the sequence as many times as their multiplicity; see Examples). + A square matrix (or array, which will be treated as a matrix) can also + be given, in which case the coefficients of the characteristic polynomial + of the matrix are returned. + + Parameters + ---------- + seq_of_zeros : array_like, shape (N,) or (N, N) + A sequence of polynomial roots, or a square array or matrix object. + + Returns + ------- + c : ndarray + 1D array of polynomial coefficients from highest to lowest degree: + + ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]`` + where c[0] always equals 1. + + Raises + ------ + ValueError + If input is the wrong shape (the input must be a 1-D or square + 2-D array). + + See Also + -------- + polyval : Compute polynomial values. + roots : Return the roots of a polynomial. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + Specifying the roots of a polynomial still leaves one degree of + freedom, typically represented by an undetermined leading + coefficient. [1]_ In the case of this function, that coefficient - + the first one in the returned array - is always taken as one. (If + for some reason you have one other point, the only automatic way + presently to leverage that information is to use ``polyfit``.) + + The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n` + matrix **A** is given by + + :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`, + + where **I** is the `n`-by-`n` identity matrix. [2]_ + + References + ---------- + .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry, + Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996. + + .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition," + Academic Press, pg. 182, 1980. + + Examples + -------- + + Given a sequence of a polynomial's zeros: + + >>> import numpy as np + + >>> np.poly((0, 0, 0)) # Multiple root example + array([1., 0., 0., 0.]) + + The line above represents z**3 + 0*z**2 + 0*z + 0. + + >>> np.poly((-1./2, 0, 1./2)) + array([ 1. , 0. , -0.25, 0. 
]) + + The line above represents z**3 - z/4 + + >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) # random + + Given a square array object: + + >>> P = np.array([[0, 1./3], [-1./2, 0]]) + >>> np.poly(P) + array([1. , 0. , 0.16666667]) + + Note how in all cases the leading coefficient is always 1. + + """ + seq_of_zeros = atleast_1d(seq_of_zeros) + sh = seq_of_zeros.shape + + if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0: + seq_of_zeros = eigvals(seq_of_zeros) + elif len(sh) == 1: + dt = seq_of_zeros.dtype + # Let object arrays slip through, e.g. for arbitrary precision + if dt != object: + seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char)) + else: + raise ValueError("input must be 1d or non-empty square 2d array.") + + if len(seq_of_zeros) == 0: + return 1.0 + dt = seq_of_zeros.dtype + a = ones((1,), dtype=dt) + for zero in seq_of_zeros: + a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full') + + if issubclass(a.dtype.type, NX.complexfloating): + # if complex roots are all complex conjugates, the roots are real. + roots = NX.asarray(seq_of_zeros, complex) + if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())): + a = a.real.copy() + + return a + + +def _roots_dispatcher(p): + return p + + +@array_function_dispatch(_roots_dispatcher) +def roots(p): + """ + Return the roots of a polynomial with coefficients given in p. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The values in the rank-1 array `p` are coefficients of a polynomial. + If the length of `p` is n+1 then the polynomial is described by:: + + p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n] + + Parameters + ---------- + p : array_like + Rank-1 array of polynomial coefficients. + + Returns + ------- + out : ndarray + An array containing the roots of the polynomial. + + Raises + ------ + ValueError + When `p` cannot be converted to a rank-1 array. + + See also + -------- + poly : Find the coefficients of a polynomial with a given sequence + of roots. + polyval : Compute polynomial values. + polyfit : Least squares polynomial fit. + poly1d : A one-dimensional polynomial class. + + Notes + ----- + The algorithm relies on computing the eigenvalues of the + companion matrix [1]_. + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. + + Examples + -------- + >>> import numpy as np + >>> coeff = [3.2, 2, 1] + >>> np.roots(coeff) + array([-0.3125+0.46351241j, -0.3125-0.46351241j]) + + """ + # If input is scalar, this makes it an array + p = atleast_1d(p) + if p.ndim != 1: + raise ValueError("Input must be a rank-1 array.") + + # find non-zero array entries + non_zero = NX.nonzero(NX.ravel(p))[0] + + # Return an empty array if polynomial is all zeros + if len(non_zero) == 0: + return NX.array([]) + + # find the number of trailing zeros -- this is the number of roots at 0. + trailing_zeros = len(p) - non_zero[-1] - 1 + + # strip leading and trailing zeros + p = p[int(non_zero[0]):int(non_zero[-1]) + 1] + + # casting: if incoming array isn't floating point, make it floating point. 
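+    # Worked example (added comment): for p = [1, -3, 2] the companion matrix
+    # built below is [[3., -2.], [1., 0.]], whose eigenvalues 2 and 1 are the
+    # roots of x**2 - 3*x + 2.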
+ if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)): + p = p.astype(float) + + N = len(p) + if N > 1: + # build companion matrix and find its eigenvalues (the roots) + A = diag(NX.ones((N - 2,), p.dtype), -1) + A[0, :] = -p[1:] / p[0] + roots = eigvals(A) + else: + roots = NX.array([]) + + # tack any zeros onto the back of the array + roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype))) + return roots + + +def _polyint_dispatcher(p, m=None, k=None): + return (p,) + + +@array_function_dispatch(_polyint_dispatcher) +def polyint(p, m=1, k=None): + """ + Return an antiderivative (indefinite integral) of a polynomial. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The returned order `m` antiderivative `P` of polynomial `p` satisfies + :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1` + integration constants `k`. The constants determine the low-order + polynomial part + + .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1} + + of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`. + + Parameters + ---------- + p : array_like or poly1d + Polynomial to integrate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of the antiderivative. (Default: 1) + k : list of `m` scalars or scalar, optional + Integration constants. They are given in the order of integration: + those corresponding to highest-order terms come first. + + If ``None`` (default), all constants are assumed to be zero. + If `m = 1`, a single scalar can be given instead of a list. + + See Also + -------- + polyder : derivative of a polynomial + poly1d.integ : equivalent method + + Examples + -------- + + The defining property of the antiderivative: + + >>> import numpy as np + + >>> p = np.poly1d([1,1,1]) + >>> P = np.polyint(p) + >>> P + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary + >>> np.polyder(P) == p + True + + The integration constants default to zero, but can be specified: + + >>> P = np.polyint(p, 3) + >>> P(0) + 0.0 + >>> np.polyder(P)(0) + 0.0 + >>> np.polyder(P, 2)(0) + 0.0 + >>> P = np.polyint(p, 3, k=[6,5,3]) + >>> P + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary + + Note that 3 = 6 / 2!, and that the constants are given in the order of + integrations. Constant of the highest-order polynomial term comes first: + + >>> np.polyder(P, 2)(0) + 6.0 + >>> np.polyder(P, 1)(0) + 5.0 + >>> P(0) + 3.0 + + """ + m = int(m) + if m < 0: + raise ValueError("Order of integral must be positive (see polyder)") + if k is None: + k = NX.zeros(m, float) + k = atleast_1d(k) + if len(k) == 1 and m > 1: + k = k[0] * NX.ones(m, float) + if len(k) < m: + raise ValueError( + "k must be a scalar or a rank-1 array of length 1 or >m.") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + if m == 0: + if truepoly: + return poly1d(p) + return p + else: + # Note: this must work also with object and integer arrays + y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]])) + val = polyint(y, m - 1, k=k[1:]) + if truepoly: + return poly1d(val) + return val + + +def _polyder_dispatcher(p, m=None): + return (p,) + + +@array_function_dispatch(_polyder_dispatcher) +def polyder(p, m=1): + """ + Return the derivative of the specified order of a polynomial. + + .. note:: + This forms part of the old polynomial API. 
Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Parameters + ---------- + p : poly1d or sequence + Polynomial to differentiate. + A sequence is interpreted as polynomial coefficients, see `poly1d`. + m : int, optional + Order of differentiation (default: 1) + + Returns + ------- + der : poly1d + A new polynomial representing the derivative. + + See Also + -------- + polyint : Anti-derivative of a polynomial. + poly1d : Class for one-dimensional polynomials. + + Examples + -------- + + The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + + >>> import numpy as np + + >>> p = np.poly1d([1,1,1,1]) + >>> p2 = np.polyder(p) + >>> p2 + poly1d([3, 2, 1]) + + which evaluates to: + + >>> p2(2.) + 17.0 + + We can verify this, approximating the derivative with + ``(f(x + h) - f(x))/h``: + + >>> (p(2. + 0.001) - p(2.)) / 0.001 + 17.007000999997857 + + The fourth-order derivative of a 3rd-order polynomial is zero: + + >>> np.polyder(p, 2) + poly1d([6, 2]) + >>> np.polyder(p, 3) + poly1d([6]) + >>> np.polyder(p, 4) + poly1d([0]) + + """ + m = int(m) + if m < 0: + raise ValueError("Order of derivative must be positive (see polyint)") + + truepoly = isinstance(p, poly1d) + p = NX.asarray(p) + n = len(p) - 1 + y = p[:-1] * NX.arange(n, 0, -1) + if m == 0: + val = p + else: + val = polyder(y, m - 1) + if truepoly: + val = poly1d(val) + return val + + +def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None): + return (x, y, w) + + +@array_function_dispatch(_polyfit_dispatcher) +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Least squares polynomial fit. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg` + to points `(x, y)`. Returns a vector of coefficients `p` that minimises + the squared error in the order `deg`, `deg-1`, ... `0`. + + The `Polynomial.fit ` class + method is recommended for new code as it is more stable numerically. See + the documentation of the method for more information. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int + Degree of the fitting polynomial + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (M,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. 
When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + cov : bool or str, optional + If given and not `False`, return not just the estimate but also its + covariance matrix. By default, the covariance are scaled by + chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed + to be unreliable except in a relative sense and everything is scaled + such that the reduced chi2 is unity. This scaling is omitted if + ``cov='unscaled'``, as is relevant for the case that the weights are + w = 1/sigma, with sigma known to be a reliable estimate of the + uncertainty. + + Returns + ------- + p : ndarray, shape (deg + 1,) or (deg + 1, K) + Polynomial coefficients, highest power first. If `y` was 2-D, the + coefficients for `k`-th data set are in ``p[:,k]``. + + residuals, rank, singular_values, rcond + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the effective rank of the scaled Vandermonde + coefficient matrix + - singular_values -- singular values of the scaled Vandermonde + coefficient matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K) + Present only if ``full == False`` and ``cov == True``. The covariance + matrix of the polynomial coefficient estimates. The diagonal of + this matrix are the variance estimates for each coefficient. If y + is a 2-D array, then the covariance matrix for the `k`-th data set + are in ``V[:,:,k]`` + + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. + + The warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + polyval : Compute polynomial values. + linalg.lstsq : Computes a least-squares fit. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution minimizes the squared error + + .. math:: + E = \\sum_{j=0}^k |p(x_j) - y_j|^2 + + in the equations:: + + x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0] + x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1] + ... + x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k] + + The coefficient matrix of the coefficients `p` is a Vandermonde matrix. + + `polyfit` issues a `~exceptions.RankWarning` when the least-squares fit is + badly conditioned. This implies that the best fit is not well-defined due + to numerical error. The results may be improved by lowering the polynomial + degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter + can also be set to a value smaller than its default, but the resulting + fit may be spurious: including contributions from the small singular + values can add numerical noise to the result. + + Note that fitting polynomial coefficients is inherently badly conditioned + when the degree of the polynomial is large or the interval of sample points + is badly centered. The quality of the fit should always be checked in these + cases. When polynomial fits are not satisfactory, splines may be a good + alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + .. 
[2] Wikipedia, "Polynomial interpolation", + https://en.wikipedia.org/wiki/Polynomial_interpolation + + Examples + -------- + >>> import numpy as np + >>> import warnings + >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) + >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) + >>> z = np.polyfit(x, y, 3) + >>> z + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary + + It is convenient to use `poly1d` objects for dealing with polynomials: + + >>> p = np.poly1d(z) + >>> p(0.5) + 0.6143849206349179 # may vary + >>> p(3.5) + -0.34732142857143039 # may vary + >>> p(10) + 22.579365079365115 # may vary + + High-order polynomials may oscillate wildly: + + >>> with warnings.catch_warnings(): + ... warnings.simplefilter('ignore', np.exceptions.RankWarning) + ... p30 = np.poly1d(np.polyfit(x, y, 30)) + ... + >>> p30(4) + -0.80000000000000204 # may vary + >>> p30(5) + -0.99999999999999445 # may vary + >>> p30(4.5) + -0.10547061179440398 # may vary + + Illustration: + + >>> import matplotlib.pyplot as plt + >>> xp = np.linspace(-2, 6, 100) + >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--') + >>> plt.ylim(-2,2) + (-2, 2) + >>> plt.show() + + """ + order = int(deg) + 1 + x = NX.asarray(x) + 0.0 + y = NX.asarray(y) + 0.0 + + # check arguments. + if deg < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if x.shape[0] != y.shape[0]: + raise TypeError("expected x and y to have same length") + + # set rcond + if rcond is None: + rcond = len(x) * finfo(x.dtype).eps + + # set up least squares equation for powers of x + lhs = vander(x, order) + rhs = y + + # apply weighting + if w is not None: + w = NX.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + lhs *= w[:, NX.newaxis] + if rhs.ndim == 2: + rhs *= w[:, NX.newaxis] + else: + rhs *= w + + # scale lhs to improve condition number and solve + scale = NX.sqrt((lhs * lhs).sum(axis=0)) + lhs /= scale + c, resids, rank, s = lstsq(lhs, rhs, rcond) + c = (c.T / scale).T # broadcast scale coefficients + + # warn on rank reduction, which indicates an ill conditioned matrix + if rank != order and not full: + msg = "Polyfit may be poorly conditioned" + warnings.warn(msg, RankWarning, stacklevel=2) + + if full: + return c, resids, rank, s, rcond + elif cov: + Vbase = inv(dot(lhs.T, lhs)) + Vbase /= NX.outer(scale, scale) + if cov == "unscaled": + fac = 1 + else: + if len(x) <= order: + raise ValueError("the number of data points must exceed order " + "to scale the covariance matrix") + # note, this used to be: fac = resids / (len(x) - order - 2.0) + # it was decided that the "- 2" (originally justified by "Bayesian + # uncertainty analysis") is not what the user expects + # (see gh-11196 and gh-11197) + fac = resids / (len(x) - order) + if y.ndim == 1: + return c, Vbase * fac + else: + return c, Vbase[:, :, NX.newaxis] * fac + else: + return c + + +def _polyval_dispatcher(p, x): + return (p, x) + + +@array_function_dispatch(_polyval_dispatcher) +def polyval(p, x): + """ + Evaluate a polynomial at specific values. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. 
+ A summary of the differences can be found in the + :doc:`transition guide `. + + If `p` is of length N, this function returns the value:: + + p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1] + + If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``. + If `x` is another polynomial then the composite polynomial ``p(x(t))`` + is returned. + + Parameters + ---------- + p : array_like or poly1d object + 1D array of polynomial coefficients (including coefficients equal + to zero) from highest degree to the constant term, or an + instance of poly1d. + x : array_like or poly1d object + A number, an array of numbers, or an instance of poly1d, at + which to evaluate `p`. + + Returns + ------- + values : ndarray or poly1d + If `x` is a poly1d instance, the result is the composition of the two + polynomials, i.e., `x` is "substituted" in `p` and the simplified + result is returned. In addition, the type of `x` - array_like or + poly1d - governs the type of the output: `x` array_like => `values` + array_like, `x` a poly1d object => `values` is also. + + See Also + -------- + poly1d: A polynomial class. + + Notes + ----- + Horner's scheme [1]_ is used to evaluate the polynomial. Even so, + for polynomials of high degree the values may be inaccurate due to + rounding errors. Use carefully. + + If `x` is a subtype of `ndarray` the return value will be of the same type. + + References + ---------- + .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng. + trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand + Reinhold Co., 1985, pg. 720. + + Examples + -------- + >>> import numpy as np + >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 + 76 + >>> np.polyval([3,0,1], np.poly1d(5)) + poly1d([76]) + >>> np.polyval(np.poly1d([3,0,1]), 5) + 76 + >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) + poly1d([76]) + + """ + p = NX.asarray(p) + if isinstance(x, poly1d): + y = 0 + else: + x = NX.asanyarray(x) + y = NX.zeros_like(x) + for pv in p: + y = y * x + pv + return y + + +def _binary_op_dispatcher(a1, a2): + return (a1, a2) + + +@array_function_dispatch(_binary_op_dispatcher) +def polyadd(a1, a2): + """ + Find the sum of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Returns the polynomial resulting from the sum of two input polynomials. + Each input must be either a poly1d object or a 1D sequence of polynomial + coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The sum of the inputs. If either input is a poly1d object, then the + output is also a poly1d object. Otherwise, it is a 1D array of + polynomial coefficients from highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. 
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + + Examples + -------- + >>> import numpy as np + >>> np.polyadd([1, 2], [9, 5, 4]) + array([9, 6, 6]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2]) + >>> p2 = np.poly1d([9, 5, 4]) + >>> print(p1) + 1 x + 2 + >>> print(p2) + 2 + 9 x + 5 x + 4 + >>> print(np.polyadd(p1, p2)) + 2 + 9 x + 6 x + 6 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 + a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) + a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 + NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polysub(a1, a2): + """ + Difference (subtraction) of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Given two polynomials `a1` and `a2`, returns ``a1 - a2``. + `a1` and `a2` can be either array_like sequences of the polynomials' + coefficients (including coefficients equal to zero), or `poly1d` objects. + + Parameters + ---------- + a1, a2 : array_like or poly1d + Minuend and subtrahend polynomials, respectively. + + Returns + ------- + out : ndarray or poly1d + Array or `poly1d` object of the difference polynomial's coefficients. + + See Also + -------- + polyval, polydiv, polymul, polyadd + + Examples + -------- + + .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + + >>> import numpy as np + + >>> np.polysub([2, 10, -2], [3, 10, -4]) + array([-1, 0, 2]) + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1 = atleast_1d(a1) + a2 = atleast_1d(a2) + diff = len(a2) - len(a1) + if diff == 0: + val = a1 - a2 + elif diff > 0: + zr = NX.zeros(diff, a1.dtype) + val = NX.concatenate((zr, a1)) - a2 + else: + zr = NX.zeros(abs(diff), a2.dtype) + val = a1 - NX.concatenate((zr, a2)) + if truepoly: + val = poly1d(val) + return val + + +@array_function_dispatch(_binary_op_dispatcher) +def polymul(a1, a2): + """ + Find the product of two polynomials. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + Finds the polynomial resulting from the multiplication of the two input + polynomials. Each input must be either a poly1d object or a 1D sequence + of polynomial coefficients, from highest to lowest degree. + + Parameters + ---------- + a1, a2 : array_like or poly1d object + Input polynomials. + + Returns + ------- + out : ndarray or poly1d object + The polynomial resulting from the multiplication of the inputs. If + either inputs is a poly1d object, then the output is also a poly1d + object. Otherwise, it is a 1D array of polynomial coefficients from + highest to lowest degree. + + See Also + -------- + poly1d : A one-dimensional polynomial class. + poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval + convolve : Array convolution. Same output as polymul, but has parameter + for overlap mode. 
+ + Examples + -------- + >>> import numpy as np + >>> np.polymul([1, 2, 3], [9, 5, 1]) + array([ 9, 23, 38, 17, 3]) + + Using poly1d objects: + + >>> p1 = np.poly1d([1, 2, 3]) + >>> p2 = np.poly1d([9, 5, 1]) + >>> print(p1) + 2 + 1 x + 2 x + 3 + >>> print(p2) + 2 + 9 x + 5 x + 1 + >>> print(np.polymul(p1, p2)) + 4 3 2 + 9 x + 23 x + 38 x + 17 x + 3 + + """ + truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d)) + a1, a2 = poly1d(a1), poly1d(a2) + val = NX.convolve(a1, a2) + if truepoly: + val = poly1d(val) + return val + + +def _polydiv_dispatcher(u, v): + return (u, v) + + +@array_function_dispatch(_polydiv_dispatcher) +def polydiv(u, v): + """ + Returns the quotient and remainder of polynomial division. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. + A summary of the differences can be found in the + :doc:`transition guide `. + + The input arrays are the coefficients (including any coefficients + equal to zero) of the "numerator" (dividend) and "denominator" + (divisor) polynomials, respectively. + + Parameters + ---------- + u : array_like or poly1d + Dividend polynomial's coefficients. + + v : array_like or poly1d + Divisor polynomial's coefficients. + + Returns + ------- + q : ndarray + Coefficients, including those equal to zero, of the quotient. + r : ndarray + Coefficients, including those equal to zero, of the remainder. + + See Also + -------- + poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub + polyval + + Notes + ----- + Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need + not equal `v.ndim`. In other words, all four possible combinations - + ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``, + ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work. + + Examples + -------- + + .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + + >>> import numpy as np + + >>> x = np.array([3.0, 5.0, 2.0]) + >>> y = np.array([2.0, 1.0]) + >>> np.polydiv(x, y) + (array([1.5 , 1.75]), array([0.25])) + + """ + truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d)) + u = atleast_1d(u) + 0.0 + v = atleast_1d(v) + 0.0 + # w has the common type + w = u[0] + v[0] + m = len(u) - 1 + n = len(v) - 1 + scale = 1. / v[0] + q = NX.zeros((max(m - n + 1, 1),), w.dtype) + r = u.astype(w.dtype) + for k in range(m - n + 1): + d = scale * r[k] + q[k] = d + r[k:k + n + 1] -= d * v + while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1): + r = r[1:] + if truepoly: + return poly1d(q), poly1d(r) + return q, r + + +_poly_mat = re.compile(r"\*\*([0-9]*)") +def _raise_power(astr, wrap=70): + n = 0 + line1 = '' + line2 = '' + output = ' ' + while True: + mat = _poly_mat.search(astr, n) + if mat is None: + break + span = mat.span() + power = mat.groups()[0] + partstr = astr[n:span[0]] + n = span[1] + toadd2 = partstr + ' ' * (len(power) - 1) + toadd1 = ' ' * (len(partstr) - 1) + power + if ((len(line2) + len(toadd2) > wrap) or + (len(line1) + len(toadd1) > wrap)): + output += line1 + "\n" + line2 + "\n " + line1 = toadd1 + line2 = toadd2 + else: + line2 += partstr + ' ' * (len(power) - 1) + line1 += ' ' * (len(partstr) - 1) + power + output += line1 + "\n" + line2 + return output + astr[n:] + + +@set_module('numpy') +class poly1d: + """ + A one-dimensional polynomial class. + + .. note:: + This forms part of the old polynomial API. Since version 1.4, the + new polynomial API defined in `numpy.polynomial` is preferred. 
+    A summary of the differences can be found in the
+    :doc:`transition guide </reference/routines.polynomials>`.
+
+    A convenience class, used to encapsulate "natural" operations on
+    polynomials so that said operations may take on their customary
+    form in code (see Examples).
+
+    Parameters
+    ----------
+    c_or_r : array_like
+        The polynomial's coefficients, in decreasing powers, or if
+        the value of the second parameter is True, the polynomial's
+        roots (values where the polynomial evaluates to 0). For example,
+        ``poly1d([1, 2, 3])`` returns an object that represents
+        :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
+        one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
+    r : bool, optional
+        If True, `c_or_r` specifies the polynomial's roots; the default
+        is False.
+    variable : str, optional
+        Changes the variable used when printing `p` from `x` to `variable`
+        (see Examples).
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Construct the polynomial :math:`x^2 + 2x + 3`:
+
+    >>> p = np.poly1d([1, 2, 3])
+    >>> print(np.poly1d(p))
+       2
+    1 x + 2 x + 3
+
+    Evaluate the polynomial at :math:`x = 0.5`:
+
+    >>> p(0.5)
+    4.25
+
+    Find the roots:
+
+    >>> p.r
+    array([-1.+1.41421356j, -1.-1.41421356j])
+    >>> p(p.r)
+    array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
+
+    These numbers in the previous line represent (0, 0) to machine precision
+
+    Show the coefficients:
+
+    >>> p.c
+    array([1, 2, 3])
+
+    Display the order (the leading zero-coefficients are removed):
+
+    >>> p.order
+    2
+
+    Show the coefficient of the k-th power in the polynomial
+    (which is equivalent to ``p.c[-(k+1)]``):
+
+    >>> p[1]
+    2
+
+    Polynomials can be added, subtracted, multiplied, and divided
+    (returns quotient and remainder):
+
+    >>> p * p
+    poly1d([ 1,  4, 10, 12,  9])
+
+    >>> (p**3 + 4) / p
+    (poly1d([ 1.,  4., 10., 12.,  9.]), poly1d([4.]))
+
+    ``asarray(p)`` gives the coefficient array, so polynomials can be
+    used in all functions that accept arrays:
+
+    >>> p**2 # square of polynomial
+    poly1d([ 1,  4, 10, 12,  9])
+
+    >>> np.square(p) # square of individual coefficients
+    array([1, 4, 9])
+
+    The variable used in the string representation of `p` can be modified,
+    using the `variable` parameter:
+
+    >>> p = np.poly1d([1,2,3], variable='z')
+    >>> print(p)
+       2
+    1 z + 2 z + 3
+
+    Construct a polynomial from its roots:
+
+    >>> np.poly1d([1, 2], True)
+    poly1d([ 1., -3.,  2.])
+
+    This is the same polynomial as obtained by:
+
+    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
+    poly1d([ 1, -3,  2])
+
+    """
+    __hash__ = None
+
+    @property
+    def coeffs(self):
+        """ The polynomial coefficients """
+        return self._coeffs
+
+    @coeffs.setter
+    def coeffs(self, value):
+        # allowing this makes p.coeffs *= 2 legal
+        if value is not self._coeffs:
+            raise AttributeError("Cannot set attribute")
+
+    @property
+    def variable(self):
+        """ The name of the polynomial variable """
+        return self._variable
+
+    # calculated attributes
+    @property
+    def order(self):
+        """ The order or degree of the polynomial """
+        return len(self._coeffs) - 1
+
+    @property
+    def roots(self):
+        """ The roots of the polynomial, where self(x) == 0 """
+        return roots(self._coeffs)
+
+    # our internal _coeffs property needs to be backed by __dict__['coeffs']
+    # for scipy to work correctly.
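+    # (added note) Concretely: reads and writes of _coeffs below go through
+    # self.__dict__['coeffs'], so code that inspects the instance __dict__
+    # directly still finds a real 'coeffs' entry.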
+ @property + def _coeffs(self): + return self.__dict__['coeffs'] + + @_coeffs.setter + def _coeffs(self, coeffs): + self.__dict__['coeffs'] = coeffs + + # alias attributes + r = roots + c = coef = coefficients = coeffs + o = order + + def __init__(self, c_or_r, r=False, variable=None): + if isinstance(c_or_r, poly1d): + self._variable = c_or_r._variable + self._coeffs = c_or_r._coeffs + + if set(c_or_r.__dict__) - set(self.__dict__): + msg = ("In the future extra properties will not be copied " + "across when constructing one poly1d from another") + warnings.warn(msg, FutureWarning, stacklevel=2) + self.__dict__.update(c_or_r.__dict__) + + if variable is not None: + self._variable = variable + return + if r: + c_or_r = poly(c_or_r) + c_or_r = atleast_1d(c_or_r) + if c_or_r.ndim > 1: + raise ValueError("Polynomial must be 1d only.") + c_or_r = trim_zeros(c_or_r, trim='f') + if len(c_or_r) == 0: + c_or_r = NX.array([0], dtype=c_or_r.dtype) + self._coeffs = c_or_r + if variable is None: + variable = 'x' + self._variable = variable + + def __array__(self, t=None, copy=None): + if t: + return NX.asarray(self.coeffs, t, copy=copy) + else: + return NX.asarray(self.coeffs, copy=copy) + + def __repr__(self): + vals = repr(self.coeffs) + vals = vals[6:-1] + return f"poly1d({vals})" + + def __len__(self): + return self.order + + def __str__(self): + thestr = "0" + var = self.variable + + # Remove leading zeros + coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)] + N = len(coeffs) - 1 + + def fmt_float(q): + s = f'{q:.4g}' + s = s.removesuffix('.0000') + return s + + for k, coeff in enumerate(coeffs): + if not iscomplex(coeff): + coefstr = fmt_float(real(coeff)) + elif real(coeff) == 0: + coefstr = f'{fmt_float(imag(coeff))}j' + else: + coefstr = f'({fmt_float(real(coeff))} + {fmt_float(imag(coeff))}j)' + + power = (N - k) + if power == 0: + if coefstr != '0': + newstr = f'{coefstr}' + elif k == 0: + newstr = '0' + else: + newstr = '' + elif power == 1: + if coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = var + else: + newstr = f'{coefstr} {var}' + elif coefstr == '0': + newstr = '' + elif coefstr == 'b': + newstr = '%s**%d' % (var, power,) + else: + newstr = '%s %s**%d' % (coefstr, var, power) + + if k > 0: + if newstr != '': + if newstr.startswith('-'): + thestr = f"{thestr} - {newstr[1:]}" + else: + thestr = f"{thestr} + {newstr}" + else: + thestr = newstr + return _raise_power(thestr) + + def __call__(self, val): + return polyval(self.coeffs, val) + + def __neg__(self): + return poly1d(-self.coeffs) + + def __pos__(self): + return self + + def __mul__(self, other): + if isscalar(other): + return poly1d(self.coeffs * other) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __rmul__(self, other): + if isscalar(other): + return poly1d(other * self.coeffs) + else: + other = poly1d(other) + return poly1d(polymul(self.coeffs, other.coeffs)) + + def __add__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __radd__(self, other): + other = poly1d(other) + return poly1d(polyadd(self.coeffs, other.coeffs)) + + def __pow__(self, val): + if not isscalar(val) or int(val) != val or val < 0: + raise ValueError("Power to non-negative integers only.") + res = [1] + for _ in range(val): + res = polymul(self.coeffs, res) + return poly1d(res) + + def __sub__(self, other): + other = poly1d(other) + return poly1d(polysub(self.coeffs, other.coeffs)) + + def __rsub__(self, other): + other = 
poly1d(other) + return poly1d(polysub(other.coeffs, self.coeffs)) + + def __truediv__(self, other): + if isscalar(other): + return poly1d(self.coeffs / other) + else: + other = poly1d(other) + return polydiv(self, other) + + def __rtruediv__(self, other): + if isscalar(other): + return poly1d(other / self.coeffs) + else: + other = poly1d(other) + return polydiv(other, self) + + def __eq__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + if self.coeffs.shape != other.coeffs.shape: + return False + return (self.coeffs == other.coeffs).all() + + def __ne__(self, other): + if not isinstance(other, poly1d): + return NotImplemented + return not self.__eq__(other) + + def __getitem__(self, val): + ind = self.order - val + if val > self.order: + return self.coeffs.dtype.type(0) + if val < 0: + return self.coeffs.dtype.type(0) + return self.coeffs[ind] + + def __setitem__(self, key, val): + ind = self.order - key + if key < 0: + raise ValueError("Does not support negative powers.") + if key > self.order: + zr = NX.zeros(key - self.order, self.coeffs.dtype) + self._coeffs = NX.concatenate((zr, self.coeffs)) + ind = 0 + self._coeffs[ind] = val + + def __iter__(self): + return iter(self.coeffs) + + def integ(self, m=1, k=0): + """ + Return an antiderivative (indefinite integral) of this polynomial. + + Refer to `polyint` for full documentation. + + See Also + -------- + polyint : equivalent function + + """ + return poly1d(polyint(self.coeffs, m=m, k=k)) + + def deriv(self, m=1): + """ + Return a derivative of this polynomial. + + Refer to `polyder` for full documentation. + + See Also + -------- + polyder : equivalent function + + """ + return poly1d(polyder(self.coeffs, m=m)) + +# Stuff to do on module import + + +warnings.simplefilter('always', RankWarning) diff --git a/python/numpy/lib/_polynomial_impl.pyi b/python/numpy/lib/_polynomial_impl.pyi new file mode 100644 index 000000000..3beece111 --- /dev/null +++ b/python/numpy/lib/_polynomial_impl.pyi @@ -0,0 +1,318 @@ +from typing import ( + Any, + NoReturn, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, +) +from typing import ( + Literal as L, +) + +import numpy as np +from numpy import ( + complex128, + complexfloating, + float64, + floating, + int32, + int64, + object_, + poly1d, + signedinteger, + unsignedinteger, +) +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, +) + +_T = TypeVar("_T") + +_2Tup: TypeAlias = tuple[_T, _T] +_5Tup: TypeAlias = tuple[ + _T, + NDArray[float64], + NDArray[int32], + NDArray[float64], + NDArray[float64], +] + +__all__ = [ + "poly", + "roots", + "polyint", + "polyder", + "polyadd", + "polysub", + "polymul", + "polydiv", + "polyval", + "poly1d", + "polyfit", +] + +def poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ... + +# Returns either a float or complex array depending on the input values. +# See `np.linalg.eigvals`. +def roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ... + +@overload +def polyint( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ..., +) -> poly1d: ... +@overload +def polyint( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeFloat_co | None = ..., +) -> NDArray[floating]: ... 
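+# Illustrative note (added comment): these overloads mirror runtime promotion;
+# e.g. np.polyint([6.0, 2.0]) matches the floating overload above and returns
+# array([3., 2., 0.]).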
+@overload +def polyint( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeComplex_co | None = ..., +) -> NDArray[complexfloating]: ... +@overload +def polyint( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., + k: _ArrayLikeObject_co | None = ..., +) -> NDArray[object_]: ... + +@overload +def polyder( + p: poly1d, + m: SupportsInt | SupportsIndex = ..., +) -> poly1d: ... +@overload +def polyder( + p: _ArrayLikeFloat_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[floating]: ... +@overload +def polyder( + p: _ArrayLikeComplex_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[complexfloating]: ... +@overload +def polyder( + p: _ArrayLikeObject_co, + m: SupportsInt | SupportsIndex = ..., +) -> NDArray[object_]: ... + +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[False] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: L[False] = ..., +) -> NDArray[float64]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[False] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: L[False] = ..., +) -> NDArray[complex128]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = None, + full: L[False] = False, + w: _ArrayLikeFloat_co | None = None, + *, + cov: L[True, "unscaled"], +) -> _2Tup[NDArray[complex128]]: ... +@overload +def polyfit( + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[True] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[float64]]: ... +@overload +def polyfit( + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: SupportsIndex | SupportsInt, + rcond: float | None = ..., + full: L[True] = ..., + w: _ArrayLikeFloat_co | None = ..., + cov: bool | L["unscaled"] = ..., +) -> _5Tup[NDArray[complex128]]: ... + +@overload +def polyval( + p: _ArrayLikeBool_co, + x: _ArrayLikeBool_co, +) -> NDArray[int64]: ... +@overload +def polyval( + p: _ArrayLikeUInt_co, + x: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... +@overload +def polyval( + p: _ArrayLikeInt_co, + x: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polyval( + p: _ArrayLikeFloat_co, + x: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polyval( + p: _ArrayLikeComplex_co, + x: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polyval( + p: _ArrayLikeObject_co, + x: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polyadd( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polyadd( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NDArray[np.bool]: ... +@overload +def polyadd( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... 
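+# Illustrative note (added comment): e.g. np.polyadd([1, 2], [9, 5, 4]) is the
+# plain signed-integer case, matching the overload below and returning
+# array([9, 6, 6]).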
+@overload +def polyadd( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polyadd( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polyadd( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polyadd( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +@overload +def polysub( + a1: poly1d, + a2: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co | _ArrayLikeObject_co, + a2: poly1d, +) -> poly1d: ... +@overload +def polysub( + a1: _ArrayLikeBool_co, + a2: _ArrayLikeBool_co, +) -> NoReturn: ... +@overload +def polysub( + a1: _ArrayLikeUInt_co, + a2: _ArrayLikeUInt_co, +) -> NDArray[unsignedinteger]: ... +@overload +def polysub( + a1: _ArrayLikeInt_co, + a2: _ArrayLikeInt_co, +) -> NDArray[signedinteger]: ... +@overload +def polysub( + a1: _ArrayLikeFloat_co, + a2: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def polysub( + a1: _ArrayLikeComplex_co, + a2: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... +@overload +def polysub( + a1: _ArrayLikeObject_co, + a2: _ArrayLikeObject_co, +) -> NDArray[object_]: ... + +# NOTE: Not an alias, but they do have the same signature (that we can reuse) +polymul = polyadd + +@overload +def polydiv( + u: poly1d, + v: _ArrayLikeComplex_co | _ArrayLikeObject_co, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co | _ArrayLikeObject_co, + v: poly1d, +) -> _2Tup[poly1d]: ... +@overload +def polydiv( + u: _ArrayLikeFloat_co, + v: _ArrayLikeFloat_co, +) -> _2Tup[NDArray[floating]]: ... +@overload +def polydiv( + u: _ArrayLikeComplex_co, + v: _ArrayLikeComplex_co, +) -> _2Tup[NDArray[complexfloating]]: ... +@overload +def polydiv( + u: _ArrayLikeObject_co, + v: _ArrayLikeObject_co, +) -> _2Tup[NDArray[Any]]: ... diff --git a/python/numpy/lib/_scimath_impl.py b/python/numpy/lib/_scimath_impl.py new file mode 100644 index 000000000..8136a7d54 --- /dev/null +++ b/python/numpy/lib/_scimath_impl.py @@ -0,0 +1,642 @@ +""" +Wrapper functions to more user-friendly calling of certain math functions +whose output data-type is different than the input data-type in certain +domains of the input. + +For example, for functions like `log` with branch cuts, the versions in this +module provide the mathematically valid answers in the complex plane:: + + >>> import math + >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi) + True + +Similarly, `sqrt`, other base logarithms, `power` and trig functions are +correctly handled. See their respective docstrings for specific examples. + +""" +import numpy._core.numeric as nx +import numpy._core.numerictypes as nt +from numpy._core.numeric import any, asarray +from numpy._core.overrides import array_function_dispatch, set_module +from numpy.lib._type_check_impl import isreal + +__all__ = [ + 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin', + 'arctanh' + ] + + +_ln2 = nx.log(2.0) + + +def _tocomplex(arr): + """Convert its input `arr` to a complex array. + + The input is returned as a complex array of the smallest type that will fit + the original data: types like single, byte, short, etc. become csingle, + while others become cdouble. + + A copy of the input is always made. + + Parameters + ---------- + arr : array + + Returns + ------- + array + An array with the same input data as the input but in complex form. 
+ + Examples + -------- + >>> import numpy as np + + First, consider an input of type short: + + >>> a = np.array([1,2,3],np.short) + + >>> ac = np.lib.scimath._tocomplex(a); ac + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> ac.dtype + dtype('complex64') + + If the input is of type double, the output is correspondingly of the + complex double type as well: + + >>> b = np.array([1,2,3],np.double) + + >>> bc = np.lib.scimath._tocomplex(b); bc + array([1.+0.j, 2.+0.j, 3.+0.j]) + + >>> bc.dtype + dtype('complex128') + + Note that even if the input was complex to begin with, a copy is still + made, since the astype() method always copies: + + >>> c = np.array([1,2,3],np.csingle) + + >>> cc = np.lib.scimath._tocomplex(c); cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + + >>> c *= 2; c + array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + + >>> cc + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + """ + if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, + nt.ushort, nt.csingle)): + return arr.astype(nt.csingle) + else: + return arr.astype(nt.cdouble) + + +def _fix_real_lt_zero(x): + """Convert `x` to complex if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_real_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_real_lt_zero([-1,2]) + array([-1.+0.j, 2.+0.j]) + + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = _tocomplex(x) + return x + + +def _fix_int_lt_zero(x): + """Convert `x` to double if it has real, negative components. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_int_lt_zero([1,2]) + array([1, 2]) + + >>> np.lib.scimath._fix_int_lt_zero([-1,2]) + array([-1., 2.]) + """ + x = asarray(x) + if any(isreal(x) & (x < 0)): + x = x * 1.0 + return x + + +def _fix_real_abs_gt_1(x): + """Convert `x` to complex if it has real components x_i with abs(x_i)>1. + + Otherwise, output is just the array version of the input (via asarray). + + Parameters + ---------- + x : array_like + + Returns + ------- + array + + Examples + -------- + >>> import numpy as np + >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) + array([0, 1]) + + >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) + array([0.+0.j, 2.+0.j]) + """ + x = asarray(x) + if any(isreal(x) & (abs(x) > 1)): + x = _tocomplex(x) + return x + + +def _unary_dispatcher(x): + return (x,) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def sqrt(x): + """ + Compute the square root of x. + + For negative input elements, a complex value is returned + (unlike `numpy.sqrt` which returns NaN). + + Parameters + ---------- + x : array_like + The input value(s). + + Returns + ------- + out : ndarray or scalar + The square root of `x`. If `x` was a scalar, so is `out`, + otherwise an array is returned. 
+ + See Also + -------- + numpy.sqrt + + Examples + -------- + For real, non-negative inputs this works just like `numpy.sqrt`: + + >>> import numpy as np + + >>> np.emath.sqrt(1) + 1.0 + >>> np.emath.sqrt([1, 4]) + array([1., 2.]) + + But it automatically handles negative inputs: + + >>> np.emath.sqrt(-1) + 1j + >>> np.emath.sqrt([-1,4]) + array([0.+1.j, 2.+0.j]) + + Different results are expected because: + floating point 0.0 and -0.0 are distinct. + + For more control, explicitly use complex() as follows: + + >>> np.emath.sqrt(complex(-4.0, 0.0)) + 2j + >>> np.emath.sqrt(complex(-4.0, -0.0)) + -2j + """ + x = _fix_real_lt_zero(x) + return nx.sqrt(x) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def log(x): + """ + Compute the natural logarithm of `x`. + + Return the "principal value" (for a description of this, see `numpy.log`) + of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)`` + returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the + complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log is (are) required. + + Returns + ------- + out : ndarray or scalar + The log of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log + + Notes + ----- + For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log` + (note, however, that otherwise `numpy.log` and this `log` are identical, + i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and, + notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + >>> import numpy as np + >>> np.emath.log(np.exp(1)) + 1.0 + + Negative arguments are handled "correctly" (recall that + ``exp(log(x)) == x`` does *not* hold for real ``x < 0``): + + >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j) + True + + """ + x = _fix_real_lt_zero(x) + return nx.log(x) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def log10(x): + """ + Compute the logarithm base 10 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this + is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)`` + returns ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose log base 10 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array object is returned. + + See Also + -------- + numpy.log10 + + Notes + ----- + For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10` + (note, however, that otherwise `numpy.log10` and this `log10` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + >>> import numpy as np + + (We set the printing precision so the example can be auto-tested) + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log10(10**1) + 1.0 + + >>> np.emath.log10([-10**1, -10**2, 10**2]) + array([1.+1.3644j, 2.+1.3644j, 2.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log10(x) + + +def _logn_dispatcher(n, x): + return (n, x,) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_logn_dispatcher) +def logn(n, x): + """ + Take log base n of x. 
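[Reviewer note] As the body below shows, `logn` reduces to a change of base, `log(x) / log(n)`, applied after the complex promotion, so a negative argument comes back as a principal value. A quick check against that identity:

    import numpy as np

    assert np.isclose(np.emath.logn(2, 8), 3.0)
    z = np.emath.logn(2, -8)
    assert np.isclose(z, np.log(-8 + 0j) / np.log(2))  # 3 + (pi/ln 2) * 1j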
+ + If `x` contains negative inputs, the answer is computed and returned in the + complex domain. + + Parameters + ---------- + n : array_like + The integer base(s) in which the log is taken. + x : array_like + The value(s) whose log base `n` is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base `n` of the `x` value(s). If `x` was a scalar, so is + `out`, otherwise an array is returned. + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.logn(2, [4, 8]) + array([2., 3.]) + >>> np.emath.logn(2, [-4, -8, 8]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + n = _fix_real_lt_zero(n) + return nx.log(x) / nx.log(n) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def log2(x): + """ + Compute the logarithm base 2 of `x`. + + Return the "principal value" (for a description of this, see + `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is + a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns + ``inf``). Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like + The value(s) whose log base 2 is (are) required. + + Returns + ------- + out : ndarray or scalar + The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.log2 + + Notes + ----- + For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2` + (note, however, that otherwise `numpy.log2` and this `log2` are + identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, + and, notably, the complex principle value if ``x.imag != 0``). + + Examples + -------- + + We set the printing precision so the example can be auto-tested: + + >>> np.set_printoptions(precision=4) + + >>> np.emath.log2(8) + 3.0 + >>> np.emath.log2([-4, -8, 8]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + + """ + x = _fix_real_lt_zero(x) + return nx.log2(x) + + +def _power_dispatcher(x, p): + return (x, p) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_power_dispatcher) +def power(x, p): + """ + Return x to the power p, (x**p). + + If `x` contains negative values, the output is converted to the + complex domain. + + Parameters + ---------- + x : array_like + The input value(s). + p : array_like of ints + The power(s) to which `x` is raised. If `x` contains multiple values, + `p` has to either be a scalar, or contain the same number of values + as `x`. In the latter case, the result is + ``x[0]**p[0], x[1]**p[1], ...``. + + Returns + ------- + out : ndarray or scalar + The result of ``x**p``. If `x` and `p` are scalars, so is `out`, + otherwise an array is returned. + + See Also + -------- + numpy.power + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.power(2, 2) + 4 + + >>> np.emath.power([2, 4], 2) + array([ 4, 16]) + + >>> np.emath.power([2, 4], -2) + array([0.25 , 0.0625]) + + >>> np.emath.power([-2, 4], 2) + array([ 4.-0.j, 16.+0.j]) + + >>> np.emath.power([2, 4], [2, 4]) + array([ 4, 256]) + + """ + x = _fix_real_lt_zero(x) + p = _fix_int_lt_zero(p) + return nx.power(x, p) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def arccos(x): + """ + Compute the inverse cosine of x. + + Return the "principal value" (for a description of this, see + `numpy.arccos`) of the inverse cosine of `x`. 
For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[0, \\pi]`. Otherwise, the complex principle value is returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arccos is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arccos + + Notes + ----- + For an arccos() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arccos`. + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.arccos(1) # a scalar is returned + 0.0 + + >>> np.emath.arccos([1,2]) + array([0.-0.j , 0.-1.317j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arccos(x) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def arcsin(x): + """ + Compute the inverse sine of x. + + Return the "principal value" (for a description of this, see + `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that + `abs(x) <= 1`, this is a real number in the closed interval + :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is + returned. + + Parameters + ---------- + x : array_like or scalar + The value(s) whose arcsin is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse sine(s) of the `x` value(s). If `x` was a scalar, so + is `out`, otherwise an array object is returned. + + See Also + -------- + numpy.arcsin + + Notes + ----- + For an arcsin() that returns ``NAN`` when real `x` is not in the + interval ``[-1,1]``, use `numpy.arcsin`. + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.arcsin(0) + 0.0 + + >>> np.emath.arcsin([0,1]) + array([0. , 1.5708]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arcsin(x) + + +@set_module('numpy.lib.scimath') +@array_function_dispatch(_unary_dispatcher) +def arctanh(x): + """ + Compute the inverse hyperbolic tangent of `x`. + + Return the "principal value" (for a description of this, see + `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that + ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is + complex, the result is complex. Finally, `x = 1` returns``inf`` and + ``x=-1`` returns ``-inf``. + + Parameters + ---------- + x : array_like + The value(s) whose arctanh is (are) required. + + Returns + ------- + out : ndarray or scalar + The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was + a scalar so is `out`, otherwise an array is returned. + + + See Also + -------- + numpy.arctanh + + Notes + ----- + For an arctanh() that returns ``NAN`` when real `x` is not in the + interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does + return +/-inf for ``x = +/-1``). + + Examples + -------- + >>> import numpy as np + >>> np.set_printoptions(precision=4) + + >>> np.emath.arctanh(0.5) + 0.5493061443340549 + + >>> from numpy.testing import suppress_warnings + >>> with suppress_warnings() as sup: + ... sup.filter(RuntimeWarning) + ... 
np.emath.arctanh(np.eye(2)) + array([[inf, 0.], + [ 0., inf]]) + >>> np.emath.arctanh([1j]) + array([0.+0.7854j]) + + """ + x = _fix_real_abs_gt_1(x) + return nx.arctanh(x) diff --git a/python/numpy/lib/_scimath_impl.pyi b/python/numpy/lib/_scimath_impl.pyi new file mode 100644 index 000000000..e6390c29c --- /dev/null +++ b/python/numpy/lib/_scimath_impl.pyi @@ -0,0 +1,93 @@ +from typing import Any, overload + +from numpy import complexfloating +from numpy._typing import ( + NDArray, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ComplexLike_co, + _FloatLike_co, +) + +__all__ = ["sqrt", "log", "log2", "logn", "log10", "power", "arccos", "arcsin", "arctanh"] + +@overload +def sqrt(x: _FloatLike_co) -> Any: ... +@overload +def sqrt(x: _ComplexLike_co) -> complexfloating: ... +@overload +def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log(x: _FloatLike_co) -> Any: ... +@overload +def log(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log10(x: _FloatLike_co) -> Any: ... +@overload +def log10(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def log2(x: _FloatLike_co) -> Any: ... +@overload +def log2(x: _ComplexLike_co) -> complexfloating: ... +@overload +def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ... +@overload +def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating: ... +@overload +def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ... +@overload +def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating: ... +@overload +def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arccos(x: _FloatLike_co) -> Any: ... +@overload +def arccos(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arcsin(x: _FloatLike_co) -> Any: ... +@overload +def arcsin(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def arctanh(x: _FloatLike_co) -> Any: ... +@overload +def arctanh(x: _ComplexLike_co) -> complexfloating: ... +@overload +def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ... +@overload +def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... 
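[Reviewer note] On the stub file above: the scalar overloads return `Any` because the runtime result type of a real scalar depends on its *value*, which a static stub cannot express. A small demonstration:

    import numpy as np

    assert isinstance(np.emath.sqrt(4.0), np.floating)          # 2.0
    assert isinstance(np.emath.sqrt(-4.0), np.complexfloating)  # 2j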
diff --git a/python/numpy/lib/_shape_base_impl.py b/python/numpy/lib/_shape_base_impl.py new file mode 100644 index 000000000..89b86c809 --- /dev/null +++ b/python/numpy/lib/_shape_base_impl.py @@ -0,0 +1,1301 @@ +import functools +import warnings + +import numpy._core.numeric as _nx +from numpy._core import atleast_3d, overrides, vstack +from numpy._core._multiarray_umath import _array_converter +from numpy._core.fromnumeric import reshape, transpose +from numpy._core.multiarray import normalize_axis_index +from numpy._core.numeric import ( + array, + asanyarray, + asarray, + normalize_axis_tuple, + zeros, + zeros_like, +) +from numpy._core.overrides import set_module +from numpy._core.shape_base import _arrays_for_stack_dispatcher +from numpy.lib._index_tricks_impl import ndindex +from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells + +__all__ = [ + 'column_stack', 'row_stack', 'dstack', 'array_split', 'split', + 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims', + 'apply_along_axis', 'kron', 'tile', 'take_along_axis', + 'put_along_axis' + ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +def _make_along_axis_idx(arr_shape, indices, axis): + # compute dimensions to iterate over + if not _nx.issubdtype(indices.dtype, _nx.integer): + raise IndexError('`indices` must be an integer array') + if len(arr_shape) != indices.ndim: + raise ValueError( + "`indices` and `arr` must have the same number of dimensions") + shape_ones = (1,) * indices.ndim + dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim)) + + # build a fancy index, consisting of orthogonal aranges, with the + # requested index inserted at the right location + fancy_index = [] + for dim, n in zip(dest_dims, arr_shape): + if dim is None: + fancy_index.append(indices) + else: + ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1:] + fancy_index.append(_nx.arange(n).reshape(ind_shape)) + + return tuple(fancy_index) + + +def _take_along_axis_dispatcher(arr, indices, axis=None): + return (arr, indices) + + +@array_function_dispatch(_take_along_axis_dispatcher) +def take_along_axis(arr, indices, axis=-1): + """ + Take values from the input array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to look up values in the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Source array + indices : ndarray (Ni..., J, Nk...) + Indices to take along each 1d slice of ``arr``. This must match the + dimension of ``arr``, but dimensions Ni and Nj only need to broadcast + against ``arr``. + axis : int or None, optional + The axis to take 1d slices along. If axis is None, the input array is + treated as if it had first been flattened to 1d, for consistency with + `sort` and `argsort`. + + .. versionchanged:: 2.3 + The default value is now ``-1``. + + Returns + ------- + out: ndarray (Ni..., J, Nk...) + The indexed result. 
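[Reviewer note] What `_make_along_axis_idx` above builds, spelled out for the 2-D case: an orthogonal `arange` for every axis except `axis`, which holds `indices`. A sketch verifying the equivalence:

    import numpy as np

    a = np.array([[10, 30, 20], [60, 40, 50]])
    idx = np.argsort(a, axis=1)
    rows = np.arange(a.shape[0]).reshape(-1, 1)   # broadcasts against idx
    assert np.array_equal(a[rows, idx], np.take_along_axis(a, idx, axis=1))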
+ + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + out = np.empty(Ni + (J,) + Nk) + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + out_1d = out [ii + s_[:,] + kk] + for j in range(J): + out_1d[j] = a_1d[indices_1d[j]] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + out_1d[:] = a_1d[indices_1d] + + See Also + -------- + take : Take along an axis, using the same indices for every 1d slice + put_along_axis : + Put values into the destination array by matching 1d index and data slices + + Examples + -------- + >>> import numpy as np + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can sort either by using sort directly, or argsort and this function + + >>> np.sort(a, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + >>> ai = np.argsort(a, axis=1) + >>> ai + array([[0, 2, 1], + [1, 2, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 20, 30], + [40, 50, 60]]) + + The same works for max and min, if you maintain the trivial dimension + with ``keepdims``: + + >>> np.max(a, axis=1, keepdims=True) + array([[30], + [60]]) + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[30], + [60]]) + + If we want to get the max and min at the same time, we can stack the + indices first + + >>> ai_min = np.argmin(a, axis=1, keepdims=True) + >>> ai_max = np.argmax(a, axis=1, keepdims=True) + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai + array([[0, 1], + [1, 0]]) + >>> np.take_along_axis(a, ai, axis=1) + array([[10, 30], + [40, 60]]) + """ + # normalize inputs + if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') + arr = arr.flat + arr_shape = (len(arr),) # flatiter has no .shape + axis = 0 + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + return arr[_make_along_axis_idx(arr_shape, indices, axis)] + + +def _put_along_axis_dispatcher(arr, indices, values, axis): + return (arr, indices, values) + + +@array_function_dispatch(_put_along_axis_dispatcher) +def put_along_axis(arr, indices, values, axis): + """ + Put values into the destination array by matching 1d index and data slices. + + This iterates over matching 1d slices oriented along the specified axis in + the index and data arrays, and uses the former to place values into the + latter. These slices can be different lengths. + + Functions returning an index along an axis, like `argsort` and + `argpartition`, produce suitable indices for this function. + + Parameters + ---------- + arr : ndarray (Ni..., M, Nk...) + Destination array. + indices : ndarray (Ni..., J, Nk...) + Indices to change along each 1d slice of `arr`. This must match the + dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast + against `arr`. + values : array_like (Ni..., J, Nk...) + values to insert at those indices. Its shape and dimension are + broadcast to match that of `indices`. + axis : int + The axis to take 1d slices along. If axis is None, the destination + array is treated as if a flattened 1d view had been created of it. 
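[Reviewer note] Worth emphasizing for `put_along_axis`: it mutates its first argument and returns `None`, as the fancy-index assignment in the body implies. A quick check:

    import numpy as np

    a = np.array([[10, 30, 20], [60, 40, 50]])
    ai = np.argmin(a, axis=1, keepdims=True)
    out = np.put_along_axis(a, ai, 0, axis=1)   # zero each row's minimum
    assert out is None
    assert np.array_equal(a, [[0, 30, 20], [60, 0, 50]])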
+ + Notes + ----- + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices:: + + Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:] + J = indices.shape[axis] # Need not equal M + + for ii in ndindex(Ni): + for kk in ndindex(Nk): + a_1d = a [ii + s_[:,] + kk] + indices_1d = indices[ii + s_[:,] + kk] + values_1d = values [ii + s_[:,] + kk] + for j in range(J): + a_1d[indices_1d[j]] = values_1d[j] + + Equivalently, eliminating the inner loop, the last two lines would be:: + + a_1d[indices_1d] = values_1d + + See Also + -------- + take_along_axis : + Take values from the input array by matching 1d index and data slices + + Examples + -------- + >>> import numpy as np + + For this sample array + + >>> a = np.array([[10, 30, 20], [60, 40, 50]]) + + We can replace the maximum values with: + + >>> ai = np.argmax(a, axis=1, keepdims=True) + >>> ai + array([[1], + [0]]) + >>> np.put_along_axis(a, ai, 99, axis=1) + >>> a + array([[10, 99, 20], + [99, 40, 50]]) + + """ + # normalize inputs + if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') + arr = arr.flat + axis = 0 + arr_shape = (len(arr),) # flatiter has no .shape + else: + axis = normalize_axis_index(axis, arr.ndim) + arr_shape = arr.shape + + # use the fancy index + arr[_make_along_axis_idx(arr_shape, indices, axis)] = values + + +def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs): + return (arr,) + + +@array_function_dispatch(_apply_along_axis_dispatcher) +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + Apply a function to 1-D slices along the given axis. + + Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays + and `a` is a 1-D slice of `arr` along `axis`. + + This is equivalent to (but faster than) the following use of `ndindex` and + `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + f = func1d(arr[ii + s_[:,] + kk]) + Nj = f.shape + for jj in ndindex(Nj): + out[ii + jj + kk] = f[jj] + + Equivalently, eliminating the inner loop, this can be expressed as:: + + Ni, Nk = a.shape[:axis], a.shape[axis+1:] + for ii in ndindex(Ni): + for kk in ndindex(Nk): + out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk]) + + Parameters + ---------- + func1d : function (M,) -> (Nj...) + This function should accept 1-D arrays. It is applied to 1-D + slices of `arr` along the specified axis. + axis : integer + Axis along which `arr` is sliced. + arr : ndarray (Ni..., M, Nk...) + Input array. + args : any + Additional arguments to `func1d`. + kwargs : any + Additional named arguments to `func1d`. + + Returns + ------- + out : ndarray (Ni..., Nj..., Nk...) + The output array. The shape of `out` is identical to the shape of + `arr`, except along the `axis` dimension. This axis is removed, and + replaced with new dimensions equal to the shape of the return value + of `func1d`. So if `func1d` returns a scalar `out` will have one + fewer dimensions than `arr`. + + See Also + -------- + apply_over_axes : Apply a function repeatedly over multiple axes. + + Examples + -------- + >>> import numpy as np + >>> def my_func(a): + ... \"\"\"Average first and last element of a 1-D array\"\"\" + ... 
return (a[0] + a[-1]) * 0.5 + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(my_func, 0, b) + array([4., 5., 6.]) + >>> np.apply_along_axis(my_func, 1, b) + array([2., 5., 8.]) + + For a function that returns a 1D array, the number of dimensions in + `outarr` is the same as `arr`. + + >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]]) + >>> np.apply_along_axis(sorted, 1, b) + array([[1, 7, 8], + [3, 4, 9], + [2, 5, 6]]) + + For a function that returns a higher dimensional array, those dimensions + are inserted in place of the `axis` dimension. + + >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) + >>> np.apply_along_axis(np.diag, -1, b) + array([[[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], + [[4, 0, 0], + [0, 5, 0], + [0, 0, 6]], + [[7, 0, 0], + [0, 8, 0], + [0, 0, 9]]]) + """ + # handle negative axes + conv = _array_converter(arr) + arr = conv[0] + + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + + # arr, with the iteration axis at the end + in_dims = list(range(nd)) + inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1:] + [axis]) + + # compute indices for the iteration axes, and append a trailing ellipsis to + # prevent 0d arrays decaying to scalars, which fixes gh-8642 + inds = ndindex(inarr_view.shape[:-1]) + inds = (ind + (Ellipsis,) for ind in inds) + + # invoke the function on the first item + try: + ind0 = next(inds) + except StopIteration: + raise ValueError( + 'Cannot apply_along_axis when any iteration dimensions are 0' + ) from None + res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs)) + + # build a buffer for storing evaluations of func1d. + # remove the requested axis, and add the new ones on the end. + # laid out so that each write is contiguous. + # for a tuple index inds, buff[inds] = func1d(inarr_view[inds]) + if not isinstance(res, matrix): + buff = zeros_like(res, shape=inarr_view.shape[:-1] + res.shape) + else: + # Matrices are nasty with reshaping, so do not preserve them here. + buff = zeros(inarr_view.shape[:-1] + res.shape, dtype=res.dtype) + + # permutation of axes such that out = buff.transpose(buff_permute) + buff_dims = list(range(buff.ndim)) + buff_permute = ( + buff_dims[0 : axis] + + buff_dims[buff.ndim - res.ndim : buff.ndim] + + buff_dims[axis : buff.ndim - res.ndim] + ) + + # save the first result, then compute and save all remaining results + buff[ind0] = res + for ind in inds: + buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs)) + + res = transpose(buff, buff_permute) + return conv.wrap(res) + + +def _apply_over_axes_dispatcher(func, a, axes): + return (a,) + + +@array_function_dispatch(_apply_over_axes_dispatcher) +def apply_over_axes(func, a, axes): + """ + Apply a function repeatedly over multiple axes. + + `func` is called as `res = func(a, axis)`, where `axis` is the first + element of `axes`. The result `res` of the function call must have + either the same dimensions as `a` or one less dimension. If `res` + has one less dimension than `a`, a dimension is inserted before + `axis`. The call to `func` is then repeated for each axis in `axes`, + with `res` as the first argument. + + Parameters + ---------- + func : function + This function must take two arguments, `func(a, axis)`. + a : array_like + Input array. + axes : array_like + Axes over which `func` is applied; the elements must be integers. + + Returns + ------- + apply_over_axis : ndarray + The output array. The number of dimensions is the same as `a`, + but the shape can be different. 
This depends on whether `func` + changes the shape of its output with respect to its input. + + See Also + -------- + apply_along_axis : + Apply a function to 1-D slices of an array along the given axis. + + Notes + ----- + This function is equivalent to tuple axis arguments to reorderable ufuncs + with keepdims=True. Tuple axis arguments to ufuncs have been available since + version 1.7.0. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(24).reshape(2,3,4) + >>> a + array([[[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]], + [[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]]]) + + Sum over axes 0 and 2. The result has same number of dimensions + as the original array: + + >>> np.apply_over_axes(np.sum, a, [0,2]) + array([[[ 60], + [ 92], + [124]]]) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.sum(a, axis=(0,2), keepdims=True) + array([[[ 60], + [ 92], + [124]]]) + + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +def _expand_dims_dispatcher(a, axis): + return (a,) + + +@array_function_dispatch(_expand_dims_dispatcher) +def expand_dims(a, axis): + """ + Expand the shape of an array. + + Insert a new axis that will appear at the `axis` position in the expanded + array shape. + + Parameters + ---------- + a : array_like + Input array. + axis : int or tuple of ints + Position in the expanded axes where the new axis (or axes) is placed. + + .. deprecated:: 1.13.0 + Passing an axis where ``axis > a.ndim`` will be treated as + ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will + be treated as ``axis == 0``. This behavior is deprecated. + + Returns + ------- + result : ndarray + View of `a` with the number of dimensions increased. + + See Also + -------- + squeeze : The inverse operation, removing singleton dimensions + reshape : Insert, remove, and combine dimensions, and resize existing ones + atleast_1d, atleast_2d, atleast_3d + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2]) + >>> x.shape + (2,) + + The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``: + + >>> y = np.expand_dims(x, axis=0) + >>> y + array([[1, 2]]) + >>> y.shape + (1, 2) + + The following is equivalent to ``x[:, np.newaxis]``: + + >>> y = np.expand_dims(x, axis=1) + >>> y + array([[1], + [2]]) + >>> y.shape + (2, 1) + + ``axis`` may also be a tuple: + + >>> y = np.expand_dims(x, axis=(0, 1)) + >>> y + array([[[1, 2]]]) + + >>> y = np.expand_dims(x, axis=(2, 0)) + >>> y + array([[[1], + [2]]]) + + Note that some examples may use ``None`` instead of ``np.newaxis``. 
These + are the same objects: + + >>> np.newaxis is None + True + + """ + if isinstance(a, matrix): + a = asarray(a) + else: + a = asanyarray(a) + + if not isinstance(axis, (tuple, list)): + axis = (axis,) + + out_ndim = len(axis) + a.ndim + axis = normalize_axis_tuple(axis, out_ndim) + + shape_it = iter(a.shape) + shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)] + + return a.reshape(shape) + + +# NOTE: Remove once deprecation period passes +@set_module("numpy") +def row_stack(tup, *, dtype=None, casting="same_kind"): + # Deprecated in NumPy 2.0, 2023-08-18 + warnings.warn( + "`row_stack` alias is deprecated. " + "Use `np.vstack` directly.", + DeprecationWarning, + stacklevel=2 + ) + return vstack(tup, dtype=dtype, casting=casting) + + +row_stack.__doc__ = vstack.__doc__ + + +def _column_stack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_column_stack_dispatcher) +def column_stack(tup): + """ + Stack 1-D arrays as columns into a 2-D array. + + Take a sequence of 1-D arrays and stack them as columns + to make a single 2-D array. 2-D arrays are stacked as-is, + just like with `hstack`. 1-D arrays are turned into 2-D columns + first. + + Parameters + ---------- + tup : sequence of 1-D or 2-D arrays. + Arrays to stack. All of them must have the same first dimension. + + Returns + ------- + stacked : 2-D array + The array formed by stacking the given arrays. + + See Also + -------- + stack, hstack, vstack, concatenate + + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.column_stack((a,b)) + array([[1, 2], + [2, 3], + [3, 4]]) + + """ + arrays = [] + for v in tup: + arr = asanyarray(v) + if arr.ndim < 2: + arr = array(arr, copy=None, subok=True, ndmin=2).T + arrays.append(arr) + return _nx.concatenate(arrays, 1) + + +def _dstack_dispatcher(tup): + return _arrays_for_stack_dispatcher(tup) + + +@array_function_dispatch(_dstack_dispatcher) +def dstack(tup): + """ + Stack arrays in sequence depth wise (along third axis). + + This is equivalent to concatenation along the third axis after 2-D arrays + of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape + `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by + `dsplit`. + + This function makes most sense for arrays with up to 3 dimensions. For + instance, for pixel-data with a height (first axis), width (second axis), + and r/g/b channels (third axis). The functions `concatenate`, `stack` and + `block` provide more general stacking and concatenation operations. + + Parameters + ---------- + tup : sequence of arrays + The arrays must have the same shape along all but the third axis. + 1-D or 2-D arrays must have the same shape. + + Returns + ------- + stacked : ndarray + The array formed by stacking the given arrays, will be at least 3-D. + + See Also + -------- + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + block : Assemble an nd-array from nested lists of blocks. + vstack : Stack arrays in sequence vertically (row wise). + hstack : Stack arrays in sequence horizontally (column wise). + column_stack : Stack 1-D arrays as columns into a 2-D array. + dsplit : Split array along third axis. 
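[Reviewer note] `dstack` below is concatenation along axis 2 after `atleast_3d`, exactly as its implementation reads. Checking that equivalence directly:

    import numpy as np

    a, b = np.array([1, 2, 3]), np.array([4, 5, 6])
    ref = np.concatenate([np.atleast_3d(a), np.atleast_3d(b)], axis=2)
    assert np.array_equal(np.dstack((a, b)), ref)   # shape (1, 3, 2)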
+ + Examples + -------- + >>> import numpy as np + >>> a = np.array((1,2,3)) + >>> b = np.array((2,3,4)) + >>> np.dstack((a,b)) + array([[[1, 2], + [2, 3], + [3, 4]]]) + + >>> a = np.array([[1],[2],[3]]) + >>> b = np.array([[2],[3],[4]]) + >>> np.dstack((a,b)) + array([[[1, 2]], + [[2, 3]], + [[3, 4]]]) + + """ + arrs = atleast_3d(*tup) + if not isinstance(arrs, tuple): + arrs = (arrs,) + return _nx.concatenate(arrs, 2) + + +def _replace_zero_by_x_arrays(sub_arys): + for i in range(len(sub_arys)): + if _nx.ndim(sub_arys[i]) == 0: + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)): + sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype) + return sub_arys + + +def _array_split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_array_split_dispatcher) +def array_split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays. + + Please refer to the ``split`` documentation. The only difference + between these functions is that ``array_split`` allows + `indices_or_sections` to be an integer that does *not* equally + divide the axis. For an array of length l that should be split + into n sections, it returns l % n sub-arrays of size l//n + 1 + and the rest of size l//n. + + See Also + -------- + split : Split array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(8.0) + >>> np.array_split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] + + >>> x = np.arange(9) + >>> np.array_split(x, 4) + [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])] + + """ + try: + Ntotal = ary.shape[axis] + except AttributeError: + Ntotal = len(ary) + try: + # handle array case. + Nsections = len(indices_or_sections) + 1 + div_points = [0] + list(indices_or_sections) + [Ntotal] + except TypeError: + # indices_or_sections is a scalar, not an array. + Nsections = int(indices_or_sections) + if Nsections <= 0: + raise ValueError('number sections must be larger than 0.') from None + Neach_section, extras = divmod(Ntotal, Nsections) + section_sizes = ([0] + + extras * [Neach_section + 1] + + (Nsections - extras) * [Neach_section]) + div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum() + + sub_arys = [] + sary = _nx.swapaxes(ary, axis, 0) + for i in range(Nsections): + st = div_points[i] + end = div_points[i + 1] + sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0)) + + return sub_arys + + +def _split_dispatcher(ary, indices_or_sections, axis=None): + return (ary, indices_or_sections) + + +@array_function_dispatch(_split_dispatcher) +def split(ary, indices_or_sections, axis=0): + """ + Split an array into multiple sub-arrays as views into `ary`. + + Parameters + ---------- + ary : ndarray + Array to be divided into sub-arrays. + indices_or_sections : int or 1-D array + If `indices_or_sections` is an integer, N, the array will be divided + into N equal arrays along `axis`. If such a split is not possible, + an error is raised. + + If `indices_or_sections` is a 1-D array of sorted integers, the entries + indicate where along `axis` the array is split. For example, + ``[2, 3]`` would, for ``axis=0``, result in + + - ary[:2] + - ary[2:3] + - ary[3:] + + If an index exceeds the dimension of the array along `axis`, + an empty sub-array is returned correspondingly. + axis : int, optional + The axis along which to split, default is 0. 
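[Reviewer note] The `array_split` sizing rule stated above, checked concretely: for length `l` split into `n` sections, the first `l % n` sections have size `l//n + 1` and the rest have size `l//n`.

    import numpy as np

    parts = np.array_split(np.arange(9), 4)   # 9 % 4 = 1, 9 // 4 = 2
    assert [len(p) for p in parts] == [3, 2, 2, 2]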
+ + Returns + ------- + sub-arrays : list of ndarrays + A list of sub-arrays as views into `ary`. + + Raises + ------ + ValueError + If `indices_or_sections` is given as an integer, but + a split does not result in equal division. + + See Also + -------- + array_split : Split an array into multiple sub-arrays of equal or + near-equal size. Does not raise an exception if + an equal division cannot be made. + hsplit : Split array into multiple sub-arrays horizontally (column-wise). + vsplit : Split array into multiple sub-arrays vertically (row wise). + dsplit : Split array into multiple sub-arrays along the 3rd axis (depth). + concatenate : Join a sequence of arrays along an existing axis. + stack : Join a sequence of arrays along a new axis. + hstack : Stack arrays in sequence horizontally (column wise). + vstack : Stack arrays in sequence vertically (row wise). + dstack : Stack arrays in sequence depth wise (along third dimension). + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(9.0) + >>> np.split(x, 3) + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] + + >>> x = np.arange(8.0) + >>> np.split(x, [3, 5, 6, 10]) + [array([0., 1., 2.]), + array([3., 4.]), + array([5.]), + array([6., 7.]), + array([], dtype=float64)] + + """ + try: + len(indices_or_sections) + except TypeError: + sections = indices_or_sections + N = ary.shape[axis] + if N % sections: + raise ValueError( + 'array split does not result in an equal division') from None + return array_split(ary, indices_or_sections, axis) + + +def _hvdsplit_dispatcher(ary, indices_or_sections): + return (ary, indices_or_sections) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def hsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays horizontally (column-wise). + + Please refer to the `split` documentation. `hsplit` is equivalent + to `split` with ``axis=1``, the array is always split along the second + axis except for 1-D arrays, where it is split at ``axis=0``. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.hsplit(x, 2) + [array([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + array([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])] + >>> np.hsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + array([[ 3.], + [ 7.], + [11.], + [15.]]), + array([], shape=(4, 0), dtype=float64)] + + With a higher dimensional array the split is still along the second axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.hsplit(x, 2) + [array([[[0., 1.]], + [[4., 5.]]]), + array([[[2., 3.]], + [[6., 7.]]])] + + With a 1-D array, the split is along axis 0. + + >>> x = np.array([0, 1, 2, 3, 4, 5]) + >>> np.hsplit(x, 2) + [array([0, 1, 2]), array([3, 4, 5])] + + """ + if _nx.ndim(ary) == 0: + raise ValueError('hsplit only works on arrays of 1 or more dimensions') + if ary.ndim > 1: + return split(ary, indices_or_sections, 1) + else: + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def vsplit(ary, indices_or_sections): + """ + Split an array into multiple sub-arrays vertically (row-wise). + + Please refer to the ``split`` documentation. 
``vsplit`` is equivalent + to ``split`` with `axis=0` (default), the array is always split along the + first axis regardless of the array dimension. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(4, 4) + >>> x + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> np.vsplit(x, 2) + [array([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + array([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])] + >>> np.vsplit(x, np.array([3, 6])) + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + array([[12., 13., 14., 15.]]), + array([], shape=(0, 4), dtype=float64)] + + With a higher dimensional array the split is still along the first axis. + + >>> x = np.arange(8.0).reshape(2, 2, 2) + >>> x + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) + >>> np.vsplit(x, 2) + [array([[[0., 1.], + [2., 3.]]]), + array([[[4., 5.], + [6., 7.]]])] + + """ + if _nx.ndim(ary) < 2: + raise ValueError('vsplit only works on arrays of 2 or more dimensions') + return split(ary, indices_or_sections, 0) + + +@array_function_dispatch(_hvdsplit_dispatcher) +def dsplit(ary, indices_or_sections): + """ + Split array into multiple sub-arrays along the 3rd axis (depth). + + Please refer to the `split` documentation. `dsplit` is equivalent + to `split` with ``axis=2``, the array is always split along the third + axis provided the array dimension is greater than or equal to 3. + + See Also + -------- + split : Split an array into multiple sub-arrays of equal size. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(16.0).reshape(2, 2, 4) + >>> x + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> np.dsplit(x, 2) + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), array([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])] + >>> np.dsplit(x, np.array([3, 6])) + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + array([], shape=(2, 2, 0), dtype=float64)] + """ + if _nx.ndim(ary) < 3: + raise ValueError('dsplit only works on arrays of 3 or more dimensions') + return split(ary, indices_or_sections, 2) + + +def get_array_wrap(*args): + """Find the wrapper for the array with the highest priority. + + In case of ties, leftmost wins. If no wrapper is found, return None. + + .. deprecated:: 2.0 + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`get_array_wrap` is deprecated. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + wrappers = sorted((getattr(x, '__array_priority__', 0), -i, + x.__array_wrap__) for i, x in enumerate(args) + if hasattr(x, '__array_wrap__')) + if wrappers: + return wrappers[-1][-1] + return None + + +def _kron_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_kron_dispatcher) +def kron(a, b): + """ + Kronecker product of two arrays. + + Computes the Kronecker product, a composite array made of blocks of the + second array scaled by the first. + + Parameters + ---------- + a, b : array_like + + Returns + ------- + out : ndarray + + See Also + -------- + outer : The outer product + + Notes + ----- + The function assumes that the number of dimensions of `a` and `b` + are the same, if necessary prepending the smallest with ones. 
+ If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``, + the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``. + The elements are products of elements from `a` and `b`, organized + explicitly by:: + + kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN] + + where:: + + kt = it * st + jt, t = 0,...,N + + In the common 2-D case (N=1), the block structure can be visualized:: + + [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ], + [ ... ... ], + [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]] + + + Examples + -------- + >>> import numpy as np + >>> np.kron([1,10,100], [5,6,7]) + array([ 5, 6, 7, ..., 500, 600, 700]) + >>> np.kron([5,6,7], [1,10,100]) + array([ 5, 50, 500, ..., 7, 70, 700]) + + >>> np.kron(np.eye(2), np.ones((2,2))) + array([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> a = np.arange(100).reshape((2,5,2,5)) + >>> b = np.arange(24).reshape((2,3,4)) + >>> c = np.kron(a,b) + >>> c.shape + (2, 10, 6, 20) + >>> I = (1,3,0,2) + >>> J = (0,2,1) + >>> J1 = (0,) + J # extend to ndim=4 + >>> S1 = (1,) + b.shape + >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1)) + >>> c[K] == a[I]*b[J] + True + + """ + # Working: + # 1. Equalise the shapes by prepending smaller array with 1s + # 2. Expand shapes of both the arrays by adding new axes at + # odd positions for 1st array and even positions for 2nd + # 3. Compute the product of the modified array + # 4. The inner most array elements now contain the rows of + # the Kronecker product + # 5. Reshape the result to kron's shape, which is same as + # product of shapes of the two arrays. + b = asanyarray(b) + a = array(a, copy=None, subok=True, ndmin=b.ndim) + is_any_mat = isinstance(a, matrix) or isinstance(b, matrix) + ndb, nda = b.ndim, a.ndim + nd = max(ndb, nda) + + if (nda == 0 or ndb == 0): + return _nx.multiply(a, b) + + as_ = a.shape + bs = b.shape + if not a.flags.contiguous: + a = reshape(a, as_) + if not b.flags.contiguous: + b = reshape(b, bs) + + # Equalise the shapes by prepending smaller one with 1s + as_ = (1,) * max(0, ndb - nda) + as_ + bs = (1,) * max(0, nda - ndb) + bs + + # Insert empty dimensions + a_arr = expand_dims(a, axis=tuple(range(ndb - nda))) + b_arr = expand_dims(b, axis=tuple(range(nda - ndb))) + + # Compute the product + a_arr = expand_dims(a_arr, axis=tuple(range(1, nd * 2, 2))) + b_arr = expand_dims(b_arr, axis=tuple(range(0, nd * 2, 2))) + # In case of `mat`, convert result to `array` + result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat)) + + # Reshape back + result = result.reshape(_nx.multiply(as_, bs)) + + return result if not is_any_mat else matrix(result, copy=False) + + +def _tile_dispatcher(A, reps): + return (A, reps) + + +@array_function_dispatch(_tile_dispatcher) +def tile(A, reps): + """ + Construct an array by repeating A the number of times given by reps. + + If `reps` has length ``d``, the result will have dimension of + ``max(d, A.ndim)``. + + If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new + axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, + or shape (1, 1, 3) for 3-D replication. If this is not the desired + behavior, promote `A` to d-dimensions manually before calling this + function. + + If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it. + Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as + (1, 1, 2, 2). 
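[Reviewer note] The axis-interleaving that the `kron` implementation earlier in this hunk performs with `expand_dims`, written directly for the 2-D case as an outer product with the block axes moved inside:

    import numpy as np

    a = np.array([[1, 2], [3, 4]])
    b = np.eye(2, dtype=int)
    # product[i, k, j, l] = a[i, j] * b[k, l]; reshape pairs (i, k), (j, l)
    ref = (a[:, None, :, None] * b[None, :, None, :]).reshape(4, 4)
    assert np.array_equal(np.kron(a, b), ref)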
+ + Note : Although tile may be used for broadcasting, it is strongly + recommended to use numpy's broadcasting operations and functions. + + Parameters + ---------- + A : array_like + The input array. + reps : array_like + The number of repetitions of `A` along each axis. + + Returns + ------- + c : ndarray + The tiled output array. + + See Also + -------- + repeat : Repeat elements of an array. + broadcast_to : Broadcast an array to a new shape + + Examples + -------- + >>> import numpy as np + >>> a = np.array([0, 1, 2]) + >>> np.tile(a, 2) + array([0, 1, 2, 0, 1, 2]) + >>> np.tile(a, (2, 2)) + array([[0, 1, 2, 0, 1, 2], + [0, 1, 2, 0, 1, 2]]) + >>> np.tile(a, (2, 1, 2)) + array([[[0, 1, 2, 0, 1, 2]], + [[0, 1, 2, 0, 1, 2]]]) + + >>> b = np.array([[1, 2], [3, 4]]) + >>> np.tile(b, 2) + array([[1, 2, 1, 2], + [3, 4, 3, 4]]) + >>> np.tile(b, (2, 1)) + array([[1, 2], + [3, 4], + [1, 2], + [3, 4]]) + + >>> c = np.array([1,2,3,4]) + >>> np.tile(c,(4,1)) + array([[1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4], + [1, 2, 3, 4]]) + """ + try: + tup = tuple(reps) + except TypeError: + tup = (reps,) + d = len(tup) + if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray): + # Fixes the problem that the function does not make a copy if A is a + # numpy array and the repetitions are 1 in all dimensions + return _nx.array(A, copy=True, subok=True, ndmin=d) + else: + # Note that no copy of zero-sized arrays is made. However since they + # have no data there is no risk of an inadvertent overwrite. + c = _nx.array(A, copy=None, subok=True, ndmin=d) + if (d < c.ndim): + tup = (1,) * (c.ndim - d) + tup + shape_out = tuple(s * t for s, t in zip(c.shape, tup)) + n = c.size + if n > 0: + for dim_in, nrep in zip(c.shape, tup): + if nrep != 1: + c = c.reshape(-1, n).repeat(nrep, 0) + n //= dim_in + return c.reshape(shape_out) diff --git a/python/numpy/lib/_shape_base_impl.pyi b/python/numpy/lib/_shape_base_impl.pyi new file mode 100644 index 000000000..a50d372bb --- /dev/null +++ b/python/numpy/lib/_shape_base_impl.pyi @@ -0,0 +1,235 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + Concatenate, + ParamSpec, + Protocol, + SupportsIndex, + TypeVar, + overload, + type_check_only, +) + +from typing_extensions import deprecated + +import numpy as np +from numpy import ( + _CastingKind, + complexfloating, + floating, + generic, + integer, + object_, + signedinteger, + ufunc, + unsignedinteger, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeUInt_co, + _ShapeLike, +) + +__all__ = [ + "column_stack", + "row_stack", + "dstack", + "array_split", + "split", + "hsplit", + "vsplit", + "dsplit", + "apply_over_axes", + "expand_dims", + "apply_along_axis", + "kron", + "tile", + "take_along_axis", + "put_along_axis", +] + +_P = ParamSpec("_P") +_ScalarT = TypeVar("_ScalarT", bound=generic) + +# Signature of `__array_wrap__` +@type_check_only +class _ArrayWrap(Protocol): + def __call__( + self, + array: NDArray[Any], + context: tuple[ufunc, tuple[Any, ...], int] | None = ..., + return_scalar: bool = ..., + /, + ) -> Any: ... + +@type_check_only +class _SupportsArrayWrap(Protocol): + @property + def __array_wrap__(self) -> _ArrayWrap: ... + +### + +def take_along_axis( + arr: _ScalarT | NDArray[_ScalarT], + indices: NDArray[integer], + axis: int | None = ..., +) -> NDArray[_ScalarT]: ... 
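[Reviewer note] The `tile` promotion rules described above, checked on both sides: whichever of `A.ndim` and `len(reps)` is shorter gets leading 1s prepended.

    import numpy as np

    a = np.array([0, 1, 2])
    assert np.tile(a, (2, 2)).shape == (2, 6)   # (3,) promoted to (1, 3)
    b = np.ones((2, 3, 4))
    assert np.tile(b, 2).shape == (2, 3, 8)     # reps promoted to (1, 1, 2)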
+ +def put_along_axis( + arr: NDArray[_ScalarT], + indices: NDArray[integer], + values: ArrayLike, + axis: int | None, +) -> None: ... + +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[_ScalarT]: ... +@overload +def apply_along_axis( + func1d: Callable[Concatenate[NDArray[Any], _P], Any], + axis: SupportsIndex, + arr: ArrayLike, + *args: _P.args, + **kwargs: _P.kwargs, +) -> NDArray[Any]: ... + +def apply_over_axes( + func: Callable[[NDArray[Any], int], NDArray[_ScalarT]], + a: ArrayLike, + axes: int | Sequence[int], +) -> NDArray[_ScalarT]: ... + +@overload +def expand_dims( + a: _ArrayLike[_ScalarT], + axis: _ShapeLike, +) -> NDArray[_ScalarT]: ... +@overload +def expand_dims( + a: ArrayLike, + axis: _ShapeLike, +) -> NDArray[Any]: ... + +# Deprecated in NumPy 2.0, 2023-08-18 +@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.") +def row_stack( + tup: Sequence[ArrayLike], + *, + dtype: DTypeLike | None = None, + casting: _CastingKind = "same_kind", +) -> NDArray[Any]: ... + +# +@overload +def column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +@overload +def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ... +@overload +def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ... + +@overload +def array_split( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[_ScalarT]]: ... +@overload +def array_split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[Any]]: ... + +@overload +def split( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[_ScalarT]]: ... +@overload +def split( + ary: ArrayLike, + indices_or_sections: _ShapeLike, + axis: SupportsIndex = ..., +) -> list[NDArray[Any]]: ... + +@overload +def hsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def hsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def vsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def vsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def dsplit( + ary: _ArrayLike[_ScalarT], + indices_or_sections: _ShapeLike, +) -> list[NDArray[_ScalarT]]: ... +@overload +def dsplit( + ary: ArrayLike, + indices_or_sections: _ShapeLike, +) -> list[NDArray[Any]]: ... + +@overload +def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ... +@overload +def get_array_wrap(*args: object) -> _ArrayWrap | None: ... + +@overload +def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... # type: ignore[misc] +@overload +def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... 
+@overload +def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ... +@overload +def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ... + +@overload +def tile( + A: _ArrayLike[_ScalarT], + reps: int | Sequence[int], +) -> NDArray[_ScalarT]: ... +@overload +def tile( + A: ArrayLike, + reps: int | Sequence[int], +) -> NDArray[Any]: ... diff --git a/python/numpy/lib/_stride_tricks_impl.py b/python/numpy/lib/_stride_tricks_impl.py new file mode 100644 index 000000000..d4780783a --- /dev/null +++ b/python/numpy/lib/_stride_tricks_impl.py @@ -0,0 +1,549 @@ +""" +Utilities that manipulate strides to achieve desirable effects. + +An explanation of strides can be found in the :ref:`arrays.ndarray`. + +""" +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy._core.overrides import array_function_dispatch, set_module + +__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes'] + + +class DummyArray: + """Dummy object that just exists to hang __array_interface__ dictionaries + and possibly keep alive a reference to a base array. + """ + + def __init__(self, interface, base=None): + self.__array_interface__ = interface + self.base = base + + +def _maybe_view_as_subclass(original_array, new_array): + if type(original_array) is not type(new_array): + # if input was an ndarray subclass and subclasses were OK, + # then view the result as that subclass. + new_array = new_array.view(type=type(original_array)) + # Since we have done something akin to a view from original_array, we + # should let the subclass finalize (if it has it implemented, i.e., is + # not None). + if new_array.__array_finalize__: + new_array.__array_finalize__(original_array) + return new_array + + +@set_module("numpy.lib.stride_tricks") +def as_strided(x, shape=None, strides=None, subok=False, writeable=True): + """ + Create a view into the array with the given shape and strides. + + .. warning:: This function has to be used with extreme care, see notes. + + Parameters + ---------- + x : ndarray + Array to create a new. + shape : sequence of int, optional + The shape of the new array. Defaults to ``x.shape``. + strides : sequence of int, optional + The strides of the new array. Defaults to ``x.strides``. + subok : bool, optional + If True, subclasses are preserved. + writeable : bool, optional + If set to False, the returned array will always be readonly. + Otherwise it will be writable if the original array was. It + is advisable to set this to False if possible (see Notes). + + Returns + ------- + view : ndarray + + See also + -------- + broadcast_to : broadcast an array to a given shape. + reshape : reshape an array. + lib.stride_tricks.sliding_window_view : + userfriendly and safe function for a creation of sliding window views. + + Notes + ----- + ``as_strided`` creates a view into the array given the exact strides + and shape. This means it manipulates the internal data structure of + ndarray and, if done incorrectly, the array elements can point to + invalid memory and can corrupt results or crash your program. + It is advisable to always use the original ``x.strides`` when + calculating new strides to avoid reliance on a contiguous memory + layout. + + Furthermore, arrays created with this function often contain self + overlapping memory, so that two elements are identical. + Vectorized write operations on such arrays will typically be + unpredictable. They may even give different results for small, large, + or transposed arrays. 
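[Reviewer note] A concrete instance of the self-overlapping memory warned about above: a zero stride on the first axis makes every row of the view alias the same four integers, and `writeable=False` guards against accidental writes through the view.

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    x = np.arange(4)
    v = as_strided(x, shape=(3, 4), strides=(0, x.strides[0]),
                   writeable=False)
    assert np.array_equal(v[0], v[2])   # all rows are the same memory
    x[0] = 99
    assert v[1, 0] == 99                # writes to the base show everywhere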
+ + Since writing to these arrays has to be tested and done with great + care, you may want to use ``writeable=False`` to avoid accidental write + operations. + + For these reasons it is advisable to avoid ``as_strided`` when + possible. + """ + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=None, subok=subok) + interface = dict(x.__array_interface__) + if shape is not None: + interface['shape'] = tuple(shape) + if strides is not None: + interface['strides'] = tuple(strides) + + array = np.asarray(DummyArray(interface, base=x)) + # The route via `__interface__` does not preserve structured + # dtypes. Since dtype should remain unchanged, we set it explicitly. + array.dtype = x.dtype + + view = _maybe_view_as_subclass(x, array) + + if view.flags.writeable and not writeable: + view.flags.writeable = False + + return view + + +def _sliding_window_view_dispatcher(x, window_shape, axis=None, *, + subok=None, writeable=None): + return (x,) + + +@array_function_dispatch( + _sliding_window_view_dispatcher, module="numpy.lib.stride_tricks" +) +def sliding_window_view(x, window_shape, axis=None, *, + subok=False, writeable=False): + """ + Create a sliding window view into the array with the given window shape. + + Also known as rolling or moving window, the window slides across all + dimensions of the array and extracts subsets of the array at all window + positions. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + x : array_like + Array to create the sliding window view from. + window_shape : int or tuple of int + Size of window over each axis that takes part in the sliding window. + If `axis` is not present, must have same length as the number of input + array dimensions. Single integers `i` are treated as if they were the + tuple `(i,)`. + axis : int or tuple of int, optional + Axis or axes along which the sliding window is applied. + By default, the sliding window is applied to all axes and + `window_shape[i]` will refer to axis `i` of `x`. + If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to + the axis `axis[i]` of `x`. + Single integers `i` are treated as if they were the tuple `(i,)`. + subok : bool, optional + If True, sub-classes will be passed-through, otherwise the returned + array will be forced to be a base-class array (default). + writeable : bool, optional + When true, allow writing to the returned view. The default is false, + as this should be used with caution: the returned view contains the + same memory location multiple times, so writing to one location will + cause others to change. + + Returns + ------- + view : ndarray + Sliding window view of the array. The sliding window dimensions are + inserted at the end, and the original dimensions are trimmed as + required by the size of the sliding window. + That is, ``view.shape = x_shape_trimmed + window_shape``, where + ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less + than the corresponding window size. + + See Also + -------- + lib.stride_tricks.as_strided: A lower-level and less safe routine for + creating arbitrary views from custom shape and strides. + broadcast_to: broadcast an array to a given shape. + + Notes + ----- + For many applications using a sliding window view can be convenient, but + potentially very slow. Often specialized solutions exist, for example: + + - `scipy.signal.fftconvolve` + + - filtering functions in `scipy.ndimage` + + - moving window functions provided by + `bottleneck `_. 
+ + As a rough estimate, a sliding window approach with an input size of `N` + and a window size of `W` will scale as `O(N*W)` where frequently a special + algorithm can achieve `O(N)`. That means that the sliding window variant + for a window size of 100 can be a 100 times slower than a more specialized + version. + + Nevertheless, for small window sizes, when no custom algorithm exists, or + as a prototyping and developing tool, this function can be a good solution. + + Examples + -------- + >>> import numpy as np + >>> from numpy.lib.stride_tricks import sliding_window_view + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + + This also works in more dimensions, e.g. + + >>> i, j = np.ogrid[:3, :4] + >>> x = 10*i + j + >>> x.shape + (3, 4) + >>> x + array([[ 0, 1, 2, 3], + [10, 11, 12, 13], + [20, 21, 22, 23]]) + >>> shape = (2,2) + >>> v = sliding_window_view(x, shape) + >>> v.shape + (2, 3, 2, 2) + >>> v + array([[[[ 0, 1], + [10, 11]], + [[ 1, 2], + [11, 12]], + [[ 2, 3], + [12, 13]]], + [[[10, 11], + [20, 21]], + [[11, 12], + [21, 22]], + [[12, 13], + [22, 23]]]]) + + The axis can be specified explicitly: + + >>> v = sliding_window_view(x, 3, 0) + >>> v.shape + (1, 4, 3) + >>> v + array([[[ 0, 10, 20], + [ 1, 11, 21], + [ 2, 12, 22], + [ 3, 13, 23]]]) + + The same axis can be used several times. In that case, every use reduces + the corresponding original dimension: + + >>> v = sliding_window_view(x, (2, 3), (1, 1)) + >>> v.shape + (3, 1, 2, 3) + >>> v + array([[[[ 0, 1, 2], + [ 1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + + Combining with stepped slicing (`::step`), this can be used to take sliding + views which skip elements: + + >>> x = np.arange(7) + >>> sliding_window_view(x, 5)[:, ::2] + array([[0, 2, 4], + [1, 3, 5], + [2, 4, 6]]) + + or views which move by multiple elements + + >>> x = np.arange(7) + >>> sliding_window_view(x, 3)[::2, :] + array([[0, 1, 2], + [2, 3, 4], + [4, 5, 6]]) + + A common application of `sliding_window_view` is the calculation of running + statistics. The simplest example is the + `moving average `_: + + >>> x = np.arange(6) + >>> x.shape + (6,) + >>> v = sliding_window_view(x, 3) + >>> v.shape + (4, 3) + >>> v + array([[0, 1, 2], + [1, 2, 3], + [2, 3, 4], + [3, 4, 5]]) + >>> moving_average = v.mean(axis=-1) + >>> moving_average + array([1., 2., 3., 4.]) + + Note that a sliding window approach is often **not** optimal (see Notes). 
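[Editor's note] The O(N*W)-versus-O(N) remark in the Notes can be made concrete with the moving average from the Examples: the same result follows from a running cumulative sum in linear time. A small sketch, not part of the patch:

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(1_000, dtype=np.float64)
w = 100

# O(N*W): every window is reduced independently.
avg_window = sliding_window_view(x, w).mean(axis=-1)

# O(N): the same moving average from a cumulative sum.
csum = np.cumsum(np.concatenate(([0.0], x)))
avg_cumsum = (csum[w:] - csum[:-w]) / w

assert np.allclose(avg_window, avg_cumsum)
```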
+ """ + window_shape = (tuple(window_shape) + if np.iterable(window_shape) + else (window_shape,)) + # first convert input to array, possibly keeping subclass + x = np.array(x, copy=None, subok=subok) + + window_shape_array = np.array(window_shape) + if np.any(window_shape_array < 0): + raise ValueError('`window_shape` cannot contain negative values') + + if axis is None: + axis = tuple(range(x.ndim)) + if len(window_shape) != len(axis): + raise ValueError(f'Since axis is `None`, must provide ' + f'window_shape for all dimensions of `x`; ' + f'got {len(window_shape)} window_shape elements ' + f'and `x.ndim` is {x.ndim}.') + else: + axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True) + if len(window_shape) != len(axis): + raise ValueError(f'Must provide matching length window_shape and ' + f'axis; got {len(window_shape)} window_shape ' + f'elements and {len(axis)} axes elements.') + + out_strides = x.strides + tuple(x.strides[ax] for ax in axis) + + # note: same axis can be windowed repeatedly + x_shape_trimmed = list(x.shape) + for ax, dim in zip(axis, window_shape): + if x_shape_trimmed[ax] < dim: + raise ValueError( + 'window shape cannot be larger than input array shape') + x_shape_trimmed[ax] -= dim - 1 + out_shape = tuple(x_shape_trimmed) + window_shape + return as_strided(x, strides=out_strides, shape=out_shape, + subok=subok, writeable=writeable) + + +def _broadcast_to(array, shape, subok, readonly): + shape = tuple(shape) if np.iterable(shape) else (shape,) + array = np.array(array, copy=None, subok=subok) + if not shape and array.shape: + raise ValueError('cannot broadcast a non-scalar to a scalar array') + if any(size < 0 for size in shape): + raise ValueError('all elements of broadcast shape must be non-' + 'negative') + extras = [] + it = np.nditer( + (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras, + op_flags=['readonly'], itershape=shape, order='C') + with it: + # never really has writebackifcopy semantics + broadcast = it.itviews[0] + result = _maybe_view_as_subclass(array, broadcast) + # In a future version this will go away + if not readonly and array.flags._writeable_no_warn: + result.flags.writeable = True + result.flags._warn_on_write = True + return result + + +def _broadcast_to_dispatcher(array, shape, subok=None): + return (array,) + + +@array_function_dispatch(_broadcast_to_dispatcher, module='numpy') +def broadcast_to(array, shape, subok=False): + """Broadcast an array to a new shape. + + Parameters + ---------- + array : array_like + The array to broadcast. + shape : tuple or int + The shape of the desired array. A single integer ``i`` is interpreted + as ``(i,)``. + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned array will be forced to be a base-class array (default). + + Returns + ------- + broadcast : array + A readonly view on the original array with the given shape. It is + typically not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. + + Raises + ------ + ValueError + If the array is not compatible with the new shape according to NumPy's + broadcasting rules. 
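[Editor's note] A worked illustration of the stride arithmetic in the ``sliding_window_view`` implementation above: the output strides are the input strides followed by the strides of each windowed axis, and each windowed dimension shrinks by one less than the window size. Illustrative, not part of the patch:

```python
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(12, dtype=np.int64).reshape(3, 4)  # strides (32, 8)
v = sliding_window_view(x, window_shape=2, axis=1)

# out_strides = x.strides + (x.strides[1],); the windowed axis goes
# from 4 to 4 - (2 - 1) = 3, giving shape (3, 3) + (2,).
print(v.shape)    # (3, 3, 2)
print(v.strides)  # (32, 8, 8)
```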
+ + See Also + -------- + broadcast + broadcast_arrays + broadcast_shapes + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> np.broadcast_to(x, (3, 3)) + array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + return _broadcast_to(array, shape, subok=subok, readonly=True) + + +def _broadcast_shape(*args): + """Returns the shape of the arrays that would result from broadcasting the + supplied arrays against each other. + """ + # use the old-iterator because np.nditer does not handle size 0 arrays + # consistently + b = np.broadcast(*args[:32]) + # unfortunately, it cannot handle 32 or more arguments directly + for pos in range(32, len(args), 31): + # ironically, np.broadcast does not properly handle np.broadcast + # objects (it treats them as scalars) + # use broadcasting to avoid allocating the full array + b = broadcast_to(0, b.shape) + b = np.broadcast(b, *args[pos:(pos + 31)]) + return b.shape + + +_size0_dtype = np.dtype([]) + + +@set_module('numpy') +def broadcast_shapes(*args): + """ + Broadcast the input shapes into a single shape. + + :ref:`Learn more about broadcasting here `. + + .. versionadded:: 1.20.0 + + Parameters + ---------- + *args : tuples of ints, or ints + The shapes to be broadcast against each other. + + Returns + ------- + tuple + Broadcasted shape. + + Raises + ------ + ValueError + If the shapes are not compatible and cannot be broadcast according + to NumPy's broadcasting rules. + + See Also + -------- + broadcast + broadcast_arrays + broadcast_to + + Examples + -------- + >>> import numpy as np + >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) + (3, 2) + + >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) + (5, 6, 7) + """ + arrays = [np.empty(x, dtype=_size0_dtype) for x in args] + return _broadcast_shape(*arrays) + + +def _broadcast_arrays_dispatcher(*args, subok=None): + return args + + +@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy') +def broadcast_arrays(*args, subok=False): + """ + Broadcast any number of arrays against each other. + + Parameters + ---------- + *args : array_likes + The arrays to broadcast. + + subok : bool, optional + If True, then sub-classes will be passed-through, otherwise + the returned arrays will be forced to be a base-class array (default). + + Returns + ------- + broadcasted : tuple of arrays + These arrays are views on the original arrays. They are typically + not contiguous. Furthermore, more than one element of a + broadcasted array may refer to a single memory location. If you need + to write to the arrays, make copies first. While you can set the + ``writable`` flag True, writing to a single output value may end up + changing more than one location in the output array. + + .. deprecated:: 1.17 + The output is currently marked so that if written to, a deprecation + warning will be emitted. A future version will set the + ``writable`` flag False so writing to it will raise an error. + + See Also + -------- + broadcast + broadcast_to + broadcast_shapes + + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1,2,3]]) + >>> y = np.array([[4],[5]]) + >>> np.broadcast_arrays(x, y) + (array([[1, 2, 3], + [1, 2, 3]]), + array([[4, 4, 4], + [5, 5, 5]])) + + Here is a useful idiom for getting contiguous copies instead of + non-contiguous views. + + >>> [np.array(a) for a in np.broadcast_arrays(x, y)] + [array([[1, 2, 3], + [1, 2, 3]]), + array([[4, 4, 4], + [5, 5, 5]])] + + """ + # nditer is not used here to avoid the limit of 32 arrays. 
+ # Otherwise, something like the following one-liner would suffice: + # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], + # order='C').itviews + + args = [np.array(_m, copy=None, subok=subok) for _m in args] + + shape = _broadcast_shape(*args) + + result = [array if array.shape == shape + else _broadcast_to(array, shape, subok=subok, readonly=False) + for array in args] + return tuple(result) diff --git a/python/numpy/lib/_stride_tricks_impl.pyi b/python/numpy/lib/_stride_tricks_impl.pyi new file mode 100644 index 000000000..a7005d702 --- /dev/null +++ b/python/numpy/lib/_stride_tricks_impl.pyi @@ -0,0 +1,74 @@ +from collections.abc import Iterable +from typing import Any, SupportsIndex, TypeVar, overload + +from numpy import generic +from numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike + +__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"] + +_ScalarT = TypeVar("_ScalarT", bound=generic) + +class DummyArray: + __array_interface__: dict[str, Any] + base: NDArray[Any] | None + def __init__( + self, + interface: dict[str, Any], + base: NDArray[Any] | None = ..., + ) -> None: ... + +@overload +def as_strided( + x: _ArrayLike[_ScalarT], + shape: Iterable[int] | None = ..., + strides: Iterable[int] | None = ..., + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def as_strided( + x: ArrayLike, + shape: Iterable[int] | None = ..., + strides: Iterable[int] | None = ..., + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[Any]: ... + +@overload +def sliding_window_view( + x: _ArrayLike[_ScalarT], + window_shape: int | Iterable[int], + axis: SupportsIndex | None = ..., + *, + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def sliding_window_view( + x: ArrayLike, + window_shape: int | Iterable[int], + axis: SupportsIndex | None = ..., + *, + subok: bool = ..., + writeable: bool = ..., +) -> NDArray[Any]: ... + +@overload +def broadcast_to( + array: _ArrayLike[_ScalarT], + shape: int | Iterable[int], + subok: bool = ..., +) -> NDArray[_ScalarT]: ... +@overload +def broadcast_to( + array: ArrayLike, + shape: int | Iterable[int], + subok: bool = ..., +) -> NDArray[Any]: ... + +def broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ... + +def broadcast_arrays( + *args: ArrayLike, + subok: bool = ..., +) -> tuple[NDArray[Any], ...]: ... 
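[Editor's note] One non-obvious detail worth illustrating from `_broadcast_shape`/`broadcast_shapes` above: shapes are materialized as arrays of the empty structured dtype ``np.dtype([])``, whose itemsize is 0, so even enormous shapes cost no memory, and `np.broadcast` is applied in chunks to get around its 32-operand limit. A sketch under those assumptions:

```python
import numpy as np

# np.dtype([]) has itemsize 0, so these "arrays" allocate no data
# even for very large shapes.
size0 = np.dtype([])
a = np.empty((10**9, 1), dtype=size0)
b = np.empty((10**9, 7), dtype=size0)
print(a.nbytes, b.nbytes)  # 0 0

print(np.broadcast_shapes((10**9, 1), (10**9, 7)))  # (1000000000, 7)

# More than 32 shapes also works thanks to the chunked np.broadcast calls.
print(np.broadcast_shapes(*([(2, 1)] * 40), (1, 5)))  # (2, 5)
```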
diff --git a/python/numpy/lib/_twodim_base_impl.py b/python/numpy/lib/_twodim_base_impl.py new file mode 100644 index 000000000..dc6a55886 --- /dev/null +++ b/python/numpy/lib/_twodim_base_impl.py @@ -0,0 +1,1201 @@ +""" Basic functions for manipulating 2d arrays + +""" +import functools +import operator + +from numpy._core import iinfo, overrides +from numpy._core._multiarray_umath import _array_converter +from numpy._core.numeric import ( + arange, + asanyarray, + asarray, + diagonal, + empty, + greater_equal, + indices, + int8, + int16, + int32, + int64, + intp, + multiply, + nonzero, + ones, + promote_types, + where, + zeros, +) +from numpy._core.overrides import finalize_array_function_like, set_module +from numpy.lib._stride_tricks_impl import broadcast_to + +__all__ = [ + 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu', + 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices', + 'tril_indices_from', 'triu_indices', 'triu_indices_from', ] + + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +i1 = iinfo(int8) +i2 = iinfo(int16) +i4 = iinfo(int32) + + +def _min_int(low, high): + """ get small int that fits the range """ + if high <= i1.max and low >= i1.min: + return int8 + if high <= i2.max and low >= i2.min: + return int16 + if high <= i4.max and low >= i4.min: + return int32 + return int64 + + +def _flip_dispatcher(m): + return (m,) + + +@array_function_dispatch(_flip_dispatcher) +def fliplr(m): + """ + Reverse the order of elements along axis 1 (left/right). + + For a 2-D array, this flips the entries in each row in the left/right + direction. Columns are preserved, but appear in a different order than + before. + + Parameters + ---------- + m : array_like + Input array, must be at least 2-D. + + Returns + ------- + f : ndarray + A view of `m` with the columns reversed. Since a view + is returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + flipud : Flip array in the up/down direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``. + Requires the array to be at least 2-D. + + Examples + -------- + >>> import numpy as np + >>> A = np.diag([1.,2.,3.]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.fliplr(A) + array([[0., 0., 1.], + [0., 2., 0.], + [3., 0., 0.]]) + + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(2,3,5)) + >>> np.all(np.fliplr(A) == A[:,::-1,...]) + True + + """ + m = asanyarray(m) + if m.ndim < 2: + raise ValueError("Input must be >= 2-d.") + return m[:, ::-1] + + +@array_function_dispatch(_flip_dispatcher) +def flipud(m): + """ + Reverse the order of elements along axis 0 (up/down). + + For a 2-D array, this flips the entries in each column in the up/down + direction. Rows are preserved, but appear in a different order than before. + + Parameters + ---------- + m : array_like + Input array. + + Returns + ------- + out : array_like + A view of `m` with the rows reversed. Since a view is + returned, this operation is :math:`\\mathcal O(1)`. + + See Also + -------- + fliplr : Flip array in the left/right direction. + flip : Flip array in one or more dimensions. + rot90 : Rotate array counterclockwise. + + Notes + ----- + Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``. + Requires the array to be at least 1-D. 
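[Editor's note] The :math:`\mathcal O(1)` claim for `fliplr`/`flipud` follows from both being pure slicing views, which can be checked directly. An illustrative snippet, not part of the patch:

```python
import numpy as np

a = np.arange(6).reshape(2, 3)
lr = np.fliplr(a)   # equivalent to a[:, ::-1]
ud = np.flipud(a)   # equivalent to a[::-1, ...]

# Both are constant-time views sharing the original buffer.
print(np.shares_memory(a, lr), np.shares_memory(a, ud))  # True True
a[0, 0] = 99
print(lr[0, -1], ud[-1, 0])  # 99 99
```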
+ + Examples + -------- + >>> import numpy as np + >>> A = np.diag([1.0, 2, 3]) + >>> A + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) + >>> np.flipud(A) + array([[0., 0., 3.], + [0., 2., 0.], + [1., 0., 0.]]) + + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(2,3,5)) + >>> np.all(np.flipud(A) == A[::-1,...]) + True + + >>> np.flipud([1,2]) + array([2, 1]) + + """ + m = asanyarray(m) + if m.ndim < 1: + raise ValueError("Input must be >= 1-d.") + return m[::-1, ...] + + +@finalize_array_function_like +@set_module('numpy') +def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): + """ + Return a 2-D array with ones on the diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the output. + M : int, optional + Number of columns in the output. If None, defaults to `N`. + k : int, optional + Index of the diagonal: 0 (the default) refers to the main diagonal, + a positive value refers to an upper diagonal, and a negative value + to a lower diagonal. + dtype : data-type, optional + Data-type of the returned array. + order : {'C', 'F'}, optional + Whether the output should be stored in row-major (C-style) or + column-major (Fortran-style) order in memory. + device : str, optional + The device on which to place the created array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.0.0 + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + I : ndarray of shape (N,M) + An array where all elements are equal to zero, except for the `k`-th + diagonal, whose values are equal to one. + + See Also + -------- + identity : (almost) equivalent function + diag : diagonal 2-D array from a 1-D array specified by the user. + + Examples + -------- + >>> import numpy as np + >>> np.eye(2, dtype=int) + array([[1, 0], + [0, 1]]) + >>> np.eye(3, k=1) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + + """ + if like is not None: + return _eye_with_like( + like, N, M=M, k=k, dtype=dtype, order=order, device=device + ) + if M is None: + M = N + m = zeros((N, M), dtype=dtype, order=order, device=device) + if k >= M: + return m + # Ensure M and k are integers, so we don't get any surprise casting + # results in the expressions `M-k` and `M+1` used below. This avoids + # a problem with inputs with type (for example) np.uint64. + M = operator.index(M) + k = operator.index(k) + if k >= 0: + i = k + else: + i = (-k) * M + m[:M - k].flat[i::M + 1] = 1 + return m + + +_eye_with_like = array_function_dispatch()(eye) + + +def _diag_dispatcher(v, k=None): + return (v,) + + +@array_function_dispatch(_diag_dispatcher) +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + See the more detailed documentation for ``numpy.diagonal`` if you use this + function to extract a diagonal and wish to write to the resulting array; + whether it returns a copy or a view depends on what version of numpy you + are using. + + Parameters + ---------- + v : array_like + If `v` is a 2-D array, return a copy of its `k`-th diagonal. + If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th + diagonal. + k : int, optional + Diagonal in question. The default is 0. Use `k>0` for diagonals + above the main diagonal, and `k<0` for diagonals below the main + diagonal. + + Returns + ------- + out : ndarray + The extracted diagonal or constructed diagonal array. + + See Also + -------- + diagonal : Return specified diagonals. 
+ diagflat : Create a 2-D array with the flattened input as a diagonal. + trace : Sum along diagonals. + triu : Upper triangle of an array. + tril : Lower triangle of an array. + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(9).reshape((3,3)) + >>> x + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + + >>> np.diag(x) + array([0, 4, 8]) + >>> np.diag(x, k=1) + array([1, 5]) + >>> np.diag(x, k=-1) + array([3, 7]) + + >>> np.diag(np.diag(x)) + array([[0, 0, 0], + [0, 4, 0], + [0, 0, 8]]) + + """ + v = asanyarray(v) + s = v.shape + if len(s) == 1: + n = s[0] + abs(k) + res = zeros((n, n), v.dtype) + if k >= 0: + i = k + else: + i = (-k) * n + res[:n - k].flat[i::n + 1] = v + return res + elif len(s) == 2: + return diagonal(v, k) + else: + raise ValueError("Input must be 1- or 2-d.") + + +@array_function_dispatch(_diag_dispatcher) +def diagflat(v, k=0): + """ + Create a two-dimensional array with the flattened input as a diagonal. + + Parameters + ---------- + v : array_like + Input data, which is flattened and set as the `k`-th + diagonal of the output. + k : int, optional + Diagonal to set; 0, the default, corresponds to the "main" diagonal, + a positive (negative) `k` giving the number of the diagonal above + (below) the main. + + Returns + ------- + out : ndarray + The 2-D output array. + + See Also + -------- + diag : MATLAB work-alike for 1-D and 2-D arrays. + diagonal : Return specified diagonals. + trace : Sum along diagonals. + + Examples + -------- + >>> import numpy as np + >>> np.diagflat([[1,2], [3,4]]) + array([[1, 0, 0, 0], + [0, 2, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 4]]) + + >>> np.diagflat([1,2], 1) + array([[0, 1, 0], + [0, 0, 2], + [0, 0, 0]]) + + """ + conv = _array_converter(v) + v, = conv.as_arrays(subok=False) + v = v.ravel() + s = len(v) + n = s + abs(k) + res = zeros((n, n), v.dtype) + if (k >= 0): + i = arange(0, n - k, dtype=intp) + fi = i + k + i * n + else: + i = arange(0, n + k, dtype=intp) + fi = i + (i - k) * n + res.flat[fi] = v + + return conv.wrap(res) + + +@finalize_array_function_like +@set_module('numpy') +def tri(N, M=None, k=0, dtype=float, *, like=None): + """ + An array with ones at and below the given diagonal and zeros elsewhere. + + Parameters + ---------- + N : int + Number of rows in the array. + M : int, optional + Number of columns in the array. + By default, `M` is taken equal to `N`. + k : int, optional + The sub-diagonal at and below which the array is filled. + `k` = 0 is the main diagonal, while `k` < 0 is below it, + and `k` > 0 is above. The default is 0. + dtype : dtype, optional + Data type of the returned array. The default is float. + ${ARRAY_FUNCTION_LIKE} + + .. versionadded:: 1.20.0 + + Returns + ------- + tri : ndarray of shape (N, M) + Array with its lower triangle filled with ones and zero elsewhere; + in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise. 
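[Editor's note] The flat-indexing trick shared by `eye` and `diag` above rewards a closer look: in a row-major ``(n, n)`` array, consecutive diagonal entries are ``n + 1`` flat positions apart. A minimal, editorial re-creation of that step (the helper name is hypothetical):

```python
import numpy as np

def diag_like(v, k=0):
    # Mirrors the construction in diag() above for 1-D input.
    v = np.asarray(v)
    n = v.shape[0] + abs(k)
    res = np.zeros((n, n), v.dtype)
    i = k if k >= 0 else (-k) * n    # flat index of the first diagonal entry
    res[:n - k].flat[i::n + 1] = v   # stride n + 1 walks the diagonal
    return res

print(np.array_equal(diag_like([1, 2, 3], k=1), np.diag([1, 2, 3], k=1)))  # True
```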
+ + Examples + -------- + >>> import numpy as np + >>> np.tri(3, 5, 2, dtype=int) + array([[1, 1, 1, 0, 0], + [1, 1, 1, 1, 0], + [1, 1, 1, 1, 1]]) + + >>> np.tri(3, 5, -1) + array([[0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0.], + [1., 1., 0., 0., 0.]]) + + """ + if like is not None: + return _tri_with_like(like, N, M=M, k=k, dtype=dtype) + + if M is None: + M = N + + m = greater_equal.outer(arange(N, dtype=_min_int(0, N)), + arange(-k, M - k, dtype=_min_int(-k, M - k))) + + # Avoid making a copy if the requested type is already bool + m = m.astype(dtype, copy=False) + + return m + + +_tri_with_like = array_function_dispatch()(tri) + + +def _trilu_dispatcher(m, k=None): + return (m,) + + +@array_function_dispatch(_trilu_dispatcher) +def tril(m, k=0): + """ + Lower triangle of an array. + + Return a copy of an array with elements above the `k`-th diagonal zeroed. + For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two + axes. + + Parameters + ---------- + m : array_like, shape (..., M, N) + Input array. + k : int, optional + Diagonal above which to zero elements. `k = 0` (the default) is the + main diagonal, `k < 0` is below it and `k > 0` is above. + + Returns + ------- + tril : ndarray, shape (..., M, N) + Lower triangle of `m`, of same shape and data-type as `m`. + + See Also + -------- + triu : same thing, only for the upper triangle + + Examples + -------- + >>> import numpy as np + >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 0, 0, 0], + [ 4, 0, 0], + [ 7, 8, 0], + [10, 11, 12]]) + + >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 0, 0, 0, 0], + [ 5, 6, 0, 0, 0], + [10, 11, 12, 0, 0], + [15, 16, 17, 18, 0]], + [[20, 0, 0, 0, 0], + [25, 26, 0, 0, 0], + [30, 31, 32, 0, 0], + [35, 36, 37, 38, 0]], + [[40, 0, 0, 0, 0], + [45, 46, 0, 0, 0], + [50, 51, 52, 0, 0], + [55, 56, 57, 58, 0]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k, dtype=bool) + + return where(mask, m, zeros(1, m.dtype)) + + +@array_function_dispatch(_trilu_dispatcher) +def triu(m, k=0): + """ + Upper triangle of an array. + + Return a copy of an array with the elements below the `k`-th diagonal + zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the + final two axes. + + Please refer to the documentation for `tril` for further details. + + See Also + -------- + tril : lower triangle of an array + + Examples + -------- + >>> import numpy as np + >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) + array([[ 1, 2, 3], + [ 4, 5, 6], + [ 0, 8, 9], + [ 0, 0, 12]]) + + >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5)) + array([[[ 0, 1, 2, 3, 4], + [ 0, 6, 7, 8, 9], + [ 0, 0, 12, 13, 14], + [ 0, 0, 0, 18, 19]], + [[20, 21, 22, 23, 24], + [ 0, 26, 27, 28, 29], + [ 0, 0, 32, 33, 34], + [ 0, 0, 0, 38, 39]], + [[40, 41, 42, 43, 44], + [ 0, 46, 47, 48, 49], + [ 0, 0, 52, 53, 54], + [ 0, 0, 0, 58, 59]]]) + + """ + m = asanyarray(m) + mask = tri(*m.shape[-2:], k=k - 1, dtype=bool) + + return where(mask, zeros(1, m.dtype), m) + + +def _vander_dispatcher(x, N=None, increasing=None): + return (x,) + + +# Originally borrowed from John Hunter and matplotlib +@array_function_dispatch(_vander_dispatcher) +def vander(x, N=None, increasing=False): + """ + Generate a Vandermonde matrix. + + The columns of the output matrix are powers of the input vector. The + order of the powers is determined by the `increasing` boolean argument. 
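[Editor's note] `tril` and `triu` above reduce to a boolean `tri` mask routed through `where`, which a short check makes explicit (illustrative, not part of the patch):

```python
import numpy as np

m = np.arange(1, 10).reshape(3, 3)

# tril keeps entries where tri(k=0) is True; triu zeroes where tri(k=-1) is True.
lower = np.where(np.tri(*m.shape[-2:], k=0, dtype=bool), m, np.zeros(1, m.dtype))
upper = np.where(np.tri(*m.shape[-2:], k=-1, dtype=bool), np.zeros(1, m.dtype), m)

print(np.array_equal(lower, np.tril(m)))  # True
print(np.array_equal(upper, np.triu(m)))  # True
```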
+ Specifically, when `increasing` is False, the `i`-th output column is + the input vector raised element-wise to the power of ``N - i - 1``. Such + a matrix with a geometric progression in each row is named for Alexandre- + Theophile Vandermonde. + + Parameters + ---------- + x : array_like + 1-D input array. + N : int, optional + Number of columns in the output. If `N` is not specified, a square + array is returned (``N = len(x)``). + increasing : bool, optional + Order of the powers of the columns. If True, the powers increase + from left to right, if False (the default) they are reversed. + + Returns + ------- + out : ndarray + Vandermonde matrix. If `increasing` is False, the first column is + ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is + True, the columns are ``x^0, x^1, ..., x^(N-1)``. + + See Also + -------- + polynomial.polynomial.polyvander + + Examples + -------- + >>> import numpy as np + >>> x = np.array([1, 2, 3, 5]) + >>> N = 3 + >>> np.vander(x, N) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> np.column_stack([x**(N-1-i) for i in range(N)]) + array([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + + >>> x = np.array([1, 2, 3, 5]) + >>> np.vander(x) + array([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> np.vander(x, increasing=True) + array([[ 1, 1, 1, 1], + [ 1, 2, 4, 8], + [ 1, 3, 9, 27], + [ 1, 5, 25, 125]]) + + The determinant of a square Vandermonde matrix is the product + of the differences between the values of the input vector: + + >>> np.linalg.det(np.vander(x)) + 48.000000000000043 # may vary + >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) + 48 + + """ + x = asarray(x) + if x.ndim != 1: + raise ValueError("x must be a one-dimensional array or sequence.") + if N is None: + N = len(x) + + v = empty((len(x), N), dtype=promote_types(x.dtype, int)) + tmp = v[:, ::-1] if not increasing else v + + if N > 0: + tmp[:, 0] = 1 + if N > 1: + tmp[:, 1:] = x[:, None] + multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1) + + return v + + +def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None, + weights=None): + yield x + yield y + + # This terrible logic is adapted from the checks in histogram2d + try: + N = len(bins) + except TypeError: + N = 1 + if N == 2: + yield from bins # bins=[x, y] + else: + yield bins + + yield weights + + +@array_function_dispatch(_histogram2d_dispatcher) +def histogram2d(x, y, bins=10, range=None, density=None, weights=None): + """ + Compute the bi-dimensional histogram of two data samples. + + Parameters + ---------- + x : array_like, shape (N,) + An array containing the x coordinates of the points to be + histogrammed. + y : array_like, shape (N,) + An array containing the y coordinates of the points to be + histogrammed. + bins : int or array_like or [int, int] or [array, array], optional + The bin specification: + + * If int, the number of bins for the two dimensions (nx=ny=bins). + * If array_like, the bin edges for the two dimensions + (x_edges=y_edges=bins). + * If [int, int], the number of bins in each dimension + (nx, ny = bins). + * If [array, array], the bin edges in each dimension + (x_edges, y_edges = bins). + * A combination [int, array] or [array, int], where int + is the number of bins and array is the bin edges. + + range : array_like, shape(2,2), optional + The leftmost and rightmost edges of the bins along each dimension + (if not specified explicitly in the `bins` parameters): + ``[[xmin, xmax], [ymin, ymax]]``. 
All values outside of this range + will be considered outliers and not tallied in the histogram. + density : bool, optional + If False, the default, returns the number of samples in each bin. + If True, returns the probability *density* function at the bin, + ``bin_count / sample_count / bin_area``. + weights : array_like, shape(N,), optional + An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. + Weights are normalized to 1 if `density` is True. If `density` is + False, the values of the returned histogram are equal to the sum of + the weights belonging to the samples falling into each bin. + + Returns + ------- + H : ndarray, shape(nx, ny) + The bi-dimensional histogram of samples `x` and `y`. Values in `x` + are histogrammed along the first dimension and values in `y` are + histogrammed along the second dimension. + xedges : ndarray, shape(nx+1,) + The bin edges along the first dimension. + yedges : ndarray, shape(ny+1,) + The bin edges along the second dimension. + + See Also + -------- + histogram : 1D histogram + histogramdd : Multidimensional histogram + + Notes + ----- + When `density` is True, then the returned histogram is the sample + density, defined such that the sum over bins of the product + ``bin_value * bin_area`` is 1. + + Please note that the histogram does not follow the Cartesian convention + where `x` values are on the abscissa and `y` values on the ordinate + axis. Rather, `x` is histogrammed along the first dimension of the + array (vertical), and `y` along the second dimension of the array + (horizontal). This ensures compatibility with `histogramdd`. + + Examples + -------- + >>> import numpy as np + >>> from matplotlib.image import NonUniformImage + >>> import matplotlib.pyplot as plt + + Construct a 2-D histogram with variable bin width. First define the bin + edges: + + >>> xedges = [0, 1, 3, 5] + >>> yedges = [0, 2, 3, 4, 6] + + Next we create a histogram H with random bin content: + + >>> x = np.random.normal(2, 1, 100) + >>> y = np.random.normal(1, 1, 100) + >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) + >>> # Histogram does not follow Cartesian convention (see Notes), + >>> # therefore transpose H for visualization purposes. + >>> H = H.T + + :func:`imshow ` can only display square bins: + + >>> fig = plt.figure(figsize=(7, 3)) + >>> ax = fig.add_subplot(131, title='imshow: square bins') + >>> plt.imshow(H, interpolation='nearest', origin='lower', + ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) + + + :func:`pcolormesh ` can display actual edges: + + >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', + ... aspect='equal') + >>> X, Y = np.meshgrid(xedges, yedges) + >>> ax.pcolormesh(X, Y, H) + + + :class:`NonUniformImage ` can be used to + display actual bin edges with interpolation: + + >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', + ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) + >>> im = NonUniformImage(ax, interpolation='bilinear') + >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 + >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 + >>> im.set_data(xcenters, ycenters, H) + >>> ax.add_image(im) + >>> plt.show() + + It is also possible to construct a 2-D histogram without specifying bin + edges: + + >>> # Generate non-symmetric test data + >>> n = 10000 + >>> x = np.linspace(1, 100, n) + >>> y = 2*np.log(x) + np.random.rand(n) - 0.5 + >>> # Compute 2d histogram. 
Note the order of x/y and xedges/yedges + >>> H, yedges, xedges = np.histogram2d(y, x, bins=20) + + Now we can plot the histogram using + :func:`pcolormesh `, and a + :func:`hexbin ` for comparison. + + >>> # Plot histogram using pcolormesh + >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) + >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') + >>> ax1.plot(x, 2*np.log(x), 'k-') + >>> ax1.set_xlim(x.min(), x.max()) + >>> ax1.set_ylim(y.min(), y.max()) + >>> ax1.set_xlabel('x') + >>> ax1.set_ylabel('y') + >>> ax1.set_title('histogram2d') + >>> ax1.grid() + + >>> # Create hexbin plot for comparison + >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') + >>> ax2.plot(x, 2*np.log(x), 'k-') + >>> ax2.set_title('hexbin') + >>> ax2.set_xlim(x.min(), x.max()) + >>> ax2.set_xlabel('x') + >>> ax2.grid() + + >>> plt.show() + """ + from numpy import histogramdd + + if len(x) != len(y): + raise ValueError('x and y must have the same length.') + + try: + N = len(bins) + except TypeError: + N = 1 + + if N not in {1, 2}: + xedges = yedges = asarray(bins) + bins = [xedges, yedges] + hist, edges = histogramdd([x, y], bins, range, density, weights) + return hist, edges[0], edges[1] + + +@set_module('numpy') +def mask_indices(n, mask_func, k=0): + """ + Return the indices to access (n, n) arrays, given a masking function. + + Assume `mask_func` is a function that, for a square array a of size + ``(n, n)`` with a possible offset argument `k`, when called as + ``mask_func(a, k)`` returns a new array with zeros in certain locations + (functions like `triu` or `tril` do precisely this). Then this function + returns the indices where the non-zero values would be located. + + Parameters + ---------- + n : int + The returned indices will be valid to access arrays of shape (n, n). + mask_func : callable + A function whose call signature is similar to that of `triu`, `tril`. + That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`. + `k` is an optional argument to the function. + k : scalar + An optional argument which is passed through to `mask_func`. Functions + like `triu`, `tril` take a second argument that is interpreted as an + offset. + + Returns + ------- + indices : tuple of arrays. + The `n` arrays of indices corresponding to the locations where + ``mask_func(np.ones((n, n)), k)`` is True. + + See Also + -------- + triu, tril, triu_indices, tril_indices + + Examples + -------- + >>> import numpy as np + + These are the indices that would allow you to access the upper triangular + part of any 3x3 array: + + >>> iu = np.mask_indices(3, np.triu) + + For example, if `a` is a 3x3 array: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> a[iu] + array([0, 1, 2, 4, 5, 8]) + + An offset can be passed also to the masking function. This gets us the + indices starting on the first diagonal right of the main one: + + >>> iu1 = np.mask_indices(3, np.triu, 1) + + with which we now extract only three elements: + + >>> a[iu1] + array([1, 2, 5]) + + """ + m = ones((n, n), int) + a = mask_func(m, k) + return nonzero(a != 0) + + +@set_module('numpy') +def tril_indices(n, k=0, m=None): + """ + Return the indices for the lower-triangle of an (n, m) array. + + Parameters + ---------- + n : int + The row dimension of the arrays for which the returned + indices will be valid. + k : int, optional + Diagonal offset (see `tril` for details). + m : int, optional + The column dimension of the arrays for which the returned + arrays will be valid. 
+        By default `m` is taken equal to `n`.
+
+
+    Returns
+    -------
+    inds : tuple of arrays
+        The row and column indices, respectively. The row indices are sorted
+        in non-decreasing order, and the corresponding column indices are
+        strictly increasing for each row.
+
+    See also
+    --------
+    triu_indices : similar function, for upper-triangular.
+    mask_indices : generic function accepting an arbitrary mask function.
+    tril, triu
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Compute two different sets of indices to access 4x4 arrays, one for the
+    lower triangular part starting at the main diagonal, and one starting two
+    diagonals further right:
+
+    >>> il1 = np.tril_indices(4)
+    >>> il1
+    (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+    Note that row indices (first array) are non-decreasing, and the corresponding
+    column indices (second array) are strictly increasing for each row.
+    Here is how they can be used with a sample array:
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Both for indexing:
+
+    >>> a[il1]
+    array([ 0,  4,  5, ..., 13, 14, 15])
+
+    And for assigning values:
+
+    >>> a[il1] = -1
+    >>> a
+    array([[-1,  1,  2,  3],
+           [-1, -1,  6,  7],
+           [-1, -1, -1, 11],
+           [-1, -1, -1, -1]])
+
+    These cover almost the whole array (two diagonals right of the main one):
+
+    >>> il2 = np.tril_indices(4, 2)
+    >>> a[il2] = -10
+    >>> a
+    array([[-10, -10, -10,   3],
+           [-10, -10, -10, -10],
+           [-10, -10, -10, -10],
+           [-10, -10, -10, -10]])
+
+    """
+    tri_ = tri(n, m, k=k, dtype=bool)
+
+    return tuple(broadcast_to(inds, tri_.shape)[tri_]
+                 for inds in indices(tri_.shape, sparse=True))
+
+
+def _trilu_indices_form_dispatcher(arr, k=None):
+    return (arr,)
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def tril_indices_from(arr, k=0):
+    """
+    Return the indices for the lower-triangle of arr.
+
+    See `tril_indices` for full details.
+
+    Parameters
+    ----------
+    arr : array_like
+        The indices will be valid for square arrays whose dimensions are
+        the same as arr.
+    k : int, optional
+        Diagonal offset (see `tril` for details).
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Create a 4 by 4 array
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Pass the array to get the indices of the lower triangular elements.
+
+    >>> trili = np.tril_indices_from(a)
+    >>> trili
+    (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+    >>> a[trili]
+    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])
+
+    This is syntactic sugar for tril_indices().
+
+    >>> np.tril_indices(a.shape[0])
+    (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+    Use the `k` parameter to return the indices for the lower triangular array
+    up to the k-th diagonal.
+
+    >>> trili1 = np.tril_indices_from(a, k=1)
+    >>> a[trili1]
+    array([ 0,  1,  4,  5,  6,  8,  9, 10, 11, 12, 13, 14, 15])
+
+    See Also
+    --------
+    tril_indices, tril, triu_indices_from
+    """
+    if arr.ndim != 2:
+        raise ValueError("input array must be 2-d")
+    return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+
+
+@set_module('numpy')
+def triu_indices(n, k=0, m=None):
+    """
+    Return the indices for the upper-triangle of an (n, m) array.
+
+    Parameters
+    ----------
+    n : int
+        The size of the arrays for which the returned indices will
+        be valid.
+    k : int, optional
+        Diagonal offset (see `triu` for details).
+    m : int, optional
+        The column dimension of the arrays for which the returned
+        arrays will be valid.
+        By default `m` is taken equal to `n`.
+
+
+    Returns
+    -------
+    inds : tuple, shape(2) of ndarrays, shape(`n`)
+        The row and column indices, respectively. The row indices are sorted
+        in non-decreasing order, and the corresponding column indices are
+        strictly increasing for each row.
+
+    See also
+    --------
+    tril_indices : similar function, for lower-triangular.
+    mask_indices : generic function accepting an arbitrary mask function.
+    triu, tril
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Compute two different sets of indices to access 4x4 arrays, one for the
+    upper triangular part starting at the main diagonal, and one starting two
+    diagonals further right:
+
+    >>> iu1 = np.triu_indices(4)
+    >>> iu1
+    (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+    Note that row indices (first array) are non-decreasing, and the corresponding
+    column indices (second array) are strictly increasing for each row.
+
+    Here is how they can be used with a sample array:
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Both for indexing:
+
+    >>> a[iu1]
+    array([ 0,  1,  2, ..., 10, 11, 15])
+
+    And for assigning values:
+
+    >>> a[iu1] = -1
+    >>> a
+    array([[-1, -1, -1, -1],
+           [ 4, -1, -1, -1],
+           [ 8,  9, -1, -1],
+           [12, 13, 14, -1]])
+
+    These cover only a small part of the whole array (two diagonals right
+    of the main one):
+
+    >>> iu2 = np.triu_indices(4, 2)
+    >>> a[iu2] = -10
+    >>> a
+    array([[ -1,  -1, -10, -10],
+           [  4,  -1,  -1, -10],
+           [  8,   9,  -1,  -1],
+           [ 12,  13,  14,  -1]])
+
+    """
+    tri_ = ~tri(n, m, k=k - 1, dtype=bool)
+
+    return tuple(broadcast_to(inds, tri_.shape)[tri_]
+                 for inds in indices(tri_.shape, sparse=True))
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def triu_indices_from(arr, k=0):
+    """
+    Return the indices for the upper-triangle of arr.
+
+    See `triu_indices` for full details.
+
+    Parameters
+    ----------
+    arr : ndarray, shape(N, N)
+        The indices will be valid for square arrays.
+    k : int, optional
+        Diagonal offset (see `triu` for details).
+
+    Returns
+    -------
+    triu_indices_from : tuple, shape(2) of ndarray, shape(N)
+        Indices for the upper-triangle of `arr`.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    Create a 4 by 4 array
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Pass the array to get the indices of the upper triangular elements.
+
+    >>> triui = np.triu_indices_from(a)
+    >>> triui
+    (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+    >>> a[triui]
+    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])
+
+    This is syntactic sugar for triu_indices().
+
+    >>> np.triu_indices(a.shape[0])
+    (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+    Use the `k` parameter to return the indices for the upper triangular array
+    from the k-th diagonal.
+ + >>> triuim1 = np.triu_indices_from(a, k=1) + >>> a[triuim1] + array([ 1, 2, 3, 6, 7, 11]) + + + See Also + -------- + triu_indices, triu, tril_indices_from + """ + if arr.ndim != 2: + raise ValueError("input array must be 2-d") + return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1]) diff --git a/python/numpy/lib/_twodim_base_impl.pyi b/python/numpy/lib/_twodim_base_impl.pyi new file mode 100644 index 000000000..43df38ed5 --- /dev/null +++ b/python/numpy/lib/_twodim_base_impl.pyi @@ -0,0 +1,438 @@ +from collections.abc import Callable, Sequence +from typing import ( + Any, + TypeAlias, + TypeVar, + overload, +) +from typing import ( + Literal as L, +) + +import numpy as np +from numpy import ( + _OrderCF, + complex128, + complexfloating, + datetime64, + float64, + floating, + generic, + int_, + intp, + object_, + signedinteger, + timedelta64, +) +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _DTypeLike, + _SupportsArray, + _SupportsArrayFunc, +) + +__all__ = [ + "diag", + "diagflat", + "eye", + "fliplr", + "flipud", + "tri", + "triu", + "tril", + "vander", + "histogram2d", + "mask_indices", + "tril_indices", + "tril_indices_from", + "triu_indices", + "triu_indices_from", +] + +### + +_T = TypeVar("_T") +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ComplexFloatingT = TypeVar("_ComplexFloatingT", bound=np.complexfloating) +_InexactT = TypeVar("_InexactT", bound=np.inexact) +_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co) + +# The returned arrays dtype must be compatible with `np.equal` +_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]] + +_Int_co: TypeAlias = np.integer | np.bool +_Float_co: TypeAlias = np.floating | _Int_co +_Number_co: TypeAlias = np.number | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT] +_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co] +_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co] +_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co] +_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co] + +### + +@overload +def fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def fliplr(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ... +@overload +def flipud(m: ArrayLike) -> NDArray[Any]: ... + +@overload +def eye( + N: int, + M: int | None = ..., + k: int = ..., + dtype: None = ..., + order: _OrderCF = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[float64]: ... +@overload +def eye( + N: int, + M: int | None, + k: int, + dtype: _DTypeLike[_ScalarT], + order: _OrderCF = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... +@overload +def eye( + N: int, + M: int | None = ..., + k: int = ..., + *, + dtype: _DTypeLike[_ScalarT], + order: _OrderCF = ..., + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[_ScalarT]: ... 
+@overload +def eye( + N: int, + M: int | None = ..., + k: int = ..., + dtype: DTypeLike = ..., + order: _OrderCF = ..., + *, + device: L["cpu"] | None = ..., + like: _SupportsArrayFunc | None = ..., +) -> NDArray[Any]: ... + +@overload +def diag(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +@overload +def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def diagflat(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ... +@overload +def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ... + +@overload +def tri( + N: int, + M: int | None = ..., + k: int = ..., + dtype: None = ..., + *, + like: _SupportsArrayFunc | None = ... +) -> NDArray[float64]: ... +@overload +def tri( + N: int, + M: int | None, + k: int, + dtype: _DTypeLike[_ScalarT], + *, + like: _SupportsArrayFunc | None = ... +) -> NDArray[_ScalarT]: ... +@overload +def tri( + N: int, + M: int | None = ..., + k: int = ..., + *, + dtype: _DTypeLike[_ScalarT], + like: _SupportsArrayFunc | None = ... +) -> NDArray[_ScalarT]: ... +@overload +def tri( + N: int, + M: int | None = ..., + k: int = ..., + dtype: DTypeLike = ..., + *, + like: _SupportsArrayFunc | None = ... +) -> NDArray[Any]: ... + +@overload +def tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... + +@overload +def triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ... +@overload +def triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ... + +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeInt_co, + N: int | None = ..., + increasing: bool = ..., +) -> NDArray[signedinteger]: ... +@overload +def vander( # type: ignore[misc] + x: _ArrayLikeFloat_co, + N: int | None = ..., + increasing: bool = ..., +) -> NDArray[floating]: ... +@overload +def vander( + x: _ArrayLikeComplex_co, + N: int | None = ..., + increasing: bool = ..., +) -> NDArray[complexfloating]: ... +@overload +def vander( + x: _ArrayLikeObject_co, + N: int | None = ..., + increasing: bool = ..., +) -> NDArray[object_]: ... + +@overload +def histogram2d( + x: _ArrayLike1D[_ComplexFloatingT], + y: _ArrayLike1D[_ComplexFloatingT | _Float_co], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_ComplexFloatingT], + NDArray[_ComplexFloatingT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_ComplexFloatingT | _Float_co], + y: _ArrayLike1D[_ComplexFloatingT], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_ComplexFloatingT], + NDArray[_ComplexFloatingT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT | _Int_co], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_InexactT], + NDArray[_InexactT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT | _Int_co], + y: _ArrayLike1D[_InexactT], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_InexactT], + NDArray[_InexactT], +]: ... 
+@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[float64], + NDArray[float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: int | Sequence[int] = ..., + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[complex128 | float64], + NDArray[complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_NumberCoT], + NDArray[_NumberCoT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_InexactT], + y: _ArrayLike1D[_InexactT], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_NumberCoT | _InexactT], + NDArray[_NumberCoT | _InexactT], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float], + y: _ArrayLike1DInt_co | Sequence[float], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_NumberCoT | float64], + NDArray[_NumberCoT | float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex], + y: Sequence[complex], + bins: Sequence[_ArrayLike1D[_NumberCoT] | int], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[_NumberCoT | complex128 | float64], + NDArray[_NumberCoT | complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[bool]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.bool], + NDArray[np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[int]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.int_ | np.bool], + NDArray[np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[float]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.float64 | np.int_ | np.bool], + NDArray[np.float64 | np.int_ | np.bool], +]: ... 
+@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[complex]], + range: _ArrayLike2DFloat_co | None = ..., + density: bool | None = ..., + weights: _ArrayLike1DFloat_co | None = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], +]: ... + +# NOTE: we're assuming/demanding here the `mask_func` returns +# an ndarray of shape `(n, n)`; otherwise there is the possibility +# of the output tuple having more or less than 2 elements +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[int], + k: int = ..., +) -> tuple[NDArray[intp], NDArray[intp]]: ... +@overload +def mask_indices( + n: int, + mask_func: _MaskFunc[_T], + k: _T, +) -> tuple[NDArray[intp], NDArray[intp]]: ... + +def tril_indices( + n: int, + k: int = ..., + m: int | None = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def tril_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices( + n: int, + k: int = ..., + m: int | None = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... + +def triu_indices_from( + arr: NDArray[Any], + k: int = ..., +) -> tuple[NDArray[int_], NDArray[int_]]: ... diff --git a/python/numpy/lib/_type_check_impl.py b/python/numpy/lib/_type_check_impl.py new file mode 100644 index 000000000..977609caa --- /dev/null +++ b/python/numpy/lib/_type_check_impl.py @@ -0,0 +1,699 @@ +"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py + +""" +import functools + +__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex', + 'isreal', 'nan_to_num', 'real', 'real_if_close', + 'typename', 'mintypecode', + 'common_type'] + +import numpy._core.numeric as _nx +from numpy._core import getlimits, overrides +from numpy._core.numeric import asanyarray, asarray, isnan, zeros +from numpy._utils import set_module + +from ._ufunclike_impl import isneginf, isposinf + +array_function_dispatch = functools.partial( + overrides.array_function_dispatch, module='numpy') + + +_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?' + + +@set_module('numpy') +def mintypecode(typechars, typeset='GDFgdf', default='d'): + """ + Return the character for the minimum-size type to which given types can + be safely cast. + + The returned type character must represent the smallest size dtype such + that an array of the returned type can handle the data from an array of + all types in `typechars` (or if `typechars` is an array, then its + dtype.char). + + Parameters + ---------- + typechars : list of str or array_like + If a list of strings, each string should represent a dtype. + If array_like, the character representation of the array dtype is used. + typeset : str or list of str, optional + The set of characters that the returned character is chosen from. + The default set is 'GDFgdf'. + default : str, optional + The default character, this is returned if none of the characters in + `typechars` matches a character in `typeset`. + + Returns + ------- + typechar : str + The character representing the minimum-size type that was found. 
+ + See Also + -------- + dtype + + Examples + -------- + >>> import numpy as np + >>> np.mintypecode(['d', 'f', 'S']) + 'd' + >>> x = np.array([1.1, 2-3.j]) + >>> np.mintypecode(x) + 'D' + + >>> np.mintypecode('abceh', default='G') + 'G' + + """ + typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char + for t in typechars) + intersection = {t for t in typecodes if t in typeset} + if not intersection: + return default + if 'F' in intersection and 'd' in intersection: + return 'D' + return min(intersection, key=_typecodes_by_elsize.index) + + +def _real_dispatcher(val): + return (val,) + + +@array_function_dispatch(_real_dispatcher) +def real(val): + """ + Return the real part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The real component of the complex argument. If `val` is real, the type + of `val` is used for the output. If `val` has complex elements, the + returned type is float. + + See Also + -------- + real_if_close, imag, angle + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.real + array([1., 3., 5.]) + >>> a.real = 9 + >>> a + array([9.+2.j, 9.+4.j, 9.+6.j]) + >>> a.real = np.array([9, 8, 7]) + >>> a + array([9.+2.j, 8.+4.j, 7.+6.j]) + >>> np.real(1 + 1j) + 1.0 + + """ + try: + return val.real + except AttributeError: + return asanyarray(val).real + + +def _imag_dispatcher(val): + return (val,) + + +@array_function_dispatch(_imag_dispatcher) +def imag(val): + """ + Return the imaginary part of the complex argument. + + Parameters + ---------- + val : array_like + Input array. + + Returns + ------- + out : ndarray or scalar + The imaginary component of the complex argument. If `val` is real, + the type of `val` is used for the output. If `val` has complex + elements, the returned type is float. + + See Also + -------- + real, angle, real_if_close + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+2j, 3+4j, 5+6j]) + >>> a.imag + array([2., 4., 6.]) + >>> a.imag = np.array([8, 10, 12]) + >>> a + array([1. +8.j, 3.+10.j, 5.+12.j]) + >>> np.imag(1 + 1j) + 1.0 + + """ + try: + return val.imag + except AttributeError: + return asanyarray(val).imag + + +def _is_type_dispatcher(x): + return (x,) + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplex(x): + """ + Returns a bool array, where True if input element is complex. + + What is tested is whether the input has a non-zero imaginary part, not if + the input type is complex. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray of bools + Output array. + + See Also + -------- + isreal + iscomplexobj : Return True if x is a complex type or an array of complex + numbers. + + Examples + -------- + >>> import numpy as np + >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) + array([ True, False, False, False, False, True]) + + """ + ax = asanyarray(x) + if issubclass(ax.dtype.type, _nx.complexfloating): + return ax.imag != 0 + res = zeros(ax.shape, bool) + return res[()] # convert to scalar if needed + + +@array_function_dispatch(_is_type_dispatcher) +def isreal(x): + """ + Returns a bool array, where True if input element is real. + + If element has complex type with zero imaginary part, the return value + for that element is True. + + Parameters + ---------- + x : array_like + Input array. + + Returns + ------- + out : ndarray, bool + Boolean array of same shape as `x`. 
+ + Notes + ----- + `isreal` may behave unexpectedly for string or object arrays (see examples) + + See Also + -------- + iscomplex + isrealobj : Return True if x is not a complex type. + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) + >>> np.isreal(a) + array([False, True, True, True, True, False]) + + The function does not work on string arrays. + + >>> a = np.array([2j, "a"], dtype="U") + >>> np.isreal(a) # Warns about non-elementwise comparison + False + + Returns True for all elements in input array of ``dtype=object`` even if + any of the elements is complex. + + >>> a = np.array([1, "2", 3+4j], dtype=object) + >>> np.isreal(a) + array([ True, True, True]) + + isreal should not be used with object arrays + + >>> a = np.array([1+2j, 2+1j], dtype=object) + >>> np.isreal(a) + array([ True, True]) + + """ + return imag(x) == 0 + + +@array_function_dispatch(_is_type_dispatcher) +def iscomplexobj(x): + """ + Check for a complex type or an array of complex numbers. + + The type of the input is checked, not the value. Even if the input + has an imaginary part equal to zero, `iscomplexobj` evaluates to True. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + iscomplexobj : bool + The return value, True if `x` is of a complex type or has at least + one complex element. + + See Also + -------- + isrealobj, iscomplex + + Examples + -------- + >>> import numpy as np + >>> np.iscomplexobj(1) + False + >>> np.iscomplexobj(1+0j) + True + >>> np.iscomplexobj([3, 1+0j, True]) + True + + """ + try: + dtype = x.dtype + type_ = dtype.type + except AttributeError: + type_ = asarray(x).dtype.type + return issubclass(type_, _nx.complexfloating) + + +@array_function_dispatch(_is_type_dispatcher) +def isrealobj(x): + """ + Return True if x is a not complex type or an array of complex numbers. + + The type of the input is checked, not the value. So even if the input + has an imaginary part equal to zero, `isrealobj` evaluates to False + if the data type is complex. + + Parameters + ---------- + x : any + The input can be of any type and shape. + + Returns + ------- + y : bool + The return value, False if `x` is of a complex type. + + See Also + -------- + iscomplexobj, isreal + + Notes + ----- + The function is only meant for arrays with numerical values but it + accepts all other objects. Since it assumes array input, the return + value of other objects may be True. + + >>> np.isrealobj('A string') + True + >>> np.isrealobj(False) + True + >>> np.isrealobj(None) + True + + Examples + -------- + >>> import numpy as np + >>> np.isrealobj(1) + True + >>> np.isrealobj(1+0j) + False + >>> np.isrealobj([3, 1+0j, True]) + False + + """ + return not iscomplexobj(x) + +#----------------------------------------------------------------------------- + +def _getmaxmin(t): + from numpy._core import getlimits + f = getlimits.finfo(t) + return f.max, f.min + + +def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): + return (x,) + + +@array_function_dispatch(_nan_to_num_dispatcher) +def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): + """ + Replace NaN with zero and infinity with large finite numbers (default + behaviour) or with the numbers defined by the user using the `nan`, + `posinf` and/or `neginf` keywords. 
+
+    If `x` is inexact, NaN is replaced by zero or by the user defined value in
+    `nan` keyword, infinity is replaced by the largest finite floating point
+    values representable by ``x.dtype`` or by the user defined value in
+    `posinf` keyword and -infinity is replaced by the most negative finite
+    floating point values representable by ``x.dtype`` or by the user defined
+    value in `neginf` keyword.
+
+    For complex dtypes, the above is applied to each of the real and
+    imaginary components of `x` separately.
+
+    If `x` is not inexact, then no replacements are made.
+
+    Parameters
+    ----------
+    x : scalar or array_like
+        Input data.
+    copy : bool, optional
+        Whether to create a copy of `x` (True) or to replace values
+        in-place (False). The in-place operation only occurs if
+        casting to an array does not require a copy.
+        Default is True.
+    nan : int, float, optional
+        Value to be used to fill NaN values. If no value is passed
+        then NaN values will be replaced with 0.0.
+    posinf : int, float, optional
+        Value to be used to fill positive infinity values. If no value is
+        passed then positive infinity values will be replaced with a very
+        large number.
+    neginf : int, float, optional
+        Value to be used to fill negative infinity values. If no value is
+        passed then negative infinity values will be replaced with a very
+        small (or negative) number.
+
+    Returns
+    -------
+    out : ndarray
+        `x`, with the non-finite values replaced. If `copy` is False, this may
+        be `x` itself.
+
+    See Also
+    --------
+    isinf : Shows which elements are positive or negative infinity.
+    isneginf : Shows which elements are negative infinity.
+    isposinf : Shows which elements are positive infinity.
+    isnan : Shows which elements are Not a Number (NaN).
+    isfinite : Shows which elements are finite (not NaN, not infinity)
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.nan_to_num(np.inf)
+    1.7976931348623157e+308
+    >>> np.nan_to_num(-np.inf)
+    -1.7976931348623157e+308
+    >>> np.nan_to_num(np.nan)
+    0.0
+    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+    >>> np.nan_to_num(x)
+    array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000, # may vary
+           -1.28000000e+002,  1.28000000e+002])
+    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+    array([ 3.3333333e+07,  3.3333333e+07, -9.9990000e+03,
+           -1.2800000e+02,  1.2800000e+02])
+    >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+    >>> np.nan_to_num(y)
+    array([ 1.79769313e+308 +0.00000000e+000j, # may vary
+            0.00000000e+000 +0.00000000e+000j,
+            0.00000000e+000 +1.79769313e+308j])
+    >>> np.nan_to_num(y, nan=111111, posinf=222222)
+    array([222222.+111111.j, 111111.
+0.j, 111111.+222222.j]) + """ + x = _nx.array(x, subok=True, copy=copy) + xtype = x.dtype.type + + isscalar = (x.ndim == 0) + + if not issubclass(xtype, _nx.inexact): + return x[()] if isscalar else x + + iscomplex = issubclass(xtype, _nx.complexfloating) + + dest = (x.real, x.imag) if iscomplex else (x,) + maxf, minf = _getmaxmin(x.real.dtype) + if posinf is not None: + maxf = posinf + if neginf is not None: + minf = neginf + for d in dest: + idx_nan = isnan(d) + idx_posinf = isposinf(d) + idx_neginf = isneginf(d) + _nx.copyto(d, nan, where=idx_nan) + _nx.copyto(d, maxf, where=idx_posinf) + _nx.copyto(d, minf, where=idx_neginf) + return x[()] if isscalar else x + +#----------------------------------------------------------------------------- + +def _real_if_close_dispatcher(a, tol=None): + return (a,) + + +@array_function_dispatch(_real_if_close_dispatcher) +def real_if_close(a, tol=100): + """ + If input is complex with all imaginary parts close to zero, return + real parts. + + "Close to zero" is defined as `tol` * (machine epsilon of the type for + `a`). + + Parameters + ---------- + a : array_like + Input array. + tol : float + Tolerance in machine epsilons for the complex part of the elements + in the array. If the tolerance is <=1, then the absolute tolerance + is used. + + Returns + ------- + out : ndarray + If `a` is real, the type of `a` is used for the output. If `a` + has complex elements, the returned type is float. + + See Also + -------- + real, imag, angle + + Notes + ----- + Machine epsilon varies from machine to machine and between data types + but Python floats on most platforms have a machine epsilon equal to + 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print + out the machine epsilon for floats. + + Examples + -------- + >>> import numpy as np + >>> np.finfo(float).eps + 2.2204460492503131e-16 # may vary + + >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000) + array([2.1, 5.2]) + >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000) + array([2.1+4.e-13j, 5.2 + 3e-15j]) + + """ + a = asanyarray(a) + type_ = a.dtype.type + if not issubclass(type_, _nx.complexfloating): + return a + if tol > 1: + f = getlimits.finfo(type_) + tol = f.eps * tol + if _nx.all(_nx.absolute(a.imag) < tol): + a = a.real + return a + + +#----------------------------------------------------------------------------- + +_namefromtype = {'S1': 'character', + '?': 'bool', + 'b': 'signed char', + 'B': 'unsigned char', + 'h': 'short', + 'H': 'unsigned short', + 'i': 'integer', + 'I': 'unsigned integer', + 'l': 'long integer', + 'L': 'unsigned long integer', + 'q': 'long long integer', + 'Q': 'unsigned long long integer', + 'f': 'single precision', + 'd': 'double precision', + 'g': 'long precision', + 'F': 'complex single precision', + 'D': 'complex double precision', + 'G': 'complex long double precision', + 'S': 'string', + 'U': 'unicode', + 'V': 'void', + 'O': 'object' + } + +@set_module('numpy') +def typename(char): + """ + Return a description for the given data type code. + + Parameters + ---------- + char : str + Data type code. + + Returns + ------- + out : str + Description of the input data type code. + + See Also + -------- + dtype + + Examples + -------- + >>> import numpy as np + >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', + ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] + >>> for typechar in typechars: + ... print(typechar, ' : ', np.typename(typechar)) + ... + S1 : character + ? 
: bool
+    B : unsigned char
+    D : complex double precision
+    G : complex long double precision
+    F : complex single precision
+    I : unsigned integer
+    H : unsigned short
+    L : unsigned long integer
+    O : object
+    Q : unsigned long long integer
+    S : string
+    U : unicode
+    V : void
+    b : signed char
+    d : double precision
+    g : long precision
+    f : single precision
+    i : integer
+    h : short
+    l : long integer
+    q : long long integer
+
+    """
+    return _namefromtype[char]
+
+#-----------------------------------------------------------------------------
+
+
+#determine the "minimum common type" for a group of arrays.
+array_type = [[_nx.float16, _nx.float32, _nx.float64, _nx.longdouble],
+              [None, _nx.complex64, _nx.complex128, _nx.clongdouble]]
+array_precision = {_nx.float16: 0,
+                   _nx.float32: 1,
+                   _nx.float64: 2,
+                   _nx.longdouble: 3,
+                   _nx.complex64: 1,
+                   _nx.complex128: 2,
+                   _nx.clongdouble: 3}
+
+
+def _common_type_dispatcher(*arrays):
+    return arrays
+
+
+@array_function_dispatch(_common_type_dispatcher)
+def common_type(*arrays):
+    """
+    Return a scalar type which is common to the input arrays.
+
+    The return type will always be an inexact (i.e. floating point) scalar
+    type, even if all the arrays are integer arrays. If one of the inputs is
+    an integer array, the minimum precision type that is returned is a
+    64-bit floating point dtype.
+
+    All input arrays except int64 and uint64 can be safely cast to the
+    returned dtype without loss of information.
+
+    Parameters
+    ----------
+    array1, array2, ... : ndarrays
+        Input arrays.
+
+    Returns
+    -------
+    out : data type code
+        Data type code.
+
+    See Also
+    --------
+    dtype, mintypecode
+
+    Examples
+    --------
+    >>> np.common_type(np.arange(2, dtype=np.float32))
+    <class 'numpy.float32'>
+    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
+    <class 'numpy.float64'>
+    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+    <class 'numpy.complex128'>
+
+    """
+    is_complex = False
+    precision = 0
+    for a in arrays:
+        t = a.dtype.type
+        if iscomplexobj(a):
+            is_complex = True
+        if issubclass(t, _nx.integer):
+            p = 2  # array_precision[_nx.double]
+        else:
+            p = array_precision.get(t)
+            if p is None:
+                raise TypeError("can't get common type for non-numeric array")
+        precision = max(precision, p)
+    if is_complex:
+        return array_type[1][precision]
+    else:
+        return array_type[0][precision]
diff --git a/python/numpy/lib/_type_check_impl.pyi b/python/numpy/lib/_type_check_impl.pyi
new file mode 100644
index 000000000..944015e42
--- /dev/null
+++ b/python/numpy/lib/_type_check_impl.pyi
@@ -0,0 +1,350 @@
+from collections.abc import Container, Iterable
+from typing import Any, Protocol, TypeAlias, overload, type_check_only
+from typing import Literal as L
+
+from _typeshed import Incomplete
+from typing_extensions import TypeVar
+
+import numpy as np
+from numpy._typing import (
+    ArrayLike,
+    NDArray,
+    _16Bit,
+    _32Bit,
+    _64Bit,
+    _ArrayLike,
+    _NestedSequence,
+    _ScalarLike_co,
+    _SupportsArray,
+)
+
+__all__ = [
+    "common_type",
+    "imag",
+    "iscomplex",
+    "iscomplexobj",
+    "isreal",
+    "isrealobj",
+    "mintypecode",
+    "nan_to_num",
+    "real",
+    "real_if_close",
+    "typename",
+]
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_ScalarT = TypeVar("_ScalarT", bound=np.generic)
+_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)
+_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool)
+
+_FloatMax32: TypeAlias = np.float32 | np.float16
+_ComplexMax128: TypeAlias = np.complex128 | np.complex64
+_RealMax64: TypeAlias = np.float64 | np.float32 |
np.float16 | np.integer +_Real: TypeAlias = np.floating | np.integer +_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16 +_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer + +@type_check_only +class _HasReal(Protocol[_T_co]): + @property + def real(self, /) -> _T_co: ... + +@type_check_only +class _HasImag(Protocol[_T_co]): + @property + def imag(self, /) -> _T_co: ... + +@type_check_only +class _HasDType(Protocol[_ScalarT_co]): + @property + def dtype(self, /) -> np.dtype[_ScalarT_co]: ... + +### + +def mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[str] = "GDFgdf", default: str = "d") -> str: ... + +# +@overload +def real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap] +@overload +def real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +@overload +def real(val: ArrayLike) -> NDArray[Any]: ... + +# +@overload +def imag(val: _HasImag[_T]) -> _T: ... # type: ignore[overload-overlap] +@overload +def imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ... +@overload +def imag(val: ArrayLike) -> NDArray[Any]: ... + +# +@overload +def iscomplex(x: _ScalarLike_co) -> np.bool: ... +@overload +def iscomplex(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ... +@overload +def iscomplex(x: ArrayLike) -> np.bool | NDArray[np.bool]: ... + +# +@overload +def isreal(x: _ScalarLike_co) -> np.bool: ... +@overload +def isreal(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ... +@overload +def isreal(x: ArrayLike) -> np.bool | NDArray[np.bool]: ... + +# +def iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ... +def isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ... + +# +@overload +def nan_to_num( + x: _ScalarT, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> _ScalarT: ... +@overload +def nan_to_num( + x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[_ScalarT]: ... +@overload +def nan_to_num( + x: _SupportsArray[np.dtype[_ScalarT]], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> _ScalarT | NDArray[_ScalarT]: ... +@overload +def nan_to_num( + x: _NestedSequence[ArrayLike], + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> NDArray[Incomplete]: ... +@overload +def nan_to_num( + x: ArrayLike, + copy: bool = True, + nan: float = 0.0, + posinf: float | None = None, + neginf: float | None = None, +) -> Incomplete: ... + +# NOTE: The [overload-overlap] mypy error is a false positive +@overload +def real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... # type: ignore[overload-overlap] +@overload +def real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ... +@overload +def real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ... +@overload +def real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ... +@overload +def real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ... + +# +@overload +def typename(char: L['S1']) -> L['character']: ... +@overload +def typename(char: L['?']) -> L['bool']: ... +@overload +def typename(char: L['b']) -> L['signed char']: ... 
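What this tower of Literal overloads buys the caller: a type checker can resolve np.typename to the exact description string instead of a bare str, while at runtime the call is a plain dict lookup. A short illustration::

    import numpy as np

    desc = np.typename('f')   # checkers infer Literal['single precision']
    print(desc)               # single precision
    print(np.typename('D'))   # complex double precision
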
+@overload +def typename(char: L['B']) -> L['unsigned char']: ... +@overload +def typename(char: L['h']) -> L['short']: ... +@overload +def typename(char: L['H']) -> L['unsigned short']: ... +@overload +def typename(char: L['i']) -> L['integer']: ... +@overload +def typename(char: L['I']) -> L['unsigned integer']: ... +@overload +def typename(char: L['l']) -> L['long integer']: ... +@overload +def typename(char: L['L']) -> L['unsigned long integer']: ... +@overload +def typename(char: L['q']) -> L['long long integer']: ... +@overload +def typename(char: L['Q']) -> L['unsigned long long integer']: ... +@overload +def typename(char: L['f']) -> L['single precision']: ... +@overload +def typename(char: L['d']) -> L['double precision']: ... +@overload +def typename(char: L['g']) -> L['long precision']: ... +@overload +def typename(char: L['F']) -> L['complex single precision']: ... +@overload +def typename(char: L['D']) -> L['complex double precision']: ... +@overload +def typename(char: L['G']) -> L['complex long double precision']: ... +@overload +def typename(char: L['S']) -> L['string']: ... +@overload +def typename(char: L['U']) -> L['unicode']: ... +@overload +def typename(char: L['V']) -> L['void']: ... +@overload +def typename(char: L['O']) -> L['object']: ... + +# NOTE: The [overload-overlap] mypy errors are false positives +@overload +def common_type() -> type[np.float16]: ... +@overload +def common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... # type: ignore[overload-overlap] +@overload +def common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... # type: ignore[overload-overlap] +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.float64 | np.integer], + /, + *ai: _HasDType[_RealMax64], +) -> type[np.float64]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.longdouble], + /, + *ai: _HasDType[_Real], +) -> type[np.longdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.complex64], + /, + *ai: _HasDType[_InexactMax32], +) -> type[np.complex64]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.complex128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[np.clongdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[_FloatMax32], + array1: _HasDType[np.float32], + /, + *ai: _HasDType[_FloatMax32], +) -> type[np.float32]: ... +@overload +def common_type( + a0: _HasDType[_RealMax64], + array1: _HasDType[np.float64 | np.integer], + /, + *ai: _HasDType[_RealMax64], +) -> type[np.float64]: ... +@overload +def common_type( + a0: _HasDType[_Real], + array1: _HasDType[np.longdouble], + /, + *ai: _HasDType[_Real], +) -> type[np.longdouble]: ... +@overload +def common_type( # type: ignore[overload-overlap] + a0: _HasDType[_InexactMax32], + array1: _HasDType[np.complex64], + /, + *ai: _HasDType[_InexactMax32], +) -> type[np.complex64]: ... +@overload +def common_type( + a0: _HasDType[np.float64], + array1: _HasDType[_ComplexMax128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_ComplexMax128], + array1: _HasDType[np.float64], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... 
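A small runtime illustration of the promotion ladder the common_type overloads encode: integer inputs count as float64, and mixing float64 with a complex input of 64 bits or fewer lands on complex128 (a sketch consistent with the implementation above)::

    import numpy as np

    print(np.common_type(np.arange(3)))              # <class 'numpy.float64'>
    print(np.common_type(np.zeros(2, np.float32),
                         np.zeros(2, np.float16)))   # <class 'numpy.float32'>
    print(np.common_type(np.zeros(2, np.complex64),
                         np.zeros(2, np.float64)))   # <class 'numpy.complex128'>
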
+@overload +def common_type( + a0: _HasDType[_NumberMax64], + array1: _HasDType[np.complex128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_ComplexMax128], + array1: _HasDType[np.complex128 | np.integer], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[np.complex128 | np.integer], + array1: _HasDType[_ComplexMax128], + /, + *ai: _HasDType[_NumberMax64], +) -> type[np.complex128]: ... +@overload +def common_type( + a0: _HasDType[_Real], + /, + *ai: _HasDType[_Real], +) -> type[np.floating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.clongdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.longdouble], + array1: _HasDType[np.complexfloating], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complexfloating], + array1: _HasDType[np.longdouble], + /, + *ai: _HasDType[np.number], +) -> type[np.clongdouble]: ... +@overload +def common_type( + a0: _HasDType[np.complexfloating], + array1: _HasDType[np.number], + /, + *ai: _HasDType[np.number], +) -> type[np.complexfloating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.complexfloating], + /, + *ai: _HasDType[np.number], +) -> type[np.complexfloating]: ... +@overload +def common_type( + a0: _HasDType[np.number], + array1: _HasDType[np.number], + /, + *ai: _HasDType[np.number], +) -> type[Any]: ... diff --git a/python/numpy/lib/_ufunclike_impl.py b/python/numpy/lib/_ufunclike_impl.py new file mode 100644 index 000000000..695aab1b8 --- /dev/null +++ b/python/numpy/lib/_ufunclike_impl.py @@ -0,0 +1,207 @@ +""" +Module of functions that are like ufuncs in acting on arrays and optionally +storing results in an output array. + +""" +__all__ = ['fix', 'isneginf', 'isposinf'] + +import numpy._core.numeric as nx +from numpy._core.overrides import array_function_dispatch + + +def _dispatcher(x, out=None): + return (x, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def fix(x, out=None): + """ + Round to nearest integer towards zero. + + Round an array of floats element-wise to nearest integer towards zero. + The rounded values have the same data-type as the input. + + Parameters + ---------- + x : array_like + An array to be rounded + out : ndarray, optional + A location into which the result is stored. If provided, it must have + a shape that the input broadcasts to. If not provided or None, a + freshly-allocated array is returned. + + Returns + ------- + out : ndarray of floats + An array with the same dimensions and data-type as the input. + If second argument is not supplied then a new array is returned + with the rounded values. + + If a second argument is supplied the result is stored there. + The return value ``out`` is then a reference to that array. 
+ + See Also + -------- + rint, trunc, floor, ceil + around : Round to given number of decimals + + Examples + -------- + >>> import numpy as np + >>> np.fix(3.14) + 3.0 + >>> np.fix(3) + 3 + >>> np.fix([2.1, 2.9, -2.1, -2.9]) + array([ 2., 2., -2., -2.]) + + """ + # promote back to an array if flattened + res = nx.asanyarray(nx.ceil(x, out=out)) + res = nx.floor(x, out=res, where=nx.greater_equal(x, 0)) + + # when no out argument is passed and no subclasses are involved, flatten + # scalars + if out is None and type(res) is nx.ndarray: + res = res[()] + return res + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isposinf(x, out=None): + """ + Test element-wise for positive infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a boolean array is returned + with values True where the corresponding element of the input is + positive infinity and values False where the element of the input is + not positive infinity. + + If a second argument is supplied the result is stored there. If the + type of that array is a numeric type the result is represented as zeros + and ones, if the type is boolean then as False and True. + The return value `out` is then a reference to that array. + + See Also + -------- + isinf, isneginf, isfinite, isnan + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values + + Examples + -------- + >>> import numpy as np + >>> np.isposinf(np.inf) + True + >>> np.isposinf(-np.inf) + False + >>> np.isposinf([-np.inf, 0., np.inf]) + array([False, False, True]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isposinf(x, y) + array([0, 0, 1]) + >>> y + array([0, 0, 1]) + + """ + is_inf = nx.isinf(x) + try: + signbit = ~nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) + + +@array_function_dispatch(_dispatcher, verify=False, module='numpy') +def isneginf(x, out=None): + """ + Test element-wise for negative infinity, return result as bool array. + + Parameters + ---------- + x : array_like + The input array. + out : array_like, optional + A location into which the result is stored. If provided, it must have a + shape that the input broadcasts to. If not provided or None, a + freshly-allocated boolean array is returned. + + Returns + ------- + out : ndarray + A boolean array with the same dimensions as the input. + If second argument is not supplied then a numpy boolean array is + returned with values True where the corresponding element of the + input is negative infinity and values False where the element of + the input is not negative infinity. + + If a second argument is supplied the result is stored there. 
If the + type of that array is a numeric type the result is represented as + zeros and ones, if the type is boolean then as False and True. The + return value `out` is then a reference to that array. + + See Also + -------- + isinf, isposinf, isnan, isfinite + + Notes + ----- + NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic + (IEEE 754). + + Errors result if the second argument is also supplied when x is a scalar + input, if first and second arguments have different shapes, or if the + first argument has complex values. + + Examples + -------- + >>> import numpy as np + >>> np.isneginf(-np.inf) + True + >>> np.isneginf(np.inf) + False + >>> np.isneginf([-np.inf, 0., np.inf]) + array([ True, False, False]) + + >>> x = np.array([-np.inf, 0., np.inf]) + >>> y = np.array([2, 2, 2]) + >>> np.isneginf(x, y) + array([1, 0, 0]) + >>> y + array([1, 0, 0]) + + """ + is_inf = nx.isinf(x) + try: + signbit = nx.signbit(x) + except TypeError as e: + dtype = nx.asanyarray(x).dtype + raise TypeError(f'This operation is not supported for {dtype} values ' + 'because it would be ambiguous.') from e + else: + return nx.logical_and(is_inf, signbit, out) diff --git a/python/numpy/lib/_ufunclike_impl.pyi b/python/numpy/lib/_ufunclike_impl.pyi new file mode 100644 index 000000000..a673f05c0 --- /dev/null +++ b/python/numpy/lib/_ufunclike_impl.pyi @@ -0,0 +1,67 @@ +from typing import Any, TypeVar, overload + +import numpy as np +from numpy import floating, object_ +from numpy._typing import ( + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeObject_co, + _FloatLike_co, +) + +__all__ = ["fix", "isneginf", "isposinf"] + +_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any]) + +@overload +def fix( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> floating: ... +@overload +def fix( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[floating]: ... +@overload +def fix( + x: _ArrayLikeObject_co, + out: None = ..., +) -> NDArray[object_]: ... +@overload +def fix( + x: _ArrayLikeFloat_co | _ArrayLikeObject_co, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def isposinf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> np.bool: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def isposinf( + x: _ArrayLikeFloat_co, + out: _ArrayT, +) -> _ArrayT: ... + +@overload +def isneginf( # type: ignore[misc] + x: _FloatLike_co, + out: None = ..., +) -> np.bool: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: None = ..., +) -> NDArray[np.bool]: ... +@overload +def isneginf( + x: _ArrayLikeFloat_co, + out: _ArrayT, +) -> _ArrayT: ... diff --git a/python/numpy/lib/_user_array_impl.py b/python/numpy/lib/_user_array_impl.py new file mode 100644 index 000000000..f3a6c0f51 --- /dev/null +++ b/python/numpy/lib/_user_array_impl.py @@ -0,0 +1,299 @@ +""" +Container class for backward compatibility with NumArray. + +The user_array.container class exists for backward compatibility with NumArray +and is not meant to be used in new code. If you need to create an array +container class, we recommend either creating a class that wraps an ndarray +or subclasses ndarray. 
+ +""" +from numpy._core import ( + absolute, + add, + arange, + array, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + divide, + equal, + greater, + greater_equal, + invert, + left_shift, + less, + less_equal, + multiply, + not_equal, + power, + remainder, + reshape, + right_shift, + shape, + sin, + sqrt, + subtract, + transpose, +) +from numpy._core.overrides import set_module + + +@set_module("numpy.lib.user_array") +class container: + """ + container(data, dtype=None, copy=True) + + Standard container-class for easy multiple-inheritance. + + Methods + ------- + copy + byteswap + astype + + """ + def __init__(self, data, dtype=None, copy=True): + self.array = array(data, dtype, copy=copy) + + def __repr__(self): + if self.ndim > 0: + return self.__class__.__name__ + repr(self.array)[len("array"):] + else: + return self.__class__.__name__ + "(" + repr(self.array) + ")" + + def __array__(self, t=None): + if t: + return self.array.astype(t) + return self.array + + # Array as sequence + def __len__(self): + return len(self.array) + + def __getitem__(self, index): + return self._rc(self.array[index]) + + def __setitem__(self, index, value): + self.array[index] = asarray(value, self.dtype) + + def __abs__(self): + return self._rc(absolute(self.array)) + + def __neg__(self): + return self._rc(-self.array) + + def __add__(self, other): + return self._rc(self.array + asarray(other)) + + __radd__ = __add__ + + def __iadd__(self, other): + add(self.array, other, self.array) + return self + + def __sub__(self, other): + return self._rc(self.array - asarray(other)) + + def __rsub__(self, other): + return self._rc(asarray(other) - self.array) + + def __isub__(self, other): + subtract(self.array, other, self.array) + return self + + def __mul__(self, other): + return self._rc(multiply(self.array, asarray(other))) + + __rmul__ = __mul__ + + def __imul__(self, other): + multiply(self.array, other, self.array) + return self + + def __mod__(self, other): + return self._rc(remainder(self.array, other)) + + def __rmod__(self, other): + return self._rc(remainder(other, self.array)) + + def __imod__(self, other): + remainder(self.array, other, self.array) + return self + + def __divmod__(self, other): + return (self._rc(divide(self.array, other)), + self._rc(remainder(self.array, other))) + + def __rdivmod__(self, other): + return (self._rc(divide(other, self.array)), + self._rc(remainder(other, self.array))) + + def __pow__(self, other): + return self._rc(power(self.array, asarray(other))) + + def __rpow__(self, other): + return self._rc(power(asarray(other), self.array)) + + def __ipow__(self, other): + power(self.array, other, self.array) + return self + + def __lshift__(self, other): + return self._rc(left_shift(self.array, other)) + + def __rshift__(self, other): + return self._rc(right_shift(self.array, other)) + + def __rlshift__(self, other): + return self._rc(left_shift(other, self.array)) + + def __rrshift__(self, other): + return self._rc(right_shift(other, self.array)) + + def __ilshift__(self, other): + left_shift(self.array, other, self.array) + return self + + def __irshift__(self, other): + right_shift(self.array, other, self.array) + return self + + def __and__(self, other): + return self._rc(bitwise_and(self.array, other)) + + def __rand__(self, other): + return self._rc(bitwise_and(other, self.array)) + + def __iand__(self, other): + bitwise_and(self.array, other, self.array) + return self + + def __xor__(self, other): + return self._rc(bitwise_xor(self.array, other)) + + def 
__rxor__(self, other): + return self._rc(bitwise_xor(other, self.array)) + + def __ixor__(self, other): + bitwise_xor(self.array, other, self.array) + return self + + def __or__(self, other): + return self._rc(bitwise_or(self.array, other)) + + def __ror__(self, other): + return self._rc(bitwise_or(other, self.array)) + + def __ior__(self, other): + bitwise_or(self.array, other, self.array) + return self + + def __pos__(self): + return self._rc(self.array) + + def __invert__(self): + return self._rc(invert(self.array)) + + def _scalarfunc(self, func): + if self.ndim == 0: + return func(self[0]) + else: + raise TypeError( + "only rank-0 arrays can be converted to Python scalars.") + + def __complex__(self): + return self._scalarfunc(complex) + + def __float__(self): + return self._scalarfunc(float) + + def __int__(self): + return self._scalarfunc(int) + + def __hex__(self): + return self._scalarfunc(hex) + + def __oct__(self): + return self._scalarfunc(oct) + + def __lt__(self, other): + return self._rc(less(self.array, other)) + + def __le__(self, other): + return self._rc(less_equal(self.array, other)) + + def __eq__(self, other): + return self._rc(equal(self.array, other)) + + def __ne__(self, other): + return self._rc(not_equal(self.array, other)) + + def __gt__(self, other): + return self._rc(greater(self.array, other)) + + def __ge__(self, other): + return self._rc(greater_equal(self.array, other)) + + def copy(self): + "" + return self._rc(self.array.copy()) + + def tobytes(self): + "" + return self.array.tobytes() + + def byteswap(self): + "" + return self._rc(self.array.byteswap()) + + def astype(self, typecode): + "" + return self._rc(self.array.astype(typecode)) + + def _rc(self, a): + if len(shape(a)) == 0: + return a + else: + return self.__class__(a) + + def __array_wrap__(self, *args): + return self.__class__(args[0]) + + def __setattr__(self, attr, value): + if attr == 'array': + object.__setattr__(self, attr, value) + return + try: + self.array.__setattr__(attr, value) + except AttributeError: + object.__setattr__(self, attr, value) + + # Only called after other approaches fail. + def __getattr__(self, attr): + if (attr == 'array'): + return object.__getattribute__(self, attr) + return self.array.__getattribute__(attr) + + +############################################################# +# Test of class container +############################################################# +if __name__ == '__main__': + temp = reshape(arange(10000), (100, 100)) + + ua = container(temp) + # new object created begin test + print(dir(ua)) + print(shape(ua), ua.shape) # I have changed Numeric.py + + ua_small = ua[:3, :5] + print(ua_small) + # this did not change ua[0,0], which is not normal behavior + ua_small[0, 0] = 10 + print(ua_small[0, 0], ua[0, 0]) + print(sin(ua_small) / 3. * 6. 
+ sqrt(ua_small ** 2)) + print(less(ua_small, 103), type(less(ua_small, 103))) + print(type(ua_small * reshape(arange(15), shape(ua_small)))) + print(reshape(ua_small, (5, 3))) + print(transpose(ua_small)) diff --git a/python/numpy/lib/_user_array_impl.pyi b/python/numpy/lib/_user_array_impl.pyi new file mode 100644 index 000000000..13c0a0163 --- /dev/null +++ b/python/numpy/lib/_user_array_impl.pyi @@ -0,0 +1,225 @@ +from types import EllipsisType +from typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload + +from _typeshed import Incomplete +from typing_extensions import TypeVar, override + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeInt_co, + _DTypeLike, +) + +### + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True) + +_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]]) +_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]]) +_RealContainerT = TypeVar( + "_RealContainerT", + bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]], +) +_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]]) + +_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool] + +_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None +_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...] +_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice +_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...] + +### + +class container(Generic[_ShapeT_co, _DTypeT_co]): + array: np.ndarray[_ShapeT_co, _DTypeT_co] + + @overload + def __init__( + self, + /, + data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: _ArrayLike[_ScalarT], + dtype: None = None, + copy: bool = True, + ) -> None: ... + @overload + def __init__( + self: container[Any, np.dtype[_ScalarT]], + /, + data: npt.ArrayLike, + dtype: _DTypeLike[_ScalarT], + copy: bool = True, + ) -> None: ... + @overload + def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ... + + # + def __complex__(self, /) -> complex: ... + def __float__(self, /) -> float: ... + def __int__(self, /) -> int: ... + def __hex__(self, /) -> str: ... + def __oct__(self, /) -> str: ... + + # + @override + def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @override + def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + + # + def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... 
+ def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... + + # + def __len__(self, /) -> int: ... + + # keep in sync with np.ndarray + @overload + def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndexSlices, /) -> container[_AnyShape, _DTypeT_co]: ... + @overload + def __getitem__(self, key: _ToIndices, /) -> Any: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ... + @overload + def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype]: ... + + # keep in sync with np.ndarray + @overload + def __setitem__(self, index: _ToIndices, value: object, /) -> None: ... + @overload + def __setitem__(self: container[Any, np.dtype[np.void]], key: str | list[str], value: object, /) -> None: ... + + # keep in sync with np.ndarray + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap] + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ... + @overload + def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ... + @overload + def __abs__(self: _RealContainerT, /) -> _RealContainerT: ... + + # + def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019 + def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019 + + # TODO(jorenham): complete these binary ops + + # + def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __iadd__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __isub__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imul__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __imod__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ... + + # + def __pow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ... + def __ipow__(self, other: npt.ArrayLike, /) -> Self: ... + + # + def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... 
+ def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ... + def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __and__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __rand__ = __and__ + @overload + def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __xor__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __rxor__ = __xor__ + @overload + def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __or__( + self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, / + ) -> container[_AnyShape, np.dtype[np.bool]]: ... + @overload + def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ... + __ror__ = __or__ + @overload + def __ior__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ... + @overload + def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ... + + # + @overload + def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ... + + # + @overload + def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ... + @overload + def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ... + + # + def copy(self, /) -> Self: ... + def tobytes(self, /) -> bytes: ... + def byteswap(self, /) -> Self: ... + def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ... diff --git a/python/numpy/lib/_utils_impl.py b/python/numpy/lib/_utils_impl.py new file mode 100644 index 000000000..164aa4ee3 --- /dev/null +++ b/python/numpy/lib/_utils_impl.py @@ -0,0 +1,784 @@ +import functools +import os +import platform +import sys +import textwrap +import types +import warnings + +import numpy as np +from numpy._core import ndarray +from numpy._utils import set_module + +__all__ = [ + 'get_include', 'info', 'show_runtime' +] + + +@set_module('numpy') +def show_runtime(): + """ + Print information about various resources in the system + including available intrinsic support and BLAS/LAPACK library + in use + + .. versionadded:: 1.24.0 + + See Also + -------- + show_config : Show libraries in the system on which NumPy was built. + + Notes + ----- + 1. Information is derived with the help of `threadpoolctl `_ + library if available. + 2. 
SIMD related information is derived from ``__cpu_features__``, + ``__cpu_baseline__`` and ``__cpu_dispatch__`` + + """ + from pprint import pprint + + from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, + ) + config_found = [{ + "numpy_version": np.__version__, + "python": sys.version, + "uname": platform.uname(), + }] + features_found, features_not_found = [], [] + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + features_found.append(feature) + else: + features_not_found.append(feature) + config_found.append({ + "simd_extensions": { + "baseline": __cpu_baseline__, + "found": features_found, + "not_found": features_not_found + } + }) + config_found.append({ + "ignore_floating_point_errors_in_matmul": + not np._core._multiarray_umath._blas_supports_fpe(None), + }) + + try: + from threadpoolctl import threadpool_info + config_found.extend(threadpool_info()) + except ImportError: + print("WARNING: `threadpoolctl` not found in system!" + " Install it by `pip install threadpoolctl`." + " Once installed, try `np.show_runtime` again" + " for more detailed build information") + pprint(config_found) + + +@set_module('numpy') +def get_include(): + """ + Return the directory that contains the NumPy \\*.h header files. + + Extension modules that need to compile against NumPy may need to use this + function to locate the appropriate include directory. + + Notes + ----- + When using ``setuptools``, for example in ``setup.py``:: + + import numpy as np + ... + Extension('extension_name', ... + include_dirs=[np.get_include()]) + ... + + Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using + that is likely preferred for build systems other than ``setuptools``:: + + $ numpy-config --cflags + -I/path/to/site-packages/numpy/_core/include + + # Or rely on pkg-config: + $ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir) + $ pkg-config --cflags + -I/path/to/site-packages/numpy/_core/include + + Examples + -------- + >>> np.get_include() + '.../site-packages/numpy/core/include' # may vary + + """ + import numpy + if numpy.show_config is None: + # running from numpy source directory + d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include') + else: + # using installed numpy core headers + import numpy._core as _core + d = os.path.join(os.path.dirname(_core.__file__), 'include') + return d + + +class _Deprecate: + """ + Decorator class to deprecate old functions. + + Refer to `deprecate` for details. + + See Also + -------- + deprecate + + """ + + def __init__(self, old_name=None, new_name=None, message=None): + self.old_name = old_name + self.new_name = new_name + self.message = message + + def __call__(self, func, *args, **kwargs): + """ + Decorator call. Refer to ``decorate``. + + """ + old_name = self.old_name + new_name = self.new_name + message = self.message + + if old_name is None: + old_name = func.__name__ + if new_name is None: + depdoc = f"`{old_name}` is deprecated!" + else: + depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!" 
+ + if message is not None: + depdoc += "\n" + message + + @functools.wraps(func) + def newfunc(*args, **kwds): + warnings.warn(depdoc, DeprecationWarning, stacklevel=2) + return func(*args, **kwds) + + newfunc.__name__ = old_name + doc = func.__doc__ + if doc is None: + doc = depdoc + else: + lines = doc.expandtabs().split('\n') + indent = _get_indent(lines[1:]) + if lines[0].lstrip(): + # Indent the original first line to let inspect.cleandoc() + # dedent the docstring despite the deprecation notice. + doc = indent * ' ' + doc + else: + # Remove the same leading blank lines as cleandoc() would. + skip = len(lines[0]) + 1 + for line in lines[1:]: + if len(line) > indent: + break + skip += len(line) + 1 + doc = doc[skip:] + depdoc = textwrap.indent(depdoc, ' ' * indent) + doc = f'{depdoc}\n\n{doc}' + newfunc.__doc__ = doc + + return newfunc + + +def _get_indent(lines): + """ + Determines the leading whitespace that could be removed from all the lines. + """ + indent = sys.maxsize + for line in lines: + content = len(line.lstrip()) + if content: + indent = min(indent, len(line) - content) + if indent == sys.maxsize: + indent = 0 + return indent + + +def deprecate(*args, **kwargs): + """ + Issues a DeprecationWarning, adds warning to `old_name`'s + docstring, rebinds ``old_name.__name__`` and returns the new + function object. + + This function may also be used as a decorator. + + .. deprecated:: 2.0 + Use `~warnings.warn` with :exc:`DeprecationWarning` instead. + + Parameters + ---------- + func : function + The function to be deprecated. + old_name : str, optional + The name of the function to be deprecated. Default is None, in + which case the name of `func` is used. + new_name : str, optional + The new name for the function. Default is None, in which case the + deprecation message is that `old_name` is deprecated. If given, the + deprecation message is that `old_name` is deprecated and `new_name` + should be used instead. + message : str, optional + Additional explanation of the deprecation. Displayed in the + docstring after the warning. + + Returns + ------- + old_func : function + The deprecated function. + + Examples + -------- + Note that ``olduint`` returns a value after printing Deprecation + Warning: + + >>> olduint = np.lib.utils.deprecate(np.uint) + DeprecationWarning: `uint64` is deprecated! # may vary + >>> olduint(6) + 6 + + """ + # Deprecate may be run as a function or as a decorator + # If run as a function, we initialise the decorator class + # and execute its __call__ method. + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`deprecate` is deprecated, " + "use `warn` with `DeprecationWarning` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + if args: + fn = args[0] + args = args[1:] + + return _Deprecate(*args, **kwargs)(fn) + else: + return _Deprecate(*args, **kwargs) + + +def deprecate_with_doc(msg): + """ + Deprecates a function and includes the deprecation in its docstring. + + .. deprecated:: 2.0 + Use `~warnings.warn` with :exc:`DeprecationWarning` instead. + + This function is used as a decorator. It returns an object that can be + used to issue a DeprecationWarning, by passing the to-be decorated + function as argument, this adds warning to the to-be decorated function's + docstring and returns the new function object. + + See Also + -------- + deprecate : Decorate a function such that it issues a + :exc:`DeprecationWarning` + + Parameters + ---------- + msg : str + Additional explanation of the deprecation. 
Displayed in the + docstring after the warning. + + Returns + ------- + obj : object + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`deprecate` is deprecated, " + "use `warn` with `DeprecationWarning` instead. " + "(deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + return _Deprecate(message=msg) + + +#----------------------------------------------------------------------------- + + +# NOTE: pydoc defines a help function which works similarly to this +# except it uses a pager to take over the screen. + +# combine name and arguments and split to multiple lines of width +# characters. End lines on a comma and begin argument list indented with +# the rest of the arguments. +def _split_line(name, arguments, width): + firstwidth = len(name) + k = firstwidth + newstr = name + sepstr = ", " + arglist = arguments.split(sepstr) + for argument in arglist: + if k == firstwidth: + addstr = "" + else: + addstr = sepstr + k = k + len(argument) + len(addstr) + if k > width: + k = firstwidth + 1 + len(argument) + newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument + else: + newstr = newstr + addstr + argument + return newstr + + +_namedict = None +_dictlist = None + +# Traverse all module directories underneath globals +# to see if something is defined +def _makenamedict(module='numpy'): + module = __import__(module, globals(), locals(), []) + thedict = {module.__name__: module.__dict__} + dictlist = [module.__name__] + totraverse = [module.__dict__] + while True: + if len(totraverse) == 0: + break + thisdict = totraverse.pop(0) + for x in thisdict.keys(): + if isinstance(thisdict[x], types.ModuleType): + modname = thisdict[x].__name__ + if modname not in dictlist: + moddict = thisdict[x].__dict__ + dictlist.append(modname) + totraverse.append(moddict) + thedict[modname] = moddict + return thedict, dictlist + + +def _info(obj, output=None): + """Provide information about ndarray obj. + + Parameters + ---------- + obj : ndarray + Must be ndarray, not checked. + output + Where printed output goes. + + Notes + ----- + Copied over from the numarray module prior to its removal. + Adapted somewhat as only numpy is an option now. + + Called by info. + + """ + extra = "" + tic = "" + bp = lambda x: x + cls = getattr(obj, '__class__', type(obj)) + nm = getattr(cls, '__name__', cls) + strides = obj.strides + endian = obj.dtype.byteorder + + if output is None: + output = sys.stdout + + print("class: ", nm, file=output) + print("shape: ", obj.shape, file=output) + print("strides: ", strides, file=output) + print("itemsize: ", obj.itemsize, file=output) + print("aligned: ", bp(obj.flags.aligned), file=output) + print("contiguous: ", bp(obj.flags.contiguous), file=output) + print("fortran: ", obj.flags.fortran, file=output) + print( + f"data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}", + file=output + ) + print("byteorder: ", end=' ', file=output) + if endian in ['|', '=']: + print(f"{tic}{sys.byteorder}{tic}", file=output) + byteswap = False + elif endian == '>': + print(f"{tic}big{tic}", file=output) + byteswap = sys.byteorder != "big" + else: + print(f"{tic}little{tic}", file=output) + byteswap = sys.byteorder != "little" + print("byteswap: ", bp(byteswap), file=output) + print(f"type: {obj.dtype}", file=output) + + +@set_module('numpy') +def info(object=None, maxwidth=76, output=None, toplevel='numpy'): + """ + Get help information for an array, function, class, or module. 
+ + Parameters + ---------- + object : object or str, optional + Input object or name to get information about. If `object` is + an `ndarray` instance, information about the array is printed. + If `object` is a numpy object, its docstring is given. If it is + a string, available modules are searched for matching objects. + If None, information about `info` itself is returned. + maxwidth : int, optional + Printing width. + output : file like object, optional + File like object that the output is written to, default is + ``None``, in which case ``sys.stdout`` will be used. + The object has to be opened in 'w' or 'a' mode. + toplevel : str, optional + Start search at this level. + + Notes + ----- + When used interactively with an object, ``np.info(obj)`` is equivalent + to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython + prompt. + + Examples + -------- + >>> np.info(np.polyval) # doctest: +SKIP + polyval(p, x) + Evaluate the polynomial p at x. + ... + + When using a string for `object` it is possible to get multiple results. + + >>> np.info('fft') # doctest: +SKIP + *** Found in numpy *** + Core FFT routines + ... + *** Found in numpy.fft *** + fft(a, n=None, axis=-1) + ... + *** Repeat reference found in numpy.fft.fftpack *** + *** Total of 3 references found. *** + + When the argument is an array, information about the array is printed. + + >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64) + >>> np.info(a) + class: ndarray + shape: (2, 3) + strides: (24, 8) + itemsize: 8 + aligned: True + contiguous: True + fortran: False + data pointer: 0x562b6e0d2860 # may vary + byteorder: little + byteswap: False + type: complex64 + + """ + global _namedict, _dictlist + # Local import to speed up numpy's import time. + import inspect + import pydoc + + if (hasattr(object, '_ppimport_importer') or + hasattr(object, '_ppimport_module')): + object = object._ppimport_module + elif hasattr(object, '_ppimport_attr'): + object = object._ppimport_attr + + if output is None: + output = sys.stdout + + if object is None: + info(info) + elif isinstance(object, ndarray): + _info(object, output=output) + elif isinstance(object, str): + if _namedict is None: + _namedict, _dictlist = _makenamedict(toplevel) + numfound = 0 + objlist = [] + for namestr in _dictlist: + try: + obj = _namedict[namestr][object] + if id(obj) in objlist: + print(f"\n *** Repeat reference found in {namestr} *** ", + file=output + ) + else: + objlist.append(id(obj)) + print(f" *** Found in {namestr} ***", file=output) + info(obj) + print("-" * maxwidth, file=output) + numfound += 1 + except KeyError: + pass + if numfound == 0: + print(f"Help for {object} not found.", file=output) + else: + print("\n " + "*** Total of %d references found. 
***" % numfound, + file=output + ) + + elif inspect.isfunction(object) or inspect.ismethod(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name + arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + print(inspect.getdoc(object), file=output) + + elif inspect.isclass(object): + name = object.__name__ + try: + arguments = str(inspect.signature(object)) + except Exception: + arguments = "()" + + if len(name + arguments) > maxwidth: + argstr = _split_line(name, arguments, maxwidth) + else: + argstr = name + arguments + + print(" " + argstr + "\n", file=output) + doc1 = inspect.getdoc(object) + if doc1 is None: + if hasattr(object, '__init__'): + print(inspect.getdoc(object.__init__), file=output) + else: + print(inspect.getdoc(object), file=output) + + methods = pydoc.allmethods(object) + + public_methods = [meth for meth in methods if meth[0] != '_'] + if public_methods: + print("\n\nMethods:\n", file=output) + for meth in public_methods: + thisobj = getattr(object, meth, None) + if thisobj is not None: + methstr, other = pydoc.splitdoc( + inspect.getdoc(thisobj) or "None" + ) + print(f" {meth} -- {methstr}", file=output) + + elif hasattr(object, '__doc__'): + print(inspect.getdoc(object), file=output) + + +def safe_eval(source): + """ + Protected string evaluation. + + .. deprecated:: 2.0 + Use `ast.literal_eval` instead. + + Evaluate a string containing a Python literal expression without + allowing the execution of arbitrary non-literal code. + + .. warning:: + + This function is identical to :py:meth:`ast.literal_eval` and + has the same security implications. It may not always be safe + to evaluate large input strings. + + Parameters + ---------- + source : str + The string to evaluate. + + Returns + ------- + obj : object + The result of evaluating `source`. + + Raises + ------ + SyntaxError + If the code has invalid Python syntax, or if it contains + non-literal code. + + Examples + -------- + >>> np.safe_eval('1') + 1 + >>> np.safe_eval('[1, 2, 3]') + [1, 2, 3] + >>> np.safe_eval('{"foo": ("bar", 10.0)}') + {'foo': ('bar', 10.0)} + + >>> np.safe_eval('import os') + Traceback (most recent call last): + ... + SyntaxError: invalid syntax + + >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') + Traceback (most recent call last): + ... + ValueError: malformed node or string: <_ast.Call object at 0x...> + + """ + + # Deprecated in NumPy 2.0, 2023-07-11 + warnings.warn( + "`safe_eval` is deprecated. Use `ast.literal_eval` instead. " + "Be aware of security implications, such as memory exhaustion " + "based attacks (deprecated in NumPy 2.0)", + DeprecationWarning, + stacklevel=2 + ) + + # Local import to speed up numpy's import time. + import ast + return ast.literal_eval(source) + + +def _median_nancheck(data, result, axis): + """ + Utility function to check median result from data for NaN values at the end + and return NaN in that case. Input result can also be a MaskedArray. + + Parameters + ---------- + data : array + Sorted input data to median function + result : Array or MaskedArray + Result of median function. + axis : int + Axis along which the median was computed. + + Returns + ------- + result : scalar or ndarray + Median or NaN in axes which contained NaN in the input. If the input + was an array, NaN will be inserted in-place. If a scalar, either the + input itself or a scalar NaN. 
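+
+    Examples
+    --------
+    A minimal sketch (hypothetical values; `data` must already be sorted
+    so any NaN sits at the end):
+
+    >>> data = np.array([1.0, 2.0, np.nan])
+    >>> _median_nancheck(data, np.float64(2.0), axis=0)  # doctest: +SKIP
+    nan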
+ """ + if data.size == 0: + return result + potential_nans = data.take(-1, axis=axis) + n = np.isnan(potential_nans) + # masked NaN values are ok, although for masked the copyto may fail for + # unmasked ones (this was always broken) when the result is a scalar. + if np.ma.isMaskedArray(n): + n = n.filled(False) + + if not n.any(): + return result + + # Without given output, it is possible that the current result is a + # numpy scalar, which is not writeable. If so, just return nan. + if isinstance(result, np.generic): + return potential_nans + + # Otherwise copy NaNs (if there are any) + np.copyto(result, potential_nans, where=n) + return result + +def _opt_info(): + """ + Returns a string containing the CPU features supported + by the current build. + + The format of the string can be explained as follows: + - Dispatched features supported by the running machine end with `*`. + - Dispatched features not supported by the running machine + end with `?`. + - Remaining features represent the baseline. + + Returns: + str: A formatted string indicating the supported CPU features. + """ + from numpy._core._multiarray_umath import ( + __cpu_baseline__, + __cpu_dispatch__, + __cpu_features__, + ) + + if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0: + return '' + + enabled_features = ' '.join(__cpu_baseline__) + for feature in __cpu_dispatch__: + if __cpu_features__[feature]: + enabled_features += f" {feature}*" + else: + enabled_features += f" {feature}?" + + return enabled_features + +def drop_metadata(dtype, /): + """ + Returns the dtype unchanged if it contained no metadata or a copy of the + dtype if it (or any of its structure dtypes) contained metadata. + + This utility is used by `np.save` and `np.savez` to drop metadata before + saving. + + .. note:: + + Due to its limitation this function may move to a more appropriate + home or change in the future and is considered semi-public API only. + + .. warning:: + + This function does not preserve more strange things like record dtypes + and user dtypes may simply return the wrong thing. If you need to be + sure about the latter, check the result with: + ``np.can_cast(new_dtype, dtype, casting="no")``. + + """ + if dtype.fields is not None: + found_metadata = dtype.metadata is not None + + names = [] + formats = [] + offsets = [] + titles = [] + for name, field in dtype.fields.items(): + field_dt = drop_metadata(field[0]) + if field_dt is not field[0]: + found_metadata = True + + names.append(name) + formats.append(field_dt) + offsets.append(field[1]) + titles.append(None if len(field) < 3 else field[2]) + + if not found_metadata: + return dtype + + structure = { + 'names': names, 'formats': formats, 'offsets': offsets, 'titles': titles, + 'itemsize': dtype.itemsize} + + # NOTE: Could pass (dtype.type, structure) to preserve record dtypes... + return np.dtype(structure, align=dtype.isalignedstruct) + elif dtype.subdtype is not None: + # subarray dtype + subdtype, shape = dtype.subdtype + new_subdtype = drop_metadata(subdtype) + if dtype.metadata is None and new_subdtype is subdtype: + return dtype + + return np.dtype((new_subdtype, shape)) + else: + # Normal unstructured dtype + if dtype.metadata is None: + return dtype + # Note that `dt.str` doesn't round-trip e.g. for user-dtypes. 
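+    # For plain dtypes, `dtype.str` is just the format string (e.g. '<i8'),
+    # so constructing a dtype from it gives an equivalent dtype with the
+    # metadata dict dropped.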
+ return np.dtype(dtype.str) diff --git a/python/numpy/lib/_utils_impl.pyi b/python/numpy/lib/_utils_impl.pyi new file mode 100644 index 000000000..00ed47c9f --- /dev/null +++ b/python/numpy/lib/_utils_impl.pyi @@ -0,0 +1,10 @@ +from _typeshed import SupportsWrite + +from numpy._typing import DTypeLike + +__all__ = ["get_include", "info", "show_runtime"] + +def get_include() -> str: ... +def show_runtime() -> None: ... +def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ... +def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ... diff --git a/python/numpy/lib/_version.py b/python/numpy/lib/_version.py new file mode 100644 index 000000000..f7a353868 --- /dev/null +++ b/python/numpy/lib/_version.py @@ -0,0 +1,154 @@ +"""Utility to compare (NumPy) version strings. + +The NumpyVersion class allows properly comparing numpy version strings. +The LooseVersion and StrictVersion classes that distutils provides don't +work; they don't recognize anything like alpha/beta/rc/dev versions. + +""" +import re + +__all__ = ['NumpyVersion'] + + +class NumpyVersion: + """Parse and compare numpy version strings. + + NumPy has the following versioning scheme (numbers given are examples; they + can be > 9 in principle): + + - Released version: '1.8.0', '1.8.1', etc. + - Alpha: '1.8.0a1', '1.8.0a2', etc. + - Beta: '1.8.0b1', '1.8.0b2', etc. + - Release candidates: '1.8.0rc1', '1.8.0rc2', etc. + - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended) + - Development versions after a1: '1.8.0a1.dev-f1234afa', + '1.8.0b2.dev-f1234afa', + '1.8.1rc1.dev-f1234afa', etc. + - Development versions (no git hash available): '1.8.0.dev-Unknown' + + Comparing needs to be done against a valid version string or other + `NumpyVersion` instance. Note that all development versions of the same + (pre-)release compare equal. + + Parameters + ---------- + vstring : str + NumPy version string (``np.__version__``). + + Examples + -------- + >>> from numpy.lib import NumpyVersion + >>> if NumpyVersion(np.__version__) < '1.7.0': + ... print('skip') + >>> # skip + + >>> NumpyVersion('1.7') # raises ValueError, add ".0" + Traceback (most recent call last): + ... 
+ ValueError: Not a valid numpy version string + + """ + + __module__ = "numpy.lib" + + def __init__(self, vstring): + self.vstring = vstring + ver_main = re.match(r'\d+\.\d+\.\d+', vstring) + if not ver_main: + raise ValueError("Not a valid numpy version string") + + self.version = ver_main.group() + self.major, self.minor, self.bugfix = [int(x) for x in + self.version.split('.')] + if len(vstring) == ver_main.end(): + self.pre_release = 'final' + else: + alpha = re.match(r'a\d', vstring[ver_main.end():]) + beta = re.match(r'b\d', vstring[ver_main.end():]) + rc = re.match(r'rc\d', vstring[ver_main.end():]) + pre_rel = [m for m in [alpha, beta, rc] if m is not None] + if pre_rel: + self.pre_release = pre_rel[0].group() + else: + self.pre_release = '' + + self.is_devversion = bool(re.search(r'.dev', vstring)) + + def _compare_version(self, other): + """Compare major.minor.bugfix""" + if self.major == other.major: + if self.minor == other.minor: + if self.bugfix == other.bugfix: + vercmp = 0 + elif self.bugfix > other.bugfix: + vercmp = 1 + else: + vercmp = -1 + elif self.minor > other.minor: + vercmp = 1 + else: + vercmp = -1 + elif self.major > other.major: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare_pre_release(self, other): + """Compare alpha/beta/rc/final.""" + if self.pre_release == other.pre_release: + vercmp = 0 + elif self.pre_release == 'final': + vercmp = 1 + elif other.pre_release == 'final': + vercmp = -1 + elif self.pre_release > other.pre_release: + vercmp = 1 + else: + vercmp = -1 + + return vercmp + + def _compare(self, other): + if not isinstance(other, (str, NumpyVersion)): + raise ValueError("Invalid object to compare with NumpyVersion.") + + if isinstance(other, str): + other = NumpyVersion(other) + + vercmp = self._compare_version(other) + if vercmp == 0: + # Same x.y.z version, check for alpha/beta/rc + vercmp = self._compare_pre_release(other) + if vercmp == 0: + # Same version and same pre-release, check if dev version + if self.is_devversion is other.is_devversion: + vercmp = 0 + elif self.is_devversion: + vercmp = -1 + else: + vercmp = 1 + + return vercmp + + def __lt__(self, other): + return self._compare(other) < 0 + + def __le__(self, other): + return self._compare(other) <= 0 + + def __eq__(self, other): + return self._compare(other) == 0 + + def __ne__(self, other): + return self._compare(other) != 0 + + def __gt__(self, other): + return self._compare(other) > 0 + + def __ge__(self, other): + return self._compare(other) >= 0 + + def __repr__(self): + return f"NumpyVersion({self.vstring})" diff --git a/python/numpy/lib/_version.pyi b/python/numpy/lib/_version.pyi new file mode 100644 index 000000000..c53ef795f --- /dev/null +++ b/python/numpy/lib/_version.pyi @@ -0,0 +1,17 @@ +__all__ = ["NumpyVersion"] + +class NumpyVersion: + vstring: str + version: str + major: int + minor: int + bugfix: int + pre_release: str + is_devversion: bool + def __init__(self, vstring: str) -> None: ... + def __lt__(self, other: str | NumpyVersion) -> bool: ... + def __le__(self, other: str | NumpyVersion) -> bool: ... + def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override] + def __gt__(self, other: str | NumpyVersion) -> bool: ... + def __ge__(self, other: str | NumpyVersion) -> bool: ... 
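The `NumpyVersion` ordering implemented by `_compare_version`,
`_compare_pre_release` and the dev-version check above is
"dev < alpha < beta < rc < final" within one x.y.z version. A minimal
sketch of that ordering, using only the class defined in this file:

    from numpy.lib import NumpyVersion

    v = NumpyVersion('1.8.0rc1')
    assert v > '1.8.0b2'    # rc sorts after any alpha/beta
    assert v < '1.8.0'      # ...and before the final release
    # dev builds of the same (pre-)release sort before it
    assert NumpyVersion('1.8.0rc1.dev-f1234afa') < v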
diff --git a/python/numpy/lib/array_utils.py b/python/numpy/lib/array_utils.py new file mode 100644 index 000000000..c267eb021 --- /dev/null +++ b/python/numpy/lib/array_utils.py @@ -0,0 +1,7 @@ +from ._array_utils_impl import ( # noqa: F401 + __all__, + __doc__, + byte_bounds, + normalize_axis_index, + normalize_axis_tuple, +) diff --git a/python/numpy/lib/array_utils.pyi b/python/numpy/lib/array_utils.pyi new file mode 100644 index 000000000..8adc3c5b2 --- /dev/null +++ b/python/numpy/lib/array_utils.pyi @@ -0,0 +1,12 @@ +from ._array_utils_impl import ( + __all__ as __all__, +) +from ._array_utils_impl import ( + byte_bounds as byte_bounds, +) +from ._array_utils_impl import ( + normalize_axis_index as normalize_axis_index, +) +from ._array_utils_impl import ( + normalize_axis_tuple as normalize_axis_tuple, +) diff --git a/python/numpy/lib/format.py b/python/numpy/lib/format.py new file mode 100644 index 000000000..8e0c79942 --- /dev/null +++ b/python/numpy/lib/format.py @@ -0,0 +1,24 @@ +from ._format_impl import ( # noqa: F401 + ARRAY_ALIGN, + BUFFER_SIZE, + EXPECTED_KEYS, + GROWTH_AXIS_MAX_DIGITS, + MAGIC_LEN, + MAGIC_PREFIX, + __all__, + __doc__, + descr_to_dtype, + drop_metadata, + dtype_to_descr, + header_data_from_array_1_0, + isfileobj, + magic, + open_memmap, + read_array, + read_array_header_1_0, + read_array_header_2_0, + read_magic, + write_array, + write_array_header_1_0, + write_array_header_2_0, +) diff --git a/python/numpy/lib/format.pyi b/python/numpy/lib/format.pyi new file mode 100644 index 000000000..dd9470e1e --- /dev/null +++ b/python/numpy/lib/format.pyi @@ -0,0 +1,66 @@ +from ._format_impl import ( + ARRAY_ALIGN as ARRAY_ALIGN, +) +from ._format_impl import ( + BUFFER_SIZE as BUFFER_SIZE, +) +from ._format_impl import ( + EXPECTED_KEYS as EXPECTED_KEYS, +) +from ._format_impl import ( + GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS, +) +from ._format_impl import ( + MAGIC_LEN as MAGIC_LEN, +) +from ._format_impl import ( + MAGIC_PREFIX as MAGIC_PREFIX, +) +from ._format_impl import ( + __all__ as __all__, +) +from ._format_impl import ( + __doc__ as __doc__, +) +from ._format_impl import ( + descr_to_dtype as descr_to_dtype, +) +from ._format_impl import ( + drop_metadata as drop_metadata, +) +from ._format_impl import ( + dtype_to_descr as dtype_to_descr, +) +from ._format_impl import ( + header_data_from_array_1_0 as header_data_from_array_1_0, +) +from ._format_impl import ( + isfileobj as isfileobj, +) +from ._format_impl import ( + magic as magic, +) +from ._format_impl import ( + open_memmap as open_memmap, +) +from ._format_impl import ( + read_array as read_array, +) +from ._format_impl import ( + read_array_header_1_0 as read_array_header_1_0, +) +from ._format_impl import ( + read_array_header_2_0 as read_array_header_2_0, +) +from ._format_impl import ( + read_magic as read_magic, +) +from ._format_impl import ( + write_array as write_array, +) +from ._format_impl import ( + write_array_header_1_0 as write_array_header_1_0, +) +from ._format_impl import ( + write_array_header_2_0 as write_array_header_2_0, +) diff --git a/python/numpy/lib/introspect.py b/python/numpy/lib/introspect.py new file mode 100644 index 000000000..f4a0f32a9 --- /dev/null +++ b/python/numpy/lib/introspect.py @@ -0,0 +1,95 @@ +""" +Introspection helper functions. +""" + +__all__ = ['opt_func_info'] + + +def opt_func_info(func_name=None, signature=None): + """ + Returns a dictionary containing the currently supported CPU dispatched + features for all optimized functions. 
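+
+    Both filters are interpreted as regular expressions. `signature` is
+    matched against each type character of a loop signature as well as the
+    corresponding dtype name (e.g. ``'d'`` and ``'float64'``).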
+ + Parameters + ---------- + func_name : str (optional) + Regular expression to filter by function name. + + signature : str (optional) + Regular expression to filter by data type. + + Returns + ------- + dict + A dictionary where keys are optimized function names and values are + nested dictionaries indicating supported targets based on data types. + + Examples + -------- + Retrieve dispatch information for functions named 'add' or 'sub' and + data types 'float64' or 'float32': + + >>> import numpy as np + >>> dict = np.lib.introspect.opt_func_info( + ... func_name="add|abs", signature="float64|complex64" + ... ) + >>> import json + >>> print(json.dumps(dict, indent=2)) + { + "absolute": { + "dd": { + "current": "SSE41", + "available": "SSE41 baseline(SSE SSE2 SSE3)" + }, + "Ff": { + "current": "FMA3__AVX2", + "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" + }, + "Dd": { + "current": "FMA3__AVX2", + "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)" + } + }, + "add": { + "ddd": { + "current": "FMA3__AVX2", + "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" + }, + "FFF": { + "current": "FMA3__AVX2", + "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)" + } + } + } + + """ + import re + + from numpy._core._multiarray_umath import __cpu_targets_info__ as targets + from numpy._core._multiarray_umath import dtype + + if func_name is not None: + func_pattern = re.compile(func_name) + matching_funcs = { + k: v for k, v in targets.items() + if func_pattern.search(k) + } + else: + matching_funcs = targets + + if signature is not None: + sig_pattern = re.compile(signature) + matching_sigs = {} + for k, v in matching_funcs.items(): + matching_chars = {} + for chars, targets in v.items(): + if any( + sig_pattern.search(c) or sig_pattern.search(dtype(c).name) + for c in chars + ): + matching_chars[chars] = targets + if matching_chars: + matching_sigs[k] = matching_chars + else: + matching_sigs = matching_funcs + return matching_sigs diff --git a/python/numpy/lib/introspect.pyi b/python/numpy/lib/introspect.pyi new file mode 100644 index 000000000..7929981cd --- /dev/null +++ b/python/numpy/lib/introspect.pyi @@ -0,0 +1,3 @@ +__all__ = ["opt_func_info"] + +def opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ... diff --git a/python/numpy/lib/mixins.py b/python/numpy/lib/mixins.py new file mode 100644 index 000000000..831bb34cf --- /dev/null +++ b/python/numpy/lib/mixins.py @@ -0,0 +1,180 @@ +""" +Mixin classes for custom array types that don't inherit from ndarray. 
+"""
+
+__all__ = ['NDArrayOperatorsMixin']
+
+
+def _disables_array_ufunc(obj):
+    """True when __array_ufunc__ is set to None."""
+    try:
+        return obj.__array_ufunc__ is None
+    except AttributeError:
+        return False
+
+
+def _binary_method(ufunc, name):
+    """Implement a forward binary method with a ufunc, e.g., __add__."""
+    def func(self, other):
+        if _disables_array_ufunc(other):
+            return NotImplemented
+        return ufunc(self, other)
+    func.__name__ = f'__{name}__'
+    return func
+
+
+def _reflected_binary_method(ufunc, name):
+    """Implement a reflected binary method with a ufunc, e.g., __radd__."""
+    def func(self, other):
+        if _disables_array_ufunc(other):
+            return NotImplemented
+        return ufunc(other, self)
+    func.__name__ = f'__r{name}__'
+    return func
+
+
+def _inplace_binary_method(ufunc, name):
+    """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
+    def func(self, other):
+        return ufunc(self, other, out=(self,))
+    func.__name__ = f'__i{name}__'
+    return func
+
+
+def _numeric_methods(ufunc, name):
+    """Implement forward, reflected and inplace binary methods with a ufunc."""
+    return (_binary_method(ufunc, name),
+            _reflected_binary_method(ufunc, name),
+            _inplace_binary_method(ufunc, name))
+
+
+def _unary_method(ufunc, name):
+    """Implement a unary special method with a ufunc."""
+    def func(self):
+        return ufunc(self)
+    func.__name__ = f'__{name}__'
+    return func
+
+
+class NDArrayOperatorsMixin:
+    """Mixin defining all operator special methods using __array_ufunc__.
+
+    This class implements the special methods for almost all of Python's
+    builtin operators defined in the `operator` module, including comparisons
+    (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
+    deferring to the ``__array_ufunc__`` method, which subclasses must
+    implement.
+
+    It is useful for writing classes that do not inherit from `numpy.ndarray`,
+    but that should support arithmetic and numpy universal functions like
+    arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`.
+
+    As a trivial example, consider this implementation of an ``ArrayLike``
+    class that simply wraps a NumPy array and ensures that the result of any
+    arithmetic operation is also an ``ArrayLike`` object:
+
+    >>> import numbers
+    >>> class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+    ...     def __init__(self, value):
+    ...         self.value = np.asarray(value)
+    ...
+    ...     # One might also consider adding the built-in list type to this
+    ...     # list, to support operations like np.add(array_like, list)
+    ...     _HANDLED_TYPES = (np.ndarray, numbers.Number)
+    ...
+    ...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+    ...         out = kwargs.get('out', ())
+    ...         for x in inputs + out:
+    ...             # Only support operations with instances of
+    ...             # _HANDLED_TYPES. Use ArrayLike instead of type(self)
+    ...             # for isinstance to allow subclasses that don't
+    ...             # override __array_ufunc__ to handle ArrayLike objects.
+    ...             if not isinstance(
+    ...                 x, self._HANDLED_TYPES + (ArrayLike,)
+    ...             ):
+    ...                 return NotImplemented
+    ...
+    ...         # Defer to the implementation of the ufunc
+    ...         # on unwrapped values.
+    ...         inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+    ...                        for x in inputs)
+    ...         if out:
+    ...             kwargs['out'] = tuple(
+    ...                 x.value if isinstance(x, ArrayLike) else x
+    ...                 for x in out)
+    ...         result = getattr(ufunc, method)(*inputs, **kwargs)
+    ...
+    ...         if type(result) is tuple:
+    ...             # multiple return values
+    ...             return tuple(type(self)(x) for x in result)
+    ...         elif method == 'at':
+    ... 
# no return value + ... return None + ... else: + ... # one return value + ... return type(self)(result) + ... + ... def __repr__(self): + ... return '%s(%r)' % (type(self).__name__, self.value) + + In interactions between ``ArrayLike`` objects and numbers or numpy arrays, + the result is always another ``ArrayLike``: + + >>> x = ArrayLike([1, 2, 3]) + >>> x - 1 + ArrayLike(array([0, 1, 2])) + >>> 1 - x + ArrayLike(array([ 0, -1, -2])) + >>> np.arange(3) - x + ArrayLike(array([-1, -1, -1])) + >>> x - np.arange(3) + ArrayLike(array([1, 1, 1])) + + Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations + with arbitrary, unrecognized types. This ensures that interactions with + ArrayLike preserve a well-defined casting hierarchy. + + """ + from numpy._core import umath as um + + __slots__ = () + # Like np.ndarray, this mixin class implements "Option 1" from the ufunc + # overrides NEP. + + # comparisons don't have reflected and in-place versions + __lt__ = _binary_method(um.less, 'lt') + __le__ = _binary_method(um.less_equal, 'le') + __eq__ = _binary_method(um.equal, 'eq') + __ne__ = _binary_method(um.not_equal, 'ne') + __gt__ = _binary_method(um.greater, 'gt') + __ge__ = _binary_method(um.greater_equal, 'ge') + + # numeric methods + __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add') + __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub') + __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul') + __matmul__, __rmatmul__, __imatmul__ = _numeric_methods( + um.matmul, 'matmul') + __truediv__, __rtruediv__, __itruediv__ = _numeric_methods( + um.true_divide, 'truediv') + __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods( + um.floor_divide, 'floordiv') + __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod') + __divmod__ = _binary_method(um.divmod, 'divmod') + __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod') + # __idivmod__ does not exist + # TODO: handle the optional third argument for __pow__? + __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow') + __lshift__, __rlshift__, __ilshift__ = _numeric_methods( + um.left_shift, 'lshift') + __rshift__, __rrshift__, __irshift__ = _numeric_methods( + um.right_shift, 'rshift') + __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and') + __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor') + __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or') + + # unary methods + __neg__ = _unary_method(um.negative, 'neg') + __pos__ = _unary_method(um.positive, 'pos') + __abs__ = _unary_method(um.absolute, 'abs') + __invert__ = _unary_method(um.invert, 'invert') diff --git a/python/numpy/lib/mixins.pyi b/python/numpy/lib/mixins.pyi new file mode 100644 index 000000000..730827d92 --- /dev/null +++ b/python/numpy/lib/mixins.pyi @@ -0,0 +1,77 @@ +from abc import ABC, abstractmethod +from typing import Any +from typing import Literal as L + +from numpy import ufunc + +__all__ = ["NDArrayOperatorsMixin"] + +# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass, +# even though it's reliant on subclasses implementing `__array_ufunc__` + +# NOTE: The accepted input- and output-types of the various dunders are +# completely dependent on how `__array_ufunc__` is implemented. +# As such, only little type safety can be provided here. 
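+# NOTE: in particular, `__eq__` and `__ne__` return `Any` rather than
+# `bool`, deviating from `object`'s signatures.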
+ +class NDArrayOperatorsMixin(ABC): + __slots__ = () + + @abstractmethod + def __array_ufunc__( + self, + ufunc: ufunc, + method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"], + *inputs: Any, + **kwargs: Any, + ) -> Any: ... + def __lt__(self, other: Any) -> Any: ... + def __le__(self, other: Any) -> Any: ... + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... + def __gt__(self, other: Any) -> Any: ... + def __ge__(self, other: Any) -> Any: ... + def __add__(self, other: Any) -> Any: ... + def __radd__(self, other: Any) -> Any: ... + def __iadd__(self, other: Any) -> Any: ... + def __sub__(self, other: Any) -> Any: ... + def __rsub__(self, other: Any) -> Any: ... + def __isub__(self, other: Any) -> Any: ... + def __mul__(self, other: Any) -> Any: ... + def __rmul__(self, other: Any) -> Any: ... + def __imul__(self, other: Any) -> Any: ... + def __matmul__(self, other: Any) -> Any: ... + def __rmatmul__(self, other: Any) -> Any: ... + def __imatmul__(self, other: Any) -> Any: ... + def __truediv__(self, other: Any) -> Any: ... + def __rtruediv__(self, other: Any) -> Any: ... + def __itruediv__(self, other: Any) -> Any: ... + def __floordiv__(self, other: Any) -> Any: ... + def __rfloordiv__(self, other: Any) -> Any: ... + def __ifloordiv__(self, other: Any) -> Any: ... + def __mod__(self, other: Any) -> Any: ... + def __rmod__(self, other: Any) -> Any: ... + def __imod__(self, other: Any) -> Any: ... + def __divmod__(self, other: Any) -> Any: ... + def __rdivmod__(self, other: Any) -> Any: ... + def __pow__(self, other: Any) -> Any: ... + def __rpow__(self, other: Any) -> Any: ... + def __ipow__(self, other: Any) -> Any: ... + def __lshift__(self, other: Any) -> Any: ... + def __rlshift__(self, other: Any) -> Any: ... + def __ilshift__(self, other: Any) -> Any: ... + def __rshift__(self, other: Any) -> Any: ... + def __rrshift__(self, other: Any) -> Any: ... + def __irshift__(self, other: Any) -> Any: ... + def __and__(self, other: Any) -> Any: ... + def __rand__(self, other: Any) -> Any: ... + def __iand__(self, other: Any) -> Any: ... + def __xor__(self, other: Any) -> Any: ... + def __rxor__(self, other: Any) -> Any: ... + def __ixor__(self, other: Any) -> Any: ... + def __or__(self, other: Any) -> Any: ... + def __ror__(self, other: Any) -> Any: ... + def __ior__(self, other: Any) -> Any: ... + def __neg__(self) -> Any: ... + def __pos__(self) -> Any: ... + def __abs__(self) -> Any: ... + def __invert__(self) -> Any: ... diff --git a/python/numpy/lib/npyio.py b/python/numpy/lib/npyio.py new file mode 100644 index 000000000..84d807926 --- /dev/null +++ b/python/numpy/lib/npyio.py @@ -0,0 +1 @@ +from ._npyio_impl import DataSource, NpzFile, __doc__ # noqa: F401 diff --git a/python/numpy/lib/npyio.pyi b/python/numpy/lib/npyio.pyi new file mode 100644 index 000000000..49fb4d1fc --- /dev/null +++ b/python/numpy/lib/npyio.pyi @@ -0,0 +1,9 @@ +from numpy.lib._npyio_impl import ( + DataSource as DataSource, +) +from numpy.lib._npyio_impl import ( + NpzFile as NpzFile, +) +from numpy.lib._npyio_impl import ( + __doc__ as __doc__, +) diff --git a/python/numpy/lib/recfunctions.py b/python/numpy/lib/recfunctions.py new file mode 100644 index 000000000..c8a6dd818 --- /dev/null +++ b/python/numpy/lib/recfunctions.py @@ -0,0 +1,1681 @@ +""" +Collection of utilities to manipulate structured arrays. + +Most of these functions were initially implemented by John Hunter for +matplotlib. They have been rewritten and extended for convenience. 
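+
+Most functions below accept plain or masked structured arrays; where
+relevant, the ``usemask`` and ``asrecarray`` flags choose between returning
+an ndarray, a recarray, a MaskedArray or a MaskedRecords instance.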
+
+"""
+import itertools
+
+import numpy as np
+import numpy.ma as ma
+import numpy.ma.mrecords as mrec
+from numpy._core.overrides import array_function_dispatch
+from numpy.lib._iotools import _is_string_like
+
+__all__ = [
+    'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+    'drop_fields', 'find_duplicates', 'flatten_descr',
+    'get_fieldstructure', 'get_names', 'get_names_flat',
+    'join_by', 'merge_arrays', 'rec_append_fields',
+    'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+    'rename_fields', 'repack_fields', 'require_fields',
+    'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
+    ]
+
+
+def _recursive_fill_fields_dispatcher(input, output):
+    return (input, output)
+
+
+@array_function_dispatch(_recursive_fill_fields_dispatcher)
+def recursive_fill_fields(input, output):
+    """
+    Fills fields from output with fields from input,
+    with support for nested structures.
+
+    Parameters
+    ----------
+    input : ndarray
+        Input array.
+    output : ndarray
+        Output array.
+
+    Notes
+    -----
+    * `output` should be at least the same size as `input`
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
+    >>> b = np.zeros((3,), dtype=a.dtype)
+    >>> rfn.recursive_fill_fields(a, b)
+    array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
+
+    """
+    newdtype = output.dtype
+    for field in newdtype.names:
+        try:
+            current = input[field]
+        except ValueError:
+            continue
+        if current.dtype.names is not None:
+            recursive_fill_fields(current, output[field])
+        else:
+            output[field][:len(current)] = current
+    return output
+
+
+def _get_fieldspec(dtype):
+    """
+    Produce a list of name/dtype pairs corresponding to the dtype fields
+
+    Similar to dtype.descr, but the 'titles' aren't worked around.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
+    >>> dt.descr
+    [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
+    >>> _get_fieldspec(dt)
+    [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
+
+    """
+    if dtype.names is None:
+        # .descr returns a nameless field, so we should too
+        return [('', dtype)]
+    else:
+        fields = ((name, dtype.fields[name]) for name in dtype.names)
+        # keep any titles, if present
+        return [
+            (name if len(f) == 2 else (f[2], name), f[0])
+            for name, f in fields
+        ]
+
+
+def get_names(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Input datatype
+    must have fields otherwise error is raised.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
+    ('A',)
+    >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names(adtype)
+    ('a', ('b', ('ba', 'bb')))
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            listnames.append((name, tuple(get_names(current))))
+        else:
+            listnames.append(name)
+    return tuple(listnames)
+
+
+def get_names_flat(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Input datatype
+    must have fields otherwise error is raised.
+    Nested structures are flattened beforehand.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
+    False
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names_flat(adtype)
+    ('a', 'b', 'ba', 'bb')
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        listnames.append(name)
+        current = adtype[name]
+        if current.names is not None:
+            listnames.extend(get_names_flat(current))
+    return tuple(listnames)
+
+
+def flatten_descr(ndtype):
+    """
+    Flatten a structured data-type description.
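+
+    Nested fields are expanded, in order, into a flat tuple of
+    ``(name, dtype)`` pairs; an unstructured dtype comes back as a single
+    ``('', dtype)`` pair.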
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
+    >>> rfn.flatten_descr(ndtype)
+    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
+
+    """
+    names = ndtype.names
+    if names is None:
+        return (('', ndtype),)
+    else:
+        descr = []
+        for field in names:
+            (typ, _) = ndtype.fields[field]
+            if typ.names is not None:
+                descr.extend(flatten_descr(typ))
+            else:
+                descr.append((field, typ))
+        return tuple(descr)
+
+
+def _zip_dtype(seqarrays, flatten=False):
+    newdtype = []
+    if flatten:
+        for a in seqarrays:
+            newdtype.extend(flatten_descr(a.dtype))
+    else:
+        for a in seqarrays:
+            current = a.dtype
+            if current.names is not None and len(current.names) == 1:
+                # special case - dtypes of 1 field are flattened
+                newdtype.extend(_get_fieldspec(current))
+            else:
+                newdtype.append(('', current))
+    return np.dtype(newdtype)
+
+
+def _zip_descr(seqarrays, flatten=False):
+    """
+    Combine the dtype description of a series of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays
+    flatten : {boolean}, optional
+        Whether to collapse nested descriptions.
+    """
+    return _zip_dtype(seqarrays, flatten=flatten).descr
+
+
+def get_fieldstructure(adtype, lastname=None, parents=None,):
+    """
+    Returns a dictionary with fields indexing lists of their parent fields.
+
+    This function is used to simplify access to fields nested in other fields.
+
+    Parameters
+    ----------
+    adtype : np.dtype
+        Input datatype
+    lastname : optional
+        Last processed field name (used internally during recursion).
+    parents : dictionary
+        Dictionary of parent fields (used internally during recursion).
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('A', int),
+    ...                    ('B', [('BA', int),
+    ...                           ('BB', [('BBA', int), ('BBB', int)])])])
+    >>> rfn.get_fieldstructure(ndtype)
+    ... # XXX: possible regression, order of BBA and BBB is swapped
+    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+
+    """
+    if parents is None:
+        parents = {}
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            if lastname:
+                parents[name] = [lastname, ]
+            else:
+                parents[name] = []
+            parents.update(get_fieldstructure(current, name, parents))
+        else:
+            lastparent = list(parents.get(lastname, []) or [])
+            if lastparent:
+                lastparent.append(lastname)
+            elif lastname:
+                lastparent = [lastname, ]
+            parents[name] = lastparent or []
+    return parents
+
+
+def _izip_fields_flat(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays,
+    collapsing any nested structure.
+
+    """
+    for element in iterable:
+        if isinstance(element, np.void):
+            yield from _izip_fields_flat(tuple(element))
+        else:
+            yield element
+
+
+def _izip_fields(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays.
+
+    """
+    for element in iterable:
+        if (hasattr(element, '__iter__') and
+                not isinstance(element, str)):
+            yield from _izip_fields(element)
+        elif isinstance(element, np.void) and len(tuple(element)) == 1:
+            # same handling as the previous branch; kept separate for the
+            # single-field np.void case
+            yield from _izip_fields(element)
+        else:
+            yield element
+
+
+def _izip_records(seqarrays, fill_value=None, flatten=True):
+    """
+    Returns an iterator of concatenated items from a sequence of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays.
+    fill_value : {None, integer}
+        Value used to pad shorter iterables.
+    flatten : {True, False}, optional
+        Whether to flatten the items (collapsing nested structure) or keep
+        the nested approach.
+    """
+
+    # Should we flatten the items, or just use a nested approach
+    if flatten:
+        zipfunc = _izip_fields_flat
+    else:
+        zipfunc = _izip_fields
+
+    for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
+        yield tuple(zipfunc(tup))
+
+
+def _fix_output(output, usemask=True, asrecarray=False):
+    """
+    Private function: return a recarray, a ndarray, a MaskedArray
+    or a MaskedRecords depending on the input parameters
+    """
+    if not isinstance(output, ma.MaskedArray):
+        usemask = False
+    if usemask:
+        if asrecarray:
+            output = output.view(mrec.MaskedRecords)
+    else:
+        output = ma.filled(output)
+        if asrecarray:
+            output = output.view(np.recarray)
+    return output
+
+
+def _fix_defaults(output, defaults=None):
+    """
+    Update the fill_value and masked data of `output`
+    from the default given in a dictionary defaults.
+    """
+    names = output.dtype.names
+    (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
+    for (k, v) in (defaults or {}).items():
+        if k in names:
+            fill_value[k] = v
+            data[k][mask[k]] = v
+    return output
+
+
+def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
+                             usemask=None, asrecarray=None):
+    return seqarrays
+
+
+@array_function_dispatch(_merge_arrays_dispatcher)
+def merge_arrays(seqarrays, fill_value=-1, flatten=False,
+                 usemask=False, asrecarray=False):
+    """
+    Merge arrays field by field.
+
+    Parameters
+    ----------
+    seqarrays : sequence of ndarrays
+        Sequence of arrays
+    fill_value : {float}, optional
+        Filling value used to pad missing data on the shorter arrays.
+    flatten : {False, True}, optional
+        Whether to collapse nested fields.
+    usemask : {False, True}, optional
+        Whether to return a masked array or not.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray (MaskedRecords) or not.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
+    array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+          dtype=[('f0', '<i8'), ('f1', '<f8')])
+
+    >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
+    ...         np.array([10., 20., 30.])), usemask=False)
+    array([(1, 10.0), (2, 20.0), (-1, 30.0)],
+             dtype=[('f0', '<i8'), ('f1', '<f8')])
+    >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
+    ...                   np.array([10., 20., 30.])),
+    ...                  usemask=False, asrecarray=True)
+    rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+              dtype=[('a', '>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+    ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) + >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) + array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], + dtype=[('A', ' 1: + data = merge_arrays(data, flatten=True, usemask=usemask, + fill_value=fill_value) + else: + data = data.pop() + # + output = ma.masked_all( + max(len(base), len(data)), + dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype)) + output = recursive_fill_fields(base, output) + output = recursive_fill_fields(data, output) + # + return _fix_output(output, usemask=usemask, asrecarray=asrecarray) + + +def _rec_append_fields_dispatcher(base, names, data, dtypes=None): + yield base + yield from data + + +@array_function_dispatch(_rec_append_fields_dispatcher) +def rec_append_fields(base, names, data, dtypes=None): + """ + Add new fields to an existing array. + + The names of the fields are given with the `names` arguments, + the corresponding values with the `data` arguments. + If a single field is appended, `names`, `data` and `dtypes` do not have + to be lists but just values. + + Parameters + ---------- + base : array + Input array to extend. + names : string, sequence + String or sequence of strings corresponding to the names + of the new fields. + data : array or sequence of arrays + Array or sequence of arrays storing the fields to add to the base. + dtypes : sequence of datatypes, optional + Datatype or sequence of datatypes. + If None, the datatypes are estimated from the `data`. + + See Also + -------- + append_fields + + Returns + ------- + appended_array : np.recarray + """ + return append_fields(base, names, data=data, dtypes=dtypes, + asrecarray=True, usemask=False) + + +def _repack_fields_dispatcher(a, align=None, recurse=None): + return (a,) + + +@array_function_dispatch(_repack_fields_dispatcher) +def repack_fields(a, align=False, recurse=False): + """ + Re-pack the fields of a structured array or dtype in memory. + + The memory layout of structured datatypes allows fields at arbitrary + byte offsets. This means the fields can be separated by padding bytes, + their offsets can be non-monotonically increasing, and they can overlap. + + This method removes any overlaps and reorders the fields in memory so they + have increasing byte offsets, and adds or removes padding bytes depending + on the `align` option, which behaves like the `align` option to + `numpy.dtype`. + + If `align=False`, this method produces a "packed" memory layout in which + each field starts at the byte the previous field ended, and any padding + bytes are removed. + + If `align=True`, this methods produces an "aligned" memory layout in which + each field's offset is a multiple of its alignment, and the total itemsize + is a multiple of the largest alignment, by adding padding bytes as needed. + + Parameters + ---------- + a : ndarray or dtype + array or dtype for which to repack the fields. + align : boolean + If true, use an "aligned" memory layout, otherwise use a "packed" layout. + recurse : boolean + If True, also repack nested structures. + + Returns + ------- + repacked : ndarray or dtype + Copy of `a` with fields repacked, or `a` itself if no repacking was + needed. + + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> def print_offsets(d): + ... print("offsets:", [d.fields[name][1] for name in d.names]) + ... print("itemsize:", d.itemsize) + ... 
+
+    >>> dt = np.dtype('u1, <i8, <f4', align=True)
+    >>> dt
+    dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f4'], 'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
+    >>> print_offsets(dt)
+    offsets: [0, 8, 16]
+    itemsize: 24
+    >>> packed_dt = rfn.repack_fields(dt)
+    >>> packed_dt
+    dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f4')])
+    >>> print_offsets(packed_dt)
+    offsets: [0, 1, 9]
+    itemsize: 17
+
+    """
+    if not isinstance(a, np.dtype):
+        dt = repack_fields(a.dtype, align=align, recurse=recurse)
+        return a.astype(dt, copy=False)
+
+    if a.names is None:
+        return a
+
+    fieldinfo = []
+    for name in a.names:
+        tup = a.fields[name]
+        if recurse:
+            fmt = repack_fields(tup[0], align=align, recurse=True)
+        else:
+            fmt = tup[0]
+
+        if len(tup) == 3:
+            name = (tup[2], name)
+
+        fieldinfo.append((name, fmt))
+
+    dt = np.dtype(fieldinfo, align=align)
+    return np.dtype((a.type, dt))
+
+def _get_fields_and_offsets(dt, offset=0):
+    """
+    Returns a flat list of (dtype, count, offset) tuples of all the
+    scalar fields in the dtype "dt", including nested fields, in left
+    to right order.
+    """
+
+    # counts up elements in subarrays, including nested subarrays, and returns
+    # base dtype and count
+    def count_elem(dt):
+        count = 1
+        while dt.shape != ():
+            for size in dt.shape:
+                count *= size
+            dt = dt.base
+        return dt, count
+
+    fields = []
+    for name in dt.names:
+        field = dt.fields[name]
+        f_dt, f_offset = field[0], field[1]
+        f_dt, n = count_elem(f_dt)
+
+        if f_dt.names is None:
+            fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
+        else:
+            subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
+            size = f_dt.itemsize
+
+            for i in range(n):
+                if i == 0:
+                    # optimization: avoid list comprehension if no subarray
+                    fields.extend(subfields)
+                else:
+                    fields.extend([(d, c, o + i * size) for d, c, o in subfields])
+    return fields
+
+def _common_stride(offsets, counts, itemsize):
+    """
+    Returns the stride between the fields, or None if the stride is not
+    constant. The values in "counts" designate the lengths of
+    subarrays. Subarrays are treated as many contiguous fields, with
+    always positive stride.
+    """
+    if len(offsets) <= 1:
+        return itemsize
+
+    negative = offsets[1] < offsets[0]  # negative stride
+    if negative:
+        # reverse, so offsets will be ascending
+        it = zip(reversed(offsets), reversed(counts))
+    else:
+        it = zip(offsets, counts)
+
+    prev_offset = None
+    stride = None
+    for offset, count in it:
+        if count != 1:  # subarray: always c-contiguous
+            if negative:
+                return None  # subarrays can never have a negative stride
+            if stride is None:
+                stride = itemsize
+            if stride != itemsize:
+                return None
+            end_offset = offset + (count - 1) * itemsize
+        else:
+            end_offset = offset
+
+        if prev_offset is not None:
+            new_stride = offset - prev_offset
+            if stride is None:
+                stride = new_stride
+            if stride != new_stride:
+                return None
+
+        prev_offset = end_offset
+
+    if negative:
+        return -stride
+    return stride
+
+
+def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
+                                           casting=None):
+    return (arr,)
+
+@array_function_dispatch(_structured_to_unstructured_dispatcher)
+def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
+    """
+    Converts an n-D structured array into an (n+1)-D unstructured array.
+
+    The new array will have a new last dimension equal in size to the
+    number of field-elements of the input array. If not supplied, the output
+    datatype is determined from the numpy type promotion rules applied to all
+    the field datatypes.
+
+    Nested fields, as well as each element of any subarray fields, all count
+    as single field-elements.
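+    For example, a subarray field declared as ``('c', 'f4', 2)`` contributes
+    two field-elements to the new last dimension.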
+
+    Parameters
+    ----------
+    arr : ndarray
+        Structured array or dtype to convert. Cannot contain object datatype.
+    dtype : dtype, optional
+        The dtype of the output unstructured array.
+    copy : bool, optional
+        If true, always return a copy. If false, a view is returned if
+        possible, such as when the `dtype` and strides of the fields are
+        suitable and the array subtype is one of `numpy.ndarray`,
+        `numpy.recarray` or `numpy.memmap`.
+
+        .. versionchanged:: 1.25.0
+            A view can now be returned if the fields are separated by a
+            uniform stride.
+
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        See casting argument of `numpy.ndarray.astype`. Controls what kind of
+        data casting may occur.
+
+    Returns
+    -------
+    unstructured : ndarray
+        Unstructured array with one more dimension.
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+    >>> a
+    array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
+           (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
+          dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+    >>> rfn.structured_to_unstructured(a)
+    array([[0., 0., 0., 0., 0.],
+           [0., 0., 0., 0., 0.],
+           [0., 0., 0., 0., 0.],
+           [0., 0., 0., 0., 0.]])
+
+    >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+    ...              dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+    >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
+    array([ 3. ,  5.5,  9. , 11. ])
+
+    """  # noqa: E501
+    if arr.dtype.names is None:
+        raise ValueError('arr must be a structured array')
+
+    fields = _get_fields_and_offsets(arr.dtype)
+    n_fields = len(fields)
+    if n_fields == 0 and dtype is None:
+        raise ValueError("arr has no fields. Unable to guess dtype")
+    elif n_fields == 0:
+        # too many bugs elsewhere for this to work now
+        raise NotImplementedError("arr with no fields is not supported")
+
+    dts, counts, offsets = zip(*fields)
+    names = [f'f{n}' for n in range(n_fields)]
+
+    if dtype is None:
+        out_dtype = np.result_type(*[dt.base for dt in dts])
+    else:
+        out_dtype = np.dtype(dtype)
+
+    # Use a series of views and casts to convert to an unstructured array:
+
+    # first view using flattened fields (doesn't work for object arrays)
+    # Note: dts may include a shape for subarrays
+    flattened_fields = np.dtype({'names': names,
+                                 'formats': dts,
+                                 'offsets': offsets,
+                                 'itemsize': arr.dtype.itemsize})
+    arr = arr.view(flattened_fields)
+
+    # we only allow a few types to be unstructured by manipulating the
+    # strides, because we know it won't work with, for example, np.matrix nor
+    # np.ma.MaskedArray.
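+    # (exact type check rather than isinstance: unknown ndarray subclasses
+    # fall through to the copying astype() path below)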
+ can_view = type(arr) in (np.ndarray, np.recarray, np.memmap) + if (not copy) and can_view and all(dt.base == out_dtype for dt in dts): + # all elements have the right dtype already; if they have a common + # stride, we can just return a view + common_stride = _common_stride(offsets, counts, out_dtype.itemsize) + if common_stride is not None: + wrap = arr.__array_wrap__ + + new_shape = arr.shape + (sum(counts), out_dtype.itemsize) + new_strides = arr.strides + (abs(common_stride), 1) + + arr = arr[..., np.newaxis].view(np.uint8) # view as bytes + arr = arr[..., min(offsets):] # remove the leading unused data + arr = np.lib.stride_tricks.as_strided(arr, + new_shape, + new_strides, + subok=True) + + # cast and drop the last dimension again + arr = arr.view(out_dtype)[..., 0] + + if common_stride < 0: + arr = arr[..., ::-1] # reverse, if the stride was negative + if type(arr) is not type(wrap.__self__): + # Some types (e.g. recarray) turn into an ndarray along the + # way, so we have to wrap it again in order to match the + # behavior with copy=True. + arr = wrap(arr) + return arr + + # next cast to a packed format with all fields converted to new dtype + packed_fields = np.dtype({'names': names, + 'formats': [(out_dtype, dt.shape) for dt in dts]}) + arr = arr.astype(packed_fields, copy=copy, casting=casting) + + # finally is it safe to view the packed fields as the unstructured type + return arr.view((out_dtype, (sum(counts),))) + + +def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None, + align=None, copy=None, casting=None): + return (arr,) + +@array_function_dispatch(_unstructured_to_structured_dispatcher) +def unstructured_to_structured(arr, dtype=None, names=None, align=False, + copy=False, casting='unsafe'): + """ + Converts an n-D unstructured array into an (n-1)-D structured array. + + The last dimension of the input array is converted into a structure, with + number of field-elements equal to the size of the last dimension of the + input array. By default all output fields have the input array's dtype, but + an output structured dtype with an equal number of fields-elements can be + supplied instead. + + Nested fields, as well as each element of any subarray fields, all count + towards the number of field-elements. + + Parameters + ---------- + arr : ndarray + Unstructured array or dtype to convert. + dtype : dtype, optional + The structured dtype of the output array + names : list of strings, optional + If dtype is not supplied, this specifies the field names for the output + dtype, in order. The field dtypes will be the same as the input array. + align : boolean, optional + Whether to create an aligned memory layout. + copy : bool, optional + See copy argument to `numpy.ndarray.astype`. If true, always return a + copy. If false, and `dtype` requirements are satisfied, a view is + returned. + casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + See casting argument of `numpy.ndarray.astype`. Controls what kind of + data casting may occur. + + Returns + ------- + structured : ndarray + Structured array with fewer dimensions. 
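+
+        Note that `dtype` and `names` are alternatives: supply at most one
+        of them.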
+ + Examples + -------- + >>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + >>> a = np.arange(20).reshape((4,5)) + >>> a + array([[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]]) + >>> rfn.unstructured_to_structured(a, dt) + array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])], + dtype=[('a', '>> import numpy as np + + >>> from numpy.lib import recfunctions as rfn + >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], + ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + >>> rfn.apply_along_fields(np.mean, b) + array([ 2.66666667, 5.33333333, 8.66666667, 11. ]) + >>> rfn.apply_along_fields(np.mean, b[['x', 'z']]) + array([ 3. , 5.5, 9. , 11. ]) + + """ + if arr.dtype.names is None: + raise ValueError('arr must be a structured array') + + uarr = structured_to_unstructured(arr) + return func(uarr, axis=-1) + # works and avoids axis requirement, but very, very slow: + #return np.apply_along_axis(func, -1, uarr) + +def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None): + return dst, src + +@array_function_dispatch(_assign_fields_by_name_dispatcher) +def assign_fields_by_name(dst, src, zero_unassigned=True): + """ + Assigns values from one structured array to another by field name. + + Normally in numpy >= 1.14, assignment of one structured array to another + copies fields "by position", meaning that the first field from the src is + copied to the first field of the dst, and so on, regardless of field name. + + This function instead copies "by field name", such that fields in the dst + are assigned from the identically named field in the src. This applies + recursively for nested structures. This is how structure assignment worked + in numpy >= 1.6 to <= 1.13. + + Parameters + ---------- + dst : ndarray + src : ndarray + The source and destination arrays during assignment. + zero_unassigned : bool, optional + If True, fields in the dst for which there was no matching + field in the src are filled with the value 0 (zero). This + was the behavior of numpy <= 1.13. If False, those fields + are not modified. + """ + + if dst.dtype.names is None: + dst[...] = src + return + + for name in dst.dtype.names: + if name not in src.dtype.names: + if zero_unassigned: + dst[name] = 0 + else: + assign_fields_by_name(dst[name], src[name], + zero_unassigned) + +def _require_fields_dispatcher(array, required_dtype): + return (array,) + +@array_function_dispatch(_require_fields_dispatcher) +def require_fields(array, required_dtype): + """ + Casts a structured array to a new dtype using assignment by field-name. + + This function assigns from the old to the new array by name, so the + value of a field in the output array is the value of the field with the + same name in the source array. This has the effect of creating a new + ndarray containing only the fields "required" by the required_dtype. + + If a field name in the required_dtype does not exist in the + input array, that field is created and set to 0 in the output array. 
+
+    Parameters
+    ----------
+    a : ndarray
+        array to cast
+    required_dtype : dtype
+        datatype for output array
+
+    Returns
+    -------
+    out : ndarray
+        array with the new dtype, with field values copied from the fields in
+        the input array with the same name
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+    >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
+    array([(1., 1), (1., 1), (1., 1), (1., 1)],
+          dtype=[('b', '<f4'), ('c', 'u1')])
+    >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
+    array([(1., 0), (1., 0), (1., 0), (1., 0)],
+          dtype=[('b', '<f4'), ('newf', 'u1')])
+
+    """
+    out = np.empty(array.shape, dtype=required_dtype)
+    assign_fields_by_name(out, array)
+    return out
+
+
+def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
+                             asrecarray=None, autoconvert=None):
+    return arrays
+
+
+@array_function_dispatch(_stack_arrays_dispatcher)
+def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
+                 autoconvert=False):
+    """
+    Superposes arrays field by field.
+
+    Parameters
+    ----------
+    arrays : array or sequence
+        Sequence of input arrays.
+    defaults : dictionary, optional
+        Dictionary mapping field names to the corresponding default values.
+    usemask : {True, False}, optional
+        Whether to return a MaskedArray (or MaskedRecords if
+        `asrecarray==True`) or a ndarray.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray (or MaskedRecords if `usemask==True`)
+        or just a flexible-type ndarray.
+    autoconvert : {False, True}, optional
+        Whether to automatically cast the type of the field to the maximum.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> x = np.array([1, 2,])
+    >>> rfn.stack_arrays(x) is x
+    True
+    >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
+    >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+    ...               dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
+    >>> test = rfn.stack_arrays((z,zz))
+    >>> test
+    masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
+                       (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
+                 mask=[(False, False,  True), (False, False,  True),
+                       (False, False, False), (False, False, False),
+                       (False, False, False)],
+           fill_value=(b'N/A', 1e+20, 1e+20),
+                dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
+
+    """
+    if isinstance(arrays, ndarray):
+        return arrays
+    elif len(arrays) == 1:
+        return arrays[0]
+    seqarrays = [np.asanyarray(a).ravel() for a in arrays]
+    nrecords = [len(a) for a in seqarrays]
+    ndtype = [a.dtype for a in seqarrays]
+    fldnames = [d.names for d in ndtype]
+    #
+    dtype_l = ndtype[0]
+    newdescr = _get_fieldspec(dtype_l)
+    names = [n for n, d in newdescr]
+    for dtype_n in ndtype[1:]:
+        for fname, fdtype in _get_fieldspec(dtype_n):
+            if fname not in names:
+                newdescr.append((fname, fdtype))
+                names.append(fname)
+            else:
+                nameidx = names.index(fname)
+                _, cdtype = newdescr[nameidx]
+                if autoconvert:
+                    newdescr[nameidx] = (fname, max(fdtype, cdtype))
+                elif fdtype != cdtype:
+                    raise TypeError(f"Incompatible type '{cdtype}' <> '{fdtype}'")
+    # Only one field: use concatenate
+    if len(newdescr) == 1:
+        output = ma.concatenate(seqarrays)
+    else:
+        #
+        output = ma.masked_all((np.sum(nrecords),), newdescr)
+        offset = np.cumsum(np.r_[0, nrecords])
+        seen = []
+        for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
+            names = a.dtype.names
+            if names is None:
+                output[f'f{len(seen)}'][i:j] = a
+            else:
+                for name in n:
+                    output[name][i:j] = a[name]
+                    if name not in seen:
+                        seen.append(name)
+    #
+    return _fix_output(_fix_defaults(output, defaults),
+                       usemask=usemask, asrecarray=asrecarray)
+
+
+def _find_duplicates_dispatcher(
+        a, key=None, ignoremask=None, return_index=None):
+    return (a,)
+
+
+@array_function_dispatch(_find_duplicates_dispatcher)
+def find_duplicates(a, key=None, ignoremask=True, return_index=False):
+    """
+    Find the duplicates in a structured array along a given key.
+
+    Parameters
+    ----------
+    a : array-like
+        Input array
+    key : {string, None}, optional
+        Name of the fields along which to check the duplicates.
+        If None, the search is performed by records.
+    ignoremask : {True, False}, optional
+        Whether masked data should be discarded or considered as duplicates.
+    return_index : {False, True}, optional
+        Whether to return the indices of the duplicated values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = [('a', int)]
+    >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
+    ...             mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+    >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
+    (masked_array(data=[(1,), (1,), (2,), (2,)],
+                 mask=[(False,), (False,), (False,), (False,)],
+           fill_value=(999999,),
+                dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
+    """
+    a = np.asanyarray(a).ravel()
+    # Get a dictionary of fields
+    fields = get_fieldstructure(a.dtype)
+    # Get the sorting data (by selecting the corresponding field)
+    base = a
+    if key:
+        for f in fields[key]:
+            base = base[f]
+        base = base[key]
+    # Get the sorting indices and the sorted data
+    sortidx = base.argsort()
+    sortedbase = base[sortidx]
+    sorteddata = sortedbase.filled()
+    # Compare the sorting data
+    flag = (sorteddata[:-1] == sorteddata[1:])
+    # If masked data must be ignored, set the flag to false where needed
+    if ignoremask:
+        sortedmask = sortedbase.recordmask
+        flag[sortedmask[1:]] = False
+    flag = np.concatenate(([False], flag))
+    # We need to take the point on the left as well (else we're missing it)
+    flag[:-1] = flag[:-1] + flag[1:]
+    duplicates = a[sortidx][flag]
+    if return_index:
+        return (duplicates, sortidx[flag])
+    else:
+        return duplicates
+
+
+def _join_by_dispatcher(
+        key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+        defaults=None, usemask=None, asrecarray=None):
+    return (r1, r2)
+
+
+@array_function_dispatch(_join_by_dispatcher)
+def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+            defaults=None, usemask=True, asrecarray=False):
+    """
+    Join arrays `r1` and `r2` on key `key`.
+
+    The key should be either a string or a sequence of strings corresponding
+    to the fields used to join the array.  An exception is raised if the
+    `key` field cannot be found in the two input arrays.  Neither `r1` nor
+    `r2` should have any duplicates along `key`: the presence of duplicates
+    will make the output quite unreliable. Note that duplicates are not
+    looked for by the algorithm.
+
+    Parameters
+    ----------
+    key : {string, sequence}
+        A string or a sequence of strings corresponding to the fields used
+        for comparison.
+    r1, r2 : arrays
+        Structured arrays.
+    jointype : {'inner', 'outer', 'leftouter'}, optional
+        If 'inner', returns the elements common to both r1 and r2.
+        If 'outer', returns the common elements as well as the elements of
+        r1 not in r2 and the elements of r2 not in r1.
+        If 'leftouter', returns the common elements and the elements of r1
+        not in r2.
+    r1postfix : string, optional
+        String appended to the names of the fields of r1 that are present
+        in r2 but absent of the key.
+    r2postfix : string, optional
+        String appended to the names of the fields of r2 that are present
+        in r1 but absent of the key.
+    defaults : {dictionary}, optional
+        Dictionary mapping field names to the corresponding default values.
+    usemask : {True, False}, optional
+        Whether to return a MaskedArray (or MaskedRecords if
+        `asrecarray==True`) or a ndarray.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray (or MaskedRecords if `usemask==True`)
+        or just a flexible-type ndarray.
+
+    Notes
+    -----
+    * The output is sorted along the key.
+    * A temporary array is formed by dropping the fields not in the key for
+      the two arrays and concatenating the result. This array is then
+      sorted, and the common entries selected. The output is constructed by
+      filling the fields with the selected entries. Matching is not
+      preserved if there are some duplicates...
+
+    """
+    # Check jointype
+    if jointype not in ('inner', 'outer', 'leftouter'):
+        raise ValueError(
+            "The 'jointype' argument should be in 'inner', "
+            f"'outer' or 'leftouter' (got '{jointype}' instead)"
+        )
+    # If we have a single key, put it in a tuple
+    if isinstance(key, str):
+        key = (key,)
+
+    # Check the keys
+    if len(set(key)) != len(key):
+        dup = next(x for n, x in enumerate(key) if x in key[n + 1:])
+        raise ValueError(f"duplicate join key {dup!r}")
+    for name in key:
+        if name not in r1.dtype.names:
+            raise ValueError(f'r1 does not have key field {name!r}')
+        if name not in r2.dtype.names:
+            raise ValueError(f'r2 does not have key field {name!r}')
+
+    # Make sure we work with ravelled arrays
+    r1 = r1.ravel()
+    r2 = r2.ravel()
+    nb1 = len(r1)
+    (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
+
+    # Check the names for collision
+    collisions = (set(r1names) & set(r2names)) - set(key)
+    if collisions and not (r1postfix or r2postfix):
+        msg = "r1 and r2 contain common names, r1postfix and r2postfix "
+        msg += "can't both be empty"
+        raise ValueError(msg)
+
+    # Make temporary arrays of just the keys
+    #  (use order of keys in `r1` for back-compatibility)
+    key1 = [n for n in r1names if n in key]
+    r1k = _keep_fields(r1, key1)
+    r2k = _keep_fields(r2, key1)
+
+    # Concatenate the two arrays for comparison
+    aux = ma.concatenate((r1k, r2k))
+    idx_sort = aux.argsort(order=key)
+    aux = aux[idx_sort]
+    #
+    # Get the common keys
+    flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
+    flag_in[:-1] = flag_in[1:] + flag_in[:-1]
+    idx_in = idx_sort[flag_in]
+    idx_1 = idx_in[(idx_in < nb1)]
+    idx_2 = idx_in[(idx_in >= nb1)] - nb1
+    (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
+    if jointype == 'inner':
+        (r1spc, r2spc) = (0, 0)
+    elif jointype == 'outer':
+        idx_out = idx_sort[~flag_in]
+        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+        idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
+        (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
+    elif jointype == 'leftouter':
+        idx_out = idx_sort[~flag_in]
+        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+        (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
+    # Select the entries from each input
+    (s1, s2) = (r1[idx_1], r2[idx_2])
+    #
+    # Build the new description of the output array .......
+    # Start with the key fields
+    ndtype = _get_fieldspec(r1k.dtype)
+
+    # Add the fields from r1
+    for fname, fdtype in _get_fieldspec(r1.dtype):
+        if fname not in key:
+            ndtype.append((fname, fdtype))
+
+    # Add the fields from r2
+    for fname, fdtype in _get_fieldspec(r2.dtype):
+        # Have we seen the current name already ?
+        # we need to rebuild this list every time
+        names = [name for name, dtype in ndtype]
+        try:
+            nameidx = names.index(fname)
+        except ValueError:
+            # ... we haven't: just add the description to the current list
+            ndtype.append((fname, fdtype))
+        else:
+            # collision
+            _, cdtype = ndtype[nameidx]
+            if fname in key:
+                # The current field is part of the key: take the largest dtype
+                ndtype[nameidx] = (fname, max(fdtype, cdtype))
+            else:
+                # The current field is not part of the key: add the suffixes,
+                # and place the new field adjacent to the old one
+                ndtype[nameidx:nameidx + 1] = [
+                    (fname + r1postfix, cdtype),
+                    (fname + r2postfix, fdtype)
+                ]
+    # Rebuild a dtype from the new fields
+    ndtype = np.dtype(ndtype)
+    # Find the largest nb of common fields :
+    # r1cmn and r2cmn should be equal, but...
+    cmn = max(r1cmn, r2cmn)
+    # Construct an empty array
+    output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
+    names = output.dtype.names
+    for f in r1names:
+        selected = s1[f]
+        if f not in names or (f in r2names and not r2postfix and f not in key):
+            f += r1postfix
+        current = output[f]
+        current[:r1cmn] = selected[:r1cmn]
+        if jointype in ('outer', 'leftouter'):
+            current[cmn:cmn + r1spc] = selected[r1cmn:]
+    for f in r2names:
+        selected = s2[f]
+        if f not in names or (f in r1names and not r1postfix and f not in key):
+            f += r2postfix
+        current = output[f]
+        current[:r2cmn] = selected[:r2cmn]
+        if (jointype == 'outer') and r2spc:
+            current[-r2spc:] = selected[r2cmn:]
+    # Sort and finalize the output
+    output.sort(order=key)
+    kwargs = {'usemask': usemask, 'asrecarray': asrecarray}
+    return _fix_output(_fix_defaults(output, defaults), **kwargs)
+
+
+def _rec_join_dispatcher(
+        key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+        defaults=None):
+    return (r1, r2)
+
+
+@array_function_dispatch(_rec_join_dispatcher)
+def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+             defaults=None):
+    """
+    Join arrays `r1` and `r2` on keys.
+    Alternative to join_by that always returns a np.recarray.
+ + See Also + -------- + join_by : equivalent function + """ + kwargs = {'jointype': jointype, 'r1postfix': r1postfix, 'r2postfix': r2postfix, + 'defaults': defaults, 'usemask': False, 'asrecarray': True} + return join_by(key, r1, r2, **kwargs) + + +del array_function_dispatch diff --git a/python/numpy/lib/recfunctions.pyi b/python/numpy/lib/recfunctions.pyi new file mode 100644 index 000000000..073642918 --- /dev/null +++ b/python/numpy/lib/recfunctions.pyi @@ -0,0 +1,435 @@ +from collections.abc import Callable, Iterable, Mapping, Sequence +from typing import Any, Literal, TypeAlias, overload + +from _typeshed import Incomplete +from typing_extensions import TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid +from numpy.ma.mrecords import MaskedRecords + +__all__ = [ + "append_fields", + "apply_along_fields", + "assign_fields_by_name", + "drop_fields", + "find_duplicates", + "flatten_descr", + "get_fieldstructure", + "get_names", + "get_names_flat", + "join_by", + "merge_arrays", + "rec_append_fields", + "rec_drop_fields", + "rec_join", + "recursive_fill_fields", + "rename_fields", + "repack_fields", + "require_fields", + "stack_arrays", + "structured_to_unstructured", + "unstructured_to_structured", +] + +_T = TypeVar("_T") +_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...]) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_DTypeT = TypeVar("_DTypeT", bound=np.dtype) +_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any]) +_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void]) +_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType) + +_OneOrMany: TypeAlias = _T | Iterable[_T] +_BuiltinSequence: TypeAlias = tuple[_T, ...] | list[_T] + +_NestedNames: TypeAlias = tuple[str | _NestedNames, ...] +_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_ +_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType + +_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"] + +### + +def recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ... + +# +def get_names(adtype: np.dtype[np.void]) -> _NestedNames: ... +def get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ... + +# +@overload +def flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ... +@overload +def flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ... + +# +def get_fieldstructure( + adtype: np.dtype[np.void], + lastname: str | None = None, + parents: dict[str, list[str]] | None = None, +) -> dict[str, list[str]]: ... + +# +@overload +def merge_arrays( + seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype], + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def merge_arrays( + seqarrays: Sequence[npt.ArrayLike] | np.void, + fill_value: float = -1, + flatten: bool = False, + usemask: bool = False, + asrecarray: bool = False, +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... + +# +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... 
+@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], + usemask: bool = True, + *, + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def rename_fields( + base: MaskedRecords[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.recarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def rename_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + namemapper: Mapping[str, str], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None, + fill_value: int, + usemask: Literal[True], + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... +@overload +def append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, + fill_value: int = -1, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ... 
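+
+# A comment-sketch of how the append_fields overloads above resolve
+# (hypothetical arrays `a` and `q`, shown for illustration only):
+#
+#     append_fields(a, "q", [q], usemask=False)                   # ndarray
+#     append_fields(a, "q", [q], usemask=False, asrecarray=True)  # recarray
+#     append_fields(a, "q", [q])                                  # MaskedArray
+#     append_fields(a, "q", [q], asrecarray=True)                 # MaskedRecords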
+ +# +def rec_drop_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + drop_names: str | Iterable[str], +) -> np.recarray[_ShapeT, np.dtype[np.void]]: ... + +# +def rec_append_fields( + base: np.ndarray[_ShapeT, np.dtype[np.void]], + names: _OneOrMany[str], + data: _OneOrMany[npt.NDArray[Any]], + dtypes: _BuiltinSequence[np.dtype] | None = None, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented, +# e.g. using a `TypeVar` with constraints. +# https://github.com/numpy/numtype/issues/92 +@overload +def repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ... +@overload +def repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ... +@overload +def repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ... + +# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1) +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: _DTypeLike[_ScalarT], + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[_ScalarT]: ... +@overload +def structured_to_unstructured( + arr: npt.NDArray[np.void], + dtype: npt.DTypeLike | None = None, + copy: bool = False, + casting: np._CastingKind = "unsafe", +) -> npt.NDArray[Any]: ... + +# +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: npt.DTypeLike, + names: None = None, + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... +@overload +def unstructured_to_structured( + arr: npt.NDArray[Any], + dtype: None, + names: _OneOrMany[str], + align: bool = False, + copy: bool = False, + casting: str = "unsafe", +) -> npt.NDArray[np.void]: ... + +# +def apply_along_fields( + func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]], + arr: np.ndarray[_ShapeT, np.dtype[np.void]], +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# +def assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ... + +# +def require_fields( + array: np.ndarray[_ShapeT, np.dtype[np.void]], + required_dtype: _DTypeLikeVoid, +) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ... + +# TODO(jorenham): Attempt shape-typing +@overload +def stack_arrays( + arrays: _ArrayT, + defaults: Mapping[str, object] | None = None, + usemask: bool = True, + asrecarray: bool = False, + autoconvert: bool = False, +) -> _ArrayT: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> npt.NDArray[np.void]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> np.recarray[_AnyShape, np.dtype[np.void]]: ... 
+@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, + autoconvert: bool = False, +) -> np.ma.MaskedArray[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None, + usemask: Literal[True], + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... +@overload +def stack_arrays( + arrays: Sequence[npt.NDArray[Any]], + defaults: Mapping[str, Incomplete] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], + autoconvert: bool = False, +) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ... + +# +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + return_index: Literal[False] = False, +) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None, + ignoremask: bool, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... +@overload +def find_duplicates( + a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], + key: str | None = None, + ignoremask: bool = True, + *, + return_index: Literal[True], +) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ... + +# +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[False] = False, +) -> np.ndarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + *, + usemask: Literal[False], + asrecarray: Literal[True], +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + asrecarray: Literal[False] = False, +) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ... +@overload +def join_by( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, + usemask: Literal[True] = True, + *, + asrecarray: Literal[True], +) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ... + +# +def rec_join( + key: str | Sequence[str], + r1: npt.NDArray[np.void], + r2: npt.NDArray[np.void], + jointype: _JoinType = "inner", + r1postfix: str = "1", + r2postfix: str = "2", + defaults: Mapping[str, object] | None = None, +) -> np.recarray[tuple[int], np.dtype[np.void]]: ... 
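+
+# A quick runtime cross-check of these stubs (a sketch, not shipped with
+# this PR): the overload a checker picks should match the type observed
+# at runtime.
+#
+#     import numpy as np
+#     from numpy.lib import recfunctions as rfn
+#
+#     a = np.array([(1, 2.0), (3, 4.0)], dtype=[('x', 'i4'), ('y', 'f8')])
+#
+#     # dtype= drives the element type: NDArray[np.float64] per the stubs
+#     u = rfn.structured_to_unstructured(a, dtype=np.float64)
+#     assert u.dtype == np.float64
+#
+#     # usemask=False selects the plain-ndarray overload of join_by
+#     j = rfn.join_by('x', a, a, usemask=False)
+#     assert type(j) is np.ndarray
+#
+#     # rec_join always returns a recarray
+#     r = rfn.rec_join('x', a, a)
+#     assert isinstance(r, np.recarray)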
diff --git a/python/numpy/lib/scimath.py b/python/numpy/lib/scimath.py
new file mode 100644
index 000000000..fb6824d9b
--- /dev/null
+++ b/python/numpy/lib/scimath.py
@@ -0,0 +1,13 @@
+from ._scimath_impl import (  # noqa: F401
+    __all__,
+    __doc__,
+    arccos,
+    arcsin,
+    arctanh,
+    log,
+    log2,
+    log10,
+    logn,
+    power,
+    sqrt,
+)
diff --git a/python/numpy/lib/scimath.pyi b/python/numpy/lib/scimath.pyi
new file mode 100644
index 000000000..253235dfc
--- /dev/null
+++ b/python/numpy/lib/scimath.pyi
@@ -0,0 +1,30 @@
+from ._scimath_impl import (
+    __all__ as __all__,
+)
+from ._scimath_impl import (
+    arccos as arccos,
+)
+from ._scimath_impl import (
+    arcsin as arcsin,
+)
+from ._scimath_impl import (
+    arctanh as arctanh,
+)
+from ._scimath_impl import (
+    log as log,
+)
+from ._scimath_impl import (
+    log2 as log2,
+)
+from ._scimath_impl import (
+    log10 as log10,
+)
+from ._scimath_impl import (
+    logn as logn,
+)
+from ._scimath_impl import (
+    power as power,
+)
+from ._scimath_impl import (
+    sqrt as sqrt,
+)
diff --git a/python/numpy/lib/stride_tricks.py b/python/numpy/lib/stride_tricks.py
new file mode 100644
index 000000000..721a548f4
--- /dev/null
+++ b/python/numpy/lib/stride_tricks.py
@@ -0,0 +1 @@
+from ._stride_tricks_impl import __doc__, as_strided, sliding_window_view  # noqa: F401
diff --git a/python/numpy/lib/stride_tricks.pyi b/python/numpy/lib/stride_tricks.pyi
new file mode 100644
index 000000000..42d8fe9ef
--- /dev/null
+++ b/python/numpy/lib/stride_tricks.pyi
@@ -0,0 +1,6 @@
+from numpy.lib._stride_tricks_impl import (
+    as_strided as as_strided,
+)
+from numpy.lib._stride_tricks_impl import (
+    sliding_window_view as sliding_window_view,
+)
diff --git a/python/numpy/lib/tests/__init__.py b/python/numpy/lib/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/lib/tests/data/py2-np0-objarr.npy b/python/numpy/lib/tests/data/py2-np0-objarr.npy
new file mode 100644
index 000000000..a6e9e2397
Binary files /dev/null and b/python/numpy/lib/tests/data/py2-np0-objarr.npy differ
diff --git a/python/numpy/lib/tests/data/py2-objarr.npy b/python/numpy/lib/tests/data/py2-objarr.npy
new file mode 100644
index 000000000..12936c92d
Binary files /dev/null and b/python/numpy/lib/tests/data/py2-objarr.npy differ
diff --git a/python/numpy/lib/tests/data/py2-objarr.npz b/python/numpy/lib/tests/data/py2-objarr.npz
new file mode 100644
index 000000000..68a3b53a1
Binary files /dev/null and b/python/numpy/lib/tests/data/py2-objarr.npz differ
diff --git a/python/numpy/lib/tests/data/py3-objarr.npy b/python/numpy/lib/tests/data/py3-objarr.npy
new file mode 100644
index 000000000..c9f33b010
Binary files /dev/null and b/python/numpy/lib/tests/data/py3-objarr.npy differ
diff --git a/python/numpy/lib/tests/data/py3-objarr.npz b/python/numpy/lib/tests/data/py3-objarr.npz
new file mode 100644
index
000000000..fd7d9d31c Binary files /dev/null and b/python/numpy/lib/tests/data/py3-objarr.npz differ diff --git a/python/numpy/lib/tests/data/python3.npy b/python/numpy/lib/tests/data/python3.npy new file mode 100644 index 000000000..7c6997dd6 Binary files /dev/null and b/python/numpy/lib/tests/data/python3.npy differ diff --git a/python/numpy/lib/tests/data/win64python2.npy b/python/numpy/lib/tests/data/win64python2.npy new file mode 100644 index 000000000..d9bc36af7 Binary files /dev/null and b/python/numpy/lib/tests/data/win64python2.npy differ diff --git a/python/numpy/lib/tests/test__datasource.py b/python/numpy/lib/tests/test__datasource.py new file mode 100644 index 000000000..65137324d --- /dev/null +++ b/python/numpy/lib/tests/test__datasource.py @@ -0,0 +1,352 @@ +import os +import urllib.request as urllib_request +from shutil import rmtree +from tempfile import NamedTemporaryFile, mkdtemp, mkstemp +from urllib.error import URLError +from urllib.parse import urlparse + +import pytest + +import numpy.lib._datasource as datasource +from numpy.testing import assert_, assert_equal, assert_raises + + +def urlopen_stub(url, data=None): + '''Stub to replace urlopen for testing.''' + if url == valid_httpurl(): + tmpfile = NamedTemporaryFile(prefix='urltmp_') + return tmpfile + else: + raise URLError('Name or service not known') + + +# setup and teardown +old_urlopen = None + + +def setup_module(): + global old_urlopen + + old_urlopen = urllib_request.urlopen + urllib_request.urlopen = urlopen_stub + + +def teardown_module(): + urllib_request.urlopen = old_urlopen + + +# A valid website for more robust testing +http_path = 'http://www.google.com/' +http_file = 'index.html' + +http_fakepath = 'http://fake.abc.web/site/' +http_fakefile = 'fake.txt' + +malicious_files = ['/etc/shadow', '../../shadow', + '..\\system.dat', 'c:\\windows\\system.dat'] + +magic_line = b'three is the magic number' + + +# Utility functions used by many tests +def valid_textfile(filedir): + # Generate and return a valid temporary file. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True) + os.close(fd) + return path + + +def invalid_textfile(filedir): + # Generate and return an invalid filename. + fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir) + os.close(fd) + os.remove(path) + return path + + +def valid_httpurl(): + return http_path + http_file + + +def invalid_httpurl(): + return http_fakepath + http_fakefile + + +def valid_baseurl(): + return http_path + + +def invalid_baseurl(): + return http_fakepath + + +def valid_httpfile(): + return http_file + + +def invalid_httpfile(): + return http_fakefile + + +class TestDataSourceOpen: + def setup_method(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + fh = self.ds.open(valid_httpurl()) + assert_(fh) + fh.close() + + def test_InvalidHTTP(self): + url = invalid_httpurl() + assert_raises(OSError, self.ds.open, url) + try: + self.ds.open(url) + except OSError as e: + # Regression test for bug fixed in r4342. 
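+            # (an OSError raised for an unreachable URL carries no errno,
+            # unlike a filesystem error)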
+ assert_(e.errno is None) + + def test_InvalidHTTPCacheURLError(self): + assert_raises(URLError, self.ds._cache, invalid_httpurl()) + + def test_ValidFile(self): + local_file = valid_textfile(self.tmpdir) + fh = self.ds.open(local_file) + assert_(fh) + fh.close() + + def test_InvalidFile(self): + invalid_file = invalid_textfile(self.tmpdir) + assert_raises(OSError, self.ds.open, invalid_file) + + def test_ValidGzipFile(self): + try: + import gzip + except ImportError: + # We don't have the gzip capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for Gzip files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.gz') + fp = gzip.open(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + def test_ValidBz2File(self): + try: + import bz2 + except ImportError: + # We don't have the bz2 capabilities to test. + pytest.skip() + # Test datasource's internal file_opener for BZip2 files. + filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2') + fp = bz2.BZ2File(filepath, 'w') + fp.write(magic_line) + fp.close() + fp = self.ds.open(filepath) + result = fp.readline() + fp.close() + assert_equal(magic_line, result) + + +class TestDataSourceExists: + def setup_method(self): + self.tmpdir = mkdtemp() + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + assert_(self.ds.exists(valid_httpurl())) + + def test_InvalidHTTP(self): + assert_equal(self.ds.exists(invalid_httpurl()), False) + + def test_ValidFile(self): + # Test valid file in destpath + tmpfile = valid_textfile(self.tmpdir) + assert_(self.ds.exists(tmpfile)) + # Test valid local file not in destpath + localdir = mkdtemp() + tmpfile = valid_textfile(localdir) + assert_(self.ds.exists(tmpfile)) + rmtree(localdir) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.ds.exists(tmpfile), False) + + +class TestDataSourceAbspath: + def setup_method(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.ds = datasource.DataSource(self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.ds + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_equal(local_path, self.ds.abspath(valid_httpurl())) + + def test_ValidFile(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_equal(tmpfile, self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_equal(tmpfile, self.ds.abspath(tmpfile)) + + def test_InvalidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl()) + invalidhttp = os.path.join(self.tmpdir, netloc, + upath.strip(os.sep).strip('/')) + assert_(invalidhttp != self.ds.abspath(valid_httpurl())) + + def test_InvalidFile(self): + invalidfile = valid_textfile(self.tmpdir) + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + # Test with filename only + assert_(invalidfile != self.ds.abspath(tmpfilename)) + # Test filename with complete path + assert_(invalidfile != self.ds.abspath(tmpfile)) + + def test_sandboxing(self): + tmpfile = valid_textfile(self.tmpdir) + tmpfilename = os.path.split(tmpfile)[-1] + + tmp_path = lambda x: os.path.abspath(self.ds.abspath(x)) + + 
assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir)) + assert_(tmp_path(tmpfile).startswith(self.tmpdir)) + assert_(tmp_path(tmpfilename).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path + fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_ValidFile() + self.test_InvalidHTTP() + self.test_InvalidFile() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryAbspath: + def setup_method(self): + self.tmpdir = os.path.abspath(mkdtemp()) + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidHTTP(self): + scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl()) + local_path = os.path.join(self.repos._destpath, netloc, + upath.strip(os.sep).strip('/')) + filepath = self.repos.abspath(valid_httpfile()) + assert_equal(local_path, filepath) + + def test_sandboxing(self): + tmp_path = lambda x: os.path.abspath(self.repos.abspath(x)) + assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir)) + for fn in malicious_files: + assert_(tmp_path(http_path + fn).startswith(self.tmpdir)) + assert_(tmp_path(fn).startswith(self.tmpdir)) + + def test_windows_os_sep(self): + orig_os_sep = os.sep + try: + os.sep = '\\' + self.test_ValidHTTP() + self.test_sandboxing() + finally: + os.sep = orig_os_sep + + +class TestRepositoryExists: + def setup_method(self): + self.tmpdir = mkdtemp() + self.repos = datasource.Repository(valid_baseurl(), self.tmpdir) + + def teardown_method(self): + rmtree(self.tmpdir) + del self.repos + + def test_ValidFile(self): + # Create local temp file + tmpfile = valid_textfile(self.tmpdir) + assert_(self.repos.exists(tmpfile)) + + def test_InvalidFile(self): + tmpfile = invalid_textfile(self.tmpdir) + assert_equal(self.repos.exists(tmpfile), False) + + def test_RemoveHTTPFile(self): + assert_(self.repos.exists(valid_httpurl())) + + def test_CachedHTTPFile(self): + localfile = valid_httpurl() + # Create a locally cached temp file with an URL based + # directory structure. This is similar to what Repository.open + # would do. 
+ scheme, netloc, upath, pms, qry, frg = urlparse(localfile) + local_path = os.path.join(self.repos._destpath, netloc) + os.mkdir(local_path, 0o0700) + tmpfile = valid_textfile(local_path) + assert_(self.repos.exists(tmpfile)) + + +class TestOpenFunc: + def setup_method(self): + self.tmpdir = mkdtemp() + + def teardown_method(self): + rmtree(self.tmpdir) + + def test_DataSourceOpen(self): + local_file = valid_textfile(self.tmpdir) + # Test case where destpath is passed in + fp = datasource.open(local_file, destpath=self.tmpdir) + assert_(fp) + fp.close() + # Test case where default destpath is used + fp = datasource.open(local_file) + assert_(fp) + fp.close() + +def test_del_attr_handling(): + # DataSource __del__ can be called + # even if __init__ fails when the + # Exception object is caught by the + # caller as happens in refguide_check + # is_deprecated() function + + ds = datasource.DataSource() + # simulate failed __init__ by removing key attribute + # produced within __init__ and expected by __del__ + del ds._istmpdest + # should not raise an AttributeError if __del__ + # gracefully handles failed __init__: + ds.__del__() diff --git a/python/numpy/lib/tests/test__iotools.py b/python/numpy/lib/tests/test__iotools.py new file mode 100644 index 000000000..1581ffbe9 --- /dev/null +++ b/python/numpy/lib/tests/test__iotools.py @@ -0,0 +1,360 @@ +import time +from datetime import date + +import numpy as np +from numpy.lib._iotools import ( + LineSplitter, + NameValidator, + StringConverter, + easy_dtype, + flatten_dtype, + has_nested_fields, +) +from numpy.testing import ( + assert_, + assert_allclose, + assert_equal, + assert_raises, +) + + +class TestLineSplitter: + "Tests the LineSplitter class." + + def test_no_delimiter(self): + "Test LineSplitter w/o delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter()(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + test = LineSplitter('')(strg) + assert_equal(test, ['1', '2', '3', '4', '5']) + + def test_space_delimiter(self): + "Test space delimiter" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(' ')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + test = LineSplitter(' ')(strg) + assert_equal(test, ['1 2 3 4', '5']) + + def test_tab_delimiter(self): + "Test tab delimiter" + strg = " 1\t 2\t 3\t 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1', '2', '3', '4', '5 6']) + strg = " 1 2\t 3 4\t 5 6" + test = LineSplitter('\t')(strg) + assert_equal(test, ['1 2', '3 4', '5 6']) + + def test_other_delimiter(self): + "Test LineSplitter on delimiter" + strg = "1,2,3,4,,5" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + # + strg = " 1,2,3,4,,5 # test" + test = LineSplitter(',')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + # gh-11028 bytes comment/delimiters should get encoded + strg = b" 1,2,3,4,,5 % test" + test = LineSplitter(delimiter=b',', comments=b'%')(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5']) + + def test_constant_fixed_width(self): + "Test LineSplitter w/ fixed-width fields" + strg = " 1 2 3 4 5 # test" + test = LineSplitter(3)(strg) + assert_equal(test, ['1', '2', '3', '4', '', '5', '']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(20)(strg) + assert_equal(test, ['1 3 4 5 6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter(30)(strg) + assert_equal(test, ['1 3 4 5 6']) + + def test_variable_fixed_width(self): + strg = " 1 3 4 5 6# test" + test = LineSplitter((3, 6, 6, 3))(strg) + assert_equal(test, ['1', '3', 
'4 5', '6']) + # + strg = " 1 3 4 5 6# test" + test = LineSplitter((6, 6, 9))(strg) + assert_equal(test, ['1', '3 4', '5 6']) + +# ----------------------------------------------------------------------------- + + +class TestNameValidator: + + def test_case_sensitivity(self): + "Test case sensitivity" + names = ['A', 'a', 'b', 'c'] + test = NameValidator().validate(names) + assert_equal(test, ['A', 'a', 'b', 'c']) + test = NameValidator(case_sensitive=False).validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='upper').validate(names) + assert_equal(test, ['A', 'A_1', 'B', 'C']) + test = NameValidator(case_sensitive='lower').validate(names) + assert_equal(test, ['a', 'a_1', 'b', 'c']) + + # check exceptions + assert_raises(ValueError, NameValidator, case_sensitive='foobar') + + def test_excludelist(self): + "Test excludelist" + names = ['dates', 'data', 'Other Data', 'mask'] + validator = NameValidator(excludelist=['dates', 'data', 'mask']) + test = validator.validate(names) + assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_']) + + def test_missing_names(self): + "Test validate missing names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist), ['a', 'b', 'c']) + namelist = ('', 'b', 'c') + assert_equal(validator(namelist), ['f0', 'b', 'c']) + namelist = ('a', 'b', '') + assert_equal(validator(namelist), ['a', 'b', 'f0']) + namelist = ('', 'f0', '') + assert_equal(validator(namelist), ['f1', 'f0', 'f2']) + + def test_validate_nb_names(self): + "Test validate nb names" + namelist = ('a', 'b', 'c') + validator = NameValidator() + assert_equal(validator(namelist, nbfields=1), ('a',)) + assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"), + ['a', 'b', 'c', 'g0', 'g1']) + + def test_validate_wo_names(self): + "Test validate no names" + namelist = None + validator = NameValidator() + assert_(validator(namelist) is None) + assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2']) + +# ----------------------------------------------------------------------------- + + +def _bytes_to_date(s): + return date(*time.strptime(s, "%Y-%m-%d")[:3]) + + +class TestStringConverter: + "Test StringConverter" + + def test_creation(self): + "Test creation of a StringConverter" + converter = StringConverter(int, -99999) + assert_equal(converter._status, 1) + assert_equal(converter.default, -99999) + + def test_upgrade(self): + "Tests the upgrade method." + + converter = StringConverter() + assert_equal(converter._status, 0) + + # test int + assert_equal(converter.upgrade('0'), 0) + assert_equal(converter._status, 1) + + # On systems where long defaults to 32-bit, the statuses will be + # offset by one, so we check for this here. + import numpy._core.numeric as nx + status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize) + + # test int > 2**32 + assert_equal(converter.upgrade('17179869184'), 17179869184) + assert_equal(converter._status, 1 + status_offset) + + # test float + assert_allclose(converter.upgrade('0.'), 0.0) + assert_equal(converter._status, 2 + status_offset) + + # test complex + assert_equal(converter.upgrade('0j'), complex('0j')) + assert_equal(converter._status, 3 + status_offset) + + # test str + # note that the longdouble type has been skipped, so the + # _status increases by 2. Everything should succeed with + # unicode conversion (8). 
+ for s in ['a', b'a']: + res = converter.upgrade(s) + assert_(type(res) is str) + assert_equal(res, 'a') + assert_equal(converter._status, 8 + status_offset) + + def test_missing(self): + "Tests the use of missing values." + converter = StringConverter(missing_values=('missing', + 'missed')) + converter.upgrade('0') + assert_equal(converter('0'), 0) + assert_equal(converter(''), converter.default) + assert_equal(converter('missing'), converter.default) + assert_equal(converter('missed'), converter.default) + try: + converter('miss') + except ValueError: + pass + + def test_upgrademapper(self): + "Tests updatemapper" + dateparser = _bytes_to_date + _original_mapper = StringConverter._mapper[:] + try: + StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1)) + convert = StringConverter(dateparser, date(2000, 1, 1)) + test = convert('2001-01-01') + assert_equal(test, date(2001, 1, 1)) + test = convert('2009-01-01') + assert_equal(test, date(2009, 1, 1)) + test = convert('') + assert_equal(test, date(2000, 1, 1)) + finally: + StringConverter._mapper = _original_mapper + + def test_string_to_object(self): + "Make sure that string-to-object functions are properly recognized" + old_mapper = StringConverter._mapper[:] # copy of list + conv = StringConverter(_bytes_to_date) + assert_equal(conv._mapper, old_mapper) + assert_(hasattr(conv, 'default')) + + def test_keep_default(self): + "Make sure we don't lose an explicit default" + converter = StringConverter(None, missing_values='', + default=-999) + converter.upgrade('3.14159265') + assert_equal(converter.default, -999) + assert_equal(converter.type, np.dtype(float)) + # + converter = StringConverter( + None, missing_values='', default=0) + converter.upgrade('3.14159265') + assert_equal(converter.default, 0) + assert_equal(converter.type, np.dtype(float)) + + def test_keep_default_zero(self): + "Check that we don't lose a default of 0" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal(converter.default, 0) + + def test_keep_missing_values(self): + "Check that we're not losing missing values" + converter = StringConverter(int, default=0, + missing_values="N/A") + assert_equal( + converter.missing_values, {'', 'N/A'}) + + def test_int64_dtype(self): + "Check that int64 integer types can be specified" + converter = StringConverter(np.int64, default=0) + val = "-9223372036854775807" + assert_(converter(val) == -9223372036854775807) + val = "9223372036854775807" + assert_(converter(val) == 9223372036854775807) + + def test_uint64_dtype(self): + "Check that uint64 integer types can be specified" + converter = StringConverter(np.uint64, default=0) + val = "9223372043271415339" + assert_(converter(val) == 9223372043271415339) + + +class TestMiscFunctions: + + def test_has_nested_dtype(self): + "Test has_nested_dtype" + ndtype = np.dtype(float) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + assert_equal(has_nested_fields(ndtype), False) + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + assert_equal(has_nested_fields(ndtype), True) + + def test_easy_dtype(self): + "Test ndtype on dtypes" + # Simple case + ndtype = float + assert_equal(easy_dtype(ndtype), np.dtype(float)) + # As string w/o names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', "i4"), ('f1', "f8")])) + # As string w/o names but different default format + assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"), + np.dtype([('field_000', "i4"), 
('field_001', "f8")])) + # As string w/ names + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (too many) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', "i4"), ('b', "f8")])) + # As string w/ names (not enough) + ndtype = "i4, f8" + assert_equal(easy_dtype(ndtype, names=", b"), + np.dtype([('f0', "i4"), ('b', "f8")])) + # ... (with different default format) + assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"), + np.dtype([('a', "i4"), ('f00', "f8")])) + # As list of tuples w/o names + ndtype = [('A', int), ('B', float)] + assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)])) + # As list of tuples w/ names + assert_equal(easy_dtype(ndtype, names="a,b"), + np.dtype([('a', int), ('b', float)])) + # As list of tuples w/ not enough names + assert_equal(easy_dtype(ndtype, names="a"), + np.dtype([('a', int), ('f0', float)])) + # As list of tuples w/ too many names + assert_equal(easy_dtype(ndtype, names="a,b,c"), + np.dtype([('a', int), ('b', float)])) + # As list of types w/o names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype), + np.dtype([('f0', int), ('f1', float), ('f2', float)])) + # As list of types w names + ndtype = (int, float, float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([('a', int), ('b', float), ('c', float)])) + # As simple dtype w/ names + ndtype = np.dtype(float) + assert_equal(easy_dtype(ndtype, names="a, b, c"), + np.dtype([(_, float) for _ in ('a', 'b', 'c')])) + # As simple dtype w/o names (but multiple fields) + ndtype = np.dtype(float) + assert_equal( + easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"), + np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')])) + + def test_flatten_dtype(self): + "Testing flatten_dtype" + # Standard dtype + dt = np.dtype([("a", "f8"), ("b", "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) + # Recursive dtype + dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int]) + # dtype with shaped fields + dt = np.dtype([("a", (float, 2)), ("b", (int, 3))]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, int]) + dt_flat = flatten_dtype(dt, True) + assert_equal(dt_flat, [float] * 2 + [int] * 3) + # dtype w/ titles + dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")]) + dt_flat = flatten_dtype(dt) + assert_equal(dt_flat, [float, float]) diff --git a/python/numpy/lib/tests/test__version.py b/python/numpy/lib/tests/test__version.py new file mode 100644 index 000000000..6e6a34a24 --- /dev/null +++ b/python/numpy/lib/tests/test__version.py @@ -0,0 +1,64 @@ +"""Tests for the NumpyVersion class. + +""" +from numpy.lib import NumpyVersion +from numpy.testing import assert_, assert_raises + + +def test_main_versions(): + assert_(NumpyVersion('1.8.0') == '1.8.0') + for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']: + assert_(NumpyVersion('1.8.0') < ver) + + for ver in ['1.7.0', '1.7.1', '0.9.9']: + assert_(NumpyVersion('1.8.0') > ver) + + +def test_version_1_point_10(): + # regression test for gh-2998. 
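+    # (plain string comparison would sort '1.10.0' before '1.9.0';
+    # NumpyVersion must compare the version components numerically)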
+ assert_(NumpyVersion('1.9.0') < '1.10.0') + assert_(NumpyVersion('1.11.0') < '1.11.1') + assert_(NumpyVersion('1.11.0') == '1.11.0') + assert_(NumpyVersion('1.99.11') < '1.99.12') + + +def test_alpha_beta_rc(): + assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1') + for ver in ['1.8.0', '1.8.0rc2']: + assert_(NumpyVersion('1.8.0rc1') < ver) + + for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']: + assert_(NumpyVersion('1.8.0rc1') > ver) + + assert_(NumpyVersion('1.8.0b1') > '1.8.0a2') + + +def test_dev_version(): + assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']: + assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111') + + +def test_dev_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111') + assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2') + + +def test_dev0_version(): + assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0') + for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']: + assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver) + + assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111') + + +def test_dev0_a_b_rc_mixed(): + assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111') + assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2') + + +def test_raises(): + for ver in ['1.9', '1,9.0', '1.7.x']: + assert_raises(ValueError, NumpyVersion, ver) diff --git a/python/numpy/lib/tests/test_array_utils.py b/python/numpy/lib/tests/test_array_utils.py new file mode 100644 index 000000000..55b9d283b --- /dev/null +++ b/python/numpy/lib/tests/test_array_utils.py @@ -0,0 +1,32 @@ +import numpy as np +from numpy.lib import array_utils +from numpy.testing import assert_equal + + +class TestByteBounds: + def test_byte_bounds(self): + # pointer difference matches size * itemsize + # due to contiguity + a = np.arange(12).reshape(3, 4) + low, high = array_utils.byte_bounds(a) + assert_equal(high - low, a.size * a.itemsize) + + def test_unusual_order_positive_stride(self): + a = np.arange(12).reshape(3, 4) + b = a.T + low, high = array_utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_unusual_order_negative_stride(self): + a = np.arange(12).reshape(3, 4) + b = a.T[::-1] + low, high = array_utils.byte_bounds(b) + assert_equal(high - low, b.size * b.itemsize) + + def test_strided(self): + a = np.arange(12) + b = a[::2] + low, high = array_utils.byte_bounds(b) + # the largest pointer address is lost (even numbers only in the + # stride), and compensate addresses for striding by 2 + assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize) diff --git a/python/numpy/lib/tests/test_arraypad.py b/python/numpy/lib/tests/test_arraypad.py new file mode 100644 index 000000000..6efbe348c --- /dev/null +++ b/python/numpy/lib/tests/test_arraypad.py @@ -0,0 +1,1415 @@ +"""Tests for the array padding functions. 
+ +""" +import pytest + +import numpy as np +from numpy.lib._arraypad_impl import _as_pairs +from numpy.testing import assert_allclose, assert_array_equal, assert_equal + +_numeric_dtypes = ( + np._core.sctypes["uint"] + + np._core.sctypes["int"] + + np._core.sctypes["float"] + + np._core.sctypes["complex"] +) +_all_modes = { + 'constant': {'constant_values': 0}, + 'edge': {}, + 'linear_ramp': {'end_values': 0}, + 'maximum': {'stat_length': None}, + 'mean': {'stat_length': None}, + 'median': {'stat_length': None}, + 'minimum': {'stat_length': None}, + 'reflect': {'reflect_type': 'even'}, + 'symmetric': {'reflect_type': 'even'}, + 'wrap': {}, + 'empty': {} +} + + +class TestAsPairs: + def test_single_value(self): + """Test casting for a single value.""" + expected = np.array([[3, 3]] * 10) + for x in (3, [3], [[3]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # Test with dtype=object + obj = object() + assert_equal( + _as_pairs(obj, 10), + np.array([[obj, obj]] * 10) + ) + + def test_two_values(self): + """Test proper casting for two different values.""" + # Broadcasting in the first dimension with numbers + expected = np.array([[3, 4]] * 10) + for x in ([3, 4], [[3, 4]]): + result = _as_pairs(x, 10) + assert_equal(result, expected) + # and with dtype=object + obj = object() + assert_equal( + _as_pairs(["a", obj], 10), + np.array([["a", obj]] * 10) + ) + + # Broadcasting in the second / last dimension with numbers + assert_equal( + _as_pairs([[3], [4]], 2), + np.array([[3, 3], [4, 4]]) + ) + # and with dtype=object + assert_equal( + _as_pairs([["a"], [obj]], 2), + np.array([["a", "a"], [obj, obj]]) + ) + + def test_with_none(self): + expected = ((None, None), (None, None), (None, None)) + assert_equal( + _as_pairs(None, 3, as_index=False), + expected + ) + assert_equal( + _as_pairs(None, 3, as_index=True), + expected + ) + + def test_pass_through(self): + """Test if `x` already matching desired output are passed through.""" + expected = np.arange(12).reshape((6, 2)) + assert_equal( + _as_pairs(expected, 6), + expected + ) + + def test_as_index(self): + """Test results if `as_index=True`.""" + assert_equal( + _as_pairs([2.6, 3.3], 10, as_index=True), + np.array([[3, 3]] * 10, dtype=np.intp) + ) + assert_equal( + _as_pairs([2.6, 4.49], 10, as_index=True), + np.array([[3, 4]] * 10, dtype=np.intp) + ) + for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]], + [[1, 2]] * 9 + [[1, -2]]): + with pytest.raises(ValueError, match="negative values"): + _as_pairs(x, 10, as_index=True) + + def test_exceptions(self): + """Ensure faulty usage is discovered.""" + with pytest.raises(ValueError, match="more dimensions than allowed"): + _as_pairs([[[3]]], 10) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs([[1, 2], [3, 4]], 3) + with pytest.raises(ValueError, match="could not be broadcast"): + _as_pairs(np.ones((2, 3)), 3) + + +class TestConditionalShortcuts: + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_padding_shortcuts(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(0, 0) for _ in test.shape] + assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_shallow_statistic_range(self, mode): + test = np.arange(120).reshape(4, 5, 6) + pad_amt = [(1, 1) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode='edge'), + np.pad(test, pad_amt, mode=mode, stat_length=1)) + + @pytest.mark.parametrize("mode", 
['maximum', 'mean', 'median', 'minimum',]) + def test_clip_statistic_range(self, mode): + test = np.arange(30).reshape(5, 6) + pad_amt = [(3, 3) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode=mode), + np.pad(test, pad_amt, mode=mode, stat_length=30)) + + +class TestStatistic: + def test_check_mean_stat_length(self): + a = np.arange(100).astype('f') + a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), )) + b = np.array( + [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, + 0.5, 0.5, 0.5, 0.5, 0.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98., + 98., 98., 98., 98., 98., 98., 98., 98., 98., 98. + ]) + assert_array_equal(a, b) + + def test_check_maximum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, + 99, 99, 99, 99, 99, 99, 99, 99, 99, 99] + ) + assert_array_equal(a, b) + + def test_check_maximum_2(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum') + b = np.array( + [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + assert_array_equal(a, b) + + def test_check_maximum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'maximum', stat_length=10) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, 100, 100, 100, 100, 100, 100, 100, 100, 100] + ) + 
assert_array_equal(a, b) + + def test_check_minimum_1(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_minimum_2(self): + a = np.arange(100) + 2 + a = np.pad(a, (25, 20), 'minimum') + b = np.array( + [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, + + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + assert_array_equal(a, b) + + def test_check_minimum_stat_length(self): + a = np.arange(100) + 1 + a = np.pad(a, (25, 20), 'minimum', stat_length=10) + b = np.array( + [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, + + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, + 91, 91, 91, 91, 91, 91, 91, 91, 91, 91] + ) + assert_array_equal(a, b) + + def test_check_median(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'median') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + def test_check_median_01(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a, 1, 'median') + b = np.array( + [[4, 4, 5, 4, 4], + + [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [4, 4, 5, 4, 4]] + ) + assert_array_equal(a, b) + + def test_check_median_02(self): + a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]]) + a = np.pad(a.T, 1, 'median').T + b = np.array( + [[5, 4, 5, 4, 5], + 
+ [3, 3, 1, 4, 3], + [5, 4, 5, 9, 5], + [8, 9, 8, 2, 8], + + [5, 4, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_median_stat_length(self): + a = np.arange(100).astype('f') + a[1] = 2. + a[97] = 96. + a = np.pad(a, (25, 20), 'median', stat_length=(3, 5)) + b = np.array( + [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., + 2., 2., 2., 2., 2., + + 0., 2., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 96., 98., 99., + + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96., + 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.] + ) + assert_array_equal(a, b) + + def test_check_mean_shape_one(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'mean', stat_length=2) + b = np.array( + [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6], + [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_mean_2(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'mean') + b = np.array( + [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, + + 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., + 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., + 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., + 30., 31., 32., 33., 34., 35., 36., 37., 38., 39., + 40., 41., 42., 43., 44., 45., 46., 47., 48., 49., + 50., 51., 52., 53., 54., 55., 56., 57., 58., 59., + 60., 61., 62., 63., 64., 65., 66., 67., 68., 69., + 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., + 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., + 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., + + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, + 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("mode", [ + "mean", + "median", + "minimum", + "maximum" + ]) + def test_same_prepend_append(self, mode): + """ Test that appended and prepended values are equal """ + # This test is constructed to trigger floating point rounding errors in + # a way that caused gh-11216 for mode=='mean' + a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64) + a = np.pad(a, (1, 1), mode) + assert_equal(a[0], a[-1]) + + @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"]) + @pytest.mark.parametrize( + "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))] + ) + def test_check_negative_stat_length(self, mode, stat_length): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative 
values" + with pytest.raises(ValueError, match=match): + np.pad(arr, 2, mode, stat_length=stat_length) + + def test_simple_stat_length(self): + a = np.arange(30) + a = np.reshape(a, (6, 5)) + a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,)) + b = np.array( + [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + + [1, 1, 1, 0, 1, 2, 3, 4, 3, 3], + [6, 6, 6, 5, 6, 7, 8, 9, 8, 8], + [11, 11, 11, 10, 11, 12, 13, 14, 13, 13], + [16, 16, 16, 15, 16, 17, 18, 19, 18, 18], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [26, 26, 26, 25, 26, 27, 28, 29, 28, 28], + + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23], + [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]] + ) + assert_array_equal(a, b) + + @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning") + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in( scalar)? divide:RuntimeWarning" + ) + @pytest.mark.parametrize("mode", ["mean", "median"]) + def test_zero_stat_length_valid(self, mode): + arr = np.pad([1., 2.], (1, 2), mode, stat_length=0) + expected = np.array([np.nan, 1., 2., np.nan, np.nan]) + assert_equal(arr, expected) + + @pytest.mark.parametrize("mode", ["minimum", "maximum"]) + def test_zero_stat_length_invalid(self, mode): + match = "stat_length of 0 yields no value for padding" + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 0, mode, stat_length=(1, 0)) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=0) + with pytest.raises(ValueError, match=match): + np.pad([1., 2.], 1, mode, stat_length=(1, 0)) + + +class TestConstant: + def test_check_constant(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20)) + b = np.array( + [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, + 10, 10, 10, 10, 10, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, 20, 20, 20, 20, 20, 20] + ) + assert_array_equal(a, b) + + def test_check_constant_zeros(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'constant') + b = np.array( + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ) + assert_array_equal(a, b) + + def test_check_constant_float(self): + # If input array is int, but constant_values are float, the dtype of + # the array to be padded is kept + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, (1, 2), mode='constant', + constant_values=1.1) + expected = np.array( + [[1, 1, 1, 1, 1, 1, 1, 1, 1], + + [1, 0, 1, 2, 3, 4, 5, 1, 
1], + [1, 6, 7, 8, 9, 10, 11, 1, 1], + [1, 12, 13, 14, 15, 16, 17, 1, 1], + [1, 18, 19, 20, 21, 22, 23, 1, 1], + [1, 24, 25, 26, 27, 28, 29, 1, 1], + + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float2(self): + # If input array is float, and constant_values are float, the dtype of + # the array to be padded is kept - here retaining the float constants + arr = np.arange(30).reshape(5, 6) + arr_float = arr.astype(np.float64) + test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant', + constant_values=1.1) + expected = np.array( + [[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + + [1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], # noqa: E203 + [1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], # noqa: E203 + [1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], # noqa: E203 + [1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], # noqa: E203 + [1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1], # noqa: E203 + + [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1], + [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]] + ) + assert_allclose(test, expected) + + def test_check_constant_float3(self): + a = np.arange(100, dtype=float) + a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2)) + b = np.array( + [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, + -1.1, -1.1, -1.1, -1.1, -1.1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, + -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2] + ) + assert_allclose(a, b) + + def test_check_constant_odd_pad_amount(self): + arr = np.arange(30).reshape(5, 6) + test = np.pad(arr, ((1,), (2,)), mode='constant', + constant_values=3) + expected = np.array( + [[3, 3, 3, 3, 3, 3, 3, 3, 3, 3], + + [3, 3, 0, 1, 2, 3, 4, 5, 3, 3], + [3, 3, 6, 7, 8, 9, 10, 11, 3, 3], + [3, 3, 12, 13, 14, 15, 16, 17, 3, 3], + [3, 3, 18, 19, 20, 21, 22, 23, 3, 3], + [3, 3, 24, 25, 26, 27, 28, 29, 3, 3], + + [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]] + ) + assert_allclose(test, expected) + + def test_check_constant_pad_2d(self): + arr = np.arange(4).reshape(2, 2) + test = np.pad(arr, ((1, 2), (1, 3)), mode='constant', + constant_values=((1, 2), (3, 4))) + expected = np.array( + [[3, 1, 1, 4, 4, 4], + [3, 0, 1, 4, 4, 4], + [3, 2, 3, 4, 4, 4], + [3, 2, 2, 4, 4, 4], + [3, 2, 2, 4, 4, 4]] + ) + assert_allclose(test, expected) + + def test_check_large_integers(self): + uint64_max = 2 ** 64 - 1 + arr = np.full(5, uint64_max, dtype=np.uint64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, uint64_max, dtype=np.uint64) + assert_array_equal(test, expected) + + int64_max = 2 ** 63 - 1 + arr = np.full(5, int64_max, dtype=np.int64) + test = np.pad(arr, 1, mode="constant", constant_values=arr.min()) + expected = np.full(7, int64_max, dtype=np.int64) + assert_array_equal(test, expected) + + def test_check_object_array(self): + arr = np.empty(1, dtype=object) + obj_a = object() + arr[0] = obj_a + obj_b = object() + obj_c = object() + arr = np.pad(arr, pad_width=1, mode='constant', + 
constant_values=(obj_b, obj_c)) + + expected = np.empty((3,), dtype=object) + expected[0] = obj_b + expected[1] = obj_a + expected[2] = obj_c + + assert_array_equal(arr, expected) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="constant") + assert result.shape == (3, 4, 4) + + +class TestLinearRamp: + def test_check_simple(self): + a = np.arange(100).astype('f') + a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5)) + b = np.array( + [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56, + 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96, + 0.80, 0.64, 0.48, 0.32, 0.16, + + 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00, + 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, + 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, + 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, + 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, + 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, + 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, + 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, + 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, + 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, + + 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0, + 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.] + ) + assert_allclose(a, b, rtol=1e-5, atol=1e-5) + + def test_check_2d(self): + arr = np.arange(20).reshape(4, 5).astype(np.float64) + test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0)) + expected = np.array( + [[0., 0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.], + [0., 0., 0., 1., 2., 3., 4., 2., 0.], + [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.], + [0., 5., 10., 11., 12., 13., 14., 7., 0.], + [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.], + [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0.]]) + assert_allclose(test, expected) + + @pytest.mark.xfail(exceptions=(AssertionError,)) + def test_object_array(self): + from fractions import Fraction + arr = np.array([Fraction(1, 2), Fraction(-1, 2)]) + actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0) + + # deliberately chosen to have a non-power-of-2 denominator such that + # rounding to floats causes a failure. + expected = np.array([ + Fraction( 0, 12), + Fraction( 3, 12), + Fraction( 6, 12), + Fraction(-6, 12), + Fraction(-4, 12), + Fraction(-2, 12), + Fraction(-0, 12), + ]) + assert_equal(actual, expected) + + def test_end_values(self): + """Ensure that end values are exact.""" + a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp") + assert_equal(a[:, 0], 0.) + assert_equal(a[:, -1], 0.) + assert_equal(a[0, :], 0.) + assert_equal(a[-1, :], 0.) + + @pytest.mark.parametrize("dtype", _numeric_dtypes) + def test_negative_difference(self, dtype): + """ + Check correct behavior of unsigned dtypes if there is a negative + difference between the edge to pad and `end_values`. Check both cases + to be independent of implementation. Test behavior for all other dtypes + in case dtype casting interferes with complex dtypes. See gh-14191. 
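+
+        A sketch of the expected outcome, using values taken from the
+        assertions below (illustration only, not an independent spec):
+
+            >>> np.pad(np.array([3], dtype=np.uint8), 3,
+            ...        mode="linear_ramp", end_values=0)
+            array([0, 1, 2, 3, 2, 1, 0], dtype=uint8)
+
+        i.e. the ramp steps down to `end_values` instead of wrapping
+        around under unsigned subtraction.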
+ """ + x = np.array([3], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=0) + expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype) + assert_equal(result, expected) + + x = np.array([0], dtype=dtype) + result = np.pad(x, 3, mode="linear_ramp", end_values=3) + expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype) + assert_equal(result, expected) + + +class TestReflect: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect') + b = np.array( + [25, 24, 23, 22, 21, 20, 19, 18, 17, 16, + 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, + 5, 4, 3, 2, 1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 98, 97, 96, 95, 94, 93, 92, 91, 90, 89, + 88, 87, 86, 85, 84, 83, 82, 81, 80, 79] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'reflect', reflect_type='odd') + b = np.array( + [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16, + -15, -14, -13, -12, -11, -10, -9, -8, -7, -6, + -5, -4, -3, -2, -1, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 115, 116, 117, 118, 119] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'reflect') + b = np.array( + [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5], + [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]] + ) + assert_array_equal(a, b) + + def 
test_check_01(self): + a = np.pad([1, 2, 3], 2, 'reflect') + b = np.array([3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'reflect') + b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 4, 'reflect') + b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_04(self): + a = np.pad([1, 2, 3], [1, 10], 'reflect') + b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_05(self): + a = np.pad([1, 2, 3, 4], [45, 10], 'reflect') + b = np.array( + [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, + 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_06(self): + a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric') + b = np.array( + [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, + 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, + 3] + ) + assert_array_equal(a, b) + + def test_check_07(self): + a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric') + b = np.array( + [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, + 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, + 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, + 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, + 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, + 6, 6, 5, 4]) + assert_array_equal(a, b) + + +class TestEmptyArray: + """Check how padding behaves on arrays with an empty dimension.""" + + @pytest.mark.parametrize( + # Keep parametrization ordered, otherwise pytest-xdist might believe + # that different tests were collected during parallelization + "mode", sorted(_all_modes.keys() - {"constant", "empty"}) + ) + def test_pad_empty_dimension(self, mode): + match = ("can't extend empty axis 0 using modes other than 'constant' " + "or 'empty'") + with pytest.raises(ValueError, match=match): + np.pad([], 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.ndarray(0), 4, mode=mode) + with pytest.raises(ValueError, match=match): + np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_pad_non_empty_dimension(self, mode): + result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode) + assert result.shape == (8, 0, 4) + + +class TestSymmetric: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric') + b = np.array( + [24, 23, 22, 21, 20, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, + 4, 3, 2, 1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 98, 97, 96, 95, 94, 93, 92, 91, 90, + 89, 88, 87, 86, 85, 84, 83, 82, 81, 80] + ) + assert_array_equal(a, b) + + def test_check_odd_method(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd') + b = np.array( + [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15, + -14, -13, -12, -11, -10, -9, -8, -7, -6, -5, + -4, -3, -2, -1, 0, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 
+ 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, 110, 111, 112, 113, 114, 115, 116, 117, 118] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + + assert_array_equal(a, b) + + def test_check_large_pad_odd(self): + a = [[4, 5, 6], [6, 7, 8]] + a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd') + b = np.array( + [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8], + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + + [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10], + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + + [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18], + [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]] + ) + assert_array_equal(a, b) + + def test_check_shape(self): + a = [[4, 5, 6]] + a = np.pad(a, (5, 7), 'symmetric') + b = np.array( + [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6], + [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 2, 'symmetric') + b = np.array([2, 1, 1, 2, 3, 3, 2]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 3, 'symmetric') + b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_03(self): + a = np.pad([1, 2, 3], 6, 'symmetric') + b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3]) + assert_array_equal(a, b) + + +class TestWrap: + def test_check_simple(self): + a = np.arange(100) + a = np.pad(a, (25, 20), 'wrap') + b = np.array( + [75, 76, 77, 78, 79, 80, 81, 82, 83, 84, + 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, + + 0, 
1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, + 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] + ) + assert_array_equal(a, b) + + def test_check_large_pad(self): + a = np.arange(12) + a = np.reshape(a, (3, 4)) + a = np.pad(a, (10, 12), 'wrap') + b = np.array( + [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11], + [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, + 3, 0, 1, 2, 3, 0, 1, 2, 3], + [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, + 7, 4, 5, 6, 7, 4, 5, 6, 7], + [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, + 11, 8, 9, 10, 11, 8, 9, 10, 11]] + ) + assert_array_equal(a, b) + + def test_check_01(self): + a = np.pad([1, 2, 3], 3, 'wrap') + b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + assert_array_equal(a, b) + + def test_check_02(self): + a = np.pad([1, 2, 3], 4, 'wrap') + b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1]) + assert_array_equal(a, b) + + def test_pad_with_zero(self): + a = np.ones((3, 5)) + b = np.pad(a, (0, 5), mode="wrap") + assert_array_equal(a, b[:-5, :-5]) + + def test_repeated_wrapping(self): + """ + Check wrapping on each side individually if the wrapped area is longer + than the original array. 
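+
+        A small sketch of the behavior (derived from the assertions
+        below, shown only for orientation):
+
+            >>> np.pad(np.arange(3), (0, 7), mode="wrap")
+            array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0])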
+ """ + a = np.arange(5) + b = np.pad(a, (12, 0), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][3:], b) + + a = np.arange(5) + b = np.pad(a, (0, 12), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][:-3], b) + + def test_repeated_wrapping_multiple_origin(self): + """ + Assert that 'wrap' pads only with multiples of the original area if + the pad width is larger than the original array. + """ + a = np.arange(4).reshape(2, 2) + a = np.pad(a, [(1, 3), (3, 1)], mode='wrap') + b = np.array( + [[3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0], + [3, 2, 3, 2, 3, 2], + [1, 0, 1, 0, 1, 0]] + ) + assert_array_equal(a, b) + + +class TestEdge: + def test_check_simple(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, ((2, 3), (3, 2)), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + def test_check_width_shape_1_2(self): + # Check a pad_width of the form ((1, 2),). + # Regression test for issue gh-7808. + a = np.array([1, 2, 3]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.array([1, 1, 2, 3, 3, 3]) + assert_array_equal(padded, expected) + + a = np.array([[1, 2, 3], [4, 5, 6]]) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + a = np.arange(24).reshape(2, 3, 4) + padded = np.pad(a, ((1, 2),), 'edge') + expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge') + assert_array_equal(padded, expected) + + +class TestEmpty: + def test_simple(self): + arr = np.arange(24).reshape(4, 6) + result = np.pad(arr, [(2, 3), (3, 1)], mode="empty") + assert result.shape == (9, 10) + assert_equal(arr, result[2:-3, 3:-1]) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="empty") + assert result.shape == (3, 4, 4) + + +def test_legacy_vector_functionality(): + def _padwithtens(vector, pad_width, iaxis, kwargs): + vector[:pad_width[0]] = 10 + vector[-pad_width[1]:] = 10 + + a = np.arange(6).reshape(2, 3) + a = np.pad(a, 2, _padwithtens) + b = np.array( + [[10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10], + + [10, 10, 0, 1, 2, 10, 10], + [10, 10, 3, 4, 5, 10, 10], + + [10, 10, 10, 10, 10, 10, 10], + [10, 10, 10, 10, 10, 10, 10]] + ) + assert_array_equal(a, b) + + +def test_unicode_mode(): + a = np.pad([1], 2, mode='constant') + b = np.array([0, 0, 1, 0, 0]) + assert_array_equal(a, b) + + +@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"]) +def test_object_input(mode): + # Regression test for issue gh-11395. 
+ a = np.full((4, 3), fill_value=None) + pad_amt = ((2, 3), (3, 2)) + b = np.full((9, 8), fill_value=None) + assert_array_equal(np.pad(a, pad_amt, mode=mode), b) + + +class TestPadWidth: + @pytest.mark.parametrize("pad_width", [ + (4, 5, 6, 7), + ((1,), (2,), (3,)), + ((1, 2), (3, 4), (5, 6)), + ((3, 4, 5), (0, 1, 2)), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "operands could not be broadcast together" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_misshaped_pad_width_2(self, mode): + arr = np.arange(30).reshape((6, 5)) + match = ("input operand has more dimensions than allowed by the axis " + "remapping") + with pytest.raises(ValueError, match=match): + np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode) + + @pytest.mark.parametrize( + "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_negative_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape((6, 5)) + match = "index can't contain negative values" + with pytest.raises(ValueError, match=match): + np.pad(arr, pad_width, mode) + + @pytest.mark.parametrize("pad_width, dtype", [ + ("3", None), + ("word", None), + (None, None), + (object(), None), + (3.4, None), + (((2, 3, 4), (3, 2)), object), + (complex(1, -1), None), + (((-2.1, 3), (3, 2)), None), + ]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_bad_type(self, pad_width, dtype, mode): + arr = np.arange(30).reshape((6, 5)) + match = "`pad_width` must be of integral type." + if dtype is not None: + # avoid DeprecationWarning when not specifying dtype + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width, dtype=dtype), mode) + else: + with pytest.raises(TypeError, match=match): + np.pad(arr, pad_width, mode) + with pytest.raises(TypeError, match=match): + np.pad(arr, np.array(pad_width), mode) + + def test_pad_width_as_ndarray(self): + a = np.arange(12) + a = np.reshape(a, (4, 3)) + a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge') + b = np.array( + [[0, 0, 0, 0, 1, 2, 2, 2], + [0, 0, 0, 0, 1, 2, 2, 2], + + [0, 0, 0, 0, 1, 2, 2, 2], + [3, 3, 3, 3, 4, 5, 5, 5], + [6, 6, 6, 6, 7, 8, 8, 8], + [9, 9, 9, 9, 10, 11, 11, 11], + + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11], + [9, 9, 9, 9, 10, 11, 11, 11]] + ) + assert_array_equal(a, b) + + @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))]) + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_pad_width(self, pad_width, mode): + arr = np.arange(30).reshape(6, 5) + assert_array_equal(arr, np.pad(arr, pad_width, mode=mode)) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_kwargs(mode): + """Test behavior of pad's kwargs for the given mode.""" + allowed = _all_modes[mode] + not_allowed = {} + for kwargs in _all_modes.values(): + if kwargs != allowed: + not_allowed.update(kwargs) + # Test if allowed keyword arguments pass + np.pad([1, 2, 3], 1, mode, **allowed) + # Test if prohibited keyword arguments of other modes raise an error + for key, value in not_allowed.items(): + match = f"unsupported keyword arguments for mode '{mode}'" + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 1, mode, **{key: value}) + + +def test_constant_zero_default(): + arr = np.array([1, 1]) + assert_array_equal(np.pad(arr, 2), [0, 
0, 1, 1, 0, 0]) + + +@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False]) +def test_unsupported_mode(mode): + match = f"mode '{mode}' is not supported" + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 4, mode=mode) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_non_contiguous_array(mode): + arr = np.arange(24).reshape(4, 6)[::2, ::2] + result = np.pad(arr, (2, 3), mode) + assert result.shape == (7, 8) + assert_equal(result[2:-3, 2:-3], arr) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_memory_layout_persistence(mode): + """Test if C and F order is preserved for all pad modes.""" + x = np.ones((5, 10), order='C') + assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"] + x = np.ones((5, 10), order='F') + assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"] + + +@pytest.mark.parametrize("dtype", _numeric_dtypes) +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_dtype_persistence(dtype, mode): + arr = np.zeros((3, 2, 1), dtype=dtype) + result = np.pad(arr, 1, mode=mode) + assert result.dtype == dtype diff --git a/python/numpy/lib/tests/test_arraysetops.py b/python/numpy/lib/tests/test_arraysetops.py new file mode 100644 index 000000000..7865e1b16 --- /dev/null +++ b/python/numpy/lib/tests/test_arraysetops.py @@ -0,0 +1,1074 @@ +"""Test functions for 1D array set operations. + +""" +import pytest + +import numpy as np +from numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique +from numpy.exceptions import AxisError +from numpy.testing import ( + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestSetOps: + + def test_intersect1d(self): + # unique inputs + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([1, 2, 5]) + c = intersect1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + # non-unique inputs + a = np.array([5, 5, 7, 1, 2]) + b = np.array([2, 1, 4, 3, 3, 1, 5]) + + ed = np.array([1, 2, 5]) + c = intersect1d(a, b) + assert_array_equal(c, ed) + assert_array_equal([], intersect1d([], [])) + + def test_intersect1d_array_like(self): + # See gh-11772 + class Test: + def __array__(self, dtype=None, copy=None): + return np.arange(3) + + a = Test() + res = intersect1d(a, a) + assert_array_equal(res, a) + res = intersect1d([1, 2, 3], [1, 2, 3]) + assert_array_equal(res, [1, 2, 3]) + + def test_intersect1d_indices(self): + # unique inputs + a = np.array([1, 2, 3, 4]) + b = np.array([2, 1, 4, 6]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ee = np.array([1, 2, 4]) + assert_array_equal(c, ee) + assert_array_equal(a[i1], ee) + assert_array_equal(b[i2], ee) + + # non-unique inputs + a = np.array([1, 2, 2, 3, 4, 3, 2]) + b = np.array([1, 8, 4, 2, 2, 3, 2, 3]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ef = np.array([1, 2, 3, 4]) + assert_array_equal(c, ef) + assert_array_equal(a[i1], ef) + assert_array_equal(b[i2], ef) + + # non1d, unique inputs + a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) + b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) + c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True) + ui1 = np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 6, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + # non1d, not assumed to be uniqueinputs + a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) + b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) + c, i1, i2 = intersect1d(a, b, return_indices=True) + ui1 = 
np.unravel_index(i1, a.shape) + ui2 = np.unravel_index(i2, b.shape) + ea = np.array([2, 7, 8]) + assert_array_equal(ea, a[ui1]) + assert_array_equal(ea, b[ui2]) + + def test_setxor1d(self): + a = np.array([5, 7, 1, 2]) + b = np.array([2, 4, 3, 1, 5]) + + ec = np.array([3, 4, 7]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 2, 3]) + b = np.array([6, 5, 4]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setxor1d([], [])) + + def test_setxor1d_unique(self): + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + a = np.array([[1], [8], [2], [3]]) + b = np.array([[6, 5], [4, 8]]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + def test_ediff1d(self): + zero_elem = np.array([]) + one_elem = np.array([1]) + two_elem = np.array([1, 2]) + + assert_array_equal([], ediff1d(zero_elem)) + assert_array_equal([0], ediff1d(zero_elem, to_begin=0)) + assert_array_equal([0], ediff1d(zero_elem, to_end=0)) + assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0)) + assert_array_equal([], ediff1d(one_elem)) + assert_array_equal([1], ediff1d(two_elem)) + assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9)) + assert_array_equal([5, 6, 1, 7, 8], + ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8])) + assert_array_equal([1, 9], ediff1d(two_elem, to_end=9)) + assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8])) + assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7)) + assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6])) + + @pytest.mark.parametrize("ary, prepend, append, expected", [ + # should fail because trying to cast + # np.nan standard floating point value + # into an integer array: + (np.array([1, 2, 3], dtype=np.int64), + None, + np.nan, + 'to_end'), + # should fail because attempting + # to downcast to int type: + (np.array([1, 2, 3], dtype=np.int64), + np.array([5, 7, 2], dtype=np.float32), + None, + 'to_begin'), + # should fail because attempting to cast + # two special floating point values + # to integers (on both sides of ary), + # `to_begin` is in the error message as the impl checks this first: + (np.array([1., 3., 9.], dtype=np.int8), + np.nan, + np.nan, + 'to_begin'), + ]) + def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected): + # verify resolution of gh-11490 + + # specifically, raise an appropriate + # Exception when attempting to append or + # prepend with an incompatible type + msg = f'dtype of `{expected}` must be compatible' + with assert_raises_regex(TypeError, msg): + ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + + @pytest.mark.parametrize( + "ary,prepend,append,expected", + [ + (np.array([1, 2, 3], dtype=np.int16), + 2**16, # will be cast to int16 under same kind rule. 
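+             # (sketch of the arithmetic: 2**16 wraps to 0 and
+             # 2**16 + 4 wraps to 4 as int16, which is where the 0/4
+             # endpoints in the expected array come from)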
+ 2**16 + 4, + np.array([0, 1, 1, 4], dtype=np.int16)), + (np.array([1, 2, 3], dtype=np.float32), + np.array([5], dtype=np.float64), + None, + np.array([5, 1, 1], dtype=np.float32)), + (np.array([1, 2, 3], dtype=np.int32), + 0, + 0, + np.array([0, 1, 1, 0], dtype=np.int32)), + (np.array([1, 2, 3], dtype=np.int64), + 3, + -9, + np.array([3, 1, 1, -9], dtype=np.int64)), + ] + ) + def test_ediff1d_scalar_handling(self, + ary, + prepend, + append, + expected): + # maintain backwards-compatibility + # of scalar prepend / append behavior + # in ediff1d following fix for gh-11490 + actual = np.ediff1d(ary=ary, + to_end=append, + to_begin=prepend) + assert_equal(actual, expected) + assert actual.dtype == expected.dtype + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin(self, kind): + def _isin_slow(a, b): + b = np.asarray(b).flatten().tolist() + return a in b + isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1}) + + def assert_isin_equal(a, b): + x = isin(a, b, kind=kind) + y = isin_slow(a, b) + assert_array_equal(x, y) + + # multidimensional arrays in both arguments + a = np.arange(24).reshape([2, 3, 4]) + b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]]) + assert_isin_equal(a, b) + + # array-likes as both arguments + c = [(9, 8), (7, 6)] + d = (9, 7) + assert_isin_equal(c, d) + + # zero-d array: + f = np.array(3) + assert_isin_equal(f, b) + assert_isin_equal(a, f) + assert_isin_equal(f, f) + + # scalar: + assert_isin_equal(5, b) + assert_isin_equal(a, 6) + assert_isin_equal(5, 6) + + # empty array-like: + if kind != "table": + # An empty list will become float64, + # which is invalid for kind="table" + x = [] + assert_isin_equal(x, b) + assert_isin_equal(a, x) + assert_isin_equal(x, x) + + # empty array with various types: + for dtype in [bool, np.int64, np.float64]: + if kind == "table" and dtype == np.float64: + continue + + if dtype in {np.int64, np.float64}: + ar = np.array([10, 20, 30], dtype=dtype) + elif dtype in {bool}: + ar = np.array([True, False, False]) + + empty_array = np.array([], dtype=dtype) + + assert_isin_equal(empty_array, ar) + assert_isin_equal(ar, empty_array) + assert_isin_equal(empty_array, empty_array) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_additional(self, kind): + # we use two different sizes for the b array here to test the + # two different paths in isin(). 
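+        # A pure-Python reference for the first check below (sketch only;
+        # which internal path isin() takes is a size-based heuristic):
+        #     [x in {2, 4, 3, 1, 5} for x in [5, 7, 1, 2]]
+        # gives [True, False, True, True], the expected `ec`.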
+        for mult in (1, 10):
+            # One check without np.array to make sure lists are handled correctly
+            a = [5, 7, 1, 2]
+            b = [2, 4, 3, 1, 5] * mult
+            ec = np.array([True, False, True, True])
+            c = isin(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a[0] = 8
+            ec = np.array([False, False, True, True])
+            c = isin(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a[0], a[3] = 4, 8
+            ec = np.array([True, False, True, False])
+            c = isin(a, b, assume_unique=True, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
+            b = [2, 3, 4] * mult
+            ec = [False, True, False, True, True, True, True, True, True,
+                  False, True, False, False, False]
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            b = b + [5, 5, 4] * mult
+            ec = [True, True, True, True, True, True, True, True, True, True,
+                  True, False, True, True]
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 2])
+            b = np.array([2, 4, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True])
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 7, 1, 1, 2])
+            b = np.array([2, 4, 3, 3, 1, 5] * mult)
+            ec = np.array([True, False, True, True, True])
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5, 5])
+            b = np.array([2, 2] * mult)
+            ec = np.array([False, False])
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+            a = np.array([5])
+            b = np.array([2])
+            ec = np.array([False])
+            c = isin(a, b, kind=kind)
+            assert_array_equal(c, ec)
+
+        if kind in {None, "sort"}:
+            assert_array_equal(isin([], [], kind=kind), [])
+
+    def test_isin_char_array(self):
+        a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])
+        b = np.array(['a', 'c'])
+
+        ec = np.array([True, False, True, False, False, True, False, False])
+        c = isin(a, b)
+
+        assert_array_equal(c, ec)
+
+    @pytest.mark.parametrize("kind", [None, "sort", "table"])
+    def test_isin_invert(self, kind):
+        "Test isin's invert parameter"
+        # We use two different sizes for the b array here to test the
+        # two different paths in isin().
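+        # The invariant being checked (sketch): for any a, b and kind,
+        #     isin(a, b, invert=True) == np.invert(isin(a, b))
+        # elementwise; numpy documents invert=True as equivalent to (but
+        # faster than) negating the plain result in a second pass.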
+ for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5]) + b = [2, 3, 4] * mult + assert_array_equal(np.invert(isin(a, b, kind=kind)), + isin(a, b, invert=True, kind=kind)) + + # float: + if kind in {None, "sort"}: + for mult in (1, 10): + a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], + dtype=np.float32) + b = [2, 3, 4] * mult + b = np.array(b, dtype=np.float32) + assert_array_equal(np.invert(isin(a, b, kind=kind)), + isin(a, b, invert=True, kind=kind)) + + def test_isin_hit_alternate_algorithm(self): + """Hit the standard isin code with integers""" + # Need extreme range to hit standard code + # This hits it without the use of kind='table' + a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64) + b = np.array([2, 3, 4, 1e9], dtype=np.int64) + expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool) + assert_array_equal(expected, isin(a, b)) + assert_array_equal(np.invert(expected), isin(a, b, invert=True)) + + a = np.array([5, 7, 1, 2], dtype=np.int64) + b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64) + ec = np.array([True, False, True, True]) + c = isin(a, b, assume_unique=True) + assert_array_equal(c, ec) + + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_boolean(self, kind): + """Test that isin works for boolean input""" + a = np.array([True, False]) + b = np.array([False, False, False]) + expected = np.array([False, True]) + assert_array_equal(expected, + isin(a, b, kind=kind)) + assert_array_equal(np.invert(expected), + isin(a, b, invert=True, kind=kind)) + + @pytest.mark.parametrize("kind", [None, "sort"]) + def test_isin_timedelta(self, kind): + """Test that isin works for timedelta input""" + rstate = np.random.RandomState(0) + a = rstate.randint(0, 100, size=10) + b = rstate.randint(0, 100, size=10) + truth = isin(a, b) + a_timedelta = a.astype("timedelta64[s]") + b_timedelta = b.astype("timedelta64[s]") + assert_array_equal(truth, isin(a_timedelta, b_timedelta, kind=kind)) + + def test_isin_table_timedelta_fails(self): + a = np.array([0, 1, 2], dtype="timedelta64[s]") + b = a + # Make sure it raises a value error: + with pytest.raises(ValueError): + isin(a, b, kind="table") + + @pytest.mark.parametrize( + "dtype1,dtype2", + [ + (np.int8, np.int16), + (np.int16, np.int8), + (np.uint8, np.uint16), + (np.uint16, np.uint8), + (np.uint8, np.int16), + (np.int16, np.uint8), + (np.uint64, np.int64), + ] + ) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_dtype(self, dtype1, dtype2, kind): + """Test that isin works as expected for mixed dtype input.""" + is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger) + ar1 = np.array([0, 0, 1, 1], dtype=dtype1) + + if is_dtype2_signed: + ar2 = np.array([-128, 0, 127], dtype=dtype2) + else: + ar2 = np.array([127, 0, 255], dtype=dtype2) + + expected = np.array([True, True, False, False]) + + expect_failure = kind == "table" and ( + dtype1 == np.int16 and dtype2 == np.int8) + + if expect_failure: + with pytest.raises(RuntimeError, match="exceed the maximum"): + isin(ar1, ar2, kind=kind) + else: + assert_array_equal(isin(ar1, ar2, kind=kind), expected) + + @pytest.mark.parametrize("data", [ + np.array([2**63, 2**63 + 1], dtype=np.uint64), + np.array([-2**62, -2**62 - 1], dtype=np.int64), + ]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_huge_vals(self, kind, data): + """Test values outside intp range (negative ones if 32bit system)""" + query = data[1] + res = np.isin(data, query, kind=kind) + 
assert_array_equal(res, [False, True])
+        # Also check that nothing weird happens for values that can't
+        # possibly be in range.
+        data = data.astype(np.int32)  # clearly different values
+        res = np.isin(data, query, kind=kind)
+        assert_array_equal(res, [False, False])
+
+    @pytest.mark.parametrize("kind", [None, "sort", "table"])
+    def test_isin_mixed_boolean(self, kind):
+        """Test that isin works as expected for bool/int input."""
+        for dtype in np.typecodes["AllInteger"]:
+            a = np.array([True, False, False], dtype=bool)
+            b = np.array([0, 0, 0, 0], dtype=dtype)
+            expected = np.array([False, True, True], dtype=bool)
+            assert_array_equal(isin(a, b, kind=kind), expected)
+
+            a, b = b, a
+            expected = np.array([True, True, True, True], dtype=bool)
+            assert_array_equal(isin(a, b, kind=kind), expected)
+
+    def test_isin_first_array_is_object(self):
+        ar1 = [None]
+        ar2 = np.array([1] * 10)
+        expected = np.array([False])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_isin_second_array_is_object(self):
+        ar1 = 1
+        ar2 = np.array([None] * 10)
+        expected = np.array([False])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_isin_both_arrays_are_object(self):
+        ar1 = [None]
+        ar2 = np.array([None] * 10)
+        expected = np.array([True])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_isin_both_arrays_have_structured_dtype(self):
+        # Test arrays of a structured data type containing an integer field
+        # and a field of dtype `object` allowing for arbitrary Python objects
+        dt = np.dtype([('field1', int), ('field2', object)])
+        ar1 = np.array([(1, None)], dtype=dt)
+        ar2 = np.array([(1, None)] * 10, dtype=dt)
+        expected = np.array([True])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+
+    def test_isin_with_arrays_containing_tuples(self):
+        ar1 = np.array([(1,), 2], dtype=object)
+        ar2 = np.array([(1,), 2], dtype=object)
+        expected = np.array([True, True])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.isin(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+        # An integer is added at the end of the array to make sure
+        # that the array builder will create the array with tuples
+        # and after it's created the integer is removed.
+        # There's a bug in the array constructor that doesn't handle
+        # tuples properly and adding the integer fixes that.
+        ar1 = np.array([(1,), (2, 1), 1], dtype=object)
+        ar1 = ar1[:-1]
+        ar2 = np.array([(1,), (2, 1), 1], dtype=object)
+        ar2 = ar2[:-1]
+        expected = np.array([True, True])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.isin(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+        ar1 = np.array([(1,), (2, 3), 1], dtype=object)
+        ar1 = ar1[:-1]
+        ar2 = np.array([(1,), 2], dtype=object)
+        expected = np.array([True, False])
+        result = np.isin(ar1, ar2)
+        assert_array_equal(result, expected)
+        result = np.isin(ar1, ar2, invert=True)
+        assert_array_equal(result, np.invert(expected))
+
+    def test_isin_errors(self):
+        """Test that isin raises expected errors."""
+
+        # Error 1: `kind` is not one of 'sort', 'table', or None.
+        ar1 = np.array([1, 2, 3, 4, 5])
+        ar2 = np.array([2, 4, 6, 8, 10])
+        assert_raises(ValueError, isin, ar1, ar2, kind='quicksort')
+
+        # Error 2: `kind="table"` does not work for non-integral arrays.
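+        # (`kind="table"` is assumed to build a direct integer-indexed
+        # lookup table spanning the range of ar2, which is meaningless
+        # for object or float dtypes, hence the ValueError below.)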
+ obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object) + obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object) + assert_raises(ValueError, isin, obj_ar1, obj_ar2, kind='table') + + for dtype in [np.int32, np.int64]: + ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype) + # The range of this array will overflow: + overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype) + + # Error 3: `kind="table"` will trigger a runtime error + # if there is an integer overflow expected when computing the + # range of ar2 + assert_raises( + RuntimeError, + isin, ar1, overflow_ar2, kind='table' + ) + + # Non-error: `kind=None` will *not* trigger a runtime error + # if there is an integer overflow, it will switch to + # the `sort` algorithm. + result = np.isin(ar1, overflow_ar2, kind=None) + assert_array_equal(result, [True] + [False] * 4) + result = np.isin(ar1, overflow_ar2, kind='sort') + assert_array_equal(result, [True] + [False] * 4) + + def test_union1d(self): + a = np.array([5, 4, 7, 1, 2]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([1, 2, 3, 4, 5, 7]) + c = union1d(a, b) + assert_array_equal(c, ec) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = np.array([[0, 1, 2], [3, 4, 5]]) + y = np.array([0, 1, 2, 3, 4]) + ez = np.array([0, 1, 2, 3, 4, 5]) + z = union1d(x, y) + assert_array_equal(z, ez) + + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + a = np.array([6, 5, 4, 7, 1, 2, 7, 4]) + b = np.array([2, 4, 3, 3, 2, 1, 5]) + + ec = np.array([6, 7]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + a = np.arange(21) + b = np.arange(19) + ec = np.array([19, 20]) + c = setdiff1d(a, b) + assert_array_equal(c, ec) + + assert_array_equal([], setdiff1d([], [])) + a = np.array((), np.uint32) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_unique(self): + a = np.array([3, 2, 1]) + b = np.array([7, 5, 2]) + expected = np.array([3, 1]) + actual = setdiff1d(a, b, assume_unique=True) + assert_equal(actual, expected) + + def test_setdiff1d_char_array(self): + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + def test_manyways(self): + a = np.array([5, 7, 1, 2, 8]) + b = np.array([9, 8, 2, 4, 3, 1, 5]) + + c1 = setxor1d(a, b) + aux1 = intersect1d(a, b) + aux2 = union1d(a, b) + c2 = setdiff1d(aux2, aux1) + assert_array_equal(c1, c2) + + +class TestUnique: + + def check_all(self, a, b, i1, i2, c, dt): + base_msg = 'check {0} failed for type {1}' + + msg = base_msg.format('values', dt) + v = unique(a) + assert_array_equal(v, b, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index', dt) + v, j = unique(a, True, False, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i1, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_inverse', dt) + v, j = unique(a, False, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j, i2, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_counts', dt) + v, j = unique(a, False, False, True) + assert_array_equal(v, b, msg) + assert_array_equal(j, c, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index and return_inverse', dt) + v, j1, j2 = unique(a, True, True, False) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_index and return_counts', dt) + v, j1, j2 = unique(a, True, False, True) + 
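+ # positional flags are (return_index, return_inverse, return_counts),
+ # so j1 holds first-occurrence indices and j2 the counts here.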
assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, c, msg) + assert type(v) == type(b) + + msg = base_msg.format('return_inverse and return_counts', dt) + v, j1, j2 = unique(a, False, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i2, msg) + assert_array_equal(j2, c, msg) + assert type(v) == type(b) + + msg = base_msg.format(('return_index, return_inverse ' + 'and return_counts'), dt) + v, j1, j2, j3 = unique(a, True, True, True) + assert_array_equal(v, b, msg) + assert_array_equal(j1, i1, msg) + assert_array_equal(j2, i2, msg) + assert_array_equal(j3, c, msg) + assert type(v) == type(b) + + def get_types(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + return types + + def test_unique_1d(self): + + a = [5, 7, 1, 2, 1, 5, 7] * 10 + b = [1, 2, 5, 7] + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3] * 10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = self.get_types() + for dt in types: + aa = np.array(a, dt) + bb = np.array(b, dt) + self.check_all(aa, bb, i1, i2, c, dt) + + # test for object arrays + dt = 'O' + aa = np.empty(len(a), dt) + aa[:] = a + bb = np.empty(len(b), dt) + bb[:] = b + self.check_all(aa, bb, i1, i2, c, dt) + + # test for structured arrays + dt = [('', 'i'), ('', 'i')] + aa = np.array(list(zip(a, a)), dt) + bb = np.array(list(zip(b, b)), dt) + self.check_all(aa, bb, i1, i2, c, dt) + + # test for ticket #2799 + aa = [1. + 0.j, 1 - 1.j, 1] + assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j]) + + # test for ticket #4785 + a = [(1, 2), (1, 2), (2, 3)] + unq = [1, 2, 3] + inv = [[0, 1], [0, 1], [1, 2]] + a1 = unique(a) + assert_array_equal(a1, unq) + a2, a2_inv = unique(a, return_inverse=True) + assert_array_equal(a2, unq) + assert_array_equal(a2_inv, inv) + + # test for chararrays with return_inverse (gh-5099) + a = np.char.chararray(5) + a[...] 
= '' + a2, a2_inv = np.unique(a, return_inverse=True) + assert_array_equal(a2_inv, np.zeros(5)) + + # test for ticket #9137 + a = [] + a1_idx = np.unique(a, return_index=True)[1] + a2_inv = np.unique(a, return_inverse=True)[1] + a3_idx, a3_inv = np.unique(a, return_index=True, + return_inverse=True)[1:] + assert_equal(a1_idx.dtype, np.intp) + assert_equal(a2_inv.dtype, np.intp) + assert_equal(a3_idx.dtype, np.intp) + assert_equal(a3_inv.dtype, np.intp) + + # test for ticket 2111 - float + a = [2.0, np.nan, 1.0, np.nan] + ua = [1.0, 2.0, np.nan] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - complex + a = [2.0 - 1j, np.nan, 1.0 + 1j, complex(0.0, np.nan), complex(1.0, np.nan)] + ua = [1.0 + 1j, 2.0 - 1j, complex(0.0, np.nan)] + ua_idx = [2, 0, 3] + ua_inv = [1, 2, 0, 2, 2] + ua_cnt = [1, 1, 3] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - datetime64 + nat = np.datetime64('nat') + a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat] + ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for ticket 2111 - timedelta + nat = np.timedelta64('nat') + a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat] + ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat] + ua_idx = [2, 0, 1] + ua_inv = [1, 2, 0, 2] + ua_cnt = [1, 1, 2] + assert_equal(np.unique(a), ua) + assert_equal(np.unique(a, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt)) + + # test for gh-19300 + all_nans = [np.nan] * 4 + ua = [np.nan] + ua_idx = [0] + ua_inv = [0, 0, 0, 0] + ua_cnt = [4] + assert_equal(np.unique(all_nans), ua) + assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx)) + assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv)) + assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt)) + + def test_unique_zero_sized(self): + # test for zero-sized arrays + for dt in self.get_types(): + a = np.array([], dt) + b = np.array([], dt) + i1 = np.array([], np.int64) + i2 = np.array([], np.int64) + c = np.array([], np.int64) + self.check_all(a, b, i1, i2, c, dt) + + def test_unique_subclass(self): + class Subclass(np.ndarray): + pass + + i1 = [2, 3, 0, 1] + i2 = [2, 3, 0, 1, 0, 2, 3] * 10 + c = np.multiply([2, 1, 2, 2], 10) + + # test for numeric arrays + types = self.get_types() + for dt in types: + a = np.array([5, 7, 1, 2, 1, 5, 7] * 10, dtype=dt) + b = np.array([1, 2, 5, 7], dtype=dt) + aa = Subclass(a.shape, dtype=dt, buffer=a) + bb = Subclass(b.shape, dtype=dt, buffer=b) + self.check_all(aa, bb, i1, i2, c, dt) + + @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"]) + def test_unsupported_hash_based(self, arg): + """These currently never use 
the hash-based solution. However, + it seems easier to just allow it. + + When the hash-based solution is added, this test should fail and be + replaced with something more comprehensive. + """ + a = np.array([1, 5, 2, 3, 4, 8, 199, 1, 3, 5]) + + res_not_sorted = np.unique([1, 1], sorted=False, **{arg: True}) + res_sorted = np.unique([1, 1], sorted=True, **{arg: True}) + # The following should fail without first sorting `res_not_sorted`. + for arr, expected in zip(res_not_sorted, res_sorted): + assert_array_equal(arr, expected) + + def test_unique_axis_errors(self): + assert_raises(TypeError, self._run_axis_tests, object) + assert_raises(TypeError, self._run_axis_tests, + [('a', int), ('b', object)]) + + assert_raises(AxisError, unique, np.arange(10), axis=2) + assert_raises(AxisError, unique, np.arange(10), axis=-2) + + def test_unique_axis_list(self): + msg = "Unique failed on list of lists" + inp = [[0, 1, 0], [0, 1, 0]] + inp_arr = np.asarray(inp) + assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg) + assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg) + + def test_unique_axis(self): + types = [] + types.extend(np.typecodes['AllInteger']) + types.extend(np.typecodes['AllFloat']) + types.append('datetime64[D]') + types.append('timedelta64[D]') + types.append([('a', int), ('b', int)]) + types.append([('a', int), ('b', float)]) + + for dtype in types: + self._run_axis_tests(dtype) + + msg = 'Non-bitwise-equal booleans test failed' + data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool) + result = np.array([[False, True], [True, True]], dtype=bool) + assert_array_equal(unique(data, axis=0), result, msg) + + msg = 'Negative zero equality test failed' + data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]]) + result = np.array([[-0.0, 0.0]]) + assert_array_equal(unique(data, axis=0), result, msg) + + @pytest.mark.parametrize("axis", [0, -1]) + def test_unique_1d_with_axis(self, axis): + x = np.array([4, 3, 2, 3, 2, 1, 2, 2]) + uniq = unique(x, axis=axis) + assert_array_equal(uniq, [1, 2, 3, 4]) + + @pytest.mark.parametrize("axis", [None, 0, -1]) + def test_unique_inverse_with_axis(self, axis): + x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) + uniq, inv = unique(x, return_inverse=True, axis=axis) + assert_equal(inv.ndim, x.ndim if axis is None else 1) + assert_array_equal(x, np.take(uniq, inv, axis=axis)) + + def test_unique_axis_zeros(self): + # issue 15559 + single_zero = np.empty(shape=(2, 0), dtype=np.int8) + uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True, + return_inverse=True, return_counts=True) + + # there's 1 element of shape (0,) along axis 0 + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(1, 0))) + assert_array_equal(idx, np.array([0])) + assert_array_equal(inv, np.array([0, 0])) + assert_array_equal(cnt, np.array([2])) + + # there's 0 elements of shape (2,) along axis 1 + uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True, + return_inverse=True, return_counts=True) + + assert_equal(uniq.dtype, single_zero.dtype) + assert_array_equal(uniq, np.empty(shape=(2, 0))) + assert_array_equal(idx, np.array([])) + assert_array_equal(inv, np.array([])) + assert_array_equal(cnt, np.array([])) + + # test a "complicated" shape + shape = (0, 2, 0, 3, 0, 4, 0) + multiple_zeros = np.empty(shape=shape) + for axis in range(len(shape)): + expected_shape = list(shape) + if shape[axis] == 0: + expected_shape[axis] = 0 + else: + expected_shape[axis] = 1 
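+ # an axis of length zero stays empty; along any other axis the
+ # identical (empty) subarrays collapse to a single unique entry.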
+ + assert_array_equal(unique(multiple_zeros, axis=axis), + np.empty(shape=expected_shape)) + + def test_unique_masked(self): + # issue 8664 + x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0], + dtype='uint8') + y = np.ma.masked_equal(x, 0) + + v = np.unique(y) + v2, i, c = np.unique(y, return_index=True, return_counts=True) + + msg = 'Unique returned different results when asked for index' + assert_array_equal(v.data, v2.data, msg) + assert_array_equal(v.mask, v2.mask, msg) + + def test_unique_sort_order_with_axis(self): + # These tests fail if sorting along axis is done by treating subarrays + # as unsigned byte strings. See gh-10495. + fmt = "sort order incorrect for integer type '%s'" + for dt in 'bhilq': + a = np.array([[-1], [0]], dt) + b = np.unique(a, axis=0) + assert_array_equal(a, b, fmt % dt) + + def _run_axis_tests(self, dtype): + data = np.array([[0, 1, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [1, 0, 0, 0]]).astype(dtype) + + msg = 'Unique with 1d array and axis=0 failed' + result = np.array([0, 1]) + assert_array_equal(unique(data), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=0 failed' + result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + assert_array_equal(unique(data, axis=0), result.astype(dtype), msg) + + msg = 'Unique with 2d array and axis=1 failed' + result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]]) + assert_array_equal(unique(data, axis=1), result.astype(dtype), msg) + + msg = 'Unique with 3d array and axis=2 failed' + data3d = np.array([[[1, 1], + [1, 0]], + [[0, 1], + [0, 0]]]).astype(dtype) + result = np.take(data3d, [1, 0], axis=2) + assert_array_equal(unique(data3d, axis=2), result, msg) + + uniq, idx, inv, cnt = unique(data, axis=0, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=0" + assert_array_equal(data[idx], uniq, msg) + msg = "Unique's return_inverse=True failed with axis=0" + assert_array_equal(np.take(uniq, inv, axis=0), data) + msg = "Unique's return_counts=True failed with axis=0" + assert_array_equal(cnt, np.array([2, 2]), msg) + + uniq, idx, inv, cnt = unique(data, axis=1, return_index=True, + return_inverse=True, return_counts=True) + msg = "Unique's return_index=True failed with axis=1" + assert_array_equal(data[:, idx], uniq) + msg = "Unique's return_inverse=True failed with axis=1" + assert_array_equal(np.take(uniq, inv, axis=1), data) + msg = "Unique's return_counts=True failed with axis=1" + assert_array_equal(cnt, np.array([2, 1, 1]), msg) + + def test_unique_nanequals(self): + # issue 20326 + a = np.array([1, 1, np.nan, np.nan, np.nan]) + unq = np.unique(a) + not_unq = np.unique(a, equal_nan=False) + assert_array_equal(unq, np.array([1, np.nan])) + assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan])) + + def test_unique_array_api_functions(self): + arr = np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1]) + + for res_unique_array_api, res_unique in [ + ( + np.unique_values(arr), + np.unique(arr, equal_nan=False) + ), + ( + np.unique_counts(arr), + np.unique(arr, return_counts=True, equal_nan=False) + ), + ( + np.unique_inverse(arr), + np.unique(arr, return_inverse=True, equal_nan=False) + ), + ( + np.unique_all(arr), + np.unique( + arr, + return_index=True, + return_inverse=True, + return_counts=True, + equal_nan=False + ) + ) + ]: + assert len(res_unique_array_api) == len(res_unique) + for actual, expected in zip(res_unique_array_api, res_unique): + assert_array_equal(actual, expected) + + def 
test_unique_inverse_shape(self): + # Regression test for https://github.com/numpy/numpy/issues/25552 + arr = np.array([[1, 2, 3], [2, 3, 1]]) + expected_values, expected_inverse = np.unique(arr, return_inverse=True) + expected_inverse = expected_inverse.reshape(arr.shape) + for func in np.unique_inverse, np.unique_all: + result = func(arr) + assert_array_equal(expected_values, result.values) + assert_array_equal(expected_inverse, result.inverse_indices) + assert_array_equal(arr, result.values[result.inverse_indices]) + + @pytest.mark.parametrize( + 'data', + [[[1, 1, 1], + [1, 1, 1]], + [1, 3, 2], + 1], + ) + @pytest.mark.parametrize('transpose', [False, True]) + @pytest.mark.parametrize('dtype', [np.int32, np.float64]) + def test_unique_with_matrix(self, data, transpose, dtype): + mat = np.matrix(data).astype(dtype) + if transpose: + mat = mat.T + u = np.unique(mat) + expected = np.unique(np.asarray(mat)) + assert_array_equal(u, expected, strict=True) diff --git a/python/numpy/lib/tests/test_arrayterator.py b/python/numpy/lib/tests/test_arrayterator.py new file mode 100644 index 000000000..800c9a2a5 --- /dev/null +++ b/python/numpy/lib/tests/test_arrayterator.py @@ -0,0 +1,46 @@ +from functools import reduce +from operator import mul + +import numpy as np +from numpy.lib import Arrayterator +from numpy.random import randint +from numpy.testing import assert_ + + +def test(): + np.random.seed(np.arange(10)) + + # Create a random array + ndims = randint(5) + 1 + shape = tuple(randint(10) + 1 for dim in range(ndims)) + els = reduce(mul, shape) + a = np.arange(els) + a.shape = shape + + buf_size = randint(2 * els) + b = Arrayterator(a, buf_size) + + # Check that each block has at most ``buf_size`` elements + for block in b: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that all elements are iterated correctly + assert_(list(b.flat) == list(a.flat)) + + # Slice arrayterator + start = [randint(dim) for dim in shape] + stop = [randint(dim) + 1 for dim in shape] + step = [randint(dim) + 1 for dim in shape] + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + c = b[slice_] + d = a[slice_] + + # Check that each block has at most ``buf_size`` elements + for block in c: + assert_(len(block.flat) <= (buf_size or els)) + + # Check that the arrayterator is sliced correctly + assert_(np.all(c.__array__() == d)) + + # Check that all elements are iterated correctly + assert_(list(c.flat) == list(d.flat)) diff --git a/python/numpy/lib/tests/test_format.py b/python/numpy/lib/tests/test_format.py new file mode 100644 index 000000000..d805d3493 --- /dev/null +++ b/python/numpy/lib/tests/test_format.py @@ -0,0 +1,1054 @@ +# doctest +r''' Test the .npy file format. + +Set up: + + >>> import sys + >>> from io import BytesIO + >>> from numpy.lib import format + >>> + >>> scalars = [ + ... np.uint8, + ... np.int8, + ... np.uint16, + ... np.int16, + ... np.uint32, + ... np.int32, + ... np.uint64, + ... np.int64, + ... np.float32, + ... np.float64, + ... np.complex64, + ... np.complex128, + ... object, + ... ] + >>> + >>> basic_arrays = [] + >>> + >>> for scalar in scalars: + ... for endian in '<>': + ... dtype = np.dtype(scalar).newbyteorder(endian) + ... basic = np.arange(15).astype(dtype) + ... basic_arrays.extend([ + ... np.array([], dtype=dtype), + ... np.array(10, dtype=dtype), + ... basic, + ... basic.reshape((3,5)), + ... basic.reshape((3,5)).T, + ... basic.reshape((3,5))[::-1,::2], + ... ]) + ... + >>> + >>> Pdescr = [ + ... ('x', 'i4', (2,)), + ... ('y', 'f8', (2, 2)), + ... 
('z', 'u1')] + >>> + >>> + >>> PbufferT = [ + ... ([3,2], [[6.,4.],[6.,4.]], 8), + ... ([4,3], [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> Ndescr = [ + ... ('x', 'i4', (2,)), + ... ('Info', [ + ... ('value', 'c16'), + ... ('y2', 'f8'), + ... ('Info2', [ + ... ('name', 'S2'), + ... ('value', 'c16', (2,)), + ... ('y3', 'f8', (2,)), + ... ('z3', 'u4', (2,))]), + ... ('name', 'S2'), + ... ('z2', 'b1')]), + ... ('color', 'S2'), + ... ('info', [ + ... ('Name', 'U8'), + ... ('Value', 'c16')]), + ... ('y', 'f8', (2, 2)), + ... ('z', 'u1')] + >>> + >>> + >>> NbufferT = [ + ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8), + ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9), + ... ] + >>> + >>> + >>> record_arrays = [ + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + ... ] + +Test the magic string writing. + + >>> format.magic(1, 0) + '\x93NUMPY\x01\x00' + >>> format.magic(0, 0) + '\x93NUMPY\x00\x00' + >>> format.magic(255, 255) + '\x93NUMPY\xff\xff' + >>> format.magic(2, 5) + '\x93NUMPY\x02\x05' + +Test the magic string reading. + + >>> format.read_magic(BytesIO(format.magic(1, 0))) + (1, 0) + >>> format.read_magic(BytesIO(format.magic(0, 0))) + (0, 0) + >>> format.read_magic(BytesIO(format.magic(255, 255))) + (255, 255) + >>> format.read_magic(BytesIO(format.magic(2, 5))) + (2, 5) + +Test the header writing. + + >>> for arr in basic_arrays + record_arrays: + ... f = BytesIO() + ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it + ... print(repr(f.getvalue())) + ... 
+ "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i2', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'u8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': 
(3, 5)} \n" + "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'i8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f4', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'f8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c8', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'c16', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n" + "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n" + "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n" + "v\x00{'descr': [('x', 'i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" + "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n" +''' +import os +import 
sys +import warnings +from io import BytesIO + +import pytest + +import numpy as np +from numpy.lib import format +from numpy.testing import ( + IS_64BIT, + IS_PYPY, + IS_WASM, + assert_, + assert_array_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) +from numpy.testing._private.utils import requires_memory + +# Generate some basic arrays to test with. +scalars = [ + np.uint8, + np.int8, + np.uint16, + np.int16, + np.uint32, + np.int32, + np.uint64, + np.int64, + np.float32, + np.float64, + np.complex64, + np.complex128, + object, +] +basic_arrays = [] +for scalar in scalars: + for endian in '<>': + dtype = np.dtype(scalar).newbyteorder(endian) + basic = np.arange(1500).astype(dtype) + basic_arrays.extend([ + # Empty + np.array([], dtype=dtype), + # Rank-0 + np.array(10, dtype=dtype), + # 1-D + basic, + # 2-D C-contiguous + basic.reshape((30, 50)), + # 2-D F-contiguous + basic.reshape((30, 50)).T, + # 2-D non-contiguous + basic.reshape((30, 50))[::-1, ::2], + ]) + +# More complicated record arrays. +# This is the structure of the table used for plain objects: +# +# +-+-+-+ +# |x|y|z| +# +-+-+-+ + +# Structure of a plain array description: +Pdescr = [ + ('x', 'i4', (2,)), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +# A plain list of tuples with values for testing: +PbufferT = [ + # x y z + ([3, 2], [[6., 4.], [6., 4.]], 8), + ([4, 3], [[7., 5.], [7., 5.]], 9), + ] + + +# This is the structure of the table used for nested objects (DON'T PANIC!): +# +# +-+---------------------------------+-----+----------+-+-+ +# |x|Info |color|info |y|z| +# | +-----+--+----------------+----+--+ +----+-----+ | | +# | |value|y2|Info2 |name|z2| |Name|Value| | | +# | | | +----+-----+--+--+ | | | | | | | +# | | | |name|value|y3|z3| | | | | | | | +# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+ +# + +# The corresponding nested array description: +Ndescr = [ + ('x', 'i4', (2,)), + ('Info', [ + ('value', 'c16'), + ('y2', 'f8'), + ('Info2', [ + ('name', 'S2'), + ('value', 'c16', (2,)), + ('y3', 'f8', (2,)), + ('z3', 'u4', (2,))]), + ('name', 'S2'), + ('z2', 'b1')]), + ('color', 'S2'), + ('info', [ + ('Name', 'U8'), + ('Value', 'c16')]), + ('y', 'f8', (2, 2)), + ('z', 'u1')] + +NbufferT = [ + # x Info color info y z + # value y2 Info2 name z2 Name Value + # name value y3 z3 + ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True), + 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8), + ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False), + 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9), + ] + +record_arrays = [ + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')), + np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')), + np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')), + np.zeros(1, dtype=[('c', ('= (3, 12), reason="see gh-23988") +@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup") +def test_python2_python3_interoperability(): + fname = 'win64python2.npy' + path = os.path.join(os.path.dirname(__file__), 'data', fname) + with pytest.warns(UserWarning, match="Reading.*this warning\\."): + data = np.load(path) + assert_array_equal(data, np.ones(2)) + + +def test_pickle_python2_python3(): + # Test that loading object arrays saved on Python 2 works both on + # Python 2 and Python 3 and vice versa + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + expected = np.array([None, range, '\u512a\u826f', + b'\xe4\xb8\x8d\xe8\x89\xaf'], + dtype=object) + + for 
fname in ['py2-np0-objarr.npy', 'py2-objarr.npy', 'py2-objarr.npz', + 'py3-objarr.npy', 'py3-objarr.npz']: + path = os.path.join(data_dir, fname) + + for encoding in ['bytes', 'latin1']: + data_f = np.load(path, allow_pickle=True, encoding=encoding) + if fname.endswith('.npz'): + data = data_f['x'] + data_f.close() + else: + data = data_f + + if encoding == 'latin1' and fname.startswith('py2'): + assert_(isinstance(data[3], str)) + assert_array_equal(data[:-1], expected[:-1]) + # mojibake occurs + assert_array_equal(data[-1].encode(encoding), expected[-1]) + else: + assert_(isinstance(data[3], bytes)) + assert_array_equal(data, expected) + + if fname.startswith('py2'): + if fname.endswith('.npz'): + data = np.load(path, allow_pickle=True) + assert_raises(UnicodeError, data.__getitem__, 'x') + data.close() + data = np.load(path, allow_pickle=True, fix_imports=False, + encoding='latin1') + assert_raises(ImportError, data.__getitem__, 'x') + data.close() + else: + assert_raises(UnicodeError, np.load, path, + allow_pickle=True) + assert_raises(ImportError, np.load, path, + allow_pickle=True, fix_imports=False, + encoding='latin1') + + +def test_pickle_disallow(tmpdir): + data_dir = os.path.join(os.path.dirname(__file__), 'data') + + path = os.path.join(data_dir, 'py2-objarr.npy') + assert_raises(ValueError, np.load, path, + allow_pickle=False, encoding='latin1') + + path = os.path.join(data_dir, 'py2-objarr.npz') + with np.load(path, allow_pickle=False, encoding='latin1') as f: + assert_raises(ValueError, f.__getitem__, 'x') + + path = os.path.join(tmpdir, 'pickle-disabled.npy') + assert_raises(ValueError, np.save, path, np.array([None], dtype=object), + allow_pickle=False) + +@pytest.mark.parametrize('dt', [ + np.dtype(np.dtype([('a', np.int8), + ('b', np.int16), + ('c', np.int32), + ], align=True), + (3,)), + np.dtype([('x', np.dtype({'names': ['a', 'b'], + 'formats': ['i1', 'i1'], + 'offsets': [0, 4], + 'itemsize': 8, + }, + (3,)), + (4,), + )]), + np.dtype([('x', + (' 1, a) + assert_array_equal(b, [3, 2, 2, 3, 3]) + + def test_place(self): + # Make sure that non-np.ndarray objects + # raise an error instead of doing nothing + assert_raises(TypeError, place, [1, 2, 3], [True, False], [0, 1]) + + a = np.array([1, 4, 3, 2, 5, 8, 7]) + place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6]) + assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7]) + + place(a, np.zeros(7), []) + assert_array_equal(a, np.arange(1, 8)) + + place(a, [1, 0, 1, 0, 1, 0, 1], [8, 9]) + assert_array_equal(a, [8, 2, 9, 4, 8, 6, 9]) + assert_raises_regex(ValueError, "Cannot insert from an empty array", + lambda: place(a, [0, 0, 0, 0, 0, 1, 0], [])) + + # See Issue #6974 + a = np.array(['12', '34']) + place(a, [0, 1], '9') + assert_array_equal(a, ['12', '9']) + + def test_both(self): + a = rand(10) + mask = a > 0.5 + ac = a.copy() + c = extract(mask, a) + place(a, mask, 0) + place(a, mask, c) + assert_array_equal(a, ac) + + +# _foo1 and _foo2 are used in some tests in TestVectorize. 
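+
+# A minimal sketch of the behavior these helpers exercise (illustrative
+# only; np.vectorize broadcasts a scalar Python function over arrays):
+#
+#     vfoo = np.vectorize(_foo1)
+#     vfoo([1.5, 2.5], y=2.0)   # -> array([2., 4.])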
+ +def _foo1(x, y=1.0): + return y * math.floor(x) + + +def _foo2(x, y=1.0, z=0.0): + return y * math.floor(x) + z + + +class TestVectorize: + + def test_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_scalar(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract) + r = f([0, 3, 6, 9], 5) + assert_array_equal(r, [5, 8, 1, 4]) + + def test_large(self): + x = np.linspace(-3, 2, 10000) + f = vectorize(lambda x: x) + y = f(x) + assert_array_equal(y, x) + + def test_ufunc(self): + f = vectorize(math.cos) + args = np.array([0, 0.5 * np.pi, np.pi, 1.5 * np.pi, 2 * np.pi]) + r1 = f(args) + r2 = np.cos(args) + assert_array_almost_equal(r1, r2) + + def test_keywords(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(args, 2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order1(self): + # gh-1620: The second call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0), 1.0) + r2 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order2(self): + # gh-1620: The second call of f would crash with + # `ValueError: non-broadcastable output operand with shape () + # doesn't match the broadcast shape (3,)`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), 1.0) + assert_array_equal(r1, r2) + + def test_keywords_with_otypes_order3(self): + # gh-1620: The third call of f would crash with + # `ValueError: invalid number of arguments`. + f = vectorize(_foo1, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(np.arange(3.0)) + r2 = f(np.arange(3.0), y=1.0) + r3 = f(np.arange(3.0)) + assert_array_equal(r1, r2) + assert_array_equal(r1, r3) + + def test_keywords_with_otypes_several_kwd_args1(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. + r1 = f(10.4, z=100) + r2 = f(10.4, y=-1) + r3 = f(10.4) + assert_equal(r1, _foo2(10.4, z=100)) + assert_equal(r2, _foo2(10.4, y=-1)) + assert_equal(r3, _foo2(10.4)) + + def test_keywords_with_otypes_several_kwd_args2(self): + # gh-1620 Make sure different uses of keyword arguments + # don't break the vectorized function. + f = vectorize(_foo2, otypes=[float]) + # We're testing the caching of ufuncs by vectorize, so the order + # of these function calls is an important part of the test. 
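+ # a keyword-only call followed by a purely positional one must reuse
+ # the cached ufunc without error.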
+ r1 = f(z=100, x=10.4, y=-1) + r2 = f(1, 2, 3) + assert_equal(r1, _foo2(z=100, x=10.4, y=-1)) + assert_equal(r2, _foo2(1, 2, 3)) + + def test_keywords_no_func_code(self): + # This needs to test a function that has keywords but + # no func_code attribute, since otherwise vectorize will + # inspect the func_code. + import random + try: + vectorize(random.randrange) # Should succeed + except Exception: + raise AssertionError + + def test_keywords2_ticket_2100(self): + # Test kwarg support: enhancement ticket 2100 + + def foo(a, b=1): + return a + b + + f = vectorize(foo) + args = np.array([1, 2, 3]) + r1 = f(a=args) + r2 = np.array([2, 3, 4]) + assert_array_equal(r1, r2) + r1 = f(b=1, a=args) + assert_array_equal(r1, r2) + r1 = f(args, b=2) + r2 = np.array([3, 4, 5]) + assert_array_equal(r1, r2) + + def test_keywords3_ticket_2100(self): + # Test excluded with mixed positional and kwargs: ticket 2100 + def mypolyval(x, p): + _p = list(p) + res = _p.pop(0) + while _p: + res = res * x + _p.pop(0) + return res + + vpolyval = np.vectorize(mypolyval, excluded=['p', 1]) + ans = [3, 6] + assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3])) + assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3])) + + def test_keywords4_ticket_2100(self): + # Test vectorizing function with no positional args. + @vectorize + def f(**kw): + res = 1.0 + for _k in kw: + res *= kw[_k] + return res + + assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8]) + + def test_keywords5_ticket_2100(self): + # Test vectorizing function with no kwargs args. + @vectorize + def f(*v): + return np.prod(v) + + assert_array_equal(f([1, 2], [3, 4]), [3, 8]) + + def test_coverage1_ticket_2100(self): + def foo(): + return 1 + + f = vectorize(foo) + assert_array_equal(f(), 1) + + def test_assigning_docstring(self): + def foo(x): + """Original documentation""" + return x + + f = vectorize(foo) + assert_equal(f.__doc__, foo.__doc__) + + doc = "Provided documentation" + f = vectorize(foo, doc=doc) + assert_equal(f.__doc__, doc) + + def test_UnboundMethod_ticket_1156(self): + # Regression test for issue 1156 + class Foo: + b = 2 + + def bar(self, a): + return a ** self.b + + assert_array_equal(vectorize(Foo().bar)(np.arange(9)), + np.arange(9) ** 2) + assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)), + np.arange(9) ** 2) + + def test_execution_order_ticket_1487(self): + # Regression test for dependence on execution order: issue 1487 + f1 = vectorize(lambda x: x) + res1a = f1(np.arange(3)) + res1b = f1(np.arange(0.1, 3)) + f2 = vectorize(lambda x: x) + res2b = f2(np.arange(0.1, 3)) + res2a = f2(np.arange(3)) + assert_equal(res1a, res2a) + assert_equal(res1b, res2b) + + def test_string_ticket_1892(self): + # Test vectorization over strings: issue 1892. + f = np.vectorize(lambda x: x) + s = '0123456789' * 10 + assert_equal(s, f(s)) + + def test_dtype_promotion_gh_29189(self): + # dtype should not be silently promoted (int32 -> int64) + dtypes = [np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] + + for dtype in dtypes: + x = np.asarray([1, 2, 3], dtype=dtype) + y = np.vectorize(lambda x: x + x)(x) + assert x.dtype == y.dtype + + def test_cache(self): + # Ensure that vectorized func called exactly once per argument. 
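+ # (with cache=True, the call made to determine the output type is
+ # cached rather than repeated for the first element)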
+ _calls = [0] + + @vectorize + def f(x): + _calls[0] += 1 + return x ** 2 + + f.cache = True + x = np.arange(5) + assert_array_equal(f(x), x * x) + assert_equal(_calls[0], len(x)) + + def test_otypes(self): + f = np.vectorize(lambda x: x) + f.otypes = 'i' + x = np.arange(5) + assert_array_equal(f(x), x) + + def test_otypes_object_28624(self): + # with object otype, the vectorized function should return y + # wrapped into an object array + y = np.arange(3) + f = vectorize(lambda x: y, otypes=[object]) + + assert f(None).item() is y + assert f([None]).item() is y + + y = [1, 2, 3] + f = vectorize(lambda x: y, otypes=[object]) + + assert f(None).item() is y + assert f([None]).item() is y + + def test_parse_gufunc_signature(self): + assert_equal(nfb._parse_gufunc_signature('(x)->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x,y)->()'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),(y)->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y)'), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature('(x)->(y),()'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + # Tests to check if whitespaces are ignored + assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'), + ([('x', 'y')], [()])) + assert_equal(nfb._parse_gufunc_signature('(x),( y) ->()'), + ([('x',), ('y',)], [()])) + assert_equal(nfb._parse_gufunc_signature('( x)-> (y ) '), + ([('x',)], [('y',)])) + assert_equal(nfb._parse_gufunc_signature(' (x)->( y),( )'), + ([('x',)], [('y',), ()])) + assert_equal(nfb._parse_gufunc_signature( + '( ), ( a, b,c ) ,( d) -> (d , e)'), + ([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')])) + + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x)(y)->()') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('(x),(y)->') + with assert_raises(ValueError): + nfb._parse_gufunc_signature('((x))->(x)') + + def test_signature_simple(self): + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + f = vectorize(addsubtract, signature='(),()->()') + r = f([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_signature_mean_last(self): + def mean(a): + return a.mean() + + f = vectorize(mean, signature='(n)->()') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [2, 3]) + + def test_signature_center(self): + def center(a): + return a - a.mean() + + f = vectorize(center, signature='(n)->(n)') + r = f([[1, 3], [2, 4]]) + assert_array_equal(r, [[-1, 1], [-1, 1]]) + + def test_signature_two_outputs(self): + f = vectorize(lambda x: (x, x), signature='()->(),()') + r = f([1, 2, 3]) + assert_(isinstance(r, tuple) and len(r) == 2) + assert_array_equal(r[0], [1, 2, 3]) + assert_array_equal(r[1], [1, 2, 3]) + + def test_signature_outer(self): + f = vectorize(np.outer, signature='(a),(b)->(a,b)') + r = f([1, 2], [1, 2, 3]) + assert_array_equal(r, [[1, 2, 3], [2, 4, 6]]) + + r = f([[[1, 2]]], [1, 2, 3]) + assert_array_equal(r, [[[[1, 2, 3], [2, 4, 6]]]]) + + r = f([[1, 0], [2, 0]], [1, 2, 3]) + assert_array_equal(r, [[[1, 2, 3], [0, 0, 0]], + [[2, 4, 6], [0, 0, 0]]]) + + r = f([1, 2], [[1, 2, 3], [0, 0, 0]]) + assert_array_equal(r, [[[1, 2, 3], [2, 4, 6]], + [[0, 0, 0], [0, 0, 0]]]) + + def test_signature_computed_size(self): + f = vectorize(lambda x: x[:-1], signature='(n)->(m)') + r = f([1, 2, 
3]) + assert_array_equal(r, [1, 2]) + + r = f([[1, 2, 3], [2, 3, 4]]) + assert_array_equal(r, [[1, 2], [2, 3]]) + + def test_signature_excluded(self): + + def foo(a, b=1): + return a + b + + f = vectorize(foo, signature='()->()', excluded={'b'}) + assert_array_equal(f([1, 2, 3]), [2, 3, 4]) + assert_array_equal(f([1, 2, 3], b=0), [1, 2, 3]) + + def test_signature_otypes(self): + f = vectorize(lambda x: x, signature='(n)->(n)', otypes=['float64']) + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + + def test_signature_invalid_inputs(self): + f = vectorize(operator.add, signature='(n),(n)->(n)') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f([1, 2]) + with assert_raises_regex( + ValueError, 'does not have enough dimensions'): + f(1, 2) + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2], [1, 2, 3]) + + f = vectorize(operator.add, signature='()->()') + with assert_raises_regex(TypeError, 'wrong number of positional'): + f(1, 2) + + def test_signature_invalid_outputs(self): + + f = vectorize(lambda x: x[:-1], signature='(n)->(n)') + with assert_raises_regex( + ValueError, 'inconsistent size for core dimension'): + f([1, 2, 3]) + + f = vectorize(lambda x: x, signature='()->(),()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f(1) + + f = vectorize(lambda x: (x, x), signature='()->()') + with assert_raises_regex(ValueError, 'wrong number of outputs'): + f([1, 2]) + + def test_size_zero_output(self): + # see issue 5868 + f = np.vectorize(lambda x: x) + x = np.zeros([0, 5], dtype=int) + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f.otypes = 'i' + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='()->()') + with assert_raises_regex(ValueError, 'otypes'): + f(x) + + f = np.vectorize(lambda x: x, signature='()->()', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)', otypes='i') + assert_array_equal(f(x), x) + + f = np.vectorize(lambda x: x, signature='(n)->(n)') + assert_array_equal(f(x.T), x.T) + + f = np.vectorize(lambda x: [x], signature='()->(n)', otypes='i') + with assert_raises_regex(ValueError, 'new output dimensions'): + f(x) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + + m = np.array([[1., 0., 0.], + [0., 0., 1.], + [0., 1., 0.]]).view(subclass) + v = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]).view(subclass) + # generalized (gufunc) + matvec = np.vectorize(np.matmul, signature='(m,m),(m)->(m)') + r = matvec(m, v) + assert_equal(type(r), subclass) + assert_equal(r, [[1., 3., 2.], [4., 6., 5.], [7., 9., 8.]]) + + # element-wise (ufunc) + mult = np.vectorize(lambda x, y: x * y) + r = mult(m, v) + assert_equal(type(r), subclass) + assert_equal(r, m * v) + + def test_name(self): + # gh-23021 + @np.vectorize + def f2(a, b): + return a + b + + assert f2.__name__ == 'f2' + + def test_decorator(self): + @vectorize + def addsubtract(a, b): + if a > b: + return a - b + else: + return a + b + + r = addsubtract([0, 3, 6, 9], [1, 3, 5, 7]) + assert_array_equal(r, [1, 6, 1, 2]) + + def test_docstring(self): + @vectorize + def f(x): + """Docstring""" + return x + + if sys.flags.optimize < 2: + assert f.__doc__ == "Docstring" + + def test_partial(self): + def foo(x, y): + return x + y + + bar = partial(foo, 3) + vbar = np.vectorize(bar) + assert vbar(1) == 4 + + def test_signature_otypes_decorator(self): + @vectorize(signature='(n)->(n)', 
otypes=['float64']) + def f(x): + return x + + r = f([1, 2, 3]) + assert_equal(r.dtype, np.dtype('float64')) + assert_array_equal(r, [1, 2, 3]) + assert f.__name__ == 'f' + + def test_bad_input(self): + with assert_raises(TypeError): + A = np.vectorize(pyfunc=3) + + def test_no_keywords(self): + with assert_raises(TypeError): + @np.vectorize("string") + def foo(): + return "bar" + + def test_positional_regression_9477(self): + # This supplies the first keyword argument as a positional, + # to ensure that they are still properly forwarded after the + # enhancement for #9477 + f = vectorize((lambda x: x), ['float64']) + r = f([2]) + assert_equal(r.dtype, np.dtype('float64')) + + def test_datetime_conversion(self): + otype = "datetime64[ns]" + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + dtype='datetime64[ns]') + assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", + otypes=[otype])(arr), arr) + + +class TestLeaks: + class A: + iters = 20 + + def bound(self, *args): + return 0 + + @staticmethod + def unbound(*args): + return 0 + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.skipif(NOGIL_BUILD, + reason=("Functions are immortalized if a thread is " + "launched, making this test flaky")) + @pytest.mark.parametrize('name, incr', [ + ('bound', A.iters), + ('unbound', 0), + ]) + def test_frompyfunc_leaks(self, name, incr): + # exposed in gh-11867 as np.vectorized, but the problem stems from + # frompyfunc. + # class.attribute = np.frompyfunc() creates a + # reference cycle if is a bound class method. + # It requires a gc collection cycle to break the cycle. + import gc + A_func = getattr(self.A, name) + gc.disable() + try: + refcount = sys.getrefcount(A_func) + for i in range(self.A.iters): + a = self.A() + a.f = np.frompyfunc(getattr(a, name), 1, 1) + out = a.f(np.arange(10)) + a = None + # A.func is part of a reference cycle if incr is non-zero + assert_equal(sys.getrefcount(A_func), refcount + incr) + for i in range(5): + gc.collect() + assert_equal(sys.getrefcount(A_func), refcount) + finally: + gc.enable() + + +class TestDigitize: + + def test_forward(self): + x = np.arange(-6, 5) + bins = np.arange(-5, 5) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(5, -5, -1) + assert_array_equal(digitize(x, bins), np.arange(11)) + + def test_random(self): + x = rand(10) + bin = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bin) != 0)) + + def test_right_basic(self): + x = [1, 5, 4, 10, 8, 11, 0] + bins = [1, 5, 10] + default_answer = [1, 2, 1, 3, 2, 3, 0] + assert_array_equal(digitize(x, bins), default_answer) + right_answer = [0, 1, 1, 2, 2, 3, 0] + assert_array_equal(digitize(x, bins, True), right_answer) + + def test_right_open(self): + x = np.arange(-6, 5) + bins = np.arange(-6, 4) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_reverse(self): + x = np.arange(5, -6, -1) + bins = np.arange(4, -6, -1) + assert_array_equal(digitize(x, bins, True), np.arange(11)) + + def test_right_open_random(self): + x = rand(10) + bins = np.linspace(x.min(), x.max(), 10) + assert_(np.all(digitize(x, bins, True) != 10)) + + def test_monotonic(self): + x = [-1, 0, 1, 2] + bins = [0, 0, 1] + assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3]) + assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3]) + bins = [1, 1, 0] + assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0]) + 
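+ # right=True flips the edge comparison, so values equal to a repeated
+ # bin edge land on the other side of it: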
assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0]) + bins = [1, 1, 1, 1] + assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4]) + assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4]) + bins = [0, 0, 1, 0] + assert_raises(ValueError, digitize, x, bins) + bins = [1, 1, 0, 1] + assert_raises(ValueError, digitize, x, bins) + + def test_casting_error(self): + x = [1, 2, 3 + 1.j] + bins = [1, 2, 3] + assert_raises(TypeError, digitize, x, bins) + x, bins = bins, x + assert_raises(TypeError, digitize, x, bins) + + def test_return_type(self): + # Functions returning indices should always return base ndarrays + class A(np.ndarray): + pass + a = np.arange(5).view(A) + b = np.arange(1, 3).view(A) + assert_(not isinstance(digitize(b, a, False), A)) + assert_(not isinstance(digitize(b, a, True), A)) + + def test_large_integers_increasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x - 1, x + 1]), 1) + + @pytest.mark.xfail( + reason="gh-11022: np._core.multiarray._monoticity loses precision") + def test_large_integers_decreasing(self): + # gh-11022 + x = 2**54 # loses precision in a float + assert_equal(np.digitize(x, [x + 1, x - 1]), 1) + + +class TestUnwrap: + + def test_simple(self): + # check that unwrap removes jumps greater that 2*pi + assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi)) + + def test_period(self): + # check that unwrap removes jumps greater that 255 + assert_array_equal(unwrap([1, 1 + 256], period=255), [1, 2]) + # check that unwrap maintains continuity + assert_(np.all(diff(unwrap(rand(10) * 1000, period=255)) < 255)) + # check simple case + simple_seq = np.array([0, 75, 150, 225, 300]) + wrap_seq = np.mod(simple_seq, 255) + assert_array_equal(unwrap(wrap_seq, period=255), simple_seq) + # check custom discont value + uneven_seq = np.array([0, 75, 150, 225, 300, 430]) + wrap_uneven = np.mod(uneven_seq, 250) + no_discont = unwrap(wrap_uneven, period=250) + assert_array_equal(no_discont, [0, 75, 150, 225, 300, 180]) + sm_discont = unwrap(wrap_uneven, period=250, discont=140) + assert_array_equal(sm_discont, [0, 75, 150, 225, 300, 430]) + assert sm_discont.dtype == wrap_uneven.dtype + + +@pytest.mark.parametrize( + "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"] +) +@pytest.mark.parametrize("M", [0, 1, 10]) +class TestFilterwindows: + + def test_hanning(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hanning(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.500, 4) + + def test_hamming(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = hamming(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.9400, 4) + + def test_bartlett(self, dtype: str, M: int) -> None: + scalar = np.array(M, 
dtype=dtype)[()] + + w = bartlett(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 4.4444, 4) + + def test_blackman(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = blackman(scalar) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 3.7800, 4) + + def test_kaiser(self, dtype: str, M: int) -> None: + scalar = np.array(M, dtype=dtype)[()] + + w = kaiser(scalar, 0) + if dtype == "O": + ref_dtype = np.float64 + else: + ref_dtype = np.result_type(scalar.dtype, np.float64) + assert w.dtype == ref_dtype + + # check symmetry + assert_equal(w, flipud(w)) + + # check known value + if scalar < 1: + assert_array_equal(w, np.array([])) + elif scalar == 1: + assert_array_equal(w, np.ones(1)) + else: + assert_almost_equal(np.sum(w, axis=0), 10, 15) + + +class TestTrapezoid: + + def test_simple(self): + x = np.arange(-10, 10, .1) + r = trapezoid(np.exp(-.5 * x ** 2) / np.sqrt(2 * np.pi), dx=0.1) + # check integral of normal equals 1 + assert_almost_equal(r, 1, 7) + + def test_ndim(self): + x = np.linspace(0, 1, 3) + y = np.linspace(0, 2, 8) + z = np.linspace(0, 3, 13) + + wx = np.ones_like(x) * (x[1] - x[0]) + wx[0] /= 2 + wx[-1] /= 2 + wy = np.ones_like(y) * (y[1] - y[0]) + wy[0] /= 2 + wy[-1] /= 2 + wz = np.ones_like(z) * (z[1] - z[0]) + wz[0] /= 2 + wz[-1] /= 2 + + q = x[:, None, None] + y[None, :, None] + z[None, None, :] + + qx = (q * wx[:, None, None]).sum(axis=0) + qy = (q * wy[None, :, None]).sum(axis=1) + qz = (q * wz[None, None, :]).sum(axis=2) + + # n-d `x` + r = trapezoid(q, x=x[:, None, None], axis=0) + assert_almost_equal(r, qx) + r = trapezoid(q, x=y[None, :, None], axis=1) + assert_almost_equal(r, qy) + r = trapezoid(q, x=z[None, None, :], axis=2) + assert_almost_equal(r, qz) + + # 1-d `x` + r = trapezoid(q, x=x, axis=0) + assert_almost_equal(r, qx) + r = trapezoid(q, x=y, axis=1) + assert_almost_equal(r, qy) + r = trapezoid(q, x=z, axis=2) + assert_almost_equal(r, qz) + + def test_masked(self): + # Testing that masked arrays behave as if the function is 0 where + # masked + x = np.arange(5) + y = x * x + mask = x == 2 + ym = np.ma.array(y, mask=mask) + r = 13.0 # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16)) + assert_almost_equal(trapezoid(ym, x), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapezoid(ym, xm), r) + + xm = np.ma.array(x, mask=mask) + assert_almost_equal(trapezoid(y, xm), r) + + +class TestSinc: + + def test_simple(self): + assert_(sinc(0) == 1) + w = sinc(np.linspace(-1, 1, 100)) + # check symmetry + assert_array_almost_equal(w, flipud(w), 7) + + def test_array_like(self): + x = [0, 0.5] + y1 = sinc(np.array(x)) + y2 = sinc(list(x)) + y3 = sinc(tuple(x)) + assert_array_equal(y1, y2) + assert_array_equal(y1, y3) + + def test_bool_dtype(self): + x = (np.arange(4, dtype=np.uint8) % 2 == 1) + actual = sinc(x) + expected = sinc(x.astype(np.float64)) + assert_allclose(actual, expected) + assert 
actual.dtype == np.float64 + + @pytest.mark.parametrize('dtype', [np.uint8, np.int16, np.uint64]) + def test_int_dtypes(self, dtype): + x = np.arange(4, dtype=dtype) + actual = sinc(x) + expected = sinc(x.astype(np.float64)) + assert_allclose(actual, expected) + assert actual.dtype == np.float64 + + @pytest.mark.parametrize( + 'dtype', + [np.float16, np.float32, np.longdouble, np.complex64, np.complex128] + ) + def test_float_dtypes(self, dtype): + x = np.arange(4, dtype=dtype) + assert sinc(x).dtype == x.dtype + + def test_float16_underflow(self): + x = np.float16(0) + # before gh-27784, fill value for 0 in input would underflow float16, + # resulting in nan + assert_array_equal(sinc(x), np.asarray(1.0)) + +class TestUnique: + + def test_simple(self): + x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0]) + assert_(np.all(unique(x) == [0, 1, 2, 3, 4])) + assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1])) + x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham'] + assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget'])) + x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j]) + assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10])) + + +class TestCheckFinite: + + def test_simple(self): + a = [1, 2, 3] + b = [1, 2, np.inf] + c = [1, 2, np.nan] + np.asarray_chkfinite(a) + assert_raises(ValueError, np.asarray_chkfinite, b) + assert_raises(ValueError, np.asarray_chkfinite, c) + + def test_dtype_order(self): + # Regression test for missing dtype and order arguments + a = [1, 2, 3] + a = np.asarray_chkfinite(a, order='F', dtype=np.float64) + assert_(a.dtype == np.float64) + + +class TestCorrCoef: + A = np.array( + [[0.15391142, 0.18045767, 0.14197213], + [0.70461506, 0.96474128, 0.27906989], + [0.9297531, 0.32296769, 0.19267156]]) + B = np.array( + [[0.10377691, 0.5417086, 0.49807457], + [0.82872117, 0.77801674, 0.39226705], + [0.9314666, 0.66800209, 0.03538394]]) + res1 = np.array( + [[1., 0.9379533, -0.04931983], + [0.9379533, 1., 0.30007991], + [-0.04931983, 0.30007991, 1.]]) + res2 = np.array( + [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523], + [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386], + [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601], + [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032, -0.66173113], + [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823], + [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]]) + + def test_non_array(self): + assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), + [[1., -1.], [-1., 1.]]) + + def test_simple(self): + tgt1 = corrcoef(self.A) + assert_almost_equal(tgt1, self.res1) + assert_(np.all(np.abs(tgt1) <= 1.0)) + + tgt2 = corrcoef(self.A, self.B) + assert_almost_equal(tgt2, self.res2) + assert_(np.all(np.abs(tgt2) <= 1.0)) + + def test_ddof(self): + # ddof raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1) + sup.filter(DeprecationWarning) + # ddof has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2) + assert_almost_equal(corrcoef(self.A, ddof=3), self.res1) + assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2) + + def test_bias(self): + # bias raises DeprecationWarning + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0) + 
assert_warns(DeprecationWarning, corrcoef, self.A, bias=0) + sup.filter(DeprecationWarning) + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(self.A, bias=1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = corrcoef(x) + tgt = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(res, tgt) + assert_(np.all(np.abs(res) <= 1.0)) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(corrcoef(np.array([])), np.nan) + assert_array_equal(corrcoef(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(corrcoef(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_extreme(self): + x = [[1e-100, 1e100], [1e100, 1e-100]] + with np.errstate(all='raise'): + c = corrcoef(x) + assert_array_almost_equal(c, np.array([[1., -1.], [-1., 1.]])) + assert_(np.all(np.abs(c) <= 1.0)) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_corrcoef_dtype(self, test_type): + cast_A = self.A.astype(test_type) + res = corrcoef(cast_A, dtype=test_type) + assert test_type == res.dtype + + +class TestCov: + x1 = np.array([[0, 2], [1, 1], [2, 0]]).T + res1 = np.array([[1., -1.], [-1., 1.]]) + x2 = np.array([0.0, 1.0, 2.0], ndmin=2) + frequencies = np.array([1, 4, 1]) + x2_repeats = np.array([[0.0], [1.0], [1.0], [1.0], [1.0], [2.0]]).T + res2 = np.array([[0.4, -0.4], [-0.4, 0.4]]) + unit_frequencies = np.ones(3, dtype=np.int_) + weights = np.array([1.0, 4.0, 1.0]) + res3 = np.array([[2. / 3., -2. / 3.], [-2. / 3., 2. 
/ 3.]]) + unit_weights = np.ones(3) + x3 = np.array([0.3942, 0.5969, 0.7730, 0.9918, 0.7964]) + + def test_basic(self): + assert_allclose(cov(self.x1), self.res1) + + def test_complex(self): + x = np.array([[1, 2, 3], [1j, 2j, 3j]]) + res = np.array([[1., -1.j], [1.j, 1.]]) + assert_allclose(cov(x), res) + assert_allclose(cov(x, aweights=np.ones(3)), res) + + def test_xy(self): + x = np.array([[1, 2, 3]]) + y = np.array([[1j, 2j, 3j]]) + assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]])) + + def test_empty(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(np.array([])), np.nan) + assert_array_equal(cov(np.array([]).reshape(0, 2)), + np.array([]).reshape(0, 0)) + assert_array_equal(cov(np.array([]).reshape(2, 0)), + np.array([[np.nan, np.nan], [np.nan, np.nan]])) + + def test_wrong_ddof(self): + with warnings.catch_warnings(record=True): + warnings.simplefilter('always', RuntimeWarning) + assert_array_equal(cov(self.x1, ddof=5), + np.array([[np.inf, -np.inf], + [-np.inf, np.inf]])) + + def test_1D_rowvar(self): + assert_allclose(cov(self.x3), cov(self.x3, rowvar=False)) + y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501]) + assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False)) + + def test_1D_variance(self): + assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1)) + + def test_fweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies), + self.res1) + nonint = self.frequencies + 0.5 + assert_raises(TypeError, cov, self.x1, fweights=nonint) + f = np.ones((2, 3), dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = np.ones(2, dtype=np.int_) + assert_raises(RuntimeError, cov, self.x1, fweights=f) + f = -1 * np.ones(3, dtype=np.int_) + assert_raises(ValueError, cov, self.x1, fweights=f) + + def test_aweights(self): + assert_allclose(cov(self.x1, aweights=self.weights), self.res3) + assert_allclose(cov(self.x1, aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, aweights=self.unit_weights), self.res1) + w = np.ones((2, 3)) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = np.ones(2) + assert_raises(RuntimeError, cov, self.x1, aweights=w) + w = -1.0 * np.ones(3) + assert_raises(ValueError, cov, self.x1, aweights=w) + + def test_unit_fweights_and_aweights(self): + assert_allclose(cov(self.x2, fweights=self.frequencies, + aweights=self.unit_weights), + cov(self.x2_repeats)) + assert_allclose(cov(self.x1, fweights=self.frequencies, + aweights=self.unit_weights), + self.res2) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.weights), + self.res3) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=3.0 * self.weights), + cov(self.x1, aweights=self.weights)) + assert_allclose(cov(self.x1, fweights=self.unit_frequencies, + aweights=self.unit_weights), + self.res1) + + @pytest.mark.parametrize("test_type", [np.half, np.single, np.double, np.longdouble]) + def test_cov_dtype(self, test_type): + cast_x1 = self.x1.astype(test_type) + res = cov(cast_x1, dtype=test_type) + assert test_type == res.dtype + + def test_gh_27658(self): + x = np.ones((3, 1)) + expected = np.cov(x, ddof=0, 
rowvar=True) + actual = np.cov(x.T, ddof=0, rowvar=False) + assert_allclose(actual, expected, strict=True) + + +class Test_I0: + + def test_simple(self): + assert_almost_equal( + i0(0.5), + np.array(1.0634833707413234)) + + # need at least one test above 8, as the implementation is piecewise + A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549, 10.0]) + expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049, 2815.71662847]) + assert_almost_equal(i0(A), expected) + assert_almost_equal(i0(-A), expected) + + B = np.array([[0.827002, 0.99959078], + [0.89694769, 0.39298162], + [0.37954418, 0.05206293], + [0.36465447, 0.72446427], + [0.48164949, 0.50324519]]) + assert_almost_equal( + i0(B), + np.array([[1.17843223, 1.26583466], + [1.21147086, 1.03898290], + [1.03633899, 1.00067775], + [1.03352052, 1.13557954], + [1.05884290, 1.06432317]])) + # Regression test for gh-11205 + i0_0 = np.i0([0.]) + assert_equal(i0_0.shape, (1,)) + assert_array_equal(np.i0([0.]), np.array([1.])) + + def test_non_array(self): + a = np.arange(4) + + class array_like: + __array_interface__ = a.__array_interface__ + + def __array_wrap__(self, arr, context, return_scalar): + return self + + # E.g. pandas series survive ufunc calls through array-wrap: + assert isinstance(np.abs(array_like()), array_like) + exp = np.i0(a) + res = np.i0(array_like()) + + assert_array_equal(exp, res) + + def test_complex(self): + a = np.array([0, 1 + 2j]) + with pytest.raises(TypeError, match="i0 not supported for complex values"): + res = i0(a) + + +class TestKaiser: + + def test_simple(self): + assert_(np.isfinite(kaiser(1, 1.0))) + assert_almost_equal(kaiser(0, 1.0), + np.array([])) + assert_almost_equal(kaiser(2, 1.0), + np.array([0.78984831, 0.78984831])) + assert_almost_equal(kaiser(5, 1.0), + np.array([0.78984831, 0.94503323, 1., + 0.94503323, 0.78984831])) + assert_almost_equal(kaiser(5, 1.56789), + np.array([0.58285404, 0.88409679, 1., + 0.88409679, 0.58285404])) + + def test_int_beta(self): + kaiser(3, 4) + + +class TestMeshgrid: + + def test_simple(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7]) + assert_array_equal(X, np.array([[1, 2, 3], + [1, 2, 3], + [1, 2, 3], + [1, 2, 3]])) + assert_array_equal(Y, np.array([[4, 4, 4], + [5, 5, 5], + [6, 6, 6], + [7, 7, 7]])) + + def test_single_input(self): + [X] = meshgrid([1, 2, 3, 4]) + assert_array_equal(X, np.array([1, 2, 3, 4])) + + def test_no_input(self): + args = [] + assert_array_equal([], meshgrid(*args)) + assert_array_equal([], meshgrid(*args, copy=False)) + + def test_indexing(self): + x = [1, 2, 3] + y = [4, 5, 6, 7] + [X, Y] = meshgrid(x, y, indexing='ij') + assert_array_equal(X, np.array([[1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) + assert_array_equal(Y, np.array([[4, 5, 6, 7], + [4, 5, 6, 7], + [4, 5, 6, 7]])) + + # Test expected shapes: + z = [8, 9] + assert_(meshgrid(x, y)[0].shape == (4, 3)) + assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4)) + assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2)) + assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2)) + + assert_raises(ValueError, meshgrid, x, y, indexing='notvalid') + + def test_sparse(self): + [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True) + assert_array_equal(X, np.array([[1, 2, 3]])) + assert_array_equal(Y, np.array([[4], [5], [6], [7]])) + + def test_invalid_arguments(self): + # Test that meshgrid complains about invalid arguments + # Regression test for issue #4755: + # https://github.com/numpy/numpy/issues/4755 + assert_raises(TypeError, meshgrid, + [1, 2, 3], 
[4, 5, 6, 7], indices='ij') + + def test_return_type(self): + # Test for appropriate dtype in returned arrays. + # Regression test for issue #5297 + # https://github.com/numpy/numpy/issues/5297 + x = np.arange(0, 10, dtype=np.float32) + y = np.arange(10, 20, dtype=np.float64) + + X, Y = np.meshgrid(x, y) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # copy + X, Y = np.meshgrid(x, y, copy=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + # sparse + X, Y = np.meshgrid(x, y, sparse=True) + + assert_(X.dtype == x.dtype) + assert_(Y.dtype == y.dtype) + + def test_writeback(self): + # Issue 8561 + X = np.array([1.1, 2.2]) + Y = np.array([3.3, 4.4]) + x, y = np.meshgrid(X, Y, sparse=False, copy=True) + + x[0, :] = 0 + assert_equal(x[0, :], 0) + assert_equal(x[1, :], X) + + def test_nd_shape(self): + a, b, c, d, e = np.meshgrid(*([0] * i for i in range(1, 6))) + expected_shape = (2, 1, 3, 4, 5) + assert_equal(a.shape, expected_shape) + assert_equal(b.shape, expected_shape) + assert_equal(c.shape, expected_shape) + assert_equal(d.shape, expected_shape) + assert_equal(e.shape, expected_shape) + + def test_nd_values(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5]) + assert_equal(a, [[[0, 0, 0]], [[0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1]], [[2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5]], [[3, 4, 5]]]) + + def test_nd_indexing(self): + a, b, c = np.meshgrid([0], [1, 2], [3, 4, 5], indexing='ij') + assert_equal(a, [[[0, 0, 0], [0, 0, 0]]]) + assert_equal(b, [[[1, 1, 1], [2, 2, 2]]]) + assert_equal(c, [[[3, 4, 5], [3, 4, 5]]]) + + +class TestPiecewise: + + def test_simple(self): + # Condition is single bool list + x = piecewise([0, 0], [True, False], [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: single bool list + x = piecewise([0, 0], [[True, False]], [1]) + assert_array_equal(x, [1, 0]) + + # Conditions is single bool array + x = piecewise([0, 0], np.array([True, False]), [1]) + assert_array_equal(x, [1, 0]) + + # Condition is single int array + x = piecewise([0, 0], np.array([1, 0]), [1]) + assert_array_equal(x, [1, 0]) + + # List of conditions: int array + x = piecewise([0, 0], [np.array([1, 0])], [1]) + assert_array_equal(x, [1, 0]) + + x = piecewise([0, 0], [[False, True]], [lambda x:-1]) + assert_array_equal(x, [0, -1]) + + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], []) + assert_raises_regex(ValueError, '1 or 2 functions are expected', + piecewise, [0, 0], [[False, True]], [1, 2, 3]) + + def test_two_conditions(self): + x = piecewise([1, 2], [[True, False], [False, True]], [3, 4]) + assert_array_equal(x, [3, 4]) + + def test_scalar_domains_three_conditions(self): + x = piecewise(3, [True, False, False], [4, 2, 0]) + assert_equal(x, 4) + + def test_default(self): + # No value specified for x[1], should be 0 + x = piecewise([1, 2], [True, False], [2]) + assert_array_equal(x, [2, 0]) + + # Should set x[1] to 3 + x = piecewise([1, 2], [True, False], [2, 3]) + assert_array_equal(x, [2, 3]) + + def test_0d(self): + x = np.array(3) + y = piecewise(x, x > 3, [4, 0]) + assert_(y.ndim == 0) + assert_(y == 0) + + x = 5 + y = piecewise(x, [True, False], [1, 0]) + assert_(y.ndim == 0) + assert_(y == 1) + + # With 3 ranges (It was failing, before) + y = piecewise(x, [False, False, True], [1, 2, 3]) + assert_array_equal(y, 3) + + def test_0d_comparison(self): + x = 3 + y = piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed. 
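+ # Worked example (illustrative): with x = 3 the condition list is
+ # [True, False], so piecewise selects the first function value, 4; a
+ # third function, if supplied, would only apply where no condition
+ # holds.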
+ assert_equal(y, 4) + + # With 3 ranges (It was failing, before) + x = 4 + y = piecewise(x, [x <= 3, (x > 3) * (x <= 5), x > 5], [1, 2, 3]) + assert_array_equal(y, 2) + + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1]) + assert_raises_regex(ValueError, '2 or 3 functions are expected', + piecewise, x, [x <= 3, x > 3], [1, 1, 1, 1]) + + def test_0d_0d_condition(self): + x = np.array(3) + c = np.array(x > 3) + y = piecewise(x, [c], [1, 2]) + assert_equal(y, 2) + + def test_multidimensional_extrafunc(self): + x = np.array([[-2.5, -1.5, -0.5], + [0.5, 1.5, 2.5]]) + y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3]) + assert_array_equal(y, np.array([[-1., -1., -1.], + [3., 3., 1.]])) + + def test_subclasses(self): + class subclass(np.ndarray): + pass + x = np.arange(5.).view(subclass) + r = piecewise(x, [x < 2., x >= 4], [-1., 1., 0.]) + assert_equal(type(r), subclass) + assert_equal(r, [-1., -1., 0., 0., 1.]) + + +class TestBincount: + + def test_simple(self): + y = np.bincount(np.arange(4)) + assert_array_equal(y, np.ones(4)) + + def test_simple2(self): + y = np.bincount(np.array([1, 5, 2, 4, 1])) + assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1])) + + def test_simple_weight(self): + x = np.arange(4) + w = np.array([0.2, 0.3, 0.5, 0.1]) + y = np.bincount(x, w) + assert_array_equal(y, w) + + def test_simple_weight2(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1])) + + def test_with_minlength(self): + x = np.array([0, 1, 0, 1, 1]) + y = np.bincount(x, minlength=3) + assert_array_equal(y, np.array([2, 3, 0])) + x = [] + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([])) + + def test_with_minlength_smaller_than_maxvalue(self): + x = np.array([0, 1, 1, 2, 2, 3, 3]) + y = np.bincount(x, minlength=2) + assert_array_equal(y, np.array([1, 2, 2, 2])) + y = np.bincount(x, minlength=0) + assert_array_equal(y, np.array([1, 2, 2, 2])) + + def test_with_minlength_and_weights(self): + x = np.array([1, 2, 4, 5, 2]) + w = np.array([0.2, 0.3, 0.5, 0.1, 0.2]) + y = np.bincount(x, w, 8) + assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0])) + + def test_empty(self): + x = np.array([], dtype=int) + y = np.bincount(x) + assert_array_equal(x, y) + + def test_empty_with_minlength(self): + x = np.array([], dtype=int) + y = np.bincount(x, minlength=5) + assert_array_equal(y, np.zeros(5, dtype=int)) + + @pytest.mark.parametrize('minlength', [0, 3]) + def test_empty_list(self, minlength): + assert_array_equal(np.bincount([], minlength=minlength), + np.zeros(minlength, dtype=int)) + + def test_with_incorrect_minlength(self): + x = np.array([], dtype=int) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + x = np.arange(5) + assert_raises_regex(TypeError, + "'str' object cannot be interpreted", + lambda: np.bincount(x, minlength="foobar")) + assert_raises_regex(ValueError, + "must not be negative", + lambda: np.bincount(x, minlength=-1)) + + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_dtype_reference_leaks(self): + # gh-6805 + intp_refcount = sys.getrefcount(np.dtype(np.intp)) + double_refcount = sys.getrefcount(np.dtype(np.double)) + + for j in range(10): + np.bincount([1, 2, 3]) + 
assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + for j in range(10): + np.bincount([1, 2, 3], [4, 5, 6]) + assert_equal(sys.getrefcount(np.dtype(np.intp)), intp_refcount) + assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount) + + @pytest.mark.parametrize("vals", [[[2, 2]], 2]) + def test_error_not_1d(self, vals): + # Test that values has to be 1-D (both as array and nested list) + vals_arr = np.asarray(vals) + with assert_raises(ValueError): + np.bincount(vals_arr) + with assert_raises(ValueError): + np.bincount(vals) + + @pytest.mark.parametrize("dt", np.typecodes["AllInteger"]) + def test_gh_28354(self, dt): + a = np.array([0, 1, 1, 3, 2, 1, 7], dtype=dt) + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + + def test_contiguous_handling(self): + # check for absence of hard crash + np.bincount(np.arange(10000)[::2]) + + def test_gh_28354_array_like(self): + class A: + def __array__(self): + return np.array([0, 1, 1, 3, 2, 1, 7], dtype=np.uint64) + + a = A() + actual = np.bincount(a) + expected = [1, 3, 1, 1, 0, 0, 0, 1] + assert_array_equal(actual, expected) + + +class TestInterp: + + def test_exceptions(self): + assert_raises(ValueError, interp, 0, [], []) + assert_raises(ValueError, interp, 0, [0], [1, 2]) + assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0) + assert_raises(ValueError, interp, 0, [], [], period=360) + assert_raises(ValueError, interp, 0, [0], [1, 2], period=360) + + def test_basic(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.linspace(0, 1, 50) + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_right_left_behavior(self): + # Needs range of sizes to test different code paths. + # size ==1 is special cased, 1 < size < 5 is linear search, and + # size >= 5 goes through local search and possibly binary search. 
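+ # A minimal sketch of the size == 1 special case described above:
+ # with a single data point, interp returns fp[0] for every x (before
+ # the left/right overrides exercised below are applied).
+ assert np.interp(0.5, [0.0], [7.0]) == 7.0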
+ for size in range(1, 10): + xp = np.arange(size, dtype=np.double) + yp = np.ones(size, dtype=np.double) + incpts = np.array([-1, 0, size - 1, size], dtype=np.double) + decpts = incpts[::-1] + + incres = interp(incpts, xp, yp) + decres = interp(decpts, xp, yp) + inctgt = np.array([1, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0) + decres = interp(decpts, xp, yp, left=0) + inctgt = np.array([0, 1, 1, 1], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, right=2) + decres = interp(decpts, xp, yp, right=2) + inctgt = np.array([1, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + incres = interp(incpts, xp, yp, left=0, right=2) + decres = interp(decpts, xp, yp, left=0, right=2) + inctgt = np.array([0, 1, 1, 2], dtype=float) + dectgt = inctgt[::-1] + assert_equal(incres, inctgt) + assert_equal(decres, dectgt) + + def test_scalar_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = 0 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = .3 + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float32(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.float64(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + x0 = np.nan + assert_almost_equal(np.interp(x0, x, y), x0) + + def test_non_finite_behavior_exact_x(self): + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4]) + fp = [1, 2, np.nan, 4] + assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4]) + + @pytest.fixture(params=[ + np.float64, + lambda x: _make_complex(x, 0), + lambda x: _make_complex(0, x), + lambda x: _make_complex(x, np.multiply(x, -2)) + ], ids=[ + 'real', + 'complex-real', + 'complex-imag', + 'complex-both' + ]) + def sc(self, request): + """ scale function used by the below tests """ + return request.param + + def test_non_finite_any_nan(self, sc): + """ test that nans are propagated """ + assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan)) + + def test_non_finite_inf(self, sc): + """ Test that interp between opposite infs gives nan """ + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan)) + + # unless the y values are equal + assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10)) + + def test_non_finite_half_inf_xf(self, sc): + """ Test that interp where both axes have a bound at inf gives nan """ + assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), 
sc(np.nan)) + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan)) + + def test_non_finite_half_inf_x(self, sc): + """ Test interp where the x axis has a bound at inf """ + assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10)) + assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10)) # noqa: E202 + assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0)) + assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0)) + + def test_non_finite_half_inf_f(self, sc): + """ Test interp where the f axis has a bound at inf """ + assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf)) + assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf)) + + def test_complex_interp(self): + # test complex interpolation + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + (1 + np.linspace(0, 1, 5)) * 1.0j + x0 = 0.3 + y0 = x0 + (1 + x0) * 1.0j + assert_almost_equal(np.interp(x0, x, y), y0) + # test complex left and right + x0 = -1 + left = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, left=left), left) + x0 = 2.0 + right = 2 + 3.0j + assert_almost_equal(np.interp(x0, x, y, right=right), right) + # test complex non finite + x = [1, 2, 2.5, 3, 4] + xp = [1, 2, 3, 4] + fp = [1, 2 + 1j, np.inf, 4] + y = [1, 2 + 1j, np.inf + 0.5j, np.inf, 4] + assert_almost_equal(np.interp(x, xp, fp), y) + # test complex periodic + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5 + 1.0j, 10 + 2j, 3 + 3j, 4 + 4j] + y = [7.5 + 1.5j, 5. + 1.0j, 8.75 + 1.75j, 6.25 + 1.25j, 3. + 3j, 3.25 + 3.25j, + 3.5 + 3.5j, 3.75 + 3.75j] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + def test_zero_dimensional_interpolation_point(self): + x = np.linspace(0, 1, 5) + y = np.linspace(0, 1, 5) + x0 = np.array(.3) + assert_almost_equal(np.interp(x0, x, y), x0) + + xp = np.array([0, 2, 4]) + fp = np.array([1, -1, 1]) + + actual = np.interp(np.array(1), xp, fp) + assert_equal(actual, 0) + assert_(isinstance(actual, np.float64)) + + actual = np.interp(np.array(4.5), xp, fp, period=4) + assert_equal(actual, 0.5) + assert_(isinstance(actual, np.float64)) + + def test_if_len_x_is_small(self): + xp = np.arange(0, 10, 0.0001) + fp = np.sin(xp) + assert_almost_equal(np.interp(np.pi, xp, fp), 0.0) + + def test_period(self): + x = [-180, -170, -185, 185, -10, -5, 0, 365] + xp = [190, -190, 350, -350] + fp = [5, 10, 3, 4] + y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75] + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + x = np.array(x, order='F').reshape(2, -1) + y = np.array(y, order='C').reshape(2, -1) + assert_almost_equal(np.interp(x, xp, fp, period=360), y) + + +class TestPercentile: + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, 0), 0.) 
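+ # Arithmetic behind the expected values (default 'linear' method):
+ # x is [0.0, 0.5, ..., 3.5], so q=50 falls at index (8 - 1) * 0.5 = 3.5,
+ # halfway between x[3] = 1.5 and x[4] = 2.0, giving 1.75.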
+ assert_equal(np.percentile(x, 100), 3.5) + assert_equal(np.percentile(x, 50), 1.75) + x[1] = np.nan + assert_equal(np.percentile(x, 0), np.nan) + assert_equal(np.percentile(x, 0, method='nearest'), np.nan) + assert_equal(np.percentile(x, 0, method='inverted_cdf'), np.nan) + assert_equal( + np.percentile(x, 0, method='inverted_cdf', + weights=np.ones_like(x)), + np.nan, + ) + + def test_fraction(self): + x = [Fraction(i, 2) for i in range(8)] + + p = np.percentile(x, Fraction(0)) + assert_equal(p, Fraction(0)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(100)) + assert_equal(p, Fraction(7, 2)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, Fraction(50)) + assert_equal(p, Fraction(7, 4)) + assert_equal(type(p), Fraction) + + p = np.percentile(x, [Fraction(50)]) + assert_equal(p, np.array([Fraction(7, 4)])) + assert_equal(type(p), np.ndarray) + + def test_api(self): + d = np.ones(5) + np.percentile(d, 5, None, None, False) + np.percentile(d, 5, None, None, False, 'linear') + o = np.ones((1,)) + np.percentile(d, 5, None, o, False, 'linear') + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.percentile, arr_c, 0.5) + + def test_2D(self): + x = np.array([[1, 1, 1], + [1, 1, 1], + [4, 4, 3], + [1, 1, 1], + [1, 1, 1]]) + assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1]) + + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + def test_linear_nan_1D(self, dtype): + # METHOD 1 of H&F + arr = np.asarray([15.0, np.nan, 35.0, 40.0, 50.0], dtype=dtype) + res = np.percentile( + arr, + 40.0, + method="linear") + np.testing.assert_equal(res, np.nan) + np.testing.assert_equal(res.dtype, arr.dtype) + + H_F_TYPE_CODES = [(int_type, np.float64) + for int_type in np.typecodes["AllInteger"] + ] + [(np.float16, np.float16), + (np.float32, np.float32), + (np.float64, np.float64), + (np.longdouble, np.longdouble), + (np.dtype("O"), np.float64)] + + @pytest.mark.parametrize(["function", "quantile"], + [(np.quantile, 0.4), + (np.percentile, 40.0)]) + @pytest.mark.parametrize(["input_dtype", "expected_dtype"], H_F_TYPE_CODES) + @pytest.mark.parametrize(["method", "weighted", "expected"], + [("inverted_cdf", False, 20), + ("inverted_cdf", True, 20), + ("averaged_inverted_cdf", False, 27.5), + ("closest_observation", False, 20), + ("interpolated_inverted_cdf", False, 20), + ("hazen", False, 27.5), + ("weibull", False, 26), + ("linear", False, 29), + ("median_unbiased", False, 27), + ("normal_unbiased", False, 27.125), + ]) + def test_linear_interpolation(self, + function, + quantile, + method, + weighted, + expected, + input_dtype, + expected_dtype): + expected_dtype = np.dtype(expected_dtype) + + arr = np.asarray([15.0, 20.0, 35.0, 40.0, 50.0], dtype=input_dtype) + weights = np.ones_like(arr) if weighted else None + if input_dtype is np.longdouble: + if function is np.quantile: + # 0.4 is not exactly representable and it matters + # for "averaged_inverted_cdf", so we need to cheat. 
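+ # (Sketch of the precision issue: the float literal 0.4 is the
+ # nearest binary64 value, and casting it to longdouble preserves that
+ # rounding error, whereas parsing the string "0.4" rounds at extended
+ # precision.)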
+ quantile = input_dtype("0.4") + # We want to use nulp, but that does not work for longdouble + test_function = np.testing.assert_almost_equal + else: + test_function = np.testing.assert_array_almost_equal_nulp + + actual = function(arr, quantile, method=method, weights=weights) + + test_function(actual, expected_dtype.type(expected)) + + if method in ["inverted_cdf", "closest_observation"]: + if input_dtype == "O": + np.testing.assert_equal(np.asarray(actual).dtype, np.float64) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(input_dtype)) + else: + np.testing.assert_equal(np.asarray(actual).dtype, + np.dtype(expected_dtype)) + + TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["Float"] + "O" + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_lower_higher(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='lower'), 4) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 50, + method='higher'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_midpoint(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='midpoint'), 4.5) + assert_equal(np.percentile(np.arange(9, dtype=dtype) + 1, 50, + method='midpoint'), 5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 51, + method='midpoint'), 5.5) + assert_equal(np.percentile(np.arange(11, dtype=dtype), 50, + method='midpoint'), 5) + + @pytest.mark.parametrize("dtype", TYPE_CODES) + def test_nearest(self, dtype): + assert_equal(np.percentile(np.arange(10, dtype=dtype), 51, + method='nearest'), 5) + assert_equal(np.percentile(np.arange(10, dtype=dtype), 49, + method='nearest'), 4) + + def test_linear_interpolation_extrapolation(self): + arr = np.random.rand(5) + + actual = np.percentile(arr, 100) + np.testing.assert_equal(actual, arr.max()) + + actual = np.percentile(arr, 0) + np.testing.assert_equal(actual, arr.min()) + + def test_sequence(self): + x = np.arange(8) * 0.5 + assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75]) + + def test_axis(self): + x = np.arange(12).reshape(3, 4) + + assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0]) + + r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0) + + r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]] + assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T) + + # ensure qth axis is always first as with np.array(old_percentile(..)) + x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) + assert_equal(np.percentile(x, (25, 50)).shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5)) + assert_equal( + np.percentile(x, (25, 50, 75), axis=1).shape, (3, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), + method="higher").shape, (2,)) + assert_equal(np.percentile(x, (25, 50, 75), + method="higher").shape, (3,)) + assert_equal(np.percentile(x, (25, 50), axis=0, + method="higher").shape, (2, 4, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=1, + method="higher").shape, (2, 3, 5, 6)) + assert_equal(np.percentile(x, (25, 50), axis=2, + method="higher").shape, (2, 3, 4, 6)) + assert_equal(np.percentile(x, (25, 50), axis=3, + method="higher").shape, (2, 3, 4, 5)) + assert_equal(np.percentile(x, 
(25, 50, 75), axis=1, + method="higher").shape, (3, 3, 5, 6)) + + def test_scalar_q(self): + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50), 5.5) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + assert_equal(np.percentile(x, 50, axis=0), r0) + assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) + r1 = np.array([1.5, 5.5, 9.5]) + assert_almost_equal(np.percentile(x, 50, axis=1), r1) + assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) + + out = np.empty(1) + assert_equal(np.percentile(x, 50, out=out), 5.5) + assert_equal(out, 5.5) + out = np.empty(4) + assert_equal(np.percentile(x, 50, axis=0, out=out), r0) + assert_equal(out, r0) + out = np.empty(3) + assert_equal(np.percentile(x, 50, axis=1, out=out), r1) + assert_equal(out, r1) + + # test for no empty dimensions for compatibility with old percentile + x = np.arange(12).reshape(3, 4) + assert_equal(np.percentile(x, 50, method='lower'), 5.) + assert_(np.isscalar(np.percentile(x, 50))) + r0 = np.array([4., 5., 6., 7.]) + c0 = np.percentile(x, 50, method='lower', axis=0) + assert_equal(c0, r0) + assert_equal(c0.shape, r0.shape) + r1 = np.array([1., 5., 9.]) + c1 = np.percentile(x, 50, method='lower', axis=1) + assert_almost_equal(c1, r1) + assert_equal(c1.shape, r1.shape) + + out = np.empty((), dtype=x.dtype) + c = np.percentile(x, 50, method='lower', out=out) + assert_equal(c, 5) + assert_equal(out, 5) + out = np.empty(4, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + out = np.empty(3, dtype=x.dtype) + c = np.percentile(x, 50, method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_exception(self): + assert_raises(ValueError, np.percentile, [1, 2], 56, + method='foobar') + assert_raises(ValueError, np.percentile, [1], 101) + assert_raises(ValueError, np.percentile, [1], -1) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [101]) + assert_raises(ValueError, np.percentile, [1], list(range(50)) + [-0.1]) + + def test_percentile_list(self): + assert_equal(np.percentile([1, 2, 3], 0), 1) + + @pytest.mark.parametrize( + "percentile, with_weights", + [ + (np.percentile, False), + (partial(np.percentile, method="inverted_cdf"), True), + ] + ) + def test_percentile_out(self, percentile, with_weights): + out_dtype = int if with_weights else float + x = np.array([1, 2, 3]) + y = np.zeros((3,), dtype=out_dtype) + p = (1, 2, 3) + weights = np.ones_like(x) if with_weights else None + r = percentile(x, p, out=y, weights=weights) + assert r is y + assert_equal(percentile(x, p, weights=weights), y) + + x = np.array([[1, 2, 3], + [4, 5, 6]]) + y = np.zeros((3, 3), dtype=out_dtype) + weights = np.ones_like(x) if with_weights else None + r = percentile(x, p, axis=0, out=y, weights=weights) + assert r is y + assert_equal(percentile(x, p, weights=weights, axis=0), y) + + y = np.zeros((3, 2), dtype=out_dtype) + percentile(x, p, axis=1, out=y, weights=weights) + assert_equal(percentile(x, p, weights=weights, axis=1), y) + + x = np.arange(12).reshape(3, 4) + # q.dim > 1, float + if with_weights: + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + else: + r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]]) + out = np.empty((2, 4), dtype=out_dtype) + weights = np.ones_like(x) if with_weights else None + assert_equal( + percentile(x, (25, 50), axis=0, out=out, weights=weights), r0 + ) + assert_equal(out, r0) 
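+ # How the unweighted r0 above is derived (linear method): each column
+ # of x = np.arange(12).reshape(3, 4) looks like [0, 4, 8]; q=25 falls
+ # at index (3 - 1) * 0.25 = 0.5, i.e. 0 + 0.5 * (4 - 0) = 2, and q=50
+ # at index 1, i.e. 4. r1 below follows the same rule along the rows.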
+ r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + out = np.empty((2, 3)) + assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) + assert_equal(out, r1) + + # q.dim > 1, int + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + out = np.empty((2, 4), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) + assert_equal(c, r0) + assert_equal(out, r0) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) + out = np.empty((2, 3), dtype=x.dtype) + c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) + assert_equal(c, r1) + assert_equal(out, r1) + + def test_percentile_empty_dim(self): + # empty dims are preserved + d = np.arange(11 * 2).reshape(11, 1, 2, 1) + assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2)) + assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1)) + assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1)) + + assert_array_equal(np.percentile(d, 50, axis=2, + method='midpoint').shape, + (11, 1, 1)) + assert_array_equal(np.percentile(d, 50, axis=-2, + method='midpoint').shape, + (11, 1, 1)) + + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=0)).shape, + (2, 1, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=1)).shape, + (2, 11, 2, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=2)).shape, + (2, 11, 1, 1)) + assert_array_equal(np.array(np.percentile(d, [10, 50], axis=3)).shape, + (2, 11, 1, 2)) + + def test_percentile_no_overwrite(self): + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50], overwrite_input=False) + assert_equal(a, np.array([2, 3, 4, 1])) + + a = np.array([2, 3, 4, 1]) + np.percentile(a, [50]) + assert_equal(a, np.array([2, 3, 4, 1])) + + def test_no_p_overwrite(self): + p = np.linspace(0., 100., num=5) + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5)) + p = np.linspace(0., 100., num=5).tolist() + np.percentile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, np.linspace(0., 100., num=5).tolist()) + + def test_percentile_overwrite(self): + a = np.array([2, 3, 4, 1]) + b = np.percentile(a, [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True) + assert_equal(b, np.array([2.5])) + + def test_extended_axis(self): + o = np.random.normal(size=(71, 23)) + x = np.dstack([o] * 10) + assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30)) + x = np.moveaxis(x, -1, 0) + assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30)) + x = x.swapaxes(0, 1).copy() + + assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)), + np.percentile(x, [25, 60], axis=None)) + assert_equal(np.percentile(x, [25, 60], axis=(0,)), + np.percentile(x, [25, 60], axis=0)) + + d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) + np.random.shuffle(d.ravel()) + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + np.percentile(d[:, :, :, 0].flatten(), 25)) + assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], + np.percentile(d[:, :, 1, :].flatten(), [10, 
90])) + assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2], + np.percentile(d[:, :, 2, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2], + np.percentile(d[2, :, :, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1], + np.percentile(d[2, 1, :, :].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1], + np.percentile(d[2, :, :, 1].flatten(), 25)) + assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2], + np.percentile(d[2, :, 2, :].flatten(), 25)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.percentile, d, axis=-5, q=25) + assert_raises(AxisError, np.percentile, d, axis=(0, -5), q=25) + assert_raises(AxisError, np.percentile, d, axis=4, q=25) + assert_raises(AxisError, np.percentile, d, axis=(0, 4), q=25) + # each of these refers to the same axis twice + assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(-1, -1), q=25) + assert_raises(ValueError, np.percentile, d, axis=(3, -1), q=25) + + def test_keepdims(self): + d = np.ones((3, 5, 7, 11)) + assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, + (1, 1, 7, 11)) + assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, + (1, 5, 7, 1)) + assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, + (3, 1, 7, 11)) + assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, + (1, 1, 1, 1)) + assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, + (1, 1, 7, 1)) + + assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), + keepdims=True).shape, (2, 1, 1, 7, 1)) + assert_equal(np.percentile(d, [1, 7], axis=(0, 3), + keepdims=True).shape, (2, 1, 5, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.percentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.percentile(d, 0, 0, out=o), o) + assert_equal(np.percentile(d, 0, 0, method='nearest', out=o), o) + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o), o) + assert_equal(np.percentile(d, 1, 1, method='nearest', out=o), o) + + o = np.zeros(()) + assert_equal(np.percentile(d, 2, out=o), o) + assert_equal(np.percentile(d, 2, method='nearest', out=o), o) + + @pytest.mark.parametrize("method, weighted", [ + ("linear", False), + ("nearest", False), + ("inverted_cdf", False), + ("inverted_cdf", True), + ]) + def test_out_nan(self, method, weighted): + if weighted: + kwargs = {"weights": np.ones((3, 4)), "method": method} + else: + kwargs = {"method": method} + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.percentile(d, 0, 0, out=o, **kwargs), o) + + o = np.zeros((3,)) + assert_equal(np.percentile(d, 1, 1, out=o, **kwargs), o) + + o = np.zeros(()) + 
assert_equal(np.percentile(d, 1, out=o, **kwargs), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3, axis=0), np.nan) + assert_equal(np.percentile(a, [0.3, 0.6], axis=0), + np.array([np.nan] * 2)) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.percentile(a, 0.3), np.nan) + assert_equal(np.percentile(a, 0.3).ndim, 0) + + # axis0 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 0), b) + + # axis0 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], 0) + b[:, 2, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 0), b) + + # axis1 zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.percentile(a, 0.3, 1), b) + # axis1 not zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1) + b[:, 1, 3] = np.nan + b[:, 1, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], 1), b) + + # axis02 zerod + b = np.percentile( + np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.percentile(a, 0.3, (0, 2)), b) + # axis02 not zerod + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2)) + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b) + # axis02 not zerod with method='nearest' + b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), + [0.3, 0.6], (0, 2), method='nearest') + b[:, 1] = np.nan + b[:, 2] = np.nan + assert_equal(np.percentile( + a, [0.3, 0.6], (0, 2), method='nearest'), b) + + def test_nan_q(self): + # GH18830 + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], np.nan) + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], [np.nan]) + q = np.linspace(1.0, 99.0, 16) + q[0] = np.nan + with pytest.raises(ValueError, match="Percentiles must be in"): + np.percentile([1, 2, 3, 4.0], q) + + @pytest.mark.parametrize("dtype", ["m8[D]", "M8[s]"]) + @pytest.mark.parametrize("pos", [0, 23, 10]) + def test_nat_basic(self, dtype, pos): + # TODO: Note that times have dubious rounding as of fixing NaTs! + # NaT and NaN should behave the same, do basic tests for NaT: + a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.percentile(a, 30) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.percentile(a, 30, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + +quantile_methods = [ + 'inverted_cdf', 'averaged_inverted_cdf', 'closest_observation', + 'interpolated_inverted_cdf', 'hazen', 'weibull', 'linear', + 'median_unbiased', 'normal_unbiased', 'nearest', 'lower', 'higher', + 'midpoint'] + + +methods_supporting_weights = ["inverted_cdf"] + + +class TestQuantile: + # most of this is already tested by TestPercentile + + def V(self, x, y, alpha): + # Identification function used in several tests. 
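+ # Why this works: E[V(x, Y)] = P(Y <= x) - alpha = F(x) - alpha,
+ # which is zero exactly when x is the alpha-quantile of Y.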
+ return (x >= y) - alpha + + def test_max_ulp(self): + x = [0.0, 0.2, 0.4] + a = np.quantile(x, 0.45) + # The default linear method would result in 0 + 0.2 * (0.45/2) = 0.18. + # 0.18 is not exactly representable and the formula leads to a 1 ULP + # different result. Ensure it is this exact within 1 ULP, see gh-20331. + np.testing.assert_array_max_ulp(a, 0.18, maxulp=1) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.quantile(x, 0), 0.) + assert_equal(np.quantile(x, 1), 3.5) + assert_equal(np.quantile(x, 0.5), 1.75) + + def test_correct_quantile_value(self): + a = np.array([True]) + tf_quant = np.quantile(True, False) + assert_equal(tf_quant, a[0]) + assert_equal(type(tf_quant), a.dtype) + a = np.array([False, True, True]) + quant_res = np.quantile(a, a) + assert_array_equal(quant_res, a) + assert_equal(quant_res.dtype, a.dtype) + + def test_fraction(self): + # fractional input, integral quantile + x = [Fraction(i, 2) for i in range(8)] + q = np.quantile(x, 0) + assert_equal(q, 0) + assert_equal(type(q), Fraction) + + q = np.quantile(x, 1) + assert_equal(q, Fraction(7, 2)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, .5) + assert_equal(q, 1.75) + assert_equal(type(q), np.float64) + + q = np.quantile(x, Fraction(1, 2)) + assert_equal(q, Fraction(7, 4)) + assert_equal(type(q), Fraction) + + q = np.quantile(x, [Fraction(1, 2)]) + assert_equal(q, np.array([Fraction(7, 4)])) + assert_equal(type(q), np.ndarray) + + q = np.quantile(x, [[Fraction(1, 2)]]) + assert_equal(q, np.array([[Fraction(7, 4)]])) + assert_equal(type(q), np.ndarray) + + # repeat with integral input but fractional quantile + x = np.arange(8) + assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) + + def test_complex(self): + # gh-22652 + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.quantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.quantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) + def test_quantile_preserve_int_type(self, dtype): + res = np.quantile(np.array([1, 2], dtype=dtype), [0.5], + method="nearest") + assert res.dtype == dtype + + @pytest.mark.parametrize("method", quantile_methods) + def test_q_zero_one(self, method): + # gh-24710 + arr = [10, 11, 12] + quantile = np.quantile(arr, q=[0, 1], method=method) + assert_equal(quantile, np.array([10, 12])) + + @pytest.mark.parametrize("method", quantile_methods) + def test_quantile_monotonic(self, method): + # GH 14685 + # test that the return value of quantile is monotonic if p0 is ordered + # Also tests that the boundary values are not mishandled. 
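+ # (Concretely: p0 is ascending, so the returned quantile array must
+ # equal its own sorted order; including p = 0 and p = 1 in p0 also
+ # exercises the boundary handling.)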
+ p0 = np.linspace(0, 1, 101) + quantile = np.quantile(np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, + 8, 8, 7]) * 0.1, p0, method=method) + assert_equal(np.sort(quantile), quantile) + + # Also test one where the number of data points is clearly divisible: + quantile = np.quantile([0., 1., 2., 3.], p0, method=method) + assert_equal(np.sort(quantile), quantile) + + @hypothesis.given( + arr=arrays(dtype=np.float64, + shape=st.integers(min_value=3, max_value=1000), + elements=st.floats(allow_infinity=False, allow_nan=False, + min_value=-1e300, max_value=1e300))) + def test_quantile_monotonic_hypo(self, arr): + p0 = np.arange(0, 1, 0.01) + quantile = np.quantile(arr, p0) + assert_equal(np.sort(quantile), quantile) + + def test_quantile_scalar_nan(self): + a = np.array([[10., 7., 4.], [3., 2., 1.]]) + a[0][1] = np.nan + actual = np.quantile(a, 0.5) + assert np.isscalar(actual) + assert_equal(np.quantile(a, 0.5), np.nan) + + @pytest.mark.parametrize("weights", [False, True]) + @pytest.mark.parametrize("method", quantile_methods) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_identification_equation(self, weights, method, alpha): + # Test that the identification equation holds for the empirical + # CDF: + # E[V(x, Y)] = 0 <=> x is quantile + # with Y the random variable for which we have observed values and + # V(x, y) the canonical identification function for the quantile (at + # level alpha), see + # https://doi.org/10.48550/arXiv.0912.0902 + if weights and method not in methods_supporting_weights: + pytest.skip("Weights not supported by method.") + rng = np.random.default_rng(4321) + # We choose n and alpha such that we cover 3 cases: + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gets rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n) if weights else None + x = np.quantile(y, alpha, method=method, weights=w) + + if method in ("higher",): + # These methods do not fulfill the identification equation. + assert np.abs(np.mean(self.V(x, y, alpha))) > 0.1 / n + elif int(n * alpha) == n * alpha and not weights: + # We can expect exact results, up to machine precision. + assert_allclose( + np.average(self.V(x, y, alpha), weights=w), 0, atol=1e-14, + ) + else: + # V = (x >= y) - alpha cannot sum to zero exactly but within + # "sample precision". + assert_allclose(np.average(self.V(x, y, alpha), weights=w), 0, + atol=1 / n / np.amin([alpha, 1 - alpha])) + + @pytest.mark.parametrize("weights", [False, True]) + @pytest.mark.parametrize("method", quantile_methods) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_add_and_multiply_constant(self, weights, method, alpha): + # Test that + # 1. quantile(c + x) = c + quantile(x) + # 2. quantile(c * x) = c * quantile(x) + # 3. quantile(-x) = -quantile(x, 1 - alpha) + # On empirical quantiles, this equation does not hold exactly. + # Koenker (2005) "Quantile Regression" Chapter 2.2.3 calls these + # properties equivariance. + if weights and method not in methods_supporting_weights: + pytest.skip("Weights not supported by method.") + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gets rounded up + n = 102 # n * alpha = 20.4, 51. 
, 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n) if weights else None + q = np.quantile(y, alpha, method=method, weights=w) + c = 13.5 + + # 1 + assert_allclose(np.quantile(c + y, alpha, method=method, weights=w), + c + q) + # 2 + assert_allclose(np.quantile(c * y, alpha, method=method, weights=w), + c * q) + # 3 + if weights: + # From here on, we would need more methods to support weights. + return + q = -np.quantile(-y, 1 - alpha, method=method) + if method == "inverted_cdf": + if ( + n * alpha == int(n * alpha) + or np.round(n * alpha) == int(n * alpha) + 1 + ): + assert_allclose(q, np.quantile(y, alpha, method="higher")) + else: + assert_allclose(q, np.quantile(y, alpha, method="lower")) + elif method == "closest_observation": + if n * alpha == int(n * alpha): + assert_allclose(q, np.quantile(y, alpha, method="higher")) + elif np.round(n * alpha) == int(n * alpha) + 1: + assert_allclose( + q, np.quantile(y, alpha + 1 / n, method="higher")) + else: + assert_allclose(q, np.quantile(y, alpha, method="lower")) + elif method == "interpolated_inverted_cdf": + assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method)) + elif method == "nearest": + if n * alpha == int(n * alpha): + assert_allclose(q, np.quantile(y, alpha + 1 / n, method=method)) + else: + assert_allclose(q, np.quantile(y, alpha, method=method)) + elif method == "lower": + assert_allclose(q, np.quantile(y, alpha, method="higher")) + elif method == "higher": + assert_allclose(q, np.quantile(y, alpha, method="lower")) + else: + # "averaged_inverted_cdf", "hazen", "weibull", "linear", + # "median_unbiased", "normal_unbiased", "midpoint" + assert_allclose(q, np.quantile(y, alpha, method=method)) + + @pytest.mark.parametrize("method", methods_supporting_weights) + @pytest.mark.parametrize("alpha", [0.2, 0.5, 0.9]) + def test_quantile_constant_weights(self, method, alpha): + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gets rounded up + n = 102 # n * alpha = 20.4, 51. , 91.8 + y = rng.random(n) + q = np.quantile(y, alpha, method=method) + + w = np.ones_like(y) + qw = np.quantile(y, alpha, method=method, weights=w) + assert_allclose(qw, q) + + w = 8.125 * np.ones_like(y) + qw = np.quantile(y, alpha, method=method, weights=w) + assert_allclose(qw, q) + + @pytest.mark.parametrize("method", methods_supporting_weights) + @pytest.mark.parametrize("alpha", [0, 0.2, 0.5, 0.9, 1]) + def test_quantile_with_integer_weights(self, method, alpha): + # Integer weights can be interpreted as repeated observations. + rng = np.random.default_rng(4321) + # We choose n and alpha such that we have cases for + # - n * alpha is an integer + # - n * alpha is a float that gets rounded down + # - n * alpha is a float that gets rounded up + n = 102 # n * alpha = 20.4, 51. 
, 91.8 + y = rng.random(n) + w = rng.integers(low=0, high=10, size=n, dtype=np.int32) + + qw = np.quantile(y, alpha, method=method, weights=w) + q = np.quantile(np.repeat(y, w), alpha, method=method) + assert_allclose(qw, q) + + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_with_weights_and_axis(self, method): + rng = np.random.default_rng(4321) + + # 1d weight and single alpha + y = rng.random((2, 10, 3)) + w = np.abs(rng.random(10)) + alpha = 0.5 + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(2, 3)) + for i in range(2): + for j in range(3): + q_res[i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w + ) + assert_allclose(q, q_res) + + # 1d weight and 1d alpha + alpha = [0, 0.2, 0.4, 0.6, 0.8, 1] # shape (6,) + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(6, 2, 3)) + for i in range(2): + for j in range(3): + q_res[:, i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w + ) + assert_allclose(q, q_res) + + # 1d weight and 2d alpha + alpha = [[0, 0.2], [0.4, 0.6], [0.8, 1]] # shape (3, 2) + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = q_res.reshape((3, 2, 2, 3)) + assert_allclose(q, q_res) + + # shape of weights equals shape of y + w = np.abs(rng.random((2, 10, 3))) + alpha = 0.5 + q = np.quantile(y, alpha, weights=w, method=method, axis=1) + q_res = np.zeros(shape=(2, 3)) + for i in range(2): + for j in range(3): + q_res[i, j] = np.quantile( + y[i, :, j], alpha, method=method, weights=w[i, :, j] + ) + assert_allclose(q, q_res) + + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_weights_min_max(self, method): + # Test weighted quantile at 0 and 1 with leading and trailing zero + # weights. 
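+        # Zero-weighted observations carry no probability mass and should be
+        # ignored entirely: below, only y[2], y[3] and y[4] have nonzero
+        # weight, so the 0-quantile is y[2] and the 1-quantile is y[4].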
+ w = [0, 0, 1, 2, 3, 0] + y = np.arange(6) + y_min = np.quantile(y, 0, weights=w, method="inverted_cdf") + y_max = np.quantile(y, 1, weights=w, method="inverted_cdf") + assert y_min == y[2] # == 2 + assert y_max == y[4] # == 4 + + def test_quantile_weights_raises_negative_weights(self): + y = [1, 2] + w = [-0.5, 1] + with pytest.raises(ValueError, match="Weights must be non-negative"): + np.quantile(y, 0.5, weights=w, method="inverted_cdf") + + @pytest.mark.parametrize( + "method", + sorted(set(quantile_methods) - set(methods_supporting_weights)), + ) + def test_quantile_weights_raises_unsupported_methods(self, method): + y = [1, 2] + w = [0.5, 1] + msg = "Only method 'inverted_cdf' supports weights" + with pytest.raises(ValueError, match=msg): + np.quantile(y, 0.5, weights=w, method=method) + + def test_weibull_fraction(self): + arr = [Fraction(0, 1), Fraction(1, 10)] + quantile = np.quantile(arr, [0, ], method='weibull') + assert_equal(quantile, np.array(Fraction(0, 1))) + quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull') + assert_equal(quantile, np.array(Fraction(1, 20))) + + def test_closest_observation(self): + # Round ties to nearest even order statistic (see #26656) + m = 'closest_observation' + q = 0.5 + arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(2, np.quantile(arr[0:3], q, method=m)) + assert_equal(2, np.quantile(arr[0:4], q, method=m)) + assert_equal(2, np.quantile(arr[0:5], q, method=m)) + assert_equal(3, np.quantile(arr[0:6], q, method=m)) + assert_equal(4, np.quantile(arr[0:7], q, method=m)) + assert_equal(4, np.quantile(arr[0:8], q, method=m)) + assert_equal(4, np.quantile(arr[0:9], q, method=m)) + assert_equal(5, np.quantile(arr, q, method=m)) + + +class TestLerp: + @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + t1=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b): + l0 = nfb._lerp(a, b, t0) + l1 = nfb._lerp(a, b, t1) + if t0 == t1 or a == b: + assert l0 == l1 # uninteresting + elif (t0 < t1) == (a < b): + assert l0 <= l1 + else: + assert l0 >= l1 + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_bounded(self, t, a, b): + if a <= b: + assert a <= nfb._lerp(a, b, t) <= b + else: + assert b <= nfb._lerp(a, b, t) <= a + + @hypothesis.given(t=st.floats(allow_nan=False, allow_infinity=False, + min_value=0, max_value=1), + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) + def test_linear_interpolation_formula_symmetric(self, t, a, b): + # double subtraction is needed to remove the extra precision of t < 0.5 + left = nfb._lerp(a, b, 1 - (1 - t)) + right = nfb._lerp(b, a, 1 - t) + assert_allclose(left, right) + + def test_linear_interpolation_formula_0d_inputs(self): + a = np.array(2) + b = np.array(5) + t = np.array(0.2) + assert nfb._lerp(a, b, t) == 2.6 + + +class TestMedian: + + def test_basic(self): + a0 = np.array(1) + a1 = 
np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_equal(np.median(a0), 1) + assert_allclose(np.median(a1), 0.5) + assert_allclose(np.median(a2), 2.5) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_equal(np.median(a2, axis=1), [1, 4]) + assert_allclose(np.median(a2, axis=None), 2.5) + + a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) + assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) + a = np.array([0.0463301, 0.0444502, 0.141249]) + assert_equal(a[0], np.median(a)) + a = np.array([0.0444502, 0.141249, 0.0463301]) + assert_equal(a[-1], np.median(a)) + # check array scalar result + assert_equal(np.median(a).ndim, 0) + a[1] = np.nan + assert_equal(np.median(a).ndim, 0) + + def test_axis_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: + orig = a.copy() + np.median(a, axis=None) + for ax in range(a.ndim): + np.median(a, axis=ax) + assert_array_equal(a, orig) + + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3), 3.5) + assert_allclose(np.median(a3, axis=None), 3.5) + assert_allclose(np.median(a3.T), 3.5) + + def test_overwrite_keyword(self): + a3 = np.array([[2, 3], + [0, 1], + [6, 7], + [4, 5]]) + a0 = np.array(1) + a1 = np.arange(2) + a2 = np.arange(6).reshape(2, 3) + assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) + assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) + assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) + assert_allclose( + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose( + np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4]) + + a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) + np.random.shuffle(a4.ravel()) + assert_allclose(np.median(a4, axis=None), + np.median(a4.copy(), axis=None, overwrite_input=True)) + assert_allclose(np.median(a4, axis=0), + np.median(a4.copy(), axis=0, overwrite_input=True)) + assert_allclose(np.median(a4, axis=1), + np.median(a4.copy(), axis=1, overwrite_input=True)) + assert_allclose(np.median(a4, axis=2), + np.median(a4.copy(), axis=2, overwrite_input=True)) + + def test_array_like(self): + x = [1, 2, 3] + assert_almost_equal(np.median(x), 2) + x2 = [x] + assert_almost_equal(np.median(x2), 2) + assert_allclose(np.median(x2, axis=0), x) + + def test_subclass(self): + # gh-3846 + class MySubClass(np.ndarray): + + def __new__(cls, input_array, info=None): + obj = np.asarray(input_array).view(cls) + obj.info = info + return obj + + def mean(self, axis=None, dtype=None, out=None): + return -7 + + a = MySubClass([1, 2, 3]) + assert_equal(np.median(a), -7) + + @pytest.mark.parametrize('arr', + ([1., 2., 3.], [1., np.nan, 3.], np.nan, 0.)) + def test_subclass2(self, arr): + """Check that we return subclasses, even if a NaN scalar.""" + class MySubclass(np.ndarray): + pass + + m = np.median(np.array(arr).view(MySubclass)) + assert isinstance(m, MySubclass) + + def test_out(self): + o = np.zeros((4,)) + d = np.ones((3, 4)) + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_out_nan(self): + 
with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', RuntimeWarning) + o = np.zeros((4,)) + d = np.ones((3, 4)) + d[2, 1] = np.nan + assert_equal(np.median(d, 0, out=o), o) + o = np.zeros((3,)) + assert_equal(np.median(d, 1, out=o), o) + o = np.zeros(()) + assert_equal(np.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.arange(24, dtype=float) + a[2] = np.nan + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a, axis=0), np.nan) + + a = np.arange(24, dtype=float).reshape(2, 3, 4) + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_equal(np.median(a), np.nan) + assert_equal(np.median(a).ndim, 0) + + # axis0 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 0), b) + + # axis1 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1) + b[1, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.median(a, 1), b) + + # axis02 + b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.median(a, (0, 2)), b) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work correctly") + def test_empty(self): + # mean(empty array) emits two warnings: empty slice and divide by 0 + a = np.array([], dtype=float) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + assert_equal(len(w), 2) + + # multiple dimensions + a = np.array([], dtype=float, ndmin=3) + # no axis + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a), np.nan) + assert_(w[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.array([], dtype=float, ndmin=2) + assert_equal(np.median(a, axis=0), b) + assert_equal(np.median(a, axis=1), b) + + # axis 2 + b = np.array(np.nan, dtype=float, ndmin=2) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.arange(7.) 
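+        # Object arrays take the generic (non-vectorized) path; the median of
+        # a float-valued object array should still come back as a plain
+        # float, with and without a NaN in the data.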
+        assert_(type(np.median(o.astype(object))) is float)
+        o[2] = np.nan
+        assert_(type(np.median(o.astype(object))) is float)
+
+    def test_extended_axis(self):
+        o = np.random.normal(size=(71, 23))
+        x = np.dstack([o] * 10)
+        assert_equal(np.median(x, axis=(0, 1)), np.median(o))
+        x = np.moveaxis(x, -1, 0)
+        assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
+        x = x.swapaxes(0, 1).copy()
+        assert_equal(np.median(x, axis=(0, -1)), np.median(o))
+
+        assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None))
+        assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0))
+        assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1))
+
+        d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11))
+        np.random.shuffle(d.ravel())
+        assert_equal(np.median(d, axis=(0, 1, 2))[0],
+                     np.median(d[:, :, :, 0].flatten()))
+        assert_equal(np.median(d, axis=(0, 1, 3))[1],
+                     np.median(d[:, :, 1, :].flatten()))
+        assert_equal(np.median(d, axis=(3, 1, -4))[2],
+                     np.median(d[:, :, 2, :].flatten()))
+        assert_equal(np.median(d, axis=(3, 1, 2))[2],
+                     np.median(d[2, :, :, :].flatten()))
+        assert_equal(np.median(d, axis=(3, 2))[2, 1],
+                     np.median(d[2, 1, :, :].flatten()))
+        assert_equal(np.median(d, axis=(1, -2))[2, 1],
+                     np.median(d[2, :, :, 1].flatten()))
+        assert_equal(np.median(d, axis=(1, 3))[2, 2],
+                     np.median(d[2, :, 2, :].flatten()))
+
+    def test_extended_axis_invalid(self):
+        d = np.ones((3, 5, 7, 11))
+        assert_raises(AxisError, np.median, d, axis=-5)
+        assert_raises(AxisError, np.median, d, axis=(0, -5))
+        assert_raises(AxisError, np.median, d, axis=4)
+        assert_raises(AxisError, np.median, d, axis=(0, 4))
+        assert_raises(ValueError, np.median, d, axis=(1, 1))
+
+    def test_keepdims(self):
+        d = np.ones((3, 5, 7, 11))
+        assert_equal(np.median(d, axis=None, keepdims=True).shape,
+                     (1, 1, 1, 1))
+        assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape,
+                     (1, 1, 7, 11))
+        assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape,
+                     (1, 5, 7, 1))
+        assert_equal(np.median(d, axis=(1,), keepdims=True).shape,
+                     (3, 1, 7, 11))
+        assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape,
+                     (1, 1, 1, 1))
+        assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape,
+                     (1, 1, 7, 1))
+
+    @pytest.mark.parametrize(
+        argnames='axis',
+        argvalues=[
+            None,
+            1,
+            (1, ),
+            (0, 1),
+            (-3, -1),
+        ]
+    )
+    def test_keepdims_out(self, axis):
+        d = np.ones((3, 5, 7, 11))
+        if axis is None:
+            shape_out = (1,) * d.ndim
+        else:
+            axis_norm = normalize_axis_tuple(axis, d.ndim)
+            shape_out = tuple(
+                1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+        out = np.empty(shape_out)
+        result = np.median(d, axis=axis, keepdims=True, out=out)
+        assert result is out
+        assert_equal(result.shape, shape_out)
+
+    @pytest.mark.parametrize("dtype", ["m8[s]"])
+    @pytest.mark.parametrize("pos", [0, 23, 10])
+    def test_nat_behavior(self, dtype, pos):
+        # TODO: Median does not support Datetime, due to `mean`.
+        # NaT and NaN should behave the same, do basic tests for NaT.
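+        # A single NaT should poison the reduction exactly like NaN does for
+        # floats: the overall median becomes NaT, and along an axis only the
+        # lane containing the NaT does.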
+ a = np.arange(0, 24, dtype=dtype) + a[pos] = "NaT" + res = np.median(a) + assert res.dtype == dtype + assert np.isnat(res) + res = np.percentile(a, [30, 60]) + assert res.dtype == dtype + assert np.isnat(res).all() + + a = np.arange(0, 24 * 3, dtype=dtype).reshape(-1, 3) + a[pos, 1] = "NaT" + res = np.median(a, axis=0) + assert_array_equal(np.isnat(res), [False, True, False]) + + +class TestSortComplex: + + @pytest.mark.parametrize("type_in, type_out", [ + ('l', 'D'), + ('h', 'F'), + ('H', 'F'), + ('b', 'F'), + ('B', 'F'), + ('g', 'G'), + ]) + def test_sort_real(self, type_in, type_out): + # sort_complex() type casting for real input types + a = np.array([5, 3, 6, 2, 1], dtype=type_in) + actual = np.sort_complex(a) + expected = np.sort(a).astype(type_out) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + + def test_sort_complex(self): + # sort_complex() handling of complex input + a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D') + expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D') + actual = np.sort_complex(a) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) diff --git a/python/numpy/lib/tests/test_histograms.py b/python/numpy/lib/tests/test_histograms.py new file mode 100644 index 000000000..b7752d1a8 --- /dev/null +++ b/python/numpy/lib/tests/test_histograms.py @@ -0,0 +1,855 @@ +import pytest + +import numpy as np +from numpy import histogram, histogram_bin_edges, histogramdd +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) + + +class TestHistogram: + + def setup_method(self): + pass + + def teardown_method(self): + pass + + def test_simple(self): + n = 100 + v = np.random.rand(n) + (a, b) = histogram(v) + # check if the sum of the bins equals the number of samples + assert_equal(np.sum(a, axis=0), n) + # check that the bin counts are evenly spaced when the data is from + # a linear function + (a, b) = histogram(np.linspace(0, 10, 100)) + assert_array_equal(a, 10) + + def test_one_bin(self): + # Ticket 632 + hist, edges = histogram([1, 2, 3, 4], [1, 2]) + assert_array_equal(hist, [2, ]) + assert_array_equal(edges, [1, 2]) + assert_raises(ValueError, histogram, [1, 2], bins=0) + h, e = histogram([1, 2], bins=1) + assert_equal(h, np.array([2])) + assert_allclose(e, np.array([1., 2.])) + + def test_density(self): + # Check that the integral of the density equals 1. + n = 100 + v = np.random.rand(n) + a, b = histogram(v, density=True) + area = np.sum(a * np.diff(b)) + assert_almost_equal(area, 1) + + # Check with non-constant bin widths + v = np.arange(10) + bins = [0, 1, 3, 6, 10] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, .1) + assert_equal(np.sum(a * np.diff(b)), 1) + + # Test that passing False works too + a, b = histogram(v, bins, density=False) + assert_array_equal(a, [1, 2, 3, 4]) + + # Variable bin widths are especially useful to deal with + # infinities. + v = np.arange(10) + bins = [0, 1, 3, 6, np.inf] + a, b = histogram(v, bins, density=True) + assert_array_equal(a, [.1, .1, .1, 0.]) + + # Taken from a bug report from N. Becker on the numpy-discussion + # mailing list Aug. 6, 2010. 
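+        # Worked through: bin [0.5, 1.5) holds 1 of the 4 samples and has
+        # width 1, so its density is 1 / (4 * 1) = 0.25; the second bin holds
+        # the remaining 3 samples but has infinite width, giving density 0.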
+        counts, dmy = np.histogram(
+            [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
+        assert_equal(counts, [.25, 0])
+
+    def test_outliers(self):
+        # Check that outliers are not tallied
+        a = np.arange(10) + .5
+
+        # Lower outliers
+        h, b = histogram(a, range=[0, 9])
+        assert_equal(h.sum(), 9)
+
+        # Upper outliers
+        h, b = histogram(a, range=[1, 10])
+        assert_equal(h.sum(), 9)
+
+        # Normalization
+        h, b = histogram(a, range=[1, 9], density=True)
+        assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
+
+        # Weights
+        w = np.arange(10) + .5
+        h, b = histogram(a, range=[1, 9], weights=w, density=True)
+        assert_equal((h * np.diff(b)).sum(), 1)
+
+        h, b = histogram(a, bins=8, range=[1, 9], weights=w)
+        assert_equal(h, w[1:-1])
+
+    def test_arr_weights_mismatch(self):
+        a = np.arange(10) + .5
+        w = np.arange(11) + .5
+        with assert_raises_regex(ValueError, "same shape as"):
+            h, b = histogram(a, range=[1, 9], weights=w, density=True)
+
+    def test_type(self):
+        # Check the type of the returned histogram
+        a = np.arange(10) + .5
+        h, b = histogram(a)
+        assert_(np.issubdtype(h.dtype, np.integer))
+
+        h, b = histogram(a, density=True)
+        assert_(np.issubdtype(h.dtype, np.floating))
+
+        h, b = histogram(a, weights=np.ones(10, int))
+        assert_(np.issubdtype(h.dtype, np.integer))
+
+        h, b = histogram(a, weights=np.ones(10, float))
+        assert_(np.issubdtype(h.dtype, np.floating))
+
+    def test_f32_rounding(self):
+        # gh-4799, check that the rounding of the edges works with float32
+        x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
+        y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
+        counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
+        assert_equal(counts_hist.sum(), 3.)
+
+    def test_bool_conversion(self):
+        # gh-12107
+        # Reference integer histogram
+        a = np.array([1, 1, 0], dtype=np.uint8)
+        int_hist, int_edges = np.histogram(a)
+
+        # Should raise a warning on booleans
+        # Ensure that the histograms are equivalent; suppress the warnings
+        # to get at the actual outputs
+        with suppress_warnings() as sup:
+            rec = sup.record(RuntimeWarning, 'Converting input from .*')
+            hist, edges = np.histogram([True, True, False])
+            # A warning should be issued
+            assert_equal(len(rec), 1)
+            assert_array_equal(hist, int_hist)
+            assert_array_equal(edges, int_edges)
+
+    def test_weights(self):
+        v = np.random.rand(100)
+        w = np.ones(100) * 5
+        a, b = histogram(v)
+        na, nb = histogram(v, density=True)
+        wa, wb = histogram(v, weights=w)
+        nwa, nwb = histogram(v, weights=w, density=True)
+        assert_array_almost_equal(a * 5, wa)
+        assert_array_almost_equal(na, nwa)
+
+        # Check weights are properly applied.
+        v = np.linspace(0, 10, 10)
+        w = np.concatenate((np.zeros(5), np.ones(5)))
+        wa, wb = histogram(v, bins=np.arange(11), weights=w)
+        assert_array_almost_equal(wa, w)
+
+        # Check with integer weights
+        wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
+        assert_array_equal(wa, [4, 5, 0, 1])
+        wa, wb = histogram(
+            [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
+        assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
+
+        # Check weights with non-uniform bin widths
+        a, b = histogram(
+            np.arange(9), [0, 1, 3, 6, 10],
+            weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
+        assert_almost_equal(a, [.2, .1, .1, .075])
+
+    def test_exotic_weights(self):
+
+        # Test the use of weights that are not integer or floats, but e.g.
+        # complex numbers or object types.
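+        # The histogram essentially just sums the weights that land in each
+        # bin, so any weight dtype with a well-defined addition works; the
+        # complex weights below accumulate real and imaginary parts
+        # independently.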
+
+        # Complex weights
+        values = np.array([1.3, 2.5, 2.3])
+        weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
+
+        # Check with custom bins
+        wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+        assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+        # Check with even bins
+        wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+        assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
+
+        # Decimal weights
+        from decimal import Decimal
+        values = np.array([1.3, 2.5, 2.3])
+        weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
+
+        # Check with custom bins
+        wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
+        assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+        # Check with even bins
+        wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
+        assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
+
+    def test_no_side_effects(self):
+        # This is a regression test that ensures that values passed to
+        # ``histogram`` are unchanged.
+        values = np.array([1.3, 2.5, 2.3])
+        np.histogram(values, range=[-10, 10], bins=100)
+        assert_array_almost_equal(values, [1.3, 2.5, 2.3])
+
+    def test_empty(self):
+        a, b = histogram([], bins=([0, 1]))
+        assert_array_equal(a, np.array([0]))
+        assert_array_equal(b, np.array([0, 1]))
+
+    def test_error_binnum_type(self):
+        # Tests that the right error is raised if the bins argument is a float
+        vals = np.linspace(0.0, 1.0, num=100)
+        histogram(vals, 5)
+        assert_raises(TypeError, histogram, vals, 2.4)
+
+    def test_finite_range(self):
+        # Normal ranges should be fine
+        vals = np.linspace(0.0, 1.0, num=100)
+        histogram(vals, range=[0.25, 0.75])
+        assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75])
+        assert_raises(ValueError, histogram, vals, range=[0.25, np.inf])
+
+    def test_invalid_range(self):
+        # start of range must be < end of range
+        vals = np.linspace(0.0, 1.0, num=100)
+        with assert_raises_regex(ValueError, "max must be larger than"):
+            np.histogram(vals, range=[0.1, 0.01])
+
+    def test_bin_edge_cases(self):
+        # Ensure that floating-point computations correctly place edge cases.
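+        # With 8296 bins over (2, 2280), the edges are not exactly
+        # representable in floating point; each sample must nonetheless land
+        # in a bin satisfying left <= x < right, i.e. rounding must never
+        # push a value across an edge.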
+ arr = np.array([337, 404, 739, 806, 1007, 1811, 2012]) + hist, edges = np.histogram(arr, bins=8296, range=(2, 2280)) + mask = hist > 0 + left_edges = edges[:-1][mask] + right_edges = edges[1:][mask] + for x, left, right in zip(arr, left_edges, right_edges): + assert_(x >= left) + assert_(x < right) + + def test_last_bin_inclusive_range(self): + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5)) + assert_equal(hist[-1], 1) + + def test_bin_array_dims(self): + # gracefully handle bins object > 1 dimension + vals = np.linspace(0.0, 1.0, num=100) + bins = np.array([[0, 0.5], [0.6, 1.0]]) + with assert_raises_regex(ValueError, "must be 1d"): + np.histogram(vals, bins=bins) + + def test_unsigned_monotonicity_check(self): + # Ensures ValueError is raised if bins not increasing monotonically + # when bins contain unsigned values (see #9222) + arr = np.array([2]) + bins = np.array([1, 3, 1], dtype='uint64') + with assert_raises(ValueError): + hist, edges = np.histogram(arr, bins=bins) + + def test_object_array_of_0d(self): + # gh-7864 + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [-np.inf]) + assert_raises(ValueError, + histogram, [np.array(0.4) for i in range(10)] + [np.inf]) + + # these should not crash + np.histogram([np.array(0.5) for i in range(10)] + [.500000000000002]) + np.histogram([np.array(0.5) for i in range(10)] + [.5]) + + def test_some_nan_values(self): + # gh-7503 + one_nan = np.array([0, 1, np.nan]) + all_nan = np.array([np.nan, np.nan]) + + # the internal comparisons with NaN give warnings + sup = suppress_warnings() + sup.filter(RuntimeWarning) + with sup: + # can't infer range with nan + assert_raises(ValueError, histogram, one_nan, bins='auto') + assert_raises(ValueError, histogram, all_nan, bins='auto') + + # explicit range solves the problem + h, b = histogram(one_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins='auto', range=(0, 1)) + assert_equal(h.sum(), 0) # nan is not counted + + # as does an explicit set of bins + h, b = histogram(one_nan, bins=[0, 1]) + assert_equal(h.sum(), 2) # nan is not counted + h, b = histogram(all_nan, bins=[0, 1]) + assert_equal(h.sum(), 0) # nan is not counted + + def test_datetime(self): + begin = np.datetime64('2000-01-01', 'D') + offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20]) + bins = np.array([0, 2, 7, 20]) + dates = begin + offsets + date_bins = begin + bins + + td = np.dtype('timedelta64[D]') + + # Results should be the same for integer offsets or datetime values. 
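+        # (the datetime edges are simply `begin + integer offsets`, so the
+        # bin assignment should be identical in all three representations)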
+ # For now, only explicit bins are supported, since linspace does not + # work on datetimes or timedeltas + d_count, d_edge = histogram(dates, bins=date_bins) + t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td)) + i_count, i_edge = histogram(offsets, bins=bins) + + assert_equal(d_count, i_count) + assert_equal(t_count, i_count) + + assert_equal((d_edge - begin).astype(int), i_edge) + assert_equal(t_edge.astype(int), i_edge) + + assert_equal(d_edge.dtype, dates.dtype) + assert_equal(t_edge.dtype, td) + + def do_signed_overflow_bounds(self, dtype): + exponent = 8 * np.dtype(dtype).itemsize - 1 + arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype) + hist, e = histogram(arr, bins=2) + assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4]) + assert_equal(hist, [1, 1]) + + def test_signed_overflow_bounds(self): + self.do_signed_overflow_bounds(np.byte) + self.do_signed_overflow_bounds(np.short) + self.do_signed_overflow_bounds(np.intc) + self.do_signed_overflow_bounds(np.int_) + self.do_signed_overflow_bounds(np.longlong) + + def do_precision_lower_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([1.0 + eps, 2.0], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[0] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [0]) + assert_equal(x_loc.dtype, float_large) + + def do_precision_upper_bound(self, float_small, float_large): + eps = np.finfo(float_large).eps + + arr = np.array([1.0], float_small) + range = np.array([0.0, 1.0 - eps], float_large) + + # test is looking for behavior when the bounds change between dtypes + if range.astype(float_small)[-1] != 1: + return + + # previously crashed + count, x_loc = np.histogram(arr, bins=1, range=range) + assert_equal(count, [0]) + + assert_equal(x_loc.dtype, float_large) + + def do_precision(self, float_small, float_large): + self.do_precision_lower_bound(float_small, float_large) + self.do_precision_upper_bound(float_small, float_large) + + def test_precision(self): + # not looping results in a useful stack trace upon failure + self.do_precision(np.half, np.single) + self.do_precision(np.half, np.double) + self.do_precision(np.half, np.longdouble) + self.do_precision(np.single, np.double) + self.do_precision(np.single, np.longdouble) + self.do_precision(np.double, np.longdouble) + + def test_histogram_bin_edges(self): + hist, e = histogram([1, 2, 3, 4], [1, 2]) + edges = histogram_bin_edges([1, 2, 3, 4], [1, 2]) + assert_array_equal(edges, e) + + arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.]) + hist, e = histogram(arr, bins=30, range=(-0.5, 5)) + edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5)) + assert_array_equal(edges, e) + + hist, e = histogram(arr, bins='auto', range=(0, 1)) + edges = histogram_bin_edges(arr, bins='auto', range=(0, 1)) + assert_array_equal(edges, e) + + def test_small_value_range(self): + arr = np.array([1, 1 + 2e-16] * 10) + with pytest.raises(ValueError, match="Too many bins for data range"): + histogram(arr, bins=10) + + # @requires_memory(free_bytes=1e10) + # @pytest.mark.slow + @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") + def test_big_arrays(self): + sample = np.zeros([100000000, 3]) + xbins = 400 + ybins = 400 + zbins = np.arange(16000) + hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins)) + assert_equal(type(hist), 
type((1, 2))) + + def test_gh_23110(self): + hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'), + bins=2, + range=(-1e-308, -2e-313)) + expected_hist = np.array([1, 0]) + assert_array_equal(hist, expected_hist) + + def test_gh_28400(self): + e = 1 + 1e-12 + Z = [0, 1, 1, 1, 1, 1, e, e, e, e, e, e, 2] + counts, edges = np.histogram(Z, bins="auto") + assert len(counts) < 10 + assert edges[0] == Z[0] + assert edges[-1] == Z[-1] + +class TestHistogramOptimBinNums: + """ + Provide test coverage when using provided estimators for optimal number of + bins + """ + + def test_empty(self): + estimator_list = ['fd', 'scott', 'rice', 'sturges', + 'doane', 'sqrt', 'auto', 'stone'] + # check it can deal with empty data + for estimator in estimator_list: + a, b = histogram([], bins=estimator) + assert_array_equal(a, np.array([0])) + assert_array_equal(b, np.array([0, 1])) + + def test_simple(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). All test values have been precomputed and the values + shouldn't change + """ + # Some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7, + 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2}, + 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10, + 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9}, + 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14, + 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}} + + for testlen, expectedResults in basic_test.items(): + # Create some sort of non uniform data to test with + # (2 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x = np.concatenate((x1, x2)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator) + assert_equal(len(a), numbins, err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}") + + def test_small(self): + """ + Smaller datasets have the potential to cause issues with the data + adaptive methods, especially the FD method. All bin numbers have been + precalculated. 
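+        (The data here is np.arange(n) for n in {1, 2, 3}, so the spread
+        statistics these estimators rely on are tiny or zero.)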
+ """ + small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, + 'doane': 1, 'sqrt': 1, 'stone': 1}, + 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2, + 'doane': 1, 'sqrt': 2, 'stone': 1}, + 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3, + 'doane': 3, 'sqrt': 2, 'stone': 1}} + + for testlen, expectedResults in small_dat.items(): + testdat = np.arange(testlen).astype(float) + for estimator, expbins in expectedResults.items(): + a, b = np.histogram(testdat, estimator) + assert_equal(len(a), expbins, err_msg=f"For the {estimator} estimator " + f"with datasize of {testlen}") + + def test_incorrect_methods(self): + """ + Check a Value Error is thrown when an unknown string is passed in + """ + check_list = ['mad', 'freeman', 'histograms', 'IQR'] + for estimator in check_list: + assert_raises(ValueError, histogram, [1, 2, 3], estimator) + + def test_novariance(self): + """ + Check that methods handle no variance in data + Primarily for Scott and FD as the SD and IQR are both 0 in this case + """ + novar_dataset = np.ones(100) + novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1, + 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1} + + for estimator, numbins in novar_resultdict.items(): + a, b = np.histogram(novar_dataset, estimator) + assert_equal(len(a), numbins, + err_msg=f"{estimator} estimator, No Variance test") + + def test_limited_variance(self): + """ + Check when IQR is 0, but variance exists, we return a reasonable value. + """ + lim_var_data = np.ones(1000) + lim_var_data[:3] = 0 + lim_var_data[-4:] = 100 + + edges_auto = histogram_bin_edges(lim_var_data, 'auto') + assert_equal(edges_auto[0], 0) + assert_equal(edges_auto[-1], 100.) + assert len(edges_auto) < 100 + + edges_fd = histogram_bin_edges(lim_var_data, 'fd') + assert_equal(edges_fd, np.array([0, 100])) + + edges_sturges = histogram_bin_edges(lim_var_data, 'sturges') + assert_equal(edges_sturges, np.linspace(0, 100, 12)) + + def test_outlier(self): + """ + Check the FD, Scott and Doane with outliers. + + The FD estimates a smaller binwidth since it's less affected by + outliers. Since the range is so (artificially) large, this means more + bins, most of which will be empty, but the data of interest usually is + unaffected. The Scott estimator is more affected and returns fewer bins, + despite most of the variance being in one area of the data. The Doane + estimator lies somewhere between the other two. + """ + xcenter = np.linspace(-10, 10, 50) + outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter)) + + outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6} + + for estimator, numbins in outlier_resultdict.items(): + a, b = np.histogram(outlier_dataset, estimator) + assert_equal(len(a), numbins) + + def test_scott_vs_stone(self): + """Verify that Scott's rule and Stone's rule converges for normally distributed data""" + + def nbins_ratio(seed, size): + rng = np.random.RandomState(seed) + x = rng.normal(loc=0, scale=2, size=size) + a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0]) + return a / (a + b) + + ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)] + for seed in range(10)] + + # the average difference between the two methods decreases as the dataset size increases. + avg = abs(np.mean(ll, axis=0) - 0.5) + assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2) + + def test_simple_range(self): + """ + Straightforward testing with a mixture of linspace data (for + consistency). 
Adding in a 3rd mixture that will then be + completely ignored. All test values have been precomputed and + the shouldn't change. + """ + # some basic sanity checking, with some fixed data. + # Checking for the correct number of bins + basic_test = { + 50: {'fd': 8, 'scott': 8, 'rice': 15, + 'sturges': 14, 'auto': 14, 'stone': 8}, + 500: {'fd': 15, 'scott': 16, 'rice': 32, + 'sturges': 20, 'auto': 20, 'stone': 80}, + 5000: {'fd': 33, 'scott': 33, 'rice': 69, + 'sturges': 27, 'auto': 33, 'stone': 80} + } + + for testlen, expectedResults in basic_test.items(): + # create some sort of non uniform data to test with + # (3 peak uniform mixture) + x1 = np.linspace(-10, -1, testlen // 5 * 2) + x2 = np.linspace(1, 10, testlen // 5 * 3) + x3 = np.linspace(-100, -50, testlen) + x = np.hstack((x1, x2, x3)) + for estimator, numbins in expectedResults.items(): + a, b = np.histogram(x, estimator, range=(-20, 20)) + msg = f"For the {estimator} estimator" + msg += f" with datasize of {testlen}" + assert_equal(len(a), numbins, err_msg=msg) + + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_signed_integer_data(self, bins): + # Regression test for gh-14379. + a = np.array([-2, 0, 127], dtype=np.int8) + hist, edges = np.histogram(a, bins=bins) + hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins) + assert_array_equal(hist, hist32) + assert_array_equal(edges, edges32) + + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_integer(self, bins): + """ + Test that bin width for integer data is at least 1. + """ + with suppress_warnings() as sup: + if bins == 'stone': + sup.filter(RuntimeWarning) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), + np.arange(9)) + + def test_integer_non_auto(self): + """ + Test that the bin-width>=1 requirement *only* applies to auto binning. + """ + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), 16), + np.arange(17) / 2) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), [.1, .2]), + [.1, .2]) + + def test_simple_weighted(self): + """ + Check that weighted data raises a TypeError + """ + estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto'] + for estimator in estimator_list: + assert_raises(TypeError, histogram, [1, 2, 3], + estimator, weights=[1, 2, 3]) + + +class TestHistogramdd: + + def test_simple(self): + x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], + [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]]) + H, edges = histogramdd(x, (2, 3, 3), + range=[[-1, 1], [0, 3], [0, 3]]) + answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]], + [[0, 1, 0], [0, 0, 1], [0, 0, 1]]]) + assert_array_equal(H, answer) + + # Check normalization + ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]] + H, edges = histogramdd(x, bins=ed, density=True) + assert_(np.all(H == answer / 12.)) + + # Check that H has the correct shape. + H, edges = histogramdd(x, (2, 3, 4), + range=[[-1, 1], [0, 3], [0, 4]], + density=True) + answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]], + [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]]) + assert_array_almost_equal(H, answer / 6., 4) + # Check that a sequence of arrays is accepted and H has the correct + # shape. 
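+        # np.split(x, 3, axis=1) turns the (6, 3) sample array into three
+        # (6, 1) columns; squeezing gives three 1-D coordinate vectors, which
+        # histogramdd accepts in place of a single 2-D sample array.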
+ z = [np.squeeze(y) for y in np.split(x, 3, axis=1)] + H, edges = histogramdd( + z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]]) + answer = np.array([[[0, 0], [0, 0], [0, 0]], + [[0, 1], [0, 0], [1, 0]], + [[0, 1], [0, 0], [0, 0]], + [[0, 0], [0, 0], [0, 0]]]) + assert_array_equal(H, answer) + + Z = np.zeros((5, 5, 5)) + Z[list(range(5)), list(range(5)), list(range(5))] = 1. + H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5) + assert_array_equal(H, Z) + + def test_shape_3d(self): + # All possible permutations for bins of different lengths in 3D. + bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4), + (4, 5, 6)) + r = np.random.rand(10, 3) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_shape_4d(self): + # All possible permutations for bins of different lengths in 4D. + bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4), + (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6), + (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7), + (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5), + (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5), + (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4)) + + r = np.random.rand(10, 4) + for b in bins: + H, edges = histogramdd(r, b) + assert_(H.shape == b) + + def test_weights(self): + v = np.random.rand(100, 2) + hist, edges = histogramdd(v) + n_hist, edges = histogramdd(v, density=True) + w_hist, edges = histogramdd(v, weights=np.ones(100)) + assert_array_equal(w_hist, hist) + w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True) + assert_array_equal(w_hist, n_hist) + w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2) + assert_array_equal(w_hist, 2 * hist) + + def test_identical_samples(self): + x = np.zeros((10, 2), int) + hist, edges = histogramdd(x, bins=2) + assert_array_equal(edges[0], np.array([-0.5, 0., 0.5])) + + def test_empty(self): + a, b = histogramdd([[], []], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, np.array([[0.]])) + a, b = np.histogramdd([[], [], []], bins=2) + assert_array_max_ulp(a, np.zeros((2, 2, 2))) + + def test_bins_errors(self): + # There are two ways to specify bins. Check for the right errors + # when mixing those. + x = np.arange(8).reshape(2, 4) + assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5]) + assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1]) + assert_raises( + ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]]) + assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]])) + + def test_inf_edges(self): + # Test using +/-inf bin edges works. See #1788. + with np.errstate(invalid='ignore'): + x = np.arange(6).reshape(3, 2) + expected = np.array([[1, 0], [0, 1], [0, 1]]) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])]) + assert_allclose(h, expected) + h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]]) + assert_allclose(h, expected) + + def test_rightmost_binedge(self): + # Test event very close to rightmost binedge. See Github issue #4266 + x = [0.9999999995] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) + x = [1.0] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 1.) 
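+        # Values even marginally above the last edge must not be counted: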
+ x = [1.0000000001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + x = [1.0001] + bins = [[0., 0.5, 1.0]] + hist, _ = histogramdd(x, bins=bins) + assert_(hist[0] == 0.0) + assert_(hist[1] == 0.0) + + def test_finite_range(self): + vals = np.random.random((100, 3)) + histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]]) + assert_raises(ValueError, histogramdd, vals, + range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]]) + + def test_equal_edges(self): + """ Test that adjacent entries in an edge array can be equal """ + x = np.array([0, 1, 2]) + y = np.array([0, 1, 2]) + x_edges = np.array([0, 2, 2]) + y_edges = 1 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + hist_expected = np.array([ + [2.], + [1.], # x == 2 falls in the final bin + ]) + assert_equal(hist, hist_expected) + + def test_edge_dtype(self): + """ Test that if an edge array is input, its type is preserved """ + x = np.array([0, 10, 20]) + y = x / 10 + x_edges = np.array([0, 5, 15, 20]) + y_edges = x_edges / 10 + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(edges[0].dtype, x_edges.dtype) + assert_equal(edges[1].dtype, y_edges.dtype) + + def test_large_integers(self): + big = 2**60 # Too large to represent with a full precision float + + x = np.array([0], np.int64) + x_edges = np.array([-1, +1], np.int64) + y = big + x + y_edges = big + x_edges + + hist, edges = histogramdd((x, y), bins=(x_edges, y_edges)) + + assert_equal(hist[0, 0], 1) + + def test_density_non_uniform_2d(self): + # Defines the following grid: + # + # 0 2 8 + # 0+-+-----+ + # + | + + # + | + + # 6+-+-----+ + # 8+-+-----+ + x_edges = np.array([0, 2, 8]) + y_edges = np.array([0, 6, 8]) + relative_areas = np.array([ + [3, 9], + [1, 3]]) + + # ensure the number of points in each region is proportional to its area + x = np.array([1] + [1] * 3 + [7] * 3 + [7] * 9) + y = np.array([7] + [1] * 3 + [7] * 3 + [1] * 9) + + # sanity check that the above worked as intended + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges)) + assert_equal(hist, relative_areas) + + # resulting histogram should be uniform, since counts and areas are proportional + hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True) + assert_equal(hist, 1 / (8 * 8)) + + def test_density_non_uniform_1d(self): + # compare to histogram to show the results are the same + v = np.arange(10) + bins = np.array([0, 1, 3, 6, 10]) + hist, edges = histogram(v, bins, density=True) + hist_dd, edges_dd = histogramdd((v,), (bins,), density=True) + assert_equal(hist, hist_dd) + assert_equal(edges, edges_dd[0]) diff --git a/python/numpy/lib/tests/test_index_tricks.py b/python/numpy/lib/tests/test_index_tricks.py new file mode 100644 index 000000000..7150a7867 --- /dev/null +++ b/python/numpy/lib/tests/test_index_tricks.py @@ -0,0 +1,573 @@ +import pytest + +import numpy as np +from numpy.lib._index_tricks_impl import ( + c_, + diag_indices, + diag_indices_from, + fill_diagonal, + index_exp, + ix_, + mgrid, + ndenumerate, + ndindex, + ogrid, + r_, + s_, +) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +class TestRavelUnravelIndex: + def test_basic(self): + assert_equal(np.unravel_index(2, (2, 2)), (1, 0)) + + # test that new shape argument works 
properly + assert_equal(np.unravel_index(indices=2, + shape=(2, 2)), + (1, 0)) + + # test that an invalid second keyword argument + # is properly handled, including the old name `dims`. + with assert_raises(TypeError): + np.unravel_index(indices=2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(2, hape=(2, 2)) + + with assert_raises(TypeError): + np.unravel_index(254, ims=(17, 94)) + + with assert_raises(TypeError): + np.unravel_index(254, dims=(17, 94)) + + assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2) + assert_equal(np.unravel_index(254, (17, 94)), (2, 66)) + assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254) + assert_raises(ValueError, np.unravel_index, -1, (2, 2)) + assert_raises(TypeError, np.unravel_index, 0.5, (2, 2)) + assert_raises(ValueError, np.unravel_index, 4, (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2)) + assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2)) + assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2)) + + assert_equal(np.unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), [2, 1, 4]) + assert_equal( + np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2 * 3 + 1) * 6 + 4) + + arr = np.array([[3, 6, 6], [4, 5, 1]]) + assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37]) + assert_equal( + np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13]) + assert_equal( + np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19]) + assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')), + [12, 13, 13]) + assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621) + + assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)), + [[3, 6, 6], [4, 5, 1]]) + assert_equal( + np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'), + [[3, 6, 6], [4, 5, 1]]) + assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1]) + + def test_empty_indices(self): + msg1 = 'indices must be integral: the provided empty sequence was' + msg2 = 'only int indices permitted' + assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5)) + assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5)) + assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]), + (10, 3, 5)) + assert_equal(np.unravel_index(np.array([], dtype=int), (10, 3, 5)), + [[], [], []]) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []), + (10, 3)) + assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']), + (10, 3)) + assert_raises_regex(TypeError, msg2, np.ravel_multi_index, + (np.array([]), np.array([])), (5, 3)) + assert_equal(np.ravel_multi_index( + (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), []) + assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int), + (5, 3)), []) + + def test_big_indices(self): + # ravel_multi_index for big indices (issue #7546) + if np.intp == np.int64: + arr = ([1, 29], [3, 5], [3, 117], [19, 2], + [2379, 1284], [2, 2], [0, 1]) + assert_equal( + np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)), + [5627771580, 117259570957]) + + # test unravel_index for big indices (issue #9538) + assert_raises(ValueError, np.unravel_index, 1, (2**32 - 1, 2**31 + 1)) + + # test overflow checking for too big array (issue #7546) + dummy_arr = ([0], [0]) + half_max = np.iinfo(np.intp).max // 2 + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2)), [0]) + 
assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max + 1, 2)) + assert_equal( + np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0]) + assert_raises(ValueError, + np.ravel_multi_index, dummy_arr, (half_max + 1, 2), order='F') + + def test_dtypes(self): + # Test with different data types + for dtype in [np.int16, np.uint16, np.int32, + np.uint32, np.int64, np.uint64]: + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype) + shape = (5, 8) + uncoords = 8 * coords[0] + coords[1] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0] + 5 * coords[1] + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + coords = np.array( + [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], + dtype=dtype) + shape = (5, 8, 10) + uncoords = 10 * (8 * coords[0] + coords[1]) + coords[2] + assert_equal(np.ravel_multi_index(coords, shape), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape)) + uncoords = coords[0] + 5 * (coords[1] + 8 * coords[2]) + assert_equal( + np.ravel_multi_index(coords, shape, order='F'), uncoords) + assert_equal(coords, np.unravel_index(uncoords, shape, order='F')) + + def test_clipmodes(self): + # Test clipmodes + assert_equal( + np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'), + np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12))) + assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), + mode=( + 'wrap', 'raise', 'clip', 'raise')), + np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12))) + assert_raises( + ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) + + def test_writeability(self): + # gh-7269 + x, y = np.unravel_index([1, 2, 3], (4, 5)) + assert_(x.flags.writeable) + assert_(y.flags.writeable) + + def test_0d(self): + # gh-580 + x = np.unravel_index(0, ()) + assert_equal(x, ()) + + assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ()) + assert_raises_regex( + ValueError, "out of bounds", np.unravel_index, [1], ()) + + @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"]) + def test_empty_array_ravel(self, mode): + res = np.ravel_multi_index( + np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode) + assert res.shape == (0,) + + with assert_raises(ValueError): + np.ravel_multi_index( + np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode) + + def test_empty_array_unravel(self): + res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0)) + # res is a tuple of three empty arrays + assert len(res) == 3 + assert all(a.shape == (0,) for a in res) + + with assert_raises(ValueError): + np.unravel_index([1], (2, 1, 0)) + + def test_regression_size_1_index(self): + # actually tests the nditer size one index tracking + # regression test for gh-29690 + np.unravel_index(np.array([[1, 0, 1, 0]], dtype=np.uint32), (4,)) + +class TestGrid: + def test_basic(self): + a = mgrid[-1:1:10j] + b = mgrid[-1:1:0.1] + assert_(a.shape == (10,)) + assert_(b.shape == (20,)) + assert_(a[0] == -1) + assert_almost_equal(a[-1], 1) + assert_(b[0] == -1) + assert_almost_equal(b[1] - b[0], 0.1, 11) + assert_almost_equal(b[-1], b[0] + 19 * 0.1, 11) + assert_almost_equal(a[1] - a[0], 2.0 / 9.0, 11) + + def test_linspace_equivalence(self): + y, st = np.linspace(2, 10, retstep=True) + assert_almost_equal(st, 8 / 49.0) + assert_array_almost_equal(y, mgrid[2:10:50j], 13) + + def test_nd(self): + c = 
mgrid[-1:1:10j, -2:2:10j] + d = mgrid[-1:1:0.1, -2:2:0.2] + assert_(c.shape == (2, 10, 10)) + assert_(d.shape == (2, 20, 20)) + assert_array_equal(c[0][0, :], -np.ones(10, 'd')) + assert_array_equal(c[1][:, 0], -2 * np.ones(10, 'd')) + assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11) + assert_array_almost_equal(c[1][:, -1], 2 * np.ones(10, 'd'), 11) + assert_array_almost_equal(d[0, 1, :] - d[0, 0, :], + 0.1 * np.ones(20, 'd'), 11) + assert_array_almost_equal(d[1, :, 1] - d[1, :, 0], + 0.2 * np.ones(20, 'd'), 11) + + def test_sparse(self): + grid_full = mgrid[-1:1:10j, -2:2:10j] + grid_sparse = ogrid[-1:1:10j, -2:2:10j] + + # sparse grids can be made dense by broadcasting + grid_broadcast = np.broadcast_arrays(*grid_sparse) + for f, b in zip(grid_full, grid_broadcast): + assert_equal(f, b) + + @pytest.mark.parametrize("start, stop, step, expected", [ + (None, 10, 10j, (200, 10)), + (-10, 20, None, (1800, 30)), + ]) + def test_mgrid_size_none_handling(self, start, stop, step, expected): + # regression test None value handling for + # start and step values used by mgrid; + # internally, this aims to cover previously + # unexplored code paths in nd_grid() + grid = mgrid[start:stop:step, start:stop:step] + # need a smaller grid to explore one of the + # untested code paths + grid_small = mgrid[start:stop:step] + assert_equal(grid.size, expected[0]) + assert_equal(grid_small.size, expected[1]) + + def test_accepts_npfloating(self): + # regression test for #16466 + grid64 = mgrid[0.1:0.33:0.1, ] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ] + assert_array_almost_equal(grid64, grid32) + # At some point this was float64, but NEP 50 changed it: + assert grid32.dtype == np.float32 + assert grid64.dtype == np.float64 + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)] + assert_(grid32.dtype == np.float64) + assert_array_almost_equal(grid64, grid32) + + def test_accepts_longdouble(self): + # regression tests for #16945 + grid64 = mgrid[0.1:0.33:0.1, ] + grid128 = mgrid[ + np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1), + ] + assert_(grid128.dtype == np.longdouble) + assert_array_almost_equal(grid64, grid128) + + grid128c_a = mgrid[0:np.longdouble(1):3.4j] + grid128c_b = mgrid[0:np.longdouble(1):3.4j, ] + assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble) + assert_array_equal(grid128c_a, grid128c_b[0]) + + # different code path for single slice + grid64 = mgrid[0.1:0.33:0.1] + grid128 = mgrid[ + np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1) + ] + assert_(grid128.dtype == np.longdouble) + assert_array_almost_equal(grid64, grid128) + + def test_accepts_npcomplexfloating(self): + # Related to #16466 + assert_array_almost_equal( + mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ] + ) + + # different code path for single slice + assert_array_almost_equal( + mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)] + ) + + # Related to #16945 + grid64_a = mgrid[0.1:0.3:3.3j] + grid64_b = mgrid[0.1:0.3:3.3j, ][0] + assert_(grid64_a.dtype == grid64_b.dtype == np.float64) + assert_array_equal(grid64_a, grid64_b) + + grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)] + grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0] + assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble) + assert_array_equal(grid64_a, grid64_b) + + +class TestConcatenator: + def test_1d(self): + assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6])) + b = np.ones(5) + 
c = r_[b, 0, 0, b] + assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1]) + + def test_mixed_type(self): + g = r_[10.1, 1:10] + assert_(g.dtype == 'f8') + + def test_more_mixed_type(self): + g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0] + assert_(g.dtype == 'f8') + + def test_complex_step(self): + # Regression test for #12262 + g = r_[0:36:100j] + assert_(g.shape == (100,)) + + # Related to #16466 + g = r_[0:36:np.complex64(100j)] + assert_(g.shape == (100,)) + + def test_2d(self): + b = np.random.rand(5, 5) + c = np.random.rand(5, 5) + d = r_['1', b, c] # append columns + assert_(d.shape == (5, 10)) + assert_array_equal(d[:, :5], b) + assert_array_equal(d[:, 5:], c) + d = r_[b, c] + assert_(d.shape == (10, 5)) + assert_array_equal(d[:5, :], b) + assert_array_equal(d[5:, :], c) + + def test_0d(self): + assert_equal(r_[0, np.array(1), 2], [0, 1, 2]) + assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3]) + assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3]) + + +class TestNdenumerate: + def test_basic(self): + a = np.array([[1, 2], [3, 4]]) + assert_equal(list(ndenumerate(a)), + [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)]) + + +class TestIndexExpression: + def test_regression_1(self): + # ticket #1196 + a = np.arange(2) + assert_equal(a[:-1], a[s_[:-1]]) + assert_equal(a[:-1], a[index_exp[:-1]]) + + def test_simple_1(self): + a = np.random.rand(4, 5, 6) + + assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]]) + assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]]) + + +class TestIx_: + def test_regression_1(self): + # Test empty untyped inputs create outputs of indexing type, gh-5804 + a, = np.ix_(range(0)) + assert_equal(a.dtype, np.intp) + + a, = np.ix_([]) + assert_equal(a.dtype, np.intp) + + # but if the type is specified, don't change it + a, = np.ix_(np.array([], dtype=np.float32)) + assert_equal(a.dtype, np.float32) + + def test_shape_and_dtype(self): + sizes = (4, 5, 3, 2) + # Test both lists and arrays + for func in (range, np.arange): + arrays = np.ix_(*[func(sz) for sz in sizes]) + for k, (a, sz) in enumerate(zip(arrays, sizes)): + assert_equal(a.shape[k], sz) + assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k)) + assert_(np.issubdtype(a.dtype, np.integer)) + + def test_bool(self): + bool_a = [True, False, True, True] + int_a, = np.nonzero(bool_a) + assert_equal(np.ix_(bool_a)[0], int_a) + + def test_1d_only(self): + idx2d = [[1, 2, 3], [4, 5, 6]] + assert_raises(ValueError, np.ix_, idx2d) + + def test_repeated_input(self): + length_of_vector = 5 + x = np.arange(length_of_vector) + out = ix_(x, x) + assert_equal(out[0].shape, (length_of_vector, 1)) + assert_equal(out[1].shape, (1, length_of_vector)) + # check that input shape is not modified + assert_equal(x.shape, (length_of_vector,)) + + +def test_c_(): + a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])] + assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]]) + + +class TestFillDiagonal: + def test_basic(self): + a = np.zeros((3, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5]]) + ) + + def test_tall_matrix(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0]]) + ) + + def test_tall_matrix_wrap(self): + a = np.zeros((10, 3), int) + fill_diagonal(a, 5, True) + assert_array_equal( + a, np.array([[5, 0, 0], + [0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + 
[0, 5, 0], + [0, 0, 5], + [0, 0, 0], + [5, 0, 0], + [0, 5, 0]]) + ) + + def test_wide_matrix(self): + a = np.zeros((3, 10), int) + fill_diagonal(a, 5) + assert_array_equal( + a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 5, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]) + ) + + def test_operate_4d_array(self): + a = np.zeros((3, 3, 3, 3), int) + fill_diagonal(a, 4) + i = np.array([0, 1, 2]) + assert_equal(np.where(a != 0), (i, i, i, i)) + + def test_low_dim_handling(self): + # raise error with low dimensionality + a = np.zeros(3, int) + with assert_raises_regex(ValueError, "at least 2-d"): + fill_diagonal(a, 5) + + def test_hetero_shape_handling(self): + # raise error with high dimensionality and + # shape mismatch + a = np.zeros((3, 3, 7, 3), int) + with assert_raises_regex(ValueError, "equal length"): + fill_diagonal(a, 2) + + +def test_diag_indices(): + di = diag_indices(4) + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + a[di] = 100 + assert_array_equal( + a, np.array([[100, 2, 3, 4], + [5, 100, 7, 8], + [9, 10, 100, 12], + [13, 14, 15, 100]]) + ) + + # Now, we create indices to manipulate a 3-d array: + d3 = diag_indices(2, 3) + + # And use it to set the diagonal of a zeros array to 1: + a = np.zeros((2, 2, 2), int) + a[d3] = 1 + assert_array_equal( + a, np.array([[[1, 0], + [0, 0]], + [[0, 0], + [0, 1]]]) + ) + + +class TestDiagIndicesFrom: + + def test_diag_indices_from(self): + x = np.random.random((4, 4)) + r, c = diag_indices_from(x) + assert_array_equal(r, np.arange(4)) + assert_array_equal(c, np.arange(4)) + + def test_error_small_input(self): + x = np.ones(7) + with assert_raises_regex(ValueError, "at least 2-d"): + diag_indices_from(x) + + def test_error_shape_mismatch(self): + x = np.zeros((3, 3, 2, 3), int) + with assert_raises_regex(ValueError, "equal length"): + diag_indices_from(x) + + +def test_ndindex(): + x = list(ndindex(1, 2, 3)) + expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))] + assert_array_equal(x, expected) + + x = list(ndindex((1, 2, 3))) + assert_array_equal(x, expected) + + # Test use of scalars and tuples + x = list(ndindex((3,))) + assert_array_equal(x, list(ndindex(3))) + + # Make sure size argument is optional + x = list(ndindex()) + assert_equal(x, [()]) + + x = list(ndindex(())) + assert_equal(x, [()]) + + # Make sure 0-sized ndindex works correctly + x = list(ndindex(*[0])) + assert_equal(x, []) diff --git a/python/numpy/lib/tests/test_io.py b/python/numpy/lib/tests/test_io.py new file mode 100644 index 000000000..303dcfe7d --- /dev/null +++ b/python/numpy/lib/tests/test_io.py @@ -0,0 +1,2848 @@ +import gc +import gzip +import locale +import os +import re +import sys +import threading +import time +import warnings +import zipfile +from ctypes import c_bool +from datetime import datetime +from io import BytesIO, StringIO +from multiprocessing import Value, get_context +from pathlib import Path +from tempfile import NamedTemporaryFile + +import pytest + +import numpy as np +import numpy.ma as ma +from numpy._utils import asbytes +from numpy.exceptions import VisibleDeprecationWarning +from numpy.lib import _npyio_impl +from numpy.lib._iotools import ConversionWarning, ConverterError +from numpy.lib._npyio_impl import recfromcsv, recfromtxt +from numpy.ma.testutils import assert_equal +from numpy.testing import ( + HAS_REFCOUNT, + IS_PYPY, + IS_WASM, + assert_, + assert_allclose, + assert_array_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_raises_regex, 
+ assert_warns, + break_cycles, + suppress_warnings, + tempdir, + temppath, +) +from numpy.testing._private.utils import requires_memory + + +class TextIO(BytesIO): + """Helper IO class. + + Writes encode strings to bytes if needed, reads return bytes. + This makes it easier to emulate files opened in binary mode + without needing to explicitly convert strings to bytes in + setting up the test data. + + """ + def __init__(self, s=""): + BytesIO.__init__(self, asbytes(s)) + + def write(self, s): + BytesIO.write(self, asbytes(s)) + + def writelines(self, lines): + BytesIO.writelines(self, [asbytes(s) for s in lines]) + + +IS_64BIT = sys.maxsize > 2**32 +try: + import bz2 + HAS_BZ2 = True +except ImportError: + HAS_BZ2 = False +try: + import lzma + HAS_LZMA = True +except ImportError: + HAS_LZMA = False + + +def strptime(s, fmt=None): + """ + This function is available in the datetime module only from Python >= + 2.5. + + """ + if isinstance(s, bytes): + s = s.decode("latin1") + return datetime(*time.strptime(s, fmt)[:3]) + + +class RoundtripTest: + def roundtrip(self, save_func, *args, **kwargs): + """ + save_func : callable + Function used to save arrays to file. + file_on_disk : bool + If true, store the file on disk, instead of in a + string buffer. + save_kwds : dict + Parameters passed to `save_func`. + load_kwds : dict + Parameters passed to `numpy.load`. + args : tuple of arrays + Arrays stored to file. + + """ + save_kwds = kwargs.get('save_kwds', {}) + load_kwds = kwargs.get('load_kwds', {"allow_pickle": True}) + file_on_disk = kwargs.get('file_on_disk', False) + + if file_on_disk: + target_file = NamedTemporaryFile(delete=False) + load_file = target_file.name + else: + target_file = BytesIO() + load_file = target_file + + try: + arr = args + + save_func(target_file, *arr, **save_kwds) + target_file.flush() + target_file.seek(0) + + if sys.platform == 'win32' and not isinstance(target_file, BytesIO): + target_file.close() + + arr_reloaded = np.load(load_file, **load_kwds) + + self.arr = arr + self.arr_reloaded = arr_reloaded + finally: + if not isinstance(target_file, BytesIO): + target_file.close() + # holds an open file descriptor so it can't be deleted on win + if 'arr_reloaded' in locals(): + if not isinstance(arr_reloaded, np.lib.npyio.NpzFile): + os.remove(target_file.name) + + def check_roundtrips(self, a): + self.roundtrip(a) + self.roundtrip(a, file_on_disk=True) + self.roundtrip(np.asfortranarray(a)) + self.roundtrip(np.asfortranarray(a), file_on_disk=True) + if a.shape[0] > 1: + # neither C nor Fortran contiguous for 2D arrays or more + self.roundtrip(np.asfortranarray(a)[1:]) + self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True) + + def test_array(self): + a = np.array([], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], float) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], int) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle) + self.check_roundtrips(a) + + a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble) + self.check_roundtrips(a) + + def test_array_object(self): + a = np.array([], object) + self.check_roundtrips(a) + + a = np.array([[1, 2], [3, 4]], object) + self.check_roundtrips(a) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + self.roundtrip(a) + + @pytest.mark.skipif(sys.platform == 'win32', reason="Fails on Win32") + def test_mmap(self): + a = np.array([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 
'r'}) + + a = np.asfortranarray([[1, 2.5], [4, 7.3]]) + self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'}) + + def test_record(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + self.check_roundtrips(a) + + @pytest.mark.slow + def test_format_2_0(self): + dt = [(("%d" % i) * 100, float) for i in range(500)] + a = np.ones(1000, dtype=dt) + with warnings.catch_warnings(record=True): + warnings.filterwarnings('always', '', UserWarning) + self.check_roundtrips(a) + + +class TestSaveLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.save, *args, **kwargs) + assert_equal(self.arr[0], self.arr_reloaded) + assert_equal(self.arr[0].dtype, self.arr_reloaded.dtype) + assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc) + + +class TestSavezLoad(RoundtripTest): + def roundtrip(self, *args, **kwargs): + RoundtripTest.roundtrip(self, np.savez, *args, **kwargs) + try: + for n, arr in enumerate(self.arr): + reloaded = self.arr_reloaded['arr_%d' % n] + assert_equal(arr, reloaded) + assert_equal(arr.dtype, reloaded.dtype) + assert_equal(arr.flags.fnc, reloaded.flags.fnc) + finally: + # delete tempfile, must be done here on windows + if self.arr_reloaded.fid: + self.arr_reloaded.fid.close() + os.remove(self.arr_reloaded.fid.name) + + def test_load_non_npy(self): + """Test loading non-.npy files and name mapping in .npz.""" + with temppath(prefix="numpy_test_npz_load_non_npy_", suffix=".npz") as tmp: + with zipfile.ZipFile(tmp, "w") as npz: + with npz.open("test1.npy", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("test2", "w") as out_file: + np.save(out_file, np.arange(10)) + with npz.open("metadata", "w") as out_file: + out_file.write(b"Name: Test") + with np.load(tmp) as npz: + assert len(npz["test1"]) == 10 + assert len(npz["test1.npy"]) == 10 + assert len(npz["test2"]) == 10 + assert npz["metadata"] == b"Name: Test" + + @pytest.mark.skipif(IS_PYPY, reason="Hangs on PyPy") + @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform") + @pytest.mark.slow + def test_big_arrays(self): + L = (1 << 31) + 100000 + a = np.empty(L, dtype=np.uint8) + with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp: + np.savez(tmp, a=a) + del a + npfile = np.load(tmp) + a = npfile['a'] # Should succeed + npfile.close() + + def test_multiple_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + self.roundtrip(a, b) + + def test_named_arrays(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(a, l['file_a']) + assert_equal(b, l['file_b']) + + def test_tuple_getitem_raises(self): + # gh-23748 + a = np.array([1, 2, 3]) + f = BytesIO() + np.savez(f, a=a) + f.seek(0) + l = np.load(f) + with pytest.raises(KeyError, match="(1, 2)"): + l[1, 2] + + def test_BagObj(self): + a = np.array([[1, 2], [3, 4]], float) + b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex) + c = BytesIO() + np.savez(c, file_a=a, file_b=b) + c.seek(0) + l = np.load(c) + assert_equal(sorted(dir(l.f)), ['file_a', 'file_b']) + assert_equal(a, l.f.file_a) + assert_equal(b, l.f.file_b) + + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") + def test_savez_filename_clashes(self): + # Test that issue #852 is fixed + # and savez functions in multithreaded environment + + def writer(error_list): + with 
temppath(suffix='.npz') as tmp: + arr = np.random.randn(500, 500) + try: + np.savez(tmp, arr=arr) + except OSError as err: + error_list.append(err) + + errors = [] + threads = [threading.Thread(target=writer, args=(errors,)) + for j in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() + + if errors: + raise AssertionError(errors) + + def test_not_closing_opened_fid(self): + # Test that issue #2178 is fixed: + # verify that we can still seek on the 'loaded' file + with temppath(suffix='.npz') as tmp: + with open(tmp, 'wb') as fp: + np.savez(fp, data='LOVELY LOAD') + with open(tmp, 'rb', 10000) as fp: + fp.seek(0) + assert_(not fp.closed) + np.load(fp)['data'] + # fp must not get closed by .load + assert_(not fp.closed) + fp.seek(0) + assert_(not fp.closed) + + @pytest.mark.slow_pypy + def test_closing_fid(self): + # Test that issue #1517 (too many opened files) remains closed + # It might be a "weak" test since it failed to get triggered on + # e.g. Debian sid of 2012 Jul 05 but was reported to + # trigger the failure on Ubuntu 10.04: + # http://projects.scipy.org/numpy/ticket/1517#comment:2 + with temppath(suffix='.npz') as tmp: + np.savez(tmp, data='LOVELY LOAD') + # We need to check that the garbage collector can properly close + # the numpy npz files returned by np.load when their reference + # count goes to zero. Python running in debug mode raises a + # ResourceWarning when file closing is left to the garbage + # collector, so we catch the warnings. + with suppress_warnings() as sup: + sup.filter(ResourceWarning) # TODO: specify exact message + for i in range(1, 1025): + try: + np.load(tmp)["data"] + except Exception as e: + msg = f"Failed to load data from a file: {e}" + raise AssertionError(msg) + finally: + if IS_PYPY: + gc.collect() + + def test_closing_zipfile_after_load(self): + # Check that zipfile owns the file and can close it. This needs to + # pass a file name to load for the test. On windows, a failure will + # cause a second error to be raised when the attempt to remove + # the open file is made. + prefix = 'numpy_test_closing_zipfile_after_load_' + with temppath(suffix='.npz', prefix=prefix) as tmp: + np.savez(tmp, lab='place holder') + data = np.load(tmp) + fp = data.zip.fp + data.close() + assert_(fp.closed) + + @pytest.mark.parametrize("count, expected_repr", [ + (1, "NpzFile {fname!r} with keys: arr_0"), + (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"), + # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are + # expected to end in '...' 
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."), + ]) + def test_repr_lists_keys(self, count, expected_repr): + a = np.array([[1, 2], [3, 4]], float) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, *[a] * count) + l = np.load(tmp) + assert repr(l) == expected_repr.format(fname=tmp) + l.close() + + +class TestSaveTxt: + def test_array(self): + a = np.array([[1, 2], [3, 4]], float) + fmt = "%.18e" + c = BytesIO() + np.savetxt(c, a, fmt=fmt) + c.seek(0) + assert_equal(c.readlines(), + [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)), + asbytes((fmt + ' ' + fmt + '\n') % (3, 4))]) + + a = np.array([[1, 2], [3, 4]], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_1D(self): + a = np.array([1, 2, 3, 4], int) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n']) + + def test_0D_3D(self): + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, np.array(1)) + assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]])) + + def test_structured(self): + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + c = BytesIO() + np.savetxt(c, a, fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 2\n', b'3 4\n']) + + def test_structured_padded(self): + # gh-13297 + a = np.array([(1, 2, 3), (4, 5, 6)], dtype=[ + ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4') + ]) + c = BytesIO() + np.savetxt(c, a[['foo', 'baz']], fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1 3\n', b'4 6\n']) + + def test_multifield_view(self): + a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')]) + v = a[['x', 'z']] + with temppath(suffix='.npy') as path: + path = Path(path) + np.save(path, v) + data = np.load(path) + assert_array_equal(data, v) + + def test_delimiter(self): + a = np.array([[1., 2.], [3., 4.]]) + c = BytesIO() + np.savetxt(c, a, delimiter=',', fmt='%d') + c.seek(0) + assert_equal(c.readlines(), [b'1,2\n', b'3,4\n']) + + def test_format(self): + a = np.array([(1, 2), (3, 4)]) + c = BytesIO() + # Sequence of formats + np.savetxt(c, a, fmt=['%02d', '%3.1f']) + c.seek(0) + assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n']) + + # A single multiformat string + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Specify delimiter, should be overridden + c = BytesIO() + np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',') + c.seek(0) + lines = c.readlines() + assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n']) + + # Bad fmt, should raise a ValueError + c = BytesIO() + assert_raises(ValueError, np.savetxt, c, a, fmt=99) + + def test_header_footer(self): + # Test the functionality of the header and footer keyword argument. 
+ + c = BytesIO() + a = np.array([(1, 2), (3, 4)], dtype=int) + test_header_footer = 'Test header / footer' + # Test the header keyword argument + np.savetxt(c, a, fmt='%1d', header=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('# ' + test_header_footer + '\n1 2\n3 4\n')) + # Test the footer keyword argument + c = BytesIO() + np.savetxt(c, a, fmt='%1d', footer=test_header_footer) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n# ' + test_header_footer + '\n')) + # Test the commentstr keyword argument used on the header + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + header=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n')) + # Test the commentstr keyword argument used on the footer + c = BytesIO() + commentstr = '% ' + np.savetxt(c, a, fmt='%1d', + footer=test_header_footer, comments=commentstr) + c.seek(0) + assert_equal(c.read(), + asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n')) + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_file_roundtrip(self, filename_type): + with temppath() as name: + a = np.array([(1, 2), (3, 4)]) + np.savetxt(filename_type(name), a) + b = np.loadtxt(filename_type(name)) + assert_array_equal(a, b) + + def test_complex_arrays(self): + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re + 1.0j * im + + # One format only + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n', + b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n']) + + # One format for each real and imaginary part + c = BytesIO() + np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n', + b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n']) + + # One format for each complex number + c = BytesIO() + np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols) + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n', + b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n']) + + def test_complex_negative_exponent(self): + # Previous to 1.15, some formats generated x+-yj, gh 7895 + ncols = 2 + nrows = 2 + a = np.zeros((ncols, nrows), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.3e') + c.seek(0) + lines = c.readlines() + assert_equal( + lines, + [b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n', + b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n']) + + def test_custom_writer(self): + + class CustomWriter(list): + def write(self, text): + self.extend(text.split(b'\n')) + + w = CustomWriter() + a = np.array([(1, 2), (3, 4)]) + np.savetxt(w, a) + b = np.loadtxt(w) + assert_array_equal(a, b) + + def test_unicode(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + with tempdir() as tmpdir: + # set encoding as on windows it may not be unicode even on py3 + np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'], + encoding='UTF-8') + + def test_unicode_roundtrip(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + # our gz wrapper support encoding + suffixes = ['', '.gz'] + if HAS_BZ2: + suffixes.append('.bz2') + if HAS_LZMA: + suffixes.extend(['.xz', '.lzma']) + with tempdir() 
as tmpdir: + for suffix in suffixes: + np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a, + fmt=['%s'], encoding='UTF-16-LE') + b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix), + encoding='UTF-16-LE', dtype=np.str_) + assert_array_equal(a, b) + + def test_unicode_bytestream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + s = BytesIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read().decode('UTF-8'), utf8 + '\n') + + def test_unicode_stringstream(self): + utf8 = b'\xcf\x96'.decode('UTF-8') + a = np.array([utf8], dtype=np.str_) + s = StringIO() + np.savetxt(s, a, fmt=['%s'], encoding='UTF-8') + s.seek(0) + assert_equal(s.read(), utf8 + '\n') + + @pytest.mark.parametrize("iotype", [StringIO, BytesIO]) + def test_unicode_and_bytes_fmt(self, iotype): + # string type of fmt should not matter, see also gh-4053 + a = np.array([1.]) + s = iotype() + np.savetxt(s, a, fmt="%f") + s.seek(0) + if iotype is StringIO: + assert_equal(s.read(), "%f\n" % 1.) + else: + assert_equal(s.read(), b"%f\n" % 1.) + + @pytest.mark.skipif(sys.platform == 'win32', reason="files>4GB may not work") + @pytest.mark.slow + @requires_memory(free_bytes=7e9) + def test_large_zip(self): + def check_large_zip(memoryerror_raised): + memoryerror_raised.value = False + try: + # The test takes at least 6GB of memory, writes a file larger + # than 4GB. This tests the ``allowZip64`` kwarg to ``zipfile`` + test_data = np.asarray([np.random.rand( + np.random.randint(50, 100), 4) + for i in range(800000)], dtype=object) + with tempdir() as tmpdir: + np.savez(os.path.join(tmpdir, 'test.npz'), + test_data=test_data) + except MemoryError: + memoryerror_raised.value = True + raise + # run in a subprocess to ensure memory is released on PyPy, see gh-15775 + # Use an object in shared memory to re-raise the MemoryError exception + # in our process if needed, see gh-16889 + memoryerror_raised = Value(c_bool) + + # Since Python 3.8, the default start method for multiprocessing has + # been changed from 'fork' to 'spawn' on macOS, causing inconsistency + # on memory sharing model, leading to failed test for check_large_zip + ctx = get_context('fork') + p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,)) + p.start() + p.join() + if memoryerror_raised.value: + raise MemoryError("Child process raised a MemoryError exception") + # -9 indicates a SIGKILL, probably an OOM. 
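+        # (multiprocessing reports death by signal as a negative exitcode,
+        # -signum, so a SIGKILL shows up as exitcode == -9)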
+ if p.exitcode == -9: + pytest.xfail("subprocess got a SIGKILL, apparently free memory was not sufficient") + assert p.exitcode == 0 + +class LoadTxtBase: + def check_compressed(self, fopen, suffixes): + # Test that we can load data from a compressed file + wanted = np.arange(6).reshape((2, 3)) + linesep = ('\n', '\r\n', '\r') + for sep in linesep: + data = '0 1 2' + sep + '3 4 5' + for suffix in suffixes: + with temppath(suffix=suffix) as name: + with fopen(name, mode='wt', encoding='UTF-32-LE') as f: + f.write(data) + res = self.loadfunc(name, encoding='UTF-32-LE') + assert_array_equal(res, wanted) + with fopen(name, "rt", encoding='UTF-32-LE') as f: + res = self.loadfunc(f) + assert_array_equal(res, wanted) + + def test_compressed_gzip(self): + self.check_compressed(gzip.open, ('.gz',)) + + @pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2") + def test_compressed_bz2(self): + self.check_compressed(bz2.open, ('.bz2',)) + + @pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma") + def test_compressed_lzma(self): + self.check_compressed(lzma.open, ('.xz', '.lzma')) + + def test_encoding(self): + with temppath() as path: + with open(path, "wb") as f: + f.write('0.\n1.\n2.'.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16") + assert_array_equal(x, [0., 1., 2.]) + + def test_stringload(self): + # umlaute + nonascii = b'\xc3\xb6\xc3\xbc\xc3\xb6'.decode("UTF-8") + with temppath() as path: + with open(path, "wb") as f: + f.write(nonascii.encode("UTF-16")) + x = self.loadfunc(path, encoding="UTF-16", dtype=np.str_) + assert_array_equal(x, nonascii) + + def test_binary_decode(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=np.str_, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_converters_decode(self): + # test converters that decode strings + c = TextIO() + c.write(b'\xcf\x96') + c.seek(0) + x = self.loadfunc(c, dtype=np.str_, encoding="bytes", + converters={0: lambda x: x.decode('UTF-8')}) + a = np.array([b'\xcf\x96'.decode('UTF-8')]) + assert_array_equal(x, a) + + def test_converters_nodecode(self): + # test native string converters enabled by setting an encoding + utf8 = b'\xcf\x96'.decode('UTF-8') + with temppath() as path: + with open(path, 'wt', encoding='UTF-8') as f: + f.write(utf8) + x = self.loadfunc(path, dtype=np.str_, + converters={0: lambda x: x + 't'}, + encoding='UTF-8') + a = np.array([utf8 + 't']) + assert_array_equal(x, a) + + +class TestLoadTxt(LoadTxtBase): + loadfunc = staticmethod(np.loadtxt) + + def setup_method(self): + # lower chunksize for testing + self.orig_chunk = _npyio_impl._loadtxt_chunksize + _npyio_impl._loadtxt_chunksize = 1 + + def teardown_method(self): + _npyio_impl._loadtxt_chunksize = self.orig_chunk + + def test_record(self): + c = TextIO() + c.write('1 2\n3 4') + c.seek(0) + x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)]) + a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_array_equal(x, a) + + d = TextIO() + d.write('M 64 75.0\nF 25 60.0') + d.seek(0) + mydescriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + b = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=mydescriptor) + y = np.loadtxt(d, dtype=mydescriptor) + assert_array_equal(y, b) + + def test_array(self): + c = TextIO() + c.write('1 2\n3 4') + + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([[1, 2], [3, 4]], int) + assert_array_equal(x, a) + + c.seek(0) + x = np.loadtxt(c, dtype=float) + a = 
np.array([[1, 2], [3, 4]], float) + assert_array_equal(x, a) + + def test_1D(self): + c = TextIO() + c.write('1\n2\n3\n4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int) + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('1,2,3,4\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',') + a = np.array([1, 2, 3, 4], int) + assert_array_equal(x, a) + + def test_missing(self): + c = TextIO() + c.write('1,2,3,,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + a = np.array([1, 2, 3, -999, 5], int) + assert_array_equal(x, a) + + def test_converters_with_usecols(self): + c = TextIO() + c.write('1,2,3,,5\n6,7,8,9,10\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + a = np.array([[2, -999], [7, 9]], int) + assert_array_equal(x, a) + + def test_comments_unicode(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_byte(self): + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=b'#') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_comments_multiple(self): + c = TextIO() + c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments=['#', '@', '//']) + a = np.array([[1, 2, 3], [4, 5, 6]], int) + assert_array_equal(x, a) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_comments_multi_chars(self): + c = TextIO() + c.write('/* comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + comments='/*') + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + # Check that '/*' is not transformed to ['/', '*'] + c = TextIO() + c.write('*/ comment\n1,2,3,5\n') + c.seek(0) + assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',', + comments='/*') + + def test_skiprows(self): + c = TextIO() + c.write('comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + c = TextIO() + c.write('# comment\n1,2,3,5\n') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1) + a = np.array([1, 2, 3, 5], int) + assert_array_equal(x, a) + + def test_usecols(self): + a = np.array([[1, 2], [3, 4]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1,)) + assert_array_equal(x, a[:, 1]) + + a = np.array([[1, 2, 3], [3, 4, 5]], float) + c = BytesIO() + np.savetxt(c, a) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(1, 2)) + assert_array_equal(x, a[:, 1:]) + + # Testing with arrays instead of tuples. 
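+        # usecols accepts any sequence of objects implementing __index__,
+        # not just tuples, as the CrazyInt case below also exercises.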
+ c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2])) + assert_array_equal(x, a[:, 1:]) + + # Testing with an integer instead of a sequence + for int_type in [int, np.int8, np.int16, + np.int32, np.int64, np.uint8, np.uint16, + np.uint32, np.uint64]: + to_read = int_type(1) + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=to_read) + assert_array_equal(x, a[:, 1]) + + # Testing with some crazy custom integer type + class CrazyInt: + def __index__(self): + return 1 + + crazy_int = CrazyInt() + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=crazy_int) + assert_array_equal(x, a[:, 1]) + + c.seek(0) + x = np.loadtxt(c, dtype=float, usecols=(crazy_int,)) + assert_array_equal(x, a[:, 1]) + + # Checking with dtypes defined converters. + data = '''JOE 70.1 25.3 + BOB 60.5 27.9 + ''' + c = TextIO(data) + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(arr['stid'], [b"JOE", b"BOB"]) + assert_equal(arr['temp'], [25.3, 27.9]) + + # Testing non-ints in usecols + c.seek(0) + bogus_idx = 1.5 + assert_raises_regex( + TypeError, + f'^usecols must be.*{type(bogus_idx).__name__}', + np.loadtxt, c, usecols=bogus_idx + ) + + assert_raises_regex( + TypeError, + f'^usecols must be.*{type(bogus_idx).__name__}', + np.loadtxt, c, usecols=[0, bogus_idx, 0] + ) + + def test_bad_usecols(self): + with pytest.raises(OverflowError): + np.loadtxt(["1\n"], usecols=[2**64], delimiter=",") + with pytest.raises((ValueError, OverflowError)): + # Overflow error on 32bit platforms + np.loadtxt(["1\n"], usecols=[2**62], delimiter=",") + with pytest.raises(TypeError, + match="If a structured dtype .*. But 1 usecols were given and " + "the number of fields is 3."): + np.loadtxt(["1,1\n"], dtype="i,2i", usecols=[0], delimiter=",") + + def test_fancy_dtype(self): + c = TextIO() + c.write('1,2,3.0\n4,5,6.0\n') + c.seek(0) + dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + x = np.loadtxt(c, dtype=dt, delimiter=',') + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt) + assert_array_equal(x, a) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_3d_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 2, 3))]) + x = np.loadtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, + [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])], + dtype=dt) + assert_array_equal(x, a) + + def test_str_dtype(self): + # see gh-8033 + c = ["str1", "str2"] + + for dt in (str, np.bytes_): + a = np.array(["str1", "str2"], dtype=dt) + x = np.loadtxt(c, dtype=dt) + assert_array_equal(x, a) + + def test_empty_file(self): + with pytest.warns(UserWarning, match="input contained no data"): + c = TextIO() + x = np.loadtxt(c) + assert_equal(x.shape, (0,)) + x = np.loadtxt(c, dtype=np.int64) + assert_equal(x.shape, (0,)) + assert_(x.dtype == np.int64) + + def test_unused_converter(self): + c = TextIO() + c.writelines(['1 21\n', '3 42\n']) + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_array_equal(data, [21, 42]) + + c.seek(0) + data = np.loadtxt(c, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_array_equal(data, [33, 66]) + + def 
test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + def test_uint64_type(self): + tgt = (9223372043271415339, 9223372043271415853) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.uint64) + assert_equal(res, tgt) + + def test_int64_type(self): + tgt = (-9223372036854775807, 9223372036854775807) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=np.int64) + assert_equal(res, tgt) + + def test_from_float_hex(self): + # IEEE doubles and floats only, otherwise the float32 + # conversion may fail. + tgt = np.logspace(-10, 10, 5).astype(np.float32) + tgt = np.hstack((tgt, -tgt)).astype(float) + inp = '\n'.join(map(float.hex, tgt)) + c = TextIO() + c.write(inp) + for dt in [float, np.float32]: + c.seek(0) + res = np.loadtxt( + c, dtype=dt, converters=float.fromhex, encoding="latin1") + assert_equal(res, tgt, err_msg=f"{dt}") + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_no_default_hex_conversion(self): + """ + Ensure that fromhex is only used for values with the correct prefix and + is not called by default. Regression test related to gh-19598. + """ + c = TextIO("a b c") + with pytest.raises(ValueError, + match=".*convert string 'a' to float64 at row 0, column 1"): + np.loadtxt(c) + + @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") + def test_default_float_converter_exception(self): + """ + Ensure that the exception message raised during failed floating point + conversion is correct. Regression test related to gh-19598. 
+ """ + c = TextIO("qrs tuv") # Invalid values for default float converter + with pytest.raises(ValueError, + match="could not convert string 'qrs' to float64"): + np.loadtxt(c) + + def test_from_complex(self): + tgt = (complex(1, 1), complex(1, -1)) + c = TextIO() + c.write("%s %s" % tgt) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, tgt) + + def test_complex_misformatted(self): + # test for backward compatibility + # some complex formats used to generate x+-yj + a = np.zeros((2, 2), dtype=np.complex128) + re = np.pi + im = np.e + a[:] = re - 1.0j * im + c = BytesIO() + np.savetxt(c, a, fmt='%.16e') + c.seek(0) + txt = c.read() + c.seek(0) + # misformat the sign on the imaginary part, gh 7895 + txt_bad = txt.replace(b'e+00-', b'e00+-') + assert_(txt_bad != txt) + c.write(txt_bad) + c.seek(0) + res = np.loadtxt(c, dtype=complex) + assert_equal(res, a) + + def test_universal_newline(self): + with temppath() as name: + with open(name, 'w') as f: + f.write('1 21\r3 42\r') + data = np.loadtxt(name) + assert_array_equal(data, [[1, 21], [3, 42]]) + + def test_empty_field_after_tab(self): + c = TextIO() + c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t') + c.seek(0) + dt = {'names': ('x', 'y', 'z', 'comment'), + 'formats': (' num rows + c = TextIO() + c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5') + c.seek(0) + x = np.loadtxt(c, dtype=int, delimiter=',', + skiprows=1, max_rows=6) + a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int) + assert_array_equal(x, a) + + @pytest.mark.parametrize(["skip", "data"], [ + (1, ["ignored\n", "1,2\n", "\n", "3,4\n"]), + # "Bad" lines that do not end in newlines: + (1, ["ignored", "1,2", "", "3,4"]), + (1, StringIO("ignored\n1,2\n\n3,4")), + # Same as above, but do not skip any lines: + (0, ["-1,0\n", "1,2\n", "\n", "3,4\n"]), + (0, ["-1,0", "1,2", "", "3,4"]), + (0, StringIO("-1,0\n1,2\n\n3,4"))]) + def test_max_rows_empty_lines(self, skip, data): + with pytest.warns(UserWarning, + match=f"Input line 3.*max_rows={3 - skip}"): + res = np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3 - skip) + assert_array_equal(res, [[-1, 0], [1, 2], [3, 4]][skip:]) + + if isinstance(data, StringIO): + data.seek(0) + + with warnings.catch_warnings(): + warnings.simplefilter("error", UserWarning) + with pytest.raises(UserWarning): + np.loadtxt(data, dtype=int, skiprows=skip, delimiter=",", + max_rows=3 - skip) + +class Testfromregex: + def test_record(self): + c = TextIO() + c.write('1.312 foo\n1.534 bar\n4.444 qux') + c.seek(0) + + dt = [('num', np.float64), ('val', 'S3')] + x = np.fromregex(c, r"([0-9.]+)\s+(...)", dt) + a = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_2(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.int32), ('val', 'S3')] + x = np.fromregex(c, r"(\d+)\s+(...)", dt) + a = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')], + dtype=dt) + assert_array_equal(x, a) + + def test_record_3(self): + c = TextIO() + c.write('1312 foo\n1534 bar\n4444 qux') + c.seek(0) + + dt = [('num', np.float64)] + x = np.fromregex(c, r"(\d+)\s+...", dt) + a = np.array([(1312,), (1534,), (4444,)], dtype=dt) + assert_array_equal(x, a) + + @pytest.mark.parametrize("path_type", [str, Path]) + def test_record_unicode(self, path_type): + utf8 = b'\xcf\x96' + with temppath() as str_path: + path = path_type(str_path) + with open(path, 'wb') as f: + f.write(b'1.312 foo' + utf8 + b' \n1.534 bar\n4.444 qux') + + dt 
= [('num', np.float64), ('val', 'U4')] + x = np.fromregex(path, r"(?u)([0-9.]+)\s+(\w+)", dt, encoding='UTF-8') + a = np.array([(1.312, 'foo' + utf8.decode('UTF-8')), (1.534, 'bar'), + (4.444, 'qux')], dtype=dt) + assert_array_equal(x, a) + + regexp = re.compile(r"([0-9.]+)\s+(\w+)", re.UNICODE) + x = np.fromregex(path, regexp, dt, encoding='UTF-8') + assert_array_equal(x, a) + + def test_compiled_bytes(self): + regexp = re.compile(br'(\d)') + c = BytesIO(b'123') + dt = [('num', np.float64)] + a = np.array([1, 2, 3], dtype=dt) + x = np.fromregex(c, regexp, dt) + assert_array_equal(x, a) + + def test_bad_dtype_not_structured(self): + regexp = re.compile(br'(\d)') + c = BytesIO(b'123') + with pytest.raises(TypeError, match='structured datatype'): + np.fromregex(c, regexp, dtype=np.float64) + + +#####-------------------------------------------------------------------------- + + +class TestFromTxt(LoadTxtBase): + loadfunc = staticmethod(np.genfromtxt) + + def test_record(self): + # Test w/ explicit dtype + data = TextIO('1 2\n3 4') + test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)]) + control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')]) + assert_equal(test, control) + # + data = TextIO('M 64.0 75.0\nF 25.0 60.0') + descriptor = {'names': ('gender', 'age', 'weight'), + 'formats': ('S1', 'i4', 'f4')} + control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)], + dtype=descriptor) + test = np.genfromtxt(data, dtype=descriptor) + assert_equal(test, control) + + def test_array(self): + # Test outputting a standard ndarray + data = TextIO('1 2\n3 4') + control = np.array([[1, 2], [3, 4]], dtype=int) + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data.seek(0) + control = np.array([[1, 2], [3, 4]], dtype=float) + test = np.loadtxt(data, dtype=float) + assert_array_equal(test, control) + + def test_1D(self): + # Test squeezing to 1D + control = np.array([1, 2, 3, 4], int) + # + data = TextIO('1\n2\n3\n4\n') + test = np.genfromtxt(data, dtype=int) + assert_array_equal(test, control) + # + data = TextIO('1,2,3,4\n') + test = np.genfromtxt(data, dtype=int, delimiter=',') + assert_array_equal(test, control) + + def test_comments(self): + # Test the stripping of comments + control = np.array([1, 2, 3, 5], int) + # Comment on its own line + data = TextIO('# comment\n1,2,3,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + # Comment at the end of a line + data = TextIO('1,2,3,5# comment\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#') + assert_equal(test, control) + + def test_skiprows(self): + # Test row skipping + control = np.array([1, 2, 3, 5], int) + kwargs = {"dtype": int, "delimiter": ','} + # + data = TextIO('comment\n1,2,3,5\n') + test = np.genfromtxt(data, skip_header=1, **kwargs) + assert_equal(test, control) + # + data = TextIO('# comment\n1,2,3,5\n') + test = np.loadtxt(data, skiprows=1, **kwargs) + assert_equal(test, control) + + def test_skip_footer(self): + data = [f"# {i}" for i in range(1, 6)] + data.append("A, B, C") + data.extend([f"{i},{i:3.1f},{i:03d}" for i in range(51)]) + data[-1] = "99,99" + kwargs = {"delimiter": ",", "names": True, "skip_header": 5, "skip_footer": 10} + test = np.genfromtxt(TextIO("\n".join(data)), **kwargs) + ctrl = np.array([(f"{i:f}", f"{i:f}", f"{i:f}") for i in range(41)], + dtype=[(_, float) for _ in "ABC"]) + assert_equal(test, ctrl) + + def test_skip_footer_with_invalid(self): + with suppress_warnings() as 
sup: + sup.filter(ConversionWarning) + basestr = '1 1\n2 2\n3 3\n4 4\n5 \n6 \n7 \n' + # Footer too small to get rid of all invalid values + assert_raises(ValueError, np.genfromtxt, + TextIO(basestr), skip_footer=1) + # except ValueError: + # pass + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + a = np.genfromtxt(TextIO(basestr), skip_footer=3) + assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])) + # + basestr = '1 1\n2 \n3 3\n4 4\n5 \n6 6\n7 7\n' + a = np.genfromtxt( + TextIO(basestr), skip_footer=1, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]])) + a = np.genfromtxt( + TextIO(basestr), skip_footer=3, invalid_raise=False) + assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]])) + + def test_header(self): + # Test retrieving a header + data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, names=True, + encoding='bytes') + assert_(w[0].category is VisibleDeprecationWarning) + control = {'gender': np.array([b'M', b'F']), + 'age': np.array([64.0, 25.0]), + 'weight': np.array([75.0, 60.0])} + assert_equal(test['gender'], control['gender']) + assert_equal(test['age'], control['age']) + assert_equal(test['weight'], control['weight']) + + def test_auto_dtype(self): + # Test the automatic definition of the output dtype + data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, dtype=None, encoding='bytes') + assert_(w[0].category is VisibleDeprecationWarning) + control = [np.array([b'A', b'BCD']), + np.array([64, 25]), + np.array([75.0, 60.0]), + np.array([3 + 4j, 5 + 6j]), + np.array([True, False]), ] + assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4']) + for (i, ctrl) in enumerate(control): + assert_equal(test[f'f{i}'], ctrl) + + def test_auto_dtype_uniform(self): + # Tests whether the output dtype can be uniformized + data = TextIO('1 2 3 4\n5 6 7 8\n') + test = np.genfromtxt(data, dtype=None) + control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) + assert_equal(test, control) + + def test_fancy_dtype(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',') + control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_names_overwrite(self): + # Test overwriting the names of the dtype + descriptor = {'names': ('g', 'a', 'w'), + 'formats': ('S1', 'i4', 'f4')} + data = TextIO(b'M 64.0 75.0\nF 25.0 60.0') + names = ('gender', 'age', 'weight') + test = np.genfromtxt(data, dtype=descriptor, names=names) + descriptor['names'] = names + control = np.array([('M', 64.0, 75.0), + ('F', 25.0, 60.0)], dtype=descriptor) + assert_equal(test, control) + + def test_bad_fname(self): + with pytest.raises(TypeError, match='fname must be a string,'): + np.genfromtxt(123) + + def test_commented_header(self): + # Check that names can be retrieved even if the line is commented out. 
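+        # With names=True, genfromtxt takes the first non-skipped line as
+        # the header, even when that line starts with the comment character.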
+ data = TextIO(""" +#gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + # The # is part of the first name and should be deleted automatically. + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None, + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)], + dtype=[('gender', '|S1'), ('age', int), ('weight', float)]) + assert_equal(test, ctrl) + # Ditto, but we should get rid of the first element + data = TextIO(b""" +# gender age weight +M 21 72.100000 +F 35 58.330000 +M 33 21.99 + """) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, names=True, dtype=None, + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test, ctrl) + + def test_names_and_comments_none(self): + # Tests case when names is true but comments is None (gh-10780) + data = TextIO('col1 col2\n 1 2\n 3 4') + test = np.genfromtxt(data, dtype=(int, int), comments=None, names=True) + control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)]) + assert_equal(test, control) + + def test_file_is_closed_on_error(self): + # gh-13200 + with tempdir() as tmpdir: + fpath = os.path.join(tmpdir, "test.csv") + with open(fpath, "wb") as f: + f.write('\N{GREEK PI SYMBOL}'.encode()) + + # ResourceWarnings are emitted from a destructor, so won't be + # detected by regular propagation to errors. + with assert_no_warnings(): + with pytest.raises(UnicodeDecodeError): + np.genfromtxt(fpath, encoding="ascii") + + def test_autonames_and_usecols(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), + names=True, dtype=None, encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + control = np.array(('aaaa', 45, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_with_usecols(self): + # Test the combination user-defined converters and usecol + data = TextIO('1,2,3,,5\n6,7,8,9,10\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}, + usecols=(1, 3,)) + control = np.array([[2, -999], [7, 9]], int) + assert_equal(test, control) + + def test_converters_with_usecols_and_names(self): + # Tests names and usecols + data = TextIO('A B C D\n aaaa 121 45 9.1') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True, + dtype=None, encoding="bytes", + converters={'C': lambda s: 2 * int(s)}) + assert_(w[0].category is VisibleDeprecationWarning) + control = np.array(('aaaa', 90, 9.1), + dtype=[('A', '|S4'), ('C', int), ('D', float)]) + assert_equal(test, control) + + def test_converters_cornercases(self): + # Test the conversion to datetime. 
+ converter = { + 'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', np.object_), ('stid', float)]) + assert_equal(test, control) + + def test_converters_cornercases2(self): + # Test the conversion to datetime64. + converter = { + 'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))} + data = TextIO('2009-02-03 12:00:00Z, 72214.0') + test = np.genfromtxt(data, delimiter=',', dtype=None, + names=['date', 'stid'], converters=converter) + control = np.array((datetime(2009, 2, 3), 72214.), + dtype=[('date', 'datetime64[us]'), ('stid', float)]) + assert_equal(test, control) + + def test_unused_converter(self): + # Test whether unused converters are forgotten + data = TextIO("1 21\n 3 42\n") + test = np.genfromtxt(data, usecols=(1,), + converters={0: lambda s: int(s, 16)}) + assert_equal(test, [21, 42]) + # + data.seek(0) + test = np.genfromtxt(data, usecols=(1,), + converters={1: lambda s: int(s, 16)}) + assert_equal(test, [33, 66]) + + def test_invalid_converter(self): + strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or + ((b'r' not in x.lower() and x.strip()) or 0.0)) + strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or + ((b'%' not in x.lower() and x.strip()) or 0.0)) + s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n" + "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n" + "D02N03,10/10/2004,R 1,,7,145.55") + kwargs = { + "converters": {2: strip_per, 3: strip_rand}, "delimiter": ",", + "dtype": None, "encoding": "bytes"} + assert_raises(ConverterError, np.genfromtxt, s, **kwargs) + + def test_tricky_converter_bug1666(self): + # Test some corner cases + s = TextIO('q1,2\nq3,4') + cnv = lambda s: float(s[1:]) + test = np.genfromtxt(s, delimiter=',', converters={0: cnv}) + control = np.array([[1., 2.], [3., 4.]]) + assert_equal(test, control) + + def test_dtype_with_converters(self): + dstr = "2009; 23; 46" + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: bytes}) + control = np.array([('2009', 23., 46)], + dtype=[('f0', '|S4'), ('f1', float), ('f2', float)]) + assert_equal(test, control) + test = np.genfromtxt(TextIO(dstr,), + delimiter=";", dtype=float, converters={0: float}) + control = np.array([2009., 23., 46],) + assert_equal(test, control) + + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_dtype_with_converters_and_usecols(self): + dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n" + dmap = {'1:1': 0, '1:n': 1, 'm:1': 2, 'm:n': 3} + dtyp = [('e1', 'i4'), ('e2', 'i4'), ('e3', 'i2'), ('n', 'i1')] + conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]} + test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + names=None, converters=conv, encoding="bytes") + control = np.rec.array([(1, 5, -1, 0), (2, 8, -1, 1), (3, 3, -2, 3)], dtype=dtyp) + assert_equal(test, control) + dtyp = [('e1', 'i4'), ('e2', 'i4'), ('n', 'i1')] + test = recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',', + usecols=(0, 1, 3), names=None, converters=conv, + encoding="bytes") + control = np.rec.array([(1, 5, 0), (2, 8, 1), (3, 3, 3)], dtype=dtyp) + assert_equal(test, control) + + def test_dtype_with_object(self): + # Test using an explicit dtype with an object + data = """ 1; 2001-01-01 + 2; 2002-01-31 """ + ndtype = [('idx', int), ('code', object)] + func = lambda s: 
strptime(s.strip(), "%Y-%m-%d") + converters = {1: func} + test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype, + converters=converters) + control = np.array( + [(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))], + dtype=ndtype) + assert_equal(test, control) + + ndtype = [('nest', [('idx', int), ('code', object)])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + # nested but empty fields also aren't supported + ndtype = [('idx', int), ('code', object), ('nest', [])] + with assert_raises_regex(NotImplementedError, + 'Nested fields.* not supported.*'): + test = np.genfromtxt(TextIO(data), delimiter=";", + dtype=ndtype, converters=converters) + + def test_dtype_with_object_no_converter(self): + # Object without a converter uses bytes: + parsed = np.genfromtxt(TextIO("1"), dtype=object) + assert parsed[()] == b"1" + parsed = np.genfromtxt(TextIO("string"), dtype=object) + assert parsed[()] == b"string" + + def test_userconverters_with_explicit_dtype(self): + # Test user_converters w/ explicit (standard) dtype + data = TextIO('skip,skip,2001-01-01,1.0,skip') + test = np.genfromtxt(data, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: bytes}) + control = np.array([('2001-01-01', 1.)], + dtype=[('', '|S10'), ('', float)]) + assert_equal(test, control) + + def test_utf8_userconverters_with_explicit_dtype(self): + utf8 = b'\xcf\x96' + with temppath() as path: + with open(path, 'wb') as f: + f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip') + test = np.genfromtxt(path, delimiter=",", names=None, dtype=float, + usecols=(2, 3), converters={2: str}, + encoding='UTF-8') + control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)], + dtype=[('', '|U11'), ('', float)]) + assert_equal(test, control) + + def test_spacedelimiter(self): + # Test space delimiter + data = TextIO("1 2 3 4 5\n6 7 8 9 10") + test = np.genfromtxt(data) + control = np.array([[1., 2., 3., 4., 5.], + [6., 7., 8., 9., 10.]]) + assert_equal(test, control) + + def test_integer_delimiter(self): + # Test using an integer for delimiter + data = " 1 2 3\n 4 5 67\n890123 4" + test = np.genfromtxt(TextIO(data), delimiter=3) + control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]]) + assert_equal(test, control) + + def test_missing(self): + data = TextIO('1,2,3,,5\n') + test = np.genfromtxt(data, dtype=int, delimiter=',', + converters={3: lambda s: int(s or - 999)}) + control = np.array([1, 2, 3, -999, 5], int) + assert_equal(test, control) + + def test_missing_with_tabs(self): + # Test w/ a delimiter tab + txt = "1\t2\t3\n\t2\t\n1\t\t3" + test = np.genfromtxt(TextIO(txt), delimiter="\t", + usemask=True,) + ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],) + ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool) + assert_equal(test.data, ctrl_d) + assert_equal(test.mask, ctrl_m) + + def test_usecols(self): + # Test the selection of columns + # Select 1 column + control = np.array([[1, 2], [3, 4]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1,)) + assert_equal(test, control[:, 1]) + # + control = np.array([[1, 2, 3], [3, 4, 5]], float) + data = TextIO() + np.savetxt(data, control) + data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=(1, 2)) + assert_equal(test, control[:, 1:]) + # Testing with arrays instead of tuples. 
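+        # genfromtxt also resolves usecols given as field names or negative
+        # indices; see test_usecols_with_named_columns below.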
+ data.seek(0) + test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2])) + assert_equal(test, control[:, 1:]) + + def test_usecols_as_css(self): + # Test giving usecols with a comma-separated string + data = "1 2 3\n4 5 6" + test = np.genfromtxt(TextIO(data), + names="a, b, c", usecols="a, c") + ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"]) + assert_equal(test, ctrl) + + def test_usecols_with_structured_dtype(self): + # Test usecols with an explicit structured dtype + data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9") + names = ['stid', 'temp'] + dtypes = ['S4', 'f8'] + test = np.genfromtxt( + data, usecols=(0, 2), dtype=list(zip(names, dtypes))) + assert_equal(test['stid'], [b"JOE", b"BOB"]) + assert_equal(test['temp'], [25.3, 27.9]) + + def test_usecols_with_integer(self): + # Test usecols with an integer + test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0) + assert_equal(test, np.array([1., 4.])) + + def test_usecols_with_named_columns(self): + # Test usecols with named columns + ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)]) + data = "1 2 3\n4 5 6" + kwargs = {"names": "a, b, c"} + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data), + usecols=('a', 'c'), **kwargs) + assert_equal(test, ctrl) + + def test_empty_file(self): + # Test that an empty file raises the proper warning. + with suppress_warnings() as sup: + sup.filter(message="genfromtxt: Empty input file:") + data = TextIO() + test = np.genfromtxt(data) + assert_equal(test, np.array([])) + + # when skip_header > 0 + test = np.genfromtxt(data, skip_header=1) + assert_equal(test, np.array([])) + + def test_fancy_dtype_alt(self): + # Check that a nested dtype isn't MIA + data = TextIO('1,2,3.0\n4,5,6.0\n') + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True) + control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype) + assert_equal(test, control) + + def test_shaped_dtype(self): + c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6") + dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), + ('block', int, (2, 3))]) + x = np.genfromtxt(c, dtype=dt) + a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])], + dtype=dt) + assert_array_equal(x, a) + + def test_withmissing(self): + data = TextIO('A,B\n0,1\n2,N/A') + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + # + data.seek(0) + test = np.genfromtxt(data, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', float), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_user_missing_values(self): + data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j" + basekwargs = {"dtype": None, "delimiter": ",", "names": True} + mdtype = [('A', int), ('B', float), ('C', complex)] + # + test = np.genfromtxt(TextIO(data), missing_values="N/A", + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)], + dtype=mdtype) + assert_equal(test, control) + # + basekwargs['dtype'] = mdtype + test = 
np.genfromtxt(TextIO(data), + missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + # + test = np.genfromtxt(TextIO(data), + missing_values={0: -9, 'B': -99, 'C': -999j}, + usemask=True, + **basekwargs) + control = ma.array([(0, 0.0, 0j), (1, -999, 1j), + (-9, 2.2, -999j), (3, -99, 3j)], + mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)], + dtype=mdtype) + assert_equal(test, control) + + def test_user_filling_values(self): + # Test with missing and filling values + ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)]) + data = "N/A, 2, 3\n4, ,???" + kwargs = {"delimiter": ",", + "dtype": int, + "names": "a,b,c", + "missing_values": {0: "N/A", 'b': " ", 2: "???"}, + "filling_values": {0: 0, 'b': 0, 2: -999}} + test = np.genfromtxt(TextIO(data), **kwargs) + ctrl = np.array([(0, 2, 3), (4, 0, -999)], + dtype=[(_, int) for _ in "abc"]) + assert_equal(test, ctrl) + # + test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs) + ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"]) + assert_equal(test, ctrl) + + data2 = "1,2,*,4\n5,*,7,8\n" + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=0) + ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]]) + assert_equal(test, ctrl) + test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int, + missing_values="*", filling_values=-1) + ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]]) + assert_equal(test, ctrl) + + def test_withmissing_float(self): + data = TextIO('A,B\n0,1.5\n2,-999.00') + test = np.genfromtxt(data, dtype=None, delimiter=',', + missing_values='-999.0', names=True, usemask=True) + control = ma.array([(0, 1.5), (2, -1.)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_with_masked_column_uniform(self): + # Test masked column + data = TextIO('1 2 3\n4 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]]) + assert_equal(test, control) + + def test_with_masked_column_various(self): + # Test masked column + data = TextIO('True 2 3\nFalse 5 6\n') + test = np.genfromtxt(data, dtype=None, + missing_values='2,5', usemask=True) + control = ma.array([(1, 2, 3), (0, 5, 6)], + mask=[(0, 1, 0), (0, 1, 0)], + dtype=[('f0', bool), ('f1', bool), ('f2', int)]) + assert_equal(test, control) + + def test_invalid_raise(self): + # Test invalid raise + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = {"delimiter": ",", "dtype": None, "names": True} + + def f(): + return np.genfromtxt(mdata, invalid_raise=False, **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde'])) + # + mdata.seek(0) + assert_raises(ValueError, np.genfromtxt, mdata, + delimiter=",", names=True) + + def test_invalid_raise_with_usecols(self): + # Test invalid_raise with usecols + data = ["1, 1, 1, 1, 1"] * 50 + for i in range(5): + data[10 * i] = "2, 2, 2, 2 2" + data.insert(0, "a, b, c, d, e") + mdata = TextIO("\n".join(data)) + + kwargs = {"delimiter": ",", "dtype": None, "names": True, + 
"invalid_raise": False} + + def f(): + return np.genfromtxt(mdata, usecols=(0, 4), **kwargs) + mtest = assert_warns(ConversionWarning, f) + assert_equal(len(mtest), 45) + assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae'])) + # + mdata.seek(0) + mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs) + assert_equal(len(mtest), 50) + control = np.ones(50, dtype=[(_, int) for _ in 'ab']) + control[[10 * _ for _ in range(5)]] = (2, 2) + assert_equal(mtest, control) + + def test_inconsistent_dtype(self): + # Test inconsistent dtype + data = ["1, 1, 1, 1, -1.1"] * 50 + mdata = TextIO("\n".join(data)) + + converters = {4: lambda x: f"({x.decode()})"} + kwargs = {"delimiter": ",", "converters": converters, + "dtype": [(_, int) for _ in 'abcde'], "encoding": "bytes"} + assert_raises(ValueError, np.genfromtxt, mdata, **kwargs) + + def test_default_field_format(self): + # Test default format + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=None, defaultfmt="f%02i") + ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)], + dtype=[("f00", int), ("f01", int), ("f02", float)]) + assert_equal(mtest, ctrl) + + def test_single_dtype_wo_names(self): + # Test single dtype w/o names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, defaultfmt="f%02i") + ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_explicit_names(self): + # Test single dtype w explicit names + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names="a, b, c") + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_single_dtype_w_implicit_names(self): + # Test single dtype w implicit names + data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), + delimiter=",", dtype=float, names=True) + ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)], + dtype=[(_, float) for _ in "abc"]) + assert_equal(mtest, ctrl) + + def test_easy_structured_dtype(self): + # Test easy structured dtype + data = "0, 1, 2.3\n4, 5, 6.7" + mtest = np.genfromtxt(TextIO(data), delimiter=",", + dtype=(int, float, float), defaultfmt="f_%02i") + ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)], + dtype=[("f_00", int), ("f_01", float), ("f_02", float)]) + assert_equal(mtest, ctrl) + + def test_autostrip(self): + # Test autostrip + data = "01/01/2003 , 1.3, abcde" + kwargs = {"delimiter": ",", "dtype": None, "encoding": "bytes"} + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), **kwargs) + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')], + dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')]) + assert_equal(mtest, ctrl) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs) + assert_(w[0].category is VisibleDeprecationWarning) + ctrl = np.array([('01/01/2003', 1.3, 'abcde')], + dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')]) + assert_equal(mtest, ctrl) + + def test_replace_space(self): + # Test the 'replace_space' option + txt = "A.A, B (B), C:C\n1, 2, 3.14" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None) + 
ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=None, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)] + ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_replace_space_known_dtype(self): + # Test the 'replace_space' (and related) options when dtype != None + txt = "A.A, B (B), C:C\n1, 2, 3" + # Test default: replace ' ' by '_' and delete non-alphanum chars + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int) + ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no replace, no delete + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + replace_space='', deletechars='') + ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + # Test: no delete (spaces are replaced by _) + test = np.genfromtxt(TextIO(txt), + delimiter=",", names=True, dtype=int, + deletechars='') + ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)] + ctrl = np.array((1, 2, 3), dtype=ctrl_dtype) + assert_equal(test, ctrl) + + def test_incomplete_names(self): + # Test w/ incomplete names + data = "A,,C\n0,1,2\n3,4,5" + kwargs = {"delimiter": ",", "names": True} + # w/ dtype=None + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, int) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), dtype=None, **kwargs) + assert_equal(test, ctrl) + # w/ default dtype + ctrl = np.array([(0, 1, 2), (3, 4, 5)], + dtype=[(_, float) for _ in ('A', 'f0', 'C')]) + test = np.genfromtxt(TextIO(data), **kwargs) + + def test_names_auto_completion(self): + # Make sure that names are properly completed + data = "1 2 3\n 4 5 6" + test = np.genfromtxt(TextIO(data), + dtype=(int, float, int), names="a") + ctrl = np.array([(1, 2, 3), (4, 5, 6)], + dtype=[('a', int), ('f0', float), ('f1', int)]) + assert_equal(test, ctrl) + + def test_names_with_usecols_bug1636(self): + # Make sure we pick up the right names w/ usecols + data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4" + ctrl_names = ("A", "C", "E") + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=(0, 2, 4), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=(int, int, int), delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + # + test = np.genfromtxt(TextIO(data), + dtype=int, delimiter=",", + usecols=("A", "C", "E"), names=True) + assert_equal(test.dtype.names, ctrl_names) + + def test_fixed_width_names(self): + # Test fix-width w/ names + data = " A B C\n 0 1 2.3\n 45 67 9." 
+ kwargs = {"delimiter": (5, 5, 4), "names": True, "dtype": None} + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + # + kwargs = {"delimiter": 5, "names": True, "dtype": None} + ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)], + dtype=[('A', int), ('B', int), ('C', float)]) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_filling_values(self): + # Test missing values + data = b"1, 2, 3\n1, , 5\n0, 6, \n" + kwargs = {"delimiter": ",", "dtype": None, "filling_values": -999} + ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int) + test = np.genfromtxt(TextIO(data), **kwargs) + assert_equal(test, ctrl) + + def test_comments_is_none(self): + # Github issue 329 (None was previously being converted to 'None'). + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1], b'testNonetherestofthedata') + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1], b' testNonetherestofthedata') + + def test_latin1(self): + latin1 = b'\xf6\xfc\xf6' + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + latin1 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test[1, 0], b"test1") + assert_equal(test[1, 1], b"testNonethe" + latin1) + assert_equal(test[1, 2], b"test3") + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding='latin1') + assert_equal(test[1, 0], "test1") + assert_equal(test[1, 1], "testNonethe" + latin1.decode('latin1')) + assert_equal(test[1, 2], "test3") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(b"0,testNonethe" + latin1), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + assert_equal(test['f0'], 0) + assert_equal(test['f1'], b"testNonethe" + latin1) + + def test_binary_decode_autodtype(self): + utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04' + v = self.loadfunc(BytesIO(utf16), dtype=None, encoding='UTF-16') + assert_array_equal(v, np.array(utf16.decode('UTF-16').split())) + + def test_utf8_byte_encoding(self): + utf8 = b"\xcf\x96" + norm = b"norm1,norm2,norm3\n" + enc = b"test1,testNonethe" + utf8 + b",test3\n" + s = norm + enc + norm + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', VisibleDeprecationWarning) + test = np.genfromtxt(TextIO(s), + dtype=None, comments=None, delimiter=',', + encoding="bytes") + assert_(w[0].category is VisibleDeprecationWarning) + ctl = np.array([ + [b'norm1', b'norm2', b'norm3'], + [b'test1', b'testNonethe' + utf8, b'test3'], + [b'norm1', b'norm2', 
b'norm3']]) + assert_array_equal(test, ctl) + + def test_utf8_file(self): + utf8 = b"\xcf\x96" + with temppath() as path: + with open(path, "wb") as f: + f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + ctl = np.array([ + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"], + ["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + # test a mixed dtype + with open(path, "wb") as f: + f.write(b"0,testNonethe" + utf8) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="UTF-8") + assert_equal(test['f0'], 0) + assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8")) + + def test_utf8_file_nodtype_unicode(self): + # bytes encoding with non-latin1 -> unicode upcast + utf8 = '\u03d6' + latin1 = '\xf6\xfc\xf6' + + # skip test if cannot encode utf8 test string with preferred + # encoding. The preferred encoding is assumed to be the default + # encoding of open. Will need to change this for PyTest, maybe + # using pytest.mark.xfail(raises=***). + try: + encoding = locale.getpreferredencoding() + utf8.encode(encoding) + except (UnicodeError, ImportError): + pytest.skip('Skipping test_utf8_file_nodtype_unicode, ' + 'unable to encode utf8 in preferred encoding') + + with temppath() as path: + with open(path, "wt") as f: + f.write("norm1,norm2,norm3\n") + f.write("norm1," + latin1 + ",norm3\n") + f.write("test1,testNonethe" + utf8 + ",test3\n") + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', + VisibleDeprecationWarning) + test = np.genfromtxt(path, dtype=None, comments=None, + delimiter=',', encoding="bytes") + # Check for warning when encoding not specified. 
+ assert_(w[0].category is VisibleDeprecationWarning) + ctl = np.array([ + ["norm1", "norm2", "norm3"], + ["norm1", latin1, "norm3"], + ["test1", "testNonethe" + utf8, "test3"]], + dtype=np.str_) + assert_array_equal(test, ctl) + + @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") + def test_recfromtxt(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = recfromtxt(data, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = recfromtxt(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_recfromcsv(self): + # + data = TextIO('A,B\n0,1\n2,3') + kwargs = {"missing_values": "N/A", "names": True, "case_sensitive": True, + "encoding": "bytes"} + test = recfromcsv(data, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,N/A') + test = recfromcsv(data, dtype=None, usemask=True, **kwargs) + control = ma.array([(0, 1), (2, -1)], + mask=[(False, False), (False, True)], + dtype=[('A', int), ('B', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(test.A, [0, 2]) + # + data = TextIO('A,B\n0,1\n2,3') + test = recfromcsv(data, missing_values='N/A',) + control = np.array([(0, 1), (2, 3)], + dtype=[('a', int), ('b', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + # + data = TextIO('A,B\n0,1\n2,3') + dtype = [('a', int), ('b', float)] + test = recfromcsv(data, missing_values='N/A', dtype=dtype) + control = np.array([(0, 1), (2, 3)], + dtype=dtype) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + # gh-10394 + data = TextIO('color\n"red"\n"blue"') + test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) + control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) + assert_equal(test.dtype, control.dtype) + assert_equal(test, control) + + def test_max_rows(self): + # Test the `max_rows` keyword argument. + data = '1 2\n3 4\n5 6\n7 8\n9 10\n' + txt = TextIO(data) + a1 = np.genfromtxt(txt, max_rows=3) + a2 = np.genfromtxt(txt) + assert_equal(a1, [[1, 2], [3, 4], [5, 6]]) + assert_equal(a2, [[7, 8], [9, 10]]) + + # max_rows must be at least 1. + assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0) + + # An input with several invalid rows. 
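+        # As the asserts below show, rows dropped via invalid_raise=False do
+        # not count toward max_rows, so max_rows=4 and max_rows=5 both end up
+        # returning the same four valid rows from this input.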
+        data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
+
+        test = np.genfromtxt(TextIO(data), max_rows=2)
+        control = np.array([[1., 1.], [2., 2.]])
+        assert_equal(test, control)
+
+        # Test keywords conflict
+        assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
+                      max_rows=4)
+
+        # Test with invalid value
+        assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
+
+        # Test with invalid not raise
+        with suppress_warnings() as sup:
+            sup.filter(ConversionWarning)
+
+            test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
+            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+            assert_equal(test, control)
+
+            test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
+            control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+            assert_equal(test, control)
+
+        # Structured array with field names.
+        data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
+
+        # Test with header, names and comments
+        txt = TextIO(data)
+        test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
+        control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
+                           dtype=[('c', '<f8'), ('d', '<f8')])
+        assert_equal(test, control)
+        # To continue reading the same "file", don't use skip_header or
+        # names, and use the previously determined dtype.
+        test = np.genfromtxt(txt, max_rows=None, dtype=control.dtype)
+        control = np.array([(4.0, 4.0), (5.0, 5.0)],
+                           dtype=[('c', '<f8'), ('d', '<f8')])
+        assert_equal(test, control)
+
+    def test_auto_dtype_largeint(self):
+        # Regression test for numpy/numpy#5635 whereby large integers could
+        # cause OverflowErrors.
+
+        # Test the automatic definition of the output dtype
+        #
+        # 2**66 = 73786976294838206464 => should convert to float
+        # 2**34 = 17179869184 => should convert to int64
+        # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
+        #                 int64 on 64-bit systems)
+
+        data = TextIO('73786976294838206464 17179869184 1024')
+
+        test = np.genfromtxt(data, dtype=None)
+
+        assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
+
+        assert_(test.dtype['f0'] == float)
+        assert_(test.dtype['f1'] == np.int64)
+        assert_(test.dtype['f2'] == np.int_)
+
+        assert_allclose(test['f0'], 73786976294838206464.)
+        assert_equal(test['f1'], 17179869184)
+        assert_equal(test['f2'], 1024)
+
+    def test_unpack_float_data(self):
+        txt = TextIO("1,2,3\n4,5,6\n7,8,9\n0.0,1.0,2.0")
+        a, b, c = np.loadtxt(txt, delimiter=",", unpack=True)
+        assert_array_equal(a, np.array([1.0, 4.0, 7.0, 0.0]))
+        assert_array_equal(b, np.array([2.0, 5.0, 8.0, 1.0]))
+        assert_array_equal(c, np.array([3.0, 6.0, 9.0, 2.0]))
+
+    def test_unpack_structured(self):
+        # Regression test for gh-4341
+        # Unpacking should work on structured arrays
+        txt = TextIO("M 21 72\nF 35 58")
+        dt = {'names': ('a', 'b', 'c'), 'formats': ('S1', 'i4', 'f4')}
+        a, b, c = np.genfromtxt(txt, dtype=dt, unpack=True)
+        assert_equal(a.dtype, np.dtype('S1'))
+        assert_equal(b.dtype, np.dtype('i4'))
+        assert_equal(c.dtype, np.dtype('f4'))
+        assert_array_equal(a, np.array([b'M', b'F']))
+        assert_array_equal(b, np.array([21, 35]))
+        assert_array_equal(c, np.array([72., 58.]))
+
+    def test_unpack_auto_dtype(self):
+        # Regression test for gh-4341
+        # Unpacking should work when dtype=None
+        txt = TextIO("M 21 72.\nF 35 58.")
+        expected = (np.array(["M", "F"]), np.array([21, 35]), np.array([72., 58.]))
+        test = np.genfromtxt(txt, dtype=None, unpack=True, encoding="utf-8")
+        for arr, result in zip(expected, test):
+            assert_array_equal(arr, result)
+            assert_equal(arr.dtype, result.dtype)
+
+    def test_unpack_single_name(self):
+        # Regression test for gh-4341
+        # Unpacking should work when structured dtype has only one field
+        txt = TextIO("21\n35")
+        dt = {'names': ('a',), 'formats': ('i4',)}
+        expected = np.array([21, 35], dtype=np.int32)
+        test = np.genfromtxt(txt, dtype=dt, unpack=True)
+        assert_array_equal(expected, test)
+        assert_equal(expected.dtype, test.dtype)
+
+    def test_squeeze_scalar(self):
+        # Regression test for gh-4341
+        # Unpacking a scalar should give zero-dim output,
+        # even if dtype is structured
+        txt = TextIO("1")
+        dt = {'names': ('a',), 'formats': ('i4',)}
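+        # A sketch of the expectation: a single scalar read with unpack=True
+        # should squeeze to a zero-dimensional array, i.e.
+        #     np.genfromtxt(TextIO("1"), dtype=dt, unpack=True).shape == ()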
expected = np.array((1,), dtype=np.int32) + test = np.genfromtxt(txt, dtype=dt, unpack=True) + assert_array_equal(expected, test) + assert_equal((), test.shape) + assert_equal(expected.dtype, test.dtype) + + @pytest.mark.parametrize("ndim", [0, 1, 2]) + def test_ndmin_keyword(self, ndim: int): + # lets have the same behaviour of ndmin as loadtxt + # as they should be the same for non-missing values + txt = "42" + + a = np.loadtxt(StringIO(txt), ndmin=ndim) + b = np.genfromtxt(StringIO(txt), ndmin=ndim) + + assert_array_equal(a, b) + + +class TestPathUsage: + # Test that pathlib.Path can be used + def test_loadtxt(self): + with temppath(suffix='.txt') as path: + path = Path(path) + a = np.array([[1.1, 2], [3, 4]]) + np.savetxt(path, a) + x = np.loadtxt(path) + assert_array_equal(x, a) + + def test_save_load(self): + # Test that pathlib.Path instances can be used with save. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path) + assert_array_equal(data, a) + + def test_save_load_memmap(self): + # Test that pathlib.Path instances can be loaded mem-mapped. + with temppath(suffix='.npy') as path: + path = Path(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + data = np.load(path, mmap_mode='r') + assert_array_equal(data, a) + # close the mem-mapped file + del data + if IS_PYPY: + break_cycles() + break_cycles() + + @pytest.mark.xfail(IS_WASM, reason="memmap doesn't work correctly") + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_save_load_memmap_readwrite(self, filename_type): + with temppath(suffix='.npy') as path: + path = filename_type(path) + a = np.array([[1, 2], [3, 4]], int) + np.save(path, a) + b = np.load(path, mmap_mode='r+') + a[0][0] = 5 + b[0][0] = 5 + del b # closes the file + if IS_PYPY: + break_cycles() + break_cycles() + data = np.load(path) + assert_array_equal(data, a) + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_savez_load(self, filename_type): + with temppath(suffix='.npz') as path: + path = filename_type(path) + np.savez(path, lab='place holder') + with np.load(path) as data: + assert_array_equal(data['lab'], 'place holder') + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_savez_compressed_load(self, filename_type): + with temppath(suffix='.npz') as path: + path = filename_type(path) + np.savez_compressed(path, lab='place holder') + data = np.load(path) + assert_array_equal(data['lab'], 'place holder') + data.close() + + @pytest.mark.parametrize("filename_type", [Path, str]) + def test_genfromtxt(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + a = np.array([(1, 2), (3, 4)]) + np.savetxt(path, a) + data = np.genfromtxt(path) + assert_array_equal(a, data) + + @pytest.mark.parametrize("filename_type", [Path, str]) + @pytest.mark.filterwarnings("ignore:.*recfromtxt.*:DeprecationWarning") + def test_recfromtxt(self, filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + with open(path, 'w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = {"delimiter": ",", "missing_values": "N/A", "names": True} + test = recfromtxt(path, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + @pytest.mark.parametrize("filename_type", [Path, str]) + @pytest.mark.filterwarnings("ignore:.*recfromcsv.*:DeprecationWarning") + def test_recfromcsv(self, 
filename_type): + with temppath(suffix='.txt') as path: + path = filename_type(path) + with open(path, 'w') as f: + f.write('A,B\n0,1\n2,3') + + kwargs = { + "missing_values": "N/A", "names": True, "case_sensitive": True + } + test = recfromcsv(path, dtype=None, **kwargs) + control = np.array([(0, 1), (2, 3)], + dtype=[('A', int), ('B', int)]) + assert_(isinstance(test, np.recarray)) + assert_equal(test, control) + + +def test_gzip_load(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + + np.save(f, a) + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.load(f), a) + + +# These next two classes encode the minimal API needed to save()/load() arrays. +# The `test_ducktyping` ensures they work correctly +class JustWriter: + def __init__(self, base): + self.base = base + + def write(self, s): + return self.base.write(s) + + def flush(self): + return self.base.flush() + +class JustReader: + def __init__(self, base): + self.base = base + + def read(self, n): + return self.base.read(n) + + def seek(self, off, whence=0): + return self.base.seek(off, whence) + + +def test_ducktyping(): + a = np.random.random((5, 5)) + + s = BytesIO() + f = JustWriter(s) + + np.save(f, a) + f.flush() + s.seek(0) + + f = JustReader(s) + assert_array_equal(np.load(f), a) + + +def test_gzip_loadtxt(): + # Thanks to another windows brokenness, we can't use + # NamedTemporaryFile: a file created from this function cannot be + # reopened by another open call. So we first put the gzipped string + # of the test reference array, write it to a securely opened file, + # which is then read from by the loadtxt function + s = BytesIO() + g = gzip.GzipFile(fileobj=s, mode='w') + g.write(b'1 2 3\n') + g.close() + + s.seek(0) + with temppath(suffix='.gz') as name: + with open(name, 'wb') as f: + f.write(s.read()) + res = np.loadtxt(name) + s.close() + + assert_array_equal(res, [1, 2, 3]) + + +def test_gzip_loadtxt_from_string(): + s = BytesIO() + f = gzip.GzipFile(fileobj=s, mode="w") + f.write(b'1 2 3\n') + f.close() + s.seek(0) + + f = gzip.GzipFile(fileobj=s, mode="r") + assert_array_equal(np.loadtxt(f), [1, 2, 3]) + + +def test_npzfile_dict(): + s = BytesIO() + x = np.zeros((3, 3)) + y = np.zeros((3, 3)) + + np.savez(s, x=x, y=y) + s.seek(0) + + z = np.load(s) + + assert_('x' in z) + assert_('y' in z) + assert_('x' in z.keys()) + assert_('y' in z.keys()) + + for f, a in z.items(): + assert_(f in ['x', 'y']) + assert_equal(a.shape, (3, 3)) + + for a in z.values(): + assert_equal(a.shape, (3, 3)) + + assert_(len(z.items()) == 2) + + for f in z: + assert_(f in ['x', 'y']) + + assert_('x' in z.keys()) + assert (z.get('x') == z['x']).all() + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_load_refcount(): + # Check that objects returned by np.load are directly freed based on + # their refcount, rather than needing the gc to collect them. 
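+    # (assert_no_gc_cycles runs the body and fails if any new reference
+    # cycles are created; np.load results should be freed by refcounting
+    # alone, without waiting for the garbage collector.)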
+ + f = BytesIO() + np.savez(f, [1, 2, 3]) + f.seek(0) + + with assert_no_gc_cycles(): + np.load(f) + + f.seek(0) + dt = [("a", 'u1', 2), ("b", 'u1', 2)] + with assert_no_gc_cycles(): + x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt) + assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt)) + + +def test_load_multiple_arrays_until_eof(): + f = BytesIO() + np.save(f, 1) + np.save(f, 2) + f.seek(0) + out1 = np.load(f) + assert out1 == 1 + out2 = np.load(f) + assert out2 == 2 + with pytest.raises(EOFError): + np.load(f) + + +def test_savez_nopickle(): + obj_array = np.array([1, 'hello'], dtype=object) + with temppath(suffix='.npz') as tmp: + np.savez(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez(tmp, obj_array, allow_pickle=False) + + with temppath(suffix='.npz') as tmp: + np.savez_compressed(tmp, obj_array) + + with temppath(suffix='.npz') as tmp: + with pytest.raises(ValueError, match="Object arrays cannot be saved when.*"): + np.savez_compressed(tmp, obj_array, allow_pickle=False) diff --git a/python/numpy/lib/tests/test_loadtxt.py b/python/numpy/lib/tests/test_loadtxt.py new file mode 100644 index 000000000..a2022a0d5 --- /dev/null +++ b/python/numpy/lib/tests/test_loadtxt.py @@ -0,0 +1,1101 @@ +""" +Tests specific to `np.loadtxt` added during the move of loadtxt to be backed +by C code. +These tests complement those found in `test_io.py`. +""" + +import os +import sys +from io import StringIO +from tempfile import NamedTemporaryFile, mkstemp + +import pytest + +import numpy as np +from numpy.ma.testutils import assert_equal +from numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal + + +def test_scientific_notation(): + """Test that both 'e' and 'E' are parsed correctly.""" + data = StringIO( + + "1.0e-1,2.0E1,3.0\n" + "4.0e-2,5.0E-1,6.0\n" + "7.0e-3,8.0E1,9.0\n" + "0.0e-4,1.0E-1,2.0" + + ) + expected = np.array( + [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] + ) + assert_array_equal(np.loadtxt(data, delimiter=","), expected) + + +@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"]) +def test_comment_multiple_chars(comment): + content = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n" + txt = StringIO(content.replace("#", comment)) + a = np.loadtxt(txt, delimiter=",", comments=comment) + assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]]) + + +@pytest.fixture +def mixed_types_structured(): + """ + Fixture providing heterogeneous input data with a structured dtype, along + with the associated structured array. 
+ """ + data = StringIO( + + "1000;2.4;alpha;-34\n" + "2000;3.1;beta;29\n" + "3500;9.9;gamma;120\n" + "4090;8.1;delta;0\n" + "5001;4.4;epsilon;-99\n" + "6543;7.8;omega;-1\n" + + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + return data, dtype, expected + + +@pytest.mark.parametrize('skiprows', [0, 1, 2, 3]) +def test_structured_dtype_and_skiprows_no_empty_lines( + skiprows, mixed_types_structured): + data, dtype, expected = mixed_types_structured + a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows) + assert_array_equal(a, expected[skiprows:]) + + +def test_unpack_structured(mixed_types_structured): + data, dtype, expected = mixed_types_structured + + a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True) + assert_array_equal(a, expected["f0"]) + assert_array_equal(b, expected["f1"]) + assert_array_equal(c, expected["f2"]) + assert_array_equal(d, expected["f3"]) + + +def test_structured_dtype_with_shape(): + dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)]) + data = StringIO("0,1,2,3\n6,7,8,9\n") + expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dtype), expected) + + +def test_structured_dtype_with_multi_shape(): + dtype = np.dtype([("a", "u1", (2, 2))]) + data = StringIO("0 1 2 3\n") + expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype) + assert_array_equal(np.loadtxt(data, dtype=dtype), expected) + + +def test_nested_structured_subarray(): + # Test from gh-16678 + point = np.dtype([('x', float), ('y', float)]) + dt = np.dtype([('code', int), ('points', point, (2,))]) + data = StringIO("100,1,2,3,4\n200,5,6,7,8\n") + expected = np.array( + [ + (100, [(1., 2.), (3., 4.)]), + (200, [(5., 6.), (7., 8.)]), + ], + dtype=dt + ) + assert_array_equal(np.loadtxt(data, dtype=dt, delimiter=","), expected) + + +def test_structured_dtype_offsets(): + # An aligned structured dtype will have additional padding + dt = np.dtype("i1, i4, i1, i4, i1, i4", align=True) + data = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n") + expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)], dtype=dt) + assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dt), expected) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_negative_row_limits(param): + """skiprows and max_rows should raise for negative parameters.""" + with pytest.raises(ValueError, match="argument must be nonnegative"): + np.loadtxt("foo.bar", **{param: -3}) + + +@pytest.mark.parametrize("param", ("skiprows", "max_rows")) +def test_exception_noninteger_row_limits(param): + with pytest.raises(TypeError, match="argument must be an integer"): + np.loadtxt("foo.bar", **{param: 1.0}) + + +@pytest.mark.parametrize( + "data, shape", + [ + ("1 2 3 4 5\n", (1, 5)), # Single row + ("1\n2\n3\n4\n5\n", (5, 1)), # Single column + ] +) +def test_ndmin_single_row_or_col(data, shape): + arr = np.array([1, 2, 3, 4, 5]) + arr2d = arr.reshape(shape) + + assert_array_equal(np.loadtxt(StringIO(data), dtype=int), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), arr) + assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), arr2d) + + 
+@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"]) +def test_bad_ndmin(badval): + with pytest.raises(ValueError, match="Illegal value of ndmin keyword"): + np.loadtxt("foo.bar", ndmin=badval) + + +@pytest.mark.parametrize( + "ws", + ( + " ", # space + "\t", # tab + "\u2003", # em + "\u00A0", # non-break + "\u3000", # ideographic space + ) +) +def test_blank_lines_spaces_delimit(ws): + txt = StringIO( + f"1 2{ws}30\n\n{ws}\n" + f"4 5 60{ws}\n {ws} \n" + f"7 8 {ws} 90\n # comment\n" + f"3 2 1" + ) + # NOTE: It is unclear that the ` # comment` should succeed. Except + # for delimiter=None, which should use any whitespace (and maybe + # should just be implemented closer to Python + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=None, comments="#"), expected + ) + + +def test_blank_lines_normal_delimiter(): + txt = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1') + expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]]) + assert_equal( + np.loadtxt(txt, dtype=int, delimiter=',', comments="#"), expected + ) + + +@pytest.mark.parametrize("dtype", (float, object)) +def test_maxrows_no_blank_lines(dtype): + txt = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0") + res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2) + assert_equal(res.dtype, dtype) + assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2"))) +def test_exception_message_bad_values(dtype): + txt = StringIO("1,2\n3,XXX\n5,6") + msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2" + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + +def test_converters_negative_indices(): + txt = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]]) + res = np.loadtxt(txt, dtype=np.float64, delimiter=",", converters=conv) + assert_equal(res, expected) + + +def test_converters_negative_indices_with_usecols(): + txt = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n') + conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)} + expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]]) + res = np.loadtxt( + txt, + dtype=np.float64, + delimiter=",", + converters=conv, + usecols=[0, -1], + ) + assert_equal(res, expected) + + # Second test with variable number of rows: + res = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",", + usecols=[0, -1], converters={-1: (lambda x: -1)}) + assert_array_equal(res, [[0, -1], [0, -1]]) + + +def test_ragged_error(): + rows = ["1,2,3", "1,2,3", "4,3,2,1"] + with pytest.raises(ValueError, + match="the number of columns changed from 3 to 4 at row 3"): + np.loadtxt(rows, delimiter=",") + + +def test_ragged_usecols(): + # usecols, and negative ones, work even with varying number of columns. 
+ txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + expected = np.array([[0, 0], [0, 0], [0, 0]]) + res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + assert_equal(res, expected) + + txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n") + with pytest.raises(ValueError, + match="invalid column index -2 at row 2 with 1 columns"): + # There is no -2 column in the second row: + np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2]) + + +def test_empty_usecols(): + txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n") + res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[]) + assert res.shape == (3,) + assert res.dtype == np.dtype([]) + + +@pytest.mark.parametrize("c1", ["a", "の", "🫕"]) +@pytest.mark.parametrize("c2", ["a", "の", "🫕"]) +def test_large_unicode_characters(c1, c2): + # c1 and c2 span ascii, 16bit and 32bit range. + txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g") + res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",") + expected = np.array( + [f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")], + dtype=np.dtype('U12') + ) + assert_equal(res, expected) + + +def test_unicode_with_converter(): + txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n") + conv = {0: lambda s: s.upper()} + res = np.loadtxt( + txt, + dtype=np.dtype("U12"), + converters=conv, + delimiter=",", + encoding=None + ) + expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']]) + assert_equal(res, expected) + + +def test_converter_with_structured_dtype(): + txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n') + dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')]) + conv = {0: lambda s: int(10 * float(s)), -1: lambda s: s.upper()} + res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv) + expected = np.array( + [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt + ) + assert_equal(res, expected) + + +def test_converter_with_unicode_dtype(): + """ + With the 'bytes' encoding, tokens are encoded prior to being + passed to the converter. This means that the output of the converter may + be bytes instead of unicode as expected by `read_rows`. + + This test checks that outputs from the above scenario are properly decoded + prior to parsing by `read_rows`. + """ + txt = StringIO('abc,def\nrst,xyz') + conv = bytes.upper + res = np.loadtxt( + txt, dtype=np.dtype("U3"), converters=conv, delimiter=",", + encoding="bytes") + expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']]) + assert_equal(res, expected) + + +def test_read_huge_row(): + row = "1.5, 2.5," * 50000 + row = row[:-1] + "\n" + txt = StringIO(row * 2) + res = np.loadtxt(txt, delimiter=",", dtype=float) + assert_equal(res, np.tile([1.5, 2.5], (2, 50000))) + + +@pytest.mark.parametrize("dtype", "edfgFDG") +def test_huge_float(dtype): + # Covers a non-optimized path that is rarely taken: + field = "0" * 1000 + ".123456789" + dtype = np.dtype(dtype) + value = np.loadtxt([field], dtype=dtype)[()] + assert value == dtype.type("0.123456789") + + +@pytest.mark.parametrize( + ("given_dtype", "expected_dtype"), + [ + ("S", np.dtype("S5")), + ("U", np.dtype("U5")), + ], +) +def test_string_no_length_given(given_dtype, expected_dtype): + """ + The given dtype is just 'S' or 'U' with no length. In these cases, the + length of the resulting dtype is determined by the longest string found + in the file. 
+ """ + txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n") + res = np.loadtxt(txt, dtype=given_dtype, delimiter=",") + expected = np.array( + [['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype + ) + assert_equal(res, expected) + assert_equal(res.dtype, expected_dtype) + + +def test_float_conversion(): + """ + Some tests that the conversion to float64 works as accurately as the + Python built-in `float` function. In a naive version of the float parser, + these strings resulted in values that were off by an ULP or two. + """ + strings = [ + '0.9999999999999999', + '9876543210.123456', + '5.43215432154321e+300', + '0.901', + '0.333', + ] + txt = StringIO('\n'.join(strings)) + res = np.loadtxt(txt) + expected = np.array([float(s) for s in strings]) + assert_equal(res, expected) + + +def test_bool(): + # Simple test for bool via integer + txt = StringIO("1, 0\n10, -1") + res = np.loadtxt(txt, dtype=bool, delimiter=",") + assert res.dtype == bool + assert_array_equal(res, [[True, False], [True, True]]) + # Make sure we use only 1 and 0 on the byte level: + assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]]) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_integer_signs(dtype): + dtype = np.dtype(dtype) + assert np.loadtxt(["+2"], dtype=dtype) == 2 + if dtype.kind == "u": + with pytest.raises(ValueError): + np.loadtxt(["-1\n"], dtype=dtype) + else: + assert np.loadtxt(["-2\n"], dtype=dtype) == -2 + + for sign in ["++", "+-", "--", "-+"]: + with pytest.raises(ValueError): + np.loadtxt([f"{sign}2\n"], dtype=dtype) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"]) +@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning") +def test_implicit_cast_float_to_int_fails(dtype): + txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6") + with pytest.raises(ValueError): + np.loadtxt(txt, dtype=dtype, delimiter=",") + +@pytest.mark.parametrize("dtype", (np.complex64, np.complex128)) +@pytest.mark.parametrize("with_parens", (False, True)) +def test_complex_parsing(dtype, with_parens): + s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)" + if not with_parens: + s = s.replace("(", "").replace(")", "") + + res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",") + expected = np.array( + [[1.0 - 2.5j, 3.75, 7 - 5j], [4.0, -1900j, 0]], dtype=dtype + ) + assert_equal(res, expected) + + +def test_read_from_generator(): + def gen(): + for i in range(4): + yield f"{i},{2 * i},{i**2}" + + res = np.loadtxt(gen(), dtype=int, delimiter=",") + expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]]) + assert_equal(res, expected) + + +def test_read_from_generator_multitype(): + def gen(): + for i in range(3): + yield f"{i} {i / 4}" + + res = np.loadtxt(gen(), dtype="i, d", delimiter=" ") + expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d") + assert_equal(res, expected) + + +def test_read_from_bad_generator(): + def gen(): + yield from ["1,2", b"3, 5", 12738] + + with pytest.raises( + TypeError, match=r"non-string returned while reading data"): + np.loadtxt(gen(), dtype="i, i", delimiter=",") + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +def test_object_cleanup_on_read_error(): + sentinel = 
object() + already_read = 0 + + def conv(x): + nonlocal already_read + if already_read > 4999: + raise ValueError("failed half-way through!") + already_read += 1 + return sentinel + + txt = StringIO("x\n" * 10000) + + with pytest.raises(ValueError, match="at row 5000, column 1"): + np.loadtxt(txt, dtype=object, converters={0: conv}) + + assert sys.getrefcount(sentinel) == 2 + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_character_not_bytes_compatible(): + """Test exception when a character cannot be encoded as 'S'.""" + data = StringIO("–") # == \u2013 + with pytest.raises(ValueError): + np.loadtxt(data, dtype="S5") + + +@pytest.mark.parametrize("conv", (0, [float], "")) +def test_invalid_converter(conv): + msg = ( + "converters must be a dictionary mapping columns to converter " + "functions or a single callable." + ) + with pytest.raises(TypeError, match=msg): + np.loadtxt(StringIO("1 2\n3 4"), converters=conv) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_converters_dict_raises_non_integer_key(): + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}) + with pytest.raises(TypeError, match="keys of the converters dict"): + np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0) + + +@pytest.mark.parametrize("bad_col_ind", (3, -3)) +def test_converters_dict_raises_non_col_key(bad_col_ind): + data = StringIO("1 2\n3 4") + with pytest.raises(ValueError, match="converter specified for column"): + np.loadtxt(data, converters={bad_col_ind: int}) + + +def test_converters_dict_raises_val_not_callable(): + with pytest.raises(TypeError, + match="values of the converters dictionary must be callable"): + np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1}) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field(q): + txt = StringIO( + f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q) + assert_array_equal(res, expected) + + +@pytest.mark.parametrize("q", ('"', "'", "`")) +def test_quoted_field_with_whitepace_delimiter(q): + txt = StringIO( + f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n" + ) + dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)]) + expected = np.array( + [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype + ) + + res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q) + assert_array_equal(res, expected) + + +def test_quote_support_default(): + """Support for quoted fields is disabled by default.""" + txt = StringIO('"lat,long", 45, 30\n') + dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)]) + + with pytest.raises(ValueError, + match="the dtype passed requires 3 columns but 4 were"): + np.loadtxt(txt, dtype=dtype, delimiter=",") + + # Enable quoting support with non-None value for quotechar param + txt.seek(0) + expected = np.array([("lat,long", 45., 30.)], dtype=dtype) + + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def 
test_quotechar_multichar_error(): + txt = StringIO("1,2\n3,4") + msg = r".*must be a single unicode character or None" + with pytest.raises(TypeError, match=msg): + np.loadtxt(txt, delimiter=",", quotechar="''") + + +def test_comment_multichar_error_with_quote(): + txt = StringIO("1,2\n3,4") + msg = ( + "when multiple comments or a multi-character comment is given, " + "quotes are not supported." + ) + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments="123", quotechar='"') + with pytest.raises(ValueError, match=msg): + np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"') + + # A single character string in a tuple is unpacked though: + res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'") + assert_equal(res, [[1, 2], [3, 4]]) + + +def test_structured_dtype_with_quotes(): + data = StringIO( + + "1000;2.4;'alpha';-34\n" + "2000;3.1;'beta';29\n" + "3500;9.9;'gamma';120\n" + "4090;8.1;'delta';0\n" + "5001;4.4;'epsilon';-99\n" + "6543;7.8;'omega';-1\n" + + ) + dtype = np.dtype( + [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] + ) + expected = np.array( + [ + (1000, 2.4, "alpha", -34), + (2000, 3.1, "beta", 29), + (3500, 9.9, "gamma", 120), + (4090, 8.1, "delta", 0), + (5001, 4.4, "epsilon", -99), + (6543, 7.8, "omega", -1) + ], + dtype=dtype + ) + res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'") + assert_array_equal(res, expected) + + +def test_quoted_field_is_not_empty(): + txt = StringIO('1\n\n"4"\n""') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_quoted_field_is_not_empty_nonstrict(): + # Same as test_quoted_field_is_not_empty but check that we are not strict + # about missing closing quote (this is the `csv.reader` default also) + txt = StringIO('1\n\n"4"\n"') + expected = np.array(["1", "4", ""], dtype="U1") + res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"') + assert_equal(res, expected) + +def test_consecutive_quotechar_escaped(): + txt = StringIO('"Hello, my name is ""Monty""!"') + expected = np.array('Hello, my name is "Monty"!', dtype="U40") + res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"') + assert_equal(res, expected) + + +@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n")) +@pytest.mark.parametrize("ndmin", (0, 1, 2)) +@pytest.mark.parametrize("usecols", [None, (1, 2, 3)]) +def test_warn_on_no_data(data, ndmin, usecols): + """Check that a UserWarning is emitted when no data is read from input.""" + if usecols is not None: + expected_shape = (0, 3) + elif ndmin == 2: + expected_shape = (0, 1) # guess a single column?! 
+ else: + expected_shape = (0,) + + txt = StringIO(data) + with pytest.warns(UserWarning, match="input contained no data"): + res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols) + assert res.shape == expected_shape + + with NamedTemporaryFile(mode="w") as fh: + fh.write(data) + fh.seek(0) + with pytest.warns(UserWarning, match="input contained no data"): + res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols) + assert res.shape == expected_shape + +@pytest.mark.parametrize("skiprows", (2, 3)) +def test_warn_on_skipped_data(skiprows): + data = "1 2 3\n4 5 6" + txt = StringIO(data) + with pytest.warns(UserWarning, match="input contained no data"): + np.loadtxt(txt, skiprows=skiprows) + + +@pytest.mark.parametrize(["dtype", "value"], [ + ("i2", 0x0001), ("u2", 0x0001), + ("i4", 0x00010203), ("u4", 0x00010203), + ("i8", 0x0001020304050607), ("u8", 0x0001020304050607), + # The following values are constructed to lead to unique bytes: + ("float16", 3.07e-05), + ("float32", 9.2557e-41), ("complex64", 9.2557e-41 + 2.8622554e-29j), + ("float64", -1.758571353180402e-24), + # Here and below, the repr side-steps a small loss of precision in + # complex `str` in PyPy (which is probably fine, as repr works): + ("complex128", repr(5.406409232372729e-29 - 1.758571353180402e-24j)), + # Use integer values that fit into double. Everything else leads to + # problems due to longdoubles going via double and decimal strings + # causing rounding errors. + ("longdouble", 0x01020304050607), + ("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))), + ("U2", "\U00010203\U000a0b0c")]) +@pytest.mark.parametrize("swap", [True, False]) +def test_byteswapping_and_unaligned(dtype, value, swap): + # Try to create "interesting" values within the valid unicode range: + dtype = np.dtype(dtype) + data = [f"x,{value}\n"] # repr as PyPy `str` truncates some + if swap: + dtype = dtype.newbyteorder() + full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False) + # The above ensures that the interesting "b" field is unaligned: + assert full_dt.fields["b"][1] == 1 + res = np.loadtxt(data, dtype=full_dt, delimiter=",", + max_rows=1) # max-rows prevents over-allocation + assert res["b"] == dtype.type(value) + + +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efdFD" + "?") +def test_unicode_whitespace_stripping(dtype): + # Test that all numeric types (and bool) strip whitespace correctly + # \u202F is a narrow no-break space, `\n` is just a whitespace if quoted. 
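+    # For instance, both the plain " 3 " and the quoted "\u202F2\n" below
+    # should parse, e.g. np.loadtxt(StringIO(' 3 ,"\u202F2\n"'), dtype=int,
+    # delimiter=",", quotechar='"') is expected to give array([3, 2]).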
+ # Currently, skip float128 as it did not always support this and has no + # "custom" parsing: + txt = StringIO(' 3 ,"\u202F2\n"') + res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"') + assert_array_equal(res, np.array([3, 2]).astype(dtype)) + + +@pytest.mark.parametrize("dtype", "FD") +def test_unicode_whitespace_stripping_complex(dtype): + # Complex has a few extra cases since it has two components and + # parentheses + line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n" + data = [line, line.replace(" ", "\u202F")] + res = np.loadtxt(data, dtype=dtype, delimiter=',') + assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2)) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", "FD") +@pytest.mark.parametrize("field", + ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"]) +def test_bad_complex(dtype, field): + with pytest.raises(ValueError): + np.loadtxt([field + "\n"], dtype=dtype, delimiter=",") + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_nul_character_error(dtype): + # Test that a \0 character is correctly recognized as an error even if + # what comes before is valid (not everything gets parsed internally). + if dtype.lower() == "g": + pytest.xfail("longdouble/clongdouble assignment may misbehave.") + with pytest.raises(ValueError): + np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"') + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +@pytest.mark.parametrize("dtype", + np.typecodes["AllInteger"] + "efgdFDG" + "?") +def test_no_thousands_support(dtype): + # Mainly to document behaviour, Python supports thousands like 1_1. + # (e and G may end up using different conversion and support it, this is + # a bug but happens...) + if dtype == "e": + pytest.skip("half assignment currently uses Python float converter") + if dtype in "eG": + pytest.xfail("clongdouble assignment is buggy (uses `complex`?).") + + assert int("1_1") == float("1_1") == complex("1_1") == 11 + with pytest.raises(ValueError): + np.loadtxt(["1_1\n"], dtype=dtype) + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2\n,3\n"], + ["1,2\n", "2\r,3\n"]]) +def test_bad_newline_in_iterator(data): + # In NumPy <=1.22 this was accepted, because newlines were completely + # ignored when the input was an iterable. This could be changed, but right + # now, we raise an error. + msg = "Found an unquoted embedded newline within a single line" + with pytest.raises(ValueError, match=msg): + np.loadtxt(data, delimiter=",") + + +@pytest.mark.parametrize("data", [ + ["1,2\n", "2,3\r\n"], # a universal newline + ["1,2\n", "'2\n',3\n"], # a quoted newline + ["1,2\n", "'2\r',3\n"], + ["1,2\n", "'2\r\n',3\n"], +]) +def test_good_newline_in_iterator(data): + # The quoted newlines will be untransformed here, but are just whitespace. + res = np.loadtxt(data, delimiter=",", quotechar="'") + assert_array_equal(res, [[1., 2.], [2., 3.]]) + + +@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"]) +def test_universal_newlines_quoted(newline): + # Check that universal newline support within the tokenizer is not applied + # to quoted fields. 
(note that lines must end in newline or quoted + # fields will not include a newline at all) + data = ['1,"2\n"\n', '3,"4\n', '1"\n'] + data = [row.replace("\n", newline) for row in data] + res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"') + assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']]) + + +def test_null_character(): + # Basic tests to check that the NUL character is not special: + res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000") + assert_array_equal(res, [[1, 2, 3], [4, 5, 6]]) + + # Also not as part of a field (avoid unicode/arrays as unicode strips \0) + res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"], + delimiter=",", dtype=object) + assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]] + + +def test_iterator_fails_getting_next_line(): + class BadSequence: + def __len__(self): + return 100 + + def __getitem__(self, item): + if item == 50: + raise RuntimeError("Bad things happened!") + return f"{item}, {item + 1}" + + with pytest.raises(RuntimeError, match="Bad things happened!"): + np.loadtxt(BadSequence(), dtype=int, delimiter=",") + + +class TestCReaderUnitTests: + # These are internal tests for path that should not be possible to hit + # unless things go very very wrong somewhere. + def test_not_an_filelike(self): + with pytest.raises(AttributeError, match=".*read"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=True) + + def test_filelike_read_fails(self): + # Can only be reached if loadtxt opens the file, so it is hard to do + # via the public interface (although maybe not impossible considering + # the current "DataClass" backing). + class BadFileLike: + counter = 0 + + def read(self, size): + self.counter += 1 + if self.counter > 20: + raise RuntimeError("Bad bad bad!") + return "1,2,3\n" + + with pytest.raises(RuntimeError, match="Bad bad bad!"): + np._core._multiarray_umath._load_from_filelike( + BadFileLike(), dtype=np.dtype("i"), filelike=True) + + def test_filelike_bad_read(self): + # Can only be reached if loadtxt opens the file, so it is hard to do + # via the public interface (although maybe not impossible considering + # the current "DataClass" backing). + + class BadFileLike: + counter = 0 + + def read(self, size): + return 1234 # not a string! + + with pytest.raises(TypeError, + match="non-string returned while reading data"): + np._core._multiarray_umath._load_from_filelike( + BadFileLike(), dtype=np.dtype("i"), filelike=True) + + def test_not_an_iter(self): + with pytest.raises(TypeError, + match="error reading from object, expected an iterable"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=False) + + def test_bad_type(self): + with pytest.raises(TypeError, match="internal error: dtype must"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype="i", filelike=False) + + def test_bad_encoding(self): + with pytest.raises(TypeError, match="encoding must be a unicode"): + np._core._multiarray_umath._load_from_filelike( + object(), dtype=np.dtype("i"), filelike=False, encoding=123) + + @pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"]) + def test_manual_universal_newlines(self, newline): + # This is currently not available to users, because we should always + # open files with universal newlines enabled `newlines=None`. + # (And reading from an iterator uses slightly different code paths.) 
+        # We have no real support for `newline="\r"` or `newline="\n"` as the
+        # user cannot specify those options.
+        data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline),
+                        newline="")
+
+        res = np._core._multiarray_umath._load_from_filelike(
+            data, dtype=np.dtype("U10"), filelike=True,
+            quote='"', comment="#", skiplines=1)
+        assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "])
+
+
+def test_delimiter_comment_collision_raises():
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",")
+
+
+def test_delimiter_quotechar_collision_raises():
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",")
+
+
+def test_comment_quotechar_collision_raises():
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#")
+
+
+def test_delimiter_and_multiple_comments_collision_raises():
+    with pytest.raises(
+        TypeError, match="Comment characters.*cannot include the delimiter"
+    ):
+        np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","])
+
+
+@pytest.mark.parametrize(
+    "ws",
+    (
+        " ",  # space
+        "\t",  # tab
+        "\u2003",  # em
+        "\u00A0",  # non-break
+        "\u3000",  # ideographic space
+    )
+)
+def test_collision_with_default_delimiter_raises(ws):
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws)
+    with pytest.raises(TypeError, match=".*control characters.*incompatible"):
+        np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws)
+
+
+@pytest.mark.parametrize("nl", ("\n", "\r"))
+def test_control_character_newline_raises(nl):
+    txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}")
+    msg = "control character.*cannot be a newline"
+    with pytest.raises(TypeError, match=msg):
+        np.loadtxt(txt, delimiter=nl)
+    with pytest.raises(TypeError, match=msg):
+        np.loadtxt(txt, comments=nl)
+    with pytest.raises(TypeError, match=msg):
+        np.loadtxt(txt, quotechar=nl)
+
+
+@pytest.mark.parametrize(
+    ("generic_data", "long_datum", "unitless_dtype", "expected_dtype"),
+    [
+        ("2012-03", "2013-01-15", "M8", "M8[D]"),  # Datetimes
+        ("spam-a-lot", "tis_but_a_scratch", "U", "U17"),  # str
+    ],
+)
+@pytest.mark.parametrize("nrows", (10, 50000, 60000))  # lt, eq, gt chunksize
+def test_parametric_unit_discovery(
+    generic_data, long_datum, unitless_dtype, expected_dtype, nrows
+):
+    """Check that the correct unit (e.g. month, day, second) is discovered from
+    the data when a user specifies a unitless datetime."""
+    # For the datetime case, the unit should be "D" (days) due to the last entry
+    data = [generic_data] * nrows + [long_datum]
+    expected = np.array(data, dtype=expected_dtype)
+    assert len(data) == nrows + 1
+    assert len(data) == len(expected)
+
+    # file-like path
+    txt = StringIO("\n".join(data))
+    a = np.loadtxt(txt, dtype=unitless_dtype)
+    assert len(a) == len(expected)
+    assert a.dtype == expected.dtype
+    assert_equal(a, expected)
+
+    # file-obj path
+    fd, fname = mkstemp()
+    os.close(fd)
+    with open(fname, "w") as fh:
+        fh.write("\n".join(data) + "\n")
+    # loading the full file...
+    a = np.loadtxt(fname, dtype=unitless_dtype)
+    assert len(a) == len(expected)
+    assert a.dtype == expected.dtype
+    assert_equal(a, expected)
+    # loading half of the file...
+ a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows / 2)) + os.remove(fname) + assert len(a) == int(nrows / 2) + assert_equal(a, expected[:int(nrows / 2)]) + + +def test_str_dtype_unit_discovery_with_converter(): + data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"] + expected = np.array( + ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17" + ) + conv = lambda s: s.removeprefix("XXX") + + # file-like path + txt = StringIO("\n".join(data)) + a = np.loadtxt(txt, dtype="U", converters=conv) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + a = np.loadtxt(fname, dtype="U", converters=conv) + os.remove(fname) + assert a.dtype == expected.dtype + assert_equal(a, expected) + + +@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), + reason="PyPy bug in error formatting") +def test_control_character_empty(): + with pytest.raises(TypeError, match="Text reading control character must"): + np.loadtxt(StringIO("1 2 3"), delimiter="") + with pytest.raises(TypeError, match="Text reading control character must"): + np.loadtxt(StringIO("1 2 3"), quotechar="") + with pytest.raises(ValueError, match="comments cannot be an empty string"): + np.loadtxt(StringIO("1 2 3"), comments="") + with pytest.raises(ValueError, match="comments cannot be an empty string"): + np.loadtxt(StringIO("1 2 3"), comments=["#", ""]) + + +def test_control_characters_as_bytes(): + """Byte control characters (comments, delimiter) are supported.""" + a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",") + assert_equal(a, [1, 2, 3]) + + +@pytest.mark.filterwarnings('ignore::UserWarning') +def test_field_growing_cases(): + # Test empty field appending/growing (each field still takes 1 character) + # to see if the final field appending does not create issues. + res = np.loadtxt([""], delimiter=",", dtype=bytes) + assert len(res) == 0 + + for i in range(1, 1024): + res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10) + assert len(res) == i + 1 + +@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000)) +def test_maxrows_exceeding_chunksize(nmax): + # tries to read all of the file, + # or less, equal, greater than _loadtxt_chunksize + file_length = 60000 + + # file-like path + data = ["a 0.5 1"] * file_length + txt = StringIO("\n".join(data)) + res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax) + assert len(res) == nmax + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) + os.remove(fname) + assert len(res) == nmax + +@pytest.mark.parametrize("nskip", (0, 10000, 12345, 50000, 67891, 100000)) +def test_skiprow_exceeding_maxrows_exceeding_chunksize(tmpdir, nskip): + # tries to read a file in chunks by skipping a variable amount of lines, + # less, equal, greater than max_rows + file_length = 110000 + data = "\n".join(f"{i} a 0.5 1" for i in range(1, file_length + 1)) + expected_length = min(60000, file_length - nskip) + expected = np.arange(nskip + 1, nskip + 1 + expected_length).astype(str) + + # file-like path + txt = StringIO(data) + res = np.loadtxt(txt, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? 
+ assert_array_equal(expected, res[:, 0]) + + # file-obj path + tmp_file = tmpdir / "test_data.txt" + tmp_file.write(data) + fname = str(tmp_file) + res = np.loadtxt(fname, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000) + assert len(res) == expected_length + # are the right lines read in res? + assert_array_equal(expected, res[:, 0]) diff --git a/python/numpy/lib/tests/test_mixins.py b/python/numpy/lib/tests/test_mixins.py new file mode 100644 index 000000000..f0aec156d --- /dev/null +++ b/python/numpy/lib/tests/test_mixins.py @@ -0,0 +1,215 @@ +import numbers +import operator + +import numpy as np +from numpy.testing import assert_, assert_equal, assert_raises + +# NOTE: This class should be kept as an exact copy of the example from the +# docstring for NDArrayOperatorsMixin. + +class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin): + def __init__(self, value): + self.value = np.asarray(value) + + # One might also consider adding the built-in list type to this + # list, to support operations like np.add(array_like, list) + _HANDLED_TYPES = (np.ndarray, numbers.Number) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + out = kwargs.get('out', ()) + for x in inputs + out: + # Only support operations with instances of _HANDLED_TYPES. + # Use ArrayLike instead of type(self) for isinstance to + # allow subclasses that don't override __array_ufunc__ to + # handle ArrayLike objects. + if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)): + return NotImplemented + + # Defer to the implementation of the ufunc on unwrapped values. + inputs = tuple(x.value if isinstance(x, ArrayLike) else x + for x in inputs) + if out: + kwargs['out'] = tuple( + x.value if isinstance(x, ArrayLike) else x + for x in out) + result = getattr(ufunc, method)(*inputs, **kwargs) + + if type(result) is tuple: + # multiple return values + return tuple(type(self)(x) for x in result) + elif method == 'at': + # no return value + return None + else: + # one return value + return type(self)(result) + + def __repr__(self): + return f'{type(self).__name__}({self.value!r})' + + +def wrap_array_like(result): + if type(result) is tuple: + return tuple(ArrayLike(r) for r in result) + else: + return ArrayLike(result) + + +def _assert_equal_type_and_value(result, expected, err_msg=None): + assert_equal(type(result), type(expected), err_msg=err_msg) + if isinstance(result, tuple): + assert_equal(len(result), len(expected), err_msg=err_msg) + for result_item, expected_item in zip(result, expected): + _assert_equal_type_and_value(result_item, expected_item, err_msg) + else: + assert_equal(result.value, expected.value, err_msg=err_msg) + assert_equal(getattr(result.value, 'dtype', None), + getattr(expected.value, 'dtype', None), err_msg=err_msg) + + +_ALL_BINARY_OPERATORS = [ + operator.lt, + operator.le, + operator.eq, + operator.ne, + operator.gt, + operator.ge, + operator.add, + operator.sub, + operator.mul, + operator.truediv, + operator.floordiv, + operator.mod, + divmod, + pow, + operator.lshift, + operator.rshift, + operator.and_, + operator.xor, + operator.or_, +] + + +class TestNDArrayOperatorsMixin: + + def test_array_like_add(self): + + def check(result): + _assert_equal_type_and_value(result, ArrayLike(0)) + + check(ArrayLike(0) + 0) + check(0 + ArrayLike(0)) + + check(ArrayLike(0) + np.array(0)) + check(np.array(0) + ArrayLike(0)) + + check(ArrayLike(np.array(0)) + 0) + check(0 + ArrayLike(np.array(0))) + + check(ArrayLike(np.array(0)) + np.array(0)) + check(np.array(0) + ArrayLike(np.array(0))) + 
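+    # NOTE: an illustrative sketch (not an extra check) of the dispatch the
+    # assertions above rely on; `ArrayLike` is the example class from the
+    # top of this file. `ArrayLike(0) + 0` resolves roughly as:
+    #
+    #   np.add(ArrayLike(0), 0)                  # the mixin forwards to np.add
+    #   -> ArrayLike.__array_ufunc__(np.add, '__call__', ArrayLike(0), 0)
+    #   -> ArrayLike(np.add(np.asarray(0), 0))   # unwrap, compute, re-wrap
+    #
+    # Both operand orders therefore come back wrapped, which is what
+    # check() verifies via _assert_equal_type_and_value.
+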
+ def test_inplace(self): + array_like = ArrayLike(np.array([0])) + array_like += 1 + _assert_equal_type_and_value(array_like, ArrayLike(np.array([1]))) + + array = np.array([0]) + array += ArrayLike(1) + _assert_equal_type_and_value(array, ArrayLike(np.array([1]))) + + def test_opt_out(self): + + class OptOut: + """Object that opts out of __array_ufunc__.""" + __array_ufunc__ = None + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + array_like = ArrayLike(1) + opt_out = OptOut() + + # supported operations + assert_(array_like + opt_out is opt_out) + assert_(opt_out + array_like is opt_out) + + # not supported + with assert_raises(TypeError): + # don't use the Python default, array_like = array_like + opt_out + array_like += opt_out + with assert_raises(TypeError): + array_like - opt_out + with assert_raises(TypeError): + opt_out - array_like + + def test_subclass(self): + + class SubArrayLike(ArrayLike): + """Should take precedence over ArrayLike.""" + + x = ArrayLike(0) + y = SubArrayLike(1) + _assert_equal_type_and_value(x + y, y) + _assert_equal_type_and_value(y + x, y) + + def test_object(self): + x = ArrayLike(0) + obj = object() + with assert_raises(TypeError): + x + obj + with assert_raises(TypeError): + obj + x + with assert_raises(TypeError): + x += obj + + def test_unary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in [operator.neg, + operator.pos, + abs, + operator.invert]: + _assert_equal_type_and_value(op(array_like), ArrayLike(op(array))) + + def test_forward_binary_methods(self): + array = np.array([-1, 0, 1, 2]) + array_like = ArrayLike(array) + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(array, 1)) + actual = op(array_like, 1) + err_msg = f'failed for operator {op}' + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_reflected_binary_methods(self): + for op in _ALL_BINARY_OPERATORS: + expected = wrap_array_like(op(2, 1)) + actual = op(2, ArrayLike(1)) + err_msg = f'failed for operator {op}' + _assert_equal_type_and_value(expected, actual, err_msg=err_msg) + + def test_matmul(self): + array = np.array([1, 2], dtype=np.float64) + array_like = ArrayLike(array) + expected = ArrayLike(np.float64(5)) + _assert_equal_type_and_value(expected, np.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array_like, array)) + _assert_equal_type_and_value( + expected, operator.matmul(array, array_like)) + + def test_ufunc_at(self): + array = ArrayLike(np.array([1, 2, 3, 4])) + assert_(np.negative.at(array, np.array([0, 1])) is None) + _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4])) + + def test_ufunc_two_outputs(self): + mantissa, exponent = np.frexp(2 ** -3) + expected = (ArrayLike(mantissa), ArrayLike(exponent)) + _assert_equal_type_and_value( + np.frexp(ArrayLike(2 ** -3)), expected) + _assert_equal_type_and_value( + np.frexp(ArrayLike(np.array(2 ** -3))), expected) diff --git a/python/numpy/lib/tests/test_nanfunctions.py b/python/numpy/lib/tests/test_nanfunctions.py new file mode 100644 index 000000000..89a6d1f95 --- /dev/null +++ b/python/numpy/lib/tests/test_nanfunctions.py @@ -0,0 +1,1438 @@ +import inspect +import warnings +from functools import partial + +import pytest + +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy.exceptions import AxisError, ComplexWarning +from numpy.lib._nanfunctions_impl import _nan_mask, _replace_nan +from numpy.testing import ( + 
assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) + +# Test data +_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170], + [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833], + [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954], + [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]]) + + +# Rows of _ndat with nans removed +_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]), + np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]), + np.array([0.1042, -0.5954]), + np.array([0.1610, 0.1859, 0.3146])] + +# Rows of _ndat with nans converted to ones +_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170], + [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833], + [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954], + [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]]) + +# Rows of _ndat with nans converted to zeros +_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170], + [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833], + [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954], + [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]]) + + +class TestSignatureMatch: + NANFUNCS = { + np.nanmin: np.amin, + np.nanmax: np.amax, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanpercentile: np.percentile, + np.nanquantile: np.quantile, + np.nanvar: np.var, + np.nanstd: np.std, + } + IDS = [k.__name__ for k in NANFUNCS] + + @staticmethod + def get_signature(func, default="..."): + """Construct a signature and replace all default parameter-values.""" + prm_list = [] + signature = inspect.signature(func) + for prm in signature.parameters.values(): + if prm.default is inspect.Parameter.empty: + prm_list.append(prm) + else: + prm_list.append(prm.replace(default=default)) + return inspect.Signature(prm_list) + + @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS) + def test_signature_match(self, nan_func, func): + # Ignore the default parameter-values as they can sometimes differ + # between the two functions (*e.g.* one has `False` while the other + # has `np._NoValue`) + signature = self.get_signature(func) + nan_signature = self.get_signature(nan_func) + np.testing.assert_equal(signature, nan_signature) + + def test_exhaustiveness(self): + """Validate that all nan functions are actually tested.""" + np.testing.assert_equal( + set(self.IDS), set(np.lib._nanfunctions_impl.__all__) + ) + + +class TestNanFunctions_MinMax: + + nanfuncs = [np.nanmin, np.nanmax] + stdfuncs = [np.min, np.max] + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "All-NaN slice encountered" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_masked(self): + mat = np.ma.fix_invalid(_ndat) + msk = mat._mask.copy() + for f in [np.nanmin]: + res = f(mat, axis=1) + tgt = f(_ndat, axis=1) + assert_equal(res, tgt) + assert_equal(mat._mask, msk) + assert_(not np.isinf(mat).any()) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) 
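+
+    # NOTE: an illustrative reduction (not an extra assertion) of what
+    # test_allnans above exercises:
+    #
+    #   with pytest.warns(RuntimeWarning, match="All-NaN slice"):
+    #       assert np.isnan(np.nanmax(np.full(3, np.nan)))
+    #
+    # nanmin/nanmax warn and propagate NaN for all-NaN input, whereas
+    # nanargmin/nanargmax raise ValueError for the same input (see
+    # TestNanFunctions_ArgminArgmax below).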
+ + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + # check that rows of nan are dealt with for subclasses (#4628) + mine[1] = np.nan + for f in self.nanfuncs: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(np.isnan(res[1]) and not np.isnan(res[0]) + and not np.isnan(res[2])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mine) + assert_(res.shape == ()) + assert_(res != np.nan) + assert_(len(w) == 0) + + def test_object_array(self): + arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object) + assert_equal(np.nanmin(arr), 1.0) + assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0]) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + # assert_equal does not work on object arrays of nan + assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan]) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + initial = 100 if f is np.nanmax else 0 + + ret1 = f(ar, initial=initial) + assert ret1.dtype == dtype + assert ret1 == initial + + ret2 = f(ar.view(MyNDArray), initial=initial) + assert ret2.dtype == dtype + assert ret2 == initial + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + class MyNDArray(np.ndarray): + pass + + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 4 if f is np.nanmin else 8 + + ret1 = f(ar, where=where, initial=5) + assert ret1.dtype == dtype + assert ret1 == reference + + ret2 = f(ar.view(MyNDArray), where=where, initial=5) + assert ret2.dtype == dtype + assert ret2 == reference + + +class TestNanFunctions_ArgminArgmax: + + nanfuncs = [np.nanargmin, np.nanargmax] + + def test_mutation(self): + # Check that passed array is not modified. 
+ ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_result_values(self): + for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]): + for row in _ndat: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "invalid value encountered in") + ind = f(row) + val = row[ind] + # comparing with NaN is tricky as the result + # is always false except for NaN != NaN + assert_(not np.isnan(val)) + assert_(not fcmp(val, row).any()) + assert_(not np.equal(val, row[:ind]).any()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func in self.nanfuncs: + with pytest.raises(ValueError, match="All-NaN slice encountered"): + func(array, axis=axis) + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + assert_raises_regex( + ValueError, + "attempt to get argm.. of an empty sequence", + f, mat, axis=axis) + for axis in [1]: + res = f(mat, axis=axis) + assert_equal(res, np.zeros(0)) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + mine = np.eye(3).view(MyNDArray) + for f in self.nanfuncs: + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == (3,)) + res = f(mine) + assert_(res.shape == ()) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_keepdims(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, keepdims=True) + assert ret.ndim == ar.ndim + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_out(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + out = np.zeros((), dtype=np.intp) + reference = 5 if f is np.nanargmin else 8 + ret = f(ar, out=out) + assert ret is out + assert ret == reference + + +_TEST_ARRAYS = { + "0d": np.array(5), + "1d": np.array([127, 39, 93, 87, 46]) +} +for _v in _TEST_ARRAYS.values(): + _v.setflags(write=False) + + +@pytest.mark.parametrize( + "dtype", + np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O", +) +@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys()) +class TestNanFunctions_NumberTypes: + nanfuncs = { + np.nanmin: np.min, + np.nanmax: np.max, + np.nanargmin: np.argmin, + np.nanargmax: np.argmax, + np.nansum: np.sum, + np.nanprod: np.prod, + np.nancumsum: np.cumsum, + np.nancumprod: np.cumprod, + np.nanmean: np.mean, + np.nanmedian: np.median, + np.nanvar: np.var, + np.nanstd: np.std, + } + nanfunc_ids = [i.__name__ for i in nanfuncs] + + @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids) + @np.errstate(over="ignore") + def test_nanfunc(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat) + out = nanfunc(mat) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + 
@pytest.mark.parametrize( + "nanfunc,func", + [(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)], + ids=["nanquantile", "nanpercentile"], + ) + def test_nanfunc_q(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + if mat.dtype.kind == "c": + assert_raises(TypeError, func, mat, q=1) + assert_raises(TypeError, nanfunc, mat, q=1) + + else: + tgt = func(mat, q=1) + out = nanfunc(mat, q=1) + + assert_almost_equal(out, tgt) + + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc,func", + [(np.nanvar, np.var), (np.nanstd, np.std)], + ids=["nanvar", "nanstd"], + ) + def test_nanfunc_ddof(self, mat, dtype, nanfunc, func): + mat = mat.astype(dtype) + tgt = func(mat, ddof=0.5) + out = nanfunc(mat, ddof=0.5) + + assert_almost_equal(out, tgt) + if dtype == "O": + assert type(out) is type(tgt) + else: + assert out.dtype == tgt.dtype + + @pytest.mark.parametrize( + "nanfunc", [np.nanvar, np.nanstd] + ) + def test_nanfunc_correction(self, mat, dtype, nanfunc): + mat = mat.astype(dtype) + assert_almost_equal( + nanfunc(mat, correction=0.5), nanfunc(mat, ddof=0.5) + ) + + err_msg = "ddof and correction can't be provided simultaneously." + with assert_raises_regex(ValueError, err_msg): + nanfunc(mat, ddof=0.5, correction=0.5) + + with assert_raises_regex(ValueError, err_msg): + nanfunc(mat, ddof=1, correction=0) + + +class SharedNanFunctionsTestsMixin: + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + for f in self.nanfuncs: + f(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for axis in [None, 0, 1]: + tgt = rf(mat, axis=axis, keepdims=True) + res = nf(mat, axis=axis, keepdims=True) + assert_(res.ndim == tgt.ndim) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.zeros(3) + tgt = rf(mat, axis=1) + res = nf(mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_dtype_from_dtype(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(ComplexWarning) + tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type + res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_char(self): + mat = np.eye(3) + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + with suppress_warnings() as sup: + if nf in {np.nanstd, np.nanvar} and c in 'FDG': + # Giving the warning is a small bug, see gh-8000 + sup.filter(ComplexWarning) + tgt = rf(mat, dtype=c, axis=1).dtype.type + res = nf(mat, dtype=c, axis=1).dtype.type + assert_(res is tgt) + # scalar case + tgt = rf(mat, dtype=c, axis=None).dtype.type + res = nf(mat, dtype=c, axis=None).dtype.type + assert_(res is tgt) + + def test_dtype_from_input(self): + codes = 'efdgFDG' + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + for c in codes: + mat = np.eye(3, dtype=c) + tgt = rf(mat, axis=1).dtype.type + res = nf(mat, axis=1).dtype.type + assert_(res is tgt, f"res {res}, tgt {tgt}") + # scalar case + tgt = rf(mat, 
axis=None).dtype.type + res = nf(mat, axis=None).dtype.type + assert_(res is tgt) + + def test_result_values(self): + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + tgt = [rf(d) for d in _rdat] + res = nf(_ndat, axis=1) + assert_almost_equal(res, tgt) + + def test_scalar(self): + for f in self.nanfuncs: + assert_(f(0.) == 0.) + + def test_subclass(self): + class MyNDArray(np.ndarray): + pass + + # Check that it works and that type and + # shape are preserved + array = np.eye(3) + mine = array.view(MyNDArray) + for f in self.nanfuncs: + expected_shape = f(array, axis=0).shape + res = f(mine, axis=0) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array, axis=1).shape + res = f(mine, axis=1) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + expected_shape = f(array).shape + res = f(mine) + assert_(isinstance(res, MyNDArray)) + assert_(res.shape == expected_shape) + + +class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nansum, np.nanprod] + stdfuncs = [np.sum, np.prod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array, axis=axis) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]): + mat = np.zeros((0, 3)) + tgt = [tgt_value] * 3 + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = [] + res = f(mat, axis=1) + assert_equal(res, tgt) + tgt = tgt_value + res = f(mat, axis=None) + assert_equal(res, tgt) + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_initial(self, dtype): + ar = np.arange(9).astype(dtype) + ar[:5] = np.nan + + for f in self.nanfuncs: + reference = 28 if f is np.nansum else 3360 + ret = f(ar, initial=2) + assert ret.dtype == dtype + assert ret == reference + + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + def test_where(self, dtype): + ar = np.arange(9).reshape(3, 3).astype(dtype) + ar[0, :] = np.nan + where = np.ones_like(ar, dtype=np.bool) + where[:, 0] = False + + for f in self.nanfuncs: + reference = 26 if f is np.nansum else 2240 + ret = f(ar, where=where, initial=2) + assert ret.dtype == dtype + assert ret == reference + + +class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nancumsum, np.nancumprod] + stdfuncs = [np.cumsum, np.cumprod] + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan) + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + for func, identity in zip(self.nanfuncs, [0, 1]): + out = func(array) + assert np.all(out == identity) + assert out.dtype == array.dtype + + def test_empty(self): + for f, tgt_value in zip(self.nanfuncs, [0, 1]): + mat = np.zeros((0, 3)) + tgt = tgt_value * np.ones((0, 3)) + res = f(mat, axis=0) + assert_equal(res, tgt) + tgt = mat + res = f(mat, 
axis=1) + assert_equal(res, tgt) + tgt = np.zeros(0) + res = f(mat, axis=None) + assert_equal(res, tgt) + + def test_keepdims(self): + for f, g in zip(self.nanfuncs, self.stdfuncs): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = f(mat, axis=axis, out=None) + res = g(mat, axis=axis, out=None) + assert_(res.ndim == tgt.ndim) + + for f in self.nanfuncs: + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + rs = np.random.RandomState(0) + d[rs.rand(*d.shape) < 0.5] = np.nan + res = f(d, axis=None) + assert_equal(res.shape, (1155,)) + for axis in np.arange(4): + res = f(d, axis=axis) + assert_equal(res.shape, (3, 5, 7, 11)) + + def test_result_values(self): + for axis in (-2, -1, 0, 1, None): + tgt = np.cumprod(_ndat_ones, axis=axis) + res = np.nancumprod(_ndat, axis=axis) + assert_almost_equal(res, tgt) + tgt = np.cumsum(_ndat_zeros, axis=axis) + res = np.nancumsum(_ndat, axis=axis) + assert_almost_equal(res, tgt) + + def test_out(self): + mat = np.eye(3) + for nf, rf in zip(self.nanfuncs, self.stdfuncs): + resout = np.eye(3) + for axis in (-2, -1, 0, 1): + tgt = rf(mat, axis=axis) + res = nf(mat, axis=axis, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + +class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin): + + nanfuncs = [np.nanmean, np.nanvar, np.nanstd] + stdfuncs = [np.mean, np.var, np.std] + + def test_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool, np.int_, np.object_]: + assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) + + def test_out_dtype_error(self): + for f in self.nanfuncs: + for dtype in [np.bool, np.int_, np.object_]: + out = np.empty(_ndat.shape[0], dtype=dtype) + assert_raises(TypeError, f, _ndat, axis=1, out=out) + + def test_ddof(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in [0, 1]: + tgt = [rf(d, ddof=ddof) for d in _rdat] + res = nf(_ndat, axis=1, ddof=ddof) + assert_almost_equal(res, tgt) + + def test_ddof_too_big(self): + nanfuncs = [np.nanvar, np.nanstd] + stdfuncs = [np.var, np.std] + dsize = [len(d) for d in _rdat] + for nf, rf in zip(nanfuncs, stdfuncs): + for ddof in range(5): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + sup.filter(ComplexWarning) + tgt = [ddof >= d for d in dsize] + res = nf(_ndat, axis=1, ddof=ddof) + assert_equal(np.isnan(res), tgt) + if any(tgt): + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" + for func in self.nanfuncs: + with pytest.warns(RuntimeWarning, match=match): + out = func(array, axis=axis) + assert np.isnan(out).all() + + # `nanvar` and `nanstd` convert complex inputs to their + # corresponding floating dtype + if func is np.nanmean: + assert out.dtype == array.dtype + else: + assert out.dtype == np.abs(array).dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for f in self.nanfuncs: + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(f(mat, axis=axis)).all()) + 
                    assert_(len(w) == 1)
+                    assert_(issubclass(w[0].category, RuntimeWarning))
+            for axis in [1]:
+                with warnings.catch_warnings(record=True) as w:
+                    warnings.simplefilter('always')
+                    assert_equal(f(mat, axis=axis), np.zeros([]))
+                    assert_(len(w) == 0)
+
+    @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+    def test_where(self, dtype):
+        ar = np.arange(9).reshape(3, 3).astype(dtype)
+        ar[0, :] = np.nan
+        where = np.ones_like(ar, dtype=np.bool)
+        where[:, 0] = False
+
+        for f, f_std in zip(self.nanfuncs, self.stdfuncs):
+            reference = f_std(ar[where][2:])
+            dtype_reference = dtype if f is np.nanmean else ar.real.dtype
+
+            ret = f(ar, where=where)
+            assert ret.dtype == dtype_reference
+            np.testing.assert_allclose(ret, reference)
+
+    def test_nanstd_with_mean_keyword(self):
+        # Setting the seed to make the test reproducible
+        rng = np.random.RandomState(1234)
+        A = rng.randn(10, 20, 5) + 0.5
+        A[:, 5, :] = np.nan
+
+        mean_out = np.zeros((10, 1, 5))
+        std_out = np.zeros((10, 1, 5))
+
+        mean = np.nanmean(A,
+                          out=mean_out,
+                          axis=1,
+                          keepdims=True)
+
+        # The returned object should be the array passed in via `out`
+        assert mean_out is mean
+
+        std = np.nanstd(A,
+                        out=std_out,
+                        axis=1,
+                        keepdims=True,
+                        mean=mean)
+
+        # The returned object should be the array passed in via `out`
+        assert std_out is std
+
+        # The shapes of the returned mean and std should be the same
+        assert std.shape == mean.shape
+        assert std.shape == (10, 1, 5)
+
+        # Output should be the same as from the individual algorithms
+        std_old = np.nanstd(A, axis=1, keepdims=True)
+
+        assert std_old.shape == mean.shape
+        assert_almost_equal(std, std_old)
+
+
+_TIME_UNITS = (
+    "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"
+)
+
+# All `inexact` + `timedelta64` type codes
+_TYPE_CODES = list(np.typecodes["AllFloat"])
+_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS]
+
+
+class TestNanFunctions_Median:
+
+    def test_mutation(self):
+        # Check that passed array is not modified.
+ ndat = _ndat.copy() + np.nanmedian(ndat) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.median(mat, axis=axis, out=None, overwrite_input=False) + res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanmedian(d, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanmedian(d, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = np.empty(shape_out) + result = np.nanmedian(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_out(self): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + tgt = np.median(mat, axis=1) + res = np.nanmedian(nan_mat, axis=1, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.median(mat, axis=None) + res = np.nanmedian(nan_mat, axis=None, out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanmedian(nan_mat, axis=(0, 1), out=resout) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_small_large(self): + # test the small and large code paths, current cutoff 400 elements + for s in [5, 20, 51, 200, 1000]: + d = np.random.randn(4, s) + # Randomly set some elements to NaN: + w = np.random.randint(0, d.size, size=d.size // 5) + d.ravel()[w] = np.nan + d[:, 0] = 1. 
# ensure at least one good value + # use normal median without nans to compare + tgt = [] + for x in d: + nonan = np.compress(~np.isnan(x), x) + tgt.append(np.median(nonan, overwrite_input=True)) + + assert_array_equal(np.nanmedian(d, axis=-1), tgt) + + def test_result_values(self): + tgt = [np.median(d) for d in _rdat] + res = np.nanmedian(_ndat, axis=1) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", _TYPE_CODES) + def test_allnans(self, dtype, axis): + mat = np.full((3, 3), np.nan).astype(dtype) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + + output = np.nanmedian(mat, axis=axis) + assert output.dtype == mat.dtype + assert np.isnan(output).all() + + if axis is None: + assert_(len(sup.log) == 1) + else: + assert_(len(sup.log) == 3) + + # Check scalar + scalar = np.array(np.nan).astype(dtype)[()] + output_scalar = np.nanmedian(scalar) + assert output_scalar.dtype == scalar.dtype + assert np.isnan(output_scalar) + + if axis is None: + assert_(len(sup.log) == 2) + else: + assert_(len(sup.log) == 4) + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanmedian(mat, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanmedian(mat, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_(np.nanmedian(0.) == 0.) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.nanmedian, d, axis=-5) + assert_raises(AxisError, np.nanmedian, d, axis=(0, -5)) + assert_raises(AxisError, np.nanmedian, d, axis=4) + assert_raises(AxisError, np.nanmedian, d, axis=(0, 4)) + assert_raises(ValueError, np.nanmedian, d, axis=(1, 1)) + + def test_float_special(self): + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + assert_equal(np.nanmedian(a, axis=0), [inf, np.nan]) + assert_equal(np.nanmedian(a, axis=1), [inf, np.nan]) + assert_equal(np.nanmedian(a), inf) + + # minimum fill value check + a = np.array([[np.nan, np.nan, inf], + [np.nan, np.nan, inf]]) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf]) + assert_equal(np.nanmedian(a, axis=1), inf) + + # no mask path + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.nanmedian(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + if inf > 0: + assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.nanmedian(a), 4.5) + else: + assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.nanmedian(a), -2.5) + assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf]) + + for i in range(10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + assert_equal(np.nanmedian(a), inf) + assert_equal(np.nanmedian(a, axis=1), inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [inf] * j) + + a = np.array([([np.nan] * i) + ([-inf] * j)] * 2) + assert_equal(np.nanmedian(a), -inf) + assert_equal(np.nanmedian(a, axis=1), -inf) + assert_equal(np.nanmedian(a, axis=0), + ([np.nan] * i) + [-inf] * j) + + +class 
TestNanFunctions_Percentile: + + def test_mutation(self): + # Check that passed array is not modified. + ndat = _ndat.copy() + np.nanpercentile(ndat, 30) + assert_equal(ndat, _ndat) + + def test_keepdims(self): + mat = np.eye(3) + for axis in [None, 0, 1]: + tgt = np.percentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + res = np.nanpercentile(mat, 70, axis=axis, out=None, + overwrite_input=False) + assert_(res.ndim == tgt.ndim) + + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + with suppress_warnings() as sup: + sup.filter(RuntimeWarning) + res = np.nanpercentile(d, 90, axis=None, keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True) + assert_equal(res.shape, (1, 5, 7, 1)) + res = np.nanpercentile(d, 90, axis=(1,), keepdims=True) + assert_equal(res.shape, (3, 1, 7, 11)) + res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 1, 1)) + res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True) + assert_equal(res.shape, (1, 1, 7, 1)) + + @pytest.mark.parametrize('q', [7, [1, 7]]) + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1,), + (0, 1), + (-3, -1), + ] + ) + @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning") + def test_keepdims_out(self, q, axis): + d = np.ones((3, 5, 7, 11)) + # Randomly set some elements to NaN: + w = np.random.random((4, 200)) * np.array(d.shape)[:, None] + w = w.astype(np.intp) + d[tuple(w)] = np.nan + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + shape_out = np.shape(q) + shape_out + + out = np.empty(shape_out) + result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + @pytest.mark.parametrize("weighted", [False, True]) + def test_out(self, weighted): + mat = np.random.rand(3, 3) + nan_mat = np.insert(mat, [0, 2], np.nan, axis=1) + resout = np.zeros(3) + if weighted: + w_args = {"weights": np.ones_like(mat), "method": "inverted_cdf"} + nan_w_args = { + "weights": np.ones_like(nan_mat), "method": "inverted_cdf" + } + else: + w_args = {} + nan_w_args = {} + tgt = np.percentile(mat, 42, axis=1, **w_args) + res = np.nanpercentile(nan_mat, 42, axis=1, out=resout, **nan_w_args) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + # 0-d output: + resout = np.zeros(()) + tgt = np.percentile(mat, 42, axis=None, **w_args) + res = np.nanpercentile( + nan_mat, 42, axis=None, out=resout, **nan_w_args + ) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + res = np.nanpercentile( + nan_mat, 42, axis=(0, 1), out=resout, **nan_w_args + ) + assert_almost_equal(res, resout) + assert_almost_equal(res, tgt) + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) + + @pytest.mark.parametrize("weighted", [False, 
True]) + @pytest.mark.parametrize("use_out", [False, True]) + def test_result_values(self, weighted, use_out): + if weighted: + percentile = partial(np.percentile, method="inverted_cdf") + nanpercentile = partial(np.nanpercentile, method="inverted_cdf") + + def gen_weights(d): + return np.ones_like(d) + + else: + percentile = np.percentile + nanpercentile = np.nanpercentile + + def gen_weights(d): + return None + + tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat] + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, 28, axis=1, + weights=gen_weights(_ndat), out=out) + assert_almost_equal(res, tgt) + # Transpose the array to fit the output convention of numpy.percentile + tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d)) + for d in _rdat]) + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, (28, 98), axis=1, + weights=gen_weights(_ndat), out=out) + assert_almost_equal(res, tgt) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanpercentile(array, 60, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + + def test_empty(self): + mat = np.zeros((0, 3)) + for axis in [0, None]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all()) + assert_(len(w) == 1) + assert_(issubclass(w[0].category, RuntimeWarning)) + for axis in [1]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([])) + assert_(len(w) == 0) + + def test_scalar(self): + assert_equal(np.nanpercentile(0., 100), 0.) 
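+        # NOTE: reducing the 1-D input below over axis=0 removes its only
+        # axis, so (assuming the default "linear" method) the result is a
+        # plain scalar: np.nanpercentile(np.arange(6), 50) gives 2.5, which
+        # is exactly what the following asserts check.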
+ a = np.arange(6) + r = np.nanpercentile(a, 50, axis=0) + assert_equal(r, 2.5) + assert_(np.isscalar(r)) + + def test_extended_axis_invalid(self): + d = np.ones((3, 5, 7, 11)) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=-5) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, -5)) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=4) + assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, 4)) + assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1)) + + def test_multiple_percentiles(self): + perc = [50, 100] + mat = np.ones((4, 3)) + nan_mat = np.nan * mat + # For checking consistency in higher dimensional case + large_mat = np.ones((3, 4, 5)) + large_mat[:, 0:2:4, :] = 0 + large_mat[:, :, 3:] *= 2 + for axis in [None, 0, 1]: + for keepdim in [False, True]: + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "All-NaN slice encountered") + val = np.percentile(mat, perc, axis=axis, keepdims=keepdim) + nan_val = np.nanpercentile(nan_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val.shape, val.shape) + + val = np.percentile(large_mat, perc, axis=axis, + keepdims=keepdim) + nan_val = np.nanpercentile(large_mat, perc, axis=axis, + keepdims=keepdim) + assert_equal(nan_val, val) + + megamat = np.ones((3, 4, 5, 6)) + assert_equal( + np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6) + ) + + @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200]) + def test_nan_value_with_weight(self, nan_weight): + x = [1, np.nan, 2, 3] + result = np.float64(2.0) + q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf") + assert_equal(q_unweighted, result) + + # The weight value at the nan position should not matter. + w = [1.0, nan_weight, 1.0, 1.0] + q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf") + assert_equal(q_weighted, result) + + @pytest.mark.parametrize("axis", [0, 1, 2]) + def test_nan_value_with_weight_ndim(self, axis): + # Create a multi-dimensional array to test + np.random.seed(1) + x_no_nan = np.random.random(size=(100, 99, 2)) + # Set some places to NaN (not particularly smart) so there is always + # some non-Nan. + x = x_no_nan.copy() + x[np.arange(99), np.arange(99), 0] = np.nan + + p = np.array([[20., 50., 30], [70, 33, 80]]) + + # We just use ones as weights, but replace it with 0 or 1e200 at the + # NaN positions below. + weights = np.ones_like(x) + + # For comparison use weighted normal percentile with nan weights at + # 0 (and no NaNs); not sure this is strictly identical but should be + # sufficiently so (if a percentile lies exactly on a 0 value). 
+ weights[np.isnan(x)] = 0 + p_expected = np.percentile( + x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf") + + p_unweighted = np.nanpercentile( + x, p, axis=axis, method="inverted_cdf") + # The normal and unweighted versions should be identical: + assert_equal(p_unweighted, p_expected) + + weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter + p_weighted = np.nanpercentile( + x, p, axis=axis, weights=weights, method="inverted_cdf") + assert_equal(p_weighted, p_expected) + # Also check with out passed: + out = np.empty_like(p_weighted) + res = np.nanpercentile( + x, p, axis=axis, weights=weights, out=out, method="inverted_cdf") + + assert res is out + assert_equal(out, p_expected) + + +class TestNanFunctions_Quantile: + # most of this is already tested by TestPercentile + + @pytest.mark.parametrize("weighted", [False, True]) + def test_regression(self, weighted): + ar = np.arange(24).reshape(2, 3, 4).astype(float) + ar[0][1] = np.nan + if weighted: + w_args = {"weights": np.ones_like(ar), "method": "inverted_cdf"} + else: + w_args = {} + + assert_equal(np.nanquantile(ar, q=0.5, **w_args), + np.nanpercentile(ar, q=50, **w_args)) + assert_equal(np.nanquantile(ar, q=0.5, axis=0, **w_args), + np.nanpercentile(ar, q=50, axis=0, **w_args)) + assert_equal(np.nanquantile(ar, q=0.5, axis=1, **w_args), + np.nanpercentile(ar, q=50, axis=1, **w_args)) + assert_equal(np.nanquantile(ar, q=[0.5], axis=1, **w_args), + np.nanpercentile(ar, q=[50], axis=1, **w_args)) + assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1, **w_args), + np.nanpercentile(ar, q=[25, 50, 75], axis=1, **w_args)) + + def test_basic(self): + x = np.arange(8) * 0.5 + assert_equal(np.nanquantile(x, 0), 0.) + assert_equal(np.nanquantile(x, 1), 3.5) + assert_equal(np.nanquantile(x, 0.5), 1.75) + + def test_complex(self): + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F') + assert_raises(TypeError, np.nanquantile, arr_c, 0.5) + + def test_no_p_overwrite(self): + # this is worth retesting, because quantile does not make a copy + p0 = np.array([0, 0.75, 0.25, 0.5, 1.0]) + p = p0.copy() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + p0 = p0.tolist() + p = p.tolist() + np.nanquantile(np.arange(100.), p, method="midpoint") + assert_array_equal(p, p0) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize("dtype", np.typecodes["Float"]) + @pytest.mark.parametrize("array", [ + np.array(np.nan), + np.full((3, 3), np.nan), + ], ids=["0d", "2d"]) + def test_allnans(self, axis, dtype, array): + if axis is not None and array.ndim == 0: + pytest.skip("`axis != None` not supported for 0d arrays") + + array = array.astype(dtype) + with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"): + out = np.nanquantile(array, 1, axis=axis) + assert np.isnan(out).all() + assert out.dtype == array.dtype + +@pytest.mark.parametrize("arr, expected", [ + # array of floats with some nans + (np.array([np.nan, 5.0, np.nan, np.inf]), + np.array([False, True, False, True])), + # int64 array that can't possibly have nans + (np.array([1, 5, 7, 9], dtype=np.int64), + True), + # bool array that can't possibly have nans + (np.array([False, True, False, True]), + True), + # 2-D complex array with nans + 
(np.array([[np.nan, 5.0], + [np.nan, np.inf]], dtype=np.complex64), + np.array([[False, True], + [False, True]])), + ]) +def test__nan_mask(arr, expected): + for out in [None, np.empty(arr.shape, dtype=np.bool)]: + actual = _nan_mask(arr, out=out) + assert_equal(actual, expected) + # the above won't distinguish between True proper + # and an array of True values; we want True proper + # for types that can't possibly contain NaN + if type(expected) is not np.ndarray: + assert actual is True + + +def test__replace_nan(): + """ Test that _replace_nan returns the original array if there are no + NaNs, not a copy. + """ + for dtype in [np.bool, np.int32, np.int64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 0) + assert mask is None + # do not make a copy if there are no nans + assert result is arr + + for dtype in [np.float32, np.float64]: + arr = np.array([0, 1], dtype=dtype) + result, mask = _replace_nan(arr, 2) + assert (mask == False).all() + # mask is not None, so we make a copy + assert result is not arr + assert_equal(result, arr) + + arr_nan = np.array([0, 1, np.nan], dtype=dtype) + result_nan, mask_nan = _replace_nan(arr_nan, 2) + assert_equal(mask_nan, np.array([False, False, True])) + assert result_nan is not arr_nan + assert_equal(result_nan, np.array([0, 1, 2])) + assert np.isnan(arr_nan[-1]) + + +def test_memmap_takes_fast_route(tmpdir): + # We want memory mapped arrays to take the fast route through nanmax, + # which avoids creating a mask by using fmax.reduce (see gh-28721). So we + # check that on bad input, the error is from fmax (rather than maximum). + a = np.arange(10., dtype=float) + with open(tmpdir.join("data.bin"), "w+b") as fh: + fh.write(a.tobytes()) + mm = np.memmap(fh, dtype=a.dtype, shape=a.shape) + with pytest.raises(ValueError, match="reduction operation fmax"): + np.nanmax(mm, out=np.zeros(2)) + # For completeness, same for nanmin. + with pytest.raises(ValueError, match="reduction operation fmin"): + np.nanmin(mm, out=np.zeros(2)) diff --git a/python/numpy/lib/tests/test_packbits.py b/python/numpy/lib/tests/test_packbits.py new file mode 100644 index 000000000..0b0e9d185 --- /dev/null +++ b/python/numpy/lib/tests/test_packbits.py @@ -0,0 +1,376 @@ +from itertools import chain + +import pytest + +import numpy as np +from numpy.testing import assert_array_equal, assert_equal, assert_raises + + +def test_packbits(): + # Copied from the docstring. + a = [[[1, 0, 1], [0, 1, 0]], + [[1, 1, 0], [0, 0, 1]]] + for dt in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dt) + b = np.packbits(arr, axis=-1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]])) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_empty(): + shapes = [ + (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0), + (0, 0, 20), (0, 0, 0), + ] + for dt in '?bBhHiIlLqQ': + for shape in shapes: + a = np.empty(shape, dtype=dt) + b = np.packbits(a) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, (0,)) + + +def test_packbits_empty_with_axis(): + # Original shapes and lists of packed shapes for different axes. 
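+    # Packing along an axis of length n yields ceil(n / 8) bytes, e.g.
+    # ceil(10 / 8) == 2 and ceil(20 / 8) == 3; zero-length axes stay empty.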
+ shapes = [ + ((0,), [(0,)]), + ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]), + ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]), + ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]), + ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]), + ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]), + ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]), + ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]), + ] + for dt in '?bBhHiIlLqQ': + for in_shape, out_shapes in shapes: + for ax, out_shape in enumerate(out_shapes): + a = np.empty(in_shape, dtype=dt) + b = np.packbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + +@pytest.mark.parametrize('bitorder', ('little', 'big')) +def test_packbits_large(bitorder): + # test data large enough for 16 byte vectorization + a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, + 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, + 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, + 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, + 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0]) + a = a.repeat(3) + for dtype in '?bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + b = np.packbits(arr, axis=None, bitorder=bitorder) + assert_equal(b.dtype, np.uint8) + r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252, + 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255, + 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63, + 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112, + 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1, + 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15, + 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227, + 129, 248, 227, 129, 199, 31, 128] + if bitorder == 'big': + assert_array_equal(b, r) + # equal for size being multiple of 8 + assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a) + + # check last byte of different remainders (16 byte vectorization) + b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)] + assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0, 199, + 198, 196, 192]) + + arr = arr.reshape(36, 25) + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195, + 199, 206, 204, 204, 140, 140, 136, 136, 8, 40, 105, + 107, 75, 74, 88], + [72, 216, 248, 241, 227, 195, 202, 90, 90, 83, + 83, 119, 127, 109, 73, 64, 208, 244, 189, 45, + 41, 104, 122, 90, 18], + [113, 120, 248, 216, 152, 24, 60, 52, 182, 150, + 150, 150, 146, 210, 210, 246, 255, 255, 223, + 151, 21, 17, 17, 131, 163], + [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, + 92, 78, 110, 39, 181, 149, 220, 222, 218, 218, + 202, 234, 170, 168], + [0, 128, 128, 192, 80, 112, 48, 160, 160, 224, + 240, 208, 144, 128, 160, 224, 240, 208, 144, + 144, 176, 240, 224, 192, 
128]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 127, 192, 0], + [ 7, 252, 15, 128], + [240, 0, 28, 0], + [255, 128, 0, 128], + [192, 31, 255, 128], + [142, 63, 0, 0], + [255, 240, 7, 0], + [ 7, 224, 14, 0], + [126, 0, 224, 0], + [255, 255, 199, 0], + [ 56, 28, 126, 0], + [113, 248, 227, 128], + [227, 142, 63, 0], + [ 0, 28, 112, 0], + [ 15, 248, 3, 128], + [ 28, 126, 56, 0], + [ 56, 255, 241, 128], + [240, 7, 224, 0], + [227, 129, 192, 128], + [255, 255, 254, 0], + [126, 0, 224, 0], + [ 3, 241, 248, 0], + [ 0, 255, 241, 128], + [128, 0, 255, 128], + [224, 1, 255, 128], + [248, 252, 126, 0], + [ 0, 7, 3, 128], + [224, 113, 248, 0], + [ 0, 252, 127, 128], + [142, 63, 224, 0], + [224, 14, 63, 0], + [ 7, 3, 128, 0], + [113, 255, 255, 128], + [ 28, 113, 199, 0], + [ 7, 227, 142, 0], + [ 14, 56, 252, 0]]) + + arr = arr.T.copy() + b = np.packbits(arr, axis=0) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255, + 56, 113, 227, 0, 15, 28, 56, 240, 227, 255, + 126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224, + 7, 113, 28, 7, 14], + [127, 252, 0, 128, 31, 63, 240, 224, 0, 255, + 28, 248, 142, 28, 248, 126, 255, 7, 129, 255, + 0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14, + 3, 255, 113, 227, 56], + [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126, + 227, 63, 112, 3, 56, 241, 224, 192, 254, 224, + 248, 241, 255, 255, 126, 3, 248, 127, 224, 63, + 128, 255, 199, 142, 252], + [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0, + 0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, + 128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]]) + + b = np.packbits(arr, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, [[190, 72, 113, 214, 0], + [186, 216, 120, 210, 128], + [178, 248, 248, 210, 128], + [178, 241, 216, 64, 192], + [150, 227, 152, 68, 80], + [215, 195, 24, 5, 112], + [ 87, 202, 60, 5, 48], + [ 83, 90, 52, 1, 160], + [ 83, 90, 182, 72, 160], + [195, 83, 150, 88, 224], + [199, 83, 150, 92, 240], + [206, 119, 150, 92, 208], + [204, 127, 146, 78, 144], + [204, 109, 210, 110, 128], + [140, 73, 210, 39, 160], + [140, 64, 246, 181, 224], + [136, 208, 255, 149, 240], + [136, 244, 255, 220, 208], + [ 8, 189, 223, 222, 144], + [ 40, 45, 151, 218, 144], + [105, 41, 21, 218, 176], + [107, 104, 17, 202, 240], + [ 75, 122, 17, 234, 224], + [ 74, 90, 131, 170, 192], + [ 88, 18, 163, 168, 128]]) + + # result is the same if input is multiplied with a nonzero value + for dtype in 'bBhHiIlLqQ': + arr = np.array(a, dtype=dtype) + rnd = np.random.randint(low=np.iinfo(dtype).min, + high=np.iinfo(dtype).max, size=arr.size, + dtype=dtype) + rnd[rnd == 0] = 1 + arr *= rnd.astype(dtype) + b = np.packbits(arr, axis=-1) + assert_array_equal(np.unpackbits(b)[:-4], a) + + assert_raises(TypeError, np.packbits, np.array(a, dtype=float)) + + +def test_packbits_very_large(): + # test some with a larger arrays gh-8637 + # code is covered earlier but larger array makes crash on bug more likely + for s in range(950, 1050): + for dt in '?bBhHiIlLqQ': + x = np.ones((200, s), dtype=bool) + np.packbits(x, axis=1) + + +def test_unpackbits(): + # Copied from the docstring. 
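+    # Each uint8 unpacks to 8 bits, most significant bit first by default,
+    # e.g.:
+    #   >>> np.unpackbits(np.array([2], dtype=np.uint8))
+    #   array([0, 0, 0, 0, 0, 0, 1, 0], dtype=uint8)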
+ a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 1, 1], + [0, 0, 0, 1, 0, 1, 1, 1]])) + +def test_pack_unpack_order(): + a = np.array([[2], [7], [23]], dtype=np.uint8) + b = np.unpackbits(a, axis=1) + assert_equal(b.dtype, np.uint8) + b_little = np.unpackbits(a, axis=1, bitorder='little') + b_big = np.unpackbits(a, axis=1, bitorder='big') + assert_array_equal(b, b_big) + assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little')) + assert_array_equal(b[:, ::-1], b_little) + assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big')) + assert_raises(ValueError, np.unpackbits, a, bitorder='r') + assert_raises(TypeError, np.unpackbits, a, bitorder=10) + + +def test_unpackbits_empty(): + a = np.empty((0,), dtype=np.uint8) + b = np.unpackbits(a) + assert_equal(b.dtype, np.uint8) + assert_array_equal(b, np.empty((0,))) + + +def test_unpackbits_empty_with_axis(): + # Lists of packed shapes for different axes and unpacked shapes. + shapes = [ + ([(0,)], (0,)), + ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)), + ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)), + ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)), + ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)), + ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)), + ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)), + ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)), + ] + for in_shapes, out_shape in shapes: + for ax, in_shape in enumerate(in_shapes): + a = np.empty(in_shape, dtype=np.uint8) + b = np.unpackbits(a, axis=ax) + assert_equal(b.dtype, np.uint8) + assert_equal(b.shape, out_shape) + + +def test_unpackbits_large(): + # test all possible numbers via comparison to already tested packbits + d = np.arange(277, dtype=np.uint8) + assert_array_equal(np.packbits(np.unpackbits(d)), d) + assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2]) + d = np.tile(d, (3, 1)) + assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d) + d = d.T.copy() + assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) + + +class TestCount: + x = np.array([ + [1, 0, 1, 0, 0, 1, 0], + [0, 1, 1, 1, 0, 0, 0], + [0, 0, 1, 0, 0, 1, 1], + [1, 1, 0, 0, 0, 1, 1], + [1, 0, 1, 0, 1, 0, 1], + [0, 0, 1, 1, 1, 0, 0], + [0, 1, 0, 1, 0, 1, 0], + ], dtype=np.uint8) + padded1 = np.zeros(57, dtype=np.uint8) + padded1[:49] = x.ravel() + padded1b = np.zeros(57, dtype=np.uint8) + padded1b[:49] = x[::-1].copy().ravel() + padded2 = np.zeros((9, 9), dtype=np.uint8) + padded2[:7, :7] = x + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1))) + def test_roundtrip(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + # test complete invertibility of packbits and unpackbits with count + packed = np.packbits(self.x, bitorder=bitorder) + unpacked = np.unpackbits(packed, count=count, bitorder=bitorder) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + ]) + def test_count(self, kwargs): + packed = np.packbits(self.x) + unpacked = np.unpackbits(packed, **kwargs) + assert_equal(unpacked.dtype, np.uint8) + assert_array_equal(unpacked, self.padded1[:-1]) + + @pytest.mark.parametrize('bitorder', ('little', 'big')) + # delta==-1 when count<0 because 
one extra zero of padding + @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1))) + def test_roundtrip_axis(self, bitorder, count): + if count < 0: + # one extra zero of padding + cutoff = count - 1 + else: + cutoff = count + packed0 = np.packbits(self.x, axis=0, bitorder=bitorder) + unpacked0 = np.unpackbits(packed0, axis=0, count=count, + bitorder=bitorder) + assert_equal(unpacked0.dtype, np.uint8) + assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1, bitorder=bitorder) + unpacked1 = np.unpackbits(packed1, axis=1, count=count, + bitorder=bitorder) + assert_equal(unpacked1.dtype, np.uint8) + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff]) + + @pytest.mark.parametrize('kwargs', [ + {}, {'count': None}, + {'bitorder': 'little'}, + {'bitorder': 'little', 'count': None}, + {'bitorder': 'big'}, + {'bitorder': 'big', 'count': None}, + ]) + def test_axis_count(self, kwargs): + packed0 = np.packbits(self.x, axis=0) + unpacked0 = np.unpackbits(packed0, axis=0, **kwargs) + assert_equal(unpacked0.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]]) + else: + assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]]) + + packed1 = np.packbits(self.x, axis=1) + unpacked1 = np.unpackbits(packed1, axis=1, **kwargs) + assert_equal(unpacked1.dtype, np.uint8) + if kwargs.get('bitorder', 'big') == 'big': + assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1]) + else: + assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1]) + + def test_bad_count(self): + packed0 = np.packbits(self.x, axis=0) + assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9) + packed1 = np.packbits(self.x, axis=1) + assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9) + packed = np.packbits(self.x) + assert_raises(ValueError, np.unpackbits, packed, count=-57) diff --git a/python/numpy/lib/tests/test_polynomial.py b/python/numpy/lib/tests/test_polynomial.py new file mode 100644 index 000000000..c173ac321 --- /dev/null +++ b/python/numpy/lib/tests/test_polynomial.py @@ -0,0 +1,320 @@ +import pytest + +import numpy as np +import numpy.polynomial.polynomial as poly +from numpy.testing import ( + assert_, + assert_allclose, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + +# `poly1d` has some support for `np.bool` and `np.timedelta64`, +# but it is limited and they are therefore excluded here +TYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O" + + +class TestPolynomial: + def test_poly1d_str_and_repr(self): + p = np.poly1d([1., 2, 3]) + assert_equal(repr(p), 'poly1d([1., 2., 3.])') + assert_equal(str(p), + ' 2\n' + '1 x + 2 x + 3') + + q = np.poly1d([3., 2, 1]) + assert_equal(repr(q), 'poly1d([3., 2., 1.])') + assert_equal(str(q), + ' 2\n' + '3 x + 2 x + 1') + + r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j]) + assert_equal(str(r), + ' 3 2\n' + '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)') + + assert_equal(str(np.poly1d([-3, -2, -1])), + ' 2\n' + '-3 x - 2 x - 1') + + def test_poly1d_resolution(self): + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p(0), 3.0) + assert_equal(p(5), 38.0) + assert_equal(q(0), 1.0) + assert_equal(q(5), 86.0) + + def test_poly1d_math(self): + # here we use some simple coeffs to make calculations easier + p = np.poly1d([1., 2, 4]) + q = 
np.poly1d([4., 2, 1]) + assert_equal(p / q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75]))) + assert_equal(p.integ(), np.poly1d([1 / 3, 1., 4., 0.])) + assert_equal(p.integ(1), np.poly1d([1 / 3, 1., 4., 0.])) + + p = np.poly1d([1., 2, 3]) + q = np.poly1d([3., 2, 1]) + assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.])) + assert_equal(p + q, np.poly1d([4., 4., 4.])) + assert_equal(p - q, np.poly1d([-2., 0., 2.])) + assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.])) + assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.])) + assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.])) + assert_equal(p.deriv(), np.poly1d([2., 2.])) + assert_equal(p.deriv(2), np.poly1d([2.])) + assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])), + (np.poly1d([1., -1.]), np.poly1d([0.]))) + + @pytest.mark.parametrize("type_code", TYPE_CODES) + def test_poly1d_misc(self, type_code: str) -> None: + dtype = np.dtype(type_code) + ar = np.array([1, 2, 3], dtype=dtype) + p = np.poly1d(ar) + + # `__eq__` + assert_equal(np.asarray(p), ar) + assert_equal(np.asarray(p).dtype, dtype) + assert_equal(len(p), 2) + + # `__getitem__` + comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0} + for index, ref in comparison_dct.items(): + scalar = p[index] + assert_equal(scalar, ref) + if dtype == np.object_: + assert isinstance(scalar, int) + else: + assert_equal(scalar.dtype, dtype) + + def test_poly1d_variable_arg(self): + q = np.poly1d([1., 2, 3], variable='y') + assert_equal(str(q), + ' 2\n' + '1 y + 2 y + 3') + q = np.poly1d([1., 2, 3], variable='lambda') + assert_equal(str(q), + ' 2\n' + '1 lambda + 2 lambda + 3') + + def test_poly(self): + assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]), + [1, -3, -2, 6]) + + # From matlab docs + A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]] + assert_array_almost_equal(np.poly(A), [1, -6, -72, -27]) + + # Should produce real output for perfect conjugates + assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j]))) + assert_(np.isrealobj(np.poly([0 + 1j, -0 + -1j, 1 + 2j, + 1 - 2j, 1. 
+ 3.5j, 1 - 3.5j])))
+        assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j, 1 + 3j, 1 - 3.j])))
+        assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j])))
+        assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))
+        assert_(np.isrealobj(np.poly([1j, -1j])))
+        assert_(np.isrealobj(np.poly([1, -1])))
+
+        assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))
+
+        np.random.seed(42)
+        a = np.random.randn(100) + 1j * np.random.randn(100)
+        assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))
+
+    def test_roots(self):
+        assert_array_equal(np.roots([1, 0, 0]), [0, 0])
+
+        # Test larger root values. The expected precision is adapted to the
+        # root magnitude to absorb numerical error in the calculation.
+        for i in np.logspace(10, 25, num=1000, base=10):
+            tgt = np.array([-1, 1, i])
+            res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))
+            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
+
+        for i in np.logspace(10, 25, num=1000, base=10):
+            tgt = np.array([-1, 1.01, i])
+            res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))
+            assert_almost_equal(res, tgt, 14 - int(np.log10(i)))
+
+    def test_str_leading_zeros(self):
+        p = np.poly1d([4, 3, 2, 1])
+        p[3] = 0
+        assert_equal(str(p),
+                     "   2\n"
+                     "3 x + 2 x + 1")
+
+        p = np.poly1d([1, 2])
+        p[0] = 0
+        p[1] = 0
+        assert_equal(str(p), " \n0")
+
+    def test_polyfit(self):
+        c = np.array([3., 2., 1.])
+        x = np.linspace(0, 2, 7)
+        y = np.polyval(c, x)
+        err = [1, -1, 1, -1, 1, -1, 1]
+        weights = np.arange(8, 1, -1)**2 / 7.0
+
+        # Check exception when too few points for variance estimate. Note that
+        # the estimate requires the number of data points to exceed
+        # degree + 1
+        assert_raises(ValueError, np.polyfit,
+                      [1], [1], deg=0, cov=True)
+
+        # check 1D case
+        m, cov = np.polyfit(x, y + err, 2, cov=True)
+        est = [3.8571, 0.2857, 1.619]
+        assert_almost_equal(est, m, decimal=4)
+        val0 = [[ 1.4694, -2.9388, 0.8163],
+                [-2.9388, 6.3673, -2.1224],
+                [ 0.8163, -2.1224, 1.161 ]]  # noqa: E202
+        assert_almost_equal(val0, cov, decimal=4)
+
+        m2, cov2 = np.polyfit(x, y + err, 2, w=weights, cov=True)
+        assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
+        val = [[ 4.3964, -5.0052, 0.4878],
+               [-5.0052, 6.8067, -0.9089],
+               [ 0.4878, -0.9089, 0.3337]]
+        assert_almost_equal(val, cov2, decimal=4)
+
+        m3, cov3 = np.polyfit(x, y + err, 2, w=weights, cov="unscaled")
+        assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
+        val = [[ 0.1473, -0.1677, 0.0163],
+               [-0.1677, 0.228 , -0.0304],  # noqa: E203
+               [ 0.0163, -0.0304, 0.0112]]
+        assert_almost_equal(val, cov3, decimal=4)
+
+        # check 2D (n,1) case
+        y = y[:, np.newaxis]
+        c = c[:, np.newaxis]
+        assert_almost_equal(c, np.polyfit(x, y, 2))
+        # check 2D (n,2) case
+        yy = np.concatenate((y, y), axis=1)
+        cc = np.concatenate((c, c), axis=1)
+        assert_almost_equal(cc, np.polyfit(x, yy, 2))
+
+        m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)
+        assert_almost_equal(est, m[:, 0], decimal=4)
+        assert_almost_equal(est, m[:, 1], decimal=4)
+        assert_almost_equal(val0, cov[:, :, 0], decimal=4)
+        assert_almost_equal(val0, cov[:, :, 1], decimal=4)
+
+        # check order 1 (deg=0) case, where the analytic results are simple
+        np.random.seed(123)
+        y = np.random.normal(size=(4, 10000))
+        mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
+        # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
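+        # (A deg=0 fit is just the sample mean of the N=4 draws per column,
+        # so with unit-variance noise its standard error is 1/sqrt(4) = 0.5.)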
+ assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # Without scaling, since reduced chi2 is 1, the result should be the same. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]), + deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.5) + # If we estimate our errors wrong, no change with scaling: + w = np.full(y.shape[0], 1. / 0.5) + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True) + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01) + # But if we do not scale, our estimate for the error in the mean will + # differ. + mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled") + assert_allclose(mean.std(), 0.5, atol=0.01) + assert_almost_equal(np.sqrt(cov.mean()), 0.25) + + def test_objects(self): + from decimal import Decimal + p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')]) + p2 = p * Decimal('1.333333333333333') + assert_(p2[1] == Decimal("3.9999999999999990")) + p2 = p.deriv() + assert_(p2[1] == Decimal('8.0')) + p2 = p.integ() + assert_(p2[3] == Decimal("1.333333333333333333333333333")) + assert_(p2[2] == Decimal('1.5')) + assert_(np.issubdtype(p2.coeffs.dtype, np.object_)) + p = np.poly([Decimal(1), Decimal(2)]) + assert_equal(np.poly([Decimal(1), Decimal(2)]), + [1, Decimal(-3), Decimal(2)]) + + def test_complex(self): + p = np.poly1d([3j, 2j, 1j]) + p2 = p.integ() + assert_((p2.coeffs == [1j, 1j, 1j, 0]).all()) + p2 = p.deriv() + assert_((p2.coeffs == [6j, 2j]).all()) + + def test_integ_coeffs(self): + p = np.poly1d([3, 2, 1]) + p2 = p.integ(3, k=[9, 7, 6]) + assert_( + (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. / 2., 7, 6]).all()) + + def test_zero_dims(self): + try: + np.poly(np.zeros((0, 0))) + except ValueError: + pass + + def test_poly_int_overflow(self): + """ + Regression test for gh-5096. + """ + v = np.arange(1, 21) + assert_almost_equal(np.poly(v), np.poly(np.diag(v))) + + def test_zero_poly_dtype(self): + """ + Regression test for gh-16354. 
+ """ + z = np.array([0, 0, 0]) + p = np.poly1d(z.astype(np.int64)) + assert_equal(p.coeffs.dtype, np.int64) + + p = np.poly1d(z.astype(np.float32)) + assert_equal(p.coeffs.dtype, np.float32) + + p = np.poly1d(z.astype(np.complex64)) + assert_equal(p.coeffs.dtype, np.complex64) + + def test_poly_eq(self): + p = np.poly1d([1, 2, 3]) + p2 = np.poly1d([1, 2, 4]) + assert_equal(p == None, False) # noqa: E711 + assert_equal(p != None, True) # noqa: E711 + assert_equal(p == p, True) + assert_equal(p == p2, False) + assert_equal(p != p2, True) + + def test_polydiv(self): + b = np.poly1d([2, 6, 6, 1]) + a = np.poly1d([-1j, (1 + 2j), -(2 + 1j), 1]) + q, r = np.polydiv(b, a) + assert_equal(q.coeffs.dtype, np.complex128) + assert_equal(r.coeffs.dtype, np.complex128) + assert_equal(q * a + r, b) + + c = [1, 2, 3] + d = np.poly1d([1, 2, 3]) + s, t = np.polydiv(c, d) + assert isinstance(s, np.poly1d) + assert isinstance(t, np.poly1d) + u, v = np.polydiv(d, c) + assert isinstance(u, np.poly1d) + assert isinstance(v, np.poly1d) + + def test_poly_coeffs_mutable(self): + """ Coefficients should be modifiable """ + p = np.poly1d([1, 2, 3]) + + p.coeffs += 1 + assert_equal(p.coeffs, [2, 3, 4]) + + p.coeffs[2] += 10 + assert_equal(p.coeffs, [2, 3, 14]) + + # this never used to be allowed - let's not add features to deprecated + # APIs + assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1)) diff --git a/python/numpy/lib/tests/test_recfunctions.py b/python/numpy/lib/tests/test_recfunctions.py new file mode 100644 index 000000000..eee1f47f8 --- /dev/null +++ b/python/numpy/lib/tests/test_recfunctions.py @@ -0,0 +1,1052 @@ + +import numpy as np +import numpy.ma as ma +from numpy.lib.recfunctions import ( + append_fields, + apply_along_fields, + assign_fields_by_name, + drop_fields, + find_duplicates, + get_fieldstructure, + join_by, + merge_arrays, + recursive_fill_fields, + rename_fields, + repack_fields, + require_fields, + stack_arrays, + structured_to_unstructured, + unstructured_to_structured, +) +from numpy.ma.mrecords import MaskedRecords +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_, assert_raises + +get_fieldspec = np.lib.recfunctions._get_fieldspec +get_names = np.lib.recfunctions.get_names +get_names_flat = np.lib.recfunctions.get_names_flat +zip_descr = np.lib.recfunctions._zip_descr +zip_dtype = np.lib.recfunctions._zip_dtype + + +class TestRecFunctions: + # Misc tests + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array([('A', 1.), ('B', 2.)], + dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_zip_descr(self): + # Test zip_descr + (w, x, y, z) = self.data + + # Std array + test = zip_descr((x, x), flatten=True) + assert_equal(test, + np.dtype([('', int), ('', int)])) + test = zip_descr((x, x), flatten=False) + assert_equal(test, + np.dtype([('', int), ('', int)])) + + # Std & flexible-dtype + test = zip_descr((x, z), flatten=True) + assert_equal(test, + np.dtype([('', int), ('A', '|S3'), ('B', float)])) + test = zip_descr((x, z), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('A', '|S3'), ('B', float)])])) + + # Standard & nested dtype + test = zip_descr((x, w), flatten=True) + assert_equal(test, + np.dtype([('', int), + ('a', int), + ('ba', float), ('bb', int)])) + test = zip_descr((x, w), flatten=False) + assert_equal(test, + np.dtype([('', int), + ('', [('a', 
int), + ('b', [('ba', float), ('bb', int)])])])) + + def test_drop_fields(self): + # Test drop_fields + a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + + # A basic field + test = drop_fields(a, 'a') + control = np.array([((2, 3.0),), ((5, 6.0),)], + dtype=[('b', [('ba', float), ('bb', int)])]) + assert_equal(test, control) + + # Another basic field (but nesting two fields) + test = drop_fields(a, 'b') + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # A nested sub-field + test = drop_fields(a, ['ba', ]) + control = np.array([(1, (3.0,)), (4, (6.0,))], + dtype=[('a', int), ('b', [('bb', int)])]) + assert_equal(test, control) + + # All the nested sub-field from a field: zap that field + test = drop_fields(a, ['ba', 'bb']) + control = np.array([(1,), (4,)], dtype=[('a', int)]) + assert_equal(test, control) + + # dropping all fields results in an array with no fields + test = drop_fields(a, ['a', 'b']) + control = np.array([(), ()], dtype=[]) + assert_equal(test, control) + + def test_rename_fields(self): + # Test rename fields + a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], + dtype=[('a', int), + ('b', [('ba', float), ('bb', (float, 2))])]) + test = rename_fields(a, {'a': 'A', 'bb': 'BB'}) + newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])] + control = a.view(newdtype) + assert_equal(test.dtype, newdtype) + assert_equal(test, control) + + def test_get_names(self): + # Test get_names + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ('ba', 'bb')))) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names(ndtype) + assert_equal(test, ('a', ('b', ()))) + + ndtype = np.dtype([]) + test = get_names(ndtype) + assert_equal(test, ()) + + def test_get_names_flat(self): + # Test get_names_flat + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_names_flat(ndtype) + assert_equal(test, ('A', 'B')) + + ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b', 'ba', 'bb')) + + ndtype = np.dtype([('a', int), ('b', [])]) + test = get_names_flat(ndtype) + assert_equal(test, ('a', 'b')) + + ndtype = np.dtype([]) + test = get_names_flat(ndtype) + assert_equal(test, ()) + + def test_get_fieldstructure(self): + # Test get_fieldstructure + + # No nested fields + ndtype = np.dtype([('A', '|S3'), ('B', float)]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': []}) + + # One 1-nested field + ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + test = get_fieldstructure(ndtype) + assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']}) + + # One 2-nested fields + ndtype = np.dtype([('A', int), + ('B', [('BA', int), + ('BB', [('BBA', int), ('BBB', int)])])]) + test = get_fieldstructure(ndtype) + control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], + 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']} + assert_equal(test, control) + + # 0 fields + ndtype = np.dtype([]) + test = get_fieldstructure(ndtype) + assert_equal(test, {}) + + def test_find_duplicates(self): + # Test find_duplicates + a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')), + (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))], + mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)), + (0, (0, 0)), (1, 
(0, 0)), (0, (1, 0))], + dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 2] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='A', return_index=True) + control = [0, 1, 2, 3, 5] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='B', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BA', return_index=True) + control = [0, 1, 2, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, key='BB', return_index=True) + control = [0, 1, 2, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_find_duplicates_ignoremask(self): + # Test the ignoremask option of find_duplicates + ndtype = [('a', int)] + a = ma.array([1, 1, 1, 2, 2, 3, 3], + mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) + test = find_duplicates(a, ignoremask=True, return_index=True) + control = [0, 1, 3, 4] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + test = find_duplicates(a, ignoremask=False, return_index=True) + control = [0, 1, 2, 3, 4, 6] + assert_equal(sorted(test[-1]), control) + assert_equal(test[0], a[test[-1]]) + + def test_repack_fields(self): + dt = np.dtype('u1,f4,i8', align=True) + a = np.zeros(2, dtype=dt) + + assert_equal(repack_fields(dt), np.dtype('u1,f4,i8')) + assert_equal(repack_fields(a).itemsize, 13) + assert_equal(repack_fields(repack_fields(dt), align=True), dt) + + # make sure type is preserved + dt = np.dtype((np.record, dt)) + assert_(repack_fields(dt).type is np.record) + + def test_structured_to_unstructured(self, tmp_path): + a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) + out = structured_to_unstructured(a) + assert_equal(out, np.zeros((4, 5), dtype='f8')) + + b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1) + assert_equal(out, np.array([3., 5.5, 9., 11.])) + out = np.mean(structured_to_unstructured(b[['x']]), axis=-1) + assert_equal(out, np.array([1., 4. 
, 7., 10.])) # noqa: E203 + + c = np.arange(20).reshape((4, 5)) + out = unstructured_to_structured(c, a.dtype) + want = np.array([( 0, ( 1., 2), [ 3., 4.]), + ( 5, ( 6., 7), [ 8., 9.]), + (10, (11., 12), [13., 14.]), + (15, (16., 17), [18., 19.])], + dtype=[('a', 'i4'), + ('b', [('f0', 'f4'), ('f1', 'u2')]), + ('c', 'f4', (2,))]) + assert_equal(out, want) + + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')]) + assert_equal(apply_along_fields(np.mean, d), + np.array([ 8.0 / 3, 16.0 / 3, 26.0 / 3, 11.])) + assert_equal(apply_along_fields(np.mean, d[['x', 'z']]), + np.array([ 3., 5.5, 9., 11.])) + + # check that for uniform field dtypes we get a view, not a copy: + d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)], + dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing the order of attributes works + dd_attrib_rev = structured_to_unstructured(d[['z', 'x']]) + assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]]) + assert_(np.shares_memory(dd_attrib_rev, d)) + + # including uniform fields with subarrays unpacked + d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]), + (8, [9, 10], [[11, 12], [13, 14]])], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2)))]) + dd = structured_to_unstructured(d) + ddd = unstructured_to_structured(dd, d.dtype) + assert_(np.shares_memory(dd, d)) + assert_(np.shares_memory(ddd, d)) + + # check that reversing with sub-arrays works as expected + d_rev = d[::-1] + dd_rev = structured_to_unstructured(d_rev) + assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14], + [1, 2, 3, 4, 5, 6, 7]]) + + # check that sub-arrays keep the order of their values + d_attrib_rev = d[['x2', 'x1', 'x0']] + dd_attrib_rev = structured_to_unstructured(d_attrib_rev) + assert_equal(dd_attrib_rev, [[4, 5, 6, 7, 2, 3, 1], + [11, 12, 13, 14, 9, 10, 8]]) + + # with ignored field at the end + d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32), + (8, [9, 10], [[11, 12], [13, 14]], 64)], + dtype=[('x0', 'i4'), ('x1', ('i4', 2)), + ('x2', ('i4', (2, 2))), ('ignored', 'u1')]) + dd = structured_to_unstructured(d[['x0', 'x1', 'x2']]) + assert_(np.shares_memory(dd, d)) + assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7], + [8, 9, 10, 11, 12, 13, 14]]) + + # test that nested fields with identical names don't break anything + point = np.dtype([('x', int), ('y', int)]) + triangle = np.dtype([('a', point), ('b', point), ('c', point)]) + arr = np.zeros(10, triangle) + res = structured_to_unstructured(arr, dtype=int) + assert_equal(res, np.zeros((10, 6), dtype=int)) + + # test nested combinations of subarrays and structured arrays, gh-13333 + def subarray(dt, shape): + return np.dtype((dt, shape)) + + def structured(*dts): + return np.dtype([(f'x{i}', dt) for i, dt in enumerate(dts)]) + + def inspect(dt, dtype=None): + arr = np.zeros((), dt) + ret = structured_to_unstructured(arr, dtype=dtype) + backarr = unstructured_to_structured(ret, dt) + return ret.shape, ret.dtype, backarr.dtype + + dt = structured(subarray(structured(np.int32, np.int32), 3)) + assert_equal(inspect(dt), ((6,), np.int32, dt)) + + dt = structured(subarray(subarray(np.int32, 2), 2)) + assert_equal(inspect(dt), ((4,), np.int32, dt)) + + dt = structured(np.int32) + assert_equal(inspect(dt), ((1,), np.int32, dt)) + + dt = structured(np.int32, subarray(subarray(np.int32, 2), 2)) + 
assert_equal(inspect(dt), ((5,), np.int32, dt))
+
+        dt = structured()
+        assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
+
+        # these currently don't work, but we may make them work in the future
+        assert_raises(NotImplementedError, structured_to_unstructured,
+                      np.zeros(3, dt), dtype=np.int32)
+        assert_raises(NotImplementedError, unstructured_to_structured,
+                      np.zeros((3, 0), dtype=np.int32))
+
+        # test supported ndarray subclasses
+        d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')])
+        dd_expected = structured_to_unstructured(d_plain, copy=True)
+
+        # recarray
+        d = d_plain.view(np.recarray)
+
+        dd = structured_to_unstructured(d, copy=False)
+        ddd = structured_to_unstructured(d, copy=True)
+        assert_(np.shares_memory(d, dd))
+        assert_(type(dd) is np.recarray)
+        assert_(type(ddd) is np.recarray)
+        assert_equal(dd, dd_expected)
+        assert_equal(ddd, dd_expected)
+
+        # memmap
+        d = np.memmap(tmp_path / 'memmap',
+                      mode='w+',
+                      dtype=d_plain.dtype,
+                      shape=d_plain.shape)
+        d[:] = d_plain
+        dd = structured_to_unstructured(d, copy=False)
+        ddd = structured_to_unstructured(d, copy=True)
+        assert_(np.shares_memory(d, dd))
+        assert_(type(dd) is np.memmap)
+        assert_(type(ddd) is np.memmap)
+        assert_equal(dd, dd_expected)
+        assert_equal(ddd, dd_expected)
+
+    def test_unstructured_to_structured(self):
+        # The dtype may be given either as the args of np.dtype or as an
+        # np.dtype instance; both forms must give the same result.
+        a = np.zeros((20, 2))
+        test_dtype_args = [('x', float), ('y', float)]
+        test_dtype = np.dtype(test_dtype_args)
+        field1 = unstructured_to_structured(a, dtype=test_dtype_args)  # args
+        field2 = unstructured_to_structured(a, dtype=test_dtype)  # instance
+        assert_equal(field1, field2)
+
+    def test_field_assignment_by_name(self):
+        a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+        newdt = [('b', 'f4'), ('c', 'u1')]
+
+        assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+
+        b = np.array([(1, 2), (3, 4)], dtype=newdt)
+        assign_fields_by_name(a, b, zero_unassigned=False)
+        assert_equal(a, np.array([(1, 1, 2), (1, 3, 4)], dtype=a.dtype))
+        assign_fields_by_name(a, b)
+        assert_equal(a, np.array([(0, 1, 2), (0, 3, 4)], dtype=a.dtype))
+
+        # test nested fields
+        a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
+        newdt = [('a', [('c', 'u1')])]
+        assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+        b = np.array([((2,),), ((3,),)], dtype=newdt)
+        assign_fields_by_name(a, b, zero_unassigned=False)
+        assert_equal(a, np.array([((1, 2),), ((1, 3),)], dtype=a.dtype))
+        assign_fields_by_name(a, b)
+        assert_equal(a, np.array([((0, 2),), ((0, 3),)], dtype=a.dtype))
+
+        # test unstructured code path for 0d arrays
+        a, b = np.array(3), np.array(0)
+        assign_fields_by_name(b, a)
+        assert_equal(b[()], 3)
+
+
+class TestRecursiveFillFields:
+    # Test recursive_fill_fields.
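+    # recursive_fill_fields(input, output) copies each field of `input` into
+    # the matching field of `output`, recursing into nested dtypes; rows of
+    # `output` beyond len(input) keep their existing values, as the
+    # zero-filled third row in the controls below shows.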
+ def test_simple_flexible(self): + # Test recursive_fill_fields on flexible-array + a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + b = np.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = np.array([(1, 10.), (2, 20.), (0, 0.)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + def test_masked_flexible(self): + # Test recursive_fill_fields on masked flexible-array + a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)], + dtype=[('A', int), ('B', float)]) + b = ma.zeros((3,), dtype=a.dtype) + test = recursive_fill_fields(a, b) + control = ma.array([(1, 10.), (2, 20.), (0, 0.)], + mask=[(0, 1), (1, 0), (0, 0)], + dtype=[('A', int), ('B', float)]) + assert_equal(test, control) + + +class TestMergeArrays: + # Test merge_arrays + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array( + [(1, (2, 3.0, ())), (4, (5, 6.0, ()))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test merge_arrays on a single array. + (_, x, _, z) = self.data + + test = merge_arrays(x) + control = np.array([(1,), (2,)], dtype=[('f0', int)]) + assert_equal(test, control) + test = merge_arrays((x,)) + assert_equal(test, control) + + test = merge_arrays(z, flatten=False) + assert_equal(test, z) + test = merge_arrays(z, flatten=True) + assert_equal(test, z) + + def test_solo_w_flatten(self): + # Test merge_arrays on a single array w & w/o flattening + w = self.data[0] + test = merge_arrays(w, flatten=False) + assert_equal(test, w) + + test = merge_arrays(w, flatten=True) + control = np.array([(1, 2, 3.0), (4, 5, 6.0)], + dtype=[('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + def test_standard(self): + # Test standard & standard + # Test merge arrays + (_, x, y, _) = self.data + test = merge_arrays((x, y), usemask=False) + control = np.array([(1, 10), (2, 20), (-1, 30)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + + test = merge_arrays((x, y), usemask=True) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_flatten(self): + # Test standard & flexible + (_, x, _, z) = self.data + test = merge_arrays((x, z), flatten=True) + control = np.array([(1, 'A', 1.), (2, 'B', 2.)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + + test = merge_arrays((x, z), flatten=False) + control = np.array([(1, ('A', 1.)), (2, ('B', 2.))], + dtype=[('f0', int), + ('f1', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + def test_flatten_wflexible(self): + # Test flatten standard & nested + (w, x, _, _) = self.data + test = merge_arrays((x, w), flatten=True) + control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)], + dtype=[('f0', int), + ('a', int), ('ba', float), ('bb', int)]) + assert_equal(test, control) + + test = merge_arrays((x, w), flatten=False) + controldtype = [('f0', int), + ('f1', [('a', int), + ('b', [('ba', float), ('bb', int), ('bc', [])])])] + control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))], + dtype=controldtype) + assert_equal(test, control) + + def test_wmasked_arrays(self): + # Test merge_arrays masked arrays + (_, x, _, _) = self.data + mx = ma.array([1, 2, 3], mask=[1, 0, 0]) + test = 
merge_arrays((x, mx), usemask=True) + control = ma.array([(1, 1), (2, 2), (-1, 3)], + mask=[(0, 1), (0, 0), (1, 0)], + dtype=[('f0', int), ('f1', int)]) + assert_equal(test, control) + test = merge_arrays((x, mx), usemask=True, asrecarray=True) + assert_equal(test, control) + assert_(isinstance(test, MaskedRecords)) + + def test_w_singlefield(self): + # Test single field + test = merge_arrays((np.array([1, 2]).view([('a', int)]), + np.array([10., 20., 30.])),) + control = ma.array([(1, 10.), (2, 20.), (-1, 30.)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('a', int), ('f1', float)]) + assert_equal(test, control) + + def test_w_shorter_flex(self): + # Test merge_arrays w/ a shorter flexndarray. + z = self.data[-1] + + # Fixme, this test looks incomplete and broken + #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + # dtype=[('A', '|S3'), ('B', float), ('C', int)]) + #assert_equal(test, control) + + merge_arrays((z, np.array([10, 20, 30]).view([('C', int)]))) + np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)], + dtype=[('A', '|S3'), ('B', float), ('C', int)]) + + def test_singlerecord(self): + (_, x, y, z) = self.data + test = merge_arrays((x[0], y[0], z[0]), usemask=False) + control = np.array([(1, 10, ('A', 1))], + dtype=[('f0', int), + ('f1', int), + ('f2', [('A', '|S3'), ('B', float)])]) + assert_equal(test, control) + + +class TestAppendFields: + # Test append_fields + + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_append_single(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, 'A', data=[10, 20, 30]) + control = ma.array([(1, 10), (2, 20), (-1, 30)], + mask=[(0, 0), (0, 0), (1, 0)], + dtype=[('f0', int), ('A', int)],) + assert_equal(test, control) + + def test_append_double(self): + # Test simple case + (_, x, _, _) = self.data + test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]]) + control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)], + mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)], + dtype=[('f0', int), ('A', int), ('B', int)],) + assert_equal(test, control) + + def test_append_on_flex(self): + # Test append_fields on flexible type arrays + z = self.data[-1] + test = append_fields(z, 'C', data=[10, 20, 30]) + control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)], + mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('C', int)],) + assert_equal(test, control) + + def test_append_on_nested(self): + # Test append_fields on nested fields + w = self.data[0] + test = append_fields(w, 'C', data=[10, 20, 30]) + control = ma.array([(1, (2, 3.0), 10), + (4, (5, 6.0), 20), + (-1, (-1, -1.), 30)], + mask=[( + 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)], + dtype=[('a', int), + ('b', [('ba', float), ('bb', int)]), + ('C', int)],) + assert_equal(test, control) + + +class TestStackArrays: + # Test stack_arrays + def setup_method(self): + x = np.array([1, 2, ]) + y = np.array([10, 20, 30]) + z = np.array( + [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)]) + w = np.array([(1, (2, 3.0)), (4, (5, 6.0))], + dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + self.data = (w, x, y, z) + + def test_solo(self): + # Test stack_arrays on single arrays + (_, 
x, _, _) = self.data + test = stack_arrays((x,)) + assert_equal(test, x) + assert_(test is x) + + test = stack_arrays(x) + assert_equal(test, x) + assert_(test is x) + + def test_unnamed_fields(self): + # Tests combinations of arrays w/o named fields + (_, x, y, _) = self.data + + test = stack_arrays((x, x), usemask=False) + control = np.array([1, 2, 1, 2]) + assert_equal(test, control) + + test = stack_arrays((x, y), usemask=False) + control = np.array([1, 2, 10, 20, 30]) + assert_equal(test, control) + + test = stack_arrays((y, x), usemask=False) + control = np.array([10, 20, 30, 1, 2]) + assert_equal(test, control) + + def test_unnamed_and_named_fields(self): + # Test combination of arrays w/ & w/o named fields + (_, x, _, z) = self.data + + test = stack_arrays((x, z)) + control = ma.array([(1, -1, -1), (2, -1, -1), + (-1, 'A', 1), (-1, 'B', 2)], + mask=[(0, 1, 1), (0, 1, 1), + (1, 0, 0), (1, 0, 0)], + dtype=[('f0', int), ('A', '|S3'), ('B', float)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, z, x)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ('A', 1, -1), ('B', 2, -1), + (-1, -1, 1), (-1, -1, 2), ], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 1), (0, 0, 1), + (1, 1, 0), (1, 1, 0)], + dtype=[('A', '|S3'), ('B', float), ('f2', int)]) + assert_equal(test, control) + + def test_matching_named_fields(self): + # Test combination of arrays w/ matching field names + (_, x, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + test = stack_arrays((z, zz)) + control = ma.array([('A', 1, -1), ('B', 2, -1), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + test = stack_arrays((z, zz, x)) + ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)] + control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1), + ('a', 10., 100., -1), ('b', 20., 200., -1), + ('c', 30., 300., -1), + (-1, -1, -1, 1), (-1, -1, -1, 2)], + dtype=ndtype, + mask=[(0, 0, 1, 1), (0, 0, 1, 1), + (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1), + (1, 1, 1, 0), (1, 1, 1, 0)]) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_defaults(self): + # Test defaults: no exception raised if keys of defaults are not fields. 
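+        # `defaults` supplies the fill value per field name wherever an input
+        # array lacks that field (z has no 'C', so -9999. is used below);
+        # keys matching no field, such as 'D', are ignored rather than
+        # raising.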
+ (_, _, _, z) = self.data + zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)]) + defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.} + test = stack_arrays((z, zz), defaults=defaults) + control = ma.array([('A', 1, -9999.), ('B', 2, -9999.), + ( + 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], + dtype=[('A', '|S3'), ('B', float), ('C', float)], + mask=[(0, 0, 1), (0, 0, 1), + (0, 0, 0), (0, 0, 0), (0, 0, 0)]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_autoconversion(self): + # Tests autoconversion + adtype = [('A', int), ('B', bool), ('C', float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [('A', int), ('B', float), ('C', float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + test = stack_arrays((a, b), autoconvert=True) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + with assert_raises(TypeError): + stack_arrays((a, b), autoconvert=False) + + def test_checktitles(self): + # Test using titles in the field names + adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype) + bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)] + b = ma.array([(4, 5, 6)], dtype=bdtype) + test = stack_arrays((a, b)) + control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)], + dtype=bdtype) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + + def test_subdtype(self): + z = np.array([ + ('A', 1), ('B', 2) + ], dtype=[('A', '|S3'), ('B', float, (1,))]) + zz = np.array([ + ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.) 
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)]) + + res = stack_arrays((z, zz)) + expected = ma.array( + data=[ + (b'A', [1.0], 0), + (b'B', [2.0], 0), + (b'a', [10.0], 100.0), + (b'b', [20.0], 200.0), + (b'c', [30.0], 300.0)], + mask=[ + (False, [False], True), + (False, [False], True), + (False, [False], False), + (False, [False], False), + (False, [False], False) + ], + dtype=zz.dtype + ) + assert_equal(res.dtype, expected.dtype) + assert_equal(res, expected) + assert_equal(res.mask, expected.mask) + + +class TestJoinBy: + def setup_method(self): + self.a = np.array(list(zip(np.arange(10), np.arange(50, 60), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('c', int)]) + self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75), + np.arange(100, 110))), + dtype=[('a', int), ('b', int), ('d', int)]) + + def test_inner_join(self): + # Basic test of join_by + a, b = self.a, self.b + + test = join_by('a', a, b, jointype='inner') + control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101), + (7, 57, 67, 107, 102), (8, 58, 68, 108, 103), + (9, 59, 69, 109, 104)], + dtype=[('a', int), ('b1', int), ('b2', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_join(self): + a, b = self.a, self.b + + # Fixme, this test is broken + #test = join_by(('a', 'b'), a, b) + #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101), + # (7, 57, 107, 102), (8, 58, 108, 103), + # (9, 59, 109, 104)], + # dtype=[('a', int), ('b', int), + # ('c', int), ('d', int)]) + #assert_equal(test, control) + + join_by(('a', 'b'), a, b) + np.array([(5, 55, 105, 100), (6, 56, 106, 101), + (7, 57, 107, 102), (8, 58, 108, 103), + (9, 59, 109, 104)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + + def test_join_subdtype(self): + # tests the bug in https://stackoverflow.com/q/44769632/102441 + foo = np.array([(1,)], + dtype=[('key', int)]) + bar = np.array([(1, np.array([1, 2, 3]))], + dtype=[('key', int), ('value', 'uint16', 3)]) + res = join_by('key', foo, bar) + assert_equal(res, bar.view(ma.MaskedArray)) + + def test_outer_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'outer') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (5, 65, -1, 100), (6, 56, 106, -1), + (6, 66, -1, 101), (7, 57, 107, -1), + (7, 67, -1, 102), (8, 58, 108, -1), + (8, 68, -1, 103), (9, 59, 109, -1), + (9, 69, -1, 104), (10, 70, -1, 105), + (11, 71, -1, 106), (12, 72, -1, 107), + (13, 73, -1, 108), (14, 74, -1, 109)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 0, 1), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0), + (0, 0, 1, 0), (0, 0, 1, 0)], + dtype=[('a', int), ('b', int), + ('c', int), ('d', int)]) + assert_equal(test, control) + + def test_leftouter_join(self): + a, b = self.a, self.b + + test = join_by(('a', 'b'), a, b, 'leftouter') + control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1), + (2, 52, 102, -1), (3, 53, 103, -1), + (4, 54, 104, -1), (5, 55, 105, -1), + (6, 56, 106, -1), (7, 57, 107, -1), + (8, 58, 108, -1), (9, 59, 109, -1)], + mask=[(0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1), + (0, 0, 0, 1), (0, 0, 0, 1)], + dtype=[('a', int), ('b', int), ('c', int), ('d', int)]) + assert_equal(test, control) + + def 
test_different_field_order(self):
+        # gh-8940
+        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+        # this should not give a FutureWarning:
+        j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
+        assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])
+
+    def test_duplicate_keys(self):
+        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
+        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
+        assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+
+    def test_same_name_different_dtypes_key(self):
+        a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+        b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+        expected_dtype = np.dtype([
+            ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+        res = join_by('key', a, b)
+
+        assert_equal(res.dtype, expected_dtype)
diff --git a/python/numpy/lib/tests/test_shape_base.py b/python/numpy/lib/tests/test_shape_base.py
new file mode 100644
--- /dev/null
+++ b/python/numpy/lib/tests/test_shape_base.py
+import functools
+import sys
+
+import pytest
+
+import numpy as np
+from numpy import apply_along_axis, put_along_axis, take_along_axis
+from numpy.exceptions import AxisError
+from numpy.testing import (
+    assert_,
+    assert_array_equal,
+    assert_equal,
+    assert_raises,
+)
+
+IS_64BIT = sys.maxsize > 2**32
+
+
+def _add_keepdims(func):
+    """ hack in keepdims behavior into a function taking an axis """
+    @functools.wraps(func)
+    def wrapped(a, axis, **kwargs):
+        res = func(a, axis=axis, **kwargs)
+        if axis is None:
+            axis = 0  # res is now a scalar, so we can insert this anywhere
+        return np.expand_dims(res, axis=axis)
+    return wrapped
+
+
+class TestTakeAlongAxis:
+    def test_argequivalent(self):
+        """ Test that arg-function results translate via take_along_axis """
+        from numpy.random import rand
+        a = rand(3, 4, 5)
+
+        funcs = [
+            (np.sort, np.argsort, {}),
+            (_add_keepdims(np.min), _add_keepdims(np.argmin), {}),
+            (_add_keepdims(np.max), _add_keepdims(np.argmax), {}),
+            #(np.partition, np.argpartition, dict(kth=2)),
+        ]
+
+        for func, argfunc, kwargs in funcs:
+            for axis in list(range(a.ndim)) + [None]:
+                a_func = func(a, axis=axis, **kwargs)
+                ai_func = argfunc(a, axis=axis, **kwargs)
+                assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))
+
+    def test_invalid(self):
+        """ Test it errors when indices have too few dimensions """
+        a = np.ones((10, 10))
+        ai = np.ones((10, 2), dtype=np.intp)
+
+        # sanity check
+        take_along_axis(a, ai, axis=1)
+
+        # not enough indices
+        assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)
+        # bool arrays not allowed
+        assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)
+        # float arrays not allowed
+        assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)
+        # invalid axis
+        assert_raises(AxisError, take_along_axis, a, ai, axis=10)
+        # invalid indices
+        assert_raises(ValueError, take_along_axis, a, ai, axis=None)
+
+    def test_empty(self):
+        """ Test everything is ok with empty results, even with inserted dims """
+        a = np.ones((3, 4, 5))
+        ai = np.ones((3, 0, 5), dtype=np.intp)
+
+        actual = take_along_axis(a, ai, axis=1)
+        assert_equal(actual.shape, ai.shape)
+
+    def test_broadcast(self):
+        """ Test that non-indexing dimensions are broadcast in both directions """
+        a = np.ones((3, 4, 1))
+        ai = np.ones((1, 2, 5), dtype=np.intp)
+        actual = take_along_axis(a, ai, axis=1)
+        assert_equal(actual.shape, (3, 2, 5))
+
+
+class TestPutAlongAxis:
+    def test_replace_max(self):
+        a_base = np.array([[10, 30, 20], [60, 40, 50]])
+
+        for axis in list(range(a_base.ndim)) + [None]:
+            # we mutate this in the loop
+            a = a_base.copy()
+
+            # replace the max with a small value
+            i_max = _add_keepdims(np.argmax)(a, axis=axis)
+            put_along_axis(a, i_max, -99, axis=axis)
+
+            # find the new minimum, which should be where the max was
+            i_min = _add_keepdims(np.argmin)(a, axis=axis)
+
+            assert_equal(i_min, i_max)
+
+    def test_broadcast(self):
+        """ Test that non-indexing dimensions are broadcast in both directions """
+        a = np.ones((3, 4, 1))
+        ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4
+        put_along_axis(a, ai, 20, axis=1)
+        assert_equal(take_along_axis(a, ai, axis=1), 20)
+
+    def test_invalid(self):
+        """ Test 
invalid inputs """ + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + indices = np.array([[0], [1]]) + values = np.array([[2], [1]]) + + # sanity check + a = a_base.copy() + put_along_axis(a, indices, values, axis=0) + assert np.all(a == [[2, 2, 2], [1, 1, 1]]) + + # invalid indices + a = a_base.copy() + with assert_raises(ValueError) as exc: + put_along_axis(a, indices, values, axis=None) + assert "single dimension" in str(exc.exception) + + +class TestApplyAlongAxis: + def test_simple(self): + a = np.ones((20, 10), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1])) + + def test_simple101(self): + a = np.ones((10, 101), 'd') + assert_array_equal( + apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1])) + + def test_3d(self): + a = np.arange(27).reshape((3, 3, 3)) + assert_array_equal(apply_along_axis(np.sum, 0, a), + [[27, 30, 33], [36, 39, 42], [45, 48, 51]]) + + def test_preserve_subclass(self): + def double(row): + return row * 2 + + class MyNDArray(np.ndarray): + pass + + m = np.array([[0, 1], [2, 3]]).view(MyNDArray) + expected = np.array([[0, 2], [4, 6]]).view(MyNDArray) + + result = apply_along_axis(double, 0, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + result = apply_along_axis(double, 1, m) + assert_(isinstance(result, MyNDArray)) + assert_array_equal(result, expected) + + def test_subclass(self): + class MinimalSubclass(np.ndarray): + data = 1 + + def minimal_function(array): + return array.data + + a = np.zeros((6, 3)).view(MinimalSubclass) + + assert_array_equal( + apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1]) + ) + + def test_scalar_array(self, cls=np.ndarray): + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(np.sum, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + def test_0d_array(self, cls=np.ndarray): + def sum_to_0d(x): + """ Sum x, returning a 0d array of the same class """ + assert_equal(x.ndim, 1) + return np.squeeze(np.sum(x, keepdims=True)) + a = np.ones((6, 3)).view(cls) + res = apply_along_axis(sum_to_0d, 0, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([6, 6, 6]).view(cls)) + + res = apply_along_axis(sum_to_0d, 1, a) + assert_(isinstance(res, cls)) + assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls)) + + def test_axis_insertion(self, cls=np.ndarray): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + return (x[::-1] * x[1:, None]).view(cls) + + a2d = np.arange(6 * 3).reshape((6, 3)) + + # 2d insertion along first axis + actual = apply_along_axis(f1to2, 0, a2d) + expected = np.stack([ + f1to2(a2d[:, i]) for i in range(a2d.shape[1]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 2d insertion along last axis + actual = apply_along_axis(f1to2, 1, a2d) + expected = np.stack([ + f1to2(a2d[i, :]) for i in range(a2d.shape[0]) + ], axis=0).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + # 3d insertion along middle axis + a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3)) + + actual = apply_along_axis(f1to2, 1, a3d) + expected = np.stack([ + np.stack([ + f1to2(a3d[i, :, j]) for i in range(a3d.shape[0]) + ], axis=0) + for j in range(a3d.shape[2]) + ], axis=-1).view(cls) + assert_equal(type(actual), type(expected)) + assert_equal(actual, expected) + + def test_subclass_preservation(self): + class MinimalSubclass(np.ndarray): + pass + 
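+        # MinimalSubclass carries no extra state, so these reruns only need
+        # to confirm that apply_along_axis keeps returning instances of the
+        # caller's ndarray subclass.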
self.test_scalar_array(MinimalSubclass) + self.test_0d_array(MinimalSubclass) + self.test_axis_insertion(MinimalSubclass) + + def test_axis_insertion_ma(self): + def f1to2(x): + """produces an asymmetric non-square matrix from x""" + assert_equal(x.ndim, 1) + res = x[::-1] * x[1:, None] + return np.ma.masked_where(res % 5 == 0, res) + a = np.arange(6 * 3).reshape((6, 3)) + res = apply_along_axis(f1to2, 0, a) + assert_(isinstance(res, np.ma.masked_array)) + assert_equal(res.ndim, 3) + assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask) + assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask) + assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask) + + def test_tuple_func1d(self): + def sample_1d(x): + return x[1], x[0] + res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]])) + assert_array_equal(res, np.array([[2, 1], [4, 3]])) + + def test_empty(self): + # can't apply_along_axis when there's no chance to call the function + def never_call(x): + assert_(False) # should never be reached + + a = np.empty((0, 0)) + assert_raises(ValueError, np.apply_along_axis, never_call, 0, a) + assert_raises(ValueError, np.apply_along_axis, never_call, 1, a) + + # but it's sometimes ok with some non-zero dimensions + def empty_to_1(x): + assert_(len(x) == 0) + return 1 + + a = np.empty((10, 0)) + actual = np.apply_along_axis(empty_to_1, 1, a) + assert_equal(actual, np.ones(10)) + assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a) + + def test_with_iterable_object(self): + # from issue 5248 + d = np.array([ + [{1, 11}, {2, 22}, {3, 33}], + [{4, 44}, {5, 55}, {6, 66}] + ]) + actual = np.apply_along_axis(lambda a: set.union(*a), 0, d) + expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}]) + + assert_equal(actual, expected) + + # issue 8642 - assert_equal doesn't detect this! 
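+        # (an equal-comparing stand-in such as frozenset would still satisfy
+        # assert_equal, since frozenset({1, 11}) == {1, 11}, so the element
+        # types are compared directly)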
+ for i in np.ndindex(actual.shape): + assert_equal(type(actual[i]), type(expected[i])) + + +class TestApplyOverAxes: + def test_simple(self): + a = np.arange(24).reshape(2, 3, 4) + aoa_a = apply_over_axes(np.sum, a, [0, 2]) + assert_array_equal(aoa_a, np.array([[[60], [92], [124]]])) + + +class TestExpandDims: + def test_functionality(self): + s = (2, 3, 4, 5) + a = np.empty(s) + for axis in range(-5, 4): + b = expand_dims(a, axis) + assert_(b.shape[axis] == 1) + assert_(np.squeeze(b).shape == s) + + def test_axis_tuple(self): + a = np.empty((3, 3, 3)) + assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3) + assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1) + assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1) + assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3) + + def test_axis_out_of_range(self): + s = (2, 3, 4, 5) + a = np.empty(s) + assert_raises(AxisError, expand_dims, a, -6) + assert_raises(AxisError, expand_dims, a, 5) + + a = np.empty((3, 3, 3)) + assert_raises(AxisError, expand_dims, a, (0, -6)) + assert_raises(AxisError, expand_dims, a, (0, 5)) + + def test_repeated_axis(self): + a = np.empty((3, 3, 3)) + assert_raises(ValueError, expand_dims, a, axis=(1, 1)) + + def test_subclasses(self): + a = np.arange(10).reshape((2, 5)) + a = np.ma.array(a, mask=a % 3 == 0) + + expanded = np.expand_dims(a, axis=1) + assert_(isinstance(expanded, np.ma.MaskedArray)) + assert_equal(expanded.shape, (2, 1, 5)) + assert_equal(expanded.mask.shape, (2, 1, 5)) + + +class TestArraySplit: + def test_integer_0_split(self): + a = np.arange(10) + assert_raises(ValueError, array_split, a, 0) + + def test_integer_split(self): + a = np.arange(10) + res = array_split(a, 1) + desired = [np.arange(10)] + compare_results(res, desired) + + res = array_split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + res = array_split(a, 3) + desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)] + compare_results(res, desired) + + res = array_split(a, 4) + desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8), + np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 5) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 10)] + compare_results(res, desired) + + res = array_split(a, 6) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 7) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 8) + desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5), + np.arange(5, 6), np.arange(6, 7), np.arange(7, 8), + np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 9) + desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4), + np.arange(4, 5), np.arange(5, 6), np.arange(6, 7), + np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 10) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + np.arange(6, 7), np.arange(7, 8), np.arange(8, 9), + np.arange(9, 10)] + compare_results(res, desired) + + res = array_split(a, 11) + desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3), + np.arange(3, 4), np.arange(4, 5), np.arange(5, 6), + 
np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),
+                   np.arange(9, 10), np.array([])]
+        compare_results(res, desired)
+
+    def test_integer_split_2D_rows(self):
+        a = np.array([np.arange(10), np.arange(10)])
+        res = array_split(a, 3, axis=0)
+        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+               np.zeros((0, 10))]
+        compare_results(res, tgt)
+        assert_(a.dtype.type is res[-1].dtype.type)
+
+        # Same thing for manual splits:
+        res = array_split(a, [0, 1], axis=0)
+        tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
+               np.array([np.arange(10)])]
+        compare_results(res, tgt)
+        assert_(a.dtype.type is res[-1].dtype.type)
+
+    def test_integer_split_2D_cols(self):
+        a = np.array([np.arange(10), np.arange(10)])
+        res = array_split(a, 3, axis=-1)
+        desired = [np.array([np.arange(4), np.arange(4)]),
+                   np.array([np.arange(4, 7), np.arange(4, 7)]),
+                   np.array([np.arange(7, 10), np.arange(7, 10)])]
+        compare_results(res, desired)
+
+    def test_integer_split_2D_default(self):
+        """ This will fail if we change default axis
+        """
+        a = np.array([np.arange(10), np.arange(10)])
+        res = array_split(a, 3)
+        tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+               np.zeros((0, 10))]
+        compare_results(res, tgt)
+        assert_(a.dtype.type is res[-1].dtype.type)
+        # perhaps should check higher dimensions
+
+    @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+    def test_integer_split_2D_rows_greater_max_int32(self):
+        a = np.broadcast_to([0], (1 << 32, 2))
+        res = array_split(a, 4)
+        chunk = np.broadcast_to([0], (1 << 30, 2))
+        tgt = [chunk] * 4
+        for i in range(len(tgt)):
+            assert_equal(res[i].shape, tgt[i].shape)
+
+    def test_index_split_simple(self):
+        a = np.arange(10)
+        indices = [1, 5, 7]
+        res = array_split(a, indices, axis=-1)
+        desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),
+                   np.arange(7, 10)]
+        compare_results(res, desired)
+
+    def test_index_split_low_bound(self):
+        a = np.arange(10)
+        indices = [0, 5, 7]
+        res = array_split(a, indices, axis=-1)
+        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+                   np.arange(7, 10)]
+        compare_results(res, desired)
+
+    def test_index_split_high_bound(self):
+        a = np.arange(10)
+        indices = [0, 5, 7, 10, 12]
+        res = array_split(a, indices, axis=-1)
+        desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),
+                   np.arange(7, 10), np.array([]), np.array([])]
+        compare_results(res, desired)
+
+
+class TestSplit:
+    # The split function is essentially the same as array_split, except
+    # that it checks that splitting will result in an equal split.
+    # Only that case is tested here.
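+    # A minimal reference sketch of the distinction exercised below, for a
+    # length-l input split into n parts:
+    #
+    #   >>> np.array_split(np.arange(10), 3)  # first l % n chunks get 1 extra
+    #   [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]
+    #   >>> np.split(np.arange(10), 3)
+    #   ValueError: array split does not result in an equal division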
+ + def test_equal_split(self): + a = np.arange(10) + res = split(a, 2) + desired = [np.arange(5), np.arange(5, 10)] + compare_results(res, desired) + + def test_unequal_split(self): + a = np.arange(10) + assert_raises(ValueError, split, a, 3) + + +class TestColumnStack: + def test_non_iterable(self): + assert_raises(TypeError, column_stack, 1) + + def test_1D_arrays(self): + # example from docstring + a = np.array((1, 2, 3)) + b = np.array((2, 3, 4)) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_2D_arrays(self): + # same as hstack 2D docstring example + a = np.array([[1], [2], [3]]) + b = np.array([[2], [3], [4]]) + expected = np.array([[1, 2], + [2, 3], + [3, 4]]) + actual = np.column_stack((a, b)) + assert_equal(actual, expected) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + column_stack(np.arange(3) for _ in range(2)) + + +class TestDstack: + def test_non_iterable(self): + assert_raises(TypeError, dstack, 1) + + def test_0D_array(self): + a = np.array(1) + b = np.array(2) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_1D_array(self): + a = np.array([1]) + b = np.array([2]) + res = dstack([a, b]) + desired = np.array([[[1, 2]]]) + assert_array_equal(res, desired) + + def test_2D_array(self): + a = np.array([[1], [2]]) + b = np.array([[1], [2]]) + res = dstack([a, b]) + desired = np.array([[[1, 1]], [[2, 2, ]]]) + assert_array_equal(res, desired) + + def test_2D_array2(self): + a = np.array([1, 2]) + b = np.array([1, 2]) + res = dstack([a, b]) + desired = np.array([[[1, 1], [2, 2]]]) + assert_array_equal(res, desired) + + def test_generator(self): + with pytest.raises(TypeError, match="arrays to stack must be"): + dstack(np.arange(3) for _ in range(2)) + + +# array_split has more comprehensive test of splitting. +# only do simple test on hsplit, vsplit, and dsplit +class TestHsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, hsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + try: + hsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + res = hsplit(a, 2) + desired = [np.array([1, 2]), np.array([3, 4])] + compare_results(res, desired) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = hsplit(a, 2) + desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])] + compare_results(res, desired) + + +class TestVsplit: + """Only testing for integer splits. + + """ + def test_non_iterable(self): + assert_raises(ValueError, vsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, vsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + try: + vsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + res = vsplit(a, 2) + desired = [np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])] + compare_results(res, desired) + + +class TestDsplit: + # Only testing for integer splits. 
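+    # For reference: dsplit(a, n) is split(a, n, axis=2), which is why every
+    # input below with fewer than 3 dimensions raises. A minimal sketch:
+    #
+    #   >>> a = np.arange(8).reshape(2, 2, 2)
+    #   >>> [s.shape for s in np.dsplit(a, 2)]
+    #   [(2, 2, 1), (2, 2, 1)]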
+ def test_non_iterable(self): + assert_raises(ValueError, dsplit, 1, 1) + + def test_0D_array(self): + a = np.array(1) + assert_raises(ValueError, dsplit, a, 2) + + def test_1D_array(self): + a = np.array([1, 2, 3, 4]) + assert_raises(ValueError, dsplit, a, 2) + + def test_2D_array(self): + a = np.array([[1, 2, 3, 4], + [1, 2, 3, 4]]) + try: + dsplit(a, 2) + assert_(0) + except ValueError: + pass + + def test_3D_array(self): + a = np.array([[[1, 2, 3, 4], + [1, 2, 3, 4]], + [[1, 2, 3, 4], + [1, 2, 3, 4]]]) + res = dsplit(a, 2) + desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]), + np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])] + compare_results(res, desired) + + +class TestSqueeze: + def test_basic(self): + from numpy.random import rand + + a = rand(20, 10, 10, 1, 1) + b = rand(20, 1, 10, 1, 20) + c = rand(1, 1, 20, 10) + assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10))) + assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20))) + assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10))) + + # Squeezing to 0-dim should still give an ndarray + a = [[[1.5]]] + res = np.squeeze(a) + assert_equal(res, 1.5) + assert_equal(res.ndim, 0) + assert_equal(type(res), np.ndarray) + + +class TestKron: + def test_basic(self): + # Using 0-dimensional ndarray + a = np.array(1) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[1, 2], [3, 4]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array(1) + assert_array_equal(np.kron(a, b), k) + + # Using 1-dimensional ndarray + a = np.array([3]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[3, 6], [9, 12]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([3]) + assert_array_equal(np.kron(a, b), k) + + # Using 3-dimensional ndarray + a = np.array([[[1]], [[2]]]) + b = np.array([[1, 2], [3, 4]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + a = np.array([[1, 2], [3, 4]]) + b = np.array([[[1]], [[2]]]) + k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]]) + assert_array_equal(np.kron(a, b), k) + + def test_return_type(self): + class myarray(np.ndarray): + __array_priority__ = 1.0 + + a = np.ones([2, 2]) + ma = myarray(a.shape, a.dtype, a.data) + assert_equal(type(kron(a, a)), np.ndarray) + assert_equal(type(kron(ma, ma)), myarray) + assert_equal(type(kron(a, ma)), myarray) + assert_equal(type(kron(ma, a)), myarray) + + @pytest.mark.parametrize( + "array_class", [np.asarray, np.asmatrix] + ) + def test_kron_smoke(self, array_class): + a = array_class(np.ones([3, 3])) + b = array_class(np.ones([3, 3])) + k = array_class(np.ones([9, 9])) + + assert_array_equal(np.kron(a, b), k) + + def test_kron_ma(self): + x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) + k = np.ma.array(np.diag([1, 4, 4, 16]), + mask=~np.array(np.identity(4), dtype=bool)) + + assert_array_equal(k, np.kron(x, x)) + + @pytest.mark.parametrize( + "shape_a,shape_b", [ + ((1, 1), (1, 1)), + ((1, 2, 3), (4, 5, 6)), + ((2, 2), (2, 2, 2)), + ((1, 0), (1, 1)), + ((2, 0, 2), (2, 2)), + ((2, 0, 0, 2), (2, 0, 2)), + ]) + def test_kron_shape(self, shape_a, shape_b): + a = np.ones(shape_a) + b = np.ones(shape_b) + normalised_shape_a = (1,) * max(0, len(shape_b) - len(shape_a)) + shape_a + normalised_shape_b = (1,) * max(0, len(shape_a) - len(shape_b)) + shape_b + expected_shape = np.multiply(normalised_shape_a, normalised_shape_b) + + k = np.kron(a, b) + assert np.array_equal( + k.shape, expected_shape), "Unexpected shape from kron" + + +class TestTile: + 
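+    # tile repeats the array as a whole, while kron scales element blocks;
+    # the two coincide when kron's first operand is all ones, which is what
+    # test_kroncompare below relies on.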
def test_basic(self): + a = np.array([0, 1, 2]) + b = [[1, 2], [3, 4]] + assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2]) + assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]]) + assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]]) + assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 3, 4]]) + assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]]) + assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4], + [1, 2, 1, 2], [3, 4, 3, 4]]) + + def test_tile_one_repetition_on_array_gh4679(self): + a = np.arange(5) + b = tile(a, 1) + b += 2 + assert_equal(a, np.arange(5)) + + def test_empty(self): + a = np.array([[[]]]) + b = np.array([[], []]) + c = tile(b, 2).shape + d = tile(a, (3, 2, 5)).shape + assert_equal(c, (2, 0)) + assert_equal(d, (3, 2, 0)) + + def test_kroncompare(self): + from numpy.random import randint + + reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)] + shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)] + for s in shape: + b = randint(0, 10, size=s) + for r in reps: + a = np.ones(r, b.dtype) + large = tile(b, r) + klarge = kron(a, b) + assert_equal(large, klarge) + + +class TestMayShareMemory: + def test_basic(self): + d = np.ones((50, 60)) + d2 = np.ones((30, 60, 6)) + assert_(np.may_share_memory(d, d)) + assert_(np.may_share_memory(d, d[::-1])) + assert_(np.may_share_memory(d, d[::2])) + assert_(np.may_share_memory(d, d[1:, ::-1])) + + assert_(not np.may_share_memory(d[::-1], d2)) + assert_(not np.may_share_memory(d[::2], d2)) + assert_(not np.may_share_memory(d[1:, ::-1], d2)) + assert_(np.may_share_memory(d2[1:, ::-1], d2)) + + +# Utility +def compare_results(res, desired): + """Compare lists of arrays.""" + for x, y in zip(res, desired, strict=False): + assert_array_equal(x, y) diff --git a/python/numpy/lib/tests/test_stride_tricks.py b/python/numpy/lib/tests/test_stride_tricks.py new file mode 100644 index 000000000..fe40c953a --- /dev/null +++ b/python/numpy/lib/tests/test_stride_tricks.py @@ -0,0 +1,656 @@ +import pytest + +import numpy as np +from numpy._core._rational_tests import rational +from numpy.lib._stride_tricks_impl import ( + _broadcast_shape, + as_strided, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + sliding_window_view, +) +from numpy.testing import ( + assert_, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) + + +def assert_shapes_correct(input_shapes, expected_shape): + # Broadcast a list of arrays with the given input shapes and check the + # common output shape. + + inarrays = [np.zeros(s) for s in input_shapes] + outarrays = broadcast_arrays(*inarrays) + outshapes = [a.shape for a in outarrays] + expected = [expected_shape] * len(inarrays) + assert_equal(outshapes, expected) + + +def assert_incompatible_shapes_raise(input_shapes): + # Broadcast a list of arrays with the given (incompatible) input shapes + # and check that they raise a ValueError. + + inarrays = [np.zeros(s) for s in input_shapes] + assert_raises(ValueError, broadcast_arrays, *inarrays) + + +def assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False): + # Broadcast two shapes against each other and check that the data layout + # is the same as if a ufunc did the broadcasting. + + x0 = np.zeros(shape0, dtype=int) + # Note that multiply.reduce's identity element is 1.0, so when shape1==(), + # this gives the desired n==1. 
+ n = int(np.multiply.reduce(shape1)) + x1 = np.arange(n).reshape(shape1) + if transposed: + x0 = x0.T + x1 = x1.T + if flipped: + x0 = x0[::-1] + x1 = x1[::-1] + # Use the add ufunc to do the broadcasting. Since we're adding 0s to x1, the + # result should be exactly the same as the broadcasted view of x1. + y = x0 + x1 + b0, b1 = broadcast_arrays(x0, x1) + assert_array_equal(y, b1) + + +def test_same(): + x = np.arange(10) + y = np.arange(10) + bx, by = broadcast_arrays(x, y) + assert_array_equal(x, bx) + assert_array_equal(y, by) + +def test_broadcast_kwargs(): + # ensure that a TypeError is appropriately raised when + # np.broadcast_arrays() is called with any keyword + # argument other than 'subok' + x = np.arange(10) + y = np.arange(10) + + with assert_raises_regex(TypeError, 'got an unexpected keyword'): + broadcast_arrays(x, y, dtype='float64') + + +def test_one_off(): + x = np.array([[1, 2, 3]]) + y = np.array([[1], [2], [3]]) + bx, by = broadcast_arrays(x, y) + bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) + by0 = bx0.T + assert_array_equal(bx0, bx) + assert_array_equal(by0, by) + + +def test_same_input_shapes(): + # Check that the final shape is just the input shape. + + data = [ + (), + (1,), + (3,), + (0, 1), + (0, 3), + (1, 0), + (3, 0), + (1, 3), + (3, 1), + (3, 3), + ] + for shape in data: + input_shapes = [shape] + # Single input. + assert_shapes_correct(input_shapes, shape) + # Double input. + input_shapes2 = [shape, shape] + assert_shapes_correct(input_shapes2, shape) + # Triple input. + input_shapes3 = [shape, shape, shape] + assert_shapes_correct(input_shapes3, shape) + + +def test_two_compatible_by_ones_input_shapes(): + # Check that two different input shapes of the same length, but some have + # ones, broadcast to the correct shape. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_two_compatible_by_prepending_ones_input_shapes(): + # Check that two different input shapes (of different lengths) broadcast + # to the correct shape. + + data = [ + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_shapes_correct(input_shapes, expected_shape) + # Reverse the input shapes since broadcasting should be symmetric. + assert_shapes_correct(input_shapes[::-1], expected_shape) + + +def test_incompatible_shapes_raise_valueerror(): + # Check that a ValueError is raised for incompatible shapes. 
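+    # (Aligned from the trailing axis, each dimension pair must be equal or
+    # contain a 1; every case below violates that rule on some axis.)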
+ + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), (4,)], + [(1, 3, 4), (2, 3, 3)], + ] + for input_shapes in data: + assert_incompatible_shapes_raise(input_shapes) + # Reverse the input shapes since broadcasting should be symmetric. + assert_incompatible_shapes_raise(input_shapes[::-1]) + + +def test_same_as_ufunc(): + # Check that the data layout is the same as if a ufunc did the operation. + + data = [ + [[(1,), (3,)], (3,)], + [[(1, 3), (3, 3)], (3, 3)], + [[(3, 1), (3, 3)], (3, 3)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 1), (3, 3)], (3, 3)], + [[(1, 1), (1, 3)], (1, 3)], + [[(1, 1), (3, 1)], (3, 1)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (3,)], (3,)], + [[(3,), (3, 3)], (3, 3)], + [[(3,), (3, 1)], (3, 3)], + [[(1,), (3, 3)], (3, 3)], + [[(), (3, 3)], (3, 3)], + [[(1, 1), (3,)], (1, 3)], + [[(1,), (3, 1)], (3, 1)], + [[(1,), (1, 3)], (1, 3)], + [[(), (1, 3)], (1, 3)], + [[(), (3, 1)], (3, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + ] + for input_shapes, expected_shape in data: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], + f"Shapes: {input_shapes[0]} {input_shapes[1]}") + # Reverse the input shapes since broadcasting should be symmetric. + assert_same_as_ufunc(input_shapes[1], input_shapes[0]) + # Try them transposed, too. + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True) + # ... and flipped for non-rank-0 inputs in order to test negative + # strides. 
+ if () not in input_shapes: + assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True) + assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True) + + +def test_broadcast_to_succeeds(): + data = [ + [np.array(0), (0,), np.array(0)], + [np.array(0), (1,), np.zeros(1)], + [np.array(0), (3,), np.zeros(3)], + [np.ones(1), (1,), np.ones(1)], + [np.ones(1), (2,), np.ones(2)], + [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))], + [np.arange(3), (3,), np.arange(3)], + [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)], + [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])], + # test if shape is not a tuple + [np.ones(0), 0, np.ones(0)], + [np.ones(1), 1, np.ones(1)], + [np.ones(1), 2, np.ones(2)], + # these cases with size 0 are strange, but they reproduce the behavior + # of broadcasting with ufuncs (see test_same_as_ufunc above) + [np.ones(1), (0,), np.ones(0)], + [np.ones((1, 2)), (0, 2), np.ones((0, 2))], + [np.ones((2, 1)), (2, 0), np.ones((2, 0))], + ] + for input_array, shape, expected in data: + actual = broadcast_to(input_array, shape) + assert_array_equal(expected, actual) + + +def test_broadcast_to_raises(): + data = [ + [(0,), ()], + [(1,), ()], + [(3,), ()], + [(3,), (1,)], + [(3,), (2,)], + [(3,), (4,)], + [(1, 2), (2, 1)], + [(1, 1), (1,)], + [(1,), -1], + [(1,), (-1,)], + [(1, 2), (-1, 2)], + ] + for orig_shape, target_shape in data: + arr = np.zeros(orig_shape) + assert_raises(ValueError, lambda: broadcast_to(arr, target_shape)) + + +def test_broadcast_shape(): + # tests internal _broadcast_shape + # _broadcast_shape is already exercised indirectly by broadcast_arrays + # _broadcast_shape is also exercised by the public broadcast_shapes function + assert_equal(_broadcast_shape(), ()) + assert_equal(_broadcast_shape([1, 2]), (2,)) + assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1)) + assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2)) + assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,)) + bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32 + assert_raises(ValueError, lambda: _broadcast_shape(*bad_args)) + + +def test_broadcast_shapes_succeeds(): + # tests public broadcast_shapes + data = [ + [[], ()], + [[()], ()], + [[(7,)], (7,)], + [[(1, 2), (2,)], (1, 2)], + [[(1, 1)], (1, 1)], + [[(1, 1), (3, 4)], (3, 4)], + [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)], + [[(5, 6, 1)], (5, 6, 1)], + [[(1, 3), (3, 1)], (3, 3)], + [[(1, 0), (0, 0)], (0, 0)], + [[(0, 1), (0, 0)], (0, 0)], + [[(1, 0), (0, 1)], (0, 0)], + [[(1, 1), (0, 0)], (0, 0)], + [[(1, 1), (1, 0)], (1, 0)], + [[(1, 1), (0, 1)], (0, 1)], + [[(), (0,)], (0,)], + [[(0,), (0, 0)], (0, 0)], + [[(0,), (0, 1)], (0, 0)], + [[(1,), (0, 0)], (0, 0)], + [[(), (0, 0)], (0, 0)], + [[(1, 1), (0,)], (1, 0)], + [[(1,), (0, 1)], (0, 1)], + [[(1,), (1, 0)], (1, 0)], + [[(), (1, 0)], (1, 0)], + [[(), (0, 1)], (0, 1)], + [[(1,), (3,)], (3,)], + [[2, (3, 2)], (3, 2)], + ] + for input_shapes, target_shape in data: + assert_equal(broadcast_shapes(*input_shapes), target_shape) + + assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2)) + assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2)) + + # regression tests for gh-5862 + assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,)) + + +def test_broadcast_shapes_raises(): + # tests public broadcast_shapes + data = [ + [(3,), (4,)], + [(2, 3), (2,)], + [(3,), (3,), 
(4,)], + [(1, 3, 4), (2, 3, 3)], + [(1, 2), (3, 1), (3, 2), (10, 5)], + [2, (2, 3)], + ] + for input_shapes in data: + assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes)) + + bad_args = [(2,)] * 32 + [(3,)] * 32 + assert_raises(ValueError, lambda: broadcast_shapes(*bad_args)) + + +def test_as_strided(): + a = np.array([None]) + a_view = as_strided(a) + expected = np.array([None]) + assert_array_equal(a_view, np.array([None])) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + expected = np.array([1, 3]) + assert_array_equal(a_view, expected) + + a = np.array([1, 2, 3, 4]) + a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize)) + expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + assert_array_equal(a_view, expected) + + # Regression test for gh-5081 + dt = np.dtype([('num', 'i4'), ('obj', 'O')]) + a = np.empty((4,), dtype=dt) + a['num'] = np.arange(1, 5) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + expected_num = [[1, 2, 3, 4]] * 3 + expected_obj = [[None] * 4] * 3 + assert_equal(a_view.dtype, dt) + assert_array_equal(expected_num, a_view['num']) + assert_array_equal(expected_obj, a_view['obj']) + + # Make sure that void types without fields are kept unchanged + a = np.empty((4,), dtype='V4') + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Make sure that the only type that could fail is properly handled + dt = np.dtype({'names': [''], 'formats': ['V4']}) + a = np.empty((4,), dtype=dt) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + + # Custom dtypes should not be lost (gh-9161) + r = [rational(i) for i in range(4)] + a = np.array(r, dtype=rational) + a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize)) + assert_equal(a.dtype, a_view.dtype) + assert_array_equal([r] * 3, a_view) + + +class TestSlidingWindowView: + def test_1d(self): + arr = np.arange(5) + arr_view = sliding_window_view(arr, 2) + expected = np.array([[0, 1], + [1, 2], + [2, 3], + [3, 4]]) + assert_array_equal(arr_view, expected) + + def test_2d(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + shape = (2, 2) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1], [10, 11]], + [[1, 2], [11, 12]], + [[2, 3], [12, 13]]], + [[[10, 11], [20, 21]], + [[11, 12], [21, 22]], + [[12, 13], [22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_with_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + arr_view = sliding_window_view(arr, 3, 0) + expected = np.array([[[0, 10, 20], + [1, 11, 21], + [2, 12, 22], + [3, 13, 23]]]) + assert_array_equal(arr_view, expected) + + def test_2d_repeated_axis(self): + i, j = np.ogrid[:3, :4] + arr = 10 * i + j + arr_view = sliding_window_view(arr, (2, 3), (1, 1)) + expected = np.array([[[[0, 1, 2], + [1, 2, 3]]], + [[[10, 11, 12], + [11, 12, 13]]], + [[[20, 21, 22], + [21, 22, 23]]]]) + assert_array_equal(arr_view, expected) + + def test_2d_without_axis(self): + i, j = np.ogrid[:4, :4] + arr = 10 * i + j + shape = (2, 3) + arr_view = sliding_window_view(arr, shape) + expected = np.array([[[[0, 1, 2], [10, 11, 12]], + [[1, 2, 3], [11, 12, 13]]], + [[[10, 11, 12], [20, 21, 22]], + [[11, 12, 13], [21, 22, 23]]], + [[[20, 21, 22], [30, 31, 32]], + [[21, 22, 23], [31, 32, 33]]]]) + assert_array_equal(arr_view, expected) + + def test_errors(self): + i, j = np.ogrid[:4, :4] + arr = 10 * i + j + with pytest.raises(ValueError, 
match='cannot contain negative values'): + sliding_window_view(arr, (-1, 3)) + with pytest.raises( + ValueError, + match='must provide window_shape for all dimensions of `x`'): + sliding_window_view(arr, (1,)) + with pytest.raises( + ValueError, + match='Must provide matching length window_shape and axis'): + sliding_window_view(arr, (1, 3, 4), axis=(0, 1)) + with pytest.raises( + ValueError, + match='window shape cannot be larger than input array'): + sliding_window_view(arr, (5, 5)) + + def test_writeable(self): + arr = np.arange(5) + view = sliding_window_view(arr, 2, writeable=False) + assert_(not view.flags.writeable) + with pytest.raises( + ValueError, + match='assignment destination is read-only'): + view[0, 0] = 3 + view = sliding_window_view(arr, 2, writeable=True) + assert_(view.flags.writeable) + view[0, 1] = 3 + assert_array_equal(arr, np.array([0, 3, 2, 3, 4])) + + def test_subok(self): + class MyArray(np.ndarray): + pass + + arr = np.arange(5).view(MyArray) + assert_(not isinstance(sliding_window_view(arr, 2, + subok=False), + MyArray)) + assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray)) + # Default behavior + assert_(not isinstance(sliding_window_view(arr, 2), MyArray)) + + +def as_strided_writeable(): + arr = np.ones(10) + view = as_strided(arr, writeable=False) + assert_(not view.flags.writeable) + + # Check that writeable also is fine: + view = as_strided(arr, writeable=True) + assert_(view.flags.writeable) + view[...] = 3 + assert_array_equal(arr, np.full_like(arr, 3)) + + # Test that things do not break down for readonly: + arr.flags.writeable = False + view = as_strided(arr, writeable=False) + view = as_strided(arr, writeable=True) + assert_(not view.flags.writeable) + + +class VerySimpleSubClass(np.ndarray): + def __new__(cls, *args, **kwargs): + return np.array(*args, subok=True, **kwargs).view(cls) + + +class SimpleSubClass(VerySimpleSubClass): + def __new__(cls, *args, **kwargs): + self = np.array(*args, subok=True, **kwargs).view(cls) + self.info = 'simple' + return self + + def __array_finalize__(self, obj): + self.info = getattr(obj, 'info', '') + ' finalized' + + +def test_subclasses(): + # test that subclass is preserved only if subok=True + a = VerySimpleSubClass([1, 2, 3, 4]) + assert_(type(a) is VerySimpleSubClass) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,)) + assert_(type(a_view) is np.ndarray) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is VerySimpleSubClass) + # test that if a subclass has __array_finalize__, it is used + a = SimpleSubClass([1, 2, 3, 4]) + a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + + # similar tests for broadcast_arrays + b = np.arange(len(a)).reshape(-1, 1) + a_view, b_view = broadcast_arrays(a, b) + assert_(type(a_view) is np.ndarray) + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + a_view, b_view = broadcast_arrays(a, b, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(type(b_view) is np.ndarray) + assert_(a_view.shape == b_view.shape) + + # and for broadcast_to + shape = (2, 4) + a_view = broadcast_to(a, shape) + assert_(type(a_view) is np.ndarray) + assert_(a_view.shape == shape) + a_view = broadcast_to(a, shape, subok=True) + assert_(type(a_view) is SimpleSubClass) + assert_(a_view.info == 'simple finalized') + assert_(a_view.shape 
== shape) + + +def test_writeable(): + # broadcast_to should return a readonly array + original = np.array([1, 2, 3]) + result = broadcast_to(original, (2, 3)) + assert_equal(result.flags.writeable, False) + assert_raises(ValueError, result.__setitem__, slice(None), 0) + + # but the result of broadcast_arrays needs to be writeable, to + # preserve backwards compatibility + test_cases = [((False,), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): + # This will change to False in a future version + if array_is_broadcast: + with assert_warns(FutureWarning): + assert_equal(result.flags.writeable, True) + with assert_warns(DeprecationWarning): + result[:] = 0 + # Warning not emitted, writing to the array resets it + assert_equal(result.flags.writeable, True) + else: + # No warning: + assert_equal(result.flags.writeable, True) + + for results in [broadcast_arrays(original), + broadcast_arrays(0, original)]: + for result in results: + # resets the warn_on_write DeprecationWarning + result.flags.writeable = True + # check: no warning emitted + assert_equal(result.flags.writeable, True) + result[:] = 0 + + # keep readonly input readonly + original.flags.writeable = False + _, result = broadcast_arrays(0, original) + assert_equal(result.flags.writeable, False) + + # regression test for GH6491 + shape = (2,) + strides = [0] + tricky_array = as_strided(np.array(0), shape, strides) + other = np.zeros((1,)) + first, second = broadcast_arrays(tricky_array, other) + assert_(first.shape == second.shape) + + +def test_writeable_memoryview(): + # The result of broadcast_arrays exports as a non-writeable memoryview + # because otherwise there is no good way to opt in to the new behaviour + # (i.e. you would need to set writeable to False explicitly). + # See gh-13929. + original = np.array([1, 2, 3]) + + test_cases = [((False, ), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): + # This will change to False in a future version + if array_is_broadcast: + # memoryview(result, writable=True) will give warning but cannot + # be tested using the python API. 
+ assert memoryview(result).readonly + else: + assert not memoryview(result).readonly + + +def test_reference_types(): + input_array = np.array('a', dtype=object) + expected = np.array(['a'] * 3, dtype=object) + actual = broadcast_to(input_array, (3,)) + assert_array_equal(expected, actual) + + actual, _ = broadcast_arrays(input_array, np.ones(3)) + assert_array_equal(expected, actual) diff --git a/python/numpy/lib/tests/test_twodim_base.py b/python/numpy/lib/tests/test_twodim_base.py new file mode 100644 index 000000000..eb6aa69a4 --- /dev/null +++ b/python/numpy/lib/tests/test_twodim_base.py @@ -0,0 +1,559 @@ +"""Test functions for matrix module + +""" +import pytest + +import numpy as np +from numpy import ( + add, + arange, + array, + diag, + eye, + fliplr, + flipud, + histogram2d, + mask_indices, + ones, + tri, + tril_indices, + tril_indices_from, + triu_indices, + triu_indices_from, + vander, + zeros, +) +from numpy.testing import ( + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_array_max_ulp, + assert_equal, + assert_raises, +) + + +def get_mat(n): + data = arange(n) + data = add.outer(data, data) + return data + + +class TestEye: + def test_basic(self): + assert_equal(eye(4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]])) + + assert_equal(eye(4, dtype='f'), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1]], 'f')) + + assert_equal(eye(3) == 1, + eye(3, dtype=bool)) + + def test_uint64(self): + # Regression test for gh-9982 + assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]])) + assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)), + array([[0, 1, 0, 0], [0, 0, 1, 0]])) + + def test_diag(self): + assert_equal(eye(4, k=1), + array([[0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, k=-1), + array([[0, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_2d(self): + assert_equal(eye(4, 3), + array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0]])) + + assert_equal(eye(3, 4), + array([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]])) + + def test_diag2d(self): + assert_equal(eye(3, 4, k=2), + array([[0, 0, 1, 0], + [0, 0, 0, 1], + [0, 0, 0, 0]])) + + assert_equal(eye(4, 3, k=-2), + array([[0, 0, 0], + [0, 0, 0], + [1, 0, 0], + [0, 1, 0]])) + + def test_eye_bounds(self): + assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]]) + assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]]) + assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]]) + assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]]) + assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]]) + assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]]) + assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]]) + assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]]) + + def test_strings(self): + assert_equal(eye(2, 2, dtype='S3'), + [[b'1', b''], [b'', b'1']]) + + def test_bool(self): + assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]]) + + def test_order(self): + mat_c = eye(4, 3, k=-1) + mat_f = eye(4, 3, k=-1, order='F') + assert_equal(mat_c, mat_f) + assert mat_c.flags.c_contiguous + assert not mat_c.flags.f_contiguous + assert not mat_f.flags.c_contiguous + assert mat_f.flags.f_contiguous + + +class TestDiag: + def test_vector(self): + vals = (100 * arange(5)).astype('l') + b = zeros((5, 5)) + for k in range(5): + b[k, k] = vals[k] + assert_equal(diag(vals), b) + b = zeros((7, 7)) + c = b.copy() + for k in range(5): + b[k, k + 2] = vals[k] + c[k + 2, 
k] = vals[k] + assert_equal(diag(vals, k=2), b) + assert_equal(diag(vals, k=-2), c) + + def test_matrix(self, vals=None): + if vals is None: + vals = (100 * get_mat(5) + 1).astype('l') + b = zeros((5,)) + for k in range(5): + b[k] = vals[k, k] + assert_equal(diag(vals), b) + b = b * 0 + for k in range(3): + b[k] = vals[k, k + 2] + assert_equal(diag(vals, 2), b[:3]) + for k in range(3): + b[k] = vals[k + 2, k] + assert_equal(diag(vals, -2), b[:3]) + + def test_fortran_order(self): + vals = array((100 * get_mat(5) + 1), order='F', dtype='l') + self.test_matrix(vals) + + def test_diag_bounds(self): + A = [[1, 2], [3, 4], [5, 6]] + assert_equal(diag(A, k=2), []) + assert_equal(diag(A, k=1), [2]) + assert_equal(diag(A, k=0), [1, 4]) + assert_equal(diag(A, k=-1), [3, 6]) + assert_equal(diag(A, k=-2), [5]) + assert_equal(diag(A, k=-3), []) + + def test_failure(self): + assert_raises(ValueError, diag, [[[1]]]) + + +class TestFliplr: + def test_basic(self): + assert_raises(ValueError, fliplr, ones(4)) + a = get_mat(4) + b = a[:, ::-1] + assert_equal(fliplr(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[2, 1, 0], + [5, 4, 3]] + assert_equal(fliplr(a), b) + + +class TestFlipud: + def test_basic(self): + a = get_mat(4) + b = a[::-1, :] + assert_equal(flipud(a), b) + a = [[0, 1, 2], + [3, 4, 5]] + b = [[3, 4, 5], + [0, 1, 2]] + assert_equal(flipud(a), b) + + +class TestHistogram2d: + def test_simple(self): + x = array( + [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891]) + y = array( + [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673]) + xedges = np.linspace(0, 1, 10) + yedges = np.linspace(0, 1, 10) + H = histogram2d(x, y, (xedges, yedges))[0] + answer = array( + [[0, 0, 0, 1, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0]]) + assert_array_equal(H.T, answer) + H = histogram2d(x, y, xedges)[0] + assert_array_equal(H.T, answer) + H, xedges, yedges = histogram2d(list(range(10)), list(range(10))) + assert_array_equal(H, eye(10, 10)) + assert_array_equal(xedges, np.linspace(0, 9, 11)) + assert_array_equal(yedges, np.linspace(0, 9, 11)) + + def test_asym(self): + x = array([1, 1, 2, 3, 4, 4, 4, 5]) + y = array([1, 3, 2, 0, 1, 2, 3, 4]) + H, xed, yed = histogram2d( + x, y, (6, 5), range=[[0, 6], [0, 5]], density=True) + answer = array( + [[0., 0, 0, 0, 0], + [0, 1, 0, 1, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 1, 1, 0], + [0, 0, 0, 0, 1]]) + assert_array_almost_equal(H, answer / 8., 3) + assert_array_equal(xed, np.linspace(0, 6, 7)) + assert_array_equal(yed, np.linspace(0, 5, 6)) + + def test_density(self): + x = array([1, 2, 3, 1, 2, 3, 1, 2, 3]) + y = array([1, 1, 1, 2, 2, 2, 3, 3, 3]) + H, xed, yed = histogram2d( + x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True) + answer = array([[1, 1, .5], + [1, 1, .5], + [.5, .5, .25]]) / 9. + assert_array_almost_equal(H, answer, 3) + + def test_all_outliers(self): + r = np.random.rand(100) + 1. 
+ 1e6 # histogramdd rounds by decimal=6 + H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1])) + assert_array_equal(H, 0) + + def test_empty(self): + a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1])) + assert_array_max_ulp(a, array([[0.]])) + + a, edge1, edge2 = histogram2d([], [], bins=4) + assert_array_max_ulp(a, np.zeros((4, 4))) + + def test_binparameter_combination(self): + x = array( + [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, + 0.59944483, 1]) + y = array( + [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, + 0.15886423, 1]) + edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1) + H, xe, ye = histogram2d(x, y, (edges, 4)) + answer = array( + [[2., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 1., 0., 0.], + [1., 0., 0., 0.], + [0., 1., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1])) + H, xe, ye = histogram2d(x, y, (4, edges)) + answer = array( + [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]]) + assert_array_equal(H, answer) + assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1])) + + def test_dispatch(self): + class ShouldDispatch: + def __array_function__(self, function, types, args, kwargs): + return types, args, kwargs + + xy = [1, 2] + s_d = ShouldDispatch() + r = histogram2d(s_d, xy) + # Cannot use assert_equal since that dispatches... + assert_(r == ((ShouldDispatch,), (s_d, xy), {})) + r = histogram2d(xy, s_d) + assert_(r == ((ShouldDispatch,), (xy, s_d), {})) + r = histogram2d(xy, xy, bins=s_d) + assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': s_d})) + r = histogram2d(xy, xy, bins=[s_d, 5]) + assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': [s_d, 5]})) + assert_raises(Exception, histogram2d, xy, xy, bins=[s_d]) + r = histogram2d(xy, xy, weights=s_d) + assert_(r, ((ShouldDispatch,), (xy, xy), {'weights': s_d})) + + @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)]) + def test_bad_length(self, x_len, y_len): + x, y = np.ones(x_len), np.ones(y_len) + with pytest.raises(ValueError, + match='x and y must have the same length.'): + histogram2d(x, y) + + +class TestTri: + def test_dtype(self): + out = array([[1, 0, 0], + [1, 1, 0], + [1, 1, 1]]) + assert_array_equal(tri(3), out) + assert_array_equal(tri(3, dtype=bool), out.astype(bool)) + + +def test_tril_triu_ndim2(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.ones((2, 2), dtype=dtype) + b = np.tril(a) + c = np.triu(a) + assert_array_equal(b, [[1, 0], [1, 1]]) + assert_array_equal(c, b.T) + # should return the same dtype as the original array + assert_equal(b.dtype, a.dtype) + assert_equal(c.dtype, a.dtype) + + +def test_tril_triu_ndim3(): + for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']: + a = np.array([ + [[1, 1], [1, 1]], + [[1, 1], [1, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_tril_desired = np.array([ + [[1, 0], [1, 1]], + [[1, 0], [1, 0]], + [[1, 0], [0, 0]], + ], dtype=dtype) + a_triu_desired = np.array([ + [[1, 1], [0, 1]], + [[1, 1], [0, 0]], + [[1, 1], [0, 0]], + ], dtype=dtype) + a_triu_observed = np.triu(a) + a_tril_observed = np.tril(a) + assert_array_equal(a_triu_observed, a_triu_desired) + assert_array_equal(a_tril_observed, a_tril_desired) + assert_equal(a_triu_observed.dtype, a.dtype) + assert_equal(a_tril_observed.dtype, 
a.dtype) + + +def test_tril_triu_with_inf(): + # Issue 4859 + arr = np.array([[1, 1, np.inf], + [1, 1, 1], + [np.inf, 1, 1]]) + out_tril = np.array([[1, 0, 0], + [1, 1, 0], + [np.inf, 1, 1]]) + out_triu = out_tril.T + assert_array_equal(np.triu(arr), out_triu) + assert_array_equal(np.tril(arr), out_tril) + + +def test_tril_triu_dtype(): + # Issue 4916 + # tril and triu should return the same dtype as input + for c in np.typecodes['All']: + if c == 'V': + continue + arr = np.zeros((3, 3), dtype=c) + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + # check special cases + arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'], + ['2004-01-01T12:00', '2003-01-03T13:45']], + dtype='datetime64') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + arr = np.zeros((3, 3), dtype='f4,f4') + assert_equal(np.triu(arr).dtype, arr.dtype) + assert_equal(np.tril(arr).dtype, arr.dtype) + + +def test_mask_indices(): + # simple test without offset + iu = mask_indices(3, np.triu) + a = np.arange(9).reshape(3, 3) + assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8])) + # Now with an offset + iu1 = mask_indices(3, np.triu, 1) + assert_array_equal(a[iu1], array([1, 2, 5])) + + +def test_tril_indices(): + # indices without and with offset + il1 = tril_indices(4) + il2 = tril_indices(4, k=2) + il3 = tril_indices(4, m=5) + il4 = tril_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # indexing: + assert_array_equal(a[il1], + array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16])) + assert_array_equal(b[il3], + array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19])) + + # And for assigning values: + a[il1] = -1 + assert_array_equal(a, + array([[-1, 2, 3, 4], + [-1, -1, 7, 8], + [-1, -1, -1, 12], + [-1, -1, -1, -1]])) + b[il3] = -1 + assert_array_equal(b, + array([[-1, 2, 3, 4, 5], + [-1, -1, 8, 9, 10], + [-1, -1, -1, 14, 15], + [-1, -1, -1, -1, 20]])) + # These cover almost the whole array (two diagonals right of the main one): + a[il2] = -10 + assert_array_equal(a, + array([[-10, -10, -10, 4], + [-10, -10, -10, -10], + [-10, -10, -10, -10], + [-10, -10, -10, -10]])) + b[il4] = -10 + assert_array_equal(b, + array([[-10, -10, -10, 4, 5], + [-10, -10, -10, -10, 10], + [-10, -10, -10, -10, -10], + [-10, -10, -10, -10, -10]])) + + +class TestTriuIndices: + def test_triu_indices(self): + iu1 = triu_indices(4) + iu2 = triu_indices(4, k=2) + iu3 = triu_indices(4, m=5) + iu4 = triu_indices(4, k=2, m=5) + + a = np.array([[1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16]]) + b = np.arange(1, 21).reshape(4, 5) + + # Both for indexing: + assert_array_equal(a[iu1], + array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16])) + assert_array_equal(b[iu3], + array([1, 2, 3, 4, 5, 7, 8, 9, + 10, 13, 14, 15, 19, 20])) + + # And for assigning values: + a[iu1] = -1 + assert_array_equal(a, + array([[-1, -1, -1, -1], + [5, -1, -1, -1], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu3] = -1 + assert_array_equal(b, + array([[-1, -1, -1, -1, -1], + [6, -1, -1, -1, -1], + [11, 12, -1, -1, -1], + [16, 17, 18, -1, -1]])) + + # These cover almost the whole array (two diagonals right of the + # main one): + a[iu2] = -10 + assert_array_equal(a, + array([[-1, -1, -10, -10], + [5, -1, -1, -10], + [9, 10, -1, -1], + [13, 14, 15, -1]])) + b[iu4] = -10 + assert_array_equal(b, + array([[-1, -1, -10, -10, -10], + [6, -1, -1, -10, -10], + [11, 12, -1, -1, -10], + [16, 17, 18, -1, -1]])) 
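+
+# For reference, a minimal sketch of where the index tuples above come from:
+# tril_indices(n, k, m) returns the row/column coordinates of the nonzero
+# entries of tri(n, m, k), and triu_indices is built from the complementary
+# mask, e.g.
+#
+#   >>> rows, cols = np.tril_indices(3)
+#   >>> rows, cols
+#   (array([0, 1, 1, 2, 2, 2]), array([0, 0, 1, 0, 1, 2]))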
+ + +class TestTrilIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, tril_indices_from, np.ones((2,))) + assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, tril_indices_from, np.ones((2, 3))) + + +class TestTriuIndicesFrom: + def test_exceptions(self): + assert_raises(ValueError, triu_indices_from, np.ones((2,))) + assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2))) + # assert_raises(ValueError, triu_indices_from, np.ones((2, 3))) + + +class TestVander: + def test_basic(self): + c = np.array([0, 1, -2, 3]) + v = vander(c) + powers = np.array([[0, 0, 0, 0, 1], + [1, 1, 1, 1, 1], + [16, -8, 4, -2, 1], + [81, 27, 9, 3, 1]]) + # Check default value of N: + assert_array_equal(v, powers[:, 1:]) + # Check a range of N values, including 0 and 5 (greater than default) + m = powers.shape[1] + for n in range(6): + v = vander(c, N=n) + assert_array_equal(v, powers[:, m - n:m]) + + def test_dtypes(self): + c = array([11, -12, 13], dtype=np.int8) + v = vander(c) + expected = np.array([[121, 11, 1], + [144, -12, 1], + [169, 13, 1]]) + assert_array_equal(v, expected) + + c = array([1.0 + 1j, 1.0 - 1j]) + v = vander(c, N=3) + expected = np.array([[2j, 1 + 1j, 1], + [-2j, 1 - 1j, 1]]) + # The data is floating point, but the values are small integers, + # so assert_array_equal *should* be safe here (rather than, say, + # assert_array_almost_equal). + assert_array_equal(v, expected) diff --git a/python/numpy/lib/tests/test_type_check.py b/python/numpy/lib/tests/test_type_check.py new file mode 100644 index 000000000..447c2c36c --- /dev/null +++ b/python/numpy/lib/tests/test_type_check.py @@ -0,0 +1,473 @@ +import numpy as np +from numpy import ( + common_type, + iscomplex, + iscomplexobj, + isneginf, + isposinf, + isreal, + isrealobj, + mintypecode, + nan_to_num, + real_if_close, +) +from numpy.testing import assert_, assert_array_equal, assert_equal + + +def assert_all(x): + assert_(np.all(x), x) + + +class TestCommonType: + def test_basic(self): + ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32) + af16 = np.array([[1, 2], [3, 4]], dtype=np.float16) + af32 = np.array([[1, 2], [3, 4]], dtype=np.float32) + af64 = np.array([[1, 2], [3, 4]], dtype=np.float64) + acs = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex64) + acd = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex128) + assert_(common_type(ai32) == np.float64) + assert_(common_type(af16) == np.float16) + assert_(common_type(af32) == np.float32) + assert_(common_type(af64) == np.float64) + assert_(common_type(acs) == np.complex64) + assert_(common_type(acd) == np.complex128) + + +class TestMintypecode: + + def test_default_1(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype), 'd') + assert_equal(mintypecode('f'), 'f') + assert_equal(mintypecode('d'), 'd') + assert_equal(mintypecode('F'), 'F') + assert_equal(mintypecode('D'), 'D') + + def test_default_2(self): + for itype in '1bcsuwil': + assert_equal(mintypecode(itype + 'f'), 'f') + assert_equal(mintypecode(itype + 'd'), 'd') + assert_equal(mintypecode(itype + 'F'), 'F') + assert_equal(mintypecode(itype + 'D'), 'D') + assert_equal(mintypecode('ff'), 'f') + assert_equal(mintypecode('fd'), 'd') + assert_equal(mintypecode('fF'), 'F') + assert_equal(mintypecode('fD'), 'D') + assert_equal(mintypecode('df'), 'd') + assert_equal(mintypecode('dd'), 'd') + #assert_equal(mintypecode('dF',savespace=1),'F') + assert_equal(mintypecode('dF'), 'D') + assert_equal(mintypecode('dD'), 'D') + 
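+        # 'F' (csingle) absorbs 'f', but combining it with 'd' has to promote
+        # to 'D' (cdouble): no single-precision complex type can hold a double.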
assert_equal(mintypecode('Ff'), 'F') + #assert_equal(mintypecode('Fd',savespace=1),'F') + assert_equal(mintypecode('Fd'), 'D') + assert_equal(mintypecode('FF'), 'F') + assert_equal(mintypecode('FD'), 'D') + assert_equal(mintypecode('Df'), 'D') + assert_equal(mintypecode('Dd'), 'D') + assert_equal(mintypecode('DF'), 'D') + assert_equal(mintypecode('DD'), 'D') + + def test_default_3(self): + assert_equal(mintypecode('fdF'), 'D') + #assert_equal(mintypecode('fdF',savespace=1),'F') + assert_equal(mintypecode('fdD'), 'D') + assert_equal(mintypecode('fFD'), 'D') + assert_equal(mintypecode('dFD'), 'D') + + assert_equal(mintypecode('ifd'), 'd') + assert_equal(mintypecode('ifF'), 'F') + assert_equal(mintypecode('ifD'), 'D') + assert_equal(mintypecode('idF'), 'D') + #assert_equal(mintypecode('idF',savespace=1),'F') + assert_equal(mintypecode('idD'), 'D') + + +class TestIsscalar: + + def test_basic(self): + assert_(np.isscalar(3)) + assert_(not np.isscalar([3])) + assert_(not np.isscalar((3,))) + assert_(np.isscalar(3j)) + assert_(np.isscalar(4.0)) + + +class TestReal: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(y, np.real(y)) + + y = np.array(1) + out = np.real(y) + assert_array_equal(y, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.real(y) + assert_equal(y, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,) + 1j * np.random.rand(10,) + assert_array_equal(y.real, np.real(y)) + + y = np.array(1 + 1j) + out = np.real(y) + assert_array_equal(y.real, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.real(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestImag: + + def test_real(self): + y = np.random.rand(10,) + assert_array_equal(0, np.imag(y)) + + y = np.array(1) + out = np.imag(y) + assert_array_equal(0, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + out = np.imag(y) + assert_equal(0, out) + assert_(not isinstance(out, np.ndarray)) + + def test_cmplx(self): + y = np.random.rand(10,) + 1j * np.random.rand(10,) + assert_array_equal(y.imag, np.imag(y)) + + y = np.array(1 + 1j) + out = np.imag(y) + assert_array_equal(y.imag, out) + assert_(isinstance(out, np.ndarray)) + + y = 1 + 1j + out = np.imag(y) + assert_equal(1.0, out) + assert_(not isinstance(out, np.ndarray)) + + +class TestIscomplex: + + def test_fail(self): + z = np.array([-1, 0, 1]) + res = iscomplex(z) + assert_(not np.any(res, axis=0)) + + def test_pass(self): + z = np.array([-1j, 1, 0]) + res = iscomplex(z) + assert_array_equal(res, [1, 0, 0]) + + +class TestIsreal: + + def test_pass(self): + z = np.array([-1, 0, 1j]) + res = isreal(z) + assert_array_equal(res, [1, 1, 0]) + + def test_fail(self): + z = np.array([-1j, 1, 0]) + res = isreal(z) + assert_array_equal(res, [0, 1, 1]) + + +class TestIscomplexobj: + + def test_basic(self): + z = np.array([-1, 0, 1]) + assert_(not iscomplexobj(z)) + z = np.array([-1j, 0, -1]) + assert_(iscomplexobj(z)) + + def test_scalar(self): + assert_(not iscomplexobj(1.0)) + assert_(iscomplexobj(1 + 0j)) + + def test_list(self): + assert_(iscomplexobj([3, 1 + 0j, True])) + assert_(not iscomplexobj([3, 1, True])) + + def test_duck(self): + class DummyComplexArray: + @property + def dtype(self): + return np.dtype(complex) + dummy = DummyComplexArray() + assert_(iscomplexobj(dummy)) + + def test_pandas_duck(self): + # This tests a custom np.dtype duck-typed class, such as used by pandas + # (pandas.core.dtypes) + class PdComplex(np.complex128): + pass + + 
class PdDtype: + name = 'category' + names = None + type = PdComplex + kind = 'c' + str = ' 1e10) and assert_all(np.isfinite(vals[2])) + assert_equal(type(vals), np.ndarray) + + # perform the same tests but with nan, posinf and neginf keywords + with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1)) / 0., + nan=10, posinf=20, neginf=30) + assert_equal(vals, [30, 10, 20]) + assert_all(np.isfinite(vals[[0, 2]])) + assert_equal(type(vals), np.ndarray) + + # perform the same test but in-place + with np.errstate(divide='ignore', invalid='ignore'): + vals = np.array((-1., 0, 1)) / 0. + result = nan_to_num(vals, copy=False) + + assert_(result is vals) + assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0])) + assert_(vals[1] == 0) + assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) + assert_equal(type(vals), np.ndarray) + + # perform the same test but in-place + with np.errstate(divide='ignore', invalid='ignore'): + vals = np.array((-1., 0, 1)) / 0. + result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30) + + assert_(result is vals) + assert_equal(vals, [30, 10, 20]) + assert_all(np.isfinite(vals[[0, 2]])) + assert_equal(type(vals), np.ndarray) + + def test_array(self): + vals = nan_to_num([1]) + assert_array_equal(vals, np.array([1], int)) + assert_equal(type(vals), np.ndarray) + vals = nan_to_num([1], nan=10, posinf=20, neginf=30) + assert_array_equal(vals, np.array([1], int)) + assert_equal(type(vals), np.ndarray) + + def test_integer(self): + vals = nan_to_num(1) + assert_all(vals == 1) + assert_equal(type(vals), np.int_) + vals = nan_to_num(1, nan=10, posinf=20, neginf=30) + assert_all(vals == 1) + assert_equal(type(vals), np.int_) + + def test_float(self): + vals = nan_to_num(1.0) + assert_all(vals == 1.0) + assert_equal(type(vals), np.float64) + vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30) + assert_all(vals == 1.1) + assert_equal(type(vals), np.float64) + + def test_complex_good(self): + vals = nan_to_num(1 + 1j) + assert_all(vals == 1 + 1j) + assert_equal(type(vals), np.complex128) + vals = nan_to_num(1 + 1j, nan=10, posinf=20, neginf=30) + assert_all(vals == 1 + 1j) + assert_equal(type(vals), np.complex128) + + def test_complex_bad(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(0 + 1.j) / 0. + vals = nan_to_num(v) + # !! This is actually (unexpectedly) zero + assert_all(np.isfinite(vals)) + assert_equal(type(vals), np.complex128) + + def test_complex_bad2(self): + with np.errstate(divide='ignore', invalid='ignore'): + v = 1 + 1j + v += np.array(-1 + 1.j) / 0. + vals = nan_to_num(v) + assert_all(np.isfinite(vals)) + assert_equal(type(vals), np.complex128) + # Fixme + #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals)) + # !! This is actually (unexpectedly) positive + # !! inf. Comment out for now, and see if it + # !! changes + #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) + + def test_do_not_rewrite_previous_keyword(self): + # This is done to test that when, for instance, nan=np.inf then these + # values are not rewritten by posinf keyword to the posinf value. 
+ with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=np.inf, posinf=999) + assert_all(np.isfinite(vals[[0, 2]])) + assert_all(vals[0] < -1e10) + assert_equal(vals[[1, 2]], [np.inf, 999]) + assert_equal(type(vals), np.ndarray) + + +class TestRealIfClose: + + def test_basic(self): + a = np.random.rand(10) + b = real_if_close(a + 1e-15j) + assert_all(isrealobj(b)) + assert_array_equal(a, b) + b = real_if_close(a + 1e-7j) + assert_all(iscomplexobj(b)) + b = real_if_close(a + 1e-7j, tol=1e-6) + assert_all(isrealobj(b)) diff --git a/python/numpy/lib/tests/test_ufunclike.py b/python/numpy/lib/tests/test_ufunclike.py new file mode 100644 index 000000000..b4257ebf9 --- /dev/null +++ b/python/numpy/lib/tests/test_ufunclike.py @@ -0,0 +1,97 @@ +import numpy as np +from numpy import fix, isneginf, isposinf +from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises + + +class TestUfunclike: + + def test_isposinf(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape, bool) + tgt = np.array([True, False, False, False, False, False]) + + res = isposinf(a) + assert_equal(res, tgt) + res = isposinf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex128) + with assert_raises(TypeError): + isposinf(a) + + def test_isneginf(self): + a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0]) + out = np.zeros(a.shape, bool) + tgt = np.array([False, True, False, False, False, False]) + + res = isneginf(a) + assert_equal(res, tgt) + res = isneginf(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + + a = a.astype(np.complex128) + with assert_raises(TypeError): + isneginf(a) + + def test_fix(self): + a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]]) + out = np.zeros(a.shape, float) + tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]]) + + res = fix(a) + assert_equal(res, tgt) + res = fix(a, out) + assert_equal(res, tgt) + assert_equal(out, tgt) + assert_equal(fix(3.14), 3) + + def test_fix_with_subclass(self): + class MyArray(np.ndarray): + def __new__(cls, data, metadata=None): + res = np.array(data, copy=True).view(cls) + res.metadata = metadata + return res + + def __array_wrap__(self, obj, context=None, return_scalar=False): + if not isinstance(obj, MyArray): + obj = obj.view(MyArray) + if obj.metadata is None: + obj.metadata = self.metadata + return obj + + def __array_finalize__(self, obj): + self.metadata = getattr(obj, 'metadata', None) + return self + + a = np.array([1.1, -1.1]) + m = MyArray(a, metadata='foo') + f = fix(m) + assert_array_equal(f, np.array([1, -1])) + assert_(isinstance(f, MyArray)) + assert_equal(f.metadata, 'foo') + + # check 0d arrays don't decay to scalars + m0d = m[0, ...] 
+ m0d.metadata = 'bar' + f0d = fix(m0d) + assert_(isinstance(f0d, MyArray)) + assert_equal(f0d.metadata, 'bar') + + def test_scalar(self): + x = np.inf + actual = np.isposinf(x) + expected = np.True_ + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + x = -3.4 + actual = np.fix(x) + expected = np.float64(-3.0) + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + out = np.array(0.0) + actual = np.fix(x, out=out) + assert_(actual is out) diff --git a/python/numpy/lib/tests/test_utils.py b/python/numpy/lib/tests/test_utils.py new file mode 100644 index 000000000..0106ee0d8 --- /dev/null +++ b/python/numpy/lib/tests/test_utils.py @@ -0,0 +1,80 @@ +from io import StringIO + +import pytest + +import numpy as np +import numpy.lib._utils_impl as _utils_impl +from numpy.testing import assert_raises_regex + + +def test_assert_raises_regex_context_manager(): + with assert_raises_regex(ValueError, 'no deprecation warning'): + raise ValueError('no deprecation warning') + + +def test_info_method_heading(): + # info(class) should only print "Methods:" heading if methods exist + + class NoPublicMethods: + pass + + class WithPublicMethods: + def first_method(): + pass + + def _has_method_heading(cls): + out = StringIO() + np.info(cls, output=out) + return 'Methods:' in out.getvalue() + + assert _has_method_heading(WithPublicMethods) + assert not _has_method_heading(NoPublicMethods) + + +def test_drop_metadata(): + def _compare_dtypes(dt1, dt2): + return np.can_cast(dt1, dt2, casting='no') + + # structured dtype + dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])], + metadata={'msg': 'titi'}) + dt_m = _utils_impl.drop_metadata(dt) + assert _compare_dtypes(dt, dt_m) is True + assert dt_m.metadata is None + assert dt_m['l1'].metadata is None + assert dt_m['l1']['l2'].metadata is None + + # alignment + dt = np.dtype([('x', '>> from numpy import linalg as LA + >>> LA.inv(np.zeros((2,2))) + Traceback (most recent call last): + File "", line 1, in + File "...linalg.py", line 350, + in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype))) + File "...linalg.py", line 249, + in solve + raise LinAlgError('Singular matrix') + numpy.linalg.LinAlgError: Singular matrix + + """ + + +def _raise_linalgerror_singular(err, flag): + raise LinAlgError("Singular matrix") + +def _raise_linalgerror_nonposdef(err, flag): + raise LinAlgError("Matrix is not positive definite") + +def _raise_linalgerror_eigenvalues_nonconvergence(err, flag): + raise LinAlgError("Eigenvalues did not converge") + +def _raise_linalgerror_svd_nonconvergence(err, flag): + raise LinAlgError("SVD did not converge") + +def _raise_linalgerror_lstsq(err, flag): + raise LinAlgError("SVD did not converge in Linear Least Squares") + +def _raise_linalgerror_qr(err, flag): + raise LinAlgError("Incorrect argument found while performing " + "QR factorization") + + +def _makearray(a): + new = asarray(a) + wrap = getattr(a, "__array_wrap__", new.__array_wrap__) + return new, wrap + +def isComplexType(t): + return issubclass(t, complexfloating) + + +_real_types_map = {single: single, + double: double, + csingle: single, + cdouble: double} + +_complex_types_map = {single: csingle, + double: cdouble, + csingle: csingle, + cdouble: cdouble} + +def _realType(t, default=double): + return _real_types_map.get(t, default) + +def _complexType(t, default=cdouble): + return _complex_types_map.get(t, default) + +def _commonType(*arrays): + # in lite version, use higher precision (always 
double or cdouble) + result_type = single + is_complex = False + for a in arrays: + type_ = a.dtype.type + if issubclass(type_, inexact): + if isComplexType(type_): + is_complex = True + rt = _realType(type_, default=None) + if rt is double: + result_type = double + elif rt is None: + # unsupported inexact scalar + raise TypeError(f"array type {a.dtype.name} is unsupported in linalg") + else: + result_type = double + if is_complex: + result_type = _complex_types_map[result_type] + return cdouble, result_type + else: + return double, result_type + + +def _to_native_byte_order(*arrays): + ret = [] + for arr in arrays: + if arr.dtype.byteorder not in ('=', '|'): + ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('='))) + else: + ret.append(arr) + if len(ret) == 1: + return ret[0] + else: + return ret + + +def _assert_2d(*arrays): + for a in arrays: + if a.ndim != 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'two-dimensional' % a.ndim) + +def _assert_stacked_2d(*arrays): + for a in arrays: + if a.ndim < 2: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + +def _assert_stacked_square(*arrays): + for a in arrays: + try: + m, n = a.shape[-2:] + except ValueError: + raise LinAlgError('%d-dimensional array given. Array must be ' + 'at least two-dimensional' % a.ndim) + if m != n: + raise LinAlgError('Last 2 dimensions of the array must be square') + +def _assert_finite(*arrays): + for a in arrays: + if not isfinite(a).all(): + raise LinAlgError("Array must not contain infs or NaNs") + +def _is_empty_2d(arr): + # check size first for efficiency + return arr.size == 0 and prod(arr.shape[-2:]) == 0 + + +def transpose(a): + """ + Transpose each matrix in a stack of matrices. + + Unlike np.transpose, this only swaps the last two axes, rather than all of + them + + Parameters + ---------- + a : (...,M,N) array_like + + Returns + ------- + aT : (...,N,M) ndarray + """ + return swapaxes(a, -1, -2) + +# Linear equations + +def _tensorsolve_dispatcher(a, b, axes=None): + return (a, b) + + +@array_function_dispatch(_tensorsolve_dispatcher) +def tensorsolve(a, b, axes=None): + """ + Solve the tensor equation ``a x = b`` for x. + + It is assumed that all indices of `x` are summed over in the product, + together with the rightmost indices of `a`, as is done in, for example, + ``tensordot(a, x, axes=x.ndim)``. + + Parameters + ---------- + a : array_like + Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals + the shape of that sub-tensor of `a` consisting of the appropriate + number of its rightmost indices, and must be such that + ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be + 'square'). + b : array_like + Right-hand tensor, which can be of any shape. + axes : tuple of ints, optional + Axes in `a` to reorder to the right, before inversion. + If None (default), no reordering is done. + + Returns + ------- + x : ndarray, shape Q + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). 
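As a gloss on the 'square' requirement above: `tensorsolve` flattens the trailing axes of `a` into a square matrix and all of `b` into a vector, then defers to an ordinary solve. A minimal sketch of that equivalence, assuming no `axes` reordering:

```python
import numpy as np

# Replay tensorsolve's reshape-then-solve logic by hand.
a = np.eye(2 * 3).reshape(2, 3, 2, 3)  # prod((2, 3)) == prod((2, 3)), so 'square'
b = np.arange(6.0).reshape(2, 3)
x = np.linalg.solve(a.reshape(6, 6), b.ravel()).reshape(2, 3)
assert np.allclose(x, np.linalg.tensorsolve(a, b))
```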
+ + See Also + -------- + numpy.tensordot, tensorinv, numpy.einsum + + Examples + -------- + >>> import numpy as np + >>> a = np.eye(2*3*4) + >>> a.shape = (2*3, 4, 2, 3, 4) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=(2*3, 4)) + >>> x = np.linalg.tensorsolve(a, b) + >>> x.shape + (2, 3, 4) + >>> np.allclose(np.tensordot(a, x, axes=3), b) + True + + """ + a, wrap = _makearray(a) + b = asarray(b) + an = a.ndim + + if axes is not None: + allaxes = list(range(an)) + for k in axes: + allaxes.remove(k) + allaxes.insert(an, k) + a = a.transpose(allaxes) + + oldshape = a.shape[-(an - b.ndim):] + prod = 1 + for k in oldshape: + prod *= k + + if a.size != prod ** 2: + raise LinAlgError( + "Input arrays must satisfy the requirement \ + prod(a.shape[b.ndim:]) == prod(a.shape[:b.ndim])" + ) + + a = a.reshape(prod, prod) + b = b.ravel() + res = wrap(solve(a, b)) + res.shape = oldshape + return res + + +def _solve_dispatcher(a, b): + return (a, b) + + +@array_function_dispatch(_solve_dispatcher) +def solve(a, b): + """ + Solve a linear matrix equation, or system of linear scalar equations. + + Computes the "exact" solution, `x`, of the well-determined, i.e., full + rank, linear matrix equation `ax = b`. + + Parameters + ---------- + a : (..., M, M) array_like + Coefficient matrix. + b : {(M,), (..., M, K)}, array_like + Ordinate or "dependent variable" values. + + Returns + ------- + x : {(..., M,), (..., M, K)} ndarray + Solution to the system a x = b. Returned shape is (..., M) if b is + shape (M,) and (..., M, K) if b is (..., M, K), where the "..." part is + broadcasted between a and b. + + Raises + ------ + LinAlgError + If `a` is singular or not square. + + See Also + -------- + scipy.linalg.solve : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The solutions are computed using LAPACK routine ``_gesv``. + + `a` must be square and of full-rank, i.e., all rows (or, equivalently, + columns) must be linearly independent; if either is not true, use + `lstsq` for the least-squares best "solution" of the + system/equation. + + .. versionchanged:: 2.0 + + The b array is only treated as a shape (M,) column vector if it is + exactly 1-dimensional. In all other instances it is treated as a stack + of (M, K) matrices. Previously b would be treated as a stack of (M,) + vectors if b.ndim was equal to a.ndim - 1. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 22. 
+ + Examples + -------- + Solve the system of equations: + ``x0 + 2 * x1 = 1`` and + ``3 * x0 + 5 * x1 = 2``: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 5]]) + >>> b = np.array([1, 2]) + >>> x = np.linalg.solve(a, b) + >>> x + array([-1., 1.]) + + Check that the solution is correct: + + >>> np.allclose(np.dot(a, x), b) + True + + """ + a, _ = _makearray(a) + _assert_stacked_square(a) + b, wrap = _makearray(b) + t, result_t = _commonType(a, b) + + # We use the b = (..., M,) logic, only if the number of extra dimensions + # match exactly + if b.ndim == 1: + gufunc = _umath_linalg.solve1 + else: + gufunc = _umath_linalg.solve + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + r = gufunc(a, b, signature=signature) + + return wrap(r.astype(result_t, copy=False)) + + +def _tensorinv_dispatcher(a, ind=None): + return (a,) + + +@array_function_dispatch(_tensorinv_dispatcher) +def tensorinv(a, ind=2): + """ + Compute the 'inverse' of an N-dimensional array. + + The result is an inverse for `a` relative to the tensordot operation + ``tensordot(a, b, ind)``, i. e., up to floating-point accuracy, + ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the + tensordot operation. + + Parameters + ---------- + a : array_like + Tensor to 'invert'. Its shape must be 'square', i. e., + ``prod(a.shape[:ind]) == prod(a.shape[ind:])``. + ind : int, optional + Number of first indices that are involved in the inverse sum. + Must be a positive integer, default is 2. + + Returns + ------- + b : ndarray + `a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``. + + Raises + ------ + LinAlgError + If `a` is singular or not 'square' (in the above sense). + + See Also + -------- + numpy.tensordot, tensorsolve + + Examples + -------- + >>> import numpy as np + >>> a = np.eye(4*6) + >>> a.shape = (4, 6, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=2) + >>> ainv.shape + (8, 3, 4, 6) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=(4, 6)) + >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) + True + + >>> a = np.eye(4*6) + >>> a.shape = (24, 8, 3) + >>> ainv = np.linalg.tensorinv(a, ind=1) + >>> ainv.shape + (8, 3, 24) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=24) + >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + True + + """ + a = asarray(a) + oldshape = a.shape + prod = 1 + if ind > 0: + invshape = oldshape[ind:] + oldshape[:ind] + for k in oldshape[ind:]: + prod *= k + else: + raise ValueError("Invalid ind argument.") + a = a.reshape(prod, -1) + ia = inv(a) + return ia.reshape(*invshape) + + +# Matrix inversion + +def _unary_dispatcher(a): + return (a,) + + +@array_function_dispatch(_unary_dispatcher) +def inv(a): + """ + Compute the inverse of a matrix. + + Given a square matrix `a`, return the matrix `ainv` satisfying + ``a @ ainv = ainv @ a = eye(a.shape[0])``. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be inverted. + + Returns + ------- + ainv : (..., M, M) ndarray or matrix + Inverse of the matrix `a`. + + Raises + ------ + LinAlgError + If `a` is not square or inversion fails. + + See Also + -------- + scipy.linalg.inv : Similar function in SciPy. + numpy.linalg.cond : Compute the condition number of a matrix. + numpy.linalg.svd : Compute the singular value decomposition of a matrix. 
+ + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + If `a` is detected to be singular, a `LinAlgError` is raised. If `a` is + ill-conditioned, a `LinAlgError` may or may not be raised, and results may + be inaccurate due to floating-point errors. + + References + ---------- + .. [1] Wikipedia, "Condition number", + https://en.wikipedia.org/wiki/Condition_number + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import inv + >>> a = np.array([[1., 2.], [3., 4.]]) + >>> ainv = inv(a) + >>> np.allclose(a @ ainv, np.eye(2)) + True + >>> np.allclose(ainv @ a, np.eye(2)) + True + + If a is a matrix object, then the return value is a matrix as well: + + >>> ainv = inv(np.matrix(a)) + >>> ainv + matrix([[-2. , 1. ], + [ 1.5, -0.5]]) + + Inverses of several matrices can be computed at once: + + >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) + >>> inv(a) + array([[[-2. , 1. ], + [ 1.5 , -0.5 ]], + [[-1.25, 0.75], + [ 0.75, -0.25]]]) + + If a matrix is close to singular, the computed inverse may not satisfy + ``a @ ainv = ainv @ a = eye(a.shape[0])`` even if a `LinAlgError` + is not raised: + + >>> a = np.array([[2,4,6],[2,0,2],[6,8,14]]) + >>> inv(a) # No errors raised + array([[-1.12589991e+15, -5.62949953e+14, 5.62949953e+14], + [-1.12589991e+15, -5.62949953e+14, 5.62949953e+14], + [ 1.12589991e+15, 5.62949953e+14, -5.62949953e+14]]) + >>> a @ inv(a) + array([[ 0. , -0.5 , 0. ], # may vary + [-0.5 , 0.625, 0.25 ], + [ 0. , 0. , 1. ]]) + + To detect ill-conditioned matrices, you can use `numpy.linalg.cond` to + compute its *condition number* [1]_. The larger the condition number, the + more ill-conditioned the matrix is. As a rule of thumb, if the condition + number ``cond(a) = 10**k``, then you may lose up to ``k`` digits of + accuracy on top of what would be lost to the numerical method due to loss + of precision from arithmetic methods. + + >>> from numpy.linalg import cond + >>> cond(a) + np.float64(8.659885634118668e+17) # may vary + + It is also possible to detect ill-conditioning by inspecting the matrix's + singular values directly. The ratio between the largest and the smallest + singular value is the condition number: + + >>> from numpy.linalg import svd + >>> sigma = svd(a, compute_uv=False) # Do not compute singular vectors + >>> sigma.max()/sigma.min() + 8.659885634118668e+17 # may vary + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_singular, invalid='call', + over='ignore', divide='ignore', under='ignore'): + ainv = _umath_linalg.inv(a, signature=signature) + return wrap(ainv.astype(result_t, copy=False)) + + +def _matrix_power_dispatcher(a, n): + return (a,) + + +@array_function_dispatch(_matrix_power_dispatcher) +def matrix_power(a, n): + """ + Raise a square matrix to the (integer) power `n`. + + For positive integers `n`, the power is computed by repeated matrix + squarings and matrix multiplications. If ``n == 0``, the identity matrix + of the same shape as M is returned. If ``n < 0``, the inverse + is computed and then raised to the ``abs(n)``. + + .. note:: Stacks of object matrices are not currently supported. + + Parameters + ---------- + a : (..., M, M) array_like + Matrix to be "powered". + n : int + The exponent can be any integer or long integer, positive, + negative, or zero. 
+ + Returns + ------- + a**n : (..., M, M) ndarray or matrix object + The return value is the same shape and type as `M`; + if the exponent is positive or zero then the type of the + elements is the same as those of `M`. If the exponent is + negative the elements are floating-point. + + Raises + ------ + LinAlgError + For matrices that are not square or that (for negative powers) cannot + be inverted numerically. + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import matrix_power + >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit + >>> matrix_power(i, 3) # should = -i + array([[ 0, -1], + [ 1, 0]]) + >>> matrix_power(i, 0) + array([[1, 0], + [0, 1]]) + >>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements + array([[ 0., 1.], + [-1., 0.]]) + + Somewhat more sophisticated example + + >>> q = np.zeros((4, 4)) + >>> q[0:2, 0:2] = -i + >>> q[2:4, 2:4] = i + >>> q # one of the three quaternion units not equal to 1 + array([[ 0., -1., 0., 0.], + [ 1., 0., 0., 0.], + [ 0., 0., 0., 1.], + [ 0., 0., -1., 0.]]) + >>> matrix_power(q, 2) # = -np.eye(4) + array([[-1., 0., 0., 0.], + [ 0., -1., 0., 0.], + [ 0., 0., -1., 0.], + [ 0., 0., 0., -1.]]) + + """ + a = asanyarray(a) + _assert_stacked_square(a) + + try: + n = operator.index(n) + except TypeError as e: + raise TypeError("exponent must be an integer") from e + + # Fall back on dot for object arrays. Object arrays are not supported by + # the current implementation of matmul using einsum + if a.dtype != object: + fmatmul = matmul + elif a.ndim == 2: + fmatmul = dot + else: + raise NotImplementedError( + "matrix_power not supported for stacks of object arrays") + + if n == 0: + a = empty_like(a) + a[...] = eye(a.shape[-2], dtype=a.dtype) + return a + + elif n < 0: + a = inv(a) + n = abs(n) + + # short-cuts. + if n == 1: + return a + + elif n == 2: + return fmatmul(a, a) + + elif n == 3: + return fmatmul(fmatmul(a, a), a) + + # Use binary decomposition to reduce the number of matrix multiplications. + # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to + # increasing powers of 2, and multiply into the result as needed. + z = result = None + while n > 0: + z = a if z is None else fmatmul(z, z) + n, bit = divmod(n, 2) + if bit: + result = z if result is None else fmatmul(result, z) + + return result + + +# Cholesky decomposition + +def _cholesky_dispatcher(a, /, *, upper=None): + return (a,) + + +@array_function_dispatch(_cholesky_dispatcher) +def cholesky(a, /, *, upper=False): + """ + Cholesky decomposition. + + Return the lower or upper Cholesky decomposition, ``L * L.H`` or + ``U.H * U``, of the square matrix ``a``, where ``L`` is lower-triangular, + ``U`` is upper-triangular, and ``.H`` is the conjugate transpose operator + (which is the ordinary transpose if ``a`` is real-valued). ``a`` must be + Hermitian (symmetric if real-valued) and positive-definite. No checking is + performed to verify whether ``a`` is Hermitian or not. In addition, only + the lower or upper-triangular and diagonal elements of ``a`` are used. + Only ``L`` or ``U`` is actually returned. + + Parameters + ---------- + a : (..., M, M) array_like + Hermitian (symmetric if all elements are real), positive-definite + input matrix. + upper : bool + If ``True``, the result must be the upper-triangular Cholesky factor. + If ``False``, the result must be the lower-triangular Cholesky factor. + Default: ``False``. 
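The binary-decomposition loop in `matrix_power` above needs only O(log n) matrix products; a scalar analogue of the same bit-walk, for intuition (`pow_by_squaring` is a hypothetical helper, not part of numpy):

```python
# Scalar exponentiation by squaring, mirroring matrix_power's loop:
# z tracks a ** (2**k); each set bit of n multiplies z into the result.
def pow_by_squaring(a: float, n: int) -> float:
    z = result = None
    while n > 0:
        z = a if z is None else z * z
        n, bit = divmod(n, 2)
        if bit:
            result = z if result is None else result * z
    return 1.0 if result is None else result

assert pow_by_squaring(3.0, 13) == 3.0 ** 13
```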
+ + Returns + ------- + L : (..., M, M) array_like + Lower or upper-triangular Cholesky factor of `a`. Returns a matrix + object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the decomposition fails, for example, if `a` is not + positive-definite. + + See Also + -------- + scipy.linalg.cholesky : Similar function in SciPy. + scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian + positive-definite matrix. + scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in + `scipy.linalg.cho_solve`. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The Cholesky decomposition is often used as a fast way of solving + + .. math:: A \\mathbf{x} = \\mathbf{b} + + (when `A` is both Hermitian/symmetric and positive-definite). + + First, we solve for :math:`\\mathbf{y}` in + + .. math:: L \\mathbf{y} = \\mathbf{b}, + + and then for :math:`\\mathbf{x}` in + + .. math:: L^{H} \\mathbf{x} = \\mathbf{y}. + + Examples + -------- + >>> import numpy as np + >>> A = np.array([[1,-2j],[2j,5]]) + >>> A + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> L = np.linalg.cholesky(A) + >>> L + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) + >>> np.dot(L, L.T.conj()) # verify that L * L.H = A + array([[1.+0.j, 0.-2.j], + [0.+2.j, 5.+0.j]]) + >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? + >>> np.linalg.cholesky(A) # an ndarray object is returned + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) + >>> # But a matrix object is returned if A is a matrix object + >>> np.linalg.cholesky(np.matrix(A)) + matrix([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + >>> # The upper-triangular Cholesky factor can also be obtained. + >>> np.linalg.cholesky(A, upper=True) + array([[1.-0.j, 0.-2.j], + [0.-0.j, 1.-0.j]]) + + """ + gufunc = _umath_linalg.cholesky_up if upper else _umath_linalg.cholesky_lo + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_nonposdef, invalid='call', + over='ignore', divide='ignore', under='ignore'): + r = gufunc(a, signature=signature) + return wrap(r.astype(result_t, copy=False)) + + + # outer product + + + def _outer_dispatcher(x1, x2): + return (x1, x2) + + + @array_function_dispatch(_outer_dispatcher) + def outer(x1, x2, /): + """ + Compute the outer product of two vectors. + + This function is Array API compatible. Compared to ``np.outer`` + it accepts 1-dimensional inputs only. + + Parameters + ---------- + x1 : (M,) array_like + One-dimensional input array of size ``M``. + Must have a numeric data type. + x2 : (N,) array_like + One-dimensional input array of size ``N``. + Must have a numeric data type.
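One property of the `upper` keyword implemented just above is worth spelling out: the two Cholesky factors are conjugate transposes of one another. A quick check (assumes the `upper=` keyword this diff introduces):

```python
import numpy as np

# cholesky(a, upper=True) is the conjugate transpose of the lower factor,
# and either factor reconstructs a.
a = np.array([[4.0, 2.0], [2.0, 3.0]])      # symmetric positive-definite
L = np.linalg.cholesky(a)
U = np.linalg.cholesky(a, upper=True)
assert np.allclose(U, L.conj().T)
assert np.allclose(L @ L.conj().T, a)
```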
+ + Returns + ------- + out : (M, N) ndarray + ``out[i, j] = x1[i] * x2[j]`` + + See also + -------- + outer + + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.linalg.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + if x1.ndim != 1 or x2.ndim != 1: + raise ValueError( + "Input arrays must be one-dimensional, but they are " + f"{x1.ndim=} and {x2.ndim=}." + ) + return _core_outer(x1, x2, out=None) + + + # QR decomposition + + + def _qr_dispatcher(a, mode=None): + return (a,) + + + @array_function_dispatch(_qr_dispatcher) + def qr(a, mode='reduced'): + """ + Compute the qr factorization of a matrix. + + Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is + upper-triangular. + + Parameters + ---------- + a : array_like, shape (..., M, N) + An array-like object with the dimensionality of at least 2. + mode : {'reduced', 'complete', 'r', 'raw'}, optional, default: 'reduced' + If K = min(M, N), then + + * 'reduced' : returns Q, R with dimensions (..., M, K), (..., K, N) + * 'complete' : returns Q, R with dimensions (..., M, M), (..., M, N) + * 'r' : returns R only with dimensions (..., K, N) + * 'raw' : returns h, tau with dimensions (..., N, M), (..., K,) + + The options 'reduced', 'complete', and 'raw' are new in numpy 1.8, + see the notes for more information. The default is 'reduced', and to + maintain backward compatibility with earlier versions of numpy both + it and the old default 'full' can be omitted. Note that array h + returned in 'raw' mode is transposed for calling Fortran. The + 'economic' mode is deprecated. The modes 'full' and 'economic' may + be passed using only the first letter for backwards compatibility, + but all others must be spelled out. See the Notes for more + explanation. + + + Returns + ------- + When mode is 'reduced' or 'complete', the result will be a namedtuple with + the attributes `Q` and `R`. + + Q : ndarray of float or complex, optional + A matrix with orthonormal columns. When mode = 'complete' the + result is an orthogonal/unitary matrix depending on whether or not + a is real/complex. The determinant may be either +/- 1 in that + case. In case the number of dimensions in the input array is + greater than 2 then a stack of the matrices with above properties + is returned. + R : ndarray of float or complex, optional + The upper-triangular matrix or a stack of upper-triangular + matrices if the number of dimensions in the input array is greater + than 2.
+ (h, tau) : ndarrays of np.double or np.cdouble, optional + The array h contains the Householder reflectors that generate q + along with r. The tau array contains scaling factors for the + reflectors. In the deprecated 'economic' mode only h is returned. + + Raises + ------ + LinAlgError + If factoring fails. + + See Also + -------- + scipy.linalg.qr : Similar function in SciPy. + scipy.linalg.rq : Compute RQ decomposition of a matrix. + + Notes + ----- + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, + ``dorgqr``, and ``zungqr``. + + For more information on the qr factorization, see for example: + https://en.wikipedia.org/wiki/QR_factorization + + Subclasses of `ndarray` are preserved except for the 'raw' mode. So if + `a` is of type `matrix`, all the return values will be matrices too. + + New 'reduced', 'complete', and 'raw' options for mode were added in + NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In + addition the options 'full' and 'economic' were deprecated. Because + 'full' was the previous default and 'reduced' is the new default, + backward compatibility can be maintained by letting `mode` default. + The 'raw' option was added so that LAPACK routines that can multiply + arrays by q using the Householder reflectors can be used. Note that in + this case the returned arrays are of type np.double or np.cdouble and + the h array is transposed to be FORTRAN compatible. No routines using + the 'raw' return are currently exposed by numpy, but some are available + in lapack_lite and just await the necessary work. + + Examples + -------- + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) + >>> Q, R = np.linalg.qr(a) + >>> np.allclose(a, np.dot(Q, R)) # a does equal QR + True + >>> R2 = np.linalg.qr(a, mode='r') + >>> np.allclose(R, R2) # mode='r' returns the same R as mode='full' + True + >>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input + >>> Q, R = np.linalg.qr(a) + >>> Q.shape + (3, 2, 2) + >>> R.shape + (3, 2, 2) + >>> np.allclose(a, np.matmul(Q, R)) + True + + Example illustrating a common use of `qr`: solving of least squares + problems + + What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for + the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points + and you'll see that it should be y0 = 0, m = 1.) The answer is provided + by solving the over-determined matrix equation ``Ax = b``, where:: + + A = array([[0, 1], [1, 1], [1, 1], [2, 1]]) + x = array([[y0], [m]]) + b = array([[1], [0], [2], [1]]) + + If A = QR such that Q is orthonormal (which is always possible via + Gram-Schmidt), then ``x = inv(R) * (Q.T) * b``. (In numpy practice, + however, we simply use `lstsq`.) + + >>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]]) + >>> A + array([[0, 1], + [1, 1], + [1, 1], + [2, 1]]) + >>> b = np.array([1, 2, 2, 3]) + >>> Q, R = np.linalg.qr(A) + >>> p = np.dot(Q.T, b) + >>> np.dot(np.linalg.inv(R), p) + array([ 1., 1.]) + + """ + if mode not in ('reduced', 'complete', 'r', 'raw'): + if mode in ('f', 'full'): + # 2013-04-01, 1.8 + msg = ( + "The 'full' option is deprecated in favor of 'reduced'.\n" + "For backward compatibility let mode default." + ) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'reduced' + elif mode in ('e', 'economic'): + # 2013-04-01, 1.8 + msg = "The 'economic' option is deprecated." 
+ warnings.warn(msg, DeprecationWarning, stacklevel=2) + mode = 'economic' + else: + raise ValueError(f"Unrecognized mode '{mode}'") + + a, wrap = _makearray(a) + _assert_stacked_2d(a) + m, n = a.shape[-2:] + t, result_t = _commonType(a) + a = a.astype(t, copy=True) + a = _to_native_byte_order(a) + mn = min(m, n) + + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_qr, invalid='call', + over='ignore', divide='ignore', under='ignore'): + tau = _umath_linalg.qr_r_raw(a, signature=signature) + + # handle modes that don't return q + if mode == 'r': + r = triu(a[..., :mn, :]) + r = r.astype(result_t, copy=False) + return wrap(r) + + if mode == 'raw': + q = transpose(a) + q = q.astype(result_t, copy=False) + tau = tau.astype(result_t, copy=False) + return wrap(q), tau + + if mode == 'economic': + a = a.astype(result_t, copy=False) + return wrap(a) + + # mc is the number of columns in the resulting q + # matrix. If the mode is complete then it is + # same as number of rows, and if the mode is reduced, + # then it is the minimum of number of rows and columns. + if mode == 'complete' and m > n: + mc = m + gufunc = _umath_linalg.qr_complete + else: + mc = mn + gufunc = _umath_linalg.qr_reduced + + signature = 'DD->D' if isComplexType(t) else 'dd->d' + with errstate(call=_raise_linalgerror_qr, invalid='call', + over='ignore', divide='ignore', under='ignore'): + q = gufunc(a, tau, signature=signature) + r = triu(a[..., :mc, :]) + + q = q.astype(result_t, copy=False) + r = r.astype(result_t, copy=False) + + return QRResult(wrap(q), wrap(r)) + +# Eigenvalues + + +@array_function_dispatch(_unary_dispatcher) +def eigvals(a): + """ + Compute the eigenvalues of a general matrix. + + Main difference between `eigvals` and `eig`: the eigenvectors aren't + returned. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues will be computed. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues, each repeated according to its multiplicity. + They are not necessarily ordered, nor are they necessarily + real for real matrices. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigh : eigenvalues and eigenvectors of real symmetric or complex + Hermitian (conjugate symmetric) arrays. + scipy.linalg.eigvals : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + Examples + -------- + Illustration, using the fact that the eigenvalues of a diagonal matrix + are its diagonal elements, that multiplying a matrix on the left + by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose + of `Q`), preserves the eigenvalues of the "middle" matrix. 
In other words, + if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as + ``A``: + + >>> import numpy as np + >>> from numpy import linalg as LA + >>> x = np.random.random() + >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) + >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) + (1.0, 1.0, 0.0) + + Now multiply a diagonal matrix by ``Q`` on one side and + by ``Q.T`` on the other: + + >>> D = np.diag((-1,1)) + >>> LA.eigvals(D) + array([-1., 1.]) + >>> A = np.dot(Q, D) + >>> A = np.dot(A, Q.T) + >>> LA.eigvals(A) + array([ 1., -1.]) # random + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + signature = 'D->D' if isComplexType(t) else 'd->D' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w = _umath_linalg.eigvals(a, signature=signature) + + if not isComplexType(t): + if all(w.imag == 0): + w = w.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + return w.astype(result_t, copy=False) + + +def _eigvalsh_dispatcher(a, UPLO=None): + return (a,) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigvalsh(a, UPLO='L'): + """ + Compute the eigenvalues of a complex Hermitian or real symmetric matrix. + + Main difference from eigh: the eigenvectors are not computed. + + Parameters + ---------- + a : (..., M, M) array_like + A complex- or real-valued matrix whose eigenvalues are to be + computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + w : (..., M,) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eigvals : eigenvalues of general real or complex arrays. + eig : eigenvalues and right eigenvectors of general real or complex + arrays. + scipy.linalg.eigvalsh : Similar function in SciPy. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``. 
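On a symmetric input the general `eigvals` and the symmetric-only `eigvalsh` agree once the general result is sorted; `eigvalsh` additionally guarantees ascending order and a real dtype. A quick illustration (my example):

```python
import numpy as np

# eigvalsh: sorted, real; eigvals: unordered, real here because the
# imaginary parts vanish for a symmetric matrix.
a = np.array([[2.0, 1.0], [1.0, 2.0]])
assert np.allclose(np.linalg.eigvalsh(a),
                   np.sort(np.linalg.eigvals(a).real))
```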
+ + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> LA.eigvalsh(a) + array([ 0.17157288, 5.82842712]) # may vary + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() + >>> # with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa = LA.eigvalsh(a) + >>> wb = LA.eigvals(b) + >>> wa + array([1., 6.]) + >>> wb + array([6.+0.j, 1.+0.j]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + if UPLO == 'L': + gufunc = _umath_linalg.eigvalsh_lo + else: + gufunc = _umath_linalg.eigvalsh_up + + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->d' if isComplexType(t) else 'd->d' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w = gufunc(a, signature=signature) + return w.astype(_realType(result_t), copy=False) + + +# Eigenvectors + + +@array_function_dispatch(_unary_dispatcher) +def eig(a): + """ + Compute the eigenvalues and right eigenvectors of a square array. + + Parameters + ---------- + a : (..., M, M) array + Matrices for which the eigenvalues and right eigenvectors will + be computed + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) array + The eigenvalues, each repeated according to its multiplicity. + The eigenvalues are not necessarily ordered. The resulting + array will be of complex type, unless the imaginary part is + zero in which case it will be cast to a real type. When `a` + is real the resulting eigenvalues will be real (0 imaginary + part) or occur in conjugate pairs + + eigenvectors : (..., M, M) array + The normalized (unit "length") eigenvectors, such that the + column ``eigenvectors[:,i]`` is the eigenvector corresponding to the + eigenvalue ``eigenvalues[i]``. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvals : eigenvalues of a non-symmetric array. + eigh : eigenvalues and eigenvectors of a real symmetric or complex + Hermitian (conjugate symmetric) array. + eigvalsh : eigenvalues of a real symmetric or complex Hermitian + (conjugate symmetric) array. + scipy.linalg.eig : Similar function in SciPy that also solves the + generalized eigenvalue problem. + scipy.linalg.schur : Best choice for unitary and other non-Hermitian + normal matrices. + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + This is implemented using the ``_geev`` LAPACK routines which compute + the eigenvalues and eigenvectors of general square arrays. + + The number `w` is an eigenvalue of `a` if there exists a vector `v` such + that ``a @ v = w * v``. Thus, the arrays `a`, `eigenvalues`, and + `eigenvectors` satisfy the equations ``a @ eigenvectors[:,i] = + eigenvalues[i] * eigenvectors[:,i]`` for :math:`i \\in \\{0,...,M-1\\}`. + + The array `eigenvectors` may not be of maximum rank, that is, some of the + columns may be linearly dependent, although round-off error may obscure + that fact. 
If the eigenvalues are all different, then theoretically the + eigenvectors are linearly independent and `a` can be diagonalized by a + similarity transformation using `eigenvectors`, i.e, ``inv(eigenvectors) @ + a @ eigenvectors`` is diagonal. + + For non-Hermitian normal matrices the SciPy function `scipy.linalg.schur` + is preferred because the matrix `eigenvectors` is guaranteed to be + unitary, which is not the case when using `eig`. The Schur factorization + produces an upper triangular matrix rather than a diagonal matrix, but for + normal matrices only the diagonal of the upper triangular matrix is + needed, the rest is roundoff error. + + Finally, it is emphasized that `eigenvectors` consists of the *right* (as + in right-hand side) eigenvectors of `a`. A vector `y` satisfying ``y.T @ a + = z * y.T`` for some number `z` is called a *left* eigenvector of `a`, + and, in general, the left and right eigenvectors of a matrix are not + necessarily the (perhaps conjugate) transposes of each other. + + References + ---------- + G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL, + Academic Press, Inc., 1980, Various pp. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + + (Almost) trivial example with real eigenvalues and eigenvectors. + + >>> eigenvalues, eigenvectors = LA.eig(np.diag((1, 2, 3))) + >>> eigenvalues + array([1., 2., 3.]) + >>> eigenvectors + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + Real matrix possessing complex eigenvalues and eigenvectors; + note that the eigenvalues are complex conjugates of each other. + + >>> eigenvalues, eigenvectors = LA.eig(np.array([[1, -1], [1, 1]])) + >>> eigenvalues + array([1.+1.j, 1.-1.j]) + >>> eigenvectors + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) + + Complex-valued matrix with real eigenvalues (but complex-valued + eigenvectors); note that ``a.conj().T == a``, i.e., `a` is Hermitian. + + >>> a = np.array([[1, 1j], [-1j, 1]]) + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([2.+0.j, 0.+0.j]) + >>> eigenvectors + array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary + [ 0.70710678+0.j , -0. +0.70710678j]]) + + Be careful about round-off error! + + >>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]]) + >>> # Theor. eigenvalues are 1 +/- 1e-9 + >>> eigenvalues, eigenvectors = LA.eig(a) + >>> eigenvalues + array([1., 1.]) + >>> eigenvectors + array([[1., 0.], + [0., 1.]]) + + """ + a, wrap = _makearray(a) + _assert_stacked_square(a) + _assert_finite(a) + t, result_t = _commonType(a) + + signature = 'D->DD' if isComplexType(t) else 'd->DD' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w, vt = _umath_linalg.eig(a, signature=signature) + + if not isComplexType(t) and all(w.imag == 0.0): + w = w.real + vt = vt.real + result_t = _realType(result_t) + else: + result_t = _complexType(result_t) + + vt = vt.astype(result_t, copy=False) + return EigResult(w.astype(result_t, copy=False), wrap(vt)) + + +@array_function_dispatch(_eigvalsh_dispatcher) +def eigh(a, UPLO='L'): + """ + Return the eigenvalues and eigenvectors of a complex Hermitian + (conjugate symmetric) or a real symmetric matrix. + + Returns two objects, a 1-D array containing the eigenvalues of `a`, and + a 2-D square array or matrix (depending on the input type) of the + corresponding eigenvectors (in columns). 
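The defining identity for `eig`'s return values, stated in the Notes above, can be checked in one line (my example):

```python
import numpy as np

# Each column of eigenvectors satisfies a @ v[:, i] == w[i] * v[:, i];
# broadcasting w across columns checks all pairs at once.
a = np.array([[1.0, -1.0], [1.0, 1.0]])
w, v = np.linalg.eig(a)
assert np.allclose(a @ v, v * w)
```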
+ + Parameters + ---------- + a : (..., M, M) array + Hermitian or real symmetric matrices whose eigenvalues and + eigenvectors are to be computed. + UPLO : {'L', 'U'}, optional + Specifies whether the calculation is done with the lower triangular + part of `a` ('L', default) or the upper triangular part ('U'). + Irrespective of this value only the real parts of the diagonal will + be considered in the computation to preserve the notion of a Hermitian + matrix. It therefore follows that the imaginary part of the diagonal + will always be treated as zero. + + Returns + ------- + A namedtuple with the following attributes: + + eigenvalues : (..., M) ndarray + The eigenvalues in ascending order, each repeated according to + its multiplicity. + eigenvectors : {(..., M, M) ndarray, (..., M, M) matrix} + The column ``eigenvectors[:, i]`` is the normalized eigenvector + corresponding to the eigenvalue ``eigenvalues[i]``. Will return a + matrix object if `a` is a matrix object. + + Raises + ------ + LinAlgError + If the eigenvalue computation does not converge. + + See Also + -------- + eigvalsh : eigenvalues of real symmetric or complex Hermitian + (conjugate symmetric) arrays. + eig : eigenvalues and right eigenvectors for non-symmetric arrays. + eigvals : eigenvalues of non-symmetric arrays. + scipy.linalg.eigh : Similar function in SciPy (but also solves the + generalized eigenvalue problem). + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``, + ``_heevd``. + + The eigenvalues of real symmetric or complex Hermitian matrices are always + real. [1]_ The array `eigenvectors` of (column) eigenvectors is unitary and + `a`, `eigenvalues`, and `eigenvectors` satisfy the equations ``dot(a, + eigenvectors[:, i]) = eigenvalues[i] * eigenvectors[:, i]``. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pg. 222. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, -2j], [2j, 5]]) + >>> a + array([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(a) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) + + >>> (np.dot(a, eigenvectors[:, 0]) - + ... eigenvalues[0] * eigenvectors[:, 0]) # verify 1st eigenval/vec pair + array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j]) + >>> (np.dot(a, eigenvectors[:, 1]) - + ... eigenvalues[1] * eigenvectors[:, 1]) # verify 2nd eigenval/vec pair + array([0.+0.j, 0.+0.j]) + + >>> A = np.matrix(a) # what happens if input is a matrix object + >>> A + matrix([[ 1.+0.j, -0.-2.j], + [ 0.+2.j, 5.+0.j]]) + >>> eigenvalues, eigenvectors = LA.eigh(A) + >>> eigenvalues + array([0.17157288, 5.82842712]) + >>> eigenvectors + matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0.
-0.92387953j]]) + + >>> # demonstrate the treatment of the imaginary part of the diagonal + >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) + >>> a + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) + >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: + >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) + >>> b + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) + >>> wa, va = LA.eigh(a) + >>> wb, vb = LA.eig(b) + >>> wa + array([1., 6.]) + >>> wb + array([6.+0.j, 1.+0.j]) + >>> va + array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary + [ 0. +0.89442719j, 0. -0.4472136j ]]) + >>> vb + array([[ 0.89442719+0.j , -0. +0.4472136j], + [-0. +0.4472136j, 0.89442719+0.j ]]) + + """ + UPLO = UPLO.upper() + if UPLO not in ('L', 'U'): + raise ValueError("UPLO argument must be 'L' or 'U'") + + a, wrap = _makearray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + + if UPLO == 'L': + gufunc = _umath_linalg.eigh_lo + else: + gufunc = _umath_linalg.eigh_up + + signature = 'D->dD' if isComplexType(t) else 'd->dd' + with errstate(call=_raise_linalgerror_eigenvalues_nonconvergence, + invalid='call', over='ignore', divide='ignore', + under='ignore'): + w, vt = gufunc(a, signature=signature) + w = w.astype(_realType(result_t), copy=False) + vt = vt.astype(result_t, copy=False) + return EighResult(w, wrap(vt)) + + +# Singular value decomposition + +def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None): + return (a,) + + +@array_function_dispatch(_svd_dispatcher) +def svd(a, full_matrices=True, compute_uv=True, hermitian=False): + """ + Singular Value Decomposition. + + When `a` is a 2D array, and ``full_matrices=False``, then it is + factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``, where + `u` and the Hermitian transpose of `vh` are 2D arrays with + orthonormal columns and `s` is a 1D array of `a`'s singular + values. When `a` is higher-dimensional, SVD is applied in + stacked mode as explained below. + + Parameters + ---------- + a : (..., M, N) array_like + A real or complex array with ``a.ndim >= 2``. + full_matrices : bool, optional + If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and + ``(..., N, N)``, respectively. Otherwise, the shapes are + ``(..., M, K)`` and ``(..., K, N)``, respectively, where + ``K = min(M, N)``. + compute_uv : bool, optional + Whether or not to compute `u` and `vh` in addition to `s`. True + by default. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + + Returns + ------- + When `compute_uv` is True, the result is a namedtuple with the following + attribute names: + + U : { (..., M, M), (..., M, K) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + S : (..., K) array + Vector(s) with the singular values, within each vector sorted in + descending order. The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. + Vh : { (..., N, N), (..., K, N) } array + Unitary array(s). The first ``a.ndim - 2`` dimensions have the same + size as those of the input `a`. The size of the last two dimensions + depends on the value of `full_matrices`. Only returned when + `compute_uv` is True. + + Raises + ------ + LinAlgError + If SVD computation does not converge. 
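The `hermitian=True` fast path implemented further down reduces the SVD to an eigendecomposition; its net effect on the singular values is just the absolute eigenvalues, sorted descending. A small check of that behaviour (my example):

```python
import numpy as np

# For Hermitian input, singular values are |eigenvalues| in descending
# order (matching the hermitian branch of svd below).
a = np.array([[2.0, 1.0], [1.0, -3.0]])     # symmetric but indefinite
s = np.linalg.svd(a, compute_uv=False, hermitian=True)
assert np.allclose(s, np.sort(np.abs(np.linalg.eigvalsh(a)))[::-1])
```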
+
+    See Also
+    --------
+    scipy.linalg.svd : Similar function in SciPy.
+    scipy.linalg.svdvals : Compute singular values of a matrix.
+
+    Notes
+    -----
+    The decomposition is performed using LAPACK routine ``_gesdd``.
+
+    SVD is usually described for the factorization of a 2D matrix :math:`A`.
+    The higher-dimensional case will be discussed below. In the 2D case, SVD is
+    written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U = u`,
+    :math:`S = \\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`
+    contains the singular values of `a` and `u` and `vh` are unitary. The rows
+    of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are
+    the eigenvectors of :math:`A A^H`. In both cases the corresponding
+    (possibly non-zero) eigenvalues are given by ``s**2``.
+
+    If `a` has more than two dimensions, then broadcasting rules apply, as
+    explained in :ref:`routines.linalg-broadcasting`. This means that SVD is
+    working in "stacked" mode: it iterates over all indices of the first
+    ``a.ndim - 2`` dimensions and for each combination SVD is applied to the
+    last two indices. The matrix `a` can be reconstructed from the
+    decomposition with either ``(u * s[..., None, :]) @ vh`` or
+    ``u @ (s[..., None] * vh)``.
+
+    If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are
+    all the return values.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> a = rng.normal(size=(9, 6)) + 1j*rng.normal(size=(9, 6))
+    >>> b = rng.normal(size=(2, 7, 8, 3)) + 1j*rng.normal(size=(2, 7, 8, 3))
+
+    Reconstruction based on full SVD, 2D case:
+
+    >>> U, S, Vh = np.linalg.svd(a, full_matrices=True)
+    >>> U.shape, S.shape, Vh.shape
+    ((9, 9), (6,), (6, 6))
+    >>> np.allclose(a, np.dot(U[:, :6] * S, Vh))
+    True
+    >>> smat = np.zeros((9, 6), dtype=complex)
+    >>> smat[:6, :6] = np.diag(S)
+    >>> np.allclose(a, np.dot(U, np.dot(smat, Vh)))
+    True
+
+    Reconstruction based on reduced SVD, 2D case:
+
+    >>> U, S, Vh = np.linalg.svd(a, full_matrices=False)
+    >>> U.shape, S.shape, Vh.shape
+    ((9, 6), (6,), (6, 6))
+    >>> np.allclose(a, np.dot(U * S, Vh))
+    True
+    >>> smat = np.diag(S)
+    >>> np.allclose(a, np.dot(U, np.dot(smat, Vh)))
+    True
+
+    Reconstruction based on full SVD, 4D case:
+
+    >>> U, S, Vh = np.linalg.svd(b, full_matrices=True)
+    >>> U.shape, S.shape, Vh.shape
+    ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))
+    >>> np.allclose(b, np.matmul(U[..., :3] * S[..., None, :], Vh))
+    True
+    >>> np.allclose(b, np.matmul(U[..., :3], S[..., None] * Vh))
+    True
+
+    Reconstruction based on reduced SVD, 4D case:
+
+    >>> U, S, Vh = np.linalg.svd(b, full_matrices=False)
+    >>> U.shape, S.shape, Vh.shape
+    ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))
+    >>> np.allclose(b, np.matmul(U * S[..., None, :], Vh))
+    True
+    >>> np.allclose(b, np.matmul(U, S[..., None] * Vh))
+    True
+
+    """
+    import numpy as np
+    a, wrap = _makearray(a)
+
+    if hermitian:
+        # note: lapack svd returns eigenvalues with s ** 2 sorted descending,
+        # but eig returns s sorted ascending, so we re-order the eigenvalues
+        # and related arrays to have the correct order
+        if compute_uv:
+            s, u = eigh(a)
+            sgn = sign(s)
+            s = abs(s)
+            sidx = argsort(s)[..., ::-1]
+            sgn = np.take_along_axis(sgn, sidx, axis=-1)
+            s = np.take_along_axis(s, sidx, axis=-1)
+            u = np.take_along_axis(u, sidx[..., None, :], axis=-1)
+            # singular values are unsigned, move the sign into v
+            vt = transpose(u * sgn[..., None,
:]).conjugate()
+            return SVDResult(wrap(u), s, wrap(vt))
+        else:
+            s = eigvalsh(a)
+            s = abs(s)
+            return sort(s)[..., ::-1]
+
+    _assert_stacked_2d(a)
+    t, result_t = _commonType(a)
+
+    m, n = a.shape[-2:]
+    if compute_uv:
+        if full_matrices:
+            gufunc = _umath_linalg.svd_f
+        else:
+            gufunc = _umath_linalg.svd_s
+
+        signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
+        with errstate(call=_raise_linalgerror_svd_nonconvergence,
+                      invalid='call', over='ignore', divide='ignore',
+                      under='ignore'):
+            u, s, vh = gufunc(a, signature=signature)
+        u = u.astype(result_t, copy=False)
+        s = s.astype(_realType(result_t), copy=False)
+        vh = vh.astype(result_t, copy=False)
+        return SVDResult(wrap(u), s, wrap(vh))
+    else:
+        signature = 'D->d' if isComplexType(t) else 'd->d'
+        with errstate(call=_raise_linalgerror_svd_nonconvergence,
+                      invalid='call', over='ignore', divide='ignore',
+                      under='ignore'):
+            s = _umath_linalg.svd(a, signature=signature)
+        s = s.astype(_realType(result_t), copy=False)
+        return s
+
+
+def _svdvals_dispatcher(x):
+    return (x,)
+
+
+@array_function_dispatch(_svdvals_dispatcher)
+def svdvals(x, /):
+    """
+    Returns the singular values of a matrix (or a stack of matrices) ``x``.
+    When ``x`` is a stack of matrices, the function computes the singular
+    values for each matrix in the stack.
+
+    This function is Array API compatible.
+
+    Calling ``np.linalg.svdvals(x)`` to get singular values is the same as
+    calling ``np.linalg.svd(x, compute_uv=False, hermitian=False)``.
+
+    Parameters
+    ----------
+    x : (..., M, N) array_like
+        Input array having shape (..., M, N) and whose last two
+        dimensions form matrices on which to perform singular value
+        decomposition. Should have a floating-point data type.
+
+    Returns
+    -------
+    out : ndarray
+        An array with shape (..., K) that contains the vector(s)
+        of singular values of length K, where K = min(M, N).
+
+    See Also
+    --------
+    scipy.linalg.svdvals : Compute singular values of a matrix.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> np.linalg.svdvals([[1, 2, 3, 4, 5],
+    ...                    [1, 4, 9, 16, 25],
+    ...                    [1, 8, 27, 64, 125]])
+    array([146.68862757,   5.57510612,   0.60393245])
+
+    Determine the rank of a matrix using singular values:
+
+    >>> s = np.linalg.svdvals([[1, 2, 3],
+    ...                        [2, 4, 6],
+    ...                        [-1, 1, -1]]); s
+    array([8.38434191e+00, 1.64402274e+00, 2.31534378e-16])
+    >>> np.count_nonzero(s > 1e-10)  # Matrix of rank 2
+    2
+
+    """
+    return svd(x, compute_uv=False, hermitian=False)
+
+
+def _cond_dispatcher(x, p=None):
+    return (x,)
+
+
+@array_function_dispatch(_cond_dispatcher)
+def cond(x, p=None):
+    """
+    Compute the condition number of a matrix.
+
+    This function is capable of returning the condition number using
+    one of seven different norms, depending on the value of `p` (see
+    Parameters below).
+
+    Parameters
+    ----------
+    x : (..., M, N) array_like
+        The matrix whose condition number is sought.
+    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
+        Order of the norm used in the condition number computation:
+
+        ===== ============================
+        p     norm for matrices
+        ===== ============================
+        None  2-norm, computed directly using the ``SVD``
+        'fro' Frobenius norm
+        inf   max(sum(abs(x), axis=1))
+        -inf  min(sum(abs(x), axis=1))
+        1     max(sum(abs(x), axis=0))
+        -1    min(sum(abs(x), axis=0))
+        2     2-norm (largest sing. value)
+        -2    smallest singular value
+        ===== ============================
+
+        inf means the `numpy.inf` object, and the Frobenius norm is
+        the root-of-sum-of-squares norm.
+
+    Returns
+    -------
+    c : {float, inf}
+        The condition number of the matrix.
May be infinite. + + See Also + -------- + numpy.linalg.norm + + Notes + ----- + The condition number of `x` is defined as the norm of `x` times the + norm of the inverse of `x` [1]_; the norm can be the usual L2-norm + (root-of-sum-of-squares) or one of a number of other matrix norms. + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL, + Academic Press, Inc., 1980, pg. 285. + + Examples + -------- + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) + >>> a + array([[ 1, 0, -1], + [ 0, 1, 0], + [ 1, 0, 1]]) + >>> LA.cond(a) + 1.4142135623730951 + >>> LA.cond(a, 'fro') + 3.1622776601683795 + >>> LA.cond(a, np.inf) + 2.0 + >>> LA.cond(a, -np.inf) + 1.0 + >>> LA.cond(a, 1) + 2.0 + >>> LA.cond(a, -1) + 1.0 + >>> LA.cond(a, 2) + 1.4142135623730951 + >>> LA.cond(a, -2) + 0.70710678118654746 # may vary + >>> (min(LA.svd(a, compute_uv=False)) * + ... min(LA.svd(LA.inv(a), compute_uv=False))) + 0.70710678118654746 # may vary + + """ + x = asarray(x) # in case we have a matrix + if _is_empty_2d(x): + raise LinAlgError("cond is not defined on empty arrays") + if p is None or p in {2, -2}: + s = svd(x, compute_uv=False) + with errstate(all='ignore'): + if p == -2: + r = s[..., -1] / s[..., 0] + else: + r = s[..., 0] / s[..., -1] + else: + # Call inv(x) ignoring errors. The result array will + # contain nans in the entries where inversion failed. + _assert_stacked_square(x) + t, result_t = _commonType(x) + result_t = _realType(result_t) # condition number is always real + signature = 'D->D' if isComplexType(t) else 'd->d' + with errstate(all='ignore'): + invx = _umath_linalg.inv(x, signature=signature) + r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1)) + r = r.astype(result_t, copy=False) + + # Convert nans to infs unless the original array had nan entries + r = asarray(r) + nan_mask = isnan(r) + if nan_mask.any(): + nan_mask &= ~isnan(x).any(axis=(-2, -1)) + if r.ndim > 0: + r[nan_mask] = inf + elif nan_mask: + r[()] = inf + + # Convention is to return scalars instead of 0d arrays + if r.ndim == 0: + r = r[()] + + return r + + +def _matrix_rank_dispatcher(A, tol=None, hermitian=None, *, rtol=None): + return (A,) + + +@array_function_dispatch(_matrix_rank_dispatcher) +def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): + """ + Return matrix rank of array using SVD method + + Rank of the array is the number of singular values of the array that are + greater than `tol`. + + Parameters + ---------- + A : {(M,), (..., M, N)} array_like + Input vector or stack of matrices. + tol : (...) array_like, float, optional + Threshold below which SVD values are considered zero. If `tol` is + None, and ``S`` is an array with singular values for `M`, and + ``eps`` is the epsilon value for datatype of ``S``, then `tol` is + set to ``S.max() * max(M, N) * eps``. + hermitian : bool, optional + If True, `A` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + rtol : (...) array_like, float, optional + Parameter for the relative tolerance component. Only ``tol`` or + ``rtol`` can be set at a time. Defaults to ``max(M, N) * eps``. + + .. versionadded:: 2.0.0 + + Returns + ------- + rank : (...) array_like + Rank of A. + + Notes + ----- + The default threshold to detect rank deficiency is a test on the magnitude + of the singular values of `A`. 
By default, we identify singular values + less than ``S.max() * max(M, N) * eps`` as indicating rank deficiency + (with the symbols defined above). This is the algorithm MATLAB uses [1]. + It also appears in *Numerical recipes* in the discussion of SVD solutions + for linear least squares [2]. + + This default threshold is designed to detect rank deficiency accounting + for the numerical errors of the SVD computation. Imagine that there + is a column in `A` that is an exact (in floating point) linear combination + of other columns in `A`. Computing the SVD on `A` will not produce + a singular value exactly equal to 0 in general: any difference of + the smallest SVD value from 0 will be caused by numerical imprecision + in the calculation of the SVD. Our threshold for small SVD values takes + this numerical imprecision into account, and the default threshold will + detect such numerical rank deficiency. The threshold may declare a matrix + `A` rank deficient even if the linear combination of some columns of `A` + is not exactly equal to another column of `A` but only numerically very + close to another column of `A`. + + We chose our default threshold because it is in wide use. Other thresholds + are possible. For example, elsewhere in the 2007 edition of *Numerical + recipes* there is an alternative threshold of ``S.max() * + np.finfo(A.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe + this threshold as being based on "expected roundoff error" (p 71). + + The thresholds above deal with floating point roundoff error in the + calculation of the SVD. However, you may have more information about + the sources of error in `A` that would make you consider other tolerance + values to detect *effective* rank deficiency. The most useful measure + of the tolerance depends on the operations you intend to use on your + matrix. For example, if your data come from uncertain measurements with + uncertainties greater than floating point epsilon, choosing a tolerance + near that uncertainty may be preferable. The tolerance may be absolute + if the uncertainties are absolute rather than relative. + + References + ---------- + .. [1] MATLAB reference documentation, "Rank" + https://www.mathworks.com/help/techdoc/ref/rank.html + .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery, + "Numerical Recipes (3rd edition)", Cambridge University Press, 2007, + page 795. + + Examples + -------- + >>> import numpy as np + >>> from numpy.linalg import matrix_rank + >>> matrix_rank(np.eye(4)) # Full rank matrix + 4 + >>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix + >>> matrix_rank(I) + 3 + >>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0 + 1 + >>> matrix_rank(np.zeros((4,))) + 0 + """ + if rtol is not None and tol is not None: + raise ValueError("`tol` and `rtol` can't be both set.") + + A = asarray(A) + if A.ndim < 2: + return int(not all(A == 0)) + S = svd(A, compute_uv=False, hermitian=hermitian) + + if tol is None: + if rtol is None: + rtol = max(A.shape[-2:]) * finfo(S.dtype).eps + else: + rtol = asarray(rtol)[..., newaxis] + tol = S.max(axis=-1, keepdims=True) * rtol + else: + tol = asarray(tol)[..., newaxis] + + return count_nonzero(S > tol, axis=-1) + + +# Generalized inverse + +def _pinv_dispatcher(a, rcond=None, hermitian=None, *, rtol=None): + return (a,) + + +@array_function_dispatch(_pinv_dispatcher) +def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): + """ + Compute the (Moore-Penrose) pseudo-inverse of a matrix. 
+ + Calculate the generalized inverse of a matrix using its + singular-value decomposition (SVD) and including all + *large* singular values. + + Parameters + ---------- + a : (..., M, N) array_like + Matrix or stack of matrices to be pseudo-inverted. + rcond : (...) array_like of float, optional + Cutoff for small singular values. + Singular values less than or equal to + ``rcond * largest_singular_value`` are set to zero. + Broadcasts against the stack of matrices. Default: ``1e-15``. + hermitian : bool, optional + If True, `a` is assumed to be Hermitian (symmetric if real-valued), + enabling a more efficient method for finding singular values. + Defaults to False. + rtol : (...) array_like of float, optional + Same as `rcond`, but it's an Array API compatible parameter name. + Only `rcond` or `rtol` can be set at a time. If none of them are + provided then NumPy's ``1e-15`` default is used. If ``rtol=None`` + is passed then the API standard default is used. + + .. versionadded:: 2.0.0 + + Returns + ------- + B : (..., N, M) ndarray + The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so + is `B`. + + Raises + ------ + LinAlgError + If the SVD computation does not converge. + + See Also + -------- + scipy.linalg.pinv : Similar function in SciPy. + scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a + Hermitian matrix. + + Notes + ----- + The pseudo-inverse of a matrix A, denoted :math:`A^+`, is + defined as: "the matrix that 'solves' [the least-squares problem] + :math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then + :math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`. + + It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular + value decomposition of A, then + :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are + orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting + of A's so-called singular values, (followed, typically, by + zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix + consisting of the reciprocals of A's singular values + (again, followed by zeros). [1]_ + + References + ---------- + .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, + FL, Academic Press, Inc., 1980, pp. 139-142. + + Examples + -------- + The following example checks that ``a * a+ * a == a`` and + ``a+ * a * a+ == a+``: + + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) + >>> B = np.linalg.pinv(a) + >>> np.allclose(a, np.dot(a, np.dot(B, a))) + True + >>> np.allclose(B, np.dot(B, np.dot(a, B))) + True + + """ + a, wrap = _makearray(a) + if rcond is None: + if rtol is _NoValue: + rcond = 1e-15 + elif rtol is None: + rcond = max(a.shape[-2:]) * finfo(a.dtype).eps + else: + rcond = rtol + elif rtol is not _NoValue: + raise ValueError("`rtol` and `rcond` can't be both set.") + else: + # NOTE: Deprecate `rcond` in a few versions. 
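+            # Cutoff resolution, as implemented by the branches above:
+            #   pinv(a)                  -> rcond = 1e-15 (legacy default)
+            #   pinv(a, rtol=None)       -> rcond = max(M, N) * eps (Array API)
+            #   pinv(a, rtol=r)          -> rcond = r
+            #   pinv(a, rcond=c)         -> rcond = c (kept as-is, this branch)
+            #   pinv(a, rcond=c, rtol=r) -> ValueError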
+ pass + + rcond = asarray(rcond) + if _is_empty_2d(a): + m, n = a.shape[-2:] + res = empty(a.shape[:-2] + (n, m), dtype=a.dtype) + return wrap(res) + a = a.conjugate() + u, s, vt = svd(a, full_matrices=False, hermitian=hermitian) + + # discard small singular values + cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True) + large = s > cutoff + s = divide(1, s, where=large, out=s) + s[~large] = 0 + + res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u))) + return wrap(res) + + +# Determinant + + +@array_function_dispatch(_unary_dispatcher) +def slogdet(a): + """ + Compute the sign and (natural) logarithm of the determinant of an array. + + If an array has a very small or very large determinant, then a call to + `det` may overflow or underflow. This routine is more robust against such + issues, because it computes the logarithm of the determinant rather than + the determinant itself. + + Parameters + ---------- + a : (..., M, M) array_like + Input array, has to be a square 2-D array. + + Returns + ------- + A namedtuple with the following attributes: + + sign : (...) array_like + A number representing the sign of the determinant. For a real matrix, + this is 1, 0, or -1. For a complex matrix, this is a complex number + with absolute value 1 (i.e., it is on the unit circle), or else 0. + logabsdet : (...) array_like + The natural log of the absolute value of the determinant. + + If the determinant is zero, then `sign` will be 0 and `logabsdet` + will be -inf. In all cases, the determinant is equal to + ``sign * np.exp(logabsdet)``. + + See Also + -------- + det + + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + Examples + -------- + The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> (sign, logabsdet) = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (-1, 0.69314718055994529) # may vary + >>> sign * np.exp(logabsdet) + -2.0 + + Computing log-determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> sign, logabsdet = np.linalg.slogdet(a) + >>> (sign, logabsdet) + (array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154])) + >>> sign * np.exp(logabsdet) + array([-2., -3., -8.]) + + This routine succeeds where ordinary `det` does not: + + >>> np.linalg.det(np.eye(500) * 0.1) + 0.0 + >>> np.linalg.slogdet(np.eye(500) * 0.1) + (1, -1151.2925464970228) + + """ + a = asarray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + real_t = _realType(result_t) + signature = 'D->Dd' if isComplexType(t) else 'd->dd' + sign, logdet = _umath_linalg.slogdet(a, signature=signature) + sign = sign.astype(result_t, copy=False) + logdet = logdet.astype(real_t, copy=False) + return SlogdetResult(sign, logdet) + + +@array_function_dispatch(_unary_dispatcher) +def det(a): + """ + Compute the determinant of an array. + + Parameters + ---------- + a : (..., M, M) array_like + Input array to compute determinants for. + + Returns + ------- + det : (...) array_like + Determinant of `a`. + + See Also + -------- + slogdet : Another way to represent the determinant, more suitable + for large matrices where underflow/overflow may occur. + scipy.linalg.det : Similar function in SciPy. 
+ + Notes + ----- + Broadcasting rules apply, see the `numpy.linalg` documentation for + details. + + The determinant is computed via LU factorization using the LAPACK + routine ``z/dgetrf``. + + Examples + -------- + The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + + >>> import numpy as np + >>> a = np.array([[1, 2], [3, 4]]) + >>> np.linalg.det(a) + -2.0 # may vary + + Computing determinants for a stack of matrices: + + >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) + >>> a.shape + (3, 2, 2) + >>> np.linalg.det(a) + array([-2., -3., -8.]) + + """ + a = asarray(a) + _assert_stacked_square(a) + t, result_t = _commonType(a) + signature = 'D->D' if isComplexType(t) else 'd->d' + r = _umath_linalg.det(a, signature=signature) + r = r.astype(result_t, copy=False) + return r + + +# Linear Least Squares + +def _lstsq_dispatcher(a, b, rcond=None): + return (a, b) + + +@array_function_dispatch(_lstsq_dispatcher) +def lstsq(a, b, rcond=None): + r""" + Return the least-squares solution to a linear matrix equation. + + Computes the vector `x` that approximately solves the equation + ``a @ x = b``. The equation may be under-, well-, or over-determined + (i.e., the number of linearly independent rows of `a` can be less than, + equal to, or greater than its number of linearly independent columns). + If `a` is square and of full rank, then `x` (but for round-off error) + is the "exact" solution of the equation. Else, `x` minimizes the + Euclidean 2-norm :math:`||b - ax||`. If there are multiple minimizing + solutions, the one with the smallest 2-norm :math:`||x||` is returned. + + Parameters + ---------- + a : (M, N) array_like + "Coefficient" matrix. + b : {(M,), (M, K)} array_like + Ordinate or "dependent variable" values. If `b` is two-dimensional, + the least-squares solution is calculated for each of the `K` columns + of `b`. + rcond : float, optional + Cut-off ratio for small singular values of `a`. + For the purposes of rank determination, singular values are treated + as zero if they are smaller than `rcond` times the largest singular + value of `a`. + The default uses the machine precision times ``max(M, N)``. Passing + ``-1`` will use machine precision. + + .. versionchanged:: 2.0 + Previously, the default was ``-1``, but a warning was given that + this would change. + + Returns + ------- + x : {(N,), (N, K)} ndarray + Least-squares solution. If `b` is two-dimensional, + the solutions are in the `K` columns of `x`. + residuals : {(1,), (K,), (0,)} ndarray + Sums of squared residuals: Squared Euclidean 2-norm for each column in + ``b - a @ x``. + If the rank of `a` is < N or M <= N, this is an empty array. + If `b` is 1-dimensional, this is a (1,) shape array. + Otherwise the shape is (K,). + rank : int + Rank of matrix `a`. + s : (min(M, N),) ndarray + Singular values of `a`. + + Raises + ------ + LinAlgError + If computation does not converge. + + See Also + -------- + scipy.linalg.lstsq : Similar function in SciPy. + + Notes + ----- + If `b` is a matrix, then all array results are returned as matrices. + + Examples + -------- + Fit a line, ``y = mx + c``, through some noisy data-points: + + >>> import numpy as np + >>> x = np.array([0, 1, 2, 3]) + >>> y = np.array([-1, 0.2, 0.9, 2.1]) + + By examining the coefficients, we see that the line should have a + gradient of roughly 1 and cut the y-axis at, more or less, -1. + + We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]`` + and ``p = [[m], [c]]``. 
(``A`` stacks ``x`` against a column
+    of ones, so ``A @ p`` evaluates ``m*x + c`` at every sample.) Now use
+    `lstsq` to solve for `p`:
+
+    >>> A = np.vstack([x, np.ones(len(x))]).T
+    >>> A
+    array([[ 0.,  1.],
+           [ 1.,  1.],
+           [ 2.,  1.],
+           [ 3.,  1.]])
+
+    >>> m, c = np.linalg.lstsq(A, y)[0]
+    >>> m, c
+    (1.0, -0.95) # may vary
+
+    Plot the data along with the fitted line:
+
+    >>> import matplotlib.pyplot as plt
+    >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
+    >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
+    >>> _ = plt.legend()
+    >>> plt.show()
+
+    """
+    a, _ = _makearray(a)
+    b, wrap = _makearray(b)
+    is_1d = b.ndim == 1
+    if is_1d:
+        b = b[:, newaxis]
+    _assert_2d(a, b)
+    m, n = a.shape[-2:]
+    m2, n_rhs = b.shape[-2:]
+    if m != m2:
+        raise LinAlgError('Incompatible dimensions')
+
+    t, result_t = _commonType(a, b)
+    result_real_t = _realType(result_t)
+
+    if rcond is None:
+        rcond = finfo(t).eps * max(n, m)
+
+    signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
+    if n_rhs == 0:
+        # lapack can't handle n_rhs = 0 - so allocate
+        # the array one larger in that axis
+        b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
+
+    with errstate(call=_raise_linalgerror_lstsq, invalid='call',
+                  over='ignore', divide='ignore', under='ignore'):
+        x, resids, rank, s = _umath_linalg.lstsq(a, b, rcond,
+                                                 signature=signature)
+    if m == 0:
+        x[...] = 0
+    if n_rhs == 0:
+        # remove the item we added
+        x = x[..., :n_rhs]
+        resids = resids[..., :n_rhs]
+
+    # remove the axis we added
+    if is_1d:
+        x = x.squeeze(axis=-1)
+        # we probably should squeeze resids too, but we can't
+        # without breaking compatibility.
+
+    # as documented
+    if rank != n or m <= n:
+        resids = array([], result_real_t)
+
+    # coerce output arrays
+    s = s.astype(result_real_t, copy=False)
+    resids = resids.astype(result_real_t, copy=False)
+    # Copying lets the memory in r_parts be freed
+    x = x.astype(result_t, copy=True)
+    return wrap(x), wrap(resids), rank, s
+
+
+def _multi_svd_norm(x, row_axis, col_axis, op, initial=None):
+    """Compute a function of the singular values of the 2-D matrices in `x`.
+
+    This is a private utility function used by `numpy.linalg.norm()`.
+
+    Parameters
+    ----------
+    x : ndarray
+    row_axis, col_axis : int
+        The axes of `x` that hold the 2-D matrices.
+    op : callable
+        This should be either `numpy.amin`, `numpy.amax` or `numpy.sum`.
+
+    Returns
+    -------
+    result : float or ndarray
+        If `x` is 2-D, the return value is a float.
+        Otherwise, it is an array with ``x.ndim - 2`` dimensions.
+        The return values are the minimum, maximum, or sum of the
+        singular values of the matrices, depending on whether `op`
+        is `numpy.amin`, `numpy.amax` or `numpy.sum`.
+
+    """
+    y = moveaxis(x, (row_axis, col_axis), (-2, -1))
+    result = op(svd(y, compute_uv=False), axis=-1, initial=initial)
+    return result
+
+
+def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
+    return (x,)
+
+
+@array_function_dispatch(_norm_dispatcher)
+def norm(x, ord=None, axis=None, keepdims=False):
+    """
+    Matrix or vector norm.
+
+    This function is able to return one of eight different matrix norms,
+    or one of an infinite number of vector norms (described below), depending
+    on the value of the ``ord`` parameter.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
+        is None. If both `axis` and `ord` are None, the 2-norm of
+        ``x.ravel`` will be returned.
+    ord : {int, float, inf, -inf, 'fro', 'nuc'}, optional
+        Order of the norm (see table under ``Notes`` for what values are
+        supported for matrices and vectors respectively).
inf means numpy's + `inf` object. The default is None. + axis : {None, int, 2-tuple of ints}, optional. + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default + is None. + + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `x`. + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + See Also + -------- + scipy.linalg.norm : Similar function in SciPy. + + Notes + ----- + For values of ``ord < 1``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + inf max(sum(abs(x), axis=1)) max(abs(x)) + -inf min(sum(abs(x), axis=1)) min(abs(x)) + 0 -- sum(x != 0) + 1 max(sum(abs(x), axis=0)) as below + -1 min(sum(abs(x), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other -- sum(abs(x)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + Both the Frobenius and nuclear norm orders are only defined for + matrices and raise a ValueError when ``x.ndim != 2``. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + + >>> import numpy as np + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.norm(a) + 7.745966692414834 + >>> LA.norm(b) + 7.745966692414834 + >>> LA.norm(b, 'fro') + 7.745966692414834 + >>> LA.norm(a, np.inf) + 4.0 + >>> LA.norm(b, np.inf) + 9.0 + >>> LA.norm(a, -np.inf) + 0.0 + >>> LA.norm(b, -np.inf) + 2.0 + + >>> LA.norm(a, 1) + 20.0 + >>> LA.norm(b, 1) + 7.0 + >>> LA.norm(a, -1) + -4.6566128774142013e-010 + >>> LA.norm(b, -1) + 6.0 + >>> LA.norm(a, 2) + 7.745966692414834 + >>> LA.norm(b, 2) + 7.3484692283495345 + + >>> LA.norm(a, -2) + 0.0 + >>> LA.norm(b, -2) + 1.8570331885190563e-016 # may vary + >>> LA.norm(a, 3) + 5.8480354764257312 # may vary + >>> LA.norm(a, -3) + 0.0 + + Using the `axis` argument to compute vector norms: + + >>> c = np.array([[ 1, 2, 3], + ... [-1, 1, 4]]) + >>> LA.norm(c, axis=0) + array([ 1.41421356, 2.23606798, 5. 
]) + >>> LA.norm(c, axis=1) + array([ 3.74165739, 4.24264069]) + >>> LA.norm(c, ord=1, axis=1) + array([ 6., 6.]) + + Using the `axis` argument to compute matrix norms: + + >>> m = np.arange(8).reshape(2,2,2) + >>> LA.norm(m, axis=(1,2)) + array([ 3.74165739, 11.22497216]) + >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :]) + (3.7416573867739413, 11.224972160321824) + + """ + x = asarray(x) + + if not issubclass(x.dtype.type, (inexact, object_)): + x = x.astype(float) + + # Immediately handle some default, simple, fast, and common cases. + if axis is None: + ndim = x.ndim + if ( + (ord is None) or + (ord in ('f', 'fro') and ndim == 2) or + (ord == 2 and ndim == 1) + ): + x = x.ravel(order='K') + if isComplexType(x.dtype.type): + x_real = x.real + x_imag = x.imag + sqnorm = x_real.dot(x_real) + x_imag.dot(x_imag) + else: + sqnorm = x.dot(x) + ret = sqrt(sqnorm) + if keepdims: + ret = ret.reshape(ndim * [1]) + return ret + + # Normalize the `axis` argument to a tuple. + nd = x.ndim + if axis is None: + axis = tuple(range(nd)) + elif not isinstance(axis, tuple): + try: + axis = int(axis) + except Exception as e: + raise TypeError( + "'axis' must be None, an integer or a tuple of integers" + ) from e + axis = (axis,) + + if len(axis) == 1: + if ord == inf: + return abs(x).max(axis=axis, keepdims=keepdims, initial=0) + elif ord == -inf: + return abs(x).min(axis=axis, keepdims=keepdims) + elif ord == 0: + # Zero norm + return ( + (x != 0) + .astype(x.real.dtype) + .sum(axis=axis, keepdims=keepdims) + ) + elif ord == 1: + # special case for speedup + return add.reduce(abs(x), axis=axis, keepdims=keepdims) + elif ord is None or ord == 2: + # special case for speedup + s = (x.conj() * x).real + return sqrt(add.reduce(s, axis=axis, keepdims=keepdims)) + # None of the str-type keywords for ord ('fro', 'nuc') + # are valid for vectors + elif isinstance(ord, str): + raise ValueError(f"Invalid norm order '{ord}' for vectors") + else: + absx = abs(x) + absx **= ord + ret = add.reduce(absx, axis=axis, keepdims=keepdims) + ret **= reciprocal(ord, dtype=ret.dtype) + return ret + elif len(axis) == 2: + row_axis, col_axis = axis + row_axis = normalize_axis_index(row_axis, nd) + col_axis = normalize_axis_index(col_axis, nd) + if row_axis == col_axis: + raise ValueError('Duplicate axes given.') + if ord == 2: + ret = _multi_svd_norm(x, row_axis, col_axis, amax, 0) + elif ord == -2: + ret = _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis, initial=0) + elif ord == inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis, initial=0) + elif ord == -1: + if col_axis > row_axis: + col_axis -= 1 + ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis) + elif ord == -inf: + if row_axis > col_axis: + row_axis -= 1 + ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis) + elif ord in [None, 'fro', 'f']: + ret = sqrt(add.reduce((x.conj() * x).real, axis=axis)) + elif ord == 'nuc': + ret = _multi_svd_norm(x, row_axis, col_axis, sum, 0) + else: + raise ValueError("Invalid norm order for matrices.") + if keepdims: + ret_shape = list(x.shape) + ret_shape[axis[0]] = 1 + ret_shape[axis[1]] = 1 + ret = ret.reshape(ret_shape) + return ret + else: + raise ValueError("Improper number of dimensions to norm.") + + +# multi_dot + +def _multidot_dispatcher(arrays, *, out=None): + yield from arrays + yield out + + +@array_function_dispatch(_multidot_dispatcher) +def 
multi_dot(arrays, *, out=None): + """ + Compute the dot product of two or more arrays in a single function call, + while automatically selecting the fastest evaluation order. + + `multi_dot` chains `numpy.dot` and uses optimal parenthesization + of the matrices [1]_ [2]_. Depending on the shapes of the matrices, + this can speed up the multiplication a lot. + + If the first argument is 1-D it is treated as a row vector. + If the last argument is 1-D it is treated as a column vector. + The other arguments must be 2-D. + + Think of `multi_dot` as:: + + def multi_dot(arrays): return functools.reduce(np.dot, arrays) + + + Parameters + ---------- + arrays : sequence of array_like + If the first argument is 1-D it is treated as row vector. + If the last argument is 1-D it is treated as column vector. + The other arguments must be 2-D. + out : ndarray, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a, b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + Returns + ------- + output : ndarray + Returns the dot product of the supplied arrays. + + See Also + -------- + numpy.dot : dot multiplication with two arguments. + + References + ---------- + + .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378 + .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication + + Examples + -------- + `multi_dot` allows you to write:: + + >>> import numpy as np + >>> from numpy.linalg import multi_dot + >>> # Prepare some data + >>> A = np.random.random((10000, 100)) + >>> B = np.random.random((100, 1000)) + >>> C = np.random.random((1000, 5)) + >>> D = np.random.random((5, 333)) + >>> # the actual dot multiplication + >>> _ = multi_dot([A, B, C, D]) + + instead of:: + + >>> _ = np.dot(np.dot(np.dot(A, B), C), D) + >>> # or + >>> _ = A.dot(B).dot(C).dot(D) + + Notes + ----- + The cost for a matrix multiplication can be calculated with the + following function:: + + def cost(A, B): + return A.shape[0] * A.shape[1] * B.shape[1] + + Assume we have three matrices + :math:`A_{10 \times 100}, B_{100 \times 5}, C_{5 \times 50}`. + + The costs for the two different parenthesizations are as follows:: + + cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500 + cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000 + + """ + n = len(arrays) + # optimization only makes sense for len(arrays) > 2 + if n < 2: + raise ValueError("Expecting at least two arrays.") + elif n == 2: + return dot(arrays[0], arrays[1], out=out) + + arrays = [asanyarray(a) for a in arrays] + + # save original ndim to reshape the result array into the proper form later + ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim + # Explicitly convert vectors to 2D arrays to keep the logic of the internal + # _multi_dot_* functions as simple as possible. 
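+    # For example, a (k,) first argument becomes a (1, k) row vector and a
+    # (k,) last argument becomes a (k, 1) column vector; the axes added here
+    # are stripped from the result again before returning.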
+ if arrays[0].ndim == 1: + arrays[0] = atleast_2d(arrays[0]) + if arrays[-1].ndim == 1: + arrays[-1] = atleast_2d(arrays[-1]).T + _assert_2d(*arrays) + + # _multi_dot_three is much faster than _multi_dot_matrix_chain_order + if n == 3: + result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out) + else: + order = _multi_dot_matrix_chain_order(arrays) + result = _multi_dot(arrays, order, 0, n - 1, out=out) + + # return proper shape + if ndim_first == 1 and ndim_last == 1: + return result[0, 0] # scalar + elif ndim_first == 1 or ndim_last == 1: + return result.ravel() # 1-D + else: + return result + + +def _multi_dot_three(A, B, C, out=None): + """ + Find the best order for three arrays and do the multiplication. + + For three arguments `_multi_dot_three` is approximately 15 times faster + than `_multi_dot_matrix_chain_order` + + """ + a0, a1b0 = A.shape + b1c0, c1 = C.shape + # cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1 + cost1 = a0 * b1c0 * (a1b0 + c1) + # cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1 + cost2 = a1b0 * c1 * (a0 + b1c0) + + if cost1 < cost2: + return dot(dot(A, B), C, out=out) + else: + return dot(A, dot(B, C), out=out) + + +def _multi_dot_matrix_chain_order(arrays, return_costs=False): + """ + Return a np.array that encodes the optimal order of multiplications. + + The optimal order array is then used by `_multi_dot()` to do the + multiplication. + + Also return the cost matrix if `return_costs` is `True` + + The implementation CLOSELY follows Cormen, "Introduction to Algorithms", + Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices. + + cost[i, j] = min([ + cost[prefix] + cost[suffix] + cost_mult(prefix, suffix) + for k in range(i, j)]) + + """ + n = len(arrays) + # p stores the dimensions of the matrices + # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50] + p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]] + # m is a matrix of costs of the subproblems + # m[i,j]: min number of scalar multiplications needed to compute A_{i..j} + m = zeros((n, n), dtype=double) + # s is the actual ordering + # s[i, j] is the value of k at which we split the product A_i..A_j + s = empty((n, n), dtype=intp) + + for l in range(1, n): + for i in range(n - l): + j = i + l + m[i, j] = inf + for k in range(i, j): + q = m[i, k] + m[k + 1, j] + p[i] * p[k + 1] * p[j + 1] + if q < m[i, j]: + m[i, j] = q + s[i, j] = k # Note that Cormen uses 1-based index + + return (s, m) if return_costs else s + + +def _multi_dot(arrays, order, i, j, out=None): + """Actually do the multiplication with the given order.""" + if i == j: + # the initial call with non-None out should never get here + assert out is None + + return arrays[i] + else: + return dot(_multi_dot(arrays, order, i, order[i, j]), + _multi_dot(arrays, order, order[i, j] + 1, j), + out=out) + + +# diagonal + +def _diagonal_dispatcher(x, /, *, offset=None): + return (x,) + + +@array_function_dispatch(_diagonal_dispatcher) +def diagonal(x, /, *, offset=0): + """ + Returns specified diagonals of a matrix (or a stack of matrices) ``x``. + + This function is Array API compatible, contrary to + :py:func:`numpy.diagonal`, the matrix is assumed + to be defined by the last two dimensions. + + Parameters + ---------- + x : (...,M,N) array_like + Input array having shape (..., M, N) and whose innermost two + dimensions form MxN matrices. + offset : int, optional + Offset specifying the off-diagonal relative to the main diagonal, + where:: + + * offset = 0: the main diagonal. 
+ * offset > 0: off-diagonal above the main diagonal. + * offset < 0: off-diagonal below the main diagonal. + + Returns + ------- + out : (...,min(N,M)) ndarray + An array containing the diagonals and whose shape is determined by + removing the last two dimensions and appending a dimension equal to + the size of the resulting diagonals. The returned array must have + the same data type as ``x``. + + See Also + -------- + numpy.diagonal + + Examples + -------- + >>> a = np.arange(4).reshape(2, 2); a + array([[0, 1], + [2, 3]]) + >>> np.linalg.diagonal(a) + array([0, 3]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2, 2, 2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.linalg.diagonal(a) + array([[0, 3], + [4, 7]]) + + Diagonals adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(a, offset=1) # First superdiagonal + array([1, 5]) + >>> np.linalg.diagonal(a, offset=2) # Second superdiagonal + array([2]) + >>> np.linalg.diagonal(a, offset=-1) # First subdiagonal + array([3, 7]) + >>> np.linalg.diagonal(a, offset=-2) # Second subdiagonal + array([6]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(np.fliplr(a)) # Horizontal flip + array([2, 4, 6]) + >>> np.linalg.diagonal(np.flipud(a)) # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + + """ + return _core_diagonal(x, offset, axis1=-2, axis2=-1) + + +# trace + +def _trace_dispatcher(x, /, *, offset=None, dtype=None): + return (x,) + + +@array_function_dispatch(_trace_dispatcher) +def trace(x, /, *, offset=0, dtype=None): + """ + Returns the sum along the specified diagonals of a matrix + (or a stack of matrices) ``x``. + + This function is Array API compatible, contrary to + :py:func:`numpy.trace`. + + Parameters + ---------- + x : (...,M,N) array_like + Input array having shape (..., M, N) and whose innermost two + dimensions form MxN matrices. + offset : int, optional + Offset specifying the off-diagonal relative to the main diagonal, + where:: + + * offset = 0: the main diagonal. + * offset > 0: off-diagonal above the main diagonal. + * offset < 0: off-diagonal below the main diagonal. + + dtype : dtype, optional + Data type of the returned array. + + Returns + ------- + out : ndarray + An array containing the traces and whose shape is determined by + removing the last two dimensions and storing the traces in the last + array dimension. For example, if x has rank k and shape: + (I, J, K, ..., L, M, N), then an output array has rank k-2 and shape: + (I, J, K, ..., L) where:: + + out[i, j, k, ..., l] = trace(a[i, j, k, ..., l, :, :]) + + The returned array must have a data type as described by the dtype + parameter above. + + See Also + -------- + numpy.trace + + Examples + -------- + >>> np.linalg.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> np.linalg.trace(a) + array([3, 11]) + + Trace is computed with the last two axes as the 2-d sub-arrays. + This behavior differs from :py:func:`numpy.trace` which uses the first two + axes by default. 
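+
+    For comparison, :py:func:`numpy.trace` reduces over the first two axes of
+    the same array:
+
+    >>> np.trace(np.arange(24).reshape((3, 2, 2, 2))).shape
+    (2, 2)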
+ + >>> a = np.arange(24).reshape((3, 2, 2, 2)) + >>> np.linalg.trace(a).shape + (3, 2) + + Traces adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape((3, 3)); a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.trace(a, offset=1) # First superdiagonal + 6 + >>> np.linalg.trace(a, offset=2) # Second superdiagonal + 2 + >>> np.linalg.trace(a, offset=-1) # First subdiagonal + 10 + >>> np.linalg.trace(a, offset=-2) # Second subdiagonal + 6 + + """ + return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype) + + +# cross + +def _cross_dispatcher(x1, x2, /, *, axis=None): + return (x1, x2,) + + +@array_function_dispatch(_cross_dispatcher) +def cross(x1, x2, /, *, axis=-1): + """ + Returns the cross product of 3-element vectors. + + If ``x1`` and/or ``x2`` are multi-dimensional arrays, then + the cross-product of each pair of corresponding 3-element vectors + is independently computed. + + This function is Array API compatible, contrary to + :func:`numpy.cross`. + + Parameters + ---------- + x1 : array_like + The first input array. + x2 : array_like + The second input array. Must be compatible with ``x1`` for all + non-compute axes. The size of the axis over which to compute + the cross-product must be the same size as the respective axis + in ``x1``. + axis : int, optional + The axis (dimension) of ``x1`` and ``x2`` containing the vectors for + which to compute the cross-product. Default: ``-1``. + + Returns + ------- + out : ndarray + An array containing the cross products. + + See Also + -------- + numpy.cross + + Examples + -------- + Vector cross-product. + + >>> x = np.array([1, 2, 3]) + >>> y = np.array([4, 5, 6]) + >>> np.linalg.cross(x, y) + array([-3, 6, -3]) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.linalg.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + >>> x = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([[4, 5], [6, 1], [2, 3]]) + >>> np.linalg.cross(x, y, axis=0) + array([[-24, 6], + [ 18, 24], + [-6, -18]]) + + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + + if x1.shape[axis] != 3 or x2.shape[axis] != 3: + raise ValueError( + "Both input arrays must be (arrays of) 3-dimensional vectors, " + f"but they are {x1.shape[axis]} and {x2.shape[axis]} " + "dimensional instead." + ) + + return _core_cross(x1, x2, axis=axis) + + +# matmul + +def _matmul_dispatcher(x1, x2, /): + return (x1, x2) + + +@array_function_dispatch(_matmul_dispatcher) +def matmul(x1, x2, /): + """ + Computes the matrix product. + + This function is Array API compatible, contrary to + :func:`numpy.matmul`. + + Parameters + ---------- + x1 : array_like + The first input array. + x2 : array_like + The second input array. + + Returns + ------- + out : ndarray + The matrix product of the inputs. + This is a scalar only when both ``x1``, ``x2`` are 1-d vectors. + + Raises + ------ + ValueError + If the last dimension of ``x1`` is not the same size as + the second-to-last dimension of ``x2``. + + If a scalar value is passed in. + + See Also + -------- + numpy.matmul + + Examples + -------- + For 2-D arrays it is the matrix product: + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([[4, 1], + ... [2, 2]]) + >>> np.linalg.matmul(a, b) + array([[4, 1], + [2, 2]]) + + For 2-D mixed with 1-D, the result is the usual. 
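+    (The 1-D operand is promoted to a matrix by inserting a length-1 axis,
+    and that axis is removed from the result again.)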
+ + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([1, 2]) + >>> np.linalg.matmul(a, b) + array([1, 2]) + >>> np.linalg.matmul(b, a) + array([1, 2]) + + + Broadcasting is conventional for stacks of arrays + + >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) + >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) + >>> np.linalg.matmul(a,b).shape + (2, 2, 2) + >>> np.linalg.matmul(a, b)[0, 1, 1] + 98 + >>> sum(a[0, 1, :] * b[0 , :, 1]) + 98 + + Vector, vector returns the scalar inner product, but neither argument + is complex-conjugated: + + >>> np.linalg.matmul([2j, 3j], [2j, 3j]) + (-13+0j) + + Scalar multiplication raises an error. + + >>> np.linalg.matmul([1,2], 3) + Traceback (most recent call last): + ... + ValueError: matmul: Input operand 1 does not have enough dimensions ... + + """ + return _core_matmul(x1, x2) + + +# tensordot + +def _tensordot_dispatcher(x1, x2, /, *, axes=None): + return (x1, x2) + + +@array_function_dispatch(_tensordot_dispatcher) +def tensordot(x1, x2, /, *, axes=2): + return _core_tensordot(x1, x2, axes=axes) + + +tensordot.__doc__ = _core_tensordot.__doc__ + + +# matrix_transpose + +def _matrix_transpose_dispatcher(x): + return (x,) + +@array_function_dispatch(_matrix_transpose_dispatcher) +def matrix_transpose(x, /): + return _core_matrix_transpose(x) + + +matrix_transpose.__doc__ = f"""{_core_matrix_transpose.__doc__} + + Notes + ----- + This function is an alias of `numpy.matrix_transpose`. +""" + + +# matrix_norm + +def _matrix_norm_dispatcher(x, /, *, keepdims=None, ord=None): + return (x,) + +@array_function_dispatch(_matrix_norm_dispatcher) +def matrix_norm(x, /, *, keepdims=False, ord="fro"): + """ + Computes the matrix norm of a matrix (or a stack of matrices) ``x``. + + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array having shape (..., M, N) and whose two innermost + dimensions form ``MxN`` matrices. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in + the result as dimensions with size one. Default: False. + ord : {1, -1, 2, -2, inf, -inf, 'fro', 'nuc'}, optional + The order of the norm. For details see the table under ``Notes`` + in `numpy.linalg.norm`. + + See Also + -------- + numpy.linalg.norm : Generic norm function + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.matrix_norm(b) + 7.745966692414834 + >>> LA.matrix_norm(b, ord='fro') + 7.745966692414834 + >>> LA.matrix_norm(b, ord=np.inf) + 9.0 + >>> LA.matrix_norm(b, ord=-np.inf) + 2.0 + + >>> LA.matrix_norm(b, ord=1) + 7.0 + >>> LA.matrix_norm(b, ord=-1) + 6.0 + >>> LA.matrix_norm(b, ord=2) + 7.3484692283495345 + >>> LA.matrix_norm(b, ord=-2) + 1.8570331885190563e-016 # may vary + + """ + x = asanyarray(x) + return norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) + + +# vector_norm + +def _vector_norm_dispatcher(x, /, *, axis=None, keepdims=None, ord=None): + return (x,) + +@array_function_dispatch(_vector_norm_dispatcher) +def vector_norm(x, /, *, axis=None, keepdims=False, ord=2): + """ + Computes the vector norm of a vector (or batch of vectors) ``x``. + + This function is Array API compatible. + + Parameters + ---------- + x : array_like + Input array. + axis : {None, int, 2-tuple of ints}, optional + If an integer, ``axis`` specifies the axis (dimension) along which + to compute vector norms. 
If an n-tuple, ``axis`` specifies the axes + (dimensions) along which to compute batched vector norms. If ``None``, + the vector norm must be computed over all array values (i.e., + equivalent to computing the vector norm of a flattened array). + Default: ``None``. + keepdims : bool, optional + If this is set to True, the axes which are normed over are left in + the result as dimensions with size one. Default: False. + ord : {int, float, inf, -inf}, optional + The order of the norm. For details see the table under ``Notes`` + in `numpy.linalg.norm`. + + See Also + -------- + numpy.linalg.norm : Generic norm function + + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) + 1 + >>> a + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> b = a.reshape((3, 3)) + >>> b + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> LA.vector_norm(b) + 16.881943016134134 + >>> LA.vector_norm(b, ord=np.inf) + 9.0 + >>> LA.vector_norm(b, ord=-np.inf) + 1.0 + + >>> LA.vector_norm(b, ord=0) + 9.0 + >>> LA.vector_norm(b, ord=1) + 45.0 + >>> LA.vector_norm(b, ord=-1) + 0.3534857623790153 + >>> LA.vector_norm(b, ord=2) + 16.881943016134134 + >>> LA.vector_norm(b, ord=-2) + 0.8058837395885292 + + """ + x = asanyarray(x) + shape = list(x.shape) + if axis is None: + # Note: np.linalg.norm() doesn't handle 0-D arrays + x = x.ravel() + _axis = 0 + elif isinstance(axis, tuple): + # Note: The axis argument supports any number of axes, whereas + # np.linalg.norm() only supports a single axis for vector norm. + normalized_axis = normalize_axis_tuple(axis, x.ndim) + rest = tuple(i for i in range(x.ndim) if i not in normalized_axis) + newshape = axis + rest + x = _core_transpose(x, newshape).reshape( + ( + prod([x.shape[i] for i in axis], dtype=int), + *[x.shape[i] for i in rest] + ) + ) + _axis = 0 + else: + _axis = axis + + res = norm(x, axis=_axis, ord=ord) + + if keepdims: + # We can't reuse np.linalg.norm(keepdims) because of the reshape hacks + # above to avoid matrix norm logic. + _axis = normalize_axis_tuple( + range(len(shape)) if axis is None else axis, len(shape) + ) + for i in _axis: + shape[i] = 1 + res = res.reshape(tuple(shape)) + + return res + + +# vecdot + +def _vecdot_dispatcher(x1, x2, /, *, axis=None): + return (x1, x2) + +@array_function_dispatch(_vecdot_dispatcher) +def vecdot(x1, x2, /, *, axis=-1): + """ + Computes the vector dot product. + + This function is restricted to arguments compatible with the Array API, + contrary to :func:`numpy.vecdot`. + + Let :math:`\\mathbf{a}` be a vector in ``x1`` and :math:`\\mathbf{b}` be + a corresponding vector in ``x2``. The dot product is defined as: + + .. math:: + \\mathbf{a} \\cdot \\mathbf{b} = \\sum_{i=0}^{n-1} \\overline{a_i}b_i + + over the dimension specified by ``axis`` and where :math:`\\overline{a_i}` + denotes the complex conjugate if :math:`a_i` is complex and the identity + otherwise. + + Parameters + ---------- + x1 : array_like + First input array. + x2 : array_like + Second input array. + axis : int, optional + Axis over which to compute the dot product. Default: ``-1``. + + Returns + ------- + output : ndarray + The vector dot product of the input. + + See Also + -------- + numpy.vecdot + + Examples + -------- + Get the projected size along a given normal for an array of vectors. 
+ + >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) + >>> n = np.array([0., 0.6, 0.8]) + >>> np.linalg.vecdot(v, n) + array([ 3., 8., 10.]) + + """ + return _core_vecdot(x1, x2, axis=axis) diff --git a/python/numpy/linalg/_linalg.pyi b/python/numpy/linalg/_linalg.pyi new file mode 100644 index 000000000..3611053a3 --- /dev/null +++ b/python/numpy/linalg/_linalg.pyi @@ -0,0 +1,475 @@ +from collections.abc import Iterable +from typing import ( + Any, + NamedTuple, + Never, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, +) +from typing import Literal as L + +import numpy as np +from numpy import ( + complex128, + complexfloating, + float64, + # other + floating, + int32, + object_, + signedinteger, + timedelta64, + unsignedinteger, + # re-exports + vecdot, +) +from numpy._core.fromnumeric import matrix_transpose +from numpy._core.numeric import tensordot +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, + _ArrayLikeUInt_co, +) +from numpy.linalg import LinAlgError + +__all__ = [ + "matrix_power", + "solve", + "tensorsolve", + "tensorinv", + "inv", + "cholesky", + "eigvals", + "eigvalsh", + "pinv", + "slogdet", + "det", + "svd", + "svdvals", + "eig", + "eigh", + "lstsq", + "norm", + "qr", + "cond", + "matrix_rank", + "LinAlgError", + "multi_dot", + "trace", + "diagonal", + "cross", + "outer", + "tensordot", + "matmul", + "matrix_transpose", + "matrix_norm", + "vector_norm", + "vecdot", +] + +_NumberT = TypeVar("_NumberT", bound=np.number) + +_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] + +### + +fortran_int = np.intc + +class EigResult(NamedTuple): + eigenvalues: NDArray[Any] + eigenvectors: NDArray[Any] + +class EighResult(NamedTuple): + eigenvalues: NDArray[Any] + eigenvectors: NDArray[Any] + +class QRResult(NamedTuple): + Q: NDArray[Any] + R: NDArray[Any] + +class SlogdetResult(NamedTuple): + # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and + # a `(x.ndim - 2)`` dimensionl arrays otherwise + sign: Any + logabsdet: Any + +class SVDResult(NamedTuple): + U: NDArray[Any] + S: NDArray[Any] + Vh: NDArray[Any] + +@overload +def tensorsolve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, + axes: Iterable[int] | None = ..., +) -> NDArray[float64]: ... +@overload +def tensorsolve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + axes: Iterable[int] | None = ..., +) -> NDArray[floating]: ... +@overload +def tensorsolve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, + axes: Iterable[int] | None = ..., +) -> NDArray[complexfloating]: ... + +@overload +def solve( + a: _ArrayLikeInt_co, + b: _ArrayLikeInt_co, +) -> NDArray[float64]: ... +@overload +def solve( + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, +) -> NDArray[floating]: ... +@overload +def solve( + a: _ArrayLikeComplex_co, + b: _ArrayLikeComplex_co, +) -> NDArray[complexfloating]: ... + +@overload +def tensorinv( + a: _ArrayLikeInt_co, + ind: int = ..., +) -> NDArray[float64]: ... +@overload +def tensorinv( + a: _ArrayLikeFloat_co, + ind: int = ..., +) -> NDArray[floating]: ... +@overload +def tensorinv( + a: _ArrayLikeComplex_co, + ind: int = ..., +) -> NDArray[complexfloating]: ... + +@overload +def inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ... +@overload +def inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ... 
+@overload +def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +# TODO: The supported input and output dtypes are dependent on the value of `n`. +# For example: `n < 0` always casts integer types to float64 +def matrix_power( + a: _ArrayLikeComplex_co | _ArrayLikeObject_co, + n: SupportsIndex, +) -> NDArray[Any]: ... + +@overload +def cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ... +@overload +def cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ... +@overload +def cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ... + +@overload +def outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never], /) -> NDArray[Any]: ... +@overload +def outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... +@overload +def outer(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... +@overload +def outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... +@overload +def outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... +@overload +def outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +@overload +def outer(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... +@overload +def outer(x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... +@overload +def outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, /) -> NDArray[object_]: ... +@overload +def outer( + x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + /, +) -> NDArray[Any]: ... + +@overload +def qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ... +@overload +def qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ... +@overload +def qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ... + +@overload +def eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ... +@overload +def eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ... +@overload +def eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ... + +@overload +def eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ... +@overload +def eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[floating]: ... + +@overload +def eig(a: _ArrayLikeInt_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeFloat_co) -> EigResult: ... +@overload +def eig(a: _ArrayLikeComplex_co) -> EigResult: ... + +@overload +def eigh( + a: _ArrayLikeInt_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeFloat_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... +@overload +def eigh( + a: _ArrayLikeComplex_co, + UPLO: L["L", "U", "l", "u"] = ..., +) -> EighResult: ... + +@overload +def svd( + a: _ArrayLikeInt_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeFloat_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... +@overload +def svd( + a: _ArrayLikeComplex_co, + full_matrices: bool = ..., + compute_uv: L[True] = ..., + hermitian: bool = ..., +) -> SVDResult: ... 
+@overload
+def svd(
+    a: _ArrayLikeInt_co,
+    full_matrices: bool = ...,
+    compute_uv: L[False] = ...,
+    hermitian: bool = ...,
+) -> NDArray[float64]: ...
+@overload
+def svd(
+    a: _ArrayLikeComplex_co,
+    full_matrices: bool = ...,
+    compute_uv: L[False] = ...,
+    hermitian: bool = ...,
+) -> NDArray[floating]: ...
+
+def svdvals(
+    x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co
+) -> NDArray[floating]: ...
+
+# TODO: Returns a scalar for 2D arrays and
+# a `(x.ndim - 2)`-dimensional array otherwise
+def cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = ...) -> Any: ...
+
+# TODO: Returns `int` for <2D arrays and `intp` otherwise
+def matrix_rank(
+    A: _ArrayLikeComplex_co,
+    tol: _ArrayLikeFloat_co | None = ...,
+    hermitian: bool = ...,
+    *,
+    rtol: _ArrayLikeFloat_co | None = ...,
+) -> Any: ...
+
+@overload
+def pinv(
+    a: _ArrayLikeInt_co,
+    rcond: _ArrayLikeFloat_co | None = None,
+    hermitian: bool = False,
+    *,
+    rtol: _ArrayLikeFloat_co | _NoValueType = ...,
+) -> NDArray[float64]: ...
+@overload
+def pinv(
+    a: _ArrayLikeFloat_co,
+    rcond: _ArrayLikeFloat_co | None = None,
+    hermitian: bool = False,
+    *,
+    rtol: _ArrayLikeFloat_co | _NoValueType = ...,
+) -> NDArray[floating]: ...
+@overload
+def pinv(
+    a: _ArrayLikeComplex_co,
+    rcond: _ArrayLikeFloat_co | None = None,
+    hermitian: bool = False,
+    *,
+    rtol: _ArrayLikeFloat_co | _NoValueType = ...,
+) -> NDArray[complexfloating]: ...
+
+# TODO: Returns a 2-tuple of scalars for 2D arrays and
+# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
+def slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ...
+
+# TODO: Returns a 2-tuple of scalars for 2D arrays and
+# a 2-tuple of `(a.ndim - 2)`-dimensional arrays otherwise
+def det(a: _ArrayLikeComplex_co) -> Any: ...
+
+@overload
+def lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = ...) -> tuple[
+    NDArray[float64],
+    NDArray[float64],
+    int32,
+    NDArray[float64],
+]: ...
+@overload
+def lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = ...) -> tuple[
+    NDArray[floating],
+    NDArray[floating],
+    int32,
+    NDArray[floating],
+]: ...
+@overload
+def lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = ...) -> tuple[
+    NDArray[complexfloating],
+    NDArray[floating],
+    int32,
+    NDArray[floating],
+]: ...
+
+@overload
+def norm(
+    x: ArrayLike,
+    ord: float | L["fro", "nuc"] | None = ...,
+    axis: None = ...,
+    keepdims: bool = ...,
+) -> floating: ...
+@overload
+def norm(
+    x: ArrayLike,
+    ord: float | L["fro", "nuc"] | None = ...,
+    axis: SupportsInt | SupportsIndex | tuple[int, ...] = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+
+@overload
+def matrix_norm(
+    x: ArrayLike,
+    /,
+    *,
+    ord: float | L["fro", "nuc"] | None = ...,
+    keepdims: bool = ...,
+) -> floating: ...
+@overload
+def matrix_norm(
+    x: ArrayLike,
+    /,
+    *,
+    ord: float | L["fro", "nuc"] | None = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+
+@overload
+def vector_norm(
+    x: ArrayLike,
+    /,
+    *,
+    axis: None = ...,
+    ord: float | None = ...,
+    keepdims: bool = ...,
+) -> floating: ...
+@overload
+def vector_norm(
+    x: ArrayLike,
+    /,
+    *,
+    axis: SupportsInt | SupportsIndex | tuple[int, ...] = ...,
+    ord: float | None = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+
+# TODO: Returns a scalar or array
+def multi_dot(
+    arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co],
+    *,
+    out: NDArray[Any] | None = ...,
+) -> Any: ...
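Editorial aside: the overload ladders in this stub all encode the same promotion rule: integer array-likes resolve to `float64` results, while inexact inputs keep their precision class. A minimal runtime sanity check of that rule (plain public NumPy; nothing here depends on the private stubs) might look like:

```python
import numpy as np

# Integer input promotes to float64 before LAPACK runs.
assert np.linalg.inv(np.array([[2, 0], [0, 2]])).dtype == np.float64

# Inexact inputs keep their precision class.
assert np.linalg.inv(np.eye(2, dtype=np.float32)).dtype == np.float32
assert np.linalg.inv(np.eye(2, dtype=np.complex64)).dtype == np.complex64

# slogdet returns the (sign, logabsdet) pair modeled by SlogdetResult above.
sign, logabsdet = np.linalg.slogdet(np.eye(3))
assert sign == 1.0 and logabsdet == 0.0
```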
+ +def diagonal( + x: ArrayLike, # >= 2D array + /, + *, + offset: SupportsIndex = ..., +) -> NDArray[Any]: ... + +def trace( + x: ArrayLike, # >= 2D array + /, + *, + offset: SupportsIndex = ..., + dtype: DTypeLike = ..., +) -> Any: ... + +@overload +def cross( + x1: _ArrayLikeUInt_co, + x2: _ArrayLikeUInt_co, + /, + *, + axis: int = ..., +) -> NDArray[unsignedinteger]: ... +@overload +def cross( + x1: _ArrayLikeInt_co, + x2: _ArrayLikeInt_co, + /, + *, + axis: int = ..., +) -> NDArray[signedinteger]: ... +@overload +def cross( + x1: _ArrayLikeFloat_co, + x2: _ArrayLikeFloat_co, + /, + *, + axis: int = ..., +) -> NDArray[floating]: ... +@overload +def cross( + x1: _ArrayLikeComplex_co, + x2: _ArrayLikeComplex_co, + /, + *, + axis: int = ..., +) -> NDArray[complexfloating]: ... + +@overload +def matmul(x1: _ArrayLike[_NumberT], x2: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... +@overload +def matmul(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... +@overload +def matmul(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... +@overload +def matmul(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... +@overload +def matmul(x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... diff --git a/python/numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so b/python/numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..ec49c7b42 Binary files /dev/null and b/python/numpy/linalg/_umath_linalg.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/linalg/_umath_linalg.pyi b/python/numpy/linalg/_umath_linalg.pyi new file mode 100644 index 000000000..cd07acdb1 --- /dev/null +++ b/python/numpy/linalg/_umath_linalg.pyi @@ -0,0 +1,61 @@ +from typing import Final +from typing import Literal as L + +import numpy as np +from numpy._typing._ufunc import _GUFunc_Nin2_Nout1 + +__version__: Final[str] = ... +_ilp64: Final[bool] = ... + +### +# 1 -> 1 + +# (m,m) -> () +det: Final[np.ufunc] = ... +# (m,m) -> (m) +cholesky_lo: Final[np.ufunc] = ... +cholesky_up: Final[np.ufunc] = ... +eigvals: Final[np.ufunc] = ... +eigvalsh_lo: Final[np.ufunc] = ... +eigvalsh_up: Final[np.ufunc] = ... +# (m,m) -> (m,m) +inv: Final[np.ufunc] = ... +# (m,n) -> (p) +qr_r_raw: Final[np.ufunc] = ... +svd: Final[np.ufunc] = ... + +### +# 1 -> 2 + +# (m,m) -> (), () +slogdet: Final[np.ufunc] = ... +# (m,m) -> (m), (m,m) +eig: Final[np.ufunc] = ... +eigh_lo: Final[np.ufunc] = ... +eigh_up: Final[np.ufunc] = ... + +### +# 2 -> 1 + +# (m,n), (n) -> (m,m) +qr_complete: Final[_GUFunc_Nin2_Nout1[L["qr_complete"], L[2], None, L["(m,n),(n)->(m,m)"]]] = ... +# (m,n), (k) -> (m,k) +qr_reduced: Final[_GUFunc_Nin2_Nout1[L["qr_reduced"], L[2], None, L["(m,n),(k)->(m,k)"]]] = ... +# (m,m), (m,n) -> (m,n) +solve: Final[_GUFunc_Nin2_Nout1[L["solve"], L[4], None, L["(m,m),(m,n)->(m,n)"]]] = ... +# (m,m), (m) -> (m) +solve1: Final[_GUFunc_Nin2_Nout1[L["solve1"], L[4], None, L["(m,m),(m)->(m)"]]] = ... + +### +# 1 -> 3 + +# (m,n) -> (m,m), (p), (n,n) +svd_f: Final[np.ufunc] = ... +# (m,n) -> (m,p), (p), (p,n) +svd_s: Final[np.ufunc] = ... + +### +# 3 -> 4 + +# (m,n), (m,k), () -> (n,k), (k), (), (p) +lstsq: Final[np.ufunc] = ... 
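The comments above give each gufunc's core signature; leading axes broadcast, which is what gives the public wrappers their stacked-matrix behavior. A quick sketch, assuming the private `_umath_linalg` module keeps this layout (it is not a stable API):

```python
import numpy as np
from numpy.linalg import _umath_linalg  # private module; layout may change

a = np.random.rand(3, 4, 4)  # a stack of three 4x4 matrices

# det has core signature (m,m)->(), so the leading axis broadcasts and
# a single call returns one determinant per matrix in the stack.
d = _umath_linalg.det(a)
assert d.shape == (3,)
assert np.allclose(d, np.linalg.det(a))  # the public wrapper agrees
```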
diff --git a/python/numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so b/python/numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..495033fc5 Binary files /dev/null and b/python/numpy/linalg/lapack_lite.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/linalg/lapack_lite.pyi b/python/numpy/linalg/lapack_lite.pyi new file mode 100644 index 000000000..835293a26 --- /dev/null +++ b/python/numpy/linalg/lapack_lite.pyi @@ -0,0 +1,141 @@ +from typing import Final, TypedDict, type_check_only + +import numpy as np +from numpy._typing import NDArray + +from ._linalg import fortran_int + +### + +@type_check_only +class _GELSD(TypedDict): + m: int + n: int + nrhs: int + lda: int + ldb: int + rank: int + lwork: int + info: int + +@type_check_only +class _DGELSD(_GELSD): + dgelsd_: int + rcond: float + +@type_check_only +class _ZGELSD(_GELSD): + zgelsd_: int + +@type_check_only +class _GEQRF(TypedDict): + m: int + n: int + lda: int + lwork: int + info: int + +@type_check_only +class _DGEQRF(_GEQRF): + dgeqrf_: int + +@type_check_only +class _ZGEQRF(_GEQRF): + zgeqrf_: int + +@type_check_only +class _DORGQR(TypedDict): + dorgqr_: int + info: int + +@type_check_only +class _ZUNGQR(TypedDict): + zungqr_: int + info: int + +### + +_ilp64: Final[bool] = ... + +def dgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.float64], + lda: int, + b: NDArray[np.float64], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.float64], + lwork: int, + iwork: NDArray[fortran_int], + info: int, +) -> _DGELSD: ... +def zgelsd( + m: int, + n: int, + nrhs: int, + a: NDArray[np.complex128], + lda: int, + b: NDArray[np.complex128], + ldb: int, + s: NDArray[np.float64], + rcond: float, + rank: int, + work: NDArray[np.complex128], + lwork: int, + rwork: NDArray[np.float64], + iwork: NDArray[fortran_int], + info: int, +) -> _ZGELSD: ... + +# +def dgeqrf( + m: int, + n: int, + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.float64], # out, shape: (min(m, n),) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DGEQRF: ... +def zgeqrf( + m: int, + n: int, + a: NDArray[np.complex128], # in/out, shape: (lda, n) + lda: int, + tau: NDArray[np.complex128], # out, shape: (min(m, n),) + work: NDArray[np.complex128], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _ZGEQRF: ... + +# +def dorgqr( + m: int, # >=0 + n: int, # m >= n >= 0 + k: int, # n >= k >= 0 + a: NDArray[np.float64], # in/out, shape: (lda, n) + lda: int, # >= max(1, m) + tau: NDArray[np.float64], # in, shape: (k,) + work: NDArray[np.float64], # out, shape: (max(1, lwork),) + lwork: int, + info: int, # out +) -> _DORGQR: ... +def zungqr( + m: int, + n: int, + k: int, + a: NDArray[np.complex128], + lda: int, + tau: NDArray[np.complex128], + work: NDArray[np.complex128], + lwork: int, + info: int, +) -> _ZUNGQR: ... + +# +def xerbla(srname: object, info: int) -> None: ... 
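The `work`/`lwork`/`info` arguments follow raw LAPACK conventions, including the two-phase workspace query in which a first call with `lwork=-1` writes the optimal buffer size into `work[0]` instead of computing anything. Below is a sketch of that protocol against `dgeqrf`; `lapack_lite` is private, and this calling pattern is inferred from the stubs above and the LAPACK documentation, so treat it as an assumption rather than a supported API:

```python
import numpy as np
from numpy.linalg import lapack_lite  # private; no stability guarantee

m = n = 3
a = np.asfortranarray(np.random.rand(m, n))  # LAPACK wants column-major float64
tau = np.zeros(min(m, n))
work = np.zeros(1)

# Phase 1: lwork=-1 asks dgeqrf for its optimal workspace size.
res = lapack_lite.dgeqrf(m, n, a, m, tau, work, -1, 0)
lwork = int(work[0])

# Phase 2: perform the actual QR factorization with a buffer of that size.
work = np.zeros(lwork)
res = lapack_lite.dgeqrf(m, n, a, m, tau, work, lwork, 0)
assert res['info'] == 0  # info == 0 means LAPACK reported success
```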
diff --git a/python/numpy/linalg/linalg.py b/python/numpy/linalg/linalg.py new file mode 100644 index 000000000..81c80d0fd --- /dev/null +++ b/python/numpy/linalg/linalg.py @@ -0,0 +1,17 @@ +def __getattr__(attr_name): + import warnings + + from numpy.linalg import _linalg + ret = getattr(_linalg, attr_name, None) + if ret is None: + raise AttributeError( + f"module 'numpy.linalg.linalg' has no attribute {attr_name}") + warnings.warn( + "The numpy.linalg.linalg has been made private and renamed to " + "numpy.linalg._linalg. All public functions exported by it are " + f"available from numpy.linalg. Please use numpy.linalg.{attr_name} " + "instead.", + DeprecationWarning, + stacklevel=3 + ) + return ret diff --git a/python/numpy/linalg/linalg.pyi b/python/numpy/linalg/linalg.pyi new file mode 100644 index 000000000..dbe9becfb --- /dev/null +++ b/python/numpy/linalg/linalg.pyi @@ -0,0 +1,69 @@ +from ._linalg import ( + LinAlgError, + cholesky, + cond, + cross, + det, + diagonal, + eig, + eigh, + eigvals, + eigvalsh, + inv, + lstsq, + matmul, + matrix_norm, + matrix_power, + matrix_rank, + matrix_transpose, + multi_dot, + norm, + outer, + pinv, + qr, + slogdet, + solve, + svd, + svdvals, + tensordot, + tensorinv, + tensorsolve, + trace, + vecdot, + vector_norm, +) + +__all__ = [ + "LinAlgError", + "cholesky", + "cond", + "cross", + "det", + "diagonal", + "eig", + "eigh", + "eigvals", + "eigvalsh", + "inv", + "lstsq", + "matmul", + "matrix_norm", + "matrix_power", + "matrix_rank", + "matrix_transpose", + "multi_dot", + "norm", + "outer", + "pinv", + "qr", + "slogdet", + "solve", + "svd", + "svdvals", + "tensordot", + "tensorinv", + "tensorsolve", + "trace", + "vecdot", + "vector_norm", +] diff --git a/python/numpy/linalg/tests/__init__.py b/python/numpy/linalg/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc b/python/numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..e4c541d6f Binary files /dev/null and b/python/numpy/linalg/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc b/python/numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc new file mode 100644 index 000000000..27c67e271 Binary files /dev/null and b/python/numpy/linalg/tests/__pycache__/test_deprecations.cpython-312.pyc differ diff --git a/python/numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc b/python/numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc new file mode 100644 index 000000000..3f8898840 Binary files /dev/null and b/python/numpy/linalg/tests/__pycache__/test_linalg.cpython-312.pyc differ diff --git a/python/numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc b/python/numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc new file mode 100644 index 000000000..3100fcdce Binary files /dev/null and b/python/numpy/linalg/tests/__pycache__/test_regression.cpython-312.pyc differ diff --git a/python/numpy/linalg/tests/test_deprecations.py b/python/numpy/linalg/tests/test_deprecations.py new file mode 100644 index 000000000..cd4c10832 --- /dev/null +++ b/python/numpy/linalg/tests/test_deprecations.py @@ -0,0 +1,20 @@ +"""Test deprecation and future warnings. + +""" +import numpy as np +from numpy.testing import assert_warns + + +def test_qr_mode_full_future_warning(): + """Check mode='full' FutureWarning. 
+ + In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were + deprecated. The release date will probably be sometime in the summer + of 2013. + + """ + a = np.eye(2) + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic') + assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e') diff --git a/python/numpy/linalg/tests/test_linalg.py b/python/numpy/linalg/tests/test_linalg.py new file mode 100644 index 000000000..f271d59e4 --- /dev/null +++ b/python/numpy/linalg/tests/test_linalg.py @@ -0,0 +1,2443 @@ +""" Test functions for linalg module + +""" +import itertools +import os +import subprocess +import sys +import textwrap +import threading +import traceback + +import pytest + +import numpy as np +from numpy import ( + array, + asarray, + atleast_2d, + cdouble, + csingle, + dot, + double, + identity, + inf, + linalg, + matmul, + multiply, + single, +) +from numpy._core import swapaxes +from numpy.exceptions import AxisError +from numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm +from numpy.linalg._linalg import _multi_dot_matrix_chain_order +from numpy.testing import ( + HAS_LAPACK64, + IS_WASM, + NOGIL_BUILD, + assert_, + assert_allclose, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + suppress_warnings, +) + +try: + import numpy.linalg.lapack_lite +except ImportError: + # May be broken when numpy was built without BLAS/LAPACK present + # If so, ensure we don't break the whole test suite - the `lapack_lite` + # submodule should be removed, it's only used in two tests in this file. + pass + + +def consistent_subclass(out, in_): + # For ndarray subclass input, our output should have the same subclass + # (non-ndarray input gets converted to ndarray). 
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
+                        else np.ndarray)
+
+
+old_assert_almost_equal = assert_almost_equal
+
+
+def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
+    if asarray(a).dtype.type in (single, csingle):
+        decimal = single_decimal
+    else:
+        decimal = double_decimal
+    old_assert_almost_equal(a, b, decimal=decimal, **kw)
+
+
+def get_real_dtype(dtype):
+    return {single: single, double: double,
+            csingle: single, cdouble: double}[dtype]
+
+
+def get_complex_dtype(dtype):
+    return {single: csingle, double: cdouble,
+            csingle: csingle, cdouble: cdouble}[dtype]
+
+
+def get_rtol(dtype):
+    # Choose a safe rtol
+    if dtype in (single, csingle):
+        return 1e-5
+    else:
+        return 1e-11
+
+
+# used to categorize tests
+all_tags = {
+    'square', 'nonsquare', 'hermitian',  # mutually exclusive
+    'generalized', 'size-0', 'strided'  # optional additions
+}
+
+
+class LinalgCase:
+    def __init__(self, name, a, b, tags=set()):
+        """
+        A bundle of arguments to be passed to a test case, with an identifying
+        name, the operands a and b, and a set of tags to filter the tests
+        """
+        assert_(isinstance(name, str))
+        self.name = name
+        self.a = a
+        self.b = b
+        self.tags = frozenset(tags)  # prevent shared tags
+
+    def check(self, do):
+        """
+        Run the function `do` on this test case, expanding arguments
+        """
+        do(self.a, self.b, tags=self.tags)
+
+    def __repr__(self):
+        return f'<LinalgCase: {self.name}>'
+
+
+def apply_tag(tag, cases):
+    """
+    Add the given tag (a string) to each of the cases (a list of LinalgCase
+    objects)
+    """
+    assert tag in all_tags, "Invalid tag"
+    for case in cases:
+        case.tags = case.tags | {tag}
+    return cases
+
+
+#
+# Base test cases
+#
+
+np.random.seed(1234)
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+    LinalgCase("single",
+               array([[1., 2.], [3., 4.]], dtype=single),
+               array([2., 1.], dtype=single)),
+    LinalgCase("double",
+               array([[1., 2.], [3., 4.]], dtype=double),
+               array([2., 1.], dtype=double)),
+    LinalgCase("double_2",
+               array([[1., 2.], [3., 4.]], dtype=double),
+               array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
+    LinalgCase("csingle",
+               array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
+               array([2. + 1j, 1. + 2j], dtype=csingle)),
+    LinalgCase("cdouble",
+               array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
+               array([2. + 1j, 1. + 2j], dtype=cdouble)),
+    LinalgCase("cdouble_2",
+               array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
+               array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
+    LinalgCase("0x0",
+               np.empty((0, 0), dtype=double),
+               np.empty((0,), dtype=double),
+               tags={'size-0'}),
+    LinalgCase("8x8",
+               np.random.rand(8, 8),
+               np.random.rand(8)),
+    LinalgCase("1x1",
+               np.random.rand(1, 1),
+               np.random.rand(1)),
+    LinalgCase("nonarray",
+               [[1, 2], [3, 4]],
+               [2, 1]),
+])
+
+# non-square test-cases
+CASES += apply_tag('nonsquare', [
+    LinalgCase("single_nsq_1",
+               array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
+               array([2., 1.], dtype=single)),
+    LinalgCase("single_nsq_2",
+               array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
+               array([2., 1., 3.], dtype=single)),
+    LinalgCase("double_nsq_1",
+               array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
+               array([2., 1.], dtype=double)),
+    LinalgCase("double_nsq_2",
+               array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
+               array([2., 1., 3.], dtype=double)),
+    LinalgCase("csingle_nsq_1",
+               array(
+                   [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
+               array([2. + 1j, 1.
+ 2j], dtype=csingle)), + LinalgCase("csingle_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)), + LinalgCase("cdouble_nsq_1", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([2. + 1j, 1. + 2j], dtype=cdouble)), + LinalgCase("cdouble_nsq_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)), + LinalgCase("cdouble_nsq_1_2", + array( + [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("cdouble_nsq_2_2", + array( + [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble), + array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)), + LinalgCase("8x11", + np.random.rand(8, 11), + np.random.rand(8)), + LinalgCase("1x5", + np.random.rand(1, 5), + np.random.rand(1)), + LinalgCase("5x1", + np.random.rand(5, 1), + np.random.rand(5)), + LinalgCase("0x4", + np.random.rand(0, 4), + np.random.rand(0), + tags={'size-0'}), + LinalgCase("4x0", + np.random.rand(4, 0), + np.random.rand(4), + tags={'size-0'}), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hsingle", + array([[1., 2.], [2., 1.]], dtype=single), + None), + LinalgCase("hdouble", + array([[1., 2.], [2., 1.]], dtype=double), + None), + LinalgCase("hcsingle", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle), + None), + LinalgCase("hcdouble", + array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble), + None), + LinalgCase("hempty", + np.empty((0, 0), dtype=double), + None, + tags={'size-0'}), + LinalgCase("hnonarray", + [[1, 2], [2, 1]], + None), + LinalgCase("matrix_b_only", + array([[1., 2.], [2., 1.]]), + None), + LinalgCase("hmatrix_1x1", + np.random.rand(1, 1), + None), +]) + + +# +# Gufunc test cases +# +def _make_generalized_cases(): + new_cases = [] + + for case in CASES: + if not isinstance(case.a, np.ndarray): + continue + + a = np.array([case.a, 2 * case.a, 3 * case.a]) + if case.b is None: + b = None + elif case.b.ndim == 1: + b = case.b + else: + b = np.array([case.b, 7 * case.b, 6 * case.b]) + new_case = LinalgCase(case.name + "_tile3", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape) + if case.b is None: + b = None + elif case.b.ndim == 1: + b = np.array([case.b] * 2 * 3 * a.shape[-1])\ + .reshape((3, 2) + case.a.shape[-2:]) + else: + b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape) + new_case = LinalgCase(case.name + "_tile213", a, b, + tags=case.tags | {'generalized'}) + new_cases.append(new_case) + + return new_cases + + +CASES += _make_generalized_cases() + + +# +# Generate stride combination variations of the above +# +def _stride_comb_iter(x): + """ + Generate cartesian product of strides for all axes + """ + + if not isinstance(x, np.ndarray): + yield x, "nop" + return + + stride_set = [(1,)] * x.ndim + stride_set[-1] = (1, 3, -4) + if x.ndim > 1: + stride_set[-2] = (1, 3, -4) + if x.ndim > 2: + stride_set[-3] = (1, -4) + + for repeats in itertools.product(*tuple(stride_set)): + new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)] + slices = tuple(slice(None, None, repeat) for repeat in repeats) + + # new array with different strides, but same data + xi = np.empty(new_shape, dtype=x.dtype) + 
xi.view(np.uint32).fill(0xdeadbeef) + xi = xi[slices] + xi[...] = x + xi = xi.view(x.__class__) + assert_(np.all(xi == x)) + yield xi, "stride_" + "_".join(["%+d" % j for j in repeats]) + + # generate also zero strides if possible + if x.ndim >= 1 and x.shape[-1] == 1: + s = list(x.strides) + s[-1] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0" + if x.ndim >= 2 and x.shape[-2] == 1: + s = list(x.strides) + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_x" + if x.ndim >= 2 and x.shape[:-2] == (1, 1): + s = list(x.strides) + s[-1] = 0 + s[-2] = 0 + xi = np.lib.stride_tricks.as_strided(x, strides=s) + yield xi, "stride_xxx_0_0" + + +def _make_strided_cases(): + new_cases = [] + for case in CASES: + for a, a_label in _stride_comb_iter(case.a): + for b, b_label in _stride_comb_iter(case.b): + new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b, + tags=case.tags | {'strided'}) + new_cases.append(new_case) + return new_cases + + +CASES += _make_strided_cases() + + +# +# Test different routines against the above cases +# +class LinalgTestCase: + TEST_CASES = CASES + + def check_cases(self, require=set(), exclude=set()): + """ + Run func on each of the cases with all of the tags in require, and none + of the tags in exclude + """ + for case in self.TEST_CASES: + # filter by require and exclude + if case.tags & require != require: + continue + if case.tags & exclude: + continue + + try: + case.check(self.do) + except Exception as e: + msg = f'In test case: {case!r}\n\n' + msg += traceback.format_exc() + raise AssertionError(msg) from e + + +class LinalgSquareTestCase(LinalgTestCase): + + def test_sq_cases(self): + self.check_cases(require={'square'}, + exclude={'generalized', 'size-0'}) + + def test_empty_sq_cases(self): + self.check_cases(require={'square', 'size-0'}, + exclude={'generalized'}) + + +class LinalgNonsquareTestCase(LinalgTestCase): + + def test_nonsq_cases(self): + self.check_cases(require={'nonsquare'}, + exclude={'generalized', 'size-0'}) + + def test_empty_nonsq_cases(self): + self.check_cases(require={'nonsquare', 'size-0'}, + exclude={'generalized'}) + + +class HermitianTestCase(LinalgTestCase): + + def test_herm_cases(self): + self.check_cases(require={'hermitian'}, + exclude={'generalized', 'size-0'}) + + def test_empty_herm_cases(self): + self.check_cases(require={'hermitian', 'size-0'}, + exclude={'generalized'}) + + +class LinalgGeneralizedSquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_sq_cases(self): + self.check_cases(require={'generalized', 'square'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_sq_cases(self): + self.check_cases(require={'generalized', 'square', 'size-0'}) + + +class LinalgGeneralizedNonsquareTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_nonsq_cases(self): + self.check_cases(require={'generalized', 'nonsquare', 'size-0'}) + + +class HermitianGeneralizedTestCase(LinalgTestCase): + + @pytest.mark.slow + def test_generalized_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian'}, + exclude={'size-0'}) + + @pytest.mark.slow + def test_generalized_empty_herm_cases(self): + self.check_cases(require={'generalized', 'hermitian', 'size-0'}, + exclude={'none'}) + + +def identity_like_generalized(a): + a = asarray(a) + if 
a.ndim >= 3: + r = np.empty(a.shape, dtype=a.dtype) + r[...] = identity(a.shape[-2]) + return r + else: + return identity(a.shape[0]) + + +class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # kept apart from TestSolve for use for testing with matrices. + def do(self, a, b, tags): + x = linalg.solve(a, b) + if np.array(b).ndim == 1: + # When a is (..., M, M) and b is (M,), it is the same as when b is + # (M, 1), except the result has shape (..., M) + adotx = matmul(a, x[..., None])[..., 0] + assert_almost_equal(np.broadcast_to(b, adotx.shape), adotx) + else: + adotx = matmul(a, x) + assert_almost_equal(b, adotx) + assert_(consistent_subclass(x, b)) + + +class TestSolve(SolveCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.solve(x, x).dtype, dtype) + + def test_1_d(self): + class ArraySubclass(np.ndarray): + pass + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(2).view(ArraySubclass) + result = linalg.solve(a, b) + assert result.shape == (2, 2) + + # If b is anything other than 1-D it should be treated as a stack of + # matrices + b = np.arange(4).reshape(2, 2).view(ArraySubclass) + result = linalg.solve(a, b) + assert result.shape == (2, 2, 2) + + b = np.arange(2).reshape(1, 2).view(ArraySubclass) + assert_raises(ValueError, linalg.solve, a, b) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + # Test system of 0x0 matrices + a = np.arange(8).reshape(2, 2, 2) + b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, 0:0, :] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # Test errors for non-square and only b's dimension being 0 + assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b) + assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :]) + + # Test broadcasting error + b = np.arange(6).reshape(1, 3, 2) # broadcasting error + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + + # Test zero "single equations" with 0x0 matrices. + b = np.arange(2).view(ArraySubclass) + expected = linalg.solve(a, b)[:, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + b = np.arange(3).reshape(1, 3) + assert_raises(ValueError, linalg.solve, a, b) + assert_raises(ValueError, linalg.solve, a[0:0], b[0:0]) + assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b) + + def test_0_size_k(self): + # test zero multiple equation (K=0) case. + class ArraySubclass(np.ndarray): + pass + a = np.arange(4).reshape(1, 2, 2) + b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass) + + expected = linalg.solve(a, b)[:, :, 0:0] + result = linalg.solve(a, b[:, :, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + # test both zero. 
+ expected = linalg.solve(a, b)[:, 0:0, 0:0] + result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0]) + assert_array_equal(result, expected) + assert_(isinstance(result, ArraySubclass)) + + +class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + a_inv = linalg.inv(a) + assert_almost_equal(matmul(a, a_inv), + identity_like_generalized(a)) + assert_(consistent_subclass(a_inv, a)) + + +class TestInv(InvCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.inv(x).dtype, dtype) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.inv(a) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res.shape) + assert_(isinstance(res, ArraySubclass)) + + +class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + ev = linalg.eigvals(a) + evalues, evectors = linalg.eig(a) + assert_almost_equal(ev, evalues) + + +class TestEigvals(EigvalsCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, dtype) + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvals(a) + assert_(res.dtype.type is np.complex64) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + res = linalg.eig(a) + eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors + assert_allclose(matmul(a, eigenvectors), + np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :], + rtol=get_rtol(eigenvalues.dtype)) + assert_(consistent_subclass(eigenvectors, a)) + + +class TestEig(EigCases): + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, dtype) + assert_equal(v.dtype, dtype) + + x = np.array([[1, 0.5], [-1, 1]], dtype=dtype) + w, v = np.linalg.eig(x) + assert_equal(w.dtype, get_complex_dtype(dtype)) + assert_equal(v.dtype, get_complex_dtype(dtype)) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + 
assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eig(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.complex64) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class SVDBaseTests: + hermitian = False + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + res = linalg.svd(x) + U, S, Vh = res.U, res.S, res.Vh + assert_equal(U.dtype, dtype) + assert_equal(S.dtype, get_real_dtype(dtype)) + assert_equal(Vh.dtype, dtype) + s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + assert_equal(s.dtype, get_real_dtype(dtype)) + + +class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False) + assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVD(SVDCases, SVDBaseTests): + def test_empty_identity(self): + """ Empty input should put an identity matrix in u or vh """ + x = np.empty((4, 0)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (4, 4)) + assert_equal(vh.shape, (0, 0)) + assert_equal(u, np.eye(4)) + + x = np.empty((0, 4)) + u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian) + assert_equal(u.shape, (0, 0)) + assert_equal(vh.shape, (4, 4)) + assert_equal(vh, np.eye(4)) + + def test_svdvals(self): + x = np.array([[1, 0.5], [0.5, 1]]) + s_from_svd = linalg.svd(x, compute_uv=False, hermitian=self.hermitian) + s_from_svdvals = linalg.svdvals(x) + assert_almost_equal(s_from_svd, s_from_svdvals) + + +class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + u, s, vt = linalg.svd(a, False, hermitian=True) + assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :], + np.asarray(vt)), + rtol=get_rtol(u.dtype)) + + def hermitian(mat): + axes = list(range(mat.ndim)) + axes[-1], axes[-2] = axes[-2], axes[-1] + return np.conj(np.transpose(mat, axes=axes)) + + assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape)) + assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape)) + assert_equal(np.sort(s)[..., ::-1], s) + assert_(consistent_subclass(u, a)) + assert_(consistent_subclass(vt, a)) + + +class TestSVDHermitian(SVDHermitianCases, SVDBaseTests): + hermitian = True + + +class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + # cond(x, p) for p in (None, 2, -2) + + def do(self, a, b, tags): + c = asarray(a) # a might be a matrix + if 'size-0' in tags: + assert_raises(LinAlgError, linalg.cond, c) + return + + # +-2 norms + s = linalg.svd(c, compute_uv=False) + assert_almost_equal( + linalg.cond(a), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 2), s[..., 0] / s[..., -1], + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -2), s[..., -1] / s[..., 0], + single_decimal=5, double_decimal=11) + + # Other norms + cinv = np.linalg.inv(c) + 
assert_almost_equal( + linalg.cond(a, 1), + abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -1), + abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, np.inf), + abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, -np.inf), + abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1), + single_decimal=5, double_decimal=11) + assert_almost_equal( + linalg.cond(a, 'fro'), + np.sqrt((abs(c)**2).sum(-1).sum(-1) + * (abs(cinv)**2).sum(-1).sum(-1)), + single_decimal=5, double_decimal=11) + + +class TestCond(CondCases): + @pytest.mark.parametrize('is_complex', [False, True]) + def test_basic_nonsvd(self, is_complex): + # Smoketest the non-svd norms + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]]) + if is_complex: + # Since A is linearly scaled, the condition number should not change + A = A * (1 + 1j) + assert_almost_equal(linalg.cond(A, inf), 4) + assert_almost_equal(linalg.cond(A, -inf), 2 / 3) + assert_almost_equal(linalg.cond(A, 1), 4) + assert_almost_equal(linalg.cond(A, -1), 0.5) + assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12)) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + @pytest.mark.parametrize('norm_ord', [1, -1, 2, -2, 'fro', np.inf, -np.inf]) + def test_cond_dtypes(self, dtype, norm_ord): + # Check that the condition number is computed in the same dtype + # as the input matrix + A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]], dtype=dtype) + out_type = get_real_dtype(dtype) + assert_equal(linalg.cond(A, p=norm_ord).dtype, out_type) + + def test_singular(self): + # Singular matrices have infinite condition number for + # positive norms, and negative norms shouldn't raise + # exceptions + As = [np.zeros((2, 2)), np.ones((2, 2))] + p_pos = [None, 1, 2, 'fro'] + p_neg = [-1, -2] + for A, p in itertools.product(As, p_pos): + # Inversion may not hit exact infinity, so just check the + # number is large + assert_(linalg.cond(A, p) > 1e15) + for A, p in itertools.product(As, p_neg): + linalg.cond(A, p) + + @pytest.mark.xfail(True, run=False, + reason="Platform/LAPACK-dependent failure, " + "see gh-18914") + def test_nan(self): + # nans should be passed through, not converted to infs + ps = [None, 1, -1, 2, -2, 'fro'] + p_pos = [None, 1, 2, 'fro'] + + A = np.ones((2, 2)) + A[0, 1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(isinstance(c, np.float64)) + assert_(np.isnan(c)) + + A = np.ones((3, 2, 2)) + A[1, 0, 1] = np.nan + for p in ps: + c = linalg.cond(A, p) + assert_(np.isnan(c[1])) + if p in p_pos: + assert_(c[0] > 1e15) + assert_(c[2] > 1e15) + else: + assert_(not np.isnan(c[0])) + assert_(not np.isnan(c[2])) + + def test_stacked_singular(self): + # Check behavior when only some of the stacked matrices are + # singular + np.random.seed(1234) + A = np.random.rand(2, 2, 2, 2) + A[0, 0] = 0 + A[1, 1] = 0 + + for p in (None, 1, 2, 'fro', -1, -2): + c = linalg.cond(A, p) + assert_equal(c[0, 0], np.inf) + assert_equal(c[1, 1], np.inf) + assert_(np.isfinite(c[0, 1])) + assert_(np.isfinite(c[1, 0])) + + +class PinvCases(LinalgSquareTestCase, + LinalgNonsquareTestCase, + LinalgGeneralizedSquareTestCase, + LinalgGeneralizedNonsquareTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a) + # `a @ a_ginv == I` does not hold if a is singular + dot = matmul + assert_almost_equal(dot(dot(a, a_ginv), a), 
a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinv(PinvCases): + pass + + +class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + a_ginv = linalg.pinv(a, hermitian=True) + # `a @ a_ginv == I` does not hold if a is singular + dot = matmul + assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11) + assert_(consistent_subclass(a_ginv, a)) + + +class TestPinvHermitian(PinvHermitianCases): + pass + + +def test_pinv_rtol_arg(): + a = np.array([[1, 2, 3], [4, 1, 1], [2, 3, 1]]) + + assert_almost_equal( + np.linalg.pinv(a, rcond=0.5), + np.linalg.pinv(a, rtol=0.5), + ) + + with pytest.raises( + ValueError, match=r"`rtol` and `rcond` can't be both set." + ): + np.linalg.pinv(a, rcond=0.5, rtol=0.5) + + +class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase): + + def do(self, a, b, tags): + d = linalg.det(a) + res = linalg.slogdet(a) + s, ld = res.sign, res.logabsdet + if asarray(a).dtype.type in (single, double): + ad = asarray(a).astype(double) + else: + ad = asarray(a).astype(cdouble) + ev = linalg.eigvals(ad) + assert_almost_equal(d, multiply.reduce(ev, axis=-1)) + assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1)) + + s = np.atleast_1d(s) + ld = np.atleast_1d(ld) + m = (s != 0) + assert_almost_equal(np.abs(s[m]), 1) + assert_equal(ld[~m], -inf) + + +class TestDet(DetCases): + def test_zero(self): + assert_equal(linalg.det([[0.0]]), 0.0) + assert_equal(type(linalg.det([[0.0]])), double) + assert_equal(linalg.det([[0.0j]]), 0.0) + assert_equal(type(linalg.det([[0.0j]])), cdouble) + + assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf)) + assert_equal(type(linalg.slogdet([[0.0]])[0]), double) + assert_equal(type(linalg.slogdet([[0.0]])[1]), double) + assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf)) + assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble) + assert_equal(type(linalg.slogdet([[0.0j]])[1]), double) + + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + assert_equal(np.linalg.det(x).dtype, dtype) + ph, s = np.linalg.slogdet(x) + assert_equal(s.dtype, get_real_dtype(dtype)) + assert_equal(ph.dtype, dtype) + + def test_0_size(self): + a = np.zeros((0, 0), dtype=np.complex64) + res = linalg.det(a) + assert_equal(res, 1.) + assert_(res.dtype.type is np.complex64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.complex64) + assert_(res[1].dtype.type is np.float32) + + a = np.zeros((0, 0), dtype=np.float64) + res = linalg.det(a) + assert_equal(res, 1.) 
+ assert_(res.dtype.type is np.float64) + res = linalg.slogdet(a) + assert_equal(res, (1, 0)) + assert_(res[0].dtype.type is np.float64) + assert_(res[1].dtype.type is np.float64) + + +class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase): + + def do(self, a, b, tags): + arr = np.asarray(a) + m, n = arr.shape + u, s, vt = linalg.svd(a, False) + x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1) + if m == 0: + assert_((x == 0).all()) + if m <= n: + assert_almost_equal(b, dot(a, x)) + assert_equal(rank, m) + else: + assert_equal(rank, n) + assert_almost_equal(sv, sv.__array_wrap__(s)) + if rank == n and m > n: + expect_resids = ( + np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0) + expect_resids = np.asarray(expect_resids) + if np.asarray(b).ndim == 1: + expect_resids.shape = (1,) + assert_equal(residuals.shape, expect_resids.shape) + else: + expect_resids = np.array([]).view(type(x)) + assert_almost_equal(residuals, expect_resids) + assert_(np.issubdtype(residuals.dtype, np.floating)) + assert_(consistent_subclass(x, b)) + assert_(consistent_subclass(residuals, b)) + + +class TestLstsq(LstsqCases): + def test_rcond(self): + a = np.array([[0., 1., 0., 1., 2., 0.], + [0., 2., 0., 0., 1., 0.], + [1., 0., 1., 0., 0., 4.], + [0., 0., 0., 2., 3., 0.]]).T + + b = np.array([1, 0, 0, 0, 0, 0]) + + x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1) + assert_(rank == 4) + x, residuals, rank, s = linalg.lstsq(a, b) + assert_(rank == 3) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + assert_(rank == 3) + + @pytest.mark.parametrize(["m", "n", "n_rhs"], [ + (4, 2, 2), + (0, 4, 1), + (0, 4, 2), + (4, 0, 1), + (4, 0, 2), + (4, 2, 0), + (0, 0, 0) + ]) + def test_empty_a_b(self, m, n, n_rhs): + a = np.arange(m * n).reshape(m, n) + b = np.ones((m, n_rhs)) + x, residuals, rank, s = linalg.lstsq(a, b, rcond=None) + if m == 0: + assert_((x == 0).all()) + assert_equal(x.shape, (n, n_rhs)) + assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,))) + if m > n and n_rhs > 0: + # residuals are exactly the squared norms of b's columns + r = b - np.dot(a, x) + assert_almost_equal(residuals, (r * r).sum(axis=-2)) + assert_equal(rank, min(m, n)) + assert_equal(s.shape, (min(m, n),)) + + def test_incompatible_dims(self): + # use modified version of docstring example + x = np.array([0, 1, 2, 3]) + y = np.array([-1, 0.2, 0.9, 2.1, 3.3]) + A = np.vstack([x, np.ones(len(x))]).T + with assert_raises_regex(LinAlgError, "Incompatible dimensions"): + linalg.lstsq(A, y, rcond=None) + + +@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO']) +class TestMatrixPower: + + rshft_0 = np.eye(4) + rshft_1 = rshft_0[[3, 0, 1, 2]] + rshft_2 = rshft_0[[2, 3, 0, 1]] + rshft_3 = rshft_0[[1, 2, 3, 0]] + rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] + noninv = array([[1, 0], [0, 0]]) + stacked = np.block([[[rshft_0]]] * 2) + # FIXME the 'e' dtype might work in future + dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] + + def test_large_power(self, dt): + rshft = self.rshft_1.astype(dt) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2) + assert_equal( + matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3) + + def test_power_is_zero(self, dt): + def tz(M): + mz = matrix_power(M, 0) + assert_equal(mz, identity_like_generalized(M)) + assert_equal(mz.dtype, M.dtype) + + for mat in 
self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_one(self, dt): + def tz(mat): + mz = matrix_power(mat, 1) + assert_equal(mz, mat) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_two(self, dt): + def tz(mat): + mz = matrix_power(mat, 2) + mmul = matmul if mat.dtype != object else dot + assert_equal(mz, mmul(mat, mat)) + assert_equal(mz.dtype, mat.dtype) + + for mat in self.rshft_all: + tz(mat.astype(dt)) + if dt != object: + tz(self.stacked.astype(dt)) + + def test_power_is_minus_one(self, dt): + def tz(mat): + invmat = matrix_power(mat, -1) + mmul = matmul if mat.dtype != object else dot + assert_almost_equal( + mmul(invmat, mat), identity_like_generalized(mat)) + + for mat in self.rshft_all: + if dt not in self.dtnoinv: + tz(mat.astype(dt)) + + def test_exceptions_bad_power(self, dt): + mat = self.rshft_0.astype(dt) + assert_raises(TypeError, matrix_power, mat, 1.5) + assert_raises(TypeError, matrix_power, mat, [1]) + + def test_exceptions_non_square(self, dt): + assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1) + assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1) + assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_exceptions_not_invertible(self, dt): + if dt in self.dtnoinv: + return + mat = self.noninv.astype(dt) + assert_raises(LinAlgError, matrix_power, mat, -1) + + +class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
+ ev = linalg.eigvalsh(a, 'L') + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype)) + + ev2 = linalg.eigvalsh(a, 'U') + assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype)) + + +class TestEigvalsh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w = np.linalg.eigvalsh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigvalsh, x, "lower") + assert_raises(ValueError, np.linalg.eigvalsh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w = np.linalg.eigvalsh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w = np.linalg.eigvalsh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w = np.linalg.eigvalsh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w = np.linalg.eigvalsh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w = np.linalg.eigvalsh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float64) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.eigvalsh(a) + assert_(res.dtype.type is np.float32) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(res, np.ndarray)) + + +class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase): + + def do(self, a, b, tags): + # note that eigenvalue arrays returned by eig must be sorted since + # their order isn't guaranteed. 
+ res = linalg.eigh(a) + ev, evc = res.eigenvalues, res.eigenvectors + evalues, evectors = linalg.eig(a) + evalues.sort(axis=-1) + assert_almost_equal(ev, evalues) + + assert_allclose(matmul(a, evc), + np.asarray(ev)[..., None, :] * np.asarray(evc), + rtol=get_rtol(ev.dtype)) + + ev2, evc2 = linalg.eigh(a, 'U') + assert_almost_equal(ev2, evalues) + + assert_allclose(matmul(a, evc2), + np.asarray(ev2)[..., None, :] * np.asarray(evc2), + rtol=get_rtol(ev.dtype), err_msg=repr(a)) + + +class TestEigh: + @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble]) + def test_types(self, dtype): + x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype) + w, v = np.linalg.eigh(x) + assert_equal(w.dtype, get_real_dtype(dtype)) + assert_equal(v.dtype, dtype) + + def test_invalid(self): + x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32) + assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong") + assert_raises(ValueError, np.linalg.eigh, x, "lower") + assert_raises(ValueError, np.linalg.eigh, x, "upper") + + def test_UPLO(self): + Klo = np.array([[0, 0], [1, 0]], dtype=np.double) + Kup = np.array([[0, 1], [0, 0]], dtype=np.double) + tgt = np.array([-1, 1], dtype=np.double) + rtol = get_rtol(np.double) + + # Check default is 'L' + w, v = np.linalg.eigh(Klo) + assert_allclose(w, tgt, rtol=rtol) + # Check 'L' + w, v = np.linalg.eigh(Klo, UPLO='L') + assert_allclose(w, tgt, rtol=rtol) + # Check 'l' + w, v = np.linalg.eigh(Klo, UPLO='l') + assert_allclose(w, tgt, rtol=rtol) + # Check 'U' + w, v = np.linalg.eigh(Kup, UPLO='U') + assert_allclose(w, tgt, rtol=rtol) + # Check 'u' + w, v = np.linalg.eigh(Kup, UPLO='u') + assert_allclose(w, tgt, rtol=rtol) + + def test_0_size(self): + # Check that all kinds of 0-sized arrays work + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.float64) + assert_(res.dtype.type is np.float64) + assert_equal(a.shape, res_v.shape) + assert_equal((0, 1), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass) + res, res_v = linalg.eigh(a) + assert_(res_v.dtype.type is np.complex64) + assert_(res.dtype.type is np.float32) + assert_equal(a.shape, res_v.shape) + assert_equal((0,), res.shape) + # This is just for documentation, it might make sense to change: + assert_(isinstance(a, np.ndarray)) + + +class _TestNormBase: + dt = None + dec = None + + @staticmethod + def check_dtype(x, res): + if issubclass(x.dtype.type, np.inexact): + assert_equal(res.dtype, x.real.dtype) + else: + # For integer input, don't have to test float precision of output. 
+ assert_(issubclass(res.dtype.type, np.floating)) + + +class _TestNormGeneral(_TestNormBase): + + def test_empty(self): + assert_equal(norm([]), 0.0) + assert_equal(norm(array([], dtype=self.dt)), 0.0) + assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0) + + def test_vector_return_type(self): + a = np.array([1, 0, 1]) + + exact_types = np.typecodes['AllInteger'] + inexact_types = np.typecodes['AllFloat'] + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 0.0) + + an = norm(at, 0) + self.check_dtype(at, an) + assert_almost_equal(an, 2) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 2.0)) + + an = norm(at, 4) + self.check_dtype(at, an) + assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 4.0)) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + def test_vector(self): + a = [1, 2, 3, 4] + b = [-1, -2, -3, -4] + c = [-1, 2, -3, 4] + + def _test(v): + np.testing.assert_almost_equal(norm(v), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, inf), 4.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -inf), 1.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 1), 10.0, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5, + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5), + decimal=self.dec) + np.testing.assert_almost_equal(norm(v, 0), 4, + decimal=self.dec) + + for v in (a, b, c,): + _test(v) + + for v in (array(a, dtype=self.dt), array(b, dtype=self.dt), + array(c, dtype=self.dt)): + _test(v) + + def test_axis(self): + # Vector norms. + # Compare the use of `axis` with computing the norm of each row + # or column separately. + A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: + expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])] + assert_almost_equal(norm(A, ord=order, axis=0), expected0) + expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])] + assert_almost_equal(norm(A, ord=order, axis=1), expected1) + + # Matrix norms. + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + nd = B.ndim + for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro']: + for axis in itertools.combinations(range(-nd, nd), 2): + row_axis, col_axis = axis + if row_axis < 0: + row_axis += nd + if col_axis < 0: + col_axis += nd + if row_axis == col_axis: + assert_raises(ValueError, norm, B, ord=order, axis=axis) + else: + n = norm(B, ord=order, axis=axis) + + # The logic using k_index only works for nd = 3. + # This has to be changed if nd is increased. 
+ k_index = nd - (row_axis + col_axis) + if row_axis < col_axis: + expected = [norm(B[:].take(k, axis=k_index), ord=order) + for k in range(B.shape[k_index])] + else: + expected = [norm(B[:].take(k, axis=k_index).T, ord=order) + for k in range(B.shape[k_index])] + assert_almost_equal(n, expected) + + def test_keepdims(self): + A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + allclose_err = 'order {0}, axis = {1}' + shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}' + + # check the order=None, axis=None case + expected = norm(A, ord=None, axis=None) + found = norm(A, ord=None, axis=None, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(None, None)) + expected_shape = (1, 1, 1) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, None, None)) + + # Vector norms. + for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]: + for k in range(A.ndim): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + # Matrix norms. + for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro', 'nuc']: + for k in itertools.permutations(range(A.ndim), 2): + expected = norm(A, ord=order, axis=k) + found = norm(A, ord=order, axis=k, keepdims=True) + assert_allclose(np.squeeze(found), expected, + err_msg=allclose_err.format(order, k)) + expected_shape = list(A.shape) + expected_shape[k[0]] = 1 + expected_shape[k[1]] = 1 + expected_shape = tuple(expected_shape) + assert_(found.shape == expected_shape, + shape_err.format(found.shape, expected_shape, order, k)) + + +class _TestNorm2D(_TestNormBase): + # Define the part for 2d arrays separately, so we can subclass this + # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg. + array = np.array + + def test_matrix_empty(self): + assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0) + + def test_matrix_return_type(self): + a = self.array([[1, 0, 1], [0, 1, 1]]) + + exact_types = np.typecodes['AllInteger'] + + # float32, complex64, float64, complex128 types are the only types + # allowed by `linalg`, which performs the matrix operations used + # within `norm`. + inexact_types = 'fdFD' + + all_types = exact_types + inexact_types + + for each_type in all_types: + at = a.astype(each_type) + + an = norm(at, -np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered") + an = norm(at, -1) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, 1) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 2) + self.check_dtype(at, an) + assert_almost_equal(an, 3.0**(1.0 / 2.0)) + + an = norm(at, -2) + self.check_dtype(at, an) + assert_almost_equal(an, 1.0) + + an = norm(at, np.inf) + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'fro') + self.check_dtype(at, an) + assert_almost_equal(an, 2.0) + + an = norm(at, 'nuc') + self.check_dtype(at, an) + # Lower bar needed to support low precision floats. + # They end up being off by 1 in the 7th place. 
+ np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6) + + def test_matrix_2x2(self): + A = self.array([[1, 3], [5, 7]], dtype=self.dt) + assert_almost_equal(norm(A), 84 ** 0.5) + assert_almost_equal(norm(A, 'fro'), 84 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 10.0) + assert_almost_equal(norm(A, inf), 12.0) + assert_almost_equal(norm(A, -inf), 4.0) + assert_almost_equal(norm(A, 1), 10.0) + assert_almost_equal(norm(A, -1), 6.0) + assert_almost_equal(norm(A, 2), 9.1231056256176615) + assert_almost_equal(norm(A, -2), 0.87689437438234041) + + assert_raises(ValueError, norm, A, 'nofro') + assert_raises(ValueError, norm, A, -3) + assert_raises(ValueError, norm, A, 0) + + def test_matrix_3x3(self): + # This test has been added because the 2x2 example + # happened to have equal nuclear norm and induced 1-norm. + # The 1/10 scaling factor accommodates the absolute tolerance + # used in assert_almost_equal. + A = (1 / 10) * \ + self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt) + assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5) + assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836) + assert_almost_equal(norm(A, inf), 1.1) + assert_almost_equal(norm(A, -inf), 0.6) + assert_almost_equal(norm(A, 1), 1.0) + assert_almost_equal(norm(A, -1), 0.4) + assert_almost_equal(norm(A, 2), 0.88722940323461277) + assert_almost_equal(norm(A, -2), 0.19456584790481812) + + def test_bad_args(self): + # Check that bad arguments raise the appropriate exceptions. + + A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt) + B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4) + + # Using `axis=` or passing in a 1-D array implies vector + # norms are being computed, so also using `ord='fro'` + # or `ord='nuc'` or any other string raises a ValueError. + assert_raises(ValueError, norm, A, 'fro', 0) + assert_raises(ValueError, norm, A, 'nuc', 0) + assert_raises(ValueError, norm, [3, 4], 'fro', None) + assert_raises(ValueError, norm, [3, 4], 'nuc', None) + assert_raises(ValueError, norm, [3, 4], 'test', None) + + # Similarly, norm should raise an exception when ord is any finite + # number other than 1, 2, -1 or -2 when computing matrix norms. + for order in [0, 3]: + assert_raises(ValueError, norm, A, order, None) + assert_raises(ValueError, norm, A, order, (0, 1)) + assert_raises(ValueError, norm, B, order, (1, 2)) + + # Invalid axis + assert_raises(AxisError, norm, B, None, 3) + assert_raises(AxisError, norm, B, None, (2, 3)) + assert_raises(ValueError, norm, B, None, (0, 1, 2)) + + +class _TestNorm(_TestNorm2D, _TestNormGeneral): + pass + + +class TestNorm_NonSystematic: + + def test_longdouble_norm(self): + # Non-regression test: p-norm of longdouble would previously raise + # UnboundLocalError. + x = np.arange(10, dtype=np.longdouble) + old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2) + + def test_intmin(self): + # Non-regression test: p-norm of signed integer would previously do + # float cast and abs in the wrong order. 
+ x = np.array([-2 ** 31], dtype=np.int32) + old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5) + + def test_complex_high_ord(self): + # gh-4156 + d = np.empty((2,), dtype=np.clongdouble) + d[0] = 6 + 7j + d[1] = -6 + 7j + res = 11.615898132184 + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10) + d = d.astype(np.complex128) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9) + d = d.astype(np.complex64) + old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5) + + +# Separate definitions so we can use them for matrix tests. +class _TestNormDoubleBase(_TestNormBase): + dt = np.double + dec = 12 + + +class _TestNormSingleBase(_TestNormBase): + dt = np.float32 + dec = 6 + + +class _TestNormInt64Base(_TestNormBase): + dt = np.int64 + dec = 12 + + +class TestNormDouble(_TestNorm, _TestNormDoubleBase): + pass + + +class TestNormSingle(_TestNorm, _TestNormSingleBase): + pass + + +class TestNormInt64(_TestNorm, _TestNormInt64Base): + pass + + +class TestMatrixRank: + + def test_matrix_rank(self): + # Full rank matrix + assert_equal(4, matrix_rank(np.eye(4))) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(matrix_rank(I), 3) + # All zeros - zero rank + assert_equal(matrix_rank(np.zeros((4, 4))), 0) + # 1 dimension - rank 1 unless all 0 + assert_equal(matrix_rank([1, 0, 0, 0]), 1) + assert_equal(matrix_rank(np.zeros((4,))), 0) + # accepts array-like + assert_equal(matrix_rank([1]), 1) + # greater than 2 dimensions treated as stacked matrices + ms = np.array([I, np.eye(4), np.zeros((4, 4))]) + assert_equal(matrix_rank(ms), np.array([3, 4, 0])) + # works on scalar + assert_equal(matrix_rank(1), 1) + + with assert_raises_regex( + ValueError, "`tol` and `rtol` can\'t be both set." + ): + matrix_rank(I, tol=0.01, rtol=0.01) + + def test_symmetric_rank(self): + assert_equal(4, matrix_rank(np.eye(4), hermitian=True)) + assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True)) + assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True)) + # rank deficient matrix + I = np.eye(4) + I[-1, -1] = 0. + assert_equal(3, matrix_rank(I, hermitian=True)) + # manually supplied tolerance + I[-1, -1] = 1e-8 + assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8)) + assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8)) + + +def test_reduced_rank(): + # Test matrices with reduced rank + rng = np.random.RandomState(20120714) + for i in range(100): + # Make a rank deficient matrix + X = rng.normal(size=(40, 10)) + X[:, 0] = X[:, 1] + X[:, 2] + # Assert that matrix_rank detected deficiency + assert_equal(matrix_rank(X), 9) + X[:, 3] = X[:, 4] + X[:, 5] + assert_equal(matrix_rank(X), 8) + + +class TestQR: + # Define the array class here, so run this on matrices elsewhere. + array = np.array + + def check_qr(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape + k = min(m, n) + + # mode == 'complete' + res = linalg.qr(a, mode='complete') + Q, R = res.Q, res.R + assert_(Q.dtype == a_dtype) + assert_(R.dtype == a_dtype) + assert_(isinstance(Q, a_type)) + assert_(isinstance(R, a_type)) + assert_(Q.shape == (m, m)) + assert_(R.shape == (m, n)) + assert_almost_equal(dot(Q, R), a) + assert_almost_equal(dot(Q.T.conj(), Q), np.eye(m)) + assert_almost_equal(np.triu(R), R) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape == (m, k)) + assert_(r1.shape == (k, n)) + assert_almost_equal(dot(q1, r1), a) + assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k)) + assert_almost_equal(np.triu(r1), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + @pytest.mark.parametrize(["m", "n"], [ + (3, 0), + (0, 3), + (0, 0) + ]) + def test_qr_empty(self, m, n): + k = min(m, n) + a = np.empty((m, n)) + + self.check_qr(a) + + h, tau = np.linalg.qr(a, mode='raw') + assert_equal(h.dtype, np.double) + assert_equal(tau.dtype, np.double) + assert_equal(h.shape, (n, m)) + assert_equal(tau.shape, (k,)) + + def test_mode_raw(self): + # The factorization is not unique and varies between libraries, + # so it is not possible to check against known values. Functional + # testing is a possibility, but awaits the exposure of more + # of the functions in lapack_lite. Consequently, this test is + # very limited in scope. Note that the results are in FORTRAN + # order, hence the h arrays are transposed. + a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double) + + # Test double + h, tau = linalg.qr(a, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (2, 3)) + assert_(tau.shape == (2,)) + + h, tau = linalg.qr(a.T, mode='raw') + assert_(h.dtype == np.double) + assert_(tau.dtype == np.double) + assert_(h.shape == (3, 2)) + assert_(tau.shape == (2,)) + + def test_mode_all_but_economic(self): + a = self.array([[1, 2], [3, 4]]) + b = self.array([[1, 2], [3, 4], [5, 6]]) + for dt in "fd": + m1 = a.astype(dt) + m2 = b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + for dt in "fd": + m1 = 1 + 1j * a.astype(dt) + m2 = 1 + 1j * b.astype(dt) + self.check_qr(m1) + self.check_qr(m2) + self.check_qr(m2.T) + + def check_qr_stacked(self, a): + # This test expects the argument `a` to be an ndarray or + # a subclass of an ndarray of inexact type. 
+ a_type = type(a) + a_dtype = a.dtype + m, n = a.shape[-2:] + k = min(m, n) + + # mode == 'complete' + q, r = linalg.qr(a, mode='complete') + assert_(q.dtype == a_dtype) + assert_(r.dtype == a_dtype) + assert_(isinstance(q, a_type)) + assert_(isinstance(r, a_type)) + assert_(q.shape[-2:] == (m, m)) + assert_(r.shape[-2:] == (m, n)) + assert_almost_equal(matmul(q, r), a) + I_mat = np.identity(q.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q.shape[:-2] + (q.shape[-1],) * 2) + assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat) + assert_almost_equal(np.triu(r[..., :, :]), r) + + # mode == 'reduced' + q1, r1 = linalg.qr(a, mode='reduced') + assert_(q1.dtype == a_dtype) + assert_(r1.dtype == a_dtype) + assert_(isinstance(q1, a_type)) + assert_(isinstance(r1, a_type)) + assert_(q1.shape[-2:] == (m, k)) + assert_(r1.shape[-2:] == (k, n)) + assert_almost_equal(matmul(q1, r1), a) + I_mat = np.identity(q1.shape[-1]) + stack_I_mat = np.broadcast_to(I_mat, + q1.shape[:-2] + (q1.shape[-1],) * 2) + assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1), + stack_I_mat) + assert_almost_equal(np.triu(r1[..., :, :]), r1) + + # mode == 'r' + r2 = linalg.qr(a, mode='r') + assert_(r2.dtype == a_dtype) + assert_(isinstance(r2, a_type)) + assert_almost_equal(r2, r1) + + @pytest.mark.parametrize("size", [ + (3, 4), (4, 3), (4, 4), + (3, 0), (0, 3)]) + @pytest.mark.parametrize("outer_size", [ + (2, 2), (2,), (2, 3, 4)]) + @pytest.mark.parametrize("dt", [ + np.single, np.double, + np.csingle, np.cdouble]) + def test_stacked_inputs(self, outer_size, size, dt): + + rng = np.random.default_rng(123) + A = rng.normal(size=outer_size + size).astype(dt) + B = rng.normal(size=outer_size + size).astype(dt) + self.check_qr_stacked(A) + self.check_qr_stacked(A + 1.j * B) + + +class TestCholesky: + + @pytest.mark.parametrize( + 'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)] + ) + @pytest.mark.parametrize( + 'dtype', (np.float32, np.float64, np.complex64, np.complex128) + ) + @pytest.mark.parametrize( + 'upper', [False, True]) + def test_basic_property(self, shape, dtype, upper): + np.random.seed(1) + a = np.random.randn(*shape) + if np.issubdtype(dtype, np.complexfloating): + a = a + 1j * np.random.randn(*shape) + + t = list(range(len(shape))) + t[-2:] = -1, -2 + + a = np.matmul(a.transpose(t).conj(), a) + a = np.asarray(a, dtype=dtype) + + c = np.linalg.cholesky(a, upper=upper) + + # Check A = L L^H or A = U^H U + if upper: + b = np.matmul(c.transpose(t).conj(), c) + else: + b = np.matmul(c, c.transpose(t).conj()) + + atol = 500 * a.shape[0] * np.finfo(dtype).eps + assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}') + + # Check diag(L or U) is real and positive + d = np.diagonal(c, axis1=-2, axis2=-1) + assert_(np.all(np.isreal(d))) + assert_(np.all(d >= 0)) + + def test_0_size(self): + class ArraySubclass(np.ndarray): + pass + a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.float64) + # for documentation purpose: + assert_(isinstance(res, np.ndarray)) + + a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass) + res = linalg.cholesky(a) + assert_equal(a.shape, res.shape) + assert_(res.dtype.type is np.complex64) + assert_(isinstance(res, np.ndarray)) + + def test_upper_lower_arg(self): + # Explicit test of upper argument that also checks the default. 
+ a = np.array([[1 + 0j, 0 - 2j], [0 + 2j, 5 + 0j]]) + + assert_equal(linalg.cholesky(a), linalg.cholesky(a, upper=False)) + + assert_equal( + linalg.cholesky(a, upper=True), + linalg.cholesky(a).T.conj() + ) + + +class TestOuter: + arr1 = np.arange(3) + arr2 = np.arange(3) + expected = np.array( + [[0, 0, 0], + [0, 1, 2], + [0, 2, 4]] + ) + + assert_array_equal(np.linalg.outer(arr1, arr2), expected) + + with assert_raises_regex( + ValueError, "Input arrays must be one-dimensional" + ): + np.linalg.outer(arr1[:, np.newaxis], arr2) + + +def test_byteorder_check(): + # Byte order check should pass for native order + if sys.byteorder == 'little': + native = '<' + else: + native = '>' + + for dtt in (np.float32, np.float64): + arr = np.eye(4, dtype=dtt) + n_arr = arr.view(arr.dtype.newbyteorder(native)) + sw_arr = arr.view(arr.dtype.newbyteorder("S")).byteswap() + assert_equal(arr.dtype.byteorder, '=') + for routine in (linalg.inv, linalg.det, linalg.pinv): + # Normal call + res = routine(arr) + # Native but not '=' + assert_array_equal(res, routine(n_arr)) + # Swapped + assert_array_equal(res, routine(sw_arr)) + + +@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") +def test_generalized_raise_multiloop(): + # It should raise an error even if the error doesn't occur in the + # last iteration of the ufunc inner loop + + invertible = np.array([[1, 2], [3, 4]]) + non_invertible = np.array([[1, 1], [1, 1]]) + + x = np.zeros([4, 4, 2, 2])[1::2] + x[...] = invertible + x[0, 0] = non_invertible + + assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) + + +@pytest.mark.skipif( + threading.active_count() > 1, + reason="skipping test that uses fork because there are multiple threads") +@pytest.mark.skipif( + NOGIL_BUILD, + reason="Cannot safely use fork in tests on the free-threaded build") +def test_xerbla_override(): + # Check that our xerbla has been successfully linked in. If it is not, + # the default xerbla routine is called, which prints a message to stdout + # and may, or may not, abort the process depending on the LAPACK package. + + XERBLA_OK = 255 + + try: + pid = os.fork() + except (OSError, AttributeError): + # fork failed, or not running on POSIX + pytest.skip("Not POSIX or fork failed.") + + if pid == 0: + # child; close i/o file handles + os.close(1) + os.close(0) + # Avoid producing core files. + import resource + resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) + # These calls may abort. + try: + np.linalg.lapack_lite.xerbla() + except ValueError: + pass + except Exception: + os._exit(os.EX_CONFIG) + + try: + a = np.array([[1.]]) + np.linalg.lapack_lite.dorgqr( + 1, 1, 1, a, + 0, # <- invalid value + a, a, 0, 0) + except ValueError as e: + if "DORGQR parameter number 5" in str(e): + # success, reuse error code to mark success as + # FORTRAN STOP returns as success. + os._exit(XERBLA_OK) + + # Did not abort, but our xerbla was not linked in. + os._exit(os.EX_CONFIG) + else: + # parent + pid, status = os.wait() + if os.WEXITSTATUS(status) != XERBLA_OK: + pytest.skip('Numpy xerbla not linked in.') + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +@pytest.mark.slow +def test_sdot_bug_8577(): + # Regression test that loading certain other libraries does not + # result to wrong results in float32 linear algebra. + # + # There's a bug gh-8577 on OSX that can trigger this, and perhaps + # there are also other situations in which it occurs. + # + # Do the check in a separate process. 
+ + bad_libs = ['PyQt5.QtWidgets', 'IPython'] + + template = textwrap.dedent(""" + import sys + {before} + try: + import {bad_lib} + except ImportError: + sys.exit(0) + {after} + x = np.ones(2, dtype=np.float32) + sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1) + """) + + for bad_lib in bad_libs: + code = template.format(before="import numpy as np", after="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + # Swapped import order + code = template.format(after="import numpy as np", before="", + bad_lib=bad_lib) + subprocess.check_call([sys.executable, "-c", code]) + + +class TestMultiDot: + + def test_basic_function_with_three_arguments(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C)) + assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C))) + + def test_basic_function_with_two_arguments(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + + assert_almost_equal(multi_dot([A, B]), A.dot(B)) + assert_almost_equal(multi_dot([A, B]), np.dot(A, B)) + + def test_basic_function_with_dynamic_programming_optimization(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D)) + + def test_vector_as_first_argument(self): + # The first argument can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 2)) + + # the result should be 1-D + assert_equal(multi_dot([A1d, B, C, D]).shape, (2,)) + + def test_vector_as_last_argument(self): + # The last argument can be 1-D + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be 1-D + assert_equal(multi_dot([A, B, C, D1d]).shape, (6,)) + + def test_vector_as_first_and_last_argument(self): + # The first and last arguments can be 1-D + A1d = np.random.random(2) # 1-D + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D1d = np.random.random(2) # 1-D + + # the result should be a scalar + assert_equal(multi_dot([A1d, B, C, D1d]).shape, ()) + + def test_three_arguments_and_out(self): + # multi_dot with three arguments uses a fast hand coded algorithm to + # determine the optimal order. Therefore test it separately. 
+ A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + + out = np.zeros((6, 2)) + ret = multi_dot([A, B, C], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C)) + assert_almost_equal(out, np.dot(A, np.dot(B, C))) + + def test_two_arguments_and_out(self): + # separate code path with two arguments + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + out = np.zeros((6, 6)) + ret = multi_dot([A, B], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B)) + assert_almost_equal(out, np.dot(A, B)) + + def test_dynamic_programming_optimization_and_out(self): + # multi_dot with four or more arguments uses the dynamic programming + # optimization and therefore deserve a separate test + A = np.random.random((6, 2)) + B = np.random.random((2, 6)) + C = np.random.random((6, 2)) + D = np.random.random((2, 1)) + out = np.zeros((6, 1)) + ret = multi_dot([A, B, C, D], out=out) + assert out is ret + assert_almost_equal(out, A.dot(B).dot(C).dot(D)) + + def test_dynamic_programming_logic(self): + # Test for the dynamic programming part + # This test is directly taken from Cormen page 376. + arrays = [np.random.random((30, 35)), + np.random.random((35, 15)), + np.random.random((15, 5)), + np.random.random((5, 10)), + np.random.random((10, 20)), + np.random.random((20, 25))] + m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.], + [0., 0., 2625., 4375., 7125., 10500.], + [0., 0., 0., 750., 2500., 5375.], + [0., 0., 0., 0., 1000., 3500.], + [0., 0., 0., 0., 0., 5000.], + [0., 0., 0., 0., 0., 0.]]) + s_expected = np.array([[0, 1, 1, 3, 3, 3], + [0, 0, 2, 3, 3, 3], + [0, 0, 0, 3, 3, 3], + [0, 0, 0, 0, 4, 5], + [0, 0, 0, 0, 0, 5], + [0, 0, 0, 0, 0, 0]], dtype=int) + s_expected -= 1 # Cormen uses 1-based index, python does not. + + s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True) + + # Only the upper triangular part (without the diagonal) is interesting. 
+ assert_almost_equal(np.triu(s[:-1, 1:]), + np.triu(s_expected[:-1, 1:])) + assert_almost_equal(np.triu(m), np.triu(m_expected)) + + def test_too_few_input_arrays(self): + assert_raises(ValueError, multi_dot, []) + assert_raises(ValueError, multi_dot, [np.random.random((3, 3))]) + + +class TestTensorinv: + + @pytest.mark.parametrize("arr, ind", [ + (np.ones((4, 6, 8, 2)), 2), + (np.ones((3, 3, 2)), 1), + ]) + def test_non_square_handling(self, arr, ind): + with assert_raises(LinAlgError): + linalg.tensorinv(arr, ind=ind) + + @pytest.mark.parametrize("shape, ind", [ + # examples from docstring + ((4, 6, 8, 3), 2), + ((24, 8, 3), 1), + ]) + def test_tensorinv_shape(self, shape, ind): + a = np.eye(24) + a.shape = shape + ainv = linalg.tensorinv(a=a, ind=ind) + expected = a.shape[ind:] + a.shape[:ind] + actual = ainv.shape + assert_equal(actual, expected) + + @pytest.mark.parametrize("ind", [ + 0, -2, + ]) + def test_tensorinv_ind_limit(self, ind): + a = np.eye(24) + a.shape = (4, 6, 8, 3) + with assert_raises(ValueError): + linalg.tensorinv(a=a, ind=ind) + + def test_tensorinv_result(self): + # mimic a docstring example + a = np.eye(24) + a.shape = (24, 8, 3) + ainv = linalg.tensorinv(a, ind=1) + b = np.ones(24) + assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) + + +class TestTensorsolve: + + @pytest.mark.parametrize("a, axes", [ + (np.ones((4, 6, 8, 2)), None), + (np.ones((3, 3, 2)), (0, 2)), + ]) + def test_non_square_handling(self, a, axes): + with assert_raises(LinAlgError): + b = np.ones(a.shape[:2]) + linalg.tensorsolve(a, b, axes=axes) + + @pytest.mark.parametrize("shape", + [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 0)], + ) + def test_tensorsolve_result(self, shape): + a = np.random.randn(*shape) + b = np.ones(a.shape[:2]) + x = np.linalg.tensorsolve(a, b) + assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b) + + +def test_unsupported_commontype(): + # linalg gracefully handles unsupported type + arr = np.array([[1, -2], [2, 5]], dtype='float16') + with assert_raises_regex(TypeError, "unsupported in linalg"): + linalg.cholesky(arr) + + +#@pytest.mark.slow +#@pytest.mark.xfail(not HAS_LAPACK64, run=False, +# reason="Numpy not compiled with 64-bit BLAS/LAPACK") +#@requires_memory(free_bytes=16e9) +@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing") +def test_blas64_dot(): + n = 2**32 + a = np.zeros([1, n], dtype=np.float32) + b = np.ones([1, 1], dtype=np.float32) + a[0, -1] = 1 + c = np.dot(b, a) + assert_equal(c[0, -1], 1) + + +@pytest.mark.xfail(not HAS_LAPACK64, + reason="Numpy not compiled with 64-bit BLAS/LAPACK") +def test_blas64_geqrf_lwork_smoketest(): + # Smoke test LAPACK geqrf lwork call with 64-bit integers + dtype = np.float64 + lapack_routine = np.linalg.lapack_lite.dgeqrf + + m = 2**32 + 1 + n = 2**32 + 1 + lda = m + + # Dummy arrays, not referenced by the lapack routine, so don't + # need to be of the right size + a = np.zeros([1, 1], dtype=dtype) + work = np.zeros([1], dtype=dtype) + tau = np.zeros([1], dtype=dtype) + + # Size query + results = lapack_routine(m, n, a, lda, tau, work, -1, 0) + assert_equal(results['info'], 0) + assert_equal(results['m'], m) + assert_equal(results['n'], m) + + # Should result to an integer of a reasonable size + lwork = int(work.item()) + assert_(2**32 < lwork < 2**42) + + +def test_diagonal(): + # Here we only test if selected axes are compatible + # with Array API (last two). Core implementation + # of `diagonal` is tested in `test_multiarray.py`. 
+ x = np.arange(60).reshape((3, 4, 5)) + actual = np.linalg.diagonal(x) + expected = np.array( + [ + [0, 6, 12, 18], + [20, 26, 32, 38], + [40, 46, 52, 58], + ] + ) + assert_equal(actual, expected) + + +def test_trace(): + # Here we only test if selected axes are compatible + # with Array API (last two). Core implementation + # of `trace` is tested in `test_multiarray.py`. + x = np.arange(60).reshape((3, 4, 5)) + actual = np.linalg.trace(x) + expected = np.array([36, 116, 196]) + + assert_equal(actual, expected) + + +def test_cross(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.cross(x, x + 1) + expected = np.array([ + [-1, 2, -1], + [-1, 2, -1], + [-1, 2, -1], + ]) + + assert_equal(actual, expected) + + # We test that lists are converted to arrays. + u = [1, 2, 3] + v = [4, 5, 6] + actual = np.linalg.cross(u, v) + expected = array([-3, 6, -3]) + + assert_equal(actual, expected) + + with assert_raises_regex( + ValueError, + r"input arrays must be \(arrays of\) 3-dimensional vectors" + ): + x_2dim = x[:, 1:] + np.linalg.cross(x_2dim, x_2dim) + + +def test_tensordot(): + # np.linalg.tensordot is just an alias for np.tensordot + x = np.arange(6).reshape((2, 3)) + + assert np.linalg.tensordot(x, x) == 55 + assert np.linalg.tensordot(x, x, axes=[(0, 1), (0, 1)]) == 55 + + +def test_matmul(): + # np.linalg.matmul and np.matmul only differs in the number + # of arguments in the signature + x = np.arange(6).reshape((2, 3)) + actual = np.linalg.matmul(x, x.T) + expected = np.array([[5, 14], [14, 50]]) + + assert_equal(actual, expected) + + +def test_matrix_transpose(): + x = np.arange(6).reshape((2, 3)) + actual = np.linalg.matrix_transpose(x) + expected = x.T + + assert_equal(actual, expected) + + with assert_raises_regex( + ValueError, "array must be at least 2-dimensional" + ): + np.linalg.matrix_transpose(x[:, 0]) + + +def test_matrix_norm(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.matrix_norm(x) + + assert_almost_equal(actual, np.float64(14.2828), double_decimal=3) + + actual = np.linalg.matrix_norm(x, keepdims=True) + + assert_almost_equal(actual, np.array([[14.2828]]), double_decimal=3) + + +def test_matrix_norm_empty(): + for shape in [(0, 2), (2, 0), (0, 0)]: + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(shape, dtype) + assert_equal(np.linalg.matrix_norm(x, ord="fro"), 0) + assert_equal(np.linalg.matrix_norm(x, ord="nuc"), 0) + assert_equal(np.linalg.matrix_norm(x, ord=1), 0) + assert_equal(np.linalg.matrix_norm(x, ord=2), 0) + assert_equal(np.linalg.matrix_norm(x, ord=np.inf), 0) + +def test_vector_norm(): + x = np.arange(9).reshape((3, 3)) + actual = np.linalg.vector_norm(x) + + assert_almost_equal(actual, np.float64(14.2828), double_decimal=3) + + actual = np.linalg.vector_norm(x, axis=0) + + assert_almost_equal( + actual, np.array([6.7082, 8.124, 9.6436]), double_decimal=3 + ) + + actual = np.linalg.vector_norm(x, keepdims=True) + expected = np.full((1, 1), 14.2828, dtype='float64') + assert_equal(actual.shape, expected.shape) + assert_almost_equal(actual, expected, double_decimal=3) + + +def test_vector_norm_empty(): + for dtype in [np.float64, np.float32, np.int32]: + x = np.zeros(0, dtype) + assert_equal(np.linalg.vector_norm(x, ord=1), 0) + assert_equal(np.linalg.vector_norm(x, ord=2), 0) + assert_equal(np.linalg.vector_norm(x, ord=np.inf), 0) diff --git a/python/numpy/linalg/tests/test_regression.py b/python/numpy/linalg/tests/test_regression.py new file mode 100644 index 000000000..c46f83adb --- /dev/null +++ 
b/python/numpy/linalg/tests/test_regression.py
@@ -0,0 +1,181 @@
+""" Test functions for linalg module
+"""
+
+import pytest
+
+import numpy as np
+from numpy import arange, array, dot, float64, linalg, transpose
+from numpy.testing import (
+    assert_,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_array_less,
+    assert_equal,
+    assert_raises,
+)
+
+
+class TestRegression:
+
+    def test_eig_build(self):
+        # Ticket #652
+        rva = array([1.03221168e+02 + 0.j,
+                     -1.91843603e+01 + 0.j,
+                     -6.04004526e-01 + 15.84422474j,
+                     -6.04004526e-01 - 15.84422474j,
+                     -1.13692929e+01 + 0.j,
+                     -6.57612485e-01 + 10.41755503j,
+                     -6.57612485e-01 - 10.41755503j,
+                     1.82126812e+01 + 0.j,
+                     1.06011014e+01 + 0.j,
+                     7.80732773e+00 + 0.j,
+                     -7.65390898e-01 + 0.j,
+                     1.51971555e-15 + 0.j,
+                     -1.51308713e-15 + 0.j])
+        a = arange(13 * 13, dtype=float64)
+        a.shape = (13, 13)
+        a = a % 17
+        va, ve = linalg.eig(a)
+        va.sort()
+        rva.sort()
+        assert_array_almost_equal(va, rva)
+
+    def test_eigh_build(self):
+        # Ticket 662.
+        rvals = [68.60568999, 89.57756725, 106.67185574]
+
+        cov = array([[77.70273908, 3.51489954, 15.64602427],
+                     [3.51489954, 88.97013878, -1.07431931],
+                     [15.64602427, -1.07431931, 98.18223512]])
+
+        vals, vecs = linalg.eigh(cov)
+        assert_array_almost_equal(vals, rvals)
+
+    def test_svd_build(self):
+        # Ticket 627.
+        a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
+        m, n = a.shape
+        u, s, vh = linalg.svd(a)
+
+        b = dot(transpose(u[:, n:]), a)
+
+        assert_array_almost_equal(b, np.zeros((2, 2)))
+
+    def test_norm_vector_badarg(self):
+        # Regression for #786: Frobenius norm for vectors raises
+        # ValueError.
+        assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
+
+    def test_lapack_endian(self):
+        # For bug #1482
+        a = array([[5.7998084, -2.1825367],
+                   [-2.1825367, 9.85910595]], dtype='>f8')
+        b = array(a, dtype='<f8')
+
+        ap = linalg.cholesky(a)
+        bp = linalg.cholesky(b)
+        assert_array_equal(ap, bp)
+
+    def test_large_svd_32bit(self):
+        # See gh-4442, 64bit would require very large/slow matrices.
+        x = np.eye(1000, 66)
+        np.linalg.svd(x)
+
+    def test_svd_no_uv(self):
+        # gh-4733
+        for shape in (3, 4), (4, 4), (4, 3):
+            for t in float, complex:
+                a = np.ones(shape, dtype=t)
+                w = linalg.svd(a, compute_uv=False)
+                c = np.count_nonzero(np.absolute(w) > 0.5)
+                assert_equal(c, 1)
+                assert_equal(np.linalg.matrix_rank(a), 1)
+                assert_array_less(1, np.linalg.norm(a, ord=2))
+
+                w_svdvals = linalg.svdvals(a)
+                assert_array_almost_equal(w, w_svdvals)
+
+    def test_norm_object_array(self):
+        # gh-7575
+        testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
+
+        norm = linalg.norm(testvector)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        norm = linalg.norm(testvector, ord=1)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype != np.dtype('float64'))
+
+        norm = linalg.norm(testvector, ord=2)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        assert_raises(ValueError, linalg.norm, testvector, ord='fro')
+        assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
+        assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
+        assert_raises(ValueError, linalg.norm, testvector, ord=0)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-1)
+        assert_raises(ValueError, linalg.norm, testvector, ord=-2)
+
+        testmatrix = np.array([[np.array([0, 1]), 0, 0],
+                               [0, 0, 0]], dtype=object)
+
+        norm = linalg.norm(testmatrix)
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        norm = linalg.norm(testmatrix, ord='fro')
+        assert_array_equal(norm, [0, 1])
+        assert_(norm.dtype == np.dtype('float64'))
+
+        assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
+        assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
+        assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
+        assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
+
+    def test_lstsq_complex_larger_rhs(self):
+        # gh-9891
+        size = 20
+        n_rhs = 70
+        G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
+        u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
+        b = G.dot(u)
+        # This should work without segmentation fault.
+        u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
+        # check results just in case
+        assert_array_almost_equal(u_lstsq, u)
+
+    @pytest.mark.parametrize("upper", [True, False])
+    def test_cholesky_empty_array(self, upper):
+        # gh-25840 - upper=True hung before.
+        res = np.linalg.cholesky(np.zeros((0, 0)), upper=upper)
+        assert res.size == 0
+
+    @pytest.mark.parametrize("rtol", [0.0, [0.0] * 4, np.zeros((4,))])
+    def test_matrix_rank_rtol_argument(self, rtol):
+        # gh-25877
+        x = np.zeros((4, 3, 2))
+        res = np.linalg.matrix_rank(x, rtol=rtol)
+        assert res.shape == (4,)
+
+    def test_openblas_threading(self):
+        # gh-27036
+        # Test whether matrix multiplication involving a large matrix always
+        # gives the same (correct) answer
+        x = np.arange(500000, dtype=np.float64)
+        src = np.vstack((x, -10 * x)).T
+        matrix = np.array([[0, 1], [1, 0]])
+        expected = np.vstack((-10 * x, x)).T  # src @ matrix
+        for i in range(200):
+            result = src @ matrix
+            mismatches = (~np.isclose(result, expected)).sum()
+            if mismatches != 0:
+                assert False, ("unexpected result from matmul, "
+                               "probably due to OpenBLAS threading issues")
diff --git a/python/numpy/ma/API_CHANGES.txt b/python/numpy/ma/API_CHANGES.txt
new file mode 100644
index 000000000..a3d792a1f
--- /dev/null
+++ b/python/numpy/ma/API_CHANGES.txt
@@ -0,0 +1,135 @@
+.. -*- rest -*-
+
+==================================================
+API changes in the new masked array implementation
+==================================================
+
+Masked arrays are subclasses of ndarray
+---------------------------------------
+
+Contrary to the original implementation, masked arrays are now regular
+ndarrays::
+
+    >>> x = masked_array([1,2,3],mask=[0,0,1])
+    >>> print isinstance(x, numpy.ndarray)
+    True
+
+
+``_data`` returns a view of the masked array
+--------------------------------------------
+
+Masked arrays are composed of a ``_data`` part and a ``_mask``. Accessing the
+``_data`` part will return a regular ndarray or any of its subclasses, depending
+on the initial data::
+
+    >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
+    >>> print x._data
+    [[1 2]
+     [3 4]]
+    >>> print type(x._data)
+    <class 'numpy.core.defmatrix.matrix'>
+
+
+In practice, ``_data`` is implemented as a property, not as an attribute.
+Therefore, you cannot access it directly, and some simple tests such as the
+following one will fail::
+
+    >>> x._data is x._data
+    False
+
+
+``filled(x)`` can return a subclass of ndarray
+----------------------------------------------
+The function ``filled(a)`` returns an array of the same type as ``a._data``::
+
+    >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
+    >>> y = filled(x)
+    >>> print type(y)
+    <class 'numpy.core.defmatrix.matrix'>
+    >>> print y
+    matrix([[     1,      2],
+            [     3, 999999]])
+
+
+``put``, ``putmask`` behave like their ndarray counterparts
+-----------------------------------------------------------
+
+Previously, ``putmask`` was used like this::
+
+    mask = [False,True,True]
+    x = array([1,4,7],mask=mask)
+    putmask(x,mask,[3])
+
+which translated to::
+
+    x[~mask] = [3]
+
+(Note that a ``True``-value in a mask suppresses a value.)
+
+In other words, the mask had the same length as ``x``, whereas
+``values`` had ``sum(~mask)`` elements.
+
+Now, the behaviour is similar to that of ``ndarray.putmask``, where
+the mask and the values are both the same length as ``x``, i.e.
+
+::
+
+    putmask(x,mask,[3,0,0])
+
+
+``fill_value`` is a property
+----------------------------
+
+``fill_value`` is no longer a method, but a property::
+
+    >>> print x.fill_value
+    999999
+
+``cumsum`` and ``cumprod`` ignore missing values
+------------------------------------------------
+
+Missing values are assumed to be the identity element, i.e. 0 for
+``cumsum`` and 1 for ``cumprod``::
+
+    >>> x = N.ma.array([1,2,3,4],mask=[False,True,False,False])
+    >>> print x
+    [1 -- 3 4]
+    >>> print x.cumsum()
+    [1 -- 4 8]
+    >>> print x.cumprod()
+    [1 -- 3 12]
+
+``bool(x)`` raises a ValueError
+-------------------------------
+
+Masked arrays now behave like regular ``ndarrays``, in that they cannot be
+converted to booleans:
+
+::
+
+    >>> x = N.ma.array([1,2,3])
+    >>> bool(x)
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
+
+
+==================================
+New features (non-exhaustive list)
+==================================
+
+``mr_``
+-------
+
+``mr_`` mimics the behavior of ``r_`` for masked arrays::
+
+    >>> np.ma.mr_[3,4,5]
+    masked_array(data = [3 4 5],
+                 mask = False,
+           fill_value=999999)
+
+
+``anom``
+--------
+
+The ``anom`` method returns the deviations from the average (anomalies).
diff --git a/python/numpy/ma/LICENSE b/python/numpy/ma/LICENSE
new file mode 100644
index 000000000..b41aae0c8
--- /dev/null
+++ b/python/numpy/ma/LICENSE
@@ -0,0 +1,24 @@
+* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant
+* All rights reserved.
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are met:
+*
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above copyright
+*       notice, this list of conditions and the following disclaimer in the
+*       documentation and/or other materials provided with the distribution.
+*     * Neither the name of the University of Georgia nor the
+*       names of its contributors may be used to endorse or promote products
+*       derived from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/python/numpy/ma/README.rst b/python/numpy/ma/README.rst
new file mode 100644
index 000000000..cd1010329
--- /dev/null
+++ b/python/numpy/ma/README.rst
@@ -0,0 +1,236 @@
+==================================
+A guide to masked arrays in NumPy
+==================================
+
+.. Contents::
+
+See http://www.scipy.org/scipy/numpy/wiki/MaskedArray (dead link)
+for updates of this document.
+
+
+History
+-------
+
+As a regular user of MaskedArray, I (Pierre G.F. Gerard-Marchant) became
+increasingly frustrated with the subclassing of masked arrays (even if
+I can only blame my inexperience). I needed to develop a class of arrays
+that could store some additional information along with numerical values,
+while keeping the possibility for missing data (picture storing a series
+of dates along with measurements, what would later become the `TimeSeries
+Scikit `__ (dead link)).
+
+I started to implement such a class, but then quickly realized that
+any additional information disappeared when processing these subarrays
+(for example, adding a constant value to a subarray would erase its
+dates). I ended up writing the equivalent of *numpy.core.ma* for my
+particular class, ufuncs included. Everything went fine until I needed to
+subclass my new class, when more problems showed up: some attributes of
+the new subclass were lost during processing. I identified the culprit as
+MaskedArray, which returns masked ndarrays when I expected masked
+arrays of my class. I was preparing myself to rewrite *numpy.core.ma*
+when I forced myself to learn how to subclass ndarrays. As I became more
+familiar with the *__new__* and *__array_finalize__* methods,
+I started to wonder why masked arrays were objects, and not ndarrays,
+and whether it wouldn't be more convenient for subclassing if they did
+behave like regular ndarrays.
+
+The new *maskedarray* is what I eventually came up with. The
+main differences from the initial *numpy.core.ma* package are
+that MaskedArray is now a subclass of *ndarray* and that the
+*_data* section can now be any subclass of *ndarray*. Apart from a
+couple of issues listed below, the behavior of the new MaskedArray
+class reproduces the old one. Initially the *maskedarray*
+implementation was marginally slower than *numpy.ma* in some areas,
+but work is underway to speed it up; the expectation is that it can be
+made substantially faster than the present *numpy.ma*.
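The *__new__*/*__array_finalize__* mechanics mentioned above are easy to miss, so here is a rough, self-contained illustration (not taken from the package itself; the class name and attribute are invented): an ndarray subclass keeps an extra attribute across operations only if it propagates that attribute in *__array_finalize__*::

    import numpy as np

    class InfoArray(np.ndarray):
        # Hypothetical subclass carrying one extra attribute, ``info``.

        def __new__(cls, input_array, info=None):
            # View-cast the input instead of copying, then attach the attribute.
            obj = np.asarray(input_array).view(cls)
            obj.info = info
            return obj

        def __array_finalize__(self, obj):
            # Called after explicit construction, view-casting and slicing;
            # without this hook, ``info`` would vanish after any arithmetic.
            if obj is None:
                return
            self.info = getattr(obj, 'info', None)

    a = InfoArray([1., 2., 3.], info='measurement dates')
    print((a + 1).info)   # 'measurement dates': the attribute survives the ufunc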
+
+
+Note that if the subclass has some special methods and
+attributes, they are not propagated to the masked version:
+this would require a modification of the *__getattribute__*
+method (first trying *ndarray.__getattribute__*, then trying
+*self._data.__getattribute__* if an exception is raised in the first
+place), which really slows things down.
+
+Main differences
+----------------
+
+ * The *_data* part of the masked array can be any subclass of ndarray (but not recarray, cf. below).
+ * *fill_value* is now a property, not a function.
+ * In the majority of cases, the mask is forced to *nomask* when no value is actually masked. A notable exception is when a masked array (with no masked values) has just been unpickled.
+ * I got rid of the *share_mask* flag; I never understood its purpose.
+ * *put*, *putmask* and *take* now mimic the ndarray methods, to avoid unpleasant surprises. Moreover, *put* and *putmask* both update the mask when needed.
+ * If *a* is a masked array, *bool(a)* raises a *ValueError*, as it does with ndarrays.
+ * In the same way, the comparison of two masked arrays is a masked array, not a boolean.
+ * *filled(a)* returns an array of the same subclass as *a._data*, and no test is performed on whether it is contiguous or not.
+ * The mask is always printed, even if it's *nomask*, which makes things easy (for me at least) to remember that a masked array is used.
+ * *cumsum* works as if the *_data* array was filled with 0. The mask is preserved, but not updated.
+ * *cumprod* works as if the *_data* array was filled with 1. The mask is preserved, but not updated.
+
+New features
+------------
+
+This list is non-exhaustive...
+
+ * The *mr_* function mimics *r_* for masked arrays.
+ * The *anom* method returns the anomalies (deviations from the average).
+
+Using the new package with numpy.core.ma
+----------------------------------------
+
+I tried to make sure that the new package can understand old masked
+arrays. Unfortunately, there's no upward compatibility.
+
+For example:
+
+>>> import numpy.core.ma as old_ma
+>>> import maskedarray as new_ma
+>>> x = old_ma.array([1,2,3,4,5], mask=[0,0,1,0,0])
+>>> x
+array(data =
+ [     1      2 999999      4      5],
+      mask =
+ [False False True False False],
+      fill_value=999999)
+>>> y = new_ma.array([1,2,3,4,5], mask=[0,0,1,0,0])
+>>> y
+array(data = [1 2 -- 4 5],
+      mask = [False False True False False],
+      fill_value=999999)
+>>> x==y
+array(data =
+ [True True True True True],
+      mask =
+ [False False True False False],
+      fill_value=?)
+>>> old_ma.getmask(x) == new_ma.getmask(x)
+array([True, True, True, True, True])
+>>> old_ma.getmask(y) == new_ma.getmask(y)
+array([True, True, False, True, True])
+>>> old_ma.getmask(y)
+False
+
+
+Using maskedarray with matplotlib
+---------------------------------
+
+Starting with matplotlib 0.91.2, the masked array importing will work with
+the maskedarray branch as well as with earlier versions.
+
+By default matplotlib still uses numpy.ma, but there is an rcParams setting
+that you can use to select maskedarray instead. In the matplotlibrc file
+you will find::
+
+    #maskedarray : False       # True to use external maskedarray module
+                               # instead of numpy.ma; this is a temporary
+                               # setting for testing maskedarray.
+
+
+Uncomment and set to True to select maskedarray everywhere.
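Programmatically, the same switch would presumably be flipped through *rcParams* before pyplot is imported. A minimal sketch, assuming only that the runtime key matches the matplotlibrc entry shown above (this is a historical setting; modern matplotlib rejects unknown rcParams keys)::

    import matplotlib

    # Assumed key name, mirroring the matplotlibrc entry above.
    matplotlib.rcParams['maskedarray'] = True

    import matplotlib.pyplot as plt   # import pyplot only after the switch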
+Alternatively, you can test a script with maskedarray by using a
+command-line option, e.g.::
+
+    python simple_plot.py --maskedarray
+
+
+Masked records
+--------------
+
+Like *numpy.ma.core*, the *ndarray*-based implementation
+of MaskedArray is limited when working with records: you can
+mask any record of the array, but not a field in a record. If you
+need this feature, you may want to give the *mrecords* package
+a try (available in the *maskedarray* directory in the scipy
+sandbox). This module defines a new class, *MaskedRecord*. An
+instance of this class accepts a *recarray* as data, and uses two
+masks: the *fieldmask* has as many entries as records in the array,
+each entry with the same fields as a record, but of boolean types:
+they indicate whether the field is masked or not; a record entry
+is flagged as masked in the *mask* array if all the fields are
+masked. A few examples in the file should give you an idea of what
+can be done. Note that *mrecords* is still experimental...
+
+Optimizing maskedarray
+----------------------
+
+Should masked arrays be filled before processing or not?
+--------------------------------------------------------
+
+In the current implementation, most operations on masked arrays involve
+the following steps:
+
+ * the input arrays are filled
+ * the operation is performed on the filled arrays
+ * the mask is set for the results, from the combination of the input
+   masks and the mask corresponding to the domain of the operation.
+
+For example, consider the division of two masked arrays::
+
+    import numpy
+    import maskedarray as ma
+    x = ma.array([1,2,3,4], mask=[1,0,0,0], dtype=numpy.float64)
+    y = ma.array([-1,0,1,2], mask=[0,0,0,1], dtype=numpy.float64)
+
+The division of x by y is then computed as::
+
+    d1 = x.filled(0)   # d1 = array([ 0., 2., 3., 4.])
+    d2 = y.filled(1)   # d2 = array([-1., 0., 1., 1.])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))   # m = array([True, False, False, True])
+    dm = ma.divide.domain(d1, d2)                  # dm = array([False, True, False, False])
+    result = (d1/d2).view(MaskedArray)             # masked_array([-0., inf, 3., 4.])
+    result._mask = logical_or(m, dm)
+
+Note that a division by zero takes place. To avoid it, we can consider
+filling the input arrays, taking the domain mask into account, so that::
+
+    d1 = x._data.copy()   # d1 = array([ 1., 2., 3., 4.])
+    d2 = y._data.copy()   # d2 = array([-1., 0., 1., 2.])
+    dm = ma.divide.domain(d1, d2)                  # dm = array([False, True, False, False])
+    numpy.putmask(d2, dm, 1)                       # d2 = array([-1., 1., 1., 2.])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))   # m = array([True, False, False, True])
+    result = (d1/d2).view(MaskedArray)             # masked_array([-1., 2., 3., 2.])
+    result._mask = logical_or(m, dm)
+
+Note that the *.copy()* is required to avoid updating the inputs with
+*putmask*. The *.filled()* method also involves a *.copy()*.
+
+A third possibility consists in avoiding filling the arrays::
+
+    d1 = x._data   # d1 = array([ 1., 2., 3., 4.])
+    d2 = y._data   # d2 = array([-1., 0., 1., 2.])
+    dm = ma.divide.domain(d1, d2)                  # dm = array([False, True, False, False])
+    m = ma.mask_or(ma.getmask(x), ma.getmask(y))   # m = array([True, False, False, True])
+    result = (d1/d2).view(MaskedArray)             # masked_array([-1., inf, 3., 2.])
+    result._mask = logical_or(m, dm)
+
+Note that here again the division by zero takes place.
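For comparison, numbers of the kind quoted below can be produced with a small *timeit* harness along the following lines (a sketch only: *numpy.ma* stands in for the *maskedarray* package, and the array size and mask density are invented)::

    import timeit

    import numpy
    import numpy.ma as ma   # stand-in for the maskedarray package

    n = 10000
    x = ma.array(numpy.random.rand(n), mask=numpy.random.rand(n) > 0.9)
    y = ma.array(numpy.random.rand(n), mask=numpy.random.rand(n) > 0.9)

    # Time the whole masked division, including mask combination.
    t = timeit.timeit(lambda: ma.divide(x, y), number=100)
    print('%.2f ms per loop' % (1000.0 * t / 100))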
+
+A quick benchmark gives the following results:
+
+ * *numpy.ma.divide*      : 2.69 ms per loop
+ * classical division     : 2.21 ms per loop
+ * division w/ prefilling : 2.34 ms per loop
+ * division w/o filling   : 1.55 ms per loop
+
+So, is it worth filling the arrays beforehand? Yes, if we are interested
+in avoiding floating-point exceptions that may fill the result with infs
+and nans. No, if we are only interested in speed...
+
+
+Thanks
+------
+
+I'd like to thank Paul Dubois, Travis Oliphant and Sasha for the
+original masked array package: without you, I would never have started
+that (it might be argued that I shouldn't have anyway, but that's
+another story...). I also wish to extend these thanks to Reggie Dugard
+and Eric Firing for their suggestions and numerous improvements.
+
+
+Revision notes
+--------------
+
+ * 08/25/2007 : Creation of this page
+ * 01/23/2007 : The package has been moved to the SciPy sandbox, and is
+   regularly updated: please check out your SVN version!
diff --git a/python/numpy/ma/__init__.py b/python/numpy/ma/__init__.py
new file mode 100644
index 000000000..e2a742e9b
--- /dev/null
+++ b/python/numpy/ma/__init__.py
@@ -0,0 +1,53 @@
+"""
+=============
+Masked Arrays
+=============
+
+Arrays sometimes contain invalid or missing data. When doing operations
+on such arrays, we wish to suppress invalid values, which is the purpose
+masked arrays fulfill (an example of typical use is given below).
+
+For example, examine the following array:
+
+>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
+
+When we try to calculate the mean of the data, the result is undetermined:
+
+>>> np.mean(x)
+nan
+
+The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
+any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
+masked arrays:
+
+>>> m = np.ma.masked_array(x, np.isnan(x))
+>>> m
+masked_array(data=[2.0, 1.0, 3.0, --, 5.0, 2.0, 3.0, --],
+             mask=[False, False, False, True, False, False, False, True],
+       fill_value=1e+20)
+
+Here, we construct a masked array that suppresses all ``NaN`` values. We
+may now proceed to calculate the mean of the other values:
+
+>>> np.mean(m)
+2.6666666666666665
+
+.. [1] Not-a-Number, a floating point value that is the result of an
+       invalid operation.
+
+.. moduleauthor:: Pierre Gerard-Marchant
+.. moduleauthor:: Jarrod Millman
+
+"""
+from . import core, extras
+from .core import *
+from .extras import *
+
+__all__ = ['core', 'extras']
+__all__ += core.__all__
+__all__ += extras.__all__
+
+from numpy._pytesttester import PytestTester
+
+test = PytestTester(__name__)
+del PytestTester
diff --git a/python/numpy/ma/__init__.pyi b/python/numpy/ma/__init__.pyi
new file mode 100644
index 000000000..176e929a8
--- /dev/null
+++ b/python/numpy/ma/__init__.pyi
@@ -0,0 +1,458 @@
+from .
import core, extras +from .core import ( + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + amax, + amin, + angle, + anom, + anomalies, + any, + append, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argsort, + around, + array, + asanyarray, + asarray, + bitwise_and, + bitwise_or, + bitwise_xor, + bool_, + ceil, + choose, + clip, + common_fill_value, + compress, + compressed, + concatenate, + conjugate, + convolve, + copy, + correlate, + cos, + cosh, + count, + cumprod, + cumsum, + default_fill_value, + diag, + diagonal, + diff, + divide, + empty, + empty_like, + equal, + exp, + expand_dims, + fabs, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + floor, + floor_divide, + fmod, + frombuffer, + fromflex, + fromfunction, + getdata, + getmask, + getmaskarray, + greater, + greater_equal, + harden_mask, + hypot, + identity, + ids, + indices, + inner, + innerproduct, + is_mask, + is_masked, + isarray, + isMA, + isMaskedArray, + left_shift, + less, + less_equal, + log, + log2, + log10, + logical_and, + logical_not, + logical_or, + logical_xor, + make_mask, + make_mask_descr, + make_mask_none, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_invalid, + masked_less, + masked_less_equal, + masked_not_equal, + masked_object, + masked_outside, + masked_print_option, + masked_singleton, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + mean, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + ndim, + negative, + nomask, + nonzero, + not_equal, + ones, + ones_like, + outer, + outerproduct, + power, + prod, + product, + ptp, + put, + putmask, + ravel, + remainder, + repeat, + reshape, + resize, + right_shift, + round, + round_, + set_fill_value, + shape, + sin, + sinh, + size, + soften_mask, + sometrue, + sort, + sqrt, + squeeze, + std, + subtract, + sum, + swapaxes, + take, + tan, + tanh, + trace, + transpose, + true_divide, + var, + where, + zeros, + zeros_like, +) +from .extras import ( + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + column_stack, + compress_cols, + compress_nd, + compress_rowcols, + compress_rows, + corrcoef, + count_masked, + cov, + diagflat, + dot, + dstack, + ediff1d, + flatnotmasked_contiguous, + flatnotmasked_edges, + hsplit, + hstack, + in1d, + intersect1d, + isin, + mask_cols, + mask_rowcols, + mask_rows, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + row_stack, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vander, + vstack, +) + +__all__ = [ + "core", + "extras", + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", 
+ "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log10", + "log2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + "masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "count_masked", + "corrcoef", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "isin", + "in1d", + "intersect1d", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "unique", + "union1d", + "vander", + "vstack", +] diff --git a/python/numpy/ma/__pycache__/__init__.cpython-312.pyc b/python/numpy/ma/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..9c5961d09 Binary files /dev/null and b/python/numpy/ma/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/ma/__pycache__/core.cpython-312.pyc b/python/numpy/ma/__pycache__/core.cpython-312.pyc new file mode 100644 index 000000000..f2ce98896 Binary files /dev/null and b/python/numpy/ma/__pycache__/core.cpython-312.pyc differ diff --git a/python/numpy/ma/__pycache__/extras.cpython-312.pyc b/python/numpy/ma/__pycache__/extras.cpython-312.pyc new file mode 100644 index 000000000..bdf9e1db9 Binary files /dev/null and b/python/numpy/ma/__pycache__/extras.cpython-312.pyc differ diff --git a/python/numpy/ma/__pycache__/mrecords.cpython-312.pyc b/python/numpy/ma/__pycache__/mrecords.cpython-312.pyc new file mode 100644 index 000000000..11a554b88 Binary files /dev/null and b/python/numpy/ma/__pycache__/mrecords.cpython-312.pyc differ diff --git 
diff --git a/python/numpy/ma/core.py b/python/numpy/ma/core.py new file mode 100644 index 000000000..8a85960f6 --- /dev/null +++ b/python/numpy/ma/core.py @@ -0,0 +1,8933 @@ +""" +numpy.ma : a package to handle missing or invalid values. + +This package was initially written for numarray by Paul F. Dubois +at Lawrence Livermore National Laboratory. +In 2006, the package was completely rewritten by Pierre Gerard-Marchant +(University of Georgia) to make the MaskedArray class a subclass of ndarray, +and to improve support of structured arrays. + + +Copyright 1999, 2000, 2001 Regents of the University of California. +Released for unlimited redistribution. + +* Adapted for numpy_core 2005 by Travis Oliphant and (mainly) Paul Dubois. +* Subclassing of the base `ndarray` 2006 by Pierre Gerard-Marchant + (pgmdevlist_AT_gmail_DOT_com) +* Improvements suggested by Reggie Dugard (reggie_AT_merfinllc_DOT_com) + +.. moduleauthor:: Pierre Gerard-Marchant + +""" +import builtins +import functools +import inspect +import operator +import re +import textwrap +import warnings + +import numpy as np +import numpy._core.numerictypes as ntypes +import numpy._core.umath as umath +from numpy import ( + _NoValue, + amax, + amin, + angle, + bool_, + expand_dims, + finfo, # noqa: F401 + iinfo, # noqa: F401 + iscomplexobj, + ndarray, +) +from numpy import array as narray # noqa: F401 +from numpy._core import multiarray as mu +from numpy._core.numeric import normalize_axis_tuple +from numpy._utils import set_module +from numpy._utils._inspect import formatargspec, getargspec + +__all__ = [ + 'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute', + 'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin', + 'angle', 'anom', 'anomalies', 'any', 'append', 'arange', 'arccos', + 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', + 'argmax', 'argmin', 'argsort', 'around', 'array', 'asanyarray', + 'asarray', 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bool_', 'ceil', + 'choose', 'clip', 'common_fill_value', 'compress', 'compressed', + 'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh', + 'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal', + 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp', + 'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask', + 'flatten_structured_array', 'floor', 'floor_divide', 'fmod', + 'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask', + 'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot', + 'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA', + 'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift', + 'less', 'less_equal', 'log', 'log10', 'log2', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask', + 'make_mask_descr', 'make_mask_none', 'mask_or', 'masked', + 'masked_array', 'masked_equal', 'masked_greater', + 'masked_greater_equal', 'masked_inside', 'masked_invalid', + 'masked_less', 'masked_less_equal', 'masked_not_equal', + 'masked_object', 'masked_outside', 'masked_print_option', + 'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum', + 'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value', + 'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask',
'nonzero', + 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod', + 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder', + 'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_', + 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask', + 'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum', + 'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide', + 'var', 'where', 'zeros', 'zeros_like', + ] + +MaskType = np.bool +nomask = MaskType(0) + +class MaskedArrayFutureWarning(FutureWarning): + pass + +def _deprecate_argsort_axis(arr): + """ + Adjust the axis passed to argsort, warning if necessary + + Parameters + ---------- + arr + The array which argsort was called on + + np.ma.argsort has a long-term bug where the default of the axis argument + is wrong (gh-8701), which now must be kept for backwards compatibility. + Thankfully, this only makes a difference when arrays are 2- or more- + dimensional, so we only need a warning then. + """ + if arr.ndim <= 1: + # no warning needed - but switch to -1 anyway, to avoid surprising + # subclasses, which are more likely to implement scalar axes. + return -1 + else: + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + warnings.warn( + "In the future the default for argsort will be axis=-1, not the " + "current None, to match its documentation and np.argsort. " + "Explicitly pass -1 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=3) + return None + + +def doc_note(initialdoc, note): + """ + Adds a Notes section to an existing docstring. + + """ + if initialdoc is None: + return + if note is None: + return initialdoc + + notesplit = re.split(r'\n\s*?Notes\n\s*?-----', inspect.cleandoc(initialdoc)) + notedoc = f"\n\nNotes\n-----\n{inspect.cleandoc(note)}\n" + + return ''.join(notesplit[:1] + [notedoc] + notesplit[1:]) + + +def get_object_signature(obj): + """ + Get the signature from obj + + """ + try: + sig = formatargspec(*getargspec(obj)) + except TypeError: + sig = '' + return sig + + +############################################################################### +# Exceptions # +############################################################################### + + +class MAError(Exception): + """ + Class for masked array related errors. + + """ + pass + + +class MaskError(MAError): + """ + Class for mask related errors. 
+ + """ + pass + + +############################################################################### +# Filling options # +############################################################################### + + +# b: boolean - c: complex - f: floats - i: integer - O: object - S: string +default_filler = {'b': True, + 'c': 1.e20 + 0.0j, + 'f': 1.e20, + 'i': 999999, + 'O': '?', + 'S': b'N/A', + 'u': 999999, + 'V': b'???', + 'U': 'N/A' + } + +# Add datetime64 and timedelta64 types +for v in ["Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", + "fs", "as"]: + default_filler["M8[" + v + "]"] = np.datetime64("NaT", v) + default_filler["m8[" + v + "]"] = np.timedelta64("NaT", v) + +float_types_list = [np.half, np.single, np.double, np.longdouble, + np.csingle, np.cdouble, np.clongdouble] + +_minvals: dict[type, int] = {} +_maxvals: dict[type, int] = {} + +for sctype in ntypes.sctypeDict.values(): + scalar_dtype = np.dtype(sctype) + + if scalar_dtype.kind in "Mm": + info = np.iinfo(np.int64) + min_val, max_val = info.min + 1, info.max + elif np.issubdtype(scalar_dtype, np.integer): + info = np.iinfo(sctype) + min_val, max_val = info.min, info.max + elif np.issubdtype(scalar_dtype, np.floating): + info = np.finfo(sctype) + min_val, max_val = info.min, info.max + elif scalar_dtype.kind == "b": + min_val, max_val = 0, 1 + else: + min_val, max_val = None, None + + _minvals[sctype] = min_val + _maxvals[sctype] = max_val + +max_filler = _minvals +max_filler.update([(k, -np.inf) for k in float_types_list[:4]]) +max_filler.update([(k, complex(-np.inf, -np.inf)) for k in float_types_list[-3:]]) + +min_filler = _maxvals +min_filler.update([(k, +np.inf) for k in float_types_list[:4]]) +min_filler.update([(k, complex(+np.inf, +np.inf)) for k in float_types_list[-3:]]) + +del float_types_list + +def _recursive_fill_value(dtype, f): + """ + Recursively produce a fill value for `dtype`, calling f on scalar dtypes + """ + if dtype.names is not None: + # We wrap into `array` here, which ensures we use NumPy cast rules + # for integer casts, this allows the use of 99999 as a fill value + # for int8. + # TODO: This is probably a mess, but should best preserve behavior? + vals = tuple( + np.array(_recursive_fill_value(dtype[name], f)) + for name in dtype.names) + return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d + elif dtype.subdtype: + subtype, shape = dtype.subdtype + subval = _recursive_fill_value(subtype, f) + return np.full(shape, subval) + else: + return f(dtype) + + +def _get_dtype_of(obj): + """ Convert the argument for *_fill_value into a dtype """ + if isinstance(obj, np.dtype): + return obj + elif hasattr(obj, 'dtype'): + return obj.dtype + else: + return np.asanyarray(obj).dtype + + +def default_fill_value(obj): + """ + Return the default fill value for the argument object. + + The default filling value depends on the datatype of the input + array or the type of the input scalar: + + ======== ======== + datatype default + ======== ======== + bool True + int 999999 + float 1.e20 + complex 1.e20+0j + object '?' + string 'N/A' + ======== ======== + + For structured types, a structured scalar is returned, with each field the + default fill value for its type. + + For subarray types, the fill value is an array of the same size containing + the default scalar fill value. + + Parameters + ---------- + obj : ndarray, dtype or scalar + The array data-type or scalar for which the default fill value + is returned. + + Returns + ------- + fill_value : scalar + The default fill value. 
+ + Examples + -------- + >>> import numpy as np + >>> np.ma.default_fill_value(1) + 999999 + >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) + 1e+20 + >>> np.ma.default_fill_value(np.dtype(complex)) + (1e+20+0j) + + """ + def _scalar_fill_value(dtype): + if dtype.kind in 'Mm': + return default_filler.get(dtype.str[1:], '?') + else: + return default_filler.get(dtype.kind, '?') + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def _extremum_fill_value(obj, extremum, extremum_name): + + def _scalar_fill_value(dtype): + try: + return extremum[dtype.type] + except KeyError as e: + raise TypeError( + f"Unsuitable type {dtype} for calculating {extremum_name}." + ) from None + + dtype = _get_dtype_of(obj) + return _recursive_fill_value(dtype, _scalar_fill_value) + + +def minimum_fill_value(obj): + """ + Return the maximum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the minimum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for its numeric type. + + Returns + ------- + val : scalar + The maximum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + maximum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.int32() + >>> ma.minimum_fill_value(a) + 2147483647 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.minimum_fill_value(a) + 127 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.minimum_fill_value(a) + inf + + """ + return _extremum_fill_value(obj, min_filler, "minimum") + + +def maximum_fill_value(obj): + """ + Return the minimum value that can be represented by the dtype of an object. + + This function is useful for calculating a fill value suitable for + taking the maximum of an array with a given dtype. + + Parameters + ---------- + obj : ndarray, dtype or scalar + An object that can be queried for its numeric type. + + Returns + ------- + val : scalar + The minimum representable value. + + Raises + ------ + TypeError + If `obj` isn't a suitable numeric type. + + See Also + -------- + minimum_fill_value : The inverse function. + set_fill_value : Set the filling value of a masked array. + MaskedArray.fill_value : Return current fill value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.int8() + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.int32() + >>> ma.maximum_fill_value(a) + -2147483648 + + An array of numeric data can also be passed. + + >>> a = np.array([1, 2, 3], dtype=np.int8) + >>> ma.maximum_fill_value(a) + -128 + >>> a = np.array([1, 2, 3], dtype=np.float32) + >>> ma.maximum_fill_value(a) + -inf + + """ + return _extremum_fill_value(obj, max_filler, "maximum") + + +def _recursive_set_fill_value(fillvalue, dt): + """ + Create a fill value for a structured dtype. + + Parameters + ---------- + fillvalue : scalar or array_like + Scalar or array representing the fill value. If it is of shorter + length than the number of fields in dt, it will be resized. + dt : dtype + The structured dtype for which to create the fill value.
+ + Returns + ------- + val : tuple + A tuple of values corresponding to the structured fill value. + + """ + fillvalue = np.resize(fillvalue, len(dt.names)) + output_value = [] + for (fval, name) in zip(fillvalue, dt.names): + cdtype = dt[name] + if cdtype.subdtype: + cdtype = cdtype.subdtype[0] + + if cdtype.names is not None: + output_value.append(tuple(_recursive_set_fill_value(fval, cdtype))) + else: + output_value.append(np.array(fval, dtype=cdtype).item()) + return tuple(output_value) + + +def _check_fill_value(fill_value, ndtype): + """ + Private function validating the given `fill_value` for the given dtype. + + If fill_value is None, it is set to the default corresponding to the dtype. + + If fill_value is not None, its value is forced to the given dtype. + + The result is always a 0d array. + + """ + ndtype = np.dtype(ndtype) + if fill_value is None: + fill_value = default_fill_value(ndtype) + # TODO: It seems better to always store a valid fill_value; the oddity + # is that `_fill_value = None` would then behave even more + # differently. + # (e.g. this allows arr_uint8.astype(int64) to have the default + # fill value again...) + # The one thing that changed in 2.0/2.1 around cast safety is that the + # default `int(99...)` is not a same-kind cast anymore, so if we + # have a uint, use the default uint. + if ndtype.kind == "u": + fill_value = np.uint(fill_value) + elif ndtype.names is not None: + if isinstance(fill_value, (ndarray, np.void)): + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except ValueError as e: + err_msg = "Unable to transform %s to dtype %s" + raise ValueError(err_msg % (fill_value, ndtype)) from e + else: + fill_value = np.asarray(fill_value, dtype=object) + fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype), + dtype=ndtype) + elif isinstance(fill_value, str) and (ndtype.char not in 'OSVU'): + # Note this check doesn't work if fill_value is not a scalar + err_msg = "Cannot set fill value of string with array of dtype %s" + raise TypeError(err_msg % ndtype) + else: + # In case we want to convert 1e20 to int. + # Also in case of converting string arrays. + try: + fill_value = np.asarray(fill_value, dtype=ndtype) + except (OverflowError, ValueError) as e: + # Raise TypeError instead of OverflowError or ValueError. + # OverflowError is seldom used, and the real problem here is + # that the passed fill_value is not compatible with the ndtype. + err_msg = "Cannot convert fill_value %s to dtype %s" + raise TypeError(err_msg % (fill_value, ndtype)) from e + return np.array(fill_value) + + +def set_fill_value(a, fill_value): + """ + Set the filling value of a, if a is a masked array. + + This function changes the fill value of the masked array `a` in place. + If `a` is not a masked array, the function returns silently, without + doing anything. + + Parameters + ---------- + a : array_like + Input array. + fill_value : scalar + Filling value. A consistency test is performed to make sure + the value is compatible with the dtype of `a`. + + Returns + ------- + None + Nothing returned by this function. + + See Also + -------- + maximum_fill_value : Return the default fill value for a dtype. + MaskedArray.fill_value : Return current fill value. + MaskedArray.set_fill_value : Equivalent method.
+ + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> a = ma.masked_where(a < 3, a) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=999999) + >>> ma.set_fill_value(a, -999) + >>> a + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=-999) + + Nothing happens if `a` is not a masked array. + + >>> a = list(range(5)) + >>> a + [0, 1, 2, 3, 4] + >>> ma.set_fill_value(a, 100) + >>> a + [0, 1, 2, 3, 4] + >>> a = np.arange(5) + >>> a + array([0, 1, 2, 3, 4]) + >>> ma.set_fill_value(a, 100) + >>> a + array([0, 1, 2, 3, 4]) + + """ + if isinstance(a, MaskedArray): + a.set_fill_value(fill_value) + + +def get_fill_value(a): + """ + Return the filling value of a, if any. Otherwise, returns the + default filling value for that type. + + """ + if isinstance(a, MaskedArray): + result = a.fill_value + else: + result = default_fill_value(a) + return result + + +def common_fill_value(a, b): + """ + Return the common filling value of two masked arrays, if any. + + If ``a.fill_value == b.fill_value``, return the fill value, + otherwise return None. + + Parameters + ---------- + a, b : MaskedArray + The masked arrays for which to compare fill values. + + Returns + ------- + fill_value : scalar or None + The common fill value, or None. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([0, 1.], fill_value=3) + >>> y = np.ma.array([0, 1.], fill_value=3) + >>> np.ma.common_fill_value(x, y) + 3.0 + + """ + t1 = get_fill_value(a) + t2 = get_fill_value(b) + if t1 == t2: + return t1 + return None + + +def filled(a, fill_value=None): + """ + Return input as an `~numpy.ndarray`, with masked values replaced by + `fill_value`. + + If `a` is not a `MaskedArray`, `a` itself is returned. + If `a` is a `MaskedArray` with no masked values, then ``a.data`` is + returned. + If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to + ``a.fill_value``. + + Parameters + ---------- + a : MaskedArray or array_like + An input object. + fill_value : array_like, optional. + Can be scalar or non-scalar. If non-scalar, the + resulting filled array should be broadcastable + over input array. Default is None. + + Returns + ------- + a : ndarray + The filled array. + + See Also + -------- + compressed + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x.filled() + array([[999999, 1, 2], + [999999, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=333) + array([[333, 1, 2], + [333, 4, 5], + [ 6, 7, 8]]) + >>> x.filled(fill_value=np.arange(3)) + array([[0, 1, 2], + [0, 4, 5], + [6, 7, 8]]) + + """ + if hasattr(a, 'filled'): + return a.filled(fill_value) + + elif isinstance(a, ndarray): + # Should we check for contiguity ? and a.flags['CONTIGUOUS']: + return a + elif isinstance(a, dict): + return np.array(a, 'O') + else: + return np.array(a) + + +def get_masked_subclass(*arrays): + """ + Return the youngest subclass of MaskedArray from a list of (masked) arrays. + + In case of siblings, the first listed takes over. 
+ + """ + if len(arrays) == 1: + arr = arrays[0] + if isinstance(arr, MaskedArray): + rcls = type(arr) + else: + rcls = MaskedArray + else: + arrcls = [type(a) for a in arrays] + rcls = arrcls[0] + if not issubclass(rcls, MaskedArray): + rcls = MaskedArray + for cls in arrcls[1:]: + if issubclass(cls, rcls): + rcls = cls + # Don't return MaskedConstant as result: revert to MaskedArray + if rcls.__name__ == 'MaskedConstant': + return MaskedArray + return rcls + + +def getdata(a, subok=True): + """ + Return the data of a masked array as an ndarray. + + Return the data of `a` (if any) as an ndarray if `a` is a ``MaskedArray``, + else return `a` as a ndarray or subclass (depending on `subok`) if not. + + Parameters + ---------- + a : array_like + Input ``MaskedArray``, alternatively a ndarray or a subclass thereof. + subok : bool + Whether to force the output to be a `pure` ndarray (False) or to + return a subclass of ndarray if appropriate (True, default). + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getdata(a) + array([[1, 2], + [3, 4]]) + + Equivalently use the ``MaskedArray`` `data` attribute. + + >>> a.data + array([[1, 2], + [3, 4]]) + + """ + try: + data = a._data + except AttributeError: + data = np.array(a, copy=None, subok=subok) + if not subok: + return data.view(ndarray) + return data + + +get_data = getdata + + +def fix_invalid(a, mask=nomask, copy=True, fill_value=None): + """ + Return input with invalid data masked and replaced by a fill value. + + Invalid data means values of `nan`, `inf`, etc. + + Parameters + ---------- + a : array_like + Input array, a (subclass of) ndarray. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + copy : bool, optional + Whether to use a copy of `a` (True) or to fix `a` in place (False). + Default is True. + fill_value : scalar, optional + Value used for fixing invalid data. Default is None, in which case + the ``a.fill_value`` is used. + + Returns + ------- + b : MaskedArray + The input array with invalid entries fixed. + + Notes + ----- + A copy is performed by default. 
+ + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) + >>> x + masked_array(data=[--, -1.0, nan, inf], + mask=[ True, False, False, False], + fill_value=1e+20) + >>> np.ma.fix_invalid(x) + masked_array(data=[--, -1.0, --, --], + mask=[ True, False, True, True], + fill_value=1e+20) + + >>> fixed = np.ma.fix_invalid(x) + >>> fixed.data + array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) + >>> x.data + array([ 1., -1., nan, inf]) + + """ + a = masked_array(a, copy=copy, mask=mask, subok=True) + invalid = np.logical_not(np.isfinite(a._data)) + if not invalid.any(): + return a + a._mask |= invalid + if fill_value is None: + fill_value = a.fill_value + a._data[invalid] = fill_value + return a + +def is_string_or_list_of_strings(val): + return (isinstance(val, str) or + (isinstance(val, list) and val and + builtins.all(isinstance(s, str) for s in val))) + +############################################################################### +# Ufuncs # +############################################################################### + + +ufunc_domain = {} +ufunc_fills = {} + + +class _DomainCheckInterval: + """ + Define a valid interval, so that: + + ``domain_check_interval(a,b)(x) == True`` where + ``x < a`` or ``x > b``. + + """ + + def __init__(self, a, b): + "domain_check_interval(a,b)(x) = true where x < a or x > b" + if a > b: + (a, b) = (b, a) + self.a = a + self.b = b + + def __call__(self, x): + "Execute the call behavior." + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(invalid='ignore'): + return umath.logical_or(umath.greater(x, self.b), + umath.less(x, self.a)) + + +class _DomainTan: + """ + Define a valid interval for the `tan` function, so that: + + ``domain_tan(eps) = True`` where ``abs(cos(x)) < eps`` + + """ + + def __init__(self, eps): + "domain_tan(eps) = true where abs(cos(x)) < eps" + self.eps = eps + + def __call__(self, x): + "Execute the call behavior." + with np.errstate(invalid='ignore'): + return umath.less(umath.absolute(umath.cos(x)), self.eps) + + +class _DomainSafeDivide: + """ + Define a domain for safe division. + + """ + + def __init__(self, tolerance=None): + self.tolerance = tolerance + + def __call__(self, a, b): + # Delay the selection of the tolerance to here in order to reduce numpy + # import times. The calculation of these parameters is a substantial + # component of numpy's import time. + if self.tolerance is None: + self.tolerance = np.finfo(float).tiny + # don't call ma ufuncs from __array_wrap__ which would fail for scalars + a, b = np.asarray(a), np.asarray(b) + with np.errstate(all='ignore'): + return umath.absolute(a) * self.tolerance >= umath.absolute(b) + + +class _DomainGreater: + """ + DomainGreater(v)(x) is True where x <= v. + + """ + + def __init__(self, critical_value): + "DomainGreater(v)(x) = true where x <= v" + self.critical_value = critical_value + + def __call__(self, x): + "Execute the call behavior." + with np.errstate(invalid='ignore'): + return umath.less_equal(x, self.critical_value) + + +class _DomainGreaterEqual: + """ + DomainGreaterEqual(v)(x) is True where x < v. + + """ + + def __init__(self, critical_value): + "DomainGreaterEqual(v)(x) = true where x < v" + self.critical_value = critical_value + + def __call__(self, x): + "Execute the call behavior."
+ with np.errstate(invalid='ignore'): + return umath.less(x, self.critical_value) + + +class _MaskedUFunc: + def __init__(self, ufunc): + self.f = ufunc + self.__doc__ = ufunc.__doc__ + self.__name__ = ufunc.__name__ + self.__qualname__ = ufunc.__qualname__ + + def __str__(self): + return f"Masked version of {self.f}" + + +class _MaskedUnaryOperation(_MaskedUFunc): + """ + Defines masked version of unary operations, where invalid values are + pre-masked. + + Parameters + ---------- + mufunc : callable + The function for which to define a masked version. Made available + as ``_MaskedUnaryOperation.f``. + fill : scalar, optional + Filling value, default is 0. + domain : class instance + Domain for the function. Should be one of the ``_Domain*`` + classes. Default is None. + + """ + + def __init__(self, mufunc, fill=0, domain=None): + super().__init__(mufunc) + self.fill = fill + self.domain = domain + ufunc_domain[mufunc] = domain + ufunc_fills[mufunc] = fill + + def __call__(self, a, *args, **kwargs): + """ + Execute the call behavior. + + """ + d = getdata(a) + # Deal with domain + if self.domain is not None: + # Case 1.1. : Domained function + # nans at masked positions cause RuntimeWarnings, even though + # they are masked. To avoid this we suppress warnings. + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + # Make a mask + m = ~umath.isfinite(result) + m |= self.domain(d) + m |= getmask(a) + else: + # Case 1.2. : Function without a domain + # Get the result and the mask + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(d, *args, **kwargs) + m = getmask(a) + + if not result.ndim: + # Case 2.1. : The result is a scalar + if m: + return masked + return result + + if m is not nomask: + # Case 2.2. The result is an array + # We need to fill the invalid data back with the input. Now, + # that's plain silly: in C, we would just skip the element and + # keep the original, but we do have to do it that way in Python + + # In case result has a lower dtype than the inputs (as in + # equal) + try: + np.copyto(result, d, where=m) + except TypeError: + pass + # Transform to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a)) + masked_result._mask = m + masked_result._update_from(a) + return masked_result + + +class _MaskedBinaryOperation(_MaskedUFunc): + """ + Define masked version of binary operations, where invalid + values are pre-masked. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_MaskedBinaryOperation.f``. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, mbfunc, fillx=0, filly=0): + """ + abfunc(fillx, filly) must be defined. + + abfunc(x, filly) = x for all x to enable reduce. + + """ + super().__init__(mbfunc) + self.fillx = fillx + self.filly = filly + ufunc_domain[mbfunc] = None + ufunc_fills[mbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + """ + Execute the call behavior.
+ + """ + # Get the data, as ndarray + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(): + np.seterr(divide='ignore', invalid='ignore') + result = self.f(da, db, *args, **kwargs) + # Get the mask for the result + (ma, mb) = (getmask(a), getmask(b)) + if ma is nomask: + if mb is nomask: + m = nomask + else: + m = umath.logical_or(getmaskarray(a), mb) + elif mb is nomask: + m = umath.logical_or(ma, getmaskarray(b)) + else: + m = umath.logical_or(ma, mb) + + # Case 1. : scalar + if not result.ndim: + if m: + return masked + return result + + # Case 2. : array + # Revert result to da where masked + if m is not nomask and m.any(): + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, da, casting='unsafe', where=m) + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + def reduce(self, target, axis=0, dtype=None): + """ + Reduce `target` along the given `axis`. + + """ + tclass = get_masked_subclass(target) + m = getmask(target) + t = filled(target, self.filly) + if t.shape == (): + t = t.reshape(1) + if m is not nomask: + m = make_mask(m, copy=True) + m.shape = (1,) + + if m is nomask: + tr = self.f.reduce(t, axis) + mr = nomask + else: + tr = self.f.reduce(t, axis, dtype=dtype) + mr = umath.logical_and.reduce(m, axis) + + if not tr.shape: + if mr: + return masked + else: + return tr + masked_tr = tr.view(tclass) + masked_tr._mask = mr + return masked_tr + + def outer(self, a, b): + """ + Return the function applied to the outer product of a and b. + + """ + (da, db) = (getdata(a), getdata(b)) + d = self.f.outer(da, db) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = umath.logical_or.outer(ma, mb) + if (not m.ndim) and m: + return masked + if m is not nomask: + np.copyto(d, da, where=m) + if not d.shape: + return d + masked_d = d.view(get_masked_subclass(a, b)) + masked_d._mask = m + return masked_d + + def accumulate(self, target, axis=0): + """Accumulate `target` along `axis` after filling with y fill + value. + + """ + tclass = get_masked_subclass(target) + t = filled(target, self.filly) + result = self.f.accumulate(t, axis) + masked_result = result.view(tclass) + return masked_result + + +class _DomainedBinaryOperation(_MaskedUFunc): + """ + Define binary operations that have a domain, like divide. + + They have no reduce, outer or accumulate. + + Parameters + ---------- + mbfunc : function + The function for which to define a masked version. Made available + as ``_DomainedBinaryOperation.f``. + domain : class instance + Default domain for the function. Should be one of the ``_Domain*`` + classes. + fillx : scalar, optional + Filling value for the first argument, default is 0. + filly : scalar, optional + Filling value for the second argument, default is 0. + + """ + + def __init__(self, dbfunc, domain, fillx=0, filly=0): + """abfunc(fillx, filly) must be defined. + abfunc(x, filly) = x for all x to enable reduce. + """ + super().__init__(dbfunc) + self.domain = domain + self.fillx = fillx + self.filly = filly + ufunc_domain[dbfunc] = domain + ufunc_fills[dbfunc] = (fillx, filly) + + def __call__(self, a, b, *args, **kwargs): + "Execute the call behavior." 
+ # Get the data + (da, db) = (getdata(a), getdata(b)) + # Get the result + with np.errstate(divide='ignore', invalid='ignore'): + result = self.f(da, db, *args, **kwargs) + # Get the mask as a combination of the source masks and invalid + m = ~umath.isfinite(result) + m |= getmask(a) + m |= getmask(b) + # Apply the domain + domain = ufunc_domain.get(self.f, None) + if domain is not None: + m |= domain(da, db) + # Take care of the scalar case first + if not m.ndim: + if m: + return masked + else: + return result + # When the mask is True, put back da if possible + # any errors, just abort; impossible to guarantee masked values + try: + np.copyto(result, 0, casting='unsafe', where=m) + # avoid using "*" since this may be overlaid + masked_da = umath.multiply(m, da) + # only add back if it can be cast safely + if np.can_cast(masked_da.dtype, result.dtype, casting='safe'): + result += masked_da + except Exception: + pass + + # Transforms to a (subclass of) MaskedArray + masked_result = result.view(get_masked_subclass(a, b)) + masked_result._mask = m + if isinstance(a, MaskedArray): + masked_result._update_from(a) + elif isinstance(b, MaskedArray): + masked_result._update_from(b) + return masked_result + + +# Unary ufuncs +exp = _MaskedUnaryOperation(umath.exp) +conjugate = _MaskedUnaryOperation(umath.conjugate) +sin = _MaskedUnaryOperation(umath.sin) +cos = _MaskedUnaryOperation(umath.cos) +arctan = _MaskedUnaryOperation(umath.arctan) +arcsinh = _MaskedUnaryOperation(umath.arcsinh) +sinh = _MaskedUnaryOperation(umath.sinh) +cosh = _MaskedUnaryOperation(umath.cosh) +tanh = _MaskedUnaryOperation(umath.tanh) +abs = absolute = _MaskedUnaryOperation(umath.absolute) +angle = _MaskedUnaryOperation(angle) +fabs = _MaskedUnaryOperation(umath.fabs) +negative = _MaskedUnaryOperation(umath.negative) +floor = _MaskedUnaryOperation(umath.floor) +ceil = _MaskedUnaryOperation(umath.ceil) +around = _MaskedUnaryOperation(np.around) +logical_not = _MaskedUnaryOperation(umath.logical_not) + +# Domained unary ufuncs +sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0, + _DomainGreaterEqual(0.0)) +log = _MaskedUnaryOperation(umath.log, 1.0, + _DomainGreater(0.0)) +log2 = _MaskedUnaryOperation(umath.log2, 1.0, + _DomainGreater(0.0)) +log10 = _MaskedUnaryOperation(umath.log10, 1.0, + _DomainGreater(0.0)) +tan = _MaskedUnaryOperation(umath.tan, 0.0, + _DomainTan(1e-35)) +arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccos = _MaskedUnaryOperation(umath.arccos, 0.0, + _DomainCheckInterval(-1.0, 1.0)) +arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0, + _DomainGreaterEqual(1.0)) +arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0, + _DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15)) + +# Binary ufuncs +add = _MaskedBinaryOperation(umath.add) +subtract = _MaskedBinaryOperation(umath.subtract) +multiply = _MaskedBinaryOperation(umath.multiply, 1, 1) +arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0) +equal = _MaskedBinaryOperation(umath.equal) +equal.reduce = None +not_equal = _MaskedBinaryOperation(umath.not_equal) +not_equal.reduce = None +less_equal = _MaskedBinaryOperation(umath.less_equal) +less_equal.reduce = None +greater_equal = _MaskedBinaryOperation(umath.greater_equal) +greater_equal.reduce = None +less = _MaskedBinaryOperation(umath.less) +less.reduce = None +greater = _MaskedBinaryOperation(umath.greater) +greater.reduce = None +logical_and = _MaskedBinaryOperation(umath.logical_and) +alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce +logical_or = 
_MaskedBinaryOperation(umath.logical_or) +sometrue = logical_or.reduce +logical_xor = _MaskedBinaryOperation(umath.logical_xor) +bitwise_and = _MaskedBinaryOperation(umath.bitwise_and) +bitwise_or = _MaskedBinaryOperation(umath.bitwise_or) +bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor) +hypot = _MaskedBinaryOperation(umath.hypot) + +# Domained binary ufuncs +divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1) +true_divide = divide # Just an alias for divide. +floor_divide = _DomainedBinaryOperation(umath.floor_divide, + _DomainSafeDivide(), 0, 1) +remainder = _DomainedBinaryOperation(umath.remainder, + _DomainSafeDivide(), 0, 1) +fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1) +mod = remainder + +############################################################################### +# Mask creation functions # +############################################################################### + + +def _replace_dtype_fields_recursive(dtype, primitive_dtype): + "Private function allowing recursion in _replace_dtype_fields." + _recurse = _replace_dtype_fields_recursive + + # Do we have some name fields? + if dtype.names is not None: + descr = [] + for name in dtype.names: + field = dtype.fields[name] + if len(field) == 3: + # Prepend the title to the name + name = (field[-1], name) + descr.append((name, _recurse(field[0], primitive_dtype))) + new_dtype = np.dtype(descr) + + # Is this some kind of composite a la (float,2) + elif dtype.subdtype: + descr = list(dtype.subdtype) + descr[0] = _recurse(dtype.subdtype[0], primitive_dtype) + new_dtype = np.dtype(tuple(descr)) + + # this is a primitive type, so do a direct replacement + else: + new_dtype = primitive_dtype + + # preserve identity of dtypes + if new_dtype == dtype: + new_dtype = dtype + + return new_dtype + + +def _replace_dtype_fields(dtype, primitive_dtype): + """ + Construct a dtype description list from a given dtype. + + Returns a new dtype object, with all fields and subtypes in the given type + recursively replaced with `primitive_dtype`. + + Arguments are coerced to dtypes first. + """ + dtype = np.dtype(dtype) + primitive_dtype = np.dtype(primitive_dtype) + return _replace_dtype_fields_recursive(dtype, primitive_dtype) + + +def make_mask_descr(ndtype): + """ + Construct a dtype description list from a given dtype. + + Returns a new dtype object, with the type of all fields in `ndtype` + converted to a boolean type. Field names are not altered. + + Parameters + ---------- + ndtype : dtype + The dtype to convert. + + Returns + ------- + result : dtype + A dtype that looks like `ndtype`, the type of all fields is boolean. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> dtype = np.dtype({'names':['foo', 'bar'], + ... 'formats':[np.float32, np.int64]}) + >>> dtype + dtype([('foo', '<f4'), ('bar', '<i8')]) + >>> ma.make_mask_descr(dtype) + dtype([('foo', '|b1'), ('bar', '|b1')]) + >>> ma.make_mask_descr(np.float32) + dtype('bool') + + """ + return _replace_dtype_fields(ndtype, MaskType) + + +def getmask(a): + """ + Return the mask of a masked array, or nomask. + + Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the + mask is not `nomask`, else return `nomask`. To guarantee a full array + of booleans of the same shape as `a`, use `getmaskarray`. + + Parameters + ---------- + a : array_like + Input `MaskedArray` for which the mask is required. + + See Also + -------- + getdata : Return the data of a masked array as an ndarray.
+ getmaskarray : Return the mask of a masked array, or full array of False. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmask(a) + array([[False, True], + [False, False]]) + + Equivalently use the `MaskedArray` `mask` attribute. + + >>> a.mask + array([[False, True], + [False, False]]) + + Result when mask == `nomask` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.nomask + False + >>> ma.getmask(b) == ma.nomask + True + >>> b.mask == ma.nomask + True + + """ + return getattr(a, '_mask', nomask) + + +get_mask = getmask + + +def getmaskarray(arr): + """ + Return the mask of a masked array, or full boolean array of False. + + Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and + the mask is not `nomask`, else return a full boolean array of False of + the same shape as `arr`. + + Parameters + ---------- + arr : array_like + Input `MaskedArray` for which the mask is required. + + See Also + -------- + getmask : Return the mask of a masked array, or nomask. + getdata : Return the data of a masked array as an ndarray. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_equal([[1,2],[3,4]], 2) + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) + >>> ma.getmaskarray(a) + array([[False, True], + [False, False]]) + + Result when mask == ``nomask`` + + >>> b = ma.masked_array([[1,2],[3,4]]) + >>> b + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.getmaskarray(b) + array([[False, False], + [False, False]]) + + """ + mask = getmask(arr) + if mask is nomask: + mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None)) + return mask + + +def is_mask(m): + """ + Return True if m is a valid, standard mask. + + This function does not check the contents of the input, only that the + type is MaskType. In particular, this function returns False if the + mask has a flexible dtype. + + Parameters + ---------- + m : array_like + Array to test. + + Returns + ------- + result : bool + True if `m.dtype.type` is MaskType, False otherwise. + + See Also + -------- + ma.isMaskedArray : Test whether input is an instance of MaskedArray. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> m + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_mask(m) + False + >>> ma.is_mask(m.mask) + True + + Input must be an ndarray (or have similar attributes) + for it to be considered a valid mask. + + >>> m = [False, True, False] + >>> ma.is_mask(m) + False + >>> m = np.array([False, True, False]) + >>> m + array([False, True, False]) + >>> ma.is_mask(m) + True + + Arrays with complex dtypes don't return True. + + >>> dtype = np.dtype({'names':['monty', 'pithon'], + ... 'formats':[bool, bool]}) + >>> dtype + dtype([('monty', '|b1'), ('pithon', '|b1')]) + >>> m = np.array([(True, False), (False, True), (True, False)], + ... 
dtype=dtype) + >>> m + array([( True, False), (False, True), ( True, False)], + dtype=[('monty', '?'), ('pithon', '?')]) + >>> ma.is_mask(m) + False + + """ + try: + return m.dtype.type is MaskType + except AttributeError: + return False + + +def _shrink_mask(m): + """ + Shrink a mask to nomask if possible + """ + if m.dtype.names is None and not m.any(): + return nomask + else: + return m + + +def make_mask(m, copy=False, shrink=True, dtype=MaskType): + """ + Create a boolean mask from an array. + + Return `m` as a boolean mask, creating a copy if necessary or requested. + The function can accept any sequence that is convertible to integers, + or ``nomask``. It does not require that contents be 0s and 1s: values + of 0 are interpreted as False, everything else as True. + + Parameters + ---------- + m : array_like + Potential mask. + copy : bool, optional + Whether to return a copy of `m` (True) or `m` itself (False). + shrink : bool, optional + Whether to shrink `m` to ``nomask`` if all its values are False. + dtype : dtype, optional + Data-type of the output mask. By default, the output mask has a + dtype of MaskType (bool). If the dtype is flexible, each field has + a boolean dtype. This is ignored when `m` is ``nomask``, in which + case ``nomask`` is always returned. + + Returns + ------- + result : ndarray + A boolean mask derived from `m`. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> m = [True, False, True, True] + >>> ma.make_mask(m) + array([ True, False, True, True]) + >>> m = [1, 0, 1, 1] + >>> ma.make_mask(m) + array([ True, False, True, True]) + >>> m = [1, 0, 2, -3] + >>> ma.make_mask(m) + array([ True, False, True, True]) + + Effect of the `shrink` parameter. + + >>> m = np.zeros(4) + >>> m + array([0., 0., 0., 0.]) + >>> ma.make_mask(m) + False + >>> ma.make_mask(m, shrink=False) + array([False, False, False, False]) + + Using a flexible `dtype`. + + >>> m = [1, 0, 1, 1] + >>> n = [0, 1, 0, 0] + >>> arr = [] + >>> for man, mouse in zip(m, n): + ... arr.append((man, mouse)) + >>> arr + [(1, 0), (0, 1), (1, 0), (1, 0)] + >>> dtype = np.dtype({'names':['man', 'mouse'], + ... 'formats':[np.int64, np.int64]}) + >>> arr = np.array(arr, dtype=dtype) + >>> arr + array([(1, 0), (0, 1), (1, 0), (1, 0)], + dtype=[('man', '<i8'), ('mouse', '<i8')]) + >>> ma.make_mask(arr, dtype=dtype) + array([(True, False), (False, True), (True, False), (True, False)], + dtype=[('man', '|b1'), ('mouse', '|b1')]) + + """ + if m is nomask: + return nomask + + # Make sure the input dtype is valid. + dtype = make_mask_descr(dtype) + + # legacy boolean special case: "existence of fields implies true" + if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool: + return np.ones(m.shape, dtype=dtype) + + # Fill the mask in case there are missing data; turn it into an ndarray. + copy = None if not copy else True + result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True) + # Down with the masks! + if shrink: + result = _shrink_mask(result) + return result + + +def make_mask_none(newshape, dtype=None): + """ + Return a boolean mask of the given shape, filled with False. + + This function returns a boolean ndarray with all entries False, that can + be used in common mask manipulations. If a complex dtype is specified, the + type of each field is converted to a boolean type. + + Parameters + ---------- + newshape : tuple + A tuple indicating the shape of the mask. + dtype : {None, dtype}, optional + If None, use a MaskType instance.
Otherwise, use a new datatype with + the same fields as `dtype`, converted to boolean types. + + Returns + ------- + result : ndarray + An ndarray of appropriate shape and dtype, filled with False. + + See Also + -------- + make_mask : Create a boolean mask from an array. + make_mask_descr : Construct a dtype description list from a given dtype. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> ma.make_mask_none((3,)) + array([False, False, False]) + + Defining a more complex dtype. + + >>> dtype = np.dtype({'names':['foo', 'bar'], + ... 'formats':[np.float32, np.int64]}) + >>> dtype + dtype([('foo', '<f4'), ('bar', '<i8')]) + >>> ma.make_mask_none((3,), dtype=dtype) + array([(False, False), (False, False), (False, False)], + dtype=[('foo', '|b1'), ('bar', '|b1')]) + + """ + if dtype is None: + result = np.zeros(newshape, dtype=MaskType) + else: + result = np.zeros(newshape, dtype=make_mask_descr(dtype)) + return result + + +def _recursive_mask_or(m1, m2, newmask): + names = m1.dtype.names + for name in names: + current1 = m1[name] + if current1.dtype.names is not None: + _recursive_mask_or(current1, m2[name], newmask[name]) + else: + umath.logical_or(current1, m2[name], newmask[name]) + + +def mask_or(m1, m2, copy=False, shrink=True): + """ + Combine two masks with the ``logical_or`` operator. + + The result may be a view on `m1` or `m2` if the other is `nomask` + (i.e. False). + + Parameters + ---------- + m1, m2 : array_like + Input masks. + copy : bool, optional + If copy is False and one of the inputs is `nomask`, return a view + of the other input mask. Defaults to False. + shrink : bool, optional + Whether to shrink the output to `nomask` if all its values are + False. Defaults to True. + + Returns + ------- + mask : output mask + The result masks values that are masked in either `m1` or `m2`. + + Raises + ------ + ValueError + If `m1` and `m2` have different flexible dtypes. + + Examples + -------- + >>> import numpy as np + >>> m1 = np.ma.make_mask([0, 1, 1, 0]) + >>> m2 = np.ma.make_mask([1, 0, 0, 0]) + >>> np.ma.mask_or(m1, m2) + array([ True, True, True, False]) + + """ + + if (m1 is nomask) or (m1 is False): + dtype = getattr(m2, 'dtype', MaskType) + return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype) + if (m2 is nomask) or (m2 is False): + dtype = getattr(m1, 'dtype', MaskType) + return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype) + if m1 is m2 and is_mask(m1): + return _shrink_mask(m1) if shrink else m1 + (dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None)) + if dtype1 != dtype2: + raise ValueError(f"Incompatible dtypes '{dtype1}'<>'{dtype2}'") + if dtype1.names is not None: + # Allocate an output mask array with the properly broadcast shape. + newmask = np.empty(np.broadcast(m1, m2).shape, dtype1) + _recursive_mask_or(m1, m2, newmask) + return newmask + return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink) + + +def flatten_mask(mask): + """ + Returns a completely flattened version of the mask, where nested fields + are collapsed. + + Parameters + ---------- + mask : array_like + Input array, which will be interpreted as booleans. + + Returns + ------- + flattened_mask : ndarray of bools + The flattened input.
+ + Examples + -------- + >>> import numpy as np + >>> mask = np.array([0, 0, 1]) + >>> np.ma.flatten_mask(mask) + array([False, False, True]) + + >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + >>> np.ma.flatten_mask(mask) + array([False, False, False, True]) + + >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) + >>> np.ma.flatten_mask(mask) + array([False, False, False, False, False, True]) + + """ + + def _flatmask(mask): + "Flatten the mask and returns a (maybe nested) sequence of booleans." + mnames = mask.dtype.names + if mnames is not None: + return [flatten_mask(mask[name]) for name in mnames] + else: + return mask + + def _flatsequence(sequence): + "Generates a flattened version of the sequence." + try: + for element in sequence: + if hasattr(element, '__iter__'): + yield from _flatsequence(element) + else: + yield element + except TypeError: + yield sequence + + mask = np.asarray(mask) + flattened = _flatsequence(_flatmask(mask)) + return np.array(list(flattened), dtype=bool) + + +def _check_mask_axis(mask, axis, keepdims=np._NoValue): + "Check whether there are masked values along the given axis" + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if mask is not nomask: + return mask.all(axis=axis, **kwargs) + return nomask + + +############################################################################### +# Masking functions # +############################################################################### + +def masked_where(condition, a, copy=True): + """ + Mask an array where a condition is met. + + Return `a` as an array masked where `condition` is True. + Any masked values of `a` or `condition` are also masked in the output. + + Parameters + ---------- + condition : array_like + Masking condition. When `condition` tests floating point values for + equality, consider using ``masked_values`` instead. + a : array_like + Array to mask. + copy : bool + If True (default) make a copy of `a` in the result. If False modify + `a` in place and return a view. + + Returns + ------- + result : MaskedArray + The result of masking `a` where `condition` is True. + + See Also + -------- + masked_values : Mask using floating point equality. + masked_equal : Mask where equal to a given value. + masked_not_equal : Mask where *not* equal to a given value. + masked_less_equal : Mask where less than or equal to a given value. + masked_greater_equal : Mask where greater than or equal to a given value. + masked_less : Mask where less than a given value. + masked_greater : Mask where greater than a given value. + masked_inside : Mask inside a given interval. + masked_outside : Mask outside a given interval. + masked_invalid : Mask invalid values (NaNs or infs). + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_where(a <= 2, a) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + Mask array `b` conditional on `a`. 
+ + >>> b = ['a', 'b', 'c', 'd'] + >>> ma.masked_where(a == 2, b) + masked_array(data=['a', 'b', --, 'd'], + mask=[False, False, True, False], + fill_value='N/A', + dtype='<U1') + + Effect of the `copy` argument. + + >>> c = ma.masked_where(a <= 2, a) + >>> c + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + >>> c[0] = 99 + >>> c + masked_array(data=[99, --, --, 3], + mask=[False, True, True, False], + fill_value=999999) + >>> a + array([0, 1, 2, 3]) + >>> c = ma.masked_where(a <= 2, a, copy=False) + >>> c[0] = 99 + >>> c + masked_array(data=[99, --, --, 3], + mask=[False, True, True, False], + fill_value=999999) + >>> a + array([99, 1, 2, 3]) + + When `condition` or `a` contain masked values. + + >>> a = np.arange(4) + >>> a = ma.masked_where(a == 2, a) + >>> a + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=999999) + >>> b = np.arange(4) + >>> b = ma.masked_where(b == 0, b) + >>> b + masked_array(data=[--, 1, 2, 3], + mask=[ True, False, False, False], + fill_value=999999) + >>> ma.masked_where(a == 3, b) + masked_array(data=[--, 1, --, --], + mask=[ True, False, True, True], + fill_value=999999) + + """ + # Make sure that condition is a valid standard-type mask. + cond = make_mask(condition, shrink=False) + a = np.array(a, copy=copy, subok=True) + + (cshape, ashape) = (cond.shape, a.shape) + if cshape and cshape != ashape: + raise IndexError("Inconsistent shape between the condition and the input" + " (got %s and %s)" % (cshape, ashape)) + if hasattr(a, '_mask'): + cond = mask_or(cond, a._mask) + cls = type(a) + else: + cls = MaskedArray + result = a.view(cls) + # Assign to *.mask so that structured masks are handled correctly. + result.mask = _shrink_mask(cond) + # There is no view of a boolean so when 'a' is a MaskedArray with nomask + # the update to the result's mask has no effect. + if not copy and hasattr(a, '_mask') and getmask(a) is nomask: + a._mask = result._mask.view() + return result + + +def masked_greater(x, value, copy=True): + """ + Mask an array where greater than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x > value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater(a, 2) + masked_array(data=[0, 1, 2, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + return masked_where(greater(x, value), x, copy=copy) + + +def masked_greater_equal(x, value, copy=True): + """ + Mask an array where greater than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x >= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_greater_equal(a, 2) + masked_array(data=[0, 1, --, --], + mask=[False, False, True, True], + fill_value=999999) + + """ + return masked_where(greater_equal(x, value), x, copy=copy) + + +def masked_less(x, value, copy=True): + """ + Mask an array where less than a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x < value). + + See Also + -------- + masked_where : Mask where a condition is met.
+ + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less(a, 2) + masked_array(data=[--, --, 2, 3], + mask=[ True, True, False, False], + fill_value=999999) + + """ + return masked_where(less(x, value), x, copy=copy) + + +def masked_less_equal(x, value, copy=True): + """ + Mask an array where less than or equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x <= value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_less_equal(a, 2) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) + + """ + return masked_where(less_equal(x, value), x, copy=copy) + + +def masked_not_equal(x, value, copy=True): + """ + Mask an array where *not* equal to a given value. + + This function is a shortcut to ``masked_where``, with + `condition` = (x != value). + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_not_equal(a, 2) + masked_array(data=[--, --, 2, --], + mask=[ True, True, False, True], + fill_value=999999) + + """ + return masked_where(not_equal(x, value), x, copy=copy) + + +def masked_equal(x, value, copy=True): + """ + Mask an array where equal to a given value. + + Return a MaskedArray, masked where the data in array `x` are + equal to `value`. The fill_value of the returned MaskedArray + is set to `value`. + + For floating point arrays, consider using ``masked_values(x, value)``. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(4) + >>> a + array([0, 1, 2, 3]) + >>> ma.masked_equal(a, 2) + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=2) + + """ + output = masked_where(equal(x, value), x, copy=copy) + output.fill_value = value + return output + + +def masked_inside(x, v1, v2, copy=True): + """ + Mask an array inside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` inside + the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2` + can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_inside(x, -0.3, 0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_inside(x, 0.3, -0.3) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf >= v1) & (xf <= v2) + return masked_where(condition, x, copy=copy) + + +def masked_outside(x, v1, v2, copy=True): + """ + Mask an array outside a given interval. + + Shortcut to ``masked_where``, where `condition` is True for `x` outside + the interval [v1,v2] (x < v1)|(x > v2). 
+ The boundaries `v1` and `v2` can be given in either order. + + See Also + -------- + masked_where : Mask where a condition is met. + + Notes + ----- + The array `x` is prefilled with its filling value. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] + >>> ma.masked_outside(x, -0.3, 0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + The order of `v1` and `v2` doesn't matter. + + >>> ma.masked_outside(x, 0.3, -0.3) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) + + """ + if v2 < v1: + (v1, v2) = (v2, v1) + xf = filled(x) + condition = (xf < v1) | (xf > v2) + return masked_where(condition, x, copy=copy) + + +def masked_object(x, value, copy=True, shrink=True): + """ + Mask the array `x` where the data are exactly equal to value. + + This function is similar to `masked_values`, but only suitable + for object arrays: for floating point, use `masked_values` instead. + + Parameters + ---------- + x : array_like + Array to mask + value : object + Comparison value + copy : {True, False}, optional + Whether to return a copy of `x`. + shrink : {True, False}, optional + Whether to collapse a mask full of False to nomask + + Returns + ------- + result : MaskedArray + The result of masking `x` where equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + masked_values : Mask using floating point equality. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> food = np.array(['green_eggs', 'ham'], dtype=object) + >>> # don't eat spoiled food + >>> eat = ma.masked_object(food, 'green_eggs') + >>> eat + masked_array(data=[--, 'ham'], + mask=[ True, False], + fill_value='green_eggs', + dtype=object) + >>> # plain ol` ham is boring + >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) + >>> eat = ma.masked_object(fresh_food, 'green_eggs') + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + Note that `mask` is set to ``nomask`` if possible. + + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) + + """ + if isMaskedArray(x): + condition = umath.equal(x._data, value) + mask = x._mask + else: + condition = umath.equal(np.asarray(x), value) + mask = nomask + mask = mask_or(mask, make_mask(condition, shrink=shrink)) + return masked_array(x, mask=mask, copy=copy, fill_value=value) + + +def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): + """ + Mask using floating point equality. + + Return a MaskedArray, masked where the data in array `x` are approximately + equal to `value`, determined using `isclose`. The default tolerances for + `masked_values` are the same as those for `isclose`. + + For integer types, exact equality is used, in the same way as + `masked_equal`. + + The fill_value is set to `value` and the mask is set to ``nomask`` if + possible. + + Parameters + ---------- + x : array_like + Array to mask. + value : float + Masking value. + rtol, atol : float, optional + Tolerance parameters passed on to `isclose` + copy : bool, optional + Whether to return a copy of `x`. + shrink : bool, optional + Whether to collapse a mask full of False to ``nomask``. 
+ + Returns + ------- + result : MaskedArray + The result of masking `x` where approximately equal to `value`. + + See Also + -------- + masked_where : Mask where a condition is met. + masked_equal : Mask where equal to a given value (integers). + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = np.array([1, 1.1, 2, 1.1, 3]) + >>> ma.masked_values(x, 1.1) + masked_array(data=[1.0, --, 2.0, --, 3.0], + mask=[False, True, False, True, False], + fill_value=1.1) + + Note that `mask` is set to ``nomask`` if possible. + + >>> ma.masked_values(x, 2.1) + masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], + mask=False, + fill_value=2.1) + + Unlike `masked_equal`, `masked_values` can perform approximate equalities. + + >>> ma.masked_values(x, 2.1, atol=1e-1) + masked_array(data=[1.0, 1.1, --, 1.1, 3.0], + mask=[False, False, True, False, False], + fill_value=2.1) + + """ + xnew = filled(x, value) + if np.issubdtype(xnew.dtype, np.floating): + mask = np.isclose(xnew, value, atol=atol, rtol=rtol) + else: + mask = umath.equal(xnew, value) + ret = masked_array(xnew, mask=mask, copy=copy, fill_value=value) + if shrink: + ret.shrink_mask() + return ret + + +def masked_invalid(a, copy=True): + """ + Mask an array where invalid values occur (NaNs or infs). + + This function is a shortcut to ``masked_where``, with + `condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved. + Only applies to arrays with a dtype where NaNs or infs make sense + (i.e. floating point types), but accepts any array_like object. + + See Also + -------- + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.arange(5, dtype=float) + >>> a[2] = np.nan + >>> a[3] = np.inf + >>> a + array([ 0., 1., nan, inf, 4.]) + >>> ma.masked_invalid(a) + masked_array(data=[0.0, 1.0, --, --, 4.0], + mask=[False, False, True, True, False], + fill_value=1e+20) + + """ + a = np.array(a, copy=None, subok=True) + res = masked_where(~(np.isfinite(a)), a, copy=copy) + # masked_invalid previously never returned nomask as a mask and doing so + # threw off matplotlib (gh-22842). So use shrink=False: + if res._mask is nomask: + res._mask = make_mask_none(res.shape, res.dtype) + return res + +############################################################################### +# Printing options # +############################################################################### + + +class _MaskedPrintOption: + """ + Handle the string used to represent missing data in a masked array. + + """ + + def __init__(self, display): + """ + Create the masked_print_option object. + + """ + self._display = display + self._enabled = True + + def display(self): + """ + Display the string to print for masked values. + + """ + return self._display + + def set_display(self, s): + """ + Set the string to print for masked values. + + """ + self._display = s + + def enabled(self): + """ + Is the use of the display value enabled? + + """ + return self._enabled + + def enable(self, shrink=1): + """ + Set the enabling shrink to `shrink`. + + """ + self._enabled = shrink + + def __str__(self): + return str(self._display) + + __repr__ = __str__ + + +# if you single index into a masked location you get this object. +masked_print_option = _MaskedPrintOption('--') + + +def _recursive_printoption(result, mask, printopt): + """ + Puts printoptions in result where mask is True. 
+ + Private function allowing for recursion + + """ + names = result.dtype.names + if names is not None: + for name in names: + curdata = result[name] + curmask = mask[name] + _recursive_printoption(curdata, curmask, printopt) + else: + np.copyto(result, printopt, where=mask) + + +# For better or worse, these end in a newline +_legacy_print_templates = { + 'long_std': textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + 'long_flx': textwrap.dedent("""\ + masked_%(name)s(data = + %(data)s, + %(nlen)s mask = + %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """), + 'short_std': textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s) + """), + 'short_flx': textwrap.dedent("""\ + masked_%(name)s(data = %(data)s, + %(nlen)s mask = %(mask)s, + %(nlen)s fill_value = %(fill)s, + %(nlen)s dtype = %(dtype)s) + """) +} + +############################################################################### +# MaskedArray class # +############################################################################### + + +def _recursive_filled(a, mask, fill_value): + """ + Recursively fill `a` with `fill_value`. + + """ + names = a.dtype.names + for name in names: + current = a[name] + if current.dtype.names is not None: + _recursive_filled(current, mask[name], fill_value[name]) + else: + np.copyto(current, fill_value[name], where=mask[name]) + + +def flatten_structured_array(a): + """ + Flatten a structured array. + + The data type of the output is chosen such that it can represent all of the + (nested) fields. + + Parameters + ---------- + a : structured array + + Returns + ------- + output : masked array or ndarray + A flattened masked array if the input is a masked array, otherwise a + standard ndarray. + + Examples + -------- + >>> import numpy as np + >>> ndtype = [('a', int), ('b', float)] + >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) + >>> np.ma.flatten_structured_array(a) + array([[1., 1.], + [2., 2.]]) + + """ + + def flatten_sequence(iterable): + """ + Flattens a compound of nested iterables. + + """ + for elm in iter(iterable): + if hasattr(elm, '__iter__'): + yield from flatten_sequence(elm) + else: + yield elm + + a = np.asanyarray(a) + inishape = a.shape + a = a.ravel() + if isinstance(a, MaskedArray): + out = np.array([tuple(flatten_sequence(d.item())) for d in a._data]) + out = out.view(MaskedArray) + out._mask = np.array([tuple(flatten_sequence(d.item())) + for d in getmaskarray(a)]) + else: + out = np.array([tuple(flatten_sequence(d.item())) for d in a]) + if len(inishape) > 1: + newshape = list(out.shape) + newshape[0] = inishape + out.shape = tuple(flatten_sequence(newshape)) + return out + + +def _arraymethod(funcname, onmask=True): + """ + Return a class method wrapper around a basic array method. + + Creates a class method which returns a masked array, where the new + ``_data`` array is the output of the corresponding basic method called + on the original ``_data``. + + If `onmask` is True, the new mask is the output of the method called + on the initial mask. Otherwise, the new mask is just a reference + to the initial mask. + + Parameters + ---------- + funcname : str + Name of the function to apply on data. + onmask : bool + Whether the mask must be processed also (True) or left + alone (False). Default is True. Make available as `_onmask` + attribute. 
+ + Returns + ------- + method : instancemethod + Class method wrapper of the specified basic array method. + + """ + def wrapped_method(self, *args, **params): + result = getattr(self._data, funcname)(*args, **params) + result = result.view(type(self)) + result._update_from(self) + mask = self._mask + if not onmask: + result.__setmask__(mask) + elif mask is not nomask: + # __setmask__ makes a copy, which we don't want + result._mask = getattr(mask, funcname)(*args, **params) + return result + methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None) + if methdoc is not None: + wrapped_method.__doc__ = methdoc.__doc__ + wrapped_method.__name__ = funcname + return wrapped_method + + + class MaskedIterator: + """ + Flat iterator object to iterate over masked arrays. + + A `MaskedIterator` iterator is returned by ``x.flat`` for any masked array + `x`. It allows iterating over the array as if it were a 1-D array, + either in a for-loop or by calling its `next` method. + + Iteration is done in C-contiguous style, with the last index varying the + fastest. The iterator can also be indexed using basic slicing or + advanced indexing. + + See Also + -------- + MaskedArray.flat : Return a flat iterator over an array. + MaskedArray.flatten : Returns a flattened copy of an array. + + Notes + ----- + `MaskedIterator` is not exported by the `ma` module. Instead of + instantiating a `MaskedIterator` directly, use `MaskedArray.flat`. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(6).reshape(2, 3)) + >>> fl = x.flat + >>> type(fl) + <class 'numpy.ma.core.MaskedIterator'> + >>> for item in fl: + ... print(item) + ... + 0 + 1 + 2 + 3 + 4 + 5 + + Extracting more than a single element by indexing the `MaskedIterator` + returns a masked array: + + >>> fl[2:4] + masked_array(data = [2 3], + mask = False, + fill_value = 999999) + + """ + + def __init__(self, ma): + self.ma = ma + self.dataiter = ma._data.flat + + if ma._mask is nomask: + self.maskiter = None + else: + self.maskiter = ma._mask.flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + result = self.dataiter.__getitem__(indx).view(type(self.ma)) + if self.maskiter is not None: + _mask = self.maskiter.__getitem__(indx) + if isinstance(_mask, ndarray): + # set shape to match that of data; this is needed for matrices + _mask.shape = result.shape + result._mask = _mask + elif isinstance(_mask, np.void): + return mvoid(result, mask=_mask, hardmask=self.ma._hardmask) + elif _mask: # Just a scalar, masked + return masked + return result + + # This won't work if ravel makes a copy + def __setitem__(self, index, value): + self.dataiter[index] = getdata(value) + if self.maskiter is not None: + self.maskiter[index] = getmaskarray(value) + + def __next__(self): + """ + Return the next value, or raise StopIteration. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([3, 2], mask=[0, 1]) + >>> fl = x.flat + >>> next(fl) + 3 + >>> next(fl) + masked + >>> next(fl) + Traceback (most recent call last): + ... + StopIteration + + """ + d = next(self.dataiter) + if self.maskiter is not None: + m = next(self.maskiter) + if isinstance(m, np.void): + return mvoid(d, mask=m, hardmask=self.ma._hardmask) + elif m: # Just a scalar, masked + return masked + return d + + + @set_module("numpy.ma") + class MaskedArray(ndarray): + """ + An array class with possibly masked values. + + Masked values of True exclude the corresponding element from any + computation.
+ + Construction:: + + x = MaskedArray(data, mask=nomask, dtype=None, copy=False, subok=True, + ndmin=0, fill_value=None, keep_mask=True, hard_mask=None, + shrink=True, order=None) + + Parameters + ---------- + data : array_like + Input data. + mask : sequence, optional + Mask. Must be convertible to an array of booleans with the same + shape as `data`. True indicates a masked (i.e. invalid) data. + dtype : dtype, optional + Data type of the output. + If `dtype` is None, the type of the data argument (``data.dtype``) + is used. If `dtype` is not None and different from ``data.dtype``, + a copy is performed. + copy : bool, optional + Whether to copy the input data (True), or to use a reference instead. + Default is False. + subok : bool, optional + Whether to return a subclass of `MaskedArray` if possible (True) or a + plain `MaskedArray`. Default is True. + ndmin : int, optional + Minimum number of dimensions. Default is 0. + fill_value : scalar, optional + Value used to fill in the masked values when necessary. + If None, a default based on the data-type is used. + keep_mask : bool, optional + Whether to combine `mask` with the mask of the input data, if any + (True), or to use only `mask` for the output (False). Default is True. + hard_mask : bool, optional + Whether to use a hard mask or not. With a hard mask, masked values + cannot be unmasked. Default is False. + shrink : bool, optional + Whether to force compression of an empty mask. Default is True. + order : {'C', 'F', 'A'}, optional + Specify the order of the array. If order is 'C', then the array + will be in C-contiguous order (last-index varies the fastest). + If order is 'F', then the returned array will be in + Fortran-contiguous order (first-index varies the fastest). + If order is 'A' (default), then the returned array may be + in any order (either C-, Fortran-contiguous, or even discontiguous), + unless a copy is required, in which case it will be C-contiguous. + + Examples + -------- + >>> import numpy as np + + The ``mask`` can be initialized with an array of boolean values + with the same shape as ``data``. + + >>> data = np.arange(6).reshape((2, 3)) + >>> np.ma.MaskedArray(data, mask=[[False, True, False], + ... [False, False, True]]) + masked_array( + data=[[0, --, 2], + [3, 4, --]], + mask=[[False, True, False], + [False, False, True]], + fill_value=999999) + + Alternatively, the ``mask`` can be initialized to homogeneous boolean + array with the same shape as ``data`` by passing in a scalar + boolean value: + + >>> np.ma.MaskedArray(data, mask=False) + masked_array( + data=[[0, 1, 2], + [3, 4, 5]], + mask=[[False, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.MaskedArray(data, mask=True) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=999999, + dtype=int64) + + .. note:: + The recommended practice for initializing ``mask`` with a scalar + boolean value is to use ``True``/``False`` rather than + ``np.True_``/``np.False_``. The reason is :attr:`nomask` + is represented internally as ``np.False_``. + + >>> np.False_ is np.ma.nomask + True + + """ + + __array_priority__ = 15 + _defaultmask = nomask + _defaulthardmask = False + _baseclass = ndarray + + # Maximum number of elements per axis used when printing an array. The + # 1d case is handled separately because we need more values in this case. 
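+ # Added illustration (comment not in the original source): with the + # defaults below, the repr of a 1-D masked array longer than 1500 + # elements keeps only its first and last 750 entries when + # _insert_masked_print converts the data to object dtype for display, + # so huge arrays avoid a costly full conversion.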
+ _print_width = 100 + _print_width_1d = 1500 + + def __new__(cls, data=None, mask=nomask, dtype=None, copy=False, + subok=True, ndmin=0, fill_value=None, keep_mask=True, + hard_mask=None, shrink=True, order=None): + """ + Create a new masked array from scratch. + + Notes + ----- + A masked array can also be created by taking a .view(MaskedArray). + + """ + # Process data. + copy = None if not copy else True + _data = np.array(data, dtype=dtype, copy=copy, + order=order, subok=True, ndmin=ndmin) + _baseclass = getattr(data, '_baseclass', type(_data)) + # Check that we're not erasing the mask. + if isinstance(data, MaskedArray) and (data.shape != _data.shape): + copy = True + + # Here, we copy the _view_, so that we can attach new properties to it + # we must never do .view(MaskedConstant), as that would create a new + # instance of np.ma.masked, which make identity comparison fail + if isinstance(data, cls) and subok and not isinstance(data, MaskedConstant): + _data = ndarray.view(_data, type(data)) + else: + _data = ndarray.view(_data, cls) + + # Handle the case where data is not a subclass of ndarray, but + # still has the _mask attribute like MaskedArrays + if hasattr(data, '_mask') and not isinstance(data, ndarray): + _data._mask = data._mask + # FIXME: should we set `_data._sharedmask = True`? + # Process mask. + # Type of the mask + mdtype = make_mask_descr(_data.dtype) + if mask is nomask: + # Case 1. : no mask in input. + # Erase the current mask ? + if not keep_mask: + # With a reduced version + if shrink: + _data._mask = nomask + # With full version + else: + _data._mask = np.zeros(_data.shape, dtype=mdtype) + # Check whether we missed something + elif isinstance(data, (tuple, list)): + try: + # If data is a sequence of masked array + mask = np.array( + [getmaskarray(np.asanyarray(m, dtype=_data.dtype)) + for m in data], dtype=mdtype) + except (ValueError, TypeError): + # If data is nested + mask = nomask + # Force shrinking of the mask if needed (and possible) + if (mdtype == MaskType) and mask.any(): + _data._mask = mask + _data._sharedmask = False + else: + _data._sharedmask = not copy + if copy: + _data._mask = _data._mask.copy() + # Reset the shape of the original mask + if getmask(data) is not nomask: + # gh-21022 encounters an issue here + # because data._mask.shape is not writeable, but + # the op was also pointless in that case, because + # the shapes were the same, so we can at least + # avoid that path + if data._mask.shape != data.shape: + data._mask.shape = data.shape + else: + # Case 2. : With a mask in input. + # If mask is boolean, create an array of True or False + + # if users pass `mask=None` be forgiving here and cast it False + # for speed; although the default is `mask=nomask` and can differ. 
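+ # Added illustration (comment not in the original source) of how an + # explicit mask argument is normalized below, assuming 1-D data: + # mask=None -> False -> np.zeros(shape, MaskType) (nothing masked) + # mask=True -> np.ones(shape, MaskType) (all masked) + # mask=[0, 1] -> np.array([False, True], dtype=MaskType)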
+ if mask is None: + mask = False + + if mask is True and mdtype == MaskType: + mask = np.ones(_data.shape, dtype=mdtype) + elif mask is False and mdtype == MaskType: + mask = np.zeros(_data.shape, dtype=mdtype) + else: + # Read the mask with the current mdtype + try: + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Make sure the mask and the data have the same shape + if mask.shape != _data.shape: + (nd, nm) = (_data.size, mask.size) + if nm == 1: + mask = np.resize(mask, _data.shape) + elif nm == nd: + mask = np.reshape(mask, _data.shape) + else: + msg = (f"Mask and data not compatible:" + f" data size is {nd}, mask size is {nm}.") + raise MaskError(msg) + copy = True + # Set the mask to the new value + if _data._mask is nomask: + _data._mask = mask + _data._sharedmask = not copy + elif not keep_mask: + _data._mask = mask + _data._sharedmask = not copy + else: + if _data.dtype.names is not None: + def _recursive_or(a, b): + "do a|=b on each field of a, recursively" + for name in a.dtype.names: + (af, bf) = (a[name], b[name]) + if af.dtype.names is not None: + _recursive_or(af, bf) + else: + af |= bf + + _recursive_or(_data._mask, mask) + else: + _data._mask = np.logical_or(mask, _data._mask) + _data._sharedmask = False + + # Update fill_value. + if fill_value is None: + fill_value = getattr(data, '_fill_value', None) + # But don't run the check unless we have something to check. + if fill_value is not None: + _data._fill_value = _check_fill_value(fill_value, _data.dtype) + # Process extra options .. + if hard_mask is None: + _data._hardmask = getattr(data, '_hardmask', False) + else: + _data._hardmask = hard_mask + _data._baseclass = _baseclass + return _data + + def _update_from(self, obj): + """ + Copies some attributes of obj to self. + + """ + if isinstance(obj, ndarray): + _baseclass = type(obj) + else: + _baseclass = ndarray + # We need to copy the _basedict to avoid backward propagation + _optinfo = {} + _optinfo.update(getattr(obj, '_optinfo', {})) + _optinfo.update(getattr(obj, '_basedict', {})) + if not isinstance(obj, MaskedArray): + _optinfo.update(getattr(obj, '__dict__', {})) + _dict = {'_fill_value': getattr(obj, '_fill_value', None), + '_hardmask': getattr(obj, '_hardmask', False), + '_sharedmask': getattr(obj, '_sharedmask', False), + '_isfield': getattr(obj, '_isfield', False), + '_baseclass': getattr(obj, '_baseclass', _baseclass), + '_optinfo': _optinfo, + '_basedict': _optinfo} + self.__dict__.update(_dict) + self.__dict__.update(_optinfo) + + def __array_finalize__(self, obj): + """ + Finalizes the masked array. + + """ + # Get main attributes. + self._update_from(obj) + + # We have to decide how to initialize self.mask, based on + # obj.mask. This is very difficult. There might be some + # correspondence between the elements in the array we are being + # created from (= obj) and us. Or there might not. This method can + # be called in all kinds of places for all kinds of reasons -- could + # be empty_like, could be slicing, could be a ufunc, could be a view. + # The numpy subclassing interface simply doesn't give us any way + # to know, which means that at best this method will be based on + # guesswork and heuristics. To make things worse, there isn't even any + # clear consensus about what the desired behavior is. 
For instance, + # most users think that np.empty_like(marr) -- which goes via this + # method -- should return a masked array with an empty mask (see + # gh-3404 and linked discussions), but others disagree, and they have + # existing code which depends on empty_like returning an array that + # matches the input mask. + # + # Historically our algorithm was: if the template object mask had the + # same *number of elements* as us, then we used *it's mask object + # itself* as our mask, so that writes to us would also write to the + # original array. This is horribly broken in multiple ways. + # + # Now what we do instead is, if the template object mask has the same + # number of elements as us, and we do not have the same base pointer + # as the template object (b/c views like arr[...] should keep the same + # mask), then we make a copy of the template object mask and use + # that. This is also horribly broken but somewhat less so. Maybe. + if isinstance(obj, ndarray): + # XX: This looks like a bug -- shouldn't it check self.dtype + # instead? + if obj.dtype.names is not None: + _mask = getmaskarray(obj) + else: + _mask = getmask(obj) + + # If self and obj point to exactly the same data, then probably + # self is a simple view of obj (e.g., self = obj[...]), so they + # should share the same mask. (This isn't 100% reliable, e.g. self + # could be the first row of obj, or have strange strides, but as a + # heuristic it's not bad.) In all other cases, we make a copy of + # the mask, so that future modifications to 'self' do not end up + # side-effecting 'obj' as well. + if (_mask is not nomask and obj.__array_interface__["data"][0] + != self.__array_interface__["data"][0]): + # We should make a copy. But we could get here via astype, + # in which case the mask might need a new dtype as well + # (e.g., changing to or from a structured dtype), and the + # order could have changed. So, change the mask type if + # needed and use astype instead of copy. + if self.dtype == obj.dtype: + _mask_dtype = _mask.dtype + else: + _mask_dtype = make_mask_descr(self.dtype) + + if self.flags.c_contiguous: + order = "C" + elif self.flags.f_contiguous: + order = "F" + else: + order = "K" + + _mask = _mask.astype(_mask_dtype, order) + else: + # Take a view so shape changes, etc., do not propagate back. + _mask = _mask.view() + else: + _mask = nomask + + self._mask = _mask + # Finalize the mask + if self._mask is not nomask: + try: + self._mask.shape = self.shape + except ValueError: + self._mask = nomask + except (TypeError, AttributeError): + # When _mask.shape is not writable (because it's a void) + pass + + # Finalize the fill_value + if self._fill_value is not None: + self._fill_value = _check_fill_value(self._fill_value, self.dtype) + elif self.dtype.names is not None: + # Finalize the default fill_value for structured arrays + self._fill_value = _check_fill_value(None, self.dtype) + + def __array_wrap__(self, obj, context=None, return_scalar=False): + """ + Special hook for ufuncs. + + Wraps the numpy array and sets the mask according to context. 
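+ + For illustration (added note, not in the original docstring): a ufunc + with a restricted domain such as ``np.log`` arrives here with that + domain registered in ``ufunc_domain``, so out-of-domain inputs (for + example, non-positive values passed to ``np.log``) are filled with the + ufunc's fill value and masked in the result instead of surfacing as + raw nan/inf entries.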
+ + """ + if obj is self: # for in-place operations + result = obj + else: + result = obj.view(type(self)) + result._update_from(self) + + if context is not None: + result._mask = result._mask.copy() + func, args, out_i = context + # args sometimes contains outputs (gh-10459), which we don't want + input_args = args[:func.nin] + m = functools.reduce(mask_or, [getmaskarray(arg) for arg in input_args]) + # Get the domain mask + domain = ufunc_domain.get(func) + if domain is not None: + # Take the domain, and make sure it's a ndarray + with np.errstate(divide='ignore', invalid='ignore'): + # The result may be masked for two (unary) domains. + # That can't really be right as some domains drop + # the mask and some don't behaving differently here. + d = domain(*input_args).astype(bool, copy=False) + d = filled(d, True) + + if d.any(): + # Fill the result where the domain is wrong + try: + # Binary domain: take the last value + fill_value = ufunc_fills[func][-1] + except TypeError: + # Unary domain: just use this one + fill_value = ufunc_fills[func] + except KeyError: + # Domain not recognized, use fill_value instead + fill_value = self.fill_value + + np.copyto(result, fill_value, where=d) + + # Update the mask + if m is nomask: + m = d + else: + # Don't modify inplace, we risk back-propagation + m = (m | d) + + # Make sure the mask has the proper size + if result is not self and result.shape == () and m: + return masked + else: + result._mask = m + result._sharedmask = False + + return result + + def view(self, dtype=None, type=None, fill_value=None): + """ + Return a view of the MaskedArray data. + + Parameters + ---------- + dtype : data-type or ndarray sub-class, optional + Data-type descriptor of the returned view, e.g., float32 or int16. + The default, None, results in the view having the same data-type + as `a`. As with ``ndarray.view``, dtype can also be specified as + an ndarray sub-class, which then specifies the type of the + returned object (this is equivalent to setting the ``type`` + parameter). + type : Python type, optional + Type of the returned view, either ndarray or a subclass. The + default None results in type preservation. + fill_value : scalar, optional + The value to use for invalid entries (None by default). + If None, then this argument is inferred from the passed `dtype`, or + in its absence the original array, as discussed in the notes below. + + See Also + -------- + numpy.ndarray.view : Equivalent method on ndarray object. + + Notes + ----- + + ``a.view()`` is used two different ways: + + ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view + of the array's memory with a different data-type. This can cause a + reinterpretation of the bytes of memory. + + ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just + returns an instance of `ndarray_subclass` that looks at the same array + (same shape, dtype, etc.) This does not cause a reinterpretation of the + memory. + + If `fill_value` is not specified, but `dtype` is specified (and is not + an ndarray sub-class), the `fill_value` of the MaskedArray will be + reset. If neither `fill_value` nor `dtype` are specified (or if + `dtype` is an ndarray sub-class), then the fill value is preserved. + Finally, if `fill_value` is specified, but `dtype` is not, the fill + value is set to the specified value. 
+ + For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of + bytes per entry than the previous dtype (for example, converting a + regular array to a structured array), then the behavior of the view + cannot be predicted just from the superficial appearance of ``a`` (shown + by ``print(a)``). It also depends on exactly how ``a`` is stored in + memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus + defined as a slice or transpose, etc., the view may give different + results. + """ + + if dtype is None: + if type is None: + output = ndarray.view(self) + else: + output = ndarray.view(self, type) + elif type is None: + try: + if issubclass(dtype, ndarray): + output = ndarray.view(self, dtype) + dtype = None + else: + output = ndarray.view(self, dtype) + except TypeError: + output = ndarray.view(self, dtype) + else: + output = ndarray.view(self, dtype, type) + + # also make the mask be a view (so attr changes to the view's + # mask do no affect original object's mask) + # (especially important to avoid affecting np.masked singleton) + if getmask(output) is not nomask: + output._mask = output._mask.view() + + # Make sure to reset the _fill_value if needed + if getattr(output, '_fill_value', None) is not None: + if fill_value is None: + if dtype is None: + pass # leave _fill_value as is + else: + output._fill_value = None + else: + output.fill_value = fill_value + return output + + def __getitem__(self, indx): + """ + x.__getitem__(y) <==> x[y] + + Return the item described by i, as a masked array. + + """ + # We could directly use ndarray.__getitem__ on self. + # But then we would have to modify __array_finalize__ to prevent the + # mask of being reshaped if it hasn't been set up properly yet + # So it's easier to stick to the current version + dout = self.data[indx] + _mask = self._mask + + def _is_scalar(m): + return not isinstance(m, np.ndarray) + + def _scalar_heuristic(arr, elem): + """ + Return whether `elem` is a scalar result of indexing `arr`, or None + if undecidable without promoting nomask to a full mask + """ + # obviously a scalar + if not isinstance(elem, np.ndarray): + return True + + # object array scalar indexing can return anything + elif arr.dtype.type is np.object_: + if arr.dtype is not elem.dtype: + # elem is an array, but dtypes do not match, so must be + # an element + return True + + # well-behaved subclass that only returns 0d arrays when + # expected - this is not a scalar + elif type(arr).__getitem__ == ndarray.__getitem__: + return False + + return None + + if _mask is not nomask: + # _mask cannot be a subclass, so it tells us whether we should + # expect a scalar. It also cannot be of dtype object. + mout = _mask[indx] + scalar_expected = _is_scalar(mout) + + else: + # attempt to apply the heuristic to avoid constructing a full mask + mout = nomask + scalar_expected = _scalar_heuristic(self.data, dout) + if scalar_expected is None: + # heuristics have failed + # construct a full array, so we can be certain. This is costly. + # we could also fall back on ndarray.__getitem__(self.data, indx) + scalar_expected = _is_scalar(getmaskarray(self)[indx]) + + # Did we extract a single item? + if scalar_expected: + # A record + if isinstance(dout, np.void): + # We should always re-cast to mvoid, otherwise users can + # change masks on rows that already have masked values, but not + # on rows that have no masked values, which is inconsistent. 
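+ # Added illustration (comment not in the original source): for a + # structured masked array such as + # m = np.ma.array([(1, 2.0)], dtype=[('a', int), ('b', float)], + # mask=[(False, True)]) + # m[0] comes back as an mvoid printing as (1, --), so the per-field + # mask of the returned record can still be edited consistently.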
+ return mvoid(dout, mask=mout, hardmask=self._hardmask) + + # special case introduced in gh-5962 + elif (self.dtype.type is np.object_ and + isinstance(dout, np.ndarray) and + dout is not masked): + # If masked, turn into a MaskedArray, with everything masked. + if mout: + return MaskedArray(dout, mask=True) + else: + return dout + + # Just a scalar + elif mout: + return masked + else: + return dout + else: + # Force dout to MA + dout = dout.view(type(self)) + # Inherit attributes from self + dout._update_from(self) + # Check the fill_value + if is_string_or_list_of_strings(indx): + if self._fill_value is not None: + dout._fill_value = self._fill_value[indx] + + # Something like gh-15895 has happened if this check fails. + # _fill_value should always be an ndarray. + if not isinstance(dout._fill_value, np.ndarray): + raise RuntimeError('Internal NumPy error.') + # If we're indexing a multidimensional field in a + # structured array (such as dtype("(2,)i2,(2,)i1")), + # dimensionality goes up (M[field].ndim == M.ndim + + # M.dtype[field].ndim). That's fine for + # M[field] but problematic for M[field].fill_value + # which should have shape () to avoid breaking several + # methods. There is no great way out, so set to + # first element. See issue #6723. + if dout._fill_value.ndim > 0: + if not (dout._fill_value == + dout._fill_value.flat[0]).all(): + warnings.warn( + "Upon accessing multidimensional field " + f"{indx!s}, need to keep dimensionality " + "of fill_value at 0. Discarding " + "heterogeneous fill_value and setting " + f"all to {dout._fill_value[0]!s}.", + stacklevel=2) + # Need to use `.flat[0:1].squeeze(...)` instead of just + # `.flat[0]` to ensure the result is a 0d array and not + # a scalar. + dout._fill_value = dout._fill_value.flat[0:1].squeeze(axis=0) + dout._isfield = True + # Update the mask if needed + if mout is not nomask: + # set shape to match that of data; this is needed for matrices + dout._mask = reshape(mout, dout.shape) + dout._sharedmask = True + # Note: Don't try to check for m.any(), that'll take too long + return dout + + # setitem may put NaNs into integer arrays or occasionally overflow a + # float. But this may happen in masked values, so avoid otherwise + # correct warnings (as is typical also in masked calculations). + @np.errstate(over='ignore', invalid='ignore') + def __setitem__(self, indx, value): + """ + x.__setitem__(i, y) <==> x[i]=y + + Set item described by index. If value is masked, masks those + locations. + + """ + if self is masked: + raise MaskError('Cannot alter the masked element.') + _data = self._data + _mask = self._mask + if isinstance(indx, str): + _data[indx] = value + if _mask is nomask: + self._mask = _mask = make_mask_none(self.shape, self.dtype) + _mask[indx] = getmask(value) + return + + _dtype = _data.dtype + + if value is masked: + # The mask wasn't set: create a full version. + if _mask is nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + # Now, set the mask to its value. 
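+ # Added illustration (comment not in the original source): + # x = np.ma.array([1, 2, 3]); x[1] = np.ma.masked + # lands in this branch: x.data stays [1, 2, 3] while x.mask + # becomes [False, True, False].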
+ if _dtype.names is not None: + _mask[indx] = tuple([True] * len(_dtype.names)) + else: + _mask[indx] = True + return + + # Get the _data part of the new value + dval = getattr(value, '_data', value) + # Get the _mask part of the new value + mval = getmask(value) + if _dtype.names is not None and mval is nomask: + mval = tuple([False] * len(_dtype.names)) + if _mask is nomask: + # Set the data, then the mask + _data[indx] = dval + if mval is not nomask: + _mask = self._mask = make_mask_none(self.shape, _dtype) + _mask[indx] = mval + elif not self._hardmask: + # Set the data, then the mask + if (isinstance(indx, masked_array) and + not isinstance(value, masked_array)): + _data[indx.data] = dval + else: + _data[indx] = dval + _mask[indx] = mval + elif hasattr(indx, 'dtype') and (indx.dtype == MaskType): + indx = indx * umath.logical_not(_mask) + _data[indx] = dval + else: + if _dtype.names is not None: + err_msg = "Flexible 'hard' masks are not yet supported." + raise NotImplementedError(err_msg) + mindx = mask_or(_mask[indx], mval, copy=True) + dindx = self._data[indx] + if dindx.size > 1: + np.copyto(dindx, dval, where=~mindx) + elif mindx is nomask: + dindx = dval + _data[indx] = dindx + _mask[indx] = mindx + return + + # Define so that we can overwrite the setter. + @property + def dtype(self): + return super().dtype + + @dtype.setter + def dtype(self, dtype): + super(MaskedArray, type(self)).dtype.__set__(self, dtype) + if self._mask is not nomask: + self._mask = self._mask.view(make_mask_descr(dtype), ndarray) + # Try to reset the shape of the mask (if we don't have a void). + # This raises a ValueError if the dtype change won't work. + try: + self._mask.shape = self.shape + except (AttributeError, TypeError): + pass + + @property + def shape(self): + return super().shape + + @shape.setter + def shape(self, shape): + super(MaskedArray, type(self)).shape.__set__(self, shape) + # Cannot use self._mask, since it may not (yet) exist when a + # masked matrix sets the shape. + if getmask(self) is not nomask: + self._mask.shape = self.shape + + def __setmask__(self, mask, copy=False): + """ + Set the mask. + + """ + idtype = self.dtype + current_mask = self._mask + if mask is masked: + mask = True + + if current_mask is nomask: + # Make sure the mask is set + # Just don't do anything if there's nothing to do. + if mask is nomask: + return + current_mask = self._mask = make_mask_none(self.shape, idtype) + + if idtype.names is None: + # No named fields. + # Hardmask: don't unmask the data + if self._hardmask: + current_mask |= mask + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. 
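+ # Added illustration (comment not in the original source): on a + # soft-masked plain array, a.mask = [0, 1] replaces the mask wholesale + # through this path, whereas after a.harden_mask() the hardmask branch + # above ORs the new mask in, so masked entries cannot be unmasked.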
+ else: + current_mask.flat = mask + else: + # Named fields w/ + mdtype = current_mask.dtype + mask = np.asarray(mask) + # Mask is a singleton + if not mask.ndim: + # It's a boolean : make a record + if mask.dtype.kind == 'b': + mask = np.array(tuple([mask.item()] * len(mdtype)), + dtype=mdtype) + # It's a record: make sure the dtype is correct + else: + mask = mask.astype(mdtype) + # Mask is a sequence + else: + # Make sure the new mask is a ndarray with the proper dtype + try: + copy = None if not copy else True + mask = np.array(mask, copy=copy, dtype=mdtype) + # Or assume it's a sequence of bool/int + except TypeError: + mask = np.array([tuple([m] * len(mdtype)) for m in mask], + dtype=mdtype) + # Hardmask: don't unmask the data + if self._hardmask: + for n in idtype.names: + current_mask[n] |= mask[n] + # Softmask: set everything to False + # If it's obviously a compatible scalar, use a quick update + # method. + elif isinstance(mask, (int, float, np.bool, np.number)): + current_mask[...] = mask + # Otherwise fall back to the slower, general purpose way. + else: + current_mask.flat = mask + # Reshape if needed + if current_mask.shape: + current_mask.shape = self.shape + return + + _set_mask = __setmask__ + + @property + def mask(self): + """ Current mask. """ + + # We could try to force a reshape, but that wouldn't work in some + # cases. + # Return a view so that the dtype and shape cannot be changed in place + # This still preserves nomask by identity + return self._mask.view() + + @mask.setter + def mask(self, value): + self.__setmask__(value) + + @property + def recordmask(self): + """ + Get or set the mask of the array if it has no named fields. For + structured arrays, returns a ndarray of booleans where entries are + ``True`` if **all** the fields are masked, ``False`` otherwise: + + >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], + ... dtype=[('a', int), ('b', int)]) + >>> x.recordmask + array([False, False, True, False, False]) + """ + + _mask = self._mask.view(ndarray) + if _mask.dtype.names is None: + return _mask + return np.all(flatten_structured_array(_mask), axis=-1) + + @recordmask.setter + def recordmask(self, mask): + raise NotImplementedError("Coming soon: setting the mask per records!") + + def harden_mask(self): + """ + Force the mask to hard, preventing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `harden_mask` sets + `~ma.MaskedArray.hardmask` to ``True`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.soften_mask + + """ + self._hardmask = True + return self + + def soften_mask(self): + """ + Force the mask to soft (default), allowing unmasking by assignment. + + Whether the mask of a masked array is hard or soft is determined by + its `~ma.MaskedArray.hardmask` property. `soften_mask` sets + `~ma.MaskedArray.hardmask` to ``False`` (and returns the modified + self). + + See Also + -------- + ma.MaskedArray.hardmask + ma.MaskedArray.harden_mask + + """ + self._hardmask = False + return self + + @property + def hardmask(self): + """ + Specifies whether values can be unmasked through assignments. + + By default, assigning definite values to masked array entries will + unmask them. When `hardmask` is ``True``, the mask will not change + through assignments. 
+ + See Also + -------- + ma.MaskedArray.harden_mask + ma.MaskedArray.soften_mask + + Examples + -------- + >>> import numpy as np + >>> x = np.arange(10) + >>> m = np.ma.masked_array(x, x>5) + >>> assert not m.hardmask + + Since `m` has a soft mask, assigning an element value unmasks that + element: + + >>> m[8] = 42 + >>> m + masked_array(data=[0, 1, 2, 3, 4, 5, --, --, 42, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + After hardening, the mask is not affected by assignments: + + >>> hardened = np.ma.harden_mask(m) + >>> assert m.hardmask and hardened is m + >>> m[:] = 23 + >>> m + masked_array(data=[23, 23, 23, 23, 23, 23, --, --, 23, --], + mask=[False, False, False, False, False, False, + True, True, False, True], + fill_value=999999) + + """ + return self._hardmask + + def unshare_mask(self): + """ + Copy the mask and set the `sharedmask` flag to ``False``. + + Whether the mask is shared between masked arrays can be seen from + the `sharedmask` property. `unshare_mask` ensures the mask is not + shared. A copy of the mask is only made if it was shared. + + See Also + -------- + sharedmask + + """ + if self._sharedmask: + self._mask = self._mask.copy() + self._sharedmask = False + return self + + @property + def sharedmask(self): + """ Share status of the mask (read-only). """ + return self._sharedmask + + def shrink_mask(self): + """ + Reduce a mask to nomask when possible. + + Parameters + ---------- + None + + Returns + ------- + result : MaskedArray + A :class:`~ma.MaskedArray` object. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) + >>> x.mask + array([[False, False], + [False, False]]) + >>> x.shrink_mask() + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> x.mask + False + + """ + self._mask = _shrink_mask(self._mask) + return self + + @property + def baseclass(self): + """ Class of the underlying data (read-only). """ + return self._baseclass + + def _get_data(self): + """ + Returns the underlying data, as a view of the masked array. + + If the underlying data is a subclass of :class:`numpy.ndarray`, it is + returned as such. + + >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.data + matrix([[1, 2], + [3, 4]]) + + The type of the data can be accessed through the :attr:`baseclass` + attribute. + """ + return ndarray.view(self, self._baseclass) + + _data = property(fget=_get_data) + data = property(fget=_get_data) + + @property + def flat(self): + """ Return a flat iterator, or set a flattened version of self to value. """ + return MaskedIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + @property + def fill_value(self): + """ + The filling value of the masked array is a scalar. When setting, None + will set to a default based on the data type. + + Examples + -------- + >>> import numpy as np + >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: + ... np.ma.array([0, 1], dtype=dt).get_fill_value() + ... 
+ np.int64(999999) + np.int64(999999) + np.float64(1e+20) + np.complex128(1e+20+0j) + + >>> x = np.ma.array([0, 1.], fill_value=-np.inf) + >>> x.fill_value + np.float64(-inf) + >>> x.fill_value = np.pi + >>> x.fill_value + np.float64(3.1415926535897931) + + Reset to default: + + >>> x.fill_value = None + >>> x.fill_value + np.float64(1e+20) + + """ + if self._fill_value is None: + self._fill_value = _check_fill_value(None, self.dtype) + + # Temporary workaround to account for the fact that str and bytes + # scalars cannot be indexed with (), whereas all other numpy + # scalars can. See issues #7259 and #7267. + # The if-block can be removed after #7267 has been fixed. + if isinstance(self._fill_value, ndarray): + return self._fill_value[()] + return self._fill_value + + @fill_value.setter + def fill_value(self, value=None): + target = _check_fill_value(value, self.dtype) + if not target.ndim == 0: + # 2019-11-12, 1.18.0 + warnings.warn( + "Non-scalar arrays for the fill value are deprecated. Use " + "arrays with scalar values instead. The filled function " + "still supports any array as `fill_value`.", + DeprecationWarning, stacklevel=2) + + _fill_value = self._fill_value + if _fill_value is None: + # Create the attribute if it was undefined + self._fill_value = target + else: + # Don't overwrite the attribute, just fill it (for propagation) + _fill_value[()] = target + + # kept for compatibility + get_fill_value = fill_value.fget + set_fill_value = fill_value.fset + + def filled(self, fill_value=None): + """ + Return a copy of self, with masked values filled with a given value. + **However**, if there are no masked values to fill, self will be + returned instead as an ndarray. + + Parameters + ---------- + fill_value : array_like, optional + The value to use for invalid entries. Can be scalar or non-scalar. + If non-scalar, the resulting ndarray must be broadcastable over + input array. Default is None, in which case, the `fill_value` + attribute of the array is used instead. + + Returns + ------- + filled_array : ndarray + A copy of ``self`` with invalid entries replaced by *fill_value* + (be it the function argument or the attribute of ``self``), or + ``self`` itself as an ndarray if there are no invalid entries to + be replaced. + + Notes + ----- + The result is **not** a MaskedArray! + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) + >>> x.filled() + array([ 1, 2, -999, 4, -999]) + >>> x.filled(fill_value=1000) + array([ 1, 2, 1000, 4, 1000]) + >>> type(x.filled()) + <class 'numpy.ndarray'> + + Subclassing is preserved. This means that if, e.g., the data part of + the masked array is a recarray, `filled` returns a recarray: + + >>> x = np.array([(-1, 2), (-3, 4)], dtype='i8,i8').view(np.recarray) + >>> m = np.ma.array(x, mask=[(True, False), (False, True)]) + >>> m.filled() + rec.array([(999999, 2), ( -3, 999999)], + dtype=[('f0', '<i8'), ('f1', '<i8')]) + + """ + m = self._mask + if m is nomask: + return self._data + + if fill_value is None: + fill_value = self.fill_value + else: + fill_value = _check_fill_value(fill_value, self.dtype) + + if self is masked_singleton: + return np.asanyarray(fill_value) + + if m.dtype.names is not None: + result = self._data.copy('K') + _recursive_filled(result, self._mask, fill_value) + elif self is masked: + result = np.asanyarray(fill_value) + else: + result = self._data.copy('K') + try: + np.copyto(result, fill_value, where=m) + except (TypeError, AttributeError): + fill_value = np.array(fill_value, dtype=object) + d = result.astype(object) + result = np.choose(m, (d, fill_value)) + except IndexError: + # ok, if scalar + if self._data.shape: + raise + elif m: + result = np.array(fill_value, dtype=self.dtype) + else: + result = self._data + return result + + def compressed(self): + """ + Return all the non-masked data as a 1-D array. + + Returns + ------- + data : ndarray + A new `ndarray` holding the non-masked data is returned. + + Notes + ----- + The result is **not** a MaskedArray! + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) + >>> x.compressed() + array([0, 1]) + >>> type(x.compressed()) + <class 'numpy.ndarray'> + + N-D arrays are compressed to 1-D. + + >>> arr = [[1, 2], [3, 4]] + >>> mask = [[1, 0], [0, 1]] + >>> x = np.ma.array(arr, mask=mask) + >>> x.compressed() + array([2, 3]) + + """ + data = ndarray.ravel(self._data) + if self._mask is not nomask: + data = data.compress(np.logical_not(ndarray.ravel(self._mask))) + return data + + def compress(self, condition, axis=None, out=None): + """ + Return `a` where condition is ``True``.
+ + If condition is a `~ma.MaskedArray`, missing values are considered + as ``False``. + + Parameters + ---------- + condition : var + Boolean 1-d array selecting which entries to return. If len(condition) + is less than the size of a along the axis, then output is truncated + to length of condition array. + axis : {None, int}, optional + Axis along which the operation must be performed. + out : {None, ndarray}, optional + Alternative output array in which to place the result. It must have + the same shape as the expected output but the type will be cast if + necessary. + + Returns + ------- + result : MaskedArray + A :class:`~ma.MaskedArray` object. + + Notes + ----- + Please note the difference with :meth:`compressed` ! + The output of :meth:`compress` has a mask, the output of + :meth:`compressed` does not. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.compress([1, 0, 1]) + masked_array(data=[1, 3], + mask=[False, False], + fill_value=999999) + + >>> x.compress([1, 0, 1], axis=1) + masked_array( + data=[[1, 3], + [--, --], + [7, 9]], + mask=[[False, False], + [ True, True], + [False, False]], + fill_value=999999) + + """ + # Get the basic components + (_data, _mask) = (self._data, self._mask) + + # Force the condition to a regular ndarray and forget the missing + # values. + condition = np.asarray(condition) + + _new = _data.compress(condition, axis=axis, out=out).view(type(self)) + _new._update_from(self) + if _mask is not nomask: + _new._mask = _mask.compress(condition, axis=axis) + return _new + + def _insert_masked_print(self): + """ + Replace masked values with masked_print_option, casting all innermost + dtypes to object. + """ + if masked_print_option.enabled(): + mask = self._mask + if mask is nomask: + res = self._data + else: + # convert to object array to make filled work + data = self._data + # For big arrays, to avoid a costly conversion to the + # object dtype, extract the corners before the conversion. + print_width = (self._print_width if self.ndim > 1 + else self._print_width_1d) + for axis in range(self.ndim): + if data.shape[axis] > print_width: + ind = print_width // 2 + arr = np.split(data, (ind, -ind), axis=axis) + data = np.concatenate((arr[0], arr[2]), axis=axis) + arr = np.split(mask, (ind, -ind), axis=axis) + mask = np.concatenate((arr[0], arr[2]), axis=axis) + + rdtype = _replace_dtype_fields(self.dtype, "O") + res = data.astype(rdtype) + _recursive_printoption(res, mask, masked_print_option) + else: + res = self.filled(self.fill_value) + return res + + def __str__(self): + return str(self._insert_masked_print()) + + def __repr__(self): + """ + Literal string representation. 
+ + """ + if self._baseclass is np.ndarray: + name = 'array' + else: + name = self._baseclass.__name__ + + # 2016-11-19: Demoted to legacy format + if np._core.arrayprint._get_legacy_print_mode() <= 113: + is_long = self.ndim > 1 + parameters = { + 'name': name, + 'nlen': " " * len(name), + 'data': str(self), + 'mask': str(self._mask), + 'fill': str(self.fill_value), + 'dtype': str(self.dtype) + } + is_structured = bool(self.dtype.names) + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) + return _legacy_print_templates[key] % parameters + + prefix = f"masked_{name}(" + + dtype_needed = ( + not np._core.arrayprint.dtype_is_implied(self.dtype) or + np.all(self.mask) or + self.size == 0 + ) + + # determine which keyword args need to be shown + keys = ['data', 'mask', 'fill_value'] + if dtype_needed: + keys.append('dtype') + + # array has only one row (non-column) + is_one_row = builtins.all(dim == 1 for dim in self.shape[:-1]) + + # choose what to indent each keyword with + min_indent = 2 + if is_one_row: + # first key on the same line as the type, remaining keys + # aligned by equals + indents = {} + indents[keys[0]] = prefix + for k in keys[1:]: + n = builtins.max(min_indent, len(prefix + keys[0]) - len(k)) + indents[k] = ' ' * n + prefix = '' # absorbed into the first indent + else: + # each key on its own line, indented by two spaces + indents = dict.fromkeys(keys, ' ' * min_indent) + prefix = prefix + '\n' # first key on the next line + + # format the field values + reprs = {} + reprs['data'] = np.array2string( + self._insert_masked_print(), + separator=", ", + prefix=indents['data'] + 'data=', + suffix=',') + reprs['mask'] = np.array2string( + self._mask, + separator=", ", + prefix=indents['mask'] + 'mask=', + suffix=',') + + if self._fill_value is None: + self.fill_value # initialize fill_value # noqa: B018 + + if (self._fill_value.dtype.kind in ("S", "U") + and self.dtype.kind == self._fill_value.dtype.kind): + # Allow strings: "N/A" has length 3 so would mismatch. + fill_repr = repr(self.fill_value.item()) + elif self._fill_value.dtype == self.dtype and not self.dtype == object: + # Guess that it is OK to use the string as item repr. To really + # fix this, it needs new logic (shared with structured scalars) + fill_repr = str(self.fill_value) + else: + fill_repr = repr(self.fill_value) + + reprs['fill_value'] = fill_repr + if dtype_needed: + reprs['dtype'] = np._core.arrayprint.dtype_short_repr(self.dtype) + + # join keys with values and indentations + result = ',\n'.join( + f'{indents[k]}{k}={reprs[k]}' + for k in keys + ) + return prefix + result + ')' + + def _delegate_binop(self, other): + # This emulates the logic in + # private/binop_override.h:forward_binop_should_defer + if isinstance(other, type(self)): + return False + array_ufunc = getattr(other, "__array_ufunc__", False) + if array_ufunc is False: + other_priority = getattr(other, "__array_priority__", -1000000) + return self.__array_priority__ < other_priority + else: + # If array_ufunc is not None, it will be called inside the ufunc; + # None explicitly tells us to not call the ufunc, i.e., defer. + return array_ufunc is None + + def _comparison(self, other, compare): + """Compare self with other using operator.eq or operator.ne. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. 
+ + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + omask = getmask(other) + smask = self.mask + mask = mask_or(smask, omask, copy=True) + + odata = getdata(other) + if mask.dtype.names is not None: + # only == and != are reasonably defined for structured dtypes, + # so give up early for all other comparisons: + if compare not in (operator.eq, operator.ne): + return NotImplemented + # For possibly masked structured arrays we need to be careful, + # since the standard structured array comparison will use all + # fields, masked or not. To avoid masked fields influencing the + # outcome, we set all masked fields in self to other, so they'll + # count as equal. To prepare, we ensure we have the right shape. + broadcast_shape = np.broadcast(self, odata).shape + sbroadcast = np.broadcast_to(self, broadcast_shape, subok=True) + sbroadcast._mask = mask + sdata = sbroadcast.filled(odata) + # Now take care of the mask; the merged mask should have an item + # masked if all fields were masked (in one and/or other). + mask = (mask == np.ones((), mask.dtype)) + # Ensure we can compare masks below if other was not masked. + if omask is np.False_: + omask = np.zeros((), smask.dtype) + + else: + # For regular arrays, just use the data as they come. + sdata = self.data + + check = compare(sdata, odata) + + if isinstance(check, (np.bool, bool)): + return masked if mask else check + + if mask is not nomask: + if compare in (operator.eq, operator.ne): + # Adjust elements that were masked, which should be treated + # as equal if masked in both, unequal if masked in one. + # Note that this works automatically for structured arrays too. + # Ignore this for operations other than `==` and `!=` + check = np.where(mask, compare(smask, omask), check) + + if mask.shape != check.shape: + # Guarantee consistency of the shape, making a copy since the + # the mask may need to get written to later. + mask = np.broadcast_to(mask, check.shape).copy() + + check = check.view(type(self)) + check._update_from(self) + check._mask = mask + + # Cast fill value to np.bool if needed. If it cannot be cast, the + # default boolean fill value is used. + if check._fill_value is not None: + try: + fill = _check_fill_value(check._fill_value, np.bool) + except (TypeError, ValueError): + fill = _check_fill_value(None, np.bool) + check._fill_value = fill + + return check + + def __eq__(self, other): + """Check whether other equals self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. + """ + return self._comparison(other, operator.eq) + + def __ne__(self, other): + """Check whether other does not equal self elementwise. + + When either of the elements is masked, the result is masked as well, + but the underlying boolean data are still set, with self and other + considered equal if both are masked, and unequal otherwise. + + For structured arrays, all fields are combined, with masked values + ignored. The result is masked if all fields were masked, with self + and other considered equal only if both were fully masked. 
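+ + As an added illustration (not part of the original docstring): + ``np.ma.array([1, 2], mask=[0, 1]) != np.ma.array([1, 3], mask=[0, 1])`` + evaluates to ``[False, --]``; the second entry is masked, and its + underlying boolean is False because both operands are masked there.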
+ """ + return self._comparison(other, operator.ne) + + # All other comparisons: + def __le__(self, other): + return self._comparison(other, operator.le) + + def __lt__(self, other): + return self._comparison(other, operator.lt) + + def __ge__(self, other): + return self._comparison(other, operator.ge) + + def __gt__(self, other): + return self._comparison(other, operator.gt) + + def __add__(self, other): + """ + Add self to other, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return add(self, other) + + def __radd__(self, other): + """ + Add other to self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other + self`. + return add(other, self) + + def __sub__(self, other): + """ + Subtract other from self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return subtract(self, other) + + def __rsub__(self, other): + """ + Subtract self from other, and return a new masked array. + + """ + return subtract(other, self) + + def __mul__(self, other): + "Multiply self by other, and return a new masked array." + if self._delegate_binop(other): + return NotImplemented + return multiply(self, other) + + def __rmul__(self, other): + """ + Multiply other by self, and return a new masked array. + + """ + # In analogy with __rsub__ and __rdiv__, use original order: + # we get here from `other * self`. + return multiply(other, self) + + def __truediv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return true_divide(self, other) + + def __rtruediv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return true_divide(other, self) + + def __floordiv__(self, other): + """ + Divide other into self, and return a new masked array. + + """ + if self._delegate_binop(other): + return NotImplemented + return floor_divide(self, other) + + def __rfloordiv__(self, other): + """ + Divide self into other, and return a new masked array. + + """ + return floor_divide(other, self) + + def __pow__(self, other): + """ + Raise self to the power other, masking the potential NaNs/Infs + + """ + if self._delegate_binop(other): + return NotImplemented + return power(self, other) + + def __rpow__(self, other): + """ + Raise other to the power self, masking the potential NaNs/Infs + + """ + return power(other, self) + + def __iadd__(self, other): + """ + Add other to self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__iadd__(other_data) + return self + + def __isub__(self, other): + """ + Subtract other from self in-place. + + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(0), other_data) + self._data.__isub__(other_data) + return self + + def __imul__(self, other): + """ + Multiply self by other in-place. 
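+
+        A small illustration (the data under the mask is multiplied by a
+        neutral 1, so it is left untouched):
+
+        >>> a = np.ma.array([1.0, 2.0], mask=[False, True])
+        >>> a *= 10
+        >>> a
+        masked_array(data=[10.0, --],
+                     mask=[False,  True],
+               fill_value=1e+20)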
+ + """ + m = getmask(other) + if self._mask is nomask: + if m is not nomask and m.any(): + self._mask = make_mask_none(self.shape, self.dtype) + self._mask += m + elif m is not nomask: + self._mask += m + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__imul__(other_data) + return self + + def __ifloordiv__(self, other): + """ + Floor divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.floor_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__ifloordiv__(other_data) + return self + + def __itruediv__(self, other): + """ + True divide self by other in-place. + + """ + other_data = getdata(other) + dom_mask = _DomainSafeDivide().__call__(self._data, other_data) + other_mask = getmask(other) + new_mask = mask_or(other_mask, dom_mask) + # The following 3 lines control the domain filling + if dom_mask.any(): + (_, fval) = ufunc_fills[np.true_divide] + other_data = np.where( + dom_mask, other_data.dtype.type(fval), other_data) + self._mask |= new_mask + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + self._data.__itruediv__(other_data) + return self + + def __ipow__(self, other): + """ + Raise self to the power other, in place. + + """ + other_data = getdata(other) + other_data = np.where(self._mask, other_data.dtype.type(1), other_data) + other_mask = getmask(other) + with np.errstate(divide='ignore', invalid='ignore'): + self._data.__ipow__(other_data) + invalid = np.logical_not(np.isfinite(self._data)) + if invalid.any(): + if self._mask is not nomask: + self._mask |= invalid + else: + self._mask = invalid + np.copyto(self._data, self.fill_value, where=invalid) + new_mask = mask_or(other_mask, invalid) + self._mask = mask_or(self._mask, new_mask) + return self + + def __float__(self): + """ + Convert to float. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + warnings.warn("Warning: converting a masked element to nan.", stacklevel=2) + return np.nan + return float(self.item()) + + def __int__(self): + """ + Convert to int. + + """ + if self.size > 1: + raise TypeError("Only length-1 arrays can be converted " + "to Python scalars") + elif self._mask: + raise MaskError('Cannot convert masked element to a Python int.') + return int(self.item()) + + @property + def imag(self): + """ + The imaginary part of the masked array. + + This property is a view on the imaginary part of this `MaskedArray`. + + See Also + -------- + real + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.imag + masked_array(data=[1.0, --, 1.6], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.imag.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_imag = imag.fget + + @property + def real(self): + """ + The real part of the masked array. + + This property is a view on the real part of this `MaskedArray`. 
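+
+        Since the result is a view, writing to it updates the original
+        array's data (a small sketch, assuming ``import numpy as np``):
+
+        >>> x = np.ma.array([1+1.j, 3.45+1.6j], mask=[False, True])
+        >>> x.real[0] = 9.0
+        >>> x.data[0]
+        np.complex128(9+1j)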
+ + See Also + -------- + imag + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) + >>> x.real + masked_array(data=[1.0, --, 3.45], + mask=[False, True, False], + fill_value=1e+20) + + """ + result = self._data.real.view(type(self)) + result.__setmask__(self._mask) + return result + + # kept for compatibility + get_real = real.fget + + def count(self, axis=None, keepdims=np._NoValue): + """ + Count the non-masked elements of the array along the given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis or axes along which the count is performed. + The default, None, performs the count over all + the dimensions of the input array. `axis` may be negative, in + which case it counts from the last to the first axis. + If this is a tuple of ints, the count is performed on multiple + axes, instead of a single axis or all the axes as before. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + result : ndarray or scalar + An array with the same shape as the input array, with the specified + axis removed. If the array is a 0-d array, or if `axis` is None, a + scalar is returned. + + See Also + -------- + ma.count_masked : Count masked elements in array or along a given axis. + + Examples + -------- + >>> import numpy.ma as ma + >>> a = ma.arange(6).reshape((2, 3)) + >>> a[1, :] = ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, --, --]], + mask=[[False, False, False], + [ True, True, True]], + fill_value=999999) + >>> a.count() + 3 + + When the `axis` keyword is specified an array of appropriate size is + returned. + + >>> a.count(axis=0) + array([1, 1, 1]) + >>> a.count(axis=1) + array([3, 0]) + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + m = self._mask + # special case for matrices (we assume no other subclasses modify + # their dimensions) + if isinstance(self.data, np.matrix): + if m is nomask: + m = np.zeros(self.shape, dtype=np.bool) + m = m.view(type(self.data)) + + if m is nomask: + # compare to _count_reduce_items in _methods.py + + if self.shape == (): + if axis not in (None, 0): + raise np.exceptions.AxisError(axis=axis, ndim=self.ndim) + return 1 + elif axis is None: + if kwargs.get('keepdims'): + return np.array(self.size, dtype=np.intp, ndmin=self.ndim) + return self.size + + axes = normalize_axis_tuple(axis, self.ndim) + items = 1 + for ax in axes: + items *= self.shape[ax] + + if kwargs.get('keepdims'): + out_dims = list(self.shape) + for a in axes: + out_dims[a] = 1 + else: + out_dims = [d for n, d in enumerate(self.shape) + if n not in axes] + # make sure to return a 0-d array if axis is supplied + return np.full(out_dims, items, dtype=np.intp) + + # take care of the masked singleton + if self is masked: + return 0 + + return (~m).sum(axis=axis, dtype=np.intp, **kwargs) + + def ravel(self, order='C'): + """ + Returns a 1D version of self, as a view. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `a` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. 
Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + (Masked arrays currently use 'A' on the data when 'K' is passed.) + + Returns + ------- + MaskedArray + Output view is of shape ``(self.size,)`` (or + ``(np.ma.product(self.shape),)``). + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.ravel() + masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], + mask=[False, True, False, True, False, True, False, True, + False], + fill_value=999999) + + """ + # The order of _data and _mask could be different (it shouldn't be + # normally). Passing order `K` or `A` would be incorrect. + # So we ignore the mask memory order. + # TODO: We don't actually support K, so use A instead. We could + # try to guess this correct by sorting strides or deprecate. + if order in "kKaA": + order = "F" if self._data.flags.fnc else "C" + r = ndarray.ravel(self._data, order=order).view(type(self)) + r._update_from(self) + if self._mask is not nomask: + r._mask = ndarray.ravel(self._mask, order=order).reshape(r.shape) + else: + r._mask = nomask + return r + + def reshape(self, *s, **kwargs): + """ + Give a new shape to the array without changing its data. + + Returns a masked array containing the same data, but with a new shape. + The result is a view on the original array; if this is not possible, a + ValueError is raised. + + Parameters + ---------- + shape : int or tuple of ints + The new shape should be compatible with the original shape. If an + integer is supplied, then the result will be a 1-D array of that + length. + order : {'C', 'F'}, optional + Determines whether the array data should be viewed as in C + (row-major) or FORTRAN (column-major) order. + + Returns + ------- + reshaped_array : array + A new view on the array. + + See Also + -------- + reshape : Equivalent function in the masked array module. + numpy.ndarray.reshape : Equivalent method on ndarray object. + numpy.reshape : Equivalent function in the NumPy module. + + Notes + ----- + The reshaping operation cannot guarantee that a copy will not be made, + to modify the shape in place, use ``a.shape = s`` + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) + >>> x + masked_array( + data=[[--, 2], + [3, --]], + mask=[[ True, False], + [False, True]], + fill_value=999999) + >>> x = x.reshape((4,1)) + >>> x + masked_array( + data=[[--], + [2], + [3], + [--]], + mask=[[ True], + [False], + [False], + [ True]], + fill_value=999999) + + """ + kwargs.update(order=kwargs.get('order', 'C')) + result = self._data.reshape(*s, **kwargs).view(type(self)) + result._update_from(self) + mask = self._mask + if mask is not nomask: + result._mask = mask.reshape(*s, **kwargs) + return result + + def resize(self, newshape, refcheck=True, order=False): + """ + .. warning:: + + This method does nothing, except raise a ValueError exception. 
A + masked array does not own its data and therefore cannot safely be + resized in place. Use the `numpy.ma.resize` function instead. + + This method is difficult to implement safely and may be deprecated in + future releases of NumPy. + + """ + # Note : the 'order' keyword looks broken, let's just drop it + errmsg = "A masked array does not own its data "\ + "and therefore cannot be resized.\n" \ + "Use the numpy.ma.resize function instead." + raise ValueError(errmsg) + + def put(self, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + Sets self._data.flat[n] = values[n] for each n in indices. + If `values` is shorter than `indices` then it will repeat. + If `values` has some masked values, the initial mask is updated + in consequence, else the corresponding values are unmasked. + + Parameters + ---------- + indices : 1-D array_like + Target indices, interpreted as integers. + values : array_like + Values to place in self._data copy at target indices. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + 'raise' : raise an error. + 'wrap' : wrap around. + 'clip' : clip to the range. + + Notes + ----- + `values` can be a scalar or length 1 array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.put([0,4,8],[10,20,30]) + >>> x + masked_array( + data=[[10, --, 3], + [--, 20, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + >>> x.put(4,999) + >>> x + masked_array( + data=[[10, --, 3], + [--, 999, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + + """ + # Hard mask: Get rid of the values/indices that fall on masked data + if self._hardmask and self._mask is not nomask: + mask = self._mask[indices] + indices = narray(indices, copy=None) + values = narray(values, copy=None, subok=True) + values.resize(indices.shape) + indices = indices[~mask] + values = values[~mask] + + self._data.put(indices, values, mode=mode) + + # short circuit if neither self nor values are masked + if self._mask is nomask and getmask(values) is nomask: + return + + m = getmaskarray(self) + + if getmask(values) is nomask: + m.put(indices, False, mode=mode) + else: + m.put(indices, values._mask, mode=mode) + m = make_mask(m, copy=False, shrink=True) + self._mask = m + return + + def ids(self): + """ + Return the addresses of the data and mask areas. + + Parameters + ---------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) + >>> x.ids() + (166670640, 166659832) # may vary + + If the array has no mask, the address of `nomask` is returned. This address + is typically not close to the data in memory: + + >>> x = np.ma.array([1, 2, 3]) + >>> x.ids() + (166691080, 3083169284) # may vary + + """ + if self._mask is nomask: + return (self.ctypes.data, id(nomask)) + return (self.ctypes.data, self._mask.ctypes.data) + + def iscontiguous(self): + """ + Return a boolean indicating whether the data is contiguous. 
+ + Parameters + ---------- + None + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3]) + >>> x.iscontiguous() + True + + `iscontiguous` returns one of the flags of the masked array: + + >>> x.flags + C_CONTIGUOUS : True + F_CONTIGUOUS : True + OWNDATA : False + WRITEABLE : True + ALIGNED : True + WRITEBACKIFCOPY : False + + """ + return self.flags['CONTIGUOUS'] + + def all(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if all elements evaluate to True. + + The output array is masked where all the values along the given axis + are masked: if the output would have been a scalar and that all the + values are masked, then the output is `masked`. + + Refer to `numpy.all` for full documentation. + + See Also + -------- + numpy.ndarray.all : corresponding function for ndarrays + numpy.all : equivalent function + + Examples + -------- + >>> import numpy as np + >>> np.ma.array([1,2,3]).all() + True + >>> a = np.ma.array([1,2,3], mask=True) + >>> (a.all() is np.ma.masked) + True + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(True).all(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + return masked + return d + self.filled(True).all(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def any(self, axis=None, out=None, keepdims=np._NoValue): + """ + Returns True if any of the elements of `a` evaluate to True. + + Masked values are considered as False during computation. + + Refer to `numpy.any` for full documentation. + + See Also + -------- + numpy.ndarray.any : corresponding function for ndarrays + numpy.any : equivalent function + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + mask = _check_mask_axis(self._mask, axis, **kwargs) + if out is None: + d = self.filled(False).any(axis=axis, **kwargs).view(type(self)) + if d.ndim: + d.__setmask__(mask) + elif mask: + d = masked + return d + self.filled(False).any(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + if out.ndim or mask: + out.__setmask__(mask) + return out + + def nonzero(self): + """ + Return the indices of unmasked elements that are not zero. + + Returns a tuple of arrays, one for each dimension, containing the + indices of the non-zero elements in that dimension. The corresponding + non-zero values can be obtained with:: + + a[a.nonzero()] + + To group the indices by element, rather than dimension, use + instead:: + + np.transpose(a.nonzero()) + + The result of this is always a 2d array, with a row for each non-zero + element. + + Parameters + ---------- + None + + Returns + ------- + tuple_of_arrays : tuple + Indices of elements that are non-zero. + + See Also + -------- + numpy.nonzero : + Function operating on ndarrays. + flatnonzero : + Return indices that are non-zero in the flattened version of the input + array. + numpy.ndarray.nonzero : + Equivalent ndarray method. + count_nonzero : + Counts the number of non-zero elements in the input array. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array(np.eye(3)) + >>> x + masked_array( + data=[[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]], + mask=False, + fill_value=1e+20) + >>> x.nonzero() + (array([0, 1, 2]), array([0, 1, 2])) + + Masked elements are ignored. 
+ + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[1.0, 0.0, 0.0], + [0.0, --, 0.0], + [0.0, 0.0, 1.0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1e+20) + >>> x.nonzero() + (array([0, 2]), array([0, 2])) + + Indices can also be grouped by element. + + >>> np.transpose(x.nonzero()) + array([[0, 0], + [2, 2]]) + + A common use for ``nonzero`` is to find the indices of an array, where + a condition is True. Given an array `a`, the condition `a` > 3 is a + boolean array and since False is interpreted as 0, ma.nonzero(a > 3) + yields the indices of the `a` where the condition is true. + + >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) + >>> a > 3 + masked_array( + data=[[False, False, False], + [ True, True, True], + [ True, True, True]], + mask=False, + fill_value=True) + >>> ma.nonzero(a > 3) + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + The ``nonzero`` method of the condition array can also be called. + + >>> (a > 3).nonzero() + (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) + + """ + return np.asarray(self.filled(0)).nonzero() + + def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): + """ + (this docstring should be overwritten) + """ + # !!!: implement out + test! + m = self._mask + if m is nomask: + result = super().trace(offset=offset, axis1=axis1, axis2=axis2, + out=out) + return result.astype(dtype) + else: + D = self.diagonal(offset=offset, axis1=axis1, axis2=axis2) + return D.astype(dtype).filled(0).sum(axis=-1, out=out) + trace.__doc__ = ndarray.trace.__doc__ + + def dot(self, b, out=None, strict=False): + """ + a.dot(b, out=None) + + Masked dot product of two arrays. Note that `out` and `strict` are + located in different positions than in `ma.dot`. In order to + maintain compatibility with the functional version, it is + recommended that the optional arguments be treated as keyword only. + At some point that may be mandatory. + + Parameters + ---------- + b : masked_array_like + Inputs array. + out : masked_array, optional + Output argument. This must have the exact kind that would be + returned if it was not used. In particular, it must have the + right type, must be C-contiguous, and its dtype must be the + dtype that would be returned for `ma.dot(a,b)`. This is a + performance feature. Therefore, if these conditions are not + met, an exception is raised, instead of attempting to be + flexible. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) + for the computation. Default is False. Propagating the mask + means that if a masked value appears in a row or column, the + whole row or column is considered masked. + + See Also + -------- + numpy.ma.dot : equivalent function + + """ + return dot(self, b, out=out, strict=strict) + + def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Return the sum of the array elements over the given axis. + + Masked elements are set to 0 internally. + + Refer to `numpy.sum` for full documentation. 
+
+        See Also
+        --------
+        numpy.ndarray.sum : corresponding function for ndarrays
+        numpy.sum : equivalent function
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False,  True, False],
+                [ True, False,  True],
+                [False,  True, False]],
+          fill_value=999999)
+        >>> x.sum()
+        25
+        >>> x.sum(axis=1)
+        masked_array(data=[4, 5, 16],
+                     mask=[False, False, False],
+               fill_value=999999)
+        >>> x.sum(axis=0)
+        masked_array(data=[8, 5, 12],
+                     mask=[False, False, False],
+               fill_value=999999)
+        >>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
+        <class 'numpy.int64'>
+
+        """
+        kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
+
+        _mask = self._mask
+        newmask = _check_mask_axis(_mask, axis, **kwargs)
+        # No explicit output
+        if out is None:
+            result = self.filled(0).sum(axis, dtype=dtype, **kwargs)
+            rndim = getattr(result, 'ndim', 0)
+            if rndim:
+                result = result.view(type(self))
+                result.__setmask__(newmask)
+            elif newmask:
+                result = masked
+            return result
+        # Explicit output
+        result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
+        if isinstance(out, MaskedArray):
+            outmask = getmask(out)
+            if outmask is nomask:
+                outmask = out._mask = make_mask_none(out.shape)
+            outmask.flat = newmask
+        return out
+
+    def cumsum(self, axis=None, dtype=None, out=None):
+        """
+        Return the cumulative sum of the array elements over the given axis.
+
+        Masked values are set to 0 internally during the computation.
+        However, their position is saved, and the result will be masked at
+        the same locations.
+
+        Refer to `numpy.cumsum` for full documentation.
+
+        Notes
+        -----
+        The mask is lost if `out` is not a valid :class:`ma.MaskedArray` !
+
+        Arithmetic is modular when using integer types, and no error is
+        raised on overflow.
+
+        See Also
+        --------
+        numpy.ndarray.cumsum : corresponding function for ndarrays
+        numpy.cumsum : equivalent function
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
+        >>> marr.cumsum()
+        masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
+                     mask=[False, False, False,  True,  True,  True, False, False,
+                           False, False],
+               fill_value=999999)
+
+        """
+        result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
+        if out is not None:
+            if isinstance(out, MaskedArray):
+                out.__setmask__(self.mask)
+            return out
+        result = result.view(type(self))
+        result.__setmask__(self._mask)
+        return result
+
+    def prod(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
+        """
+        Return the product of the array elements over the given axis.
+
+        Masked elements are set to 1 internally for computation.
+
+        Refer to `numpy.prod` for full documentation.
+
+        Notes
+        -----
+        Arithmetic is modular when using integer types, and no error is raised
+        on overflow.
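+
+        For example, a product of ``int64`` values simply wraps around
+        instead of raising (an illustrative sketch, assuming
+        ``import numpy as np``):
+
+        >>> np.ma.array([2**62, 4], dtype=np.int64).prod()
+        np.int64(0)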
+ + See Also + -------- + numpy.ndarray.prod : corresponding function for ndarrays + numpy.prod : equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + # No explicit output + if out is None: + result = self.filled(1).prod(axis, dtype=dtype, **kwargs) + rndim = getattr(result, 'ndim', 0) + if rndim: + result = result.view(type(self)) + result.__setmask__(newmask) + elif newmask: + result = masked + return result + # Explicit output + result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + return out + product = prod + + def cumprod(self, axis=None, dtype=None, out=None): + """ + Return the cumulative product of the array elements over the given axis. + + Masked values are set to 1 internally during the computation. + However, their position is saved, and the result will be masked at + the same locations. + + Refer to `numpy.cumprod` for full documentation. + + Notes + ----- + The mask is lost if `out` is not a valid MaskedArray ! + + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + See Also + -------- + numpy.ndarray.cumprod : corresponding function for ndarrays + numpy.cumprod : equivalent function + """ + result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + result = result.view(type(self)) + result.__setmask__(self._mask) + return result + + def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): + """ + Returns the average of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.ndarray.mean : corresponding function for ndarrays + numpy.mean : Equivalent function + numpy.ma.average : Weighted average. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1,2,3], mask=[False, False, True]) + >>> a + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.mean() + 1.5 + + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + if self._mask is nomask: + result = super().mean(axis=axis, dtype=dtype, **kwargs)[()] + else: + is_float16_result = False + if dtype is None: + if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool)): + dtype = mu.dtype('f8') + elif issubclass(self.dtype.type, ntypes.float16): + dtype = mu.dtype('f4') + is_float16_result = True + dsum = self.sum(axis=axis, dtype=dtype, **kwargs) + cnt = self.count(axis=axis, **kwargs) + if cnt.shape == () and (cnt == 0): + result = masked + elif is_float16_result: + result = self.dtype.type(dsum * 1. / cnt) + else: + result = dsum * 1. / cnt + if out is not None: + out.flat = result + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = getmask(result) + return out + return result + + def anom(self, axis=None, dtype=None): + """ + Compute the anomalies (deviations from the arithmetic mean) + along the given axis. + + Returns an array of anomalies, with the same shape as the input and + where the arithmetic mean is computed along the given axis. 
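+
+        Conceptually, the anomalies are the data minus the mean taken
+        along `axis` (an illustrative check, assuming
+        ``import numpy as np``):
+
+        >>> a = np.ma.array([[1., 2.], [3., 4.]])
+        >>> bool(np.all(a.anom(axis=0) == a - a.mean(axis=0)))
+        True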
+ + Parameters + ---------- + axis : int, optional + Axis over which the anomalies are taken. + The default is to use the mean of the flattened array as reference. + dtype : dtype, optional + Type to use in computing the variance. For arrays of integer type + the default is float32; for arrays of float types it is the same as + the array type. + + See Also + -------- + mean : Compute the mean of the array. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1,2,3]) + >>> a.anom() + masked_array(data=[-1., 0., 1.], + mask=False, + fill_value=1e+20) + + """ + m = self.mean(axis, dtype) + if not axis: + return self - m + else: + return self - expand_dims(m, axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue, mean=np._NoValue): + """ + Returns the variance of the array elements along given axis. + + Masked entries are ignored, and result elements which are not + finite will be masked. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.ndarray.var : corresponding function for ndarrays + numpy.var : Equivalent function + """ + kwargs = {} + + if keepdims is not np._NoValue: + kwargs['keepdims'] = keepdims + + # Easy case: nomask, business as usual + if self._mask is nomask: + + if mean is not np._NoValue: + kwargs['mean'] = mean + + ret = super().var(axis=axis, dtype=dtype, out=out, ddof=ddof, + **kwargs)[()] + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(nomask) + return out + return ret + + # Some data are masked, yay! + cnt = self.count(axis=axis, **kwargs) - ddof + + if mean is not np._NoValue: + danom = self - mean + else: + danom = self - self.mean(axis, dtype, keepdims=True) + + if iscomplexobj(self): + danom = umath.absolute(danom) ** 2 + else: + danom *= danom + dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self)) + # Apply the mask if it's not a scalar + if dvar.ndim: + dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0)) + dvar._update_from(self) + elif getmask(dvar): + # Make sure that masked is returned when the scalar is masked. + dvar = masked + if out is not None: + if isinstance(out, MaskedArray): + out.flat = 0 + out.__setmask__(True) + elif out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or "\ + "more location." + raise MaskError(errmsg) + else: + out.flat = np.nan + return out + # In case with have an explicit output + if out is not None: + # Set the data + out.flat = dvar + # Set the mask if needed + if isinstance(out, MaskedArray): + out.__setmask__(dvar.mask) + return out + return dvar + var.__doc__ = np.var.__doc__ + + def std(self, axis=None, dtype=None, out=None, ddof=0, + keepdims=np._NoValue, mean=np._NoValue): + """ + Returns the standard deviation of the array elements along given axis. + + Masked entries are ignored. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.ndarray.std : corresponding function for ndarrays + numpy.std : Equivalent function + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + dvar = self.var(axis, dtype, out, ddof, **kwargs) + if dvar is not masked: + if out is not None: + np.power(out, 0.5, out=out, casting='unsafe') + return out + dvar = sqrt(dvar) + return dvar + + def round(self, decimals=0, out=None): + """ + Return each element rounded to the given number of decimals. + + Refer to `numpy.around` for full documentation. 
+ + See Also + -------- + numpy.ndarray.round : corresponding function for ndarrays + numpy.around : equivalent function + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], + ... mask=[0, 0, 0, 1, 0, 0]) + >>> ma.round(x) + masked_array(data=[1.0, 2.0, 2.0, --, 2.0, 3.0], + mask=[False, False, False, True, False, False], + fill_value=1e+20) + + """ + result = self._data.round(decimals=decimals, out=out).view(type(self)) + if result.ndim > 0: + result._mask = self._mask + result._update_from(self) + elif self._mask: + # Return masked when the scalar is masked + result = masked + # No explicit output: we're done + if out is None: + return result + if isinstance(out, MaskedArray): + out.__setmask__(self._mask) + return out + + def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, + fill_value=None, *, stable=False): + """ + Return an ndarray of indices that sort the array along the + specified axis. Masked values are filled beforehand to + `fill_value`. + + Parameters + ---------- + axis : int, optional + Axis along which to sort. If None, the default, the flattened array + is used. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is an array with fields defined, this argument specifies + which fields to compare first, second, etc. Not all fields need be + specified. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. + stable : bool, optional + Only for compatibility with ``np.argsort``. Ignored. + + Returns + ------- + index_array : ndarray, int + Array of indices that sort `a` along the specified axis. + In other words, ``a[index_array]`` yields a sorted `a`. + + See Also + -------- + ma.MaskedArray.sort : Describes sorting algorithms used. + lexsort : Indirect stable sort with multiple keys. + numpy.ndarray.sort : Inplace sort. + + Notes + ----- + See `sort` for notes on the different sorting algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([3,2,1], mask=[False, False, True]) + >>> a + masked_array(data=[3, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> a.argsort() + array([1, 0, 2]) + + """ + if stable: + raise ValueError( + "`stable` parameter is not supported for masked arrays." + ) + + # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default + if axis is np._NoValue: + axis = _deprecate_argsort_axis(self) + + if fill_value is None: + if endwith: + # nan > inf + if np.issubdtype(self.dtype, np.floating): + fill_value = np.nan + else: + fill_value = minimum_fill_value(self) + else: + fill_value = maximum_fill_value(self) + + filled = self.filled(fill_value) + return filled.argsort(axis=axis, kind=kind, order=order) + + def argmin(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Return array of indices to the minimum values along the given axis. 
+ + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + minimum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + ndarray or scalar + If multi-dimension input, returns a new ndarray of indices to the + minimum values along the given axis. Otherwise, returns a scalar + of index to the minimum values along the given axis. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) + >>> x.shape = (2,2) + >>> x + masked_array( + data=[[--, --], + [2, 3]], + mask=[[ True, True], + [False, False]], + fill_value=999999) + >>> x.argmin(axis=0, fill_value=-1) + array([0, 0]) + >>> x.argmin(axis=0, fill_value=9) + array([1, 1]) + + """ + if fill_value is None: + fill_value = minimum_fill_value(self) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmin(axis, out=out, keepdims=keepdims) + + def argmax(self, axis=None, fill_value=None, out=None, *, + keepdims=np._NoValue): + """ + Returns array of indices of the maximum values along the given axis. + Masked values are treated as if they had the value fill_value. + + Parameters + ---------- + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis + fill_value : scalar or None, optional + Value used to fill in the masked values. If None, the output of + maximum_fill_value(self._data) is used instead. + out : {None, array}, optional + Array into which the result can be placed. Its type is preserved + and it must be of the right shape to hold the output. + + Returns + ------- + index_array : {integer_array} + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(6).reshape(2,3) + >>> a.argmax() + 5 + >>> a.argmax(0) + array([1, 1, 1]) + >>> a.argmax(1) + array([2, 2]) + + """ + if fill_value is None: + fill_value = maximum_fill_value(self._data) + d = self.filled(fill_value).view(ndarray) + keepdims = False if keepdims is np._NoValue else bool(keepdims) + return d.argmax(axis, out=out, keepdims=keepdims) + + def sort(self, axis=-1, kind=None, order=None, endwith=True, + fill_value=None, *, stable=False): + """ + Sort the array, in-place + + Parameters + ---------- + a : array_like + Array to be sorted. + axis : int, optional + Axis along which to sort. If None, the array is flattened before + sorting. The default is -1, which sorts along the last axis. + kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional + The sorting algorithm used. + order : list, optional + When `a` is a structured array, this argument specifies which fields + to compare first, second, and so on. This list does not need to + include all of the fields. + endwith : {True, False}, optional + Whether missing values (if any) should be treated as the largest values + (True) or the smallest values (False) + When the array contains unmasked values sorting at the same extremes of the + datatype, the ordering of these values and the masked values is + undefined. + fill_value : scalar or None, optional + Value used internally for the masked values. + If ``fill_value`` is not None, it supersedes ``endwith``. 
+ stable : bool, optional + Only for compatibility with ``np.sort``. Ignored. + + Returns + ------- + sorted_array : ndarray + Array of the same type and shape as `a`. + + See Also + -------- + numpy.ndarray.sort : Method to sort an array in-place. + argsort : Indirect sort. + lexsort : Indirect stable sort on multiple keys. + searchsorted : Find elements in a sorted array. + + Notes + ----- + See ``sort`` for notes on the different sorting algorithms. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Default + >>> a.sort() + >>> a + masked_array(data=[1, 3, 5, --, --], + mask=[False, False, False, True, True], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # Put missing values in the front + >>> a.sort(endwith=False) + >>> a + masked_array(data=[--, --, 1, 3, 5], + mask=[ True, True, False, False, False], + fill_value=999999) + + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> # fill_value takes over endwith + >>> a.sort(endwith=False, fill_value=3) + >>> a + masked_array(data=[1, --, --, 3, 5], + mask=[False, True, True, False, False], + fill_value=999999) + + """ + if stable: + raise ValueError( + "`stable` parameter is not supported for masked arrays." + ) + + if self._mask is nomask: + ndarray.sort(self, axis=axis, kind=kind, order=order) + return + + if self is masked: + return + + sidx = self.argsort(axis=axis, kind=kind, order=order, + fill_value=fill_value, endwith=endwith) + + self[...] = np.take_along_axis(self, sidx, axis=axis) + + def min(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the minimum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + If this is a tuple of ints, the minimum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must be of + the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of `minimum_fill_value`. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amin : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.minimum_fill_value + Returns the minimum filling value for a given datatype. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[1., -2., 3.], [0.2, -0.7, 0.1]] + >>> mask = [[1, 1, 0], [0, 0, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[--, --, 3.0], + [0.2, -0.7, --]], + mask=[[ True, True, False], + [False, False, True]], + fill_value=1e+20) + >>> ma.min(masked_x) + -0.7 + >>> ma.min(masked_x, axis=-1) + masked_array(data=[3.0, -0.7], + mask=[False, False], + fill_value=1e+20) + >>> ma.min(masked_x, axis=0, keepdims=True) + masked_array(data=[[0.2, -0.7, 3.0]], + mask=[[False, False, False]], + fill_value=1e+20) + >>> mask = [[1, 1, 1,], [1, 1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.min(masked_x, axis=0) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = minimum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).min( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + self.filled(fill_value).min(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def max(self, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + """ + Return the maximum along a given axis. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Axis along which to operate. By default, ``axis`` is None and the + flattened input is used. + If this is a tuple of ints, the maximum is selected over multiple + axes, instead of a single axis or all the axes as before. + out : array_like, optional + Alternative output array in which to place the result. Must + be of the same shape and buffer length as the expected output. + fill_value : scalar or None, optional + Value used to fill in the masked values. + If None, use the output of maximum_fill_value(). + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + amax : array_like + New array holding the result. + If ``out`` was specified, ``out`` is returned. + + See Also + -------- + ma.maximum_fill_value + Returns the maximum filling value for a given datatype. 
+ + Examples + -------- + >>> import numpy.ma as ma + >>> x = [[-1., 2.5], [4., -2.], [3., 0.]] + >>> mask = [[0, 0], [1, 0], [1, 0]] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array( + data=[[-1.0, 2.5], + [--, -2.0], + [--, 0.0]], + mask=[[False, False], + [ True, False], + [ True, False]], + fill_value=1e+20) + >>> ma.max(masked_x) + 2.5 + >>> ma.max(masked_x, axis=0) + masked_array(data=[-1.0, 2.5], + mask=[False, False], + fill_value=1e+20) + >>> ma.max(masked_x, axis=1, keepdims=True) + masked_array( + data=[[2.5], + [-2.0], + [0.0]], + mask=[[False], + [False], + [False]], + fill_value=1e+20) + >>> mask = [[1, 1], [1, 1], [1, 1]] + >>> masked_x = ma.masked_array(x, mask) + >>> ma.max(masked_x, axis=1) + masked_array(data=[--, --, --], + mask=[ True, True, True], + fill_value=1e+20, + dtype=float64) + """ + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + _mask = self._mask + newmask = _check_mask_axis(_mask, axis, **kwargs) + if fill_value is None: + fill_value = maximum_fill_value(self) + # No explicit output + if out is None: + result = self.filled(fill_value).max( + axis=axis, out=out, **kwargs).view(type(self)) + if result.ndim: + # Set the mask + result.__setmask__(newmask) + # Get rid of Infs + if newmask.ndim: + np.copyto(result, result.fill_value, where=newmask) + elif newmask: + result = masked + return result + # Explicit output + self.filled(fill_value).max(axis=axis, out=out, **kwargs) + if isinstance(out, MaskedArray): + outmask = getmask(out) + if outmask is nomask: + outmask = out._mask = make_mask_none(out.shape) + outmask.flat = newmask + else: + + if out.dtype.kind in 'biu': + errmsg = "Masked data information would be lost in one or more"\ + " location." + raise MaskError(errmsg) + np.copyto(out, np.nan, where=newmask) + return out + + def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): + """ + Return (maximum - minimum) along the given dimension + (i.e. peak-to-peak value). + + .. warning:: + `ptp` preserves the data type of the array. This means the + return value for an input of signed integers with n bits + (e.g. `np.int8`, `np.int16`, etc) is also a signed integer + with n bits. In that case, peak-to-peak values greater than + ``2**(n-1)-1`` will be returned as negative values. An example + with a work-around is shown below. + + Parameters + ---------- + axis : {None, int}, optional + Axis along which to find the peaks. If None (default) the + flattened array is used. + out : {None, array_like}, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + fill_value : scalar or None, optional + Value used to fill in the masked values. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the array. + + Returns + ------- + ptp : ndarray. + A new array holding the result, unless ``out`` was + specified, in which case a reference to ``out`` is returned. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.MaskedArray([[4, 9, 2, 10], + ... 
[6, 9, 7, 12]]) + + >>> x.ptp(axis=1) + masked_array(data=[8, 6], + mask=False, + fill_value=999999) + + >>> x.ptp(axis=0) + masked_array(data=[2, 0, 5, 2], + mask=False, + fill_value=999999) + + >>> x.ptp() + 10 + + This example shows that a negative value can be returned when + the input is an array of signed integers. + + >>> y = np.ma.MaskedArray([[1, 127], + ... [0, 127], + ... [-1, 127], + ... [-2, 127]], dtype=np.int8) + >>> y.ptp(axis=1) + masked_array(data=[ 126, 127, -128, -127], + mask=False, + fill_value=np.int64(999999), + dtype=int8) + + A work-around is to use the `view()` method to view the result as + unsigned integers with the same bit width: + + >>> y.ptp(axis=1).view(np.uint8) + masked_array(data=[126, 127, 128, 129], + mask=False, + fill_value=np.uint64(999999), + dtype=uint8) + """ + if out is None: + result = self.max(axis=axis, fill_value=fill_value, + keepdims=keepdims) + result -= self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + return result + out.flat = self.max(axis=axis, out=out, fill_value=fill_value, + keepdims=keepdims) + min_value = self.min(axis=axis, fill_value=fill_value, + keepdims=keepdims) + np.subtract(out, min_value, out=out, casting='unsafe') + return out + + def partition(self, *args, **kwargs): + warnings.warn("Warning: 'partition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().partition(*args, **kwargs) + + def argpartition(self, *args, **kwargs): + warnings.warn("Warning: 'argpartition' will ignore the 'mask' " + f"of the {self.__class__.__name__}.", + stacklevel=2) + return super().argpartition(*args, **kwargs) + + def take(self, indices, axis=None, out=None, mode='raise'): + """ + Take elements from a masked array along an axis. + + This function does the same thing as "fancy" indexing (indexing arrays + using arrays) for masked arrays. It can be easier to use if you need + elements along a given axis. + + Parameters + ---------- + a : masked_array + The source masked array. + indices : array_like + The indices of the values to extract. Also allow scalars for indices. + axis : int, optional + The axis over which to select values. By default, the flattened + input array is used. + out : MaskedArray, optional + If provided, the result will be placed in this array. It should + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. + + * 'raise' -- raise an error (default) + * 'wrap' -- wrap around + * 'clip' -- clip to the range + + 'clip' mode means that all indices that are too large are replaced + by the index that addresses the last element along that axis. Note + that this disables indexing with negative numbers. + + Returns + ------- + out : MaskedArray + The returned array has the same type as `a`. + + See Also + -------- + numpy.take : Equivalent function for ndarrays. + compress : Take elements using a boolean mask. + take_along_axis : Take elements by matching the array and the index arrays. + + Notes + ----- + This function behaves similarly to `numpy.take`, but it handles masked + values. The mask is retained in the output array, and masked values + in the input array remain masked in the output. 
+ + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([4, 3, 5, 7, 6, 8], mask=[0, 0, 1, 0, 1, 0]) + >>> indices = [0, 1, 4] + >>> np.ma.take(a, indices) + masked_array(data=[4, 3, --], + mask=[False, False, True], + fill_value=999999) + + When `indices` is not one-dimensional, the output also has these dimensions: + + >>> np.ma.take(a, [[0, 1], [2, 3]]) + masked_array(data=[[4, 3], + [--, 7]], + mask=[[False, False], + [ True, False]], + fill_value=999999) + """ + (_data, _mask) = (self._data, self._mask) + cls = type(self) + # Make sure the indices are not masked + maskindices = getmask(indices) + if maskindices is not nomask: + indices = indices.filled(0) + # Get the data, promoting scalars to 0d arrays with [...] so that + # .view works correctly + if out is None: + out = _data.take(indices, axis=axis, mode=mode)[...].view(cls) + else: + np.take(_data, indices, axis=axis, mode=mode, out=out) + # Get the mask + if isinstance(out, MaskedArray): + if _mask is nomask: + outmask = maskindices + else: + outmask = _mask.take(indices, axis=axis, mode=mode) + outmask |= maskindices + out.__setmask__(outmask) + # demote 0d arrays back to scalars, for consistency with ndarray.take + return out[()] + + # Array methods + copy = _arraymethod('copy') + diagonal = _arraymethod('diagonal') + flatten = _arraymethod('flatten') + repeat = _arraymethod('repeat') + squeeze = _arraymethod('squeeze') + swapaxes = _arraymethod('swapaxes') + T = property(fget=lambda self: self.transpose()) + transpose = _arraymethod('transpose') + + @property + def mT(self): + """ + Return the matrix-transpose of the masked array. + + The matrix transpose is the transpose of the last two dimensions, even + if the array is of higher dimension. + + .. versionadded:: 2.0 + + Returns + ------- + result: MaskedArray + The masked array with the last two dimensions transposed + + Raises + ------ + ValueError + If the array is of dimension less than 2. + + See Also + -------- + ndarray.mT: + Equivalent method for arrays + """ + + if self.ndim < 2: + raise ValueError("matrix transpose with ndim < 2 is undefined") + + if self._mask is nomask: + return masked_array(data=self._data.mT) + else: + return masked_array(data=self.data.mT, mask=self.mask.mT) + + def tolist(self, fill_value=None): + """ + Return the data portion of the masked array as a hierarchical Python list. + + Data items are converted to the nearest compatible Python type. + Masked values are converted to `fill_value`. If `fill_value` is None, + the corresponding entries in the output list will be ``None``. + + Parameters + ---------- + fill_value : scalar, optional + The value to use for invalid entries. Default is None. + + Returns + ------- + result : list + The Python list representation of the masked array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) + >>> x.tolist() + [[1, None, 3], [None, 5, None], [7, None, 9]] + >>> x.tolist(-999) + [[1, -999, 3], [-999, 5, -999], [7, -999, 9]] + + """ + _mask = self._mask + # No mask ? Just return .data.tolist ? + if _mask is nomask: + return self._data.tolist() + # Explicit fill_value: fill the array and get the list + if fill_value is not None: + return self.filled(fill_value).tolist() + # Structured array. + names = self.dtype.names + if names: + result = self._data.astype([(_, object) for _ in names]) + for n in names: + result[n][_mask[n]] = None + return result.tolist() + # Standard arrays. 
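+        # Defensive fallback: the unmasked case already returned above,
+        # so this branch is not normally reached.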
+        if _mask is nomask:
+            return [None]
+        # Set temps to save time when dealing w/ marrays.
+        inishape = self.shape
+        result = np.array(self._data.ravel(), dtype=object)
+        result[_mask.ravel()] = None
+        result.shape = inishape
+        return result.tolist()
+
+    def tobytes(self, fill_value=None, order='C'):
+        """
+        Return the array data as a string containing the raw bytes in the array.
+
+        The array is filled with a fill value before the string conversion.
+
+        Parameters
+        ----------
+        fill_value : scalar, optional
+            Value used to fill in the masked values. Default is None, in which
+            case `MaskedArray.fill_value` is used.
+        order : {'C','F','A'}, optional
+            Order of the data item in the copy. Default is 'C'.
+
+            - 'C'   -- C order (row major).
+            - 'F'   -- Fortran order (column major).
+            - 'A'   -- Any, current order of array.
+            - None  -- Same as 'A'.
+
+        See Also
+        --------
+        numpy.ndarray.tobytes
+        tolist, tofile
+
+        Notes
+        -----
+        As for `ndarray.tobytes`, information about the shape, dtype, etc.,
+        but also about `fill_value`, will be lost.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
+        >>> x.tobytes()
+        b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
+
+        """
+        return self.filled(fill_value).tobytes(order=order)
+
+    def tofile(self, fid, sep="", format="%s"):
+        """
+        Save a masked array to a file in binary format.
+
+        .. warning::
+            This function is not implemented yet.
+
+        Raises
+        ------
+        NotImplementedError
+            When `tofile` is called.
+
+        """
+        raise NotImplementedError("MaskedArray.tofile() not implemented yet.")
+
+    def toflex(self):
+        """
+        Transforms a masked array into a flexible-type array.
+
+        The flexible type array that is returned will have two fields:
+
+        * the ``_data`` field stores the ``_data`` part of the array.
+        * the ``_mask`` field stores the ``_mask`` part of the array.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        record : ndarray
+            A new flexible-type `ndarray` with two fields: the first element
+            containing a value, the second element containing the corresponding
+            mask boolean. The returned record shape matches self.shape.
+
+        Notes
+        -----
+        A side-effect of transforming a masked array into a flexible `ndarray` is
+        that meta information (``fill_value``, ...) will be lost.
+
+        Examples
+        --------
+        >>> import numpy as np
+        >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
+        >>> x
+        masked_array(
+          data=[[1, --, 3],
+                [--, 5, --],
+                [7, --, 9]],
+          mask=[[False,  True, False],
+                [ True, False,  True],
+                [False,  True, False]],
+          fill_value=999999)
+        >>> x.toflex()
+        array([[(1, False), (2,  True), (3, False)],
+               [(4,  True), (5, False), (6,  True)],
+               [(7, False), (8,  True), (9, False)]],
+              dtype=[('_data', '<i8'), ('_mask', '?')])
+
+        """
+        # Get the basic dtype.
+        ddtype = self.dtype
+        # Make sure we have a mask
+        _mask = self._mask
+        if _mask is None:
+            _mask = make_mask_none(self.shape, ddtype)
+        # And get its dtype
+        mdtype = self._mask.dtype
+
+        record = np.ndarray(shape=self.shape,
+                            dtype=[('_data', ddtype), ('_mask', mdtype)])
+        record['_data'] = self._data
+        record['_mask'] = self._mask
+        return record
+    torecords = toflex
+
+    def __getstate__(self):
+        """Return the internal state of the masked array, for pickling
+        purposes.
+
+        """
+        cf = 'CF'[self.flags.fnc]
+        data_state = super().__reduce__()[2]
+        return data_state + (getmaskarray(self).tobytes(cf), self._fill_value)
+
+    def __setstate__(self, state):
+        """Restore the internal state of the masked array, for
+        pickling purposes.  ``state`` is typically the output of the
+        ``__getstate__`` output, and is a 5-tuple:
+
+        - class name
+        - a tuple giving the shape of the data
+        - a typecode for the data
+        - a binary string for the data
+        - a binary string for the mask.
+
+        """
+        (_, shp, typ, isf, raw, msk, flv) = state
+        super().__setstate__((shp, typ, isf, raw))
+        self._mask.__setstate__((shp, make_mask_descr(typ), isf, msk))
+        self.fill_value = flv
+
+    def __reduce__(self):
+        """Return a 3-tuple for pickling a MaskedArray.
+
+        """
+        return (_mareconstruct,
+                (self.__class__, self._baseclass, (0,), 'b',),
+                self.__getstate__())
+
+    def __deepcopy__(self, memo=None):
+        from copy import deepcopy
+        copied = MaskedArray.__new__(type(self), self, copy=True)
+        if memo is None:
+            memo = {}
+        memo[id(self)] = copied
+        for (k, v) in self.__dict__.items():
+            copied.__dict__[k] = deepcopy(v, memo)
+        # as clearly documented for np.copy(), you need to use
+        # deepcopy() directly for arrays of object type that may
+        # contain compound types--you cannot depend on normal
+        # copy semantics to do the right thing here
+        copied._data[...] = deepcopy(copied._data)
+        return copied
+
+
+def _mareconstruct(subtype, baseclass, baseshape, basetype,):
+    """Internal function that builds a new MaskedArray from the
+    information stored in a pickle.
+
+    """
+    _data = ndarray.__new__(baseclass, baseshape, basetype)
+    _mask = ndarray.__new__(ndarray, baseshape, make_mask_descr(basetype))
+    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
+
+class mvoid(MaskedArray):
+    """
+    Fake a 'void' object to use for masked array with structured dtypes.
+    """
+
+    def __new__(self, data, mask=nomask, dtype=None, fill_value=None,
+                hardmask=False, copy=False, subok=True):
+        _data = np.array(data, copy=copy, subok=subok, dtype=dtype)
+        _data = _data.view(self)
+        _data._hardmask = hardmask
+        if mask is not nomask:
+            if isinstance(mask, np.void):
+                _data._mask = mask
+            else:
+                try:
+                    # Mask is already a 0D array
+                    _data._mask = np.void(mask)
+                except TypeError:
+                    # Transform the mask to a void
+                    mdtype = make_mask_descr(dtype)
+                    _data._mask = np.array(mask, dtype=mdtype)[()]
+        if fill_value is not None:
+            _data.fill_value = fill_value
+        return _data
+
+    @property
+    def _data(self):
+        # Make sure that the _data part is a np.void
+        return super()._data[()]
+
+    def __getitem__(self, indx):
+        """
+        Get the index.
+
+        """
+        m = self._mask
+        if isinstance(m[indx], ndarray):
+            # Can happen when indx is a multi-dimensional field:
+            # A = ma.masked_array(data=[([0,1],)], mask=[([True,
+            #     False],)], dtype=[("A", ">i2", (2,))])
+            # x = A[0]; y = x["A"]; then y.mask["A"].size==2
+            # and we can not say masked/unmasked.
+            # The result is no longer mvoid!
+            # See also issue #6724.
+ return masked_array( + data=self._data[indx], mask=m[indx], + fill_value=self._fill_value[indx], + hard_mask=self._hardmask) + if m is not nomask and m[indx]: + return masked + return self._data[indx] + + def __setitem__(self, indx, value): + self._data[indx] = value + if self._hardmask: + self._mask[indx] |= getattr(value, "_mask", False) + else: + self._mask[indx] = getattr(value, "_mask", False) + + def __str__(self): + m = self._mask + if m is nomask: + return str(self._data) + + rdtype = _replace_dtype_fields(self._data.dtype, "O") + data_arr = super()._data + res = data_arr.astype(rdtype) + _recursive_printoption(res, self._mask, masked_print_option) + return str(res) + + __repr__ = __str__ + + def __iter__(self): + "Defines an iterator for mvoid" + (_data, _mask) = (self._data, self._mask) + if _mask is nomask: + yield from _data + else: + for (d, m) in zip(_data, _mask): + if m: + yield masked + else: + yield d + + def __len__(self): + return self._data.__len__() + + def filled(self, fill_value=None): + """ + Return a copy with masked fields filled with a given value. + + Parameters + ---------- + fill_value : array_like, optional + The value to use for invalid entries. Can be scalar or + non-scalar. If latter is the case, the filled array should + be broadcastable over input array. Default is None, in + which case the `fill_value` attribute is used instead. + + Returns + ------- + filled_void + A `np.void` object + + See Also + -------- + MaskedArray.filled + + """ + return asarray(self).filled(fill_value)[()] + + def tolist(self): + """ + Transforms the mvoid object into a tuple. + + Masked fields are replaced by None. + + Returns + ------- + returned_tuple + Tuple of fields + """ + _mask = self._mask + if _mask is nomask: + return self._data.tolist() + result = [] + for (d, m) in zip(self._data, self._mask): + if m: + result.append(None) + else: + # .item() makes sure we return a standard Python object + result.append(d.item()) + return tuple(result) + + +############################################################################## +# Shortcuts # +############################################################################## + + +def isMaskedArray(x): + """ + Test whether input is an instance of MaskedArray. + + This function returns True if `x` is an instance of MaskedArray + and returns False otherwise. Any object is accepted as input. + + Parameters + ---------- + x : object + Object to test. + + Returns + ------- + result : bool + True if `x` is a MaskedArray. + + See Also + -------- + isMA : Alias to isMaskedArray. + isarray : Alias to isMaskedArray. 
+ + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = np.eye(3, 3) + >>> a + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> m = ma.masked_values(a, 0) + >>> m + masked_array( + data=[[1.0, --, --], + [--, 1.0, --], + [--, --, 1.0]], + mask=[[False, True, True], + [ True, False, True], + [ True, True, False]], + fill_value=0.0) + >>> ma.isMaskedArray(a) + False + >>> ma.isMaskedArray(m) + True + >>> ma.isMaskedArray([0, 1, 2]) + False + + """ + return isinstance(x, MaskedArray) + + +isarray = isMaskedArray +isMA = isMaskedArray # backward compatibility + + +class MaskedConstant(MaskedArray): + # the lone np.ma.masked instance + __singleton = None + + @classmethod + def __has_singleton(cls): + # second case ensures `cls.__singleton` is not just a view on the + # superclass singleton + return cls.__singleton is not None and type(cls.__singleton) is cls + + def __new__(cls): + if not cls.__has_singleton(): + # We define the masked singleton as a float for higher precedence. + # Note that it can be tricky sometimes w/ type comparison + data = np.array(0.) + mask = np.array(True) + + # prevent any modifications + data.flags.writeable = False + mask.flags.writeable = False + + # don't fall back on MaskedArray.__new__(MaskedConstant), since + # that might confuse it - this way, the construction is entirely + # within our control + cls.__singleton = MaskedArray(data, mask=mask).view(cls) + + return cls.__singleton + + def __array_finalize__(self, obj): + if not self.__has_singleton(): + # this handles the `.view` in __new__, which we want to copy across + # properties normally + return super().__array_finalize__(obj) + elif self is self.__singleton: + # not clear how this can happen, play it safe + pass + else: + # everywhere else, we want to downcast to MaskedArray, to prevent a + # duplicate maskedconstant. + self.__class__ = MaskedArray + MaskedArray.__array_finalize__(self, obj) + + def __array_wrap__(self, obj, context=None, return_scalar=False): + return self.view(MaskedArray).__array_wrap__(obj, context) + + def __str__(self): + return str(masked_print_option._display) + + def __repr__(self): + if self is MaskedConstant.__singleton: + return 'masked' + else: + # it's a subclass, or something is wrong, make it obvious + return object.__repr__(self) + + def __format__(self, format_spec): + # Replace ndarray.__format__ with the default, which supports no + # format characters. + # Supporting format characters is unwise here, because we do not know + # what type the user was expecting - better to not guess. + try: + return object.__format__(self, format_spec) + except TypeError: + # 2020-03-23, NumPy 1.19.0 + warnings.warn( + "Format strings passed to MaskedConstant are ignored," + " but in future may error or produce different behavior", + FutureWarning, stacklevel=2 + ) + return object.__format__(self, "") + + def __reduce__(self): + """Override of MaskedArray's __reduce__. + """ + return (self.__class__, ()) + + # inplace operations have no effect. We have to override them to avoid + # trying to modify the readonly data and mask arrays + def __iop__(self, other): + return self + __iadd__ = \ + __isub__ = \ + __imul__ = \ + __ifloordiv__ = \ + __itruediv__ = \ + __ipow__ = \ + __iop__ + del __iop__ # don't leave this around + + def copy(self, *args, **kwargs): + """ Copy is a no-op on the maskedconstant, as it is a scalar """ + # maskedconstant is a scalar, so copy doesn't need to copy. There's + # precedent for this with `np.bool` scalars. 
+ return self + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + def __setattr__(self, attr, value): + if not self.__has_singleton(): + # allow the singleton to be initialized + return super().__setattr__(attr, value) + elif self is self.__singleton: + raise AttributeError( + f"attributes of {self!r} are not writeable") + else: + # duplicate instance - we can end up here from __array_finalize__, + # where we set the __class__ attribute + return super().__setattr__(attr, value) + + +masked = masked_singleton = MaskedConstant() +masked_array = MaskedArray + + +def array(data, dtype=None, copy=False, order=None, + mask=nomask, fill_value=None, keep_mask=True, + hard_mask=False, shrink=True, subok=True, ndmin=0): + """ + Shortcut to MaskedArray. + + The options are in a different order for convenience and backwards + compatibility. + + """ + return MaskedArray(data, mask=mask, dtype=dtype, copy=copy, + subok=subok, keep_mask=keep_mask, + hard_mask=hard_mask, fill_value=fill_value, + ndmin=ndmin, shrink=shrink, order=order) + + +array.__doc__ = masked_array.__doc__ + + +def is_masked(x): + """ + Determine whether input has masked values. + + Accepts any object as input, but always returns False unless the + input is a MaskedArray containing masked values. + + Parameters + ---------- + x : array_like + Array to check for masked values. + + Returns + ------- + result : bool + True if `x` is a MaskedArray with masked values, False otherwise. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) + >>> x + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) + >>> ma.is_masked(x) + True + >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) + >>> x + masked_array(data=[0, 1, 0, 2, 3], + mask=False, + fill_value=42) + >>> ma.is_masked(x) + False + + Always returns False if `x` isn't a MaskedArray. + + >>> x = [False, True, False] + >>> ma.is_masked(x) + False + >>> x = 'a string' + >>> ma.is_masked(x) + False + + """ + m = getmask(x) + if m is nomask: + return False + elif m.any(): + return True + return False + + +############################################################################## +# Extrema functions # +############################################################################## + + +class _extrema_operation(_MaskedUFunc): + """ + Generic class for maximum/minimum functions. + + .. note:: + This is the base class for `_maximum_operation` and + `_minimum_operation`. + + """ + def __init__(self, ufunc, compare, fill_value): + super().__init__(ufunc) + self.compare = compare + self.fill_value_func = fill_value + + def __call__(self, a, b): + "Executes the call behavior." + + return where(self.compare(a, b), a, b) + + def reduce(self, target, axis=np._NoValue): + "Reduce target along the given axis." + target = narray(target, copy=None, subok=True) + m = getmask(target) + + if axis is np._NoValue and target.ndim > 1: + name = self.__name__ + # 2017-05-06, Numpy 1.13.0: warn on axis default + warnings.warn( + f"In the future the default for ma.{name}.reduce will be axis=0, " + f"not the current None, to match np.{name}.reduce. 
" + "Explicitly pass 0 or None to silence this warning.", + MaskedArrayFutureWarning, stacklevel=2) + axis = None + + if axis is not np._NoValue: + kwargs = {'axis': axis} + else: + kwargs = {} + + if m is nomask: + t = self.f.reduce(target, **kwargs) + else: + target = target.filled( + self.fill_value_func(target)).view(type(target)) + t = self.f.reduce(target, **kwargs) + m = umath.logical_and.reduce(m, **kwargs) + if hasattr(t, '_mask'): + t._mask = m + elif m: + t = masked + return t + + def outer(self, a, b): + "Return the function applied to the outer product of a and b." + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + m = nomask + else: + ma = getmaskarray(a) + mb = getmaskarray(b) + m = logical_or.outer(ma, mb) + result = self.f.outer(filled(a), filled(b)) + if not isinstance(result, MaskedArray): + result = result.view(MaskedArray) + result._mask = m + return result + +def min(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.min(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a min method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).min(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +min.__doc__ = MaskedArray.min.__doc__ + +def max(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + + try: + return obj.max(axis=axis, fill_value=fill_value, out=out, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a max method, or if the method doesn't accept a + # fill_value argument + return asanyarray(obj).max(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +max.__doc__ = MaskedArray.max.__doc__ + + +def ptp(obj, axis=None, out=None, fill_value=None, keepdims=np._NoValue): + kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} + try: + return obj.ptp(axis, out=out, fill_value=fill_value, **kwargs) + except (AttributeError, TypeError): + # If obj doesn't have a ptp method or if the method doesn't accept + # a fill_value argument + return asanyarray(obj).ptp(axis=axis, fill_value=fill_value, + out=out, **kwargs) + + +ptp.__doc__ = MaskedArray.ptp.__doc__ + + +############################################################################## +# Definition of functions from the corresponding methods # +############################################################################## + + +class _frommethod: + """ + Define functions from existing MaskedArray methods. + + Parameters + ---------- + methodname : str + Name of the method to transform. + + """ + + def __init__(self, methodname, reversed=False): + self.__name__ = methodname + self.__qualname__ = methodname + self.__doc__ = self.getdoc() + self.reversed = reversed + + def getdoc(self): + "Return the doc of the function (from the doc of the method)." 
+        meth = getattr(MaskedArray, self.__name__, None) or\
+            getattr(np, self.__name__, None)
+        signature = self.__name__ + get_object_signature(meth)
+        if meth is not None:
+            doc = f"""    {signature}
+{getattr(meth, '__doc__', None)}"""
+            return doc
+
+    def __call__(self, a, *args, **params):
+        if self.reversed:
+            args = list(args)
+            a, args[0] = args[0], a
+
+        marr = asanyarray(a)
+        method_name = self.__name__
+        method = getattr(type(marr), method_name, None)
+        if method is None:
+            # use the corresponding np function
+            method = getattr(np, method_name)
+
+        return method(marr, *args, **params)
+
+
+all = _frommethod('all')
+anomalies = anom = _frommethod('anom')
+any = _frommethod('any')
+compress = _frommethod('compress', reversed=True)
+cumprod = _frommethod('cumprod')
+cumsum = _frommethod('cumsum')
+copy = _frommethod('copy')
+diagonal = _frommethod('diagonal')
+harden_mask = _frommethod('harden_mask')
+ids = _frommethod('ids')
+maximum = _extrema_operation(umath.maximum, greater, maximum_fill_value)
+mean = _frommethod('mean')
+minimum = _extrema_operation(umath.minimum, less, minimum_fill_value)
+nonzero = _frommethod('nonzero')
+prod = _frommethod('prod')
+product = _frommethod('product')
+ravel = _frommethod('ravel')
+repeat = _frommethod('repeat')
+shrink_mask = _frommethod('shrink_mask')
+soften_mask = _frommethod('soften_mask')
+std = _frommethod('std')
+sum = _frommethod('sum')
+swapaxes = _frommethod('swapaxes')
+#take = _frommethod('take')
+trace = _frommethod('trace')
+var = _frommethod('var')
+
+count = _frommethod('count')
+
+
+def take(a, indices, axis=None, out=None, mode='raise'):
+    """
+    Take elements from a masked array along an axis.
+
+    This is the function version of `MaskedArray.take`; refer to that
+    method for the full documentation.
+
+    See Also
+    --------
+    MaskedArray.take : Equivalent method.
+    numpy.take : Equivalent function on ndarrays.
+
+    """
+    a = masked_array(a)
+    return a.take(indices, axis=axis, out=out, mode=mode)
+
+
+def power(a, b, third=None):
+    """
+    Returns element-wise base array raised to power from second array.
+
+    This is the masked array version of `numpy.power`. For details see
+    `numpy.power`.
+
+    See Also
+    --------
+    numpy.power
+
+    Notes
+    -----
+    The *out* argument to `numpy.power` is not supported, `third` has to be
+    None.
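+
+    The result is masked wherever either operand is masked, and additionally
+    wherever the computation produced a NaN or an infinity (for example, a
+    negative base raised to a fractional exponent).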
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import numpy.ma as ma
+    >>> x = [11.2, -3.973, 0.801, -1.41]
+    >>> mask = [0, 0, 0, 1]
+    >>> masked_x = ma.masked_array(x, mask)
+    >>> masked_x
+    masked_array(data=[11.2, -3.973, 0.801, --],
+                 mask=[False, False, False, True],
+                 fill_value=1e+20)
+    >>> ma.power(masked_x, 2)
+    masked_array(data=[125.43999999999998, 15.784728999999999,
+                       0.6416010000000001, --],
+                 mask=[False, False, False, True],
+                 fill_value=1e+20)
+    >>> y = [-0.5, 2, 0, 17]
+    >>> masked_y = ma.masked_array(y, mask)
+    >>> masked_y
+    masked_array(data=[-0.5, 2.0, 0.0, --],
+                 mask=[False, False, False, True],
+                 fill_value=1e+20)
+    >>> ma.power(masked_x, masked_y)
+    masked_array(data=[0.2988071523335984, 15.784728999999999, 1.0, --],
+                 mask=[False, False, False, True],
+                 fill_value=1e+20)
+
+    """
+    if third is not None:
+        raise MaskError("3-argument power not supported.")
+    # Get the masks
+    ma = getmask(a)
+    mb = getmask(b)
+    m = mask_or(ma, mb)
+    # Get the rawdata
+    fa = getdata(a)
+    fb = getdata(b)
+    # Get the type of the result (so that we preserve subclasses)
+    if isinstance(a, MaskedArray):
+        basetype = type(a)
+    else:
+        basetype = MaskedArray
+    # Get the result and view it as a (subclass of) MaskedArray
+    with np.errstate(divide='ignore', invalid='ignore'):
+        result = np.where(m, fa, umath.power(fa, fb)).view(basetype)
+    result._update_from(a)
+    # Find where we're in trouble w/ NaNs and Infs
+    invalid = np.logical_not(np.isfinite(result.view(ndarray)))
+    # Add the initial mask
+    if m is not nomask:
+        if not result.ndim:
+            return masked
+        result._mask = np.logical_or(m, invalid)
+    # Fix the invalid parts
+    if invalid.any():
+        if not result.ndim:
+            return masked
+        elif result._mask is nomask:
+            result._mask = invalid
+        result._data[invalid] = result.fill_value
+    return result
+
+
+argmin = _frommethod('argmin')
+argmax = _frommethod('argmax')
+
+
+def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True,
+            fill_value=None, *, stable=None):
+    "Function version of the eponymous method."
+    a = np.asanyarray(a)
+
+    # 2017-04-11, Numpy 1.13.0, gh-8701: warn on axis default
+    if axis is np._NoValue:
+        axis = _deprecate_argsort_axis(a)
+
+    if isinstance(a, MaskedArray):
+        return a.argsort(axis=axis, kind=kind, order=order, endwith=endwith,
+                         fill_value=fill_value, stable=stable)
+    else:
+        return a.argsort(axis=axis, kind=kind, order=order, stable=stable)
+
+
+argsort.__doc__ = MaskedArray.argsort.__doc__
+
+
+def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *,
+         stable=None):
+    """
+    Return a sorted copy of the masked array.
+
+    Equivalent to creating a copy of the array and applying the
+    MaskedArray ``sort()`` method.
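+
+    Masked values are sorted to the end of the array by default
+    (``endwith=True``), or to the beginning when ``endwith=False``.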
+ + Refer to ``MaskedArray.sort`` for the full documentation + + See Also + -------- + MaskedArray.sort : equivalent method + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.sort(masked_x) + masked_array(data=[-3.973, 0.801, 11.2, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + a = np.array(a, copy=True, subok=True) + if axis is None: + a = a.flatten() + axis = 0 + + if isinstance(a, MaskedArray): + a.sort(axis=axis, kind=kind, order=order, endwith=endwith, + fill_value=fill_value, stable=stable) + else: + a.sort(axis=axis, kind=kind, order=order, stable=stable) + return a + + +def compressed(x): + """ + Return all the non-masked data as a 1-D array. + + This function is equivalent to calling the "compressed" method of a + `ma.MaskedArray`, see `ma.MaskedArray.compressed` for details. + + See Also + -------- + ma.MaskedArray.compressed : Equivalent method. + + Examples + -------- + >>> import numpy as np + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[1, --, 0], + [2, --, 3], + [7, 4, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=999999) + + Compress the masked array into a 1-D array of non-masked values: + + >>> np.ma.compressed(masked_x) + array([1, 0, 2, 3, 7, 4]) + + """ + return asanyarray(x).compressed() + + +def concatenate(arrays, axis=0): + """ + Concatenate a sequence of arrays along the given axis. + + Parameters + ---------- + arrays : sequence of array_like + The arrays must have the same shape, except in the dimension + corresponding to `axis` (the first, by default). + axis : int, optional + The axis along which the arrays will be joined. Default is 0. + + Returns + ------- + result : MaskedArray + The concatenated array with any masked entries preserved. + + See Also + -------- + numpy.concatenate : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.arange(3) + >>> a[1] = ma.masked + >>> b = ma.arange(2, 5) + >>> a + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) + >>> b + masked_array(data=[2, 3, 4], + mask=False, + fill_value=999999) + >>> ma.concatenate([a, b]) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) + + """ + d = np.concatenate([getdata(a) for a in arrays], axis) + rcls = get_masked_subclass(*arrays) + data = d.view(rcls) + # Check whether one of the arrays has a non-empty mask. + for x in arrays: + if getmask(x) is not nomask: + break + else: + return data + # OK, so we have to concatenate the masks + dm = np.concatenate([getmaskarray(a) for a in arrays], axis) + dm = dm.reshape(d.shape) + + # If we decide to keep a '_shrinkmask' option, we want to check that + # all of them are True, and then check for dm.any() + data._mask = _shrink_mask(dm) + return data + + +def diag(v, k=0): + """ + Extract a diagonal or construct a diagonal array. + + This function is the equivalent of `numpy.diag` that takes masked + values into account, see `numpy.diag` for details. 
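+
+    The mask is handled alongside the data: extracting a diagonal also
+    extracts the corresponding diagonal of the mask.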
+ + See Also + -------- + numpy.diag : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + + Create an array with negative values masked: + + >>> import numpy as np + >>> x = np.array([[11.2, -3.973, 18], [0.801, -1.41, 12], [7, 33, -12]]) + >>> masked_x = np.ma.masked_array(x, mask=x < 0) + >>> masked_x + masked_array( + data=[[11.2, --, 18.0], + [0.801, --, 12.0], + [7.0, 33.0, --]], + mask=[[False, True, False], + [False, True, False], + [False, False, True]], + fill_value=1e+20) + + Isolate the main diagonal from the masked array: + + >>> np.ma.diag(masked_x) + masked_array(data=[11.2, --, --], + mask=[False, True, True], + fill_value=1e+20) + + Isolate the first diagonal below the main diagonal: + + >>> np.ma.diag(masked_x, -1) + masked_array(data=[0.801, 33.0], + mask=[False, False], + fill_value=1e+20) + + """ + output = np.diag(v, k).view(MaskedArray) + if getmask(v) is not nomask: + output._mask = np.diag(v._mask, k) + return output + + +def left_shift(a, n): + """ + Shift the bits of an integer to the left. + + This is the masked array version of `numpy.left_shift`, for details + see that function. + + See Also + -------- + numpy.left_shift + + Examples + -------- + Shift with a masked array: + + >>> arr = np.ma.array([10, 20, 30], mask=[False, True, False]) + >>> np.ma.left_shift(arr, 1) + masked_array(data=[20, --, 60], + mask=[False, True, False], + fill_value=999999) + + Large shift: + + >>> np.ma.left_shift(10, 10) + masked_array(data=10240, + mask=False, + fill_value=999999) + + Shift with a scalar and an array: + + >>> scalar = 10 + >>> arr = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> np.ma.left_shift(scalar, arr) + masked_array(data=[20, --, 80], + mask=[False, True, False], + fill_value=999999) + + + """ + m = getmask(a) + if m is nomask: + d = umath.left_shift(filled(a), n) + return masked_array(d) + else: + d = umath.left_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def right_shift(a, n): + """ + Shift the bits of an integer to the right. + + This is the masked array version of `numpy.right_shift`, for details + see that function. + + See Also + -------- + numpy.right_shift + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11, 3, 8, 1] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11, 3, 8, --], + mask=[False, False, False, True], + fill_value=999999) + >>> ma.right_shift(masked_x,1) + masked_array(data=[5, 1, 4, --], + mask=[False, False, False, True], + fill_value=999999) + + """ + m = getmask(a) + if m is nomask: + d = umath.right_shift(filled(a), n) + return masked_array(d) + else: + d = umath.right_shift(filled(a, 0), n) + return masked_array(d, mask=m) + + +def put(a, indices, values, mode='raise'): + """ + Set storage-indexed locations to corresponding values. + + This function is equivalent to `MaskedArray.put`, see that method + for details. 
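+
+    As the examples below show, target entries become unmasked after the
+    assignment, unless the assigned `values` are themselves masked.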
+ + See Also + -------- + MaskedArray.put + + Examples + -------- + Putting values in a masked array: + + >>> a = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.put(a, [1, 3], [10, 30]) + >>> a + masked_array(data=[ 1, 10, 3, 30], + mask=False, + fill_value=999999) + + Using put with a 2D array: + + >>> b = np.ma.array([[1, 2], [3, 4]], mask=[[False, True], [False, False]]) + >>> np.ma.put(b, [[0, 1], [1, 0]], [[10, 20], [30, 40]]) + >>> b + masked_array( + data=[[40, 30], + [ 3, 4]], + mask=False, + fill_value=999999) + + """ + # We can't use 'frommethod', the order of arguments is different + try: + return a.put(indices, values, mode=mode) + except AttributeError: + return np.asarray(a).put(indices, values, mode=mode) + + +def putmask(a, mask, values): # , mode='raise'): + """ + Changes elements of an array based on conditional and input values. + + This is the masked array version of `numpy.putmask`, for details see + `numpy.putmask`. + + See Also + -------- + numpy.putmask + + Notes + ----- + Using a masked array as `values` will **not** transform a `ndarray` into + a `MaskedArray`. + + Examples + -------- + >>> import numpy as np + >>> arr = [[1, 2], [3, 4]] + >>> mask = [[1, 0], [0, 0]] + >>> x = np.ma.array(arr, mask=mask) + >>> np.ma.putmask(x, x < 4, 10*x) + >>> x + masked_array( + data=[[--, 20], + [30, 4]], + mask=[[ True, False], + [False, False]], + fill_value=999999) + >>> x.data + array([[10, 20], + [30, 4]]) + + """ + # We can't use 'frommethod', the order of arguments is different + if not isinstance(a, MaskedArray): + a = a.view(MaskedArray) + (valdata, valmask) = (getdata(values), getmask(values)) + if getmask(a) is nomask: + if valmask is not nomask: + a._sharedmask = True + a._mask = make_mask_none(a.shape, a.dtype) + np.copyto(a._mask, valmask, where=mask) + elif a._hardmask: + if valmask is not nomask: + m = a._mask.copy() + np.copyto(m, valmask, where=mask) + a.mask |= m + else: + if valmask is nomask: + valmask = getmaskarray(values) + np.copyto(a._mask, valmask, where=mask) + np.copyto(a._data, valdata, where=mask) + + +def transpose(a, axes=None): + """ + Permute the dimensions of an array. + + This function is exactly equivalent to `numpy.transpose`. + + See Also + -------- + numpy.transpose : Equivalent function in top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = ma.arange(4).reshape((2,2)) + >>> x[1, 1] = ma.masked + >>> x + masked_array( + data=[[0, 1], + [2, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + + >>> ma.transpose(x) + masked_array( + data=[[0, 2], + [1, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) + """ + # We can't use 'frommethod', as 'transpose' doesn't take keywords + try: + return a.transpose(axes) + except AttributeError: + return np.asarray(a).transpose(axes).view(MaskedArray) + + +def reshape(a, new_shape, order='C'): + """ + Returns an array containing the same data with a new shape. + + Refer to `MaskedArray.reshape` for full documentation. 
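+
+    The mask, if any, is reshaped together with the data, as the last
+    example below demonstrates.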
+ + See Also + -------- + MaskedArray.reshape : equivalent function + + Examples + -------- + Reshaping a 1-D array: + + >>> a = np.ma.array([1, 2, 3, 4]) + >>> np.ma.reshape(a, (2, 2)) + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + + Reshaping a 2-D array: + + >>> b = np.ma.array([[1, 2], [3, 4]]) + >>> np.ma.reshape(b, (1, 4)) + masked_array(data=[[1, 2, 3, 4]], + mask=False, + fill_value=999999) + + Reshaping a 1-D array with a mask: + + >>> c = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.reshape(c, (2, 2)) + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) + + """ + # We can't use 'frommethod', it whine about some parameters. Dmmit. + try: + return a.reshape(new_shape, order=order) + except AttributeError: + _tmp = np.asarray(a).reshape(new_shape, order=order) + return _tmp.view(MaskedArray) + + +def resize(x, new_shape): + """ + Return a new masked array with the specified size and shape. + + This is the masked equivalent of the `numpy.resize` function. The new + array is filled with repeated copies of `x` (in the order that the + data are stored in memory). If `x` is masked, the new array will be + masked, and the new mask will be a repetition of the old one. + + See Also + -------- + numpy.resize : Equivalent function in the top level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.array([[1, 2] ,[3, 4]]) + >>> a[0, 1] = ma.masked + >>> a + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) + >>> np.resize(a, (3, 3)) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) + >>> ma.resize(a, (3, 3)) + masked_array( + data=[[1, --, 3], + [4, 1, --], + [3, 4, 1]], + mask=[[False, True, False], + [False, False, True], + [False, False, False]], + fill_value=999999) + + A MaskedArray is always returned, regardless of the input type. + + >>> a = np.array([[1, 2] ,[3, 4]]) + >>> ma.resize(a, (3, 3)) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) + + """ + # We can't use _frommethods here, as N.resize is notoriously whiny. + m = getmask(x) + if m is not nomask: + m = np.resize(m, new_shape) + result = np.resize(x, new_shape).view(get_masked_subclass(x)) + if result.ndim: + result._mask = m + return result + + +def ndim(obj): + """ + maskedarray version of the numpy function. + + """ + return np.ndim(getdata(obj)) + + +ndim.__doc__ = np.ndim.__doc__ + + +def shape(obj): + "maskedarray version of the numpy function." + return np.shape(getdata(obj)) + + +shape.__doc__ = np.shape.__doc__ + + +def size(obj, axis=None): + "maskedarray version of the numpy function." + return np.size(getdata(obj), axis) + + +size.__doc__ = np.size.__doc__ + + +def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): + """ + Calculate the n-th discrete difference along the given axis. + The first difference is given by ``out[i] = a[i+1] - a[i]`` along + the given axis, higher differences are calculated by using `diff` + recursively. + Preserves the input mask. + + Parameters + ---------- + a : array_like + Input array + n : int, optional + The number of times values are differenced. If zero, the input + is returned as-is. + axis : int, optional + The axis along which the difference is taken, default is the + last axis. 
+ prepend, append : array_like, optional + Values to prepend or append to `a` along axis prior to + performing the difference. Scalar values are expanded to + arrays with length 1 in the direction of axis and the shape + of the input array in along all other axes. Otherwise the + dimension and shape must match `a` except along axis. + + Returns + ------- + diff : MaskedArray + The n-th differences. The shape of the output is the same as `a` + except along `axis` where the dimension is smaller by `n`. The + type of the output is the same as the type of the difference + between any two elements of `a`. This is the same as the type of + `a` in most cases. A notable exception is `datetime64`, which + results in a `timedelta64` output array. + + See Also + -------- + numpy.diff : Equivalent function in the top-level NumPy module. + + Notes + ----- + Type is preserved for boolean arrays, so the result will contain + `False` when consecutive elements are the same and `True` when they + differ. + + For unsigned integer arrays, the results will also be unsigned. This + should not be surprising, as the result is consistent with + calculating the difference directly: + + >>> u8_arr = np.array([1, 0], dtype=np.uint8) + >>> np.ma.diff(u8_arr) + masked_array(data=[255], + mask=False, + fill_value=np.uint64(999999), + dtype=uint8) + >>> u8_arr[1,...] - u8_arr[0,...] + np.uint8(255) + + If this is not desirable, then the array should be cast to a larger + integer type first: + + >>> i16_arr = u8_arr.astype(np.int16) + >>> np.ma.diff(i16_arr) + masked_array(data=[-1], + mask=False, + fill_value=np.int64(999999), + dtype=int16) + + Examples + -------- + >>> import numpy as np + >>> a = np.array([1, 2, 3, 4, 7, 0, 2, 3]) + >>> x = np.ma.masked_where(a < 2, a) + >>> np.ma.diff(x) + masked_array(data=[--, 1, 1, 3, --, --, 1], + mask=[ True, False, False, False, True, True, False], + fill_value=999999) + + >>> np.ma.diff(x, n=2) + masked_array(data=[--, 0, 2, --, --, --], + mask=[ True, False, False, True, True, True], + fill_value=999999) + + >>> a = np.array([[1, 3, 1, 5, 10], [0, 1, 5, 6, 8]]) + >>> x = np.ma.masked_equal(a, value=1) + >>> np.ma.diff(x) + masked_array( + data=[[--, --, --, 5], + [--, --, 1, 2]], + mask=[[ True, True, True, False], + [ True, True, False, False]], + fill_value=1) + + >>> np.ma.diff(x, axis=0) + masked_array(data=[[--, --, --, 1, -2]], + mask=[[ True, True, True, False, False]], + fill_value=1) + + """ + if n == 0: + return a + if n < 0: + raise ValueError("order must be non-negative but got " + repr(n)) + + a = np.ma.asanyarray(a) + if a.ndim == 0: + raise ValueError( + "diff requires input that is at least one dimensional" + ) + + combined = [] + if prepend is not np._NoValue: + prepend = np.ma.asanyarray(prepend) + if prepend.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + prepend = np.broadcast_to(prepend, tuple(shape)) + combined.append(prepend) + + combined.append(a) + + if append is not np._NoValue: + append = np.ma.asanyarray(append) + if append.ndim == 0: + shape = list(a.shape) + shape[axis] = 1 + append = np.broadcast_to(append, tuple(shape)) + combined.append(append) + + if len(combined) > 1: + a = np.ma.concatenate(combined, axis) + + # GH 22465 np.diff without prepend/append preserves the mask + return np.diff(a, n, axis) + + +############################################################################## +# Extra functions # +############################################################################## + + +def where(condition, x=_NoValue, y=_NoValue): + 
""" + Return a masked array with elements from `x` or `y`, depending on condition. + + .. note:: + When only `condition` is provided, this function is identical to + `nonzero`. The rest of this documentation covers only the case where + all three arguments are provided. + + Parameters + ---------- + condition : array_like, bool + Where True, yield `x`, otherwise yield `y`. + x, y : array_like, optional + Values from which to choose. `x`, `y` and `condition` need to be + broadcastable to some shape. + + Returns + ------- + out : MaskedArray + An masked array with `masked` elements where the condition is masked, + elements from `x` where `condition` is True, and elements from `y` + elsewhere. + + See Also + -------- + numpy.where : Equivalent function in the top-level NumPy module. + nonzero : The function that is called when x and y are omitted + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], + ... [1, 0, 1], + ... [0, 1, 0]]) + >>> x + masked_array( + data=[[0.0, --, 2.0], + [--, 4.0, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + >>> np.ma.where(x > 5, x, -3.1416) + masked_array( + data=[[-3.1416, --, -3.1416], + [--, -3.1416, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + + """ + + # handle the single-argument case + missing = (x is _NoValue, y is _NoValue).count(True) + if missing == 1: + raise ValueError("Must provide both 'x' and 'y' or neither.") + if missing == 2: + return nonzero(condition) + + # we only care if the condition is true - false or masked pick y + cf = filled(condition, False) + xd = getdata(x) + yd = getdata(y) + + # we need the full arrays here for correct final dimensions + cm = getmaskarray(condition) + xm = getmaskarray(x) + ym = getmaskarray(y) + + # deal with the fact that masked.dtype == float64, but we don't actually + # want to treat it as that. + if x is masked and y is not masked: + xd = np.zeros((), dtype=yd.dtype) + xm = np.ones((), dtype=ym.dtype) + elif y is masked and x is not masked: + yd = np.zeros((), dtype=xd.dtype) + ym = np.ones((), dtype=xm.dtype) + + data = np.where(cf, xd, yd) + mask = np.where(cf, xm, ym) + mask = np.where(cm, np.ones((), dtype=mask.dtype), mask) + + # collapse the mask, for backwards compatibility + mask = _shrink_mask(mask) + + return masked_array(data, mask=mask) + + +def choose(indices, choices, out=None, mode='raise'): + """ + Use an index array to construct a new array from a list of choices. + + Given an array of integers and a list of n choice arrays, this method + will create a new array that merges each of the choice arrays. Where a + value in `index` is i, the new array will have the value that choices[i] + contains in the same place. + + Parameters + ---------- + indices : ndarray of ints + This array must contain integers in ``[0, n-1]``, where n is the + number of choices. + choices : sequence of arrays + Choice arrays. The index array and all of the choices should be + broadcastable to the same shape. + out : array, optional + If provided, the result will be inserted into this array. It should + be of the appropriate shape and `dtype`. + mode : {'raise', 'wrap', 'clip'}, optional + Specifies how out-of-bounds indices will behave. 
+ + * 'raise' : raise an error + * 'wrap' : wrap around + * 'clip' : clip to the range + + Returns + ------- + merged_array : array + + See Also + -------- + choose : equivalent function + + Examples + -------- + >>> import numpy as np + >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) + >>> a = np.array([2, 1, 0]) + >>> np.ma.choose(a, choice) + masked_array(data=[3, 2, 1], + mask=False, + fill_value=999999) + + """ + def fmask(x): + "Returns the filled array, or True if masked." + if x is masked: + return True + return filled(x) + + def nmask(x): + "Returns the mask, True if ``masked``, False if ``nomask``." + if x is masked: + return True + return getmask(x) + # Get the indices. + c = filled(indices, 0) + # Get the masks. + masks = [nmask(x) for x in choices] + data = [fmask(x) for x in choices] + # Construct the mask + outputmask = np.choose(c, masks, mode=mode) + outputmask = make_mask(mask_or(outputmask, getmask(indices)), + copy=False, shrink=True) + # Get the choices. + d = np.choose(c, data, mode=mode, out=out).view(MaskedArray) + if out is not None: + if isinstance(out, MaskedArray): + out.__setmask__(outputmask) + return out + d.__setmask__(outputmask) + return d + + +def round_(a, decimals=0, out=None): + """ + Return a copy of a, rounded to 'decimals' places. + + When 'decimals' is negative, it specifies the number of positions + to the left of the decimal point. The real and imaginary parts of + complex numbers are rounded separately. Nothing is done if the + array is not of float type and 'decimals' is greater than or equal + to 0. + + Parameters + ---------- + decimals : int + Number of decimals to round to. May be negative. + out : array_like + Existing array to use for output. + If not given, returns a default copy of a. + + Notes + ----- + If out is given and does not have a mask attribute, the mask of a + is lost! + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> x = [11.2, -3.973, 0.801, -1.41] + >>> mask = [0, 0, 0, 1] + >>> masked_x = ma.masked_array(x, mask) + >>> masked_x + masked_array(data=[11.2, -3.973, 0.801, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x) + masked_array(data=[11.0, -4.0, 1.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round(masked_x, decimals=1) + masked_array(data=[11.2, -4.0, 0.8, --], + mask=[False, False, False, True], + fill_value=1e+20) + >>> ma.round_(masked_x, decimals=-1) + masked_array(data=[10.0, -0.0, 0.0, --], + mask=[False, False, False, True], + fill_value=1e+20) + """ + if out is None: + return np.round(a, decimals, out) + else: + np.round(getdata(a), decimals, out) + if hasattr(out, '_mask'): + out._mask = getmask(a) + return out + + +round = round_ + + +def _mask_propagate(a, axis): + """ + Mask whole 1-d vectors of an array that contain masked values. + """ + a = array(a, subok=False) + m = getmask(a) + if m is nomask or not m.any() or axis is None: + return a + a._mask = a._mask.copy() + axes = normalize_axis_tuple(axis, a.ndim) + for ax in axes: + a._mask |= m.any(axis=ax, keepdims=True) + return a + + +# Include masked dot here to avoid import problems in getting it from +# extras.py. Note that it is not included in __all__, but rather exported +# from extras in order to avoid backward compatibility problems. +def dot(a, b, strict=False, out=None): + """ + Return the dot product of two arrays. + + This function is the equivalent of `numpy.dot` that takes masked values + into account. 
Note that `strict` and `out` are in different position + than in the method version. In order to maintain compatibility with the + corresponding method, it is recommended that the optional arguments be + treated as keyword only. At some point that may be mandatory. + + Parameters + ---------- + a, b : masked_array_like + Inputs arrays. + strict : bool, optional + Whether masked data are propagated (True) or set to 0 (False) for + the computation. Default is False. Propagating the mask means that + if a masked value appears in a row or column, the whole row or + column is considered masked. + out : masked_array, optional + Output argument. This must have the exact kind that would be returned + if it was not used. In particular, it must have the right type, must be + C-contiguous, and its dtype must be the dtype that would be returned + for `dot(a,b)`. This is a performance feature. Therefore, if these + conditions are not met, an exception is raised, instead of attempting + to be flexible. + + See Also + -------- + numpy.dot : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) + >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) + >>> np.ma.dot(a, b) + masked_array( + data=[[21, 26], + [45, 64]], + mask=[[False, False], + [False, False]], + fill_value=999999) + >>> np.ma.dot(a, b, strict=True) + masked_array( + data=[[--, --], + [--, 64]], + mask=[[ True, True], + [ True, False]], + fill_value=999999) + + """ + if strict is True: + if np.ndim(a) == 0 or np.ndim(b) == 0: + pass + elif b.ndim == 1: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 1) + else: + a = _mask_propagate(a, a.ndim - 1) + b = _mask_propagate(b, b.ndim - 2) + am = ~getmaskarray(a) + bm = ~getmaskarray(b) + + if out is None: + d = np.dot(filled(a, 0), filled(b, 0)) + m = ~np.dot(am, bm) + if np.ndim(d) == 0: + d = np.asarray(d) + r = d.view(get_masked_subclass(a, b)) + r.__setmask__(m) + return r + else: + d = np.dot(filled(a, 0), filled(b, 0), out._data) + if out.mask.shape != d.shape: + out._mask = np.empty(d.shape, MaskType) + np.dot(am, bm, out._mask) + np.logical_not(out._mask, out._mask) + return out + + +def inner(a, b): + """ + Returns the inner product of a and b for arrays of floating point types. + + Like the generic NumPy equivalent the product sum is over the last dimension + of a and b. The first argument is not conjugated. + + """ + fa = filled(a, 0) + fb = filled(b, 0) + if fa.ndim == 0: + fa.shape = (1,) + if fb.ndim == 0: + fb.shape = (1,) + return np.inner(fa, fb).view(MaskedArray) + + +inner.__doc__ = doc_note(np.inner.__doc__, + "Masked values are replaced by 0.") +innerproduct = inner + + +def outer(a, b): + "maskedarray version of the numpy function." 
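+    # Fill masked entries with 0 before taking the outer product, then
+    # rebuild the output mask as the pairwise "or" of the input masks.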
+ fa = filled(a, 0).ravel() + fb = filled(b, 0).ravel() + d = np.outer(fa, fb) + ma = getmask(a) + mb = getmask(b) + if ma is nomask and mb is nomask: + return masked_array(d) + ma = getmaskarray(a) + mb = getmaskarray(b) + m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False) + return masked_array(d, mask=m) + + +outer.__doc__ = doc_note(np.outer.__doc__, + "Masked values are replaced by 0.") +outerproduct = outer + + +def _convolve_or_correlate(f, a, v, mode, propagate_mask): + """ + Helper function for ma.correlate and ma.convolve + """ + if propagate_mask: + # results which are contributed to by either item in any pair being invalid + mask = ( + f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode) + | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode) + ) + data = f(getdata(a), getdata(v), mode=mode) + else: + # results which are not contributed to by any pair of valid elements + mask = ~f(~getmaskarray(a), ~getmaskarray(v), mode=mode) + data = f(filled(a, 0), filled(v, 0), mode=mode) + + return masked_array(data, mask=mask) + + +def correlate(a, v, mode='valid', propagate_mask=True): + """ + Cross-correlation of two 1-dimensional sequences. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `np.convolve` docstring. Note that the default + is 'valid', unlike `convolve`, which uses 'full'. + propagate_mask : bool + If True, then a result element is masked if any masked element contributes + towards it. If False, then a result element is only masked if no non-masked + element contribute towards it + + Returns + ------- + out : MaskedArray + Discrete cross-correlation of `a` and `v`. + + See Also + -------- + numpy.correlate : Equivalent function in the top-level NumPy module. + + Examples + -------- + Basic correlation: + + >>> a = np.ma.array([1, 2, 3]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='valid') + masked_array(data=[2], + mask=[False], + fill_value=999999) + + Correlation with masked elements: + + >>> a = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='valid', propagate_mask=True) + masked_array(data=[--], + mask=[ True], + fill_value=999999, + dtype=int64) + + Correlation with different modes and mixed array types: + + >>> a = np.ma.array([1, 2, 3]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='full') + masked_array(data=[0, 1, 2, 3, 0], + mask=[False, False, False, False, False], + fill_value=999999) + + """ + return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask) + + +def convolve(a, v, mode='full', propagate_mask=True): + """ + Returns the discrete, linear convolution of two one-dimensional sequences. + + Parameters + ---------- + a, v : array_like + Input sequences. + mode : {'valid', 'same', 'full'}, optional + Refer to the `np.convolve` docstring. + propagate_mask : bool + If True, then if any masked element is included in the sum for a result + element, then the result is masked. + If False, then the result element is only masked if no non-masked cells + contribute towards it + + Returns + ------- + out : MaskedArray + Discrete, linear convolution of `a` and `v`. + + See Also + -------- + numpy.convolve : Equivalent function in the top-level NumPy module. 
+ """ + return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask) + + +def allequal(a, b, fill_value=True): + """ + Return True if all entries of a and b are equal, using + fill_value as a truth value where either or both are masked. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + fill_value : bool, optional + Whether masked values in a or b are considered equal (True) or not + (False). + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, + then False is returned. + + See Also + -------- + all, any + numpy.ma.allclose + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + + >>> b = np.array([1e10, 1e-7, -42.0]) + >>> b + array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) + >>> np.ma.allequal(a, b, fill_value=False) + False + >>> np.ma.allequal(a, b) + True + + """ + m = mask_or(getmask(a), getmask(b)) + if m is nomask: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + return d.all() + elif fill_value: + x = getdata(a) + y = getdata(b) + d = umath.equal(x, y) + dm = array(d, mask=m, copy=False) + return dm.filled(True).all(None) + else: + return False + + +def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): + """ + Returns True if two arrays are element-wise equal within a tolerance. + + This function is equivalent to `allclose` except that masked values + are treated as equal (default) or unequal, depending on the `masked_equal` + argument. + + Parameters + ---------- + a, b : array_like + Input arrays to compare. + masked_equal : bool, optional + Whether masked values in `a` and `b` are considered equal (True) or not + (False). They are considered equal by default. + rtol : float, optional + Relative tolerance. The relative difference is equal to ``rtol * b``. + Default is 1e-5. + atol : float, optional + Absolute tolerance. The absolute difference is equal to `atol`. + Default is 1e-8. + + Returns + ------- + y : bool + Returns True if the two arrays are equal within the given + tolerance, False otherwise. If either array contains NaN, then + False is returned. + + See Also + -------- + all, any + numpy.allclose : the non-masked `allclose`. + + Notes + ----- + If the following equation is element-wise True, then `allclose` returns + True:: + + absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) + + Return True if all elements of `a` and `b` are equal subject to + given tolerances. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + False + + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) + True + >>> np.ma.allclose(a, b, masked_equal=False) + False + + Masked values are not compared directly. 
+
+    >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
+    >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
+    >>> np.ma.allclose(a, b)
+    True
+    >>> np.ma.allclose(a, b, masked_equal=False)
+    False
+
+    """
+    x = masked_array(a, copy=False)
+    y = masked_array(b, copy=False)
+
+    # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+    # casting of x later.
+    # NOTE: We explicitly allow timedelta, which used to work. This could
+    # possibly be deprecated. See also gh-18286.
+    # timedelta works if `atol` is an integer or also a timedelta.
+    # Although, the default tolerances are unlikely to be useful
+    if y.dtype.kind != "m":
+        dtype = np.result_type(y, 1.)
+        if y.dtype != dtype:
+            y = masked_array(y, dtype=dtype, copy=False)
+
+    m = mask_or(getmask(x), getmask(y))
+    xinf = np.isinf(masked_array(x, copy=False, mask=m)).filled(False)
+    # If we have some infs, they should fall at the same place.
+    if not np.all(xinf == filled(np.isinf(y), False)):
+        return False
+    # No infs at all
+    if not np.any(xinf):
+        d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+                   masked_equal)
+        return np.all(d)
+
+    if not np.all(filled(x[xinf] == y[xinf], masked_equal)):
+        return False
+    x = x[~xinf]
+    y = y[~xinf]
+
+    d = filled(less_equal(absolute(x - y), atol + rtol * absolute(y)),
+               masked_equal)
+
+    return np.all(d)
+
+
+def asarray(a, dtype=None, order=None):
+    """
+    Convert the input to a masked array of the given data-type.
+
+    No copy is performed if the input is already an `ndarray`. If `a` is
+    a subclass of `MaskedArray`, a base class `MaskedArray` is returned.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to a masked array. This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists, ndarrays and masked arrays.
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('FORTRAN') memory
+        representation. Default is 'C'.
+
+    Returns
+    -------
+    out : MaskedArray
+        Masked array interpretation of `a`.
+
+    See Also
+    --------
+    asanyarray : Similar to `asarray`, but conserves subclasses.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[0., 1., 2., 3., 4.],
+           [5., 6., 7., 8., 9.]])
+    >>> np.ma.asarray(x)
+    masked_array(
+      data=[[0., 1., 2., 3., 4.],
+            [5., 6., 7., 8., 9.]],
+      mask=False,
+      fill_value=1e+20)
+    >>> type(np.ma.asarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    order = order or 'C'
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True,
+                        subok=False, order=order)
+
+
+def asanyarray(a, dtype=None):
+    """
+    Convert the input to a masked array, conserving subclasses.
+
+    If `a` is a subclass of `MaskedArray`, its class is conserved.
+    No copy is performed if the input is already an `ndarray`.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.
+    dtype : dtype, optional
+        By default, the data-type is inferred from the input data.
+
+    Returns
+    -------
+    out : MaskedArray
+        MaskedArray interpretation of `a`.
+
+    See Also
+    --------
+    asarray : Similar to `asanyarray`, but does not conserve subclass.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.arange(10.).reshape(2, 5)
+    >>> x
+    array([[0., 1., 2., 3., 4.],
+           [5., 6., 7., 8., 9.]])
+    >>> np.ma.asanyarray(x)
+    masked_array(
+      data=[[0., 1., 2., 3., 4.],
+            [5., 6., 7., 8., 9.]],
+      mask=False,
+      fill_value=1e+20)
+    >>> type(np.ma.asanyarray(x))
+    <class 'numpy.ma.core.MaskedArray'>
+
+    """
+    # workaround for #8666, to preserve identity. Ideally the bottom line
+    # would handle this for us.
+    if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
+        return a
+    return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
+
+
+##############################################################################
+#                               Pickling                                     #
+##############################################################################
+
+
+def fromfile(file, dtype=float, count=-1, sep=''):
+    raise NotImplementedError(
+        "fromfile() not yet implemented for a MaskedArray.")
+
+
+def fromflex(fxarray):
+    """
+    Build a masked array from a suitable flexible-type array.
+
+    The input array has to have a data-type with ``_data`` and ``_mask``
+    fields. This type of array is output by `MaskedArray.toflex`.
+
+    Parameters
+    ----------
+    fxarray : ndarray
+        The structured input array, containing ``_data`` and ``_mask``
+        fields. If present, other fields are discarded.
+
+    Returns
+    -------
+    result : MaskedArray
+        The constructed masked array.
+
+    See Also
+    --------
+    MaskedArray.toflex : Build a flexible-type array from a masked array.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
+    >>> rec = x.toflex()
+    >>> rec
+    array([[(0, False), (1,  True), (2, False)],
+           [(3,  True), (4, False), (5,  True)],
+           [(6, False), (7,  True), (8, False)]],
+          dtype=[('_data', '<i8'), ('_mask', '?')])
+    >>> x2 = np.ma.fromflex(rec)
+    >>> x2
+    masked_array(
+      data=[[0, --, 2],
+            [--, 4, --],
+            [6, --, 8]],
+      mask=[[False,  True, False],
+            [ True, False,  True],
+            [False,  True, False]],
+      fill_value=999999)
+
+    Extra fields can be present in the structured array but are discarded:
+
+    >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
+    >>> rec2 = np.zeros((2, 2), dtype=dt)
+    >>> rec2
+    array([[(0, False, 0.), (0, False, 0.)],
+           [(0, False, 0.), (0, False, 0.)]],
+          dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])
+    >>> y = np.ma.fromflex(rec2)
+    >>> y
+    masked_array(
+      data=[[0, 0],
+            [0, 0]],
+      mask=[[False, False],
+            [False, False]],
+      fill_value=np.int64(999999),
+      dtype=int32)
+
+    """
+    return masked_array(fxarray['_data'], mask=fxarray['_mask'])
+
+
+class _convert2ma:
+
+    """
+    Convert functions from numpy to numpy.ma.
+
+    Parameters
+    ----------
+    _methodname : string
+        Name of the method to transform.
+
+    """
+    __doc__ = None
+
+    def __init__(self, funcname, np_ret, np_ma_ret, params=None):
+        self._func = getattr(np, funcname)
+        self.__doc__ = self.getdoc(np_ret, np_ma_ret)
+        self._extras = params or {}
+
+    def getdoc(self, np_ret, np_ma_ret):
+        "Return the doc of the function (from the doc of the method)."
+        doc = getattr(self._func, '__doc__', None)
+        sig = get_object_signature(self._func)
+        if doc:
+            doc = self._replace_return_type(doc, np_ret, np_ma_ret)
+            # Add the signature of the function at the beginning of the doc
+            if sig:
+                sig = f"{self._func.__name__}{sig}\n"
+                doc = sig + doc
+        return doc
+
+    def _replace_return_type(self, doc, np_ret, np_ma_ret):
+        """
+        Replace documentation of ``np`` function's return type.
+
+        Replaces it with the proper type for the ``np.ma`` function.
+
+        Parameters
+        ----------
+        doc : str
+            The documentation of the ``np`` method.
+ np_ret : str + The return type string of the ``np`` method that we want to + replace. (e.g. "out : ndarray") + np_ma_ret : str + The return type string of the ``np.ma`` method. + (e.g. "out : MaskedArray") + """ + if np_ret not in doc: + raise RuntimeError( + f"Failed to replace `{np_ret}` with `{np_ma_ret}`. " + f"The documentation string for return type, {np_ret}, is not " + f"found in the docstring for `np.{self._func.__name__}`. " + f"Fix the docstring for `np.{self._func.__name__}` or " + "update the expected string for return type." + ) + + return doc.replace(np_ret, np_ma_ret) + + def __call__(self, *args, **params): + # Find the common parameters to the call and the definition + _extras = self._extras + common_params = set(params).intersection(_extras) + # Drop the common parameters from the call + for p in common_params: + _extras[p] = params.pop(p) + # Get the result + result = self._func.__call__(*args, **params).view(MaskedArray) + if "fill_value" in common_params: + result.fill_value = _extras.get("fill_value", None) + if "hardmask" in common_params: + result._hardmask = bool(_extras.get("hard_mask", False)) + return result + + +arange = _convert2ma( + 'arange', + params={'fill_value': None, 'hardmask': False}, + np_ret='arange : ndarray', + np_ma_ret='arange : MaskedArray', +) +clip = _convert2ma( + 'clip', + params={'fill_value': None, 'hardmask': False}, + np_ret='clipped_array : ndarray', + np_ma_ret='clipped_array : MaskedArray', +) +empty = _convert2ma( + 'empty', + params={'fill_value': None, 'hardmask': False}, + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +empty_like = _convert2ma( + 'empty_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +frombuffer = _convert2ma( + 'frombuffer', + np_ret='out : ndarray', + np_ma_ret='out: MaskedArray', +) +fromfunction = _convert2ma( + 'fromfunction', + np_ret='fromfunction : any', + np_ma_ret='fromfunction: MaskedArray', +) +identity = _convert2ma( + 'identity', + params={'fill_value': None, 'hardmask': False}, + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +indices = _convert2ma( + 'indices', + params={'fill_value': None, 'hardmask': False}, + np_ret='grid : one ndarray or tuple of ndarrays', + np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays', +) +ones = _convert2ma( + 'ones', + params={'fill_value': None, 'hardmask': False}, + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +ones_like = _convert2ma( + 'ones_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +squeeze = _convert2ma( + 'squeeze', + params={'fill_value': None, 'hardmask': False}, + np_ret='squeezed : ndarray', + np_ma_ret='squeezed : MaskedArray', +) +zeros = _convert2ma( + 'zeros', + params={'fill_value': None, 'hardmask': False}, + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) +zeros_like = _convert2ma( + 'zeros_like', + np_ret='out : ndarray', + np_ma_ret='out : MaskedArray', +) + + +def append(a, b, axis=None): + """Append values to the end of an array. + + Parameters + ---------- + a : array_like + Values are appended to a copy of this array. + b : array_like + These values are appended to a copy of `a`. It must be of the + correct shape (the same shape as `a`, excluding `axis`). If `axis` + is not specified, `b` can be any shape and will be flattened + before use. + axis : int, optional + The axis along which `v` are appended. If `axis` is not given, + both `a` and `b` are flattened before use. 
+ + Returns + ------- + append : MaskedArray + A copy of `a` with `b` appended to `axis`. Note that `append` + does not occur in-place: a new array is allocated and filled. If + `axis` is None, the result is a flattened array. + + See Also + -------- + numpy.append : Equivalent function in the top-level NumPy module. + + Examples + -------- + >>> import numpy as np + >>> import numpy.ma as ma + >>> a = ma.masked_values([1, 2, 3], 2) + >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + >>> ma.append(a, b) + masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], + mask=[False, True, False, False, False, False, True, False, + False], + fill_value=999999) + """ + return concatenate([a, b], axis) diff --git a/python/numpy/ma/core.pyi b/python/numpy/ma/core.pyi new file mode 100644 index 000000000..089469dbe --- /dev/null +++ b/python/numpy/ma/core.pyi @@ -0,0 +1,1462 @@ +# pyright: reportIncompatibleMethodOverride=false +# ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 + +from collections.abc import Sequence +from typing import Any, Literal, Self, SupportsIndex, TypeAlias, overload + +from _typeshed import Incomplete +from typing_extensions import TypeIs, TypeVar + +import numpy as np +from numpy import ( + _HasDTypeWithRealAndImag, + _ModeKind, + _OrderKACF, + _PartitionKind, + _SortKind, + amax, + amin, + bool_, + bytes_, + character, + complexfloating, + datetime64, + dtype, + dtypes, + expand_dims, + float64, + floating, + generic, + int_, + integer, + intp, + ndarray, + object_, + str_, + timedelta64, +) +from numpy._globals import _NoValueType +from numpy._typing import ( + ArrayLike, + NDArray, + _AnyShape, + _ArrayLike, + _ArrayLikeBool_co, + _ArrayLikeBytes_co, + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _ArrayLikeInt, + _ArrayLikeInt_co, + _ArrayLikeStr_co, + _ArrayLikeString_co, + _ArrayLikeTD64_co, + _DTypeLikeBool, + _IntLike_co, + _ScalarLike_co, + _Shape, + _ShapeLike, +) + +__all__ = [ + "MAError", + "MaskError", + "MaskType", + "MaskedArray", + "abs", + "absolute", + "add", + "all", + "allclose", + "allequal", + "alltrue", + "amax", + "amin", + "angle", + "anom", + "anomalies", + "any", + "append", + "arange", + "arccos", + "arccosh", + "arcsin", + "arcsinh", + "arctan", + "arctan2", + "arctanh", + "argmax", + "argmin", + "argsort", + "around", + "array", + "asanyarray", + "asarray", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bool_", + "ceil", + "choose", + "clip", + "common_fill_value", + "compress", + "compressed", + "concatenate", + "conjugate", + "convolve", + "copy", + "correlate", + "cos", + "cosh", + "count", + "cumprod", + "cumsum", + "default_fill_value", + "diag", + "diagonal", + "diff", + "divide", + "empty", + "empty_like", + "equal", + "exp", + "expand_dims", + "fabs", + "filled", + "fix_invalid", + "flatten_mask", + "flatten_structured_array", + "floor", + "floor_divide", + "fmod", + "frombuffer", + "fromflex", + "fromfunction", + "getdata", + "getmask", + "getmaskarray", + "greater", + "greater_equal", + "harden_mask", + "hypot", + "identity", + "ids", + "indices", + "inner", + "innerproduct", + "isMA", + "isMaskedArray", + "is_mask", + "is_masked", + "isarray", + "left_shift", + "less", + "less_equal", + "log", + "log2", + "log10", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "make_mask", + "make_mask_descr", + "make_mask_none", + "mask_or", + "masked", + "masked_array", + "masked_equal", + "masked_greater", + "masked_greater_equal", + "masked_inside", + "masked_invalid", + "masked_less", + 
"masked_less_equal", + "masked_not_equal", + "masked_object", + "masked_outside", + "masked_print_option", + "masked_singleton", + "masked_values", + "masked_where", + "max", + "maximum", + "maximum_fill_value", + "mean", + "min", + "minimum", + "minimum_fill_value", + "mod", + "multiply", + "mvoid", + "ndim", + "negative", + "nomask", + "nonzero", + "not_equal", + "ones", + "ones_like", + "outer", + "outerproduct", + "power", + "prod", + "product", + "ptp", + "put", + "putmask", + "ravel", + "remainder", + "repeat", + "reshape", + "resize", + "right_shift", + "round", + "round_", + "set_fill_value", + "shape", + "sin", + "sinh", + "size", + "soften_mask", + "sometrue", + "sort", + "sqrt", + "squeeze", + "std", + "subtract", + "sum", + "swapaxes", + "take", + "tan", + "tanh", + "trace", + "transpose", + "true_divide", + "var", + "where", + "zeros", + "zeros_like", +] + +_ShapeT = TypeVar("_ShapeT", bound=_Shape) +_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True) +_DTypeT = TypeVar("_DTypeT", bound=dtype) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, default=dtype, covariant=True) +_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) +_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) +# A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` +_MaskedArray: TypeAlias = MaskedArray[_AnyShape, dtype[_ScalarT]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +MaskType = bool_ +nomask: bool_[Literal[False]] + +class MaskedArrayFutureWarning(FutureWarning): ... +class MAError(Exception): ... +class MaskError(MAError): ... + +def default_fill_value(obj): ... +def minimum_fill_value(obj): ... +def maximum_fill_value(obj): ... +def set_fill_value(a, fill_value): ... +def common_fill_value(a, b): ... +@overload +def filled(a: ndarray[_ShapeT_co, _DTypeT_co], fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... +@overload +def filled(a: _ArrayLike[_ScalarT_co], fill_value: _ScalarLike_co | None = None) -> NDArray[_ScalarT_co]: ... +@overload +def filled(a: ArrayLike, fill_value: _ScalarLike_co | None = None) -> NDArray[Any]: ... +def getdata(a, subok=...): ... +get_data = getdata + +def fix_invalid(a, mask=..., copy=..., fill_value=...): ... + +class _MaskedUFunc: + f: Any + __doc__: Any + __name__: Any + def __init__(self, ufunc): ... + +class _MaskedUnaryOperation(_MaskedUFunc): + fill: Any + domain: Any + def __init__(self, mufunc, fill=..., domain=...): ... + def __call__(self, a, *args, **kwargs): ... + +class _MaskedBinaryOperation(_MaskedUFunc): + fillx: Any + filly: Any + def __init__(self, mbfunc, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... + def reduce(self, target, axis=..., dtype=...): ... + def outer(self, a, b): ... + def accumulate(self, target, axis=...): ... + +class _DomainedBinaryOperation(_MaskedUFunc): + domain: Any + fillx: Any + filly: Any + def __init__(self, dbfunc, domain, fillx=..., filly=...): ... + def __call__(self, a, b, *args, **kwargs): ... 
+ +exp: _MaskedUnaryOperation +conjugate: _MaskedUnaryOperation +sin: _MaskedUnaryOperation +cos: _MaskedUnaryOperation +arctan: _MaskedUnaryOperation +arcsinh: _MaskedUnaryOperation +sinh: _MaskedUnaryOperation +cosh: _MaskedUnaryOperation +tanh: _MaskedUnaryOperation +abs: _MaskedUnaryOperation +absolute: _MaskedUnaryOperation +angle: _MaskedUnaryOperation +fabs: _MaskedUnaryOperation +negative: _MaskedUnaryOperation +floor: _MaskedUnaryOperation +ceil: _MaskedUnaryOperation +around: _MaskedUnaryOperation +logical_not: _MaskedUnaryOperation +sqrt: _MaskedUnaryOperation +log: _MaskedUnaryOperation +log2: _MaskedUnaryOperation +log10: _MaskedUnaryOperation +tan: _MaskedUnaryOperation +arcsin: _MaskedUnaryOperation +arccos: _MaskedUnaryOperation +arccosh: _MaskedUnaryOperation +arctanh: _MaskedUnaryOperation + +add: _MaskedBinaryOperation +subtract: _MaskedBinaryOperation +multiply: _MaskedBinaryOperation +arctan2: _MaskedBinaryOperation +equal: _MaskedBinaryOperation +not_equal: _MaskedBinaryOperation +less_equal: _MaskedBinaryOperation +greater_equal: _MaskedBinaryOperation +less: _MaskedBinaryOperation +greater: _MaskedBinaryOperation +logical_and: _MaskedBinaryOperation +def alltrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_or: _MaskedBinaryOperation +def sometrue(target: ArrayLike, axis: SupportsIndex | None = 0, dtype: _DTypeLikeBool | None = None) -> Incomplete: ... +logical_xor: _MaskedBinaryOperation +bitwise_and: _MaskedBinaryOperation +bitwise_or: _MaskedBinaryOperation +bitwise_xor: _MaskedBinaryOperation +hypot: _MaskedBinaryOperation + +divide: _DomainedBinaryOperation +true_divide: _DomainedBinaryOperation +floor_divide: _DomainedBinaryOperation +remainder: _DomainedBinaryOperation +fmod: _DomainedBinaryOperation +mod: _DomainedBinaryOperation + +def make_mask_descr(ndtype): ... + +@overload +def getmask(a: _ScalarLike_co) -> bool_: ... +@overload +def getmask(a: MaskedArray[_ShapeT_co, Any]) -> np.ndarray[_ShapeT_co, dtype[bool_]] | bool_: ... +@overload +def getmask(a: ArrayLike) -> NDArray[bool_] | bool_: ... + +get_mask = getmask + +def getmaskarray(arr): ... + +# It's sufficient for `m` to have dtype with type: `type[np.bool_]`, +# which isn't necessarily a ndarray. Please open an issue if this causes issues. +def is_mask(m: object) -> TypeIs[NDArray[bool_]]: ... + +def make_mask(m, copy=..., shrink=..., dtype=...): ... +def make_mask_none(newshape, dtype=...): ... +def mask_or(m1, m2, copy=..., shrink=...): ... +def flatten_mask(mask): ... +def masked_where(condition, a, copy=...): ... +def masked_greater(x, value, copy=...): ... +def masked_greater_equal(x, value, copy=...): ... +def masked_less(x, value, copy=...): ... +def masked_less_equal(x, value, copy=...): ... +def masked_not_equal(x, value, copy=...): ... +def masked_equal(x, value, copy=...): ... +def masked_inside(x, v1, v2, copy=...): ... +def masked_outside(x, v1, v2, copy=...): ... +def masked_object(x, value, copy=..., shrink=...): ... +def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ... +def masked_invalid(a, copy=...): ... + +class _MaskedPrintOption: + def __init__(self, display): ... + def display(self): ... + def set_display(self, s): ... + def enabled(self): ... + def enable(self, shrink=...): ... + +masked_print_option: _MaskedPrintOption + +def flatten_structured_array(a): ... + +class MaskedIterator: + ma: Any + dataiter: Any + maskiter: Any + def __init__(self, ma): ... + def __iter__(self): ... 
+ def __getitem__(self, indx): ... + def __setitem__(self, index, value): ... + def __next__(self): ... + +class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): + __array_priority__: Any + def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def view(self, dtype=..., type=..., fill_value=...): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + @property + def shape(self) -> _ShapeT_co: ... + @shape.setter + def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... + @property + def mask(self) -> NDArray[MaskType] | MaskType: ... + @mask.setter + def mask(self, value: _ArrayLikeBool_co, /) -> None: ... + @property + def recordmask(self): ... + @recordmask.setter + def recordmask(self, mask): ... + def harden_mask(self) -> Self: ... + def soften_mask(self) -> Self: ... + @property + def hardmask(self) -> bool: ... + def unshare_mask(self) -> Self: ... + @property + def sharedmask(self) -> bool: ... + def shrink_mask(self) -> Self: ... + @property + def baseclass(self) -> type[NDArray[Any]]: ... + data: Any + @property + def flat(self): ... + @flat.setter + def flat(self, value): ... + @property + def fill_value(self): ... + @fill_value.setter + def fill_value(self, value=...): ... + get_fill_value: Any + set_fill_value: Any + def filled(self, /, fill_value: _ScalarLike_co | None = None) -> ndarray[_ShapeT_co, _DTypeT_co]: ... + def compressed(self) -> ndarray[tuple[int], _DTypeT_co]: ... + def compress(self, condition, axis=..., out=...): ... + def __eq__(self, other): ... + def __ne__(self, other): ... + def __ge__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __gt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __le__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __lt__(self, other: ArrayLike, /) -> _MaskedArray[bool_]: ... # type: ignore[override] + def __add__(self, other): ... + def __radd__(self, other): ... + def __sub__(self, other): ... + def __rsub__(self, other): ... + def __mul__(self, other): ... + def __rmul__(self, other): ... + def __truediv__(self, other): ... + def __rtruediv__(self, other): ... + def __floordiv__(self, other): ... + def __rfloordiv__(self, other): ... + def __pow__(self, other, mod: None = None, /): ... + def __rpow__(self, other, mod: None = None, /): ... + + # Keep in sync with `ndarray.__iadd__` + @overload + def __iadd__( + self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
+ @overload + def __iadd__(self: _MaskedArray[bytes_], other: _ArrayLikeBytes_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: MaskedArray[Any, dtype[str_] | dtypes.StringDType], + other: _ArrayLikeStr_co | _ArrayLikeString_co, + /, + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __iadd__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__isub__` + @overload + def __isub__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[timedelta64 | datetime64], other: _ArrayLikeTD64_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __isub__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__imul__` + @overload + def __imul__( + self: _MaskedArray[np.bool], other: _ArrayLikeBool_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: MaskedArray[Any, dtype[integer] | dtype[character] | dtypes.StringDType], other: _ArrayLikeInt_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __imul__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__ifloordiv__` + @overload + def __ifloordiv__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ifloordiv__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__itruediv__` + @overload + def __itruediv__( + self: _MaskedArray[floating | timedelta64], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__( + self: _MaskedArray[complexfloating], + other: _ArrayLikeComplex_co, + /, + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __itruediv__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + + # Keep in sync with `ndarray.__ipow__` + @overload + def __ipow__(self: _MaskedArray[integer], other: _ArrayLikeInt_co, /) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[floating], other: _ArrayLikeFloat_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[complexfloating], other: _ArrayLikeComplex_co, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... + @overload + def __ipow__( + self: _MaskedArray[object_], other: Any, / + ) -> MaskedArray[_ShapeT_co, _DTypeT_co]: ... 
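+
+# Behavioural note for the in-place overloads above (illustrative only,
+# assuming standard numpy.ma semantics): in-place arithmetic updates the
+# unmasked data and leaves masked positions masked, while the dtype-based
+# overloads mirror ndarray's casting rules. A minimal sketch:
+#
+#     >>> import numpy.ma as ma
+#     >>> x = ma.array([1, 2, 3], mask=[0, 1, 0])
+#     >>> x += 10
+#     >>> x
+#     masked_array(data=[11, --, 13],
+#                  mask=[False,  True, False],
+#            fill_value=999999)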
+ + # + @property # type: ignore[misc] + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + get_imag: Any + @property # type: ignore[misc] + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... + get_real: Any + + # keep in sync with `np.ma.count` + @overload + def count(self, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... + @overload + def count(self, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... + @overload + def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... + def reshape(self, *s, **kwargs): ... + def resize(self, newshape, refcheck=..., order=...): ... + def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... + def ids(self) -> tuple[int, int]: ... + def iscontiguous(self) -> bool: ... + + @overload + def all( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... + def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... + def dot(self, b, out=..., strict=...): ... + def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... + def cumsum(self, axis=..., dtype=..., out=...): ... + def prod(self, axis=..., dtype=..., out=..., keepdims=...): ... + product: Any + def cumprod(self, axis=..., dtype=..., out=...): ... + def mean(self, axis=..., dtype=..., out=..., keepdims=...): ... + def anom(self, axis=..., dtype=...): ... + def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... 
+ def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ... + def round(self, decimals=..., out=...): ... + def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... + + # Keep in-sync with np.ma.argmin + @overload # type: ignore[override] + def argmin( + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmin( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def argmin( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # Keep in-sync with np.ma.argmax + @overload # type: ignore[override] + def argmax( + self, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., + ) -> intp: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., + ) -> Any: ... + @overload + def argmax( + self, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def argmax( + self, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + def sort( # type: ignore[override] + self, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, + ) -> None: ... + + # + @overload # type: ignore[override] + def min( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _ScalarT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def min( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def min( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + @overload # type: ignore[override] + def max( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> _ScalarT: ... + @overload + def max( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... + ) -> Any: ... + @overload + def max( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def max( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + # + @overload + def ptp( + self: _MaskedArray[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] = False, + ) -> _ScalarT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> Any: ... + @overload + def ptp( + self, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> _ArrayT: ... + @overload + def ptp( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool = False, + ) -> _ArrayT: ... + + # + @overload + def partition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: None = None + ) -> None: ... + @overload + def partition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> None: ... + + # + @overload + def argpartition( + self, + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: None = None, + ) -> _MaskedArray[intp]: ... + @overload + def argpartition( + self: _MaskedArray[np.void], + /, + kth: _ArrayLikeInt, + axis: SupportsIndex | None = -1, + kind: _PartitionKind = "introselect", + order: str | Sequence[str] | None = None, + ) -> _MaskedArray[intp]: ... + + # Keep in-sync with np.ma.take + @overload + def take( # type: ignore[overload-overlap] + self: _MaskedArray[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = 'raise' + ) -> _ScalarT: ... + @overload + def take( + self: _MaskedArray[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = 'raise', + ) -> _MaskedArray[_ScalarT]: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = 'raise', + ) -> _ArrayT: ... + @overload + def take( + self, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = 'raise', + ) -> _ArrayT: ... + + copy: Any + diagonal: Any + flatten: Any + + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + squeeze: Any + + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + / + ) -> MaskedArray[_AnyShape, _DTypeT_co]: ... + + # + def toflex(self) -> Incomplete: ... + def torecords(self) -> Incomplete: ... + def tolist(self, fill_value: Incomplete | None = None) -> Incomplete: ... + def tobytes(self, /, fill_value: Incomplete | None = None, order: _OrderKACF = "C") -> bytes: ... # type: ignore[override] + def tofile(self, /, fid: Incomplete, sep: str = "", format: str = "%s") -> Incomplete: ... + + # + def __reduce__(self): ... + def __deepcopy__(self, memo=...): ... + + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... 
+ @dtype.setter + def dtype(self: MaskedArray[_AnyShape, _DTypeT], dtype: _DTypeT, /) -> None: ... + +class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): + def __new__( + self, # pyright: ignore[reportSelfClsParameterName] + data, + mask=..., + dtype=..., + fill_value=..., + hardmask=..., + copy=..., + subok=..., + ): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def __iter__(self): ... + def __len__(self): ... + def filled(self, fill_value=...): ... + def tolist(self): ... + +def isMaskedArray(x): ... +isarray = isMaskedArray +isMA = isMaskedArray + +# 0D float64 array +class MaskedConstant(MaskedArray[_AnyShape, dtype[float64]]): + def __new__(cls): ... + __class__: Any + def __array_finalize__(self, obj): ... + def __array_wrap__(self, obj, context=..., return_scalar=...): ... + def __format__(self, format_spec): ... + def __reduce__(self): ... + def __iop__(self, other): ... + __iadd__: Any + __isub__: Any + __imul__: Any + __ifloordiv__: Any + __itruediv__: Any + __ipow__: Any + def copy(self, *args, **kwargs): ... + def __copy__(self): ... + def __deepcopy__(self, memo): ... + def __setattr__(self, attr, value): ... + +masked: MaskedConstant +masked_singleton: MaskedConstant +masked_array = MaskedArray + +def array( + data, + dtype=..., + copy=..., + order=..., + mask=..., + fill_value=..., + keep_mask=..., + hard_mask=..., + shrink=..., + subok=..., + ndmin=..., +): ... +def is_masked(x: object) -> bool: ... + +class _extrema_operation(_MaskedUFunc): + compare: Any + fill_value_func: Any + def __init__(self, ufunc, compare, fill_value): ... + # NOTE: in practice `b` has a default value, but users should + # explicitly provide a value here as the default is deprecated + def __call__(self, a, b): ... + def reduce(self, target, axis=...): ... + def outer(self, a, b): ... + +@overload +def min( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def min( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +@overload +def max( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def max( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... 
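+
+# The min/max overloads above resolve to a scalar for the default axis=None
+# and to the supplied out-array otherwise. Masked entries are replaced by
+# fill_value before the reduction, so they are ignored unless an explicit
+# fill_value makes them competitive. A minimal sketch, assuming standard
+# numpy.ma semantics:
+#
+#     >>> import numpy.ma as ma
+#     >>> a = ma.array([5, 1, 8], mask=[0, 0, 1])
+#     >>> ma.max(a)                    # the masked 8 is ignored
+#     5
+#     >>> ma.min(a, fill_value=-100)   # an explicit fill_value participates
+#     -100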
+ +@overload +def ptp( + obj: _ArrayLike[_ScalarT], + axis: None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: Literal[False] | _NoValueType = ..., +) -> _ScalarT: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + out: None = None, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ... +) -> Any: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def ptp( + obj: ArrayLike, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + fill_value: _ScalarLike_co | None = None, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +class _frommethod: + __name__: Any + __doc__: Any + reversed: Any + def __init__(self, methodname, reversed=...): ... + def getdoc(self): ... + def __call__(self, a, *args, **params): ... + +all: _frommethod +anomalies: _frommethod +anom: _frommethod +any: _frommethod +compress: _frommethod +cumprod: _frommethod +cumsum: _frommethod +copy: _frommethod +diagonal: _frommethod +harden_mask: _frommethod +ids: _frommethod +mean: _frommethod +nonzero: _frommethod +prod: _frommethod +product: _frommethod +ravel: _frommethod +repeat: _frommethod +soften_mask: _frommethod +std: _frommethod +sum: _frommethod +swapaxes: _frommethod +trace: _frommethod +var: _frommethod + +@overload +def count(self: ArrayLike, axis: None = None, keepdims: Literal[False] | _NoValueType = ...) -> int: ... +@overload +def count(self: ArrayLike, axis: _ShapeLike, keepdims: bool | _NoValueType = ...) -> NDArray[int_]: ... +@overload +def count(self: ArrayLike, axis: _ShapeLike | None = ..., *, keepdims: Literal[True]) -> NDArray[int_]: ... +@overload +def count(self: ArrayLike, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... + +@overload +def argmin( + self: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmin( + self: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmin( + self: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def argmin( + self: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... + +# +@overload +def argmax( + self: ArrayLike, + axis: None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: Literal[False] | _NoValueType = ..., +) -> intp: ... +@overload +def argmax( + self: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + out: None = None, + *, + keepdims: bool | _NoValueType = ..., +) -> Any: ... +@overload +def argmax( + self: ArrayLike, + axis: SupportsIndex | None = None, + fill_value: _ScalarLike_co | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... +@overload +def argmax( + self: ArrayLike, + axis: SupportsIndex | None, + fill_value: _ScalarLike_co | None, + out: _ArrayT, + *, + keepdims: bool | _NoValueType = ..., +) -> _ArrayT: ... 
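+
+# count/argmin/argmax follow the same overload pattern: a scalar for the
+# default axis=None, array results otherwise. A minimal doctest-style
+# sketch, assuming standard numpy.ma semantics:
+#
+#     >>> import numpy.ma as ma
+#     >>> a = ma.array([[1, 9, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 0, 0]])
+#     >>> ma.count(a)           # unmasked elements in total
+#     5
+#     >>> ma.count(a, axis=0)
+#     array([2, 1, 2])
+#     >>> ma.argmax(a)          # flat index; the masked 9 is skipped
+#     5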
+ +minimum: _extrema_operation +maximum: _extrema_operation + +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _IntLike_co, + axis: None = None, + out: None = None, + mode: _ModeKind = 'raise' +) -> _ScalarT: ... +@overload +def take( + a: _ArrayLike[_ScalarT], + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = 'raise', +) -> _MaskedArray[_ScalarT]: ... +@overload +def take( + a: ArrayLike, + indices: _IntLike_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = 'raise', +) -> Any: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + out: None = None, + mode: _ModeKind = 'raise', +) -> _MaskedArray[Any]: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None, + out: _ArrayT, + mode: _ModeKind = 'raise', +) -> _ArrayT: ... +@overload +def take( + a: ArrayLike, + indices: _ArrayLikeInt_co, + axis: SupportsIndex | None = None, + *, + out: _ArrayT, + mode: _ModeKind = 'raise', +) -> _ArrayT: ... + +def power(a, b, third=...): ... +def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=..., *, stable=...): ... +@overload +def sort( + a: _ArrayT, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, +) -> _ArrayT: ... +@overload +def sort( + a: ArrayLike, + axis: SupportsIndex = -1, + kind: _SortKind | None = None, + order: str | Sequence[str] | None = None, + endwith: bool | None = True, + fill_value: _ScalarLike_co | None = None, + *, + stable: Literal[False] | None = False, +) -> NDArray[Any]: ... +@overload +def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... +@overload +def compressed(x: ArrayLike) -> _Array1D[Any]: ... +def concatenate(arrays, axis=...): ... +def diag(v, k=...): ... +def left_shift(a, n): ... +def right_shift(a, n): ... +def put(a: NDArray[Any], indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = 'raise') -> None: ... +def putmask(a: NDArray[Any], mask: _ArrayLikeBool_co, values: ArrayLike) -> None: ... +def transpose(a, axes=...): ... +def reshape(a, new_shape, order=...): ... +def resize(x, new_shape): ... +def ndim(obj: ArrayLike) -> int: ... +def shape(obj): ... +def size(obj: ArrayLike, axis: SupportsIndex | None = None) -> int: ... +def diff(a, /, n=..., axis=..., prepend=..., append=...): ... +def where(condition, x=..., y=...): ... +def choose(indices, choices, out=..., mode=...): ... +def round_(a, decimals=..., out=...): ... +round = round_ + +def inner(a, b): ... +innerproduct = inner + +def outer(a, b): ... +outerproduct = outer + +def correlate(a, v, mode=..., propagate_mask=...): ... +def convolve(a, v, mode=..., propagate_mask=...): ... + +def allequal(a: ArrayLike, b: ArrayLike, fill_value: bool = True) -> bool: ... + +def allclose(a: ArrayLike, b: ArrayLike, masked_equal: bool = True, rtol: float = 1e-5, atol: float = 1e-8) -> bool: ... + +def asarray(a, dtype=..., order=...): ... +def asanyarray(a, dtype=...): ... +def fromflex(fxarray): ... + +class _convert2ma: + def __init__(self, /, funcname: str, np_ret: str, np_ma_ret: str, params: dict[str, Any] | None = None) -> None: ... + def __call__(self, /, *args: object, **params: object) -> Any: ... + def getdoc(self, /, np_ret: str, np_ma_ret: str) -> str | None: ... 
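+
+# _convert2ma wraps the plain numpy constructors listed below so that they
+# return MaskedArray views and accept the extra ma-only keywords declared
+# in `params` (see the core.py implementation earlier in this patch). A
+# minimal usage sketch, assuming standard numpy.ma semantics:
+#
+#     >>> import numpy.ma as ma
+#     >>> z = ma.zeros((2, 2), fill_value=-1)   # extra kwarg consumed here
+#     >>> type(z).__name__
+#     'MaskedArray'
+#     >>> float(z.fill_value)
+#     -1.0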
+ +arange: _convert2ma +clip: _convert2ma +empty: _convert2ma +empty_like: _convert2ma +frombuffer: _convert2ma +fromfunction: _convert2ma +identity: _convert2ma +indices: _convert2ma +ones: _convert2ma +ones_like: _convert2ma +squeeze: _convert2ma +zeros: _convert2ma +zeros_like: _convert2ma + +def append(a, b, axis=...): ... +def dot(a, b, strict=..., out=...): ... +def mask_rowcols(a, axis=...): ... diff --git a/python/numpy/ma/extras.py b/python/numpy/ma/extras.py new file mode 100644 index 000000000..094c1e26b --- /dev/null +++ b/python/numpy/ma/extras.py @@ -0,0 +1,2344 @@ +""" +Masked arrays add-ons. + +A collection of utilities for `numpy.ma`. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +__all__ = [ + 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d', + 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack', + 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows', + 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d', + 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack', + 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows', + 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate', + 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack', + 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack', + ] + +import itertools +import warnings + +import numpy as np +from numpy import array as nxarray +from numpy import ndarray +from numpy.lib._function_base_impl import _ureduce +from numpy.lib._index_tricks_impl import AxisConcatenator +from numpy.lib.array_utils import normalize_axis_index, normalize_axis_tuple + +from . import core as ma +from .core import ( # noqa: F401 + MAError, + MaskedArray, + add, + array, + asarray, + concatenate, + count, + dot, + filled, + get_masked_subclass, + getdata, + getmask, + getmaskarray, + make_mask_descr, + mask_or, + masked, + masked_array, + nomask, + ones, + sort, + zeros, +) + + +def issequence(seq): + """ + Is seq a sequence (ndarray, list or tuple)? + + """ + return isinstance(seq, (ndarray, tuple, list)) + + +def count_masked(arr, axis=None): + """ + Count the number of masked elements along the given axis. + + Parameters + ---------- + arr : array_like + An array with (possibly) masked elements. + axis : int, optional + Axis along which to count. If None (default), a flattened + version of the array is used. + + Returns + ------- + count : int, ndarray + The total number of masked elements (axis=None) or the number + of masked elements along each slice of the given axis. + + See Also + -------- + MaskedArray.count : Count non-masked elements. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(9).reshape((3,3)) + >>> a = np.ma.array(a) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> np.ma.count_masked(a) + 3 + + When the `axis` keyword is used an array is returned. + + >>> np.ma.count_masked(a, axis=0) + array([1, 1, 1]) + >>> np.ma.count_masked(a, axis=1) + array([0, 2, 1]) + + """ + m = getmaskarray(arr) + return m.sum(axis) + + +def masked_all(shape, dtype=float): + """ + Empty masked array with all elements masked. + + Return an empty masked array of the given shape and dtype, where all the + data are masked. 
+ + Parameters + ---------- + shape : int or tuple of ints + Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``. + dtype : dtype, optional + Data type of the output. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + See Also + -------- + masked_all_like : Empty masked array modelled on an existing array. + + Notes + ----- + Unlike other masked array creation functions (e.g. `numpy.ma.zeros`, + `numpy.ma.ones`, `numpy.ma.full`), `masked_all` does not initialize the + values of the array, and may therefore be marginally faster. However, + the values stored in the newly allocated array are arbitrary. For + reproducible behavior, be sure to set each element of the array before + reading. + + Examples + -------- + >>> import numpy as np + >>> np.ma.masked_all((3, 3)) + masked_array( + data=[[--, --, --], + [--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float64) + + The `dtype` parameter defines the underlying data type. + + >>> a = np.ma.masked_all((3, 3)) + >>> a.dtype + dtype('float64') + >>> a = np.ma.masked_all((3, 3), dtype=np.int32) + >>> a.dtype + dtype('int32') + + """ + a = masked_array(np.empty(shape, dtype), + mask=np.ones(shape, make_mask_descr(dtype))) + return a + + +def masked_all_like(arr): + """ + Empty masked array with the properties of an existing array. + + Return an empty masked array of the same shape and dtype as + the array `arr`, where all the data are masked. + + Parameters + ---------- + arr : ndarray + An array describing the shape and dtype of the required MaskedArray. + + Returns + ------- + a : MaskedArray + A masked array with all data masked. + + Raises + ------ + AttributeError + If `arr` doesn't have a shape attribute (i.e. not an ndarray) + + See Also + -------- + masked_all : Empty masked array with all elements masked. + + Notes + ----- + Unlike other masked array creation functions (e.g. `numpy.ma.zeros_like`, + `numpy.ma.ones_like`, `numpy.ma.full_like`), `masked_all_like` does not + initialize the values of the array, and may therefore be marginally + faster. However, the values stored in the newly allocated array are + arbitrary. For reproducible behavior, be sure to set each element of the + array before reading. + + Examples + -------- + >>> import numpy as np + >>> arr = np.zeros((2, 3), dtype=np.float32) + >>> arr + array([[0., 0., 0.], + [0., 0., 0.]], dtype=float32) + >>> np.ma.masked_all_like(arr) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=np.float64(1e+20), + dtype=float32) + + The dtype of the masked array matches the dtype of `arr`. + + >>> arr.dtype + dtype('float32') + >>> np.ma.masked_all_like(arr).dtype + dtype('float32') + + """ + a = np.empty_like(arr).view(MaskedArray) + a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype)) + return a + + +#####-------------------------------------------------------------------------- +#---- --- Standard functions --- +#####-------------------------------------------------------------------------- +class _fromnxfunction: + """ + Defines a wrapper to adapt NumPy functions to masked arrays. + + + An instance of `_fromnxfunction` can be called with the same parameters + as the wrapped NumPy function. The docstring of `newfunc` is adapted from + the wrapped function as well, see `getdoc`. + + This class should not be used directly. 
Instead, one of its extensions that + provides support for a specific type of input should be used. + + Parameters + ---------- + funcname : str + The name of the function to be adapted. The function should be + in the NumPy namespace (i.e. ``np.funcname``). + + """ + + def __init__(self, funcname): + self.__name__ = funcname + self.__qualname__ = funcname + self.__doc__ = self.getdoc() + + def getdoc(self): + """ + Retrieve the docstring and signature from the function. + + The ``__doc__`` attribute of the function is used as the docstring for + the new masked array version of the function. A note on application + of the function to the mask is appended. + + Parameters + ---------- + None + + """ + npfunc = getattr(np, self.__name__, None) + doc = getattr(npfunc, '__doc__', None) + if doc: + sig = ma.get_object_signature(npfunc) + doc = ma.doc_note(doc, "The function is applied to both the _data " + "and the _mask, if any.") + if sig: + sig = self.__name__ + sig + "\n\n" + return sig + doc + return + + def __call__(self, *args, **params): + pass + + +class _fromnxfunction_single(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single array + argument followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + if isinstance(x, ndarray): + _d = func(x.__array__(), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + else: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_seq(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with a single sequence + of arrays followed by auxiliary args that are passed verbatim for + both the data and mask calls. + """ + def __call__(self, x, *args, **params): + func = getattr(np, self.__name__) + _d = func(tuple(np.asarray(a) for a in x), *args, **params) + _m = func(tuple(getmaskarray(a) for a in x), *args, **params) + return masked_array(_d, mask=_m) + + +class _fromnxfunction_args(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. The first non-array-like input marks the beginning of the + arguments that are passed verbatim for both the data and mask calls. + Array arguments are processed independently and the results are + returned in a list. If only one array is found, the return value is + just the processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + arrays = [] + args = list(args) + while len(args) > 0 and issequence(args[0]): + arrays.append(args.pop(0)) + res = [] + for x in arrays: + _d = func(np.asarray(x), *args, **params) + _m = func(getmaskarray(x), *args, **params) + res.append(masked_array(_d, mask=_m)) + if len(arrays) == 1: + return res[0] + return res + + +class _fromnxfunction_allargs(_fromnxfunction): + """ + A version of `_fromnxfunction` that is called with multiple array + arguments. Similar to `_fromnxfunction_args` except that all args + are converted to arrays even if they are not so already. This makes + it possible to process scalars as 1-D arrays. Only keyword arguments + are passed through verbatim for the data and mask calls. Arrays + arguments are processed independently and the results are returned + in a list. 
If only one arg is present, the return value is just the + processed array instead of a list. + """ + def __call__(self, *args, **params): + func = getattr(np, self.__name__) + res = [] + for x in args: + _d = func(np.asarray(x), **params) + _m = func(getmaskarray(x), **params) + res.append(masked_array(_d, mask=_m)) + if len(args) == 1: + return res[0] + return res + + +atleast_1d = _fromnxfunction_allargs('atleast_1d') +atleast_2d = _fromnxfunction_allargs('atleast_2d') +atleast_3d = _fromnxfunction_allargs('atleast_3d') + +vstack = row_stack = _fromnxfunction_seq('vstack') +hstack = _fromnxfunction_seq('hstack') +column_stack = _fromnxfunction_seq('column_stack') +dstack = _fromnxfunction_seq('dstack') +stack = _fromnxfunction_seq('stack') + +hsplit = _fromnxfunction_single('hsplit') + +diagflat = _fromnxfunction_single('diagflat') + + +#####-------------------------------------------------------------------------- +#---- +#####-------------------------------------------------------------------------- +def flatten_inplace(seq): + """Flatten a sequence in place.""" + k = 0 + while (k != len(seq)): + while hasattr(seq[k], '__iter__'): + seq[k:(k + 1)] = seq[k] + k += 1 + return seq + + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): + """ + (This docstring should be overwritten) + """ + arr = array(arr, copy=False, subok=True) + nd = arr.ndim + axis = normalize_axis_index(axis, nd) + ind = [0] * (nd - 1) + i = np.zeros(nd, 'O') + indlist = list(range(nd)) + indlist.remove(axis) + i[axis] = slice(None, None) + outshape = np.asarray(arr.shape).take(indlist) + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + # if res is a number, then we have a smaller output array + asscalar = np.isscalar(res) + if not asscalar: + try: + len(res) + except TypeError: + asscalar = True + # Note: we shouldn't set the dtype of the output from the first result + # so we force the type to object, and build a list of dtypes. 
We'll + # just take the largest, to avoid some downcasting + dtypes = [] + if asscalar: + dtypes.append(np.asarray(res).dtype) + outarr = zeros(outshape, object) + outarr[tuple(ind)] = res + Ntot = np.prod(outshape) + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= outshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(ind)] = res + dtypes.append(asarray(res).dtype) + k += 1 + else: + res = array(res, copy=False, subok=True) + j = i.copy() + j[axis] = ([slice(None, None)] * res.ndim) + j.put(indlist, ind) + Ntot = np.prod(outshape) + holdshape = outshape + outshape = list(arr.shape) + outshape[axis] = res.shape + dtypes.append(asarray(res).dtype) + outshape = flatten_inplace(outshape) + outarr = zeros(outshape, object) + outarr[tuple(flatten_inplace(j.tolist()))] = res + k = 1 + while k < Ntot: + # increment the index + ind[-1] += 1 + n = -1 + while (ind[n] >= holdshape[n]) and (n > (1 - nd)): + ind[n - 1] += 1 + ind[n] = 0 + n -= 1 + i.put(indlist, ind) + j.put(indlist, ind) + res = func1d(arr[tuple(i.tolist())], *args, **kwargs) + outarr[tuple(flatten_inplace(j.tolist()))] = res + dtypes.append(asarray(res).dtype) + k += 1 + max_dtypes = np.dtype(np.asarray(dtypes).max()) + if not hasattr(arr, '_mask'): + result = np.asarray(outarr, dtype=max_dtypes) + else: + result = asarray(outarr, dtype=max_dtypes) + result.fill_value = ma.default_fill_value(result) + return result + + +apply_along_axis.__doc__ = np.apply_along_axis.__doc__ + + +def apply_over_axes(func, a, axes): + """ + (This docstring will be overwritten) + """ + val = asarray(a) + N = a.ndim + if array(axes).ndim == 0: + axes = (axes,) + for axis in axes: + if axis < 0: + axis = N + axis + args = (val, axis) + res = func(*args) + if res.ndim == val.ndim: + val = res + else: + res = ma.expand_dims(res, axis) + if res.ndim == val.ndim: + val = res + else: + raise ValueError("function is not returning " + "an array of the correct shape") + return val + + +if apply_over_axes.__doc__ is not None: + apply_over_axes.__doc__ = np.apply_over_axes.__doc__[ + :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \ + """ + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = np.ma.masked + >>> a[:,1,:] = np.ma.masked + >>> a + masked_array( + data=[[[0, --, 2, 3], + [--, --, --, --], + [8, 9, 10, 11]], + [[12, --, 14, 15], + [--, --, --, --], + [20, 21, 22, 23]]], + mask=[[[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]], + [[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]]], + fill_value=999999) + >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2]) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + + Tuple axis arguments to ufuncs are equivalent: + + >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) + """ + + +def average(a, axis=None, weights=None, returned=False, *, + keepdims=np._NoValue): + """ + Return the weighted average of array over the given axis. + + Parameters + ---------- + a : array_like + Data to be averaged. + Masked entries are not taken into account in the computation. + axis : None or int or tuple of ints, optional + Axis or axes along which to average `a`. 
The default, + `axis=None`, will average over all of the elements of the input array. + If axis is a tuple of ints, averaging is performed on all of the axes + specified in the tuple instead of a single axis or all the axes as + before. + weights : array_like, optional + An array of weights associated with the values in `a`. Each value in + `a` contributes to the average according to its associated weight. + The array of weights must be the same shape as `a` if no axis is + specified, otherwise the weights must have dimensions and shape + consistent with `a` along the specified axis. + If `weights=None`, then all data in `a` are assumed to have a + weight equal to one. + The calculation is:: + + avg = sum(a * weights) / sum(weights) + + where the sum is over all included elements. + The only constraint on the values of `weights` is that `sum(weights)` + must not be 0. + returned : bool, optional + Flag indicating whether a tuple ``(result, sum of weights)`` + should be returned as output (True), or just the result (False). + Default is False. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the original `a`. + *Note:* `keepdims` will not work with instances of `numpy.matrix` + or other classes whose methods do not support `keepdims`. + + .. versionadded:: 1.23.0 + + Returns + ------- + average, [sum_of_weights] : (tuple of) scalar or MaskedArray + The average along the specified axis. When returned is `True`, + return a tuple with the average as the first element and the sum + of the weights as the second element. The return type is `np.float64` + if `a` is of integer type and floats smaller than `float64`, or the + input data-type, otherwise. If returned, `sum_of_weights` is always + `float64`. + + Raises + ------ + ZeroDivisionError + When all weights along axis are zero. See `numpy.ma.average` for a + version robust to this type of error. + TypeError + When `weights` does not have the same shape as `a`, and `axis=None`. + ValueError + When `weights` does not have dimensions and shape consistent with `a` + along specified `axis`. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) + >>> np.ma.average(a, weights=[3, 1, 0, 0]) + 1.25 + + >>> x = np.ma.arange(6.).reshape(3, 2) + >>> x + masked_array( + data=[[0., 1.], + [2., 3.], + [4., 5.]], + mask=False, + fill_value=1e+20) + >>> data = np.arange(8).reshape((2, 2, 2)) + >>> data + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.ma.average(data, axis=(0, 1), weights=[[1./4, 3./4], [1., 1./2]]) + masked_array(data=[3.4, 4.4], + mask=[False, False], + fill_value=1e+20) + >>> np.ma.average(data, axis=0, weights=[[1./4, 3./4], [1., 1./2]]) + Traceback (most recent call last): + ... + ValueError: Shape of weights must be consistent + with shape of a along specified axis. + + >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], + ... returned=True) + >>> avg + masked_array(data=[2.6666666666666665, 3.6666666666666665], + mask=[False, False], + fill_value=1e+20) + + With ``keepdims=True``, the following result has shape (3, 1). 
+ + >>> np.ma.average(x, axis=1, keepdims=True) + masked_array( + data=[[0.5], + [2.5], + [4.5]], + mask=False, + fill_value=1e+20) + """ + a = asarray(a) + m = getmask(a) + + if axis is not None: + axis = normalize_axis_tuple(axis, a.ndim, argname="axis") + + if keepdims is np._NoValue: + # Don't pass on the keepdims argument if one wasn't given. + keepdims_kw = {} + else: + keepdims_kw = {'keepdims': keepdims} + + if weights is None: + avg = a.mean(axis, **keepdims_kw) + scl = avg.dtype.type(a.count(axis)) + else: + wgt = asarray(weights) + + if issubclass(a.dtype.type, (np.integer, np.bool)): + result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') + else: + result_dtype = np.result_type(a.dtype, wgt.dtype) + + # Sanity checks + if a.shape != wgt.shape: + if axis is None: + raise TypeError( + "Axis must be specified when shapes of a and weights " + "differ.") + if wgt.shape != tuple(a.shape[ax] for ax in axis): + raise ValueError( + "Shape of weights must be consistent with " + "shape of a along specified axis.") + + # setup wgt to broadcast along axis + wgt = wgt.transpose(np.argsort(axis)) + wgt = wgt.reshape(tuple((s if ax in axis else 1) + for ax, s in enumerate(a.shape))) + + if m is not nomask: + wgt = wgt * (~a.mask) + wgt.mask |= a.mask + + scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) + avg = np.multiply(a, wgt, + dtype=result_dtype).sum(axis, **keepdims_kw) / scl + + if returned: + if scl.shape != avg.shape: + scl = np.broadcast_to(scl, avg.shape).copy() + return avg, scl + else: + return avg + + +def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): + """ + Compute the median along the specified axis. + + Returns the median of the array elements. + + Parameters + ---------- + a : array_like + Input array or object that can be converted to an array. + axis : int, optional + Axis along which the medians are computed. The default (None) is + to compute the median along a flattened version of the array. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. + overwrite_input : bool, optional + If True, then allow use of memory of input array (a) for + calculations. The input array will be modified by the call to + median. This will save memory when you do not need to preserve + the contents of the input array. Treat the input as undefined, + but it will probably be fully or partially sorted. Default is + False. Note that, if `overwrite_input` is True, and the input + is not already an `ndarray`, an error will be raised. + keepdims : bool, optional + If this is set to True, the axes which are reduced are left + in the result as dimensions with size one. With this option, + the result will broadcast correctly against the input array. + + Returns + ------- + median : ndarray + A new array holding the result is returned unless out is + specified, in which case a reference to out is returned. + Return data-type is `float64` for integers and floats smaller than + `float64`, or the input data-type, otherwise. + + See Also + -------- + mean + + Notes + ----- + Given a vector ``V`` with ``N`` non masked values, the median of ``V`` + is the middle value of a sorted copy of ``V`` (``Vs``) - i.e. + ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2`` + when ``N`` is even. 
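A short sketch (public `np.ma` API assumed, not part of the patch) exercising the two reductions implemented above: `average` with weights along a reduced axis, where masked cells get zero effective weight, and `median`, which sorts masked values to the end so only unmasked data is counted:

```python
import numpy as np

x = np.ma.array([[1., 2., 3.],
                 [4., 5., 6.]], mask=[[0, 0, 1],
                                      [0, 0, 0]])

# Weights along axis 0 must match x.shape[0]; masked cells drop out of
# both the numerator and the weight sum.
avg, wsum = np.ma.average(x, axis=0, weights=[1., 3.], returned=True)
print(avg)    # last column averages only the unmasked 6.0
print(wsum)   # per-column sums of the weights actually used

# median counts only the unmasked entries of each slice
y = np.ma.array([9., 1., 7., 3., 5.], mask=[1, 0, 0, 0, 0])
print(np.ma.median(y))   # median of [1, 3, 5, 7] -> 4.0
```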
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
+    >>> np.ma.median(x)
+    1.5
+
+    >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
+    >>> np.ma.median(x)
+    2.5
+    >>> np.ma.median(x, axis=-1, overwrite_input=True)
+    masked_array(data=[2.0, 5.0],
+                 mask=[False, False],
+                 fill_value=1e+20)
+
+    """
+    if not hasattr(a, 'mask'):
+        m = np.median(getdata(a, subok=True), axis=axis,
+                      out=out, overwrite_input=overwrite_input,
+                      keepdims=keepdims)
+        if isinstance(m, np.ndarray) and 1 <= m.ndim:
+            return masked_array(m, copy=False)
+        else:
+            return m
+
+    return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
+                    overwrite_input=overwrite_input)
+
+
+def _median(a, axis=None, out=None, overwrite_input=False):
+    # When an unmasked NaN is present return it, so we need to sort the NaN
+    # values behind the mask.
+    if np.issubdtype(a.dtype, np.inexact):
+        fill_value = np.inf
+    else:
+        fill_value = None
+    if overwrite_input:
+        if axis is None:
+            asorted = a.ravel()
+            asorted.sort(fill_value=fill_value)
+        else:
+            a.sort(axis=axis, fill_value=fill_value)
+            asorted = a
+    else:
+        asorted = sort(a, axis=axis, fill_value=fill_value)
+
+    if axis is None:
+        axis = 0
+    else:
+        axis = normalize_axis_index(axis, asorted.ndim)
+
+    if asorted.shape[axis] == 0:
+        # For an empty axis, integer indices fail, so use slicing to get the
+        # same result as median (which is the mean of an empty slice = nan).
+        indexer = [slice(None)] * asorted.ndim
+        indexer[axis] = slice(0, 0)
+        indexer = tuple(indexer)
+        return np.ma.mean(asorted[indexer], axis=axis, out=out)
+
+    if asorted.ndim == 1:
+        idx, odd = divmod(count(asorted), 2)
+        mid = asorted[idx + odd - 1:idx + 1]
+        if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
+            # avoid inf / x = masked
+            s = mid.sum(out=out)
+            if not odd:
+                s = np.true_divide(s, 2., casting='safe', out=out)
+            s = np.lib._utils_impl._median_nancheck(asorted, s, axis)
+        else:
+            s = mid.mean(out=out)
+
+        # If the result is masked, either the input contained enough
+        # minimum_fill_value entries that one would be the median, or all
+        # values are masked.
+        if np.ma.is_masked(s) and not np.all(asorted.mask):
+            return np.ma.minimum_fill_value(asorted)
+        return s
+
+    counts = count(asorted, axis=axis, keepdims=True)
+    h = counts // 2
+
+    # duplicate high if odd number of elements so mean does nothing
+    odd = counts % 2 == 1
+    l = np.where(odd, h, h - 1)
+
+    lh = np.concatenate([l, h], axis=axis)
+
+    # get low and high median
+    low_high = np.take_along_axis(asorted, lh, axis=axis)
+
+    def replace_masked(s):
+        # Replace masked entries with minimum_fill_value unless all values
+        # are masked. This is required as the sort order of values equal to
+        # or larger than the fill value is undefined, so a valid value could
+        # otherwise be placed elsewhere, e.g. [4, --, inf].
+        if np.ma.is_masked(s):
+            rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
+            s.data[rep] = np.ma.minimum_fill_value(asorted)
+            s.mask[rep] = False
+
+    replace_masked(low_high)
+
+    if np.issubdtype(asorted.dtype, np.inexact):
+        # avoid inf / x = masked
+        s = np.ma.sum(low_high, axis=axis, out=out)
+        np.true_divide(s.data, 2., casting='unsafe', out=s.data)
+
+        s = np.lib._utils_impl._median_nancheck(asorted, s, axis)
+    else:
+        s = np.ma.mean(low_high, axis=axis, out=out)
+
+    return s
+
+
+def compress_nd(x, axis=None):
+    """Suppress slices from multiple dimensions which contain masked values.
+
+    Parameters
+    ----------
+    x : array_like, MaskedArray
+        The array to operate on.
If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with `mask` + set to `nomask`. + axis : tuple of ints or int, optional + Which dimensions to suppress slices from can be configured with this + parameter. + - If axis is a tuple of ints, those are the axes to suppress slices from. + - If axis is an int, then that is the only axis to suppress slices from. + - If axis is None, all axis are selected. + + Returns + ------- + compress_array : ndarray + The compressed array. + + Examples + -------- + >>> import numpy as np + >>> arr = [[1, 2], [3, 4]] + >>> mask = [[0, 1], [0, 0]] + >>> x = np.ma.array(arr, mask=mask) + >>> np.ma.compress_nd(x, axis=0) + array([[3, 4]]) + >>> np.ma.compress_nd(x, axis=1) + array([[1], + [3]]) + >>> np.ma.compress_nd(x) + array([[3]]) + + """ + x = asarray(x) + m = getmask(x) + # Set axis to tuple of ints + if axis is None: + axis = tuple(range(x.ndim)) + else: + axis = normalize_axis_tuple(axis, x.ndim) + + # Nothing is masked: return x + if m is nomask or not m.any(): + return x._data + # All is masked: return empty + if m.all(): + return nxarray([]) + # Filter elements through boolean indexing + data = x._data + for ax in axis: + axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim))) + data = data[(slice(None),) * ax + (~m.any(axis=axes),)] + return data + + +def compress_rowcols(x, axis=None): + """ + Suppress the rows and/or columns of a 2-D array that contain + masked values. + + The suppression behavior is selected with the `axis` parameter. + + - If axis is None, both rows and columns are suppressed. + - If axis is 0, only rows are suppressed. + - If axis is 1 or -1, only columns are suppressed. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. Default is None. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> x + masked_array( + data=[[--, 1, 2], + [--, 4, 5], + [6, 7, 8]], + mask=[[ True, False, False], + [ True, False, False], + [False, False, False]], + fill_value=999999) + + >>> np.ma.compress_rowcols(x) + array([[7, 8]]) + >>> np.ma.compress_rowcols(x, 0) + array([[6, 7, 8]]) + >>> np.ma.compress_rowcols(x, 1) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + if asarray(x).ndim != 2: + raise NotImplementedError("compress_rowcols works for 2D arrays only.") + return compress_nd(x, axis=axis) + + +def compress_rows(a): + """ + Suppress whole rows of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see + `compress_rowcols` for details. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + See Also + -------- + compress_rowcols + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... 
[0, 0, 0]]) + >>> np.ma.compress_rows(a) + array([[6, 7, 8]]) + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_rows works for 2D arrays only.") + return compress_rowcols(a, 0) + + +def compress_cols(a): + """ + Suppress whole columns of a 2-D array that contain masked values. + + This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see + `compress_rowcols` for details. + + Parameters + ---------- + x : array_like, MaskedArray + The array to operate on. If not a MaskedArray instance (or if no array + elements are masked), `x` is interpreted as a MaskedArray with + `mask` set to `nomask`. Must be a 2D array. + + Returns + ------- + compressed_array : ndarray + The compressed array. + + See Also + -------- + compress_rowcols + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], + ... [1, 0, 0], + ... [0, 0, 0]]) + >>> np.ma.compress_cols(a) + array([[1, 2], + [4, 5], + [7, 8]]) + + """ + a = asarray(a) + if a.ndim != 2: + raise NotImplementedError("compress_cols works for 2D arrays only.") + return compress_rowcols(a, 1) + + +def mask_rowcols(a, axis=None): + """ + Mask rows and/or columns of a 2D array that contain masked values. + + Mask whole rows and/or columns of a 2D array that contain + masked values. The masking behavior is selected using the + `axis` parameter. + + - If `axis` is None, rows *and* columns are masked. + - If `axis` is 0, only rows are masked. + - If `axis` is 1 or -1, only columns are masked. + + Parameters + ---------- + a : array_like, MaskedArray + The array to mask. If not a MaskedArray instance (or if no array + elements are masked), the result is a MaskedArray with `mask` set + to `nomask` (False). Must be a 2D array. + axis : int, optional + Axis along which to perform the operation. If None, applies to a + flattened version of the array. + + Returns + ------- + a : MaskedArray + A modified version of the input array, masked depending on the value + of the `axis` parameter. + + Raises + ------ + NotImplementedError + If input array `a` is not 2D. + + See Also + -------- + mask_rows : Mask rows of a 2D array that contain masked values. + mask_cols : Mask cols of a 2D array that contain masked values. + masked_where : Mask where a condition is met. + + Notes + ----- + The input array's mask is modified by this function. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> np.ma.mask_rowcols(a) + masked_array( + data=[[0, --, 0], + [--, --, --], + [0, --, 0]], + mask=[[False, True, False], + [ True, True, True], + [False, True, False]], + fill_value=1) + + """ + a = array(a, subok=False) + if a.ndim != 2: + raise NotImplementedError("mask_rowcols works for 2D arrays only.") + m = getmask(a) + # Nothing is masked: return a + if m is nomask or not m.any(): + return a + maskedval = m.nonzero() + a._mask = a._mask.copy() + if not axis: + a[np.unique(maskedval[0])] = masked + if axis in [None, 1, -1]: + a[:, np.unique(maskedval[1])] = masked + return a + + +def mask_rows(a, axis=np._NoValue): + """ + Mask rows of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0. 
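To make the `axis` convention of `mask_rowcols` above concrete, a quick usage sketch (not part of the patch; copies are taken because the function modifies the input mask in place, as its Notes section warns):

```python
import numpy as np

a = np.ma.array(np.arange(9).reshape(3, 3))
a[1, 1] = np.ma.masked

print(np.ma.mask_rowcols(a.copy(), axis=0).mask)  # whole row 1 masked
print(np.ma.mask_rowcols(a.copy(), axis=1).mask)  # whole column 1 masked
print(np.ma.mask_rowcols(a.copy()).mask)          # axis=None: row and column
```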
+ + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + + >>> np.ma.mask_rows(a) + masked_array( + data=[[0, 0, 0], + [--, --, --], + [0, 0, 0]], + mask=[[False, False, False], + [ True, True, True], + [False, False, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 0) + + +def mask_cols(a, axis=np._NoValue): + """ + Mask columns of a 2D array that contain masked values. + + This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1. + + See Also + -------- + mask_rowcols : Mask rows and/or columns of a 2D array. + masked_where : Mask where a condition is met. + + Examples + -------- + >>> import numpy as np + >>> a = np.zeros((3, 3), dtype=int) + >>> a[1, 1] = 1 + >>> a + array([[0, 0, 0], + [0, 1, 0], + [0, 0, 0]]) + >>> a = np.ma.masked_equal(a, 1) + >>> a + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> np.ma.mask_cols(a) + masked_array( + data=[[0, --, 0], + [0, --, 0], + [0, --, 0]], + mask=[[False, True, False], + [False, True, False], + [False, True, False]], + fill_value=1) + + """ + if axis is not np._NoValue: + # remove the axis argument when this deprecation expires + # NumPy 1.18.0, 2019-11-28 + warnings.warn( + "The axis argument has always been ignored, in future passing it " + "will raise TypeError", DeprecationWarning, stacklevel=2) + return mask_rowcols(a, 1) + + +#####-------------------------------------------------------------------------- +#---- --- arraysetops --- +#####-------------------------------------------------------------------------- + +def ediff1d(arr, to_end=None, to_begin=None): + """ + Compute the differences between consecutive elements of an array. + + This function is the equivalent of `numpy.ediff1d` that takes masked + values into account, see `numpy.ediff1d` for details. + + See Also + -------- + numpy.ediff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> arr = np.ma.array([1, 2, 4, 7, 0]) + >>> np.ma.ediff1d(arr) + masked_array(data=[ 1, 2, 3, -7], + mask=False, + fill_value=999999) + + """ + arr = ma.asanyarray(arr).flat + ed = arr[1:] - arr[:-1] + arrays = [ed] + # + if to_begin is not None: + arrays.insert(0, to_begin) + if to_end is not None: + arrays.append(to_end) + # + if len(arrays) != 1: + # We'll save ourselves a copy of a potentially large array in the common + # case where neither to_begin or to_end was given. + ed = hstack(arrays) + # + return ed + + +def unique(ar1, return_index=False, return_inverse=False): + """ + Finds the unique elements of an array. + + Masked values are considered the same element (masked). The output array + is always a masked array. See `numpy.unique` for more details. 
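A brief sketch of `ediff1d` above, not part of the patch: any difference touching a masked element is itself masked, and `to_begin`/`to_end` are simply concatenated around the result:

```python
import numpy as np

arr = np.ma.array([1, 2, 4, 7, 0], mask=[0, 0, 1, 0, 0])
print(np.ma.ediff1d(arr))                           # [1, --, --, -7]
print(np.ma.ediff1d(arr, to_begin=-99, to_end=99))  # padded on both ends
```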
+ + See Also + -------- + numpy.unique : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> a = [1, 2, 1000, 2, 3] + >>> mask = [0, 0, 1, 0, 0] + >>> masked_a = np.ma.masked_array(a, mask) + >>> masked_a + masked_array(data=[1, 2, --, 2, 3], + mask=[False, False, True, False, False], + fill_value=999999) + >>> np.ma.unique(masked_a) + masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999) + >>> np.ma.unique(masked_a, return_index=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2])) + >>> np.ma.unique(masked_a, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 3, 1, 2])) + >>> np.ma.unique(masked_a, return_index=True, return_inverse=True) + (masked_array(data=[1, 2, 3, --], + mask=[False, False, False, True], + fill_value=999999), array([0, 1, 4, 2]), array([0, 1, 3, 1, 2])) + """ + output = np.unique(ar1, + return_index=return_index, + return_inverse=return_inverse) + if isinstance(output, tuple): + output = list(output) + output[0] = output[0].view(MaskedArray) + output = tuple(output) + else: + output = output.view(MaskedArray) + return output + + +def intersect1d(ar1, ar2, assume_unique=False): + """ + Returns the unique elements common to both arrays. + + Masked values are considered equal one to the other. + The output is always a masked array. + + See `numpy.intersect1d` for more details. + + See Also + -------- + numpy.intersect1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> np.ma.intersect1d(x, y) + masked_array(data=[1, 3, --], + mask=[False, False, True], + fill_value=999999) + + """ + if assume_unique: + aux = ma.concatenate((ar1, ar2)) + else: + # Might be faster than unique( intersect1d( ar1, ar2 ) )? + aux = ma.concatenate((unique(ar1), unique(ar2))) + aux.sort() + return aux[:-1][aux[1:] == aux[:-1]] + + +def setxor1d(ar1, ar2, assume_unique=False): + """ + Set exclusive-or of 1-D arrays with unique elements. + + The output is always a masked array. See `numpy.setxor1d` for more details. + + See Also + -------- + numpy.setxor1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([1, 2, 3, 2, 4]) + >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) + >>> np.ma.setxor1d(ar1, ar2) + masked_array(data=[1, 4, 5, 7], + mask=False, + fill_value=999999) + + """ + if not assume_unique: + ar1 = unique(ar1) + ar2 = unique(ar2) + + aux = ma.concatenate((ar1, ar2), axis=None) + if aux.size == 0: + return aux + aux.sort() + auxf = aux.filled() +# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0 + flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True])) +# flag2 = ediff1d( flag ) == 0 + flag2 = (flag[1:] == flag[:-1]) + return aux[flag2] + + +def in1d(ar1, ar2, assume_unique=False, invert=False): + """ + Test whether each element of an array is also present in a second + array. + + The output is always a masked array. See `numpy.in1d` for more details. + + We recommend using :func:`isin` instead of `in1d` for new code. + + See Also + -------- + isin : Version of this function that preserves the shape of ar1. + numpy.in1d : Equivalent function for ndarrays. 
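The set operations above all rely on the same trick: sort the concatenated (unique) values so duplicates become neighbours, then compare each element with the next. A usage sketch, not part of the patch:

```python
import numpy as np

x = np.ma.array([1, 3, 5, 3], mask=[0, 0, 0, 1])
y = np.ma.array([3, 1, 9, 1], mask=[0, 0, 0, 1])

print(np.ma.intersect1d(x, y))  # [1, 3, --]: masked counts as a common value
print(np.ma.setxor1d(x, y))     # [5, 9]: values in exactly one input
```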
+ + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) + >>> ar2 = [0, 2] + >>> np.ma.in1d(ar1, ar2) + masked_array(data=[ True, False, True, False, True], + mask=False, + fill_value=True) + + """ + if not assume_unique: + ar1, rev_idx = unique(ar1, return_inverse=True) + ar2 = unique(ar2) + + ar = ma.concatenate((ar1, ar2)) + # We need this to be a stable sort, so always use 'mergesort' + # here. The values from the first array should always come before + # the values from the second array. + order = ar.argsort(kind='mergesort') + sar = ar[order] + if invert: + bool_ar = (sar[1:] != sar[:-1]) + else: + bool_ar = (sar[1:] == sar[:-1]) + flag = ma.concatenate((bool_ar, [invert])) + indx = order.argsort(kind='mergesort')[:len(ar1)] + + if assume_unique: + return flag[indx] + else: + return flag[indx][rev_idx] + + +def isin(element, test_elements, assume_unique=False, invert=False): + """ + Calculates `element in test_elements`, broadcasting over + `element` only. + + The output is always a masked array of the same shape as `element`. + See `numpy.isin` for more details. + + See Also + -------- + in1d : Flattened version of this function. + numpy.isin : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) + >>> test_elements = [0, 2] + >>> np.ma.isin(element, test_elements) + masked_array(data=[False, True, False, False, False, False], + mask=False, + fill_value=True) + + """ + element = ma.asarray(element) + return in1d(element, test_elements, assume_unique=assume_unique, + invert=invert).reshape(element.shape) + + +def union1d(ar1, ar2): + """ + Union of two arrays. + + The output is always a masked array. See `numpy.union1d` for more details. + + See Also + -------- + numpy.union1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> ar1 = np.ma.array([1, 2, 3, 4]) + >>> ar2 = np.ma.array([3, 4, 5, 6]) + >>> np.ma.union1d(ar1, ar2) + masked_array(data=[1, 2, 3, 4, 5, 6], + mask=False, + fill_value=999999) + + """ + return unique(ma.concatenate((ar1, ar2), axis=None)) + + +def setdiff1d(ar1, ar2, assume_unique=False): + """ + Set difference of 1D arrays with unique elements. + + The output is always a masked array. See `numpy.setdiff1d` for more + details. + + See Also + -------- + numpy.setdiff1d : Equivalent function for ndarrays. + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) + >>> np.ma.setdiff1d(x, [1, 2]) + masked_array(data=[3, --], + mask=[False, True], + fill_value=999999) + + """ + if assume_unique: + ar1 = ma.asarray(ar1).ravel() + else: + ar1 = unique(ar1) + ar2 = unique(ar2) + return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)] + + +############################################################################### +# Covariance # +############################################################################### + + +def _covhelper(x, y=None, rowvar=True, allow_masked=True): + """ + Private function for the computation of covariance and correlation + coefficients. 
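`in1d` (and its shape-preserving form `isin`) uses the same sorted-concatenation idea, with a stable mergesort so that elements of `ar1` always precede equal test elements. A small sketch of the public entry points:

```python
import numpy as np

element = np.ma.array([0, 1, 2, 5, 0])
print(np.ma.isin(element, [0, 2]))               # elementwise membership
print(np.ma.isin(element, [0, 2], invert=True))  # complement in one pass
```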
+ + """ + x = ma.array(x, ndmin=2, copy=True, dtype=float) + xmask = ma.getmaskarray(x) + # Quick exit if we can't process masked data + if not allow_masked and xmask.any(): + raise ValueError("Cannot process masked data.") + # + if x.shape[0] == 1: + rowvar = True + # Make sure that rowvar is either 0 or 1 + rowvar = int(bool(rowvar)) + axis = 1 - rowvar + if rowvar: + tup = (slice(None), None) + else: + tup = (None, slice(None)) + # + if y is None: + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(xmask).astype(xnm_dtype) + else: + y = array(y, copy=False, ndmin=2, dtype=float) + ymask = ma.getmaskarray(y) + if not allow_masked and ymask.any(): + raise ValueError("Cannot process masked data.") + if xmask.any() or ymask.any(): + if y.shape == x.shape: + # Define some common mask + common_mask = np.logical_or(xmask, ymask) + if common_mask is not nomask: + xmask = x._mask = y._mask = ymask = common_mask + x._sharedmask = False + y._sharedmask = False + x = ma.concatenate((x, y), axis) + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype( + xnm_dtype + ) + x -= x.mean(axis=rowvar)[tup] + return (x, xnotmask, rowvar) + + +def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): + """ + Estimate the covariance matrix. + + Except for the handling of missing data this function does the same as + `numpy.cov`. For more details and examples, see `numpy.cov`. + + By default, masked values are recognized as such. If `x` and `y` have the + same shape, a common mask is allocated: if ``x[i,j]`` is masked, then + ``y[i,j]`` will also be masked. + Setting `allow_masked` to False will raise an exception if values are + missing in either of the input arrays. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : bool, optional + Default normalization (False) is by ``(N-1)``, where ``N`` is the + number of observations given (unbiased estimate). If `bias` is True, + then normalization is by ``N``. This keyword can be overridden by + the keyword ``ddof`` in numpy versions >= 1.5. + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises a `ValueError` exception when some values are missing. + ddof : {None, int}, optional + If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is + the number of observations; this overrides the value implied by + ``bias``. 
The default value is ``None``. + + Raises + ------ + ValueError + Raised if some values are missing and `allow_masked` is False. + + See Also + -------- + numpy.cov + + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) + >>> y = np.ma.array([[1, 0], [0, 1]], mask=[0, 0, 1, 1]) + >>> np.ma.cov(x, y) + masked_array( + data=[[--, --, --, --], + [--, --, --, --], + [--, --, --, --], + [--, --, --, --]], + mask=[[ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True], + [ True, True, True, True]], + fill_value=1e+20, + dtype=float64) + + """ + # Check inputs + if ddof is not None and ddof != int(ddof): + raise ValueError("ddof must be an integer") + # Set up ddof + if ddof is None: + if bias: + ddof = 0 + else: + ddof = 1 + + (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) + if not rowvar: + fact = np.dot(xnotmask.T, xnotmask) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact + result = ma.array(data, mask=mask).squeeze() + else: + fact = np.dot(xnotmask, xnotmask.T) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact + result = ma.array(data, mask=mask).squeeze() + return result + + +def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, + ddof=np._NoValue): + """ + Return Pearson product-moment correlation coefficients. + + Except for the handling of missing data this function does the same as + `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`. + + Parameters + ---------- + x : array_like + A 1-D or 2-D array containing multiple variables and observations. + Each row of `x` represents a variable, and each column a single + observation of all those variables. Also see `rowvar` below. + y : array_like, optional + An additional set of variables and observations. `y` has the same + shape as `x`. + rowvar : bool, optional + If `rowvar` is True (default), then each row represents a + variable, with observations in the columns. Otherwise, the relationship + is transposed: each column represents a variable, while the rows + contain observations. + bias : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + allow_masked : bool, optional + If True, masked values are propagated pair-wise: if a value is masked + in `x`, the corresponding value is masked in `y`. + If False, raises an exception. Because `bias` is deprecated, this + argument needs to be treated as keyword only to avoid a warning. + ddof : _NoValue, optional + Has no effect, do not use. + + .. deprecated:: 1.10.0 + + See Also + -------- + numpy.corrcoef : Equivalent function in top-level NumPy module. + cov : Estimate the covariance matrix. + + Notes + ----- + This function accepts but discards arguments `bias` and `ddof`. This is + for backwards compatibility with previous versions of this function. These + arguments had no effect on the return values of the function and can be + safely ignored in this and previous versions of numpy. 
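A sketch tying `_covhelper`, `cov` and `corrcoef` together (not part of the patch): the `(N - ddof)` normalisation is computed pairwise from the counts of jointly unmasked observations, and `corrcoef` is essentially `cov` divided by the outer product of the per-variable standard deviations:

```python
import numpy as np

x = np.ma.array([[1., 2., 3., 4.],
                 [2., 4., 6., 9.]])
x[1, 3] = np.ma.masked      # masking propagates pairwise across variables

c = np.ma.cov(x)            # each entry uses its own observation count
print(c)
print(np.ma.corrcoef(x))    # c normalised by outer(std, std)
```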
+ + Examples + -------- + >>> import numpy as np + >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) + >>> np.ma.corrcoef(x) + masked_array( + data=[[--, --], + [--, --]], + mask=[[ True, True], + [ True, True]], + fill_value=1e+20, + dtype=float64) + + """ + msg = 'bias and ddof have no effect and are deprecated' + if bias is not np._NoValue or ddof is not np._NoValue: + # 2015-03-15, 1.10 + warnings.warn(msg, DeprecationWarning, stacklevel=2) + # Estimate the covariance matrix. + corr = cov(x, y, rowvar, allow_masked=allow_masked) + # The non-masked version returns a masked value for a scalar. + try: + std = ma.sqrt(ma.diagonal(corr)) + except ValueError: + return ma.MaskedConstant() + corr /= ma.multiply.outer(std, std) + return corr + +#####-------------------------------------------------------------------------- +#---- --- Concatenation helpers --- +#####-------------------------------------------------------------------------- + +class MAxisConcatenator(AxisConcatenator): + """ + Translate slice objects to concatenation along an axis. + + For documentation on usage, see `mr_class`. + + See Also + -------- + mr_class + + """ + __slots__ = () + + concatenate = staticmethod(concatenate) + + @classmethod + def makemat(cls, arr): + # There used to be a view as np.matrix here, but we may eventually + # deprecate that class. In preparation, we use the unmasked version + # to construct the matrix (with copy=False for backwards compatibility + # with the .view) + data = super().makemat(arr.data, copy=False) + return array(data, mask=arr.mask) + + def __getitem__(self, key): + # matrix builder syntax, like 'a, b; c, d' + if isinstance(key, str): + raise MAError("Unavailable for masked array.") + + return super().__getitem__(key) + + +class mr_class(MAxisConcatenator): + """ + Translate slice objects to concatenation along the first axis. + + This is the masked array version of `r_`. + + See Also + -------- + r_ + + Examples + -------- + >>> import numpy as np + >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] + masked_array(data=[1, 2, 3, ..., 4, 5, 6], + mask=False, + fill_value=999999) + + """ + __slots__ = () + + def __init__(self): + MAxisConcatenator.__init__(self, 0) + + +mr_ = mr_class() + + +#####-------------------------------------------------------------------------- +#---- Find unmasked data --- +#####-------------------------------------------------------------------------- + +def ndenumerate(a, compressed=True): + """ + Multidimensional index iterator. + + Return an iterator yielding pairs of array coordinates and values, + skipping elements that are masked. With `compressed=False`, + `ma.masked` is yielded as the value of masked elements. This + behavior differs from that of `numpy.ndenumerate`, which yields the + value of the underlying data array. + + Notes + ----- + .. versionadded:: 1.23.0 + + Parameters + ---------- + a : array_like + An array with (possibly) masked elements. + compressed : bool, optional + If True (default), masked elements are skipped. + + See Also + -------- + numpy.ndenumerate : Equivalent function ignoring any mask. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(9).reshape((3, 3)) + >>> a[1, 0] = np.ma.masked + >>> a[1, 2] = np.ma.masked + >>> a[2, 1] = np.ma.masked + >>> a + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> for index, x in np.ma.ndenumerate(a): + ... 
print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 1) 4 + (2, 0) 6 + (2, 2) 8 + + >>> for index, x in np.ma.ndenumerate(a, compressed=False): + ... print(index, x) + (0, 0) 0 + (0, 1) 1 + (0, 2) 2 + (1, 0) -- + (1, 1) 4 + (1, 2) -- + (2, 0) 6 + (2, 1) -- + (2, 2) 8 + """ + for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat): + if not mask: + yield it + elif not compressed: + yield it[0], masked + + +def flatnotmasked_edges(a): + """ + Find the indices of the first and last unmasked values. + + Expects a 1-D `MaskedArray`, returns None if all values are masked. + + Parameters + ---------- + a : array_like + Input 1-D `MaskedArray` + + Returns + ------- + edges : ndarray or None + The indices of first and last non-masked value in the array. + Returns None if all values are masked. + + See Also + -------- + flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 1-D arrays. + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_edges(a) + array([0, 9]) + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_edges(a) + array([3, 8]) + + >>> a[:] = np.ma.masked + >>> print(np.ma.flatnotmasked_edges(a)) + None + + """ + m = getmask(a) + if m is nomask or not np.any(m): + return np.array([0, a.size - 1]) + unmasked = np.flatnonzero(~m) + if len(unmasked) > 0: + return unmasked[[0, -1]] + else: + return None + + +def notmasked_edges(a, axis=None): + """ + Find the indices of the first and last unmasked values along an axis. + + If all values are masked, return None. Otherwise, return a list + of two tuples, corresponding to the indices of the first and last + unmasked values respectively. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array. + + Returns + ------- + edges : ndarray or list + An array of start and end indexes if there are any masked data in + the array. If there are no masked data in the array, `edges` is a + list of the first and last index. + + See Also + -------- + flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous + clump_masked, clump_unmasked + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(9).reshape((3, 3)) + >>> m = np.zeros_like(a) + >>> m[1:, 1:] = 1 + + >>> am = np.ma.array(a, mask=m) + >>> np.array(am[~am.mask]) + array([0, 1, 2, 3, 6]) + + >>> np.ma.notmasked_edges(am) + array([0, 6]) + + """ + a = asarray(a) + if axis is None or a.ndim == 1: + return flatnotmasked_edges(a) + m = getmaskarray(a) + idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim)) + return [tuple(idx[i].min(axis).compressed() for i in range(a.ndim)), + tuple(idx[i].max(axis).compressed() for i in range(a.ndim)), ] + + +def flatnotmasked_contiguous(a): + """ + Find contiguous unmasked data in a masked array. + + Parameters + ---------- + a : array_like + The input array. + + Returns + ------- + slice_list : list + A sorted sequence of `slice` objects (start index, end index). + + See Also + -------- + flatnotmasked_edges, notmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. 
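A quick sketch of the edge finders above, mirroring their docstrings:

```python
import numpy as np

a = np.ma.arange(10)
a[(a < 3) | (a > 8)] = np.ma.masked
print(np.ma.flatnotmasked_edges(a))  # array([3, 8])

a[:] = np.ma.masked
print(np.ma.flatnotmasked_edges(a))  # None once everything is masked
```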
+ + Examples + -------- + >>> import numpy as np + >>> a = np.ma.arange(10) + >>> np.ma.flatnotmasked_contiguous(a) + [slice(0, 10, None)] + + >>> mask = (a < 3) | (a > 8) | (a == 5) + >>> a[mask] = np.ma.masked + >>> np.array(a[~a.mask]) + array([3, 4, 6, 7, 8]) + + >>> np.ma.flatnotmasked_contiguous(a) + [slice(3, 5, None), slice(6, 9, None)] + >>> a[:] = np.ma.masked + >>> np.ma.flatnotmasked_contiguous(a) + [] + + """ + m = getmask(a) + if m is nomask: + return [slice(0, a.size)] + i = 0 + result = [] + for (k, g) in itertools.groupby(m.ravel()): + n = len(list(g)) + if not k: + result.append(slice(i, i + n)) + i += n + return result + + +def notmasked_contiguous(a, axis=None): + """ + Find contiguous unmasked data in a masked array along the given axis. + + Parameters + ---------- + a : array_like + The input array. + axis : int, optional + Axis along which to perform the operation. + If None (default), applies to a flattened version of the array, and this + is the same as `flatnotmasked_contiguous`. + + Returns + ------- + endpoints : list + A list of slices (start and end indexes) of unmasked indexes + in the array. + + If the input is 2d and axis is specified, the result is a list of lists. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + clump_masked, clump_unmasked + + Notes + ----- + Only accepts 2-D arrays at most. + + Examples + -------- + >>> import numpy as np + >>> a = np.arange(12).reshape((3, 4)) + >>> mask = np.zeros_like(a) + >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 + >>> ma = np.ma.array(a, mask=mask) + >>> ma + masked_array( + data=[[0, --, 2, 3], + [--, --, --, 7], + [8, --, --, 11]], + mask=[[False, True, False, False], + [ True, True, True, False], + [False, True, True, False]], + fill_value=999999) + >>> np.array(ma[~ma.mask]) + array([ 0, 2, 3, 7, 8, 11]) + + >>> np.ma.notmasked_contiguous(ma) + [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)] + + >>> np.ma.notmasked_contiguous(ma, axis=0) + [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]] + + >>> np.ma.notmasked_contiguous(ma, axis=1) + [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] + + """ # noqa: E501 + a = asarray(a) + nd = a.ndim + if nd > 2: + raise NotImplementedError("Currently limited to at most 2D array.") + if axis is None or nd == 1: + return flatnotmasked_contiguous(a) + # + result = [] + # + other = (axis + 1) % 2 + idx = [0, 0] + idx[axis] = slice(None, None) + # + for i in range(a.shape[other]): + idx[other] = i + result.append(flatnotmasked_contiguous(a[tuple(idx)])) + return result + + +def _ezclump(mask): + """ + Finds the clumps (groups of data with the same values) for a 1D bool array. + + Returns a series of slices. + """ + if mask.ndim > 1: + mask = mask.ravel() + idx = (mask[1:] ^ mask[:-1]).nonzero() + idx = idx[0] + 1 + + if mask[0]: + if len(idx) == 0: + return [slice(0, mask.size)] + + r = [slice(0, idx[0])] + r.extend((slice(left, right) + for left, right in zip(idx[1:-1:2], idx[2::2]))) + else: + if len(idx) == 0: + return [] + + r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] + + if mask[-1]: + r.append(slice(idx[-1], mask.size)) + return r + + +def clump_unmasked(a): + """ + Return list of slices corresponding to the unmasked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. 
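The `_ezclump` helper above finds run boundaries by XOR-ing the mask with itself shifted by one element; consecutive boundary indices then delimit a clump. The public wrappers in use (sketch, not part of the patch):

```python
import numpy as np

a = np.ma.masked_array(np.arange(10))
a[[0, 1, 2, 6, 8, 9]] = np.ma.masked

print(np.ma.clump_masked(a))    # slices covering the masked runs
print(np.ma.clump_unmasked(a))  # slices covering the unmasked runs
```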
+ + Returns + ------- + slices : list of slice + The list of slices, one for each continuous region of unmasked + elements in `a`. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + notmasked_contiguous, clump_masked + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked + >>> np.ma.clump_unmasked(a) + [slice(3, 6, None), slice(7, 8, None)] + + """ + mask = getattr(a, '_mask', nomask) + if mask is nomask: + return [slice(0, a.size)] + return _ezclump(~mask) + + +def clump_masked(a): + """ + Returns a list of slices corresponding to the masked clumps of a 1-D array. + (A "clump" is defined as a contiguous region of the array). + + Parameters + ---------- + a : ndarray + A one-dimensional masked array. + + Returns + ------- + slices : list of slice + The list of slices, one for each continuous region of masked elements + in `a`. + + See Also + -------- + flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges + notmasked_contiguous, clump_unmasked + + Examples + -------- + >>> import numpy as np + >>> a = np.ma.masked_array(np.arange(10)) + >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked + >>> np.ma.clump_masked(a) + [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)] + + """ + mask = ma.getmask(a) + if mask is nomask: + return [] + return _ezclump(mask) + + +############################################################################### +# Polynomial fit # +############################################################################### + + +def vander(x, n=None): + """ + Masked values in the input array result in rows of zeros. + + """ + _vander = np.vander(x, n) + m = getmask(x) + if m is not nomask: + _vander[m] = 0 + return _vander + + +vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__) + + +def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): + """ + Any masked values in x is propagated in y, and vice-versa. 
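Since `polyfit` drops points where either coordinate is masked before delegating to `np.polyfit`, the mask doubles as an outlier filter. A usage sketch (not part of the patch):

```python
import numpy as np

x = np.ma.arange(6.)
y = 2.0 * x + 1.0
y[3] = 100.0                       # corrupt one sample ...
y = np.ma.masked_values(y, 100.0)  # ... and mask it away

print(np.ma.polyfit(x, y, 1))      # recovers approximately [2., 1.]
```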
+ + """ + x = asarray(x) + y = asarray(y) + + m = getmask(x) + if y.ndim == 1: + m = mask_or(m, getmask(y)) + elif y.ndim == 2: + my = getmask(mask_rows(y)) + if my is not nomask: + m = mask_or(m, my[:, 0]) + else: + raise TypeError("Expected a 1D or 2D array for y!") + + if w is not None: + w = asarray(w) + if w.ndim != 1: + raise TypeError("expected a 1-d array for weights") + if w.shape[0] != y.shape[0]: + raise TypeError("expected w and y to have the same length") + m = mask_or(m, getmask(w)) + + if m is not nomask: + not_m = ~m + if w is not None: + w = w[not_m] + return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov) + else: + return np.polyfit(x, y, deg, rcond, full, w, cov) + + +polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__) diff --git a/python/numpy/ma/extras.pyi b/python/numpy/ma/extras.pyi new file mode 100644 index 000000000..9b46d32dd --- /dev/null +++ b/python/numpy/ma/extras.pyi @@ -0,0 +1,138 @@ +from _typeshed import Incomplete + +import numpy as np +from numpy.lib._function_base_impl import average +from numpy.lib._index_tricks_impl import AxisConcatenator + +from .core import MaskedArray, dot + +__all__ = [ + "apply_along_axis", + "apply_over_axes", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "average", + "clump_masked", + "clump_unmasked", + "column_stack", + "compress_cols", + "compress_nd", + "compress_rowcols", + "compress_rows", + "corrcoef", + "count_masked", + "cov", + "diagflat", + "dot", + "dstack", + "ediff1d", + "flatnotmasked_contiguous", + "flatnotmasked_edges", + "hsplit", + "hstack", + "in1d", + "intersect1d", + "isin", + "mask_cols", + "mask_rowcols", + "mask_rows", + "masked_all", + "masked_all_like", + "median", + "mr_", + "ndenumerate", + "notmasked_contiguous", + "notmasked_edges", + "polyfit", + "row_stack", + "setdiff1d", + "setxor1d", + "stack", + "union1d", + "unique", + "vander", + "vstack", +] + +def count_masked(arr, axis=...): ... +def masked_all(shape, dtype=...): ... +def masked_all_like(arr): ... + +class _fromnxfunction: + __name__: Incomplete + __doc__: Incomplete + def __init__(self, funcname) -> None: ... + def getdoc(self): ... + def __call__(self, *args, **params): ... + +class _fromnxfunction_single(_fromnxfunction): + def __call__(self, x, *args, **params): ... + +class _fromnxfunction_seq(_fromnxfunction): + def __call__(self, x, *args, **params): ... + +class _fromnxfunction_allargs(_fromnxfunction): + def __call__(self, *args, **params): ... + +atleast_1d: _fromnxfunction_allargs +atleast_2d: _fromnxfunction_allargs +atleast_3d: _fromnxfunction_allargs + +vstack: _fromnxfunction_seq +row_stack: _fromnxfunction_seq +hstack: _fromnxfunction_seq +column_stack: _fromnxfunction_seq +dstack: _fromnxfunction_seq +stack: _fromnxfunction_seq + +hsplit: _fromnxfunction_single +diagflat: _fromnxfunction_single + +def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... +def apply_over_axes(func, a, axes): ... +def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ... +def compress_nd(x, axis=...): ... +def compress_rowcols(x, axis=...): ... +def compress_rows(a): ... +def compress_cols(a): ... +def mask_rows(a, axis=...): ... +def mask_cols(a, axis=...): ... +def ediff1d(arr, to_end=..., to_begin=...): ... +def unique(ar1, return_index=..., return_inverse=...): ... +def intersect1d(ar1, ar2, assume_unique=...): ... +def setxor1d(ar1, ar2, assume_unique=...): ... +def in1d(ar1, ar2, assume_unique=..., invert=...): ... +def isin(element, test_elements, assume_unique=..., invert=...): ... 
+def union1d(ar1, ar2): ... +def setdiff1d(ar1, ar2, assume_unique=...): ... +def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... +def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... + +class MAxisConcatenator(AxisConcatenator): + __slots__ = () + + @staticmethod + def concatenate(arrays: Incomplete, axis: int = 0) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] + @classmethod + def makemat(cls, arr: Incomplete) -> Incomplete: ... # type: ignore[override] # pyright: ignore[reportIncompatibleVariableOverride] + +class mr_class(MAxisConcatenator): + __slots__ = () + + def __init__(self) -> None: ... + +mr_: mr_class + +def ndenumerate(a, compressed=...): ... +def flatnotmasked_edges(a): ... +def notmasked_edges(a, axis=...): ... +def flatnotmasked_contiguous(a): ... +def notmasked_contiguous(a, axis=...): ... +def clump_unmasked(a): ... +def clump_masked(a): ... +def vander(x, n=...): ... +def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ... + +# +def mask_rowcols(a: Incomplete, axis: Incomplete | None = None) -> MaskedArray[Incomplete, np.dtype[Incomplete]]: ... diff --git a/python/numpy/ma/mrecords.py b/python/numpy/ma/mrecords.py new file mode 100644 index 000000000..835f3ce5b --- /dev/null +++ b/python/numpy/ma/mrecords.py @@ -0,0 +1,773 @@ +""":mod:`numpy.ma..mrecords` + +Defines the equivalent of :class:`numpy.recarrays` for masked arrays, +where fields can be accessed as attributes. +Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes +and the masking of individual fields. + +.. moduleauthor:: Pierre Gerard-Marchant + +""" +# We should make sure that no field is called '_mask','mask','_fieldmask', +# or whatever restricted keywords. An idea would be to no bother in the +# first place, and then rename the invalid fields with a trailing +# underscore. Maybe we could just overload the parser function ? + +import warnings + +import numpy as np +import numpy.ma as ma + +_byteorderconv = np._core.records._byteorderconv + + +_check_fill_value = ma.core._check_fill_value + + +__all__ = [ + 'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords', + 'fromtextfile', 'addfield', +] + +reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype'] + + +def _checknames(descr, names=None): + """ + Checks that field names ``descr`` are not reserved keywords. + + If this is the case, a default 'f%i' is substituted. If the argument + `names` is not None, updates the field names to valid names. + + """ + ndescr = len(descr) + default_names = [f'f{i}' for i in range(ndescr)] + if names is None: + new_names = default_names + else: + if isinstance(names, (tuple, list)): + new_names = names + elif isinstance(names, str): + new_names = names.split(',') + else: + raise NameError(f'illegal input names {names!r}') + nnames = len(new_names) + if nnames < ndescr: + new_names += default_names[nnames:] + ndescr = [] + for (n, d, t) in zip(new_names, default_names, descr.descr): + if n in reserved_fields: + if t[0] in reserved_fields: + ndescr.append((d, t[1])) + else: + ndescr.append(t) + else: + ndescr.append((n, t[1])) + return np.dtype(ndescr) + + +def _get_fieldmask(self): + mdescr = [(n, '|b1') for n in self.dtype.names] + fdmask = np.empty(self.shape, dtype=mdescr) + fdmask.flat = tuple([False] * len(mdescr)) + return fdmask + + +class MaskedRecords(ma.MaskedArray): + """ + + Attributes + ---------- + _data : recarray + Underlying data, as a record array. 
+    _mask : boolean array
+        Mask of the records. A record is masked when all its fields are
+        masked.
+    _fieldmask : boolean recarray
+        Record array of booleans, setting the mask of each individual field
+        of each record.
+    _fill_value : record
+        Filling values for each field.
+
+    """
+
+    def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
+                formats=None, names=None, titles=None,
+                byteorder=None, aligned=False,
+                mask=ma.nomask, hard_mask=False, fill_value=None,
+                keep_mask=True, copy=False,
+                **options):
+
+        self = np.recarray.__new__(cls, shape, dtype=dtype, buf=buf,
+                                   offset=offset, strides=strides,
+                                   formats=formats, names=names,
+                                   titles=titles, byteorder=byteorder,
+                                   aligned=aligned,)
+
+        mdtype = ma.make_mask_descr(self.dtype)
+        if mask is ma.nomask or not np.size(mask):
+            if not keep_mask:
+                self._mask = tuple([False] * len(mdtype))
+        else:
+            mask = np.array(mask, copy=copy)
+            if mask.shape != self.shape:
+                (nd, nm) = (self.size, mask.size)
+                if nm == 1:
+                    mask = np.resize(mask, self.shape)
+                elif nm == nd:
+                    mask = np.reshape(mask, self.shape)
+                else:
+                    msg = (f"Mask and data not compatible: data size is {nd},"
+                           f" mask size is {nm}.")
+                    raise ma.MAError(msg)
+            if not keep_mask:
+                self.__setmask__(mask)
+                self._sharedmask = True
+            else:
+                if mask.dtype == mdtype:
+                    _mask = mask
+                else:
+                    _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
+                                     dtype=mdtype)
+                self._mask = _mask
+        return self
+
+    def __array_finalize__(self, obj):
+        # Make sure we have a _fieldmask by default
+        _mask = getattr(obj, '_mask', None)
+        if _mask is None:
+            objmask = getattr(obj, '_mask', ma.nomask)
+            _dtype = np.ndarray.__getattribute__(self, 'dtype')
+            if objmask is ma.nomask:
+                _mask = ma.make_mask_none(self.shape, dtype=_dtype)
+            else:
+                mdescr = ma.make_mask_descr(_dtype)
+                _mask = np.array([tuple([m] * len(mdescr)) for m in objmask],
+                                 dtype=mdescr).view(np.recarray)
+        # Update some of the attributes
+        _dict = self.__dict__
+        _dict.update(_mask=_mask)
+        self._update_from(obj)
+        if _dict['_baseclass'] == np.ndarray:
+            _dict['_baseclass'] = np.recarray
+
+    @property
+    def _data(self):
+        """
+        Returns the data as a recarray.
+
+        """
+        return np.ndarray.view(self, np.recarray)
+
+    @property
+    def _fieldmask(self):
+        """
+        Alias to mask.
+ + """ + return self._mask + + def __len__(self): + """ + Returns the length + + """ + # We have more than one record + if self.ndim: + return len(self._data) + # We have only one record: return the nb of fields + return len(self.dtype) + + def __getattribute__(self, attr): + try: + return object.__getattribute__(self, attr) + except AttributeError: + # attr must be a fieldname + pass + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + # So far, so good + _localdict = np.ndarray.__getattribute__(self, '__dict__') + _data = np.ndarray.view(self, _localdict['_baseclass']) + obj = _data.getfield(*res) + if obj.dtype.names is not None: + raise NotImplementedError("MaskedRecords is currently limited to" + "simple records.") + # Get some special attributes + # Reset the object's mask + hasmasked = False + _mask = _localdict.get('_mask', None) + if _mask is not None: + try: + _mask = _mask[attr] + except IndexError: + # Couldn't find a mask: use the default (nomask) + pass + tp_len = len(_mask.dtype) + hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any() + if (obj.shape or hasmasked): + obj = obj.view(ma.MaskedArray) + obj._baseclass = np.ndarray + obj._isfield = True + obj._mask = _mask + # Reset the field values + _fill_value = _localdict.get('_fill_value', None) + if _fill_value is not None: + try: + obj._fill_value = _fill_value[attr] + except ValueError: + obj._fill_value = None + else: + obj = obj.item() + return obj + + def __setattr__(self, attr, val): + """ + Sets the attribute attr to the value val. + + """ + # Should we call __setmask__ first ? + if attr in ['mask', 'fieldmask']: + self.__setmask__(val) + return + # Create a shortcut (so that we don't have to call getattr all the time) + _localdict = object.__getattribute__(self, '__dict__') + # Check whether we're creating a new field + newattr = attr not in _localdict + try: + # Is attr a generic attribute ? + ret = object.__setattr__(self, attr, val) + except Exception: + # Not a generic attribute: exit if it's not a valid field + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} + optinfo = np.ndarray.__getattribute__(self, '_optinfo') or {} + if not (attr in fielddict or attr in optinfo): + raise + else: + # Get the list of names + fielddict = np.ndarray.__getattribute__(self, 'dtype').fields or {} + # Check the attribute + if attr not in fielddict: + return ret + if newattr: + # We just added this one or this setattr worked on an + # internal attribute. + try: + object.__delattr__(self, attr) + except Exception: + return ret + # Let's try to set the field + try: + res = fielddict[attr][:2] + except (TypeError, KeyError) as e: + raise AttributeError( + f'record array has no attribute {attr}') from e + + if val is ma.masked: + _fill_value = _localdict['_fill_value'] + if _fill_value is not None: + dval = _localdict['_fill_value'][attr] + else: + dval = val + mval = True + else: + dval = ma.filled(val) + mval = ma.getmaskarray(val) + obj = np.ndarray.__getattribute__(self, '_data').setfield(dval, *res) + _localdict['_mask'].__setitem__(attr, mval) + return obj + + def __getitem__(self, indx): + """ + Returns all the fields sharing the same fieldname base. + + The fieldname base is either `_data` or `_mask`. 
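A sketch of the attribute machinery above, using the `fromarrays` constructor defined later in this module (public `numpy.ma.mrecords` API assumed, not part of the patch): each field comes back as a masked array carrying its own slice of the field mask:

```python
import numpy as np
from numpy.ma import mrecords

rec = mrecords.fromarrays(
    [np.ma.array([1, 2, 3], mask=[0, 1, 0]),
     np.ma.array([1.5, 2.5, 3.5])],
    names='id,score')

print(rec.id)     # field as attribute: [1, --, 3]
print(rec.score)  # each field keeps an independent mask
print(rec[1])     # record 1 shows its masked 'id' field
```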
+
+ """
+ _localdict = self.__dict__
+ _mask = np.ndarray.__getattribute__(self, '_mask')
+ _data = np.ndarray.view(self, _localdict['_baseclass'])
+ # We want a field
+ if isinstance(indx, str):
+ # Make sure _sharedmask is True to propagate back to _fieldmask.
+ # Don't use _set_mask, there are some copies being made that
+ # break propagation.
+ # Don't force the mask to nomask, that wrecks easy masking.
+ obj = _data[indx].view(ma.MaskedArray)
+ obj._mask = _mask[indx]
+ obj._sharedmask = True
+ fval = _localdict['_fill_value']
+ if fval is not None:
+ obj._fill_value = fval[indx]
+ # Force to masked if the mask is True
+ if not obj.ndim and obj._mask:
+ return ma.masked
+ return obj
+ # We want some elements.
+ # First, the data.
+ obj = np.asarray(_data[indx]).view(mrecarray)
+ obj._mask = np.asarray(_mask[indx]).view(np.recarray)
+ return obj
+
+ def __setitem__(self, indx, value):
+ """
+ Sets the given record to value.
+
+ """
+ ma.MaskedArray.__setitem__(self, indx, value)
+ if isinstance(indx, str):
+ self._mask[indx] = ma.getmaskarray(value)
+
+ def __str__(self):
+ """
+ Calculates the string representation.
+
+ """
+ if self.size > 1:
+ mstr = [f"({','.join([str(i) for i in s])})"
+ for s in zip(*[getattr(self, f) for f in self.dtype.names])]
+ return f"[{', '.join(mstr)}]"
+ else:
+ mstr = [f"{','.join([str(i) for i in s])}"
+ for s in zip([getattr(self, f) for f in self.dtype.names])]
+ return f"({', '.join(mstr)})"
+
+ def __repr__(self):
+ """
+ Calculates the repr representation.
+
+ """
+ _names = self.dtype.names
+ fmt = f"%{max(len(n) for n in _names) + 4}s : %s"
+ reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
+ reprstr.insert(0, 'masked_records(')
+ reprstr.extend([fmt % (' fill_value', self.fill_value),
+ ' )'])
+ return str("\n".join(reprstr))
+
+ def view(self, dtype=None, type=None):
+ """
+ Returns a view of the mrecarray.
+
+ """
+ # OK, basic copy-paste from MaskedArray.view.
+ if dtype is None:
+ if type is None:
+ output = np.ndarray.view(self)
+ else:
+ output = np.ndarray.view(self, type)
+ # Here again.
+ elif type is None:
+ try:
+ if issubclass(dtype, np.ndarray):
+ output = np.ndarray.view(self, dtype)
+ else:
+ output = np.ndarray.view(self, dtype)
+ # OK, there's the change
+ except TypeError:
+ dtype = np.dtype(dtype)
+ # we need to revert to MaskedArray, but keeping the possibility
+ # of subclasses (eg, TimeSeriesRecords), so we'll force a type
+ # set to the first parent
+ if dtype.fields is None:
+ basetype = self.__class__.__bases__[0]
+ output = self.__array__().view(dtype, basetype)
+ output._update_from(self)
+ else:
+ output = np.ndarray.view(self, dtype)
+ output._fill_value = None
+ else:
+ output = np.ndarray.view(self, dtype, type)
+ # Update the mask, just like in MaskedArray.view
+ if (getattr(output, '_mask', ma.nomask) is not ma.nomask):
+ mdtype = ma.make_mask_descr(output.dtype)
+ output._mask = self._mask.view(mdtype, np.ndarray)
+ output._mask.shape = output.shape
+ return output
+
+ def harden_mask(self):
+ """
+ Forces the mask to hard.
+
+ """
+ self._hardmask = True
+
+ def soften_mask(self):
+ """
+ Forces the mask to soft.
+
+ """
+ self._hardmask = False
+
+ def copy(self):
+ """
+ Returns a copy of the masked record.
+
+ """
+ copied = self._data.copy().view(type(self))
+ copied._mask = self._mask.copy()
+ return copied
+
+ def tolist(self, fill_value=None):
+ """
+ Return the data portion of the array as a list.
+
+ Data items are converted to the nearest compatible Python type.
+ Masked values are converted to fill_value. If fill_value is None,
+ the corresponding entries in the output list will be ``None``.
+
+ """
+ if fill_value is not None:
+ return self.filled(fill_value).tolist()
+ result = np.array(self.filled().tolist(), dtype=object)
+ mask = np.array(self._mask.tolist())
+ result[mask] = None
+ return result.tolist()
+
+ def __getstate__(self):
+ """Return the internal state of the masked array.
+
+ This is for pickling.
+
+ """
+ state = (1,
+ self.shape,
+ self.dtype,
+ self.flags.fnc,
+ self._data.tobytes(),
+ self._mask.tobytes(),
+ self._fill_value,
+ )
+ return state
+
+ def __setstate__(self, state):
+ """
+ Restore the internal state of the masked array.
+
+ This is for pickling. ``state`` is typically the output of
+ ``__getstate__``, and is a 7-tuple:
+
+ - a version number
+ - a tuple giving the shape of the data
+ - the dtype of the data
+ - a flag for the Fortran/C order of the data
+ - a binary string for the data
+ - a binary string for the mask
+ - the fill value.
+
+ """
+ (ver, shp, typ, isf, raw, msk, flv) = state
+ np.ndarray.__setstate__(self, (shp, typ, isf, raw))
+ mdtype = np.dtype([(k, np.bool) for (k, _) in self.dtype.descr])
+ self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
+ self.fill_value = flv
+
+ def __reduce__(self):
+ """
+ Return a 3-tuple for pickling a MaskedArray.
+
+ """
+ return (_mrreconstruct,
+ (self.__class__, self._baseclass, (0,), 'b',),
+ self.__getstate__())
+
+
+def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
+ """
+ Build a new MaskedArray from the information stored in a pickle.
+
+ """
+ _data = np.ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
+ _mask = np.ndarray.__new__(np.ndarray, baseshape, 'b1')
+ return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
+
+
+mrecarray = MaskedRecords
+
+
+###############################################################################
+# Constructors #
+###############################################################################
+
+
+def fromarrays(arraylist, dtype=None, shape=None, formats=None,
+ names=None, titles=None, aligned=False, byteorder=None,
+ fill_value=None):
+ """
+ Creates a mrecarray from a (flat) list of masked arrays.
+
+ Parameters
+ ----------
+ arraylist : sequence
+ A list of (masked) arrays. Each element of the sequence is first converted
+ to a masked array if needed. If a 2D array is passed as argument, it is
+ processed line by line.
+ dtype : {None, dtype}, optional
+ Data type descriptor.
+ shape : {None, integer}, optional
+ Number of records. If None, shape is defined from the shape of the
+ first array in the list.
+ formats : {None, sequence}, optional
+ Sequence of formats for each individual field. If None, the formats will
+ be autodetected by inspecting the fields and selecting the highest dtype
+ possible.
+ names : {None, sequence}, optional
+ Sequence of the names of each field.
+ fill_value : {None, sequence}, optional
+ Sequence of data to be used as filling values.
+
+ Notes
+ -----
+ Lists of tuples should be preferred over lists of lists for faster processing.
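+
+ Examples
+ --------
+ A minimal usage sketch (the values here are illustrative only):
+
+ >>> import numpy.ma as ma
+ >>> from numpy.ma.mrecords import fromarrays
+ >>> x = ma.array([1, 2, 3], mask=[0, 1, 0])
+ >>> y = ma.array([1.5, 2.5, 3.5], mask=[0, 0, 1])
+ >>> rec = fromarrays([x, y], names='a,b') # per-field masks are kept
+ >>> rec['a'][1] is ma.masked and rec['b'][2] is ma.masked
+ True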
+
+ """
+ datalist = [ma.getdata(x) for x in arraylist]
+ masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist]
+ _array = np.rec.fromarrays(datalist,
+ dtype=dtype, shape=shape, formats=formats,
+ names=names, titles=titles, aligned=aligned,
+ byteorder=byteorder).view(mrecarray)
+ _array._mask.flat = list(zip(*masklist))
+ if fill_value is not None:
+ _array.fill_value = fill_value
+ return _array
+
+
+def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
+ titles=None, aligned=False, byteorder=None,
+ fill_value=None, mask=ma.nomask):
+ """
+ Creates a MaskedRecords from a list of records.
+
+ Parameters
+ ----------
+ reclist : sequence
+ A list of records. Each element of the sequence is first converted
+ to a masked array if needed. If a 2D array is passed as argument, it is
+ processed line by line.
+ dtype : {None, dtype}, optional
+ Data type descriptor.
+ shape : {None, int}, optional
+ Number of records. If None, ``shape`` is defined from the shape of the
+ first array in the list.
+ formats : {None, sequence}, optional
+ Sequence of formats for each individual field. If None, the formats will
+ be autodetected by inspecting the fields and selecting the highest dtype
+ possible.
+ names : {None, sequence}, optional
+ Sequence of the names of each field.
+ fill_value : {None, sequence}, optional
+ Sequence of data to be used as filling values.
+ mask : {nomask, sequence}, optional
+ External mask to apply on the data.
+
+ Notes
+ -----
+ Lists of tuples should be preferred over lists of lists for faster processing.
+
+ """
+ # Grab the initial _fieldmask, if needed:
+ _mask = getattr(reclist, '_mask', None)
+ # Get the list of records.
+ if isinstance(reclist, np.ndarray):
+ # Make sure we don't have some hidden mask
+ if isinstance(reclist, ma.MaskedArray):
+ reclist = reclist.filled().view(np.ndarray)
+ # Grab the initial dtype, just in case
+ if dtype is None:
+ dtype = reclist.dtype
+ reclist = reclist.tolist()
+ mrec = np.rec.fromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
+ names=names, titles=titles,
+ aligned=aligned, byteorder=byteorder).view(mrecarray)
+ # Set the fill_value if needed
+ if fill_value is not None:
+ mrec.fill_value = fill_value
+ # Now, let's deal w/ the mask
+ if mask is not ma.nomask:
+ mask = np.asarray(mask)
+ maskrecordlength = len(mask.dtype)
+ if maskrecordlength:
+ mrec._mask.flat = mask
+ elif mask.ndim == 2:
+ mrec._mask.flat = [tuple(m) for m in mask]
+ else:
+ mrec.__setmask__(mask)
+ if _mask is not None:
+ mrec._mask[:] = _mask
+ return mrec
+
+
+def _guessvartypes(arr):
+ """
+ Tries to guess the dtypes of the str_ ndarray `arr`.
+
+ Guesses by testing element-wise conversion. Returns a list of dtypes.
+ The array is first converted to ndarray. If the array is 2D, the test
+ is performed on the first line. An exception is raised if the array is
+ 3D or more.
+
+ """
+ vartypes = []
+ arr = np.asarray(arr)
+ if arr.ndim == 2:
+ arr = arr[0]
+ elif arr.ndim > 2:
+ raise ValueError("The array should be 2D at most!")
+ # Start the conversion loop.
+ for f in arr:
+ try:
+ int(f)
+ except (ValueError, TypeError):
+ try:
+ float(f)
+ except (ValueError, TypeError):
+ try:
+ complex(f)
+ except (ValueError, TypeError):
+ vartypes.append(arr.dtype)
+ else:
+ vartypes.append(np.dtype(complex))
+ else:
+ vartypes.append(np.dtype(float))
+ else:
+ vartypes.append(np.dtype(int))
+ return vartypes
+
+
+def openfile(fname):
+ """
+ Opens the file handle of file `fname`.
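+
+ If `fname` is already a file-like object (anything with a ``readline``
+ method), it is returned as-is. Files that appear to be binary raise
+ ``NotImplementedError``.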
+
+ """
+ # A file handle
+ if hasattr(fname, 'readline'):
+ return fname
+ # Try to open the file and guess its type
+ try:
+ f = open(fname)
+ except FileNotFoundError as e:
+ raise FileNotFoundError(f"No such file: '{fname}'") from e
+ if f.readline()[:2] != "\\x":
+ f.seek(0, 0)
+ return f
+ f.close()
+ raise NotImplementedError("Wow, binary file")
+
+
+def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='',
+ varnames=None, vartypes=None,
+ *, delimitor=np._NoValue): # backwards compatibility
+ """
+ Creates a mrecarray from data stored in the file `filename`.
+
+ Parameters
+ ----------
+ fname : {file name/handle}
+ Handle of an opened file.
+ delimiter : {None, string}, optional
+ Alphanumeric character used to separate columns in the file.
+ If None, any (group of) whitespace character(s) will be used.
+ commentchar : {'#', string}, optional
+ Alphanumeric character used to mark the start of a comment.
+ missingchar : {'', string}, optional
+ String indicating missing data, and used to create the masks.
+ varnames : {None, sequence}, optional
+ Sequence of the variable names. If None, a list will be created from
+ the first non empty line of the file.
+ vartypes : {None, sequence}, optional
+ Sequence of the variables dtypes. If None, it will be estimated from
+ the first non-commented line.
+
+ Notes
+ -----
+ Ultra simple: the varnames are in the header, one line.
+ """
+ if delimitor is not np._NoValue:
+ if delimiter is not None:
+ raise TypeError("fromtextfile() got multiple values for argument "
+ "'delimiter'")
+ # NumPy 1.22.0, 2021-09-23
+ warnings.warn("The 'delimitor' keyword argument of "
+ "numpy.ma.mrecords.fromtextfile() is deprecated "
+ "since NumPy 1.22.0, use 'delimiter' instead.",
+ DeprecationWarning, stacklevel=2)
+ delimiter = delimitor
+
+ # Try to open the file.
+ ftext = openfile(fname)
+
+ # Get the first non-empty line as the varnames
+ while True:
+ line = ftext.readline()
+ firstline = line[:line.find(commentchar)].strip()
+ _varnames = firstline.split(delimiter)
+ if len(_varnames) > 1:
+ break
+ if varnames is None:
+ varnames = _varnames
+
+ # Get the data.
+ _variables = ma.masked_array([line.strip().split(delimiter) for line in ftext
+ if line[0] != commentchar and len(line) > 1])
+ (_, nfields) = _variables.shape
+ ftext.close()
+
+ # Try to guess the dtype.
+ if vartypes is None:
+ vartypes = _guessvartypes(_variables[0])
+ else:
+ vartypes = [np.dtype(v) for v in vartypes]
+ if len(vartypes) != nfields:
+ msg = f"Attempting to set {len(vartypes)} dtypes for {nfields} fields!"
+ msg += " Reverting to default."
+ warnings.warn(msg, stacklevel=2)
+ vartypes = _guessvartypes(_variables[0])
+
+ # Construct the descriptor.
+ mdescr = list(zip(varnames, vartypes))
+ mfillv = [ma.default_fill_value(f) for f in vartypes]
+
+ # Get the data and the mask.
+ # We just need a list of masked_arrays. It's easier to create it like that:
+ _mask = (_variables.T == missingchar)
+ _datalist = [ma.masked_array(a, mask=m, dtype=t, fill_value=f)
+ for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
+
+ return fromarrays(_datalist, dtype=mdescr)
+
+
+def addfield(mrecord, newfield, newfieldname=None):
+ """Adds a new field to the masked record array.
+
+ Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
+ is None, the new field name is set to 'fi', where `i` is the number of
+ existing fields.
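+
+ Examples
+ --------
+ A minimal usage sketch (illustrative values only):
+
+ >>> import numpy.ma as ma
+ >>> from numpy.ma.mrecords import addfield, fromarrays
+ >>> base = fromarrays([ma.array([1, 2], mask=[0, 1])], names='a')
+ >>> extended = addfield(base, ma.array([3.0, 4.0], mask=[1, 0]))
+ >>> extended.dtype.names
+ ('a', 'f1')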
+ + """ + _data = mrecord._data + _mask = mrecord._mask + if newfieldname is None or newfieldname in reserved_fields: + newfieldname = f'f{len(_data.dtype)}' + newfield = ma.array(newfield) + # Get the new data. + # Create a new empty recarray + newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)]) + newdata = np.recarray(_data.shape, newdtype) + # Add the existing field + [newdata.setfield(_data.getfield(*f), *f) + for f in _data.dtype.fields.values()] + # Add the new field + newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname]) + newdata = newdata.view(MaskedRecords) + # Get the new mask + # Create a new empty recarray + newmdtype = np.dtype([(n, np.bool) for n in newdtype.names]) + newmask = np.recarray(_data.shape, newmdtype) + # Add the old masks + [newmask.setfield(_mask.getfield(*f), *f) + for f in _mask.dtype.fields.values()] + # Add the mask of the new field + newmask.setfield(ma.getmaskarray(newfield), + *newmask.dtype.fields[newfieldname]) + newdata._mask = newmask + return newdata diff --git a/python/numpy/ma/mrecords.pyi b/python/numpy/ma/mrecords.pyi new file mode 100644 index 000000000..cae687aa7 --- /dev/null +++ b/python/numpy/ma/mrecords.pyi @@ -0,0 +1,96 @@ +from typing import Any, TypeVar + +from numpy import dtype + +from . import MaskedArray + +__all__ = [ + "MaskedRecords", + "mrecarray", + "fromarrays", + "fromrecords", + "fromtextfile", + "addfield", +] + +_ShapeT_co = TypeVar("_ShapeT_co", covariant=True, bound=tuple[int, ...]) +_DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, covariant=True) + +class MaskedRecords(MaskedArray[_ShapeT_co, _DTypeT_co]): + def __new__( + cls, + shape, + dtype=..., + buf=..., + offset=..., + strides=..., + formats=..., + names=..., + titles=..., + byteorder=..., + aligned=..., + mask=..., + hard_mask=..., + fill_value=..., + keep_mask=..., + copy=..., + **options, + ): ... + _mask: Any + _fill_value: Any + @property + def _data(self): ... + @property + def _fieldmask(self): ... + def __array_finalize__(self, obj): ... + def __len__(self): ... + def __getattribute__(self, attr): ... + def __setattr__(self, attr, val): ... + def __getitem__(self, indx): ... + def __setitem__(self, indx, value): ... + def view(self, dtype=..., type=...): ... + def harden_mask(self): ... + def soften_mask(self): ... + def copy(self): ... + def tolist(self, fill_value=...): ... + def __reduce__(self): ... + +mrecarray = MaskedRecords + +def fromarrays( + arraylist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., +): ... + +def fromrecords( + reclist, + dtype=..., + shape=..., + formats=..., + names=..., + titles=..., + aligned=..., + byteorder=..., + fill_value=..., + mask=..., +): ... + +def fromtextfile( + fname, + delimiter=..., + commentchar=..., + missingchar=..., + varnames=..., + vartypes=..., + # NOTE: deprecated: NumPy 1.22.0, 2021-09-23 + # delimitor=..., +): ... + +def addfield(mrecord, newfield, newfieldname=...): ... 
diff --git a/python/numpy/ma/tests/__init__.py b/python/numpy/ma/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/ma/tests/test_arrayobject.py b/python/numpy/ma/tests/test_arrayobject.py
new file mode 100644
index 000000000..2000cea22
--- /dev/null
+++ b/python/numpy/ma/tests/test_arrayobject.py
@@ -0,0 +1,40 @@
+import pytest
+
+import numpy as np
+from numpy.ma import masked_array
+from numpy.testing import assert_array_equal
+
+
+def test_matrix_transpose_raises_error_for_1d():
+ msg = "matrix transpose with ndim < 2 is undefined"
+ ma_arr = masked_array(data=[1, 2, 3, 4, 5, 6],
+ mask=[1, 0, 1, 1, 1, 0])
+ with pytest.raises(ValueError, match=msg):
+ ma_arr.mT
+
+
+def test_matrix_transpose_equals_transpose_2d():
+ ma_arr = masked_array(data=[[1, 2, 3], [4, 5, 6]],
+ mask=[[1, 0, 1], [1, 1, 0]])
+ assert_array_equal(ma_arr.T, ma_arr.mT)
+
+
+ARRAY_SHAPES_TO_TEST = (
+ (5, 2),
+ 
(5, 2, 3), + (5, 2, 3, 4), +) + + +@pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST) +def test_matrix_transpose_equals_swapaxes(shape): + num_of_axes = len(shape) + vec = np.arange(shape[-1]) + arr = np.broadcast_to(vec, shape) + + rng = np.random.default_rng(42) + mask = rng.choice([0, 1], size=shape) + ma_arr = masked_array(data=arr, mask=mask) + + tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1) + assert_array_equal(tgt, ma_arr.mT) diff --git a/python/numpy/ma/tests/test_core.py b/python/numpy/ma/tests/test_core.py new file mode 100644 index 000000000..091ba6c99 --- /dev/null +++ b/python/numpy/ma/tests/test_core.py @@ -0,0 +1,5886 @@ +"""Tests suite for MaskedArray & subclassing. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu +""" +__author__ = "Pierre GF Gerard-Marchant" + +import copy +import itertools +import operator +import pickle +import sys +import textwrap +import warnings +from functools import reduce + +import pytest + +import numpy as np +import numpy._core.fromnumeric as fromnumeric +import numpy._core.umath as umath +import numpy.ma.core +from numpy import ndarray +from numpy._utils import asbytes +from numpy.exceptions import AxisError +from numpy.ma.core import ( + MAError, + MaskedArray, + MaskError, + MaskType, + abs, + absolute, + add, + all, + allclose, + allequal, + alltrue, + angle, + anom, + arange, + arccos, + arccosh, + arcsin, + arctan, + arctan2, + argsort, + array, + asarray, + choose, + concatenate, + conjugate, + cos, + cosh, + count, + default_fill_value, + diag, + divide, + empty, + empty_like, + equal, + exp, + filled, + fix_invalid, + flatten_mask, + flatten_structured_array, + fromflex, + getmask, + getmaskarray, + greater, + greater_equal, + identity, + inner, + isMaskedArray, + less, + less_equal, + log, + log10, + make_mask, + make_mask_descr, + mask_or, + masked, + masked_array, + masked_equal, + masked_greater, + masked_greater_equal, + masked_inside, + masked_less, + masked_less_equal, + masked_not_equal, + masked_outside, + masked_print_option, + masked_values, + masked_where, + max, + maximum, + maximum_fill_value, + min, + minimum, + minimum_fill_value, + mod, + multiply, + mvoid, + nomask, + not_equal, + ones, + ones_like, + outer, + power, + product, + put, + putmask, + ravel, + repeat, + reshape, + resize, + shape, + sin, + sinh, + sometrue, + sort, + sqrt, + subtract, + sum, + take, + tan, + tanh, + transpose, + where, + zeros, + zeros_like, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_equal_records, + assert_mask_equal, + assert_not_equal, + fail_if_equal, +) +from numpy.testing import ( + IS_WASM, + assert_raises, + assert_warns, + suppress_warnings, + temppath, +) +from numpy.testing._private.utils import requires_memory + +pi = np.pi + + +suppress_copy_mask_on_assignment = suppress_warnings() +suppress_copy_mask_on_assignment.filter( + numpy.ma.core.MaskedArrayFutureWarning, + "setting an item on a masked array which has a shared mask will not copy") + + +# For parametrized numeric testing +num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD'] +num_ids = [dt_.char for dt_ in num_dts] + + +class TestMaskedArray: + # Base test class for MaskedArrays. + + def setup_method(self): + # Base data definition. + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + a10 = 10. 
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + z = np.array([-.5, 0., .5, .8]) + zm = masked_array(z, mask=[0, 1, 0, 0]) + xf = np.where(m1, 1e+20, x) + xm.set_fill_value(1e+20) + self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf) + + def test_basicattributes(self): + # Tests some basic array attributes. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a.ndim, 1) + assert_equal(b.ndim, 1) + assert_equal(a.size, 3) + assert_equal(b.size, 3) + assert_equal(a.shape, (3,)) + assert_equal(b.shape, (3,)) + + def test_basic0d(self): + # Checks masking a scalar + x = masked_array(0) + assert_equal(str(x), '0') + x = masked_array(0, mask=True) + assert_equal(str(x), str(masked_print_option)) + x = masked_array(0, mask=False) + assert_equal(str(x), '0') + x = array(0, mask=1) + assert_(x.filled().dtype is x._data.dtype) + + def test_basic1d(self): + # Test of basic array creation and properties in 1 dimension. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_((xm - ym).filled(0).any()) + fail_if_equal(xm.mask.astype(int), ym.mask.astype(int)) + s = x.shape + assert_equal(np.shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.dtype, x.dtype) + assert_equal(zm.dtype, z.dtype) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) + assert_array_equal(xm, xf) + assert_array_equal(filled(xm, 1.e20), xf) + assert_array_equal(x, xm) + + def test_basic2d(self): + # Test of basic array creation and properties in 2 dimensions. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + for s in [(4, 3), (6, 2)]: + x.shape = s + y.shape = s + xm.shape = s + ym.shape = s + xf.shape = s + + assert_(not isMaskedArray(x)) + assert_(isMaskedArray(xm)) + assert_equal(shape(xm), s) + assert_equal(xm.shape, s) + assert_equal(xm.size, reduce(lambda x, y: x * y, s)) + assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1)) + assert_equal(xm, xf) + assert_equal(filled(xm, 1.e20), xf) + assert_equal(x, xm) + + def test_concatenate_basic(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # basic concatenation + assert_equal(np.concatenate((x, y)), concatenate((xm, ym))) + assert_equal(np.concatenate((x, y)), concatenate((x, y))) + assert_equal(np.concatenate((x, y)), concatenate((xm, y))) + assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x))) + + def test_concatenate_alongaxis(self): + # Tests concatenations. + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + # Concatenation along an axis + s = (3, 4) + x.shape = y.shape = xm.shape = ym.shape = s + assert_equal(xm.mask, np.reshape(m1, s)) + assert_equal(ym.mask, np.reshape(m2, s)) + xmym = concatenate((xm, ym), 1) + assert_equal(np.concatenate((x, y), 1), xmym) + assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask) + + x = zeros(2) + y = array(ones(2), mask=[False, True]) + z = concatenate((x, y)) + assert_array_equal(z, [0, 0, 1, 1]) + assert_array_equal(z.mask, [False, False, False, True]) + z = concatenate((y, x)) + assert_array_equal(z, [1, 1, 0, 0]) + assert_array_equal(z.mask, [False, True, False, False]) + + def test_concatenate_flexible(self): + # Tests the concatenation on flexible arrays. 
+ data = masked_array(list(zip(np.random.rand(10),
+ np.arange(10))),
+ dtype=[('a', float), ('b', int)])
+
+ test = concatenate([data[:5], data[5:]])
+ assert_equal_records(test, data)
+
+ def test_creation_ndmin(self):
+ # Check the use of ndmin
+ x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)
+ assert_equal(x.shape, (1, 3))
+ assert_equal(x._data, [[1, 2, 3]])
+ assert_equal(x._mask, [[1, 0, 0]])
+
+ def test_creation_ndmin_from_maskedarray(self):
+ # Make sure we're not losing the original mask w/ ndmin
+ x = array([1, 2, 3])
+ x[-1] = masked
+ xx = array(x, ndmin=2, dtype=float)
+ assert_equal(x.shape, x._mask.shape)
+ assert_equal(xx.shape, xx._mask.shape)
+
+ def test_creation_maskcreation(self):
+ # Tests how masks are initialized at the creation of Maskedarrays.
+ data = arange(24, dtype=float)
+ data[[3, 6, 15]] = masked
+ dma_1 = MaskedArray(data)
+ assert_equal(dma_1.mask, data.mask)
+ dma_2 = MaskedArray(dma_1)
+ assert_equal(dma_2.mask, dma_1.mask)
+ dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
+ fail_if_equal(dma_3.mask, dma_1.mask)
+
+ x = array([1, 2, 3], mask=True)
+ assert_equal(x._mask, [True, True, True])
+ x = array([1, 2, 3], mask=False)
+ assert_equal(x._mask, [False, False, False])
+ y = array([1, 2, 3], mask=x._mask, copy=False)
+ assert_(np.may_share_memory(x.mask, y.mask))
+ y = array([1, 2, 3], mask=x._mask, copy=True)
+ assert_(not np.may_share_memory(x.mask, y.mask))
+ x = array([1, 2, 3], mask=None)
+ assert_equal(x._mask, [False, False, False])
+
+ def test_masked_singleton_array_creation_warns(self):
+ # The first call works, but ideally should not; there may be no way
+ # to solve this, however, as long as `np.ma.masked` is an ndarray.
+ np.array(np.ma.masked)
+ with pytest.warns(UserWarning):
+ # Tries to create a float array, using `float(np.ma.masked)`.
+ # We may want to define this as invalid behaviour in the future!
+ # (requiring np.ma.masked to be a known NumPy scalar probably
+ # with a DType.)
+ np.array([3., np.ma.masked])
+
+ def test_creation_with_list_of_maskedarrays(self):
+ # Tests creating a masked array from a list of masked arrays.
+ x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
+ data = array((x, x[::-1]))
+ assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
+ assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])
+
+ x.mask = nomask
+ data = array((x, x[::-1]))
+ assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
+ assert_(data.mask is nomask)
+
+ def test_creation_with_list_of_maskedarrays_no_bool_cast(self):
+ # Tests the regression in gh-18551
+ masked_str = np.ma.masked_array(['a', 'b'], mask=[True, False])
+ normal_int = np.arange(2)
+ res = np.ma.asarray([masked_str, normal_int], dtype="U21")
+ assert_array_equal(res.mask, [[True, False], [False, False]])
+
+ # The above only failed due to a long chain of oddities; try also with
+ # an object array that cannot always be converted to bool:
+ class NotBool:
+ def __bool__(self):
+ raise ValueError("not a bool!")
+ masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False])
+ # Check that the NotBool actually fails like we would expect:
+ with pytest.raises(ValueError, match="not a bool!"):
+ np.asarray([masked_obj], dtype=bool)
+
+ res = np.ma.asarray([masked_obj, normal_int])
+ assert_array_equal(res.mask, [[True, False], [False, False]])
+
+ def test_creation_from_ndarray_with_padding(self):
+ x = np.array([('A', 0)], dtype={'names': ['f0', 'f1'],
+ 'formats': ['S4', 'i8'],
+ 'offsets': [0, 8]})
+ array(x) # used to fail due to 'V' padding field in x.dtype.descr
+
+ def test_unknown_keyword_parameter(self):
+ with pytest.raises(TypeError, match="unexpected keyword argument"):
+ MaskedArray([1, 2, 3], maks=[0, 1, 0]) # `mask` is misspelled.
+
+ def test_asarray(self):
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ xm.fill_value = -9999
+ xm._hardmask = True
+ xmm = asarray(xm)
+ assert_equal(xmm._data, xm._data)
+ assert_equal(xmm._mask, xm._mask)
+ assert_equal(xmm.fill_value, xm.fill_value)
+ assert_equal(xmm._hardmask, xm._hardmask)
+
+ def test_asarray_default_order(self):
+ # See Issue #6646
+ m = np.eye(3).T
+ assert_(not m.flags.c_contiguous)
+
+ new_m = asarray(m)
+ assert_(new_m.flags.c_contiguous)
+
+ def test_asarray_enforce_order(self):
+ # See Issue #6646
+ m = np.eye(3).T
+ assert_(not m.flags.c_contiguous)
+
+ new_m = asarray(m, order='C')
+ assert_(new_m.flags.c_contiguous)
+
+ def test_fix_invalid(self):
+ # Checks fix_invalid.
+ with np.errstate(invalid='ignore'):
+ data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])
+ data_fixed = fix_invalid(data)
+ assert_equal(data_fixed._data, [data.fill_value, 0., 1.])
+ assert_equal(data_fixed._mask, [1., 0., 1.])
+
+ def test_maskedelement(self):
+ # Test of masked element
+ x = arange(6)
+ x[1] = masked
+ assert_(str(masked) == '--')
+ assert_(x[1] is masked)
+ assert_equal(filled(x[1], 0), 0)
+
+ def test_set_element_as_object(self):
+ # Tests setting elements with object
+ a = empty(1, dtype=object)
+ x = (1, 2, 3, 4, 5)
+ a[0] = x
+ assert_equal(a[0], x)
+ assert_(a[0] is x)
+
+ import datetime
+ dt = datetime.datetime.now()
+ a[0] = dt
+ assert_(a[0] is dt)
+
+ def test_indexing(self):
+ # Tests conversions and indexing
+ x1 = np.array([1, 2, 4, 3])
+ x2 = array(x1, mask=[1, 0, 0, 0])
+ x3 = array(x1, mask=[0, 1, 0, 1])
+ x4 = array(x1)
+ # test conversion to strings
+ str(x2) # raises?
+ repr(x2) # raises?
+ assert_equal(np.sort(x1), sort(x2, endwith=False))
+ # tests of indexing
+ assert_(type(x2[1]) is type(x1[1]))
+ assert_(x1[1] == x2[1])
+ assert_(x2[0] is masked)
+ assert_equal(x1[2], x2[2])
+ assert_equal(x1[2:5], x2[2:5])
+ assert_equal(x1[:], x2[:])
+ assert_equal(x1[1:], x3[1:])
+ x1[2] = 9
+ x2[2] = 9
+ assert_equal(x1, x2)
+ x1[1:3] = 99
+ x2[1:3] = 99
+ assert_equal(x1, x2)
+ x2[1] = masked
+ assert_equal(x1, x2)
+ x2[1:3] = masked
+ assert_equal(x1, x2)
+ x2[:] = x1
+ x2[1] = masked
+ assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
+ x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+ assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
+ x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
+ assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
+ assert_(allequal(x4, array([1, 2, 3, 4])))
+ x1 = np.arange(5) * 1.0
+ x2 = masked_values(x1, 3.0)
+ assert_equal(x1, x2)
+ assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
+ assert_equal(3.0, x2.fill_value)
+ x1 = array([1, 'hello', 2, 3], object)
+ x2 = np.array([1, 'hello', 2, 3], object)
+ s1 = x1[1]
+ s2 = x2[1]
+ assert_equal(type(s2), str)
+ assert_equal(type(s1), str)
+ assert_equal(s1, s2)
+ assert_(x1[1:1].shape == (0,))
+
+ def test_setitem_no_warning(self):
+ # Setitem shouldn't warn, because the assignment might be masked,
+ # and warning for a masked assignment is weird (see gh-23000).
+ # (That is, when the value is masked; otherwise a warning would be
+ # acceptable, but none is currently given.)
+ x = np.ma.arange(60).reshape((6, 10))
+ index = (slice(1, 5, 2), [7, 5])
+ value = np.ma.masked_all((2, 2))
+ value._data[...] = np.inf # not a valid integer...
+ x[index] = value
+ # The masked scalar is special cased, but test anyway (it's NaN):
+ x[...] = np.ma.masked
+ # Finally, a large value that cannot be cast to the float32 `x`
+ x = np.ma.arange(3., dtype=np.float32)
+ value = np.ma.array([2e234, 1, 1], mask=[True, False, False])
+ x[...] = value
+ x[[0, 1, 2]] = value
+
+ @suppress_copy_mask_on_assignment
+ def test_copy(self):
+ # Tests of some subtle points of copying and sizing.
+ n = [0, 0, 1, 0, 0]
+ m = make_mask(n)
+ m2 = make_mask(m)
+ assert_(m is m2)
+ m3 = make_mask(m, copy=True)
+ assert_(m is not m3)
+
+ x1 = np.arange(5)
+ y1 = array(x1, mask=m)
+ assert_equal(y1._data.__array_interface__, x1.__array_interface__)
+ assert_(allequal(x1, y1.data))
+ assert_equal(y1._mask.__array_interface__, m.__array_interface__)
+
+ y1a = array(y1)
+ # Default for masked array is not to copy; see gh-10318.
+ assert_(y1a._data.__array_interface__ == + y1._data.__array_interface__) + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3) + assert_(y2._data.__array_interface__ == x1.__array_interface__) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask.__array_interface__ == m3.__array_interface__) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._data.__array_interface__ != x1.__array_interface__) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + #assert_( y2a._mask is not m) + assert_(y2a._mask.__array_interface__ != m.__array_interface__) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_equal(concatenate([x4, x4]), y4) + assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3]) + y6 = repeat(x4, 2, axis=0) + assert_equal(y5, y6) + y7 = x4.repeat((2, 2, 2, 2), axis=0) + assert_equal(y5, y7) + y8 = x4.repeat(2, 0) + assert_equal(y5, y8) + + y9 = x4.copy() + assert_equal(y9._data, x4._data) + assert_equal(y9._mask, x4._mask) + + x = masked_array([1, 2, 3], mask=[0, 1, 0]) + # Copy is False by default + y = masked_array(x) + assert_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_equal(y._mask.ctypes.data, x._mask.ctypes.data) + y = masked_array(x, copy=True) + assert_not_equal(y._data.ctypes.data, x._data.ctypes.data) + assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data) + + def test_copy_0d(self): + # gh-9430 + x = np.ma.array(43, mask=True) + xc = x.copy() + assert_equal(xc.mask, True) + + def test_copy_on_python_builtins(self): + # Tests copy works on python builtins (issue#8019) + assert_(isMaskedArray(np.ma.copy([1, 2, 3]))) + assert_(isMaskedArray(np.ma.copy((1, 2, 3)))) + + def test_copy_immutable(self): + # Tests that the copy method is immutable, GitHub issue #5247 + a = np.ma.array([1, 2, 3]) + b = np.ma.array([4, 5, 6]) + a_copy_method = a.copy + b.copy + assert_equal(a_copy_method(), [1, 2, 3]) + + def test_deepcopy(self): + from copy import deepcopy + a = array([0, 1, 2], mask=[False, True, False]) + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + assert_not_equal(id(a._mask), id(copied._mask)) + + copied[1] = 1 + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + copied = deepcopy(a) + assert_equal(copied.mask, a.mask) + copied.mask[1] = False + assert_equal(copied.mask, [0, 0, 0]) + assert_equal(a.mask, [0, 1, 0]) + + def test_format(self): + a = array([0, 1, 2], mask=[False, True, False]) + assert_equal(format(a), "[0 -- 2]") + assert_equal(format(masked), "--") + assert_equal(format(masked, ""), "--") + + # Postponed from PR #15410, perhaps address in the future. 
+ # assert_equal(format(masked, " >5"), " --")
+ # assert_equal(format(masked, " <5"), "-- ")
+
+ # Expect a FutureWarning for using format_spec with MaskedElement
+ with assert_warns(FutureWarning):
+ with_format_string = format(masked, " >5")
+ assert_equal(with_format_string, "--")
+
+ def test_str_repr(self):
+ a = array([0, 1, 2], mask=[False, True, False])
+ assert_equal(str(a), '[0 -- 2]')
+ assert_equal(
+ repr(a),
+ textwrap.dedent('''\
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)''')
+ )
+
+ # arrays with a continuation
+ a = np.ma.arange(2000)
+ a[1:50] = np.ma.masked
+ assert_equal(
+ repr(a),
+ textwrap.dedent('''\
+ masked_array(data=[0, --, --, ..., 1997, 1998, 1999],
+ mask=[False, True, True, ..., False, False, False],
+ fill_value=999999)''')
+ )
+
+ # line-wrapped 1d arrays are correctly aligned
+ a = np.ma.arange(20)
+ assert_equal(
+ repr(a),
+ textwrap.dedent('''\
+ masked_array(data=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19],
+ mask=False,
+ fill_value=999999)''')
+ )
+
+ # 2d arrays cause wrapping
+ a = array([[1, 2, 3], [4, 5, 6]], dtype=np.int8)
+ a[1, 1] = np.ma.masked
+ assert_equal(
+ repr(a),
+ textwrap.dedent(f'''\
+ masked_array(
+ data=[[1, 2, 3],
+ [4, --, 6]],
+ mask=[[False, False, False],
+ [False, True, False]],
+ fill_value={np.array(999999)[()]!r},
+ dtype=int8)''')
+ )
+
+ # but not if they're a row vector
+ assert_equal(
+ repr(a[:1]),
+ textwrap.dedent(f'''\
+ masked_array(data=[[1, 2, 3]],
+ mask=[[False, False, False]],
+ fill_value={np.array(999999)[()]!r},
+ dtype=int8)''')
+ )
+
+ # dtype=int is implied, so not shown
+ assert_equal(
+ repr(a.astype(int)),
+ textwrap.dedent('''\
+ masked_array(
+ data=[[1, 2, 3],
+ [4, --, 6]],
+ mask=[[False, False, False],
+ [False, True, False]],
+ fill_value=999999)''')
+ )
+
+ def test_str_repr_legacy(self):
+ oldopts = np.get_printoptions()
+ np.set_printoptions(legacy='1.13')
+ try:
+ a = array([0, 1, 2], mask=[False, True, False])
+ assert_equal(str(a), '[0 -- 2]')
+ assert_equal(repr(a), 'masked_array(data = [0 -- 2],\n'
+ ' mask = [False True False],\n'
+ ' fill_value = 999999)\n')
+
+ a = np.ma.arange(2000)
+ a[1:50] = np.ma.masked
+ assert_equal(
+ repr(a),
+ 'masked_array(data = [0 -- -- ..., 1997 1998 1999],\n'
+ ' mask = [False True True ..., False False False],\n'
+ ' fill_value = 999999)\n'
+ )
+ finally:
+ np.set_printoptions(**oldopts)
+
+ def test_0d_unicode(self):
+ u = 'caf\xe9'
+ utype = type(u)
+
+ arr_nomask = np.ma.array(u)
+ arr_masked = np.ma.array(u, mask=True)
+
+ assert_equal(utype(arr_nomask), u)
+ assert_equal(utype(arr_masked), '--')
+
+ def test_pickling(self):
+ # Tests pickling
+ for dtype in (int, float, str, object):
+ a = arange(10).astype(dtype)
+ a.fill_value = 999
+
+ masks = ([0, 0, 0, 1, 0, 1, 0, 1, 0, 1], # partially masked
+ True, # Fully masked
+ False) # Fully unmasked
+
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ for mask in masks:
+ a.mask = mask
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled._data, a._data)
+ if dtype in (object, int):
+ assert_equal(a_pickled.fill_value, 999)
+ else:
+ assert_equal(a_pickled.fill_value, dtype(999))
+ assert_array_equal(a_pickled.mask, mask)
+
+ def test_pickling_subbaseclass(self):
+ # Test pickling w/ a subclass of ndarray
+ x = np.array([(1.0, 2), (3.0, 4)],
+ dtype=[('x', float), ('y', int)]).view(np.recarray)
+ a = masked_array(x, mask=[(True, False), (False, 
True)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.recarray)) + + def test_pickling_maskedconstant(self): + # Test pickling MaskedConstant + mc = np.ma.masked + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto)) + assert_equal(mc_pickled._baseclass, mc._baseclass) + assert_equal(mc_pickled._mask, mc._mask) + assert_equal(mc_pickled._data, mc._data) + + def test_pickling_wstructured(self): + # Tests pickling w/ structured array + a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)], + dtype=[('a', int), ('b', float)]) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + + def test_pickling_keepalignment(self): + # Tests pickling w/ F_CONTIGUOUS arrays + a = arange(10) + a.shape = (-1, 2) + b = a.T + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + test = pickle.loads(pickle.dumps(b, protocol=proto)) + assert_equal(test, b) + + def test_single_element_subscript(self): + # Tests single element subscripts of Maskedarrays. + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_topython(self): + # Tests some communication issues with Python. + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + + with suppress_warnings() as sup: + sup.filter(UserWarning, 'Warning: converting a masked element') + assert_(np.isnan(float(array([1], mask=[1])))) + + a = array([1, 2, 3], mask=[1, 0, 0]) + assert_raises(TypeError, lambda: float(a)) + assert_equal(float(a[-1]), 3.) + assert_(np.isnan(float(a[0]))) + assert_raises(TypeError, int, a) + assert_equal(int(a[-1]), 3) + assert_raises(MAError, lambda: int(a[0])) + + def test_oddfeatures_1(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_equal(z.real, x) + assert_equal(z.imag, 10 * x) + assert_equal((z * conjugate(z)).real, 101 * x * x) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_equal(x, z) + + def test_oddfeatures_2(self): + # Tests some more features. 
+ x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + @suppress_copy_mask_on_assignment + def test_oddfeatures_3(self): + # Tests some generic features + atest = array([10], mask=True) + btest = array([20]) + idx = atest.mask + atest[idx] = btest[idx] + assert_equal(atest, [20]) + + def test_filled_with_object_dtype(self): + a = np.ma.masked_all(1, dtype='O') + assert_equal(a.filled('x')[0], 'x') + + def test_filled_with_flexible_dtype(self): + # Test filled w/ flexible dtype + flexi = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + flexi[0] = masked + assert_equal(flexi.filled(), + np.array([(default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.),)], dtype=flexi.dtype)) + flexi[0] = masked + assert_equal(flexi.filled(1), + np.array([(1, '1', 1.)], dtype=flexi.dtype)) + + def test_filled_with_mvoid(self): + # Test filled w/ mvoid + ndtype = [('a', int), ('b', float)] + a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype) + # Filled using default + test = a.filled() + assert_equal(tuple(test), (1, default_fill_value(1.))) + # Explicit fill_value + test = a.filled((-1, -1)) + assert_equal(tuple(test), (1, -1)) + # Using predefined filling values + a.fill_value = (-999, -999) + assert_equal(tuple(a.filled()), (1, -999)) + + def test_filled_with_nested_dtype(self): + # Test filled w/ nested dtype + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([(1, (1, 1)), (2, (2, 2))], + mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype) + test = a.filled(0) + control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype) + assert_equal(test, control) + + test = a['B'].filled(0) + control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype) + assert_equal(test, control) + + # test if mask gets set correctly (see #6760) + Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2, 2))])) + assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)), + ('f1', 'i1', (2, 2))], (2, 2))])) + assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)), + ('f1', '?', (2, 2))], (2, 2))])) + + def test_filled_with_f_order(self): + # Test filled w/ F-contiguous array + a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'), + mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'), + order='F') # this is currently ignored + assert_(a.flags['F_CONTIGUOUS']) + assert_(a.filled(0).flags['F_CONTIGUOUS']) + + def test_optinfo_propagation(self): + # Checks that _optinfo dictionary isn't back-propagated + x = array([1, 2, 3, ], dtype=float) + x._optinfo['info'] = '???' + y = x.copy() + assert_equal(y._optinfo['info'], '???') + y._optinfo['info'] = '!!!' 
+ assert_equal(x._optinfo['info'], '???') + + def test_optinfo_forward_propagation(self): + a = array([1, 2, 2, 4]) + a._optinfo["key"] = "value" + assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"]) + assert_equal(a._optinfo["key"], a[:2]._optinfo["key"]) + assert_equal(a._optinfo["key"], a[[0, 0, 2]]._optinfo["key"]) + assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"]) + assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"]) + assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"]) + + def test_fancy_printoptions(self): + # Test printing a masked array w/ fancy dtype. + fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])]) + test = array([(1, (2, 3.0)), (4, (5, 6.0))], + mask=[(1, (0, 1)), (0, (1, 0))], + dtype=fancydtype) + control = "[(--, (2, --)) (4, (--, 6.0))]" + assert_equal(str(test), control) + + # Test 0-d array with multi-dimensional dtype + t_2d0 = masked_array(data=(0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask=(False, [[True, False, True], + [False, False, True]], + False), + dtype="int, (2,3)float, float") + control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" + assert_equal(str(t_2d0), control) + + def test_flatten_structured_array(self): + # Test flatten_structured_array on arrays + # On ndarray + ndtype = [('a', int), ('b', float)] + a = np.array([(1, 1), (2, 2)], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[1., 1.], [2., 2.]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + # On masked_array + a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1.], [2., 2.]], + mask=[[0, 1], [1, 0]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # On masked array with nested structure + ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])] + a = array([(1, (1, 1.1)), (2, (2, 2.2))], + mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype) + test = flatten_structured_array(a) + control = array([[1., 1., 1.1], [2., 2., 2.2]], + mask=[[0, 1, 0], [1, 0, 1]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + assert_equal(test.mask, control.mask) + # Keeping the initial shape + ndtype = [('a', int), ('b', float)] + a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype) + test = flatten_structured_array(a) + control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float) + assert_equal(test, control) + assert_equal(test.dtype, control.dtype) + + def test_void0d(self): + # Test creating a mvoid object + ndtype = [('a', int), ('b', int)] + a = np.array([(1, 2,)], dtype=ndtype)[0] + f = mvoid(a) + assert_(isinstance(f, mvoid)) + + a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0] + assert_(isinstance(a, mvoid)) + + a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + f = mvoid(a._data[0], a._mask[0]) + assert_(isinstance(f, 
mvoid))
+
+ def test_mvoid_getitem(self):
+ # Test mvoid.__getitem__
+ ndtype = [('a', int), ('b', int)]
+ a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+ dtype=ndtype)
+ # w/o mask
+ f = a[0]
+ assert_(isinstance(f, mvoid))
+ assert_equal((f[0], f['a']), (1, 1))
+ assert_equal(f['b'], 2)
+ # w/ mask
+ f = a[1]
+ assert_(isinstance(f, mvoid))
+ assert_(f[0] is masked)
+ assert_(f['a'] is masked)
+ assert_equal(f[1], 4)
+
+ # exotic dtype
+ A = masked_array(data=[([0, 1],)],
+ mask=[([True, False],)],
+ dtype=[("A", ">i2", (2,))])
+ assert_equal(A[0]["A"], A["A"][0])
+ assert_equal(A[0]["A"], masked_array(data=[0, 1],
+ mask=[True, False], dtype=">i2"))
+
+ def test_mvoid_iter(self):
+ # Test iteration on __getitem__
+ ndtype = [('a', int), ('b', int)]
+ a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)],
+ dtype=ndtype)
+ # w/o mask
+ assert_equal(list(a[0]), [1, 2])
+ # w/ mask
+ assert_equal(list(a[1]), [masked, 4])
+
+ def test_mvoid_print(self):
+ # Test printing a mvoid
+ mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])
+ assert_equal(str(mx[0]), "(1, 1)")
+ mx['b'][0] = masked
+ ini_display = masked_print_option._display
+ masked_print_option.set_display("-X-")
+ try:
+ assert_equal(str(mx[0]), "(1, -X-)")
+ assert_equal(repr(mx[0]), "(1, -X-)")
+ finally:
+ masked_print_option.set_display(ini_display)
+
+ # also check if there are object datatypes (see gh-7493)
+ mx = array([(1,), (2,)], dtype=[('a', 'O')])
+ assert_equal(str(mx[0]), "(1,)")
+
+ def test_mvoid_multidim_print(self):
+
+ # regression test for gh-6019
+ t_ma = masked_array(data=[([1, 2, 3],)],
+ mask=[([False, True, False],)],
+ fill_value=([999999, 999999, 999999],),
+ dtype=[('a', '<i4', (3,))])
+ assert_(str(t_ma[0]) == "([1, --, 3],)")
+ assert_(repr(t_ma[0]) == "([1, --, 3],)")
+
+
+class TestMaskedArrayArithmetic:
+ # Base test class for arithmetic on MaskedArrays.
+
+ def setup_method(self):
+ # Base data definition.
+ x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
+ a10 = 10.
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
+ xm = masked_array(x, mask=m1)
+ ym = masked_array(y, mask=m2)
+ z = np.array([-.5, 0., .5, .8])
+ zm = masked_array(z, mask=[0, 1, 0, 0])
+ xf = np.where(m1, 1e+20, x)
+ xm.set_fill_value(1e+20)
+ self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)
+ self.err_status = np.geterr()
+ np.seterr(divide='ignore', invalid='ignore')
+
+ def teardown_method(self):
+ np.seterr(**self.err_status)
+
+ def test_addsumprod(self):
+ # Tests add, sum and product reductions along the second axis.
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
+ s = (3, 4)
+ x.shape = y.shape = xm.shape = ym.shape = s
+ if len(s) > 1:
+ assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
+ assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
+ assert_equal(np.sum(x, 1), sum(x, 1))
+ assert_equal(np.prod(x, 1), product(x, 1))
+
+ def test_binops_d2D(self):
+ # Test binary operations on 2D data
+ a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+ b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+ test = a * b
+ control = array([[2., 3.], [2., 2.], [3., 3.]],
+ mask=[[0, 0], [1, 1], [1, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ test = b * a
+ control = array([[2., 3.], [4., 5.], [6., 7.]],
+ mask=[[0, 0], [1, 1], [1, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ a = array([[1.], [2.], [3.]])
+ b = array([[2., 3.], [4., 5.], [6., 7.]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ test = a * b
+ control = array([[2, 3], [8, 10], [18, 3]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ test = b * a
+ control = array([[2, 3], [8, 10], [18, 7]],
+ mask=[[0, 0], [0, 0], [0, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ def test_domained_binops_d2D(self):
+ # Test domained binary operations on 2D data
+ a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])
+ b = array([[2., 3.], [4., 5.], [6., 7.]])
+
+ test = a / b
+ control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],
+ mask=[[0, 0], [1, 1], [1, 1]])
+ assert_equal(test, control)
+ assert_equal(test.data, control.data)
+ assert_equal(test.mask, control.mask)
+
+ test = b / a
+ control = array([[2. / 1., 3. 
/ 1.], [4., 5.], [6., 7.]], + mask=[[0, 0], [1, 1], [1, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + a = array([[1.], [2.], [3.]]) + b = array([[2., 3.], [4., 5.], [6., 7.]], + mask=[[0, 0], [0, 0], [0, 1]]) + test = a / b + control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. / 6, 3]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + test = b / a + control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]], + mask=[[0, 0], [0, 0], [0, 1]]) + assert_equal(test, control) + assert_equal(test.data, control.data) + assert_equal(test.mask, control.mask) + + def test_noshrinking(self): + # Check that we don't shrink a mask when not wanted + # Binary operations + a = masked_array([1., 2., 3.], mask=[False, False, False], + shrink=False) + b = a + 1 + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a += 1 + assert_equal(a.mask, [0, 0, 0]) + # Domained binary operation + b = a / 1. + assert_equal(b.mask, [0, 0, 0]) + # In place binary operation + a /= 1. + assert_equal(a.mask, [0, 0, 0]) + + def test_ufunc_nomask(self): + # check the case ufuncs should set the mask to false + m = np.ma.array([1]) + # check we don't get array([False], dtype=bool) + assert_equal(np.true_divide(m, 5).mask.shape, ()) + + def test_noshink_on_creation(self): + # Check that the mask is not shrunk on array creation when not wanted + a = np.ma.masked_values([1., 2.5, 3.1], 1.5, shrink=False) + assert_equal(a.mask, [0, 0, 0]) + + def test_mod(self): + # Tests mod + (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d + assert_equal(mod(x, y), mod(xm, ym)) + test = mod(ym, xm) + assert_equal(test, np.mod(ym, xm)) + assert_equal(test.mask, mask_or(xm.mask, ym.mask)) + test = mod(xm, ym) + assert_equal(test, np.mod(xm, ym)) + assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0))) + + def test_TakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))) + assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)) + assert_equal(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y)) + assert_equal(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y)) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_imag_real(self): + # Check complex + xx = array([1 + 10j, 20 + 2j], mask=[1, 0]) + assert_equal(xx.imag, [10, 2]) + assert_equal(xx.imag.filled(), [1e+20, 2]) + assert_equal(xx.imag.dtype, xx._data.imag.dtype) + assert_equal(xx.real, [1, 20]) + assert_equal(xx.real.filled(), [1e+20, 20]) + assert_equal(xx.real.dtype, xx._data.real.dtype) + + def test_methods_with_output(self): + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',) + + for funcname in funclist: + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + # A ndarray as explicit input + output = np.empty(4, dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... 
the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty(4, dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + assert_(output[0] is masked) + + def test_eq_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a == b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] == a) + assert_equal(test.data, [[True, False], [False, False]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_ne_on_structured(self): + # Test the equality of structured arrays + ndtype = [('A', int), ('B', int)] + a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype) + test = (a != b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, False]) + assert_(test.fill_value == True) + + # complicated dtype, 2-dimensional array. + ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])] + a = array([[(1, (1, 1)), (2, (2, 2))], + [(3, (3, 3)), (4, (4, 4))]], + mask=[[(0, (1, 0)), (0, (0, 1))], + [(1, (0, 0)), (1, (1, 1))]], dtype=ndtype) + test = (a[0, 0] != a) + assert_equal(test.data, [[False, True], [True, True]]) + assert_equal(test.mask, [[False, False], [False, True]]) + assert_(test.fill_value == True) + + def test_eq_ne_structured_with_non_masked(self): + a = array([(1, 1), (2, 2), (3, 4)], + mask=[(0, 1), (0, 0), (1, 1)], dtype='i4,i4') + eq = a == a.data + ne = a.data != a + # Test the obvious. + assert_(np.all(eq)) + assert_(not np.any(ne)) + # Expect the mask set only for items with all fields masked. 
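+        # For instance, a mask row like (False, True) is only partially
+        # masked, so it compares unequal to np.ones((), a.mask.dtype),
+        # i.e. (True, True); only the fully masked last row sets the
+        # result mask.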
+        expected_mask = a.mask == np.ones((), a.mask.dtype)
+        assert_array_equal(eq.mask, expected_mask)
+        assert_array_equal(ne.mask, expected_mask)
+        # The masked element will indicate not-equal, because the
+        # masks did not match.
+        assert_equal(eq.data, [True, True, False])
+        assert_array_equal(eq.data, ~ne.data)
+
+    def test_eq_ne_structured_extra(self):
+        # ensure simple examples are symmetric and make sense.
+        # from https://github.com/numpy/numpy/pull/8590#discussion_r101126465
+        dt = np.dtype('i4,i4')
+        for m1 in (mvoid((1, 2), mask=(0, 0), dtype=dt),
+                   mvoid((1, 2), mask=(0, 1), dtype=dt),
+                   mvoid((1, 2), mask=(1, 0), dtype=dt),
+                   mvoid((1, 2), mask=(1, 1), dtype=dt)):
+            ma1 = m1.view(MaskedArray)
+            r1 = ma1.view('2i4')
+            for m2 in (np.array((1, 1), dtype=dt),
+                       mvoid((1, 1), dtype=dt),
+                       mvoid((1, 0), mask=(0, 1), dtype=dt),
+                       mvoid((3, 2), mask=(0, 1), dtype=dt)):
+                ma2 = m2.view(MaskedArray)
+                r2 = ma2.view('2i4')
+                eq_expected = (r1 == r2).all()
+                assert_equal(m1 == m2, eq_expected)
+                assert_equal(m2 == m1, eq_expected)
+                assert_equal(ma1 == m2, eq_expected)
+                assert_equal(m1 == ma2, eq_expected)
+                assert_equal(ma1 == ma2, eq_expected)
+                # Also check it is the same if we do it element by element.
+                el_by_el = [m1[name] == m2[name] for name in dt.names]
+                assert_equal(array(el_by_el, dtype=bool).all(), eq_expected)
+                ne_expected = (r1 != r2).any()
+                assert_equal(m1 != m2, ne_expected)
+                assert_equal(m2 != m1, ne_expected)
+                assert_equal(ma1 != m2, ne_expected)
+                assert_equal(m1 != ma2, ne_expected)
+                assert_equal(ma1 != ma2, ne_expected)
+                el_by_el = [m1[name] != m2[name] for name in dt.names]
+                assert_equal(array(el_by_el, dtype=bool).any(), ne_expected)
+
+    @pytest.mark.parametrize('dt', ['S', 'U'])
+    @pytest.mark.parametrize('fill', [None, 'A'])
+    def test_eq_for_strings(self, dt, fill):
+        # Test the equality of string arrays
+        a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
+
+        test = (a == a)
+        assert_equal(test.data, [True, True])
+        assert_equal(test.mask, [False, True])
+        assert_(test.fill_value == True)
+
+        test = (a == a[0])
+        assert_equal(test.data, [True, False])
+        assert_equal(test.mask, [False, True])
+        assert_(test.fill_value == True)
+
+        b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
+        test = (a == b)
+        assert_equal(test.data, [False, False])
+        assert_equal(test.mask, [True, True])
+        assert_(test.fill_value == True)
+
+        test = (a[0] == b)
+        assert_equal(test.data, [False, False])
+        assert_equal(test.mask, [True, False])
+        assert_(test.fill_value == True)
+
+        test = (b == a[0])
+        assert_equal(test.data, [False, False])
+        assert_equal(test.mask, [True, False])
+        assert_(test.fill_value == True)
+
+    @pytest.mark.parametrize('dt', ['S', 'U'])
+    @pytest.mark.parametrize('fill', [None, 'A'])
+    def test_ne_for_strings(self, dt, fill):
+        # Test the inequality of string arrays
+        a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
+
+        test = (a != a)
+        assert_equal(test.data, [False, False])
+        assert_equal(test.mask, [False, True])
+        assert_(test.fill_value == True)
+
+        test = (a != a[0])
+        assert_equal(test.data, [False, True])
+        assert_equal(test.mask, [False, True])
+        assert_(test.fill_value == True)
+
+        b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
+        test = (a != b)
+        assert_equal(test.data, [True, True])
+        assert_equal(test.mask, [True, True])
+        assert_(test.fill_value == True)
+
+        test = (a[0] != b)
+        assert_equal(test.data, [True, True])
+        assert_equal(test.mask, [True, False])
+        assert_(test.fill_value ==
True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_eq_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a == a) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a == a[0]) + assert_equal(test.data, [True, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] == b) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b == a[0]) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize("op", [operator.eq, operator.lt]) + def test_eq_broadcast_with_unmasked(self, op): + a = array([0, 1], mask=[0, 1]) + b = np.arange(10).reshape(5, 2) + result = op(a, b) + assert_(result.mask.shape == b.shape) + assert_equal(result.mask, np.zeros(b.shape, bool) | a.mask) + + @pytest.mark.parametrize("op", [operator.eq, operator.gt]) + def test_comp_no_mask_not_broadcast(self, op): + # Regression test for failing doctest in MaskedArray.nonzero + # after gh-24556. + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + result = op(a, 3) + assert_(not result.mask.shape) + assert_(result.mask is nomask) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + def test_ne_for_numeric(self, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = (a != a) + assert_equal(test.data, [False, False]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = (a != a[0]) + assert_equal(test.data, [False, True]) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = (a != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = (a[0] != b) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = (b != a[0]) + assert_equal(test.data, [True, True]) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('dt1', num_dts, ids=num_ids) + @pytest.mark.parametrize('dt2', num_dts, ids=num_ids) + @pytest.mark.parametrize('fill', [None, 1]) + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + def test_comparisons_for_numeric(self, op, dt1, dt2, fill): + # Test the equality of structured arrays + a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill) + + test = op(a, a) + assert_equal(test.data, op(a._data, a._data)) + assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + test = op(a, a[0]) + assert_equal(test.data, op(a._data, a._data[0])) + 
assert_equal(test.mask, [False, True]) + assert_(test.fill_value == True) + + b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill) + test = op(a, b) + assert_equal(test.data, op(a._data, b._data)) + assert_equal(test.mask, [True, True]) + assert_(test.fill_value == True) + + test = op(a[0], b) + assert_equal(test.data, op(a._data[0], b._data)) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + test = op(b, a[0]) + assert_equal(test.data, op(b._data, a._data[0])) + assert_equal(test.mask, [True, False]) + assert_(test.fill_value == True) + + @pytest.mark.parametrize('op', + [operator.le, operator.lt, operator.ge, operator.gt]) + @pytest.mark.parametrize('fill', [None, "N/A"]) + def test_comparisons_strings(self, op, fill): + # See gh-21770, mask propagation is broken for strings (and some other + # cases) so we explicitly test strings here. + # In principle only == and != may need special handling... + ma1 = masked_array(["a", "b", "cde"], mask=[0, 1, 0], fill_value=fill) + ma2 = masked_array(["cde", "b", "a"], mask=[0, 1, 0], fill_value=fill) + assert_equal(op(ma1, ma2)._data, op(ma1._data, ma2._data)) + + def test_eq_with_None(self): + # Really, comparisons with None should not be done, but check them + # anyway. Note that pep8 will flag these tests. + # Deprecation is in place for arrays, and when it happens this + # test will fail (and have to be changed accordingly). + + # With partial mask + with suppress_warnings() as sup: + sup.filter(FutureWarning, "Comparison to `None`") + a = array([None, 1], mask=[0, 1]) + assert_equal(a == None, array([True, False], mask=[0, 1])) # noqa: E711 + assert_equal(a.data == None, [True, False]) # noqa: E711 + assert_equal(a != None, array([False, True], mask=[0, 1])) # noqa: E711 + # With nomask + a = array([None, 1], mask=False) + assert_equal(a == None, [True, False]) # noqa: E711 + assert_equal(a != None, [False, True]) # noqa: E711 + # With complete mask + a = array([None, 2], mask=True) + assert_equal(a == None, array([False, True], mask=True)) # noqa: E711 + assert_equal(a != None, array([True, False], mask=True)) # noqa: E711 + # Fully masked, even comparison to None should return "masked" + a = masked + assert_equal(a == None, masked) # noqa: E711 + + def test_eq_with_scalar(self): + a = array(1) + assert_equal(a == 1, True) + assert_equal(a == 0, False) + assert_equal(a != 1, False) + assert_equal(a != 0, True) + b = array(1, mask=True) + assert_equal(b == 0, masked) + assert_equal(b == 1, masked) + assert_equal(b != 0, masked) + assert_equal(b != 1, masked) + + def test_eq_different_dimensions(self): + m1 = array([1, 1], mask=[0, 1]) + # test comparison with both masked and regular arrays. 
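+        # m1 has shape (2,), so it broadcasts against the (2, 2) operands
+        # and its mask [0, 1] is repeated along the new leading axis.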
+ for m2 in (array([[0, 1], [1, 2]]), + np.array([[0, 1], [1, 2]])): + test = (m1 == m2) + assert_equal(test.data, [[False, False], + [True, False]]) + assert_equal(test.mask, [[False, True], + [False, True]]) + + def test_numpyarithmetic(self): + # Check that the mask is not back-propagated when using numpy functions + a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1]) + control = masked_array([np.nan, np.nan, 0, np.log(2), -1], + mask=[1, 1, 0, 0, 1]) + + test = log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + test = np.log(a) + assert_equal(test, control) + assert_equal(test.mask, control.mask) + assert_equal(a.mask, [0, 0, 0, 0, 1]) + + +class TestMaskedArrayAttributes: + + def test_keepmask(self): + # Tests the keep mask flag + x = masked_array([1, 2, 3], mask=[1, 0, 0]) + mx = masked_array(x) + assert_equal(mx.mask, x.mask) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=False) + assert_equal(mx.mask, [0, 1, 0]) + mx = masked_array(x, mask=[0, 1, 0], keep_mask=True) + assert_equal(mx.mask, [1, 1, 0]) + # We default to true + mx = masked_array(x, mask=[0, 1, 0]) + assert_equal(mx.mask, [1, 1, 0]) + + def test_hardmask(self): + # Test hard_mask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + # We need to copy, to avoid updating d in xh ! + xs = array(d, mask=m, hard_mask=False, copy=True) + xh[[1, 4]] = [10, 40] + xs[[1, 4]] = [10, 40] + assert_equal(xh._data, [0, 10, 2, 3, 4]) + assert_equal(xs._data, [0, 10, 2, 3, 40]) + assert_equal(xs.mask, [0, 0, 0, 1, 0]) + assert_(xh._hardmask) + assert_(not xs._hardmask) + xh[1:4] = [10, 20, 30] + xs[1:4] = [10, 20, 30] + assert_equal(xh._data, [0, 10, 20, 3, 4]) + assert_equal(xs._data, [0, 10, 20, 30, 40]) + assert_equal(xs.mask, nomask) + xh[0] = masked + xs[0] = masked + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, [1, 0, 0, 0, 0]) + xh[:] = 1 + xs[:] = 1 + assert_equal(xh._data, [0, 1, 1, 3, 4]) + assert_equal(xs._data, [1, 1, 1, 1, 1]) + assert_equal(xh.mask, [1, 0, 0, 1, 1]) + assert_equal(xs.mask, nomask) + # Switch to soft mask + xh.soften_mask() + xh[:] = arange(5) + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh.mask, nomask) + # Switch back to hard mask + xh.harden_mask() + xh[xh < 3] = masked + assert_equal(xh._data, [0, 1, 2, 3, 4]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + xh[filled(xh > 1, False)] = 5 + assert_equal(xh._data, [0, 1, 2, 5, 5]) + assert_equal(xh._mask, [1, 1, 1, 0, 0]) + + xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True) + xh[0] = 0 + assert_equal(xh._data, [[1, 0], [3, 4]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[-1, -1] = 5 + assert_equal(xh._data, [[1, 0], [3, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + xh[filled(xh < 5, False)] = 2 + assert_equal(xh._data, [[1, 2], [2, 5]]) + assert_equal(xh._mask, [[1, 0], [0, 0]]) + + def test_hardmask_again(self): + # Another test of hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d, mask=m, hard_mask=True) + xh[4:5] = 999 + xh[0:1] = 999 + assert_equal(xh._data, [999, 1, 2, 3, 4]) + + def test_hardmask_oncemore_yay(self): + # OK, yet another test of hardmask + # Make sure that harden_mask/soften_mask//unshare_mask returns self + a = array([1, 2, 3], mask=[1, 0, 0]) + b = a.harden_mask() + assert_equal(a, b) + b[0] = 0 + assert_equal(a, b) + assert_equal(b, array([1, 2, 3], mask=[1, 0, 0])) + a = b.soften_mask() + a[0] = 0 + assert_equal(a, b) + 
assert_equal(b, array([0, 2, 3], mask=[0, 0, 0])) + + def test_smallmask(self): + # Checks the behaviour of _smallmask + a = arange(10) + a[1] = masked + a[1] = 1 + assert_equal(a._mask, nomask) + a = arange(10) + a._smallmask = False + a[1] = masked + a[1] = 1 + assert_equal(a._mask, zeros(10)) + + def test_shrink_mask(self): + # Tests .shrink_mask() + a = array([1, 2, 3], mask=[0, 0, 0]) + b = a.shrink_mask() + assert_equal(a, b) + assert_equal(a.mask, nomask) + + # Mask cannot be shrunk on structured types, so is a no-op + a = np.ma.array([(1, 2.0)], [('a', int), ('b', float)]) + b = a.copy() + a.shrink_mask() + assert_equal(a.mask, b.mask) + + def test_flat(self): + # Test that flat can return all types of items [#4585, #4615] + # test 2-D record array + # ... on structured array w/ masked records + x = array([[(1, 1.1, 'one'), (2, 2.2, 'two'), (3, 3.3, 'thr')], + [(4, 4.4, 'fou'), (5, 5.5, 'fiv'), (6, 6.6, 'six')]], + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x['a'][0, 1] = masked + x['b'][1, 0] = masked + x['c'][0, 2] = masked + x[-1, -1] = masked + xflat = x.flat + assert_equal(xflat[0], x[0, 0]) + assert_equal(xflat[1], x[0, 1]) + assert_equal(xflat[2], x[0, 2]) + assert_equal(xflat[:3], x[0]) + assert_equal(xflat[3], x[1, 0]) + assert_equal(xflat[4], x[1, 1]) + assert_equal(xflat[5], x[1, 2]) + assert_equal(xflat[3:], x[1]) + assert_equal(xflat[-1], x[-1, -1]) + i = 0 + j = 0 + for xf in xflat: + assert_equal(xf, x[j, i]) + i += 1 + if i >= x.shape[-1]: + i = 0 + j += 1 + + def test_assign_dtype(self): + # check that the mask's dtype is updated when dtype is changed + a = np.zeros(4, dtype='f4,i4') + + m = np.ma.array(a) + m.dtype = np.dtype('f4') + repr(m) # raises? + assert_equal(m.dtype, np.dtype('f4')) + + # check that dtype changes that change shape of mask too much + # are not allowed + def assign(): + m = np.ma.array(a) + m.dtype = np.dtype('f8') + assert_raises(ValueError, assign) + + b = a.view(dtype='f4', type=np.ma.MaskedArray) # raises? 
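+        # The mask of an 'f4,i4' array holds one bool per field (8 bools
+        # for 4 elements), which re-views cleanly as the 8-element mask an
+        # 'f4' view needs; an 'f8' view would need only 4 mask elements,
+        # hence the ValueError checked above.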
+ assert_equal(b.dtype, np.dtype('f4')) + + # check that nomask is preserved + a = np.zeros(4, dtype='f4') + m = np.ma.array(a) + m.dtype = np.dtype('f4,i4') + assert_equal(m.dtype, np.dtype('f4,i4')) + assert_equal(m._mask, np.ma.nomask) + + +class TestFillingValues: + + def test_check_on_scalar(self): + # Test _check_fill_value set to valid and invalid values + _check_fill_value = np.ma.core._check_fill_value + + fval = _check_fill_value(0, int) + assert_equal(fval, 0) + fval = _check_fill_value(None, int) + assert_equal(fval, default_fill_value(0)) + + fval = _check_fill_value(0, "|S3") + assert_equal(fval, b"0") + fval = _check_fill_value(None, "|S3") + assert_equal(fval, default_fill_value(b"camelot!")) + assert_raises(TypeError, _check_fill_value, 1e+20, int) + assert_raises(TypeError, _check_fill_value, 'stuff', int) + + def test_check_on_fields(self): + # Tests _check_fill_value with records + _check_fill_value = np.ma.core._check_fill_value + ndtype = [('a', int), ('b', float), ('c', "|S3")] + # A check on a list should return a single record + fval = _check_fill_value([-999, -12345678.9, "???"], ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # A check on None should output the defaults + fval = _check_fill_value(None, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [default_fill_value(0), + default_fill_value(0.), + asbytes(default_fill_value("0"))]) + #.....Using a structured type as fill_value should work + fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using a flexible type w/ a different type shouldn't matter + # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured + # types by position + fill_val = np.array((-999, -12345678.9, "???"), + dtype=[("A", int), ("B", float), ("C", "|S3")]) + fval = _check_fill_value(fill_val, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + + #.....Using an object-array shouldn't matter either + fill_val = np.ndarray(shape=(1,), dtype=object) + fill_val[0] = (-999, -12345678.9, b"???") + fval = _check_fill_value(fill_val, object) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + # NOTE: This test was never run properly as "fill_value" rather than + # "fill_val" was assigned. Written properly, it fails. 
+ #fill_val = np.array((-999, -12345678.9, "???")) + #fval = _check_fill_value(fill_val, ndtype) + #assert_(isinstance(fval, ndarray)) + #assert_equal(fval.item(), [-999, -12345678.9, b"???"]) + #.....One-field-only flexible type should work as well + ndtype = [("a", int)] + fval = _check_fill_value(-999999999, ndtype) + assert_(isinstance(fval, ndarray)) + assert_equal(fval.item(), (-999999999,)) + + def test_fillvalue_conversion(self): + # Tests the behavior of fill_value during conversion + # We had a tailored comment to make sure special attributes are + # properly dealt with + a = array([b'3', b'4', b'5']) + a._optinfo.update({'comment': "updated!"}) + + b = array(a, dtype=int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + + b = array(a, dtype=float) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0.)) + + b = a.astype(int) + assert_equal(b._data, [3, 4, 5]) + assert_equal(b.fill_value, default_fill_value(0)) + assert_equal(b._optinfo['comment'], "updated!") + + b = a.astype([('a', '|S3')]) + assert_equal(b['a']._data, a._data) + assert_equal(b['a'].fill_value, a.fill_value) + + def test_default_fill_value(self): + # check all calling conventions + f1 = default_fill_value(1.) + f2 = default_fill_value(np.array(1.)) + f3 = default_fill_value(np.array(1.).dtype) + assert_equal(f1, f2) + assert_equal(f1, f3) + + def test_default_fill_value_structured(self): + fields = array([(1, 1, 1)], + dtype=[('i', int), ('s', '|S8'), ('f', float)]) + + f1 = default_fill_value(fields) + f2 = default_fill_value(fields.dtype) + expected = np.array((default_fill_value(0), + default_fill_value('0'), + default_fill_value(0.)), dtype=fields.dtype) + assert_equal(f1, expected) + assert_equal(f2, expected) + + def test_default_fill_value_void(self): + dt = np.dtype([('v', 'V7')]) + f = default_fill_value(dt) + assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v'])) + + def test_fillvalue(self): + # Yet more fun with the fill_value + data = masked_array([1, 2, 3], fill_value=-999) + series = data[[0, 2, 1]] + assert_equal(series._fill_value, data._fill_value) + + mtype = [('f', float), ('s', '|S3')] + x = array([(1, 'a'), (2, 'b'), (pi, 'pi')], dtype=mtype) + x.fill_value = 999 + assert_equal(x.fill_value.item(), [999., b'999']) + assert_equal(x['f'].fill_value, 999) + assert_equal(x['s'].fill_value, b'999') + + x.fill_value = (9, '???') + assert_equal(x.fill_value.item(), (9, b'???')) + assert_equal(x['f'].fill_value, 9) + assert_equal(x['s'].fill_value, b'???') + + x = array([1, 2, 3.1]) + x.fill_value = 999 + assert_equal(np.asarray(x.fill_value).dtype, float) + assert_equal(x.fill_value, 999.) 
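+        # fill_value assignment casts to the array's dtype: the int 999 is
+        # stored as the float scalar 999.0 here.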
+        assert_equal(x._fill_value, np.array(999.))
+
+    def test_subarray_fillvalue(self):
+        # gh-10483 test multi-field index fill value
+        fields = array([(1, 1, 1)],
+                       dtype=[('i', int), ('s', '|S8'), ('f', float)])
+        with suppress_warnings() as sup:
+            sup.filter(FutureWarning, "Numpy has detected")
+            subfields = fields[['i', 'f']]
+            assert_equal(tuple(subfields.fill_value), (999999, 1.e+20))
+            # test comparison does not raise:
+            subfields[1:] == subfields[:-1]
+
+    def test_fillvalue_exotic_dtype(self):
+        # Tests yet more exotic flexible dtypes
+        _check_fill_value = np.ma.core._check_fill_value
+        ndtype = [('i', int), ('s', '|S8'), ('f', float)]
+        control = np.array((default_fill_value(0),
+                            default_fill_value('0'),
+                            default_fill_value(0.),),
+                           dtype=ndtype)
+        assert_equal(_check_fill_value(None, ndtype), control)
+        # The shape shouldn't matter
+        ndtype = [('f0', float, (2, 2))]
+        control = np.array((default_fill_value(0.),),
+                           dtype=[('f0', float)]).astype(ndtype)
+        assert_equal(_check_fill_value(None, ndtype), control)
+        control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)
+        assert_equal(_check_fill_value(0, ndtype), control)
+
+        ndtype = np.dtype("int, (2,3)float, float")
+        control = np.array((default_fill_value(0),
+                            default_fill_value(0.),
+                            default_fill_value(0.),),
+                           dtype="int, float, float").astype(ndtype)
+        test = _check_fill_value(None, ndtype)
+        assert_equal(test, control)
+        control = np.array((0, 0, 0), dtype="int, float, float").astype(ndtype)
+        assert_equal(_check_fill_value(0, ndtype), control)
+        # but when indexing, fill value should become scalar not tuple
+        # See issue #6723
+        M = masked_array(control)
+        assert_equal(M["f1"].fill_value.ndim, 0)
+
+    def test_fillvalue_datetime_timedelta(self):
+        # Test default fillvalue for datetime64 and timedelta64 types.
+        # See issue #4476, this would return '?' which would cause errors
+        # elsewhere
+
+        for timecode in ("as", "fs", "ps", "ns", "us", "ms", "s", "m",
+                         "h", "D", "W", "M", "Y"):
+            control = numpy.datetime64("NaT", timecode)
+            test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
+            np.testing.assert_equal(test, control)
+
+            control = numpy.timedelta64("NaT", timecode)
+            test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
+            np.testing.assert_equal(test, control)
+
+    def test_no_masked_nan_warnings(self):
+        # check that masked NaNs do not raise ufunc warnings
+        m = np.ma.array([0.5, np.nan], mask=[0, 1])
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings("error")
+
+            # test unary and binary ufuncs
+            exp(m)
+            add(m, 1)
+            m > 0
+
+            # test different unary domains
+            sqrt(m)
+            log(m)
+            tan(m)
+            arcsin(m)
+            arccos(m)
+            arccosh(m)
+
+            # test binary domains
+            divide(m, 2)
+
+            # also check that allclose uses ma ufuncs, to avoid warning
+            allclose(m, 0.5)
+
+    def test_masked_array_underflow(self):
+        x = np.arange(0, 3, 0.1)
+        X = np.ma.array(x)
+        with np.errstate(under="raise"):
+            X2 = X / 2.0
+            np.testing.assert_array_equal(X2, x / 2)
+
+
+class TestMaskedArrayInPlaceArithmetic:
+    # Test MaskedArray in-place arithmetic
+
+    def setup_method(self):
+        x = arange(10)
+        y = arange(10)
+        xm = arange(10)
+        xm[2] = masked
+        self.intdata = (x, y, xm)
+        self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
+        self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+        self.othertypes = [np.dtype(_).type for _ in self.othertypes]
+        self.uint8data = (
+            x.astype(np.uint8),
+            y.astype(np.uint8),
+            xm.astype(np.uint8)
+        )
+
+    def test_inplace_addition_scalar(self):
+        # Test of inplace additions
+        (x, y, xm) = self.intdata
+        xm[2] = masked
+        x += 1
+        assert_equal(x, y + 1)
+        xm += 1
+        assert_equal(xm, y + 1)
+
+        (x, _, xm) = self.floatdata
+        id1 = x.data.ctypes.data
+        x += 1.
+        assert_(id1 == x.data.ctypes.data)
+        assert_equal(x, y + 1.)
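+    # An illustrative sketch (not part of the suite) of the data-friendly
+    # in-place semantics exercised below, assuming plain np.ma arrays:
+    #
+    #     >>> a = np.ma.array([1, 2, 3], mask=[0, 0, 1])
+    #     >>> a += np.ma.array([10, 20, 30], mask=[1, 0, 0])
+    #     >>> print(a)
+    #     [-- 22 --]
+    #
+    # i.e. the masks are OR-ed and the data under the mask stays untouched.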
+ + def test_inplace_addition_array(self): + # Test of inplace additions + (x, y, xm) = self.intdata + m = xm.mask + a = arange(10, dtype=np.int16) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar(self): + # Test of inplace subtractions + (x, y, xm) = self.intdata + x -= 1 + assert_equal(x, y - 1) + xm -= 1 + assert_equal(xm, y - 1) + + def test_inplace_subtraction_array(self): + # Test of inplace subtractions + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + x *= 2.0 + assert_equal(x, y * 2) + xm *= 2.0 + assert_equal(xm, y * 2) + + def test_inplace_multiplication_array(self): + # Test of inplace multiplication + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_division_scalar_int(self): + # Test of inplace division + (x, y, xm) = self.intdata + x = arange(10) * 2 + xm = arange(10) * 2 + xm[2] = masked + x //= 2 + assert_equal(x, y) + xm //= 2 + assert_equal(xm, y) + + def test_inplace_division_scalar_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + x /= 2.0 + assert_equal(x, y / 2.0) + xm /= arange(10) + assert_equal(xm, ones((10,))) + + def test_inplace_division_array_float(self): + # Test of inplace division + (x, y, xm) = self.floatdata + m = xm.mask + a = arange(10, dtype=float) + a[-1] = masked + x /= a + xm /= a + assert_equal(x, y / a) + assert_equal(xm, y / a) + assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0))) + + def test_inplace_division_misc(self): + + x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.] + y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.] 
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + + z = xm / ym + assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + xm = xm.copy() + xm /= ym + assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1]) + assert_equal(z._data, + [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.]) + + def test_datafriendly_add(self): + # Test keeping data w/ (inplace) addition + x = array([1, 2, 3], mask=[0, 0, 1]) + # Test add w/ scalar + xx = x + 1 + assert_equal(xx.data, [2, 3, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test iadd w/ scalar + x += 1 + assert_equal(x.data, [2, 3, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test add w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x + array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 4, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test iadd w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x += array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 4, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_sub(self): + # Test keeping data w/ (inplace) subtraction + # Test sub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - 1 + assert_equal(xx.data, [0, 1, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test isub w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= 1 + assert_equal(x.data, [0, 1, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test sub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x - array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 0, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test isub w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x -= array([1, 2, 3], mask=[1, 0, 0]) + assert_equal(x.data, [1, 0, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_mul(self): + # Test keeping data w/ (inplace) multiplication + # Test mul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * 2 + assert_equal(xx.data, [2, 4, 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test imul w/ scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= 2 + assert_equal(x.data, [2, 4, 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test mul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x * array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(xx.data, [1, 40, 3]) + assert_equal(xx.mask, [1, 0, 1]) + # Test imul w/ array + x = array([1, 2, 3], mask=[0, 0, 1]) + x *= array([10, 20, 30], mask=[1, 0, 0]) + assert_equal(x.data, [1, 40, 3]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_div(self): + # Test keeping data w/ (inplace) division + # Test div on scalar + x = array([1, 2, 3], mask=[0, 0, 1]) + xx = x / 2. + assert_equal(xx.data, [1 / 2., 2 / 2., 3]) + assert_equal(xx.mask, [0, 0, 1]) + # Test idiv on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= 2. + assert_equal(x.data, [1 / 2., 2 / 2., 3]) + assert_equal(x.mask, [0, 0, 1]) + # Test div on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x / array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(xx.data, [1., 2. 
/ 20., 3.]) + assert_equal(xx.mask, [1, 0, 1]) + # Test idiv on array + x = array([1., 2., 3.], mask=[0, 0, 1]) + x /= array([10., 20., 30.], mask=[1, 0, 0]) + assert_equal(x.data, [1., 2 / 20., 3.]) + assert_equal(x.mask, [1, 0, 1]) + + def test_datafriendly_pow(self): + # Test keeping data w/ (inplace) power + # Test pow on scalar + x = array([1., 2., 3.], mask=[0, 0, 1]) + xx = x ** 2.5 + assert_equal(xx.data, [1., 2. ** 2.5, 3.]) + assert_equal(xx.mask, [0, 0, 1]) + # Test ipow on scalar + x **= 2.5 + assert_equal(x.data, [1., 2. ** 2.5, 3]) + assert_equal(x.mask, [0, 0, 1]) + + def test_datafriendly_add_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a += b + assert_equal(a, [[2, 2], [4, 4]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_sub_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a -= b + assert_equal(a, [[0, 0], [2, 2]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_datafriendly_mul_arrays(self): + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 0]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + if a.mask is not nomask: + assert_equal(a.mask, [[0, 0], [0, 0]]) + + a = array([[1, 1], [3, 3]]) + b = array([1, 1], mask=[0, 1]) + a *= b + assert_equal(a, [[1, 1], [3, 3]]) + assert_equal(a.mask, [[0, 1], [0, 1]]) + + def test_inplace_addition_scalar_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + xm[2] = masked + x += t(1) + assert_equal(x, y + t(1)) + xm += t(1) + assert_equal(xm, y + t(1)) + + def test_inplace_addition_array_type(self): + # Test of inplace additions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x += a + xm += a + assert_equal(x, y + a) + assert_equal(xm, y + a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_subtraction_scalar_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x -= t(1) + assert_equal(x, y - t(1)) + xm -= t(1) + assert_equal(xm, y - t(1)) + + def test_inplace_subtraction_array_type(self): + # Test of inplace subtractions + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x -= a + xm -= a + assert_equal(x, y - a) + assert_equal(xm, y - a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_multiplication_scalar_type(self): + # Test of inplace multiplication + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x *= t(2) + assert_equal(x, y * t(2)) + xm *= t(2) + assert_equal(xm, y * t(2)) + + def test_inplace_multiplication_array_type(self): + # Test of inplace 
multiplication + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + x *= a + xm *= a + assert_equal(x, y * a) + assert_equal(xm, y * a) + assert_equal(xm.mask, mask_or(m, a.mask)) + + def test_inplace_floor_division_scalar_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + try: + x //= t(2) + xm //= t(2) + assert_equal(x, y) + assert_equal(xm, y) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_floor_division_array_type(self): + # Test of inplace division + # Check for TypeError in case of unsupported types + unsupported = {np.dtype(t).type for t in np.typecodes["Complex"]} + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + try: + x //= a + xm //= a + assert_equal(x, y // a) + assert_equal(xm, y // a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except TypeError: + msg = f"Supported type {t} throwing TypeError" + assert t in unsupported, msg + + def test_inplace_division_scalar_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + x = arange(10, dtype=t) * t(2) + xm = arange(10, dtype=t) * t(2) + xm[2] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. + try: + x /= t(2) + assert_equal(x, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= t(2) + assert_equal(xm, y) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_division_array_type(self): + # Test of inplace division + for t in self.othertypes: + with suppress_warnings() as sup: + sup.record(UserWarning) + (x, y, xm) = (_.astype(t) for _ in self.uint8data) + m = xm.mask + a = arange(10, dtype=t) + a[-1] = masked + + # May get a DeprecationWarning or a TypeError. + # + # This is a consequence of the fact that this is true divide + # and will require casting to float for calculation and + # casting back to the original type. This will only be raised + # with integers. Whether it is an error or warning is only + # dependent on how stringent the casting rules are. + # + # Will handle the same way. 
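+                # Concretely: for an integer x, `x /= a` must compute in
+                # float and cast back, which NumPy may reject (TypeError)
+                # or deprecate, so both outcomes are folded into the same
+                # warning count asserted below.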
+ try: + x /= a + assert_equal(x, y / a) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + try: + xm /= a + assert_equal(xm, y / a) + assert_equal( + xm.mask, + mask_or(mask_or(m, a.mask), (a == t(0))) + ) + except (DeprecationWarning, TypeError) as e: + warnings.warn(str(e), stacklevel=1) + + if issubclass(t, np.integer): + assert_equal(len(sup.log), 2, f'Failed on type={t}.') + else: + assert_equal(len(sup.log), 0, f'Failed on type={t}.') + + def test_inplace_pow_type(self): + # Test keeping data w/ (inplace) power + for t in self.othertypes: + with warnings.catch_warnings(): + warnings.filterwarnings("error") + # Test pow on scalar + x = array([1, 2, 3], mask=[0, 0, 1], dtype=t) + xx = x ** t(2) + xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t) + assert_equal(xx.data, xx_r.data) + assert_equal(xx.mask, xx_r.mask) + # Test ipow on scalar + x **= t(2) + assert_equal(x.data, xx_r.data) + assert_equal(x.mask, xx_r.mask) + + +class TestMaskedArrayMethods: + # Test class for miscellaneous MaskedArrays methods. + def setup_method(self): + # Base data definition. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_generic_methods(self): + # Tests some MaskedArray methods. 
+ a = array([1, 3, 2]) + assert_equal(a.any(), a._data.any()) + assert_equal(a.all(), a._data.all()) + assert_equal(a.argmax(), a._data.argmax()) + assert_equal(a.argmin(), a._data.argmin()) + assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4)) + assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])) + assert_equal(a.conj(), a._data.conj()) + assert_equal(a.conjugate(), a._data.conjugate()) + + m = array([[1, 2], [3, 4]]) + assert_equal(m.diagonal(), m._data.diagonal()) + assert_equal(a.sum(), a._data.sum()) + assert_equal(a.take([1, 2]), a._data.take([1, 2])) + assert_equal(m.transpose(), m._data.transpose()) + + def test_allclose(self): + # Tests allclose on arrays + a = np.random.rand(10) + b = a + np.random.rand(10) * 1e-8 + assert_(allclose(a, b)) + # Test allclose w/ infs + a[0] = np.inf + assert_(not allclose(a, b)) + b[0] = np.inf + assert_(allclose(a, b)) + # Test allclose w/ masked + a = masked_array(a) + a[-1] = masked + assert_(allclose(a, b, masked_equal=True)) + assert_(not allclose(a, b, masked_equal=False)) + # Test comparison w/ scalar + a *= 1e-8 + a[0] = 0 + assert_(allclose(a, 0, masked_equal=True)) + + # Test that the function works for MIN_INT integer typed arrays + a = masked_array([np.iinfo(np.int_).min], dtype=np.int_) + assert_(allclose(a, a)) + + def test_allclose_timedelta(self): + # Allclose currently works for timedelta64 as long as `atol` is + # an integer or also a timedelta64 + a = np.array([[1, 2, 3, 4]], dtype="m8[ns]") + assert allclose(a, a, atol=0) + assert allclose(a, a, atol=np.timedelta64(1, "ns")) + + def test_allany(self): + # Checks the any/all methods/functions. + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool) + mx = masked_array(x, mask=m) + mxbig = (mx > 0.5) + mxsmall = (mx < 0.5) + + assert_(not mxbig.all()) + assert_(mxbig.any()) + assert_equal(mxbig.all(0), [False, False, True]) + assert_equal(mxbig.all(1), [False, False, True]) + assert_equal(mxbig.any(0), [False, False, True]) + assert_equal(mxbig.any(1), [True, True, True]) + + assert_(not mxsmall.all()) + assert_(mxsmall.any()) + assert_equal(mxsmall.all(0), [True, True, False]) + assert_equal(mxsmall.all(1), [False, False, False]) + assert_equal(mxsmall.any(0), [True, True, False]) + assert_equal(mxsmall.any(1), [True, True, False]) + + def test_allany_oddities(self): + # Some fun with all and any + store = empty((), dtype=bool) + full = array([1, 2, 3], mask=True) + + assert_(full.all() is masked) + full.all(out=store) + assert_(store) + assert_(store._mask, True) + assert_(store is not masked) + + store = empty((), dtype=bool) + assert_(full.any() is masked) + full.any(out=store) + assert_(not store) + assert_(store._mask, True) + assert_(store is not masked) + + def test_argmax_argmin(self): + # Tests argmin & argmax on MaskedArrays. 
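+        # Masked entries never win: argmin/argmax fill them with the
+        # dtype's extreme fill values first, so the flat indices below
+        # always point at unmasked extremes.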
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + + assert_equal(mx.argmin(), 35) + assert_equal(mX.argmin(), 35) + assert_equal(m2x.argmin(), 4) + assert_equal(m2X.argmin(), 4) + assert_equal(mx.argmax(), 28) + assert_equal(mX.argmax(), 28) + assert_equal(m2x.argmax(), 31) + assert_equal(m2X.argmax(), 31) + + assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5]) + assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4]) + assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0]) + assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0]) + + assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ]) + assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3]) + assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1]) + assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1]) + + def test_clip(self): + # Tests clip on MaskedArrays. + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0]) + mx = array(x, mask=m) + clipped = mx.clip(2, 8) + assert_equal(clipped.mask, mx.mask) + assert_equal(clipped._data, x.clip(2, 8)) + assert_equal(clipped._data, mx._data.clip(2, 8)) + + def test_clip_out(self): + # gh-14140 + a = np.arange(10) + m = np.ma.MaskedArray(a, mask=[0, 1] * 5) + m.clip(0, 5, out=m) + assert_equal(m.mask, [0, 1] * 5) + + def test_compress(self): + # test compress + a = masked_array([1., 2., 3., 4., 5.], fill_value=9999) + condition = (a > 1.5) & (a < 3.5) + assert_equal(a.compress(condition), [2., 3.]) + + a[[2, 3]] = masked + b = a.compress(condition) + assert_equal(b._data, [2., 3.]) + assert_equal(b._mask, [0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + condition = (a < 4.) 
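+        # (a < 4.) is itself masked at indices 2 and 3; compress drops the
+        # condition's mask and uses its underlying data, so index 2 (where
+        # 3 < 4 holds) survives, still masked, in the result.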
+ b = a.compress(condition) + assert_equal(b._data, [1., 2., 3.]) + assert_equal(b._mask, [0, 0, 1]) + assert_equal(b.fill_value, 9999) + assert_equal(b, a[condition]) + + a = masked_array([[10, 20, 30], [40, 50, 60]], + mask=[[0, 0, 1], [1, 0, 0]]) + b = a.compress(a.ravel() >= 22) + assert_equal(b._data, [30, 40, 50, 60]) + assert_equal(b._mask, [1, 1, 0, 0]) + + x = np.array([3, 1, 2]) + b = a.compress(x >= 2, axis=1) + assert_equal(b._data, [[10, 30], [40, 60]]) + assert_equal(b._mask, [[0, 1], [1, 0]]) + + def test_compressed(self): + # Tests compressed + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + a[0] = masked + b = a.compressed() + assert_equal(b, [2, 3, 4]) + + def test_empty(self): + # Tests empty/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = empty_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = empty(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check empty_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = empty_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view(masked_array) + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_zeros(self): + # Tests zeros/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = zeros(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = zeros_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check zeros_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = zeros_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + def test_ones(self): + # Tests ones/like + datatype = [('a', int), ('b', float), ('c', '|S8')] + a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')], + dtype=datatype) + assert_equal(len(a.fill_value.item()), len(datatype)) + + b = ones(len(a), dtype=datatype) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + b = ones_like(a) + assert_equal(b.shape, a.shape) + assert_equal(b.fill_value, a.fill_value) + + # check ones_like mask handling + a = masked_array([1, 2, 3], mask=[False, True, False]) + b = ones_like(a) + assert_(not np.may_share_memory(a.mask, b.mask)) + b = a.view() + assert_(np.may_share_memory(a.mask, b.mask)) + + @suppress_copy_mask_on_assignment + def test_put(self): + # Tests put. 
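+        # put writes by position: plain values unmask their targets, while
+        # masked values in the source re-mask them (checked below for both
+        # the method and the module-level put).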
+ d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_equal(x, [0, 10, 2, -1, 40]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + i = [0, 2, 4, 6] + x.put(i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2) + put(x, i, [6, 4, 2, 0]) + assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ])) + assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0])) + assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ]) + assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0]) + + def test_put_nomask(self): + # GitHub issue 6425 + x = zeros(10) + z = array([3., -1.], mask=[False, True]) + + x.put([1, 2], z) + assert_(x[0] is not masked) + assert_equal(x[0], 0) + assert_(x[1] is not masked) + assert_equal(x[1], 3) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_equal(x[3], 0) + + def test_put_hardmask(self): + # Tests put on hardmask + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + xh = array(d + 1, mask=m, hard_mask=True, copy=True) + xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5]) + assert_equal(xh._data, [3, 4, 2, 4, 5]) + + def test_putmask(self): + x = arange(6) + 1 + mx = array(x, mask=[0, 0, 0, 1, 1, 1]) + mask = [0, 0, 1, 0, 0, 1] + # w/o mask, w/o masked values + xx = x.copy() + putmask(xx, mask, 99) + assert_equal(xx, [1, 2, 99, 4, 5, 99]) + # w/ mask, w/o masked values + mxx = mx.copy() + putmask(mxx, mask, 99) + assert_equal(mxx._data, [1, 2, 99, 4, 5, 99]) + assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0]) + # w/o mask, w/ masked values + values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0]) + xx = x.copy() + putmask(xx, mask, values) + assert_equal(xx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(xx._mask, [0, 0, 1, 0, 0, 0]) + # w/ mask, w/ masked values + mxx = mx.copy() + putmask(mxx, mask, values) + assert_equal(mxx._data, [1, 2, 30, 4, 5, 60]) + assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0]) + # w/ mask, w/ masked values + hardmask + mxx = mx.copy() + mxx.harden_mask() + putmask(mxx, mask, values) + assert_equal(mxx, [1, 2, 30, 4, 5, 60]) + + def test_ravel(self): + # Tests ravel + a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, aravel.shape) + a = array([0, 0], mask=[1, 1]) + aravel = a.ravel() + assert_equal(aravel._mask.shape, a.shape) + # Checks that small_mask is preserved + a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False) + assert_equal(a.ravel()._mask, [0, 0, 0, 0]) + # Test that the fill_value is preserved + a.fill_value = -99 + a.shape = (2, 2) + ar = a.ravel() + assert_equal(ar._mask, [0, 0, 0, 0]) + assert_equal(ar._data, [1, 2, 3, 4]) + assert_equal(ar.fill_value, -99) + # Test index ordering + assert_equal(a.ravel(order='C'), [1, 2, 3, 4]) + assert_equal(a.ravel(order='F'), [1, 3, 2, 4]) + + @pytest.mark.parametrize("order", "AKCF") + @pytest.mark.parametrize("data_order", "CF") + def test_ravel_order(self, order, data_order): + # Ravelling must ravel mask and data in the same order always to avoid + # misaligning the two in the ravel result. 
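+        # 'C' and 'F' force an order, 'A' follows Fortran-contiguity, and
+        # 'K' follows memory layout; data and mask are built here with
+        # mismatched contiguity (their fnc flags differ) to catch that.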
+        arr = np.ones((5, 10), order=data_order)
+        arr[0, :] = 0
+        mask = np.ones((10, 5), dtype=bool, order=data_order).T
+        mask[0, :] = False
+        x = array(arr, mask=mask)
+        assert x._data.flags.fnc != x._mask.flags.fnc
+        assert (x.filled(0) == 0).all()
+        raveled = x.ravel(order)
+        assert (raveled.filled(0) == 0).all()
+
+        # NOTE: Can be wrong if arr order is neither C nor F and `order="K"`
+        assert_array_equal(arr.ravel(order), x.ravel(order)._data)
+
+    def test_reshape(self):
+        # Tests reshape
+        x = arange(4)
+        x[0] = masked
+        y = x.reshape(2, 2)
+        assert_equal(y.shape, (2, 2,))
+        assert_equal(y._mask.shape, (2, 2,))
+        assert_equal(x.shape, (4,))
+        assert_equal(x._mask.shape, (4,))
+
+    def test_sort(self):
+        # Test sort
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+        sortedx = sort(x)
+        assert_equal(sortedx._data, [1, 2, 3, 4])
+        assert_equal(sortedx._mask, [0, 0, 0, 1])
+
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [4, 1, 2, 3])
+        assert_equal(sortedx._mask, [1, 0, 0, 0])
+
+        x.sort()
+        assert_equal(x._data, [1, 2, 3, 4])
+        assert_equal(x._mask, [0, 0, 0, 1])
+
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+        x.sort(endwith=False)
+        assert_equal(x._data, [4, 1, 2, 3])
+        assert_equal(x._mask, [1, 0, 0, 0])
+
+        x = [1, 4, 2, 3]
+        sortedx = sort(x)
+        assert_(not isinstance(sortedx, MaskedArray))
+
+        x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [-2, -1, 0, 1, 2])
+        x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)
+        sortedx = sort(x, endwith=False)
+        assert_equal(sortedx._data, [1, 2, -2, -1, 0])
+        assert_equal(sortedx._mask, [1, 1, 0, 0, 0])
+
+        x = array([0, -1], dtype=np.int8)
+        sortedx = sort(x, kind="stable")
+        assert_equal(sortedx, array([-1, 0], dtype=np.int8))
+
+    def test_stable_sort(self):
+        x = array([1, 2, 3, 1, 2, 3], dtype=np.uint8)
+        expected = array([0, 3, 1, 4, 2, 5])
+        computed = argsort(x, kind='stable')
+        assert_equal(computed, expected)
+
+    def test_argsort_matches_sort(self):
+        x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)
+
+        for kwargs in [{},
+                       {"endwith": True},
+                       {"endwith": False},
+                       {"fill_value": 2},
+                       {"fill_value": 2, "endwith": True},
+                       {"fill_value": 2, "endwith": False}]:
+            sortedx = sort(x, **kwargs)
+            argsortedx = x[argsort(x, **kwargs)]
+            assert_equal(sortedx._data, argsortedx._data)
+            assert_equal(sortedx._mask, argsortedx._mask)
+
+    def test_sort_2d(self):
+        # Check sort of 2D array.
+ # 2D array w/o mask + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + a = masked_array([[8, 4, 1], [2, 0, 9]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + # 2D array w/mask + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(0) + assert_equal(a, [[2, 0, 1], [8, 4, 9]]) + assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]]) + a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]]) + a.sort(1) + assert_equal(a, [[1, 4, 8], [0, 2, 9]]) + assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]]) + # 3D + a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]], + [[1, 2, 3], [7, 8, 9], [4, 5, 6]], + [[7, 8, 9], [1, 2, 3], [4, 5, 6]], + [[4, 5, 6], [1, 2, 3], [7, 8, 9]]]) + a[a % 4 == 0] = masked + am = a.copy() + an = a.filled(99) + am.sort(0) + an.sort(0) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(1) + an.sort(1) + assert_equal(am, an) + am = a.copy() + an = a.filled(99) + am.sort(2) + an.sort(2) + assert_equal(am, an) + + def test_sort_flexible(self): + # Test sort on structured dtype. + a = array( + data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)], + mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)], + dtype=[('A', int), ('B', int)]) + mask_last = array( + data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)], + mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)], + dtype=[('A', int), ('B', int)]) + mask_first = array( + data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)], + mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)], + dtype=[('A', int), ('B', int)]) + + test = sort(a) + assert_equal(test, mask_last) + assert_equal(test.mask, mask_last.mask) + + test = sort(a, endwith=False) + assert_equal(test, mask_first) + assert_equal(test.mask, mask_first.mask) + + # Test sort on dtype with subarray (gh-8069) + # Just check that the sort does not error, structured array subarrays + # are treated as byte strings and that leads to differing behavior + # depending on endianness and `endwith`. + dt = np.dtype([('v', int, 2)]) + a = a.view(dt) + test = sort(a) + test = sort(a, endwith=False) + + def test_argsort(self): + # Test argsort + a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0]) + assert_equal(np.argsort(a), argsort(a)) + + def test_squeeze(self): + # Check squeeze + data = masked_array([[1, 2, 3]]) + assert_equal(data.squeeze(), [1, 2, 3]) + data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]]) + assert_equal(data.squeeze(), [1, 2, 3]) + assert_equal(data.squeeze()._mask, [1, 1, 1]) + + # normal ndarrays return a view + arr = np.array([[1]]) + arr_sq = arr.squeeze() + assert_equal(arr_sq, 1) + arr_sq[...] = 2 + assert_equal(arr[0, 0], 2) + + # so maskedarrays should too + m_arr = masked_array([[1]], mask=True) + m_arr_sq = m_arr.squeeze() + assert_(m_arr_sq is not np.ma.masked) + assert_equal(m_arr_sq.mask, True) + m_arr_sq[...] = 2 + assert_equal(m_arr[0, 0], 2) + + def test_swapaxes(self): + # Tests swapaxes on MaskedArrays. 
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mX = array(x, mask=m).reshape(6, 6) + mXX = mX.reshape(3, 2, 2, 3) + + mXswapped = mX.swapaxes(0, 1) + assert_equal(mXswapped[-1], mX[:, -1]) + + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_take(self): + # Tests take + x = masked_array([10, 20, 30, 40], [0, 1, 0, 1]) + assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1])) + assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]]) + assert_equal(x.take([[0, 1], [0, 1]]), + masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]])) + + # assert_equal crashes when passed np.ma.mask + assert_(x[1] is np.ma.masked) + assert_(x.take(1) is np.ma.masked) + + x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]]) + assert_equal(x.take([0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + assert_equal(take(x, [0, 2], axis=1), + array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]])) + + def test_take_masked_indices(self): + # Test take w/ masked indices + a = np.array((40, 18, 37, 9, 22)) + indices = np.arange(3)[None, :] + np.arange(5)[:, None] + mindices = array(indices, mask=(indices >= len(a))) + # No mask + test = take(a, mindices, mode='clip') + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 22], + [22, 22, 22]]) + assert_equal(test, ctrl) + # Masked indices + test = take(a, mindices) + ctrl = array([[40, 18, 37], + [18, 37, 9], + [37, 9, 22], + [9, 22, 40], + [22, 40, 40]]) + ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # Masked input + masked indices + a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0)) + test = take(a, mindices) + ctrl[0, 1] = ctrl[1, 0] = masked + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + def test_tolist(self): + # Tests to list + # ... on 1D + x = array(np.arange(12)) + x[[1, -2]] = masked + xlist = x.tolist() + assert_(xlist[1] is None) + assert_(xlist[-2] is None) + # ... on 2D + x.shape = (3, 4) + xlist = x.tolist() + ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]] + assert_equal(xlist[0], [0, None, 2, 3]) + assert_equal(xlist[1], [4, 5, 6, 7]) + assert_equal(xlist[2], [8, 9, None, 11]) + assert_equal(xlist, ctrl) + # ... on structured array w/ masked records + x = array(list(zip([1, 2, 3], + [1.1, 2.2, 3.3], + ['one', 'two', 'thr'])), + dtype=[('a', int), ('b', float), ('c', '|S8')]) + x[-1] = masked + assert_equal(x.tolist(), + [(1, 1.1, b'one'), + (2, 2.2, b'two'), + (None, None, None)]) + # ... on structured array w/ masked fields + a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)], + dtype=[('a', int), ('b', int)]) + test = a.tolist() + assert_equal(test, [[1, None], [3, 4]]) + # ... 
on mvoid + a = a[0] + test = a.tolist() + assert_equal(test, [1, None]) + + def test_tolist_specialcase(self): + # Test mvoid.tolist: make sure we return a standard Python object + a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)]) + # w/o mask: each entry is a np.void whose elements are standard Python + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + # w/ mask: each entry is a ma.void whose elements should be + # standard Python + a.mask[0] = (0, 1) + for entry in a: + for item in entry.tolist(): + assert_(not isinstance(item, np.generic)) + + def test_toflex(self): + # Test the conversion to records + data = arange(10) + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = [('i', int), ('s', '|S3'), ('f', float)] + data = array(list(zip(np.arange(10), + 'ABCDEFGHIJKLM', + np.random.rand(10))), + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal(record['_data'], data._data) + assert_equal(record['_mask'], data._mask) + + ndtype = np.dtype("int, (2,3)float, float") + data = array(list(zip(np.arange(10), + np.random.rand(10), + np.random.rand(10))), + dtype=ndtype) + data[[0, 1, 2, -1]] = masked + record = data.toflex() + assert_equal_records(record['_data'], data._data) + assert_equal_records(record['_mask'], data._mask) + + def test_fromflex(self): + # Test the reconstruction of a masked_array from a record + a = array([1, 2, 3]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([1, 2, 3], mask=[0, 0, 1]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.mask, a.mask) + + a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)], + dtype=[('A', int), ('B', float)]) + test = fromflex(a.toflex()) + assert_equal(test, a) + assert_equal(test.data, a.data) + + def test_arraymethod(self): + # Test a _arraymethod w/ n argument + marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0]) + control = masked_array([[1], [2], [3], [4], [5]], + mask=[0, 0, 1, 0, 0]) + assert_equal(marray.T, control) + assert_equal(marray.transpose(), control) + + assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0)) + + def test_arraymethod_0d(self): + # gh-9430 + x = np.ma.array(42, mask=True) + assert_equal(x.T.mask, x.mask) + assert_equal(x.T.data, x.data) + + def test_transpose_view(self): + x = np.ma.array([[1, 2, 3], [4, 5, 6]]) + x[0, 1] = np.ma.masked + xt = x.T + + xt[1, 0] = 10 + xt[0, 1] = np.ma.masked + + assert_equal(x.data, xt.T.data) + assert_equal(x.mask, xt.T.mask) + + def test_diagonal_view(self): + x = np.ma.zeros((3, 3)) + x[0, 0] = 10 + x[1, 1] = np.ma.masked + x[2, 2] = 20 + xd = x.diagonal() + x[1, 1] = 15 + assert_equal(xd.mask, x.diagonal().mask) + assert_equal(xd.data, x.diagonal().data) + + +class TestMaskedArrayMathMethods: + + def setup_method(self): + # Base data definition. 
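+        # x is a flat array of 36 floats; X and XX are 2-D and 4-D reshapes
+        # of the same data, while mx/mX/mXX (and m2x/m2X/m2XX) pair each
+        # shape with the mask m (or the heavier mask m2). Tests unpack all
+        # ten objects from self.d.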
+ x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_cumsumprod(self): + # Tests cumsum & cumprod on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXcp = mX.cumsum(0) + assert_equal(mXcp._data, mX.filled(0).cumsum(0)) + mXcp = mX.cumsum(1) + assert_equal(mXcp._data, mX.filled(0).cumsum(1)) + + mXcp = mX.cumprod(0) + assert_equal(mXcp._data, mX.filled(1).cumprod(0)) + mXcp = mX.cumprod(1) + assert_equal(mXcp._data, mX.filled(1).cumprod(1)) + + def test_cumsumprod_with_output(self): + # Tests cumsum/cumprod w/ output + xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4) + xm[:, 0] = xm[0] = xm[-1, -1] = masked + + for funcname in ('cumsum', 'cumprod'): + npfunc = getattr(np, funcname) + xmmeth = getattr(xm, funcname) + + # A ndarray as explicit input + output = np.empty((3, 4), dtype=float) + output.fill(-9999) + result = npfunc(xm, axis=0, out=output) + # ... the result should be the given output + assert_(result is output) + assert_equal(result, xmmeth(axis=0, out=output)) + + output = empty((3, 4), dtype=int) + result = xmmeth(axis=0, out=output) + assert_(result is output) + + def test_ptp(self): + # Tests ptp on MaskedArrays. 
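+        # ptp (max - min) should be computed from the unmasked values only,
+        # so every result below is checked against np.ptp of the compressed
+        # (mask-dropped) slice.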
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + (n, m) = X.shape + assert_equal(mx.ptp(), np.ptp(mx.compressed())) + rows = np.zeros(n, float) + cols = np.zeros(m, float) + for k in range(m): + cols[k] = np.ptp(mX[:, k].compressed()) + for k in range(n): + rows[k] = np.ptp(mX[k].compressed()) + assert_equal(mX.ptp(0), cols) + assert_equal(mX.ptp(1), rows) + + def test_add_object(self): + x = masked_array(['a', 'b'], mask=[1, 0], dtype=object) + y = x + 'x' + assert_equal(y[1], 'bx') + assert_(y.mask[0]) + + def test_sum_object(self): + # Test sum on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.sum(), 5) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.sum(axis=0), [5, 7, 9]) + + def test_prod_object(self): + # Test prod on object dtype + a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object) + assert_equal(a.prod(), 2 * 3) + a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object) + assert_equal(a.prod(axis=0), [4, 10, 18]) + + def test_meananom_object(self): + # Test mean/anom on object dtype + a = masked_array([1, 2, 3], dtype=object) + assert_equal(a.mean(), 2) + assert_equal(a.anom(), [-1, 0, 1]) + + def test_anom_shape(self): + a = masked_array([1, 2, 3]) + assert_equal(a.anom().shape, a.shape) + a.mask = True + assert_equal(a.anom().shape, a.shape) + assert_(np.ma.is_masked(a.anom())) + + def test_anom(self): + a = masked_array(np.arange(1, 7).reshape(2, 3)) + assert_almost_equal(a.anom(), + [[-2.5, -1.5, -0.5], [0.5, 1.5, 2.5]]) + assert_almost_equal(a.anom(axis=0), + [[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) + assert_almost_equal(a.anom(axis=1), + [[-1., 0., 1.], [-1., 0., 1.]]) + a.mask = [[0, 0, 1], [0, 1, 0]] + mval = -99 + assert_almost_equal(a.anom().filled(mval), + [[-2.25, -1.25, mval], [0.75, mval, 2.75]]) + assert_almost_equal(a.anom(axis=0).filled(mval), + [[-1.5, 0.0, mval], [1.5, mval, 0.0]]) + assert_almost_equal(a.anom(axis=1).filled(mval), + [[-0.5, 0.5, mval], [-1.0, mval, 1.0]]) + + def test_trace(self): + # Tests trace on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_almost_equal(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0)) + assert_equal(np.trace(mX), mX.trace()) + + # gh-5560 + arr = np.arange(2 * 4 * 4).reshape(2, 4, 4) + m_arr = np.ma.masked_array(arr, False) + assert_equal(arr.trace(axis1=1, axis2=2), m_arr.trace(axis1=1, axis2=2)) + + def test_dot(self): + # Tests dot on MaskedArrays. 
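+        # With the default strict=False, ma.dot treats masked entries as
+        # zeros, and an output cell is masked only if every term of its sum
+        # involved a masked operand; hence the comparisons against
+        # filled(0) products below.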
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + fx = mx.filled(0) + r = mx.dot(mx) + assert_almost_equal(r.filled(0), fx.dot(fx)) + assert_(r.mask is nomask) + + fX = mX.filled(0) + r = mX.dot(mX) + assert_almost_equal(r.filled(0), fX.dot(fX)) + assert_(r.mask[1, 3]) + r1 = empty_like(r) + mX.dot(mX, out=r1) + assert_almost_equal(r, r1) + + mYY = mXX.swapaxes(-1, -2) + fXX, fYY = mXX.filled(0), mYY.filled(0) + r = mXX.dot(mYY) + assert_almost_equal(r.filled(0), fXX.dot(fYY)) + r1 = empty_like(r) + mXX.dot(mYY, out=r1) + assert_almost_equal(r, r1) + + def test_dot_shape_mismatch(self): + # regression test + x = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + y = masked_array([[1, 2], [3, 4]], mask=[[0, 1], [0, 0]]) + z = masked_array([[0, 1], [3, 3]]) + x.dot(y, out=z) + assert_almost_equal(z.filled(0), [[1, 0], [15, 16]]) + assert_almost_equal(z.mask, [[0, 1], [0, 0]]) + + def test_varmean_nomask(self): + # gh-5769 + foo = array([1, 2, 3, 4], dtype='f8') + bar = array([1, 2, 3, 4], dtype='f8') + assert_equal(type(foo.mean()), np.float64) + assert_equal(type(foo.var()), np.float64) + assert (foo.mean() == bar.mean()) is np.bool(True) + + # check array type is preserved and out works + foo = array(np.arange(16).reshape((4, 4)), dtype='f8') + bar = empty(4, dtype='f4') + assert_equal(type(foo.mean(axis=1)), MaskedArray) + assert_equal(type(foo.var(axis=1)), MaskedArray) + assert_(foo.mean(axis=1, out=bar) is bar) + assert_(foo.var(axis=1, out=bar) is bar) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_almost_equal(mX.std(axis=None, ddof=1), + mX.compressed().std(ddof=1)) + assert_almost_equal(mX.var(axis=None, ddof=1), + mX.compressed().var(ddof=1)) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + @suppress_copy_mask_on_assignment + def test_varstd_specialcases(self): + # Test a special case for var + nout = np.array(-1, dtype=float) + mout = array(-1, dtype=float) + + x = array(arange(10), mask=True) + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method() is masked) + assert_(method(0) is masked) + assert_(method(-1) is masked) + # Using a masked array as explicit output + method(out=mout) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout) + assert_(np.isnan(nout)) + + x = array(arange(10), mask=True) + x[-1] = 9 + for methodname in ('var', 'std'): + method = getattr(x, methodname) + assert_(method(ddof=1) is masked) + assert_(method(0, ddof=1) is masked) + assert_(method(-1, ddof=1) is masked) + # Using a masked array as explicit output + method(out=mout, ddof=1) + assert_(mout is not masked) + assert_equal(mout.mask, True) + # Using a ndarray as explicit output + method(out=nout, ddof=1) + assert_(np.isnan(nout)) + + def test_varstd_ddof(self): + a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 
0, 1], [0, 0, 1]]) + test = a.std(axis=0, ddof=0) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=1) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [0, 0, 1]) + test = a.std(axis=0, ddof=2) + assert_equal(test.filled(0), [0, 0, 0]) + assert_equal(test.mask, [1, 1, 1]) + + def test_diag(self): + # Test diag + x = arange(9).reshape((3, 3)) + x[1, 1] = masked + out = np.diag(x) + assert_equal(out, [0, 4, 8]) + out = diag(x) + assert_equal(out, [0, 4, 8]) + assert_equal(out.mask, [0, 1, 0]) + out = diag(out) + control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]], + mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(out, control) + + def test_axis_methods_nomask(self): + # Test the combination nomask & methods w/ axis + a = array([[1, 2, 3], [4, 5, 6]]) + + assert_equal(a.sum(0), [5, 7, 9]) + assert_equal(a.sum(-1), [6, 15]) + assert_equal(a.sum(1), [6, 15]) + + assert_equal(a.prod(0), [4, 10, 18]) + assert_equal(a.prod(-1), [6, 120]) + assert_equal(a.prod(1), [6, 120]) + + assert_equal(a.min(0), [1, 2, 3]) + assert_equal(a.min(-1), [1, 4]) + assert_equal(a.min(1), [1, 4]) + + assert_equal(a.max(0), [4, 5, 6]) + assert_equal(a.max(-1), [3, 6]) + assert_equal(a.max(1), [3, 6]) + + @requires_memory(free_bytes=2 * 10000 * 1000 * 2) + def test_mean_overflow(self): + # Test overflow in masked arrays + # gh-20272 + a = masked_array(np.full((10000, 10000), 65535, dtype=np.uint16), + mask=np.zeros((10000, 10000))) + assert_equal(a.mean(), 65535.0) + + def test_diff_with_prepend(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[3:], value=2) + a_prep = np.ma.masked_equal(x[:3], value=2) + diff1 = np.ma.diff(a, prepend=a_prep, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_append(self): + # GH 22465 + x = np.array([1, 2, 2, 3, 4, 2, 1, 1]) + + a = np.ma.masked_equal(x[:3], value=2) + a_app = np.ma.masked_equal(x[3:], value=2) + diff1 = np.ma.diff(a, append=a_app, axis=0) + + b = np.ma.masked_equal(x, value=2) + diff2 = np.ma.diff(b, axis=0) + + assert_(np.ma.allequal(diff1, diff2)) + + def test_diff_with_dim_0(self): + with pytest.raises( + ValueError, + match="diff requires input that is at least one dimensional" + ): + np.ma.diff(np.array(1)) + + def test_diff_with_n_0(self): + a = np.ma.masked_equal([1, 2, 2, 3, 4, 2, 1, 1], value=2) + diff = np.ma.diff(a, n=0, axis=0) + + assert_(np.ma.allequal(a, diff)) + + +class TestMaskedArrayMathMethodsComplex: + # Test class for miscellaneous MaskedArrays methods. + def setup_method(self): + # Base data definition. 
+ x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479j, + 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + m2 = np.array([1, 1, 0, 1, 0, 0, + 1, 1, 1, 1, 0, 1, + 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 1, 0, + 0, 0, 1, 0, 1, 1]) + m2x = array(data=x, mask=m2) + m2X = array(data=X, mask=m2.reshape(X.shape)) + m2XX = array(data=XX, mask=m2.reshape(XX.shape)) + self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) + + def test_varstd(self): + # Tests var & std on MaskedArrays. + (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d + assert_almost_equal(mX.var(axis=None), mX.compressed().var()) + assert_almost_equal(mX.std(axis=None), mX.compressed().std()) + assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape) + assert_equal(mX.var().shape, X.var().shape) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + assert_almost_equal(mX.var(axis=None, ddof=2), + mX.compressed().var(ddof=2)) + assert_almost_equal(mX.std(axis=None, ddof=2), + mX.compressed().std(ddof=2)) + for k in range(6): + assert_almost_equal(mXvar1[k], mX[k].compressed().var()) + assert_almost_equal(mXvar0[k], mX[:, k].compressed().var()) + assert_almost_equal(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std()) + + +class TestMaskedArrayFunctions: + # Test class for miscellaneous functions. + + def setup_method(self): + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + self.info = (xm, ym) + + def test_masked_where_bool(self): + x = [1, 2] + y = masked_where(False, x) + assert_equal(y, [1, 2]) + assert_equal(y[1], 2) + + def test_masked_equal_wlist(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [0, 0, 1]) + mx = masked_not_equal(x, 3) + assert_equal(mx, x) + assert_equal(mx._mask, [1, 1, 0]) + + def test_masked_equal_fill_value(self): + x = [1, 2, 3] + mx = masked_equal(x, 3) + assert_equal(mx._mask, [0, 0, 1]) + assert_equal(mx.fill_value, 3) + + def test_masked_where_condition(self): + # Tests masking functions. + x = array([1., 2., 3., 4., 5.]) + x[2] = masked + assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2)) + assert_equal(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2)) + assert_equal(masked_where(less(x, 2), x), masked_less(x, 2)) + assert_equal(masked_where(less_equal(x, 2), x), + masked_less_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2)) + assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)) + assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5]) + + def test_masked_where_oddities(self): + # Tests some generic features. 
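+        # masked_where(condition, a) masks a wherever condition is True, so
+        # an all-False condition should hand back an array equal to the
+        # input.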
+        atest = ones((10, 10, 10), dtype=float)
+        btest = zeros(atest.shape, MaskType)
+        ctest = masked_where(btest, atest)
+        assert_equal(atest, ctest)
+
+    def test_masked_where_shape_constraint(self):
+        a = arange(10)
+        with assert_raises(IndexError):
+            masked_equal(1, a)
+        test = masked_equal(a, 1)
+        assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
+
+    def test_masked_where_structured(self):
+        # test that masked_where on a structured array sets a structured
+        # mask (see issue #2972)
+        a = np.zeros(10, dtype=[("A", "<f2"), ("B", "<f4")])
+        am = np.ma.masked_where(a["A"] < 5, a)
+        assert_equal(am.mask.dtype.names, am.dtype.names)
+        assert_equal(am["A"],
+                     np.ma.masked_array(np.zeros(10), np.ones(10)))
+
+    def test_masked_where_mismatch(self):
+        # gh-4520
+        x = np.arange(10)
+        y = np.arange(5)
+        assert_raises(IndexError, masked_where, y > 6, x)
+
+    def test_masked_otherfunctions(self):
+        assert_equal(masked_inside(list(range(5)), 1, 3),
+                     [0, 199, 199, 199, 4])
+        assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])
+        assert_equal(masked_inside(array(list(range(5)),
+                                         mask=[1, 0, 0, 0, 0]), 1, 3).mask,
+                     [1, 1, 1, 1, 0])
+        assert_equal(masked_outside(array(list(range(5)),
+                                          mask=[0, 1, 0, 0, 0]), 1, 3).mask,
+                     [1, 1, 0, 0, 1])
+        assert_equal(masked_equal(array(list(range(5)),
+                                        mask=[1, 0, 0, 0, 0]), 2).mask,
+                     [1, 0, 1, 0, 0])
+        assert_equal(masked_not_equal(array([2, 2, 1, 2, 1],
+                                            mask=[1, 0, 0, 0, 0]), 2).mask,
+                     [1, 0, 1, 0, 1])
+
+    def test_round(self):
+        a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],
+                  mask=[0, 1, 0, 0, 0])
+        assert_equal(a.round(), [1., 2., 3., 5., 6.])
+        assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])
+        assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])
+        b = empty_like(a)
+        a.round(out=b)
+        assert_equal(b, [1., 2., 3., 5., 6.])
+
+        x = array([1., 2., 3., 4., 5.])
+        c = array([1, 1, 1, 0, 0])
+        x[2] = masked
+        z = where(c, x, -x)
+        assert_equal(z, [1., 2., 0., -4., -5])
+        c[0] = masked
+        z = where(c, x, -x)
+        assert_equal(z, [1., 2., 0., -4., -5])
+        assert_(z[0] is masked)
+        assert_(z[1] is not masked)
+        assert_(z[2] is masked)
+
+    def test_round_with_output(self):
+        # Testing round with an explicit output
+
+        xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)
+        xm[:, 0] = xm[0] = xm[-1, -1] = masked
+
+        # A ndarray as explicit input
+        output = np.empty((3, 4), dtype=float)
+        output.fill(-9999)
+        result = np.round(xm, decimals=2, out=output)
+        # ...
the result should be the given output + assert_(result is output) + assert_equal(result, xm.round(decimals=2, out=output)) + + output = empty((3, 4), dtype=float) + result = xm.round(decimals=2, out=output) + assert_(result is output) + + def test_round_with_scalar(self): + # Testing round with scalar/zero dimension input + # GH issue 2244 + a = array(1.1, mask=[False]) + assert_equal(a.round(), 1) + + a = array(1.1, mask=[True]) + assert_(a.round() is masked) + + a = array(1.1, mask=[False]) + output = np.empty(1, dtype=float) + output.fill(-9999) + a.round(out=output) + assert_equal(output, 1) + + a = array(1.1, mask=[False]) + output = array(-9999., mask=[True]) + a.round(out=output) + assert_equal(output[()], 1) + + a = array(1.1, mask=[True]) + output = array(-9999., mask=[False]) + a.round(out=output) + assert_(output[()] is masked) + + def test_identity(self): + a = identity(5) + assert_(isinstance(a, MaskedArray)) + assert_equal(a, np.identity(5)) + + def test_power(self): + x = -1.1 + assert_almost_equal(power(x, 2.), 1.21) + assert_(power(x, masked) is masked) + x = array([-1.1, -1.1, 1.1, 1.1, 0.]) + b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1]) + y = power(x, b) + assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.]) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + b.mask = nomask + y = power(x, b) + assert_equal(y._mask, [1, 0, 0, 0, 1]) + z = x ** b + assert_equal(z._mask, y._mask) + assert_almost_equal(z, y) + assert_almost_equal(z._data, y._data) + x **= b + assert_equal(x._mask, y._mask) + assert_almost_equal(x, y) + assert_almost_equal(x._data, y._data) + + def test_power_with_broadcasting(self): + # Test power w/ broadcasting + a2 = np.array([[1., 2., 3.], [4., 5., 6.]]) + a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]]) + b1 = np.array([2, 4, 3]) + b2 = np.array([b1, b1]) + b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]]) + + ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]], + mask=[[1, 1, 0], [0, 1, 1]]) + # No broadcasting, base & exp w/ mask + test = a2m ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + # No broadcasting, base w/ mask, exp w/o mask + test = a2m ** b2 + assert_equal(test, ctrl) + assert_equal(test.mask, a2m.mask) + # No broadcasting, base w/o mask, exp w/ mask + test = a2 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, b2m.mask) + + ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]], + mask=[[0, 1, 0], [0, 1, 0]]) + test = b1 ** b2m + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + test = b2m ** b1 + assert_equal(test, ctrl) + assert_equal(test.mask, ctrl.mask) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + def test_where(self): + # Test the where function + x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.]) + y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]) + m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0] + m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1] + xm = masked_array(x, mask=m1) + ym = masked_array(y, mask=m2) + xm.set_fill_value(1e+20) + + d = where(xm > 2, xm, -9) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + assert_equal(d._mask, xm._mask) + d = where(xm > 2, -9, ym) + assert_equal(d, [5., 0., 3., 2., -1., -9., + -9., -10., -9., 1., 0., -9.]) + assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]) + d = where(xm > 2, xm, masked) + assert_equal(d, [-9., -9., -9., -9., -9., 4., + -9., -9., 10., -9., -9., 3.]) + tmp = xm._mask.copy() + tmp[(xm <= 2).filled(True)] 
= True + assert_equal(d._mask, tmp) + + with np.errstate(invalid="warn"): + # The fill value is 1e20, it cannot be converted to `int`: + with pytest.warns(RuntimeWarning, match="invalid value"): + ixm = xm.astype(int) + d = where(ixm > 2, ixm, masked) + assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3]) + assert_equal(d.dtype, ixm.dtype) + + def test_where_object(self): + a = np.array(None) + b = masked_array(None) + r = b.copy() + assert_equal(np.ma.where(True, a, a), r) + assert_equal(np.ma.where(True, b, b), r) + + def test_where_with_masked_choice(self): + x = arange(10) + x[3] = masked + c = x >= 8 + # Set False to masked + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_equal(x, z) + # Set True to masked + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + + def test_where_with_masked_condition(self): + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + c[0] = masked + z = where(c, x, -x) + assert_equal(z, [1., 2., 0., -4., -5]) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + + x = arange(1, 6) + x[-1] = masked + y = arange(1, 6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_equal(z, zm) + assert_(getmask(zm) is nomask) + assert_equal(zm, [1, 2, 3, 40, 50]) + z = where(c, masked, 1) + assert_equal(z, [99, 99, 99, 1, 1]) + z = where(c, 1, masked) + assert_equal(z, [99, 1, 1, 99, 99]) + + def test_where_type(self): + # Test the type conservation with where + x = np.arange(4, dtype=np.int32) + y = np.arange(4, dtype=np.float32) * 2.2 + test = where(x > 1.5, y, x).dtype + control = np.result_type(np.int32, np.float32) + assert_equal(test, control) + + def test_where_broadcast(self): + # Issue 8599 + x = np.arange(9).reshape(3, 3) + y = np.zeros(3) + core = np.where([1, 0, 1], x, y) + ma = where([1, 0, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured(self): + # Issue 8600 + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + y = np.array((10, 20), dtype=dt) + core = np.where([0, 1, 1], x, y) + ma = np.where([0, 1, 1], x, y) + + assert_equal(core, ma) + assert_equal(core.dtype, ma.dtype) + + def test_where_structured_masked(self): + dt = np.dtype([('a', int), ('b', int)]) + x = np.array([(1, 2), (3, 4), (5, 6)], dtype=dt) + + ma = where([0, 1, 1], x, masked) + expected = masked_where([1, 0, 0], x) + + assert_equal(ma.dtype, expected.dtype) + assert_equal(ma, expected) + assert_equal(ma.mask, expected.mask) + + def test_masked_invalid_error(self): + a = np.arange(5, dtype=object) + a[3] = np.inf + a[2] = np.nan + with pytest.raises(TypeError, + match="not supported for the input types"): + np.ma.masked_invalid(a) + + def test_masked_invalid_pandas(self): + # getdata() used to be bad for pandas series due to its _data + # attribute. This test is a regression test mainly and may be + # removed if getdata() is adjusted. 
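+        # The minimal Series stand-in below mimics the pandas layout: a bogus
+        # _data attribute plus __array__. masked_invalid should ignore _data,
+        # go through __array__, and mask the nan/inf entries.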
+        class Series:
+            _data = "nonsense"
+
+            def __array__(self, dtype=None, copy=None):
+                return np.array([5, np.nan, np.inf])
+
+        arr = np.ma.masked_invalid(Series())
+        assert_array_equal(arr._data, np.array(Series()))
+        assert_array_equal(arr._mask, [False, True, True])
+
+    @pytest.mark.parametrize("copy", [True, False])
+    def test_masked_invalid_full_mask(self, copy):
+        # Matplotlib relied on masked_invalid always returning a full mask
+        # (Also astropy projects, but were ok with it gh-22720 and gh-22842)
+        a = np.ma.array([1, 2, 3, 4])
+        assert a._mask is nomask
+        res = np.ma.masked_invalid(a, copy=copy)
+        assert res.mask is not nomask
+        # mask of a should not be mutated
+        assert a.mask is nomask
+        assert np.may_share_memory(a._data, res._data) != copy
+
+    def test_choose(self):
+        # Test choose
+        choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+                   [20, 21, 22, 23], [30, 31, 32, 33]]
+        chosen = choose([2, 3, 1, 0], choices)
+        assert_equal(chosen, array([20, 31, 12, 3]))
+        chosen = choose([2, 4, 1, 0], choices, mode='clip')
+        assert_equal(chosen, array([20, 31, 12, 3]))
+        chosen = choose([2, 4, 1, 0], choices, mode='wrap')
+        assert_equal(chosen, array([20, 1, 12, 3]))
+        # Check with some masked indices
+        indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])
+        chosen = choose(indices_, choices, mode='wrap')
+        assert_equal(chosen, array([99, 1, 12, 99]))
+        assert_equal(chosen.mask, [1, 0, 0, 1])
+        # Check with some masked choices
+        choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],
+                                       [1, 0, 0, 0], [0, 0, 0, 0]])
+        indices_ = [2, 3, 1, 0]
+        chosen = choose(indices_, choices, mode='wrap')
+        assert_equal(chosen, array([20, 31, 12, 3]))
+        assert_equal(chosen.mask, [1, 0, 0, 1])
+
+    def test_choose_with_out(self):
+        # Test choose with an explicit out keyword
+        choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+                   [20, 21, 22, 23], [30, 31, 32, 33]]
+        store = empty(4, dtype=int)
+        chosen = choose([2, 3, 1, 0], choices, out=store)
+        assert_equal(store, array([20, 31, 12, 3]))
+        assert_(store is chosen)
+        # Check with some masked indices + out
+        store = empty(4, dtype=int)
+        indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
+        chosen = choose(indices_, choices, mode='wrap', out=store)
+        assert_equal(store, array([99, 31, 12, 99]))
+        assert_equal(store.mask, [1, 0, 0, 1])
+        # Check with some masked choices + out in a ndarray!
+ choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1], + [1, 0, 0, 0], [0, 0, 0, 0]]) + indices_ = [2, 3, 1, 0] + store = empty(4, dtype=int).view(ndarray) + chosen = choose(indices_, choices, mode='wrap', out=store) + assert_equal(store, array([999999, 31, 12, 999999])) + + def test_reshape(self): + a = arange(10) + a[0] = masked + # Try the default + b = a.reshape((5, 2)) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ arguments as list instead of tuple + b = a.reshape(5, 2) + assert_equal(b.shape, (5, 2)) + assert_(b.flags['C']) + # Try w/ order + b = a.reshape((5, 2), order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + # Try w/ order + b = a.reshape(5, 2, order='F') + assert_equal(b.shape, (5, 2)) + assert_(b.flags['F']) + + c = np.reshape(a, (2, 5)) + assert_(isinstance(c, MaskedArray)) + assert_equal(c.shape, (2, 5)) + assert_(c[0, 0] is masked) + assert_(c.flags['C']) + + def test_make_mask_descr(self): + # Flexible + ntype = [('a', float), ('b', float)] + test = make_mask_descr(ntype) + assert_equal(test, [('a', bool), ('b', bool)]) + assert_(test is make_mask_descr(test)) + + # Standard w/ shape + ntype = (float, 2) + test = make_mask_descr(ntype) + assert_equal(test, (bool, 2)) + assert_(test is make_mask_descr(test)) + + # Standard standard + ntype = float + test = make_mask_descr(ntype) + assert_equal(test, np.dtype(bool)) + assert_(test is make_mask_descr(test)) + + # Nested + ntype = [('a', float), ('b', [('ba', float), ('bb', float)])] + test = make_mask_descr(ntype) + control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])]) + assert_equal(test, control) + assert_(test is make_mask_descr(test)) + + # Named+ shape + ntype = [('a', (float, 2))] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([('a', (bool, 2))])) + assert_(test is make_mask_descr(test)) + + # 2 names + ntype = [(('A', 'a'), float)] + test = make_mask_descr(ntype) + assert_equal(test, np.dtype([(('A', 'a'), bool)])) + assert_(test is make_mask_descr(test)) + + # nested boolean types should preserve identity + base_type = np.dtype([('a', int, 3)]) + base_mtype = make_mask_descr(base_type) + sub_type = np.dtype([('a', int), ('b', base_mtype)]) + test = make_mask_descr(sub_type) + assert_equal(test, np.dtype([('a', bool), ('b', [('a', bool, 3)])])) + assert_(test.fields['b'][0] is base_mtype) + + def test_make_mask(self): + # Test make_mask + # w/ a list as an input + mask = [0, 1] + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a ndarray as an input + mask = np.array([0, 1], dtype=bool) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [0, 1]) + # w/ a flexible-type ndarray as an input - use default + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask) + assert_equal(test.dtype, MaskType) + assert_equal(test, [1, 1]) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, mdtype) + assert_equal(test, mask) + # w/ a flexible-type ndarray as an input - use input dtype + mdtype = [('a', float), ('b', float)] + bdtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1)], dtype=mdtype) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test.dtype, bdtype) + assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype)) + # Ensure this 
also works for void + mask = np.array((False, True), dtype='?,?')[()] + assert_(isinstance(mask, np.void)) + test = make_mask(mask, dtype=mask.dtype) + assert_equal(test, mask) + assert_(test is not mask) + mask = np.array((0, 1), dtype='i4,i4')[()] + test2 = make_mask(mask, dtype=mask.dtype) + assert_equal(test2, test) + # test that nomask is returned when m is nomask. + bools = [True, False] + dtypes = [MaskType, float] + msgformat = 'copy=%s, shrink=%s, dtype=%s' + for cpy, shr, dt in itertools.product(bools, bools, dtypes): + res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt) + assert_(res is nomask, msgformat % (cpy, shr, dt)) + + def test_mask_or(self): + # Initialize + mtype = [('a', bool), ('b', bool)] + mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype) + # Test using nomask as input + test = mask_or(mask, nomask) + assert_equal(test, mask) + test = mask_or(nomask, mask) + assert_equal(test, mask) + # Using False as input + test = mask_or(mask, False) + assert_equal(test, mask) + # Using another array w / the same dtype + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype) + test = mask_or(mask, other) + control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype) + assert_equal(test, control) + # Using another array w / a different dtype + othertype = [('A', bool), ('B', bool)] + other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype) + try: + test = mask_or(mask, other) + except ValueError: + pass + # Using nested arrays + dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype) + bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype) + cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype) + assert_equal(mask_or(amask, bmask), cntrl) + + a = np.array([False, False]) + assert mask_or(a, a) is nomask # gh-27360 + + def test_allequal(self): + x = array([1, 2, 3], mask=[0, 0, 0]) + y = array([1, 2, 3], mask=[1, 0, 0]) + z = array([[1, 2, 3], [4, 5, 6]], mask=[[0, 0, 0], [1, 1, 1]]) + + assert allequal(x, y) + assert not allequal(x, y, fill_value=False) + assert allequal(x, z) + + # test allequal for the same input, with mask=nomask, this test is for + # the scenario raised in https://github.com/numpy/numpy/issues/27201 + assert allequal(x, x) + assert allequal(x, x, fill_value=False) + + assert allequal(y, y) + assert not allequal(y, y, fill_value=False) + + def test_flatten_mask(self): + # Tests flatten mask + # Standard dtype + mask = np.array([0, 0, 1], dtype=bool) + assert_equal(flatten_mask(mask), mask) + # Flexible dtype + mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] + data = [(0, (0, 0)), (0, (0, 1))] + mask = np.array(data, dtype=mdtype) + test = flatten_mask(mask) + control = np.array([0, 0, 0, 0, 0, 1], dtype=bool) + assert_equal(test, control) + + def test_on_ndarray(self): + # Test functions on ndarrays + a = np.array([1, 2, 3, 4]) + m = array(a, mask=False) + test = anom(a) + assert_equal(test, m.anom()) + test = reshape(a, (2, 2)) + assert_equal(test, m.reshape(2, 2)) + + def test_compress(self): + # Test compress function on ndarray and masked array + # Address Github #2495. 
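+        # np.ma.compress mirrors np.compress: it keeps the slices along
+        # `axis` where `condition` is True, so cond [T, F, T, T] on a 4-row
+        # array keeps rows 0, 2 and 3.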
+ arr = np.arange(8) + arr.shape = 4, 2 + cond = np.array([True, False, True, True]) + control = arr[[0, 2, 3]] + test = np.ma.compress(cond, arr, axis=0) + assert_equal(test, control) + marr = np.ma.array(arr) + test = np.ma.compress(cond, marr, axis=0) + assert_equal(test, control) + + def test_compressed(self): + # Test ma.compressed function. + # Address gh-4026 + a = np.ma.array([1, 2]) + test = np.ma.compressed(a) + assert_(type(test) is np.ndarray) + + # Test case when input data is ndarray subclass + class A(np.ndarray): + pass + + a = np.ma.array(A(shape=0)) + test = np.ma.compressed(a) + assert_(type(test) is A) + + # Test that compress flattens + test = np.ma.compressed([[1], [2]]) + assert_equal(test.ndim, 1) + test = np.ma.compressed([[[[[1]]]]]) + assert_equal(test.ndim, 1) + + # Test case when input is MaskedArray subclass + class M(MaskedArray): + pass + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test.ndim, 1) + + # with .compressed() overridden + class M(MaskedArray): + def compressed(self): + return 42 + + test = np.ma.compressed(M([[[]], [[]]])) + assert_equal(test, 42) + + def test_convolve(self): + a = masked_equal(np.arange(5), 2) + b = np.array([1, 1]) + + result = masked_equal([0, 1, -1, -1, 7, 4], -1) + test = np.ma.convolve(a, b, mode='full') + assert_equal(test, result) + + test = np.ma.convolve(a, b, mode='same') + assert_equal(test, result[:-1]) + + test = np.ma.convolve(a, b, mode='valid') + assert_equal(test, result[1:-1]) + + result = masked_equal([0, 1, 1, 3, 7, 4], -1) + test = np.ma.convolve(a, b, mode='full', propagate_mask=False) + assert_equal(test, result) + + test = np.ma.convolve(a, b, mode='same', propagate_mask=False) + assert_equal(test, result[:-1]) + + test = np.ma.convolve(a, b, mode='valid', propagate_mask=False) + assert_equal(test, result[1:-1]) + + test = np.ma.convolve([1, 1], [1, 1, 1]) + assert_equal(test, masked_equal([1, 2, 2, 1], -1)) + + a = [1, 1] + b = masked_equal([1, -1, -1, 1], -1) + test = np.ma.convolve(a, b, propagate_mask=False) + assert_equal(test, masked_equal([1, 1, -1, 1, 1], -1)) + test = np.ma.convolve(a, b, propagate_mask=True) + assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1)) + + +class TestMaskedFields: + + def setup_method(self): + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = ['one', 'two', 'three', 'four', 'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mdtype = [('a', bool), ('b', bool), ('c', bool)] + mask = [0, 1, 0, 0, 1] + base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + self.data = {"base": base, "mask": mask, "ddtype": ddtype, "mdtype": mdtype} + + def test_set_records_masks(self): + base = self.data['base'] + mdtype = self.data['mdtype'] + # Set w/ nomask or masked + base.mask = nomask + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = masked + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ simple boolean + base.mask = False + assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype)) + base.mask = True + assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype)) + # Set w/ list + base.mask = [0, 0, 0, 1, 1] + assert_equal_records(base._mask, + np.array([(x, x, x) for x in [0, 0, 0, 1, 1]], + dtype=mdtype)) + + def test_set_record_element(self): + # Check setting an element of a record) + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[0] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, 
int) + assert_equal(base_a._data, [3, 2, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'two', b'three', b'four', b'five']) + + def test_set_record_slice(self): + base = self.data['base'] + (base_a, base_b, base_c) = (base['a'], base['b'], base['c']) + base[:3] = (pi, pi, 'pi') + + assert_equal(base_a.dtype, int) + assert_equal(base_a._data, [3, 3, 3, 4, 5]) + + assert_equal(base_b.dtype, float) + assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5]) + + assert_equal(base_c.dtype, '|S8') + assert_equal(base_c._data, + [b'pi', b'pi', b'pi', b'four', b'five']) + + def test_mask_element(self): + "Check record access" + base = self.data['base'] + base[0] = masked + + for n in ('a', 'b', 'c'): + assert_equal(base[n].mask, [1, 1, 0, 0, 1]) + assert_equal(base[n]._data, base._data[n]) + + def test_getmaskarray(self): + # Test getmaskarray on flexible dtype + ndtype = [('a', int), ('b', float)] + test = empty(3, dtype=ndtype) + assert_equal(getmaskarray(test), + np.array([(0, 0), (0, 0), (0, 0)], + dtype=[('a', '|b1'), ('b', '|b1')])) + test[:] = masked + assert_equal(getmaskarray(test), + np.array([(1, 1), (1, 1), (1, 1)], + dtype=[('a', '|b1'), ('b', '|b1')])) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + # Transform globally to simple dtype + test = a.view(float) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + # Transform globally to dty + test = a.view((float, 2)) + assert_equal(test, data) + assert_equal(test.mask, controlmask.reshape(-1, 2)) + + def test_getitem(self): + ndtype = [('a', float), ('b', float)] + a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype) + a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1], + [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])), + dtype=[('a', bool), ('b', bool)]) + + def _test_index(i): + assert_equal(type(a[i]), mvoid) + assert_equal_records(a[i]._data, a._data[i]) + assert_equal_records(a[i]._mask, a._mask[i]) + + assert_equal(type(a[i, ...]), MaskedArray) + assert_equal_records(a[i, ...]._data, a._data[i, ...]) + assert_equal_records(a[i, ...]._mask, a._mask[i, ...]) + + _test_index(1) # No mask + _test_index(0) # One element masked + _test_index(-2) # All element masked + + def test_setitem(self): + # Issue 4866: check that one can set individual items in [record][col] + # and [col][record] order + ndtype = np.dtype([('a', float), ('b', int)]) + ma = np.ma.MaskedArray([(1.0, 1), (2.0, 2)], dtype=ndtype) + ma['a'][1] = 3.0 + assert_equal(ma['a'], np.array([1.0, 3.0])) + ma[1]['a'] = 4.0 + assert_equal(ma['a'], np.array([1.0, 4.0])) + # Issue 2403 + mdtype = np.dtype([('a', bool), ('b', bool)]) + # soft mask + control = np.array([(False, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a[0]['a'] = 2 + assert_equal(a.mask, control) + # hard mask + control = np.array([(True, True), (True, True)], dtype=mdtype) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a['a'][0] = 2 + assert_equal(a.mask, control) + a = np.ma.masked_all((2,), dtype=ndtype) + a.harden_mask() + a[0]['a'] = 2 + assert_equal(a.mask, control) + + def 
test_setitem_scalar(self): + # 8510 + mask_0d = np.ma.masked_array(1, mask=True) + arr = np.ma.arange(3) + arr[0] = mask_0d + assert_array_equal(arr.mask, [True, False, False]) + + def test_element_len(self): + # check that len() works for mvoid (Github issue #576) + for rec in self.data['base']: + assert_equal(len(rec), len(self.data['ddtype'])) + + +class TestMaskedObjectArray: + + def test_getitem(self): + arr = np.ma.array([None, None]) + for dt in [float, object]: + a0 = np.eye(2).astype(dt) + a1 = np.eye(3).astype(dt) + arr[0] = a0 + arr[1] = a1 + + assert_(arr[0] is a0) + assert_(arr[1] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_(arr[0, ...][()] is a0) + assert_(arr[1, ...][()] is a1) + + arr[0] = np.ma.masked + + assert_(arr[1] is a1) + assert_(isinstance(arr[0, ...], MaskedArray)) + assert_(isinstance(arr[1, ...], MaskedArray)) + assert_equal(arr[0, ...].mask, True) + assert_(arr[1, ...][()] is a1) + + # gh-5962 - object arrays of arrays do something special + assert_equal(arr[0].data, a0) + assert_equal(arr[0].mask, True) + assert_equal(arr[0, ...][()].data, a0) + assert_equal(arr[0, ...][()].mask, True) + + def test_nested_ma(self): + + arr = np.ma.array([None, None]) + # set the first object to be an unmasked masked constant. A little fiddly + arr[0, ...] = np.array([np.ma.masked], object)[0, ...] + + # check the above line did what we were aiming for + assert_(arr.data[0] is np.ma.masked) + + # test that getitem returned the value by identity + assert_(arr[0] is np.ma.masked) + + # now mask the masked value! + arr[0] = np.ma.masked + assert_(arr[0] is np.ma.masked) + + +class TestMaskedView: + + def setup_method(self): + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + controlmask = np.array([1] + 19 * [0], dtype=bool) + self.data = (data, a, controlmask) + + def test_view_to_nothing(self): + (data, a, controlmask) = self.data + test = a.view() + assert_(isinstance(test, MaskedArray)) + assert_equal(test._data, a._data) + assert_equal(test._mask, a._mask) + + def test_view_to_type(self): + (data, a, controlmask) = self.data + test = a.view(np.ndarray) + assert_(not isinstance(test, MaskedArray)) + assert_equal(test, a._data) + assert_equal_records(test, data.view(a.dtype).squeeze()) + + def test_view_to_simple_dtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view(float) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data.ravel()) + assert_equal(test.mask, controlmask) + + def test_view_to_flexible_dtype(self): + (data, a, controlmask) = self.data + + test = a.view([('A', float), ('B', float)]) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a']) + assert_equal(test['B'], a['b']) + + test = a[0].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.mask.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][0]) + assert_equal(test['B'], a['b'][0]) + + test = a[-1].view([('A', float), ('B', float)]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.dtype.names, ('A', 'B')) + assert_equal(test['A'], a['a'][-1]) + assert_equal(test['B'], a['b'][-1]) + + def test_view_to_subdtype(self): + (data, a, controlmask) = self.data + # View globally + test = a.view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data) + assert_equal(test.mask, 
controlmask.reshape(-1, 2)) + # View on 1 masked element + test = a[0].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[0]) + assert_equal(test.mask, (1, 0)) + # View on 1 unmasked element + test = a[-1].view((float, 2)) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, data[-1]) + + def test_view_to_dtype_and_type(self): + (data, a, controlmask) = self.data + + test = a.view((float, 2), np.recarray) + assert_equal(test, data) + assert_(isinstance(test, np.recarray)) + assert_(not isinstance(test, MaskedArray)) + + +class TestOptionalArgs: + def test_ndarrayfuncs(self): + # test axis arg behaves the same as ndarray (including multiple axes) + + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) + # mask out last element of last dimension + m[:, :, -1] = True + a = np.ma.array(d, mask=m) + + def testaxis(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test axis arg + assert_equal(ma_f(a, axis=1)[..., :-1], numpy_f(d[..., :-1], axis=1)) + assert_equal(ma_f(a, axis=(0, 1))[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1))) + + def testkeepdims(f, a, d): + numpy_f = numpy.__getattribute__(f) + ma_f = np.ma.__getattribute__(f) + + # test keepdims arg + assert_equal(ma_f(a, keepdims=True).shape, + numpy_f(d, keepdims=True).shape) + assert_equal(ma_f(a, keepdims=False).shape, + numpy_f(d, keepdims=False).shape) + + # test both at once + assert_equal(ma_f(a, axis=1, keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=1, keepdims=True)) + assert_equal(ma_f(a, axis=(0, 1), keepdims=True)[..., :-1], + numpy_f(d[..., :-1], axis=(0, 1), keepdims=True)) + + for f in ['sum', 'prod', 'mean', 'var', 'std']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + for f in ['min', 'max']: + testaxis(f, a, d) + + d = (np.arange(24).reshape((2, 3, 4)) % 2 == 0) + a = np.ma.array(d, mask=m) + for f in ['all', 'any']: + testaxis(f, a, d) + testkeepdims(f, a, d) + + def test_count(self): + # test np.ma.count specially + + d = np.arange(24.0).reshape((2, 3, 4)) + m = np.zeros(24, dtype=bool).reshape((2, 3, 4)) + m[:, 0, :] = True + a = np.ma.array(d, mask=m) + + assert_equal(count(a), 16) + assert_equal(count(a, axis=1), 2 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 4 * ones((4,))) + assert_equal(count(a, keepdims=True), 16 * ones((1, 1, 1))) + assert_equal(count(a, axis=1, keepdims=True), 2 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 4 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 2 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) + assert_raises(AxisError, count, a, axis=3) + + # check the 'nomask' path + a = np.ma.array(d, mask=nomask) + + assert_equal(count(a), 24) + assert_equal(count(a, axis=1), 3 * ones((2, 4))) + assert_equal(count(a, axis=(0, 1)), 6 * ones((4,))) + assert_equal(count(a, keepdims=True), 24 * ones((1, 1, 1))) + assert_equal(np.ndim(count(a, keepdims=True)), 3) + assert_equal(count(a, axis=1, keepdims=True), 3 * ones((2, 1, 4))) + assert_equal(count(a, axis=(0, 1), keepdims=True), 6 * ones((1, 1, 4))) + assert_equal(count(a, axis=-2), 3 * ones((2, 4))) + assert_raises(ValueError, count, a, axis=(1, 1)) + assert_raises(AxisError, count, a, axis=3) + + # check the 'masked' singleton + assert_equal(count(np.ma.masked), 0) + + # check 0-d arrays do not allow axis > 0 + assert_raises(AxisError, count, np.ma.array(1), axis=1) + + +class TestMaskedConstant: + def _do_add_test(self, add): + # sanity check + 
assert_(add(np.ma.masked, 1) is np.ma.masked) + + # now try with a vector + vector = np.array([1, 2, 3]) + result = add(np.ma.masked, vector) + + # lots of things could go wrong here + assert_(result is not np.ma.masked) + assert_(not isinstance(result, np.ma.core.MaskedConstant)) + assert_equal(result.shape, vector.shape) + assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool)) + + def test_ufunc(self): + self._do_add_test(np.add) + + def test_operator(self): + self._do_add_test(lambda a, b: a + b) + + def test_ctor(self): + m = np.ma.array(np.ma.masked) + + # most importantly, we do not want to create a new MaskedConstant + # instance + assert_(not isinstance(m, np.ma.core.MaskedConstant)) + assert_(m is not np.ma.masked) + + def test_repr(self): + # copies should not exist, but if they do, it should be obvious that + # something is wrong + assert_equal(repr(np.ma.masked), 'masked') + + # create a new instance in a weird way + masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant) + assert_not_equal(repr(masked2), 'masked') + + def test_pickle(self): + from io import BytesIO + + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + with BytesIO() as f: + pickle.dump(np.ma.masked, f, protocol=proto) + f.seek(0) + res = pickle.load(f) + assert_(res is np.ma.masked) + + def test_copy(self): + # gh-9328 + # copy is a no-op, like it is with np.True_ + assert_equal( + np.ma.masked.copy() is np.ma.masked, + np.True_.copy() is np.True_) + + def test__copy(self): + import copy + assert_( + copy.copy(np.ma.masked) is np.ma.masked) + + def test_deepcopy(self): + import copy + assert_( + copy.deepcopy(np.ma.masked) is np.ma.masked) + + def test_immutable(self): + orig = np.ma.masked + assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1) + assert_raises(ValueError, operator.setitem, orig.data, (), 1) + assert_raises(ValueError, operator.setitem, orig.mask, (), False) + + view = np.ma.masked.view(np.ma.MaskedArray) + assert_raises(ValueError, operator.setitem, view, (), 1) + assert_raises(ValueError, operator.setitem, view.data, (), 1) + assert_raises(ValueError, operator.setitem, view.mask, (), False) + + def test_coercion_int(self): + a_i = np.zeros((), int) + assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked) + assert_raises(MaskError, int, np.ma.masked) + + def test_coercion_float(self): + a_f = np.zeros((), float) + assert_warns(UserWarning, operator.setitem, a_f, (), np.ma.masked) + assert_(np.isnan(a_f[()])) + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_unicode(self): + a_u = np.zeros((), 'U10') + a_u[()] = np.ma.masked + assert_equal(a_u[()], '--') + + @pytest.mark.xfail(reason="See gh-9750") + def test_coercion_bytes(self): + a_b = np.zeros((), 'S10') + a_b[()] = np.ma.masked + assert_equal(a_b[()], b'--') + + def test_subclass(self): + # https://github.com/astropy/astropy/issues/6645 + class Sub(type(np.ma.masked)): + pass + + a = Sub() + assert_(a is Sub()) + assert_(a is not np.ma.masked) + assert_not_equal(repr(a), 'masked') + + def test_attributes_readonly(self): + assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,)) + assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64) + + +class TestMaskedWhereAliases: + + # TODO: Test masked_object, masked_equal, ... 
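+    # masked_values(x, value) masks the entries approximately equal to
+    # `value` (np.isclose for floats) and sets fill_value to that value;
+    # with shrink=True and no matches the mask collapses to nomask.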
+ + def test_masked_values(self): + res = masked_values(np.array([-32768.0]), np.int16(-32768)) + assert_equal(res.mask, [True]) + + res = masked_values(np.inf, np.inf) + assert_equal(res.mask, True) + + res = np.ma.masked_values(np.inf, -np.inf) + assert_equal(res.mask, False) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=True) + assert_(res.mask is np.ma.nomask) + + res = np.ma.masked_values([1, 2, 3, 4], 5, shrink=False) + assert_equal(res.mask, [False] * 4) + + +def test_masked_array(): + a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0]) + assert_equal(np.argwhere(a), [[1], [3]]) + +def test_masked_array_no_copy(): + # check nomask array is updated in place + a = np.ma.array([1, 2, 3, 4]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [False, False, True, False]) + # check masked array is updated in place + a = np.ma.array([1, 2, 3, 4], mask=[1, 0, 0, 0]) + _ = np.ma.masked_where(a == 3, a, copy=False) + assert_array_equal(a.mask, [True, False, True, False]) + # check masked array with masked_invalid is updated in place + a = np.ma.array([np.inf, 1, 2, 3, 4]) + _ = np.ma.masked_invalid(a, copy=False) + assert_array_equal(a.mask, [True, False, False, False, False]) + +def test_append_masked_array(): + a = np.ma.masked_equal([1, 2, 3], value=2) + b = np.ma.masked_equal([4, 3, 2], value=2) + + result = np.ma.append(a, b) + expected_data = [1, 2, 3, 4, 3, 2] + expected_mask = [False, True, False, False, False, True] + assert_array_equal(result.data, expected_data) + assert_array_equal(result.mask, expected_mask) + + a = np.ma.masked_all((2, 2)) + b = np.ma.ones((3, 1)) + + result = np.ma.append(a, b) + expected_data = [1] * 3 + expected_mask = [True] * 4 + [False] * 3 + assert_array_equal(result.data[-3], expected_data) + assert_array_equal(result.mask, expected_mask) + + result = np.ma.append(a, b, axis=None) + assert_array_equal(result.data[-3], expected_data) + assert_array_equal(result.mask, expected_mask) + + +def test_append_masked_array_along_axis(): + a = np.ma.masked_equal([1, 2, 3], value=2) + b = np.ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) + + # When `axis` is specified, `values` must have the correct shape. + assert_raises(ValueError, np.ma.append, a, b, axis=0) + + result = np.ma.append(a[np.newaxis, :], b, axis=0) + expected = np.ma.arange(1, 10) + expected[[1, 6]] = np.ma.masked + expected = expected.reshape((3, 3)) + assert_array_equal(result.data, expected.data) + assert_array_equal(result.mask, expected.mask) + +def test_default_fill_value_complex(): + # regression test for Python 3, where 'unicode' was not defined + assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j) + + +def test_ufunc_with_output(): + # check that giving an output argument always returns that output. + # Regression test for gh-8416. 
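+    # When an explicit out= is supplied, the ufunc should return that exact
+    # object (mask updated in place) rather than allocating a new result.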
+    x = array([1., 2., 3.], mask=[0, 0, 1])
+    y = np.add(x, 1., out=x)
+    assert_(y is x)
+
+
+def test_ufunc_with_out_varied():
+    """ Test that masked arrays are immune to gh-10459 """
+    # the mask of the output should not affect the result, no matter how
+    # the output array is passed (positionally, by keyword, or in a tuple)
+    a = array([ 1, 2, 3], mask=[1, 0, 0])
+    b = array([10, 20, 30], mask=[1, 0, 0])
+    out = array([ 0, 0, 0], mask=[0, 0, 1])
+    expected = array([11, 22, 33], mask=[1, 0, 0])
+
+    out_pos = out.copy()
+    res_pos = np.add(a, b, out_pos)
+
+    out_kw = out.copy()
+    res_kw = np.add(a, b, out=out_kw)
+
+    out_tup = out.copy()
+    res_tup = np.add(a, b, out=(out_tup,))
+
+    assert_equal(res_kw.mask, expected.mask)
+    assert_equal(res_kw.data, expected.data)
+    assert_equal(res_tup.mask, expected.mask)
+    assert_equal(res_tup.data, expected.data)
+    assert_equal(res_pos.mask, expected.mask)
+    assert_equal(res_pos.data, expected.data)
+
+
+def test_astype_mask_ordering():
+    descr = np.dtype([('v', int, 3), ('x', [('y', float)])])
+    x = array([
+        [([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))],
+        [([1, 2, 3], (3.0,)), ([1, 2, 3], (4.0,))]], dtype=descr)
+    x[0]['v'][0] = np.ma.masked
+
+    x_a = x.astype(descr)
+    assert x_a.dtype.names == np.dtype(descr).names
+    assert x_a.mask.dtype.names == np.dtype(descr).names
+    assert_equal(x, x_a)
+
+    assert_(x is x.astype(x.dtype, copy=False))
+    assert_equal(type(x.astype(x.dtype, subok=False)), np.ndarray)
+
+    x_f = x.astype(x.dtype, order='F')
+    assert_(x_f.flags.f_contiguous)
+    assert_(x_f.mask.flags.f_contiguous)
+
+    # Also test the same indirectly, via np.array
+    x_a2 = np.array(x, dtype=descr, subok=True)
+    assert x_a2.dtype.names == np.dtype(descr).names
+    assert x_a2.mask.dtype.names == np.dtype(descr).names
+    assert_equal(x, x_a2)
+
+    assert_(x is np.array(x, dtype=descr, copy=None, subok=True))
+
+    x_f2 = np.array(x, dtype=x.dtype, order='F', subok=True)
+    assert_(x_f2.flags.f_contiguous)
+    assert_(x_f2.mask.flags.f_contiguous)
+
+
+@pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
+@pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
+@pytest.mark.filterwarnings('ignore::numpy.exceptions.ComplexWarning')
+def test_astype_basic(dt1, dt2):
+    # See gh-12070
+    src = np.ma.array(ones(3, dt1), fill_value=1)
+    dst = src.astype(dt2)
+
+    assert_(src.fill_value == 1)
+    assert_(src.dtype == dt1)
+    assert_(src.fill_value.dtype == dt1)
+
+    assert_(dst.fill_value == 1)
+    assert_(dst.dtype == dt2)
+    assert_(dst.fill_value.dtype == dt2)
+
+    assert_equal(src, dst)
+
+
+def test_fieldless_void():
+    dt = np.dtype([])  # a void dtype with no fields
+    x = np.empty(4, dt)
+
+    # these arrays contain no values, so there's little to test - but this
+    # shouldn't crash
+    mx = np.ma.array(x)
+    assert_equal(mx.dtype, x.dtype)
+    assert_equal(mx.shape, x.shape)
+
+    mx = np.ma.array(x, mask=x)
+    assert_equal(mx.dtype, x.dtype)
+    assert_equal(mx.shape, x.shape)
+
+
+def test_mask_shape_assignment_does_not_break_masked():
+    a = np.ma.masked
+    b = np.ma.array(1, mask=a.mask)
+    b.shape = (1,)
+    assert_equal(a.mask.shape, ())
+
+@pytest.mark.skipif(sys.flags.optimize > 1,
+                    reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")  # noqa: E501
+def test_doc_note():
+    def method(self):
+        """This docstring
+
+        Has multiple lines
+
+        And notes
+
+        Notes
+        -----
+        original note
+        """
+        pass
+
+    expected_doc = """This docstring
+
+Has multiple lines
+
+And notes
+
+Notes
+-----
+note
+
+original note"""
+
+    assert_equal(np.ma.core.doc_note(method.__doc__, "note"), expected_doc)
+
+
+def test_gh_22556():
+    source = np.ma.array([0, [0, 1, 2]], dtype=object)
+    deepcopy = copy.deepcopy(source)
+    deepcopy[1].append('this should not appear in source')
+    assert len(source[1]) == 3
+
+
+def test_gh_21022():
+    # testing for absence of reported error
+    source = np.ma.masked_array(data=[-1, -1], mask=True, dtype=np.float64)
+    axis = np.array(0)
+    result = np.prod(source, axis=axis, keepdims=False)
+    result = np.ma.masked_array(result,
+                                mask=np.ones(result.shape, dtype=np.bool))
+    array = np.ma.masked_array(data=-1, mask=True, dtype=np.float64)
+    copy.deepcopy(array)
+    copy.deepcopy(result)
+
+
+def test_deepcopy_2d_obj():
+    source = np.ma.array([[0, "dog"],
+                          [1, 1],
+                          [[1, 2], "cat"]],
+                         mask=[[0, 1],
+                               [0, 0],
+                               [0, 0]],
+                         dtype=object)
+    deepcopy = copy.deepcopy(source)
+    deepcopy[2, 0].extend(['this should not appear in source', 3])
+    assert len(source[2, 0]) == 2
+    assert len(deepcopy[2, 0]) == 4
+    assert_equal(deepcopy._mask, source._mask)
+    deepcopy._mask[0, 0] = 1
+    assert source._mask[0, 0] == 0
+
+
+def test_deepcopy_0d_obj():
+    source = np.ma.array(0, mask=[0], dtype=object)
+    deepcopy = copy.deepcopy(source)
+    deepcopy[...] = 17
+    assert_equal(source, 0)
+    assert_equal(deepcopy, 17)
+
+
+def test_uint_fill_value_and_filled():
+    # See also gh-27269
+    a = np.ma.MaskedArray([1, 1], [True, False], dtype="uint16")
+    # the fill value should likely not be 999999, but for now guarantee it:
+    assert a.fill_value == 999999
+    # However, its type is uint:
+    assert a.fill_value.dtype.kind == "u"
+    # And this ensures things like filled work:
+    np.testing.assert_array_equal(
+        a.filled(), np.array([999999, 1]).astype("uint16"), strict=True)
diff --git a/python/numpy/ma/tests/test_deprecations.py b/python/numpy/ma/tests/test_deprecations.py
new file mode 100644
index 000000000..8cc8b9c72
--- /dev/null
+++ b/python/numpy/ma/tests/test_deprecations.py
@@ -0,0 +1,87 @@
+"""Test deprecation and future warnings.
+ +""" +import io +import textwrap + +import pytest + +import numpy as np +from numpy.ma.core import MaskedArrayFutureWarning +from numpy.ma.testutils import assert_equal +from numpy.testing import assert_warns + + +class TestArgsort: + """ gh-8701 """ + def _test_base(self, argsort, cls): + arr_0d = np.array(1).view(cls) + argsort(arr_0d) + + arr_1d = np.array([1, 2, 3]).view(cls) + argsort(arr_1d) + + # argsort has a bad default for >1d arrays + arr_2d = np.array([[1, 2], [3, 4]]).view(cls) + result = assert_warns( + np.ma.core.MaskedArrayFutureWarning, argsort, arr_2d) + assert_equal(result, argsort(arr_2d, axis=None)) + + # should be no warnings for explicitly specifying it + argsort(arr_2d, axis=None) + argsort(arr_2d, axis=-1) + + def test_function_ndarray(self): + return self._test_base(np.ma.argsort, np.ndarray) + + def test_function_maskedarray(self): + return self._test_base(np.ma.argsort, np.ma.MaskedArray) + + def test_method(self): + return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray) + + +class TestMinimumMaximum: + + def test_axis_default(self): + # NumPy 1.13, 2017-05-06 + + data1d = np.ma.arange(6) + data2d = data1d.reshape(2, 3) + + ma_min = np.ma.minimum.reduce + ma_max = np.ma.maximum.reduce + + # check that the default axis is still None, but warns on 2d arrays + result = assert_warns(MaskedArrayFutureWarning, ma_max, data2d) + assert_equal(result, ma_max(data2d, axis=None)) + + result = assert_warns(MaskedArrayFutureWarning, ma_min, data2d) + assert_equal(result, ma_min(data2d, axis=None)) + + # no warnings on 1d, as both new and old defaults are equivalent + result = ma_min(data1d) + assert_equal(result, ma_min(data1d, axis=None)) + assert_equal(result, ma_min(data1d, axis=0)) + + result = ma_max(data1d) + assert_equal(result, ma_max(data1d, axis=None)) + assert_equal(result, ma_max(data1d, axis=0)) + + +class TestFromtextfile: + def test_fromtextfile_delimitor(self): + # NumPy 1.22.0, 2021-09-23 + + textfile = io.StringIO(textwrap.dedent( + """ + A,B,C,D + 'string 1';1;1.0;'mixed column' + 'string 2';2;2.0; + 'string 3';3;3.0;123 + 'string 4';4;4.0;3.14 + """ + )) + + with pytest.warns(DeprecationWarning): + result = np.ma.mrecords.fromtextfile(textfile, delimitor=';') diff --git a/python/numpy/ma/tests/test_extras.py b/python/numpy/ma/tests/test_extras.py new file mode 100644 index 000000000..3d10e839c --- /dev/null +++ b/python/numpy/ma/tests/test_extras.py @@ -0,0 +1,1998 @@ +"""Tests suite for MaskedArray. 
+Adapted from the original test_ma by Pierre Gerard-Marchant + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import itertools +import warnings + +import pytest + +import numpy as np +from numpy._core.numeric import normalize_axis_tuple +from numpy.ma.core import ( + MaskedArray, + arange, + array, + count, + getmaskarray, + masked, + masked_array, + nomask, + ones, + shape, + zeros, +) +from numpy.ma.extras import ( + _covhelper, + apply_along_axis, + apply_over_axes, + atleast_1d, + atleast_2d, + atleast_3d, + average, + clump_masked, + clump_unmasked, + compress_nd, + compress_rowcols, + corrcoef, + cov, + diagflat, + dot, + ediff1d, + flatnotmasked_contiguous, + in1d, + intersect1d, + isin, + mask_rowcols, + masked_all, + masked_all_like, + median, + mr_, + ndenumerate, + notmasked_contiguous, + notmasked_edges, + polyfit, + setdiff1d, + setxor1d, + stack, + union1d, + unique, + vstack, +) +from numpy.ma.testutils import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, +) +from numpy.testing import assert_warns, suppress_warnings + + +class TestGeneric: + # + def test_masked_all(self): + # Tests masked_all + # Standard dtype + test = masked_all((2,), dtype=float) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + test = masked_all((2,), dtype=dt) + control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) + assert_equal(test, control) + test = masked_all((2, 2), dtype=dt) + control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]], + mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]], + dtype=dt) + assert_equal(test, control) + # Nested dtype + dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])]) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((2,), dtype=dt) + control = array([(1, (1, 1)), (1, (1, 1))], + mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt) + assert_equal(test, control) + test = masked_all((1, 1), dtype=dt) + control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt) + assert_equal(test, control) + + def test_masked_all_with_object_nested(self): + # Test masked_all works with nested array with dtype of an 'object' + # refers to issue #15895 + my_dtype = np.dtype([('b', ([('c', object)], (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']['c']), 1) + assert_equal(masked_arr['b']['c'].shape, (1, 1)) + assert_equal(masked_arr['b']['c']._fill_value.shape, ()) + + def test_masked_all_with_object(self): + # same as above except that the array is not nested + my_dtype = np.dtype([('b', (object, (1,)))]) + masked_arr = np.ma.masked_all((1,), my_dtype) + + assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray) + assert_equal(len(masked_arr['b']), 1) + assert_equal(masked_arr['b'].shape, (1, 1)) + assert_equal(masked_arr['b']._fill_value.shape, ()) + + def test_masked_all_like(self): + # Tests masked_all + # Standard dtype + base = array([1, 2], dtype=float) + test = masked_all_like(base) + control = array([1, 1], mask=[1, 1], dtype=float) + assert_equal(test, control) + # Flexible dtype + dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']}) + base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt) 
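+        # masked_all_like should mirror base's shape and flexible dtype,
+        # with every field masked regardless of base's own mask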
+        test = masked_all_like(base)
+        control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt)
+        assert_equal(test, control)
+        # Nested dtype
+        dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
+        control = array([(1, (1, 1)), (1, (1, 1))],
+                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+        test = masked_all_like(control)
+        assert_equal(test, control)
+
+    def check_clump(self, f):
+        for i in range(1, 7):
+            for j in range(2**i):
+                k = np.arange(i, dtype=int)
+                ja = np.full(i, j, dtype=int)
+                a = masked_array(2**k)
+                a.mask = (ja & (2**k)) != 0
+                s = 0
+                for sl in f(a):
+                    s += a.data[sl].sum()
+                if f == clump_unmasked:
+                    assert_equal(a.compressed().sum(), s)
+                else:
+                    a.mask = ~a.mask
+                    assert_equal(a.compressed().sum(), s)
+
+    def test_clump_masked(self):
+        # Test clump_masked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        #
+        test = clump_masked(a)
+        control = [slice(0, 3), slice(6, 7), slice(8, 10)]
+        assert_equal(test, control)
+
+        self.check_clump(clump_masked)
+
+    def test_clump_unmasked(self):
+        # Test clump_unmasked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        test = clump_unmasked(a)
+        control = [slice(3, 6), slice(7, 8), ]
+        assert_equal(test, control)
+
+        self.check_clump(clump_unmasked)
+
+    def test_flatnotmasked_contiguous(self):
+        # Test flatnotmasked_contiguous
+        a = arange(10)
+        # No mask
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # mask of all false
+        a.mask = np.zeros(10, dtype=bool)
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # Some mask
+        a[(a < 3) | (a > 8) | (a == 5)] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(3, 5), slice(6, 9)])
+        #
+        a[:] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [])
+
+
+class TestAverage:
+    # Several tests of average. Why so many? Good point...
+    def test_testAverage1(self):
+        # Test of average.
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        assert_equal(2.0, average(ott, axis=0))
+        assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
+        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
+        assert_equal(2.0, result)
+        assert_(wts == 4.0)
+        ott[:] = masked
+        assert_equal(average(ott, axis=0).mask, [True])
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        ott = ott.reshape(2, 2)
+        ott[:, 1] = masked
+        assert_equal(average(ott, axis=0), [2.0, 0.0])
+        assert_equal(average(ott, axis=1).mask[0], [True])
+        assert_equal([2., 0.], average(ott, axis=0))
+        result, wts = average(ott, axis=0, returned=True)
+        assert_equal(wts, [1., 0.])
+
+    def test_testAverage2(self):
+        # More tests of average.
+        w1 = [0, 1, 1, 1, 1, 0]
+        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
+        x = arange(6, dtype=np.float64)
+        assert_equal(average(x, axis=0), 2.5)
+        assert_equal(average(x, axis=0, weights=w1), 2.5)
+        y = array([arange(6, dtype=np.float64), 2.0 * arange(6)])
+        assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)
+        assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.)
+        assert_equal(average(y, axis=1),
+                     [average(x, axis=0), average(x, axis=0) * 2.0])
+        assert_equal(average(y, None, weights=w2), 20. / 6.)
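+        # e.g. for axis=0, column 5 has weights 0 (row 0) and 1 (row 1),
+        # so its weighted average is (0 * 5 + 1 * 10) / 1 = 10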
+ assert_equal(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.]) + assert_equal(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0]) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_equal(average(masked_array(x, m1), axis=0), 2.5) + assert_equal(average(masked_array(x, m2), axis=0), 2.5) + assert_equal(average(masked_array(x, m4), axis=0).mask, [True]) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_equal(average(z, None), 20. / 6.) + assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]) + assert_equal(average(z, axis=1), [2.5, 5.0]) + assert_equal(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0]) + + def test_testAverage3(self): + # Yet more tests of average! + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[False, False], [True, False]]) + a2da = average(a2d, axis=0) + assert_equal(a2da, [0.5, 3.0]) + a2dma = average(a2dm, axis=0) + assert_equal(a2dma, [1.0, 3.0]) + a2dma = average(a2dm, axis=None) + assert_equal(a2dma, 7. / 3.) + a2dma = average(a2dm, axis=1) + assert_equal(a2dma, [1.5, 4.0]) + + def test_testAverage4(self): + # Test that `keepdims` works with average + x = np.array([2, 3, 4]).reshape(3, 1) + b = np.ma.array(x, mask=[[False], [False], [True]]) + w = np.array([4, 5, 6]).reshape(3, 1) + actual = average(b, weights=w, axis=1, keepdims=True) + desired = masked_array([[2.], [3.], [4.]], [[False], [False], [True]]) + assert_equal(actual, desired) + + def test_weight_and_input_dims_different(self): + # this test mirrors a test for np.average() + # in lib/test/test_function_base.py + y = np.arange(12).reshape(2, 2, 3) + w = np.array([0., 0., 1., .5, .5, 0., 0., .5, .5, 1., 0., 0.])\ + .reshape(2, 2, 3) + + m = np.full((2, 2, 3), False) + yma = np.ma.array(y, mask=m) + subw0 = w[:, :, 0] + + actual = average(yma, axis=(0, 1), weights=subw0) + desired = masked_array([7., 8., 9.], mask=[False, False, False]) + assert_almost_equal(actual, desired) + + m = np.full((2, 2, 3), False) + m[:, :, 0] = True + m[0, 0, 1] = True + yma = np.ma.array(y, mask=m) + actual = average(yma, axis=(0, 1), weights=subw0) + desired = masked_array( + [np.nan, 8., 9.], + mask=[True, False, False]) + assert_almost_equal(actual, desired) + + m = np.full((2, 2, 3), False) + yma = np.ma.array(y, mask=m) + + subw1 = w[1, :, :] + actual = average(yma, axis=(1, 2), weights=subw1) + desired = masked_array([2.25, 8.25], mask=[False, False]) + assert_almost_equal(actual, desired) + + # here the weights have the wrong shape for the specified axes + with pytest.raises( + ValueError, + match="Shape of weights must be consistent with " + "shape of a along specified axis"): + average(yma, axis=(0, 1, 2), weights=subw0) + + with pytest.raises( + ValueError, + match="Shape of weights must be consistent with " + "shape of a along specified axis"): + average(yma, axis=(0, 1), 
weights=subw1)
+
+        # swapping the axes should be the same as transposing the weights
+        actual = average(yma, axis=(1, 0), weights=subw0)
+        desired = average(yma, axis=(0, 1), weights=subw0.T)
+        assert_almost_equal(actual, desired)
+
+    def test_onintegers_with_mask(self):
+        # Test average on integers with mask
+        a = average(array([1, 2]))
+        assert_equal(a, 1.5)
+        a = average(array([1, 2, 3, 4], mask=[False, False, True, True]))
+        assert_equal(a, 1.5)
+
+    def test_complex(self):
+        # Test with complex data.
+        # (Regression test for https://github.com/numpy/numpy/issues/2684)
+        mask = np.array([[0, 0, 0, 1, 0],
+                         [0, 1, 0, 0, 0]], dtype=bool)
+        a = masked_array([[0, 1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j],
+                          [9j, 0 + 1j, 2 + 3j, 4 + 5j, 7 + 7j]],
+                         mask=mask)
+
+        av = average(a)
+        expected = np.average(a.compressed())
+        assert_almost_equal(av.real, expected.real)
+        assert_almost_equal(av.imag, expected.imag)
+
+        av0 = average(a, axis=0)
+        expected0 = average(a.real, axis=0) + average(a.imag, axis=0) * 1j
+        assert_almost_equal(av0.real, expected0.real)
+        assert_almost_equal(av0.imag, expected0.imag)
+
+        av1 = average(a, axis=1)
+        expected1 = average(a.real, axis=1) + average(a.imag, axis=1) * 1j
+        assert_almost_equal(av1.real, expected1.real)
+        assert_almost_equal(av1.imag, expected1.imag)
+
+        # Test with the 'weights' argument.
+        wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5],
+                        [1.0, 1.0, 1.0, 1.0, 1.0]])
+        wav = average(a, weights=wts)
+        expected = np.average(a.compressed(), weights=wts[~mask])
+        assert_almost_equal(wav.real, expected.real)
+        assert_almost_equal(wav.imag, expected.imag)
+
+        wav0 = average(a, weights=wts, axis=0)
+        expected0 = (average(a.real, weights=wts, axis=0) +
+                     average(a.imag, weights=wts, axis=0) * 1j)
+        assert_almost_equal(wav0.real, expected0.real)
+        assert_almost_equal(wav0.imag, expected0.imag)
+
+        wav1 = average(a, weights=wts, axis=1)
+        expected1 = (average(a.real, weights=wts, axis=1) +
+                     average(a.imag, weights=wts, axis=1) * 1j)
+        assert_almost_equal(wav1.real, expected1.real)
+        assert_almost_equal(wav1.imag, expected1.imag)
+
+    @pytest.mark.parametrize(
+        'x, axis, expected_avg, weights, expected_wavg, expected_wsum',
+        [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]),
+         ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]],
+          [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])],
+    )
+    def test_basic_keepdims(self, x, axis, expected_avg,
+                            weights, expected_wavg, expected_wsum):
+        avg = np.ma.average(x, axis=axis, keepdims=True)
+        assert avg.shape == np.shape(expected_avg)
+        assert_array_equal(avg, expected_avg)
+
+        wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True)
+        assert wavg.shape == np.shape(expected_wavg)
+        assert_array_equal(wavg, expected_wavg)
+
+        wavg, wsum = np.ma.average(x, axis=axis, weights=weights,
+                                   returned=True, keepdims=True)
+        assert wavg.shape == np.shape(expected_wavg)
+        assert_array_equal(wavg, expected_wavg)
+        assert wsum.shape == np.shape(expected_wsum)
+        assert_array_equal(wsum, expected_wsum)
+
+    def test_masked_weights(self):
+        # Test with masked weights.
+        # (Regression test for https://github.com/numpy/numpy/issues/10438)
+        a = np.ma.array(np.arange(9).reshape(3, 3),
+                        mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]])
+        weights_unmasked = masked_array([5, 28, 31], mask=False)
+        weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0])
+
+        avg_unmasked = average(a, axis=0,
+                               weights=weights_unmasked, returned=False)
+        expected_unmasked = np.array([6.0, 5.21875, 6.21875])
+        assert_almost_equal(avg_unmasked, expected_unmasked)
+
+        avg_masked = average(a, axis=0, weights=weights_masked, returned=False)
+        expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678])
+        assert_almost_equal(avg_masked, expected_masked)
+
+        # weights should be masked wherever the array is masked, so that
+        # masked NaN (or other) weight values are not summed when they are
+        # not cancelled out by a zero
+        a = np.ma.array([1.0, 2.0, 3.0, 4.0],
+                        mask=[False, False, True, True])
+        avg_unmasked = average(a, weights=[1, 1, 1, np.nan])
+
+        assert_almost_equal(avg_unmasked, 1.5)
+
+        a = np.ma.array([
+            [1.0, 2.0, 3.0, 4.0],
+            [5.0, 6.0, 7.0, 8.0],
+            [9.0, 1.0, 2.0, 3.0],
+        ], mask=[
+            [False, True, True, False],
+            [True, False, True, True],
+            [True, False, True, False],
+        ])
+
+        avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0)
+        avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5],
+                                   mask=[False, True, True, False])
+
+        assert_almost_equal(avg_masked, avg_expected)
+        assert_equal(avg_masked.mask, avg_expected.mask)
+
+
+class TestConcatenator:
+    # Tests for mr_, the equivalent of r_ for masked arrays.
+
+    def test_1d(self):
+        # Tests mr_ on 1D arrays.
+        assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
+        b = ones(5)
+        m = [1, 0, 0, 0, 0]
+        d = masked_array(b, mask=m)
+        c = mr_[d, 0, 0, d]
+        assert_(isinstance(c, MaskedArray))
+        assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
+        assert_array_equal(c.mask, mr_[m, 0, 0, m])
+
+    def test_2d(self):
+        # Tests mr_ on 2D arrays.
+        a_1 = np.random.rand(5, 5)
+        a_2 = np.random.rand(5, 5)
+        m_1 = np.round(np.random.rand(5, 5), 0)
+        m_2 = np.round(np.random.rand(5, 5), 0)
+        b_1 = masked_array(a_1, mask=m_1)
+        b_2 = masked_array(a_2, mask=m_2)
+        # append columns
+        d = mr_['1', b_1, b_2]
+        assert_(d.shape == (5, 10))
+        assert_array_equal(d[:, :5], b_1)
+        assert_array_equal(d[:, 5:], b_2)
+        assert_array_equal(d.mask, np.r_['1', m_1, m_2])
+        d = mr_[b_1, b_2]
+        assert_(d.shape == (10, 5))
+        assert_array_equal(d[:5, :], b_1)
+        assert_array_equal(d[5:, :], b_2)
+        assert_array_equal(d.mask, np.r_[m_1, m_2])
+
+    def test_masked_constant(self):
+        actual = mr_[np.ma.masked, 1]
+        assert_equal(actual.mask, [True, False])
+        assert_equal(actual.data[1], 1)
+
+        actual = mr_[[1, 2], np.ma.masked]
+        assert_equal(actual.mask, [False, False, True])
+        assert_equal(actual.data[:2], [1, 2])
+
+
+class TestNotMasked:
+    # Tests notmasked_edges and notmasked_contiguous.
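+    # notmasked_edges returns the indices of the first and last unmasked
+    # values (flattened, or per axis), while notmasked_contiguous returns
+    # the slices of contiguous unmasked runs.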
+ + def test_edges(self): + # Tests unmasked_edges + data = masked_array(np.arange(25).reshape(5, 5), + mask=[[0, 0, 1, 0, 0], + [0, 0, 0, 1, 1], + [1, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [1, 1, 1, 0, 0]],) + test = notmasked_edges(data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, 1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)]) + # + test = notmasked_edges(data.data, None) + assert_equal(test, [0, 24]) + test = notmasked_edges(data.data, 0) + assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data.data, -1) + assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)]) + assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)]) + # + data[-2] = masked + test = notmasked_edges(data, 0) + assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)]) + assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)]) + test = notmasked_edges(data, -1) + assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)]) + assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)]) + + def test_contiguous(self): + # Tests notmasked_contiguous + a = masked_array(np.arange(24).reshape(3, 8), + mask=[[0, 0, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [0, 0, 0, 0, 0, 0, 1, 0]]) + tmp = notmasked_contiguous(a, None) + assert_equal(tmp, [ + slice(0, 4, None), + slice(16, 22, None), + slice(23, 24, None) + ]) + + tmp = notmasked_contiguous(a, 0) + assert_equal(tmp, [ + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(0, 1, None), slice(2, 3, None)], + [slice(2, 3, None)], + [slice(2, 3, None)], + [], + [slice(2, 3, None)] + ]) + # + tmp = notmasked_contiguous(a, 1) + assert_equal(tmp, [ + [slice(0, 4, None)], + [], + [slice(0, 6, None), slice(7, 8, None)] + ]) + + +class TestCompressFunctions: + + def test_compress_nd(self): + # Tests compress_nd + x = np.array(list(range(3 * 4 * 5))).reshape(3, 4, 5) + m = np.zeros((3, 4, 5)).astype(bool) + m[1, 1, 1] = True + x = array(x, mask=m) + + # axis=None + a = compress_nd(x) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + # axis=0 + a = compress_nd(x, 0) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [ 5, 6, 7, 8, 9], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [45, 46, 47, 48, 49], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + # axis=1 + a = compress_nd(x, 1) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[20, 21, 22, 23, 24], + [30, 31, 32, 33, 34], + [35, 36, 37, 38, 39]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + + a2 = compress_nd(x, (1,)) + a3 = compress_nd(x, -2) + a4 = compress_nd(x, (-2,)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=2 + a = compress_nd(x, 2) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [25, 27, 28, 29], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (2,)) + a3 = compress_nd(x, -1) + a4 = compress_nd(x, (-1,)) + assert_equal(a, a2) + 
assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 1) + a = compress_nd(x, (0, 1)) + assert_equal(a, [[[ 0, 1, 2, 3, 4], + [10, 11, 12, 13, 14], + [15, 16, 17, 18, 19]], + [[40, 41, 42, 43, 44], + [50, 51, 52, 53, 54], + [55, 56, 57, 58, 59]]]) + a2 = compress_nd(x, (0, -2)) + assert_equal(a, a2) + + # axis=(1, 2) + a = compress_nd(x, (1, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[20, 22, 23, 24], + [30, 32, 33, 34], + [35, 37, 38, 39]], + [[40, 42, 43, 44], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (-2, 2)) + a3 = compress_nd(x, (1, -1)) + a4 = compress_nd(x, (-2, -1)) + assert_equal(a, a2) + assert_equal(a, a3) + assert_equal(a, a4) + + # axis=(0, 2) + a = compress_nd(x, (0, 2)) + assert_equal(a, [[[ 0, 2, 3, 4], + [ 5, 7, 8, 9], + [10, 12, 13, 14], + [15, 17, 18, 19]], + [[40, 42, 43, 44], + [45, 47, 48, 49], + [50, 52, 53, 54], + [55, 57, 58, 59]]]) + + a2 = compress_nd(x, (0, -1)) + assert_equal(a, a2) + + def test_compress_rowcols(self): + # Tests compress_rowcols + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[4, 5], [7, 8]]) + assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[0, 2], [6, 8]]) + assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]]) + assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(compress_rowcols(x), [[8]]) + assert_equal(compress_rowcols(x, 0), [[6, 7, 8]]) + assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_equal(compress_rowcols(x).size, 0) + assert_equal(compress_rowcols(x, 0).size, 0) + assert_equal(compress_rowcols(x, 1).size, 0) + + def test_mask_rowcols(self): + # Tests mask_rowcols. 
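+        # mask_rowcols masks every row (axis=0), column (axis=1), or both
+        # (axis=None) that contains at least one masked entry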
+ x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [0, 0, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[0, 0, 0], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1).mask, + [[0, 1, 0], [0, 1, 0], [0, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]]) + assert_equal(mask_rowcols(x).mask, + [[1, 1, 1], [1, 1, 1], [1, 1, 0]]) + assert_equal(mask_rowcols(x, 0).mask, + [[1, 1, 1], [1, 1, 1], [0, 0, 0]]) + assert_equal(mask_rowcols(x, 1,).mask, + [[1, 1, 0], [1, 1, 0], [1, 1, 0]]) + x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + assert_(mask_rowcols(x).all() is masked) + assert_(mask_rowcols(x, 0).all() is masked) + assert_(mask_rowcols(x, 1).all() is masked) + assert_(mask_rowcols(x).mask.all()) + assert_(mask_rowcols(x, 0).mask.all()) + assert_(mask_rowcols(x, 1).mask.all()) + + @pytest.mark.parametrize("axis", [None, 0, 1]) + @pytest.mark.parametrize(["func", "rowcols_axis"], + [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)]) + def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis): + # Test deprecation of the axis argument to `mask_rows` and `mask_cols` + x = array(np.arange(9).reshape(3, 3), + mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + with assert_warns(DeprecationWarning): + res = func(x, axis=axis) + assert_equal(res, mask_rowcols(x, rowcols_axis)) + + def test_dot(self): + # Tests dot product + n = np.arange(1, 7) + # + m = [1, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 0]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 1] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 1], [1, 1]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + assert_equal(c, dot(a, b)) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + m = [0, 0, 0, 0, 0, 0] + a = masked_array(n, mask=m).reshape(2, 3) + b = masked_array(n, mask=m).reshape(3, 2) + c = dot(a, b) + assert_equal(c.mask, nomask) + c = dot(b, a) + assert_equal(c.mask, nomask) + # + a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [0, 0]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[0, 0], [1, 1]]) + c = dot(a, b) + 
assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3) + b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 0], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c, np.dot(a.filled(0), b.filled(0))) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]]) + c = dot(b, a, strict=False) + assert_equal(c, np.dot(b.filled(0), a.filled(0))) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]]) + c = dot(a, b, strict=True) + assert_equal(c.mask, + [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]], + [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, + [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]], + [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, + [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]], + [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = 5. + c = dot(a, b, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=True) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + c = dot(b, a, strict=False) + assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + # + a = masked_array(np.arange(8).reshape(2, 2, 2), + mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]]) + b = masked_array(np.arange(2), mask=[0, 1]) + c = dot(a, b, strict=True) + assert_equal(c.mask, [[1, 1], [1, 1]]) + c = dot(a, b, strict=False) + assert_equal(c.mask, [[1, 0], [0, 0]]) + + def test_dot_returns_maskedarray(self): + # See gh-6611 + a = np.eye(3) + b = array(a) + assert_(type(dot(a, a)) is MaskedArray) + assert_(type(dot(a, b)) is MaskedArray) + assert_(type(dot(b, a)) is MaskedArray) + assert_(type(dot(b, b)) is MaskedArray) + + def test_dot_out(self): + a = array(np.eye(3)) + out = array(np.zeros((3, 3))) + res = dot(a, a, out=out) + assert_(res is out) + assert_equal(a, res) + + +class TestApplyAlongAxis: + # Tests 2D functions + def test_3d(self): + a = arange(12.).reshape(2, 2, 3) + + def myfunc(b): + return b[1] + + xa = apply_along_axis(myfunc, 2, a) + assert_equal(xa, [[1, 4], [7, 10]]) + + # Tests kwargs functions + def test_3d_kwargs(self): + a = arange(12).reshape(2, 2, 3) + + def myfunc(b, offset=0): + return b[1 + offset] + + xa = apply_along_axis(myfunc, 2, a, offset=1) + assert_equal(xa, [[2, 5], [8, 11]]) + + +class TestApplyOverAxes: + # Tests apply_over_axes + def test_basic(self): + a = arange(24).reshape(2, 3, 4) + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[60], [92], [124]]]) + assert_equal(test, ctrl) + a[(a % 2).astype(bool)] = masked + test = apply_over_axes(np.sum, a, [0, 2]) + ctrl = np.array([[[28], [44], [60]]]) + assert_equal(test, ctrl) + + +class TestMedian: + def test_pytype(self): + r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1) + assert_equal(r, np.inf) + + def 
test_inf(self):
+        # test that the even-length code path (which computes a mean of the
+        # two middle values) handles inf, and that all-masked input stays
+        # masked
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=-1)
+        assert_equal(r, np.inf)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=None)
+        assert_equal(r, np.inf)
+        # all masked
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=-1)
+        assert_equal(r.mask, True)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=None)
+        assert_equal(r.mask, True)
+
+    def test_non_masked(self):
+        x = np.arange(9)
+        assert_equal(np.ma.median(x), 4.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = range(8)
+        assert_equal(np.ma.median(x), 3.5)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = 5
+        assert_equal(np.ma.median(x), 5.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # integer
+        x = np.arange(9 * 8).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(type(np.ma.median(x, axis=1)) is not MaskedArray)
+        # float
+        x = np.arange(9 * 8.).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(type(np.ma.median(x, axis=1)) is not MaskedArray)
+
+    def test_docstring_examples(self):
+        "test the examples given in the docstring of ma.median"
+        x = array(np.arange(8), mask=[0] * 4 + [1] * 4)
+        assert_equal(np.ma.median(x), 1.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(10).reshape(2, 5), mask=[0] * 6 + [1] * 4)
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        ma_x = np.ma.median(x, axis=-1, overwrite_input=True)
+        assert_equal(ma_x, [2., 5.])
+        assert_equal(ma_x.shape, (2,), "shape mismatch")
+        assert_(type(ma_x) is MaskedArray)
+
+    def test_axis_argument_errors(self):
+        msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s"
+        for ndmin in range(5):
+            for mask in [False, True]:
+                x = array(1, ndmin=ndmin, mask=mask)
+
+                # Valid axis values should not raise exception
+                args = itertools.product(range(-ndmin, ndmin), [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except Exception:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+                # Invalid axis values should raise exception
+                args = itertools.product([-(ndmin + 1), ndmin], [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except np.exceptions.AxisError:
+                        pass
+                    else:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+    def test_masked_0d(self):
+        # Check values
+        x = array(1, mask=False)
+        assert_equal(np.ma.median(x), 1)
+        x = array(1, mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+
+    def test_masked_1d(self):
+        x = array(np.arange(5), mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant)
+        x = array(np.arange(5), mask=False)
+        assert_equal(np.ma.median(x), 2.)
+ assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0, 1, 0, 0, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + x = array(np.arange(5), mask=[0, 1, 1, 1, 1]) + assert_equal(np.ma.median(x), 0.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(5), mask=[0, 1, 1, 0, 0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(5.), mask=[0, 1, 1, 0, 0]) + assert_equal(np.ma.median(x), 3.) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # integer + x = array(np.arange(6), mask=[0, 1, 1, 1, 1, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + # float + x = array(np.arange(6.), mask=[0, 1, 1, 1, 1, 0]) + assert_equal(np.ma.median(x), 2.5) + assert_equal(np.ma.median(x).shape, (), "shape mismatch") + assert_(type(np.ma.median(x)) is not MaskedArray) + + def test_1d_shape_consistency(self): + assert_equal(np.ma.median(array([1, 2, 3], mask=[0, 0, 0])).shape, + np.ma.median(array([1, 2, 3], mask=[0, 1, 0])).shape) + + def test_2d(self): + # Tests median w/ 2D + (n, p) = (101, 30) + x = masked_array(np.linspace(-1., 1., n),) + x[:10] = x[-10:] = masked + z = masked_array(np.empty((n, p), dtype=float)) + z[:, 0] = x[:] + idx = np.arange(len(x)) + for i in range(1, p): + np.random.shuffle(idx) + z[:, i] = x[idx] + assert_equal(median(z[:, 0]), 0) + assert_equal(median(z), 0) + assert_equal(median(z, axis=0), np.zeros(p)) + assert_equal(median(z.T, axis=1), np.zeros(p)) + + def test_2d_waxis(self): + # Tests median w/ 2D arrays and different axis. + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x), 14.5) + assert_(type(np.ma.median(x)) is not MaskedArray) + assert_equal(median(x, axis=0), [13.5, 14.5, 15.5]) + assert_(type(np.ma.median(x, axis=0)) is MaskedArray) + assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0]) + assert_(type(np.ma.median(x, axis=1)) is MaskedArray) + assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1]) + + def test_3d(self): + # Tests median w/ 3D + x = np.ma.arange(24).reshape(3, 4, 2) + x[x % 3 == 0] = masked + assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]]) + x.shape = (4, 3, 2) + assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]]) + x = np.ma.arange(24).reshape(4, 3, 2) + x[x % 5 == 0] = masked + assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]]) + + def test_neg_axis(self): + x = masked_array(np.arange(30).reshape(10, 3)) + x[:3] = x[-3:] = masked + assert_equal(median(x, axis=-1), median(x, axis=1)) + + def test_out_1d(self): + # integer float even odd + for v in (30, 30., 31, 31.): + x = masked_array(np.arange(v)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(())) + r = median(x, out=out) + if v == 30: + assert_equal(out, 14.5) + else: + assert_equal(out, 15.) 
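+            # median with out= must both fill and return the supplied array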
+ assert_(r is out) + assert_(type(r) is MaskedArray) + + def test_out(self): + # integer float even odd + for v in (40, 40., 30, 30.): + x = masked_array(np.arange(v).reshape(10, -1)) + x[:3] = x[-3:] = masked + out = masked_array(np.ones(10)) + r = median(x, axis=1, out=out) + if v == 30: + e = masked_array([0.] * 3 + [10, 13, 16, 19] + [0.] * 3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + else: + e = masked_array([0.] * 3 + [13.5, 17.5, 21.5, 25.5] + [0.] * 3, + mask=[True] * 3 + [False] * 4 + [True] * 3) + assert_equal(r, e) + assert_(r is out) + assert_(type(r) is MaskedArray) + + @pytest.mark.parametrize( + argnames='axis', + argvalues=[ + None, + 1, + (1, ), + (0, 1), + (-3, -1), + ] + ) + def test_keepdims_out(self, axis): + mask = np.zeros((3, 5, 7, 11), dtype=bool) + # Randomly set some elements to True: + w = np.random.random((4, 200)) * np.array(mask.shape)[:, None] + w = w.astype(np.intp) + mask[tuple(w)] = np.nan + d = masked_array(np.ones(mask.shape), mask=mask) + if axis is None: + shape_out = (1,) * d.ndim + else: + axis_norm = normalize_axis_tuple(axis, d.ndim) + shape_out = tuple( + 1 if i in axis_norm else d.shape[i] for i in range(d.ndim)) + out = masked_array(np.empty(shape_out)) + result = median(d, axis=axis, keepdims=True, out=out) + assert result is out + assert_equal(result.shape, shape_out) + + def test_single_non_masked_value_on_axis(self): + data = [[1., 0.], + [0., 3.], + [0., 0.]] + masked_arr = np.ma.masked_equal(data, 0) + expected = [1., 3.] + assert_array_equal(np.ma.median(masked_arr, axis=0), + expected) + + def test_nan(self): + for mask in (False, np.zeros(6, dtype=bool)): + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm.mask = mask + + # scalar result + r = np.ma.median(dm, axis=None) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + r = np.ma.median(dm.ravel(), axis=0) + assert_(np.isscalar(r)) + assert_array_equal(r, np.nan) + + r = np.ma.median(dm, axis=0) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [1, np.nan, 3]) + r = np.ma.median(dm, axis=1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + r = np.ma.median(dm, axis=-1) + assert_equal(type(r), MaskedArray) + assert_array_equal(r, [np.nan, 2]) + + dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]]) + dm[:, 2] = np.ma.masked + assert_array_equal(np.ma.median(dm, axis=None), np.nan) + assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3]) + assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5]) + + def test_out_nan(self): + o = np.ma.masked_array(np.zeros((4,))) + d = np.ma.masked_array(np.ones((3, 4))) + d[2, 1] = np.nan + d[2, 2] = np.ma.masked + assert_equal(np.ma.median(d, 0, out=o), o) + o = np.ma.masked_array(np.zeros((3,))) + assert_equal(np.ma.median(d, 1, out=o), o) + o = np.ma.masked_array(np.zeros(())) + assert_equal(np.ma.median(d, out=o), o) + + def test_nan_behavior(self): + a = np.ma.masked_array(np.arange(24, dtype=float)) + a[::3] = np.ma.masked + a[2] = np.nan + assert_array_equal(np.ma.median(a), np.nan) + assert_array_equal(np.ma.median(a, axis=0), np.nan) + + a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4)) + a.mask = np.arange(a.size) % 2 == 1 + aorig = a.copy() + a[1, 2, 3] = np.nan + a[1, 1, 2] = np.nan + + # no axis + assert_array_equal(np.ma.median(a), np.nan) + assert_(np.isscalar(np.ma.median(a))) + + # axis0 + b = np.ma.median(aorig, axis=0) + b[2, 3] = np.nan + b[1, 2] = np.nan + assert_equal(np.ma.median(a, 0), b) + + # axis1 + b = np.ma.median(aorig, axis=1) + b[1, 3] = np.nan + 
b[1, 2] = np.nan + assert_equal(np.ma.median(a, 1), b) + + # axis02 + b = np.ma.median(aorig, axis=(0, 2)) + b[1] = np.nan + b[2] = np.nan + assert_equal(np.ma.median(a, (0, 2)), b) + + def test_ambigous_fill(self): + # 255 is max value, used as filler for sort + a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8) + a = np.ma.masked_array(a, mask=a == 3) + assert_array_equal(np.ma.median(a, axis=1), 255) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), 255) + + def test_special(self): + for inf in [np.inf, -np.inf]: + a = np.array([[inf, np.nan], [np.nan, np.nan]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a, axis=0), [inf, np.nan]) + assert_equal(np.ma.median(a, axis=1), [inf, np.nan]) + assert_equal(np.ma.median(a), inf) + + a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]]) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_array_equal(np.ma.median(a, axis=1), inf) + assert_array_equal(np.ma.median(a, axis=1).mask, False) + assert_array_equal(np.ma.median(a, axis=0), a[0]) + assert_array_equal(np.ma.median(a), inf) + + # no mask + a = np.array([[inf, inf], [inf, inf]]) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=0), inf) + assert_equal(np.ma.median(a, axis=1), inf) + + a = np.array([[inf, 7, -inf, -9], + [-10, np.nan, np.nan, 5], + [4, np.nan, np.nan, inf]], + dtype=np.float32) + a = np.ma.masked_array(a, mask=np.isnan(a)) + if inf > 0: + assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.]) + assert_equal(np.ma.median(a), 4.5) + else: + assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.]) + assert_equal(np.ma.median(a), -2.5) + assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf]) + + for i in range(10): + for j in range(1, 10): + a = np.array([([np.nan] * i) + ([inf] * j)] * 2) + a = np.ma.masked_array(a, mask=np.isnan(a)) + assert_equal(np.ma.median(a), inf) + assert_equal(np.ma.median(a, axis=1), inf) + assert_equal(np.ma.median(a, axis=0), + ([np.nan] * i) + [inf] * j) + + def test_empty(self): + # empty arrays + a = np.ma.masked_array(np.array([], dtype=float)) + with suppress_warnings() as w: + w.record(RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # multiple dimensions + a = np.ma.masked_array(np.array([], dtype=float, ndmin=3)) + # no axis + with suppress_warnings() as w: + w.record(RuntimeWarning) + warnings.filterwarnings('always', '', RuntimeWarning) + assert_array_equal(np.ma.median(a), np.nan) + assert_(w.log[0].category is RuntimeWarning) + + # axis 0 and 1 + b = np.ma.masked_array(np.array([], dtype=float, ndmin=2)) + assert_equal(np.ma.median(a, axis=0), b) + assert_equal(np.ma.median(a, axis=1), b) + + # axis 2 + b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2)) + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', RuntimeWarning) + assert_equal(np.ma.median(a, axis=2), b) + assert_(w[0].category is RuntimeWarning) + + def test_object(self): + o = np.ma.masked_array(np.arange(7.)) + assert_(type(np.ma.median(o.astype(object))), float) + o[2] = np.nan + assert_(type(np.ma.median(o.astype(object))), float) + + +class TestCov: + + def setup_method(self): + self.data = array(np.random.rand(12)) + + def test_covhelper(self): + x = self.data + # Test not mask output type is a float. 
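+        # _covhelper is a private helper; assuming its current signature, the
+        # second item it returns is the not-mask array that cov() later uses
+        # to count the valid (unmasked) pairs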
+ assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) + assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) + # Test not mask output is equal after casting to float. + mask = x > 0.5 + assert_array_equal( + _covhelper( + np.ma.masked_array(x, mask), rowvar=True + )[1].astype(bool), + ~mask.reshape(1, -1), + ) + assert_array_equal( + _covhelper( + np.ma.masked_array(x, mask), y=x, rowvar=False + )[1].astype(bool), + np.vstack((~mask, ~mask)), + ) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test cov on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.cov(x), cov(x)) + assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(x, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test cov 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.cov(nx), cov(x)) + assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False)) + assert_almost_equal(np.cov(nx, rowvar=False, bias=True), + cov(x, rowvar=False, bias=True)) + # + try: + cov(x, allow_masked=False) + except ValueError: + pass + # + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1])) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False), + cov(x, x[::-1], rowvar=False)) + assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True), + cov(x, x[::-1], rowvar=False, bias=True)) + + def test_2d_with_missing(self): + # Test cov on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + valid = np.logical_not(getmaskarray(x)).astype(int) + frac = np.dot(valid, valid.T) + xf = (x - x.mean(1)[:, None]).filled(0) + assert_almost_equal(cov(x), + np.cov(xf) * (x.shape[1] - 1) / (frac - 1.)) + assert_almost_equal(cov(x, bias=True), + np.cov(xf, bias=True) * x.shape[1] / frac) + frac = np.dot(valid.T, valid) + xf = (x - x.mean(0)).filled(0) + assert_almost_equal(cov(x, rowvar=False), + (np.cov(xf, rowvar=False) * + (x.shape[0] - 1) / (frac - 1.))) + assert_almost_equal(cov(x, rowvar=False, bias=True), + (np.cov(xf, rowvar=False, bias=True) * + x.shape[0] / frac)) + + +class TestCorrcoef: + + def setup_method(self): + self.data = array(np.random.rand(12)) + self.data2 = array(np.random.rand(12)) + + def test_ddof(self): + # ddof raises DeprecationWarning + x, y = self.data, self.data2 + expected = np.corrcoef(x) + expected2 = np.corrcoef(x, y) + with suppress_warnings() as sup: + warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, ddof=-1) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof has no or negligible effect on the function + assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0)) + assert_almost_equal(corrcoef(x, ddof=-1), expected) + assert_almost_equal(corrcoef(x, y, ddof=-1), expected2) + assert_almost_equal(corrcoef(x, ddof=3), expected) + assert_almost_equal(corrcoef(x, y, ddof=3), expected2) + + def test_bias(self): + x, y = self.data, self.data2 + expected = np.corrcoef(x) + # bias raises DeprecationWarning + with suppress_warnings() as sup: + 
warnings.simplefilter("always") + assert_warns(DeprecationWarning, corrcoef, x, y, True, False) + assert_warns(DeprecationWarning, corrcoef, x, y, True, True) + assert_warns(DeprecationWarning, corrcoef, x, bias=False) + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # bias has no or negligible effect on the function + assert_almost_equal(corrcoef(x, bias=1), expected) + + def test_1d_without_missing(self): + # Test cov on 1D variable w/o missing values + x = self.data + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_2d_without_missing(self): + # Test corrcoef on 1 2D variable w/o missing values + x = self.data.reshape(3, 4) + assert_almost_equal(np.corrcoef(x), corrcoef(x)) + assert_almost_equal(np.corrcoef(x, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + + def test_1d_with_missing(self): + # Test corrcoef 1 1D variable w/missing values + x = self.data + x[-1] = masked + x -= x.mean() + nx = x.compressed() + assert_almost_equal(np.corrcoef(nx), corrcoef(x)) + assert_almost_equal(np.corrcoef(nx, rowvar=False), + corrcoef(x, rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True), + corrcoef(x, rowvar=False, bias=True)) + try: + corrcoef(x, allow_masked=False) + except ValueError: + pass + # 2 1D variables w/ missing values + nx = x[1:-1] + assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1])) + assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False), + corrcoef(x, x[::-1], rowvar=False)) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], bias=1)) + assert_almost_equal(np.corrcoef(nx, nx[::-1]), + corrcoef(x, x[::-1], ddof=2)) + + def test_2d_with_missing(self): + # Test corrcoef on 2D variable w/ missing value + x = self.data + x[-1] = masked + x = x.reshape(3, 4) + + test = corrcoef(x) + control = np.corrcoef(x) + assert_almost_equal(test[:-1, :-1], control[:-1, :-1]) + with suppress_warnings() as sup: + sup.filter(DeprecationWarning, "bias and ddof have no effect") + # ddof and bias have no or negligible effect on the function + assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1], + control[:-1, :-1]) + assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1], + control[:-1, :-1]) + + +class TestPolynomial: + # + def test_polyfit(self): + # Tests polyfit + # On ndarrays + x = np.random.rand(10) + y = np.random.rand(20).reshape(-1, 2) + assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3)) + # ON 1D maskedarrays + x = x.view(MaskedArray) + x[0] = masked + y = y.view(MaskedArray) + y[0, 0] = y[-1, -1] = masked + # + (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True) + (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3, + full=True) + for (a, a_) in zip((C, R, K, S, D), (c, r, k, 
s, d)):
+            assert_almost_equal(a, a_)
+        #
+        (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+        #
+        (C, R, K, S, D) = polyfit(x, y, 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+        #
+        w = np.random.rand(10) + 1
+        wo = w.copy()
+        xs = x[1:-1]
+        ys = y[1:-1]
+        ws = w[1:-1]
+        (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w)
+        (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws)
+        assert_equal(w, wo)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+
+    def test_polyfit_with_masked_NaNs(self):
+        x = np.random.rand(10)
+        y = np.random.rand(20).reshape(-1, 2)
+
+        x[0] = np.nan
+        y[-1, -1] = np.nan
+        x = x.view(MaskedArray)
+        y = y.view(MaskedArray)
+        x[0] = masked
+        y[-1, -1] = masked
+
+        (C, R, K, S, D) = polyfit(x, y, 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, :], 3, full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+
+
+class TestArraySetOps:
+
+    def test_unique_onlist(self):
+        # Test unique on list
+        data = [1, 1, 1, 2, 2, 3]
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_(isinstance(test[0], MaskedArray))
+        assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
+        assert_equal(test[1], [0, 3, 5])
+        assert_equal(test[2], [0, 0, 0, 1, 1, 2])
+
+    def test_unique_onmaskedarray(self):
+        # Test unique on masked data w/ use_mask=True
+        data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+        #
+        data.fill_value = 3
+        data = masked_array(data=[1, 1, 1, 2, 2, 3],
+                            mask=[0, 0, 1, 0, 1, 0], fill_value=3)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+
+    def test_unique_allmasked(self):
+        # Test all masked
+        data = masked_array([1, 1, 1], mask=True)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, ], mask=[True]))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0, 0, 0])
+        #
+        # Test masked
+        data = masked
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array(masked))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0])
+
+    def test_ediff1d(self):
+        # Tests ediff1d
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
+        test = ediff1d(x)
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_tobegin(self):
+        # Test ediff1d w/ to_begin
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_begin=masked)
+        control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_begin=[1, 2, 3])
+        control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
+        assert_equal(test, control)
+
assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_toend(self): + # Test ediff1d w/ to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked) + control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3]) + control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_tobegin_toend(self): + # Test ediff1d w/ to_begin and to_end + x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1]) + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked) + control = array([0, 1, 1, 1, 4, 1, 2, 3], + mask=[1, 1, 0, 0, 1, 0, 0, 0]) + assert_equal(test, control) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_ediff1d_ndarray(self): + # Test ediff1d w/ a ndarray + x = np.arange(5) + test = ediff1d(x) + control = array([1, 1, 1, 1], mask=[0, 0, 0, 0]) + assert_equal(test, control) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + # + test = ediff1d(x, to_end=masked, to_begin=masked) + control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1]) + assert_(isinstance(test, MaskedArray)) + assert_equal(test.filled(0), control.filled(0)) + assert_equal(test.mask, control.mask) + + def test_intersect1d(self): + # Test intersect1d + x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + test = intersect1d(x, y) + control = array([1, 3, -1], mask=[0, 0, 1]) + assert_equal(test, control) + + def test_setxor1d(self): + # Test setxor1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7])) + # + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 2, 3]) + b = array([6, 5, 4]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + assert_array_equal([], setxor1d([], [])) + + def test_setxor1d_unique(self): + # Test setxor1d with assume_unique=True + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b, assume_unique=True) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([[1], [8], [2], [3]]) + b = array([[6, 5], [4, 8]]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + + def 
test_isin(self): + # the tests for in1d cover most of isin's behavior + # if in1d is removed, would need to change those tests to test + # isin instead. + a = np.arange(24).reshape([2, 3, 4]) + mask = np.zeros([2, 3, 4]) + mask[1, 2, 0] = 1 + a = array(a, mask=mask) + b = array(data=[0, 10, 20, 30, 1, 3, 11, 22, 33], + mask=[0, 1, 0, 1, 0, 1, 0, 1, 0]) + ec = zeros((2, 3, 4), dtype=bool) + ec[0, 0, 0] = True + ec[0, 0, 1] = True + ec[0, 2, 3] = True + c = isin(a, b) + assert_(isinstance(c, MaskedArray)) + assert_array_equal(c, ec) + # compare results of np.isin to ma.isin + d = np.isin(a, b[~b.mask]) & ~a.mask + assert_array_equal(c, d) + + def test_in1d(self): + # Test in1d + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, True, False, True]) + # + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + test = in1d(a, b) + assert_equal(test, [True, True, False, True, True]) + # + assert_array_equal([], in1d([], [])) + + def test_in1d_invert(self): + # Test in1d's invert parameter + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1]) + b = array([1, 5, -1], mask=[0, 0, 1]) + assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True)) + + assert_array_equal([], in1d([], [], invert=True)) + + def test_union1d(self): + # Test union1d + a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1]) + test = union1d(a, b) + control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1]) + assert_equal(test, control) + + # Tests gh-10340, arguments to union1d should be + # flattened if they are not already 1D + x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]]) + y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1]) + ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1]) + z = union1d(x, y) + assert_equal(z, ez) + # + assert_array_equal([], union1d([], [])) + + def test_setdiff1d(self): + # Test setdiff1d + a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1]) + b = array([2, 4, 3, 3, 2, 1, 5]) + test = setdiff1d(a, b) + assert_equal(test, array([6, 7, -1], mask=[0, 0, 1])) + # + a = arange(10) + b = arange(8) + assert_equal(setdiff1d(a, b), array([8, 9])) + a = array([], np.uint32, mask=[]) + assert_equal(setdiff1d(a, []).dtype, np.uint32) + + def test_setdiff1d_char_array(self): + # Test setdiff1d_charray + a = np.array(['a', 'b', 'c']) + b = np.array(['a', 'b', 's']) + assert_array_equal(setdiff1d(a, b), np.array(['c'])) + + +class TestShapeBase: + + def test_atleast_2d(self): + # Test atleast_2d + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = atleast_2d(a) + assert_equal(b.shape, (1, 3)) + assert_equal(b.mask.shape, b.data.shape) + assert_equal(a.shape, (3,)) + assert_equal(a.mask.shape, a.data.shape) + assert_equal(b.mask.shape, b.data.shape) + + def test_shape_scalar(self): + # the atleast and diagflat function should work with scalars + # GitHub issue #3367 + # Additionally, the atleast functions should accept multiple scalars + # correctly + b = atleast_1d(1.0) + assert_equal(b.shape, (1,)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_1d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1,)) + assert_equal(a.mask.shape, a.shape) + 
assert_equal(a.data.shape, a.shape) + + b = atleast_2d(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_2d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = atleast_3d(1.0) + assert_equal(b.shape, (1, 1, 1)) + assert_equal(b.mask.shape, b.shape) + assert_equal(b.data.shape, b.shape) + + b = atleast_3d(1.0, 2.0) + for a in b: + assert_equal(a.shape, (1, 1, 1)) + assert_equal(a.mask.shape, a.shape) + assert_equal(a.data.shape, a.shape) + + b = diagflat(1.0) + assert_equal(b.shape, (1, 1)) + assert_equal(b.mask.shape, b.data.shape) + + +class TestNDEnumerate: + + def test_ndenumerate_nomasked(self): + ordinary = np.arange(6.).reshape((1, 3, 2)) + empty_mask = np.zeros_like(ordinary, dtype=bool) + with_mask = masked_array(ordinary, mask=empty_mask) + assert_equal(list(np.ndenumerate(ordinary)), + list(ndenumerate(ordinary))) + assert_equal(list(ndenumerate(ordinary)), + list(ndenumerate(with_mask))) + assert_equal(list(ndenumerate(with_mask)), + list(ndenumerate(with_mask, compressed=False))) + + def test_ndenumerate_allmasked(self): + a = masked_all(()) + b = masked_all((100,)) + c = masked_all((2, 3, 4)) + assert_equal(list(ndenumerate(a)), []) + assert_equal(list(ndenumerate(b)), []) + assert_equal(list(ndenumerate(b, compressed=False)), + list(zip(np.ndindex((100,)), 100 * [masked]))) + assert_equal(list(ndenumerate(c)), []) + assert_equal(list(ndenumerate(c, compressed=False)), + list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked]))) + + def test_ndenumerate_mixedmasked(self): + a = masked_array(np.arange(12).reshape((3, 4)), + mask=[[1, 1, 1, 1], + [1, 1, 0, 1], + [0, 0, 0, 0]]) + items = [((1, 2), 6), + ((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)] + assert_equal(list(ndenumerate(a)), items) + assert_equal(len(list(ndenumerate(a, compressed=False))), a.size) + for coordinate, value in ndenumerate(a, compressed=False): + assert_equal(a[coordinate], value) + + +class TestStack: + + def test_stack_1d(self): + a = masked_array([0, 1, 2], mask=[0, 1, 0]) + b = masked_array([9, 8, 7], mask=[1, 0, 0]) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_masks(self): + a = masked_array([0, 1, 2], mask=True) + b = masked_array([9, 8, 7], mask=False) + + c = stack([a, b], axis=0) + assert_equal(c.shape, (2, 3)) + assert_array_equal(a.mask, c[0].mask) + assert_array_equal(b.mask, c[1].mask) + + d = vstack([a, b]) + assert_array_equal(c.data, d.data) + assert_array_equal(c.mask, d.mask) + + c = stack([a, b], axis=1) + assert_equal(c.shape, (3, 2)) + assert_array_equal(a.mask, c[:, 0].mask) + assert_array_equal(b.mask, c[:, 1].mask) + + def test_stack_nd(self): + # 2D + shp = (3, 2) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, 
c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) + + # 4D + shp = (3, 2, 4, 5,) + d1 = np.random.randint(0, 10, shp) + d2 = np.random.randint(0, 10, shp) + m1 = np.random.randint(0, 2, shp).astype(bool) + m2 = np.random.randint(0, 2, shp).astype(bool) + a1 = masked_array(d1, mask=m1) + a2 = masked_array(d2, mask=m2) + + c = stack([a1, a2], axis=0) + c_shp = (2,) + shp + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[0].mask) + assert_array_equal(a2.mask, c[1].mask) + + c = stack([a1, a2], axis=-1) + c_shp = shp + (2,) + assert_equal(c.shape, c_shp) + assert_array_equal(a1.mask, c[..., 0].mask) + assert_array_equal(a2.mask, c[..., 1].mask) diff --git a/python/numpy/ma/tests/test_mrecords.py b/python/numpy/ma/tests/test_mrecords.py new file mode 100644 index 000000000..0da915101 --- /dev/null +++ b/python/numpy/ma/tests/test_mrecords.py @@ -0,0 +1,497 @@ +"""Tests suite for mrecords. + +:author: Pierre Gerard-Marchant +:contact: pierregm_at_uga_dot_edu + +""" +import pickle + +import numpy as np +import numpy.ma as ma +from numpy._core.records import fromarrays as recfromarrays +from numpy._core.records import fromrecords as recfromrecords +from numpy._core.records import recarray +from numpy.ma import masked, nomask +from numpy.ma.mrecords import ( + MaskedRecords, + addfield, + fromarrays, + fromrecords, + fromtextfile, + mrecarray, +) +from numpy.ma.testutils import ( + assert_, + assert_equal, + assert_equal_records, +) +from numpy.testing import temppath + + +class TestMRecords: + + ilist = [1, 2, 3, 4, 5] + flist = [1.1, 2.2, 3.3, 4.4, 5.5] + slist = [b'one', b'two', b'three', b'four', b'five'] + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mask = [0, 1, 0, 0, 1] + base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype) + + def test_byview(self): + # Test creation by view + base = self.base + mbase = base.view(mrecarray) + assert_equal(mbase.recordmask, base.recordmask) + assert_equal_records(mbase._mask, base._mask) + assert_(isinstance(mbase._data, recarray)) + assert_equal_records(mbase._data, base._data.view(recarray)) + for field in ('a', 'b', 'c'): + assert_equal(base[field], mbase[field]) + assert_equal_records(mbase.view(mrecarray), mbase) + + def test_get(self): + # Tests fields retrieval + base = self.base.copy() + mbase = base.view(mrecarray) + # As fields.......... + for field in ('a', 'b', 'c'): + assert_equal(getattr(mbase, field), mbase[field]) + assert_equal(base[field], mbase[field]) + # as elements ....... + mbase_first = mbase[0] + assert_(isinstance(mbase_first, mrecarray)) + assert_equal(mbase_first.dtype, mbase.dtype) + assert_equal(mbase_first.tolist(), (1, 1.1, b'one')) + # Used to be mask, now it's recordmask + assert_equal(mbase_first.recordmask, nomask) + assert_equal(mbase_first._mask.item(), (False, False, False)) + assert_equal(mbase_first['a'], mbase['a'][0]) + mbase_last = mbase[-1] + assert_(isinstance(mbase_last, mrecarray)) + assert_equal(mbase_last.dtype, mbase.dtype) + assert_equal(mbase_last.tolist(), (None, None, None)) + # Used to be mask, now it's recordmask + assert_equal(mbase_last.recordmask, True) + assert_equal(mbase_last._mask.item(), (True, True, True)) + assert_equal(mbase_last['a'], mbase['a'][-1]) + assert_(mbase_last['a'] is masked) + # as slice .......... 
+        mbase_sl = mbase[:2]
+        assert_(isinstance(mbase_sl, mrecarray))
+        assert_equal(mbase_sl.dtype, mbase.dtype)
+        # Used to be mask, now it's recordmask
+        assert_equal(mbase_sl.recordmask, [0, 1])
+        assert_equal_records(mbase_sl.mask,
+                             np.array([(False, False, False),
+                                       (True, True, True)],
+                                      dtype=mbase._mask.dtype))
+        assert_equal_records(mbase_sl, base[:2].view(mrecarray))
+        for field in ('a', 'b', 'c'):
+            assert_equal(getattr(mbase_sl, field), base[:2][field])
+
+    def test_set_fields(self):
+        # Tests setting fields.
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        mbase = mbase.copy()
+        mbase.fill_value = (999999, 1e20, 'N/A')
+        # Change the data, the mask should be conserved
+        mbase.a._data[:] = 5
+        assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
+        assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
+        # Change the elements, and the mask will follow
+        mbase.a = 1
+        assert_equal(mbase['a']._data, [1] * 5)
+        assert_equal(ma.getmaskarray(mbase['a']), [0] * 5)
+        # Used to be _mask, now it's recordmask
+        assert_equal(mbase.recordmask, [False] * 5)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(0, 0, 0),
+                               (0, 1, 1),
+                               (0, 0, 0),
+                               (0, 0, 0),
+                               (0, 1, 1)],
+                              dtype=bool))
+        # Set a field to mask ........................
+        mbase.c = masked
+        # Used to be mask, and it's still mask!
+        assert_equal(mbase.c.mask, [1] * 5)
+        assert_equal(mbase.c.recordmask, [1] * 5)
+        assert_equal(ma.getmaskarray(mbase['c']), [1] * 5)
+        assert_equal(ma.getdata(mbase['c']), [b'N/A'] * 5)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(0, 0, 1),
+                               (0, 1, 1),
+                               (0, 0, 1),
+                               (0, 0, 1),
+                               (0, 1, 1)],
+                              dtype=bool))
+        # Set fields by slices .......................
+        mbase = base.view(mrecarray).copy()
+        mbase.a[3:] = 5
+        assert_equal(mbase.a, [1, 2, 3, 5, 5])
+        assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
+        mbase.b[3:] = masked
+        assert_equal(mbase.b, base['b'])
+        assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
+        # Set fields globally..........................
+        ndtype = [('alpha', '|S1'), ('num', int)]
+        data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
+        rdata = data.view(MaskedRecords)
+        val = ma.array([10, 20, 30], mask=[1, 0, 0])
+
+        rdata['num'] = val
+        assert_equal(rdata.num, val)
+        assert_equal(rdata.num.mask, [1, 0, 0])
+
+    def test_set_fields_mask(self):
+        # Tests setting the mask of a field.
+        base = self.base.copy()
+        # This one already has a mask....
+        mbase = base.view(mrecarray)
+        mbase['a'][-2] = masked
+        assert_equal(mbase.a, [1, 2, 3, 4, 5])
+        assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
+        # This one has not yet
+        mbase = fromarrays([np.arange(5), np.random.rand(5)],
+                           dtype=[('a', int), ('b', float)])
+        mbase['a'][-2] = masked
+        assert_equal(mbase.a, [0, 1, 2, 3, 4])
+        assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])
+
+    def test_set_mask(self):
+        base = self.base.copy()
+        mbase = base.view(mrecarray)
+        # Set the mask to True .......................
+        mbase.mask = masked
+        assert_equal(ma.getmaskarray(mbase['b']), [1] * 5)
+        assert_equal(mbase['a']._mask, mbase['b']._mask)
+        assert_equal(mbase['a']._mask, mbase['c']._mask)
+        assert_equal(mbase._mask.tolist(),
+                     np.array([(1, 1, 1)] * 5, dtype=bool))
+        # Delete the mask ............................
+ mbase.mask = nomask + assert_equal(ma.getmaskarray(mbase['c']), [0] * 5) + assert_equal(mbase._mask.tolist(), + np.array([(0, 0, 0)] * 5, dtype=bool)) + + def test_set_mask_fromarray(self): + base = self.base.copy() + mbase = base.view(mrecarray) + # Sets the mask w/ an array + mbase.mask = [1, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [1, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [1, 0, 0, 0, 1]) + # Yay, once more ! + mbase.mask = [0, 0, 0, 0, 1] + assert_equal(mbase.a.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b.mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c.mask, [0, 0, 0, 0, 1]) + + def test_set_mask_fromfields(self): + mbase = self.base.copy().view(mrecarray) + + nmask = np.array( + [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)], + dtype=[('a', bool), ('b', bool), ('c', bool)]) + mbase.mask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + # Reinitialize and redo + mbase.mask = False + mbase.fieldmask = nmask + assert_equal(mbase.a.mask, [0, 0, 1, 1, 0]) + assert_equal(mbase.b.mask, [1, 1, 0, 0, 0]) + assert_equal(mbase.c.mask, [0, 0, 1, 1, 0]) + + def test_set_elements(self): + base = self.base.copy() + # Set an element to mask ..................... + mbase = base.view(mrecarray).copy() + mbase[-2] = masked + assert_equal( + mbase._mask.tolist(), + np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)], + dtype=bool)) + # Used to be mask, now it's recordmask! + assert_equal(mbase.recordmask, [0, 1, 0, 1, 1]) + # Set slices ................................. + mbase = base.view(mrecarray).copy() + mbase[:2] = (5, 5, 5) + assert_equal(mbase.a._data, [5, 5, 3, 4, 5]) + assert_equal(mbase.a._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'5', b'5', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [0, 0, 0, 0, 1]) + + mbase = base.view(mrecarray).copy() + mbase[:2] = masked + assert_equal(mbase.a._data, [1, 2, 3, 4, 5]) + assert_equal(mbase.a._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5]) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'four', b'five']) + assert_equal(mbase.b._mask, [1, 1, 0, 0, 1]) + + def test_setslices_hardmask(self): + # Tests setting slices w/ hardmask. + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + try: + mbase[-2:] = (5, 5, 5) + assert_equal(mbase.a._data, [1, 2, 3, 5, 5]) + assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5]) + assert_equal(mbase.c._data, + [b'one', b'two', b'three', b'5', b'five']) + assert_equal(mbase.a._mask, [0, 1, 0, 0, 1]) + assert_equal(mbase.b._mask, mbase.a._mask) + assert_equal(mbase.b._mask, mbase.c._mask) + except NotImplementedError: + # OK, not implemented yet... 
+ pass + except AssertionError: + raise + else: + raise Exception("Flexible hard masks should be supported !") + # Not using a tuple should crash + try: + mbase[-2:] = 3 + except (NotImplementedError, TypeError): + pass + else: + raise TypeError("Should have expected a readable buffer object!") + + def test_hardmask(self): + # Test hardmask + base = self.base.copy() + mbase = base.view(mrecarray) + mbase.harden_mask() + assert_(mbase._hardmask) + mbase.mask = nomask + assert_equal_records(mbase._mask, base._mask) + mbase.soften_mask() + assert_(not mbase._hardmask) + mbase.mask = nomask + # So, the mask of a field is no longer set to nomask... + assert_equal_records(mbase._mask, + ma.make_mask_none(base.shape, base.dtype)) + assert_(ma.make_mask(mbase['b']._mask) is nomask) + assert_equal(mbase['a']._mask, mbase['b']._mask) + + def test_pickling(self): + # Test pickling + base = self.base.copy() + mrec = base.view(mrecarray) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + _ = pickle.dumps(mrec, protocol=proto) + mrec_ = pickle.loads(_) + assert_equal(mrec_.dtype, mrec.dtype) + assert_equal_records(mrec_._data, mrec._data) + assert_equal(mrec_._mask, mrec._mask) + assert_equal_records(mrec_._mask, mrec._mask) + + def test_filled(self): + # Test filling the array + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + mrecfilled = mrec.filled() + assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int)) + assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.), + dtype=float)) + assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'), + dtype='|S8')) + + def test_tolist(self): + # Test tolist. + _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int) + _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float) + _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8') + ddtype = [('a', int), ('b', float), ('c', '|S8')] + mrec = fromarrays([_a, _b, _c], dtype=ddtype, + fill_value=(99999, 99999., 'N/A')) + + assert_equal(mrec.tolist(), + [(1, 1.1, None), (2, 2.2, b'two'), + (None, None, b'three')]) + + def test_withnames(self): + # Test the creation w/ format and names + x = mrecarray(1, formats=float, names='base') + x[0]['base'] = 10 + assert_equal(x['base'][0], 10) + + def test_exotic_formats(self): + # Test that 'exotic' formats are processed properly + easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)]) + easy[0] = masked + assert_equal(easy.filled(1).item(), (1, b'1', 1.)) + + solo = mrecarray(1, dtype=[('f0', ' 1: + assert_(eq(np.concatenate((x, y), 1), + concatenate((xm, ym), 1))) + assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1))) + assert_(eq(np.sum(x, 1), sum(x, 1))) + assert_(eq(np.prod(x, 1), product(x, 1))) + + def test_testCI(self): + # Test of conversions and indexing + x1 = np.array([1, 2, 4, 3]) + x2 = array(x1, mask=[1, 0, 0, 0]) + x3 = array(x1, mask=[0, 1, 0, 1]) + x4 = array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
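+        # Note: sort(x2, fill_value=0) sorts the masked entry as if it were 0
+        # (to the front); the eq() helper treats masked positions as equal,
+        # so only the unmasked ordering is actually compared here.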
+ assert_(eq(np.sort(x1), sort(x2, fill_value=0))) + # tests of indexing + assert_(type(x2[1]) is type(x1[1])) + assert_(x1[1] == x2[1]) + assert_(x2[0] is masked) + assert_(eq(x1[2], x2[2])) + assert_(eq(x1[2:5], x2[2:5])) + assert_(eq(x1[:], x2[:])) + assert_(eq(x1[1:], x3[1:])) + x1[2] = 9 + x2[2] = 9 + assert_(eq(x1, x2)) + x1[1:3] = 99 + x2[1:3] = 99 + assert_(eq(x1, x2)) + x2[1] = masked + assert_(eq(x1, x2)) + x2[1:3] = masked + assert_(eq(x1, x2)) + x2[:] = x1 + x2[1] = masked + assert_(allequal(getmask(x2), array([0, 1, 0, 0]))) + x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x3), array([0, 1, 1, 0]))) + x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0]) + assert_(allequal(getmask(x4), array([0, 1, 1, 0]))) + assert_(allequal(x4, array([1, 2, 3, 4]))) + x1 = np.arange(5) * 1.0 + x2 = masked_values(x1, 3.0) + assert_(eq(x1, x2)) + assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask)) + assert_(eq(3.0, x2.fill_value)) + x1 = array([1, 'hello', 2, 3], object) + x2 = np.array([1, 'hello', 2, 3], object) + s1 = x1[1] + s2 = x2[1] + assert_equal(type(s2), str) + assert_equal(type(s1), str) + assert_equal(s1, s2) + assert_(x1[1:1].shape == (0,)) + + def test_testCopySize(self): + # Tests of some subtle points of copying and sizing. + n = [0, 0, 1, 0, 0] + m = make_mask(n) + m2 = make_mask(m) + assert_(m is m2) + m3 = make_mask(m, copy=True) + assert_(m is not m3) + + x1 = np.arange(5) + y1 = array(x1, mask=m) + assert_(y1._data is not x1) + assert_(allequal(x1, y1._data)) + assert_(y1._mask is m) + + y1a = array(y1, copy=0) + # For copy=False, one might expect that the array would just + # passed on, i.e., that it would be "is" instead of "==". + # See gh-4043 for discussion. + assert_(y1a._mask.__array_interface__ == + y1._mask.__array_interface__) + + y2 = array(x1, mask=m3, copy=0) + assert_(y2._mask is m3) + assert_(y2[2] is masked) + y2[2] = 9 + assert_(y2[2] is not masked) + assert_(y2._mask is m3) + assert_(allequal(y2.mask, 0)) + + y2a = array(x1, mask=m, copy=1) + assert_(y2a._mask is not m) + assert_(y2a[2] is masked) + y2a[2] = 9 + assert_(y2a[2] is not masked) + assert_(y2a._mask is not m) + assert_(allequal(y2a.mask, 0)) + + y3 = array(x1 * 1.0, mask=m) + assert_(filled(y3).dtype is (x1 * 1.0).dtype) + + x4 = arange(4) + x4[2] = masked + y4 = resize(x4, (8,)) + assert_(eq(concatenate([x4, x4]), y4)) + assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])) + y5 = repeat(x4, (2, 2, 2, 2), axis=0) + assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3])) + y6 = repeat(x4, 2, axis=0) + assert_(eq(y5, y6)) + + def test_testPut(self): + # Test of put + d = arange(5) + n = [0, 0, 0, 1, 1] + m = make_mask(n) + m2 = m.copy() + x = array(d, mask=m) + assert_(x[3] is masked) + assert_(x[4] is masked) + x[[1, 4]] = [10, 40] + assert_(x._mask is m) + assert_(x[3] is masked) + assert_(x[4] is not masked) + assert_(eq(x, [0, 10, 2, -1, 40])) + + x = array(d, mask=m2, copy=True) + x.put([0, 1, 2], [-1, 100, 200]) + assert_(x._mask is not m2) + assert_(x[3] is masked) + assert_(x[4] is masked) + assert_(eq(x, [-1, 100, 200, 0, 0])) + + def test_testPut2(self): + # Test of put + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is not masked) + x[2:4] = z + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + d = arange(5) + x = array(d, mask=[0, 0, 0, 0, 0]) + y = x[2:4] + z = array([10, 40], mask=[1, 0]) + assert_(x[2] is not masked) + assert_(x[3] is 
not masked) + y[:] = z + assert_(y[0] is masked) + assert_(y[1] is not masked) + assert_(eq(y, [10, 40])) + assert_(x[2] is masked) + assert_(x[3] is not masked) + assert_(eq(x, [0, 1, 10, 40, 4])) + + def test_testMaPut(self): + (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d + m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1] + i = np.nonzero(m)[0] + put(ym, i, zm) + assert_(all(take(ym, i, axis=0) == zm)) + + def test_testOddFeatures(self): + # Test of other odd features + x = arange(20) + x = x.reshape(4, 5) + x.flat[5] = 12 + assert_(x[1, 0] == 12) + z = x + 10j * x + assert_(eq(z.real, x)) + assert_(eq(z.imag, 10 * x)) + assert_(eq((z * conjugate(z)).real, 101 * x * x)) + z.imag[...] = 0.0 + + x = arange(10) + x[3] = masked + assert_(str(x[3]) == str(masked)) + c = x >= 8 + assert_(count(where(c, masked, masked)) == 0) + assert_(shape(where(c, masked, masked)) == c.shape) + z = where(c, x, masked) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is masked) + assert_(z[7] is masked) + assert_(z[8] is not masked) + assert_(z[9] is not masked) + assert_(eq(x, z)) + z = where(c, masked, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + z = masked_where(c, x) + assert_(z.dtype is x.dtype) + assert_(z[3] is masked) + assert_(z[4] is not masked) + assert_(z[7] is not masked) + assert_(z[8] is masked) + assert_(z[9] is masked) + assert_(eq(x, z)) + x = array([1., 2., 3., 4., 5.]) + c = array([1, 1, 1, 0, 0]) + x[2] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + c[0] = masked + z = where(c, x, -x) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2))) + assert_(eq(masked_where(greater_equal(x, 2), x), + masked_greater_equal(x, 2))) + assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2))) + assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2))) + assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))) + assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])) + assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])) + assert_(eq(masked_inside(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 1, 3).mask, + [1, 1, 1, 1, 0])) + assert_(eq(masked_outside(array(list(range(5)), + mask=[0, 1, 0, 0, 0]), 1, 3).mask, + [1, 1, 0, 0, 1])) + assert_(eq(masked_equal(array(list(range(5)), + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 0])) + assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1], + mask=[1, 0, 0, 0, 0]), 2).mask, + [1, 0, 1, 0, 1])) + assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), + [99, 99, 3, 4, 5])) + atest = ones((10, 10, 10), dtype=np.float32) + btest = zeros(atest.shape, MaskType) + ctest = masked_where(btest, atest) + assert_(eq(atest, ctest)) + z = choose(c, (-x, x)) + assert_(eq(z, [1., 2., 0., -4., -5])) + assert_(z[0] is masked) + assert_(z[1] is not masked) + assert_(z[2] is masked) + x = arange(6) + x[5] = masked + y = arange(6) * 10 + y[2] = masked + c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0]) + cm = c.filled(1) + z = where(c, x, y) + zm = where(cm, x, y) + assert_(eq(z, zm)) + assert_(getmask(zm) is nomask) + assert_(eq(zm, [0, 1, 2, 30, 40, 50])) + z = where(c, 
masked, 1) + assert_(eq(z, [99, 99, 99, 1, 1, 1])) + z = where(c, 1, masked) + assert_(eq(z, [99, 1, 1, 99, 99, 99])) + + def test_testMinMax2(self): + # Test of minimum, maximum. + assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])) + assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])) + x = arange(5) + y = arange(5) - 2 + x[3] = masked + y[0] = masked + assert_(eq(minimum(x, y), where(less(x, y), x, y))) + assert_(eq(maximum(x, y), where(greater(x, y), x, y))) + assert_(minimum.reduce(x) == 0) + assert_(maximum.reduce(x) == 4) + + def test_testTakeTransposeInnerOuter(self): + # Test of take, transpose, inner, outer products + x = arange(24) + y = np.arange(24) + x[5:6] = masked + x = x.reshape(2, 3, 4) + y = y.reshape(2, 3, 4) + assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))) + assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))) + assert_(eq(np.inner(filled(x, 0), filled(y, 0)), + inner(x, y))) + assert_(eq(np.outer(filled(x, 0), filled(y, 0)), + outer(x, y))) + y = array(['abc', 1, 'def', 2, 3], object) + y[2] = masked + t = take(y, [0, 3, 4]) + assert_(t[0] == 'abc') + assert_(t[1] == 2) + assert_(t[2] == 3) + + def test_testInplace(self): + # Test of inplace operations and rich comparisons + y = arange(10) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x += 1 + assert_(eq(x, y + 1)) + xm += 1 + assert_(eq(x, y + 1)) + + x = arange(10) + xm = arange(10) + xm[2] = masked + x -= 1 + assert_(eq(x, y - 1)) + xm -= 1 + assert_(eq(xm, y - 1)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x *= 2.0 + assert_(eq(x, y * 2)) + xm *= 2.0 + assert_(eq(xm, y * 2)) + + x = arange(10) * 2 + xm = arange(10) + xm[2] = masked + x //= 2 + assert_(eq(x, y)) + xm //= 2 + assert_(eq(x, y)) + + x = arange(10) * 1.0 + xm = arange(10) * 1.0 + xm[2] = masked + x /= 2.0 + assert_(eq(x, y / 2.0)) + xm /= arange(10) + assert_(eq(xm, ones((10,)))) + + x = arange(10).astype(np.float32) + xm = arange(10) + xm[2] = masked + x += 1. + assert_(eq(x, y + 1.)) + + def test_testPickle(self): + # Test of pickling + x = arange(12) + x[4:10:2] = masked + x = x.reshape(4, 3) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + s = pickle.dumps(x, protocol=proto) + y = pickle.loads(s) + assert_(eq(x, y)) + + def test_testMasked(self): + # Test of masked element + xx = arange(6) + xx[1] = masked + assert_(str(masked) == '--') + assert_(xx[1] is masked) + assert_equal(filled(xx[1], 0), 0) + + def test_testAverage1(self): + # Test of average. + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + assert_(eq(2.0, average(ott, axis=0))) + assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.]))) + result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True) + assert_(eq(2.0, result)) + assert_(wts == 4.0) + ott[:] = masked + assert_(average(ott, axis=0) is masked) + ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0]) + ott = ott.reshape(2, 2) + ott[:, 1] = masked + assert_(eq(average(ott, axis=0), [2.0, 0.0])) + assert_(average(ott, axis=1)[0] is masked) + assert_(eq([2., 0.], average(ott, axis=0))) + result, wts = average(ott, axis=0, returned=True) + assert_(eq(wts, [1., 0.])) + + def test_testAverage2(self): + # More tests of average. + w1 = [0, 1, 1, 1, 1, 0] + w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]] + x = arange(6) + assert_(allclose(average(x, axis=0), 2.5)) + assert_(allclose(average(x, axis=0, weights=w1), 2.5)) + y = array([arange(6), 2.0 * arange(6)]) + assert_(allclose(average(y, None), + np.add.reduce(np.arange(6)) * 3. 
/ 12.)) + assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.)) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0])) + assert_(allclose(average(y, None, weights=w2), 20. / 6.)) + assert_(allclose(average(y, axis=0, weights=w2), + [0., 1., 2., 3., 4., 10.])) + assert_(allclose(average(y, axis=1), + [average(x, axis=0), average(x, axis=0) * 2.0])) + m1 = zeros(6) + m2 = [0, 0, 1, 1, 0, 0] + m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]] + m4 = ones(6) + m5 = [0, 1, 1, 1, 1, 1] + assert_(allclose(average(masked_array(x, m1), axis=0), 2.5)) + assert_(allclose(average(masked_array(x, m2), axis=0), 2.5)) + assert_(average(masked_array(x, m4), axis=0) is masked) + assert_equal(average(masked_array(x, m5), axis=0), 0.0) + assert_equal(count(average(masked_array(x, m4), axis=0)), 0) + z = masked_array(y, m3) + assert_(allclose(average(z, None), 20. / 6.)) + assert_(allclose(average(z, axis=0), + [0., 1., 99., 99., 4.0, 7.5])) + assert_(allclose(average(z, axis=1), [2.5, 5.0])) + assert_(allclose(average(z, axis=0, weights=w2), + [0., 1., 99., 99., 4.0, 10.0])) + + a = arange(6) + b = arange(6) * 3 + r1, w1 = average([[a, b], [b, a]], axis=1, returned=True) + assert_equal(shape(r1), shape(w1)) + assert_equal(r1.shape, w1.shape) + r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), returned=True) + assert_equal(shape(w2), shape(r2)) + r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True) + assert_(shape(w2) == shape(r2)) + a2d = array([[1, 2], [0, 4]], float) + a2dm = masked_array(a2d, [[0, 0], [1, 0]]) + a2da = average(a2d, axis=0) + assert_(eq(a2da, [0.5, 3.0])) + a2dma = average(a2dm, axis=0) + assert_(eq(a2dma, [1.0, 3.0])) + a2dma = average(a2dm, axis=None) + assert_(eq(a2dma, 7. 
/ 3.)) + a2dma = average(a2dm, axis=1) + assert_(eq(a2dma, [1.5, 4.0])) + + def test_testToPython(self): + assert_equal(1, int(array(1))) + assert_equal(1.0, float(array(1))) + assert_equal(1, int(array([[[1]]]))) + assert_equal(1.0, float(array([[1]]))) + assert_raises(TypeError, float, array([1, 1])) + assert_raises(ValueError, bool, array([0, 1])) + assert_raises(ValueError, bool, array([0, 0], mask=[0, 1])) + + def test_testScalarArithmetic(self): + xm = array(0, mask=1) + # TODO FIXME: Find out what the following raises a warning in r8247 + with np.errstate(divide='ignore'): + assert_((1 / array(0)).mask) + assert_((1 + xm).mask) + assert_((-xm).mask) + assert_((-xm).mask) + assert_(maximum(xm, xm).mask) + assert_(minimum(xm, xm).mask) + assert_(xm.filled().dtype is xm._data.dtype) + x = array(0, mask=0) + assert_(x.filled() == x._data) + assert_equal(str(xm), str(masked_print_option)) + + def test_testArrayMethods(self): + a = array([1, 3, 2]) + assert_(eq(a.any(), a._data.any())) + assert_(eq(a.all(), a._data.all())) + assert_(eq(a.argmax(), a._data.argmax())) + assert_(eq(a.argmin(), a._data.argmin())) + assert_(eq(a.choose(0, 1, 2, 3, 4), + a._data.choose(0, 1, 2, 3, 4))) + assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))) + assert_(eq(a.conj(), a._data.conj())) + assert_(eq(a.conjugate(), a._data.conjugate())) + m = array([[1, 2], [3, 4]]) + assert_(eq(m.diagonal(), m._data.diagonal())) + assert_(eq(a.sum(), a._data.sum())) + assert_(eq(a.take([1, 2]), a._data.take([1, 2]))) + assert_(eq(m.transpose(), m._data.transpose())) + + def test_testArrayAttributes(self): + a = array([1, 3, 2]) + assert_equal(a.ndim, 1) + + def test_testAPI(self): + assert_(not [m for m in dir(np.ndarray) + if m not in dir(MaskedArray) and + not m.startswith('_')]) + + def test_testSingleElementSubscript(self): + a = array([1, 3, 2]) + b = array([1, 3, 2], mask=[1, 0, 1]) + assert_equal(a[0].shape, ()) + assert_equal(b[0].shape, ()) + assert_equal(b[1].shape, ()) + + def test_assignment_by_condition(self): + # Test for gh-18951 + a = array([1, 2, 3, 4], mask=[1, 0, 1, 0]) + c = a >= 3 + a[c] = 5 + assert_(a[2] is masked) + + def test_assignment_by_condition_2(self): + # gh-19721 + a = masked_array([0, 1], mask=[False, False]) + b = masked_array([0, 1], mask=[True, True]) + mask = a < 1 + b[mask] = a[mask] + expected_mask = [False, True] + assert_equal(b.mask, expected_mask) + + +class TestUfuncs: + def setup_method(self): + self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6), + array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),) + + def test_testUfuncRegression(self): + f_invalid_ignore = [ + 'sqrt', 'arctanh', 'arcsin', 'arccos', + 'arccosh', 'arctanh', 'log', 'log10', 'divide', + 'true_divide', 'floor_divide', 'remainder', 'fmod'] + for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate', + 'sin', 'cos', 'tan', + 'arcsin', 'arccos', 'arctan', + 'sinh', 'cosh', 'tanh', + 'arcsinh', + 'arccosh', + 'arctanh', + 'absolute', 'fabs', 'negative', + 'floor', 'ceil', + 'logical_not', + 'add', 'subtract', 'multiply', + 'divide', 'true_divide', 'floor_divide', + 'remainder', 'fmod', 'hypot', 'arctan2', + 'equal', 'not_equal', 'less_equal', 'greater_equal', + 'less', 'greater', + 'logical_and', 'logical_or', 'logical_xor']: + try: + uf = getattr(umath, f) + except AttributeError: + uf = getattr(fromnumeric, f) + mf = getattr(np.ma, f) + args = self.d[:uf.nin] + with np.errstate(): + if f in f_invalid_ignore: + np.seterr(invalid='ignore') + if f in ['arctanh', 'log', 'log10']: + 
np.seterr(divide='ignore') + ur = uf(*args) + mr = mf(*args) + assert_(eq(ur.filled(0), mr.filled(0), f)) + assert_(eqmask(ur.mask, mr.mask)) + + def test_reduce(self): + a = self.d[0] + assert_(not alltrue(a, axis=0)) + assert_(sometrue(a, axis=0)) + assert_equal(sum(a[:3], axis=0), 0) + assert_equal(product(a, axis=0), 0) + + def test_minmax(self): + a = arange(1, 13).reshape(3, 4) + amask = masked_where(a < 5, a) + assert_equal(amask.max(), a.max()) + assert_equal(amask.min(), 5) + assert_((amask.max(0) == a.max(0)).all()) + assert_((amask.min(0) == [5, 6, 7, 8]).all()) + assert_(amask.max(1)[0].mask) + assert_(amask.min(1)[0].mask) + + def test_nonzero(self): + for t in "?bhilqpBHILQPfdgFDGO": + x = array([1, 0, 2, 0], mask=[0, 0, 1, 1]) + assert_(eq(nonzero(x), [0])) + + +class TestArrayMethods: + + def setup_method(self): + x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928, + 8.43, 7.78, 9.865, 5.878, 8.979, 4.732, + 3.012, 6.022, 5.095, 3.116, 5.238, 3.957, + 6.04, 9.63, 7.712, 3.382, 4.489, 6.479, + 7.189, 9.645, 5.395, 4.961, 9.894, 2.893, + 7.357, 9.828, 6.272, 3.758, 6.693, 0.993]) + X = x.reshape(6, 6) + XX = x.reshape(3, 2, 2, 3) + + m = np.array([0, 1, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 1, + 0, 0, 0, 1, 1, 1, + 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0]) + mx = array(data=x, mask=m) + mX = array(data=X, mask=m.reshape(X.shape)) + mXX = array(data=XX, mask=m.reshape(XX.shape)) + + self.d = (x, X, XX, m, mx, mX, mXX) + + def test_trace(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXdiag = mX.diagonal() + assert_equal(mX.trace(), mX.diagonal().compressed().sum()) + assert_(eq(mX.trace(), + X.trace() - sum(mXdiag.mask * X.diagonal(), + axis=0))) + + def test_clip(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + clipped = mx.clip(2, 8) + assert_(eq(clipped.mask, mx.mask)) + assert_(eq(clipped._data, x.clip(2, 8))) + assert_(eq(clipped._data, mx._data.clip(2, 8))) + + def test_ptp(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + (n, m) = X.shape + # print(type(mx), mx.compressed()) + # raise Exception() + assert_equal(mx.ptp(), np.ptp(mx.compressed())) + rows = np.zeros(n, np.float64) + cols = np.zeros(m, np.float64) + for k in range(m): + cols[k] = np.ptp(mX[:, k].compressed()) + for k in range(n): + rows[k] = np.ptp(mX[k].compressed()) + assert_(eq(mX.ptp(0), cols)) + assert_(eq(mX.ptp(1), rows)) + + def test_swapaxes(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXswapped = mX.swapaxes(0, 1) + assert_(eq(mXswapped[-1], mX[:, -1])) + mXXswapped = mXX.swapaxes(0, 2) + assert_equal(mXXswapped.shape, (2, 2, 3, 3)) + + def test_cumprod(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumprod(0) + assert_(eq(mXcp._data, mX.filled(1).cumprod(0))) + mXcp = mX.cumprod(1) + assert_(eq(mXcp._data, mX.filled(1).cumprod(1))) + + def test_cumsum(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + mXcp = mX.cumsum(0) + assert_(eq(mXcp._data, mX.filled(0).cumsum(0))) + mXcp = mX.cumsum(1) + assert_(eq(mXcp._data, mX.filled(0).cumsum(1))) + + def test_varstd(self): + (x, X, XX, m, mx, mX, mXX,) = self.d + assert_(eq(mX.var(axis=None), mX.compressed().var())) + assert_(eq(mX.std(axis=None), mX.compressed().std())) + assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape)) + assert_(eq(mX.var().shape, X.var().shape)) + (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1)) + for k in range(6): + assert_(eq(mXvar1[k], mX[k].compressed().var())) + assert_(eq(mXvar0[k], mX[:, k].compressed().var())) + assert_(eq(np.sqrt(mXvar0[k]), + mX[:, k].compressed().std())) + + +def eqmask(m1, 
m2):
+    if m1 is nomask:
+        return m2 is nomask
+    if m2 is nomask:
+        return m1 is nomask
+    return (m1 == m2).all()
diff --git a/python/numpy/ma/tests/test_regression.py b/python/numpy/ma/tests/test_regression.py
new file mode 100644
index 000000000..025387ba4
--- /dev/null
+++ b/python/numpy/ma/tests/test_regression.py
@@ -0,0 +1,100 @@
+import numpy as np
+from numpy.testing import (
+    assert_,
+    assert_allclose,
+    assert_array_equal,
+    suppress_warnings,
+)
+
+
+class TestRegression:
+    def test_masked_array_create(self):
+        # Ticket #17
+        x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
+                               mask=[0, 0, 0, 1, 1, 1, 0, 0])
+        assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
+
+    def test_masked_array(self):
+        # Ticket #61
+        np.ma.array(1, mask=[1])
+
+    def test_mem_masked_where(self):
+        # Ticket #62
+        from numpy.ma import MaskType, masked_where
+        a = np.zeros((1, 1))
+        b = np.zeros(a.shape, MaskType)
+        c = masked_where(b, a)
+        a - c
+
+    def test_masked_array_multiply(self):
+        # Ticket #254
+        a = np.ma.zeros((4, 1))
+        a[2, 0] = np.ma.masked
+        b = np.zeros((4, 2))
+        a * b
+        b * a
+
+    def test_masked_array_repeat(self):
+        # Ticket #271
+        np.ma.array([1], mask=False).repeat(10)
+
+    def test_masked_array_repr_unicode(self):
+        # Ticket #1256
+        repr(np.ma.array("Unicode"))
+
+    def test_atleast_2d(self):
+        # Ticket #1559
+        a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
+        b = np.atleast_2d(a)
+        assert_(a.mask.ndim == 1)
+        assert_(b.mask.ndim == 2)
+
+    def test_set_fill_value_unicode_py3(self):
+        # Ticket #2733
+        a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
+        a.fill_value = 'X'
+        assert_(a.fill_value == 'X')
+
+    def test_var_sets_maskedarray_scalar(self):
+        # Issue gh-2757
+        a = np.ma.array(np.arange(5), mask=True)
+        mout = np.ma.array(-1, dtype=float)
+        a.var(out=mout)
+        assert_(mout._data == 0)
+
+    def test_ddof_corrcoef(self):
+        # See gh-3336
+        x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
+        y = np.array([2, 2.5, 3.1, 3, 5])
+        # This test can be removed after the deprecation.
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            r0 = np.ma.corrcoef(x, y, ddof=0)
+            r1 = np.ma.corrcoef(x, y, ddof=1)
+            # ddof should not have an effect (it gets cancelled out)
+            assert_allclose(r0.data, r1.data)
+
+    def test_mask_not_backmangled(self):
+        # See gh-10314. Test case taken from gh-3140.
+        a = np.ma.MaskedArray([1., 2.], mask=[False, False])
+        assert_(a.mask.shape == (2,))
+        b = np.tile(a, (2, 1))
+        # Check that the above no longer changes a.shape to (1, 2)
+        assert_(a.mask.shape == (2,))
+        assert_(b.shape == (2, 2))
+        assert_(b.mask.shape == (2, 2))
+
+    def test_empty_list_on_structured(self):
+        # See gh-12464. Indexing with empty list should give empty result.
+        ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
+        assert_array_equal(ma[[]], ma[:0])
+
+    def test_masked_array_tobytes_fortran(self):
+        ma = np.ma.arange(4).reshape((2, 2))
+        assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes())
+
+    def test_structured_array(self):
+        # see gh-22041
+        np.ma.array((1, (b"", b"")),
+                    dtype=[("x", np.int_),
+                           ("y", [("i", np.void), ("j", np.void)])])
diff --git a/python/numpy/ma/tests/test_subclassing.py b/python/numpy/ma/tests/test_subclassing.py
new file mode 100644
index 000000000..3364e5630
--- /dev/null
+++ b/python/numpy/ma/tests/test_subclassing.py
@@ -0,0 +1,469 @@
+"""Test suite for MaskedArray & subclassing.
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+
+"""
+import numpy as np
+from numpy.lib.mixins import NDArrayOperatorsMixin
+from numpy.ma.core import (
+    MaskedArray,
+    add,
+    arange,
+    array,
+    asanyarray,
+    asarray,
+    divide,
+    hypot,
+    log,
+    masked,
+    masked_array,
+    nomask,
+)
+from numpy.ma.testutils import assert_equal
+from numpy.testing import assert_, assert_raises
+
+
+def assert_startswith(a, b):
+    # produces a better error message than assert_(a.startswith(b))
+    assert_equal(a[:len(b)], b)
+
+
+class SubArray(np.ndarray):
+    # Defines a generic np.ndarray subclass that stores some metadata
+    # in the dictionary `info`.
+    def __new__(cls, arr, info={}):
+        x = np.asanyarray(arr).view(cls)
+        x.info = info.copy()
+        return x
+
+    def __array_finalize__(self, obj):
+        super().__array_finalize__(obj)
+        self.info = getattr(obj, 'info', {}).copy()
+
+    def __add__(self, other):
+        result = super().__add__(other)
+        result.info['added'] = result.info.get('added', 0) + 1
+        return result
+
+    def __iadd__(self, other):
+        result = super().__iadd__(other)
+        result.info['iadded'] = result.info.get('iadded', 0) + 1
+        return result
+
+
+subarray = SubArray
+
+
+class SubMaskedArray(MaskedArray):
+    """Pure subclass of MaskedArray, keeping some info on subclass."""
+    def __new__(cls, info=None, **kwargs):
+        obj = super().__new__(cls, **kwargs)
+        obj._optinfo['info'] = info
+        return obj
+
+
+class MSubArray(SubArray, MaskedArray):
+
+    def __new__(cls, data, info={}, mask=nomask):
+        subarr = SubArray(data, info)
+        _data = MaskedArray.__new__(cls, data=subarr, mask=mask)
+        _data.info = subarr.info
+        return _data
+
+    @property
+    def _series(self):
+        _view = self.view(MaskedArray)
+        _view._sharedmask = False
+        return _view
+
+
+msubarray = MSubArray
+
+
+# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
+# setting to non-class values (and thus np.ma.core.masked_print_option)
+# and overrides __array_wrap__, updating the info dict, to check that this
+# doesn't get destroyed by MaskedArray._update_from. But this one also needs
+# its own iterator...
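+# For instance, c = ComplicatedSubArray(np.arange(3)) should give back a
+# ComplicatedSubArray scalar from c.flat[1] (not a bare numpy scalar),
+# because CSAIterator routes element access back through the subclass.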
+class CSAIterator: + """ + Flat iterator object that uses its own setter/getter + (works around ndarray.flat not propagating subclass setters/getters + see https://github.com/numpy/numpy/issues/4564) + roughly following MaskedIterator + """ + def __init__(self, a): + self._original = a + self._dataiter = a.view(np.ndarray).flat + + def __iter__(self): + return self + + def __getitem__(self, indx): + out = self._dataiter.__getitem__(indx) + if not isinstance(out, np.ndarray): + out = out.__array__() + out = out.view(type(self._original)) + return out + + def __setitem__(self, index, value): + self._dataiter[index] = self._original._validate_input(value) + + def __next__(self): + return next(self._dataiter).__array__().view(type(self._original)) + + +class ComplicatedSubArray(SubArray): + + def __str__(self): + return f'myprefix {self.view(SubArray)} mypostfix' + + def __repr__(self): + # Return a repr that does not start with 'name(' + return f'<{self.__class__.__name__} {self}>' + + def _validate_input(self, value): + if not isinstance(value, ComplicatedSubArray): + raise ValueError("Can only set to MySubArray values") + return value + + def __setitem__(self, item, value): + # validation ensures direct assignment with ndarray or + # masked_print_option will fail + super().__setitem__(item, self._validate_input(value)) + + def __getitem__(self, item): + # ensure getter returns our own class also for scalars + value = super().__getitem__(item) + if not isinstance(value, np.ndarray): # scalar + value = value.__array__().view(ComplicatedSubArray) + return value + + @property + def flat(self): + return CSAIterator(self) + + @flat.setter + def flat(self, value): + y = self.ravel() + y[:] = value + + def __array_wrap__(self, obj, context=None, return_scalar=False): + obj = super().__array_wrap__(obj, context, return_scalar) + if context is not None and context[0] is np.multiply: + obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1 + + return obj + + +class WrappedArray(NDArrayOperatorsMixin): + """ + Wrapping a MaskedArray rather than subclassing to test that + ufunc deferrals are commutative. + See: https://github.com/numpy/numpy/issues/15200) + """ + __slots__ = ('_array', 'attrs') + __array_priority__ = 20 + + def __init__(self, array, **attrs): + self._array = array + self.attrs = attrs + + def __repr__(self): + return f"{self.__class__.__name__}(\n{self._array}\n{self.attrs}\n)" + + def __array__(self, dtype=None, copy=None): + return np.asarray(self._array) + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if method == '__call__': + inputs = [arg._array if isinstance(arg, self.__class__) else arg + for arg in inputs] + return self.__class__(ufunc(*inputs, **kwargs), **self.attrs) + else: + return NotImplemented + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def setup_method(self): + x = np.arange(5, dtype='float') + mx = msubarray(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_data_subclassing(self): + # Tests whether the subclass is kept. 
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xsub = SubArray(x) + xmsub = masked_array(xsub, mask=m) + assert_(isinstance(xmsub, MaskedArray)) + assert_equal(xmsub._data, xsub) + assert_(isinstance(xmsub._data, SubArray)) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, subarray)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), msubarray)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a msubarray + assert_(isinstance(add(mx, mx), msubarray)) + assert_(isinstance(add(mx, x), msubarray)) + # Result should work + assert_equal(add(mx, x), mx + x) + assert_(isinstance(add(mx, mx)._data, subarray)) + assert_(isinstance(add.outer(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, mx), msubarray)) + assert_(isinstance(hypot(mx, x), msubarray)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), msubarray)) + assert_(isinstance(divide(mx, x), msubarray)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + def test_attributepropagation(self): + x = array(arange(5), mask=[0] + [1] * 4) + my = masked_array(subarray(x)) + ym = msubarray(x) + # + z = (my + 1) + assert_(isinstance(z, MaskedArray)) + assert_(not isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_equal(z._data.info, {}) + # + z = (ym + 1) + assert_(isinstance(z, MaskedArray)) + assert_(isinstance(z, MSubArray)) + assert_(isinstance(z._data, SubArray)) + assert_(z._data.info['added'] > 0) + # Test that inplace methods from data get used (gh-4617) + ym += 1 + assert_(isinstance(ym, MaskedArray)) + assert_(isinstance(ym, MSubArray)) + assert_(isinstance(ym._data, SubArray)) + assert_(ym._data.info['iadded'] > 0) + # + ym._set_mask([1, 0, 0, 0, 1]) + assert_equal(ym._mask, [1, 0, 0, 0, 1]) + ym._series._set_mask([0, 0, 0, 0, 1]) + assert_equal(ym._mask, [0, 0, 0, 0, 1]) + # + xsub = subarray(x, info={'name': 'x'}) + mxsub = masked_array(xsub) + assert_(hasattr(mxsub, 'info')) + assert_equal(mxsub.info, xsub.info) + + def test_subclasspreservation(self): + # Checks that masked_array(...,subok=True) preserves the class. 
+ x = np.arange(5) + m = [0, 0, 1, 0, 0] + xinfo = list(zip(x, m)) + xsub = MSubArray(x, mask=m, info={'xsub': xinfo}) + # + mxsub = masked_array(xsub, subok=False) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = asarray(xsub) + assert_(not isinstance(mxsub, MSubArray)) + assert_(isinstance(mxsub, MaskedArray)) + assert_equal(mxsub._mask, m) + # + mxsub = masked_array(xsub, subok=True) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, xsub._mask) + # + mxsub = asanyarray(xsub) + assert_(isinstance(mxsub, MSubArray)) + assert_equal(mxsub.info, xsub.info) + assert_equal(mxsub._mask, m) + + def test_subclass_items(self): + """test that getter and setter go via baseclass""" + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + # getter should return a ComplicatedSubArray, even for single item + # first check we wrote ComplicatedSubArray correctly + assert_(isinstance(xcsub[1], ComplicatedSubArray)) + assert_(isinstance(xcsub[1, ...], ComplicatedSubArray)) + assert_(isinstance(xcsub[1:4], ComplicatedSubArray)) + + # now that it propagates inside the MaskedArray + assert_(isinstance(mxcsub[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub[1, ...].data, ComplicatedSubArray)) + assert_(mxcsub[0] is masked) + assert_(isinstance(mxcsub[0, ...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray)) + + # also for flattened version (which goes via MaskedIterator) + assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray)) + assert_(mxcsub.flat[0] is masked) + assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray)) + + # setter should only work with ComplicatedSubArray input + # first check we wrote ComplicatedSubArray correctly + assert_raises(ValueError, xcsub.__setitem__, 1, x[4]) + # now that it propagates inside the MaskedArray + assert_raises(ValueError, mxcsub.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4]) + mxcsub[1] = xcsub[4] + mxcsub[1:4] = xcsub[1:4] + # also for flattened version (which goes via MaskedIterator) + assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4]) + assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4]) + mxcsub.flat[1] = xcsub[4] + mxcsub.flat[1:4] = xcsub[1:4] + + def test_subclass_nomask_items(self): + x = np.arange(5) + xcsub = ComplicatedSubArray(x) + mxcsub_nomask = masked_array(xcsub) + + assert_(isinstance(mxcsub_nomask[1, ...].data, ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0, ...].data, ComplicatedSubArray)) + + assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray)) + assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray)) + + def test_subclass_repr(self): + """test that repr uses the name of the subclass + and 'array' for np.ndarray""" + x = np.arange(5) + mx = masked_array(x, mask=[True, False, True, False, False]) + assert_startswith(repr(mx), 'masked_array') + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_startswith(repr(mxsub), + f'masked_{SubArray.__name__}(data=[--, 1, --, 3, 4]') + + def test_subclass_str(self): + """test str with subclass that has overridden str, setitem""" + # first without override + x = np.arange(5) + xsub = SubArray(x) + mxsub = masked_array(xsub, mask=[True, False, True, False, False]) + assert_equal(str(mxsub), '[-- 1 -- 3 4]') + + xcsub 
= ComplicatedSubArray(x) + assert_raises(ValueError, xcsub.__setitem__, 0, + np.ma.core.masked_print_option) + mxcsub = masked_array(xcsub, mask=[True, False, True, False, False]) + assert_equal(str(mxcsub), 'myprefix [-- 1 -- 3 4] mypostfix') + + def test_pure_subclass_info_preservation(self): + # Test that ufuncs and methods conserve extra information consistently; + # see gh-7122. + arr1 = SubMaskedArray('test', data=[1, 2, 3, 4, 5, 6]) + arr2 = SubMaskedArray(data=[0, 1, 2, 3, 4, 5]) + diff1 = np.subtract(arr1, arr2) + assert_('info' in diff1._optinfo) + assert_(diff1._optinfo['info'] == 'test') + diff2 = arr1 - arr2 + assert_('info' in diff2._optinfo) + assert_(diff2._optinfo['info'] == 'test') + + +class ArrayNoInheritance: + """Quantity-like class that does not inherit from ndarray""" + def __init__(self, data, units): + self.magnitude = data + self.units = units + + def __getattr__(self, attr): + return getattr(self.magnitude, attr) + + +def test_array_no_inheritance(): + data_masked = np.ma.array([1, 2, 3], mask=[True, False, True]) + data_masked_units = ArrayNoInheritance(data_masked, 'meters') + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test sharing the mask + data_masked.mask = [True, False, False] + assert_equal(data_masked.mask, new_array.mask) + assert_(new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, copy=True) + assert_equal(data_masked.data, new_array.data) + assert_equal(data_masked.mask, new_array.mask) + # Test that the mask is not shared when copy=True + data_masked.mask = [True, False, True] + assert_equal([True, False, False], new_array.mask) + assert_(not new_array.sharedmask) + + # Get the masked representation of the Quantity-like class + new_array = np.ma.array(data_masked_units, keep_mask=False) + assert_equal(data_masked.data, new_array.data) + # The change did not affect the original mask + assert_equal(data_masked.mask, [True, False, True]) + # Test that the mask is False and not shared when keep_mask=False + assert_(not new_array.mask) + assert_(not new_array.sharedmask) + + +class TestClassWrapping: + # Test suite for classes that wrap MaskedArrays + + def setup_method(self): + m = np.ma.masked_array([1, 3, 5], mask=[False, True, False]) + wm = WrappedArray(m) + self.data = (m, wm) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (m, wm) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(np.log(wm), WrappedArray)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (m, wm) = self.data + # Result should be a WrappedArray + assert_(isinstance(np.add(wm, wm), WrappedArray)) + assert_(isinstance(np.add(m, wm), WrappedArray)) + assert_(isinstance(np.add(wm, m), WrappedArray)) + # add and '+' should call the same ufunc + assert_equal(np.add(m, wm), m + wm) + assert_(isinstance(np.hypot(m, wm), WrappedArray)) + assert_(isinstance(np.hypot(wm, m), WrappedArray)) + # Test domained binary operations + assert_(isinstance(np.divide(wm, m), WrappedArray)) + assert_(isinstance(np.divide(m, wm), WrappedArray)) + assert_equal(np.divide(wm, m) * m, np.divide(m, m) * wm) + # Test broadcasting + m2 = np.stack([m, m]) + assert_(isinstance(np.divide(wm, m2), WrappedArray)) + assert_(isinstance(np.divide(m2, wm), WrappedArray)) + assert_equal(np.divide(m2, 
wm), np.divide(wm, m2))
+
+    def test_mixins_have_slots(self):
+        mixin = NDArrayOperatorsMixin()
+        # Should raise an error
+        assert_raises(AttributeError, mixin.__setattr__, "not_a_real_attr", 1)
+
+        m = np.ma.masked_array([1, 3, 5], mask=[False, True, False])
+        wm = WrappedArray(m)
+        assert_raises(AttributeError, wm.__setattr__, "not_an_attr", 2)
diff --git a/python/numpy/ma/testutils.py b/python/numpy/ma/testutils.py
new file mode 100644
index 000000000..bffcc34b7
--- /dev/null
+++ b/python/numpy/ma/testutils.py
@@ -0,0 +1,294 @@
+"""Miscellaneous functions for testing masked arrays and subclasses
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+
+"""
+import operator
+
+import numpy as np
+import numpy._core.umath as umath
+import numpy.testing
+from numpy import ndarray
+from numpy.testing import (  # noqa: F401
+    assert_,
+    assert_allclose,
+    assert_array_almost_equal_nulp,
+    assert_raises,
+    build_err_msg,
+)
+
+from .core import filled, getmask, mask_or, masked, masked_array, nomask
+
+__all__masked = [
+    'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal',
+    'assert_array_approx_equal', 'assert_array_compare',
+    'assert_array_equal', 'assert_array_less', 'assert_close',
+    'assert_equal', 'assert_equal_records', 'assert_mask_equal',
+    'assert_not_equal', 'fail_if_array_equal',
+    ]
+
+# Include some normal test functions to avoid breaking other projects who
+# have mistakenly included them from this file. SciPy is one. That is
+# unfortunate, as some of these functions are not intended to work with
+# masked arrays. But there was no way to tell before.
+from unittest import TestCase  # noqa: F401
+
+__some__from_testing = [
+    'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp',
+    'assert_raises'
+    ]
+
+__all__ = __all__masked + __some__from_testing  # noqa: PLE0605
+
+
+def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
+    """
+    Returns True if all components of a and b are equal to given tolerances.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal. The relative error rtol should
+    be positive and << 1.0. The absolute error atol comes into play for
+    those elements of b that are very small or zero; it says how small a
+    must be in those cases.
+
+    """
+    m = mask_or(getmask(a), getmask(b))
+    d1 = filled(a)
+    d2 = filled(b)
+    if d1.dtype.char == "O" or d2.dtype.char == "O":
+        return np.equal(d1, d2).ravel()
+    x = filled(
+        masked_array(d1, copy=False, mask=m), fill_value
+    ).astype(np.float64)
+    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64)
+    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
+    return d.ravel()
+
+
+def almost(a, b, decimal=6, fill_value=True):
+    """
+    Returns True if a and b are equal up to decimal places.
+
+    If fill_value is True, masked values are considered equal. Otherwise,
+    masked values are considered unequal.
+
+    """
+    m = mask_or(getmask(a), getmask(b))
+    d1 = filled(a)
+    d2 = filled(b)
+    if d1.dtype.char == "O" or d2.dtype.char == "O":
+        return np.equal(d1, d2).ravel()
+    x = filled(
+        masked_array(d1, copy=False, mask=m), fill_value
+    ).astype(np.float64)
+    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64)
+    d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
+    return d.ravel()
+
+
+def _assert_equal_on_sequences(actual, desired, err_msg=''):
+    """
+    Asserts the equality of two non-array sequences.
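+
+    Each pair of corresponding items is compared with `assert_equal`,
+    so nested sequences are handled recursively.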
+
+    """
+    assert_equal(len(actual), len(desired), err_msg)
+    for k in range(len(desired)):
+        assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}')
+
+
+def assert_equal_records(a, b):
+    """
+    Asserts that two records are equal.
+
+    Pretty crude for now.
+
+    """
+    assert_equal(a.dtype, b.dtype)
+    for f in a.dtype.names:
+        (af, bf) = (operator.getitem(a, f), operator.getitem(b, f))
+        if not (af is masked) and not (bf is masked):
+            assert_equal(operator.getitem(a, f), operator.getitem(b, f))
+
+
+def assert_equal(actual, desired, err_msg=''):
+    """
+    Asserts that two items are equal.
+
+    """
+    # Case #1: dictionary .....
+    if isinstance(desired, dict):
+        if not isinstance(actual, dict):
+            raise AssertionError(repr(type(actual)))
+        assert_equal(len(actual), len(desired), err_msg)
+        for k in desired:
+            if k not in actual:
+                raise AssertionError(f"{k} not in {actual}")
+            assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
+        return
+    # Case #2: lists .....
+    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+        return _assert_equal_on_sequences(actual, desired, err_msg=err_msg)
+    # Case #3: scalars and other non-array objects
+    if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)):
+        msg = build_err_msg([actual, desired], err_msg)
+        if not desired == actual:
+            raise AssertionError(msg)
+        return
+    # Case #4: arrays or equivalent
+    if ((actual is masked) and not (desired is masked)) or \
+            ((desired is masked) and not (actual is masked)):
+        msg = build_err_msg([actual, desired],
+                            err_msg, header='', names=('x', 'y'))
+        raise ValueError(msg)
+    actual = np.asanyarray(actual)
+    desired = np.asanyarray(desired)
+    (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype)
+    if actual_dtype.char == "S" and desired_dtype.char == "S":
+        return _assert_equal_on_sequences(actual.tolist(),
+                                          desired.tolist(),
+                                          err_msg=err_msg)
+    return assert_array_equal(actual, desired, err_msg)
+
+
+def fail_if_equal(actual, desired, err_msg=''):
+    """
+    Raises an assertion error if two items are equal.
+
+    """
+    if isinstance(desired, dict):
+        if not isinstance(actual, dict):
+            raise AssertionError(repr(type(actual)))
+        fail_if_equal(len(actual), len(desired), err_msg)
+        for k in desired:
+            if k not in actual:
+                raise AssertionError(repr(k))
+            fail_if_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
+        return
+    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+        fail_if_equal(len(actual), len(desired), err_msg)
+        for k in range(len(desired)):
+            fail_if_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}')
+        return
+    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+        return fail_if_array_equal(actual, desired, err_msg)
+    msg = build_err_msg([actual, desired], err_msg)
+    if not desired != actual:
+        raise AssertionError(msg)
+
+
+assert_not_equal = fail_if_equal
+
+
+def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
+    """
+    Asserts that two items are almost equal.
+
+    The test is equivalent to abs(desired-actual) < 0.5 * 10**(-decimal).
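+
+    Examples
+    --------
+    A minimal, illustrative scalar case (array and masked inputs are
+    dispatched to `assert_array_almost_equal` instead):
+
+    >>> assert_almost_equal(1.000001, 1.0, decimal=5)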
+
+    """
+    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
+        return assert_array_almost_equal(actual, desired, decimal=decimal,
+                                         err_msg=err_msg, verbose=verbose)
+    msg = build_err_msg([actual, desired],
+                        err_msg=err_msg, verbose=verbose)
+    if not round(abs(desired - actual), decimal) == 0:
+        raise AssertionError(msg)
+
+
+assert_close = assert_almost_equal
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+                         fill_value=True):
+    """
+    Asserts that comparison between two masked arrays is satisfied.
+
+    The comparison is elementwise.
+
+    """
+    # Allocate a common mask and refill
+    m = mask_or(getmask(x), getmask(y))
+    x = masked_array(x, copy=False, mask=m, keep_mask=False, subok=False)
+    y = masked_array(y, copy=False, mask=m, keep_mask=False, subok=False)
+    if ((x is masked) and not (y is masked)) or \
+            ((y is masked) and not (x is masked)):
+        msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
+                            header=header, names=('x', 'y'))
+        raise ValueError(msg)
+    # OK, now run the basic tests on filled versions
+    return np.testing.assert_array_compare(comparison,
+                                           x.filled(fill_value),
+                                           y.filled(fill_value),
+                                           err_msg=err_msg,
+                                           verbose=verbose, header=header)
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Checks the elementwise equality of two masked arrays.
+
+    """
+    assert_array_compare(operator.__eq__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def fail_if_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Raises an assertion error if two masked arrays are not equal elementwise.
+
+    """
+    def compare(x, y):
+        return (not np.all(approx(x, y)))
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not equal')
+
+
+def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to a given number of decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return approx(x, y, rtol=10. ** -decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Checks the equality of two masked arrays, up to a given number of decimals.
+
+    The equality is checked elementwise.
+
+    """
+    def compare(x, y):
+        "Returns the result of the loose comparison between x and y."
+        return almost(x, y, decimal)
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not almost equal')
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Checks that x is smaller than y elementwise.
+
+    """
+    assert_array_compare(operator.__lt__, x, y,
+                         err_msg=err_msg, verbose=verbose,
+                         header='Arrays are not less-ordered')
+
+
+def assert_mask_equal(m1, m2, err_msg=''):
+    """
+    Asserts the equality of two masks.
+
+    """
+    if m1 is nomask:
+        assert_(m2 is nomask)
+    if m2 is nomask:
+        assert_(m1 is nomask)
+    assert_array_equal(m1, m2, err_msg=err_msg)
diff --git a/python/numpy/matlib.py b/python/numpy/matlib.py
new file mode 100644
index 000000000..f27d503cd
--- /dev/null
+++ b/python/numpy/matlib.py
@@ -0,0 +1,380 @@
+import warnings
+
+# 2018-05-29, PendingDeprecationWarning added to matrix.__new__
+# 2020-01-23, numpy 1.19.0 PendingDeprecationWarning
+warnings.warn("Importing from numpy.matlib is deprecated since 1.19.0. 
" + "The matrix subclass is not the recommended way to represent " + "matrices or deal with linear algebra (see " + "https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). " + "Please adjust your code to use regular ndarray. ", + PendingDeprecationWarning, stacklevel=2) + +import numpy as np + +# Matlib.py contains all functions in the numpy namespace with a few +# replacements. See doc/source/reference/routines.matlib.rst for details. +# Need * as we're copying the numpy namespace. +from numpy import * # noqa: F403 +from numpy.matrixlib.defmatrix import asmatrix, matrix + +__version__ = np.__version__ + +__all__ = ['rand', 'randn', 'repmat'] +__all__ += np.__all__ + +def empty(shape, dtype=None, order='C'): + """Return a new matrix of given shape and type, without initializing entries. + + Parameters + ---------- + shape : int or tuple of int + Shape of the empty matrix. + dtype : data-type, optional + Desired output data-type. + order : {'C', 'F'}, optional + Whether to store multi-dimensional data in row-major + (C-style) or column-major (Fortran-style) order in + memory. + + See Also + -------- + numpy.empty : Equivalent array function. + matlib.zeros : Return a matrix of zeros. + matlib.ones : Return a matrix of ones. + + Notes + ----- + Unlike other matrix creation functions (e.g. `matlib.zeros`, + `matlib.ones`), `matlib.empty` does not initialize the values of the + matrix, and may therefore be marginally faster. However, the values + stored in the newly allocated matrix are arbitrary. For reproducible + behavior, be sure to set each element of the matrix before reading. + + Examples + -------- + >>> import numpy.matlib + >>> np.matlib.empty((2, 2)) # filled with random data + matrix([[ 6.76425276e-320, 9.79033856e-307], # random + [ 7.39337286e-309, 3.22135945e-309]]) + >>> np.matlib.empty((2, 2), dtype=int) + matrix([[ 6600475, 0], # random + [ 6586976, 22740995]]) + + """ + return ndarray.__new__(matrix, shape, dtype, order=order) + +def ones(shape, dtype=None, order='C'): + """ + Matrix of ones. + + Return a matrix of given shape and type, filled with ones. + + Parameters + ---------- + shape : {sequence of ints, int} + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is np.float64. + order : {'C', 'F'}, optional + Whether to store matrix in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Matrix of ones of given shape, dtype, and order. + + See Also + -------- + ones : Array of ones. + matlib.zeros : Zero matrix. + + Notes + ----- + If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``, + `out` becomes a single row matrix of shape ``(1,N)``. + + Examples + -------- + >>> np.matlib.ones((2,3)) + matrix([[1., 1., 1.], + [1., 1., 1.]]) + + >>> np.matlib.ones(2) + matrix([[1., 1.]]) + + """ + a = ndarray.__new__(matrix, shape, dtype, order=order) + a.fill(1) + return a + +def zeros(shape, dtype=None, order='C'): + """ + Return a matrix of given shape and type, filled with zeros. + + Parameters + ---------- + shape : int or sequence of ints + Shape of the matrix + dtype : data-type, optional + The desired data-type for the matrix, default is float. + order : {'C', 'F'}, optional + Whether to store the result in C- or Fortran-contiguous order, + default is 'C'. + + Returns + ------- + out : matrix + Zero matrix of given shape, dtype, and order. + + See Also + -------- + numpy.zeros : Equivalent array function. + matlib.ones : Return a matrix of ones. 
+
+    Notes
+    -----
+    If `shape` has length one i.e. ``(N,)``, or is a scalar ``N``,
+    `out` becomes a single row matrix of shape ``(1,N)``.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.zeros((2, 3))
+    matrix([[0., 0., 0.],
+            [0., 0., 0.]])
+
+    >>> np.matlib.zeros(2)
+    matrix([[0., 0.]])
+
+    """
+    a = ndarray.__new__(matrix, shape, dtype, order=order)
+    a.fill(0)
+    return a
+
+def identity(n, dtype=None):
+    """
+    Returns the square identity matrix of given size.
+
+    Parameters
+    ----------
+    n : int
+        Size of the returned identity matrix.
+    dtype : data-type, optional
+        Data-type of the output. Defaults to ``float``.
+
+    Returns
+    -------
+    out : matrix
+        `n` x `n` matrix with its main diagonal set to one,
+        and all other elements zero.
+
+    See Also
+    --------
+    numpy.identity : Equivalent array function.
+    matlib.eye : More general matrix identity function.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.identity(3, dtype=int)
+    matrix([[1, 0, 0],
+            [0, 1, 0],
+            [0, 0, 1]])
+
+    """
+    a = array([1] + n * [0], dtype=dtype)
+    b = empty((n, n), dtype=dtype)
+    b.flat = a
+    return b
+
+def eye(n, M=None, k=0, dtype=float, order='C'):
+    """
+    Return a matrix with ones on the diagonal and zeros elsewhere.
+
+    Parameters
+    ----------
+    n : int
+        Number of rows in the output.
+    M : int, optional
+        Number of columns in the output, defaults to `n`.
+    k : int, optional
+        Index of the diagonal: 0 refers to the main diagonal,
+        a positive value refers to an upper diagonal,
+        and a negative value to a lower diagonal.
+    dtype : dtype, optional
+        Data-type of the returned matrix.
+    order : {'C', 'F'}, optional
+        Whether the output should be stored in row-major (C-style) or
+        column-major (Fortran-style) order in memory.
+
+    Returns
+    -------
+    I : matrix
+        An `n` x `M` matrix where all elements are equal to zero,
+        except for the `k`-th diagonal, whose values are equal to one.
+
+    See Also
+    --------
+    numpy.eye : Equivalent array function.
+    identity : Square identity matrix.
+
+    Examples
+    --------
+    >>> import numpy.matlib
+    >>> np.matlib.eye(3, k=1, dtype=float)
+    matrix([[0., 1., 0.],
+            [0., 0., 1.],
+            [0., 0., 0.]])
+
+    """
+    return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))
+
+def rand(*args):
+    """
+    Return a matrix of random values with given shape.
+
+    Create a matrix of the given shape and populate it with
+    random samples from a uniform distribution over ``[0, 1)``.
+
+    Parameters
+    ----------
+    \\*args : Arguments
+        Shape of the output.
+        If given as N integers, each integer specifies the size of one
+        dimension.
+        If given as a tuple, this tuple gives the complete shape.
+
+    Returns
+    -------
+    out : matrix
+        The matrix of random values with shape given by `\\*args`.
+
+    See Also
+    --------
+    randn, numpy.random.RandomState.rand
+
+    Examples
+    --------
+    >>> np.random.seed(123)
+    >>> import numpy.matlib
+    >>> np.matlib.rand(2, 3)
+    matrix([[0.69646919, 0.28613933, 0.22685145],
+            [0.55131477, 0.71946897, 0.42310646]])
+    >>> np.matlib.rand((2, 3))
+    matrix([[0.9807642 , 0.68482974, 0.4809319 ],
+            [0.39211752, 0.34317802, 0.72904971]])
+
+    If the first argument is a tuple, other arguments are ignored:
+
+    >>> np.matlib.rand((2, 3), 4)
+    matrix([[0.43857224, 0.0596779 , 0.39804426],
+            [0.73799541, 0.18249173, 0.17545176]])
+
+    """
+    if isinstance(args[0], tuple):
+        args = args[0]
+    return asmatrix(np.random.rand(*args))
+
+def randn(*args):
+    """
+    Return a random matrix with data from the "standard normal" distribution.
+ + `randn` generates a matrix filled with random floats sampled from a + univariate "normal" (Gaussian) distribution of mean 0 and variance 1. + + Parameters + ---------- + \\*args : Arguments + Shape of the output. + If given as N integers, each integer specifies the size of one + dimension. If given as a tuple, this tuple gives the complete shape. + + Returns + ------- + Z : matrix of floats + A matrix of floating-point samples drawn from the standard normal + distribution. + + See Also + -------- + rand, numpy.random.RandomState.randn + + Notes + ----- + For random samples from the normal distribution with mean ``mu`` and + standard deviation ``sigma``, use:: + + sigma * np.matlib.randn(...) + mu + + Examples + -------- + >>> np.random.seed(123) + >>> import numpy.matlib + >>> np.matlib.randn(1) + matrix([[-1.0856306]]) + >>> np.matlib.randn(1, 2, 3) + matrix([[ 0.99734545, 0.2829785 , -1.50629471], + [-0.57860025, 1.65143654, -2.42667924]]) + + Two-by-four matrix of samples from the normal distribution with + mean 3 and standard deviation 2.5: + + >>> 2.5 * np.matlib.randn((2, 4)) + 3 + matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], + [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]]) + + """ + if isinstance(args[0], tuple): + args = args[0] + return asmatrix(np.random.randn(*args)) + +def repmat(a, m, n): + """ + Repeat a 0-D to 2-D array or matrix MxN times. + + Parameters + ---------- + a : array_like + The array or matrix to be repeated. + m, n : int + The number of times `a` is repeated along the first and second axes. + + Returns + ------- + out : ndarray + The result of repeating `a`. + + Examples + -------- + >>> import numpy.matlib + >>> a0 = np.array(1) + >>> np.matlib.repmat(a0, 2, 3) + array([[1, 1, 1], + [1, 1, 1]]) + + >>> a1 = np.arange(4) + >>> np.matlib.repmat(a1, 2, 2) + array([[0, 1, 2, 3, 0, 1, 2, 3], + [0, 1, 2, 3, 0, 1, 2, 3]]) + + >>> a2 = np.asmatrix(np.arange(6).reshape(2, 3)) + >>> np.matlib.repmat(a2, 2, 3) + matrix([[0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5], + [0, 1, 2, 0, 1, 2, 0, 1, 2], + [3, 4, 5, 3, 4, 5, 3, 4, 5]]) + + """ + a = asanyarray(a) + ndim = a.ndim + if ndim == 0: + origrows, origcols = (1, 1) + elif ndim == 1: + origrows, origcols = (1, a.shape[0]) + else: + origrows, origcols = a.shape + rows = origrows * m + cols = origcols * n + c = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0) + return c.reshape(rows, cols) diff --git a/python/numpy/matlib.pyi b/python/numpy/matlib.pyi new file mode 100644 index 000000000..baeadc078 --- /dev/null +++ b/python/numpy/matlib.pyi @@ -0,0 +1,582 @@ +from typing import Any, Literal, TypeAlias, TypeVar, overload + +import numpy as np +import numpy.typing as npt +from numpy import ( # noqa: F401 + False_, + ScalarType, + True_, + __array_namespace_info__, + __version__, + abs, + absolute, + acos, + acosh, + add, + all, + allclose, + amax, + amin, + angle, + any, + append, + apply_along_axis, + apply_over_axes, + arange, + arccos, + arccosh, + arcsin, + arcsinh, + arctan, + arctan2, + arctanh, + argmax, + argmin, + argpartition, + argsort, + argwhere, + around, + array, + array2string, + array_equal, + array_equiv, + array_repr, + array_split, + array_str, + asanyarray, + asarray, + asarray_chkfinite, + ascontiguousarray, + asfortranarray, + asin, + asinh, + asmatrix, + astype, + atan, + atan2, + atanh, + atleast_1d, + atleast_2d, + atleast_3d, + average, + bartlett, + base_repr, + binary_repr, + bincount, + bitwise_and, + bitwise_count, + bitwise_invert, + 
bitwise_left_shift, + bitwise_not, + bitwise_or, + bitwise_right_shift, + bitwise_xor, + blackman, + block, + bmat, + bool, + bool_, + broadcast, + broadcast_arrays, + broadcast_shapes, + broadcast_to, + busday_count, + busday_offset, + busdaycalendar, + byte, + bytes_, + c_, + can_cast, + cbrt, + cdouble, + ceil, + char, + character, + choose, + clip, + clongdouble, + column_stack, + common_type, + complex64, + complex128, + complex256, + complexfloating, + compress, + concat, + concatenate, + conj, + conjugate, + convolve, + copy, + copysign, + copyto, + core, + corrcoef, + correlate, + cos, + cosh, + count_nonzero, + cov, + cross, + csingle, + ctypeslib, + cumprod, + cumsum, + cumulative_prod, + cumulative_sum, + datetime64, + datetime_as_string, + datetime_data, + deg2rad, + degrees, + delete, + diag, + diag_indices, + diag_indices_from, + diagflat, + diagonal, + diff, + digitize, + divide, + divmod, + dot, + double, + dsplit, + dstack, + dtype, + dtypes, + e, + ediff1d, + einsum, + einsum_path, + emath, + empty_like, + equal, + errstate, + euler_gamma, + exceptions, + exp, + exp2, + expand_dims, + expm1, + extract, + f2py, + fabs, + fft, + fill_diagonal, + finfo, + fix, + flatiter, + flatnonzero, + flexible, + flip, + fliplr, + flipud, + float16, + float32, + float64, + float128, + float_power, + floating, + floor, + floor_divide, + fmax, + fmin, + fmod, + format_float_positional, + format_float_scientific, + frexp, + from_dlpack, + frombuffer, + fromfile, + fromfunction, + fromiter, + frompyfunc, + fromregex, + fromstring, + full, + full_like, + gcd, + generic, + genfromtxt, + geomspace, + get_include, + get_printoptions, + getbufsize, + geterr, + geterrcall, + gradient, + greater, + greater_equal, + half, + hamming, + hanning, + heaviside, + histogram, + histogram2d, + histogram_bin_edges, + histogramdd, + hsplit, + hstack, + hypot, + i0, + iinfo, + imag, + in1d, + index_exp, + indices, + inexact, + inf, + info, + inner, + insert, + int8, + int16, + int32, + int64, + int_, + intc, + integer, + interp, + intersect1d, + intp, + invert, + is_busday, + isclose, + iscomplex, + iscomplexobj, + isdtype, + isfinite, + isfortran, + isin, + isinf, + isnan, + isnat, + isneginf, + isposinf, + isreal, + isrealobj, + isscalar, + issubdtype, + iterable, + ix_, + kaiser, + kron, + lcm, + ldexp, + left_shift, + less, + less_equal, + lexsort, + lib, + linalg, + linspace, + little_endian, + load, + loadtxt, + log, + log1p, + log2, + log10, + logaddexp, + logaddexp2, + logical_and, + logical_not, + logical_or, + logical_xor, + logspace, + long, + longdouble, + longlong, + ma, + mask_indices, + matmul, + matrix, + matrix_transpose, + matvec, + max, + maximum, + may_share_memory, + mean, + median, + memmap, + meshgrid, + mgrid, + min, + min_scalar_type, + minimum, + mintypecode, + mod, + modf, + moveaxis, + multiply, + nan, + nan_to_num, + nanargmax, + nanargmin, + nancumprod, + nancumsum, + nanmax, + nanmean, + nanmedian, + nanmin, + nanpercentile, + nanprod, + nanquantile, + nanstd, + nansum, + nanvar, + ndarray, + ndenumerate, + ndim, + ndindex, + nditer, + negative, + nested_iters, + newaxis, + nextafter, + nonzero, + not_equal, + number, + object_, + ogrid, + ones_like, + outer, + packbits, + pad, + partition, + percentile, + permute_dims, + pi, + piecewise, + place, + poly, + poly1d, + polyadd, + polyder, + polydiv, + polyfit, + polyint, + polymul, + polynomial, + polysub, + polyval, + positive, + pow, + power, + printoptions, + prod, + promote_types, + ptp, + put, + put_along_axis, + putmask, + 
quantile, + r_, + rad2deg, + radians, + random, + ravel, + ravel_multi_index, + real, + real_if_close, + rec, + recarray, + reciprocal, + record, + remainder, + repeat, + require, + reshape, + resize, + result_type, + right_shift, + rint, + roll, + rollaxis, + roots, + rot90, + round, + row_stack, + s_, + save, + savetxt, + savez, + savez_compressed, + sctypeDict, + searchsorted, + select, + set_printoptions, + setbufsize, + setdiff1d, + seterr, + seterrcall, + setxor1d, + shape, + shares_memory, + short, + show_config, + show_runtime, + sign, + signbit, + signedinteger, + sin, + sinc, + single, + sinh, + size, + sort, + sort_complex, + spacing, + split, + sqrt, + square, + squeeze, + stack, + std, + str_, + strings, + subtract, + sum, + swapaxes, + take, + take_along_axis, + tan, + tanh, + tensordot, + test, + testing, + tile, + timedelta64, + trace, + transpose, + trapezoid, + trapz, + tri, + tril, + tril_indices, + tril_indices_from, + trim_zeros, + triu, + triu_indices, + triu_indices_from, + true_divide, + trunc, + typecodes, + typename, + typing, + ubyte, + ufunc, + uint, + uint8, + uint16, + uint32, + uint64, + uintc, + uintp, + ulong, + ulonglong, + union1d, + unique, + unique_all, + unique_counts, + unique_inverse, + unique_values, + unpackbits, + unravel_index, + unsignedinteger, + unstack, + unwrap, + ushort, + vander, + var, + vdot, + vecdot, + vecmat, + vectorize, + void, + vsplit, + vstack, + where, + zeros_like, +) +from numpy._typing import _ArrayLike, _DTypeLike + +__all__ = ["rand", "randn", "repmat"] +__all__ += np.__all__ + +### + +_T = TypeVar("_T", bound=np.generic) +_Matrix: TypeAlias = np.matrix[tuple[int, int], np.dtype[_T]] +_Order: TypeAlias = Literal["C", "F"] + +### + +# +@overload +def empty(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def empty(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def ones(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def ones(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def zeros(shape: int | tuple[int, int], dtype: None = None, order: _Order = "C") -> _Matrix[np.float64]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def zeros(shape: int | tuple[int, int], dtype: npt.DTypeLike, order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def identity(n: int, dtype: None = None) -> _Matrix[np.float64]: ... +@overload +def identity(n: int, dtype: _DTypeLike[_T]) -> _Matrix[_T]: ... +@overload +def identity(n: int, dtype: npt.DTypeLike | None = None) -> _Matrix[Any]: ... + +# +@overload +def eye( + n: int, + M: int | None = None, + k: int = 0, + dtype: type[np.float64] | None = ..., + order: _Order = "C", +) -> _Matrix[np.float64]: ... +@overload +def eye(n: int, M: int | None, k: int, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... +@overload +def eye(n: int, M: int | None = None, k: int = 0, *, dtype: _DTypeLike[_T], order: _Order = "C") -> _Matrix[_T]: ... 
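+# Catch-all overload below: any other DTypeLike argument is accepted and
+# yields an untyped matrix (_Matrix[Any]).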
+@overload +def eye(n: int, M: int | None = None, k: int = 0, dtype: npt.DTypeLike = ..., order: _Order = "C") -> _Matrix[Any]: ... + +# +@overload +def rand(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def rand(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def randn(arg: int | tuple[()] | tuple[int] | tuple[int, int], /) -> _Matrix[np.float64]: ... +@overload +def randn(arg: int, /, *args: int) -> _Matrix[np.float64]: ... + +# +@overload +def repmat(a: _Matrix[_T], m: int, n: int) -> _Matrix[_T]: ... +@overload +def repmat(a: _ArrayLike[_T], m: int, n: int) -> npt.NDArray[_T]: ... +@overload +def repmat(a: npt.ArrayLike, m: int, n: int) -> npt.NDArray[Any]: ... diff --git a/python/numpy/matrixlib/__init__.py b/python/numpy/matrixlib/__init__.py new file mode 100644 index 000000000..1ff5cb58c --- /dev/null +++ b/python/numpy/matrixlib/__init__.py @@ -0,0 +1,12 @@ +"""Sub-package containing the matrix class and related functions. + +""" +from . import defmatrix +from .defmatrix import * + +__all__ = defmatrix.__all__ + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/python/numpy/matrixlib/__init__.pyi b/python/numpy/matrixlib/__init__.pyi new file mode 100644 index 000000000..56ae8bf4c --- /dev/null +++ b/python/numpy/matrixlib/__init__.pyi @@ -0,0 +1,5 @@ +from numpy import matrix + +from .defmatrix import asmatrix, bmat + +__all__ = ["matrix", "bmat", "asmatrix"] diff --git a/python/numpy/matrixlib/__pycache__/__init__.cpython-312.pyc b/python/numpy/matrixlib/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..f53f61572 Binary files /dev/null and b/python/numpy/matrixlib/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc b/python/numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc new file mode 100644 index 000000000..045ed7880 Binary files /dev/null and b/python/numpy/matrixlib/__pycache__/defmatrix.cpython-312.pyc differ diff --git a/python/numpy/matrixlib/defmatrix.py b/python/numpy/matrixlib/defmatrix.py new file mode 100644 index 000000000..39b9a9355 --- /dev/null +++ b/python/numpy/matrixlib/defmatrix.py @@ -0,0 +1,1119 @@ +__all__ = ['matrix', 'bmat', 'asmatrix'] + +import ast +import sys +import warnings + +import numpy._core.numeric as N +from numpy._core.numeric import concatenate, isscalar +from numpy._utils import set_module + +# While not in __all__, matrix_power used to be defined here, so we import +# it for backward compatibility. +from numpy.linalg import matrix_power + + +def _convert_from_string(data): + for char in '[]': + data = data.replace(char, '') + + rows = data.split(';') + newdata = [] + for count, row in enumerate(rows): + trow = row.split(',') + newrow = [] + for col in trow: + temp = col.split() + newrow.extend(map(ast.literal_eval, temp)) + if count == 0: + Ncols = len(newrow) + elif len(newrow) != Ncols: + raise ValueError("Rows not the same size.") + newdata.append(newrow) + return newdata + + +@set_module('numpy') +def asmatrix(data, dtype=None): + """ + Interpret the input as a matrix. + + Unlike `matrix`, `asmatrix` does not make a copy if the input is already + a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``. + + Parameters + ---------- + data : array_like + Input data. + dtype : data-type + Data-type of the output matrix. + + Returns + ------- + mat : matrix + `data` interpreted as a matrix. 
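+        Because no copy is made for ndarray input, modifying the original
+        array is reflected in the returned matrix, as the example below
+        shows.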
+ + Examples + -------- + >>> import numpy as np + >>> x = np.array([[1, 2], [3, 4]]) + + >>> m = np.asmatrix(x) + + >>> x[0,0] = 5 + + >>> m + matrix([[5, 2], + [3, 4]]) + + """ + return matrix(data, dtype=dtype, copy=False) + + +@set_module('numpy') +class matrix(N.ndarray): + """ + matrix(data, dtype=None, copy=True) + + Returns a matrix from an array-like object, or from a string of data. + + A matrix is a specialized 2-D array that retains its 2-D nature + through operations. It has certain special operators, such as ``*`` + (matrix multiplication) and ``**`` (matrix power). + + .. note:: It is no longer recommended to use this class, even for linear + algebra. Instead use regular arrays. The class may be removed + in the future. + + Parameters + ---------- + data : array_like or string + If `data` is a string, it is interpreted as a matrix with commas + or spaces separating columns, and semicolons separating rows. + dtype : data-type + Data-type of the output matrix. + copy : bool + If `data` is already an `ndarray`, then this flag determines + whether the data is copied (the default), or whether a view is + constructed. + + See Also + -------- + array + + Examples + -------- + >>> import numpy as np + >>> a = np.matrix('1 2; 3 4') + >>> a + matrix([[1, 2], + [3, 4]]) + + >>> np.matrix([[1, 2], [3, 4]]) + matrix([[1, 2], + [3, 4]]) + + """ + __array_priority__ = 10.0 + + def __new__(subtype, data, dtype=None, copy=True): + warnings.warn('the matrix subclass is not the recommended way to ' + 'represent matrices or deal with linear algebra (see ' + 'https://docs.scipy.org/doc/numpy/user/' + 'numpy-for-matlab-users.html). ' + 'Please adjust your code to use regular ndarray.', + PendingDeprecationWarning, stacklevel=2) + if isinstance(data, matrix): + dtype2 = data.dtype + if (dtype is None): + dtype = dtype2 + if (dtype2 == dtype) and (not copy): + return data + return data.astype(dtype) + + if isinstance(data, N.ndarray): + if dtype is None: + intype = data.dtype + else: + intype = N.dtype(dtype) + new = data.view(subtype) + if intype != data.dtype: + return new.astype(intype) + if copy: + return new.copy() + else: + return new + + if isinstance(data, str): + data = _convert_from_string(data) + + # now convert data to an array + copy = None if not copy else True + arr = N.array(data, dtype=dtype, copy=copy) + ndim = arr.ndim + shape = arr.shape + if (ndim > 2): + raise ValueError("matrix must be 2-dimensional") + elif ndim == 0: + shape = (1, 1) + elif ndim == 1: + shape = (1, shape[0]) + + order = 'C' + if (ndim == 2) and arr.flags.fortran: + order = 'F' + + if not (order or arr.flags.contiguous): + arr = arr.copy() + + ret = N.ndarray.__new__(subtype, shape, arr.dtype, + buffer=arr, + order=order) + return ret + + def __array_finalize__(self, obj): + self._getitem = False + if (isinstance(obj, matrix) and obj._getitem): + return + ndim = self.ndim + if (ndim == 2): + return + if (ndim > 2): + newshape = tuple(x for x in self.shape if x > 1) + ndim = len(newshape) + if ndim == 2: + self.shape = newshape + return + elif (ndim > 2): + raise ValueError("shape too large to be a matrix.") + else: + newshape = self.shape + if ndim == 0: + self.shape = (1, 1) + elif ndim == 1: + self.shape = (1, newshape[0]) + return + + def __getitem__(self, index): + self._getitem = True + + try: + out = N.ndarray.__getitem__(self, index) + finally: + self._getitem = False + + if not isinstance(out, N.ndarray): + return out + + if out.ndim == 0: + return out[()] + if out.ndim == 1: + sh = out.shape[0] + # 
Determine when we should have a column array + try: + n = len(index) + except Exception: + n = 0 + if n > 1 and isscalar(index[1]): + out.shape = (sh, 1) + else: + out.shape = (1, sh) + return out + + def __mul__(self, other): + if isinstance(other, (N.ndarray, list, tuple)): + # This promotes 1-D vectors to row vectors + return N.dot(self, asmatrix(other)) + if isscalar(other) or not hasattr(other, '__rmul__'): + return N.dot(self, other) + return NotImplemented + + def __rmul__(self, other): + return N.dot(other, self) + + def __imul__(self, other): + self[:] = self * other + return self + + def __pow__(self, other): + return matrix_power(self, other) + + def __ipow__(self, other): + self[:] = self ** other + return self + + def __rpow__(self, other): + return NotImplemented + + def _align(self, axis): + """A convenience function for operations that need to preserve axis + orientation. + """ + if axis is None: + return self[0, 0] + elif axis == 0: + return self + elif axis == 1: + return self.transpose() + else: + raise ValueError("unsupported axis") + + def _collapse(self, axis): + """A convenience function for operations that want to collapse + to a scalar like _align, but are using keepdims=True + """ + if axis is None: + return self[0, 0] + else: + return self + + # Necessary because base-class tolist expects dimension + # reduction by x[0] + def tolist(self): + """ + Return the matrix as a (possibly nested) list. + + See `ndarray.tolist` for full documentation. + + See Also + -------- + ndarray.tolist + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.tolist() + [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]] + + """ + return self.__array__().tolist() + + # To preserve orientation of result... + def sum(self, axis=None, dtype=None, out=None): + """ + Returns the sum of the matrix elements, along the given axis. + + Refer to `numpy.sum` for full documentation. + + See Also + -------- + numpy.sum + + Notes + ----- + This is the same as `ndarray.sum`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix([[1, 2], [4, 3]]) + >>> x.sum() + 10 + >>> x.sum(axis=1) + matrix([[3], + [7]]) + >>> x.sum(axis=1, dtype='float') + matrix([[3.], + [7.]]) + >>> out = np.zeros((2, 1), dtype='float') + >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + matrix([[3.], + [7.]]) + + """ + return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) + + # To update docstring from array to matrix... + def squeeze(self, axis=None): + """ + Return a possibly reshaped matrix. + + Refer to `numpy.squeeze` for more documentation. + + Parameters + ---------- + axis : None or int or tuple of ints, optional + Selects a subset of the axes of length one in the shape. + If an axis is selected with shape entry greater than one, + an error is raised. + + Returns + ------- + squeezed : matrix + The matrix, but as a (1, N) matrix if it had shape (N, 1). + + See Also + -------- + numpy.squeeze : related function + + Notes + ----- + If `m` has a single column then that column is returned + as the single row of a matrix. Otherwise `m` is returned. + The returned matrix is always either `m` itself or a view into `m`. + Supplying an axis keyword argument will not affect the returned matrix + but it may cause an error to be raised. 
+ + Examples + -------- + >>> c = np.matrix([[1], [2]]) + >>> c + matrix([[1], + [2]]) + >>> c.squeeze() + matrix([[1, 2]]) + >>> r = c.T + >>> r + matrix([[1, 2]]) + >>> r.squeeze() + matrix([[1, 2]]) + >>> m = np.matrix([[1, 2], [3, 4]]) + >>> m.squeeze() + matrix([[1, 2], + [3, 4]]) + + """ + return N.ndarray.squeeze(self, axis=axis) + + # To update docstring from array to matrix... + def flatten(self, order='C'): + """ + Return a flattened copy of the matrix. + + All `N` elements of the matrix are placed into a single row. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + 'C' means to flatten in row-major (C-style) order. 'F' means to + flatten in column-major (Fortran-style) order. 'A' means to + flatten in column-major order if `m` is Fortran *contiguous* in + memory, row-major order otherwise. 'K' means to flatten `m` in + the order the elements occur in memory. The default is 'C'. + + Returns + ------- + y : matrix + A copy of the matrix, flattened to a `(1, N)` matrix where `N` + is the number of elements in the original matrix. + + See Also + -------- + ravel : Return a flattened array. + flat : A 1-D flat iterator over the matrix. + + Examples + -------- + >>> m = np.matrix([[1,2], [3,4]]) + >>> m.flatten() + matrix([[1, 2, 3, 4]]) + >>> m.flatten('F') + matrix([[1, 3, 2, 4]]) + + """ + return N.ndarray.flatten(self, order=order) + + def mean(self, axis=None, dtype=None, out=None): + """ + Returns the average of the matrix elements along the given axis. + + Refer to `numpy.mean` for full documentation. + + See Also + -------- + numpy.mean + + Notes + ----- + Same as `ndarray.mean` except that, where that returns an `ndarray`, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.mean() + 5.5 + >>> x.mean(0) + matrix([[4., 5., 6., 7.]]) + >>> x.mean(1) + matrix([[ 1.5], + [ 5.5], + [ 9.5]]) + + """ + return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def std(self, axis=None, dtype=None, out=None, ddof=0): + """ + Return the standard deviation of the array elements along the given axis. + + Refer to `numpy.std` for full documentation. + + See Also + -------- + numpy.std + + Notes + ----- + This is the same as `ndarray.std`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.std() + 3.4520525295346629 # may vary + >>> x.std(0) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary + >>> x.std(1) + matrix([[ 1.11803399], + [ 1.11803399], + [ 1.11803399]]) + + """ + return N.ndarray.std(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) + + def var(self, axis=None, dtype=None, out=None, ddof=0): + """ + Returns the variance of the matrix elements, along the given axis. + + Refer to `numpy.var` for full documentation. + + See Also + -------- + numpy.var + + Notes + ----- + This is the same as `ndarray.var`, except that where an `ndarray` would + be returned, a `matrix` object is returned instead. 
+ + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3, 4))) + >>> x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.var() + 11.916666666666666 + >>> x.var(0) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary + >>> x.var(1) + matrix([[1.25], + [1.25], + [1.25]]) + + """ + return N.ndarray.var(self, axis, dtype, out, ddof, + keepdims=True)._collapse(axis) + + def prod(self, axis=None, dtype=None, out=None): + """ + Return the product of the array elements over the given axis. + + Refer to `prod` for full documentation. + + See Also + -------- + prod, ndarray.prod + + Notes + ----- + Same as `ndarray.prod`, except, where that returns an `ndarray`, this + returns a `matrix` object instead. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.prod() + 0 + >>> x.prod(0) + matrix([[ 0, 45, 120, 231]]) + >>> x.prod(1) + matrix([[ 0], + [ 840], + [7920]]) + + """ + return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis) + + def any(self, axis=None, out=None): + """ + Test whether any array element along a given axis evaluates to True. + + Refer to `numpy.any` for full documentation. + + Parameters + ---------- + axis : int, optional + Axis along which logical OR is performed + out : ndarray, optional + Output to existing array instead of creating new one, must have + same shape as expected output + + Returns + ------- + any : bool, ndarray + Returns a single bool if `axis` is ``None``; otherwise, + returns `ndarray` + + """ + return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis) + + def all(self, axis=None, out=None): + """ + Test whether all matrix elements along a given axis evaluate to True. + + Parameters + ---------- + See `numpy.all` for complete descriptions + + See Also + -------- + numpy.all + + Notes + ----- + This is the same as `ndarray.all`, but it returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> y = x[0]; y + matrix([[0, 1, 2, 3]]) + >>> (x == y) + matrix([[ True, True, True, True], + [False, False, False, False], + [False, False, False, False]]) + >>> (x == y).all() + False + >>> (x == y).all(0) + matrix([[False, False, False, False]]) + >>> (x == y).all(1) + matrix([[ True], + [False], + [False]]) + + """ + return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis) + + def max(self, axis=None, out=None): + """ + Return the maximum value along an axis. + + Parameters + ---------- + See `amax` for complete descriptions + + See Also + -------- + amax, ndarray.max + + Notes + ----- + This is the same as `ndarray.max`, but returns a `matrix` object + where `ndarray.max` would return an ndarray. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.max() + 11 + >>> x.max(0) + matrix([[ 8, 9, 10, 11]]) + >>> x.max(1) + matrix([[ 3], + [ 7], + [11]]) + + """ + return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis) + + def argmax(self, axis=None, out=None): + """ + Indexes of the maximum values along an axis. + + Return the indexes of the first occurrences of the maximum values + along the specified axis. If axis is None, the index is for the + flattened matrix. 
+ + Parameters + ---------- + See `numpy.argmax` for complete descriptions + + See Also + -------- + numpy.argmax + + Notes + ----- + This is the same as `ndarray.argmax`, but returns a `matrix` object + where `ndarray.argmax` would return an `ndarray`. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.argmax() + 11 + >>> x.argmax(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmax(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmax(self, axis, out)._align(axis) + + def min(self, axis=None, out=None): + """ + Return the minimum value along an axis. + + Parameters + ---------- + See `amin` for complete descriptions. + + See Also + -------- + amin, ndarray.min + + Notes + ----- + This is the same as `ndarray.min`, but returns a `matrix` object + where `ndarray.min` would return an ndarray. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.min() + -11 + >>> x.min(0) + matrix([[ -8, -9, -10, -11]]) + >>> x.min(1) + matrix([[ -3], + [ -7], + [-11]]) + + """ + return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis) + + def argmin(self, axis=None, out=None): + """ + Indexes of the minimum values along an axis. + + Return the indexes of the first occurrences of the minimum values + along the specified axis. If axis is None, the index is for the + flattened matrix. + + Parameters + ---------- + See `numpy.argmin` for complete descriptions. + + See Also + -------- + numpy.argmin + + Notes + ----- + This is the same as `ndarray.argmin`, but returns a `matrix` object + where `ndarray.argmin` would return an `ndarray`. + + Examples + -------- + >>> x = -np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, -1, -2, -3], + [ -4, -5, -6, -7], + [ -8, -9, -10, -11]]) + >>> x.argmin() + 11 + >>> x.argmin(0) + matrix([[2, 2, 2, 2]]) + >>> x.argmin(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ndarray.argmin(self, axis, out)._align(axis) + + def ptp(self, axis=None, out=None): + """ + Peak-to-peak (maximum - minimum) value along the given axis. + + Refer to `numpy.ptp` for full documentation. + + See Also + -------- + numpy.ptp + + Notes + ----- + Same as `ndarray.ptp`, except, where that would return an `ndarray` object, + this returns a `matrix` object. + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.ptp() + 11 + >>> x.ptp(0) + matrix([[8, 8, 8, 8]]) + >>> x.ptp(1) + matrix([[3], + [3], + [3]]) + + """ + return N.ptp(self, axis, out)._align(axis) + + @property + def I(self): # noqa: E743 + """ + Returns the (multiplicative) inverse of invertible `self`. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + If `self` is non-singular, `ret` is such that ``ret * self`` == + ``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return + ``True``. + + Raises + ------ + numpy.linalg.LinAlgError: Singular matrix + If `self` is singular. + + See Also + -------- + linalg.inv + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]'); m + matrix([[1, 2], + [3, 4]]) + >>> m.getI() + matrix([[-2. , 1. 
], + [ 1.5, -0.5]]) + >>> m.getI() * m + matrix([[ 1., 0.], # may vary + [ 0., 1.]]) + + """ + M, N = self.shape + if M == N: + from numpy.linalg import inv as func + else: + from numpy.linalg import pinv as func + return asmatrix(func(self)) + + @property + def A(self): + """ + Return `self` as an `ndarray` object. + + Equivalent to ``np.asarray(self)``. + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self` as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA() + array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + + """ + return self.__array__() + + @property + def A1(self): + """ + Return `self` as a flattened `ndarray`. + + Equivalent to ``np.asarray(x).ravel()`` + + Parameters + ---------- + None + + Returns + ------- + ret : ndarray + `self`, 1-D, as an `ndarray` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))); x + matrix([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]) + >>> x.getA1() + array([ 0, 1, 2, ..., 9, 10, 11]) + + + """ + return self.__array__().ravel() + + def ravel(self, order='C'): + """ + Return a flattened matrix. + + Refer to `numpy.ravel` for more documentation. + + Parameters + ---------- + order : {'C', 'F', 'A', 'K'}, optional + The elements of `m` are read using this index order. 'C' means to + index the elements in C-like order, with the last axis index + changing fastest, back to the first axis index changing slowest. + 'F' means to index the elements in Fortran-like index order, with + the first index changing fastest, and the last index changing + slowest. Note that the 'C' and 'F' options take no account of the + memory layout of the underlying array, and only refer to the order + of axis indexing. 'A' means to read the elements in Fortran-like + index order if `m` is Fortran *contiguous* in memory, C-like order + otherwise. 'K' means to read the elements in the order they occur + in memory, except for reversing the data when strides are negative. + By default, 'C' index order is used. + + Returns + ------- + ret : matrix + Return the matrix flattened to shape `(1, N)` where `N` + is the number of elements in the original matrix. + A copy is made only if necessary. + + See Also + -------- + matrix.flatten : returns a similar output matrix but always a copy + matrix.flat : a flat iterator on the array. + numpy.ravel : related function which returns an ndarray + + """ + return N.ndarray.ravel(self, order=order) + + @property + def T(self): + """ + Returns the transpose of the matrix. + + Does *not* conjugate! For the complex conjugate transpose, use ``.H``. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + The (non-conjugated) transpose of the matrix. + + See Also + -------- + transpose, getH + + Examples + -------- + >>> m = np.matrix('[1, 2; 3, 4]') + >>> m + matrix([[1, 2], + [3, 4]]) + >>> m.getT() + matrix([[1, 3], + [2, 4]]) + + """ + return self.transpose() + + @property + def H(self): + """ + Returns the (complex) conjugate transpose of `self`. + + Equivalent to ``np.transpose(self)`` if `self` is real-valued. + + Parameters + ---------- + None + + Returns + ------- + ret : matrix object + complex conjugate transpose of `self` + + Examples + -------- + >>> x = np.matrix(np.arange(12).reshape((3,4))) + >>> z = x - 1j*x; z + matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j], + [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], + [ 8. -8.j, 9. 
-9.j, 10.-10.j, 11.-11.j]]) + >>> z.getH() + matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. +7.j, 11.+11.j]]) + + """ + if issubclass(self.dtype.type, N.complexfloating): + return self.transpose().conjugate() + else: + return self.transpose() + + # kept for compatibility + getT = T.fget + getA = A.fget + getA1 = A1.fget + getH = H.fget + getI = I.fget + +def _from_string(str, gdict, ldict): + rows = str.split(';') + rowtup = [] + for row in rows: + trow = row.split(',') + newrow = [] + for x in trow: + newrow.extend(x.split()) + trow = newrow + coltup = [] + for col in trow: + col = col.strip() + try: + thismat = ldict[col] + except KeyError: + try: + thismat = gdict[col] + except KeyError as e: + raise NameError(f"name {col!r} is not defined") from None + + coltup.append(thismat) + rowtup.append(concatenate(coltup, axis=-1)) + return concatenate(rowtup, axis=0) + + +@set_module('numpy') +def bmat(obj, ldict=None, gdict=None): + """ + Build a matrix object from a string, nested sequence, or array. + + Parameters + ---------- + obj : str or array_like + Input data. If a string, variables in the current scope may be + referenced by name. + ldict : dict, optional + A dictionary that replaces local operands in current frame. + Ignored if `obj` is not a string or `gdict` is None. + gdict : dict, optional + A dictionary that replaces global operands in current frame. + Ignored if `obj` is not a string. + + Returns + ------- + out : matrix + Returns a matrix object, which is a specialized 2-D array. + + See Also + -------- + block : + A generalization of this function for N-d arrays, that returns normal + ndarrays. + + Examples + -------- + >>> import numpy as np + >>> A = np.asmatrix('1 1; 1 1') + >>> B = np.asmatrix('2 2; 2 2') + >>> C = np.asmatrix('3 4; 5 6') + >>> D = np.asmatrix('7 8; 9 0') + + All the following expressions construct the same block matrix: + + >>> np.bmat([[A, B], [C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]]) + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + >>> np.bmat('A,B; C,D') + matrix([[1, 1, 2, 2], + [1, 1, 2, 2], + [3, 4, 7, 8], + [5, 6, 9, 0]]) + + """ + if isinstance(obj, str): + if gdict is None: + # get previous frame + frame = sys._getframe().f_back + glob_dict = frame.f_globals + loc_dict = frame.f_locals + else: + glob_dict = gdict + loc_dict = ldict + + return matrix(_from_string(obj, glob_dict, loc_dict)) + + if isinstance(obj, (tuple, list)): + # [[A,B],[C,D]] + arr_rows = [] + for row in obj: + if isinstance(row, N.ndarray): # not 2-d + return matrix(concatenate(obj, axis=-1)) + else: + arr_rows.append(concatenate(row, axis=-1)) + return matrix(concatenate(arr_rows, axis=0)) + if isinstance(obj, N.ndarray): + return matrix(obj) diff --git a/python/numpy/matrixlib/defmatrix.pyi b/python/numpy/matrixlib/defmatrix.pyi new file mode 100644 index 000000000..ee8f83746 --- /dev/null +++ b/python/numpy/matrixlib/defmatrix.pyi @@ -0,0 +1,17 @@ +from collections.abc import Mapping, Sequence +from typing import Any + +from numpy import matrix +from numpy._typing import ArrayLike, DTypeLike, NDArray + +__all__ = ["asmatrix", "bmat", "matrix"] + +def bmat( + obj: str | Sequence[ArrayLike] | NDArray[Any], + ldict: Mapping[str, Any] | None = ..., + gdict: Mapping[str, Any] | None = ..., +) -> matrix[tuple[int, int], Any]: ... + +def asmatrix( + data: ArrayLike, dtype: DTypeLike = ... 
+) -> matrix[tuple[int, int], Any]: ... diff --git a/python/numpy/matrixlib/tests/__init__.py b/python/numpy/matrixlib/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/numpy/matrixlib/tests/test_defmatrix.py b/python/numpy/matrixlib/tests/test_defmatrix.py new file mode 100644 index 000000000..ce23933ab --- /dev/null +++ b/python/numpy/matrixlib/tests/test_defmatrix.py @@ -0,0 +1,455 @@ +import collections.abc + +import numpy as np +from numpy import asmatrix, bmat, matrix +from numpy.linalg import matrix_power +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, +) + + +class TestCtor: + def test_basic(self): + A = np.array([[1, 2], [3, 4]]) + mA = matrix(A) + assert_(np.all(mA.A == A)) + + B = bmat("A,A;A,A") + C = bmat([[A, A], [A, A]]) + D = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + assert_(np.all(B.A == D)) + assert_(np.all(C.A == D)) + + E
= np.array([[5, 6], [7, 8]]) + AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]]) + assert_(np.all(bmat([A, E]) == AEresult)) + + vec = np.arange(5) + mvec = matrix(vec) + assert_(mvec.shape == (1, 5)) + + def test_exceptions(self): + # Check for ValueError when called with invalid string data. + assert_raises(ValueError, matrix, "invalid") + + def test_bmat_nondefault_str(self): + A = np.array([[1, 2], [3, 4]]) + B = np.array([[5, 6], [7, 8]]) + Aresult = np.array([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + mixresult = np.array([[1, 2, 5, 6], + [3, 4, 7, 8], + [5, 6, 1, 2], + [7, 8, 3, 4]]) + assert_(np.all(bmat("A,A;A,A") == Aresult)) + assert_(np.all(bmat("A,A;A,A", ldict={'A': B}) == Aresult)) + assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A': B}) + assert_( + np.all(bmat("A,A;A,A", ldict={'A': A}, gdict={'A': B}) == Aresult)) + b2 = bmat("A,B;C,D", ldict={'A': A, 'B': B}, gdict={'C': B, 'D': A}) + assert_(np.all(b2 == mixresult)) + + +class TestProperties: + def test_sum(self): + """Test whether matrix.sum(axis=1) preserves orientation. + Fails in NumPy <= 0.9.6.2127. + """ + M = matrix([[1, 2, 0, 0], + [3, 4, 0, 0], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + sum0 = matrix([8, 12, 4, 6]) + sum1 = matrix([3, 7, 6, 14]).T + sumall = 30 + assert_array_equal(sum0, M.sum(axis=0)) + assert_array_equal(sum1, M.sum(axis=1)) + assert_equal(sumall, M.sum()) + + assert_array_equal(sum0, np.sum(M, axis=0)) + assert_array_equal(sum1, np.sum(M, axis=1)) + assert_equal(sumall, np.sum(M)) + + def test_prod(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.prod(), 720) + assert_equal(x.prod(0), matrix([[4, 10, 18]])) + assert_equal(x.prod(1), matrix([[6], [120]])) + + assert_equal(np.prod(x), 720) + assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]])) + assert_equal(np.prod(x, axis=1), matrix([[6], [120]])) + + y = matrix([0, 1, 3]) + assert_(y.prod() == 0) + + def test_max(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.max(), 6) + assert_equal(x.max(0), matrix([[4, 5, 6]])) + assert_equal(x.max(1), matrix([[3], [6]])) + + assert_equal(np.max(x), 6) + assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]])) + assert_equal(np.max(x, axis=1), matrix([[3], [6]])) + + def test_min(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.min(), 1) + assert_equal(x.min(0), matrix([[1, 2, 3]])) + assert_equal(x.min(1), matrix([[1], [4]])) + + assert_equal(np.min(x), 1) + assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]])) + assert_equal(np.min(x, axis=1), matrix([[1], [4]])) + + def test_ptp(self): + x = np.arange(4).reshape((2, 2)) + mx = x.view(np.matrix) + assert_(mx.ptp() == 3) + assert_(np.all(mx.ptp(0) == np.array([2, 2]))) + assert_(np.all(mx.ptp(1) == np.array([1, 1]))) + + def test_var(self): + x = np.arange(9).reshape((3, 3)) + mx = x.view(np.matrix) + assert_equal(x.var(ddof=0), mx.var(ddof=0)) + assert_equal(x.var(ddof=1), mx.var(ddof=1)) + + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], + [3., 4.]]) + mA = matrix(A) + assert_(np.allclose(linalg.inv(A), mA.I)) + assert_(np.all(np.array(np.transpose(A) == mA.T))) + assert_(np.all(np.array(np.transpose(A) == mA.H))) + assert_(np.all(A == mA.A)) + + B = A + 2j * A + mB = matrix(B) + assert_(np.allclose(linalg.inv(B), mB.I)) + assert_(np.all(np.array(np.transpose(B) == mB.T))) + assert_(np.all(np.array(np.transpose(B).conj() == mB.H))) + + def test_pinv(self): + x = matrix(np.arange(6).reshape(2, 3)) + xpinv = matrix([[-0.77777778, 0.27777778], + [-0.11111111, 
0.11111111], + [ 0.55555556, -0.05555556]]) + assert_almost_equal(x.I, xpinv) + + def test_comparisons(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + mB = matrix(A) + 0.1 + assert_(np.all(mB == A + 0.1)) + assert_(np.all(mB == matrix(A + 0.1))) + assert_(not np.any(mB == matrix(A - 0.1))) + assert_(np.all(mA < mB)) + assert_(np.all(mA <= mB)) + assert_(np.all(mA <= mA)) + assert_(not np.any(mA < mA)) + + assert_(not np.any(mB < mA)) + assert_(np.all(mB >= mA)) + assert_(np.all(mB >= mB)) + assert_(not np.any(mB > mB)) + + assert_(np.all(mA == mA)) + assert_(not np.any(mA == mB)) + assert_(np.all(mB != mA)) + + assert_(not np.all(abs(mA) > 0)) + assert_(np.all(abs(mB) > 0)) + + def test_asmatrix(self): + A = np.arange(100).reshape(10, 10) + mA = asmatrix(A) + A[0, 0] = -10 + assert_(A[0, 0] == mA[0, 0]) + + def test_noaxis(self): + A = matrix([[1, 0], [0, 1]]) + assert_(A.sum() == matrix(2)) + assert_(A.mean() == matrix(0.5)) + + def test_repr(self): + A = matrix([[1, 0], [0, 1]]) + assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])") + + def test_make_bool_matrix_from_str(self): + A = matrix('True; True; False') + B = matrix([[True], [True], [False]]) + assert_array_equal(A, B) + + +class TestCasting: + def test_basic(self): + A = np.arange(100).reshape(10, 10) + mA = matrix(A) + + mB = mA.copy() + O = np.ones((10, 10), np.float64) * 0.1 + mB = mB + O + assert_(mB.dtype.type == np.float64) + assert_(np.all(mA != mB)) + assert_(np.all(mB == mA + 0.1)) + + mC = mA.copy() + O = np.ones((10, 10), np.complex128) + mC = mC * O + assert_(mC.dtype.type == np.complex128) + assert_(np.all(mA != mB)) + + +class TestAlgebra: + def test_basic(self): + import numpy.linalg as linalg + + A = np.array([[1., 2.], [3., 4.]]) + mA = matrix(A) + + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** i).A, B)) + B = np.dot(B, A) + + Ainv = linalg.inv(A) + B = np.identity(2) + for i in range(6): + assert_(np.allclose((mA ** -i).A, B)) + B = np.dot(B, Ainv) + + assert_(np.allclose((mA * mA).A, np.dot(A, A))) + assert_(np.allclose((mA + mA).A, (A + A))) + assert_(np.allclose((3 * mA).A, (3 * A))) + + mA2 = matrix(A) + mA2 *= 3 + assert_(np.allclose(mA2.A, 3 * A)) + + def test_pow(self): + """Test raising a matrix to an integer power works as expected.""" + m = matrix("1. 2.; 3.
4.") + m2 = m.copy() + m2 **= 2 + mi = m.copy() + mi **= -1 + m4 = m2.copy() + m4 **= 2 + assert_array_almost_equal(m2, m**2) + assert_array_almost_equal(m4, np.dot(m2, m2)) + assert_array_almost_equal(np.dot(mi, m), np.eye(2)) + + def test_scalar_type_pow(self): + m = matrix([[1, 2], [3, 4]]) + for scalar_t in [np.int8, np.uint8]: + two = scalar_t(2) + assert_array_almost_equal(m ** 2, m ** two) + + def test_notimplemented(self): + '''Check that 'not implemented' operations produce a failure.''' + A = matrix([[1., 2.], + [3., 4.]]) + + # __rpow__ + with assert_raises(TypeError): + 1.0**A + + # __mul__ with something not a list, ndarray, tuple, or scalar + with assert_raises(TypeError): + A * object() + + +class TestMatrixReturn: + def test_instance_methods(self): + a = matrix([1.0], dtype='f8') + methodargs = { + 'astype': ('intc',), + 'clip': (0.0, 1.0), + 'compress': ([1],), + 'repeat': (1,), + 'reshape': (1,), + 'swapaxes': (0, 0), + 'dot': np.array([1.0]), + } + excluded_methods = [ + 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield', + 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize', + 'searchsorted', 'setflags', 'setfield', 'sort', + 'partition', 'argpartition', 'newbyteorder', 'to_device', + 'take', 'tofile', 'tolist', 'tobytes', 'all', 'any', + 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp', + 'prod', 'std', 'ctypes', 'itemset', 'bitwise_count', + ] + for attrib in dir(a): + if attrib.startswith('_') or attrib in excluded_methods: + continue + f = getattr(a, attrib) + if isinstance(f, collections.abc.Callable): + # reset contents of a + a.astype('f8') + a.fill(1.0) + args = methodargs.get(attrib, ()) + b = f(*args) + assert_(type(b) is matrix, f"{attrib}") + assert_(type(a.real) is matrix) + assert_(type(a.imag) is matrix) + c, d = matrix([0.0]).nonzero() + assert_(type(c) is np.ndarray) + assert_(type(d) is np.ndarray) + + +class TestIndexing: + def test_basic(self): + x = asmatrix(np.zeros((3, 2), float)) + y = np.zeros((3, 1), float) + y[:, 0] = [0.8, 0.2, 0.3] + x[:, 1] = y > 0.5 + assert_equal(x, [[0, 1], [0, 0], [0, 0]]) + + +class TestNewScalarIndexing: + a = matrix([[1, 2], [3, 4]]) + + def test_dimesions(self): + a = self.a + x = a[0] + assert_equal(x.ndim, 2) + + def test_array_from_matrix_list(self): + a = self.a + x = np.array([a, a]) + assert_equal(x.shape, [2, 2, 2]) + + def test_array_to_list(self): + a = self.a + assert_equal(a.tolist(), [[1, 2], [3, 4]]) + + def test_fancy_indexing(self): + a = self.a + x = a[1, [0, 1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4, 3]])) + x = a[[1, 0]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[3, 4], [1, 2]])) + x = a[[[1], [0]], [[1, 0], [0, 1]]] + assert_(isinstance(x, matrix)) + assert_equal(x, matrix([[4, 3], [1, 2]])) + + def test_matrix_element(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x[0][0], matrix([[1, 2, 3]])) + assert_equal(x[0][0].shape, (1, 3)) + assert_equal(x[0].shape, (1, 3)) + assert_equal(x[:, 0].shape, (2, 1)) + + x = matrix(0) + assert_equal(x[0, 0], 0) + assert_equal(x[0], 0) + assert_equal(x[:, 0].shape, x.shape) + + def test_scalar_indexing(self): + x = asmatrix(np.zeros((3, 2), float)) + assert_equal(x[0, 0], x[0][0]) + + def test_row_column_indexing(self): + x = asmatrix(np.eye(2)) + assert_array_equal(x[0, :], [[1, 0]]) + assert_array_equal(x[1, :], [[0, 1]]) + assert_array_equal(x[:, 0], [[1], [0]]) + assert_array_equal(x[:, 1], [[0], [1]]) + + def test_boolean_indexing(self): + A = np.arange(6) + A.shape = 
(3, 2) + x = asmatrix(A) + assert_array_equal(x[:, np.array([True, False])], x[:, 0]) + assert_array_equal(x[np.array([True, False, False]), :], x[0, :]) + + def test_list_indexing(self): + A = np.arange(6) + A.shape = (3, 2) + x = asmatrix(A) + assert_array_equal(x[:, [1, 0]], x[:, ::-1]) + assert_array_equal(x[[2, 1, 0], :], x[::-1, :]) + + +class TestPower: + def test_returntype(self): + a = np.array([[0, 1], [0, 0]]) + assert_(type(matrix_power(a, 2)) is np.ndarray) + a = asmatrix(a) + assert_(type(matrix_power(a, 2)) is matrix) + + def test_list(self): + assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]]) + + +class TestShape: + + a = np.array([[1], [2]]) + m = matrix([[1], [2]]) + + def test_shape(self): + assert_equal(self.a.shape, (2, 1)) + assert_equal(self.m.shape, (2, 1)) + + def test_numpy_ravel(self): + assert_equal(np.ravel(self.a).shape, (2,)) + assert_equal(np.ravel(self.m).shape, (2,)) + + def test_member_ravel(self): + assert_equal(self.a.ravel().shape, (2,)) + assert_equal(self.m.ravel().shape, (1, 2)) + + def test_member_flatten(self): + assert_equal(self.a.flatten().shape, (2,)) + assert_equal(self.m.flatten().shape, (1, 2)) + + def test_numpy_ravel_order(self): + x = np.array([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6]) + assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6]) + assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6]) + + def test_matrix_ravel_order(self): + x = matrix([[1, 2, 3], [4, 5, 6]]) + assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]]) + assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]]) + assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]]) + + def test_array_memory_sharing(self): + assert_(np.may_share_memory(self.a, self.a.ravel())) + assert_(not np.may_share_memory(self.a, self.a.flatten())) + + def test_matrix_memory_sharing(self): + assert_(np.may_share_memory(self.m, self.m.ravel())) + assert_(not np.may_share_memory(self.m, self.m.flatten())) + + def test_expand_dims_matrix(self): + # matrices are always 2d - so expand_dims only makes sense when the + # type is changed away from matrix. + a = np.arange(10).reshape((2, 5)).view(np.matrix) + expanded = np.expand_dims(a, axis=1) + assert_equal(expanded.ndim, 3) + assert_(not isinstance(expanded, np.matrix)) diff --git a/python/numpy/matrixlib/tests/test_interaction.py b/python/numpy/matrixlib/tests/test_interaction.py new file mode 100644 index 000000000..87d133a2c --- /dev/null +++ b/python/numpy/matrixlib/tests/test_interaction.py @@ -0,0 +1,360 @@ +"""Tests of interaction of matrix with other parts of numpy. + +Note that tests with MaskedArray and linalg are done in separate files. +""" +import textwrap +import warnings + +import pytest + +import numpy as np +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, +) + + +def test_fancy_indexing(): + # The matrix class messes with the shape. 
While this is always + # weird (getitem is not used, it does not have setitem nor knows + # about fancy indexing), this tests gh-3110 + # 2018-04-29: moved here from core.tests.test_index. + m = np.matrix([[1, 2], [3, 4]]) + + assert_(isinstance(m[[0, 1, 0], :], np.matrix)) + + # gh-3110. Note the transpose currently because matrices do *not* + # support dimension fixing for fancy indexing correctly. + x = np.asmatrix(np.arange(50).reshape(5, 10)) + assert_equal(x[:2, np.array(-1)], x[:2, -1].T) + + +def test_polynomial_mapdomain(): + # test that polynomial preserves the matrix subtype. + # 2018-04-29: moved here from polynomial.tests.polyutils. + dom1 = [0, 4] + dom2 = [1, 3] + x = np.matrix([dom1, dom1]) + res = np.polynomial.polyutils.mapdomain(x, dom1, dom2) + assert_(isinstance(res, np.matrix)) + + +def test_sort_matrix_none(): + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.sort(a, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(actual) is np.matrix) + + +def test_partition_matrix_none(): + # gh-4301 + # 2018-04-29: moved here from core.tests.test_multiarray + a = np.matrix([[2, 1, 0]]) + actual = np.partition(a, 1, axis=None) + expected = np.matrix([[0, 1, 2]]) + assert_equal(actual, expected) + assert_(type(actual) is np.matrix) + + +def test_dot_scalar_and_matrix_of_objects(): + # Ticket #2469 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.dot(arr, 3), desired) + assert_equal(np.dot(3, arr), desired) + + +def test_inner_scalar_and_matrix(): + # 2018-04-29: moved here from core.tests.test_multiarray + for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?': + sca = np.array(3, dtype=dt)[()] + arr = np.matrix([[1, 2], [3, 4]], dtype=dt) + desired = np.matrix([[3, 6], [9, 12]], dtype=dt) + assert_equal(np.inner(arr, sca), desired) + assert_equal(np.inner(sca, arr), desired) + + +def test_inner_scalar_and_matrix_of_objects(): + # Ticket #4482 + # 2018-04-29: moved here from core.tests.test_multiarray + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.inner(arr, 3), desired) + assert_equal(np.inner(3, arr), desired) + + +def test_iter_allocate_output_subtype(): + # Make sure that the subtype with priority wins + # 2018-04-29: moved here from core.tests.test_nditer, given the + # matrix specific shape test.
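+ # np.matrix defines __array_priority__ = 10.0, so when nditer allocates + # the output operand below, the matrix subtype should win over ndarray.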
+ + # matrix vs ndarray + a = np.matrix([[1, 2], [3, 4]]) + b = np.arange(4).reshape(2, 2).T + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + assert_(type(i.operands[2]) is np.matrix) + assert_(type(i.operands[2]) is not np.ndarray) + assert_equal(i.operands[2].shape, (2, 2)) + + # matrix always wants things to be 2D + b = np.arange(4).reshape(1, 2, 2) + assert_raises(RuntimeError, np.nditer, [a, b, None], [], + [['readonly'], ['readonly'], ['writeonly', 'allocate']]) + # but if subtypes are disabled, the result can still work + i = np.nditer([a, b, None], [], + [['readonly'], ['readonly'], + ['writeonly', 'allocate', 'no_subtype']]) + assert_(type(i.operands[2]) is np.ndarray) + assert_(type(i.operands[2]) is not np.matrix) + assert_equal(i.operands[2].shape, (1, 2, 2)) + + +def test_like_functions(): + # 2018-04-29: moved here from core.tests.test_numeric + a = np.matrix([[1, 2], [3, 4]]) + for like_function in np.zeros_like, np.ones_like, np.empty_like: + b = like_function(a) + assert_(type(b) is np.matrix) + + c = like_function(a, subok=False) + assert_(type(c) is not np.matrix) + + +def test_array_astype(): + # 2018-04-29: copied here from core.tests.test_api + # subok=True passes through a matrix + a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4') + b = a.astype('f4', subok=True, copy=False) + assert_(a is b) + + # subok=True is default, and creates a subtype on a cast + b = a.astype('i4', copy=False) + assert_equal(a, b) + assert_equal(type(b), np.matrix) + + # subok=False never returns a matrix + b = a.astype('f4', subok=False, copy=False) + assert_equal(a, b) + assert_(not (a is b)) + assert_(type(b) is not np.matrix) + + +def test_stack(): + # 2018-04-29: copied here from core.tests.test_shape_base + # check np.matrix cannot be stacked + m = np.matrix([[1, 2], [3, 4]]) + assert_raises_regex(ValueError, 'shape too large to be a matrix', + np.stack, [m, m]) + + +def test_object_scalar_multiply(): + # Tickets #2469 and #4482 + # 2018-04-29: moved here from core.tests.test_ufunc + arr = np.matrix([1, 2], dtype=object) + desired = np.matrix([[3, 6]], dtype=object) + assert_equal(np.multiply(arr, 3), desired) + assert_equal(np.multiply(3, arr), desired) + + +def test_nanfunctions_matrices(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in [np.nanmin, np.nanmax]: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + # check that rows of nan are dealt with for subclasses (#4628) + mat[1] = np.nan + for f in [np.nanmin, np.nanmax]: + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(not np.any(np.isnan(res))) + assert_(len(w) == 0) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0]) + and not np.isnan(res[2, 0])) + assert_(len(w) == 1, 'no warning raised') + assert_(issubclass(w[0].category, RuntimeWarning)) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + res = f(mat) + assert_(np.isscalar(res)) + assert_(not np.isnan(res)) + assert_(len(w) == 0) + + +def
test_nanfunctions_matrices_general(): + # Check that it works and that type and + # shape are preserved + # 2018-04-29: moved here from core.tests.test_nanfunctions + mat = np.matrix(np.eye(3)) + for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod, + np.nanmean, np.nanvar, np.nanstd): + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 1)) + res = f(mat) + assert_(np.isscalar(res)) + + for f in np.nancumsum, np.nancumprod: + res = f(mat, axis=0) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat, axis=1) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (3, 3)) + res = f(mat) + assert_(isinstance(res, np.matrix)) + assert_(res.shape == (1, 3 * 3)) + + +def test_average_matrix(): + # 2018-04-29: moved here from core.tests.test_function_base. + y = np.matrix(np.random.rand(5, 5)) + assert_array_equal(y.mean(0), np.average(y, 0)) + + a = np.matrix([[1, 2], [3, 4]]) + w = np.matrix([[1, 2], [3, 4]]) + + r = np.average(a, axis=0, weights=w) + assert_equal(type(r), np.matrix) + assert_equal(r, [[2.5, 10.0 / 3]]) + + +def test_dot_matrix(): + # Test to make sure matrices give the same answer as ndarrays + # 2018-04-29: moved here from core.tests.test_function_base. + x = np.linspace(0, 5) + y = np.linspace(-5, 0) + mx = np.matrix(x) + my = np.matrix(y) + r = np.dot(x, y) + mr = np.dot(mx, my.T) + assert_almost_equal(mr, r) + + +def test_ediff1d_matrix(): + # 2018-04-29: moved here from core.tests.test_arraysetops. + assert isinstance(np.ediff1d(np.matrix(1)), np.matrix) + assert isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix) + + +def test_apply_along_axis_matrix(): + # this test is particularly malicious because matrix + # refuses to become 1d + # 2018-04-29: moved here from core.tests.test_shape_base. + def double(row): + return row * 2 + + m = np.matrix([[0, 1], [2, 3]]) + expected = np.matrix([[0, 2], [4, 6]]) + + result = np.apply_along_axis(double, 0, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + result = np.apply_along_axis(double, 1, m) + assert_(isinstance(result, np.matrix)) + assert_array_equal(result, expected) + + +def test_kron_matrix(): + # 2018-04-29: moved here from core.tests.test_shape_base. + a = np.ones([2, 2]) + m = np.asmatrix(a) + assert_equal(type(np.kron(a, a)), np.ndarray) + assert_equal(type(np.kron(m, m)), np.matrix) + assert_equal(type(np.kron(a, m)), np.matrix) + assert_equal(type(np.kron(m, a)), np.matrix) + + +class TestConcatenatorMatrix: + # 2018-04-29: moved here from core.tests.test_index_tricks. + def test_matrix(self): + a = [1, 2] + b = [3, 4] + + ab_r = np.r_['r', a, b] + ab_c = np.r_['c', a, b] + + assert_equal(type(ab_r), np.matrix) + assert_equal(type(ab_c), np.matrix) + + assert_equal(np.array(ab_r), [[1, 2, 3, 4]]) + assert_equal(np.array(ab_c), [[1], [2], [3], [4]]) + + assert_raises(ValueError, lambda: np.r_['rc', a, b]) + + def test_matrix_scalar(self): + r = np.r_['r', [1, 2], 3] + assert_equal(type(r), np.matrix) + assert_equal(np.array(r), [[1, 2, 3]]) + + def test_matrix_builder(self): + a = np.array([1]) + b = np.array([2]) + c = np.array([3]) + d = np.array([4]) + actual = np.r_['a, b; c, d'] + expected = np.bmat([[a, b], [c, d]]) + + assert_equal(actual, expected) + assert_equal(type(actual), type(expected)) + + +def test_array_equal_error_message_matrix(): + # 2018-04-29: moved here from testing.tests.test_utils. 
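+ # The two operands compare equal element-wise; the failure must come + # from the shape mismatch between the (2,) array and the (1, 2) matrix.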
+ with pytest.raises(AssertionError) as exc_info: + assert_equal(np.array([1, 2]), np.matrix([1, 2])) + msg = str(exc_info.value) + msg_reference = textwrap.dedent("""\ + + Arrays are not equal + + (shapes (2,), (1, 2) mismatch) + ACTUAL: array([1, 2]) + DESIRED: matrix([[1, 2]])""") + assert_equal(msg, msg_reference) + + +def test_array_almost_equal_matrix(): + # Matrix slicing keeps things 2-D, while array does not necessarily. + # See gh-8452. + # 2018-04-29: moved here from testing.tests.test_utils. + m1 = np.matrix([[1., 2.]]) + m2 = np.matrix([[1., np.nan]]) + m3 = np.matrix([[1., -np.inf]]) + m4 = np.matrix([[np.nan, np.inf]]) + m5 = np.matrix([[1., 2.], [np.nan, np.inf]]) + for assert_func in assert_array_almost_equal, assert_almost_equal: + for m in m1, m2, m3, m4, m5: + assert_func(m, m) + a = np.array(m) + assert_func(a, m) + assert_func(m, a) diff --git a/python/numpy/matrixlib/tests/test_masked_matrix.py b/python/numpy/matrixlib/tests/test_masked_matrix.py new file mode 100644 index 000000000..e6df047ee --- /dev/null +++ b/python/numpy/matrixlib/tests/test_masked_matrix.py @@ -0,0 +1,240 @@ +import pickle + +import numpy as np +from numpy.ma.core import ( + MaskedArray, + MaskType, + add, + allequal, + divide, + getmask, + hypot, + log, + masked, + masked_array, + masked_values, + nomask, +) +from numpy.ma.extras import mr_ +from numpy.ma.testutils import assert_, assert_array_equal, assert_equal, assert_raises + + +class MMatrix(MaskedArray, np.matrix,): + + def __new__(cls, data, mask=nomask): + mat = np.matrix(data) + _data = MaskedArray.__new__(cls, data=mat, mask=mask) + return _data + + def __array_finalize__(self, obj): + np.matrix.__array_finalize__(self, obj) + MaskedArray.__array_finalize__(self, obj) + + @property + def _series(self): + _view = self.view(MaskedArray) + _view._sharedmask = False + return _view + + +class TestMaskedMatrix: + def test_matrix_indexing(self): + # Tests conversions and indexing + x1 = np.matrix([[1, 2, 3], [4, 3, 2]]) + x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]]) + x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]]) + x4 = masked_array(x1) + # test conversion to strings + str(x2) # raises? + repr(x2) # raises? 
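+ # (masked entries render via np.ma's masked_print_option, '--' by default)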
+ # tests of indexing + assert_(type(x2[1, 0]) is type(x1[1, 0])) + assert_(x1[1, 0] == x2[1, 0]) + assert_(x2[1, 1] is masked) + assert_equal(x1[0, 2], x2[0, 2]) + assert_equal(x1[0, 1:], x2[0, 1:]) + assert_equal(x1[:, 2], x2[:, 2]) + assert_equal(x1[:], x2[:]) + assert_equal(x1[1:], x3[1:]) + x1[0, 2] = 9 + x2[0, 2] = 9 + assert_equal(x1, x2) + x1[0, 1:] = 99 + x2[0, 1:] = 99 + assert_equal(x1, x2) + x2[0, 1] = masked + assert_equal(x1, x2) + x2[0, 1:] = masked + assert_equal(x1, x2) + x2[0, :] = x1[0, :] + x2[0, 1] = masked + assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]]))) + x3[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0]))) + assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0]))) + x4[1, :] = masked_array([1, 2, 3], [1, 1, 0]) + assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0]))) + assert_(allequal(x4[1], masked_array([1, 2, 3]))) + x1 = np.matrix(np.arange(5) * 1.0) + x2 = masked_values(x1, 3.0) + assert_equal(x1, x2) + assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType), + x2.mask)) + assert_equal(3.0, x2.fill_value) + + def test_pickling_subbaseclass(self): + # Test pickling w/ a subclass of ndarray + a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2) + for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): + a_pickled = pickle.loads(pickle.dumps(a, protocol=proto)) + assert_equal(a_pickled._mask, a._mask) + assert_equal(a_pickled, a) + assert_(isinstance(a_pickled._data, np.matrix)) + + def test_count_mean_with_matrix(self): + m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2))) + + assert_equal(m.count(axis=0).shape, (1, 2)) + assert_equal(m.count(axis=1).shape, (2, 1)) + + # Make sure broadcasting inside mean and var work + assert_equal(m.mean(axis=0), [[2., 3.]]) + assert_equal(m.mean(axis=1), [[1.5], [3.5]]) + + def test_flat(self): + # Test that flat can return items even for matrices [#4585, #4615] + # test simple access + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + assert_equal(test.flat[1], 2) + assert_equal(test.flat[2], masked) + assert_(np.all(test.flat[0:2] == test[0, 0:2])) + # Test flat on masked_matrices + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + test.flat = masked_array([3, 2, 1], mask=[1, 0, 0]) + control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0]) + assert_equal(test, control) + # Test setting + test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1]) + testflat = test.flat + testflat[:] = testflat[[2, 1, 0]] + assert_equal(test, control) + testflat[0] = 9 + # test that matrices keep the correct shape (#4615) + a = masked_array(np.matrix(np.eye(2)), mask=0) + b = a.flat + b01 = b[:2] + assert_equal(b01.data, np.array([[1., 0.]])) + assert_equal(b01.mask, np.array([[False, False]])) + + def test_allany_onmatrices(self): + x = np.array([[0.13, 0.26, 0.90], + [0.28, 0.33, 0.63], + [0.31, 0.87, 0.70]]) + X = np.matrix(x) + m = np.array([[True, False, False], + [False, False, False], + [True, True, False]], dtype=np.bool) + mX = masked_array(X, mask=m) + mXbig = (mX > 0.5) + mXsmall = (mX < 0.5) + + assert_(not mXbig.all()) + assert_(mXbig.any()) + assert_equal(mXbig.all(0), np.matrix([False, False, True])) + assert_equal(mXbig.all(1), np.matrix([False, False, True]).T) + assert_equal(mXbig.any(0), np.matrix([False, False, True])) + assert_equal(mXbig.any(1), np.matrix([True, True, True]).T) + + assert_(not mXsmall.all()) + assert_(mXsmall.any()) + assert_equal(mXsmall.all(0), 
np.matrix([True, True, False])) + assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T) + assert_equal(mXsmall.any(0), np.matrix([True, True, False])) + assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T) + + def test_compressed(self): + a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0]) + b = a.compressed() + assert_equal(b, a) + assert_(isinstance(b, np.matrix)) + a[0, 0] = masked + b = a.compressed() + assert_equal(b, [[2, 3, 4]]) + + def test_ravel(self): + a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]]) + aravel = a.ravel() + assert_equal(aravel.shape, (1, 5)) + assert_equal(aravel._mask.shape, a.shape) + + def test_view(self): + # Test view w/ flexible dtype + iterator = list(zip(np.arange(10), np.random.rand(10))) + data = np.array(iterator) + a = masked_array(iterator, dtype=[('a', float), ('b', float)]) + a.mask[0] = (1, 0) + test = a.view((float, 2), np.matrix) + assert_equal(test, data) + assert_(isinstance(test, np.matrix)) + assert_(not isinstance(test, MaskedArray)) + + +class TestSubclassing: + # Test suite for masked subclasses of ndarray. + + def setup_method(self): + x = np.arange(5, dtype='float') + mx = MMatrix(x, mask=[0, 1, 0, 0, 0]) + self.data = (x, mx) + + def test_maskedarray_subclassing(self): + # Tests subclassing MaskedArray + (x, mx) = self.data + assert_(isinstance(mx._data, np.matrix)) + + def test_masked_unary_operations(self): + # Tests masked_unary_operation + (x, mx) = self.data + with np.errstate(divide='ignore'): + assert_(isinstance(log(mx), MMatrix)) + assert_equal(log(x), np.log(x)) + + def test_masked_binary_operations(self): + # Tests masked_binary_operation + (x, mx) = self.data + # Result should be a MMatrix + assert_(isinstance(add(mx, mx), MMatrix)) + assert_(isinstance(add(mx, x), MMatrix)) + # Result should work + assert_equal(add(mx, x), mx + x) + assert_(isinstance(add(mx, mx)._data, np.matrix)) + with assert_raises(TypeError): + add.outer(mx, mx) + assert_(isinstance(hypot(mx, mx), MMatrix)) + assert_(isinstance(hypot(mx, x), MMatrix)) + + def test_masked_binary_operations2(self): + # Tests domained_masked_binary_operation + (x, mx) = self.data + xmx = masked_array(mx.data.__array__(), mask=mx.mask) + assert_(isinstance(divide(mx, mx), MMatrix)) + assert_(isinstance(divide(mx, x), MMatrix)) + assert_equal(divide(mx, mx), divide(xmx, xmx)) + + +class TestConcatenator: + # Tests for mr_, the equivalent of r_ for masked arrays. + + def test_matrix_builder(self): + assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4']) + + def test_matrix(self): + # Test consistency with unmasked version. If we ever deprecate + # matrix, this test should either still pass, or both actual and + # expected should fail to be built.
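+ # np.r_['r', ...] already returns a 1x3 np.matrix, so the masked + # variant should wrap the same inner type.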
+ actual = mr_['r', 1, 2, 3] + expected = np.ma.array(np.r_['r', 1, 2, 3]) + assert_array_equal(actual, expected) + + # outer type is masked array, inner type is matrix + assert_equal(type(actual), type(expected)) + assert_equal(type(actual.data), type(expected.data)) diff --git a/python/numpy/matrixlib/tests/test_matrix_linalg.py b/python/numpy/matrixlib/tests/test_matrix_linalg.py new file mode 100644 index 000000000..4e639653b --- /dev/null +++ b/python/numpy/matrixlib/tests/test_matrix_linalg.py @@ -0,0 +1,105 @@ +""" Test functions for linalg module using the matrix class.""" +import numpy as np +from numpy.linalg.tests.test_linalg import ( + CondCases, + DetCases, + EigCases, + EigvalsCases, + InvCases, + LinalgCase, + LinalgTestCase, + LstsqCases, + PinvCases, + SolveCases, + SVDCases, + _TestNorm2D, + _TestNormDoubleBase, + _TestNormInt64Base, + _TestNormSingleBase, + apply_tag, +) +from numpy.linalg.tests.test_linalg import TestQR as _TestQR + +CASES = [] + +# square test cases +CASES += apply_tag('square', [ + LinalgCase("0x0_matrix", + np.empty((0, 0), dtype=np.double).view(np.matrix), + np.empty((0, 1), dtype=np.double).view(np.matrix), + tags={'size-0'}), + LinalgCase("matrix_b_only", + np.array([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), + LinalgCase("matrix_a_and_b", + np.matrix([[1., 2.], [3., 4.]]), + np.matrix([2., 1.]).T), +]) + +# hermitian test-cases +CASES += apply_tag('hermitian', [ + LinalgCase("hmatrix_a_and_b", + np.matrix([[1., 2.], [2., 1.]]), + None), +]) +# No need to make generalized or strided cases for matrices. + + +class MatrixTestCase(LinalgTestCase): + TEST_CASES = CASES + + +class TestSolveMatrix(SolveCases, MatrixTestCase): + pass + + +class TestInvMatrix(InvCases, MatrixTestCase): + pass + + +class TestEigvalsMatrix(EigvalsCases, MatrixTestCase): + pass + + +class TestEigMatrix(EigCases, MatrixTestCase): + pass + + +class TestSVDMatrix(SVDCases, MatrixTestCase): + pass + + +class TestCondMatrix(CondCases, MatrixTestCase): + pass + + +class TestPinvMatrix(PinvCases, MatrixTestCase): + pass + + +class TestDetMatrix(DetCases, MatrixTestCase): + pass + + +class TestLstsqMatrix(LstsqCases, MatrixTestCase): + pass + + +class _TestNorm2DMatrix(_TestNorm2D): + array = np.matrix + + +class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase): + pass + + +class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase): + pass + + +class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base): + pass + + +class TestQRMatrix(_TestQR): + array = np.matrix diff --git a/python/numpy/matrixlib/tests/test_multiarray.py b/python/numpy/matrixlib/tests/test_multiarray.py new file mode 100644 index 000000000..2d9d1f8ef --- /dev/null +++ b/python/numpy/matrixlib/tests/test_multiarray.py @@ -0,0 +1,17 @@ +import numpy as np +from numpy.testing import assert_, assert_array_equal, assert_equal + + +class TestView: + def test_type(self): + x = np.array([1, 2, 3]) + assert_(isinstance(x.view(np.matrix), np.matrix)) + + def test_keywords(self): + x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + # We must be specific about the endianness here: + y = x.view(dtype='<i2', type=np.matrix) + assert_array_equal(y, [[513]]) + + assert_(isinstance(y, np.matrix)) + assert_equal(y.dtype, np.dtype('<i2')) diff --git a/python/numpy/polynomial/__init__.py b/python/numpy/polynomial/__init__.py new file mode 100644 --- /dev/null +++ b/python/numpy/polynomial/__init__.py +For example, to fit a Chebyshev series with degree 1 to data given by +arrays ``xdata`` and ``ydata``, the `~chebyshev.Chebyshev.fit` class +method:: + + >>> from numpy.polynomial import Chebyshev + >>> xdata = [1, 2, 3, 4] + >>> ydata = [1, 4, 9, 16] + >>> c = Chebyshev.fit(xdata, ydata, deg=1) + +is preferred over the `chebyshev.chebfit` function from the +``np.polynomial.chebyshev`` module:: + + >>> from numpy.polynomial.chebyshev import chebfit + >>> c = chebfit(xdata, ydata, deg=1) + +See :doc:`routines.polynomials.classes` for more
details. + +Convenience Classes +=================== + +The following lists the various constants and methods common to all of +the classes representing the various kinds of polynomials. In the following, +the term ``Poly`` represents any one of the convenience classes (e.g. +`~polynomial.Polynomial`, `~chebyshev.Chebyshev`, `~hermite.Hermite`, etc.) +while the lowercase ``p`` represents an **instance** of a polynomial class. + +Constants +--------- + +- ``Poly.domain`` -- Default domain +- ``Poly.window`` -- Default window +- ``Poly.basis_name`` -- String used to represent the basis +- ``Poly.maxpower`` -- Maximum value ``n`` such that ``p**n`` is allowed + +Creation +-------- + +Methods for creating polynomial instances. + +- ``Poly.basis(degree)`` -- Basis polynomial of given degree +- ``Poly.identity()`` -- ``p`` where ``p(x) = x`` for all ``x`` +- ``Poly.fit(x, y, deg)`` -- ``p`` of degree ``deg`` with coefficients + determined by the least-squares fit to the data ``x``, ``y`` +- ``Poly.fromroots(roots)`` -- ``p`` with specified roots +- ``p.copy()`` -- Create a copy of ``p`` + +Conversion +---------- + +Methods for converting a polynomial instance of one kind to another. + +- ``p.cast(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` +- ``p.convert(Poly)`` -- Convert ``p`` to instance of kind ``Poly`` or map + between ``domain`` and ``window`` + +Calculus +-------- +- ``p.deriv()`` -- Take the derivative of ``p`` +- ``p.integ()`` -- Integrate ``p`` + +Validation +---------- +- ``Poly.has_samecoef(p1, p2)`` -- Check if coefficients match +- ``Poly.has_samedomain(p1, p2)`` -- Check if domains match +- ``Poly.has_sametype(p1, p2)`` -- Check if types match +- ``Poly.has_samewindow(p1, p2)`` -- Check if windows match + +Misc +---- +- ``p.linspace()`` -- Return ``x, p(x)`` at equally-spaced points in ``domain`` +- ``p.mapparms()`` -- Return the parameters for the linear mapping between + ``domain`` and ``window``. +- ``p.roots()`` -- Return the roots of ``p``. +- ``p.trim()`` -- Remove trailing coefficients. +- ``p.cutdeg(degree)`` -- Truncate ``p`` to given degree +- ``p.truncate(size)`` -- Truncate ``p`` to given size + +""" +from .chebyshev import Chebyshev +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial + +__all__ = [ # noqa: F822 + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + + +def set_default_printstyle(style): + """ + Set the default format for the string representation of polynomials. + + Values for ``style`` must be valid inputs to ``__format__``, i.e. 'ascii' + or 'unicode'. + + Parameters + ---------- + style : str + Format string for default printing style. Must be either 'ascii' or + 'unicode'. + + Notes + ----- + The default format depends on the platform: 'unicode' is used on + Unix-based systems and 'ascii' on Windows. This determination is based on + default font support for the unicode superscript and subscript ranges. 
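+ The flag set here is stored on ``ABCPolyBase._use_unicode``, so it + changes the default for all polynomial convenience classes at once.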
+ + Examples + -------- + >>> p = np.polynomial.Polynomial([1, 2, 3]) + >>> c = np.polynomial.Chebyshev([1, 2, 3]) + >>> np.polynomial.set_default_printstyle('unicode') + >>> print(p) + 1.0 + 2.0·x + 3.0·x² + >>> print(c) + 1.0 + 2.0·T₁(x) + 3.0·T₂(x) + >>> np.polynomial.set_default_printstyle('ascii') + >>> print(p) + 1.0 + 2.0 x + 3.0 x**2 + >>> print(c) + 1.0 + 2.0 T_1(x) + 3.0 T_2(x) + >>> # Formatting supersedes all class/package-level defaults + >>> print(f"{p:unicode}") + 1.0 + 2.0·x + 3.0·x² + """ + if style not in ('unicode', 'ascii'): + raise ValueError( + f"Unsupported format string '{style}'. Valid options are 'ascii' " + f"and 'unicode'" + ) + _use_unicode = True + if style == 'ascii': + _use_unicode = False + from ._polybase import ABCPolyBase + ABCPolyBase._use_unicode = _use_unicode + + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/python/numpy/polynomial/__init__.pyi b/python/numpy/polynomial/__init__.pyi new file mode 100644 index 000000000..6fb0fb5ec --- /dev/null +++ b/python/numpy/polynomial/__init__.pyi @@ -0,0 +1,25 @@ +from typing import Final, Literal + +from . import chebyshev, hermite, hermite_e, laguerre, legendre, polynomial +from .chebyshev import Chebyshev +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre +from .legendre import Legendre +from .polynomial import Polynomial + +__all__ = [ + "set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] + +def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... + +from numpy._pytesttester import PytestTester as _PytestTester + +test: Final[_PytestTester]
diff --git a/python/numpy/polynomial/_polybase.py b/python/numpy/polynomial/_polybase.py new file mode 100644 index 000000000..f89343340 --- /dev/null +++ b/python/numpy/polynomial/_polybase.py @@ -0,0 +1,1191 @@ +""" +Abstract base class for the various polynomial classes. + +The ABCPolyBase class provides the methods needed to implement the common API +for the various polynomial classes. It operates as a mixin, using the +abc module from the stdlib. + +""" +import abc +import numbers +import os +from collections.abc import Callable + +import numpy as np + +from . import polyutils as pu + +__all__ = ['ABCPolyBase'] + +class ABCPolyBase(abc.ABC): + """An abstract base class for immutable series classes. + + ABCPolyBase provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the + methods listed below. + + Parameters + ---------- + coef : array_like + Series coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where + ``P_i`` is the basis polynomial of degree ``i``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is the derived class domain. + window : (2,) array_like, optional + Window, see domain for its use. The default value is the + derived class window. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + Attributes + ---------- + coef : (N,) ndarray + Series coefficients in order of increasing degree. + domain : (2,) ndarray + Domain that is mapped to window. + window : (2,) ndarray + Window that domain is mapped to. + symbol : str + Symbol representing the independent variable. + + Class Attributes + ---------------- + maxpower : int + Maximum power allowed, i.e., the largest number ``n`` such that + ``p(x)**n`` is allowed. This is to limit runaway polynomial size. + domain : (2,) ndarray + Default domain of the class. + window : (2,) ndarray + Default window of the class. + + """ + + # Not hashable + __hash__ = None + + # Opt out of numpy ufuncs and Python ops with ndarray subclasses. + __array_ufunc__ = None + + # Limit runaway size.
T_n^m has degree n*m + maxpower = 100 + + # Unicode character mappings for improved __str__ + _superscript_mapping = str.maketrans({ + "0": "⁰", + "1": "¹", + "2": "²", + "3": "³", + "4": "⁴", + "5": "⁵", + "6": "⁶", + "7": "⁷", + "8": "⁸", + "9": "⁹" + }) + _subscript_mapping = str.maketrans({ + "0": "₀", + "1": "₁", + "2": "₂", + "3": "₃", + "4": "₄", + "5": "₅", + "6": "₆", + "7": "₇", + "8": "₈", + "9": "₉" + }) + # Some fonts don't support full unicode character ranges necessary for + # the full set of superscripts and subscripts, including common/default + # fonts in Windows shells/terminals. Therefore, default to ascii-only + # printing on Windows. + _use_unicode = not os.name == 'nt' + + @property + def symbol(self): + return self._symbol + + @property + @abc.abstractmethod + def domain(self): + pass + + @property + @abc.abstractmethod + def window(self): + pass + + @property + @abc.abstractmethod + def basis_name(self): + pass + + @staticmethod + @abc.abstractmethod + def _add(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _sub(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _mul(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _div(c1, c2): + pass + + @staticmethod + @abc.abstractmethod + def _pow(c, pow, maxpower=None): + pass + + @staticmethod + @abc.abstractmethod + def _val(x, c): + pass + + @staticmethod + @abc.abstractmethod + def _int(c, m, k, lbnd, scl): + pass + + @staticmethod + @abc.abstractmethod + def _der(c, m, scl): + pass + + @staticmethod + @abc.abstractmethod + def _fit(x, y, deg, rcond, full): + pass + + @staticmethod + @abc.abstractmethod + def _line(off, scl): + pass + + @staticmethod + @abc.abstractmethod + def _roots(c): + pass + + @staticmethod + @abc.abstractmethod + def _fromroots(r): + pass + + def has_samecoef(self, other): + """Check if coefficients match. + + Parameters + ---------- + other : class instance + The other class must have the ``coef`` attribute. + + Returns + ------- + bool : boolean + True if the coefficients are the same, False otherwise. + + """ + return ( + len(self.coef) == len(other.coef) + and np.all(self.coef == other.coef) + ) + + def has_samedomain(self, other): + """Check if domains match. + + Parameters + ---------- + other : class instance + The other class must have the ``domain`` attribute. + + Returns + ------- + bool : boolean + True if the domains are the same, False otherwise. + + """ + return np.all(self.domain == other.domain) + + def has_samewindow(self, other): + """Check if windows match. + + Parameters + ---------- + other : class instance + The other class must have the ``window`` attribute. + + Returns + ------- + bool : boolean + True if the windows are the same, False otherwise. + + """ + return np.all(self.window == other.window) + + def has_sametype(self, other): + """Check if types match. + + Parameters + ---------- + other : object + Class instance. + + Returns + ------- + bool : boolean + True if other is same class as self + + """ + return isinstance(other, self.__class__) + + def _get_coefficients(self, other): + """Interpret other as polynomial coefficients. + + The `other` argument is checked to see if it is of the same + class as self with identical domain and window. If so, + return its coefficients, otherwise return `other`. + + Parameters + ---------- + other : anything + Object to be checked. + + Returns + ------- + coef + The coefficients of ``other`` if it is a compatible instance + of ABCPolyBase, otherwise ``other``.
+ + Raises + ------ + TypeError + When `other` is an incompatible instance of ABCPolyBase. + + """ + if isinstance(other, ABCPolyBase): + if not isinstance(other, self.__class__): + raise TypeError("Polynomial types differ") + elif not np.all(self.domain == other.domain): + raise TypeError("Domains differ") + elif not np.all(self.window == other.window): + raise TypeError("Windows differ") + elif self.symbol != other.symbol: + raise ValueError("Polynomial symbols differ") + return other.coef + return other + + def __init__(self, coef, domain=None, window=None, symbol='x'): + [coef] = pu.as_series([coef], trim=False) + self.coef = coef + + if domain is not None: + [domain] = pu.as_series([domain], trim=False) + if len(domain) != 2: + raise ValueError("Domain has wrong number of elements.") + self.domain = domain + + if window is not None: + [window] = pu.as_series([window], trim=False) + if len(window) != 2: + raise ValueError("Window has wrong number of elements.") + self.window = window + + # Validation for symbol + try: + if not symbol.isidentifier(): + raise ValueError( + "Symbol string must be a valid Python identifier" + ) + # If a user passes in something other than a string, the above + # results in an AttributeError. Catch this and raise a more + # informative exception + except AttributeError: + raise TypeError("Symbol must be a non-empty string") + + self._symbol = symbol + + def __repr__(self): + coef = repr(self.coef)[6:-1] + domain = repr(self.domain)[6:-1] + window = repr(self.window)[6:-1] + name = self.__class__.__name__ + return (f"{name}({coef}, domain={domain}, window={window}, " + f"symbol='{self.symbol}')") + + def __format__(self, fmt_str): + if fmt_str == '': + return self.__str__() + if fmt_str not in ('ascii', 'unicode'): + raise ValueError( + f"Unsupported format string '{fmt_str}' passed to " + f"{self.__class__}.__format__. Valid options are " + f"'ascii' and 'unicode'" + ) + if fmt_str == 'ascii': + return self._generate_string(self._str_term_ascii) + return self._generate_string(self._str_term_unicode) + + def __str__(self): + if self._use_unicode: + return self._generate_string(self._str_term_unicode) + return self._generate_string(self._str_term_ascii) + + def _generate_string(self, term_method): + """ + Generate the full string representation of the polynomial, using + ``term_method`` to generate each polynomial term. + """ + # Get configuration for line breaks + linewidth = np.get_printoptions().get('linewidth', 75) + if linewidth < 1: + linewidth = 1 + out = pu.format_float(self.coef[0]) + + off, scale = self.mapparms() + + scaled_symbol, needs_parens = self._format_term(pu.format_float, + off, scale) + if needs_parens: + scaled_symbol = '(' + scaled_symbol + ')' + + for i, coef in enumerate(self.coef[1:]): + out += " " + power = str(i + 1) + # Polynomial coefficient + # The coefficient array can be an object array with elements that + # will raise a TypeError with >= 0 (e.g. strings or Python + # complex). In this case, represent the coefficient as-is. 
+ try: + if coef >= 0: + next_term = "+ " + pu.format_float(coef, parens=True) + else: + next_term = "- " + pu.format_float(-coef, parens=True) + except TypeError: + next_term = f"+ {coef}" + # Polynomial term + next_term += term_method(power, scaled_symbol) + # Length of the current line with next term added + line_len = len(out.split('\n')[-1]) + len(next_term) + # If not the last term in the polynomial, it will be two + # characters longer due to the +/- with the next term + if i < len(self.coef[1:]) - 1: + line_len += 2 + # Handle linebreaking + if line_len >= linewidth: + next_term = next_term.replace(" ", "\n", 1) + out += next_term + return out + + @classmethod + def _str_term_unicode(cls, i, arg_str): + """ + String representation of single polynomial term using unicode + characters for superscripts and subscripts. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_unicode(cls, i, arg_str)" + ) + return (f"·{cls.basis_name}{i.translate(cls._subscript_mapping)}" + f"({arg_str})") + + @classmethod + def _str_term_ascii(cls, i, arg_str): + """ + String representation of a single polynomial term using ** and _ to + represent superscripts and subscripts, respectively. + """ + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis_name, or override " + "_str_term_ascii(cls, i, arg_str)" + ) + return f" {cls.basis_name}_{i}({arg_str})" + + @classmethod + def _repr_latex_term(cls, i, arg_str, needs_parens): + if cls.basis_name is None: + raise NotImplementedError( + "Subclasses must define either a basis name, or override " + "_repr_latex_term(i, arg_str, needs_parens)") + # since we always add parens, we don't care if the expression needs them + return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})" + + @staticmethod + def _repr_latex_scalar(x, parens=False): + # TODO: we're stuck with disabling math formatting until we handle + # exponents in this function + return fr'\text{{{pu.format_float(x, parens=parens)}}}' + + def _format_term(self, scalar_format: Callable, off: float, scale: float): + """ Format a single term in the expansion """ + if off == 0 and scale == 1: + term = self.symbol + needs_parens = False + elif scale == 1: + term = f"{scalar_format(off)} + {self.symbol}" + needs_parens = True + elif off == 0: + term = f"{scalar_format(scale)}{self.symbol}" + needs_parens = True + else: + term = ( + f"{scalar_format(off)} + " + f"{scalar_format(scale)}{self.symbol}" + ) + needs_parens = True + return term, needs_parens + + def _repr_latex_(self): + # get the scaled argument string to the basis functions + off, scale = self.mapparms() + term, needs_parens = self._format_term(self._repr_latex_scalar, + off, scale) + + mute = r"\color{{LightGray}}{{{}}}".format + + parts = [] + for i, c in enumerate(self.coef): + # prevent duplication of + and - signs + if i == 0: + coef_str = f"{self._repr_latex_scalar(c)}" + elif not isinstance(c, numbers.Real): + coef_str = f" + ({self._repr_latex_scalar(c)})" + elif c >= 0: + coef_str = f" + {self._repr_latex_scalar(c, parens=True)}" + else: + coef_str = f" - {self._repr_latex_scalar(-c, parens=True)}" + + # produce the string for the term + term_str = self._repr_latex_term(i, term, needs_parens) + if term_str == '1': + part = coef_str + else: + part = rf"{coef_str}\,{term_str}" + + if c == 0: + part = mute(part) + + parts.append(part) + + if parts: + body = ''.join(parts) + else: + # in case somehow there are no coefficients 
at all + body = '0' + + return rf"${self.symbol} \mapsto {body}$" + + # Pickle and copy + + def __getstate__(self): + ret = self.__dict__.copy() + ret['coef'] = self.coef.copy() + ret['domain'] = self.domain.copy() + ret['window'] = self.window.copy() + ret['symbol'] = self.symbol + return ret + + def __setstate__(self, dict): + self.__dict__ = dict + + # Call + + def __call__(self, arg): + arg = pu.mapdomain(arg, self.domain, self.window) + return self._val(arg, self.coef) + + def __iter__(self): + return iter(self.coef) + + def __len__(self): + return len(self.coef) + + # Numeric properties. + + def __neg__(self): + return self.__class__( + -self.coef, self.domain, self.window, self.symbol + ) + + def __pos__(self): + return self + + def __add__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._add(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __sub__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._sub(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __mul__(self, other): + othercoef = self._get_coefficients(other) + try: + coef = self._mul(self.coef, othercoef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __truediv__(self, other): + # there is no true divide if the rhs is not a Number, although it + # could return the first n elements of an infinite series. + # It is hard to see where n would come from, though. + if not isinstance(other, numbers.Number) or isinstance(other, bool): + raise TypeError( + f"unsupported types for true division: " + f"'{type(self)}', '{type(other)}'" + ) + return self.__floordiv__(other) + + def __floordiv__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[0] + + def __mod__(self, other): + res = self.__divmod__(other) + if res is NotImplemented: + return res + return res[1] + + def __divmod__(self, other): + othercoef = self._get_coefficients(other) + try: + quo, rem = self._div(self.coef, othercoef) + except ZeroDivisionError: + raise + except Exception: + return NotImplemented + quo = self.__class__(quo, self.domain, self.window, self.symbol) + rem = self.__class__(rem, self.domain, self.window, self.symbol) + return quo, rem + + def __pow__(self, other): + coef = self._pow(self.coef, other, maxpower=self.maxpower) + res = self.__class__(coef, self.domain, self.window, self.symbol) + return res + + def __radd__(self, other): + try: + coef = self._add(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rsub__(self, other): + try: + coef = self._sub(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rmul__(self, other): + try: + coef = self._mul(other, self.coef) + except Exception: + return NotImplemented + return self.__class__(coef, self.domain, self.window, self.symbol) + + def __rtruediv__(self, other): + # An instance of ABCPolyBase is not considered a + # Number. 
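+        # Hedged example (illustrative): for p = Polynomial([1, 2]),
+        # evaluating ``2 / p`` lands here and, because NotImplemented is
+        # returned, Python raises TypeError instead of attempting to
+        # build the infinite reciprocal series.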
+        return NotImplemented
+
+    def __rfloordiv__(self, other):
+        res = self.__rdivmod__(other)
+        if res is NotImplemented:
+            return res
+        return res[0]
+
+    def __rmod__(self, other):
+        res = self.__rdivmod__(other)
+        if res is NotImplemented:
+            return res
+        return res[1]
+
+    def __rdivmod__(self, other):
+        try:
+            quo, rem = self._div(other, self.coef)
+        except ZeroDivisionError:
+            raise
+        except Exception:
+            return NotImplemented
+        quo = self.__class__(quo, self.domain, self.window, self.symbol)
+        rem = self.__class__(rem, self.domain, self.window, self.symbol)
+        return quo, rem
+
+    def __eq__(self, other):
+        res = (isinstance(other, self.__class__) and
+               np.all(self.domain == other.domain) and
+               np.all(self.window == other.window) and
+               (self.coef.shape == other.coef.shape) and
+               np.all(self.coef == other.coef) and
+               (self.symbol == other.symbol))
+        return res
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    #
+    # Extra methods.
+    #
+
+    def copy(self):
+        """Return a copy.
+
+        Returns
+        -------
+        new_series : series
+            Copy of self.
+
+        """
+        return self.__class__(self.coef, self.domain, self.window, self.symbol)
+
+    def degree(self):
+        """The degree of the series.
+
+        Returns
+        -------
+        degree : int
+            Degree of the series, one less than the number of coefficients.
+
+        Examples
+        --------
+
+        Create a polynomial object for ``1 + 7*x + 4*x**2``:
+
+        >>> np.polynomial.set_default_printstyle("unicode")
+        >>> poly = np.polynomial.Polynomial([1, 7, 4])
+        >>> print(poly)
+        1.0 + 7.0·x + 4.0·x²
+        >>> poly.degree()
+        2
+
+        Note that this method does not check for non-zero coefficients.
+        You must trim the polynomial to remove any trailing zeroes:
+
+        >>> poly = np.polynomial.Polynomial([1, 7, 0])
+        >>> print(poly)
+        1.0 + 7.0·x + 0.0·x²
+        >>> poly.degree()
+        2
+        >>> poly.trim().degree()
+        1
+
+        """
+        return len(self) - 1
+
+    def cutdeg(self, deg):
+        """Truncate series to the given degree.
+
+        Reduce the degree of the series to `deg` by discarding the
+        high order terms. If `deg` is greater than the current degree a
+        copy of the current series is returned. This can be useful in least
+        squares where the coefficients of the high degree terms may be very
+        small.
+
+        Parameters
+        ----------
+        deg : non-negative int
+            The series is reduced to degree `deg` by discarding the high
+            order terms. The value of `deg` must be a non-negative integer.
+
+        Returns
+        -------
+        new_series : series
+            New instance of series with reduced degree.
+
+        """
+        return self.truncate(deg + 1)
+
+    def trim(self, tol=0):
+        """Remove trailing coefficients
+
+        Remove trailing coefficients until a coefficient is reached whose
+        absolute value is greater than `tol` or the beginning of the series
+        is reached. If all the coefficients would be removed the series is
+        set to ``[0]``. A new series instance is returned with the new
+        coefficients. The current instance remains unchanged.
+
+        Parameters
+        ----------
+        tol : non-negative number
+            All trailing coefficients less than `tol` will be removed.
+
+        Returns
+        -------
+        new_series : series
+            New instance of series with trimmed coefficients.
+
+        """
+        coef = pu.trimcoef(self.coef, tol)
+        return self.__class__(coef, self.domain, self.window, self.symbol)
+
+    def truncate(self, size):
+        """Truncate series to length `size`.
+
+        Reduce the series to length `size` by discarding the high
+        degree terms. The value of `size` must be a positive integer. This
+        can be useful in least squares where the coefficients of the
+        high degree terms may be very small.
+
+        Parameters
+        ----------
+        size : positive int
+            The series is reduced to length `size` by discarding the high
+            degree terms. The value of `size` must be a positive integer.
+
+        Returns
+        -------
+        new_series : series
+            New instance of series with truncated coefficients.
+
+        """
+        isize = int(size)
+        if isize != size or isize < 1:
+            raise ValueError("size must be a positive integer")
+        if isize >= len(self.coef):
+            coef = self.coef
+        else:
+            coef = self.coef[:isize]
+        return self.__class__(coef, self.domain, self.window, self.symbol)
+
+    def convert(self, domain=None, kind=None, window=None):
+        """Convert series to a different kind and/or domain and/or window.
+
+        Parameters
+        ----------
+        domain : array_like, optional
+            The domain of the converted series. If the value is None,
+            the default domain of `kind` is used.
+        kind : class, optional
+            The polynomial series type class to which the current instance
+            should be converted. If kind is None, then the class of the
+            current instance is used.
+        window : array_like, optional
+            The window of the converted series. If the value is None,
+            the default window of `kind` is used.
+
+        Returns
+        -------
+        new_series : series
+            The returned class can be of a different type than the current
+            instance and/or have a different domain and/or different
+            window.
+
+        Notes
+        -----
+        Conversion between domains and class types can result in
+        numerically ill-defined series.
+
+        """
+        if kind is None:
+            kind = self.__class__
+        if domain is None:
+            domain = kind.domain
+        if window is None:
+            window = kind.window
+        return self(kind.identity(domain, window=window, symbol=self.symbol))
+
+    def mapparms(self):
+        """Return the mapping parameters.
+
+        The returned values define a linear map ``off + scl*x`` that is
+        applied to the input arguments before the series is evaluated. The
+        map depends on the ``domain`` and ``window``; if the current
+        ``domain`` is equal to the ``window`` the resulting map is the
+        identity. If the coefficients of the series instance are to be
+        used by themselves outside this class, then the linear function
+        must be substituted for the ``x`` in the standard representation of
+        the base polynomials.
+
+        Returns
+        -------
+        off, scl : float or complex
+            The mapping function is defined by ``off + scl*x``.
+
+        Notes
+        -----
+        If the current domain is the interval ``[l1, r1]`` and the window
+        is ``[l2, r2]``, then the linear mapping function ``L`` is
+        defined by the equations::
+
+            L(l1) = l2
+            L(r1) = r2
+
+        """
+        return pu.mapparms(self.domain, self.window)
+
+    def integ(self, m=1, k=[], lbnd=None):
+        """Integrate.
+
+        Return a series instance that is the definite integral of the
+        current series.
+
+        Parameters
+        ----------
+        m : non-negative int
+            The number of integrations to perform.
+        k : array_like
+            Integration constants. The first constant is applied to the
+            first integration, the second to the second, and so on. The
+            list of values must be less than or equal to `m` in length
+            and any missing values are set to zero.
+        lbnd : Scalar
+            The lower bound of the definite integral.
+
+        Returns
+        -------
+        new_series : series
+            A new series representing the integral. The domain is the same
+            as the domain of the integrated series.
+
+        """
+        off, scl = self.mapparms()
+        if lbnd is None:
+            lbnd = 0
+        else:
+            lbnd = off + scl * lbnd
+        coef = self._int(self.coef, m, k, lbnd, 1. / scl)
+        return self.__class__(coef, self.domain, self.window, self.symbol)
+
+    def deriv(self, m=1):
+        """Differentiate.
+
+        Return a series instance that is the derivative of the current
+        series.
+
+        Parameters
+        ----------
+        m : non-negative int
+            Find the derivative of order `m`.
+
+        Returns
+        -------
+        new_series : series
+            A new series representing the derivative. The domain is the same
+            as the domain of the differentiated series.
+
+        """
+        off, scl = self.mapparms()
+        coef = self._der(self.coef, m, scl)
+        return self.__class__(coef, self.domain, self.window, self.symbol)
+
+    def roots(self):
+        """Return the roots of the series polynomial.
+
+        Compute the roots for the series. Note that the accuracy of the
+        roots decreases the further outside the `domain` they lie.
+
+        Returns
+        -------
+        roots : ndarray
+            Array containing the roots of the series.
+
+        """
+        roots = self._roots(self.coef)
+        return pu.mapdomain(roots, self.window, self.domain)
+
+    def linspace(self, n=100, domain=None):
+        """Return x, y values at equally spaced points in domain.
+
+        Returns the x, y values at `n` linearly spaced points across the
+        domain. Here y is the value of the polynomial at the points x. By
+        default the domain is the same as that of the series instance.
+        This method is intended mostly as a plotting aid.
+
+        Parameters
+        ----------
+        n : int, optional
+            Number of point pairs to return. The default value is 100.
+        domain : {None, array_like}, optional
+            If not None, the specified domain is used instead of that of
+            the calling instance. It should be of the form ``[beg,end]``.
+            The default is None, in which case the class domain is used.
+
+        Returns
+        -------
+        x, y : ndarray
+            x is equal to linspace(self.domain[0], self.domain[1], n) and
+            y is the series evaluated at each element of x.
+
+        """
+        if domain is None:
+            domain = self.domain
+        x = np.linspace(domain[0], domain[1], n)
+        y = self(x)
+        return x, y
+
+    @classmethod
+    def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
+            window=None, symbol='x'):
+        """Least squares fit to data.
+
+        Return a series instance that is the least squares fit to the data
+        `y` sampled at `x`. The domain of the returned instance can be
+        specified and this will often result in a superior fit with less
+        chance of ill conditioning.
+
+        Parameters
+        ----------
+        x : array_like, shape (M,)
+            x-coordinates of the M sample points ``(x[i], y[i])``.
+        y : array_like, shape (M,)
+            y-coordinates of the M sample points ``(x[i], y[i])``.
+        deg : int or 1-D array_like
+            Degree(s) of the fitting polynomials. If `deg` is a single integer
+            all terms up to and including the `deg`'th term are included in the
+            fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+            degrees of the terms to include may be used instead.
+        domain : {None, [beg, end], []}, optional
+            Domain to use for the returned series. If ``None``,
+            then a minimal domain that covers the points `x` is chosen. If
+            ``[]`` the class domain is used. The default value was the
+            class domain in NumPy 1.4 and ``None`` in later versions.
+            The ``[]`` option was added in numpy 1.5.0.
+        rcond : float, optional
+            Relative condition number of the fit. Singular values smaller
+            than this relative to the largest singular value will be
+            ignored. The default value is ``len(x)*eps``, where eps is the
+            relative precision of the float type, about 2e-16 in most
+            cases.
+        full : bool, optional
+            Switch determining nature of return value. When it is False
+            (the default) just the coefficients are returned, when True
+            diagnostic information from the singular value decomposition is
+            also returned.
+        w : array_like, shape (M,), optional
+            Weights. If not None, the weight ``w[i]`` applies to the unsquared
+            residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+            chosen so that the errors of the products ``w[i]*y[i]`` all have
+            the same variance. When using inverse-variance weighting, use
+            ``w[i] = 1/sigma(y[i])``. The default value is None.
+        window : {[beg, end]}, optional
+            Window to use for the returned series. The default
+            value is the default class window.
+        symbol : str, optional
+            Symbol representing the independent variable. Default is 'x'.
+
+        Returns
+        -------
+        new_series : series
+            A series that represents the least squares fit to the data and
+            has the domain and window specified in the call. If the
+            coefficients for the unscaled and unshifted basis polynomials are
+            of interest, do ``new_series.convert().coef``.
+
+        [resid, rank, sv, rcond] : list
+            These values are only returned if ``full == True``
+
+            - resid -- sum of squared residuals of the least squares fit
+            - rank -- the numerical rank of the scaled Vandermonde matrix
+            - sv -- singular values of the scaled Vandermonde matrix
+            - rcond -- value of `rcond`.
+
+            For more details, see `linalg.lstsq`.
+
+        """
+        if domain is None:
+            domain = pu.getdomain(x)
+            if domain[0] == domain[1]:
+                domain[0] -= 1
+                domain[1] += 1
+        elif isinstance(domain, list) and len(domain) == 0:
+            domain = cls.domain
+
+        if window is None:
+            window = cls.window
+
+        xnew = pu.mapdomain(x, domain, window)
+        res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
+        if full:
+            [coef, status] = res
+            return (
+                cls(coef, domain=domain, window=window, symbol=symbol), status
+            )
+        else:
+            coef = res
+            return cls(coef, domain=domain, window=window, symbol=symbol)
+
+    @classmethod
+    def fromroots(cls, roots, domain=[], window=None, symbol='x'):
+        """Return series instance that has the specified roots.
+
+        Returns a series representing the product
+        ``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
+        list of roots.
+
+        Parameters
+        ----------
+        roots : array_like
+            List of roots.
+        domain : {[], None, array_like}, optional
+            Domain for the resulting series. If None the domain is the
+            interval from the smallest root to the largest. If [] the
+            domain is the class domain. The default is [].
+        window : {None, array_like}, optional
+            Window for the returned series. If None the class window is
+            used. The default is None.
+        symbol : str, optional
+            Symbol representing the independent variable. Default is 'x'.
+
+        Returns
+        -------
+        new_series : series
+            Series with the specified roots.
+
+        """
+        [roots] = pu.as_series([roots], trim=False)
+        if domain is None:
+            domain = pu.getdomain(roots)
+        elif isinstance(domain, list) and len(domain) == 0:
+            domain = cls.domain
+
+        if window is None:
+            window = cls.window
+
+        deg = len(roots)
+        off, scl = pu.mapparms(domain, window)
+        rnew = off + scl * roots
+        coef = cls._fromroots(rnew) / scl**deg
+        return cls(coef, domain=domain, window=window, symbol=symbol)
+
+    @classmethod
+    def identity(cls, domain=None, window=None, symbol='x'):
+        """Identity function.
+
+        If ``p`` is the returned series, then ``p(x) == x`` for all
+        values of x.
+
+        Parameters
+        ----------
+        domain : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the domain. If None is
+            given then the class domain is used. The default is None.
+        window : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the window. If None is
+            given then the class window is used. The default is None.
+        symbol : str, optional
+            Symbol representing the independent variable. Default is 'x'.
+
+        Returns
+        -------
+        new_series : series
+            Series representing the identity.
+
+        """
+        if domain is None:
+            domain = cls.domain
+        if window is None:
+            window = cls.window
+        off, scl = pu.mapparms(window, domain)
+        coef = cls._line(off, scl)
+        return cls(coef, domain, window, symbol)
+
+    @classmethod
+    def basis(cls, deg, domain=None, window=None, symbol='x'):
+        """Series basis polynomial of degree `deg`.
+
+        Returns the series representing the basis polynomial of degree `deg`.
+
+        Parameters
+        ----------
+        deg : int
+            Degree of the basis polynomial for the series. Must be >= 0.
+        domain : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the domain. If None is
+            given then the class domain is used. The default is None.
+        window : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the window. If None is
+            given then the class window is used. The default is None.
+        symbol : str, optional
+            Symbol representing the independent variable. Default is 'x'.
+
+        Returns
+        -------
+        new_series : series
+            A series with the coefficient of the `deg` term set to one and
+            all others zero.
+
+        """
+        if domain is None:
+            domain = cls.domain
+        if window is None:
+            window = cls.window
+        ideg = int(deg)
+
+        if ideg != deg or ideg < 0:
+            raise ValueError("deg must be a non-negative integer")
+        return cls([0] * ideg + [1], domain, window, symbol)
+
+    @classmethod
+    def cast(cls, series, domain=None, window=None):
+        """Convert series to series of this class.
+
+        The `series` is expected to be an instance of some polynomial
+        series of one of the types supported by the numpy.polynomial
+        module, but could be some other class that supports the convert
+        method.
+
+        Parameters
+        ----------
+        series : series
+            The series instance to be converted.
+        domain : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the domain. If None is
+            given then the class domain is used. The default is None.
+        window : {None, array_like}, optional
+            If given, the array must be of the form ``[beg, end]``, where
+            ``beg`` and ``end`` are the endpoints of the window. If None is
+            given then the class window is used. The default is None.
+
+        Returns
+        -------
+        new_series : series
+            A series of the same kind as the calling class and equal to
+            `series` when evaluated.
+ + See Also + -------- + convert : similar instance method + + """ + if domain is None: + domain = cls.domain + if window is None: + window = cls.window + return series.convert(domain, cls, window) diff --git a/python/numpy/polynomial/_polybase.pyi b/python/numpy/polynomial/_polybase.pyi new file mode 100644 index 000000000..6d71a8cb8 --- /dev/null +++ b/python/numpy/polynomial/_polybase.pyi @@ -0,0 +1,285 @@ +import abc +import decimal +import numbers +from collections.abc import Iterator, Mapping, Sequence +from typing import ( + Any, + ClassVar, + Generic, + Literal, + LiteralString, + Self, + SupportsIndex, + TypeAlias, + overload, +) + +from typing_extensions import TypeIs, TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polytypes import ( + _AnyInt, + _Array2, + _ArrayLikeCoef_co, + _ArrayLikeCoefObject_co, + _CoefLike_co, + _CoefSeries, + _Series, + _SeriesLikeCoef_co, + _SeriesLikeInt_co, + _Tuple2, +) + +__all__ = ["ABCPolyBase"] + +_NameCo = TypeVar( + "_NameCo", + bound=LiteralString | None, + covariant=True, + default=LiteralString | None +) +_Other = TypeVar("_Other", bound=ABCPolyBase) + +_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co +_Hundred: TypeAlias = Literal[100] + +class ABCPolyBase(Generic[_NameCo], abc.ABC): + __hash__: ClassVar[None] # type: ignore[assignment] # pyright: ignore[reportIncompatibleMethodOverride] + __array_ufunc__: ClassVar[None] + + maxpower: ClassVar[_Hundred] + _superscript_mapping: ClassVar[Mapping[int, str]] + _subscript_mapping: ClassVar[Mapping[int, str]] + _use_unicode: ClassVar[bool] + + basis_name: _NameCo + coef: _CoefSeries + domain: _Array2[np.inexact | np.object_] + window: _Array2[np.inexact | np.object_] + + _symbol: LiteralString + @property + def symbol(self, /) -> LiteralString: ... + + def __init__( + self, + /, + coef: _SeriesLikeCoef_co, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> None: ... + + @overload + def __call__(self, /, arg: _Other) -> _Other: ... + # TODO: Once `_ShapeT@ndarray` is covariant and bounded (see #26081), + # additionally include 0-d arrays as input types with scalar return type. + @overload + def __call__( + self, + /, + arg: _FloatLike_co | decimal.Decimal | numbers.Real | np.object_, + ) -> np.float64 | np.complex128: ... + @overload + def __call__( + self, + /, + arg: _NumberLike_co | numbers.Complex, + ) -> np.complex128: ... + @overload + def __call__(self, /, arg: _ArrayLikeFloat_co) -> ( + npt.NDArray[np.float64] + | npt.NDArray[np.complex128] + | npt.NDArray[np.object_] + ): ... + @overload + def __call__( + self, + /, + arg: _ArrayLikeComplex_co, + ) -> npt.NDArray[np.complex128] | npt.NDArray[np.object_]: ... + @overload + def __call__( + self, + /, + arg: _ArrayLikeCoefObject_co, + ) -> npt.NDArray[np.object_]: ... + + def __format__(self, fmt_str: str, /) -> str: ... + def __eq__(self, x: object, /) -> bool: ... + def __ne__(self, x: object, /) -> bool: ... + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + def __add__(self, x: _AnyOther, /) -> Self: ... + def __sub__(self, x: _AnyOther, /) -> Self: ... + def __mul__(self, x: _AnyOther, /) -> Self: ... + def __truediv__(self, x: _AnyOther, /) -> Self: ... + def __floordiv__(self, x: _AnyOther, /) -> Self: ... + def __mod__(self, x: _AnyOther, /) -> Self: ... 
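+    # Hedged usage note (illustrative, not part of the stub): every
+    # arithmetic overload returns ``Self``, so for
+    # ``p = np.polynomial.Polynomial([1, 2])`` the results of ``p + p``,
+    # ``p * 3`` and ``p // 2`` are typed as ``Polynomial`` again, and
+    # ``divmod(p, 2)`` below is typed as a pair of them.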
+    def __divmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ...
+    def __pow__(self, x: _AnyOther, /) -> Self: ...
+    def __radd__(self, x: _AnyOther, /) -> Self: ...
+    def __rsub__(self, x: _AnyOther, /) -> Self: ...
+    def __rmul__(self, x: _AnyOther, /) -> Self: ...
+    def __rtruediv__(self, x: _AnyOther, /) -> Self: ...
+    def __rfloordiv__(self, x: _AnyOther, /) -> Self: ...
+    def __rmod__(self, x: _AnyOther, /) -> Self: ...
+    def __rdivmod__(self, x: _AnyOther, /) -> _Tuple2[Self]: ...
+    def __len__(self, /) -> int: ...
+    def __iter__(self, /) -> Iterator[np.inexact | object]: ...
+    def __getstate__(self, /) -> dict[str, Any]: ...
+    def __setstate__(self, dict: dict[str, Any], /) -> None: ...
+
+    def has_samecoef(self, /, other: ABCPolyBase) -> bool: ...
+    def has_samedomain(self, /, other: ABCPolyBase) -> bool: ...
+    def has_samewindow(self, /, other: ABCPolyBase) -> bool: ...
+    @overload
+    def has_sametype(self, /, other: ABCPolyBase) -> TypeIs[Self]: ...
+    @overload
+    def has_sametype(self, /, other: object) -> Literal[False]: ...
+
+    def copy(self, /) -> Self: ...
+    def degree(self, /) -> int: ...
+    def cutdeg(self, /, deg: _AnyInt) -> Self: ...
+    def trim(self, /, tol: _FloatLike_co = ...) -> Self: ...
+    def truncate(self, /, size: _AnyInt) -> Self: ...
+
+    @overload
+    def convert(
+        self,
+        /,
+        domain: _SeriesLikeCoef_co | None,
+        kind: type[_Other],
+        window: _SeriesLikeCoef_co | None = ...,
+    ) -> _Other: ...
+    @overload
+    def convert(
+        self,
+        /,
+        domain: _SeriesLikeCoef_co | None = ...,
+        *,
+        kind: type[_Other],
+        window: _SeriesLikeCoef_co | None = ...,
+    ) -> _Other: ...
+    @overload
+    def convert(
+        self,
+        /,
+        domain: _SeriesLikeCoef_co | None = ...,
+        kind: None = None,
+        window: _SeriesLikeCoef_co | None = ...,
+    ) -> Self: ...
+
+    def mapparms(self, /) -> _Tuple2[Any]: ...
+
+    def integ(
+        self,
+        /,
+        m: SupportsIndex = ...,
+        k: _CoefLike_co | _SeriesLikeCoef_co = ...,
+        lbnd: _CoefLike_co | None = ...,
+    ) -> Self: ...
+
+    def deriv(self, /, m: SupportsIndex = ...) -> Self: ...
+
+    def roots(self, /) -> _CoefSeries: ...
+
+    def linspace(
+        self,
+        /,
+        n: SupportsIndex = ...,
+        domain: _SeriesLikeCoef_co | None = ...,
+    ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ...
+
+    @overload
+    @classmethod
+    def fit(
+        cls,
+        x: _SeriesLikeCoef_co,
+        y: _SeriesLikeCoef_co,
+        deg: int | _SeriesLikeInt_co,
+        domain: _SeriesLikeCoef_co | None = ...,
+        rcond: _FloatLike_co = ...,
+        full: Literal[False] = ...,
+        w: _SeriesLikeCoef_co | None = ...,
+        window: _SeriesLikeCoef_co | None = ...,
+        symbol: str = ...,
+    ) -> Self: ...
+    @overload
+    @classmethod
+    def fit(
+        cls,
+        x: _SeriesLikeCoef_co,
+        y: _SeriesLikeCoef_co,
+        deg: int | _SeriesLikeInt_co,
+        domain: _SeriesLikeCoef_co | None = ...,
+        rcond: _FloatLike_co = ...,
+        *,
+        full: Literal[True],
+        w: _SeriesLikeCoef_co | None = ...,
+        window: _SeriesLikeCoef_co | None = ...,
+        symbol: str = ...,
+    ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ...
+    @overload
+    @classmethod
+    def fit(
+        cls,
+        x: _SeriesLikeCoef_co,
+        y: _SeriesLikeCoef_co,
+        deg: int | _SeriesLikeInt_co,
+        domain: _SeriesLikeCoef_co | None,
+        rcond: _FloatLike_co,
+        full: Literal[True],
+        /,
+        w: _SeriesLikeCoef_co | None = ...,
+        window: _SeriesLikeCoef_co | None = ...,
+        symbol: str = ...,
+    ) -> tuple[Self, Sequence[np.inexact | np.int32]]: ...
+
+    @classmethod
+    def fromroots(
+        cls,
+        roots: _ArrayLikeCoef_co,
+        domain: _SeriesLikeCoef_co | None = ...,
+        window: _SeriesLikeCoef_co | None = ...,
+        symbol: str = ...,
+    ) -> Self: ...
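+    # Hedged illustration for the ``fit`` overloads above: the keyword
+    # ``full=True`` selects the tuple-returning overload, e.g.
+    #     p, status = Polynomial.fit(x, y, deg=2, full=True)
+    # while the default ``full=False`` resolves to plain ``Self``.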
+ + @classmethod + def identity( + cls, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> Self: ... + + @classmethod + def basis( + cls, + deg: _AnyInt, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + symbol: str = ..., + ) -> Self: ... + + @classmethod + def cast( + cls, + series: ABCPolyBase, + domain: _SeriesLikeCoef_co | None = ..., + window: _SeriesLikeCoef_co | None = ..., + ) -> Self: ... + + @classmethod + def _str_term_unicode(cls, /, i: str, arg_str: str) -> str: ... + @staticmethod + def _str_term_ascii(i: str, arg_str: str) -> str: ... + @staticmethod + def _repr_latex_term(i: str, arg_str: str, needs_parens: bool) -> str: ... diff --git a/python/numpy/polynomial/_polytypes.pyi b/python/numpy/polynomial/_polytypes.pyi new file mode 100644 index 000000000..241a65be2 --- /dev/null +++ b/python/numpy/polynomial/_polytypes.pyi @@ -0,0 +1,892 @@ +# ruff: noqa: PYI046, PYI047 + +from collections.abc import Callable, Sequence +from typing import ( + Any, + Literal, + LiteralString, + NoReturn, + Protocol, + Self, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + overload, + type_check_only, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + # array-likes + _ArrayLikeFloat_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ComplexLike_co, + _FloatLike_co, + # scalar-likes + _IntLike_co, + _NestedSequence, + _NumberLike_co, + _SupportsArray, +) + +_T = TypeVar("_T") +_T_contra = TypeVar("_T_contra", contravariant=True) +_ScalarT = TypeVar("_ScalarT", bound=np.number | np.bool | np.object_) + +# compatible with e.g. int, float, complex, Decimal, Fraction, and ABCPolyBase +@type_check_only +class _SupportsCoefOps(Protocol[_T_contra]): + def __eq__(self, x: object, /) -> bool: ... + def __ne__(self, x: object, /) -> bool: ... + + def __neg__(self, /) -> Self: ... + def __pos__(self, /) -> Self: ... + + def __add__(self, x: _T_contra, /) -> Self: ... + def __sub__(self, x: _T_contra, /) -> Self: ... + def __mul__(self, x: _T_contra, /) -> Self: ... + def __pow__(self, x: _T_contra, /) -> Self | float: ... + + def __radd__(self, x: _T_contra, /) -> Self: ... + def __rsub__(self, x: _T_contra, /) -> Self: ... + def __rmul__(self, x: _T_contra, /) -> Self: ... + +_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_FloatSeries: TypeAlias = _Series[np.floating] +_ComplexSeries: TypeAlias = _Series[np.complexfloating] +_ObjectSeries: TypeAlias = _Series[np.object_] +_CoefSeries: TypeAlias = _Series[np.inexact | np.object_] + +_FloatArray: TypeAlias = npt.NDArray[np.floating] +_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating] +_ObjectArray: TypeAlias = npt.NDArray[np.object_] +_CoefArray: TypeAlias = npt.NDArray[np.inexact | np.object_] + +_Tuple2: TypeAlias = tuple[_T, _T] +_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_ScalarT]] +_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_ScalarT]] + +_AnyInt: TypeAlias = SupportsInt | SupportsIndex + +_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps[Any] +_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co + +# The term "series" is used here to refer to 1-d arrays of numeric scalars. 
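+# Illustrative examples (not part of the stubs): ``[True, False]`` matches
+# ``_SeriesLikeBool_co``, ``[1, 2]`` matches ``_SeriesLikeInt_co``, and a
+# mixed sequence such as ``[1, 2.5, 3j]`` matches ``_SeriesLikeComplex_co``.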
+_SeriesLikeBool_co: TypeAlias = ( + _SupportsArray[np.dtype[np.bool]] + | Sequence[bool | np.bool] +) +_SeriesLikeInt_co: TypeAlias = ( + _SupportsArray[np.dtype[np.integer | np.bool]] + | Sequence[_IntLike_co] +) +_SeriesLikeFloat_co: TypeAlias = ( + _SupportsArray[np.dtype[np.floating | np.integer | np.bool]] + | Sequence[_FloatLike_co] +) +_SeriesLikeComplex_co: TypeAlias = ( + _SupportsArray[np.dtype[np.inexact | np.integer | np.bool]] + | Sequence[_ComplexLike_co] +) +_SeriesLikeObject_co: TypeAlias = ( + _SupportsArray[np.dtype[np.object_]] + | Sequence[_CoefObjectLike_co] +) +_SeriesLikeCoef_co: TypeAlias = ( + _SupportsArray[np.dtype[np.number | np.bool | np.object_]] + | Sequence[_CoefLike_co] +) + +_ArrayLikeCoefObject_co: TypeAlias = ( + _CoefObjectLike_co + | _SeriesLikeObject_co + | _NestedSequence[_SeriesLikeObject_co] +) +_ArrayLikeCoef_co: TypeAlias = ( + npt.NDArray[np.number | np.bool | np.object_] + | _ArrayLikeNumber_co + | _ArrayLikeCoefObject_co +) + +_Name_co = TypeVar( + "_Name_co", + bound=LiteralString, + covariant=True, + default=LiteralString +) + +@type_check_only +class _Named(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + +_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_ScalarT]] + +@type_check_only +class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, off: _ScalarT, scl: _ScalarT) -> _Line[_ScalarT]: ... + @overload + def __call__(self, /, off: int, scl: int) -> _Line[np.int_]: ... + @overload + def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... + @overload + def __call__( + self, + /, + off: complex, + scl: complex, + ) -> _Line[np.complex128]: ... + @overload + def __call__( + self, + /, + off: _SupportsCoefOps[Any], + scl: _SupportsCoefOps[Any], + ) -> _Line[np.object_]: ... + +@type_check_only +class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c1: _SeriesLikeBool_co, + c2: _SeriesLikeBool_co, + ) -> NoReturn: ... + @overload + def __call__( + self, + /, + c1: _SeriesLikeFloat_co, + c2: _SeriesLikeFloat_co, + ) -> _FloatSeries: ... + @overload + def __call__( + self, + /, + c1: _SeriesLikeComplex_co, + c2: _SeriesLikeComplex_co, + ) -> _ComplexSeries: ... + @overload + def __call__( + self, + /, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, + ) -> _ObjectSeries: ... + +@type_check_only +class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@type_check_only +class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... 
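+# Hedged sketch of the overload pattern above (the name ``f`` is only an
+# example): a callable typed as ``_FuncUnOp`` preserves the coefficient
+# kind, so
+#     f([1.0, 2.0])    -> floating-point series
+#     f([1.0, 2.0j])   -> complex series
+#     f([Fraction(1)]) -> object series (via _SupportsCoefOps)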
+ +@type_check_only +class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + pow: _IntLike_co, + maxpower: _IntLike_co | None = ..., + ) -> _FloatSeries: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + pow: _IntLike_co, + maxpower: _IntLike_co | None = ..., + ) -> _ComplexSeries: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeCoef_co, + pow: _IntLike_co, + maxpower: _IntLike_co | None = ..., + ) -> _ObjectSeries: ... + +@type_check_only +class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = ..., + scl: _FloatLike_co = ..., + axis: SupportsIndex = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = ..., + scl: _ComplexLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = ..., + scl: _CoefLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ObjectArray: ... + +@type_check_only +class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = ..., + k: _FloatLike_co | _SeriesLikeFloat_co = ..., + lbnd: _FloatLike_co = ..., + scl: _FloatLike_co = ..., + axis: SupportsIndex = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = ..., + k: _ComplexLike_co | _SeriesLikeComplex_co = ..., + lbnd: _ComplexLike_co = ..., + scl: _ComplexLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = ..., + k: _CoefLike_co | _SeriesLikeCoef_co = ..., + lbnd: _CoefLike_co = ..., + scl: _CoefLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ObjectArray: ... + +@type_check_only +class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + r: _FloatLike_co, + tensor: bool = ..., + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + r: _NumberLike_co, + tensor: bool = ..., + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _FloatLike_co | _ArrayLikeFloat_co, + r: _ArrayLikeFloat_co, + tensor: bool = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co | _ArrayLikeComplex_co, + r: _ArrayLikeComplex_co, + tensor: bool = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co | _ArrayLikeCoef_co, + r: _ArrayLikeCoef_co, + tensor: bool = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + r: _CoefLike_co, + tensor: bool = ..., + ) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + c: _SeriesLikeFloat_co, + tensor: bool = ..., + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + c: _SeriesLikeComplex_co, + tensor: bool = ..., + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + tensor: bool = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + tensor: bool = ..., + ) -> _ComplexArray: ... 
+ @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + tensor: bool = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + c: _SeriesLikeObject_co, + tensor: bool = ..., + ) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + y: _FloatLike_co, + c: _SeriesLikeFloat_co, + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + y: _NumberLike_co, + c: _SeriesLikeComplex_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps[Any]: ... + +@type_check_only +class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + y: _FloatLike_co, + z: _FloatLike_co, + c: _SeriesLikeFloat_co + ) -> np.floating: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + y: _NumberLike_co, + z: _NumberLike_co, + c: _SeriesLikeComplex_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + z: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps[Any]: ... + +_AnyValF: TypeAlias = Callable[ + [npt.ArrayLike, npt.ArrayLike, bool], + _CoefArray, +] + +@type_check_only +class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeFloat_co, + /, + *args: _FloatLike_co, + ) -> np.floating: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeComplex_co, + /, + *args: _NumberLike_co, + ) -> np.complexfloating: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeFloat_co, + /, + *args: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeComplex_co, + /, + *args: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeObject_co, + /, + *args: _CoefObjectLike_co, + ) -> _SupportsCoefOps[Any]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeCoef_co, + /, + *args: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + +@type_check_only +class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + deg: SupportsIndex, + ) -> _FloatArray: ... 
+ @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + deg: SupportsIndex, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + deg: SupportsIndex, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + deg: SupportsIndex, + ) -> _CoefArray: ... + +_AnyDegrees: TypeAlias = Sequence[SupportsIndex] + +@type_check_only +class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _AnyDegrees, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: _AnyDegrees, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: _AnyDegrees, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + y: npt.ArrayLike, + deg: _AnyDegrees, + ) -> _CoefArray: ... + +@type_check_only +class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + deg: _AnyDegrees, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + deg: _AnyDegrees, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + deg: _AnyDegrees, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + y: npt.ArrayLike, + z: npt.ArrayLike, + deg: _AnyDegrees, + ) -> _CoefArray: ... + +# keep in sync with the broadest overload of `._FuncVander` +_AnyFuncVander: TypeAlias = Callable[ + [npt.ArrayLike, SupportsIndex], + _CoefArray, +] + +@type_check_only +class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[ + _ArrayLikeObject_co | _ArrayLikeComplex_co, + ], + degrees: Sequence[SupportsIndex], + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], + ) -> _CoefArray: ... + +_FullFitResult: TypeAlias = Sequence[np.inexact | np.int32] + +@type_check_only +class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeFloat_co | None = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... 
+ @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeFloat_co | None = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeFloat_co | None = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None, + full: Literal[True], + /, + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: float | None = ..., + *, + full: Literal[True], + w: _SeriesLikeFloat_co | None = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + +@type_check_only +class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + ) -> _Series[np.float64]: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + ) -> _Series[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_ScalarT]] + +@type_check_only +class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + ) -> _Companion[np.float64]: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + ) -> _Companion[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... + +@type_check_only +class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): + def __call__( + self, + /, + deg: SupportsIndex, + ) -> _Tuple2[_Series[np.float64]]: ... + +@type_check_only +class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + ) -> npt.NDArray[np.float64]: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + ) -> npt.NDArray[np.complex128]: ... + @overload + def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... + +@type_check_only +class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): + def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... 
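+# Hedged usage sketch (illustrative, not part of these stubs): a module
+# that exports, say, ``chebval`` could annotate it with one of the
+# callback protocols above, e.g.
+#
+#     chebval: _FuncVal[Literal["chebval"]]
+#
+# so that ``chebval(2.0, [1.0, 2.0])`` resolves to ``np.floating`` and
+# ``chebval(1j, [1.0, 2.0])`` resolves to ``np.complexfloating``.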
diff --git a/python/numpy/polynomial/chebyshev.py b/python/numpy/polynomial/chebyshev.py new file mode 100644 index 000000000..58fce6046 --- /dev/null +++ b/python/numpy/polynomial/chebyshev.py @@ -0,0 +1,2003 @@ +""" +==================================================== +Chebyshev Series (:mod:`numpy.polynomial.chebyshev`) +==================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Chebyshev series, including a `Chebyshev` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- + +.. autosummary:: + :toctree: generated/ + + Chebyshev + + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + chebdomain + chebzero + chebone + chebx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + chebadd + chebsub + chebmulx + chebmul + chebdiv + chebpow + chebval + chebval2d + chebval3d + chebgrid2d + chebgrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + chebder + chebint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + chebfromroots + chebroots + chebvander + chebvander2d + chebvander3d + chebgauss + chebweight + chebcompanion + chebfit + chebpts1 + chebpts2 + chebtrim + chebline + cheb2poly + poly2cheb + chebinterpolate + +See also +-------- +`numpy.polynomial` + +Notes +----- +The implementations of multiplication, division, integration, and +differentiation use the algebraic identities [1]_: + +.. math:: + T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\ + z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}. + +where + +.. math:: x = \\frac{z + z^{-1}}{2}. + +These identities allow a Chebyshev series to be expressed as a finite, +symmetric Laurent series. In this module, this sort of Laurent series +is referred to as a "z-series." + +References +---------- +.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev + Polynomials," *Journal of Statistical Planning and Inference 14*, 2008 + (https://web.archive.org/web/20080221202153/https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4) + +""" # noqa: E501 +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd', + 'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval', + 'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots', + 'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1', + 'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d', + 'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion', + 'chebgauss', 'chebweight', 'chebinterpolate'] + +chebtrim = pu.trimcoef + +# +# A collection of functions for manipulating z-series. These are private +# functions and do minimal error checking. +# + +def _cseries_to_zseries(c): + """Convert Chebyshev series to z-series. + + Convert a Chebyshev series to the equivalent z-series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. 
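+    For example, the Chebyshev series ``[0., 1.]`` (that is, ``T_1 = x``)
+    maps to the z-series ``[0.5, 0., 0.5]``, reflecting ``x = (z + 1/z)/2``.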
+ + Parameters + ---------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high + + Returns + ------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + """ + n = c.size + zs = np.zeros(2 * n - 1, dtype=c.dtype) + zs[n - 1:] = c / 2 + return zs + zs[::-1] + + +def _zseries_to_cseries(zs): + """Convert z-series to a Chebyshev series. + + Convert a z series to the equivalent Chebyshev series. The result is + never an empty array. The dtype of the return is the same as that of + the input. No checks are run on the arguments as this routine is for + internal use. + + Parameters + ---------- + zs : 1-D ndarray + Odd length symmetric z-series, ordered from low to high. + + Returns + ------- + c : 1-D ndarray + Chebyshev coefficients, ordered from low to high. + + """ + n = (zs.size + 1) // 2 + c = zs[n - 1:].copy() + c[1:n] *= 2 + return c + + +def _zseries_mul(z1, z2): + """Multiply two z-series. + + Multiply two z-series to produce a z-series. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D but this is not checked. + + Returns + ------- + product : 1-D ndarray + The product z-series. + + Notes + ----- + This is simply convolution. If symmetric/anti-symmetric z-series are + denoted by S/A then the following rules apply: + + S*S, A*A -> S + S*A, A*S -> A + + """ + return np.convolve(z1, z2) + + +def _zseries_div(z1, z2): + """Divide the first z-series by the second. + + Divide `z1` by `z2` and return the quotient and remainder as z-series. + Warning: this implementation only applies when both z1 and z2 have the + same symmetry, which is sufficient for present purposes. + + Parameters + ---------- + z1, z2 : 1-D ndarray + The arrays must be 1-D and have the same symmetry, but this is not + checked. + + Returns + ------- + + (quotient, remainder) : 1-D ndarrays + Quotient and remainder as z-series. + + Notes + ----- + This is not the same as polynomial division on account of the desired form + of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A + then the following rules apply: + + S/S -> S,S + A/A -> S,A + + The restriction to types of the same symmetry could be fixed but seems like + unneeded generality. There is no natural form for the remainder in the case + where there is no symmetry. + + """ + z1 = z1.copy() + z2 = z2.copy() + lc1 = len(z1) + lc2 = len(z2) + if lc2 == 1: + z1 /= z2 + return z1, z1[:1] * 0 + elif lc1 < lc2: + return z1[:1] * 0, z1 + else: + dlen = lc1 - lc2 + scl = z2[0] + z2 /= scl + quo = np.empty(dlen + 1, dtype=z1.dtype) + i = 0 + j = dlen + while i < j: + r = z1[i] + quo[i] = z1[i] + quo[dlen - i] = r + tmp = r * z2 + z1[i:i + lc2] -= tmp + z1[j:j + lc2] -= tmp + i += 1 + j -= 1 + r = z1[i] + quo[i] = r + tmp = r * z2 + z1[i:i + lc2] -= tmp + quo /= scl + rem = z1[i + 1:i - 1 + lc2].copy() + return quo, rem + + +def _zseries_der(zs): + """Differentiate a z-series. + + The derivative is with respect to x, not z. This is achieved using the + chain rule and the value of dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to differentiate. + + Returns + ------- + derivative : z-series + The derivative + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + multiplying the value of zs by two also so that the two cancels in the + division. 
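+
+    As a check of the bookkeeping: the z-series of ``T_1 = x`` is
+    ``[0.5, 0, 0.5]``; running it through this routine yields ``[1.0]``,
+    the z-series of the constant 1, i.e. ``d/dx x = 1``.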
+ + """ + n = len(zs) // 2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs *= np.arange(-n, n + 1) * 2 + d, r = _zseries_div(zs, ns) + return d + + +def _zseries_int(zs): + """Integrate a z-series. + + The integral is with respect to x, not z. This is achieved by a change + of variable using dx/dz given in the module notes. + + Parameters + ---------- + zs : z-series + The z-series to integrate + + Returns + ------- + integral : z-series + The indefinite integral + + Notes + ----- + The zseries for x (ns) has been multiplied by two in order to avoid + using floats that are incompatible with Decimal and likely other + specialized scalar types. This scaling has been compensated by + dividing the resulting zs by two. + + """ + n = 1 + len(zs) // 2 + ns = np.array([-1, 0, 1], dtype=zs.dtype) + zs = _zseries_mul(zs, ns) + div = np.arange(-n, n + 1) * 2 + zs[:n] /= div[:n] + zs[n + 1:] /= div[n + 1:] + zs[n] = 0 + return zs + +# +# Chebyshev series functions +# + + +def poly2cheb(pol): + """ + Convert a polynomial to a Chebyshev series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Chebyshev series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Chebyshev + series. + + See Also + -------- + cheb2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> p = P.Polynomial(range(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> c = p.convert(kind=P.Chebyshev) + >>> c + Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., ... + >>> P.chebyshev.poly2cheb(range(4)) + array([1. , 3.25, 1. , 0.75]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = chebadd(chebmulx(res), pol[i]) + return res + + +def cheb2poly(c): + """ + Convert a Chebyshev series to a polynomial. + + Convert an array representing the coefficients of a Chebyshev series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Chebyshev series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2cheb + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy import polynomial as P + >>> c = P.Chebyshev(range(4)) + >>> c + Chebyshev([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x') + >>> p = c.convert(kind=P.Polynomial) + >>> p + Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.], ... 
+ >>> P.chebyshev.cheb2poly(range(4)) + array([-2., -8., 4., 12.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n < 3: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1) + c1 = polyadd(tmp, polymulx(c1) * 2) + return polyadd(c0, polymulx(c1)) + + +# +# These constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Chebyshev default domain. +chebdomain = np.array([-1., 1.]) + +# Chebyshev coefficients representing zero. +chebzero = np.array([0]) + +# Chebyshev coefficients representing one. +chebone = np.array([1]) + +# Chebyshev coefficients representing the identity x. +chebx = np.array([0, 1]) + + +def chebline(off, scl): + """ + Chebyshev series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Chebyshev series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebline(3,2) + array([3, 2]) + >>> C.chebval(-3, C.chebline(3,2)) # should be -3 + -3.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def chebfromroots(roots): + """ + Generate a Chebyshev series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Chebyshev form, where the :math:`r_n` are the roots specified in + `roots`. If a zero has multiplicity n, then it must appear in `roots` + n times. For instance, if 2 is a root of multiplicity three and 3 is a + root of multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. + The roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Chebyshev form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array; if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis + array([ 0. , -0.25, 0. , 0.25]) + >>> j = complex(0,1) + >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([1.5+0.j, 0. +0.j, 0.5+0.j]) + + """ + return pu._fromroots(chebline, chebmul, roots) + + +def chebadd(c1, c2): + """ + Add one Chebyshev series to another. + + Returns the sum of two Chebyshev series `c1` + `c2`.
The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Chebyshev series of their sum. + + See Also + -------- + chebsub, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Chebyshev series + is a Chebyshev series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def chebsub(c1, c2): + """ + Subtract one Chebyshev series from another. + + Returns the difference of two Chebyshev series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their difference. + + See Also + -------- + chebadd, chebmulx, chebmul, chebdiv, chebpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Chebyshev + series is a Chebyshev series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebsub(c1,c2) + array([-2., 0., 2.]) + >>> C.chebsub(c2,c1) # -C.chebsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def chebmulx(c): + """Multiply a Chebyshev series by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + chebadd, chebsub, chebmul, chebdiv, chebpow + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebmulx([1,2,3]) + array([1. , 2.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] + if len(c) > 1: + tmp = c[1:] / 2 + prd[2:] = tmp + prd[0:-2] += tmp + return prd + + +def chebmul(c1, c2): + """ + Multiply one Chebyshev series by another. + + Returns the product of two Chebyshev series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Chebyshev series coefficients representing their product. 
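+ + For instance (an illustrative check, not from the upstream docs), the product identity :math:`T_m T_n = (T_{m+n} + T_{|m-n|})/2` that underlies the z-series implementation gives + + >>> from numpy.polynomial import chebyshev as C + >>> C.chebmul([0, 1], [0, 0, 1]) # T_1 * T_2 = (T_1 + T_3)/2 + array([0. , 0.5, 0. , 0.5])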
+ + See Also + -------- + chebadd, chebsub, chebmulx, chebdiv, chebpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Chebyshev polynomial basis set. Thus, to express + the product as a C-series, it is typically necessary to "reproject" + the product onto said basis set, which typically produces + "unintuitive" (but correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebmul(c1,c2) # multiplication requires "reprojection" + array([ 6.5, 12. , 12. , 4. , 1.5]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + prd = _zseries_mul(z1, z2) + ret = _zseries_to_cseries(prd) + return pu.trimseq(ret) + + +def chebdiv(c1, c2): + """ + Divide one Chebyshev series by another. + + Returns the quotient-with-remainder of two Chebyshev series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Chebyshev series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Chebyshev series coefficients representing the quotient and + remainder. + + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebpow + + Notes + ----- + In general, the (polynomial) division of one C-series by another + results in quotient and remainder terms that are not in the Chebyshev + polynomial basis set. Thus, to express these results as C-series, it + is typically necessary to "reproject" the results onto said basis + set, which typically produces "unintuitive" (but correct) results; + see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> C.chebdiv(c2,c1) # neither "intuitive" + (array([0., 2.]), array([-2., -4.])) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError # FIXME: add message with details to exception + + # note: this is more efficient than `pu._div(chebmul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1] * 0, c1 + elif lc2 == 1: + return c1 / c2[-1], c1[:1] * 0 + else: + z1 = _cseries_to_zseries(c1) + z2 = _cseries_to_zseries(c2) + quo, rem = _zseries_div(z1, z2) + quo = pu.trimseq(_zseries_to_cseries(quo)) + rem = pu.trimseq(_zseries_to_cseries(rem)) + return quo, rem + + +def chebpow(c, pow, maxpower=16): + """Raise a Chebyshev series to a power. + + Returns the Chebyshev series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high, + i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2``. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised. + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16. + + Returns + ------- + coef : ndarray + Chebyshev series of power.
+ + See Also + -------- + chebadd, chebsub, chebmulx, chebmul, chebdiv + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> C.chebpow([1, 2, 3, 4], 2) + array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ]) + + """ + # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it + # avoids converting between z and c series repeatedly + + # c is a trimmed copy + [c] = pu.as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way. + zs = _cseries_to_zseries(c) + prd = zs + for i in range(2, power + 1): + prd = np.convolve(prd, zs) + return _zseries_to_cseries(prd) + + +def chebder(c, m=1, scl=1, axis=0): + """ + Differentiate a Chebyshev series. + + Returns the Chebyshev series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2`` + while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + + 2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Chebyshev series of the derivative. + + See Also + -------- + chebint + + Notes + ----- + In general, the result of differentiating a C-series needs to be + "reprojected" onto the C-series basis set. Thus, typically, the + result of this function is "unintuitive," albeit correct; see Examples + section below. + + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3,4) + >>> C.chebder(c) + array([14., 12., 24.]) + >>> C.chebder(c,3) + array([96.]) + >>> C.chebder(c,scl=-1) + array([-14., -12., -24.]) + >>> C.chebder(c,2,-1) + array([12., 96.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2 * j) * c[j] + c[j - 2] += (j * c[j]) / (j - 2) + if n > 1: + der[1] = 4 * c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Chebyshev series. 
+ + Returns the Chebyshev series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]] + represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) + + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Chebyshev series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + C-series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + chebder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a`- perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. 
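+ + As a quick worked check (illustrative only): since :math:`\\int x\\,dx = x^2/2 = (T_0(x) + T_2(x))/4`, integrating the series for ``x`` once gives + + >>> from numpy.polynomial import chebyshev as C + >>> C.chebint((0, 1)) + array([0.25, 0.  , 0.25])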
+ + Examples + -------- + >>> from numpy.polynomial import chebyshev as C + >>> c = (1,2,3) + >>> C.chebint(c) + array([ 0.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,3) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary + 0.00625 ]) + >>> C.chebint(c, k=3) + array([ 3.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,lbnd=-2) + array([ 8.5, -0.5, 0.5, 0.5]) + >>> C.chebint(c,scl=-2) + array([-1., 1., -1., -1.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1] / 4 + for j in range(2, n): + tmp[j + 1] = c[j] / (2 * (j + 1)) + tmp[j - 1] -= c[j] / (2 * (j - 1)) + tmp[0] += k[i] - chebval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def chebval(x, c, tensor=True): + """ + Evaluate a Chebyshev series at points x. + + If `c` is of length `n + 1`, this function returns the value: + + .. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. 
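+ + As a small illustrative check (not part of the upstream docstring): evaluating ``1*T_0 + 2*T_1 + 3*T_2`` at two points gives + + >>> from numpy.polynomial import chebyshev as C + >>> C.chebval([0.5, -1.0], [1, 2, 3]) + array([0.5, 2. ])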
+ + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + chebval2d, chebgrid2d, chebval3d, chebgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + x2 = 2 * x + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + c0 = c[-i] - c1 + c1 = tmp + c1 * x2 + return c0 + c1 * x + + +def chebval2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array, a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than 2 the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points formed + from pairs of corresponding values from `x` and `y`. + + See Also + -------- + chebval, chebgrid2d, chebval3d, chebgrid3d + """ + return pu._valnd(chebval, c, x, y) + + +def chebgrid2d(x, y, c): + """ + Evaluate a 2-D Chebyshev series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b), + + where the points `(a, b)` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar.
+ c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional Chebyshev series at points in the + Cartesian product of `x` and `y`. + + See Also + -------- + chebval, chebval2d, chebval3d, chebgrid3d + """ + return pu._gridnd(chebval, c, x, y) + + +def chebval3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and, if it isn't an + ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebgrid3d + """ + return pu._valnd(chebval, c, x, y, z) + + +def chebgrid3d(x, y, z, c): + """ + Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar.
+ c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional polynomial at points in the + Cartesian product of `x`, `y`, and `z`. + + See Also + -------- + chebval, chebval2d, chebgrid2d, chebval3d + """ + return pu._gridnd(chebval, c, x, y, z) + + +def chebvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = T_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Chebyshev polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and + ``chebval(x, c)`` are the same up to roundoff. This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Chebyshev series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the degree of the + corresponding Chebyshev polynomial. The dtype will be the same as + the converted `x`. + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + # Use forward recursion to generate the entries. + v[0] = x * 0 + 1 + if ideg > 0: + x2 = 2 * x + v[1] = x + for i in range(2, ideg + 1): + v[i] = v[i - 1] * x2 - v[i - 2] + return np.moveaxis(v, 0, -1) + + +def chebvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Chebyshev polynomials. + + If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg].
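+ + For instance (an illustrative sketch), a single point with ``deg=[1, 1]`` produces the four products ``T_0*T_0, T_0*T_1, T_1*T_0, T_1*T_1`` in flattened order: + + >>> chebvander2d(np.array([0.5]), np.array([0.5]), [1, 1]) + array([[1.  , 0.5 , 0.5 , 0.25]])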
+ + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + chebvander, chebvander3d, chebval2d, chebval3d + """ + return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg) + + +def chebvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Chebyshev polynomials. + + If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Chebyshev + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + chebvander, chebvander2d, chebval2d, chebval3d + """ + return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg) + + +def chebfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Chebyshev series to data. + + Return the coefficients of a Chebyshev series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer, + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit.
Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is ``len(x)*eps``, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Chebyshev coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + chebval : Evaluates a Chebyshev series. + chebvander : Vandermonde matrix of Chebyshev series. + chebweight : Chebyshev weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Chebyshev series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Chebyshev series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. 
[1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(chebvander, x, y, deg, rcond, full, w) + + +def chebcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is a Chebyshev basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Chebyshev series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.array([1.] + [np.sqrt(.5)] * (n - 1)) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[0] = np.sqrt(.5) + top[1:] = 1 / 2 + bot[...] = top + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * .5 + return mat + + +def chebroots(c): + """ + Compute the roots of a Chebyshev series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * T_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Chebyshev series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as cheb + >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots + array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = chebcompanion(c)[::-1, ::-1] + r = la.eigvals(m) + r.sort() + return r + + +def chebinterpolate(func, deg, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the Chebyshev series that interpolates `func` at the Chebyshev + points of the first kind in the interval [-1, 1]. The interpolating + series tends to a minmax approximation to `func` with increasing `deg` + if the function is continuous in the interval. + + Parameters + ---------- + func : function + The function to be approximated. 
It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + args : tuple, optional + Extra arguments to be used in the function call. Default is no extra + arguments. + + Returns + ------- + coef : ndarray, shape (deg + 1,) + Chebyshev coefficients of the interpolating series ordered from low to + high. + + Examples + -------- + >>> import numpy.polynomial.chebyshev as C + >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8) + array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17, + -5.42457905e-02, -2.71387850e-16, 4.51658839e-03, + 2.46716228e-17, -3.79694221e-04, -3.26899002e-16]) + + Notes + ----- + The Chebyshev polynomials used in the interpolation are orthogonal when + sampled at the Chebyshev points of the first kind. If it is desired to + constrain some of the coefficients they can simply be set to the desired + value after the interpolation; no new interpolation or fit is needed. This + is especially useful if it is known a priori that some of the coefficients + are zero. For instance, if the function is even then the coefficients of + the terms of odd degree in the result can be set to zero. + + """ + deg = np.asarray(deg) + + # check arguments. + if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int") + if deg < 0: + raise ValueError("expected deg >= 0") + + order = deg + 1 + xcheb = chebpts1(order) + yfunc = func(xcheb, *args) + m = chebvander(xcheb, deg) + c = np.dot(m.T, yfunc) + c[0] /= order + c[1:] /= 0.5 * order + + return c + + +def chebgauss(deg): + """ + Gauss-Chebyshev quadrature. + + Computes the sample points and weights for Gauss-Chebyshev quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100; higher degrees may + be problematic. For Gauss-Chebyshev there are closed form solutions for + the sample points and weights. If n = `deg`, then + + .. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n)) + + .. math:: w_i = \\pi / n + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + x = np.cos(np.pi * np.arange(1, 2 * ideg, 2) / (2.0 * ideg)) + w = np.ones(ideg) * (np.pi / ideg) + + return x, w + + +def chebweight(x): + """ + The weight function of the Chebyshev polynomials. + + The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of + integration is :math:`[-1, 1]`. The Chebyshev polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = 1. / (np.sqrt(1. + x) * np.sqrt(1. - x)) + return w + + +def chebpts1(npts): + """ + Chebyshev points of the first kind. + + The Chebyshev points of the first kind are the points ``cos(x)``, + where ``x = [pi*(k + .5)/npts for k in range(npts)]``. + + Parameters + ---------- + npts : int + Number of sample points desired.
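+ For instance (illustrative), ``chebpts1(2)`` returns approximately ``array([-0.70710678, 0.70710678])``, i.e. ``cos(3*pi/4)`` and ``cos(pi/4)``.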
+ + Returns + ------- + pts : ndarray + The Chebyshev points of the first kind. + + See Also + -------- + chebpts2 + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 1: + raise ValueError("npts must be >= 1") + + x = 0.5 * np.pi / _npts * np.arange(-_npts + 1, _npts + 1, 2) + return np.sin(x) + + +def chebpts2(npts): + """ + Chebyshev points of the second kind. + + The Chebyshev points of the second kind are the points ``cos(x)``, + where ``x = [pi*k/(npts - 1) for k in range(npts)]`` sorted in ascending + order. + + Parameters + ---------- + npts : int + Number of sample points desired. + + Returns + ------- + pts : ndarray + The Chebyshev points of the second kind. + """ + _npts = int(npts) + if _npts != npts: + raise ValueError("npts must be integer") + if _npts < 2: + raise ValueError("npts must be >= 2") + + x = np.linspace(-np.pi, 0, _npts) + return np.cos(x) + + +# +# Chebyshev series class +# + +class Chebyshev(ABCPolyBase): + """A Chebyshev series class. + + The Chebyshev class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Chebyshev coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(chebadd) + _sub = staticmethod(chebsub) + _mul = staticmethod(chebmul) + _div = staticmethod(chebdiv) + _pow = staticmethod(chebpow) + _val = staticmethod(chebval) + _int = staticmethod(chebint) + _der = staticmethod(chebder) + _fit = staticmethod(chebfit) + _line = staticmethod(chebline) + _roots = staticmethod(chebroots) + _fromroots = staticmethod(chebfromroots) + + @classmethod + def interpolate(cls, func, deg, domain=None, args=()): + """Interpolate a function at the Chebyshev points of the first kind. + + Returns the series that interpolates `func` at the Chebyshev points of + the first kind scaled and shifted to the `domain`. The resulting series + tends to a minmax approximation of `func` when the function is + continuous in the domain. + + Parameters + ---------- + func : function + The function to be interpolated. It must be a function of a single + variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are + extra arguments passed in the `args` parameter. + deg : int + Degree of the interpolating polynomial. + domain : {None, [beg, end]}, optional + Domain over which `func` is interpolated. The default is None, in + which case the domain is [-1, 1]. + args : tuple, optional + Extra arguments to be used in the function call. Default is no + extra arguments. + + Returns + ------- + polynomial : Chebyshev instance + Interpolating Chebyshev instance. + + Notes + ----- + See `numpy.polynomial.chebinterpolate` for more details. 
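+ + A minimal usage sketch (illustrative, using the default domain): interpolating ``x**2`` at degree 2 recovers its exact expansion :math:`x^2 = (T_0(x) + T_2(x))/2`. + + >>> p = Chebyshev.interpolate(lambda x: x**2, 2) + >>> bool(np.allclose(p.coef, [0.5, 0, 0.5])) + True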
+ + """ + if domain is None: + domain = cls.domain + xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args) + coef = chebinterpolate(xfunc, deg) + return cls(coef, domain=domain) + + # Virtual properties + domain = np.array(chebdomain) + window = np.array(chebdomain) + basis_name = 'T' diff --git a/python/numpy/polynomial/chebyshev.pyi b/python/numpy/polynomial/chebyshev.pyi new file mode 100644 index 000000000..ec342df0f --- /dev/null +++ b/python/numpy/polynomial/chebyshev.pyi @@ -0,0 +1,181 @@ +from collections.abc import Callable, Iterable +from typing import Any, Concatenate, Final, Self, TypeVar, overload +from typing import Literal as L + +import numpy as np +import numpy.typing as npt +from numpy._typing import _IntLike_co + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _CoefSeries, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncPts, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, + _Series, + _SeriesLikeCoef_co, +) +from .polyutils import trimcoef as chebtrim + +__all__ = [ + "chebzero", + "chebone", + "chebx", + "chebdomain", + "chebline", + "chebadd", + "chebsub", + "chebmulx", + "chebmul", + "chebdiv", + "chebpow", + "chebval", + "chebder", + "chebint", + "cheb2poly", + "poly2cheb", + "chebfromroots", + "chebvander", + "chebfit", + "chebtrim", + "chebroots", + "chebpts1", + "chebpts2", + "Chebyshev", + "chebval2d", + "chebval3d", + "chebgrid2d", + "chebgrid3d", + "chebvander2d", + "chebvander3d", + "chebcompanion", + "chebgauss", + "chebweight", + "chebinterpolate", +] + +_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_) +def _cseries_to_zseries(c: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_to_cseries(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_mul( + z1: npt.NDArray[_NumberOrObjectT], + z2: npt.NDArray[_NumberOrObjectT], +) -> _Series[_NumberOrObjectT]: ... +def _zseries_div( + z1: npt.NDArray[_NumberOrObjectT], + z2: npt.NDArray[_NumberOrObjectT], +) -> _Series[_NumberOrObjectT]: ... +def _zseries_der(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... +def _zseries_int(zs: npt.NDArray[_NumberOrObjectT]) -> _Series[_NumberOrObjectT]: ... 
+ +poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] +cheb2poly: _FuncUnOp[L["cheb2poly"]] + +chebdomain: Final[_Array2[np.float64]] +chebzero: Final[_Array1[np.int_]] +chebone: Final[_Array1[np.int_]] +chebx: Final[_Array2[np.int_]] + +chebline: _FuncLine[L["chebline"]] +chebfromroots: _FuncFromRoots[L["chebfromroots"]] +chebadd: _FuncBinOp[L["chebadd"]] +chebsub: _FuncBinOp[L["chebsub"]] +chebmulx: _FuncUnOp[L["chebmulx"]] +chebmul: _FuncBinOp[L["chebmul"]] +chebdiv: _FuncBinOp[L["chebdiv"]] +chebpow: _FuncPow[L["chebpow"]] +chebder: _FuncDer[L["chebder"]] +chebint: _FuncInteg[L["chebint"]] +chebval: _FuncVal[L["chebval"]] +chebval2d: _FuncVal2D[L["chebval2d"]] +chebval3d: _FuncVal3D[L["chebval3d"]] +chebvalfromroots: _FuncValFromRoots[L["chebvalfromroots"]] +chebgrid2d: _FuncVal2D[L["chebgrid2d"]] +chebgrid3d: _FuncVal3D[L["chebgrid3d"]] +chebvander: _FuncVander[L["chebvander"]] +chebvander2d: _FuncVander2D[L["chebvander2d"]] +chebvander3d: _FuncVander3D[L["chebvander3d"]] +chebfit: _FuncFit[L["chebfit"]] +chebcompanion: _FuncCompanion[L["chebcompanion"]] +chebroots: _FuncRoots[L["chebroots"]] +chebgauss: _FuncGauss[L["chebgauss"]] +chebweight: _FuncWeight[L["chebweight"]] +chebpts1: _FuncPts[L["chebpts1"]] +chebpts2: _FuncPts[L["chebpts2"]] + +# keep in sync with `Chebyshev.interpolate` +_RT = TypeVar("_RT", bound=np.number | np.bool | np.object_) +@overload +def chebinterpolate( + func: np.ufunc, + deg: _IntLike_co, + args: tuple[()] = ..., +) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... +@overload +def chebinterpolate( + func: Callable[[npt.NDArray[np.float64]], _RT], + deg: _IntLike_co, + args: tuple[()] = ..., +) -> npt.NDArray[_RT]: ... +@overload +def chebinterpolate( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], + deg: _IntLike_co, + args: Iterable[Any], +) -> npt.NDArray[_RT]: ... + +class Chebyshev(ABCPolyBase[L["T"]]): + @overload + @classmethod + def interpolate( + cls, + func: Callable[[npt.NDArray[np.float64]], _CoefSeries], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None = ..., + args: tuple[()] = ..., + ) -> Self: ... + @overload + @classmethod + def interpolate( + cls, + func: Callable[ + Concatenate[npt.NDArray[np.float64], ...], + _CoefSeries, + ], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None = ..., + *, + args: Iterable[Any], + ) -> Self: ... + @overload + @classmethod + def interpolate( + cls, + func: Callable[ + Concatenate[npt.NDArray[np.float64], ...], + _CoefSeries, + ], + deg: _IntLike_co, + domain: _SeriesLikeCoef_co | None, + args: Iterable[Any], + ) -> Self: ... diff --git a/python/numpy/polynomial/hermite.py b/python/numpy/polynomial/hermite.py new file mode 100644 index 000000000..47e1dfc05 --- /dev/null +++ b/python/numpy/polynomial/hermite.py @@ -0,0 +1,1740 @@ +""" +============================================================== +Hermite Series, "Physicists" (:mod:`numpy.polynomial.hermite`) +============================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite series, including a `Hermite` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Hermite + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermdomain + hermzero + hermone + hermx + +Arithmetic +---------- +.. 
autosummary:: + :toctree: generated/ + + hermadd + hermsub + hermmulx + hermmul + hermdiv + hermpow + hermval + hermval2d + hermval3d + hermgrid2d + hermgrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermder + hermint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermfromroots + hermroots + hermvander + hermvander2d + hermvander3d + hermgauss + hermweight + hermcompanion + hermfit + hermtrim + hermline + herm2poly + poly2herm + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd', + 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval', + 'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots', + 'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite', + 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d', + 'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight'] + +hermtrim = pu.trimcoef + + +def poly2herm(pol): + """ + poly2herm(pol) + + Convert a polynomial to a Hermite series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herm2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import poly2herm + >>> poly2herm(np.arange(4)) + array([1. , 2.75 , 0.5 , 0.375]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermadd(hermmulx(res), pol[i]) + return res + + +def herm2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herm + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite import herm2poly + >>> herm2poly([ 1. 
, 2.75 , 0.5 , 0.375]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + c[1] *= 2 + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1 * (2 * (i - 1))) + c1 = polyadd(tmp, polymulx(c1) * 2) + return polyadd(c0, polymulx(c1) * 2) + + +# +# These constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermdomain = np.array([-1., 1.]) + +# Hermite coefficients representing zero. +hermzero = np.array([0]) + +# Hermite coefficients representing one. +hermone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermx = np.array([0, 1 / 2]) + + +def hermline(off, scl): + """ + Hermite series whose graph is a straight line. + + + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.hermite import hermline, hermval + >>> hermval(0,hermline(3, 2)) + 3.0 + >>> hermval(1,hermline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off, scl / 2]) + else: + return np.array([off]) + + +def hermfromroots(roots): + """ + Generate a Hermite series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Hermite form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Hermite form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.hermite import hermfromroots, hermval + >>> coef = hermfromroots((-1, 0, 1)) + >>> hermval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = hermfromroots((-1j, 1j)) + >>> hermval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(hermline, hermmul, roots) + + +def hermadd(c1, c2): + """ + Add one Hermite series to another. + + Returns the sum of two Hermite series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
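A quick sanity check on the constants defined above (a sketch only, not part of the patch; it uses nothing beyond the public API): since H_1(x) = 2x, the identity x is represented with coefficient 1/2.

    import numpy as np
    from numpy.polynomial.hermite import hermval, hermx

    x = np.linspace(-3.0, 3.0, 7)
    # hermx == [0, 0.5]: 0*H_0(x) + 0.5*H_1(x) = 0.5*(2x) = x
    print(np.allclose(hermval(x, hermx), x))  # True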
+ + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Hermite series of their sum. + + See Also + -------- + hermsub, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Hermite series + is a Hermite series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermadd + >>> hermadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def hermsub(c1, c2): + """ + Subtract one Hermite series from another. + + Returns the difference of two Hermite series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their difference. + + See Also + -------- + hermadd, hermmulx, hermmul, hermdiv, hermpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Hermite + series is a Hermite series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.hermite import hermsub + >>> hermsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def hermmulx(c): + """Multiply a Hermite series by x. + + Multiply the Hermite series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + hermadd, hermsub, hermmul, hermdiv, hermpow + + Notes + ----- + The multiplication uses the recursion relationship for Hermite + polynomials in the form + + .. math:: + + xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmulx + >>> hermmulx([1, 2, 3]) + array([2. , 6.5, 1. , 1.5]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] / 2 + for i in range(1, len(c)): + prd[i + 1] = c[i] / 2 + prd[i - 1] += c[i] * i + return prd + + +def hermmul(c1, c2): + """ + Multiply one Hermite series by another. + + Returns the product of two Hermite series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Hermite series coefficients representing their product. + + See Also + -------- + hermadd, hermsub, hermmulx, hermdiv, hermpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Hermite polynomial basis set. 
Thus, to express + the product as a Hermite series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermmul + >>> hermmul([1, 2, 3], [0, 1, 2]) + array([52., 29., 52., 7., 6.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = hermsub(c[-i] * xs, c1 * (2 * (nd - 1))) + c1 = hermadd(tmp, hermmulx(c1) * 2) + return hermadd(c0, hermmulx(c1) * 2) + + +def hermdiv(c1, c2): + """ + Divide one Hermite series by another. + + Returns the quotient-with-remainder of two Hermite series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermpow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermdiv + >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([2., 2.])) + >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(hermmul, c1, c2) + + +def hermpow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermadd, hermsub, hermmulx, hermmul, hermdiv + + Examples + -------- + >>> from numpy.polynomial.hermite import hermpow + >>> hermpow([1, 2, 3], 2) + array([81., 52., 82., 12., 9.]) + + """ + return pu._pow(hermmul, c, pow, maxpower) + + +def hermder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite series. + + Returns the Hermite series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). 
The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2`` + while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + + 2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If `c` is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermder + >>> hermder([ 1. , 0.5, 0.5, 0.5]) + array([1., 2., 3.]) + >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = (2 * j) * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite series. + + Returns the Hermite series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. 
If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Hermite series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermint + >>> hermint([1,2,3]) # integrate once, value 0 at 0. + array([1. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. + array([2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 + array([-2. , 0.5, 0.5, 0.5]) + >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) + array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] / 2 + for j in range(1, n): + tmp[j + 1] = c[j] / (2 * (j + 1)) + tmp[0] += k[i] - hermval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermval(x, c, tensor=True): + """ + Evaluate an Hermite series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. 
If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermval2d, hermgrid2d, hermval3d, hermgrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval + >>> coef = [1,2,3] + >>> hermval(1, coef) + 11.0 + >>> hermval([[1,2],[3,4]], coef) + array([[ 11., 51.], + [115., 203.]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + x2 = x * 2 + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * (2 * (nd - 1)) + c1 = tmp + c1 * x2 + return c0 + c1 * x2 + + +def hermval2d(x, y, c): + """ + Evaluate a 2-D Hermite series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients.
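A short sketch of the `tensor` behaviour described above (editorial, assuming only the public API):

    import numpy as np
    from numpy.polynomial.hermite import hermval

    c = np.ones((3, 2))   # degree-2 coefficients for two polynomials (columns)
    x = np.array([0.0, 1.0])

    print(hermval(x, c, tensor=True).shape)   # (2, 2): every column at every x
    print(hermval(x, c, tensor=False).shape)  # (2,): x broadcast over columns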
+ + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermval, hermgrid2d, hermval3d, hermgrid3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval2d + >>> x = [1, 2] + >>> y = [4, 5] + >>> c = [[1, 2, 3], [4, 5, 6]] + >>> hermval2d(x, y, c) + array([1035., 2883.]) + + """ + return pu._valnd(hermval, c, x, y) + + +def hermgrid2d(x, y, c): + """ + Evaluate a 2-D Hermite series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or lists, otherwise they are treated as scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermval, hermval2d, hermval3d, hermgrid3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgrid2d + >>> x = [1, 2, 3] + >>> y = [4, 5] + >>> c = [[1, 2, 3], [4, 5, 6]] + >>> hermgrid2d(x, y, c) + array([[1035., 1599.], + [1867., 2883.], + [2699., 4167.]]) + + """ + return pu._gridnd(hermval, c, x, y) + + +def hermval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or lists, otherwise they are treated as scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar.
+ c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermgrid3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermval3d + >>> x = [1, 2] + >>> y = [4, 5] + >>> z = [6, 7] + >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] + >>> hermval3d(x, y, z, c) + array([ 40077., 120131.]) + + """ + return pu._valnd(hermval, c, x, y, z) + + +def hermgrid3d(x, y, z, c): + """ + Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or lists, otherwise they are treated as scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. + + Parameters + ---------- + x, y, z : array_like, compatible objects + The three dimensional series is evaluated at the points in the + Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a + list or tuple, it is first converted to an ndarray, otherwise it is + left unchanged and, if it isn't an ndarray, it is treated as a + scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension + greater than three the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the three dimensional polynomial at points in the Cartesian + product of `x`, `y`, and `z`. + + See Also + -------- + hermval, hermval2d, hermgrid2d, hermval3d + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgrid3d + >>> x = [1, 2] + >>> y = [4, 5] + >>> z = [6, 7] + >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] + >>> hermgrid3d(x, y, z, c) + array([[[ 40077., 54117.], + [ 49293., 66561.]], + [[ 72375., 97719.], + [ 88975., 120131.]]]) + + """ + return pu._gridnd(hermval, c, x, y, z) + + +def hermvander(x, deg): + """Pseudo-Vandermonde matrix of given degree. + + Returns the pseudo-Vandermonde matrix of degree `deg` and sample points + `x`. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., i] = H_i(x), + + where ``0 <= i <= deg``. The leading indices of `V` index the elements of + `x` and the last index is the degree of the Hermite polynomial. + + If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the + array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and + ``hermval(x, c)`` are the same up to roundoff.
This equivalence is + useful both for least squares fitting and for the evaluation of a large + number of Hermite series of the same degree and sample points. + + Parameters + ---------- + x : array_like + Array of points. The dtype is converted to float64 or complex128 + depending on whether any of the elements are complex. If `x` is + scalar it is converted to a 1-D array. + deg : int + Degree of the resulting matrix. + + Returns + ------- + vander : ndarray + The pseudo-Vandermonde matrix. The shape of the returned matrix is + ``x.shape + (deg + 1,)``, where the last index is the degree of the + corresponding Hermite polynomial. The dtype will be the same as + the converted `x`. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermvander + >>> x = np.array([-1, 0, 1]) + >>> hermvander(x, 3) + array([[ 1., -2., 2., 4.], + [ 1., 0., -2., -0.], + [ 1., 2., 2., -4.]]) + + """ + ideg = pu._as_int(deg, "deg") + if ideg < 0: + raise ValueError("deg must be non-negative") + + x = np.array(x, copy=None, ndmin=1) + 0.0 + dims = (ideg + 1,) + x.shape + dtyp = x.dtype + v = np.empty(dims, dtype=dtyp) + v[0] = x * 0 + 1 + if ideg > 0: + x2 = x * 2 + v[1] = x2 + for i in range(2, ideg + 1): + v[i] = (v[i - 1] * x2 - v[i - 2] * (2 * (i - 1))) + return np.moveaxis(v, 0, -1) + + +def hermvander2d(x, y, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y)``. The pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (deg[1] + 1)*i + j] = H_i(x) * H_j(y), + + where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of + `V` index the points ``(x, y)`` and the last index encodes the degrees of + the Hermite polynomials. + + If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V` + correspond to the elements of a 2-D coefficient array `c` of shape + (xdeg + 1, ydeg + 1) in the order + + .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ... + + and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same + up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 2-D Hermite + series of the same degrees and sample points. + + Parameters + ---------- + x, y : array_like + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg]. + + Returns + ------- + vander2d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same + as the converted `x` and `y`. + + See Also + -------- + hermvander, hermvander3d, hermval2d, hermval3d + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermvander2d + >>> x = np.array([-1, 0, 1]) + >>> y = np.array([-1, 0, 1]) + >>> hermvander2d(x, y, [2, 2]) + array([[ 1., -2., 2., -2., 4., -4., 2., -4., 4.], + [ 1., 0., -2., 0., 0., -0., -2., -0., 4.], + [ 1., 2., 2., 2., 4., 4., 2., 4., 4.]]) + + """ + return pu._vander_nd_flat((hermvander, hermvander), (x, y), deg) + + +def hermvander3d(x, y, z, deg): + """Pseudo-Vandermonde matrix of given degrees. + + Returns the pseudo-Vandermonde matrix of degrees `deg` and sample + points ``(x, y, z)``.
If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`, + then the pseudo-Vandermonde matrix is defined by + + .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z), + + where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading + indices of `V` index the points ``(x, y, z)`` and the last index encodes + the degrees of the Hermite polynomials. + + If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns + of `V` correspond to the elements of a 3-D coefficient array `c` of + shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order + + .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},... + + and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the + same up to roundoff. This equivalence is useful both for least squares + fitting and for the evaluation of a large number of 3-D Hermite + series of the same degrees and sample points. + + Parameters + ---------- + x, y, z : array_like + Arrays of point coordinates, all of the same shape. The dtypes will + be converted to either float64 or complex128 depending on whether + any of the elements are complex. Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + hermvander, hermvander2d, hermval2d, hermval3d + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermvander3d + >>> x = np.array([-1, 0, 1]) + >>> y = np.array([-1, 0, 1]) + >>> z = np.array([-1, 0, 1]) + >>> hermvander3d(x, y, z, [0, 1, 2]) + array([[ 1., -2., 2., -2., 4., -4.], + [ 1., 0., -2., 0., 0., -0.], + [ 1., 2., 2., 2., 4., 4.]]) + + """ + return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg) + + +def hermfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Hermite series to data. + + Return the coefficients of a Hermite series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value.
When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite_e.hermefit + hermval : Evaluates a Hermite series. + hermvander : Vandermonde matrix of Hermite series. + hermweight : Hermite weight function + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Hermite series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Hermite series are probably most useful when the data can be + approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Hermite + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermweight`. + + References + ---------- + .. 
[1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermfit, hermval + >>> x = np.linspace(-10, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = hermval(x, [1, 2, 3]) + err + >>> hermfit(x, y, 2) + array([1.02294967, 2.00016403, 2.99994614]) # may vary + + """ + return pu._fit(hermvander, x, y, deg, rcond, full, w) + + +def hermcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Hermite basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + + Examples + -------- + >>> from numpy.polynomial.hermite import hermcompanion + >>> hermcompanion([1, 0, 1]) + array([[0. , 0.35355339], + [0.70710678, 0. ]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-.5 * c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1. / np.sqrt(2. * np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.sqrt(.5 * np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl * c[:-1] / (2.0 * c[-1]) + return mat + + +def hermroots(c): + """ + Compute the roots of a Hermite series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * H_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix. Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The Hermite series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermroots, hermfromroots + >>> coef = hermfromroots([-1, 0, 1]) + >>> coef + array([0. , 0.25 , 0.
, 0.125]) + >>> hermroots(coef) + array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-.5 * c[0] / c[1]]) + + # rotated companion matrix reduces error + m = hermcompanion(c)[::-1, ::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_n(x, n): + """ + Evaluate a normalized Hermite polynomial. + + Compute the value of the normalized Hermite polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized Hermite function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard Hermite functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1 / np.sqrt(np.sqrt(np.pi))) + + c0 = 0. + c1 = 1. / np.sqrt(np.sqrt(np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(2. / nd) + nd = nd - 1.0 + return c0 + c1 * x * np.sqrt(2) + + +def hermgauss(deg): + """ + Gauss-Hermite quadrature. + + Computes the sample points and weights for Gauss-Hermite quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`H_n`, and then scaling the results to get + the right value when integrating 1. + + Examples + -------- + >>> from numpy.polynomial.hermite import hermgauss + >>> hermgauss(2) + (array([-0.70710678, 0.70710678]), array([0.88622693, 0.88622693])) + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1], dtype=np.float64) + m = hermcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_n(x, ideg) + df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2 * ideg) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1 / (fm * fm) + + # for Hermite we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= np.sqrt(np.pi) / w.sum() + + return x, w + + +def hermweight(x): + """ + Weight function of the Hermite polynomials. + + The weight function is :math:`\\exp(-x^2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. 
The Hermite polynomials are + orthogonal, but not normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite import hermweight + >>> x = np.arange(-2, 2) + >>> hermweight(x) + array([0.01831564, 0.36787944, 1. , 0.36787944]) + + """ + w = np.exp(-x**2) + return w + + +# +# Hermite series class +# + +class Hermite(ABCPolyBase): + """An Hermite series class. + + The Hermite class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Hermite coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermadd) + _sub = staticmethod(hermsub) + _mul = staticmethod(hermmul) + _div = staticmethod(hermdiv) + _pow = staticmethod(hermpow) + _val = staticmethod(hermval) + _int = staticmethod(hermint) + _der = staticmethod(hermder) + _fit = staticmethod(hermfit) + _line = staticmethod(hermline) + _roots = staticmethod(hermroots) + _fromroots = staticmethod(hermfromroots) + + # Virtual properties + domain = np.array(hermdomain) + window = np.array(hermdomain) + basis_name = 'H' diff --git a/python/numpy/polynomial/hermite.pyi b/python/numpy/polynomial/hermite.pyi new file mode 100644 index 000000000..f7d907c1b --- /dev/null +++ b/python/numpy/polynomial/hermite.pyi @@ -0,0 +1,107 @@ +from typing import Any, Final, TypeVar +from typing import Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermtrim + +__all__ = [ + "hermzero", + "hermone", + "hermx", + "hermdomain", + "hermline", + "hermadd", + "hermsub", + "hermmulx", + "hermmul", + "hermdiv", + "hermpow", + "hermval", + "hermder", + "hermint", + "herm2poly", + "poly2herm", + "hermfromroots", + "hermvander", + "hermfit", + "hermtrim", + "hermroots", + "Hermite", + "hermval2d", + "hermval3d", + "hermgrid2d", + "hermgrid3d", + "hermvander2d", + "hermvander3d", + "hermcompanion", + "hermgauss", + "hermweight", +] + +poly2herm: _FuncPoly2Ortho[L["poly2herm"]] +herm2poly: _FuncUnOp[L["herm2poly"]] + +hermdomain: Final[_Array2[np.float64]] +hermzero: Final[_Array1[np.int_]] +hermone: Final[_Array1[np.int_]] +hermx: Final[_Array2[np.int_]] + +hermline: _FuncLine[L["hermline"]] +hermfromroots:
_FuncFromRoots[L["hermfromroots"]] +hermadd: _FuncBinOp[L["hermadd"]] +hermsub: _FuncBinOp[L["hermsub"]] +hermmulx: _FuncUnOp[L["hermmulx"]] +hermmul: _FuncBinOp[L["hermmul"]] +hermdiv: _FuncBinOp[L["hermdiv"]] +hermpow: _FuncPow[L["hermpow"]] +hermder: _FuncDer[L["hermder"]] +hermint: _FuncInteg[L["hermint"]] +hermval: _FuncVal[L["hermval"]] +hermval2d: _FuncVal2D[L["hermval2d"]] +hermval3d: _FuncVal3D[L["hermval3d"]] +hermvalfromroots: _FuncValFromRoots[L["hermvalfromroots"]] +hermgrid2d: _FuncVal2D[L["hermgrid2d"]] +hermgrid3d: _FuncVal3D[L["hermgrid3d"]] +hermvander: _FuncVander[L["hermvander"]] +hermvander2d: _FuncVander2D[L["hermvander2d"]] +hermvander3d: _FuncVander3D[L["hermvander3d"]] +hermfit: _FuncFit[L["hermfit"]] +hermcompanion: _FuncCompanion[L["hermcompanion"]] +hermroots: _FuncRoots[L["hermroots"]] + +_ND = TypeVar("_ND", bound=Any) +def _normed_hermite_n( + x: np.ndarray[_ND, np.dtype[np.float64]], + n: int | np.intp, +) -> np.ndarray[_ND, np.dtype[np.float64]]: ... + +hermgauss: _FuncGauss[L["hermgauss"]] +hermweight: _FuncWeight[L["hermweight"]] + +class Hermite(ABCPolyBase[L["H"]]): ... diff --git a/python/numpy/polynomial/hermite_e.py b/python/numpy/polynomial/hermite_e.py new file mode 100644 index 000000000..d30fc1b5a --- /dev/null +++ b/python/numpy/polynomial/hermite_e.py @@ -0,0 +1,1642 @@ +""" +=================================================================== +HermiteE Series, "Probabilists" (:mod:`numpy.polynomial.hermite_e`) +=================================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Hermite_e series, including a `HermiteE` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + HermiteE + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + hermedomain + hermezero + hermeone + hermex + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + hermeadd + hermesub + hermemulx + hermemul + hermediv + hermepow + hermeval + hermeval2d + hermeval3d + hermegrid2d + hermegrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + hermeder + hermeint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + hermefromroots + hermeroots + hermevander + hermevander2d + hermevander3d + hermegauss + hermeweight + hermecompanion + hermefit + hermetrim + hermeline + herme2poly + poly2herme + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline', + 'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv', + 'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly', + 'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim', + 'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d', + 'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion', + 'hermegauss', 'hermeweight'] + +hermetrim = pu.trimcoef + + +def poly2herme(pol): + """ + poly2herme(pol) + + Convert a polynomial to a Hermite series. 
+ + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Hermite series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Hermite + series. + + See Also + -------- + herme2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite_e import poly2herme + >>> poly2herme(np.arange(4)) + array([ 2., 10., 2., 3.]) + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = hermeadd(hermemulx(res), pol[i]) + return res + + +def herme2poly(c): + """ + Convert a Hermite series to a polynomial. + + Convert an array representing the coefficients of a Hermite series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Hermite series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2herme + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import herme2poly + >>> herme2poly([ 2., 10., 2., 3.]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + if n == 2: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], c1 * (i - 1)) + c1 = polyadd(tmp, polymulx(c1)) + return polyadd(c0, polymulx(c1)) + + +# +# These constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Hermite +hermedomain = np.array([-1., 1.]) + +# Hermite coefficients representing zero. +hermezero = np.array([0]) + +# Hermite coefficients representing one. +hermeone = np.array([1]) + +# Hermite coefficients representing the identity x. +hermex = np.array([0, 1]) + + +def hermeline(off, scl): + """ + Hermite series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Hermite series for + ``off + scl*x``.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyline
+    numpy.polynomial.chebyshev.chebline
+    numpy.polynomial.legendre.legline
+    numpy.polynomial.laguerre.lagline
+    numpy.polynomial.hermite.hermline
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
+    >>> hermeval(0,hermeline(3, 2))
+    3.0
+    >>> hermeval(1,hermeline(3, 2))
+    5.0
+
+    """
+    if scl != 0:
+        return np.array([off, scl])
+    else:
+        return np.array([off])
+
+
+def hermefromroots(roots):
+    """
+    Generate a HermiteE series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in HermiteE form, where the :math:`r_n` are the roots specified in
+    `roots`. If a zero has multiplicity n, then it must appear in `roots`
+    n times. For instance, if 2 is a root of multiplicity three and 3 is a
+    root of multiplicity 2, then `roots` looks something like
+    [2, 2, 2, 3, 3]. The roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in HermiteE form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients. If all roots are real then `out` is a
+        real array, if some of the roots are complex, then `out` is complex
+        even if all the coefficients in the result are real (see Examples
+        below).
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyfromroots
+    numpy.polynomial.legendre.legfromroots
+    numpy.polynomial.laguerre.lagfromroots
+    numpy.polynomial.hermite.hermfromroots
+    numpy.polynomial.chebyshev.chebfromroots
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
+    >>> coef = hermefromroots((-1, 0, 1))
+    >>> hermeval((-1, 0, 1), coef)
+    array([0., 0., 0.])
+    >>> coef = hermefromroots((-1j, 1j))
+    >>> hermeval((-1j, 1j), coef)
+    array([0.+0.j, 0.+0.j])
+
+    """
+    return pu._fromroots(hermeline, hermemul, roots)
+
+
+def hermeadd(c1, c2):
+    """
+    Add one Hermite series to another.
+
+    Returns the sum of two Hermite series `c1` + `c2`. The arguments
+    are sequences of coefficients ordered from lowest order term to
+    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the Hermite series of their sum.
+
+    See Also
+    --------
+    hermesub, hermemulx, hermemul, hermediv, hermepow
+
+    Notes
+    -----
+    Unlike multiplication, division, etc., the sum of two Hermite series
+    is a Hermite series (without having to "reproject" the result onto
+    the basis set) so addition, just like that of "standard" polynomials,
+    is simply "component-wise."
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermeadd
+    >>> hermeadd([1, 2, 3], [1, 2, 3, 4])
+    array([2., 4., 6., 4.])
+
+    """
+    return pu._add(c1, c2)
+
+
+def hermesub(c1, c2):
+    """
+    Subtract one Hermite series from another.
+
+    Returns the difference of two Hermite series `c1` - `c2`. The
+    sequences of coefficients are from lowest order term to highest, i.e.,
+    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
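+
+    Because the operation is component-wise, differing lengths are handled
+    by zero-padding the shorter series; a quick sketch:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import hermite_e as H
+    >>> np.allclose(H.hermesub([1., 2., 3.], [4., 5.]), [-3., -3., 3.])
+    True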
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Hermite series coefficients representing their difference.
+
+    See Also
+    --------
+    hermeadd, hermemulx, hermemul, hermediv, hermepow
+
+    Notes
+    -----
+    Unlike multiplication, division, etc., the difference of two Hermite
+    series is a Hermite series (without having to "reproject" the result
+    onto the basis set) so subtraction, just like that of "standard"
+    polynomials, is simply "component-wise."
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermesub
+    >>> hermesub([1, 2, 3, 4], [1, 2, 3])
+    array([0., 0., 0., 4.])
+
+    """
+    return pu._sub(c1, c2)
+
+
+def hermemulx(c):
+    """Multiply a Hermite series by x.
+
+    Multiply the Hermite series `c` by x, where x is the independent
+    variable.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Array representing the result of the multiplication.
+
+    See Also
+    --------
+    hermeadd, hermesub, hermemul, hermediv, hermepow
+
+    Notes
+    -----
+    The multiplication uses the recursion relationship for Hermite
+    polynomials in the form
+
+    .. math::
+
+        xP_i(x) = P_{i + 1}(x) + iP_{i - 1}(x)
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemulx
+    >>> hermemulx([1, 2, 3])
+    array([2., 7., 2., 3.])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    # The zero series needs special treatment
+    if len(c) == 1 and c[0] == 0:
+        return c
+
+    prd = np.empty(len(c) + 1, dtype=c.dtype)
+    prd[0] = c[0] * 0
+    prd[1] = c[0]
+    for i in range(1, len(c)):
+        prd[i + 1] = c[i]
+        prd[i - 1] += c[i] * i
+    return prd
+
+
+def hermemul(c1, c2):
+    """
+    Multiply one Hermite series by another.
+
+    Returns the product of two Hermite series `c1` * `c2`. The arguments
+    are sequences of coefficients, from lowest order "term" to highest,
+    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
+
+    Parameters
+    ----------
+    c1, c2 : array_like
+        1-D arrays of Hermite series coefficients ordered from low to
+        high.
+
+    Returns
+    -------
+    out : ndarray
+        Of Hermite series coefficients representing their product.
+
+    See Also
+    --------
+    hermeadd, hermesub, hermemulx, hermediv, hermepow
+
+    Notes
+    -----
+    In general, the (polynomial) product of two C-series results in terms
+    that are not in the Hermite polynomial basis set. Thus, to express
+    the product as a Hermite series, it is necessary to "reproject" the
+    product onto said basis set, which may produce "unintuitive" (but
+    correct) results; see Examples section below.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.hermite_e import hermemul
+    >>> hermemul([1, 2, 3], [0, 1, 2])
+    array([14., 15., 28.,  7.,  6.])
+
+    """
+    # s1, s2 are trimmed copies
+    [c1, c2] = pu.as_series([c1, c2])
+
+    if len(c1) > len(c2):
+        c = c2
+        xs = c1
+    else:
+        c = c1
+        xs = c2
+
+    if len(c) == 1:
+        c0 = c[0] * xs
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0] * xs
+        c1 = c[1] * xs
+    else:
+        nd = len(c)
+        c0 = c[-2] * xs
+        c1 = c[-1] * xs
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            nd = nd - 1
+            c0 = hermesub(c[-i] * xs, c1 * (nd - 1))
+            c1 = hermeadd(tmp, hermemulx(c1))
+    return hermeadd(c0, hermemulx(c1))
+
+
+def hermediv(c1, c2):
+    """
+    Divide one Hermite series by another.
+
+    Returns the quotient-with-remainder of two Hermite series
+    `c1` / `c2`.
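+
+    "Quotient-with-remainder" means the Euclidean identity
+    ``c1 == quo*c2 + rem`` holds in the HermiteE basis; a quick check:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import hermite_e as H
+    >>> quo, rem = H.hermediv([14., 15., 28., 7., 6.], [0., 1., 2.])
+    >>> np.allclose(H.hermeadd(H.hermemul(quo, [0., 1., 2.]), rem),
+    ...             [14., 15., 28., 7., 6.])
+    True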
The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Hermite series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Hermite series coefficients representing the quotient and + remainder. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermepow + + Notes + ----- + In general, the (polynomial) division of one Hermite series by another + results in quotient and remainder terms that are not in the Hermite + polynomial basis set. Thus, to express these results as a Hermite + series, it is necessary to "reproject" the results onto the Hermite + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermediv + >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 2.])) + + """ + return pu._div(hermemul, c1, c2) + + +def hermepow(c, pow, maxpower=16): + """Raise a Hermite series to a power. + + Returns the Hermite series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Hermite series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Hermite series of power. + + See Also + -------- + hermeadd, hermesub, hermemulx, hermemul, hermediv + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermepow + >>> hermepow([1, 2, 3], 2) + array([23., 28., 46., 12., 9.]) + + """ + return pu._pow(hermemul, c, pow, maxpower) + + +def hermeder(c, m=1, scl=1, axis=0): + """ + Differentiate a Hermite_e series. + + Returns the series coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2`` + while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y) + + 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1 + is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Hermite series of the derivative. + + See Also + -------- + hermeint + + Notes + ----- + In general, the result of differentiating a Hermite series does not + resemble the same operation on a power series. 
Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeder + >>> hermeder([ 1., 1., 1., 1.]) + array([1., 2., 3.]) + >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + return c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 0, -1): + der[j - 1] = j * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Hermite_e series. + + Returns the Hermite_e series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]] + represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) + + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of Hermite_e series coefficients. If c is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Hermite_e series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + hermeder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. 
Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeint + >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. + array([1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary + >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. + array([2., 1., 1., 1.]) + >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 + array([-1., 1., 1., 1.]) + >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) + array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j] / (j + 1) + tmp[0] += k[i] - hermeval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def hermeval(x, c, tensor=True): + """ + Evaluate an HermiteE series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. 
If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + hermeval2d, hermegrid2d, hermeval3d, hermegrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeval + >>> coef = [1,2,3] + >>> hermeval(1, coef) + 3.0 + >>> hermeval([[1,2],[3,4]], coef) + array([[ 3., 14.], + [31., 54.]]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * (nd - 1) + c1 = tmp + c1 * x + return c0 + c1 * x + + +def hermeval2d(x, y, c): + """ + Evaluate a 2-D HermiteE series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and if it isn't an ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + hermeval, hermegrid2d, hermeval3d, hermegrid3d + """ + return pu._valnd(hermeval, c, x, y) + + +def hermegrid2d(x, y, c): + """ + Evaluate a 2-D HermiteE series on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b) + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. 
+ + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + hermeval, hermeval2d, hermeval3d, hermegrid3d + """ + return pu._gridnd(hermeval, c, x, y) + + +def hermeval3d(x, y, z, c): + """ + Evaluate a 3-D Hermite_e series at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z) + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + `(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + hermeval, hermeval2d, hermegrid2d, hermegrid3d + """ + return pu._valnd(hermeval, c, x, y, z) + + +def hermegrid3d(x, y, z, c): + """ + Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c) + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. + + If `c` has fewer than three dimensions, ones are implicitly appended to + its shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape + y.shape + z.shape. 
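+
+    A small shape sketch of the grid-versus-pointwise distinction (shown
+    with the 2-D pair for brevity):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import hermite_e as H
+    >>> c = np.ones((2, 2))
+    >>> H.hermegrid2d([0., 1.], [0., 0.5, 1.], c).shape   # all pairs
+    (2, 3)
+    >>> H.hermeval2d([0., 1.], [0., 1.], c).shape         # zipped points
+    (2,)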
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`.  If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermeval, hermeval2d, hermegrid2d, hermeval3d
+    """
+    return pu._gridnd(hermeval, c, x, y, z)
+
+
+def hermevander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = He_i(x),
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the HermiteE polynomial.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
+    ``hermeval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of HermiteE series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding HermiteE polynomial. The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.hermite_e import hermevander
+    >>> x = np.array([-1, 0, 1])
+    >>> hermevander(x, 3)
+    array([[ 1., -1.,  0.,  2.],
+           [ 1.,  0., -1., -0.],
+           [ 1.,  1.,  0., -2.]])
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i - 1] * x - v[i - 2] * (i - 1))
+    return np.moveaxis(v, 0, -1)
+
+
+def hermevander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the degrees of
+    the HermiteE polynomials.
+
+    If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
+    up to roundoff.
+    This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`.  The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    hermevander, hermevander3d, hermeval2d, hermeval3d
+    """
+    return pu._vander_nd_flat((hermevander, hermevander), (x, y), deg)
+
+
+def hermevander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in
+    `x`, `y`, `z`, then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``.  The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the degrees of the HermiteE polynomials.
+
+    If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D HermiteE
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`.  The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    hermevander, hermevander2d, hermeval2d, hermeval3d
+    """
+    return pu._vander_nd_flat((hermevander, hermevander, hermevander), (x, y, z), deg)
+
+
+def hermefit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Hermite series to data.
+
+    Return the coefficients of a HermiteE series of degree `deg` that is
+    the least squares fit to the data values `y` given at points `x`. If
+    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
+    multiple fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points.
Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Hermite coefficients ordered from low to high. If `y` was 2-D, + the coefficients for the data in column k of `y` are in column + `k`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full = False``. The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.polynomial.polyfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.laguerre.lagfit + hermeval : Evaluates a Hermite series. + hermevander : pseudo Vandermonde matrix of Hermite series. + hermeweight : HermiteE weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the HermiteE series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c` + are the coefficients to be solved for, and the elements of `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. 
The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using HermiteE series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the HermiteE + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `hermeweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.hermite_e import hermefit, hermeval + >>> x = np.linspace(-10, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = hermeval(x, [1, 2, 3]) + err + >>> hermefit(x, y, 2) + array([1.02284196, 2.00032805, 2.99978457]) # may vary + + """ + return pu._fit(hermevander, x, y, deg, rcond, full, w) + + +def hermecompanion(c): + """ + Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an HermiteE basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of HermiteE series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = np.hstack((1., 1. / np.sqrt(np.arange(n - 1, 0, -1)))) + scl = np.multiply.accumulate(scl)[::-1] + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.sqrt(np.arange(1, n)) + bot[...] = top + mat[:, -1] -= scl * c[:-1] / c[-1] + return mat + + +def hermeroots(c): + """ + Compute the roots of a HermiteE series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * He_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.legendre.legroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.chebyshev.chebroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such + values. Roots with multiplicity greater than 1 will also show larger + errors as the value of the series near such points is relatively + insensitive to errors in the roots. Isolated roots near the origin can + be improved by a few iterations of Newton's method. + + The HermiteE series basis polynomials aren't powers of `x` so the + results of this function may seem unintuitive. 
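+
+    The Newton polish mentioned above is one step of ``r - p(r)/p'(r)``,
+    using `hermeder` for the derivative series; a quick sketch:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import hermite_e as H
+    >>> c = H.hermefromroots([-1., 0., 1.])
+    >>> r = H.hermeroots(c)
+    >>> r = r - H.hermeval(r, c) / H.hermeval(r, H.hermeder(c))
+    >>> np.allclose(r, [-1., 0., 1.])
+    True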
+ + Examples + -------- + >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots + >>> coef = hermefromroots([-1, 0, 1]) + >>> coef + array([0., 2., 0., 1.]) + >>> hermeroots(coef) + array([-1., 0., 1.]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) <= 1: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = hermecompanion(c)[::-1, ::-1] + r = la.eigvals(m) + r.sort() + return r + + +def _normed_hermite_e_n(x, n): + """ + Evaluate a normalized HermiteE polynomial. + + Compute the value of the normalized HermiteE polynomial of degree ``n`` + at the points ``x``. + + + Parameters + ---------- + x : ndarray of double. + Points at which to evaluate the function + n : int + Degree of the normalized HermiteE function to be evaluated. + + Returns + ------- + values : ndarray + The shape of the return value is described above. + + Notes + ----- + This function is needed for finding the Gauss points and integration + weights for high degrees. The values of the standard HermiteE functions + overflow when n >= 207. + + """ + if n == 0: + return np.full(x.shape, 1 / np.sqrt(np.sqrt(2 * np.pi))) + + c0 = 0. + c1 = 1. / np.sqrt(np.sqrt(2 * np.pi)) + nd = float(n) + for i in range(n - 1): + tmp = c0 + c0 = -c1 * np.sqrt((nd - 1.) / nd) + c1 = tmp + c1 * x * np.sqrt(1. / nd) + nd = nd - 1.0 + return c0 + c1 * x + + +def hermegauss(deg): + """ + Gauss-HermiteE quadrature. + + Computes the sample points and weights for Gauss-HermiteE quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-\\inf, \\inf]` + with the weight function :math:`f(x) = \\exp(-x^2/2)`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`He_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1]) + m = hermecompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = _normed_hermite_e_n(x, ideg) + df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = _normed_hermite_e_n(x, ideg - 1) + fm /= np.abs(fm).max() + w = 1 / (fm * fm) + + # for Hermite_e we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= np.sqrt(2 * np.pi) / w.sum() + + return x, w + + +def hermeweight(x): + """Weight function of the Hermite_e polynomials. + + The weight function is :math:`\\exp(-x^2/2)` and the interval of + integration is :math:`[-\\inf, \\inf]`. the HermiteE polynomials are + orthogonal, but not normalized, with respect to this weight function. 
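+
+    As a quadrature sketch tying `hermegauss` to this weight: the nodes
+    and weights integrate polynomials exactly against ``exp(-x**2/2)``,
+    e.g. the second moment is ``sqrt(2*pi)``:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import hermite_e as H
+    >>> x, w = H.hermegauss(20)
+    >>> np.allclose((w * x**2).sum(), np.sqrt(2 * np.pi))
+    True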
+ + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. + + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = np.exp(-.5 * x**2) + return w + + +# +# HermiteE series class +# + +class HermiteE(ABCPolyBase): + """An HermiteE series class. + + The HermiteE class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + HermiteE coefficients in order of increasing degree, i.e, + ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(X) + 3*He_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(hermeadd) + _sub = staticmethod(hermesub) + _mul = staticmethod(hermemul) + _div = staticmethod(hermediv) + _pow = staticmethod(hermepow) + _val = staticmethod(hermeval) + _int = staticmethod(hermeint) + _der = staticmethod(hermeder) + _fit = staticmethod(hermefit) + _line = staticmethod(hermeline) + _roots = staticmethod(hermeroots) + _fromroots = staticmethod(hermefromroots) + + # Virtual properties + domain = np.array(hermedomain) + window = np.array(hermedomain) + basis_name = 'He' diff --git a/python/numpy/polynomial/hermite_e.pyi b/python/numpy/polynomial/hermite_e.pyi new file mode 100644 index 000000000..e8013e66b --- /dev/null +++ b/python/numpy/polynomial/hermite_e.pyi @@ -0,0 +1,107 @@ +from typing import Any, Final, TypeVar +from typing import Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermetrim + +__all__ = [ + "hermezero", + "hermeone", + "hermex", + "hermedomain", + "hermeline", + "hermeadd", + "hermesub", + "hermemulx", + "hermemul", + "hermediv", + "hermepow", + "hermeval", + "hermeder", + "hermeint", + "herme2poly", + "poly2herme", + "hermefromroots", + "hermevander", + "hermefit", + "hermetrim", + "hermeroots", + "HermiteE", + "hermeval2d", + "hermeval3d", + "hermegrid2d", + "hermegrid3d", + "hermevander2d", + "hermevander3d", + "hermecompanion", + "hermegauss", + "hermeweight", +] + +poly2herme: _FuncPoly2Ortho[L["poly2herme"]] +herme2poly: _FuncUnOp[L["herme2poly"]] + +hermedomain: Final[_Array2[np.float64]] +hermezero: Final[_Array1[np.int_]] +hermeone: Final[_Array1[np.int_]] +hermex: Final[_Array2[np.int_]] + +hermeline: _FuncLine[L["hermeline"]] +hermefromroots: _FuncFromRoots[L["hermefromroots"]] +hermeadd: _FuncBinOp[L["hermeadd"]] +hermesub: _FuncBinOp[L["hermesub"]] +hermemulx: _FuncUnOp[L["hermemulx"]] +hermemul: _FuncBinOp[L["hermemul"]] +hermediv: _FuncBinOp[L["hermediv"]] +hermepow: 
_FuncPow[L["hermepow"]] +hermeder: _FuncDer[L["hermeder"]] +hermeint: _FuncInteg[L["hermeint"]] +hermeval: _FuncVal[L["hermeval"]] +hermeval2d: _FuncVal2D[L["hermeval2d"]] +hermeval3d: _FuncVal3D[L["hermeval3d"]] +hermevalfromroots: _FuncValFromRoots[L["hermevalfromroots"]] +hermegrid2d: _FuncVal2D[L["hermegrid2d"]] +hermegrid3d: _FuncVal3D[L["hermegrid3d"]] +hermevander: _FuncVander[L["hermevander"]] +hermevander2d: _FuncVander2D[L["hermevander2d"]] +hermevander3d: _FuncVander3D[L["hermevander3d"]] +hermefit: _FuncFit[L["hermefit"]] +hermecompanion: _FuncCompanion[L["hermecompanion"]] +hermeroots: _FuncRoots[L["hermeroots"]] + +_ND = TypeVar("_ND", bound=Any) +def _normed_hermite_e_n( + x: np.ndarray[_ND, np.dtype[np.float64]], + n: int | np.intp, +) -> np.ndarray[_ND, np.dtype[np.float64]]: ... + +hermegauss: _FuncGauss[L["hermegauss"]] +hermeweight: _FuncWeight[L["hermeweight"]] + +class HermiteE(ABCPolyBase[L["He"]]): ... diff --git a/python/numpy/polynomial/laguerre.py b/python/numpy/polynomial/laguerre.py new file mode 100644 index 000000000..38eb5a80b --- /dev/null +++ b/python/numpy/polynomial/laguerre.py @@ -0,0 +1,1675 @@ +""" +================================================== +Laguerre Series (:mod:`numpy.polynomial.laguerre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Laguerre series, including a `Laguerre` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Laguerre + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + lagdomain + lagzero + lagone + lagx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + lagadd + lagsub + lagmulx + lagmul + lagdiv + lagpow + lagval + lagval2d + lagval3d + laggrid2d + laggrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + lagder + lagint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + lagfromroots + lagroots + lagvander + lagvander2d + lagvander3d + laggauss + lagweight + lagcompanion + lagfit + lagtrim + lagline + lag2poly + poly2lag + +See also +-------- +`numpy.polynomial` + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline', 'lagadd', + 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow', 'lagval', 'lagder', + 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots', 'lagvander', + 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d', 'lagval3d', + 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d', 'lagcompanion', + 'laggauss', 'lagweight'] + +lagtrim = pu.trimcoef + + +def poly2lag(pol): + """ + poly2lag(pol) + + Convert a polynomial to a Laguerre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Laguerre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Laguerre + series. 
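+
+    A round trip through the Laguerre basis recovers the original
+    coefficients; a quick sketch:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import laguerre as L
+    >>> pol = np.arange(4.)
+    >>> np.allclose(L.lag2poly(L.poly2lag(pol)), pol)
+    True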
+ + See Also + -------- + lag2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import poly2lag + >>> poly2lag(np.arange(4)) + array([ 23., -63., 58., -18.]) + + """ + [pol] = pu.as_series([pol]) + res = 0 + for p in pol[::-1]: + res = lagadd(lagmulx(res), p) + return res + + +def lag2poly(c): + """ + Convert a Laguerre series to a polynomial. + + Convert an array representing the coefficients of a Laguerre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Laguerre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2lag + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lag2poly + >>> lag2poly([ 23., -63., 58., -18.]) + array([0., 1., 2., 3.]) + + """ + from .polynomial import polyadd, polymulx, polysub + + [c] = pu.as_series([c]) + n = len(c) + if n == 1: + return c + else: + c0 = c[-2] + c1 = c[-1] + # i is the current degree of c1 + for i in range(n - 1, 1, -1): + tmp = c0 + c0 = polysub(c[i - 2], (c1 * (i - 1)) / i) + c1 = polyadd(tmp, polysub((2 * i - 1) * c1, polymulx(c1)) / i) + return polyadd(c0, polysub(c1, polymulx(c1))) + + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Laguerre +lagdomain = np.array([0., 1.]) + +# Laguerre coefficients representing zero. +lagzero = np.array([0]) + +# Laguerre coefficients representing one. +lagone = np.array([1]) + +# Laguerre coefficients representing the identity x. +lagx = np.array([1, -1]) + + +def lagline(off, scl): + """ + Laguerre series whose graph is a straight line. + + Parameters + ---------- + off, scl : scalars + The specified line is given by ``off + scl*x``. + + Returns + ------- + y : ndarray + This module's representation of the Laguerre series for + ``off + scl*x``. + + See Also + -------- + numpy.polynomial.polynomial.polyline + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagline, lagval + >>> lagval(0,lagline(3, 2)) + 3.0 + >>> lagval(1,lagline(3, 2)) + 5.0 + + """ + if scl != 0: + return np.array([off + scl, -scl]) + else: + return np.array([off]) + + +def lagfromroots(roots): + """ + Generate a Laguerre series with given roots. + + The function returns the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + in Laguerre form, where the :math:`r_n` are the roots specified in `roots`. + If a zero has multiplicity n, then it must appear in `roots` n times. + For instance, if 2 is a root of multiplicity three and 3 is a root of + multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The + roots can appear in any order. 
+ + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) + + The coefficient of the last term is not generally 1 for monic + polynomials in Laguerre form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of coefficients. If all roots are real then `out` is a + real array, if some of the roots are complex, then `out` is complex + even if all the coefficients in the result are real (see Examples + below). + + See Also + -------- + numpy.polynomial.polynomial.polyfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagfromroots, lagval + >>> coef = lagfromroots((-1, 0, 1)) + >>> lagval((-1, 0, 1), coef) + array([0., 0., 0.]) + >>> coef = lagfromroots((-1j, 1j)) + >>> lagval((-1j, 1j), coef) + array([0.+0.j, 0.+0.j]) + + """ + return pu._fromroots(lagline, lagmul, roots) + + +def lagadd(c1, c2): + """ + Add one Laguerre series to another. + + Returns the sum of two Laguerre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Laguerre series of their sum. + + See Also + -------- + lagsub, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Laguerre series + is a Laguerre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagadd + >>> lagadd([1, 2, 3], [1, 2, 3, 4]) + array([2., 4., 6., 4.]) + + """ + return pu._add(c1, c2) + + +def lagsub(c1, c2): + """ + Subtract one Laguerre series from another. + + Returns the difference of two Laguerre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their difference. + + See Also + -------- + lagadd, lagmulx, lagmul, lagdiv, lagpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Laguerre + series is a Laguerre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagsub + >>> lagsub([1, 2, 3, 4], [1, 2, 3]) + array([0., 0., 0., 4.]) + + """ + return pu._sub(c1, c2) + + +def lagmulx(c): + """Multiply a Laguerre series by x. + + Multiply the Laguerre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. 
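+
+    The recursion in the Notes below can be checked pointwise: the new
+    series must evaluate to ``x`` times the old one. A quick sketch:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import laguerre as L
+    >>> c = [1., 2., 3.]
+    >>> x = np.linspace(0., 4., 9)
+    >>> np.allclose(L.lagval(x, L.lagmulx(c)), x * L.lagval(x, c))
+    True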
+ + See Also + -------- + lagadd, lagsub, lagmul, lagdiv, lagpow + + Notes + ----- + The multiplication uses the recursion relationship for Laguerre + polynomials in the form + + .. math:: + + xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x)) + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmulx + >>> lagmulx([1, 2, 3]) + array([-1., -1., 11., -9.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] + prd[1] = -c[0] + for i in range(1, len(c)): + prd[i + 1] = -c[i] * (i + 1) + prd[i] += c[i] * (2 * i + 1) + prd[i - 1] -= c[i] * i + return prd + + +def lagmul(c1, c2): + """ + Multiply one Laguerre series by another. + + Returns the product of two Laguerre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Laguerre series coefficients representing their product. + + See Also + -------- + lagadd, lagsub, lagmulx, lagdiv, lagpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Laguerre polynomial basis set. Thus, to express + the product as a Laguerre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagmul + >>> lagmul([1, 2, 3], [0, 1, 2]) + array([ 8., -13., 38., -51., 36.]) + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = lagsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = lagadd(tmp, lagsub((2 * nd - 1) * c1, lagmulx(c1)) / nd) + return lagadd(c0, lagsub(c1, lagmulx(c1))) + + +def lagdiv(c1, c2): + """ + Divide one Laguerre series by another. + + Returns the quotient-with-remainder of two Laguerre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Laguerre series coefficients ordered from low to + high. + + Returns + ------- + [quo, rem] : ndarrays + Of Laguerre series coefficients representing the quotient and + remainder. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagpow + + Notes + ----- + In general, the (polynomial) division of one Laguerre series by another + results in quotient and remainder terms that are not in the Laguerre + polynomial basis set. Thus, to express these results as a Laguerre + series, it is necessary to "reproject" the results onto the Laguerre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. 
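+
+    Division undoes multiplication up to the reprojection just described;
+    a quick sketch consistent with the examples below:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import laguerre as L
+    >>> quo, rem = L.lagdiv(L.lagmul([1., 2., 3.], [0., 1., 2.]), [0., 1., 2.])
+    >>> np.allclose(quo, [1., 2., 3.])
+    True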
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagdiv + >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([0.])) + >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) + (array([1., 2., 3.]), array([1., 1.])) + + """ + return pu._div(lagmul, c1, c2) + + +def lagpow(c, pow, maxpower=16): + """Raise a Laguerre series to a power. + + Returns the Laguerre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Laguerre series of power. + + See Also + -------- + lagadd, lagsub, lagmulx, lagmul, lagdiv + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagpow + >>> lagpow([1, 2, 3], 2) + array([ 14., -16., 56., -72., 54.]) + + """ + return pu._pow(lagmul, c, pow, maxpower) + + +def lagder(c, m=1, scl=1, axis=0): + """ + Differentiate a Laguerre series. + + Returns the Laguerre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Laguerre series of the derivative. + + See Also + -------- + lagint + + Notes + ----- + In general, the result of differentiating a Laguerre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. 
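+
+    Within roundoff, differentiation undoes integration; compare the
+    `lagint` examples:
+
+    >>> from numpy.polynomial import laguerre as La
+    >>> La.lagder(La.lagint([1, 2, 3]))
+    array([1., 2., 3.])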
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagder + >>> lagder([ 1., 1., 1., -3.]) + array([1., 2., 3.]) + >>> lagder([ 1., 0., 0., -4., 3.], m=2) + array([1., 2., 3.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 1, -1): + der[j - 1] = -c[j] + c[j - 1] += c[j] + der[0] = -c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Laguerre series. + + Returns the Laguerre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. + + + Parameters + ---------- + c : array_like + Array of Laguerre series coefficients. If `c` is multidimensional + the different axis correspond to different variables with the + degree in each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Laguerre series coefficients of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + lagder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. 
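+
+    Because every Laguerre polynomial satisfies ``L_n(0) == 1``, the value
+    of the integral at the default lower bound ``lbnd=0`` is simply the
+    integration constant, as this small check illustrates:
+
+    >>> from numpy.polynomial import laguerre as La
+    >>> La.lagval(0, La.lagint([1, 2, 3], k=1))
+    1.0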
+ + Examples + -------- + >>> from numpy.polynomial.laguerre import lagint + >>> lagint([1,2,3]) + array([ 1., 1., 1., -3.]) + >>> lagint([1,2,3], m=2) + array([ 1., 0., 0., -4., 3.]) + >>> lagint([1,2,3], k=1) + array([ 2., 1., 1., -3.]) + >>> lagint([1,2,3], lbnd=-1) + array([11.5, 1. , 1. , -3. ]) + >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) + array([ 11.16666667, -5. , -3. , 2. ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] + tmp[1] = -c[0] + for j in range(1, n): + tmp[j] += c[j] + tmp[j + 1] = -c[j] + tmp[0] += k[i] - lagval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def lagval(x, c, tensor=True): + """ + Evaluate a Laguerre series at points x. + + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. 
+
+    See Also
+    --------
+    lagval2d, laggrid2d, lagval3d, laggrid3d
+
+    Notes
+    -----
+    The evaluation uses Clenshaw recursion, aka synthetic division.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagval
+    >>> coef = [1, 2, 3]
+    >>> lagval(1, coef)
+    -0.5
+    >>> lagval([[1, 2],[3, 4]], coef)
+    array([[-0.5, -4. ],
+           [-4.5, -2. ]])
+
+    """
+    c = np.array(c, ndmin=1, copy=None)
+    if c.dtype.char in '?bBhHiIlLqQpP':
+        c = c.astype(np.double)
+    if isinstance(x, (tuple, list)):
+        x = np.asarray(x)
+    if isinstance(x, np.ndarray) and tensor:
+        c = c.reshape(c.shape + (1,) * x.ndim)
+
+    if len(c) == 1:
+        c0 = c[0]
+        c1 = 0
+    elif len(c) == 2:
+        c0 = c[0]
+        c1 = c[1]
+    else:
+        nd = len(c)
+        c0 = c[-2]
+        c1 = c[-1]
+        for i in range(3, len(c) + 1):
+            tmp = c0
+            nd = nd - 1
+            c0 = c[-i] - (c1 * (nd - 1)) / nd
+            c1 = tmp + (c1 * ((2 * nd - 1) - x)) / nd
+    return c0 + c1 * (1 - x)
+
+
+def lagval2d(x, y, c):
+    """
+    Evaluate a 2-D Laguerre series at points (x, y).
+
+    This function returns the values:
+
+    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars and they
+    must have the same shape after conversion. In either case, either `x`
+    and `y` or their elements must support multiplication and addition both
+    with themselves and with the elements of `c`.
+
+    If `c` is a 1-D array a one is implicitly appended to its shape to make
+    it 2-D. The shape of the result will be c.shape[2:] + x.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points ``(x, y)``,
+        where `x` and `y` must have the same shape. If `x` or `y` is a list
+        or tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and if it isn't an ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term
+        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+        dimension greater than two the remaining indices enumerate multiple
+        sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional polynomial at points formed with
+        pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    lagval, laggrid2d, lagval3d, laggrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagval2d
+    >>> c = [[1, 2],[3, 4]]
+    >>> lagval2d(1, 1, c)
+    1.0
+    """
+    return pu._valnd(lagval, c, x, y)
+
+
+def laggrid2d(x, y, c):
+    """
+    Evaluate a 2-D Laguerre series on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
+
+    where the points ``(a, b)`` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape + y.shape.
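+
+    In other words, where `lagval2d` pairs up the elements of `x` and `y`,
+    this function evaluates at every combination of an `x` with a `y`; a
+    minimal sketch of the resulting shapes:
+
+    >>> from numpy.polynomial.laguerre import lagval2d, laggrid2d
+    >>> c = [[1, 2], [3, 4]]
+    >>> lagval2d([0, 1], [0, 1], c).shape
+    (2,)
+    >>> laggrid2d([0, 1], [0, 1], c).shape
+    (2, 2)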
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Laguerre series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    lagval, lagval2d, lagval3d, laggrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import laggrid2d
+    >>> c = [[1, 2], [3, 4]]
+    >>> laggrid2d([0, 1], [0, 1], c)
+    array([[10.,  4.],
+           [ 3.,  1.]])
+
+    """
+    return pu._gridnd(lagval, c, x, y)
+
+
+def lagval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Laguerre series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagval, lagval2d, laggrid2d, laggrid3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagval3d
+    >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+    >>> lagval3d(1, 1, 2, c)
+    -1.0
+
+    """
+    return pu._valnd(lagval, c, x, y, z)
+
+
+def laggrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+    where the points ``(a, b, c)`` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagval, lagval2d, laggrid2d, lagval3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import laggrid3d
+    >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
+    >>> laggrid3d([0, 1], [0, 1], [2, 4], c)
+    array([[[ -4., -44.],
+            [ -2., -18.]],
+           [[ -2., -14.],
+            [ -1.,  -5.]]])
+
+    """
+    return pu._gridnd(lagval, c, x, y, z)
+
+
+def lagvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = L_i(x)
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Laguerre polynomial.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
+    ``lagval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Laguerre series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Laguerre polynomial. The dtype will be the same as
+        the converted `x`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import lagvander
+    >>> x = np.array([0, 1, 2])
+    >>> lagvander(x, 3)
+    array([[ 1.        ,  1.        ,  1.        ,  1.        ],
+           [ 1.        ,  0.        , -0.5       , -0.66666667],
+           [ 1.        , -1.        , -1.        , -0.33333333]])
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        v[1] = 1 - x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i - 1] * (2 * i - 1 - x) - v[i - 2] * (i - 1)) / i
+    return np.moveaxis(v, 0, -1)
+
+
+def lagvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``.
The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the degrees of
+    the Laguerre polynomials.
+
+    If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Laguerre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    lagvander, lagvander3d, lagval2d, lagval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import lagvander2d
+    >>> x = np.array([0])
+    >>> y = np.array([2])
+    >>> lagvander2d(x, y, [2, 1])
+    array([[ 1., -1.,  1., -1.,  1., -1.]])
+
+    """
+    return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg)
+
+
+def lagvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the degrees of the Laguerre polynomials.
+
+    If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Laguerre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    lagvander, lagvander2d, lagval2d, lagval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import lagvander3d
+    >>> x = np.array([0])
+    >>> y = np.array([2])
+    >>> z = np.array([0])
+    >>> lagvander3d(x, y, z, [2, 1, 3])
+    array([[ 1.,  1.,  1.,  1., -1., -1., -1., -1.,  1.,  1.,  1.,  1., -1.,
+            -1., -1., -1.,  1.,  1.,  1.,  1., -1., -1., -1., -1.]])
+
+    """
+    return pu._vander_nd_flat((lagvander, lagvander, lagvander), (x, y, z), deg)
+
+
+def lagfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least squares fit of Laguerre series to data.
+
+    Return the coefficients of a Laguerre series of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
+
+    where ``n`` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (M,)
+        x-coordinates of the M sample points ``(x[i], y[i])``.
+    y : array_like, shape (M,) or (M, K)
+        y-coordinates of the sample points. Several data sets of sample
+        points sharing the same x-coordinates can be fitted at once by
+        passing in a 2D-array that contains one dataset per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller than
+        this relative to the largest singular value will be ignored. The
+        default value is len(x)*eps, where eps is the relative precision of
+        the float type, about 2e-16 in most cases.
+    full : bool, optional
+        Switch determining nature of return value. When it is False (the
+        default) just the coefficients are returned, when True diagnostic
+        information from the singular value decomposition is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the weight ``w[i]`` applies to the unsquared
+        residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+        chosen so that the errors of the products ``w[i]*y[i]`` all have the
+        same variance. When using inverse-variance weighting, use
+        ``w[i] = 1/sigma(y[i])``. The default value is None.
+
+    Returns
+    -------
+    coef : ndarray, shape (M,) or (M, K)
+        Laguerre coefficients ordered from low to high. If `y` was 2-D,
+        the coefficients for the data in column *k* of `y` are in column
+        *k*.
+
+    [residuals, rank, singular_values, rcond] : list
+        These values are only returned if ``full == True``
+
+        - residuals -- sum of squared residuals of the least squares fit
+        - rank -- the numerical rank of the scaled Vandermonde matrix
+        - singular_values -- singular values of the scaled Vandermonde matrix
+        - rcond -- value of `rcond`.
+
+        For more details, see `numpy.linalg.lstsq`.
+
+    Warns
+    -----
+    RankWarning
+        The rank of the coefficient matrix in the least-squares fit is
+        deficient. The warning is only raised if ``full == False``.
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.legendre.legfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + lagval : Evaluates a Laguerre series. + lagvander : pseudo Vandermonde matrix of Laguerre series. + lagweight : Laguerre weight function. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Laguerre series ``p`` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where ``V`` is the weighted pseudo Vandermonde matrix of `x`, ``c`` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of ``V``. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Laguerre series are probably most useful when the data can + be approximated by ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Laguerre + weight. In that case the weight ``sqrt(w(x[i]))`` should be used + together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is + available as `lagweight`. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.laguerre import lagfit, lagval + >>> x = np.linspace(0, 10) + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) + >>> y = lagval(x, [1, 2, 3]) + err + >>> lagfit(x, y, 2) + array([1.00578369, 1.99417356, 2.99827656]) # may vary + + """ + return pu._fit(lagvander, x, y, deg, rcond, full, w) + + +def lagcompanion(c): + """ + Return the companion matrix of c. + + The usual companion matrix of the Laguerre polynomials is already + symmetric when `c` is a basis Laguerre polynomial, so no scaling is + applied. + + Parameters + ---------- + c : array_like + 1-D array of Laguerre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Companion matrix of dimensions (deg, deg). + + Examples + -------- + >>> from numpy.polynomial.laguerre import lagcompanion + >>> lagcompanion([1, 2, 3]) + array([[ 1. , -0.33333333], + [-1. , 4.33333333]]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[1 + c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + top = mat.reshape(-1)[1::n + 1] + mid = mat.reshape(-1)[0::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = -np.arange(1, n) + mid[...] = 2. * np.arange(n) + 1. + bot[...] 
= top
+    mat[:, -1] += (c[:-1] / c[-1]) * n
+    return mat
+
+
+def lagroots(c):
+    """
+    Compute the roots of a Laguerre series.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * L_i(x).
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the series. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    The Laguerre series basis polynomials aren't powers of `x` so the
+    results of this function may seem unintuitive.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import lagroots, lagfromroots
+    >>> coef = lagfromroots([0, 1, 2])
+    >>> coef
+    array([  2.,  -8.,  12.,  -6.])
+    >>> lagroots(coef)
+    array([-4.4408921e-16,  1.0000000e+00,  2.0000000e+00])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) <= 1:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([1 + c[0] / c[1]])
+
+    # rotated companion matrix reduces error
+    m = lagcompanion(c)[::-1, ::-1]
+    r = la.eigvals(m)
+    r.sort()
+    return r
+
+
+def laggauss(deg):
+    """
+    Gauss-Laguerre quadrature.
+
+    Computes the sample points and weights for Gauss-Laguerre quadrature.
+    These sample points and weights will correctly integrate polynomials of
+    degree :math:`2*deg - 1` or less over the interval :math:`[0, \\infty]`
+    with the weight function :math:`f(x) = \\exp(-x)`.
+
+    Parameters
+    ----------
+    deg : int
+        Number of sample points and weights. It must be >= 1.
+
+    Returns
+    -------
+    x : ndarray
+        1-D ndarray containing the sample points.
+    y : ndarray
+        1-D ndarray containing the weights.
+
+    Notes
+    -----
+    The results have only been tested up to degree 100; higher degrees may
+    be problematic. The weights are determined by using the fact that
+
+    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
+
+    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
+    is the k'th root of :math:`L_n`, and then scaling the results to get
+    the right value when integrating 1.
+
+    Examples
+    --------
+    >>> from numpy.polynomial.laguerre import laggauss
+    >>> laggauss(2)
+    (array([0.58578644, 3.41421356]), array([0.85355339, 0.14644661]))
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg <= 0:
+        raise ValueError("deg must be a positive integer")
+
+    # first approximation of roots. We use the fact that the companion
+    # matrix is symmetric in this case in order to obtain better zeros.
+    c = np.array([0] * deg + [1])
+    m = lagcompanion(c)
+    x = la.eigvalsh(m)
+
+    # improve roots by one application of Newton
+    dy = lagval(x, c)
+    df = lagval(x, lagder(c))
+    x -= dy / df
+
+    # compute the weights. We scale the factor to avoid possible numerical
+    # overflow.
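+    # Here fm evaluates L_{n-1} at the roots (c[1:] has length deg with a
+    # one in its last slot, so it represents L_{n-1}) and df is L'_n from
+    # the Newton step above; both are rescaled to unit max so their product
+    # cannot overflow, since only the relative size of the weights matters
+    # before the final normalization below.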
+
+    fm = lagval(x, c[1:])
+    fm /= np.abs(fm).max()
+    df /= np.abs(df).max()
+    w = 1 / (fm * df)
+
+    # scale w to get the right value, 1 in this case
+    w /= w.sum()
+
+    return x, w
+
+
+def lagweight(x):
+    """Weight function of the Laguerre polynomials.
+
+    The weight function is :math:`\\exp(-x)` and the interval of integration
+    is :math:`[0, \\infty]`. The Laguerre polynomials are orthogonal, but not
+    normalized, with respect to this weight function.
+
+    Parameters
+    ----------
+    x : array_like
+        Values at which the weight function will be computed.
+
+    Returns
+    -------
+    w : ndarray
+        The weight function at `x`.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial.laguerre import lagweight
+    >>> x = np.array([0, 1, 2])
+    >>> lagweight(x)
+    array([1.        , 0.36787944, 0.13533528])
+
+    """
+    w = np.exp(-x)
+    return w
+
+#
+# Laguerre series class
+#
+
+class Laguerre(ABCPolyBase):
+    """A Laguerre series class.
+
+    The Laguerre class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed below.
+
+    Parameters
+    ----------
+    coef : array_like
+        Laguerre coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` gives ``1*L_0(x) + 2*L_1(x) + 3*L_2(x)``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [0., 1.].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [0., 1.].
+    symbol : str, optional
+        Symbol used to represent the independent variable in string
+        representations of the polynomial expression, e.g. for printing.
+        The symbol must be a valid Python identifier. Default value is 'x'.
+
+    ..
versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(lagadd) + _sub = staticmethod(lagsub) + _mul = staticmethod(lagmul) + _div = staticmethod(lagdiv) + _pow = staticmethod(lagpow) + _val = staticmethod(lagval) + _int = staticmethod(lagint) + _der = staticmethod(lagder) + _fit = staticmethod(lagfit) + _line = staticmethod(lagline) + _roots = staticmethod(lagroots) + _fromroots = staticmethod(lagfromroots) + + # Virtual properties + domain = np.array(lagdomain) + window = np.array(lagdomain) + basis_name = 'L' diff --git a/python/numpy/polynomial/laguerre.pyi b/python/numpy/polynomial/laguerre.pyi new file mode 100644 index 000000000..6f67257a6 --- /dev/null +++ b/python/numpy/polynomial/laguerre.pyi @@ -0,0 +1,100 @@ +from typing import Final +from typing import Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as lagtrim + +__all__ = [ + "lagzero", + "lagone", + "lagx", + "lagdomain", + "lagline", + "lagadd", + "lagsub", + "lagmulx", + "lagmul", + "lagdiv", + "lagpow", + "lagval", + "lagder", + "lagint", + "lag2poly", + "poly2lag", + "lagfromroots", + "lagvander", + "lagfit", + "lagtrim", + "lagroots", + "Laguerre", + "lagval2d", + "lagval3d", + "laggrid2d", + "laggrid3d", + "lagvander2d", + "lagvander3d", + "lagcompanion", + "laggauss", + "lagweight", +] + +poly2lag: _FuncPoly2Ortho[L["poly2lag"]] +lag2poly: _FuncUnOp[L["lag2poly"]] + +lagdomain: Final[_Array2[np.float64]] +lagzero: Final[_Array1[np.int_]] +lagone: Final[_Array1[np.int_]] +lagx: Final[_Array2[np.int_]] + +lagline: _FuncLine[L["lagline"]] +lagfromroots: _FuncFromRoots[L["lagfromroots"]] +lagadd: _FuncBinOp[L["lagadd"]] +lagsub: _FuncBinOp[L["lagsub"]] +lagmulx: _FuncUnOp[L["lagmulx"]] +lagmul: _FuncBinOp[L["lagmul"]] +lagdiv: _FuncBinOp[L["lagdiv"]] +lagpow: _FuncPow[L["lagpow"]] +lagder: _FuncDer[L["lagder"]] +lagint: _FuncInteg[L["lagint"]] +lagval: _FuncVal[L["lagval"]] +lagval2d: _FuncVal2D[L["lagval2d"]] +lagval3d: _FuncVal3D[L["lagval3d"]] +lagvalfromroots: _FuncValFromRoots[L["lagvalfromroots"]] +laggrid2d: _FuncVal2D[L["laggrid2d"]] +laggrid3d: _FuncVal3D[L["laggrid3d"]] +lagvander: _FuncVander[L["lagvander"]] +lagvander2d: _FuncVander2D[L["lagvander2d"]] +lagvander3d: _FuncVander3D[L["lagvander3d"]] +lagfit: _FuncFit[L["lagfit"]] +lagcompanion: _FuncCompanion[L["lagcompanion"]] +lagroots: _FuncRoots[L["lagroots"]] +laggauss: _FuncGauss[L["laggauss"]] +lagweight: _FuncWeight[L["lagweight"]] + +class Laguerre(ABCPolyBase[L["L"]]): ... diff --git a/python/numpy/polynomial/legendre.py b/python/numpy/polynomial/legendre.py new file mode 100644 index 000000000..b43bdfa83 --- /dev/null +++ b/python/numpy/polynomial/legendre.py @@ -0,0 +1,1605 @@ +""" +================================================== +Legendre Series (:mod:`numpy.polynomial.legendre`) +================================================== + +This module provides a number of objects (mostly functions) useful for +dealing with Legendre series, including a `Legendre` class that +encapsulates the usual arithmetic operations. 
(General information +on how this module represents and works with such polynomials is in the +docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Legendre + +Constants +--------- + +.. autosummary:: + :toctree: generated/ + + legdomain + legzero + legone + legx + +Arithmetic +---------- + +.. autosummary:: + :toctree: generated/ + + legadd + legsub + legmulx + legmul + legdiv + legpow + legval + legval2d + legval3d + leggrid2d + leggrid3d + +Calculus +-------- + +.. autosummary:: + :toctree: generated/ + + legder + legint + +Misc Functions +-------------- + +.. autosummary:: + :toctree: generated/ + + legfromroots + legroots + legvander + legvander2d + legvander3d + leggauss + legweight + legcompanion + legfit + legtrim + legline + leg2poly + poly2leg + +See also +-------- +numpy.polynomial + +""" +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +__all__ = [ + 'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd', + 'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder', + 'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander', + 'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d', + 'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion', + 'leggauss', 'legweight'] + +legtrim = pu.trimcoef + + +def poly2leg(pol): + """ + Convert a polynomial to a Legendre series. + + Convert an array representing the coefficients of a polynomial (relative + to the "standard" basis) ordered from lowest degree to highest, to an + array of the coefficients of the equivalent Legendre series, ordered + from lowest to highest degree. + + Parameters + ---------- + pol : array_like + 1-D array containing the polynomial coefficients + + Returns + ------- + c : ndarray + 1-D array containing the coefficients of the equivalent Legendre + series. + + See Also + -------- + leg2poly + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. + + Examples + -------- + >>> import numpy as np + >>> from numpy import polynomial as P + >>> p = P.Polynomial(np.arange(4)) + >>> p + Polynomial([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], ... + >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) + >>> c + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary + + """ + [pol] = pu.as_series([pol]) + deg = len(pol) - 1 + res = 0 + for i in range(deg, -1, -1): + res = legadd(legmulx(res), pol[i]) + return res + + +def leg2poly(c): + """ + Convert a Legendre series to a polynomial. + + Convert an array representing the coefficients of a Legendre series, + ordered from lowest degree to highest, to an array of the coefficients + of the equivalent polynomial (relative to the "standard" basis) ordered + from lowest to highest degree. + + Parameters + ---------- + c : array_like + 1-D array containing the Legendre series coefficients, ordered + from lowest order term to highest. + + Returns + ------- + pol : ndarray + 1-D array containing the coefficients of the equivalent polynomial + (relative to the "standard" basis) ordered from lowest order term + to highest. + + See Also + -------- + poly2leg + + Notes + ----- + The easy way to do conversions between polynomial basis sets + is to use the convert method of a class instance. 
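+
+    Composed in either order, `poly2leg` and `leg2poly` return the
+    original coefficients up to roundoff, as the round trip below
+    illustrates:
+
+    >>> from numpy import polynomial as P
+    >>> P.legendre.poly2leg(P.legendre.leg2poly(range(4)))
+    array([0., 1., 2., 3.])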
+
+    Examples
+    --------
+    >>> from numpy import polynomial as P
+    >>> c = P.Legendre(range(4))
+    >>> c
+    Legendre([0., 1., 2., 3.], domain=[-1., 1.], window=[-1., 1.], symbol='x')
+    >>> p = c.convert(kind=P.Polynomial)
+    >>> p
+    Polynomial([-1. , -3.5,  3. ,  7.5], domain=[-1., 1.], window=[-1., ...
+    >>> P.legendre.leg2poly(range(4))
+    array([-1. , -3.5,  3. ,  7.5])
+
+    """
+    from .polynomial import polyadd, polymulx, polysub
+
+    [c] = pu.as_series([c])
+    n = len(c)
+    if n < 3:
+        return c
+    else:
+        c0 = c[-2]
+        c1 = c[-1]
+        # i is the current degree of c1
+        for i in range(n - 1, 1, -1):
+            tmp = c0
+            c0 = polysub(c[i - 2], (c1 * (i - 1)) / i)
+            c1 = polyadd(tmp, (polymulx(c1) * (2 * i - 1)) / i)
+        return polyadd(c0, polymulx(c1))
+
+
+#
+# These constant arrays are of integer type so as to be compatible
+# with the widest range of other types, such as Decimal.
+#
+
+# Legendre
+legdomain = np.array([-1., 1.])
+
+# Legendre coefficients representing zero.
+legzero = np.array([0])
+
+# Legendre coefficients representing one.
+legone = np.array([1])
+
+# Legendre coefficients representing the identity x.
+legx = np.array([0, 1])
+
+
+def legline(off, scl):
+    """
+    Legendre series whose graph is a straight line.
+
+    Parameters
+    ----------
+    off, scl : scalars
+        The specified line is given by ``off + scl*x``.
+
+    Returns
+    -------
+    y : ndarray
+        This module's representation of the Legendre series for
+        ``off + scl*x``.
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyline
+    numpy.polynomial.chebyshev.chebline
+    numpy.polynomial.laguerre.lagline
+    numpy.polynomial.hermite.hermline
+    numpy.polynomial.hermite_e.hermeline
+
+    Examples
+    --------
+    >>> import numpy.polynomial.legendre as L
+    >>> L.legline(3,2)
+    array([3, 2])
+    >>> L.legval(-3, L.legline(3,2))  # should be -3
+    -3.0
+
+    """
+    if scl != 0:
+        return np.array([off, scl])
+    else:
+        return np.array([off])
+
+
+def legfromroots(roots):
+    """
+    Generate a Legendre series with given roots.
+
+    The function returns the coefficients of the polynomial
+
+    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
+
+    in Legendre form, where the :math:`r_n` are the roots specified in `roots`.
+    If a zero has multiplicity n, then it must appear in `roots` n times.
+    For instance, if 2 is a root of multiplicity three and 3 is a root of
+    multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
+    roots can appear in any order.
+
+    If the returned coefficients are `c`, then
+
+    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
+
+    The coefficient of the last term is not generally 1 for monic
+    polynomials in Legendre form.
+
+    Parameters
+    ----------
+    roots : array_like
+        Sequence containing the roots.
+
+    Returns
+    -------
+    out : ndarray
+        1-D array of coefficients. If all roots are real then `out` is a
+        real array, if some of the roots are complex, then `out` is complex
+        even if all the coefficients in the result are real (see Examples
+        below).
+
+    See Also
+    --------
+    numpy.polynomial.polynomial.polyfromroots
+    numpy.polynomial.chebyshev.chebfromroots
+    numpy.polynomial.laguerre.lagfromroots
+    numpy.polynomial.hermite.hermfromroots
+    numpy.polynomial.hermite_e.hermefromroots
+
+    Examples
+    --------
+    >>> import numpy.polynomial.legendre as L
+    >>> L.legfromroots((-1,0,1))  # x^3 - x relative to the standard basis
+    array([ 0. , -0.4,  0.
, 0.4]) + >>> j = complex(0,1) + >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary + + """ + return pu._fromroots(legline, legmul, roots) + + +def legadd(c1, c2): + """ + Add one Legendre series to another. + + Returns the sum of two Legendre series `c1` + `c2`. The arguments + are sequences of coefficients ordered from lowest order term to + highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the Legendre series of their sum. + + See Also + -------- + legsub, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the sum of two Legendre series + is a Legendre series (without having to "reproject" the result onto + the basis set) so addition, just like that of "standard" polynomials, + is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legadd(c1,c2) + array([4., 4., 4.]) + + """ + return pu._add(c1, c2) + + +def legsub(c1, c2): + """ + Subtract one Legendre series from another. + + Returns the difference of two Legendre series `c1` - `c2`. The + sequences of coefficients are from lowest order term to highest, i.e., + [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their difference. + + See Also + -------- + legadd, legmulx, legmul, legdiv, legpow + + Notes + ----- + Unlike multiplication, division, etc., the difference of two Legendre + series is a Legendre series (without having to "reproject" the result + onto the basis set) so subtraction, just like that of "standard" + polynomials, is simply "component-wise." + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legsub(c1,c2) + array([-2., 0., 2.]) + >>> L.legsub(c2,c1) # -C.legsub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def legmulx(c): + """Multiply a Legendre series by x. + + Multiply the Legendre series `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + legadd, legsub, legmul, legdiv, legpow + + Notes + ----- + The multiplication uses the recursion relationship for Legendre + polynomials in the form + + .. math:: + + xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1) + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> L.legmulx([1,2,3]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1] = c[0] + for i in range(1, len(c)): + j = i + 1 + k = i - 1 + s = i + j + prd[j] = (c[i] * j) / s + prd[k] += (c[i] * i) / s + return prd + + +def legmul(c1, c2): + """ + Multiply one Legendre series by another. 
+ + Returns the product of two Legendre series `c1` * `c2`. The arguments + are sequences of coefficients, from lowest order "term" to highest, + e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Of Legendre series coefficients representing their product. + + See Also + -------- + legadd, legsub, legmulx, legdiv, legpow + + Notes + ----- + In general, the (polynomial) product of two C-series results in terms + that are not in the Legendre polynomial basis set. Thus, to express + the product as a Legendre series, it is necessary to "reproject" the + product onto said basis set, which may produce "unintuitive" (but + correct) results; see Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2) + >>> L.legmul(c1,c2) # multiplication requires "reprojection" + array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary + + """ + # s1, s2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + + if len(c1) > len(c2): + c = c2 + xs = c1 + else: + c = c1 + xs = c2 + + if len(c) == 1: + c0 = c[0] * xs + c1 = 0 + elif len(c) == 2: + c0 = c[0] * xs + c1 = c[1] * xs + else: + nd = len(c) + c0 = c[-2] * xs + c1 = c[-1] * xs + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = legsub(c[-i] * xs, (c1 * (nd - 1)) / nd) + c1 = legadd(tmp, (legmulx(c1) * (2 * nd - 1)) / nd) + return legadd(c0, legmulx(c1)) + + +def legdiv(c1, c2): + """ + Divide one Legendre series by another. + + Returns the quotient-with-remainder of two Legendre series + `c1` / `c2`. The arguments are sequences of coefficients from lowest + order "term" to highest, e.g., [1,2,3] represents the series + ``P_0 + 2*P_1 + 3*P_2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of Legendre series coefficients ordered from low to + high. + + Returns + ------- + quo, rem : ndarrays + Of Legendre series coefficients representing the quotient and + remainder. + + See Also + -------- + legadd, legsub, legmulx, legmul, legpow + + Notes + ----- + In general, the (polynomial) division of one Legendre series by another + results in quotient and remainder terms that are not in the Legendre + polynomial basis set. Thus, to express these results as a Legendre + series, it is necessary to "reproject" the results onto the Legendre + basis set, which may produce "unintuitive" (but correct) results; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c1 = (1,2,3) + >>> c2 = (3,2,1) + >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not + (array([3.]), array([-8., -4.])) + >>> c2 = (0,1,2,3) + >>> L.legdiv(c2,c1) # neither "intuitive" + (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary + + """ + return pu._div(legmul, c1, c2) + + +def legpow(c, pow, maxpower=16): + """Raise a Legendre series to a power. + + Returns the Legendre series `c` raised to the power `pow`. The + argument `c` is a sequence of coefficients ordered from low to high. + i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.`` + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to + high. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. 
Default is 16 + + Returns + ------- + coef : ndarray + Legendre series of power. + + See Also + -------- + legadd, legsub, legmulx, legmul, legdiv + + """ + return pu._pow(legmul, c, pow, maxpower) + + +def legder(c, m=1, scl=1, axis=0): + """ + Differentiate a Legendre series. + + Returns the Legendre series coefficients `c` differentiated `m` times + along `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The argument + `c` is an array of coefficients from low to high degree along each + axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` + while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change of + variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Legendre series of the derivative. + + See Also + -------- + legint + + Notes + ----- + In general, the result of differentiating a Legendre series does not + resemble the same operation on a power series. Thus the result of this + function may be "unintuitive," albeit correct; see Examples section + below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3,4) + >>> L.legder(c) + array([ 6., 9., 20.]) + >>> L.legder(c, 3) + array([60.]) + >>> L.legder(c, scl=-1) + array([ -6., -9., -20.]) + >>> L.legder(c, 2,-1) + array([ 9., 60.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=c.dtype) + for j in range(n, 2, -1): + der[j - 1] = (2 * j - 1) * c[j] + c[j - 2] += c[j] + if n > 1: + der[1] = 3 * c[2] + der[0] = c[1] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a Legendre series. + + Returns the Legendre series coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients from low to high degree along each axis, e.g., [1,2,3] + represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] + represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. 
+ + Parameters + ---------- + c : array_like + Array of Legendre series coefficients. If c is multidimensional the + different axis correspond to different variables with the degree in + each axis given by the corresponding index. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at + ``lbnd`` is the first value in the list, the value of the second + integral at ``lbnd`` is the second value, etc. If ``k == []`` (the + default), all constants are set to zero. If ``m == 1``, a single + scalar can be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Legendre series coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + legder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. + Why is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Also note that, in general, the result of integrating a C-series needs + to be "reprojected" onto the C-series basis set. Thus, typically, + the result of this function is "unintuitive," albeit correct; see + Examples section below. + + Examples + -------- + >>> from numpy.polynomial import legendre as L + >>> c = (1,2,3) + >>> L.legint(c) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, 3) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + >>> L.legint(c, k=3) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, lbnd=-2) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary + >>> L.legint(c, scl=2) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + k = list(k) + [0] * (cnt - len(k)) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + if n > 1: + tmp[2] = c[1] / 3 + for j in range(2, n): + t = c[j] / (2 * j + 1) + tmp[j + 1] = t + tmp[j - 1] -= t + tmp[0] += k[i] - legval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def legval(x, c, tensor=True): + """ + Evaluate a Legendre series at points x. 
+ + If `c` is of length ``n + 1``, this function returns the value: + + .. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, algebra_like + The shape of the return value is described above. + + See Also + -------- + legval2d, leggrid2d, legval3d, leggrid3d + + Notes + ----- + The evaluation uses Clenshaw recursion, aka synthetic division. + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + c = c.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + if len(c) == 1: + c0 = c[0] + c1 = 0 + elif len(c) == 2: + c0 = c[0] + c1 = c[1] + else: + nd = len(c) + c0 = c[-2] + c1 = c[-1] + for i in range(3, len(c) + 1): + tmp = c0 + nd = nd - 1 + c0 = c[-i] - c1 * ((nd - 1) / nd) + c1 = tmp + c1 * x * ((2 * nd - 1) / nd) + return c0 + c1 * x + + +def legval2d(x, y, c): + """ + Evaluate a 2-D Legendre series at points (x, y). + + This function returns the values: + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y) + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` is a 1-D array a one is implicitly appended to its shape to make + it 2-D. The shape of the result will be c.shape[2:] + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. 
If `x` or `y` is a list
+        or tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and if it isn't an ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term
+        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
+        dimension greater than two the remaining indices enumerate multiple
+        sets of coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points formed
+        from pairs of corresponding values from `x` and `y`.
+
+    See Also
+    --------
+    legval, leggrid2d, legval3d, leggrid3d
+    """
+    return pu._valnd(legval, c, x, y)
+
+
+def leggrid2d(x, y, c):
+    """
+    Evaluate a 2-D Legendre series on the Cartesian product of x and y.
+
+    This function returns the values:
+
+    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
+
+    where the points ``(a, b)`` consist of all pairs formed by taking
+    `a` from `x` and `b` from `y`. The resulting points form a grid with
+    `x` in the first dimension and `y` in the second.
+
+    The parameters `x` and `y` are converted to arrays only if they are
+    tuples or lists, otherwise they are treated as scalars. In either
+    case, either `x` and `y` or their elements must support multiplication
+    and addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than two dimensions, ones are implicitly appended to
+    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
+    x.shape + y.shape.
+
+    Parameters
+    ----------
+    x, y : array_like, compatible objects
+        The two dimensional series is evaluated at the points in the
+        Cartesian product of `x` and `y`. If `x` or `y` is a list or
+        tuple, it is first converted to an ndarray, otherwise it is left
+        unchanged and, if it isn't an ndarray, it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension
+        greater than two the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the two dimensional Legendre series at points in the
+        Cartesian product of `x` and `y`.
+
+    See Also
+    --------
+    legval, legval2d, legval3d, leggrid3d
+    """
+    return pu._gridnd(legval, c, x, y)
+
+
+def legval3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series at points (x, y, z).
+
+    This function returns the values:
+
+    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if
+    they are tuples or lists, otherwise they are treated as scalars and
+    they must have the same shape after conversion. In either case, either
+    `x`, `y`, and `z` or their elements must support multiplication and
+    addition both with themselves and with the elements of `c`.
+
+    If `c` has fewer than 3 dimensions, ones are implicitly appended to its
+    shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible object
+        The three dimensional series is evaluated at the points
+        ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If
+        any of `x`, `y`, or `z` is a list or tuple, it is first converted
+        to an ndarray, otherwise it is left unchanged and if it isn't an
+        ndarray it is treated as a scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficient of the term of
+        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
+        greater than 3 the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the multidimensional polynomial on points formed with
+        triples of corresponding values from `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, leggrid3d
+    """
+    return pu._valnd(legval, c, x, y, z)
+
+
+def leggrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
+
+    where the points ``(a, b, c)`` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional Legendre series at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    legval, legval2d, leggrid2d, legval3d
+    """
+    return pu._gridnd(legval, c, x, y, z)
+
+
+def legvander(x, deg):
+    """Pseudo-Vandermonde matrix of given degree.
+
+    Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
+    `x`. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = L_i(x)
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the degree of the Legendre polynomial.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
+    ``legval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of Legendre series of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The pseudo-Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the degree of the
+        corresponding Legendre polynomial.
The dtype will be the same as
+        the converted `x`.
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    # Use forward recursion to generate the entries. This is not as accurate
+    # as reverse recursion in this application but it is more efficient.
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
+    return np.moveaxis(v, 0, -1)
+
+
+def legvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the degrees of
+    the Legendre polynomials.
+
+    If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    legvander, legvander3d, legval2d, legval3d
+    """
+    return pu._vander_nd_flat((legvander, legvander), (x, y), deg)
+
+
+def legvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the degrees of the Legendre polynomials.
+
+    If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D Legendre
+    series of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex.
Scalars are converted to 1-D + arrays. + deg : list of ints + List of maximum degrees of the form [x_deg, y_deg, z_deg]. + + Returns + ------- + vander3d : ndarray + The shape of the returned matrix is ``x.shape + (order,)``, where + :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will + be the same as the converted `x`, `y`, and `z`. + + See Also + -------- + legvander, legvander3d, legval2d, legval3d + """ + return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg) + + +def legfit(x, y, deg, rcond=None, full=False, w=None): + """ + Least squares fit of Legendre series to data. + + Return the coefficients of a Legendre series of degree `deg` that is the + least squares fit to the data values `y` given at points `x`. If `y` is + 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple + fits are done, one for each column of `y`, and the resulting + coefficients are stored in the corresponding columns of a 2-D return. + The fitted polynomial(s) are in the form + + .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x), + + where `n` is `deg`. + + Parameters + ---------- + x : array_like, shape (M,) + x-coordinates of the M sample points ``(x[i], y[i])``. + y : array_like, shape (M,) or (M, K) + y-coordinates of the sample points. Several data sets of sample + points sharing the same x-coordinates can be fitted at once by + passing in a 2D-array that contains one dataset per column. + deg : int or 1-D array_like + Degree(s) of the fitting polynomials. If `deg` is a single integer + all terms up to and including the `deg`'th term are included in the + fit. For NumPy versions >= 1.11.0 a list of integers specifying the + degrees of the terms to include may be used instead. + rcond : float, optional + Relative condition number of the fit. Singular values smaller than + this relative to the largest singular value will be ignored. The + default value is len(x)*eps, where eps is the relative precision of + the float type, about 2e-16 in most cases. + full : bool, optional + Switch determining nature of return value. When it is False (the + default) just the coefficients are returned, when True diagnostic + information from the singular value decomposition is also returned. + w : array_like, shape (`M`,), optional + Weights. If not None, the weight ``w[i]`` applies to the unsquared + residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are + chosen so that the errors of the products ``w[i]*y[i]`` all have the + same variance. When using inverse-variance weighting, use + ``w[i] = 1/sigma(y[i])``. The default value is None. + + Returns + ------- + coef : ndarray, shape (M,) or (M, K) + Legendre coefficients ordered from low to high. If `y` was + 2-D, the coefficients for the data in column k of `y` are in + column `k`. If `deg` is specified as a list, coefficients for + terms not included in the fit are set equal to zero in the + returned `coef`. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Warns + ----- + RankWarning + The rank of the coefficient matrix in the least-squares fit is + deficient. The warning is only raised if ``full == False``. 
The + warnings can be turned off by + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.polynomial.polyfit + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + legval : Evaluates a Legendre series. + legvander : Vandermonde matrix of Legendre series. + legweight : Legendre weight function (= 1). + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the Legendre series `p` that + minimizes the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where :math:`w_j` are the weights. This problem is solved by setting up + as the (typically) overdetermined matrix equation + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected, then a `~exceptions.RankWarning` will be issued. This means that + the coefficient values may be poorly determined. Using a lower order fit + will usually get rid of the warning. The `rcond` parameter can also be + set to a value smaller than its default, but the resulting fit may be + spurious and have large contributions from roundoff error. + + Fits using Legendre series are usually better conditioned than fits + using power series, but much can depend on the distribution of the + sample points and the smoothness of the data. If the quality of the fit + is inadequate splines may be a good alternative. + + References + ---------- + .. [1] Wikipedia, "Curve fitting", + https://en.wikipedia.org/wiki/Curve_fitting + + Examples + -------- + + """ + return pu._fit(legvander, x, y, deg, rcond, full, w) + + +def legcompanion(c): + """Return the scaled companion matrix of c. + + The basis polynomials are scaled so that the companion matrix is + symmetric when `c` is an Legendre basis polynomial. This provides + better eigenvalue estimates than the unscaled case and for basis + polynomials the eigenvalues are guaranteed to be real if + `numpy.linalg.eigvalsh` is used to obtain them. + + Parameters + ---------- + c : array_like + 1-D array of Legendre series coefficients ordered from low to high + degree. + + Returns + ------- + mat : ndarray + Scaled companion matrix of dimensions (deg, deg). + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + raise ValueError('Series must have maximum degree of at least 1.') + if len(c) == 2: + return np.array([[-c[0] / c[1]]]) + + n = len(c) - 1 + mat = np.zeros((n, n), dtype=c.dtype) + scl = 1. / np.sqrt(2 * np.arange(n) + 1) + top = mat.reshape(-1)[1::n + 1] + bot = mat.reshape(-1)[n::n + 1] + top[...] = np.arange(1, n) * scl[:n - 1] * scl[1:n] + bot[...] = top + mat[:, -1] -= (c[:-1] / c[-1]) * (scl / scl[-1]) * (n / (2 * n - 1)) + return mat + + +def legroots(c): + """ + Compute the roots of a Legendre series. + + Return the roots (a.k.a. "zeros") of the polynomial + + .. math:: p(x) = \\sum_i c[i] * L_i(x). + + Parameters + ---------- + c : 1-D array_like + 1-D array of coefficients. + + Returns + ------- + out : ndarray + Array of the roots of the series. 
If all the roots are real, + then `out` is also real, otherwise it is complex. + + See Also + -------- + numpy.polynomial.polynomial.polyroots + numpy.polynomial.chebyshev.chebroots + numpy.polynomial.laguerre.lagroots + numpy.polynomial.hermite.hermroots + numpy.polynomial.hermite_e.hermeroots + + Notes + ----- + The root estimates are obtained as the eigenvalues of the companion + matrix, Roots far from the origin of the complex plane may have large + errors due to the numerical instability of the series for such values. + Roots with multiplicity greater than 1 will also show larger errors as + the value of the series near such points is relatively insensitive to + errors in the roots. Isolated roots near the origin can be improved by + a few iterations of Newton's method. + + The Legendre series basis polynomials aren't powers of ``x`` so the + results of this function may seem unintuitive. + + Examples + -------- + >>> import numpy.polynomial.legendre as leg + >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots + array([-0.85099543, -0.11407192, 0.51506735]) # may vary + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + if len(c) < 2: + return np.array([], dtype=c.dtype) + if len(c) == 2: + return np.array([-c[0] / c[1]]) + + # rotated companion matrix reduces error + m = legcompanion(c)[::-1, ::-1] + r = la.eigvals(m) + r.sort() + return r + + +def leggauss(deg): + """ + Gauss-Legendre quadrature. + + Computes the sample points and weights for Gauss-Legendre quadrature. + These sample points and weights will correctly integrate polynomials of + degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with + the weight function :math:`f(x) = 1`. + + Parameters + ---------- + deg : int + Number of sample points and weights. It must be >= 1. + + Returns + ------- + x : ndarray + 1-D ndarray containing the sample points. + y : ndarray + 1-D ndarray containing the weights. + + Notes + ----- + The results have only been tested up to degree 100, higher degrees may + be problematic. The weights are determined by using the fact that + + .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k)) + + where :math:`c` is a constant independent of :math:`k` and :math:`x_k` + is the k'th root of :math:`L_n`, and then scaling the results to get + the right value when integrating 1. + + """ + ideg = pu._as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") + + # first approximation of roots. We use the fact that the companion + # matrix is symmetric in this case in order to obtain better zeros. + c = np.array([0] * deg + [1]) + m = legcompanion(c) + x = la.eigvalsh(m) + + # improve roots by one application of Newton + dy = legval(x, c) + df = legval(x, legder(c)) + x -= dy / df + + # compute the weights. We scale the factor to avoid possible numerical + # overflow. + fm = legval(x, c[1:]) + fm /= np.abs(fm).max() + df /= np.abs(df).max() + w = 1 / (fm * df) + + # for Legendre we can also symmetrize + w = (w + w[::-1]) / 2 + x = (x - x[::-1]) / 2 + + # scale w to get the right value + w *= 2. / w.sum() + + return x, w + + +def legweight(x): + """ + Weight function of the Legendre polynomials. + + The weight function is :math:`1` and the interval of integration is + :math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not + normalized, with respect to this weight function. + + Parameters + ---------- + x : array_like + Values at which the weight function will be computed. 
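+
+    A quick illustrative check (editorial aside): the weight is identically
+    one on the interval,
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import legendre as L
+    >>> L.legweight(np.linspace(-1, 1, 3))
+    array([1., 1., 1.])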
+ + Returns + ------- + w : ndarray + The weight function at `x`. + """ + w = x * 0.0 + 1.0 + return w + +# +# Legendre series class +# + +class Legendre(ABCPolyBase): + """A Legendre series class. + + The Legendre class provides the standard Python numerical methods + '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the + attributes and methods listed below. + + Parameters + ---------- + coef : array_like + Legendre coefficients in order of increasing degree, i.e., + ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``. + domain : (2,) array_like, optional + Domain to use. The interval ``[domain[0], domain[1]]`` is mapped + to the interval ``[window[0], window[1]]`` by shifting and scaling. + The default value is [-1., 1.]. + window : (2,) array_like, optional + Window, see `domain` for its use. The default value is [-1., 1.]. + symbol : str, optional + Symbol used to represent the independent variable in string + representations of the polynomial expression, e.g. for printing. + The symbol must be a valid Python identifier. Default value is 'x'. + + .. versionadded:: 1.24 + + """ + # Virtual Functions + _add = staticmethod(legadd) + _sub = staticmethod(legsub) + _mul = staticmethod(legmul) + _div = staticmethod(legdiv) + _pow = staticmethod(legpow) + _val = staticmethod(legval) + _int = staticmethod(legint) + _der = staticmethod(legder) + _fit = staticmethod(legfit) + _line = staticmethod(legline) + _roots = staticmethod(legroots) + _fromroots = staticmethod(legfromroots) + + # Virtual properties + domain = np.array(legdomain) + window = np.array(legdomain) + basis_name = 'P' diff --git a/python/numpy/polynomial/legendre.pyi b/python/numpy/polynomial/legendre.pyi new file mode 100644 index 000000000..35ea2ffd2 --- /dev/null +++ b/python/numpy/polynomial/legendre.pyi @@ -0,0 +1,100 @@ +from typing import Final +from typing import Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as legtrim + +__all__ = [ + "legzero", + "legone", + "legx", + "legdomain", + "legline", + "legadd", + "legsub", + "legmulx", + "legmul", + "legdiv", + "legpow", + "legval", + "legder", + "legint", + "leg2poly", + "poly2leg", + "legfromroots", + "legvander", + "legfit", + "legtrim", + "legroots", + "Legendre", + "legval2d", + "legval3d", + "leggrid2d", + "leggrid3d", + "legvander2d", + "legvander3d", + "legcompanion", + "leggauss", + "legweight", +] + +poly2leg: _FuncPoly2Ortho[L["poly2leg"]] +leg2poly: _FuncUnOp[L["leg2poly"]] + +legdomain: Final[_Array2[np.float64]] +legzero: Final[_Array1[np.int_]] +legone: Final[_Array1[np.int_]] +legx: Final[_Array2[np.int_]] + +legline: _FuncLine[L["legline"]] +legfromroots: _FuncFromRoots[L["legfromroots"]] +legadd: _FuncBinOp[L["legadd"]] +legsub: _FuncBinOp[L["legsub"]] +legmulx: _FuncUnOp[L["legmulx"]] +legmul: _FuncBinOp[L["legmul"]] +legdiv: _FuncBinOp[L["legdiv"]] +legpow: _FuncPow[L["legpow"]] +legder: _FuncDer[L["legder"]] +legint: _FuncInteg[L["legint"]] +legval: _FuncVal[L["legval"]] +legval2d: _FuncVal2D[L["legval2d"]] +legval3d: _FuncVal3D[L["legval3d"]] +legvalfromroots: _FuncValFromRoots[L["legvalfromroots"]] +leggrid2d: _FuncVal2D[L["leggrid2d"]] 
+leggrid3d: _FuncVal3D[L["leggrid3d"]] +legvander: _FuncVander[L["legvander"]] +legvander2d: _FuncVander2D[L["legvander2d"]] +legvander3d: _FuncVander3D[L["legvander3d"]] +legfit: _FuncFit[L["legfit"]] +legcompanion: _FuncCompanion[L["legcompanion"]] +legroots: _FuncRoots[L["legroots"]] +leggauss: _FuncGauss[L["leggauss"]] +legweight: _FuncWeight[L["legweight"]] + +class Legendre(ABCPolyBase[L["P"]]): ... diff --git a/python/numpy/polynomial/polynomial.py b/python/numpy/polynomial/polynomial.py new file mode 100644 index 000000000..32b53b757 --- /dev/null +++ b/python/numpy/polynomial/polynomial.py @@ -0,0 +1,1616 @@ +""" +================================================= +Power Series (:mod:`numpy.polynomial.polynomial`) +================================================= + +This module provides a number of objects (mostly functions) useful for +dealing with polynomials, including a `Polynomial` class that +encapsulates the usual arithmetic operations. (General information +on how this module represents and works with polynomial objects is in +the docstring for its "parent" sub-package, `numpy.polynomial`). + +Classes +------- +.. autosummary:: + :toctree: generated/ + + Polynomial + +Constants +--------- +.. autosummary:: + :toctree: generated/ + + polydomain + polyzero + polyone + polyx + +Arithmetic +---------- +.. autosummary:: + :toctree: generated/ + + polyadd + polysub + polymulx + polymul + polydiv + polypow + polyval + polyval2d + polyval3d + polygrid2d + polygrid3d + +Calculus +-------- +.. autosummary:: + :toctree: generated/ + + polyder + polyint + +Misc Functions +-------------- +.. autosummary:: + :toctree: generated/ + + polyfromroots + polyroots + polyvalfromroots + polyvander + polyvander2d + polyvander3d + polycompanion + polyfit + polytrim + polyline + +See Also +-------- +`numpy.polynomial` + +""" +__all__ = [ + 'polyzero', 'polyone', 'polyx', 'polydomain', 'polyline', 'polyadd', + 'polysub', 'polymulx', 'polymul', 'polydiv', 'polypow', 'polyval', + 'polyvalfromroots', 'polyder', 'polyint', 'polyfromroots', 'polyvander', + 'polyfit', 'polytrim', 'polyroots', 'Polynomial', 'polyval2d', 'polyval3d', + 'polygrid2d', 'polygrid3d', 'polyvander2d', 'polyvander3d', + 'polycompanion'] + +import numpy as np +import numpy.linalg as la +from numpy.lib.array_utils import normalize_axis_index + +from . import polyutils as pu +from ._polybase import ABCPolyBase + +polytrim = pu.trimcoef + +# +# These are constant arrays are of integer type so as to be compatible +# with the widest range of other types, such as Decimal. +# + +# Polynomial default domain. +polydomain = np.array([-1., 1.]) + +# Polynomial coefficients representing zero. +polyzero = np.array([0]) + +# Polynomial coefficients representing one. +polyone = np.array([1]) + +# Polynomial coefficients representing the identity x. +polyx = np.array([0, 1]) + +# +# Polynomial series functions +# + + +def polyline(off, scl): + """ + Returns an array representing a linear polynomial. + + Parameters + ---------- + off, scl : scalars + The "y-intercept" and "slope" of the line, respectively. + + Returns + ------- + y : ndarray + This module's representation of the linear polynomial ``off + + scl*x``. 
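+
+    Note the degenerate case (an illustrative aside): a zero slope returns
+    a length-1 coefficient array,
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> P.polyline(2, 0)
+    array([2])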
+ + See Also + -------- + numpy.polynomial.chebyshev.chebline + numpy.polynomial.legendre.legline + numpy.polynomial.laguerre.lagline + numpy.polynomial.hermite.hermline + numpy.polynomial.hermite_e.hermeline + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyline(1, -1) + array([ 1, -1]) + >>> P.polyval(1, P.polyline(1, -1)) # should be 0 + 0.0 + + """ + if scl != 0: + return np.array([off, scl]) + else: + return np.array([off]) + + +def polyfromroots(roots): + """ + Generate a monic polynomial with given roots. + + Return the coefficients of the polynomial + + .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), + + where the :math:`r_n` are the roots specified in `roots`. If a zero has + multiplicity n, then it must appear in `roots` n times. For instance, + if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, + then `roots` looks something like [2, 2, 2, 3, 3]. The roots can appear + in any order. + + If the returned coefficients are `c`, then + + .. math:: p(x) = c_0 + c_1 * x + ... + x^n + + The coefficient of the last term is 1 for monic polynomials in this + form. + + Parameters + ---------- + roots : array_like + Sequence containing the roots. + + Returns + ------- + out : ndarray + 1-D array of the polynomial's coefficients If all the roots are + real, then `out` is also real, otherwise it is complex. (see + Examples below). + + See Also + -------- + numpy.polynomial.chebyshev.chebfromroots + numpy.polynomial.legendre.legfromroots + numpy.polynomial.laguerre.lagfromroots + numpy.polynomial.hermite.hermfromroots + numpy.polynomial.hermite_e.hermefromroots + + Notes + ----- + The coefficients are determined by multiplying together linear factors + of the form ``(x - r_i)``, i.e. + + .. math:: p(x) = (x - r_0) (x - r_1) ... (x - r_n) + + where ``n == len(roots) - 1``; note that this implies that ``1`` is always + returned for :math:`a_n`. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polyfromroots((-1,0,1)) # x(x - 1)(x + 1) = x^3 - x + array([ 0., -1., 0., 1.]) + >>> j = complex(0,1) + >>> P.polyfromroots((-j,j)) # complex returned, though values are real + array([1.+0.j, 0.+0.j, 1.+0.j]) + + """ + return pu._fromroots(polyline, polymul, roots) + + +def polyadd(c1, c2): + """ + Add one polynomial to another. + + Returns the sum of two polynomials `c1` + `c2`. The arguments are + sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + out : ndarray + The coefficient array representing their sum. + + See Also + -------- + polysub, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> sum = P.polyadd(c1,c2); sum + array([4., 4., 4.]) + >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) + 28.0 + + """ + return pu._add(c1, c2) + + +def polysub(c1, c2): + """ + Subtract one polynomial from another. + + Returns the difference of two polynomials `c1` - `c2`. The arguments + are sequences of coefficients from lowest order term to highest, i.e., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to + high. 
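+
+    As an illustrative aside, series of different lengths are zero-padded
+    before subtraction:
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> P.polysub((1, 2, 3), (1,))  # shorter input is padded with zeros
+    array([0., 2., 3.])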
+ + Returns + ------- + out : ndarray + Of coefficients representing their difference. + + See Also + -------- + polyadd, polymulx, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polysub(c1,c2) + array([-2., 0., 2.]) + >>> P.polysub(c2, c1) # -P.polysub(c1,c2) + array([ 2., 0., -2.]) + + """ + return pu._sub(c1, c2) + + +def polymulx(c): + """Multiply a polynomial by x. + + Multiply the polynomial `c` by x, where x is the independent + variable. + + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients ordered from low to + high. + + Returns + ------- + out : ndarray + Array representing the result of the multiplication. + + See Also + -------- + polyadd, polysub, polymul, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polymulx(c) + array([0., 1., 2., 3.]) + + """ + # c is a trimmed copy + [c] = pu.as_series([c]) + # The zero series needs special treatment + if len(c) == 1 and c[0] == 0: + return c + + prd = np.empty(len(c) + 1, dtype=c.dtype) + prd[0] = c[0] * 0 + prd[1:] = c + return prd + + +def polymul(c1, c2): + """ + Multiply one polynomial by another. + + Returns the product of two polynomials `c1` * `c2`. The arguments are + sequences of coefficients, from lowest order term to highest, e.g., + [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of coefficients representing a polynomial, relative to the + "standard" basis, and ordered from lowest order term to highest. + + Returns + ------- + out : ndarray + Of the coefficients of their product. + + See Also + -------- + polyadd, polysub, polymulx, polydiv, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polymul(c1, c2) + array([ 3., 8., 14., 8., 3.]) + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + ret = np.convolve(c1, c2) + return pu.trimseq(ret) + + +def polydiv(c1, c2): + """ + Divide one polynomial by another. + + Returns the quotient-with-remainder of two polynomials `c1` / `c2`. + The arguments are sequences of coefficients, from lowest order term + to highest, e.g., [1,2,3] represents ``1 + 2*x + 3*x**2``. + + Parameters + ---------- + c1, c2 : array_like + 1-D arrays of polynomial coefficients ordered from low to high. + + Returns + ------- + [quo, rem] : ndarrays + Of coefficient series representing the quotient and remainder. 
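+
+    The quotient and remainder satisfy ``c1 = quo*c2 + rem``, which can be
+    checked directly (an illustrative aside):
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> quo, rem = P.polydiv((1, 2, 3), (3, 2, 1))
+    >>> P.polyadd(P.polymul(quo, (3, 2, 1)), rem)
+    array([1., 2., 3.])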
+ + See Also + -------- + polyadd, polysub, polymulx, polymul, polypow + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c1 = (1, 2, 3) + >>> c2 = (3, 2, 1) + >>> P.polydiv(c1, c2) + (array([3.]), array([-8., -4.])) + >>> P.polydiv(c2, c1) + (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary + + """ + # c1, c2 are trimmed copies + [c1, c2] = pu.as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError # FIXME: add message with details to exception + + # note: this is more efficient than `pu._div(polymul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1] * 0, c1 + elif lc2 == 1: + return c1 / c2[-1], c1[:1] * 0 + else: + dlen = lc1 - lc2 + scl = c2[-1] + c2 = c2[:-1] / scl + i = dlen + j = lc1 - 1 + while i >= 0: + c1[i:j] -= c2 * c1[j] + i -= 1 + j -= 1 + return c1[j + 1:] / scl, pu.trimseq(c1[:j + 1]) + + +def polypow(c, pow, maxpower=None): + """Raise a polynomial to a power. + + Returns the polynomial `c` raised to the power `pow`. The argument + `c` is a sequence of coefficients ordered from low to high. i.e., + [1,2,3] is the series ``1 + 2*x + 3*x**2.`` + + Parameters + ---------- + c : array_like + 1-D array of array of series coefficients ordered from low to + high degree. + pow : integer + Power to which the series will be raised + maxpower : integer, optional + Maximum power allowed. This is mainly to limit growth of the series + to unmanageable size. Default is 16 + + Returns + ------- + coef : ndarray + Power series of power. + + See Also + -------- + polyadd, polysub, polymulx, polymul, polydiv + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> P.polypow([1, 2, 3], 2) + array([ 1., 4., 10., 12., 9.]) + + """ + # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it + # avoids calling `as_series` repeatedly + return pu._pow(np.convolve, c, pow, maxpower) + + +def polyder(c, m=1, scl=1, axis=0): + """ + Differentiate a polynomial. + + Returns the polynomial coefficients `c` differentiated `m` times along + `axis`. At each iteration the result is multiplied by `scl` (the + scaling factor is for use in a linear change of variable). The + argument `c` is an array of coefficients from low to high degree along + each axis, e.g., [1,2,3] represents the polynomial ``1 + 2*x + 3*x**2`` + while [[1,2],[1,2]] represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is + ``x`` and axis=1 is ``y``. + + Parameters + ---------- + c : array_like + Array of polynomial coefficients. If c is multidimensional the + different axis correspond to different variables with the degree + in each axis given by the corresponding index. + m : int, optional + Number of derivatives taken, must be non-negative. (Default: 1) + scl : scalar, optional + Each differentiation is multiplied by `scl`. The end result is + multiplication by ``scl**m``. This is for use in a linear change + of variable. (Default: 1) + axis : int, optional + Axis over which the derivative is taken. (Default: 0). + + Returns + ------- + der : ndarray + Polynomial coefficients of the derivative. 
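+
+    With multidimensional coefficients the `axis` keyword selects the
+    variable of differentiation; a small illustrative sketch:
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> P.polyder([[1, 2], [3, 4]], axis=1)  # d/dy of 1 + 2y + 3x + 4xy
+    array([[2.],
+           [4.]])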
+ + See Also + -------- + polyint + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3, 4) + >>> P.polyder(c) # (d/dx)(c) + array([ 2., 6., 12.]) + >>> P.polyder(c, 3) # (d**3/dx**3)(c) + array([24.]) + >>> P.polyder(c, scl=-1) # (d/d(-x))(c) + array([ -2., -6., -12.]) + >>> P.polyder(c, 2, -1) # (d**2/d(-x)**2)(c) + array([ 6., 24.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + cdt = c.dtype + cnt = pu._as_int(m, "the order of derivation") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of derivation must be non-negative") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + c = np.moveaxis(c, iaxis, 0) + n = len(c) + if cnt >= n: + c = c[:1] * 0 + else: + for i in range(cnt): + n = n - 1 + c *= scl + der = np.empty((n,) + c.shape[1:], dtype=cdt) + for j in range(n, 0, -1): + der[j - 1] = j * c[j] + c = der + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): + """ + Integrate a polynomial. + + Returns the polynomial coefficients `c` integrated `m` times from + `lbnd` along `axis`. At each iteration the resulting series is + **multiplied** by `scl` and an integration constant, `k`, is added. + The scaling factor is for use in a linear change of variable. ("Buyer + beware": note that, depending on what one is doing, one may want `scl` + to be the reciprocal of what one might expect; for more information, + see the Notes section below.) The argument `c` is an array of + coefficients, from low to high degree along each axis, e.g., [1,2,3] + represents the polynomial ``1 + 2*x + 3*x**2`` while [[1,2],[1,2]] + represents ``1 + 1*x + 2*y + 2*x*y`` if axis=0 is ``x`` and axis=1 is + ``y``. + + Parameters + ---------- + c : array_like + 1-D array of polynomial coefficients, ordered from low to high. + m : int, optional + Order of integration, must be positive. (Default: 1) + k : {[], list, scalar}, optional + Integration constant(s). The value of the first integral at zero + is the first value in the list, the value of the second integral + at zero is the second value, etc. If ``k == []`` (the default), + all constants are set to zero. If ``m == 1``, a single scalar can + be given instead of a list. + lbnd : scalar, optional + The lower bound of the integral. (Default: 0) + scl : scalar, optional + Following each integration the result is *multiplied* by `scl` + before the integration constant is added. (Default: 1) + axis : int, optional + Axis over which the integral is taken. (Default: 0). + + Returns + ------- + S : ndarray + Coefficient array of the integral. + + Raises + ------ + ValueError + If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or + ``np.ndim(scl) != 0``. + + See Also + -------- + polyder + + Notes + ----- + Note that the result of each integration is *multiplied* by `scl`. Why + is this important to note? Say one is making a linear change of + variable :math:`u = ax + b` in an integral relative to `x`. Then + :math:`dx = du/a`, so one will need to set `scl` equal to + :math:`1/a` - perhaps not what one would have first thought. + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = (1, 2, 3) + >>> P.polyint(c) # should return array([0, 1, 1, 1]) + array([0., 1., 1., 1.]) + >>> P.polyint(c, 3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) + array([ 0. , 0. , 0. 
, 0.16666667, 0.08333333, # may vary + 0.05 ]) + >>> P.polyint(c, k=3) # should return array([3, 1, 1, 1]) + array([3., 1., 1., 1.]) + >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) + array([6., 1., 1., 1.]) + >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) + array([ 0., -2., -2., -2.]) + + """ + c = np.array(c, ndmin=1, copy=True) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype doesn't preserve mask attribute. + c = c + 0.0 + cdt = c.dtype + if not np.iterable(k): + k = [k] + cnt = pu._as_int(m, "the order of integration") + iaxis = pu._as_int(axis, "the axis") + if cnt < 0: + raise ValueError("The order of integration must be non-negative") + if len(k) > cnt: + raise ValueError("Too many integration constants") + if np.ndim(lbnd) != 0: + raise ValueError("lbnd must be a scalar.") + if np.ndim(scl) != 0: + raise ValueError("scl must be a scalar.") + iaxis = normalize_axis_index(iaxis, c.ndim) + + if cnt == 0: + return c + + k = list(k) + [0] * (cnt - len(k)) + c = np.moveaxis(c, iaxis, 0) + for i in range(cnt): + n = len(c) + c *= scl + if n == 1 and np.all(c[0] == 0): + c[0] += k[i] + else: + tmp = np.empty((n + 1,) + c.shape[1:], dtype=cdt) + tmp[0] = c[0] * 0 + tmp[1] = c[0] + for j in range(1, n): + tmp[j + 1] = c[j] / (j + 1) + tmp[0] += k[i] - polyval(lbnd, tmp) + c = tmp + c = np.moveaxis(c, 0, iaxis) + return c + + +def polyval(x, c, tensor=True): + """ + Evaluate a polynomial at points x. + + If `c` is of length ``n + 1``, this function returns the value + + .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `c`. + + If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If + `c` is multidimensional, then the shape of the result depends on the + value of `tensor`. If `tensor` is true the shape will be c.shape[1:] + + x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that + scalars have shape (,). + + Trailing zeros in the coefficients will be used in the evaluation, so + they should be avoided if efficiency is a concern. + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `c`. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree n are contained in c[n]. If `c` is multidimensional the + remaining indices enumerate multiple polynomials. In the two + dimensional case the coefficients may be thought of as stored in + the columns of `c`. + tensor : boolean, optional + If True, the shape of the coefficient array is extended with ones + on the right, one for each dimension of `x`. Scalars have dimension 0 + for this action. The result is that every column of coefficients in + `c` is evaluated for every element of `x`. If False, `x` is broadcast + over the columns of `c` for the evaluation. This keyword is useful + when `c` is multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. 
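+
+    As an illustrative cross-check, the evaluation agrees with the
+    matrix-vector form built from `polyvander`:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> x = np.array([1.0, 2.0])
+    >>> np.allclose(P.polyvander(x, 2) @ (1, 2, 3), P.polyval(x, (1, 2, 3)))
+    True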
+ + See Also + -------- + polyval2d, polygrid2d, polyval3d, polygrid3d + + Notes + ----- + The evaluation uses Horner's method. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial.polynomial import polyval + >>> polyval(1, [1,2,3]) + 6.0 + >>> a = np.arange(4).reshape(2,2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyval(a, [1, 2, 3]) + array([[ 1., 6.], + [17., 34.]]) + >>> coef = np.arange(4).reshape(2, 2) # multidimensional coefficients + >>> coef + array([[0, 1], + [2, 3]]) + >>> polyval([1, 2], coef, tensor=True) + array([[2., 4.], + [4., 7.]]) + >>> polyval([1, 2], coef, tensor=False) + array([2., 7.]) + + """ + c = np.array(c, ndmin=1, copy=None) + if c.dtype.char in '?bBhHiIlLqQpP': + # astype fails with NA + c = c + 0.0 + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray) and tensor: + c = c.reshape(c.shape + (1,) * x.ndim) + + c0 = c[-1] + x * 0 + for i in range(2, len(c) + 1): + c0 = c[-i] + c0 * x + return c0 + + +def polyvalfromroots(x, r, tensor=True): + """ + Evaluate a polynomial specified by its roots at points x. + + If `r` is of length ``N``, this function returns the value + + .. math:: p(x) = \\prod_{n=1}^{N} (x - r_n) + + The parameter `x` is converted to an array only if it is a tuple or a + list, otherwise it is treated as a scalar. In either case, either `x` + or its elements must support multiplication and addition both with + themselves and with the elements of `r`. + + If `r` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If `r` + is multidimensional, then the shape of the result depends on the value of + `tensor`. If `tensor` is ``True`` the shape will be r.shape[1:] + x.shape; + that is, each polynomial is evaluated at every value of `x`. If `tensor` is + ``False``, the shape will be r.shape[1:]; that is, each polynomial is + evaluated only for the corresponding broadcast value of `x`. Note that + scalars have shape (,). + + Parameters + ---------- + x : array_like, compatible object + If `x` is a list or tuple, it is converted to an ndarray, otherwise + it is left unchanged and treated as a scalar. In either case, `x` + or its elements must support addition and multiplication with + with themselves and with the elements of `r`. + r : array_like + Array of roots. If `r` is multidimensional the first index is the + root index, while the remaining indices enumerate multiple + polynomials. For instance, in the two dimensional case the roots + of each polynomial may be thought of as stored in the columns of `r`. + tensor : boolean, optional + If True, the shape of the roots array is extended with ones on the + right, one for each dimension of `x`. Scalars have dimension 0 for this + action. The result is that every column of coefficients in `r` is + evaluated for every element of `x`. If False, `x` is broadcast over the + columns of `r` for the evaluation. This keyword is useful when `r` is + multidimensional. The default value is True. + + Returns + ------- + values : ndarray, compatible object + The shape of the returned array is described above. 
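+
+    Evaluating from roots agrees with first expanding the roots via
+    `polyfromroots` (an illustrative aside):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> r = (-1, 0, 1)
+    >>> np.allclose(P.polyvalfromroots(2.0, r),
+    ...             P.polyval(2.0, P.polyfromroots(r)))
+    True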
+ + See Also + -------- + polyroots, polyfromroots, polyval + + Examples + -------- + >>> from numpy.polynomial.polynomial import polyvalfromroots + >>> polyvalfromroots(1, [1, 2, 3]) + 0.0 + >>> a = np.arange(4).reshape(2, 2) + >>> a + array([[0, 1], + [2, 3]]) + >>> polyvalfromroots(a, [-1, 0, 1]) + array([[-0., 0.], + [ 6., 24.]]) + >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients + >>> r # each column of r defines one polynomial + array([[-2, -1], + [ 0, 1]]) + >>> b = [-2, 1] + >>> polyvalfromroots(b, r, tensor=True) + array([[-0., 3.], + [ 3., 0.]]) + >>> polyvalfromroots(b, r, tensor=False) + array([-0., 0.]) + + """ + r = np.array(r, ndmin=1, copy=None) + if r.dtype.char in '?bBhHiIlLqQpP': + r = r.astype(np.double) + if isinstance(x, (tuple, list)): + x = np.asarray(x) + if isinstance(x, np.ndarray): + if tensor: + r = r.reshape(r.shape + (1,) * x.ndim) + elif x.ndim >= r.ndim: + raise ValueError("x.ndim must be < r.ndim when tensor == False") + return np.prod(x - r, axis=0) + + +def polyval2d(x, y, c): + """ + Evaluate a 2-D polynomial at points (x, y). + + This function returns the value + + .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * x^i * y^j + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars and they + must have the same shape after conversion. In either case, either `x` + and `y` or their elements must support multiplication and addition both + with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape. + + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points ``(x, y)``, + where `x` and `y` must have the same shape. If `x` or `y` is a list + or tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term + of multi-degree i,j is contained in ``c[i,j]``. If `c` has + dimension greater than two the remaining indices enumerate multiple + sets of coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points formed with + pairs of corresponding values from `x` and `y`. + + See Also + -------- + polyval, polygrid2d, polyval3d, polygrid3d + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6)) + >>> P.polyval2d(1, 1, c) + 21.0 + + """ + return pu._valnd(polyval, c, x, y) + + +def polygrid2d(x, y, c): + """ + Evaluate a 2-D polynomial on the Cartesian product of x and y. + + This function returns the values: + + .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * a^i * b^j + + where the points ``(a, b)`` consist of all pairs formed by taking + `a` from `x` and `b` from `y`. The resulting points form a grid with + `x` in the first dimension and `y` in the second. + + The parameters `x` and `y` are converted to arrays only if they are + tuples or a lists, otherwise they are treated as a scalars. In either + case, either `x` and `y` or their elements must support multiplication + and addition both with themselves and with the elements of `c`. + + If `c` has fewer than two dimensions, ones are implicitly appended to + its shape to make it 2-D. The shape of the result will be c.shape[2:] + + x.shape + y.shape. 
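+
+    The grid evaluation matches pairwise `polyval2d` calls over the
+    Cartesian product (an illustrative check):
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = ((1, 2), (3, 4))
+    >>> x, y = [0, 1], [0, 2]
+    >>> np.allclose(P.polygrid2d(x, y, c),
+    ...             [[P.polyval2d(a, b, c) for b in y] for a in x])
+    True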
+ + Parameters + ---------- + x, y : array_like, compatible objects + The two dimensional series is evaluated at the points in the + Cartesian product of `x` and `y`. If `x` or `y` is a list or + tuple, it is first converted to an ndarray, otherwise it is left + unchanged and, if it isn't an ndarray, it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficients for terms of + degree i,j are contained in ``c[i,j]``. If `c` has dimension + greater than two the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the two dimensional polynomial at points in the Cartesian + product of `x` and `y`. + + See Also + -------- + polyval, polyval2d, polyval3d, polygrid3d + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6)) + >>> P.polygrid2d([0, 1], [0, 1], c) + array([[ 1., 6.], + [ 5., 21.]]) + + """ + return pu._gridnd(polyval, c, x, y) + + +def polyval3d(x, y, z, c): + """ + Evaluate a 3-D polynomial at points (x, y, z). + + This function returns the values: + + .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * x^i * y^j * z^k + + The parameters `x`, `y`, and `z` are converted to arrays only if + they are tuples or a lists, otherwise they are treated as a scalars and + they must have the same shape after conversion. In either case, either + `x`, `y`, and `z` or their elements must support multiplication and + addition both with themselves and with the elements of `c`. + + If `c` has fewer than 3 dimensions, ones are implicitly appended to its + shape to make it 3-D. The shape of the result will be c.shape[3:] + + x.shape. + + Parameters + ---------- + x, y, z : array_like, compatible object + The three dimensional series is evaluated at the points + ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape. If + any of `x`, `y`, or `z` is a list or tuple, it is first converted + to an ndarray, otherwise it is left unchanged and if it isn't an + ndarray it is treated as a scalar. + c : array_like + Array of coefficients ordered so that the coefficient of the term of + multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension + greater than 3 the remaining indices enumerate multiple sets of + coefficients. + + Returns + ------- + values : ndarray, compatible object + The values of the multidimensional polynomial on points formed with + triples of corresponding values from `x`, `y`, and `z`. + + See Also + -------- + polyval, polyval2d, polygrid2d, polygrid3d + + Examples + -------- + >>> from numpy.polynomial import polynomial as P + >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9)) + >>> P.polyval3d(1, 1, 1, c) + 45.0 + + """ + return pu._valnd(polyval, c, x, y, z) + + +def polygrid3d(x, y, z, c): + """ + Evaluate a 3-D polynomial on the Cartesian product of x, y and z. + + This function returns the values: + + .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k + + where the points ``(a, b, c)`` consist of all triples formed by taking + `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form + a grid with `x` in the first dimension, `y` in the second, and `z` in + the third. + + The parameters `x`, `y`, and `z` are converted to arrays only if they + are tuples or a lists, otherwise they are treated as a scalars. In + either case, either `x`, `y`, and `z` or their elements must support + multiplication and addition both with themselves and with the elements + of `c`. 
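+
+    The output shape is the concatenation of the input shapes, as described
+    below; an illustrative sketch:
+
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = np.ones((2, 2, 2))
+    >>> P.polygrid3d([0, 1], [0, 1, 2], [0], c).shape
+    (2, 3, 1)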
+
+
+def polygrid3d(x, y, z, c):
+    """
+    Evaluate a 3-D polynomial on the Cartesian product of x, y and z.
+
+    This function returns the values:
+
+    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * a^i * b^j * c^k
+
+    where the points ``(a, b, c)`` consist of all triples formed by taking
+    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
+    a grid with `x` in the first dimension, `y` in the second, and `z` in
+    the third.
+
+    The parameters `x`, `y`, and `z` are converted to arrays only if they
+    are tuples or lists, otherwise they are treated as scalars. In
+    either case, either `x`, `y`, and `z` or their elements must support
+    multiplication and addition both with themselves and with the elements
+    of `c`.
+
+    If `c` has fewer than three dimensions, ones are implicitly appended to
+    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
+    x.shape + y.shape + z.shape.
+
+    Parameters
+    ----------
+    x, y, z : array_like, compatible objects
+        The three dimensional series is evaluated at the points in the
+        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
+        list or tuple, it is first converted to an ndarray, otherwise it is
+        left unchanged and, if it isn't an ndarray, it is treated as a
+        scalar.
+    c : array_like
+        Array of coefficients ordered so that the coefficients for terms of
+        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
+        greater than three the remaining indices enumerate multiple sets of
+        coefficients.
+
+    Returns
+    -------
+    values : ndarray, compatible object
+        The values of the three dimensional polynomial at points in the
+        Cartesian product of `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyval, polyval2d, polygrid2d, polyval3d
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = ((1, 2, 3), (4, 5, 6), (7, 8, 9))
+    >>> P.polygrid3d([0, 1], [0, 1], [0, 1], c)
+    array([[ 1., 13.],
+           [ 6., 51.]])
+
+    """
+    return pu._gridnd(polyval, c, x, y, z)
+
+
+def polyvander(x, deg):
+    """Vandermonde matrix of given degree.
+
+    Returns the Vandermonde matrix of degree `deg` and sample points
+    `x`. The Vandermonde matrix is defined by
+
+    .. math:: V[..., i] = x^i,
+
+    where ``0 <= i <= deg``. The leading indices of `V` index the elements of
+    `x` and the last index is the power of `x`.
+
+    If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
+    matrix ``V = polyvander(x, n)``, then ``np.dot(V, c)`` and
+    ``polyval(x, c)`` are the same up to roundoff. This equivalence is
+    useful both for least squares fitting and for the evaluation of a large
+    number of polynomials of the same degree and sample points.
+
+    Parameters
+    ----------
+    x : array_like
+        Array of points. The dtype is converted to float64 or complex128
+        depending on whether any of the elements are complex. If `x` is
+        scalar it is converted to a 1-D array.
+    deg : int
+        Degree of the resulting matrix.
+
+    Returns
+    -------
+    vander : ndarray
+        The Vandermonde matrix. The shape of the returned matrix is
+        ``x.shape + (deg + 1,)``, where the last index is the power of `x`.
+        The dtype will be the same as the converted `x`.
+
+    See Also
+    --------
+    polyvander2d, polyvander3d
+
+    Examples
+    --------
+    The Vandermonde matrix of degree ``deg = 5`` and sample points
+    ``x = [-1, 2, 3]`` contains the element-wise powers of `x`
+    from 0 to 5 as its columns.
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> x, deg = [-1, 2, 3], 5
+    >>> P.polyvander(x=x, deg=deg)
+    array([[  1.,  -1.,   1.,  -1.,   1.,  -1.],
+           [  1.,   2.,   4.,   8.,  16.,  32.],
+           [  1.,   3.,   9.,  27.,  81., 243.]])
+
+    """
+    ideg = pu._as_int(deg, "deg")
+    if ideg < 0:
+        raise ValueError("deg must be non-negative")
+
+    x = np.array(x, copy=None, ndmin=1) + 0.0
+    dims = (ideg + 1,) + x.shape
+    dtyp = x.dtype
+    v = np.empty(dims, dtype=dtyp)
+    v[0] = x * 0 + 1
+    if ideg > 0:
+        v[1] = x
+        for i in range(2, ideg + 1):
+            v[i] = v[i - 1] * x
+    return np.moveaxis(v, 0, -1)
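The docstring's claim that ``np.dot(V, c)`` matches ``polyval(x, c)`` is what makes the matrix useful for least-squares work. A minimal sketch, not part of the patch:

    import numpy as np
    from numpy.polynomial import polynomial as P

    x = np.linspace(-1, 1, 7)
    c = np.array([1.0, -2.0, 0.5])       # 1 - 2x + 0.5x**2
    V = P.polyvander(x, deg=len(c) - 1)

    # V @ c reproduces polyval(x, c) up to roundoff
    assert np.allclose(V @ c, P.polyval(x, c))

    # and the same matrix drives a least-squares fit "by hand"
    coef, *_ = np.linalg.lstsq(V, P.polyval(x, c), rcond=None)
    assert np.allclose(coef, c)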
+
+
+def polyvander2d(x, y, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y)``. The pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (deg[1] + 1)*i + j] = x^i * y^j,
+
+    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading indices of
+    `V` index the points ``(x, y)`` and the last index encodes the powers of
+    `x` and `y`.
+
+    If ``V = polyvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
+    correspond to the elements of a 2-D coefficient array `c` of shape
+    (xdeg + 1, ydeg + 1) in the order
+
+    .. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
+
+    and ``np.dot(V, c.flat)`` and ``polyval2d(x, y, c)`` will be the same
+    up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 2-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes
+        will be converted to either float64 or complex128 depending on
+        whether any of the elements are complex. Scalars are converted to
+        1-D arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg].
+
+    Returns
+    -------
+    vander2d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
+        as the converted `x` and `y`.
+
+    See Also
+    --------
+    polyvander, polyvander3d, polyval2d, polyval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+
+    The 2-D pseudo-Vandermonde matrix of degree ``[1, 2]`` and sample
+    points ``x = [-1, 2]`` and ``y = [1, 3]`` is as follows:
+
+    >>> from numpy.polynomial import polynomial as P
+    >>> x = np.array([-1, 2])
+    >>> y = np.array([1, 3])
+    >>> m, n = 1, 2
+    >>> deg = np.array([m, n])
+    >>> V = P.polyvander2d(x=x, y=y, deg=deg)
+    >>> V
+    array([[ 1.,  1.,  1., -1., -1., -1.],
+           [ 1.,  3.,  9.,  2.,  6., 18.]])
+
+    We can verify the columns for any ``0 <= i <= m`` and ``0 <= j <= n``:
+
+    >>> i, j = 0, 1
+    >>> V[:, (deg[1]+1)*i + j] == x**i * y**j
+    array([ True,  True])
+
+    The (1D) Vandermonde matrix of sample points ``x`` and degree ``m`` is a
+    special case of the (2D) pseudo-Vandermonde matrix with ``y`` points all
+    zero and degree ``[m, 0]``.
+
+    >>> P.polyvander2d(x=x, y=0*x, deg=(m, 0)) == P.polyvander(x=x, deg=m)
+    array([[ True,  True],
+           [ True,  True]])
+
+    """
+    return pu._vander_nd_flat((polyvander, polyvander), (x, y), deg)
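Likewise, the flattened column order means ``V @ c.ravel()`` reproduces polyval2d, since C-order raveling of `c` matches the column index ``(deg[1] + 1)*i + j``. A small check, illustrative only:

    import numpy as np
    from numpy.polynomial import polynomial as P

    rng = np.random.default_rng(1)
    x = rng.random(6)
    y = rng.random(6)
    c = rng.random((2, 3))               # degrees (1, 2) in x and y

    # column (deg[1] + 1)*i + j of V holds x**i * y**j
    V = P.polyvander2d(x, y, [1, 2])
    assert np.allclose(V @ c.ravel(), P.polyval2d(x, y, c))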
+
+
+def polyvander3d(x, y, z, deg):
+    """Pseudo-Vandermonde matrix of given degrees.
+
+    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
+    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`, `y`, `z`,
+    then the pseudo-Vandermonde matrix is defined by
+
+    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = x^i * y^j * z^k,
+
+    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The leading
+    indices of `V` index the points ``(x, y, z)`` and the last index encodes
+    the powers of `x`, `y`, and `z`.
+
+    If ``V = polyvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
+    of `V` correspond to the elements of a 3-D coefficient array `c` of
+    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
+
+    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
+
+    and ``np.dot(V, c.flat)`` and ``polyval3d(x, y, z, c)`` will be the
+    same up to roundoff. This equivalence is useful both for least squares
+    fitting and for the evaluation of a large number of 3-D polynomials
+    of the same degrees and sample points.
+
+    Parameters
+    ----------
+    x, y, z : array_like
+        Arrays of point coordinates, all of the same shape. The dtypes will
+        be converted to either float64 or complex128 depending on whether
+        any of the elements are complex. Scalars are converted to 1-D
+        arrays.
+    deg : list of ints
+        List of maximum degrees of the form [x_deg, y_deg, z_deg].
+
+    Returns
+    -------
+    vander3d : ndarray
+        The shape of the returned matrix is ``x.shape + (order,)``, where
+        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
+        be the same as the converted `x`, `y`, and `z`.
+
+    See Also
+    --------
+    polyvander, polyvander2d, polyval2d, polyval3d
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from numpy.polynomial import polynomial as P
+    >>> x = np.asarray([-1, 2, 1])
+    >>> y = np.asarray([1, -2, -3])
+    >>> z = np.asarray([2, 2, 5])
+    >>> l, m, n = [2, 2, 1]
+    >>> deg = [l, m, n]
+    >>> V = P.polyvander3d(x=x, y=y, z=z, deg=deg)
+    >>> V
+    array([[  1.,   2.,   1.,   2.,   1.,   2.,  -1.,  -2.,  -1.,
+             -2.,  -1.,  -2.,   1.,   2.,   1.,   2.,   1.,   2.],
+           [  1.,   2.,  -2.,  -4.,   4.,   8.,   2.,   4.,  -4.,
+             -8.,   8.,  16.,   4.,   8.,  -8., -16.,  16.,  32.],
+           [  1.,   5.,  -3., -15.,   9.,  45.,   1.,   5.,  -3.,
+            -15.,   9.,  45.,   1.,   5.,  -3., -15.,   9.,  45.]])
+
+    We can verify the columns for any ``0 <= i <= l``, ``0 <= j <= m``,
+    and ``0 <= k <= n``
+
+    >>> i, j, k = 2, 1, 0
+    >>> V[:, (m+1)*(n+1)*i + (n+1)*j + k] == x**i * y**j * z**k
+    array([ True,  True,  True])
+
+    """
+    return pu._vander_nd_flat((polyvander, polyvander, polyvander), (x, y, z), deg)
+
+
+def polyfit(x, y, deg, rcond=None, full=False, w=None):
+    """
+    Least-squares fit of a polynomial to data.
+
+    Return the coefficients of a polynomial of degree `deg` that is the
+    least squares fit to the data values `y` given at points `x`. If `y` is
+    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
+    fits are done, one for each column of `y`, and the resulting
+    coefficients are stored in the corresponding columns of a 2-D return.
+    The fitted polynomial(s) are in the form
+
+    .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,
+
+    where `n` is `deg`.
+
+    Parameters
+    ----------
+    x : array_like, shape (`M`,)
+        x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
+    y : array_like, shape (`M`,) or (`M`, `K`)
+        y-coordinates of the sample points. Several sets of sample points
+        sharing the same x-coordinates can be (independently) fit with one
+        call to `polyfit` by passing in for `y` a 2-D array that contains
+        one data set per column.
+    deg : int or 1-D array_like
+        Degree(s) of the fitting polynomials. If `deg` is a single integer
+        all terms up to and including the `deg`'th term are included in the
+        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
+        degrees of the terms to include may be used instead.
+    rcond : float, optional
+        Relative condition number of the fit. Singular values smaller
+        than `rcond`, relative to the largest singular value, will be
+        ignored. The default value is ``len(x)*eps``, where `eps` is the
+        relative precision of the platform's float type, about 2e-16 in
+        most cases.
+    full : bool, optional
+        Switch determining the nature of the return value. When ``False``
+        (the default) just the coefficients are returned; when ``True``,
+        diagnostic information from the singular value decomposition (used
+        to solve the fit's matrix equation) is also returned.
+    w : array_like, shape (`M`,), optional
+        Weights. If not None, the weight ``w[i]`` applies to the unsquared
+        residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
+        chosen so that the errors of the products ``w[i]*y[i]`` all have the
+        same variance. When using inverse-variance weighting, use
+        ``w[i] = 1/sigma(y[i])``. The default value is None.
+ + Returns + ------- + coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`) + Polynomial coefficients ordered from low to high. If `y` was 2-D, + the coefficients in column `k` of `coef` represent the polynomial + fit to the data in `y`'s `k`-th column. + + [residuals, rank, singular_values, rcond] : list + These values are only returned if ``full == True`` + + - residuals -- sum of squared residuals of the least squares fit + - rank -- the numerical rank of the scaled Vandermonde matrix + - singular_values -- singular values of the scaled Vandermonde matrix + - rcond -- value of `rcond`. + + For more details, see `numpy.linalg.lstsq`. + + Raises + ------ + RankWarning + Raised if the matrix in the least-squares fit is rank deficient. + The warning is only raised if ``full == False``. The warnings can + be turned off by: + + >>> import warnings + >>> warnings.simplefilter('ignore', np.exceptions.RankWarning) + + See Also + -------- + numpy.polynomial.chebyshev.chebfit + numpy.polynomial.legendre.legfit + numpy.polynomial.laguerre.lagfit + numpy.polynomial.hermite.hermfit + numpy.polynomial.hermite_e.hermefit + polyval : Evaluates a polynomial. + polyvander : Vandermonde matrix for powers. + numpy.linalg.lstsq : Computes a least-squares fit from the matrix. + scipy.interpolate.UnivariateSpline : Computes spline fits. + + Notes + ----- + The solution is the coefficients of the polynomial `p` that minimizes + the sum of the weighted squared errors + + .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2, + + where the :math:`w_j` are the weights. This problem is solved by + setting up the (typically) over-determined matrix equation: + + .. math:: V(x) * c = w * y, + + where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the + coefficients to be solved for, `w` are the weights, and `y` are the + observed values. This equation is then solved using the singular value + decomposition of `V`. + + If some of the singular values of `V` are so small that they are + neglected (and `full` == ``False``), a `~exceptions.RankWarning` will be + raised. This means that the coefficient values may be poorly determined. + Fitting to a lower order polynomial will usually get rid of the warning + (but may not be what you want, of course; if you have independent + reason(s) for choosing the degree which isn't working, you may have to: + a) reconsider those reasons, and/or b) reconsider the quality of your + data). The `rcond` parameter can also be set to a value smaller than + its default, but the resulting fit may be spurious and have large + contributions from roundoff error. + + Polynomial fits using double precision tend to "fail" at about + (polynomial) degree 20. Fits using Chebyshev or Legendre series are + generally better conditioned, but much can still depend on the + distribution of the sample points and the smoothness of the data. If + the quality of the fit is inadequate, splines may be a good + alternative. + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polynomial as P + >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] + >>> rng = np.random.default_rng() + >>> err = rng.normal(size=len(x)) + >>> y = x**3 - x + err # x^3 - x + Gaussian noise + >>> c, stats = P.polyfit(x,y,3,full=True) + >>> c # c[0], c[1] approx. -1, c[2] should be approx. 0, c[3] approx. 
1
+    array([ 0.23111996, -1.02785049, -0.2241444 ,  1.08405657])  # may vary
+    >>> stats  # note the large SSR, explaining the rather poor results
+    [array([48.312088]),  # may vary
+     4,
+     array([1.38446749, 1.32119158, 0.50443316, 0.28853036]),
+     1.1324274851176597e-14]
+
+    Same thing without the added noise
+
+    >>> y = x**3 - x
+    >>> c, stats = P.polyfit(x,y,3,full=True)
+    >>> c  # c[0], c[1] ~= -1, c[2] should be "very close to 0", c[3] ~= 1
+    array([-6.73496154e-17, -1.00000000e+00,  0.00000000e+00,  1.00000000e+00])
+    >>> stats  # note the minuscule SSR
+    [array([8.79579319e-31]),
+     np.int32(4),
+     array([1.38446749, 1.32119158, 0.50443316, 0.28853036]),
+     1.1324274851176597e-14]
+
+    """
+    return pu._fit(polyvander, x, y, deg, rcond, full, w)
+
+
+def polycompanion(c):
+    """
+    Return the companion matrix of c.
+
+    The companion matrix for power series cannot be made symmetric by
+    scaling the basis, so this function differs from those for the
+    orthogonal polynomials.
+
+    Parameters
+    ----------
+    c : array_like
+        1-D array of polynomial coefficients ordered from low to high
+        degree.
+
+    Returns
+    -------
+    mat : ndarray
+        Companion matrix of dimensions (deg, deg).
+
+    Examples
+    --------
+    >>> from numpy.polynomial import polynomial as P
+    >>> c = (1, 2, 3)
+    >>> P.polycompanion(c)
+    array([[ 0.        , -0.33333333],
+           [ 1.        , -0.66666667]])
+
+    """
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        raise ValueError('Series must have maximum degree of at least 1.')
+    if len(c) == 2:
+        return np.array([[-c[0] / c[1]]])
+
+    n = len(c) - 1
+    mat = np.zeros((n, n), dtype=c.dtype)
+    bot = mat.reshape(-1)[n::n + 1]
+    bot[...] = 1
+    mat[:, -1] -= c[:-1] / c[-1]
+    return mat
+
+
+def polyroots(c):
+    """
+    Compute the roots of a polynomial.
+
+    Return the roots (a.k.a. "zeros") of the polynomial
+
+    .. math:: p(x) = \\sum_i c[i] * x^i.
+
+    Parameters
+    ----------
+    c : 1-D array_like
+        1-D array of polynomial coefficients.
+
+    Returns
+    -------
+    out : ndarray
+        Array of the roots of the polynomial. If all the roots are real,
+        then `out` is also real, otherwise it is complex.
+
+    See Also
+    --------
+    numpy.polynomial.chebyshev.chebroots
+    numpy.polynomial.legendre.legroots
+    numpy.polynomial.laguerre.lagroots
+    numpy.polynomial.hermite.hermroots
+    numpy.polynomial.hermite_e.hermeroots
+
+    Notes
+    -----
+    The root estimates are obtained as the eigenvalues of the companion
+    matrix. Roots far from the origin of the complex plane may have large
+    errors due to the numerical instability of the power series for such
+    values. Roots with multiplicity greater than 1 will also show larger
+    errors as the value of the series near such points is relatively
+    insensitive to errors in the roots. Isolated roots near the origin can
+    be improved by a few iterations of Newton's method.
+
+    Examples
+    --------
+    >>> import numpy.polynomial.polynomial as poly
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1)))
+    array([-1.,  0.,  1.])
+    >>> poly.polyroots(poly.polyfromroots((-1,0,1))).dtype
+    dtype('float64')
+    >>> j = complex(0,1)
+    >>> poly.polyroots(poly.polyfromroots((-j,0,j)))
+    array([  0.00000000e+00+0.j,   0.00000000e+00+1.j,   2.77555756e-17-1.j])  # may vary
+
+    """  # noqa: E501
+    # c is a trimmed copy
+    [c] = pu.as_series([c])
+    if len(c) < 2:
+        return np.array([], dtype=c.dtype)
+    if len(c) == 2:
+        return np.array([-c[0] / c[1]])
+
+    m = polycompanion(c)
+    r = la.eigvals(m)
+    r.sort()
+    return r
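The Notes above suggest polishing roots near the origin with Newton's method. A sketch of one such step, illustrative only and not part of the patch:

    import numpy as np
    import numpy.polynomial.polynomial as poly

    c = poly.polyfromroots([0.01, 1.0, 2.0])
    r = poly.polyroots(c)            # eigenvalues of the companion matrix

    # one Newton step per root: r <- r - p(r) / p'(r)
    dc = poly.polyder(c)
    r = r - poly.polyval(r, c) / poly.polyval(r, dc)
    assert np.allclose(np.sort(r), [0.01, 1.0, 2.0])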
+
+
+#
+# polynomial class
+#
+
+class Polynomial(ABCPolyBase):
+    """A power series class.
+
+    The Polynomial class provides the standard Python numerical methods
+    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
+    attributes and methods listed below.
+
+    Parameters
+    ----------
+    coef : array_like
+        Polynomial coefficients in order of increasing degree, i.e.,
+        ``(1, 2, 3)`` give ``1 + 2*x + 3*x**2``.
+    domain : (2,) array_like, optional
+        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
+        to the interval ``[window[0], window[1]]`` by shifting and scaling.
+        The default value is [-1., 1.].
+    window : (2,) array_like, optional
+        Window, see `domain` for its use. The default value is [-1., 1.].
+    symbol : str, optional
+        Symbol used to represent the independent variable in string
+        representations of the polynomial expression, e.g. for printing.
+        The symbol must be a valid Python identifier. Default value is 'x'.
+
+        .. versionadded:: 1.24
+
+    """
+    # Virtual Functions
+    _add = staticmethod(polyadd)
+    _sub = staticmethod(polysub)
+    _mul = staticmethod(polymul)
+    _div = staticmethod(polydiv)
+    _pow = staticmethod(polypow)
+    _val = staticmethod(polyval)
+    _int = staticmethod(polyint)
+    _der = staticmethod(polyder)
+    _fit = staticmethod(polyfit)
+    _line = staticmethod(polyline)
+    _roots = staticmethod(polyroots)
+    _fromroots = staticmethod(polyfromroots)
+
+    # Virtual properties
+    domain = np.array(polydomain)
+    window = np.array(polydomain)
+    basis_name = None
+
+    @classmethod
+    def _str_term_unicode(cls, i, arg_str):
+        if i == '1':
+            return f"·{arg_str}"
+        else:
+            return f"·{arg_str}{i.translate(cls._superscript_mapping)}"
+
+    @staticmethod
+    def _str_term_ascii(i, arg_str):
+        if i == '1':
+            return f" {arg_str}"
+        else:
+            return f" {arg_str}**{i}"
+
+    @staticmethod
+    def _repr_latex_term(i, arg_str, needs_parens):
+        if needs_parens:
+            arg_str = rf"\left({arg_str}\right)"
+        if i == 0:
+            return '1'
+        elif i == 1:
+            return arg_str
+        else:
+            return f"{arg_str}^{{{i}}}"
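The static methods above are what the ABCPolyBase machinery dispatches to for each operator. A brief usage sketch, illustrative only and not part of the patch:

    import numpy as np
    from numpy.polynomial import Polynomial

    p = Polynomial([1, 2, 3])    # 1 + 2x + 3x**2
    q = Polynomial([0, 1])       # x

    r = p * q + 1                # '*' and '+' dispatch to polymul/polyadd
    assert r == Polynomial([1, 1, 2, 3])

    # '()' dispatches to polyval (after the domain/window map, identity here)
    assert np.isclose(r(2.0), 1 + 2 + 2 * 4 + 3 * 8)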
diff --git a/python/numpy/polynomial/polynomial.pyi b/python/numpy/polynomial/polynomial.pyi
new file mode 100644
index 000000000..b4c784492
--- /dev/null
+++ b/python/numpy/polynomial/polynomial.pyi
@@ -0,0 +1,89 @@
+from typing import Final
+from typing import Literal as L
+
+import numpy as np
+
+from ._polybase import ABCPolyBase
+from ._polytypes import (
+    _Array1,
+    _Array2,
+    _FuncBinOp,
+    _FuncCompanion,
+    _FuncDer,
+    _FuncFit,
+    _FuncFromRoots,
+    _FuncInteg,
+    _FuncLine,
+    _FuncPow,
+    _FuncRoots,
+    _FuncUnOp,
+    _FuncVal,
+    _FuncVal2D,
+    _FuncVal3D,
+    _FuncValFromRoots,
+    _FuncVander,
+    _FuncVander2D,
+    _FuncVander3D,
+)
+from .polyutils import trimcoef as polytrim
+
+__all__ = [
+    "polyzero",
+    "polyone",
+    "polyx",
+    "polydomain",
+    "polyline",
+    "polyadd",
+    "polysub",
+    "polymulx",
+    "polymul",
+    "polydiv",
+    "polypow",
+    "polyval",
+    "polyvalfromroots",
+    "polyder",
+    "polyint",
+    "polyfromroots",
+    "polyvander",
+    "polyfit",
+    "polytrim",
+    "polyroots",
+    "Polynomial",
+    "polyval2d",
+    "polyval3d",
+    "polygrid2d",
+    "polygrid3d",
+    "polyvander2d",
+    "polyvander3d",
+    "polycompanion",
+]
+
+polydomain: Final[_Array2[np.float64]]
+polyzero: Final[_Array1[np.int_]]
+polyone: Final[_Array1[np.int_]]
+polyx: Final[_Array2[np.int_]]
+
+polyline: _FuncLine[L["polyline"]]
+polyfromroots: _FuncFromRoots[L["polyfromroots"]]
+polyadd: _FuncBinOp[L["polyadd"]]
+polysub: _FuncBinOp[L["polysub"]]
+polymulx: _FuncUnOp[L["polymulx"]]
+polymul: _FuncBinOp[L["polymul"]]
+polydiv: _FuncBinOp[L["polydiv"]]
+polypow: _FuncPow[L["polypow"]]
+polyder: _FuncDer[L["polyder"]]
+polyint: _FuncInteg[L["polyint"]]
+polyval: _FuncVal[L["polyval"]]
+polyval2d: _FuncVal2D[L["polyval2d"]]
+polyval3d: _FuncVal3D[L["polyval3d"]]
+polyvalfromroots: _FuncValFromRoots[L["polyvalfromroots"]]
+polygrid2d: _FuncVal2D[L["polygrid2d"]]
+polygrid3d: _FuncVal3D[L["polygrid3d"]]
+polyvander: _FuncVander[L["polyvander"]]
+polyvander2d: _FuncVander2D[L["polyvander2d"]]
+polyvander3d: _FuncVander3D[L["polyvander3d"]]
+polyfit: _FuncFit[L["polyfit"]]
+polycompanion: _FuncCompanion[L["polycompanion"]]
+polyroots: _FuncRoots[L["polyroots"]]
+
+class Polynomial(ABCPolyBase[None]): ...
diff --git a/python/numpy/polynomial/polyutils.py b/python/numpy/polynomial/polyutils.py
new file mode 100644
index 000000000..18dc0a8d1
--- /dev/null
+++ b/python/numpy/polynomial/polyutils.py
@@ -0,0 +1,759 @@
+"""
+Utility classes and functions for the polynomial modules.
+
+This module provides: error and warning objects; a polynomial base class;
+and some routines used in both the `polynomial` and `chebyshev` modules.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   as_series    convert list of array_likes into 1-D arrays of common type.
+   trimseq      remove trailing zeros.
+   trimcoef     remove small trailing coefficients.
+   getdomain    return the domain appropriate for a given set of abscissae.
+   mapdomain    maps points between domains.
+   mapparms     parameters of the linear map between domains.
+
+"""
+import functools
+import operator
+import warnings
+
+import numpy as np
+from numpy._core.multiarray import dragon4_positional, dragon4_scientific
+from numpy.exceptions import RankWarning
+
+__all__ = [
+    'as_series', 'trimseq', 'trimcoef', 'getdomain', 'mapdomain', 'mapparms',
+    'format_float']
+
+#
+# Helper functions to convert inputs to 1-D arrays
+#
+def trimseq(seq):
+    """Remove small Poly series coefficients.
+
+    Parameters
+    ----------
+    seq : sequence
+        Sequence of Poly series coefficients.
+
+    Returns
+    -------
+    series : sequence
+        Subsequence with trailing zeros removed. If the resulting sequence
+        would be empty, return the first element. The returned sequence may
+        or may not be a view.
+
+    Notes
+    -----
+    Do not lose the type info if the sequence contains unknown objects.
+
+    """
+    if len(seq) == 0 or seq[-1] != 0:
+        return seq
+    else:
+        for i in range(len(seq) - 1, -1, -1):
+            if seq[i] != 0:
+                break
+        return seq[:i + 1]
+
+
+def as_series(alist, trim=True):
+    """
+    Return argument as a list of 1-d arrays.
+
+    The returned list contains array(s) of dtype double, complex double, or
+    object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
+    size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
+    of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
+    raises a ValueError if it is not first reshaped into either a 1-d or 2-d
+    array.
+
+    Parameters
+    ----------
+    alist : array_like
+        A 1- or 2-d array_like
+    trim : boolean, optional
+        When True, trailing zeros are removed from the inputs.
+        When False, the inputs are passed through intact.
+
+    Returns
+    -------
+    [a1, a2,...] : list of 1-D arrays
+        A copy of the input data as a list of 1-d arrays.
+
+    Raises
+    ------
+    ValueError
+        Raised when `as_series` cannot convert its input to 1-d arrays, or at
+        least one of the resulting arrays is empty.
+ + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polyutils as pu + >>> a = np.arange(4) + >>> pu.as_series(a) + [array([0.]), array([1.]), array([2.]), array([3.])] + >>> b = np.arange(6).reshape((2,3)) + >>> pu.as_series(b) + [array([0., 1., 2.]), array([3., 4., 5.])] + + >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16))) + [array([1.]), array([0., 1., 2.]), array([0., 1.])] + + >>> pu.as_series([2, [1.1, 0.]]) + [array([2.]), array([1.1])] + + >>> pu.as_series([2, [1.1, 0.]], trim=False) + [array([2.]), array([1.1, 0. ])] + + """ + arrays = [np.array(a, ndmin=1, copy=None) for a in alist] + for a in arrays: + if a.size == 0: + raise ValueError("Coefficient array is empty") + if a.ndim != 1: + raise ValueError("Coefficient array is not 1-d") + if trim: + arrays = [trimseq(a) for a in arrays] + + try: + dtype = np.common_type(*arrays) + except Exception as e: + object_dtype = np.dtypes.ObjectDType() + has_one_object_type = False + ret = [] + for a in arrays: + if a.dtype != object_dtype: + tmp = np.empty(len(a), dtype=object_dtype) + tmp[:] = a[:] + ret.append(tmp) + else: + has_one_object_type = True + ret.append(a.copy()) + if not has_one_object_type: + raise ValueError("Coefficient arrays have no common type") from e + else: + ret = [np.array(a, copy=True, dtype=dtype) for a in arrays] + return ret + + +def trimcoef(c, tol=0): + """ + Remove "small" "trailing" coefficients from a polynomial. + + "Small" means "small in absolute value" and is controlled by the + parameter `tol`; "trailing" means highest order coefficient(s), e.g., in + ``[0, 1, 1, 0, 0]`` (which represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) + both the 3-rd and 4-th order coefficients would be "trimmed." + + Parameters + ---------- + c : array_like + 1-d array of coefficients, ordered from lowest order to highest. + tol : number, optional + Trailing (i.e., highest order) elements with absolute value less + than or equal to `tol` (default value is zero) are removed. + + Returns + ------- + trimmed : ndarray + 1-d array with trailing zeros removed. If the resulting series + would be empty, a series containing a single zero is returned. + + Raises + ------ + ValueError + If `tol` < 0 + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.trimcoef((0,0,3,0,5,0,0)) + array([0., 0., 3., 0., 5.]) + >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed + array([0.]) + >>> i = complex(0,1) # works for complex + >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) + array([0.0003+0.j , 0.001 -0.001j]) + + """ + if tol < 0: + raise ValueError("tol must be non-negative") + + [c] = as_series([c]) + [ind] = np.nonzero(np.abs(c) > tol) + if len(ind) == 0: + return c[:1] * 0 + else: + return c[:ind[-1] + 1].copy() + +def getdomain(x): + """ + Return a domain suitable for given abscissae. + + Find a domain suitable for a polynomial or Chebyshev series + defined at the values supplied. + + Parameters + ---------- + x : array_like + 1-d array of abscissae whose domain will be determined. + + Returns + ------- + domain : ndarray + 1-d array containing two values. If the inputs are complex, then + the two returned points are the lower left and upper right corners + of the smallest rectangle (aligned with the axes) in the complex + plane containing the points `x`. If the inputs are real, then the + two points are the ends of the smallest interval containing the + points `x`. 
+ + See Also + -------- + mapparms, mapdomain + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polyutils as pu + >>> points = np.arange(4)**2 - 5; points + array([-5, -4, -1, 4]) + >>> pu.getdomain(points) + array([-5., 4.]) + >>> c = np.exp(complex(0,1)*np.pi*np.arange(12)/6) # unit circle + >>> pu.getdomain(c) + array([-1.-1.j, 1.+1.j]) + + """ + [x] = as_series([x], trim=False) + if x.dtype.char in np.typecodes['Complex']: + rmin, rmax = x.real.min(), x.real.max() + imin, imax = x.imag.min(), x.imag.max() + return np.array((complex(rmin, imin), complex(rmax, imax))) + else: + return np.array((x.min(), x.max())) + +def mapparms(old, new): + """ + Linear map parameters between domains. + + Return the parameters of the linear map ``offset + scale*x`` that maps + `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``. + + Parameters + ---------- + old, new : array_like + Domains. Each domain must (successfully) convert to a 1-d array + containing precisely two values. + + Returns + ------- + offset, scale : scalars + The map ``L(x) = offset + scale*x`` maps the first domain to the + second. + + See Also + -------- + getdomain, mapdomain + + Notes + ----- + Also works for complex numbers, and thus can be used to calculate the + parameters required to map any line in the complex plane to any other + line therein. + + Examples + -------- + >>> from numpy.polynomial import polyutils as pu + >>> pu.mapparms((-1,1),(-1,1)) + (0.0, 1.0) + >>> pu.mapparms((1,-1),(-1,1)) + (-0.0, -1.0) + >>> i = complex(0,1) + >>> pu.mapparms((-i,-1),(1,i)) + ((1+1j), (1-0j)) + + """ + oldlen = old[1] - old[0] + newlen = new[1] - new[0] + off = (old[1] * new[0] - old[0] * new[1]) / oldlen + scl = newlen / oldlen + return off, scl + +def mapdomain(x, old, new): + """ + Apply linear map to input points. + + The linear map ``offset + scale*x`` that maps the domain `old` to + the domain `new` is applied to the points `x`. + + Parameters + ---------- + x : array_like + Points to be mapped. If `x` is a subtype of ndarray the subtype + will be preserved. + old, new : array_like + The two domains that determine the map. Each must (successfully) + convert to 1-d arrays containing precisely two values. + + Returns + ------- + x_out : ndarray + Array of points of the same shape as `x`, after application of the + linear map between the two domains. + + See Also + -------- + getdomain, mapparms + + Notes + ----- + Effectively, this implements: + + .. math:: + x\\_out = new[0] + m(x - old[0]) + + where + + .. math:: + m = \\frac{new[1]-new[0]}{old[1]-old[0]} + + Examples + -------- + >>> import numpy as np + >>> from numpy.polynomial import polyutils as pu + >>> old_domain = (-1,1) + >>> new_domain = (0,2*np.pi) + >>> x = np.linspace(-1,1,6); x + array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) + >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out + array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary + 6.28318531]) + >>> x - pu.mapdomain(x_out, new_domain, old_domain) + array([0., 0., 0., 0., 0., 0.]) + + Also works for complex numbers (and thus can be used to map any line in + the complex plane to any other line therein). + + >>> i = complex(0,1) + >>> old = (-1 - i, 1 + i) + >>> new = (-1 + i, 1 - i) + >>> z = np.linspace(old[0], old[1], 6); z + array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. 
+1.j ]) + >>> new_z = pu.mapdomain(z, old, new); new_z + array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary + + """ + if type(x) not in (int, float, complex) and not isinstance(x, np.generic): + x = np.asanyarray(x) + off, scl = mapparms(old, new) + return off + scl * x + + +def _nth_slice(i, ndim): + sl = [np.newaxis] * ndim + sl[i] = slice(None) + return tuple(sl) + + +def _vander_nd(vander_fs, points, degrees): + r""" + A generalization of the Vandermonde matrix for N dimensions + + The result is built by combining the results of 1d Vandermonde matrices, + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]} + + where + + .. math:: + N &= \texttt{len(points)} = \texttt{len(degrees)} = \texttt{len(vander\_fs)} \\ + M &= \texttt{points[k].ndim} \\ + V_k &= \texttt{vander\_fs[k]} \\ + x_k &= \texttt{points[k]} \\ + 0 \le j_k &\le \texttt{degrees[k]} + + Expanding the one-dimensional :math:`V_k` functions gives: + + .. math:: + W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{B_{k, j_k}(x_k[i_0, \ldots, i_M])} + + where :math:`B_{k,m}` is the m'th basis of the polynomial construction used along + dimension :math:`k`. For a regular polynomial, :math:`B_{k, m}(x) = P_m(x) = x^m`. + + Parameters + ---------- + vander_fs : Sequence[function(array_like, int) -> ndarray] + The 1d vander function to use for each axis, such as ``polyvander`` + points : Sequence[array_like] + Arrays of point coordinates, all of the same shape. The dtypes + will be converted to either float64 or complex128 depending on + whether any of the elements are complex. Scalars are converted to + 1-D arrays. + This must be the same length as `vander_fs`. + degrees : Sequence[int] + The maximum degree (inclusive) to use for each axis. + This must be the same length as `vander_fs`. + + Returns + ------- + vander_nd : ndarray + An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``. + """ # noqa: E501 + n_dims = len(vander_fs) + if n_dims != len(points): + raise ValueError( + f"Expected {n_dims} dimensions of sample points, got {len(points)}") + if n_dims != len(degrees): + raise ValueError( + f"Expected {n_dims} dimensions of degrees, got {len(degrees)}") + if n_dims == 0: + raise ValueError("Unable to guess a dtype or shape when no points are given") + + # convert to the same shape and type + points = tuple(np.asarray(tuple(points)) + 0.0) + + # produce the vandermonde matrix for each dimension, placing the last + # axis of each in an independent trailing axis of the output + vander_arrays = ( + vander_fs[i](points[i], degrees[i])[(...,) + _nth_slice(i, n_dims)] + for i in range(n_dims) + ) + + # we checked this wasn't empty already, so no `initial` needed + return functools.reduce(operator.mul, vander_arrays) + + +def _vander_nd_flat(vander_fs, points, degrees): + """ + Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a single axis + + Used to implement the public ``vanderd`` functions. + """ + v = _vander_nd(vander_fs, points, degrees) + return v.reshape(v.shape[:-len(degrees)] + (-1,)) + + +def _fromroots(line_f, mul_f, roots): + """ + Helper function used to implement the ``fromroots`` functions. 
+ + Parameters + ---------- + line_f : function(float, float) -> ndarray + The ``line`` function, such as ``polyline`` + mul_f : function(array_like, array_like) -> ndarray + The ``mul`` function, such as ``polymul`` + roots + See the ``fromroots`` functions for more detail + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = as_series([roots], trim=False) + roots.sort() + p = [line_f(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [mul_f(p[i], p[i + m]) for i in range(m)] + if r: + tmp[0] = mul_f(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def _valnd(val_f, c, *args): + """ + Helper function used to implement the ``vald`` functions. + + Parameters + ---------- + val_f : function(array_like, array_like, tensor: bool) -> array_like + The ``val`` function, such as ``polyval`` + c, args + See the ``vald`` functions for more detail + """ + args = [np.asanyarray(a) for a in args] + shape0 = args[0].shape + if not all(a.shape == shape0 for a in args[1:]): + if len(args) == 3: + raise ValueError('x, y, z are incompatible') + elif len(args) == 2: + raise ValueError('x, y are incompatible') + else: + raise ValueError('ordinates are incompatible') + it = iter(args) + x0 = next(it) + + # use tensor on only the first + c = val_f(x0, c) + for xi in it: + c = val_f(xi, c, tensor=False) + return c + + +def _gridnd(val_f, c, *args): + """ + Helper function used to implement the ``gridd`` functions. + + Parameters + ---------- + val_f : function(array_like, array_like, tensor: bool) -> array_like + The ``val`` function, such as ``polyval`` + c, args + See the ``gridd`` functions for more detail + """ + for xi in args: + c = val_f(xi, c) + return c + + +def _div(mul_f, c1, c2): + """ + Helper function used to implement the ``div`` functions. + + Implementation uses repeated subtraction of c2 multiplied by the nth basis. + For some polynomial types, a more efficient approach may be possible. + + Parameters + ---------- + mul_f : function(array_like, array_like) -> array_like + The ``mul`` function, such as ``polymul`` + c1, c2 + See the ``div`` functions for more detail + """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError # FIXME: add message with details to exception + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1] * 0, c1 + elif lc2 == 1: + return c1 / c2[-1], c1[:1] * 0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = mul_f([0] * i + [1], c2) + q = rem[-1] / p[-1] + rem = rem[:-1] - q * p[:-1] + quo[i] = q + return quo, trimseq(rem) + + +def _add(c1, c2): + """ Helper function used to implement the ``add`` functions. """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return trimseq(ret) + + +def _sub(c1, c2): + """ Helper function used to implement the ``sub`` functions. """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return trimseq(ret) + + +def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): + """ + Helper function used to implement the ``fit`` functions. 
+
+    Parameters
+    ----------
+    vander_f : function(array_like, int) -> ndarray
+        The 1d vander function, such as ``polyvander``
+    x, y, deg, rcond, full, w
+        See the ``fit`` functions for more detail
+    """
+    x = np.asarray(x) + 0.0
+    y = np.asarray(y) + 0.0
+    deg = np.asarray(deg)
+
+    # check arguments.
+    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
+        raise TypeError("deg must be an int or non-empty 1-D array of int")
+    if deg.min() < 0:
+        raise ValueError("expected deg >= 0")
+    if x.ndim != 1:
+        raise TypeError("expected 1D vector for x")
+    if x.size == 0:
+        raise TypeError("expected non-empty vector for x")
+    if y.ndim < 1 or y.ndim > 2:
+        raise TypeError("expected 1D or 2D array for y")
+    if len(x) != len(y):
+        raise TypeError("expected x and y to have same length")
+
+    if deg.ndim == 0:
+        lmax = deg
+        order = lmax + 1
+        van = vander_f(x, lmax)
+    else:
+        deg = np.sort(deg)
+        lmax = deg[-1]
+        order = len(deg)
+        van = vander_f(x, lmax)[:, deg]
+
+    # set up the least squares matrices in transposed form
+    lhs = van.T
+    rhs = y.T
+    if w is not None:
+        w = np.asarray(w) + 0.0
+        if w.ndim != 1:
+            raise TypeError("expected 1D vector for w")
+        if len(x) != len(w):
+            raise TypeError("expected x and w to have same length")
+        # apply weights. Don't use inplace operations as they
+        # can cause problems with NA.
+        lhs = lhs * w
+        rhs = rhs * w
+
+    # set rcond
+    if rcond is None:
+        rcond = len(x) * np.finfo(x.dtype).eps
+
+    # Determine the norms of the design matrix columns.
+    if issubclass(lhs.dtype.type, np.complexfloating):
+        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+    else:
+        scl = np.sqrt(np.square(lhs).sum(1))
+    scl[scl == 0] = 1
+
+    # Solve the least squares problem.
+    c, resids, rank, s = np.linalg.lstsq(lhs.T / scl, rhs.T, rcond)
+    c = (c.T / scl).T
+
+    # Expand c to include non-fitted coefficients which are set to zero
+    if deg.ndim > 0:
+        if c.ndim == 2:
+            cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
+        else:
+            cc = np.zeros(lmax + 1, dtype=c.dtype)
+        cc[deg] = c
+        c = cc
+
+    # warn on rank reduction
+    if rank != order and not full:
+        msg = "The fit may be poorly conditioned"
+        warnings.warn(msg, RankWarning, stacklevel=2)
+
+    if full:
+        return c, [resids, rank, s, rcond]
+    else:
+        return c
+
+
+def _pow(mul_f, c, pow, maxpower):
+    """
+    Helper function used to implement the ``pow`` functions.
+
+    Parameters
+    ----------
+    mul_f : function(array_like, array_like) -> ndarray
+        The ``mul`` function, such as ``polymul``
+    c : array_like
+        1-D array of series coefficients
+    pow, maxpower
+        See the ``pow`` functions for more detail
+    """
+    # c is a trimmed copy
+    [c] = as_series([c])
+    power = int(pow)
+    if power != pow or power < 0:
+        raise ValueError("Power must be a non-negative integer.")
+    elif maxpower is not None and power > maxpower:
+        raise ValueError("Power is too large")
+    elif power == 0:
+        return np.array([1], dtype=c.dtype)
+    elif power == 1:
+        return c
+    else:
+        # This can be made more efficient by using powers of two
+        # in the usual way.
+ prd = c + for i in range(2, power + 1): + prd = mul_f(prd, c) + return prd + + +def _as_int(x, desc): + """ + Like `operator.index`, but emits a custom exception when passed an + incorrect type + + Parameters + ---------- + x : int-like + Value to interpret as an integer + desc : str + description to include in any error message + + Raises + ------ + TypeError : if x is a float or non-numeric + """ + try: + return operator.index(x) + except TypeError as e: + raise TypeError(f"{desc} must be an integer, received {x}") from e + + +def format_float(x, parens=False): + if not np.issubdtype(type(x), np.floating): + return str(x) + + opts = np.get_printoptions() + + if np.isnan(x): + return opts['nanstr'] + elif np.isinf(x): + return opts['infstr'] + + exp_format = False + if x != 0: + a = np.abs(x) + if a >= 1.e8 or a < 10**min(0, -(opts['precision'] - 1) // 2): + exp_format = True + + trim, unique = '0', True + if opts['floatmode'] == 'fixed': + trim, unique = 'k', False + + if exp_format: + s = dragon4_scientific(x, precision=opts['precision'], + unique=unique, trim=trim, + sign=opts['sign'] == '+') + if parens: + s = '(' + s + ')' + else: + s = dragon4_positional(x, precision=opts['precision'], + fractional=True, + unique=unique, trim=trim, + sign=opts['sign'] == '+') + return s diff --git a/python/numpy/polynomial/polyutils.pyi b/python/numpy/polynomial/polyutils.pyi new file mode 100644 index 000000000..c627e16dc --- /dev/null +++ b/python/numpy/polynomial/polyutils.pyi @@ -0,0 +1,423 @@ +from collections.abc import Callable, Iterable, Sequence +from typing import ( + Final, + Literal, + SupportsIndex, + TypeAlias, + TypeVar, + overload, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _ArrayLikeComplex_co, + _ArrayLikeFloat_co, + _FloatLike_co, + _NumberLike_co, +) + +from ._polytypes import ( + _AnyInt, + _Array2, + _ArrayLikeCoef_co, + _CoefArray, + _CoefLike_co, + _CoefSeries, + _ComplexArray, + _ComplexSeries, + _FloatArray, + _FloatSeries, + _FuncBinOp, + _FuncValND, + _FuncVanderND, + _ObjectArray, + _ObjectSeries, + _SeriesLikeCoef_co, + _SeriesLikeComplex_co, + _SeriesLikeFloat_co, + _SeriesLikeInt_co, + _Tuple2, +) + +__all__: Final[Sequence[str]] = [ + "as_series", + "format_float", + "getdomain", + "mapdomain", + "mapparms", + "trimcoef", + "trimseq", +] + +_AnyLineF: TypeAlias = Callable[ + [_CoefLike_co, _CoefLike_co], + _CoefArray, +] +_AnyMulF: TypeAlias = Callable[ + [npt.ArrayLike, npt.ArrayLike], + _CoefArray, +] +_AnyVanderF: TypeAlias = Callable[ + [npt.ArrayLike, SupportsIndex], + _CoefArray, +] + +@overload +def as_series( + alist: npt.NDArray[np.integer] | _FloatArray, + trim: bool = ..., +) -> list[_FloatSeries]: ... +@overload +def as_series( + alist: _ComplexArray, + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: _ObjectArray, + trim: bool = ..., +) -> list[_ObjectSeries]: ... +@overload +def as_series( # type: ignore[overload-overlap] + alist: Iterable[_FloatArray | npt.NDArray[np.integer]], + trim: bool = ..., +) -> list[_FloatSeries]: ... +@overload +def as_series( + alist: Iterable[_ComplexArray], + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: Iterable[_ObjectArray], + trim: bool = ..., +) -> list[_ObjectSeries]: ... +@overload +def as_series( # type: ignore[overload-overlap] + alist: Iterable[_SeriesLikeFloat_co | float], + trim: bool = ..., +) -> list[_FloatSeries]: ... 
+@overload +def as_series( + alist: Iterable[_SeriesLikeComplex_co | complex], + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: Iterable[_SeriesLikeCoef_co | object], + trim: bool = ..., +) -> list[_ObjectSeries]: ... + +_T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) +def trimseq(seq: _T_seq) -> _T_seq: ... + +@overload +def trimcoef( # type: ignore[overload-overlap] + c: npt.NDArray[np.integer] | _FloatArray, + tol: _FloatLike_co = ..., +) -> _FloatSeries: ... +@overload +def trimcoef( + c: _ComplexArray, + tol: _FloatLike_co = ..., +) -> _ComplexSeries: ... +@overload +def trimcoef( + c: _ObjectArray, + tol: _FloatLike_co = ..., +) -> _ObjectSeries: ... +@overload +def trimcoef( # type: ignore[overload-overlap] + c: _SeriesLikeFloat_co | float, + tol: _FloatLike_co = ..., +) -> _FloatSeries: ... +@overload +def trimcoef( + c: _SeriesLikeComplex_co | complex, + tol: _FloatLike_co = ..., +) -> _ComplexSeries: ... +@overload +def trimcoef( + c: _SeriesLikeCoef_co | object, + tol: _FloatLike_co = ..., +) -> _ObjectSeries: ... + +@overload +def getdomain( # type: ignore[overload-overlap] + x: _FloatArray | npt.NDArray[np.integer], +) -> _Array2[np.float64]: ... +@overload +def getdomain( + x: _ComplexArray, +) -> _Array2[np.complex128]: ... +@overload +def getdomain( + x: _ObjectArray, +) -> _Array2[np.object_]: ... +@overload +def getdomain( # type: ignore[overload-overlap] + x: _SeriesLikeFloat_co | float, +) -> _Array2[np.float64]: ... +@overload +def getdomain( + x: _SeriesLikeComplex_co | complex, +) -> _Array2[np.complex128]: ... +@overload +def getdomain( + x: _SeriesLikeCoef_co | object, +) -> _Array2[np.object_]: ... + +@overload +def mapparms( # type: ignore[overload-overlap] + old: npt.NDArray[np.floating | np.integer], + new: npt.NDArray[np.floating | np.integer], +) -> _Tuple2[np.floating]: ... +@overload +def mapparms( + old: npt.NDArray[np.number], + new: npt.NDArray[np.number], +) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms( + old: npt.NDArray[np.object_ | np.number], + new: npt.NDArray[np.object_ | np.number], +) -> _Tuple2[object]: ... +@overload +def mapparms( # type: ignore[overload-overlap] + old: Sequence[float], + new: Sequence[float], +) -> _Tuple2[float]: ... +@overload +def mapparms( + old: Sequence[complex], + new: Sequence[complex], +) -> _Tuple2[complex]: ... +@overload +def mapparms( + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> _Tuple2[np.floating]: ... +@overload +def mapparms( + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> _Tuple2[np.complexfloating]: ... +@overload +def mapparms( + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> _Tuple2[object]: ... + +@overload +def mapdomain( # type: ignore[overload-overlap] + x: _FloatLike_co, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> np.floating: ... +@overload +def mapdomain( + x: _NumberLike_co, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> np.complexfloating: ... +@overload +def mapdomain( # type: ignore[overload-overlap] + x: npt.NDArray[np.floating | np.integer], + old: npt.NDArray[np.floating | np.integer], + new: npt.NDArray[np.floating | np.integer], +) -> _FloatSeries: ... +@overload +def mapdomain( + x: npt.NDArray[np.number], + old: npt.NDArray[np.number], + new: npt.NDArray[np.number], +) -> _ComplexSeries: ... 
+@overload +def mapdomain( + x: npt.NDArray[np.object_ | np.number], + old: npt.NDArray[np.object_ | np.number], + new: npt.NDArray[np.object_ | np.number], +) -> _ObjectSeries: ... +@overload +def mapdomain( # type: ignore[overload-overlap] + x: _SeriesLikeFloat_co, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> _FloatSeries: ... +@overload +def mapdomain( + x: _SeriesLikeComplex_co, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> _ComplexSeries: ... +@overload +def mapdomain( + x: _SeriesLikeCoef_co, + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> _ObjectSeries: ... +@overload +def mapdomain( + x: _CoefLike_co, + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> object: ... + +def _nth_slice( + i: SupportsIndex, + ndim: SupportsIndex, +) -> tuple[slice | None, ...]: ... + +_vander_nd: _FuncVanderND[Literal["_vander_nd"]] +_vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] + +# keep in sync with `._polytypes._FuncFromRoots` +@overload +def _fromroots( # type: ignore[overload-overlap] + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeFloat_co, +) -> _FloatSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeComplex_co, +) -> _ComplexSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeCoef_co, +) -> _ObjectSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeCoef_co, +) -> _CoefSeries: ... + +_valnd: _FuncValND[Literal["_valnd"]] +_gridnd: _FuncValND[Literal["_gridnd"]] + +# keep in sync with `_polytypes._FuncBinOp` +@overload +def _div( # type: ignore[overload-overlap] + mul_f: _AnyMulF, + c1: _SeriesLikeFloat_co, + c2: _SeriesLikeFloat_co, +) -> _Tuple2[_FloatSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeComplex_co, + c2: _SeriesLikeComplex_co, +) -> _Tuple2[_ComplexSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, +) -> _Tuple2[_ObjectSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, +) -> _Tuple2[_CoefSeries]: ... + +_add: Final[_FuncBinOp] +_sub: Final[_FuncBinOp] + +# keep in sync with `_polytypes._FuncPow` +@overload +def _pow( # type: ignore[overload-overlap] + mul_f: _AnyMulF, + c: _SeriesLikeFloat_co, + pow: _AnyInt, + maxpower: _AnyInt | None = ..., +) -> _FloatSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeComplex_co, + pow: _AnyInt, + maxpower: _AnyInt | None = ..., +) -> _ComplexSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: _AnyInt | None = ..., +) -> _ObjectSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: _AnyInt | None = ..., +) -> _CoefSeries: ... + +# keep in sync with `_polytypes._FuncFit` +@overload +def _fit( # type: ignore[overload-overlap] + vander_f: _AnyVanderF, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _SeriesLikeInt_co, + domain: _SeriesLikeFloat_co | None = ..., + rcond: _FloatLike_co | None = ..., + full: Literal[False] = ..., + w: _SeriesLikeFloat_co | None = ..., +) -> _FloatArray: ... 
+@overload
+def _fit(
+    vander_f: _AnyVanderF,
+    x: _SeriesLikeComplex_co,
+    y: _ArrayLikeComplex_co,
+    deg: _SeriesLikeInt_co,
+    domain: _SeriesLikeComplex_co | None = ...,
+    rcond: _FloatLike_co | None = ...,
+    full: Literal[False] = ...,
+    w: _SeriesLikeComplex_co | None = ...,
+) -> _ComplexArray: ...
+@overload
+def _fit(
+    vander_f: _AnyVanderF,
+    x: _SeriesLikeCoef_co,
+    y: _ArrayLikeCoef_co,
+    deg: _SeriesLikeInt_co,
+    domain: _SeriesLikeCoef_co | None = ...,
+    rcond: _FloatLike_co | None = ...,
+    full: Literal[False] = ...,
+    w: _SeriesLikeCoef_co | None = ...,
+) -> _CoefArray: ...
+@overload
+def _fit(
+    vander_f: _AnyVanderF,
+    x: _SeriesLikeCoef_co,
+    y: _SeriesLikeCoef_co,
+    deg: _SeriesLikeInt_co,
+    domain: _SeriesLikeCoef_co | None,
+    rcond: _FloatLike_co | None,
+    full: Literal[True],
+    /,
+    w: _SeriesLikeCoef_co | None = ...,
+) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ...
+@overload
+def _fit(
+    vander_f: _AnyVanderF,
+    x: _SeriesLikeCoef_co,
+    y: _SeriesLikeCoef_co,
+    deg: _SeriesLikeInt_co,
+    domain: _SeriesLikeCoef_co | None = ...,
+    rcond: _FloatLike_co | None = ...,
+    *,
+    full: Literal[True],
+    w: _SeriesLikeCoef_co | None = ...,
+) -> tuple[_CoefSeries, Sequence[np.inexact | np.int32]]: ...
+
+def _as_int(x: SupportsIndex, desc: str) -> int: ...
+def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ...
diff --git a/python/numpy/polynomial/tests/__init__.py b/python/numpy/polynomial/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/polynomial/tests/test_chebyshev.py b/python/numpy/polynomial/tests/test_chebyshev.py
new file mode 100644
index 000000000..2cead4546
--- /dev/null
+++ b/python/numpy/polynomial/tests/test_chebyshev.py
@@ -0,0 +1,623 @@
+"""Tests for chebyshev module.
+
+"""
+from functools import reduce
+
+import numpy as np
+import numpy.polynomial.chebyshev as cheb
+from numpy.polynomial.polynomial import polyval
+from numpy.testing import (
+    assert_,
+    assert_almost_equal,
+    assert_equal,
+    assert_raises,
+)
+
+
+def trim(x):
+    return cheb.chebtrim(x, tol=1e-6)
+
+
+T0 = [1]
+T1 = [0, 1]
+T2 = [-1, 0, 2]
+T3 = [0, -3, 0, 4]
+T4 = [1, 0, -8, 0, 8]
+T5 = [0, 5, 0, -20, 0, 16]
+T6 = [-1, 0, 18, 0, -48, 0, 32]
+T7 = [0, -7, 0, 56, 0, -112, 0, 64]
+T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
+T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
+
+Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
+
+
+class TestPrivate:
+
+    def test__cseries_to_zseries(self):
+        for i in range(5):
+            inp = np.array([2] + [1] * i, np.double)
+            tgt = np.array([.5] * i + [2] + [.5] * i, np.double)
+            res = cheb._cseries_to_zseries(inp)
+            assert_equal(res, tgt)
+
+    def test__zseries_to_cseries(self):
+        for i in range(5):
+            inp = np.array([.5] * i + [2] + [.5] * i, np.double)
+            tgt = np.array([2] + [1] * i, np.double)
+            res = cheb._zseries_to_cseries(inp)
+            assert_equal(res, tgt)
+
+
+class TestConstants:
+
+    def test_chebdomain(self):
+        assert_equal(cheb.chebdomain, [-1, 1])
+
+    def test_chebzero(self):
+        assert_equal(cheb.chebzero, [0])
+
+    def test_chebone(self):
+        assert_equal(cheb.chebone, [1])
+
+    def test_chebx(self):
+        assert_equal(cheb.chebx, [0, 1])
+
+
+class TestArithmetic:
+
+    def test_chebadd(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                tgt = np.zeros(max(i, j) + 1)
+                tgt[i] += 1
+                tgt[j] += 1
+                res = cheb.chebadd([0] * i + [1], [0] * j + [1])
+                assert_equal(trim(res), trim(tgt), err_msg=msg)
+
+    def test_chebsub(self):
+        for i in range(5):
+            for j in range(5):
+                msg = f"At i={i}, j={j}"
+                tgt =
np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = cheb.chebsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebmulx(self): + assert_equal(cheb.chebmulx([0]), [0]) + assert_equal(cheb.chebmulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [.5, 0, .5] + assert_equal(cheb.chebmulx(ser), tgt) + + def test_chebmul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += .5 + tgt[abs(i - j)] += .5 + res = cheb.chebmul([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = cheb.chebadd(ci, cj) + quo, rem = cheb.chebdiv(tgt, ci) + res = cheb.chebadd(cheb.chebmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_chebpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(cheb.chebmul, [c] * j, np.array([1])) + res = cheb.chebpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 2., 1.5]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_chebval(self): + # check empty input + assert_equal(cheb.chebval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Tlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = cheb.chebval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(cheb.chebval(x, [1]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0]).shape, dims) + assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims) + + def test_chebval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = cheb.chebval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_chebval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = cheb.chebval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_chebgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = cheb.chebgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_chebgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = cheb.chebgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = cheb.chebgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_chebint(self): + # 
check exceptions + assert_raises(TypeError, cheb.chebint, [0], .5) + assert_raises(ValueError, cheb.chebint, [0], -1) + assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) + assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) + assert_raises(ValueError, cheb.chebint, [0], scl=[0]) + assert_raises(TypeError, cheb.chebint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = cheb.chebint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i]) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(cheb.chebval(-1, chebint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + chebpol = cheb.poly2cheb(pol) + chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2) + res = cheb.cheb2poly(chebint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1) + res = cheb.chebint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k]) + res = cheb.chebint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1) + res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = cheb.chebint(tgt, m=1, k=[k], scl=2) + res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T + res = cheb.chebint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c) for c in c2d]) + res = cheb.chebint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d]) + res = cheb.chebint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_chebder(self): + # check exceptions + assert_raises(TypeError, cheb.chebder, [0], .5) + assert_raises(ValueError, cheb.chebder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = cheb.chebder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # 
check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T + res = cheb.chebder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([cheb.chebder(c) for c in c2d]) + res = cheb.chebder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_chebvander(self): + # check for 1d x + x = np.arange(3) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = cheb.chebvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], cheb.chebval(x, coef)) + + def test_chebvander2d(self): + # also tests chebval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = cheb.chebvander2d(x1, x2, [1, 2]) + tgt = cheb.chebval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_chebvander3d(self): + # also tests chebval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3]) + tgt = cheb.chebval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_chebfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, cheb.chebfit, [1], [1], -1) + assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0) + assert_raises(TypeError, cheb.chebfit, [], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0) + assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,]) + assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, cheb.chebfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = cheb.chebfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + coef3 = cheb.chebfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(cheb.chebval(x, coef3), y) + # + coef4 = cheb.chebfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(cheb.chebval(x, coef4), y) + # + coef2d = cheb.chebfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 
3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = cheb.chebfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares
+        # sum to zero.
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
+        assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
+        # test fitting only even polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = cheb.chebfit(x, y, 4)
+        assert_almost_equal(cheb.chebval(x, coef1), y)
+        coef2 = cheb.chebfit(x, y, [0, 2, 4])
+        assert_almost_equal(cheb.chebval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+
+class TestInterpolate:
+
+    def f(self, x):
+        return x * (x - 1) * (x - 2)
+
+    def test_raises(self):
+        assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
+        assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
+
+    def test_dimensions(self):
+        for deg in range(1, 5):
+            assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,))
+
+    def test_approximation(self):
+
+        def powx(x, p):
+            return x**p
+
+        x = np.linspace(-1, 1, 10)
+        for deg in range(10):
+            for p in range(deg + 1):
+                c = cheb.chebinterpolate(powx, deg, (p,))
+                assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
+
+
+class TestCompanion:
+
+    def test_raises(self):
+        assert_raises(ValueError, cheb.chebcompanion, [])
+        assert_raises(ValueError, cheb.chebcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0] * i + [1]
+            assert_(cheb.chebcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
+
+
+class TestGauss:
+
+    def test_100(self):
+        x, w = cheb.chebgauss(100)
+
+        # test orthogonality. Note that the results need to be normalized,
+        # otherwise the huge values that can arise from fast growing
+        # functions like Laguerre can be very confusing.
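+        # The check below forms the Gram matrix G = V^T diag(w) V and
+        # rescales it by 1/sqrt(diag(G)); for an orthogonal basis the
+        # rescaled matrix is the identity, whatever the normalization.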
+ v = cheb.chebvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.pi + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_chebfromroots(self): + res = cheb.chebfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + tgt = [0] * i + [1] + res = cheb.chebfromroots(roots) * 2**(i - 1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebroots(self): + assert_almost_equal(cheb.chebroots([1]), []) + assert_almost_equal(cheb.chebroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = cheb.chebroots(cheb.chebfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_chebtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, cheb.chebtrim, coef, -1) + + # Test results + assert_equal(cheb.chebtrim(coef), coef[:-1]) + assert_equal(cheb.chebtrim(coef, 1), coef[:-3]) + assert_equal(cheb.chebtrim(coef, 2), [0]) + + def test_chebline(self): + assert_equal(cheb.chebline(3, 4), [3, 4]) + + def test_cheb2poly(self): + for i in range(10): + assert_almost_equal(cheb.cheb2poly([0] * i + [1]), Tlist[i]) + + def test_poly2cheb(self): + for i in range(10): + assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11)[1:-1] + tgt = 1. / (np.sqrt(1 + x) * np.sqrt(1 - x)) + res = cheb.chebweight(x) + assert_almost_equal(res, tgt) + + def test_chebpts1(self): + # test exceptions + assert_raises(ValueError, cheb.chebpts1, 1.5) + assert_raises(ValueError, cheb.chebpts1, 0) + + # test points + tgt = [0] + assert_almost_equal(cheb.chebpts1(1), tgt) + tgt = [-0.70710678118654746, 0.70710678118654746] + assert_almost_equal(cheb.chebpts1(2), tgt) + tgt = [-0.86602540378443871, 0, 0.86602540378443871] + assert_almost_equal(cheb.chebpts1(3), tgt) + tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325] + assert_almost_equal(cheb.chebpts1(4), tgt) + + def test_chebpts2(self): + # test exceptions + assert_raises(ValueError, cheb.chebpts2, 1.5) + assert_raises(ValueError, cheb.chebpts2, 1) + + # test points + tgt = [-1, 1] + assert_almost_equal(cheb.chebpts2(2), tgt) + tgt = [-1, 0, 1] + assert_almost_equal(cheb.chebpts2(3), tgt) + tgt = [-1, -0.5, .5, 1] + assert_almost_equal(cheb.chebpts2(4), tgt) + tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0] + assert_almost_equal(cheb.chebpts2(5), tgt) diff --git a/python/numpy/polynomial/tests/test_classes.py b/python/numpy/polynomial/tests/test_classes.py new file mode 100644 index 000000000..d10aafbda --- /dev/null +++ b/python/numpy/polynomial/tests/test_classes.py @@ -0,0 +1,618 @@ +"""Test inter-conversion of different polynomial classes. + +This tests the convert and cast methods of all the polynomial classes. 
+ +""" +import operator as op +from numbers import Number + +import pytest + +import numpy as np +from numpy.exceptions import RankWarning +from numpy.polynomial import ( + Chebyshev, + Hermite, + HermiteE, + Laguerre, + Legendre, + Polynomial, +) +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +# +# fixtures +# + +classes = ( + Polynomial, Legendre, Chebyshev, Laguerre, + Hermite, HermiteE + ) +classids = tuple(cls.__name__ for cls in classes) + +@pytest.fixture(params=classes, ids=classids) +def Poly(request): + return request.param + + +# +# helper functions +# +random = np.random.random + + +def assert_poly_almost_equal(p1, p2, msg=""): + try: + assert_(np.all(p1.domain == p2.domain)) + assert_(np.all(p1.window == p2.window)) + assert_almost_equal(p1.coef, p2.coef) + except AssertionError: + msg = f"Result: {p1}\nTarget: {p2}" + raise AssertionError(msg) + + +# +# Test conversion methods that depend on combinations of two classes. +# + +Poly1 = Poly +Poly2 = Poly + + +def test_conversion(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 + p2 = p1.convert(kind=Poly2, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +def test_cast(Poly1, Poly2): + x = np.linspace(0, 1, 10) + coef = random((3,)) + + d1 = Poly1.domain + random((2,)) * .25 + w1 = Poly1.window + random((2,)) * .25 + p1 = Poly1(coef, domain=d1, window=w1) + + d2 = Poly2.domain + random((2,)) * .25 + w2 = Poly2.window + random((2,)) * .25 + p2 = Poly2.cast(p1, domain=d2, window=w2) + + assert_almost_equal(p2.domain, d2) + assert_almost_equal(p2.window, w2) + assert_almost_equal(p2(x), p1(x)) + + +# +# test methods that depend on one class +# + + +def test_identity(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + x = np.linspace(d[0], d[1], 11) + p = Poly.identity(domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_almost_equal(p(x), x) + + +def test_basis(Poly): + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p = Poly.basis(5, domain=d, window=w) + assert_equal(p.domain, d) + assert_equal(p.window, w) + assert_equal(p.coef, [0] * 5 + [1]) + + +def test_fromroots(Poly): + # check that requested roots are zeros of a polynomial + # of correct degree, domain, and window. + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + r = random((5,)) + p1 = Poly.fromroots(r, domain=d, window=w) + assert_equal(p1.degree(), len(r)) + assert_equal(p1.domain, d) + assert_equal(p1.window, w) + assert_almost_equal(p1(r), 0) + + # check that polynomial is monic + pdom = Polynomial.domain + pwin = Polynomial.window + p2 = Polynomial.cast(p1, domain=pdom, window=pwin) + assert_almost_equal(p2.coef[-1], 1) + + +def test_bad_conditioned_fit(Poly): + + x = [0., 0., 1.] + y = [1., 2., 3.] 
+ + # check RankWarning is raised + with pytest.warns(RankWarning) as record: + Poly.fit(x, y, 2) + assert record[0].message.args[0] == "The fit may be poorly conditioned" + + +def test_fit(Poly): + + def f(x): + return x * (x - 1) * (x - 2) + x = np.linspace(0, 3) + y = f(x) + + # check default value of domain and window + p = Poly.fit(x, y, 3) + assert_almost_equal(p.domain, [0, 3]) + assert_almost_equal(p(x), y) + assert_equal(p.degree(), 3) + + # check with given domains and window + d = Poly.domain + random((2,)) * .25 + w = Poly.window + random((2,)) * .25 + p = Poly.fit(x, y, 3, domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) + assert_almost_equal(p(x), y) + assert_almost_equal(p.domain, d) + assert_almost_equal(p.window, w) + + # check with class domain default + p = Poly.fit(x, y, 3, []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + p = Poly.fit(x, y, [0, 1, 2, 3], []) + assert_equal(p.domain, Poly.domain) + assert_equal(p.window, Poly.window) + + # check that fit accepts weights. + w = np.zeros_like(x) + z = y + random(y.shape) * .25 + w[::2] = 1 + p1 = Poly.fit(x[::2], z[::2], 3) + p2 = Poly.fit(x, z, 3, w=w) + p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) + assert_almost_equal(p1(x), p2(x)) + assert_almost_equal(p2(x), p3(x)) + + +def test_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(p1 == p1) + assert_(not p1 == p2) + assert_(not p1 == p3) + assert_(not p1 == p4) + + +def test_not_equal(Poly): + p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) + p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) + p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) + p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) + assert_(not p1 != p1) + assert_(p1 != p2) + assert_(p1 != p3) + assert_(p1 != p4) + + +def test_add(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 + p2 + assert_poly_almost_equal(p2 + p1, p3) + assert_poly_almost_equal(p1 + c2, p3) + assert_poly_almost_equal(c2 + p1, p3) + assert_poly_almost_equal(p1 + tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) + p1, p3) + assert_poly_almost_equal(p1 + np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) + p1, p3) + assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.add, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.add, p1, Polynomial([0])) + + +def test_sub(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 - p2 + assert_poly_almost_equal(p2 - p1, -p3) + assert_poly_almost_equal(p1 - c2, p3) + assert_poly_almost_equal(c2 - p1, -p3) + assert_poly_almost_equal(p1 - tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) - p1, -p3) + assert_poly_almost_equal(p1 - np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) - p1, -p3) + assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + 
assert_raises(TypeError, op.sub, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.sub, p1, Polynomial([0])) + + +def test_mul(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = p1 * p2 + assert_poly_almost_equal(p2 * p1, p3) + assert_poly_almost_equal(p1 * c2, p3) + assert_poly_almost_equal(c2 * p1, p3) + assert_poly_almost_equal(p1 * tuple(c2), p3) + assert_poly_almost_equal(tuple(c2) * p1, p3) + assert_poly_almost_equal(p1 * np.array(c2), p3) + assert_poly_almost_equal(np.array(c2) * p1, p3) + assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) + assert_poly_almost_equal(2 * p1, p1 * Poly([2])) + assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mul, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mul, p1, Polynomial([0])) + + +def test_floordiv(Poly): + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 // p2, p1) + assert_poly_almost_equal(p4 // c2, p1) + assert_poly_almost_equal(c4 // p2, p1) + assert_poly_almost_equal(p4 // tuple(c2), p1) + assert_poly_almost_equal(tuple(c4) // p2, p1) + assert_poly_almost_equal(p4 // np.array(c2), p1) + assert_poly_almost_equal(np.array(c4) // p2, p1) + assert_poly_almost_equal(2 // p2, Poly([0])) + assert_poly_almost_equal(p2 // 2, 0.5 * p2) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises( + TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) + + +def test_truediv(Poly): + # true division is valid only if the denominator is a Number and + # not a python bool. 
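+    # bool must be filtered out explicitly because it subclasses int, so
+    # isinstance(True, Number) holds, yet dividing by a bool still raises.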
+ p1 = Poly([1, 2, 3]) + p2 = p1 * 5 + + for stype in np.ScalarType: + if not issubclass(stype, Number) or issubclass(stype, bool): + continue + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in (int, float): + s = stype(5) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for stype in [complex]: + s = stype(5, 0) + assert_poly_almost_equal(op.truediv(p2, s), p1) + assert_raises(TypeError, op.truediv, s, p2) + for s in [(), [], {}, False, np.array([1])]: + assert_raises(TypeError, op.truediv, p2, s) + assert_raises(TypeError, op.truediv, s, p2) + for ptype in classes: + assert_raises(TypeError, op.truediv, p2, ptype(1)) + + +def test_mod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + assert_poly_almost_equal(p4 % p2, p3) + assert_poly_almost_equal(p4 % c2, p3) + assert_poly_almost_equal(c4 % p2, p3) + assert_poly_almost_equal(p4 % tuple(c2), p3) + assert_poly_almost_equal(tuple(c4) % p2, p3) + assert_poly_almost_equal(p4 % np.array(c2), p3) + assert_poly_almost_equal(np.array(c4) % p2, p3) + assert_poly_almost_equal(2 % p2, Poly([2])) + assert_poly_almost_equal(p2 % 2, Poly([0])) + assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, op.mod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, op.mod, p1, Polynomial([0])) + + +def test_divmod(Poly): + # This checks commutation, not numerical correctness + c1 = list(random((4,)) + .5) + c2 = list(random((3,)) + .5) + c3 = list(random((2,)) + .5) + p1 = Poly(c1) + p2 = Poly(c2) + p3 = Poly(c3) + p4 = p1 * p2 + p3 + c4 = list(p4.coef) + quo, rem = divmod(p4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, c2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(c4, p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, tuple(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(tuple(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p4, np.array(c2)) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(np.array(c4), p2) + assert_poly_almost_equal(quo, p1) + assert_poly_almost_equal(rem, p3) + quo, rem = divmod(p2, 2) + assert_poly_almost_equal(quo, 0.5 * p2) + assert_poly_almost_equal(rem, Poly([0])) + quo, rem = divmod(2, p2) + assert_poly_almost_equal(quo, Poly([0])) + assert_poly_almost_equal(rem, Poly([2])) + assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) + assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) + if Poly is Polynomial: + assert_raises(TypeError, divmod, p1, Chebyshev([0])) + else: + assert_raises(TypeError, divmod, p1, Polynomial([0])) + + +def test_roots(Poly): + d = Poly.domain * 1.25 + .25 + w = Poly.window + tgt = np.linspace(d[0], d[1], 5) + res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) + assert_almost_equal(res, tgt) + # default domain and window + res = np.sort(Poly.fromroots(tgt).roots()) + assert_almost_equal(res, tgt) + + +def 
test_degree(Poly):
+    p = Poly.basis(5)
+    assert_equal(p.degree(), 5)
+
+
+def test_copy(Poly):
+    p1 = Poly.basis(5)
+    p2 = p1.copy()
+    assert_(p1 == p2)
+    assert_(p1 is not p2)
+    assert_(p1.coef is not p2.coef)
+    assert_(p1.domain is not p2.domain)
+    assert_(p1.window is not p2.window)
+
+
+def test_integ(Poly):
+    P = Polynomial
+    # Check defaults
+    p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+    # Check with k
+    p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
+    p1 = P.cast(p0.integ(k=1))
+    p2 = P.cast(p0.integ(2, k=[1, 1]))
+    assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
+    # Check with lbnd
+    p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]))
+    p1 = P.cast(p0.integ(lbnd=1))
+    p2 = P.cast(p0.integ(2, lbnd=1))
+    assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
+    # Check scaling
+    d = 2 * Poly.domain
+    p0 = Poly.cast(P([1 * 2, 2 * 3, 3 * 4]), domain=d)
+    p1 = P.cast(p0.integ())
+    p2 = P.cast(p0.integ(2))
+    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
+    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
+
+
+def test_deriv(Poly):
+    # Check that the derivative is the inverse of integration. It is
+    # assumed that the integration has been checked elsewhere.
+    d = Poly.domain + random((2,)) * .25
+    w = Poly.window + random((2,)) * .25
+    p1 = Poly([1, 2, 3], domain=d, window=w)
+    p2 = p1.integ(2, k=[1, 2])
+    p3 = p1.integ(1, k=[1])
+    assert_almost_equal(p2.deriv(1).coef, p3.coef)
+    assert_almost_equal(p2.deriv(2).coef, p1.coef)
+    # default domain and window
+    p1 = Poly([1, 2, 3])
+    p2 = p1.integ(2, k=[1, 2])
+    p3 = p1.integ(1, k=[1])
+    assert_almost_equal(p2.deriv(1).coef, p3.coef)
+    assert_almost_equal(p2.deriv(2).coef, p1.coef)
+
+
+def test_linspace(Poly):
+    d = Poly.domain + random((2,)) * .25
+    w = Poly.window + random((2,)) * .25
+    p = Poly([1, 2, 3], domain=d, window=w)
+    # check default domain
+    xtgt = np.linspace(d[0], d[1], 20)
+    ytgt = p(xtgt)
+    xres, yres = p.linspace(20)
+    assert_almost_equal(xres, xtgt)
+    assert_almost_equal(yres, ytgt)
+    # check specified domain
+    xtgt = np.linspace(0, 2, 20)
+    ytgt = p(xtgt)
+    xres, yres = p.linspace(20, domain=[0, 2])
+    assert_almost_equal(xres, xtgt)
+    assert_almost_equal(yres, ytgt)
+
+
+def test_pow(Poly):
+    d = Poly.domain + random((2,)) * .25
+    w = Poly.window + random((2,)) * .25
+    tgt = Poly([1], domain=d, window=w)
+    tst = Poly([1, 2, 3], domain=d, window=w)
+    for i in range(5):
+        assert_poly_almost_equal(tst**i, tgt)
+        tgt = tgt * tst
+    # default domain and window
+    tgt = Poly([1])
+    tst = Poly([1, 2, 3])
+    for i in range(5):
+        assert_poly_almost_equal(tst**i, tgt)
+        tgt = tgt * tst
+    # check error for invalid powers
+    assert_raises(ValueError, op.pow, tgt, 1.5)
+    assert_raises(ValueError, op.pow, tgt, -1)
+
+
+def test_call(Poly):
+    P = Polynomial
+    d = Poly.domain
+    x = np.linspace(d[0], d[1], 11)
+
+    # Check defaults
+    p = Poly.cast(P([1, 2, 3]))
+    tgt = 1 + x * (2 + 3 * x)
+    res = p(x)
+    assert_almost_equal(res, tgt)
+
+
+def test_call_with_list(Poly):
+    p = Poly([1, 2, 3])
+    x = [-1, 0, 2]
+    res = p(x)
+    assert_equal(res, p(np.array(x)))
+
+
+def test_cutdeg(Poly):
+    p = Poly([1, 2, 3])
+    assert_raises(ValueError, p.cutdeg, .5)
+    assert_raises(ValueError, p.cutdeg, -1)
+    assert_equal(len(p.cutdeg(3)), 3)
+    assert_equal(len(p.cutdeg(2)), 3)
+    assert_equal(len(p.cutdeg(1)), 2)
+    assert_equal(len(p.cutdeg(0)), 
1) + + +def test_truncate(Poly): + p = Poly([1, 2, 3]) + assert_raises(ValueError, p.truncate, .5) + assert_raises(ValueError, p.truncate, 0) + assert_equal(len(p.truncate(4)), 3) + assert_equal(len(p.truncate(3)), 3) + assert_equal(len(p.truncate(2)), 2) + assert_equal(len(p.truncate(1)), 1) + + +def test_trim(Poly): + c = [1, 1e-6, 1e-12, 0] + p = Poly(c) + assert_equal(p.trim().coef, c[:3]) + assert_equal(p.trim(1e-10).coef, c[:2]) + assert_equal(p.trim(1e-5).coef, c[:1]) + + +def test_mapparms(Poly): + # check with defaults. Should be identity. + d = Poly.domain + w = Poly.window + p = Poly([1], domain=d, window=w) + assert_almost_equal([0, 1], p.mapparms()) + # + w = 2 * d + 1 + p = Poly([1], domain=d, window=w) + assert_almost_equal([1, 2], p.mapparms()) + + +def test_ufunc_override(Poly): + p = Poly([1, 2, 3]) + x = np.ones(3) + assert_raises(TypeError, np.add, p, x) + assert_raises(TypeError, np.add, x, p) + + +# +# Test class method that only exists for some classes +# + + +class TestInterpolate: + + def f(self, x): + return x * (x - 1) * (x - 2) + + def test_raises(self): + assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) + assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) + + def test_dimensions(self): + for deg in range(1, 5): + assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) + + def test_approximation(self): + + def powx(x, p): + return x**p + + x = np.linspace(0, 2, 10) + for deg in range(10): + for t in range(deg + 1): + p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) + assert_almost_equal(p(x), powx(x, t), decimal=11) diff --git a/python/numpy/polynomial/tests/test_hermite.py b/python/numpy/polynomial/tests/test_hermite.py new file mode 100644 index 000000000..8bd3951f4 --- /dev/null +++ b/python/numpy/polynomial/tests/test_hermite.py @@ -0,0 +1,558 @@ +"""Tests for hermite module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite as herm +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +H0 = np.array([1]) +H1 = np.array([0, 2]) +H2 = np.array([-2, 0, 4]) +H3 = np.array([0, -12, 0, 8]) +H4 = np.array([12, 0, -48, 0, 16]) +H5 = np.array([0, 120, 0, -160, 0, 32]) +H6 = np.array([-120, 0, 720, 0, -480, 0, 64]) +H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128]) +H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256]) +H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512]) + +Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9] + + +def trim(x): + return herm.hermtrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermdomain(self): + assert_equal(herm.hermdomain, [-1, 1]) + + def test_hermzero(self): + assert_equal(herm.hermzero, [0]) + + def test_hermone(self): + assert_equal(herm.hermone, [1]) + + def test_hermx(self): + assert_equal(herm.hermx, [0, .5]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herm.hermadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herm.hermsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermmulx(self): + assert_equal(herm.hermmulx([0]), [0]) + assert_equal(herm.hermmulx([1]), [0, .5]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, .5] + assert_equal(herm.hermmulx(ser), tgt) + + def test_hermmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = herm.hermval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = herm.hermval(self.x, pol2) + pol3 = herm.hermmul(pol1, pol2) + val3 = herm.hermval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_hermdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = herm.hermadd(ci, cj) + quo, rem = herm.hermdiv(tgt, ci) + res = herm.hermadd(herm.hermmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herm.hermmul, [c] * j, np.array([1])) + res = herm.hermpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2.5, 1., .75]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermval(self): + # check empty input + assert_equal(herm.hermval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Hlist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herm.hermval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(herm.hermval(x, 
[1]).shape, dims) + assert_equal(herm.hermval(x, [1, 0]).shape, dims) + assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims) + + def test_hermval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = herm.hermval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herm.hermval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermgrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herm.hermgrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermgrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_hermgrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herm.hermgrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herm.hermgrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_hermint(self): + # check exceptions + assert_raises(TypeError, herm.hermint, [0], .5) + assert_raises(ValueError, herm.hermint, [0], -1) + assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) + assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) + assert_raises(ValueError, herm.hermint, [0], scl=[0]) + assert_raises(TypeError, herm.hermint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = herm.hermint([0], m=i, k=k) + assert_almost_equal(res, [0, .5]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i]) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herm.hermval(-1, hermint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + hermpol = herm.poly2herm(pol) + hermint = herm.hermint(hermpol, m=1, k=[i], scl=2) + res = herm.herm2poly(hermint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1) + res = herm.hermint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k]) + res = herm.hermint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check 
multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1) + res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herm.hermint(tgt, m=1, k=[k], scl=2) + res = herm.hermint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T + res = herm.hermint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c) for c in c2d]) + res = herm.hermint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermint(c, k=3) for c in c2d]) + res = herm.hermint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermder(self): + # check exceptions + assert_raises(TypeError, herm.hermder, [0], .5) + assert_raises(ValueError, herm.hermder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = herm.hermder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herm.hermder(herm.hermint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T + res = herm.hermder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herm.hermder(c) for c in c2d]) + res = herm.hermder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_hermvander(self): + # check for 1d x + x = np.arange(3) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herm.hermvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herm.hermval(x, coef)) + + def test_hermvander2d(self): + # also tests hermval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herm.hermvander2d(x1, x2, [1, 2]) + tgt = herm.hermval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermvander3d(self): + # also tests hermval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herm.hermvander3d(x1, x2, x3, [1, 2, 3]) + tgt = herm.hermval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermfit(self): + def f(x): + return x * 
(x - 1) * (x - 2)
+
+        def f2(x):
+            return x**4 + x**2 + 1
+
+        # Test exceptions
+        assert_raises(ValueError, herm.hermfit, [1], [1], -1)
+        assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
+        assert_raises(TypeError, herm.hermfit, [], [1], 0)
+        assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
+        assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
+        assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
+        assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
+        assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])
+        assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])
+        assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])
+        assert_raises(TypeError, herm.hermfit, [1], [1], [])
+
+        # Test fit
+        x = np.linspace(0, 2)
+        y = f(x)
+        #
+        coef3 = herm.hermfit(x, y, 3)
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(herm.hermval(x, coef3), y)
+        coef3 = herm.hermfit(x, y, [0, 1, 2, 3])
+        assert_equal(len(coef3), 4)
+        assert_almost_equal(herm.hermval(x, coef3), y)
+        #
+        coef4 = herm.hermfit(x, y, 4)
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herm.hermval(x, coef4), y)
+        coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herm.hermval(x, coef4), y)
+        # check things still work if deg is not in strict increasing
+        coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
+        assert_equal(len(coef4), 5)
+        assert_almost_equal(herm.hermval(x, coef4), y)
+        #
+        coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
+        assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
+        # test weighting
+        w = np.zeros_like(x)
+        yw = y.copy()
+        w[1::2] = 1
+        yw[0::2] = 0
+        wcoef3 = herm.hermfit(x, yw, 3, w=w)
+        assert_almost_equal(wcoef3, coef3)
+        wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef3, coef3)
+        #
+        wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
+        assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
+        # test scaling with complex x values whose squares
+        # sum to zero.
+        x = [1, 1j, -1, -1j]
+        assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
+        assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
+        # test fitting only even Hermite polynomials
+        x = np.linspace(-1, 1)
+        y = f2(x)
+        coef1 = herm.hermfit(x, y, 4)
+        assert_almost_equal(herm.hermval(x, coef1), y)
+        coef2 = herm.hermfit(x, y, [0, 2, 4])
+        assert_almost_equal(herm.hermval(x, coef2), y)
+        assert_almost_equal(coef1, coef2)
+
+
+class TestCompanion:
+
+    def test_raises(self):
+        assert_raises(ValueError, herm.hermcompanion, [])
+        assert_raises(ValueError, herm.hermcompanion, [1])
+
+    def test_dimensions(self):
+        for i in range(1, 5):
+            coef = [0] * i + [1]
+            assert_(herm.hermcompanion(coef).shape == (i, i))
+
+    def test_linear_root(self):
+        assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
+
+
+class TestGauss:
+
+    def test_100(self):
+        x, w = herm.hermgauss(100)
+
+        # test orthogonality. Note that the results need to be normalized,
+        # otherwise the huge values that can arise from fast growing
+        # functions like Laguerre can be very confusing.
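+        # hermgauss targets integrals of the form f(x) * exp(-x**2) over
+        # the real line, so the weights of any rule sum to sqrt(pi), the
+        # integral of the weight function itself.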
+ v = herm.hermvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermfromroots(self): + res = herm.hermfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = herm.hermfromroots(roots) + res = herm.hermval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herm.herm2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermroots(self): + assert_almost_equal(herm.hermroots([1]), []) + assert_almost_equal(herm.hermroots([1, 1]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herm.hermroots(herm.hermfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herm.hermtrim, coef, -1) + + # Test results + assert_equal(herm.hermtrim(coef), coef[:-1]) + assert_equal(herm.hermtrim(coef, 1), coef[:-3]) + assert_equal(herm.hermtrim(coef, 2), [0]) + + def test_hermline(self): + assert_equal(herm.hermline(3, 4), [3, 2]) + + def test_herm2poly(self): + for i in range(10): + assert_almost_equal(herm.herm2poly([0] * i + [1]), Hlist[i]) + + def test_poly2herm(self): + for i in range(10): + assert_almost_equal(herm.poly2herm(Hlist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-x**2) + res = herm.hermweight(x) + assert_almost_equal(res, tgt) diff --git a/python/numpy/polynomial/tests/test_hermite_e.py b/python/numpy/polynomial/tests/test_hermite_e.py new file mode 100644 index 000000000..29f34f663 --- /dev/null +++ b/python/numpy/polynomial/tests/test_hermite_e.py @@ -0,0 +1,559 @@ +"""Tests for hermite_e module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.hermite_e as herme +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +He0 = np.array([1]) +He1 = np.array([0, 1]) +He2 = np.array([-1, 0, 1]) +He3 = np.array([0, -3, 0, 1]) +He4 = np.array([3, 0, -6, 0, 1]) +He5 = np.array([0, 15, 0, -10, 0, 1]) +He6 = np.array([-15, 0, 45, 0, -15, 0, 1]) +He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1]) +He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1]) +He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1]) + +Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9] + + +def trim(x): + return herme.hermetrim(x, tol=1e-6) + + +class TestConstants: + + def test_hermedomain(self): + assert_equal(herme.hermedomain, [-1, 1]) + + def test_hermezero(self): + assert_equal(herme.hermezero, [0]) + + def test_hermeone(self): + assert_equal(herme.hermeone, [1]) + + def test_hermex(self): + assert_equal(herme.hermex, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_hermeadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = herme.hermeadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermesub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = herme.hermesub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermemulx(self): + assert_equal(herme.hermemulx([0]), [0]) + assert_equal(herme.hermemulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i, 0, 1] + assert_equal(herme.hermemulx(ser), tgt) + + def test_hermemul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = herme.hermeval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = herme.hermeval(self.x, pol2) + pol3 = herme.hermemul(pol1, pol2) + val3 = herme.hermeval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_hermediv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = herme.hermeadd(ci, cj) + quo, rem = herme.hermediv(tgt, ci) + res = herme.hermeadd(herme.hermemul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_hermepow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(herme.hermemul, [c] * j, np.array([1])) + res = herme.hermepow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([4., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_hermeval(self): + # check empty input + assert_equal(herme.hermeval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Helist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = herme.hermeval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = 
np.zeros(dims) + assert_equal(herme.hermeval(x, [1]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0]).shape, dims) + assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims) + + def test_hermeval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = herme.hermeval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermeval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_hermeval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = herme.hermeval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermeval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_hermegrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = herme.hermegrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermegrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_hermegrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = herme.hermegrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = herme.hermegrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_hermeint(self): + # check exceptions + assert_raises(TypeError, herme.hermeint, [0], .5) + assert_raises(ValueError, herme.hermeint, [0], -1) + assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) + assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) + assert_raises(ValueError, herme.hermeint, [0], scl=[0]) + assert_raises(TypeError, herme.hermeint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = herme.hermeint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i]) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1) + assert_almost_equal(herme.hermeval(-1, hermeint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + hermepol = herme.poly2herme(pol) + hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2) + res = herme.herme2poly(hermeint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1) + res = herme.hermeint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = 
herme.hermeint(tgt, m=1, k=[k]) + res = herme.hermeint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1) + res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = herme.hermeint(tgt, m=1, k=[k], scl=2) + res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T + res = herme.hermeint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c) for c in c2d]) + res = herme.hermeint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d]) + res = herme.hermeint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_hermeder(self): + # check exceptions + assert_raises(TypeError, herme.hermeder, [0], .5) + assert_raises(ValueError, herme.hermeder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = herme.hermeder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herme.hermeder(herme.hermeint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = herme.hermeder( + herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermeder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T + res = herme.hermeder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([herme.hermeder(c) for c in c2d]) + res = herme.hermeder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_hermevander(self): + # check for 1d x + x = np.arange(3) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = herme.hermevander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], herme.hermeval(x, coef)) + + def test_hermevander2d(self): + # also tests hermeval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = herme.hermevander2d(x1, x2, [1, 2]) + tgt = herme.hermeval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_hermevander3d(self): + # also tests hermeval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = herme.hermevander3d(x1, x2, x3, [1, 2, 3]) + tgt = herme.hermeval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + 
assert_almost_equal(res, tgt) + + # check shape + van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_hermefit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, herme.hermefit, [1], [1], -1) + assert_raises(TypeError, herme.hermefit, [[1]], [1], 0) + assert_raises(TypeError, herme.hermefit, [], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0) + assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0) + assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, herme.hermefit, [1], [1], [-1,]) + assert_raises(ValueError, herme.hermefit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, herme.hermefit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = herme.hermefit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + coef3 = herme.hermefit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(herme.hermeval(x, coef3), y) + # + coef4 = herme.hermefit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + coef4 = herme.hermefit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = herme.hermefit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(herme.hermeval(x, coef4), y) + # + coef2d = herme.hermefit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = herme.hermefit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = herme.hermefit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = herme.hermefit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(herme.hermefit(x, x, 1), [0, 1]) + assert_almost_equal(herme.hermefit(x, x, [0, 1]), [0, 1]) + # test fitting only even HermiteE polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = herme.hermefit(x, y, 4) + assert_almost_equal(herme.hermeval(x, coef1), y) + coef2 = herme.hermefit(x, y, [0, 2, 4]) + assert_almost_equal(herme.hermeval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, herme.hermecompanion, []) + assert_raises(ValueError, herme.hermecompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(herme.hermecompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(herme.hermecompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = herme.hermegauss(100) + + # test orthogonality. 
Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. + v = herme.hermevander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = np.sqrt(2 * np.pi) + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_hermefromroots(self): + res = herme.hermefromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = herme.hermefromroots(roots) + res = herme.hermeval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(herme.herme2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_hermeroots(self): + assert_almost_equal(herme.hermeroots([1]), []) + assert_almost_equal(herme.hermeroots([1, 1]), [-1]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = herme.hermeroots(herme.hermefromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_hermetrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, herme.hermetrim, coef, -1) + + # Test results + assert_equal(herme.hermetrim(coef), coef[:-1]) + assert_equal(herme.hermetrim(coef, 1), coef[:-3]) + assert_equal(herme.hermetrim(coef, 2), [0]) + + def test_hermeline(self): + assert_equal(herme.hermeline(3, 4), [3, 4]) + + def test_herme2poly(self): + for i in range(10): + assert_almost_equal(herme.herme2poly([0] * i + [1]), Helist[i]) + + def test_poly2herme(self): + for i in range(10): + assert_almost_equal(herme.poly2herme(Helist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-5, 5, 11) + tgt = np.exp(-.5 * x**2) + res = herme.hermeweight(x) + assert_almost_equal(res, tgt) diff --git a/python/numpy/polynomial/tests/test_laguerre.py b/python/numpy/polynomial/tests/test_laguerre.py new file mode 100644 index 000000000..6793b7804 --- /dev/null +++ b/python/numpy/polynomial/tests/test_laguerre.py @@ -0,0 +1,540 @@ +"""Tests for laguerre module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.laguerre as lag +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +L0 = np.array([1]) / 1 +L1 = np.array([1, -1]) / 1 +L2 = np.array([2, -4, 1]) / 2 +L3 = np.array([6, -18, 9, -1]) / 6 +L4 = np.array([24, -96, 72, -16, 1]) / 24 +L5 = np.array([120, -600, 600, -200, 25, -1]) / 120 +L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1]) / 720 + +Llist = [L0, L1, L2, L3, L4, L5, L6] + + +def trim(x): + return lag.lagtrim(x, tol=1e-6) + + +class TestConstants: + + def test_lagdomain(self): + assert_equal(lag.lagdomain, [0, 1]) + + def test_lagzero(self): + assert_equal(lag.lagzero, [0]) + + def test_lagone(self): + assert_equal(lag.lagone, [1]) + + def test_lagx(self): + assert_equal(lag.lagx, [1, -1]) + + +class TestArithmetic: + x = np.linspace(-3, 3, 100) + + def test_lagadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = lag.lagadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = lag.lagsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagmulx(self): + assert_equal(lag.lagmulx([0]), [0]) + assert_equal(lag.lagmulx([1]), [1, -1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [-i, 2 * i + 1, -(i + 1)] + assert_almost_equal(lag.lagmulx(ser), tgt) + + def test_lagmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = lag.lagval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = lag.lagval(self.x, pol2) + pol3 = lag.lagmul(pol1, pol2) + val3 = lag.lagval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_lagdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = lag.lagadd(ci, cj) + quo, rem = lag.lagdiv(tgt, ci) + res = lag.lagadd(lag.lagmul(quo, ci), rem) + assert_almost_equal(trim(res), trim(tgt), err_msg=msg) + + def test_lagpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(lag.lagmul, [c] * j, np.array([1])) + res = lag.lagpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([9., -14., 6.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_lagval(self): + # check empty input + assert_equal(lag.lagval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(7): + msg = f"At i={i}" + tgt = y[i] + res = lag.lagval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(lag.lagval(x, [1]).shape, dims) + assert_equal(lag.lagval(x, [1, 0]).shape, dims) + assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims) + + def test_lagval2d(self): + x1, x2, x3 = self.x + 
y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = lag.lagval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.lagval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_lagval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = lag.lagval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.lagval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_laggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = lag.laggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.laggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_laggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = lag.laggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = lag.laggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_lagint(self): + # check exceptions + assert_raises(TypeError, lag.lagint, [0], .5) + assert_raises(ValueError, lag.lagint, [0], -1) + assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) + assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) + assert_raises(ValueError, lag.lagint, [0], scl=[0]) + assert_raises(TypeError, lag.lagint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = lag.lagint([0], m=i, k=k) + assert_almost_equal(res, [1, -1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i]) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(lag.lagval(-1, lagint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + lagpol = lag.poly2lag(pol) + lagint = lag.lagint(lagpol, m=1, k=[i], scl=2) + res = lag.lag2poly(lagint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1) + res = lag.lagint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k]) + res = lag.lagint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1) + res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1) + 
assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = lag.lagint(tgt, m=1, k=[k], scl=2) + res = lag.lagint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T + res = lag.lagint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c) for c in c2d]) + res = lag.lagint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagint(c, k=3) for c in c2d]) + res = lag.lagint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_lagder(self): + # check exceptions + assert_raises(TypeError, lag.lagder, [0], .5) + assert_raises(ValueError, lag.lagder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = lag.lagder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = lag.lagder(lag.lagint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T + res = lag.lagder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([lag.lagder(c) for c in c2d]) + res = lag.lagder(c2d, axis=1) + assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_lagvander(self): + # check for 1d x + x = np.arange(3) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = lag.lagvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], lag.lagval(x, coef)) + + def test_lagvander2d(self): + # also tests lagval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = lag.lagvander2d(x1, x2, [1, 2]) + tgt = lag.lagval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_lagvander3d(self): + # also tests lagval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = lag.lagvander3d(x1, x2, x3, [1, 2, 3]) + tgt = lag.lagval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + +class TestFitting: + + def test_lagfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + # Test exceptions + assert_raises(ValueError, lag.lagfit, [1], [1], -1) + assert_raises(TypeError, lag.lagfit, [[1]], [1], 0) + assert_raises(TypeError, lag.lagfit, [], [1], 0) + assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0) + assert_raises(TypeError, lag.lagfit, [1, 2], [1], 
0) + assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, lag.lagfit, [1], [1], [-1,]) + assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, lag.lagfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = lag.lagfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + coef3 = lag.lagfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(lag.lagval(x, coef3), y) + # + coef4 = lag.lagfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(lag.lagval(x, coef4), y) + # + coef2d = lag.lagfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = lag.lagfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(lag.lagfit(x, x, 1), [1, -1]) + assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1]) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, lag.lagcompanion, []) + assert_raises(ValueError, lag.lagcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(lag.lagcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5) + + +class TestGauss: + + def test_100(self): + x, w = lag.laggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
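+ # A 100-point Gauss-Laguerre rule integrates polynomials up to + # degree 199 exactly against the weight exp(-x), which covers every + # product L_i(x)*L_j(x) with i, j <= 99, so the normalized weighted + # Vandermonde product below should be the identity matrix.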
+ v = lag.lagvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 1.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_lagfromroots(self): + res = lag.lagfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = lag.lagfromroots(roots) + res = lag.lagval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(lag.lag2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_lagroots(self): + assert_almost_equal(lag.lagroots([1]), []) + assert_almost_equal(lag.lagroots([0, 1]), [1]) + for i in range(2, 5): + tgt = np.linspace(0, 3, i) + res = lag.lagroots(lag.lagfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_lagtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, lag.lagtrim, coef, -1) + + # Test results + assert_equal(lag.lagtrim(coef), coef[:-1]) + assert_equal(lag.lagtrim(coef, 1), coef[:-3]) + assert_equal(lag.lagtrim(coef, 2), [0]) + + def test_lagline(self): + assert_equal(lag.lagline(3, 4), [7, -4]) + + def test_lag2poly(self): + for i in range(7): + assert_almost_equal(lag.lag2poly([0] * i + [1]), Llist[i]) + + def test_poly2lag(self): + for i in range(7): + assert_almost_equal(lag.poly2lag(Llist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(0, 10, 11) + tgt = np.exp(-x) + res = lag.lagweight(x) + assert_almost_equal(res, tgt) diff --git a/python/numpy/polynomial/tests/test_legendre.py b/python/numpy/polynomial/tests/test_legendre.py new file mode 100644 index 000000000..d0ed7060c --- /dev/null +++ b/python/numpy/polynomial/tests/test_legendre.py @@ -0,0 +1,571 @@ +"""Tests for legendre module. 
+ +""" +from functools import reduce + +import numpy as np +import numpy.polynomial.legendre as leg +from numpy.polynomial.polynomial import polyval +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + +L0 = np.array([1]) +L1 = np.array([0, 1]) +L2 = np.array([-1, 0, 3]) / 2 +L3 = np.array([0, -3, 0, 5]) / 2 +L4 = np.array([3, 0, -30, 0, 35]) / 8 +L5 = np.array([0, 15, 0, -70, 0, 63]) / 8 +L6 = np.array([-5, 0, 105, 0, -315, 0, 231]) / 16 +L7 = np.array([0, -35, 0, 315, 0, -693, 0, 429]) / 16 +L8 = np.array([35, 0, -1260, 0, 6930, 0, -12012, 0, 6435]) / 128 +L9 = np.array([0, 315, 0, -4620, 0, 18018, 0, -25740, 0, 12155]) / 128 + +Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9] + + +def trim(x): + return leg.legtrim(x, tol=1e-6) + + +class TestConstants: + + def test_legdomain(self): + assert_equal(leg.legdomain, [-1, 1]) + + def test_legzero(self): + assert_equal(leg.legzero, [0]) + + def test_legone(self): + assert_equal(leg.legone, [1]) + + def test_legx(self): + assert_equal(leg.legx, [0, 1]) + + +class TestArithmetic: + x = np.linspace(-1, 1, 100) + + def test_legadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = leg.legadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legsub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = leg.legsub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legmulx(self): + assert_equal(leg.legmulx([0]), [0]) + assert_equal(leg.legmulx([1]), [0, 1]) + for i in range(1, 5): + tmp = 2 * i + 1 + ser = [0] * i + [1] + tgt = [0] * (i - 1) + [i / tmp, 0, (i + 1) / tmp] + assert_equal(leg.legmulx(ser), tgt) + + def test_legmul(self): + # check values of result + for i in range(5): + pol1 = [0] * i + [1] + val1 = leg.legval(self.x, pol1) + for j in range(5): + msg = f"At i={i}, j={j}" + pol2 = [0] * j + [1] + val2 = leg.legval(self.x, pol2) + pol3 = leg.legmul(pol1, pol2) + val3 = leg.legval(self.x, pol3) + assert_(len(pol3) == i + j + 1, msg) + assert_almost_equal(val3, val1 * val2, err_msg=msg) + + def test_legdiv(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1] + cj = [0] * j + [1] + tgt = leg.legadd(ci, cj) + quo, rem = leg.legdiv(tgt, ci) + res = leg.legadd(leg.legmul(quo, ci), rem) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_legpow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(leg.legmul, [c] * j, np.array([1])) + res = leg.legpow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([2., 2., 2.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = polyval(x, [1., 2., 3.]) + + def test_legval(self): + # check empty input + assert_equal(leg.legval([], [1]).size, 0) + + # check normal input) + x = np.linspace(-1, 1) + y = [polyval(x, c) for c in Llist] + for i in range(10): + msg = f"At i={i}" + tgt = y[i] + res = leg.legval(x, [0] * i + [1]) + assert_almost_equal(res, tgt, err_msg=msg) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(leg.legval(x, 
[1]).shape, dims) + assert_equal(leg.legval(x, [1, 0]).shape, dims) + assert_equal(leg.legval(x, [1, 0, 0]).shape, dims) + + def test_legval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = leg.legval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.legval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_legval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = leg.legval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.legval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_leggrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = leg.leggrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.leggrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_leggrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = leg.leggrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = leg.leggrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_legint(self): + # check exceptions + assert_raises(TypeError, leg.legint, [0], .5) + assert_raises(ValueError, leg.legint, [0], -1) + assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) + assert_raises(ValueError, leg.legint, [0], lbnd=[0]) + assert_raises(ValueError, leg.legint, [0], scl=[0]) + assert_raises(TypeError, leg.legint, [0], axis=.5) + + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = leg.legint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i]) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], lbnd=-1) + assert_almost_equal(leg.legval(-1, legint), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + legpol = leg.poly2leg(pol) + legint = leg.legint(legpol, m=1, k=[i], scl=2) + res = leg.leg2poly(legint) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1) + res = leg.legint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k]) + res = leg.legint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = 
[0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1) + res = leg.legint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = leg.legint(tgt, m=1, k=[k], scl=2) + res = leg.legint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legint(c) for c in c2d.T]).T + res = leg.legint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c) for c in c2d]) + res = leg.legint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legint(c, k=3) for c in c2d]) + res = leg.legint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + def test_legint_zerointord(self): + assert_equal(leg.legint((1, 2, 3), 0), (1, 2, 3)) + + +class TestDerivative: + + def test_legder(self): + # check exceptions + assert_raises(TypeError, leg.legder, [0], .5) + assert_raises(ValueError, leg.legder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = leg.legder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = leg.legder(leg.legint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([leg.legder(c) for c in c2d.T]).T + res = leg.legder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([leg.legder(c) for c in c2d]) + res = leg.legder(c2d, axis=1) + assert_almost_equal(res, tgt) + + def test_legder_orderhigherthancoeff(self): + c = (1, 2, 3, 4) + assert_equal(leg.legder(c, 4), [0]) + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_legvander(self): + # check for 1d x + x = np.arange(3) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = leg.legvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], leg.legval(x, coef)) + + def test_legvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = leg.legvander2d(x1, x2, [1, 2]) + tgt = leg.legval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_legvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = leg.legvander3d(x1, x2, x3, [1, 2, 3]) + tgt = leg.legval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_legvander_negdeg(self): 
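+ # a negative degree has no meaning for a Vandermonde matrix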
+ assert_raises(ValueError, leg.legvander, (1, 2, 3), -1) + + +class TestFitting: + + def test_legfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, leg.legfit, [1], [1], -1) + assert_raises(TypeError, leg.legfit, [[1]], [1], 0) + assert_raises(TypeError, leg.legfit, [], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0) + assert_raises(TypeError, leg.legfit, [1, 2], [1], 0) + assert_raises(TypeError, leg.legfit, [1], [1, 2], 0) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, leg.legfit, [1], [1], [-1,]) + assert_raises(ValueError, leg.legfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, leg.legfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = leg.legfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + coef3 = leg.legfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(leg.legval(x, coef3), y) + # + coef4 = leg.legfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + coef4 = leg.legfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # check things still work if deg is not in strict increasing + coef4 = leg.legfit(x, y, [2, 3, 4, 1, 0]) + assert_equal(len(coef4), 5) + assert_almost_equal(leg.legval(x, coef4), y) + # + coef2d = leg.legfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = leg.legfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + y[0::2] = 0 + wcoef3 = leg.legfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = leg.legfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = leg.legfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. + x = [1, 1j, -1, -1j] + assert_almost_equal(leg.legfit(x, x, 1), [0, 1]) + assert_almost_equal(leg.legfit(x, x, [0, 1]), [0, 1]) + # test fitting only even Legendre polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = leg.legfit(x, y, 4) + assert_almost_equal(leg.legval(x, coef1), y) + coef2 = leg.legfit(x, y, [0, 2, 4]) + assert_almost_equal(leg.legval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, leg.legcompanion, []) + assert_raises(ValueError, leg.legcompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(leg.legcompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(leg.legcompanion([1, 2])[0, 0] == -.5) + + +class TestGauss: + + def test_100(self): + x, w = leg.leggauss(100) + + # test orthogonality. Note that the results need to be normalized, + # otherwise the huge values that can arise from fast growing + # functions like Laguerre can be very confusing. 
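+ # A 100-point Gauss-Legendre rule integrates polynomials up to + # degree 199 exactly on [-1, 1], which covers every product + # P_i(x)*P_j(x) with i, j <= 99 checked below.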
+ v = leg.legvander(x, 99) + vv = np.dot(v.T * w, v) + vd = 1 / np.sqrt(vv.diagonal()) + vv = vd[:, None] * vv * vd + assert_almost_equal(vv, np.eye(100)) + + # check that the integral of 1 is correct + tgt = 2.0 + assert_almost_equal(w.sum(), tgt) + + +class TestMisc: + + def test_legfromroots(self): + res = leg.legfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + pol = leg.legfromroots(roots) + res = leg.legval(roots, pol) + tgt = 0 + assert_(len(pol) == i + 1) + assert_almost_equal(leg.leg2poly(pol)[-1], 1) + assert_almost_equal(res, tgt) + + def test_legroots(self): + assert_almost_equal(leg.legroots([1]), []) + assert_almost_equal(leg.legroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = leg.legroots(leg.legfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + def test_legtrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, leg.legtrim, coef, -1) + + # Test results + assert_equal(leg.legtrim(coef), coef[:-1]) + assert_equal(leg.legtrim(coef, 1), coef[:-3]) + assert_equal(leg.legtrim(coef, 2), [0]) + + def test_legline(self): + assert_equal(leg.legline(3, 4), [3, 4]) + + def test_legline_zeroscl(self): + assert_equal(leg.legline(3, 0), [3]) + + def test_leg2poly(self): + for i in range(10): + assert_almost_equal(leg.leg2poly([0] * i + [1]), Llist[i]) + + def test_poly2leg(self): + for i in range(10): + assert_almost_equal(leg.poly2leg(Llist[i]), [0] * i + [1]) + + def test_weight(self): + x = np.linspace(-1, 1, 11) + tgt = 1. + res = leg.legweight(x) + assert_almost_equal(res, tgt) diff --git a/python/numpy/polynomial/tests/test_polynomial.py b/python/numpy/polynomial/tests/test_polynomial.py new file mode 100644 index 000000000..27513fd68 --- /dev/null +++ b/python/numpy/polynomial/tests/test_polynomial.py @@ -0,0 +1,669 @@ +"""Tests for polynomial module. 
+ +""" +import pickle +from copy import deepcopy +from fractions import Fraction +from functools import reduce + +import numpy as np +import numpy.polynomial.polynomial as poly +import numpy.polynomial.polyutils as pu +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_array_equal, + assert_equal, + assert_raises, + assert_raises_regex, + assert_warns, +) + + +def trim(x): + return poly.polytrim(x, tol=1e-6) + + +T0 = [1] +T1 = [0, 1] +T2 = [-1, 0, 2] +T3 = [0, -3, 0, 4] +T4 = [1, 0, -8, 0, 8] +T5 = [0, 5, 0, -20, 0, 16] +T6 = [-1, 0, 18, 0, -48, 0, 32] +T7 = [0, -7, 0, 56, 0, -112, 0, 64] +T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128] +T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256] + +Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9] + + +class TestConstants: + + def test_polydomain(self): + assert_equal(poly.polydomain, [-1, 1]) + + def test_polyzero(self): + assert_equal(poly.polyzero, [0]) + + def test_polyone(self): + assert_equal(poly.polyone, [1]) + + def test_polyx(self): + assert_equal(poly.polyx, [0, 1]) + + def test_copy(self): + x = poly.Polynomial([1, 2, 3]) + y = deepcopy(x) + assert_equal(x, y) + + def test_pickle(self): + x = poly.Polynomial([1, 2, 3]) + y = pickle.loads(pickle.dumps(x)) + assert_equal(x, y) + +class TestArithmetic: + + def test_polyadd(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] += 1 + res = poly.polyadd([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polysub(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(max(i, j) + 1) + tgt[i] += 1 + tgt[j] -= 1 + res = poly.polysub([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polymulx(self): + assert_equal(poly.polymulx([0]), [0]) + assert_equal(poly.polymulx([1]), [0, 1]) + for i in range(1, 5): + ser = [0] * i + [1] + tgt = [0] * (i + 1) + [1] + assert_equal(poly.polymulx(ser), tgt) + + def test_polymul(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + tgt = np.zeros(i + j + 1) + tgt[i + j] += 1 + res = poly.polymul([0] * i + [1], [0] * j + [1]) + assert_equal(trim(res), trim(tgt), err_msg=msg) + + def test_polydiv(self): + # check zero division + assert_raises(ZeroDivisionError, poly.polydiv, [1], [0]) + + # check scalar division + quo, rem = poly.polydiv([2], [2]) + assert_equal((quo, rem), (1, 0)) + quo, rem = poly.polydiv([2, 2], [2]) + assert_equal((quo, rem), ((1, 1), 0)) + + # check rest. 
+ for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + ci = [0] * i + [1, 2] + cj = [0] * j + [1, 2] + tgt = poly.polyadd(ci, cj) + quo, rem = poly.polydiv(tgt, ci) + res = poly.polyadd(poly.polymul(quo, ci), rem) + assert_equal(res, tgt, err_msg=msg) + + def test_polypow(self): + for i in range(5): + for j in range(5): + msg = f"At i={i}, j={j}" + c = np.arange(i + 1) + tgt = reduce(poly.polymul, [c] * j, np.array([1])) + res = poly.polypow(c, j) + assert_equal(trim(res), trim(tgt), err_msg=msg) + +class TestFraction: + + def test_Fraction(self): + # assert we can use Polynomials with coefficients of object dtype + f = Fraction(2, 3) + one = Fraction(1, 1) + zero = Fraction(0, 1) + p = poly.Polynomial([f, f], domain=[zero, one], window=[zero, one]) + + x = 2 * p + p ** 2 + assert_equal(x.coef, np.array([Fraction(16, 9), Fraction(20, 9), + Fraction(4, 9)], dtype=object)) + assert_equal(p.domain, [zero, one]) + assert_equal(p.coef.dtype, np.dtypes.ObjectDType()) + assert_(isinstance(p(f), Fraction)) + assert_equal(p(f), Fraction(10, 9)) + p_deriv = poly.Polynomial([Fraction(2, 3)], domain=[zero, one], + window=[zero, one]) + assert_equal(p.deriv(), p_deriv) + +class TestEvaluation: + # coefficients of 1 + 2*x + 3*x**2 + c1d = np.array([1., 2., 3.]) + c2d = np.einsum('i,j->ij', c1d, c1d) + c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d) + + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + y = poly.polyval(x, [1., 2., 3.]) + + def test_polyval(self): + # check empty input + assert_equal(poly.polyval([], [1]).size, 0) + + # check normal input + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(5): + tgt = y[i] + res = poly.polyval(x, [0] * i + [1]) + assert_almost_equal(res, tgt) + tgt = x * (x**2 - 1) + res = poly.polyval(x, [0, -1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(poly.polyval(x, [1]).shape, dims) + assert_equal(poly.polyval(x, [1, 0]).shape, dims) + assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) + + # check masked arrays are processed correctly + mask = [False, True, False] + mx = np.ma.array([1, 2, 3], mask=mask) + res = np.polyval([7, 5, 3], mx) + assert_array_equal(res.mask, mask) + + # check subtypes of ndarray are preserved + class C(np.ndarray): + pass + + cx = np.array([1, 2, 3]).view(C) + assert_equal(type(np.polyval([2, 3, 4], cx)), C) + + def test_polyvalfromroots(self): + # check exception for broadcasting x values over root array with + # too few dimensions + assert_raises(ValueError, poly.polyvalfromroots, + [1], [1], tensor=False) + + # check empty input + assert_equal(poly.polyvalfromroots([], [1]).size, 0) + assert_(poly.polyvalfromroots([], [1]).shape == (0,)) + + # check empty input + multidimensional roots + assert_equal(poly.polyvalfromroots([], [[1] * 5]).size, 0) + assert_(poly.polyvalfromroots([], [[1] * 5]).shape == (5, 0)) + + # check scalar input + assert_equal(poly.polyvalfromroots(1, 1), 0) + assert_(poly.polyvalfromroots(1, np.ones((3, 3))).shape == (3,)) + + # check normal input + x = np.linspace(-1, 1) + y = [x**i for i in range(5)] + for i in range(1, 5): + tgt = y[i] + res = poly.polyvalfromroots(x, [0] * i) + assert_almost_equal(res, tgt) + tgt = x * (x - 1) * (x + 1) + res = poly.polyvalfromroots(x, [-1, 0, 1]) + assert_almost_equal(res, tgt) + + # check that shape is preserved + for i in range(3): + dims = [2] * i + x = np.zeros(dims) + assert_equal(poly.polyvalfromroots(x, 
[1]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0]).shape, dims) + assert_equal(poly.polyvalfromroots(x, [1, 0, 0]).shape, dims) + + # check compatibility with factorization + ptest = [15, 2, -16, -2, 1] + r = poly.polyroots(ptest) + x = np.linspace(-1, 1) + assert_almost_equal(poly.polyval(x, ptest), + poly.polyvalfromroots(x, r)) + + # check multidimensional arrays of roots and values + # check tensor=False + rshape = (3, 5) + x = np.arange(-3, 2) + r = np.random.randint(-5, 5, size=rshape) + res = poly.polyvalfromroots(x, r, tensor=False) + tgt = np.empty(r.shape[1:]) + for ii in range(tgt.size): + tgt[ii] = poly.polyvalfromroots(x[ii], r[:, ii]) + assert_equal(res, tgt) + + # check tensor=True + x = np.vstack([x, 2 * x]) + res = poly.polyvalfromroots(x, r, tensor=True) + tgt = np.empty(r.shape[1:] + x.shape) + for ii in range(r.shape[1]): + for jj in range(x.shape[0]): + tgt[ii, jj, :] = poly.polyvalfromroots(x[jj], r[:, ii]) + assert_equal(res, tgt) + + def test_polyval2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval2d, x1, x2[:2], self.c2d) + + # test values + tgt = y1 * y2 + res = poly.polyval2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polyval2d(z, z, self.c2d) + assert_(res.shape == (2, 3)) + + def test_polyval3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test exceptions + assert_raises_regex(ValueError, 'incompatible', + poly.polyval3d, x1, x2, x3[:2], self.c3d) + + # test values + tgt = y1 * y2 * y3 + res = poly.polyval3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polyval3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3)) + + def test_polygrid2d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j->ij', y1, y2) + res = poly.polygrid2d(x1, x2, self.c2d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polygrid2d(z, z, self.c2d) + assert_(res.shape == (2, 3) * 2) + + def test_polygrid3d(self): + x1, x2, x3 = self.x + y1, y2, y3 = self.y + + # test values + tgt = np.einsum('i,j,k->ijk', y1, y2, y3) + res = poly.polygrid3d(x1, x2, x3, self.c3d) + assert_almost_equal(res, tgt) + + # test shape + z = np.ones((2, 3)) + res = poly.polygrid3d(z, z, z, self.c3d) + assert_(res.shape == (2, 3) * 3) + + +class TestIntegral: + + def test_polyint(self): + # check exceptions + assert_raises(TypeError, poly.polyint, [0], .5) + assert_raises(ValueError, poly.polyint, [0], -1) + assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) + assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) + assert_raises(ValueError, poly.polyint, [0], scl=[0]) + assert_raises(TypeError, poly.polyint, [0], axis=.5) + assert_raises(TypeError, poly.polyint, [1, 1], 1.) 
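+ # (the last case checks that a float integration order like 1. is + # rejected: m must be an integer)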
+ + # test integration of zero polynomial + for i in range(2, 5): + k = [0] * (i - 2) + [1] + res = poly.polyint([0], m=i, k=k) + assert_almost_equal(res, [0, 1]) + + # check single integration with integration constant + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [1 / scl] + res = poly.polyint(pol, m=1, k=[i]) + assert_almost_equal(trim(res), trim(tgt)) + + # check single integration with integration constant and lbnd + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + res = poly.polyint(pol, m=1, k=[i], lbnd=-1) + assert_almost_equal(poly.polyval(-1, res), i) + + # check single integration with integration constant and scaling + for i in range(5): + scl = i + 1 + pol = [0] * i + [1] + tgt = [i] + [0] * i + [2 / scl] + res = poly.polyint(pol, m=1, k=[i], scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with default k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1) + res = poly.polyint(pol, m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with defined k + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k]) + res = poly.polyint(pol, m=j, k=list(range(j))) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with lbnd + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], lbnd=-1) + res = poly.polyint(pol, m=j, k=list(range(j)), lbnd=-1) + assert_almost_equal(trim(res), trim(tgt)) + + # check multiple integrations with scaling + for i in range(5): + for j in range(2, 5): + pol = [0] * i + [1] + tgt = pol[:] + for k in range(j): + tgt = poly.polyint(tgt, m=1, k=[k], scl=2) + res = poly.polyint(pol, m=j, k=list(range(j)), scl=2) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyint_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyint(c) for c in c2d.T]).T + res = poly.polyint(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c) for c in c2d]) + res = poly.polyint(c2d, axis=1) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyint(c, k=3) for c in c2d]) + res = poly.polyint(c2d, k=3, axis=1) + assert_almost_equal(res, tgt) + + +class TestDerivative: + + def test_polyder(self): + # check exceptions + assert_raises(TypeError, poly.polyder, [0], .5) + assert_raises(ValueError, poly.polyder, [0], -1) + + # check that zeroth derivative does nothing + for i in range(5): + tgt = [0] * i + [1] + res = poly.polyder(tgt, m=0) + assert_equal(trim(res), trim(tgt)) + + # check that derivation is the inverse of integration + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = poly.polyder(poly.polyint(tgt, m=j), m=j) + assert_almost_equal(trim(res), trim(tgt)) + + # check derivation with scaling + for i in range(5): + for j in range(2, 5): + tgt = [0] * i + [1] + res = poly.polyder(poly.polyint(tgt, m=j, scl=2), m=j, scl=.5) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyder_axis(self): + # check that axis keyword works + c2d = np.random.random((3, 4)) + + tgt = np.vstack([poly.polyder(c) for c in c2d.T]).T + res = poly.polyder(c2d, axis=0) + assert_almost_equal(res, tgt) + + tgt = np.vstack([poly.polyder(c) for c in c2d]) + res = poly.polyder(c2d, axis=1) + 
assert_almost_equal(res, tgt) + + +class TestVander: + # some random values in [-1, 1) + x = np.random.random((3, 5)) * 2 - 1 + + def test_polyvander(self): + # check for 1d x + x = np.arange(3) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + # check for 2d x + x = np.array([[1, 2], [3, 4], [5, 6]]) + v = poly.polyvander(x, 3) + assert_(v.shape == (3, 2, 4)) + for i in range(4): + coef = [0] * i + [1] + assert_almost_equal(v[..., i], poly.polyval(x, coef)) + + def test_polyvander2d(self): + # also tests polyval2d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3)) + van = poly.polyvander2d(x1, x2, [1, 2]) + tgt = poly.polyval2d(x1, x2, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander2d([x1], [x2], [1, 2]) + assert_(van.shape == (1, 5, 6)) + + def test_polyvander3d(self): + # also tests polyval3d for non-square coefficient array + x1, x2, x3 = self.x + c = np.random.random((2, 3, 4)) + van = poly.polyvander3d(x1, x2, x3, [1, 2, 3]) + tgt = poly.polyval3d(x1, x2, x3, c) + res = np.dot(van, c.flat) + assert_almost_equal(res, tgt) + + # check shape + van = poly.polyvander3d([x1], [x2], [x3], [1, 2, 3]) + assert_(van.shape == (1, 5, 24)) + + def test_polyvandernegdeg(self): + x = np.arange(3) + assert_raises(ValueError, poly.polyvander, x, -1) + + +class TestCompanion: + + def test_raises(self): + assert_raises(ValueError, poly.polycompanion, []) + assert_raises(ValueError, poly.polycompanion, [1]) + + def test_dimensions(self): + for i in range(1, 5): + coef = [0] * i + [1] + assert_(poly.polycompanion(coef).shape == (i, i)) + + def test_linear_root(self): + assert_(poly.polycompanion([1, 2])[0, 0] == -.5) + + +class TestMisc: + + def test_polyfromroots(self): + res = poly.polyfromroots([]) + assert_almost_equal(trim(res), [1]) + for i in range(1, 5): + roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2]) + tgt = Tlist[i] + res = poly.polyfromroots(roots) * 2**(i - 1) + assert_almost_equal(trim(res), trim(tgt)) + + def test_polyroots(self): + assert_almost_equal(poly.polyroots([1]), []) + assert_almost_equal(poly.polyroots([1, 2]), [-.5]) + for i in range(2, 5): + tgt = np.linspace(-1, 1, i) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(trim(res), trim(tgt)) + + # Testing for larger root values + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. + assert_almost_equal(res, tgt, 15 - int(np.log10(i))) + for i in np.logspace(10, 25, num=1000, base=10): + tgt = np.array([-1, 1.01, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + # Adapting the expected precision according to the root value, + # to take into account numerical calculation error. 
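+ # float64 carries roughly 15-16 significant digits, so a root of + # magnitude 10**k is only located to about 10**(k - 15) in absolute + # terms; the expected decimal count shrinks accordingly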
+ assert_almost_equal(res, tgt, 14 - int(np.log10(i))) + + def test_polyfit(self): + def f(x): + return x * (x - 1) * (x - 2) + + def f2(x): + return x**4 + x**2 + 1 + + # Test exceptions + assert_raises(ValueError, poly.polyfit, [1], [1], -1) + assert_raises(TypeError, poly.polyfit, [[1]], [1], 0) + assert_raises(TypeError, poly.polyfit, [], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [[[1]]], 0) + assert_raises(TypeError, poly.polyfit, [1, 2], [1], 0) + assert_raises(TypeError, poly.polyfit, [1], [1, 2], 0) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[[1]]) + assert_raises(TypeError, poly.polyfit, [1], [1], 0, w=[1, 1]) + assert_raises(ValueError, poly.polyfit, [1], [1], [-1,]) + assert_raises(ValueError, poly.polyfit, [1], [1], [2, -1, 6]) + assert_raises(TypeError, poly.polyfit, [1], [1], []) + + # Test fit + x = np.linspace(0, 2) + y = f(x) + # + coef3 = poly.polyfit(x, y, 3) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + coef3 = poly.polyfit(x, y, [0, 1, 2, 3]) + assert_equal(len(coef3), 4) + assert_almost_equal(poly.polyval(x, coef3), y) + # + coef4 = poly.polyfit(x, y, 4) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + coef4 = poly.polyfit(x, y, [0, 1, 2, 3, 4]) + assert_equal(len(coef4), 5) + assert_almost_equal(poly.polyval(x, coef4), y) + # + coef2d = poly.polyfit(x, np.array([y, y]).T, 3) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + coef2d = poly.polyfit(x, np.array([y, y]).T, [0, 1, 2, 3]) + assert_almost_equal(coef2d, np.array([coef3, coef3]).T) + # test weighting + w = np.zeros_like(x) + yw = y.copy() + w[1::2] = 1 + yw[0::2] = 0 + wcoef3 = poly.polyfit(x, yw, 3, w=w) + assert_almost_equal(wcoef3, coef3) + wcoef3 = poly.polyfit(x, yw, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef3, coef3) + # + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, 3, w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + wcoef2d = poly.polyfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w) + assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T) + # test scaling with complex values x points whose square + # is zero when summed. 
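+ # for these points sum(x**2) == 1 - 1 + 1 - 1 == 0, so the column + # scaling inside the fit must use |x|**2 rather than x**2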
+ x = [1, 1j, -1, -1j] + assert_almost_equal(poly.polyfit(x, x, 1), [0, 1]) + assert_almost_equal(poly.polyfit(x, x, [0, 1]), [0, 1]) + # test fitting only even polynomials + x = np.linspace(-1, 1) + y = f2(x) + coef1 = poly.polyfit(x, y, 4) + assert_almost_equal(poly.polyval(x, coef1), y) + coef2 = poly.polyfit(x, y, [0, 2, 4]) + assert_almost_equal(poly.polyval(x, coef2), y) + assert_almost_equal(coef1, coef2) + + def test_polytrim(self): + coef = [2, -1, 1, 0] + + # Test exceptions + assert_raises(ValueError, poly.polytrim, coef, -1) + + # Test results + assert_equal(poly.polytrim(coef), coef[:-1]) + assert_equal(poly.polytrim(coef, 1), coef[:-3]) + assert_equal(poly.polytrim(coef, 2), [0]) + + def test_polyline(self): + assert_equal(poly.polyline(3, 4), [3, 4]) + + def test_polyline_zero(self): + assert_equal(poly.polyline(3, 0), [3]) + + def test_fit_degenerate_domain(self): + p = poly.Polynomial.fit([1], [2], deg=0) + assert_equal(p.coef, [2.]) + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) + assert_almost_equal(p.coef, [2.05]) + with assert_warns(pu.RankWarning): + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) + + def test_result_type(self): + w = np.array([-1, 1], dtype=np.float32) + p = np.polynomial.Polynomial(w, domain=w, window=w) + v = p(2) + assert_equal(v.dtype, np.float32) + + arr = np.polydiv(1, np.float32(1)) + assert_equal(arr[0].dtype, np.float64) diff --git a/python/numpy/polynomial/tests/test_polyutils.py b/python/numpy/polynomial/tests/test_polyutils.py new file mode 100644 index 000000000..96e88b9de --- /dev/null +++ b/python/numpy/polynomial/tests/test_polyutils.py @@ -0,0 +1,128 @@ +"""Tests for polyutils module. + +""" +import numpy as np +import numpy.polynomial.polyutils as pu +from numpy.testing import ( + assert_, + assert_almost_equal, + assert_equal, + assert_raises, +) + + +class TestMisc: + + def test_trimseq(self): + tgt = [1] + for num_trailing_zeros in range(5): + res = pu.trimseq([1] + [0] * num_trailing_zeros) + assert_equal(res, tgt) + + def test_trimseq_empty_input(self): + for empty_seq in [[], np.array([], dtype=np.int32)]: + assert_equal(pu.trimseq(empty_seq), empty_seq) + + def test_as_series(self): + # check exceptions + assert_raises(ValueError, pu.as_series, [[]]) + assert_raises(ValueError, pu.as_series, [[[1, 2]]]) + assert_raises(ValueError, pu.as_series, [[1], ['a']]) + # check common types + types = ['i', 'd', 'O'] + for i in range(len(types)): + for j in range(i): + ci = np.ones(1, types[i]) + cj = np.ones(1, types[j]) + [resi, resj] = pu.as_series([ci, cj]) + assert_(resi.dtype.char == resj.dtype.char) + assert_(resj.dtype.char == types[i]) + + def test_trimcoef(self): + coef = [2, -1, 1, 0] + # Test exceptions + assert_raises(ValueError, pu.trimcoef, coef, -1) + # Test results + assert_equal(pu.trimcoef(coef), coef[:-1]) + assert_equal(pu.trimcoef(coef, 1), coef[:-3]) + assert_equal(pu.trimcoef(coef, 2), [0]) + + def test_vander_nd_exception(self): + # n_dims != len(points) + assert_raises(ValueError, pu._vander_nd, (), (1, 2, 3), [90]) + # n_dims != len(degrees) + assert_raises(ValueError, pu._vander_nd, (), (), [90.65]) + # n_dims == 0 + assert_raises(ValueError, pu._vander_nd, (), (), []) + + def test_div_zerodiv(self): + # c2[-1] == 0 + assert_raises(ZeroDivisionError, pu._div, pu._div, (1, 2, 3), [0]) + + def test_pow_too_large(self): + # power > maxpower + assert_raises(ValueError, pu._pow, (), [1, 2, 3], 5, 4) + +class TestDomain: + + def test_getdomain(self): + # test for real values + x = [1, 10, 3, -1] + 
tgt = [-1, 10] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + # test for complex values + x = [1 + 1j, 1 - 1j, 0, 2] + tgt = [-1j, 2 + 1j] + res = pu.getdomain(x) + assert_almost_equal(res, tgt) + + def test_mapdomain(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = dom2 + res = pu.mapdomain(dom1, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = dom2 + x = dom1 + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test for multidimensional arrays + dom1 = [0, 4] + dom2 = [1, 3] + tgt = np.array([dom2, dom2]) + x = np.array([dom1, dom1]) + res = pu.mapdomain(x, dom1, dom2) + assert_almost_equal(res, tgt) + + # test that subtypes are preserved. + class MyNDArray(np.ndarray): + pass + + dom1 = [0, 4] + dom2 = [1, 3] + x = np.array([dom1, dom1]).view(MyNDArray) + res = pu.mapdomain(x, dom1, dom2) + assert_(isinstance(res, MyNDArray)) + + def test_mapparms(self): + # test for real values + dom1 = [0, 4] + dom2 = [1, 3] + tgt = [1, .5] + res = pu.mapparms(dom1, dom2) + assert_almost_equal(res, tgt) + + # test for complex values + dom1 = [0 - 1j, 2 + 1j] + dom2 = [-2, 2] + tgt = [-1 + 1j, 1 - 1j] + res = pu.mapparms(dom1, dom2) + assert_almost_equal(res, tgt) diff --git a/python/numpy/polynomial/tests/test_printing.py b/python/numpy/polynomial/tests/test_printing.py new file mode 100644 index 000000000..d3735e3b8 --- /dev/null +++ b/python/numpy/polynomial/tests/test_printing.py @@ -0,0 +1,555 @@ +from decimal import Decimal + +# For testing polynomial printing with object arrays +from fractions import Fraction +from math import inf, nan + +import pytest + +import numpy.polynomial as poly +from numpy._core import arange, array, printoptions +from numpy.testing import assert_, assert_equal + + +class TestStrUnicodeSuperSubscripts: + + @pytest.fixture(scope='class', autouse=True) + def use_unicode(self): + poly.set_default_printstyle('unicode') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·x + 3.0·x²"), + ([-1, 0, 3, -1], "-1.0 + 0.0·x + 3.0·x² - 1.0·x³"), + (arange(12), ("0.0 + 1.0·x + 2.0·x² + 3.0·x³ + 4.0·x⁴ + 5.0·x⁵ + " + "6.0·x⁶ + 7.0·x⁷ +\n8.0·x⁸ + 9.0·x⁹ + 10.0·x¹⁰ + " + "11.0·x¹¹")), + )) + def test_polynomial_str(self, inp, tgt): + p = poly.Polynomial(inp) + res = str(p) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·T₁(x) + 3.0·T₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·T₁(x) + 3.0·T₂(x) - 1.0·T₃(x)"), + (arange(12), ("0.0 + 1.0·T₁(x) + 2.0·T₂(x) + 3.0·T₃(x) + 4.0·T₄(x) + " + "5.0·T₅(x) +\n6.0·T₆(x) + 7.0·T₇(x) + 8.0·T₈(x) + " + "9.0·T₉(x) + 10.0·T₁₀(x) + 11.0·T₁₁(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·P₁(x) + 3.0·P₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·P₁(x) + 3.0·P₂(x) - 1.0·P₃(x)"), + (arange(12), ("0.0 + 1.0·P₁(x) + 2.0·P₂(x) + 3.0·P₃(x) + 4.0·P₄(x) + " + "5.0·P₅(x) +\n6.0·P₆(x) + 7.0·P₇(x) + 8.0·P₈(x) + " + "9.0·P₉(x) + 10.0·P₁₀(x) + 11.0·P₁₁(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·H₁(x) + 3.0·H₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·H₁(x) + 3.0·H₂(x) - 1.0·H₃(x)"), + (arange(12), ("0.0 + 1.0·H₁(x) + 2.0·H₂(x) + 3.0·H₃(x) + 4.0·H₄(x) + " + "5.0·H₅(x) +\n6.0·H₆(x) + 7.0·H₇(x) + 8.0·H₈(x) + "
"9.0·H₉(x) + 10.0·H₁₀(x) + 11.0·H₁₁(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·He₁(x) + 3.0·He₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·He₁(x) + 3.0·He₂(x) - 1.0·He₃(x)"), + (arange(12), ("0.0 + 1.0·He₁(x) + 2.0·He₂(x) + 3.0·He₃(x) + " + "4.0·He₄(x) + 5.0·He₅(x) +\n6.0·He₆(x) + 7.0·He₇(x) + " + "8.0·He₈(x) + 9.0·He₉(x) + 10.0·He₁₀(x) +\n" + "11.0·He₁₁(x)")), + )) + def test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0·L₁(x) + 3.0·L₂(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0·L₁(x) + 3.0·L₂(x) - 1.0·L₃(x)"), + (arange(12), ("0.0 + 1.0·L₁(x) + 2.0·L₂(x) + 3.0·L₃(x) + 4.0·L₄(x) + " + "5.0·L₅(x) +\n6.0·L₆(x) + 7.0·L₇(x) + 8.0·L₈(x) + " + "9.0·L₉(x) + 10.0·L₁₀(x) + 11.0·L₁₁(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + def test_polynomial_str_domains(self): + res = str(poly.Polynomial([0, 1])) + tgt = '0.0 + 1.0·x' + assert_equal(res, tgt) + + res = str(poly.Polynomial([0, 1], domain=[1, 2])) + tgt = '0.0 + 1.0·(-3.0 + 2.0x)' + assert_equal(res, tgt) + +class TestStrAscii: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 x + 3.0 x**2"), + ([-1, 0, 3, -1], "-1.0 + 0.0 x + 3.0 x**2 - 1.0 x**3"), + (arange(12), ("0.0 + 1.0 x + 2.0 x**2 + 3.0 x**3 + 4.0 x**4 + " + "5.0 x**5 + 6.0 x**6 +\n7.0 x**7 + 8.0 x**8 + " + "9.0 x**9 + 10.0 x**10 + 11.0 x**11")), + )) + def test_polynomial_str(self, inp, tgt): + res = str(poly.Polynomial(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 T_1(x) + 3.0 T_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 T_1(x) + 3.0 T_2(x) - 1.0 T_3(x)"), + (arange(12), ("0.0 + 1.0 T_1(x) + 2.0 T_2(x) + 3.0 T_3(x) + " + "4.0 T_4(x) + 5.0 T_5(x) +\n6.0 T_6(x) + 7.0 T_7(x) + " + "8.0 T_8(x) + 9.0 T_9(x) + 10.0 T_10(x) +\n" + "11.0 T_11(x)")), + )) + def test_chebyshev_str(self, inp, tgt): + res = str(poly.Chebyshev(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 P_1(x) + 3.0 P_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 P_1(x) + 3.0 P_2(x) - 1.0 P_3(x)"), + (arange(12), ("0.0 + 1.0 P_1(x) + 2.0 P_2(x) + 3.0 P_3(x) + " + "4.0 P_4(x) + 5.0 P_5(x) +\n6.0 P_6(x) + 7.0 P_7(x) + " + "8.0 P_8(x) + 9.0 P_9(x) + 10.0 P_10(x) +\n" + "11.0 P_11(x)")), + )) + def test_legendre_str(self, inp, tgt): + res = str(poly.Legendre(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 H_1(x) + 3.0 H_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 H_1(x) + 3.0 H_2(x) - 1.0 H_3(x)"), + (arange(12), ("0.0 + 1.0 H_1(x) + 2.0 H_2(x) + 3.0 H_3(x) + " + "4.0 H_4(x) + 5.0 H_5(x) +\n6.0 H_6(x) + 7.0 H_7(x) + " + "8.0 H_8(x) + 9.0 H_9(x) + 10.0 H_10(x) +\n" + "11.0 H_11(x)")), + )) + def test_hermite_str(self, inp, tgt): + res = str(poly.Hermite(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 He_1(x) + 3.0 He_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 He_1(x) + 3.0 He_2(x) - 1.0 He_3(x)"), + (arange(12), ("0.0 + 1.0 He_1(x) + 2.0 He_2(x) + 3.0 He_3(x) + " + "4.0 He_4(x) +\n5.0 He_5(x) + 6.0 He_6(x) + " + "7.0 He_7(x) + 8.0 He_8(x) + 9.0 He_9(x) +\n" + "10.0 He_10(x) + 11.0 He_11(x)")), + )) + def 
test_hermiteE_str(self, inp, tgt): + res = str(poly.HermiteE(inp)) + assert_equal(res, tgt) + + @pytest.mark.parametrize(('inp', 'tgt'), ( + ([1, 2, 3], "1.0 + 2.0 L_1(x) + 3.0 L_2(x)"), + ([-1, 0, 3, -1], "-1.0 + 0.0 L_1(x) + 3.0 L_2(x) - 1.0 L_3(x)"), + (arange(12), ("0.0 + 1.0 L_1(x) + 2.0 L_2(x) + 3.0 L_3(x) + " + "4.0 L_4(x) + 5.0 L_5(x) +\n6.0 L_6(x) + 7.0 L_7(x) + " + "8.0 L_8(x) + 9.0 L_9(x) + 10.0 L_10(x) +\n" + "11.0 L_11(x)")), + )) + def test_laguerre_str(self, inp, tgt): + res = str(poly.Laguerre(inp)) + assert_equal(res, tgt) + + def test_polynomial_str_domains(self): + res = str(poly.Polynomial([0, 1])) + tgt = '0.0 + 1.0 x' + assert_equal(res, tgt) + + res = str(poly.Polynomial([0, 1], domain=[1, 2])) + tgt = '0.0 + 1.0 (-3.0 + 2.0x)' + assert_equal(res, tgt) + +class TestLinebreaking: + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_single_line_one_less(self): + # With 'ascii' style, len(str(p)) is default linewidth - 1 (i.e. 74) + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 123]) + assert_equal(len(str(p)), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 123.0 x**4' + )) + + def test_num_chars_is_linewidth(self): + # len(str(p)) == default linewidth == 75 + p = poly.Polynomial([12345678, 12345678, 12345678, 12345678, 1234]) + assert_equal(len(str(p)), 75) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 +\n1234.0 x**4' + )) + + def test_first_linebreak_multiline_one_less_than_linewidth(self): + # Multiline str where len(first_line) + len(next_term) == lw - 1 == 74 + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678, 1, 12345678] + ) + assert_equal(len(str(p).split('\n')[0]), 74) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.0 x**3 + 1.0 x**4 +\n12345678.0 x**5' + )) + + def test_first_linebreak_multiline_on_linewidth(self): + # First line is one character longer than previous test + p = poly.Polynomial( + [12345678, 12345678, 12345678, 12345678.12, 1, 12345678] + ) + assert_equal(str(p), ( + '12345678.0 + 12345678.0 x + 12345678.0 x**2 + ' + '12345678.12 x**3 +\n1.0 x**4 + 12345678.0 x**5' + )) + + @pytest.mark.parametrize(('lw', 'tgt'), ( + (75, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + (45, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 +\n40000.0 x**4 + ' + '500000.0 x**5 +\n600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 +\n' + '900.0 x**9')), + (132, ('0.0 + 10.0 x + 200.0 x**2 + 3000.0 x**3 + 40000.0 x**4 + ' + '500000.0 x**5 + 600000.0 x**6 + 70000.0 x**7 + 8000.0 x**8 + ' + '900.0 x**9')), + )) + def test_linewidth_printoption(self, lw, tgt): + p = poly.Polynomial( + [0, 10, 200, 3000, 40000, 500000, 600000, 70000, 8000, 900] + ) + with printoptions(linewidth=lw): + assert_equal(str(p), tgt) + for line in str(p).split('\n'): + assert_(len(line) < lw) + + +def test_set_default_printoptions(): + p = poly.Polynomial([1, 2, 3]) + c = poly.Chebyshev([1, 2, 3]) + poly.set_default_printstyle('ascii') + assert_equal(str(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(str(c), "1.0 + 2.0 T_1(x) + 3.0 T_2(x)") + poly.set_default_printstyle('unicode') + assert_equal(str(p), "1.0 + 2.0·x + 3.0·x²") + assert_equal(str(c), "1.0 + 2.0·T₁(x) + 3.0·T₂(x)") + with pytest.raises(ValueError): + poly.set_default_printstyle('invalid_input') + + +def 
test_complex_coefficients(): + """Test both numpy and built-in complex.""" + coefs = [0 + 1j, 1 + 1j, -2 + 2j, 3 + 0j] + # numpy complex + p1 = poly.Polynomial(coefs) + # Python complex + p2 = poly.Polynomial(array(coefs, dtype=object)) + poly.set_default_printstyle('unicode') + assert_equal(str(p1), "1j + (1+1j)·x - (2-2j)·x² + (3+0j)·x³") + assert_equal(str(p2), "1j + (1+1j)·x + (-2+2j)·x² + (3+0j)·x³") + poly.set_default_printstyle('ascii') + assert_equal(str(p1), "1j + (1+1j) x - (2-2j) x**2 + (3+0j) x**3") + assert_equal(str(p2), "1j + (1+1j) x + (-2+2j) x**2 + (3+0j) x**3") + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([Fraction(1, 2), Fraction(3, 4)], dtype=object), ( + "1/2 + 3/4·x" + )), + (array([1, 2, Fraction(5, 7)], dtype=object), ( + "1 + 2·x + 5/7·x²" + )), + (array([Decimal('1.00'), Decimal('2.2'), 3], dtype=object), ( + "1.00 + 2.2·x + 3·x²" + )), +)) +def test_numeric_object_coefficients(coefs, tgt): + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +@pytest.mark.parametrize(('coefs', 'tgt'), ( + (array([1, 2, 'f'], dtype=object), '1 + 2·x + f·x²'), + (array([1, 2, [3, 4]], dtype=object), '1 + 2·x + [3, 4]·x²'), +)) +def test_nonnumeric_object_coefficients(coefs, tgt): + """ + Test coef fallback for object arrays of non-numeric coefficients. + """ + p = poly.Polynomial(coefs) + poly.set_default_printstyle('unicode') + assert_equal(str(p), tgt) + + +class TestFormat: + def test_format_unicode(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal(format(p, 'unicode'), "1.0 + 2.0·x + 0.0·x² - 1.0·x³") + + def test_format_ascii(self): + poly.set_default_printstyle('unicode') + p = poly.Polynomial([1, 2, 0, -1]) + assert_equal( + format(p, 'ascii'), "1.0 + 2.0 x + 0.0 x**2 - 1.0 x**3" + ) + + def test_empty_formatstr(self): + poly.set_default_printstyle('ascii') + p = poly.Polynomial([1, 2, 3]) + assert_equal(format(p), "1.0 + 2.0 x + 3.0 x**2") + assert_equal(f"{p}", "1.0 + 2.0 x + 3.0 x**2") + + def test_bad_formatstr(self): + p = poly.Polynomial([1, 2, 0, -1]) + with pytest.raises(ValueError): + format(p, '.2f') + + +@pytest.mark.parametrize(('poly', 'tgt'), ( + (poly.Polynomial, '1.0 + 2.0·z + 3.0·z²'), + (poly.Chebyshev, '1.0 + 2.0·T₁(z) + 3.0·T₂(z)'), + (poly.Hermite, '1.0 + 2.0·H₁(z) + 3.0·H₂(z)'), + (poly.HermiteE, '1.0 + 2.0·He₁(z) + 3.0·He₂(z)'), + (poly.Laguerre, '1.0 + 2.0·L₁(z) + 3.0·L₂(z)'), + (poly.Legendre, '1.0 + 2.0·P₁(z) + 3.0·P₂(z)'), +)) +def test_symbol(poly, tgt): + p = poly([1, 2, 3], symbol='z') + assert_equal(f"{p:unicode}", tgt) + + +class TestRepr: + def test_polynomial_repr(self): + res = repr(poly.Polynomial([0, 1])) + tgt = ( + "Polynomial([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_chebyshev_repr(self): + res = repr(poly.Chebyshev([0, 1])) + tgt = ( + "Chebyshev([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_legendre_repr(self): + res = repr(poly.Legendre([0, 1])) + tgt = ( + "Legendre([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermite_repr(self): + res = repr(poly.Hermite([0, 1])) + tgt = ( + "Hermite([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + def test_hermiteE_repr(self): + res = repr(poly.HermiteE([0, 1])) + tgt = ( + "HermiteE([0., 1.], domain=[-1., 1.], window=[-1., 1.], " + "symbol='x')" + ) + 
assert_equal(res, tgt) + + def test_laguerre_repr(self): + res = repr(poly.Laguerre([0, 1])) + tgt = ( + "Laguerre([0., 1.], domain=[0., 1.], window=[0., 1.], " + "symbol='x')" + ) + assert_equal(res, tgt) + + +class TestLatexRepr: + """Test the latex repr used by Jupyter""" + + @staticmethod + def as_latex(obj): + # right now we ignore the formatting of scalars in our tests, since + # it makes them too verbose. Ideally, the formatting of scalars will + # be fixed such that tests below continue to pass + obj._repr_latex_scalar = lambda x, parens=False: str(x) + try: + return obj._repr_latex_() + finally: + del obj._repr_latex_scalar + + def test_simple_polynomial(self): + # default input + p = poly.Polynomial([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') # noqa: E501 + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') # noqa: E501 + + def test_basis_func(self): + p = poly.Chebyshev([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') + # affine input - check no surplus parens are added + p = poly.Chebyshev([1, 2, 3], domain=[-1, 0]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') # noqa: E501 + + def test_multichar_basis_func(self): + p = poly.HermiteE([1, 2, 3]) + assert_equal(self.as_latex(p), + r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') + + def test_symbol_basic(self): + # default input + p = poly.Polynomial([1, 2, 3], symbol='z') + assert_equal(self.as_latex(p), + r'$z \mapsto 1.0 + 2.0\,z + 3.0\,z^{2}$') + + # translated input + p = poly.Polynomial([1, 2, 3], domain=[-2, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + z\right) + 3.0\,' + r'\left(1.0 + z\right)^{2}$' + ), + ) + + # scaled input + p = poly.Polynomial([1, 2, 3], domain=[-0.5, 0.5], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(2.0z\right) + 3.0\,' + r'\left(2.0z\right)^{2}$' + ), + ) + + # affine input + p = poly.Polynomial([1, 2, 3], domain=[-1, 0], symbol='z') + assert_equal( + self.as_latex(p), + ( + r'$z \mapsto 1.0 + 2.0\,\left(1.0 + 2.0z\right) + 3.0\,' + r'\left(1.0 + 2.0z\right)^{2}$' + ), + ) + + def test_numeric_object_coefficients(self): + coefs = array([Fraction(1, 2), Fraction(1)]) + p = poly.Polynomial(coefs) + assert_equal(self.as_latex(p), '$x \\mapsto 1/2 + 1\\,x$') + + +SWITCH_TO_EXP = ( + '1.0 + (1.0e-01) x + (1.0e-02) x**2', + '1.2 + (1.2e-01) x + (1.2e-02) x**2', + '1.23 + 0.12 x + (1.23e-02) x**2 + (1.23e-03) x**3', + '1.235 + 0.123 x + (1.235e-02) x**2 + (1.235e-03) x**3', + '1.2346 + 0.1235 x + 0.0123 x**2 + (1.2346e-03) x**3 + (1.2346e-04) x**4', + '1.23457 + 0.12346 x + 0.01235 x**2 + (1.23457e-03) x**3 + ' + '(1.23457e-04) x**4', + '1.234568 + 0.123457 x + 0.012346 x**2 + 0.001235 x**3 + ' + '(1.234568e-04) x**4 + (1.234568e-05) x**5', + '1.2345679 + 0.1234568 x + 0.0123457 x**2 + 0.0012346 x**3 + ' + 
'(1.2345679e-04) x**4 + (1.2345679e-05) x**5') + +class TestPrintOptions: + """ + Test the output is properly configured via printoptions. + The exponential notation is enabled automatically when the values + are too small or too large. + """ + + @pytest.fixture(scope='class', autouse=True) + def use_ascii(self): + poly.set_default_printstyle('ascii') + + def test_str(self): + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) + assert_equal(str(p), '0.5 + 0.14285714 x + 14285714.28571429 x**2 ' + '+ (1.42857143e+08) x**3') + + with printoptions(precision=3): + assert_equal(str(p), '0.5 + 0.143 x + 14285714.286 x**2 ' + '+ (1.429e+08) x**3') + + def test_latex(self): + p = poly.Polynomial([1 / 2, 1 / 7, 1 / 7 * 10**8, 1 / 7 * 10**9]) + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.14285714}\,x + ' + r'\text{14285714.28571429}\,x^{2} + ' + r'\text{(1.42857143e+08)}\,x^{3}$') + + with printoptions(precision=3): + assert_equal(p._repr_latex_(), + r'$x \mapsto \text{0.5} + \text{0.143}\,x + ' + r'\text{14285714.286}\,x^{2} + \text{(1.429e+08)}\,x^{3}$') + + def test_fixed(self): + p = poly.Polynomial([1 / 2]) + assert_equal(str(p), '0.5') + + with printoptions(floatmode='fixed'): + assert_equal(str(p), '0.50000000') + + with printoptions(floatmode='fixed', precision=4): + assert_equal(str(p), '0.5000') + + def test_switch_to_exp(self): + for i, s in enumerate(SWITCH_TO_EXP): + with printoptions(precision=i): + p = poly.Polynomial([1.23456789 * 10**-i + for i in range(i // 2 + 3)]) + assert str(p).replace('\n', ' ') == s + + def test_non_finite(self): + p = poly.Polynomial([nan, inf]) + assert str(p) == 'nan + inf x' + assert p._repr_latex_() == r'$x \mapsto \text{nan} + \text{inf}\,x$' # noqa: RUF027 + with printoptions(nanstr='NAN', infstr='INF'): + assert str(p) == 'NAN + INF x' + assert p._repr_latex_() == \ + r'$x \mapsto \text{NAN} + \text{INF}\,x$' diff --git a/python/numpy/polynomial/tests/test_symbol.py b/python/numpy/polynomial/tests/test_symbol.py new file mode 100644 index 000000000..3de9e38ce --- /dev/null +++ b/python/numpy/polynomial/tests/test_symbol.py @@ -0,0 +1,217 @@ +""" +Tests related to the ``symbol`` attribute of the ABCPolyBase class. +""" + +import pytest + +import numpy.polynomial as poly +from numpy._core import array +from numpy.testing import assert_, assert_equal, assert_raises + + +class TestInit: + """ + Test polynomial creation with symbol kwarg. + """ + c = [1, 2, 3] + + def test_default_symbol(self): + p = poly.Polynomial(self.c) + assert_equal(p.symbol, 'x') + + @pytest.mark.parametrize(('bad_input', 'exception'), ( + ('', ValueError), + ('3', ValueError), + (None, TypeError), + (1, TypeError), + )) + def test_symbol_bad_input(self, bad_input, exception): + with pytest.raises(exception): + p = poly.Polynomial(self.c, symbol=bad_input) + + @pytest.mark.parametrize('symbol', ( + 'x', + 'x_1', + 'A', + 'xyz', + 'β', + )) + def test_valid_symbols(self, symbol): + """ + Values for symbol that should pass input validation. + """ + p = poly.Polynomial(self.c, symbol=symbol) + assert_equal(p.symbol, symbol) + + def test_property(self): + """ + 'symbol' attribute is read only. 
+ """ + p = poly.Polynomial(self.c, symbol='x') + with pytest.raises(AttributeError): + p.symbol = 'z' + + def test_change_symbol(self): + p = poly.Polynomial(self.c, symbol='y') + # Create new polynomial from p with different symbol + pt = poly.Polynomial(p.coef, symbol='t') + assert_equal(pt.symbol, 't') + + +class TestUnaryOperators: + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_neg(self): + n = -self.p + assert_equal(n.symbol, 'z') + + def test_scalarmul(self): + out = self.p * 10 + assert_equal(out.symbol, 'z') + + def test_rscalarmul(self): + out = 10 * self.p + assert_equal(out.symbol, 'z') + + def test_pow(self): + out = self.p ** 3 + assert_equal(out.symbol, 'z') + + +@pytest.mark.parametrize( + 'rhs', + ( + poly.Polynomial([4, 5, 6], symbol='z'), + array([4, 5, 6]), + ), +) +class TestBinaryOperatorsSameSymbol: + """ + Ensure symbol is preserved for numeric operations on polynomials with + the same symbol + """ + p = poly.Polynomial([1, 2, 3], symbol='z') + + def test_add(self, rhs): + out = self.p + rhs + assert_equal(out.symbol, 'z') + + def test_sub(self, rhs): + out = self.p - rhs + assert_equal(out.symbol, 'z') + + def test_polymul(self, rhs): + out = self.p * rhs + assert_equal(out.symbol, 'z') + + def test_divmod(self, rhs): + for out in divmod(self.p, rhs): + assert_equal(out.symbol, 'z') + + def test_radd(self, rhs): + out = rhs + self.p + assert_equal(out.symbol, 'z') + + def test_rsub(self, rhs): + out = rhs - self.p + assert_equal(out.symbol, 'z') + + def test_rmul(self, rhs): + out = rhs * self.p + assert_equal(out.symbol, 'z') + + def test_rdivmod(self, rhs): + for out in divmod(rhs, self.p): + assert_equal(out.symbol, 'z') + + +class TestBinaryOperatorsDifferentSymbol: + p = poly.Polynomial([1, 2, 3], symbol='x') + other = poly.Polynomial([4, 5, 6], symbol='y') + ops = (p.__add__, p.__sub__, p.__mul__, p.__floordiv__, p.__mod__) + + @pytest.mark.parametrize('f', ops) + def test_binops_fails(self, f): + assert_raises(ValueError, f, self.other) + + +class TestEquality: + p = poly.Polynomial([1, 2, 3], symbol='x') + + def test_eq(self): + other = poly.Polynomial([1, 2, 3], symbol='x') + assert_(self.p == other) + + def test_neq(self): + other = poly.Polynomial([1, 2, 3], symbol='y') + assert_(not self.p == other) + + +class TestExtraMethods: + """ + Test other methods for manipulating/creating polynomial objects. 
+ """ + p = poly.Polynomial([1, 2, 3, 0], symbol='z') + + def test_copy(self): + other = self.p.copy() + assert_equal(other.symbol, 'z') + + def test_trim(self): + other = self.p.trim() + assert_equal(other.symbol, 'z') + + def test_truncate(self): + other = self.p.truncate(2) + assert_equal(other.symbol, 'z') + + @pytest.mark.parametrize('kwarg', ( + {'domain': [-10, 10]}, + {'window': [-10, 10]}, + {'kind': poly.Chebyshev}, + )) + def test_convert(self, kwarg): + other = self.p.convert(**kwarg) + assert_equal(other.symbol, 'z') + + def test_integ(self): + other = self.p.integ() + assert_equal(other.symbol, 'z') + + def test_deriv(self): + other = self.p.deriv() + assert_equal(other.symbol, 'z') + + +def test_composition(): + p = poly.Polynomial([3, 2, 1], symbol="t") + q = poly.Polynomial([5, 1, 0, -1], symbol="λ_1") + r = p(q) + assert r.symbol == "λ_1" + + +# +# Class methods that result in new polynomial class instances +# + + +def test_fit(): + x, y = (range(10),) * 2 + p = poly.Polynomial.fit(x, y, deg=1, symbol='z') + assert_equal(p.symbol, 'z') + + +def test_froomroots(): + roots = [-2, 2] + p = poly.Polynomial.fromroots(roots, symbol='z') + assert_equal(p.symbol, 'z') + + +def test_identity(): + p = poly.Polynomial.identity(domain=[-1, 1], window=[5, 20], symbol='z') + assert_equal(p.symbol, 'z') + + +def test_basis(): + p = poly.Polynomial.basis(3, symbol='z') + assert_equal(p.symbol, 'z') diff --git a/python/numpy/py.typed b/python/numpy/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/python/numpy/random/LICENSE.md b/python/numpy/random/LICENSE.md new file mode 100644 index 000000000..a6cf1b17e --- /dev/null +++ b/python/numpy/random/LICENSE.md @@ -0,0 +1,71 @@ +**This software is dual-licensed under the The University of Illinois/NCSA +Open Source License (NCSA) and The 3-Clause BSD License** + +# NCSA Open Source License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Developed by: Kevin Sheppard (, +) +[http://www.kevinsheppard.com](http://www.kevinsheppard.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimers. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimers in the documentation and/or +other materials provided with the distribution. + +Neither the names of Kevin Sheppard, nor the names of any contributors may be +used to endorse or promote products derived from this Software without specific +prior written permission. + +**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH +THE SOFTWARE.** + + +# 3-Clause BSD License +**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE.** + +# Components + +Many parts of this module have been derived from original sources, +often the algorithm's designer. Component licenses are located with +the component code. diff --git a/python/numpy/random/__init__.pxd b/python/numpy/random/__init__.pxd new file mode 100644 index 000000000..1f9057296 --- /dev/null +++ b/python/numpy/random/__init__.pxd @@ -0,0 +1,14 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +from numpy.random.bit_generator cimport BitGenerator, SeedSequence diff --git a/python/numpy/random/__init__.py b/python/numpy/random/__init__.py new file mode 100644 index 000000000..3e21d598a --- /dev/null +++ b/python/numpy/random/__init__.py @@ -0,0 +1,213 @@ +""" +======================== +Random Number Generation +======================== + +Use ``default_rng()`` to create a `Generator` and call its methods. 
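To make the recommendation above concrete, here is a minimal usage sketch of the ``default_rng()`` workflow the docstring describes (standard NumPy API; the seed value 12345 is arbitrary and only makes the run reproducible):

    import numpy as np

    # default_rng() returns a Generator wrapping the default BitGenerator (PCG64)
    rng = np.random.default_rng(12345)

    print(rng.random(3))                # three uniform floats in [0, 1)
    print(rng.integers(0, 10, size=5))  # five integers drawn from [0, 10)
    print(rng.standard_normal(2))       # two draws from the standard normal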
+ +=============== ========================================================= +Generator +--------------- --------------------------------------------------------- +Generator Class implementing all of the random number distributions +default_rng Default constructor for ``Generator`` +=============== ========================================================= + +============================================= === +BitGenerator Streams that work with Generator +--------------------------------------------- --- +MT19937 +PCG64 +PCG64DXSM +Philox +SFC64 +============================================= === + +============================================= === +Getting entropy to initialize a BitGenerator +--------------------------------------------- --- +SeedSequence +============================================= === + + +Legacy +------ + +For backwards compatibility with numpy versions before 1.17, the +various aliases to the global `RandomState` methods are left alone and do not +use the new `Generator` API. + +==================== ========================================================= +Utility functions +-------------------- --------------------------------------------------------- +random Uniformly distributed floats over ``[0, 1)`` +bytes Uniformly distributed random bytes. +permutation Randomly permute a sequence / generate a random sequence. +shuffle Randomly permute a sequence in place. +choice Random sample from 1-D array. +==================== ========================================================= + +==================== ========================================================= +Compatibility +functions - removed +in the new API +-------------------- --------------------------------------------------------- +rand Uniformly distributed values. +randn Normally distributed values. +ranf Uniformly distributed floating point numbers. +random_integers Uniformly distributed integers in a given range. + (deprecated, use ``integers(..., closed=True)`` instead) +random_sample Alias for `random` +randint Uniformly distributed integers in a given range. +seed Seed the legacy random number generator. +==================== ========================================================= + +==================== ========================================================= +Univariate +distributions +-------------------- --------------------------------------------------------- +beta Beta distribution over ``[0, 1]``. +binomial Binomial distribution. +chisquare :math:`\\chi^2` distribution. +exponential Exponential distribution. +f F (Fisher-Snedecor) distribution. +gamma Gamma distribution. +geometric Geometric distribution. +gumbel Gumbel distribution. +hypergeometric Hypergeometric distribution. +laplace Laplace distribution. +logistic Logistic distribution. +lognormal Log-normal distribution. +logseries Logarithmic series distribution. +negative_binomial Negative binomial distribution. +noncentral_chisquare Non-central chi-square distribution. +noncentral_f Non-central F distribution. +normal Normal / Gaussian distribution. +pareto Pareto distribution. +poisson Poisson distribution. +power Power distribution. +rayleigh Rayleigh distribution. +triangular Triangular distribution. +uniform Uniform distribution. +vonmises Von Mises circular distribution. +wald Wald (inverse Gaussian) distribution. +weibull Weibull distribution. +zipf Zipf's distribution over ranked data.
+==================== ========================================================= + +==================== ========================================================== +Multivariate +distributions +-------------------- ---------------------------------------------------------- +dirichlet Multivariate generalization of Beta distribution. +multinomial Multivariate generalization of the binomial distribution. +multivariate_normal Multivariate generalization of the normal distribution. +==================== ========================================================== + +==================== ========================================================= +Standard +distributions +-------------------- --------------------------------------------------------- +standard_cauchy Standard Cauchy-Lorentz distribution. +standard_exponential Standard exponential distribution. +standard_gamma Standard Gamma distribution. +standard_normal Standard normal distribution. +standard_t Standard Student's t-distribution. +==================== ========================================================= + +==================== ========================================================= +Internal functions +-------------------- --------------------------------------------------------- +get_state Get tuple representing internal state of generator. +set_state Set state of generator. +==================== ========================================================= + + +""" +__all__ = [ + 'beta', + 'binomial', + 'bytes', + 'chisquare', + 'choice', + 'dirichlet', + 'exponential', + 'f', + 'gamma', + 'geometric', + 'get_state', + 'gumbel', + 'hypergeometric', + 'laplace', + 'logistic', + 'lognormal', + 'logseries', + 'multinomial', + 'multivariate_normal', + 'negative_binomial', + 'noncentral_chisquare', + 'noncentral_f', + 'normal', + 'pareto', + 'permutation', + 'poisson', + 'power', + 'rand', + 'randint', + 'randn', + 'random', + 'random_integers', + 'random_sample', + 'ranf', + 'rayleigh', + 'sample', + 'seed', + 'set_state', + 'shuffle', + 'standard_cauchy', + 'standard_exponential', + 'standard_gamma', + 'standard_normal', + 'standard_t', + 'triangular', + 'uniform', + 'vonmises', + 'wald', + 'weibull', + 'zipf', +] + +# add these for module-freeze analysis (like PyInstaller) +from . import _bounded_integers, _common, _pickle +from ._generator import Generator, default_rng +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import * +
+__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937', + 'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng', + 'BitGenerator'] + + +def __RandomState_ctor(): + """Return a RandomState instance. + + This function exists solely to assist (un)pickling. + + Note that the state of the RandomState returned here is irrelevant, as this + function's entire purpose is to return a newly allocated RandomState whose + state can be set by unpickling. Consequently the RandomState returned by + this function is a freshly allocated copy with seed=0.
+ + See https://github.com/numpy/numpy/issues/4763 for a detailed discussion + + """ + return RandomState(seed=0) + + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/python/numpy/random/__init__.pyi b/python/numpy/random/__init__.pyi new file mode 100644 index 000000000..e9b9fb50a --- /dev/null +++ b/python/numpy/random/__init__.pyi @@ -0,0 +1,124 @@ +from ._generator import Generator, default_rng +from ._mt19937 import MT19937 +from ._pcg64 import PCG64, PCG64DXSM +from ._philox import Philox +from ._sfc64 import SFC64 +from .bit_generator import BitGenerator, SeedSequence +from .mtrand import ( + RandomState, + beta, + binomial, + bytes, + chisquare, + choice, + dirichlet, + exponential, + f, + gamma, + geometric, + get_bit_generator, # noqa: F401 + get_state, + gumbel, + hypergeometric, + laplace, + logistic, + lognormal, + logseries, + multinomial, + multivariate_normal, + negative_binomial, + noncentral_chisquare, + noncentral_f, + normal, + pareto, + permutation, + poisson, + power, + rand, + randint, + randn, + random, + random_integers, + random_sample, + ranf, + rayleigh, + sample, + seed, + set_bit_generator, # noqa: F401 + set_state, + shuffle, + standard_cauchy, + standard_exponential, + standard_gamma, + standard_normal, + standard_t, + triangular, + uniform, + vonmises, + wald, + weibull, + zipf, +) + +__all__ = [ + "beta", + "binomial", + "bytes", + "chisquare", + "choice", + "dirichlet", + "exponential", + "f", + "gamma", + "geometric", + "get_state", + "gumbel", + "hypergeometric", + "laplace", + "logistic", + "lognormal", + "logseries", + "multinomial", + "multivariate_normal", + "negative_binomial", + "noncentral_chisquare", + "noncentral_f", + "normal", + "pareto", + "permutation", + "poisson", + "power", + "rand", + "randint", + "randn", + "random", + "random_integers", + "random_sample", + "ranf", + "rayleigh", + "sample", + "seed", + "set_state", + "shuffle", + "standard_cauchy", + "standard_exponential", + "standard_gamma", + "standard_normal", + "standard_t", + "triangular", + "uniform", + "vonmises", + "wald", + "weibull", + "zipf", + "Generator", + "RandomState", + "SeedSequence", + "MT19937", + "Philox", + "PCG64", + "PCG64DXSM", + "SFC64", + "default_rng", + "BitGenerator", +] diff --git a/python/numpy/random/__pycache__/__init__.cpython-312.pyc b/python/numpy/random/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..cd9676def Binary files /dev/null and b/python/numpy/random/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/random/__pycache__/_pickle.cpython-312.pyc b/python/numpy/random/__pycache__/_pickle.cpython-312.pyc new file mode 100644 index 000000000..275877c5f Binary files /dev/null and b/python/numpy/random/__pycache__/_pickle.cpython-312.pyc differ diff --git a/python/numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..a9e1e60be Binary files /dev/null and b/python/numpy/random/_bounded_integers.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/random/_bounded_integers.pxd b/python/numpy/random/_bounded_integers.pxd new file mode 100644 index 000000000..607014cbf --- /dev/null +++ b/python/numpy/random/_bounded_integers.pxd @@ -0,0 +1,29 @@ +from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t, + int8_t, int16_t, int32_t, int64_t, intptr_t) +import numpy as np +cimport numpy as np +ctypedef np.npy_bool 
bool_t + +from numpy.random cimport bitgen_t + +cdef inline uint64_t _gen_mask(uint64_t max_val) noexcept nogil: + """Mask generator for use in bounded random numbers""" + # Smallest bit mask >= max + cdef uint64_t mask = max_val + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + mask |= mask >> 32 + return mask + +cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) +cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock) diff --git a/python/numpy/random/_bounded_integers.pyi b/python/numpy/random/_bounded_integers.pyi new file mode 100644 index 000000000..c9c2ef67b --- /dev/null +++ b/python/numpy/random/_bounded_integers.pyi @@ -0,0 +1 @@ +__all__: list[str] = [] diff --git a/python/numpy/random/_common.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/_common.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..2a1a3449a Binary files /dev/null and b/python/numpy/random/_common.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/random/_common.pxd b/python/numpy/random/_common.pxd new file mode 100644 index 000000000..0de4456d7 --- /dev/null +++ b/python/numpy/random/_common.pxd @@ -0,0 +1,107 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t + +import numpy as np +cimport numpy as np + +from numpy.random cimport bitgen_t + +cdef double POISSON_LAM_MAX +cdef double LEGACY_POISSON_LAM_MAX +cdef uint64_t MAXSIZE + +cdef enum ConstraintType: + CONS_NONE + CONS_NON_NEGATIVE + CONS_POSITIVE + CONS_POSITIVE_NOT_NAN + CONS_BOUNDED_0_1 + CONS_BOUNDED_GT_0_1 + CONS_BOUNDED_LT_0_1 + CONS_GT_1 + CONS_GTE_1 + CONS_POISSON + LEGACY_CONS_POISSON + LEGACY_CONS_NON_NEGATIVE_INBOUNDS_LONG + +ctypedef ConstraintType constraint_type + +cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method) +cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output) +cdef object prepare_cffi(bitgen_t *bitgen) +cdef object prepare_ctypes(bitgen_t *bitgen) +cdef int check_constraint(double val, object name, constraint_type cons) except -1 +cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1 + +cdef extern from "include/aligned_malloc.h": + cdef void *PyArray_realloc_aligned(void *p, size_t n) + cdef void *PyArray_malloc_aligned(size_t n) + cdef void *PyArray_calloc_aligned(size_t n, size_t s) + cdef void PyArray_free_aligned(void *p) + +ctypedef void (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) 
noexcept nogil +ctypedef double (*random_double_0)(void *state) noexcept nogil +ctypedef double (*random_double_1)(void *state, double a) noexcept nogil +ctypedef double (*random_double_2)(void *state, double a, double b) noexcept nogil +ctypedef double (*random_double_3)(void *state, double a, double b, double c) noexcept nogil + +ctypedef void (*random_float_fill)(bitgen_t *state, np.npy_intp count, float* out) noexcept nogil +ctypedef float (*random_float_0)(bitgen_t *state) noexcept nogil +ctypedef float (*random_float_1)(bitgen_t *state, float a) noexcept nogil + +ctypedef int64_t (*random_uint_0)(void *state) noexcept nogil +ctypedef int64_t (*random_uint_d)(void *state, double a) noexcept nogil +ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) noexcept nogil +ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) noexcept nogil +ctypedef int64_t (*random_uint_i)(void *state, int64_t a) noexcept nogil +ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) noexcept nogil + +ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) noexcept nogil +ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) noexcept nogil + +ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) noexcept nogil +ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) noexcept nogil + +cdef double kahan_sum(double *darr, np.npy_intp n) noexcept + +cdef inline double uint64_to_double(uint64_t rnd) noexcept nogil: + return (rnd >> 11) * (1.0 / 9007199254740992.0) + +cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out) + +cdef object wrap_int(object val, object bits) + +cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size) + +cdef validate_output_shape(iter_shape, np.ndarray output) + +cdef object cont(void *func, void *state, object size, object lock, int narg, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint, + object out) + +cdef object disc(void *func, void *state, object size, object lock, + int narg_double, int narg_int64, + object a, object a_name, constraint_type a_constraint, + object b, object b_name, constraint_type b_constraint, + object c, object c_name, constraint_type c_constraint) + +cdef object cont_f(void *func, bitgen_t *state, object size, object lock, + object a, object a_name, constraint_type a_constraint, + object out) + +cdef object cont_broadcast_3(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) + +cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock, + np.ndarray a_arr, object a_name, constraint_type a_constraint, + np.ndarray b_arr, object b_name, constraint_type b_constraint, + np.ndarray c_arr, object c_name, constraint_type c_constraint) diff --git a/python/numpy/random/_common.pyi b/python/numpy/random/_common.pyi new file mode 100644 index 000000000..b667fd1c8 --- /dev/null +++ b/python/numpy/random/_common.pyi @@ -0,0 +1,16 @@ +from collections.abc import 
Callable +from typing import Any, NamedTuple, TypeAlias + +import numpy as np + +__all__: list[str] = ["interface"] + +_CDataVoidPointer: TypeAlias = Any + +class interface(NamedTuple): + state_address: int + state: _CDataVoidPointer + next_uint64: Callable[..., np.uint64] + next_uint32: Callable[..., np.uint32] + next_double: Callable[..., np.float64] + bit_generator: _CDataVoidPointer diff --git a/python/numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc b/python/numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc new file mode 100644 index 000000000..f92946b54 Binary files /dev/null and b/python/numpy/random/_examples/cffi/__pycache__/extending.cpython-312.pyc differ diff --git a/python/numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc b/python/numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc new file mode 100644 index 000000000..002521216 Binary files /dev/null and b/python/numpy/random/_examples/cffi/__pycache__/parse.cpython-312.pyc differ diff --git a/python/numpy/random/_examples/cffi/extending.py b/python/numpy/random/_examples/cffi/extending.py new file mode 100644 index 000000000..ad4c9acbd --- /dev/null +++ b/python/numpy/random/_examples/cffi/extending.py @@ -0,0 +1,44 @@ +""" +Use cffi to access any of the underlying C functions from distributions.h +""" +import os + +import cffi + +import numpy as np + +from .parse import parse_distributions_h + +ffi = cffi.FFI() + +inc_dir = os.path.join(np.get_include(), 'numpy') + +# Basic numpy types +ffi.cdef(''' + typedef intptr_t npy_intp; + typedef unsigned char npy_bool; + +''') + +parse_distributions_h(ffi, inc_dir) + +lib = ffi.dlopen(np.random._generator.__file__) + +# Compare the distributions.h random_standard_normal_fill to +# Generator.standard_normal +bit_gen = np.random.PCG64() +rng = np.random.Generator(bit_gen) +state = bit_gen.state + +interface = rng.bit_generator.cffi +n = 100 +vals_cffi = ffi.new('double[%d]' % n) +lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi) + +# reset the state +bit_gen.state = state + +vals = rng.standard_normal(n) + +for i in range(n): + assert vals[i] == vals_cffi[i] diff --git a/python/numpy/random/_examples/cffi/parse.py b/python/numpy/random/_examples/cffi/parse.py new file mode 100644 index 000000000..0f80adb35 --- /dev/null +++ b/python/numpy/random/_examples/cffi/parse.py @@ -0,0 +1,53 @@ +import os + + +def parse_distributions_h(ffi, inc_dir): + """ + Parse distributions.h located in inc_dir for CFFI, filling in the ffi.cdef + + Read the function declarations without the "#define ..." macros that will + be filled in when loading the library. + """ + + with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid: + s = [] + for line in fid: + # massage the include file + if line.strip().startswith('#'): + continue + s.append(line) + ffi.cdef('\n'.join(s)) + + with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid: + s = [] + in_skip = 0 + ignoring = False + for line in fid: + # check for and remove extern "C" guards + if ignoring: + if line.strip().startswith('#endif'): + ignoring = False + continue + if line.strip().startswith('#ifdef __cplusplus'): + ignoring = True + + # massage the include file + if line.strip().startswith('#'): + continue + + # skip any inlined function definition + # which starts with 'static inline xxx(...)
{' + and ends with a closing '}' + if line.strip().startswith('static inline'): + in_skip += line.count('{') + continue + elif in_skip > 0: + in_skip += line.count('{') + in_skip -= line.count('}') + continue + + # replace defines with their value or remove them + line = line.replace('DECLDIR', '') + line = line.replace('RAND_INT_TYPE', 'int64_t') + s.append(line) + ffi.cdef('\n'.join(s)) diff --git a/python/numpy/random/_examples/cython/extending.pyx b/python/numpy/random/_examples/cython/extending.pyx new file mode 100644 index 000000000..6a0f45e1b --- /dev/null +++ b/python/numpy/random/_examples/cython/extending.pyx @@ -0,0 +1,77 @@ +#cython: language_level=3 + +from libc.stdint cimport uint32_t +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer + +import numpy as np +cimport numpy as np +cimport cython + +from numpy.random cimport bitgen_t +from numpy.random import PCG64 + +np.import_array() + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniform_mean(Py_ssize_t n): + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + cdef np.ndarray randoms + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n) + # Best practice is to acquire the lock whenever generating random values. + # This prevents other threads from modifying the state. Acquiring the lock + # is only necessary if the GIL is also released, as in this example. + with x.lock, nogil: + for i in range(n): + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + return randoms.mean() + + +# This function is declared nogil so it can be used without the GIL below +cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil: + cdef uint32_t mask, delta, val + mask = delta = ub - lb + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + + val = rng.next_uint32(rng.state) & mask + while val > delta: + val = rng.next_uint32(rng.state) & mask + + return lb + val + + +@cython.boundscheck(False) +@cython.wraparound(False) +def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n): + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef uint32_t[::1] out + cdef const char *capsule_name = "BitGenerator" + + x = PCG64() + out = np.empty(n, dtype=np.uint32) + capsule = x.capsule + + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + + with x.lock, nogil: + for i in range(n): + out[i] = bounded_uint(lb, ub, rng) + return np.asarray(out) diff --git a/python/numpy/random/_examples/cython/extending_distributions.pyx b/python/numpy/random/_examples/cython/extending_distributions.pyx new file mode 100644 index 000000000..e1d1ea6c8 --- /dev/null +++ b/python/numpy/random/_examples/cython/extending_distributions.pyx @@ -0,0 +1,118 @@ +#cython: language_level=3 +""" +This file shows how to use a BitGenerator to create a distribution.
+""" +import numpy as np +cimport numpy as np +cimport cython +from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer +from libc.stdint cimport uint16_t, uint64_t +from numpy.random cimport bitgen_t +from numpy.random import PCG64 +from numpy.random.c_distributions cimport ( + random_standard_uniform_fill, random_standard_uniform_fill_f) + +np.import_array() + + +@cython.boundscheck(False) +@cython.wraparound(False) +def uniforms(Py_ssize_t n): + """ + Create an array of `n` uniformly distributed doubles. + A 'real' distribution would want to process the values into + some non-uniform distribution + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef double[::1] random_values + + x = PCG64() + capsule = x.capsule + # Optional check that the capsule if from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='float64') + with x.lock, nogil: + for i in range(n): + # Call the function + random_values[i] = rng.next_double(rng.state) + randoms = np.asarray(random_values) + + return randoms + +# cython example 2 +@cython.boundscheck(False) +@cython.wraparound(False) +def uint10_uniforms(Py_ssize_t n): + """Uniform 10 bit integers stored as 16-bit unsigned integers""" + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef uint16_t[::1] random_values + cdef int bits_remaining + cdef int width = 10 + cdef uint64_t buff, mask = 0x3FF + + x = PCG64() + capsule = x.capsule + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + rng = PyCapsule_GetPointer(capsule, capsule_name) + random_values = np.empty(n, dtype='uint16') + # Best practice is to release GIL and acquire the lock + bits_remaining = 0 + with x.lock, nogil: + for i in range(n): + if bits_remaining < width: + buff = rng.next_uint64(rng.state) + random_values[i] = buff & mask + buff >>= width + + randoms = np.asarray(random_values) + return randoms + +# cython example 3 +def uniforms_ex(bit_generator, Py_ssize_t n, dtype=np.float64): + """ + Create an array of `n` uniformly distributed doubles via a "fill" function. + + A 'real' distribution would want to process the values into + some non-uniform distribution + + Parameters + ---------- + bit_generator: BitGenerator instance + n: int + Output vector length + dtype: {str, dtype}, optional + Desired dtype, either 'd' (or 'float64') or 'f' (or 'float32'). 
The + default dtype value is 'd' + """ + cdef Py_ssize_t i + cdef bitgen_t *rng + cdef const char *capsule_name = "BitGenerator" + cdef np.ndarray randoms + + capsule = bit_generator.capsule + # Optional check that the capsule is from a BitGenerator + if not PyCapsule_IsValid(capsule, capsule_name): + raise ValueError("Invalid pointer to anon_func_state") + # Cast the pointer + rng = PyCapsule_GetPointer(capsule, capsule_name) + + _dtype = np.dtype(dtype) + randoms = np.empty(n, dtype=_dtype) + if _dtype == np.float32: + with bit_generator.lock: + random_standard_uniform_fill_f(rng, n, np.PyArray_DATA(randoms)) + elif _dtype == np.float64: + with bit_generator.lock: + random_standard_uniform_fill(rng, n, np.PyArray_DATA(randoms)) + else: + raise TypeError('Unsupported dtype %r for random' % _dtype) + return randoms + diff --git a/python/numpy/random/_examples/cython/meson.build b/python/numpy/random/_examples/cython/meson.build new file mode 100644 index 000000000..7aa367d13 --- /dev/null +++ b/python/numpy/random/_examples/cython/meson.build @@ -0,0 +1,53 @@ +project('random-build-examples', 'c', 'cpp', 'cython') + +py_mod = import('python') +py3 = py_mod.find_installation(pure: false) + +cc = meson.get_compiler('c') +cy = meson.get_compiler('cython') + +# Keep synced with pyproject.toml +if not cy.version().version_compare('>=3.0.6') + error('tests require Cython >= 3.0.6') +endif + +base_cython_args = [] +if cy.version().version_compare('>=3.1.0') + base_cython_args += ['-Xfreethreading_compatible=True'] +endif + +_numpy_abs = run_command(py3, ['-c', + 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], + check: true).stdout().strip() + +npymath_path = _numpy_abs / '_core' / 'lib' +npy_include_path = _numpy_abs / '_core' / 'include' +npyrandom_path = _numpy_abs / 'random' / 'lib' +npymath_lib = cc.find_library('npymath', dirs: npymath_path) +npyrandom_lib = cc.find_library('npyrandom', dirs: npyrandom_path) + +py3.extension_module( + 'extending_distributions', + 'extending_distributions.pyx', + install: false, + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, +) +py3.extension_module( + 'extending', + 'extending.pyx', + install: false, + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, +) +py3.extension_module( + 'extending_cpp', + 'extending_distributions.pyx', + install: false, + override_options : ['cython_language=cpp'], + cython_args: base_cython_args + ['--module-name', 'extending_cpp'], + include_directories: [npy_include_path], + dependencies: [npyrandom_lib, npymath_lib], +) diff --git a/python/numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc b/python/numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc new file mode 100644 index 000000000..f79f80290 Binary files /dev/null and b/python/numpy/random/_examples/numba/__pycache__/extending.cpython-312.pyc differ diff --git a/python/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc b/python/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc new file mode 100644 index 000000000..49a12709f Binary files /dev/null and b/python/numpy/random/_examples/numba/__pycache__/extending_distributions.cpython-312.pyc differ diff --git a/python/numpy/random/_examples/numba/extending.py b/python/numpy/random/_examples/numba/extending.py new file mode 100644 index
000000000..c1d0f4fbd --- /dev/null +++ b/python/numpy/random/_examples/numba/extending.py @@ -0,0 +1,86 @@ +from timeit import timeit + +import numba as nb + +import numpy as np +from numpy.random import PCG64 + +bit_gen = PCG64() +next_d = bit_gen.cffi.next_double +state_addr = bit_gen.cffi.state_address + +def normals(n, state): + out = np.empty(n) + for i in range((n + 1) // 2): + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + while r2 >= 1.0 or r2 == 0.0: + x1 = 2.0 * next_d(state) - 1.0 + x2 = 2.0 * next_d(state) - 1.0 + r2 = x1 * x1 + x2 * x2 + f = np.sqrt(-2.0 * np.log(r2) / r2) + out[2 * i] = f * x1 + if 2 * i + 1 < n: + out[2 * i + 1] = f * x2 + return out + + +# Compile using Numba +normalsj = nb.jit(normals, nopython=True) +# Must use state address not state with numba +n = 10000 + +def numbacall(): + return normalsj(n, state_addr) + + +rg = np.random.Generator(PCG64()) + +def numpycall(): + return rg.normal(size=n) + + +# Check that the functions work +r1 = numbacall() +r2 = numpycall() +assert r1.shape == (n,) +assert r1.shape == r2.shape + +t1 = timeit(numbacall, number=1000) +print(f'{t1:.2f} secs for {n} PCG64 (Numba/PCG64) gaussian randoms') +t2 = timeit(numpycall, number=1000) +print(f'{t2:.2f} secs for {n} PCG64 (NumPy/PCG64) gaussian randoms') + +# example 2 + +next_u32 = bit_gen.ctypes.next_uint32 +ctypes_state = bit_gen.ctypes.state + +@nb.jit(nopython=True) +def bounded_uint(lb, ub, state): + mask = delta = ub - lb + mask |= mask >> 1 + mask |= mask >> 2 + mask |= mask >> 4 + mask |= mask >> 8 + mask |= mask >> 16 + + val = next_u32(state) & mask + while val > delta: + val = next_u32(state) & mask + + return lb + val + + +print(bounded_uint(323, 2394691, ctypes_state.value)) + + +@nb.jit(nopython=True) +def bounded_uints(lb, ub, n, state): + out = np.empty(n, dtype=np.uint32) + for i in range(n): + out[i] = bounded_uint(lb, ub, state) + + +bounded_uints(323, 2394691, 10000000, ctypes_state.value) diff --git a/python/numpy/random/_examples/numba/extending_distributions.py b/python/numpy/random/_examples/numba/extending_distributions.py new file mode 100644 index 000000000..d0462e73e --- /dev/null +++ b/python/numpy/random/_examples/numba/extending_distributions.py @@ -0,0 +1,67 @@ +r""" +Building the required library in this example requires a source distribution +of NumPy or clone of the NumPy git repository since distributions.c is not +included in binary distributions. 
+
+On *nix, execute in numpy/random/src/distributions
+
+export PYTHON_VERSION=3.8  # Python version
+export PYTHON_INCLUDE=#path to Python's include folder, usually \
+    ${PYTHON_HOME}/include/python${PYTHON_VERSION}m
+export NUMPY_INCLUDE=#path to numpy's include folder, usually \
+    ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/_core/include
+gcc -shared -o libdistributions.so -fPIC distributions.c \
+    -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
+mv libdistributions.so ../../_examples/numba/
+
+On Windows
+
+rem PYTHON_HOME and PYTHON_VERSION are setup dependent, this is an example
+set PYTHON_HOME=c:\Anaconda
+set PYTHON_VERSION=38
+cl.exe /LD .\distributions.c -DDLL_EXPORT ^
+    -I%PYTHON_HOME%\lib\site-packages\numpy\_core\include ^
+    -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
+move distributions.dll ../../_examples/numba/
+"""
+import os
+
+import numba as nb
+from cffi import FFI
+
+import numpy as np
+from numpy.random import PCG64
+
+ffi = FFI()
+if os.path.exists('./distributions.dll'):
+    lib = ffi.dlopen('./distributions.dll')
+elif os.path.exists('./libdistributions.so'):
+    lib = ffi.dlopen('./libdistributions.so')
+else:
+    raise RuntimeError('Required DLL/so file was not found.')
+
+ffi.cdef("""
+double random_standard_normal(void *bitgen_state);
+""")
+x = PCG64()
+xffi = x.cffi
+bit_generator = xffi.bit_generator
+
+random_standard_normal = lib.random_standard_normal
+
+
+def normals(n, bit_generator):
+    out = np.empty(n)
+    for i in range(n):
+        out[i] = random_standard_normal(bit_generator)
+    return out
+
+
+normalsj = nb.jit(normals, nopython=True)
+
+# Numba requires a memory address for void *
+# Can also get address from x.ctypes.bit_generator.value
+bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
+
+norm = normalsj(1000, bit_generator_address)
+print(norm[:12])
diff --git a/python/numpy/random/_generator.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/_generator.cpython-312-x86_64-linux-gnu.so
new file mode 100644
index 000000000..862def303
Binary files /dev/null and b/python/numpy/random/_generator.cpython-312-x86_64-linux-gnu.so differ
diff --git a/python/numpy/random/_generator.pyi b/python/numpy/random/_generator.pyi
new file mode 100644
index 000000000..6d7ef5e6c
--- /dev/null
+++ b/python/numpy/random/_generator.pyi
@@ -0,0 +1,861 @@
+from collections.abc import Callable, MutableSequence
+from typing import Any, Literal, TypeAlias, TypeVar, overload
+
+import numpy as np
+from numpy import dtype, float32, float64, int64
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NDArray,
+    _ArrayLikeFloat_co,
+    _ArrayLikeInt_co,
+    _BoolCodes,
+    _DoubleCodes,
+    _DTypeLike,
+    _DTypeLikeBool,
+    _Float32Codes,
+    _Float64Codes,
+    _FloatLike_co,
+    _Int8Codes,
+    _Int16Codes,
+    _Int32Codes,
+    _Int64Codes,
+    _IntPCodes,
+    _ShapeLike,
+    _SingleCodes,
+    _SupportsDType,
+    _UInt8Codes,
+    _UInt16Codes,
+    _UInt32Codes,
+    _UInt64Codes,
+    _UIntPCodes,
+)
+from numpy.random import BitGenerator, RandomState, SeedSequence
+
+_IntegerT = TypeVar("_IntegerT", bound=np.integer)
+
+_DTypeLikeFloat32: TypeAlias = (
+    dtype[float32]
+    | _SupportsDType[dtype[float32]]
+    | type[float32]
+    | _Float32Codes
+    | _SingleCodes
+)
+
+_DTypeLikeFloat64: TypeAlias = (
+    dtype[float64]
+    | _SupportsDType[dtype[float64]]
+    | type[float]
+    | type[float64]
+    | _Float64Codes
+    | _DoubleCodes
+)
+
+class Generator:
+    def __init__(self, bit_generator: BitGenerator) -> None: ...
+    def __repr__(self) -> str: ...
+    def __str__(self) -> str: ...
+ def __getstate__(self) -> None: ... + def __setstate__(self, state: dict[str, Any] | None) -> None: ... + def __reduce__(self) -> tuple[ + Callable[[BitGenerator], Generator], + tuple[BitGenerator], + None]: ... + @property + def bit_generator(self) -> BitGenerator: ... + def spawn(self, n_children: int) -> list[Generator]: ... + def bytes(self, length: int) -> bytes: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + ) -> NDArray[float64]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + *, + out: NDArray[float64] = ..., + ) -> NDArray[float64]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = ..., + ) -> NDArray[float32]: ... + @overload + def standard_normal( # type: ignore[misc] + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def permutation(self, x: int, axis: int = ...) -> NDArray[int64]: ... + @overload + def permutation(self, x: ArrayLike, axis: int = ...) -> NDArray[Any]: ... + @overload + def standard_exponential( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + *, + out: NDArray[float64] = ..., + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + *, + method: Literal["zig", "inv"] = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + method: Literal["zig", "inv"] = ..., + out: NDArray[float32] | None = ..., + ) -> NDArray[float32]: ... + @overload + def standard_exponential( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + method: Literal["zig", "inv"] = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def random( # type: ignore[misc] + self, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def random( + self, + *, + out: NDArray[float64] = ..., + ) -> NDArray[float64]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + *, + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = ..., + ) -> NDArray[float32]: ... + @overload + def random( + self, + size: _ShapeLike = ..., + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def beta( + self, + a: _FloatLike_co, + b: _FloatLike_co, + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def beta( + self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def exponential(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def exponential(self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ...) -> NDArray[float64]: ... + + # + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> np.int64: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: type[bool], + endpoint: bool = False, + ) -> bool: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: type[int], + endpoint: bool = False, + ) -> int: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[np.bool] | _BoolCodes, + endpoint: bool = False, + ) -> np.bool: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> _IntegerT: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: _DTypeLike[np.int64] | _Int64Codes = ..., + endpoint: bool = False, + ) -> NDArray[np.int64]: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLikeBool, + endpoint: bool = False, + ) -> NDArray[np.bool]: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _DTypeLike[_IntegerT], + endpoint: bool = False, + ) -> NDArray[_IntegerT]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> np.int8: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int8Codes, + endpoint: bool = False, + ) -> NDArray[np.int8]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> np.uint8: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt8Codes, + endpoint: bool = False, + ) -> NDArray[np.uint8]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> np.int16: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int16Codes, + endpoint: bool = False, + ) -> NDArray[np.int16]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> np.uint16: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt16Codes, + endpoint: bool = False, + ) -> NDArray[np.uint16]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> np.int32: ... 
+ @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _Int32Codes, + endpoint: bool = False, + ) -> NDArray[np.int32]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> np.uint32: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt32Codes, + endpoint: bool = False, + ) -> NDArray[np.uint32]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> np.uint64: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UInt64Codes, + endpoint: bool = False, + ) -> NDArray[np.uint64]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> np.intp: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _IntPCodes, + endpoint: bool = False, + ) -> NDArray[np.intp]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> np.uintp: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + *, + dtype: _UIntPCodes, + endpoint: bool = False, + ) -> NDArray[np.uintp]: ... + @overload + def integers( + self, + low: int, + high: int | None = None, + size: None = None, + dtype: DTypeLike = ..., + endpoint: bool = False, + ) -> Any: ... + @overload + def integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = None, + size: _ShapeLike | None = None, + dtype: DTypeLike = ..., + endpoint: bool = False, + ) -> NDArray[Any]: ... + + # TODO: Use a TypeVar _T here to get away from Any output? + # Should be int->NDArray[int64], ArrayLike[_T] -> _T | NDArray[Any] + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> NDArray[int64]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + axis: int = ..., + shuffle: bool = ..., + ) -> NDArray[Any]: ... + @overload + def uniform( + self, + low: _FloatLike_co = ..., + high: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def normal( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... 
# type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: _FloatLike_co, + size: None = ..., + dtype: _DTypeLikeFloat32 | _DTypeLikeFloat64 = ..., + out: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + *, + out: NDArray[float64] = ..., + ) -> NDArray[float64]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + dtype: _DTypeLikeFloat32 = ..., + out: NDArray[float32] | None = ..., + ) -> NDArray[float32]: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + dtype: _DTypeLikeFloat64 = ..., + out: NDArray[float64] | None = ..., + ) -> NDArray[float64]: ... + @overload + def gamma( + self, shape: _FloatLike_co, scale: _FloatLike_co = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def f( + self, dfnum: _FloatLike_co, dfden: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def noncentral_f( + self, + dfnum: _FloatLike_co, + dfden: _FloatLike_co, + nonc: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def chisquare(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def noncentral_chisquare( + self, df: _FloatLike_co, nonc: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_t(self, df: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> NDArray[float64]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> NDArray[float64]: ... + @overload + def vonmises( + self, mu: _FloatLike_co, kappa: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def pareto(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def weibull(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... 
+ ) -> NDArray[float64]: ... + @overload + def power(self, a: _FloatLike_co, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + @overload + def laplace( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def gumbel( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def logistic( + self, + loc: _FloatLike_co = ..., + scale: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def lognormal( + self, + mean: _FloatLike_co = ..., + sigma: _FloatLike_co = ..., + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def rayleigh(self, scale: _FloatLike_co = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def wald( + self, mean: _FloatLike_co, scale: _FloatLike_co, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def wald( + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def triangular( + self, + left: _FloatLike_co, + mode: _FloatLike_co, + right: _FloatLike_co, + size: None = ..., + ) -> float: ... # type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def binomial(self, n: int, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + @overload + def negative_binomial( + self, n: _FloatLike_co, p: _FloatLike_co, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + @overload + def poisson(self, lam: _FloatLike_co = ..., size: None = ...) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + @overload + def zipf(self, a: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... 
+ @overload + def geometric(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + @overload + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: _ShapeLike | None = ..., + ) -> NDArray[int64]: ... + @overload + def logseries(self, p: _FloatLike_co, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + *, + method: Literal["svd", "eigh", "cholesky"] = ..., + ) -> NDArray[float64]: ... + def multinomial( + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[int64]: ... + def multivariate_hypergeometric( + self, + colors: _ArrayLikeInt_co, + nsample: int, + size: _ShapeLike | None = ..., + method: Literal["marginals", "count"] = ..., + ) -> NDArray[int64]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + def permuted( + self, x: ArrayLike, *, axis: int | None = ..., out: NDArray[Any] | None = ... + ) -> NDArray[Any]: ... + + # axis must be 0 for MutableSequence + @overload + def shuffle(self, /, x: np.ndarray, axis: int = 0) -> None: ... + @overload + def shuffle(self, /, x: MutableSequence[Any], axis: Literal[0] = 0) -> None: ... + +def default_rng( + seed: _ArrayLikeInt_co | SeedSequence | BitGenerator | Generator | RandomState | None = ... +) -> Generator: ... diff --git a/python/numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..d5155cfe0 Binary files /dev/null and b/python/numpy/random/_mt19937.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/random/_mt19937.pyi b/python/numpy/random/_mt19937.pyi new file mode 100644 index 000000000..70b2506da --- /dev/null +++ b/python/numpy/random/_mt19937.pyi @@ -0,0 +1,25 @@ +from typing import TypedDict, type_check_only + +from numpy import uint32 +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +@type_check_only +class _MT19937Internal(TypedDict): + key: NDArray[uint32] + pos: int + +@type_check_only +class _MT19937State(TypedDict): + bit_generator: str + state: _MT19937Internal + +class MT19937(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None: ... + def jumped(self, jumps: int = ...) -> MT19937: ... + @property + def state(self) -> _MT19937State: ... + @state.setter + def state(self, value: _MT19937State) -> None: ... 
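The _MT19937State TypedDict above mirrors the dict that the `state` property returns at runtime. As a minimal sketch of why that typed state matters (assuming a standard NumPy install; the variable names below are illustrative), capturing and restoring it replays the stream exactly:

    import numpy as np
    from numpy.random import Generator, MT19937

    mt = MT19937(1234)
    g = Generator(mt)
    snapshot = mt.state            # {'bit_generator': 'MT19937', 'state': {'key': ..., 'pos': ...}}
    first = g.standard_normal(3)
    mt.state = snapshot            # rewind the underlying bit stream
    assert np.allclose(first, g.standard_normal(3))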
diff --git a/python/numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..34fd12e1c Binary files /dev/null and b/python/numpy/random/_pcg64.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/random/_pcg64.pyi b/python/numpy/random/_pcg64.pyi new file mode 100644 index 000000000..5dc7bb663 --- /dev/null +++ b/python/numpy/random/_pcg64.pyi @@ -0,0 +1,44 @@ +from typing import TypedDict, type_check_only + +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +@type_check_only +class _PCG64Internal(TypedDict): + state: int + inc: int + +@type_check_only +class _PCG64State(TypedDict): + bit_generator: str + state: _PCG64Internal + has_uint32: int + uinteger: int + +class PCG64(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64: ... + +class PCG64DXSM(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + def jumped(self, jumps: int = ...) -> PCG64DXSM: ... + @property + def state( + self, + ) -> _PCG64State: ... + @state.setter + def state( + self, + value: _PCG64State, + ) -> None: ... + def advance(self, delta: int) -> PCG64DXSM: ... diff --git a/python/numpy/random/_philox.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/_philox.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..f60086ba6 Binary files /dev/null and b/python/numpy/random/_philox.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/random/_philox.pyi b/python/numpy/random/_philox.pyi new file mode 100644 index 000000000..d8895bba6 --- /dev/null +++ b/python/numpy/random/_philox.pyi @@ -0,0 +1,39 @@ +from typing import TypedDict, type_check_only + +from numpy import uint64 +from numpy._typing import _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence +from numpy.typing import NDArray + +@type_check_only +class _PhiloxInternal(TypedDict): + counter: NDArray[uint64] + key: NDArray[uint64] + +@type_check_only +class _PhiloxState(TypedDict): + bit_generator: str + state: _PhiloxInternal + buffer: NDArray[uint64] + buffer_pos: int + has_uint32: int + uinteger: int + +class Philox(BitGenerator): + def __init__( + self, + seed: _ArrayLikeInt_co | SeedSequence | None = ..., + counter: _ArrayLikeInt_co | None = ..., + key: _ArrayLikeInt_co | None = ..., + ) -> None: ... + @property + def state( + self, + ) -> _PhiloxState: ... + @state.setter + def state( + self, + value: _PhiloxState, + ) -> None: ... + def jumped(self, jumps: int = ...) -> Philox: ... + def advance(self, delta: int) -> Philox: ... 
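The PCG64, PCG64DXSM, and Philox stubs above all expose `jumped` (and `advance`); these are the hooks for deriving non-overlapping parallel streams from a single seed. A minimal sketch, assuming a standard NumPy install (per the NumPy docs, each PCG64 jump moves the state ahead by roughly 2**127 draws):

    from numpy.random import Generator, PCG64

    root = PCG64(42)
    # Each jumped(i) returns a fresh bit generator i jumps ahead of root,
    # so the four streams below do not overlap in practice.
    streams = [Generator(root.jumped(i)) for i in range(4)]
    draws = [g.random(3) for g in streams]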
diff --git a/python/numpy/random/_pickle.py b/python/numpy/random/_pickle.py
new file mode 100644
index 000000000..05f7232e6
--- /dev/null
+++ b/python/numpy/random/_pickle.py
@@ -0,0 +1,88 @@
+from ._generator import Generator
+from ._mt19937 import MT19937
+from ._pcg64 import PCG64, PCG64DXSM
+from ._philox import Philox
+from ._sfc64 import SFC64
+from .bit_generator import BitGenerator
+from .mtrand import RandomState
+
+BitGenerators = {'MT19937': MT19937,
+                 'PCG64': PCG64,
+                 'PCG64DXSM': PCG64DXSM,
+                 'Philox': Philox,
+                 'SFC64': SFC64,
+                 }
+
+
+def __bit_generator_ctor(bit_generator: str | type[BitGenerator] = 'MT19937'):
+    """
+    Pickling helper function that returns a bit generator object
+
+    Parameters
+    ----------
+    bit_generator : type[BitGenerator] or str
+        BitGenerator class or string containing the name of the BitGenerator
+
+    Returns
+    -------
+    BitGenerator
+        BitGenerator instance
+    """
+    if isinstance(bit_generator, type):
+        bit_gen_class = bit_generator
+    elif bit_generator in BitGenerators:
+        bit_gen_class = BitGenerators[bit_generator]
+    else:
+        raise ValueError(
+            str(bit_generator) + ' is not a known BitGenerator module.'
+        )
+
+    return bit_gen_class()
+
+
+def __generator_ctor(bit_generator_name="MT19937",
+                     bit_generator_ctor=__bit_generator_ctor):
+    """
+    Pickling helper function that returns a Generator object
+
+    Parameters
+    ----------
+    bit_generator_name : str or BitGenerator
+        String containing the core BitGenerator's name or a
+        BitGenerator instance
+    bit_generator_ctor : callable, optional
+        Callable function that takes bit_generator_name as its only argument
+        and returns an instantiated bit generator.
+
+    Returns
+    -------
+    rg : Generator
+        Generator using the named core BitGenerator
+    """
+    if isinstance(bit_generator_name, BitGenerator):
+        return Generator(bit_generator_name)
+    # Legacy path that uses a bit generator name and ctor
+    return Generator(bit_generator_ctor(bit_generator_name))
+
+
+def __randomstate_ctor(bit_generator_name="MT19937",
+                       bit_generator_ctor=__bit_generator_ctor):
+    """
+    Pickling helper function that returns a legacy RandomState-like object
+
+    Parameters
+    ----------
+    bit_generator_name : str
+        String containing the core BitGenerator's name
+    bit_generator_ctor : callable, optional
+        Callable function that takes bit_generator_name as its only argument
+        and returns an instantiated bit generator.
+
+    Returns
+    -------
+    rs : RandomState
+        Legacy RandomState using the named core BitGenerator
+    """
+    if isinstance(bit_generator_name, BitGenerator):
+        return RandomState(bit_generator_name)
+    return RandomState(bit_generator_ctor(bit_generator_name))
diff --git a/python/numpy/random/_pickle.pyi b/python/numpy/random/_pickle.pyi
new file mode 100644
index 000000000..b8b1b7bcf
--- /dev/null
+++ b/python/numpy/random/_pickle.pyi
@@ -0,0 +1,43 @@
+from collections.abc import Callable
+from typing import Final, Literal, TypedDict, TypeVar, overload, type_check_only
+
+from numpy.random._generator import Generator
+from numpy.random._mt19937 import MT19937
+from numpy.random._pcg64 import PCG64, PCG64DXSM
+from numpy.random._philox import Philox
+from numpy.random._sfc64 import SFC64
+from numpy.random.bit_generator import BitGenerator
+from numpy.random.mtrand import RandomState
+
+_T = TypeVar("_T", bound=BitGenerator)
+
+@type_check_only
+class _BitGenerators(TypedDict):
+    MT19937: type[MT19937]
+    PCG64: type[PCG64]
+    PCG64DXSM: type[PCG64DXSM]
+    Philox: type[Philox]
+    SFC64: type[SFC64]
+
+BitGenerators: Final[_BitGenerators] = ...
+ +@overload +def __bit_generator_ctor(bit_generator: Literal["MT19937"] = "MT19937") -> MT19937: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64"]) -> PCG64: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["PCG64DXSM"]) -> PCG64DXSM: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["Philox"]) -> Philox: ... +@overload +def __bit_generator_ctor(bit_generator: Literal["SFC64"]) -> SFC64: ... +@overload +def __bit_generator_ctor(bit_generator: type[_T]) -> _T: ... +def __generator_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> Generator: ... +def __randomstate_ctor( + bit_generator_name: str | type[BitGenerator] | BitGenerator = "MT19937", + bit_generator_ctor: Callable[[str | type[BitGenerator]], BitGenerator] = ..., +) -> RandomState: ... diff --git a/python/numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..184b963d1 Binary files /dev/null and b/python/numpy/random/_sfc64.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/random/_sfc64.pyi b/python/numpy/random/_sfc64.pyi new file mode 100644 index 000000000..a6f0d8445 --- /dev/null +++ b/python/numpy/random/_sfc64.pyi @@ -0,0 +1,28 @@ +from typing import TypedDict, type_check_only + +from numpy import uint64 +from numpy._typing import NDArray, _ArrayLikeInt_co +from numpy.random.bit_generator import BitGenerator, SeedSequence + +@type_check_only +class _SFC64Internal(TypedDict): + state: NDArray[uint64] + +@type_check_only +class _SFC64State(TypedDict): + bit_generator: str + state: _SFC64Internal + has_uint32: int + uinteger: int + +class SFC64(BitGenerator): + def __init__(self, seed: _ArrayLikeInt_co | SeedSequence | None = ...) -> None: ... + @property + def state( + self, + ) -> _SFC64State: ... + @state.setter + def state( + self, + value: _SFC64State, + ) -> None: ... 
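The _pickle ctor helpers above exist so that pickling a Generator or RandomState preserves the exact position in the stream; the `__reduce__` signatures in the stubs route through them. A small sketch of the observable behavior, assuming a standard NumPy install:

    import pickle
    from numpy.random import Generator, PCG64

    g = Generator(PCG64(7))
    g.random(5)                              # advance the stream
    clone = pickle.loads(pickle.dumps(g))    # rebuilt via __generator_ctor
    assert clone.bit_generator.state == g.bit_generator.state
    assert clone.random() == g.random()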
diff --git a/python/numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..e59b981fc Binary files /dev/null and b/python/numpy/random/bit_generator.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/random/bit_generator.pxd b/python/numpy/random/bit_generator.pxd new file mode 100644 index 000000000..dfa7d0a71 --- /dev/null +++ b/python/numpy/random/bit_generator.pxd @@ -0,0 +1,35 @@ +cimport numpy as np +from libc.stdint cimport uint32_t, uint64_t + +cdef extern from "numpy/random/bitgen.h": + struct bitgen: + void *state + uint64_t (*next_uint64)(void *st) nogil + uint32_t (*next_uint32)(void *st) nogil + double (*next_double)(void *st) nogil + uint64_t (*next_raw)(void *st) nogil + + ctypedef bitgen bitgen_t + +cdef class BitGenerator(): + cdef readonly object _seed_seq + cdef readonly object lock + cdef bitgen_t _bitgen + cdef readonly object _ctypes + cdef readonly object _cffi + cdef readonly object capsule + + +cdef class SeedSequence(): + cdef readonly object entropy + cdef readonly tuple spawn_key + cdef readonly Py_ssize_t pool_size + cdef readonly object pool + cdef readonly uint32_t n_children_spawned + + cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer, + np.ndarray[np.npy_uint32, ndim=1] entropy_array) + cdef get_assembled_entropy(self) + +cdef class SeedlessSequence(): + pass diff --git a/python/numpy/random/bit_generator.pyi b/python/numpy/random/bit_generator.pyi new file mode 100644 index 000000000..6ce4f4b9d --- /dev/null +++ b/python/numpy/random/bit_generator.pyi @@ -0,0 +1,124 @@ +import abc +from collections.abc import Callable, Mapping, Sequence +from threading import Lock +from typing import ( + Any, + ClassVar, + Literal, + NamedTuple, + Self, + TypeAlias, + TypedDict, + overload, + type_check_only, +) + +from _typeshed import Incomplete +from typing_extensions import CapsuleType + +import numpy as np +from numpy._typing import ( + NDArray, + _ArrayLikeInt_co, + _DTypeLike, + _ShapeLike, + _UInt32Codes, + _UInt64Codes, +) + +__all__ = ["BitGenerator", "SeedSequence"] + +### + +_DTypeLikeUint_: TypeAlias = _DTypeLike[np.uint32 | np.uint64] | _UInt32Codes | _UInt64Codes + +@type_check_only +class _SeedSeqState(TypedDict): + entropy: int | Sequence[int] | None + spawn_key: tuple[int, ...] + pool_size: int + n_children_spawned: int + +@type_check_only +class _Interface(NamedTuple): + state_address: Incomplete + state: Incomplete + next_uint64: Incomplete + next_uint32: Incomplete + next_double: Incomplete + bit_generator: Incomplete + +@type_check_only +class _CythonMixin: + def __setstate_cython__(self, pyx_state: object, /) -> None: ... + def __reduce_cython__(self) -> Any: ... # noqa: ANN401 + +@type_check_only +class _GenerateStateMixin(_CythonMixin): + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +### + +class ISeedSequence(abc.ABC): + @abc.abstractmethod + def generate_state(self, /, n_words: int, dtype: _DTypeLikeUint_ = ...) -> NDArray[np.uint32 | np.uint64]: ... + +class ISpawnableSeedSequence(ISeedSequence, abc.ABC): + @abc.abstractmethod + def spawn(self, /, n_children: int) -> list[Self]: ... + +class SeedlessSeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + def spawn(self, /, n_children: int) -> list[Self]: ... + +class SeedSequence(_GenerateStateMixin, ISpawnableSeedSequence): + __pyx_vtable__: ClassVar[CapsuleType] = ... 
+ + entropy: int | Sequence[int] | None + spawn_key: tuple[int, ...] + pool_size: int + n_children_spawned: int + pool: NDArray[np.uint32] + + def __init__( + self, + /, + entropy: _ArrayLikeInt_co | None = None, + *, + spawn_key: Sequence[int] = (), + pool_size: int = 4, + n_children_spawned: int = ..., + ) -> None: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + @property + def state(self) -> _SeedSeqState: ... + +class BitGenerator(_CythonMixin, abc.ABC): + lock: Lock + @property + def state(self) -> Mapping[str, Any]: ... + @state.setter + def state(self, value: Mapping[str, Any], /) -> None: ... + @property + def seed_seq(self) -> ISeedSequence: ... + @property + def ctypes(self) -> _Interface: ... + @property + def cffi(self) -> _Interface: ... + @property + def capsule(self) -> CapsuleType: ... + + # + def __init__(self, /, seed: _ArrayLikeInt_co | SeedSequence | None = None) -> None: ... + def __reduce__(self) -> tuple[Callable[[str], Self], tuple[str], tuple[Mapping[str, Any], ISeedSequence]]: ... + def spawn(self, /, n_children: int) -> list[Self]: ... + def _benchmark(self, /, cnt: int, method: str = "uint64") -> None: ... + + # + @overload + def random_raw(self, /, size: None = None, output: Literal[True] = True) -> int: ... + @overload + def random_raw(self, /, size: _ShapeLike, output: Literal[True] = True) -> NDArray[np.uint64]: ... + @overload + def random_raw(self, /, size: _ShapeLike | None, output: Literal[False]) -> None: ... + @overload + def random_raw(self, /, size: _ShapeLike | None = None, *, output: Literal[False]) -> None: ... diff --git a/python/numpy/random/c_distributions.pxd b/python/numpy/random/c_distributions.pxd new file mode 100644 index 000000000..da790ca49 --- /dev/null +++ b/python/numpy/random/c_distributions.pxd @@ -0,0 +1,119 @@ +#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3 +from numpy cimport npy_intp + +from libc.stdint cimport (uint64_t, int32_t, int64_t) +from numpy.random cimport bitgen_t + +cdef extern from "numpy/random/distributions.h": + + struct s_binomial_t: + int has_binomial + double psave + int64_t nsave + double r + double q + double fm + int64_t m + double p1 + double xm + double xl + double xr + double c + double laml + double lamr + double p2 + double p3 + double p4 + + ctypedef s_binomial_t binomial_t + + float random_standard_uniform_f(bitgen_t *bitgen_state) nogil + double random_standard_uniform(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill(bitgen_t* bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_uniform_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + + double random_standard_exponential(bitgen_t *bitgen_state) nogil + float random_standard_exponential_f(bitgen_t *bitgen_state) nogil + void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + void random_standard_exponential_inv_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) nogil + void random_standard_exponential_inv_fill_f(bitgen_t *bitgen_state, npy_intp cnt, float *out) nogil + + double random_standard_normal(bitgen_t* bitgen_state) nogil + float random_standard_normal_f(bitgen_t *bitgen_state) nogil + void random_standard_normal_fill(bitgen_t *bitgen_state, npy_intp count, double *out) nogil + void random_standard_normal_fill_f(bitgen_t *bitgen_state, npy_intp count, float *out) nogil + double 
random_standard_gamma(bitgen_t *bitgen_state, double shape) nogil + float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil + + float random_standard_uniform_f(bitgen_t *bitgen_state) nogil + void random_standard_uniform_fill_f(bitgen_t* bitgen_state, npy_intp cnt, float *out) nogil + float random_standard_normal_f(bitgen_t* bitgen_state) nogil + float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil + + int64_t random_positive_int64(bitgen_t *bitgen_state) nogil + int32_t random_positive_int32(bitgen_t *bitgen_state) nogil + int64_t random_positive_int(bitgen_t *bitgen_state) nogil + uint64_t random_uint(bitgen_t *bitgen_state) nogil + + double random_normal(bitgen_t *bitgen_state, double loc, double scale) nogil + + double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil + float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale) nogil + + double random_exponential(bitgen_t *bitgen_state, double scale) nogil + double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil + double random_beta(bitgen_t *bitgen_state, double a, double b) nogil + double random_chisquare(bitgen_t *bitgen_state, double df) nogil + double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil + double random_standard_cauchy(bitgen_t *bitgen_state) nogil + double random_pareto(bitgen_t *bitgen_state, double a) nogil + double random_weibull(bitgen_t *bitgen_state, double a) nogil + double random_power(bitgen_t *bitgen_state, double a) nogil + double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil + double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil + double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil + double random_standard_t(bitgen_t *bitgen_state, double df) nogil + double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, + double nonc) nogil + double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, + double dfden, double nonc) nogil + double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil + double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil + double random_triangular(bitgen_t *bitgen_state, double left, double mode, + double right) nogil + + int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil + int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil + int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil + int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil + int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil + int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil + int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, + int64_t sample) nogil + + uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil + + # Generate random uint64 numbers in closed interval [off, off + rng]. 
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state, + uint64_t off, uint64_t rng, + uint64_t mask, bint use_masked) nogil + + void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix, + double *pix, npy_intp d, binomial_t *binomial) nogil + + int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state, + int64_t total, + size_t num_colors, int64_t *colors, + int64_t nsample, + size_t num_variates, int64_t *variates) nogil + diff --git a/python/numpy/random/lib/libnpyrandom.a b/python/numpy/random/lib/libnpyrandom.a new file mode 100644 index 000000000..ff738dc08 Binary files /dev/null and b/python/numpy/random/lib/libnpyrandom.a differ diff --git a/python/numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so b/python/numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so new file mode 100644 index 000000000..ba7e73301 Binary files /dev/null and b/python/numpy/random/mtrand.cpython-312-x86_64-linux-gnu.so differ diff --git a/python/numpy/random/mtrand.pyi b/python/numpy/random/mtrand.pyi new file mode 100644 index 000000000..54bb1462f --- /dev/null +++ b/python/numpy/random/mtrand.pyi @@ -0,0 +1,703 @@ +import builtins +from collections.abc import Callable +from typing import Any, Literal, overload + +import numpy as np +from numpy import ( + dtype, + float64, + int8, + int16, + int32, + int64, + int_, + long, + uint, + uint8, + uint16, + uint32, + uint64, + ulong, +) +from numpy._typing import ( + ArrayLike, + NDArray, + _ArrayLikeFloat_co, + _ArrayLikeInt_co, + _DTypeLikeBool, + _Int8Codes, + _Int16Codes, + _Int32Codes, + _Int64Codes, + _IntCodes, + _LongCodes, + _ShapeLike, + _SupportsDType, + _UInt8Codes, + _UInt16Codes, + _UInt32Codes, + _UInt64Codes, + _UIntCodes, + _ULongCodes, +) +from numpy.random.bit_generator import BitGenerator + +class RandomState: + _bit_generator: BitGenerator + def __init__(self, seed: _ArrayLikeInt_co | BitGenerator | None = ...) -> None: ... + def __repr__(self) -> str: ... + def __str__(self) -> str: ... + def __getstate__(self) -> dict[str, Any]: ... + def __setstate__(self, state: dict[str, Any]) -> None: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... # noqa: E501 + def seed(self, seed: _ArrayLikeFloat_co | None = ...) -> None: ... + @overload + def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... + @overload + def get_state( + self, legacy: Literal[True] = ... + ) -> dict[str, Any] | tuple[str, NDArray[uint32], int, int, float]: ... + def set_state( + self, state: dict[str, Any] | tuple[str, NDArray[uint32], int, int, float] + ) -> None: ... + @overload + def random_sample(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random_sample(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def random(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def random(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def beta(self, a: float, b: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def beta( + self, + a: _ArrayLikeFloat_co, + b: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def exponential(self, scale: float = ..., size: None = ...) -> float: ... 
# type: ignore[misc] + @overload + def exponential( + self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_exponential(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_exponential(self, size: _ShapeLike) -> NDArray[float64]: ... + @overload + def tomaxint(self, size: None = ...) -> int: ... # type: ignore[misc] + @overload + # Generates long values, but stores it in a 64bit int: + def tomaxint(self, size: _ShapeLike) -> NDArray[int64]: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: type[bool] = ..., + ) -> bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: type[np.bool] = ..., + ) -> np.bool: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: type[int] = ..., + ) -> int: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 + ) -> uint8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 + ) -> uint16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 + ) -> uint32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., # noqa: E501 + ) -> uint: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 + ) -> ulong: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 + ) -> uint64: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 + ) -> int8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 + ) -> int16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 + ) -> int32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., # noqa: E501 + ) -> int_: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 + ) -> long: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: int | None = ..., + size: None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., # noqa: E501 + ) -> int64: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[long]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: _DTypeLikeBool = ..., + ) -> NDArray[np.bool]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., # noqa: E501 + ) -> NDArray[int8]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., # noqa: E501 + ) -> NDArray[int16]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., # noqa: E501 + ) -> NDArray[int32]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] | None = ..., # noqa: E501 + ) -> NDArray[int64]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., # noqa: E501 + ) -> NDArray[uint8]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., # noqa: E501 + ) -> NDArray[uint16]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., # noqa: E501 + ) -> NDArray[uint32]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., # noqa: E501 + ) -> NDArray[uint64]: ... + @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[long] | type[int] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., # noqa: E501 + ) -> NDArray[long]: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., # noqa: E501 + ) -> NDArray[ulong]: ... + def bytes(self, length: int) -> builtins.bytes: ... + @overload + def choice( + self, + a: int, + size: None = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + ) -> int: ... + @overload + def choice( + self, + a: int, + size: _ShapeLike = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + ) -> NDArray[long]: ... + @overload + def choice( + self, + a: ArrayLike, + size: None = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + ) -> Any: ... + @overload + def choice( + self, + a: ArrayLike, + size: _ShapeLike = ..., + replace: bool = ..., + p: _ArrayLikeFloat_co | None = ..., + ) -> NDArray[Any]: ... + @overload + def uniform( + self, low: float = ..., high: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def uniform( + self, + low: _ArrayLikeFloat_co = ..., + high: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def rand(self) -> float: ... + @overload + def rand(self, *args: int) -> NDArray[float64]: ... + @overload + def randn(self) -> float: ... + @overload + def randn(self, *args: int) -> NDArray[float64]: ... + @overload + def random_integers( + self, low: int, high: int | None = ..., size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def random_integers( + self, + low: _ArrayLikeInt_co, + high: _ArrayLikeInt_co | None = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[long]: ... + @overload + def standard_normal(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_normal( # type: ignore[misc] + self, size: _ShapeLike = ... + ) -> NDArray[float64]: ... + @overload + def normal( + self, loc: float = ..., scale: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def normal( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def standard_gamma( # type: ignore[misc] + self, + shape: float, + size: None = ..., + ) -> float: ... + @overload + def standard_gamma( + self, + shape: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def gamma(self, shape: float, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def gamma( + self, + shape: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def f(self, dfnum: float, dfden: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def noncentral_f( + self, dfnum: float, dfden: float, nonc: float, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_f( + self, + dfnum: _ArrayLikeFloat_co, + dfden: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def chisquare(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def chisquare( + self, df: _ArrayLikeFloat_co, size: _ShapeLike | None = ... 
+ ) -> NDArray[float64]: ... + @overload + def noncentral_chisquare( + self, df: float, nonc: float, size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def noncentral_chisquare( + self, + df: _ArrayLikeFloat_co, + nonc: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_t(self, df: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: None = ... + ) -> NDArray[float64]: ... + @overload + def standard_t( + self, df: _ArrayLikeFloat_co, size: _ShapeLike = ... + ) -> NDArray[float64]: ... + @overload + def vonmises(self, mu: float, kappa: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def vonmises( + self, + mu: _ArrayLikeFloat_co, + kappa: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def pareto(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def pareto( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def weibull(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def weibull( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def power(self, a: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def power( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def standard_cauchy(self, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def standard_cauchy(self, size: _ShapeLike = ...) -> NDArray[float64]: ... + @overload + def laplace( + self, loc: float = ..., scale: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def laplace( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def gumbel( + self, loc: float = ..., scale: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def gumbel( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def logistic( + self, loc: float = ..., scale: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def logistic( + self, + loc: _ArrayLikeFloat_co = ..., + scale: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def lognormal( + self, mean: float = ..., sigma: float = ..., size: None = ... + ) -> float: ... # type: ignore[misc] + @overload + def lognormal( + self, + mean: _ArrayLikeFloat_co = ..., + sigma: _ArrayLikeFloat_co = ..., + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def rayleigh(self, scale: float = ..., size: None = ...) -> float: ... # type: ignore[misc] + @overload + def rayleigh( + self, scale: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def wald(self, mean: float, scale: float, size: None = ...) -> float: ... # type: ignore[misc] + @overload + def wald( + self, + mean: _ArrayLikeFloat_co, + scale: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + @overload + def triangular( + self, left: float, mode: float, right: float, size: None = ... + ) -> float: ... 
# type: ignore[misc] + @overload + def triangular( + self, + left: _ArrayLikeFloat_co, + mode: _ArrayLikeFloat_co, + right: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + ) -> NDArray[float64]: ... + @overload + def binomial( + self, n: int, p: float, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def binomial( + self, n: _ArrayLikeInt_co, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def negative_binomial( + self, n: float, p: float, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def negative_binomial( + self, + n: _ArrayLikeFloat_co, + p: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def poisson( + self, lam: float = ..., size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def poisson( + self, lam: _ArrayLikeFloat_co = ..., size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def zipf(self, a: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def zipf( + self, a: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def geometric(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def geometric( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + @overload + def hypergeometric( + self, ngood: int, nbad: int, nsample: int, size: None = ... + ) -> int: ... # type: ignore[misc] + @overload + def hypergeometric( + self, + ngood: _ArrayLikeInt_co, + nbad: _ArrayLikeInt_co, + nsample: _ArrayLikeInt_co, + size: _ShapeLike | None = ..., + ) -> NDArray[long]: ... + @overload + def logseries(self, p: float, size: None = ...) -> int: ... # type: ignore[misc] + @overload + def logseries( + self, p: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + def multivariate_normal( + self, + mean: _ArrayLikeFloat_co, + cov: _ArrayLikeFloat_co, + size: _ShapeLike | None = ..., + check_valid: Literal["warn", "raise", "ignore"] = ..., + tol: float = ..., + ) -> NDArray[float64]: ... + def multinomial( + self, n: _ArrayLikeInt_co, + pvals: _ArrayLikeFloat_co, + size: _ShapeLike | None = ... + ) -> NDArray[long]: ... + def dirichlet( + self, alpha: _ArrayLikeFloat_co, size: _ShapeLike | None = ... + ) -> NDArray[float64]: ... + def shuffle(self, x: ArrayLike) -> None: ... + @overload + def permutation(self, x: int) -> NDArray[long]: ... + @overload + def permutation(self, x: ArrayLike) -> NDArray[Any]: ... 
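
The overloads above encode the scalar/array contract of the legacy RandomState API for type checkers: size=None (and, for randint, a scalar dtype request) resolves to a scalar return type, while any explicit shape resolves to a typed NDArray. A minimal sketch of what the stubs promise, using only standard NumPy calls; nothing in this snippet is added by the diff itself:

    import numpy as np

    rs = np.random.RandomState(12345)

    i = rs.randint(10)                               # size=None        -> scalar
    v = rs.randint(0, 10, size=4, dtype=np.int64)    # shaped + dtype   -> ndarray of int64
    x = rs.normal()                                  # size=None        -> float
    m = rs.normal(0.0, 1.0, size=(2, 3))             # shaped           -> ndarray of float64
    c = rs.choice(5)                                 # int a, size=None -> scalar
    s = rs.choice([1, 2, 3], size=2, replace=False)  # array-like a     -> ndarray
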
+
+_rand: RandomState
+
+beta = _rand.beta
+binomial = _rand.binomial
+bytes = _rand.bytes
+chisquare = _rand.chisquare
+choice = _rand.choice
+dirichlet = _rand.dirichlet
+exponential = _rand.exponential
+f = _rand.f
+gamma = _rand.gamma
+get_state = _rand.get_state
+geometric = _rand.geometric
+gumbel = _rand.gumbel
+hypergeometric = _rand.hypergeometric
+laplace = _rand.laplace
+logistic = _rand.logistic
+lognormal = _rand.lognormal
+logseries = _rand.logseries
+multinomial = _rand.multinomial
+multivariate_normal = _rand.multivariate_normal
+negative_binomial = _rand.negative_binomial
+noncentral_chisquare = _rand.noncentral_chisquare
+noncentral_f = _rand.noncentral_f
+normal = _rand.normal
+pareto = _rand.pareto
+permutation = _rand.permutation
+poisson = _rand.poisson
+power = _rand.power
+rand = _rand.rand
+randint = _rand.randint
+randn = _rand.randn
+random = _rand.random
+random_integers = _rand.random_integers
+random_sample = _rand.random_sample
+rayleigh = _rand.rayleigh
+seed = _rand.seed
+set_state = _rand.set_state
+shuffle = _rand.shuffle
+standard_cauchy = _rand.standard_cauchy
+standard_exponential = _rand.standard_exponential
+standard_gamma = _rand.standard_gamma
+standard_normal = _rand.standard_normal
+standard_t = _rand.standard_t
+triangular = _rand.triangular
+uniform = _rand.uniform
+vonmises = _rand.vonmises
+wald = _rand.wald
+weibull = _rand.weibull
+zipf = _rand.zipf
+# Two legacy aliases that are trivial wrappers around random_sample
+sample = _rand.random_sample
+ranf = _rand.random_sample
+
+def set_bit_generator(bitgen: BitGenerator) -> None: ...
+
+def get_bit_generator() -> BitGenerator: ...
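
The assignments above bind the module-level legacy API to a single hidden RandomState instance, `_rand`: `np.random.seed`, `np.random.randint` and friends are bound methods of that one object. That is why seeding the module reproduces the stream for every module-level function, and why `set_bit_generator`/`get_bit_generator` can swap or inspect the state that backs all of them at once. A small demonstration of the shared singleton, using only the public numpy.random namespace:

    import numpy as np

    np.random.seed(42)               # delegates to _rand.seed
    a = np.random.random_sample(3)   # _rand.random_sample; `sample` and `ranf` alias it

    np.random.seed(42)               # reseeding the same singleton...
    b = np.random.random_sample(3)   # ...replays the same stream
    assert np.array_equal(a, b)
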
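The CSV files under tests/data/ added below each start with a seed line and then list the first 1,000 raw outputs of the named bit generator for that seed (32-bit words for MT19937, 64-bit for PCG64). A hedged sketch of how such a file can be replayed: the parser here is illustrative, and it assumes, as NumPy's own test_direct-style tests do, that the recorded seed is passed straight to the bit generator's constructor and compared against `random_raw`:

    from numpy.random import MT19937

    def load_testset(path):
        # Each line is "label, 0x<hex>"; the first line carries the seed.
        with open(path) as fh:
            seed = int(fh.readline().split(',')[1], 0)
            data = [int(line.split(',')[1], 0) for line in fh if line.strip()]
        return seed, data

    seed, expected = load_testset('mt19937-testset-1.csv')
    raw = MT19937(seed).random_raw(len(expected))   # raw 32-bit output stream
    assert list(raw) == expected
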
diff --git a/python/numpy/random/tests/__init__.py b/python/numpy/random/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/random/tests/__pycache__/__init__.cpython-312.pyc b/python/numpy/random/tests/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..26d60171c
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_direct.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_direct.cpython-312.pyc
new file mode 100644
index 000000000..7e27060b6
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_direct.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_extending.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_extending.cpython-312.pyc
new file mode 100644
index 000000000..2b3df935c
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_extending.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc
new file mode 100644
index 000000000..77a85582a
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_generator_mt19937.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc
new file mode 100644
index 000000000..36b1cb7cf
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_generator_mt19937_regressions.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_random.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_random.cpython-312.pyc
new file mode 100644
index 000000000..3be1df951
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_random.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc
new file mode 100644
index 000000000..9a3781a1b
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_randomstate.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc
new file mode 100644
index 000000000..42c0d3f09
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_randomstate_regression.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_regression.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_regression.cpython-312.pyc
new file mode 100644
index 000000000..56f7a83b4
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_regression.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc
new file mode 100644
index 000000000..0b03cc9fb
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_seed_sequence.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc b/python/numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc
new file mode 100644
index 000000000..966fc5b3b
Binary files /dev/null and b/python/numpy/random/tests/__pycache__/test_smoke.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/data/__init__.py b/python/numpy/random/tests/data/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc b/python/numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..be03560e8
Binary files /dev/null and b/python/numpy/random/tests/data/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/random/tests/data/generator_pcg64_np121.pkl.gz b/python/numpy/random/tests/data/generator_pcg64_np121.pkl.gz
new file mode 100644
index 000000000..b7ad03d8e
Binary files /dev/null and b/python/numpy/random/tests/data/generator_pcg64_np121.pkl.gz differ
diff --git a/python/numpy/random/tests/data/generator_pcg64_np126.pkl.gz b/python/numpy/random/tests/data/generator_pcg64_np126.pkl.gz
new file mode 100644
index 000000000..6c5130b5e
Binary files /dev/null and b/python/numpy/random/tests/data/generator_pcg64_np126.pkl.gz differ
diff --git a/python/numpy/random/tests/data/mt19937-testset-1.csv b/python/numpy/random/tests/data/mt19937-testset-1.csv
new file mode 100644
index 000000000..b97bfa66f
--- /dev/null
+++ b/python/numpy/random/tests/data/mt19937-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xc816921f
+1, 0xb3623c6d
+2, 0x5fa391bb
+3, 0x40178d9
+4, 0x7dcc9811
+5, 0x548eb8e6
+6, 0x92ba3125
+7, 0x65fde68d
+8, 0x2f81ec95
+9, 0xbd94f7a2
+10, 0xdc4d9bcc
+11, 0xa672bf13
+12, 0xb41113e
+13, 0xec7e0066
+14, 0x50239372
+15, 0xd9d66b1d
+16, 0xab72a161
+17, 0xddc2e29f
+18, 0x7ea29ab4
+19, 0x80d141ba
+20, 0xb1c7edf1
+21, 0x44d29203
+22, 0xe224d98
+23, 0x5b3e9d26
+24, 0x14fd567c
+25, 0x27d98c96
+26, 0x838779fc
+27, 0x92a138a
+28, 0x5d08965b
+29, 0x531e0ad6
+30, 0x984ee8f4
+31, 0x1ed78539
+32, 0x32bd6d8d
+33, 0xc37c8516
+34, 0x9aef5c6b
+35, 0x3aacd139
+36, 0xd96ed154 +37, 0x489cd1ed +38, 0x2cba4b3b +39, 0x76c6ae72 +40, 0x2dae02b9 +41, 0x52ac5fd6 +42, 0xc2b5e265 +43, 0x630e6a28 +44, 0x3f560d5d +45, 0x9315bdf3 +46, 0xf1055aba +47, 0x840e42c6 +48, 0xf2099c6b +49, 0x15ff7696 +50, 0x7948d146 +51, 0x97342961 +52, 0x7a7a21c +53, 0xc66f4fb1 +54, 0x23c4103e +55, 0xd7321f98 +56, 0xeb7efb75 +57, 0xe02490b5 +58, 0x2aa02de +59, 0x8bee0bf7 +60, 0xfc2da059 +61, 0xae835034 +62, 0x678f2075 +63, 0x6d03094b +64, 0x56455e05 +65, 0x18b32373 +66, 0x8ff0356b +67, 0x1fe442fb +68, 0x3f1ab6c3 +69, 0xb6fd21b +70, 0xfc310eb2 +71, 0xb19e9a4d +72, 0x17ddee72 +73, 0xfd534251 +74, 0x9e500564 +75, 0x9013a036 +76, 0xcf08f118 +77, 0x6b6d5969 +78, 0x3ccf1977 +79, 0x7cc11497 +80, 0x651c6ac9 +81, 0x4d6b104b +82, 0x9a28314e +83, 0x14c237be +84, 0x9cfc8d52 +85, 0x2947fad5 +86, 0xd71eff49 +87, 0x5188730e +88, 0x4b894614 +89, 0xf4fa2a34 +90, 0x42f7cc69 +91, 0x4089c9e8 +92, 0xbf0bbfe4 +93, 0x3cea65c +94, 0xc6221207 +95, 0x1bb71a8f +96, 0x54843fe7 +97, 0xbc59de4c +98, 0x79c6ee64 +99, 0x14e57a26 +100, 0x68d88fe +101, 0x2b86ef64 +102, 0x8ffff3c1 +103, 0x5bdd573f +104, 0x85671813 +105, 0xefe32ca2 +106, 0x105ded1e +107, 0x90ca2769 +108, 0xb33963ac +109, 0x363fbbc3 +110, 0x3b3763ae +111, 0x1d50ab88 +112, 0xc9ec01eb +113, 0xc8bbeada +114, 0x5d704692 +115, 0x5fd9e40 +116, 0xe61c125 +117, 0x2fe05792 +118, 0xda8afb72 +119, 0x4cbaa653 +120, 0xdd2243df +121, 0x896fd3f5 +122, 0x5bc23db +123, 0xa1c4e807 +124, 0x57d1a24d +125, 0x66503ddc +126, 0xcf7c0838 +127, 0x19e034fc +128, 0x66807450 +129, 0xfc219b3b +130, 0xe8a843e7 +131, 0x9ce61f08 +132, 0x92b950d6 +133, 0xce955ec4 +134, 0xda0d1f0d +135, 0x960c6250 +136, 0x39552432 +137, 0xde845e84 +138, 0xff3b4b11 +139, 0x5d918e6f +140, 0xbb930df2 +141, 0x7cfb0993 +142, 0x5400e1e9 +143, 0x3bfa0954 +144, 0x7e2605fb +145, 0x11941591 +146, 0x887e6994 +147, 0xdc8bed45 +148, 0x45b3fb50 +149, 0xfbdf8358 +150, 0x41507468 +151, 0x34c87166 +152, 0x17f64d77 +153, 0x3bbaf4f8 +154, 0x4f26f37e +155, 0x4a56ebf2 +156, 0x81100f1 +157, 0x96d94eae +158, 0xca88fda5 +159, 0x2eef3a60 +160, 0x952afbd3 +161, 0x2bec88c7 +162, 0x52335c4b +163, 0x8296db8e +164, 0x4da7d00a +165, 0xc00ac899 +166, 0xadff8c72 +167, 0xbecf26cf +168, 0x8835c83c +169, 0x1d13c804 +170, 0xaa940ddc +171, 0x68222cfe +172, 0x4569c0e1 +173, 0x29077976 +174, 0x32d4a5af +175, 0xd31fcdef +176, 0xdc60682b +177, 0x7c95c368 +178, 0x75a70213 +179, 0x43021751 +180, 0x5e52e0a6 +181, 0xf7e190b5 +182, 0xee3e4bb +183, 0x2fe3b150 +184, 0xcf419c07 +185, 0x478a4570 +186, 0xe5c3ea50 +187, 0x417f30a8 +188, 0xf0cfdaa0 +189, 0xd1f7f738 +190, 0x2c70fc23 +191, 0x54fc89f9 +192, 0x444dcf01 +193, 0xec2a002d +194, 0xef0c3a88 +195, 0xde21be9 +196, 0x88ab3296 +197, 0x3028897c +198, 0x264b200b +199, 0xd8ae0706 +200, 0x9eef901a +201, 0xbd1b96e0 +202, 0xea71366c +203, 0x1465b694 +204, 0x5a794650 +205, 0x83df52d4 +206, 0x8262413d +207, 0x5bc148c0 +208, 0xe0ecd80c +209, 0x40649571 +210, 0xb4d2ee5f +211, 0xedfd7d09 +212, 0xa082e25f +213, 0xc62992d1 +214, 0xbc7e65ee +215, 0x5499cf8a +216, 0xac28f775 +217, 0x649840fb +218, 0xd4c54805 +219, 0x1d166ba6 +220, 0xbeb1171f +221, 0x45b66703 +222, 0x78c03349 +223, 0x38d2a6ff +224, 0x935cae8b +225, 0x1d07dc3f +226, 0x6c1ed365 +227, 0x579fc585 +228, 0x1320c0ec +229, 0x632757eb +230, 0xd265a397 +231, 0x70e9b6c2 +232, 0xc81e322c +233, 0xa27153cf +234, 0x2118ba19 +235, 0x514ec400 +236, 0x2bd0ecd6 +237, 0xc3e7dae3 +238, 0xfa39355e +239, 0x48f23cc1 +240, 0xbcf75948 +241, 0x53ccc70c +242, 0x75346423 +243, 0x951181e0 +244, 0x348e90df +245, 0x14365d7f +246, 0xfbc95d7a +247, 0xdc98a9e6 +248, 0xed202df7 +249, 
0xa59ec913 +250, 0x6b6e9ae2 +251, 0x1697f265 +252, 0x15d322d0 +253, 0xa2e7ee0a +254, 0x88860b7e +255, 0x455d8b9d +256, 0x2f5c59cb +257, 0xac49c9f1 +258, 0xa6a6a039 +259, 0xc057f56b +260, 0xf1ff1208 +261, 0x5eb8dc9d +262, 0xe6702509 +263, 0xe238b0ed +264, 0x5ae32e3d +265, 0xa88ebbdf +266, 0xef885ae7 +267, 0xafa6d49b +268, 0xc94499e0 +269, 0x1a196325 +270, 0x88938da3 +271, 0x14f4345 +272, 0xd8e33637 +273, 0xa3551bd5 +274, 0x73fe35c7 +275, 0x9561e94b +276, 0xd673bf68 +277, 0x16134872 +278, 0x68c42f9f +279, 0xdf7574c8 +280, 0x8809bab9 +281, 0x1432cf69 +282, 0xafb66bf1 +283, 0xc184aa7b +284, 0xedbf2007 +285, 0xbd420ce1 +286, 0x761033a0 +287, 0xff7e351f +288, 0xd6c3780e +289, 0x5844416f +290, 0xc6c0ee1c +291, 0xd2e147db +292, 0x92ac601a +293, 0x393e846b +294, 0x18196cca +295, 0x54a22be +296, 0x32bab1c4 +297, 0x60365183 +298, 0x64fa342 +299, 0xca24a493 +300, 0xd8cc8b83 +301, 0x3faf102b +302, 0x6e09bb58 +303, 0x812f0ea +304, 0x592c95d8 +305, 0xe45ea4c5 +306, 0x23aebf83 +307, 0xbd9691d4 +308, 0xf47b4baa +309, 0x4ac7b487 +310, 0xcce18803 +311, 0x3377556e +312, 0x3ff8e6b6 +313, 0x99d22063 +314, 0x23250bec +315, 0x4e1f9861 +316, 0x8554249b +317, 0x8635c2fc +318, 0xe8426e8a +319, 0x966c29d8 +320, 0x270b6082 +321, 0x3180a8a1 +322, 0xe7e1668b +323, 0x7f868dc +324, 0xcf4c17cf +325, 0xe31de4d1 +326, 0xc8c8aff4 +327, 0xae8db704 +328, 0x3c928cc2 +329, 0xe12cd48 +330, 0xb33ecd04 +331, 0xb93d7cbe +332, 0x49c69d6a +333, 0x7d3bce64 +334, 0x86bc219 +335, 0x8408233b +336, 0x44dc7479 +337, 0xdf80d538 +338, 0xf3db02c3 +339, 0xbbbd31d7 +340, 0x121281f +341, 0x7521e9a3 +342, 0x8859675a +343, 0x75aa6502 +344, 0x430ed15b +345, 0xecf0a28d +346, 0x659774fd +347, 0xd58a2311 +348, 0x512389a9 +349, 0xff65e1ff +350, 0xb6ddf222 +351, 0xe3458895 +352, 0x8b13cd6e +353, 0xd4a22870 +354, 0xe604c50c +355, 0x27f54f26 +356, 0x8f7f422f +357, 0x9735b4cf +358, 0x414072b0 +359, 0x76a1c6d5 +360, 0xa2208c06 +361, 0x83cd0f61 +362, 0x6c4f7ead +363, 0x6553cf76 +364, 0xeffcf44 +365, 0x7f434a3f +366, 0x9dc364bd +367, 0x3cdf52ed +368, 0xad597594 +369, 0x9c3e211b +370, 0x6c04a33f +371, 0x885dafa6 +372, 0xbbdaca71 +373, 0x7ae5dd5c +374, 0x37675644 +375, 0x251853c6 +376, 0x130b086b +377, 0x143fa54b +378, 0x54cdc282 +379, 0x9faff5b3 +380, 0x502a5c8b +381, 0xd9524550 +382, 0xae221aa6 +383, 0x55cf759b +384, 0x24782da4 +385, 0xd715d815 +386, 0x250ea09a +387, 0x4e0744ac +388, 0x11e15814 +389, 0xabe5f9df +390, 0xc8146350 +391, 0xfba67d9b +392, 0x2b82e42f +393, 0xd4ea96fc +394, 0x5ffc179e +395, 0x1598bafe +396, 0x7fb6d662 +397, 0x1a12a0db +398, 0x450cee4a +399, 0x85f8e12 +400, 0xce71b594 +401, 0xd4bb1d19 +402, 0x968f379d +403, 0x54cc1d52 +404, 0x467e6066 +405, 0x7da5f9a9 +406, 0x70977034 +407, 0x49e65c4b +408, 0xd08570d1 +409, 0x7acdf60b +410, 0xdffa038b +411, 0x9ce14e4c +412, 0x107cbbf8 +413, 0xdd746ca0 +414, 0xc6370a46 +415, 0xe7f83312 +416, 0x373fa9ce +417, 0xd822a2c6 +418, 0x1d4efea6 +419, 0xc53dcadb +420, 0x9b4e898f +421, 0x71daa6bf +422, 0x7a0bc78b +423, 0xd7b86f50 +424, 0x1b8b3286 +425, 0xcf9425dd +426, 0xd5263220 +427, 0x4ea0b647 +428, 0xc767fe64 +429, 0xcfc5e67 +430, 0xcc6a2942 +431, 0xa51eff00 +432, 0x76092e1b +433, 0xf606e80f +434, 0x824b5e20 +435, 0xebb55e14 +436, 0x783d96a6 +437, 0x10696512 +438, 0x17ee510a +439, 0x3ab70a1f +440, 0xcce6b210 +441, 0x8f72f0fb +442, 0xf0610b41 +443, 0x83d01fb5 +444, 0x6b3de36 +445, 0xe4c2e84f +446, 0x9c43bb15 +447, 0xddf2905 +448, 0x7dd63556 +449, 0x3662ca09 +450, 0xfb81f35b +451, 0xc2c8a72a +452, 0x8e93c37 +453, 0xa93da2d4 +454, 0xa03af8f1 +455, 0x8d75159a +456, 0x15f010b0 +457, 0xa296ab06 +458, 0xe55962ba 
+459, 0xeae700a9 +460, 0xe388964a +461, 0x917f2bec +462, 0x1c203fea +463, 0x792a01ba +464, 0xa93a80ac +465, 0x9eb8a197 +466, 0x56c0bc73 +467, 0xb8f05799 +468, 0xf429a8c8 +469, 0xb92cee42 +470, 0xf8864ec +471, 0x62f2518a +472, 0x3a7bfa3e +473, 0x12e56e6d +474, 0xd7a18313 +475, 0x41fa3899 +476, 0xa09c4956 +477, 0xebcfd94a +478, 0xc485f90b +479, 0x4391ce40 +480, 0x742a3333 +481, 0xc932f9e5 +482, 0x75c6c263 +483, 0x80937f0 +484, 0xcf21833c +485, 0x16027520 +486, 0xd42e669f +487, 0xb0f01fb7 +488, 0xb35896f1 +489, 0x763737a9 +490, 0x1bb20209 +491, 0x3551f189 +492, 0x56bc2602 +493, 0xb6eacf4 +494, 0x42ec4d11 +495, 0x245cc68 +496, 0xc27ac43b +497, 0x9d903466 +498, 0xce3f0c05 +499, 0xb708c31c +500, 0xc0fd37eb +501, 0x95938b2c +502, 0xf20175a7 +503, 0x4a86ee9b +504, 0xbe039a58 +505, 0xd41cabe7 +506, 0x83bc99ba +507, 0x761d60e1 +508, 0x7737cc2e +509, 0x2b82fc4b +510, 0x375aa401 +511, 0xfe9597a0 +512, 0x5543806a +513, 0x44f31238 +514, 0x7df31538 +515, 0x74cfa770 +516, 0x8755d881 +517, 0x1fde665a +518, 0xda8bf315 +519, 0x973d8e95 +520, 0x72205228 +521, 0x8fe59717 +522, 0x7bb90b34 +523, 0xef6ed945 +524, 0x16fd4a38 +525, 0x5db44de1 +526, 0xf09f93b3 +527, 0xe84824cc +528, 0x945bb50e +529, 0xd0be4aa5 +530, 0x47c277c2 +531, 0xd3800c28 +532, 0xac1c33ec +533, 0xd3dacce +534, 0x811c8387 +535, 0x6761b36 +536, 0x70d3882f +537, 0xd6e62e3a +538, 0xea25daa2 +539, 0xb07f39d1 +540, 0x391d89d7 +541, 0x84b6fb5e +542, 0x3dda3fca +543, 0x229e80a4 +544, 0x3d94a4b7 +545, 0x5d3d576a +546, 0xad7818a0 +547, 0xce23b03a +548, 0x7aa2079c +549, 0x9a6be555 +550, 0x83f3b34a +551, 0x1848f9d9 +552, 0xd8fefc1c +553, 0x48e6ce48 +554, 0x52e55750 +555, 0xf41a71cf +556, 0xba08e259 +557, 0xfaf06a15 +558, 0xeaaac0fb +559, 0x34f90098 +560, 0xb1dfffbb +561, 0x718daec2 +562, 0xab4dda21 +563, 0xd27cc1ee +564, 0x4aafbc4c +565, 0x356dfb4f +566, 0x83fcdfd6 +567, 0x8f0bcde0 +568, 0x4363f844 +569, 0xadc0f4d5 +570, 0x3bde994e +571, 0x3884d452 +572, 0x21876b4a +573, 0x9c985398 +574, 0xca55a226 +575, 0x3a88c583 +576, 0x916dc33c +577, 0x8f67d1d7 +578, 0x3b26a667 +579, 0xe4ddeb4b +580, 0x1a9d8c33 +581, 0x81c9b74f +582, 0x9ed1e9df +583, 0x6e61aecf +584, 0x95e95a5d +585, 0x68864ff5 +586, 0xb8fa5b9 +587, 0x72b1b3de +588, 0x5e18a86b +589, 0xd7f2337d +590, 0xd70e0925 +591, 0xb573a4c1 +592, 0xc77b3f8a +593, 0x389b20de +594, 0x16cf6afb +595, 0xa39bd275 +596, 0xf491cf01 +597, 0x6f88a802 +598, 0x8510af05 +599, 0xe7cd549a +600, 0x8603179a +601, 0xef43f191 +602, 0xf9b64c60 +603, 0xb00254a7 +604, 0xd7c06a2d +605, 0x17e9380b +606, 0x529e727b +607, 0xaaa8fe0a +608, 0xfb64ff4c +609, 0xcd75af26 +610, 0xfb717c87 +611, 0xa0789899 +612, 0x10391ec9 +613, 0x7e9b40b3 +614, 0x18536554 +615, 0x728c05f7 +616, 0x787dca98 +617, 0xad948d1 +618, 0x44c18def +619, 0x3303f2ec +620, 0xa15acb5 +621, 0xb58d38f4 +622, 0xfe041ef8 +623, 0xd151a956 +624, 0x7b9168e8 +625, 0x5ebeca06 +626, 0x90fe95df +627, 0xf76875aa +628, 0xb2e0d664 +629, 0x2e3253b7 +630, 0x68e34469 +631, 0x1f0c2d89 +632, 0x13a34ac2 +633, 0x5ffeb841 +634, 0xe381e91c +635, 0xb8549a92 +636, 0x3f35cf1 +637, 0xda0f9dcb +638, 0xdd9828a6 +639, 0xe1428f29 +640, 0xf4db80b5 +641, 0xdac30af5 +642, 0x1af1dd17 +643, 0x9a540254 +644, 0xcab68a38 +645, 0x33560361 +646, 0x2fbf3886 +647, 0xbc785923 +648, 0xe081cd10 +649, 0x8e473356 +650, 0xd102c357 +651, 0xeea4fe48 +652, 0x248d3453 +653, 0x1da79ac +654, 0x815a65ff +655, 0x27693e76 +656, 0xb7d5af40 +657, 0x6d245d30 +658, 0x9e06fa8f +659, 0xb0570dcb +660, 0x469f0005 +661, 0x3e0ca132 +662, 0xd89bbf3 +663, 0xd61ccd47 +664, 0x6383878 +665, 0x62b5956 +666, 0x4dc83675 +667, 0x93fd8492 +668, 
0x5a0091f5 +669, 0xc9f9bc3 +670, 0xa26e7778 +671, 0xeabf2d01 +672, 0xe612dc06 +673, 0x85d89ff9 +674, 0xd1763179 +675, 0xcb88947b +676, 0x9e8757a5 +677, 0xe100e85c +678, 0x904166eb +679, 0x4996243d +680, 0x4038e1cb +681, 0x2be2c63d +682, 0x77017e81 +683, 0x3b1f556b +684, 0x1c785c77 +685, 0x6869b8bd +686, 0xe1217ed4 +687, 0x4012ab2f +688, 0xc06c0d8e +689, 0x2122eb68 +690, 0xad1783fd +691, 0x5f0c80e3 +692, 0x828f7efa +693, 0x29328399 +694, 0xeadf1087 +695, 0x85dc0037 +696, 0x9691ef26 +697, 0xc0947a53 +698, 0x2a178d2a +699, 0x2a2c7e8f +700, 0x90378380 +701, 0xaad8d326 +702, 0x9cf1c3c8 +703, 0x84eccd44 +704, 0x79e61808 +705, 0x8b3f454e +706, 0x209e6e1 +707, 0x51f88378 +708, 0xc210226f +709, 0xd982adb5 +710, 0x55d44a31 +711, 0x9817d443 +712, 0xa328c626 +713, 0x13455966 +714, 0xb8f681d3 +715, 0x2a3c713b +716, 0xc186959b +717, 0x814a74b0 +718, 0xed7bc90 +719, 0xa88d3d6d +720, 0x88a9f561 +721, 0x73aa1c0a +722, 0xdfeff404 +723, 0xec037e4b +724, 0xa5c209f0 +725, 0xb3a223b4 +726, 0x24ce3709 +727, 0x3184c790 +728, 0xa1398c62 +729, 0x2f92034e +730, 0xbb37a79a +731, 0x605287b4 +732, 0x8faa772c +733, 0x6ce56c1d +734, 0xc035fb4c +735, 0x7cf5b316 +736, 0x6502645 +737, 0xa283d810 +738, 0x778bc2f1 +739, 0xfdf99313 +740, 0x1f513265 +741, 0xbd3837e2 +742, 0x9b84a9a +743, 0x2139ce91 +744, 0x61a8e890 +745, 0xf9ff12db +746, 0xb43d2ea7 +747, 0x88532e61 +748, 0x175a6655 +749, 0x7a6c4f72 +750, 0x6dafc1b7 +751, 0x449b1459 +752, 0x514f654f +753, 0x9a6731e2 +754, 0x8632da43 +755, 0xc81b0422 +756, 0x81fe9005 +757, 0x15b79618 +758, 0xb5fa629f +759, 0x987a474f +760, 0x1c74f54e +761, 0xf9743232 +762, 0xec4b55f +763, 0x87d761e5 +764, 0xd1ad78b7 +765, 0x453d9350 +766, 0xc7a7d85 +767, 0xb2576ff5 +768, 0xcdde49b7 +769, 0x8e1f763e +770, 0x1338583e +771, 0xfd65b9dc +772, 0x4f19c4f4 +773, 0x3a52d73d +774, 0xd3509c4c +775, 0xda24fe31 +776, 0xe2de56ba +777, 0x2db5e540 +778, 0x23172734 +779, 0x4db572f +780, 0xeb941718 +781, 0x84c2649a +782, 0x3b1e5b6a +783, 0x4c9c61b9 +784, 0x3bccd11 +785, 0xb4d7b78e +786, 0x48580ae5 +787, 0xd273ab68 +788, 0x25c11615 +789, 0x470b53f6 +790, 0x329c2068 +791, 0x1693721b +792, 0xf8c9aacf +793, 0x4c3d5693 +794, 0xd778284e +795, 0xae1cb24f +796, 0x3c11d1b3 +797, 0xddd2b0c0 +798, 0x90269fa7 +799, 0x5666e0a2 +800, 0xf9f195a4 +801, 0x61d78eb2 +802, 0xada5a7c0 +803, 0xaa272fbe +804, 0xba3bae2f +805, 0xd0b70fc2 +806, 0x529f32b +807, 0xda7a3e21 +808, 0x9a776a20 +809, 0xb21f9635 +810, 0xb3acc14e +811, 0xac55f56 +812, 0x29dccf41 +813, 0x32dabdb3 +814, 0xaa032f58 +815, 0xfa406af4 +816, 0xce3c415d +817, 0xb44fb4d9 +818, 0x32248d1c +819, 0x680c6440 +820, 0xae2337b +821, 0x294cb597 +822, 0x5bca48fe +823, 0xaef19f40 +824, 0xad60406 +825, 0x4781f090 +826, 0xfd691ffc +827, 0xb6568268 +828, 0xa56c72cb +829, 0xf8a9e0fc +830, 0x9af4fd02 +831, 0x2cd30932 +832, 0x776cefd7 +833, 0xe31f476e +834, 0x6d94a437 +835, 0xb3cab598 +836, 0xf582d13f +837, 0x3bf8759d +838, 0xc3777dc +839, 0x5e425ea8 +840, 0x1c7ff4ed +841, 0x1c2e97d1 +842, 0xc062d2b4 +843, 0x46dc80e0 +844, 0xbcdb47e6 +845, 0x32282fe0 +846, 0xaba89063 +847, 0x5e94e9bb +848, 0x3e667f78 +849, 0xea6eb21a +850, 0xe56e54e8 +851, 0xa0383510 +852, 0x6768fe2b +853, 0xb53ac3e0 +854, 0x779569a0 +855, 0xeca83c6a +856, 0x24db4d2d +857, 0x4585f696 +858, 0xf84748b2 +859, 0xf6a4dd5b +860, 0x31fb524d +861, 0x67ab39fe +862, 0x5882a899 +863, 0x9a05fcf6 +864, 0x712b5674 +865, 0xe8c6958f +866, 0x4b448bb3 +867, 0x530b9abf +868, 0xb491f491 +869, 0x98352c62 +870, 0x2d0a50e3 +871, 0xeb4384da +872, 0x36246f07 +873, 0xcbc5c1a +874, 0xae24031d +875, 0x44d11ed6 +876, 0xf07f1608 +877, 0xf296aadd +878, 
0x3bcfe3be +879, 0x8fa1e7df +880, 0xfd317a6e +881, 0xe4975c44 +882, 0x15205892 +883, 0xa762d4df +884, 0xf1167365 +885, 0x6811cc00 +886, 0x8315f23 +887, 0xe045b4b1 +888, 0xa8496414 +889, 0xbed313ae +890, 0xcdae3ddb +891, 0xa9c22c9 +892, 0x275fab1a +893, 0xedd65fa +894, 0x4c188229 +895, 0x63a83e58 +896, 0x18aa9207 +897, 0xa41f2e78 +898, 0xd9f63653 +899, 0xbe2be73b +900, 0xa3364d39 +901, 0x896d5428 +902, 0xc737539e +903, 0x745a78c6 +904, 0xf0b2b042 +905, 0x510773b4 +906, 0x92ad8e37 +907, 0x27f2f8c4 +908, 0x23704cc8 +909, 0x3d95a77f +910, 0xf08587a4 +911, 0xbd696a25 +912, 0x948924f3 +913, 0x8cddb634 +914, 0xcd2a4910 +915, 0x8e0e300e +916, 0x83815a9b +917, 0x67383510 +918, 0x3c18f0d0 +919, 0xc7a7bccc +920, 0x7cc2d3a2 +921, 0x52eb2eeb +922, 0xe4a257e5 +923, 0xec76160e +924, 0x63f9ad68 +925, 0x36d0bbbf +926, 0x957bc4e4 +927, 0xc9ed90ff +928, 0x4cb6059d +929, 0x2f86eca1 +930, 0x3e3665a3 +931, 0x9b7eb6f4 +932, 0x492e7e18 +933, 0xa098aa51 +934, 0x7eb568b2 +935, 0x3fd639ba +936, 0x7bebcf1 +937, 0x99c844ad +938, 0x43cb5ec7 +939, 0x8dfbbef5 +940, 0x5be413ff +941, 0xd93b976d +942, 0xc1c7a86d +943, 0x1f0e93d0 +944, 0x498204a2 +945, 0xe8fe832a +946, 0x2236bd7 +947, 0x89953769 +948, 0x2acc3491 +949, 0x2c4f22c6 +950, 0xd7996277 +951, 0x3bcdc349 +952, 0xfc286630 +953, 0x5f8909fd +954, 0x242677c0 +955, 0x4cb34104 +956, 0xa6ff8100 +957, 0x39ea47ec +958, 0x9bd54140 +959, 0x7502ffe8 +960, 0x7ebef8ae +961, 0x1ed8abe4 +962, 0xfaba8450 +963, 0xc197b65f +964, 0x19431455 +965, 0xe229c176 +966, 0xeb2967da +967, 0xe0c5dc05 +968, 0xa84e3227 +969, 0x10dd9e0f +970, 0xbdb70b02 +971, 0xce24808a +972, 0x423edab8 +973, 0x194caf71 +974, 0x144f150d +975, 0xf811c2d2 +976, 0xc224ee85 +977, 0x2b217a5b +978, 0xf78a5a79 +979, 0x6554a4b1 +980, 0x769582df +981, 0xf4b2cf93 +982, 0x89648483 +983, 0xb3283a3e +984, 0x82b895db +985, 0x79388ef0 +986, 0x54bc42a6 +987, 0xc4dd39d9 +988, 0x45b33b7d +989, 0x8703b2c1 +990, 0x1cc94806 +991, 0xe0f43e49 +992, 0xcaa7b6bc +993, 0x4f88e9af +994, 0x1477cce5 +995, 0x347dd115 +996, 0x36e335fa +997, 0xb93c9a31 +998, 0xaac3a175 +999, 0x68a19647 diff --git a/python/numpy/random/tests/data/mt19937-testset-2.csv b/python/numpy/random/tests/data/mt19937-testset-2.csv new file mode 100644 index 000000000..cdb8e4794 --- /dev/null +++ b/python/numpy/random/tests/data/mt19937-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x7ab4ea94 +1, 0x9b561119 +2, 0x4957d02e +3, 0x7dd3fdc2 +4, 0x5affe54 +5, 0x5a01741c +6, 0x8b9e8c1f +7, 0xda5bf11a +8, 0x509226 +9, 0x64e2ea17 +10, 0x82c6dab5 +11, 0xe4302515 +12, 0x8198b873 +13, 0xc3ec9a82 +14, 0x829dff28 +15, 0x5278e44f +16, 0x994a7d2c +17, 0xf1c89398 +18, 0xaf2fddec +19, 0x22abc6ee +20, 0x963dbd43 +21, 0xc29edffb +22, 0x41c1ce07 +23, 0x9c90034d +24, 0x1f17a796 +25, 0x3833caa8 +26, 0xb8795528 +27, 0xebc595a2 +28, 0xf8f5b5dd +29, 0xc2881f72 +30, 0x18e5d3f0 +31, 0x9b19ac7a +32, 0xb9992436 +33, 0xc00052b3 +34, 0xb63f4475 +35, 0x962642d9 +36, 0x63506c10 +37, 0x2be6b127 +38, 0x569bdbc6 +39, 0x7f185e01 +40, 0xebb55f53 +41, 0x1c30198c +42, 0x7c8d75c6 +43, 0xd3f2186b +44, 0xaca5b9b1 +45, 0xbc49ff45 +46, 0xc4a802af +47, 0x2cecd86f +48, 0x8e0da529 +49, 0x1f22b00e +50, 0x4559ea80 +51, 0x60f587d8 +52, 0x7c7460e9 +53, 0x67be0a4a +54, 0x987a0183 +55, 0x7bd30f1 +56, 0xab18c4ac +57, 0xffdbfb64 +58, 0x9ea917f9 +59, 0x1239dab7 +60, 0x38efabeb +61, 0x5da91888 +62, 0x8f49ed62 +63, 0x83f60b1e +64, 0x5950a3fc +65, 0xd8911104 +66, 0x19e8859e +67, 0x1a4d89ec +68, 0x968ca180 +69, 0x9e1b6da3 +70, 0x3d99c2c +71, 0x55f76289 +72, 0x8fa28b9e +73, 0x9fe01d33 +74, 0xdade4e38 +75, 0x1ea04290 +76, 0xa7263313 
+77, 0xaafc762e +78, 0x460476d6 +79, 0x31226e12 +80, 0x451d3f05 +81, 0xd0d2764b +82, 0xd06e1ab3 +83, 0x1394e3f4 +84, 0x2fc04ea3 +85, 0x5b8401c +86, 0xebd6c929 +87, 0xe881687c +88, 0x94bdd66a +89, 0xabf85983 +90, 0x223ad12d +91, 0x2aaeeaa3 +92, 0x1f704934 +93, 0x2db2efb6 +94, 0xf49b8dfb +95, 0x5bdbbb9d +96, 0xba0cd0db +97, 0x4ec4674e +98, 0xad0129e +99, 0x7a66129b +100, 0x50d12c5e +101, 0x85b1d335 +102, 0x3efda58a +103, 0xecd886fb +104, 0x8ecadd3d +105, 0x60ebac0f +106, 0x5e10fe79 +107, 0xa84f7e5d +108, 0x43931288 +109, 0xfacf448 +110, 0x4ee01997 +111, 0xcdc0a651 +112, 0x33c87037 +113, 0x8b50fc03 +114, 0xf52aad34 +115, 0xda6cd856 +116, 0x7585bea0 +117, 0xe947c762 +118, 0x4ddff5d8 +119, 0xe0e79b3b +120, 0xb804cf09 +121, 0x84765c44 +122, 0x3ff666b4 +123, 0xe31621ad +124, 0x816f2236 +125, 0x228176bc +126, 0xfdc14904 +127, 0x635f5077 +128, 0x6981a817 +129, 0xfd9a0300 +130, 0xd3fa8a24 +131, 0xd67c1a77 +132, 0x903fe97a +133, 0xf7c4a4d5 +134, 0x109f2058 +135, 0x48ab87fe +136, 0xfd6f1928 +137, 0x707e9452 +138, 0xf327db9e +139, 0x7b80d76d +140, 0xfb6ba193 +141, 0x454a1ad0 +142, 0xe20b51e +143, 0xb774d085 +144, 0x6b1ed574 +145, 0xb1e77de4 +146, 0xe2a83b37 +147, 0x33d3176f +148, 0x2f0ca0fc +149, 0x17f51e2 +150, 0x7c1fbf55 +151, 0xf09e9cd0 +152, 0xe3d9bacd +153, 0x4244db0a +154, 0x876c09fc +155, 0x9db4fc2f +156, 0xd3771d60 +157, 0x25fc6a75 +158, 0xb309915c +159, 0xc50ee027 +160, 0xaa5b7b38 +161, 0x4c650ded +162, 0x1acb2879 +163, 0x50db5887 +164, 0x90054847 +165, 0xfef23e5b +166, 0x2dd7b7d5 +167, 0x990b8c2e +168, 0x6001a601 +169, 0xb5d314c4 +170, 0xfbfb7bf9 +171, 0x1aba997d +172, 0x814e7304 +173, 0x989d956a +174, 0x86d5a29c +175, 0x70a9fa08 +176, 0xc4ccba87 +177, 0x7e9cb366 +178, 0xee18eb0a +179, 0x44f5be58 +180, 0x91d4af2d +181, 0x5ab6e593 +182, 0x9fd6bb4d +183, 0x85894ce +184, 0x728a2401 +185, 0xf006f6d4 +186, 0xd782741e +187, 0x842cd5bd +188, 0xfb5883aa +189, 0x7e5a471 +190, 0x83ff6965 +191, 0xc9675c6b +192, 0xb6ced3c7 +193, 0x3de6425b +194, 0x25e14db4 +195, 0x69ca3dec +196, 0x81342d13 +197, 0xd7cd8417 +198, 0x88d15e69 +199, 0xefba17c9 +200, 0x43d595e6 +201, 0x89d4cf25 +202, 0x7cae9b9b +203, 0x2242c621 +204, 0x27fc3598 +205, 0x467b1d84 +206, 0xe84d4622 +207, 0xa26bf980 +208, 0x80411010 +209, 0xe2c2bfea +210, 0xbc6ca25a +211, 0x3ddb592a +212, 0xdd46eb9e +213, 0xdfe8f657 +214, 0x2cedc974 +215, 0xf0dc546b +216, 0xd46be68f +217, 0x26d8a5aa +218, 0x76e96ba3 +219, 0x7d5b5353 +220, 0xf532237c +221, 0x6478b79 +222, 0x9b81a5e5 +223, 0x5fc68e5c +224, 0x68436e70 +225, 0x2a0043f9 +226, 0x108d523c +227, 0x7a4c32a3 +228, 0x9c84c742 +229, 0x6f813dae +230, 0xfcc5bbcc +231, 0x215b6f3a +232, 0x84cb321d +233, 0x7913a248 +234, 0xb1e6b585 +235, 0x49376b31 +236, 0x1dc896b0 +237, 0x347051ad +238, 0x5524c042 +239, 0xda0eef9d +240, 0xf2e73342 +241, 0xbeee2f9d +242, 0x7c702874 +243, 0x9eb3bd34 +244, 0x97b09700 +245, 0xcdbab1d4 +246, 0x4a2f6ed1 +247, 0x2047bda5 +248, 0x3ecc7005 +249, 0x8d0d5e67 +250, 0x40876fb5 +251, 0xb5fd2187 +252, 0xe915d8af +253, 0x9a2351c7 +254, 0xccc658ae +255, 0xebb1eddc +256, 0xc4a83671 +257, 0xffb2548f +258, 0xe4fe387a +259, 0x477aaab4 +260, 0x8475a4e4 +261, 0xf8823e46 +262, 0xe4130f71 +263, 0xbdb54482 +264, 0x98fe0462 +265, 0xf36b27b8 +266, 0xed7733da +267, 0x5f428afc +268, 0x43a3a21a +269, 0xf8370b55 +270, 0xfade1de1 +271, 0xd9a038ea +272, 0x3c69af23 +273, 0x24df7dd0 +274, 0xf66d9353 +275, 0x71d811be +276, 0xcc4d024b +277, 0xb8c30bf0 +278, 0x4198509d +279, 0x8b37ba36 +280, 0xa41ae29a +281, 0x8cf7799e +282, 0x5cd0136a +283, 0xa11324ef +284, 0x2f8b6d4b +285, 0x3657cf17 +286, 0x35b6873f +287, 
0xee6e5bd7 +288, 0xbeeaa98 +289, 0x9ad3c581 +290, 0xe2376c3f +291, 0x738027cc +292, 0x536ac839 +293, 0xf066227 +294, 0x6c9cb0f9 +295, 0x84082ae6 +296, 0xab38ae9d +297, 0x493eade9 +298, 0xcb630b3a +299, 0x64d44250 +300, 0xe5efb557 +301, 0xea2424d9 +302, 0x11a690ba +303, 0x30a48ae4 +304, 0x58987e53 +305, 0x94ec6076 +306, 0x5d3308fa +307, 0xf1635ebb +308, 0x56a5ab90 +309, 0x2b2f2ee4 +310, 0x6f9e6483 +311, 0x8b93e327 +312, 0xa7ce140b +313, 0x4c8aa42 +314, 0x7657bb3f +315, 0xf250fd75 +316, 0x1edfcb0f +317, 0xdb42ace3 +318, 0xf8147e16 +319, 0xd1992bd +320, 0x64bb14d1 +321, 0x423e724d +322, 0x7b172f7c +323, 0x17171696 +324, 0x4acaf83b +325, 0x7a83527e +326, 0xfc980c60 +327, 0xc8b56bb +328, 0x2453f77f +329, 0x85ad1bf9 +330, 0x62a85dfe +331, 0x48238c4d +332, 0xbb3ec1eb +333, 0x4c1c039c +334, 0x1f37f571 +335, 0x98aecb63 +336, 0xc3b3ddd6 +337, 0xd22dad4 +338, 0xe49671a3 +339, 0xe3baf945 +340, 0xb9e21680 +341, 0xda562856 +342, 0xe8b88ce4 +343, 0x86f88de2 +344, 0x986faf76 +345, 0x6f0025c3 +346, 0x3fe21234 +347, 0xd8d3f729 +348, 0xc2d11c6f +349, 0xd4f9e8f +350, 0xf61a0aa +351, 0xc48bb313 +352, 0xe944e940 +353, 0xf1801b2e +354, 0x253590be +355, 0x981f069d +356, 0x891454d8 +357, 0xa4f824ad +358, 0x6dd2cc48 +359, 0x3018827e +360, 0x3fb329e6 +361, 0x65276517 +362, 0x8d2c0dd2 +363, 0xc965b48e +364, 0x85d14d90 +365, 0x5a51623c +366, 0xa9573d6a +367, 0x82d00edf +368, 0x5ed7ce07 +369, 0x1d946abc +370, 0x24fa567b +371, 0x83ef5ecc +372, 0x9001724a +373, 0xc4fe48f3 +374, 0x1e07c25c +375, 0xf4d5e65e +376, 0xb734f6e9 +377, 0x327a2df8 +378, 0x766d59b7 +379, 0x625e6b61 +380, 0xe82f32d7 +381, 0x1566c638 +382, 0x2e815871 +383, 0x606514aa +384, 0x36b7386e +385, 0xcaa8ce08 +386, 0xb453fe9c +387, 0x48574e23 +388, 0x71f0da06 +389, 0xa8a79463 +390, 0x6b590210 +391, 0x86e989db +392, 0x42899f4f +393, 0x7a654ef9 +394, 0x4c4fe932 +395, 0x77b2fd10 +396, 0xb6b4565c +397, 0xa2e537a3 +398, 0xef5a3dca +399, 0x41235ea8 +400, 0x95c90541 +401, 0x50ad32c4 +402, 0xc1b8e0a4 +403, 0x498e9aab +404, 0xffc965f1 +405, 0x72633485 +406, 0x3a731aef +407, 0x7cfddd0b +408, 0xb04d4129 +409, 0x184fc28e +410, 0x424369b0 +411, 0xf9ae13a1 +412, 0xaf357c8d +413, 0x7a19228e +414, 0xb46de2a8 +415, 0xeff2ac76 +416, 0xa6c9357b +417, 0x614f19c1 +418, 0x8ee1a53f +419, 0xbe1257b1 +420, 0xf72651fe +421, 0xd347c298 +422, 0x96dd2f23 +423, 0x5bb1d63e +424, 0x32e10887 +425, 0x36a144da +426, 0x9d70e791 +427, 0x5e535a25 +428, 0x214253da +429, 0x2e43dd40 +430, 0xfc0413f4 +431, 0x1f5ea409 +432, 0x1754c126 +433, 0xcdbeebbe +434, 0x1fb44a14 +435, 0xaec7926 +436, 0xb9d9a1e +437, 0x9e4a6577 +438, 0x8b1f04c5 +439, 0x19854e8a +440, 0x531080cd +441, 0xc0cbd73 +442, 0x20399d77 +443, 0x7d8e9ed5 +444, 0x66177598 +445, 0x4d18a5c2 +446, 0xe08ebf58 +447, 0xb1f9c87b +448, 0x66bedb10 +449, 0x26670d21 +450, 0x7a7892da +451, 0x69b69d86 +452, 0xd04f1d1c +453, 0xaf469625 +454, 0x7946b813 +455, 0x1ee596bd +456, 0x7f365d85 +457, 0x795b662b +458, 0x194ad02d +459, 0x5a9649b5 +460, 0x6085e278 +461, 0x2cf54550 +462, 0x9c77ea0b +463, 0x3c6ff8b +464, 0x2141cd34 +465, 0xb90bc671 +466, 0x35037c4b +467, 0xd04c0d76 +468, 0xc75bff8 +469, 0x8f52003b +470, 0xfad3d031 +471, 0x667024bc +472, 0xcb04ea36 +473, 0x3e03d587 +474, 0x2644d3a0 +475, 0xa8fe99ba +476, 0x2b9a55fc +477, 0x45c4d44a +478, 0xd059881 +479, 0xe07fcd20 +480, 0x4e22046c +481, 0x7c2cbf81 +482, 0xbf7f23de +483, 0x69d924c3 +484, 0xe53cd01 +485, 0x3879017c +486, 0xa590e558 +487, 0x263bc076 +488, 0x245465b1 +489, 0x449212c6 +490, 0x249dcb29 +491, 0x703d42d7 +492, 0x140eb9ec +493, 0xc86c5741 +494, 0x7992aa5b +495, 0xb8b76a91 +496, 0x771dac3d +497, 
0x4ecd81e3 +498, 0xe5ac30b3 +499, 0xf4d7a5a6 +500, 0xac24b97 +501, 0x63494d78 +502, 0x627ffa89 +503, 0xfa4f330 +504, 0x8098a1aa +505, 0xcc0c61dc +506, 0x34749fa0 +507, 0x7f217822 +508, 0x418d6f15 +509, 0xa4b6e51e +510, 0x1036de68 +511, 0x1436986e +512, 0x44df961d +513, 0x368e4651 +514, 0x6a9e5d8c +515, 0x27d1597e +516, 0xa1926c62 +517, 0x8d1f2b55 +518, 0x5797eb42 +519, 0xa90f9e81 +520, 0x57547b10 +521, 0xdbbcca8e +522, 0x9edd2d86 +523, 0xbb0a7527 +524, 0x7662380c +525, 0xe7c98590 +526, 0x950fbf3f +527, 0xdc2b76b3 +528, 0x8a945102 +529, 0x3f0a1a85 +530, 0xeb215834 +531, 0xc59f2802 +532, 0xe2a4610 +533, 0x8b5a8665 +534, 0x8b2d9933 +535, 0x40a4f0bc +536, 0xaab5bc67 +537, 0x1442a69e +538, 0xdf531193 +539, 0x698d3db4 +540, 0x2d40324e +541, 0x1a25feb2 +542, 0xe8cc898f +543, 0xf12e98f5 +544, 0xc03ad34c +545, 0xf62fceff +546, 0xdd827e1e +547, 0x7d8ccb3b +548, 0xab2d6bc1 +549, 0xc323a124 +550, 0x8184a19a +551, 0xc3c4e934 +552, 0x5487424d +553, 0xd6a81a44 +554, 0x90a8689d +555, 0xe69c4c67 +556, 0xbdae02dd +557, 0x72a18a79 +558, 0x2a88e907 +559, 0x31cf4b5d +560, 0xb157772f +561, 0x206ba601 +562, 0x18529232 +563, 0x7dac90d8 +564, 0x3a5f8a09 +565, 0x9f4b64a3 +566, 0xae373af9 +567, 0x1d79447c +568, 0x2a23684b +569, 0x41fb7ba4 +570, 0x55e4bb9e +571, 0xd7619d3e +572, 0xc04e4dd8 +573, 0x8418d516 +574, 0x2b2ca585 +575, 0xfa8eedf +576, 0x5bafd977 +577, 0x31974fb0 +578, 0x9eb6697b +579, 0xc8be22f5 +580, 0x173b126a +581, 0x8809becf +582, 0x3e41efe1 +583, 0x3d6cbbb8 +584, 0x278c81d8 +585, 0xa6f08434 +586, 0xa0e6601d +587, 0x2fccd88d +588, 0x3cbc8beb +589, 0x5f65d864 +590, 0xa1ff8ddf +591, 0x609dcb7c +592, 0x4a4e1663 +593, 0xeae5531 +594, 0x962a7c85 +595, 0x1e110607 +596, 0x8c5db5d0 +597, 0xc7f2337e +598, 0xc94fcc9c +599, 0xe7f62629 +600, 0x6c9aa9f8 +601, 0x2e27fe0e +602, 0x4d0dae12 +603, 0x9eecf588 +604, 0x977ba3f2 +605, 0xed0a51af +606, 0x3f3ec633 +607, 0xc174b2ec +608, 0x590be8a9 +609, 0x4f630d18 +610, 0xf579e989 +611, 0xe2a55584 +612, 0xee11edcd +613, 0x150a4833 +614, 0xc0a0535c +615, 0xb5e00993 +616, 0xb6435700 +617, 0xa98dbff +618, 0x315716af +619, 0x94395776 +620, 0x6cbd48d9 +621, 0xab17f8fc +622, 0xa794ffb7 +623, 0x6b55e231 +624, 0x89ff5783 +625, 0x431dcb26 +626, 0x270f9bf8 +627, 0x2af1b8d0 +628, 0x881745ed +629, 0x17e1be4e +630, 0x132a0ec4 +631, 0x5712df17 +632, 0x2dfb3334 +633, 0xf5a35519 +634, 0xcafbdac6 +635, 0x73b6189d +636, 0x10107cac +637, 0x18c1045e +638, 0xbc19bbad +639, 0x8b4f05ac +640, 0x5830d038 +641, 0x468cd98a +642, 0x5b83a201 +643, 0xf0ccdd9c +644, 0xcb20c4bd +645, 0x1ff186c9 +646, 0xcdddb47f +647, 0x5c65ce6 +648, 0xb748c580 +649, 0x23b6f262 +650, 0xe2ba8e5c +651, 0x9a164a03 +652, 0x62d3322e +653, 0x918d8b43 +654, 0x45c8b49d +655, 0xce172c6e +656, 0x23febc6 +657, 0x84fdc5b7 +658, 0xe7d1fd82 +659, 0xf0ddf3a6 +660, 0x87050436 +661, 0x13d46375 +662, 0x5b191c78 +663, 0x2cbd99c0 +664, 0x7686c7f +665, 0xcff56c84 +666, 0x7f9b4486 +667, 0xefc997fe +668, 0x984d4588 +669, 0xfa44f36a +670, 0x7a5276c1 +671, 0xcfde6176 +672, 0xcacf7b1d +673, 0xcffae9a7 +674, 0xe98848d5 +675, 0xd4346001 +676, 0xa2196cac +677, 0x217f07dc +678, 0x42d5bef +679, 0x6f2e8838 +680, 0x4677a24 +681, 0x4ad9cd54 +682, 0x43df42af +683, 0x2dde417 +684, 0xaef5acb1 +685, 0xf377f4b3 +686, 0x7d870d40 +687, 0xe53df1c2 +688, 0xaeb5be50 +689, 0x7c92eac0 +690, 0x4f00838c +691, 0x91e05e84 +692, 0x23856c80 +693, 0xc4266fa6 +694, 0x912fddb +695, 0x34d42d22 +696, 0x6c02ffa +697, 0xe47d093 +698, 0x183c55b3 +699, 0xc161d142 +700, 0x3d43ff5f +701, 0xc944a36 +702, 0x27bb9fc6 +703, 0x75c91080 +704, 0x2460d0dc +705, 0xd2174558 +706, 0x68062dbf +707, 
0x778e5c6e +708, 0xa4dc9a +709, 0x7a191e69 +710, 0xc084b2ba +711, 0xbb391d2 +712, 0x88849be +713, 0x69c02714 +714, 0x69d4a389 +715, 0x8f51854d +716, 0xaf10bb82 +717, 0x4d5d1c77 +718, 0x53b53109 +719, 0xa0a92aa0 +720, 0x83ecb757 +721, 0x5325752a +722, 0x114e466e +723, 0x4b3f2780 +724, 0xa7a6a39c +725, 0x5e723357 +726, 0xa6b8be9b +727, 0x157c32ff +728, 0x8b898012 +729, 0xd7ff2b1e +730, 0x69cd8444 +731, 0x6ad8030c +732, 0xa08a49ec +733, 0xfbc055d3 +734, 0xedf17e46 +735, 0xc9526200 +736, 0x3849b88a +737, 0x2746860b +738, 0xae13d0c1 +739, 0x4f15154f +740, 0xd65c3975 +741, 0x6a377278 +742, 0x54d501f7 +743, 0x81a054ea +744, 0x143592ba +745, 0x97714ad6 +746, 0x4f9926d9 +747, 0x4f7ac56d +748, 0xe87ca939 +749, 0x58b76f6f +750, 0x60901ad8 +751, 0x3e401bb6 +752, 0xa058468e +753, 0xc0bb14f6 +754, 0x2cb8f02a +755, 0x7c2cf756 +756, 0x34c31de5 +757, 0x9b243e83 +758, 0xa5c85ab4 +759, 0x2741e3b3 +760, 0x1249000e +761, 0x3fc4e72b +762, 0xa3e038a2 +763, 0x952dd92c +764, 0x2b821966 +765, 0xfa81b365 +766, 0x530919b9 +767, 0x4486d66f +768, 0xccf4f3c1 +769, 0xa8bddd1d +770, 0xcc295eb9 +771, 0xfccbe42f +772, 0x38bacd8d +773, 0x2261854f +774, 0x56068c62 +775, 0x9bdaeb8 +776, 0x555fa5b6 +777, 0x20fe615e +778, 0x49fb23d3 +779, 0xd093bad6 +780, 0x54919e86 +781, 0x7373eb24 +782, 0xfbaa7a98 +783, 0x5f62fb39 +784, 0xe03bc9ec +785, 0xa5074d41 +786, 0xa1cefb1 +787, 0x13912d74 +788, 0xf6421b8 +789, 0xfcb48812 +790, 0x8f1db50b +791, 0xc1654b87 +792, 0x948b43c2 +793, 0xf503ef77 +794, 0x117d891d +795, 0x5493ffa +796, 0x171313b1 +797, 0xa4b62e1e +798, 0x77454ea6 +799, 0xbea0aff0 +800, 0x13c36389 +801, 0xe3b60bac +802, 0xa176bed3 +803, 0x2863d428 +804, 0xe2314f46 +805, 0xa85cd3d4 +806, 0x7866e57 +807, 0x8f03f5bc +808, 0x239ae +809, 0x46f279fb +810, 0xcca00559 +811, 0xaa07a104 +812, 0x89123d08 +813, 0x2e6856ba +814, 0x43a9780d +815, 0x676cff25 +816, 0x6744b87d +817, 0xee260d4f +818, 0xb98d8b77 +819, 0x9b0ca455 +820, 0x659f6fe +821, 0x28d20d1c +822, 0x601f2657 +823, 0xdec3073e +824, 0x61263863 +825, 0x1a13435a +826, 0x27497d1e +827, 0x17a8458e +828, 0xdddc407d +829, 0x4bb2e8ac +830, 0x16b2aedb +831, 0x77ccd696 +832, 0x9d108fcd +833, 0x25ad233e +834, 0xaa9bc370 +835, 0xa873ab50 +836, 0xaf19c9d9 +837, 0x696e1e6b +838, 0x1fdc4bf4 +839, 0x4c2ebc81 +840, 0xde4929ed +841, 0xf4d0c10c +842, 0xb6595b76 +843, 0x75cbb1b3 +844, 0xbcb6de49 +845, 0xe23157fd +846, 0x5e596078 +847, 0xa69b0d29 +848, 0x2118a41 +849, 0x7088c16 +850, 0xc75e1e1 +851, 0x6a4af2d6 +852, 0xf19c6521 +853, 0xaff7b3b1 +854, 0x615295c7 +855, 0xbda3a8d7 +856, 0x5b5ca72e +857, 0xdad9d80f +858, 0xfa81c084 +859, 0xf4703fa +860, 0x3ca54540 +861, 0xa8961d51 +862, 0x53d1ecc2 +863, 0x808d83b6 +864, 0x68e8c48e +865, 0x89be2039 +866, 0x9088ea11 +867, 0xb8665d12 +868, 0x91272f9 +869, 0x53dddff2 +870, 0xb7a54ab +871, 0xd2b645ca +872, 0x99fb8590 +873, 0x5315c8e +874, 0x2a913806 +875, 0x7f15eb2b +876, 0xa7f1cc5d +877, 0xbb2ee836 +878, 0xd9fafd60 +879, 0x17448d6f +880, 0x999ec436 +881, 0x482ec606 +882, 0x9b403c0e +883, 0x569eb51b +884, 0xb275d1a6 +885, 0xadd29c31 +886, 0xb7ebdb15 +887, 0xdfef3662 +888, 0x51aba6db +889, 0x6d41946d +890, 0x77bf8896 +891, 0xcafa6fab +892, 0x976ab40f +893, 0x49a6d86b +894, 0x56639e55 +895, 0x9945b996 +896, 0x81459b50 +897, 0xbce97542 +898, 0xe397c9c9 +899, 0x247a5955 +900, 0xb72b1573 +901, 0x86306f86 +902, 0x34f65dc5 +903, 0x909360c0 +904, 0xf3f696ef +905, 0xcb9faae5 +906, 0x93daecd9 +907, 0xde1af7af +908, 0x43a1f2d +909, 0x6d75cde5 +910, 0x9e412b6 +911, 0x5673fed +912, 0x16bb511a +913, 0x35ef4cca +914, 0x4e615aca +915, 0x5cdaf47a +916, 0x26676047 +917, 
0x8c199325 +918, 0x2adf0cb9 +919, 0x84f2e6fd +920, 0x5e627f64 +921, 0xb7cee354 +922, 0x542ab4a6 +923, 0xe59cd83b +924, 0x89cc3f10 +925, 0x92b0f5f +926, 0xc1328370 +927, 0x8208d9f7 +928, 0x68eb00cf +929, 0xfadd4ac4 +930, 0x2517784f +931, 0x4042b99 +932, 0x75ce0230 +933, 0x97c5a1b4 +934, 0x1a97f709 +935, 0x4c62781e +936, 0xf530a83 +937, 0x75776413 +938, 0x321c7240 +939, 0x6afe4e36 +940, 0xad00a2b4 +941, 0xbc05477d +942, 0xb0911e80 +943, 0x9935b87d +944, 0xd535eec5 +945, 0x149af45e +946, 0x786934b0 +947, 0xbc13cdac +948, 0x208bfa2e +949, 0xcf4b39cc +950, 0x6ac6c172 +951, 0xbfa9a37 +952, 0x42d28db6 +953, 0x2bf1ea63 +954, 0xbed6e677 +955, 0x50325d27 +956, 0xa79d3b8b +957, 0x52448bb1 +958, 0xefaad1bd +959, 0x833a2e54 +960, 0xd9de549a +961, 0x9f59672f +962, 0x9d5f5f16 +963, 0x1c914489 +964, 0xc08fa058 +965, 0xb188698b +966, 0xdc4672b5 +967, 0x594f720e +968, 0x56ed428f +969, 0x9b0898af +970, 0x8a64d3d5 +971, 0x773308d6 +972, 0x84d62098 +973, 0x46da7cf9 +974, 0x1114eae7 +975, 0xf9f2a092 +976, 0x5363a28 +977, 0xf2db7b3a +978, 0x102c71a9 +979, 0xe8e76aaf +980, 0x77a97b3b +981, 0x77b090d +982, 0x1099620e +983, 0xa6daaae6 +984, 0x86ff4713 +985, 0xc0ef85b8 +986, 0xf621d409 +987, 0xfd1561e2 +988, 0x4bcc687d +989, 0x596f760 +990, 0x7c8819f9 +991, 0x8cb865b8 +992, 0xadea115a +993, 0x56609348 +994, 0xb321ac14 +995, 0x1bac7db2 +996, 0x5fe6ee2 +997, 0xe9bfe072 +998, 0x15549e74 +999, 0xad8c191b diff --git a/python/numpy/random/tests/data/pcg64-testset-1.csv b/python/numpy/random/tests/data/pcg64-testset-1.csv new file mode 100644 index 000000000..0c8271fab --- /dev/null +++ b/python/numpy/random/tests/data/pcg64-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0x60d24054e17a0698 +1, 0xd5e79d89856e4f12 +2, 0xd254972fe64bd782 +3, 0xf1e3072a53c72571 +4, 0xd7c1d7393d4115c9 +5, 0x77b75928b763e1e2 +6, 0xee6dee05190f7909 +7, 0x15f7b1c51d7fa319 +8, 0x27e44105f26ac2d7 +9, 0xcc0d88b29e5b415 +10, 0xe07b1a90c685e361 +11, 0xd2e430240de95e38 +12, 0x3260bca9a24ca9da +13, 0x9b3cf2e92385adb7 +14, 0x30b5514548271976 +15, 0xa3a1fa16c124faf9 +16, 0xf53e17e918e45bb6 +17, 0x26f19faaeb833bfc +18, 0x95e1d605730cce1b +19, 0xa7b520c5c093c1aa +20, 0x4b68c010c9b106a3 +21, 0x25e19fe91df703f0 +22, 0x898364bb0bf593cb +23, 0x5bd6ab7dbaa125db +24, 0xd1fe47f25152045c +25, 0x3bb11919addf2409 +26, 0x26a8cb7b3f54af8 +27, 0xe6a27ee11200aa24 +28, 0x7cb585ab01e22000 +29, 0x78e60028676d2ef3 +30, 0x5c32535e5a899528 +31, 0x83e8b6f8c4a46fb3 +32, 0xe56ef7668a161246 +33, 0x36dcbc15aeb73055 +34, 0x5ea247f0bd188acb +35, 0x438b547b84601a80 +36, 0x8acda2a1273e9e3d +37, 0x2b05e30a4b40c24c +38, 0xfd87236bd13af032 +39, 0x471df211d8d985ef +40, 0x18e8a5609a793292 +41, 0x46f0951fab6dc4e3 +42, 0x6c199c4e700f6795 +43, 0xf04aa16bfb7d22cb +44, 0xd763d269fbaffc89 +45, 0x9991930cefbe5c2b +46, 0xb2a11b953f824c96 +47, 0x63fd9f52172c44b0 +48, 0x183bdad907b1d848 +49, 0xe17953cddb931c52 +50, 0x515cf16726ec205a +51, 0x88c327605150711a +52, 0xc7090dd79cbc8dc3 +53, 0xcb487cedeb00a350 +54, 0xc8abf254d87b657 +55, 0xd43cc4cbfb493d1a +56, 0x8705452e5d9ed1e +57, 0xcecd11446769cf43 +58, 0xde72156c8d65bc69 +59, 0x796a8f0f47d52ee8 +60, 0xb4c0da443917d6c3 +61, 0xe07ad7568a8e3dc3 +62, 0xc24a8da39ce6dc21 +63, 0x92b21ea80a8556eb +64, 0x572f21e531edf3af +65, 0x9b917ed56bbed198 +66, 0xe65fd8ddc5ab3d7d +67, 0xf55a80a8ec84fa18 +68, 0x18fc22e1a5227b61 +69, 0x72305dc7eeaa79d3 +70, 0x47ce58a36e7592cf +71, 0x14c6374340c0f7cc +72, 0x6f98273d4eb5a2c +73, 0x59a8702c46fe8f8a +74, 0xb67cbd8113cfe57f +75, 0xaa03c5db5f5b7690 +76, 0x3fb0f77ea4568013 +77, 0x756530990398b26e +78, 0x4c1952b2a3a6a343 
+79, 0x1da15c5383074582 +80, 0xb405b21c81c274f7 +81, 0xbe664677a16788b +82, 0x9d2e37550bcee656 +83, 0x8b4589f0d9defe02 +84, 0x2935f018ee06a59 +85, 0x3834bf88be97ed11 +86, 0xa610d049cea79b6d +87, 0xd49ffc0d09a59ea9 +88, 0x4073365b76567adf +89, 0x499eefb9bb7513e2 +90, 0x74a743ee6b0138a9 +91, 0x3bf0880f2d947594 +92, 0x555d1c0498600a99 +93, 0x923b32a88ef2ffa4 +94, 0x7325411065fbedea +95, 0x9f4129ff8b79d300 +96, 0xab2b0a9b8a3785dc +97, 0x11734bdfba3a1713 +98, 0xc8333398841ba585 +99, 0xee2409cc234e6742 +100, 0xf6638e700872ecd2 +101, 0x10875300c13cd284 +102, 0x27a9bbed7c15b2d3 +103, 0x3c87f8fef31ce9bd +104, 0x92be263cd0914a95 +105, 0xa7b0f11bc742307e +106, 0x4a56f788cc1c1a3c +107, 0x4a130fa32257a48b +108, 0x5d4d9eda16e90286 +109, 0x7cc2af564844bedc +110, 0x2532867bfe7cda1a +111, 0xb1c504676611fd17 +112, 0xce8e86cfb4189aee +113, 0x99685898980d1970 +114, 0x8c3b67db23bcf1e +115, 0x73e14c93905b135f +116, 0xf0271b64ac2bd4d3 +117, 0xf4beba82f3ec1b2d +118, 0x1cdbf3ee9f210af +119, 0x2e938557c09c3ea6 +120, 0x2d314ccfa6ffd81d +121, 0x31ad47079950ade4 +122, 0x342b27547b900872 +123, 0x171b0e20b9ef1a76 +124, 0xdf10ce6318b03654 +125, 0x1d625df4aa718897 +126, 0x8712715a9f6e02ec +127, 0xb4a072da725bca3b +128, 0x19d346cb7734bd42 +129, 0xfd4281d311cb2958 +130, 0x58274c9519fc8789 +131, 0x4cacf29d885fd544 +132, 0x784b14d1c2523b80 +133, 0x2d25242131bb2373 +134, 0xcd2a5e43a7d9abf9 +135, 0x15eda3806e650ecb +136, 0xdaac5e277d764d96 +137, 0xdc5a5dd59aaa94e0 +138, 0x40d00237a46d5999 +139, 0x6205dd35a692743f +140, 0xbbd8236740361f09 +141, 0x1625c9f4e7288bf9 +142, 0xb74f12df1479e3ce +143, 0xb2d72a51b43d7131 +144, 0xf006a324b3707c83 +145, 0x28e8ab4abe7655b8 +146, 0xfb480093ad7ab55 +147, 0x3f8abd0d6ff8d272 +148, 0xc81a94177ac26bb7 +149, 0x3cdc178307751b14 +150, 0x9de84cc2b10ba025 +151, 0x3f8ab5aefcd046e2 +152, 0x43bdb894e1ee83b2 +153, 0xe288a40f3f06ac9d +154, 0xdab62a7d04b4f30f +155, 0x49f4e20295e1a805 +156, 0x3643764805e0edef +157, 0x9449954618b6b +158, 0x6c87e0d4508e0ce0 +159, 0x3a334be688a9dd7b +160, 0xb35c39228776e499 +161, 0xc4118bfff938490e +162, 0x88cbde3dcbb034b2 +163, 0xf91b287793c417c3 +164, 0x42b15f731a59f5b3 +165, 0xffa27104bbe4814d +166, 0x1b6789d138beccde +167, 0x542c2c1440d0ceb9 +168, 0x367294504d18fa0d +169, 0xf918b60e804a1b58 +170, 0xd390964e33a9d0e3 +171, 0x23bb1be7c4030fe8 +172, 0x9731054d039a8afb +173, 0x1a6205026b9d139b +174, 0x2fa13b318254a07e +175, 0x69571de7d8520626 +176, 0x641a13d7c03332b7 +177, 0x76a6237818f7a441 +178, 0x4e77860d0c660d81 +179, 0x4441448a1c1cbdb2 +180, 0xccd7783a042046e5 +181, 0xf620d8e0805e3200 +182, 0x7de02971367fdd0c +183, 0x539c263c5914cab1 +184, 0x9c3b9ba1a87bbf08 +185, 0x6d95baa34cda215f +186, 0x2db3f83ace0bac5f +187, 0x7f5af1da2dc670a4 +188, 0xfcc098d16c891bfb +189, 0x81a33df1d7a5ab12 +190, 0x767b0f863c8e9882 +191, 0x7a92983830de483d +192, 0xfa7598c37a79ac25 +193, 0xb89b3ca42ce03053 +194, 0x457a542b8efed4f7 +195, 0x571b7737fd0eeda7 +196, 0xa0f59e524485c0a +197, 0x82dca766b7901efd +198, 0xa68243caf6a3bd5d +199, 0x1bac981c6c740e5e +200, 0xbcd51bedf9103e44 +201, 0x4e197efd3ae5a7bf +202, 0x523568efd782268b +203, 0x5ec4ef1191fef09 +204, 0xed751ed5e31c9ab +205, 0x44eac24de03e1b29 +206, 0x9237d57c011d3fb3 +207, 0xa8c6da0f7692f235 +208, 0x9f9eb6bc15d6cac7 +209, 0x34bb8e0c93427aad +210, 0x115febd738eaac4a +211, 0xa439991ed139d27a +212, 0x45c7c2633d8710a2 +213, 0x48b7475f3405a3ce +214, 0x80158497c77bd00b +215, 0x935c316a5b1657cb +216, 0x59c5d54440e9695e +217, 0x337c78c5b3d0ede2 +218, 0x8c46bb956b93790d +219, 0xbf1dd03e471d71c5 +220, 0x2d375e90a4bef583 +221, 0xd0365428331b3790 +222, 
0xfcd3969ac827ecd4 +223, 0x392fb6c580498410 +224, 0x6d6db4ceab5ea6c0 +225, 0x9bf84f1972e24786 +226, 0x798dfd820959dcc5 +227, 0x2e425095e65e8bfb +228, 0x8c1aa11536b1c9c3 +229, 0xd28e2ef9b12f6f74 +230, 0x86583bc98c8f78d2 +231, 0x489877530e3f93e7 +232, 0xb1d9430631104a15 +233, 0x1814f6098e6263bd +234, 0x8e2658a4e0d4cd53 +235, 0x5afe20e2531cdb2a +236, 0x30d02f7c4755c9bf +237, 0xe1e217cda16ed2d2 +238, 0xccb4913a42e3b791 +239, 0xfff21363ac183226 +240, 0xe788690bbda147a7 +241, 0x76905cf5917bfc6a +242, 0x2a8fa58f7916f52c +243, 0xf903c0cc0357815a +244, 0x15d20f243a4998d2 +245, 0x5b7decee5a86ea44 +246, 0x114f7fc421211185 +247, 0x328eb21715764c50 +248, 0xaffaa3f45c0678fd +249, 0x2579e6ef50378393 +250, 0x7610ab7743c19795 +251, 0xf9923d2bd101b197 +252, 0x57e42e7a62ba7e53 +253, 0x9f1dc217b4f02901 +254, 0x88a9ebd86509b234 +255, 0x867fc926aecc8591 +256, 0xaf22c1bfef04c718 +257, 0x39f701f0313f4288 +258, 0x6171ad397e6faab2 +259, 0x239bb5b9abdec4fc +260, 0xd9a591e25dd01c6e +261, 0x826dc4a75b628e49 +262, 0xf112b152c408f47 +263, 0x6843a06110f86c0 +264, 0x965e56a7185c1332 +265, 0x8d84492edbc71710 +266, 0xeee8ec111cfd1319 +267, 0xf2858e94ad98e458 +268, 0xbc9589fdf5f3a97e +269, 0xaf0ceef3bc375130 +270, 0x48f4aaf13fa75c1e +271, 0x111e9db47bee758f +272, 0xea3171df130164ba +273, 0x2a7bbe30bf827ab6 +274, 0xc516c3fdbf758c35 +275, 0xec55097754b04be5 +276, 0x374a997d52b6d3e6 +277, 0x487df5456085ffbc +278, 0x528883b84df8eafe +279, 0x805f77ab5ba26f86 +280, 0x8eb81477dc04f213 +281, 0x471ea08ec6794d72 +282, 0x69d3667ecc4d2176 +283, 0x98b7b6e295548a66 +284, 0x3877713c173f8f2 +285, 0xa00542570d0e8de3 +286, 0xf534b1bfa4033e50 +287, 0x7e1fedeac8bf6b26 +288, 0x8043f37c89628af4 +289, 0x1dd7039ec295e86d +290, 0xce9c05b763a40cc4 +291, 0x246926481e61028f +292, 0xb7cb0f1babf5893b +293, 0xefe6b777f37fc63e +294, 0xebbcabb4cb35cdcb +295, 0x39fa63cd711eeea9 +296, 0xad5d3ba7aaf30c8d +297, 0x8e9e78fe46021990 +298, 0xc7eaef6e7d5a3c62 +299, 0xefccdd5495d3f386 +300, 0x2179557ee8cfc76a +301, 0x88a77f621f0885ce +302, 0xafda62674543d90c +303, 0xb8e6fbe2e13e56c0 +304, 0x8bfbbe26a14f9b1a +305, 0x1404f59f5851f8c3 +306, 0x1140c53a0489566d +307, 0x3edf2d138b5c3f1d +308, 0x75d6bb275d817dc +309, 0x8e660ae27107664e +310, 0x7a8021038ee303e1 +311, 0x2042ef5eefa9079f +312, 0xe3e7b90bbf6d457a +313, 0xf3f819d2bb9405b +314, 0x522e42155cae0c10 +315, 0xf5bfbb975b40e233 +316, 0x2cf82b614dd95cfa +317, 0x183ef4a96bc40e55 +318, 0x9f6e351c5ba4e752 +319, 0x37c1110683c90846 +320, 0x1d89b7a996d8a977 +321, 0x18a444f77c7cb4d9 +322, 0xd0a8a971b78dc893 +323, 0x860232fb9e6543f1 +324, 0x60b6097f51002555 +325, 0xca1e5214123e3894 +326, 0xe03fe695c95f99bb +327, 0x2c7c6779d5f03622 +328, 0xafeeee42f63055d1 +329, 0x670dde905515936a +330, 0x9a922f42b59fb094 +331, 0xddb5ff49af5a651a +332, 0xe61b04c9e58ebbf8 +333, 0x4e459dcf272e7fc4 +334, 0xd549e92c16adceeb +335, 0x7a17dba1299d4a9c +336, 0x825d756109f2b585 +337, 0xba142e61a9cb203e +338, 0xc2a19f00e9c04a30 +339, 0x2d0f8140d23d0652 +340, 0x8b866d4d4d6caaf4 +341, 0x4f11d90dd91f8217 +342, 0xf6efc37373b9e0d +343, 0x248493d6cd6a4736 +344, 0xd12b6ae74a951a3e +345, 0x56e34722070b70a7 +346, 0x22d3f201cc9fa0eb +347, 0xbfdcc320008291b7 +348, 0x1a7a6922e9204fbd +349, 0x831421e0c4945ae4 +350, 0x66316feddddf0e11 +351, 0xa8c86a1517456554 +352, 0x14a9049ad989e335 +353, 0x837022259f141ecd +354, 0xcb71793a06c261f7 +355, 0x4aeefc07ebe09a79 +356, 0x8982f15aa3b6594b +357, 0x67bccfa7ed9b0d5b +358, 0xb377463b523e9dec +359, 0x53d3d594870fecb7 +360, 0xa5274b1caec5a60a +361, 0xd6316d0cb643db39 +362, 0xabc1a9b536de88ce +363, 0xed2fdb1383d2a077 +364, 
0x12319c6feb97221b +365, 0x7e0f6cd40ef47403 +366, 0x86135c84fe26dbf8 +367, 0xc96622d3fbbee19b +368, 0xe3989d8d8511573f +369, 0x42cc365554d1fdc7 +370, 0x4c1a1eb8bbce8b4f +371, 0xfc4e30e7ef2034c1 +372, 0xc490444317a91e76 +373, 0x7ccdf469ff5dc81c +374, 0xf5a0da4110cc09d7 +375, 0x505227baf34c0fb5 +376, 0xbe58737e8a35cc88 +377, 0xd449bee91b3e8c41 +378, 0x3e590e23299d0e6 +379, 0x291a7d9e0a64caf7 +380, 0xdc6fafbdfebd2293 +381, 0x8223f1e259fe8a65 +382, 0x6186fbc9efd9e3df +383, 0xfda39b07e4007ffb +384, 0xfc19aea98574dc02 +385, 0xd0e10d354fcacd8c +386, 0xc9619916544a55a5 +387, 0xd454d50a8c8558cd +388, 0xcd94a246712d91e +389, 0x76a771f5d1231cce +390, 0xdd20cb2b7b370ee5 +391, 0xa6f4f50feca57c49 +392, 0x78c8fb431f17ab9c +393, 0x1b692b79a59b43cc +394, 0x4c45045d287da7e6 +395, 0x522132e18bf43928 +396, 0x25c458983138b41c +397, 0x2a1fb426ef229796 +398, 0x74dc324c74e5dd3d +399, 0x6df75e3eb6eb5374 +400, 0xb63f2f4f9ca25b61 +401, 0xac72286112ee54d6 +402, 0x5a966f3d0a6863c4 +403, 0x8d7046bc64a46fc2 +404, 0xa7b740fd6e3087eb +405, 0xcdbcbe0340cfcdf5 +406, 0xcb632613bf312b65 +407, 0xa91b3f2c2aac238b +408, 0xa06deb3f5ae555a3 +409, 0x29d72e1f8db69 +410, 0x2d004bae09728ea6 +411, 0xc6eee5dce0736cc1 +412, 0xa7493145500ff60f +413, 0xc4d68c4aa18ab93c +414, 0x8210c29e79d48d7f +415, 0xd0999d7889ecbef6 +416, 0x6e3bd61e66e93566 +417, 0xe6cc13d47d7d7b1f +418, 0x3d6f181f42e03979 +419, 0xbed4e14fd867604a +420, 0xbe511c84067bd86d +421, 0x49a876d89e697d38 +422, 0xc04c3dde8f889c98 +423, 0xaf293eeab0f53e3f +424, 0x9f6291dd65732cd6 +425, 0xd7811ac01de78c01 +426, 0xe385cf0261d50ec2 +427, 0x5a64134b3542bbf +428, 0xf9d1302bc6f13a68 +429, 0x5d2aabbea37d8c31 +430, 0xd9842e99a5192970 +431, 0x713eadc4cd30e837 +432, 0xb7b002fc72abb413 +433, 0x276cfeea526af1cf +434, 0x8519fe79b633a0ce +435, 0x2f0e87363705a3e2 +436, 0x9adbac0be3c371e7 +437, 0xf3f44ba899a6173c +438, 0x782d6c29618fde2b +439, 0x7f61062acec408f +440, 0x6e79cd836359258f +441, 0x5c8e9b138df5785a +442, 0xa54359c9f39a9a84 +443, 0xeec3f033135084b0 +444, 0x883ee717787a535c +445, 0x9a2422b513a73b00 +446, 0x2dd4beddcdd64a58 +447, 0x90c8a13202239c7b +448, 0x85b352ab759646d9 +449, 0x139f5cb2e46c53aa +450, 0xe1d3ba6c721c66d1 +451, 0xaa66e0edc4b60a98 +452, 0x3521275c75be29b6 +453, 0x490a5190b3edfa5d +454, 0xd2abcdd2ccb2f14e +455, 0x9d9be8bef4a5857d +456, 0xde19676f13ef7755 +457, 0xdac2fee2e42615f3 +458, 0xf4239801cb02f2ab +459, 0xaa8bf923ed91875c +460, 0x61d18a1940e4c7c0 +461, 0x1eb6aa3d5f077a6d +462, 0xee7374c063bf29d8 +463, 0x2f0a59e34d76268d +464, 0xc92e80e17d1eb3e9 +465, 0xafd05b3ec3d2ca72 +466, 0x28a61ad8d6c497b8 +467, 0xa7094d6834ad7d47 +468, 0x57d80ea9eccbb4f +469, 0xb047e0fee6cdaf16 +470, 0x44f41b5eb48c00bb +471, 0xd6dc8e1eb9c8c9ba +472, 0x47adfd2c638c7849 +473, 0x365d63db7d526c68 +474, 0xc21cda439016135d +475, 0x14d10c3f0f98863c +476, 0xa93e56f74e037602 +477, 0x3b4e9c8915bdc9 +478, 0xb46f5ae155e54aa2 +479, 0x8e470d21ce1943e1 +480, 0x60b96301b5ba2e8d +481, 0x1b473a41d381f9ff +482, 0xabcf5a8e3269e73f +483, 0xd410f6e94fb21fa1 +484, 0x65d1a47eebf87e5e +485, 0x48eaa201c61cb843 +486, 0x212c1abc2499bfc5 +487, 0x4255ad8377d2d8d +488, 0x44caeef472010612 +489, 0xffae764524f572f2 +490, 0x78d374d20c9ee550 +491, 0x6e003206c0511cee +492, 0x7998a159145bfb82 +493, 0x921239650bda1d4d +494, 0xae05025509bcfdc5 +495, 0xc6430c980be407b4 +496, 0x78524f1744b153f1 +497, 0x84089e6f468181fe +498, 0x8d0d21d7dfb6c254 +499, 0x90bad90502a33603 +500, 0x3072a403cbd16315 +501, 0xdfadddf3f1c040c2 +502, 0x22f0b0639d9ff975 +503, 0xb49e48a4cad0765b +504, 0x95a0a04f8239709d +505, 0x56e147a24a4c481f +506, 
0xacf16ef61dea4c7e +507, 0x424040afd2700de6 +508, 0xc67e8096a3c717a9 +509, 0x39f164181dd0a399 +510, 0x2449cedc1d62198c +511, 0x7a53df11a1f1a61c +512, 0x5596f1d4a3badae3 +513, 0x38ed4c822072b3d0 +514, 0xf07ef346b3fd730a +515, 0xfd349c35c3ed51fd +516, 0x2f15c9c7890f8f32 +517, 0x3b470df52b173c29 +518, 0xd31bfc8981281af7 +519, 0xbbcc9bdf561215bb +520, 0x5782fffea326574f +521, 0xb0ebdcfcc5e03290 +522, 0x7fd89d93d2b3fbef +523, 0x280ea1865d9ba2 +524, 0xe726959845b2c100 +525, 0xd0361f032cd7dbb1 +526, 0x3c65ec2028b81a22 +527, 0x5221e9b2188920bf +528, 0xeb5ab27c4125ec20 +529, 0x80a32dd48b54f0a4 +530, 0x369b5ced1012bebb +531, 0x582d35d76530bc6f +532, 0x7b50dc9b48e1e37d +533, 0x37fdfe8bbacf8dad +534, 0x7a0cb7e6e93840ea +535, 0xa1132c870be0b2ce +536, 0x9d8ac2c68267cd1a +537, 0x470969b647fa7df4 +538, 0xabcb7d8adf7e2d24 +539, 0xacdebec9bdf9eb1c +540, 0xe30f4cbf7eb6a59 +541, 0x746673836c4df41d +542, 0x75120a6b647bb326 +543, 0x2f4eab556c3f6878 +544, 0xd84651ab05405b7a +545, 0x9e695808b9622284 +546, 0xc93b71e56aa6e1a5 +547, 0x2be7f3be4a7b7050 +548, 0x6497e910b6733241 +549, 0xcf7050dfd08076fc +550, 0x4e3cc156eca183f7 +551, 0xf801a33d9326c265 +552, 0x6aa293c8a47d40e6 +553, 0x28c429755faa6230 +554, 0x82b818651f54e7bb +555, 0xa84d726d7acdbead +556, 0x5cfa535d5774965d +557, 0x4a34b7b1cb48d53 +558, 0x86a7b5bce426de84 +559, 0xfcd2307cecdb7318 +560, 0x16dbaaa71181a038 +561, 0x88e7e8cd261c2547 +562, 0x3c09ba6d1d5ea913 +563, 0x5dd3d643734ee5b6 +564, 0x326d725fe8cbb33 +565, 0x7bcca9ca2da8e784 +566, 0x482dcf6b11d7f9a4 +567, 0x1291b605b4cd3e04 +568, 0x6988181b50e2f4a8 +569, 0x649e3c37131fc292 +570, 0x4eeb67b9e21eba54 +571, 0xc051d39073dec45f +572, 0xc99c52e110270d67 +573, 0xcb813d5d77868add +574, 0x423a5f13573e7ac0 +575, 0x231ac4cc4fe73616 +576, 0x4c22b888a6e600ea +577, 0x8059a6dc7c9e25c6 +578, 0x49f498a5b8ad22de +579, 0xf1e812cc6d1826c8 +580, 0xbbaf60abe8b11e00 +581, 0x1d31d7f4d8be9a6a +582, 0xfeadce70a9a10c14 +583, 0xb47c635bc136996a +584, 0xd88e694c8da030cb +585, 0xc41bbe132aff1364 +586, 0x34249ab18a4b0800 +587, 0xf14b5c825aa736cc +588, 0x2710be6b08df78e +589, 0x2ab56bcc9bf9e740 +590, 0x9b7f6e591b5f648 +591, 0xfb665c3772f34135 +592, 0x628a0a5d2db5d8d5 +593, 0xb3e3f251e61b5259 +594, 0x82310ae33faf1b23 +595, 0x24af8723a65cbd0b +596, 0x671c93282fc4ad97 +597, 0x6cabeaac77270cad +598, 0xef4643fe38b02b7f +599, 0x7b011549d1ac6653 +600, 0xe2af87b9fccfe89 +601, 0x36b71ad67197ac8a +602, 0xdbba55d06f2fd93b +603, 0xf571dbd764b7f7e5 +604, 0x38ea402501cdbd45 +605, 0xb8ab5b5b1bab2913 +606, 0xfab973c4d45f32bd +607, 0x9364f1717c2636b9 +608, 0xfad00f4d983e00fe +609, 0xc90c532a11aef75a +610, 0x64a6eda96e44783c +611, 0x35891f2eb84520be +612, 0x28d216080caed43 +613, 0x129629cc5bd206f6 +614, 0x22c3d39822cbb4b3 +615, 0xf1efbf4cce1eaa2b +616, 0x7070cba12524ed08 +617, 0xa7ed0be9deabf20d +618, 0x8ddb4cd6b454f76b +619, 0xb82814b1db37b63 +620, 0x418e83b36de01876 +621, 0x9a538c7f39c6413 +622, 0xee0cd7abf8a2ecb9 +623, 0xa9222b07e95590f3 +624, 0x6296a415d68341e6 +625, 0x981e0a5a8f811929 +626, 0x4bb372d3b0de283d +627, 0xa9805b5971866e16 +628, 0xaf3b5f5183497657 +629, 0x2152b0fd23c3d9f +630, 0xb730c325b7173180 +631, 0x1e3439d231608c19 +632, 0x1c5ba6031379823c +633, 0x87f5d12d6d365cbc +634, 0xd3bc7f29614bc594 +635, 0x63102214bb391268 +636, 0x482bbd5bba648a44 +637, 0x6a23604690759dc4 +638, 0x4091d41408d3a39e +639, 0x7cd017f922101b15 +640, 0x7ce9004ac5f9231 +641, 0x978bc3d8ec7f7fdf +642, 0x5bd0c4d780580c11 +643, 0x4313c068bb040153 +644, 0x3ab7dab7bc38bf80 +645, 0x3aaf9c187728deea +646, 0x6633a4ce8efb88d9 +647, 0x7263b089878f00fc +648, 
0xd0d767e96fe00eb8 +649, 0x184a7c0c01908028 +650, 0x1ebdf41e6f76e186 +651, 0xeb740ee1d0402083 +652, 0xfccf4974edb1c339 +653, 0x16e2707aa28306d +654, 0x1684f0bdb018c3a5 +655, 0x887b6b67b88aa862 +656, 0x923d7810a2bea33a +657, 0x56b3560babef5d6b +658, 0xb39a14614c54b8c6 +659, 0x33e4dc545a509fc8 +660, 0x26e21f84142da9b +661, 0xdd07598125756855 +662, 0x572d49a071d7ae0a +663, 0xba3c7e3baea28760 +664, 0x7ecdb2d714db4b61 +665, 0x1c62b4920e1b2fe2 +666, 0x71bfafb70092834a +667, 0xd710a4228f60d56a +668, 0xeb16277d4ce4e95b +669, 0x968168c90b16d3a1 +670, 0xac3439dfe8ad0062 +671, 0x5a8226f9dd5876ad +672, 0xb843affe917291b0 +673, 0xd76d1e67051f8259 +674, 0xb73a6638cce8ccde +675, 0xa0e6afd3c7295f9 +676, 0xff8857b4bbb5f4c6 +677, 0x99becf78938f0426 +678, 0xfcd17edc1e70f004 +679, 0x6223b8b23f2f50 +680, 0xca875f3e84587b4c +681, 0x7d1e81e589f87fb9 +682, 0x9eb621586aa826fc +683, 0xf46fb9ef5b9c2086 +684, 0x2882c9b7092725f3 +685, 0x5493f099bbedcd02 +686, 0x90c1ec979ffa811d +687, 0x963f765025bcc53 +688, 0x56194e3ec3d9d4e9 +689, 0x7ec4720954cac1f0 +690, 0xfab3145171af7f90 +691, 0x52a0b4e41a13b593 +692, 0x740e2d4d5909d126 +693, 0x98f5339c09c94a28 +694, 0x1700e462fe8dec76 +695, 0x3dbffc2aa4695ac3 +696, 0x5763edacabdfe2a1 +697, 0x7b5b623ce49ef21d +698, 0x30addc66f49860df +699, 0xcc7511a6c31bceda +700, 0x1b25b61ca75db43b +701, 0x416bc4c298e59046 +702, 0x4cd11fe2d74e4649 +703, 0xb54458a9229fc978 +704, 0x8c21a27882b6ca35 +705, 0x57887c8b5e01639b +706, 0xf4e893da996680bb +707, 0x8d601297702c9c0d +708, 0x2a27904a30aa53af +709, 0x497800f6917ea8d0 +710, 0xe96db3340ada9c00 +711, 0xcc23166f14c010ee +712, 0x782690d78fa65ec9 +713, 0xf3e00d74a0878eda +714, 0xa7cbb683decca0a3 +715, 0xdd2e038e683a94aa +716, 0xe2096ff8da896ca5 +717, 0xf7c83400afdabe11 +718, 0x395b8c6f6a4086a4 +719, 0x4a164ec05bee71d4 +720, 0xe87aa5d1ca0462fe +721, 0x8dbc5aed6dff9ceb +722, 0x12120d1e9552707b +723, 0x877dca6889b3e6cd +724, 0xbd65605c01e900fb +725, 0xbd6b82c4157c3115 +726, 0x8b60282732caf78a +727, 0x279fcf5e5de9e57f +728, 0x34b34ebfb6a37eae +729, 0xd258cc1a14e03b7b +730, 0x9a528ba3db4a13fb +731, 0xffa0aea59d057746 +732, 0x27fa7f456cd37c4e +733, 0xe1117a57a6fdce63 +734, 0xdc8fc903970a1551 +735, 0x492dd104f30faf29 +736, 0x110def0959e5652b +737, 0x7f8d1997636fdd15 +738, 0xfb77b05e538a9b59 +739, 0x2e41fa35b4b01fc6 +740, 0xbc35ae69a3374085 +741, 0x192c2a681c2d9b4b +742, 0x12566b8866c189d6 +743, 0x9d88ea785c5185c8 +744, 0x30a621ad5f983c4 +745, 0x8b875efe1206f587 +746, 0x224d25c3af6e3423 +747, 0x7503e976a1ac7bcc +748, 0x3c98aa869e823859 +749, 0x3d8835304b646892 +750, 0xf6353330ff970bc2 +751, 0x8a673f5e2edb8acb +752, 0xf2fdcc53493838b9 +753, 0x85ddcd526236af16 +754, 0x60afb99814c676c5 +755, 0x32a1c2749e281ca8 +756, 0x2367a92ae3bee9ca +757, 0x219fe082703743cc +758, 0x34d8b74dc85182a9 +759, 0xdd04164c72db23f +760, 0xe293ac28fe2671a9 +761, 0x9ca7d169cbda6f45 +762, 0x705c47972b4240ed +763, 0xc10eda9eeb536209 +764, 0xc36ddacd0c94e85d +765, 0x8eb592c27e8cd0d2 +766, 0x3e815991c76e7cc4 +767, 0xac9cfce31acf7580 +768, 0xbf7a4cb31c7aee94 +769, 0x663077444aceecf6 +770, 0xe7f614ff386eb568 +771, 0x79d7a229c66912c0 +772, 0x161ed4311f63e1f3 +773, 0x308a5faeb9982ede +774, 0x7b38ddb9b7efd10 +775, 0x1e103a2589b27ecf +776, 0x67b02baf4259f27e +777, 0x868921c115ea2eee +778, 0x959791912200f71e +779, 0x4dd55f36dec10557 +780, 0xe3464d90080cb99d +781, 0xfb2d4f6accce652f +782, 0x109900a9257d77ba +783, 0x3c4bda8e2c83684c +784, 0xc9ae040fb7f868c6 +785, 0x78098ffe994f4905 +786, 0x7a94c33eca77f0b4 +787, 0xbe6a2a95e9b5c0e8 +788, 0x797d39cf963f4837 +789, 0x8d2e249e4425d06d +790, 
0x6ae2c30cd5da06f4 +791, 0x904489de762b179f +792, 0x84713e2dfb591e3b +793, 0x6405a40da3f6f51b +794, 0x976b560d663a2df1 +795, 0xed1c544784ba1e22 +796, 0xca658e995ed9344c +797, 0x2b1c6b8e4db49025 +798, 0x52b1513da528bad +799, 0x3c63406d256d9968 +800, 0x63a31ca3d423f85e +801, 0xb05a81f55789a720 +802, 0xd04412992c476c8e +803, 0x828ec2f77a150a3d +804, 0xee50926671bb60c6 +805, 0x5aa70f93e2df61b4 +806, 0x94d60fa2e8655858 +807, 0x3f5e5b770703cc7d +808, 0xc62dfb2688ca7784 +809, 0xaaf02e1e8ba89fe4 +810, 0x4ab74e0d8c047405 +811, 0x31ee04fbac6fcead +812, 0x1203b78b8228f5af +813, 0x412a70836f9aa71a +814, 0xab51cf98c03f1819 +815, 0x783a3ce9ce137f65 +816, 0x8897085b0a072cf2 +817, 0x685dd9bde8798cb +818, 0x9a1fac7b1705e2c1 +819, 0xf3e9ff98de48e9cb +820, 0x5c2d3eb1a1fbe917 +821, 0x3bda718b6b54d82e +822, 0x29f2dd18f22f0821 +823, 0xb992da1572ac3597 +824, 0xacb69e7aa14b34f7 +825, 0xcd36e3ad14f088d1 +826, 0x6aaacc96a1ec55e8 +827, 0xf8ac593f154fe68f +828, 0x18fc9cbff012339f +829, 0x2f3368ccbbb99899 +830, 0x7cec7d17f37031f7 +831, 0x96e86bfaadcb8fc2 +832, 0x74f9e7ee3d42a752 +833, 0xbd52f6c7d9b0733 +834, 0xa48e6d96bb6ce1c9 +835, 0xaefa058254b82133 +836, 0xb7a19edfd0929107 +837, 0x6160ce9125b26e26 +838, 0x6537dbbde1d2aed +839, 0xc567f9a6bec52dde +840, 0xca29fd3f22443342 +841, 0x7732aa6db6a1c476 +842, 0x8f5a4d7df6b11b3 +843, 0x76649262aa7e31e1 +844, 0x60a13eb125fbc829 +845, 0xc81e4d123dd21ac1 +846, 0x643cbb09bb72f86b +847, 0xf971a98fb25555a6 +848, 0xffa2774c66692d56 +849, 0xcb33c16c50b13ea9 +850, 0xfabf388dffda0e9b +851, 0x55d41ec12ca24b9f +852, 0x91cf693a3467e807 +853, 0x6be2c00b2c31d6dd +854, 0xc5cf513b5251ae28 +855, 0xffc4384212403dec +856, 0x45d4e1865255a69d +857, 0xfb1dcf956972086a +858, 0xcae946a55c4c55b8 +859, 0x7351ac7720e385c1 +860, 0x19aa8ffd86240254 +861, 0x8f515ae78f4040da +862, 0x1e1ed2058de50fce +863, 0x22d006dcdb374243 +864, 0x6e0f0ede7c95b441 +865, 0x70e8aa81b53b4d25 +866, 0x998f309ea41e3814 +867, 0x89ed6598fb66f390 +868, 0xb5997dc3278060df +869, 0xb2a021eac4f7e046 +870, 0x3705b60aa2fd0768 +871, 0xfc415079ab9200e +872, 0xf2871ac4cf45ecc9 +873, 0x24bf758d2246175f +874, 0xac503dd6f8141b3 +875, 0x4e879d12d9f03b3 +876, 0x82034af8cf93b644 +877, 0x59899dd7e478a6c7 +878, 0xae90addb6eb11507 +879, 0x1524ddf76730cdef +880, 0x6fd4afd5456b1c9d +881, 0xcddb9221ea001cbc +882, 0x64ff400bbf2e8604 +883, 0x6dda10549b06ed9b +884, 0xed2c85104c261527 +885, 0xc7e09217d29929a8 +886, 0x56284df611a428b1 +887, 0x1a7608289c0a61 +888, 0x7cb63db15166ff66 +889, 0xc6013c76fcdcdc72 +890, 0x8e5dd566c7a5a676 +891, 0x5a8e8565f40d133b +892, 0xe465973455848c44 +893, 0xf92eecbfe0f3c2c0 +894, 0x7d64155d4dcc5cac +895, 0xf17595706f988dad +896, 0xd590a001a6a19c5c +897, 0x82a164475758db3d +898, 0x6b144993ea1bbe32 +899, 0x22a81a7a6e453779 +900, 0x8e8c298df1a68a73 +901, 0x78056afd6d936b4c +902, 0xaaceef0325faaf62 +903, 0xe78bb7699f82266f +904, 0x523a2d283c5a5166 +905, 0x7076d87088f6c6db +906, 0x6087dd54cff5aeb2 +907, 0x7ef82e62cb851680 +908, 0x4e8bcc8ed84d03d8 +909, 0xd12fa0361df3cfd3 +910, 0xefb89c79f8127297 +911, 0xa9af4e2fbce0b1f8 +912, 0x462136685b70331e +913, 0xe9e74c93da699b77 +914, 0x9ec69215fb11d0c3 +915, 0xc10f229939e3e111 +916, 0x3f67fa79e41d2374 +917, 0xd5e7c1a9a7185162 +918, 0xa1dcce9ec91492fe +919, 0xd4e61f0727b5d21b +920, 0xdf6cdce46551800a +921, 0xa3f256ce906982d3 +922, 0x209742a6b9ffc27 +923, 0x4006c96958526a57 +924, 0x9606aebc75a1967e +925, 0x91b9f42fb64189df +926, 0xb27119defcb938bc +927, 0x128cc7a84ba05597 +928, 0x6c3df613c62d0d30 +929, 0x3adf69d48b629ec7 +930, 0xda42ee493837b128 +931, 0xb8e770480e760bb5 +932, 
0x9feb55d57c99c626 +933, 0x29812d80afdae3ed +934, 0xae4222a64276a8c7 +935, 0xe3897212a5b4ed53 +936, 0x98bedfd13886e669 +937, 0xca858675d7fc0d0e +938, 0x28a359f665354234 +939, 0xfac2ccabe4128b35 +940, 0x61373cc5d11ca180 +941, 0x7007605a4512a87a +942, 0xe71f8eade7b30b3d +943, 0x3a9e77f9b99bd04d +944, 0x70d3e42488098866 +945, 0xd30fc159c7cd4d99 +946, 0xe4d3f6600d2e2d6f +947, 0x1088324dfa955c25 +948, 0x516437acd4764623 +949, 0x38a31abe50d0aa03 +950, 0x72e1054e9dc02ba +951, 0xe6971dd664d1a2e2 +952, 0xf6698cb095d3b702 +953, 0xad995a5a8c19bd92 +954, 0x34e53c6936f656e6 +955, 0x10de240bc07c757a +956, 0x3e3b9a6861c2bd1c +957, 0x9c0b0b97d3712ec9 +958, 0xabf1505a75043aed +959, 0xbdf93d3de3274179 +960, 0x28fa5904d3f62c28 +961, 0xc3b97b39ef6c5133 +962, 0xf2b2219225b8679d +963, 0x8be4ec0f930c0aaa +964, 0x47de5a56aa590643 +965, 0xb6f871b304129856 +966, 0x80a61c06233ab0f9 +967, 0x3ce6c3af8101b055 +968, 0x85b911708274e7d1 +969, 0x4cab65d093a488b7 +970, 0xaabc4b10661fe28e +971, 0x35b16dea64474a68 +972, 0x1d6eb5b093361223 +973, 0xc39107b92f0fe1fb +974, 0x1d09e048073c4841 +975, 0xc6a02f43aca8cb2f +976, 0xaf6613dbc7da909c +977, 0x5ac2a40c230aa756 +978, 0x33afb5e7c01c39a5 +979, 0xc7b0b20ea8b7d0ef +980, 0xdf7306c8ccb1bbea +981, 0x9710efc0c188b2a0 +982, 0xd6303eadb72c873e +983, 0xa38ca609b118f35a +984, 0x8390613065c6e535 +985, 0xdf9a0106757e431f +986, 0x8bcf77039788e143 +987, 0x6026806a986b378e +988, 0x482ff3b1394cb1dc +989, 0x2a27d0ccac9ede9c +990, 0x53c77f26e271b3ab +991, 0x1ba004cf276cf3f +992, 0xc135b0517dc81f7c +993, 0x5d137838db75e442 +994, 0x3fe505f93d1dbdd7 +995, 0x351654ae7d598294 +996, 0x173f8d182af9d84d +997, 0xf97dfcd164fe11c5 +998, 0xcda423e5ad43b290 +999, 0xa5cb380b8de10d10
diff --git a/python/numpy/random/tests/data/pcg64-testset-2.csv b/python/numpy/random/tests/data/pcg64-testset-2.csv
new file mode 100644
index 000000000..7c13e3172
--- /dev/null
+++ b/python/numpy/random/tests/data/pcg64-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0 +0, 0xa30febcfd9c2825f +1, 0x4510bdf882d9d721 +2, 0xa7d3da94ecde8b8 +3, 0x43b27b61342f01d +4, 0xd0327a782cde513b +5, 0xe9aa5979a6401c4e +6, 0x9b4c7b7180edb27f +7, 0xbac0495ff8829a45 +8, 0x8b2b01e7a1dc7fbf +9, 0xef60e8078f56bfed +10, 0xd0dbc74d4700374c +11, 0xb37868abbe90b0 +12, 0xdb7ed8bf64e6f5f0 +13, 0x89910738de7951f +14, 0xbacab307c3cfd379 +15, 0x2cf7c449d8b927a6 +16, 0xdcf94b3a16db7f0e +17, 0x8a9d33d905a8792e +18, 0x4cb9eb2014951238 +19, 0x6c353acf7b26d6f1 +20, 0x73ff53d673aa30c +21, 0x1fd10760015eca68 +22, 0xabae0aa9021eeba8 +23, 0xa5ae363a868ee2bb +24, 0x9d89e0f041de6631 +25, 0x6238b133c3991a65 +26, 0xff49267d75fef51a +27, 0xfb180656ce13c53f +28, 0xaf7fadf36128712d +29, 0xa6847fc6f339c63e +30, 0xb03e0b80d71ea5bc +31, 0x63905abcb43969af +32, 0x2295af3ee00a3bba +33, 0xb8b375b994330415 +34, 0x867d9ef1d8716a3b +35, 0x4f6c02f5601b4e18 +36, 0x7c5fb4c16c470d18 +37, 0xe3b57986b804b343 +38, 0xef1d79d212aca692 +39, 0x5b98774c8806209c +40, 0x924fc76bac38a5d1 +41, 0x5266084c412ddeed +42, 0x98240bf9b831d6a3 +43, 0x5681599e81219442 +44, 0x6441248fc2ba92bc +45, 0xe3e9051a540349ea +46, 0x3a2700034390baa3 +47, 0x9f893155b6d402bc +48, 0x158207910c6d8aef +49, 0xd5282ab7608c2cbc +50, 0xc97f4651669dee4f +51, 0x3d4750d95103ed60 +52, 0xe0614542caac1f04 +53, 0xefe5092144cfc6c +54, 0x560bc486abd7e9ae +55, 0x2678b71392daa4b8 +56, 0x734970d3dc2ba416 +57, 0xcbdbe849e51e4aaf +58, 0x3b0b5e28b491556c +59, 0xd51449ac45abd88 +60, 0x6790b59991f1b7ab +61, 0x32d1c039ff2415bc +62, 0x173b9772f24f72e0 +63, 0x9490a9ca9f883b1b +64, 0x4c775989e6214222 +65, 0xac07db37e6ee6114 +66, 0x331371b2e3f10aee +67,
0xf12e5326c21c28e4 +68, 0x5d77dc280c70d614 +69, 0x1b01bd17a2f281ec +70, 0xa10d3b5882938487 +71, 0xed5a0033c394ae8f +72, 0x70bc8ea568ea44b4 +73, 0xf4600ae77965e730 +74, 0x7ff92c0b321ce233 +75, 0x6cdbc87d0cc1d670 +76, 0x9ec64f0cf2000eb1 +77, 0xfebea50259800f68 +78, 0xf2edf9019a8fd343 +79, 0x75c584ac042e5468 +80, 0xc1fa8481d5bf9a1d +81, 0x7f57180168514ac2 +82, 0x878100716b94f81e +83, 0xc929406e3af17fd2 +84, 0x6a26e2c013e4bf4d +85, 0xbc071d8848280955 +86, 0xb60d75abbfd1bdac +87, 0xee9b76afeca9fa69 +88, 0x1d6c399d2f452810 +89, 0xbaa0bc1621e25c83 +90, 0xed6ba792f8671ba5 +91, 0xf7ca02c2ab11d8d7 +92, 0x3c3cadadf0b21e3 +93, 0xdd1784571e864e9c +94, 0xfb2f992015157509 +95, 0xf50bb9f0d3ced743 +96, 0x261565f75c3e185f +97, 0xf8fe33b284513e60 +98, 0xe3d2d10b5e024664 +99, 0xd28717566242cf35 +100, 0x7ae07d133ac5b789 +101, 0x3b7ccaaa53ac338e +102, 0xcd480bace4871650 +103, 0xec6c78f923c080e9 +104, 0x44211d0ff8919d59 +105, 0x89f79af76d2a45fe +106, 0x71583fd8a837548b +107, 0xee57269c261511f5 +108, 0xa5ee8f3b128c5d1 +109, 0xbb64c20ed0765a17 +110, 0x9d4790ab2eeaf7e4 +111, 0x742f3db806d9e98 +112, 0xb81ec97aed6a0d1b +113, 0x41808b34f6a8a23 +114, 0xc20913af175dfd4d +115, 0x834427db263b22bb +116, 0xedd9c632e611828a +117, 0x10eac8524496f571 +118, 0xd76091b97eb00ab7 +119, 0x111298ae9fe95666 +120, 0x5824b2e2a6719c43 +121, 0x6e280ec539e934ed +122, 0xf74fd832df90083e +123, 0x8fee6d0f241c2e97 +124, 0x4244f331c2f19c3c +125, 0x3dde75a845cce97f +126, 0xe35bb8e635a9915b +127, 0x39d2943037f7932e +128, 0x1fe2d134201d0970 +129, 0x49d00b63c749b804 +130, 0x960c2942cd4e4e04 +131, 0x8dd8e009dbc0435f +132, 0xcf493495c3a055cd +133, 0x8f7b5a1c0f9fe9cd +134, 0x49d5f90374641a25 +135, 0x69b3932073d3524c +136, 0xd170603e7de84ee2 +137, 0xa062ba3ed3539948 +138, 0xf5861cc5b5d56c82 +139, 0x5e914998a30c7e76 +140, 0x8d77f2ad1503c0f1 +141, 0x980b6a9e3b4181fb +142, 0xd9299cd50694c084 +143, 0x253dc0f8f1cec4c5 +144, 0x68110fb9d1b3e695 +145, 0xe8f3120d0aabc461 +146, 0xb066e7df0dfb042 +147, 0xd29ce0f797e6b60b +148, 0x6a569bb7ca33bd42 +149, 0xd46e08b2dc2385f8 +150, 0x28c61d11d055767 +151, 0x5d73aa3d1a2bb725 +152, 0x1421191e1c14829a +153, 0xa711bfb6423df35e +154, 0x461af97a86308006 +155, 0xb3e1018ff3519367 +156, 0xf19cf866a268ef2b +157, 0x207715eac9199d1d +158, 0xdd621c410975b78c +159, 0xf390aea68683610 +160, 0x617a2d107a0047d9 +161, 0x6e05ac416e5bebf0 +162, 0x7d253e70506c1bed +163, 0xf9f96f4a7dd53810 +164, 0xc693b29cb1573f73 +165, 0x4f1146b0020ea544 +166, 0x45140608fbd40579 +167, 0xdcf57219828ce6be +168, 0xe19d58cca37b5b32 +169, 0x82bda95b2a161235 +170, 0x5823c3d8a2b6c9ba +171, 0xfeb2e74092fdf89a +172, 0x50e1ad1abc8f869d +173, 0x2ec63d0c105eb8da +174, 0xe14e1c4845a3264a +175, 0xcff53670455eb6aa +176, 0xaafaccd24619fa3e +177, 0xf55a988486e2422a +178, 0xecfba16a90ff4d04 +179, 0xbf8d36c2f644757a +180, 0xdc56ed75a0dd6249 +181, 0x3f45023eff17c3bb +182, 0x2428bbfe90023fab +183, 0xab892c611adcb70c +184, 0xb6f13d8c0c2b9d74 +185, 0x2ac3fb11d224f2a8 +186, 0x65433dcfae2d9351 +187, 0xe906859ae4b45f82 +188, 0x8fb7f5f093d76a3b +189, 0x940dd290b5e88d1a +190, 0x31b27d21bef116e7 +191, 0x86a964e2c83b5296 +192, 0x85ffd17bc079a9e8 +193, 0x16c47c724e7ab7f1 +194, 0xfb6098a9867e7d7f +195, 0x9246fb69092c6cb2 +196, 0x1a4033572760f32 +197, 0xc5cc568a8b273b84 +198, 0xfa6f9f2fbdd44abc +199, 0x9701b8e087718ba3 +200, 0x51d6a7dcf73f8f3a +201, 0x30008172cc6a972d +202, 0xac2ab49a5ca6ac81 +203, 0x31f28ef79461e54c +204, 0x93e35a8da8cc6132 +205, 0x9a2c58beeba3d5b9 +206, 0xf6615c1de266ac39 +207, 0x127ff9f8166b766b +208, 0x7ffe380e80a69556 +209, 0xbe7d2c228e1542f7 +210, 0x2d5ebb4e50ba1746 
+211, 0x63585761ae1bf684 +212, 0x1019eb5cee022fea +213, 0xb9d3540ab58da30d +214, 0x1677f4cb45620eb9 +215, 0x6524baee51783822 +216, 0xdf9f2ddcfabb0adc +217, 0x78e8acc43b287935 +218, 0xe9a1974e999222b5 +219, 0xc41324ec2291e780 +220, 0xea52abc9ecdcbc9f +221, 0x209d7bcd46ec6b04 +222, 0x12d504c09803db2e +223, 0x1200e6bf21475d81 +224, 0xde6d3c2b35fd2cfc +225, 0xa2526900ac33bd3c +226, 0x7f1f5290fc432bc5 +227, 0x29ddfb380a3d69c8 +228, 0xac79cb6942a2909d +229, 0x516996685b67a92a +230, 0xb5fc39041cb828bb +231, 0x75d9d8ca0644a276 +232, 0x81e98b76be92a3e9 +233, 0xca27888fafe12179 +234, 0x17be2ae039925765 +235, 0x9429846c0e6d0342 +236, 0x327dfd50439815e9 +237, 0xcee20cd7bc254aeb +238, 0x7d250389f453f29e +239, 0xfd1b232a85c95569 +240, 0x2ed55fac80f3e9e9 +241, 0xf6886c20417a1be7 +242, 0xcd08e61f0b0fdfde +243, 0x7b33e34da5c27bff +244, 0xd043c4b7d5603dd5 +245, 0x9a544e4c70a3b686 +246, 0xa7b60398c381f771 +247, 0xe9e7a3487c4bd4f2 +248, 0x10b58fdfe1ff112c +249, 0xd5c1c9748c0f4ceb +250, 0x61be9d09159d54ff +251, 0x5356f51e8239f510 +252, 0xfe7889d9b202ecef +253, 0xc7fc19ca5d263d5d +254, 0x7c4c07e61dfd9f69 +255, 0x6c315fe5015f300a +256, 0xe0a5bc00039747b4 +257, 0x16397fdcf829ee80 +258, 0xb55aee80d16a5169 +259, 0xca0609944d007eea +260, 0xcc982249f65a02ce +261, 0x528161feb149c148 +262, 0xcbf08ba49b41c006 +263, 0x39af1ff0b6f14138 +264, 0x5cc036be69799aec +265, 0x6adde125b1db21c5 +266, 0x8a99d83d6b613b67 +267, 0x1cd43fca9451f74c +268, 0x682dbb26ecc96365 +269, 0x13b4be2ceb43e3 +270, 0xbe8fbc3b6f4f581e +271, 0xda148a2f4bda5719 +272, 0x239106ca3319f393 +273, 0xb42b4dde641f0dd5 +274, 0xd233cfdf4cb0af74 +275, 0xfb5919d905589afc +276, 0xd802a8860c10b66a +277, 0x6c923e1d00e7b5bc +278, 0xfacce1134f383b89 +279, 0xf9570abda7a6d553 +280, 0x80f0f9796a208f18 +281, 0xc0e1df5280951c57 +282, 0xe9f143f08257bbe0 +283, 0x79e4c6463123d588 +284, 0xdd2118583f2b1684 +285, 0xb399ff5f2329fa18 +286, 0x4b3e9ebae96f813c +287, 0xc484dbf247787384 +288, 0x921865eb97603f2c +289, 0x18063c68e257d300 +290, 0x643181f345e7fc26 +291, 0x12e0b0e8eadf9fa7 +292, 0x79e613fe73dfa354 +293, 0x6db4c59203b7217a +294, 0x6c7a0e9ba6139eaf +295, 0x9617c7ac4e3f6d97 +296, 0x1f68a7b4fb1b4b75 +297, 0xef0b7ab24944f466 +298, 0xaf1dee1f4be1bc89 +299, 0xd2e355c959f5fd8d +300, 0xe594c3fb95d96efc +301, 0x9554766ca3342906 +302, 0xa4bbdc77d12842c +303, 0xb62400211ee489a8 +304, 0x91abadaaa3bbe67c +305, 0xd371eeb91deb42bb +306, 0x883bab35cbd2b6e5 +307, 0xd030c3d9411a9041 +308, 0xff3c110a858ff000 +309, 0x59bdf5ca47d0bde7 +310, 0x2bc80fa3cdba1853 +311, 0x6444ccb652662cb8 +312, 0xc0c7e256b9e90339 +313, 0x70714ea9c9d72302 +314, 0x96a0142f9d897d27 +315, 0x209a9097c5a91ef7 +316, 0xb9e33afc5171e009 +317, 0x47b37af433a58d40 +318, 0x30cc4ffbfa831d26 +319, 0xdcea4a85ff815466 +320, 0x907d5bd027f2e5cc +321, 0x7c081f6852e04a4b +322, 0xe61950749c1d502b +323, 0x1604e937ee69834a +324, 0xb2372d952dd25309 +325, 0x53f6a5b834c72577 +326, 0x2ce7a74395e0b694 +327, 0xacbf9ab4fe91f225 +328, 0x5ce1e63d3a2bb90f +329, 0x54740da3a5ed139b +330, 0xf194ddb39f29880b +331, 0x3305374f5d8ec08b +332, 0x831dd0164927ff4a +333, 0x625baa78e4458cf +334, 0x29d27dc0a4a71152 +335, 0xe227bae9a1401034 +336, 0xca0c209831846b2b +337, 0x8e8cc54b08b5a411 +338, 0x38f2b4acaac27db6 +339, 0x8ec88baac814e86b +340, 0x31c08e46b007bde +341, 0xb686c02722794c09 +342, 0xb77cf8fc682e3907 +343, 0xa56334e7f606f4b2 +344, 0x9c80b127bddd5f4f +345, 0x12df14834cd858bf +346, 0x3f14762a9cf5fb9f +347, 0x930a70941ef5779e +348, 0x64e96c849c30c080 +349, 0xfdf53bfba1300484 +350, 0xec7a9363c21bc616 +351, 0x26e9fd6a115ecb47 +352, 0x9707a84b5bc77fbb +353, 
0xb23b2737b20d5903 +354, 0x22f4825ae80f6501 +355, 0x500644b12be6a01b +356, 0xb746645b2af082db +357, 0xe6af051f697892f8 +358, 0x577c724248a1cfc6 +359, 0x3d2b6a434c84eed3 +360, 0xd260f5efd7328314 +361, 0x95c16cc84bb3f55c +362, 0x7a01b2e4e0e80ca7 +363, 0x41930c3ce70a0935 +364, 0x1299bccf39d4e110 +365, 0x494883ba1a8a87f +366, 0x9478ecfe2d918e60 +367, 0x30ec9a5670cda8af +368, 0xf9bc877e833e2b99 +369, 0x1b83a0acfbb4a8db +370, 0x73bc1740c0d18880 +371, 0x65086ca9773cb3e1 +372, 0x3b78c3ccd63cff2e +373, 0xbfae748795acfb31 +374, 0xa4c9d5d56a15ba20 +375, 0xb9cb41721e52b71e +376, 0x1532f15d4dc47748 +377, 0x5a4d647a4b9ee632 +378, 0x8513c7c5a50898d9 +379, 0x6d3d98ccd5461b2e +380, 0xa65e99be2fe98d6 +381, 0x31abc8855334a0e5 +382, 0xf1ed22a661dca5b8 +383, 0x299e2b63229e03be +384, 0xda201a06687bce48 +385, 0xd27794b302142c55 +386, 0x642bd3e1c7898a9d +387, 0x777f1ff00afa1a87 +388, 0xd2f1c84fb3877baa +389, 0xae417583289191fd +390, 0xd641f1d88e0e2d55 +391, 0xc1f1d98fb5d18ebf +392, 0xb0f72aecdadce97b +393, 0xe9b8abc764f6018a +394, 0xd2a37cff8e890594 +395, 0x2dd70d631a528771 +396, 0xbf8ba0478c18e336 +397, 0x1630bf47f372ce0a +398, 0x6d04ea20dc3f46b8 +399, 0x6591881bf34337f2 +400, 0x33c149c7eb5b4103 +401, 0xf01a8c9857c86748 +402, 0x184348cdfc16d215 +403, 0x141168b253d2ed7 +404, 0x52aaf012ef50a6f1 +405, 0xfda1722387e16f4c +406, 0x43c30f57d6c038fa +407, 0xd4a8611f5f96d214 +408, 0x2c512ce17e987f2c +409, 0x961ce450f0fa2822 +410, 0xf55a506ec6cea9cd +411, 0xb76d694d9c7f5ef6 +412, 0xfb029216dbd8e988 +413, 0x93162501896a0081 +414, 0xfbbbd2c5ab300f5c +415, 0xd648b6da7387d491 +416, 0xc73b4697471d9d98 +417, 0xe37412bf1c93ee76 +418, 0xa1a96d96570e6637 +419, 0x5b3ab4f82428f65c +420, 0x873d849b188aa36f +421, 0x39fbee0ffc9fa9ff +422, 0xc70d21b744d677fe +423, 0x2b8a43c23043d209 +424, 0x93c33eaa37370d16 +425, 0x8930ac1880f2b0ef +426, 0xac01d27707036af0 +427, 0xc2af3fee504343a0 +428, 0x1c1dae2ad5535d97 +429, 0x9ffc21804b76a480 +430, 0x69f903412cc13563 +431, 0x9d3c4e2759a0c47d +432, 0xb1a8f894be6302b9 +433, 0x95e1fd7951479506 +434, 0xbb9e6c03cd4ae8e3 +435, 0x85206010c9b737cf +436, 0x767e813694d6238c +437, 0x4969af329ccbb30a +438, 0x3aa9af1075aaea5c +439, 0xb1ff519e8118a993 +440, 0xb21a23a3c91180fe +441, 0x320b24582ca3fd88 +442, 0xf8ca56415fb4e453 +443, 0xabd0899c07205e77 +444, 0x87fdc7a44b4ad50f +445, 0xd75744911641a278 +446, 0x7c8c9a65df6fcb95 +447, 0x79d785e3c7a5b695 +448, 0x421e4565ba1f592f +449, 0x27f87eb2517835cf +450, 0xb62cc4297441c83e +451, 0xd817a80ac815ca6d +452, 0xad84388130df2aa8 +453, 0x5e6b1640452d6ac8 +454, 0x936285e15edce2a3 +455, 0x903bccc4969768e8 +456, 0xefc2cb7b109d3140 +457, 0x633e9dfdda2d903a +458, 0x2a2f3225925678a1 +459, 0xe07eac91a27f8547 +460, 0xe50ced40eda78cb3 +461, 0xc5b22500e1c7441 +462, 0x32becf61bca3aa72 +463, 0xa2e37c4b30671344 +464, 0xc9f1c1910f45d544 +465, 0x9b50333b2dcdf730 +466, 0x310bfd53a1684b94 +467, 0x1e1dc21e66ac6455 +468, 0x81876c2bfb1ed5a1 +469, 0xd0c54a3e25eadc7b +470, 0x3791b6fbbd5c7ba0 +471, 0x133be57356c599fc +472, 0x8d1148eb8e83fdea +473, 0x311aedba0d8b42cc +474, 0x1142ae52745f94bb +475, 0xc5f4ab2fbde8c4a3 +476, 0xd23be827b5b24f6d +477, 0x65f95194cd122715 +478, 0x4b48969d73125922 +479, 0x46f165052b8ff988 +480, 0x5c689f94b9275ff4 +481, 0x93b03823ff2d536b +482, 0x871f3775aa4e3523 +483, 0x5af829f7cc0f66a5 +484, 0xa32e05739cbeac8c +485, 0xacff1856ddace0fe +486, 0x8eeb5e7f991a5322 +487, 0x6325c2720e0dbdea +488, 0x9fb817bc4fdf5200 +489, 0x9786f0d850e43d78 +490, 0x571f76dd7f9fb77a +491, 0x4d9e94e181cbc63f +492, 0x8bb632d3376c547a +493, 0x9cc26d9efd1c88b9 +494, 0x9c5d49579df52b0b +495, 
0x6201abf7e1cda07b +496, 0x90d68f0c6c884963 +497, 0xfc5b66188ef7f561 +498, 0x6d9303cf2e0e0f95 +499, 0xd7cfcff535f5ed07 +500, 0x14d1a1228daa4ac6 +501, 0xe00ef5762f66ae50 +502, 0xf113a79471582978 +503, 0x430985281785dc7a +504, 0x31914108c206ed5 +505, 0x7ba6707b6419971c +506, 0x2ec63b033ce112e5 +507, 0xf8bcd36ced3b41e3 +508, 0xe5cf908c8010414b +509, 0xf5ee224b7c703e30 +510, 0x9a9733af0b12338b +511, 0x83e18cc00ace34f8 +512, 0xd52cff39e23008b8 +513, 0xa700578136b9c0c5 +514, 0x3fa179d32ac51f99 +515, 0xef2d5eab6d4ad380 +516, 0x709024a5abd032df +517, 0xc607c7ee349ede87 +518, 0x803d784e9731eb5f +519, 0x2ef06f4ba769282d +520, 0x4bc1dca1e9f07eb9 +521, 0x930c958a7a72f94d +522, 0x249bc8db2cc7a3bf +523, 0x3845305798f9a5d +524, 0x6f137eca9ab6f948 +525, 0xc31f5a963d31bd67 +526, 0x9d39693d5383626f +527, 0x52fb41c335a8b98e +528, 0xb79d1a29a06006ec +529, 0x7c0926a7a3eda2cc +530, 0xffdf5214406fd53e +531, 0xc6aa02a7e94282b9 +532, 0xd4a4431b4aa301ee +533, 0x4271cc0f9420d3ab +534, 0x26fccd7cc7fc2485 +535, 0x330594bb945b8d5a +536, 0x6ea8eaad12e5cb8c +537, 0x831c3467726bede3 +538, 0x31d1eb10017eaa61 +539, 0xc7aa75e41508f5cb +540, 0xde51810f0cadd0b5 +541, 0x50e5b3e73692f80b +542, 0x82107ec55636e188 +543, 0x9828ef175d843ab4 +544, 0xb8edc6a860dd421e +545, 0x25c0c138fd537ac3 +546, 0x47e72a771e8eb563 +547, 0xbb0f8c5333f4a2cc +548, 0x91750d2fb9b2d479 +549, 0xe662d8f6fe38df36 +550, 0x72a6d879fb5619f0 +551, 0x6817c7878dcbf077 +552, 0x4e7741cb484661e8 +553, 0x3b3b3ba0be5711bf +554, 0xa6989f5d25868765 +555, 0x43c276398997e4e0 +556, 0xdcbe16a94da28870 +557, 0x454936980a699c99 +558, 0xac614bfa8f0266c6 +559, 0x9174841392e213d5 +560, 0xa0e2acffc5fc9d1f +561, 0xe53a08a7a0e6521a +562, 0x2b845cf7c24172e0 +563, 0x265a4fc5f7adec0d +564, 0x1f34fbe5f1e49420 +565, 0x139181f6fb647f20 +566, 0x88c35d46e2fcd05e +567, 0x2a6d5b55903c0459 +568, 0xcea28eb621ad7bf1 +569, 0x5c9cdc13e7aaa30 +570, 0x5fe63e14746e7103 +571, 0x7923e53d73835db9 +572, 0x376e661210bf1b06 +573, 0x5b1cab85450efdd5 +574, 0x3908dc096c70b452 +575, 0x4825e303cd1f396f +576, 0xed476bfd702957c3 +577, 0x6acc013aff5db743 +578, 0x62c80b776343d488 +579, 0x9c75edcd5b012697 +580, 0xaa053362a3b9770a +581, 0xa907e236c7c07e94 +582, 0x15b2c380451692c0 +583, 0x94f79142697bd61f +584, 0xbc657d31ea98d44f +585, 0xcbaa5e52517a1f5e +586, 0x96aa2e44a7c4a03f +587, 0x216d3c66db2b515d +588, 0x157001807e3ca88a +589, 0x52b3a596bdd3859a +590, 0xed747e7fc5e3adac +591, 0x78fd765ddb2c448d +592, 0xe53dc7299ed8614e +593, 0x75ad41fb1d7a790a +594, 0xc14f6b944b0e6cb1 +595, 0x7c314b69fce3df1c +596, 0xb56d82eb740d7abc +597, 0x5132a93c41251fdb +598, 0xe3ce35bd2a82f958 +599, 0x440571a981c722f2 +600, 0x194cdfd9f186bc9 +601, 0xb89e522a5db00939 +602, 0xad35f339f68df3c8 +603, 0xa82ab18420322293 +604, 0xaffa6df9b72b27c4 +605, 0x9615694d23beaa2c +606, 0x1d82ebe563abad91 +607, 0xab50ef65fbd94385 +608, 0x1b070dbd70a9a14 +609, 0x2ececa796abbadf0 +610, 0x6bbeafe9e81ab2a2 +611, 0x60dcd0d2a9b76914 +612, 0x1e748039ef05c33f +613, 0x6d4d17f2213ccdff +614, 0x9fa56132957bc987 +615, 0x60a17185de2428eb +616, 0xb56038ddf306479c +617, 0x3b1db5df92d06d8b +618, 0x24d1bba8bdedf580 +619, 0xbfb7e6740ebaa4d9 +620, 0xab31c4473e46f61d +621, 0x6deb3cdd8fd5869f +622, 0x23032e47746d72d6 +623, 0xa9e72d734e10f2e8 +624, 0xbffd199b6157bc23 +625, 0x29f8254df273fb62 +626, 0xb076142130ee55ec +627, 0x5b0b08374126c309 +628, 0xea4536aae979521f +629, 0xc064e7abec91a174 +630, 0x46133ef80c59d935 +631, 0xf0227e2da1b14160 +632, 0x675a76641e1af5a +633, 0x2f50a069b33d198c +634, 0x3ded5a65e1d657eb +635, 0xbb6999b020694f6b +636, 0x86b2f2b33487aed7 +637, 
0x76e14e85f8bfb4cf +638, 0x38f7f1e44bd4e0db +639, 0xc1a7d41b7e80d4ae +640, 0x1dfaaf80bbceb42e +641, 0x3f51c11497720c2b +642, 0xce6da1415ddb8b80 +643, 0x7377d8bcd359b5f3 +644, 0xe077208f3f810aca +645, 0x9a06a8a2dacbffce +646, 0xca1f99156b09b735 +647, 0x2ff9a93064d91451 +648, 0x50f3ea93f351a7ef +649, 0x606fceccb07054de +650, 0x7e83d6d2f8f6685d +651, 0x78f3995291c5d407 +652, 0xd28d2460e22d0228 +653, 0x2c5636f68a0054dd +654, 0xd9fafb1c56c8f6cb +655, 0xe39889b5f9d74464 +656, 0x1355372bf5db2cc1 +657, 0x26768426b9ac323 +658, 0x4af1dbdc1111fd89 +659, 0x66973587943b927f +660, 0xf86f5f50684dfb1d +661, 0x1247d574ff79b534 +662, 0xc8039f3259210fe2 +663, 0x79b573235c92a9f5 +664, 0x213f642d8450e2f0 +665, 0x5db7706973376566 +666, 0x6182c12e69b373d7 +667, 0x3e5ac47300aec07f +668, 0x4b5b6c57b1574376 +669, 0x6b7fcceefd56b17c +670, 0xf656c3455cb9d4b8 +671, 0x7577e2e13329721f +672, 0xf33c0c53ce956e8d +673, 0x7d0f328ee356174 +674, 0x10ec9a168088686e +675, 0x71ef1776d062dfa +676, 0xaa7b590a488a6bc4 +677, 0x38612b6dd8049a1c +678, 0x939045e36874f731 +679, 0xcb9d1d74c56d5ac9 +680, 0x54f1c1c8fef1d8ff +681, 0x3ee4b85c8c7e939e +682, 0xb9b4608e019f352c +683, 0x79d4701275d12e6a +684, 0x2632a2d9835c7f19 +685, 0x1662cd9fba293692 +686, 0xbcb70265115ee944 +687, 0xdc43fb9761468604 +688, 0xe3eec4e7d3871352 +689, 0x829531753226989d +690, 0x2748cc67f540e074 +691, 0x39c4af25d607837d +692, 0x741a243f4cb5df99 +693, 0xda1353287e18b49a +694, 0xa6735689d751ea74 +695, 0x46326d587340ce0b +696, 0xc18531df4550012b +697, 0x6f7901e05dd4b818 +698, 0xfb966afc4c001d63 +699, 0x6dc10fca67a9cfdb +700, 0xd6527ffadf0feaae +701, 0x3b900172045e25d +702, 0xb7dd594cdded6a46 +703, 0x6602aee7ec1599fc +704, 0x7fbf12f23747546a +705, 0x32e63f662bd2de0d +706, 0xedf47770b67ed641 +707, 0x331bef83481c5c2a +708, 0x8fc4256fdf05158c +709, 0x98eba48dabccf5e0 +710, 0xdbc2f2cdb7b1c154 +711, 0x7777755616517ad3 +712, 0xd473c147d2628ac1 +713, 0x861e15d1d760b5a7 +714, 0xf4d25926405ecb07 +715, 0xb7739c69effff86e +716, 0xe97fbafa6f96830c +717, 0xf13e8a334e8bede1 +718, 0xcd60010cba4ee4f9 +719, 0x1f537ac2b82e6008 +720, 0x1fda8d781a89140a +721, 0x9dc204f3f4a463f0 +722, 0x456dcd18eb56a1ab +723, 0x629957bc87bd16a1 +724, 0x2c8000ddb8c75253 +725, 0xc31dae9ec8449284 +726, 0xdac05c8baa2b691a +727, 0x21ff7be9ffa3e7ac +728, 0x844f4b5ed4ee08d0 +729, 0x651f913fd636c994 +730, 0xca3e71a2110b2d49 +731, 0x7709bc42253ed09d +732, 0xbb164d45b6569d43 +733, 0x90ec2f040c20a112 +734, 0xfa6e77e9166f5be4 +735, 0x6b6d12c1842d587d +736, 0xfcd7ff8466e25e2a +737, 0x6a5a2ed8bd971297 +738, 0x2ec35f6bba5adcbc +739, 0xc83676e16651249a +740, 0x458f6064cefe10ba +741, 0x90d54d527e6cd028 +742, 0xa5613e88db27c388 +743, 0x331e0c7d85aa1abc +744, 0x8cee4977e210358 +745, 0xfcae379aa6cbff8e +746, 0xd1407afc97a57e86 +747, 0x1fab25c864f094ae +748, 0xd914864a63004552 +749, 0x4214d226a20f1384 +750, 0x3f4e0d80c488b715 +751, 0xc5ca2f654024b7c8 +752, 0xc1e27a124e7c821c +753, 0xd890a915ffc7918c +754, 0x22fba040ce51a9f8 +755, 0xbf61cebd8891617a +756, 0x7846609ee228e319 +757, 0x536d1854375509b8 +758, 0xbbfb45fc6e666f50 +759, 0xd85b4c0527f9d7d6 +760, 0x528cc9c7fa2a84c8 +761, 0x27a1baece647f2cb +762, 0xfddf0cb92fe09dc3 +763, 0xeb5008fe965d8d96 +764, 0x4a3307937eb2e5c8 +765, 0xd07d74c240c6c363 +766, 0x16f62290179d1bbf +767, 0xe99c9bcc9cb1ece7 +768, 0xc64f9be03c8a93be +769, 0x32659effaf666c1f +770, 0x4bb228cfb30b6672 +771, 0x98764870842068a5 +772, 0x5b12ef2d2cd8bdcc +773, 0xbc79d1c1b41f28b8 +774, 0x97a517cf3279fc9a +775, 0x34ffd46c1d4d6025 +776, 0x9c302307ee25c8f0 +777, 0x399604eed1f18a8 +778, 0x1c9b813c2043142a +779, 
0x2944ea5e55267fe9 +780, 0x5a8a9f5e728ea667 +781, 0x30c8440adb804a0 +782, 0xee0e6b627099a937 +783, 0x3d50757ada3c52da +784, 0x4548916b32c813ab +785, 0x602a186fe5bf109b +786, 0xf0d440a2227ba304 +787, 0x5a10d4e0ca9ea32b +788, 0x6e5eb90da13ba64c +789, 0x4c6af8fd04241ab2 +790, 0xf9eb31d26e093006 +791, 0x5d674878839fe3ea +792, 0x1562b55b2484e47c +793, 0xa87188c099c1cb61 +794, 0xb7736b8aa02a3392 +795, 0x5f4b301125abb20f +796, 0x361d566984637f44 +797, 0x68c4b3feac8bd0c3 +798, 0x7066c634dd2503c1 +799, 0xfecbf7c9441eb6ea +800, 0xdbc26ae0fc81436b +801, 0x9ef3e2b48252e7a4 +802, 0x31a49b4c339b37c7 +803, 0xb01b2a83cf346cf4 +804, 0xc24dc2347f82fbe3 +805, 0x134cad272dcd410f +806, 0x61260742823ba59c +807, 0x53ac4c193a97c730 +808, 0x9207c9833af34b52 +809, 0xa72e7ee77078d1f5 +810, 0x2e6f6e1b05936885 +811, 0x783b99ce5dbf9464 +812, 0xfdfeb6f0d027bb44 +813, 0x40eeb27096f92b0 +814, 0x5ef96ff5d4a4521f +815, 0x5595806ae873718a +816, 0x67d449eecf4ca1c3 +817, 0xde837ab611364f3f +818, 0x7034c24d2b139be9 +819, 0xe21166603e0a9c86 +820, 0x935694435c1f0d51 +821, 0x6cb3bec90c126088 +822, 0x4096ef662b7a9f89 +823, 0xd2d85b8d238d8c15 +824, 0xa4ea533ce3ec59b2 +825, 0x3654729d80a2db29 +826, 0x214c4cc3906d29d4 +827, 0x201c447e7588e373 +828, 0xe8b8f0ae25f683eb +829, 0x6744aaf5754e38af +830, 0xd1ffb10d6f27a061 +831, 0xe536733a7b3a6c30 +832, 0x39f0f66e47cbf2c9 +833, 0x856a9593526fde2 +834, 0x2e2a817a0098ea4b +835, 0xc5e1eeb551a0e3d3 +836, 0x3f21e2f5e2d50b2 +837, 0x906af56c66dd9f8c +838, 0x30f6dbd70329fac8 +839, 0xc443dfddf3c01a60 +840, 0x7ab85d9aa9675470 +841, 0x8c9080bd39717bfc +842, 0x4b1ccdb3c3597f6f +843, 0x74e2542d70ab5d67 +844, 0xbb3d236aad00f74 +845, 0xcf3cadf9a2804774 +846, 0xe851d9750e42bd07 +847, 0xc0ad82029b1c371f +848, 0x7ee119eb552d6c07 +849, 0xd8024049bd1d784a +850, 0xfa67a899760363 +851, 0xaa7c2f438b178197 +852, 0xc473674a47ffe064 +853, 0x539fbe3fc674c270 +854, 0xdb48484748a76f3b +855, 0xc73b2b092060d +856, 0xa1d2a15345016f5d +857, 0x4d0fe8599f9bba47 +858, 0xa0edc275e6f8f1d1 +859, 0x40590a8655bc8d72 +860, 0x35b4223161f05f75 +861, 0xa04c0c0f616752dc +862, 0x7f371ed2ca45432d +863, 0x2ff1a08f75ac6438 +864, 0xe2dc5c3682282f48 +865, 0xe1e4179fa98d9013 +866, 0x8cb083d6843a73d5 +867, 0xb4c2b5921b706854 +868, 0x738e14c0e7352445 +869, 0xcd2b646f91afd8c7 +870, 0xd5779a5b57a264fd +871, 0xc39ff855586c7d07 +872, 0x3e3f0098c631a859 +873, 0x644e02fae032110 +874, 0xa8834613c0a45278 +875, 0x69482f2c08e10657 +876, 0xe4ee475bdb87e69a +877, 0xdc1ef7b25c0d0019 +878, 0x88a3fa2be18d8744 +879, 0x60a02e0b21c5bec7 +880, 0xb6867b88aa19bc1a +881, 0xb599409affcf10eb +882, 0xaeaa1778a5e59daa +883, 0xd7a91a52c16663e3 +884, 0x93cb269affe07b1c +885, 0x841b6ced3a4ba815 +886, 0x84541768e1540a5c +887, 0xe3943c84f83b3020 +888, 0x5de366fbd7b45258 +889, 0xd787cc3bde91a661 +890, 0x814071446edecb57 +891, 0x15d8c602a1141514 +892, 0x72f07bc8002d1d0d +893, 0x4a8bd8dc9a1f0f3e +894, 0x8723796ae0f20d35 +895, 0xda7283c2051f73b2 +896, 0x2df0cc247f90bd3b +897, 0x79a8522b968f990a +898, 0x951ede190c8b9d02 +899, 0xc512f1a5b14b018a +900, 0xf0e3ddc03b9a4259 +901, 0x8cf4a35ad312e15f +902, 0xebef28926b11094b +903, 0x5628ba687325921c +904, 0xc3aa75e57edc49c3 +905, 0xc38382fa98e762ba +906, 0x8d209e896285848e +907, 0x2c7d6adf592b4a3e +908, 0x62de48e36f8338f3 +909, 0x4a752741e00de30e +910, 0xf7855b70f1f6ec2b +911, 0xa505fa4428199e43 +912, 0xe8b6b423b826bbac +913, 0x4bd1206cf8786d05 +914, 0x6dcf040391fe3bf4 +915, 0x913f500f87e1bba3 +916, 0x5acf775aa180a5d5 +917, 0x74dd28d9432ce739 +918, 0x996c2ff2f0dc2495 +919, 0x73dbfe6c56effe4 +920, 0x56fddd25196f5e40 +921, 0xe87810158f5b7 
+922, 0x7b8795e996383f1f +923, 0x9ba5ee7c777c4c82 +924, 0x17ce3908d270fe1c +925, 0x3df9e613c1aedfae +926, 0xcdd26871b32fc8e1 +927, 0xd71cb13afc633979 +928, 0x63427c8ea9b1c79e +929, 0xd070f7664d3b405d +930, 0x46f2a9e32d9fb769 +931, 0xb4c3822a45e9fe9b +932, 0x8ba30b97fe6f5ec7 +933, 0x70aa554ee2fc11f9 +934, 0xa80c99dbe0cfcfaf +935, 0x36d9250cb2d68ed +936, 0x2995e4b9e1cd1db4 +937, 0x4b3803ba57fc570f +938, 0xae3959e7d740eaa5 +939, 0xb4cbd6662adbae08 +940, 0xae46576446e8dbc4 +941, 0xc4828e008a9a8a54 +942, 0x145d7db8e6554b2f +943, 0x1b1b8916a730c371 +944, 0xdaf84b2bebe31963 +945, 0x5b59b80ef23a2403 +946, 0x9180c7e89cab6fd3 +947, 0x80e58f5411babf34 +948, 0xa06cf55185b9b005 +949, 0x13b2c798424173ad +950, 0xc510f8e706311d49 +951, 0x1f974b83b6046d3a +952, 0xae6e8e85e822d1c3 +953, 0x66f2c8dc3274a31a +954, 0x7e04dbcbf65bd377 +955, 0xabf41ede01ec20a4 +956, 0x5efa0948f6bbb2ea +957, 0xbc91c99d8592255 +958, 0xf6d6917911d86d75 +959, 0x85ce273d54e9097a +960, 0xbdfd30f2420fff92 +961, 0x8802f02f610b537c +962, 0xd1d70037ed543229 +963, 0x908aaf97f9693a46 +964, 0x1f6cfeaa0834d53a +965, 0xa453fd1648ce04d2 +966, 0x2c38bb85ebc64af9 +967, 0xd2daff551c90c4f8 +968, 0xae5a0d949797d784 +969, 0xf0974c8552ac9593 +970, 0xa10b70499f65c693 +971, 0x39a449ebd594ddff +972, 0x8ea090f2b17b9b49 +973, 0xc592de318090fd83 +974, 0xb63e4fbc467b6912 +975, 0x57a0c1c5ce0e4dcc +976, 0xa7c517cf3d436b35 +977, 0xef6dcb0f3fad038b +978, 0xaf4fb60315b91287 +979, 0x5e0776f67304f331 +980, 0xe927753b8e6f7932 +981, 0xd3df2dd92559e304 +982, 0xdaed52aa6af44413 +983, 0x1b59f4dac1e181f8 +984, 0x4a73c2293877ef39 +985, 0xca45d0d015fe44de +986, 0x4659c8b7853735a8 +987, 0x12de6466bdf8adeb +988, 0xaeea857a09bfec15 +989, 0xcc9cf4b3c0b88a23 +990, 0xa44ae52396a5e1bf +991, 0x5847a724305d137f +992, 0x8f4d4de223956182 +993, 0x58254dfada867a8 +994, 0x900a98222c2f339e +995, 0xdb575260935d51d5 +996, 0x13fb4bfbbc0d7b53 +997, 0x62213850186bb92b +998, 0x2a34823312c00388 +999, 0x6148329042f743b0
diff --git a/python/numpy/random/tests/data/pcg64dxsm-testset-1.csv b/python/numpy/random/tests/data/pcg64dxsm-testset-1.csv
new file mode 100644
index 000000000..39cef057f
--- /dev/null
+++ b/python/numpy/random/tests/data/pcg64dxsm-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf +0, 0xdf1ddcf1e22521fe +1, 0xc71b2f9c706cf151 +2, 0x6922a8cc24ad96b2 +3, 0x82738c549beccc30 +4, 0x5e8415cdb1f17580 +5, 0x64c54ad0c09cb43 +6, 0x361a17a607dce278 +7, 0x4346f6afb7acad68 +8, 0x6e9f14d4f6398d6b +9, 0xf818d4343f8ed822 +10, 0x6327647daf508ed6 +11, 0xe1d1dbe5496a262a +12, 0xfc081e619076b2e0 +13, 0x37126563a956ab1 +14, 0x8bb46e155db16b9 +15, 0x56449f006c9f3fb4 +16, 0x34a9273550941803 +17, 0x5b4df62660f99462 +18, 0xb8665cad532e3018 +19, 0x72fc3e5f7f84216a +20, 0x71d3c47f6fd59939 +21, 0xfd4218afa1de463b +22, 0xc84054c78e0a9a71 +23, 0xae59034726be61a8 +24, 0xa6a5f21de983654d +25, 0x3b633acf572009da +26, 0x6a0884f347ab54c8 +27, 0x7a907ebe9adcab50 +28, 0xbe779be53d7b8d4a +29, 0xf5976e8c69b9dcd1 +30, 0x1d8302f114699e11 +31, 0x7d37e43042c038a0 +32, 0x2cc1d4edc2a40f35 +33, 0x83e3347bb2d581f1 +34, 0x253f8698651a844d +35, 0x4312dea0dd4e32f6 +36, 0x10f106439964ea3a +37, 0x810eb374844868cc +38, 0x366342a54b1978cc +39, 0x9fb39b13aaddfb5e +40, 0xdb91fd0d9482bed7 +41, 0x89f6ea4ca9c68204 +42, 0x146b31ccca461792 +43, 0x203fd9724deb2486 +44, 0x58a84f23748e25cb +45, 0x2f20eb6aeb94e88 +46, 0x14d3581460e473c +47, 0xad5bd0d25f37d047 +48, 0x1cf88fa16de258b2 +49, 0x3bcab6485b7a341 +50, 0xb2433b37f227d90c +51, 0x2cffd7e0a8360cc8 +52, 0x5d2eeff7c9ebc847 +53, 0x6fd7c7ae23f9f64b +54, 0x381650b2d00f175d +55,
0x9d93edcedc873cae +56, 0x56e369a033d4cb49 +57, 0x7547997116a3bac +58, 0x11debaa897fd4665 +59, 0xdf799d2b73bd6fb8 +60, 0x3747d299c66624d +61, 0xac9346701afd0cfa +62, 0xac90e150fa13c7bf +63, 0x85c56ad2248c2871 +64, 0xdea66bf35c45f195 +65, 0x59cf910ea079fb74 +66, 0x2f841bb782274586 +67, 0x9814df4384d92bd9 +68, 0x15bc70824be09925 +69, 0x16d4d0524c0503a3 +70, 0xf04ea249135c0cc7 +71, 0xa707ab509b7e3032 +72, 0x465459efa869e372 +73, 0x64cbf70a783fab67 +74, 0x36b3541a14ca8ed7 +75, 0x9a4dfae8f4c596bf +76, 0x11d9a04224281be3 +77, 0xe09bbe6d5e98ec32 +78, 0xa6c60d908973aa0d +79, 0x7c524c57dd5915c8 +80, 0xa810c170b27f1fdc +81, 0xce5d409819621583 +82, 0xfe2ee3d5332a3525 +83, 0x162fb7c8b32045eb +84, 0x4a3327156b0b2d83 +85, 0x808d0282f971064 +86, 0x2e6f04cf5ed27e60 +87, 0xaf6800699cca67a9 +88, 0xc7590aae7244c3bf +89, 0x7824345f4713f5f9 +90, 0x8f713505f8fd059b +91, 0x3d5b5b9bb6b1e80e +92, 0x8674f45e5dc40d79 +93, 0xcb1e36846aa14773 +94, 0xe0ae45b2b9b778c1 +95, 0xd7254ce931eefcfb +96, 0xef34e15e4f55ac0a +97, 0xf17cc0ba15a99bc4 +98, 0x77bb0f7ffe7b31f1 +99, 0x6ee86438d2e71d38 +100, 0x584890f86829a455 +101, 0x7baf0d8d30ba70fe +102, 0xb1ac8f326b8403ae +103, 0xcc1963435c874ba7 +104, 0x9c483b953d1334ce +105, 0xc0924bcbf3e10941 +106, 0x21bcc581558717b1 +107, 0x2c5ad1623f8d292b +108, 0xa8ea110f6124557e +109, 0x15f24a6c5c4c591 +110, 0x40fe0d9cd7629126 +111, 0xcfe8f2b3b081484d +112, 0x891383f4b4cac284 +113, 0x76f2fcdef7fa845 +114, 0x4edd12133aed0584 +115, 0xd53c06d12308873d +116, 0xf7f22882c17f86bf +117, 0xfbaa4aad72f35e10 +118, 0x627610da2e3c0cc3 +119, 0x582b16a143634d9a +120, 0x9b4a7f69ed38f4a0 +121, 0x2df694974d1e1cbe +122, 0xe5be6eaafed5d4b +123, 0xc48e2a288ad6605e +124, 0xbcb088149ce27c2b +125, 0x3cb6a7fb06ceecbe +126, 0x516735fff3b9e3ac +127, 0x5cbafc551ee5008d +128, 0xee27d1ab855c5fd5 +129, 0xc99fb341f6baf846 +130, 0x7ad8891b92058e6d +131, 0xf50310d03c1ac6c7 +132, 0x947e281d998cbd3e +133, 0x1d4d94a93824fe80 +134, 0x5568b77289e7ee73 +135, 0x7d82d1b2b41e3c8b +136, 0x1af462c7abc787b +137, 0xcfd8dfe80bfae1ef +138, 0xd314caeb723a63ea +139, 0x1c63ddcfc1145429 +140, 0x3801b7cc6cbf2437 +141, 0xc327d5b9fdafddd3 +142, 0xe140278430ca3c78 +143, 0x4d0345a685cb6ef8 +144, 0x47640dc86e261ff9 +145, 0xab817f158523ebf4 +146, 0x37c51e35fbe65a6b +147, 0xab090f475d30a178 +148, 0x4d3ec225bf599fc1 +149, 0xefd517b0041679b1 +150, 0x20ad50bca4da32c5 +151, 0x75e1f7cd07fad86d +152, 0x348cf781ee655f4b +153, 0x9375f0e5ffc2d2ec +154, 0x7689082fd5f7279c +155, 0x633e56f763561e77 +156, 0x9d1752d70861f9fd +157, 0xa3c994b4e70b0b0f +158, 0xabf7276a58701b88 +159, 0xbfa18d1a0540d000 +160, 0xc6a28a2475646d26 +161, 0x7cdf108583f65085 +162, 0x82dcefb9f32104be +163, 0xc6baadd0adc6b446 +164, 0x7a63cff01075b1b4 +165, 0x67ac62e575c89919 +166, 0x96fa4320a0942035 +167, 0xc4658859385b325f +168, 0xde22c17ff47808f6 +169, 0xbb952c4d89e2f2ec +170, 0x638251fbc55bdc37 +171, 0x38918b307a03b3ea +172, 0xccb60f2cedbb570b +173, 0x3c06f4086a28f012 +174, 0x4e8d238388986e33 +175, 0x1760b7793514a143 +176, 0xa3f924efe49ee7d6 +177, 0xaf6be2dbaebc0bdf +178, 0x6782682090dffe09 +179, 0xb63a4d90d848e8ef +180, 0x5f649c7eaf4c54c5 +181, 0xbe57582426a085ba +182, 0xb5dd825aa52fb76d +183, 0x74cb4e6ca4039617 +184, 0x382e578bf0a49588 +185, 0xc043e8ea6e1dcdae +186, 0xf902addd5c04fa7c +187, 0xf3337994612528db +188, 0x4e8fd48d6d15b4e6 +189, 0x7190a509927c07ab +190, 0x864c2dee5b7108ae +191, 0xbb9972ddc196f467 +192, 0x1ea02ab3ca10a448 +193, 0xe50a8ffde35ddef9 +194, 0x7bd2f59a67183541 +195, 0x5a940b30d8fcd27a +196, 0x82b4cea62623d4d3 +197, 0x6fbda76d4afef445 +198, 0x8b1f6880f418328e +199, 
0x8b69a025c72c54b7 +200, 0xb71e0f3986a3835f +201, 0xa4a7ddb8b9816825 +202, 0x945dcda28228b1d8 +203, 0xb471abf2f8044d72 +204, 0xf07d4af64742b1ba +205, 0xfca5190bc4dd6a2a +206, 0xd681497262e11bc5 +207, 0xbe95d5f00c577028 +208, 0x56313439fd8bde19 +209, 0x3f3d9ac9b5ee6522 +210, 0x7b8d457dd2b49bbe +211, 0xe76b5747885d214b +212, 0xa8a695b3deb493ea +213, 0x5292446548c95d71 +214, 0xbf5cdf0d436412df +215, 0x7936abaed779d28d +216, 0x659c6e8073b3a06d +217, 0x86c9ff28f5543b71 +218, 0x6faa748445a99146 +219, 0xdcc1e6ab57904fd7 +220, 0x770bd61233addc5f +221, 0x16963e041e46d94f +222, 0x158e6cb2934157ac +223, 0xb65088a8fd246441 +224, 0x2b12ced6ce8a68c3 +225, 0x59a18d02cd6082b3 +226, 0x4ddbc318cb5488ee +227, 0x3d4cf520b3ed20a1 +228, 0x7028b3a92e2b292d +229, 0xf141da264a250e4d +230, 0x9788d53e86041c37 +231, 0x1bb91238a7c97dbf +232, 0x81953d0ddb634309 +233, 0xfa39ccfe14d2d46 +234, 0xf7c7861c9b7e8399 +235, 0x18d27ca50d9dc249 +236, 0x258dfdf38510d0d9 +237, 0x9e72d8af910ea76f +238, 0x4f8ef24b96de50ad +239, 0xb9d9c12297e03dc9 +240, 0x91994e41b4a1929c +241, 0x8defa79b2ccc83b9 +242, 0x948566748706dac5 +243, 0x7b0454946e70e4cf +244, 0x340b7cb298c70ed7 +245, 0x6602005330cebd95 +246, 0xf71cb803aa61f722 +247, 0x4683fb07fc70ae8a +248, 0xc6db9f0c4de3ed88 +249, 0x3e8dfae2a593cef9 +250, 0x615f7c38e3862b33 +251, 0x676c7996550d857 +252, 0xc6d520d54a5c266a +253, 0x202b1e8eef14aa2e +254, 0xa3a84891a27a582 +255, 0x84dbee451658d47f +256, 0x254c7cd97e777e3a +257, 0xf50b6e977f0eba50 +258, 0x2898b1d3062a4798 +259, 0x4096f7cbbb019773 +260, 0x9fb8e75548062c50 +261, 0x4647071e5ca318ec +262, 0x2b4750bdb3b3b01 +263, 0x88ac41cc69a39786 +264, 0x705e25476ef46fa3 +265, 0xc0c1db19884a48a6 +266, 0x1364c0afdbb465e5 +267, 0x58e98534701272a6 +268, 0x746a5ea9701517c0 +269, 0x523a70bc6b300b67 +270, 0x9b1c098eda8564ad +271, 0xfbaeb28d3637067f +272, 0xddd9a13551fdba65 +273, 0x56461a670559e832 +274, 0xab4fd79be85570ad +275, 0xd4b691ecaff8ca55 +276, 0x11a4495939e7f004 +277, 0x40d069d19477eb47 +278, 0xe790783d285cd81e +279, 0xde8218b16d935bc7 +280, 0x2635e8c65cd4182d +281, 0xeae402623e3454 +282, 0x9f99c833184e0279 +283, 0x3d0f79a0d52d84e7 +284, 0xc1f8edb10c625b90 +285, 0x9b4546363d1f0489 +286, 0x98d86d0b1212a282 +287, 0x386b53863161200d +288, 0xbe1165c7fe48a135 +289, 0xb9658b04dbbfdc8c +290, 0xcea14eddfe84d71a +291, 0x55d03298be74abe7 +292, 0x5be3b50d961ffd7e +293, 0xc76b1045dc4b78e1 +294, 0x7830e3ff3f6c3d4c +295, 0xb617adb36ca3729 +296, 0x4a51bdb194f14aa9 +297, 0x246024e54e6b682a +298, 0x33d42fc9c6d33083 +299, 0xadccba149f31e1d +300, 0x5183e66b9002f8b +301, 0x70eb2416404d51b7 +302, 0x26c25eb225535351 +303, 0xbc2d5b0d23076561 +304, 0x5823019ddead1da +305, 0x85cfa109fca69f62 +306, 0x26017933e7e1efd9 +307, 0x3ec7be9a32212753 +308, 0x697e8a0697cd6f60 +309, 0x44735f6cca03920f +310, 0x8cc655eb94ee212e +311, 0x8b8b74eba84929a0 +312, 0x7708ccedd0c98c80 +313, 0x1b6f21f19777cbe1 +314, 0x363e564bd5fadedb +315, 0x5921543a641591fe +316, 0xc390786d68ea8a1b +317, 0x9b293138dc033fca +318, 0x45447ca8dc843345 +319, 0xee6ef6755bc49c5e +320, 0x70a3a1f5163c3be5 +321, 0xf05e25448b6343b0 +322, 0x4739f4f8717b7e69 +323, 0xb006141975bf957 +324, 0x31874a91b707f452 +325, 0x3a07f2c90bae2869 +326, 0xb73dae5499a55c5e +327, 0x489070893bb51575 +328, 0x7129acf423940575 +329, 0x38c41f4b90130972 +330, 0xc5260ca65f5a84a1 +331, 0x6e76194f39563932 +332, 0x62ca1f9ca3de3ca6 +333, 0xb4a97874e640853f +334, 0x38ed0f71e311cc02 +335, 0xde183b81099e8f47 +336, 0x9bb8bf8e6694346 +337, 0xd15497b6bf81e0f2 +338, 0xaaae52536c00111 +339, 0x4e4e60d1435aaafd +340, 0x5a15512e5d6ea721 +341, 
0xff0f1ffabfc6664f +342, 0xba3ffcedc5f97fec +343, 0xef87f391c0c6bfb6 +344, 0x4a888c5d31eb0f98 +345, 0x559a3fbfd7946e95 +346, 0xe45b44a0db5a9bad +347, 0x9457898964190af1 +348, 0xd9357dfaab76cd9e +349, 0xa60e907178d965a1 +350, 0x76b2dc3032dc2f4a +351, 0x13549b9c2802120 +352, 0x8656b965a66a1800 +353, 0x16802e6e22456a23 +354, 0x23b62edc60efaa9 +355, 0x6832a366e1e4ea3b +356, 0x46b1b41093ff2b1e +357, 0x55c857128143f219 +358, 0x7fc35ddf5e138200 +359, 0x790abe78be67467e +360, 0xa4446fc08babd466 +361, 0xc23d70327999b855 +362, 0x2e019d1597148196 +363, 0xfefd98e560403ab8 +364, 0xbe5f0a33da330d58 +365, 0x3078a4e9d43ca395 +366, 0x511bfedd6f12f2b3 +367, 0x8bc138e335be987c +368, 0x24640f803465716d +369, 0xf6530b04d0bd618f +370, 0x9b7833e5aa782716 +371, 0x778cd35aea5841b1 +372, 0xecea3c458cefbc60 +373, 0x5107ae83fc527f46 +374, 0x278ad83d44bd2d1a +375, 0x7014a382295aeb16 +376, 0xf326dd762048743f +377, 0x858633d56279e553 +378, 0x76408154085f01bc +379, 0x3e77d3364d02e746 +380, 0x2f26cea26cadd50b +381, 0x6d6846a4ecb84273 +382, 0x4847e96f2df5f76 +383, 0x5a8610f46e13ff61 +384, 0x4e7a7cac403e10dd +385, 0x754bdf2e20c7bc90 +386, 0x8bdd80e6c51bd0be +387, 0x61c655fae2b4bc52 +388, 0x60873ef48e3d2f03 +389, 0x9d7d8d3698a0b4a4 +390, 0xdf48e9c355cd5d4b +391, 0x69ecf03e20be99ac +392, 0xc1a0c5a339bd1815 +393, 0x2e3263a6a3adccb +394, 0x23557459719adbdc +395, 0xd1b709a3b330e5a +396, 0xade5ab00a5d88b9d +397, 0x69a6bd644120cfad +398, 0x40187ecceee92342 +399, 0x1c41964ba1ac78da +400, 0x9ac5c51cbecabe67 +401, 0xbdc075781cf36d55 +402, 0xeaf5a32246ded56 +403, 0xcda0b67e39c0fb71 +404, 0x4839ee456ef7cc95 +405, 0xf17092fdd41d5658 +406, 0x2b5d422e60ae3253 +407, 0x3effe71102008551 +408, 0x20a47108e83934b7 +409, 0xd02da65fe768a88f +410, 0xeb046bd56afa4026 +411, 0x70c0509c08e0fbe0 +412, 0x1d35c38d4f8bac6c +413, 0x9aa8eb6466f392e0 +414, 0x587bd4a430740f30 +415, 0x82978fe4bad4195 +416, 0xdc4ebc4c0feb50ab +417, 0xd3b7164d0240c06f +418, 0x6e2ad6e5a5003a63 +419, 0xa24b430e2ee6b59c +420, 0x2905f49fd5073094 +421, 0x5f209e4de03aa941 +422, 0x57b7da3e0bedb1dc +423, 0x5e054018875b01f5 +424, 0xb2f2da6145658db3 +425, 0xbd9c94a69a8eb651 +426, 0x9c5f9a07cd6ac749 +427, 0x2296c4af4d529c38 +428, 0x522ed800fafdefab +429, 0xe2a447ced0c66791 +430, 0x937f10d45e455fef +431, 0xc882987d9e29a24 +432, 0x4610bfd6a247ee1a +433, 0x562ba3e50870059 +434, 0x59d8d58793602189 +435, 0xfe9a606e3e34abe +436, 0x6825f7932a5e9282 +437, 0xe77f7061bab476ad +438, 0xbf42001da340ace3 +439, 0x9c3e9230f5e47960 +440, 0x2c0f700d96d5ad58 +441, 0x330048b7cd18f1f9 +442, 0xffc08785eca5cca9 +443, 0xb5879046915f07a5 +444, 0xef51fe26f83c988e +445, 0xfa4c2968e7881a9a +446, 0xc0a9744455a4aad +447, 0xbd2ad686d6313928 +448, 0x6b9f0984c127682a +449, 0xc9aaa00a5da59ed8 +450, 0x762a0c4b98980dbf +451, 0x52d1a2393d3ca2d1 +452, 0x1e9308f2861db15c +453, 0xe7b3c74fe4b4a844 +454, 0x485e15704a7fc594 +455, 0x9e7f67ea44c221f6 +456, 0xbab9ad47fde916e0 +457, 0x50e383912b7fc1f4 +458, 0xaad63db8abcef62d +459, 0xc2f0c5699f47f013 +460, 0xee15b36ada826812 +461, 0x2a1b1cf1e1777142 +462, 0x8adb03ede79e937d +463, 0xf14105ef65643bf3 +464, 0x752bbaefc374a3c7 +465, 0xa4980a08a5a21d23 +466, 0x418a1c05194b2db7 +467, 0xdd6ff32efe1c3cd6 +468, 0x272473ed1f0d3aa2 +469, 0x1e7fdebadabe6c06 +470, 0xd1baa90c17b3842f +471, 0xd3d3a778e9c8404a +472, 0x781ae7fda49fa1a0 +473, 0x61c44fdbdacc672d +474, 0x6d447d0a1404f257 +475, 0x9303e8bdfbfb894d +476, 0x3b3482cdec016244 +477, 0xb149bf245d062e7b +478, 0x96f8d54b14cf992d +479, 0x4741549a01f8c3d0 +480, 0x48270811b2992af +481, 0x7b58f175cd25d147 +482, 0x8f19a840b56f4be9 +483, 
0x84a77f43c0951a93 +484, 0x34e1a69381f0c374 +485, 0xb158383c9b4040f +486, 0x372f1abc7cf3a9fa +487, 0x5439819a84571763 +488, 0xabf8515e9084e2fa +489, 0xb02312b9387ff99 +490, 0x238a85bb47a68b12 +491, 0x2068cb83857c49bb +492, 0xc6170e743083664c +493, 0x745cf8470bcb8467 +494, 0xe3a759a301670300 +495, 0x292c7686ad3e67da +496, 0x359efedaff192a45 +497, 0x511f2c31a2d8c475 +498, 0x97fd041bf21c20b3 +499, 0x25ef1fe841b7b3f6 +500, 0xbb71739e656f262d +501, 0x2729b0e989b6b7b8 +502, 0xd2142702ec7dbabf +503, 0x7008decd2488ee3f +504, 0x69daa95e303298d7 +505, 0xc35eca4efb8baa5a +506, 0xf3f16d261cec3b6c +507, 0x22371c1d75396bd3 +508, 0x7aefa08eccae857e +509, 0x255b493c5e3c2a2f +510, 0x779474a077d34241 +511, 0x5199c42686bea241 +512, 0x16c83931e293b8d3 +513, 0xa57fe8db8c0302c7 +514, 0xd7ace619e5312eb1 +515, 0x8740f013306d217c +516, 0xb6a1ad5e29f4d453 +517, 0x31abf7c964688597 +518, 0xbc3d791daed71e7 +519, 0x31ee4ca67b7056ed +520, 0x1ab5416bfe290ea3 +521, 0x93db416f6d3b843a +522, 0xed83bbe5b1dd2fed +523, 0xece38271470d9b6d +524, 0x3a620f42663cd8ae +525, 0x50c87e02acafee5d +526, 0xcabeb8bedbc6dab5 +527, 0x2880a6d09970c729 +528, 0x4aba5dd3bfc81bc +529, 0xaba54edf41080cec +530, 0xb86bb916fc85a169 +531, 0x4c41de87bc79d8ca +532, 0xcce2a202622945fe +533, 0x513f086fad94c107 +534, 0x18b3960c11f8cc96 +535, 0x2f0d1cfd1896e236 +536, 0x1702ae3880d79b15 +537, 0x88923749029ae81 +538, 0x84810d4bdec668eb +539, 0xf85b0a123f4fc68d +540, 0x93efd68974b6e4d1 +541, 0x5d16d6d993a071c9 +542, 0x94436858f94ca43b +543, 0xb3dbb9ed0cb180b6 +544, 0x6447030a010b8c99 +545, 0xd7224897c62925d8 +546, 0xb0c13c1d50605d3a +547, 0xdff02c7cb9d45f30 +548, 0xe8103179f983570d +549, 0xbc552037d6d0a24e +550, 0x775e500b01486b0d +551, 0x2050ac632c694dd6 +552, 0x218910387c4d7ae7 +553, 0xf83e8b68ff885d5d +554, 0xe3374ec25fca51a3 +555, 0xfa750ffa3a60f3af +556, 0x29ee40ba6df5592e +557, 0x70e21a68f48260d2 +558, 0x3805ca72cd40886e +559, 0x2f23e73f8eabf062 +560, 0x2296f80cdf6531ae +561, 0x903099ed968db43a +562, 0xf044445cf9f2929f +563, 0xcd47fdc2de1b7a1 +564, 0xaab1cbd4f849da99 +565, 0x5fc990688da01acb +566, 0xa9cee52ea7dab392 +567, 0xecefc3a4349283a8 +568, 0xdd6b572972e3fafc +569, 0xc1f0b1a2ffb155da +570, 0xc30d53fc17bd25c8 +571, 0x8afa89c77834db28 +572, 0x5569a596fb32896c +573, 0x36f207fc8df3e3d4 +574, 0x57c2bd58517d81db +575, 0xb524693e73d0061c +576, 0xb69f6eb233f5c48b +577, 0x4f0fb23cab8dc695 +578, 0x492c1ad0a48df8df +579, 0xf6dcc348ec8dec1f +580, 0xa4d8708d6eb2e262 +581, 0x4c2072c2c9766ff1 +582, 0xa9bf27c4304875f0 +583, 0xfc8fb8066d4f9ae2 +584, 0x188095f6235fec3c +585, 0x1d8227a2938c2864 +586, 0x89ea50c599010378 +587, 0xcac86df0a7c6d56d +588, 0x47a8c5df84c7d78 +589, 0xe607ae24ea228bfa +590, 0x36624a7996efe104 +591, 0x5d72881c1227d810 +592, 0x78694a6750374c8 +593, 0x7b9a217d4ab5ff45 +594, 0xd53e5d6f7504becc +595, 0x197a72d3f4889a0e +596, 0xfdc70c4755a8df36 +597, 0xd0fda83748c77f74 +598, 0x7ddc919ac9d6dcc9 +599, 0x785c810a6a2dc08b +600, 0xba4be83e7e36896c +601, 0x379d6fe80cf2bffe +602, 0x74cae2dabc429206 +603, 0x1efac32d5d34c917 +604, 0x3cb64e2f98d36e70 +605, 0xc0a7c3cdc3c60aa7 +606, 0x699dfadd38790ebe +607, 0x4861e61b3ecfbeac +608, 0x531744826c345baa +609, 0x5ec26427ad450cba +610, 0xf2c1741479abdcae +611, 0xe9328a78b2595458 +612, 0x30cd1bdf087acd7f +613, 0x7491ced4e009adbe +614, 0xdcd942df1e2e7023 +615, 0xfe63f01689fee35 +616, 0x80282dfe5eaedc42 +617, 0x6ecdea86495f8427 +618, 0xe0adfdd5e9ed31c3 +619, 0xf32bd2a7418127e +620, 0x8aabba078db6ee2 +621, 0xa8a8e60499145aca +622, 0xf76b086ac4e8a0f2 +623, 0x6e55b3c452ff27f8 +624, 0xe18fa7cd025a71bf +625, 
0xeed7b685fde0fa25 +626, 0xba9b6c95867fa721 +627, 0x4c2603bc69de2df2 +628, 0xaac87eee1b58cd66 +629, 0x3c9af6656e01282c +630, 0x2dfa05ce8ff476b6 +631, 0xeae9143fcf92f23d +632, 0x3f0699f631be3bc8 +633, 0xa0f5f79f2492bd67 +634, 0x59c47722388131ed +635, 0x5f6e9d2941cef1de +636, 0xe9ad915c09788b7b +637, 0x92c6d37e4f9482f5 +638, 0x57d301b7fdadd911 +639, 0x7e952d23d2a8443 +640, 0xbb2fa5e0704b3871 +641, 0xe5642199be36e2d5 +642, 0x5020b60d54358291 +643, 0xa0b6317ec3f60343 +644, 0xb57b08b99540bc5c +645, 0x21f1890adc997a88 +646, 0xfcf824200dd9da2d +647, 0x8146293d83d425d1 +648, 0xdadfbf5fbb99d420 +649, 0x1eb9bbc5e6482b7d +650, 0xd40ff44f1bbd0f1c +651, 0xa9f948ba2d08afa5 +652, 0x638cc07c5301e601 +653, 0x1f984baa606e14e8 +654, 0x44e153671081f398 +655, 0xb17882eeb1d77a5d +656, 0x5fd8dbee995f14c +657, 0xff3533e87f81b7fe +658, 0x2f44124293c49795 +659, 0x3bf6b51e9360248 +660, 0x72d615edf1436371 +661, 0x8fc5cf4a38adab9d +662, 0xfa517e9022078374 +663, 0xf356733f3e26f4d8 +664, 0x20ea099cdc6aad40 +665, 0xe15b977deb37637d +666, 0xcc85601b89dae88d +667, 0x5768c62f8dd4905c +668, 0xa43cc632b4e56ea +669, 0xc4240cf980e82458 +670, 0xb194e8ffb4b3eeb6 +671, 0xee753cf2219c5fa1 +672, 0xfe2500192181d44d +673, 0x2d03d7d6493dd821 +674, 0xff0e787bb98e7f9b +675, 0xa05cf8d3bd810ce7 +676, 0x718d5d6dcbbdcd65 +677, 0x8d0b5343a06931c +678, 0xae3a00a932e7eaf9 +679, 0x7ed3d8f18f983e18 +680, 0x3bb778ee466dc143 +681, 0x711c685c4e9062c0 +682, 0x104c3af5d7ac9834 +683, 0x17bdbb671fb5d5cf +684, 0xabf26caead4d2292 +685, 0xa45f02866467c005 +686, 0xf3769a32dc945d2d +687, 0xe78d0007f6aabb66 +688, 0x34b60be4acbd8d4b +689, 0x58c0b04b69359084 +690, 0x3a8bb354c212b1 +691, 0x6b82a8f3d70058d5 +692, 0x405bdef80a276a4a +693, 0xe20ca40ee9195cad +694, 0xf5dd96ba2446fefd +695, 0xc1e180c55fe55e3c +696, 0xa329caf6daa952b3 +697, 0xb4809dd0c84a6b0a +698, 0xd27f82661070cee7 +699, 0xa7121f15ee2b0d8a +700, 0x4bdaea70d6b34583 +701, 0xe821dc2f310f7a49 +702, 0x4c00a5a68e76f647 +703, 0x331065b064a2d5ea +704, 0xac0c2ce3dc04fa37 +705, 0x56b32b37b8229008 +706, 0xe757cdb51534fcfa +707, 0xd3ff183576b2fad7 +708, 0x179e1f4190f197a7 +709, 0xf874c626a7c9aae5 +710, 0xd58514ffc37c80e4 +711, 0xc65de31d33fa7fd3 +712, 0x6f6637052025769b +713, 0xca1c6bdadb519cc0 +714, 0xd1f3534cde37828a +715, 0xc858c339eee4830a +716, 0x2371eacc215e02f4 +717, 0x84e5022db85bbbe9 +718, 0x5f71c50bba48610e +719, 0xe420192dad9c323f +720, 0x2889342721fca003 +721, 0x83e64f63334f501d +722, 0xac2617172953f2c +723, 0xfa1f78d8433938ff +724, 0x5578382760051462 +725, 0x375d7a2e3b90af16 +726, 0xb93ff44e6c07552d +727, 0xded1d5ad811e818c +728, 0x7cf256b3b29e3a8c +729, 0x78d581b8e7bf95e8 +730, 0x5b69192f2caa6ad3 +731, 0xa9e25855a52de3ce +732, 0x69d8e8fc45cc188d +733, 0x5dd012c139ad347d +734, 0xfcb01c07b77db606 +735, 0x56253e36ab3d1cce +736, 0x1181edbb3ea2192 +737, 0x325bef47ff19a08d +738, 0xd3e231ceb27e5f7 +739, 0x8e819dd2de7956d2 +740, 0x34a9689fe6f84a51 +741, 0x3e4eeb719a9c2927 +742, 0x5c3b3440581d0aaf +743, 0x57caf51897d7c920 +744, 0xec6a458130464b40 +745, 0xe98f044e0da40e9b +746, 0xbe38662020eeb8e7 +747, 0x7b8c407c632724ae +748, 0x16c7cfa97b33a544 +749, 0xd23359e2e978ae5a +750, 0x4fdba458250933dd +751, 0x3c9e0713cfe616ba +752, 0x6f0df87b13163b42 +753, 0xc460902cb852cc97 +754, 0x289df8fefd6b0bce +755, 0x4ac2a2a1c3fb8029 +756, 0x2fc3e24d8b68eef7 +757, 0x34564386a59aab9a +758, 0x31047391ebd67ce4 +759, 0x6c23d070a0564d41 +760, 0xba6387b2b72545f7 +761, 0xcdcf1008058387af +762, 0xc9308fa98db05192 +763, 0xdbdbb5abd01a9d84 +764, 0x937088275c7804ab +765, 0x6f6accfefe34ee81 +766, 0x5c33c74c49cfdb2c +767, 
0x5e1a771edfb92bd3 +768, 0x6e89b009069ecae7 +769, 0x34d64e17ec0e8968 +770, 0x841203d0cde0c330 +771, 0x7642cc9d7eb9e9cb +772, 0xca01d2e8c128b97e +773, 0x5b8390617b3304ab +774, 0x52ec4ed10de1eb2d +775, 0xb90f288b9616f237 +776, 0x5bd43cd49617b2e2 +777, 0x1a53e21d25230596 +778, 0x36ccd15207a21cd6 +779, 0xc8263d780618fd3c +780, 0x6eb520598c6ce1cb +781, 0x493c99a3b341564f +782, 0xab999e9c5aa8764f +783, 0xab2fa4ceaba84b +784, 0xbbd2f17e5cb2331b +785, 0xc8b4d377c0cc4e81 +786, 0x31f71a6e165c4b1e +787, 0xd1011e55fb3addaa +788, 0x5f7ec34728dfa59 +789, 0x2aef59e60a84eb0f +790, 0x5dde6f09aec9ad5f +791, 0x968c6cdbc0ef0438 +792, 0x1957133afa15b13a +793, 0xbaf28f27573a64c2 +794, 0xc6f6ddd543ebf862 +795, 0xdd7534315ec9ae1e +796, 0xd2b80cd2758dd3b +797, 0xa38c3da00cc81538 +798, 0x15c95b82d3f9b0f9 +799, 0x6704930287ce2571 +800, 0x9c40cc2f6f4ecb0c +801, 0xc8de91f50b22e94e +802, 0x39272e8fddbfdf0a +803, 0x879e0aa810a117d +804, 0xa312fff4e9e5f3bd +805, 0x10dd747f2835dfec +806, 0xeb8466db7171cdae +807, 0xaa808d87b9ad040a +808, 0xab4d2229a329243a +809, 0x7c622f70d46f789c +810, 0x5d41cef5965b2a8e +811, 0xce97ec4702410d99 +812, 0x5beba2812c91211b +813, 0xf134b46c93a3fec7 +814, 0x76401d5630127226 +815, 0xc55fc9d9eacd4ec1 +816, 0xaec8cefaa12f813f +817, 0x2f845dcfd7b00722 +818, 0x3380ab4c20885921 +819, 0xdb68ad2597691b74 +820, 0x8a7e4951455f563f +821, 0x2372d007ed761c53 +822, 0xcab691907714c4f1 +823, 0x16bc31d6f3abec1a +824, 0x7dff639fbcf1824 +825, 0x6666985fbcff543d +826, 0xb618948e3d8e6d0c +827, 0x77b87837c794e068 +828, 0xcd48288d54fcb5a8 +829, 0x47a773ed6ae30dc3 +830, 0xba85ae44e203c942 +831, 0xa7a7b21791a25b2d +832, 0x4029dd92e63f19e0 +833, 0xc2ad66ab85e7d5aa +834, 0xa0f237c96fdab0db +835, 0xffefb0ab1ca18ed +836, 0x90cb4500785fd7d5 +837, 0xa7dd3120f4876435 +838, 0x53f7872624694300 +839, 0xea111326ff0040d9 +840, 0x5f83cb4cce40c83b +841, 0x918e04936c3b504d +842, 0x87a8db4c0e15e87c +843, 0x7cff39da6a0dedd0 +844, 0x36f7de2037f85381 +845, 0xd1d8d94022a1e9a7 +846, 0x2c9930127dc33ec9 +847, 0x6cb4719dcd0101c6 +848, 0xc01868cde76935f7 +849, 0x6b86f2ec1ab50143 +850, 0x68af607d8d94ae61 +851, 0xe216c5b95feedf34 +852, 0x4b866bd91efe2e4b +853, 0x4bff79df08f92c99 +854, 0x6ff664ea806acfd1 +855, 0x7fce0b3f9ece39bc +856, 0x29bc90b59cb3db97 +857, 0x833c4b419198607d +858, 0xf3573e36ca4d4768 +859, 0x50d71c0a3c2a3fa8 +860, 0xd754591aea2017e7 +861, 0x3f9126f1ee1ebf3 +862, 0xe775d7f4b1e43de8 +863, 0xe93d51628c263060 +864, 0x83e77f6fb32d6d82 +865, 0x43dd7eef823408e4 +866, 0x1c843c2c90180662 +867, 0xe924dafb9a16066b +868, 0x6af3ee96e7b7fbd9 +869, 0x94d5c4f37befcd1f +870, 0x40ffb04bedef4236 +871, 0x71c17bbc20e553e +872, 0x101f7a0a6208729f +873, 0x5ca34570cf923548 +874, 0x8e3139db2e96e814 +875, 0x3ab96d96263d048d +876, 0x97f3c0bbc6755c3c +877, 0x31fc72daedaef3dc +878, 0x71f8d7855d10789b +879, 0xce6dc97b4662333b +880, 0xfddc2aabd342bc61 +881, 0xefbd4007ff8c7d2e +882, 0xf72cd6c689ef8758 +883, 0x932c8b0c0e755137 +884, 0x94cc4dedd58ff69 +885, 0xde4dfd6890535979 +886, 0xdb00dcd2dcb4a50a +887, 0xb0466240b4548107 +888, 0x9cb9264c7b90d1a3 +889, 0x357e378e9be5766b +890, 0x6e0316ef03367bbf +891, 0x201ea18839544ca +892, 0x803ff3406be5f338 +893, 0xf9d5e82fd4144bb2 +894, 0x1b6b88ca701e9f47 +895, 0xd1fe5ab8e1f89cc0 +896, 0x14171fe176c4bece +897, 0x887948bdef78beaa +898, 0x80449ddc3eb9b977 +899, 0x5f4e1f900fb4bcf3 +900, 0xbe30f8701909f8e2 +901, 0xd1f2a2fb5503306d +902, 0x6b1c77238dc23803 +903, 0x102156a6c9860f66 +904, 0x4cd446e099edf4c1 +905, 0xc79ac6cbc911f33b +906, 0x3ee096ffe3384f1c +907, 0xb58f83b18a306dc7 +908, 0x9f76582141de56b2 +909, 
0x9ddfa85e02c13866 +910, 0x4d9a19d4ce90a543 +911, 0xbf81ab39fd17d376 +912, 0x5327e5054c6a74f1 +913, 0xd5062dd31db1a9b7 +914, 0x645853735527edc +915, 0x485393967f91af08 +916, 0xeff9667dcf77ca68 +917, 0xd012313f5fbec464 +918, 0xbeae35bdfae55144 +919, 0x302c41ebac8444a0 +920, 0x9ccdb6c2fe58fba8 +921, 0x567753af68ed23f8 +922, 0xff90f790e43efec3 +923, 0x970cc756fb799696 +924, 0xe59239d1c44915 +925, 0x4d2d189fb3941f05 +926, 0x96f23085db165a9c +927, 0xa1202dec7a37b1a5 +928, 0xc0c1ee74bcd7dc1a +929, 0x9edcf2048b30333a +930, 0xd848588ba7e865fb +931, 0x8d9f0897317cab40 +932, 0x67b96f15e25924fb +933, 0xefc8d8536619ee42 +934, 0xf3f621d22bdde0c2 +935, 0x68610a0de862ae32 +936, 0xa22ca5142de24cbd +937, 0x8815452f4e6b4801 +938, 0x4e9c1b607b2750e5 +939, 0x19b3c09ba6fc9b25 +940, 0x9b2543c8836780ac +941, 0xe702b8f950e56431 +942, 0xb357cc329cac3917 +943, 0x387bf86a17a31e08 +944, 0x9940b983d331b163 +945, 0xf5d89d7fe9095e18 +946, 0x4362682329e5c4d1 +947, 0xd2132573f6ae7b42 +948, 0xc0a5849e23a61606 +949, 0xdadbddf47265bc02 +950, 0x1b96f00339a705f7 +951, 0x94e6642329288913 +952, 0x825ab3f10e6d330b +953, 0x1a1c31ac9d883ea0 +954, 0xb49076b7155c6f47 +955, 0x920cf3085dfe3ccb +956, 0x9743407c9f28e825 +957, 0x6ce8a28622402719 +958, 0xce2fe67e06baf8a6 +959, 0x3a16b34784ecf5e6 +960, 0x140467cc1d162a0c +961, 0x32d4772692ab625 +962, 0xa4f4b28562f43336 +963, 0x885b4335457bd84a +964, 0x499d3ed26c87ad8a +965, 0xc7328bcedb9a545e +966, 0xc6dd76a6cbf5d2b2 +967, 0xba9c22be404ee1aa +968, 0x70e6aee45f23521d +969, 0x61e03a798593c177 +970, 0x171671f809c68213 +971, 0x28d54872fc1d914c +972, 0x43c2fcd9bd098b53 +973, 0x172ad4c4a98b9d37 +974, 0x330860c9460f2516 +975, 0x49547f472df984f4 +976, 0x873b2436d3f0e114 +977, 0x6f99accf4ea050b6 +978, 0x5968ac874ed51613 +979, 0x4939d70d29a3c611 +980, 0x11f381ed28738d3d +981, 0xa97430d36ab3a869 +982, 0xe6fa880801129e22 +983, 0xf84decbd8f48c913 +984, 0x4425c0ed1e9a82a5 +985, 0x7a1f9485e9929d5a +986, 0xc7c51f155dfce1c6 +987, 0x9619a39501d74f2b +988, 0x7c7035955dbf4c1b +989, 0xc61ee569cf57c2c9 +990, 0x3eaf7c5b0df734e1 +991, 0xe71cb4064d1ede05 +992, 0x356e3cec80e418b2 +993, 0xca04306243a15be6 +994, 0x941cf3881fa18896 +995, 0x30dbb0e819d644e0 +996, 0xaae22c0bef02859a +997, 0x7bd30917bbaa8a94 +998, 0x2672547bc8d7d329 +999, 0x4955c92aaa231578 diff --git a/python/numpy/random/tests/data/pcg64dxsm-testset-2.csv b/python/numpy/random/tests/data/pcg64dxsm-testset-2.csv new file mode 100644 index 000000000..878c5ea7c --- /dev/null +++ b/python/numpy/random/tests/data/pcg64dxsm-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0xd97e4a147f788a70 +1, 0x8dfa7bce56e3a253 +2, 0x13556ed9f53d3c10 +3, 0x55dbf1c241341e98 +4, 0xa2cd98f722eb0e0a +5, 0x83dfc407203ade8 +6, 0xeaa083df518f030d +7, 0x44968c87e432852b +8, 0x573107b9cb8d9ecc +9, 0x9eedd1da50b9daca +10, 0xb33a6735ca451e3c +11, 0x72830d2b39677262 +12, 0x9da8c512fd0207e8 +13, 0x1fc5c91954a2672b +14, 0xd33479437116e08 +15, 0x9ccdd9390cee46f3 +16, 0x1fd39bb01acd9e76 +17, 0xedc1869a42ff7fe5 +18, 0xbd68ca0b42a6e7e9 +19, 0x620b67df09621b1f +20, 0xfa11d51bd6950221 +21, 0xc8c45b36e7d28d08 +22, 0xe9c91272fbaad777 +23, 0x2dc87a143f220e90 +24, 0x6376a7c82361f49d +25, 0x552c5e434232fe75 +26, 0x468f7f872ac195bc +27, 0x32bed6858125cf89 +28, 0xe4f06111494d09d3 +29, 0xa5c166ffea248b80 +30, 0x4e26605b97064a3f +31, 0xceafd9f6fc5569d +32, 0xb772f2f9eed9e106 +33, 0x672c65e6a93534e2 +34, 0xcdc5e1a28d1bd6a0 +35, 0x1ed9c96daeebd3e3 +36, 0x4d189dcfc0c93c3f +37, 0x50df5a95c62f4b43 +38, 0xcccf4949fa65bbb8 +39, 0x19b8073d53cdc984 +40, 0x6fb40bba35483703 +41, 0xb02de4aef86b515a +42, 
0x4d90c63655350310 +43, 0xea44e4089825b16c +44, 0x8d676958b1f9da2b +45, 0x6d313940917ae195 +46, 0x1b1d35a4c1dd19f4 +47, 0x117720f8397337ef +48, 0xcc073cf3ac11eeaa +49, 0x8331ec58a9ff8acb +50, 0xf3dc2a308b6b866f +51, 0x7eba1202663382b6 +52, 0x8269839debeb4e5a +53, 0x87fd3dc0f9181a8e +54, 0xabe62ddd3c925f03 +55, 0x7f56f146944fe8d4 +56, 0xc535972150852068 +57, 0x60b252d453bd3a68 +58, 0x4251f0134634490a +59, 0x338950da210dfeb2 +60, 0xcadfe932971c9471 +61, 0xfb7049457fab470e +62, 0x9bfb8145a4459dff +63, 0x4a89dda3898f9d8a +64, 0x88cc560151483929 +65, 0x277dc820f4b6796e +66, 0x3524bd07ea0afb88 +67, 0x92eb6ffb2bf14311 +68, 0xf6559be0783f3fe9 +69, 0xf0844f9af54af00d +70, 0xdd5e0b59adcef8a +71, 0x4ff7e4f2ab18554c +72, 0x3fa22c8a02634587 +73, 0x1db8e1a9442fe300 +74, 0x40cf15953ad3d3e7 +75, 0x92af15fe1a9f6f0a +76, 0xab4a0e466fb0cfd +77, 0x944f1555a06cca82 +78, 0x10cf48412f1f6066 +79, 0x7f51f9a455f9e8e1 +80, 0x47ee93530f024c7e +81, 0x36cf2f0413e0f6f2 +82, 0xa315e23731969407 +83, 0xd8e2796327cf5f87 +84, 0xa86072696a555c34 +85, 0xee3f0b8804feaab7 +86, 0x41e80dc858f8360b +87, 0x31ec2e9b78f5b29 +88, 0xd397fb9b8561344c +89, 0x28081e724e649b74 +90, 0x5c135fc3fc672348 +91, 0x9a276ca70ce9caa0 +92, 0x9216da059229050a +93, 0xcf7d375ed68007b0 +94, 0xa68ad1963724a770 +95, 0xd4350de8d3b6787c +96, 0xee7d2c2cc275b6d2 +97, 0x71645ec738749735 +98, 0x45abdf8c68d33dbb +99, 0xe71cadb692c705ea +100, 0x60af6f061fd90622 +101, 0x1eabe2072632c99d +102, 0x947dda995a402cb6 +103, 0xbb19f49a3454f3b +104, 0xe6e43e907407758c +105, 0xfe2b67016bd6873a +106, 0x7fdb4dd8ab30a722 +107, 0x39d3265b0ff1a45b +108, 0xed24c0e4fce8d0c2 +109, 0xf6e074f86faf669d +110, 0x9142040df8dc2a79 +111, 0x9682ab16bc939a9c +112, 0x6a4e80c378d971c8 +113, 0x31309c2c7fc2d3d6 +114, 0xb7237ec682993339 +115, 0x6a30c06bb83dccd9 +116, 0x21c8e9b6d8e7c382 +117, 0x258a24ae6f086a19 +118, 0xb76edb5be7df5c35 +119, 0x3c11d7d5c16e7175 +120, 0xbdfc34c31eff66e1 +121, 0x8af66e44be8bf3a2 +122, 0x3053292e193dec28 +123, 0xd0cc44545b454995 +124, 0x408ac01a9289d56 +125, 0x4e02d34318ec2e85 +126, 0x9413ff3777c6eb6b +127, 0xa3a301f8e37eb3df +128, 0x14e6306bd8d8f9f9 +129, 0xd3ea06ce16c4a653 +130, 0x170abe5429122982 +131, 0x7f9e6fddc6cacb85 +132, 0xa41b93e10a10a4c8 +133, 0x239216f9d5b6d0b5 +134, 0x985fcb6cb4190d98 +135, 0xb45e3e7c68f480c6 +136, 0xc1b2fc2e0446211c +137, 0x4596adb28858c498 +138, 0x2dd706f3458ddc75 +139, 0x29c988c86f75464 +140, 0xac33a65aa679a60 +141, 0xa28fef762d39d938 +142, 0x541e6fa48647f53 +143, 0x27838d56b2649735 +144, 0x8e143d318a796212 +145, 0xaea6097745f586b8 +146, 0x636143330f8ee2e6 +147, 0xc2d05fd8b945b172 +148, 0x6e355f9eb4353055 +149, 0xeb64ca42e8bf282e +150, 0xe8202dfd9da0fe5 +151, 0x7305689c9d790cba +152, 0xf122f8b1bef32970 +153, 0x9562887e38c32ba5 +154, 0xf9cd9be121b738d +155, 0x6238e0c398307913 +156, 0x5f2e79bb07c30f47 +157, 0x8ce8e45c465006e +158, 0x39281fe1e99e2441 +159, 0xafb10c2ca2874fea +160, 0x6e52f91633f83cf +161, 0x8ff12c1ac73c4494 +162, 0xe48608a09365af59 +163, 0xefd9bbc7e76e6a33 +164, 0xbe16a39d5c38ec92 +165, 0x6a6ffbcaf5a2330f +166, 0xdd5d6ac7d998d43d +167, 0x207bf978226d4f11 +168, 0xf8eec56bd2a0f62e +169, 0xa5bccf05dce0d975 +170, 0x93cf3ec1afe457a6 +171, 0x38651466d201f736 +172, 0x3ad21473985c9184 +173, 0xc6407a3bd38c92a6 +174, 0xb1ec42c7afa90a25 +175, 0xbdeca984df8b7dd3 +176, 0xb6926b1d00aa6c55 +177, 0x86141d0022352d49 +178, 0x169316256135ee09 +179, 0xffb1c7767af02a5c +180, 0x502af38ad19f5c91 +181, 0xfbf6cbc080086658 +182, 0x33cf9b219edae501 +183, 0x46e69bebd77b8862 +184, 0xf11e0cc91125d041 +185, 0xb4cd1649f85e078f +186, 0xb49be408db4e952 +187, 
0xb0b8db46140cce3c +188, 0xba647f2174012be7 +189, 0x4f0a09e406970ac9 +190, 0xf868c7aec9890a5c +191, 0xde4c8fa7498ea090 +192, 0x872ceb197978c1d4 +193, 0x1eb5cd9c3269b258 +194, 0x3ea189f91724f014 +195, 0x41379656f7746f2c +196, 0x7bd18493aca60e51 +197, 0x5380c23b0cbbf15e +198, 0x920b72835f88246b +199, 0x24d7f734a4548b8e +200, 0x9944edb57e5aa145 +201, 0x4628e136ebb8afe1 +202, 0xb4ee6a776356e2a7 +203, 0x481cbe9744ccf7d7 +204, 0x7e8d67e8b0b995d9 +205, 0xeeacde100af7b47e +206, 0x103da08f2487dab7 +207, 0x6b9890a91d831459 +208, 0xd0c5beae37b572c7 +209, 0xfdccc371ee73fcc +210, 0x65438f0a367a2003 +211, 0x5d23b2c818a7e943 +212, 0x9a8ed45ac04b58b3 +213, 0xdaf3c3f1695dce10 +214, 0x5960eec706fa2bc0 +215, 0x98ca652facb80d40 +216, 0x72970ae5e2194143 +217, 0x18c6374d878c5c94 +218, 0x20fa51f997381900 +219, 0x3af253dba26d6e1d +220, 0x1b23d65db15c7f78 +221, 0x9f53ae976259b0e3 +222, 0x9a6addb28dc92d49 +223, 0x1e085c4accd0a7d7 +224, 0xe9d3f4cc9bad6ce5 +225, 0xe018fad78b5b1059 +226, 0x5ef7682232b4b95 +227, 0xb2242aa649f5de80 +228, 0x8f3e6d8dd99b9e4e +229, 0xb9be6cc22949d62a +230, 0xecbdc7beaa5ff1fe +231, 0xd388db43a855bdf0 +232, 0xd71ee3238852568d +233, 0x85ab3056304c04b5 +234, 0x2ed7ae7ad3cfc3cb +235, 0x781d1b03d40b6c48 +236, 0x7d3c740886657e6d +237, 0x982cfa6828daa6b0 +238, 0x278579599c529464 +239, 0x773adecfae9f0e08 +240, 0x63a243ea4b85c5d7 +241, 0x59940074fc3709e1 +242, 0xc914a2eed58a6363 +243, 0x2602b04274dd724c +244, 0xdf636eb7636c2c42 +245, 0x891a334d0d26c547 +246, 0xde8cd586d499e22d +247, 0x3ea1aa4d9b7035b6 +248, 0xd085cff6f9501523 +249, 0xe82a872f374959e +250, 0x55cb495bbd42cc53 +251, 0x5f42b3226e56ca97 +252, 0xea463f6f203493a3 +253, 0xeef3718e57731737 +254, 0x1bd4f9d62b7f9f3c +255, 0x19284f5e74817511 +256, 0xaf6e842c7450ca87 +257, 0x1d27d2b08a6b3600 +258, 0xfb4b912b396a52e3 +259, 0x30804d4c5c710121 +260, 0x4907e82564e36338 +261, 0x6441cf3b2900ddb7 +262, 0xd76de6f51988dc66 +263, 0x4f298ef96fd5e6d2 +264, 0x65432960c009f83d +265, 0x65ebed07e1d2e3df +266, 0xf83ee8078febca20 +267, 0x7bb18e9d74fc5b29 +268, 0x597b5fbc2261d91 +269, 0xea4f8ed0732b15b2 +270, 0xba2267f74f458268 +271, 0x3f304acabd746bbb +272, 0x7bd187af85659a82 +273, 0x88e20dbdb7a08ea3 +274, 0x2a2dc948c772fcb4 +275, 0x87784fec2993c867 +276, 0x89163933cd362d4e +277, 0xfd7b24f04302f957 +278, 0x9bdd544405dfb153 +279, 0xddee0fac58ffc611 +280, 0xa8e8993417e71ec1 +281, 0x55e0ab46ff7757af +282, 0x53e7645f08d3d7df +283, 0xbf78e563bc656ba2 +284, 0x1d162253b45ee2de +285, 0x15e2bfefedf29eb4 +286, 0x4e2a4584aa394702 +287, 0xa89fb12b01525897 +288, 0x825bd98f0544e4df +289, 0xfc6c50da6750700 +290, 0xc24aaabde7d28423 +291, 0x79d6f4660fcb19e5 +292, 0xee7d4fb40c8d659f +293, 0x70bc281b462e811d +294, 0x23ed4dc9636519a7 +295, 0xcb7c3f5a5711b935 +296, 0xe73090e0508c5d9d +297, 0xb25a331f375952a6 +298, 0xa64c86e0c04740f6 +299, 0xb8f3ffc8d56ac124 +300, 0x2479266fc5ee6b15 +301, 0x8d5792d27f5ffbcb +302, 0xb064298be946cd52 +303, 0xf0934a98912ffe26 +304, 0xbe805682c6634d98 +305, 0xe0e6e2c010012b4f +306, 0x58c47d475f75976 +307, 0x358c9a6e646b2b4a +308, 0x7e7c4ffca5b17ba7 +309, 0x43585c8c9a24a04c +310, 0x5154ddbcd68d5c2c +311, 0x4a2b062d3742a5e +312, 0xca5691191da2b946 +313, 0x696a542109457466 +314, 0x9eb5d658a5022ba5 +315, 0x8158cf6b599ab8dc +316, 0x1b95391eaa4af4a6 +317, 0x9953e79bd0fc3107 +318, 0x8639690086748123 +319, 0x2d35781c287c6842 +320, 0x393ef0001cd7bc8f +321, 0xe3a61be8c5f2c22a +322, 0x5e4ff21b847cc29b +323, 0x4c9c9389a370eb84 +324, 0xd43a25a8fc3635fa +325, 0xf6790e4a85385508 +326, 0x37edf0c81cb95e1d +327, 0x52db00d6e6e79af8 +328, 0x3b202bceeb7f096 +329, 
0x2a164a1c776136bb +330, 0x73e03ee3fd80fd1b +331, 0xd2c58c0746b8d858 +332, 0x2ed2cb0038153d22 +333, 0x98996d0fc8ceeacc +334, 0xa4ed0589936b37f +335, 0x5f61cf41a6d2c172 +336, 0xa6d4afb538c110d7 +337, 0xe85834541baadf1a +338, 0x4c8967107fd49212 +339, 0x49bafb762ab1a8c1 +340, 0x45d540e2a834bf17 +341, 0x1c0ec8b4ed671dac +342, 0x3d503ce2c83fe883 +343, 0x437bfffd95f42022 +344, 0xc82d1e3d5c2bc8d2 +345, 0x7a0a9cbfcb0d3f24 +346, 0xc0a4f00251b7a3be +347, 0xb5be24e74bb6a1c6 +348, 0xa3104b94b57545b1 +349, 0x86de7d0c4b97b361 +350, 0x879c1483f26538a6 +351, 0xd74c87557f6accfb +352, 0x2f9be40dbf0fe8a1 +353, 0x445a93398f608d89 +354, 0x7b3cb8a7211d7fdc +355, 0xe86cc51290d031e7 +356, 0x33ef3594052ad79f +357, 0xc61911d241dbb590 +358, 0x37cccb0c0e3de461 +359, 0xb75259124080b48b +360, 0xd81e8961beb4abe5 +361, 0xf4542deb84a754e +362, 0x6ea036d00385f02e +363, 0xa7b60b0ac3b88681 +364, 0x108a6c36ca30baf5 +365, 0x4a2adc5bbfe2bf07 +366, 0x4079501f892a5342 +367, 0x55e113963c5448f0 +368, 0x8019ff4903b37242 +369, 0x109c6dcdb7ec6618 +370, 0x1239ac50944da450 +371, 0xe1399c7f94c651c1 +372, 0x5a6bbbae388d365a +373, 0x4d72be57b8810929 +374, 0x3f067df24384e1fb +375, 0x4f8b9e0f7f6c7be +376, 0x202492c342a3b08 +377, 0x250753192af93a3 +378, 0xfba1159d9de2cb8e +379, 0xba964497ab05505c +380, 0x1329ec5d8a709dca +381, 0x32927cacb6cd22bb +382, 0x6b4d7db904187d56 +383, 0xe76adccf8e841e02 +384, 0x8c4bf4b6a788202 +385, 0x3013a3b409831651 +386, 0x7427d125c475412f +387, 0x84dcc4bb2bf43202 +388, 0x117526f1101372a5 +389, 0xfe95d64b8984bd72 +390, 0x524e129934cc55c1 +391, 0xc3db4b0418c36d30 +392, 0xe1cb2047e9c19f7a +393, 0xea43d6c8d8982795 +394, 0xe80ac8a37df89ed +395, 0xfecc2104329ed306 +396, 0xa5c38aac9c1d51ea +397, 0x3abe5d1c01e4fe17 +398, 0x717a805d97fcc7ac +399, 0x94441f8207a1fb78 +400, 0x22d7869c5f002607 +401, 0x349e899f28c3a1b9 +402, 0x5639950cdea92b75 +403, 0x7e08450497c375b +404, 0x94bf898b475d211d +405, 0x75c761a402375104 +406, 0x1930920ec9d2a1e7 +407, 0xb774ba1bc6f6e4e2 +408, 0xf715602412e5d900 +409, 0x87bb995f4a13f0ba +410, 0xa3c787868dfa9c8d +411, 0xa17fd42a5a4f0987 +412, 0x4a9f7d435242b86 +413, 0x240364aff88f8aef +414, 0xe7cd4cf4bf39f144 +415, 0xd030f313ca4c2692 +416, 0xc46696f4e03ec1e9 +417, 0x22c60f1ec21060b3 +418, 0x16c88058fd68986f +419, 0x69ca448e8e6bde3f +420, 0x3466c2cdec218abd +421, 0x837ac4d05e6b117d +422, 0x911210e154690191 +423, 0x9ece851d6fa358b7 +424, 0x42f79cb0c45e7897 +425, 0xbf7583babd7c499b +426, 0x2059fe8031c6e0b9 +427, 0xabbec8fc00f7e51d +428, 0x88809d86a3a256e1 +429, 0xd36056df829fdcb5 +430, 0x515632b6cb914c64 +431, 0xba76d06c2558874 +432, 0x632c54ca4214d253 +433, 0xadec487adf2cb215 +434, 0x521e663e1940513d +435, 0xb1b638b548806694 +436, 0xbe2d5bfbe57d2c72 +437, 0x8b89e7719db02f7 +438, 0x90ba5281c1d56e63 +439, 0x899e1b92fceea102 +440, 0xf90d918e15182fa6 +441, 0x94a489ce96c948c4 +442, 0xad34db453517fcd4 +443, 0xc5264eb2de15930f +444, 0x101b4e6603a21cee +445, 0xef9b6258d6e85fff +446, 0x6075c7d6c048bd7a +447, 0x6f03232c64e438aa +448, 0x18c983d7105ee469 +449, 0x3ffc23f5c1375879 +450, 0xbc1b4a00afb1f9f +451, 0x5afa6b2bb8c6b46e +452, 0xe7fce4af2f2c152a +453, 0x5b00ab5c4b3982c7 +454, 0x2d4b0c9c0eb4bd0c +455, 0x61d926270642f1f2 +456, 0x7219c485c23a2377 +457, 0x7e471c752fecd895 +458, 0x23c4d30a4d17ba1f +459, 0x65cb277fe565ca22 +460, 0xcbb56ed9c701363b +461, 0xfd04ab3a6eba8282 +462, 0x19c9e5c8bab38500 +463, 0xea4c15227676b65b +464, 0x20f3412606c8da6f +465, 0xb06782d3bf61a239 +466, 0xf96e02d5276a9a31 +467, 0x835d256b42aa52a6 +468, 0x25b09151747f39c1 +469, 0x64507386e1103eda +470, 0x51cbc05716ef88e4 +471, 
0x998cd9b7989e81cc +472, 0x9d7115416bec28d1 +473, 0xc992ca39de97906b +474, 0xd571e6f7ca598214 +475, 0xafc7fb6ccd9abbf8 +476, 0x88ef456febff7bf4 +477, 0xdbe87ccc55b157d2 +478, 0xaab95e405f8a4f6d +479, 0xad586a385e74af4f +480, 0x23cd15225c8485aa +481, 0x370940bf47900ac7 +482, 0xefd6afda1a4b0ead +483, 0x9cb1a4c90993dd7a +484, 0xff7893e8b2f70b11 +485, 0xb09e1807c0638e8e +486, 0xb10915dcb4978f74 +487, 0x88212ab0051a85eb +488, 0x7af41b76e1ec793f +489, 0x2e5c486406d3fefd +490, 0xebe54eff67f513cc +491, 0xab6c90d0876a79b8 +492, 0x224df82f93fe9089 +493, 0xc51c1ce053dc9cd2 +494, 0x5ef35a4d8a633ee7 +495, 0x4aca033459c2585f +496, 0xd066932c6eefb23d +497, 0x5309768aab9a7591 +498, 0xa2a3e33823df37f9 +499, 0xcec77ff6a359ee9 +500, 0x784dc62d999d3483 +501, 0x84e789fb8acc985d +502, 0xd590237e86aa60f +503, 0x737e2ffe1c8ad600 +504, 0xc019c3a39a99eab8 +505, 0x6a39e9836964c516 +506, 0xe0fe43129535d9da +507, 0xdfc5f603d639d4de +508, 0x7b9a7d048a9c03b6 +509, 0xbb5aa520faa27fdd +510, 0x2a09b4200f398fa2 +511, 0x38cc88107904064e +512, 0xa9a90d0b2d92bb25 +513, 0x9419762f87e987e3 +514, 0x1a52c525153dedcd +515, 0xc26d9973dd65ae99 +516, 0x8e89bd9d0dc6e6a1 +517, 0x2f30868dc01bfb53 +518, 0x20f09d99b46501c4 +519, 0x78b468a563b8f1e9 +520, 0xcccf34b0b6c380c7 +521, 0xf554e7dc815297e6 +522, 0x332a585cfb4a50ef +523, 0xa9fb64a2b6da41d7 +524, 0xdcd2a5a337391ce0 +525, 0x8a9bd3e324c6463d +526, 0x9f4487d725503bdd +527, 0xf72282d82f1d0ff +528, 0x308f4160abb72d42 +529, 0x648de1db3a601b08 +530, 0x36cab5192e7ebd39 +531, 0x7975fbe4ab6a1c66 +532, 0xd515b4d72243864e +533, 0x43a568f8b915e895 +534, 0x15fa9f2057bdb91d +535, 0x7a43858ef7a222dc +536, 0x17b4a9175ac074fe +537, 0xa932c833b8d0f8f8 +538, 0x1d2db93a9a587678 +539, 0x98abd1d146124d27 +540, 0xf0ab0431671740aa +541, 0xa9d182467540ad33 +542, 0x41c8a6cfc331b7fc +543, 0xa52c6bd0fcd1d228 +544, 0x2773c29a34dc6fa3 +545, 0x3098230746fc1f37 +546, 0xd63311bb4f23fabe +547, 0x6712bf530cd2faec +548, 0x342e8f342e42c4dd +549, 0xfbd83331851cdcad +550, 0xe903be1361bbc34d +551, 0xd94372e5077e3ef9 +552, 0x95aaa234f194bd8 +553, 0x20c0c8fb11e27538 +554, 0xfaf47dc90462b30b +555, 0x8ddc6d144147682a +556, 0xf626833fd926af55 +557, 0x5df93c34290d1793 +558, 0xb06a903e6e9fca5e +559, 0x10c792dc851d77ca +560, 0xd9b1b817b18e56cb +561, 0x3a81730c408eb408 +562, 0x65052c04a8d4b63c +563, 0x3328546598e33742 +564, 0xeca44a13f62d156d +565, 0x69f83d1d86b20170 +566, 0x937764200412027d +567, 0xc57eb1b58df0f191 +568, 0xa1c7d67dce81bc41 +569, 0x8e709c59a6a579ce +570, 0x776a2f5155d46c70 +571, 0xd92906fbbc373aa5 +572, 0xe97ad478a2a98bf6 +573, 0xc296c8819ac815f +574, 0x613ede67ba70e93e +575, 0xe145222498f99cde +576, 0xafcdfa7a3c1cf9bf +577, 0x1c89252176db670d +578, 0xad245eda5c0865ff +579, 0x249463d3053eb917 +580, 0xc9be16d337517c0b +581, 0xefcc82bf67b8f731 +582, 0x1e01577d029e0d00 +583, 0xad9c24b2a4f3d418 +584, 0xed2cceb510db4d0f +585, 0xbddadcdb92400c70 +586, 0x67d6b0476ef82186 +587, 0xbc7662ff7bf19f73 +588, 0x9d94452a729e6e92 +589, 0x6b278d8594f55428 +590, 0x6c4b31cceb1b2109 +591, 0xccc6c3a726701e9 +592, 0x6bc28ece07df8925 +593, 0xc0422b7bf150ccc4 +594, 0xab7158f044e73479 +595, 0xdf3347546d9ed83f +596, 0x3b3235a02c70dff4 +597, 0x2551c49c14ea8d77 +598, 0xee2f7f5bb3cc228e +599, 0x39b87bfe8c882d39 +600, 0x7dd420fad380b51c +601, 0xffe64976af093f96 +602, 0x4a4f48dc6e7eaa5f +603, 0x85f2514d32fdc8cc +604, 0x1ab1215fd7f94801 +605, 0x4cd1200fc795b774 +606, 0xcf8af463a38942ee +607, 0x319caa7ce3022721 +608, 0x8cd9798a76d1aea4 +609, 0x2bd3933ac7afd34e +610, 0x85d4c323403cf811 +611, 0xd7b956d3064efa30 +612, 0x67a078dbf1f13068 +613, 
0x665fa6c83e87c290 +614, 0x9333ac2416d2469b +615, 0xdfb1fd21a0094977 +616, 0xa1962a6e2c25f8ff +617, 0x1f3b10a7ed5287cf +618, 0x70641efb3d362713 +619, 0xe527a2cf85d00918 +620, 0x9741e45d3f9890a3 +621, 0x6cb74b5d4d36db4b +622, 0xf24734d622bd2209 +623, 0xadd6d94f78e9d378 +624, 0xc3bbdb59225cca7f +625, 0x5ad36614275b30cd +626, 0x495568dd74eea434 +627, 0xf35de47e0ffe1f2d +628, 0xefa209dca719ab18 +629, 0x844ddcaeb5b99ae8 +630, 0x37449670a1dc7b19 +631, 0x5a4612c166f845c1 +632, 0xe70f7782f2087947 +633, 0x98d484deac365721 +634, 0x705302198cf52457 +635, 0x7135ae0f5b77df41 +636, 0x342ac6e44a9b6fc3 +637, 0x2713fd2a59af5826 +638, 0x6e1a3f90f84efa75 +639, 0x9fb3b4dd446ca040 +640, 0x530044ae91e6bd49 +641, 0xe984c4183974dc3e +642, 0x40c1fa961997d066 +643, 0xb7868250d8c21559 +644, 0x8bc929fa085fd1de +645, 0x7bdb63288dc8733e +646, 0xac4faad24326a468 +647, 0x1c6e799833aea0b1 +648, 0xcc8a749e94f20f36 +649, 0x4e7abfd0443547c5 +650, 0xb661c73bb8caa358 +651, 0x4a800f5728ff2351 +652, 0x8c15e15189b9f7ed +653, 0xab367846b811362c +654, 0x4ba7508f0851ca2a +655, 0xe9af891acbafc356 +656, 0xbdebe183989601f8 +657, 0x4c665ea496afc061 +658, 0x3ca1d14a5f2ed7c +659, 0xfbdff10a1027dd21 +660, 0xdfd28f77c8cff968 +661, 0xc4fbaadf8a3e9c77 +662, 0xdac7e448b218c589 +663, 0xb26390b5befd19e2 +664, 0xd2ef14916c66dba9 +665, 0xfab600284b0ff86b +666, 0xf04a1c229b58dabb +667, 0xc21c45637e452476 +668, 0xd1435966f75e0791 +669, 0xc1f28522eda4a2d0 +670, 0x52332ae8f1222185 +671, 0x81c6c0790c0bf47e +672, 0xfebd215e7d8ffb86 +673, 0x68c5dce55dbe962b +674, 0x231d09cb0d2531d1 +675, 0x3218fba199dbbc6b +676, 0x8f23c535f8ea0bf6 +677, 0x6c228963e1df8bd9 +678, 0x9843c7722ed153e3 +679, 0xd032d99e419bddec +680, 0xe2dca88aa7814cab +681, 0x4d53fb8c6a59cdc2 +682, 0x8fb3abc46157b68b +683, 0xa3e733087e09b8e +684, 0x6bdc1aee029d6b96 +685, 0x4089667a8906d65b +686, 0x8f3026a52d39dd03 +687, 0x6d2e0ccb567bae84 +688, 0x74bad450199e464 +689, 0xf114fb68a8f300d5 +690, 0xc7a5cc7b374c7d10 +691, 0xf0e93da639b279d1 +692, 0xb9943841ad493166 +693, 0x77a69290455a3664 +694, 0x41530da2ebea054b +695, 0xe8f9fab03ea24abf +696, 0xaa931f0c9f55a57a +697, 0xb4d68a75d56f97ae +698, 0x3d58ff898b6ba297 +699, 0x49d81e08faf5a3f5 +700, 0xfc5207b9f3697f3b +701, 0xa25911abb3cf19b7 +702, 0x6b8908eb67c3a41 +703, 0xd63ef402e2e3fa33 +704, 0x728e75d3f33b14c5 +705, 0x248cb1b8bc6f379a +706, 0x3aa3d6d2b8c72996 +707, 0x49cc50bd2d3d2860 +708, 0xb4e1387647c72075 +709, 0x435a1630a4a81ed3 +710, 0xa5ea13005d2460cf +711, 0xc7a613df37d159ec +712, 0x95721ccc218b857e +713, 0xd4b70d8c86b124d3 +714, 0x2b82bcc4b612d494 +715, 0xaf13062885276050 +716, 0xcbd8fcf571a33d9c +717, 0x3f7f67ca1125fc15 +718, 0xddf4bb45aac81b4c +719, 0x23606da62de9c040 +720, 0xa3a172375666b636 +721, 0x292f87387a6c6c3c +722, 0xd1d10d00c5496fe1 +723, 0x86b0411ce8a25550 +724, 0x38e0487872e33976 +725, 0x363e49f88ddfd42c +726, 0x45bdf1e9f6b66b0a +727, 0x8a6fff3de394f9b5 +728, 0x8502158bb03f6209 +729, 0x22e24d16dba42907 +730, 0x3fe3ba427cc2b779 +731, 0x77144793f66b3d7e +732, 0xcf8912ccb29b8af9 +733, 0xdc856caff2abd670 +734, 0xe6d3ae0b0d9d4c8b +735, 0xb8f5d40e454c539f +736, 0x79ca953114fbc6b7 +737, 0x478d6f4bbfa38837 +738, 0x9babae1a3ffdc340 +739, 0x40edd56802bae613 +740, 0x97a56c2dcccf0641 +741, 0xafc250257f027f8e +742, 0x8da41ef1edf69125 +743, 0x6574b0280ff9d309 +744, 0x197c776151b8f820 +745, 0x6b03e077c9dac3b6 +746, 0x24a40ebbc5c341c5 +747, 0x50e585169a6a1c4b +748, 0x37783a5a6a3e4e02 +749, 0xb3de81ee6fbad647 +750, 0xf4f292f57ca4591e +751, 0x6214e9e7d44d30a +752, 0x5920190c56d21c12 +753, 0x9ac163419b5e0c9b +754, 0xfc2328761ae8ed93 +755, 
0xc68f945b545508c6 +756, 0x687c49a17ce0a5e2 +757, 0x276d8f53d30d4ab4 +758, 0x8201804970343ce1 +759, 0x1b5d323cc2e7fb7e +760, 0x6f351ef04fd904b +761, 0x6c793a7d455d5198 +762, 0x46f5d108430ae91f +763, 0xac16a15b2a0cf77f +764, 0xa0d479d9e4122b9d +765, 0x3afd94604307f19 +766, 0x2573ed6d39d38dbf +767, 0xa58e14ba60b4294b +768, 0xe69c1aed5840d156 +769, 0x4cf6fda7f04855c2 +770, 0x2fb65a56ef5f22da +771, 0xf95819434d5dc220 +772, 0x29c65133623dafba +773, 0x8e997bd018467523 +774, 0xfd08ba9d498461a7 +775, 0xdd52243bc78a5592 +776, 0x39c30108f6db88b3 +777, 0x38af8e1894f259b9 +778, 0x97eedf3b4ae5f6de +779, 0x757825add80c5ece +780, 0xf0fdd90ac14edb14 +781, 0xbbb19d4cc8cac6d4 +782, 0x9a82234edfae05e3 +783, 0x704401c61d1edf1c +784, 0x8b0eb481fb3a1fb2 +785, 0xef6f36e7cc06c002 +786, 0x7a208b17e04b8cd7 +787, 0xf20e33d498838fe9 +788, 0xc2bdb22117058326 +789, 0x6ec31939eb4ca543 +790, 0x6f1654838f507a21 +791, 0xc65ab81a955d2b93 +792, 0x40b1420fdd9531b8 +793, 0xe31f221cab9f4f40 +794, 0x798cdd414c1deb7a +795, 0x9c84e9c7d41cd983 +796, 0x63d6b1ae3b60b7fa +797, 0xb42bfdd1a2f78ffa +798, 0x37e431eaccaaa8e9 +799, 0x7508142a0f73eac9 +800, 0x91662a023df5893a +801, 0x59782070e2fe3031 +802, 0xb2acd589a8ce7961 +803, 0xa224743fa877b292 +804, 0xaa5362aa27e6ed9e +805, 0xa394a4e520c0c1c7 +806, 0xe49b16d2018ffb6f +807, 0xb8074b9f2f1e762b +808, 0xcf5f86143d5c23a7 +809, 0xfd838785db987087 +810, 0x31b1889df389aff8 +811, 0x30aaca876a4383b +812, 0x1731bb71c4c38d4f +813, 0x9a83a65395e05458 +814, 0x99cd0c8d67c8f4fc +815, 0xfbd9fdc849b761a5 +816, 0x82c04834fc466889 +817, 0xdeef9d6e715e8c97 +818, 0x549c281c16da6078 +819, 0x2d70661254ad599d +820, 0x57995793a72acac +821, 0xf1727005116183ba +822, 0xa22bb38945285de3 +823, 0x4f2d687fe45131ff +824, 0x5666c87ddbbc981f +825, 0xbcb4b2d4e7a517d0 +826, 0x5e794dd2e20b785d +827, 0x449ad020149e093c +828, 0x7704ee0412d106f5 +829, 0x83cbdf257b072ac1 +830, 0xae5c4fc9f638b0da +831, 0x7b9e5a64e372ed47 +832, 0x7eddbbb22c2cdf57 +833, 0x3f19ebfa155b08e +834, 0x91d991154dfd7177 +835, 0x611ae74b952d387f +836, 0x3fdf7a335bda36ee +837, 0xdf182433fc7a7c05 +838, 0x62c78598d1f8db0a +839, 0xc3750c69d2c5c1f0 +840, 0xf1318024709efdee +841, 0xaa3fd360d224dc29 +842, 0x62af53b2f307c19 +843, 0xdf527683c58120c2 +844, 0x3281deecc496f93d +845, 0x4f704ad31527ef08 +846, 0x127a14a5e07cfdfc +847, 0x90d0b1f549255c92 +848, 0xbc3406b212c5e1fc +849, 0x4e89f39379dba91d +850, 0x1290ef43c4998e6e +851, 0xecfeb1a1cb1c6e1b +852, 0x2067e90403003bf1 +853, 0x38ae04be30bdbeba +854, 0x8a3537f298baedda +855, 0xd07f3b825cdb2936 +856, 0xea020b5aebae8b45 +857, 0xfcd614ab031132b0 +858, 0x5fb682a4ff2268f5 +859, 0xd1c4662ce65596f4 +860, 0x7026b8270dd0b8dc +861, 0x8101ec4b4beae45a +862, 0xa0e9dc87940610a6 +863, 0x83ec33679d83165b +864, 0x981847ca82e86d41 +865, 0xda84c188a304a0b7 +866, 0x3c37529c5a5bbbb8 +867, 0x34a8491ce3e19a5a +868, 0xd36ad716a2fa6cb8 +869, 0xfd1d1d6a5189a15c +870, 0x9716eb47851e8d8d +871, 0x7dfb13ea3b15c5aa +872, 0xbdf6e707f45113a5 +873, 0xb8118261b04bd097 +874, 0x6191f9895881bec6 +875, 0x7aac257ae11acf9b +876, 0x35a491e1537ff120 +877, 0xe078943432efa71c +878, 0xb3338485dd3dc2b9 +879, 0x456060975d2bb3b5 +880, 0xaddc4c451bdfc44c +881, 0x18bfa7beacf96430 +882, 0x8802ebcaf0f67498 +883, 0xad922a5a825bd780 +884, 0x9fb4587d748f4efa +885, 0xdb2a445136cd5e7 +886, 0xb98b3676ea8e96ac +887, 0xb02d8d244d784878 +888, 0xa1a8442b18860abb +889, 0x6a3029ba1361e5d1 +890, 0xf426d5fac161eb1 +891, 0xfa5ac2b87acecb23 +892, 0xaa659896e50535df +893, 0xf40dd7a3d3c5c8ed +894, 0x3f8367abecb705bc +895, 0x2d60e7525873358f +896, 0xc4a9d3948a0c3937 +897, 
0x5ecc04fef6003909 +898, 0x7a865004918cba2 +899, 0x47ae110a678ec10b +900, 0xa0f02f629d91aa67 +901, 0x4848b99e7fac9347 +902, 0xaa858346d63b80ac +903, 0xeb5bf42ee161eeef +904, 0x4d35d723d3c6ba37 +905, 0xdf22ca6ca93b64a7 +906, 0x9d198520f97b25b1 +907, 0x3068415350778efe +908, 0xf3709f2e8793c2fe +909, 0xd1517bac8dd9f16f +910, 0xfb99bccaa15861dc +911, 0xa9ad607d796a2521 +912, 0x55d3793d36bd22e4 +913, 0xf99270d891ff7401 +914, 0x401750a5c4aa8238 +915, 0xd84b3003e6f28309 +916, 0x8a23798b5fa7c98b +917, 0xadd58bbc8f43e399 +918, 0xbd8c741ada62c6a8 +919, 0xbdc6937bc55b49fa +920, 0x4aefa82201b8502 +921, 0x17adf29a717b303 +922, 0xa6ed2197be168f6c +923, 0x1ba47543f4359a95 +924, 0xe34299949ac01ae9 +925, 0x711c76cffc9b62f3 +926, 0xbac259895508a4b7 +927, 0x3c8b3b3626b0d900 +928, 0x1a8d23fbe2ae71bf +929, 0xca984fa3b5a5c3a1 +930, 0xb1986ab7521a9c93 +931, 0xd6b5b2c8d47a75b5 +932, 0xc7f1c4a88afb4957 +933, 0xdeb58033a3acd6cc +934, 0xabe49ddfe1167e67 +935, 0x8d559c10205c06e3 +936, 0xea07a1a7de67a651 +937, 0xcbef60db15b6fef8 +938, 0xbfca142cff280e7 +939, 0x362693eba0732221 +940, 0x7463237e134db103 +941, 0x45574ddb5035e17a +942, 0xfc65e0cb9b94a1aa +943, 0x3154c55f1d86b36d +944, 0x2d93a96dd6ab2d8b +945, 0xbe3bc1d1f2542a25 +946, 0xdd4b541f7385bdaa +947, 0x3b56b919d914e3f8 +948, 0x82fd51468a21895f +949, 0x8988cf120731b916 +950, 0xa06a61db5fb93e32 +951, 0x6ed66c1b36f68623 +952, 0x875ae844d2f01c59 +953, 0x17ccd7ac912e5925 +954, 0x12fe2a66b8e40cb1 +955, 0xf843e5e3923ad791 +956, 0xa17560f2fd4ef48 +957, 0x27a2968191a8ee07 +958, 0xa9aab4d22ff44a3c +959, 0x63cd0dcc3bb083ae +960, 0x7a30b48c6160bf85 +961, 0x956160fb572503b3 +962, 0xc47f6b7546640257 +963, 0xaf4b625f7f49153 +964, 0x2f5c86a790e0c7e8 +965, 0xb52e0610ae07f0b8 +966, 0x38a589292c3d849e +967, 0xc3e9ef655d30b4ef +968, 0xb5695f765cda998a +969, 0xde5d5e692a028e91 +970, 0x839476721555f72e +971, 0x48b20679b17d9ebf +972, 0xe3d4c6b2c26fb0df +973, 0xce5a9834f0b4e71f +974, 0x533abb253d5d420e +975, 0x9eac5ad9aed34627 +976, 0xc0f2a01ab3c90dbb +977, 0x6528eda93f6a066c +978, 0xc16a1b625e467ade +979, 0x1a4a320fb5e8b098 +980, 0x8819cccd8b4ab32f +981, 0x42daa88531fd0bfd +982, 0xcf732226409be17c +983, 0xfddcdb25ccbf378c +984, 0x9b15b603bf589fc1 +985, 0x2436066b95d366fe +986, 0x8d42eff2e9cbda90 +987, 0x694b2fc8a4e8303c +988, 0x8e207f98aaea3ccd +989, 0x4730d7a620f822d9 +990, 0x468dc9ca30fe2fd4 +991, 0x74b36d8a1c0f031b +992, 0x3c1aac1c488c1a94 +993, 0x19d0101042444585 +994, 0x8ec50c56d0c8adf4 +995, 0x721ec629e4d66394 +996, 0x3ca5ad93abeac4a4 +997, 0xaaebc76e71592623 +998, 0x969cc319e3ed6058 +999, 0xc0a277e3b2bfc3de diff --git a/python/numpy/random/tests/data/philox-testset-1.csv b/python/numpy/random/tests/data/philox-testset-1.csv new file mode 100644 index 000000000..e448cbf73 --- /dev/null +++ b/python/numpy/random/tests/data/philox-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xedc95200e2bd66a5 +1, 0x581d4e43b7682352 +2, 0x4be7278f5e373eab +3, 0xee47f17991a9e7ea +4, 0x38a7d2ae422f2e2c +5, 0xe2a6730a3b4a8a15 +6, 0x1588b7a841486442 +7, 0x13ad777246700504 +8, 0x14d157e0f5e18204 +9, 0xd87c22a7ee8c13f1 +10, 0x30cc389ce3542ba1 +11, 0xb8a53348955bb2e9 +12, 0xc08802e3c454f74f +13, 0xb444f627671a5780 +14, 0x4b6dd42b29cbf567 +15, 0x6109c7dc0bc5f7d5 +16, 0x85c954715d6b5b1e +17, 0x646178d3d9a3a5d5 +18, 0xebbde42b1cd83465 +19, 0x3d015102f6bc9c1a +20, 0x720fe2ec3798d5fd +21, 0x93120961289ceb2e +22, 0xc9207e960a56fae2 +23, 0xa7f042f31d991b98 +24, 0x5fac117415fae74b +25, 0xd0a970ba8dddc287 +26, 0x84b4e7e51b43106 +27, 0x6ad02bf525ea265f +28, 0xcdc7e5992b36ef8f +29, 0x44d4985209261d60 +30, 
0x628c02d50f4b902e +31, 0xc7b1914922d1e76d +32, 0xfde99ff895cba51d +33, 0x175a0be050fa985f +34, 0x47297d3699e03228 +35, 0xccf1e9aeaa3339cd +36, 0x9fdd18ebeeaf15b1 +37, 0x7c94c9ab68747011 +38, 0x612d8ef22c1fa80f +39, 0x13f52b860de89ab5 +40, 0x81f264b8c139c43b +41, 0x8d017ba4ef1e85ba +42, 0x6d0556f46219951e +43, 0x8ee7b85663cf67b6 +44, 0x2432fc707645fe67 +45, 0xaf814046051e5941 +46, 0x4d432a83739ac76f +47, 0x59e5060d0983ccdd +48, 0xdd20e828b83d9b53 +49, 0x1b891800d7385f4c +50, 0x10e86a026c52ff5e +51, 0xb932f11723f7b90c +52, 0xb2413d0a1f3582d0 +53, 0xe7cd4edda65fc6b5 +54, 0x6d3808848d56593b +55, 0x192a727c3c7f47d9 +56, 0x9659d8aea5db8c16 +57, 0x4242c79fe2c77c16 +58, 0x605f90c913827cea +59, 0x53e153c8bfc2138a +60, 0xed2158fbdef5910e +61, 0xae9e6e29d4cb5060 +62, 0x7dd51afaad3b11ce +63, 0x2b9ba533d01a5453 +64, 0x7e0e9cf2b6c72c8 +65, 0x1cc8b3c7747ed147 +66, 0x9b102651e2e11b48 +67, 0x30b0b53cbaac33ea +68, 0x70c28aec39b99b85 +69, 0x5f1417ff536fdb75 +70, 0x3a1d91abd53acf58 +71, 0xba116a1772168259 +72, 0xf5369bc9bd284151 +73, 0x67bf11373bf183ca +74, 0xef0b2d44dbd33dc7 +75, 0xbfd567ee1a2953ed +76, 0x7d373f2579b5e5c6 +77, 0x756eeae7bcdd99be +78, 0x75f16eb9faa56f3b +79, 0x96d55ded2b54b9a5 +80, 0x94495191db692c24 +81, 0x32358bdd56bab38c +82, 0x3f6b64078576579 +83, 0x7177e7948bc064c9 +84, 0x2cbf23f09ba9bc91 +85, 0x9b97cc31c26645f5 +86, 0x5af2d239ff9028b1 +87, 0x316fa920e0332abe +88, 0x46535b7d1cae10a0 +89, 0x21f0a6869298022c +90, 0xf395c623b12deb14 +91, 0x8573995180675aa7 +92, 0xc3076509f4dc42d5 +93, 0x15e11e49760c6066 +94, 0xe8a6d311e67a021d +95, 0x7482f389c883339b +96, 0xda6f881573cba403 +97, 0xb110ffb847e42f07 +98, 0x2c3393140605ccf9 +99, 0xba1c8ba37d8bdc33 +100, 0x59adf43db7a86fe0 +101, 0xb4fcbf6aa585ca85 +102, 0xd794a93c18033fa6 +103, 0x6e839c01985f9d4 +104, 0x64065bf28222b2c7 +105, 0x6a6359b293fa0640 +106, 0x5ff610969e383e44 +107, 0xa8172c263f05c7f7 +108, 0x62a0172e8bd75d07 +109, 0x7be66e3c453b65ac +110, 0x6a3b8d5a14014292 +111, 0xa2583e6087450020 +112, 0xd5d3ecc480c627d2 +113, 0xa24e83f1eec8a27c +114, 0xa23febd2a99ee75a +115, 0x9a5fbf91c7310366 +116, 0x5b63156932e039b +117, 0x942af3c569908505 +118, 0x89a850f71ab6a912 +119, 0xfeadc803ac132fe9 +120, 0x67bf60e758250f3 +121, 0x533c25103466a697 +122, 0xb7deede3482f9769 +123, 0x325e043b53bba915 +124, 0x9e8d9e7fde132006 +125, 0x6bacc6860bbc436e +126, 0xb3ea0534c42b1c53 +127, 0xb2389334db583172 +128, 0xa74b1bfbf5242ee4 +129, 0x53a487e2dc51d15c +130, 0xe5a3b538d2c7a82e +131, 0x7b6c70bb0c4cadaf +132, 0xae20791b2081df1 +133, 0xc685c12e3c61d32c +134, 0x60110e6b0286e882 +135, 0x49682119c774045c +136, 0x53dc11a3bbd072e +137, 0xbdc87c6e732d9c2d +138, 0xcc4620861ebac8fd +139, 0x7e9c3558759350cc +140, 0x157408dee34891ba +141, 0x9bcad1855b80651b +142, 0xd81b29141d636908 +143, 0x1ed041a9f319c69d +144, 0x805b2f541208b490 +145, 0x484ef3bba2eb7c66 +146, 0xb6b5e37d50a99691 +147, 0xabc26a7d9e97e85f +148, 0xcba2a3cce0417c2f +149, 0xa030dfffd701993c +150, 0x2bf2dc50582ebf33 +151, 0xd9df13dd3eb9993e +152, 0x31ca28b757232ae5 +153, 0x614562a0ccf37263 +154, 0x44d635b01725afbb +155, 0x5ae230bc9ca9cd +156, 0xb23a124eb98705c6 +157, 0x6395675444981b11 +158, 0xd97314c34119f9ca +159, 0x9de61048327dd980 +160, 0x16bac6bded819707 +161, 0xcea3700e3e84b8c7 +162, 0xaa96955e2ee9c408 +163, 0x95361dcc93b5bc99 +164, 0x306921aed3713287 +165, 0x4df87f3130cd302a +166, 0x37c451daeb6a4af5 +167, 0x8dbbe35f911d5cc1 +168, 0x518157ce61cb10f9 +169, 0x669f577aebc7b35b +170, 0x4b0a5824a8786040 +171, 0x519bc3528de379f5 +172, 0x6128012516b54e02 +173, 0x98e4f165e5e6a6dd +174, 0x6404d03618a9b882 +175, 
0x15b6aeb3d9cd8dc5 +176, 0x87ed2c1bae83c35b +177, 0x8377fc0252d41278 +178, 0x843f89d257a9ba02 +179, 0xcdda696ea95d0180 +180, 0xcfc4b23a50a89def +181, 0xf37fd270d5e29902 +182, 0xafe14418f76b7efa +183, 0xf984b81577076842 +184, 0xe8c60649ccb5458d +185, 0x3b7be8e50f8ff27b +186, 0xaa7506f25cef1464 +187, 0x5e513da59f106688 +188, 0x3c585e1f21a90d91 +189, 0x1df0e2075af292a +190, 0x29fdd36d4f72795f +191, 0xb162fe6c24cb4741 +192, 0x45073a8c02bd12c4 +193, 0xcbaaa395c2106f34 +194, 0x5db3c4c6011bc21c +195, 0x1b02aac4f752e377 +196, 0xa2dfb583eb7bec5 +197, 0xfe1d728805d34bb1 +198, 0xf647fb78bb4601ec +199, 0xd17be06f0d1f51ef +200, 0x39ec97c26e3d18a0 +201, 0xb7117c6037e142c8 +202, 0xe3a6ce6e6c71a028 +203, 0xe70a265e5db90bb2 +204, 0x24da4480530def1e +205, 0xfd82b28ce11d9a90 +206, 0x5bf61ead55074a1d +207, 0xbe9899c61dec480d +208, 0xae7d66d21e51ec9e +209, 0x384ee62c26a08419 +210, 0x6648dccb7c2f4abf +211, 0xc72aa0c2c708bdc9 +212, 0x205c5946b2b5ba71 +213, 0xd4d8d0b01890a812 +214, 0x56f185493625378d +215, 0x92f8072c81d39bd0 +216, 0xa60b3ceecb3e4979 +217, 0xfcf41d88b63b5896 +218, 0xf5a49aa845c14003 +219, 0xffcc7e99eee1e705 +220, 0xdd98312a7a43b32d +221, 0xa6339bd7730b004 +222, 0xdac7874ba7e30386 +223, 0xadf6f0b0d321c8 +224, 0x126a173ae4ffa39f +225, 0x5c854b137385c1e7 +226, 0x8173d471b1e69c00 +227, 0x23fa34de43581e27 +228, 0x343b373aef4507b1 +229, 0xa482d262b4ea919c +230, 0xf7fbef1b6f7fbba +231, 0xd8ce559487976613 +232, 0xbf3c8dd1e6ebc654 +233, 0xda41ed375451e988 +234, 0xf54906371fd4b9b3 +235, 0x5b6bb41231a04230 +236, 0x866d816482b29c17 +237, 0x11315b96941f27dc +238, 0xff95c79205c47d50 +239, 0x19c4fff96fbdac98 +240, 0xbfb1ae6e4131d0f4 +241, 0x9d20923f3cdb82c9 +242, 0x282175507c865dff +243, 0xdfd5e58a40fe29be +244, 0xedbd906ff40c8e4f +245, 0x11b04fc82614ccb3 +246, 0xeceb8afda76ae49f +247, 0xa4856913847c2cdf +248, 0x6f1425f15a627f2a +249, 0xdf144ffedf60349e +250, 0x392d7ecfd77cc65f +251, 0x72b8e2531049b2c6 +252, 0x5a7eb2bdb0ec9529 +253, 0xdcfd4306443e78c1 +254, 0x89ad67ed86cd7583 +255, 0x276b06c0779a6c8f +256, 0xb2dbb723196a0ac3 +257, 0x66c86a3b65906016 +258, 0x938348768a730b47 +259, 0x5f5282de938d1a96 +260, 0xa4d4588c4b473b1f +261, 0x8daed5962be4796f +262, 0x9dde8d796985a56e +263, 0x46be06dbd9ed9543 +264, 0xdf98286ceb9c5955 +265, 0xa1da1f52d7a7ca2b +266, 0x5a7f1449f24bbd62 +267, 0x3aedc4e324e525fd +268, 0xced62464cd0154e1 +269, 0x148fc035e7d88ce3 +270, 0x82f8878948f40d4c +271, 0x4c04d9cdd6135c17 +272, 0xdf046948d86b3b93 +273, 0x2f0dec84f403fe40 +274, 0xa61954fb71e63c0d +275, 0x616d8496f00382e8 +276, 0x162c622472746e27 +277, 0x43bcfe48731d2ceb +278, 0xff22432f9ff16d85 +279, 0xc033ed32bb0ad5a4 +280, 0x5d3717cc91c0ce09 +281, 0x7a39a4852d251075 +282, 0x61cd73d71d6e6a6 +283, 0xe37e2ea4783ab1a5 +284, 0x60e1882162579ea8 +285, 0x9258ec33f1a88e00 +286, 0x24b32acf029f0407 +287, 0x1410fc9aea6d3fac +288, 0x6054cf2a3c71d8f7 +289, 0x82f7605157a66183 +290, 0x3b34c1c0dff9eac5 +291, 0xfebe01b6d5c61819 +292, 0x7372187c68b777f2 +293, 0xc6923812cda479f0 +294, 0x386613be41b45156 +295, 0x92cfebe8cc4014b +296, 0x8e13c4595849828b +297, 0x90e47390d412291f +298, 0x6b21a1d93d285138 +299, 0xbf5b1f5922f04b12 +300, 0x21e65d1643b3cb69 +301, 0xf7683b131948ac3c +302, 0xe5d99fc926196ed2 +303, 0x7b138debbec90116 +304, 0x8a2650a75c2c2a5c +305, 0x20689a768f9b347b +306, 0xdfa2900cfb72dc6e +307, 0x98959c3855611cc2 +308, 0x5fdb71b89596cc7c +309, 0x1c14ac5c49568c7b +310, 0x958c4293016091fe +311, 0x7484522eb0087243 +312, 0xc4018dfb34fc190f +313, 0xca638567e9888860 +314, 0x102cd4805f0c0e89 +315, 0xcc3bc438e04548f8 +316, 0xb808944bb56ea5be +317, 
0xffd4778dbf945c57 +318, 0xfe42617784c0233b +319, 0x3eccbfeae9b42d3c +320, 0xd9f1b585fd0bfa60 +321, 0x5c063d1b2705d5dd +322, 0x8e8bec3519941b64 +323, 0x9e94c36cbec2a42 +324, 0x1cd19f5b64ffd3ad +325, 0x9632e3aebfc68e66 +326, 0x98960c2d9da4ae45 +327, 0xb76994b1f2bbfc1f +328, 0xca184a737d3971cc +329, 0x964d31b07183adfb +330, 0xe9e0ff351cd276d4 +331, 0xb5747c860b05bbe4 +332, 0x5549ddc3bd3862e2 +333, 0x495496677b27873b +334, 0x53910baa26e3ea18 +335, 0xaa07a07ad0a688d3 +336, 0xbb43bd1f09ecdb1e +337, 0xe2ebc105699dd84 +338, 0x6e815a2729584035 +339, 0x2caab1713b17948a +340, 0x43d39d209fa41c90 +341, 0xfe3e71089d5d1c3a +342, 0xa778646c32f81177 +343, 0x8d42bfb86e6e92d5 +344, 0x175571f70b4fcfbe +345, 0x2a66a6fe10dc3b5b +346, 0xd9545e85235ca709 +347, 0x5642781c77ced48a +348, 0x24facc40b72ccd09 +349, 0xa800fbacce33f6f8 +350, 0x675f58a0ff19fba +351, 0x35aedf57bb5cde1b +352, 0xe5535a6b63f6d068 +353, 0x84dffd0102aaa85d +354, 0x621faad65467aaa7 +355, 0x596ad85b556b112f +356, 0x837545fff8894c7a +357, 0x3d9a4ae1356bc6a6 +358, 0xcd8b7153205d4ad0 +359, 0x98afdd40f1ed09a6 +360, 0xa38b2dc55a5cf87f +361, 0x484aecce2b6838bc +362, 0x6af05c26bdab18d9 +363, 0xf418b7399dcf2e4b +364, 0x1cfa38789b0d2445 +365, 0xfbed23c34166ee67 +366, 0x38e6820039e4912a +367, 0x1fe94911e963591e +368, 0x1291c79aee29ad70 +369, 0x65eccfc89506f963 +370, 0x7d14de3b2f55b1f6 +371, 0x82eb79c36cd2a739 +372, 0x41ffe3b75ea0def5 +373, 0x9eba9156470a51d9 +374, 0xd17c00b981db37d1 +375, 0xf688769a75601aa7 +376, 0xbcf738e9e03d571e +377, 0x14712e56df8f919b +378, 0xab14e227d156e310 +379, 0xf53d193e993e351e +380, 0x857fae46bd312141 +381, 0xc2dd71e41b639966 +382, 0x74f8b987a3d00ad1 +383, 0x5bce8526dc527981 +384, 0x94910926c172a379 +385, 0x503c45557688a9d5 +386, 0x244d03834e05807f +387, 0x6e014cbab9c7a31f +388, 0xae544c638530facf +389, 0x9b853aaaf9cbc22d +390, 0xfb42ab7024d060ed +391, 0x74cc3fba0dfd7ff2 +392, 0x24ec9e8f62144ad5 +393, 0x72f082954307bbe7 +394, 0x36feda21bbf67577 +395, 0x3222191611b832f1 +396, 0xd0584e81bcac8b0b +397, 0xdce8d793ef75e771 +398, 0x978824c6c2578fc +399, 0x6e8f77503b3c2ee4 +400, 0xc85d2d86fecf5d03 +401, 0x3d35b4a5d4d723c4 +402, 0xd3987dfd4727fff3 +403, 0xd3cde63fb6a31add +404, 0xf6699e86165bdaeb +405, 0x9d60ba158ec364c4 +406, 0x920c3c18b346bfc9 +407, 0x770fd1fdfbc236ca +408, 0x45998cfc5fc12ddd +409, 0xd74a3454e888834b +410, 0xbf2aa68081a4a28f +411, 0xea41b26a6f1da1b3 +412, 0x5560a2d24b9d5903 +413, 0xe3791f652a228d8b +414, 0x365116d3b5a8520c +415, 0xb1b2bd46528f8969 +416, 0xfcfe14943ef16ae7 +417, 0xf4d43425e8a535dc +418, 0xe6cf10a78782a7e0 +419, 0x9c7ac0de46556e3e +420, 0xc667ae0856eed9ef +421, 0x47dbb532e16f9c7e +422, 0xdf4785a5d89ee82e +423, 0xbd014925ce79dbcf +424, 0xea0d663fb58fa5be +425, 0x51af07d5cc3821fb +426, 0x27a1bdcdc4159a9d +427, 0x520c986c59b1e140 +428, 0x50b73fd9bacd5b39 +429, 0xae5240641f51e4f3 +430, 0x71faecc164ed9681 +431, 0xda95aa35529a7ee +432, 0xe25ba29b853c1c6d +433, 0x9871a925cda53735 +434, 0xde481ad8540e114d +435, 0xa2997f540e8abca0 +436, 0xc9683c5035e28185 +437, 0x1082471b57182bac +438, 0xbd3ecf0f0b788988 +439, 0xf479760776fbb342 +440, 0x3730929200d91f44 +441, 0xc1762d79ae72809c +442, 0xfaa0a4c7b1686cb3 +443, 0xd581e6d55afdafcd +444, 0x6cf57bdfba2dcf6d +445, 0xdef79d9fe6a5bcef +446, 0x13ed376e18132bd3 +447, 0xbe67efd72defa2a +448, 0x5acc176c468966ea +449, 0x8b35b626af139187 +450, 0x446de3fac0d973ac +451, 0xe1d49e06dc890317 +452, 0x817bc3fd21fc09b7 +453, 0xb71c3958a13d5579 +454, 0x8746e010f73d7148 +455, 0x1b61c06009922e83 +456, 0xba17e62e6b092316 +457, 0x1375fa23c4db8290 +458, 0x3f071230f51245a6 +459, 
0x51c99a086a61cd13 +460, 0x5f0f2ae78589e1fd +461, 0x604834e114bbbc27 +462, 0x5eb2a7a34814e9a9 +463, 0x77a6907f386bf11e +464, 0x99525de2bd407eeb +465, 0xb818348c57b3b98f +466, 0x25f5f9e702fbe78d +467, 0x8f66669e6f884473 +468, 0x1e47d46e2af4f919 +469, 0xf6a19df846476833 +470, 0xff00c67bcd06621f +471, 0xe3dfe069795d72d8 +472, 0x8affc88b2fea4d73 +473, 0x66df747e5f827168 +474, 0xf368ec338d898a0e +475, 0x9e1f1a739c5984a2 +476, 0x46a1c90e1ca32cbc +477, 0xc261bc305ed8d762 +478, 0x754d7949f7da9e72 +479, 0x4c8fbbb14ef47b17 +480, 0xccbdc67a3848d80d +481, 0x3c25e6f58bae751d +482, 0x7078b163b936d9b6 +483, 0x440e27463c134ecf +484, 0x6c83ee39f324db0f +485, 0x27cf901b22aea535 +486, 0x57262dec79a3f366 +487, 0x91db09f1dbb524fb +488, 0xd7436eefba865df2 +489, 0x16c86b0a275a3f43 +490, 0x689493e6681deaa9 +491, 0x7e1dc536c1a9ac42 +492, 0x1145beac3ac7f5cc +493, 0x3d05e211a104b2b0 +494, 0x4f9e77ced3c52f44 +495, 0x53de1369354add72 +496, 0x1fb60f835f47cdeb +497, 0x6ab36f089e40c106 +498, 0xaabffcb0d3d04c7 +499, 0xaa399686d921bd25 +500, 0x2bf8dd8b6d6fa7f0 +501, 0x1ddbf4e124329613 +502, 0x466a740241466a72 +503, 0x98d7381eb68a761 +504, 0x817691510bc4857a +505, 0x8837622c0171fe33 +506, 0xcba078873179ee16 +507, 0x13adad1ab7b75af4 +508, 0x3bac3f502428840c +509, 0xbeb3cce138de9a91 +510, 0x30ef556e40b5f0b4 +511, 0x19c22abdf3bbb108 +512, 0x977e66ea4ddc7cf +513, 0x9f4a505f223d3bf3 +514, 0x6bc3f42ac79ec87b +515, 0x31e77712158d6c23 +516, 0x6d8de4295a28af0d +517, 0xee1807dbda72adb7 +518, 0xda54140179cd038f +519, 0x715aa5cdac38e062 +520, 0x5a7e55e99a22fa16 +521, 0xf190c36aa8edbe4f +522, 0xccadd93a82c1d044 +523, 0x7070e6d5012c3f15 +524, 0x50a83341a26c1ba5 +525, 0x11bca7cc634142e5 +526, 0x623a0d27867d8b04 +527, 0x75c18acff54fbf6e +528, 0x455ae7d933497a6f +529, 0xf624cf27d030c3d3 +530, 0x7a852716f8758bac +531, 0xe7a497ac1fa2b5b4 +532, 0xf84f097498f57562 +533, 0xc4bb392f87f65943 +534, 0x618e79a5d499fbfb +535, 0xb3c0b61d82b48b8 +536, 0x4750a10815c78ea7 +537, 0x9cf09cca3ddece69 +538, 0x2a69f1c94cc901a2 +539, 0x347a0e446e1ce86d +540, 0xb06f3a5a5ab37bb1 +541, 0x8035bd0713d591db +542, 0x539c9637042c3a1f +543, 0xd7ba4dc6b273cbd7 +544, 0x12f3f99933444f85 +545, 0x4a9517b9783fb9a4 +546, 0x6422b2ea95093bc5 +547, 0x3a5ecff0f996c2a6 +548, 0x31de504efc76a723 +549, 0x7ccb7c5233c21a9f +550, 0xc687d9e6ce4186e8 +551, 0x6e40769d6940376a +552, 0xf51207314f1f7528 +553, 0x67ee3acb190865e3 +554, 0xe08d586270588761 +555, 0xe387fa489af1a75c +556, 0x73414a52d29d8375 +557, 0x671a38191cf2a357 +558, 0xe00fb25b1aa54008 +559, 0x11a0610e22cf549b +560, 0xc90cc865d57c75be +561, 0x90d0863cc15f2b79 +562, 0x8b3e60d32ebcb856 +563, 0xb28cc55af621e04a +564, 0xcf60bd3cb2a5ab1d +565, 0x212cb5d421948f86 +566, 0xee297b96e0a3363f +567, 0x4e9392ff998760d1 +568, 0x61940c8d0105ba3e +569, 0x14ebcbae72a59a16 +570, 0xdf0f39a3d10c02af +571, 0xfc047b2b3c1c549d +572, 0x91718b5b98e3b286 +573, 0x9ea9539b1547d326 +574, 0x7a5a624a89a165e6 +575, 0x145b37dcaa8c4166 +576, 0x63814bbb90e5616c +577, 0xc4bc3ca6c38bb739 +578, 0x853c3a61ddc6626c +579, 0xa7ce8481c433829a +580, 0x8aff426941cc07b +581, 0x2dc3347ca68d8b95 +582, 0xce69f44f349e9917 +583, 0x2fa5cb8aca009b11 +584, 0xf26bb012115d9aca +585, 0xafa01c2f2d27235a +586, 0xabcba21f1b40305e +587, 0xfec20c896c0c1128 +588, 0xc5f7a71ebacadfa0 +589, 0xc8479ad14bab4eef +590, 0xad86ec9a3e7d3dc +591, 0xbbecd65292b915c5 +592, 0xb1f9e28149e67446 +593, 0x708d081c03dad352 +594, 0xaa8a84dbd1de916c +595, 0x9aa3efb29ba9480b +596, 0xd3c63969ff11443e +597, 0x1e9e9ac861315919 +598, 0x4fe227f91e66b41d +599, 0xefc0212d43d253ab +600, 0x98341437727c42d1 +601, 
0x5ea85c0fe9008adc +602, 0x7891b15faa808613 +603, 0x32db2d63989aacfd +604, 0xc92f7f28e88fd7bc +605, 0x3513545eb6549475 +606, 0x49abe0082906fbf8 +607, 0xcee1e1a6551e729c +608, 0x38556672b592a28e +609, 0xc3e61409c4ec2d45 +610, 0x96c67ce2995a0fd4 +611, 0x9b9b0cada870293 +612, 0x82d6dd5dada48037 +613, 0xeea4f415299f1706 +614, 0x371107895f152ab3 +615, 0x2f6686159f4396bb +616, 0x61005a2ff3680089 +617, 0x9d2f2cafb595e6b6 +618, 0x4a812a920f011672 +619, 0x317554d3a77385d7 +620, 0x24c01086727eb74b +621, 0xa15ff76d618a3a9e +622, 0x2121bfd983859940 +623, 0x384d11577eea8114 +624, 0xab0f4299f3c44d88 +625, 0x136fd4b07cfa14d9 +626, 0x665fe45cbfaa972a +627, 0x76c5a23398a314e9 +628, 0x5507036357ccda98 +629, 0xd9b8c5ac9dce632b +630, 0x366bc71781da6e27 +631, 0xdd2b2ba1d6be6d15 +632, 0xf33ed0d50ea6f1a6 +633, 0xf05a9b1900174c18 +634, 0x3947e1419e2787cf +635, 0x6c742b1e029637d0 +636, 0x32aba12196a0d2e8 +637, 0x1b94aab2e82e7df +638, 0x68b617db19229d6 +639, 0x6c88a95ac0a33f98 +640, 0xdc9b95fd60c2d23e +641, 0x999e6971d3afc8b3 +642, 0x7071fc6ad8b60129 +643, 0x41a8184ef62485f6 +644, 0xb68e0605c7d5e713 +645, 0x272b961a1d1bbee +646, 0x23f04e76446187b0 +647, 0x999a7a8f6d33f260 +648, 0xdbd6318df4f168d +649, 0x8f5e74c84c40711e +650, 0x8ccc6b04393a19d6 +651, 0xadcd24b782dd8d3d +652, 0x1a966b4f80ef9499 +653, 0xcb6d4f9ff5a280f0 +654, 0x8095ff2b8484018a +655, 0xbfd3389611b8e771 +656, 0x278eb670b7d12d51 +657, 0x31df54ca8d65c20f +658, 0x121c7fb38af6985e +659, 0x84fb94f38fe1d0a +660, 0x15ae8af1a6d48f02 +661, 0x8d51e4a62cba1a28 +662, 0x58e6b6b3ae0f9e42 +663, 0x9365a0a85669cc99 +664, 0xe56e92f65a2106df +665, 0x68fa299c66b428fc +666, 0x55e51bb0b0a832c6 +667, 0x48b565293f9bc494 +668, 0x73d8132b1cbabb57 +669, 0x9178ac3926c36cbc +670, 0xe2f22c7b28ea5e0f +671, 0x6af45322a99afb12 +672, 0x59072fcb486a46f4 +673, 0x166b717b08d3d8e +674, 0xd4e627a2dfacc4ab +675, 0x33dad6f2921dedaa +676, 0x4b13b806834a6704 +677, 0xe5f7971b398ed54d +678, 0x20bfae65e3e6899b +679, 0x881dab45d2b4fc98 +680, 0x6f248126b5b885be +681, 0x7aeb39e986f9deee +682, 0xf819f9574b8c3a03 +683, 0xff3d93ed6bd9781a +684, 0x3a31e2e24a2f6385 +685, 0x7888a88f8944a5e +686, 0x4faee12f5de95537 +687, 0x7f3e4efccdb2ed67 +688, 0x91e0f2fc12593af5 +689, 0xb5be8a4b886a40d3 +690, 0x998e8288ac3a9b1b +691, 0x85c48fc8b1349e7b +692, 0xf03af25222d8fae5 +693, 0x45467e805b242c2e +694, 0xa2350db793dbebdc +695, 0xfebe5b61d2174553 +696, 0xa9a331f02c54ad0b +697, 0xe94e49a0f905aef3 +698, 0xe54b4c812b55e3da +699, 0xdc454114c6bc0278 +700, 0x99c7765ab476baa2 +701, 0xccd9590e47fdff7c +702, 0xfa2bcae7afd6cb71 +703, 0x2c1bf1a433a6f0f7 +704, 0x53882c62ff0aab28 +705, 0x80ac900f844dacc +706, 0x27ba8eb5c4a44d54 +707, 0x78f3dfb072a46004 +708, 0x34e00e6ec629edce +709, 0x5b88d19b552d1fbd +710, 0xe4df375dc79df432 +711, 0x37446312ff79c3b4 +712, 0xb72256900a95fa6d +713, 0x89f3171fbdff0bfc +714, 0xd37885b048687eba +715, 0xbb033213b283b60e +716, 0xcf10b523ee769030 +717, 0xbf8070b6cfd7bafb +718, 0xb7194da81fd1763b +719, 0xbfc303de88e68d24 +720, 0xb949c7a5aea8a072 +721, 0x844216e7bae90455 +722, 0xf1e7f20840049a33 +723, 0x96e3263ad0cae794 +724, 0x10772d51f6e9ba49 +725, 0xcea24fccae9d23b3 +726, 0xefd378add9dde040 +727, 0xba0c7c5275805976 +728, 0x2e2a04608f64fa8c +729, 0xafb42ec43aa0fa7 +730, 0x30444b84241ac465 +731, 0x19ef384bac4493ab +732, 0xfd1ac615d3ba5ab9 +733, 0x6cc781ba38643aff +734, 0x30ff27ebed875cfd +735, 0xee1a261aca97ae62 +736, 0xc5a92715202bc940 +737, 0x9e6ec76f93c657ff +738, 0x9b9fd55f55191ca5 +739, 0x654b13af008d8f03 +740, 0x1b7f030d9bd0719f +741, 0x6d622e277550cb7f +742, 0x3f8ee6b8830d0538 +743, 
0x475462bcd0de190f +744, 0x21380e8a513bdbcd +745, 0x629bf3771b1bd7a4 +746, 0x3b5fd0b62c353709 +747, 0xf95634006ec3867e +748, 0x1be8bb584a6653c2 +749, 0x2e2d3cfa85320ce8 +750, 0x5b904b692252d11d +751, 0x4bfd76631d527990 +752, 0xc019571ca2bec4a0 +753, 0xf2eb730cea4cd751 +754, 0xd4571d709530191a +755, 0x3b5bd947061f5a7d +756, 0x56e2322cd2d1d1c0 +757, 0xa8830a5f62019f83 +758, 0x901d130c1b873cf3 +759, 0xb5dd29b363c61299 +760, 0xbb710bec3a17b26d +761, 0xc0c464daca0f2328 +762, 0x4dc8055df02650f5 +763, 0x3d3cd9bbe8b957af +764, 0xdb79612c2635b828 +765, 0xe25b3a8ad8fa3040 +766, 0xd5875c563cbf236b +767, 0x46861c1c3849c9bc +768, 0xf84bf1a2814dff43 +769, 0x6d8103902e0ad5e6 +770, 0x99f51c9be8af79e5 +771, 0xb0bfa8540ff94a96 +772, 0xaf45109a4e06f7d0 +773, 0x281df3e55aea9bfc +774, 0x6a1155ca8aa40e60 +775, 0x754d32c5de1f5da +776, 0xce1eafb1c6ca916f +777, 0xc4f2185fa8577bd1 +778, 0x4a188e9bdb5501d9 +779, 0xbb14107e99bd5550 +780, 0xf0381d8425ec2962 +781, 0x213dbfffc16ec4f6 +782, 0x7a999c5a28ea65bc +783, 0x23758c2aba7709ff +784, 0xea7e4bb205e93b44 +785, 0x9c5a31e53911c658 +786, 0x7f04d0bbdc689ddc +787, 0xe3ed89ab8d78dcb3 +788, 0x73c38bfb43986210 +789, 0x740c7d787eb8e158 +790, 0x5284fafdfb3fb9ec +791, 0x2e91a58ac1fb1409 +792, 0xb94a600bf0a09af3 +793, 0x533ea4dbe07d81dd +794, 0x48c3f1a736b3c5fd +795, 0x56ae3499fa8720ce +796, 0x526f2def663ca818 +797, 0x2f085759c65665c4 +798, 0xf715f042c69e0db4 +799, 0x110889c399231e60 +800, 0x64584a244866f3a0 +801, 0xf02ec101a39405d3 +802, 0xe73cd5e9a7f17283 +803, 0xfea64869e7028234 +804, 0x97559974ad877891 +805, 0xc8695aba1dc9f2e5 +806, 0x7b62b76ffc2264ec +807, 0xf5e1df172ec5ccd +808, 0xafaeb68765e443bd +809, 0xd3870eb2e8337623 +810, 0x4f944d684138fb39 +811, 0x6977c575038916ad +812, 0x8ada1a225df95a56 +813, 0xe4044c6c58d15e54 +814, 0x4e5121366681cf2 +815, 0xcf8640b079357b0d +816, 0xcd5b157d44106fa3 +817, 0x9d7a5481279e25a1 +818, 0xe10e9db41fb4b34f +819, 0x1052607be1eadff9 +820, 0x3403d67232fe2265 +821, 0xac9358f498c34afc +822, 0x820172da0dc39c9 +823, 0xe186e91a3b826b6a +824, 0x1a838e2a40284445 +825, 0x1870b617ebd7bce6 +826, 0xcb7cba4424be1ed7 +827, 0x6a2e56e40fdf9041 +828, 0xace93bbe108f97ee +829, 0xfeb9bc74ac41ca08 +830, 0x8cb2d05b0f6a1f51 +831, 0x73792309f3fac0a9 +832, 0x2507343d431308ca +833, 0xd0ea1197be615412 +834, 0xb1870812f1d2fa94 +835, 0x6d067b6935dcd23e +836, 0xaf161014e5492c31 +837, 0xd4be0dce97064be4 +838, 0xf8edfe3fc75c20f1 +839, 0x894751dc442d2d9c +840, 0xb4a95f6a6663456c +841, 0x74e93162e2d805db +842, 0x784bc5f3a7a2f645 +843, 0xd234d7c5b0582ea9 +844, 0x491f28d0ab6cb97c +845, 0xa79419e5cf4336c3 +846, 0x66b00141978c849 +847, 0xa7ddbd64698d563f +848, 0xefc33a4a5d97d4b2 +849, 0x95075514a65aebdc +850, 0x40eca5b3e28cd25e +851, 0x90ec7d00e9c9e35d +852, 0x63e84104d5af417a +853, 0xdaca0ea32df5744 +854, 0x7ed54f2587795881 +855, 0x5a73931760af4ee0 +856, 0x857d1a185a3081ec +857, 0x6eac2aabe67fb463 +858, 0xd1f86155d8bfc55f +859, 0x6d56398f3e7877ef +860, 0x7642f61dfc62bc17 +861, 0x1d76b12843246ffa +862, 0xde7817809b8a31d0 +863, 0xbcca9cd091198f9d +864, 0xf71ca566dddcdfd4 +865, 0xea4386ee8b61d082 +866, 0xe351729d6010bac4 +867, 0xfd685d8a49910dd6 +868, 0xa7a20ea6c686bd3 +869, 0x1cdaf82f4dbd5536 +870, 0xa3da1d1e77dda3e0 +871, 0x4f723b3818ff8b2a +872, 0x1290669eca152469 +873, 0xb54158b52d30651b +874, 0xc06b74f2c7f0fee +875, 0x7d5840bcbf702379 +876, 0x19fa4c1254a82ed +877, 0xcf5ce090ad0b38ea +878, 0xd4edd6ac9437e16d +879, 0xc6ebf25eb623b426 +880, 0xd2b6dbdf00d8fea2 +881, 0x949cf98391cc59e1 +882, 0x380a0c7d0356f7b3 +883, 0x8ffefe32465473bf +884, 0x637b6542d27c861e +885, 
0x347d12ffc664ecd9 +886, 0xea66e3a0c75a6b37 +887, 0xc3aff6f34fb537a1 +888, 0x67bdf3579959bf49 +889, 0xa17a348e3a74b723 +890, 0x93c9ef26ddadd569 +891, 0x483909059a5ac0b2 +892, 0x26ec9074b56d5a0d +893, 0x6216000d9a48403a +894, 0x79b43909eab1ec05 +895, 0xe4a8e8d03649e0de +896, 0x1435d666f3ccdc08 +897, 0xb9e22ba902650a0e +898, 0x44dffcccc68b41f8 +899, 0x23e60dcc7a559a17 +900, 0x6fd1735eacd81266 +901, 0xf6bda0745ea20c8e +902, 0x85efcaefe271e07c +903, 0x9be996ee931cef42 +904, 0xe78b41c158611d64 +905, 0xd6201df605839830 +906, 0x702e8e47d2769fd3 +907, 0xb8dcf70e18cf14c +908, 0xac2690bab1bf5c17 +909, 0x92b166b71205d696 +910, 0xb0e73c795fc6df28 +911, 0x4bf2322c8b6b6f0d +912, 0xa842fbe67918cea0 +913, 0xb01a8675d9294e54 +914, 0xfbe3c94f03ca5af2 +915, 0x51a5c089600c441f +916, 0x60f0fd7512d85ded +917, 0xef3113d3bc2cadb0 +918, 0xe1ea128ade300d60 +919, 0xde413b7f8d92d746 +920, 0xfc32c6d43f47c5d8 +921, 0x69d551d8c2b54c68 +922, 0xb9bc68c175777943 +923, 0xb9c79c687f0dae90 +924, 0xd799421ef883c06e +925, 0xbff553ca95a29a3e +926, 0xfc9ffac46bd0aca1 +927, 0x4f6c3a30c80c3e5a +928, 0x8b7245bc6dc4a0a +929, 0xaf4e191a4575ff60 +930, 0x41218c4a76b90f0b +931, 0x986052aa51b8e89b +932, 0x284b464ed5622f9 +933, 0xba6bded912626b40 +934, 0x43cad3ed7443cb5c +935, 0x21641fa95725f328 +936, 0x6d99d6d09d755822 +937, 0x8246dfa2d4838492 +938, 0xd2ee70b9056f4726 +939, 0x87db515a786fbb8b +940, 0x7c63e4c1d7786e7d +941, 0xd1a9d548f10b3e88 +942, 0xa00856475f3b74c9 +943, 0x7f1964ce67148bf4 +944, 0x446650ec71e6018c +945, 0xb1805ca07d1b6345 +946, 0x869c0a1625b7271b +947, 0x79d6da06ce2ecfe2 +948, 0xec7b3cafc5e3c85f +949, 0x1745ce21e39f2c3d +950, 0xd9a0a7af6ee97825 +951, 0x680e0e52a6e11d5c +952, 0xd86b3f344ff7f4cd +953, 0xab56af117c840b9c +954, 0x5c5404c7e333a10e +955, 0x4f1eb462f35d990d +956, 0xf857605a5644458e +957, 0x3bb87cdf09262f86 +958, 0xd57295baf6da64b +959, 0xb5993f48472f2894 +960, 0x7d1a501608c060b2 +961, 0x45fabe2d0e54adf0 +962, 0xbb41c3806afb4efe +963, 0xbfbc506049424c8 +964, 0xb7dd6b67f2203344 +965, 0x389ce52eff883b81 +966, 0xe259c55c0cf6d000 +967, 0x70fb3e3824f7d213 +968, 0x9f36d5599ed55f4b +969, 0xd14cf6f12f83c4f7 +970, 0x570a09d56aaa0b66 +971, 0x8accafd527f4598 +972, 0xa42d64c62175adfd +973, 0xddb9c6a87b6e1558 +974, 0xd80b6c69fa1cde2a +975, 0x44ebaac10082207b +976, 0xf99be8889552fa1a +977, 0x38253cd4b38b5dc5 +978, 0x85356c8b02675791 +979, 0xbf91677b2ecdcf55 +980, 0x2316cb85e93f366e +981, 0x9abf35954db6b053 +982, 0xf49f7425e086b45a +983, 0x8f5b625e074afde2 +984, 0xe0d614559791b080 +985, 0xbf7b866afab2a525 +986, 0xde89d7e1641a6412 +987, 0x1d10687d8ae5b86f +988, 0x1f034caa0e904cbd +989, 0x2086357aec8a7a2c +990, 0x22dc476b80c56e1e +991, 0xbef5a73cc0e3a493 +992, 0xddfa3829b26ed797 +993, 0x8917a87ec3d4dc78 +994, 0xfeabe390628c365e +995, 0x581b0c4f6fb2d642 +996, 0x1ef8c590adbf5b9a +997, 0x4d8e13aac0cce879 +998, 0xfe38f71e5977fad0 +999, 0x1f83a32d4adfd2ed diff --git a/python/numpy/random/tests/data/philox-testset-2.csv b/python/numpy/random/tests/data/philox-testset-2.csv new file mode 100644 index 000000000..69d24c38c --- /dev/null +++ b/python/numpy/random/tests/data/philox-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x399e5b222b82fa9 +1, 0x41fd08c1f00f3bc5 +2, 0x78b8824162ee4d04 +3, 0x176747919e02739d +4, 0xfaa88f002a8d3596 +5, 0x418eb6f592e6c227 +6, 0xef83020b8344dd45 +7, 0x30a74a1a6eaa064b +8, 0x93d43bf97a490c3 +9, 0xe4ba28b442194cc +10, 0xc829083a168a8656 +11, 0x73f45d50f8e22849 +12, 0xf912db57352824cc +13, 0xf524216927b12ada +14, 0x22b7697473b1dfda +15, 0x311e2a936414b39f +16, 0xb905abfdcc425be6 +17, 0x4b14630d031eac9c 
+18, 0x1cf0c4ae01222bc8 +19, 0xa6c33efc6e82ef3 +20, 0x43b3576937ba0948 +21, 0x1e483d17cdde108a +22, 0x6722784cac11ac88 +23, 0xee87569a48fc45d7 +24, 0xb821dcbe74d18661 +25, 0xa5d1876ef3da1a81 +26, 0xe4121c2af72a483 +27, 0x2d747e355a52cf43 +28, 0x609059957bd03725 +29, 0xc3327244b49e16c5 +30, 0xb5ae6cb000dde769 +31, 0x774315003209017 +32, 0xa2013397ba8db605 +33, 0x73b228945dbcd957 +34, 0x801af7190375d3c0 +35, 0xae6dca29f24c9c67 +36, 0xd1cc0bcb1ca26249 +37, 0x1defa62a5bd853be +38, 0x67c2f5557fa89462 +39, 0xf1729b58122fab02 +40, 0xb67eb71949ec6c42 +41, 0x5456366ec1f8f7d7 +42, 0x44492b32eb7966f5 +43, 0xa801804159f175f1 +44, 0x5a416f23cac70d84 +45, 0x186f55293302303d +46, 0x7339d5d7b6a43639 +47, 0xfc6df38d6a566121 +48, 0xed2fe018f150b39e +49, 0x508e0b04a781fa1b +50, 0x8bee9d50f32eaf50 +51, 0x9870015d37e63cc +52, 0x93c6b12309c14f2d +53, 0xb571cf798abe93ff +54, 0x85c35a297a88ae6e +55, 0x9b1b79afe497a2ae +56, 0x1ca02e5b95d96b8d +57, 0x5bb695a666c0a94a +58, 0x4e3caf9bbab0b208 +59, 0x44a44be1a89f2dc1 +60, 0x4ff37c33445758d1 +61, 0xd0e02875322f35da +62, 0xfd449a91fb92646b +63, 0xbe0b49096b95db4d +64, 0xffa3647cad13ef5d +65, 0x75c127a61acd10c8 +66, 0xd65f697756f5f98e +67, 0x3ced84be93d94434 +68, 0x4da3095c2fc46d68 +69, 0x67564e2a771ee9ac +70, 0x36944775180644a9 +71, 0xf458db1c177cdb60 +72, 0x5b58406dcd034c8 +73, 0x793301a3fdab2a73 +74, 0x1c2a1a16d6db6128 +75, 0xc2dacd4ddddbe56c +76, 0x2e7d15be2301a111 +77, 0xd4f4a6341b3bcd18 +78, 0x3622996bbe6a9e3b +79, 0xaf29aa9a7d6d47da +80, 0x6d7dbb74a4cd68ae +81, 0xc260a17e0f39f841 +82, 0xdee0170f2af66f0d +83, 0xf84ae780d7b5a06e +84, 0x8326247b73f43c3a +85, 0xd44eef44b4f98b84 +86, 0x3d10aee62ec895e3 +87, 0x4f23fef01bf703b3 +88, 0xf8e50aa57d888df6 +89, 0x7da67411e3bef261 +90, 0x1d00f2769b2f96d7 +91, 0x7ef9a15b7444b84e +92, 0xcfa16436cc2b7e21 +93, 0x29ab8cfac00460ff +94, 0x23613de8608b0e70 +95, 0xb1aa0980625798a8 +96, 0xb9256fd29db7df99 +97, 0xdacf311bf3e7fa18 +98, 0xa013c8f9fada20d8 +99, 0xaf5fd4fe8230fe3e +100, 0xd3d59ca55102bc5c +101, 0x9d08e2aa5242767f +102, 0x40278fe131e83b53 +103, 0x56397d03c7c14c98 +104, 0xe874b77b119359b3 +105, 0x926a1ba4304ab19f +106, 0x1e115d5aa695a91d +107, 0xc6a459df441f2fe3 +108, 0x2ca842bc1b0b3c6a +109, 0x24c804cf8e5eed16 +110, 0x7ca00fc4a4c3ebd3 +111, 0x546af7cecc4a4ba6 +112, 0x8faae1fa18fd6e3 +113, 0x40420b0089641a6a +114, 0x88175a35d9abcb83 +115, 0xf7d746d1b8b1357c +116, 0x7dae771a651be970 +117, 0x2f6485247ee4df84 +118, 0x6883702fab2d8ec5 +119, 0xeb7eea829a67f9a6 +120, 0x60d5880b485562ed +121, 0x7d4ca3d7e41a4e7e +122, 0xbb7fef961ab8de18 +123, 0x3b92452fb810c164 +124, 0x5f4b4755348b338 +125, 0xca45a715a7539806 +126, 0xc33efd9da5399dd +127, 0x593d665a51d4aedd +128, 0x75d6b8636563036b +129, 0x7b57caa55e262082 +130, 0x4ede7427969e0dd5 +131, 0xc3f19b6f78ea00b +132, 0xeea7bab9be2181ea +133, 0x652c45fe9c420c04 +134, 0x14ba9e3d175670ee +135, 0xd2ad156ba6490474 +136, 0x4d65ae41065f614 +137, 0x6ff911c8afa28eb1 +138, 0xedc2b33588f3cb68 +139, 0x437c8bc324666a2f +140, 0x828cee25457a3f0 +141, 0x530c986091f31b9b +142, 0x2f34671e8326ade7 +143, 0x4f686a8f4d77f6da +144, 0xa4c1987083498895 +145, 0xbce5a88b672b0fb1 +146, 0x8476115a9e6a00cc +147, 0x16de18a55dd2c238 +148, 0xdf38cf4c416232bc +149, 0x2cb837924e7559f3 +150, 0xfad4727484e982ed +151, 0x32a55d4b7801e4f +152, 0x8b9ef96804bd10a5 +153, 0xa1fd422c9b5cf2a9 +154, 0xf46ddb122eb7e442 +155, 0x6e3842547afa3b33 +156, 0x863dee1c34afe5c4 +157, 0x6a43a1935b6db171 +158, 0x1060a5c2f8145821 +159, 0xf783ec9ed34c4607 +160, 0x1da4a86bf5f8c0b0 +161, 0x4c7714041ba12af8 +162, 0x580da7010be2f192 +163, 
0xad682fe795a7ea7a +164, 0x6687b6cb88a9ed2c +165, 0x3c8d4b175517cd18 +166, 0xe9247c3a524a6b6b +167, 0x337ca9cfaa02658 +168, 0xed95399481c6feec +169, 0x58726a088e606062 +170, 0xfe7588a5b4ee342a +171, 0xee434c7ed146fdee +172, 0xe2ade8b60fdc4ba5 +173, 0xd57e4c155de4eaab +174, 0xdefeae12de1137cb +175, 0xb7a276a241316ac1 +176, 0xeb838b1b1df4ca15 +177, 0x6f78965edea32f6f +178, 0x18bebd264d7a5d53 +179, 0x3641c691d77005ec +180, 0xbe70ed7efea8c24c +181, 0x33047fa8d03ca560 +182, 0x3bed0d2221ff0f87 +183, 0x23083a6ffbcf38a2 +184, 0xc23eb827073d3fa5 +185, 0xc873bb3415e9fb9b +186, 0xa4645179e54147fe +187, 0x2c72fb443f66e207 +188, 0x98084915dd89d8f4 +189, 0x88baa2de12c99037 +190, 0x85c74ab238cb795f +191, 0xe122186469ea3a26 +192, 0x4c3bba99b3249292 +193, 0x85d6845d9a015234 +194, 0x147ddd69c13e6a31 +195, 0x255f4d678c9a570b +196, 0x2d7c0c410bf962b4 +197, 0x58eb7649e0aa16ca +198, 0x9d240bf662fe0783 +199, 0x5f74f6fa32d293cc +200, 0x4928e52f0f79d9b9 +201, 0xe61c2b87146b706d +202, 0xcfcd90d100cf5431 +203, 0xf15ea8138e6aa178 +204, 0x6ab8287024f9a819 +205, 0xed8942593db74e01 +206, 0xefc00e4ec2ae36dd +207, 0xc21429fb9387f334 +208, 0xf9a3389e285a9bce +209, 0xacdee8c43aae49b3 +210, 0xefc382f02ad55c25 +211, 0x1153b50e8d406b72 +212, 0xb00d39ebcc2f89d8 +213, 0xde62f0b9831c8850 +214, 0xc076994662eef6c7 +215, 0x66f08f4752f1e3ef +216, 0x283b90619796249a +217, 0x4e4869bc4227499e +218, 0xb45ad78a49efd7ed +219, 0xffe19aa77abf5f4b +220, 0xfce11a0daf913aef +221, 0x7e4e64450d5cdceb +222, 0xe9621997cfd62762 +223, 0x4d2c9e156868081 +224, 0x4e2d96eb7cc9a08 +225, 0xda74849bba6e3bd3 +226, 0x6f4621da935e7fde +227, 0xb94b914aa0497259 +228, 0xd50d03e8b8db1563 +229, 0x1a45c1ce5dca422e +230, 0xc8d30d33276f843f +231, 0xb57245774e4176b4 +232, 0x8d36342c05abbbb1 +233, 0x3591ad893ecf9e78 +234, 0x62f4717239ee0ac8 +235, 0x9b71148a1a1d4200 +236, 0x65f8e0f56dd94463 +237, 0x453b1fcfd4fac8c2 +238, 0x4c25e48e54a55865 +239, 0xa866baa05112ace2 +240, 0x7741d3c69c6e79c5 +241, 0x7deb375e8f4f7a8a +242, 0xc242087ede42abd8 +243, 0x2fa9d1d488750c4b +244, 0xe8940137a935d3d3 +245, 0x1dab4918ca24b2f2 +246, 0xe2368c782168fe3e +247, 0x6e8b2d1d73695909 +248, 0x70455ebea268b33e +249, 0x656a919202e28da1 +250, 0x5a5a8935647da999 +251, 0x428c6f77e118c13c +252, 0xa87aee2b675bb083 +253, 0x3873a6412b239969 +254, 0x5f72c1e91cb8a2ee +255, 0xa25af80a1beb5679 +256, 0x1af65d27c7b4abc3 +257, 0x133437060670e067 +258, 0xb1990fa39a97d32e +259, 0x724adc89ae10ed17 +260, 0x3f682a3f2363a240 +261, 0x29198f8dbd343499 +262, 0xdfaeeaa42bc51105 +263, 0x5baff3901b9480c2 +264, 0x3f760a67043e77f5 +265, 0x610fa7aa355a43ba +266, 0x394856ac09c4f7a7 +267, 0x1d9229d058aee82e +268, 0x19c674804c41aeec +269, 0x74cf12372012f4aa +270, 0xa5d89b353fa2f6ca +271, 0x697e4f672ac363dd +272, 0xde6f55ba73df5af9 +273, 0x679cf537510bd68f +274, 0x3dc916114ae9ef7e +275, 0xd7e31a66ec2ee7ba +276, 0xc21bebb968728495 +277, 0xc5e0781414e2adfd +278, 0x71147b5412ddd4bd +279, 0x3b864b410625cca9 +280, 0x433d67c0036cdc6 +281, 0x48083afa0ae20b1b +282, 0x2d80beecd64ac4e8 +283, 0x2a753c27c3a3ee3e +284, 0xb2c5e6afd1fe051a +285, 0xea677930cd66c46b +286, 0x4c3960932f92810a +287, 0xf1b367a9e527eaba +288, 0xb7d92a8a9a69a98e +289, 0x9f9ad3210bd6b453 +290, 0x817f2889db2dcbd8 +291, 0x4270a665ac15813c +292, 0x90b85353bd2be4dd +293, 0x10c0460f7b2d68d +294, 0x11cef32b94f947f5 +295, 0x3cf29ed8e7d477e8 +296, 0x793aaa9bd50599ef +297, 0xbac15d1190014aad +298, 0x987944ae80b5cb13 +299, 0x460aa51f8d57c484 +300, 0xc77df0385f97c2d3 +301, 0x92e743b7293a3822 +302, 0xbc3458bcfbcbb8c0 +303, 0xe277bcf3d04b4ed7 +304, 0xa537ae5cf1c9a31c +305, 
0x95eb00d30bd8cfb2 +306, 0x6376361c24e4f2dd +307, 0x374477fe87b9ea8e +308, 0x8210f1a9a039902e +309, 0xe7628f7031321f68 +310, 0x8b8e9c0888fc1d3d +311, 0x306be461fdc9e0ed +312, 0x510009372f9b56f5 +313, 0xa6e6fa486b7a027a +314, 0x9d3f002025203b5a +315, 0x7a46e0e81ecbef86 +316, 0x41e280c611d04df0 +317, 0xedcec10418a99e8a +318, 0x5c27b6327e0b9dbd +319, 0xa81ed2035b509f07 +320, 0x3581e855983a4cc4 +321, 0x4744594b25e9809d +322, 0xc737ac7c27fbd0ed +323, 0x1b523a307045433a +324, 0x8b4ce9171076f1d9 +325, 0x2db02d817cd5eec0 +326, 0x24a1f1229af50288 +327, 0x5550c0dcf583ff16 +328, 0x3587baaa122ec422 +329, 0xf9d3dc894229e510 +330, 0xf3100430d5cf8e87 +331, 0xc31af79862f8e2fb +332, 0xd20582063b9f3537 +333, 0xac5e90ac95fcc7ad +334, 0x107c4c704d5109d4 +335, 0xebc8628906dbfd70 +336, 0x215242776da8c531 +337, 0xa98002f1dcf08b51 +338, 0xbc3bdc07f3b09718 +339, 0x238677062495b512 +340, 0x53b4796f2a3c49e8 +341, 0x6424286467e22f0e +342, 0x14d0952a11a71bac +343, 0x2f97098149b82514 +344, 0x3777f2fdc425ad2 +345, 0xa32f2382938876d4 +346, 0xda8a39a021f20ae3 +347, 0x364361ef0a6ac32c +348, 0x4413eede008ff05a +349, 0x8dda8ace851aa327 +350, 0x4303cabbdcecd1ee +351, 0x2e69f06d74aa549f +352, 0x4797079cd4d9275c +353, 0xc7b1890917e98307 +354, 0x34031b0e822a4b4c +355, 0xfc79f76b566303ea +356, 0x77014adbe255a930 +357, 0xab6c43dd162f3be5 +358, 0xa430041f3463f6b9 +359, 0x5c191a32ada3f84a +360, 0xe8674a0781645a31 +361, 0x3a11cb667b8d0916 +362, 0xaedc73e80c39fd8a +363, 0xfde12c1b42328765 +364, 0x97abb7dcccdc1a0b +365, 0x52475c14d2167bc8 +366, 0x540e8811196d5aff +367, 0xa867e4ccdb2b4b77 +368, 0x2be04af61e5bcfb9 +369, 0x81b645102bfc5dfd +370, 0x96a52c9a66c6450f +371, 0x632ec2d136889234 +372, 0x4ed530c0b36a6c25 +373, 0x6f4851225546b75 +374, 0x2c065d6ba46a1144 +375, 0xf8a3613ff416551d +376, 0xb5f0fd60e9c971a9 +377, 0x339011a03bb4be65 +378, 0x9439f72b6995ded6 +379, 0xc1b03f3ef3b2292d +380, 0xad12fd221daab3ae +381, 0xf615b770f2cf996f +382, 0x269d0fdcb764172 +383, 0x67837025e8039256 +384, 0x6402831fc823fafa +385, 0x22854146a4abb964 +386, 0x7b5ad9b5a1bad7a8 +387, 0x67170e7beb6ac935 +388, 0xfc2d1e8e24adfaaa +389, 0x7ded4395345ff40d +390, 0x418981760a80dd07 +391, 0xc03bef38022c1d2 +392, 0x3a11850b26eade29 +393, 0xaa56d02c7175c5f4 +394, 0xd83b7917b9bfbff5 +395, 0x3c1df2f8fa6fced3 +396, 0xf3d6e2999c0bb760 +397, 0xc66d683a59a950e3 +398, 0x8e3972a9d73ffabf +399, 0x97720a0443edffd9 +400, 0xa85f5d2fe198444a +401, 0xfc5f0458e1b0de5e +402, 0xe3973f03df632b87 +403, 0xe151073c84c594b3 +404, 0x68eb4e22e7ff8ecf +405, 0x274f36eaed7cae27 +406, 0x3b87b1eb60896b13 +407, 0xbe0b2f831442d70a +408, 0x2782ed7a48a1b328 +409, 0xb3619d890310f704 +410, 0xb03926b11b55921a +411, 0xdb46fc44aa6a0ce4 +412, 0x4b063e2ef2e9453a +413, 0xe1584f1aeec60fb5 +414, 0x7092bd6a879c5a49 +415, 0xb84e1e7c7d52b0e6 +416, 0x29d09ca48db64dfb +417, 0x8f6c4a402066e905 +418, 0x77390795eabc36b +419, 0xcc2dc2e4141cc69f +420, 0x2727f83beb9e3c7c +421, 0x1b29868619331de0 +422, 0xd38c571e192c246f +423, 0x535327479fe37b6f +424, 0xaff9ce5758617eb3 +425, 0x5658539e9288a4e4 +426, 0x8df91d87126c4c6d +427, 0xe931cf8fdba6e255 +428, 0x815dfdf25fbee9e8 +429, 0x5c61f4c7cba91697 +430, 0xdd5f5512fe2313a1 +431, 0x499dd918a92a53cd +432, 0xa7e969d007c97dfd +433, 0xb8d39c6fc81ac0bb +434, 0x1d646983def5746c +435, 0x44d4b3b17432a60c +436, 0x65664232a14db1e3 +437, 0xda8fae6433e7500b +438, 0xbe51b94ff2a3fe94 +439, 0xe9b1bd9a9098ef9f +440, 0xfe47d54176297ef5 +441, 0xb8ab99bc03bb7135 +442, 0xcfad97f608565b38 +443, 0xf05da71f6760d9c1 +444, 0xef8da40a7c70e7b +445, 0xe0465d58dbd5d138 +446, 0xb54a2d70eb1a938 +447, 
0xfdd50c905958f2d8 +448, 0x3c41933c90a57d43 +449, 0x678f6d894c6ad0bb +450, 0x403e8f4582274e8 +451, 0x5cbbe975668df6b0 +452, 0x297e6520a7902f03 +453, 0x8f6dded33cd1efd7 +454, 0x8e903c97be8d783b +455, 0x10bd015577e30f77 +456, 0x3fcd69d1c36eab0c +457, 0xb45989f3ca198d3 +458, 0x507655ce02b491a9 +459, 0xa92cf99bb78602ce +460, 0xebfb82055fbc2f0f +461, 0x3334256279289b7a +462, 0xc19d2a0f740ee0ac +463, 0x8bb070dea3934905 +464, 0xa4ab57d3a8d1b3eb +465, 0xfee1b09bcacf7ff4 +466, 0xccc7fb41ceec41fa +467, 0xd4da49094eb5a74d +468, 0xed5c693770af02ed +469, 0x369dabc9bbfaa8e4 +470, 0x7eab9f360d054199 +471, 0xe36dbebf5ee94076 +472, 0xd30840e499b23d7 +473, 0x8678e6cb545015ff +474, 0x3a47932ca0b336e +475, 0xeb7c742b6e93d6fe +476, 0x1404ea51fe5a62a9 +477, 0xa72cd49db978e288 +478, 0xfd7bada020173dcf +479, 0xc9e74fc7abe50054 +480, 0x93197847bb66808d +481, 0x25fd5f053dce5698 +482, 0xe198a9b18cc21f4 +483, 0x5cc27b1689452d5d +484, 0x8b3657af955a98dc +485, 0xc17f7584f54aa1c0 +486, 0xe821b088246b1427 +487, 0x32b5a9f6b45b6fa0 +488, 0x2aef7c315c2bae0c +489, 0xe1af8129846b705a +490, 0x4123b4c091b34614 +491, 0x6999d61ec341c073 +492, 0x14b9a8fcf86831ea +493, 0xfd4cff6548f46c9f +494, 0x350c3b7e6cc8d7d6 +495, 0x202a5047fecafcd5 +496, 0xa82509fe496bb57d +497, 0x835e4b2608b575fe +498, 0xf3abe3da919f54ec +499, 0x8705a21e2c9b8796 +500, 0xfd02d1427005c314 +501, 0xa38458faa637f49b +502, 0x61622f2360e7622a +503, 0xe89335a773c2963b +504, 0x481264b659b0e0d0 +505, 0x1e82ae94ebf62f15 +506, 0x8ea7812de49209d4 +507, 0xff963d764680584 +508, 0x418a68bef717f4af +509, 0x581f0e7621a8ab91 +510, 0x840337e9a0ec4150 +511, 0x951ef61b344be505 +512, 0xc8b1b899feb61ec2 +513, 0x8b78ca13c56f6ed9 +514, 0x3d2fd793715a946f +515, 0xf1c04fabcd0f4084 +516, 0x92b602614a9a9fcc +517, 0x7991bd7a94a65be7 +518, 0x5dead10b06cad2d7 +519, 0xda7719b33f722f06 +520, 0x9d87a722b7bff71e +521, 0xb038e479071409e9 +522, 0xf4e8bbec48054775 +523, 0x4fec2cd7a28a88ea +524, 0x839e28526aad3e56 +525, 0xd37ec57852a98bf0 +526, 0xdef2cbbe00f3a02d +527, 0x1aecfe01a9e4d801 +528, 0x59018d3c8beaf067 +529, 0x892753e6ac8bf3cd +530, 0xefdd3437023d2d1c +531, 0x447bfbd148c8cb88 +532, 0x282380221bd442b8 +533, 0xfce8658d1347384a +534, 0x60b211a7ec6bfa8 +535, 0xd21729cfcc692974 +536, 0x162087ecd5038a47 +537, 0x2b17000c4bce39d2 +538, 0x3a1f75ff6adcdce0 +539, 0x721a411d312f1a2c +540, 0x9c13b6133f66934d +541, 0xaa975d14978980e5 +542, 0x9403dbd4754203fa +543, 0x588c15762fdd643 +544, 0xdd1290f8d0ada73a +545, 0xd9b77380936103f4 +546, 0xb2e2047a356eb829 +547, 0x7019e5e7f76f7a47 +548, 0x3c29a461f62b001d +549, 0xa07dc6cfab59c116 +550, 0x9b97e278433f8eb +551, 0x6affc714e7236588 +552, 0x36170aeb32911a73 +553, 0x4a665104d364a789 +554, 0x4be01464ec276c9c +555, 0x71bb10271a8b4ecf +556, 0xbf62e1d068bc018 +557, 0xc9ada5db2cbbb413 +558, 0x2bded75e726650e5 +559, 0x33d5a7af2f34385d +560, 0x8179c46661d85657 +561, 0x324ebcfd29267359 +562, 0xac4c9311dc9f9110 +563, 0xc14bb6a52f9f9c0 +564, 0xc430abe15e7fb9db +565, 0xf1cce5c14df91c38 +566, 0x651e3efa2c0750d3 +567, 0x38a33604a8be5c75 +568, 0x7aaf77fe7ff56a49 +569, 0xc0d1cc56bbf27706 +570, 0x887aa47324e156c6 +571, 0x12547c004b085e8d +572, 0xd86a8d6fbbbfd011 +573, 0x57c860188c92d7b4 +574, 0xcd5d3843d361b8ca +575, 0x8f586ef05a9cb3ef +576, 0x174456e1ba6267d5 +577, 0xf5dc302c62fe583c +578, 0xa349442fabcdb71 +579, 0xe5123c1a8b6fd08e +580, 0x80681552aa318593 +581, 0xb295396deaef1e31 +582, 0xabb626e0b900e32b +583, 0xf024db8d3f19c15e +584, 0x1d04bb9548e2fb6c +585, 0xd8ed2b2214936c2b +586, 0x618ca1e430a52bc9 +587, 0xccbca44a6088136b +588, 0xd0481855c8b9ccbe +589, 
0x3c92a2fade28bdf7 +590, 0x855e9fefc38c0816 +591, 0x1269bbfe55a7b27c +592, 0x1d6c853d83726d43 +593, 0xc8655511cc7fcafc +594, 0x301503eb125a9b0e +595, 0xb3108e4532016b11 +596, 0xbb7ab6245da9cb3d +597, 0x18004c49116d85eb +598, 0x3480849c20f61129 +599, 0xe28f45157463937b +600, 0x8e85e61060f2ce1 +601, 0x1673da4ec589ba5e +602, 0x74b9a6bd1b194712 +603, 0xed39e147fa8b7601 +604, 0x28ce54019102ca77 +605, 0x42e0347f6d7a2f30 +606, 0xb6a908d1c4814731 +607, 0x16c3435e4e9a126d +608, 0x8880190514c1ad54 +609, 0xfffd86229a6f773c +610, 0x4f2420cdb0aa1a93 +611, 0xf8e1acb4120fc1fa +612, 0x63a8c553ab36a2f2 +613, 0x86b88cf3c0a6a190 +614, 0x44d8b2801622c792 +615, 0xf6eae14e93082ff1 +616, 0xd9ed4f5d1b8fac61 +617, 0x1808ce17f4e1f70 +618, 0x446e83ea336f262f +619, 0xc7c802b04c0917b7 +620, 0x626f45fd64968b73 +621, 0x9ffa540edc9b2c5c +622, 0xa96a1e219e486af8 +623, 0x2bb8963884e887a1 +624, 0xba7f68a5d029e3c4 +625, 0xefc45f44392d9ca0 +626, 0x98d77762503c5eab +627, 0xd89bcf62f2da627c +628, 0xa3cab8347f833151 +629, 0xa095b7595907d5c7 +630, 0x3b3041274286181 +631, 0xb518db8919eb71fa +632, 0x187036c14fdc9a36 +633, 0xd06e28301e696f5d +634, 0xdbc71184e0c56492 +635, 0xfe51e9cae6125bfd +636, 0x3b12d17cd014df24 +637, 0x3b95e4e2c986ac1a +638, 0x29c1cce59fb2dea2 +639, 0x58c05793182a49d6 +640, 0xc016477e330d8c00 +641, 0x79ef335133ada5d +642, 0x168e2cad941203f3 +643, 0xf99d0f219d702ef0 +644, 0x655628068f8f135b +645, 0xdcdea51910ae3f92 +646, 0x8e4505039c567892 +647, 0x91a9ec7e947c89ae +648, 0x8717172530f93949 +649, 0x1c80aba9a440171a +650, 0x9c8f83f6ebe7441e +651, 0x6c05e1efea4aa7f9 +652, 0x10af696b777c01b +653, 0x5892e9d9a92fc309 +654, 0xd2ba7da71e709432 +655, 0x46378c7c3269a466 +656, 0x942c63dfe18e772c +657, 0x6245cf02ef2476f +658, 0x6f265b2759ea2aea +659, 0x5aa757f17d17f4a6 +660, 0x1ad6a3c44fa09be6 +661, 0xe861af14e7015fb8 +662, 0x86be2e7db388c77 +663, 0x5c7bba32b519e9a0 +664, 0x3feb314850c4437b +665, 0x97955add60cfb45b +666, 0xfdb536230a540bdc +667, 0xdac9d7bf6e58512e +668, 0x4894c00e474e8120 +669, 0xa1918a37739da366 +670, 0xa8097f2096532807 +671, 0x592afe50e6c5e643 +672, 0xd69050ee6dcb33dc +673, 0xa6956b262dd3c561 +674, 0x1a55c815555e63f7 +675, 0x2ec7fd37516de2bb +676, 0x8ec251d9c70e76ba +677, 0x9b76e4abafd2689 +678, 0x9ce3f5c751a57df1 +679, 0x915c4818bf287bc7 +680, 0x2293a0d1fe07c735 +681, 0x7627dcd5d5a66d3d +682, 0xb5e4f92cc49c7138 +683, 0x6fc51298731d268c +684, 0xd19800aa95441f87 +685, 0x14f70f31162fa115 +686, 0x41a3da3752936f59 +687, 0xbec0652be95652ee +688, 0x7aa4bdb1020a290f +689, 0x4382d0d9bee899ef +690, 0xe6d988ae4277d6ff +691, 0xe618088ccb2a32d1 +692, 0x411669dfaa899e90 +693, 0x234e2bf4ba76d9f +694, 0xe109fe4cb7828687 +695, 0x1fb96b5022b0b360 +696, 0x6b24ad76c061a716 +697, 0x7e1781d4d7ecee15 +698, 0xf20c2dbe82ba38ba +699, 0xeda8e8ae1d943655 +700, 0xa58d196e2a77eaec +701, 0x44564765a5995a0b +702, 0x11902fe871ecae21 +703, 0x2ea60279900e675d +704, 0x38427227c18a9a96 +705, 0xe0af01490a1b1b48 +706, 0x826f91997e057824 +707, 0x1e57308e6e50451 +708, 0xb42d469bbbfdc350 +709, 0xb9734cff1109c49b +710, 0x98967559bb9d364f +711, 0xd6be360041907c12 +712, 0xa86a1279122a1e21 +713, 0x26f99a8527bfc698 +714, 0xfa8b85758f28f5d6 +715, 0xe3057429940806ae +716, 0x4bee2d7e84f93b2b +717, 0x948350a76ea506f4 +718, 0xa139154488045e74 +719, 0x8893579ba5e78085 +720, 0x5f21c215c6a9e397 +721, 0x456134f3a59641dc +722, 0x92c0273f8e97a9c6 +723, 0xd2936c9c3f0c6936 +724, 0xcfa4221e752c4735 +725, 0x28cd5a7457355dca +726, 0xecdfdde23d90999f +727, 0x60631b2d494d032b +728, 0xf67289df269a827f +729, 0xcbe8011ef0f5b7ef +730, 0x20eea973c70a84f5 +731, 
0xbe1fd200398557ce +732, 0xd2279ee030191bba +733, 0xf2bd4291dedaf819 +734, 0xfc6d167dbe8c402 +735, 0x39ac298da5d0044b +736, 0xceac026f5f561ce +737, 0x10a5b0bdd8ad60e6 +738, 0xdeb3c626df6d4bcb +739, 0x3c128962e77ff6ca +740, 0xc786262e9c67a0e5 +741, 0x4332855b3febcdc0 +742, 0x7bda9724d1c0e020 +743, 0x6a8c93399bc4df22 +744, 0xa9b20100ac707396 +745, 0xa11a3458502c4eb5 +746, 0xb185461c60478941 +747, 0x13131d56195b7ff6 +748, 0x8d55875ddbd4aa1c +749, 0xc09b67425f469aa5 +750, 0x39e33786cc7594c4 +751, 0x75e96db8e4b08b93 +752, 0xda01cd12a3275d1e +753, 0x2c49e7822344fab5 +754, 0x9bd5f10612514ca7 +755, 0x1c801a5c828e7332 +756, 0x29797d3f4f6c7b4c +757, 0xac992715e21e4e53 +758, 0xe40e89ee887ddb37 +759, 0x15189a2b265a783b +760, 0xa854159a52af5c5 +761, 0xb9d8a5a81c12bead +762, 0x3240cdc9d59e2a58 +763, 0x1d0b872234cf8e23 +764, 0xc01224cf6ce12cff +765, 0x2601e9f3905c8663 +766, 0xd4ecf9890168d6b4 +767, 0xa45db796d89bfdd5 +768, 0x9f389406dad64ab4 +769, 0xa5a851adce43ffe3 +770, 0xd0962c41c26e5aa9 +771, 0x8a671679e48510a4 +772, 0xc196dc0924a6bfeb +773, 0x3ead661043b549cb +774, 0x51af4ca737d405ac +775, 0xf4425b5c62275fb6 +776, 0x71e69d1f818c10f5 +777, 0xacaf4af2d3c70162 +778, 0x2e1f1d4fd7524244 +779, 0xe54fdd8f388890e8 +780, 0xfda0d33e84eb2b83 +781, 0x53965c5e392b81da +782, 0x5c92288267263097 +783, 0xcac1b431c878c66c +784, 0x36c0e1cf417241c6 +785, 0x5cc4d9cd1a36bf2c +786, 0x32e4257bb5d3e470 +787, 0x4aecff904adb44fb +788, 0x4d91a8e0d1d60cac +789, 0xa3b478388385b038 +790, 0x48d955f24eba70be +791, 0x310e4deb07f24f68 +792, 0x8853e73b1f30a5a +793, 0x278aee45c2a65c5 +794, 0xf6932eedbd62fb0b +795, 0xafb95958c82fafad +796, 0x78e807c18616c16c +797, 0xd7abadda7488ed9f +798, 0x2dd72e2572aa2ae6 +799, 0x6ec3791982c2be09 +800, 0x6865bb314fac478f +801, 0xa14dc0ce09000d1a +802, 0xb8081ad134da10f2 +803, 0xc4ac1534aa825ef5 +804, 0xd83aeb48ae2d538f +805, 0x38052027e3074be4 +806, 0xa9833e06ef136582 +807, 0x4f02d790ec9fd78 +808, 0xec2f60bc711c5bdc +809, 0x9253b0d12268e561 +810, 0xa8ac607fdd62c206 +811, 0x895e28ebc920289f +812, 0xe2fd42b154243ac7 +813, 0xc69cac2f776eee19 +814, 0xf4d4ac11db56d0dc +815, 0xa8d37049b9f39833 +816, 0x75abbf8a196c337c +817, 0xb115bb76750d27b8 +818, 0x39426d187839154 +819, 0xd488423e7f38bf83 +820, 0xbb92e0c76ecb6a62 +821, 0x3055a018ce39f4e3 +822, 0xc93fe0e907729bfb +823, 0x65985d17c5863340 +824, 0x2088ae081b2028e1 +825, 0x6e628de873314057 +826, 0x864377cccf573f0e +827, 0xae03f4c9aa63d132 +828, 0xb1db766d6404c66d +829, 0xdce5a22414a374b +830, 0x622155b777819997 +831, 0x69fe96e620371f3c +832, 0xa9c67dbc326d94fc +833, 0x932a84ae5dd43bab +834, 0xe2301a20f6c48c3f +835, 0x795d2e79c6477300 +836, 0xd8e3e631289521e7 +837, 0xae2684979002dfd6 +838, 0xc9c2392377550f89 +839, 0xa1b0c99d508ef7ec +840, 0x593aef3c5a5272ec +841, 0xe32e511a4b7162cd +842, 0xab3b81655f5a2857 +843, 0x1b535e1a0aaf053e +844, 0x5b33f56c1b6a07e2 +845, 0x782dc8cfcac4ef36 +846, 0xb3d4f256eecfd202 +847, 0xf73a6598f58c4f7e +848, 0xd5722189524870ae +849, 0x707878de6b995fc0 +850, 0xc3eb6ba73e3d7e8a +851, 0xca75c017655b75a7 +852, 0x1b29369ea3541e5f +853, 0x352e98858bdb58a3 +854, 0x1e4412d184b6b27d +855, 0x2d375ba0304b2d17 +856, 0x56c30fce69a5d08e +857, 0x6b8c2b0c06584bda +858, 0xde4dfff228c8c91f +859, 0xb7c9edd574e6287f +860, 0xf6078281c9fca2b2 +861, 0xb9b9a51de02a2f1e +862, 0xa411bef31c0103b0 +863, 0xc5facd8fc5e1d7a3 +864, 0x54e631c05ddf7359 +865, 0x815b42b3fd06c474 +866, 0xc9ac07566fda18ec +867, 0xd84ea62957bd8e15 +868, 0x5575f74b5cfd8803 +869, 0x5779a8d460c2e304 +870, 0xfd6e87e264a85587 +871, 0xa1d674daa320b26d +872, 0x2c3c3ec64b35afc4 +873, 
0x393a274ff03e6935 +874, 0x1f40ecbac52c50ea +875, 0xc3de64fa324ffc0c +876, 0x56ae828b7f9deb04 +877, 0xe7c1a77b5c1f2cb3 +878, 0xa4c4aab19ea921cc +879, 0xec164c238825822c +880, 0xa6a3304770c03b03 +881, 0x3a63641d5b1e8123 +882, 0x42677be3a54617ef +883, 0xa2680423e3a200c0 +884, 0x8b17cf75f3f37277 +885, 0xe7ce65a49242be3d +886, 0x7f85934271323e4b +887, 0xcfb0f431f79a4fab +888, 0x392e4041a8505b65 +889, 0xd3e5daf0d8b25ea6 +890, 0x9447eff675d80f53 +891, 0xea27a9d53cfaeea8 +892, 0xe3f2335945a83ba +893, 0x8875a43ce216413b +894, 0xe49941f9eabce33e +895, 0x9357c1296683a5b1 +896, 0xf0f16439e81ee701 +897, 0x3181515295ffd79a +898, 0x9d7150fffd169ed8 +899, 0x2d6a1d281e255a72 +900, 0x81bf1286fb3a92b6 +901, 0x566d3079b499e279 +902, 0xc7939ca8f047341 +903, 0xb1f8050e7c2d59f6 +904, 0x605701045e7be192 +905, 0x51b73360e8e31a1c +906, 0x9f4ad54483ba9fe0 +907, 0xd3085b8fcf69d1c8 +908, 0xc3e7475026dc5f0b +909, 0x5800f8554b157354 +910, 0x37dfdf858cfcd963 +911, 0x3a1fce05ce385072 +912, 0xf495c062645c20c3 +913, 0xdcbeec2c3492c773 +914, 0xc38f427589d1d0b4 +915, 0x681ead60216a8184 +916, 0x4bd569c40cc88c41 +917, 0x49b0d442e130b7a2 +918, 0xee349156b7d1fa3f +919, 0x2bde2d2db055135b +920, 0xc6a460d2fbcb2378 +921, 0xd0f170494ff3dbb +922, 0xb294422492528a23 +923, 0xfc95873c854e7b86 +924, 0x6c9c3ad1797bb19c +925, 0xe0c06f2aab65062d +926, 0x58e32ce0f11e3a81 +927, 0xa745fcd729ff5036 +928, 0x599b249b2fc2cdb2 +929, 0x78f23b5b0dd5b082 +930, 0x6de3e957f549ecfc +931, 0x9d0712fa6d878756 +932, 0x9076e8554e4a413a +933, 0xf3185818c0294de8 +934, 0x5de7cdf4b455b9b6 +935, 0xb15f6908ed703f7d +936, 0x98c654dfedc6818 +937, 0x120502ab0e93ae42 +938, 0x67966a98a58dc120 +939, 0x1caa0fc628989482 +940, 0xd8b2c3cd480a8625 +941, 0x85c70071b3aed671 +942, 0xff385f8473714662 +943, 0xe2868e4bf3773b63 +944, 0x96cf8019b279298e +945, 0x8511cc930bd74800 +946, 0x5312e48fdd55f5ab +947, 0xfcdae564b52df78d +948, 0x9eee48373e652176 +949, 0x953788f6bcbc56b0 +950, 0xd1a3855dbd2f6b37 +951, 0x3ad32acf77f4d1e9 +952, 0x917c7be81b003e30 +953, 0x9ce817da1e2e9dfb +954, 0x2968983db162d44d +955, 0x1e005decef5828ad +956, 0xc38fe59d1aa4f3d5 +957, 0xf357f1710dc02f1d +958, 0x2613912a4c83ec67 +959, 0x832a11470b9a17cb +960, 0x5e85508a611f0dad +961, 0x2781131677f59d56 +962, 0xa82358d7d4b0237f +963, 0xfbf8b3cc030c3af6 +964, 0x68b2f68ac8a55adb +965, 0x3b6fcf353add0ada +966, 0xd1956049bcd15bd5 +967, 0x95b76f31c7f98b6d +968, 0x814b6690df971a84 +969, 0xdcf7959cddd819e4 +970, 0xcf8c72c5d804fc88 +971, 0x56883769c8945a22 +972, 0x1f034652f658cf46 +973, 0x41df1324cda235a1 +974, 0xeccd32524504a054 +975, 0x974e0910a04ec02c +976, 0x72104507b821f6db +977, 0x791f8d089f273044 +978, 0xe0f79a4f567f73c3 +979, 0x52fe5bea3997f024 +980, 0x5f8b9b446494f78 +981, 0xfd9f511947059190 +982, 0x3aea9dac6063bce3 +983, 0xbfdae4dfc24aee60 +984, 0xa82cdbbf0a280318 +985, 0xf460aae18d70aa9d +986, 0x997367cb204a57c4 +987, 0x616e21ab95ba05ef +988, 0x9bfc93bec116769f +989, 0x2b2ee27c37a3fa5b +990, 0xb25c6ed54006ee38 +991, 0xab04d4a5c69e69a5 +992, 0x6d2f6b45f2d8438f +993, 0x4ad2f32afc82f092 +994, 0x513d718908f709c0 +995, 0x5272aadc4fffca51 +996, 0xeb3f87e66156ef5d +997, 0xf8a3d5a46a86ba85 +998, 0xdb4548a86f27abfd +999, 0x57c05f47ff62380d diff --git a/python/numpy/random/tests/data/sfc64-testset-1.csv b/python/numpy/random/tests/data/sfc64-testset-1.csv new file mode 100644 index 000000000..4fffe6959 --- /dev/null +++ b/python/numpy/random/tests/data/sfc64-testset-1.csv @@ -0,0 +1,1001 @@ +seed, 0xdeadbeaf +0, 0xa475f55fbb6bc638 +1, 0xb2d594b6c29d971c +2, 0x275bc4ece4484fb1 +3, 0x569be72d9b3492fb +4, 0x89a5bb9b206a670c 
+5, 0xd951bfa06afdc3f9 +6, 0x7ee2e1029d52a265 +7, 0x12ef1d4de0cb4d4c +8, 0x41658ba8f0ef0280 +9, 0x5b650c82e4fe09c5 +10, 0x638a9f3e30ec4e94 +11, 0x147487fb2ba9233e +12, 0x89ef035603d2d1fb +13, 0xe66ca57a190e6cbe +14, 0x330f673740dd61fc +15, 0xc71d3dce2f8bb34e +16, 0x3c07c39ff150b185 +17, 0x5df952b6cae8f099 +18, 0x9f09f2b1f0ceac80 +19, 0x19598eee2d0c4c67 +20, 0x64e06483702e0ebd +21, 0xda04d1fdb545f7fa +22, 0xf2cf53b61a0c4f9b +23, 0xf0bb724ce196f66e +24, 0x71cefde55d9cf0f +25, 0x6323f62824a20048 +26, 0x1e93604680f14b4e +27, 0xd9d8fad1d4654025 +28, 0xf4ee25af2e76ca08 +29, 0x6af3325896befa98 +30, 0xad9e43abf5e04053 +31, 0xbf930e318ce09de3 +32, 0x61f9583b4f9ffe76 +33, 0x9b69d0b3d5ec8958 +34, 0xa608f250f9b2ca41 +35, 0x6fdba7073dc2bb5d +36, 0xa9d57601efea6d26 +37, 0xc24a88a994954105 +38, 0xc728b1f78d88fe5b +39, 0x88da88c2b083b3b2 +40, 0xa9e27f7303c76cfd +41, 0xc4c24608c29176eb +42, 0x5420b58466b972fd +43, 0xd2018a661b6756c8 +44, 0x7caed83d9573fc7 +45, 0x562a3d81b849a06a +46, 0x16588af120c21f2c +47, 0x658109a7e0eb4837 +48, 0x877aabb14d3822e1 +49, 0x95704c342c3745fe +50, 0xeeb8a0dc81603616 +51, 0x431bf94889290419 +52, 0xe4a9410ab92a5863 +53, 0xbc6be64ea60f12ba +54, 0x328a2da920015063 +55, 0x40f6b3bf8271ae07 +56, 0x4068ff00a0e854f8 +57, 0x1b287572ca13fa78 +58, 0xa11624a600490b99 +59, 0x4a04ef29eb7150fa +60, 0xcc9469ab5ffb739 +61, 0x99a6a9f8d95e782 +62, 0x8e90356573e7a070 +63, 0xa740b8fb415c81c4 +64, 0x47eccef67447f3da +65, 0x2c720afe3a62a49b +66, 0xe2a747f0a43eacf4 +67, 0xba063a87ab165576 +68, 0xbc1c78ed27feb5a3 +69, 0x285a19fa3974f9d +70, 0x489c61e704f5f0e3 +71, 0xf5ab04f6b03f238b +72, 0x7e25f88138a110dd +73, 0xc3d1cef3d7c1f1d1 +74, 0xc3de6ec64d0d8e00 +75, 0x73682a15b6cc5088 +76, 0x6fecbeb319163dc5 +77, 0x7e100d5defe570a1 +78, 0xad2af9af076dce57 +79, 0x3c65100e23cd3a9a +80, 0x4b442cc6cfe521bb +81, 0xe89dc50f8ab1ef75 +82, 0x8b3c6fdc2496566 +83, 0xdfc50042bc2c308c +84, 0xe39c5f158b33d2b2 +85, 0x92f6adefdfeb0ac +86, 0xdf5808a949c85b3e +87, 0x437384021c9dace9 +88, 0xa7b5ed0d3d67d8f +89, 0xe1408f8b21da3c34 +90, 0xa1bba125c1e80522 +91, 0x7611dc4710385264 +92, 0xb00a46ea84082917 +93, 0x51bf8002ffa87cef +94, 0x9bb81013e9810adc +95, 0xd28f6600013541cd +96, 0xc2ca3b1fa7791c1f +97, 0x47f9ad58f099c82c +98, 0x4d1bb9458469caf9 +99, 0xca0b165b2844257 +100, 0xc3b2e667d075dc66 +101, 0xde22f71136a3dbb1 +102, 0x23b4e3b6f219e4c3 +103, 0x327e0db4c9782f66 +104, 0x9365506a6c7a1807 +105, 0x3e868382dedd3be7 +106, 0xff04fa6534bcaa99 +107, 0x96621a8862995305 +108, 0x81bf39cb5f8e1df7 +109, 0x79b684bb8c37af7a +110, 0xae3bc073c3cde33c +111, 0x7805674112c899ac +112, 0xd95a27995abb20f2 +113, 0x71a503c57b105c40 +114, 0x5ff00d6a73ec8acc +115, 0x12f96391d91e47c2 +116, 0xd55ca097b3bd4947 +117, 0x794d79d20468b04 +118, 0x35d814efb0d7a07d +119, 0xfa9ac9bd0aae76d3 +120, 0xa77b8a3711e175cd +121, 0xe6694fbf421f9489 +122, 0xd8f1756525a1a0aa +123, 0xe38dfa8426277433 +124, 0x16b640c269bbcd44 +125, 0x2a7a5a67ca24cfeb +126, 0x669039c28d5344b4 +127, 0x2a445ee81fd596bb +128, 0x600df94cf25607e0 +129, 0x9358561a7579abff +130, 0xee1d52ea179fc274 +131, 0x21a8b325e89d31be +132, 0x36fc0917486eec0a +133, 0x3d99f40717a6be9f +134, 0x39ac140051ca55ff +135, 0xcef7447c26711575 +136, 0xf22666870eff441d +137, 0x4a53c6134e1c7268 +138, 0xd26de518ad6bdb1b +139, 0x1a736bf75b8b0e55 +140, 0xef1523f4e6bd0219 +141, 0xb287b32fd615ad92 +142, 0x2583d6af5e841dd5 +143, 0x4b9294aae7ca670c +144, 0xf5aa4a84174f3ca9 +145, 0x886300f9e0dc6376 +146, 0x3611401e475ef130 +147, 0x69b56432b367e1ac +148, 0x30c330e9ab36b7c4 +149, 0x1e0e73079a85b8d5 +150, 0x40fdfc7a5bfaecf +151, 
0xd7760f3e8e75a085 +152, 0x1cc1891f7f625313 +153, 0xeece1fe6165b4272 +154, 0xe61111b0c166a3c1 +155, 0x2f1201563312f185 +156, 0xfd10e8ecdd2a57cb +157, 0x51cdc8c9dd3a89bf +158, 0xed13cc93938b5496 +159, 0x843816129750526b +160, 0xd09995cd6819ada +161, 0x4601e778d40607df +162, 0xef9df06bd66c2ea0 +163, 0xae0bdecd3db65d69 +164, 0xbb921a3c65a4ae9a +165, 0xd66698ce8e9361be +166, 0xacdc91647b6068f4 +167, 0xe505ef68f2a5c1c0 +168, 0xd6e62fd27c6ab137 +169, 0x6a2ba2c6a4641d86 +170, 0x9c89143715c3b81 +171, 0xe408c4e00362601a +172, 0x986155cbf5d4bd9d +173, 0xb9e6831728c893a7 +174, 0xb985497c3bf88d8c +175, 0xd0d729214b727bec +176, 0x4e557f75fece38a +177, 0x6572067fdfd623ca +178, 0x178d49bb4d5cd794 +179, 0xe6baf59f60445d82 +180, 0x5607d53518e3a8d2 +181, 0xba7931adb6ebbd61 +182, 0xe853576172611329 +183, 0xe945daff96000c44 +184, 0x565b9ba3d952a176 +185, 0xcdb54d4f88c584c8 +186, 0x482a7499bee9b5e5 +187, 0x76560dd0affe825b +188, 0x2a56221faa5ca22c +189, 0x7729be5b361f5a25 +190, 0xd6f2195795764876 +191, 0x59ef7f8f423f18c5 +192, 0x7ebefed6d02adde1 +193, 0xcfec7265329c73e5 +194, 0x4fd8606a5e59881c +195, 0x95860982ae370b73 +196, 0xdecfa33b1f902acc +197, 0xf9b8a57400b7c0a6 +198, 0xd20b822672ec857b +199, 0x4eb81084096c7364 +200, 0xe535c29a44d9b6ad +201, 0xdef8b48ebacb2e29 +202, 0x1063bc2b8ba0e915 +203, 0xe4e837fb53d76d02 +204, 0x4df935db53579fb8 +205, 0xa30a0c8053869a89 +206, 0xe891ee58a388a7b5 +207, 0x17931a0c64b8a985 +208, 0xaf2d350b494ce1b3 +209, 0x2ab9345ffbcfed82 +210, 0x7de3fe628a2592f0 +211, 0x85cf54fab8b7e79d +212, 0x42d221520edab71b +213, 0x17b695b3af36c233 +214, 0xa4ffe50fe53eb485 +215, 0x1102d242db800e4d +216, 0xc8dc01f0233b3b6 +217, 0x984a030321053d36 +218, 0x27fa8dc7b7112c0e +219, 0xba634dd8294e177f +220, 0xe67ce34b36332eb +221, 0x8f1351e1894fb41a +222, 0xb522a3048761fd30 +223, 0xc350ad9bc6729edc +224, 0xe0ed105bd3c805e1 +225, 0xa14043d2b0825aa7 +226, 0xee7779ce7fc11fdf +227, 0xc0fa8ba23a60ab25 +228, 0xb596d1ce259afbad +229, 0xaa9b8445537fdf62 +230, 0x770ab2c700762e13 +231, 0xe812f1183e40cc1 +232, 0x44bc898e57aefbbd +233, 0xdd8a871df785c996 +234, 0x88836a5e371eb36b +235, 0xb6081c9152623f27 +236, 0x895acbcd6528ca96 +237, 0xfb67e33ddfbed435 +238, 0xaf7af47d323ce26 +239, 0xe354a510c3c39b2d +240, 0x5cacdedda0672ba3 +241, 0xa440d9a2c6c22b09 +242, 0x6395099f48d64304 +243, 0xc11cf04c75f655b5 +244, 0x1c4e054d144ddb30 +245, 0x3e0c2db89d336636 +246, 0x127ecf18a5b0b9a7 +247, 0x3b50551a88ea7a73 +248, 0xbd27003e47f1f684 +249, 0xf32d657782baac9b +250, 0x727f5cabf020bc9 +251, 0x39c1c1c226197dc7 +252, 0x5552c87b35deeb69 +253, 0x64d54067b5ce493f +254, 0x3494b091fe28dda0 +255, 0xdf0278bc85ee2965 +256, 0xdef16fec25efbd66 +257, 0xe2be09f578c4ce28 +258, 0xd27a9271979d3019 +259, 0x427f6fcd71845e3 +260, 0x26b52c5f81ec142b +261, 0x98267efc3986ad46 +262, 0x7bf4165ddb7e4374 +263, 0xd05f7996d7941010 +264, 0x3b3991de97b45f14 +265, 0x9068217fb4f27a30 +266, 0xd8fe295160afc7f3 +267, 0x8a159fab4c3bc06f +268, 0x57855506d19080b6 +269, 0x7636df6b3f2367a4 +270, 0x2844ee3abd1d5ec9 +271, 0xe5788de061f51c16 +272, 0x69e78cc9132a164 +273, 0xacd53cde6d8cd421 +274, 0xb23f3100068e91da +275, 0x4140070a47f53891 +276, 0xe4a422225a96e53a +277, 0xb82a8925a272a2ac +278, 0x7c2f9573590fe3b7 +279, 0xbaf80764db170575 +280, 0x955abffa54358368 +281, 0x355ce7460614a869 +282, 0x3700ede779a4afbf +283, 0x10a6ec01d92d68cd +284, 0x3308f5a0a4c0afef +285, 0x97b892d7601136c9 +286, 0x4955c3b941b8552e +287, 0xca85aa67e941961d +288, 0xb1859ae5db28e9d2 +289, 0x305d072ac1521fbd +290, 0xed52a868996085bb +291, 0x723bfa6a76358852 +292, 0x78d946ecd97c5fb3 +293, 
0x39205b30a8e23e79 +294, 0xb927e3d086baadbe +295, 0xa18d6946136e1ff5 +296, 0xdab6f0b51c1eb5ff +297, 0xf0a640bf7a1af60c +298, 0xf0e81db09004d0d4 +299, 0xfe76cebdbe5a4dde +300, 0x2dafe9cc3decc376 +301, 0x4c871fdf1af34205 +302, 0xe79617d0c8fa893b +303, 0xee658aaad3a141f7 +304, 0xfd91aa74863e19f1 +305, 0x841b8f55c103cc22 +306, 0x22766ed65444ad5d +307, 0x56d03d1beca6c17a +308, 0x5fd4c112c92036ae +309, 0x75466ae58a5616dc +310, 0xfbf98b1081e802a9 +311, 0xdc325e957bf6d8f5 +312, 0xb08da7015ebd19b7 +313, 0xf25a9c0944f0c073 +314, 0xf4625bafa0ced718 +315, 0x4349c9e093a9e692 +316, 0x75a9ccd4dd8935cb +317, 0x7e6cf9e539361e91 +318, 0x20fdd22fb6edd475 +319, 0x5973021b57c2311f +320, 0x75392403667edc15 +321, 0xed9b2156ea70d9f1 +322, 0xf40c114db50b64a0 +323, 0xe26bb2c9eef20c62 +324, 0x409c1e3037869f03 +325, 0xcdfd71fdda3b7f91 +326, 0xa0dfae46816777d6 +327, 0xde060a8f61a8deb8 +328, 0x890e082a8b0ca4fc +329, 0xb9f2958eddf2d0db +330, 0xd17c148020d20e30 +331, 0xffdc9cc176fe7201 +332, 0xffb83d925b764c1 +333, 0x817ea639e313da8d +334, 0xa4dd335dd891ca91 +335, 0x1342d25a5e81f488 +336, 0xfa7eb9c3cf466b03 +337, 0xfe0a423d44b185d0 +338, 0x101cfd430ab96049 +339, 0x7b5d3eda9c4504b +340, 0xe20ccc006e0193f1 +341, 0xf54ccddedebc5df0 +342, 0xc0edd142bd58f1db +343, 0x3831f40d378d2430 +344, 0x80132353f0a88289 +345, 0x688f23c419d03ef8 +346, 0x4c6837e697884066 +347, 0x699387bb2e9a3a8f +348, 0x8996f860342448d8 +349, 0xb0f80dff99bfa5cc +350, 0x3e927a7f9ea12c8e +351, 0xd7e498d1e5f9dff3 +352, 0x78ecb97bb3f864cc +353, 0x3c4ffd069a014d38 +354, 0xf8d5073a1e09b4d4 +355, 0x8717e854f9faef23 +356, 0xfbcc5478d8d0ad7 +357, 0xd3cd8b233ca274ff +358, 0x8bd8f11f79beb265 +359, 0xf64498a832d8fd0e +360, 0xb01bba75112131ec +361, 0x55572445a7869781 +362, 0x7b56622f18cb3d7a +363, 0x7f192c9e075bdb83 +364, 0xd9a112f836b83ff3 +365, 0x68673b37269653dc +366, 0xe46a9433fb6a0879 +367, 0x127d756ca4779001 +368, 0xc1378e8b1e8eab94 +369, 0x1006edb0f51d078c +370, 0xc6dd53961232d926 +371, 0x9a4aeef44038256d +372, 0xd357f4fa652d4f5f +373, 0x59f3d2cc3378598 +374, 0xe76e6207a824a7fc +375, 0x5fc5e33712ceffef +376, 0x77d24aeb0ccb1adc +377, 0x5be4b2826805659e +378, 0x257c69d787e64634 +379, 0x58dd52ca6bc727b1 +380, 0x3ab997767235ea33 +381, 0x986a2a7a966fad14 +382, 0xc900f8b27761dcc4 +383, 0x44991bdb13795700 +384, 0xe5c145a4fe733b2 +385, 0x56f041b56bffe0d3 +386, 0x5779c4fef8067996 +387, 0xa0fe8748e829532d +388, 0x840c1277d78d9dd4 +389, 0x37ebcb315432acbc +390, 0xf4bc8738433ba3be +391, 0x8b122993f2e10062 +392, 0xe1fe8481f2681ed5 +393, 0x8e23f1630d9f494a +394, 0xda24661a01b7d0b3 +395, 0x7a02942a179cee36 +396, 0xf1e08a3c09b71ac +397, 0x3dec2cc7ee0bd8fd +398, 0x1f3e480113d805d4 +399, 0xc061b973ad4e3f2c +400, 0x6bea750f17a66836 +401, 0xbc2add72eac84c25 +402, 0xcff058d3f97934ca +403, 0x54ccc30987778ec2 +404, 0x93449ec1e1469558 +405, 0xe2ff369eb0c6836 +406, 0x41c2df2d63bf8e55 +407, 0xf9302629b6c71be2 +408, 0xdd30376b8e5ab29a +409, 0x12db9e04f911d754 +410, 0x8d03d6cd359f1b97 +411, 0xe15956511abf1cee +412, 0x9b68e10e2c2fd940 +413, 0x2e28de6491c1ce53 +414, 0x52b329b72d0c109d +415, 0xc2c0b115f9da2a60 +416, 0x6ca084105271bbff +417, 0x49b92b8676058c1e +418, 0x767fc92a70f7e5a3 +419, 0x87ba4ed4b65a6aa0 +420, 0xf70b052e0a3975e9 +421, 0x3e925c3306db9eec +422, 0x43253f1d96ac9513 +423, 0xe3e04f1a1ea454c4 +424, 0x763e3f4cc81ba0c8 +425, 0x2a2721ac69265705 +426, 0xdf3b0ac6416ea214 +427, 0xa6a6b57450f3e000 +428, 0xc3d3b1ac7dbfe6ac +429, 0xb66e5e6f7d2e4ec0 +430, 0x43c65296f98f0f04 +431, 0xdb0f6e3ff974d842 +432, 0x3d6b48e02ebb203b +433, 0xd74674ebf09d8f27 +434, 0xbe65243c58fc1200 +435, 
0x55eb210a68d42625 +436, 0x87badab097dbe883 +437, 0xada3fda85a53824f +438, 0xef2791e8f48cd37a +439, 0x3fe7fceb927a641a +440, 0xd3bffd3ff031ac78 +441, 0xb94efe03da4d18fb +442, 0x162a0ad8da65ea68 +443, 0x300f234ef5b7e4a6 +444, 0xa2a8b4c77024e4fb +445, 0x5950f095ddd7b109 +446, 0xded66dd2b1bb02ba +447, 0x8ec24b7fa509bcb6 +448, 0x9bede53d924bdad6 +449, 0xa9c3f46423be1930 +450, 0x6dfc90597f8de8b4 +451, 0xb7419ebc65b434f0 +452, 0xa6596949238f58b9 +453, 0x966cbade640829b8 +454, 0x58c74877bdcbf65e +455, 0xaa103b8f89b0c453 +456, 0x219f0a86e41179a4 +457, 0x90f534fc06ddc57f +458, 0x8db7cdd644f1affa +459, 0x38f91de0167127ac +460, 0xdcd2a65e4df43daa +461, 0x3e04f34a7e01f834 +462, 0x5b237eea68007768 +463, 0x7ff4d2b015921768 +464, 0xf786b286549d3d51 +465, 0xaefa053fc2c3884c +466, 0x8e6a8ff381515d36 +467, 0x35b94f3d0a1fce3c +468, 0x165266d19e9abb64 +469, 0x1deb5caa5f9d8076 +470, 0x13ab91290c7cfe9d +471, 0x3651ca9856be3e05 +472, 0xe7b705f6e9cccc19 +473, 0xd6e7f79668c127ed +474, 0xa9faf37154896f92 +475, 0x89fbf190603e0ab1 +476, 0xb34d155a86f942d0 +477, 0xb2d4400a78bfdd76 +478, 0x7c0946aca8cfb3f0 +479, 0x7492771591c9d0e8 +480, 0xd084d95c5ca2eb28 +481, 0xb18d12bd3a6023e +482, 0xea217ed7b864d80b +483, 0xe52f69a755dd5c6f +484, 0x127133993d81c4aa +485, 0xe07188fcf1670bfb +486, 0x178fbfe668e4661d +487, 0x1c9ee14bb0cda154 +488, 0x8d043b96b6668f98 +489, 0xbc858986ec96ca2b +490, 0x7660f779d528b6b7 +491, 0xd448c6a1f74ae1d3 +492, 0x178e122cfc2a6862 +493, 0x236f000abaf2d23b +494, 0x171b27f3f0921915 +495, 0x4c3ff07652f50a70 +496, 0x18663e5e7d3a66ca +497, 0xb38c97946c750cc9 +498, 0xc5031aae6f78f909 +499, 0x4d1514e2925e95c1 +500, 0x4c2184a741dabfbb +501, 0xfd410364edf77182 +502, 0xc228157f863ee873 +503, 0x9856fdc735cc09fc +504, 0x660496cd1e41d60e +505, 0x2edf1d7e01954c32 +506, 0xd32e94639bdd98cf +507, 0x8e153f48709a77d +508, 0x89357f332d2d6561 +509, 0x1840d512c97085e6 +510, 0x2f18d035c9e26a85 +511, 0x77b88b1448b26d5b +512, 0xc1ca6ef4cdae0799 +513, 0xcc203f9e4508165f +514, 0xeaf762fbc9e0cbbe +515, 0xc070c687f3c4a290 +516, 0xd49ed321068d5c15 +517, 0x84a55eec17ee64ee +518, 0x4d8ee685298a8871 +519, 0x9ff5f17d7e029793 +520, 0x791d7d0d62e46302 +521, 0xab218b9114e22bc6 +522, 0x4902b7ab3f7119a7 +523, 0x694930f2e29b049e +524, 0x1a3c90650848999f +525, 0x79f1b9d8499c932b +526, 0xfacb6d3d55e3c92f +527, 0x8fd8b4f25a5da9f5 +528, 0xd037dcc3a7e62ae7 +529, 0xfecf57300d8f84f4 +530, 0x32079b1e1dc12d48 +531, 0xe5f8f1e62b288f54 +532, 0x97feba3a9c108894 +533, 0xd279a51e1899a9a0 +534, 0xd68eea8e8e363fa8 +535, 0x7394cf2deeca9386 +536, 0x5f70b0c80f1dbf10 +537, 0x8d646916ed40462 +538, 0xd253bb1c8a12bbb6 +539, 0x38f399a821fbd73e +540, 0x947523a26333ac90 +541, 0xb52e90affbc52a37 +542, 0xcf899cd964654da4 +543, 0xdf66ae9cca8d99e7 +544, 0x6051478e57c21b6a +545, 0xffa7dc975af3c1da +546, 0x195c7bff2d1a8f5 +547, 0x64f12b6575cf984d +548, 0x536034cb842cf9e1 +549, 0x180f247ce5bbfad +550, 0x8ced45081b134867 +551, 0x532bbfdf426710f3 +552, 0x4747933e74c4f54d +553, 0x197a890dc4793401 +554, 0x76c7cc2bd42fae2 +555, 0xdabfd67f69675dd0 +556, 0x85c690a68cdb3197 +557, 0xe482cec89ce8f92 +558, 0x20bc9fb7797011b1 +559, 0x76dc85a2185782ad +560, 0x3df37c164422117a +561, 0x99211f5d231e0ab0 +562, 0xef7fd794a0a91f4 +563, 0x419577151915f5fe +564, 0x3ce14a0a7135dae3 +565, 0x389b57598a075d6a +566, 0x8cc2a9d51b5af9aa +567, 0xe80a9beffbd13f13 +568, 0x65e96b22ea8a54d8 +569, 0x79f38c4164138ede +570, 0xd1955846cba03d81 +571, 0x60359fe58e4f26d6 +572, 0x4ea724f585f8d13e +573, 0x316dfdbadc801a3c +574, 0x20aa29b7c6dd66fe +575, 0x65eaf83a6a008caa +576, 0x407000aff1b9e8cb +577, 
0xb4d49bfb2b268c40 +578, 0xd4e6fe8a7a0f14a9 +579, 0xe34afef924e8f58e +580, 0xe377b0c891844824 +581, 0x29c2e20c112d30c8 +582, 0x906aad1fe0c18a95 +583, 0x308385f0efbb6474 +584, 0xf23900481bf70445 +585, 0xfdfe3ade7f937a55 +586, 0xf37aae71c33c4f97 +587, 0x1c81e3775a8bed85 +588, 0x7eb5013882ce35ea +589, 0x37a1c1692495818d +590, 0x3f90ae118622a0ba +591, 0x58e4fe6fea29b037 +592, 0xd10ff1d269808825 +593, 0xbce30edb60c21bba +594, 0x123732329afd6fee +595, 0x429b4059f797d840 +596, 0x421166568a8c4be1 +597, 0x88f895c424c1bd7f +598, 0x2adaf7a7b9f781cb +599, 0xa425644b26cb698 +600, 0x8cc44d2486cc5743 +601, 0xdb9f357a33abf6ba +602, 0x1a57c4ea77a4d70c +603, 0x1dea29be75239e44 +604, 0x463141a137121a06 +605, 0x8fecfbbe0b8a9517 +606, 0x92c83984b3566123 +607, 0x3b1c69180ed28665 +608, 0x14a6073425ea8717 +609, 0x71f4c2b3283238d7 +610, 0xb3d491e3152f19f +611, 0x3a0ba3a11ebac5d2 +612, 0xddb4d1dd4c0f54ac +613, 0xdb8f36fe02414035 +614, 0x1cf5df5031b1902c +615, 0x23a20ed12ef95870 +616, 0xf113e573b2dedcbb +617, 0x308e2395cde0a9fa +618, 0xd377a22581c3a7da +619, 0xe0ced97a947a66fb +620, 0xe44f4de9cd754b00 +621, 0x2344943337d9d1bf +622, 0x4b5ae5e2ea6e749c +623, 0x9b8d2e3ef41d1c01 +624, 0x59a5a53ebbd24c6b +625, 0x4f7611bf9e8a06fb +626, 0xea38c7b61361cd06 +627, 0xf125a2bfdd2c0c7 +628, 0x2df8dcb5926b9ebb +629, 0x233e18720cc56988 +630, 0x974c61379b4aa95e +631, 0xc7fe24c1c868910b +632, 0x818fd1affc82a842 +633, 0xcee92a952a26d38e +634, 0x8962f575ebcbf43 +635, 0x7770687e3678c460 +636, 0xdfb1db4ed1298117 +637, 0xb9db54cb03d434d3 +638, 0x34aebbf2244257ad +639, 0xd836db0cb210c490 +640, 0x935daed7138957cd +641, 0x3cd914b14e7948fd +642, 0xd0472e9ed0a0f7f0 +643, 0xa9df33dca697f75e +644, 0x15e9ea259398721a +645, 0x23eeba0f970abd60 +646, 0x2217fdf8bbe99a12 +647, 0x5ea490a95717b198 +648, 0xf4e2bfc28280b639 +649, 0x9d19916072d6f05c +650, 0x5e0387cab1734c6a +651, 0x93c2c8ac26e5f01e +652, 0xb0d934354d957eb1 +653, 0xee5099a1eef3188c +654, 0x8be0abca8edc1115 +655, 0x989a60845dbf5aa3 +656, 0x181c7ed964eee892 +657, 0x49838ea07481288d +658, 0x17dbc75d66116b2e +659, 0xa4cafb7a87c0117e +660, 0xab2d0ae44cdc2e6e +661, 0xdf802f2457e7da6 +662, 0x4b966c4b9187e124 +663, 0x62de9db6f4811e1a +664, 0x1e20485968bc62 +665, 0xe9ac288265caca94 +666, 0xc5c694d349aa8c1a +667, 0x3d67f2083d9bdf10 +668, 0x9a2468e503085486 +669, 0x9d6acd3dc152d1a3 +670, 0xca951e2aeee8df77 +671, 0x2707371af9cdd7b0 +672, 0x2347ae6a4eb5ecbd +673, 0x16abe5582cb426f +674, 0x523af4ff980bbccb +675, 0xb07a0f043e3694aa +676, 0x14d7c3da81b2de7 +677, 0xf471f1b8ac22305b +678, 0xdb087ffff9e18520 +679, 0x1a352db3574359e8 +680, 0x48d5431502cc7476 +681, 0x7c9b7e7003dfd1bf +682, 0x4f43a48aae987169 +683, 0x9a5d3eb66dedb3e9 +684, 0xa7b331af76a9f817 +685, 0xba440154b118ab2d +686, 0x64d22344ce24c9c6 +687, 0xa22377bd52bd043 +688, 0x9dfa1bb18ca6c5f7 +689, 0xdccf44a92f644c8b +690, 0xf623d0a49fd18145 +691, 0x556d5c37978e28b3 +692, 0xad96e32ce9d2bb8b +693, 0x2e479c120be52798 +694, 0x7501cf871af7b2f7 +695, 0xd02536a5d026a5b8 +696, 0x4b37ff53e76ab5a4 +697, 0xdb3a4039caaeab13 +698, 0x6cbd65e3b700c7be +699, 0x7367abd98761a147 +700, 0xf4f9ba216a35aa77 +701, 0xf88ca25ce921eb86 +702, 0xb211de082ec2cbf2 +703, 0xdd94aa46ec57e12e +704, 0xa967d74ad8210240 +705, 0xdaa1fada8cfa887 +706, 0x85901d081c4488ee +707, 0xcf67f79a699ef06 +708, 0x7f2f1f0de921ee14 +709, 0x28bc61e9d3f2328b +710, 0x3332f2963faf18e5 +711, 0x4167ac71fcf43a6 +712, 0x843c1746b0160b74 +713, 0xd9be80070c578a5e +714, 0xbd7250c9af1473e7 +715, 0x43f78afaa3647899 +716, 0x91c6b5dd715a75a5 +717, 0x29cc66c8a07bfef3 +718, 0x3f5c667311dc22be +719, 
0x4f49cd47958260cd +720, 0xbef8be43d920b64e +721, 0x7a892a5f13061d8b +722, 0x9532f40125c819b1 +723, 0x924fca3045f8a564 +724, 0x9b2c6442453b0c20 +725, 0x7e21009085b8e793 +726, 0x9b98c17e17af59d2 +727, 0xba61acb73e3ae89a +728, 0xb9d61a710555c138 +729, 0xc2a425d80978974b +730, 0xa275e13592da7d67 +731, 0xe962103202d9ad0f +732, 0xbdf8367a4d6f33fd +733, 0xe59beb2f8648bdc8 +734, 0xb4c387d8fbc4ac1c +735, 0x5e3f276b63054b75 +736, 0xf27e616aa54d8464 +737, 0x3f271661d1cd7426 +738, 0x43a69dbee7502c78 +739, 0x8066fcea6df059a1 +740, 0x3c10f19409bdc993 +741, 0x6ba6f43fb21f23e0 +742, 0x9e182d70a5bccf09 +743, 0x1520783d2a63a199 +744, 0xba1dcc0c70b9cace +745, 0x1009e1e9b1032d8 +746, 0xf632f6a95fb0315 +747, 0x48e711c7114cbfff +748, 0xef281dcec67debf7 +749, 0x33789894d6abf59b +750, 0x6c8e541fffbe7f9c +751, 0x85417f13b08e0a88 +752, 0x9a581e36d589608f +753, 0x461dca50b1befd35 +754, 0x5a3231680dde6462 +755, 0xcc57acf729780b97 +756, 0x50301efef62e1054 +757, 0x675d042cd4f6bbc9 +758, 0x1652fdd3794384c9 +759, 0x1c93bbeeb763cd4d +760, 0x44b7240c4b105242 +761, 0x4c6af2a1b606ccfb +762, 0x18fc43ece2ec1a40 +763, 0x859a5511aeae8acb +764, 0x2f56826f1996ad2f +765, 0xa8e95ce8bb363bdf +766, 0xf4da396054e50e4b +767, 0x5493865e9895883c +768, 0x768e4c8b332ac0e3 +769, 0x32195d2aa583fca5 +770, 0xf2f353f21266bc15 +771, 0x43cddf1d021307d +772, 0x6031e3aa30300e4a +773, 0x4f1298469ac6088f +774, 0x4b4d450bafac574e +775, 0x23e1cf9c0582a22b +776, 0x2e9036980db49cd0 +777, 0xe4e228b113c411b2 +778, 0x8bddcdb82b51706 +779, 0xd2a7ea8288593629 +780, 0x67fe90e98fdda61 +781, 0x7b63494dba95717b +782, 0x105625904510d782 +783, 0xdf4aa2242454e50a +784, 0x32541d6cd7d6c7e3 +785, 0x5661fb432591cf3b +786, 0xce920a5ed047bce7 +787, 0xed4178a3c96eea8f +788, 0xe378cd996e39863b +789, 0x169e1fdc8e2b05e1 +790, 0xaee1812ef7149a96 +791, 0x648571c7453d12c5 +792, 0xb7b6bc9328573c43 +793, 0xe7fb969078e270d7 +794, 0xdfc2b1b8985f6e6f +795, 0x862b6527ee39a1aa +796, 0x1ee329aea91d7882 +797, 0x20d25324f2fe704 +798, 0xbfcc47401fc3bbfd +799, 0x1515cdc8d48b2904 +800, 0xbd6eefe86284261c +801, 0x9b1f28e3b35f22ee +802, 0x842a29d35e5aecda +803, 0xf2346109ad370765 +804, 0x24d68add5a71afd9 +805, 0x4a691421613d91e2 +806, 0x60e3058b3c244051 +807, 0x79194905cdaa5de8 +808, 0xe0e2df35c01e8987 +809, 0xe29b78beffbb5e4a +810, 0xcdcdbc020218c19e +811, 0x5ae0af8c16feae43 +812, 0x8109292feeaf14fa +813, 0x34113f7508dfa521 +814, 0xc062ac163f56730a +815, 0xf1660e66ec6d4c4c +816, 0x5966c55f60151c80 +817, 0x3865ae8ec934b17 +818, 0x472a7314afb055ec +819, 0x7a24277309a44a44 +820, 0x556e02dd35d38baa +821, 0x9849611a1bc96ec1 +822, 0xd176f5d5a8eb0843 +823, 0x44db12ec60510030 +824, 0x272e3a06a0030078 +825, 0x7c4764dbefc075ea +826, 0x910712f3735c1183 +827, 0xd49a2da74ae7aff6 +828, 0xcf9b3e6e8f776d71 +829, 0x27789fe3ec481a02 +830, 0x86659f82c6b5912b +831, 0xe044b3dbf339158c +832, 0x99d81f6bb62a37b0 +833, 0x5f5830c246fada9a +834, 0xe68abab1eeb432cb +835, 0x49c5c5ace04e104 +836, 0x1ac3871b3fc6771b +837, 0x773b39f32d070652 +838, 0x9c4138c2ae58b1f3 +839, 0xac41c63d7452ac60 +840, 0x9248826b245359e1 +841, 0x99bba1c7a64f1670 +842, 0xe0dc99ff4ebb92f2 +843, 0x113638652740f87c +844, 0xebf51e94da88cfc +845, 0x5441c344b81b2585 +846, 0xe1e69e0bc2de652a +847, 0xe9ab6d64ae42ed1e +848, 0x879af8730e305f31 +849, 0x36b9ad912c7e00d6 +850, 0x83ef5e9fca853886 +851, 0xda54d48bb20ea974 +852, 0x32c6d93aefa92aa2 +853, 0x4e887b2c3391847d +854, 0x50966e815f42b1b8 +855, 0x53411ac087832837 +856, 0x46f64fef79df4f29 +857, 0xb34aae3924cd272c +858, 0xf5ad455869a0adbe +859, 0x8351ded7144edac8 +860, 0xeb558af089677494 +861, 
0x36ed71d69293a8d6 +862, 0x659f90bf5431b254 +863, 0x53349102b7519949 +864, 0x3db83e20b1713610 +865, 0x6d63f96090556254 +866, 0x4cc0467e8f45c645 +867, 0xb8840c4bd5cd4091 +868, 0xbd381463cc93d584 +869, 0x203410d878c2066d +870, 0x2ebea06213cf71c8 +871, 0x598e8fb75e3fceb4 +872, 0xdcca41ceba0fce02 +873, 0x61bf69212b56aae5 +874, 0x97eed7f70c9114fa +875, 0xf46f37a8b7a063f9 +876, 0x66c8f4ffe5bd6efa +877, 0xe43fd6efda2d4e32 +878, 0x12d6c799e5ad01de +879, 0x9ac83e7f8b709360 +880, 0xbbb7bb3c1957513d +881, 0x7f87c08d4b3796b0 +882, 0x9a7d1d74b6aa4a5c +883, 0xa4314530ff741b6f +884, 0x99a80c6b6f15fca8 +885, 0xd2fec81d6d5fc3ce +886, 0x15a98be1cc40cea +887, 0x98693eb7719366f3 +888, 0x36ccdc2a9e9d4de8 +889, 0x3c8208f63d77df25 +890, 0xca2e376e2343df6 +891, 0xcc9b17cbb54420c6 +892, 0x8724c44a64d7dcb8 +893, 0x9d00c6949ff33869 +894, 0xf4f8e584d2699372 +895, 0x88f4748cdd5a2d53 +896, 0xe215072a1205bc6d +897, 0x190934fe6d740442 +898, 0x7fac5c0ab2af106d +899, 0x1b86633a0bd84fa1 +900, 0x1293e54318492dfb +901, 0x433324fd390f34b9 +902, 0x4c5eb2c67a44643b +903, 0x59a6e281c388b0dd +904, 0xe78e03f9c44623b7 +905, 0x91307a93c768fc3d +906, 0xde8867b004d8e3ff +907, 0xdf52c3f57b7c5862 +908, 0x993f3e1d10358a92 +909, 0x9ccb10bc3e18662d +910, 0x45093ce48a114c73 +911, 0xd59d05979d26330a +912, 0x417c0e03300119a9 +913, 0x1c336500f90cde81 +914, 0x1c8ccd29ead9b85b +915, 0xb76baf3e55d4d950 +916, 0x133ad6196c75fd7e +917, 0x34200b0cde7ed560 +918, 0x9c7c3dacb213c8d9 +919, 0xd97563c4fd9bf1b6 +920, 0x5d910e871835b6cb +921, 0x7d46c4733a16bdf9 +922, 0xe41d73194ddc87b2 +923, 0x7d3d8a0855a465a9 +924, 0x70c2a8b5d3f90c0f +925, 0x9e7565ca5dccfe12 +926, 0x2c0acb4577aa51b1 +927, 0x3d2cd211145b79c7 +928, 0x15a7b17aa6da7732 +929, 0xab44a3730c27d780 +930, 0xf008bd6c802bde3a +931, 0x82ed86ddf3619f77 +932, 0xaabe982ab15c49f9 +933, 0x9bcad8fa6d8e58a4 +934, 0x8f39ed8243718aa1 +935, 0xe9489340e03e3cb6 +936, 0xc722314f5eefb8d0 +937, 0x870e8869a436df59 +938, 0x4dae75b8087a8204 +939, 0xe1d790f6ec6e425b +940, 0xafd39ea1b1d0ed09 +941, 0xdf2c99e464ddf08f +942, 0x74936d859ab9644d +943, 0x3871302164250e73 +944, 0x764b68921e911886 +945, 0x2a1d024b26bb9d66 +946, 0x797fba43918e75b4 +947, 0x62ec6d24ccca335b +948, 0xf4bd8b951762b520 +949, 0x9d450dede9119397 +950, 0x5393a26d10f8c124 +951, 0x6b74769392896b57 +952, 0x7f61dbcc0e328581 +953, 0x64e1df3884d0d94 +954, 0xba77dcdf23738c37 +955, 0xf8e288bc0a177475 +956, 0x4a8abfd1702ecb7d +957, 0x53f22886694736a7 +958, 0x8fc982597ced3e3 +959, 0x1bc46090f820fff7 +960, 0x8bd31f965d02229f +961, 0x65cd0cb29996ee53 +962, 0x702e0f4fcf8c2e9f +963, 0x293b77bff307a9a0 +964, 0x125a986b8b305788 +965, 0x416b0eea428ebf3c +966, 0xeac85421ab0e8469 +967, 0x7f5496095019aa68 +968, 0x1a96d7afbc708e0 +969, 0xb91262e6766e01e1 +970, 0xd0a549cc4ccc6954 +971, 0x75a9a073f50c8a0d +972, 0xae275d2c1c6cd23c +973, 0xcf159b5ec5d28fd4 +974, 0x75d0838ce9b92b +975, 0xd4eddcee6dc4677f +976, 0x6a0a8ad5df6b75b8 +977, 0x6f3fd0ef0f13ecc4 +978, 0xb75a5826c1a8f8a8 +979, 0xd47098bbc7943766 +980, 0x3d4ddd62d5f23dd1 +981, 0x760a904e4583841c +982, 0x2afeb5022b4cf1f +983, 0x66d5f653729f0a13 +984, 0x9a6a5ab62980d30f +985, 0xc332f5643bbf8d5b +986, 0x848fb702e4056a90 +987, 0xa057beaf3f9e8c5f +988, 0x6cc603e4560a6c6a +989, 0xec761811a7b23211 +990, 0xb14aa4090a82aaa5 +991, 0xe29d9d028a5b2dbb +992, 0x5564e53738d68f97 +993, 0xfabca36542eaaf3b +994, 0xb9912fcb782020a2 +995, 0xe865e01b349284fd +996, 0x540b5ff11c5f9274 +997, 0x3463f64e1e7451dc +998, 0xe15d3e2f33b735f8 +999, 0xf5433336eadef6e diff --git a/python/numpy/random/tests/data/sfc64-testset-2.csv 
b/python/numpy/random/tests/data/sfc64-testset-2.csv new file mode 100644 index 000000000..70aebd5d5 --- /dev/null +++ b/python/numpy/random/tests/data/sfc64-testset-2.csv @@ -0,0 +1,1001 @@ +seed, 0x0 +0, 0x91959e5fb96a6332 +1, 0x3c1dd8a25a7e9f21 +2, 0x657bdffc99798d9e +3, 0x1a04de320b19e022 +4, 0x65b92af0e5f3c61c +5, 0x9c84070ce8f743c0 +6, 0xbb10e573693cdb25 +7, 0xd65ea9e76b37fb6b +8, 0x503efd0e76c8ae66 +9, 0xd711dcd04c26d0f +10, 0x12f53f435814ac8c +11, 0xb392cd402cfc82bd +12, 0x461764550e06c889 +13, 0x716a48b3514e6979 +14, 0xdd0a322213c18ad7 +15, 0x6673a8ca0a05c4d7 +16, 0x2992ef333437f844 +17, 0xc4aaf7e8240b2aad +18, 0x6ab0a1af1f41474f +19, 0xb0bae400c226941d +20, 0xe5f80c2eeeab48c6 +21, 0x3832c6a93a4024bf +22, 0x280bd824fabe8368 +23, 0x66b626228321e5ff +24, 0xe0bdfba5325a307e +25, 0x3a5f65c6ef254e05 +26, 0x99ea12503cb02f94 +27, 0x5d01fd2db77d420b +28, 0x6959bf5f36b2368d +29, 0xd856e30c62b5f5be +30, 0xe33233e1d8140e66 +31, 0xb78be619d415fa8d +32, 0x4f943bb2cc63d3b +33, 0x9b1460b290952d81 +34, 0x19205d794826740e +35, 0x64617bd9d7a6a1ff +36, 0x30442124b55ea76a +37, 0xebbbc3b29d0333fc +38, 0x39235a0fe359751c +39, 0xf9629768891121aa +40, 0x32052f53f366e05a +41, 0x60cc5b412c925bc8 +42, 0xf8b7ecda1c0e5a9 +43, 0x195f036e170a2568 +44, 0xfe06d0381a9ca782 +45, 0x919d89e8b88eebbf +46, 0xa47fb30148cf0d43 +47, 0x5c983e99d5f9fd56 +48, 0xe7492cdb6a1d42cd +49, 0xf9cfe5c865b0cfd8 +50, 0x35b653367bbc3b99 +51, 0xb1d92f6f4d4e440b +52, 0x737e1d5bd87ed9c0 +53, 0x7a880ca1498f8e17 +54, 0x687dae8494f9a3f7 +55, 0x6bae1989f441d5d7 +56, 0x71ad3fa5a9195c2e +57, 0x16b3969779f5d03 +58, 0xd1bce2ac973f15b3 +59, 0xa114b1ee2ce0dcdd +60, 0x270d75c11eb1b8d5 +61, 0xc48ffa087c0a7bc +62, 0xaaf9dc48cda9848d +63, 0x8111cf10ef6e584d +64, 0x6736df6af40ee6f4 +65, 0x1a1a111682fbf98d +66, 0xeb217658e1cb3b5d +67, 0xcaf58a8b79de9dec +68, 0x25d0ffd63c88d7a1 +69, 0x4c498cd871b7f176 +70, 0x4069a6156eb0cf3c +71, 0xdf012f12edcdd867 +72, 0x7734c0ac8edb1689 +73, 0xed6960ac53dbc245 +74, 0x305e20da8868c661 +75, 0x5f0c7a3719956f95 +76, 0x66842bbe3b28895 +77, 0xb608bc9a31eac410 +78, 0xfcb17d5529503abd +79, 0x829ae5cbc29b92ee +80, 0x17f2f0027bc24f3a +81, 0x435926c33d8f44cc +82, 0x3ab899327098dbec +83, 0xaf78573b27f8ead8 +84, 0xa8b334fabcf8dc60 +85, 0xcdf3b366a6a303db +86, 0x8da9379dd62b34c8 +87, 0xb0ba511955f264a7 +88, 0x9d72e21a644f961d +89, 0xfac28382e2e7e710 +90, 0xd457065f048410aa +91, 0x1cae57d952563969 +92, 0x5a160a6223253e03 +93, 0x2c45df736d73c8bd +94, 0x7f651ebc6ad9cec5 +95, 0x77a6be96c7d2e7e7 +96, 0x1721fb1dbfd6546a +97, 0xf73f433ecff3c997 +98, 0xed1e80f680965bfe +99, 0x6705ad67a3003b30 +100, 0xac21134efcadb9f7 +101, 0x4d2ba0a91d456ac +102, 0x59da7b59434eb52b +103, 0x26c1d070fd414b5f +104, 0xed7079ddfce83d9a +105, 0x9277d21f88e0fb7a +106, 0xfae16b9a8d53d282 +107, 0xb08a0e2e405fdf7d +108, 0x2ea20df44229d6ec +109, 0x80e4634cd3612825 +110, 0xbe62e8aeba8f8a1a +111, 0x4981209769c190fb +112, 0xcec96ef14c7e1f65 +113, 0x73fe4457b47e7b53 +114, 0x1d66300677315c31 +115, 0xe26821290498c4cc +116, 0xf6110248fd8fb1c5 +117, 0x30fd7fe32dbd8be3 +118, 0x534ec9b910a2bd72 +119, 0x8f9bfe878bbf7382 +120, 0x4f4eb5295c0c2193 +121, 0xdeb22f03a913be9e +122, 0x40f716f8e2a8886c +123, 0xc65007d0e386cdb1 +124, 0x9bdd26d92b143a14 +125, 0xf644b0b77ea44625 +126, 0x75f5a53f6b01993a +127, 0xfe803e347bf41010 +128, 0x594bff5fa17bc360 +129, 0x3551edfb450373c7 +130, 0x898f9dad433615db +131, 0x923d2406daa26d49 +132, 0x99e07faccbc33426 +133, 0x7389f9ff4470f807 +134, 0xdc2a25957c6df90b +135, 0x33c6d8965ef3053f +136, 0x51a8f07e838f1ab +137, 0x91c5db369380274f +138, 
0xc37de65ac56b207e +139, 0xfcc6d2375dde7f14 +140, 0xa4e6418bff505958 +141, 0x4b8b9f78e46953c4 +142, 0x255ab2e0f93cf278 +143, 0xdf650717af3d96ef +144, 0x2caa21cba3aae2b2 +145, 0xce7e46c6f393daa4 +146, 0x1d5b3573f9997ac7 +147, 0x5280c556e850847d +148, 0x32edc31bef920ad7 +149, 0xefaa6b0b08cf2c6 +150, 0x5151c99d97b111c5 +151, 0x35ccf4bf53d17590 +152, 0xa210d7bd8697b385 +153, 0xa9419f95738fbe61 +154, 0xdeccf93a1a4fdc90 +155, 0xd0ea3365b18e7a05 +156, 0x84122df6dcd31b9a +157, 0x33040a2125cea5f5 +158, 0xfe18306a862f6d86 +159, 0xdb97c8392e5c4457 +160, 0xc3e0fa735e80e422 +161, 0x7d106ff36467a0c1 +162, 0xb9825eecc720a76d +163, 0x7fefc6f771647081 +164, 0xf5df3f5b3977bf13 +165, 0x18fb22736d36f1e0 +166, 0xadc4637b4953abfc +167, 0x174e66d3e17974bd +168, 0xf1614c51df4db5db +169, 0x6664ecde5717b293 +170, 0xd5bc5b6839265c26 +171, 0xf6ca9ce1af3f1832 +172, 0xca696789a9d506ea +173, 0x7399c246c8f9d53 +174, 0xadf49049626417e2 +175, 0xbcd84af37d09ab91 +176, 0xbb41c177f3a3fa45 +177, 0x592becc814d55302 +178, 0xa88b4e65f6cfe5f7 +179, 0xa0a55e34ff879426 +180, 0x3c2ea6aa725b42b7 +181, 0x65ac4a407b1f9521 +182, 0xde63d53f7e88b556 +183, 0x18bc76696d015f40 +184, 0xd1363f2cd4c116a8 +185, 0x2fe859be19a48e4a +186, 0x83d6099b1415e656 +187, 0x43f2cbc1a4ee6410 +188, 0xb2eca3d3421c533d +189, 0xc52b98ea3f031f5d +190, 0xfe57eb01da07e9d1 +191, 0xf9377883537a6031 +192, 0x364030c05dac7add +193, 0x6815cb06b35d4404 +194, 0xceae2d4ce31894be +195, 0xc602bcdf6062bf6a +196, 0xc8e4bd8dcc6062e3 +197, 0x9c29e87b92a1a791 +198, 0x41e626b871ca9651 +199, 0x325c3d1fb8efbcd8 +200, 0x7dbbacf8e3419fb3 +201, 0x3602e72516bb7319 +202, 0x537a008ebd94d24b +203, 0xda7714fc9d4d161d +204, 0x1c8c73700e1b621b +205, 0x2749b80937d6c939 +206, 0x76ee6abac5b14d33 +207, 0xf18d1e92cb6a8b5c +208, 0x6ce9579d9291c721 +209, 0x60523c745a40e58 +210, 0x637f837fcc901757 +211, 0x2ff71b19661dc5b3 +212, 0x393ab586326ad16f +213, 0xa0970ea30fe742b7 +214, 0x570222d7f27fe5ae +215, 0x3b5806d43fd38629 +216, 0x129a0ad7420180c5 +217, 0x1c4726355778d52c +218, 0x7c1459cf77656499 +219, 0xfe038a0932132069 +220, 0x4c4cc317a937483a +221, 0xa333d24067e926ba +222, 0x401d9b6ab37f6ef2 +223, 0x87ad0e491ebe4a2a +224, 0xfc02f312e72d121d +225, 0xfde715b3b99767b2 +226, 0xd111c342ba521c92 +227, 0x83b221b10879c617 +228, 0x6a1bf5c01fdf4277 +229, 0x166bfc0c3f5892ee +230, 0x4608d556d7c57856 +231, 0x8d786857c95ece49 +232, 0x2d357445a1aca4ac +233, 0x79620dae28ecd796 +234, 0x90e715dc0f2201c4 +235, 0x173b68b4c9f4b665 +236, 0x4e14d040ebac4eef +237, 0xbd25960b4b892e +238, 0x911a199db6f1989d +239, 0xfe822d7c601fd2e0 +240, 0x9b4c1d58d8223a69 +241, 0x907c1891283843b0 +242, 0xf4868bf54061c4b2 +243, 0x17f8cd1fc24efd85 +244, 0xd44253f9af14c3aa +245, 0x16d0da0cb911d43c +246, 0x3c6a46615828e79a +247, 0x498591c1138e11a5 +248, 0xcc0f26336d0d6141 +249, 0x4d3ebc873212309a +250, 0x16bad7792d5c2c6a +251, 0x474215a80b2bbd11 +252, 0x7159848abd8492fc +253, 0x359341c50973685f +254, 0x27512ee7bf784a4a +255, 0x45228ea080f70447 +256, 0x880cab616500d50e +257, 0x12fae93f9830d56e +258, 0x6744ee64348d9acd +259, 0x484dada28cd2a828 +260, 0x98491d0729e41863 +261, 0x2f15aac43c2863b0 +262, 0x5727a34d77a1da0f +263, 0xa435cebef6a62eed +264, 0xd211697d57b053b0 +265, 0x65aa757b68bd557 +266, 0xe3a1b7a2d8a3e06a +267, 0x2adf64e67252a7a9 +268, 0xadadcb75cadee276 +269, 0x7934bc57ac8d97bf +270, 0xccff0d0f412e0606 +271, 0x101a82aa3e8f3db9 +272, 0xb0f2498094b4575c +273, 0xba2561d9ef26ed8a +274, 0xfbcd1268fc3febe1 +275, 0x9aa10bb19eb152e0 +276, 0xf496217a601a6d72 +277, 0xe4be1e4f2fa91363 +278, 0x473a602bf3dd68eb +279, 0xfe8ed2a48c26f4b5 +280, 
0x20e94b1a00159476 +281, 0x93e1cb1c6af86ec7 +282, 0x4fcba3898f7442ba +283, 0x5150c3a3d94891df +284, 0x91cfce6c85b033ea +285, 0x625e8a832a806491 +286, 0x28c97ba72e3ec0b2 +287, 0x8e172de217c71ea1 +288, 0x926b80216c732639 +289, 0x28b19431a649ae3d +290, 0x57c039a6e95a3795 +291, 0xfbc354182fe52718 +292, 0x819dfd7c7d534cef +293, 0xabb4093a619ed44f +294, 0xe785b7ac6f656745 +295, 0xb647b4588b2f942f +296, 0x64cf870a14c72d27 +297, 0x6d4a4a2a0ba9b37e +298, 0x78bfb0427d7ce6b0 +299, 0x8dcc72b8bfc79ac6 +300, 0x1c14d915d5e76c99 +301, 0xaf48ddea6f096d79 +302, 0x51b39b67aa130d8 +303, 0x1aeeb39d4def06de +304, 0xd678092ffedfdd27 +305, 0x8f54787f325111d3 +306, 0xf2ca2e827beaa6bc +307, 0x339d134099e98545 +308, 0x1f6a8a7b33942e43 +309, 0x952c8065dbef669a +310, 0xe066aeb6690147f7 +311, 0xed25aa92cf58ebb6 +312, 0x7601edce215ef521 +313, 0xed1c5b396abd9434 +314, 0x4fd1e407535de9d5 +315, 0xccc8315a0d4d1441 +316, 0x85753e250bb86976 +317, 0xf232e469378761c3 +318, 0x81d691b8e9aef3c6 +319, 0x224a2f9cab0ad0e +320, 0x978f3d3e50007f4e +321, 0xd3713e6a6c0cbe60 +322, 0xcce8f1eadd41f80d +323, 0x34bda028a97d469 +324, 0x90e242fdf0f59183 +325, 0x4d749754fbc5f092 +326, 0x4399f5b7851cc87b +327, 0xcb921a5f25f6c5d7 +328, 0x120bf5d0162101 +329, 0x1304cc2aa352735a +330, 0xf7236c5d0d5d417b +331, 0xc31b320fc1654306 +332, 0xb468c6b23f3fb4e7 +333, 0xb5985b5bfaca4166 +334, 0x898285a1cd2f8375 +335, 0xa13493da372aa7c9 +336, 0x15c80c09c12634e7 +337, 0x9b765c5cc9d438bd +338, 0xee7da816a9201dcb +339, 0x92e269f73b5a248e +340, 0xa8086c5de81400ce +341, 0xe0053901853d42be +342, 0x821df32c012f433e +343, 0x17a6d69ca37387c7 +344, 0x2b10044bfba3501f +345, 0x8dfd262afc2e8515 +346, 0xd68c2c7b60226371 +347, 0xe81ac114e4416774 +348, 0x5896d60061ebc471 +349, 0xa996e3147811dbd1 +350, 0xa819c7b80ecb3661 +351, 0x982ad71b38afbc01 +352, 0xab152b65aa17b7fe +353, 0x4582bc282ef187ef +354, 0xab5a17fe8d9bc669 +355, 0x83664fa9cb0284b7 +356, 0x234c4b0091968f52 +357, 0x8ab5f51805688d37 +358, 0xe9e11186e0c53eda +359, 0x10df37ef1de2eccf +360, 0x780f1b0d52db968f +361, 0x50bd4ff292872cd5 +362, 0x51e681c265f5ad0 +363, 0x842c49660a527566 +364, 0x6e56ee026e9eda87 +365, 0x4cf39e40d8c80393 +366, 0x13e466df371f7e1f +367, 0xf2ce1799f38e028e +368, 0x833c8db7adc6ff0e +369, 0xc6e189abc2ec98f +370, 0xafebb3721283fec5 +371, 0xb49bc1eb5cc17bdc +372, 0xf1d02e818f5e4488 +373, 0xe5e9d5b41a1dd815 +374, 0xce8aca6573b1bfe5 +375, 0x9b0a5d70e268b1d5 +376, 0xf3c0503a8358f4de +377, 0x2681605dd755669d +378, 0xea265ca7601efc70 +379, 0xa93747f0a159439f +380, 0x62a86ede78a23e50 +381, 0xac8a18935c3d063c +382, 0x729c0a298f5059f5 +383, 0xbbf195e5b54399f4 +384, 0x38aa9d551f968900 +385, 0x3b3e700c58778caa +386, 0x68e6e33c4443957a +387, 0x7c56fc13eb269815 +388, 0xaf7daca39711804a +389, 0x50fde6d10f9544b3 +390, 0xf3d37159f6f6c03d +391, 0x82d298f5c1a71685 +392, 0x478661ac54c5002c +393, 0x6053768e1a324ae0 +394, 0xde8fb4a7e56707ea +395, 0xaa2809301faa8cf4 +396, 0x690a8d49fedd0722 +397, 0xe17c481b9c217de9 +398, 0x60d1d8a2b57288e3 +399, 0x149adfaadc6b0886 +400, 0xa3c18b6eb79cd5fa +401, 0x5774e3a091af5f58 +402, 0x2acca57ff30e5712 +403, 0x94454d67367c4b0c +404, 0x581b2985ac2df5ca +405, 0x71618e50744f3e70 +406, 0x270a7f3bd9a94ae6 +407, 0x3ef81af9bb36cd7b +408, 0x8a4a2592875254aa +409, 0x704ac6086fbb414a +410, 0xda774d5d3f57414d +411, 0xe20d3358b918ae9e +412, 0x934a6b9f7b91e247 +413, 0xf91649cde87ec42c +414, 0x248cec5f9b6ced30 +415, 0x56791809fd8d64ba +416, 0xf502b2765c1395f +417, 0x6b04ec973d75aa7f +418, 0xb0339f2794bb26f +419, 0x4c524636efbaea49 +420, 0x6bbf3876e9738748 +421, 0xf686524e754e9e24 +422, 
0x8dafa05a42d19cd3 +423, 0xc5f069ab2434008e +424, 0x4fd64cc713cba76 +425, 0xdbf93450c881ed5f +426, 0x492e278ebabb59a2 +427, 0x993fddfde4542642 +428, 0xecde68a72c8d4e52 +429, 0xe0760b3074c311fd +430, 0x68dc0e7e06528707 +431, 0x52b50edf49c0fdc7 +432, 0xb2bd4185c138f412 +433, 0x431496d7e1d86f3 +434, 0xa4e605b037e26c44 +435, 0x58236ae1f0aca2b5 +436, 0x26c72c420fc314d8 +437, 0x20134e982ab99a2b +438, 0x544b59b8b211374b +439, 0x1301c42f3a14d993 +440, 0x52a6ea740f763b0f +441, 0xf209d70c2bebf119 +442, 0xac66a4ebc2aa1be +443, 0x683713ed35878788 +444, 0x2b5578acec06b80c +445, 0x86428efa11c45b36 +446, 0xb49010adb17d291e +447, 0x73b686bd8664b6be +448, 0x6d28ebf57b6884cc +449, 0x9712091230ff58d9 +450, 0xc9c91f74c38b286 +451, 0x776310ac41dc008e +452, 0x2f3739df0bf6a88e +453, 0x5792dc62b94db675 +454, 0x5715910d024b06af +455, 0xeb1dd745458da08 +456, 0xfce7b07ccfa851a7 +457, 0xc305f1e983ac368 +458, 0x485aa9519ac00bb0 +459, 0xa5354f6589fb0ea0 +460, 0x32fee02dfdbf4454 +461, 0x4d1ddc304bbefaaa +462, 0x789a270a1737e57e +463, 0x9f3072f4b1ed8156 +464, 0x4de3c00e89058120 +465, 0xb00a02529e0a86fa +466, 0x539f6f0edd845d9a +467, 0x85e578fe15a8c001 +468, 0xa12c8e1a72cce7d8 +469, 0xc6908abbc2b1828 +470, 0xcf70090774cbb38c +471, 0x3b636a6977b45d4a +472, 0xf0a731b220680b57 +473, 0x18973929f51443a8 +474, 0xe93e1fbe7eadabe +475, 0x8233730f0a6dfa02 +476, 0x66e50b6919b0ab74 +477, 0xb1aba87c97fd08a2 +478, 0xd4dffc1fbc117ad6 +479, 0x6f7fa65724b96e6a +480, 0x4bd5800dee92e0fa +481, 0xe18a959db6256da +482, 0xe53a291bc66df487 +483, 0xb7ec306a08651806 +484, 0x1847a6b80d2821e1 +485, 0xda50391283b14d39 +486, 0xacc4d3cd7cceb97a +487, 0x57f70185165b7bc6 +488, 0x302b6d597c3aaba7 +489, 0xa47f32d037eab51e +490, 0xe1509b4408abc559 +491, 0x4f30a1d7c2934157 +492, 0x2ad03e6c60b650b2 +493, 0x334d9c337b0a9064 +494, 0xc7f442821e7aac12 +495, 0xbcdeb09298694cdd +496, 0xe42402389f8f0fb4 +497, 0xe5de56af539df727 +498, 0x7017f9b2101ee240 +499, 0x1ee5e68d5b10001d +500, 0x436229051836387a +501, 0xcd532d6d6ec38fb7 +502, 0x30a66606fdf38272 +503, 0xfdaa2ab9cf798496 +504, 0x4277b4adec70e7df +505, 0x72cfc30256e0eaef +506, 0x3c3359fd9bd34917 +507, 0xb7aa89598856efb0 +508, 0xf72226f8bf299ef5 +509, 0x258c499275a4356f +510, 0x999a56bfc7f20d76 +511, 0x2b3e7432e20c18b +512, 0x2d1251332f760cb5 +513, 0x7420e0eea62157c5 +514, 0xe85c895aa27cec3d +515, 0x27a0545c7020d57c +516, 0xc68638a65b4fff0d +517, 0xfda473983a4ea747 +518, 0xd19fe65fb4c06062 +519, 0x6b1374e050ee15e4 +520, 0x80065ecd49bc4bef +521, 0x4ee655954bc838de +522, 0xe8fb777504a72299 +523, 0x86b652ea70f4bdde +524, 0xcdc9e0fbde7e4f33 +525, 0x352c0a50cd3ac56 +526, 0x4b8605d368be75dc +527, 0x1ac9ea8129efbc37 +528, 0x470325faa99f39c5 +529, 0x25dd7ef9adccf7a1 +530, 0x5ae2c7a03e965816 +531, 0xf733d2df59dacc7d +532, 0xa05bbf0a8a1a7a70 +533, 0xe8aa3f102846ef5f +534, 0xc9b85ec49ae71789 +535, 0xb904c14ed1cb1936 +536, 0x5ae618230b5f0444 +537, 0x97987fe47b5d7467 +538, 0xabb3aca8865ca761 +539, 0x38bfdf29d4508228 +540, 0x353654f408353330 +541, 0xeb7e92930ae4ef0d +542, 0xec50f1a7ca526b96 +543, 0xd5e2dc08b5697544 +544, 0x24c7fd69d5ec32df +545, 0x6f7e1095568b8620 +546, 0x6ed9c16ca13b3c8 +547, 0xe676ef460002130f +548, 0xa3a01a3992c4b430 +549, 0xe2130406c3b1f202 +550, 0xa8f7263e2aedcd20 +551, 0xc45d71ef2e35f507 +552, 0x37155594021da7ba +553, 0x22dc94f19de73159 +554, 0x7969fc6bffc5443f +555, 0x97def7e44faa6bfe +556, 0x8b940f5e8931d71f +557, 0xd95b1dd3f1a3fdd5 +558, 0x1c83bfdca615701a +559, 0xb7fcb56279ceca6b +560, 0xd84f8950f20dcd0 +561, 0xb03343698de3cbe0 +562, 0xf64565d448d71f71 +563, 0xda52b4676e0ae662 +564, 
0xda39c2c05b4ffb91 +565, 0xb35e2560421f6a85 +566, 0x1a7b108d48ac3646 +567, 0xc4e264dc390d79ed +568, 0xa10727dfd9813256 +569, 0x40d23154e720e4f7 +570, 0xd9fa7cd7e313e119 +571, 0xcbf29107859e6013 +572, 0xc357338553d940b7 +573, 0x2641b7ab0bdfcbaa +574, 0xd12f2b6060533ae7 +575, 0xd0435aa626411c56 +576, 0x44af4a488a9cec72 +577, 0xb934232ea8fa5696 +578, 0x760a8b12072b572d +579, 0xfab18f9942cfa9b3 +580, 0x5676834c1fe84d16 +581, 0x9c54e4fddb353236 +582, 0xab49edfc9551f293 +583, 0x567f1fb45a871d +584, 0x32a967c873998834 +585, 0x99240aad380ef8d1 +586, 0x7f66cbd432859a64 +587, 0x4cdc8a4658166822 +588, 0x984e3984a5766492 +589, 0xa3b2d0a3d64d3d94 +590, 0x177f667172f2affc +591, 0xb1a90607a73a303f +592, 0xe600b6c36427f878 +593, 0xf758f9834cb7f466 +594, 0x8ee9fce4a3f36449 +595, 0xcb8f11533e7da347 +596, 0xe7cf647794dabd7c +597, 0xc9d92cfe6110806 +598, 0xea1335fa9145a1ec +599, 0xbc6c29821d094552 +600, 0x37b9d6a858cc8bc3 +601, 0xf24e4c694929893e +602, 0x55d025ce2d7d0004 +603, 0xccdc69acccf4267b +604, 0xc491c04340c222eb +605, 0xba50f75ecec9befb +606, 0x1ec7bd85b8fe3bb9 +607, 0xe4de66498c59ae8a +608, 0x38aa9e912712c889 +609, 0xcee0e43c5cc31566 +610, 0x72b69aa708fc7ed +611, 0xdff70b7f6fa96679 +612, 0xd6d71d82112aadc3 +613, 0x365177892cb78531 +614, 0xa54852b39de4f72c +615, 0x11dd5832bf16dd59 +616, 0x248a0f3369c97097 +617, 0xa14cec0260e26792 +618, 0x3517616ff142bed1 +619, 0x9b693ad39dab7636 +620, 0x739dff825e994434 +621, 0x67711e7356098c9 +622, 0xa81f8515d2fdf458 +623, 0xdac2908113fe568e +624, 0xe99944ebc6e2806a +625, 0x671728ca5b030975 +626, 0xfdad20edb2b4a789 +627, 0xedc6e466bd0369d2 +628, 0x88b5d469821f7e1b +629, 0x2eabf94049a522a5 +630, 0x247794b7a2f5a8e3 +631, 0x278942bdbe02c649 +632, 0xbe5a9a9196ab99c1 +633, 0x75955060866da1b5 +634, 0xdedcfa149273c0b5 +635, 0xdbeb7a57758f3867 +636, 0x7b9053347a2c8d5a +637, 0xa059b3f2eed338a5 +638, 0x59401a46ded3b79f +639, 0x38044ba56a6d19fb +640, 0x72c7221b4e77e779 +641, 0x526df3491a3a34da +642, 0xc3b31184ba16c0c2 +643, 0xd94c7144488624af +644, 0xcf966ee4dc373f91 +645, 0x62049e65dd416266 +646, 0x7c2adccb925bf8f +647, 0xd5fa5c22ed4ef8e1 +648, 0xd00134ebd11f2cd1 +649, 0xfbdf81767bed3634 +650, 0x62e8cc8ff66b6e26 +651, 0x3a72d6bcd4f2dcf7 +652, 0xf1cd45b1b46a86ed +653, 0x1271f98e0938bb9a +654, 0x82e6927e83dc31fa +655, 0x7b9b0e0acb67b92d +656, 0x6df503e397b2e701 +657, 0x93888f6fb561e0c3 +658, 0x393fb6069a40291 +659, 0x967a7d894cc0754d +660, 0x6e298996ad866333 +661, 0x5ff3cf5559d6ab46 +662, 0xd0d70508c40349f5 +663, 0xc64c66c0dd426b33 +664, 0x8fea340ee35c64dd +665, 0xf9cd381eb3060005 +666, 0xfcc37c2799fc0b11 +667, 0x6a37c91d65b489fa +668, 0x57231000fa0a0c9d +669, 0x55f6e292c6703f9a +670, 0xd0508ffbfa55a7a6 +671, 0x885db543276bdac8 +672, 0xc26dbe6a26b0e704 +673, 0x21f884874ebd709e +674, 0x711f0b6c8f732220 +675, 0x354d0a361eaee195 +676, 0x721344d8d30b006a +677, 0xa0e090a0d3a56f07 +678, 0x16b3d5d823a4952b +679, 0x59d7874bc9eae7b6 +680, 0x9bbb32710076455f +681, 0xd4fb22242ffabafd +682, 0xe1d4ac6770be1d89 +683, 0xb259cedebc73dc8a +684, 0x35faaa3b4246ab69 +685, 0x5d26addefdaee89 +686, 0x8e7ec350da0f3545 +687, 0xd0f316eed9f8fc79 +688, 0x98b2a52c9bf291b2 +689, 0xe4d294a8aca6a314 +690, 0x25bd554e6aa7673c +691, 0xcfde5dcba5be2a6c +692, 0xb5e01fb48d2d2107 +693, 0xe1caf28948028536 +694, 0xd434aa0a26f3ee9b +695, 0xd17723381641b8f6 +696, 0xfe73bd1f3f3768a2 +697, 0x1cc6b1abd08d67e9 +698, 0x247e328371a28de0 +699, 0x502e7942e5a9104a +700, 0x6a030fd242eb4502 +701, 0xa2ffe02744014ce8 +702, 0x59290763b18fe04e +703, 0xcf14241564271436 +704, 0xb0fb73c3c1503aff +705, 0x94e27c622f82137a +706, 
0x747a5b406ac3e1f0 +707, 0x9a914e96a732031d +708, 0x59f68c6c8f078835 +709, 0x809d012c73eb4724 +710, 0x5b3c3b73e1b37d74 +711, 0xdde60ef3ba49cdf7 +712, 0x87a14e1f9c761986 +713, 0x4109b960604522af +714, 0x122d0e1ed0eb6bb9 +715, 0xadc0d29e80bfe33 +716, 0xa25b1b44f5fc8e4e +717, 0xbab85d8a9b793f20 +718, 0x825f4cbced0e7d1e +719, 0x2d6ae8807acb37ea +720, 0x8234420adce2e39 +721, 0x4a8ad4da6b804807 +722, 0x1e19f9bc215e5245 +723, 0x1d6f4848a916dd5e +724, 0x9ac40dfcdc2d39cc +725, 0x9f3524e3086155ec +726, 0x861fffc43124b2ef +727, 0xe640e3b756396372 +728, 0x41cb0f0c5e149669 +729, 0xe0bd37e1192e4205 +730, 0x62917d3858f4ce47 +731, 0xa36e7eb4d855820a +732, 0x204b90255a3bf724 +733, 0x66ee83a0175535bc +734, 0x2c14ce7c6b0c1423 +735, 0x85d9495fa514f70d +736, 0x5a4fe45ead874dbc +737, 0xe72248dcb8cfc863 +738, 0xfc21ff2932ed98cd +739, 0xcbba1edd735b5cad +740, 0x91ddc32809679bf5 +741, 0x192cdf2c7631ea1f +742, 0xbbc451ddf2ea286f +743, 0xad9e80cae2397a64 +744, 0x6918f0119b95d0e5 +745, 0xa40379017a27d70a +746, 0x1aaeddb600e61e1 +747, 0x15afd93cbd7adda9 +748, 0x156719bc2b757ff4 +749, 0x13d9a59e2b2df49d +750, 0x9a490986eaddf0a +751, 0xef9a350f0b3eb6b4 +752, 0x5de7f6295ba4fa4d +753, 0x7f37fd087c3fdb49 +754, 0xa9fe3749d6f3f209 +755, 0x50912ac036d9bfb +756, 0x982cb4d726a441f8 +757, 0x8ca8d8af59b872d0 +758, 0x7f8adfb0ceeade8a +759, 0xdad390ec742be44 +760, 0xa637944d0045be5b +761, 0x3569a3b3af807061 +762, 0x9599da8eae14511d +763, 0xc333e8d19589b01a +764, 0xfb9b524a20b571e1 +765, 0xbd9dc8b37ce5c3e1 +766, 0x142333005fa389ac +767, 0x1368bc37cd5bcce1 +768, 0x16094907ad6ecf73 +769, 0xb32c90dbba4c1130 +770, 0x82761d97c1747dd0 +771, 0x599f9f267ae3444d +772, 0x79ad3382994852e1 +773, 0x2511f06d9ef06e54 +774, 0xb35e6ab7d5bbddae +775, 0xfca9fa83a2988732 +776, 0x7d4350f0394ac3ba +777, 0xa52a9527bb176ea3 +778, 0xb49fa0ceb2aa8353 +779, 0x1f62e504d1468cc0 +780, 0xe1a77bfccce6efc3 +781, 0x776cdff4dc0d6797 +782, 0x56612e39b652c1f2 +783, 0x5f096a29294eda04 +784, 0x7978abc3aabd8b23 +785, 0x79dd875e0485b979 +786, 0x8a98aa4d5735d778 +787, 0xcca43940f69d2388 +788, 0xb2d4b156f144f93a +789, 0xbd528a676e9a862 +790, 0x2a394939c8e7ec5e +791, 0xb1da900c6efe4abc +792, 0x9869af479de4c034 +793, 0x78dbdfb88ac7c1db +794, 0x18cb169143088041 +795, 0xe69e5461c51a3e13 +796, 0x5389fa16ea98183c +797, 0xed7c80d1be1ea520 +798, 0x87246fc359758ced +799, 0xab323eba95fae4ed +800, 0xbc4c0dde7f8a1828 +801, 0xdb739f7955610b1a +802, 0xecd8c68c3434cc +803, 0x138c2eb88c477f44 +804, 0x28a65f96727aae41 +805, 0xdee879f2cf5629d +806, 0x684f0c90ef20070f +807, 0xa24a819ef5621800 +808, 0x8d0054f870e4fdcb +809, 0x99e8c6e695b600b +810, 0x50b705245891f7c3 +811, 0xc02eed3a6e58e51a +812, 0x443d64e95443606c +813, 0xca24959cfbd2d120 +814, 0xe072609ea48815bc +815, 0xbcc715026590315b +816, 0x3e76df24d7aa5938 +817, 0xd8ff04940d9b79ae +818, 0x54474ce790059bcd +819, 0x278390dd6aa70e81 +820, 0xf4df619fe35414e4 +821, 0x757d71270264e615 +822, 0x1e8a373699c11b23 +823, 0xef68c82046e67dd6 +824, 0xe280006599972620 +825, 0x234e095183b0f4d6 +826, 0xe3b7560ed9839749 +827, 0xcd5ec4086572332e +828, 0xc41c0d4aaa279108 +829, 0x4b9cd6126bc16a6d +830, 0x4a7252734f3e3dd0 +831, 0xb3132df156cc103a +832, 0xf9e4abbf7b64464a +833, 0xf936df27fb3c47b7 +834, 0x9142960873f6d71a +835, 0x4ba6aa3235cdb10d +836, 0x3237a2e765ba7766 +837, 0xd62f0b94c8e99e54 +838, 0x26b682f90a3ae41b +839, 0x40ad5e82072b6f81 +840, 0xd0198101f5484000 +841, 0xe4fac60ba11c332 +842, 0x472d0b0a95ef9d38 +843, 0x8512557aec5a3d8f +844, 0xef83169d3efd4de9 +845, 0x53fe89283e7a7676 +846, 0x2f50933053d69fc4 +847, 0x76f5e4362e2e53a2 +848, 
0x8676fdccce28874a +849, 0x2737764c1fb1f821 +850, 0x4a6f70afc066ab55 +851, 0x27f8e151e310fca4 +852, 0xd606960ccbe85161 +853, 0xcce51d7ddd270a32 +854, 0xb4235999794875c2 +855, 0x580084e358e884 +856, 0x2159d5e6dc8586d7 +857, 0x87bd54d8599b3ba4 +858, 0x3e9ade6a2181664 +859, 0x5e6e140406d97623 +860, 0x511545d5aa0080a2 +861, 0xf49d78ed219aac57 +862, 0xbece1f9c90b8ea87 +863, 0x1c741cac36a2c514 +864, 0x7453c141047db967 +865, 0xd751832a5037eba2 +866, 0x71370a3f30ada1f7 +867, 0x7c01cf2dcb408631 +868, 0x1052a4fbdccc0fa1 +869, 0x13d525c9df3fb6c +870, 0xa3aa8dbfee760c55 +871, 0xc0288d200f5155cf +872, 0x79f4bcd12af567c3 +873, 0x8160d163bb548755 +874, 0x5cf2995fb69fd2df +875, 0xcc98ed01396639df +876, 0xad95f1d9cfc8256e +877, 0xa3df27d9fbdbfb9d +878, 0x83e5f5dda4d52929 +879, 0x9adc05043009f55b +880, 0xdfe8329dfde1c001 +881, 0x9980ccdd5298e6a2 +882, 0x636a7bd134f6ef56 +883, 0xef5ff780c4be6ba4 +884, 0x290d71dc77a56d16 +885, 0x6d65db9ff58de1e6 +886, 0x944b063b3805a696 +887, 0xce468ca2cce33008 +888, 0x5ba1ccb840f80f48 +889, 0x28ddce36fc9ad268 +890, 0x4f77ef254d507a21 +891, 0xce9b4057fadf3ab +892, 0xb518bc68298730e6 +893, 0xd2eb5b8e2ec665b0 +894, 0xe1583303a4f87344 +895, 0x9d5a0df4fbe1bed5 +896, 0x2ba9bc03ec8cfd07 +897, 0x479ed880a96ca669 +898, 0xcedf96338324771a +899, 0x312f4fc2da41ffaa +900, 0xa0eb9cf23b5e1ed8 +901, 0xf8f88f975dc3f539 +902, 0x4a37e185d0e96e0f +903, 0xf829654a5c0b46f9 +904, 0x3909cca7a7f8c7fb +905, 0x4c2e1d66ceb45105 +906, 0xaffaa19e1db8af87 +907, 0x9ec498246bd18c76 +908, 0x21d51558edc089da +909, 0xe8984112cd1b1561 +910, 0x7de1d2cf54b0c0e1 +911, 0xa06729aed50bfb9d +912, 0xcf19f733e5db19e1 +913, 0x70edf2624ab777cd +914, 0x46685becad10e078 +915, 0x825e0f6add46785 +916, 0x66d4af3b15f70de4 +917, 0xc676614b0666b21 +918, 0x282a916c864f5cb7 +919, 0x2707283a3f512167 +920, 0x37ff3afda7461623 +921, 0xc767eb1205e4ca86 +922, 0x46b359aecc4ea25b +923, 0x67fbbb797a16dbb1 +924, 0x64fd4ba57122290e +925, 0x8acc2a8ae59d8fac +926, 0x64a49298599acc67 +927, 0xedf00de67177ce30 +928, 0x1ea9d8d7e76d2d2c +929, 0x363fcac323f70eb2 +930, 0x19e6e3ec8a9712eb +931, 0xca541e96b0961f09 +932, 0x4d8fd34c2822ec46 +933, 0x2fdd56a50b32f705 +934, 0xaac2fcf251e3fd3 +935, 0xb0c600299e57045c +936, 0xd951ec589e909e38 +937, 0x4dc8414390cae508 +938, 0x537ef9d5e2321344 +939, 0xa57bc21fd31aa2dc +940, 0xa3a60df564183750 +941, 0xbe69a5ce2e369fb6 +942, 0x7744601f4c053ec8 +943, 0x3838452af42f2612 +944, 0xd4f0dad7115a54e9 +945, 0x629cf68d8009a624 +946, 0x2211c8fa34cb98cb +947, 0x8040b19e2213db83 +948, 0xb2a86d3ba2384fd +949, 0x4b85cec4f93f0dab +950, 0xc8d212d21ea6845d +951, 0x5b271a03a4fe2be0 +952, 0xff4f671319ad8434 +953, 0x8e615a919d5afa96 +954, 0xea7f47c53161160a +955, 0x33273930b13c6efc +956, 0x98eedda27fb59c3c +957, 0x188dc5e92e939677 +958, 0x9dbd0fa0911430f1 +959, 0x5b3dcf3fa75dfd2b +960, 0x3f03846febdb275d +961, 0x20cc24faea9e9cf6 +962, 0x854f3ac66199ff5d +963, 0x31169ac99d341e6f +964, 0xa85daed3c0bc1bbe +965, 0x64633711e71ba5dd +966, 0x530e79978dc73334 +967, 0x636f2ee6e20aef13 +968, 0xf6220f8b6d9a58fb +969, 0x425db8fa32141a7b +970, 0xac7c210f4b02be95 +971, 0x5fe8cfbe197a7754 +972, 0xfff7d40c79420ea +973, 0x5f8bab9ef4697b77 +974, 0xaf6fe54e45b23fe8 +975, 0xce79456ccc70bbce +976, 0x645ef680f48f1c00 +977, 0xa4dfac46e2028595 +978, 0x6bece4c41effc5df +979, 0xd316df886442641f +980, 0xa4f6ff994edd2a6 +981, 0x30281ae3cc49abe4 +982, 0x39acb7b663dea974 +983, 0x5e8829b01a7c06fb +984, 0x87bdb08cf027f13e +985, 0xdfa5ede784e802f6 +986, 0x46d03d55711c38cc +987, 0xa55a961fc9788306 +988, 0xbf09ded495a2e57a +989, 0xcd601b29a639cc16 +990, 
0x2193ce026bfd1085 +991, 0x25ba27f3f225be13 +992, 0x6f685be82f64f2fe +993, 0xec8454108229c450 +994, 0x6e79d8d205447a44 +995, 0x9ed7b6a96b9ccd68 +996, 0xae7134b3b7f8ee37 +997, 0x66963de0e5ebcc02 +998, 0x29c8dcd0d17c423f +999, 0xfb8482c827eb90bc diff --git a/python/numpy/random/tests/data/sfc64_np126.pkl.gz b/python/numpy/random/tests/data/sfc64_np126.pkl.gz new file mode 100644 index 000000000..94fbceb38 Binary files /dev/null and b/python/numpy/random/tests/data/sfc64_np126.pkl.gz differ diff --git a/python/numpy/random/tests/test_direct.py b/python/numpy/random/tests/test_direct.py new file mode 100644 index 000000000..6f069e488 --- /dev/null +++ b/python/numpy/random/tests/test_direct.py @@ -0,0 +1,592 @@ +import os +import sys +from os.path import join + +import pytest + +import numpy as np +from numpy.random import ( + MT19937, + PCG64, + PCG64DXSM, + SFC64, + Generator, + Philox, + RandomState, + SeedSequence, + default_rng, +) +from numpy.random._common import interface +from numpy.testing import ( + assert_allclose, + assert_array_equal, + assert_equal, + assert_raises, +) + +try: + import cffi # noqa: F401 + + MISSING_CFFI = False +except ImportError: + MISSING_CFFI = True + +try: + import ctypes # noqa: F401 + + MISSING_CTYPES = False +except ImportError: + # ctypes is genuinely missing here, so mark it as such; otherwise the + # skipif guard on the ctypes tests below can never trigger + MISSING_CTYPES = True + +if sys.flags.optimize > 1: + # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 + # cffi cannot succeed + MISSING_CFFI = True + + +pwd = os.path.dirname(os.path.abspath(__file__)) + + +def assert_state_equal(actual, target): + for key in actual: + if isinstance(actual[key], dict): + assert_state_equal(actual[key], target[key]) + elif isinstance(actual[key], np.ndarray): + assert_array_equal(actual[key], target[key]) + else: + assert actual[key] == target[key] + + +def uint32_to_float32(u): + return ((u >> np.uint32(8)) * (1.0 / 2**24)).astype(np.float32) + + +def uniform32_from_uint64(x): + x = np.uint64(x) + upper = np.array(x >> np.uint64(32), dtype=np.uint32) + lower = np.uint64(0xffffffff) + lower = np.array(x & lower, dtype=np.uint32) + joined = np.column_stack([lower, upper]).ravel() + return uint32_to_float32(joined) + + +def uniform32_from_uint53(x): + x = np.uint64(x) >> np.uint64(16) + x = np.uint32(x & np.uint64(0xffffffff)) + return uint32_to_float32(x) + + +def uniform32_from_uint32(x): + return uint32_to_float32(x) + + +def uniform32_from_uint(x, bits): + if bits == 64: + return uniform32_from_uint64(x) + elif bits == 53: + return uniform32_from_uint53(x) + elif bits == 32: + return uniform32_from_uint32(x) + else: + raise NotImplementedError + + +def uniform_from_uint(x, bits): + if bits in (64, 63, 53): + return uniform_from_uint64(x) + elif bits == 32: + return uniform_from_uint32(x) + + +def uniform_from_uint64(x): + return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0) + + +def uniform_from_uint32(x): + out = np.empty(len(x) // 2) + for i in range(0, len(x), 2): + a = x[i] >> 5 + b = x[i + 1] >> 6 + out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0 + return out + + +def uniform_from_dsfmt(x): + return x.view(np.double) - 1.0 + + +def gauss_from_uint(x, n, bits): + if bits in (64, 63): + doubles = uniform_from_uint64(x) + elif bits == 32: + doubles = uniform_from_uint32(x) + else: # bits == 'dsfmt' + doubles = uniform_from_dsfmt(x) + gauss = [] + loc = 0 + x1 = x2 = 0.0 + while len(gauss) < n: + r2 = 2 + while r2 >= 1.0 or r2 == 0.0: + x1 = 2.0 * doubles[loc] - 1.0 + x2 = 2.0 * doubles[loc + 1] - 1.0 + r2 = x1 * x1 + x2 * x2 + loc += 2 + + f = 
np.sqrt(-2.0 * np.log(r2) / r2) + gauss.append(f * x2) + gauss.append(f * x1) + + return gauss[:n] + + +def test_seedsequence(): + from numpy.random.bit_generator import ( + ISeedSequence, + ISpawnableSeedSequence, + SeedlessSeedSequence, + ) + + s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6) + s1.spawn(10) + s2 = SeedSequence(**s1.state) + assert_equal(s1.state, s2.state) + assert_equal(s1.n_children_spawned, s2.n_children_spawned) + + # The interfaces cannot be instantiated themselves. + assert_raises(TypeError, ISeedSequence) + assert_raises(TypeError, ISpawnableSeedSequence) + dummy = SeedlessSeedSequence() + assert_raises(NotImplementedError, dummy.generate_state, 10) + assert len(dummy.spawn(10)) == 10 + + +def test_generator_spawning(): + """ Test spawning new generators and bit_generators directly. + """ + rng = np.random.default_rng() + seq = rng.bit_generator.seed_seq + new_ss = seq.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(5)] + assert [c.spawn_key for c in new_ss] == expected_keys + + new_bgs = rng.bit_generator.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(5, 10)] + assert [bg.seed_seq.spawn_key for bg in new_bgs] == expected_keys + + new_rngs = rng.spawn(5) + expected_keys = [seq.spawn_key + (i,) for i in range(10, 15)] + found_keys = [rng.bit_generator.seed_seq.spawn_key for rng in new_rngs] + assert found_keys == expected_keys + + # Sanity check that streams are actually different: + assert new_rngs[0].uniform() != new_rngs[1].uniform() + + +def test_non_spawnable(): + from numpy.random.bit_generator import ISeedSequence + + class FakeSeedSequence: + def generate_state(self, n_words, dtype=np.uint32): + return np.zeros(n_words, dtype=dtype) + + ISeedSequence.register(FakeSeedSequence) + + rng = np.random.default_rng(FakeSeedSequence()) + + with pytest.raises(TypeError, match="The underlying SeedSequence"): + rng.spawn(5) + + with pytest.raises(TypeError, match="The underlying SeedSequence"): + rng.bit_generator.spawn(5) + + +class Base: + dtype = np.uint64 + data2 = data1 = {} + + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [] + + @classmethod + def _read_csv(cls, filename): + with open(filename) as csv: + seed = csv.readline() + seed = seed.split(',') + seed = [int(s.strip(), 0) for s in seed[1:]] + data = [] + for line in csv: + data.append(int(line.split(',')[-1].strip(), 0)) + return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)} + + def test_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data1['data']) + + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw() + assert_equal(uints, self.data1['data'][0]) + + bit_generator = self.bit_generator(*self.data2['seed']) + uints = bit_generator.random_raw(1000) + assert_equal(uints, self.data2['data']) + + def test_random_raw(self): + bit_generator = self.bit_generator(*self.data1['seed']) + uints = bit_generator.random_raw(output=False) + assert uints is None + uints = bit_generator.random_raw(1000, output=False) + assert uints is None + + def test_gauss_inv(self): + n = 25 + rs = RandomState(self.bit_generator(*self.data1['seed'])) + gauss = rs.standard_normal(n) + assert_allclose(gauss, + gauss_from_uint(self.data1['data'], n, self.bits)) + + rs = 
RandomState(self.bit_generator(*self.data2['seed'])) + gauss = rs.standard_normal(25) + assert_allclose(gauss, + gauss_from_uint(self.data2['data'], n, self.bits)) + + def test_uniform_double(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals)) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float64) + + def test_uniform_float(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + vals = uniform32_from_uint(self.data1['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + rs = Generator(self.bit_generator(*self.data2['seed'])) + vals = uniform32_from_uint(self.data2['data'], self.bits) + uniforms = rs.random(len(vals), dtype=np.float32) + assert_allclose(uniforms, vals) + assert_equal(uniforms.dtype, np.float32) + + def test_repr(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in repr(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs) + + def test_str(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + assert 'Generator' in str(rs) + assert str(self.bit_generator.__name__) in str(rs) + assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs) + + def test_pickle(self): + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + bitgen_pkl = pickle.dumps(bit_generator) + reloaded = pickle.loads(bitgen_pkl) + reloaded_state = reloaded.state + assert_array_equal(Generator(bit_generator).standard_normal(1000), + Generator(reloaded).standard_normal(1000)) + assert bit_generator is not reloaded + assert_state_equal(reloaded_state, state) + + ss = SeedSequence(100) + aa = pickle.loads(pickle.dumps(ss)) + assert_equal(ss.state, aa.state) + + def test_pickle_preserves_seed_sequence(self): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + ss = bit_generator.seed_seq + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + bit_generator.seed_seq.spawn(10) + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.n_children_spawned, ss_plk.n_children_spawned) + + def test_invalid_state_type(self): + bit_generator = self.bit_generator(*self.data1['seed']) + with pytest.raises(TypeError): + bit_generator.state = {'1'} + + def test_invalid_state_value(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + state['bit_generator'] = 'otherBitGenerator' + with pytest.raises(ValueError): + bit_generator.state = state + + def test_invalid_init_type(self): + bit_generator = self.bit_generator + for st in self.invalid_init_types: + with pytest.raises(TypeError): + bit_generator(*st) + + def test_invalid_init_values(self): + bit_generator = self.bit_generator + for st in self.invalid_init_values: + with pytest.raises((ValueError, OverflowError)): + bit_generator(*st) + + def test_benchmark(self): + bit_generator = 
self.bit_generator(*self.data1['seed']) + bit_generator._benchmark(1) + bit_generator._benchmark(1, 'double') + with pytest.raises(ValueError): + bit_generator._benchmark(1, 'int32') + + @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available') + def test_cffi(self): + bit_generator = self.bit_generator(*self.data1['seed']) + cffi_interface = bit_generator.cffi + assert isinstance(cffi_interface, interface) + other_cffi_interface = bit_generator.cffi + assert other_cffi_interface is cffi_interface + + @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available') + def test_ctypes(self): + bit_generator = self.bit_generator(*self.data1['seed']) + ctypes_interface = bit_generator.ctypes + assert isinstance(ctypes_interface, interface) + other_ctypes_interface = bit_generator.ctypes + assert other_ctypes_interface is ctypes_interface + + def test_getstate(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + alt_state = bit_generator.__getstate__() + assert isinstance(alt_state, tuple) + assert_state_equal(state, alt_state[0]) + assert isinstance(alt_state[1], SeedSequence) + +class TestPhilox(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/philox-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/philox-testset-2.csv')) + cls.seed_error_type = TypeError + cls.invalid_init_types = [] + cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)] + + def test_set_key(self): + bit_generator = self.bit_generator(*self.data1['seed']) + state = bit_generator.state + keyed = self.bit_generator(counter=state['state']['counter'], + key=state['state']['key']) + assert_state_equal(bit_generator.state, keyed.state) + + +class TestPCG64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + def test_advance_large(self): + rs = Generator(self.bit_generator(38219308213743)) + pcg = rs.bit_generator + state = pcg.state["state"] + initial_state = 287608843259529770491897792873167516365 + assert state["state"] == initial_state + pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1))) + state = pcg.state["state"] + advanced_state = 135275564607035429730177404003164635391 + assert state["state"] == advanced_state + + +class TestPCG64DXSM(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), 
([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_advance_symmetry(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + state = rs.bit_generator.state + step = -0x9e3779b97f4a7c150000000000000000 + rs.bit_generator.advance(step) + val_neg = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(2**128 + step) + val_pos = rs.integers(10) + rs.bit_generator.state = state + rs.bit_generator.advance(10 * 2**128 + step) + val_big = rs.integers(10) + assert val_neg == val_pos + assert val_big == val_pos + + def test_advance_large(self): + rs = Generator(self.bit_generator(38219308213743)) + pcg = rs.bit_generator + state = pcg.state + initial_state = 287608843259529770491897792873167516365 + assert state["state"]["state"] == initial_state + pcg.advance(sum(2**i for i in (96, 64, 32, 16, 8, 4, 2, 1))) + state = pcg.state["state"] + advanced_state = 277778083536782149546677086420637664879 + assert state["state"] == advanced_state + + +class TestMT19937(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.bits = 32 + cls.dtype = np.uint32 + cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv')) + cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv')) + cls.seed_error_type = ValueError + cls.invalid_init_types = [] + cls.invalid_init_values = [(-1,)] + + def test_seed_float_array(self): + assert_raises(TypeError, self.bit_generator, np.array([np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([-np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi])) + assert_raises(TypeError, self.bit_generator, np.array([0, np.pi])) + assert_raises(TypeError, self.bit_generator, [np.pi]) + assert_raises(TypeError, self.bit_generator, [0, np.pi]) + + def test_state_tuple(self): + rs = Generator(self.bit_generator(*self.data1['seed'])) + bit_generator = rs.bit_generator + state = bit_generator.state + desired = rs.integers(2 ** 16) + tup = (state['bit_generator'], state['state']['key'], + state['state']['pos']) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + tup = tup + (0, 0.0) + bit_generator.state = tup + actual = rs.integers(2 ** 16) + assert_equal(actual, desired) + + +class TestSFC64(Base): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.bits = 64 + cls.dtype = np.uint64 + cls.data1 = cls._read_csv( + join(pwd, './data/sfc64-testset-1.csv')) + cls.data2 = cls._read_csv( + join(pwd, './data/sfc64-testset-2.csv')) + cls.seed_error_type = (ValueError, TypeError) + cls.invalid_init_types = [(3.2,), ([None],), (1, None)] + cls.invalid_init_values = [(-1,)] + + def test_legacy_pickle(self): + # Pickling format was changed in 2.0.x + import gzip + import pickle + + expected_state = np.array( + [ + 9957867060933711493, + 532597980065565856, + 14769588338631205282, + 13 + ], + dtype=np.uint64 + ) + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join(base_path, "data", "sfc64_np126.pkl.gz") + with gzip.open(pkl_file) as gz: + sfc = pickle.load(gz) + + assert isinstance(sfc, SFC64) + assert_equal(sfc.state["state"]["state"], expected_state) + + +class TestDefaultRNG: + def test_seed(self): + for args in [(), (None,), (1234,), ([1234, 5678],)]: + rg = default_rng(*args) + assert isinstance(rg.bit_generator, PCG64) + + def test_passthrough(self): + bg = Philox() + rg = default_rng(bg) + assert rg.bit_generator is bg + rg2 = default_rng(rg) + assert rg2 is rg + assert 
rg2.bit_generator is bg + + def test_coercion_RandomState_Generator(self): + # use default_rng to coerce RandomState to Generator + rs = RandomState(1234) + rg = default_rng(rs) + assert isinstance(rg.bit_generator, MT19937) + assert rg.bit_generator is rs._bit_generator + + # RandomState with a non MT19937 bit generator + _original = np.random.get_bit_generator() + bg = PCG64(12342298) + np.random.set_bit_generator(bg) + rs = np.random.mtrand._rand + rg = default_rng(rs) + assert rg.bit_generator is bg + + # vital to get global state back to original, otherwise + # other tests start to fail. + np.random.set_bit_generator(_original) diff --git a/python/numpy/random/tests/test_extending.py b/python/numpy/random/tests/test_extending.py new file mode 100644 index 000000000..7a079d636 --- /dev/null +++ b/python/numpy/random/tests/test_extending.py @@ -0,0 +1,127 @@ +import os +import shutil +import subprocess +import sys +import sysconfig +import warnings +from importlib.util import module_from_spec, spec_from_file_location + +import pytest + +import numpy as np +from numpy.testing import IS_EDITABLE, IS_WASM + +try: + import cffi +except ImportError: + cffi = None + +if sys.flags.optimize > 1: + # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1 + # cffi cannot succeed + cffi = None + +try: + with warnings.catch_warnings(record=True) as w: + # numba issue gh-4733 + warnings.filterwarnings('always', '', DeprecationWarning) + import numba +except (ImportError, SystemError): + # Certain numpy/numba versions trigger a SystemError due to a numba bug + numba = None + +try: + import cython + from Cython.Compiler.Version import version as cython_version +except ImportError: + cython = None +else: + from numpy._utils import _pep440 + # Note: keep in sync with the one in pyproject.toml + required_version = '3.0.6' + if _pep440.parse(cython_version) < _pep440.Version(required_version): + # too old or wrong cython, skip the test + cython = None + + +@pytest.mark.skipif( + IS_EDITABLE, + reason='Editable install cannot find .pxd headers' +) +@pytest.mark.skipif( + sys.platform == "win32" and sys.maxsize < 2**32, + reason="Failing in 32-bit Windows wheel build job, skip for now" +) +@pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") +@pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', + reason='Meson unable to find MSVC linker on win-arm64') +@pytest.mark.slow +def test_cython(tmp_path): + import glob + # build the examples in a temporary directory + srcdir = os.path.join(os.path.dirname(__file__), '..') + shutil.copytree(srcdir, tmp_path / 'random') + build_dir = tmp_path / 'random' / '_examples' / 'cython' + target_dir = build_dir / "build" + os.makedirs(target_dir, exist_ok=True) + # Ensure we use the correct Python interpreter even when `meson` is + # installed in a different Python environment (see gh-24956) + native_file = str(build_dir / 'interpreter-native-file.ini') + with open(native_file, 'w') as f: + f.write("[binaries]\n") + f.write(f"python = '{sys.executable}'\n") + f.write(f"python3 = '{sys.executable}'") + if sys.platform == "win32": + subprocess.check_call(["meson", "setup", + "--buildtype=release", + "--vsenv", "--native-file", native_file, + str(build_dir)], + cwd=target_dir, + ) + else: + subprocess.check_call(["meson", "setup", + "--native-file", native_file, str(build_dir)], + cwd=target_dir + ) + subprocess.check_call(["meson", "compile", "-vv"], cwd=target_dir) + + # gh-16162: 
make sure numpy's __init__.pxd was used for cython + # not really part of this test, but it is a convenient place to check + + g = glob.glob(str(target_dir / "*" / "extending.pyx.c")) + with open(g[0]) as fid: + txt_to_find = 'NumPy API declarations from "numpy/__init__' + for line in fid: + if txt_to_find in line: + break + else: + assert False, f"Could not find '{txt_to_find}' in C file, wrong pxd used" + # import without adding the directory to sys.path + suffix = sysconfig.get_config_var('EXT_SUFFIX') + + def load(modname): + so = (target_dir / modname).with_suffix(suffix) + spec = spec_from_file_location(modname, so) + mod = module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + # test that the module can be imported + load("extending") + load("extending_cpp") + # actually test the cython c-extension + extending_distributions = load("extending_distributions") + from numpy.random import PCG64 + values = extending_distributions.uniforms_ex(PCG64(0), 10, 'd') + assert values.shape == (10,) + assert values.dtype == np.float64 + +@pytest.mark.skipif(numba is None or cffi is None, + reason="requires numba and cffi") +def test_numba(): + from numpy.random._examples.numba import extending # noqa: F401 + +@pytest.mark.skipif(cffi is None, reason="requires cffi") +def test_cffi(): + from numpy.random._examples.cffi import extending # noqa: F401 diff --git a/python/numpy/random/tests/test_generator_mt19937.py b/python/numpy/random/tests/test_generator_mt19937.py new file mode 100644 index 000000000..68a1f7c63 --- /dev/null +++ b/python/numpy/random/tests/test_generator_mt19937.py @@ -0,0 +1,2809 @@ +import hashlib +import os.path +import sys + +import pytest + +import numpy as np +from numpy.exceptions import AxisError +from numpy.linalg import LinAlgError +from numpy.random import MT19937, Generator, RandomState, SeedSequence +from numpy.testing import ( + IS_WASM, + assert_, + assert_allclose, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + assert_warns, + suppress_warnings, +) + +random = Generator(MT19937()) + +JUMP_TEST_DATA = [ + { + "seed": 0, + "steps": 10, + "initial": {"key_sha256": "bb1636883c2707b51c5b7fc26c6927af4430f2e0785a8c7bc886337f919f9edf", "pos": 9}, # noqa: E501 + "jumped": {"key_sha256": "ff682ac12bb140f2d72fba8d3506cf4e46817a0db27aae1683867629031d8d55", "pos": 598}, # noqa: E501 + }, + { + "seed": 384908324, + "steps": 312, + "initial": {"key_sha256": "16b791a1e04886ccbbb4d448d6ff791267dc458ae599475d08d5cced29d11614", "pos": 311}, # noqa: E501 + "jumped": {"key_sha256": "a0110a2cf23b56be0feaed8f787a7fc84bef0cb5623003d75b26bdfa1c18002c", "pos": 276}, # noqa: E501 + }, + { + "seed": [839438204, 980239840, 859048019, 821], + "steps": 511, + "initial": {"key_sha256": "d306cf01314d51bd37892d874308200951a35265ede54d200f1e065004c3e9ea", "pos": 510}, # noqa: E501 + "jumped": {"key_sha256": "0e00ab449f01a5195a83b4aee0dfbc2ce8d46466a640b92e33977d2e42f777f8", "pos": 475}, # noqa: E501 + }, +] + + +@pytest.fixture(scope='module', params=[True, False]) +def endpoint(request): + return request.param + + +class TestSeed: + def test_scalar(self): + s = Generator(MT19937(0)) + assert_equal(s.integers(1000), 479) + s = Generator(MT19937(4294967295)) + assert_equal(s.integers(1000), 324) + + def test_array(self): + s = Generator(MT19937(range(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937(np.arange(10))) + assert_equal(s.integers(1000), 465) + s = Generator(MT19937([0])) + 
assert_equal(s.integers(1000), 479) + s = Generator(MT19937([4294967295])) + assert_equal(s.integers(1000), 324) + + def test_seedsequence(self): + s = MT19937(SeedSequence(0)) + assert_equal(s.random_raw(1), 2058676884) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, MT19937, -0.5) + assert_raises(ValueError, MT19937, -1) + + def test_invalid_array(self): + # seed must be an unsigned integer + assert_raises(TypeError, MT19937, [-0.5]) + assert_raises(ValueError, MT19937, [-1]) + assert_raises(ValueError, MT19937, [1, -2, 4294967296]) + + def test_noninstantized_bitgen(self): + assert_raises(ValueError, Generator, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.integers(-5, -1) < -1) + x = random.integers(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random = Generator(MT19937(1432985819)) + non_contig = random.multinomial(100, pvals=pvals) + random = Generator(MT19937(1432985819)) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + random = Generator(MT19937(1432985819)) + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + +class TestMultivariateHypergeometric: + + def setup_method(self): + self.seed = 8675309 + + def test_argument_validation(self): + # Error cases... 
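+ # Hedged summary (not from the upstream test): a valid call needs a
+ # 1-d sequence of nonnegative integer `colors` and an `nsample` with
+ # 0 <= nsample <= sum(colors). For example,
+ # random.multivariate_hypergeometric([2, 3, 4], 5) is fine since
+ # 5 <= 9; the cases below violate these constraints or internal
+ # size limits.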
+ + # `colors` must be a 1-d sequence + assert_raises(ValueError, random.multivariate_hypergeometric, + 10, 4) + + # Negative nsample + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], -1) + + # Negative color + assert_raises(ValueError, random.multivariate_hypergeometric, + [-1, 2, 3], 2) + + # nsample exceeds sum(colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [2, 3, 4], 10) + + # nsample exceeds sum(colors) (edge case of empty colors) + assert_raises(ValueError, random.multivariate_hypergeometric, + [], 1) + + # Validation errors associated with very large values in colors. + assert_raises(ValueError, random.multivariate_hypergeometric, + [999999999, 101], 5, 1, 'marginals') + + int64_info = np.iinfo(np.int64) + max_int64 = int64_info.max + max_int64_index = max_int64 // int64_info.dtype.itemsize + assert_raises(ValueError, random.multivariate_hypergeometric, + [max_int64_index - 100, 101], 5, 1, 'count') + + @pytest.mark.parametrize('method', ['count', 'marginals']) + def test_edge_cases(self, method): + # Set the seed, but in fact, all the results in this test are + # deterministic, so we don't really need this. + random = Generator(MT19937(self.seed)) + + x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([], 0, method=method) + assert_array_equal(x, []) + + x = random.multivariate_hypergeometric([], 0, size=1, method=method) + assert_array_equal(x, np.empty((1, 0), dtype=np.int64)) + + x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method) + assert_array_equal(x, [0, 0, 0]) + + x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method) + assert_array_equal(x, [3, 0, 0]) + + colors = [1, 1, 0, 1, 1] + x = random.multivariate_hypergeometric(colors, sum(colors), + method=method) + assert_array_equal(x, colors) + + x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3, + method=method) + assert_array_equal(x, [[3, 4, 5]] * 3) + + # Cases for nsample: + # nsample < 10 + # 10 <= nsample < colors.sum()/2 + # colors.sum()/2 < nsample < colors.sum() - 10 + # colors.sum() - 10 < nsample < colors.sum() + @pytest.mark.parametrize('nsample', [8, 25, 45, 55]) + @pytest.mark.parametrize('method', ['count', 'marginals']) + @pytest.mark.parametrize('size', [5, (2, 3), 150000]) + def test_typical_cases(self, nsample, method, size): + random = Generator(MT19937(self.seed)) + + colors = np.array([10, 5, 20, 25]) + sample = random.multivariate_hypergeometric(colors, nsample, size, + method=method) + if isinstance(size, int): + expected_shape = (size,) + colors.shape + else: + expected_shape = size + colors.shape + assert_equal(sample.shape, expected_shape) + assert_((sample >= 0).all()) + assert_((sample <= colors).all()) + assert_array_equal(sample.sum(axis=-1), + np.full(size, fill_value=nsample, dtype=int)) + if isinstance(size, int) and size >= 100000: + # This sample is large enough to compare its mean to + # the expected values. 
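+ # Note (illustrative): the target mean is the marginal expectation of
+ # the multivariate hypergeometric distribution; drawing nsample items
+ # without replacement, the expected count of color i is
+ # nsample * colors[i] / colors.sum(). For nsample=55 and
+ # colors=[10, 5, 20, 25] that is roughly [9.17, 4.58, 18.33, 22.92].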
+ assert_allclose(sample.mean(axis=0), + nsample * colors / colors.sum(), + rtol=1e-3, atol=0.005) + + def test_repeatability1(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5, + method='count') + expected = np.array([[2, 1, 2], + [2, 1, 2], + [1, 1, 3], + [2, 0, 3], + [2, 1, 2]]) + assert_array_equal(sample, expected) + + def test_repeatability2(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 50, + size=5, + method='marginals') + expected = np.array([[ 9, 17, 24], + [ 7, 13, 30], + [ 9, 15, 26], + [ 9, 17, 24], + [12, 14, 24]]) + assert_array_equal(sample, expected) + + def test_repeatability3(self): + random = Generator(MT19937(self.seed)) + sample = random.multivariate_hypergeometric([20, 30, 50], 12, + size=5, + method='marginals') + expected = np.array([[2, 3, 7], + [5, 3, 4], + [2, 5, 5], + [5, 3, 4], + [1, 5, 6]]) + assert_array_equal(sample, expected) + + +class TestSetState: + def setup_method(self): + self.seed = 1234567890 + self.rg = Generator(MT19937(self.seed)) + self.bit_generator = self.rg.bit_generator + self.state = self.bit_generator.state + self.legacy_state = (self.state['bit_generator'], + self.state['state']['key'], + self.state['state']['pos']) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. + old = self.rg.standard_normal(size=3) + self.bit_generator.state = self.state + new = self.rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.rg.standard_normal() + state = self.bit_generator.state + old = self.rg.standard_normal(size=3) + self.bit_generator.state = state + new = self.rg.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. 
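+ # Note (illustrative): if n were truncated to an integer, n=0.5 would
+ # collapse to n=0, which is not a valid shape parameter, so this call
+ # succeeding shows the float value is used directly.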
+ self.rg.negative_binomial(0.5, 0.5) + + +class TestIntegers: + rfunc = random.integers + + # valid integer/boolean types + itype = [bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self, endpoint): + assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float) + + def test_bounds_checking(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint, + dtype=dt) + + assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd], [lbnd], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, [0], + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [ubnd + 1], [ubnd], + endpoint=endpoint, dtype=dt) + + def test_bounds_checking_array(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint) + + assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [lbnd] * 2, + [ubnd + 1] * 2, endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2, + endpoint=endpoint, dtype=dt) + assert_raises(ValueError, self.rfunc, [1] * 2, 0, + endpoint=endpoint, dtype=dt) + + def test_rng_zero_and_extremes(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + is_open = not endpoint + + tgt = ubnd - 1 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000, + endpoint=endpoint, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc(tgt, tgt + is_open, size=1000, + endpoint=endpoint, dtype=dt), tgt) + assert_equal(self.rfunc([tgt], [tgt + is_open], + size=1000, endpoint=endpoint, dtype=dt), + tgt) + + def test_rng_zero_and_extremes_array(self, endpoint): + size = 1000 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + tgt = ubnd - 1 + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + 
assert_equal(self.rfunc([tgt], [tgt + 1], + size=size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, dtype=dt), tgt) + assert_equal(self.rfunc( + [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt) + + def test_full_range(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_full_range_array(self, endpoint): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + try: + self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self, endpoint): + # Don't use fixed seed + random = Generator(MT19937()) + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16, + endpoint=endpoint, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint, + dtype=bool) + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_scalar_array_equiv(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + size = 1000 + random = Generator(MT19937(1234)) + scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + scalar_array = random.integers([lbnd], [ubnd], size=size, + endpoint=endpoint, dtype=dt) + + random = Generator(MT19937(1234)) + array = random.integers([lbnd] * size, [ubnd] * + size, size=size, endpoint=endpoint, dtype=dt) + assert_array_equal(scalar, scalar_array) + assert_array_equal(scalar, array) + + def test_repeatability(self, endpoint): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. 
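+ # Sketch of how such a digest can be reproduced (assumes a
+ # little-endian host and endpoint=False; illustrative, not part of
+ # the original test):
+ # val = Generator(MT19937(1234)).integers(0, 6, size=1000,
+ # dtype=np.int64)
+ # hashlib.sha256(val).hexdigest() # should match tgt['int64']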
+ tgt = {'bool': '053594a9b82d656f967c54869bc6970aa0358cf94ad469c81478459c6a90eee3', # noqa: E501 + 'int16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'int32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'int64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'int8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1', # noqa: E501 + 'uint16': '54de9072b6ee9ff7f20b58329556a46a447a8a29d67db51201bf88baa6e4e5d4', # noqa: E501 + 'uint32': 'd3a0d5efb04542b25ac712e50d21f39ac30f312a5052e9bbb1ad3baa791ac84b', # noqa: E501 + 'uint64': '14e224389ac4580bfbdccb5697d6190b496f91227cf67df60989de3d546389b1', # noqa: E501 + 'uint8': '0e203226ff3fbbd1580f15da4621e5f7164d0d8d6b51696dd42d004ece2cbec1'} # noqa: E501 + + for dt in self.itype[1:]: + random = Generator(MT19937(1234)) + + # view as little endian for hash + if sys.byteorder == 'little': + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt) + else: + val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint, + dtype=dt).byteswap() + + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random = Generator(MT19937(1234)) + val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint, + dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + def test_repeatability_broadcasting(self, endpoint): + for dt in self.itype: + lbnd = 0 if dt in (bool, np.bool) else np.iinfo(dt).min + ubnd = 2 if dt in (bool, np.bool) else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # view as little endian for hash + random = Generator(MT19937(1234)) + val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint, + dtype=dt) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint, + dtype=dt) + + assert_array_equal(val, val_bc) + + random = Generator(MT19937(1234)) + val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000, + endpoint=endpoint, dtype=dt) + + assert_array_equal(val, val_bc) + + @pytest.mark.parametrize( + 'bound, expected', + [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612, + 3769704066, 1170797179, 4108474671])), + (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613, + 3769704067, 1170797180, 4108474672])), + (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673, + 1831631863, 1215661561, 3869512430]))] + ) + def test_repeatability_32bit_boundary(self, bound, expected): + for size in [None, len(expected)]: + random = Generator(MT19937(1234)) + x = random.integers(bound, size=size) + assert_equal(x, expected if size is not None else expected[0]) + + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[1622936284, 3620788691, 1659384060], + [1417365545, 760222891, 1909653332], + [3788118662, 660249498, 4092002593]], + [[3625610153, 2979601262, 3844162757], + [ 685800658, 120261497, 2694012896], + [1207779440, 1586594375, 3854335050]], + [[3004074748, 2310761796, 3012642217], + [2067714190, 2786677879, 1363865881], + [ 791663441, 1867303284, 2169727960]], + [[1939603804, 1250951100, 298950036], + [1040128489, 3791912209, 3317053765], + [3155528714, 61360675, 2305155588]], + [[ 817688762, 1335621943, 3288952434], + [1770890872, 1102951817, 1957607470], + [3099996017, 798043451, 48334215]]]) + for size in [None, (5, 3, 3)]: + 
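+ # Note (illustrative): low=[[-1], [0], [1]] has shape (3, 1) and the
+ # high list has shape (3,), so the bounds broadcast to (3, 3);
+ # `desired` stacks five such draws, hence size=(5, 3, 3).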
random = Generator(MT19937(12345)) + x = random.integers([[-1], [0], [1]], + [2**32 - 1, 2**32, 2**32 + 1], + size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def test_int64_uint64_broadcast_exceptions(self, endpoint): + configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)), + np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0), + (-2**63 - 1, -2**63 - 1))} + for dtype in configs: + for config in configs[dtype]: + low, high = config + high = high - endpoint + low_a = np.array([[low] * 10]) + high_a = np.array([high] * 10) + assert_raises(ValueError, random.integers, low, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_a, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low, high_a, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_a, high_a, + endpoint=endpoint, dtype=dtype) + + low_o = np.array([[low] * 10], dtype=object) + high_o = np.array([high] * 10, dtype=object) + assert_raises(ValueError, random.integers, low_o, high, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low, high_o, + endpoint=endpoint, dtype=dtype) + assert_raises(ValueError, random.integers, low_o, high_o, + endpoint=endpoint, dtype=dtype) + + def test_int64_uint64_corner_case(self, endpoint): + # When stored in Numpy arrays, `lbnd` is casted + # as np.int64, and `ubnd` is casted as np.uint64. + # Checking whether `lbnd` >= `ubnd` used to be + # done solely via direct comparison, which is incorrect + # because when Numpy tries to compare both numbers, + # it casts both to np.float64 because there is + # no integer superset of np.int64 and np.uint64. However, + # `ubnd` is too large to be represented in np.float64, + # causing it be round down to np.iinfo(np.int64).max, + # leading to a ValueError because `lbnd` now equals + # the new `ubnd`. + + dt = np.int64 + tgt = np.iinfo(np.int64).max + lbnd = np.int64(np.iinfo(np.int64).max) + ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint) + + # None of these function calls should + # generate a ValueError now. 
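+ # Worked example of the rounding described above (illustrative):
+ # float(2**63 - 1) == 9223372036854775808.0 (int64 max rounds up)
+ # float(2**63) == 9223372036854775808.0 (exactly representable)
+ # so a float64 comparison sees lbnd == ubnd even though the integer
+ # values differ by one.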
+ actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(actual, tgt) + + def test_respect_dtype_singleton(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool if dt is bool else dt + + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + + for dt in (bool, int): + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + + # gh-7284: Ensure that we get Python data types + sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt) + assert not hasattr(sample, 'dtype') + assert_equal(type(sample), dt) + + def test_respect_dtype_array(self, endpoint): + # See gh-7203 + for dt in self.itype: + lbnd = 0 if dt is bool else np.iinfo(dt).min + ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 + ubnd = ubnd - 1 if endpoint else ubnd + dt = np.bool if dt is bool else dt + + sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt) + assert_equal(sample.dtype, dt) + sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint, + dtype=dt) + assert_equal(sample.dtype, dt) + + def test_zero_size(self, endpoint): + # See gh-7203 + for dt in self.itype: + sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt) + assert sample.shape == (3, 0, 4) + assert sample.dtype == dt + assert self.rfunc(0, -10, 0, endpoint=endpoint, + dtype=dt).shape == (0,) + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + + def test_error_byteorder(self): + other_byteord_dt = 'i4' + with pytest.raises(ValueError): + random.integers(0, 200, size=10, dtype=other_byteord_dt) + + # chi2max is the maximum acceptable chi-squared value. + @pytest.mark.slow + @pytest.mark.parametrize('sample_size,high,dtype,chi2max', + [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25 + (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30 + (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25 + (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25 + ]) + def test_integers_small_dtype_chisquared(self, sample_size, high, + dtype, chi2max): + # Regression test for gh-14774. + samples = random.integers(high, size=sample_size, dtype=dtype) + + values, counts = np.unique(samples, return_counts=True) + expected = sample_size / high + chi2 = ((counts - expected)**2 / expected).sum() + assert chi2 < chi2max + + +class TestRandomDist: + # Make sure the random distribution returns the correct value for a + # given seed + + def setup_method(self): + self.seed = 1234567890 + + def test_integers(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2)) + desired = np.array([[-80, -56], [41, 37], [-83, -16]]) + assert_array_equal(actual, desired) + + def test_integers_masked(self): + # Test masked rejection sampling algorithm to generate array of + # uint32 in an interval. 
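+ # Hedged sketch of the masked-rejection idea (mirrors the documented
+ # approach, not necessarily the exact internals): to draw uniformly
+ # from [0, rng], take mask = 2**k - 1, the smallest all-ones value
+ # >= rng, compute candidate = raw_bits & mask, and reject candidates
+ # greater than rng. For integers(0, 99) here, rng = 98 and mask = 127.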
+ random = Generator(MT19937(self.seed)) + actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32) + desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32) + assert_array_equal(actual, desired) + + def test_integers_closed(self): + random = Generator(MT19937(self.seed)) + actual = random.integers(-99, 99, size=(3, 2), endpoint=True) + desired = np.array([[-80, -56], [41, 38], [-83, -15]]) + assert_array_equal(actual, desired) + + def test_integers_max_int(self): + # Tests whether integers with closed=True can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + actual = random.integers(np.iinfo('l').max, np.iinfo('l').max, + endpoint=True) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + + def test_random(self): + random = Generator(MT19937(self.seed)) + actual = random.random((3, 2)) + desired = np.array([[0.096999199829214, 0.707517457682192], + [0.084364834598269, 0.767731206553125], + [0.665069021359413, 0.715487190596693]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random = Generator(MT19937(self.seed)) + actual = random.random() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_random_float(self): + random = Generator(MT19937(self.seed)) + actual = random.random((3, 2)) + desired = np.array([[0.0969992 , 0.70751746], # noqa: E203 + [0.08436483, 0.76773121], + [0.66506902, 0.71548719]]) + assert_array_almost_equal(actual, desired, decimal=7) + + def test_random_float_scalar(self): + random = Generator(MT19937(self.seed)) + actual = random.random(dtype=np.float32) + desired = 0.0969992 + assert_array_almost_equal(actual, desired, decimal=7) + + @pytest.mark.parametrize('dtype, uint_view_type', + [(np.float32, np.uint32), + (np.float64, np.uint64)]) + def test_random_distribution_of_lsb(self, dtype, uint_view_type): + random = Generator(MT19937(self.seed)) + sample = random.random(100000, dtype=dtype) + num_ones_in_lsb = np.count_nonzero(sample.view(uint_view_type) & 1) + # The probability of a 1 in the least significant bit is 0.25. + # With a sample size of 100000, the probability that num_ones_in_lsb + # is outside the following range is less than 5e-11. 
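+ # Rough check of that bound (illustrative): the count is
+ # Binomial(100000, 0.25) with mean 25000 and
+ # std = sqrt(100000 * 0.25 * 0.75) ~= 136.9; the window below is
+ # mean +/- 900, about 6.6 standard deviations, and the two-sided
+ # normal tail beyond 6.6 sigma is on the order of 5e-11.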
+ assert 24100 < num_ones_in_lsb < 25900 + + def test_random_unsupported_type(self): + assert_raises(TypeError, random.random, dtype='int32') + + def test_choice_uniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4) + desired = np.array([0, 0, 2, 2], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([0, 1, 0, 1], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False) + desired = np.array([2, 0, 3], dtype=np.int64) + assert_array_equal(actual, desired) + actual = random.choice(4, 4, replace=False, shuffle=False) + desired = np.arange(4, dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([0, 2, 3], dtype=np.int64) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random = Generator(MT19937(self.seed)) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['a', 'a', 'c', 'c']) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_default_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3) + desired = np.array([[0, 1], [0, 1], [4, 5]]) + assert_array_equal(actual, desired) + + def test_choice_multidimensional_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1) + desired = np.array([[0], [2], [4], [6]]) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = () + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + 
assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.integers(0, -10, size=0).shape, (0,)) + assert_equal(random.integers(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random = Generator(MT19937(self.seed)) + non_contig = random.choice(5, 3, p=p[::2]) + random = Generator(MT19937(self.seed)) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_choice_return_type(self): + # gh 9867 + p = np.ones(4) / 4. + actual = random.choice(4, 2) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, replace=False) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p) + assert actual.dtype == np.int64 + actual = random.choice(4, 2, p=p, replace=False) + assert actual.dtype == np.int64 + + def test_choice_large_sample(self): + choice_hash = '4266599d12bfcfb815213303432341c06b4349f5455890446578877bb322e222' + random = Generator(MT19937(self.seed)) + actual = random.choice(10000, 5000, replace=False) + if sys.byteorder != 'little': + actual = actual.byteswap() + res = hashlib.sha256(actual.view(np.int8)).hexdigest() + assert_(choice_hash == res) + + def test_choice_array_size_empty_tuple(self): + random = Generator(MT19937(self.seed)) + assert_array_equal(random.choice([1, 2, 3], size=()), np.array(1), + strict=True) + assert_array_equal(random.choice([[1, 2, 3]], size=()), [1, 2, 3]) + assert_array_equal(random.choice([[1]], size=()), [1], strict=True) + assert_array_equal(random.choice([[1]], size=(), axis=1), [1], + strict=True) + + def test_bytes(self): + random = Generator(MT19937(self.seed)) + actual = random.bytes(10) + desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", object, (1,)), + ("b", 
np.int32, (1,))])]: + random = Generator(MT19937(self.seed)) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7]) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis(self): + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=1) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = np.arange(16).reshape((4, 4)) + random.shuffle(actual, axis=-1) + assert_array_equal(actual, desired) + + def test_shuffle_custom_axis_empty(self): + random = Generator(MT19937(self.seed)) + desired = np.array([]).reshape((0, 6)) + for axis in (0, 1): + actual = np.array([]).reshape((0, 6)) + random.shuffle(actual, axis=axis) + assert_array_equal(actual, desired) + + def test_shuffle_axis_nonsquare(self): + y1 = np.arange(20).reshape(2, 10) + y2 = y1.copy() + random = Generator(MT19937(self.seed)) + random.shuffle(y1, axis=1) + random = Generator(MT19937(self.seed)) + random.shuffle(y2.T) + assert_array_equal(y1, y2) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(AxisError, random.shuffle, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(AxisError, random.shuffle, arr, 3) + assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None)) + arr = [[1, 2, 3], [4, 5, 6]] + assert_raises(NotImplementedError, random.shuffle, arr, 1) + + arr = np.array(3) + assert_raises(TypeError, random.shuffle, arr) + arr = np.ones((3, 2)) + assert_raises(AxisError, random.shuffle, arr, 2) + + def test_shuffle_not_writeable(self): + random = Generator(MT19937(self.seed)) + a = np.zeros(5) + a.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.shuffle(a) + + def test_permutation(self): + random = Generator(MT19937(self.seed)) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7] + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + bad_x_str = "abcd" + assert_raises(AxisError, random.permutation, bad_x_str) + + bad_x_float = 1.2 + assert_raises(AxisError, random.permutation, bad_x_float) + + random = Generator(MT19937(self.seed)) + integer_val = 10 + desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6] + + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_permutation_custom_axis(self): + a = np.arange(16).reshape((4, 4)) + desired = np.array([[ 0, 3, 1, 2], + [ 4, 7, 5, 6], + [ 8, 11, 9, 10], + [12, 15, 13, 14]]) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=1) + assert_array_equal(actual, desired) + random = Generator(MT19937(self.seed)) + actual = random.permutation(a, axis=-1) + 
assert_array_equal(actual, desired) + + def test_permutation_exceptions(self): + random = Generator(MT19937(self.seed)) + arr = np.arange(10) + assert_raises(AxisError, random.permutation, arr, 1) + arr = np.arange(9).reshape((3, 3)) + assert_raises(AxisError, random.permutation, arr, 3) + assert_raises(TypeError, random.permutation, arr, slice(1, 2, None)) + + @pytest.mark.parametrize("dtype", [int, object]) + @pytest.mark.parametrize("axis, expected", + [(None, np.array([[3, 7, 0, 9, 10, 11], + [8, 4, 2, 5, 1, 6]])), + (0, np.array([[6, 1, 2, 9, 10, 11], + [0, 7, 8, 3, 4, 5]])), + (1, np.array([[ 5, 3, 4, 0, 2, 1], + [11, 9, 10, 6, 8, 7]]))]) + def test_permuted(self, dtype, axis, expected): + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + random.permuted(x, axis=axis, out=x) + assert_array_equal(x, expected) + + random = Generator(MT19937(self.seed)) + x = np.arange(12).reshape(2, 6).astype(dtype) + y = random.permuted(x, axis=axis) + assert y.dtype == dtype + assert_array_equal(y, expected) + + def test_permuted_with_strides(self): + random = Generator(MT19937(self.seed)) + x0 = np.arange(22).reshape(2, 11) + x1 = x0.copy() + x = x0[:, ::3] + y = random.permuted(x, axis=1, out=x) + expected = np.array([[0, 9, 3, 6], + [14, 20, 11, 17]]) + assert_array_equal(y, expected) + x1[:, ::3] = expected + # Verify that the original x0 was modified in-place as expected. + assert_array_equal(x1, x0) + + def test_permuted_empty(self): + y = random.permuted([]) + assert_array_equal(y, []) + + @pytest.mark.parametrize('outshape', [(2, 3), 5]) + def test_permuted_out_with_wrong_shape(self, outshape): + a = np.array([1, 2, 3]) + out = np.zeros(outshape, dtype=a.dtype) + with pytest.raises(ValueError, match='same shape'): + random.permuted(a, out=out) + + def test_permuted_out_with_wrong_type(self): + out = np.zeros((3, 5), dtype=np.int32) + x = np.ones((3, 5)) + with pytest.raises(TypeError, match='Cannot cast'): + random.permuted(x, axis=1, out=out) + + def test_permuted_not_writeable(self): + x = np.zeros((2, 5)) + x.flags.writeable = False + with pytest.raises(ValueError, match='read-only'): + random.permuted(x, axis=1, out=x) + + def test_beta(self): + random = Generator(MT19937(self.seed)) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.083029353267698e-10, 2.449965303168024e-11], + [2.397085162969853e-02, 3.590779671820755e-08], + [2.830254190078299e-04, 1.744709918330393e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[42, 41], + [42, 48], + [44, 50]]) + assert_array_equal(actual, desired) + + random = Generator(MT19937(self.seed)) + actual = random.binomial(100.123, .456) + desired = 42 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[32.9850547060149, 39.0219480493301], + [56.2006134779419, 57.3474165711485], + [55.4243733880198, 55.4209797925213]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.5439892869558927, 0.45601071304410745], + [0.5588917345860708, 0.4411082654139292 ]], # noqa: E202 + [[0.5632074165063435, 
0.43679258349365657], + [0.54862581112627, 0.45137418887373015]], + [[0.49961831357047226, 0.5003816864295278 ], # noqa: E202 + [0.52374806183482, 0.47625193816517997]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random = Generator(MT19937(self.seed)) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + # gh-15876 + assert_raises(ValueError, random.dirichlet, [[5, 1]]) + assert_raises(ValueError, random.dirichlet, [[5], [1]]) + assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]]) + assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]])) + + def test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + random = Generator(MT19937(self.seed)) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random = Generator(MT19937(self.seed)) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_dirichlet_small_alpha(self): + eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc. + alpha = eps * np.array([1., 1.0e-3]) + random = Generator(MT19937(self.seed)) + actual = random.dirichlet(alpha, size=(3, 2)) + expected = np.array([ + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]], + [[1., 0.], + [1., 0.]] + ]) + assert_array_almost_equal(actual, expected, decimal=15) + + @pytest.mark.slow + def test_dirichlet_moderately_small_alpha(self): + # Use alpha.max() < 0.1 to trigger stick breaking code path + alpha = np.array([0.02, 0.04, 0.03]) + exact_mean = alpha / alpha.sum() + random = Generator(MT19937(self.seed)) + sample = random.dirichlet(alpha, size=20000000) + sample_mean = sample.mean(axis=0) + assert_allclose(sample_mean, exact_mean, rtol=1e-3) + + # This set of parameters includes inputs with alpha.max() >= 0.1 and + # alpha.max() < 0.1 to exercise both generation methods within the + # dirichlet code. 
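+ # Rough sketch of why a separate small-alpha path exists: with very
+ # small alpha the usual gamma-variate construction underflows (each
+ # standard_gamma draw rounds to 0.0, making the normalizing sum
+ # 0/0), so generation switches to a stick-breaking construction
+ # from beta variates, roughly X_i = B_i * prod_{j<i}(1 - B_j) with
+ # B_i ~ Beta(alpha_i, sum_{j>i} alpha_j), which stays well defined.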
+ @pytest.mark.parametrize( + 'alpha', + [[5, 9, 0, 8], + [0.5, 0, 0, 0], + [1, 5, 0, 0, 1.5, 0, 0, 0], + [0.01, 0.03, 0, 0.005], + [1e-5, 0, 0, 0], + [0.002, 0.015, 0, 0, 0.04, 0, 0, 0], + [0.0], + [0, 0, 0]], + ) + def test_dirichlet_multiple_zeros_in_alpha(self, alpha): + alpha = np.array(alpha) + y = random.dirichlet(alpha) + assert_equal(y[alpha == 0], 0.0) + + def test_exponential(self): + random = Generator(MT19937(self.seed)) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[0.098845481066258, 1.560752510746964], + [0.075730916041636, 1.769098974710777], + [1.488602544592235, 2.49684815275751 ]]) # noqa: E202 + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + random = Generator(MT19937(self.seed)) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[0.461720027077085, 1.100441958872451], + [1.100337455217484, 0.91421736740018 ], # noqa: E202 + [0.500811891303113, 0.826802454552058]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random = Generator(MT19937(self.seed)) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[ 5.03850858902096, 7.9228656732049 ], # noqa: E202 + [18.73983605132985, 19.57961681699238], + [18.17897755150825, 18.17653912505234]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random = Generator(MT19937(self.seed)) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[1, 11], + [1, 12], + [11, 17]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random = Generator(MT19937(self.seed)) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[ 4.688397515056245, -0.289514845417841], + [ 4.981176042584683, -0.633224272589149], + [-0.055915275687488, -0.333962478257953]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) 
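+ # The nbad == 0 and ngood == 0 branches of the next test are the
+ # two degenerate ends of the hypergeometric: with no bad items
+ # every draw must equal nsample, and with no good items every draw
+ # must be 0.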
+ + def test_hypergeometric(self): + random = Generator(MT19937(self.seed)) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[ 9, 9], + [ 9, 9], + [10, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random = Generator(MT19937(self.seed)) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.156353949272393, 1.195863024830054], + [-3.435458081645966, 1.656882398925444], + [ 0.924824032467446, 1.251116432209336]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-4.338584631510999, 1.890171436749954], + [-4.64547787337966 , 2.514545562919217], # noqa: E203 + [ 1.495389489198666, 1.967827627577474]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random = Generator(MT19937(self.seed)) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[ 0.0268252166335, 13.9534486483053], + [ 0.1204014788936, 2.2422077497792], + [ 4.2484199496128, 12.0093343977523]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random = Generator(MT19937(self.seed)) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[14, 17], + [3, 18], + [5, 1]]) + assert_array_equal(actual, desired) + + def test_logseries_zero(self): + random = Generator(MT19937(self.seed)) + assert random.logseries(0) == 1 + + @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.]) + def test_logseries_exceptions(self, value): + random = Generator(MT19937(self.seed)) + with np.errstate(invalid="ignore"): + with pytest.raises(ValueError): + random.logseries(value) + with pytest.raises(ValueError): + # contiguous path: + random.logseries(np.array([value] * 10)) + with pytest.raises(ValueError): + # non-contiguous path: + random.logseries(np.array([value] * 10)[::2]) + + def test_multinomial(self): + random = Generator(MT19937(self.seed)) + actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) + desired = np.array([[[1, 5, 1, 6, 4, 3], + [4, 2, 6, 2, 4, 2]], + [[5, 3, 2, 6, 3, 1], + [4, 4, 0, 2, 3, 7]], + [[6, 3, 1, 5, 3, 2], + [5, 5, 3, 1, 2, 4]]]) + assert_array_equal(actual, desired) + + @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm") + @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"]) + def test_multivariate_normal(self, method): + random = Generator(MT19937(self.seed)) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = random.multivariate_normal(mean, cov, size, method=method) + desired = np.array([[[-1.747478062846581, 11.25613495182354 ], # noqa: E202 + [-0.9967333370066214, 10.342002097029821]], + [[ 0.7850019631242964, 11.181113712443013], + [ 0.8901349653255224, 8.873825399642492]], + [[ 0.7130260107430003, 9.551628690083056], + [ 0.7127098726541128, 11.991709234143173]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = random.multivariate_normal(mean, cov, method=method) + desired = np.array([0.233278563284287, 9.424140804347195]) + assert_array_almost_equal(actual, desired, decimal=15) + # Check that non symmetric covariance input raises exception when + # check_valid='raises' if using default svd method. + mean = [0, 0] + cov = [[1, 2], [1, 2]] + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + cov = [[1, 2], [2, 1]] + assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) + assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov, + method='eigh') + assert_raises(LinAlgError, random.multivariate_normal, mean, cov, + method='cholesky') + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise', method='eigh') + + # check degenerate samples from singular covariance matrix + cov = [[1, 1], [1, 1]] + if method in ('svd', 'eigh'): + samples = random.multivariate_normal(mean, cov, size=(3, 2), + method=method) + assert_array_almost_equal(samples[..., 0], samples[..., 1], + decimal=6) + else: + assert_raises(LinAlgError, random.multivariate_normal, mean, cov, + method='cholesky') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with suppress_warnings() as sup: + random.multivariate_normal(mean, cov, method=method) + w = sup.record(RuntimeWarning) + assert len(w) == 0 + + mu = np.zeros(2) + cov = np.eye(2) + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='other') + assert_raises(ValueError, random.multivariate_normal, + np.zeros((2, 1, 1)), cov) + assert_raises(ValueError, random.multivariate_normal, + mu, np.empty((3, 2))) + assert_raises(ValueError, random.multivariate_normal, + mu, np.eye(3)) + + @pytest.mark.parametrize('mean, cov', [([0], [[1 + 1j]]), ([0j], [[1]])]) + def test_multivariate_normal_disallow_complex(self, mean, cov): + random = Generator(MT19937(self.seed)) + with pytest.raises(TypeError, match="must not be complex"): + random.multivariate_normal(mean, cov) + + @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"]) + def 
test_multivariate_normal_basic_stats(self, method): + random = Generator(MT19937(self.seed)) + n_s = 1000 + mean = np.array([1, 2]) + cov = np.array([[2, 1], [1, 2]]) + s = random.multivariate_normal(mean, cov, size=(n_s,), method=method) + s_center = s - mean + cov_emp = (s_center.T @ s_center) / (n_s - 1) + # these are pretty loose and are only designed to detect major errors + assert np.all(np.abs(s_center.mean(-2)) < 0.1) + assert np.all(np.abs(cov_emp - cov) < 0.2) + + def test_negative_binomial(self): + random = Generator(MT19937(self.seed)) + actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[543, 727], + [775, 760], + [600, 674]]) + assert_array_equal(actual, desired) + + def test_negative_binomial_exceptions(self): + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.negative_binomial, 100, np.nan) + assert_raises(ValueError, random.negative_binomial, 100, + [np.nan] * 10) + + def test_negative_binomial_p0_exception(self): + # Verify that p=0 raises an exception. + with assert_raises(ValueError): + x = random.negative_binomial(1, 0) + + def test_negative_binomial_invalid_p_n_combination(self): + # Verify that values of p and n that would result in an overflow + # or infinite loop raise an exception. + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.negative_binomial, 2**62, 0.1) + assert_raises(ValueError, random.negative_binomial, [2**62], [0.1]) + + def test_noncentral_chisquare(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[ 1.70561552362133, 15.97378184942111], + [13.71483425173724, 20.17859633310629], + [11.3615477156643 , 3.67891108738029]]) # noqa: E203 + assert_array_almost_equal(actual, desired, decimal=14) + + actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04], + [1.14554372041263e+00, 1.38187755933435e-03], + [1.90659181905387e+00, 1.21772577941822e+00]]) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[0.82947954590419, 1.80139670767078], + [6.58720057417794, 7.00491463609814], + [6.31101879073157, 6.30982307753005]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[0.060310671139 , 0.23866058175939], # noqa: E203 + [0.86860246709073, 0.2668510459738 ], # noqa: E202 + [0.23375780078364, 1.88922102885943]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random = Generator(MT19937(self.seed)) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[-3.618412914693162, 2.635726692647081], + [-2.116923463013243, 0.807460983059643], + [ 1.446547137248593, 2.485684213886024]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) 
+ + def test_pareto(self): + random = Generator(MT19937(self.seed)) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04], + [7.2640150889064703e-01, 3.4650454783825594e+05], + [4.5852344481994740e+04, 6.5851383009539105e+07]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random = Generator(MT19937(self.seed)) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [0, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('int64').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random = Generator(MT19937(self.seed)) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02], + [2.482442984543471e-10, 1.527108843266079e-01], + [8.188283434244285e-02, 3.950547209346948e-01]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random = Generator(MT19937(self.seed)) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[4.19494429102666, 16.66920198906598], + [3.67184544902662, 17.74695521962917], + [16.27935397855501, 21.08355560691792]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) 
+ + def test_standard_cauchy(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_cauchy(size=(3, 2)) + desired = np.array([[-1.489437778266206, -3.275389641569784], + [ 0.560102864910406, -0.680780916282552], + [-1.314912905226277, 0.295852965660225]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_exponential(size=(3, 2), method='inv') + desired = np.array([[0.102031839440643, 1.229350298474972], + [0.088137284693098, 1.459859985522667], + [1.093830802293668, 1.256977002164613]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_expoential_type_error(self): + assert_raises(TypeError, random.standard_exponential, dtype=np.int32) + + def test_standard_gamma(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[0.62970724056362, 1.22379851271008], + [3.899412530884 , 4.12479964250139], # noqa: E203 + [3.74994102464584, 3.74929307690815]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gammma_scalar_float(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_gamma(3, dtype=np.float32) + desired = 2.9242148399353027 + assert_array_almost_equal(actual, desired, decimal=6) + + def test_standard_gamma_float(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[0.62971, 1.2238], + [3.89941, 4.1248], + [3.74994, 3.74929]]) + assert_array_almost_equal(actual, desired, decimal=5) + + def test_standard_gammma_float_out(self): + actual = np.zeros((3, 2), dtype=np.float32) + random = Generator(MT19937(self.seed)) + random.standard_gamma(10.0, out=actual, dtype=np.float32) + desired = np.array([[10.14987, 7.87012], + [ 9.46284, 12.56832], + [13.82495, 7.81533]], dtype=np.float32) + assert_array_almost_equal(actual, desired, decimal=5) + + random = Generator(MT19937(self.seed)) + random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32) + assert_array_almost_equal(actual, desired, decimal=5) + + def test_standard_gamma_unknown_type(self): + assert_raises(TypeError, random.standard_gamma, 1., + dtype='int32') + + def test_out_size_mismatch(self): + out = np.zeros(10) + assert_raises(ValueError, random.standard_gamma, 10.0, size=20, + out=out) + assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1), + out=out) + + def test_standard_gamma_0(self): + assert_equal(random.standard_gamma(shape=0), 0) + assert_raises(ValueError, random.standard_gamma, shape=-0.) 
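+ # As the tests above read, the out= contract is strict: out must
+ # already have the requested dtype, a size= that disagrees with
+ # out.shape raises ValueError, and an unsupported dtype raises
+ # TypeError rather than being cast.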
+ + def test_standard_normal(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_normal(size=(3, 2)) + desired = np.array([[-1.870934851846581, 1.25613495182354 ], # noqa: E202 + [-1.120190126006621, 0.342002097029821], + [ 0.661545174124296, 1.181113712443012]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_normal_unsupported_type(self): + assert_raises(TypeError, random.standard_normal, dtype=np.int32) + + def test_standard_t(self): + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df=10, size=(3, 2)) + desired = np.array([[-1.484666193042647, 0.30597891831161], + [ 1.056684299648085, -0.407312602088507], + [ 0.130704414281157, -2.038053410490321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + random = Generator(MT19937(self.seed)) + actual = random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[ 7.86664070590917, 13.6313848513185 ], # noqa: E202 + [ 7.68152445215983, 14.36169131136546], + [13.16105603911429, 13.72341621856971]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[2.13306255040998 , 7.816987531021207], # noqa: E203 + [2.015436610109887, 8.377577533009589], + [7.421792588856135, 7.891185744455209]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_uniform_zero_range(self): + func = random.uniform + result = func(1.5, 1.5) + assert_allclose(result, 1.5) + result = func([0.0, np.pi], [0.0, np.pi]) + assert_allclose(result, [0.0, np.pi]) + result = func([[2145.12], [2145.12]], [2145.12, 2145.12]) + assert_allclose(result, 2145.12 + np.zeros((2, 2))) + + def test_uniform_neg_range(self): + func = random.uniform + assert_raises(ValueError, func, 2, 1) + assert_raises(ValueError, func, [1, 2], [1, 1]) + assert_raises(ValueError, func, [[0, 1], [2, 3]], 2) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[ 1.107972248690106, 2.841536476232361], + [ 1.832602376042457, 1.945511926976032], + [-0.260147475776542, 2.058047492231698]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_nan(self): + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + @pytest.mark.parametrize("kappa", [1e4, 1e15]) + def test_vonmises_large_kappa(self, kappa): + random = Generator(MT19937(self.seed)) + rs = RandomState(random.bit_generator) + state = random.bit_generator.state + + random_state_vals = rs.vonmises(0, kappa, size=10) + random.bit_generator.state = state + gen_vals = random.vonmises(0, kappa, size=10) + if kappa < 1e6: + assert_allclose(random_state_vals, gen_vals) + else: + assert np.all(random_state_vals != gen_vals) + + @pytest.mark.parametrize("mu", [-7., -np.pi, -3.1, np.pi, 3.2]) + @pytest.mark.parametrize("kappa", [1e-9, 1e-6, 1, 1e3, 1e15]) + def test_vonmises_large_kappa_range(self, mu, kappa): + random = Generator(MT19937(self.seed)) + r = random.vonmises(mu, kappa, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_wald(self): + random = Generator(MT19937(self.seed)) + actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[0.26871721804551, 3.2233942732115 ], # noqa: E202 + [2.20328374987066, 2.40958405189353], + [2.07093587449261, 0.73073890064369]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_wald_nonnegative(self): + random = Generator(MT19937(self.seed)) + samples = random.wald(mean=1e9, scale=2.25, size=1000) + assert_(np.all(samples >= 0.0)) + + def test_weibull(self): + random = Generator(MT19937(self.seed)) + actual = random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.138613914769468, 1.306463419753191], + [0.111623365934763, 1.446570494646721], + [1.257145775276011, 1.914247725027957]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random = Generator(MT19937(self.seed)) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) 
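+ # Note for the zipf tests: the distribution requires a > 1, since
+ # its normalizing constant is the Riemann zeta function zeta(a),
+ # which diverges for a <= 1; the broadcast tests further down
+ # reject a = 0 and NaN on that basis.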
+ + def test_zipf(self): + random = Generator(MT19937(self.seed)) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[ 1, 1], + [ 10, 867], + [354, 2]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def test_uniform(self): + random = Generator(MT19937(self.seed)) + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095]) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + random = Generator(MT19937(self.seed)) + actual = random.uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + random = Generator(MT19937(self.seed)) + desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) + + random = Generator(MT19937(self.seed)) + actual = random.normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.normal, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + normal = random.normal + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455]) + + random = Generator(MT19937(self.seed)) + beta = random.beta + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + random = Generator(MT19937(self.seed)) + actual = random.beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + std_gamma = random.standard_gamma + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258]) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gamma = random.gamma + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + desired = np.array([0.07765056244107, 
7.72951397913186, 0.05786093891763]) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + random = Generator(MT19937(self.seed)) + f = random.f + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629]) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_f = random.noncentral_f + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + random = Generator(MT19937(self.seed)) + desired = np.array([0.04714867120827, 0.1239390327694]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589]) + + random = Generator(MT19937(self.seed)) + actual = random.chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399]) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + random = Generator(MT19937(self.seed)) + nonc_chi = random.noncentral_chisquare + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983]) + + random = Generator(MT19937(self.seed)) + actual = random.standard_t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.standard_t, 
bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326]) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa) + + random = Generator(MT19937(self.seed)) + actual = random.vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + desired = np.array([0.95905052946317, 0.2383810889437, 1.04988745750013]) + + random = Generator(MT19937(self.seed)) + actual = random.pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629]) + + random = Generator(MT19937(self.seed)) + actual = random.weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807]) + + random = Generator(MT19937(self.seed)) + actual = random.power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202]) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + laplace = random.laplace + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081]) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + gumbel = random.gumbel + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397]) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc * 3, bad_scale) + + random = Generator(MT19937(self.seed)) + actual = random.logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, random.logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276]) + + random = Generator(MT19937(self.seed)) + lognormal = random.lognormal + actual = lognormal(mean 
* 3, sigma)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.lognormal(mean, sigma * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
+
+ def test_rayleigh(self):
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array(
+ [1.1597068009872629,
+ 0.6539188836253857,
+ 1.1981526554349398]
+ )
+
+ random = Generator(MT19937(self.seed))
+ actual = random.rayleigh(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.rayleigh, bad_scale * 3)
+
+ def test_wald(self):
+ mean = [0.5]
+ scale = [1]
+ bad_mean = [0]
+ bad_scale = [-2]
+ desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.wald(mean * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.wald, bad_mean * 3, scale)
+ assert_raises(ValueError, random.wald, mean * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.wald(mean, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.wald, bad_mean, scale * 3)
+ assert_raises(ValueError, random.wald, mean, bad_scale * 3)
+
+ def test_triangular(self):
+ left = [1]
+ right = [3]
+ mode = [2]
+ bad_left_one = [3]
+ bad_mode_one = [4]
+ bad_left_two, bad_mode_two = right * 2
+ desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left * 3, mode, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
+ assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
+ assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+ right)
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left, mode * 3, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
+ assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+ right)
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left, mode, right * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
+ assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+ right * 3)
+
+ assert_raises(ValueError, triangular, 10., 0., 20.)
+ assert_raises(ValueError, triangular, 10., 25., 20.)
+ assert_raises(ValueError, triangular, 10., 10., 10.)
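+ # The three scalar cases above spell out the triangular constraint
+ # left <= mode <= right with left < right: mode below left, mode
+ # above right, and the fully degenerate left == mode == right all
+ # raise ValueError.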
+
+    def test_binomial(self):
+        n = [1]
+        p = [0.5]
+        bad_n = [-1]
+        bad_p_one = [-1]
+        bad_p_two = [1.5]
+        desired = np.array([0, 0, 1])
+
+        random = Generator(MT19937(self.seed))
+        binom = random.binomial
+        actual = binom(n * 3, p)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, binom, bad_n * 3, p)
+        assert_raises(ValueError, binom, n * 3, bad_p_one)
+        assert_raises(ValueError, binom, n * 3, bad_p_two)
+
+        random = Generator(MT19937(self.seed))
+        actual = random.binomial(n, p * 3)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, binom, bad_n, p * 3)
+        assert_raises(ValueError, binom, n, bad_p_one * 3)
+        assert_raises(ValueError, binom, n, bad_p_two * 3)
+
+    def test_negative_binomial(self):
+        n = [1]
+        p = [0.5]
+        bad_n = [-1]
+        bad_p_one = [-1]
+        bad_p_two = [1.5]
+        desired = np.array([0, 2, 1], dtype=np.int64)
+
+        random = Generator(MT19937(self.seed))
+        neg_binom = random.negative_binomial
+        actual = neg_binom(n * 3, p)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, neg_binom, bad_n * 3, p)
+        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
+        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+
+        random = Generator(MT19937(self.seed))
+        neg_binom = random.negative_binomial
+        actual = neg_binom(n, p * 3)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, neg_binom, bad_n, p * 3)
+        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
+        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+
+    def test_poisson(self):
+        lam = [1]
+        bad_lam_one = [-1]
+        desired = np.array([0, 0, 3])
+
+        random = Generator(MT19937(self.seed))
+        max_lam = random._poisson_lam_max
+        bad_lam_two = [max_lam * 2]
+        poisson = random.poisson
+        actual = poisson(lam * 3)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, poisson, bad_lam_one * 3)
+        assert_raises(ValueError, poisson, bad_lam_two * 3)
+
+    def test_zipf(self):
+        a = [2]
+        bad_a = [0]
+        desired = np.array([1, 8, 1])
+
+        random = Generator(MT19937(self.seed))
+        zipf = random.zipf
+        actual = zipf(a * 3)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, zipf, bad_a * 3)
+        with np.errstate(invalid='ignore'):
+            assert_raises(ValueError, zipf, np.nan)
+            assert_raises(ValueError, zipf, [0, 0, np.nan])
+
+    def test_geometric(self):
+        p = [0.5]
+        bad_p_one = [-1]
+        bad_p_two = [1.5]
+        desired = np.array([1, 1, 3])
+
+        random = Generator(MT19937(self.seed))
+        geometric = random.geometric
+        actual = geometric(p * 3)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, geometric, bad_p_one * 3)
+        assert_raises(ValueError, geometric, bad_p_two * 3)
+
+    def test_hypergeometric(self):
+        ngood = [1]
+        nbad = [2]
+        nsample = [2]
+        bad_ngood = [-1]
+        bad_nbad = [-2]
+        bad_nsample_one = [-1]
+        bad_nsample_two = [4]
+        desired = np.array([0, 0, 1])
+
+        random = Generator(MT19937(self.seed))
+        actual = random.hypergeometric(ngood * 3, nbad, nsample)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
+        assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
+        assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)  # noqa: E501
+        assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)  # noqa: E501
+
+        random = Generator(MT19937(self.seed))
+        actual = random.hypergeometric(ngood, nbad * 3, nsample)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3,
+                      nsample)
+        assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
+        assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)  # noqa: E501
+        assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)  # noqa: E501
+
+        random = Generator(MT19937(self.seed))
+        hypergeom = random.hypergeometric
+        actual = hypergeom(ngood, nbad, nsample * 3)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
+        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
+        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
+        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+
+        assert_raises(ValueError, hypergeom, -1, 10, 20)
+        assert_raises(ValueError, hypergeom, 10, -1, 20)
+        assert_raises(ValueError, hypergeom, 10, 10, -1)
+        assert_raises(ValueError, hypergeom, 10, 10, 25)
+
+        # ValueError for arguments that are too big.
+        assert_raises(ValueError, hypergeom, 2**30, 10, 20)
+        assert_raises(ValueError, hypergeom, 999, 2**31, 50)
+        assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
+
+    def test_logseries(self):
+        p = [0.5]
+        bad_p_one = [2]
+        bad_p_two = [-1]
+        desired = np.array([1, 1, 1])
+
+        random = Generator(MT19937(self.seed))
+        logseries = random.logseries
+        actual = logseries(p * 3)
+        assert_array_equal(actual, desired)
+        assert_raises(ValueError, logseries, bad_p_one * 3)
+        assert_raises(ValueError, logseries, bad_p_two * 3)
+
+    def test_multinomial(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
+        desired = np.array([[[0, 0, 2, 1, 2, 0],
+                             [2, 3, 6, 4, 2, 3]],
+                            [[1, 0, 1, 0, 2, 1],
+                             [7, 2, 2, 1, 4, 4]],
+                            [[0, 2, 0, 1, 2, 0],
+                             [3, 2, 3, 3, 4, 5]]], dtype=np.int64)
+        assert_array_equal(actual, desired)
+
+        random = Generator(MT19937(self.seed))
+        actual = random.multinomial([5, 20], [1 / 6.] * 6)
+        desired = np.array([[0, 0, 2, 1, 2, 0],
+                            [2, 3, 6, 4, 2, 3]], dtype=np.int64)
+        assert_array_equal(actual, desired)
+
+        random = Generator(MT19937(self.seed))
+        actual = random.multinomial([5, 20], [[1 / 6.] * 6] * 2)
+        desired = np.array([[0, 0, 2, 1, 2, 0],
+                            [2, 3, 6, 4, 2, 3]], dtype=np.int64)
+        assert_array_equal(actual, desired)
+
+        random = Generator(MT19937(self.seed))
+        actual = random.multinomial([[5], [20]], [[1 / 6.] * 6] * 2)
+        desired = np.array([[[0, 0, 2, 1, 2, 0],
+                             [0, 0, 2, 1, 1, 1]],
+                            [[4, 2, 3, 3, 5, 3],
+                             [7, 2, 2, 1, 4, 4]]], dtype=np.int64)
+        assert_array_equal(actual, desired)
+
+    @pytest.mark.parametrize("n", [10,
+                                   np.array([10, 10]),
+                                   np.array([[[10]], [[10]]])
+                                   ]
+                             )
+    def test_multinomial_pval_broadcast(self, n):
+        random = Generator(MT19937(self.seed))
+        pvals = np.array([1 / 4] * 4)
+        actual = random.multinomial(n, pvals)
+        n_shape = () if isinstance(n, int) else n.shape
+        expected_shape = n_shape + (4,)
+        assert actual.shape == expected_shape
+        pvals = np.vstack([pvals, pvals])
+        actual = random.multinomial(n, pvals)
+        expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1]) + (4,)
+        assert actual.shape == expected_shape
+
+        pvals = np.vstack([[pvals], [pvals]])
+        actual = random.multinomial(n, pvals)
+        expected_shape = np.broadcast_shapes(n_shape, pvals.shape[:-1])
+        assert actual.shape == expected_shape + (4,)
+        actual = random.multinomial(n, pvals, size=(3, 2) + expected_shape)
+        assert actual.shape == (3, 2) + expected_shape + (4,)
+
+        with pytest.raises(ValueError):
+            # Ensure that size is not broadcast
+            actual = random.multinomial(n, pvals, size=(1,) * 6)
+
+    def test_invalid_pvals_broadcast(self):
+        random = Generator(MT19937(self.seed))
+        pvals = [[1 / 6] * 6, [1 / 4] * 6]
+        assert_raises(ValueError, random.multinomial, 1, pvals)
+        assert_raises(ValueError, random.multinomial, 6, 0.5)
+
+    def test_empty_outputs(self):
+        random = Generator(MT19937(self.seed))
+        actual = random.multinomial(np.empty((10, 0, 6), "i8"), [1 / 6] * 6)
+        assert actual.shape == (10, 0, 6, 6)
+        actual = random.multinomial(12, np.empty((10, 0, 10)))
+        assert actual.shape == (10, 0, 10)
+        actual = random.multinomial(np.empty((3, 0, 7), "i8"),
+                                    np.empty((3, 0, 7, 4)))
+        assert actual.shape == (3, 0, 7, 4)
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start thread")
+class TestThread:
+    # make sure each state produces the same sequence even in threads
+    def setup_method(self):
+        self.seeds = range(4)
+
+    def check_function(self, function, sz):
+        from threading import Thread
+
+        out1 = np.empty((len(self.seeds),) + sz)
+        out2 = np.empty((len(self.seeds),) + sz)
+
+        # threaded generation
+        t = [Thread(target=function, args=(Generator(MT19937(s)), o))
+             for s, o in zip(self.seeds, out1)]
+        [x.start() for x in t]
+        [x.join() for x in t]
+
+        # the same serial
+        for s, o in zip(self.seeds, out2):
+            function(Generator(MT19937(s)), o)
+
+        # these platforms change x87 fpu precision mode in threads
+        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
+            assert_array_almost_equal(out1, out2)
+        else:
+            assert_array_equal(out1, out2)
+
+    def test_normal(self):
+        def gen_random(state, out):
+            out[...] = state.normal(size=10000)
+
+        self.check_function(gen_random, sz=(10000,))
+
+    def test_exp(self):
+        def gen_random(state, out):
+            out[...] = state.exponential(scale=np.ones((100, 1000)))
+
+        self.check_function(gen_random, sz=(100, 1000))
+
+    def test_multinomial(self):
+        def gen_random(state, out):
+            out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
+
+        self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput:
+    def setup_method(self):
+        self.argOne = np.array([2])
+        self.argTwo = np.array([3])
+        self.argThree = np.array([4])
+        self.tgtShape = (1,)
+
+    def test_one_arg_funcs(self):
+        funcs = (random.exponential, random.standard_gamma,
+                 random.chisquare, random.standard_t,
+                 random.pareto, random.weibull,
+                 random.power, random.rayleigh,
+                 random.poisson, random.zipf,
+                 random.geometric, random.logseries)
+
+        probfuncs = (random.geometric, random.logseries)
+
+        for func in funcs:
+            if func in probfuncs:  # p < 1.0
+                out = func(np.array([0.5]))
+
+            else:
+                out = func(self.argOne)
+
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_two_arg_funcs(self):
+        funcs = (random.uniform, random.normal,
+                 random.beta, random.gamma,
+                 random.f, random.noncentral_chisquare,
+                 random.vonmises, random.laplace,
+                 random.gumbel, random.logistic,
+                 random.lognormal, random.wald,
+                 random.binomial, random.negative_binomial)
+
+        probfuncs = (random.binomial, random.negative_binomial)
+
+        for func in funcs:
+            if func in probfuncs:  # p <= 1
+                argTwo = np.array([0.5])
+
+            else:
+                argTwo = self.argTwo
+
+            out = func(self.argOne, argTwo)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne[0], argTwo)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne, argTwo[0])
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_integers(self, endpoint):
+        itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+                 np.int32, np.uint32, np.int64, np.uint64]
+        func = random.integers
+        high = np.array([1])
+        low = np.array([0])
+
+        for dt in itype:
+            out = func(low, high, endpoint=endpoint, dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(low[0], high, endpoint=endpoint, dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(low, high[0], endpoint=endpoint, dtype=dt)
+            assert_equal(out.shape, self.tgtShape)
+
+    def test_three_arg_funcs(self):
+        funcs = [random.noncentral_f, random.triangular,
+                 random.hypergeometric]
+
+        for func in funcs:
+            out = func(self.argOne, self.argTwo, self.argThree)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne[0], self.argTwo, self.argThree)
+            assert_equal(out.shape, self.tgtShape)
+
+            out = func(self.argOne, self.argTwo[0], self.argThree)
+            assert_equal(out.shape, self.tgtShape)
+
+
+@pytest.mark.parametrize("config", JUMP_TEST_DATA)
+def test_jumped(config):
+    # Each config contains the initial seed, a number of raw steps
+    # the sha256 hashes of the initial and the final states' keys and
+    # the position of the initial and the final state.
+    # These were produced using the original C implementation.
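+    # (For orientation, not an exhaustive schema: each entry is expected to
+    # look like {"seed": ..., "steps": ...,
+    #            "initial": {"pos": ..., "key_sha256": ...},
+    #            "jumped": {"pos": ..., "key_sha256": ...}},
+    # which is exactly the set of keys accessed below.)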
+    seed = config["seed"]
+    steps = config["steps"]
+
+    mt19937 = MT19937(seed)
+    # Burn step
+    mt19937.random_raw(steps)
+    key = mt19937.state["state"]["key"]
+    if sys.byteorder == 'big':
+        key = key.byteswap()
+    sha256 = hashlib.sha256(key)
+    assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
+    assert sha256.hexdigest() == config["initial"]["key_sha256"]
+
+    jumped = mt19937.jumped()
+    key = jumped.state["state"]["key"]
+    if sys.byteorder == 'big':
+        key = key.byteswap()
+    sha256 = hashlib.sha256(key)
+    assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
+    assert sha256.hexdigest() == config["jumped"]["key_sha256"]
+
+
+def test_broadcast_size_error():
+    mu = np.ones(3)
+    sigma = np.ones((4, 3))
+    size = (10, 4, 2)
+    assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3)
+    with pytest.raises(ValueError):
+        random.normal(mu, sigma, size=size)
+    with pytest.raises(ValueError):
+        random.normal(mu, sigma, size=(1, 3))
+    with pytest.raises(ValueError):
+        random.normal(mu, sigma, size=(4, 1, 1))
+    # 1 arg
+    shape = np.ones((4, 3))
+    with pytest.raises(ValueError):
+        random.standard_gamma(shape, size=size)
+    with pytest.raises(ValueError):
+        random.standard_gamma(shape, size=(3,))
+    with pytest.raises(ValueError):
+        random.standard_gamma(shape, size=3)
+    # Check out
+    out = np.empty(size)
+    with pytest.raises(ValueError):
+        random.standard_gamma(shape, out=out)
+
+    # 2 arg
+    with pytest.raises(ValueError):
+        random.binomial(1, [0.3, 0.7], size=(2, 1))
+    with pytest.raises(ValueError):
+        random.binomial([1, 2], 0.3, size=(2, 1))
+    with pytest.raises(ValueError):
+        random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
+    with pytest.raises(ValueError):
+        random.multinomial([2, 2], [.3, .7], size=(2, 1))
+
+    # 3 arg
+    a = random.chisquare(5, size=3)
+    b = random.chisquare(5, size=(4, 3))
+    c = random.chisquare(5, size=(5, 4, 3))
+    assert random.noncentral_f(a, b, c).shape == (5, 4, 3)
+    with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"):
+        random.noncentral_f(a, b, c, size=(6, 5, 1, 1))
+
+
+def test_broadcast_size_scalar():
+    mu = np.ones(3)
+    sigma = np.ones(3)
+    random.normal(mu, sigma, size=3)
+    with pytest.raises(ValueError):
+        random.normal(mu, sigma, size=2)
+
+
+def test_ragged_shuffle():
+    # GH 18142
+    seq = [[], [], 1]
+    gen = Generator(MT19937(0))
+    assert_no_warnings(gen.shuffle, seq)
+    assert seq == [1, [], []]
+
+
+@pytest.mark.parametrize("high", [-2, [-2]])
+@pytest.mark.parametrize("endpoint", [True, False])
+def test_single_arg_integer_exception(high, endpoint):
+    # GH 14333
+    gen = Generator(MT19937(0))
+    msg = 'high < 0' if endpoint else 'high <= 0'
+    with pytest.raises(ValueError, match=msg):
+        gen.integers(high, endpoint=endpoint)
+    msg = 'low > high' if endpoint else 'low >= high'
+    with pytest.raises(ValueError, match=msg):
+        gen.integers(-1, high, endpoint=endpoint)
+    with pytest.raises(ValueError, match=msg):
+        gen.integers([-1], high, endpoint=endpoint)
+
+
+@pytest.mark.parametrize("dtype", ["f4", "f8"])
+def test_c_contig_req_out(dtype):
+    # GH 18704
+    out = np.empty((2, 3), order="F", dtype=dtype)
+    shape = [1, 2, 3]
+    with pytest.raises(ValueError, match="Supplied output array"):
+        random.standard_gamma(shape, out=out, dtype=dtype)
+    with pytest.raises(ValueError, match="Supplied output array"):
+        random.standard_gamma(shape, out=out, size=out.shape, dtype=dtype)
+
+
+@pytest.mark.parametrize("dtype", ["f4", "f8"])
+@pytest.mark.parametrize("order", ["F", "C"])
+@pytest.mark.parametrize("dist", [random.standard_normal,
+                                  random.random])
+def test_contig_req_out(dist, order, dtype):
+    # GH 18704
+    out = np.empty((2, 3), dtype=dtype, order=order)
+    variates = dist(out=out, dtype=dtype)
+    assert variates is out
+    variates = dist(out=out, dtype=dtype, size=out.shape)
+    assert variates is out
+
+
+def test_generator_ctor_old_style_pickle():
+    rg = np.random.Generator(np.random.PCG64DXSM(0))
+    rg.standard_normal(1)
+    # Directly call reduce which is used in pickling
+    ctor, (bit_gen, ), _ = rg.__reduce__()
+    # Simulate unpickling an old pickle that only has the name
+    assert bit_gen.__class__.__name__ == "PCG64DXSM"
+    print(ctor)
+    b = ctor(*("PCG64DXSM",))
+    print(b)
+    b.bit_generator.state = bit_gen.state
+    state_b = b.bit_generator.state
+    assert bit_gen.state == state_b
+
+
+def test_pickle_preserves_seed_sequence():
+    # GH 26234
+    # Add explicit test that bit generators preserve seed sequences
+    import pickle
+
+    rg = np.random.Generator(np.random.PCG64DXSM(20240411))
+    ss = rg.bit_generator.seed_seq
+    rg_plk = pickle.loads(pickle.dumps(rg))
+    ss_plk = rg_plk.bit_generator.seed_seq
+    assert_equal(ss.state, ss_plk.state)
+    assert_equal(ss.pool, ss_plk.pool)
+
+    rg.bit_generator.seed_seq.spawn(10)
+    rg_plk = pickle.loads(pickle.dumps(rg))
+    ss_plk = rg_plk.bit_generator.seed_seq
+    assert_equal(ss.state, ss_plk.state)
+
+
+@pytest.mark.parametrize("version", [121, 126])
+def test_legacy_pickle(version):
+    # Pickling format was changed in 1.22.x and in 2.0.x
+    import gzip
+    import pickle
+
+    base_path = os.path.split(os.path.abspath(__file__))[0]
+    pkl_file = os.path.join(
+        base_path, "data", f"generator_pcg64_np{version}.pkl.gz"
+    )
+    with gzip.open(pkl_file) as gz:
+        rg = pickle.load(gz)
+    state = rg.bit_generator.state['state']
+
+    assert isinstance(rg, Generator)
+    assert isinstance(rg.bit_generator, np.random.PCG64)
+    assert state['state'] == 35399562948360463058890781895381311971
+    assert state['inc'] == 87136372517582989555478159403783844777
diff --git a/python/numpy/random/tests/test_generator_mt19937_regressions.py b/python/numpy/random/tests/test_generator_mt19937_regressions.py
new file mode 100644
index 000000000..abfacb87d
--- /dev/null
+++ b/python/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -0,0 +1,207 @@
+import pytest
+
+import numpy as np
+from numpy.random import MT19937, Generator
+from numpy.testing import assert_, assert_array_equal
+
+
+class TestRegression:
+
+    def setup_method(self):
+        self.mt19937 = Generator(MT19937(121263137472525314065))
+
+    def test_vonmises_range(self):
+        # Make sure generated random variables are in [-pi, pi].
+        # Regression test for ticket #986.
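+        # (Strictly, the assertion below checks the half-open interval
+        # (-pi, pi]: samples may equal pi but never -pi.)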
+        for mu in np.linspace(-7., 7., 5):
+            r = self.mt19937.vonmises(mu, 1, 50)
+            assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+    def test_hypergeometric_range(self):
+        # Test for ticket #921
+        assert_(np.all(self.mt19937.hypergeometric(3, 18, 11, size=10) < 4))
+        assert_(np.all(self.mt19937.hypergeometric(18, 3, 11, size=10) > 0))
+
+        # Test for ticket #5623
+        args = (2**20 - 2, 2**20 - 2, 2**20 - 2)  # Check for 32-bit systems
+        assert_(self.mt19937.hypergeometric(*args) > 0)
+
+    def test_logseries_convergence(self):
+        # Test for ticket #923
+        N = 1000
+        rvsn = self.mt19937.logseries(0.8, size=N)
+        # these two frequency counts should be close to theoretical
+        # numbers with this large sample
+        # theoretical large N result is 0.49706795
+        freq = np.sum(rvsn == 1) / N
+        msg = f'Frequency was {freq:f}, should be > 0.45'
+        assert_(freq > 0.45, msg)
+        # theoretical large N result is 0.19882718
+        freq = np.sum(rvsn == 2) / N
+        msg = f'Frequency was {freq:f}, should be < 0.23'
+        assert_(freq < 0.23, msg)
+
+    def test_shuffle_mixed_dimension(self):
+        # Test for trac ticket #2074
+        for t in [[1, 2, 3, None],
+                  [(1, 1), (2, 2), (3, 3), None],
+                  [1, (2, 2), (3, 3), None],
+                  [(1, 1), 2, 3, None]]:
+            mt19937 = Generator(MT19937(12345))
+            shuffled = np.array(t, dtype=object)
+            mt19937.shuffle(shuffled)
+            expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
+            assert_array_equal(np.array(shuffled, dtype=object), expected)
+
+    def test_call_within_randomstate(self):
+        # Check that custom BitGenerator does not call into global state
+        res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
+        for i in range(3):
+            mt19937 = Generator(MT19937(i))
+            m = Generator(MT19937(4321))
+            # If m.state is not honored, the result will change
+            assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res)
+
+    def test_multivariate_normal_size_types(self):
+        # Test for multivariate_normal issue with 'size' argument.
+        # Check that the multivariate_normal size argument can be a
+        # numpy integer.
+        self.mt19937.multivariate_normal([0], [[0]], size=1)
+        self.mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
+        self.mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
+
+    def test_beta_small_parameters(self):
+        # Test that beta with small a and b parameters does not produce
+        # NaNs due to roundoff errors causing 0 / 0, gh-5851
+        x = self.mt19937.beta(0.0001, 0.0001, size=100)
+        assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
+
+    def test_beta_very_small_parameters(self):
+        # gh-24203: beta would hang with very small parameters.
+        self.mt19937.beta(1e-49, 1e-40)
+
+    def test_beta_ridiculously_small_parameters(self):
+        # gh-24266: beta would generate nan when the parameters
+        # were subnormal or a small multiple of the smallest normal.
+        tiny = np.finfo(1.0).tiny
+        x = self.mt19937.beta(tiny / 32, tiny / 40, size=50)
+        assert not np.any(np.isnan(x))
+
+    def test_beta_expected_zero_frequency(self):
+        # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta
+        # would generate too many zeros.
+        a = 0.0025
+        b = 0.0025
+        n = 1000000
+        x = self.mt19937.beta(a, b, size=n)
+        nzeros = np.count_nonzero(x == 0)
+        # beta CDF at x = np.finfo(np.double).smallest_subnormal/2
+        # is p = 0.0776169083131899, e.g.,
+        #
+        #     import numpy as np
+        #     from mpmath import mp
+        #     mp.dps = 160
+        #     x = mp.mpf(np.finfo(np.float64).smallest_subnormal)/2
+        #     # CDF of the beta distribution at x:
+        #     p = mp.betainc(a, b, x1=0, x2=x, regularized=True)
+        #     n = 1000000
+        #     expected_freq = float(n*p)
+        #
+        expected_freq = 77616.90831318991
+        assert 0.95 * expected_freq < nzeros < 1.05 * expected_freq
+
+    def test_choice_sum_of_probs_tolerance(self):
+        # The sum of probs should be 1.0 with some tolerance.
+        # For low precision dtypes the tolerance was too tight.
+        # See numpy github issue 6123.
+        a = [1, 2, 3]
+        counts = [4, 4, 2]
+        for dt in np.float16, np.float32, np.float64:
+            probs = np.array(counts, dtype=dt) / sum(counts)
+            c = self.mt19937.choice(a, p=probs)
+            assert_(c in a)
+            with pytest.raises(ValueError):
+                self.mt19937.choice(a, p=probs * 0.9)
+
+    def test_shuffle_of_array_of_different_length_strings(self):
+        # Test that permuting an array of different length strings
+        # will not cause a segfault on garbage collection
+        # Tests gh-7710
+
+        a = np.array(['a', 'a' * 1000])
+
+        for _ in range(100):
+            self.mt19937.shuffle(a)
+
+        # Force Garbage Collection - should not segfault.
+        import gc
+        gc.collect()
+
+    def test_shuffle_of_array_of_objects(self):
+        # Test that permuting an array of objects will not cause
+        # a segfault on garbage collection.
+        # See gh-7719
+        a = np.array([np.arange(1), np.arange(4)], dtype=object)
+
+        for _ in range(1000):
+            self.mt19937.shuffle(a)
+
+        # Force Garbage Collection - should not segfault.
+        import gc
+        gc.collect()
+
+    def test_permutation_subclass(self):
+
+        class N(np.ndarray):
+            pass
+
+        mt19937 = Generator(MT19937(1))
+        orig = np.arange(3).view(N)
+        perm = mt19937.permutation(orig)
+        assert_array_equal(perm, np.array([2, 0, 1]))
+        assert_array_equal(orig, np.arange(3).view(N))
+
+        class M:
+            a = np.arange(5)
+
+            def __array__(self, dtype=None, copy=None):
+                return self.a
+
+        mt19937 = Generator(MT19937(1))
+        m = M()
+        perm = mt19937.permutation(m)
+        assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
+        assert_array_equal(m.__array__(), np.arange(5))
+
+    def test_gamma_0(self):
+        assert self.mt19937.standard_gamma(0.0) == 0.0
+        assert_array_equal(self.mt19937.standard_gamma([0.0]), 0.0)
+
+        actual = self.mt19937.standard_gamma([0.0], dtype='float')
+        expected = np.array([0.], dtype=np.float32)
+        assert_array_equal(actual, expected)
+
+    def test_geometric_tiny_prob(self):
+        # Regression test for gh-17007.
+        # When p = 1e-30, the probability that a sample will exceed 2**63-1
+        # is 0.9999999999907766, so we expect the result to be all 2**63-1.
+        assert_array_equal(self.mt19937.geometric(p=1e-30, size=3),
+                           np.iinfo(np.int64).max)
+
+    def test_zipf_large_parameter(self):
+        # Regression test for part of gh-9829: a call such as rng.zipf(10000)
+        # would hang.
+        n = 8
+        sample = self.mt19937.zipf(10000, size=n)
+        assert_array_equal(sample, np.ones(n, dtype=np.int64))
+
+    def test_zipf_a_near_1(self):
+        # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001)
+        # would hang.
+        n = 100000
+        sample = self.mt19937.zipf(1.0000000000001, size=n)
+        # Not much of a test, but let's do something more than verify that
+        # it doesn't hang.  Certainly for a monotonically decreasing
+        # discrete distribution truncated to signed 64 bit integers, more
+        # than half should be less than 2**62.
+        assert np.count_nonzero(sample < 2**62) > n / 2
diff --git a/python/numpy/random/tests/test_random.py b/python/numpy/random/tests/test_random.py
new file mode 100644
index 000000000..d5981906f
--- /dev/null
+++ b/python/numpy/random/tests/test_random.py
@@ -0,0 +1,1757 @@
+import sys
+import warnings
+
+import pytest
+
+import numpy as np
+from numpy import random
+from numpy.testing import (
+    IS_WASM,
+    assert_,
+    assert_array_almost_equal,
+    assert_array_equal,
+    assert_equal,
+    assert_no_warnings,
+    assert_raises,
+    assert_warns,
+    suppress_warnings,
+)
+
+
+class TestSeed:
+    def test_scalar(self):
+        s = np.random.RandomState(0)
+        assert_equal(s.randint(1000), 684)
+        s = np.random.RandomState(4294967295)
+        assert_equal(s.randint(1000), 419)
+
+    def test_array(self):
+        s = np.random.RandomState(range(10))
+        assert_equal(s.randint(1000), 468)
+        s = np.random.RandomState(np.arange(10))
+        assert_equal(s.randint(1000), 468)
+        s = np.random.RandomState([0])
+        assert_equal(s.randint(1000), 973)
+        s = np.random.RandomState([4294967295])
+        assert_equal(s.randint(1000), 265)
+
+    def test_invalid_scalar(self):
+        # seed must be an unsigned 32 bit integer
+        assert_raises(TypeError, np.random.RandomState, -0.5)
+        assert_raises(ValueError, np.random.RandomState, -1)
+
+    def test_invalid_array(self):
+        # seed must be an unsigned 32 bit integer
+        assert_raises(TypeError, np.random.RandomState, [-0.5])
+        assert_raises(ValueError, np.random.RandomState, [-1])
+        assert_raises(ValueError, np.random.RandomState, [4294967296])
+        assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
+        assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
+
+    def test_invalid_array_shape(self):
+        # gh-9832
+        assert_raises(ValueError, np.random.RandomState,
+                      np.array([], dtype=np.int64))
+        assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
+        assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
+                                                          [4, 5, 6]])
+
+
+class TestBinomial:
+    def test_n_zero(self):
+        # Tests the corner case of n == 0 for the binomial distribution.
+        # binomial(0, p) should be zero for any p in [0, 1].
+        # This test addresses issue #3480.
+        zeros = np.zeros(2, dtype='int')
+        for p in [0, .5, 1]:
+            assert_(random.binomial(0, p) == 0)
+            assert_array_equal(random.binomial(zeros, p), zeros)
+
+    def test_p_is_nan(self):
+        # Issue #4571.
+        assert_raises(ValueError, random.binomial, 1, np.nan)
+
+
+class TestMultinomial:
+    def test_basic(self):
+        random.multinomial(100, [0.2, 0.8])
+
+    def test_zero_probability(self):
+        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
+
+    def test_int_negative_interval(self):
+        assert_(-5 <= random.randint(-5, -1) < -1)
+        x = random.randint(-5, -1, 5)
+        assert_(np.all(-5 <= x))
+        assert_(np.all(x < -1))
+
+    def test_size(self):
+        # gh-3173
+        p = [0.5, 0.5]
+        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+        assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
+        assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
+        assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
+                     (2, 2, 2))
+
+        assert_raises(TypeError, np.random.multinomial, 1, p,
+                      float(1))
+
+    def test_multidimensional_pvals(self):
+        assert_raises(ValueError, np.random.multinomial, 10, [[0, 1]])
+        assert_raises(ValueError, np.random.multinomial, 10, [[0], [1]])
+        assert_raises(ValueError, np.random.multinomial, 10, [[[0], [1]], [[1], [0]]])
+        assert_raises(ValueError, np.random.multinomial, 10, np.array([[0, 1], [1, 0]]))
+
+
+class TestSetState:
+    def setup_method(self):
+        self.seed = 1234567890
+        self.prng = random.RandomState(self.seed)
+        self.state = self.prng.get_state()
+
+    def test_basic(self):
+        old = self.prng.tomaxint(16)
+        self.prng.set_state(self.state)
+        new = self.prng.tomaxint(16)
+        assert_(np.all(old == new))
+
+    def test_gaussian_reset(self):
+        # Make sure the cached every-other-Gaussian is reset.
+        old = self.prng.standard_normal(size=3)
+        self.prng.set_state(self.state)
+        new = self.prng.standard_normal(size=3)
+        assert_(np.all(old == new))
+
+    def test_gaussian_reset_in_media_res(self):
+        # When the state is saved with a cached Gaussian, make sure the
+        # cached Gaussian is restored.
+
+        self.prng.standard_normal()
+        state = self.prng.get_state()
+        old = self.prng.standard_normal(size=3)
+        self.prng.set_state(state)
+        new = self.prng.standard_normal(size=3)
+        assert_(np.all(old == new))
+
+    def test_backwards_compatibility(self):
+        # Make sure we can accept old state tuples that do not have the
+        # cached Gaussian value.
+        old_state = self.state[:-2]
+        x1 = self.prng.standard_normal(size=16)
+        self.prng.set_state(old_state)
+        x2 = self.prng.standard_normal(size=16)
+        self.prng.set_state(self.state)
+        x3 = self.prng.standard_normal(size=16)
+        assert_(np.all(x1 == x2))
+        assert_(np.all(x1 == x3))
+
+    def test_negative_binomial(self):
+        # Ensure that the negative binomial results take floating point
+        # arguments without truncation.
+        self.prng.negative_binomial(0.5, 0.5)
+
+    def test_set_invalid_state(self):
+        # gh-25402
+        with pytest.raises(IndexError):
+            self.prng.set_state(())
+
+
+class TestRandint:
+
+    rfunc = np.random.randint
+
+    # valid integer/boolean types
+    itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+             np.int32, np.uint32, np.int64, np.uint64]
+
+    def test_unsupported_type(self):
+        assert_raises(TypeError, self.rfunc, 1, dtype=float)
+
+    def test_bounds_checking(self):
+        for dt in self.itype:
+            lbnd = 0 if dt is np.bool else np.iinfo(dt).min
+            ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
+            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
+            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
+            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
+
+    def test_rng_zero_and_extremes(self):
+        for dt in self.itype:
+            lbnd = 0 if dt is np.bool else np.iinfo(dt).min
+            ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+
+            tgt = ubnd - 1
+            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+            tgt = lbnd
+            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+            tgt = (lbnd + ubnd) // 2
+            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+    def test_full_range(self):
+        # Test for ticket #1690
+
+        for dt in self.itype:
+            lbnd = 0 if dt is np.bool else np.iinfo(dt).min
+            ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+
+            try:
+                self.rfunc(lbnd, ubnd, dtype=dt)
+            except Exception as e:
+                raise AssertionError("No error should have been raised, "
+                                     "but one was with the following "
+                                     "message:\n\n%s" % str(e))
+
+    def test_in_bounds_fuzz(self):
+        # Don't use fixed seed
+        np.random.seed()
+
+        for dt in self.itype[1:]:
+            for ubnd in [4, 8, 16]:
+                vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
+                assert_(vals.max() < ubnd)
+                assert_(vals.min() >= 2)
+
+        vals = self.rfunc(0, 2, size=2**16, dtype=np.bool)
+
+        assert_(vals.max() < 2)
+        assert_(vals.min() >= 0)
+
+    def test_repeatability(self):
+        import hashlib
+        # We use a sha256 hash of generated sequences of 1000 samples
+        # in the range [0, 6) for all but bool, where the range
+        # is [0, 2). Hashes are for little endian numbers.
+        tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71',  # noqa: E501
+               'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',  # noqa: E501
+               'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',  # noqa: E501
+               'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',  # noqa: E501
+               'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404',  # noqa: E501
+               'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4',  # noqa: E501
+               'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f',  # noqa: E501
+               'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e',  # noqa: E501
+               'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'}  # noqa: E501
+
+        for dt in self.itype[1:]:
+            np.random.seed(1234)
+
+            # view as little endian for hash
+            if sys.byteorder == 'little':
+                val = self.rfunc(0, 6, size=1000, dtype=dt)
+            else:
+                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+
+            res = hashlib.sha256(val.view(np.int8)).hexdigest()
+            assert_(tgt[np.dtype(dt).name] == res)
+
+        # bools do not depend on endianness
+        np.random.seed(1234)
+        val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+        res = hashlib.sha256(val).hexdigest()
+        assert_(tgt[np.dtype(bool).name] == res)
+
+    def test_int64_uint64_corner_case(self):
+        # When stored in Numpy arrays, `lbnd` is cast
+        # as np.int64, and `ubnd` is cast as np.uint64.
+        # Checking whether `lbnd` >= `ubnd` used to be
+        # done solely via direct comparison, which is incorrect
+        # because when Numpy tries to compare both numbers,
+        # it casts both to np.float64 because there is
+        # no integer superset of np.int64 and np.uint64. However,
+        # `ubnd` is too large to be represented in np.float64,
+        # causing it to be rounded down to np.iinfo(np.int64).max,
+        # leading to a ValueError because `lbnd` now equals
+        # the new `ubnd`.
+
+        dt = np.int64
+        tgt = np.iinfo(np.int64).max
+        lbnd = np.int64(np.iinfo(np.int64).max)
+        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+        # None of these function calls should
+        # generate a ValueError now.
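+        # (Concretely: 2**63 - 1 has no exact float64 representation, so
+        # float(2**63 - 1) == float(2**63); that is the collision the
+        # comment above describes.)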
+        actual = np.random.randint(lbnd, ubnd, dtype=dt)
+        assert_equal(actual, tgt)
+
+    def test_respect_dtype_singleton(self):
+        # See gh-7203
+        for dt in self.itype:
+            lbnd = 0 if dt is np.bool else np.iinfo(dt).min
+            ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+
+            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            assert_equal(sample.dtype, np.dtype(dt))
+
+        for dt in (bool, int):
+            # The legacy rng uses "long" as the default integer:
+            lbnd = 0 if dt is bool else np.iinfo("long").min
+            ubnd = 2 if dt is bool else np.iinfo("long").max + 1
+
+            # gh-7284: Ensure that we get Python data types
+            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            assert_(not hasattr(sample, 'dtype'))
+            assert_equal(type(sample), dt)
+
+
+class TestRandomDist:
+    # Make sure the random distribution returns the correct value for a
+    # given seed
+
+    def setup_method(self):
+        self.seed = 1234567890
+
+    def test_rand(self):
+        np.random.seed(self.seed)
+        actual = np.random.rand(3, 2)
+        desired = np.array([[0.61879477158567997, 0.59162362775974664],
+                            [0.88868358904449662, 0.89165480011560816],
+                            [0.4575674820298663, 0.7781880808593471]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_randn(self):
+        np.random.seed(self.seed)
+        actual = np.random.randn(3, 2)
+        desired = np.array([[1.34016345771863121, 1.73759122771936081],
+                            [1.498988344300628, -0.2286433324536169],
+                            [2.031033998682787, 2.17032494605655257]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_randint(self):
+        np.random.seed(self.seed)
+        actual = np.random.randint(-99, 99, size=(3, 2))
+        desired = np.array([[31, 3],
+                            [-52, 41],
+                            [-48, -66]])
+        assert_array_equal(actual, desired)
+
+    def test_random_integers(self):
+        np.random.seed(self.seed)
+        with suppress_warnings() as sup:
+            w = sup.record(DeprecationWarning)
+            actual = np.random.random_integers(-99, 99, size=(3, 2))
+            assert_(len(w) == 1)
+        desired = np.array([[31, 3],
+                            [-52, 41],
+                            [-48, -66]])
+        assert_array_equal(actual, desired)
+
+    def test_random_integers_max_int(self):
+        # Tests whether random_integers can generate the
+        # maximum allowed Python int that can be converted
+        # into a C long. Previous implementations of this
+        # method have thrown an OverflowError when attempting
+        # to generate this integer.
+        with suppress_warnings() as sup:
+            w = sup.record(DeprecationWarning)
+            actual = np.random.random_integers(np.iinfo('l').max,
+                                               np.iinfo('l').max)
+            assert_(len(w) == 1)
+
+        desired = np.iinfo('l').max
+        assert_equal(actual, desired)
+
+    def test_random_integers_deprecated(self):
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", DeprecationWarning)
+
+            # DeprecationWarning raised with high == None
+            assert_raises(DeprecationWarning,
+                          np.random.random_integers,
+                          np.iinfo('l').max)
+
+            # DeprecationWarning raised with high != None
+            assert_raises(DeprecationWarning,
+                          np.random.random_integers,
+                          np.iinfo('l').max, np.iinfo('l').max)
+
+    def test_random(self):
+        np.random.seed(self.seed)
+        actual = np.random.random((3, 2))
+        desired = np.array([[0.61879477158567997, 0.59162362775974664],
+                            [0.88868358904449662, 0.89165480011560816],
+                            [0.4575674820298663, 0.7781880808593471]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_choice_uniform_replace(self):
+        np.random.seed(self.seed)
+        actual = np.random.choice(4, 4)
+        desired = np.array([2, 3, 2, 3])
+        assert_array_equal(actual, desired)
+
+    def test_choice_nonuniform_replace(self):
+        np.random.seed(self.seed)
+        actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+        desired = np.array([1, 1, 2, 2])
+        assert_array_equal(actual, desired)
+
+    def test_choice_uniform_noreplace(self):
+        np.random.seed(self.seed)
+        actual = np.random.choice(4, 3, replace=False)
+        desired = np.array([0, 1, 3])
+        assert_array_equal(actual, desired)
+
+    def test_choice_nonuniform_noreplace(self):
+        np.random.seed(self.seed)
+        actual = np.random.choice(4, 3, replace=False,
+                                  p=[0.1, 0.3, 0.5, 0.1])
+        desired = np.array([2, 3, 1])
+        assert_array_equal(actual, desired)
+
+    def test_choice_noninteger(self):
+        np.random.seed(self.seed)
+        actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
+        desired = np.array(['c', 'd', 'c', 'd'])
+        assert_array_equal(actual, desired)
+
+    def test_choice_exceptions(self):
+        sample = np.random.choice
+        assert_raises(ValueError, sample, -1, 3)
+        assert_raises(ValueError, sample, 3., 3)
+        assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
+        assert_raises(ValueError, sample, [], 3)
+        assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
+                      p=[[0.25, 0.25], [0.25, 0.25]])
+        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
+        assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
+        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
+        assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+        # gh-13087
+        assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+        assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+        assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
+        assert_raises(ValueError, sample, [1, 2, 3], 2,
+                      replace=False, p=[1, 0, 0])
+
+    def test_choice_return_shape(self):
+        p = [0.1, 0.9]
+        # Check scalar
+        assert_(np.isscalar(np.random.choice(2, replace=True)))
+        assert_(np.isscalar(np.random.choice(2, replace=False)))
+        assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
+        assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
+        assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
+        assert_(np.random.choice([None], replace=True) is None)
+        a = np.array([1, 2])
+        arr = np.empty(1, dtype=object)
+        arr[0] = a
+        assert_(np.random.choice(arr, replace=True) is a)
+
+        # Check 0-d array
+        s = ()
+        assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
+        assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
+        assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
+        assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
+        assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
+        assert_(np.random.choice([None], s, replace=True).ndim == 0)
+        a = np.array([1, 2])
+        arr = np.empty(1, dtype=object)
+        arr[0] = a
+        assert_(np.random.choice(arr, s, replace=True).item() is a)
+
+        # Check multi dimensional array
+        s = (2, 3)
+        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+        assert_equal(np.random.choice(6, s, replace=True).shape, s)
+        assert_equal(np.random.choice(6, s, replace=False).shape, s)
+        assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
+        assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
+        assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
+
+        # Check zero-size
+        assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+        assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
+        assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
+        assert_equal(np.random.choice(0, size=0).shape, (0,))
+        assert_equal(np.random.choice([], size=(0,)).shape, (0,))
+        assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+                     (3, 0, 4))
+        assert_raises(ValueError, np.random.choice, [], 10)
+
+    def test_choice_nan_probabilities(self):
+        a = np.array([42, 1, 2])
+        p = [None, None, None]
+        assert_raises(ValueError, np.random.choice, a, p=p)
+
+    def test_bytes(self):
+        np.random.seed(self.seed)
+        actual = np.random.bytes(10)
+        desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
+        assert_equal(actual, desired)
+
+    def test_shuffle(self):
+        # Test lists, arrays (of various dtypes), and multidimensional
+        # versions of both, c-contiguous or not:
+        for conv in [lambda x: np.array([]),
+                     lambda x: x,
+                     lambda x: np.asarray(x).astype(np.int8),
+                     lambda x: np.asarray(x).astype(np.float32),
+                     lambda x: np.asarray(x).astype(np.complex64),
+                     lambda x: np.asarray(x).astype(object),
+                     lambda x: [(i, i) for i in x],
+                     lambda x: np.asarray([[i, i] for i in x]),
+                     lambda x: np.vstack([x, x]).T,
+                     # gh-11442
+                     lambda x: (np.asarray([(i, i) for i in x],
+                                           [("a", int), ("b", int)])
+                                .view(np.recarray)),
+                     # gh-4270
+                     lambda x: np.asarray([(i, i) for i in x],
+                                          [("a", object), ("b", np.int32)])]:
+            np.random.seed(self.seed)
+            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
+            np.random.shuffle(alist)
+            actual = alist
+            desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
+            assert_array_equal(actual, desired)
+
+    def test_shuffle_masked(self):
+        # gh-3263
+        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
+        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
+        a_orig = a.copy()
+        b_orig = b.copy()
+        for i in range(50):
+            np.random.shuffle(a)
+            assert_equal(
+                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
+            np.random.shuffle(b)
+            assert_equal(
+                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+
+    @pytest.mark.parametrize("random",
+                             [np.random, np.random.RandomState(),
+                              np.random.default_rng()])
+    def test_shuffle_untyped_warning(self, random):
+        # Use a dict: it works like a sequence but isn't one
+        values = {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6}
+        with pytest.warns(UserWarning,
+                          match="you are shuffling a 'dict' object") as rec:
+            random.shuffle(values)
+        assert "test_random" in rec[0].filename
+
+    @pytest.mark.parametrize("random",
+                             [np.random, np.random.RandomState(),
+                              np.random.default_rng()])
+    @pytest.mark.parametrize("use_array_like", [True, False])
+    def test_shuffle_no_object_unpacking(self, random, use_array_like):
+        class MyArr(np.ndarray):
+            pass
+
+        items = [
+            None, np.array([3]), np.float64(3), np.array(10), np.float64(7)
+        ]
+        arr = np.array(items, dtype=object)
+        item_ids = {id(i) for i in items}
+        if use_array_like:
+            arr = arr.view(MyArr)
+
+        # The array was created fine, and did not modify any objects:
+        assert all(id(i) in item_ids for i in arr)
+
+        if use_array_like and not isinstance(random, np.random.Generator):
+            # The old API gives incorrect results, but warns about it.
+            with pytest.warns(UserWarning,
+                              match="Shuffling a one dimensional array.*"):
+                random.shuffle(arr)
+        else:
+            random.shuffle(arr)
+        assert all(id(i) in item_ids for i in arr)
+
+    def test_shuffle_memoryview(self):
+        # gh-18273
+        # allow graceful handling of memoryviews
+        # (treat the same as arrays)
+        np.random.seed(self.seed)
+        a = np.arange(5).data
+        np.random.shuffle(a)
+        assert_equal(np.asarray(a), [0, 1, 4, 3, 2])
+        rng = np.random.RandomState(self.seed)
+        rng.shuffle(a)
+        assert_equal(np.asarray(a), [0, 1, 2, 3, 4])
+        rng = np.random.default_rng(self.seed)
+        rng.shuffle(a)
+        assert_equal(np.asarray(a), [4, 1, 0, 3, 2])
+
+    def test_shuffle_not_writeable(self):
+        a = np.zeros(3)
+        a.flags.writeable = False
+        with pytest.raises(ValueError, match='read-only'):
+            np.random.shuffle(a)
+
+    def test_beta(self):
+        np.random.seed(self.seed)
+        actual = np.random.beta(.1, .9, size=(3, 2))
+        desired = np.array(
+            [[1.45341850513746058e-02, 5.31297615662868145e-04],
+             [1.85366619058432324e-06, 4.19214516800110563e-03],
+             [1.58405155108498093e-04, 1.26252891949397652e-04]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_binomial(self):
+        np.random.seed(self.seed)
+        actual = np.random.binomial(100, .456, size=(3, 2))
+        desired = np.array([[37, 43],
+                            [42, 48],
+                            [46, 45]])
+        assert_array_equal(actual, desired)
+
+    def test_chisquare(self):
+        np.random.seed(self.seed)
+        actual = np.random.chisquare(50, size=(3, 2))
+        desired = np.array([[63.87858175501090585, 68.68407748911370447],
+                            [65.77116116901505904, 47.09686762438974483],
+                            [72.3828403199695174, 74.18408615260374006]])
+        assert_array_almost_equal(actual, desired, decimal=13)
+
+    def test_dirichlet(self):
+        np.random.seed(self.seed)
+        alpha = np.array([51.72840233779265162, 39.74494232180943953])
+        actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
+        desired = np.array([[[0.54539444573611562, 0.45460555426388438],
+                             [0.62345816822039413, 0.37654183177960598]],
+                            [[0.55206000085785778, 0.44793999914214233],
+                             [0.58964023305154301, 0.41035976694845688]],
+                            [[0.59266909280647828, 0.40733090719352177],
+                             [0.56974431743975207, 0.43025568256024799]]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_dirichlet_size(self):
+        # gh-3173
+        p = np.array([51.72840233779265162, 39.74494232180943953])
+        assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+        assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+        assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+        assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
+        assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
+        assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
+
+        assert_raises(TypeError, np.random.dirichlet, p, float(1))
+
+    def test_dirichlet_bad_alpha(self):
+        # gh-2089
+        alpha = np.array([5.4e-01, -1.0e-16])
+        assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
+
+        # gh-15876
+        assert_raises(ValueError, random.dirichlet, [[5, 1]])
+        assert_raises(ValueError, random.dirichlet, [[5], [1]])
+        assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
+        assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
+
+    def test_exponential(self):
+        np.random.seed(self.seed)
+        actual = np.random.exponential(1.1234, size=(3, 2))
+        desired = np.array([[1.08342649775011624, 1.00607889924557314],
+                            [2.46628830085216721, 2.49668106809923884],
+                            [0.68717433461363442, 1.69175666993575979]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_exponential_0(self):
+        assert_equal(np.random.exponential(scale=0), 0)
+        assert_raises(ValueError, np.random.exponential, scale=-0.)
+
+    def test_f(self):
+        np.random.seed(self.seed)
+        actual = np.random.f(12, 77, size=(3, 2))
+        desired = np.array([[1.21975394418575878, 1.75135759791559775],
+                            [1.44803115017146489, 1.22108959480396262],
+                            [1.02176975757740629, 1.34431827623300415]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_gamma(self):
+        np.random.seed(self.seed)
+        actual = np.random.gamma(5, 3, size=(3, 2))
+        desired = np.array([[24.60509188649287182, 28.54993563207210627],
+                            [26.13476110204064184, 12.56988482927716078],
+                            [31.71863275789960568, 33.30143302795922011]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+    def test_gamma_0(self):
+        assert_equal(np.random.gamma(shape=0, scale=0), 0)
+        assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
+
+    def test_geometric(self):
+        np.random.seed(self.seed)
+        actual = np.random.geometric(.123456789, size=(3, 2))
+        desired = np.array([[8, 7],
+                            [17, 17],
+                            [5, 12]])
+        assert_array_equal(actual, desired)
+
+    def test_gumbel(self):
+        np.random.seed(self.seed)
+        actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+        desired = np.array([[0.19591898743416816, 0.34405539668096674],
+                            [-1.4492522252274278, -1.47374816298446865],
+                            [1.10651090478803416, -0.69535848626236174]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_gumbel_0(self):
+        assert_equal(np.random.gumbel(scale=0), 0)
+        assert_raises(ValueError, np.random.gumbel, scale=-0.)
+
+    def test_hypergeometric(self):
+        np.random.seed(self.seed)
+        actual = np.random.hypergeometric(10, 5, 14, size=(3, 2))
+        desired = np.array([[10, 10],
+                            [10, 10],
+                            [9, 9]])
+        assert_array_equal(actual, desired)
+
+        # Test nbad = 0
+        actual = np.random.hypergeometric(5, 0, 3, size=4)
+        desired = np.array([3, 3, 3, 3])
+        assert_array_equal(actual, desired)
+
+        actual = np.random.hypergeometric(15, 0, 12, size=4)
+        desired = np.array([12, 12, 12, 12])
+        assert_array_equal(actual, desired)
+
+        # Test ngood = 0
+        actual = np.random.hypergeometric(0, 5, 3, size=4)
+        desired = np.array([0, 0, 0, 0])
+        assert_array_equal(actual, desired)
+
+        actual = np.random.hypergeometric(0, 15, 12, size=4)
+        desired = np.array([0, 0, 0, 0])
+        assert_array_equal(actual, desired)
+
+    def test_laplace(self):
+        np.random.seed(self.seed)
+        actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+        desired = np.array([[0.66599721112760157, 0.52829452552221945],
+                            [3.12791959514407125, 3.18202813572992005],
+                            [-0.05391065675859356, 1.74901336242837324]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_laplace_0(self):
+        assert_equal(np.random.laplace(scale=0), 0)
+        assert_raises(ValueError, np.random.laplace, scale=-0.)
+
+    def test_logistic(self):
+        np.random.seed(self.seed)
+        actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+        desired = np.array([[1.09232835305011444, 0.8648196662399954],
+                            [4.27818590694950185, 4.33897006346929714],
+                            [-0.21682183359214885, 2.63373365386060332]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_lognormal(self):
+        np.random.seed(self.seed)
+        actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+        desired = np.array([[16.50698631688883822, 36.54846706092654784],
+                            [22.67886599981281748, 0.71617561058995771],
+                            [65.72798501792723869, 86.84341601437161273]])
+        assert_array_almost_equal(actual, desired, decimal=13)
+
+    def test_lognormal_0(self):
+        assert_equal(np.random.lognormal(sigma=0), 1)
+        assert_raises(ValueError, np.random.lognormal, sigma=-0.)
+
+    def test_logseries(self):
+        np.random.seed(self.seed)
+        actual = np.random.logseries(p=.923456789, size=(3, 2))
+        desired = np.array([[2, 2],
+                            [6, 17],
+                            [3, 6]])
+        assert_array_equal(actual, desired)
+
+    def test_multinomial(self):
+        np.random.seed(self.seed)
+        actual = np.random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
+        desired = np.array([[[4, 3, 5, 4, 2, 2],
+                             [5, 2, 8, 2, 2, 1]],
+                            [[3, 4, 3, 6, 0, 4],
+                             [2, 1, 4, 3, 6, 4]],
+                            [[4, 4, 2, 5, 2, 3],
+                             [4, 3, 4, 2, 3, 4]]])
+        assert_array_equal(actual, desired)
+
+    def test_multivariate_normal(self):
+        np.random.seed(self.seed)
+        mean = (.123456789, 10)
+        cov = [[1, 0], [0, 1]]
+        size = (3, 2)
+        actual = np.random.multivariate_normal(mean, cov, size)
+        desired = np.array([[[1.463620246718631, 11.73759122771936],
+                             [1.622445133300628, 9.771356667546383]],
+                            [[2.154490787682787, 12.170324946056553],
+                             [1.719909438201865, 9.230548443648306]],
+                            [[0.689515026297799, 9.880729819607714],
+                             [-0.023054015651998, 9.201096623542879]]])
+
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+        # Check for default size, was raising deprecation warning
+        actual = np.random.multivariate_normal(mean, cov)
+        desired = np.array([0.895289569463708, 9.17180864067987])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+        # Check that non positive-semidefinite covariance warns with
+        # RuntimeWarning
+        mean = [0, 0]
+        cov = [[1, 2], [2, 1]]
+        assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
+
+        # and that it doesn't warn with check_valid='ignore'
+        assert_no_warnings(np.random.multivariate_normal, mean, cov,
+                           check_valid='ignore')
+
+        # and that it raises a ValueError with check_valid='raise'
+        assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
+                      check_valid='raise')
+
+        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+        with suppress_warnings() as sup:
+            np.random.multivariate_normal(mean, cov)
+            w = sup.record(RuntimeWarning)
+            assert len(w) == 0
+
+    def test_negative_binomial(self):
+        np.random.seed(self.seed)
+        actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
+        desired = np.array([[848, 841],
+                            [892, 611],
+                            [779, 647]])
+        assert_array_equal(actual, desired)
+
+    def test_noncentral_chisquare(self):
+        np.random.seed(self.seed)
+        actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+        desired = np.array([[23.91905354498517511, 13.35324692733826346],
+                            [31.22452661329736401, 16.60047399466177254],
+                            [5.03461598262724586, 17.94973089023519464]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+        actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+        desired = np.array([[1.47145377828516666, 0.15052899268012659],
+                            [0.00943803056963588, 1.02647251615666169],
+                            [0.332334982684171, 0.15451287602753125]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+        np.random.seed(self.seed)
+        actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+        desired = np.array([[9.597154162763948, 11.725484450296079],
+                            [10.413711048138335, 3.694475922923986],
+                            [13.484222138963087, 14.377255424602957]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+    def test_noncentral_f(self):
+        np.random.seed(self.seed)
+        actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+                                        size=(3, 2))
+        desired = np.array([[1.40598099674926669, 0.34207973179285761],
+                            [3.57715069265772545, 7.92632662577829805],
+                            [0.43741599463544162, 1.1774208752428319]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+    def test_normal(self):
+        np.random.seed(self.seed)
+        actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+        desired = np.array([[2.80378370443726244, 3.59863924443872163],
+                            [3.121433477601256, -0.33382987590723379],
+                            [4.18552478636557357, 4.46410668111310471]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_normal_0(self):
+        assert_equal(np.random.normal(scale=0), 0)
+        assert_raises(ValueError, np.random.normal, scale=-0.)
+
+    def test_pareto(self):
+        np.random.seed(self.seed)
+        actual = np.random.pareto(a=.123456789, size=(3, 2))
+        desired = np.array(
+            [[2.46852460439034849e+03, 1.41286880810518346e+03],
+             [5.28287797029485181e+07, 6.57720981047328785e+07],
+             [1.40840323350391515e+02, 1.98390255135251704e+05]])
+        # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
+        # matrix differs by 24 nulps. Discussion:
+        #   https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
+        # Consensus is that this is probably some gcc quirk that affects
+        # rounding but not in any important way, so we just use a looser
+        # tolerance on this test:
+        np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
+
+    def test_poisson(self):
+        np.random.seed(self.seed)
+        actual = np.random.poisson(lam=.123456789, size=(3, 2))
+        desired = np.array([[0, 0],
+                            [1, 0],
+                            [0, 0]])
+        assert_array_equal(actual, desired)
+
+    def test_poisson_exceptions(self):
+        lambig = np.iinfo('l').max
+        lamneg = -1
+        assert_raises(ValueError, np.random.poisson, lamneg)
+        assert_raises(ValueError, np.random.poisson, [lamneg] * 10)
+        assert_raises(ValueError, np.random.poisson, lambig)
+        assert_raises(ValueError, np.random.poisson, [lambig] * 10)
+
+    def test_power(self):
+        np.random.seed(self.seed)
+        actual = np.random.power(a=.123456789, size=(3, 2))
+        desired = np.array([[0.02048932883240791, 0.01424192241128213],
+                            [0.38446073748535298, 0.39499689943484395],
+                            [0.00177699707563439, 0.13115505880863756]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_rayleigh(self):
+        np.random.seed(self.seed)
+        actual = np.random.rayleigh(scale=10, size=(3, 2))
+        desired = np.array([[13.8882496494248393, 13.383318339044731],
+                            [20.95413364294492098, 21.08285015800712614],
+                            [11.06066537006854311, 17.35468505778271009]])
+        assert_array_almost_equal(actual, desired, decimal=14)
+
+    def test_rayleigh_0(self):
+        assert_equal(np.random.rayleigh(scale=0), 0)
+        assert_raises(ValueError, np.random.rayleigh, scale=-0.)
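+
+    # NB: the `scale=-0.` cases in the *_0 tests above and below pass
+    # negative zero on purpose: a zero scale is accepted, so the rejection
+    # has to key on the sign bit rather than on the magnitude.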
+ + def test_standard_cauchy(self): + np.random.seed(self.seed) + actual = np.random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + np.random.seed(self.seed) + actual = np.random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + np.random.seed(self.seed) + actual = np.random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(np.random.standard_gamma(shape=0), 0) + assert_raises(ValueError, np.random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + np.random.seed(self.seed) + actual = np.random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + np.random.seed(self.seed) + actual = np.random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + np.random.seed(self.seed) + actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + np.random.seed(self.seed) + actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = np.random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, np.random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + __index__ = __int__ + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + np.random.seed(self.seed) + actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + np.random.seed(self.seed) + r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + np.testing.assert_(np.isfinite(r).all()) + + def test_wald(self): + np.random.seed(self.seed) + actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + np.random.seed(self.seed) + actual = np.random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + np.random.seed(self.seed) + assert_equal(np.random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, np.random.weibull, a=-0.) 
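+
+    # Editorial note (assumption, not in the original suite): the
+    # hard-coded `desired` arrays in this class are golden values drawn
+    # from the legacy MT19937 stream, whose output is kept bit-for-bit
+    # stable by NumPy's stream-compatibility policy. Any of them can be
+    # regenerated by re-seeding with the class seed, e.g.:
+    #
+    #     np.random.seed(self.seed)
+    #     print(repr(np.random.weibull(a=1.23, size=(3, 2))))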
+ + def test_zipf(self): + np.random.seed(self.seed) + actual = np.random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def setSeed(self): + np.random.seed(self.seed) + + # TODO: Include test for randint once it can broadcast + # Can steal the test written in PR #6938 + + def test_uniform(self): + low = [0] + high = [1] + uniform = np.random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.setSeed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = np.random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.setSeed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.setSeed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = np.random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.setSeed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.setSeed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = np.random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = np.random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = np.random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.setSeed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.setSeed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = np.random.f + desired = np.array([0.80038951638264799, + 
0.86768719635363512, + 2.7251095168386801]) + + self.setSeed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.setSeed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = np.random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.setSeed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.setSeed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.setSeed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = np.random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.setSeed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = np.random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.setSeed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.setSeed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = np.random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.setSeed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = np.random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.setSeed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, 
bad_kappa) + + self.setSeed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = np.random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.setSeed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = np.random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.setSeed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = np.random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.setSeed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = np.random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.setSeed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.setSeed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = np.random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.setSeed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.setSeed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = np.random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.setSeed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.setSeed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = np.random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.setSeed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + + self.setSeed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = np.random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.setSeed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + 
assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = np.random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.setSeed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + + self.setSeed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + assert_raises(ValueError, wald, 0.0, 1) + assert_raises(ValueError, wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = np.random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.setSeed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + self.setSeed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + self.setSeed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = np.random.binomial + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.setSeed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = np.random.negative_binomial + desired = np.array([1, 0, 1]) + + self.setSeed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.setSeed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = np.random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + 
bad_lam_two = [max_lam * 2] + poisson = np.random.poisson + desired = np.array([1, 1, 0]) + + self.setSeed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = np.random.zipf + desired = np.array([2, 2, 1]) + + self.setSeed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = np.random.geometric + desired = np.array([2, 2, 2]) + + self.setSeed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = np.random.hypergeometric + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.setSeed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = np.random.logseries + desired = np.array([1, 1, 1]) + + self.setSeed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + def setup_method(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(np.random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(np.random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + 
def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000) + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup_method(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (np.random.exponential, np.random.standard_gamma, + np.random.chisquare, np.random.standard_t, + np.random.pareto, np.random.weibull, + np.random.power, np.random.rayleigh, + np.random.poisson, np.random.zipf, + np.random.geometric, np.random.logseries) + + probfuncs = (np.random.geometric, np.random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (np.random.uniform, np.random.normal, + np.random.beta, np.random.gamma, + np.random.f, np.random.noncentral_chisquare, + np.random.vonmises, np.random.laplace, + np.random.gumbel, np.random.logistic, + np.random.lognormal, np.random.wald, + np.random.binomial, np.random.negative_binomial) + + probfuncs = (np.random.binomial, np.random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + + def test_randint(self): + itype = [bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + func = np.random.randint + high = np.array([1]) + low = np.array([0]) + + for dt in itype: + out = func(low, high, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + out = func(low[0], high, dtype=dt) + assert_equal(out.shape, self.tgtShape) + + out = func(low, high[0], dtype=dt) + assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [np.random.noncentral_f, np.random.triangular, + np.random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) diff --git a/python/numpy/random/tests/test_randomstate.py b/python/numpy/random/tests/test_randomstate.py new file mode 100644 index 000000000..cf4488543 --- /dev/null +++ b/python/numpy/random/tests/test_randomstate.py @@ -0,0 +1,2130 @@ +import hashlib +import pickle +import sys +import warnings + +import pytest + +import numpy as np +from numpy import random +from numpy.random import MT19937, PCG64 +from numpy.testing import ( + IS_WASM, + assert_, + assert_array_almost_equal, + assert_array_equal, + assert_equal, + assert_no_warnings, + assert_raises, + assert_warns, + suppress_warnings, +) + +INT_FUNCS = {'binomial': (100.0, 0.6), + 'geometric': (.5,), + 'hypergeometric': (20, 20, 10), + 
'logseries': (.5,), + 'multinomial': (20, np.ones(6) / 6.0), + 'negative_binomial': (100, .5), + 'poisson': (10.0,), + 'zipf': (2,), + } + +if np.iinfo(np.long).max < 2**32: + # Windows and some 32-bit platforms, e.g., ARM + INT_FUNC_HASHES = {'binomial': '2fbead005fc63942decb5326d36a1f32fe2c9d32c904ee61e46866b88447c263', # noqa: E501 + 'logseries': '23ead5dcde35d4cfd4ef2c105e4c3d43304b45dc1b1444b7823b9ee4fa144ebb', # noqa: E501 + 'geometric': '0d764db64f5c3bad48c8c33551c13b4d07a1e7b470f77629bef6c985cac76fcf', # noqa: E501 + 'hypergeometric': '7b59bf2f1691626c5815cdcd9a49e1dd68697251d4521575219e4d2a1b8b2c67', # noqa: E501 + 'multinomial': 'd754fa5b92943a38ec07630de92362dd2e02c43577fc147417dc5b9db94ccdd3', # noqa: E501 + 'negative_binomial': '8eb216f7cb2a63cf55605422845caaff002fddc64a7dc8b2d45acd477a49e824', # noqa: E501 + 'poisson': '70c891d76104013ebd6f6bcf30d403a9074b886ff62e4e6b8eb605bf1a4673b7', # noqa: E501 + 'zipf': '01f074f97517cd5d21747148ac6ca4074dde7fcb7acbaec0a936606fecacd93f', # noqa: E501 + } +else: + INT_FUNC_HASHES = {'binomial': '8626dd9d052cb608e93d8868de0a7b347258b199493871a1dc56e2a26cacb112', # noqa: E501 + 'geometric': '8edd53d272e49c4fc8fbbe6c7d08d563d62e482921f3131d0a0e068af30f0db9', # noqa: E501 + 'hypergeometric': '83496cc4281c77b786c9b7ad88b74d42e01603a55c60577ebab81c3ba8d45657', # noqa: E501 + 'logseries': '65878a38747c176bc00e930ebafebb69d4e1e16cd3a704e264ea8f5e24f548db', # noqa: E501 + 'multinomial': '7a984ae6dca26fd25374479e118b22f55db0aedccd5a0f2584ceada33db98605', # noqa: E501 + 'negative_binomial': 'd636d968e6a24ae92ab52fe11c46ac45b0897e98714426764e820a7d77602a61', # noqa: E501 + 'poisson': '956552176f77e7c9cb20d0118fc9cf690be488d790ed4b4c4747b965e61b0bb4', # noqa: E501 + 'zipf': 'f84ba7feffda41e606e20b28dfc0f1ea9964a74574513d4a4cbc98433a8bfa45', # noqa: E501 + } + + +@pytest.fixture(scope='module', params=INT_FUNCS) +def int_func(request): + return (request.param, INT_FUNCS[request.param], + INT_FUNC_HASHES[request.param]) + + +@pytest.fixture +def restore_singleton_bitgen(): + """Ensures that the singleton bitgen is restored after a test""" + orig_bitgen = np.random.get_bit_generator() + yield + np.random.set_bit_generator(orig_bitgen) + + +def assert_mt19937_state_equal(a, b): + assert_equal(a['bit_generator'], b['bit_generator']) + assert_array_equal(a['state']['key'], b['state']['key']) + assert_array_equal(a['state']['pos'], b['state']['pos']) + assert_equal(a['has_gauss'], b['has_gauss']) + assert_equal(a['gauss'], b['gauss']) + + +class TestSeed: + def test_scalar(self): + s = random.RandomState(0) + assert_equal(s.randint(1000), 684) + s = random.RandomState(4294967295) + assert_equal(s.randint(1000), 419) + + def test_array(self): + s = random.RandomState(range(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState(np.arange(10)) + assert_equal(s.randint(1000), 468) + s = random.RandomState([0]) + assert_equal(s.randint(1000), 973) + s = random.RandomState([4294967295]) + assert_equal(s.randint(1000), 265) + + def test_invalid_scalar(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, -0.5) + assert_raises(ValueError, random.RandomState, -1) + + def test_invalid_array(self): + # seed must be an unsigned 32 bit integer + assert_raises(TypeError, random.RandomState, [-0.5]) + assert_raises(ValueError, random.RandomState, [-1]) + assert_raises(ValueError, random.RandomState, [4294967296]) + assert_raises(ValueError, random.RandomState, [1, 2, 4294967296]) + assert_raises(ValueError, 
random.RandomState, [1, -2, 4294967296]) + + def test_invalid_array_shape(self): + # gh-9832 + assert_raises(ValueError, random.RandomState, np.array([], + dtype=np.int64)) + assert_raises(ValueError, random.RandomState, [[1, 2, 3]]) + assert_raises(ValueError, random.RandomState, [[1, 2, 3], + [4, 5, 6]]) + + def test_cannot_seed(self): + rs = random.RandomState(PCG64(0)) + with assert_raises(TypeError): + rs.seed(1234) + + def test_invalid_initialization(self): + assert_raises(ValueError, random.RandomState, MT19937) + + +class TestBinomial: + def test_n_zero(self): + # Tests the corner case of n == 0 for the binomial distribution. + # binomial(0, p) should be zero for any p in [0, 1]. + # This test addresses issue #3480. + zeros = np.zeros(2, dtype='int') + for p in [0, .5, 1]: + assert_(random.binomial(0, p) == 0) + assert_array_equal(random.binomial(zeros, p), zeros) + + def test_p_is_nan(self): + # Issue #4571. + assert_raises(ValueError, random.binomial, 1, np.nan) + + +class TestMultinomial: + def test_basic(self): + random.multinomial(100, [0.2, 0.8]) + + def test_zero_probability(self): + random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) + + def test_int_negative_interval(self): + assert_(-5 <= random.randint(-5, -1) < -1) + x = random.randint(-5, -1, 5) + assert_(np.all(-5 <= x)) + assert_(np.all(x < -1)) + + def test_size(self): + # gh-3173 + p = [0.5, 0.5] + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, + (2, 2, 2)) + + assert_raises(TypeError, random.multinomial, 1, p, + float(1)) + + def test_invalid_prob(self): + assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2]) + assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9]) + + def test_invalid_n(self): + assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2]) + + def test_p_non_contiguous(self): + p = np.arange(15.) + p /= np.sum(p[1::3]) + pvals = p[1::3] + random.seed(1432985819) + non_contig = random.multinomial(100, pvals=pvals) + random.seed(1432985819) + contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals)) + assert_array_equal(non_contig, contig) + + def test_multinomial_pvals_float32(self): + x = np.array([9.9e-01, 9.9e-01, 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09, + 1.0e-09, 1.0e-09, 1.0e-09, 1.0e-09], dtype=np.float32) + pvals = x / x.sum() + match = r"[\w\s]*pvals array is cast to 64-bit floating" + with pytest.raises(ValueError, match=match): + random.multinomial(1, pvals) + + def test_multinomial_n_float(self): + # Non-index integer types should gracefully truncate floats + random.multinomial(100.5, [0.2, 0.8]) + +class TestSetState: + def setup_method(self): + self.seed = 1234567890 + self.random_state = random.RandomState(self.seed) + self.state = self.random_state.get_state() + + def test_basic(self): + old = self.random_state.tomaxint(16) + self.random_state.set_state(self.state) + new = self.random_state.tomaxint(16) + assert_(np.all(old == new)) + + def test_gaussian_reset(self): + # Make sure the cached every-other-Gaussian is reset. 
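+        # (Editorial gloss, not in the original: the legacy gaussian
+        # generator draws values in pairs and caches the second one;
+        # set_state must reset that cache, or the second draw below
+        # would start from a stale cached value and diverge.)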
+ old = self.random_state.standard_normal(size=3) + self.random_state.set_state(self.state) + new = self.random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_gaussian_reset_in_media_res(self): + # When the state is saved with a cached Gaussian, make sure the + # cached Gaussian is restored. + + self.random_state.standard_normal() + state = self.random_state.get_state() + old = self.random_state.standard_normal(size=3) + self.random_state.set_state(state) + new = self.random_state.standard_normal(size=3) + assert_(np.all(old == new)) + + def test_backwards_compatibility(self): + # Make sure we can accept old state tuples that do not have the + # cached Gaussian value. + old_state = self.state[:-2] + x1 = self.random_state.standard_normal(size=16) + self.random_state.set_state(old_state) + x2 = self.random_state.standard_normal(size=16) + self.random_state.set_state(self.state) + x3 = self.random_state.standard_normal(size=16) + assert_(np.all(x1 == x2)) + assert_(np.all(x1 == x3)) + + def test_negative_binomial(self): + # Ensure that the negative binomial results take floating point + # arguments without truncation. + self.random_state.negative_binomial(0.5, 0.5) + + def test_get_state_warning(self): + rs = random.RandomState(PCG64()) + with suppress_warnings() as sup: + w = sup.record(RuntimeWarning) + state = rs.get_state() + assert_(len(w) == 1) + assert isinstance(state, dict) + assert state['bit_generator'] == 'PCG64' + + def test_invalid_legacy_state_setting(self): + state = self.random_state.get_state() + new_state = ('Unknown', ) + state[1:] + assert_raises(ValueError, self.random_state.set_state, new_state) + assert_raises(TypeError, self.random_state.set_state, + np.array(new_state, dtype=object)) + state = self.random_state.get_state(legacy=False) + del state['bit_generator'] + assert_raises(ValueError, self.random_state.set_state, state) + + def test_pickle(self): + self.random_state.seed(0) + self.random_state.random_sample(100) + self.random_state.standard_normal() + pickled = self.random_state.get_state(legacy=False) + assert_equal(pickled['has_gauss'], 1) + rs_unpick = pickle.loads(pickle.dumps(self.random_state)) + unpickled = rs_unpick.get_state(legacy=False) + assert_mt19937_state_equal(pickled, unpickled) + + def test_state_setting(self): + attr_state = self.random_state.__getstate__() + self.random_state.standard_normal() + self.random_state.__setstate__(attr_state) + state = self.random_state.get_state(legacy=False) + assert_mt19937_state_equal(attr_state, state) + + def test_repr(self): + assert repr(self.random_state).startswith('RandomState(MT19937)') + + +class TestRandint: + + rfunc = random.randint + + # valid integer/boolean types + itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16, + np.int32, np.uint32, np.int64, np.uint64] + + def test_unsupported_type(self): + assert_raises(TypeError, self.rfunc, 1, dtype=float) + + def test_bounds_checking(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) + assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) + assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) + assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) + + def test_rng_zero_and_extremes(self): + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + tgt = ubnd - 1 + 
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = lbnd + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + tgt = (lbnd + ubnd) // 2 + assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) + + def test_full_range(self): + # Test for ticket #1690 + + for dt in self.itype: + lbnd = 0 if dt is np.bool else np.iinfo(dt).min + ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1 + + try: + self.rfunc(lbnd, ubnd, dtype=dt) + except Exception as e: + raise AssertionError("No error should have been raised, " + "but one was with the following " + "message:\n\n%s" % str(e)) + + def test_in_bounds_fuzz(self): + # Don't use fixed seed + random.seed() + + for dt in self.itype[1:]: + for ubnd in [4, 8, 16]: + vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) + assert_(vals.max() < ubnd) + assert_(vals.min() >= 2) + + vals = self.rfunc(0, 2, size=2**16, dtype=np.bool) + + assert_(vals.max() < 2) + assert_(vals.min() >= 0) + + def test_repeatability(self): + # We use a sha256 hash of generated sequences of 1000 samples + # in the range [0, 6) for all but bool, where the range + # is [0, 2). Hashes are for little endian numbers. + tgt = {'bool': '509aea74d792fb931784c4b0135392c65aec64beee12b0cc167548a2c3d31e71', # noqa: E501 + 'int16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'int32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'int64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'int8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404', # noqa: E501 + 'uint16': '7b07f1a920e46f6d0fe02314155a2330bcfd7635e708da50e536c5ebb631a7d4', # noqa: E501 + 'uint32': 'e577bfed6c935de944424667e3da285012e741892dcb7051a8f1ce68ab05c92f', # noqa: E501 + 'uint64': '0fbead0b06759df2cfb55e43148822d4a1ff953c7eb19a5b08445a63bb64fa9e', # noqa: E501 + 'uint8': '001aac3a5acb935a9b186cbe14a1ca064b8bb2dd0b045d48abeacf74d0203404'} # noqa: E501 + + for dt in self.itype[1:]: + random.seed(1234) + + # view as little endian for hash + if sys.byteorder == 'little': + val = self.rfunc(0, 6, size=1000, dtype=dt) + else: + val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() + + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(tgt[np.dtype(dt).name] == res) + + # bools do not depend on endianness + random.seed(1234) + val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) + res = hashlib.sha256(val).hexdigest() + assert_(tgt[np.dtype(bool).name] == res) + + @pytest.mark.skipif(np.iinfo('l').max < 2**32, + reason='Cannot test with 32-bit C long') + def test_repeatability_32bit_boundary_broadcasting(self): + desired = np.array([[[3992670689, 2438360420, 2557845020], + [4107320065, 4142558326, 3216529513], + [1605979228, 2807061240, 665605495]], + [[3211410639, 4128781000, 457175120], + [1712592594, 1282922662, 3081439808], + [3997822960, 2008322436, 1563495165]], + [[1398375547, 4269260146, 115316740], + [3414372578, 3437564012, 2112038651], + [3572980305, 2260248732, 3908238631]], + [[2561372503, 223155946, 3127879445], + [ 441282060, 3514786552, 2148440361], + [1629275283, 3479737011, 3003195987]], + [[ 412181688, 940383289, 3047321305], + [2978368172, 764731833, 2282559898], + [ 105711276, 720447391, 3596512484]]]) + for size in [None, (5, 3, 3)]: + random.seed(12345) + x = self.rfunc([[-1], [0], [1]], [2**32 - 1, 2**32, 2**32 + 1], + size=size) + assert_array_equal(x, desired if size is not None else desired[0]) + + def 
test_int64_uint64_corner_case(self):
+        # When stored in Numpy arrays, `lbnd` is cast
+        # as np.int64, and `ubnd` is cast as np.uint64.
+        # Checking whether `lbnd` >= `ubnd` used to be
+        # done solely via direct comparison, which is incorrect
+        # because when Numpy tries to compare both numbers,
+        # it casts both to np.float64 because there is
+        # no integer superset of np.int64 and np.uint64. However,
+        # `ubnd` is too large to be represented in np.float64,
+        # causing it to be rounded down to np.iinfo(np.int64).max,
+        # leading to a ValueError because `lbnd` now equals
+        # the new `ubnd`.
+
+        dt = np.int64
+        tgt = np.iinfo(np.int64).max
+        lbnd = np.int64(np.iinfo(np.int64).max)
+        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+        # None of these function calls should
+        # generate a ValueError now.
+        actual = random.randint(lbnd, ubnd, dtype=dt)
+        assert_equal(actual, tgt)
+
+    def test_respect_dtype_singleton(self):
+        # See gh-7203
+        for dt in self.itype:
+            lbnd = 0 if dt is np.bool else np.iinfo(dt).min
+            ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+
+            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            assert_equal(sample.dtype, np.dtype(dt))
+
+        for dt in (bool, int):
+            # The legacy random generation forces the use of "long" on this
+            # branch even when the input is `int` and the default dtype
+            # for int changed (dtype=int is also the function's default)
+            op_dtype = "long" if dt is int else "bool"
+            lbnd = 0 if dt is bool else np.iinfo(op_dtype).min
+            ubnd = 2 if dt is bool else np.iinfo(op_dtype).max + 1
+
+            sample = self.rfunc(lbnd, ubnd, dtype=dt)
+            assert_(not hasattr(sample, 'dtype'))
+            assert_equal(type(sample), dt)
+
+
+class TestRandomDist:
+    # Make sure the random distribution returns the correct value for a
+    # given seed
+
+    def setup_method(self):
+        self.seed = 1234567890
+
+    def test_rand(self):
+        random.seed(self.seed)
+        actual = random.rand(3, 2)
+        desired = np.array([[0.61879477158567997, 0.59162362775974664],
+                            [0.88868358904449662, 0.89165480011560816],
+                            [0.4575674820298663, 0.7781880808593471]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_rand_singleton(self):
+        random.seed(self.seed)
+        actual = random.rand()
+        desired = 0.61879477158567997
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+    def test_randn(self):
+        random.seed(self.seed)
+        actual = random.randn(3, 2)
+        desired = np.array([[1.34016345771863121, 1.73759122771936081],
+                            [1.498988344300628, -0.2286433324536169],
+                            [2.031033998682787, 2.17032494605655257]])
+        assert_array_almost_equal(actual, desired, decimal=15)
+
+        random.seed(self.seed)
+        actual = random.randn()
+        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+    def test_randint(self):
+        random.seed(self.seed)
+        actual = random.randint(-99, 99, size=(3, 2))
+        desired = np.array([[31, 3],
+                            [-52, 41],
+                            [-48, -66]])
+        assert_array_equal(actual, desired)
+
+    def test_random_integers(self):
+        random.seed(self.seed)
+        with suppress_warnings() as sup:
+            w = sup.record(DeprecationWarning)
+            actual = random.random_integers(-99, 99, size=(3, 2))
+            assert_(len(w) == 1)
+        desired = np.array([[31, 3],
+                            [-52, 41],
+                            [-48, -66]])
+        assert_array_equal(actual, desired)
+
+        random.seed(self.seed)
+        with suppress_warnings() as sup:
+            w = sup.record(DeprecationWarning)
+            actual = random.random_integers(198, size=(3, 2))
+            assert_(len(w) == 1)
+        assert_array_equal(actual, desired + 100)
+
+    def test_tomaxint(self):
+        random.seed(self.seed)
+        rs = random.RandomState(self.seed)
+        actual = rs.tomaxint(size=(3, 2))
+        if
np.iinfo(np.long).max == 2147483647: + desired = np.array([[1328851649, 731237375], + [1270502067, 320041495], + [1908433478, 499156889]], dtype=np.int64) + else: + desired = np.array([[5707374374421908479, 5456764827585442327], + [8196659375100692377, 8224063923314595285], + [4220315081820346526, 7177518203184491332]], + dtype=np.int64) + + assert_equal(actual, desired) + + rs.seed(self.seed) + actual = rs.tomaxint() + assert_equal(actual, desired[0, 0]) + + def test_random_integers_max_int(self): + # Tests whether random_integers can generate the + # maximum allowed Python int that can be converted + # into a C long. Previous implementations of this + # method have thrown an OverflowError when attempting + # to generate this integer. + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + actual = random.random_integers(np.iinfo('l').max, + np.iinfo('l').max) + assert_(len(w) == 1) + + desired = np.iinfo('l').max + assert_equal(actual, desired) + with suppress_warnings() as sup: + w = sup.record(DeprecationWarning) + typer = np.dtype('l').type + actual = random.random_integers(typer(np.iinfo('l').max), + typer(np.iinfo('l').max)) + assert_(len(w) == 1) + assert_equal(actual, desired) + + def test_random_integers_deprecated(self): + with warnings.catch_warnings(): + warnings.simplefilter("error", DeprecationWarning) + + # DeprecationWarning raised with high == None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max) + + # DeprecationWarning raised with high != None + assert_raises(DeprecationWarning, + random.random_integers, + np.iinfo('l').max, np.iinfo('l').max) + + def test_random_sample(self): + random.seed(self.seed) + actual = random.random_sample((3, 2)) + desired = np.array([[0.61879477158567997, 0.59162362775974664], + [0.88868358904449662, 0.89165480011560816], + [0.4575674820298663, 0.7781880808593471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + random.seed(self.seed) + actual = random.random_sample() + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_choice_uniform_replace(self): + random.seed(self.seed) + actual = random.choice(4, 4) + desired = np.array([2, 3, 2, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_replace(self): + random.seed(self.seed) + actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) + desired = np.array([1, 1, 2, 2]) + assert_array_equal(actual, desired) + + def test_choice_uniform_noreplace(self): + random.seed(self.seed) + actual = random.choice(4, 3, replace=False) + desired = np.array([0, 1, 3]) + assert_array_equal(actual, desired) + + def test_choice_nonuniform_noreplace(self): + random.seed(self.seed) + actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) + desired = np.array([2, 3, 1]) + assert_array_equal(actual, desired) + + def test_choice_noninteger(self): + random.seed(self.seed) + actual = random.choice(['a', 'b', 'c', 'd'], 4) + desired = np.array(['c', 'd', 'c', 'd']) + assert_array_equal(actual, desired) + + def test_choice_exceptions(self): + sample = random.choice + assert_raises(ValueError, sample, -1, 3) + assert_raises(ValueError, sample, 3., 3) + assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) + assert_raises(ValueError, sample, [], 3) + assert_raises(ValueError, sample, [1, 2, 3, 4], 3, + p=[[0.25, 0.25], [0.25, 0.25]]) + assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) + assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) + assert_raises(ValueError, sample, [1, 2], 3, 
p=[0.4, 0.4]) + assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], 2, + replace=False, p=[1, 0, 0]) + + def test_choice_return_shape(self): + p = [0.1, 0.9] + # Check scalar + assert_(np.isscalar(random.choice(2, replace=True))) + assert_(np.isscalar(random.choice(2, replace=False))) + assert_(np.isscalar(random.choice(2, replace=True, p=p))) + assert_(np.isscalar(random.choice(2, replace=False, p=p))) + assert_(np.isscalar(random.choice([1, 2], replace=True))) + assert_(random.choice([None], replace=True) is None) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, replace=True) is a) + + # Check 0-d array + s = () + assert_(not np.isscalar(random.choice(2, s, replace=True))) + assert_(not np.isscalar(random.choice(2, s, replace=False))) + assert_(not np.isscalar(random.choice(2, s, replace=True, p=p))) + assert_(not np.isscalar(random.choice(2, s, replace=False, p=p))) + assert_(not np.isscalar(random.choice([1, 2], s, replace=True))) + assert_(random.choice([None], s, replace=True).ndim == 0) + a = np.array([1, 2]) + arr = np.empty(1, dtype=object) + arr[0] = a + assert_(random.choice(arr, s, replace=True).item() is a) + + # Check multi dimensional array + s = (2, 3) + p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] + assert_equal(random.choice(6, s, replace=True).shape, s) + assert_equal(random.choice(6, s, replace=False).shape, s) + assert_equal(random.choice(6, s, replace=True, p=p).shape, s) + assert_equal(random.choice(6, s, replace=False, p=p).shape, s) + assert_equal(random.choice(np.arange(6), s, replace=True).shape, s) + + # Check zero-size + assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) + assert_equal(random.randint(0, -10, size=0).shape, (0,)) + assert_equal(random.randint(10, 10, size=0).shape, (0,)) + assert_equal(random.choice(0, size=0).shape, (0,)) + assert_equal(random.choice([], size=(0,)).shape, (0,)) + assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape, + (3, 0, 4)) + assert_raises(ValueError, random.choice, [], 10) + + def test_choice_nan_probabilities(self): + a = np.array([42, 1, 2]) + p = [None, None, None] + assert_raises(ValueError, random.choice, a, p=p) + + def test_choice_p_non_contiguous(self): + p = np.ones(10) / 5 + p[1::2] = 3.0 + random.seed(self.seed) + non_contig = random.choice(5, 3, p=p[::2]) + random.seed(self.seed) + contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2])) + assert_array_equal(non_contig, contig) + + def test_bytes(self): + random.seed(self.seed) + actual = random.bytes(10) + desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' + assert_equal(actual, desired) + + def test_shuffle(self): + # Test lists, arrays (of various dtypes), and multidimensional versions + # of both, c-contiguous or not: + for conv in [lambda x: np.array([]), + lambda x: x, + lambda x: np.asarray(x).astype(np.int8), + lambda x: np.asarray(x).astype(np.float32), + lambda x: np.asarray(x).astype(np.complex64), + lambda x: np.asarray(x).astype(object), + lambda x: [(i, i) for i in x], + lambda x: np.asarray([[i, i] for i in x]), + lambda x: np.vstack([x, x]).T, + # gh-11442 + lambda x: (np.asarray([(i, i) for i in x], + [("a", int), ("b", int)]) + .view(np.recarray)), + # gh-4270 + lambda x: np.asarray([(i, i) for i in x], + [("a", 
object, (1,)), + ("b", np.int32, (1,))])]: + random.seed(self.seed) + alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) + random.shuffle(alist) + actual = alist + desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) + assert_array_equal(actual, desired) + + def test_shuffle_masked(self): + # gh-3263 + a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) + b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) + a_orig = a.copy() + b_orig = b.copy() + for i in range(50): + random.shuffle(a) + assert_equal( + sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) + random.shuffle(b) + assert_equal( + sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) + + def test_shuffle_invalid_objects(self): + x = np.array(3) + assert_raises(TypeError, random.shuffle, x) + + def test_permutation(self): + random.seed(self.seed) + alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0] + actual = random.permutation(alist) + desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3] + assert_array_equal(actual, desired) + + random.seed(self.seed) + arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T + actual = random.permutation(arr_2d) + assert_array_equal(actual, np.atleast_2d(desired).T) + + random.seed(self.seed) + bad_x_str = "abcd" + assert_raises(IndexError, random.permutation, bad_x_str) + + random.seed(self.seed) + bad_x_float = 1.2 + assert_raises(IndexError, random.permutation, bad_x_float) + + integer_val = 10 + desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2] + + random.seed(self.seed) + actual = random.permutation(integer_val) + assert_array_equal(actual, desired) + + def test_beta(self): + random.seed(self.seed) + actual = random.beta(.1, .9, size=(3, 2)) + desired = np.array( + [[1.45341850513746058e-02, 5.31297615662868145e-04], + [1.85366619058432324e-06, 4.19214516800110563e-03], + [1.58405155108498093e-04, 1.26252891949397652e-04]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_binomial(self): + random.seed(self.seed) + actual = random.binomial(100.123, .456, size=(3, 2)) + desired = np.array([[37, 43], + [42, 48], + [46, 45]]) + assert_array_equal(actual, desired) + + random.seed(self.seed) + actual = random.binomial(100.123, .456) + desired = 37 + assert_array_equal(actual, desired) + + def test_chisquare(self): + random.seed(self.seed) + actual = random.chisquare(50, size=(3, 2)) + desired = np.array([[63.87858175501090585, 68.68407748911370447], + [65.77116116901505904, 47.09686762438974483], + [72.3828403199695174, 74.18408615260374006]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_dirichlet(self): + random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha, size=(3, 2)) + desired = np.array([[[0.54539444573611562, 0.45460555426388438], + [0.62345816822039413, 0.37654183177960598]], + [[0.55206000085785778, 0.44793999914214233], + [0.58964023305154301, 0.41035976694845688]], + [[0.59266909280647828, 0.40733090719352177], + [0.56974431743975207, 0.43025568256024799]]]) + assert_array_almost_equal(actual, desired, decimal=15) + bad_alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, bad_alpha) + + random.seed(self.seed) + alpha = np.array([51.72840233779265162, 39.74494232180943953]) + actual = random.dirichlet(alpha) + assert_array_almost_equal(actual, desired[0, 0], decimal=15) + + def test_dirichlet_size(self): + # gh-3173 + p = np.array([51.72840233779265162, 39.74494232180943953]) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + 
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2)) + assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) + assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) + + assert_raises(TypeError, random.dirichlet, p, float(1)) + + def test_dirichlet_bad_alpha(self): + # gh-2089 + alpha = np.array([5.4e-01, -1.0e-16]) + assert_raises(ValueError, random.dirichlet, alpha) + + def test_dirichlet_alpha_non_contiguous(self): + a = np.array([51.72840233779265162, -1.0, 39.74494232180943953]) + alpha = a[::2] + random.seed(self.seed) + non_contig = random.dirichlet(alpha, size=(3, 2)) + random.seed(self.seed) + contig = random.dirichlet(np.ascontiguousarray(alpha), + size=(3, 2)) + assert_array_almost_equal(non_contig, contig) + + def test_exponential(self): + random.seed(self.seed) + actual = random.exponential(1.1234, size=(3, 2)) + desired = np.array([[1.08342649775011624, 1.00607889924557314], + [2.46628830085216721, 2.49668106809923884], + [0.68717433461363442, 1.69175666993575979]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_exponential_0(self): + assert_equal(random.exponential(scale=0), 0) + assert_raises(ValueError, random.exponential, scale=-0.) + + def test_f(self): + random.seed(self.seed) + actual = random.f(12, 77, size=(3, 2)) + desired = np.array([[1.21975394418575878, 1.75135759791559775], + [1.44803115017146489, 1.22108959480396262], + [1.02176975757740629, 1.34431827623300415]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gamma(self): + random.seed(self.seed) + actual = random.gamma(5, 3, size=(3, 2)) + desired = np.array([[24.60509188649287182, 28.54993563207210627], + [26.13476110204064184, 12.56988482927716078], + [31.71863275789960568, 33.30143302795922011]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_gamma_0(self): + assert_equal(random.gamma(shape=0, scale=0), 0) + assert_raises(ValueError, random.gamma, shape=-0., scale=-0.) + + def test_geometric(self): + random.seed(self.seed) + actual = random.geometric(.123456789, size=(3, 2)) + desired = np.array([[8, 7], + [17, 17], + [5, 12]]) + assert_array_equal(actual, desired) + + def test_geometric_exceptions(self): + assert_raises(ValueError, random.geometric, 1.1) + assert_raises(ValueError, random.geometric, [1.1] * 10) + assert_raises(ValueError, random.geometric, -0.1) + assert_raises(ValueError, random.geometric, [-0.1] * 10) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.geometric, np.nan) + assert_raises(ValueError, random.geometric, [np.nan] * 10) + + def test_gumbel(self): + random.seed(self.seed) + actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.19591898743416816, 0.34405539668096674], + [-1.4492522252274278, -1.47374816298446865], + [1.10651090478803416, -0.69535848626236174]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_gumbel_0(self): + assert_equal(random.gumbel(scale=0), 0) + assert_raises(ValueError, random.gumbel, scale=-0.) 
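+
+    # Editorial note (not part of the original suite): the module-level
+    # `random.*` functions used throughout this class are bound methods
+    # of a hidden global RandomState instance, so `random.seed(...)`
+    # re-seeds that singleton. An equivalent explicit form would be:
+    #
+    #     rs = random.RandomState(self.seed)
+    #     actual = rs.gumbel(loc=.123456789, scale=2.0, size=(3, 2))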
+ + def test_hypergeometric(self): + random.seed(self.seed) + actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2)) + desired = np.array([[10, 10], + [10, 10], + [9, 9]]) + assert_array_equal(actual, desired) + + # Test nbad = 0 + actual = random.hypergeometric(5, 0, 3, size=4) + desired = np.array([3, 3, 3, 3]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(15, 0, 12, size=4) + desired = np.array([12, 12, 12, 12]) + assert_array_equal(actual, desired) + + # Test ngood = 0 + actual = random.hypergeometric(0, 5, 3, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + actual = random.hypergeometric(0, 15, 12, size=4) + desired = np.array([0, 0, 0, 0]) + assert_array_equal(actual, desired) + + def test_laplace(self): + random.seed(self.seed) + actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[0.66599721112760157, 0.52829452552221945], + [3.12791959514407125, 3.18202813572992005], + [-0.05391065675859356, 1.74901336242837324]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_laplace_0(self): + assert_equal(random.laplace(scale=0), 0) + assert_raises(ValueError, random.laplace, scale=-0.) + + def test_logistic(self): + random.seed(self.seed) + actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[1.09232835305011444, 0.8648196662399954], + [4.27818590694950185, 4.33897006346929714], + [-0.21682183359214885, 2.63373365386060332]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_lognormal(self): + random.seed(self.seed) + actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) + desired = np.array([[16.50698631688883822, 36.54846706092654784], + [22.67886599981281748, 0.71617561058995771], + [65.72798501792723869, 86.84341601437161273]]) + assert_array_almost_equal(actual, desired, decimal=13) + + def test_lognormal_0(self): + assert_equal(random.lognormal(sigma=0), 1) + assert_raises(ValueError, random.lognormal, sigma=-0.) + + def test_logseries(self): + random.seed(self.seed) + actual = random.logseries(p=.923456789, size=(3, 2)) + desired = np.array([[2, 2], + [6, 17], + [3, 6]]) + assert_array_equal(actual, desired) + + def test_logseries_zero(self): + assert random.logseries(0) == 1 + + @pytest.mark.parametrize("value", [np.nextafter(0., -1), 1., np.nan, 5.]) + def test_logseries_exceptions(self, value): + with np.errstate(invalid="ignore"): + with pytest.raises(ValueError): + random.logseries(value) + with pytest.raises(ValueError): + # contiguous path: + random.logseries(np.array([value] * 10)) + with pytest.raises(ValueError): + # non-contiguous path: + random.logseries(np.array([value] * 10)[::2]) + + def test_multinomial(self): + random.seed(self.seed) + actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2)) + desired = np.array([[[4, 3, 5, 4, 2, 2], + [5, 2, 8, 2, 2, 1]], + [[3, 4, 3, 6, 0, 4], + [2, 1, 4, 3, 6, 4]], + [[4, 4, 2, 5, 2, 3], + [4, 3, 4, 2, 3, 4]]]) + assert_array_equal(actual, desired) + + def test_multivariate_normal(self): + random.seed(self.seed) + mean = (.123456789, 10) + cov = [[1, 0], [0, 1]] + size = (3, 2) + actual = random.multivariate_normal(mean, cov, size) + desired = np.array([[[1.463620246718631, 11.73759122771936], + [1.622445133300628, 9.771356667546383]], + [[2.154490787682787, 12.170324946056553], + [1.719909438201865, 9.230548443648306]], + [[0.689515026297799, 9.880729819607714], + [-0.023054015651998, 9.201096623542879]]]) + + assert_array_almost_equal(actual, desired, decimal=15) + + # Check for default size, was raising deprecation warning + actual = random.multivariate_normal(mean, cov) + desired = np.array([0.895289569463708, 9.17180864067987]) + assert_array_almost_equal(actual, desired, decimal=15) + + # Check that non positive-semidefinite covariance warns with + # RuntimeWarning + mean = [0, 0] + cov = [[1, 2], [2, 1]] + assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov) + + # and that it doesn't warn with RuntimeWarning check_valid='ignore' + assert_no_warnings(random.multivariate_normal, mean, cov, + check_valid='ignore') + + # and that it raises with RuntimeWarning check_valid='raises' + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='raise') + + cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) + with suppress_warnings() as sup: + random.multivariate_normal(mean, cov) + w = sup.record(RuntimeWarning) + assert len(w) == 0 + + mu = np.zeros(2) + cov = np.eye(2) + assert_raises(ValueError, random.multivariate_normal, mean, cov, + check_valid='other') + assert_raises(ValueError, random.multivariate_normal, + np.zeros((2, 1, 1)), cov) + assert_raises(ValueError, random.multivariate_normal, + mu, np.empty((3, 2))) + assert_raises(ValueError, random.multivariate_normal, + mu, np.eye(3)) + + def test_negative_binomial(self): + random.seed(self.seed) + actual = random.negative_binomial(n=100, p=.12345, size=(3, 2)) + desired = np.array([[848, 841], + [892, 611], + [779, 647]]) + assert_array_equal(actual, desired) + + def test_negative_binomial_exceptions(self): + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.negative_binomial, 100, np.nan) + assert_raises(ValueError, random.negative_binomial, 100, + [np.nan] * 10) + + def test_noncentral_chisquare(self): + random.seed(self.seed) + actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) + desired = np.array([[23.91905354498517511, 13.35324692733826346], + [31.22452661329736401, 16.60047399466177254], + [5.03461598262724586, 17.94973089023519464]]) + assert_array_almost_equal(actual, desired, decimal=14) + + actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) + desired = np.array([[1.47145377828516666, 0.15052899268012659], + [0.00943803056963588, 1.02647251615666169], + [0.332334982684171, 0.15451287602753125]]) + assert_array_almost_equal(actual, desired, decimal=14) + + random.seed(self.seed) + actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) + desired = np.array([[9.597154162763948, 11.725484450296079], + [10.413711048138335, 3.694475922923986], + [13.484222138963087, 14.377255424602957]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f(self): + random.seed(self.seed) + actual = random.noncentral_f(dfnum=5, 
dfden=2, nonc=1, + size=(3, 2)) + desired = np.array([[1.40598099674926669, 0.34207973179285761], + [3.57715069265772545, 7.92632662577829805], + [0.43741599463544162, 1.1774208752428319]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_noncentral_f_nan(self): + random.seed(self.seed) + actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan) + assert np.isnan(actual) + + def test_normal(self): + random.seed(self.seed) + actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2)) + desired = np.array([[2.80378370443726244, 3.59863924443872163], + [3.121433477601256, -0.33382987590723379], + [4.18552478636557357, 4.46410668111310471]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_normal_0(self): + assert_equal(random.normal(scale=0), 0) + assert_raises(ValueError, random.normal, scale=-0.) + + def test_pareto(self): + random.seed(self.seed) + actual = random.pareto(a=.123456789, size=(3, 2)) + desired = np.array( + [[2.46852460439034849e+03, 1.41286880810518346e+03], + [5.28287797029485181e+07, 6.57720981047328785e+07], + [1.40840323350391515e+02, 1.98390255135251704e+05]]) + # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this + # matrix differs by 24 nulps. Discussion: + # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html + # Consensus is that this is probably some gcc quirk that affects + # rounding but not in any important way, so we just use a looser + # tolerance on this test: + np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) + + def test_poisson(self): + random.seed(self.seed) + actual = random.poisson(lam=.123456789, size=(3, 2)) + desired = np.array([[0, 0], + [1, 0], + [0, 0]]) + assert_array_equal(actual, desired) + + def test_poisson_exceptions(self): + lambig = np.iinfo('l').max + lamneg = -1 + assert_raises(ValueError, random.poisson, lamneg) + assert_raises(ValueError, random.poisson, [lamneg] * 10) + assert_raises(ValueError, random.poisson, lambig) + assert_raises(ValueError, random.poisson, [lambig] * 10) + with suppress_warnings() as sup: + sup.record(RuntimeWarning) + assert_raises(ValueError, random.poisson, np.nan) + assert_raises(ValueError, random.poisson, [np.nan] * 10) + + def test_power(self): + random.seed(self.seed) + actual = random.power(a=.123456789, size=(3, 2)) + desired = np.array([[0.02048932883240791, 0.01424192241128213], + [0.38446073748535298, 0.39499689943484395], + [0.00177699707563439, 0.13115505880863756]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_rayleigh(self): + random.seed(self.seed) + actual = random.rayleigh(scale=10, size=(3, 2)) + desired = np.array([[13.8882496494248393, 13.383318339044731], + [20.95413364294492098, 21.08285015800712614], + [11.06066537006854311, 17.35468505778271009]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_rayleigh_0(self): + assert_equal(random.rayleigh(scale=0), 0) + assert_raises(ValueError, random.rayleigh, scale=-0.) 
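+
+    def test_nulp_comparison_sketch(self):
+        # Illustrative sketch (hypothetical test) of the nulp tolerance
+        # used in test_pareto above: one "nulp" is one unit in the last
+        # place, i.e. the spacing between adjacent floats at that
+        # magnitude, so a budget of nulp=30 admits only tiny
+        # platform-dependent rounding differences.
+        x = np.float64(1e5)
+        one_ulp = np.spacing(x)
+        # A value a few ulps away stays comfortably inside the budget.
+        np.testing.assert_array_almost_equal_nulp(x + 3 * one_ulp, x,
+                                                  nulp=30)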
+ + def test_standard_cauchy(self): + random.seed(self.seed) + actual = random.standard_cauchy(size=(3, 2)) + desired = np.array([[0.77127660196445336, -6.55601161955910605], + [0.93582023391158309, -2.07479293013759447], + [-4.74601644297011926, 0.18338989290760804]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_exponential(self): + random.seed(self.seed) + actual = random.standard_exponential(size=(3, 2)) + desired = np.array([[0.96441739162374596, 0.89556604882105506], + [2.1953785836319808, 2.22243285392490542], + [0.6116915921431676, 1.50592546727413201]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_gamma(self): + random.seed(self.seed) + actual = random.standard_gamma(shape=3, size=(3, 2)) + desired = np.array([[5.50841531318455058, 6.62953470301903103], + [5.93988484943779227, 2.31044849402133989], + [7.54838614231317084, 8.012756093271868]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_standard_gamma_0(self): + assert_equal(random.standard_gamma(shape=0), 0) + assert_raises(ValueError, random.standard_gamma, shape=-0.) + + def test_standard_normal(self): + random.seed(self.seed) + actual = random.standard_normal(size=(3, 2)) + desired = np.array([[1.34016345771863121, 1.73759122771936081], + [1.498988344300628, -0.2286433324536169], + [2.031033998682787, 2.17032494605655257]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_randn_singleton(self): + random.seed(self.seed) + actual = random.randn() + desired = np.array(1.34016345771863121) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_standard_t(self): + random.seed(self.seed) + actual = random.standard_t(df=10, size=(3, 2)) + desired = np.array([[0.97140611862659965, -0.08830486548450577], + [1.36311143689505321, -0.55317463909867071], + [-0.18473749069684214, 0.61181537341755321]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_triangular(self): + random.seed(self.seed) + actual = random.triangular(left=5.12, mode=10.23, right=20.34, + size=(3, 2)) + desired = np.array([[12.68117178949215784, 12.4129206149193152], + [16.20131377335158263, 16.25692138747600524], + [11.20400690911820263, 14.4978144835829923]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_uniform(self): + random.seed(self.seed) + actual = random.uniform(low=1.23, high=10.54, size=(3, 2)) + desired = np.array([[6.99097932346268003, 6.73801597444323974], + [9.50364421400426274, 9.53130618907631089], + [5.48995325769805476, 8.47493103280052118]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_uniform_range_bounds(self): + fmin = np.finfo('float').min + fmax = np.finfo('float').max + + func = random.uniform + assert_raises(OverflowError, func, -np.inf, 0) + assert_raises(OverflowError, func, 0, np.inf) + assert_raises(OverflowError, func, fmin, fmax) + assert_raises(OverflowError, func, [-np.inf], [0]) + assert_raises(OverflowError, func, [0], [np.inf]) + + # (fmax / 1e17) - fmin is within range, so this should not throw + # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > + # DBL_MAX by increasing fmin a bit + random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) + + def test_scalar_exception_propagation(self): + # Tests that exceptions are correctly propagated in distributions + # when called with objects that throw exceptions when converted to + # scalars. 
+ # + # Regression test for gh: 8865 + + class ThrowingFloat(np.ndarray): + def __float__(self): + raise TypeError + + throwing_float = np.array(1.0).view(ThrowingFloat) + assert_raises(TypeError, random.uniform, throwing_float, + throwing_float) + + class ThrowingInteger(np.ndarray): + def __int__(self): + raise TypeError + + throwing_int = np.array(1).view(ThrowingInteger) + assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1) + + def test_vonmises(self): + random.seed(self.seed) + actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) + desired = np.array([[2.28567572673902042, 2.89163838442285037], + [0.38198375564286025, 2.57638023113890746], + [1.19153771588353052, 1.83509849681825354]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_vonmises_small(self): + # check infinite loop, gh-4720 + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6) + assert_(np.isfinite(r).all()) + + def test_vonmises_large(self): + # guard against changes in RandomState when Generator is fixed + random.seed(self.seed) + actual = random.vonmises(mu=0., kappa=1e7, size=3) + desired = np.array([4.634253748521111e-04, + 3.558873596114509e-04, + -2.337119622577433e-04]) + assert_array_almost_equal(actual, desired, decimal=8) + + def test_vonmises_nan(self): + random.seed(self.seed) + r = random.vonmises(mu=0., kappa=np.nan) + assert_(np.isnan(r)) + + def test_wald(self): + random.seed(self.seed) + actual = random.wald(mean=1.23, scale=1.54, size=(3, 2)) + desired = np.array([[3.82935265715889983, 5.13125249184285526], + [0.35045403618358717, 1.50832396872003538], + [0.24124319895843183, 0.22031101461955038]]) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_weibull(self): + random.seed(self.seed) + actual = random.weibull(a=1.23, size=(3, 2)) + desired = np.array([[0.97097342648766727, 0.91422896443565516], + [1.89517770034962929, 1.91414357960479564], + [0.67057783752390987, 1.39494046635066793]]) + assert_array_almost_equal(actual, desired, decimal=15) + + def test_weibull_0(self): + random.seed(self.seed) + assert_equal(random.weibull(a=0, size=12), np.zeros(12)) + assert_raises(ValueError, random.weibull, a=-0.) 
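+
+    # Note on test_scalar_exception_propagation earlier in this class:
+    # viewing a 0-d array as an ndarray subclass whose __float__/__int__
+    # raises is a compact way to exercise the scalar-conversion path inside
+    # the distribution code; the test then only has to verify that the
+    # TypeError propagates instead of being swallowed.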
+ + def test_zipf(self): + random.seed(self.seed) + actual = random.zipf(a=1.23, size=(3, 2)) + desired = np.array([[66, 29], + [1, 1], + [3, 13]]) + assert_array_equal(actual, desired) + + +class TestBroadcast: + # tests that functions that broadcast behave + # correctly when presented with non-scalar arguments + def setup_method(self): + self.seed = 123456789 + + def set_seed(self): + random.seed(self.seed) + + def test_uniform(self): + low = [0] + high = [1] + uniform = random.uniform + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.set_seed() + actual = uniform(low * 3, high) + assert_array_almost_equal(actual, desired, decimal=14) + + self.set_seed() + actual = uniform(low, high * 3) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_normal(self): + loc = [0] + scale = [1] + bad_scale = [-1] + normal = random.normal + desired = np.array([2.2129019979039612, + 2.1283977976520019, + 1.8417114045748335]) + + self.set_seed() + actual = normal(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc * 3, bad_scale) + + self.set_seed() + actual = normal(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, normal, loc, bad_scale * 3) + + def test_beta(self): + a = [1] + b = [2] + bad_a = [-1] + bad_b = [-2] + beta = random.beta + desired = np.array([0.19843558305989056, + 0.075230336409423643, + 0.24976865978980844]) + + self.set_seed() + actual = beta(a * 3, b) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a * 3, b) + assert_raises(ValueError, beta, a * 3, bad_b) + + self.set_seed() + actual = beta(a, b * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, beta, bad_a, b * 3) + assert_raises(ValueError, beta, a, bad_b * 3) + + def test_exponential(self): + scale = [1] + bad_scale = [-1] + exponential = random.exponential + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = exponential(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, exponential, bad_scale * 3) + + def test_standard_gamma(self): + shape = [1] + bad_shape = [-1] + std_gamma = random.standard_gamma + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = std_gamma(shape * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, std_gamma, bad_shape * 3) + + def test_gamma(self): + shape = [1] + scale = [2] + bad_shape = [-1] + bad_scale = [-2] + gamma = random.gamma + desired = np.array([1.5221370731769048, + 1.5277256455738331, + 1.4248762625178359]) + + self.set_seed() + actual = gamma(shape * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape * 3, scale) + assert_raises(ValueError, gamma, shape * 3, bad_scale) + + self.set_seed() + actual = gamma(shape, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gamma, bad_shape, scale * 3) + assert_raises(ValueError, gamma, shape, bad_scale * 3) + + def test_f(self): + dfnum = [1] + dfden = [2] + bad_dfnum = [-1] + bad_dfden = [-2] + f = random.f + desired = np.array([0.80038951638264799, + 0.86768719635363512, + 2.7251095168386801]) + + self.set_seed() + actual = f(dfnum * 3, dfden) + assert_array_almost_equal(actual, 
desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum * 3, dfden) + assert_raises(ValueError, f, dfnum * 3, bad_dfden) + + self.set_seed() + actual = f(dfnum, dfden * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, f, bad_dfnum, dfden * 3) + assert_raises(ValueError, f, dfnum, bad_dfden * 3) + + def test_noncentral_f(self): + dfnum = [2] + dfden = [3] + nonc = [4] + bad_dfnum = [0] + bad_dfden = [-1] + bad_nonc = [-2] + nonc_f = random.noncentral_f + desired = np.array([9.1393943263705211, + 13.025456344595602, + 8.8018098359100545]) + + self.set_seed() + actual = nonc_f(dfnum * 3, dfden, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3))) + + assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) + assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) + + self.set_seed() + actual = nonc_f(dfnum, dfden * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) + assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) + + self.set_seed() + actual = nonc_f(dfnum, dfden, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) + assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) + + def test_noncentral_f_small_df(self): + self.set_seed() + desired = np.array([6.869638627492048, 0.785880199263955]) + actual = random.noncentral_f(0.9, 0.9, 2, size=2) + assert_array_almost_equal(actual, desired, decimal=14) + + def test_chisquare(self): + df = [1] + bad_df = [-1] + chisquare = random.chisquare + desired = np.array([0.57022801133088286, + 0.51947702108840776, + 0.1320969254923558]) + + self.set_seed() + actual = chisquare(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, chisquare, bad_df * 3) + + def test_noncentral_chisquare(self): + df = [1] + nonc = [2] + bad_df = [-1] + bad_nonc = [-2] + nonc_chi = random.noncentral_chisquare + desired = np.array([9.0015599467913763, + 4.5804135049718742, + 6.0872302432834564]) + + self.set_seed() + actual = nonc_chi(df * 3, nonc) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) + assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) + + self.set_seed() + actual = nonc_chi(df, nonc * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) + assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) + + def test_standard_t(self): + df = [1] + bad_df = [-1] + t = random.standard_t + desired = np.array([3.0702872575217643, + 5.8560725167361607, + 1.0274791436474273]) + + self.set_seed() + actual = t(df * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, t, bad_df * 3) + assert_raises(ValueError, random.standard_t, bad_df * 3) + + def test_vonmises(self): + mu = [2] + kappa = [1] + bad_kappa = [-1] + vonmises = random.vonmises + desired = np.array([2.9883443664201312, + -2.7064099483995943, + -1.8672476700665914]) + + self.set_seed() + actual = vonmises(mu * 3, kappa) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu * 3, bad_kappa) + + 
self.set_seed() + actual = vonmises(mu, kappa * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, vonmises, mu, bad_kappa * 3) + + def test_pareto(self): + a = [1] + bad_a = [-1] + pareto = random.pareto + desired = np.array([1.1405622680198362, + 1.1465519762044529, + 1.0389564467453547]) + + self.set_seed() + actual = pareto(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, pareto, bad_a * 3) + assert_raises(ValueError, random.pareto, bad_a * 3) + + def test_weibull(self): + a = [1] + bad_a = [-1] + weibull = random.weibull + desired = np.array([0.76106853658845242, + 0.76386282278691653, + 0.71243813125891797]) + + self.set_seed() + actual = weibull(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, weibull, bad_a * 3) + assert_raises(ValueError, random.weibull, bad_a * 3) + + def test_power(self): + a = [1] + bad_a = [-1] + power = random.power + desired = np.array([0.53283302478975902, + 0.53413660089041659, + 0.50955303552646702]) + + self.set_seed() + actual = power(a * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, power, bad_a * 3) + assert_raises(ValueError, random.power, bad_a * 3) + + def test_laplace(self): + loc = [0] + scale = [1] + bad_scale = [-1] + laplace = random.laplace + desired = np.array([0.067921356028507157, + 0.070715642226971326, + 0.019290950698972624]) + + self.set_seed() + actual = laplace(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc * 3, bad_scale) + + self.set_seed() + actual = laplace(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, laplace, loc, bad_scale * 3) + + def test_gumbel(self): + loc = [0] + scale = [1] + bad_scale = [-1] + gumbel = random.gumbel + desired = np.array([0.2730318639556768, + 0.26936705726291116, + 0.33906220393037939]) + + self.set_seed() + actual = gumbel(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc * 3, bad_scale) + + self.set_seed() + actual = gumbel(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, gumbel, loc, bad_scale * 3) + + def test_logistic(self): + loc = [0] + scale = [1] + bad_scale = [-1] + logistic = random.logistic + desired = np.array([0.13152135837586171, + 0.13675915696285773, + 0.038216792802833396]) + + self.set_seed() + actual = logistic(loc * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc * 3, bad_scale) + + self.set_seed() + actual = logistic(loc, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, logistic, loc, bad_scale * 3) + assert_equal(random.logistic(1.0, 0.0), 1.0) + + def test_lognormal(self): + mean = [0] + sigma = [1] + bad_sigma = [-1] + lognormal = random.lognormal + desired = np.array([9.1422086044848427, + 8.4013952870126261, + 6.3073234116578671]) + + self.set_seed() + actual = lognormal(mean * 3, sigma) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean * 3, bad_sigma) + assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma) + + self.set_seed() + actual = lognormal(mean, sigma * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, lognormal, mean, bad_sigma * 3) + assert_raises(ValueError, 
random.lognormal, mean, bad_sigma * 3) + + def test_rayleigh(self): + scale = [1] + bad_scale = [-1] + rayleigh = random.rayleigh + desired = np.array([1.2337491937897689, + 1.2360119924878694, + 1.1936818095781789]) + + self.set_seed() + actual = rayleigh(scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, rayleigh, bad_scale * 3) + + def test_wald(self): + mean = [0.5] + scale = [1] + bad_mean = [0] + bad_scale = [-2] + wald = random.wald + desired = np.array([0.11873681120271318, + 0.12450084820795027, + 0.9096122728408238]) + + self.set_seed() + actual = wald(mean * 3, scale) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean * 3, scale) + assert_raises(ValueError, wald, mean * 3, bad_scale) + assert_raises(ValueError, random.wald, bad_mean * 3, scale) + assert_raises(ValueError, random.wald, mean * 3, bad_scale) + + self.set_seed() + actual = wald(mean, scale * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, wald, bad_mean, scale * 3) + assert_raises(ValueError, wald, mean, bad_scale * 3) + assert_raises(ValueError, wald, 0.0, 1) + assert_raises(ValueError, wald, 0.5, 0.0) + + def test_triangular(self): + left = [1] + right = [3] + mode = [2] + bad_left_one = [3] + bad_mode_one = [4] + bad_left_two, bad_mode_two = right * 2 + triangular = random.triangular + desired = np.array([2.03339048710429, + 2.0347400359389356, + 2.0095991069536208]) + + self.set_seed() + actual = triangular(left * 3, mode, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) + assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) + assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, + right) + + self.set_seed() + actual = triangular(left, mode * 3, right) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) + assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, + right) + + self.set_seed() + actual = triangular(left, mode, right * 3) + assert_array_almost_equal(actual, desired, decimal=14) + assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) + assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) + assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, + right * 3) + + assert_raises(ValueError, triangular, 10., 0., 20.) + assert_raises(ValueError, triangular, 10., 25., 20.) + assert_raises(ValueError, triangular, 10., 10., 10.) 
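+
+    def test_broadcast_shape_sketch(self):
+        # Illustrative sketch (hypothetical test): the broadcasting rule
+        # every test in this class exercises, stated once directly. A
+        # length-3 parameter list broadcast against a length-1 list yields
+        # three variates, mirroring np.broadcast on the inputs.
+        self.set_seed()
+        out = random.normal([0] * 3, [1])
+        assert_equal(out.shape, (3,))
+        assert_equal(np.broadcast(np.empty(3), np.empty(1)).shape, (3,))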
+ + def test_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + binom = random.binomial + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n * 3, p) + assert_raises(ValueError, binom, n * 3, bad_p_one) + assert_raises(ValueError, binom, n * 3, bad_p_two) + + self.set_seed() + actual = binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, binom, bad_n, p * 3) + assert_raises(ValueError, binom, n, bad_p_one * 3) + assert_raises(ValueError, binom, n, bad_p_two * 3) + + def test_negative_binomial(self): + n = [1] + p = [0.5] + bad_n = [-1] + bad_p_one = [-1] + bad_p_two = [1.5] + neg_binom = random.negative_binomial + desired = np.array([1, 0, 1]) + + self.set_seed() + actual = neg_binom(n * 3, p) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n * 3, p) + assert_raises(ValueError, neg_binom, n * 3, bad_p_one) + assert_raises(ValueError, neg_binom, n * 3, bad_p_two) + + self.set_seed() + actual = neg_binom(n, p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, neg_binom, bad_n, p * 3) + assert_raises(ValueError, neg_binom, n, bad_p_one * 3) + assert_raises(ValueError, neg_binom, n, bad_p_two * 3) + + def test_poisson(self): + max_lam = random.RandomState()._poisson_lam_max + + lam = [1] + bad_lam_one = [-1] + bad_lam_two = [max_lam * 2] + poisson = random.poisson + desired = np.array([1, 1, 0]) + + self.set_seed() + actual = poisson(lam * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, poisson, bad_lam_one * 3) + assert_raises(ValueError, poisson, bad_lam_two * 3) + + def test_zipf(self): + a = [2] + bad_a = [0] + zipf = random.zipf + desired = np.array([2, 2, 1]) + + self.set_seed() + actual = zipf(a * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, zipf, bad_a * 3) + with np.errstate(invalid='ignore'): + assert_raises(ValueError, zipf, np.nan) + assert_raises(ValueError, zipf, [0, 0, np.nan]) + + def test_geometric(self): + p = [0.5] + bad_p_one = [-1] + bad_p_two = [1.5] + geom = random.geometric + desired = np.array([2, 2, 2]) + + self.set_seed() + actual = geom(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, geom, bad_p_one * 3) + assert_raises(ValueError, geom, bad_p_two * 3) + + def test_hypergeometric(self): + ngood = [1] + nbad = [2] + nsample = [2] + bad_ngood = [-1] + bad_nbad = [-2] + bad_nsample_one = [0] + bad_nsample_two = [4] + hypergeom = random.hypergeometric + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = hypergeom(ngood * 3, nbad, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) + + self.set_seed() + actual = hypergeom(ngood, nbad * 3, nsample) + assert_array_equal(actual, desired) + assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) + assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) + + self.set_seed() + actual = hypergeom(ngood, nbad, nsample * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, 
hypergeom, bad_ngood, nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) + assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) + + assert_raises(ValueError, hypergeom, -1, 10, 20) + assert_raises(ValueError, hypergeom, 10, -1, 20) + assert_raises(ValueError, hypergeom, 10, 10, 0) + assert_raises(ValueError, hypergeom, 10, 10, 25) + + def test_logseries(self): + p = [0.5] + bad_p_one = [2] + bad_p_two = [-1] + logseries = random.logseries + desired = np.array([1, 1, 1]) + + self.set_seed() + actual = logseries(p * 3) + assert_array_equal(actual, desired) + assert_raises(ValueError, logseries, bad_p_one * 3) + assert_raises(ValueError, logseries, bad_p_two * 3) + + +@pytest.mark.skipif(IS_WASM, reason="can't start thread") +class TestThread: + # make sure each state produces the same sequence even in threads + def setup_method(self): + self.seeds = range(4) + + def check_function(self, function, sz): + from threading import Thread + + out1 = np.empty((len(self.seeds),) + sz) + out2 = np.empty((len(self.seeds),) + sz) + + # threaded generation + t = [Thread(target=function, args=(random.RandomState(s), o)) + for s, o in zip(self.seeds, out1)] + [x.start() for x in t] + [x.join() for x in t] + + # the same serial + for s, o in zip(self.seeds, out2): + function(random.RandomState(s), o) + + # these platforms change x87 fpu precision mode in threads + if np.intp().dtype.itemsize == 4 and sys.platform == "win32": + assert_array_almost_equal(out1, out2) + else: + assert_array_equal(out1, out2) + + def test_normal(self): + def gen_random(state, out): + out[...] = state.normal(size=10000) + + self.check_function(gen_random, sz=(10000,)) + + def test_exp(self): + def gen_random(state, out): + out[...] = state.exponential(scale=np.ones((100, 1000))) + + self.check_function(gen_random, sz=(100, 1000)) + + def test_multinomial(self): + def gen_random(state, out): + out[...] = state.multinomial(10, [1 / 6.] 
* 6, size=10000) + + self.check_function(gen_random, sz=(10000, 6)) + + +# See Issue #4263 +class TestSingleEltArrayInput: + def setup_method(self): + self.argOne = np.array([2]) + self.argTwo = np.array([3]) + self.argThree = np.array([4]) + self.tgtShape = (1,) + + def test_one_arg_funcs(self): + funcs = (random.exponential, random.standard_gamma, + random.chisquare, random.standard_t, + random.pareto, random.weibull, + random.power, random.rayleigh, + random.poisson, random.zipf, + random.geometric, random.logseries) + + probfuncs = (random.geometric, random.logseries) + + for func in funcs: + if func in probfuncs: # p < 1.0 + out = func(np.array([0.5])) + + else: + out = func(self.argOne) + + assert_equal(out.shape, self.tgtShape) + + def test_two_arg_funcs(self): + funcs = (random.uniform, random.normal, + random.beta, random.gamma, + random.f, random.noncentral_chisquare, + random.vonmises, random.laplace, + random.gumbel, random.logistic, + random.lognormal, random.wald, + random.binomial, random.negative_binomial) + + probfuncs = (random.binomial, random.negative_binomial) + + for func in funcs: + if func in probfuncs: # p <= 1 + argTwo = np.array([0.5]) + + else: + argTwo = self.argTwo + + out = func(self.argOne, argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], argTwo) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, argTwo[0]) + assert_equal(out.shape, self.tgtShape) + + def test_three_arg_funcs(self): + funcs = [random.noncentral_f, random.triangular, + random.hypergeometric] + + for func in funcs: + out = func(self.argOne, self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne[0], self.argTwo, self.argThree) + assert_equal(out.shape, self.tgtShape) + + out = func(self.argOne, self.argTwo[0], self.argThree) + assert_equal(out.shape, self.tgtShape) + + +# Ensure returned array dtype is correct for platform +def test_integer_dtype(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + actual = f(*args, size=2) + assert_(actual.dtype == np.dtype('l')) + + +def test_integer_repeat(int_func): + random.seed(123456789) + fname, args, sha256 = int_func + f = getattr(random, fname) + val = f(*args, size=1000000) + if sys.byteorder != 'little': + val = val.byteswap() + res = hashlib.sha256(val.view(np.int8)).hexdigest() + assert_(res == sha256) + + +def test_broadcast_size_error(): + # GH-16833 + with pytest.raises(ValueError): + random.binomial(1, [0.3, 0.7], size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], 0.3, size=(2, 1)) + with pytest.raises(ValueError): + random.binomial([1, 2], [0.3, 0.7], size=(2, 1)) + + +def test_randomstate_ctor_old_style_pickle(): + rs = np.random.RandomState(MT19937(0)) + rs.standard_normal(1) + # Directly call reduce which is used in pickling + ctor, args, state_a = rs.__reduce__() + # Simulate unpickling an old pickle that only has the name + assert args[0].__class__.__name__ == "MT19937" + b = ctor(*("MT19937",)) + b.set_state(state_a) + state_b = b.get_state(legacy=False) + + assert_equal(state_a['bit_generator'], state_b['bit_generator']) + assert_array_equal(state_a['state']['key'], state_b['state']['key']) + assert_array_equal(state_a['state']['pos'], state_b['state']['pos']) + assert_equal(state_a['has_gauss'], state_b['has_gauss']) + assert_equal(state_a['gauss'], state_b['gauss']) + + +def test_hot_swap(restore_singleton_bitgen): + # GH 21808 + def_bg = np.random.default_rng(0) + bg = 
def_bg.bit_generator
+    np.random.set_bit_generator(bg)
+    assert isinstance(np.random.mtrand._rand._bit_generator, type(bg))
+
+    second_bg = np.random.get_bit_generator()
+    assert bg is second_bg
+
+
+def test_seed_alt_bit_gen(restore_singleton_bitgen):
+    # GH 21808
+    bg = PCG64(0)
+    np.random.set_bit_generator(bg)
+    state = np.random.get_state(legacy=False)
+    np.random.seed(1)
+    new_state = np.random.get_state(legacy=False)
+    assert state["bit_generator"] == "PCG64"
+    assert state["state"]["state"] != new_state["state"]["state"]
+    assert state["state"]["inc"] != new_state["state"]["inc"]
+
+
+def test_state_error_alt_bit_gen(restore_singleton_bitgen):
+    # GH 21808
+    state = np.random.get_state()
+    bg = PCG64(0)
+    np.random.set_bit_generator(bg)
+    with pytest.raises(ValueError, match="state must be for a PCG64"):
+        np.random.set_state(state)
+
+
+def test_swap_worked(restore_singleton_bitgen):
+    # GH 21808
+    np.random.seed(98765)
+    vals = np.random.randint(0, 2 ** 30, 10)
+    bg = PCG64(0)
+    state = bg.state
+    np.random.set_bit_generator(bg)
+    state_direct = np.random.get_state(legacy=False)
+    for field in state:
+        assert state[field] == state_direct[field]
+    np.random.seed(98765)
+    pcg_vals = np.random.randint(0, 2 ** 30, 10)
+    assert not np.all(vals == pcg_vals)
+    new_state = bg.state
+    assert new_state["state"]["state"] != state["state"]["state"]
+    # seeding draws a fresh increment as well, so inc must differ too
+    assert new_state["state"]["inc"] != state["state"]["inc"]
+
+
+def test_swapped_singleton_against_direct(restore_singleton_bitgen):
+    np.random.set_bit_generator(PCG64(98765))
+    singleton_vals = np.random.randint(0, 2 ** 30, 10)
+    rg = np.random.RandomState(PCG64(98765))
+    non_singleton_vals = rg.randint(0, 2 ** 30, 10)
+    assert_equal(non_singleton_vals, singleton_vals)
diff --git a/python/numpy/random/tests/test_randomstate_regression.py b/python/numpy/random/tests/test_randomstate_regression.py
new file mode 100644
index 000000000..6ccc61806
--- /dev/null
+++ b/python/numpy/random/tests/test_randomstate_regression.py
@@ -0,0 +1,217 @@
+import sys
+
+import pytest
+
+import numpy as np
+from numpy import random
+from numpy.testing import (
+    assert_,
+    assert_array_equal,
+    assert_raises,
+)
+
+
+class TestRegression:
+
+    def test_VonMises_range(self):
+        # Make sure generated random variables are in [-pi, pi].
+        # Regression test for ticket #986.
+ for mu in np.linspace(-7., 7., 5): + r = random.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + random.seed(0) + rvsn = random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. + random.multivariate_normal([0], [[0]], size=1) + random.multivariate_normal([0], [[0]], size=np.int_(1)) + random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + random.seed(1234567890) + x = random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, random.choice, a, p=probs * 0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + random.shuffle(a) + + # Force Garbage Collection - should not segfault. 
+        import gc
+        gc.collect()
+
+    def test_shuffle_of_array_of_objects(self):
+        # Test that permuting an array of objects will not cause
+        # a segfault on garbage collection.
+        # See gh-7719
+        random.seed(1234)
+        a = np.array([np.arange(1), np.arange(4)], dtype=object)
+
+        for _ in range(1000):
+            random.shuffle(a)
+
+        # Force Garbage Collection - should not segfault.
+        import gc
+        gc.collect()
+
+    def test_permutation_subclass(self):
+        class N(np.ndarray):
+            pass
+
+        random.seed(1)
+        orig = np.arange(3).view(N)
+        perm = random.permutation(orig)
+        assert_array_equal(perm, np.array([0, 2, 1]))
+        assert_array_equal(orig, np.arange(3).view(N))
+
+        class M:
+            a = np.arange(5)
+
+            def __array__(self, dtype=None, copy=None):
+                return self.a
+
+        random.seed(1)
+        m = M()
+        perm = random.permutation(m)
+        assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
+        assert_array_equal(m.__array__(), np.arange(5))
+
+    def test_warns_byteorder(self):
+        # GH 13159
+        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
+        with pytest.deprecated_call(match='non-native byteorder is not'):
+            random.randint(0, 200, size=10, dtype=other_byteord_dt)
+
+    def test_named_argument_initialization(self):
+        # GH 13669
+        rs1 = np.random.RandomState(123456789)
+        rs2 = np.random.RandomState(seed=123456789)
+        assert rs1.randint(0, 100) == rs2.randint(0, 100)
+
+    def test_choice_return_dtype(self):
+        # GH 9867, now long since the NumPy default changed.
+        c = np.random.choice(10, p=[.1] * 10, size=2)
+        assert c.dtype == np.dtype(np.long)
+        c = np.random.choice(10, p=[.1] * 10, replace=False, size=2)
+        assert c.dtype == np.dtype(np.long)
+        c = np.random.choice(10, size=2)
+        assert c.dtype == np.dtype(np.long)
+        c = np.random.choice(10, replace=False, size=2)
+        assert c.dtype == np.dtype(np.long)
+
+    @pytest.mark.skipif(np.iinfo('l').max < 2**32,
+                        reason='Cannot test with 32-bit C long')
+    def test_randint_117(self):
+        # GH 14189
+        random.seed(0)
+        expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
+                             2588848963, 3684848379, 2340255427, 3638918503,
+                             1819583497, 2678185683], dtype='int64')
+        actual = random.randint(2**32, size=10)
+        assert_array_equal(actual, expected)
+
+    def test_p_zero_stream(self):
+        # Regression test for gh-14522. Ensure that future versions
+        # generate the same variates as version 1.16.
+        np.random.seed(12345)
+        assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
+                           [0, 0, 0, 1, 1])
+
+    def test_n_zero_stream(self):
+        # Regression test for gh-14522. Ensure that future versions
+        # generate the same variates as version 1.16.
+ np.random.seed(8675309) + expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]]) + assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)), + expected) + + +def test_multinomial_empty(): + # gh-20483 + # Ensure that empty p-vals are correctly handled + assert random.multinomial(10, []).shape == (0,) + assert random.multinomial(3, [], size=(7, 5, 3)).shape == (7, 5, 3, 0) + + +def test_multinomial_1d_pval(): + # gh-20483 + with pytest.raises(TypeError, match="pvals must be a 1-d"): + random.multinomial(10, 0.3) diff --git a/python/numpy/random/tests/test_regression.py b/python/numpy/random/tests/test_regression.py new file mode 100644 index 000000000..39b7d8c71 --- /dev/null +++ b/python/numpy/random/tests/test_regression.py @@ -0,0 +1,152 @@ +import sys + +import numpy as np +from numpy import random +from numpy.testing import ( + assert_, + assert_array_equal, + assert_raises, +) + + +class TestRegression: + + def test_VonMises_range(self): + # Make sure generated random variables are in [-pi, pi]. + # Regression test for ticket #986. + for mu in np.linspace(-7., 7., 5): + r = random.mtrand.vonmises(mu, 1, 50) + assert_(np.all(r > -np.pi) and np.all(r <= np.pi)) + + def test_hypergeometric_range(self): + # Test for ticket #921 + assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4)) + assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0)) + + # Test for ticket #5623 + args = [ + (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems + ] + is_64bits = sys.maxsize > 2**32 + if is_64bits and sys.platform != 'win32': + # Check for 64-bit systems + args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) + for arg in args: + assert_(np.random.hypergeometric(*arg) > 0) + + def test_logseries_convergence(self): + # Test for ticket #923 + N = 1000 + np.random.seed(0) + rvsn = np.random.logseries(0.8, size=N) + # these two frequency counts should be close to theoretical + # numbers with this large sample + # theoretical large N result is 0.49706795 + freq = np.sum(rvsn == 1) / N + msg = f'Frequency was {freq:f}, should be > 0.45' + assert_(freq > 0.45, msg) + # theoretical large N result is 0.19882718 + freq = np.sum(rvsn == 2) / N + msg = f'Frequency was {freq:f}, should be < 0.23' + assert_(freq < 0.23, msg) + + def test_shuffle_mixed_dimension(self): + # Test for trac ticket #2074 + for t in [[1, 2, 3, None], + [(1, 1), (2, 2), (3, 3), None], + [1, (2, 2), (3, 3), None], + [(1, 1), 2, 3, None]]: + np.random.seed(12345) + shuffled = list(t) + random.shuffle(shuffled) + expected = np.array([t[0], t[3], t[1], t[2]], dtype=object) + assert_array_equal(np.array(shuffled, dtype=object), expected) + + def test_call_within_randomstate(self): + # Check that custom RandomState does not call into global state + m = np.random.RandomState() + res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3]) + for i in range(3): + np.random.seed(i) + m.seed(4321) + # If m.state is not honored, the result will change + assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res) + + def test_multivariate_normal_size_types(self): + # Test for multivariate_normal issue with 'size' argument. + # Check that the multivariate_normal size argument can be a + # numpy integer. 
+ np.random.multivariate_normal([0], [[0]], size=1) + np.random.multivariate_normal([0], [[0]], size=np.int_(1)) + np.random.multivariate_normal([0], [[0]], size=np.int64(1)) + + def test_beta_small_parameters(self): + # Test that beta with small a and b parameters does not produce + # NaNs due to roundoff errors causing 0 / 0, gh-5851 + np.random.seed(1234567890) + x = np.random.beta(0.0001, 0.0001, size=100) + assert_(not np.any(np.isnan(x)), 'Nans in np.random.beta') + + def test_choice_sum_of_probs_tolerance(self): + # The sum of probs should be 1.0 with some tolerance. + # For low precision dtypes the tolerance was too tight. + # See numpy github issue 6123. + np.random.seed(1234) + a = [1, 2, 3] + counts = [4, 4, 2] + for dt in np.float16, np.float32, np.float64: + probs = np.array(counts, dtype=dt) / sum(counts) + c = np.random.choice(a, p=probs) + assert_(c in a) + assert_raises(ValueError, np.random.choice, a, p=probs * 0.9) + + def test_shuffle_of_array_of_different_length_strings(self): + # Test that permuting an array of different length strings + # will not cause a segfault on garbage collection + # Tests gh-7710 + np.random.seed(1234) + + a = np.array(['a', 'a' * 1000]) + + for _ in range(100): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_shuffle_of_array_of_objects(self): + # Test that permuting an array of objects will not cause + # a segfault on garbage collection. + # See gh-7719 + np.random.seed(1234) + a = np.array([np.arange(1), np.arange(4)], dtype=object) + + for _ in range(1000): + np.random.shuffle(a) + + # Force Garbage Collection - should not segfault. + import gc + gc.collect() + + def test_permutation_subclass(self): + class N(np.ndarray): + pass + + np.random.seed(1) + orig = np.arange(3).view(N) + perm = np.random.permutation(orig) + assert_array_equal(perm, np.array([0, 2, 1])) + assert_array_equal(orig, np.arange(3).view(N)) + + class M: + a = np.arange(5) + + def __array__(self, dtype=None, copy=None): + return self.a + + np.random.seed(1) + m = M() + perm = np.random.permutation(m) + assert_array_equal(perm, np.array([2, 1, 4, 0, 3])) + assert_array_equal(m.__array__(), np.arange(5)) diff --git a/python/numpy/random/tests/test_seed_sequence.py b/python/numpy/random/tests/test_seed_sequence.py new file mode 100644 index 000000000..87ae4ff72 --- /dev/null +++ b/python/numpy/random/tests/test_seed_sequence.py @@ -0,0 +1,79 @@ +import numpy as np +from numpy.random import SeedSequence +from numpy.testing import assert_array_compare, assert_array_equal + + +def test_reference_data(): + """ Check that SeedSequence generates data the same as the C++ reference. 
+ + https://gist.github.com/imneme/540829265469e673d045 + """ + inputs = [ + [3735928559, 195939070, 229505742, 305419896], + [3668361503, 4165561550, 1661411377, 3634257570], + [164546577, 4166754639, 1765190214, 1303880213], + [446610472, 3941463886, 522937693, 1882353782], + [1864922766, 1719732118, 3882010307, 1776744564], + [4141682960, 3310988675, 553637289, 902896340], + [1134851934, 2352871630, 3699409824, 2648159817], + [1240956131, 3107113773, 1283198141, 1924506131], + [2669565031, 579818610, 3042504477, 2774880435], + [2766103236, 2883057919, 4029656435, 862374500], + ] + outputs = [ + [3914649087, 576849849, 3593928901, 2229911004], + [2240804226, 3691353228, 1365957195, 2654016646], + [3562296087, 3191708229, 1147942216, 3726991905], + [1403443605, 3591372999, 1291086759, 441919183], + [1086200464, 2191331643, 560336446, 3658716651], + [3249937430, 2346751812, 847844327, 2996632307], + [2584285912, 4034195531, 3523502488, 169742686], + [959045797, 3875435559, 1886309314, 359682705], + [3978441347, 432478529, 3223635119, 138903045], + [296367413, 4262059219, 13109864, 3283683422], + ] + outputs64 = [ + [2477551240072187391, 9577394838764454085], + [15854241394484835714, 11398914698975566411], + [13708282465491374871, 16007308345579681096], + [15424829579845884309, 1898028439751125927], + [9411697742461147792, 15714068361935982142], + [10079222287618677782, 12870437757549876199], + [17326737873898640088, 729039288628699544], + [16644868984619524261, 1544825456798124994], + [1857481142255628931, 596584038813451439], + [18305404959516669237, 14103312907920476776], + ] + for seed, expected, expected64 in zip(inputs, outputs, outputs64): + expected = np.array(expected, dtype=np.uint32) + ss = SeedSequence(seed) + state = ss.generate_state(len(expected)) + assert_array_equal(state, expected) + state64 = ss.generate_state(len(expected64), dtype=np.uint64) + assert_array_equal(state64, expected64) + + +def test_zero_padding(): + """ Ensure that the implicit zero-padding does not cause problems. + """ + # Ensure that large integers are inserted in little-endian fashion to avoid + # trailing 0s. + ss0 = SeedSequence(42) + ss1 = SeedSequence(42 << 32) + assert_array_compare( + np.not_equal, + ss0.generate_state(4), + ss1.generate_state(4)) + + # Ensure backwards compatibility with the original 0.17 release for small + # integers and no spawn key. + expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988], + dtype=np.uint32) + assert_array_equal(SeedSequence(42).generate_state(4), expected42) + + # Regression test for gh-16539 to ensure that the implicit 0s don't + # conflict with spawn keys. 
+ assert_array_compare( + np.not_equal, + SeedSequence(42, spawn_key=(0,)).generate_state(4), + expected42) diff --git a/python/numpy/random/tests/test_smoke.py b/python/numpy/random/tests/test_smoke.py new file mode 100644 index 000000000..6f07443f7 --- /dev/null +++ b/python/numpy/random/tests/test_smoke.py @@ -0,0 +1,819 @@ +import pickle +from functools import partial + +import pytest + +import numpy as np +from numpy.random import MT19937, PCG64, PCG64DXSM, SFC64, Generator, Philox +from numpy.testing import assert_, assert_array_equal, assert_equal + + +@pytest.fixture(scope='module', + params=(np.bool, np.int8, np.int16, np.int32, np.int64, + np.uint8, np.uint16, np.uint32, np.uint64)) +def dtype(request): + return request.param + + +def params_0(f): + val = f() + assert_(np.isscalar(val)) + val = f(10) + assert_(val.shape == (10,)) + val = f((10, 10)) + assert_(val.shape == (10, 10)) + val = f((10, 10, 10)) + assert_(val.shape == (10, 10, 10)) + val = f(size=(5, 5)) + assert_(val.shape == (5, 5)) + + +def params_1(f, bounded=False): + a = 5.0 + b = np.arange(2.0, 12.0) + c = np.arange(2.0, 102.0).reshape((10, 10)) + d = np.arange(2.0, 1002.0).reshape((10, 10, 10)) + e = np.array([2.0, 3.0]) + g = np.arange(2.0, 12.0).reshape((1, 10, 1)) + if bounded: + a = 0.5 + b = b / (1.5 * b.max()) + c = c / (1.5 * c.max()) + d = d / (1.5 * d.max()) + e = e / (1.5 * e.max()) + g = g / (1.5 * g.max()) + + # Scalar + f(a) + # Scalar - size + f(a, size=(10, 10)) + # 1d + f(b) + # 2d + f(c) + # 3d + f(d) + # 1d size + f(b, size=10) + # 2d - size - broadcast + f(e, size=(10, 2)) + # 3d - size + f(g, size=(10, 10, 10)) + + +def comp_state(state1, state2): + identical = True + if isinstance(state1, dict): + for key in state1: + identical &= comp_state(state1[key], state2[key]) + elif type(state1) != type(state2): + identical &= type(state1) == type(state2) + elif (isinstance(state1, (list, tuple, np.ndarray)) and isinstance( + state2, (list, tuple, np.ndarray))): + for s1, s2 in zip(state1, state2): + identical &= comp_state(s1, s2) + else: + identical &= state1 == state2 + return identical + + +def warmup(rg, n=None): + if n is None: + n = 11 + np.random.randint(0, 20) + rg.standard_normal(n) + rg.standard_normal(n) + rg.standard_normal(n, dtype=np.float32) + rg.standard_normal(n, dtype=np.float32) + rg.integers(0, 2 ** 24, n, dtype=np.uint64) + rg.integers(0, 2 ** 48, n, dtype=np.uint64) + rg.standard_gamma(11.0, n) + rg.standard_gamma(11.0, n, dtype=np.float32) + rg.random(n, dtype=np.float64) + rg.random(n, dtype=np.float32) + + +class RNG: + @classmethod + def setup_class(cls): + # Overridden in test classes. 
Placeholder to silence IDE noise
+        cls.bit_generator = PCG64
+        cls.advance = None
+        cls.seed = [12345]
+        cls.rg = Generator(cls.bit_generator(*cls.seed))
+        cls.initial_state = cls.rg.bit_generator.state
+        cls.seed_vector_bits = 64
+        cls._extra_setup()
+
+    @classmethod
+    def _extra_setup(cls):
+        cls.vec_1d = np.arange(2.0, 102.0)
+        cls.vec_2d = np.arange(2.0, 102.0)[None, :]
+        cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
+        cls.seed_error = TypeError
+
+    def _reset_state(self):
+        self.rg.bit_generator.state = self.initial_state
+
+    def test_init(self):
+        rg = Generator(self.bit_generator())
+        state = rg.bit_generator.state
+        rg.standard_normal(1)
+        rg.standard_normal(1)
+        rg.bit_generator.state = state
+        new_state = rg.bit_generator.state
+        assert_(comp_state(state, new_state))
+
+    def test_advance(self):
+        state = self.rg.bit_generator.state
+        if hasattr(self.rg.bit_generator, 'advance'):
+            self.rg.bit_generator.advance(self.advance)
+            assert_(not comp_state(state, self.rg.bit_generator.state))
+        else:
+            bitgen_name = self.rg.bit_generator.__class__.__name__
+            pytest.skip(f'Advance is not supported by {bitgen_name}')
+
+    def test_jump(self):
+        state = self.rg.bit_generator.state
+        if hasattr(self.rg.bit_generator, 'jumped'):
+            bit_gen2 = self.rg.bit_generator.jumped()
+            jumped_state = bit_gen2.state
+            assert_(not comp_state(state, jumped_state))
+            self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
+            self.rg.bit_generator.state = state
+            bit_gen3 = self.rg.bit_generator.jumped()
+            rejumped_state = bit_gen3.state
+            assert_(comp_state(jumped_state, rejumped_state))
+        else:
+            bitgen_name = self.rg.bit_generator.__class__.__name__
+            if bitgen_name not in ('SFC64',):
+                raise AttributeError(f'no "jumped" in {bitgen_name}')
+            pytest.skip(f'Jump is not supported by {bitgen_name}')
+
+    def test_uniform(self):
+        r = self.rg.uniform(-1.0, 0.0, size=10)
+        assert_(len(r) == 10)
+        assert_((r > -1).all())
+        assert_((r <= 0).all())
+
+    def test_uniform_array(self):
+        r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
+        assert_(len(r) == 10)
+        assert_((r > -1).all())
+        assert_((r <= 0).all())
+        r = self.rg.uniform(np.array([-1.0] * 10),
+                            np.array([0.0] * 10), size=10)
+        assert_(len(r) == 10)
+        assert_((r > -1).all())
+        assert_((r <= 0).all())
+        r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
+        assert_(len(r) == 10)
+        assert_((r > -1).all())
+        assert_((r <= 0).all())
+
+    def test_random(self):
+        assert_(len(self.rg.random(10)) == 10)
+        params_0(self.rg.random)
+
+    def test_standard_normal_zig(self):
+        assert_(len(self.rg.standard_normal(10)) == 10)
+
+    def test_standard_normal(self):
+        assert_(len(self.rg.standard_normal(10)) == 10)
+        params_0(self.rg.standard_normal)
+
+    def test_standard_gamma(self):
+        assert_(len(self.rg.standard_gamma(10, 10)) == 10)
+        assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)
+        params_1(self.rg.standard_gamma)
+
+    def test_standard_exponential(self):
+        assert_(len(self.rg.standard_exponential(10)) == 10)
+        params_0(self.rg.standard_exponential)
+
+    def test_standard_exponential_float(self):
+        randoms = self.rg.standard_exponential(10, dtype='float32')
+        assert_(len(randoms) == 10)
+        assert randoms.dtype == np.float32
+        params_0(partial(self.rg.standard_exponential, dtype='float32'))
+
+    def test_standard_exponential_float_log(self):
+        randoms = self.rg.standard_exponential(10, dtype='float32',
+                                               method='inv')
+        assert_(len(randoms) == 10)
+        assert randoms.dtype == np.float32
+        params_0(partial(self.rg.standard_exponential, dtype='float32',
+                         method='inv'))
+
+    def test_standard_cauchy(self):
+        assert_(len(self.rg.standard_cauchy(10)) == 10)
+        params_0(self.rg.standard_cauchy)
+
+    def test_standard_t(self):
+        assert_(len(self.rg.standard_t(10, 10)) == 10)
+        params_1(self.rg.standard_t)
+
+    def test_binomial(self):
+        assert_(self.rg.binomial(10, .5) >= 0)
+        assert_(self.rg.binomial(1000, .5) >= 0)
+
+    def test_reset_state(self):
+        state = self.rg.bit_generator.state
+        int_1 = self.rg.integers(2**31)
+        self.rg.bit_generator.state = state
+        int_2 = self.rg.integers(2**31)
+        assert_(int_1 == int_2)
+
+    def test_entropy_init(self):
+        rg = Generator(self.bit_generator())
+        rg2 = Generator(self.bit_generator())
+        assert_(not comp_state(rg.bit_generator.state,
+                               rg2.bit_generator.state))
+
+    def test_seed(self):
+        rg = Generator(self.bit_generator(*self.seed))
+        rg2 = Generator(self.bit_generator(*self.seed))
+        rg.random()
+        rg2.random()
+        assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+    def test_reset_state_gauss(self):
+        rg = Generator(self.bit_generator(*self.seed))
+        rg.standard_normal()
+        state = rg.bit_generator.state
+        n1 = rg.standard_normal(size=10)
+        rg2 = Generator(self.bit_generator())
+        rg2.bit_generator.state = state
+        n2 = rg2.standard_normal(size=10)
+        assert_array_equal(n1, n2)
+
+    def test_reset_state_uint32(self):
+        rg = Generator(self.bit_generator(*self.seed))
+        rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
+        state = rg.bit_generator.state
+        n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
+        rg2 = Generator(self.bit_generator())
+        rg2.bit_generator.state = state
+        n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
+        assert_array_equal(n1, n2)
+
+    def test_reset_state_float(self):
+        rg = Generator(self.bit_generator(*self.seed))
+        rg.random(dtype='float32')
+        state = rg.bit_generator.state
+        n1 = rg.random(size=10, dtype='float32')
+        rg2 = Generator(self.bit_generator())
+        rg2.bit_generator.state = state
+        n2 = rg2.random(size=10, dtype='float32')
+        assert_((n1 == n2).all())
+
+    def test_shuffle(self):
+        # Exercise in-place shuffle (test_permutation below covers permutation)
+        original = np.arange(200, 0, -1)
+        shuffled = original.copy()
+        self.rg.shuffle(shuffled)
+        assert_((original != shuffled).any())
+
+    def test_permutation(self):
+        original = np.arange(200, 0, -1)
+        permuted = self.rg.permutation(original)
+        assert_((original != permuted).any())
+
+    def test_beta(self):
+        vals = self.rg.beta(2.0, 2.0, 10)
+        assert_(len(vals) == 10)
+        vals = self.rg.beta(np.array([2.0] * 10), 2.0)
+        assert_(len(vals) == 10)
+        vals = self.rg.beta(2.0, np.array([2.0] * 10))
+        assert_(len(vals) == 10)
+        vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
+        assert_(len(vals) == 10)
+        vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
+        assert_(vals.shape == (10, 10))
+
+    def test_bytes(self):
+        vals = self.rg.bytes(10)
+        assert_(len(vals) == 10)
+
+    def test_chisquare(self):
+        vals = self.rg.chisquare(2.0, 10)
+        assert_(len(vals) == 10)
+        params_1(self.rg.chisquare)
+
+    def test_exponential(self):
+        vals = self.rg.exponential(2.0, 10)
+        assert_(len(vals) == 10)
+        params_1(self.rg.exponential)
+
+    def test_f(self):
+        vals = self.rg.f(3, 1000, 10)
+        assert_(len(vals) == 10)
+
+    def test_gamma(self):
+        vals = self.rg.gamma(3, 2, 10)
+        assert_(len(vals) == 10)
+
+    def test_geometric(self):
+        vals = self.rg.geometric(0.5, 10)
+        assert_(len(vals) == 10)
+        params_1(self.rg.geometric, bounded=True)
+
+    def test_gumbel(self):
+        vals = self.rg.gumbel(2.0, 2.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_laplace(self):
+        vals = self.rg.laplace(2.0, 2.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_logistic(self):
+        vals = self.rg.logistic(2.0, 2.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_logseries(self):
+        vals = self.rg.logseries(0.5, 10)
+        assert_(len(vals) == 10)
+
+    def test_negative_binomial(self):
+        vals = self.rg.negative_binomial(10, 0.2, 10)
+        assert_(len(vals) == 10)
+
+    def test_noncentral_chisquare(self):
+        vals = self.rg.noncentral_chisquare(10, 2, 10)
+        assert_(len(vals) == 10)
+
+    def test_noncentral_f(self):
+        vals = self.rg.noncentral_f(3, 1000, 2, 10)
+        assert_(len(vals) == 10)
+        vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
+        assert_(len(vals) == 10)
+        vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
+        assert_(len(vals) == 10)
+        vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
+        assert_(len(vals) == 10)
+
+    def test_normal(self):
+        vals = self.rg.normal(10, 0.2, 10)
+        assert_(len(vals) == 10)
+
+    def test_pareto(self):
+        vals = self.rg.pareto(3.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_poisson(self):
+        vals = self.rg.poisson(10, 10)
+        assert_(len(vals) == 10)
+        vals = self.rg.poisson(np.array([10] * 10))
+        assert_(len(vals) == 10)
+        params_1(self.rg.poisson)
+
+    def test_power(self):
+        vals = self.rg.power(0.2, 10)
+        assert_(len(vals) == 10)
+
+    def test_integers(self):
+        vals = self.rg.integers(10, 20, 10)
+        assert_(len(vals) == 10)
+
+    def test_rayleigh(self):
+        vals = self.rg.rayleigh(0.2, 10)
+        assert_(len(vals) == 10)
+        params_1(self.rg.rayleigh, bounded=True)
+
+    def test_vonmises(self):
+        vals = self.rg.vonmises(10, 0.2, 10)
+        assert_(len(vals) == 10)
+
+    def test_wald(self):
+        vals = self.rg.wald(1.0, 1.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_weibull(self):
+        vals = self.rg.weibull(1.0, 10)
+        assert_(len(vals) == 10)
+
+    def test_zipf(self):
+        vals = self.rg.zipf(10, 10)
+        assert_(len(vals) == 10)
+        vals = self.rg.zipf(self.vec_1d)
+        assert_(len(vals) == 100)
+        vals = self.rg.zipf(self.vec_2d)
+        assert_(vals.shape == (1, 100))
+        vals = self.rg.zipf(self.mat)
+        assert_(vals.shape == (100, 100))
+
+    def test_hypergeometric(self):
+        vals = self.rg.hypergeometric(25, 25, 20)
+        assert_(np.isscalar(vals))
+        vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
+        assert_(vals.shape == (10,))
+
+    def test_triangular(self):
+        vals = self.rg.triangular(-5, 0, 5)
+        assert_(np.isscalar(vals))
+        vals = self.rg.triangular(-5, np.array([0] * 10), 5)
+        assert_(vals.shape == (10,))
+
+    def test_multivariate_normal(self):
+        mean = [0, 0]
+        cov = [[1, 0], [0, 100]]  # diagonal covariance
+        x = self.rg.multivariate_normal(mean, cov, 5000)
+        assert_(x.shape == (5000, 2))
+        x_zig = self.rg.multivariate_normal(mean, cov, 5000)
+        assert_(x_zig.shape == (5000, 2))
+        x_inv = self.rg.multivariate_normal(mean, cov, 5000)
+        assert_(x_inv.shape == (5000, 2))
+        assert_((x_zig != x_inv).any())
+
+    def test_multinomial(self):
+        vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
+        assert_(vals.shape == (2,))
+        vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
+        assert_(vals.shape == (10, 2))
+
+    def test_dirichlet(self):
+        s = self.rg.dirichlet((10, 5, 3), 20)
+        assert_(s.shape == (20, 3))
+
+    def test_pickle(self):
+        pick = pickle.dumps(self.rg)
+        unpick = pickle.loads(pick)
+        assert_(type(self.rg) == type(unpick))
+        assert_(comp_state(self.rg.bit_generator.state,
+                           unpick.bit_generator.state))
+
+        pick = pickle.dumps(self.rg)
+        unpick = pickle.loads(pick)
+        assert_(type(self.rg) == type(unpick))
+        assert_(comp_state(self.rg.bit_generator.state,
+                           unpick.bit_generator.state))
+
+    def 
test_seed_array(self): + if self.seed_vector_bits is None: + bitgen_name = self.bit_generator.__name__ + pytest.skip(f'Vector seeding is not supported by {bitgen_name}') + + if self.seed_vector_bits == 32: + dtype = np.uint32 + else: + dtype = np.uint64 + seed = np.array([1], dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(1) + state2 = bg.state + assert_(comp_state(state1, state2)) + + seed = np.arange(4, dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = np.arange(1500, dtype=dtype) + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + seed = 2 ** np.mod(np.arange(1500, dtype=dtype), + self.seed_vector_bits - 1) + 1 + bg = self.bit_generator(seed) + state1 = bg.state + bg = self.bit_generator(seed[0]) + state2 = bg.state + assert_(not comp_state(state1, state2)) + + def test_uniform_float(self): + rg = Generator(self.bit_generator(12345)) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.random(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.random(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_gamma_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_gamma(4.0, 11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_normal_zig_floats(self): + rg = Generator(self.bit_generator()) + warmup(rg) + state = rg.bit_generator.state + r1 = rg.standard_normal(11, dtype=np.float32) + rg2 = Generator(self.bit_generator()) + warmup(rg2) + rg2.bit_generator.state = state + r2 = rg2.standard_normal(11, dtype=np.float32) + assert_array_equal(r1, r2) + assert_equal(r1.dtype, np.float32) + assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state)) + + def test_output_fill(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=existing) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size) + assert_equal(direct, existing) + + sized = np.empty(size) + rg.bit_generator.state = state + rg.standard_normal(out=sized, size=sized.shape) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_normal(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_normal(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_uniform(self): + rg = self.rg + state = 
rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.random(out=existing) + rg.bit_generator.state = state + direct = rg.random(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.random(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.random(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_exponential(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.empty(size) + rg.bit_generator.state = state + rg.standard_exponential(out=existing) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size) + assert_equal(direct, existing) + + existing = np.empty(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_exponential(out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_exponential(size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(1.0, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_filling_gamma_broadcast(self): + rg = self.rg + state = rg.bit_generator.state + size = (31, 7, 97) + mu = np.arange(97.0) + 1.0 + existing = np.zeros(size) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size) + assert_equal(direct, existing) + + existing = np.zeros(size, dtype=np.float32) + rg.bit_generator.state = state + rg.standard_gamma(mu, out=existing, dtype=np.float32) + rg.bit_generator.state = state + direct = rg.standard_gamma(mu, size=size, dtype=np.float32) + assert_equal(direct, existing) + + def test_output_fill_error(self): + rg = self.rg + size = (31, 7, 97) + existing = np.empty(size) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_normal(out=existing[::3]) + existing = np.empty(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_normal(out=existing, dtype=np.float64) + + existing = np.zeros(size, dtype=np.float32) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float64) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32) + existing = np.zeros(size, dtype=np.float64) + with pytest.raises(TypeError): + rg.standard_gamma(1.0, out=existing, dtype=np.float32) + with pytest.raises(ValueError): + rg.standard_gamma(1.0, out=existing[::3]) + + def test_integers_broadcast(self, dtype): + if dtype == np.bool: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + self._reset_state() + a = self.rg.integers(lower, [upper] * 10, dtype=dtype) + self._reset_state() + b = self.rg.integers([lower] * 10, upper, dtype=dtype) + assert_equal(a, b) + self._reset_state() + c = self.rg.integers(lower, upper, size=10, dtype=dtype) 
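+        # The state is reset before each draw, so scalar bounds with an
+        # explicit size must reproduce the same stream as the broadcast
+        # array-bound draws above.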
+ assert_equal(a, c) + self._reset_state() + d = self.rg.integers(np.array( + [lower] * 10), np.array([upper], dtype=object), size=10, + dtype=dtype) + assert_equal(a, d) + self._reset_state() + e = self.rg.integers( + np.array([lower] * 10), np.array([upper] * 10), size=10, + dtype=dtype) + assert_equal(a, e) + + self._reset_state() + a = self.rg.integers(0, upper, size=10, dtype=dtype) + self._reset_state() + b = self.rg.integers([upper] * 10, dtype=dtype) + assert_equal(a, b) + + def test_integers_numpy(self, dtype): + high = np.array([1]) + low = np.array([0]) + + out = self.rg.integers(low, high, dtype=dtype) + assert out.shape == (1,) + + out = self.rg.integers(low[0], high, dtype=dtype) + assert out.shape == (1,) + + out = self.rg.integers(low, high[0], dtype=dtype) + assert out.shape == (1,) + + def test_integers_broadcast_errors(self, dtype): + if dtype == np.bool: + upper = 2 + lower = 0 + else: + info = np.iinfo(dtype) + upper = int(info.max) + 1 + lower = info.min + with pytest.raises(ValueError): + self.rg.integers(lower, [upper + 1] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers(lower - 1, [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers([lower - 1], [upper] * 10, dtype=dtype) + with pytest.raises(ValueError): + self.rg.integers([0], [0], dtype=dtype) + + +class TestMT19937(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = MT19937 + cls.advance = None + cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 32 + cls._extra_setup() + cls.seed_error = ValueError + + def test_numpy_state(self): + nprg = np.random.RandomState() + nprg.standard_normal(99) + state = nprg.get_state() + self.rg.bit_generator.state = state + state2 = self.rg.bit_generator.state + assert_((state[1] == state2['state']['key']).all()) + assert_(state[2] == state2['state']['pos']) + + +class TestPhilox(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = Philox + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestSFC64(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = SFC64 + cls.advance = None + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 192 + cls._extra_setup() + + +class TestPCG64(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64 + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestPCG64DXSM(RNG): + @classmethod + def setup_class(cls): + cls.bit_generator = PCG64DXSM + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = Generator(cls.bit_generator(*cls.seed)) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + +class TestDefaultRNG(RNG): + @classmethod + def setup_class(cls): + # This will duplicate some tests that directly instantiate a fresh + # Generator(), but that's okay. 
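+        # np.random.default_rng() is backed by PCG64 (see test_default_is_pcg64
+        # below), so these class attributes mirror TestPCG64 above.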
+ cls.bit_generator = PCG64 + cls.advance = 2**63 + 2**31 + 2**15 + 1 + cls.seed = [12345] + cls.rg = np.random.default_rng(*cls.seed) + cls.initial_state = cls.rg.bit_generator.state + cls.seed_vector_bits = 64 + cls._extra_setup() + + def test_default_is_pcg64(self): + # In order to change the default BitGenerator, we'll go through + # a deprecation cycle to move to a different function. + assert_(isinstance(self.rg.bit_generator, PCG64)) + + def test_seed(self): + np.random.default_rng() + np.random.default_rng(None) + np.random.default_rng(12345) + np.random.default_rng(0) + np.random.default_rng(43660444402423911716352051725018508569) + np.random.default_rng([43660444402423911716352051725018508569, + 279705150948142787361475340226491943209]) + with pytest.raises(ValueError): + np.random.default_rng(-1) + with pytest.raises(ValueError): + np.random.default_rng([12345, -1]) diff --git a/python/numpy/rec/__init__.py b/python/numpy/rec/__init__.py new file mode 100644 index 000000000..420240c8d --- /dev/null +++ b/python/numpy/rec/__init__.py @@ -0,0 +1,2 @@ +from numpy._core.records import * +from numpy._core.records import __all__, __doc__ diff --git a/python/numpy/rec/__init__.pyi b/python/numpy/rec/__init__.pyi new file mode 100644 index 000000000..6a78c66ff --- /dev/null +++ b/python/numpy/rec/__init__.pyi @@ -0,0 +1,23 @@ +from numpy._core.records import ( + array, + find_duplicate, + format_parser, + fromarrays, + fromfile, + fromrecords, + fromstring, + recarray, + record, +) + +__all__ = [ + "record", + "recarray", + "format_parser", + "fromarrays", + "fromrecords", + "fromstring", + "fromfile", + "array", + "find_duplicate", +] diff --git a/python/numpy/rec/__pycache__/__init__.cpython-312.pyc b/python/numpy/rec/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..8d44c17a4 Binary files /dev/null and b/python/numpy/rec/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/strings/__init__.py b/python/numpy/strings/__init__.py new file mode 100644 index 000000000..561dadcf3 --- /dev/null +++ b/python/numpy/strings/__init__.py @@ -0,0 +1,2 @@ +from numpy._core.strings import * +from numpy._core.strings import __all__, __doc__ diff --git a/python/numpy/strings/__init__.pyi b/python/numpy/strings/__init__.pyi new file mode 100644 index 000000000..b2fb36353 --- /dev/null +++ b/python/numpy/strings/__init__.pyi @@ -0,0 +1,97 @@ +from numpy._core.strings import ( + add, + capitalize, + center, + count, + decode, + encode, + endswith, + equal, + expandtabs, + find, + greater, + greater_equal, + index, + isalnum, + isalpha, + isdecimal, + isdigit, + islower, + isnumeric, + isspace, + istitle, + isupper, + less, + less_equal, + ljust, + lower, + lstrip, + mod, + multiply, + not_equal, + partition, + replace, + rfind, + rindex, + rjust, + rpartition, + rstrip, + slice, + startswith, + str_len, + strip, + swapcase, + title, + translate, + upper, + zfill, +) + +__all__ = [ + "equal", + "not_equal", + "less", + "less_equal", + "greater", + "greater_equal", + "add", + "multiply", + "isalpha", + "isdigit", + "isspace", + "isalnum", + "islower", + "isupper", + "istitle", + "isdecimal", + "isnumeric", + "str_len", + "find", + "rfind", + "index", + "rindex", + "count", + "startswith", + "endswith", + "lstrip", + "rstrip", + "strip", + "replace", + "expandtabs", + "center", + "ljust", + "rjust", + "zfill", + "partition", + "rpartition", + "upper", + "lower", + "swapcase", + "capitalize", + "title", + "mod", + "decode", + "encode", + "translate", + "slice", +] 
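For orientation, here is a minimal usage sketch (not part of the patch, assuming a NumPy 2.x build) of how the numpy.rec and numpy.strings shims added above are consumed; the values are illustrative only, and every function used appears in the __all__ lists of the new modules:

import numpy as np

# Element-wise string operations via the new numpy.strings namespace
names = np.array(["alice", "bob"])
print(np.strings.capitalize(names))     # ['Alice' 'Bob']
print(np.strings.str_len(names))        # [5 3]
print(np.strings.endswith(names, "e"))  # [ True False]

# Record arrays via the numpy.rec namespace
rec = np.rec.fromrecords([(1, 2.5), (2, 3.5)], names="idx,val")
print(rec.idx, rec.val)                 # [1 2] [2.5 3.5]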
diff --git a/python/numpy/strings/__pycache__/__init__.cpython-312.pyc b/python/numpy/strings/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..c5d51caf2 Binary files /dev/null and b/python/numpy/strings/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/testing/__init__.py b/python/numpy/testing/__init__.py new file mode 100644 index 000000000..fe0c4f236 --- /dev/null +++ b/python/numpy/testing/__init__.py @@ -0,0 +1,22 @@ +"""Common test support for all numpy test scripts. + +This single module should provide all the common functionality for numpy tests +in a single location, so that test scripts can just import it and work right +away. + +""" +from unittest import TestCase + +from . import _private, overrides +from ._private import extbuild +from ._private.utils import * +from ._private.utils import _assert_valid_refcount, _gen_alignment_data + +__all__ = ( + _private.utils.__all__ + ['TestCase', 'overrides'] +) + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/python/numpy/testing/__init__.pyi b/python/numpy/testing/__init__.pyi new file mode 100644 index 000000000..ba3c9a2b7 --- /dev/null +++ b/python/numpy/testing/__init__.pyi @@ -0,0 +1,102 @@ +from unittest import TestCase + +from . import overrides +from ._private.utils import ( + HAS_LAPACK64, + HAS_REFCOUNT, + IS_EDITABLE, + IS_INSTALLED, + IS_MUSL, + IS_PYPY, + IS_PYSTON, + IS_WASM, + NOGIL_BUILD, + NUMPY_ROOT, + IgnoreException, + KnownFailureException, + SkipTest, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_compare, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_raises_regex, + assert_string_equal, + assert_warns, + break_cycles, + build_err_msg, + check_support_sve, + clear_and_catch_warnings, + decorate_methods, + jiffies, + measure, + memusage, + print_assert_equal, + run_threaded, + rundocs, + runstring, + suppress_warnings, + tempdir, + temppath, + verbose, +) + +__all__ = [ + "HAS_LAPACK64", + "HAS_REFCOUNT", + "IS_EDITABLE", + "IS_INSTALLED", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "NOGIL_BUILD", + "NUMPY_ROOT", + "IgnoreException", + "KnownFailureException", + "SkipTest", + "TestCase", + "assert_", + "assert_allclose", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_almost_equal", + "assert_array_almost_equal_nulp", + "assert_array_compare", + "assert_array_equal", + "assert_array_less", + "assert_array_max_ulp", + "assert_equal", + "assert_no_gc_cycles", + "assert_no_warnings", + "assert_raises", + "assert_raises_regex", + "assert_string_equal", + "assert_warns", + "break_cycles", + "build_err_msg", + "check_support_sve", + "clear_and_catch_warnings", + "decorate_methods", + "jiffies", + "measure", + "memusage", + "overrides", + "print_assert_equal", + "run_threaded", + "rundocs", + "runstring", + "suppress_warnings", + "tempdir", + "temppath", + "verbose", +] diff --git a/python/numpy/testing/__pycache__/__init__.cpython-312.pyc b/python/numpy/testing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..a982ab5b7 Binary files /dev/null and b/python/numpy/testing/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/testing/__pycache__/overrides.cpython-312.pyc b/python/numpy/testing/__pycache__/overrides.cpython-312.pyc new file 
mode 100644
index 000000000..ff549b096
Binary files /dev/null and b/python/numpy/testing/__pycache__/overrides.cpython-312.pyc differ
diff --git a/python/numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc b/python/numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc
new file mode 100644
index 000000000..abb3eb02a
Binary files /dev/null and b/python/numpy/testing/__pycache__/print_coercion_tables.cpython-312.pyc differ
diff --git a/python/numpy/testing/_private/__init__.py b/python/numpy/testing/_private/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/testing/_private/__init__.pyi b/python/numpy/testing/_private/__init__.pyi
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/testing/_private/__pycache__/__init__.cpython-312.pyc b/python/numpy/testing/_private/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 000000000..e0c60184b
Binary files /dev/null and b/python/numpy/testing/_private/__pycache__/__init__.cpython-312.pyc differ
diff --git a/python/numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc b/python/numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc
new file mode 100644
index 000000000..06da647af
Binary files /dev/null and b/python/numpy/testing/_private/__pycache__/extbuild.cpython-312.pyc differ
diff --git a/python/numpy/testing/_private/__pycache__/utils.cpython-312.pyc b/python/numpy/testing/_private/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 000000000..d96c4a0d6
Binary files /dev/null and b/python/numpy/testing/_private/__pycache__/utils.cpython-312.pyc differ
diff --git a/python/numpy/testing/_private/extbuild.py b/python/numpy/testing/_private/extbuild.py
new file mode 100644
index 000000000..2a724b73c
--- /dev/null
+++ b/python/numpy/testing/_private/extbuild.py
@@ -0,0 +1,250 @@
+"""
+Build a c-extension module on-the-fly in tests.
+See build_and_import_extension for usage hints
+
+"""
+
+import os
+import pathlib
+import subprocess
+import sys
+import sysconfig
+import textwrap
+
+__all__ = ['build_and_import_extension', 'compile_extension_module']
+
+
+def build_and_import_extension(
+        modname, functions, *, prologue="", build_dir=None,
+        include_dirs=None, more_init=""):
+    """
+    Build and import a c-extension module `modname` from a list of function
+    fragments `functions`.
+
+
+    Parameters
+    ----------
+    functions : list of fragments
+        Each fragment is a sequence of func_name, calling convention, snippet.
+    prologue : string
+        Code to precede the rest, usually extra ``#include`` or ``#define``
+        macros.
+    build_dir : pathlib.Path
+        Where to build the module, usually a temporary directory
+    include_dirs : list
+        Extra directories to find include files when compiling
+    more_init : string
+        Code to appear in the module PyMODINIT_FUNC
+
+    Returns
+    -------
+    out: module
+        The module will have been loaded and is ready for use
+
+    Examples
+    --------
+    >>> functions = [("test_bytes", "METH_O", \"\"\"
+        if ( !PyBytes_Check(args)) {
+            Py_RETURN_FALSE;
+        }
+        Py_RETURN_TRUE;
+    \"\"\")]
+    >>> mod = build_and_import_extension("testme", functions)
+    >>> assert not mod.test_bytes('abc')
+    >>> assert mod.test_bytes(b'abc')
+    """
+    if include_dirs is None:
+        include_dirs = []
+    body = prologue + _make_methods(functions, modname)
+    init = """
+    PyObject *mod = PyModule_Create(&moduledef);
+    #ifdef Py_GIL_DISABLED
+    PyUnstable_Module_SetGIL(mod, Py_MOD_GIL_NOT_USED);
+    #endif
+    """
+    if not build_dir:
+        build_dir = pathlib.Path('.')
+    if more_init:
+        init += """#define INITERROR return NULL
+        """
+        init += more_init
+    init += "\nreturn mod;"
+    source_string = _make_source(modname, init, body)
+    mod_so = compile_extension_module(
+        modname, build_dir, include_dirs, source_string)
+    import importlib.util
+    spec = importlib.util.spec_from_file_location(modname, mod_so)
+    foo = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(foo)
+    return foo
+
+
+def compile_extension_module(
+        name, builddir, include_dirs,
+        source_string, libraries=None, library_dirs=None):
+    """
+    Build an extension module and return the filename of the resulting
+    native code file.
+
+    Parameters
+    ----------
+    name : string
+        name of the module, possibly including dots if it is a module inside a
+        package.
+    builddir : pathlib.Path
+        Where to build the module, usually a temporary directory
+    include_dirs : list
+        Extra directories to find include files when compiling
+    libraries : list
+        Libraries to link into the extension module
+    library_dirs: list
+        Where to find the libraries, ``-L`` passed to the linker
+    """
+    modname = name.split('.')[-1]
+    dirname = builddir / name
+    dirname.mkdir(exist_ok=True)
+    cfile = _convert_str_to_file(source_string, dirname)
+    include_dirs = include_dirs or []
+    libraries = libraries or []
+    library_dirs = library_dirs or []
+
+    return _c_compile(
+        cfile, outputfilename=dirname / modname,
+        include_dirs=include_dirs, libraries=libraries,
+        library_dirs=library_dirs,
+        )
+
+
+def _convert_str_to_file(source, dirname):
+    """Helper function to create a file ``source.c`` in `dirname` that contains
+    the string in `source`. Returns the file name
+    """
+    filename = dirname / 'source.c'
+    with filename.open('w') as f:
+        f.write(str(source))
+    return filename
+
+
+def _make_methods(functions, modname):
+    """ Turns the name, signature, code in functions into complete functions
+    and lists them in a methods_table. Then turns the methods_table into a
+    ``PyMethodDef`` structure and returns the resulting code fragment ready
+    for compilation
+    """
+    methods_table = []
+    codes = []
+    for funcname, flags, code in functions:
+        cfuncname = f"{modname}_{funcname}"
+        if 'METH_KEYWORDS' in flags:
+            signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'
+        else:
+            signature = '(PyObject *self, PyObject *args)'
+        methods_table.append(
+            "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags))
+        func_code = f"""
+        static PyObject* {cfuncname}{signature}
+        {{
+        {code}
+        }}
+        """
+        codes.append(func_code)
+
+    body = "\n".join(codes) + """
+    static PyMethodDef methods[] = {
+    %(methods)s
+    { NULL }
+    };
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "%(modname)s",  /* m_name */
+        NULL,           /* m_doc */
+        -1,             /* m_size */
+        methods,        /* m_methods */
+    };
+    """ % {'methods': '\n'.join(methods_table), 'modname': modname}
+    return body
+
+
+def _make_source(name, init, body):
+    """ Combines the code fragments into source code ready to be compiled
+    """
+    code = """
+    #include <Python.h>
+
+    %(body)s
+
+    PyMODINIT_FUNC
+    PyInit_%(name)s(void) {
+    %(init)s
+    }
+    """ % {
+        'name': name, 'init': init, 'body': body,
+    }
+    return code
+
+
+def _c_compile(cfile, outputfilename, include_dirs, libraries,
+               library_dirs):
+    link_extra = []
+    if sys.platform == 'win32':
+        compile_extra = ["/we4013"]
+        link_extra.append('/DEBUG')  # generate .pdb file
+    elif sys.platform.startswith('linux'):
+        compile_extra = [
+            "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
+    else:
+        compile_extra = []
+
+    return build(
+        cfile, outputfilename,
+        compile_extra, link_extra,
+        include_dirs, libraries, library_dirs)
+
+
+def build(cfile, outputfilename, compile_extra, link_extra,
+          include_dirs, libraries, library_dirs):
+    "use meson to build"
+
+    build_dir = cfile.parent / "build"
+    os.makedirs(build_dir, exist_ok=True)
+    with open(cfile.parent / "meson.build", "wt") as fid:
+        link_dirs = ['-L' + d for d in library_dirs]
+        fid.write(textwrap.dedent(f"""\
+            project('foo', 'c')
+            py = import('python').find_installation(pure: false)
+            py.extension_module(
+                '{outputfilename.parts[-1]}',
+                '{cfile.parts[-1]}',
+                c_args: {compile_extra},
+                link_args: {link_dirs},
+                include_directories: {include_dirs},
+            )
+        """))
+    native_file_name = cfile.parent / ".mesonpy-native-file.ini"
+    with open(native_file_name, "wt") as fid:
+        fid.write(textwrap.dedent(f"""\
+            [binaries]
+            python = '{sys.executable}'
+        """))
+    if sys.platform == "win32":
+        subprocess.check_call(["meson", "setup",
+                               "--buildtype=release",
+                               "--vsenv", ".."],
+                              cwd=build_dir,
+                              )
+    else:
+        subprocess.check_call(["meson", "setup", "--vsenv",
+                               "..", f'--native-file={os.fspath(native_file_name)}'],
+                              cwd=build_dir
+                              )
+
+    so_name = outputfilename.parts[-1] + get_so_suffix()
+    subprocess.check_call(["meson", "compile"], cwd=build_dir)
+    os.rename(str(build_dir / so_name), cfile.parent / so_name)
+    return cfile.parent / so_name
+
+
+def get_so_suffix():
+    ret = sysconfig.get_config_var('EXT_SUFFIX')
+    assert ret
+    return ret
diff --git a/python/numpy/testing/_private/extbuild.pyi b/python/numpy/testing/_private/extbuild.pyi
new file mode 100644
index 000000000..c1ae507d6
--- /dev/null
+++ b/python/numpy/testing/_private/extbuild.pyi
@@ -0,0 +1,25 @@
+import pathlib
+import types
+from collections.abc import Sequence
+
+__all__ = ["build_and_import_extension", "compile_extension_module"]
+
+def build_and_import_extension(
+    modname: str,
+    functions: 
Sequence[tuple[str, str, str]], + *, + prologue: str = "", + build_dir: pathlib.Path | None = None, + include_dirs: Sequence[str] | None = None, + more_init: str = "", +) -> types.ModuleType: ... + +# +def compile_extension_module( + name: str, + builddir: pathlib.Path, + include_dirs: Sequence[str], + source_string: str, + libraries: Sequence[str] | None = None, + library_dirs: Sequence[str] | None = None, +) -> pathlib.Path: ... diff --git a/python/numpy/testing/_private/utils.py b/python/numpy/testing/_private/utils.py new file mode 100644 index 000000000..6d4c87ba2 --- /dev/null +++ b/python/numpy/testing/_private/utils.py @@ -0,0 +1,2752 @@ +""" +Utility function to facilitate testing. + +""" +import concurrent.futures +import contextlib +import gc +import importlib.metadata +import operator +import os +import pathlib +import platform +import pprint +import re +import shutil +import sys +import sysconfig +import threading +import warnings +from functools import partial, wraps +from io import StringIO +from tempfile import mkdtemp, mkstemp +from unittest.case import SkipTest +from warnings import WarningMessage + +import numpy as np +import numpy.linalg._umath_linalg +from numpy import isfinite, isinf, isnan +from numpy._core import arange, array, array_repr, empty, float32, intp, isnat, ndarray + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', + 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', + 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', + 'IS_EDITABLE', 'IS_INSTALLED', 'NUMPY_ROOT', 'run_threaded', 'IS_64BIT', + 'BLAS_SUPPORTS_FPE', + ] + + +class KnownFailureException(Exception): + '''Raise this exception to mark a test as a known failing test.''' + pass + + +KnownFailureTest = KnownFailureException # backwards compat +verbose = 0 + +NUMPY_ROOT = pathlib.Path(np.__file__).parent + +try: + np_dist = importlib.metadata.distribution('numpy') +except importlib.metadata.PackageNotFoundError: + IS_INSTALLED = IS_EDITABLE = False +else: + IS_INSTALLED = True + try: + if sys.version_info >= (3, 13): + IS_EDITABLE = np_dist.origin.dir_info.editable + else: + # Backport importlib.metadata.Distribution.origin + import json # noqa: E401 + import types + origin = json.loads( + np_dist.read_text('direct_url.json') or '{}', + object_hook=lambda data: types.SimpleNamespace(**data), + ) + IS_EDITABLE = origin.dir_info.editable + except AttributeError: + IS_EDITABLE = False + + # spin installs numpy directly via meson, instead of using meson-python, and + # runs the module by setting PYTHONPATH. This is problematic because the + # resulting installation lacks the Python metadata (.dist-info), and numpy + # might already be installed on the environment, causing us to find its + # metadata, even though we are not actually loading that package. + # Work around this issue by checking if the numpy root matches. 
+ if not IS_EDITABLE and np_dist.locate_file('numpy') != NUMPY_ROOT: + IS_INSTALLED = False + +IS_WASM = platform.machine() in ["wasm32", "wasm64"] +IS_PYPY = sys.implementation.name == 'pypy' +IS_PYSTON = hasattr(sys, "pyston_version_info") +HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON +BLAS_SUPPORTS_FPE = np._core._multiarray_umath._blas_supports_fpe(None) + +HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 + +IS_MUSL = False +# alternate way is +# from packaging.tags import sys_tags +# _tags = list(sys_tags()) +# if 'musllinux' in _tags[0].platform: +_v = sysconfig.get_config_var('HOST_GNU_TYPE') or '' +if 'musl' in _v: + IS_MUSL = True + +NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) +IS_64BIT = np.dtype(np.intp).itemsize == 8 + +def assert_(val, msg=''): + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +if os.name == 'nt': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance=None, + inum=-1, format=None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + # (dead link) + # My older explanation for this was that the "AddCounter" process + # forced the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: + format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath((machine, object, instance, None, + inum, counter)) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) +elif sys.platform[:5] == 'linux': + + def memusage(_proc_pid_stat=None): + """ + Return virtual memory size in bytes of the running python. + + """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + try: + with open(_proc_pid_stat) as f: + l = f.readline().split(' ') + return int(l[22]) + except Exception: + return +else: + def memusage(): + """ + Return memory usage of running python. [Not implemented] + + """ + raise NotImplementedError + + +if sys.platform[:5] == 'linux': + def jiffies(_proc_pid_stat=None, _load_time=None): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. 
+ + """ + _proc_pid_stat = _proc_pid_stat or f'/proc/{os.getpid()}/stat' + _load_time = _load_time or [] + import time + if not _load_time: + _load_time.append(time.time()) + try: + with open(_proc_pid_stat) as f: + l = f.readline().split(' ') + return int(l[13]) + except Exception: + return int(100 * (time.time() - _load_time[0])) +else: + # os.getpid is not in all platforms available. + # Using time is safe but inaccurate, especially when process + # was suspended or sleeping. + def jiffies(_load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + import time + if not _load_time: + _load_time.append(time.time()) + return int(100 * (time.time() - _load_time[0])) + + +def build_err_msg(arrays, err_msg, header='Items are not equal:', + verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): + msg = ['\n' + header] + err_msg = str(err_msg) + if err_msg: + if err_msg.find('\n') == -1 and len(err_msg) < 79 - len(header): + msg = [msg[0] + ' ' + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + r_func = partial(array_repr, precision=precision) + else: + r_func = repr + + try: + r = r_func(a) + except Exception as exc: + r = f'[repr failed for <{type(a).__name__}>: {exc}]' + if r.count('\n') > 3: + r = '\n'.join(r.splitlines()[:3]) + r += '...' + msg.append(f' {names[i]}: {r}') + return '\n'.join(msg) + + +def assert_equal(actual, desired, err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two objects are not equal. + + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + This function handles NaN comparisons as if NaN was a "normal" number. + That is, AssertionError is not raised if both objects have NaNs in the same + positions. This is in contrast to the IEEE standard on NaNs, which says + that NaN compared to anything must return False. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True and either of the `actual` and `desired` arguments is an array, + raise an ``AssertionError`` when either the shape or the data type of + the arguments does not match. If neither argument is an array, this + parameter has no effect. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If actual and desired are not equal. + + See Also + -------- + assert_allclose + assert_array_almost_equal_nulp, + assert_array_max_ulp, + + Notes + ----- + By default, when one of `actual` and `desired` is a scalar and the other is + an array, the function checks that each element of the array is equal to + the scalar. This behaviour can be disabled by setting ``strict==True``. + + Examples + -------- + >>> np.testing.assert_equal([4, 5], [4, 6]) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + The following comparison does not raise an exception. There are NaNs + in the inputs, but they are in the same positions. 
+ + >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan]) + + As mentioned in the Notes section, `assert_equal` has special + handling for scalars when one of the arguments is an array. + Here, the test checks that each value in `x` is 3: + + >>> x = np.full((2, 5), fill_value=3) + >>> np.testing.assert_equal(x, 3) + + Use `strict` to raise an AssertionError when comparing a scalar with an + array of a different shape: + + >>> np.testing.assert_equal(x, 3, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (shapes (2, 5), () mismatch) + ACTUAL: array([[3, 3, 3, 3, 3], + [3, 3, 3, 3, 3]]) + DESIRED: array(3) + + The `strict` parameter also ensures that the array data types match: + + >>> x = np.array([2, 2, 2]) + >>> y = np.array([2., 2., 2.], dtype=np.float32) + >>> np.testing.assert_equal(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (dtypes int64, float32 mismatch) + ACTUAL: array([2, 2, 2]) + DESIRED: array([2., 2., 2.], dtype=float32) + """ + __tracebackhide__ = True # Hide traceback for py.test + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}', + verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}', + verbose) + return + from numpy import imag, iscomplexobj, real + from numpy._core import isscalar, ndarray, signbit + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose, + strict=strict) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except (ValueError, TypeError): + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + try: + isdesnat = isnat(desired) + isactnat = isnat(actual) + dtypes_match = (np.asarray(desired).dtype.type == + np.asarray(actual).dtype.type) + if isdesnat and isactnat: + # If both are NaT (and have the same dtype -- datetime or + # timedelta) they are considered equal. 
+ if dtypes_match: + return + else: + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + # Inf/nan/negative zero handling + try: + isdesnan = isnan(desired) + isactnan = isnan(actual) + if isdesnan and isactnan: + return # both nan, so equal + + # handle signed zero specially for floats + array_actual = np.asarray(actual) + array_desired = np.asarray(desired) + if (array_actual.dtype.char in 'Mm' or + array_desired.dtype.char in 'Mm'): + # version 1.18 + # until this version, isnan failed for datetime64 and timedelta64. + # Now it succeeds but comparison to scalar with a different type + # emits a DeprecationWarning. + # Avoid that by skipping the next check + raise NotImplementedError('cannot compare to a scalar ' + 'with a different type') + + if desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # Explicitly use __eq__ for comparison, gh-2552 + if not (desired == actual): + raise AssertionError(msg) + + except (DeprecationWarning, FutureWarning) as e: + # this handles the case when the two types are not even comparable + if 'elementwise == comparison' in e.args[0]: + raise AssertionError(msg) + else: + raise + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. + + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of `actual` and `desired` satisfy:: + + abs(desired-actual) < float64(1.5 * 10**(-decimal)) + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. 
+ + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> from numpy.testing import assert_almost_equal + >>> assert_almost_equal(2.3333333333333, 2.33333334) + >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 10 decimals + ACTUAL: 2.3333333333333 + DESIRED: 2.33333334 + + >>> assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 9 decimals + + Mismatched elements: 1 / 2 (50%) + Max absolute difference among violations: 6.66669964e-09 + Max relative difference among violations: 2.85715698e-09 + ACTUAL: array([1. , 2.333333333]) + DESIRED: array([1. , 2.33333334]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy import imag, iscomplexobj, real + from numpy._core import ndarray + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (isfinite(desired) and isfinite(actual)): + if isnan(desired) or isnan(actual): + if not (isnan(desired) and isnan(actual)): + raise AssertionError(_build_err_msg()) + elif not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)): + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual, desired, significant=7, err_msg='', + verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. 
+ err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, + ... significant=8) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, + ... significant=8) + Traceback (most recent call last): + ... + AssertionError: + Items are not equal to 8 significant digits: + ACTUAL: 1.234567e-21 + DESIRED: 1.2345672e-21 + + the evaluated condition that raises the exception is + + >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) + True + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + (actual, desired) = map(float, (actual, desired)) + if desired == actual: + return + # Normalized the numbers to be in range (-10.0,10.0) + # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) + with np.errstate(invalid='ignore'): + scale = 0.5 * (np.abs(desired) + np.abs(actual)) + scale = np.power(10, np.floor(np.log10(scale))) + try: + sc_desired = desired / scale + except ZeroDivisionError: + sc_desired = 0.0 + try: + sc_actual = actual / scale + except ZeroDivisionError: + sc_actual = 0.0 + msg = build_err_msg( + [actual, desired], err_msg, + header='Items are not equal to %d significant digits:' % significant, + verbose=verbose) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (isfinite(desired) and isfinite(actual)): + if isnan(desired) or isnan(actual): + if not (isnan(desired) and isnan(actual)): + raise AssertionError(msg) + elif not desired == actual: + raise AssertionError(msg) + return + except (TypeError, NotImplementedError): + pass + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant - 1)): + raise AssertionError(msg) + + +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='', + precision=6, equal_nan=True, equal_inf=True, + *, strict=False, names=('ACTUAL', 'DESIRED')): + __tracebackhide__ = True # Hide traceback for py.test + from numpy._core import all, array2string, errstate, inf, isnan, max, object_ + + x = np.asanyarray(x) + y = np.asanyarray(y) + + # original array for output formatting + ox, oy = x, y + + def isnumber(x): + return x.dtype.char in '?bhilqpBHILQPefdgFDG' + + def istime(x): + return x.dtype.char in "Mm" + + def isvstring(x): + return x.dtype.char == "T" + + def func_assert_same_pos(x, y, func=isnan, hasval='nan'): + """Handling nan/inf. + + Combine results of running func on x and y, checking that they are True + at the same locations. 
+ + """ + __tracebackhide__ = True # Hide traceback for py.test + + x_id = func(x) + y_id = func(y) + # We include work-arounds here to handle three types of slightly + # pathological ndarray subclasses: + # (1) all() on `masked` array scalars can return masked arrays, so we + # use != True + # (2) __eq__ on some ndarray subclasses returns Python booleans + # instead of element-wise comparisons, so we cast to np.bool() and + # use isinstance(..., bool) checks + # (3) subclasses with bare-bones __array_function__ implementations may + # not implement np.all(), so favor using the .all() method + # We are not committed to supporting such subclasses, but it's nice to + # support them if possible. + if np.bool(x_id == y_id).all() != True: + msg = build_err_msg( + [x, y], + err_msg + '\n%s location mismatch:' + % (hasval), verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + # If there is a scalar, then here we know the array has the same + # flag as it everywhere, so we should return the scalar flag. + if isinstance(x_id, bool) or x_id.ndim == 0: + return np.bool(x_id) + elif isinstance(y_id, bool) or y_id.ndim == 0: + return np.bool(y_id) + else: + return y_id + + try: + if strict: + cond = x.shape == y.shape and x.dtype == y.dtype + else: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + if x.shape != y.shape: + reason = f'\n(shapes {x.shape}, {y.shape} mismatch)' + else: + reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)' + msg = build_err_msg([x, y], + err_msg + + reason, + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + + flagged = np.bool(False) + if isnumber(x) and isnumber(y): + if equal_nan: + flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan') + + if equal_inf: + flagged |= func_assert_same_pos(x, y, + func=lambda xy: xy == +inf, + hasval='+inf') + flagged |= func_assert_same_pos(x, y, + func=lambda xy: xy == -inf, + hasval='-inf') + + elif istime(x) and istime(y): + # If one is datetime64 and the other timedelta64 there is no point + if equal_nan and x.dtype.type == y.dtype.type: + flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT") + + elif isvstring(x) and isvstring(y): + dt = x.dtype + if equal_nan and dt == y.dtype and hasattr(dt, 'na_object'): + is_nan = (isinstance(dt.na_object, float) and + np.isnan(dt.na_object)) + bool_errors = 0 + try: + bool(dt.na_object) + except TypeError: + bool_errors = 1 + if is_nan or bool_errors: + # nan-like NA object + flagged = func_assert_same_pos( + x, y, func=isnan, hasval=x.dtype.na_object) + + if flagged.ndim > 0: + x, y = x[~flagged], y[~flagged] + # Only do the comparison if actual values are left + if x.size == 0: + return + elif flagged: + # no sense doing comparison if everything is flagged. + return + + val = comparison(x, y) + invalids = np.logical_not(val) + + if isinstance(val, bool): + cond = val + reduced = array([val]) + else: + reduced = val.ravel() + cond = reduced.all() + + # The below comparison is a hack to ensure that fully masked + # results, for which val.ravel().all() returns np.ma.masked, + # do not trigger a failure (np.ma.masked != True evaluates as + # np.ma.masked, which is falsy). 
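+        # A concrete sketch of that corner case (illustrative only):
+        #     val = np.ma.array([1.0], mask=[True]) == 1.0
+        #     val.ravel().all()     # -> np.ma.masked
+        #     np.ma.masked != True  # -> np.ma.masked, which is falsy
+        # so the mismatch branch below is skipped for fully masked input.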
+ if cond != True: + n_mismatch = reduced.size - reduced.sum(dtype=intp) + n_elements = flagged.size if flagged.ndim != 0 else reduced.size + percent_mismatch = 100 * n_mismatch / n_elements + remarks = [f'Mismatched elements: {n_mismatch} / {n_elements} ' + f'({percent_mismatch:.3g}%)'] + + with errstate(all='ignore'): + # ignore errors for non-numeric types + with contextlib.suppress(TypeError): + error = abs(x - y) + if np.issubdtype(x.dtype, np.unsignedinteger): + error2 = abs(y - x) + np.minimum(error, error2, out=error) + + reduced_error = error[invalids] + max_abs_error = max(reduced_error) + if getattr(error, 'dtype', object_) == object_: + remarks.append( + 'Max absolute difference among violations: ' + + str(max_abs_error)) + else: + remarks.append( + 'Max absolute difference among violations: ' + + array2string(max_abs_error)) + + # note: this definition of relative error matches that one + # used by assert_allclose (found in np.isclose) + # Filter values where the divisor would be zero + nonzero = np.bool(y != 0) + nonzero_and_invalid = np.logical_and(invalids, nonzero) + + if all(~nonzero_and_invalid): + max_rel_error = array(inf) + else: + nonzero_invalid_error = error[nonzero_and_invalid] + broadcasted_y = np.broadcast_to(y, error.shape) + nonzero_invalid_y = broadcasted_y[nonzero_and_invalid] + max_rel_error = max(nonzero_invalid_error + / abs(nonzero_invalid_y)) + + if getattr(error, 'dtype', object_) == object_: + remarks.append( + 'Max relative difference among violations: ' + + str(max_rel_error)) + else: + remarks.append( + 'Max relative difference among violations: ' + + array2string(max_rel_error)) + err_msg = str(err_msg) + err_msg += '\n' + '\n'.join(remarks) + msg = build_err_msg([ox, oy], err_msg, + verbose=verbose, header=header, + names=names, + precision=precision) + raise AssertionError(msg) + except ValueError: + import traceback + efmt = traceback.format_exc() + header = f'error during assertion:\n\n{efmt}\n\n{header}' + + msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, + names=names, precision=precision) + raise ValueError(msg) + + +def assert_array_equal(actual, desired, err_msg='', verbose=True, *, + strict=False): + """ + Raises an AssertionError if two array_like objects are not equal. + + Given two array_like objects, check that the shape is equal and all + elements of these objects are equal (but see the Notes for the special + handling of a scalar). An exception is raised at shape mismatch or + conflicting values. In contrast to the standard usage in numpy, NaNs + are compared like numbers, no assertion is raised if both objects have + NaNs in the same positions. + + The usual caution for verifying equality with floating point numbers is + advised. + + .. note:: When either `actual` or `desired` is already an instance of + `numpy.ndarray` and `desired` is not a ``dict``, the behavior of + ``assert_equal(actual, desired)`` is identical to the behavior of this + function. Otherwise, this function performs `np.asanyarray` on the + inputs before comparison, whereas `assert_equal` defines special + comparison rules for common Python types. For example, only + `assert_equal` can be used to compare nested Python lists. In new code, + consider using only `assert_equal`, explicitly converting either + `actual` or `desired` to arrays if the behavior of `assert_array_equal` + is desired. + + Parameters + ---------- + actual : array_like + The actual object to check. + desired : array_like + The desired, expected object. 
+ err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an AssertionError when either the shape or the data + type of the array_like objects does not match. The special + handling for scalars mentioned in the Notes section is disabled. + + .. versionadded:: 1.24.0 + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is array_like, + the function checks that each element of the array_like object is equal to + the scalar. This behaviour can be disabled with the `strict` parameter. + + Examples + -------- + The first assert does not raise an exception: + + >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], + ... [np.exp(0),2.33333, np.nan]) + + Assert fails with numerical imprecision with floats: + + >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan]) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference among violations: 4.4408921e-16 + Max relative difference among violations: 1.41357986e-16 + ACTUAL: array([1. , 3.141593, nan]) + DESIRED: array([1. , 3.141593, nan]) + + Use `assert_allclose` or one of the nulp (number of floating point values) + functions for these cases instead: + + >>> np.testing.assert_allclose([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan], + ... rtol=1e-10, atol=0) + + As mentioned in the Notes section, `assert_array_equal` has special + handling for scalars. Here the test checks that each value in `x` is 3: + + >>> x = np.full((2, 5), fill_value=3) + >>> np.testing.assert_array_equal(x, 3) + + Use `strict` to raise an AssertionError when comparing a scalar with an + array: + + >>> np.testing.assert_array_equal(x, 3, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (shapes (2, 5), () mismatch) + ACTUAL: array([[3, 3, 3, 3, 3], + [3, 3, 3, 3, 3]]) + DESIRED: array(3) + + The `strict` parameter also ensures that the array data types match: + + >>> x = np.array([2, 2, 2]) + >>> y = np.array([2., 2., 2.], dtype=np.float32) + >>> np.testing.assert_array_equal(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not equal + + (dtypes int64, float32 mismatch) + ACTUAL: array([2, 2, 2]) + DESIRED: array([2., 2., 2.], dtype=float32) + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__eq__, actual, desired, err_msg=err_msg, + verbose=verbose, header='Arrays are not equal', + strict=strict) + + +def assert_array_almost_equal(actual, desired, decimal=6, err_msg='', + verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. 
+ + The test verifies identical shapes and that the elements of ``actual`` and + ``desired`` satisfy:: + + abs(desired-actual) < 1.5 * 10**(-decimal) + + That is a looser test than originally documented, but agrees with what the + actual implementation did up to rounding vagaries. An exception is raised + at shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + actual : array_like + The actual object to check. + desired : array_like + The desired, expected object. + decimal : int, optional + Desired precision, default is 6. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + the first assert does not raise an exception + + >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], + ... [1.0,2.333,np.nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33339,np.nan], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference among violations: 6.e-05 + Max relative difference among violations: 2.57136612e-05 + ACTUAL: array([1. , 2.33333, nan]) + DESIRED: array([1. , 2.33339, nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33333, 5], decimal=5) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not almost equal to 5 decimals + + nan location mismatch: + ACTUAL: array([1. , 2.33333, nan]) + DESIRED: array([1. , 2.33333, 5. ]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy._core import number, result_type + from numpy._core.fromnumeric import any as npany + from numpy._core.numerictypes import issubdtype + + def compare(x, y): + try: + if npany(isinf(x)) or npany(isinf(y)): + xinfid = isinf(x) + yinfid = isinf(y) + if not (xinfid == yinfid).all(): + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except (TypeError, NotImplementedError): + pass + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) + y = np.asanyarray(y, dtype) + z = abs(x - y) + + if not issubdtype(z.dtype, number): + z = z.astype(np.float64) # handle object arrays + + return z < 1.5 * 10.0**(-decimal) + + assert_array_compare(compare, actual, desired, err_msg=err_msg, + verbose=verbose, + header=('Arrays are not almost equal to %d decimals' % decimal), + precision=decimal) + + +def assert_array_less(x, y, err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two array_like objects are not ordered by less + than. + + Given two array_like objects `x` and `y`, check that the shape is equal and + all elements of `x` are strictly less than the corresponding elements of + `y` (but see the Notes for the special handling of a scalar). 
An exception + is raised at shape mismatch or values that are not correctly ordered. In + contrast to the standard usage in NumPy, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + x : array_like + The smaller object to check. + y : array_like + The larger object to compare. + err_msg : string + The error message to be printed in case of failure. + verbose : bool + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an AssertionError when either the shape or the data + type of the array_like objects does not match. The special + handling for scalars mentioned in the Notes section is disabled. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If x is not strictly smaller than y, element-wise. + + See Also + -------- + assert_array_equal: tests objects for equality + assert_array_almost_equal: test objects for equality up to precision + + Notes + ----- + When one of `x` and `y` is a scalar and the other is array_like, the + function performs the comparison as though the scalar were broadcasted + to the shape of the array. This behaviour can be disabled with the `strict` + parameter. + + Examples + -------- + The following assertion passes because each finite element of `x` is + strictly less than the corresponding element of `y`, and the NaNs are in + corresponding locations. + + >>> x = [1.0, 1.0, np.nan] + >>> y = [1.1, 2.0, np.nan] + >>> np.testing.assert_array_less(x, y) + + The following assertion fails because the zeroth element of `x` is no + longer strictly less than the zeroth element of `y`. + + >>> y[0] = 1 + >>> np.testing.assert_array_less(x, y) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + Mismatched elements: 1 / 3 (33.3%) + Max absolute difference among violations: 0. + Max relative difference among violations: 0. + x: array([ 1., 1., nan]) + y: array([ 1., 2., nan]) + + Here, `y` is a scalar, so each element of `x` is compared to `y`, and + the assertion passes. + + >>> x = [1.0, 4.0] + >>> y = 5.0 + >>> np.testing.assert_array_less(x, y) + + However, with ``strict=True``, the assertion will fail because the shapes + do not match. + + >>> np.testing.assert_array_less(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + (shapes (2,), () mismatch) + x: array([1., 4.]) + y: array(5.) + + With ``strict=True``, the assertion also fails if the dtypes of the two + arrays do not match. + + >>> y = [5, 5] + >>> np.testing.assert_array_less(x, y, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Arrays are not strictly ordered `x < y` + + (dtypes float64, int64 mismatch) + x: array([1., 4.]) + y: array([5, 5]) + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, + verbose=verbose, + header='Arrays are not strictly ordered `x < y`', + equal_inf=False, + strict=strict, + names=('x', 'y')) + + +def runstring(astr, dict): + exec(astr, dict) + + +def assert_string_equal(actual, desired): + """ + Test if two strings are equal. + + If the given strings are equal, `assert_string_equal` does nothing. + If they are not equal, an AssertionError is raised, and the diff + between the strings is shown. + + Parameters + ---------- + actual : str + The string to test for equality against the expected string. + desired : str + The expected string. 
+
+    Examples
+    --------
+    >>> np.testing.assert_string_equal('abc', 'abc')
+    >>> np.testing.assert_string_equal('abc', 'abcd')
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ...
+    AssertionError: Differences in strings:
+    - abc
+    + abcd
+    ?    +
+
+    """
+    # delay import of difflib to reduce startup time
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import difflib
+
+    if not isinstance(actual, str):
+        raise AssertionError(repr(type(actual)))
+    if not isinstance(desired, str):
+        raise AssertionError(repr(type(desired)))
+    if desired == actual:
+        return
+
+    diff = list(difflib.Differ().compare(actual.splitlines(True),
+                                         desired.splitlines(True)))
+    diff_list = []
+    while diff:
+        d1 = diff.pop(0)
+        if d1.startswith('  '):
+            continue
+        if d1.startswith('- '):
+            l = [d1]
+            d2 = diff.pop(0)
+            if d2.startswith('? '):
+                l.append(d2)
+                d2 = diff.pop(0)
+            if not d2.startswith('+ '):
+                raise AssertionError(repr(d2))
+            l.append(d2)
+            if diff:
+                d3 = diff.pop(0)
+                if d3.startswith('? '):
+                    l.append(d3)
+                else:
+                    diff.insert(0, d3)
+            if d2[2:] == d1[2:]:
+                continue
+            diff_list.extend(l)
+            continue
+        raise AssertionError(repr(d1))
+    if not diff_list:
+        return
+    msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
+    if actual != desired:
+        raise AssertionError(msg)
+
+
+def rundocs(filename=None, raise_on_error=True):
+    """
+    Run doctests found in the given file.
+
+    By default `rundocs` raises an AssertionError on failure.
+
+    Parameters
+    ----------
+    filename : str
+        The path to the file for which the doctests are run.
+    raise_on_error : bool
+        Whether to raise an AssertionError when a doctest fails. Default is
+        True.
+
+    Notes
+    -----
+    The doctests can be run by the user/developer by adding the ``doctests``
+    argument to the ``test()`` call. For example, to run all tests (including
+    doctests) for ``numpy.lib``:
+
+    >>> np.lib.test(doctests=True)  # doctest: +SKIP
+    """
+    import doctest
+
+    from numpy.distutils.misc_util import exec_mod_from_location
+    if filename is None:
+        f = sys._getframe(1)
+        filename = f.f_globals['__file__']
+    name = os.path.splitext(os.path.basename(filename))[0]
+    m = exec_mod_from_location(name, filename)
+
+    tests = doctest.DocTestFinder().find(m)
+    runner = doctest.DocTestRunner(verbose=False)
+
+    msg = []
+    if raise_on_error:
+        out = msg.append
+    else:
+        out = None
+
+    for test in tests:
+        runner.run(test, out=out)
+
+    if runner.failures > 0 and raise_on_error:
+        raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+
+
+def check_support_sve(__cache=[]):
+    """
+    Check whether the host CPU supports Arm SVE (see gh-22982); the result
+    is cached across calls.
+    """
+
+    if __cache:
+        return __cache[0]
+
+    import subprocess
+    cmd = 'lscpu'
+    try:
+        output = subprocess.run(cmd, capture_output=True, text=True)
+        result = 'sve' in output.stdout
+    except (OSError, subprocess.SubprocessError):
+        result = False
+    __cache.append(result)
+    return __cache[0]
+
+
+#
+# assert_raises and assert_raises_regex are taken from unittest.
+#
+import unittest
+
+
+class _Dummy(unittest.TestCase):
+    def nop(self):
+        pass
+
+
+_d = _Dummy('nop')
+
+
+def assert_raises(*args, **kwargs):
+    """
+    assert_raises(exception_class, callable, *args, **kwargs)
+    assert_raises(exception_class)
+
+    Fail unless an exception of class exception_class is thrown
+    by callable when invoked with arguments args and keyword
+    arguments kwargs. If a different type of exception is
+    thrown, it will not be caught, and the test case will be
+    deemed to have suffered an error, exactly as for an
+    unexpected exception.
+ + Alternatively, `assert_raises` can be used as a context manager: + + >>> from numpy.testing import assert_raises + >>> with assert_raises(ZeroDivisionError): + ... 1 / 0 + + is equivalent to + + >>> def div(x, y): + ... return x / y + >>> assert_raises(ZeroDivisionError, div, 1, 0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaises(*args, **kwargs) + + +def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): + """ + assert_raises_regex(exception_class, expected_regexp, callable, *args, + **kwargs) + assert_raises_regex(exception_class, expected_regexp) + + Fail unless an exception of class exception_class and with message that + matches expected_regexp is thrown by callable when invoked with arguments + args and keyword arguments kwargs. + + Alternatively, can be used as a context manager like `assert_raises`. + """ + __tracebackhide__ = True # Hide traceback for py.test + return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. + + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. + + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. + + """ + if testmatch is None: + testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith('_'): + setattr(cls, funcname, decorator(function)) + + +def measure(code_str, times=1, label=None): + """ + Return elapsed time for executing code in the namespace of the caller. + + The supplied code string is compiled with the Python builtin ``compile``. + The precision of the timing is 10 milli-seconds. If the code will execute + fast on this timescale, it can be executed many times to get reasonable + timing accuracy. + + Parameters + ---------- + code_str : str + The code to be timed. + times : int, optional + The number of times the code is executed. Default is 1. The code is + only compiled once. + label : str, optional + A label to identify `code_str` with. This is passed into ``compile`` + as the second argument (for run-time error messages). + + Returns + ------- + elapsed : float + Total elapsed time in seconds for executing `code_str` `times` times. 
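+
+    Notes
+    -----
+    A sketch of the arithmetic: the timing is taken from ``jiffies``
+    (1/100 s ticks) and the tick delta is multiplied by 0.01, so, for
+    example, a delta of 7 ticks is reported as ``0.07`` seconds.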
+ + Examples + -------- + >>> times = 10 + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times) + >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, f'Test name: {label} ', 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01 * elapsed + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. + """ + if not HAS_REFCOUNT: + return True + + import gc + + import numpy as np + + b = np.arange(100 * 100).reshape(100, 100) + c = b + i = 1 + + gc.disable() + try: + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + finally: + gc.enable() + + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, + err_msg='', verbose=True, *, strict=False): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + Given two array_like objects, check that their shapes and all elements + are equal (but see the Notes for the special handling of a scalar). An + exception is raised if the shapes mismatch or any values conflict. In + contrast to the standard usage in numpy, NaNs are compared like numbers, + no assertion is raised if both objects have NaNs in the same positions. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note + that ``allclose`` has different default values). It compares the difference + between `actual` and `desired` to ``atol + rtol * abs(desired)``. + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + strict : bool, optional + If True, raise an ``AssertionError`` when either the shape or the data + type of the arguments does not match. The special handling of scalars + mentioned in the Notes section is disabled. + + .. versionadded:: 2.0.0 + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Notes + ----- + When one of `actual` and `desired` is a scalar and the other is + array_like, the function performs the comparison as if the scalar were + broadcasted to the shape of the array. + This behaviour can be disabled with the `strict` parameter. + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0) + + As mentioned in the Notes section, `assert_allclose` has special + handling for scalars. Here, the test checks that the value of `numpy.sin` + is nearly zero at integer multiples of π. + + >>> x = np.arange(3) * np.pi + >>> np.testing.assert_allclose(np.sin(x), 0, atol=1e-15) + + Use `strict` to raise an ``AssertionError`` when comparing an array + with one or more dimensions against a scalar. 
+ + >>> np.testing.assert_allclose(np.sin(x), 0, atol=1e-15, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Not equal to tolerance rtol=1e-07, atol=1e-15 + + (shapes (3,), () mismatch) + ACTUAL: array([ 0.000000e+00, 1.224647e-16, -2.449294e-16]) + DESIRED: array(0) + + The `strict` parameter also ensures that the array data types match: + + >>> y = np.zeros(3, dtype=np.float32) + >>> np.testing.assert_allclose(np.sin(x), y, atol=1e-15, strict=True) + Traceback (most recent call last): + ... + AssertionError: + Not equal to tolerance rtol=1e-07, atol=1e-15 + + (dtypes float64, float32 mismatch) + ACTUAL: array([ 0.000000e+00, 1.224647e-16, -2.449294e-16]) + DESIRED: array([0., 0., 0.], dtype=float32) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + def compare(x, y): + return np._core.numeric.isclose(x, y, rtol=rtol, atol=atol, + equal_nan=equal_nan) + + actual, desired = np.asanyarray(actual), np.asanyarray(desired) + header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}' + assert_array_compare(compare, actual, desired, err_msg=str(err_msg), + verbose=verbose, header=header, equal_nan=equal_nan, + strict=strict) + + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. + nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. + spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) + Traceback (most recent call last): + ... + AssertionError: Arrays are not equal to 1 ULP (max is 2) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x - y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = f"Arrays are not equal to {nulp} ULP" + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = f"Arrays are not equal to {nulp} ULP (max is {max_nulp:g})" + raise AssertionError(msg) + + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. + + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. 
+ + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. + + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError("Arrays are not almost equal up to %g " + "ULP (max difference is %g ULP)" % + (maxulp, np.max(ret))) + return ret + + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + dtype : dtype, optional + Data-type to convert `x` and `y` to if given. Default is None. + + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. + + Notes + ----- + For computing the ULP difference, this API does not differentiate between + various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000 + is zero). + + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) + 1.0 + """ + import numpy as np + if dtype: + x = np.asarray(x, dtype=dtype) + y = np.asarray(y, dtype=dtype) + else: + x = np.asarray(x) + y = np.asarray(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array([x], dtype=t) + y = np.array([y], dtype=t) + + x[np.isnan(x)] = np.nan + y[np.isnan(y)] = np.nan + + if not x.shape == y.shape: + raise ValueError(f"Arrays do not have the same shape: {x.shape} - {y.shape}") + + def _diff(rx, ry, vdt): + diff = np.asarray(rx - ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx < 0] + elif rx < 0: + rx = comp - rx + + return rx + + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation + of x.""" + import numpy as np + if x.dtype == np.float16: + return _integer_repr(x, np.int16, np.int16(-2**15)) + elif x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-2**31)) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-2**63)) + else: + raise ValueError(f'Unsupported dtype {x.dtype}') + + +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with suppress_warnings() as sup: + l = sup.record(warning_class) + yield + if not len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError("No warning raised" + name_str) + + +def assert_warns(warning_class, *args, **kwargs): + """ + Fail unless the given callable throws the 
specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager:: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable, optional + Callable to test + *args : Arguments + Arguments for `func`. + **kwargs : Kwargs + Keyword arguments for `func`. + + Returns + ------- + The value returned by `func`. + + Examples + -------- + >>> import warnings + >>> def deprecated_func(num): + ... warnings.warn("Please upgrade", DeprecationWarning) + ... return num*num + >>> with np.testing.assert_warns(DeprecationWarning): + ... assert deprecated_func(4) == 16 + >>> # or passing a func + >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) + >>> assert ret == 16 + """ + if not args and not kwargs: + return _assert_warns_context(warning_class) + elif len(args) < 1: + if "match" in kwargs: + raise RuntimeError( + "assert_warns does not use 'match' kwarg, " + "use pytest.warns instead" + ) + raise RuntimeError("assert_warns(...) needs at least one arg") + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if len(l) > 0: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError(f'Got warnings{name_str}: {l}') + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager:: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. 
+
+    """
+    if not args:
+        return _assert_no_warnings_context()
+
+    func = args[0]
+    args = args[1:]
+    with _assert_no_warnings_context(name=func.__name__):
+        return func(*args, **kwargs)
+
+
+def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
+    """
+    Generator producing data with different alignment and offsets
+    to test SIMD vectorization.
+
+    Parameters
+    ----------
+    dtype : dtype
+        data type to produce
+    type : string
+        'unary': create data for unary operations, creates one input
+        and output array
+        'binary': create data for binary operations, creates two input
+        arrays and one output array
+    max_size : integer
+        maximum size of data to produce
+
+    Returns
+    -------
+    If type is 'unary', yields one output array, one input array and a
+    message containing information on the data.
+    If type is 'binary', yields one output array, two input arrays and a
+    message containing information on the data.
+
+    """
+    ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
+    bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
+    for o in range(3):
+        for s in range(o + 2, max(o + 3, max_size)):
+            if type == 'unary':
+                inp = lambda: arange(s, dtype=dtype)[o:]
+                out = empty((s,), dtype=dtype)[o:]
+                yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
+                d = inp()
+                yield d, d, ufmt % (o, o, s, dtype, 'in place')
+                yield out[1:], inp()[:-1], ufmt % \
+                    (o + 1, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp()[1:], ufmt % \
+                    (o, o + 1, s - 1, dtype, 'out of place')
+                yield inp()[:-1], inp()[1:], ufmt % \
+                    (o, o + 1, s - 1, dtype, 'aliased')
+                yield inp()[1:], inp()[:-1], ufmt % \
+                    (o + 1, o, s - 1, dtype, 'aliased')
+            if type == 'binary':
+                inp1 = lambda: arange(s, dtype=dtype)[o:]
+                inp2 = lambda: arange(s, dtype=dtype)[o:]
+                out = empty((s,), dtype=dtype)[o:]
+                yield out, inp1(), inp2(), bfmt % \
+                    (o, o, o, s, dtype, 'out of place')
+                d = inp1()
+                yield d, d, inp2(), bfmt % \
+                    (o, o, o, s, dtype, 'in place1')
+                d = inp2()
+                yield d, inp1(), d, bfmt % \
+                    (o, o, o, s, dtype, 'in place2')
+                yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+                    (o + 1, o, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+                    (o, o + 1, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+                    (o, o, o + 1, s - 1, dtype, 'out of place')
+                yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+                    (o + 1, o, o, s - 1, dtype, 'aliased')
+                yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+                    (o, o + 1, o, s - 1, dtype, 'aliased')
+                yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+                    (o, o, o + 1, s - 1, dtype, 'aliased')
+
+
+class IgnoreException(Exception):
+    "Ignoring this exception due to disabled feature"
+    pass
+
+
+@contextlib.contextmanager
+def tempdir(*args, **kwargs):
+    """Context manager to provide a temporary test folder.
+
+    All arguments are passed as-is to the underlying tempfile.mkdtemp
+    function.
+
+    """
+    tmpdir = mkdtemp(*args, **kwargs)
+    try:
+        yield tmpdir
+    finally:
+        shutil.rmtree(tmpdir)
+
+
+@contextlib.contextmanager
+def temppath(*args, **kwargs):
+    """Context manager for temporary files.
+
+    Context manager that returns the path to a closed temporary file. Its
+    parameters are the same as for tempfile.mkstemp and are passed directly
+    to that function. The underlying file is removed when the context is
+    exited, so it should be closed at that time.
+
+    Windows does not allow a temporary file to be opened if it is already
+    open, so the underlying file must be closed after opening before it
+    can be opened again.
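+
+    Examples
+    --------
+    A minimal usage sketch (``suffix`` is passed through to
+    ``tempfile.mkstemp``):
+
+    >>> with temppath(suffix='.txt') as path:  # doctest: +SKIP
+    ...     with open(path, 'w') as f:
+    ...         _ = f.write('temporary test data')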
+ + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """ Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with np.testing.clear_and_catch_warnings( + ... modules=[np._core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np._core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... # np._core.fromnumeric + """ + class_modules = () + + def __init__(self, record=False, modules=()): + self.modules = set(modules).union(self.class_modules) + self._warnreg_copies = {} + super().__init__(record=record) + + def __enter__(self): + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod_reg = mod.__warningregistry__ + self._warnreg_copies[mod] = mod_reg.copy() + mod_reg.clear() + return super().__enter__() + + def __exit__(self, *exc_info): + super().__exit__(*exc_info) + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod.__warningregistry__.clear() + if mod in self._warnreg_copies: + mod.__warningregistry__.update(self._warnreg_copies[mod]) + + +class suppress_warnings: + """ + Context manager and decorator doing much the same as + ``warnings.catch_warnings``. + + However, it also provides a filter mechanism to work around + https://bugs.python.org/issue4180. + + This bug causes Python before 3.4 to not reliably show warnings again + after they have been ignored once (even within catch_warnings). It + means that no "ignore" filter can be used easily, since following + tests might need to see the warning. Additionally it allows easier + specificity for testing warnings and can be nested. + + Parameters + ---------- + forwarding_rule : str, optional + One of "always", "once", "module", or "location". Analogous to + the usual warnings module filter mode, it is useful to reduce + noise mostly on the outmost level. Unsuppressed and unrecorded + warnings will be forwarded based on this rule. Defaults to "always". 
+        "location" is equivalent to the warnings "default", matching by the
+        exact location the warning originated from.
+
+    Notes
+    -----
+    Filters added inside the context manager will be discarded again
+    when leaving it. Upon entering, all filters defined outside a
+    context will be applied automatically.
+
+    When a recording filter is added, matching warnings are stored in the
+    ``log`` attribute as well as in the list returned by ``record``.
+
+    If filters are added and the ``module`` keyword is given, the
+    warning registry of this module will additionally be cleared when
+    applying it, entering the context, or exiting it. This could cause
+    warnings to appear a second time after leaving the context if they
+    were configured to be printed once (default) and were already
+    printed before the context was entered.
+
+    Nesting this context manager will work as expected when the
+    forwarding rule is "always" (default). Unfiltered and unrecorded
+    warnings will be passed out and be matched by the outer level.
+    On the outmost level they will be printed (or caught by another
+    warnings context). The forwarding rule argument can modify this
+    behaviour.
+
+    Like ``catch_warnings`` this context manager is not threadsafe.
+
+    Examples
+    --------
+
+    With a context manager::
+
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Some text")
+            sup.filter(module=np.ma.core)
+            log = sup.record(FutureWarning, "Does this occur?")
+            command_giving_warnings()
+            # The FutureWarning was given once, the filtered warnings were
+            # ignored. All other warnings abide outside settings (may be
+            # printed/error)
+            assert_(len(log) == 1)
+            assert_(len(sup.log) == 1)  # also stored in log attribute
+
+    Or as a decorator::
+
+        sup = np.testing.suppress_warnings()
+        sup.filter(module=np.ma.core)  # module must match exactly
+        @sup
+        def some_function():
+            # do something which causes a warning in np.ma.core
+            pass
+    """
+    def __init__(self, forwarding_rule="always"):
+        self._entered = False
+
+        # Suppressions are either instance-level or defined inside one
+        # with block:
+        self._suppressions = []
+
+        if forwarding_rule not in {"always", "module", "once", "location"}:
+            raise ValueError("unsupported forwarding rule.")
+        self._forwarding_rule = forwarding_rule
+
+    def _clear_registries(self):
+        if hasattr(warnings, "_filters_mutated"):
+            # clearing the registry should not be necessary on new pythons,
+            # instead the filters should be mutated.
+            warnings._filters_mutated()
+            return
+        # Simply clear the registry; this should normally be harmless.
+        # Note that on new pythons it would be invalidated anyway.
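+        # Shape of what gets cleared, as a sketch (the keys are an internal
+        # detail of the warnings module):
+        #     module.__warningregistry__  # e.g. {('msg', UserWarning, 42): True}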
+ for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. 
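+
+        Examples
+        --------
+        A minimal sketch of recording a matched warning:
+
+        >>> import warnings
+        >>> with np.testing.suppress_warnings() as sup:
+        ...     log = sup.record(UserWarning, "matching text")
+        ...     warnings.warn("matching text and more", UserWarning)
+        >>> len(log)
+        1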
+ """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, use_warnmsg=None, **kwargs): + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. 
+ """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func + + +@contextlib.contextmanager +def _assert_no_gc_cycles_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + + # not meaningful to test if there is no refcounting + if not HAS_REFCOUNT: + yield + return + + assert_(gc.isenabled()) + gc.disable() + gc_debug = gc.get_debug() + try: + for i in range(100): + if gc.collect() == 0: + break + else: + raise RuntimeError( + "Unable to fully collect garbage - perhaps a __del__ method " + "is creating more reference cycles?") + + gc.set_debug(gc.DEBUG_SAVEALL) + yield + # gc.collect returns the number of unreachable objects in cycles that + # were found -- we are checking that no cycles were created in the context + n_objects_in_cycles = gc.collect() + objects_in_cycles = gc.garbage[:] + finally: + del gc.garbage[:] + gc.set_debug(gc_debug) + gc.enable() + + if n_objects_in_cycles: + name_str = f' when calling {name}' if name is not None else '' + raise AssertionError( + "Reference cycles were found{}: {} objects were collected, " + "of which {} are shown below:{}" + .format( + name_str, + n_objects_in_cycles, + len(objects_in_cycles), + ''.join( + "\n {} object with id={}:\n {}".format( + type(o).__name__, + id(o), + pprint.pformat(o).replace('\n', '\n ') + ) for o in objects_in_cycles + ) + ) + ) + + +def assert_no_gc_cycles(*args, **kwargs): + """ + Fail if the given callable produces any reference cycles. + + If called with all arguments omitted, may be used as a context manager:: + + with assert_no_gc_cycles(): + do_something() + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + Nothing. The result is deliberately discarded to ensure that all cycles + are found. + + """ + if not args: + return _assert_no_gc_cycles_context() + + func = args[0] + args = args[1:] + with _assert_no_gc_cycles_context(name=func.__name__): + func(*args, **kwargs) + + +def break_cycles(): + """ + Break reference cycles by calling gc.collect + Objects can call other objects' methods (for instance, another object's + __del__) inside their own __del__. On PyPy, the interpreter only runs + between calls to gc.collect, so multiple calls are needed to completely + release all cycles. + """ + + gc.collect() + if IS_PYPY: + # a few more, just to make sure all the finalizers are called + gc.collect() + gc.collect() + gc.collect() + gc.collect() + + +def requires_memory(free_bytes): + """Decorator to skip a test if not enough memory is available""" + import pytest + + def decorator(func): + @wraps(func) + def wrapper(*a, **kw): + msg = check_free_memory(free_bytes) + if msg is not None: + pytest.skip(msg) + + try: + return func(*a, **kw) + except MemoryError: + # Probably ran out of memory regardless: don't regard as failure + pytest.xfail("MemoryError raised") + + return wrapper + + return decorator + + +def check_free_memory(free_bytes): + """ + Check whether `free_bytes` amount of memory is currently free. 
+ Returns: None if enough memory available, otherwise error message + """ + env_var = 'NPY_AVAILABLE_MEM' + env_value = os.environ.get(env_var) + if env_value is not None: + try: + mem_free = _parse_size(env_value) + except ValueError as exc: + raise ValueError(f'Invalid environment variable {env_var}: {exc}') + + msg = (f'{free_bytes / 1e9} GB memory required, but environment variable ' + f'NPY_AVAILABLE_MEM={env_value} set') + else: + mem_free = _get_mem_available() + + if mem_free is None: + msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM " + "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run " + "the test.") + mem_free = -1 + else: + free_bytes_gb = free_bytes / 1e9 + mem_free_gb = mem_free / 1e9 + msg = f'{free_bytes_gb} GB memory required, but {mem_free_gb} GB available' + + return msg if mem_free < free_bytes else None + + +def _parse_size(size_str): + """Convert memory size strings ('12 GB' etc.) to float""" + suffixes = {'': 1, 'b': 1, + 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4, + 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4, + 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4} + + pipe_suffixes = "|".join(suffixes.keys()) + + size_re = re.compile(fr'^\s*(\d+|\d+\.\d+)\s*({pipe_suffixes})\s*$', re.I) + + m = size_re.match(size_str.lower()) + if not m or m.group(2) not in suffixes: + raise ValueError(f'value {size_str!r} not a valid size') + return int(float(m.group(1)) * suffixes[m.group(2)]) + + +def _get_mem_available(): + """Return available memory in bytes, or None if unknown.""" + try: + import psutil + return psutil.virtual_memory().available + except (ImportError, AttributeError): + pass + + if sys.platform.startswith('linux'): + info = {} + with open('/proc/meminfo') as f: + for line in f: + p = line.split() + info[p[0].strip(':').lower()] = int(p[1]) * 1024 + + if 'memavailable' in info: + # Linux >= 3.14 + return info['memavailable'] + else: + return info['memfree'] + info['cached'] + + return None + + +def _no_tracing(func): + """ + Decorator to temporarily turn off tracing for the duration of a test. 
+ Needed in tests that check refcounting, otherwise the tracing itself + influences the refcounts + """ + if not hasattr(sys, 'gettrace'): + return func + else: + @wraps(func) + def wrapper(*args, **kwargs): + original_trace = sys.gettrace() + try: + sys.settrace(None) + return func(*args, **kwargs) + finally: + sys.settrace(original_trace) + return wrapper + + +def _get_glibc_version(): + try: + ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1] + except Exception: + ver = '0.0' + + return ver + + +_glibcver = _get_glibc_version() +_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) + + +def run_threaded(func, max_workers=8, pass_count=False, + pass_barrier=False, outer_iterations=1, + prepare_args=None): + """Runs a function many times in parallel""" + for _ in range(outer_iterations): + with (concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) + as tpe): + if prepare_args is None: + args = [] + else: + args = prepare_args() + if pass_barrier: + barrier = threading.Barrier(max_workers) + args.append(barrier) + if pass_count: + all_args = [(func, i, *args) for i in range(max_workers)] + else: + all_args = [(func, *args) for i in range(max_workers)] + try: + futures = [] + for arg in all_args: + futures.append(tpe.submit(*arg)) + except RuntimeError as e: + import pytest + pytest.skip(f"Spawning {max_workers} threads failed with " + f"error {e!r} (likely due to resource limits on the " + "system running the tests)") + finally: + if len(futures) < max_workers and pass_barrier: + barrier.abort() + for f in futures: + f.result() diff --git a/python/numpy/testing/_private/utils.pyi b/python/numpy/testing/_private/utils.pyi new file mode 100644 index 000000000..43981a5de --- /dev/null +++ b/python/numpy/testing/_private/utils.pyi @@ -0,0 +1,499 @@ +import ast +import sys +import types +import unittest +import warnings +from collections.abc import Callable, Iterable, Sequence +from contextlib import _GeneratorContextManager +from pathlib import Path +from re import Pattern +from typing import ( + Any, + AnyStr, + ClassVar, + Final, + Generic, + NoReturn, + ParamSpec, + Self, + SupportsIndex, + TypeAlias, + TypeVarTuple, + overload, + type_check_only, +) +from typing import Literal as L +from unittest.case import SkipTest + +from _typeshed import ConvertibleToFloat, GenericPath, StrOrBytesPath, StrPath +from typing_extensions import TypeVar + +import numpy as np +from numpy._typing import ( + ArrayLike, + DTypeLike, + NDArray, + _ArrayLikeDT64_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _ArrayLikeTD64_co, +) + +__all__ = [ # noqa: RUF022 + "IS_EDITABLE", + "IS_MUSL", + "IS_PYPY", + "IS_PYSTON", + "IS_WASM", + "HAS_LAPACK64", + "HAS_REFCOUNT", + "NOGIL_BUILD", + "assert_", + "assert_array_almost_equal_nulp", + "assert_raises_regex", + "assert_array_max_ulp", + "assert_warns", + "assert_no_warnings", + "assert_allclose", + "assert_equal", + "assert_almost_equal", + "assert_approx_equal", + "assert_array_equal", + "assert_array_less", + "assert_string_equal", + "assert_array_almost_equal", + "assert_raises", + "build_err_msg", + "decorate_methods", + "jiffies", + "memusage", + "print_assert_equal", + "rundocs", + "runstring", + "verbose", + "measure", + "IgnoreException", + "clear_and_catch_warnings", + "SkipTest", + "KnownFailureException", + "temppath", + "tempdir", + "suppress_warnings", + "assert_array_compare", + "assert_no_gc_cycles", + "break_cycles", + "check_support_sve", + "run_threaded", +] + +### + +_T = TypeVar("_T") +_Ts = TypeVarTuple("_Ts") 
+_Tss = ParamSpec("_Tss") +_ET = TypeVar("_ET", bound=BaseException, default=BaseException) +_FT = TypeVar("_FT", bound=Callable[..., Any]) +_W_co = TypeVar("_W_co", bound=_WarnLog | None, default=_WarnLog | None, covariant=True) +_T_or_bool = TypeVar("_T_or_bool", default=bool) + +_StrLike: TypeAlias = str | bytes +_RegexLike: TypeAlias = _StrLike | Pattern[Any] +_NumericArrayLike: TypeAlias = _ArrayLikeNumber_co | _ArrayLikeObject_co + +_ExceptionSpec: TypeAlias = type[_ET] | tuple[type[_ET], ...] +_WarningSpec: TypeAlias = type[Warning] +_WarnLog: TypeAlias = list[warnings.WarningMessage] +_ToModules: TypeAlias = Iterable[types.ModuleType] + +# Must return a bool or an ndarray/generic type that is supported by `np.logical_and.reduce` +_ComparisonFunc: TypeAlias = Callable[ + [NDArray[Any], NDArray[Any]], + bool | np.bool | np.number | NDArray[np.bool | np.number | np.object_], +] + +# Type-check only `clear_and_catch_warnings` subclasses for both values of the +# `record` parameter. Copied from the stdlib `warnings` stubs. +@type_check_only +class _clear_and_catch_warnings_with_records(clear_and_catch_warnings): + def __enter__(self) -> list[warnings.WarningMessage]: ... + +@type_check_only +class _clear_and_catch_warnings_without_records(clear_and_catch_warnings): + def __enter__(self) -> None: ... + +### + +verbose: int = 0 +NUMPY_ROOT: Final[Path] = ... +IS_INSTALLED: Final[bool] = ... +IS_EDITABLE: Final[bool] = ... +IS_MUSL: Final[bool] = ... +IS_PYPY: Final[bool] = ... +IS_PYSTON: Final[bool] = ... +IS_WASM: Final[bool] = ... +HAS_REFCOUNT: Final[bool] = ... +HAS_LAPACK64: Final[bool] = ... +NOGIL_BUILD: Final[bool] = ... + +class KnownFailureException(Exception): ... +class IgnoreException(Exception): ... + +# NOTE: `warnings.catch_warnings` is incorrectly defined as invariant in typeshed +class clear_and_catch_warnings(warnings.catch_warnings[_W_co], Generic[_W_co]): # type: ignore[type-var] # pyright: ignore[reportInvalidTypeArguments] + class_modules: ClassVar[tuple[types.ModuleType, ...]] = () + modules: Final[set[types.ModuleType]] + @overload # record: True + def __init__(self: clear_and_catch_warnings[_WarnLog], /, record: L[True], modules: _ToModules = ()) -> None: ... + @overload # record: False (default) + def __init__(self: clear_and_catch_warnings[None], /, record: L[False] = False, modules: _ToModules = ()) -> None: ... + @overload # record; bool + def __init__(self, /, record: bool, modules: _ToModules = ()) -> None: ... + +class suppress_warnings: + log: Final[_WarnLog] + def __init__(self, /, forwarding_rule: L["always", "module", "once", "location"] = "always") -> None: ... + def __enter__(self) -> Self: ... + def __exit__(self, cls: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, /) -> None: ... + def __call__(self, /, func: _FT) -> _FT: ... + + # + def filter(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> None: ... + def record(self, /, category: type[Warning] = ..., message: str = "", module: types.ModuleType | None = None) -> _WarnLog: ... + +# Contrary to runtime we can't do `os.name` checks while type checking, +# only `sys.platform` checks +if sys.platform == "win32" or sys.platform == "cygwin": + def memusage(processName: str = ..., instance: int = ...) -> int: ... +elif sys.platform == "linux": + def memusage(_proc_pid_stat: StrOrBytesPath = ...) -> int | None: ... +else: + def memusage() -> NoReturn: ... 
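For orientation, a minimal usage sketch of the two warning-control helpers annotated above; the warning messages and the module choice (np.lib) are arbitrary examples, not anything the patch itself prescribes:

import warnings

import numpy as np
from numpy.testing import clear_and_catch_warnings, suppress_warnings

# suppress_warnings: record() collects matching warnings, filter() drops them
with suppress_warnings() as sup:
    log = sup.record(UserWarning, "recorded")  # collect warnings matching "recorded"
    sup.filter(DeprecationWarning)             # silently drop DeprecationWarnings
    warnings.warn("recorded", UserWarning)
assert len(log) == 1

# clear_and_catch_warnings: like warnings.catch_warnings, but also resets the
# __warningregistry__ of the listed modules so "once"-style filters re-trigger
with clear_and_catch_warnings(record=True, modules=[np.lib]) as w:
    warnings.simplefilter("always")
    warnings.warn("example", UserWarning)
assert len(w) == 1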
+ +if sys.platform == "linux": + def jiffies(_proc_pid_stat: StrOrBytesPath | None = None, _load_time: list[float] | None = None) -> int: ... +else: + def jiffies(_load_time: list[float] = []) -> int: ... + +# +def build_err_msg( + arrays: Iterable[object], + err_msg: object, + header: str = ..., + verbose: bool = ..., + names: Sequence[str] = ..., + precision: SupportsIndex | None = ..., +) -> str: ... + +# +def print_assert_equal(test_string: str, actual: object, desired: object) -> None: ... + +# +def assert_(val: object, msg: str | Callable[[], str] = "") -> None: ... + +# +def assert_equal( + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +def assert_almost_equal( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: int = 7, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +# +def assert_approx_equal( + actual: ConvertibleToFloat, + desired: ConvertibleToFloat, + significant: int = 7, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +# +def assert_array_compare( + comparison: _ComparisonFunc, + x: ArrayLike, + y: ArrayLike, + err_msg: object = "", + verbose: bool = True, + header: str = "", + precision: SupportsIndex = 6, + equal_nan: bool = True, + equal_inf: bool = True, + *, + strict: bool = False, + names: tuple[str, str] = ("ACTUAL", "DESIRED"), +) -> None: ... + +# +def assert_array_equal( + actual: object, + desired: object, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_array_almost_equal( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + decimal: float = 6, + err_msg: object = "", + verbose: bool = True, +) -> None: ... + +@overload +def assert_array_less( + x: _ArrayLikeDT64_co, + y: _ArrayLikeDT64_co, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... +@overload +def assert_array_less( + x: _ArrayLikeTD64_co, + y: _ArrayLikeTD64_co, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... +@overload +def assert_array_less( + x: _NumericArrayLike, + y: _NumericArrayLike, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_string_equal(actual: str, desired: str) -> None: ... + +# +@overload +def assert_raises( + exception_class: _ExceptionSpec[_ET], + /, + *, + msg: str | None = None, +) -> unittest.case._AssertRaisesContext[_ET]: ... +@overload +def assert_raises( + exception_class: _ExceptionSpec, + callable: Callable[_Tss, Any], + /, + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> None: ... + +# +@overload +def assert_raises_regex( + exception_class: _ExceptionSpec[_ET], + expected_regexp: _RegexLike, + *, + msg: str | None = None, +) -> unittest.case._AssertRaisesContext[_ET]: ... +@overload +def assert_raises_regex( + exception_class: _ExceptionSpec, + expected_regexp: _RegexLike, + callable: Callable[_Tss, Any], + *args: _Tss.args, + **kwargs: _Tss.kwargs, +) -> None: ... + +# +@overload +def assert_allclose( + actual: _ArrayLikeTD64_co, + desired: _ArrayLikeTD64_co, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... 
+@overload +def assert_allclose( + actual: _NumericArrayLike, + desired: _NumericArrayLike, + rtol: float = 1e-7, + atol: float = 0, + equal_nan: bool = True, + err_msg: object = "", + verbose: bool = True, + *, + strict: bool = False, +) -> None: ... + +# +def assert_array_almost_equal_nulp( + x: _ArrayLikeNumber_co, + y: _ArrayLikeNumber_co, + nulp: float = 1, +) -> None: ... + +# +def assert_array_max_ulp( + a: _ArrayLikeNumber_co, + b: _ArrayLikeNumber_co, + maxulp: float = 1, + dtype: DTypeLike | None = None, +) -> NDArray[Any]: ... + +# +@overload +def assert_warns(warning_class: _WarningSpec) -> _GeneratorContextManager[None]: ... +@overload +def assert_warns(warning_class: _WarningSpec, func: Callable[_Tss, _T], *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... + +# +@overload +def assert_no_warnings() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_warnings(func: Callable[_Tss, _T], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> _T: ... + +# +@overload +def assert_no_gc_cycles() -> _GeneratorContextManager[None]: ... +@overload +def assert_no_gc_cycles(func: Callable[_Tss, Any], /, *args: _Tss.args, **kwargs: _Tss.kwargs) -> None: ... + +### + +# +@overload +def tempdir( + suffix: None = None, + prefix: None = None, + dir: None = None, +) -> _GeneratorContextManager[str]: ... +@overload +def tempdir( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def tempdir( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, +) -> _GeneratorContextManager[AnyStr]: ... + +# +@overload +def temppath( + suffix: None = None, + prefix: None = None, + dir: None = None, + text: bool = False, +) -> _GeneratorContextManager[str]: ... +@overload +def temppath( + suffix: AnyStr | None, + prefix: AnyStr | None, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + prefix: AnyStr | None = None, + *, + dir: GenericPath[AnyStr], + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr | None = None, + *, + prefix: AnyStr, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... +@overload +def temppath( + suffix: AnyStr, + prefix: AnyStr | None = None, + dir: GenericPath[AnyStr] | None = None, + text: bool = False, +) -> _GeneratorContextManager[AnyStr]: ... + +# +def check_support_sve(__cache: list[_T_or_bool] = []) -> _T_or_bool: ... # noqa: PYI063 + +# +def decorate_methods( + cls: type, + decorator: Callable[[Callable[..., Any]], Any], + testmatch: _RegexLike | None = None, +) -> None: ... + +# +@overload +def run_threaded( + func: Callable[[], None], + max_workers: int = 8, + pass_count: bool = False, + pass_barrier: bool = False, + outer_iterations: int = 1, + prepare_args: None = None, +) -> None: ... +@overload +def run_threaded( + func: Callable[[*_Ts], None], + max_workers: int, + pass_count: bool, + pass_barrier: bool, + outer_iterations: int, + prepare_args: tuple[*_Ts], +) -> None: ... 
+@overload
+def run_threaded(
+    func: Callable[[*_Ts], None],
+    max_workers: int = 8,
+    pass_count: bool = False,
+    pass_barrier: bool = False,
+    outer_iterations: int = 1,
+    *,
+    prepare_args: tuple[*_Ts],
+) -> None: ...
+
+#
+def runstring(astr: _StrLike | types.CodeType, dict: dict[str, Any] | None) -> Any: ...  # noqa: ANN401
+def rundocs(filename: StrPath | None = None, raise_on_error: bool = True) -> None: ...
+def measure(code_str: _StrLike | ast.AST, times: int = 1, label: str | None = None) -> float: ...
+def break_cycles() -> None: ...
diff --git a/python/numpy/testing/overrides.py b/python/numpy/testing/overrides.py
new file mode 100644
index 000000000..61771c4c0
--- /dev/null
+++ b/python/numpy/testing/overrides.py
@@ -0,0 +1,84 @@
+"""Tools for testing implementations of __array_function__ and ufunc overrides
+
+
+"""
+
+import numpy._core.umath as _umath
+from numpy import ufunc as _ufunc
+from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions
+
+
+def get_overridable_numpy_ufuncs():
+    """List all numpy ufuncs overridable via `__array_ufunc__`
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    set
+        A set containing all overridable ufuncs in the public numpy API.
+    """
+    ufuncs = {obj for obj in _umath.__dict__.values()
+              if isinstance(obj, _ufunc)}
+    return ufuncs
+
+
+def allows_array_ufunc_override(func):
+    """Determine if a function can be overridden via `__array_ufunc__`
+
+    Parameters
+    ----------
+    func : callable
+        Function that may be overridable via `__array_ufunc__`
+
+    Returns
+    -------
+    bool
+        `True` if `func` is overridable via `__array_ufunc__` and
+        `False` otherwise.
+
+    Notes
+    -----
+    This function is equivalent to ``isinstance(func, np.ufunc)`` and
+    will work correctly for ufuncs defined outside of Numpy.
+
+    """
+    return isinstance(func, _ufunc)
+
+
+def get_overridable_numpy_array_functions():
+    """List all numpy functions overridable via `__array_function__`
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    set
+        A set containing all functions in the public numpy API that are
+        overridable via `__array_function__`.
+
+    """
+    # 'import numpy' doesn't import recfunctions, so make sure it's imported
+    # so the array functions defined there show up in the listing
+    from numpy.lib import recfunctions  # noqa: F401
+    return _array_functions.copy()
+
+def allows_array_function_override(func):
+    """Determine if a Numpy function can be overridden via `__array_function__`
+
+    Parameters
+    ----------
+    func : callable
+        Function that may be overridable via `__array_function__`
+
+    Returns
+    -------
+    bool
+        `True` if `func` is a function in the Numpy API that is
+        overridable via `__array_function__` and `False` otherwise.
+    """
+    return func in _array_functions
diff --git a/python/numpy/testing/overrides.pyi b/python/numpy/testing/overrides.pyi
new file mode 100644
index 000000000..3fefc3f35
--- /dev/null
+++ b/python/numpy/testing/overrides.pyi
@@ -0,0 +1,11 @@
+from collections.abc import Callable, Hashable
+from typing import Any
+
+from typing_extensions import TypeIs
+
+import numpy as np
+
+def get_overridable_numpy_ufuncs() -> set[np.ufunc]: ...
+def get_overridable_numpy_array_functions() -> set[Callable[..., Any]]: ...
+def allows_array_ufunc_override(func: object) -> TypeIs[np.ufunc]: ...
+def allows_array_function_override(func: Hashable) -> bool: ...
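For orientation, a minimal runtime sketch of the four override-introspection helpers declared above; the probed functions (np.add, np.concatenate, np.mean) are arbitrary examples:

import numpy as np
from numpy.testing.overrides import (
    allows_array_function_override,
    allows_array_ufunc_override,
    get_overridable_numpy_array_functions,
    get_overridable_numpy_ufuncs,
)

# Ufuncs dispatch through __array_ufunc__ ...
assert allows_array_ufunc_override(np.add)
assert np.add in get_overridable_numpy_ufuncs()

# ... while plain API functions such as np.concatenate dispatch through
# __array_function__ and are collected in ARRAY_FUNCTIONS.
assert not allows_array_ufunc_override(np.concatenate)
assert allows_array_function_override(np.concatenate)
assert np.mean in get_overridable_numpy_array_functions()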
diff --git a/python/numpy/testing/print_coercion_tables.py b/python/numpy/testing/print_coercion_tables.py new file mode 100644 index 000000000..89f0de393 --- /dev/null +++ b/python/numpy/testing/print_coercion_tables.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 +"""Prints type-coercion tables for the built-in NumPy types + +""" +from collections import namedtuple + +import numpy as np +from numpy._core.numerictypes import obj2sctype + + +# Generic object that can be added, but doesn't do anything else +class GenericObject: + def __init__(self, v): + self.v = v + + def __add__(self, other): + return self + + def __radd__(self, other): + return self + + dtype = np.dtype('O') + +def print_cancast_table(ntypes): + print('X', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + print(row, end=' ') + for col in ntypes: + if np.can_cast(row, col, "equiv"): + cast = "#" + elif np.can_cast(row, col, "safe"): + cast = "=" + elif np.can_cast(row, col, "same_kind"): + cast = "~" + elif np.can_cast(row, col, "unsafe"): + cast = "." + else: + cast = " " + print(cast, end=' ') + print() + +def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, + use_promote_types=False): + print('+', end=' ') + for char in ntypes: + print(char, end=' ') + print() + for row in ntypes: + if row == 'O': + rowtype = GenericObject + else: + rowtype = obj2sctype(row) + + print(row, end=' ') + for col in ntypes: + if col == 'O': + coltype = GenericObject + else: + coltype = obj2sctype(col) + try: + if firstarray: + rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype) + else: + rowvalue = rowtype(inputfirstvalue) + colvalue = coltype(inputsecondvalue) + if use_promote_types: + char = np.promote_types(rowvalue.dtype, colvalue.dtype).char + else: + value = np.add(rowvalue, colvalue) + if isinstance(value, np.ndarray): + char = value.dtype.char + else: + char = np.dtype(type(value)).char + except ValueError: + char = '!' + except OverflowError: + char = '@' + except TypeError: + char = '#' + print(char, end=' ') + print() + + +def print_new_cast_table(*, can_cast=True, legacy=False, flags=False): + """Prints new casts, the values given are default "can-cast" values, not + actual ones. + """ + from numpy._core._multiarray_tests import get_all_cast_information + + cast_table = { + -1: " ", + 0: "#", # No cast (classify as equivalent here) + 1: "#", # equivalent casting + 2: "=", # safe casting + 3: "~", # same-kind casting + 4: ".", # unsafe casting + } + flags_table = { + 0: "▗", 7: "█", + 1: "▚", 2: "▐", 4: "▄", + 3: "▜", 5: "▙", + 6: "▟", + } + + cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"]) + no_cast_info = cast_info(" ", " ", " ") + + casts = get_all_cast_information() + table = {} + dtypes = set() + for cast in casts: + dtypes.add(cast["from"]) + dtypes.add(cast["to"]) + + if cast["from"] not in table: + table[cast["from"]] = {} + to_dict = table[cast["from"]] + + can_cast = cast_table[cast["casting"]] + legacy = "L" if cast["legacy"] else "." + flags = 0 + if cast["requires_pyapi"]: + flags |= 1 + if cast["supports_unaligned"]: + flags |= 2 + if cast["no_floatingpoint_errors"]: + flags |= 4 + + flags = flags_table[flags] + to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags) + + # The np.dtype(x.type) is a bit strange, because dtype classes do + # not expose much yet. 
+    types = np.typecodes["All"]
+
+    def sorter(x):
+        # This is a bit weird hack, to get a table as close as possible to
+        # the one printing all typecodes (but expecting user-dtypes).
+        dtype = np.dtype(x.type)
+        try:
+            indx = types.index(dtype.char)
+        except ValueError:
+            indx = np.inf
+        return (indx, dtype.char)
+
+    dtypes = sorted(dtypes, key=sorter)
+
+    def print_table(field="can_cast"):
+        print('X', end=' ')
+        for dt in dtypes:
+            print(np.dtype(dt.type).char, end=' ')
+        print()
+        for from_dt in dtypes:
+            print(np.dtype(from_dt.type).char, end=' ')
+            row = table.get(from_dt, {})
+            for to_dt in dtypes:
+                print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
+            print()
+
+    if can_cast:
+        # Print the actual table:
+        print()
+        print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
+        print()
+        print_table("can_cast")
+
+    if legacy:
+        print()
+        print("L denotes a legacy cast, '.' a non-legacy one.")
+        print()
+        print_table("legacy")
+
+    if flags:
+        print()
+        print(f"{flags_table[0]}: no flags, "
+              f"{flags_table[1]}: PyAPI, "
+              f"{flags_table[2]}: supports unaligned, "
+              f"{flags_table[4]}: no-float-errors")
+        print()
+        print_table("flags")
+
+
+if __name__ == '__main__':
+    print("can cast")
+    print_cancast_table(np.typecodes['All'])
+    print()
+    print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
+    print()
+    print("scalar + scalar")
+    print_coercion_table(np.typecodes['All'], 0, 0, False)
+    print()
+    print("scalar + neg scalar")
+    print_coercion_table(np.typecodes['All'], 0, -1, False)
+    print()
+    print("array + scalar")
+    print_coercion_table(np.typecodes['All'], 0, 0, True)
+    print()
+    print("array + neg scalar")
+    print_coercion_table(np.typecodes['All'], 0, -1, True)
+    print()
+    print("promote_types")
+    print_coercion_table(np.typecodes['All'], 0, 0, False, True)
+    print("New casting type promotion:")
+    print_new_cast_table(can_cast=True, legacy=True, flags=True)
diff --git a/python/numpy/testing/print_coercion_tables.pyi b/python/numpy/testing/print_coercion_tables.pyi
new file mode 100644
index 000000000..c859305f2
--- /dev/null
+++ b/python/numpy/testing/print_coercion_tables.pyi
@@ -0,0 +1,27 @@
+from collections.abc import Iterable
+from typing import ClassVar, Generic, Self
+
+from typing_extensions import TypeVar
+
+import numpy as np
+
+_VT_co = TypeVar("_VT_co", default=object, covariant=True)
+
+# undocumented
+class GenericObject(Generic[_VT_co]):
+    dtype: ClassVar[np.dtype[np.object_]] = ...
+    v: _VT_co
+
+    def __init__(self, /, v: _VT_co) -> None: ...
+    def __add__(self, other: object, /) -> Self: ...
+    def __radd__(self, other: object, /) -> Self: ...
+
+def print_cancast_table(ntypes: Iterable[str]) -> None: ...
+def print_coercion_table(
+    ntypes: Iterable[str],
+    inputfirstvalue: int,
+    inputsecondvalue: int,
+    firstarray: bool,
+    use_promote_types: bool = False,
+) -> None: ...
+def print_new_cast_table(*, can_cast: bool = True, legacy: bool = False, flags: bool = False) -> None: ...
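Assuming the package layout above leaves this module importable as numpy.testing.print_coercion_tables, a small driver can render the tables for just a few typecodes instead of np.typecodes['All'], which keeps the output readable:

import numpy as np
from numpy.testing.print_coercion_tables import (
    print_cancast_table,
    print_coercion_table,
)

# Casting table for bool and the signed integer typecodes:
# '#' equivalent, '=' safe, '~' same-kind, '.' unsafe, ' ' no cast
print_cancast_table('?bhil')

# Result dtype of scalar + scalar for the same typecodes; errors are encoded
# as '!' (ValueError), '@' (OverflowError) and '#' (TypeError), as in __main__
print_coercion_table('?bhil', 0, 0, firstarray=False)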
diff --git a/python/numpy/testing/tests/__init__.py b/python/numpy/testing/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/numpy/testing/tests/__pycache__/__init__.cpython-312.pyc b/python/numpy/testing/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..bd253734a Binary files /dev/null and b/python/numpy/testing/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc b/python/numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc new file mode 100644 index 000000000..6b5192d0f Binary files /dev/null and b/python/numpy/testing/tests/__pycache__/test_utils.cpython-312.pyc differ diff --git a/python/numpy/testing/tests/test_utils.py b/python/numpy/testing/tests/test_utils.py new file mode 100644 index 000000000..fcf20091c --- /dev/null +++ b/python/numpy/testing/tests/test_utils.py @@ -0,0 +1,1917 @@ +import itertools +import os +import re +import sys +import warnings +import weakref + +import pytest + +import numpy as np +import numpy._core._multiarray_umath as ncu +from numpy.testing import ( + HAS_REFCOUNT, + assert_, + assert_allclose, + assert_almost_equal, + assert_approx_equal, + assert_array_almost_equal, + assert_array_almost_equal_nulp, + assert_array_equal, + assert_array_less, + assert_array_max_ulp, + assert_equal, + assert_no_gc_cycles, + assert_no_warnings, + assert_raises, + assert_string_equal, + assert_warns, + build_err_msg, + clear_and_catch_warnings, + suppress_warnings, + tempdir, + temppath, +) + + +class _GenericTest: + + def _test_equal(self, a, b): + self._assert_func(a, b) + + def _test_not_equal(self, a, b): + with assert_raises(AssertionError): + self._assert_func(a, b) + + def test_array_rank1_eq(self): + """Test two equal array of rank 1 are found equal.""" + a = np.array([1, 2]) + b = np.array([1, 2]) + + self._test_equal(a, b) + + def test_array_rank1_noteq(self): + """Test two different array of rank 1 are found not equal.""" + a = np.array([1, 2]) + b = np.array([2, 2]) + + self._test_not_equal(a, b) + + def test_array_rank2_eq(self): + """Test two equal array of rank 2 are found equal.""" + a = np.array([[1, 2], [3, 4]]) + b = np.array([[1, 2], [3, 4]]) + + self._test_equal(a, b) + + def test_array_diffshape(self): + """Test two arrays with different shapes are found not equal.""" + a = np.array([1, 2]) + b = np.array([[1, 2], [1, 2]]) + + self._test_not_equal(a, b) + + def test_objarray(self): + """Test object arrays.""" + a = np.array([1, 1], dtype=object) + self._test_equal(a, 1) + + def test_array_likes(self): + self._test_equal([1, 2, 3], (1, 2, 3)) + + +class TestArrayEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_array_equal + + def test_generic_rank1(self): + """Test rank 1 array for all dtypes.""" + def foo(t): + a = np.empty(2, t) + a.fill(1) + b = a.copy() + c = a.copy() + c.fill(0) + self._test_equal(a, b) + self._test_not_equal(c, b) + + # Test numeric types and object + for t in '?bhilqpBHILQPfdgFDG': + foo(t) + + # Test strings + for t in ['S1', 'U1']: + foo(t) + + def test_0_ndim_array(self): + x = np.array(473963742225900817127911193656584771) + y = np.array(18535119325151578301457182298393896) + + with pytest.raises(AssertionError) as exc_info: + self._assert_func(x, y) + msg = str(exc_info.value) + assert_('Mismatched elements: 1 / 1 (100%)\n' + in msg) + + y = x + self._assert_func(x, y) + + x = np.array(4395065348745.5643764887869876) + y = np.array(0) + 
expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
+                        'Max absolute difference among violations: '
+                        '4.39506535e+12\n'
+                        'Max relative difference among violations: inf\n')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(x, y)
+
+        x = y
+        self._assert_func(x, y)
+
+    def test_generic_rank3(self):
+        """Test rank 3 array for all dtypes."""
+        def foo(t):
+            a = np.empty((4, 2, 3), t)
+            a.fill(1)
+            b = a.copy()
+            c = a.copy()
+            c.fill(0)
+            self._test_equal(a, b)
+            self._test_not_equal(c, b)
+
+        # Test numeric types and object
+        for t in '?bhilqpBHILQPfdgFDG':
+            foo(t)
+
+        # Test strings
+        for t in ['S1', 'U1']:
+            foo(t)
+
+    def test_nan_array(self):
+        """Test arrays with nan values in them."""
+        a = np.array([1, 2, np.nan])
+        b = np.array([1, 2, np.nan])
+
+        self._test_equal(a, b)
+
+        c = np.array([1, 2, 3])
+        self._test_not_equal(c, b)
+
+    def test_string_arrays(self):
+        """Test that equal string arrays compare equal, and different ones do not."""
+        a = np.array(['floupi', 'floupa'])
+        b = np.array(['floupi', 'floupa'])
+
+        self._test_equal(a, b)
+
+        c = np.array(['floupipi', 'floupa'])
+
+        self._test_not_equal(c, b)
+
+    def test_recarrays(self):
+        """Test record arrays."""
+        a = np.empty(2, [('floupi', float), ('floupa', float)])
+        a['floupi'] = [1, 2]
+        a['floupa'] = [1, 2]
+        b = a.copy()
+
+        self._test_equal(a, b)
+
+        c = np.empty(2, [('floupipi', float),
+                         ('floupi', float), ('floupa', float)])
+        c['floupipi'] = a['floupi'].copy()
+        c['floupa'] = a['floupa'].copy()
+
+        with pytest.raises(TypeError):
+            self._test_not_equal(c, b)
+
+    def test_masked_nan_inf(self):
+        # Regression test for gh-11121
+        a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
+        b = np.array([3., np.nan, 6.5])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
+        b = np.array([np.inf, 4., 6.5])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+
+    def test_subclass_that_overrides_eq(self):
+        # While we cannot guarantee testing functions will always work for
+        # subclasses, the tests should ideally rely only on subclasses having
+        # comparison operators, not on them being able to store booleans
+        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+        class MyArray(np.ndarray):
+            def __eq__(self, other):
+                return bool(np.equal(self, other).all())
+
+            def __ne__(self, other):
+                return not self == other
+
+        a = np.array([1., 2.]).view(MyArray)
+        b = np.array([2., 3.]).view(MyArray)
+        assert_(isinstance(a == a, bool))
+        assert_(a == a)
+        assert_(a != b)
+        self._test_equal(a, a)
+        self._test_not_equal(a, b)
+        self._test_not_equal(b, a)
+
+        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Max absolute difference among violations: 1.\n'
+                        'Max relative difference among violations: 0.5')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._test_equal(a, b)
+
+        c = np.array([0., 2.9]).view(MyArray)
+        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Max absolute difference among violations: 2.\n'
+                        'Max relative difference among violations: inf')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._test_equal(b, c)
+
+    def test_subclass_that_does_not_implement_npall(self):
+        class MyArray(np.ndarray):
+            def __array_function__(self, *args, **kwargs):
+                return NotImplemented
+
+        a = np.array([1., 2.]).view(MyArray)
+        b = np.array([2., 3.]).view(MyArray)
+        with assert_raises(TypeError):
+            np.all(a)
+        self._test_equal(a, a)
+        self._test_not_equal(a, b)
+        self._test_not_equal(b, a)
+
+    def test_suppress_overflow_warnings(self):
+        # Based on issue #18992
+        with pytest.raises(AssertionError):
+            with np.errstate(all="raise"):
+                np.testing.assert_array_equal(
+                    np.array([1, 2, 3], np.float32),
+                    np.array([1, 1e-40, 3], np.float32))
+
+    def test_array_vs_scalar_is_equal(self):
+        """Test comparing an array with a scalar when all values are equal."""
+        a = np.array([1., 1., 1.])
+        b = 1.
+
+        self._test_equal(a, b)
+
+    def test_array_vs_array_not_equal(self):
+        """Test comparing two arrays when not all values are equal."""
+        a = np.array([34986, 545676, 439655, 563766])
+        b = np.array([34986, 545676, 439655, 0])
+
+        expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
+                        'Max absolute difference among violations: 563766\n'
+                        'Max relative difference among violations: inf')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(a, b)
+
+        a = np.array([34986, 545676, 439655.2, 563766])
+        expected_msg = ('Mismatched elements: 2 / 4 (50%)\n'
+                        'Max absolute difference among violations: '
+                        '563766.\n'
+                        'Max relative difference among violations: '
+                        '4.54902139e-07')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(a, b)
+
+    def test_array_vs_scalar_strict(self):
+        """Test comparing an array with a scalar with strict option."""
+        a = np.array([1., 1., 1.])
+        b = 1.
+ + with pytest.raises(AssertionError): + self._assert_func(a, b, strict=True) + + def test_array_vs_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1., 1., 1.]) + b = np.array([1., 1., 1.]) + + self._assert_func(a, b, strict=True) + + def test_array_vs_float_array_strict(self): + """Test comparing two arrays with strict option.""" + a = np.array([1, 1, 1]) + b = np.array([1., 1., 1.]) + + with pytest.raises(AssertionError): + self._assert_func(a, b, strict=True) + + +class TestBuildErrorMessage: + + def test_build_err_msg_defaults(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, ' + '2.00003, 3.00004])') + assert_equal(a, b) + + def test_build_err_msg_no_verbose(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, verbose=False) + b = '\nItems are not equal: There is a mismatch' + assert_equal(a, b) + + def test_build_err_msg_custom_names(self): + x = np.array([1.00001, 2.00002, 3.00003]) + y = np.array([1.00002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR')) + b = ('\nItems are not equal: There is a mismatch\n FOO: array([' + '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, ' + '3.00004])') + assert_equal(a, b) + + def test_build_err_msg_custom_precision(self): + x = np.array([1.000000001, 2.00002, 3.00003]) + y = np.array([1.000000002, 2.00003, 3.00004]) + err_msg = 'There is a mismatch' + + a = build_err_msg([x, y], err_msg, precision=10) + b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array([' + '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array([' + '1.000000002, 2.00003 , 3.00004 ])') + assert_equal(a, b) + + +class TestEqual(TestArrayEqual): + + def setup_method(self): + self._assert_func = assert_equal + + def test_nan_items(self): + self._assert_func(np.nan, np.nan) + self._assert_func([np.nan], [np.nan]) + self._test_not_equal(np.nan, [np.nan]) + self._test_not_equal(np.nan, 1) + + def test_inf_items(self): + self._assert_func(np.inf, np.inf) + self._assert_func([np.inf], [np.inf]) + self._test_not_equal(np.inf, [np.inf]) + + def test_datetime(self): + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "s") + ) + self._test_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-01", "m") + ) + + # gh-10081 + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "s") + ) + self._test_not_equal( + np.datetime64("2017-01-01", "s"), + np.datetime64("2017-01-02", "m") + ) + + def test_nat_items(self): + # not a datetime + nadt_no_unit = np.datetime64("NaT") + nadt_s = np.datetime64("NaT", "s") + nadt_d = np.datetime64("NaT", "ns") + # not a timedelta + natd_no_unit = np.timedelta64("NaT") + natd_s = np.timedelta64("NaT", "s") + natd_d = np.timedelta64("NaT", "ns") + + dts = [nadt_no_unit, nadt_s, nadt_d] + tds = [natd_no_unit, natd_s, natd_d] + for a, b in itertools.product(dts, dts): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in itertools.product(tds, tds): + self._assert_func(a, b) + self._assert_func([a], [b]) + self._test_not_equal([a], b) + + for a, b in 
itertools.product(tds, dts): + self._test_not_equal(a, b) + self._test_not_equal(a, [b]) + self._test_not_equal([a], [b]) + self._test_not_equal([a], np.datetime64("2017-01-01", "s")) + self._test_not_equal([b], np.datetime64("2017-01-01", "s")) + self._test_not_equal([a], np.timedelta64(123, "s")) + self._test_not_equal([b], np.timedelta64(123, "s")) + + def test_non_numeric(self): + self._assert_func('ab', 'ab') + self._test_not_equal('ab', 'abb') + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_negative_zero(self): + self._test_not_equal(ncu.PZERO, ncu.NZERO) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + + def test_object(self): + # gh-12942 + import datetime + a = np.array([datetime.datetime(2000, 1, 1), + datetime.datetime(2000, 1, 2)]) + self._test_not_equal(a, a[::-1]) + + +class TestArrayAlmostEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_array_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.5\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(1.5, 0.0, decimal=0) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.5\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func([1.5], [0.0], decimal=0) + + a = [1.4999999, 0.00003] + b = [1.49999991, 0] + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: 3.e-05\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=7) + + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: 3.e-05\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=7) + + def test_simple(self): + x = np.array([1234.2222]) + y = np.array([1234.2223]) + + self._assert_func(x, y, decimal=3) + self._assert_func(x, y, decimal=4) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: ' + '1.e-04\n' + 'Max relative difference among violations: ' + '8.10226812e-08') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y, decimal=5) + + def test_array_vs_scalar(self): + a = [5498.42354, 849.54345, 0.00] + b = 5498.42354 + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: 1.') + with 
pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=9) + + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: 5.4722099') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=9) + + a = [5498.42354, 0.00] + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(b, a, decimal=7) + + b = 0 + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: ' + '5498.42354\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b, decimal=7) + + def test_nan(self): + anan = np.array([np.nan]) + aone = np.array([1]) + ainf = np.array([np.inf]) + self._assert_func(anan, anan) + assert_raises(AssertionError, + lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, + lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, + lambda: self._assert_func(ainf, anan)) + + def test_inf(self): + a = np.array([[1., 2.], [3., 4.]]) + b = a.copy() + a[0, 0] = np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + b[0, 0] = -np.inf + assert_raises(AssertionError, + lambda: self._assert_func(a, b)) + + def test_subclass(self): + a = np.array([[1., 2.], [3., 4.]]) + b = np.ma.masked_array([[1., 2.], [0., 4.]], + [[False, False], [True, False]]) + self._assert_func(a, b) + self._assert_func(b, a) + self._assert_func(b, b) + + # Test fully masked as well (see gh-11123). + a = np.ma.MaskedArray(3.5, mask=True) + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.masked + b = np.array([3., 4., 6.5]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array([1., 2., 3.]) + self._test_equal(a, b) + self._test_equal(b, a) + a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True]) + b = np.array(1.) + self._test_equal(a, b) + self._test_equal(b, a) + + def test_subclass_2(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. 
+ class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + return all(self) + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + z = np.array([True, True]).view(MyArray) + all(z) + b = np.array([1., 202]).view(MyArray) + expected_msg = ('Mismatched elements: 1 / 2 (50%)\n' + 'Max absolute difference among violations: 200.\n' + 'Max relative difference among violations: 0.99009') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + def test_subclass_that_cannot_be_bool(self): + # While we cannot guarantee testing functions will always work for + # subclasses, the tests should ideally rely only on subclasses having + # comparison operators, not on them being able to store booleans + # (which, e.g., astropy Quantity cannot usefully do). See gh-8452. + class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestAlmostEqual(_GenericTest): + + def setup_method(self): + self._assert_func = assert_almost_equal + + def test_closeness(self): + # Note that in the course of time we ended up with + # `abs(x - y) < 1.5 * 10**(-decimal)` + # instead of the previously documented + # `abs(x - y) < 0.5 * 10**(-decimal)` + # so this check serves to preserve the wrongness. + + # test scalars + self._assert_func(1.499999, 0.0, decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func(1.5, 0.0, decimal=0)) + + # test arrays + self._assert_func([1.499999], [0.0], decimal=0) + assert_raises(AssertionError, + lambda: self._assert_func([1.5], [0.0], decimal=0)) + + def test_nan_item(self): + self._assert_func(np.nan, np.nan) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(np.nan, np.inf)) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, np.nan)) + + def test_inf_item(self): + self._assert_func(np.inf, np.inf) + self._assert_func(-np.inf, -np.inf) + assert_raises(AssertionError, + lambda: self._assert_func(np.inf, 1)) + assert_raises(AssertionError, + lambda: self._assert_func(-np.inf, np.inf)) + + def test_simple_item(self): + self._test_not_equal(1, 2) + + def test_complex_item(self): + self._assert_func(complex(1, 2), complex(1, 2)) + self._assert_func(complex(1, np.nan), complex(1, np.nan)) + self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan)) + self._test_not_equal(complex(1, np.nan), complex(1, 2)) + self._test_not_equal(complex(np.nan, 1), complex(1, np.nan)) + self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2)) + + def test_complex(self): + x = np.array([complex(1, 2), complex(1, np.nan)]) + z = np.array([complex(1, 2), complex(np.nan, 1)]) + y = np.array([complex(1, 2), complex(1, 2)]) + self._assert_func(x, x) + self._test_not_equal(x, y) + self._test_not_equal(x, z) + + def test_error_message(self): + """Check the message is formatted correctly for the decimal value. 
+        Also check the message when input includes inf or nan (gh12200)"""
+        x = np.array([1.00000000001, 2.00000000002, 3.00003])
+        y = np.array([1.00000000002, 2.00000000003, 3.00004])
+
+        # Test with a different amount of decimal digits
+        expected_msg = ('Mismatched elements: 3 / 3 (100%)\n'
+                        'Max absolute difference among violations: 1.e-05\n'
+                        'Max relative difference among violations: '
+                        '3.33328889e-06\n'
+                        ' ACTUAL: array([1.00000000001, '
+                        '2.00000000002, '
+                        '3.00003 ])\n'
+                        ' DESIRED: array([1.00000000002, 2.00000000003, '
+                        '3.00004 ])')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(x, y, decimal=12)
+
+        # With the default value of decimal digits, only the 3rd element
+        # differs. Note that we only check for the formatting of the arrays
+        # themselves.
+        expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
+                        'Max absolute difference among violations: 1.e-05\n'
+                        'Max relative difference among violations: '
+                        '3.33328889e-06\n'
+                        ' ACTUAL: array([1. , 2. , 3.00003])\n'
+                        ' DESIRED: array([1. , 2. , 3.00004])')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(x, y)
+
+        # Check the error message when input includes inf
+        x = np.array([np.inf, 0])
+        y = np.array([np.inf, 1])
+        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
+                        'Max absolute difference among violations: 1.\n'
+                        'Max relative difference among violations: 1.\n'
+                        ' ACTUAL: array([inf, 0.])\n'
+                        ' DESIRED: array([inf, 1.])')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(x, y)
+
+        # Check the error message when dividing by zero
+        x = np.array([1, 2])
+        y = np.array([0, 0])
+        expected_msg = ('Mismatched elements: 2 / 2 (100%)\n'
+                        'Max absolute difference among violations: 2\n'
+                        'Max relative difference among violations: inf')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(x, y)
+
+    def test_error_message_2(self):
+        """Check the message is formatted correctly when either x or y is a scalar."""
+        x = 2
+        y = np.ones(20)
+        expected_msg = ('Mismatched elements: 20 / 20 (100%)\n'
+                        'Max absolute difference among violations: 1.\n'
+                        'Max relative difference among violations: 1.')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(x, y)
+
+        y = 2
+        x = np.ones(20)
+        expected_msg = ('Mismatched elements: 20 / 20 (100%)\n'
+                        'Max absolute difference among violations: 1.\n'
+                        'Max relative difference among violations: 0.5')
+        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
+            self._assert_func(x, y)
+
+    def test_subclass_that_cannot_be_bool(self):
+        # While we cannot guarantee testing functions will always work for
+        # subclasses, the tests should ideally rely only on subclasses having
+        # comparison operators, not on them being able to store booleans
+        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+ class MyArray(np.ndarray): + def __eq__(self, other): + return super().__eq__(other).view(np.ndarray) + + def __lt__(self, other): + return super().__lt__(other).view(np.ndarray) + + def all(self, *args, **kwargs): + raise NotImplementedError + + a = np.array([1., 2.]).view(MyArray) + self._assert_func(a, a) + + +class TestApproxEqual: + + def setup_method(self): + self._assert_func = assert_approx_equal + + def test_simple_0d_arrays(self): + x = np.array(1234.22) + y = np.array(1234.23) + + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_simple_items(self): + x = 1234.22 + y = 1234.23 + + self._assert_func(x, y, significant=4) + self._assert_func(x, y, significant=5) + self._assert_func(x, y, significant=6) + assert_raises(AssertionError, + lambda: self._assert_func(x, y, significant=7)) + + def test_nan_array(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_items(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + +class TestArrayAssertLess: + + def setup_method(self): + self._assert_func = assert_array_less + + def test_simple_arrays(self): + x = np.array([1.1, 2.2]) + y = np.array([1.2, 2.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 2.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + a = np.array([1, 3, 6, 20]) + b = np.array([2, 4, 6, 8]) + + expected_msg = ('Mismatched elements: 2 / 4 (50%)\n' + 'Max absolute difference among violations: 12\n' + 'Max relative difference among violations: 1.5') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(a, b) + + def test_rank2(self): + x = np.array([[1.1, 2.2], [3.3, 4.4]]) + y = np.array([[1.2, 2.3], [3.4, 4.5]]) + + self._assert_func(x, y) + expected_msg = ('Mismatched elements: 4 / 4 (100%)\n' + 'Max absolute difference among violations: 0.1\n' + 'Max relative difference among violations: 0.09090909') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = np.array([[1.0, 2.3], [3.4, 4.5]]) + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_rank3(self): + x = np.ones(shape=(2, 2, 2)) + y = np.ones(shape=(2, 2, 2)) + 1 + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y[0, 0, 0] = 0 + expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n' + 'Max absolute difference among violations: 1.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + def test_simple_items(self): + x = 1.1 + y = 2.2 + + self._assert_func(x, 
y) + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.1\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = np.array([2.2, 3.3]) + + self._assert_func(x, y) + assert_raises(AssertionError, lambda: self._assert_func(y, x)) + + y = np.array([1.0, 3.3]) + + assert_raises(AssertionError, lambda: self._assert_func(x, y)) + + def test_simple_items_and_array(self): + x = np.array([[621.345454, 390.5436, 43.54657, 626.4535], + [54.54, 627.3399, 13., 405.5435], + [543.545, 8.34, 91.543, 333.3]]) + y = 627.34 + self._assert_func(x, y) + + y = 8.339999 + self._assert_func(y, x) + + x = np.array([[3.4536, 2390.5436, 435.54657, 324525.4535], + [5449.54, 999090.54, 130303.54, 405.5435], + [543.545, 8.34, 91.543, 999090.53999]]) + y = 999090.54 + + expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n' + 'Max absolute difference among violations: 0.\n' + 'Max relative difference among violations: 0.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 12 / 12 (100%)\n' + 'Max absolute difference among violations: ' + '999087.0864\n' + 'Max relative difference among violations: ' + '289288.5934676') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + def test_zeroes(self): + x = np.array([546456., 0, 15.455]) + y = np.array(87654.) + + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Max absolute difference among violations: 458802.\n' + 'Max relative difference among violations: 5.23423917') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n' + 'Max absolute difference among violations: 87654.\n' + 'Max relative difference among violations: ' + '5670.5626011') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + y = 0 + + expected_msg = ('Mismatched elements: 3 / 3 (100%)\n' + 'Max absolute difference among violations: 546456.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(x, y) + + expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n' + 'Max absolute difference among violations: 0.\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + self._assert_func(y, x) + + def test_nan_noncompare(self): + anan = np.array(np.nan) + aone = np.array(1) + ainf = np.array(np.inf) + self._assert_func(anan, anan) + assert_raises(AssertionError, lambda: self._assert_func(aone, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, aone)) + assert_raises(AssertionError, lambda: self._assert_func(anan, ainf)) + assert_raises(AssertionError, lambda: self._assert_func(ainf, anan)) + + def test_nan_noncompare_array(self): + x = np.array([1.1, 2.2, 3.3]) + anan = np.array(np.nan) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + x = np.array([1.1, 2.2, np.nan]) + + assert_raises(AssertionError, lambda: self._assert_func(x, anan)) + assert_raises(AssertionError, lambda: self._assert_func(anan, x)) + + y = np.array([1.0, 2.0, np.nan]) + + self._assert_func(y, x) + assert_raises(AssertionError, lambda: 
self._assert_func(x, y))
+
+    def test_inf_compare(self):
+        aone = np.array(1)
+        ainf = np.array(np.inf)
+
+        self._assert_func(aone, ainf)
+        self._assert_func(-ainf, aone)
+        self._assert_func(-ainf, ainf)
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
+        assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))
+
+    def test_inf_compare_array(self):
+        x = np.array([1.1, 2.2, np.inf])
+        ainf = np.array(np.inf)
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
+        assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
+        self._assert_func(-ainf, x)
+
+    def test_strict(self):
+        """Test the behavior of the `strict` option."""
+        x = np.zeros(3)
+        y = np.ones(())
+        self._assert_func(x, y)
+        with pytest.raises(AssertionError):
+            self._assert_func(x, y, strict=True)
+        y = np.broadcast_to(y, x.shape)
+        self._assert_func(x, y)
+        with pytest.raises(AssertionError):
+            self._assert_func(x, y.astype(np.float32), strict=True)
+
+
+class TestWarns:
+
+    def test_warn(self):
+        def f():
+            warnings.warn("yo")
+            return 3
+
+        before_filters = sys.modules['warnings'].filters[:]
+        assert_equal(assert_warns(UserWarning, f), 3)
+        after_filters = sys.modules['warnings'].filters
+
+        assert_raises(AssertionError, assert_no_warnings, f)
+        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
+
+        # Check that the warnings state is unchanged
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve warnings state")
+
+    def test_context_manager(self):
+
+        before_filters = sys.modules['warnings'].filters[:]
+        with assert_warns(UserWarning):
+            warnings.warn("yo")
+        after_filters = sys.modules['warnings'].filters
+
+        def no_warnings():
+            with assert_no_warnings():
+                warnings.warn("yo")
+
+        assert_raises(AssertionError, no_warnings)
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserve warnings state")
+
+    def test_args(self):
+        def f(a=0, b=1):
+            warnings.warn("yo")
+            return a + b
+
+        assert assert_warns(UserWarning, f, b=20) == 20
+
+        with pytest.raises(RuntimeError) as exc:
+            # assert_warns cannot do regexp matching, use pytest.warns
+            with assert_warns(UserWarning, match="A"):
+                warnings.warn("B", UserWarning)
+        assert "assert_warns" in str(exc)
+        assert "pytest.warns" in str(exc)
+
+        with pytest.raises(RuntimeError) as exc:
+            # assert_warns cannot do regexp matching, use pytest.warns
+            with assert_warns(UserWarning, wrong="A"):
+                warnings.warn("B", UserWarning)
+        assert "assert_warns" in str(exc)
+        assert "pytest.warns" not in str(exc)
+
+    def test_warn_wrong_warning(self):
+        def f():
+            warnings.warn("yo", DeprecationWarning)
+
+        failed = False
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", DeprecationWarning)
+            try:
+                # Should raise a DeprecationWarning
+                assert_warns(UserWarning, f)
+                failed = True
+            except DeprecationWarning:
+                pass
+
+        if failed:
+            raise AssertionError("wrong warning caught by assert_warns")
+
+
+class TestAssertAllclose:
+
+    def test_simple(self):
+        x = 1e-3
+        y = 1e-9
+
+        assert_allclose(x, y, atol=1)
+        assert_raises(AssertionError, assert_allclose, x, y)
+ + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: 999999.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(x, y) + + z = 0 + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.e-09\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(y, z) + + expected_msg = ('Mismatched elements: 1 / 1 (100%)\n' + 'Max absolute difference among violations: 1.e-09\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(z, y) + + a = np.array([x, y, x, y]) + b = np.array([x, y, x, x]) + + assert_allclose(a, b, atol=1) + assert_raises(AssertionError, assert_allclose, a, b) + + b[-1] = y * (1 + 1e-8) + assert_allclose(a, b) + assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9) + + assert_allclose(6, 10, rtol=0.5) + assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5) + + b = np.array([x, y, x, x]) + c = np.array([x, y, x, z]) + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: inf') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(b, c) + + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Max absolute difference among violations: 0.001\n' + 'Max relative difference among violations: 1.') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(c, b) + + def test_min_int(self): + a = np.array([np.iinfo(np.int_).min], dtype=np.int_) + # Should not raise: + assert_allclose(a, a) + + def test_report_fail_percentage(self): + a = np.array([1, 1, 1, 1]) + b = np.array([1, 1, 1, 2]) + + expected_msg = ('Mismatched elements: 1 / 4 (25%)\n' + 'Max absolute difference among violations: 1\n' + 'Max relative difference among violations: 0.5') + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + def test_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + # Should not raise: + assert_allclose(a, b, equal_nan=True) + + def test_not_equal_nan(self): + a = np.array([np.nan]) + b = np.array([np.nan]) + assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False) + + def test_equal_nan_default(self): + # Make sure equal_nan default behavior remains unchanged. (All + # of these functions use assert_array_compare under the hood.) + # None of these should raise. 
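+        # Illustrative note: this default comes from assert_array_compare
+        # being invoked with equal_nan=True, so NaNs in matching positions
+        # compare as equal; assert_allclose is the variant that exposes an
+        # equal_nan=False opt-out (exercised in the tests above).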
+ a = np.array([np.nan]) + b = np.array([np.nan]) + assert_array_equal(a, b) + assert_array_almost_equal(a, b) + assert_array_less(a, b) + assert_allclose(a, b) + + def test_report_max_relative_error(self): + a = np.array([0, 1]) + b = np.array([0, 2]) + + expected_msg = 'Max relative difference among violations: 0.5' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(a, b) + + def test_timedelta(self): + # see gh-18286 + a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]") + assert_allclose(a, a) + + def test_error_message_unsigned(self): + """Check the message is formatted correctly when overflow can occur + (gh21768)""" + # Ensure to test for potential overflow in the case of: + # x - y + # and + # y - x + x = np.asarray([0, 1, 8], dtype='uint8') + y = np.asarray([4, 4, 4], dtype='uint8') + expected_msg = 'Max absolute difference among violations: 4' + with pytest.raises(AssertionError, match=re.escape(expected_msg)): + assert_allclose(x, y, atol=3) + + def test_strict(self): + """Test the behavior of the `strict` option.""" + x = np.ones(3) + y = np.ones(()) + assert_allclose(x, y) + with pytest.raises(AssertionError): + assert_allclose(x, y, strict=True) + assert_allclose(x, x) + with pytest.raises(AssertionError): + assert_allclose(x, x.astype(np.float32), strict=True) + + +class TestArrayAlmostEqualNulp: + + def test_float64_pass(self): + # The number of units of least precision + # In this case, use a few places above the lowest level (ie nulp=1) + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + # Addition + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + # Subtraction + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float64_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint64(0xffffffff) + nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64) + nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones. + nan1_f64 = nan1_i64.view(np.float64) + nan2_f64 = nan2_i64.view(np.float64) + assert_array_max_ulp(nan1_f64, nan2_f64, 0) + + def test_float32_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float32_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. 
+ assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float32_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint32(0xffff) + nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32) + nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones. + nan1_f32 = nan1_i32.view(np.float32) + nan2_f32 = nan2_i32.view(np.float32) + assert_array_max_ulp(nan1_f32, nan2_f32, 0) + + def test_float16_pass(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(x, y, nulp) + + def test_float16_fail(self): + nulp = 5 + x = np.linspace(-4, 4, 10, dtype=np.float16) + x = 10**x + x = np.r_[-x, x] + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + x, y, nulp) + + def test_float16_ignore_nan(self): + # Ignore ULP differences between various NAN's + # Note that MIPS may reverse quiet and signaling nans + # so we use the builtin version as a base. + offset = np.uint16(0xff) + nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16) + nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones. + nan1_f16 = nan1_i16.view(np.float16) + nan2_f16 = nan2_i16.view(np.float16) + assert_array_max_ulp(nan1_f16, nan2_f16, 0) + + def test_complex128_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + def test_complex128_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float64) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + # The test condition needs to be at least a factor of sqrt(2) smaller + # because the real and imaginary parts both change + y = x + x * eps * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. 
+ assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + def test_complex64_pass(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x + x * eps * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp / 2. + assert_array_almost_equal_nulp(xi, x + y * 1j, nulp) + assert_array_almost_equal_nulp(xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp / 4. + assert_array_almost_equal_nulp(xi, y + y * 1j, nulp) + + def test_complex64_fail(self): + nulp = 5 + x = np.linspace(-20, 20, 50, dtype=np.float32) + x = 10**x + x = np.r_[-x, x] + xi = x + x * 1j + + eps = np.finfo(x.dtype).eps + y = x + x * eps * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + y = x + x * eps * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + epsneg = np.finfo(x.dtype).epsneg + y = x - x * epsneg * nulp * 2. + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, x + y * 1j, nulp) + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + x * 1j, nulp) + y = x - x * epsneg * nulp + assert_raises(AssertionError, assert_array_almost_equal_nulp, + xi, y + y * 1j, nulp) + + +class TestULP: + + def test_equal(self): + x = np.random.randn(10) + assert_array_max_ulp(x, x, maxulp=0) + + def test_single(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float32) + x += 0.01 * np.random.randn(10).astype(np.float32) + eps = np.finfo(np.float32).eps + assert_array_max_ulp(x, x + eps, maxulp=20) + + def test_double(self): + # Generate 1 + small deviation, check that adding eps gives a few UNL + x = np.ones(10).astype(np.float64) + x += 0.01 * np.random.randn(10).astype(np.float64) + eps = np.finfo(np.float64).eps + assert_array_max_ulp(x, x + eps, maxulp=200) + + def test_inf(self): + for dt in [np.float32, np.float64]: + inf = np.array([np.inf]).astype(dt) + big = np.array([np.finfo(dt).max]) + assert_array_max_ulp(inf, big, maxulp=200) + + def test_nan(self): + # Test that nan is 'far' from small, tiny, inf, max and min + for dt in [np.float32, np.float64]: + if dt == np.float32: + maxulp = 1e6 + else: + maxulp = 1e12 + inf = np.array([np.inf]).astype(dt) + nan = np.array([np.nan]).astype(dt) + big = np.array([np.finfo(dt).max]) + tiny = np.array([np.finfo(dt).tiny]) + zero = np.array([0.0]).astype(dt) + nzero = np.array([-0.0]).astype(dt) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, inf, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, big, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, tiny, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, zero, + maxulp=maxulp)) + assert_raises(AssertionError, + lambda: assert_array_max_ulp(nan, nzero, + maxulp=maxulp)) + + +class TestStringEqual: + def 
test_simple(self): + assert_string_equal("hello", "hello") + assert_string_equal("hello\nmultiline", "hello\nmultiline") + + with pytest.raises(AssertionError) as exc_info: + assert_string_equal("foo\nbar", "hello\nbar") + msg = str(exc_info.value) + assert_equal(msg, "Differences in strings:\n- foo\n+ hello") + + assert_raises(AssertionError, + lambda: assert_string_equal("foo", "hello")) + + def test_regex(self): + assert_string_equal("a+*b", "a+*b") + + assert_raises(AssertionError, + lambda: assert_string_equal("aaa", "a+b")) + + +def assert_warn_len_equal(mod, n_in_context): + try: + mod_warns = mod.__warningregistry__ + except AttributeError: + # the lack of a __warningregistry__ + # attribute means that no warning has + # occurred; this can be triggered in + # a parallel test scenario, while in + # a serial test scenario an initial + # warning (and therefore the attribute) + # are always created first + mod_warns = {} + + num_warns = len(mod_warns) + + if 'version' in mod_warns: + # Python adds a 'version' entry to the registry, + # do not count it. + num_warns -= 1 + + assert_equal(num_warns, n_in_context) + + +def test_warn_len_equal_call_scenarios(): + # assert_warn_len_equal is called under + # varying circumstances depending on serial + # vs. parallel test scenarios; this test + # simply aims to probe both code paths and + # check that no assertion is uncaught + + # parallel scenario -- no warning issued yet + class mod: + pass + + mod_inst = mod() + + assert_warn_len_equal(mod=mod_inst, + n_in_context=0) + + # serial test scenario -- the __warningregistry__ + # attribute should be present + class mod: + def __init__(self): + self.__warningregistry__ = {'warning1': 1, + 'warning2': 2} + + mod_inst = mod() + assert_warn_len_equal(mod=mod_inst, + n_in_context=2) + + +def _get_fresh_mod(): + # Get this module, with warning registry empty + my_mod = sys.modules[__name__] + try: + my_mod.__warningregistry__.clear() + except AttributeError: + # will not have a __warningregistry__ unless warning has been + # raised in the module at some point + pass + return my_mod + + +def test_clear_and_catch_warnings(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + # Without specified modules, don't clear warnings during context. + # catch_warnings doesn't make an entry for 'ignore'. 
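+    # Illustrative note: clear_and_catch_warnings behaves like
+    # warnings.catch_warnings, but additionally snapshots (and restores on
+    # exit) the __warningregistry__ of each module passed via `modules`,
+    # e.g.:
+    #     with clear_and_catch_warnings(modules=[my_mod]):
+    #         warnings.simplefilter('always')
+    #         ...  # my_mod's registry is reset afterwards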
+ with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Manually adding two warnings to the registry: + my_mod.__warningregistry__ = {'warning1': 1, + 'warning2': 2} + + # Confirm that specifying module keeps old warning, does not add new + with clear_and_catch_warnings(modules=[my_mod]): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 2) + + # Another warning, no module spec it clears up registry + with clear_and_catch_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Another warning') + assert_warn_len_equal(my_mod, 0) + + +def test_suppress_warnings_module(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. + def warn(arr): + warnings.warn("Some warning 2", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + # Test module based warning suppression: + assert_warn_len_equal(my_mod, 0) + with suppress_warnings() as sup: + sup.record(UserWarning) + # suppress warning from other module (may have .pyc ending), + # if apply_along_axis is moved, had to be changed. + sup.filter(module=np.lib._shape_base_impl) + warnings.warn("Some warning") + warn_other_module() + # Check that the suppression did test the file correctly (this module + # got filtered) + assert_equal(len(sup.log), 1) + assert_equal(sup.log[0].message.args[0], "Some warning") + assert_warn_len_equal(my_mod, 0) + sup = suppress_warnings() + # Will have to be changed if apply_along_axis is moved: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + +def test_suppress_warnings_type(): + # Initial state of module, no warnings + my_mod = _get_fresh_mod() + assert_equal(getattr(my_mod, '__warningregistry__', {}), {}) + + # Test module based warning suppression: + with suppress_warnings() as sup: + sup.filter(UserWarning) + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + sup = suppress_warnings() + sup.filter(UserWarning) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + # And test repeat works: + sup.filter(module=my_mod) + with sup: + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + # Without specified modules + with suppress_warnings(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_warn_len_equal(my_mod, 0) + + +def test_suppress_warnings_decorate_no_record(): + sup = suppress_warnings() + sup.filter(UserWarning) + + @sup + def warn(category): + warnings.warn('Some warning', category) + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + warn(UserWarning) # should be suppressed + warn(RuntimeWarning) + assert_equal(len(w), 1) + + +def test_suppress_warnings_record(): + sup = suppress_warnings() + log1 = sup.record() + + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other 
warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2), 1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Do it again, with the same context to see if some warnings survived: + with sup: + log2 = sup.record(message='Some other warning 2') + sup.filter(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + warnings.warn('Some other warning 2') + + assert_equal(len(sup.log), 2) + assert_equal(len(log1), 1) + assert_equal(len(log2), 1) + assert_equal(log2[0].message.args[0], 'Some other warning 2') + + # Test nested: + with suppress_warnings() as sup: + sup.record() + with suppress_warnings() as sup2: + sup2.record(message='Some warning') + warnings.warn('Some warning') + warnings.warn('Some other warning') + assert_equal(len(sup2.log), 1) + assert_equal(len(sup.log), 1) + + +def test_suppress_warnings_forwarding(): + def warn_other_module(): + # Apply along axis is implemented in python; stacklevel=2 means + # we end up inside its module, not ours. + def warn(arr): + warnings.warn("Some warning", stacklevel=2) + return arr + np.apply_along_axis(warn, 0, [0]) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("always"): + for i in range(2): + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("location"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("module"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + with suppress_warnings() as sup: + sup.record() + with suppress_warnings("once"): + for i in range(2): + warnings.warn("Some warning") + warnings.warn("Some other warning") + warn_other_module() + + assert_equal(len(sup.log), 2) + + +def test_tempdir(): + with tempdir() as tdir: + fpath = os.path.join(tdir, 'tmp') + with open(fpath, 'w'): + pass + assert_(not os.path.isdir(tdir)) + + raised = False + try: + with tempdir() as tdir: + raise ValueError + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isdir(tdir)) + + +def test_temppath(): + with temppath() as fpath: + with open(fpath, 'w'): + pass + assert_(not os.path.isfile(fpath)) + + raised = False + try: + with temppath() as fpath: + raise ValueError + except ValueError: + raised = True + assert_(raised) + assert_(not os.path.isfile(fpath)) + + +class my_cacw(clear_and_catch_warnings): + + class_modules = (sys.modules[__name__],) + + +def test_clear_and_catch_warnings_inherit(): + # Test can subclass and add default modules + my_mod = _get_fresh_mod() + with my_cacw(): + warnings.simplefilter('ignore') + warnings.warn('Some warning') + assert_equal(my_mod.__warningregistry__, {}) + + +@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") +class TestAssertNoGcCycles: + """ Test assert_no_gc_cycles """ + + def test_passes(self): + def no_cycle(): + b = [] + b.append([]) + return b + + with assert_no_gc_cycles(): + no_cycle() + + assert_no_gc_cycles(no_cycle) + + def test_asserts(self): + def make_cycle(): + a = [] + a.append(a) + a.append(a) + return a + + with assert_raises(AssertionError): + with assert_no_gc_cycles(): + make_cycle() + + with assert_raises(AssertionError): + 
assert_no_gc_cycles(make_cycle)
+
+    @pytest.mark.slow
+    def test_fails(self):
+        """
+        Test that in cases where the garbage cannot be collected, we raise an
+        error, instead of hanging forever trying to clear it.
+        """
+
+        class ReferenceCycleInDel:
+            """
+            An object that not only contains a reference cycle, but creates new
+            cycles whenever it's garbage-collected and its __del__ runs
+            """
+            make_cycle = True
+
+            def __init__(self):
+                self.cycle = self
+
+            def __del__(self):
+                # break the current cycle so that `self` can be freed
+                self.cycle = None
+
+                if ReferenceCycleInDel.make_cycle:
+                    # but create a new one so that the garbage collector (GC) has more
+                    # work to do.
+                    ReferenceCycleInDel()
+
+        try:
+            w = weakref.ref(ReferenceCycleInDel())
+            try:
+                with assert_raises(RuntimeError):
+                    # this will be unable to get a baseline empty garbage
+                    assert_no_gc_cycles(lambda: None)
+            except AssertionError:
+                # the above test is only necessary if the GC actually tried to free
+                # our object anyway.
+                if w() is not None:
+                    pytest.skip("GC does not call __del__ on cyclic objects")
+                raise
+
+        finally:
+            # make sure that we stop creating reference cycles
+            ReferenceCycleInDel.make_cycle = False
diff --git a/python/numpy/tests/__init__.py b/python/numpy/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/python/numpy/tests/test__all__.py b/python/numpy/tests/test__all__.py
new file mode 100644
index 000000000..2dc81669d
--- /dev/null
+++ b/python/numpy/tests/test__all__.py
@@ -0,0 +1,10 @@
+
+import collections
+
+import numpy as np
+
+
+def test_no_duplicates_in_np__all__():
+    # Regression test for gh-10198.
+    dups = {k: v for k, v in collections.Counter(np.__all__).items() if v > 1}
+    assert len(dups) == 0
diff --git a/python/numpy/tests/test_configtool.py b/python/numpy/tests/test_configtool.py
new file mode 100644
index 000000000..8262606fc
--- /dev/null
+++ b/python/numpy/tests/test_configtool.py
@@ -0,0 +1,48 @@
+import importlib
+import importlib.metadata
+import os
+import pathlib
+import subprocess
+
+import pytest
+
+import numpy as np
+import numpy._core.include
+import numpy._core.lib.pkgconfig
+from numpy.testing import IS_EDITABLE, IS_INSTALLED, IS_WASM, NUMPY_ROOT
+
+INCLUDE_DIR = NUMPY_ROOT / '_core' / 'include'
+PKG_CONFIG_DIR = NUMPY_ROOT / '_core' / 'lib' / 'pkgconfig'
+
+
+@pytest.mark.skipif(not IS_INSTALLED, reason="`numpy-config` not expected to be installed")
+@pytest.mark.skipif(IS_WASM, reason="wasm interpreter cannot start subprocess")
+class TestNumpyConfig:
+    def check_numpyconfig(self, arg):
+        p = subprocess.run(['numpy-config', arg], capture_output=True, text=True)
+        p.check_returncode()
+        return p.stdout.strip()
+
+    def test_configtool_version(self):
+        stdout = self.check_numpyconfig('--version')
+        assert stdout == np.__version__
+
+    def test_configtool_cflags(self):
+        stdout = self.check_numpyconfig('--cflags')
+        assert f'-I{os.fspath(INCLUDE_DIR)}' in stdout
+
+    def test_configtool_pkgconfigdir(self):
+        stdout = self.check_numpyconfig('--pkgconfigdir')
+        assert pathlib.Path(stdout) == PKG_CONFIG_DIR.resolve()
+
+
+@pytest.mark.skipif(not IS_INSTALLED, reason="numpy must be installed to check its entrypoints")
+def test_pkg_config_entrypoint():
+    (entrypoint,) = importlib.metadata.entry_points(group='pkg_config', name='numpy')
+    assert entrypoint.value == numpy._core.lib.pkgconfig.__name__
+
+
+@pytest.mark.skipif(not IS_INSTALLED, reason="numpy.pc is only available when numpy is installed")
+@pytest.mark.skipif(IS_EDITABLE, reason="editable installs don't have a numpy.pc")
+def test_pkg_config_config_exists():
+    assert PKG_CONFIG_DIR.joinpath('numpy.pc').is_file()
diff --git a/python/numpy/tests/test_ctypeslib.py b/python/numpy/tests/test_ctypeslib.py
new file mode 100644
index 000000000..68d314160
--- /dev/null
+++ b/python/numpy/tests/test_ctypeslib.py
@@ -0,0 +1,377 @@
+import sys
+import sysconfig
+import weakref
+from pathlib import Path
+
+import pytest
+
+import numpy as np
+from numpy.ctypeslib import as_array, load_library, ndpointer
+from numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises
+
+try:
+    import ctypes
+except ImportError:
+    ctypes = None
+else:
+    cdll = None
+    test_cdll = None
+    if hasattr(sys, 'gettotalrefcount'):
+        try:
+            cdll = load_library(
+                '_multiarray_umath_d', np._core._multiarray_umath.__file__
+            )
+        except OSError:
+            pass
+        try:
+            test_cdll = load_library(
+                '_multiarray_tests', np._core._multiarray_tests.__file__
+            )
+        except OSError:
+            pass
+    if cdll is None:
+        cdll = load_library(
+            '_multiarray_umath', np._core._multiarray_umath.__file__)
+    if test_cdll is None:
+        test_cdll = load_library(
+            '_multiarray_tests', np._core._multiarray_tests.__file__
+        )
+
+    c_forward_pointer = test_cdll.forward_pointer
+
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available in this python")
+@pytest.mark.skipif(sys.platform == 'cygwin',
+                    reason="Known to fail on cygwin")
+class TestLoadLibrary:
+    def test_basic(self):
+        loader_path = np._core._multiarray_umath.__file__
+
+        out1 = load_library('_multiarray_umath', loader_path)
+        out2 = load_library(Path('_multiarray_umath'), loader_path)
+        out3 = load_library('_multiarray_umath', Path(loader_path))
+        out4 = load_library(b'_multiarray_umath', loader_path)
+
+        assert isinstance(out1, ctypes.CDLL)
+        assert out1 is out2 is out3 is out4
+
+    def test_basic2(self):
+        # Regression for #801: load_library with a full library name
+        # (including extension) does not work.
+        try:
+            so_ext = sysconfig.get_config_var('EXT_SUFFIX')
+            load_library(f'_multiarray_umath{so_ext}',
+                         np._core._multiarray_umath.__file__)
+        except ImportError as e:
+            msg = ("ctypes is not available on this python: skipping the test"
+                   " (import error was: %s)" % str(e))
+            print(msg)
+
+
+class TestNdpointer:
+    def test_dtype(self):
+        dt = np.intc
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.array([1], dt)))
+        dt = '<i4'
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.array([1], dt)))
+        dt = np.dtype('>i4')
+        p = ndpointer(dtype=dt)
+        p.from_param(np.array([1], dt))
+        assert_raises(TypeError, p.from_param,
+                      np.array([1], dt.newbyteorder('swap')))
+        dtnames = ['x', 'y']
+        dtformats = [np.intc, np.float64]
+        dtdescr = {'names': dtnames, 'formats': dtformats}
+        dt = np.dtype(dtdescr)
+        p = ndpointer(dtype=dt)
+        assert_(p.from_param(np.zeros((10,), dt)))
+        samedt = np.dtype(dtdescr)
+        p = ndpointer(dtype=samedt)
+        assert_(p.from_param(np.zeros((10,), dt)))
+        dt2 = np.dtype(dtdescr, align=True)
+        if dt.itemsize != dt2.itemsize:
+            assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
+        else:
+            assert_(p.from_param(np.zeros((10,), dt2)))
+
+    def test_ndim(self):
+        p = ndpointer(ndim=0)
+        assert_(p.from_param(np.array(1)))
+        assert_raises(TypeError, p.from_param, np.array([1]))
+        p = ndpointer(ndim=1)
+        assert_raises(TypeError, p.from_param, np.array(1))
+        assert_(p.from_param(np.array([1])))
+        p = ndpointer(ndim=2)
+        assert_(p.from_param(np.array([[1]])))
+
+    def test_shape(self):
+        p = ndpointer(shape=(1, 2))
+        assert_(p.from_param(np.array([[1, 2]])))
+        assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
+        p = ndpointer(shape=())
+        assert_(p.from_param(np.array(1)))
+
+    def test_flags(self):
+        x = np.array([[1, 2], [3, 4]], order='F')
+        p = ndpointer(flags='FORTRAN')
+        assert_(p.from_param(x))
+        p = ndpointer(flags='CONTIGUOUS')
+        assert_raises(TypeError, p.from_param, x)
+        p = ndpointer(flags=x.flags.num)
+        assert_(p.from_param(x))
+        assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
+
+    def test_cache(self):
+        assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
+
+        # shapes are normalized
+        assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
+
+        # 1.12 <= v < 1.16 had a bug that made these fail
+        assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
+        assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
+
+@pytest.mark.skipif(ctypes is None,
+                    reason="ctypes not available on this python installation")
+class TestNdpointerCFunc:
+    def test_arguments(self):
+        """ Test that arguments are coerced from arrays """
+        c_forward_pointer.restype = ctypes.c_void_p
+        c_forward_pointer.argtypes = (ndpointer(ndim=2),)
+
+        c_forward_pointer(np.zeros((2, 3)))
+        # too many dimensions
+        assert_raises(
+            ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
+
+    @pytest.mark.parametrize(
+        'dt', [
+            float,
+            np.dtype({
+                'formats': ['<i4', '<i4'],
+                'names': ['a', 'b'],
+                'offsets': [0, 2],
+                'itemsize': 6
+            })
+        ], ids=[
+            'float',
+            'overlapping-fields',
+        ]
+    )
+    def test_return(self, dt):
+        """ Test that return values are coerced to arrays """
+        arr = np.zeros((2, 3), dt)
+        ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
+
+        c_forward_pointer.restype = ptr_type
+        c_forward_pointer.argtypes = (ptr_type,)
+
+        # check that the arrays are equivalent views on the same data
+        arr2 = c_forward_pointer(arr)
+        assert_equal(arr2.dtype, arr.dtype)
+        assert_equal(arr2.shape, arr.shape)
+
+
+class TestAsCtypesType:
+    """ Test conversion from dtypes to ctypes types """
+    def test_scalar(self):
+        dt = np.dtype('<u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_le__)
+
+        dt = np.dtype('>u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16.__ctype_be__)
+
+        dt = np.dtype('u2')
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, ctypes.c_uint16)
+
+    def test_subarray(self):
+        dt = np.dtype((np.int32, (2, 3)))
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_equal(ct, 2 * (3 * ctypes.c_int32))
+
+    def test_structure(self):
+        dt = np.dtype([
+            ('a', np.uint16),
+            ('b', np.uint32),
+        ])
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Structure))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_structure_aligned(self):
+        dt = np.dtype([
+            ('a', np.uint16),
+            ('b', np.uint32),
+        ], align=True)
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Structure))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('', ctypes.c_char * 2),  # padding
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_union(self):
+        dt = np.dtype({
+            'names': ['a', 'b'],
+            'offsets': [0, 0],
+            'formats': [np.uint16, np.uint32]
+        })
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Union))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+        ])
+
+    def test_padded_union(self):
+        dt = np.dtype({
+            'names': ['a', 'b'],
+            'offsets': [0, 0],
+            'formats': [np.uint16, np.uint32],
+            'itemsize': 5,
+        })
+
+        ct = np.ctypeslib.as_ctypes_type(dt)
+        assert_(issubclass(ct, ctypes.Union))
+        assert_equal(ctypes.sizeof(ct), dt.itemsize)
+        assert_equal(ct._fields_, [
+            ('a', ctypes.c_uint16),
+            ('b', ctypes.c_uint32),
+            ('', ctypes.c_char * 5),  # padding
+        ])
+
+    def test_overlapping(self):
+        dt = np.dtype({
+            'names': ['a', 'b'],
+            'offsets': [0, 2],
+            'formats': [np.uint32, np.uint32]
+        })
+        assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
diff --git a/python/numpy/tests/test_lazyloading.py b/python/numpy/tests/test_lazyloading.py
new file mode 100644
index 000000000..5f6233f1c
--- /dev/null
+++ b/python/numpy/tests/test_lazyloading.py
@@ -0,0 +1,38 @@
+import sys
+from importlib.util import LazyLoader, find_spec, module_from_spec
+
+import pytest
+
+
+# Warning raised by _reload_guard() in numpy/__init__.py
+@pytest.mark.filterwarnings("ignore:The NumPy module was reloaded")
+def test_lazy_load():
+    # gh-22045. lazyload doesn't import submodule names into the namespace
+    # muck with sys.modules to test the importing system
+    old_numpy = sys.modules.pop("numpy")
+
+    numpy_modules = {}
+    for mod_name, mod in list(sys.modules.items()):
+        if mod_name[:6] == "numpy.":
+            numpy_modules[mod_name] = mod
+            sys.modules.pop(mod_name)
+
+    try:
+        # create lazy load of numpy as np
+        spec = find_spec("numpy")
+        module = module_from_spec(spec)
+        sys.modules["numpy"] = module
+        loader = LazyLoader(spec.loader)
+        loader.exec_module(module)
+        np = module
+
+        # test a subpackage import
+        from numpy.lib import recfunctions  # noqa: F401
+
+        # test triggering the import of the package
+        np.ndarray
+
+    finally:
+        if old_numpy:
+            sys.modules["numpy"] = old_numpy
+            sys.modules.update(numpy_modules)
diff --git a/python/numpy/tests/test_matlib.py b/python/numpy/tests/test_matlib.py
new file mode 100644
index 000000000..2aac1f258
--- /dev/null
+++ b/python/numpy/tests/test_matlib.py
@@ -0,0 +1,59 @@
+import numpy as np
+import numpy.matlib
+from numpy.testing import assert_, assert_array_equal, assert_equal
+
+
+def test_empty():
+    x = numpy.matlib.empty((2,))
+    assert_(isinstance(x, np.matrix))
+    assert_equal(x.shape, (1, 2))
+
+def test_ones():
+    assert_array_equal(numpy.matlib.ones((2, 3)),
+                       np.matrix([[ 1.,  1.,  1.],
+                                  [ 1.,  1.,  1.]]))
+
+    assert_array_equal(numpy.matlib.ones(2), np.matrix([[ 1.,  1.]]))
+
+def test_zeros():
+    assert_array_equal(numpy.matlib.zeros((2, 3)),
+                       np.matrix([[ 0.,  0.,  0.],
+                                  [ 0.,  0.,  0.]]))
+
+    assert_array_equal(numpy.matlib.zeros(2), np.matrix([[0.,  0.]]))
+
+def test_identity():
+    x = numpy.matlib.identity(2, dtype=int)
+    assert_array_equal(x, np.matrix([[1, 0], [0, 1]]))
+
+def test_eye():
+    xc = numpy.matlib.eye(3, k=1, dtype=int)
+    assert_array_equal(xc, np.matrix([[ 0,  1,  0],
+                                      [ 0,  0,  1],
+                                      [ 0,  0,  0]]))
+    assert xc.flags.c_contiguous
+    assert not xc.flags.f_contiguous
+
+    xf = numpy.matlib.eye(3, 4, dtype=int, order='F')
+    assert_array_equal(xf, np.matrix([[ 1,  0,  0,  0],
+                                      [ 0,  1,  0,  0],
+                                      [ 0,  0,  1,  0]]))
+    assert not xf.flags.c_contiguous
+    assert xf.flags.f_contiguous
+
+def test_rand():
+    x = numpy.matlib.rand(3)
+    # check matrix type, array would have shape (3,)
+    assert_(x.ndim == 2)
+
+def test_randn():
+    x = np.matlib.randn(3)
+    # check matrix type, array would have shape (3,)
+    assert_(x.ndim == 2)
+
+def test_repmat():
+    a1 = np.arange(4)
+    x = numpy.matlib.repmat(a1, 2, 2)
+    y = np.array([[0, 1, 2, 3, 0, 1, 2, 3],
+                  [0, 1, 2, 3, 0, 1, 2, 3]])
+    assert_array_equal(x, y)
diff --git a/python/numpy/tests/test_numpy_config.py b/python/numpy/tests/test_numpy_config.py
new file mode 100644
index 000000000..f01a27957
--- /dev/null
+++ b/python/numpy/tests/test_numpy_config.py
@@ -0,0 +1,46 @@
+"""
+Check the numpy config is valid.
+"""
+from unittest.mock import patch
+
+import pytest
+
+import numpy as np
+
+pytestmark = pytest.mark.skipif(
+    not hasattr(np.__config__, "_built_with_meson"),
+    reason="Requires Meson builds",
+)
+
+
+class TestNumPyConfigs:
+    REQUIRED_CONFIG_KEYS = [
+        "Compilers",
+        "Machine Information",
+        "Python Information",
+    ]
+
+    @patch("numpy.__config__._check_pyyaml")
+    def test_pyyaml_not_found(self, mock_yaml_importer):
+        mock_yaml_importer.side_effect = ModuleNotFoundError()
+        with pytest.warns(UserWarning):
+            np.show_config()
+
+    def test_dict_mode(self):
+        config = np.show_config(mode="dicts")
+
+        assert isinstance(config, dict)
+        assert all(key in config for key in self.REQUIRED_CONFIG_KEYS), (
+            "Required key missing,"
+            " see index of `False` with `REQUIRED_CONFIG_KEYS`"
+        )
+
+    def test_invalid_mode(self):
+        with pytest.raises(AttributeError):
+            np.show_config(mode="foo")
+
+    def test_warn_to_add_tests(self):
+        assert len(np.__config__.DisplayModes) == 2, (
+            "New mode detected,"
+            " please add UT if applicable and increment this count"
+        )
diff --git a/python/numpy/tests/test_numpy_version.py b/python/numpy/tests/test_numpy_version.py
new file mode 100644
index 000000000..ea164225f
--- /dev/null
+++ b/python/numpy/tests/test_numpy_version.py
@@ -0,0 +1,54 @@
+"""
+Check the numpy version is valid.
+
+Note that a development version is marked by the presence of 'dev0' or '+'
+in the version string, all else is treated as a release. The version string
+itself is set from the output of ``git describe`` which relies on tags.
+
+Examples
+--------
+
+Valid Development: 1.22.0.dev0 1.22.0.dev0+5-g7999db4df2 1.22.0+5-g7999db4df2
+Valid Release: 1.21.0.rc1, 1.21.0.b1, 1.21.0
+Invalid: 1.22.0.dev, 1.22.0.dev0-5-g7999db4dfB, 1.21.0.d1, 1.21.a
+
+Note that a release is determined by the version string, which in turn
+is controlled by the result of the ``git describe`` command.
+"""
+import re
+
+import numpy as np
+from numpy.testing import assert_
+
+
+def test_valid_numpy_version():
+    # Verify that the numpy version is a valid one (no .post suffix or other
+    # nonsense). See gh-6431 for an issue caused by an invalid version.
+    version_pattern = r"^[0-9]+\.[0-9]+\.[0-9]+(a[0-9]|b[0-9]|rc[0-9])?"
+    dev_suffix = r"(\.dev[0-9]+(\+git[0-9]+\.[0-9a-f]+)?)?"
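+    # Illustrative note: with the '$' anchor, the combined pattern accepts
+    # e.g. '1.22.0', '1.21.0rc1' and '1.22.0.dev0+git20210101.0123abc',
+    # while rejecting trailing junk such as '1.22.0.post1' or '1.22.0.dev'.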
+ res = re.match(version_pattern + dev_suffix + '$', np.__version__) + + assert_(res is not None, np.__version__) + + +def test_short_version(): + # Check numpy.short_version actually exists + if np.version.release: + assert_(np.__version__ == np.version.short_version, + "short_version mismatch in release version") + else: + assert_(np.__version__.split("+")[0] == np.version.short_version, + "short_version mismatch in development version") + + +def test_version_module(): + contents = {s for s in dir(np.version) if not s.startswith('_')} + expected = { + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', + } + + assert contents == expected diff --git a/python/numpy/tests/test_public_api.py b/python/numpy/tests/test_public_api.py new file mode 100644 index 000000000..a56cd1329 --- /dev/null +++ b/python/numpy/tests/test_public_api.py @@ -0,0 +1,806 @@ +import functools +import importlib +import inspect +import pkgutil +import subprocess +import sys +import sysconfig +import types +import warnings + +import pytest + +import numpy +import numpy as np +from numpy.testing import IS_WASM + +try: + import ctypes +except ImportError: + ctypes = None + + +def check_dir(module, module_name=None): + """Returns a mapping of all objects with the wrong __module__ attribute.""" + if module_name is None: + module_name = module.__name__ + results = {} + for name in dir(module): + if name == "core": + continue + item = getattr(module, name) + if (hasattr(item, '__module__') and hasattr(item, '__name__') + and item.__module__ != module_name): + results[name] = item.__module__ + '.' + item.__name__ + return results + + +def test_numpy_namespace(): + # We override dir to not show these members + allowlist = { + 'recarray': 'numpy.rec.recarray', + } + bad_results = check_dir(np) + # pytest gives better error messages with the builtin assert than with + # assert_equal + assert bad_results == allowlist + + +@pytest.mark.skipif(IS_WASM, reason="can't start subprocess") +@pytest.mark.parametrize('name', ['testing']) +def test_import_lazy_import(name): + """Make sure we can actually use the modules we lazy load. + + While not exported as part of the public API, it was accessible. With the + use of __getattr__ and __dir__, this isn't always true It can happen that + an infinite recursion may happen. + + This is the only way I found that would force the failure to appear on the + badly implemented code. + + We also test for the presence of the lazily imported modules in dir + + """ + exe = (sys.executable, '-c', "import numpy; numpy." + name) + result = subprocess.check_output(exe) + assert not result + + # Make sure they are still in the __dir__ + assert name in dir(np) + + +def test_dir_testing(): + """Assert that output of dir has only one "testing/tester" + attribute without duplicate""" + assert len(dir(np)) == len(set(dir(np))) + + +def test_numpy_linalg(): + bad_results = check_dir(np.linalg) + assert bad_results == {} + + +def test_numpy_fft(): + bad_results = check_dir(np.fft) + assert bad_results == {} + + +@pytest.mark.skipif(ctypes is None, + reason="ctypes not available in this python") +def test_NPY_NO_EXPORT(): + cdll = ctypes.CDLL(np._core._multiarray_tests.__file__) + # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden + f = getattr(cdll, 'test_not_exported', None) + assert f is None, ("'test_not_exported' is mistakenly exported, " + "NPY_NO_EXPORT does not work") + + +# Historically NumPy has not used leading underscores for private submodules +# much. 
This has resulted in lots of things that look like public modules +# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`), +# but were never intended to be public. The PUBLIC_MODULES list contains +# modules that are either public because they were meant to be, or because they +# contain public functions/objects that aren't present in any other namespace +# for whatever reason and therefore should be treated as public. +# +# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack +# of underscores) but should not be used. For many of those modules the +# current status is fine. For others it may make sense to work on making them +# private, to clean up our public API and avoid confusion. +PUBLIC_MODULES = ['numpy.' + s for s in [ + "ctypeslib", + "dtypes", + "exceptions", + "f2py", + "fft", + "lib", + "lib.array_utils", + "lib.format", + "lib.introspect", + "lib.mixins", + "lib.npyio", + "lib.recfunctions", # note: still needs cleaning, was forgotten for 2.0 + "lib.scimath", + "lib.stride_tricks", + "linalg", + "ma", + "ma.extras", + "ma.mrecords", + "polynomial", + "polynomial.chebyshev", + "polynomial.hermite", + "polynomial.hermite_e", + "polynomial.laguerre", + "polynomial.legendre", + "polynomial.polynomial", + "random", + "strings", + "testing", + "testing.overrides", + "typing", + "typing.mypy_plugin", + "version", +]] +if sys.version_info < (3, 12): + PUBLIC_MODULES += [ + 'numpy.' + s for s in [ + "distutils", + "distutils.cpuinfo", + "distutils.exec_command", + "distutils.misc_util", + "distutils.log", + "distutils.system_info", + ] + ] + + +PUBLIC_ALIASED_MODULES = [ + "numpy.char", + "numpy.emath", + "numpy.rec", +] + + +PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [ + "conftest", + "core", + "core.multiarray", + "core.numeric", + "core.umath", + "core.arrayprint", + "core.defchararray", + "core.einsumfunc", + "core.fromnumeric", + "core.function_base", + "core.getlimits", + "core.numerictypes", + "core.overrides", + "core.records", + "core.shape_base", + "f2py.auxfuncs", + "f2py.capi_maps", + "f2py.cb_rules", + "f2py.cfuncs", + "f2py.common_rules", + "f2py.crackfortran", + "f2py.diagnose", + "f2py.f2py2e", + "f2py.f90mod_rules", + "f2py.func2subr", + "f2py.rules", + "f2py.symbolic", + "f2py.use_rules", + "fft.helper", + "lib.user_array", # note: not in np.lib, but probably should just be deleted + "linalg.lapack_lite", + "linalg.linalg", + "ma.core", + "ma.testutils", + "matlib", + "matrixlib", + "matrixlib.defmatrix", + "polynomial.polyutils", + "random.mtrand", + "random.bit_generator", + "testing.print_coercion_tables", +]] +if sys.version_info < (3, 12): + PRIVATE_BUT_PRESENT_MODULES += [ + 'numpy.' 
+ s for s in [ + "distutils.armccompiler", + "distutils.fujitsuccompiler", + "distutils.ccompiler", + 'distutils.ccompiler_opt', + "distutils.command", + "distutils.command.autodist", + "distutils.command.bdist_rpm", + "distutils.command.build", + "distutils.command.build_clib", + "distutils.command.build_ext", + "distutils.command.build_py", + "distutils.command.build_scripts", + "distutils.command.build_src", + "distutils.command.config", + "distutils.command.config_compiler", + "distutils.command.develop", + "distutils.command.egg_info", + "distutils.command.install", + "distutils.command.install_clib", + "distutils.command.install_data", + "distutils.command.install_headers", + "distutils.command.sdist", + "distutils.conv_template", + "distutils.core", + "distutils.extension", + "distutils.fcompiler", + "distutils.fcompiler.absoft", + "distutils.fcompiler.arm", + "distutils.fcompiler.compaq", + "distutils.fcompiler.environment", + "distutils.fcompiler.g95", + "distutils.fcompiler.gnu", + "distutils.fcompiler.hpux", + "distutils.fcompiler.ibm", + "distutils.fcompiler.intel", + "distutils.fcompiler.lahey", + "distutils.fcompiler.mips", + "distutils.fcompiler.nag", + "distutils.fcompiler.none", + "distutils.fcompiler.pathf95", + "distutils.fcompiler.pg", + "distutils.fcompiler.nv", + "distutils.fcompiler.sun", + "distutils.fcompiler.vast", + "distutils.fcompiler.fujitsu", + "distutils.from_template", + "distutils.intelccompiler", + "distutils.lib2def", + "distutils.line_endings", + "distutils.mingw32ccompiler", + "distutils.msvccompiler", + "distutils.npy_pkg_config", + "distutils.numpy_distribution", + "distutils.pathccompiler", + "distutils.unixccompiler", + ] + ] + + +def is_unexpected(name): + """Check if this needs to be considered.""" + return ( + '._' not in name and '.tests' not in name and '.setup' not in name + and name not in PUBLIC_MODULES + and name not in PUBLIC_ALIASED_MODULES + and name not in PRIVATE_BUT_PRESENT_MODULES + ) + + +if sys.version_info >= (3, 12): + SKIP_LIST = [] +else: + SKIP_LIST = ["numpy.distutils.msvc9compiler"] + + +def test_all_modules_are_expected(): + """ + Test that we don't add anything that looks like a new public module by + accident. Check is based on filenames. + """ + + modnames = [] + for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__, + prefix=np.__name__ + '.', + onerror=None): + if is_unexpected(modname) and modname not in SKIP_LIST: + # We have a name that is new. If that's on purpose, add it to + # PUBLIC_MODULES. We don't expect to have to add anything to + # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name! + modnames.append(modname) + + if modnames: + raise AssertionError(f'Found unexpected modules: {modnames}') + + +# Stuff that clearly shouldn't be in the API and is detected by the next test +# below +SKIP_LIST_2 = [ + 'numpy.lib.math', + 'numpy.matlib.char', + 'numpy.matlib.rec', + 'numpy.matlib.emath', + 'numpy.matlib.exceptions', + 'numpy.matlib.math', + 'numpy.matlib.linalg', + 'numpy.matlib.fft', + 'numpy.matlib.random', + 'numpy.matlib.ctypeslib', + 'numpy.matlib.ma', +] +if sys.version_info < (3, 12): + SKIP_LIST_2 += [ + 'numpy.distutils.log.sys', + 'numpy.distutils.log.logging', + 'numpy.distutils.log.warnings', + ] + + +def test_all_modules_are_expected_2(): + """ + Method checking all objects. The pkgutil-based method in + `test_all_modules_are_expected` does not catch imports into a namespace, + only filenames. 
So this test is more thorough, and checks this like: + + import .lib.scimath as emath + + To check if something in a module is (effectively) public, one can check if + there's anything in that namespace that's a public function/object but is + not exposed in a higher-level namespace. For example for a `numpy.lib` + submodule:: + + mod = np.lib.mixins + for obj in mod.__all__: + if obj in np.__all__: + continue + elif obj in np.lib.__all__: + continue + + else: + print(obj) + + """ + + def find_unexpected_members(mod_name): + members = [] + module = importlib.import_module(mod_name) + if hasattr(module, '__all__'): + objnames = module.__all__ + else: + objnames = dir(module) + + for objname in objnames: + if not objname.startswith('_'): + fullobjname = mod_name + '.' + objname + if isinstance(getattr(module, objname), types.ModuleType): + if is_unexpected(fullobjname): + if fullobjname not in SKIP_LIST_2: + members.append(fullobjname) + + return members + + unexpected_members = find_unexpected_members("numpy") + for modname in PUBLIC_MODULES: + unexpected_members.extend(find_unexpected_members(modname)) + + if unexpected_members: + raise AssertionError("Found unexpected object(s) that look like " + f"modules: {unexpected_members}") + + +def test_api_importable(): + """ + Check that all submodules listed higher up in this file can be imported + + Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may + simply need to be removed from the list (deprecation may or may not be + needed - apply common sense). + """ + def check_importable(module_name): + try: + importlib.import_module(module_name) + except (ImportError, AttributeError): + return False + + return True + + module_names = [] + for module_name in PUBLIC_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that cannot be " + f"imported: {module_names}") + + for module_name in PUBLIC_ALIASED_MODULES: + try: + eval(module_name) + except AttributeError: + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules in the public API that were not " + f"found: {module_names}") + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', category=DeprecationWarning) + warnings.filterwarnings('always', category=ImportWarning) + for module_name in PRIVATE_BUT_PRESENT_MODULES: + if not check_importable(module_name): + module_names.append(module_name) + + if module_names: + raise AssertionError("Modules that are not really public but looked " + "public and can not be imported: " + f"{module_names}") + + +@pytest.mark.xfail( + sysconfig.get_config_var("Py_DEBUG") not in (None, 0, "0"), + reason=( + "NumPy possibly built with `USE_DEBUG=True ./tools/travis-test.sh`, " + "which does not expose the `array_api` entry point. " + "See https://github.com/numpy/numpy/pull/19800" + ), +) +def test_array_api_entry_point(): + """ + Entry point for Array API implementation can be found with importlib and + returns the main numpy namespace. + """ + # For a development install that did not go through meson-python, + # the entrypoint will not have been installed. So ensure this test fails + # only if numpy is inside site-packages. 
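+    # Illustrative note: an entry point like this is declared in the
+    # package metadata, e.g. (hypothetical pyproject.toml snippet):
+    #     [project.entry-points.array_api]
+    #     numpy = "numpy"
+    # importlib.metadata sees entry points of *all* installed
+    # distributions, hence the site-packages guard below.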
+ numpy_in_sitepackages = sysconfig.get_path('platlib') in np.__file__ + + eps = importlib.metadata.entry_points() + xp_eps = eps.select(group="array_api") + if len(xp_eps) == 0: + if numpy_in_sitepackages: + msg = "No entry points for 'array_api' found" + raise AssertionError(msg) from None + return + + try: + ep = next(ep for ep in xp_eps if ep.name == "numpy") + except StopIteration: + if numpy_in_sitepackages: + msg = "'numpy' not in array_api entry points" + raise AssertionError(msg) from None + return + + if ep.value == 'numpy.array_api': + # Looks like the entrypoint for the current numpy build isn't + # installed, but an older numpy is also installed and hence the + # entrypoint is pointing to the old (no longer existing) location. + # This isn't a problem except for when running tests with `spin` or an + # in-place build. + return + + xp = ep.load() + msg = ( + f"numpy entry point value '{ep.value}' " + "does not point to our Array API implementation" + ) + assert xp is numpy, msg + + +def test_main_namespace_all_dir_coherence(): + """ + Checks if `dir(np)` and `np.__all__` are consistent and return + the same content, excluding exceptions and private members. + """ + def _remove_private_members(member_set): + return {m for m in member_set if not m.startswith('_')} + + def _remove_exceptions(member_set): + return member_set.difference({ + "bool" # included only in __dir__ + }) + + all_members = _remove_private_members(np.__all__) + all_members = _remove_exceptions(all_members) + + dir_members = _remove_private_members(np.__dir__()) + dir_members = _remove_exceptions(dir_members) + + assert all_members == dir_members, ( + "Members that break symmetry: " + f"{all_members.symmetric_difference(dir_members)}" + ) + + +@pytest.mark.filterwarnings( + r"ignore:numpy.core(\.\w+)? is deprecated:DeprecationWarning" +) +def test_core_shims_coherence(): + """ + Check that all "semi-public" members of `numpy._core` are also accessible + from `numpy.core` shims. + """ + import numpy.core as core + + for member_name in dir(np._core): + # Skip private and test members. Also if a module is aliased, + # no need to add it to np.core + if ( + member_name.startswith("_") + or member_name in ["tests", "strings"] + or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES + ): + continue + + member = getattr(np._core, member_name) + + # np.core is a shim and all submodules of np.core are shims + # but we should be able to import everything in those shims + # that are available in the "real" modules in np._core, with + # the exception of the namespace packages (__spec__.origin is None), + # like numpy._core.include, or numpy._core.lib.pkgconfig. + if ( + inspect.ismodule(member) + and member.__spec__ and member.__spec__.origin is not None + ): + submodule = member + submodule_name = member_name + for submodule_member_name in dir(submodule): + # ignore dunder names + if submodule_member_name.startswith("__"): + continue + submodule_member = getattr(submodule, submodule_member_name) + + core_submodule = __import__( + f"numpy.core.{submodule_name}", + fromlist=[submodule_member_name] + ) + + assert submodule_member is getattr( + core_submodule, submodule_member_name + ) + + else: + assert member is getattr(core, member_name) + + +def test_functions_single_location(): + """ + Check that each public function is available from one location only. + + Test performs BFS search traversing NumPy's public API. It flags + any function-like object that is accessible from more that one place. 
+ """ + from collections.abc import Callable + from typing import Any + + from numpy._core._multiarray_umath import ( + _ArrayFunctionDispatcher as dispatched_function, + ) + + visited_modules: set[types.ModuleType] = {np} + visited_functions: set[Callable[..., Any]] = set() + # Functions often have `__name__` overridden, therefore we need + # to keep track of locations where functions have been found. + functions_original_paths: dict[Callable[..., Any], str] = {} + + # Here we aggregate functions with more than one location. + # It must be empty for the test to pass. + duplicated_functions: list[tuple] = [] + + modules_queue = [np] + + while len(modules_queue) > 0: + + module = modules_queue.pop() + + for member_name in dir(module): + member = getattr(module, member_name) + + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + "numpy._core" not in member.__name__ and # outside _core + # not a legacy or testing module + member_name not in ["f2py", "ma", "testing", "tests"] and + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + + # else check if we got a function-like object + elif ( + inspect.isfunction(member) or + isinstance(member, (dispatched_function, np.ufunc)) + ): + if member in visited_functions: + + # skip main namespace functions with aliases + if ( + member.__name__ in [ + "absolute", # np.abs + "arccos", # np.acos + "arccosh", # np.acosh + "arcsin", # np.asin + "arcsinh", # np.asinh + "arctan", # np.atan + "arctan2", # np.atan2 + "arctanh", # np.atanh + "left_shift", # np.bitwise_left_shift + "right_shift", # np.bitwise_right_shift + "conjugate", # np.conj + "invert", # np.bitwise_not & np.bitwise_invert + "remainder", # np.mod + "divide", # np.true_divide + "concatenate", # np.concat + "power", # np.pow + "transpose", # np.permute_dims + ] and + module.__name__ == "numpy" + ): + continue + # skip trimcoef from numpy.polynomial as it is + # duplicated by design. + if ( + member.__name__ == "trimcoef" and + module.__name__.startswith("numpy.polynomial") + ): + continue + + # skip ufuncs that are exported in np.strings as well + if member.__name__ in ( + "add", + "equal", + "not_equal", + "greater", + "greater_equal", + "less", + "less_equal", + ) and module.__name__ == "numpy.strings": + continue + + # numpy.char reexports all numpy.strings functions for + # backwards-compatibility + if module.__name__ == "numpy.char": + continue + + # function is present in more than one location! 
+ duplicated_functions.append( + (member.__name__, + module.__name__, + functions_original_paths[member]) + ) + else: + visited_functions.add(member) + functions_original_paths[member] = module.__name__ + + del visited_functions, visited_modules, functions_original_paths + + assert len(duplicated_functions) == 0, duplicated_functions + + +def test___module___attribute(): + modules_queue = [np] + visited_modules = {np} + visited_functions = set() + incorrect_entries = [] + + while len(modules_queue) > 0: + module = modules_queue.pop() + for member_name in dir(module): + member = getattr(module, member_name) + # first check if we got a module + if ( + inspect.ismodule(member) and # it's a module + "numpy" in member.__name__ and # inside NumPy + not member_name.startswith("_") and # not private + "numpy._core" not in member.__name__ and # outside _core + # not in a skip module list + member_name not in [ + "char", "core", "f2py", "ma", "lapack_lite", "mrecords", + "testing", "tests", "polynomial", "typing", "mtrand", + "bit_generator", + ] and + member not in visited_modules # not visited yet + ): + modules_queue.append(member) + visited_modules.add(member) + elif ( + not inspect.ismodule(member) and + hasattr(member, "__name__") and + not member.__name__.startswith("_") and + member.__module__ != module.__name__ and + member not in visited_functions + ): + # skip ufuncs that are exported in np.strings as well + if member.__name__ in ( + "add", "equal", "not_equal", "greater", "greater_equal", + "less", "less_equal", + ) and module.__name__ == "numpy.strings": + continue + + # recarray and record are exported in np and np.rec + if ( + (member.__name__ == "recarray" and module.__name__ == "numpy") or + (member.__name__ == "record" and module.__name__ == "numpy.rec") + ): + continue + + # ctypeslib exports ctypes c_long/c_longlong + if ( + member.__name__ in ("c_long", "c_longlong") and + module.__name__ == "numpy.ctypeslib" + ): + continue + + # skip cdef classes + if member.__name__ in ( + "BitGenerator", "Generator", "MT19937", "PCG64", "PCG64DXSM", + "Philox", "RandomState", "SFC64", "SeedSequence", + ): + continue + + incorrect_entries.append( + { + "Func": member.__name__, + "actual": member.__module__, + "expected": module.__name__, + } + ) + visited_functions.add(member) + + if incorrect_entries: + assert len(incorrect_entries) == 0, incorrect_entries + + +def _check_correct_qualname_and_module(obj) -> bool: + qualname = obj.__qualname__ + name = obj.__name__ + module_name = obj.__module__ + assert name == qualname.split(".")[-1] + + module = sys.modules[module_name] + actual_obj = functools.reduce(getattr, qualname.split("."), module) + return ( + actual_obj is obj or + # `obj` may be a bound method/property of `actual_obj`: + ( + hasattr(actual_obj, "__get__") and hasattr(obj, "__self__") and + actual_obj.__module__ == obj.__module__ and + actual_obj.__qualname__ == qualname + ) + ) + + +def test___qualname___and___module___attribute(): + # NumPy messes with module and name/qualname attributes, but any object + # should be discoverable based on its module and qualname, so test that. + # We do this for anything with a name (ensuring qualname is also set). 
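+    # E.g. an object advertising __module__ == "numpy" and
+    # __qualname__ == "ndarray.transpose" must be recoverable by chasing
+    # getattr through sys.modules["numpy"], as the helper above does.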
+    modules_queue = [np]
+    visited_modules = {np}
+    visited_functions = set()
+    incorrect_entries = []
+
+    while len(modules_queue) > 0:
+        module = modules_queue.pop()
+        for member_name in dir(module):
+            member = getattr(module, member_name)
+            # first check if we got a module
+            if (
+                inspect.ismodule(member) and  # it's a module
+                "numpy" in member.__name__ and  # inside NumPy
+                not member_name.startswith("_") and  # not private
+                member_name not in {"tests", "typing"} and  # 2024-12: type names don't match
+                "numpy._core" not in member.__name__ and  # outside _core
+                member not in visited_modules  # not visited yet
+            ):
+                modules_queue.append(member)
+                visited_modules.add(member)
+            elif (
+                not inspect.ismodule(member) and
+                hasattr(member, "__name__") and
+                not member.__name__.startswith("_") and
+                not member_name.startswith("_") and
+                not _check_correct_qualname_and_module(member) and
+                member not in visited_functions
+            ):
+                incorrect_entries.append(
+                    {
+                        "found_at": f"{module.__name__}:{member_name}",
+                        "advertises": f"{member.__module__}:{member.__qualname__}",
+                    }
+                )
+                visited_functions.add(member)
+
+    if incorrect_entries:
+        assert len(incorrect_entries) == 0, incorrect_entries
diff --git a/python/numpy/tests/test_reloading.py b/python/numpy/tests/test_reloading.py
new file mode 100644
index 000000000..c21dc007b
--- /dev/null
+++ b/python/numpy/tests/test_reloading.py
@@ -0,0 +1,74 @@
+import pickle
+import subprocess
+import sys
+import textwrap
+from importlib import reload
+
+import pytest
+
+import numpy.exceptions as ex
+from numpy.testing import (
+    IS_WASM,
+    assert_,
+    assert_equal,
+    assert_raises,
+    assert_warns,
+)
+
+
+def test_numpy_reloading():
+    # gh-7844. Also check that relevant globals retain their identity.
+    import numpy as np
+    import numpy._globals
+
+    _NoValue = np._NoValue
+    VisibleDeprecationWarning = ex.VisibleDeprecationWarning
+    ModuleDeprecationWarning = ex.ModuleDeprecationWarning
+
+    with assert_warns(UserWarning):
+        reload(np)
+    assert_(_NoValue is np._NoValue)
+    assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning)
+    assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning)
+
+    assert_raises(RuntimeError, reload, numpy._globals)
+    with assert_warns(UserWarning):
+        reload(np)
+    assert_(_NoValue is np._NoValue)
+    assert_(ModuleDeprecationWarning is ex.ModuleDeprecationWarning)
+    assert_(VisibleDeprecationWarning is ex.VisibleDeprecationWarning)
+
+def test_novalue():
+    import numpy as np
+    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+        assert_equal(repr(np._NoValue), '<no value>')
+        assert_(pickle.loads(pickle.dumps(np._NoValue,
+                                          protocol=proto)) is np._NoValue)
+
+
+@pytest.mark.skipif(IS_WASM, reason="can't start subprocess")
+def test_full_reimport():
+    """At the time of writing this, it is *not* truly supported, but
+    apparently enough users rely on it, for it to be an annoying change
+    when it started failing previously.
+    """
+    # Test within a new process, to ensure that we do not mess with the
+    # global state during the test run (could lead to cryptic test failures).
+    # This is generally unsafe, especially since we also reload the C-modules.
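+    # The subprocess below removes every entry whose name contains "numpy"
+    # from sys.modules and then imports numpy again from scratch, expecting
+    # the UserWarning that NumPy emits when it detects a full re-import.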
+ code = textwrap.dedent(r""" + import sys + from pytest import warns + import numpy as np + + for k in list(sys.modules.keys()): + if "numpy" in k: + del sys.modules[k] + + with warns(UserWarning): + import numpy as np + """) + p = subprocess.run([sys.executable, '-c', code], capture_output=True) + if p.returncode: + raise AssertionError( + f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}" + ) diff --git a/python/numpy/tests/test_scripts.py b/python/numpy/tests/test_scripts.py new file mode 100644 index 000000000..d8ce95887 --- /dev/null +++ b/python/numpy/tests/test_scripts.py @@ -0,0 +1,49 @@ +""" Test scripts + +Test that we can run executable scripts that have been installed with numpy. +""" +import os +import subprocess +import sys +from os.path import dirname, isfile +from os.path import join as pathjoin + +import pytest + +import numpy as np +from numpy.testing import IS_WASM, assert_equal + +is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) + + +def find_f2py_commands(): + if sys.platform == 'win32': + exe_dir = dirname(sys.executable) + if exe_dir.endswith('Scripts'): # virtualenv + return [os.path.join(exe_dir, 'f2py')] + else: + return [os.path.join(exe_dir, "Scripts", 'f2py')] + else: + # Three scripts are installed in Unix-like systems: + # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example, + # if installed with python3.9 the scripts would be named + # 'f2py', 'f2py3', and 'f2py3.9'. + version = sys.version_info + major = str(version.major) + minor = str(version.minor) + return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor] + + +@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") +@pytest.mark.xfail(reason="Test is unreliable") +@pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) +def test_f2py(f2py_cmd): + # test that we can run f2py script + stdout = subprocess.check_output([f2py_cmd, '-v']) + assert_equal(stdout.strip(), np.__version__.encode('ascii')) + + +@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess") +def test_pep338(): + stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v']) + assert_equal(stdout.strip(), np.__version__.encode('ascii')) diff --git a/python/numpy/tests/test_warnings.py b/python/numpy/tests/test_warnings.py new file mode 100644 index 000000000..560ee6143 --- /dev/null +++ b/python/numpy/tests/test_warnings.py @@ -0,0 +1,78 @@ +""" +Tests which scan for certain occurrences in the code, they may not find +all of these occurrences but should catch almost all. 
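+The checks are purely static (based on each file's ``ast``), so warnings
+that are constructed dynamically at runtime can still slip through.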
+""" +import ast +import tokenize +from pathlib import Path + +import pytest + +import numpy + + +class ParseCall(ast.NodeVisitor): + def __init__(self): + self.ls = [] + + def visit_Attribute(self, node): + ast.NodeVisitor.generic_visit(self, node) + self.ls.append(node.attr) + + def visit_Name(self, node): + self.ls.append(node.id) + + +class FindFuncs(ast.NodeVisitor): + def __init__(self, filename): + super().__init__() + self.__filename = filename + + def visit_Call(self, node): + p = ParseCall() + p.visit(node.func) + ast.NodeVisitor.generic_visit(self, node) + + if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings': + if node.args[0].value == "ignore": + raise AssertionError( + "warnings should have an appropriate stacklevel; " + f"found in {self.__filename} on line {node.lineno}") + + if p.ls[-1] == 'warn' and ( + len(p.ls) == 1 or p.ls[-2] == 'warnings'): + + if "testing/tests/test_warnings.py" == self.__filename: + # This file + return + + # See if stacklevel exists: + if len(node.args) == 3: + return + args = {kw.arg for kw in node.keywords} + if "stacklevel" in args: + return + raise AssertionError( + "warnings should have an appropriate stacklevel; " + f"found in {self.__filename} on line {node.lineno}") + + +@pytest.mark.slow +def test_warning_calls(): + # combined "ignore" and stacklevel error + base = Path(numpy.__file__).parent + + for path in base.rglob("*.py"): + if base / "testing" in path.parents: + continue + if path == base / "__init__.py": + continue + if path == base / "random" / "__init__.py": + continue + if path == base / "conftest.py": + continue + # use tokenize to auto-detect encoding on systems where no + # default encoding is defined (e.g. LANG='C') + with tokenize.open(str(path)) as file: + tree = ast.parse(file.read()) + FindFuncs(path).visit(tree) diff --git a/python/numpy/typing/__init__.py b/python/numpy/typing/__init__.py new file mode 100644 index 000000000..173c094b4 --- /dev/null +++ b/python/numpy/typing/__init__.py @@ -0,0 +1,201 @@ +""" +============================ +Typing (:mod:`numpy.typing`) +============================ + +.. versionadded:: 1.20 + +Large parts of the NumPy API have :pep:`484`-style type annotations. In +addition a number of type aliases are available to users, most prominently +the two below: + +- `ArrayLike`: objects that can be converted to arrays +- `DTypeLike`: objects that can be converted to dtypes + +.. _typing-extensions: https://pypi.org/project/typing-extensions/ + +Mypy plugin +----------- + +.. versionadded:: 1.21 + +.. automodule:: numpy.typing.mypy_plugin + +.. currentmodule:: numpy.typing + +Differences from the runtime NumPy API +-------------------------------------- + +NumPy is very flexible. Trying to describe the full range of +possibilities statically would result in types that are not very +helpful. For that reason, the typed NumPy API is often stricter than +the runtime NumPy API. This section describes some notable +differences. + +ArrayLike +~~~~~~~~~ + +The `ArrayLike` type tries to avoid creating object arrays. For +example, + +.. code-block:: python + + >>> np.array(x**2 for x in range(10)) + array( at ...>, dtype=object) + +is valid NumPy code which will create a 0-dimensional object +array. Type checkers will complain about the above example when using +the NumPy types however. If you really intended to do the above, then +you can either use a ``# type: ignore`` comment: + +.. 
+
+    >>> np.array(x**2 for x in range(10))  # type: ignore
+
+or explicitly type the array like object as `~typing.Any`:
+
+.. code-block:: python
+
+    >>> from typing import Any
+    >>> array_like: Any = (x**2 for x in range(10))
+    >>> np.array(array_like)
+    array(<generator object <genexpr> at ...>, dtype=object)
+
+ndarray
+~~~~~~~
+
+It's possible to mutate the dtype of an array at runtime. For example,
+the following code is valid:
+
+.. code-block:: python
+
+    >>> x = np.array([1, 2])
+    >>> x.dtype = np.bool
+
+This sort of mutation is not allowed by the types. Users who want to
+write statically typed code should instead use the `numpy.ndarray.view`
+method to create a view of the array with a different dtype.
+
+DTypeLike
+~~~~~~~~~
+
+The `DTypeLike` type tries to avoid creation of dtype objects using
+dictionary of fields like below:
+
+.. code-block:: python
+
+    >>> x = np.dtype({"field1": (float, 1), "field2": (int, 3)})
+
+Although this is valid NumPy code, the type checker will complain about it,
+since its usage is discouraged.
+Please see :ref:`Data type objects <arrays.dtypes>`.
+
+Number precision
+~~~~~~~~~~~~~~~~
+
+The precision of `numpy.number` subclasses is treated as a covariant generic
+parameter (see :class:`~NBitBase`), simplifying the annotating of processes
+involving precision-based casting.
+
+.. code-block:: python
+
+    >>> from typing import TypeVar
+    >>> import numpy as np
+    >>> import numpy.typing as npt
+
+    >>> T = TypeVar("T", bound=npt.NBitBase)
+    >>> def func(a: "np.floating[T]", b: "np.floating[T]") -> "np.floating[T]":
+    ...     ...
+
+Consequently, the likes of `~numpy.float16`, `~numpy.float32` and
+`~numpy.float64` are still sub-types of `~numpy.floating`, but, contrary to
+runtime, they're not necessarily considered as sub-classes.
+
+Timedelta64
+~~~~~~~~~~~
+
+The `~numpy.timedelta64` class is not considered a subclass of
+`~numpy.signedinteger`, the former only inheriting from `~numpy.generic`
+while static type checking.
+
+0D arrays
+~~~~~~~~~
+
+During runtime numpy aggressively casts any passed 0D arrays into their
+corresponding `~numpy.generic` instance. Until the introduction of shape
+typing (see :pep:`646`) it is unfortunately not possible to make the
+necessary distinction between 0D and >0D arrays. While thus not strictly
+correct, all operations that can potentially perform a 0D-array -> scalar
+cast are currently annotated as exclusively returning an `~numpy.ndarray`.
+
+If it is known in advance that an operation *will* perform a
+0D-array -> scalar cast, then one can consider manually remedying the
+situation with either `typing.cast` or a ``# type: ignore`` comment.
+
+Record array dtypes
+~~~~~~~~~~~~~~~~~~~
+
+The dtype of `numpy.recarray`, and the :ref:`routines.array-creation.rec`
+functions in general, can be specified in one of two ways:
+
+* Directly via the ``dtype`` argument.
+* With up to five helper arguments that operate via `numpy.rec.format_parser`:
+  ``formats``, ``names``, ``titles``, ``aligned`` and ``byteorder``.
+
+These two approaches are currently typed as being mutually exclusive,
+*i.e.* if ``dtype`` is specified then one may not specify ``formats``.
+While this mutual exclusivity is not (strictly) enforced during runtime,
+combining both dtype specifiers can lead to unexpected or even downright
+buggy behavior.
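+
+For example (an illustrative sketch, not part of the typed API reference),
+one would write either of the following calls, but never mix the two styles:
+
+.. code-block:: python
+
+    >>> np.rec.array([(1, 2.0)], dtype=[("a", np.int64), ("b", np.float64)])
+    >>> np.rec.array([(1, 2.0)], formats=["i8", "f8"], names=["a", "b"])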
+ +API +--- + +""" +# NOTE: The API section will be appended with additional entries +# further down in this file + +# pyright: reportDeprecated=false + +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray + +__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] + + +__DIR = __all__ + [k for k in globals() if k.startswith("__") and k.endswith("__")] +__DIR_SET = frozenset(__DIR) + + +def __dir__() -> list[str]: + return __DIR + +def __getattr__(name: str): + if name == "NBitBase": + import warnings + + # Deprecated in NumPy 2.3, 2025-05-01 + warnings.warn( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", + DeprecationWarning, + stacklevel=2, + ) + return NBitBase + + if name in __DIR_SET: + return globals()[name] + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + +if __doc__ is not None: + from numpy._typing._add_docstring import _docstrings + __doc__ += _docstrings + __doc__ += '\n.. autoclass:: numpy.typing.NBitBase\n' + del _docstrings + +from numpy._pytesttester import PytestTester + +test = PytestTester(__name__) +del PytestTester diff --git a/python/numpy/typing/__pycache__/__init__.cpython-312.pyc b/python/numpy/typing/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..13fae3b58 Binary files /dev/null and b/python/numpy/typing/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc b/python/numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc new file mode 100644 index 000000000..dcba9bdd1 Binary files /dev/null and b/python/numpy/typing/__pycache__/mypy_plugin.cpython-312.pyc differ diff --git a/python/numpy/typing/mypy_plugin.py b/python/numpy/typing/mypy_plugin.py new file mode 100644 index 000000000..dc1e2564f --- /dev/null +++ b/python/numpy/typing/mypy_plugin.py @@ -0,0 +1,195 @@ +"""A mypy_ plugin for managing a number of platform-specific annotations. +Its functionality can be split into three distinct parts: + +* Assigning the (platform-dependent) precisions of certain `~numpy.number` + subclasses, including the likes of `~numpy.int_`, `~numpy.intp` and + `~numpy.longlong`. See the documentation on + :ref:`scalar types ` for a comprehensive overview + of the affected classes. Without the plugin the precision of all relevant + classes will be inferred as `~typing.Any`. +* Removing all extended-precision `~numpy.number` subclasses that are + unavailable for the platform in question. Most notably this includes the + likes of `~numpy.float128` and `~numpy.complex256`. Without the plugin *all* + extended-precision types will, as far as mypy is concerned, be available + to all platforms. +* Assigning the (platform-dependent) precision of `~numpy.ctypeslib.c_intp`. + Without the plugin the type will default to `ctypes.c_int64`. + + .. versionadded:: 1.22 + +.. deprecated:: 2.3 + +Examples +-------- +To enable the plugin, one must add it to their mypy `configuration file`_: + +.. code-block:: ini + + [mypy] + plugins = numpy.typing.mypy_plugin + +.. _mypy: https://mypy-lang.org/ +.. 
_configuration file: https://mypy.readthedocs.io/en/stable/config_file.html + +""" + +from collections.abc import Callable, Iterable +from typing import TYPE_CHECKING, Final, TypeAlias, cast + +import numpy as np + +__all__: list[str] = [] + + +def _get_precision_dict() -> dict[str, str]: + names = [ + ("_NBitByte", np.byte), + ("_NBitShort", np.short), + ("_NBitIntC", np.intc), + ("_NBitIntP", np.intp), + ("_NBitInt", np.int_), + ("_NBitLong", np.long), + ("_NBitLongLong", np.longlong), + + ("_NBitHalf", np.half), + ("_NBitSingle", np.single), + ("_NBitDouble", np.double), + ("_NBitLongDouble", np.longdouble), + ] + ret: dict[str, str] = {} + for name, typ in names: + n = 8 * np.dtype(typ).itemsize + ret[f"{_MODULE}._nbit.{name}"] = f"{_MODULE}._nbit_base._{n}Bit" + return ret + + +def _get_extended_precision_list() -> list[str]: + extended_names = [ + "float96", + "float128", + "complex192", + "complex256", + ] + return [i for i in extended_names if hasattr(np, i)] + +def _get_c_intp_name() -> str: + # Adapted from `np.core._internal._getintp_ctype` + return { + "i": "c_int", + "l": "c_long", + "q": "c_longlong", + }.get(np.dtype("n").char, "c_long") + + +_MODULE: Final = "numpy._typing" + +#: A dictionary mapping type-aliases in `numpy._typing._nbit` to +#: concrete `numpy.typing.NBitBase` subclasses. +_PRECISION_DICT: Final = _get_precision_dict() + +#: A list with the names of all extended precision `np.number` subclasses. +_EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list() + +#: The name of the ctypes equivalent of `np.intp` +_C_INTP: Final = _get_c_intp_name() + + +try: + if TYPE_CHECKING: + from mypy.typeanal import TypeAnalyser + + import mypy.types + from mypy.build import PRI_MED + from mypy.nodes import ImportFrom, MypyFile, Statement + from mypy.plugin import AnalyzeTypeContext, Plugin + +except ModuleNotFoundError as e: + + def plugin(version: str) -> type: + raise e + +else: + + _HookFunc: TypeAlias = Callable[[AnalyzeTypeContext], mypy.types.Type] + + def _hook(ctx: AnalyzeTypeContext) -> mypy.types.Type: + """Replace a type-alias with a concrete ``NBitBase`` subclass.""" + typ, _, api = ctx + name = typ.name.split(".")[-1] + name_new = _PRECISION_DICT[f"{_MODULE}._nbit.{name}"] + return cast("TypeAnalyser", api).named_type(name_new) + + def _index(iterable: Iterable[Statement], id: str) -> int: + """Identify the first ``ImportFrom`` instance the specified `id`.""" + for i, value in enumerate(iterable): + if getattr(value, "id", None) == id: + return i + raise ValueError("Failed to identify a `ImportFrom` instance " + f"with the following id: {id!r}") + + def _override_imports( + file: MypyFile, + module: str, + imports: list[tuple[str, str | None]], + ) -> None: + """Override the first `module`-based import with new `imports`.""" + # Construct a new `from module import y` statement + import_obj = ImportFrom(module, 0, names=imports) + import_obj.is_top_level = True + + # Replace the first `module`-based import statement with `import_obj` + for lst in [file.defs, cast("list[Statement]", file.imports)]: + i = _index(lst, module) + lst[i] = import_obj + + class _NumpyPlugin(Plugin): + """A mypy plugin for handling versus numpy-specific typing tasks.""" + + def get_type_analyze_hook(self, fullname: str) -> _HookFunc | None: + """Set the precision of platform-specific `numpy.number` + subclasses. + + For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`. 
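+        Without the plugin (see the module docstring), the precision of
+        these aliases would be inferred as `~typing.Any`.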
+ """ + if fullname in _PRECISION_DICT: + return _hook + return None + + def get_additional_deps( + self, file: MypyFile + ) -> list[tuple[int, str, int]]: + """Handle all import-based overrides. + + * Import platform-specific extended-precision `numpy.number` + subclasses (*e.g.* `numpy.float96` and `numpy.float128`). + * Import the appropriate `ctypes` equivalent to `numpy.intp`. + + """ + fullname = file.fullname + if fullname == "numpy": + _override_imports( + file, + f"{_MODULE}._extended_precision", + imports=[(v, v) for v in _EXTENDED_PRECISION_LIST], + ) + elif fullname == "numpy.ctypeslib": + _override_imports( + file, + "ctypes", + imports=[(_C_INTP, "_c_intp")], + ) + return [(PRI_MED, fullname, -1)] + + def plugin(version: str) -> type: + import warnings + + plugin = "numpy.typing.mypy_plugin" + # Deprecated 2025-01-10, NumPy 2.3 + warn_msg = ( + f"`{plugin}` is deprecated, and will be removed in a future " + f"release. Please remove `plugins = {plugin}` in your mypy config." + f"(deprecated in NumPy 2.3)" + ) + warnings.warn(warn_msg, DeprecationWarning, stacklevel=3) + + return _NumpyPlugin diff --git a/python/numpy/typing/tests/__init__.py b/python/numpy/typing/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/numpy/typing/tests/__pycache__/__init__.cpython-312.pyc b/python/numpy/typing/tests/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..c8ae95e1a Binary files /dev/null and b/python/numpy/typing/tests/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc b/python/numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc new file mode 100644 index 000000000..cae72109a Binary files /dev/null and b/python/numpy/typing/tests/__pycache__/test_isfile.cpython-312.pyc differ diff --git a/python/numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc b/python/numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc new file mode 100644 index 000000000..e72e3edcd Binary files /dev/null and b/python/numpy/typing/tests/__pycache__/test_runtime.cpython-312.pyc differ diff --git a/python/numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc b/python/numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc new file mode 100644 index 000000000..a1d008238 Binary files /dev/null and b/python/numpy/typing/tests/__pycache__/test_typing.cpython-312.pyc differ diff --git a/python/numpy/typing/tests/data/fail/arithmetic.pyi b/python/numpy/typing/tests/data/fail/arithmetic.pyi new file mode 100644 index 000000000..968d95cca --- /dev/null +++ b/python/numpy/typing/tests/data/fail/arithmetic.pyi @@ -0,0 +1,126 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +b_ = np.bool() +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +AR_b: npt.NDArray[np.bool] +AR_u: npt.NDArray[np.uint32] +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.longdouble] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +ANY: Any + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_m: list[np.timedelta64] +AR_LIKE_M: list[np.datetime64] + +# Array subtraction + +# NOTE: mypys `NoReturn` errors are, unfortunately, not that great +_1 = AR_b - AR_LIKE_b # type: ignore[var-annotated] +_2 = AR_LIKE_b - AR_b # type: ignore[var-annotated] +AR_i - bytes() # type: ignore[operator] + +AR_f - AR_LIKE_m # type: 
ignore[operator] +AR_f - AR_LIKE_M # type: ignore[operator] +AR_c - AR_LIKE_m # type: ignore[operator] +AR_c - AR_LIKE_M # type: ignore[operator] + +AR_m - AR_LIKE_f # type: ignore[operator] +AR_M - AR_LIKE_f # type: ignore[operator] +AR_m - AR_LIKE_c # type: ignore[operator] +AR_M - AR_LIKE_c # type: ignore[operator] + +AR_m - AR_LIKE_M # type: ignore[operator] +AR_LIKE_m - AR_M # type: ignore[operator] + +# array floor division + +AR_M // AR_LIKE_b # type: ignore[operator] +AR_M // AR_LIKE_u # type: ignore[operator] +AR_M // AR_LIKE_i # type: ignore[operator] +AR_M // AR_LIKE_f # type: ignore[operator] +AR_M // AR_LIKE_c # type: ignore[operator] +AR_M // AR_LIKE_m # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +AR_b // AR_LIKE_M # type: ignore[operator] +AR_u // AR_LIKE_M # type: ignore[operator] +AR_i // AR_LIKE_M # type: ignore[operator] +AR_f // AR_LIKE_M # type: ignore[operator] +AR_c // AR_LIKE_M # type: ignore[operator] +AR_m // AR_LIKE_M # type: ignore[operator] +AR_M // AR_LIKE_M # type: ignore[operator] + +_3 = AR_m // AR_LIKE_b # type: ignore[var-annotated] +AR_m // AR_LIKE_c # type: ignore[operator] + +AR_b // AR_LIKE_m # type: ignore[operator] +AR_u // AR_LIKE_m # type: ignore[operator] +AR_i // AR_LIKE_m # type: ignore[operator] +AR_f // AR_LIKE_m # type: ignore[operator] +AR_c // AR_LIKE_m # type: ignore[operator] + +# regression tests for https://github.com/numpy/numpy/issues/28957 +AR_c // 2 # type: ignore[operator] +AR_c // AR_i # type: ignore[operator] +AR_c // AR_c # type: ignore[operator] + +# Array multiplication + +AR_b *= AR_LIKE_u # type: ignore[arg-type] +AR_b *= AR_LIKE_i # type: ignore[arg-type] +AR_b *= AR_LIKE_f # type: ignore[arg-type] +AR_b *= AR_LIKE_c # type: ignore[arg-type] +AR_b *= AR_LIKE_m # type: ignore[arg-type] + +AR_u *= AR_LIKE_f # type: ignore[arg-type] +AR_u *= AR_LIKE_c # type: ignore[arg-type] +AR_u *= AR_LIKE_m # type: ignore[arg-type] + +AR_i *= AR_LIKE_f # type: ignore[arg-type] +AR_i *= AR_LIKE_c # type: ignore[arg-type] +AR_i *= AR_LIKE_m # type: ignore[arg-type] + +AR_f *= AR_LIKE_c # type: ignore[arg-type] +AR_f *= AR_LIKE_m # type: ignore[arg-type] + +# Array power + +AR_b **= AR_LIKE_b # type: ignore[misc] +AR_b **= AR_LIKE_u # type: ignore[misc] +AR_b **= AR_LIKE_i # type: ignore[misc] +AR_b **= AR_LIKE_f # type: ignore[misc] +AR_b **= AR_LIKE_c # type: ignore[misc] + +AR_u **= AR_LIKE_f # type: ignore[arg-type] +AR_u **= AR_LIKE_c # type: ignore[arg-type] + +AR_i **= AR_LIKE_f # type: ignore[arg-type] +AR_i **= AR_LIKE_c # type: ignore[arg-type] + +AR_f **= AR_LIKE_c # type: ignore[arg-type] + +# Scalars + +b_ - b_ # type: ignore[operator] + +dt + dt # type: ignore[operator] +td - dt # type: ignore[operator] +td % 1 # type: ignore[operator] +td / dt # type: ignore[operator] +td % dt # type: ignore[operator] + +-b_ # type: ignore[operator] ++b_ # type: ignore[operator] diff --git a/python/numpy/typing/tests/data/fail/array_constructors.pyi b/python/numpy/typing/tests/data/fail/array_constructors.pyi new file mode 100644 index 000000000..cadc2ae59 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/array_constructors.pyi @@ -0,0 +1,34 @@ +import numpy as np +import numpy.typing as npt + +a: npt.NDArray[np.float64] +generator = (i for i in range(10)) + +np.require(a, requirements=1) # type: ignore[call-overload] +np.require(a, requirements="TEST") # type: ignore[arg-type] + +np.zeros("test") # type: ignore[arg-type] +np.zeros() # type: ignore[call-overload] + +np.ones("test") # type: ignore[arg-type] 
+np.ones() # type: ignore[call-overload] + +np.array(0, float, True) # type: ignore[call-overload] + +np.linspace(None, 'bob') # type: ignore[call-overload] +np.linspace(0, 2, num=10.0) # type: ignore[call-overload] +np.linspace(0, 2, endpoint='True') # type: ignore[call-overload] +np.linspace(0, 2, retstep=b'False') # type: ignore[call-overload] +np.linspace(0, 2, dtype=0) # type: ignore[call-overload] +np.linspace(0, 2, axis=None) # type: ignore[call-overload] + +np.logspace(None, 'bob') # type: ignore[call-overload] +np.logspace(0, 2, base=None) # type: ignore[call-overload] + +np.geomspace(None, 'bob') # type: ignore[call-overload] + +np.stack(generator) # type: ignore[call-overload] +np.hstack({1, 2}) # type: ignore[call-overload] +np.vstack(1) # type: ignore[call-overload] + +np.array([1], like=1) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/array_like.pyi b/python/numpy/typing/tests/data/fail/array_like.pyi new file mode 100644 index 000000000..4e37354e8 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/array_like.pyi @@ -0,0 +1,15 @@ +import numpy as np +from numpy._typing import ArrayLike + +class A: ... + +x1: ArrayLike = (i for i in range(10)) # type: ignore[assignment] +x2: ArrayLike = A() # type: ignore[assignment] +x3: ArrayLike = {1: "foo", 2: "bar"} # type: ignore[assignment] + +scalar = np.int64(1) +scalar.__array__(dtype=np.float64) # type: ignore[call-overload] +array = np.array([1]) +array.__array__(dtype=np.float64) # type: ignore[call-overload] + +array.setfield(np.eye(1), np.int32, (0, 1)) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/array_pad.pyi b/python/numpy/typing/tests/data/fail/array_pad.pyi new file mode 100644 index 000000000..42e61c8d7 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/array_pad.pyi @@ -0,0 +1,6 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] + +np.pad(AR_i8, 2, mode="bob") # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/arrayprint.pyi b/python/numpy/typing/tests/data/fail/arrayprint.pyi new file mode 100644 index 000000000..224a4105b --- /dev/null +++ b/python/numpy/typing/tests/data/fail/arrayprint.pyi @@ -0,0 +1,16 @@ +from collections.abc import Callable +from typing import Any + +import numpy as np +import numpy.typing as npt + +AR: npt.NDArray[np.float64] +func1: Callable[[Any], str] +func2: Callable[[np.integer], str] + +np.array2string(AR, style=None) # type: ignore[call-overload] +np.array2string(AR, legacy="1.14") # type: ignore[call-overload] +np.array2string(AR, sign="*") # type: ignore[call-overload] +np.array2string(AR, floatmode="default") # type: ignore[call-overload] +np.array2string(AR, formatter={"A": func1}) # type: ignore[call-overload] +np.array2string(AR, formatter={"float": func2}) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/arrayterator.pyi b/python/numpy/typing/tests/data/fail/arrayterator.pyi new file mode 100644 index 000000000..8d2295a58 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/arrayterator.pyi @@ -0,0 +1,14 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +ar_iter = np.lib.Arrayterator(AR_i8) + +np.lib.Arrayterator(np.int64()) # type: ignore[arg-type] +ar_iter.shape = (10, 5) # type: ignore[misc] +ar_iter[None] # type: ignore[index] +ar_iter[None, 1] # type: ignore[index] +ar_iter[np.intp()] # type: ignore[index] +ar_iter[np.intp(), ...] 
# type: ignore[index] +ar_iter[AR_i8] # type: ignore[index] +ar_iter[AR_i8, :] # type: ignore[index] diff --git a/python/numpy/typing/tests/data/fail/bitwise_ops.pyi b/python/numpy/typing/tests/data/fail/bitwise_ops.pyi new file mode 100644 index 000000000..1b8d023a1 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/bitwise_ops.pyi @@ -0,0 +1,17 @@ +import numpy as np + +i8 = np.int64() +i4 = np.int32() +u8 = np.uint64() +b_ = np.bool() +i = int() + +f8 = np.float64() + +b_ >> f8 # type: ignore[operator] +i8 << f8 # type: ignore[operator] +i | f8 # type: ignore[operator] +i8 ^ f8 # type: ignore[operator] +u8 & f8 # type: ignore[operator] +~f8 # type: ignore[operator] +# TODO: Certain mixes like i4 << u8 go to float and thus should fail diff --git a/python/numpy/typing/tests/data/fail/char.pyi b/python/numpy/typing/tests/data/fail/char.pyi new file mode 100644 index 000000000..62c4475c2 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/char.pyi @@ -0,0 +1,65 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] + +np.char.equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.char.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.char.greater(AR_U, AR_S) # type: ignore[arg-type] +np.char.less(AR_U, AR_S) # type: ignore[arg-type] + +np.char.encode(AR_S) # type: ignore[arg-type] +np.char.decode(AR_U) # type: ignore[arg-type] + +np.char.join(AR_U, b"_") # type: ignore[arg-type] +np.char.join(AR_S, "_") # type: ignore[arg-type] + +np.char.ljust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] +np.char.ljust(AR_S, 5, fillchar="a") # type: ignore[arg-type] +np.char.rjust(AR_U, 5, fillchar=b"a") # type: ignore[arg-type] +np.char.rjust(AR_S, 5, fillchar="a") # type: ignore[arg-type] + +np.char.lstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.lstrip(AR_S, chars="a") # type: ignore[arg-type] +np.char.strip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.strip(AR_S, chars="a") # type: ignore[arg-type] +np.char.rstrip(AR_U, chars=b"a") # type: ignore[arg-type] +np.char.rstrip(AR_S, chars="a") # type: ignore[arg-type] + +np.char.partition(AR_U, b"a") # type: ignore[arg-type] +np.char.partition(AR_S, "a") # type: ignore[arg-type] +np.char.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.char.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.char.replace(AR_U, b"_", b"-") # type: ignore[arg-type] +np.char.replace(AR_S, "_", "-") # type: ignore[arg-type] + +np.char.split(AR_U, b"_") # type: ignore[arg-type] +np.char.split(AR_S, "_") # type: ignore[arg-type] +np.char.rsplit(AR_U, b"_") # type: ignore[arg-type] +np.char.rsplit(AR_S, "_") # type: ignore[arg-type] + +np.char.count(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.count(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.endswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.endswith(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.startswith(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.startswith(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.find(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.find(AR_S, "a", end=9) # type: ignore[arg-type] +np.char.rfind(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rfind(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.index(AR_S, "a", end=9) # 
type: ignore[arg-type] +np.char.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.char.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.char.isdecimal(AR_S) # type: ignore[arg-type] +np.char.isnumeric(AR_S) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/chararray.pyi b/python/numpy/typing/tests/data/fail/chararray.pyi new file mode 100644 index 000000000..fb52f7349 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/chararray.pyi @@ -0,0 +1,62 @@ +from typing import Any +import numpy as np + +AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] +AR_S: np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] + +AR_S.encode() # type: ignore[misc] +AR_U.decode() # type: ignore[misc] + +AR_U.join(b"_") # type: ignore[arg-type] +AR_S.join("_") # type: ignore[arg-type] + +AR_U.ljust(5, fillchar=b"a") # type: ignore[arg-type] +AR_S.ljust(5, fillchar="a") # type: ignore[arg-type] +AR_U.rjust(5, fillchar=b"a") # type: ignore[arg-type] +AR_S.rjust(5, fillchar="a") # type: ignore[arg-type] + +AR_U.lstrip(chars=b"a") # type: ignore[arg-type] +AR_S.lstrip(chars="a") # type: ignore[arg-type] +AR_U.strip(chars=b"a") # type: ignore[arg-type] +AR_S.strip(chars="a") # type: ignore[arg-type] +AR_U.rstrip(chars=b"a") # type: ignore[arg-type] +AR_S.rstrip(chars="a") # type: ignore[arg-type] + +AR_U.partition(b"a") # type: ignore[arg-type] +AR_S.partition("a") # type: ignore[arg-type] +AR_U.rpartition(b"a") # type: ignore[arg-type] +AR_S.rpartition("a") # type: ignore[arg-type] + +AR_U.replace(b"_", b"-") # type: ignore[arg-type] +AR_S.replace("_", "-") # type: ignore[arg-type] + +AR_U.split(b"_") # type: ignore[arg-type] +AR_S.split("_") # type: ignore[arg-type] +AR_S.split(1) # type: ignore[arg-type] +AR_U.rsplit(b"_") # type: ignore[arg-type] +AR_S.rsplit("_") # type: ignore[arg-type] + +AR_U.count(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.count("a", end=9) # type: ignore[arg-type] + +AR_U.endswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.endswith("a", end=9) # type: ignore[arg-type] +AR_U.startswith(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.startswith("a", end=9) # type: ignore[arg-type] + +AR_U.find(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.find("a", end=9) # type: ignore[arg-type] +AR_U.rfind(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rfind("a", end=9) # type: ignore[arg-type] + +AR_U.index(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.index("a", end=9) # type: ignore[arg-type] +AR_U.rindex(b"a", start=[1, 2, 3]) # type: ignore[arg-type] +AR_S.rindex("a", end=9) # type: ignore[arg-type] + +AR_U == AR_S # type: ignore[operator] +AR_U != AR_S # type: ignore[operator] +AR_U >= AR_S # type: ignore[operator] +AR_U <= AR_S # type: ignore[operator] +AR_U > AR_S # type: ignore[operator] +AR_U < AR_S # type: ignore[operator] diff --git a/python/numpy/typing/tests/data/fail/comparisons.pyi b/python/numpy/typing/tests/data/fail/comparisons.pyi new file mode 100644 index 000000000..3c8a94bff --- /dev/null +++ b/python/numpy/typing/tests/data/fail/comparisons.pyi @@ -0,0 +1,27 @@ +import numpy as np +import numpy.typing as npt + +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +AR_f > AR_m # type: ignore[operator] +AR_c > AR_m # type: ignore[operator] + +AR_m > AR_f # type: ignore[operator] +AR_m > AR_c # type: ignore[operator] + +AR_i > AR_M # type: ignore[operator] +AR_f > AR_M 
# type: ignore[operator] +AR_m > AR_M # type: ignore[operator] + +AR_M > AR_i # type: ignore[operator] +AR_M > AR_f # type: ignore[operator] +AR_M > AR_m # type: ignore[operator] + +AR_i > str() # type: ignore[operator] +AR_i > bytes() # type: ignore[operator] +str() > AR_M # type: ignore[operator] +bytes() > AR_M # type: ignore[operator] diff --git a/python/numpy/typing/tests/data/fail/constants.pyi b/python/numpy/typing/tests/data/fail/constants.pyi new file mode 100644 index 000000000..10717f664 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/constants.pyi @@ -0,0 +1,3 @@ +import numpy as np + +np.little_endian = np.little_endian # type: ignore[misc] diff --git a/python/numpy/typing/tests/data/fail/datasource.pyi b/python/numpy/typing/tests/data/fail/datasource.pyi new file mode 100644 index 000000000..267b672ba --- /dev/null +++ b/python/numpy/typing/tests/data/fail/datasource.pyi @@ -0,0 +1,15 @@ +from pathlib import Path +import numpy as np + +path: Path +d1: np.lib.npyio.DataSource + +d1.abspath(path) # type: ignore[arg-type] +d1.abspath(b"...") # type: ignore[arg-type] + +d1.exists(path) # type: ignore[arg-type] +d1.exists(b"...") # type: ignore[arg-type] + +d1.open(path, "r") # type: ignore[arg-type] +d1.open(b"...", encoding="utf8") # type: ignore[arg-type] +d1.open(None, newline="/n") # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/dtype.pyi b/python/numpy/typing/tests/data/fail/dtype.pyi new file mode 100644 index 000000000..64a7c3f77 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/dtype.pyi @@ -0,0 +1,17 @@ +import numpy as np + +class Test1: + not_dtype = np.dtype(float) + +class Test2: + dtype = float + +np.dtype(Test1()) # type: ignore[call-overload] +np.dtype(Test2()) # type: ignore[arg-type] + +np.dtype( # type: ignore[call-overload] + { + "field1": (float, 1), + "field2": (int, 3), + } +) diff --git a/python/numpy/typing/tests/data/fail/einsumfunc.pyi b/python/numpy/typing/tests/data/fail/einsumfunc.pyi new file mode 100644 index 000000000..982ad9862 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/einsumfunc.pyi @@ -0,0 +1,12 @@ +import numpy as np +import numpy.typing as npt + +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_m: npt.NDArray[np.timedelta64] +AR_U: npt.NDArray[np.str_] + +np.einsum("i,i->i", AR_i, AR_m) # type: ignore[arg-type] +np.einsum("i,i->i", AR_f, AR_f, dtype=np.int32) # type: ignore[arg-type] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U) # type: ignore[type-var] +np.einsum("i,i->i", AR_i, AR_i, out=AR_U, casting="unsafe") # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/flatiter.pyi b/python/numpy/typing/tests/data/fail/flatiter.pyi new file mode 100644 index 000000000..06e23fed9 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/flatiter.pyi @@ -0,0 +1,20 @@ +import numpy as np +import numpy._typing as npt + +class Index: + def __index__(self) -> int: ... 
+ +a: np.flatiter[npt.NDArray[np.float64]] +supports_array: npt._SupportsArray[np.dtype[np.float64]] + +a.base = object() # type: ignore[assignment, misc] +a.coords = object() # type: ignore[assignment, misc] +a.index = object() # type: ignore[assignment, misc] +a.copy(order='C') # type: ignore[call-arg] + +# NOTE: Contrary to `ndarray.__getitem__` its counterpart in `flatiter` +# does not accept objects with the `__array__` or `__index__` protocols; +# boolean indexing is just plain broken (gh-17175) +a[np.bool()] # type: ignore[index] +a[Index()] # type: ignore[call-overload] +a[supports_array] # type: ignore[index] diff --git a/python/numpy/typing/tests/data/fail/fromnumeric.pyi b/python/numpy/typing/tests/data/fail/fromnumeric.pyi new file mode 100644 index 000000000..51ef26810 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/fromnumeric.pyi @@ -0,0 +1,148 @@ +"""Tests for :mod:`numpy._core.fromnumeric`.""" + +import numpy as np +import numpy.typing as npt + +A = np.array(True, ndmin=2, dtype=bool) +A.setflags(write=False) +AR_U: npt.NDArray[np.str_] +AR_M: npt.NDArray[np.datetime64] +AR_f4: npt.NDArray[np.float32] + +a = np.bool(True) + +np.take(a, None) # type: ignore[call-overload] +np.take(a, axis=1.0) # type: ignore[call-overload] +np.take(A, out=1) # type: ignore[call-overload] +np.take(A, mode="bob") # type: ignore[call-overload] + +np.reshape(a, None) # type: ignore[call-overload] +np.reshape(A, 1, order="bob") # type: ignore[call-overload] + +np.choose(a, None) # type: ignore[call-overload] +np.choose(a, out=1.0) # type: ignore[call-overload] +np.choose(A, mode="bob") # type: ignore[call-overload] + +np.repeat(a, None) # type: ignore[call-overload] +np.repeat(A, 1, axis=1.0) # type: ignore[call-overload] + +np.swapaxes(A, None, 1) # type: ignore[call-overload] +np.swapaxes(A, 1, [0]) # type: ignore[call-overload] + +np.transpose(A, axes=1.0) # type: ignore[call-overload] + +np.partition(a, None) # type: ignore[call-overload] +np.partition(a, 0, axis="bob") # type: ignore[call-overload] +np.partition(A, 0, kind="bob") # type: ignore[call-overload] +np.partition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.argpartition(a, None) # type: ignore[arg-type] +np.argpartition(a, 0, axis="bob") # type: ignore[arg-type] +np.argpartition(A, 0, kind="bob") # type: ignore[arg-type] +np.argpartition(A, 0, order=range(5)) # type: ignore[arg-type] + +np.sort(A, axis="bob") # type: ignore[call-overload] +np.sort(A, kind="bob") # type: ignore[call-overload] +np.sort(A, order=range(5)) # type: ignore[arg-type] + +np.argsort(A, axis="bob") # type: ignore[arg-type] +np.argsort(A, kind="bob") # type: ignore[arg-type] +np.argsort(A, order=range(5)) # type: ignore[arg-type] + +np.argmax(A, axis="bob") # type: ignore[call-overload] +np.argmax(A, kind="bob") # type: ignore[call-overload] +np.argmax(A, out=AR_f4) # type: ignore[type-var] + +np.argmin(A, axis="bob") # type: ignore[call-overload] +np.argmin(A, kind="bob") # type: ignore[call-overload] +np.argmin(A, out=AR_f4) # type: ignore[type-var] + +np.searchsorted(A[0], 0, side="bob") # type: ignore[call-overload] +np.searchsorted(A[0], 0, sorter=1.0) # type: ignore[call-overload] + +np.resize(A, 1.0) # type: ignore[call-overload] + +np.squeeze(A, 1.0) # type: ignore[call-overload] + +np.diagonal(A, offset=None) # type: ignore[call-overload] +np.diagonal(A, axis1="bob") # type: ignore[call-overload] +np.diagonal(A, axis2=[]) # type: ignore[call-overload] + +np.trace(A, offset=None) # type: ignore[call-overload] +np.trace(A, axis1="bob") 
# type: ignore[call-overload] +np.trace(A, axis2=[]) # type: ignore[call-overload] + +np.ravel(a, order="bob") # type: ignore[call-overload] + +np.nonzero(0) # type: ignore[arg-type] + +np.compress([True], A, axis=1.0) # type: ignore[call-overload] + +np.clip(a, 1, 2, out=1) # type: ignore[call-overload] + +np.sum(a, axis=1.0) # type: ignore[call-overload] +np.sum(a, keepdims=1.0) # type: ignore[call-overload] +np.sum(a, initial=[1]) # type: ignore[call-overload] + +np.all(a, axis=1.0) # type: ignore[call-overload] +np.all(a, keepdims=1.0) # type: ignore[call-overload] +np.all(a, out=1.0) # type: ignore[call-overload] + +np.any(a, axis=1.0) # type: ignore[call-overload] +np.any(a, keepdims=1.0) # type: ignore[call-overload] +np.any(a, out=1.0) # type: ignore[call-overload] + +np.cumsum(a, axis=1.0) # type: ignore[call-overload] +np.cumsum(a, dtype=1.0) # type: ignore[call-overload] +np.cumsum(a, out=1.0) # type: ignore[call-overload] + +np.ptp(a, axis=1.0) # type: ignore[call-overload] +np.ptp(a, keepdims=1.0) # type: ignore[call-overload] +np.ptp(a, out=1.0) # type: ignore[call-overload] + +np.amax(a, axis=1.0) # type: ignore[call-overload] +np.amax(a, keepdims=1.0) # type: ignore[call-overload] +np.amax(a, out=1.0) # type: ignore[call-overload] +np.amax(a, initial=[1.0]) # type: ignore[call-overload] +np.amax(a, where=[1.0]) # type: ignore[arg-type] + +np.amin(a, axis=1.0) # type: ignore[call-overload] +np.amin(a, keepdims=1.0) # type: ignore[call-overload] +np.amin(a, out=1.0) # type: ignore[call-overload] +np.amin(a, initial=[1.0]) # type: ignore[call-overload] +np.amin(a, where=[1.0]) # type: ignore[arg-type] + +np.prod(a, axis=1.0) # type: ignore[call-overload] +np.prod(a, out=False) # type: ignore[call-overload] +np.prod(a, keepdims=1.0) # type: ignore[call-overload] +np.prod(a, initial=int) # type: ignore[call-overload] +np.prod(a, where=1.0) # type: ignore[call-overload] +np.prod(AR_U) # type: ignore[arg-type] + +np.cumprod(a, axis=1.0) # type: ignore[call-overload] +np.cumprod(a, out=False) # type: ignore[call-overload] +np.cumprod(AR_U) # type: ignore[arg-type] + +np.size(a, axis=1.0) # type: ignore[arg-type] + +np.around(a, decimals=1.0) # type: ignore[call-overload] +np.around(a, out=type) # type: ignore[call-overload] +np.around(AR_U) # type: ignore[arg-type] + +np.mean(a, axis=1.0) # type: ignore[call-overload] +np.mean(a, out=False) # type: ignore[call-overload] +np.mean(a, keepdims=1.0) # type: ignore[call-overload] +np.mean(AR_U) # type: ignore[arg-type] +np.mean(AR_M) # type: ignore[arg-type] + +np.std(a, axis=1.0) # type: ignore[call-overload] +np.std(a, out=False) # type: ignore[call-overload] +np.std(a, ddof='test') # type: ignore[call-overload] +np.std(a, keepdims=1.0) # type: ignore[call-overload] +np.std(AR_U) # type: ignore[arg-type] + +np.var(a, axis=1.0) # type: ignore[call-overload] +np.var(a, out=False) # type: ignore[call-overload] +np.var(a, ddof='test') # type: ignore[call-overload] +np.var(a, keepdims=1.0) # type: ignore[call-overload] +np.var(AR_U) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/histograms.pyi b/python/numpy/typing/tests/data/fail/histograms.pyi new file mode 100644 index 000000000..5f7892719 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/histograms.pyi @@ -0,0 +1,12 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +np.histogram_bin_edges(AR_i8, range=(0, 1, 2)) # type: ignore[arg-type] + +np.histogram(AR_i8, range=(0, 1, 2)) # type: 
ignore[arg-type] + +np.histogramdd(AR_i8, range=(0, 1)) # type: ignore[arg-type] +np.histogramdd(AR_i8, range=[(0, 1, 2)]) # type: ignore[list-item] diff --git a/python/numpy/typing/tests/data/fail/index_tricks.pyi b/python/numpy/typing/tests/data/fail/index_tricks.pyi new file mode 100644 index 000000000..8b7b1ae2b --- /dev/null +++ b/python/numpy/typing/tests/data/fail/index_tricks.pyi @@ -0,0 +1,14 @@ +import numpy as np + +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] + +np.ndindex([1, 2, 3]) # type: ignore[call-overload] +np.unravel_index(AR_LIKE_f, (1, 2, 3)) # type: ignore[arg-type] +np.ravel_multi_index(AR_LIKE_i, (1, 2, 3), mode="bob") # type: ignore[call-overload] +np.mgrid[1] # type: ignore[index] +np.mgrid[...] # type: ignore[index] +np.ogrid[1] # type: ignore[index] +np.ogrid[...] # type: ignore[index] +np.fill_diagonal(AR_LIKE_f, 2) # type: ignore[arg-type] +np.diag_indices(1.0) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/lib_function_base.pyi b/python/numpy/typing/tests/data/fail/lib_function_base.pyi new file mode 100644 index 000000000..f0bf63476 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/lib_function_base.pyi @@ -0,0 +1,62 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_b_list: list[npt.NDArray[np.bool]] + +def fn_none_i(a: None, /) -> npt.NDArray[Any]: ... +def fn_ar_i(a: npt.NDArray[np.float64], posarg: int, /) -> npt.NDArray[Any]: ... + +np.average(AR_m) # type: ignore[arg-type] +np.select(1, [AR_f8]) # type: ignore[arg-type] +np.angle(AR_m) # type: ignore[arg-type] +np.unwrap(AR_m) # type: ignore[arg-type] +np.unwrap(AR_c16) # type: ignore[arg-type] +np.trim_zeros(1) # type: ignore[arg-type] +np.place(1, [True], 1.5) # type: ignore[arg-type] +np.vectorize(1) # type: ignore[arg-type] +np.place(AR_f8, slice(None), 5) # type: ignore[arg-type] + +np.piecewise(AR_f8, True, [fn_ar_i], 42) # type: ignore[call-overload] +# TODO: enable these once mypy actually supports ParamSpec (released in 2021) +# NOTE: pyright correctly reports errors for these (`reportCallIssue`) +# np.piecewise(AR_f8, AR_b_list, [fn_none_i]) # type: ignore[call-overload]s +# np.piecewise(AR_f8, AR_b_list, [fn_ar_i]) # type: ignore[call-overload] +# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 3.14) # type: ignore[call-overload] +# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, None) # type: ignore[call-overload] +# np.piecewise(AR_f8, AR_b_list, [fn_ar_i], 42, _=None) # type: ignore[call-overload] + +np.interp(AR_f8, AR_c16, AR_f8) # type: ignore[arg-type] +np.interp(AR_c16, AR_f8, AR_f8) # type: ignore[arg-type] +np.interp(AR_f8, AR_f8, AR_f8, period=AR_c16) # type: ignore[call-overload] +np.interp(AR_f8, AR_f8, AR_O) # type: ignore[arg-type] + +np.cov(AR_m) # type: ignore[arg-type] +np.cov(AR_O) # type: ignore[arg-type] +np.corrcoef(AR_m) # type: ignore[arg-type] +np.corrcoef(AR_O) # type: ignore[arg-type] +np.corrcoef(AR_f8, bias=True) # type: ignore[call-overload] +np.corrcoef(AR_f8, ddof=2) # type: ignore[call-overload] +np.blackman(1j) # type: ignore[arg-type] +np.bartlett(1j) # type: ignore[arg-type] +np.hanning(1j) # type: ignore[arg-type] +np.hamming(1j) # type: ignore[arg-type] +np.hamming(AR_c16) # type: ignore[arg-type] +np.kaiser(1j, 1) # type: ignore[arg-type] +np.sinc(AR_O) # type: ignore[arg-type] +np.median(AR_M) # type: ignore[arg-type] + 
+np.percentile(AR_f8, 50j) # type: ignore[call-overload] +np.percentile(AR_f8, 50, interpolation="bob") # type: ignore[call-overload] +np.quantile(AR_f8, 0.5j) # type: ignore[call-overload] +np.quantile(AR_f8, 0.5, interpolation="bob") # type: ignore[call-overload] +np.meshgrid(AR_f8, AR_f8, indexing="bob") # type: ignore[call-overload] +np.delete(AR_f8, AR_f8) # type: ignore[arg-type] +np.insert(AR_f8, AR_f8, 1.5) # type: ignore[arg-type] +np.digitize(AR_f8, 1j) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/lib_polynomial.pyi b/python/numpy/typing/tests/data/fail/lib_polynomial.pyi new file mode 100644 index 000000000..727eb7f4b --- /dev/null +++ b/python/numpy/typing/tests/data/fail/lib_polynomial.pyi @@ -0,0 +1,29 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] +AR_U: npt.NDArray[np.str_] + +poly_obj: np.poly1d + +np.polymul(AR_f8, AR_U) # type: ignore[arg-type] +np.polydiv(AR_f8, AR_U) # type: ignore[arg-type] + +5**poly_obj # type: ignore[operator] + +np.polyint(AR_U) # type: ignore[arg-type] +np.polyint(AR_f8, m=1j) # type: ignore[call-overload] + +np.polyder(AR_U) # type: ignore[arg-type] +np.polyder(AR_f8, m=1j) # type: ignore[call-overload] + +np.polyfit(AR_O, AR_f8, 1) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, rcond=1j) # type: ignore[call-overload] +np.polyfit(AR_f8, AR_f8, 1, w=AR_c16) # type: ignore[arg-type] +np.polyfit(AR_f8, AR_f8, 1, cov="bob") # type: ignore[call-overload] + +np.polyval(AR_f8, AR_U) # type: ignore[arg-type] +np.polyadd(AR_f8, AR_U) # type: ignore[arg-type] +np.polysub(AR_f8, AR_U) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/lib_utils.pyi b/python/numpy/typing/tests/data/fail/lib_utils.pyi new file mode 100644 index 000000000..25af32b43 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/lib_utils.pyi @@ -0,0 +1,3 @@ +import numpy.lib.array_utils as array_utils + +array_utils.byte_bounds(1) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/lib_version.pyi b/python/numpy/typing/tests/data/fail/lib_version.pyi new file mode 100644 index 000000000..62011a848 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/lib_version.pyi @@ -0,0 +1,6 @@ +from numpy.lib import NumpyVersion + +version: NumpyVersion + +NumpyVersion(b"1.8.0") # type: ignore[arg-type] +version >= b"1.8.0" # type: ignore[operator] diff --git a/python/numpy/typing/tests/data/fail/linalg.pyi b/python/numpy/typing/tests/data/fail/linalg.pyi new file mode 100644 index 000000000..c4695ee67 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/linalg.pyi @@ -0,0 +1,48 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_O: npt.NDArray[np.object_] +AR_M: npt.NDArray[np.datetime64] + +np.linalg.tensorsolve(AR_O, AR_O) # type: ignore[arg-type] + +np.linalg.solve(AR_O, AR_O) # type: ignore[arg-type] + +np.linalg.tensorinv(AR_O) # type: ignore[arg-type] + +np.linalg.inv(AR_O) # type: ignore[arg-type] + +np.linalg.matrix_power(AR_M, 5) # type: ignore[arg-type] + +np.linalg.cholesky(AR_O) # type: ignore[arg-type] + +np.linalg.qr(AR_O) # type: ignore[arg-type] +np.linalg.qr(AR_f8, mode="bob") # type: ignore[call-overload] + +np.linalg.eigvals(AR_O) # type: ignore[arg-type] + +np.linalg.eigvalsh(AR_O) # type: ignore[arg-type] +np.linalg.eigvalsh(AR_O, UPLO="bob") # type: ignore[call-overload] + +np.linalg.eig(AR_O) # type: ignore[arg-type] + +np.linalg.eigh(AR_O) # 
type: ignore[arg-type] +np.linalg.eigh(AR_O, UPLO="bob") # type: ignore[call-overload] + +np.linalg.svd(AR_O) # type: ignore[arg-type] + +np.linalg.cond(AR_O) # type: ignore[arg-type] +np.linalg.cond(AR_f8, p="bob") # type: ignore[arg-type] + +np.linalg.matrix_rank(AR_O) # type: ignore[arg-type] + +np.linalg.pinv(AR_O) # type: ignore[arg-type] + +np.linalg.slogdet(AR_O) # type: ignore[arg-type] + +np.linalg.det(AR_O) # type: ignore[arg-type] + +np.linalg.norm(AR_f8, ord="bob") # type: ignore[call-overload] + +np.linalg.multi_dot([AR_M]) # type: ignore[list-item] diff --git a/python/numpy/typing/tests/data/fail/ma.pyi b/python/numpy/typing/tests/data/fail/ma.pyi new file mode 100644 index 000000000..41306b23f --- /dev/null +++ b/python/numpy/typing/tests/data/fail/ma.pyi @@ -0,0 +1,143 @@ +from typing import TypeAlias, TypeVar + +import numpy as np +import numpy.typing as npt +from numpy._typing import _Shape + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] + +MAR_1d_f8: np.ma.MaskedArray[tuple[int], np.dtype[np.float64]] +MAR_b: MaskedArray[np.bool] +MAR_c: MaskedArray[np.complex128] +MAR_td64: MaskedArray[np.timedelta64] + +AR_b: npt.NDArray[np.bool] + +MAR_1d_f8.shape = (3, 1) # type: ignore[assignment] +MAR_1d_f8.dtype = np.bool # type: ignore[assignment] + +np.ma.min(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.min(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.min(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.min(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.max(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.max(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.max(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.max(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.ptp(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.ptp(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.ptp(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.ptp(fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmin(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmin(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmin(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, axis=(1,)) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmin(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.argmax(axis=1.0) # type: ignore[call-overload] 
+MAR_1d_f8.argmax(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(out=1.0) # type: ignore[call-overload] +MAR_1d_f8.argmax(fill_value=lambda x: 27) # type: ignore[call-overload] + +np.ma.argmax(MAR_1d_f8, axis=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, axis=(0,)) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, keepdims=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, out=1.0) # type: ignore[call-overload] +np.ma.argmax(MAR_1d_f8, fill_value=lambda x: 27) # type: ignore[call-overload] + +MAR_1d_f8.all(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.all(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.any(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(keepdims=1.0) # type: ignore[call-overload] +MAR_1d_f8.any(out=1.0) # type: ignore[call-overload] + +MAR_1d_f8.sort(axis=(0,1)) # type: ignore[arg-type] +MAR_1d_f8.sort(axis=None) # type: ignore[arg-type] +MAR_1d_f8.sort(kind='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(order=lambda: 'cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(endwith='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(fill_value=lambda: 'cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(stable='cabbage') # type: ignore[arg-type] +MAR_1d_f8.sort(stable=True) # type: ignore[arg-type] + +MAR_1d_f8.take(axis=1.0) # type: ignore[call-overload] +MAR_1d_f8.take(out=1) # type: ignore[call-overload] +MAR_1d_f8.take(mode="bob") # type: ignore[call-overload] + +np.ma.take(None) # type: ignore[call-overload] +np.ma.take(axis=1.0) # type: ignore[call-overload] +np.ma.take(out=1) # type: ignore[call-overload] +np.ma.take(mode="bob") # type: ignore[call-overload] + +MAR_1d_f8.partition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.partition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(kind='cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.partition(AR_b) # type: ignore[arg-type] + +MAR_1d_f8.argpartition(['cabbage']) # type: ignore[arg-type] +MAR_1d_f8.argpartition(axis=(0,1)) # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(kind='cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(order=lambda: 'cabbage') # type: ignore[arg-type, call-arg] +MAR_1d_f8.argpartition(AR_b) # type: ignore[arg-type] + +np.ma.ndim(lambda: 'lambda') # type: ignore[arg-type] + +np.ma.size(AR_b, axis='0') # type: ignore[arg-type] + +MAR_1d_f8 >= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 > (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 <= (lambda x: 'mango') # type: ignore[operator] +MAR_1d_f8 < (lambda x: 'mango') # type: ignore[operator] + +MAR_1d_f8.count(axis=0.) # type: ignore[call-overload] + +np.ma.count(MAR_1d_f8, axis=0.) 
# type: ignore[call-overload] + +MAR_1d_f8.put(4, 999, mode='flip') # type: ignore[arg-type] + +np.ma.put(MAR_1d_f8, 4, 999, mode='flip') # type: ignore[arg-type] + +np.ma.put([1,1,3], 0, 999) # type: ignore[arg-type] + +np.ma.compressed(lambda: 'compress me') # type: ignore[call-overload] + +np.ma.allequal(MAR_1d_f8, [1,2,3], fill_value=1.5) # type: ignore[arg-type] + +np.ma.allclose(MAR_1d_f8, [1,2,3], masked_equal=4.5) # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1,2,3], rtol='.4') # type: ignore[arg-type] +np.ma.allclose(MAR_1d_f8, [1,2,3], atol='.5') # type: ignore[arg-type] + +MAR_1d_f8.__setmask__('mask') # type: ignore[arg-type] + +MAR_b *= 2 # type: ignore[arg-type] +MAR_c //= 2 # type: ignore[misc] +MAR_td64 **= 2 # type: ignore[misc] + +MAR_1d_f8.swapaxes(axis1=1, axis2=0) # type: ignore[call-arg] diff --git a/python/numpy/typing/tests/data/fail/memmap.pyi b/python/numpy/typing/tests/data/fail/memmap.pyi new file mode 100644 index 000000000..3a4fc7df0 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/memmap.pyi @@ -0,0 +1,5 @@ +import numpy as np + +with open("file.txt", "r") as f: + np.memmap(f) # type: ignore[call-overload] +np.memmap("test.txt", shape=[10, 5]) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/modules.pyi b/python/numpy/typing/tests/data/fail/modules.pyi new file mode 100644 index 000000000..c12a18280 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/modules.pyi @@ -0,0 +1,17 @@ +import numpy as np + +np.testing.bob # type: ignore[attr-defined] +np.bob # type: ignore[attr-defined] + +# Stdlib modules in the namespace by accident +np.warnings # type: ignore[attr-defined] +np.sys # type: ignore[attr-defined] +np.os # type: ignore[attr-defined] +np.math # type: ignore[attr-defined] + +# Public sub-modules that are not imported to their parent module by default; +# e.g. one must first execute `import numpy.lib.recfunctions` +np.lib.recfunctions # type: ignore[attr-defined] + +np.__deprecated_attrs__ # type: ignore[attr-defined] +np.__expired_functions__ # type: ignore[attr-defined] diff --git a/python/numpy/typing/tests/data/fail/multiarray.pyi b/python/numpy/typing/tests/data/fail/multiarray.pyi new file mode 100644 index 000000000..1f9ef6894 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/multiarray.pyi @@ -0,0 +1,52 @@ +import numpy as np +import numpy.typing as npt + +i8: np.int64 + +AR_b: npt.NDArray[np.bool] +AR_u1: npt.NDArray[np.uint8] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] + +M: np.datetime64 + +AR_LIKE_f: list[float] + +def func(a: int) -> None: ... 
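+
+# np.where is typed to accept either the condition alone or the full
+# (condition, x, y) triple; the two-argument form exercised first below is
+# rejected by the stubs and also fails at runtime.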
+ +np.where(AR_b, 1) # type: ignore[call-overload] + +np.can_cast(AR_f8, 1) # type: ignore[arg-type] + +np.vdot(AR_M, AR_M) # type: ignore[arg-type] + +np.copyto(AR_LIKE_f, AR_f8) # type: ignore[arg-type] + +np.putmask(AR_LIKE_f, [True, True, False], 1.5) # type: ignore[arg-type] + +np.packbits(AR_f8) # type: ignore[arg-type] +np.packbits(AR_u1, bitorder=">") # type: ignore[arg-type] + +np.unpackbits(AR_i8) # type: ignore[arg-type] +np.unpackbits(AR_u1, bitorder=">") # type: ignore[arg-type] + +np.shares_memory(1, 1, max_work=i8) # type: ignore[arg-type] +np.may_share_memory(1, 1, max_work=i8) # type: ignore[arg-type] + +np.arange(stop=10) # type: ignore[call-overload] + +np.datetime_data(int) # type: ignore[arg-type] + +np.busday_offset("2012", 10) # type: ignore[call-overload] + +np.datetime_as_string("2012") # type: ignore[call-overload] + +np.char.compare_chararrays("a", b"a", "==", False) # type: ignore[call-overload] + +np.nested_iters([AR_i8, AR_i8]) # type: ignore[call-arg] +np.nested_iters([AR_i8, AR_i8], 0) # type: ignore[arg-type] +np.nested_iters([AR_i8, AR_i8], [0]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["test"]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["test"]]) # type: ignore[list-item] +np.nested_iters([AR_i8, AR_i8], [[0], [1]], buffersize=1.0) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/ndarray.pyi b/python/numpy/typing/tests/data/fail/ndarray.pyi new file mode 100644 index 000000000..2aeec0883 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/ndarray.pyi @@ -0,0 +1,11 @@ +import numpy as np + +# Ban setting dtype since mutating the type of the array in place +# makes having ndarray be generic over dtype impossible. Generally +# users should use `ndarray.view` in this situation anyway. See +# +# https://github.com/numpy/numpy-stubs/issues/7 +# +# for more context. +float_array = np.array([1.0]) +float_array.dtype = np.bool # type: ignore[assignment, misc] diff --git a/python/numpy/typing/tests/data/fail/ndarray_misc.pyi b/python/numpy/typing/tests/data/fail/ndarray_misc.pyi new file mode 100644 index 000000000..93e1bce8f --- /dev/null +++ b/python/numpy/typing/tests/data/fail/ndarray_misc.pyi @@ -0,0 +1,36 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. 
+ +""" + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] +AR_b: npt.NDArray[np.bool] + +ctypes_obj = AR_f8.ctypes + +f8.argpartition(0) # type: ignore[attr-defined] +f8.diagonal() # type: ignore[attr-defined] +f8.dot(1) # type: ignore[attr-defined] +f8.nonzero() # type: ignore[attr-defined] +f8.partition(0) # type: ignore[attr-defined] +f8.put(0, 2) # type: ignore[attr-defined] +f8.setfield(2, np.float64) # type: ignore[attr-defined] +f8.sort() # type: ignore[attr-defined] +f8.trace() # type: ignore[attr-defined] + +AR_M.__complex__() # type: ignore[misc] +AR_b.__index__() # type: ignore[misc] + +AR_f8[1.5] # type: ignore[call-overload] +AR_f8["field_a"] # type: ignore[call-overload] +AR_f8[["field_a", "field_b"]] # type: ignore[index] + +AR_f8.__array_finalize__(object()) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/nditer.pyi b/python/numpy/typing/tests/data/fail/nditer.pyi new file mode 100644 index 000000000..cb64061e4 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/nditer.pyi @@ -0,0 +1,8 @@ +import numpy as np + +class Test(np.nditer): ... # type: ignore[misc] + +np.nditer([0, 1], flags=["test"]) # type: ignore[list-item] +np.nditer([0, 1], op_flags=[["test"]]) # type: ignore[list-item] +np.nditer([0, 1], itershape=(1.0,)) # type: ignore[arg-type] +np.nditer([0, 1], buffersize=1.0) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/nested_sequence.pyi b/python/numpy/typing/tests/data/fail/nested_sequence.pyi new file mode 100644 index 000000000..a28d3df3c --- /dev/null +++ b/python/numpy/typing/tests/data/fail/nested_sequence.pyi @@ -0,0 +1,16 @@ +from collections.abc import Sequence +from numpy._typing import _NestedSequence + +a: Sequence[float] +b: list[complex] +c: tuple[str, ...] +d: int +e: str + +def func(a: _NestedSequence[int]) -> None: ... 
+ +reveal_type(func(a)) # type: ignore[arg-type, misc] +reveal_type(func(b)) # type: ignore[arg-type, misc] +reveal_type(func(c)) # type: ignore[arg-type, misc] +reveal_type(func(d)) # type: ignore[arg-type, misc] +reveal_type(func(e)) # type: ignore[arg-type, misc] diff --git a/python/numpy/typing/tests/data/fail/npyio.pyi b/python/numpy/typing/tests/data/fail/npyio.pyi new file mode 100644 index 000000000..e204566a5 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/npyio.pyi @@ -0,0 +1,24 @@ +import pathlib +from typing import IO + +import numpy.typing as npt +import numpy as np + +str_path: str +bytes_path: bytes +pathlib_path: pathlib.Path +str_file: IO[str] +AR_i8: npt.NDArray[np.int64] + +np.load(str_file) # type: ignore[arg-type] + +np.save(bytes_path, AR_i8) # type: ignore[call-overload] +np.save(str_path, AR_i8, fix_imports=True) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + +np.savez(bytes_path, AR_i8) # type: ignore[arg-type] + +np.savez_compressed(bytes_path, AR_i8) # type: ignore[arg-type] + +np.loadtxt(bytes_path) # type: ignore[arg-type] + +np.fromregex(bytes_path, ".", np.int64) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/numerictypes.pyi b/python/numpy/typing/tests/data/fail/numerictypes.pyi new file mode 100644 index 000000000..a1fd47a6f --- /dev/null +++ b/python/numpy/typing/tests/data/fail/numerictypes.pyi @@ -0,0 +1,5 @@ +import numpy as np + +np.isdtype(1, np.int64) # type: ignore[arg-type] + +np.issubdtype(1, np.int64) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/random.pyi b/python/numpy/typing/tests/data/fail/random.pyi new file mode 100644 index 000000000..1abf4b776 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/random.pyi @@ -0,0 +1,62 @@ +import numpy as np +import numpy.typing as npt + +SEED_FLOAT: float = 457.3 +SEED_ARR_FLOAT: npt.NDArray[np.float64] = np.array([1.0, 2, 3, 4]) +SEED_ARRLIKE_FLOAT: list[float] = [1.0, 2.0, 3.0, 4.0] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_STR: str = "String seeding not allowed" + +# default rng +np.random.default_rng(SEED_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.default_rng(SEED_STR) # type: ignore[arg-type] + +# Seed Sequence +np.random.SeedSequence(SEED_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SeedSequence(SEED_SEED_SEQ) # type: ignore[arg-type] +np.random.SeedSequence(SEED_STR) # type: ignore[arg-type] + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence() +seed_seq.spawn(11.5) # type: ignore[arg-type] +seed_seq.generate_state(3.14) # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint8) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint8") # type: ignore[arg-type] +seed_seq.generate_state(3, "u1") # type: ignore[arg-type] +seed_seq.generate_state(3, np.uint16) # type: ignore[arg-type] +seed_seq.generate_state(3, "uint16") # type: ignore[arg-type] +seed_seq.generate_state(3, "u2") # type: ignore[arg-type] +seed_seq.generate_state(3, np.int32) # type: ignore[arg-type] +seed_seq.generate_state(3, "int32") # type: ignore[arg-type] +seed_seq.generate_state(3, "i4") # type: ignore[arg-type] + +# Bit Generators +np.random.MT19937(SEED_FLOAT) # type: ignore[arg-type] 
+np.random.MT19937(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.MT19937(SEED_STR) # type: ignore[arg-type] + +np.random.PCG64(SEED_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.PCG64(SEED_STR) # type: ignore[arg-type] + +np.random.Philox(SEED_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.Philox(SEED_STR) # type: ignore[arg-type] + +np.random.SFC64(SEED_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARR_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_ARRLIKE_FLOAT) # type: ignore[arg-type] +np.random.SFC64(SEED_STR) # type: ignore[arg-type] + +# Generator +np.random.Generator(None) # type: ignore[arg-type] +np.random.Generator(12333283902830213) # type: ignore[arg-type] +np.random.Generator("OxFEEDF00D") # type: ignore[arg-type] +np.random.Generator([123, 234]) # type: ignore[arg-type] +np.random.Generator(np.array([123, 234], dtype="u4")) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/rec.pyi b/python/numpy/typing/tests/data/fail/rec.pyi new file mode 100644 index 000000000..c9d43dd2f --- /dev/null +++ b/python/numpy/typing/tests/data/fail/rec.pyi @@ -0,0 +1,17 @@ +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] + +np.rec.fromarrays(1) # type: ignore[call-overload] +np.rec.fromarrays([1, 2, 3], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +np.rec.fromrecords(AR_i8) # type: ignore[arg-type] +np.rec.fromrecords([(1.5,)], dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +np.rec.fromstring("string", dtype=[("f8", "f8")]) # type: ignore[call-overload] +np.rec.fromstring(b"bytes") # type: ignore[call-overload] +np.rec.fromstring(b"(1.5,)", dtype=[("f8", "f8")], formats=["f8", "f8"]) # type: ignore[call-overload] + +with open("test", "r") as f: + np.rec.fromfile(f, dtype=[("f8", "f8")]) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/scalars.pyi b/python/numpy/typing/tests/data/fail/scalars.pyi new file mode 100644 index 000000000..bfbe9125e --- /dev/null +++ b/python/numpy/typing/tests/data/fail/scalars.pyi @@ -0,0 +1,87 @@ +import sys +import numpy as np + +f2: np.float16 +f8: np.float64 +c8: np.complex64 + +# Construction + +np.float32(3j) # type: ignore[arg-type] + +# Technically the following examples are valid NumPy code. But they +# are not considered a best practice, and people who wish to use the +# stubs should instead do +# +# np.array([1.0, 0.0, 0.0], dtype=np.float32) +# np.array([], dtype=np.complex64) +# +# See e.g. the discussion on the mailing list +# +# https://mail.python.org/pipermail/numpy-discussion/2020-April/080566.html +# +# and the issue +# +# https://github.com/numpy/numpy-stubs/issues/41 +# +# for more context. +np.float32([1.0, 0.0, 0.0]) # type: ignore[arg-type] +np.complex64([]) # type: ignore[call-overload] + +# TODO: protocols (can't check for non-existent protocols w/ __getattr__) + +np.datetime64(0) # type: ignore[call-overload] + +class A: + def __float__(self) -> float: ... 
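+
+# A defines only __float__, which satisfies the float constructors but not
+# the integer ones: the stubs require __int__ or __index__ there, so every
+# call below is an arg-type error even though NumPy itself may coerce such
+# objects at runtime.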
+ +np.int8(A()) # type: ignore[arg-type] +np.int16(A()) # type: ignore[arg-type] +np.int32(A()) # type: ignore[arg-type] +np.int64(A()) # type: ignore[arg-type] +np.uint8(A()) # type: ignore[arg-type] +np.uint16(A()) # type: ignore[arg-type] +np.uint32(A()) # type: ignore[arg-type] +np.uint64(A()) # type: ignore[arg-type] + +np.void("test") # type: ignore[call-overload] +np.void("test", dtype=None) # type: ignore[call-overload] + +np.generic(1) # type: ignore[abstract] +np.number(1) # type: ignore[abstract] +np.integer(1) # type: ignore[abstract] +np.inexact(1) # type: ignore[abstract] +np.character("test") # type: ignore[abstract] +np.flexible(b"test") # type: ignore[abstract] + +np.float64(value=0.0) # type: ignore[call-arg] +np.int64(value=0) # type: ignore[call-arg] +np.uint64(value=0) # type: ignore[call-arg] +np.complex128(value=0.0j) # type: ignore[call-overload] +np.str_(value='bob') # type: ignore[call-overload] +np.bytes_(value=b'test') # type: ignore[call-overload] +np.void(value=b'test') # type: ignore[call-overload] +np.bool(value=True) # type: ignore[call-overload] +np.datetime64(value="2019") # type: ignore[call-overload] +np.timedelta64(value=0) # type: ignore[call-overload] + +np.bytes_(b"hello", encoding='utf-8') # type: ignore[call-overload] +np.str_("hello", encoding='utf-8') # type: ignore[call-overload] + +f8.item(1) # type: ignore[call-overload] +f8.item((0, 1)) # type: ignore[arg-type] +f8.squeeze(axis=1) # type: ignore[arg-type] +f8.squeeze(axis=(0, 1)) # type: ignore[arg-type] +f8.transpose(1) # type: ignore[arg-type] + +def func(a: np.float32) -> None: ... + +func(f2) # type: ignore[arg-type] +func(f8) # type: ignore[arg-type] + +c8.__getnewargs__() # type: ignore[attr-defined] +f2.__getnewargs__() # type: ignore[attr-defined] +f2.hex() # type: ignore[attr-defined] +np.float16.fromhex("0x0.0p+0") # type: ignore[attr-defined] +f2.__trunc__() # type: ignore[attr-defined] +f2.__getformat__("float") # type: ignore[attr-defined] diff --git a/python/numpy/typing/tests/data/fail/shape.pyi b/python/numpy/typing/tests/data/fail/shape.pyi new file mode 100644 index 000000000..fea055583 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/shape.pyi @@ -0,0 +1,6 @@ +from typing import Any +import numpy as np + +# test bounds of _ShapeT_co + +np.ndarray[tuple[str, str], Any] # type: ignore[type-var] diff --git a/python/numpy/typing/tests/data/fail/shape_base.pyi b/python/numpy/typing/tests/data/fail/shape_base.pyi new file mode 100644 index 000000000..652b24ba3 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/shape_base.pyi @@ -0,0 +1,8 @@ +import numpy as np + +class DTypeLike: + dtype: np.dtype[np.int_] + +dtype_like: DTypeLike + +np.expand_dims(dtype_like, (5, 10)) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/stride_tricks.pyi b/python/numpy/typing/tests/data/fail/stride_tricks.pyi new file mode 100644 index 000000000..7f9a26b96 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/stride_tricks.pyi @@ -0,0 +1,9 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] + +np.lib.stride_tricks.as_strided(AR_f8, shape=8) # type: ignore[call-overload] +np.lib.stride_tricks.as_strided(AR_f8, strides=8) # type: ignore[call-overload] + +np.lib.stride_tricks.sliding_window_view(AR_f8, axis=(1,)) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/strings.pyi b/python/numpy/typing/tests/data/fail/strings.pyi new file mode 100644 index 000000000..328a521ae --- /dev/null +++ 
b/python/numpy/typing/tests/data/fail/strings.pyi @@ -0,0 +1,52 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] + +np.strings.equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.not_equal(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.greater_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less_equal(AR_U, AR_S) # type: ignore[arg-type] +np.strings.greater(AR_U, AR_S) # type: ignore[arg-type] +np.strings.less(AR_U, AR_S) # type: ignore[arg-type] + +np.strings.encode(AR_S) # type: ignore[arg-type] +np.strings.decode(AR_U) # type: ignore[arg-type] + +np.strings.lstrip(AR_U, b"a") # type: ignore[arg-type] +np.strings.lstrip(AR_S, "a") # type: ignore[arg-type] +np.strings.strip(AR_U, b"a") # type: ignore[arg-type] +np.strings.strip(AR_S, "a") # type: ignore[arg-type] +np.strings.rstrip(AR_U, b"a") # type: ignore[arg-type] +np.strings.rstrip(AR_S, "a") # type: ignore[arg-type] + +np.strings.partition(AR_U, b"a") # type: ignore[arg-type] +np.strings.partition(AR_S, "a") # type: ignore[arg-type] +np.strings.rpartition(AR_U, b"a") # type: ignore[arg-type] +np.strings.rpartition(AR_S, "a") # type: ignore[arg-type] + +np.strings.count(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.count(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.endswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.endswith(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.startswith(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.startswith(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.find(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.find(AR_S, "a", 0, 9) # type: ignore[arg-type] +np.strings.rfind(AR_U, b"a", [1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.strings.rfind(AR_S, "a", 0, 9) # type: ignore[arg-type] + +np.strings.index(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.index(AR_S, "a", end=9) # type: ignore[arg-type] +np.strings.rindex(AR_U, b"a", start=[1, 2, 3]) # type: ignore[arg-type] +np.strings.rindex(AR_S, "a", end=9) # type: ignore[arg-type] + +np.strings.isdecimal(AR_S) # type: ignore[arg-type] +np.strings.isnumeric(AR_S) # type: ignore[arg-type] + +np.strings.replace(AR_U, b"_", b"-", 10) # type: ignore[arg-type] +np.strings.replace(AR_S, "_", "-", 1) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/testing.pyi b/python/numpy/typing/tests/data/fail/testing.pyi new file mode 100644 index 000000000..517062c4c --- /dev/null +++ b/python/numpy/typing/tests/data/fail/testing.pyi @@ -0,0 +1,28 @@ +import numpy as np +import numpy.typing as npt + +AR_U: npt.NDArray[np.str_] + +def func(x: object) -> bool: ... 
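+
+# The failures below mix several kinds of misuse: non-str msg arguments,
+# numeric comparison helpers applied to str arrays, bytes where str is
+# expected, and callable-based helpers invoked with keywords or with
+# arguments that do not match the callable's own signature.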
+ +np.testing.assert_(True, msg=1) # type: ignore[arg-type] +np.testing.build_err_msg(1, "test") # type: ignore[arg-type] +np.testing.assert_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_approx_equal([1, 2, 3], [1, 2, 3]) # type: ignore[arg-type] +np.testing.assert_array_almost_equal(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_less(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_string_equal(b"a", b"a") # type: ignore[arg-type] + +np.testing.assert_raises(expected_exception=TypeError, callable=func) # type: ignore[call-overload] +np.testing.assert_raises_regex(expected_exception=TypeError, expected_regex="T", callable=func) # type: ignore[call-overload] + +np.testing.assert_allclose(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_almost_equal_nulp(AR_U, AR_U) # type: ignore[arg-type] +np.testing.assert_array_max_ulp(AR_U, AR_U) # type: ignore[arg-type] + +np.testing.assert_warns(RuntimeWarning, func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func=func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func) # type: ignore[call-overload] +np.testing.assert_no_warnings(func, y=None) # type: ignore[call-overload] + +np.testing.assert_no_gc_cycles(func=func) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/twodim_base.pyi b/python/numpy/typing/tests/data/fail/twodim_base.pyi new file mode 100644 index 000000000..d0f2b7ad8 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/twodim_base.pyi @@ -0,0 +1,32 @@ +from typing import Any, TypeVar + +import numpy as np +import numpy.typing as npt + +def func1(ar: npt.NDArray[Any], a: int) -> npt.NDArray[np.str_]: ... + +def func2(ar: npt.NDArray[Any], a: float) -> float: ... + +AR_b: npt.NDArray[np.bool] +AR_m: npt.NDArray[np.timedelta64] + +AR_LIKE_b: list[bool] + +np.eye(10, M=20.0) # type: ignore[call-overload] +np.eye(10, k=2.5, dtype=int) # type: ignore[call-overload] + +np.diag(AR_b, k=0.5) # type: ignore[call-overload] +np.diagflat(AR_b, k=0.5) # type: ignore[call-overload] + +np.tri(10, M=20.0) # type: ignore[call-overload] +np.tri(10, k=2.5, dtype=int) # type: ignore[call-overload] + +np.tril(AR_b, k=0.5) # type: ignore[call-overload] +np.triu(AR_b, k=0.5) # type: ignore[call-overload] + +np.vander(AR_m) # type: ignore[arg-type] + +np.histogram2d(AR_m) # type: ignore[call-overload] + +np.mask_indices(10, func1) # type: ignore[arg-type] +np.mask_indices(10, func2, 10.5) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/type_check.pyi b/python/numpy/typing/tests/data/fail/type_check.pyi new file mode 100644 index 000000000..94b6ee425 --- /dev/null +++ b/python/numpy/typing/tests/data/fail/type_check.pyi @@ -0,0 +1,13 @@ +import numpy as np +import numpy.typing as npt + +DTYPE_i8: np.dtype[np.int64] + +np.mintypecode(DTYPE_i8) # type: ignore[arg-type] +np.iscomplexobj(DTYPE_i8) # type: ignore[arg-type] +np.isrealobj(DTYPE_i8) # type: ignore[arg-type] + +np.typename(DTYPE_i8) # type: ignore[call-overload] +np.typename("invalid") # type: ignore[call-overload] + +np.common_type(np.timedelta64()) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/ufunc_config.pyi b/python/numpy/typing/tests/data/fail/ufunc_config.pyi new file mode 100644 index 000000000..c67b6a3ac --- /dev/null +++ b/python/numpy/typing/tests/data/fail/ufunc_config.pyi @@ -0,0 +1,21 @@ +"""Typing tests for `numpy._core._ufunc_config`.""" + +import numpy as np + +def func1(a: str, b: int, c: float) -> None: ... 
+def func2(a: str, *, b: int) -> None: ... + +class Write1: + def write1(self, a: str) -> None: ... + +class Write2: + def write(self, a: str, b: str) -> None: ... + +class Write3: + def write(self, *, a: str) -> None: ... + +np.seterrcall(func1) # type: ignore[arg-type] +np.seterrcall(func2) # type: ignore[arg-type] +np.seterrcall(Write1()) # type: ignore[arg-type] +np.seterrcall(Write2()) # type: ignore[arg-type] +np.seterrcall(Write3()) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/ufunclike.pyi b/python/numpy/typing/tests/data/fail/ufunclike.pyi new file mode 100644 index 000000000..e556e409e --- /dev/null +++ b/python/numpy/typing/tests/data/fail/ufunclike.pyi @@ -0,0 +1,21 @@ +import numpy as np +import numpy.typing as npt + +AR_c: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] + +np.fix(AR_c) # type: ignore[arg-type] +np.fix(AR_m) # type: ignore[arg-type] +np.fix(AR_M) # type: ignore[arg-type] + +np.isposinf(AR_c) # type: ignore[arg-type] +np.isposinf(AR_m) # type: ignore[arg-type] +np.isposinf(AR_M) # type: ignore[arg-type] +np.isposinf(AR_O) # type: ignore[arg-type] + +np.isneginf(AR_c) # type: ignore[arg-type] +np.isneginf(AR_m) # type: ignore[arg-type] +np.isneginf(AR_M) # type: ignore[arg-type] +np.isneginf(AR_O) # type: ignore[arg-type] diff --git a/python/numpy/typing/tests/data/fail/ufuncs.pyi b/python/numpy/typing/tests/data/fail/ufuncs.pyi new file mode 100644 index 000000000..1b1628d7d --- /dev/null +++ b/python/numpy/typing/tests/data/fail/ufuncs.pyi @@ -0,0 +1,17 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] + +np.sin.nin + "foo" # type: ignore[operator] +np.sin(1, foo="bar") # type: ignore[call-overload] + +np.abs(None) # type: ignore[call-overload] + +np.add(1, 1, 1) # type: ignore[call-overload] +np.add(1, 1, axis=0) # type: ignore[call-overload] + +np.matmul(AR_f8, AR_f8, where=True) # type: ignore[call-overload] + +np.frexp(AR_f8, out=None) # type: ignore[call-overload] +np.frexp(AR_f8, out=AR_f8) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/fail/warnings_and_errors.pyi b/python/numpy/typing/tests/data/fail/warnings_and_errors.pyi new file mode 100644 index 000000000..8ba34f6df --- /dev/null +++ b/python/numpy/typing/tests/data/fail/warnings_and_errors.pyi @@ -0,0 +1,5 @@ +import numpy.exceptions as ex + +ex.AxisError(1.0) # type: ignore[call-overload] +ex.AxisError(1, ndim=2.0) # type: ignore[call-overload] +ex.AxisError(2, msg_prefix=404) # type: ignore[call-overload] diff --git a/python/numpy/typing/tests/data/misc/extended_precision.pyi b/python/numpy/typing/tests/data/misc/extended_precision.pyi new file mode 100644 index 000000000..84b5f516b --- /dev/null +++ b/python/numpy/typing/tests/data/misc/extended_precision.pyi @@ -0,0 +1,9 @@ +import numpy as np +from numpy._typing import _96Bit, _128Bit + +from typing import assert_type + +assert_type(np.float96(), np.floating[_96Bit]) +assert_type(np.float128(), np.floating[_128Bit]) +assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) +assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit]) diff --git a/python/numpy/typing/tests/data/mypy.ini b/python/numpy/typing/tests/data/mypy.ini new file mode 100644 index 000000000..bca203260 --- /dev/null +++ b/python/numpy/typing/tests/data/mypy.ini @@ -0,0 +1,9 @@ +[mypy] +enable_error_code = deprecated, ignore-without-code, truthy-bool +strict_bytes = True 
+warn_unused_ignores = True
+implicit_reexport = False
+disallow_any_unimported = True
+disallow_any_generics = True
+show_absolute_path = True
+pretty = True
diff --git a/python/numpy/typing/tests/data/pass/arithmetic.py b/python/numpy/typing/tests/data/pass/arithmetic.py
new file mode 100644
index 000000000..3b2901cf2
--- /dev/null
+++ b/python/numpy/typing/tests/data/pass/arithmetic.py
@@ -0,0 +1,612 @@
+from __future__ import annotations
+
+from typing import Any, cast
+import numpy as np
+import numpy.typing as npt
+import pytest
+
+c16 = np.complex128(1)
+f8 = np.float64(1)
+i8 = np.int64(1)
+u8 = np.uint64(1)
+
+c8 = np.complex64(1)
+f4 = np.float32(1)
+i4 = np.int32(1)
+u4 = np.uint32(1)
+
+dt = np.datetime64(1, "D")
+td = np.timedelta64(1, "D")
+
+b_ = np.bool(1)
+
+b = bool(1)
+c = complex(1)
+f = float(1)
+i = int(1)
+
+
+class Object:
+    def __array__(self, dtype: np.typing.DTypeLike = None,
+                  copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]:
+        ret = np.empty((), dtype=object)
+        ret[()] = self
+        return ret
+
+    def __sub__(self, value: Any) -> Object:
+        return self
+
+    def __rsub__(self, value: Any) -> Object:
+        return self
+
+    def __floordiv__(self, value: Any) -> Object:
+        return self
+
+    def __rfloordiv__(self, value: Any) -> Object:
+        return self
+
+    def __mul__(self, value: Any) -> Object:
+        return self
+
+    def __rmul__(self, value: Any) -> Object:
+        return self
+
+    def __pow__(self, value: Any) -> Object:
+        return self
+
+    def __rpow__(self, value: Any) -> Object:
+        return self
+
+
+AR_b: npt.NDArray[np.bool] = np.array([True])
+AR_u: npt.NDArray[np.uint32] = np.array([1], dtype=np.uint32)
+AR_i: npt.NDArray[np.int64] = np.array([1])
+AR_integer: npt.NDArray[np.integer] = cast(npt.NDArray[np.integer], AR_i)
+AR_f: npt.NDArray[np.float64] = np.array([1.0])
+AR_c: npt.NDArray[np.complex128] = np.array([1j])
+AR_m: npt.NDArray[np.timedelta64] = np.array([np.timedelta64(1, "D")])
+AR_M: npt.NDArray[np.datetime64] = np.array([np.datetime64(1, "D")])
+AR_O: npt.NDArray[np.object_] = np.array([Object()])
+
+AR_LIKE_b = [True]
+AR_LIKE_u = [np.uint32(1)]
+AR_LIKE_i = [1]
+AR_LIKE_f = [1.0]
+AR_LIKE_c = [1j]
+AR_LIKE_m = [np.timedelta64(1, "D")]
+AR_LIKE_M = [np.datetime64(1, "D")]
+AR_LIKE_O = [Object()]
+
+# Array subtractions
+
+AR_b - AR_LIKE_u
+AR_b - AR_LIKE_i
+AR_b - AR_LIKE_f
+AR_b - AR_LIKE_c
+AR_b - AR_LIKE_m
+AR_b - AR_LIKE_O
+
+AR_LIKE_u - AR_b +AR_LIKE_i - AR_b +AR_LIKE_f - AR_b +AR_LIKE_c - AR_b +AR_LIKE_m - AR_b +AR_LIKE_M - AR_b +AR_LIKE_O - AR_b + +AR_u - AR_LIKE_b +AR_u - AR_LIKE_u +AR_u - AR_LIKE_i +AR_u - AR_LIKE_f +AR_u - AR_LIKE_c +AR_u - AR_LIKE_m +AR_u - AR_LIKE_O + +AR_LIKE_b - AR_u +AR_LIKE_u - AR_u +AR_LIKE_i - AR_u +AR_LIKE_f - AR_u +AR_LIKE_c - AR_u +AR_LIKE_m - AR_u +AR_LIKE_M - AR_u +AR_LIKE_O - AR_u + +AR_i - AR_LIKE_b +AR_i - AR_LIKE_u +AR_i - AR_LIKE_i +AR_i - AR_LIKE_f +AR_i - AR_LIKE_c +AR_i - AR_LIKE_m +AR_i - AR_LIKE_O + +AR_LIKE_b - AR_i +AR_LIKE_u - AR_i +AR_LIKE_i - AR_i +AR_LIKE_f - AR_i +AR_LIKE_c - AR_i +AR_LIKE_m - AR_i +AR_LIKE_M - AR_i +AR_LIKE_O - AR_i + +AR_f - AR_LIKE_b +AR_f - AR_LIKE_u +AR_f - AR_LIKE_i +AR_f - AR_LIKE_f +AR_f - AR_LIKE_c +AR_f - AR_LIKE_O + +AR_LIKE_b - AR_f +AR_LIKE_u - AR_f +AR_LIKE_i - AR_f +AR_LIKE_f - AR_f +AR_LIKE_c - AR_f +AR_LIKE_O - AR_f + +AR_c - AR_LIKE_b +AR_c - AR_LIKE_u +AR_c - AR_LIKE_i +AR_c - AR_LIKE_f +AR_c - AR_LIKE_c +AR_c - AR_LIKE_O + +AR_LIKE_b - AR_c +AR_LIKE_u - AR_c +AR_LIKE_i - AR_c +AR_LIKE_f - AR_c +AR_LIKE_c - AR_c +AR_LIKE_O - AR_c + +AR_m - AR_LIKE_b +AR_m - AR_LIKE_u +AR_m - AR_LIKE_i +AR_m - AR_LIKE_m + +AR_LIKE_b - AR_m +AR_LIKE_u - AR_m +AR_LIKE_i - AR_m +AR_LIKE_m - AR_m +AR_LIKE_M - AR_m + +AR_M - AR_LIKE_b +AR_M - AR_LIKE_u +AR_M - AR_LIKE_i +AR_M - AR_LIKE_m +AR_M - AR_LIKE_M + +AR_LIKE_M - AR_M + +AR_O - AR_LIKE_b +AR_O - AR_LIKE_u +AR_O - AR_LIKE_i +AR_O - AR_LIKE_f +AR_O - AR_LIKE_c +AR_O - AR_LIKE_O + +AR_LIKE_b - AR_O +AR_LIKE_u - AR_O +AR_LIKE_i - AR_O +AR_LIKE_f - AR_O +AR_LIKE_c - AR_O +AR_LIKE_O - AR_O + +AR_u += AR_b +AR_u += AR_u +AR_u += 1 # Allowed during runtime as long as the object is 0D and >=0 + +# Array floor division + +AR_b // AR_LIKE_b +AR_b // AR_LIKE_u +AR_b // AR_LIKE_i +AR_b // AR_LIKE_f +AR_b // AR_LIKE_O + +AR_LIKE_b // AR_b +AR_LIKE_u // AR_b +AR_LIKE_i // AR_b +AR_LIKE_f // AR_b +AR_LIKE_O // AR_b + +AR_u // AR_LIKE_b +AR_u // AR_LIKE_u +AR_u // AR_LIKE_i +AR_u // AR_LIKE_f +AR_u // AR_LIKE_O + +AR_LIKE_b // AR_u +AR_LIKE_u // AR_u +AR_LIKE_i // AR_u +AR_LIKE_f // AR_u +AR_LIKE_m // AR_u +AR_LIKE_O // AR_u + +AR_i // AR_LIKE_b +AR_i // AR_LIKE_u +AR_i // AR_LIKE_i +AR_i // AR_LIKE_f +AR_i // AR_LIKE_O + +AR_LIKE_b // AR_i +AR_LIKE_u // AR_i +AR_LIKE_i // AR_i +AR_LIKE_f // AR_i +AR_LIKE_m // AR_i +AR_LIKE_O // AR_i + +AR_f // AR_LIKE_b +AR_f // AR_LIKE_u +AR_f // AR_LIKE_i +AR_f // AR_LIKE_f +AR_f // AR_LIKE_O + +AR_LIKE_b // AR_f +AR_LIKE_u // AR_f +AR_LIKE_i // AR_f +AR_LIKE_f // AR_f +AR_LIKE_m // AR_f +AR_LIKE_O // AR_f + +AR_m // AR_LIKE_u +AR_m // AR_LIKE_i +AR_m // AR_LIKE_f +AR_m // AR_LIKE_m + +AR_LIKE_m // AR_m + +AR_m /= f +AR_m //= f +AR_m /= AR_f +AR_m /= AR_LIKE_f +AR_m //= AR_f +AR_m //= AR_LIKE_f + +AR_O // AR_LIKE_b +AR_O // AR_LIKE_u +AR_O // AR_LIKE_i +AR_O // AR_LIKE_f +AR_O // AR_LIKE_O + +AR_LIKE_b // AR_O +AR_LIKE_u // AR_O +AR_LIKE_i // AR_O +AR_LIKE_f // AR_O +AR_LIKE_O // AR_O + +# Inplace multiplication + +AR_b *= AR_LIKE_b + +AR_u *= AR_LIKE_b +AR_u *= AR_LIKE_u + +AR_i *= AR_LIKE_b +AR_i *= AR_LIKE_u +AR_i *= AR_LIKE_i + +AR_integer *= AR_LIKE_b +AR_integer *= AR_LIKE_u +AR_integer *= AR_LIKE_i + +AR_f *= AR_LIKE_b +AR_f *= AR_LIKE_u +AR_f *= AR_LIKE_i +AR_f *= AR_LIKE_f + +AR_c *= AR_LIKE_b +AR_c *= AR_LIKE_u +AR_c *= AR_LIKE_i +AR_c *= AR_LIKE_f +AR_c *= AR_LIKE_c + +AR_m *= AR_LIKE_b +AR_m *= AR_LIKE_u +AR_m *= AR_LIKE_i +AR_m *= AR_LIKE_f + +AR_O *= AR_LIKE_b +AR_O *= AR_LIKE_u +AR_O *= AR_LIKE_i +AR_O *= AR_LIKE_f +AR_O *= AR_LIKE_c +AR_O *= AR_LIKE_O + 
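+# In-place operators cannot change the array's dtype, so each block above
+# only pairs an array with right-hand operands that cast safely to it; a
+# case like AR_f *= AR_LIKE_c, which would need a complex result in a
+# float64 array, is rejected by the stubs and raises at runtime.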
+# Inplace power + +AR_u **= AR_LIKE_b +AR_u **= AR_LIKE_u + +AR_i **= AR_LIKE_b +AR_i **= AR_LIKE_u +AR_i **= AR_LIKE_i + +AR_integer **= AR_LIKE_b +AR_integer **= AR_LIKE_u +AR_integer **= AR_LIKE_i + +AR_f **= AR_LIKE_b +AR_f **= AR_LIKE_u +AR_f **= AR_LIKE_i +AR_f **= AR_LIKE_f + +AR_c **= AR_LIKE_b +AR_c **= AR_LIKE_u +AR_c **= AR_LIKE_i +AR_c **= AR_LIKE_f +AR_c **= AR_LIKE_c + +AR_O **= AR_LIKE_b +AR_O **= AR_LIKE_u +AR_O **= AR_LIKE_i +AR_O **= AR_LIKE_f +AR_O **= AR_LIKE_c +AR_O **= AR_LIKE_O + +# unary ops + +-c16 +-c8 +-f8 +-f4 +-i8 +-i4 +with pytest.warns(RuntimeWarning): + -u8 + -u4 +-td +-AR_f + ++c16 ++c8 ++f8 ++f4 ++i8 ++i4 ++u8 ++u4 ++td ++AR_f + +abs(c16) +abs(c8) +abs(f8) +abs(f4) +abs(i8) +abs(i4) +abs(u8) +abs(u4) +abs(td) +abs(b_) +abs(AR_f) + +# Time structures + +dt + td +dt + i +dt + i4 +dt + i8 +dt - dt +dt - i +dt - i4 +dt - i8 + +td + td +td + i +td + i4 +td + i8 +td - td +td - i +td - i4 +td - i8 +td / f +td / f4 +td / f8 +td / td +td // td +td % td + + +# boolean + +b_ / b +b_ / b_ +b_ / i +b_ / i8 +b_ / i4 +b_ / u8 +b_ / u4 +b_ / f +b_ / f8 +b_ / f4 +b_ / c +b_ / c16 +b_ / c8 + +b / b_ +b_ / b_ +i / b_ +i8 / b_ +i4 / b_ +u8 / b_ +u4 / b_ +f / b_ +f8 / b_ +f4 / b_ +c / b_ +c16 / b_ +c8 / b_ + +# Complex + +c16 + c16 +c16 + f8 +c16 + i8 +c16 + c8 +c16 + f4 +c16 + i4 +c16 + b_ +c16 + b +c16 + c +c16 + f +c16 + i +c16 + AR_f + +c16 + c16 +f8 + c16 +i8 + c16 +c8 + c16 +f4 + c16 +i4 + c16 +b_ + c16 +b + c16 +c + c16 +f + c16 +i + c16 +AR_f + c16 + +c8 + c16 +c8 + f8 +c8 + i8 +c8 + c8 +c8 + f4 +c8 + i4 +c8 + b_ +c8 + b +c8 + c +c8 + f +c8 + i +c8 + AR_f + +c16 + c8 +f8 + c8 +i8 + c8 +c8 + c8 +f4 + c8 +i4 + c8 +b_ + c8 +b + c8 +c + c8 +f + c8 +i + c8 +AR_f + c8 + +# Float + +f8 + f8 +f8 + i8 +f8 + f4 +f8 + i4 +f8 + b_ +f8 + b +f8 + c +f8 + f +f8 + i +f8 + AR_f + +f8 + f8 +i8 + f8 +f4 + f8 +i4 + f8 +b_ + f8 +b + f8 +c + f8 +f + f8 +i + f8 +AR_f + f8 + +f4 + f8 +f4 + i8 +f4 + f4 +f4 + i4 +f4 + b_ +f4 + b +f4 + c +f4 + f +f4 + i +f4 + AR_f + +f8 + f4 +i8 + f4 +f4 + f4 +i4 + f4 +b_ + f4 +b + f4 +c + f4 +f + f4 +i + f4 +AR_f + f4 + +# Int + +i8 + i8 +i8 + u8 +i8 + i4 +i8 + u4 +i8 + b_ +i8 + b +i8 + c +i8 + f +i8 + i +i8 + AR_f + +u8 + u8 +u8 + i4 +u8 + u4 +u8 + b_ +u8 + b +u8 + c +u8 + f +u8 + i +u8 + AR_f + +i8 + i8 +u8 + i8 +i4 + i8 +u4 + i8 +b_ + i8 +b + i8 +c + i8 +f + i8 +i + i8 +AR_f + i8 + +u8 + u8 +i4 + u8 +u4 + u8 +b_ + u8 +b + u8 +c + u8 +f + u8 +i + u8 +AR_f + u8 + +i4 + i8 +i4 + i4 +i4 + i +i4 + b_ +i4 + b +i4 + AR_f + +u4 + i8 +u4 + i4 +u4 + u8 +u4 + u4 +u4 + i +u4 + b_ +u4 + b +u4 + AR_f + +i8 + i4 +i4 + i4 +i + i4 +b_ + i4 +b + i4 +AR_f + i4 + +i8 + u4 +i4 + u4 +u8 + u4 +u4 + u4 +b_ + u4 +b + u4 +i + u4 +AR_f + u4 diff --git a/python/numpy/typing/tests/data/pass/array_constructors.py b/python/numpy/typing/tests/data/pass/array_constructors.py new file mode 100644 index 000000000..17b6fab93 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/array_constructors.py @@ -0,0 +1,137 @@ +from typing import Any + +import numpy as np +import numpy.typing as npt + +class Index: + def __index__(self) -> int: + return 0 + + +class SubClass(npt.NDArray[np.float64]): + pass + + +def func(i: int, j: int, **kwargs: Any) -> SubClass: + return B + + +i8 = np.int64(1) + +A = np.array([1]) +B = A.view(SubClass).copy() +B_stack = np.array([[1], [1]]).view(SubClass) +C = [1] + +np.ndarray(Index()) +np.ndarray([Index()]) + +np.array(1, dtype=float) +np.array(1, copy=None) +np.array(1, order='F') +np.array(1, order=None) +np.array(1, subok=True) +np.array(1, ndmin=3) 
+np.array(1, str, copy=True, order='C', subok=False, ndmin=2) + +np.asarray(A) +np.asarray(B) +np.asarray(C) + +np.asanyarray(A) +np.asanyarray(B) +np.asanyarray(B, dtype=int) +np.asanyarray(C) + +np.ascontiguousarray(A) +np.ascontiguousarray(B) +np.ascontiguousarray(C) + +np.asfortranarray(A) +np.asfortranarray(B) +np.asfortranarray(C) + +np.require(A) +np.require(B) +np.require(B, dtype=int) +np.require(B, requirements=None) +np.require(B, requirements="E") +np.require(B, requirements=["ENSUREARRAY"]) +np.require(B, requirements={"F", "E"}) +np.require(B, requirements=["C", "OWNDATA"]) +np.require(B, requirements="W") +np.require(B, requirements="A") +np.require(C) + +np.linspace(0, 2) +np.linspace(0.5, [0, 1, 2]) +np.linspace([0, 1, 2], 3) +np.linspace(0j, 2) +np.linspace(0, 2, num=10) +np.linspace(0, 2, endpoint=True) +np.linspace(0, 2, retstep=True) +np.linspace(0j, 2j, retstep=True) +np.linspace(0, 2, dtype=bool) +np.linspace([0, 1], [2, 3], axis=Index()) + +np.logspace(0, 2, base=2) +np.logspace(0, 2, base=2) +np.logspace(0, 2, base=[1j, 2j], num=2) + +np.geomspace(1, 2) + +np.zeros_like(A) +np.zeros_like(C) +np.zeros_like(B) +np.zeros_like(B, dtype=np.int64) + +np.ones_like(A) +np.ones_like(C) +np.ones_like(B) +np.ones_like(B, dtype=np.int64) + +np.empty_like(A) +np.empty_like(C) +np.empty_like(B) +np.empty_like(B, dtype=np.int64) + +np.full_like(A, i8) +np.full_like(C, i8) +np.full_like(B, i8) +np.full_like(B, i8, dtype=np.int64) + +np.ones(1) +np.ones([1, 1, 1]) + +np.full(1, i8) +np.full([1, 1, 1], i8) + +np.indices([1, 2, 3]) +np.indices([1, 2, 3], sparse=True) + +np.fromfunction(func, (3, 5)) + +np.identity(10) + +np.atleast_1d(C) +np.atleast_1d(A) +np.atleast_1d(C, C) +np.atleast_1d(C, A) +np.atleast_1d(A, A) + +np.atleast_2d(C) + +np.atleast_3d(C) + +np.vstack([C, C]) +np.vstack([C, A]) +np.vstack([A, A]) + +np.hstack([C, C]) + +np.stack([C, C]) +np.stack([C, C], axis=0) +np.stack([C, C], out=B_stack) + +np.block([[C, C], [C, C]]) +np.block(A) diff --git a/python/numpy/typing/tests/data/pass/array_like.py b/python/numpy/typing/tests/data/pass/array_like.py new file mode 100644 index 000000000..264ec55da --- /dev/null +++ b/python/numpy/typing/tests/data/pass/array_like.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from numpy._typing import NDArray, ArrayLike, _SupportsArray + +x1: ArrayLike = True +x2: ArrayLike = 5 +x3: ArrayLike = 1.0 +x4: ArrayLike = 1 + 1j +x5: ArrayLike = np.int8(1) +x6: ArrayLike = np.float64(1) +x7: ArrayLike = np.complex128(1) +x8: ArrayLike = np.array([1, 2, 3]) +x9: ArrayLike = [1, 2, 3] +x10: ArrayLike = (1, 2, 3) +x11: ArrayLike = "foo" +x12: ArrayLike = memoryview(b'foo') + + +class A: + def __array__(self, dtype: np.dtype | None = None) -> NDArray[np.float64]: + return np.array([1.0, 2.0, 3.0]) + + +x13: ArrayLike = A() + +scalar: _SupportsArray[np.dtype[np.int64]] = np.int64(1) +scalar.__array__() +array: _SupportsArray[np.dtype[np.int_]] = np.array(1) +array.__array__() + +a: _SupportsArray[np.dtype[np.float64]] = A() +a.__array__() +a.__array__() + +# Escape hatch for when you mean to make something like an object +# array. 
+object_array_scalar: object = (i for i in range(10)) +np.array(object_array_scalar) diff --git a/python/numpy/typing/tests/data/pass/arrayprint.py b/python/numpy/typing/tests/data/pass/arrayprint.py new file mode 100644 index 000000000..6c704c755 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/arrayprint.py @@ -0,0 +1,37 @@ +import numpy as np + +AR = np.arange(10) +AR.setflags(write=False) + +with np.printoptions(): + np.set_printoptions( + precision=1, + threshold=2, + edgeitems=3, + linewidth=4, + suppress=False, + nanstr="Bob", + infstr="Bill", + formatter={}, + sign="+", + floatmode="unique", + ) + np.get_printoptions() + str(AR) + + np.array2string( + AR, + max_line_width=5, + precision=2, + suppress_small=True, + separator=";", + prefix="test", + threshold=5, + floatmode="fixed", + suffix="?", + legacy="1.13", + ) + np.format_float_scientific(1, precision=5) + np.format_float_positional(1, trim="k") + np.array_repr(AR) + np.array_str(AR) diff --git a/python/numpy/typing/tests/data/pass/arrayterator.py b/python/numpy/typing/tests/data/pass/arrayterator.py new file mode 100644 index 000000000..572be5e2f --- /dev/null +++ b/python/numpy/typing/tests/data/pass/arrayterator.py @@ -0,0 +1,27 @@ + +from __future__ import annotations + +from typing import Any +import numpy as np + +AR_i8: np.ndarray[Any, np.dtype[np.int_]] = np.arange(10) +ar_iter = np.lib.Arrayterator(AR_i8) + +ar_iter.var +ar_iter.buf_size +ar_iter.start +ar_iter.stop +ar_iter.step +ar_iter.shape +ar_iter.flat + +ar_iter.__array__() + +for i in ar_iter: + pass + +ar_iter[0] +ar_iter[...] +ar_iter[:] +ar_iter[0, 0, 0] +ar_iter[..., 0, :] diff --git a/python/numpy/typing/tests/data/pass/bitwise_ops.py b/python/numpy/typing/tests/data/pass/bitwise_ops.py new file mode 100644 index 000000000..22a245d21 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/bitwise_ops.py @@ -0,0 +1,131 @@ +import numpy as np + +i8 = np.int64(1) +u8 = np.uint64(1) + +i4 = np.int32(1) +u4 = np.uint32(1) + +b_ = np.bool(1) + +b = bool(1) +i = int(1) + +AR = np.array([0, 1, 2], dtype=np.int32) +AR.setflags(write=False) + + +i8 << i8 +i8 >> i8 +i8 | i8 +i8 ^ i8 +i8 & i8 + +i << AR +i >> AR +i | AR +i ^ AR +i & AR + +i8 << AR +i8 >> AR +i8 | AR +i8 ^ AR +i8 & AR + +i4 << i4 +i4 >> i4 +i4 | i4 +i4 ^ i4 +i4 & i4 + +i8 << i4 +i8 >> i4 +i8 | i4 +i8 ^ i4 +i8 & i4 + +i8 << i +i8 >> i +i8 | i +i8 ^ i +i8 & i + +i8 << b_ +i8 >> b_ +i8 | b_ +i8 ^ b_ +i8 & b_ + +i8 << b +i8 >> b +i8 | b +i8 ^ b +i8 & b + +u8 << u8 +u8 >> u8 +u8 | u8 +u8 ^ u8 +u8 & u8 + +u4 << u4 +u4 >> u4 +u4 | u4 +u4 ^ u4 +u4 & u4 + +u4 << i4 +u4 >> i4 +u4 | i4 +u4 ^ i4 +u4 & i4 + +u4 << i +u4 >> i +u4 | i +u4 ^ i +u4 & i + +u8 << b_ +u8 >> b_ +u8 | b_ +u8 ^ b_ +u8 & b_ + +u8 << b +u8 >> b +u8 | b +u8 ^ b +u8 & b + +b_ << b_ +b_ >> b_ +b_ | b_ +b_ ^ b_ +b_ & b_ + +b_ << AR +b_ >> AR +b_ | AR +b_ ^ AR +b_ & AR + +b_ << b +b_ >> b +b_ | b +b_ ^ b +b_ & b + +b_ << i +b_ >> i +b_ | i +b_ ^ i +b_ & i + +~i8 +~i4 +~u8 +~u4 +~b_ +~AR diff --git a/python/numpy/typing/tests/data/pass/comparisons.py b/python/numpy/typing/tests/data/pass/comparisons.py new file mode 100644 index 000000000..a461d8b66 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/comparisons.py @@ -0,0 +1,315 @@ +from __future__ import annotations + +from typing import cast, Any +import numpy as np + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = 
np.bool() + +b = bool() +c = complex() +f = float() +i = int() + +SEQ = (0, 1, 2, 3, 4) + +AR_b: np.ndarray[Any, np.dtype[np.bool]] = np.array([True]) +AR_u: np.ndarray[Any, np.dtype[np.uint32]] = np.array([1], dtype=np.uint32) +AR_i: np.ndarray[Any, np.dtype[np.int_]] = np.array([1]) +AR_f: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.0]) +AR_c: np.ndarray[Any, np.dtype[np.complex128]] = np.array([1.0j]) +AR_S: np.ndarray[Any, np.dtype[np.bytes_]] = np.array([b"a"], "S") +AR_T = cast(np.ndarray[Any, np.dtypes.StringDType], np.array(["a"], "T")) +AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.array(["a"], "U") +AR_m: np.ndarray[Any, np.dtype[np.timedelta64]] = np.array([np.timedelta64("1")]) +AR_M: np.ndarray[Any, np.dtype[np.datetime64]] = np.array([np.datetime64("1")]) +AR_O: np.ndarray[Any, np.dtype[np.object_]] = np.array([1], dtype=object) + +# Arrays + +AR_b > AR_b +AR_b > AR_u +AR_b > AR_i +AR_b > AR_f +AR_b > AR_c + +AR_u > AR_b +AR_u > AR_u +AR_u > AR_i +AR_u > AR_f +AR_u > AR_c + +AR_i > AR_b +AR_i > AR_u +AR_i > AR_i +AR_i > AR_f +AR_i > AR_c + +AR_f > AR_b +AR_f > AR_u +AR_f > AR_i +AR_f > AR_f +AR_f > AR_c + +AR_c > AR_b +AR_c > AR_u +AR_c > AR_i +AR_c > AR_f +AR_c > AR_c + +AR_S > AR_S +AR_S > b"" + +AR_T > AR_T +AR_T > AR_U +AR_T > "" + +AR_U > AR_U +AR_U > AR_T +AR_U > "" + +AR_m > AR_b +AR_m > AR_u +AR_m > AR_i +AR_b > AR_m +AR_u > AR_m +AR_i > AR_m + +AR_M > AR_M + +AR_O > AR_O +1 > AR_O +AR_O > 1 + +# Time structures + +dt > dt + +td > td +td > i +td > i4 +td > i8 +td > AR_i +td > SEQ + +# boolean + +b_ > b +b_ > b_ +b_ > i +b_ > i8 +b_ > i4 +b_ > u8 +b_ > u4 +b_ > f +b_ > f8 +b_ > f4 +b_ > c +b_ > c16 +b_ > c8 +b_ > AR_i +b_ > SEQ + +# Complex + +c16 > c16 +c16 > f8 +c16 > i8 +c16 > c8 +c16 > f4 +c16 > i4 +c16 > b_ +c16 > b +c16 > c +c16 > f +c16 > i +c16 > AR_i +c16 > SEQ + +c16 > c16 +f8 > c16 +i8 > c16 +c8 > c16 +f4 > c16 +i4 > c16 +b_ > c16 +b > c16 +c > c16 +f > c16 +i > c16 +AR_i > c16 +SEQ > c16 + +c8 > c16 +c8 > f8 +c8 > i8 +c8 > c8 +c8 > f4 +c8 > i4 +c8 > b_ +c8 > b +c8 > c +c8 > f +c8 > i +c8 > AR_i +c8 > SEQ + +c16 > c8 +f8 > c8 +i8 > c8 +c8 > c8 +f4 > c8 +i4 > c8 +b_ > c8 +b > c8 +c > c8 +f > c8 +i > c8 +AR_i > c8 +SEQ > c8 + +# Float + +f8 > f8 +f8 > i8 +f8 > f4 +f8 > i4 +f8 > b_ +f8 > b +f8 > c +f8 > f +f8 > i +f8 > AR_i +f8 > SEQ + +f8 > f8 +i8 > f8 +f4 > f8 +i4 > f8 +b_ > f8 +b > f8 +c > f8 +f > f8 +i > f8 +AR_i > f8 +SEQ > f8 + +f4 > f8 +f4 > i8 +f4 > f4 +f4 > i4 +f4 > b_ +f4 > b +f4 > c +f4 > f +f4 > i +f4 > AR_i +f4 > SEQ + +f8 > f4 +i8 > f4 +f4 > f4 +i4 > f4 +b_ > f4 +b > f4 +c > f4 +f > f4 +i > f4 +AR_i > f4 +SEQ > f4 + +# Int + +i8 > i8 +i8 > u8 +i8 > i4 +i8 > u4 +i8 > b_ +i8 > b +i8 > c +i8 > f +i8 > i +i8 > AR_i +i8 > SEQ + +u8 > u8 +u8 > i4 +u8 > u4 +u8 > b_ +u8 > b +u8 > c +u8 > f +u8 > i +u8 > AR_i +u8 > SEQ + +i8 > i8 +u8 > i8 +i4 > i8 +u4 > i8 +b_ > i8 +b > i8 +c > i8 +f > i8 +i > i8 +AR_i > i8 +SEQ > i8 + +u8 > u8 +i4 > u8 +u4 > u8 +b_ > u8 +b > u8 +c > u8 +f > u8 +i > u8 +AR_i > u8 +SEQ > u8 + +i4 > i8 +i4 > i4 +i4 > i +i4 > b_ +i4 > b +i4 > AR_i +i4 > SEQ + +u4 > i8 +u4 > i4 +u4 > u8 +u4 > u4 +u4 > i +u4 > b_ +u4 > b +u4 > AR_i +u4 > SEQ + +i8 > i4 +i4 > i4 +i > i4 +b_ > i4 +b > i4 +AR_i > i4 +SEQ > i4 + +i8 > u4 +i4 > u4 +u8 > u4 +u4 > u4 +b_ > u4 +b > u4 +i > u4 +AR_i > u4 +SEQ > u4 diff --git a/python/numpy/typing/tests/data/pass/dtype.py b/python/numpy/typing/tests/data/pass/dtype.py new file mode 100644 index 000000000..9f1151827 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/dtype.py @@ -0,0 +1,57 @@ +import numpy as 
np + +dtype_obj = np.dtype(np.str_) +void_dtype_obj = np.dtype([("f0", np.float64), ("f1", np.float32)]) + +np.dtype(dtype=np.int64) +np.dtype(int) +np.dtype("int") +np.dtype(None) + +np.dtype((int, 2)) +np.dtype((int, (1,))) + +np.dtype({"names": ["a", "b"], "formats": [int, float]}) +np.dtype({"names": ["a"], "formats": [int], "titles": [object]}) +np.dtype({"names": ["a"], "formats": [int], "titles": [object()]}) + +np.dtype([("name", np.str_, 16), ("grades", np.float64, (2,)), ("age", "int32")]) + +np.dtype( + { + "names": ["a", "b"], + "formats": [int, float], + "itemsize": 9, + "aligned": False, + "titles": ["x", "y"], + "offsets": [0, 1], + } +) + +np.dtype((np.float64, float)) + + +class Test: + dtype = np.dtype(float) + + +np.dtype(Test()) + +# Methods and attributes +dtype_obj.base +dtype_obj.subdtype +dtype_obj.newbyteorder() +dtype_obj.type +dtype_obj.name +dtype_obj.names + +dtype_obj * 0 +dtype_obj * 2 + +0 * dtype_obj +2 * dtype_obj + +void_dtype_obj["f0"] +void_dtype_obj[0] +void_dtype_obj[["f0", "f1"]] +void_dtype_obj[["f0"]] diff --git a/python/numpy/typing/tests/data/pass/einsumfunc.py b/python/numpy/typing/tests/data/pass/einsumfunc.py new file mode 100644 index 000000000..429764e67 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/einsumfunc.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from typing import Any + +import numpy as np + +AR_LIKE_b = [True, True, True] +AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)] +AR_LIKE_i = [1, 2, 3] +AR_LIKE_f = [1.0, 2.0, 3.0] +AR_LIKE_c = [1j, 2j, 3j] +AR_LIKE_U = ["1", "2", "3"] + +OUT_f: np.ndarray[Any, np.dtype[np.float64]] = np.empty(3, dtype=np.float64) +OUT_c: np.ndarray[Any, np.dtype[np.complex128]] = np.empty(3, dtype=np.complex128) + +np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b) +np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u) +np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i) +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f) +np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c) +np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i) +np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c) + +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16") +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe") +np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, out=OUT_c) +np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=int, casting="unsafe", out=OUT_f) + +np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b) +np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u) +np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i) +np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f) +np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c) +np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i) +np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c) diff --git a/python/numpy/typing/tests/data/pass/flatiter.py b/python/numpy/typing/tests/data/pass/flatiter.py new file mode 100644 index 000000000..e64e4261b --- /dev/null +++ b/python/numpy/typing/tests/data/pass/flatiter.py @@ -0,0 +1,19 @@ +import numpy as np + +a = np.empty((2, 2)).flat + +a.base +a.copy() +a.coords +a.index +iter(a) +next(a) +a[0] +a[[0, 1, 2]] +a[...] 
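+# Note: indexing a flatiter returns a copy of the underlying data rather
+# than a view, so the slice below produces a fresh 1-D array.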
+a[:] +a.__array__() +a.__array__(np.dtype(np.float64)) + +b = np.array([1]).flat +a[b] diff --git a/python/numpy/typing/tests/data/pass/fromnumeric.py b/python/numpy/typing/tests/data/pass/fromnumeric.py new file mode 100644 index 000000000..7cc2bcfd8 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/fromnumeric.py @@ -0,0 +1,272 @@ +"""Tests for :mod:`numpy._core.fromnumeric`.""" + +import numpy as np + +A = np.array(True, ndmin=2, dtype=bool) +B = np.array(1.0, ndmin=2, dtype=np.float32) +A.setflags(write=False) +B.setflags(write=False) + +a = np.bool(True) +b = np.float32(1.0) +c = 1.0 +d = np.array(1.0, dtype=np.float32) # writeable + +np.take(a, 0) +np.take(b, 0) +np.take(c, 0) +np.take(A, 0) +np.take(B, 0) +np.take(A, [0]) +np.take(B, [0]) + +np.reshape(a, 1) +np.reshape(b, 1) +np.reshape(c, 1) +np.reshape(A, 1) +np.reshape(B, 1) + +np.choose(a, [True, True]) +np.choose(A, [1.0, 1.0]) + +np.repeat(a, 1) +np.repeat(b, 1) +np.repeat(c, 1) +np.repeat(A, 1) +np.repeat(B, 1) + +np.swapaxes(A, 0, 0) +np.swapaxes(B, 0, 0) + +np.transpose(a) +np.transpose(b) +np.transpose(c) +np.transpose(A) +np.transpose(B) + +np.partition(a, 0, axis=None) +np.partition(b, 0, axis=None) +np.partition(c, 0, axis=None) +np.partition(A, 0) +np.partition(B, 0) + +np.argpartition(a, 0) +np.argpartition(b, 0) +np.argpartition(c, 0) +np.argpartition(A, 0) +np.argpartition(B, 0) + +np.sort(A, 0) +np.sort(B, 0) + +np.argsort(A, 0) +np.argsort(B, 0) + +np.argmax(A) +np.argmax(B) +np.argmax(A, axis=0) +np.argmax(B, axis=0) + +np.argmin(A) +np.argmin(B) +np.argmin(A, axis=0) +np.argmin(B, axis=0) + +np.searchsorted(A[0], 0) +np.searchsorted(B[0], 0) +np.searchsorted(A[0], [0]) +np.searchsorted(B[0], [0]) + +np.resize(a, (5, 5)) +np.resize(b, (5, 5)) +np.resize(c, (5, 5)) +np.resize(A, (5, 5)) +np.resize(B, (5, 5)) + +np.squeeze(a) +np.squeeze(b) +np.squeeze(c) +np.squeeze(A) +np.squeeze(B) + +np.diagonal(A) +np.diagonal(B) + +np.trace(A) +np.trace(B) + +np.ravel(a) +np.ravel(b) +np.ravel(c) +np.ravel(A) +np.ravel(B) + +np.nonzero(A) +np.nonzero(B) + +np.shape(a) +np.shape(b) +np.shape(c) +np.shape(A) +np.shape(B) + +np.compress([True], a) +np.compress([True], b) +np.compress([True], c) +np.compress([True], A) +np.compress([True], B) + +np.clip(a, 0, 1.0) +np.clip(b, -1, 1) +np.clip(a, 0, None) +np.clip(b, None, 1) +np.clip(c, 0, 1) +np.clip(A, 0, 1) +np.clip(B, 0, 1) +np.clip(B, [0, 1], [1, 2]) + +np.sum(a) +np.sum(b) +np.sum(c) +np.sum(A) +np.sum(B) +np.sum(A, axis=0) +np.sum(B, axis=0) + +np.all(a) +np.all(b) +np.all(c) +np.all(A) +np.all(B) +np.all(A, axis=0) +np.all(B, axis=0) +np.all(A, keepdims=True) +np.all(B, keepdims=True) + +np.any(a) +np.any(b) +np.any(c) +np.any(A) +np.any(B) +np.any(A, axis=0) +np.any(B, axis=0) +np.any(A, keepdims=True) +np.any(B, keepdims=True) + +np.cumsum(a) +np.cumsum(b) +np.cumsum(c) +np.cumsum(A) +np.cumsum(B) + +np.cumulative_sum(a) +np.cumulative_sum(b) +np.cumulative_sum(c) +np.cumulative_sum(A, axis=0) +np.cumulative_sum(B, axis=0) + +np.ptp(b) +np.ptp(c) +np.ptp(B) +np.ptp(B, axis=0) +np.ptp(B, keepdims=True) + +np.amax(a) +np.amax(b) +np.amax(c) +np.amax(A) +np.amax(B) +np.amax(A, axis=0) +np.amax(B, axis=0) +np.amax(A, keepdims=True) +np.amax(B, keepdims=True) + +np.amin(a) +np.amin(b) +np.amin(c) +np.amin(A) +np.amin(B) +np.amin(A, axis=0) +np.amin(B, axis=0) +np.amin(A, keepdims=True) +np.amin(B, keepdims=True) + +np.prod(a) +np.prod(b) +np.prod(c) +np.prod(A) +np.prod(B) +np.prod(a, dtype=None) +np.prod(A, dtype=None) +np.prod(A, axis=0) +np.prod(B, axis=0) 
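+# keepdims/out variants follow; out=d works because d is the one array
+# defined above that was left writeable (A and B have write=False).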
+np.prod(A, keepdims=True) +np.prod(B, keepdims=True) +np.prod(b, out=d) +np.prod(B, out=d) + +np.cumprod(a) +np.cumprod(b) +np.cumprod(c) +np.cumprod(A) +np.cumprod(B) + +np.cumulative_prod(a) +np.cumulative_prod(b) +np.cumulative_prod(c) +np.cumulative_prod(A, axis=0) +np.cumulative_prod(B, axis=0) + +np.ndim(a) +np.ndim(b) +np.ndim(c) +np.ndim(A) +np.ndim(B) + +np.size(a) +np.size(b) +np.size(c) +np.size(A) +np.size(B) + +np.around(a) +np.around(b) +np.around(c) +np.around(A) +np.around(B) + +np.mean(a) +np.mean(b) +np.mean(c) +np.mean(A) +np.mean(B) +np.mean(A, axis=0) +np.mean(B, axis=0) +np.mean(A, keepdims=True) +np.mean(B, keepdims=True) +np.mean(b, out=d) +np.mean(B, out=d) + +np.std(a) +np.std(b) +np.std(c) +np.std(A) +np.std(B) +np.std(A, axis=0) +np.std(B, axis=0) +np.std(A, keepdims=True) +np.std(B, keepdims=True) +np.std(b, out=d) +np.std(B, out=d) + +np.var(a) +np.var(b) +np.var(c) +np.var(A) +np.var(B) +np.var(A, axis=0) +np.var(B, axis=0) +np.var(A, keepdims=True) +np.var(B, keepdims=True) +np.var(b, out=d) +np.var(B, out=d) diff --git a/python/numpy/typing/tests/data/pass/index_tricks.py b/python/numpy/typing/tests/data/pass/index_tricks.py new file mode 100644 index 000000000..dfc4ff2f3 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/index_tricks.py @@ -0,0 +1,60 @@ +from __future__ import annotations +from typing import Any +import numpy as np + +AR_LIKE_b = [[True, True], [True, True]] +AR_LIKE_i = [[1, 2], [3, 4]] +AR_LIKE_f = [[1.0, 2.0], [3.0, 4.0]] +AR_LIKE_U = [["1", "2"], ["3", "4"]] + +AR_i8: np.ndarray[Any, np.dtype[np.int64]] = np.array(AR_LIKE_i, dtype=np.int64) + +np.ndenumerate(AR_i8) +np.ndenumerate(AR_LIKE_f) +np.ndenumerate(AR_LIKE_U) + +next(np.ndenumerate(AR_i8)) +next(np.ndenumerate(AR_LIKE_f)) +next(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndenumerate(AR_i8)) +iter(np.ndenumerate(AR_LIKE_f)) +iter(np.ndenumerate(AR_LIKE_U)) + +iter(np.ndindex(1, 2, 3)) +next(np.ndindex(1, 2, 3)) + +np.unravel_index([22, 41, 37], (7, 6)) +np.unravel_index([31, 41, 13], (7, 6), order='F') +np.unravel_index(1621, (6, 7, 8, 9)) + +np.ravel_multi_index(AR_LIKE_i, (7, 6)) +np.ravel_multi_index(AR_LIKE_i, (7, 6), order='F') +np.ravel_multi_index(AR_LIKE_i, (4, 6), mode='clip') +np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=('clip', 'wrap')) +np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)) + +np.mgrid[1:1:2] +np.mgrid[1:1:2, None:10] + +np.ogrid[1:1:2] +np.ogrid[1:1:2, None:10] + +np.index_exp[0:1] +np.index_exp[0:1, None:3] +np.index_exp[0, 0:1, ..., [0, 1, 3]] + +np.s_[0:1] +np.s_[0:1, None:3] +np.s_[0, 0:1, ..., [0, 1, 3]] + +np.ix_(AR_LIKE_b[0]) +np.ix_(AR_LIKE_i[0], AR_LIKE_f[0]) +np.ix_(AR_i8[0]) + +np.fill_diagonal(AR_i8, 5) + +np.diag_indices(4) +np.diag_indices(2, 3) + +np.diag_indices_from(AR_i8) diff --git a/python/numpy/typing/tests/data/pass/lib_user_array.py b/python/numpy/typing/tests/data/pass/lib_user_array.py new file mode 100644 index 000000000..62b7e85d7 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/lib_user_array.py @@ -0,0 +1,22 @@ +"""Based on the `if __name__ == "__main__"` test code in `lib/_user_array_impl.py`.""" + +from __future__ import annotations + +import numpy as np +from numpy.lib.user_array import container + +N = 10_000 +W = H = int(N**0.5) + +a: np.ndarray[tuple[int, int], np.dtype[np.int32]] +ua: container[tuple[int, int], np.dtype[np.int32]] + +a = np.arange(N, dtype=np.int32).reshape(W, H) +ua = container(a) + +ua_small: container[tuple[int, int], np.dtype[np.int32]] = ua[:3, :5] +ua_small[0, 0] = 10 + +ua_bool: 
container[tuple[int, int], np.dtype[np.bool]] = ua_small > 1 + +# shape: tuple[int, int] = np.shape(ua) diff --git a/python/numpy/typing/tests/data/pass/lib_utils.py b/python/numpy/typing/tests/data/pass/lib_utils.py new file mode 100644 index 000000000..f9b3381e1 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/lib_utils.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +from io import StringIO + +import numpy as np +import numpy.lib.array_utils as array_utils + +FILE = StringIO() +AR = np.arange(10, dtype=np.float64) + + +def func(a: int) -> bool: + return True + + +array_utils.byte_bounds(AR) +array_utils.byte_bounds(np.float64()) + +np.info(1, output=FILE) diff --git a/python/numpy/typing/tests/data/pass/lib_version.py b/python/numpy/typing/tests/data/pass/lib_version.py new file mode 100644 index 000000000..f3825eca5 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/lib_version.py @@ -0,0 +1,18 @@ +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +version.vstring +version.version +version.major +version.minor +version.bugfix +version.pre_release +version.is_devversion + +version == version +version != version +version < "1.8.0" +version <= version +version > version +version >= "1.8.0" diff --git a/python/numpy/typing/tests/data/pass/literal.py b/python/numpy/typing/tests/data/pass/literal.py new file mode 100644 index 000000000..c8fa47621 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/literal.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from typing import Any, TYPE_CHECKING +from functools import partial + +import pytest +import numpy as np + +if TYPE_CHECKING: + from collections.abc import Callable + +AR = np.array(0) +AR.setflags(write=False) + +KACF = frozenset({None, "K", "A", "C", "F"}) +ACF = frozenset({None, "A", "C", "F"}) +CF = frozenset({None, "C", "F"}) + +order_list: list[tuple[frozenset[str | None], Callable[..., Any]]] = [ + (KACF, AR.tobytes), + (KACF, partial(AR.astype, int)), + (KACF, AR.copy), + (ACF, partial(AR.reshape, 1)), + (KACF, AR.flatten), + (KACF, AR.ravel), + (KACF, partial(np.array, 1)), + # NOTE: __call__ is needed due to mypy bugs (#17620, #17631) + (KACF, partial(np.ndarray.__call__, 1)), + (CF, partial(np.zeros.__call__, 1)), + (CF, partial(np.ones.__call__, 1)), + (CF, partial(np.empty.__call__, 1)), + (CF, partial(np.full, 1, 1)), + (KACF, partial(np.zeros_like, AR)), + (KACF, partial(np.ones_like, AR)), + (KACF, partial(np.empty_like, AR)), + (KACF, partial(np.full_like, AR, 1)), + (KACF, partial(np.add.__call__, 1, 1)), # i.e. 
np.ufunc.__call__ + (ACF, partial(np.reshape, AR, 1)), + (KACF, partial(np.ravel, AR)), + (KACF, partial(np.asarray, 1)), + (KACF, partial(np.asanyarray, 1)), +] + +for order_set, func in order_list: + for order in order_set: + func(order=order) + + invalid_orders = KACF - order_set + for order in invalid_orders: + with pytest.raises(ValueError): + func(order=order) diff --git a/python/numpy/typing/tests/data/pass/ma.py b/python/numpy/typing/tests/data/pass/ma.py new file mode 100644 index 000000000..e7915a583 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/ma.py @@ -0,0 +1,174 @@ +from typing import Any, TypeAlias, TypeVar, cast + +import numpy as np +import numpy.typing as npt +from numpy._typing import _Shape + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, np.dtype[_ScalarT]] + +MAR_b: MaskedArray[np.bool] = np.ma.MaskedArray([True]) +MAR_u: MaskedArray[np.uint32] = np.ma.MaskedArray([1], dtype=np.uint32) +MAR_i: MaskedArray[np.int64] = np.ma.MaskedArray([1]) +MAR_f: MaskedArray[np.float64] = np.ma.MaskedArray([1.0]) +MAR_c: MaskedArray[np.complex128] = np.ma.MaskedArray([1j]) +MAR_td64: MaskedArray[np.timedelta64] = np.ma.MaskedArray([np.timedelta64(1, "D")]) +MAR_M_dt64: MaskedArray[np.datetime64] = np.ma.MaskedArray([np.datetime64(1, "D")]) +MAR_S: MaskedArray[np.bytes_] = np.ma.MaskedArray([b'foo'], dtype=np.bytes_) +MAR_U: MaskedArray[np.str_] = np.ma.MaskedArray(['foo'], dtype=np.str_) +MAR_T = cast(np.ma.MaskedArray[Any, np.dtypes.StringDType], + np.ma.MaskedArray(["a"], dtype="T")) + +AR_b: npt.NDArray[np.bool] = np.array([True, False, True]) + +AR_LIKE_b = [True] +AR_LIKE_u = [np.uint32(1)] +AR_LIKE_i = [1] +AR_LIKE_f = [1.0] +AR_LIKE_c = [1j] +AR_LIKE_m = [np.timedelta64(1, "D")] +AR_LIKE_M = [np.datetime64(1, "D")] + +MAR_f.mask = AR_b +MAR_f.mask = np.False_ + +# Inplace addition + +MAR_b += AR_LIKE_b + +MAR_u += AR_LIKE_b +MAR_u += AR_LIKE_u + +MAR_i += AR_LIKE_b +MAR_i += 2 +MAR_i += AR_LIKE_i + +MAR_f += AR_LIKE_b +MAR_f += 2 +MAR_f += AR_LIKE_u +MAR_f += AR_LIKE_i +MAR_f += AR_LIKE_f + +MAR_c += AR_LIKE_b +MAR_c += AR_LIKE_u +MAR_c += AR_LIKE_i +MAR_c += AR_LIKE_f +MAR_c += AR_LIKE_c + +MAR_td64 += AR_LIKE_b +MAR_td64 += AR_LIKE_u +MAR_td64 += AR_LIKE_i +MAR_td64 += AR_LIKE_m +MAR_M_dt64 += AR_LIKE_b +MAR_M_dt64 += AR_LIKE_u +MAR_M_dt64 += AR_LIKE_i +MAR_M_dt64 += AR_LIKE_m + +MAR_S += b'snakes' +MAR_U += 'snakes' +MAR_T += 'snakes' + +# Inplace subtraction + +MAR_u -= AR_LIKE_b +MAR_u -= AR_LIKE_u + +MAR_i -= AR_LIKE_b +MAR_i -= AR_LIKE_i + +MAR_f -= AR_LIKE_b +MAR_f -= AR_LIKE_u +MAR_f -= AR_LIKE_i +MAR_f -= AR_LIKE_f + +MAR_c -= AR_LIKE_b +MAR_c -= AR_LIKE_u +MAR_c -= AR_LIKE_i +MAR_c -= AR_LIKE_f +MAR_c -= AR_LIKE_c + +MAR_td64 -= AR_LIKE_b +MAR_td64 -= AR_LIKE_u +MAR_td64 -= AR_LIKE_i +MAR_td64 -= AR_LIKE_m +MAR_M_dt64 -= AR_LIKE_b +MAR_M_dt64 -= AR_LIKE_u +MAR_M_dt64 -= AR_LIKE_i +MAR_M_dt64 -= AR_LIKE_m + +# Inplace floor division + +MAR_f //= AR_LIKE_b +MAR_f //= 2 +MAR_f //= AR_LIKE_u +MAR_f //= AR_LIKE_i +MAR_f //= AR_LIKE_f + +MAR_td64 //= AR_LIKE_i + +# Inplace true division + +MAR_f /= AR_LIKE_b +MAR_f /= 2 +MAR_f /= AR_LIKE_u +MAR_f /= AR_LIKE_i +MAR_f /= AR_LIKE_f + +MAR_c /= AR_LIKE_b +MAR_c /= AR_LIKE_u +MAR_c /= AR_LIKE_i +MAR_c /= AR_LIKE_f +MAR_c /= AR_LIKE_c + +MAR_td64 /= AR_LIKE_i + +# Inplace multiplication + +MAR_b *= AR_LIKE_b + +MAR_u *= AR_LIKE_b +MAR_u *= AR_LIKE_u + +MAR_i *= AR_LIKE_b +MAR_i *= 2 +MAR_i *= AR_LIKE_i + +MAR_f *= AR_LIKE_b +MAR_f *= 2 +MAR_f *= AR_LIKE_u +MAR_f 
*= AR_LIKE_i +MAR_f *= AR_LIKE_f + +MAR_c *= AR_LIKE_b +MAR_c *= AR_LIKE_u +MAR_c *= AR_LIKE_i +MAR_c *= AR_LIKE_f +MAR_c *= AR_LIKE_c + +MAR_td64 *= AR_LIKE_b +MAR_td64 *= AR_LIKE_u +MAR_td64 *= AR_LIKE_i +MAR_td64 *= AR_LIKE_f + +MAR_S *= 2 +MAR_U *= 2 +MAR_T *= 2 + +# Inplace power + +MAR_u **= AR_LIKE_b +MAR_u **= AR_LIKE_u + +MAR_i **= AR_LIKE_b +MAR_i **= AR_LIKE_i + +MAR_f **= AR_LIKE_b +MAR_f **= AR_LIKE_u +MAR_f **= AR_LIKE_i +MAR_f **= AR_LIKE_f + +MAR_c **= AR_LIKE_b +MAR_c **= AR_LIKE_u +MAR_c **= AR_LIKE_i +MAR_c **= AR_LIKE_f +MAR_c **= AR_LIKE_c diff --git a/python/numpy/typing/tests/data/pass/mod.py b/python/numpy/typing/tests/data/pass/mod.py new file mode 100644 index 000000000..2b7e6cd85 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/mod.py @@ -0,0 +1,149 @@ +import numpy as np + +f8 = np.float64(1) +i8 = np.int64(1) +u8 = np.uint64(1) + +f4 = np.float32(1) +i4 = np.int32(1) +u4 = np.uint32(1) + +td = np.timedelta64(1, "D") +b_ = np.bool(1) + +b = bool(1) +f = float(1) +i = int(1) + +AR = np.array([1], dtype=np.bool) +AR.setflags(write=False) + +AR2 = np.array([1], dtype=np.timedelta64) +AR2.setflags(write=False) + +# Time structures + +td % td +td % AR2 +AR2 % td + +divmod(td, td) +divmod(td, AR2) +divmod(AR2, td) + +# Bool + +b_ % b +b_ % i +b_ % f +b_ % b_ +b_ % i8 +b_ % u8 +b_ % f8 +b_ % AR + +divmod(b_, b) +divmod(b_, i) +divmod(b_, f) +divmod(b_, b_) +divmod(b_, i8) +divmod(b_, u8) +divmod(b_, f8) +divmod(b_, AR) + +b % b_ +i % b_ +f % b_ +b_ % b_ +i8 % b_ +u8 % b_ +f8 % b_ +AR % b_ + +divmod(b, b_) +divmod(i, b_) +divmod(f, b_) +divmod(b_, b_) +divmod(i8, b_) +divmod(u8, b_) +divmod(f8, b_) +divmod(AR, b_) + +# int + +i8 % b +i8 % i +i8 % f +i8 % i8 +i8 % f8 +i4 % i8 +i4 % f8 +i4 % i4 +i4 % f4 +i8 % AR + +divmod(i8, b) +divmod(i8, i) +divmod(i8, f) +divmod(i8, i8) +divmod(i8, f8) +divmod(i8, i4) +divmod(i8, f4) +divmod(i4, i4) +divmod(i4, f4) +divmod(i8, AR) + +b % i8 +i % i8 +f % i8 +i8 % i8 +f8 % i8 +i8 % i4 +f8 % i4 +i4 % i4 +f4 % i4 +AR % i8 + +divmod(b, i8) +divmod(i, i8) +divmod(f, i8) +divmod(i8, i8) +divmod(f8, i8) +divmod(i4, i8) +divmod(f4, i8) +divmod(i4, i4) +divmod(f4, i4) +divmod(AR, i8) + +# float + +f8 % b +f8 % i +f8 % f +i8 % f4 +f4 % f4 +f8 % AR + +divmod(f8, b) +divmod(f8, i) +divmod(f8, f) +divmod(f8, f8) +divmod(f8, f4) +divmod(f4, f4) +divmod(f8, AR) + +b % f8 +i % f8 +f % f8 +f8 % f8 +f8 % f8 +f4 % f4 +AR % f8 + +divmod(b, f8) +divmod(i, f8) +divmod(f, f8) +divmod(f8, f8) +divmod(f4, f8) +divmod(f4, f4) +divmod(AR, f8) diff --git a/python/numpy/typing/tests/data/pass/modules.py b/python/numpy/typing/tests/data/pass/modules.py new file mode 100644 index 000000000..0c2fd4b7e --- /dev/null +++ b/python/numpy/typing/tests/data/pass/modules.py @@ -0,0 +1,45 @@ +import numpy as np +from numpy import f2py + +np.char +np.ctypeslib +np.emath +np.fft +np.lib +np.linalg +np.ma +np.matrixlib +np.polynomial +np.random +np.rec +np.strings +np.testing +np.version + +np.lib.format +np.lib.mixins +np.lib.scimath +np.lib.stride_tricks +np.lib.array_utils +np.ma.extras +np.polynomial.chebyshev +np.polynomial.hermite +np.polynomial.hermite_e +np.polynomial.laguerre +np.polynomial.legendre +np.polynomial.polynomial + +np.__path__ +np.__version__ + +np.__all__ +np.char.__all__ +np.ctypeslib.__all__ +np.emath.__all__ +np.lib.__all__ +np.ma.__all__ +np.random.__all__ +np.rec.__all__ +np.strings.__all__ +np.testing.__all__ +f2py.__all__ diff --git a/python/numpy/typing/tests/data/pass/multiarray.py b/python/numpy/typing/tests/data/pass/multiarray.py new 
file mode 100644 index 000000000..26cedfd77 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/multiarray.py @@ -0,0 +1,76 @@ +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] = np.array([1.0]) +AR_i4 = np.array([1], dtype=np.int32) +AR_u1 = np.array([1], dtype=np.uint8) + +AR_LIKE_f = [1.5] +AR_LIKE_i = [1] + +b_f8 = np.broadcast(AR_f8) +b_i4_f8_f8 = np.broadcast(AR_i4, AR_f8, AR_f8) + +next(b_f8) +b_f8.reset() +b_f8.index +b_f8.iters +b_f8.nd +b_f8.ndim +b_f8.numiter +b_f8.shape +b_f8.size + +next(b_i4_f8_f8) +b_i4_f8_f8.reset() +b_i4_f8_f8.ndim +b_i4_f8_f8.index +b_i4_f8_f8.iters +b_i4_f8_f8.nd +b_i4_f8_f8.numiter +b_i4_f8_f8.shape +b_i4_f8_f8.size + +np.inner(AR_f8, AR_i4) + +np.where([True, True, False]) +np.where([True, True, False], 1, 0) + +np.lexsort([0, 1, 2]) + +np.can_cast(np.dtype("i8"), int) +np.can_cast(AR_f8, "f8") +np.can_cast(AR_f8, np.complex128, casting="unsafe") + +np.min_scalar_type([1]) +np.min_scalar_type(AR_f8) + +np.result_type(int, AR_i4) +np.result_type(AR_f8, AR_u1) +np.result_type(AR_f8, np.complex128) + +np.dot(AR_LIKE_f, AR_i4) +np.dot(AR_u1, 1) +np.dot(1.5j, 1) +np.dot(AR_u1, 1, out=AR_f8) + +np.vdot(AR_LIKE_f, AR_i4) +np.vdot(AR_u1, 1) +np.vdot(1.5j, 1) + +np.bincount(AR_i4) + +np.copyto(AR_f8, [1.6]) + +np.putmask(AR_f8, [True], 1.5) + +np.packbits(AR_i4) +np.packbits(AR_u1) + +np.unpackbits(AR_u1) + +np.shares_memory(1, 2) +np.shares_memory(AR_f8, AR_f8, max_work=1) + +np.may_share_memory(1, 2) +np.may_share_memory(AR_f8, AR_f8, max_work=1) diff --git a/python/numpy/typing/tests/data/pass/ndarray_conversion.py b/python/numpy/typing/tests/data/pass/ndarray_conversion.py new file mode 100644 index 000000000..76da1dadd --- /dev/null +++ b/python/numpy/typing/tests/data/pass/ndarray_conversion.py @@ -0,0 +1,87 @@ +import os +import tempfile + +import numpy as np + +nd = np.array([[1, 2], [3, 4]]) +scalar_array = np.array(1) + +# item +scalar_array.item() +nd.item(1) +nd.item(0, 1) +nd.item((0, 1)) + +# tobytes +nd.tobytes() +nd.tobytes("C") +nd.tobytes(None) + +# tofile +if os.name != "nt": + with tempfile.NamedTemporaryFile(suffix=".txt") as tmp: + nd.tofile(tmp.name) + nd.tofile(tmp.name, "") + nd.tofile(tmp.name, sep="") + + nd.tofile(tmp.name, "", "%s") + nd.tofile(tmp.name, format="%s") + + nd.tofile(tmp) + +# dump is pretty simple +# dumps is pretty simple + +# astype +nd.astype("float") +nd.astype(float) + +nd.astype(float, "K") +nd.astype(float, order="K") + +nd.astype(float, "K", "unsafe") +nd.astype(float, casting="unsafe") + +nd.astype(float, "K", "unsafe", True) +nd.astype(float, subok=True) + +nd.astype(float, "K", "unsafe", True, True) +nd.astype(float, copy=True) + +# byteswap +nd.byteswap() +nd.byteswap(True) + +# copy +nd.copy() +nd.copy("C") + +# view +nd.view() +nd.view(np.int64) +nd.view(dtype=np.int64) +nd.view(np.int64, np.matrix) +nd.view(type=np.matrix) + +# getfield +complex_array = np.array([[1 + 1j, 0], [0, 1 - 1j]], dtype=np.complex128) + +complex_array.getfield("float") +complex_array.getfield(float) + +complex_array.getfield("float", 8) +complex_array.getfield(float, offset=8) + +# setflags +nd.setflags() + +nd.setflags(True) +nd.setflags(write=True) + +nd.setflags(True, True) +nd.setflags(write=True, align=True) + +nd.setflags(True, True, False) +nd.setflags(write=True, align=True, uic=False) + +# fill is pretty simple diff --git a/python/numpy/typing/tests/data/pass/ndarray_misc.py b/python/numpy/typing/tests/data/pass/ndarray_misc.py new file mode 100644 index 000000000..8b7df182a --- 
/dev/null +++ b/python/numpy/typing/tests/data/pass/ndarray_misc.py @@ -0,0 +1,203 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +from __future__ import annotations + +import operator +from typing import cast, Any + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.float64]): ... +class IntSubClass(npt.NDArray[np.intp]): ... + +i4 = np.int32(1) +A: np.ndarray[Any, np.dtype[np.int32]] = np.array([[1]], dtype=np.int32) +B0 = np.empty((), dtype=np.int32).view(SubClass) +B1 = np.empty((1,), dtype=np.int32).view(SubClass) +B2 = np.empty((1, 1), dtype=np.int32).view(SubClass) +B_int0: IntSubClass = np.empty((), dtype=np.intp).view(IntSubClass) +C: np.ndarray[Any, np.dtype[np.int32]] = np.array([0, 1, 2], dtype=np.int32) +D = np.ones(3).view(SubClass) + +ctypes_obj = A.ctypes + +i4.all() +A.all() +A.all(axis=0) +A.all(keepdims=True) +A.all(out=B0) + +i4.any() +A.any() +A.any(axis=0) +A.any(keepdims=True) +A.any(out=B0) + +i4.argmax() +A.argmax() +A.argmax(axis=0) +A.argmax(out=B_int0) + +i4.argmin() +A.argmin() +A.argmin(axis=0) +A.argmin(out=B_int0) + +i4.argsort() +i4.argsort(stable=True) +A.argsort() +A.argsort(stable=True) + +A.sort() +A.sort(stable=True) + +i4.choose([()]) +_choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32) +C.choose(_choices) +C.choose(_choices, out=D) + +i4.clip(1) +A.clip(1) +A.clip(None, 1) +A.clip(1, out=B2) +A.clip(None, 1, out=B2) + +i4.compress([1]) +A.compress([1]) +A.compress([1], out=B1) + +i4.conj() +A.conj() +B0.conj() + +i4.conjugate() +A.conjugate() +B0.conjugate() + +i4.cumprod() +A.cumprod() +A.cumprod(out=B1) + +i4.cumsum() +A.cumsum() +A.cumsum(out=B1) + +i4.max() +A.max() +A.max(axis=0) +A.max(keepdims=True) +A.max(out=B0) + +i4.mean() +A.mean() +A.mean(axis=0) +A.mean(keepdims=True) +A.mean(out=B0) + +i4.min() +A.min() +A.min(axis=0) +A.min(keepdims=True) +A.min(out=B0) + +i4.prod() +A.prod() +A.prod(axis=0) +A.prod(keepdims=True) +A.prod(out=B0) + +i4.round() +A.round() +A.round(out=B2) + +i4.repeat(1) +A.repeat(1) +B0.repeat(1) + +i4.std() +A.std() +A.std(axis=0) +A.std(keepdims=True) +A.std(out=B0.astype(np.float64)) + +i4.sum() +A.sum() +A.sum(axis=0) +A.sum(keepdims=True) +A.sum(out=B0) + +i4.take(0) +A.take(0) +A.take([0]) +A.take(0, out=B0) +A.take([0], out=B1) + +i4.var() +A.var() +A.var(axis=0) +A.var(keepdims=True) +A.var(out=B0) + +A.argpartition([0]) + +A.diagonal() + +A.dot(1) +A.dot(1, out=B2) + +A.nonzero() + +C.searchsorted(1) + +A.trace() +A.trace(out=B0) + +void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0)) +void.setfield(10, np.float64) + +A.item(0) +C.item(0) + +A.ravel() +C.ravel() + +A.flatten() +C.flatten() + +A.reshape(1) +C.reshape(3) + +int(np.array(1.0, dtype=np.float64)) +int(np.array("1", dtype=np.str_)) + +float(np.array(1.0, dtype=np.float64)) +float(np.array("1", dtype=np.str_)) + +complex(np.array(1.0, dtype=np.float64)) + +operator.index(np.array(1, dtype=np.int64)) + +# this fails on numpy 2.2.1 +# https://github.com/scipy/scipy/blob/a755ee77ec47a64849abe42c349936475a6c2f24/scipy/io/arff/tests/test_arffread.py#L41-L44 +A_float = np.array([[1, 5], [2, 4], [np.nan, np.nan]]) +A_void: npt.NDArray[np.void] = np.empty(3, [("yop", float), ("yap", float)]) +A_void["yop"] = A_float[:, 0] +A_void["yap"] = A_float[:, 1] + +# deprecated + +with np.testing.assert_warns(DeprecationWarning): + 
ctypes_obj.get_data() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_shape() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_strides() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +with np.testing.assert_warns(DeprecationWarning): + ctypes_obj.get_as_parameter() # type: ignore[deprecated] # pyright: ignore[reportDeprecated] diff --git a/python/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py b/python/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py new file mode 100644 index 000000000..0ca3dff39 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/ndarray_shape_manipulation.py @@ -0,0 +1,47 @@ +import numpy as np + +nd1 = np.array([[1, 2], [3, 4]]) + +# reshape +nd1.reshape(4) +nd1.reshape(2, 2) +nd1.reshape((2, 2)) + +nd1.reshape((2, 2), order="C") +nd1.reshape(4, order="C") + +# resize +nd1.resize() +nd1.resize(4) +nd1.resize(2, 2) +nd1.resize((2, 2)) + +nd1.resize((2, 2), refcheck=True) +nd1.resize(4, refcheck=True) + +nd2 = np.array([[1, 2], [3, 4]]) + +# transpose +nd2.transpose() +nd2.transpose(1, 0) +nd2.transpose((1, 0)) + +# swapaxes +nd2.swapaxes(0, 1) + +# flatten +nd2.flatten() +nd2.flatten("C") + +# ravel +nd2.ravel() +nd2.ravel("C") + +# squeeze +nd2.squeeze() + +nd3 = np.array([[1, 2]]) +nd3.squeeze(0) + +nd4 = np.array([[[1, 2]]]) +nd4.squeeze((0, 1)) diff --git a/python/numpy/typing/tests/data/pass/nditer.py b/python/numpy/typing/tests/data/pass/nditer.py new file mode 100644 index 000000000..25a5b44d7 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/nditer.py @@ -0,0 +1,4 @@ +import numpy as np + +arr = np.array([1]) +np.nditer([arr, None]) diff --git a/python/numpy/typing/tests/data/pass/numeric.py b/python/numpy/typing/tests/data/pass/numeric.py new file mode 100644 index 000000000..1eb14cf3a --- /dev/null +++ b/python/numpy/typing/tests/data/pass/numeric.py @@ -0,0 +1,95 @@ +""" +Tests for :mod:`numpy._core.numeric`. + +Does not include tests which fall under ``array_constructors``. + +""" + +from __future__ import annotations +from typing import cast + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.float64]): ... 
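+# SubClass is presumably defined so that the out= overloads exercised
+# below (e.g. np.outer(A, A, out=C)) can be checked to round-trip the
+# ndarray subclass.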
+ + +i8 = np.int64(1) + +A = cast( + np.ndarray[tuple[int, int, int], np.dtype[np.intp]], + np.arange(27).reshape(3, 3, 3), +) +B: list[list[list[int]]] = A.tolist() +C = np.empty((27, 27)).view(SubClass) + +np.count_nonzero(i8) +np.count_nonzero(A) +np.count_nonzero(B) +np.count_nonzero(A, keepdims=True) +np.count_nonzero(A, axis=0) + +np.isfortran(i8) +np.isfortran(A) + +np.argwhere(i8) +np.argwhere(A) + +np.flatnonzero(i8) +np.flatnonzero(A) + +np.correlate(B[0][0], A.ravel(), mode="valid") +np.correlate(A.ravel(), A.ravel(), mode="same") + +np.convolve(B[0][0], A.ravel(), mode="valid") +np.convolve(A.ravel(), A.ravel(), mode="same") + +np.outer(i8, A) +np.outer(B, A) +np.outer(A, A) +np.outer(A, A, out=C) + +np.tensordot(B, A) +np.tensordot(A, A) +np.tensordot(A, A, axes=0) +np.tensordot(A, A, axes=(0, 1)) + +np.isscalar(i8) +np.isscalar(A) +np.isscalar(B) + +np.roll(A, 1) +np.roll(A, (1, 2)) +np.roll(B, 1) + +np.rollaxis(A, 0, 1) + +np.moveaxis(A, 0, 1) +np.moveaxis(A, (0, 1), (1, 2)) + +np.cross(B, A) +np.cross(A, A) + +np.indices([0, 1, 2]) +np.indices([0, 1, 2], sparse=False) +np.indices([0, 1, 2], sparse=True) + +np.binary_repr(1) + +np.base_repr(1) + +np.allclose(i8, A) +np.allclose(B, A) +np.allclose(A, A) + +np.isclose(i8, A) +np.isclose(B, A) +np.isclose(A, A) + +np.array_equal(i8, A) +np.array_equal(B, A) +np.array_equal(A, A) + +np.array_equiv(i8, A) +np.array_equiv(B, A) +np.array_equiv(A, A) diff --git a/python/numpy/typing/tests/data/pass/numerictypes.py b/python/numpy/typing/tests/data/pass/numerictypes.py new file mode 100644 index 000000000..24e1a9986 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/numerictypes.py @@ -0,0 +1,17 @@ +import numpy as np + +np.isdtype(np.float64, (np.int64, np.float64)) +np.isdtype(np.int64, "signed integer") + +np.issubdtype("S1", np.bytes_) +np.issubdtype(np.float64, np.float32) + +np.ScalarType +np.ScalarType[0] +np.ScalarType[3] +np.ScalarType[8] +np.ScalarType[10] + +np.typecodes["Character"] +np.typecodes["Complex"] +np.typecodes["All"] diff --git a/python/numpy/typing/tests/data/pass/random.py b/python/numpy/typing/tests/data/pass/random.py new file mode 100644 index 000000000..bce204a73 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/random.py @@ -0,0 +1,1497 @@ +from __future__ import annotations + +from typing import Any +import numpy as np + +SEED_NONE = None +SEED_INT = 4579435749574957634658964293569 +SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64) +SEED_ARRLIKE: list[int] = [1, 2, 3, 4] +SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0) +SEED_MT19937: np.random.MT19937 = np.random.MT19937(0) +SEED_PCG64: np.random.PCG64 = np.random.PCG64(0) +SEED_PHILOX: np.random.Philox = np.random.Philox(0) +SEED_SFC64: np.random.SFC64 = np.random.SFC64(0) + +# default rng +np.random.default_rng() +np.random.default_rng(SEED_NONE) +np.random.default_rng(SEED_INT) +np.random.default_rng(SEED_ARR) +np.random.default_rng(SEED_ARRLIKE) +np.random.default_rng(SEED_SEED_SEQ) +np.random.default_rng(SEED_MT19937) +np.random.default_rng(SEED_PCG64) +np.random.default_rng(SEED_PHILOX) +np.random.default_rng(SEED_SFC64) + +# Seed Sequence +np.random.SeedSequence(SEED_NONE) +np.random.SeedSequence(SEED_INT) +np.random.SeedSequence(SEED_ARR) +np.random.SeedSequence(SEED_ARRLIKE) + +# Bit Generators +np.random.MT19937(SEED_NONE) +np.random.MT19937(SEED_INT) +np.random.MT19937(SEED_ARR) +np.random.MT19937(SEED_ARRLIKE) +np.random.MT19937(SEED_SEED_SEQ) + +np.random.PCG64(SEED_NONE) 
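+# PCG64, Philox and SFC64 below accept the same seed types already
+# exercised for MT19937 above.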
+np.random.PCG64(SEED_INT) +np.random.PCG64(SEED_ARR) +np.random.PCG64(SEED_ARRLIKE) +np.random.PCG64(SEED_SEED_SEQ) + +np.random.Philox(SEED_NONE) +np.random.Philox(SEED_INT) +np.random.Philox(SEED_ARR) +np.random.Philox(SEED_ARRLIKE) +np.random.Philox(SEED_SEED_SEQ) + +np.random.SFC64(SEED_NONE) +np.random.SFC64(SEED_INT) +np.random.SFC64(SEED_ARR) +np.random.SFC64(SEED_ARRLIKE) +np.random.SFC64(SEED_SEED_SEQ) + +seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence(SEED_NONE) +seed_seq.spawn(10) +seed_seq.generate_state(3) +seed_seq.generate_state(3, "u4") +seed_seq.generate_state(3, "uint32") +seed_seq.generate_state(3, "u8") +seed_seq.generate_state(3, "uint64") +seed_seq.generate_state(3, np.uint32) +seed_seq.generate_state(3, np.uint64) + + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1]) +D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5]) +D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9]) +D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5]) +I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_) +I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_) +D_arr_like_0p1: list[float] = [0.1] +D_arr_like_0p5: list[float] = [0.5] +D_arr_like_0p9: list[float] = [0.9] +D_arr_like_1p5: list[float] = [1.5] +I_arr_like_10: list[int] = [10] +I_arr_like_20: list[int] = [20] +D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like) + +S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32) +D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1) + +def_gen.standard_normal() +def_gen.standard_normal(dtype=np.float32) +def_gen.standard_normal(dtype="float32") +def_gen.standard_normal(dtype="double") +def_gen.standard_normal(dtype=np.float64) +def_gen.standard_normal(size=None) +def_gen.standard_normal(size=1) +def_gen.standard_normal(size=1, dtype=np.float32) +def_gen.standard_normal(size=1, dtype="f4") +def_gen.standard_normal(size=1, dtype="float32", out=S_out) +def_gen.standard_normal(dtype=np.float32, out=S_out) +def_gen.standard_normal(size=1, dtype=np.float64) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="f8") +def_gen.standard_normal(out=D_out) +def_gen.standard_normal(size=1, dtype="float64") +def_gen.standard_normal(size=1, dtype="float64", out=D_out) + +def_gen.random() +def_gen.random(dtype=np.float32) +def_gen.random(dtype="float32") +def_gen.random(dtype="double") +def_gen.random(dtype=np.float64) +def_gen.random(size=None) +def_gen.random(size=1) +def_gen.random(size=1, dtype=np.float32) +def_gen.random(size=1, dtype="f4") +def_gen.random(size=1, dtype="float32", out=S_out) +def_gen.random(dtype=np.float32, out=S_out) +def_gen.random(size=1, dtype=np.float64) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="f8") +def_gen.random(out=D_out) +def_gen.random(size=1, dtype="float64") +def_gen.random(size=1, dtype="float64", out=D_out) + +def_gen.standard_cauchy() +def_gen.standard_cauchy(size=None) +def_gen.standard_cauchy(size=1) + +def_gen.standard_exponential() +def_gen.standard_exponential(method="inv") +def_gen.standard_exponential(dtype=np.float32) +def_gen.standard_exponential(dtype="float32") +def_gen.standard_exponential(dtype="double") +def_gen.standard_exponential(dtype=np.float64) +def_gen.standard_exponential(size=None) 
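+# method="inv" switches from the default ziggurat sampler to inverse-CDF
+# sampling; both methods take the same size/dtype/out combinations.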
+def_gen.standard_exponential(size=None, method="inv") +def_gen.standard_exponential(size=1, method="inv") +def_gen.standard_exponential(size=1, dtype=np.float32) +def_gen.standard_exponential(size=1, dtype="f4", method="inv") +def_gen.standard_exponential(size=1, dtype="float32", out=S_out) +def_gen.standard_exponential(dtype=np.float32, out=S_out) +def_gen.standard_exponential(size=1, dtype=np.float64, method="inv") +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="f8") +def_gen.standard_exponential(out=D_out) +def_gen.standard_exponential(size=1, dtype="float64") +def_gen.standard_exponential(size=1, dtype="float64", out=D_out) + +def_gen.zipf(1.5) +def_gen.zipf(1.5, size=None) +def_gen.zipf(1.5, size=1) +def_gen.zipf(D_arr_1p5) +def_gen.zipf(D_arr_1p5, size=1) +def_gen.zipf(D_arr_like_1p5) +def_gen.zipf(D_arr_like_1p5, size=1) + +def_gen.weibull(0.5) +def_gen.weibull(0.5, size=None) +def_gen.weibull(0.5, size=1) +def_gen.weibull(D_arr_0p5) +def_gen.weibull(D_arr_0p5, size=1) +def_gen.weibull(D_arr_like_0p5) +def_gen.weibull(D_arr_like_0p5, size=1) + +def_gen.standard_t(0.5) +def_gen.standard_t(0.5, size=None) +def_gen.standard_t(0.5, size=1) +def_gen.standard_t(D_arr_0p5) +def_gen.standard_t(D_arr_0p5, size=1) +def_gen.standard_t(D_arr_like_0p5) +def_gen.standard_t(D_arr_like_0p5, size=1) + +def_gen.poisson(0.5) +def_gen.poisson(0.5, size=None) +def_gen.poisson(0.5, size=1) +def_gen.poisson(D_arr_0p5) +def_gen.poisson(D_arr_0p5, size=1) +def_gen.poisson(D_arr_like_0p5) +def_gen.poisson(D_arr_like_0p5, size=1) + +def_gen.power(0.5) +def_gen.power(0.5, size=None) +def_gen.power(0.5, size=1) +def_gen.power(D_arr_0p5) +def_gen.power(D_arr_0p5, size=1) +def_gen.power(D_arr_like_0p5) +def_gen.power(D_arr_like_0p5, size=1) + +def_gen.pareto(0.5) +def_gen.pareto(0.5, size=None) +def_gen.pareto(0.5, size=1) +def_gen.pareto(D_arr_0p5) +def_gen.pareto(D_arr_0p5, size=1) +def_gen.pareto(D_arr_like_0p5) +def_gen.pareto(D_arr_like_0p5, size=1) + +def_gen.chisquare(0.5) +def_gen.chisquare(0.5, size=None) +def_gen.chisquare(0.5, size=1) +def_gen.chisquare(D_arr_0p5) +def_gen.chisquare(D_arr_0p5, size=1) +def_gen.chisquare(D_arr_like_0p5) +def_gen.chisquare(D_arr_like_0p5, size=1) + +def_gen.exponential(0.5) +def_gen.exponential(0.5, size=None) +def_gen.exponential(0.5, size=1) +def_gen.exponential(D_arr_0p5) +def_gen.exponential(D_arr_0p5, size=1) +def_gen.exponential(D_arr_like_0p5) +def_gen.exponential(D_arr_like_0p5, size=1) + +def_gen.geometric(0.5) +def_gen.geometric(0.5, size=None) +def_gen.geometric(0.5, size=1) +def_gen.geometric(D_arr_0p5) +def_gen.geometric(D_arr_0p5, size=1) +def_gen.geometric(D_arr_like_0p5) +def_gen.geometric(D_arr_like_0p5, size=1) + +def_gen.logseries(0.5) +def_gen.logseries(0.5, size=None) +def_gen.logseries(0.5, size=1) +def_gen.logseries(D_arr_0p5) +def_gen.logseries(D_arr_0p5, size=1) +def_gen.logseries(D_arr_like_0p5) +def_gen.logseries(D_arr_like_0p5, size=1) + +def_gen.rayleigh(0.5) +def_gen.rayleigh(0.5, size=None) +def_gen.rayleigh(0.5, size=1) +def_gen.rayleigh(D_arr_0p5) +def_gen.rayleigh(D_arr_0p5, size=1) +def_gen.rayleigh(D_arr_like_0p5) +def_gen.rayleigh(D_arr_like_0p5, size=1) + +def_gen.standard_gamma(0.5) +def_gen.standard_gamma(0.5, size=None) +def_gen.standard_gamma(0.5, dtype="float32") +def_gen.standard_gamma(0.5, size=None, dtype="float32") +def_gen.standard_gamma(0.5, size=1) +def_gen.standard_gamma(D_arr_0p5) +def_gen.standard_gamma(D_arr_0p5, dtype="f4") +def_gen.standard_gamma(0.5, size=1, 
dtype="float32", out=S_out) +def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out) +def_gen.standard_gamma(D_arr_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(0.5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, out=D_out) +def_gen.standard_gamma(D_arr_like_0p5, size=1) +def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64) + +def_gen.vonmises(0.5, 0.5) +def_gen.vonmises(0.5, 0.5, size=None) +def_gen.vonmises(0.5, 0.5, size=1) +def_gen.vonmises(D_arr_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_0p5) +def_gen.vonmises(D_arr_0p5, 0.5, size=1) +def_gen.vonmises(0.5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, 0.5) +def_gen.vonmises(0.5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5) +def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1) +def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.wald(0.5, 0.5) +def_gen.wald(0.5, 0.5, size=None) +def_gen.wald(0.5, 0.5, size=1) +def_gen.wald(D_arr_0p5, 0.5) +def_gen.wald(0.5, D_arr_0p5) +def_gen.wald(D_arr_0p5, 0.5, size=1) +def_gen.wald(0.5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, 0.5) +def_gen.wald(0.5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5) +def_gen.wald(D_arr_0p5, D_arr_0p5, size=1) +def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.uniform(0.5, 0.5) +def_gen.uniform(0.5, 0.5, size=None) +def_gen.uniform(0.5, 0.5, size=1) +def_gen.uniform(D_arr_0p5, 0.5) +def_gen.uniform(0.5, D_arr_0p5) +def_gen.uniform(D_arr_0p5, 0.5, size=1) +def_gen.uniform(0.5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, 0.5) +def_gen.uniform(0.5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5) +def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1) +def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.beta(0.5, 0.5) +def_gen.beta(0.5, 0.5, size=None) +def_gen.beta(0.5, 0.5, size=1) +def_gen.beta(D_arr_0p5, 0.5) +def_gen.beta(0.5, D_arr_0p5) +def_gen.beta(D_arr_0p5, 0.5, size=1) +def_gen.beta(0.5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, 0.5) +def_gen.beta(0.5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5) +def_gen.beta(D_arr_0p5, D_arr_0p5, size=1) +def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.f(0.5, 0.5) +def_gen.f(0.5, 0.5, size=None) +def_gen.f(0.5, 0.5, size=1) +def_gen.f(D_arr_0p5, 0.5) +def_gen.f(0.5, D_arr_0p5) +def_gen.f(D_arr_0p5, 0.5, size=1) +def_gen.f(0.5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, 0.5) +def_gen.f(0.5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5) +def_gen.f(D_arr_0p5, D_arr_0p5, size=1) +def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gamma(0.5, 0.5) +def_gen.gamma(0.5, 0.5, size=None) +def_gen.gamma(0.5, 0.5, size=1) +def_gen.gamma(D_arr_0p5, 0.5) +def_gen.gamma(0.5, D_arr_0p5) +def_gen.gamma(D_arr_0p5, 0.5, size=1) +def_gen.gamma(0.5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, 0.5) +def_gen.gamma(0.5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.gumbel(0.5, 0.5) +def_gen.gumbel(0.5, 0.5, size=None) +def_gen.gumbel(0.5, 0.5, size=1) +def_gen.gumbel(D_arr_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_0p5) 
+def_gen.gumbel(D_arr_0p5, 0.5, size=1) +def_gen.gumbel(0.5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, 0.5) +def_gen.gumbel(0.5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5) +def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1) +def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.laplace(0.5, 0.5) +def_gen.laplace(0.5, 0.5, size=None) +def_gen.laplace(0.5, 0.5, size=1) +def_gen.laplace(D_arr_0p5, 0.5) +def_gen.laplace(0.5, D_arr_0p5) +def_gen.laplace(D_arr_0p5, 0.5, size=1) +def_gen.laplace(0.5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, 0.5) +def_gen.laplace(0.5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5) +def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1) +def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.logistic(0.5, 0.5) +def_gen.logistic(0.5, 0.5, size=None) +def_gen.logistic(0.5, 0.5, size=1) +def_gen.logistic(D_arr_0p5, 0.5) +def_gen.logistic(0.5, D_arr_0p5) +def_gen.logistic(D_arr_0p5, 0.5, size=1) +def_gen.logistic(0.5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, 0.5) +def_gen.logistic(0.5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5) +def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1) +def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.lognormal(0.5, 0.5) +def_gen.lognormal(0.5, 0.5, size=None) +def_gen.lognormal(0.5, 0.5, size=1) +def_gen.lognormal(D_arr_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_0p5) +def_gen.lognormal(D_arr_0p5, 0.5, size=1) +def_gen.lognormal(0.5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, 0.5) +def_gen.lognormal(0.5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.noncentral_chisquare(0.5, 0.5) +def_gen.noncentral_chisquare(0.5, 0.5, size=None) +def_gen.noncentral_chisquare(0.5, 0.5, size=1) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5) +def_gen.noncentral_chisquare(0.5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.normal(0.5, 0.5) +def_gen.normal(0.5, 0.5, size=None) +def_gen.normal(0.5, 0.5, size=1) +def_gen.normal(D_arr_0p5, 0.5) +def_gen.normal(0.5, D_arr_0p5) +def_gen.normal(D_arr_0p5, 0.5, size=1) +def_gen.normal(0.5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, 0.5) +def_gen.normal(0.5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5) +def_gen.normal(D_arr_0p5, D_arr_0p5, size=1) +def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +def_gen.triangular(0.1, 0.5, 0.9) +def_gen.triangular(0.1, 0.5, 0.9, size=None) +def_gen.triangular(0.1, 0.5, 0.9, size=1) +def_gen.triangular(D_arr_0p1, 0.5, 0.9) +def_gen.triangular(0.1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1) +def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.triangular(0.5, D_arr_like_0p5, 0.9) 
+def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.noncentral_f(0.1, 0.5, 0.9) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=None) +def_gen.noncentral_f(0.1, 0.5, 0.9, size=1) +def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +def_gen.binomial(10, 0.5) +def_gen.binomial(10, 0.5, size=None) +def_gen.binomial(10, 0.5, size=1) +def_gen.binomial(I_arr_10, 0.5) +def_gen.binomial(10, D_arr_0p5) +def_gen.binomial(I_arr_10, 0.5, size=1) +def_gen.binomial(10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, 0.5) +def_gen.binomial(10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.negative_binomial(10, 0.5) +def_gen.negative_binomial(10, 0.5, size=None) +def_gen.negative_binomial(10, 0.5, size=1) +def_gen.negative_binomial(I_arr_10, 0.5) +def_gen.negative_binomial(10, D_arr_0p5) +def_gen.negative_binomial(I_arr_10, 0.5, size=1) +def_gen.negative_binomial(10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, 0.5) +def_gen.negative_binomial(10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5) +def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1) +def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +def_gen.hypergeometric(20, 20, 10) +def_gen.hypergeometric(20, 20, 10, size=None) +def_gen.hypergeometric(20, 20, 10, size=1) +def_gen.hypergeometric(I_arr_20, 20, 10) +def_gen.hypergeometric(20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +def_gen.hypergeometric(20, I_arr_20, 10, size=1) +def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10) +def_gen.hypergeometric(20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, 10) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + +I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64) + +def_gen.integers(0, 100) +def_gen.integers(100) +def_gen.integers([100]) +def_gen.integers(0, [100]) + +I_bool_low: np.ndarray[Any, np.dtype[np.bool]] = np.array([0], dtype=np.bool) +I_bool_low_like: list[int] = [0] +I_bool_high_open: np.ndarray[Any, np.dtype[np.bool]] = np.array([1], dtype=np.bool) +I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool]] = np.array([1], dtype=np.bool) + +def_gen.integers(2, dtype=bool) +def_gen.integers(0, 2, dtype=bool) +def_gen.integers(1, dtype=bool, endpoint=True) +def_gen.integers(0, 1, dtype=bool, endpoint=True) +def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True) +def_gen.integers(I_bool_high_open, dtype=bool) +def_gen.integers(I_bool_low, I_bool_high_open, 
dtype=bool) +def_gen.integers(0, I_bool_high_open, dtype=bool) +def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True) +def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True) +def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True) + +def_gen.integers(2, dtype=np.bool) +def_gen.integers(0, 2, dtype=np.bool) +def_gen.integers(1, dtype=np.bool, endpoint=True) +def_gen.integers(0, 1, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_high_open, dtype=np.bool) +def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool) +def_gen.integers(0, I_bool_high_open, dtype=np.bool) +def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True) +def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True) +def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True) + +I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8) +I_u1_low_like: list[int] = [0] +I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) +I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8) + +def_gen.integers(256, dtype="u1") +def_gen.integers(0, 256, dtype="u1") +def_gen.integers(255, dtype="u1", endpoint=True) +def_gen.integers(0, 255, dtype="u1", endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True) +def_gen.integers(I_u1_high_open, dtype="u1") +def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1") +def_gen.integers(0, I_u1_high_open, dtype="u1") +def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True) + +def_gen.integers(256, dtype="uint8") +def_gen.integers(0, 256, dtype="uint8") +def_gen.integers(255, dtype="uint8", endpoint=True) +def_gen.integers(0, 255, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_high_open, dtype="uint8") +def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8") +def_gen.integers(0, I_u1_high_open, dtype="uint8") +def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True) + +def_gen.integers(256, dtype=np.uint8) +def_gen.integers(0, 256, dtype=np.uint8) +def_gen.integers(255, dtype=np.uint8, endpoint=True) +def_gen.integers(0, 255, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_high_open, dtype=np.uint8) +def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8) +def_gen.integers(0, I_u1_high_open, dtype=np.uint8) +def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True) +def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True) +def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True) + +I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16) +I_u2_low_like: list[int] = [0] +I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) +I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16) + +def_gen.integers(65536, dtype="u2") +def_gen.integers(0, 65536, dtype="u2") +def_gen.integers(65535, dtype="u2", endpoint=True) +def_gen.integers(0, 65535, dtype="u2", endpoint=True) 
+def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True) +def_gen.integers(I_u2_high_open, dtype="u2") +def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2") +def_gen.integers(0, I_u2_high_open, dtype="u2") +def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True) + +def_gen.integers(65536, dtype="uint16") +def_gen.integers(0, 65536, dtype="uint16") +def_gen.integers(65535, dtype="uint16", endpoint=True) +def_gen.integers(0, 65535, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_high_open, dtype="uint16") +def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16") +def_gen.integers(0, I_u2_high_open, dtype="uint16") +def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True) + +def_gen.integers(65536, dtype=np.uint16) +def_gen.integers(0, 65536, dtype=np.uint16) +def_gen.integers(65535, dtype=np.uint16, endpoint=True) +def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_high_open, dtype=np.uint16) +def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16) +def_gen.integers(0, I_u2_high_open, dtype=np.uint16) +def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True) +def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True) +def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True) + +I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32) +I_u4_low_like: list[int] = [0] +I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) +I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32) + +def_gen.integers(4294967296, dtype="u4") +def_gen.integers(0, 4294967296, dtype="u4") +def_gen.integers(4294967295, dtype="u4", endpoint=True) +def_gen.integers(0, 4294967295, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4") +def_gen.integers(0, I_u4_high_open, dtype="u4") +def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True) + +def_gen.integers(4294967296, dtype="uint32") +def_gen.integers(0, 4294967296, dtype="uint32") +def_gen.integers(4294967295, dtype="uint32", endpoint=True) +def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32") +def_gen.integers(0, I_u4_high_open, dtype="uint32") +def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True) + +def_gen.integers(4294967296, dtype=np.uint32) +def_gen.integers(0, 4294967296, dtype=np.uint32) +def_gen.integers(4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(0, 4294967295, 
dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32) +def_gen.integers(0, I_u4_high_open, dtype=np.uint32) +def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True) +def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True) + +I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64) +I_u8_low_like: list[int] = [0] +I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64) + +def_gen.integers(18446744073709551616, dtype="u8") +def_gen.integers(0, 18446744073709551616, dtype="u8") +def_gen.integers(18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8") +def_gen.integers(0, I_u8_high_open, dtype="u8") +def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True) + +def_gen.integers(18446744073709551616, dtype="uint64") +def_gen.integers(0, 18446744073709551616, dtype="uint64") +def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_high_open, dtype="uint64") +def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64") +def_gen.integers(0, I_u8_high_open, dtype="uint64") +def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True) + +def_gen.integers(18446744073709551616, dtype=np.uint64) +def_gen.integers(0, 18446744073709551616, dtype=np.uint64) +def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_high_open, dtype=np.uint64) +def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64) +def_gen.integers(0, I_u8_high_open, dtype=np.uint64) +def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True) +def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True) +def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True) + +I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8) +I_i1_low_like: list[int] = [-128] +I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) +I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8) + +def_gen.integers(128, dtype="i1") +def_gen.integers(-128, 128, dtype="i1") +def_gen.integers(127, dtype="i1", endpoint=True) +def_gen.integers(-128, 127, dtype="i1", endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True) +def_gen.integers(I_i1_high_open, dtype="i1") 
+def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1") +def_gen.integers(-128, I_i1_high_open, dtype="i1") +def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True) + +def_gen.integers(128, dtype="int8") +def_gen.integers(-128, 128, dtype="int8") +def_gen.integers(127, dtype="int8", endpoint=True) +def_gen.integers(-128, 127, dtype="int8", endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True) +def_gen.integers(I_i1_high_open, dtype="int8") +def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8") +def_gen.integers(-128, I_i1_high_open, dtype="int8") +def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True) + +def_gen.integers(128, dtype=np.int8) +def_gen.integers(-128, 128, dtype=np.int8) +def_gen.integers(127, dtype=np.int8, endpoint=True) +def_gen.integers(-128, 127, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_high_open, dtype=np.int8) +def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8) +def_gen.integers(-128, I_i1_high_open, dtype=np.int8) +def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True) +def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True) +def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True) + +I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16) +I_i2_low_like: list[int] = [-32768] +I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) +I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16) + +def_gen.integers(32768, dtype="i2") +def_gen.integers(-32768, 32768, dtype="i2") +def_gen.integers(32767, dtype="i2", endpoint=True) +def_gen.integers(-32768, 32767, dtype="i2", endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True) +def_gen.integers(I_i2_high_open, dtype="i2") +def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2") +def_gen.integers(-32768, I_i2_high_open, dtype="i2") +def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True) + +def_gen.integers(32768, dtype="int16") +def_gen.integers(-32768, 32768, dtype="int16") +def_gen.integers(32767, dtype="int16", endpoint=True) +def_gen.integers(-32768, 32767, dtype="int16", endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True) +def_gen.integers(I_i2_high_open, dtype="int16") +def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16") +def_gen.integers(-32768, I_i2_high_open, dtype="int16") +def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True) + +def_gen.integers(32768, dtype=np.int16) +def_gen.integers(-32768, 32768, dtype=np.int16) +def_gen.integers(32767, dtype=np.int16, endpoint=True) +def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_high_open, dtype=np.int16) +def_gen.integers(I_i2_low, I_i2_high_open, 
dtype=np.int16) +def_gen.integers(-32768, I_i2_high_open, dtype=np.int16) +def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True) +def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True) +def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True) + +I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32) +I_i4_low_like: list[int] = [-2147483648] +I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) +I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32) + +def_gen.integers(2147483648, dtype="i4") +def_gen.integers(-2147483648, 2147483648, dtype="i4") +def_gen.integers(2147483647, dtype="i4", endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True) +def_gen.integers(I_i4_high_open, dtype="i4") +def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4") +def_gen.integers(-2147483648, I_i4_high_open, dtype="i4") +def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True) + +def_gen.integers(2147483648, dtype="int32") +def_gen.integers(-2147483648, 2147483648, dtype="int32") +def_gen.integers(2147483647, dtype="int32", endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True) +def_gen.integers(I_i4_high_open, dtype="int32") +def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32") +def_gen.integers(-2147483648, I_i4_high_open, dtype="int32") +def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True) + +def_gen.integers(2147483648, dtype=np.int32) +def_gen.integers(-2147483648, 2147483648, dtype=np.int32) +def_gen.integers(2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_high_open, dtype=np.int32) +def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32) +def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32) +def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True) +def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True) +def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True) + +I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64) +I_i8_low_like: list[int] = [-9223372036854775808] +I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) +I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64) + +def_gen.integers(9223372036854775808, dtype="i8") +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8") +def_gen.integers(9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True) +def_gen.integers(I_i8_high_open, dtype="i8") +def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8") 
+def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8") +def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True) + +def_gen.integers(9223372036854775808, dtype="int64") +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64") +def_gen.integers(9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True) +def_gen.integers(I_i8_high_open, dtype="int64") +def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64") +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64") +def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True) + +def_gen.integers(9223372036854775808, dtype=np.int64) +def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64) +def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_high_open, dtype=np.int64) +def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64) +def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64) +def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True) +def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True) +def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True) + + +def_gen.bit_generator + +def_gen.bytes(2) + +def_gen.choice(5) +def_gen.choice(5, 3) +def_gen.choice(5, 3, replace=True) +def_gen.choice(5, 3, p=[1 / 5] * 5) +def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False) + +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True) +def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])) + +def_gen.dirichlet([0.5, 0.5]) +def_gen.dirichlet(np.array([0.5, 0.5])) +def_gen.dirichlet(np.array([0.5, 0.5]), size=3) + +def_gen.multinomial(20, [1 / 6.0] * 6) +def_gen.multinomial(20, np.array([0.5, 0.5])) +def_gen.multinomial(20, [1 / 6.0] * 6, size=2) +def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2)) +def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2)) + +def_gen.multivariate_hypergeometric([3, 5, 7], 2) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4) +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7)) +def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count") +def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals") + +def_gen.multivariate_normal([0.0], [[1.0]]) +def_gen.multivariate_normal([0.0], np.array([[1.0]])) +def_gen.multivariate_normal(np.array([0.0]), [[1.0]]) +def_gen.multivariate_normal([0.0], np.array([[1.0]])) + +def_gen.permutation(10) 
+def_gen.permutation([1, 2, 3, 4]) +def_gen.permutation(np.array([1, 2, 3, 4])) +def_gen.permutation(D_2D, axis=1) +def_gen.permuted(D_2D) +def_gen.permuted(D_2D_like) +def_gen.permuted(D_2D, axis=1) +def_gen.permuted(D_2D, out=D_2D) +def_gen.permuted(D_2D_like, out=D_2D) +def_gen.permuted(D_2D_like, out=D_2D) +def_gen.permuted(D_2D, axis=1, out=D_2D) + +def_gen.shuffle(np.arange(10)) +def_gen.shuffle([1, 2, 3, 4, 5]) +def_gen.shuffle(D_2D, axis=1) + +def_gen.__str__() +def_gen.__repr__() +def_gen.__setstate__(dict(def_gen.bit_generator.state)) + +# RandomState +random_st: np.random.RandomState = np.random.RandomState() + +random_st.standard_normal() +random_st.standard_normal(size=None) +random_st.standard_normal(size=1) + +random_st.random() +random_st.random(size=None) +random_st.random(size=1) + +random_st.standard_cauchy() +random_st.standard_cauchy(size=None) +random_st.standard_cauchy(size=1) + +random_st.standard_exponential() +random_st.standard_exponential(size=None) +random_st.standard_exponential(size=1) + +random_st.zipf(1.5) +random_st.zipf(1.5, size=None) +random_st.zipf(1.5, size=1) +random_st.zipf(D_arr_1p5) +random_st.zipf(D_arr_1p5, size=1) +random_st.zipf(D_arr_like_1p5) +random_st.zipf(D_arr_like_1p5, size=1) + +random_st.weibull(0.5) +random_st.weibull(0.5, size=None) +random_st.weibull(0.5, size=1) +random_st.weibull(D_arr_0p5) +random_st.weibull(D_arr_0p5, size=1) +random_st.weibull(D_arr_like_0p5) +random_st.weibull(D_arr_like_0p5, size=1) + +random_st.standard_t(0.5) +random_st.standard_t(0.5, size=None) +random_st.standard_t(0.5, size=1) +random_st.standard_t(D_arr_0p5) +random_st.standard_t(D_arr_0p5, size=1) +random_st.standard_t(D_arr_like_0p5) +random_st.standard_t(D_arr_like_0p5, size=1) + +random_st.poisson(0.5) +random_st.poisson(0.5, size=None) +random_st.poisson(0.5, size=1) +random_st.poisson(D_arr_0p5) +random_st.poisson(D_arr_0p5, size=1) +random_st.poisson(D_arr_like_0p5) +random_st.poisson(D_arr_like_0p5, size=1) + +random_st.power(0.5) +random_st.power(0.5, size=None) +random_st.power(0.5, size=1) +random_st.power(D_arr_0p5) +random_st.power(D_arr_0p5, size=1) +random_st.power(D_arr_like_0p5) +random_st.power(D_arr_like_0p5, size=1) + +random_st.pareto(0.5) +random_st.pareto(0.5, size=None) +random_st.pareto(0.5, size=1) +random_st.pareto(D_arr_0p5) +random_st.pareto(D_arr_0p5, size=1) +random_st.pareto(D_arr_like_0p5) +random_st.pareto(D_arr_like_0p5, size=1) + +random_st.chisquare(0.5) +random_st.chisquare(0.5, size=None) +random_st.chisquare(0.5, size=1) +random_st.chisquare(D_arr_0p5) +random_st.chisquare(D_arr_0p5, size=1) +random_st.chisquare(D_arr_like_0p5) +random_st.chisquare(D_arr_like_0p5, size=1) + +random_st.exponential(0.5) +random_st.exponential(0.5, size=None) +random_st.exponential(0.5, size=1) +random_st.exponential(D_arr_0p5) +random_st.exponential(D_arr_0p5, size=1) +random_st.exponential(D_arr_like_0p5) +random_st.exponential(D_arr_like_0p5, size=1) + +random_st.geometric(0.5) +random_st.geometric(0.5, size=None) +random_st.geometric(0.5, size=1) +random_st.geometric(D_arr_0p5) +random_st.geometric(D_arr_0p5, size=1) +random_st.geometric(D_arr_like_0p5) +random_st.geometric(D_arr_like_0p5, size=1) + +random_st.logseries(0.5) +random_st.logseries(0.5, size=None) +random_st.logseries(0.5, size=1) +random_st.logseries(D_arr_0p5) +random_st.logseries(D_arr_0p5, size=1) +random_st.logseries(D_arr_like_0p5) +random_st.logseries(D_arr_like_0p5, size=1) + +random_st.rayleigh(0.5) +random_st.rayleigh(0.5, size=None) 
+random_st.rayleigh(0.5, size=1) +random_st.rayleigh(D_arr_0p5) +random_st.rayleigh(D_arr_0p5, size=1) +random_st.rayleigh(D_arr_like_0p5) +random_st.rayleigh(D_arr_like_0p5, size=1) + +random_st.standard_gamma(0.5) +random_st.standard_gamma(0.5, size=None) +random_st.standard_gamma(0.5, size=1) +random_st.standard_gamma(D_arr_0p5) +random_st.standard_gamma(D_arr_0p5, size=1) +random_st.standard_gamma(D_arr_like_0p5) +random_st.standard_gamma(D_arr_like_0p5, size=1) +random_st.standard_gamma(D_arr_like_0p5, size=1) + +random_st.vonmises(0.5, 0.5) +random_st.vonmises(0.5, 0.5, size=None) +random_st.vonmises(0.5, 0.5, size=1) +random_st.vonmises(D_arr_0p5, 0.5) +random_st.vonmises(0.5, D_arr_0p5) +random_st.vonmises(D_arr_0p5, 0.5, size=1) +random_st.vonmises(0.5, D_arr_0p5, size=1) +random_st.vonmises(D_arr_like_0p5, 0.5) +random_st.vonmises(0.5, D_arr_like_0p5) +random_st.vonmises(D_arr_0p5, D_arr_0p5) +random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5) +random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1) +random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.wald(0.5, 0.5) +random_st.wald(0.5, 0.5, size=None) +random_st.wald(0.5, 0.5, size=1) +random_st.wald(D_arr_0p5, 0.5) +random_st.wald(0.5, D_arr_0p5) +random_st.wald(D_arr_0p5, 0.5, size=1) +random_st.wald(0.5, D_arr_0p5, size=1) +random_st.wald(D_arr_like_0p5, 0.5) +random_st.wald(0.5, D_arr_like_0p5) +random_st.wald(D_arr_0p5, D_arr_0p5) +random_st.wald(D_arr_like_0p5, D_arr_like_0p5) +random_st.wald(D_arr_0p5, D_arr_0p5, size=1) +random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.uniform(0.5, 0.5) +random_st.uniform(0.5, 0.5, size=None) +random_st.uniform(0.5, 0.5, size=1) +random_st.uniform(D_arr_0p5, 0.5) +random_st.uniform(0.5, D_arr_0p5) +random_st.uniform(D_arr_0p5, 0.5, size=1) +random_st.uniform(0.5, D_arr_0p5, size=1) +random_st.uniform(D_arr_like_0p5, 0.5) +random_st.uniform(0.5, D_arr_like_0p5) +random_st.uniform(D_arr_0p5, D_arr_0p5) +random_st.uniform(D_arr_like_0p5, D_arr_like_0p5) +random_st.uniform(D_arr_0p5, D_arr_0p5, size=1) +random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.beta(0.5, 0.5) +random_st.beta(0.5, 0.5, size=None) +random_st.beta(0.5, 0.5, size=1) +random_st.beta(D_arr_0p5, 0.5) +random_st.beta(0.5, D_arr_0p5) +random_st.beta(D_arr_0p5, 0.5, size=1) +random_st.beta(0.5, D_arr_0p5, size=1) +random_st.beta(D_arr_like_0p5, 0.5) +random_st.beta(0.5, D_arr_like_0p5) +random_st.beta(D_arr_0p5, D_arr_0p5) +random_st.beta(D_arr_like_0p5, D_arr_like_0p5) +random_st.beta(D_arr_0p5, D_arr_0p5, size=1) +random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.f(0.5, 0.5) +random_st.f(0.5, 0.5, size=None) +random_st.f(0.5, 0.5, size=1) +random_st.f(D_arr_0p5, 0.5) +random_st.f(0.5, D_arr_0p5) +random_st.f(D_arr_0p5, 0.5, size=1) +random_st.f(0.5, D_arr_0p5, size=1) +random_st.f(D_arr_like_0p5, 0.5) +random_st.f(0.5, D_arr_like_0p5) +random_st.f(D_arr_0p5, D_arr_0p5) +random_st.f(D_arr_like_0p5, D_arr_like_0p5) +random_st.f(D_arr_0p5, D_arr_0p5, size=1) +random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.gamma(0.5, 0.5) +random_st.gamma(0.5, 0.5, size=None) +random_st.gamma(0.5, 0.5, size=1) +random_st.gamma(D_arr_0p5, 0.5) +random_st.gamma(0.5, D_arr_0p5) +random_st.gamma(D_arr_0p5, 0.5, size=1) +random_st.gamma(0.5, D_arr_0p5, size=1) +random_st.gamma(D_arr_like_0p5, 0.5) +random_st.gamma(0.5, D_arr_like_0p5) +random_st.gamma(D_arr_0p5, D_arr_0p5) +random_st.gamma(D_arr_like_0p5, D_arr_like_0p5) +random_st.gamma(D_arr_0p5, D_arr_0p5, 
size=1) +random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.gumbel(0.5, 0.5) +random_st.gumbel(0.5, 0.5, size=None) +random_st.gumbel(0.5, 0.5, size=1) +random_st.gumbel(D_arr_0p5, 0.5) +random_st.gumbel(0.5, D_arr_0p5) +random_st.gumbel(D_arr_0p5, 0.5, size=1) +random_st.gumbel(0.5, D_arr_0p5, size=1) +random_st.gumbel(D_arr_like_0p5, 0.5) +random_st.gumbel(0.5, D_arr_like_0p5) +random_st.gumbel(D_arr_0p5, D_arr_0p5) +random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5) +random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1) +random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.laplace(0.5, 0.5) +random_st.laplace(0.5, 0.5, size=None) +random_st.laplace(0.5, 0.5, size=1) +random_st.laplace(D_arr_0p5, 0.5) +random_st.laplace(0.5, D_arr_0p5) +random_st.laplace(D_arr_0p5, 0.5, size=1) +random_st.laplace(0.5, D_arr_0p5, size=1) +random_st.laplace(D_arr_like_0p5, 0.5) +random_st.laplace(0.5, D_arr_like_0p5) +random_st.laplace(D_arr_0p5, D_arr_0p5) +random_st.laplace(D_arr_like_0p5, D_arr_like_0p5) +random_st.laplace(D_arr_0p5, D_arr_0p5, size=1) +random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.logistic(0.5, 0.5) +random_st.logistic(0.5, 0.5, size=None) +random_st.logistic(0.5, 0.5, size=1) +random_st.logistic(D_arr_0p5, 0.5) +random_st.logistic(0.5, D_arr_0p5) +random_st.logistic(D_arr_0p5, 0.5, size=1) +random_st.logistic(0.5, D_arr_0p5, size=1) +random_st.logistic(D_arr_like_0p5, 0.5) +random_st.logistic(0.5, D_arr_like_0p5) +random_st.logistic(D_arr_0p5, D_arr_0p5) +random_st.logistic(D_arr_like_0p5, D_arr_like_0p5) +random_st.logistic(D_arr_0p5, D_arr_0p5, size=1) +random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.lognormal(0.5, 0.5) +random_st.lognormal(0.5, 0.5, size=None) +random_st.lognormal(0.5, 0.5, size=1) +random_st.lognormal(D_arr_0p5, 0.5) +random_st.lognormal(0.5, D_arr_0p5) +random_st.lognormal(D_arr_0p5, 0.5, size=1) +random_st.lognormal(0.5, D_arr_0p5, size=1) +random_st.lognormal(D_arr_like_0p5, 0.5) +random_st.lognormal(0.5, D_arr_like_0p5) +random_st.lognormal(D_arr_0p5, D_arr_0p5) +random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5) +random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1) +random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.noncentral_chisquare(0.5, 0.5) +random_st.noncentral_chisquare(0.5, 0.5, size=None) +random_st.noncentral_chisquare(0.5, 0.5, size=1) +random_st.noncentral_chisquare(D_arr_0p5, 0.5) +random_st.noncentral_chisquare(0.5, D_arr_0p5) +random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1) +random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1) +random_st.noncentral_chisquare(D_arr_like_0p5, 0.5) +random_st.noncentral_chisquare(0.5, D_arr_like_0p5) +random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5) +random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5) +random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1) +random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.normal(0.5, 0.5) +random_st.normal(0.5, 0.5, size=None) +random_st.normal(0.5, 0.5, size=1) +random_st.normal(D_arr_0p5, 0.5) +random_st.normal(0.5, D_arr_0p5) +random_st.normal(D_arr_0p5, 0.5, size=1) +random_st.normal(0.5, D_arr_0p5, size=1) +random_st.normal(D_arr_like_0p5, 0.5) +random_st.normal(0.5, D_arr_like_0p5) +random_st.normal(D_arr_0p5, D_arr_0p5) +random_st.normal(D_arr_like_0p5, D_arr_like_0p5) +random_st.normal(D_arr_0p5, D_arr_0p5, size=1) +random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1) + +random_st.triangular(0.1, 0.5, 0.9) 
+random_st.triangular(0.1, 0.5, 0.9, size=None) +random_st.triangular(0.1, 0.5, 0.9, size=1) +random_st.triangular(D_arr_0p1, 0.5, 0.9) +random_st.triangular(0.1, D_arr_0p5, 0.9) +random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +random_st.triangular(0.1, D_arr_0p5, 0.9, size=1) +random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9) +random_st.triangular(0.5, D_arr_like_0p5, 0.9) +random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9) +random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9) +random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +random_st.noncentral_f(0.1, 0.5, 0.9) +random_st.noncentral_f(0.1, 0.5, 0.9, size=None) +random_st.noncentral_f(0.1, 0.5, 0.9, size=1) +random_st.noncentral_f(D_arr_0p1, 0.5, 0.9) +random_st.noncentral_f(0.1, D_arr_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1) +random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1) +random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9) +random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9) +random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9) +random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1) +random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1) + +random_st.binomial(10, 0.5) +random_st.binomial(10, 0.5, size=None) +random_st.binomial(10, 0.5, size=1) +random_st.binomial(I_arr_10, 0.5) +random_st.binomial(10, D_arr_0p5) +random_st.binomial(I_arr_10, 0.5, size=1) +random_st.binomial(10, D_arr_0p5, size=1) +random_st.binomial(I_arr_like_10, 0.5) +random_st.binomial(10, D_arr_like_0p5) +random_st.binomial(I_arr_10, D_arr_0p5) +random_st.binomial(I_arr_like_10, D_arr_like_0p5) +random_st.binomial(I_arr_10, D_arr_0p5, size=1) +random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +random_st.negative_binomial(10, 0.5) +random_st.negative_binomial(10, 0.5, size=None) +random_st.negative_binomial(10, 0.5, size=1) +random_st.negative_binomial(I_arr_10, 0.5) +random_st.negative_binomial(10, D_arr_0p5) +random_st.negative_binomial(I_arr_10, 0.5, size=1) +random_st.negative_binomial(10, D_arr_0p5, size=1) +random_st.negative_binomial(I_arr_like_10, 0.5) +random_st.negative_binomial(10, D_arr_like_0p5) +random_st.negative_binomial(I_arr_10, D_arr_0p5) +random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5) +random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1) +random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1) + +random_st.hypergeometric(20, 20, 10) +random_st.hypergeometric(20, 20, 10, size=None) +random_st.hypergeometric(20, 20, 10, size=1) +random_st.hypergeometric(I_arr_20, 20, 10) +random_st.hypergeometric(20, I_arr_20, 10) +random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1) +random_st.hypergeometric(20, I_arr_20, 10, size=1) +random_st.hypergeometric(I_arr_like_20, 20, I_arr_10) +random_st.hypergeometric(20, I_arr_like_20, 10) +random_st.hypergeometric(I_arr_20, I_arr_20, 10) +random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10) +random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1) +random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1) + +random_st.randint(0, 100) +random_st.randint(100) +random_st.randint([100]) +random_st.randint(0, [100]) + +random_st.randint(2, dtype=bool) +random_st.randint(0, 2, dtype=bool) +random_st.randint(I_bool_high_open, dtype=bool) +random_st.randint(I_bool_low, I_bool_high_open, dtype=bool) 
+random_st.randint(0, I_bool_high_open, dtype=bool) + +random_st.randint(2, dtype=np.bool) +random_st.randint(0, 2, dtype=np.bool) +random_st.randint(I_bool_high_open, dtype=np.bool) +random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool) +random_st.randint(0, I_bool_high_open, dtype=np.bool) + +random_st.randint(256, dtype="u1") +random_st.randint(0, 256, dtype="u1") +random_st.randint(I_u1_high_open, dtype="u1") +random_st.randint(I_u1_low, I_u1_high_open, dtype="u1") +random_st.randint(0, I_u1_high_open, dtype="u1") + +random_st.randint(256, dtype="uint8") +random_st.randint(0, 256, dtype="uint8") +random_st.randint(I_u1_high_open, dtype="uint8") +random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8") +random_st.randint(0, I_u1_high_open, dtype="uint8") + +random_st.randint(256, dtype=np.uint8) +random_st.randint(0, 256, dtype=np.uint8) +random_st.randint(I_u1_high_open, dtype=np.uint8) +random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8) +random_st.randint(0, I_u1_high_open, dtype=np.uint8) + +random_st.randint(65536, dtype="u2") +random_st.randint(0, 65536, dtype="u2") +random_st.randint(I_u2_high_open, dtype="u2") +random_st.randint(I_u2_low, I_u2_high_open, dtype="u2") +random_st.randint(0, I_u2_high_open, dtype="u2") + +random_st.randint(65536, dtype="uint16") +random_st.randint(0, 65536, dtype="uint16") +random_st.randint(I_u2_high_open, dtype="uint16") +random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16") +random_st.randint(0, I_u2_high_open, dtype="uint16") + +random_st.randint(65536, dtype=np.uint16) +random_st.randint(0, 65536, dtype=np.uint16) +random_st.randint(I_u2_high_open, dtype=np.uint16) +random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16) +random_st.randint(0, I_u2_high_open, dtype=np.uint16) + +random_st.randint(4294967296, dtype="u4") +random_st.randint(0, 4294967296, dtype="u4") +random_st.randint(I_u4_high_open, dtype="u4") +random_st.randint(I_u4_low, I_u4_high_open, dtype="u4") +random_st.randint(0, I_u4_high_open, dtype="u4") + +random_st.randint(4294967296, dtype="uint32") +random_st.randint(0, 4294967296, dtype="uint32") +random_st.randint(I_u4_high_open, dtype="uint32") +random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32") +random_st.randint(0, I_u4_high_open, dtype="uint32") + +random_st.randint(4294967296, dtype=np.uint32) +random_st.randint(0, 4294967296, dtype=np.uint32) +random_st.randint(I_u4_high_open, dtype=np.uint32) +random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32) +random_st.randint(0, I_u4_high_open, dtype=np.uint32) + + +random_st.randint(18446744073709551616, dtype="u8") +random_st.randint(0, 18446744073709551616, dtype="u8") +random_st.randint(I_u8_high_open, dtype="u8") +random_st.randint(I_u8_low, I_u8_high_open, dtype="u8") +random_st.randint(0, I_u8_high_open, dtype="u8") + +random_st.randint(18446744073709551616, dtype="uint64") +random_st.randint(0, 18446744073709551616, dtype="uint64") +random_st.randint(I_u8_high_open, dtype="uint64") +random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64") +random_st.randint(0, I_u8_high_open, dtype="uint64") + +random_st.randint(18446744073709551616, dtype=np.uint64) +random_st.randint(0, 18446744073709551616, dtype=np.uint64) +random_st.randint(I_u8_high_open, dtype=np.uint64) +random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64) +random_st.randint(0, I_u8_high_open, dtype=np.uint64) + +random_st.randint(128, dtype="i1") +random_st.randint(-128, 128, dtype="i1") +random_st.randint(I_i1_high_open, dtype="i1") 
+random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")
+random_st.randint(-128, I_i1_high_open, dtype="i1")
+
+random_st.randint(128, dtype="int8")
+random_st.randint(-128, 128, dtype="int8")
+random_st.randint(I_i1_high_open, dtype="int8")
+random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")
+random_st.randint(-128, I_i1_high_open, dtype="int8")
+
+random_st.randint(128, dtype=np.int8)
+random_st.randint(-128, 128, dtype=np.int8)
+random_st.randint(I_i1_high_open, dtype=np.int8)
+random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)
+random_st.randint(-128, I_i1_high_open, dtype=np.int8)
+
+random_st.randint(32768, dtype="i2")
+random_st.randint(-32768, 32768, dtype="i2")
+random_st.randint(I_i2_high_open, dtype="i2")
+random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")
+random_st.randint(-32768, I_i2_high_open, dtype="i2")
+
+random_st.randint(32768, dtype="int16")
+random_st.randint(-32768, 32768, dtype="int16")
+random_st.randint(I_i2_high_open, dtype="int16")
+random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")
+random_st.randint(-32768, I_i2_high_open, dtype="int16")
+
+random_st.randint(32768, dtype=np.int16)
+random_st.randint(-32768, 32768, dtype=np.int16)
+random_st.randint(I_i2_high_open, dtype=np.int16)
+random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)
+random_st.randint(-32768, I_i2_high_open, dtype=np.int16)
+
+random_st.randint(2147483648, dtype="i4")
+random_st.randint(-2147483648, 2147483648, dtype="i4")
+random_st.randint(I_i4_high_open, dtype="i4")
+random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")
+random_st.randint(-2147483648, I_i4_high_open, dtype="i4")
+
+random_st.randint(2147483648, dtype="int32")
+random_st.randint(-2147483648, 2147483648, dtype="int32")
+random_st.randint(I_i4_high_open, dtype="int32")
+random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")
+random_st.randint(-2147483648, I_i4_high_open, dtype="int32")
+
+random_st.randint(2147483648, dtype=np.int32)
+random_st.randint(-2147483648, 2147483648, dtype=np.int32)
+random_st.randint(I_i4_high_open, dtype=np.int32)
+random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)
+random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)
+
+random_st.randint(9223372036854775808, dtype="i8")
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")
+random_st.randint(I_i8_high_open, dtype="i8")
+random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")
+
+random_st.randint(9223372036854775808, dtype="int64")
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")
+random_st.randint(I_i8_high_open, dtype="int64")
+random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")
+
+random_st.randint(9223372036854775808, dtype=np.int64)
+random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)
+random_st.randint(I_i8_high_open, dtype=np.int64)
+random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)
+random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)
+
+bg: np.random.BitGenerator = random_st._bit_generator
+
+random_st.bytes(2)
+
+random_st.choice(5)
+random_st.choice(5, 3)
+random_st.choice(5, 3, replace=True)
+random_st.choice(5, 3, p=[1 / 5] * 5)
+random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)
+
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
+random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True) +random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])) + +random_st.dirichlet([0.5, 0.5]) +random_st.dirichlet(np.array([0.5, 0.5])) +random_st.dirichlet(np.array([0.5, 0.5]), size=3) + +random_st.multinomial(20, [1 / 6.0] * 6) +random_st.multinomial(20, np.array([0.5, 0.5])) +random_st.multinomial(20, [1 / 6.0] * 6, size=2) + +random_st.multivariate_normal([0.0], [[1.0]]) +random_st.multivariate_normal([0.0], np.array([[1.0]])) +random_st.multivariate_normal(np.array([0.0]), [[1.0]]) +random_st.multivariate_normal([0.0], np.array([[1.0]])) + +random_st.permutation(10) +random_st.permutation([1, 2, 3, 4]) +random_st.permutation(np.array([1, 2, 3, 4])) +random_st.permutation(D_2D) + +random_st.shuffle(np.arange(10)) +random_st.shuffle([1, 2, 3, 4, 5]) +random_st.shuffle(D_2D) + +np.random.RandomState(SEED_PCG64) +np.random.RandomState(0) +np.random.RandomState([0, 1, 2]) +random_st.__str__() +random_st.__repr__() +random_st_state = random_st.__getstate__() +random_st.__setstate__(random_st_state) +random_st.seed() +random_st.seed(1) +random_st.seed([0, 1]) +random_st_get_state = random_st.get_state() +random_st_get_state_legacy = random_st.get_state(legacy=True) +random_st.set_state(random_st_get_state) + +random_st.rand() +random_st.rand(1) +random_st.rand(1, 2) +random_st.randn() +random_st.randn(1) +random_st.randn(1, 2) +random_st.random_sample() +random_st.random_sample(1) +random_st.random_sample(size=(1, 2)) + +random_st.tomaxint() +random_st.tomaxint(1) +random_st.tomaxint((1,)) + +np.random.mtrand.set_bit_generator(SEED_PCG64) +np.random.mtrand.get_bit_generator() diff --git a/python/numpy/typing/tests/data/pass/recfunctions.py b/python/numpy/typing/tests/data/pass/recfunctions.py new file mode 100644 index 000000000..52a3d78a7 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/recfunctions.py @@ -0,0 +1,161 @@ +"""These tests are based on the doctests from `numpy/lib/recfunctions.py`.""" + +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib import recfunctions as rfn + + +def test_recursive_fill_fields() -> None: + a: npt.NDArray[np.void] = np.array( + [(1, 10.0), (2, 20.0)], + dtype=[("A", np.int64), ("B", np.float64)], + ) + b = np.zeros((int(3),), dtype=a.dtype) + out = rfn.recursive_fill_fields(a, b) + assert_type(out, np.ndarray[tuple[int], np.dtype[np.void]]) + + +def test_get_names() -> None: + names: tuple[str | Any, ...] + names = rfn.get_names(np.empty((1,), dtype=[("A", int)]).dtype) + names = rfn.get_names(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype) + + adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])]) + names = rfn.get_names(adtype) + + +def test_get_names_flat() -> None: + names: tuple[str, ...] 
+    names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int)]).dtype)
+    names = rfn.get_names_flat(np.empty((1,), dtype=[("A", int), ("B", float)]).dtype)
+
+    adtype = np.dtype([("a", int), ("b", [("b_a", int), ("b_b", int)])])
+    names = rfn.get_names_flat(adtype)
+
+
+def test_flatten_descr() -> None:
+    ndtype = np.dtype([("a", "<i4"), ("b", [("b_a", "<f8"), ("b_b", "<i4")])])
+    descr: tuple[tuple[str, np.dtype[Any]], ...] = rfn.flatten_descr(ndtype)
+
+
+def test_get_fieldstructure() -> None:
+    ndtype = np.dtype([
+        ("A", int),
+        ("B", [("B_A", int), ("B_B", [("B_B_A", int), ("B_B_B", int)])]),
+    ])
+    assert_type(rfn.get_fieldstructure(ndtype), dict[str, list[str]])
+
+
+def test_merge_arrays() -> None:
+    assert_type(
+        rfn.merge_arrays((
+            np.ones((int(2),), np.int_),
+            np.ones((int(3),), np.float64),
+        )),
+        np.recarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_drop_fields() -> None:
+    ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])]
+    a = np.ones((int(3),), dtype=ndtype)
+
+    assert_type(
+        rfn.drop_fields(a, "a"),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+    assert_type(
+        rfn.drop_fields(a, "a", asrecarray=True),
+        np.rec.recarray[tuple[int], np.dtype[np.void]],
+    )
+    assert_type(
+        rfn.rec_drop_fields(a, "a"),
+        np.rec.recarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_rename_fields() -> None:
+    ndtype = [("a", np.int64), ("b", [("b_a", np.double), ("b_b", np.int64)])]
+    a = np.ones((int(3),), dtype=ndtype)
+
+    assert_type(
+        rfn.rename_fields(a, {"a": "A", "b_b": "B_B"}),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_repack_fields() -> None:
+    dt: np.dtype[np.void] = np.dtype("u1, <i8, <f8", align=True)
+    assert_type(rfn.repack_fields(dt), np.dtype[np.void])
+
+
+def test_structured_to_unstructured() -> None:
+    a = np.zeros(4, dtype=[("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)])
+    assert_type(rfn.structured_to_unstructured(a), npt.NDArray[Any])
+
+
+def test_unstructured_to_structured() -> None:
+    dt: np.dtype[np.void] = np.dtype([("a", "i4"), ("b", "f4,u2"), ("c", "f4", 2)])
+    a = np.arange(20, dtype=np.int32).reshape((4, 5))
+    assert_type(rfn.unstructured_to_structured(a, dt), npt.NDArray[np.void])
+
+
+def test_apply_along_fields() -> None:
+    b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")])
+    assert_type(
+        rfn.apply_along_fields(np.mean, b),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_assign_fields_by_name() -> None:
+    b = np.ones(4, dtype=[("x", "i4"), ("y", "f4"), ("z", "f8")])
+    # assign_fields_by_name copies matching fields from src into dst in place
+    # and returns None, so a plain call is enough to exercise its signature.
+    rfn.assign_fields_by_name(b, b)
+
+
+def test_require_fields() -> None:
+    a = np.ones(4, dtype=[("a", "i4"), ("b", "f8"), ("c", "u1")])
+    assert_type(
+        rfn.require_fields(a, [("b", "f4"), ("c", "u1")]),
+        np.ndarray[tuple[int], np.dtype[np.void]],
+    )
+
+
+def test_stack_arrays() -> None:
+    x = np.zeros((int(2),), np.int32)
+    assert_type(
+        rfn.stack_arrays(x),
+        np.ndarray[tuple[int], np.dtype[np.int32]],
+    )
+
+    z = np.ones((int(2),), [("A", "|S3"), ("B", float)])
+    zz = np.ones((int(2),), [("A", "|S3"), ("B", np.float64), ("C", np.float64)])
+    assert_type(
+        rfn.stack_arrays((z, zz)),
+        np.ma.MaskedArray[tuple[Any, ...], np.dtype[np.void]],
+    )
+
+
+def test_find_duplicates() -> None:
+    ndtype = np.dtype([("a", int)])
+
+    a = np.ma.ones(7, mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+    assert_type(rfn.find_duplicates(a), np.ma.MaskedArray[Any, np.dtype[np.void]])
+    assert_type(
+        rfn.find_duplicates(a, ignoremask=True, return_index=True),
+        tuple[
+            np.ma.MaskedArray[Any, np.dtype[np.void]],
+            np.ndarray[Any, np.dtype[np.int_]],
+        ],
+    )
diff --git a/python/numpy/typing/tests/data/pass/scalars.py b/python/numpy/typing/tests/data/pass/scalars.py
new file mode 100644
index 000000000..655903a50
--- /dev/null
+++ 
b/python/numpy/typing/tests/data/pass/scalars.py @@ -0,0 +1,248 @@ +import datetime as dt + +import pytest +import numpy as np + +b = np.bool() +b_ = np.bool_() +u8 = np.uint64() +i8 = np.int64() +f8 = np.float64() +c16 = np.complex128() +U = np.str_() +S = np.bytes_() + + +# Construction +class D: + def __index__(self) -> int: + return 0 + + +class C: + def __complex__(self) -> complex: + return 3j + + +class B: + def __int__(self) -> int: + return 4 + + +class A: + def __float__(self) -> float: + return 4.0 + + +np.complex64(3j) +np.complex64(A()) +np.complex64(C()) +np.complex128(3j) +np.complex128(C()) +np.complex128(None) +np.complex64("1.2") +np.complex128(b"2j") + +np.int8(4) +np.int16(3.4) +np.int32(4) +np.int64(-1) +np.uint8(B()) +np.uint32() +np.int32("1") +np.int64(b"2") + +np.float16(A()) +np.float32(16) +np.float64(3.0) +np.float64(None) +np.float32("1") +np.float16(b"2.5") + +np.uint64(D()) +np.float32(D()) +np.complex64(D()) + +np.bytes_(b"hello") +np.bytes_("hello", 'utf-8') +np.bytes_("hello", encoding='utf-8') +np.str_("hello") +np.str_(b"hello", 'utf-8') +np.str_(b"hello", encoding='utf-8') + +# Array-ish semantics +np.int8().real +np.int16().imag +np.int32().data +np.int64().flags + +np.uint8().itemsize * 2 +np.uint16().ndim + 1 +np.uint32().strides +np.uint64().shape + +# Time structures +np.datetime64() +np.datetime64(0, "D") +np.datetime64(0, b"D") +np.datetime64(0, ('ms', 3)) +np.datetime64("2019") +np.datetime64(b"2019") +np.datetime64("2019", "D") +np.datetime64("2019", "us") +np.datetime64("2019", "as") +np.datetime64(np.datetime64()) +np.datetime64(np.datetime64()) +np.datetime64(dt.datetime(2000, 5, 3)) +np.datetime64(dt.datetime(2000, 5, 3), "D") +np.datetime64(dt.datetime(2000, 5, 3), "us") +np.datetime64(dt.datetime(2000, 5, 3), "as") +np.datetime64(dt.date(2000, 5, 3)) +np.datetime64(dt.date(2000, 5, 3), "D") +np.datetime64(dt.date(2000, 5, 3), "us") +np.datetime64(dt.date(2000, 5, 3), "as") +np.datetime64(None) +np.datetime64(None, "D") + +np.timedelta64() +np.timedelta64(0) +np.timedelta64(0, "D") +np.timedelta64(0, ('ms', 3)) +np.timedelta64(0, b"D") +np.timedelta64("3") +np.timedelta64(b"5") +np.timedelta64(np.timedelta64(2)) +np.timedelta64(dt.timedelta(2)) +np.timedelta64(None) +np.timedelta64(None, "D") + +np.void(1) +np.void(np.int64(1)) +np.void(True) +np.void(np.bool(True)) +np.void(b"test") +np.void(np.bytes_("test")) +np.void(object(), [("a", "O"), ("b", "O")]) +np.void(object(), dtype=[("a", "O"), ("b", "O")]) + +# Protocols +i8 = np.int64() +u8 = np.uint64() +f8 = np.float64() +c16 = np.complex128() +b = np.bool() +td = np.timedelta64() +U = np.str_("1") +S = np.bytes_("1") +AR = np.array(1, dtype=np.float64) + +int(i8) +int(u8) +int(f8) +int(b) +int(td) +int(U) +int(S) +int(AR) +with pytest.warns(np.exceptions.ComplexWarning): + int(c16) + +float(i8) +float(u8) +float(f8) +float(b_) +float(td) +float(U) +float(S) +float(AR) +with pytest.warns(np.exceptions.ComplexWarning): + float(c16) + +complex(i8) +complex(u8) +complex(f8) +complex(c16) +complex(b_) +complex(td) +complex(U) +complex(AR) + + +# Misc +c16.dtype +c16.real +c16.imag +c16.real.real +c16.real.imag +c16.ndim +c16.size +c16.itemsize +c16.shape +c16.strides +c16.squeeze() +c16.byteswap() +c16.transpose() + +# Aliases +np.byte() +np.short() +np.intc() +np.intp() +np.int_() +np.longlong() + +np.ubyte() +np.ushort() +np.uintc() +np.uintp() +np.uint() +np.ulonglong() + +np.half() +np.single() +np.double() +np.longdouble() + +np.csingle() +np.cdouble() +np.clongdouble() + +b.item() 
+i8.item() +u8.item() +f8.item() +c16.item() +U.item() +S.item() + +b.tolist() +i8.tolist() +u8.tolist() +f8.tolist() +c16.tolist() +U.tolist() +S.tolist() + +b.ravel() +i8.ravel() +u8.ravel() +f8.ravel() +c16.ravel() +U.ravel() +S.ravel() + +b.flatten() +i8.flatten() +u8.flatten() +f8.flatten() +c16.flatten() +U.flatten() +S.flatten() + +b.reshape(1) +i8.reshape(1) +u8.reshape(1) +f8.reshape(1) +c16.reshape(1) +U.reshape(1) +S.reshape(1) diff --git a/python/numpy/typing/tests/data/pass/shape.py b/python/numpy/typing/tests/data/pass/shape.py new file mode 100644 index 000000000..286c8a81d --- /dev/null +++ b/python/numpy/typing/tests/data/pass/shape.py @@ -0,0 +1,19 @@ +from typing import Any, NamedTuple, cast + +import numpy as np + + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +# Test variance of _ShapeT_co +def accepts_2d(a: np.ndarray[tuple[int, int], Any]) -> None: + return None + + +accepts_2d(np.empty(XYGrid(2, 2))) +accepts_2d(np.zeros(XYGrid(2, 2), dtype=int)) +accepts_2d(np.ones(XYGrid(2, 2), dtype=int)) +accepts_2d(np.full(XYGrid(2, 2), fill_value=5, dtype=int)) diff --git a/python/numpy/typing/tests/data/pass/simple.py b/python/numpy/typing/tests/data/pass/simple.py new file mode 100644 index 000000000..8f44e6e76 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/simple.py @@ -0,0 +1,168 @@ +"""Simple expression that should pass with mypy.""" +import operator + +import numpy as np +import numpy.typing as npt +from collections.abc import Iterable + +# Basic checks +array = np.array([1, 2]) + + +def ndarray_func(x: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]: + return x + + +ndarray_func(np.array([1, 2], dtype=np.float64)) +array == 1 +array.dtype == float + +# Dtype construction +np.dtype(float) +np.dtype(np.float64) +np.dtype(None) +np.dtype("float64") +np.dtype(np.dtype(float)) +np.dtype(("U", 10)) +np.dtype((np.int32, (2, 2))) +# Define the arguments on the previous line to prevent bidirectional +# type inference in mypy from broadening the types. +two_tuples_dtype = [("R", "u1"), ("G", "u1"), ("B", "u1")] +np.dtype(two_tuples_dtype) + +three_tuples_dtype = [("R", "u1", 2)] +np.dtype(three_tuples_dtype) + +mixed_tuples_dtype = [("R", "u1"), ("G", np.str_, 1)] +np.dtype(mixed_tuples_dtype) + +shape_tuple_dtype = [("R", "u1", (2, 2))] +np.dtype(shape_tuple_dtype) + +shape_like_dtype = [("R", "u1", (2, 2)), ("G", np.str_, 1)] +np.dtype(shape_like_dtype) + +object_dtype = [("field1", object)] +np.dtype(object_dtype) + +np.dtype((np.int32, (np.int8, 4))) + +# Dtype comparison +np.dtype(float) == float +np.dtype(float) != np.float64 +np.dtype(float) < None +np.dtype(float) <= "float64" +np.dtype(float) > np.dtype(float) +np.dtype(float) >= np.dtype(("U", 10)) + +# Iteration and indexing +def iterable_func(x: Iterable[object]) -> Iterable[object]: + return x + + +iterable_func(array) +list(array) +iter(array) +zip(array, array) +array[1] +array[:] +array[...] 
+array[:] = 0 + +array_2d = np.ones((3, 3)) +array_2d[:2, :2] +array_2d[:2, :2] = 0 +array_2d[..., 0] +array_2d[..., 0] = 2 +array_2d[-1, -1] = None + +array_obj = np.zeros(1, dtype=np.object_) +array_obj[0] = slice(None) + +# Other special methods +len(array) +str(array) +array_scalar = np.array(1) +int(array_scalar) +float(array_scalar) +complex(array_scalar) +bytes(array_scalar) +operator.index(array_scalar) +bool(array_scalar) + +# comparisons +array < 1 +array <= 1 +array == 1 +array != 1 +array > 1 +array >= 1 +1 < array +1 <= array +1 == array +1 != array +1 > array +1 >= array + +# binary arithmetic +array + 1 +1 + array +array += 1 + +array - 1 +1 - array +array -= 1 + +array * 1 +1 * array +array *= 1 + +nonzero_array = np.array([1, 2]) +array / 1 +1 / nonzero_array +float_array = np.array([1.0, 2.0]) +float_array /= 1 + +array // 1 +1 // nonzero_array +array //= 1 + +array % 1 +1 % nonzero_array +array %= 1 + +divmod(array, 1) +divmod(1, nonzero_array) + +array ** 1 +1 ** array +array **= 1 + +array << 1 +1 << array +array <<= 1 + +array >> 1 +1 >> array +array >>= 1 + +array & 1 +1 & array +array &= 1 + +array ^ 1 +1 ^ array +array ^= 1 + +array | 1 +1 | array +array |= 1 + +# unary arithmetic +-array ++array +abs(array) +~array + +# Other methods +np.array([1, 2]).transpose() diff --git a/python/numpy/typing/tests/data/pass/simple_py3.py b/python/numpy/typing/tests/data/pass/simple_py3.py new file mode 100644 index 000000000..c05a1ce61 --- /dev/null +++ b/python/numpy/typing/tests/data/pass/simple_py3.py @@ -0,0 +1,6 @@ +import numpy as np + +array = np.array([1, 2]) + +# The @ operator is not in python 2 +array @ array diff --git a/python/numpy/typing/tests/data/pass/ufunc_config.py b/python/numpy/typing/tests/data/pass/ufunc_config.py new file mode 100644 index 000000000..778e1b57f --- /dev/null +++ b/python/numpy/typing/tests/data/pass/ufunc_config.py @@ -0,0 +1,64 @@ +"""Typing tests for `numpy._core._ufunc_config`.""" + +import numpy as np + + +def func1(a: str, b: int) -> None: + return None + + +def func2(a: str, b: int, c: float = 1.0) -> None: + return None + + +def func3(a: str, b: int) -> int: + return 0 + + +class Write1: + def write(self, a: str) -> None: + return None + + +class Write2: + def write(self, a: str, b: int = 1) -> None: + return None + + +class Write3: + def write(self, a: str) -> int: + return 0 + + +_err_default = np.geterr() +_bufsize_default = np.getbufsize() +_errcall_default = np.geterrcall() + +try: + np.seterr(all=None) + np.seterr(divide="ignore") + np.seterr(over="warn") + np.seterr(under="call") + np.seterr(invalid="raise") + np.geterr() + + np.setbufsize(4096) + np.getbufsize() + + np.seterrcall(func1) + np.seterrcall(func2) + np.seterrcall(func3) + np.seterrcall(Write1()) + np.seterrcall(Write2()) + np.seterrcall(Write3()) + np.geterrcall() + + with np.errstate(call=func1, all="call"): + pass + with np.errstate(call=Write1(), divide="log", over="log"): + pass + +finally: + np.seterr(**_err_default) + np.setbufsize(_bufsize_default) + np.seterrcall(_errcall_default) diff --git a/python/numpy/typing/tests/data/pass/ufunclike.py b/python/numpy/typing/tests/data/pass/ufunclike.py new file mode 100644 index 000000000..f993939dd --- /dev/null +++ b/python/numpy/typing/tests/data/pass/ufunclike.py @@ -0,0 +1,47 @@ +from __future__ import annotations +from typing import Any +import numpy as np + + +class Object: + def __ceil__(self) -> Object: + return self + + def __floor__(self) -> Object: + return self + + def __ge__(self, value: object) -> 
bool:
+        return True
+
+    def __array__(self, dtype: np.typing.DTypeLike | None = None,
+                  copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]:
+        ret = np.empty((), dtype=object)
+        ret[()] = self
+        return ret
+
+
+AR_LIKE_b = [True, True, False]
+AR_LIKE_u = [np.uint32(1), np.uint32(2), np.uint32(3)]
+AR_LIKE_i = [1, 2, 3]
+AR_LIKE_f = [1.0, 2.0, 3.0]
+AR_LIKE_O = [Object(), Object(), Object()]
+AR_U: np.ndarray[Any, np.dtype[np.str_]] = np.zeros(3, dtype="U5")
+
+np.fix(AR_LIKE_b)
+np.fix(AR_LIKE_u)
+np.fix(AR_LIKE_i)
+np.fix(AR_LIKE_f)
+np.fix(AR_LIKE_O)
+np.fix(AR_LIKE_f, out=AR_U)
+
+np.isposinf(AR_LIKE_b)
+np.isposinf(AR_LIKE_u)
+np.isposinf(AR_LIKE_i)
+np.isposinf(AR_LIKE_f)
+np.isposinf(AR_LIKE_f, out=AR_U)
+
+np.isneginf(AR_LIKE_b)
+np.isneginf(AR_LIKE_u)
+np.isneginf(AR_LIKE_i)
+np.isneginf(AR_LIKE_f)
+np.isneginf(AR_LIKE_f, out=AR_U)
diff --git a/python/numpy/typing/tests/data/pass/ufuncs.py b/python/numpy/typing/tests/data/pass/ufuncs.py
new file mode 100644
index 000000000..dbc61bb0b
--- /dev/null
+++ b/python/numpy/typing/tests/data/pass/ufuncs.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+np.sin(1)
+np.sin([1, 2, 3])
+np.sin(1, out=np.empty(1))
+np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)])
+np.sin(1, signature="D->D")
+# NOTE: `np.generic` subclasses are not guaranteed to support addition;
+# re-enable this once we can infer the exact return type of `np.sin(...)`.
+#
+# np.sin(1) + np.sin(1)
+np.sin.types[0]
+np.sin.__name__
+np.sin.__doc__
+
+np.abs(np.array([1]))
diff --git a/python/numpy/typing/tests/data/pass/warnings_and_errors.py b/python/numpy/typing/tests/data/pass/warnings_and_errors.py
new file mode 100644
index 000000000..c351afb08
--- /dev/null
+++ b/python/numpy/typing/tests/data/pass/warnings_and_errors.py
@@ -0,0 +1,6 @@
+import numpy.exceptions as ex
+
+ex.AxisError("test")
+ex.AxisError(1, ndim=2)
+ex.AxisError(1, ndim=2, msg_prefix="error")
+ex.AxisError(1, ndim=2, msg_prefix=None)
diff --git a/python/numpy/typing/tests/data/reveal/arithmetic.pyi b/python/numpy/typing/tests/data/reveal/arithmetic.pyi
new file mode 100644
index 000000000..763ff3914
--- /dev/null
+++ b/python/numpy/typing/tests/data/reveal/arithmetic.pyi
@@ -0,0 +1,720 @@
+import datetime as dt
+from typing import Any, assert_type
+
+import numpy as np
+import numpy.typing as npt
+from numpy._typing import _64Bit, _128Bit
+
+b: bool
+c: complex
+f: float
+i: int
+
+c16: np.complex128
+c8: np.complex64
+
+# Can't directly import `np.float128` as it is not available on all platforms
+f16: np.floating[_128Bit]
+f8: np.float64
+f4: np.float32
+
+i8: np.int64
+i4: np.int32
+
+u8: np.uint64
+u4: np.uint32
+
+b_: np.bool
+
+M8: np.datetime64
+M8_none: np.datetime64[None]
+M8_date: np.datetime64[dt.date]
+M8_time: np.datetime64[dt.datetime]
+M8_int: np.datetime64[int]
+date: dt.date
+time: dt.datetime
+
+m8: np.timedelta64
+m8_none: np.timedelta64[None]
+m8_int: np.timedelta64[int]
+m8_delta: np.timedelta64[dt.timedelta]
+delta: dt.timedelta
+
+AR_b: npt.NDArray[np.bool]
+AR_u: npt.NDArray[np.uint32]
+AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_m: list[np.timedelta64] +AR_LIKE_M: list[np.datetime64] +AR_LIKE_O: list[np.object_] + + +# Array subtraction + +assert_type(AR_number - AR_number, npt.NDArray[np.number]) + +assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.uint32]) +assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_b - AR_LIKE_O, Any) + +assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_b, Any) + +assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.uint32]) +assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_u - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_u, Any) + +assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.int64]) +assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_i - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.int64]) +assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating]) +assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_i, Any) + +assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating]) +assert_type(AR_f - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating]) +assert_type(AR_LIKE_O - AR_f, Any) + +assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complex128]) +assert_type(AR_c - 
AR_LIKE_f, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complex128]) +assert_type(AR_c - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complex128]) +assert_type(AR_LIKE_O - AR_c, Any) + +assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_m, npt.NDArray[np.timedelta64]) +assert_type(AR_m - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_u - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_i - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_m - AR_m, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_M - AR_m, npt.NDArray[np.datetime64]) +assert_type(AR_LIKE_O - AR_m, Any) + +assert_type(AR_M - AR_LIKE_b, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_u, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_i, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_m, npt.NDArray[np.datetime64]) +assert_type(AR_M - AR_LIKE_M, npt.NDArray[np.timedelta64]) +assert_type(AR_M - AR_LIKE_O, Any) + +assert_type(AR_LIKE_M - AR_M, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O - AR_M, Any) + +assert_type(AR_O - AR_LIKE_b, Any) +assert_type(AR_O - AR_LIKE_u, Any) +assert_type(AR_O - AR_LIKE_i, Any) +assert_type(AR_O - AR_LIKE_f, Any) +assert_type(AR_O - AR_LIKE_c, Any) +assert_type(AR_O - AR_LIKE_m, Any) +assert_type(AR_O - AR_LIKE_M, Any) +assert_type(AR_O - AR_LIKE_O, Any) + +assert_type(AR_LIKE_b - AR_O, Any) +assert_type(AR_LIKE_u - AR_O, Any) +assert_type(AR_LIKE_i - AR_O, Any) +assert_type(AR_LIKE_f - AR_O, Any) +assert_type(AR_LIKE_c - AR_O, Any) +assert_type(AR_LIKE_m - AR_O, Any) +assert_type(AR_LIKE_M - AR_O, Any) +assert_type(AR_LIKE_O - AR_O, Any) + +# Array "true" division + +assert_type(AR_f / b, npt.NDArray[np.float64]) +assert_type(AR_f / i, npt.NDArray[np.float64]) +assert_type(AR_f / f, npt.NDArray[np.float64]) + +assert_type(b / AR_f, npt.NDArray[np.float64]) +assert_type(i / AR_f, npt.NDArray[np.float64]) +assert_type(f / AR_f, npt.NDArray[np.float64]) + +assert_type(AR_b / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_b / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_b, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_b, Any) + +assert_type(AR_u / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_u / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_u, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_u, Any) + +assert_type(AR_i / AR_LIKE_b, 
npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_i / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_i, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_i, Any) + +assert_type(AR_f / AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f / AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m / AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O / AR_f, Any) + +assert_type(AR_m / AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m / AR_LIKE_m, npt.NDArray[np.float64]) +assert_type(AR_m / AR_LIKE_O, Any) + +assert_type(AR_LIKE_m / AR_m, npt.NDArray[np.float64]) +assert_type(AR_LIKE_O / AR_m, Any) + +assert_type(AR_O / AR_LIKE_b, Any) +assert_type(AR_O / AR_LIKE_u, Any) +assert_type(AR_O / AR_LIKE_i, Any) +assert_type(AR_O / AR_LIKE_f, Any) +assert_type(AR_O / AR_LIKE_m, Any) +assert_type(AR_O / AR_LIKE_M, Any) +assert_type(AR_O / AR_LIKE_O, Any) + +assert_type(AR_LIKE_b / AR_O, Any) +assert_type(AR_LIKE_u / AR_O, Any) +assert_type(AR_LIKE_i / AR_O, Any) +assert_type(AR_LIKE_f / AR_O, Any) +assert_type(AR_LIKE_m / AR_O, Any) +assert_type(AR_LIKE_M / AR_O, Any) +assert_type(AR_LIKE_O / AR_O, Any) + +# Array floor division + +assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8]) +assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.uint32]) +assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_b // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8]) +assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating]) +assert_type(AR_LIKE_O // AR_b, Any) + +assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.uint32]) +assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_u // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.uint32]) +assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger]) +assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating]) +assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_u, Any) + +assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.int64]) +assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger]) +assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating]) +assert_type(AR_i // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.int64]) 
+assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger]) +assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating]) +assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_i, Any) + +assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.float64]) +assert_type(AR_f // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.float64]) +assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64]) +assert_type(AR_LIKE_O // AR_f, Any) + +assert_type(AR_m // AR_LIKE_u, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_i, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_f, npt.NDArray[np.timedelta64]) +assert_type(AR_m // AR_LIKE_m, npt.NDArray[np.int64]) +assert_type(AR_m // AR_LIKE_O, Any) + +assert_type(AR_LIKE_m // AR_m, npt.NDArray[np.int64]) +assert_type(AR_LIKE_O // AR_m, Any) + +assert_type(AR_O // AR_LIKE_b, Any) +assert_type(AR_O // AR_LIKE_u, Any) +assert_type(AR_O // AR_LIKE_i, Any) +assert_type(AR_O // AR_LIKE_f, Any) +assert_type(AR_O // AR_LIKE_m, Any) +assert_type(AR_O // AR_LIKE_M, Any) +assert_type(AR_O // AR_LIKE_O, Any) + +assert_type(AR_LIKE_b // AR_O, Any) +assert_type(AR_LIKE_u // AR_O, Any) +assert_type(AR_LIKE_i // AR_O, Any) +assert_type(AR_LIKE_f // AR_O, Any) +assert_type(AR_LIKE_m // AR_O, Any) +assert_type(AR_LIKE_M // AR_O, Any) +assert_type(AR_LIKE_O // AR_O, Any) + +# unary ops + +assert_type(-f16, np.floating[_128Bit]) +assert_type(-c16, np.complex128) +assert_type(-c8, np.complex64) +assert_type(-f8, np.float64) +assert_type(-f4, np.float32) +assert_type(-i8, np.int64) +assert_type(-i4, np.int32) +assert_type(-u8, np.uint64) +assert_type(-u4, np.uint32) +assert_type(-m8, np.timedelta64) +assert_type(-m8_none, np.timedelta64[None]) +assert_type(-m8_int, np.timedelta64[int]) +assert_type(-m8_delta, np.timedelta64[dt.timedelta]) +assert_type(-AR_f, npt.NDArray[np.float64]) + +assert_type(+f16, np.floating[_128Bit]) +assert_type(+c16, np.complex128) +assert_type(+c8, np.complex64) +assert_type(+f8, np.float64) +assert_type(+f4, np.float32) +assert_type(+i8, np.int64) +assert_type(+i4, np.int32) +assert_type(+u8, np.uint64) +assert_type(+u4, np.uint32) +assert_type(+m8_none, np.timedelta64[None]) +assert_type(+m8_int, np.timedelta64[int]) +assert_type(+m8_delta, np.timedelta64[dt.timedelta]) +assert_type(+AR_f, npt.NDArray[np.float64]) + +assert_type(abs(f16), np.floating[_128Bit]) +assert_type(abs(c16), np.float64) +assert_type(abs(c8), np.float32) +assert_type(abs(f8), np.float64) +assert_type(abs(f4), np.float32) +assert_type(abs(i8), np.int64) +assert_type(abs(i4), np.int32) +assert_type(abs(u8), np.uint64) +assert_type(abs(u4), np.uint32) +assert_type(abs(m8), np.timedelta64) +assert_type(abs(m8_none), np.timedelta64[None]) +assert_type(abs(m8_int), np.timedelta64[int]) +assert_type(abs(m8_delta), np.timedelta64[dt.timedelta]) +assert_type(abs(b_), np.bool) +assert_type(abs(AR_O), npt.NDArray[np.object_]) + +# Time structures + +assert_type(M8 + m8, np.datetime64) +assert_type(M8 + i, np.datetime64) +assert_type(M8 + i8, np.datetime64) +assert_type(M8 - M8, np.timedelta64) +assert_type(M8 - i, np.datetime64) 
+assert_type(M8 - i8, np.datetime64) + +assert_type(M8_none + m8, np.datetime64[None]) +assert_type(M8_none + i, np.datetime64[None]) +assert_type(M8_none + i8, np.datetime64[None]) +assert_type(M8_none - M8, np.timedelta64[None]) +assert_type(M8_none - m8, np.datetime64[None]) +assert_type(M8_none - i, np.datetime64[None]) +assert_type(M8_none - i8, np.datetime64[None]) + +assert_type(m8 + m8, np.timedelta64) +assert_type(m8 + i, np.timedelta64) +assert_type(m8 + i8, np.timedelta64) +assert_type(m8 - m8, np.timedelta64) +assert_type(m8 - i, np.timedelta64) +assert_type(m8 - i8, np.timedelta64) +assert_type(m8 * f, np.timedelta64) +assert_type(m8 * f4, np.timedelta64) +assert_type(m8 * np.True_, np.timedelta64) +assert_type(m8 / f, np.timedelta64) +assert_type(m8 / f4, np.timedelta64) +assert_type(m8 / m8, np.float64) +assert_type(m8 // m8, np.int64) +assert_type(m8 % m8, np.timedelta64) +assert_type(divmod(m8, m8), tuple[np.int64, np.timedelta64]) + +assert_type(m8_none + m8, np.timedelta64[None]) +assert_type(m8_none + i, np.timedelta64[None]) +assert_type(m8_none + i8, np.timedelta64[None]) +assert_type(m8_none - i, np.timedelta64[None]) +assert_type(m8_none - i8, np.timedelta64[None]) + +assert_type(m8_int + i, np.timedelta64[int]) +assert_type(m8_int + m8_delta, np.timedelta64[int]) +assert_type(m8_int + m8, np.timedelta64[int | None]) +assert_type(m8_int - i, np.timedelta64[int]) +assert_type(m8_int - m8_delta, np.timedelta64[int]) +assert_type(m8_int - m8, np.timedelta64[int | None]) + +assert_type(m8_delta + date, dt.date) +assert_type(m8_delta + time, dt.datetime) +assert_type(m8_delta + delta, dt.timedelta) +assert_type(m8_delta - delta, dt.timedelta) +assert_type(m8_delta / delta, float) +assert_type(m8_delta // delta, int) +assert_type(m8_delta % delta, dt.timedelta) +assert_type(divmod(m8_delta, delta), tuple[int, dt.timedelta]) + +# boolean + +assert_type(b_ / b, np.float64) +assert_type(b_ / b_, np.float64) +assert_type(b_ / i, np.float64) +assert_type(b_ / i8, np.float64) +assert_type(b_ / i4, np.float64) +assert_type(b_ / u8, np.float64) +assert_type(b_ / u4, np.float64) +assert_type(b_ / f, np.float64) +assert_type(b_ / f16, np.floating[_128Bit]) +assert_type(b_ / f8, np.float64) +assert_type(b_ / f4, np.float32) +assert_type(b_ / c, np.complex128) +assert_type(b_ / c16, np.complex128) +assert_type(b_ / c8, np.complex64) + +assert_type(b / b_, np.float64) +assert_type(b_ / b_, np.float64) +assert_type(i / b_, np.float64) +assert_type(i8 / b_, np.float64) +assert_type(i4 / b_, np.float64) +assert_type(u8 / b_, np.float64) +assert_type(u4 / b_, np.float64) +assert_type(f / b_, np.float64) +assert_type(f16 / b_, np.floating[_128Bit]) +assert_type(f8 / b_, np.float64) +assert_type(f4 / b_, np.float32) +assert_type(c / b_, np.complex128) +assert_type(c16 / b_, np.complex128) +assert_type(c8 / b_, np.complex64) + +# Complex + +assert_type(c16 + f16, np.complexfloating) +assert_type(c16 + c16, np.complex128) +assert_type(c16 + f8, np.complex128) +assert_type(c16 + i8, np.complex128) +assert_type(c16 + c8, np.complex128) +assert_type(c16 + f4, np.complex128) +assert_type(c16 + i4, np.complex128) +assert_type(c16 + b_, np.complex128) +assert_type(c16 + b, np.complex128) +assert_type(c16 + c, np.complex128) +assert_type(c16 + f, np.complex128) +assert_type(c16 + AR_f, npt.NDArray[np.complex128]) + +assert_type(f16 + c16, np.complexfloating) +assert_type(c16 + c16, np.complex128) +assert_type(f8 + c16, np.complex128) +assert_type(i8 + c16, np.complex128) +assert_type(c8 + c16, 
np.complex128 | np.complex64) +assert_type(f4 + c16, np.complexfloating) +assert_type(i4 + c16, np.complex128) +assert_type(b_ + c16, np.complex128) +assert_type(b + c16, np.complex128) +assert_type(c + c16, np.complex128) +assert_type(f + c16, np.complex128) +assert_type(AR_f + c16, npt.NDArray[np.complex128]) + +assert_type(c8 + f16, np.complex64 | np.complexfloating[_128Bit, _128Bit]) +assert_type(c8 + c16, np.complex64 | np.complex128) +assert_type(c8 + f8, np.complex64 | np.complex128) +assert_type(c8 + i8, np.complex64 | np.complexfloating[_64Bit, _64Bit]) +assert_type(c8 + c8, np.complex64) +assert_type(c8 + f4, np.complex64) +assert_type(c8 + i4, np.complex64) +assert_type(c8 + b_, np.complex64) +assert_type(c8 + b, np.complex64) +assert_type(c8 + c, np.complex64 | np.complex128) +assert_type(c8 + f, np.complex64 | np.complex128) +assert_type(c8 + AR_f, npt.NDArray[np.complexfloating]) + +assert_type(f16 + c8, np.complexfloating[_128Bit, _128Bit] | np.complex64) +assert_type(c16 + c8, np.complex128) +assert_type(f8 + c8, np.complexfloating[_64Bit, _64Bit]) +assert_type(i8 + c8, np.complexfloating[_64Bit, _64Bit] | np.complex64) +assert_type(c8 + c8, np.complex64) +assert_type(f4 + c8, np.complex64) +assert_type(i4 + c8, np.complex64) +assert_type(b_ + c8, np.complex64) +assert_type(b + c8, np.complex64) +assert_type(c + c8, np.complex64 | np.complex128) +assert_type(f + c8, np.complex64 | np.complex128) +assert_type(AR_f + c8, npt.NDArray[np.complexfloating]) + +# Float + +assert_type(f8 + f16, np.floating) +assert_type(f8 + f8, np.float64) +assert_type(f8 + i8, np.float64) +assert_type(f8 + f4, np.float64) +assert_type(f8 + i4, np.float64) +assert_type(f8 + b_, np.float64) +assert_type(f8 + b, np.float64) +assert_type(f8 + c, np.float64 | np.complex128) +assert_type(f8 + f, np.float64) +assert_type(f8 + AR_f, npt.NDArray[np.float64]) + +assert_type(f16 + f8, np.floating) +assert_type(f8 + f8, np.float64) +assert_type(i8 + f8, np.float64) +assert_type(f4 + f8, np.floating) +assert_type(i4 + f8, np.float64) +assert_type(b_ + f8, np.float64) +assert_type(b + f8, np.float64) +assert_type(c + f8, np.complex128 | np.float64) +assert_type(f + f8, np.float64) +assert_type(AR_f + f8, npt.NDArray[np.float64]) + +assert_type(f4 + f16, np.floating) +assert_type(f4 + f8, np.floating) +assert_type(f4 + i8, np.floating) +assert_type(f4 + f4, np.float32) +assert_type(f4 + i4, np.floating) +assert_type(f4 + b_, np.float32) +assert_type(f4 + b, np.float32) +assert_type(f4 + c, np.complexfloating) +assert_type(f4 + f, np.float32) +assert_type(f4 + AR_f, npt.NDArray[np.float64]) + +assert_type(f16 + f4, np.floating) +assert_type(f8 + f4, np.float64) +assert_type(i8 + f4, np.floating) +assert_type(f4 + f4, np.float32) +assert_type(i4 + f4, np.floating) +assert_type(b_ + f4, np.float32) +assert_type(b + f4, np.float32) +assert_type(c + f4, np.complexfloating) +assert_type(f + f4, np.float32) +assert_type(AR_f + f4, npt.NDArray[np.float64]) + +# Int + +assert_type(i8 + i8, np.int64) +assert_type(i8 + u8, Any) +assert_type(i8 + i4, np.signedinteger) +assert_type(i8 + u4, Any) +assert_type(i8 + b_, np.int64) +assert_type(i8 + b, np.int64) +assert_type(i8 + c, np.complex128) +assert_type(i8 + f, np.float64) +assert_type(i8 + AR_f, npt.NDArray[np.float64]) + +assert_type(u8 + u8, np.uint64) +assert_type(u8 + i4, Any) +assert_type(u8 + u4, np.unsignedinteger) +assert_type(u8 + b_, np.uint64) +assert_type(u8 + b, np.uint64) +assert_type(u8 + c, np.complex128) +assert_type(u8 + f, np.float64) +assert_type(u8 + 
AR_f, npt.NDArray[np.float64]) + +assert_type(i8 + i8, np.int64) +assert_type(u8 + i8, Any) +assert_type(i4 + i8, np.signedinteger) +assert_type(u4 + i8, Any) +assert_type(b_ + i8, np.int64) +assert_type(b + i8, np.int64) +assert_type(c + i8, np.complex128) +assert_type(f + i8, np.float64) +assert_type(AR_f + i8, npt.NDArray[np.float64]) + +assert_type(u8 + u8, np.uint64) +assert_type(i4 + u8, Any) +assert_type(u4 + u8, np.unsignedinteger) +assert_type(b_ + u8, np.uint64) +assert_type(b + u8, np.uint64) +assert_type(c + u8, np.complex128) +assert_type(f + u8, np.float64) +assert_type(AR_f + u8, npt.NDArray[np.float64]) + +assert_type(i4 + i8, np.signedinteger) +assert_type(i4 + i4, np.int32) +assert_type(i4 + b_, np.int32) +assert_type(i4 + b, np.int32) +assert_type(i4 + AR_f, npt.NDArray[np.float64]) + +assert_type(u4 + i8, Any) +assert_type(u4 + i4, Any) +assert_type(u4 + u8, np.unsignedinteger) +assert_type(u4 + u4, np.uint32) +assert_type(u4 + b_, np.uint32) +assert_type(u4 + b, np.uint32) +assert_type(u4 + AR_f, npt.NDArray[np.float64]) + +assert_type(i8 + i4, np.signedinteger) +assert_type(i4 + i4, np.int32) +assert_type(b_ + i4, np.int32) +assert_type(b + i4, np.int32) +assert_type(AR_f + i4, npt.NDArray[np.float64]) + +assert_type(i8 + u4, Any) +assert_type(i4 + u4, Any) +assert_type(u8 + u4, np.unsignedinteger) +assert_type(u4 + u4, np.uint32) +assert_type(b_ + u4, np.uint32) +assert_type(b + u4, np.uint32) +assert_type(AR_f + u4, npt.NDArray[np.float64]) + +# Any + +assert_type(AR_Any + 2, npt.NDArray[Any]) + +# regression tests for https://github.com/numpy/numpy/issues/28805 + +assert_type(AR_floating + f, npt.NDArray[np.floating]) +assert_type(AR_floating - f, npt.NDArray[np.floating]) +assert_type(AR_floating * f, npt.NDArray[np.floating]) +assert_type(AR_floating ** f, npt.NDArray[np.floating]) +assert_type(AR_floating / f, npt.NDArray[np.floating]) +assert_type(AR_floating // f, npt.NDArray[np.floating]) +assert_type(AR_floating % f, npt.NDArray[np.floating]) +assert_type(divmod(AR_floating, f), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +assert_type(f + AR_floating, npt.NDArray[np.floating]) +assert_type(f - AR_floating, npt.NDArray[np.floating]) +assert_type(f * AR_floating, npt.NDArray[np.floating]) +assert_type(f ** AR_floating, npt.NDArray[np.floating]) +assert_type(f / AR_floating, npt.NDArray[np.floating]) +assert_type(f // AR_floating, npt.NDArray[np.floating]) +assert_type(f % AR_floating, npt.NDArray[np.floating]) +assert_type(divmod(f, AR_floating), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) + +# character-like + +assert_type(AR_S + b"", npt.NDArray[np.bytes_]) +assert_type(AR_S + [b""], npt.NDArray[np.bytes_]) +assert_type([b""] + AR_S, npt.NDArray[np.bytes_]) +assert_type(AR_S + AR_S, npt.NDArray[np.bytes_]) + +assert_type(AR_U + "", npt.NDArray[np.str_]) +assert_type(AR_U + [""], npt.NDArray[np.str_]) +assert_type("" + AR_U, npt.NDArray[np.str_]) +assert_type([""] + AR_U, npt.NDArray[np.str_]) +assert_type(AR_U + AR_U, npt.NDArray[np.str_]) + +assert_type(AR_T + "", np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + [""], np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type("" + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type([""] + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T + AR_U, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_U + AR_T, 
np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) + +assert_type(AR_S * i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(AR_S * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +assert_type(i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_S` as `list[int]` +assert_type(AR_i * AR_S, np.ndarray[tuple[Any, ...], np.dtype[np.bytes_]]) + +assert_type(AR_U * i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(AR_U * AR_i, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) +# mypy incorrectly infers `AR_LIKE_i * AR_U` as `list[int]` +assert_type(AR_i * AR_U, np.ndarray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(AR_T * i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_LIKE_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(AR_T * AR_i, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +assert_type(i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) +# mypy incorrectly infers `AR_LIKE_i * AR_T` as `list[int]` +assert_type(AR_i * AR_T, np.ndarray[tuple[Any, ...], np.dtypes.StringDType]) diff --git a/python/numpy/typing/tests/data/reveal/array_api_info.pyi b/python/numpy/typing/tests/data/reveal/array_api_info.pyi new file mode 100644 index 000000000..765f9eff5 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/array_api_info.pyi @@ -0,0 +1,70 @@ +from typing import Literal, Never, assert_type + +import numpy as np + +info = np.__array_namespace_info__() + +assert_type(info.__module__, Literal["numpy"]) + +assert_type(info.default_device(), Literal["cpu"]) +assert_type(info.devices()[0], Literal["cpu"]) +assert_type(info.devices()[-1], Literal["cpu"]) + +assert_type(info.capabilities()["boolean indexing"], Literal[True]) +assert_type(info.capabilities()["data-dependent shapes"], Literal[True]) + +assert_type(info.default_dtypes()["real floating"], np.dtype[np.float64]) +assert_type(info.default_dtypes()["complex floating"], np.dtype[np.complex128]) +assert_type(info.default_dtypes()["integral"], np.dtype[np.int_]) +assert_type(info.default_dtypes()["indexing"], np.dtype[np.intp]) + +assert_type(info.dtypes()["bool"], np.dtype[np.bool]) +assert_type(info.dtypes()["int8"], np.dtype[np.int8]) +assert_type(info.dtypes()["uint8"], np.dtype[np.uint8]) +assert_type(info.dtypes()["float32"], np.dtype[np.float32]) +assert_type(info.dtypes()["complex64"], np.dtype[np.complex64]) + +assert_type(info.dtypes(kind="bool")["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind="signed integer")["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind="unsigned integer")["uint64"], np.dtype[np.uint64]) +assert_type(info.dtypes(kind="integral")["int32"], np.dtype[np.int32]) +assert_type(info.dtypes(kind="integral")["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind="real floating")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="complex floating")["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind="numeric")["int16"], np.dtype[np.int16]) +assert_type(info.dtypes(kind="numeric")["uint16"], np.dtype[np.uint16]) +assert_type(info.dtypes(kind="numeric")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="numeric")["complex128"], np.dtype[np.complex128]) + 
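+# The checks below exercise the tuple form of `kind`: an empty tuple is
+# expected to reveal an empty dict, while a tuple of kind strings merges
+# the matching dtype dictionaries into a single dict type.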
+assert_type(info.dtypes(kind=()), dict[Never, Never]) + +assert_type(info.dtypes(kind=("bool",))["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind=("signed integer",))["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind=("integral",))["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind=("complex floating",))["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind=("numeric",))["float64"], np.dtype[np.float64]) + +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["int8"], + np.dtype[np.int8], +) +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["uint8"], + np.dtype[np.uint8], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["int16"], + np.dtype[np.int16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["uint16"], + np.dtype[np.uint16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["float32"], + np.dtype[np.float32], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["complex64"], + np.dtype[np.complex64], +) diff --git a/python/numpy/typing/tests/data/reveal/array_constructors.pyi b/python/numpy/typing/tests/data/reveal/array_constructors.pyi new file mode 100644 index 000000000..49425bb89 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -0,0 +1,249 @@ +import sys +from collections import deque +from pathlib import Path +from typing import Any, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) + +class SubClass(npt.NDArray[_ScalarT_co]): ... + +i8: np.int64 + +A: npt.NDArray[np.float64] +B: SubClass[np.float64] +C: list[int] +D: SubClass[np.float64 | np.int64] + +mixed_shape: tuple[int, np.int64] + +def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... 
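+# `SubClass` (via `B`) is used throughout this file to check which
+# constructors preserve ndarray subclasses (e.g. `empty_like`, `asanyarray`,
+# and `array(..., subok=True)`) and which always return a plain `ndarray`.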
+ +assert_type(np.empty_like(A), npt.NDArray[np.float64]) +assert_type(np.empty_like(B), SubClass[np.float64]) +assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any]) +assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.array(A), npt.NDArray[np.float64]) +assert_type(np.array(B), npt.NDArray[np.float64]) +assert_type(np.array([1, 1.0]), npt.NDArray[Any]) +assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) +assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.array(A, like=A), npt.NDArray[np.float64]) +assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) +assert_type(np.array(B, subok=True), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) +assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) +# https://github.com/numpy/numpy/issues/29245 +assert_type(np.array([], dtype=np.bool), npt.NDArray[np.bool]) + +assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) +assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.zeros(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64]) +assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any]) +assert_type(np.empty(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.concatenate(A), npt.NDArray[np.float64]) +assert_type(np.concatenate([A, A]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.concatenate([[1], A]), npt.NDArray[Any]) +assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any]) +assert_type(np.concatenate((A, A)), npt.NDArray[np.float64]) +assert_type(np.concatenate(([1], [1])), npt.NDArray[Any]) +assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any]) +assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any]) +assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64]) + +assert_type(np.asarray(A), npt.NDArray[np.float64]) +assert_type(np.asarray(B), npt.NDArray[np.float64]) +assert_type(np.asarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.asanyarray(A), npt.NDArray[np.float64]) +assert_type(np.asanyarray(B), SubClass[np.float64]) +assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64]) +assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64]) +assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any]) + +assert_type(np.asfortranarray(A), npt.NDArray[np.float64]) +assert_type(np.asfortranarray(B), npt.NDArray[np.float64]) +assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any]) +assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.asfortranarray(A, dtype='c16'), 
npt.NDArray[Any]) + +assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) +assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), npt.NDArray[Any]) + +assert_type(np.fromfile("test.txt", sep=" "), npt.NDArray[np.float64]) +assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), npt.NDArray[np.int64]) +assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), npt.NDArray[Any]) +with open("test.txt") as f: + assert_type(np.fromfile(f, sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(b"test.txt", sep=" "), npt.NDArray[np.float64]) + assert_type(np.fromfile(Path("test.txt"), sep=" "), npt.NDArray[np.float64]) + +assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromiter("12345", float), npt.NDArray[Any]) + +assert_type(np.frombuffer(A), npt.NDArray[np.float64]) +assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any]) + +assert_type(np.arange(False, True), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) +assert_type(np.arange(10), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) +assert_type(np.arange(0, 10, step=2), np.ndarray[tuple[int], np.dtype[np.signedinteger]]) +assert_type(np.arange(10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) +assert_type(np.arange(start=0, stop=10.0), np.ndarray[tuple[int], np.dtype[np.floating]]) +assert_type(np.arange(np.timedelta64(0)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(0, np.timedelta64(10)), np.ndarray[tuple[int], np.dtype[np.timedelta64]]) +assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), np.ndarray[tuple[int], np.dtype[np.datetime64]]) +assert_type(np.arange(10, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.arange(0, 10, step=2, dtype=np.int16), np.ndarray[tuple[int], np.dtype[np.int16]]) +assert_type(np.arange(10, dtype=int), np.ndarray[tuple[int], np.dtype]) +assert_type(np.arange(0, 10, dtype="f8"), np.ndarray[tuple[int], np.dtype]) + +assert_type(np.require(A), npt.NDArray[np.float64]) +assert_type(np.require(B), SubClass[np.float64]) +assert_type(np.require(B, requirements=None), SubClass[np.float64]) +assert_type(np.require(B, dtype=int), npt.NDArray[Any]) +assert_type(np.require(B, requirements="E"), npt.NDArray[Any]) +assert_type(np.require(B, requirements=["ENSUREARRAY"]), npt.NDArray[Any]) +assert_type(np.require(B, requirements={"F", "E"}), npt.NDArray[Any]) +assert_type(np.require(B, requirements=["C", "OWNDATA"]), SubClass[np.float64]) +assert_type(np.require(B, requirements="W"), SubClass[np.float64]) +assert_type(np.require(B, requirements="A"), SubClass[np.float64]) +assert_type(np.require(C), npt.NDArray[Any]) + +assert_type(np.linspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) +assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.float64], np.float64]) +assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating], np.complexfloating]) 
+assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) +assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) + +assert_type(np.logspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) + +assert_type(np.geomspace(0, 10), npt.NDArray[np.float64]) +assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating]) +assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) + +assert_type(np.zeros_like(A), npt.NDArray[np.float64]) +assert_type(np.zeros_like(C), npt.NDArray[Any]) +assert_type(np.zeros_like(A, dtype=float), npt.NDArray[Any]) +assert_type(np.zeros_like(B), SubClass[np.float64]) +assert_type(np.zeros_like(B, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.ones_like(A), npt.NDArray[np.float64]) +assert_type(np.ones_like(C), npt.NDArray[Any]) +assert_type(np.ones_like(A, dtype=float), npt.NDArray[Any]) +assert_type(np.ones_like(B), SubClass[np.float64]) +assert_type(np.ones_like(B, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(np.full_like(A, i8), npt.NDArray[np.float64]) +assert_type(np.full_like(C, i8), npt.NDArray[Any]) +assert_type(np.full_like(A, i8, dtype=int), npt.NDArray[Any]) +assert_type(np.full_like(B, i8), SubClass[np.float64]) +assert_type(np.full_like(B, i8, dtype=np.int64), npt.NDArray[np.int64]) + +_size: int +_shape_0d: tuple[()] +_shape_1d: tuple[int] +_shape_2d: tuple[int, int] +_shape_nd: tuple[int, ...] +_shape_like: list[int] + +assert_type(np.ones(_shape_0d), np.ndarray[tuple[()], np.dtype[np.float64]]) +assert_type(np.ones(_size), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.ones(_shape_2d), np.ndarray[tuple[int, int], np.dtype[np.float64]]) +assert_type(np.ones(_shape_nd), np.ndarray[tuple[int, ...], np.dtype[np.float64]]) +assert_type(np.ones(_shape_1d, dtype=np.int64), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.ones(_shape_like), npt.NDArray[np.float64]) +assert_type(np.ones(_shape_like, dtype=np.dtypes.Int64DType()), np.ndarray[Any, np.dtypes.Int64DType]) +assert_type(np.ones(_shape_like, dtype=int), npt.NDArray[Any]) +assert_type(np.ones(mixed_shape), npt.NDArray[np.float64]) + +assert_type(np.full(_size, i8), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(np.full(_shape_2d, i8), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(np.full(_shape_like, i8), npt.NDArray[np.int64]) +assert_type(np.full(_shape_like, 42), npt.NDArray[Any]) +assert_type(np.full(_size, i8, dtype=np.float64), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(np.full(_size, i8, dtype=float), np.ndarray[tuple[int], np.dtype]) +assert_type(np.full(_shape_like, 42, dtype=float), npt.NDArray[Any]) +assert_type(np.full(_shape_0d, i8, dtype=object), np.ndarray[tuple[()], np.dtype]) + +assert_type(np.indices([1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...]) + +assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64]) + +assert_type(np.identity(10), npt.NDArray[np.float64]) +assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.identity(10, dtype=int), npt.NDArray[Any]) + +assert_type(np.atleast_1d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_1d(C), 
npt.NDArray[Any]) +assert_type(np.atleast_1d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_1d(A, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(C, C), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.atleast_1d(A, A, A), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.atleast_1d(C, C, C), tuple[npt.NDArray[Any], ...]) + +assert_type(np.atleast_2d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_2d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_2d(A, A, A), tuple[npt.NDArray[np.float64], ...]) + +assert_type(np.atleast_3d(A), npt.NDArray[np.float64]) +assert_type(np.atleast_3d(A, A), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) +assert_type(np.atleast_3d(A, A, A), tuple[npt.NDArray[np.float64], ...]) + +assert_type(np.vstack([A, A]), npt.NDArray[np.float64]) +assert_type(np.vstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.vstack([A, C]), npt.NDArray[Any]) +assert_type(np.vstack([C, C]), npt.NDArray[Any]) + +assert_type(np.hstack([A, A]), npt.NDArray[np.float64]) +assert_type(np.hstack([A, A], dtype=np.float32), npt.NDArray[np.float32]) + +assert_type(np.stack([A, A]), npt.NDArray[np.float64]) +assert_type(np.stack([A, A], dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.stack([A, C]), npt.NDArray[Any]) +assert_type(np.stack([C, C]), npt.NDArray[Any]) +assert_type(np.stack([A, A], axis=0), npt.NDArray[np.float64]) +assert_type(np.stack([A, A], out=B), SubClass[np.float64]) + +assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any]) # pyright correctly infers this as NDArray[float64] +assert_type(np.block(C), npt.NDArray[Any]) + +if sys.version_info >= (3, 12): + from collections.abc import Buffer + + def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ... + + buffer: Buffer + assert_type(create_array(buffer), npt.NDArray[Any]) diff --git a/python/numpy/typing/tests/data/reveal/arraypad.pyi b/python/numpy/typing/tests/data/reveal/arraypad.pyi new file mode 100644 index 000000000..c5a443d93 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/arraypad.pyi @@ -0,0 +1,22 @@ +from collections.abc import Mapping +from typing import Any, SupportsIndex, assert_type + +import numpy as np +import numpy.typing as npt + +def mode_func( + ar: npt.NDArray[np.number], + width: tuple[int, int], + iaxis: SupportsIndex, + kwargs: Mapping[str, Any], +) -> None: ... 
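+# `mode_func` matches the callable-mode protocol of `np.pad`: it is invoked
+# with a rank-1 padded vector to modify in place, the pad widths for that
+# axis, the axis index, and a dict of any extra keyword arguments passed to
+# `np.pad` (exercised by the `a=1, b=2` check below).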
+ +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_LIKE: list[int] + +assert_type(np.pad(AR_i8, (2, 3), "constant"), npt.NDArray[np.int64]) +assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any]) + +assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64]) +assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64]) diff --git a/python/numpy/typing/tests/data/reveal/arrayprint.pyi b/python/numpy/typing/tests/data/reveal/arrayprint.pyi new file mode 100644 index 000000000..3b339edce --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/arrayprint.pyi @@ -0,0 +1,25 @@ +import contextlib +from collections.abc import Callable +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy._core.arrayprint import _FormatOptions + +AR: npt.NDArray[np.int64] +func_float: Callable[[np.floating], str] +func_int: Callable[[np.integer], str] + +assert_type(np.get_printoptions(), _FormatOptions) +assert_type( + np.array2string(AR, formatter={'float_kind': func_float, 'int_kind': func_int}), + str, +) +assert_type(np.format_float_scientific(1.0), str) +assert_type(np.format_float_positional(1), str) +assert_type(np.array_repr(AR), str) +assert_type(np.array_str(AR), str) + +assert_type(np.printoptions(), contextlib._GeneratorContextManager[_FormatOptions]) +with np.printoptions() as dct: + assert_type(dct, _FormatOptions) diff --git a/python/numpy/typing/tests/data/reveal/arraysetops.pyi b/python/numpy/typing/tests/data/reveal/arraysetops.pyi new file mode 100644 index 000000000..7e5ca5c57 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/arraysetops.pyi @@ -0,0 +1,74 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib._arraysetops_impl import ( + UniqueAllResult, + UniqueCountsResult, + UniqueInverseResult, +) + +AR_b: npt.NDArray[np.bool] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] + +AR_LIKE_f8: list[float] + +assert_type(np.ediff1d(AR_b), npt.NDArray[np.int8]) +assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), npt.NDArray[np.int64]) +assert_type(np.ediff1d(AR_M), npt.NDArray[np.timedelta64]) +assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_]) +assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any]) + +assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any]) +assert_type( + np.intersect1d(AR_f8, AR_f8, return_indices=True), + tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]], +) + +assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64]) +assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isin(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool]) +assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool]) + +assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64]) +assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), 
npt.NDArray[np.datetime64]) +assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any]) + +assert_type(np.unique(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unique(AR_LIKE_f8, axis=0), npt.NDArray[Any]) +assert_type(np.unique(AR_f8, return_index=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]]) + +assert_type(np.unique_all(AR_f8), UniqueAllResult[np.float64]) +assert_type(np.unique_all(AR_LIKE_f8), UniqueAllResult[Any]) +assert_type(np.unique_counts(AR_f8), UniqueCountsResult[np.float64]) +assert_type(np.unique_counts(AR_LIKE_f8), UniqueCountsResult[Any]) +assert_type(np.unique_inverse(AR_f8), UniqueInverseResult[np.float64]) +assert_type(np.unique_inverse(AR_LIKE_f8), UniqueInverseResult[Any]) +assert_type(np.unique_values(AR_f8), npt.NDArray[np.float64]) +assert_type(np.unique_values(AR_LIKE_f8), npt.NDArray[Any]) diff --git a/python/numpy/typing/tests/data/reveal/arrayterator.pyi b/python/numpy/typing/tests/data/reveal/arrayterator.pyi new file mode 100644 index 000000000..470160c24 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/arrayterator.pyi @@ -0,0 +1,27 @@ +from collections.abc import Generator +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +ar_iter = np.lib.Arrayterator(AR_i8) + +assert_type(ar_iter.var, npt.NDArray[np.int64]) +assert_type(ar_iter.buf_size, int | None) +assert_type(ar_iter.start, list[int]) +assert_type(ar_iter.stop, list[int]) +assert_type(ar_iter.step, list[int]) +assert_type(ar_iter.shape, tuple[Any, ...]) +assert_type(ar_iter.flat, Generator[np.int64, None, None]) + +assert_type(ar_iter.__array__(), npt.NDArray[np.int64]) + +for i in ar_iter: + assert_type(i, npt.NDArray[np.int64]) + +assert_type(ar_iter[0], 
np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[...], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[:], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) +assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[tuple[Any, ...], np.dtype[np.int64]]) diff --git a/python/numpy/typing/tests/data/reveal/bitwise_ops.pyi b/python/numpy/typing/tests/data/reveal/bitwise_ops.pyi new file mode 100644 index 000000000..cd56caad1 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/bitwise_ops.pyi @@ -0,0 +1,167 @@ +from typing import Any, TypeAlias, assert_type +from typing import Literal as L + +import numpy as np +import numpy.typing as npt + +FalseType: TypeAlias = L[False] +TrueType: TypeAlias = L[True] + +i4: np.int32 +i8: np.int64 + +u4: np.uint32 +u8: np.uint64 + +b_: np.bool[bool] +b0_: np.bool[FalseType] +b1_: np.bool[TrueType] + +b: bool +b0: FalseType +b1: TrueType + +i: int + +AR: npt.NDArray[np.int32] + +assert_type(i8 << i8, np.int64) +assert_type(i8 >> i8, np.int64) +assert_type(i8 | i8, np.int64) +assert_type(i8 ^ i8, np.int64) +assert_type(i8 & i8, np.int64) + +assert_type(i8 << AR, npt.NDArray[np.signedinteger]) +assert_type(i8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(i8 | AR, npt.NDArray[np.signedinteger]) +assert_type(i8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(i8 & AR, npt.NDArray[np.signedinteger]) + +assert_type(i4 << i4, np.int32) +assert_type(i4 >> i4, np.int32) +assert_type(i4 | i4, np.int32) +assert_type(i4 ^ i4, np.int32) +assert_type(i4 & i4, np.int32) + +assert_type(i8 << i4, np.signedinteger) +assert_type(i8 >> i4, np.signedinteger) +assert_type(i8 | i4, np.signedinteger) +assert_type(i8 ^ i4, np.signedinteger) +assert_type(i8 & i4, np.signedinteger) + +assert_type(i8 << b_, np.int64) +assert_type(i8 >> b_, np.int64) +assert_type(i8 | b_, np.int64) +assert_type(i8 ^ b_, np.int64) +assert_type(i8 & b_, np.int64) + +assert_type(i8 << b, np.int64) +assert_type(i8 >> b, np.int64) +assert_type(i8 | b, np.int64) +assert_type(i8 ^ b, np.int64) +assert_type(i8 & b, np.int64) + +assert_type(u8 << u8, np.uint64) +assert_type(u8 >> u8, np.uint64) +assert_type(u8 | u8, np.uint64) +assert_type(u8 ^ u8, np.uint64) +assert_type(u8 & u8, np.uint64) + +assert_type(u8 << AR, npt.NDArray[np.signedinteger]) +assert_type(u8 >> AR, npt.NDArray[np.signedinteger]) +assert_type(u8 | AR, npt.NDArray[np.signedinteger]) +assert_type(u8 ^ AR, npt.NDArray[np.signedinteger]) +assert_type(u8 & AR, npt.NDArray[np.signedinteger]) + +assert_type(u4 << u4, np.uint32) +assert_type(u4 >> u4, np.uint32) +assert_type(u4 | u4, np.uint32) +assert_type(u4 ^ u4, np.uint32) +assert_type(u4 & u4, np.uint32) + +assert_type(u4 << i4, np.signedinteger) +assert_type(u4 >> i4, np.signedinteger) +assert_type(u4 | i4, np.signedinteger) +assert_type(u4 ^ i4, np.signedinteger) +assert_type(u4 & i4, np.signedinteger) + +assert_type(u4 << i, np.uint32) +assert_type(u4 >> i, np.uint32) +assert_type(u4 | i, np.uint32) +assert_type(u4 ^ i, np.uint32) +assert_type(u4 & i, np.uint32) + +assert_type(u8 << b_, np.uint64) +assert_type(u8 >> b_, np.uint64) +assert_type(u8 | b_, np.uint64) +assert_type(u8 ^ b_, np.uint64) +assert_type(u8 & b_, np.uint64) + +assert_type(u8 << b, np.uint64) +assert_type(u8 >> b, np.uint64) +assert_type(u8 | b, np.uint64) +assert_type(u8 ^ b, np.uint64) +assert_type(u8 & b, np.uint64) + +assert_type(b_ << b_, np.int8) 
+assert_type(b_ >> b_, np.int8) +assert_type(b_ | b_, np.bool) +assert_type(b_ ^ b_, np.bool) +assert_type(b_ & b_, np.bool) + +assert_type(b_ << AR, npt.NDArray[np.signedinteger]) +assert_type(b_ >> AR, npt.NDArray[np.signedinteger]) +assert_type(b_ | AR, npt.NDArray[np.signedinteger]) +assert_type(b_ ^ AR, npt.NDArray[np.signedinteger]) +assert_type(b_ & AR, npt.NDArray[np.signedinteger]) + +assert_type(b_ << b, np.int8) +assert_type(b_ >> b, np.int8) +assert_type(b_ | b, np.bool) +assert_type(b_ ^ b, np.bool) +assert_type(b_ & b, np.bool) + +assert_type(b_ << i, np.int_) +assert_type(b_ >> i, np.int_) +assert_type(b_ | i, np.bool | np.int_) +assert_type(b_ ^ i, np.bool | np.int_) +assert_type(b_ & i, np.bool | np.int_) + +assert_type(~i8, np.int64) +assert_type(~i4, np.int32) +assert_type(~u8, np.uint64) +assert_type(~u4, np.uint32) +assert_type(~b_, np.bool) +assert_type(~b0_, np.bool[TrueType]) +assert_type(~b1_, np.bool[FalseType]) +assert_type(~AR, npt.NDArray[np.int32]) + +assert_type(b_ | b0_, np.bool) +assert_type(b0_ | b_, np.bool) +assert_type(b_ | b1_, np.bool[TrueType]) +assert_type(b1_ | b_, np.bool[TrueType]) + +assert_type(b_ ^ b0_, np.bool) +assert_type(b0_ ^ b_, np.bool) +assert_type(b_ ^ b1_, np.bool) +assert_type(b1_ ^ b_, np.bool) + +assert_type(b_ & b0_, np.bool[FalseType]) +assert_type(b0_ & b_, np.bool[FalseType]) +assert_type(b_ & b1_, np.bool) +assert_type(b1_ & b_, np.bool) + +assert_type(b0_ | b0_, np.bool[FalseType]) +assert_type(b0_ | b1_, np.bool[TrueType]) +assert_type(b1_ | b0_, np.bool[TrueType]) +assert_type(b1_ | b1_, np.bool[TrueType]) + +assert_type(b0_ ^ b0_, np.bool[FalseType]) +assert_type(b0_ ^ b1_, np.bool[TrueType]) +assert_type(b1_ ^ b0_, np.bool[TrueType]) +assert_type(b1_ ^ b1_, np.bool[FalseType]) + +assert_type(b0_ & b0_, np.bool[FalseType]) +assert_type(b0_ & b1_, np.bool[FalseType]) +assert_type(b1_ & b0_, np.bool[FalseType]) +assert_type(b1_ & b1_, np.bool[TrueType]) diff --git a/python/numpy/typing/tests/data/reveal/char.pyi b/python/numpy/typing/tests/data/reveal/char.pyi new file mode 100644 index 000000000..5c6af7388 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/char.pyi @@ -0,0 +1,224 @@ +from typing import TypeAlias, assert_type + +import numpy as np +import numpy._typing as np_t +import numpy.typing as npt + +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_T: AR_T_alias + +assert_type(np.char.equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.greater(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.greater(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.greater(AR_T, AR_T), npt.NDArray[np.bool]) + 
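# Editor's note: a minimal sketch of how these reveal tests work, using the same
# np.char API exercised here. typing.assert_type is a no-op at runtime (it simply
# returns its first argument); only a static checker such as mypy or pyright
# verifies that the inferred type matches the second argument. AR_demo is an
# illustrative value, not part of the test suite.
from typing import assert_type

import numpy as np
import numpy.typing as npt

AR_demo: npt.NDArray[np.str_] = np.array(["a", "b"])
assert_type(np.char.upper(AR_demo), npt.NDArray[np.str_])  # checked statically, free at runtime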
+assert_type(np.char.less(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.char.less(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.char.less(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.multiply(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) +assert_type(np.char.multiply(AR_T, 5), AR_T_alias) + +assert_type(np.char.mod(AR_U, "test"), npt.NDArray[np.str_]) +assert_type(np.char.mod(AR_S, "test"), npt.NDArray[np.bytes_]) +assert_type(np.char.mod(AR_T, "test"), AR_T_alias) + +assert_type(np.char.capitalize(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.capitalize(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.capitalize(AR_T), AR_T_alias) + +assert_type(np.char.center(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) +assert_type(np.char.center(AR_T, 5), AR_T_alias) + +assert_type(np.char.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.char.encode(AR_T), npt.NDArray[np.bytes_]) +assert_type(np.char.decode(AR_S), npt.NDArray[np.str_]) + +assert_type(np.char.expandtabs(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) +assert_type(np.char.expandtabs(AR_T), AR_T_alias) + +assert_type(np.char.join(AR_U, "_"), npt.NDArray[np.str_]) +assert_type(np.char.join(AR_S, [b"_", b""]), npt.NDArray[np.bytes_]) +assert_type(np.char.join(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.ljust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.ljust(AR_T, 5), AR_T_alias) +assert_type(np.char.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) + +assert_type(np.char.rjust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rjust(AR_T, 5), AR_T_alias) +assert_type(np.char.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_TU_alias) + +assert_type(np.char.lstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.lstrip(AR_T), AR_T_alias) +assert_type(np.char.lstrip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.rstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.rstrip(AR_T), AR_T_alias) +assert_type(np.char.rstrip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.strip(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.char.strip(AR_T), AR_T_alias) +assert_type(np.char.strip(AR_T, "_"), AR_TU_alias) + +assert_type(np.char.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_T, AR_T, start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_]) + +assert_type(np.char.partition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.char.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.partition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.char.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.char.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.char.rpartition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.char.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) 
+assert_type(np.char.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) +assert_type(np.char.replace(AR_T, "_", "_"), AR_TU_alias) + +assert_type(np.char.split(AR_U, "_"), npt.NDArray[np.object_]) +assert_type(np.char.split(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.split(AR_T, "_"), npt.NDArray[np.object_]) + +assert_type(np.char.rsplit(AR_U, "_"), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_S, maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(np.char.rsplit(AR_T, "_"), npt.NDArray[np.object_]) + +assert_type(np.char.splitlines(AR_U), npt.NDArray[np.object_]) +assert_type(np.char.splitlines(AR_S, keepends=[True, True, False]), npt.NDArray[np.object_]) +assert_type(np.char.splitlines(AR_T), npt.NDArray[np.object_]) + +assert_type(np.char.lower(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.lower(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.lower(AR_T), AR_T_alias) + +assert_type(np.char.upper(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.upper(AR_T), AR_T_alias) + +assert_type(np.char.swapcase(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.swapcase(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.swapcase(AR_T), AR_T_alias) + +assert_type(np.char.title(AR_U), npt.NDArray[np.str_]) +assert_type(np.char.title(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.char.title(AR_T), AR_T_alias) + +assert_type(np.char.zfill(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.char.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) +assert_type(np.char.zfill(AR_T, 5), AR_T_alias) + +assert_type(np.char.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.char.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.char.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.char.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.char.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.char.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.char.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.char.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.char.isalpha(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isalpha(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isalnum(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isalnum(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isdecimal(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isdecimal(AR_T), 
npt.NDArray[np.bool]) + +assert_type(np.char.isdigit(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isdigit(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.islower(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.islower(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isnumeric(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isnumeric(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isspace(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isspace(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.istitle(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.istitle(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.isupper(AR_U), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_S), npt.NDArray[np.bool]) +assert_type(np.char.isupper(AR_T), npt.NDArray[np.bool]) + +assert_type(np.char.str_len(AR_U), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_S), npt.NDArray[np.int_]) +assert_type(np.char.str_len(AR_T), npt.NDArray[np.int_]) + +assert_type(np.char.translate(AR_U, ""), npt.NDArray[np.str_]) +assert_type(np.char.translate(AR_S, ""), npt.NDArray[np.bytes_]) +assert_type(np.char.translate(AR_T, ""), AR_T_alias) + +assert_type(np.char.array(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array("bob", copy=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.array(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.array(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) + +assert_type(np.char.asarray(AR_U), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(AR_S, order="K"), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray("bob"), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(b"bob", itemsize=5), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(1, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) +assert_type(np.char.asarray(1), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]] | np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_U, unicode=False), np.char.chararray[np_t._AnyShape, np.dtype[np.bytes_]]) +assert_type(np.char.asarray(AR_S, unicode=True), np.char.chararray[np_t._AnyShape, np.dtype[np.str_]]) diff --git a/python/numpy/typing/tests/data/reveal/chararray.pyi b/python/numpy/typing/tests/data/reveal/chararray.pyi new file mode 100644 index 000000000..b5f4392b7 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/chararray.pyi @@ -0,0 +1,137 @@ +from typing 
import Any, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_BytesCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.bytes_]] +_StrCharArray: TypeAlias = np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] + +AR_U: _StrCharArray +AR_S: _BytesCharArray + +assert_type(AR_U == AR_U, npt.NDArray[np.bool]) +assert_type(AR_S == AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U != AR_U, npt.NDArray[np.bool]) +assert_type(AR_S != AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U >= AR_U, npt.NDArray[np.bool]) +assert_type(AR_S >= AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U <= AR_U, npt.NDArray[np.bool]) +assert_type(AR_S <= AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U > AR_U, npt.NDArray[np.bool]) +assert_type(AR_S > AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U < AR_U, npt.NDArray[np.bool]) +assert_type(AR_S < AR_S, npt.NDArray[np.bool]) + +assert_type(AR_U * 5, _StrCharArray) +assert_type(AR_S * [5], _BytesCharArray) + +assert_type(AR_U % "test", _StrCharArray) +assert_type(AR_S % b"test", _BytesCharArray) + +assert_type(AR_U.capitalize(), _StrCharArray) +assert_type(AR_S.capitalize(), _BytesCharArray) + +assert_type(AR_U.center(5), _StrCharArray) +assert_type(AR_S.center([2, 3, 4], b"a"), _BytesCharArray) + +assert_type(AR_U.encode(), _BytesCharArray) +assert_type(AR_S.decode(), _StrCharArray) + +assert_type(AR_U.expandtabs(), _StrCharArray) +assert_type(AR_S.expandtabs(tabsize=4), _BytesCharArray) + +assert_type(AR_U.join("_"), _StrCharArray) +assert_type(AR_S.join([b"_", b""]), _BytesCharArray) + +assert_type(AR_U.ljust(5), _StrCharArray) +assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rjust(5), _StrCharArray) +assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), _BytesCharArray) + +assert_type(AR_U.lstrip(), _StrCharArray) +assert_type(AR_S.lstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.rstrip(), _StrCharArray) +assert_type(AR_S.rstrip(chars=b"_"), _BytesCharArray) +assert_type(AR_U.strip(), _StrCharArray) +assert_type(AR_S.strip(chars=b"_"), _BytesCharArray) + +assert_type(AR_U.partition("\n"), _StrCharArray) +assert_type(AR_S.partition([b"a", b"b", b"c"]), _BytesCharArray) +assert_type(AR_U.rpartition("\n"), _StrCharArray) +assert_type(AR_S.rpartition([b"a", b"b", b"c"]), _BytesCharArray) + +assert_type(AR_U.replace("_", "-"), _StrCharArray) +assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), _BytesCharArray) + +assert_type(AR_U.split("_"), npt.NDArray[np.object_]) +assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) +assert_type(AR_U.rsplit("_"), npt.NDArray[np.object_]) +assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_]) + +assert_type(AR_U.splitlines(), npt.NDArray[np.object_]) +assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_]) + +assert_type(AR_U.swapcase(), _StrCharArray) +assert_type(AR_S.swapcase(), _BytesCharArray) + +assert_type(AR_U.title(), _StrCharArray) +assert_type(AR_S.title(), _BytesCharArray) + +assert_type(AR_U.upper(), _StrCharArray) +assert_type(AR_S.upper(), _BytesCharArray) + +assert_type(AR_U.zfill(5), _StrCharArray) +assert_type(AR_S.zfill([2, 3, 4]), _BytesCharArray) + +assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.endswith("a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(AR_S.endswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) 
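# Editor's note: np.char.chararray is kept mainly for backwards compatibility; the
# same operations are exposed as functions over ordinary str_/bytes_ arrays, which
# the stubs tested here also cover. A small runnable sketch, assuming numpy >= 2.0
# where the np.strings namespace exists:
import numpy as np

names = np.array(["ada", "grace"])
print(np.strings.upper(names))    # ['ADA' 'GRACE']
print(np.strings.str_len(names))  # [3 5]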
+assert_type(AR_U.startswith("a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(AR_S.startswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) + +assert_type(AR_U.find("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.find([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(AR_U.rfind("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.rfind([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.index("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.index([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(AR_U.rindex("a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(AR_S.rindex([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) + +assert_type(AR_U.isalpha(), npt.NDArray[np.bool]) +assert_type(AR_S.isalpha(), npt.NDArray[np.bool]) + +assert_type(AR_U.isalnum(), npt.NDArray[np.bool]) +assert_type(AR_S.isalnum(), npt.NDArray[np.bool]) + +assert_type(AR_U.isdecimal(), npt.NDArray[np.bool]) +assert_type(AR_S.isdecimal(), npt.NDArray[np.bool]) + +assert_type(AR_U.isdigit(), npt.NDArray[np.bool]) +assert_type(AR_S.isdigit(), npt.NDArray[np.bool]) + +assert_type(AR_U.islower(), npt.NDArray[np.bool]) +assert_type(AR_S.islower(), npt.NDArray[np.bool]) + +assert_type(AR_U.isnumeric(), npt.NDArray[np.bool]) +assert_type(AR_S.isnumeric(), npt.NDArray[np.bool]) + +assert_type(AR_U.isspace(), npt.NDArray[np.bool]) +assert_type(AR_S.isspace(), npt.NDArray[np.bool]) + +assert_type(AR_U.istitle(), npt.NDArray[np.bool]) +assert_type(AR_S.istitle(), npt.NDArray[np.bool]) + +assert_type(AR_U.isupper(), npt.NDArray[np.bool]) +assert_type(AR_S.isupper(), npt.NDArray[np.bool]) + +assert_type(AR_U.__array_finalize__(object()), None) +assert_type(AR_S.__array_finalize__(object()), None) diff --git a/python/numpy/typing/tests/data/reveal/comparisons.pyi b/python/numpy/typing/tests/data/reveal/comparisons.pyi new file mode 100644 index 000000000..2165d17fc --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/comparisons.pyi @@ -0,0 +1,264 @@ +import decimal +import fractions +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +c16 = np.complex128() +f8 = np.float64() +i8 = np.int64() +u8 = np.uint64() + +c8 = np.complex64() +f4 = np.float32() +i4 = np.int32() +u4 = np.uint32() + +dt = np.datetime64(0, "D") +td = np.timedelta64(0, "D") + +b_ = np.bool() + +b = bool() +c = complex() +f = float() +i = int() + +AR = np.array([0], dtype=np.int64) +AR.setflags(write=False) + +SEQ = (0, 1, 2, 3, 4) + +# object-like comparisons + +assert_type(i8 > fractions.Fraction(1, 5), np.bool) +assert_type(i8 > [fractions.Fraction(1, 5)], npt.NDArray[np.bool]) +assert_type(i8 > decimal.Decimal("1.5"), np.bool) +assert_type(i8 > [decimal.Decimal("1.5")], npt.NDArray[np.bool]) + +# Time structures + +assert_type(dt > dt, np.bool) + +assert_type(td > td, np.bool) +assert_type(td > i, np.bool) +assert_type(td > i4, np.bool) +assert_type(td > i8, np.bool) + +assert_type(td > AR, npt.NDArray[np.bool]) +assert_type(td > SEQ, npt.NDArray[np.bool]) +assert_type(AR > SEQ, npt.NDArray[np.bool]) +assert_type(AR > td, npt.NDArray[np.bool]) +assert_type(SEQ > td, npt.NDArray[np.bool]) +assert_type(SEQ > AR, npt.NDArray[np.bool]) + +# boolean + +assert_type(b_ > b, np.bool) +assert_type(b_ > b_, np.bool) +assert_type(b_ > i, np.bool) +assert_type(b_ > i8, np.bool) +assert_type(b_ > i4, np.bool) +assert_type(b_ > u8, np.bool) +assert_type(b_ > u4, np.bool) +assert_type(b_ > f, np.bool) +assert_type(b_ > f8, np.bool) 
+assert_type(b_ > f4, np.bool) +assert_type(b_ > c, np.bool) +assert_type(b_ > c16, np.bool) +assert_type(b_ > c8, np.bool) +assert_type(b_ > AR, npt.NDArray[np.bool]) +assert_type(b_ > SEQ, npt.NDArray[np.bool]) + +# Complex + +assert_type(c16 > c16, np.bool) +assert_type(c16 > f8, np.bool) +assert_type(c16 > i8, np.bool) +assert_type(c16 > c8, np.bool) +assert_type(c16 > f4, np.bool) +assert_type(c16 > i4, np.bool) +assert_type(c16 > b_, np.bool) +assert_type(c16 > b, np.bool) +assert_type(c16 > c, np.bool) +assert_type(c16 > f, np.bool) +assert_type(c16 > i, np.bool) +assert_type(c16 > AR, npt.NDArray[np.bool]) +assert_type(c16 > SEQ, npt.NDArray[np.bool]) + +assert_type(c16 > c16, np.bool) +assert_type(f8 > c16, np.bool) +assert_type(i8 > c16, np.bool) +assert_type(c8 > c16, np.bool) +assert_type(f4 > c16, np.bool) +assert_type(i4 > c16, np.bool) +assert_type(b_ > c16, np.bool) +assert_type(b > c16, np.bool) +assert_type(c > c16, np.bool) +assert_type(f > c16, np.bool) +assert_type(i > c16, np.bool) +assert_type(AR > c16, npt.NDArray[np.bool]) +assert_type(SEQ > c16, npt.NDArray[np.bool]) + +assert_type(c8 > c16, np.bool) +assert_type(c8 > f8, np.bool) +assert_type(c8 > i8, np.bool) +assert_type(c8 > c8, np.bool) +assert_type(c8 > f4, np.bool) +assert_type(c8 > i4, np.bool) +assert_type(c8 > b_, np.bool) +assert_type(c8 > b, np.bool) +assert_type(c8 > c, np.bool) +assert_type(c8 > f, np.bool) +assert_type(c8 > i, np.bool) +assert_type(c8 > AR, npt.NDArray[np.bool]) +assert_type(c8 > SEQ, npt.NDArray[np.bool]) + +assert_type(c16 > c8, np.bool) +assert_type(f8 > c8, np.bool) +assert_type(i8 > c8, np.bool) +assert_type(c8 > c8, np.bool) +assert_type(f4 > c8, np.bool) +assert_type(i4 > c8, np.bool) +assert_type(b_ > c8, np.bool) +assert_type(b > c8, np.bool) +assert_type(c > c8, np.bool) +assert_type(f > c8, np.bool) +assert_type(i > c8, np.bool) +assert_type(AR > c8, npt.NDArray[np.bool]) +assert_type(SEQ > c8, npt.NDArray[np.bool]) + +# Float + +assert_type(f8 > f8, np.bool) +assert_type(f8 > i8, np.bool) +assert_type(f8 > f4, np.bool) +assert_type(f8 > i4, np.bool) +assert_type(f8 > b_, np.bool) +assert_type(f8 > b, np.bool) +assert_type(f8 > c, np.bool) +assert_type(f8 > f, np.bool) +assert_type(f8 > i, np.bool) +assert_type(f8 > AR, npt.NDArray[np.bool]) +assert_type(f8 > SEQ, npt.NDArray[np.bool]) + +assert_type(f8 > f8, np.bool) +assert_type(i8 > f8, np.bool) +assert_type(f4 > f8, np.bool) +assert_type(i4 > f8, np.bool) +assert_type(b_ > f8, np.bool) +assert_type(b > f8, np.bool) +assert_type(c > f8, np.bool) +assert_type(f > f8, np.bool) +assert_type(i > f8, np.bool) +assert_type(AR > f8, npt.NDArray[np.bool]) +assert_type(SEQ > f8, npt.NDArray[np.bool]) + +assert_type(f4 > f8, np.bool) +assert_type(f4 > i8, np.bool) +assert_type(f4 > f4, np.bool) +assert_type(f4 > i4, np.bool) +assert_type(f4 > b_, np.bool) +assert_type(f4 > b, np.bool) +assert_type(f4 > c, np.bool) +assert_type(f4 > f, np.bool) +assert_type(f4 > i, np.bool) +assert_type(f4 > AR, npt.NDArray[np.bool]) +assert_type(f4 > SEQ, npt.NDArray[np.bool]) + +assert_type(f8 > f4, np.bool) +assert_type(i8 > f4, np.bool) +assert_type(f4 > f4, np.bool) +assert_type(i4 > f4, np.bool) +assert_type(b_ > f4, np.bool) +assert_type(b > f4, np.bool) +assert_type(c > f4, np.bool) +assert_type(f > f4, np.bool) +assert_type(i > f4, np.bool) +assert_type(AR > f4, npt.NDArray[np.bool]) +assert_type(SEQ > f4, npt.NDArray[np.bool]) + +# Int + +assert_type(i8 > i8, np.bool) +assert_type(i8 > u8, np.bool) +assert_type(i8 > i4, np.bool) 
+assert_type(i8 > u4, np.bool) +assert_type(i8 > b_, np.bool) +assert_type(i8 > b, np.bool) +assert_type(i8 > c, np.bool) +assert_type(i8 > f, np.bool) +assert_type(i8 > i, np.bool) +assert_type(i8 > AR, npt.NDArray[np.bool]) +assert_type(i8 > SEQ, npt.NDArray[np.bool]) + +assert_type(u8 > u8, np.bool) +assert_type(u8 > i4, np.bool) +assert_type(u8 > u4, np.bool) +assert_type(u8 > b_, np.bool) +assert_type(u8 > b, np.bool) +assert_type(u8 > c, np.bool) +assert_type(u8 > f, np.bool) +assert_type(u8 > i, np.bool) +assert_type(u8 > AR, npt.NDArray[np.bool]) +assert_type(u8 > SEQ, npt.NDArray[np.bool]) + +assert_type(i8 > i8, np.bool) +assert_type(u8 > i8, np.bool) +assert_type(i4 > i8, np.bool) +assert_type(u4 > i8, np.bool) +assert_type(b_ > i8, np.bool) +assert_type(b > i8, np.bool) +assert_type(c > i8, np.bool) +assert_type(f > i8, np.bool) +assert_type(i > i8, np.bool) +assert_type(AR > i8, npt.NDArray[np.bool]) +assert_type(SEQ > i8, npt.NDArray[np.bool]) + +assert_type(u8 > u8, np.bool) +assert_type(i4 > u8, np.bool) +assert_type(u4 > u8, np.bool) +assert_type(b_ > u8, np.bool) +assert_type(b > u8, np.bool) +assert_type(c > u8, np.bool) +assert_type(f > u8, np.bool) +assert_type(i > u8, np.bool) +assert_type(AR > u8, npt.NDArray[np.bool]) +assert_type(SEQ > u8, npt.NDArray[np.bool]) + +assert_type(i4 > i8, np.bool) +assert_type(i4 > i4, np.bool) +assert_type(i4 > i, np.bool) +assert_type(i4 > b_, np.bool) +assert_type(i4 > b, np.bool) +assert_type(i4 > AR, npt.NDArray[np.bool]) +assert_type(i4 > SEQ, npt.NDArray[np.bool]) + +assert_type(u4 > i8, np.bool) +assert_type(u4 > i4, np.bool) +assert_type(u4 > u8, np.bool) +assert_type(u4 > u4, np.bool) +assert_type(u4 > i, np.bool) +assert_type(u4 > b_, np.bool) +assert_type(u4 > b, np.bool) +assert_type(u4 > AR, npt.NDArray[np.bool]) +assert_type(u4 > SEQ, npt.NDArray[np.bool]) + +assert_type(i8 > i4, np.bool) +assert_type(i4 > i4, np.bool) +assert_type(i > i4, np.bool) +assert_type(b_ > i4, np.bool) +assert_type(b > i4, np.bool) +assert_type(AR > i4, npt.NDArray[np.bool]) +assert_type(SEQ > i4, npt.NDArray[np.bool]) + +assert_type(i8 > u4, np.bool) +assert_type(i4 > u4, np.bool) +assert_type(u8 > u4, np.bool) +assert_type(u4 > u4, np.bool) +assert_type(b_ > u4, np.bool) +assert_type(b > u4, np.bool) +assert_type(i > u4, np.bool) +assert_type(AR > u4, npt.NDArray[np.bool]) +assert_type(SEQ > u4, npt.NDArray[np.bool]) diff --git a/python/numpy/typing/tests/data/reveal/constants.pyi b/python/numpy/typing/tests/data/reveal/constants.pyi new file mode 100644 index 000000000..d4474f46c --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/constants.pyi @@ -0,0 +1,14 @@ +from typing import Literal, assert_type + +import numpy as np + +assert_type(np.e, float) +assert_type(np.euler_gamma, float) +assert_type(np.inf, float) +assert_type(np.nan, float) +assert_type(np.pi, float) + +assert_type(np.little_endian, bool) + +assert_type(np.True_, np.bool[Literal[True]]) +assert_type(np.False_, np.bool[Literal[False]]) diff --git a/python/numpy/typing/tests/data/reveal/ctypeslib.pyi b/python/numpy/typing/tests/data/reveal/ctypeslib.pyi new file mode 100644 index 000000000..0564d725c --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/ctypeslib.pyi @@ -0,0 +1,81 @@ +import ctypes as ct +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy import ctypeslib + +AR_bool: npt.NDArray[np.bool] +AR_ubyte: npt.NDArray[np.ubyte] +AR_ushort: npt.NDArray[np.ushort] +AR_uintc: npt.NDArray[np.uintc] +AR_ulong: 
npt.NDArray[np.ulong] +AR_ulonglong: npt.NDArray[np.ulonglong] +AR_byte: npt.NDArray[np.byte] +AR_short: npt.NDArray[np.short] +AR_intc: npt.NDArray[np.intc] +AR_long: npt.NDArray[np.long] +AR_longlong: npt.NDArray[np.longlong] +AR_single: npt.NDArray[np.single] +AR_double: npt.NDArray[np.double] +AR_longdouble: npt.NDArray[np.longdouble] +AR_void: npt.NDArray[np.void] + +pointer: ct._Pointer[Any] + +assert_type(np.ctypeslib.c_intp(), ctypeslib.c_intp) + +assert_type(np.ctypeslib.ndpointer(), type[ctypeslib._ndptr[None]]) +assert_type(np.ctypeslib.ndpointer(dtype=np.float64), type[ctypeslib._ndptr[np.dtype[np.float64]]]) +assert_type(np.ctypeslib.ndpointer(dtype=float), type[ctypeslib._ndptr[np.dtype]]) +assert_type(np.ctypeslib.ndpointer(shape=(10, 3)), type[ctypeslib._ndptr[None]]) +assert_type(np.ctypeslib.ndpointer(np.int64, shape=(10, 3)), type[ctypeslib._concrete_ndptr[np.dtype[np.int64]]]) +assert_type(np.ctypeslib.ndpointer(int, shape=(1,)), type[np.ctypeslib._concrete_ndptr[np.dtype]]) + +assert_type(np.ctypeslib.as_ctypes_type(np.bool), type[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes_type(np.ubyte), type[ct.c_ubyte]) +assert_type(np.ctypeslib.as_ctypes_type(np.ushort), type[ct.c_ushort]) +assert_type(np.ctypeslib.as_ctypes_type(np.uintc), type[ct.c_uint]) +assert_type(np.ctypeslib.as_ctypes_type(np.byte), type[ct.c_byte]) +assert_type(np.ctypeslib.as_ctypes_type(np.short), type[ct.c_short]) +assert_type(np.ctypeslib.as_ctypes_type(np.intc), type[ct.c_int]) +assert_type(np.ctypeslib.as_ctypes_type(np.single), type[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes_type(np.double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type(ct.c_double), type[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes_type("q"), type[ct.c_longlong]) +assert_type(np.ctypeslib.as_ctypes_type([("i8", np.int64), ("f8", np.float64)]), type[Any]) +assert_type(np.ctypeslib.as_ctypes_type("i8"), type[Any]) +assert_type(np.ctypeslib.as_ctypes_type("f8"), type[Any]) + +assert_type(np.ctypeslib.as_ctypes(AR_bool.take(0)), ct.c_bool) +assert_type(np.ctypeslib.as_ctypes(AR_ubyte.take(0)), ct.c_ubyte) +assert_type(np.ctypeslib.as_ctypes(AR_ushort.take(0)), ct.c_ushort) +assert_type(np.ctypeslib.as_ctypes(AR_uintc.take(0)), ct.c_uint) + +assert_type(np.ctypeslib.as_ctypes(AR_byte.take(0)), ct.c_byte) +assert_type(np.ctypeslib.as_ctypes(AR_short.take(0)), ct.c_short) +assert_type(np.ctypeslib.as_ctypes(AR_intc.take(0)), ct.c_int) +assert_type(np.ctypeslib.as_ctypes(AR_single.take(0)), ct.c_float) +assert_type(np.ctypeslib.as_ctypes(AR_double.take(0)), ct.c_double) +assert_type(np.ctypeslib.as_ctypes(AR_void.take(0)), Any) +assert_type(np.ctypeslib.as_ctypes(AR_bool), ct.Array[ct.c_bool]) +assert_type(np.ctypeslib.as_ctypes(AR_ubyte), ct.Array[ct.c_ubyte]) +assert_type(np.ctypeslib.as_ctypes(AR_ushort), ct.Array[ct.c_ushort]) +assert_type(np.ctypeslib.as_ctypes(AR_uintc), ct.Array[ct.c_uint]) +assert_type(np.ctypeslib.as_ctypes(AR_byte), ct.Array[ct.c_byte]) +assert_type(np.ctypeslib.as_ctypes(AR_short), ct.Array[ct.c_short]) +assert_type(np.ctypeslib.as_ctypes(AR_intc), ct.Array[ct.c_int]) +assert_type(np.ctypeslib.as_ctypes(AR_single), ct.Array[ct.c_float]) +assert_type(np.ctypeslib.as_ctypes(AR_double), ct.Array[ct.c_double]) +assert_type(np.ctypeslib.as_ctypes(AR_void), ct.Array[Any]) + +assert_type(np.ctypeslib.as_array(AR_ubyte), npt.NDArray[np.ubyte]) +assert_type(np.ctypeslib.as_array(1), npt.NDArray[Any]) +assert_type(np.ctypeslib.as_array(pointer), npt.NDArray[Any]) + 
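# Editor's note: a minimal runnable sketch of the as_ctypes/as_array round trip the
# asserts above describe; both directions are views over the same buffer.
import ctypes as ct

import numpy as np

a = np.zeros(3, dtype=np.double)
c_arr = np.ctypeslib.as_ctypes(a)  # ct.Array[ct.c_double], a view of `a`
b = np.ctypeslib.as_array(c_arr)   # back to an ndarray view of the same memory
b[0] = 1.0
assert a[0] == 1.0                 # the write is visible through every view
assert isinstance(c_arr, ct.Array)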
+assert_type(np.ctypeslib.as_ctypes_type(np.long), type[ct.c_long])
+assert_type(np.ctypeslib.as_ctypes_type(np.ulong), type[ct.c_ulong])
+assert_type(np.ctypeslib.as_ctypes(AR_ulong), ct.Array[ct.c_ulong])
+assert_type(np.ctypeslib.as_ctypes(AR_long), ct.Array[ct.c_long])
+assert_type(np.ctypeslib.as_ctypes(AR_long.take(0)), ct.c_long)
+assert_type(np.ctypeslib.as_ctypes(AR_ulong.take(0)), ct.c_ulong)
diff --git a/python/numpy/typing/tests/data/reveal/datasource.pyi b/python/numpy/typing/tests/data/reveal/datasource.pyi
new file mode 100644
index 000000000..9f017911a
--- /dev/null
+++ b/python/numpy/typing/tests/data/reveal/datasource.pyi
@@ -0,0 +1,23 @@
+from pathlib import Path
+from typing import IO, Any, assert_type
+
+import numpy as np
+
+path1: Path
+path2: str
+
+d1 = np.lib.npyio.DataSource(path1)
+d2 = np.lib.npyio.DataSource(path2)
+d3 = np.lib.npyio.DataSource(None)
+
+assert_type(d1.abspath("..."), str)
+assert_type(d2.abspath("..."), str)
+assert_type(d3.abspath("..."), str)
+
+assert_type(d1.exists("..."), bool)
+assert_type(d2.exists("..."), bool)
+assert_type(d3.exists("..."), bool)
+
+assert_type(d1.open("...", "r"), IO[Any])
+assert_type(d2.open("...", encoding="utf8"), IO[Any])
+assert_type(d3.open("...", newline="\n"), IO[Any])
diff --git a/python/numpy/typing/tests/data/reveal/dtype.pyi b/python/numpy/typing/tests/data/reveal/dtype.pyi
new file mode 100644
index 000000000..721d27087
--- /dev/null
+++ b/python/numpy/typing/tests/data/reveal/dtype.pyi
@@ -0,0 +1,136 @@
+import ctypes as ct
+import datetime as dt
+from decimal import Decimal
+from fractions import Fraction
+from typing import Any, Literal, LiteralString, TypeAlias, assert_type
+
+import numpy as np
+from numpy.dtypes import StringDType
+
+# a combination of likely `object` dtype-like candidates (no `_co`)
+_PyObjectLike: TypeAlias = Decimal | Fraction | dt.datetime | dt.timedelta
+
+dtype_U: np.dtype[np.str_]
+dtype_V: np.dtype[np.void]
+dtype_i8: np.dtype[np.int64]
+
+py_int_co: type[int]
+py_float_co: type[float]
+py_complex_co: type[complex]
+py_object: type[_PyObjectLike]
+py_character: type[str | bytes]
+py_flexible: type[str | bytes | memoryview]
+
+ct_floating: type[ct.c_float | ct.c_double | ct.c_longdouble]
+ct_number: type[ct.c_uint8 | ct.c_float]
+ct_generic: type[ct.c_bool | ct.c_char]
+
+# char-code unions; representative codes -- each union mixes at least two
+# concrete types, so np.dtype() resolves to the abstract overload asserted below
+cs_integer: Literal["u1", "h"]
+cs_number: Literal["u1", "f8"]
+cs_flex: Literal["V", "S"]
+cs_generic: Literal["H", "U", "h", "|M8[Y]", "?"]
+
+dt_inexact: np.dtype[np.inexact]
+dt_string: StringDType
+
+assert_type(np.dtype(np.float64), np.dtype[np.float64])
+assert_type(np.dtype(np.float64, metadata={"test": "test"}), np.dtype[np.float64])
+assert_type(np.dtype(np.int64), np.dtype[np.int64])
+
+# String aliases
+assert_type(np.dtype("float64"), np.dtype[np.float64])
+assert_type(np.dtype("float32"), np.dtype[np.float32])
+assert_type(np.dtype("int64"), np.dtype[np.int64])
+assert_type(np.dtype("int32"), np.dtype[np.int32])
+assert_type(np.dtype("bool"), np.dtype[np.bool])
+assert_type(np.dtype("bytes"), np.dtype[np.bytes_])
+assert_type(np.dtype("str"), np.dtype[np.str_])
+
+# Python types
+assert_type(np.dtype(bool), np.dtype[np.bool])
+assert_type(np.dtype(py_int_co), np.dtype[np.int_ | np.bool])
+assert_type(np.dtype(int), np.dtype[np.int_ | np.bool])
+assert_type(np.dtype(py_float_co), np.dtype[np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(float), np.dtype[np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(py_complex_co), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(complex), np.dtype[np.complex128 | np.float64 | np.int_ | np.bool])
+assert_type(np.dtype(py_object), np.dtype[np.object_])
+assert_type(np.dtype(str), np.dtype[np.str_])
+assert_type(np.dtype(bytes), np.dtype[np.bytes_])
+assert_type(np.dtype(py_character), np.dtype[np.character])
+assert_type(np.dtype(memoryview), np.dtype[np.void])
+assert_type(np.dtype(py_flexible), np.dtype[np.flexible])
+
+assert_type(np.dtype(list), np.dtype[np.object_])
+assert_type(np.dtype(dt.datetime), np.dtype[np.object_])
+assert_type(np.dtype(dt.timedelta), np.dtype[np.object_])
+assert_type(np.dtype(Decimal), np.dtype[np.object_])
+assert_type(np.dtype(Fraction), np.dtype[np.object_])
+
+# char-codes
+assert_type(np.dtype("?"), np.dtype[np.bool])
+assert_type(np.dtype("|b1"), np.dtype[np.bool])
+assert_type(np.dtype("u1"), np.dtype[np.uint8])
+assert_type(np.dtype("l"), np.dtype[np.long])
+assert_type(np.dtype("longlong"), np.dtype[np.longlong])
+assert_type(np.dtype(">g"), np.dtype[np.longdouble])
+assert_type(np.dtype(cs_integer), np.dtype[np.integer])
+assert_type(np.dtype(cs_number), np.dtype[np.number])
+assert_type(np.dtype(cs_flex), np.dtype[np.flexible])
+assert_type(np.dtype(cs_generic), np.dtype[np.generic])
+
+# ctypes
+assert_type(np.dtype(ct.c_double), np.dtype[np.double])
+assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong])
+assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32])
+assert_type(np.dtype(ct.c_bool), np.dtype[np.bool])
+assert_type(np.dtype(ct.c_char), np.dtype[np.bytes_])
+assert_type(np.dtype(ct.py_object), np.dtype[np.object_])
+
+# Special case for None
+assert_type(np.dtype(None), np.dtype[np.float64])
+
+# Dtypes of dtypes
+assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64])
+assert_type(np.dtype(dt_inexact), np.dtype[np.inexact])
+
+# Parameterized dtypes
+assert_type(np.dtype("S8"), np.dtype)
+
+# Void
+assert_type(np.dtype(("U", 10)), np.dtype[np.void])
+
+# StringDType
+assert_type(np.dtype(dt_string), StringDType)
+assert_type(np.dtype("T"), StringDType)
+assert_type(np.dtype("=T"), StringDType)
+assert_type(np.dtype("|T"), StringDType)
+
+# Methods and attributes
+assert_type(dtype_U.base, np.dtype)
+assert_type(dtype_U.subdtype, tuple[np.dtype, tuple[Any, ...]] | None)
+assert_type(dtype_U.newbyteorder(), np.dtype[np.str_])
+assert_type(dtype_U.type, type[np.str_])
+assert_type(dtype_U.name, LiteralString)
+assert_type(dtype_U.names, tuple[str, ...]
| None) + +assert_type(dtype_U * 0, np.dtype[np.str_]) +assert_type(dtype_U * 1, np.dtype[np.str_]) +assert_type(dtype_U * 2, np.dtype[np.str_]) + +assert_type(dtype_i8 * 0, np.dtype[np.void]) +assert_type(dtype_i8 * 1, np.dtype[np.int64]) +assert_type(dtype_i8 * 2, np.dtype[np.void]) + +assert_type(0 * dtype_U, np.dtype[np.str_]) +assert_type(1 * dtype_U, np.dtype[np.str_]) +assert_type(2 * dtype_U, np.dtype[np.str_]) + +assert_type(0 * dtype_i8, np.dtype) +assert_type(1 * dtype_i8, np.dtype) +assert_type(2 * dtype_i8, np.dtype) + +assert_type(dtype_V["f0"], np.dtype) +assert_type(dtype_V[0], np.dtype) +assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void]) +assert_type(dtype_V[["f0"]], np.dtype[np.void]) diff --git a/python/numpy/typing/tests/data/reveal/einsumfunc.pyi b/python/numpy/typing/tests/data/reveal/einsumfunc.pyi new file mode 100644 index 000000000..cc58f006e --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/einsumfunc.pyi @@ -0,0 +1,39 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_c: list[complex] +AR_LIKE_U: list[str] +AR_o: npt.NDArray[np.object_] + +OUT_f: npt.NDArray[np.float64] + +assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b), Any) +assert_type(np.einsum("i,i->i", AR_o, AR_o), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i), Any) +assert_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), Any) + +assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f), npt.NDArray[np.float64]) +assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f), npt.NDArray[np.float64]) +assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16"), Any) +assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe"), Any) + +assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i), tuple[list[Any], str]) +assert_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), tuple[list[Any], str]) + +assert_type(np.einsum([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), Any) +assert_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str]) diff --git a/python/numpy/typing/tests/data/reveal/emath.pyi b/python/numpy/typing/tests/data/reveal/emath.pyi new file mode 100644 index 000000000..1d7bff893 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/emath.pyi @@ -0,0 +1,54 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +f8: np.float64 +c16: np.complex128 + +assert_type(np.emath.sqrt(f8), Any) +assert_type(np.emath.sqrt(AR_f8), npt.NDArray[Any]) +assert_type(np.emath.sqrt(c16), np.complexfloating) 
+assert_type(np.emath.sqrt(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.log(f8), Any)
+assert_type(np.emath.log(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.log(c16), np.complexfloating)
+assert_type(np.emath.log(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.log10(f8), Any)
+assert_type(np.emath.log10(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.log10(c16), np.complexfloating)
+assert_type(np.emath.log10(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.log2(f8), Any)
+assert_type(np.emath.log2(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.log2(c16), np.complexfloating)
+assert_type(np.emath.log2(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.logn(f8, 2), Any)
+assert_type(np.emath.logn(AR_f8, 4), npt.NDArray[Any])
+assert_type(np.emath.logn(f8, 1j), np.complexfloating)
+assert_type(np.emath.logn(AR_c16, 1.5), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.power(f8, 2), Any)
+assert_type(np.emath.power(AR_f8, 4), npt.NDArray[Any])
+assert_type(np.emath.power(f8, 2j), np.complexfloating)
+assert_type(np.emath.power(AR_c16, 1.5), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.arccos(f8), Any)
+assert_type(np.emath.arccos(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.arccos(c16), np.complexfloating)
+assert_type(np.emath.arccos(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.arcsin(f8), Any)
+assert_type(np.emath.arcsin(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.arcsin(c16), np.complexfloating)
+assert_type(np.emath.arcsin(AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.emath.arctanh(f8), Any)
+assert_type(np.emath.arctanh(AR_f8), npt.NDArray[Any])
+assert_type(np.emath.arctanh(c16), np.complexfloating)
+assert_type(np.emath.arctanh(AR_c16), npt.NDArray[np.complexfloating])
diff --git a/python/numpy/typing/tests/data/reveal/fft.pyi b/python/numpy/typing/tests/data/reveal/fft.pyi
new file mode 100644
index 000000000..dacd2b897
--- /dev/null
+++ b/python/numpy/typing/tests/data/reveal/fft.pyi
@@ -0,0 +1,37 @@
+from typing import Any, assert_type
+
+import numpy as np
+import numpy.typing as npt
+
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_LIKE_f8: list[float]
+
+assert_type(np.fft.fftshift(AR_f8), npt.NDArray[np.float64])
+assert_type(np.fft.fftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any])
+
+assert_type(np.fft.ifftshift(AR_f8), npt.NDArray[np.float64])
+assert_type(np.fft.ifftshift(AR_LIKE_f8, axes=0), npt.NDArray[Any])
+
+assert_type(np.fft.fftfreq(5, AR_f8), npt.NDArray[np.floating])
+assert_type(np.fft.fftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.fft.rfftfreq(5, AR_f8), npt.NDArray[np.floating])
+assert_type(np.fft.rfftfreq(np.int64(), AR_c16), npt.NDArray[np.complexfloating])
+
+assert_type(np.fft.fft(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.ifft(AR_f8, axis=1), npt.NDArray[np.complex128])
+assert_type(np.fft.rfft(AR_f8, n=None), npt.NDArray[np.complex128])
+assert_type(np.fft.irfft(AR_f8, norm="ortho"), npt.NDArray[np.float64])
+assert_type(np.fft.hfft(AR_f8, n=2), npt.NDArray[np.float64])
+assert_type(np.fft.ihfft(AR_f8), npt.NDArray[np.complex128])
+
+assert_type(np.fft.fftn(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.ifftn(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.rfftn(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.irfftn(AR_f8), npt.NDArray[np.float64])
+
+assert_type(np.fft.rfft2(AR_f8), npt.NDArray[np.complex128])
+assert_type(np.fft.ifft2(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.fft2(AR_f8), npt.NDArray[np.complex128]) +assert_type(np.fft.irfft2(AR_f8), npt.NDArray[np.float64]) diff --git a/python/numpy/typing/tests/data/reveal/flatiter.pyi b/python/numpy/typing/tests/data/reveal/flatiter.pyi new file mode 100644 index 000000000..e188d30fe --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/flatiter.pyi @@ -0,0 +1,47 @@ +from typing import Literal, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +a: np.flatiter[npt.NDArray[np.str_]] +a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]] + +Size: TypeAlias = Literal[42] +a_1d_fixed: np.flatiter[np.ndarray[tuple[Size], np.dtype[np.object_]]] + +assert_type(a.base, npt.NDArray[np.str_]) +assert_type(a.copy(), npt.NDArray[np.str_]) +assert_type(a.coords, tuple[int, ...]) +assert_type(a.index, int) +assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]]) +assert_type(next(a), np.str_) +assert_type(a[0], np.str_) +assert_type(a[[0, 1, 2]], npt.NDArray[np.str_]) +assert_type(a[...], npt.NDArray[np.str_]) +assert_type(a[:], npt.NDArray[np.str_]) +assert_type(a[(...,)], npt.NDArray[np.str_]) +assert_type(a[(0,)], np.str_) + +assert_type(a.__array__(), npt.NDArray[np.str_]) +assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) +assert_type( + a_1d.__array__(), + np.ndarray[tuple[int], np.dtype[np.bytes_]], +) +assert_type( + a_1d.__array__(np.dtype(np.float64)), + np.ndarray[tuple[int], np.dtype[np.float64]], +) +assert_type( + a_1d_fixed.__array__(), + np.ndarray[tuple[Size], np.dtype[np.object_]], +) +assert_type( + a_1d_fixed.__array__(np.dtype(np.float64)), + np.ndarray[tuple[Size], np.dtype[np.float64]], +) + +a[0] = "a" +a[:5] = "a" +a[...] = "a" +a[(...,)] = "a" diff --git a/python/numpy/typing/tests/data/reveal/fromnumeric.pyi b/python/numpy/typing/tests/data/reveal/fromnumeric.pyi new file mode 100644 index 000000000..5438e001a --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -0,0 +1,347 @@ +"""Tests for :mod:`_core.fromnumeric`.""" + +from typing import Any, assert_type +from typing import Literal as L + +import numpy as np +import numpy.typing as npt + +class NDArraySubclass(npt.NDArray[np.complex128]): ... + +AR_b: npt.NDArray[np.bool] +AR_f4: npt.NDArray[np.float32] +AR_c16: npt.NDArray[np.complex128] +AR_u8: npt.NDArray[np.uint64] +AR_i8: npt.NDArray[np.int64] +AR_O: npt.NDArray[np.object_] +AR_subclass: NDArraySubclass +AR_m: npt.NDArray[np.timedelta64] +AR_0d: np.ndarray[tuple[()]] +AR_1d: np.ndarray[tuple[int]] +AR_nd: np.ndarray + +b: np.bool +f4: np.float32 +i8: np.int64 +f: float + +# integer‑dtype subclass for argmin/argmax +class NDArrayIntSubclass(npt.NDArray[np.intp]): ... 
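# Editor's note: the two ndarray subclasses above exist so the asserts below can
# check that functions accepting an out= argument are typed to return the subclass
# that was passed in (e.g. np.argmax(..., out=AR_sub_i) reveals NDArrayIntSubclass
# and np.take(..., out=AR_subclass) reveals NDArraySubclass) instead of widening
# the result to a plain ndarray.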
+AR_sub_i: NDArrayIntSubclass + +assert_type(np.take(b, 0), np.bool) +assert_type(np.take(f4, 0), np.float32) +assert_type(np.take(f, 0), Any) +assert_type(np.take(AR_b, 0), np.bool) +assert_type(np.take(AR_f4, 0), np.float32) +assert_type(np.take(AR_b, [0]), npt.NDArray[np.bool]) +assert_type(np.take(AR_f4, [0]), npt.NDArray[np.float32]) +assert_type(np.take([1], [0]), npt.NDArray[Any]) +assert_type(np.take(AR_f4, [0], out=AR_subclass), NDArraySubclass) + +assert_type(np.reshape(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.reshape(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.reshape(f, 1), np.ndarray[tuple[int], np.dtype]) +assert_type(np.reshape(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.reshape(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.choose(1, [True, True]), Any) +assert_type(np.choose([1], [True, True]), npt.NDArray[Any]) +assert_type(np.choose([1], AR_b), npt.NDArray[np.bool]) +assert_type(np.choose([1], AR_b, out=AR_f4), npt.NDArray[np.float32]) + +assert_type(np.repeat(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(b, 1, axis=0), npt.NDArray[np.bool]) +assert_type(np.repeat(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(f, 1), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.repeat(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(AR_f4, 1, axis=0), npt.NDArray[np.float32]) + +# TODO: array_bdd tests for np.put() + +assert_type(np.swapaxes([[0, 1]], 0, 0), npt.NDArray[Any]) +assert_type(np.swapaxes(AR_b, 0, 0), npt.NDArray[np.bool]) +assert_type(np.swapaxes(AR_f4, 0, 0), npt.NDArray[np.float32]) + +assert_type(np.transpose(b), npt.NDArray[np.bool]) +assert_type(np.transpose(f4), npt.NDArray[np.float32]) +assert_type(np.transpose(f), npt.NDArray[Any]) +assert_type(np.transpose(AR_b), npt.NDArray[np.bool]) +assert_type(np.transpose(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.partition(b, 0, axis=None), npt.NDArray[np.bool]) +assert_type(np.partition(f4, 0, axis=None), npt.NDArray[np.float32]) +assert_type(np.partition(f, 0, axis=None), npt.NDArray[Any]) +assert_type(np.partition(AR_b, 0), npt.NDArray[np.bool]) +assert_type(np.partition(AR_f4, 0), npt.NDArray[np.float32]) + +assert_type(np.argpartition(b, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(f4, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(f, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(AR_b, 0), npt.NDArray[np.intp]) +assert_type(np.argpartition(AR_f4, 0), npt.NDArray[np.intp]) + +assert_type(np.sort([2, 1], 0), npt.NDArray[Any]) +assert_type(np.sort(AR_b, 0), npt.NDArray[np.bool]) +assert_type(np.sort(AR_f4, 0), npt.NDArray[np.float32]) + +assert_type(np.argsort(AR_b, 0), npt.NDArray[np.intp]) +assert_type(np.argsort(AR_f4, 0), npt.NDArray[np.intp]) + +assert_type(np.argmax(AR_b), np.intp) +assert_type(np.argmax(AR_f4), np.intp) +assert_type(np.argmax(AR_b, axis=0), Any) +assert_type(np.argmax(AR_f4, axis=0), Any) +assert_type(np.argmax(AR_f4, out=AR_sub_i), NDArrayIntSubclass) + +assert_type(np.argmin(AR_b), np.intp) +assert_type(np.argmin(AR_f4), np.intp) +assert_type(np.argmin(AR_b, axis=0), Any) +assert_type(np.argmin(AR_f4, axis=0), Any) +assert_type(np.argmin(AR_f4, out=AR_sub_i), NDArrayIntSubclass) + +assert_type(np.searchsorted(AR_b[0], 0), np.intp) +assert_type(np.searchsorted(AR_f4[0], 0), np.intp) 
+assert_type(np.searchsorted(AR_b[0], [0]), npt.NDArray[np.intp]) +assert_type(np.searchsorted(AR_f4[0], [0]), npt.NDArray[np.intp]) + +assert_type(np.resize(b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) +assert_type(np.resize(f, (5, 5)), np.ndarray[tuple[int, int], np.dtype]) +assert_type(np.resize(AR_b, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.bool]]) +assert_type(np.resize(AR_f4, (5, 5)), np.ndarray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(np.squeeze(b), np.bool) +assert_type(np.squeeze(f4), np.float32) +assert_type(np.squeeze(f), npt.NDArray[Any]) +assert_type(np.squeeze(AR_b), npt.NDArray[np.bool]) +assert_type(np.squeeze(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.diagonal(AR_b), npt.NDArray[np.bool]) +assert_type(np.diagonal(AR_f4), npt.NDArray[np.float32]) + +assert_type(np.trace(AR_b), Any) +assert_type(np.trace(AR_f4), Any) +assert_type(np.trace(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ravel(b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.ravel(f4), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.ravel(f), np.ndarray[tuple[int], np.dtype[np.float64 | np.int_ | np.bool]]) +assert_type(np.ravel(AR_b), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.ravel(AR_f4), np.ndarray[tuple[int], np.dtype[np.float32]]) + +assert_type(np.nonzero(AR_b), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_f4), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_1d), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.nonzero(AR_nd), tuple[npt.NDArray[np.intp], ...]) + +assert_type(np.shape(b), tuple[()]) +assert_type(np.shape(f), tuple[()]) +assert_type(np.shape([1]), tuple[int]) +assert_type(np.shape([[2]]), tuple[int, int]) +assert_type(np.shape([[[3]]]), tuple[Any, ...]) +assert_type(np.shape(AR_b), tuple[Any, ...]) +assert_type(np.shape(AR_nd), tuple[Any, ...]) +# these fail on mypy, but it works as expected with pyright/pylance +# assert_type(np.shape(AR_0d), tuple[()]) +# assert_type(np.shape(AR_1d), tuple[int]) +# assert_type(np.shape(AR_2d), tuple[int, int]) + +assert_type(np.compress([True], b), npt.NDArray[np.bool]) +assert_type(np.compress([True], f4), npt.NDArray[np.float32]) +assert_type(np.compress([True], f), npt.NDArray[Any]) +assert_type(np.compress([True], AR_b), npt.NDArray[np.bool]) +assert_type(np.compress([True], AR_f4), npt.NDArray[np.float32]) + +assert_type(np.clip(b, 0, 1.0), np.bool) +assert_type(np.clip(f4, -1, 1), np.float32) +assert_type(np.clip(f, 0, 1), Any) +assert_type(np.clip(AR_b, 0, 1), npt.NDArray[np.bool]) +assert_type(np.clip(AR_f4, 0, 1), npt.NDArray[np.float32]) +assert_type(np.clip([0], 0, 1), npt.NDArray[Any]) +assert_type(np.clip(AR_b, 0, 1, out=AR_subclass), NDArraySubclass) + +assert_type(np.sum(b), np.bool) +assert_type(np.sum(f4), np.float32) +assert_type(np.sum(f), Any) +assert_type(np.sum(AR_b), np.bool) +assert_type(np.sum(AR_f4), np.float32) +assert_type(np.sum(AR_b, axis=0), Any) +assert_type(np.sum(AR_f4, axis=0), Any) +assert_type(np.sum(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.sum(AR_f4, dtype=np.float64), np.float64) +assert_type(np.sum(AR_f4, None, np.float64), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, None, np.float64, keepdims=False), np.float64) +assert_type(np.sum(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) 
+assert_type(np.sum(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64]) + +assert_type(np.all(b), np.bool) +assert_type(np.all(f4), np.bool) +assert_type(np.all(f), np.bool) +assert_type(np.all(AR_b), np.bool) +assert_type(np.all(AR_f4), np.bool) +assert_type(np.all(AR_b, axis=0), Any) +assert_type(np.all(AR_f4, axis=0), Any) +assert_type(np.all(AR_b, keepdims=True), Any) +assert_type(np.all(AR_f4, keepdims=True), Any) +assert_type(np.all(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.any(b), np.bool) +assert_type(np.any(f4), np.bool) +assert_type(np.any(f), np.bool) +assert_type(np.any(AR_b), np.bool) +assert_type(np.any(AR_f4), np.bool) +assert_type(np.any(AR_b, axis=0), Any) +assert_type(np.any(AR_f4, axis=0), Any) +assert_type(np.any(AR_b, keepdims=True), Any) +assert_type(np.any(AR_f4, keepdims=True), Any) +assert_type(np.any(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumsum(b), npt.NDArray[np.bool]) +assert_type(np.cumsum(f4), npt.NDArray[np.float32]) +assert_type(np.cumsum(f), npt.NDArray[Any]) +assert_type(np.cumsum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumsum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.cumulative_sum(b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f), npt.NDArray[Any]) +assert_type(np.cumulative_sum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_sum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.ptp(b), np.bool) +assert_type(np.ptp(f4), np.float32) +assert_type(np.ptp(f), Any) +assert_type(np.ptp(AR_b), np.bool) +assert_type(np.ptp(AR_f4), np.float32) +assert_type(np.ptp(AR_b, axis=0), Any) +assert_type(np.ptp(AR_f4, axis=0), Any) +assert_type(np.ptp(AR_b, keepdims=True), Any) +assert_type(np.ptp(AR_f4, keepdims=True), Any) +assert_type(np.ptp(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.amax(b), np.bool) +assert_type(np.amax(f4), np.float32) +assert_type(np.amax(f), Any) +assert_type(np.amax(AR_b), np.bool) +assert_type(np.amax(AR_f4), np.float32) +assert_type(np.amax(AR_b, axis=0), Any) +assert_type(np.amax(AR_f4, axis=0), Any) +assert_type(np.amax(AR_b, keepdims=True), Any) +assert_type(np.amax(AR_f4, keepdims=True), Any) +assert_type(np.amax(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.amin(b), np.bool) +assert_type(np.amin(f4), np.float32) +assert_type(np.amin(f), Any) +assert_type(np.amin(AR_b), np.bool) +assert_type(np.amin(AR_f4), np.float32) +assert_type(np.amin(AR_b, axis=0), Any) +assert_type(np.amin(AR_f4, axis=0), Any) +assert_type(np.amin(AR_b, keepdims=True), Any) +assert_type(np.amin(AR_f4, keepdims=True), Any) +assert_type(np.amin(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.prod(AR_b), np.int_) +assert_type(np.prod(AR_u8), np.uint64) +assert_type(np.prod(AR_i8), np.int64) +assert_type(np.prod(AR_f4), np.floating) +assert_type(np.prod(AR_c16), np.complexfloating) +assert_type(np.prod(AR_O), Any) +assert_type(np.prod(AR_f4, axis=0), Any) +assert_type(np.prod(AR_f4, keepdims=True), Any) +assert_type(np.prod(AR_f4, dtype=np.float64), np.float64) 
+assert_type(np.prod(AR_f4, dtype=float), Any)
+assert_type(np.prod(AR_f4, out=AR_subclass), NDArraySubclass)
+
+assert_type(np.cumprod(AR_b), npt.NDArray[np.int_])
+assert_type(np.cumprod(AR_u8), npt.NDArray[np.uint64])
+assert_type(np.cumprod(AR_i8), npt.NDArray[np.int64])
+assert_type(np.cumprod(AR_f4), npt.NDArray[np.floating])
+assert_type(np.cumprod(AR_c16), npt.NDArray[np.complexfloating])
+assert_type(np.cumprod(AR_O), npt.NDArray[np.object_])
+assert_type(np.cumprod(AR_f4, axis=0), npt.NDArray[np.floating])
+assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64])
+assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any])
+assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass)
+
+assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_])
+assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64])
+assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64])
+assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating])
+assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating])
+assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_])
+assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating])
+assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64])
+assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any])
+assert_type(np.cumulative_prod(AR_f4, out=AR_subclass), NDArraySubclass)
+
+assert_type(np.ndim(b), int)
+assert_type(np.ndim(f4), int)
+assert_type(np.ndim(f), int)
+assert_type(np.ndim(AR_b), int)
+assert_type(np.ndim(AR_f4), int)
+
+assert_type(np.size(b), int)
+assert_type(np.size(f4), int)
+assert_type(np.size(f), int)
+assert_type(np.size(AR_b), int)
+assert_type(np.size(AR_f4), int)
+
+assert_type(np.around(b), np.float16)
+assert_type(np.around(f), Any)
+assert_type(np.around(i8), np.int64)
+assert_type(np.around(f4), np.float32)
+assert_type(np.around(AR_b), npt.NDArray[np.float16])
+assert_type(np.around(AR_i8), npt.NDArray[np.int64])
+assert_type(np.around(AR_f4), npt.NDArray[np.float32])
+assert_type(np.around([1.5]), npt.NDArray[Any])
+assert_type(np.around(AR_f4, out=AR_subclass), NDArraySubclass)
+
+assert_type(np.mean(AR_b), np.floating)
+assert_type(np.mean(AR_i8), np.floating)
+assert_type(np.mean(AR_f4), np.floating)
+assert_type(np.mean(AR_m), np.timedelta64)
+assert_type(np.mean(AR_c16), np.complexfloating)
+assert_type(np.mean(AR_O), Any)
+assert_type(np.mean(AR_f4, axis=0), Any)
+assert_type(np.mean(AR_f4, keepdims=True), Any)
+assert_type(np.mean(AR_f4, dtype=float), Any)
+assert_type(np.mean(AR_f4, dtype=np.float64), np.float64)
+assert_type(np.mean(AR_f4, out=AR_subclass), NDArraySubclass)
+assert_type(np.mean(AR_f4, None, np.float64), np.float64)
+assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=False), np.float64)
+assert_type(np.mean(AR_f4, None, np.float64, keepdims=False), np.float64)
+assert_type(np.mean(AR_f4, dtype=np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64])
+assert_type(np.mean(AR_f4, None, np.float64, keepdims=True), np.float64 | npt.NDArray[np.float64])
+
+assert_type(np.std(AR_b), np.floating)
+assert_type(np.std(AR_i8), np.floating)
+assert_type(np.std(AR_f4), np.floating)
+assert_type(np.std(AR_c16), np.floating)
+assert_type(np.std(AR_O), Any)
+assert_type(np.std(AR_f4, axis=0), Any)
+assert_type(np.std(AR_f4, keepdims=True), Any)
+assert_type(np.std(AR_f4, dtype=float), Any)
+assert_type(np.std(AR_f4, dtype=np.float64), np.float64)
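+# As with sum and mean above, an explicit out= argument propagates the array subclass through the reduction.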
+assert_type(np.std(AR_f4, out=AR_subclass), NDArraySubclass) + +assert_type(np.var(AR_b), np.floating) +assert_type(np.var(AR_i8), np.floating) +assert_type(np.var(AR_f4), np.floating) +assert_type(np.var(AR_c16), np.floating) +assert_type(np.var(AR_O), Any) +assert_type(np.var(AR_f4, axis=0), Any) +assert_type(np.var(AR_f4, keepdims=True), Any) +assert_type(np.var(AR_f4, dtype=float), Any) +assert_type(np.var(AR_f4, dtype=np.float64), np.float64) +assert_type(np.var(AR_f4, out=AR_subclass), NDArraySubclass) diff --git a/python/numpy/typing/tests/data/reveal/getlimits.pyi b/python/numpy/typing/tests/data/reveal/getlimits.pyi new file mode 100644 index 000000000..825daba43 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/getlimits.pyi @@ -0,0 +1,51 @@ +from typing import Any, LiteralString, assert_type + +import numpy as np +from numpy._typing import _64Bit + +f: float +f8: np.float64 +c8: np.complex64 + +i: int +i8: np.int64 +u4: np.uint32 + +finfo_f8: np.finfo[np.float64] +iinfo_i8: np.iinfo[np.int64] + +assert_type(np.finfo(f), np.finfo[np.float64]) +assert_type(np.finfo(f8), np.finfo[np.floating[_64Bit]]) +assert_type(np.finfo(c8), np.finfo[np.float32]) +assert_type(np.finfo('f2'), np.finfo[np.floating]) + +assert_type(finfo_f8.dtype, np.dtype[np.float64]) +assert_type(finfo_f8.bits, int) +assert_type(finfo_f8.eps, np.float64) +assert_type(finfo_f8.epsneg, np.float64) +assert_type(finfo_f8.iexp, int) +assert_type(finfo_f8.machep, int) +assert_type(finfo_f8.max, np.float64) +assert_type(finfo_f8.maxexp, int) +assert_type(finfo_f8.min, np.float64) +assert_type(finfo_f8.minexp, int) +assert_type(finfo_f8.negep, int) +assert_type(finfo_f8.nexp, int) +assert_type(finfo_f8.nmant, int) +assert_type(finfo_f8.precision, int) +assert_type(finfo_f8.resolution, np.float64) +assert_type(finfo_f8.tiny, np.float64) +assert_type(finfo_f8.smallest_normal, np.float64) +assert_type(finfo_f8.smallest_subnormal, np.float64) + +assert_type(np.iinfo(i), np.iinfo[np.int_]) +assert_type(np.iinfo(i8), np.iinfo[np.int64]) +assert_type(np.iinfo(u4), np.iinfo[np.uint32]) +assert_type(np.iinfo('i2'), np.iinfo[Any]) + +assert_type(iinfo_i8.dtype, np.dtype[np.int64]) +assert_type(iinfo_i8.kind, LiteralString) +assert_type(iinfo_i8.bits, int) +assert_type(iinfo_i8.key, LiteralString) +assert_type(iinfo_i8.min, int) +assert_type(iinfo_i8.max, int) diff --git a/python/numpy/typing/tests/data/reveal/histograms.pyi b/python/numpy/typing/tests/data/reveal/histograms.pyi new file mode 100644 index 000000000..c1c63d59c --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/histograms.pyi @@ -0,0 +1,25 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +assert_type(np.histogram_bin_edges(AR_i8, bins="auto"), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="rice", range=(0, 3)), npt.NDArray[Any]) +assert_type(np.histogram_bin_edges(AR_i8, bins="scott", weights=AR_f8), npt.NDArray[Any]) + +assert_type(np.histogram(AR_i8, bins="auto"), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="rice", range=(0, 3)), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_i8, bins="scott", weights=AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.histogram(AR_f8, bins=1, density=True), tuple[npt.NDArray[Any], npt.NDArray[Any]]) + +assert_type(np.histogramdd(AR_i8, bins=[1]), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) 
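+# The remaining histogramdd variants share the same return shape: a counts array plus a tuple of per-dimension bin edges.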
+assert_type(np.histogramdd(AR_i8, range=[(0, 3)]), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_i8, weights=AR_f8), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) +assert_type(np.histogramdd(AR_f8, density=True), + tuple[npt.NDArray[Any], tuple[npt.NDArray[Any], ...]]) diff --git a/python/numpy/typing/tests/data/reveal/index_tricks.pyi b/python/numpy/typing/tests/data/reveal/index_tricks.pyi new file mode 100644 index 000000000..f6067c3be --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -0,0 +1,70 @@ +from types import EllipsisType +from typing import Any, Literal, assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_U: list[str] +AR_LIKE_O: list[object] + +AR_i8: npt.NDArray[np.int64] +AR_O: npt.NDArray[np.object_] + +assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) +assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) +assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) +assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[Any]) + +assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[Any, ...], np.int64]) +assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[Any, ...], np.float64]) +assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[Any, ...], np.str_]) +assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[Any, ...], Any]) + +assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) +assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) +assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) +assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[Any]) + +assert_type(np.ndindex(1, 2, 3), np.ndindex) +assert_type(np.ndindex((1, 2, 3)), np.ndindex) +assert_type(iter(np.ndindex(1, 2, 3)), np.ndindex) +assert_type(next(np.ndindex(1, 2, 3)), tuple[Any, ...]) + +assert_type(np.unravel_index([22, 41, 37], (7, 6)), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.unravel_index([31, 41, 13], (7, 6), order="F"), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.unravel_index(1621, (6, 7, 8, 9)), tuple[np.intp, ...]) + +assert_type(np.ravel_multi_index([[1]], (7, 6)), npt.NDArray[np.intp]) +assert_type(np.ravel_multi_index(AR_LIKE_i, (7, 6)), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (7, 6), order="F"), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (4, 6), mode="clip"), np.intp) +assert_type(np.ravel_multi_index(AR_LIKE_i, (4, 4), mode=("clip", "wrap")), np.intp) +assert_type(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), np.intp) + +assert_type(np.mgrid[1:1:2], npt.NDArray[Any]) +assert_type(np.mgrid[1:1:2, None:10], npt.NDArray[Any]) + +assert_type(np.ogrid[1:1:2], tuple[npt.NDArray[Any], ...]) +assert_type(np.ogrid[1:1:2, None:10], tuple[npt.NDArray[Any], ...]) + +assert_type(np.index_exp[0:1], tuple[slice[int, int, None]]) +assert_type(np.index_exp[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.index_exp[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) + +assert_type(np.s_[0:1], slice[int, int, None]) +assert_type(np.s_[0:1, None:3], tuple[slice[int, int, None], slice[None, int, None]]) +assert_type(np.s_[0, 0:1, ..., [0, 1, 3]], tuple[Literal[0], slice[int, int, None], EllipsisType, list[int]]) + +assert_type(np.ix_(AR_LIKE_b), tuple[npt.NDArray[np.bool], ...]) +assert_type(np.ix_(AR_LIKE_i, AR_LIKE_f), 
tuple[npt.NDArray[np.float64], ...]) +assert_type(np.ix_(AR_i8), tuple[npt.NDArray[np.int64], ...]) + +assert_type(np.fill_diagonal(AR_i8, 5), None) + +assert_type(np.diag_indices(4), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.diag_indices(2, 3), tuple[npt.NDArray[np.int_], ...]) + +assert_type(np.diag_indices_from(AR_i8), tuple[npt.NDArray[np.int_], ...]) diff --git a/python/numpy/typing/tests/data/reveal/lib_function_base.pyi b/python/numpy/typing/tests/data/reveal/lib_function_base.pyi new file mode 100644 index 000000000..3ce8d3752 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -0,0 +1,213 @@ +from collections.abc import Callable +from fractions import Fraction +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +vectorized_func: np.vectorize + +f8: np.float64 +AR_LIKE_f8: list[float] +AR_LIKE_c16: list[complex] +AR_LIKE_O: list[Fraction] + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] +AR_O: npt.NDArray[np.object_] +AR_b: npt.NDArray[np.bool] +AR_U: npt.NDArray[np.str_] +CHAR_AR_U: np.char.chararray[tuple[Any, ...], np.dtype[np.str_]] + +AR_b_list: list[npt.NDArray[np.bool]] + +def func( + a: npt.NDArray[Any], + posarg: bool = ..., + /, + arg: int = ..., + *, + kwarg: str = ..., +) -> npt.NDArray[Any]: ... + +assert_type(vectorized_func.pyfunc, Callable[..., Any]) +assert_type(vectorized_func.cache, bool) +assert_type(vectorized_func.signature, str | None) +assert_type(vectorized_func.otypes, str | None) +assert_type(vectorized_func.excluded, set[int | str]) +assert_type(vectorized_func.__doc__, str | None) +assert_type(vectorized_func([1]), Any) +assert_type(np.vectorize(int), np.vectorize) +assert_type( + np.vectorize(int, otypes="i", doc="doc", excluded=(), cache=True, signature=None), + np.vectorize, +) + +assert_type(np.rot90(AR_f8, k=2), npt.NDArray[np.float64]) +assert_type(np.rot90(AR_LIKE_f8, axes=(0, 1)), npt.NDArray[Any]) + +assert_type(np.flip(f8), np.float64) +assert_type(np.flip(1.0), Any) +assert_type(np.flip(AR_f8, axis=(0, 1)), npt.NDArray[np.float64]) +assert_type(np.flip(AR_LIKE_f8, axis=0), npt.NDArray[Any]) + +assert_type(np.iterable(1), bool) +assert_type(np.iterable([1]), bool) + +assert_type(np.average(AR_f8), np.floating) +assert_type(np.average(AR_f8, weights=AR_c16), np.complexfloating) +assert_type(np.average(AR_O), Any) +assert_type(np.average(AR_f8, returned=True), tuple[np.floating, np.floating]) +assert_type(np.average(AR_f8, weights=AR_c16, returned=True), tuple[np.complexfloating, np.complexfloating]) +assert_type(np.average(AR_O, returned=True), tuple[Any, Any]) +assert_type(np.average(AR_f8, axis=0), Any) +assert_type(np.average(AR_f8, axis=0, returned=True), tuple[Any, Any]) + +assert_type(np.asarray_chkfinite(AR_f8), npt.NDArray[np.float64]) +assert_type(np.asarray_chkfinite(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.asarray_chkfinite(AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.piecewise(AR_f8, AR_b, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func]), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, -1, kwarg=''), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_f8, AR_b_list, [func], True, arg=-1, kwarg=''), npt.NDArray[np.float64]) +assert_type(np.piecewise(AR_LIKE_f8, 
AR_b_list, [func]), npt.NDArray[Any]) + +assert_type(np.select([AR_f8], [AR_f8]), npt.NDArray[Any]) + +assert_type(np.copy(AR_LIKE_f8), npt.NDArray[Any]) +assert_type(np.copy(AR_U), npt.NDArray[np.str_]) +assert_type(np.copy(CHAR_AR_U), np.ndarray[Any, Any]) # pyright correctly infers `NDArray[str_]` +assert_type(np.copy(CHAR_AR_U, "K", subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) +assert_type(np.copy(CHAR_AR_U, subok=True), np.char.chararray[tuple[Any, ...], np.dtype[np.str_]]) + +assert_type(np.gradient(AR_f8, axis=None), Any) +assert_type(np.gradient(AR_LIKE_f8, edge_order=2), Any) + +assert_type(np.diff("bob", n=0), str) +assert_type(np.diff(AR_f8, axis=0), npt.NDArray[Any]) +assert_type(np.diff(AR_LIKE_f8, prepend=1.5), npt.NDArray[Any]) + +assert_type(np.interp(1, [1], AR_f8), np.float64) +assert_type(np.interp(1, [1], [1]), np.float64) +assert_type(np.interp(1, [1], AR_c16), np.complex128) +assert_type(np.interp(1, [1], [1j]), np.complex128) # pyright correctly infers `complex128 | float64` +assert_type(np.interp([1], [1], AR_f8), npt.NDArray[np.float64]) +assert_type(np.interp([1], [1], [1]), npt.NDArray[np.float64]) +assert_type(np.interp([1], [1], AR_c16), npt.NDArray[np.complex128]) +assert_type(np.interp([1], [1], [1j]), npt.NDArray[np.complex128]) # pyright correctly infers `NDArray[complex128 | float64]` + +assert_type(np.angle(f8), np.floating) +assert_type(np.angle(AR_f8), npt.NDArray[np.floating]) +assert_type(np.angle(AR_c16, deg=True), npt.NDArray[np.floating]) +assert_type(np.angle(AR_O), npt.NDArray[np.object_]) + +assert_type(np.unwrap(AR_f8), npt.NDArray[np.floating]) +assert_type(np.unwrap(AR_O), npt.NDArray[np.object_]) + +assert_type(np.sort_complex(AR_f8), npt.NDArray[np.complexfloating]) + +assert_type(np.trim_zeros(AR_f8), npt.NDArray[np.float64]) +assert_type(np.trim_zeros(AR_LIKE_f8), list[float]) + +assert_type(np.extract(AR_i8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.extract(AR_i8, AR_LIKE_f8), npt.NDArray[Any]) + +assert_type(np.place(AR_f8, mask=AR_i8, vals=5.0), None) + +assert_type(np.cov(AR_f8, bias=True), npt.NDArray[np.floating]) +assert_type(np.cov(AR_f8, AR_c16, ddof=1), npt.NDArray[np.complexfloating]) +assert_type(np.cov(AR_f8, aweights=AR_f8, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.cov(AR_f8, fweights=AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.corrcoef(AR_f8, rowvar=True), npt.NDArray[np.floating]) +assert_type(np.corrcoef(AR_f8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.corrcoef(AR_f8, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(np.corrcoef(AR_f8, dtype=float), npt.NDArray[Any]) + +assert_type(np.blackman(5), npt.NDArray[np.floating]) +assert_type(np.bartlett(6), npt.NDArray[np.floating]) +assert_type(np.hanning(4.5), npt.NDArray[np.floating]) +assert_type(np.hamming(0), npt.NDArray[np.floating]) +assert_type(np.i0(AR_i8), npt.NDArray[np.floating]) +assert_type(np.kaiser(4, 5.9), npt.NDArray[np.floating]) + +assert_type(np.sinc(1.0), np.floating) +assert_type(np.sinc(1j), np.complexfloating) +assert_type(np.sinc(AR_f8), npt.NDArray[np.floating]) +assert_type(np.sinc(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.median(AR_f8, keepdims=False), np.floating) +assert_type(np.median(AR_c16, overwrite_input=True), np.complexfloating) +assert_type(np.median(AR_m), np.timedelta64) +assert_type(np.median(AR_O), Any) +assert_type(np.median(AR_f8, keepdims=True), Any) +assert_type(np.median(AR_c16, axis=0), Any) +assert_type(np.median(AR_LIKE_f8, 
out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.percentile(AR_f8, 50), np.floating) +assert_type(np.percentile(AR_c16, 50), np.complexfloating) +assert_type(np.percentile(AR_m, 50), np.timedelta64) +assert_type(np.percentile(AR_M, 50, overwrite_input=True), np.datetime64) +assert_type(np.percentile(AR_O, 50), Any) +assert_type(np.percentile(AR_f8, [50]), npt.NDArray[np.floating]) +assert_type(np.percentile(AR_c16, [50]), npt.NDArray[np.complexfloating]) +assert_type(np.percentile(AR_m, [50]), npt.NDArray[np.timedelta64]) +assert_type(np.percentile(AR_M, [50], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.percentile(AR_O, [50]), npt.NDArray[np.object_]) +assert_type(np.percentile(AR_f8, [50], keepdims=True), Any) +assert_type(np.percentile(AR_f8, [50], axis=[1]), Any) +assert_type(np.percentile(AR_f8, [50], out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.quantile(AR_f8, 0.5), np.floating) +assert_type(np.quantile(AR_c16, 0.5), np.complexfloating) +assert_type(np.quantile(AR_m, 0.5), np.timedelta64) +assert_type(np.quantile(AR_M, 0.5, overwrite_input=True), np.datetime64) +assert_type(np.quantile(AR_O, 0.5), Any) +assert_type(np.quantile(AR_f8, [0.5]), npt.NDArray[np.floating]) +assert_type(np.quantile(AR_c16, [0.5]), npt.NDArray[np.complexfloating]) +assert_type(np.quantile(AR_m, [0.5]), npt.NDArray[np.timedelta64]) +assert_type(np.quantile(AR_M, [0.5], method="nearest"), npt.NDArray[np.datetime64]) +assert_type(np.quantile(AR_O, [0.5]), npt.NDArray[np.object_]) +assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) +assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) +assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) + +assert_type(np.trapezoid(AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_c16, AR_LIKE_f8), np.complex128) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_O), float) +assert_type(np.trapezoid(AR_LIKE_O, AR_LIKE_f8), float) +assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) +assert_type(np.trapezoid(AR_O), float | npt.NDArray[np.object_]) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), float | npt.NDArray[np.object_]) + +assert_type(np.meshgrid(), tuple[()]) +assert_type(np.meshgrid(AR_c16, indexing="ij"), tuple[npt.NDArray[np.complex128]]) +assert_type(np.meshgrid(AR_i8, AR_f8, copy=False), tuple[npt.NDArray[np.int64], npt.NDArray[np.float64]]) +assert_type(np.meshgrid(AR_LIKE_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[np.float64]]) +assert_type(np.meshgrid(AR_LIKE_f8, AR_i8, AR_c16), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.meshgrid(AR_f8, AR_f8, AR_f8, AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any], npt.NDArray[Any]]) +assert_type(np.meshgrid(*AR_LIKE_f8), tuple[npt.NDArray[Any], ...]) + +assert_type(np.delete(AR_f8, np.s_[:5]), npt.NDArray[np.float64]) +assert_type(np.delete(AR_LIKE_f8, [0, 4, 9], axis=0), npt.NDArray[Any]) + +assert_type(np.insert(AR_f8, np.s_[:5], 5), npt.NDArray[np.float64]) 
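+# As elsewhere in these tests, an untyped array-like argument degrades the revealed element type to Any.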
+assert_type(np.insert(AR_LIKE_f8, [0, 4, 9], [0.5, 9.2, 7], axis=0), npt.NDArray[Any]) + +assert_type(np.append(AR_f8, 5), npt.NDArray[Any]) +assert_type(np.append(AR_LIKE_f8, 1j, axis=0), npt.NDArray[Any]) + +assert_type(np.digitize(4.5, [1]), np.intp) +assert_type(np.digitize(AR_f8, [1, 2, 3]), npt.NDArray[np.intp]) diff --git a/python/numpy/typing/tests/data/reveal/lib_polynomial.pyi b/python/numpy/typing/tests/data/reveal/lib_polynomial.pyi new file mode 100644 index 000000000..8b0a9f3d2 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/lib_polynomial.pyi @@ -0,0 +1,144 @@ +from collections.abc import Iterator +from typing import Any, NoReturn, assert_type + +import numpy as np +import numpy.typing as npt + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +poly_obj: np.poly1d + +assert_type(poly_obj.variable, str) +assert_type(poly_obj.order, int) +assert_type(poly_obj.o, int) +assert_type(poly_obj.roots, npt.NDArray[Any]) +assert_type(poly_obj.r, npt.NDArray[Any]) +assert_type(poly_obj.coeffs, npt.NDArray[Any]) +assert_type(poly_obj.c, npt.NDArray[Any]) +assert_type(poly_obj.coef, npt.NDArray[Any]) +assert_type(poly_obj.coefficients, npt.NDArray[Any]) +assert_type(poly_obj.__hash__, None) + +assert_type(poly_obj(1), Any) +assert_type(poly_obj([1]), npt.NDArray[Any]) +assert_type(poly_obj(poly_obj), np.poly1d) + +assert_type(len(poly_obj), int) +assert_type(-poly_obj, np.poly1d) +assert_type(+poly_obj, np.poly1d) + +assert_type(poly_obj * 5, np.poly1d) +assert_type(5 * poly_obj, np.poly1d) +assert_type(poly_obj + 5, np.poly1d) +assert_type(5 + poly_obj, np.poly1d) +assert_type(poly_obj - 5, np.poly1d) +assert_type(5 - poly_obj, np.poly1d) +assert_type(poly_obj**1, np.poly1d) +assert_type(poly_obj**1.0, np.poly1d) +assert_type(poly_obj / 5, np.poly1d) +assert_type(5 / poly_obj, np.poly1d) + +assert_type(poly_obj[0], Any) +poly_obj[0] = 5 +assert_type(iter(poly_obj), Iterator[Any]) +assert_type(poly_obj.deriv(), np.poly1d) +assert_type(poly_obj.integ(), np.poly1d) + +assert_type(np.poly(poly_obj), npt.NDArray[np.floating]) +assert_type(np.poly(AR_f8), npt.NDArray[np.floating]) +assert_type(np.poly(AR_c16), npt.NDArray[np.floating]) + +assert_type(np.polyint(poly_obj), np.poly1d) +assert_type(np.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(np.polyder(poly_obj), np.poly1d) +assert_type(np.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(np.polyder(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(np.polyfit(AR_f8, AR_f8, 2), npt.NDArray[np.float64]) +assert_type( + np.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.polyfit(AR_u4, AR_f8, 1.0, cov="unscaled"), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type(np.polyfit(AR_c16, AR_f8, 2), npt.NDArray[np.complex128]) +assert_type( + np.polyfit(AR_f8, AR_c16, 1, full=True), + tuple[ + npt.NDArray[np.complex128], + npt.NDArray[np.float64], + npt.NDArray[np.int32], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.polyfit(AR_u4, AR_c16, 1.0, cov=True), + tuple[ 
+ npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) + +assert_type(np.polyval(AR_b, AR_b), npt.NDArray[np.int64]) +assert_type(np.polyval(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyval(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polyadd(poly_obj, AR_i8), np.poly1d) +assert_type(np.polyadd(AR_f8, poly_obj), np.poly1d) +assert_type(np.polyadd(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.polyadd(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polyadd(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polyadd(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polyadd(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polyadd(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polysub(poly_obj, AR_i8), np.poly1d) +assert_type(np.polysub(AR_f8, poly_obj), np.poly1d) +assert_type(np.polysub(AR_b, AR_b), NoReturn) +assert_type(np.polysub(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polysub(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polysub(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polysub(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polysub(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polymul(poly_obj, AR_i8), np.poly1d) +assert_type(np.polymul(AR_f8, poly_obj), np.poly1d) +assert_type(np.polymul(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.polymul(AR_u4, AR_b), npt.NDArray[np.unsignedinteger]) +assert_type(np.polymul(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.polymul(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(np.polymul(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.polymul(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.polydiv(poly_obj, AR_i8), tuple[np.poly1d, np.poly1d]) +assert_type(np.polydiv(AR_f8, poly_obj), tuple[np.poly1d, np.poly1d]) +assert_type(np.polydiv(AR_b, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_u4, AR_b), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_f8, AR_i8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating]]) +assert_type(np.polydiv(AR_i8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.complexfloating]]) +assert_type(np.polydiv(AR_O, AR_O), tuple[npt.NDArray[Any], npt.NDArray[Any]]) diff --git a/python/numpy/typing/tests/data/reveal/lib_utils.pyi b/python/numpy/typing/tests/data/reveal/lib_utils.pyi new file mode 100644 index 000000000..c9470e00a --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/lib_utils.pyi @@ -0,0 +1,17 @@ +from io import StringIO +from typing import assert_type + +import numpy as np +import numpy.lib.array_utils as array_utils +import numpy.typing as npt + +AR: npt.NDArray[np.float64] +AR_DICT: dict[str, npt.NDArray[np.float64]] +FILE: StringIO + +def func(a: int) -> bool: ... 
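+
+# byte_bounds gives the (low, high) memory addresses spanned by the array's data buffer.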
+ +assert_type(array_utils.byte_bounds(AR), tuple[int, int]) +assert_type(array_utils.byte_bounds(np.float64()), tuple[int, int]) + +assert_type(np.info(1, output=FILE), None) diff --git a/python/numpy/typing/tests/data/reveal/lib_version.pyi b/python/numpy/typing/tests/data/reveal/lib_version.pyi new file mode 100644 index 000000000..03735375a --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/lib_version.pyi @@ -0,0 +1,20 @@ +from typing import assert_type + +from numpy.lib import NumpyVersion + +version = NumpyVersion("1.8.0") + +assert_type(version.vstring, str) +assert_type(version.version, str) +assert_type(version.major, int) +assert_type(version.minor, int) +assert_type(version.bugfix, int) +assert_type(version.pre_release, str) +assert_type(version.is_devversion, bool) + +assert_type(version == version, bool) +assert_type(version != version, bool) +assert_type(version < "1.8.0", bool) +assert_type(version <= version, bool) +assert_type(version > version, bool) +assert_type(version >= "1.8.0", bool) diff --git a/python/numpy/typing/tests/data/reveal/linalg.pyi b/python/numpy/typing/tests/data/reveal/linalg.pyi new file mode 100644 index 000000000..fbaac3cfa --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/linalg.pyi @@ -0,0 +1,132 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.linalg._linalg import ( + EighResult, + EigResult, + QRResult, + SlogdetResult, + SVDResult, +) + +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] +AR_m: npt.NDArray[np.timedelta64] +AR_S: npt.NDArray[np.str_] +AR_b: npt.NDArray[np.bool] + +assert_type(np.linalg.tensorsolve(AR_i8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorsolve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.tensorsolve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.solve(AR_i8, AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.solve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.solve(AR_c16, AR_f8), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.tensorinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.tensorinv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.tensorinv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.inv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.inv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.inv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.matrix_power(AR_i8, -1), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_f8, 0), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_c16, 1), npt.NDArray[Any]) +assert_type(np.linalg.matrix_power(AR_O, 2), npt.NDArray[Any]) + +assert_type(np.linalg.cholesky(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.cholesky(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.cholesky(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.outer(AR_i8, AR_i8), npt.NDArray[np.int64]) +assert_type(np.linalg.outer(AR_f8, AR_f8), npt.NDArray[np.float64]) +assert_type(np.linalg.outer(AR_c16, AR_c16), npt.NDArray[np.complex128]) +assert_type(np.linalg.outer(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.linalg.outer(AR_O, AR_O), npt.NDArray[np.object_]) +assert_type(np.linalg.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) + +assert_type(np.linalg.qr(AR_i8), QRResult) +assert_type(np.linalg.qr(AR_f8), QRResult) 
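+# qr, like eig/eigh/svd further down, reveals its result namedtuple regardless of the input dtype.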
+assert_type(np.linalg.qr(AR_c16), QRResult) + +assert_type(np.linalg.eigvals(AR_i8), npt.NDArray[np.float64] | npt.NDArray[np.complex128]) +assert_type(np.linalg.eigvals(AR_f8), npt.NDArray[np.floating] | npt.NDArray[np.complexfloating]) +assert_type(np.linalg.eigvals(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.eigvalsh(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.eigvalsh(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.eigvalsh(AR_c16), npt.NDArray[np.floating]) + +assert_type(np.linalg.eig(AR_i8), EigResult) +assert_type(np.linalg.eig(AR_f8), EigResult) +assert_type(np.linalg.eig(AR_c16), EigResult) + +assert_type(np.linalg.eigh(AR_i8), EighResult) +assert_type(np.linalg.eigh(AR_f8), EighResult) +assert_type(np.linalg.eigh(AR_c16), EighResult) + +assert_type(np.linalg.svd(AR_i8), SVDResult) +assert_type(np.linalg.svd(AR_f8), SVDResult) +assert_type(np.linalg.svd(AR_c16), SVDResult) +assert_type(np.linalg.svd(AR_i8, compute_uv=False), npt.NDArray[np.float64]) +assert_type(np.linalg.svd(AR_f8, compute_uv=False), npt.NDArray[np.floating]) +assert_type(np.linalg.svd(AR_c16, compute_uv=False), npt.NDArray[np.floating]) + +assert_type(np.linalg.cond(AR_i8), Any) +assert_type(np.linalg.cond(AR_f8), Any) +assert_type(np.linalg.cond(AR_c16), Any) + +assert_type(np.linalg.matrix_rank(AR_i8), Any) +assert_type(np.linalg.matrix_rank(AR_f8), Any) +assert_type(np.linalg.matrix_rank(AR_c16), Any) + +assert_type(np.linalg.pinv(AR_i8), npt.NDArray[np.float64]) +assert_type(np.linalg.pinv(AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.pinv(AR_c16), npt.NDArray[np.complexfloating]) + +assert_type(np.linalg.slogdet(AR_i8), SlogdetResult) +assert_type(np.linalg.slogdet(AR_f8), SlogdetResult) +assert_type(np.linalg.slogdet(AR_c16), SlogdetResult) + +assert_type(np.linalg.det(AR_i8), Any) +assert_type(np.linalg.det(AR_f8), Any) +assert_type(np.linalg.det(AR_c16), Any) + +assert_type(np.linalg.lstsq(AR_i8, AR_i8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], np.int32, npt.NDArray[np.float64]]) +assert_type(np.linalg.lstsq(AR_i8, AR_f8), tuple[npt.NDArray[np.floating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) +assert_type(np.linalg.lstsq(AR_f8, AR_c16), tuple[npt.NDArray[np.complexfloating], npt.NDArray[np.floating], np.int32, npt.NDArray[np.floating]]) + +assert_type(np.linalg.norm(AR_i8), np.floating) +assert_type(np.linalg.norm(AR_f8), np.floating) +assert_type(np.linalg.norm(AR_c16), np.floating) +assert_type(np.linalg.norm(AR_S), np.floating) +assert_type(np.linalg.norm(AR_f8, axis=0), Any) + +assert_type(np.linalg.matrix_norm(AR_i8), np.floating) +assert_type(np.linalg.matrix_norm(AR_f8), np.floating) +assert_type(np.linalg.matrix_norm(AR_c16), np.floating) +assert_type(np.linalg.matrix_norm(AR_S), np.floating) + +assert_type(np.linalg.vector_norm(AR_i8), np.floating) +assert_type(np.linalg.vector_norm(AR_f8), np.floating) +assert_type(np.linalg.vector_norm(AR_c16), np.floating) +assert_type(np.linalg.vector_norm(AR_S), np.floating) + +assert_type(np.linalg.multi_dot([AR_i8, AR_i8]), Any) +assert_type(np.linalg.multi_dot([AR_i8, AR_f8]), Any) +assert_type(np.linalg.multi_dot([AR_f8, AR_c16]), Any) +assert_type(np.linalg.multi_dot([AR_O, AR_O]), Any) +assert_type(np.linalg.multi_dot([AR_m, AR_m]), Any) + +assert_type(np.linalg.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.linalg.cross(AR_f8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.linalg.cross(AR_c16, AR_c16), 
npt.NDArray[np.complexfloating])
+
+assert_type(np.linalg.matmul(AR_i8, AR_i8), npt.NDArray[np.int64])
+assert_type(np.linalg.matmul(AR_f8, AR_f8), npt.NDArray[np.float64])
+assert_type(np.linalg.matmul(AR_c16, AR_c16), npt.NDArray[np.complex128])
diff --git a/python/numpy/typing/tests/data/reveal/ma.pyi b/python/numpy/typing/tests/data/reveal/ma.pyi
new file mode 100644
index 000000000..2c65534ec
--- /dev/null
+++ b/python/numpy/typing/tests/data/reveal/ma.pyi
@@ -0,0 +1,369 @@
+from typing import Any, Literal, TypeAlias, TypeVar, assert_type
+
+import numpy as np
+from numpy import dtype, generic
+from numpy._typing import NDArray, _AnyShape
+
+_ScalarT = TypeVar("_ScalarT", bound=generic)
+MaskedArray: TypeAlias = np.ma.MaskedArray[_AnyShape, dtype[_ScalarT]]
+_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]]
+
+class MaskedArraySubclass(MaskedArray[np.complex128]): ...
+
+AR_b: NDArray[np.bool]
+AR_f4: NDArray[np.float32]
+AR_dt64: NDArray[np.datetime64]
+AR_td64: NDArray[np.timedelta64]
+AR_o: NDArray[np.object_]
+
+MAR_c16: MaskedArray[np.complex128]
+MAR_b: MaskedArray[np.bool]
+MAR_f4: MaskedArray[np.float32]
+MAR_f8: MaskedArray[np.float64]
+MAR_i8: MaskedArray[np.int64]
+MAR_dt64: MaskedArray[np.datetime64]
+MAR_td64: MaskedArray[np.timedelta64]
+MAR_o: MaskedArray[np.object_]
+MAR_s: MaskedArray[np.str_]
+MAR_byte: MaskedArray[np.bytes_]
+MAR_V: MaskedArray[np.void]
+
+MAR_subclass: MaskedArraySubclass
+
+MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype]
+MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]
+
+b: np.bool
+f4: np.float32
+f: float
+
+assert_type(MAR_1d.shape, tuple[int])
+
+assert_type(MAR_f4.dtype, np.dtype[np.float32])
+
+assert_type(int(MAR_i8), int)
+assert_type(float(MAR_f4), float)
+
+assert_type(np.ma.min(MAR_b), np.bool)
+assert_type(np.ma.min(MAR_f4), np.float32)
+assert_type(np.ma.min(MAR_b, axis=0), Any)
+assert_type(np.ma.min(MAR_f4, axis=0), Any)
+assert_type(np.ma.min(MAR_b, keepdims=True), Any)
+assert_type(np.ma.min(MAR_f4, keepdims=True), Any)
+assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.min(), np.bool)
+assert_type(MAR_f4.min(), np.float32)
+assert_type(MAR_b.min(axis=0), Any)
+assert_type(MAR_f4.min(axis=0), Any)
+assert_type(MAR_b.min(keepdims=True), Any)
+assert_type(MAR_f4.min(keepdims=True), Any)
+assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(np.ma.max(MAR_b), np.bool)
+assert_type(np.ma.max(MAR_f4), np.float32)
+assert_type(np.ma.max(MAR_b, axis=0), Any)
+assert_type(np.ma.max(MAR_f4, axis=0), Any)
+assert_type(np.ma.max(MAR_b, keepdims=True), Any)
+assert_type(np.ma.max(MAR_f4, keepdims=True), Any)
+assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.max(), np.bool)
+assert_type(MAR_f4.max(), np.float32)
+assert_type(MAR_b.max(axis=0), Any)
+assert_type(MAR_f4.max(axis=0), Any)
+assert_type(MAR_b.max(keepdims=True), Any)
+assert_type(MAR_f4.max(keepdims=True), Any)
+assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.max(0, MAR_subclass),
MaskedArraySubclass)
+assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(np.ma.ptp(MAR_b), np.bool)
+assert_type(np.ma.ptp(MAR_f4), np.float32)
+assert_type(np.ma.ptp(MAR_b, axis=0), Any)
+assert_type(np.ma.ptp(MAR_f4, axis=0), Any)
+assert_type(np.ma.ptp(MAR_b, keepdims=True), Any)
+assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any)
+assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.ptp(), np.bool)
+assert_type(MAR_f4.ptp(), np.float32)
+assert_type(MAR_b.ptp(axis=0), Any)
+assert_type(MAR_f4.ptp(axis=0), Any)
+assert_type(MAR_b.ptp(keepdims=True), Any)
+assert_type(MAR_f4.ptp(keepdims=True), Any)
+assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.argmin(), np.intp)
+assert_type(MAR_f4.argmin(), np.intp)
+assert_type(MAR_f4.argmin(fill_value=6.28318, keepdims=False), np.intp)
+assert_type(MAR_b.argmin(axis=0), Any)
+assert_type(MAR_f4.argmin(axis=0), Any)
+assert_type(MAR_b.argmin(keepdims=True), Any)
+assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(np.ma.argmin(MAR_b), np.intp)
+assert_type(np.ma.argmin(MAR_f4), np.intp)
+assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp)
+assert_type(np.ma.argmin(MAR_b, axis=0), Any)
+assert_type(np.ma.argmin(MAR_f4, axis=0), Any)
+assert_type(np.ma.argmin(MAR_b, keepdims=True), Any)
+assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.argmax(), np.intp)
+assert_type(MAR_f4.argmax(), np.intp)
+assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp)
+assert_type(MAR_b.argmax(axis=0), Any)
+assert_type(MAR_f4.argmax(axis=0), Any)
+assert_type(MAR_b.argmax(keepdims=True), Any)
+assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(np.ma.argmax(MAR_b), np.intp)
+assert_type(np.ma.argmax(MAR_f4), np.intp)
+assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp)
+assert_type(np.ma.argmax(MAR_b, axis=0), Any)
+assert_type(np.ma.argmax(MAR_f4, axis=0), Any)
+assert_type(np.ma.argmax(MAR_b, keepdims=True), Any)
+assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass)
+assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.all(), np.bool)
+assert_type(MAR_f4.all(), np.bool)
+assert_type(MAR_f4.all(keepdims=False), np.bool)
+assert_type(MAR_b.all(axis=0), np.bool | MaskedArray[np.bool])
+assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool])
+assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool])
+assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool])
+assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool])
+assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclass)
+assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclass)
+
+assert_type(MAR_b.any(), np.bool)
+assert_type(MAR_f4.any(), np.bool)
+assert_type(MAR_f4.any(keepdims=False), np.bool)
+assert_type(MAR_b.any(axis=0),
np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_f4.sort(), None) +assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) + +assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) +assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) + +assert_type(MAR_f8.take(0), np.float64) +assert_type(MAR_1d.take(0), Any) +assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclass) + +assert_type(np.ma.take(f, 0), Any) +assert_type(np.ma.take(f4, 0), np.float32) +assert_type(np.ma.take(MAR_f8, 0), np.float64) +assert_type(np.ma.take(AR_f4, 0), np.float32) +assert_type(np.ma.take(MAR_1d, 0), Any) +assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) +assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take([1], [0]), MaskedArray[Any]) +assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) + +assert_type(MAR_f4.partition(1), None) +assert_type(MAR_V.partition(1, axis=0, kind='introselect', order='K'), None) + +assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedArray[np.intp]) + +assert_type(np.ma.ndim(f4), int) +assert_type(np.ma.ndim(MAR_b), int) +assert_type(np.ma.ndim(AR_f4), int) + +assert_type(np.ma.size(b), int) +assert_type(np.ma.size(MAR_f4, axis=0), int) +assert_type(np.ma.size(AR_f4), int) + +assert_type(np.ma.is_masked(MAR_f4), bool) + +assert_type(MAR_f4.ids(), tuple[int, int]) + +assert_type(MAR_f4.iscontiguous(), bool) + +assert_type(MAR_f4 >= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 >= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o >= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d >= 0, MaskedArray[np.bool]) +assert_type(MAR_s >= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte >= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 > 3, MaskedArray[np.bool]) +assert_type(MAR_i8 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 > AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o > AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d > 0, MaskedArray[np.bool]) +assert_type(MAR_s > MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte > MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) 
+assert_type(MAR_o <= AR_o, MaskedArray[np.bool])
+assert_type(MAR_1d <= 0, MaskedArray[np.bool])
+assert_type(MAR_s <= MAR_s, MaskedArray[np.bool])
+assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool])
+
+assert_type(MAR_f4 < 3, MaskedArray[np.bool])
+assert_type(MAR_i8 < AR_td64, MaskedArray[np.bool])
+assert_type(MAR_b < AR_td64, MaskedArray[np.bool])
+assert_type(MAR_td64 < AR_td64, MaskedArray[np.bool])
+assert_type(MAR_dt64 < AR_dt64, MaskedArray[np.bool])
+assert_type(MAR_o < AR_o, MaskedArray[np.bool])
+assert_type(MAR_1d < 0, MaskedArray[np.bool])
+assert_type(MAR_s < MAR_s, MaskedArray[np.bool])
+assert_type(MAR_byte < MAR_byte, MaskedArray[np.bool])
+
+assert_type(MAR_byte.count(), int)
+assert_type(MAR_f4.count(axis=None), int)
+assert_type(MAR_f4.count(axis=0), NDArray[np.int_])
+assert_type(MAR_b.count(axis=(0,1)), NDArray[np.int_])
+assert_type(MAR_o.count(keepdims=True), NDArray[np.int_])
+assert_type(MAR_o.count(axis=None, keepdims=True), NDArray[np.int_])
+assert_type(MAR_o.count(None, True), NDArray[np.int_])
+
+assert_type(np.ma.count(MAR_byte), int)
+assert_type(np.ma.count(MAR_byte, axis=None), int)
+assert_type(np.ma.count(MAR_f4, axis=0), NDArray[np.int_])
+assert_type(np.ma.count(MAR_b, axis=(0,1)), NDArray[np.int_])
+assert_type(np.ma.count(MAR_o, keepdims=True), NDArray[np.int_])
+assert_type(np.ma.count(MAR_o, axis=None, keepdims=True), NDArray[np.int_])
+assert_type(np.ma.count(MAR_o, None, True), NDArray[np.int_])
+
+assert_type(MAR_f4.compressed(), np.ndarray[tuple[int], np.dtype[np.float32]])
+
+assert_type(np.ma.compressed(MAR_i8), np.ndarray[tuple[int], np.dtype[np.int64]])
+assert_type(np.ma.compressed([[1,2,3]]), np.ndarray[tuple[int], np.dtype])
+
+assert_type(MAR_f4.put([0,4,8], [10,20,30]), None)
+assert_type(MAR_f4.put(4, 999), None)
+assert_type(MAR_f4.put(4, 999, mode='clip'), None)
+
+assert_type(np.ma.put(MAR_f4, [0,4,8], [10,20,30]), None)
+assert_type(np.ma.put(MAR_f4, 4, 999), None)
+assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None)
+
+assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None)
+assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None)
+
+assert_type(MAR_f4.filled(float('nan')), NDArray[np.float32])
+assert_type(MAR_i8.filled(), NDArray[np.int64])
+assert_type(MAR_1d.filled(), np.ndarray[tuple[int], np.dtype])
+
+assert_type(np.ma.filled(MAR_f4, float('nan')), NDArray[np.float32])
+assert_type(np.ma.filled([[1,2,3]]), NDArray[Any])
+# PyRight detects this one correctly, but mypy doesn't.
+# https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 +assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] + +assert_type(MAR_b.repeat(3), np.ma.MaskedArray[tuple[int], np.dtype[np.bool]]) +assert_type(MAR_2d_f4.repeat(MAR_i8), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=None), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedArray[np.float32]) + +assert_type(np.ma.allequal(AR_f4, MAR_f4), bool) +assert_type(np.ma.allequal(AR_f4, MAR_f4, fill_value=False), bool) + +assert_type(np.ma.allclose(AR_f4, MAR_f4), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) +assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) + +assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_1d.ravel(order='A'), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) + +assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) +# PyRight detects this one correctly, but mypy doesn't: +# `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` +assert_type(np.ma.getmask(MAR_1d), np.ndarray[tuple[int], np.dtype[np.bool]] | np.bool) # type: ignore[assert-type] +assert_type(np.ma.getmask(MAR_2d_f4), np.ndarray[tuple[int, int], np.dtype[np.bool]] | np.bool) +assert_type(np.ma.getmask([1,2]), NDArray[np.bool] | np.bool) +assert_type(np.ma.getmask(np.int64(1)), np.bool) + +assert_type(np.ma.is_mask(MAR_1d), bool) +assert_type(np.ma.is_mask(AR_b), bool) + +def func(x: object) -> None: + if np.ma.is_mask(x): + assert_type(x, NDArray[np.bool]) + else: + assert_type(x, object) + +assert_type(MAR_2d_f4.mT, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_c16.real, MaskedArray[np.float64]) +assert_type(MAR_c16.imag, MaskedArray[np.float64]) + +assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) + +assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) + +assert_type(np.ma.nomask, np.bool[Literal[False]]) +assert_type(np.ma.MaskType, type[np.bool]) + +assert_type(MAR_1d.__setmask__([True, False]), None) +assert_type(MAR_1d.__setmask__(np.False_), None) + +assert_type(MAR_2d_f4.harden_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.harden_mask(), MaskedArray[np.int64]) +assert_type(MAR_2d_f4.soften_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.soften_mask(), MaskedArray[np.int64]) +assert_type(MAR_f4.unshare_mask(), MaskedArray[np.float32]) +assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) + +assert_type(MAR_i8.hardmask, bool) +assert_type(MAR_i8.sharedmask, bool) + +assert_type(MAR_b.transpose(), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_b.T, MaskedArray[np.bool]) +assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) +assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) diff --git a/python/numpy/typing/tests/data/reveal/matrix.pyi b/python/numpy/typing/tests/data/reveal/matrix.pyi new file 
mode 100644 index 000000000..1a7285d42 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/matrix.pyi @@ -0,0 +1,73 @@ +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.typing as npt + +_Shape2D: TypeAlias = tuple[int, int] + +mat: np.matrix[_Shape2D, np.dtype[np.int64]] +ar_f8: npt.NDArray[np.float64] +ar_ip: npt.NDArray[np.intp] + +assert_type(mat * 5, np.matrix[_Shape2D, Any]) +assert_type(5 * mat, np.matrix[_Shape2D, Any]) +mat *= 5 + +assert_type(mat**5, np.matrix[_Shape2D, Any]) +mat **= 5 + +assert_type(mat.sum(), Any) +assert_type(mat.mean(), Any) +assert_type(mat.std(), Any) +assert_type(mat.var(), Any) +assert_type(mat.prod(), Any) +assert_type(mat.any(), np.bool) +assert_type(mat.all(), np.bool) +assert_type(mat.max(), np.int64) +assert_type(mat.min(), np.int64) +assert_type(mat.argmax(), np.intp) +assert_type(mat.argmin(), np.intp) +assert_type(mat.ptp(), np.int64) + +assert_type(mat.sum(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.mean(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.std(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.var(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.prod(axis=0), np.matrix[_Shape2D, Any]) +assert_type(mat.any(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.all(axis=0), np.matrix[_Shape2D, np.dtype[np.bool]]) +assert_type(mat.max(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.min(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.argmax(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.argmin(axis=0), np.matrix[_Shape2D, np.dtype[np.intp]]) +assert_type(mat.ptp(axis=0), np.matrix[_Shape2D, np.dtype[np.int64]]) + +assert_type(mat.sum(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.mean(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.std(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.var(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.prod(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.any(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.all(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.max(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.min(out=ar_f8), npt.NDArray[np.float64]) +assert_type(mat.argmax(out=ar_ip), npt.NDArray[np.intp]) +assert_type(mat.argmin(out=ar_ip), npt.NDArray[np.intp]) +assert_type(mat.ptp(out=ar_f8), npt.NDArray[np.float64]) + +assert_type(mat.T, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.I, np.matrix[_Shape2D, Any]) +assert_type(mat.A, np.ndarray[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.A1, npt.NDArray[np.int64]) +assert_type(mat.H, np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getT(), np.matrix[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getI(), np.matrix[_Shape2D, Any]) +assert_type(mat.getA(), np.ndarray[_Shape2D, np.dtype[np.int64]]) +assert_type(mat.getA1(), npt.NDArray[np.int64]) +assert_type(mat.getH(), np.matrix[_Shape2D, np.dtype[np.int64]]) + +assert_type(np.bmat(ar_f8), np.matrix[_Shape2D, Any]) +assert_type(np.bmat([[0, 1, 2]]), np.matrix[_Shape2D, Any]) +assert_type(np.bmat("mat"), np.matrix[_Shape2D, Any]) + +assert_type(np.asmatrix(ar_f8, dtype=np.int64), np.matrix[_Shape2D, Any]) diff --git a/python/numpy/typing/tests/data/reveal/memmap.pyi b/python/numpy/typing/tests/data/reveal/memmap.pyi new file mode 100644 index 000000000..f3e20ed2d --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/memmap.pyi @@ -0,0 +1,19 @@ +from typing import Any, assert_type + +import numpy as np + 
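+# A str_-dtype memmap stub, used to reveal the attribute and method types below.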
+memmap_obj: np.memmap[Any, np.dtype[np.str_]] + +assert_type(np.memmap.__array_priority__, float) +assert_type(memmap_obj.__array_priority__, float) +assert_type(memmap_obj.filename, str | None) +assert_type(memmap_obj.offset, int) +assert_type(memmap_obj.mode, str) +assert_type(memmap_obj.flush(), None) + +assert_type(np.memmap("file.txt", offset=5), np.memmap[Any, np.dtype[np.uint8]]) +assert_type(np.memmap(b"file.txt", dtype=np.float64, shape=(10, 3)), np.memmap[Any, np.dtype[np.float64]]) +with open("file.txt", "rb") as f: + assert_type(np.memmap(f, dtype=float, order="K"), np.memmap[Any, np.dtype]) + +assert_type(memmap_obj.__array_finalize__(object()), None) diff --git a/python/numpy/typing/tests/data/reveal/mod.pyi b/python/numpy/typing/tests/data/reveal/mod.pyi new file mode 100644 index 000000000..ce74557d3 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/mod.pyi @@ -0,0 +1,179 @@ +import datetime as dt +from typing import Literal as L +from typing import assert_type + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +i8: np.int64 +u8: np.uint64 + +f4: np.float32 +i4: np.int32 +u4: np.uint32 + +m: np.timedelta64 +m_nat: np.timedelta64[None] +m_int0: np.timedelta64[L[0]] +m_int: np.timedelta64[int] +m_td: np.timedelta64[dt.timedelta] + +b_: np.bool + +b: bool +i: int +f: float + +AR_b: npt.NDArray[np.bool] +AR_m: npt.NDArray[np.timedelta64] + +# Time structures + +assert_type(m % m, np.timedelta64) +assert_type(m % m_nat, np.timedelta64[None]) +assert_type(m % m_int0, np.timedelta64[None]) +assert_type(m % m_int, np.timedelta64[int | None]) +assert_type(m_nat % m, np.timedelta64[None]) +assert_type(m_int % m_nat, np.timedelta64[None]) +assert_type(m_int % m_int0, np.timedelta64[None]) +assert_type(m_int % m_int, np.timedelta64[int | None]) +assert_type(m_int % m_td, np.timedelta64[int | None]) +assert_type(m_td % m_nat, np.timedelta64[None]) +assert_type(m_td % m_int0, np.timedelta64[None]) +assert_type(m_td % m_int, np.timedelta64[int | None]) +assert_type(m_td % m_td, np.timedelta64[dt.timedelta | None]) + +assert_type(AR_m % m, npt.NDArray[np.timedelta64]) +assert_type(m % AR_m, npt.NDArray[np.timedelta64]) + +assert_type(divmod(m, m), tuple[np.int64, np.timedelta64]) +assert_type(divmod(m, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m, m_int0), tuple[np.int64, np.timedelta64[None]]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(m.__divmod__(m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_nat, m), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_int, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_int, m_td), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_nat), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int0), tuple[np.int64, np.timedelta64[None]]) +assert_type(divmod(m_td, m_int), tuple[np.int64, np.timedelta64[int | None]]) +assert_type(divmod(m_td, m_td), tuple[np.int64, np.timedelta64[dt.timedelta | None]]) + +assert_type(divmod(AR_m, m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) +assert_type(divmod(m, AR_m), tuple[npt.NDArray[np.int64], npt.NDArray[np.timedelta64]]) + +# Bool + +assert_type(b_ % b, np.int8) +assert_type(b_ % i, np.int_) +assert_type(b_ % f, np.float64) +assert_type(b_ % b_, np.int8) 
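+# Mixing np.bool with wider scalar types follows the usual promotion rules: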
+assert_type(b_ % i8, np.int64) +assert_type(b_ % u8, np.uint64) +assert_type(b_ % f8, np.float64) +assert_type(b_ % AR_b, npt.NDArray[np.int8]) + +assert_type(divmod(b_, b), tuple[np.int8, np.int8]) +assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(b_.__divmod__(i), tuple[np.int_, np.int_]) +assert_type(b_.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(b_.__divmod__(i8), tuple[np.int64, np.int64]) +assert_type(b_.__divmod__(u8), tuple[np.uint64, np.uint64]) +assert_type(divmod(b_, f8), tuple[np.float64, np.float64]) +assert_type(divmod(b_, AR_b), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) + +assert_type(b % b_, np.int8) +assert_type(i % b_, np.int_) +assert_type(f % b_, np.float64) +assert_type(b_ % b_, np.int8) +assert_type(i8 % b_, np.int64) +assert_type(u8 % b_, np.uint64) +assert_type(f8 % b_, np.float64) +assert_type(AR_b % b_, npt.NDArray[np.int8]) + +assert_type(divmod(b, b_), tuple[np.int8, np.int8]) +assert_type(divmod(i, b_), tuple[np.int_, np.int_]) +assert_type(divmod(f, b_), tuple[np.float64, np.float64]) +assert_type(divmod(b_, b_), tuple[np.int8, np.int8]) +assert_type(divmod(i8, b_), tuple[np.int64, np.int64]) +assert_type(divmod(u8, b_), tuple[np.uint64, np.uint64]) +assert_type(divmod(f8, b_), tuple[np.float64, np.float64]) +assert_type(divmod(AR_b, b_), tuple[npt.NDArray[np.int8], npt.NDArray[np.int8]]) + +# int + +assert_type(i8 % b, np.int64) +assert_type(i8 % i8, np.int64) +assert_type(i8 % f, np.float64) +assert_type(i8 % f8, np.float64) +assert_type(i4 % i8, np.signedinteger) +assert_type(i4 % f8, np.float64) +assert_type(i4 % i4, np.int32) +assert_type(i4 % f4, np.floating) +assert_type(i8 % AR_b, npt.NDArray[np.int64]) + +assert_type(divmod(i8, b), tuple[np.int64, np.int64]) +assert_type(divmod(i8, i4), tuple[np.signedinteger, np.signedinteger]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(i8.__divmod__(f), tuple[np.float64, np.float64]) +assert_type(i8.__divmod__(f8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, f4), tuple[np.floating, np.floating]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +assert_type(divmod(i4, f4), tuple[np.floating, np.floating]) +assert_type(divmod(i8, AR_b), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) + +assert_type(b % i8, np.int64) +assert_type(f % i8, np.float64) +assert_type(i8 % i8, np.int64) +assert_type(f8 % i8, np.float64) +assert_type(i8 % i4, np.signedinteger) +assert_type(f8 % i4, np.float64) +assert_type(i4 % i4, np.int32) +assert_type(f4 % i4, np.floating) +assert_type(AR_b % i8, npt.NDArray[np.int64]) + +assert_type(divmod(b, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f, i8), tuple[np.float64, np.float64]) +assert_type(divmod(i8, i8), tuple[np.int64, np.int64]) +assert_type(divmod(f8, i8), tuple[np.float64, np.float64]) +assert_type(divmod(i4, i8), tuple[np.signedinteger, np.signedinteger]) +assert_type(divmod(i4, i4), tuple[np.int32, np.int32]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f4.__divmod__(i8), tuple[np.floating, np.floating]) +assert_type(f4.__divmod__(i4), tuple[np.floating, np.floating]) +assert_type(AR_b.__divmod__(i8), tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]) + +# float + +assert_type(f8 % b, np.float64) +assert_type(f8 % f, np.float64) +assert_type(i8 % f4, np.floating) +assert_type(f4 % f4, np.float32) +assert_type(f8 % 
AR_b, npt.NDArray[np.float64]) + +assert_type(divmod(f8, b), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f4), tuple[np.float64, np.float64]) +assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) +assert_type(divmod(f8, AR_b), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) + +assert_type(b % f8, np.float64) +assert_type(f % f8, np.float64) # pyright: ignore[reportAssertTypeFailure] # pyright incorrectly infers `builtins.float` +assert_type(f8 % f8, np.float64) +assert_type(f8 % f8, np.float64) +assert_type(f4 % f4, np.float32) +assert_type(AR_b % f8, npt.NDArray[np.float64]) + +assert_type(divmod(b, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f8, f8), tuple[np.float64, np.float64]) +assert_type(divmod(f4, f4), tuple[np.float32, np.float32]) +# workarounds for https://github.com/microsoft/pyright/issues/9663 +assert_type(f8.__rdivmod__(f), tuple[np.float64, np.float64]) +assert_type(f8.__rdivmod__(f4), tuple[np.float64, np.float64]) +assert_type(AR_b.__divmod__(f8), tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]) diff --git a/python/numpy/typing/tests/data/reveal/modules.pyi b/python/numpy/typing/tests/data/reveal/modules.pyi new file mode 100644 index 000000000..628fb500b --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/modules.pyi @@ -0,0 +1,51 @@ +import types +from typing import assert_type + +import numpy as np +from numpy import f2py + +assert_type(np, types.ModuleType) + +assert_type(np.char, types.ModuleType) +assert_type(np.ctypeslib, types.ModuleType) +assert_type(np.emath, types.ModuleType) +assert_type(np.fft, types.ModuleType) +assert_type(np.lib, types.ModuleType) +assert_type(np.linalg, types.ModuleType) +assert_type(np.ma, types.ModuleType) +assert_type(np.matrixlib, types.ModuleType) +assert_type(np.polynomial, types.ModuleType) +assert_type(np.random, types.ModuleType) +assert_type(np.rec, types.ModuleType) +assert_type(np.testing, types.ModuleType) +assert_type(np.version, types.ModuleType) +assert_type(np.exceptions, types.ModuleType) +assert_type(np.dtypes, types.ModuleType) + +assert_type(np.lib.format, types.ModuleType) +assert_type(np.lib.mixins, types.ModuleType) +assert_type(np.lib.scimath, types.ModuleType) +assert_type(np.lib.stride_tricks, types.ModuleType) +assert_type(np.ma.extras, types.ModuleType) +assert_type(np.polynomial.chebyshev, types.ModuleType) +assert_type(np.polynomial.hermite, types.ModuleType) +assert_type(np.polynomial.hermite_e, types.ModuleType) +assert_type(np.polynomial.laguerre, types.ModuleType) +assert_type(np.polynomial.legendre, types.ModuleType) +assert_type(np.polynomial.polynomial, types.ModuleType) + +assert_type(np.__path__, list[str]) +assert_type(np.__version__, str) +assert_type(np.test, np._pytesttester.PytestTester) +assert_type(np.test.module_name, str) + +assert_type(np.__all__, list[str]) +assert_type(np.char.__all__, list[str]) +assert_type(np.ctypeslib.__all__, list[str]) +assert_type(np.emath.__all__, list[str]) +assert_type(np.lib.__all__, list[str]) +assert_type(np.ma.__all__, list[str]) +assert_type(np.random.__all__, list[str]) +assert_type(np.rec.__all__, list[str]) +assert_type(np.testing.__all__, list[str]) +assert_type(f2py.__all__, list[str]) diff --git a/python/numpy/typing/tests/data/reveal/multiarray.pyi b/python/numpy/typing/tests/data/reveal/multiarray.pyi new file mode 100644 index 000000000..6ba3fcde6 --- /dev/null +++ 
b/python/numpy/typing/tests/data/reveal/multiarray.pyi @@ -0,0 +1,194 @@ +import datetime as dt +from typing import Any, Literal, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True) + +class SubClass(npt.NDArray[_ScalarT_co]): ... + +subclass: SubClass[np.float64] + +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_m: npt.NDArray[np.timedelta64] +AR_M: npt.NDArray[np.datetime64] + +AR_LIKE_f: list[float] +AR_LIKE_i: list[int] + +m: np.timedelta64 +M: np.datetime64 + +b_f8 = np.broadcast(AR_f8) +b_i8_f8_f8 = np.broadcast(AR_i8, AR_f8, AR_f8) + +nditer_obj: np.nditer + +date_scalar: dt.date +date_seq: list[dt.date] +timedelta_seq: list[dt.timedelta] + +n1: Literal[1] +n2: Literal[2] +n3: Literal[3] + +f8: np.float64 + +def func11(a: int) -> bool: ... +def func21(a: int, b: int) -> int: ... +def func12(a: int) -> tuple[complex, bool]: ... + +assert_type(next(b_f8), tuple[Any, ...]) +assert_type(b_f8.reset(), None) +assert_type(b_f8.index, int) +assert_type(b_f8.iters, tuple[np.flatiter[Any], ...]) +assert_type(b_f8.nd, int) +assert_type(b_f8.ndim, int) +assert_type(b_f8.numiter, int) +assert_type(b_f8.shape, tuple[Any, ...]) +assert_type(b_f8.size, int) + +assert_type(next(b_i8_f8_f8), tuple[Any, ...]) +assert_type(b_i8_f8_f8.reset(), None) +assert_type(b_i8_f8_f8.index, int) +assert_type(b_i8_f8_f8.iters, tuple[np.flatiter[Any], ...]) +assert_type(b_i8_f8_f8.nd, int) +assert_type(b_i8_f8_f8.ndim, int) +assert_type(b_i8_f8_f8.numiter, int) +assert_type(b_i8_f8_f8.shape, tuple[Any, ...]) +assert_type(b_i8_f8_f8.size, int) + +assert_type(np.inner(AR_f8, AR_i8), Any) + +assert_type(np.where([True, True, False]), tuple[npt.NDArray[np.intp], ...]) +assert_type(np.where([True, True, False], 1, 0), npt.NDArray[Any]) + +assert_type(np.lexsort([0, 1, 2]), Any) + +assert_type(np.can_cast(np.dtype("i8"), int), bool) +assert_type(np.can_cast(AR_f8, "f8"), bool) +assert_type(np.can_cast(AR_f8, np.complex128, casting="unsafe"), bool) + +assert_type(np.min_scalar_type([1]), np.dtype) +assert_type(np.min_scalar_type(AR_f8), np.dtype) + +assert_type(np.result_type(int, [1]), np.dtype) +assert_type(np.result_type(AR_f8, AR_u1), np.dtype) +assert_type(np.result_type(AR_f8, np.complex128), np.dtype) + +assert_type(np.dot(AR_LIKE_f, AR_i8), Any) +assert_type(np.dot(AR_u1, 1), Any) +assert_type(np.dot(1.5j, 1), Any) +assert_type(np.dot(AR_u1, 1, out=AR_f8), npt.NDArray[np.float64]) + +assert_type(np.vdot(AR_LIKE_f, AR_i8), np.floating) +assert_type(np.vdot(AR_u1, 1), np.signedinteger) +assert_type(np.vdot(1.5j, 1), np.complexfloating) + +assert_type(np.bincount(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.copyto(AR_f8, [1., 1.5, 1.6]), None) + +assert_type(np.putmask(AR_f8, [True, True, False], 1.5), None) + +assert_type(np.packbits(AR_i8), npt.NDArray[np.uint8]) +assert_type(np.packbits(AR_u1), npt.NDArray[np.uint8]) + +assert_type(np.unpackbits(AR_u1), npt.NDArray[np.uint8]) + +assert_type(np.shares_memory(1, 2), bool) +assert_type(np.shares_memory(AR_f8, AR_f8, max_work=1), bool) + +assert_type(np.may_share_memory(1, 2), bool) +assert_type(np.may_share_memory(AR_f8, AR_f8, max_work=1), bool) + +assert_type(np.promote_types(np.int32, np.int64), np.dtype) +assert_type(np.promote_types("f4", float), np.dtype) + +assert_type(np.frompyfunc(func11, n1, n1).nin, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func11, n1, 
n1).nargs, Literal[2]) +assert_type(np.frompyfunc(func11, n1, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func11, n1, n1).identity, None) +assert_type(np.frompyfunc(func11, n1, n1).signature, None) +assert_type(np.frompyfunc(func11, n1, n1)(f8), bool) +assert_type(np.frompyfunc(func11, n1, n1)(AR_f8), bool | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func11, n1, n1).at(AR_f8, AR_i8), None) + +assert_type(np.frompyfunc(func21, n2, n1).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1).identity, None) +assert_type(np.frompyfunc(func21, n2, n1).signature, None) +assert_type(np.frompyfunc(func21, n2, n1)(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1)(AR_f8, f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1)(f8, AR_f8), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduce(AR_f8, axis=0), int | npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).accumulate(AR_f8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).reduceat(AR_f8, AR_i8), npt.NDArray[np.object_]) +assert_type(np.frompyfunc(func21, n2, n1).outer(f8, f8), int) +assert_type(np.frompyfunc(func21, n2, n1).outer(AR_f8, f8), int | npt.NDArray[np.object_]) + +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nin, Literal[2]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nout, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).nargs, Literal[3]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).ntypes, Literal[1]) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).identity, int) +assert_type(np.frompyfunc(func21, n2, n1, identity=0).signature, None) + +assert_type(np.frompyfunc(func12, n1, n2).nin, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).nout, Literal[2]) +assert_type(np.frompyfunc(func12, n1, n2).nargs, int) +assert_type(np.frompyfunc(func12, n1, n2).ntypes, Literal[1]) +assert_type(np.frompyfunc(func12, n1, n2).identity, None) +assert_type(np.frompyfunc(func12, n1, n2).signature, None) +assert_type( + np.frompyfunc(func12, n2, n2)(f8, f8), + tuple[complex, complex, *tuple[complex, ...]], +) +assert_type( + np.frompyfunc(func12, n2, n2)(AR_f8, f8), + tuple[ + complex | npt.NDArray[np.object_], + complex | npt.NDArray[np.object_], + *tuple[complex | npt.NDArray[np.object_], ...], + ], +) + +assert_type(np.datetime_data("m8[D]"), tuple[str, int]) +assert_type(np.datetime_data(np.datetime64), tuple[str, int]) +assert_type(np.datetime_data(np.dtype(np.timedelta64)), tuple[str, int]) + +assert_type(np.busday_count("2011-01", "2011-02"), np.int_) +assert_type(np.busday_count(["2011-01"], "2011-02"), npt.NDArray[np.int_]) +assert_type(np.busday_count(["2011-01"], date_scalar), npt.NDArray[np.int_]) + +assert_type(np.busday_offset(M, m), np.datetime64) +assert_type(np.busday_offset(date_scalar, m), np.datetime64) +assert_type(np.busday_offset(M, 5), np.datetime64) +assert_type(np.busday_offset(AR_M, m), npt.NDArray[np.datetime64]) +assert_type(np.busday_offset(M, timedelta_seq), npt.NDArray[np.datetime64]) +assert_type(np.busday_offset("2011-01", "2011-02", roll="forward"), np.datetime64) +assert_type(np.busday_offset(["2011-01"], "2011-02", roll="forward"), npt.NDArray[np.datetime64]) + +assert_type(np.is_busday("2012"), np.bool) +assert_type(np.is_busday(date_scalar), np.bool) 
+assert_type(np.is_busday(["2012"]), npt.NDArray[np.bool]) + +assert_type(np.datetime_as_string(M), np.str_) +assert_type(np.datetime_as_string(AR_M), npt.NDArray[np.str_]) + +assert_type(np.busdaycalendar(holidays=date_seq), np.busdaycalendar) +assert_type(np.busdaycalendar(holidays=[M]), np.busdaycalendar) + +assert_type(np.char.compare_chararrays("a", "b", "!=", rstrip=False), npt.NDArray[np.bool]) +assert_type(np.char.compare_chararrays(b"a", b"a", "==", True), npt.NDArray[np.bool]) + +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], flags=["c_index"]), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_flags=[["readonly", "readonly"]]), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], op_dtypes=np.int_), tuple[np.nditer, ...]) +assert_type(np.nested_iters([AR_i8, AR_i8], [[0], [1]], order="C", casting="no"), tuple[np.nditer, ...]) diff --git a/python/numpy/typing/tests/data/reveal/nbit_base_example.pyi b/python/numpy/typing/tests/data/reveal/nbit_base_example.pyi new file mode 100644 index 000000000..33229660b --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/nbit_base_example.pyi @@ -0,0 +1,21 @@ +from typing import TypeVar, assert_type + +import numpy as np +import numpy.typing as npt +from numpy._typing import _32Bit, _64Bit + +T1 = TypeVar("T1", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] +T2 = TypeVar("T2", bound=npt.NBitBase) # type: ignore[deprecated] # pyright: ignore[reportDeprecated] + +def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]: + return a + b + +i8: np.int64 +i4: np.int32 +f8: np.float64 +f4: np.float32 + +assert_type(add(f8, i8), np.floating[_64Bit]) +assert_type(add(f4, i8), np.floating[_32Bit | _64Bit]) +assert_type(add(f8, i4), np.floating[_32Bit | _64Bit]) +assert_type(add(f4, i4), np.floating[_32Bit]) diff --git a/python/numpy/typing/tests/data/reveal/ndarray_assignability.pyi b/python/numpy/typing/tests/data/reveal/ndarray_assignability.pyi new file mode 100644 index 000000000..d754a9400 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/ndarray_assignability.pyi @@ -0,0 +1,77 @@ +from typing import Protocol, TypeAlias, TypeVar, assert_type + +import numpy as np +from numpy._typing import _64Bit + +_T = TypeVar("_T") +_T_co = TypeVar("_T_co", covariant=True) + +class CanAbs(Protocol[_T_co]): + def __abs__(self, /) -> _T_co: ... + +class CanInvert(Protocol[_T_co]): + def __invert__(self, /) -> _T_co: ... + +class CanNeg(Protocol[_T_co]): + def __neg__(self, /) -> _T_co: ... + +class CanPos(Protocol[_T_co]): + def __pos__(self, /) -> _T_co: ... + +def do_abs(x: CanAbs[_T]) -> _T: ... +def do_invert(x: CanInvert[_T]) -> _T: ... +def do_neg(x: CanNeg[_T]) -> _T: ... +def do_pos(x: CanPos[_T]) -> _T: ... 
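The Can* protocols and do_* helpers above let the checker reveal which type each unary operator returns for a given array. A self-contained runtime sketch of the same structural-typing pattern (SupportsNeg and negate are illustrative names mirroring CanNeg/do_neg above):

from typing import Protocol, TypeVar

_T = TypeVar("_T")
_T2_co = TypeVar("_T2_co", covariant=True)

class SupportsNeg(Protocol[_T2_co]):
    def __neg__(self, /) -> _T2_co: ...

def negate(x: SupportsNeg[_T]) -> _T:
    # Any object whose __neg__ returns T is accepted; T is solved per call.
    return -x

print(negate(5), negate(2.5))  # -5 -2.5 (T is int, then float)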
+ +_Bool_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.bool]] +_UInt8_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.uint8]] +_Int16_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.int16]] +_LongLong_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longlong]] +_Float32_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float32]] +_Float64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_LongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.longdouble]] +_Complex64_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex64]] +_Complex128_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_CLongDouble_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[np.clongdouble]] + +b1_1d: _Bool_1d +u1_1d: _UInt8_1d +i2_1d: _Int16_1d +q_1d: _LongLong_1d +f4_1d: _Float32_1d +f8_1d: _Float64_1d +g_1d: _LongDouble_1d +c8_1d: _Complex64_1d +c16_1d: _Complex128_1d +G_1d: _CLongDouble_1d + +assert_type(do_abs(b1_1d), _Bool_1d) +assert_type(do_abs(u1_1d), _UInt8_1d) +assert_type(do_abs(i2_1d), _Int16_1d) +assert_type(do_abs(q_1d), _LongLong_1d) +assert_type(do_abs(f4_1d), _Float32_1d) +assert_type(do_abs(f8_1d), _Float64_1d) +assert_type(do_abs(g_1d), _LongDouble_1d) + +assert_type(do_abs(c8_1d), _Float32_1d) +# NOTE: Unfortunately it's not possible to have this return a `float64` sctype, see +# https://github.com/python/mypy/issues/14070 +assert_type(do_abs(c16_1d), np.ndarray[tuple[int], np.dtype[np.floating[_64Bit]]]) +assert_type(do_abs(G_1d), _LongDouble_1d) + +assert_type(do_invert(b1_1d), _Bool_1d) +assert_type(do_invert(u1_1d), _UInt8_1d) +assert_type(do_invert(i2_1d), _Int16_1d) +assert_type(do_invert(q_1d), _LongLong_1d) + +assert_type(do_neg(u1_1d), _UInt8_1d) +assert_type(do_neg(i2_1d), _Int16_1d) +assert_type(do_neg(q_1d), _LongLong_1d) +assert_type(do_neg(f4_1d), _Float32_1d) +assert_type(do_neg(c16_1d), _Complex128_1d) + +assert_type(do_pos(u1_1d), _UInt8_1d) +assert_type(do_pos(i2_1d), _Int16_1d) +assert_type(do_pos(q_1d), _LongLong_1d) +assert_type(do_pos(f4_1d), _Float32_1d) +assert_type(do_pos(c16_1d), _Complex128_1d) diff --git a/python/numpy/typing/tests/data/reveal/ndarray_conversion.pyi b/python/numpy/typing/tests/data/reveal/ndarray_conversion.pyi new file mode 100644 index 000000000..bbd42573a --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/ndarray_conversion.pyi @@ -0,0 +1,85 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +b1_0d: np.ndarray[tuple[()], np.dtype[np.bool]] +u2_1d: np.ndarray[tuple[int], np.dtype[np.uint16]] +i4_2d: np.ndarray[tuple[int, int], np.dtype[np.int32]] +f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] +cG_4d: np.ndarray[tuple[int, int, int, int], np.dtype[np.clongdouble]] +i0_nd: npt.NDArray[np.int_] +uncertain_dtype: np.int32 | np.float64 | np.str_ + +# item +assert_type(i0_nd.item(), int) +assert_type(i0_nd.item(1), int) +assert_type(i0_nd.item(0, 1), int) +assert_type(i0_nd.item((0, 1)), int) + +assert_type(b1_0d.item(()), bool) +assert_type(u2_1d.item((0,)), int) +assert_type(i4_2d.item(-1, 2), int) +assert_type(f8_3d.item(2, 1, -1), float) +assert_type(cG_4d.item(-0xEd_fed_Deb_a_dead_bee), complex) # c'mon Ed, we talked about this... 
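For context on the item block above: at runtime, .item() with a flat or multi-dimensional index returns the corresponding builtin Python scalar, which is what the shape-aware assertions encode. A small runtime sketch:

import numpy as np

a = np.array([[1, 2], [3, 4]], dtype=np.int32)
print(type(a.item(0)), a.item(0))          # <class 'int'> 1 (flat index)
print(type(a.item(-1, 1)), a.item(-1, 1))  # <class 'int'> 4 (multi-index)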
+ +# tolist +assert_type(b1_0d.tolist(), bool) +assert_type(u2_1d.tolist(), list[int]) +assert_type(i4_2d.tolist(), list[list[int]]) +assert_type(f8_3d.tolist(), list[list[list[float]]]) +assert_type(cG_4d.tolist(), Any) +assert_type(i0_nd.tolist(), Any) + +# regression tests for numpy/numpy#27944 +any_dtype: np.ndarray[Any, Any] +any_sctype: np.ndarray[Any, Any] +assert_type(any_dtype.tolist(), Any) +assert_type(any_sctype.tolist(), Any) + + +# itemset does not return a value +# tobytes is pretty simple +# tofile does not return a value +# dump does not return a value +# dumps is pretty simple + +# astype +assert_type(i0_nd.astype("float"), npt.NDArray[Any]) +assert_type(i0_nd.astype(float), npt.NDArray[Any]) +assert_type(i0_nd.astype(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe"), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True), npt.NDArray[np.float64]) +assert_type(i0_nd.astype(np.float64, "K", "unsafe", True, True), npt.NDArray[np.float64]) + +assert_type(np.astype(i0_nd, np.float64), npt.NDArray[np.float64]) + +assert_type(i4_2d.astype(np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(np.astype(i4_2d, np.uint16), np.ndarray[tuple[int, int], np.dtype[np.uint16]]) +assert_type(f8_3d.astype(np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(np.astype(f8_3d, np.int16), np.ndarray[tuple[int, int, int], np.dtype[np.int16]]) +assert_type(i4_2d.astype(uncertain_dtype), np.ndarray[tuple[int, int], np.dtype[np.generic]]) +assert_type(np.astype(i4_2d, uncertain_dtype), np.ndarray[tuple[int, int], np.dtype]) + +# byteswap +assert_type(i0_nd.byteswap(), npt.NDArray[np.int_]) +assert_type(i0_nd.byteswap(True), npt.NDArray[np.int_]) + +# copy +assert_type(i0_nd.copy(), npt.NDArray[np.int_]) +assert_type(i0_nd.copy("C"), npt.NDArray[np.int_]) + +assert_type(i0_nd.view(), npt.NDArray[np.int_]) +assert_type(i0_nd.view(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.view(float), npt.NDArray[Any]) +assert_type(i0_nd.view(np.float64, np.matrix), np.matrix[tuple[int, int], Any]) + +# getfield +assert_type(i0_nd.getfield("float"), npt.NDArray[Any]) +assert_type(i0_nd.getfield(float), npt.NDArray[Any]) +assert_type(i0_nd.getfield(np.float64), npt.NDArray[np.float64]) +assert_type(i0_nd.getfield(np.float64, 8), npt.NDArray[np.float64]) + +# setflags does not return a value +# fill does not return a value diff --git a/python/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/python/numpy/typing/tests/data/reveal/ndarray_misc.pyi new file mode 100644 index 000000000..4cbb90621 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -0,0 +1,247 @@ +""" +Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods. + +More extensive tests are performed for the methods' +function-based counterpart in `../from_numeric.py`. + +""" + +from collections.abc import Iterator +import ctypes as ct +import operator +from types import ModuleType +from typing import Any, Literal, assert_type + +from typing_extensions import CapsuleType + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.object_]): ... 
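SubClass above exists so the out=B assertions below can check that methods taking an out= array return that array itself, preserving its subclass. A hedged runtime sketch with a hypothetical ndarray subclass:

import numpy as np

class MyArray(np.ndarray): ...

buf = np.zeros(3).view(MyArray)          # reinterpret the buffer as the subclass
res = np.add(np.arange(3), 1, out=buf)   # the ufunc writes into buf and returns it
print(type(res) is MyArray, res is buf)  # True True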
+ +f8: np.float64 +i8: np.int64 +B: SubClass +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_c8: npt.NDArray[np.complex64] +AR_m: npt.NDArray[np.timedelta64] +AR_U: npt.NDArray[np.str_] +AR_V: npt.NDArray[np.void] + +AR_f8_1d: np.ndarray[tuple[int], np.dtype[np.float64]] +AR_f8_2d: np.ndarray[tuple[int, int], np.dtype[np.float64]] +AR_f8_3d: np.ndarray[tuple[int, int, int], np.dtype[np.float64]] + +ctypes_obj = AR_f8.ctypes + +assert_type(AR_f8.__dlpack__(), CapsuleType) +assert_type(AR_f8.__dlpack_device__(), tuple[Literal[1], Literal[0]]) + +assert_type(ctypes_obj.data, int) +assert_type(ctypes_obj.shape, ct.Array[np.ctypeslib.c_intp]) +assert_type(ctypes_obj.strides, ct.Array[np.ctypeslib.c_intp]) +assert_type(ctypes_obj._as_parameter_, ct.c_void_p) + +assert_type(ctypes_obj.data_as(ct.c_void_p), ct.c_void_p) +assert_type(ctypes_obj.shape_as(ct.c_longlong), ct.Array[ct.c_longlong]) +assert_type(ctypes_obj.strides_as(ct.c_ubyte), ct.Array[ct.c_ubyte]) + +assert_type(f8.all(), np.bool) +assert_type(AR_f8.all(), np.bool) +assert_type(AR_f8.all(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.all(out=B), SubClass) + +assert_type(f8.any(), np.bool) +assert_type(AR_f8.any(), np.bool) +assert_type(AR_f8.any(axis=0), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(keepdims=True), np.bool | npt.NDArray[np.bool]) +assert_type(AR_f8.any(out=B), SubClass) + +assert_type(f8.argmax(), np.intp) +assert_type(AR_f8.argmax(), np.intp) +assert_type(AR_f8.argmax(axis=0), Any) +assert_type(AR_f8.argmax(out=AR_i8), npt.NDArray[np.intp]) + +assert_type(f8.argmin(), np.intp) +assert_type(AR_f8.argmin(), np.intp) +assert_type(AR_f8.argmin(axis=0), Any) +assert_type(AR_f8.argmin(out=AR_i8), npt.NDArray[np.intp]) + +assert_type(f8.argsort(), npt.NDArray[Any]) +assert_type(AR_f8.argsort(), npt.NDArray[Any]) + +assert_type(f8.astype(np.int64).choose([()]), npt.NDArray[Any]) +assert_type(AR_f8.choose([0]), npt.NDArray[Any]) +assert_type(AR_f8.choose([0], out=B), SubClass) + +assert_type(f8.clip(1), npt.NDArray[Any]) +assert_type(AR_f8.clip(1), npt.NDArray[Any]) +assert_type(AR_f8.clip(None, 1), npt.NDArray[Any]) +assert_type(AR_f8.clip(1, out=B), SubClass) +assert_type(AR_f8.clip(None, 1, out=B), SubClass) + +assert_type(f8.compress([0]), npt.NDArray[Any]) +assert_type(AR_f8.compress([0]), npt.NDArray[Any]) +assert_type(AR_f8.compress([0], out=B), SubClass) + +assert_type(f8.conj(), np.float64) +assert_type(AR_f8.conj(), npt.NDArray[np.float64]) +assert_type(B.conj(), SubClass) + +assert_type(f8.conjugate(), np.float64) +assert_type(AR_f8.conjugate(), npt.NDArray[np.float64]) +assert_type(B.conjugate(), SubClass) + +assert_type(f8.cumprod(), npt.NDArray[Any]) +assert_type(AR_f8.cumprod(), npt.NDArray[Any]) +assert_type(AR_f8.cumprod(out=B), SubClass) + +assert_type(f8.cumsum(), npt.NDArray[Any]) +assert_type(AR_f8.cumsum(), npt.NDArray[Any]) +assert_type(AR_f8.cumsum(out=B), SubClass) + +assert_type(f8.max(), Any) +assert_type(AR_f8.max(), Any) +assert_type(AR_f8.max(axis=0), Any) +assert_type(AR_f8.max(keepdims=True), Any) +assert_type(AR_f8.max(out=B), SubClass) + +assert_type(f8.mean(), Any) +assert_type(AR_f8.mean(), Any) +assert_type(AR_f8.mean(axis=0), Any) +assert_type(AR_f8.mean(keepdims=True), Any) +assert_type(AR_f8.mean(out=B), SubClass) + +assert_type(f8.min(), Any) +assert_type(AR_f8.min(), Any) +assert_type(AR_f8.min(axis=0), Any) 
+assert_type(AR_f8.min(keepdims=True), Any) +assert_type(AR_f8.min(out=B), SubClass) + +assert_type(f8.prod(), Any) +assert_type(AR_f8.prod(), Any) +assert_type(AR_f8.prod(axis=0), Any) +assert_type(AR_f8.prod(keepdims=True), Any) +assert_type(AR_f8.prod(out=B), SubClass) + +assert_type(f8.round(), np.float64) +assert_type(AR_f8.round(), npt.NDArray[np.float64]) +assert_type(AR_f8.round(out=B), SubClass) + +assert_type(f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8.repeat(1, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) +assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) + +assert_type(f8.std(), Any) +assert_type(AR_f8.std(), Any) +assert_type(AR_f8.std(axis=0), Any) +assert_type(AR_f8.std(keepdims=True), Any) +assert_type(AR_f8.std(out=B), SubClass) + +assert_type(f8.sum(), Any) +assert_type(AR_f8.sum(), Any) +assert_type(AR_f8.sum(axis=0), Any) +assert_type(AR_f8.sum(keepdims=True), Any) +assert_type(AR_f8.sum(out=B), SubClass) + +assert_type(f8.take(0), np.float64) +assert_type(AR_f8.take(0), np.float64) +assert_type(AR_f8.take([0]), npt.NDArray[np.float64]) +assert_type(AR_f8.take(0, out=B), SubClass) +assert_type(AR_f8.take([0], out=B), SubClass) + +assert_type(f8.var(), Any) +assert_type(AR_f8.var(), Any) +assert_type(AR_f8.var(axis=0), Any) +assert_type(AR_f8.var(keepdims=True), Any) +assert_type(AR_f8.var(out=B), SubClass) + +assert_type(AR_f8.argpartition([0]), npt.NDArray[np.intp]) + +assert_type(AR_f8.diagonal(), npt.NDArray[np.float64]) + +assert_type(AR_f8.dot(1), npt.NDArray[Any]) +assert_type(AR_f8.dot([1]), Any) +assert_type(AR_f8.dot(1, out=B), SubClass) + +assert_type(AR_f8.nonzero(), tuple[npt.NDArray[np.intp], ...]) + +assert_type(AR_f8.searchsorted(1), np.intp) +assert_type(AR_f8.searchsorted([1]), npt.NDArray[np.intp]) + +assert_type(AR_f8.trace(), Any) +assert_type(AR_f8.trace(out=B), SubClass) + +assert_type(AR_f8.item(), float) +assert_type(AR_U.item(), str) + +assert_type(AR_f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]]) + +assert_type(AR_f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]]) + +assert_type(AR_i8.reshape(None), npt.NDArray[np.int64]) +assert_type(AR_f8.reshape(-1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_c8.reshape(2, 3, 4, 5), np.ndarray[tuple[int, int, int, int], np.dtype[np.complex64]]) +assert_type(AR_m.reshape(()), np.ndarray[tuple[()], np.dtype[np.timedelta64]]) +assert_type(AR_U.reshape([]), np.ndarray[tuple[()], np.dtype[np.str_]]) +assert_type(AR_V.reshape((480, 720, 4)), np.ndarray[tuple[int, int, int], np.dtype[np.void]]) + +assert_type(int(AR_f8), int) +assert_type(int(AR_U), int) + +assert_type(float(AR_f8), float) +assert_type(float(AR_U), float) + +assert_type(complex(AR_f8), complex) + +assert_type(operator.index(AR_i8), int) + +assert_type(AR_f8.__array_wrap__(B), npt.NDArray[np.object_]) + +assert_type(AR_V[0], Any) +assert_type(AR_V[0, 0], Any) +assert_type(AR_V[AR_i8], npt.NDArray[np.void]) +assert_type(AR_V[AR_i8, AR_i8], npt.NDArray[np.void]) +assert_type(AR_V[AR_i8, None], npt.NDArray[np.void]) +assert_type(AR_V[0, ...], npt.NDArray[np.void]) +assert_type(AR_V[[0]], npt.NDArray[np.void]) 
+assert_type(AR_V[[0], [0]], npt.NDArray[np.void]) +assert_type(AR_V[:], npt.NDArray[np.void]) +assert_type(AR_V["a"], npt.NDArray[Any]) +assert_type(AR_V[["a", "b"]], npt.NDArray[np.void]) + +assert_type(AR_f8.dump("test_file"), None) +assert_type(AR_f8.dump(b"test_file"), None) +with open("test_file", "wb") as f: + assert_type(AR_f8.dump(f), None) + +assert_type(AR_f8.__array_finalize__(None), None) +assert_type(AR_f8.__array_finalize__(B), None) +assert_type(AR_f8.__array_finalize__(AR_f8), None) + +assert_type(f8.device, Literal["cpu"]) +assert_type(AR_f8.device, Literal["cpu"]) + +assert_type(f8.to_device("cpu"), np.float64) +assert_type(i8.to_device("cpu"), np.int64) +assert_type(AR_f8.to_device("cpu"), npt.NDArray[np.float64]) +assert_type(AR_i8.to_device("cpu"), npt.NDArray[np.int64]) +assert_type(AR_u1.to_device("cpu"), npt.NDArray[np.uint8]) +assert_type(AR_c8.to_device("cpu"), npt.NDArray[np.complex64]) +assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) + +assert_type(f8.__array_namespace__(), ModuleType) +assert_type(AR_f8.__array_namespace__(), ModuleType) + +assert_type(iter(AR_f8), Iterator[Any]) # any-D +assert_type(iter(AR_f8_1d), Iterator[np.float64]) # 1-D +assert_type(iter(AR_f8_2d), Iterator[npt.NDArray[np.float64]]) # 2-D +assert_type(iter(AR_f8_3d), Iterator[npt.NDArray[np.float64]]) # 3-D diff --git a/python/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi b/python/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi new file mode 100644 index 000000000..4447bb13d --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi @@ -0,0 +1,39 @@ +from typing import assert_type + +import numpy as np +import numpy.typing as npt + +nd: npt.NDArray[np.int64] + +# reshape +assert_type(nd.reshape(None), npt.NDArray[np.int64]) +assert_type(nd.reshape(4), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.reshape((4,)), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.reshape(2, 2), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(nd.reshape((2, 2)), np.ndarray[tuple[int, int], np.dtype[np.int64]]) + +assert_type(nd.reshape((2, 2), order="C"), np.ndarray[tuple[int, int], np.dtype[np.int64]]) +assert_type(nd.reshape(4, order="C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# resize does not return a value + +# transpose +assert_type(nd.transpose(), npt.NDArray[np.int64]) +assert_type(nd.transpose(1, 0), npt.NDArray[np.int64]) +assert_type(nd.transpose((1, 0)), npt.NDArray[np.int64]) + +# swapaxes +assert_type(nd.swapaxes(0, 1), npt.NDArray[np.int64]) + +# flatten +assert_type(nd.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.flatten("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# ravel +assert_type(nd.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]]) +assert_type(nd.ravel("C"), np.ndarray[tuple[int], np.dtype[np.int64]]) + +# squeeze +assert_type(nd.squeeze(), npt.NDArray[np.int64]) +assert_type(nd.squeeze(0), npt.NDArray[np.int64]) +assert_type(nd.squeeze((0, 2)), npt.NDArray[np.int64]) diff --git a/python/numpy/typing/tests/data/reveal/nditer.pyi b/python/numpy/typing/tests/data/reveal/nditer.pyi new file mode 100644 index 000000000..8965f3c03 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/nditer.pyi @@ -0,0 +1,49 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +nditer_obj: np.nditer + +assert_type(np.nditer([0, 1], flags=["c_index"]), np.nditer) +assert_type(np.nditer([0, 1], 
op_flags=[["readonly", "readonly"]]), np.nditer) +assert_type(np.nditer([0, 1], op_dtypes=np.int_), np.nditer) +assert_type(np.nditer([0, 1], order="C", casting="no"), np.nditer) + +assert_type(nditer_obj.dtypes, tuple[np.dtype, ...]) +assert_type(nditer_obj.finished, bool) +assert_type(nditer_obj.has_delayed_bufalloc, bool) +assert_type(nditer_obj.has_index, bool) +assert_type(nditer_obj.has_multi_index, bool) +assert_type(nditer_obj.index, int) +assert_type(nditer_obj.iterationneedsapi, bool) +assert_type(nditer_obj.iterindex, int) +assert_type(nditer_obj.iterrange, tuple[int, ...]) +assert_type(nditer_obj.itersize, int) +assert_type(nditer_obj.itviews, tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.multi_index, tuple[int, ...]) +assert_type(nditer_obj.ndim, int) +assert_type(nditer_obj.nop, int) +assert_type(nditer_obj.operands, tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.shape, tuple[int, ...]) +assert_type(nditer_obj.value, tuple[npt.NDArray[Any], ...]) + +assert_type(nditer_obj.close(), None) +assert_type(nditer_obj.copy(), np.nditer) +assert_type(nditer_obj.debug_print(), None) +assert_type(nditer_obj.enable_external_loop(), None) +assert_type(nditer_obj.iternext(), bool) +assert_type(nditer_obj.remove_axis(0), None) +assert_type(nditer_obj.remove_multi_index(), None) +assert_type(nditer_obj.reset(), None) + +assert_type(len(nditer_obj), int) +assert_type(iter(nditer_obj), np.nditer) +assert_type(next(nditer_obj), tuple[npt.NDArray[Any], ...]) +assert_type(nditer_obj.__copy__(), np.nditer) +with nditer_obj as f: + assert_type(f, np.nditer) +assert_type(nditer_obj[0], npt.NDArray[Any]) +assert_type(nditer_obj[:], tuple[npt.NDArray[Any], ...]) +nditer_obj[0] = 0 +nditer_obj[:] = [0, 1] diff --git a/python/numpy/typing/tests/data/reveal/nested_sequence.pyi b/python/numpy/typing/tests/data/reveal/nested_sequence.pyi new file mode 100644 index 000000000..b4f98b79c --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/nested_sequence.pyi @@ -0,0 +1,25 @@ +from collections.abc import Sequence +from typing import Any, assert_type + +from numpy._typing import _NestedSequence + +a: Sequence[int] +b: Sequence[Sequence[int]] +c: Sequence[Sequence[Sequence[int]]] +d: Sequence[Sequence[Sequence[Sequence[int]]]] +e: Sequence[bool] +f: tuple[int, ...] +g: list[int] +h: Sequence[Any] + +def func(a: _NestedSequence[int]) -> None: ... + +assert_type(func(a), None) +assert_type(func(b), None) +assert_type(func(c), None) +assert_type(func(d), None) +assert_type(func(e), None) +assert_type(func(f), None) +assert_type(func(g), None) +assert_type(func(h), None) +assert_type(func(range(15)), None) diff --git a/python/numpy/typing/tests/data/reveal/npyio.pyi b/python/numpy/typing/tests/data/reveal/npyio.pyi new file mode 100644 index 000000000..40da72c85 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/npyio.pyi @@ -0,0 +1,83 @@ +import pathlib +import re +import zipfile +from collections.abc import Mapping +from typing import IO, Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.lib._npyio_impl import BagObj + +str_path: str +pathlib_path: pathlib.Path +str_file: IO[str] +bytes_file: IO[bytes] + +npz_file: np.lib.npyio.NpzFile + +AR_i8: npt.NDArray[np.int64] +AR_LIKE_f8: list[float] + +class BytesWriter: + def write(self, data: bytes) -> None: ... + +class BytesReader: + def read(self, n: int = ...) -> bytes: ... + def seek(self, offset: int, whence: int = ...) -> int: ... 
+ +bytes_writer: BytesWriter +bytes_reader: BytesReader + +assert_type(npz_file.zip, zipfile.ZipFile) +assert_type(npz_file.fid, IO[str] | None) +assert_type(npz_file.files, list[str]) +assert_type(npz_file.allow_pickle, bool) +assert_type(npz_file.pickle_kwargs, Mapping[str, Any] | None) +assert_type(npz_file.f, BagObj[np.lib.npyio.NpzFile]) +assert_type(npz_file["test"], npt.NDArray[Any]) +assert_type(len(npz_file), int) +with npz_file as f: + assert_type(f, np.lib.npyio.NpzFile) + +assert_type(np.load(bytes_file), Any) +assert_type(np.load(pathlib_path, allow_pickle=True), Any) +assert_type(np.load(str_path, encoding="bytes"), Any) +assert_type(np.load(bytes_reader), Any) + +assert_type(np.save(bytes_file, AR_LIKE_f8), None) +assert_type(np.save(pathlib_path, AR_i8, allow_pickle=True), None) +assert_type(np.save(str_path, AR_LIKE_f8), None) +assert_type(np.save(bytes_writer, AR_LIKE_f8), None) + +assert_type(np.savez(bytes_file, AR_LIKE_f8), None) +assert_type(np.savez(pathlib_path, ar1=AR_i8, ar2=AR_i8), None) +assert_type(np.savez(str_path, AR_LIKE_f8, ar1=AR_i8), None) +assert_type(np.savez(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None) + +assert_type(np.savez_compressed(bytes_file, AR_LIKE_f8), None) +assert_type(np.savez_compressed(pathlib_path, ar1=AR_i8, ar2=AR_i8), None) +assert_type(np.savez_compressed(str_path, AR_LIKE_f8, ar1=AR_i8), None) +assert_type(np.savez_compressed(bytes_writer, AR_LIKE_f8, ar1=AR_i8), None) + +assert_type(np.loadtxt(bytes_file), npt.NDArray[np.float64]) +assert_type(np.loadtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) +assert_type(np.loadtxt(str_path, dtype=str, skiprows=2), npt.NDArray[Any]) +assert_type(np.loadtxt(str_file, comments="test"), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_file, comments=None), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_path, delimiter="\n"), npt.NDArray[np.float64]) +assert_type(np.loadtxt(str_path, ndmin=2), npt.NDArray[np.float64]) +assert_type(np.loadtxt(["1", "2", "3"]), npt.NDArray[np.float64]) + +assert_type(np.fromregex(bytes_file, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(str_file, b"test", dtype=float), npt.NDArray[Any]) +assert_type(np.fromregex(str_path, re.compile("test"), dtype=np.str_, encoding="utf8"), npt.NDArray[np.str_]) +assert_type(np.fromregex(pathlib_path, "test", np.float64), npt.NDArray[np.float64]) +assert_type(np.fromregex(bytes_reader, "test", np.float64), npt.NDArray[np.float64]) + +assert_type(np.genfromtxt(bytes_file), npt.NDArray[Any]) +assert_type(np.genfromtxt(pathlib_path, dtype=np.str_), npt.NDArray[np.str_]) +assert_type(np.genfromtxt(str_path, dtype=str, skip_header=2), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_file, comments="test"), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_path, delimiter="\n"), npt.NDArray[Any]) +assert_type(np.genfromtxt(str_path, ndmin=2), npt.NDArray[Any]) +assert_type(np.genfromtxt(["1", "2", "3"], ndmin=2), npt.NDArray[Any]) diff --git a/python/numpy/typing/tests/data/reveal/numeric.pyi b/python/numpy/typing/tests/data/reveal/numeric.pyi new file mode 100644 index 000000000..7c1ea8958 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/numeric.pyi @@ -0,0 +1,134 @@ +""" +Tests for :mod:`_core.numeric`. + +Does not include tests which fall under ``array_constructors``. + +""" + +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +class SubClass(npt.NDArray[np.int64]): ... 
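Many of the assertions below encode NumPy's type-promotion rules, e.g. mixing integer and floating inputs yields a floating result. A short runtime sketch of the np.correlate case:

import numpy as np

r = np.correlate(np.array([1, 2, 3]), np.array([0.0, 1.0, 0.5]), mode="same")
print(r.dtype)  # float64 -- int64 promoted with float64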
+ +i8: np.int64 + +AR_b: npt.NDArray[np.bool] +AR_u8: npt.NDArray[np.uint64] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_m: npt.NDArray[np.timedelta64] +AR_O: npt.NDArray[np.object_] + +B: list[int] +C: SubClass + +assert_type(np.count_nonzero(i8), np.intp) +assert_type(np.count_nonzero(AR_i8), np.intp) +assert_type(np.count_nonzero(B), np.intp) +assert_type(np.count_nonzero(AR_i8, keepdims=True), npt.NDArray[np.intp]) +assert_type(np.count_nonzero(AR_i8, axis=0), Any) + +assert_type(np.isfortran(i8), bool) +assert_type(np.isfortran(AR_i8), bool) + +assert_type(np.argwhere(i8), npt.NDArray[np.intp]) +assert_type(np.argwhere(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.flatnonzero(i8), npt.NDArray[np.intp]) +assert_type(np.flatnonzero(AR_i8), npt.NDArray[np.intp]) + +assert_type(np.correlate(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) +assert_type(np.correlate(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) +assert_type(np.correlate(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.correlate(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.correlate(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.correlate(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.correlate(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.correlate(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.correlate(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.convolve(B, AR_i8, mode="valid"), npt.NDArray[np.signedinteger]) +assert_type(np.convolve(AR_i8, AR_i8, mode="same"), npt.NDArray[np.signedinteger]) +assert_type(np.convolve(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.convolve(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.convolve(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.convolve(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.convolve(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.convolve(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.outer(i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.outer(B, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.outer(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.outer(AR_i8, AR_i8, out=C), SubClass) +assert_type(np.outer(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.outer(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.outer(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.convolve(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.outer(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.outer(AR_i8, AR_m), npt.NDArray[np.timedelta64]) +assert_type(np.outer(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.tensordot(B, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=0), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_i8, AR_i8, axes=(0, 1)), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.tensordot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.tensordot(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.tensordot(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) 
+assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.isscalar(i8), bool) +assert_type(np.isscalar(AR_i8), bool) +assert_type(np.isscalar(B), bool) + +assert_type(np.roll(AR_i8, 1), npt.NDArray[np.int64]) +assert_type(np.roll(AR_i8, (1, 2)), npt.NDArray[np.int64]) +assert_type(np.roll(B, 1), npt.NDArray[Any]) + +assert_type(np.rollaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) + +assert_type(np.moveaxis(AR_i8, 0, 1), npt.NDArray[np.int64]) +assert_type(np.moveaxis(AR_i8, (0, 1), (1, 2)), npt.NDArray[np.int64]) + +assert_type(np.cross(B, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.cross(AR_i8, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.cross(AR_b, AR_u8), npt.NDArray[np.unsignedinteger]) +assert_type(np.cross(AR_i8, AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.cross(AR_i8, AR_f8), npt.NDArray[np.floating]) +assert_type(np.cross(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(np.cross(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(np.indices([0, 1, 2]), npt.NDArray[np.int_]) +assert_type(np.indices([0, 1, 2], sparse=True), tuple[npt.NDArray[np.int_], ...]) +assert_type(np.indices([0, 1, 2], dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.indices([0, 1, 2], sparse=True, dtype=np.float64), tuple[npt.NDArray[np.float64], ...]) +assert_type(np.indices([0, 1, 2], dtype=float), npt.NDArray[Any]) +assert_type(np.indices([0, 1, 2], sparse=True, dtype=float), tuple[npt.NDArray[Any], ...]) + +assert_type(np.binary_repr(1), str) + +assert_type(np.base_repr(1), str) + +assert_type(np.allclose(i8, AR_i8), bool) +assert_type(np.allclose(B, AR_i8), bool) +assert_type(np.allclose(AR_i8, AR_i8), bool) + +assert_type(np.isclose(i8, i8), np.bool) +assert_type(np.isclose(i8, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(B, AR_i8), npt.NDArray[np.bool]) +assert_type(np.isclose(AR_i8, AR_i8), npt.NDArray[np.bool]) + +assert_type(np.array_equal(i8, AR_i8), bool) +assert_type(np.array_equal(B, AR_i8), bool) +assert_type(np.array_equal(AR_i8, AR_i8), bool) + +assert_type(np.array_equiv(i8, AR_i8), bool) +assert_type(np.array_equiv(B, AR_i8), bool) +assert_type(np.array_equiv(AR_i8, AR_i8), bool) diff --git a/python/numpy/typing/tests/data/reveal/numerictypes.pyi b/python/numpy/typing/tests/data/reveal/numerictypes.pyi new file mode 100644 index 000000000..75d108ce5 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -0,0 +1,16 @@ +from typing import Literal, assert_type + +import numpy as np + +assert_type(np.ScalarType[0], type[int]) +assert_type(np.ScalarType[3], type[bool]) +assert_type(np.ScalarType[8], type[np.complex64]) +assert_type(np.ScalarType[9], type[np.complex128]) +assert_type(np.ScalarType[-1], type[np.void]) +assert_type(np.bool_(object()), np.bool) + +assert_type(np.typecodes["Character"], Literal["c"]) +assert_type(np.typecodes["Complex"], Literal["FDG"]) +assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) + +assert_type(np.sctypeDict["uint8"], type[np.generic]) diff --git a/python/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/python/numpy/typing/tests/data/reveal/polynomial_polybase.pyi new file mode 100644 index 000000000..bb927035e --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -0,0 +1,220 @@ +from collections.abc import Sequence +from decimal import Decimal +from fractions import Fraction +from typing import Any, LiteralString, TypeAlias, TypeVar, assert_type +from typing import Literal as L + +import 
numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +_Ar_x: TypeAlias = npt.NDArray[np.inexact | np.object_] +_Ar_f: TypeAlias = npt.NDArray[np.floating] +_Ar_c: TypeAlias = npt.NDArray[np.complexfloating] +_Ar_O: TypeAlias = npt.NDArray[np.object_] + +_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact | np.object_]] +_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.inexact | np.object_]] +_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating]] +_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating]] +_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) +_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] + +_BasisName: TypeAlias = L["X"] + +SC_i: np.int_ +SC_i_co: int | np.int_ +SC_f: np.float64 +SC_f_co: float | np.float64 | np.int_ +SC_c: np.complex128 +SC_c_co: complex | np.complex128 +SC_O: Decimal + +AR_i: npt.NDArray[np.int_] +AR_f: npt.NDArray[np.float64] +AR_f_co: npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_c: npt.NDArray[np.complex128] +AR_c_co: npt.NDArray[np.complex128] | npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_O: npt.NDArray[np.object_] +AR_O_co: npt.NDArray[np.object_ | np.number] + +SQ_i: Sequence[int] +SQ_f: Sequence[float] +SQ_c: Sequence[complex] +SQ_O: Sequence[Decimal] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev +PS_herm: npp.Hermite +PS_herme: npp.HermiteE +PS_lag: npp.Laguerre +PS_leg: npp.Legendre +PS_all: ( + npp.Polynomial + | npp.Chebyshev + | npp.Hermite + | npp.HermiteE + | npp.Laguerre + | npp.Legendre +) + +# static- and classmethods + +assert_type(type(PS_poly).basis_name, None) +assert_type(type(PS_cheb).basis_name, L['T']) +assert_type(type(PS_herm).basis_name, L['H']) +assert_type(type(PS_herme).basis_name, L['He']) +assert_type(type(PS_lag).basis_name, L['L']) +assert_type(type(PS_leg).basis_name, L['P']) + +assert_type(type(PS_all).__hash__, None) +assert_type(type(PS_all).__array_ufunc__, None) +assert_type(type(PS_all).maxpower, L[100]) + +assert_type(type(PS_poly).fromroots(SC_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(SQ_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(AR_i), npp.Polynomial) +assert_type(type(PS_cheb).fromroots(SC_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(SQ_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(AR_f_co), npp.Chebyshev) +assert_type(type(PS_herm).fromroots(SC_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(SQ_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(AR_c_co), npp.Hermite) +assert_type(type(PS_leg).fromroots(SC_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) + +assert_type(type(PS_poly).identity(), npp.Polynomial) +assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) + +assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) +assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) + +assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) +assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) + +# attributes / properties + +assert_type(PS_all.coef, _Ar_x_n) +assert_type(PS_all.domain, _Ar_x_2) +assert_type(PS_all.window, _Ar_x_2) +assert_type(PS_all.symbol, LiteralString) + +# instance 
methods + +assert_type(PS_all.has_samecoef(PS_all), bool) +assert_type(PS_all.has_samedomain(PS_all), bool) +assert_type(PS_all.has_samewindow(PS_all), bool) +assert_type(PS_all.has_sametype(PS_all), bool) +assert_type(PS_poly.has_sametype(PS_poly), bool) +assert_type(PS_poly.has_sametype(PS_leg), bool) +assert_type(PS_poly.has_sametype(NotADirectoryError), L[False]) + +assert_type(PS_poly.copy(), npp.Polynomial) +assert_type(PS_cheb.copy(), npp.Chebyshev) +assert_type(PS_herm.copy(), npp.Hermite) +assert_type(PS_herme.copy(), npp.HermiteE) +assert_type(PS_lag.copy(), npp.Laguerre) +assert_type(PS_leg.copy(), npp.Legendre) + +assert_type(PS_leg.cutdeg(), npp.Legendre) +assert_type(PS_leg.trim(), npp.Legendre) +assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) +assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) + +assert_type(PS_all.convert(None, npp.Chebyshev), npp.Chebyshev) +assert_type(PS_all.convert((0, 1), npp.Laguerre), npp.Laguerre) +assert_type(PS_all.convert([0, 1], npp.Hermite, [-1, 1]), npp.Hermite) + +assert_type(PS_all.degree(), int) +assert_type(PS_all.mapparms(), tuple[Any, Any]) + +assert_type(PS_poly.integ(), npp.Polynomial) +assert_type(PS_herme.integ(SC_i_co), npp.HermiteE) +assert_type(PS_lag.integ(SC_i_co, SC_f_co), npp.Laguerre) +assert_type(PS_poly.deriv(), npp.Polynomial) +assert_type(PS_herm.deriv(SC_i_co), npp.Hermite) + +assert_type(PS_poly.roots(), _Ar_x_n) + +assert_type( + PS_poly.linspace(), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type( + PS_poly.linspace(9), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type(PS_cheb.fit(AR_c_co, AR_c_co, SC_i_co), npp.Chebyshev) +assert_type(PS_leg.fit(AR_c_co, AR_c_co, AR_i), npp.Legendre) +assert_type(PS_herm.fit(AR_c_co, AR_c_co, SQ_i), npp.Hermite) +assert_type(PS_poly.fit(AR_c_co, SQ_c, SQ_i), npp.Polynomial) +assert_type(PS_lag.fit(SQ_c, SQ_c, SQ_i, full=False), npp.Laguerre) +assert_type( + PS_herme.fit(SQ_c, AR_c_co, SC_i_co, full=True), + tuple[npp.HermiteE, Sequence[np.inexact | np.int32]], +) + +# custom operations + +assert_type(PS_all.__hash__, None) +assert_type(PS_all.__array_ufunc__, None) + +assert_type(str(PS_all), str) +assert_type(repr(PS_all), str) +assert_type(format(PS_all), str) + +assert_type(len(PS_all), int) +assert_type(next(iter(PS_all)), np.inexact | object) + +assert_type(PS_all(SC_f_co), np.float64 | np.complex128) +assert_type(PS_all(SC_c_co), np.complex128) +assert_type(PS_all(Decimal()), np.float64 | np.complex128) +assert_type(PS_all(Fraction()), np.float64 | np.complex128) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) +assert_type(PS_all(PS_poly), npp.Polynomial) + +assert_type(PS_poly == PS_poly, bool) +assert_type(PS_poly != PS_poly, bool) + +assert_type(-PS_poly, npp.Polynomial) +assert_type(+PS_poly, npp.Polynomial) + +assert_type(PS_poly + 5, npp.Polynomial) +assert_type(PS_poly - 5, npp.Polynomial) +assert_type(PS_poly * 5, npp.Polynomial) +assert_type(PS_poly / 5, npp.Polynomial) +assert_type(PS_poly // 5, npp.Polynomial) +assert_type(PS_poly % 5, 
npp.Polynomial) + +assert_type(PS_poly + PS_leg, npp.Polynomial) +assert_type(PS_poly - PS_leg, npp.Polynomial) +assert_type(PS_poly * PS_leg, npp.Polynomial) +assert_type(PS_poly / PS_leg, npp.Polynomial) +assert_type(PS_poly // PS_leg, npp.Polynomial) +assert_type(PS_poly % PS_leg, npp.Polynomial) + +assert_type(5 + PS_poly, npp.Polynomial) +assert_type(5 - PS_poly, npp.Polynomial) +assert_type(5 * PS_poly, npp.Polynomial) +assert_type(5 / PS_poly, npp.Polynomial) +assert_type(5 // PS_poly, npp.Polynomial) +assert_type(5 % PS_poly, npp.Polynomial) +assert_type(divmod(PS_poly, 5), tuple[npp.Polynomial, npp.Polynomial]) +assert_type(divmod(5, PS_poly), tuple[npp.Polynomial, npp.Polynomial]) + +assert_type(PS_poly**1, npp.Polynomial) +assert_type(PS_poly**1.0, npp.Polynomial) diff --git a/python/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/python/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi new file mode 100644 index 000000000..45522e721 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -0,0 +1,219 @@ +from collections.abc import Sequence +from decimal import Decimal +from fractions import Fraction +from typing import Any, TypeAlias, assert_type +from typing import Literal as L + +import numpy as np +import numpy.polynomial.polyutils as pu +import numpy.typing as npt +from numpy.polynomial._polytypes import _Tuple2 + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +num_int: int +num_float: float +num_complex: complex +# will result in an `object_` dtype +num_object: Decimal | Fraction + +sct_int: np.int_ +sct_float: np.float64 +sct_complex: np.complex128 +sct_object: np.object_ # doesn't exist at runtime + +arr_int: npt.NDArray[np.int_] +arr_float: npt.NDArray[np.float64] +arr_complex: npt.NDArray[np.complex128] +arr_object: npt.NDArray[np.object_] + +seq_num_int: Sequence[int] +seq_num_float: Sequence[float] +seq_num_complex: Sequence[complex] +seq_num_object: Sequence[Decimal | Fraction] + +seq_sct_int: Sequence[np.int_] +seq_sct_float: Sequence[np.float64] +seq_sct_complex: Sequence[np.complex128] +seq_sct_object: Sequence[np.object_] + +seq_arr_int: Sequence[npt.NDArray[np.int_]] +seq_arr_float: Sequence[npt.NDArray[np.float64]] +seq_arr_complex: Sequence[npt.NDArray[np.complex128]] +seq_arr_object: Sequence[npt.NDArray[np.object_]] + +seq_seq_num_int: Sequence[Sequence[int]] +seq_seq_num_float: Sequence[Sequence[float]] +seq_seq_num_complex: Sequence[Sequence[complex]] +seq_seq_num_object: Sequence[Sequence[Decimal | Fraction]] + +seq_seq_sct_int: Sequence[Sequence[np.int_]] +seq_seq_sct_float: Sequence[Sequence[np.float64]] +seq_seq_sct_complex: Sequence[Sequence[np.complex128]] +seq_seq_sct_object: Sequence[Sequence[np.object_]] # doesn't exist at runtime + +# as_series + +assert_type(pu.as_series(arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_float), list[_ArrFloat1D]) 
+assert_type(pu.as_series(seq_num_complex), list[_ArrComplex1D])
+assert_type(pu.as_series(seq_num_object), list[_ArrObject1D])
+
+assert_type(pu.as_series(seq_sct_int), list[_ArrFloat1D])
+assert_type(pu.as_series(seq_sct_float), list[_ArrFloat1D])
+assert_type(pu.as_series(seq_sct_complex), list[_ArrComplex1D])
+assert_type(pu.as_series(seq_sct_object), list[_ArrObject1D])
+
+assert_type(pu.as_series(seq_arr_int), list[_ArrFloat1D])
+assert_type(pu.as_series(seq_arr_float), list[_ArrFloat1D])
+assert_type(pu.as_series(seq_arr_complex), list[_ArrComplex1D])
+assert_type(pu.as_series(seq_arr_object), list[_ArrObject1D])
+
+assert_type(pu.as_series(seq_seq_num_int), list[_ArrFloat1D])
+assert_type(pu.as_series(seq_seq_num_float), list[_ArrFloat1D])
+assert_type(pu.as_series(seq_seq_num_complex), list[_ArrComplex1D])
+assert_type(pu.as_series(seq_seq_num_object), list[_ArrObject1D])
+
+assert_type(pu.as_series(seq_seq_sct_int), list[_ArrFloat1D])
+assert_type(pu.as_series(seq_seq_sct_float), list[_ArrFloat1D])
+assert_type(pu.as_series(seq_seq_sct_complex), list[_ArrComplex1D])
+assert_type(pu.as_series(seq_seq_sct_object), list[_ArrObject1D])
+
+# trimcoef
+
+assert_type(pu.trimcoef(num_int), _ArrFloat1D)
+assert_type(pu.trimcoef(num_float), _ArrFloat1D)
+assert_type(pu.trimcoef(num_complex), _ArrComplex1D)
+assert_type(pu.trimcoef(num_object), _ArrObject1D)
+
+assert_type(pu.trimcoef(sct_int), _ArrFloat1D)
+assert_type(pu.trimcoef(sct_float), _ArrFloat1D)
+assert_type(pu.trimcoef(sct_complex), _ArrComplex1D)
+assert_type(pu.trimcoef(sct_object), _ArrObject1D)
+
+assert_type(pu.trimcoef(arr_int), _ArrFloat1D)
+assert_type(pu.trimcoef(arr_float), _ArrFloat1D)
+assert_type(pu.trimcoef(arr_complex), _ArrComplex1D)
+assert_type(pu.trimcoef(arr_object), _ArrObject1D)
+
+assert_type(pu.trimcoef(seq_num_int), _ArrFloat1D)
+assert_type(pu.trimcoef(seq_num_float), _ArrFloat1D)
+assert_type(pu.trimcoef(seq_num_complex), _ArrComplex1D)
+assert_type(pu.trimcoef(seq_num_object), _ArrObject1D)
+
+assert_type(pu.trimcoef(seq_sct_int), _ArrFloat1D)
+assert_type(pu.trimcoef(seq_sct_float), _ArrFloat1D)
+assert_type(pu.trimcoef(seq_sct_complex), _ArrComplex1D)
+assert_type(pu.trimcoef(seq_sct_object), _ArrObject1D)
+
+# getdomain
+
+assert_type(pu.getdomain(num_int), _ArrFloat1D_2)
+assert_type(pu.getdomain(num_float), _ArrFloat1D_2)
+assert_type(pu.getdomain(num_complex), _ArrComplex1D_2)
+assert_type(pu.getdomain(num_object), _ArrObject1D_2)
+
+assert_type(pu.getdomain(sct_int), _ArrFloat1D_2)
+assert_type(pu.getdomain(sct_float), _ArrFloat1D_2)
+assert_type(pu.getdomain(sct_complex), _ArrComplex1D_2)
+assert_type(pu.getdomain(sct_object), _ArrObject1D_2)
+
+assert_type(pu.getdomain(arr_int), _ArrFloat1D_2)
+assert_type(pu.getdomain(arr_float), _ArrFloat1D_2)
+assert_type(pu.getdomain(arr_complex), _ArrComplex1D_2)
+assert_type(pu.getdomain(arr_object), _ArrObject1D_2)
+
+assert_type(pu.getdomain(seq_num_int), _ArrFloat1D_2)
+assert_type(pu.getdomain(seq_num_float), _ArrFloat1D_2)
+assert_type(pu.getdomain(seq_num_complex), _ArrComplex1D_2)
+assert_type(pu.getdomain(seq_num_object), _ArrObject1D_2)
+
+assert_type(pu.getdomain(seq_sct_int), _ArrFloat1D_2)
+assert_type(pu.getdomain(seq_sct_float), _ArrFloat1D_2)
+assert_type(pu.getdomain(seq_sct_complex), _ArrComplex1D_2)
+assert_type(pu.getdomain(seq_sct_object), _ArrObject1D_2)
+
+# mapparms
+
+assert_type(pu.mapparms(seq_num_int, seq_num_int), 
_Tuple2[float]) +assert_type(pu.mapparms(seq_num_int, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object]) + +assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_sct_object, seq_sct_object), _Tuple2[object]) + +assert_type(pu.mapparms(arr_int, arr_int), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_int, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_float), _Tuple2[np.floating]) +assert_type(pu.mapparms(arr_float, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_complex), _Tuple2[np.complexfloating]) +assert_type(pu.mapparms(arr_complex, arr_object), _Tuple2[object]) +assert_type(pu.mapparms(arr_object, arr_object), _Tuple2[object]) + +# mapdomain + +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_int), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_int, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_float, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_object), object) +assert_type(pu.mapdomain(num_complex, seq_num_object, seq_num_object), object) +assert_type(pu.mapdomain(num_object, seq_num_object, seq_num_object), object) + +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_object, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_object, seq_num_object, seq_num_object), _ArrObject1D) + +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_float), _ArrFloat1D) 
+assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_object, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_object, seq_sct_object, seq_sct_object), _ArrObject1D) + +assert_type(pu.mapdomain(arr_int, arr_int, arr_int), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_int, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_float, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_complex, arr_object, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_object, arr_object, arr_object), _ArrObject1D) diff --git a/python/numpy/typing/tests/data/reveal/polynomial_series.pyi b/python/numpy/typing/tests/data/reveal/polynomial_series.pyi new file mode 100644 index 000000000..93f0799c8 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -0,0 +1,138 @@ +from collections.abc import Sequence +from typing import Any, TypeAlias, assert_type + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating]] +_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating]] +_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev + +assert_type(npp.polynomial.polyroots(AR_f8), _ArrFloat1D64) +assert_type(npp.polynomial.polyroots(AR_c16), _ArrComplex1D128) +assert_type(npp.polynomial.polyroots(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polyfromroots(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polyfromroots(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyfromroots(AR_O), _ArrObject1D) + +# assert_type(npp.polynomial.polyadd(AR_b, AR_b), NoReturn) +assert_type(npp.polynomial.polyadd(AR_u4, AR_b), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_f8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyadd(AR_O, AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polymulx(AR_u4), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polymulx(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polypow(AR_u4, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_i8, 2), _ArrFloat1D) 
+assert_type(npp.polynomial.polypow(AR_f8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_c16, 2), _ArrComplex1D) +assert_type(npp.polynomial.polypow(AR_O, 2), _ArrObject1D) + +# assert_type(npp.polynomial.polyder(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyder(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyder(AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +# assert_type(npp.polynomial.polyint(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyint(AR_f8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval(AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval2d(AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval2d(AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval3d(AR_b, AR_b, AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_u4, AR_u4, AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander(AR_O, 3), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander2d(AR_f8, AR_f8, [4, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander2d(AR_c16, AR_c16, [4, 2]), npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander2d(AR_O, AR_O, [4, 2]), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander3d(AR_f8, AR_f8, AR_f8, [4, 3, 2]), npt.NDArray[np.floating]) +assert_type(npp.polynomial.polyvander3d(AR_c16, AR_c16, AR_c16, [4, 3, 2]), 
npt.NDArray[np.complexfloating]) +assert_type(npp.polynomial.polyvander3d(AR_O, AR_O, AR_O, [4, 3, 2]), npt.NDArray[np.object_]) + +assert_type( + npp.polynomial.polyfit(AR_f8, AR_f8, 2), + npt.NDArray[np.floating], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[npt.NDArray[np.floating], Sequence[np.inexact | np.int32]], +) +assert_type( + npp.polynomial.polyfit(AR_c16, AR_f8, 2), + npt.NDArray[np.complexfloating], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_c16, 1, full=True)[0], + npt.NDArray[np.complexfloating], +) + +assert_type(npp.chebyshev.chebgauss(2), tuple[_ArrFloat1D64, _ArrFloat1D64]) + +assert_type(npp.chebyshev.chebweight(AR_f8), npt.NDArray[np.float64]) +assert_type(npp.chebyshev.chebweight(AR_c16), npt.NDArray[np.complex128]) +assert_type(npp.chebyshev.chebweight(AR_O), npt.NDArray[np.object_]) + +assert_type(npp.chebyshev.poly2cheb(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.poly2cheb(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.poly2cheb(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.cheb2poly(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.cheb2poly(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.cheb2poly(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.chebpts1(6), _ArrFloat1D64) +assert_type(npp.chebyshev.chebpts2(6), _ArrFloat1D64) + +assert_type( + npp.chebyshev.chebinterpolate(np.tanh, 3), + npt.NDArray[np.float64 | np.complex128 | np.object_], +) diff --git a/python/numpy/typing/tests/data/reveal/random.pyi b/python/numpy/typing/tests/data/reveal/random.pyi new file mode 100644 index 000000000..e188eb028 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/random.pyi @@ -0,0 +1,1546 @@ +import threading +from collections.abc import Sequence +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt +from numpy.random._generator import Generator +from numpy.random._mt19937 import MT19937 +from numpy.random._pcg64 import PCG64 +from numpy.random._philox import Philox +from numpy.random._sfc64 import SFC64 +from numpy.random.bit_generator import SeedlessSeedSequence, SeedSequence + +def_rng = np.random.default_rng() +seed_seq = np.random.SeedSequence() +mt19937 = np.random.MT19937() +pcg64 = np.random.PCG64() +sfc64 = np.random.SFC64() +philox = np.random.Philox() +seedless_seq = SeedlessSeedSequence() + +assert_type(def_rng, Generator) +assert_type(mt19937, MT19937) +assert_type(pcg64, PCG64) +assert_type(sfc64, SFC64) +assert_type(philox, Philox) +assert_type(seed_seq, SeedSequence) +assert_type(seedless_seq, SeedlessSeedSequence) + +mt19937_jumped = mt19937.jumped() +mt19937_jumped3 = mt19937.jumped(3) +mt19937_raw = mt19937.random_raw() +mt19937_raw_arr = mt19937.random_raw(5) + +assert_type(mt19937_jumped, MT19937) +assert_type(mt19937_jumped3, MT19937) +assert_type(mt19937_raw, int) +assert_type(mt19937_raw_arr, npt.NDArray[np.uint64]) +assert_type(mt19937.lock, threading.Lock) + +pcg64_jumped = pcg64.jumped() +pcg64_jumped3 = pcg64.jumped(3) +pcg64_adv = pcg64.advance(3) +pcg64_raw = pcg64.random_raw() +pcg64_raw_arr = pcg64.random_raw(5) + +assert_type(pcg64_jumped, PCG64) +assert_type(pcg64_jumped3, PCG64) +assert_type(pcg64_adv, PCG64) +assert_type(pcg64_raw, int) +assert_type(pcg64_raw_arr, npt.NDArray[np.uint64]) +assert_type(pcg64.lock, threading.Lock) + +philox_jumped = philox.jumped() +philox_jumped3 = philox.jumped(3) +philox_adv = philox.advance(3) +philox_raw = philox.random_raw() +philox_raw_arr = philox.random_raw(5) + +assert_type(philox_jumped, Philox) 
+assert_type(philox_jumped3, Philox) +assert_type(philox_adv, Philox) +assert_type(philox_raw, int) +assert_type(philox_raw_arr, npt.NDArray[np.uint64]) +assert_type(philox.lock, threading.Lock) + +sfc64_raw = sfc64.random_raw() +sfc64_raw_arr = sfc64.random_raw(5) + +assert_type(sfc64_raw, int) +assert_type(sfc64_raw_arr, npt.NDArray[np.uint64]) +assert_type(sfc64.lock, threading.Lock) + +assert_type(seed_seq.pool, npt.NDArray[np.uint32]) +assert_type(seed_seq.entropy, int | Sequence[int] | None) +assert_type(seed_seq.spawn(1), list[np.random.SeedSequence]) +assert_type(seed_seq.generate_state(8, "uint32"), npt.NDArray[np.uint32 | np.uint64]) +assert_type(seed_seq.generate_state(8, "uint64"), npt.NDArray[np.uint32 | np.uint64]) + +def_gen: np.random.Generator = np.random.default_rng() + +D_arr_0p1: npt.NDArray[np.float64] = np.array([0.1]) +D_arr_0p5: npt.NDArray[np.float64] = np.array([0.5]) +D_arr_0p9: npt.NDArray[np.float64] = np.array([0.9]) +D_arr_1p5: npt.NDArray[np.float64] = np.array([1.5]) +I_arr_10: npt.NDArray[np.int_] = np.array([10], dtype=np.int_) +I_arr_20: npt.NDArray[np.int_] = np.array([20], dtype=np.int_) +D_arr_like_0p1: list[float] = [0.1] +D_arr_like_0p5: list[float] = [0.5] +D_arr_like_0p9: list[float] = [0.9] +D_arr_like_1p5: list[float] = [1.5] +I_arr_like_10: list[int] = [10] +I_arr_like_20: list[int] = [20] +D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]] +D_2D: npt.NDArray[np.float64] = np.array(D_2D_like) +S_out: npt.NDArray[np.float32] = np.empty(1, dtype=np.float32) +D_out: npt.NDArray[np.float64] = np.empty(1) + +assert_type(def_gen.standard_normal(), float) +assert_type(def_gen.standard_normal(dtype=np.float32), float) +assert_type(def_gen.standard_normal(dtype="float32"), float) +assert_type(def_gen.standard_normal(dtype="double"), float) +assert_type(def_gen.standard_normal(dtype=np.float64), float) +assert_type(def_gen.standard_normal(size=None), float) +assert_type(def_gen.standard_normal(size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_normal(size=1, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_normal(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.random(), float) +assert_type(def_gen.random(dtype=np.float32), float) +assert_type(def_gen.random(dtype="float32"), float) +assert_type(def_gen.random(dtype="double"), float) +assert_type(def_gen.random(dtype=np.float64), float) +assert_type(def_gen.random(size=None), float) +assert_type(def_gen.random(size=1), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.random(dtype=np.float32, 
out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.random(size=1, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.random(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.random(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_cauchy(), float) +assert_type(def_gen.standard_cauchy(size=None), float) +assert_type(def_gen.standard_cauchy(size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_exponential(), float) +assert_type(def_gen.standard_exponential(method="inv"), float) +assert_type(def_gen.standard_exponential(dtype=np.float32), float) +assert_type(def_gen.standard_exponential(dtype="float32"), float) +assert_type(def_gen.standard_exponential(dtype="double"), float) +assert_type(def_gen.standard_exponential(dtype=np.float64), float) +assert_type(def_gen.standard_exponential(size=None), float) +assert_type(def_gen.standard_exponential(size=None, method="inv"), float) +assert_type(def_gen.standard_exponential(size=1, method="inv"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype=np.float32), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype="f4", method="inv"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_exponential(size=1, dtype=np.float64, method="inv"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="f8"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64"), npt.NDArray[np.float64]) +assert_type(def_gen.standard_exponential(size=1, dtype="float64", out=D_out), npt.NDArray[np.float64]) + +assert_type(def_gen.zipf(1.5), int) +assert_type(def_gen.zipf(1.5, size=None), int) +assert_type(def_gen.zipf(1.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_1p5), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_1p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_like_1p5), npt.NDArray[np.int64]) +assert_type(def_gen.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.weibull(0.5), float) +assert_type(def_gen.weibull(0.5, size=None), float) +assert_type(def_gen.weibull(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_t(0.5), float) +assert_type(def_gen.standard_t(0.5, size=None), float) +assert_type(def_gen.standard_t(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) 
+assert_type(def_gen.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.poisson(0.5), int) +assert_type(def_gen.poisson(0.5, size=None), int) +assert_type(def_gen.poisson(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.power(0.5), float) +assert_type(def_gen.power(0.5, size=None), float) +assert_type(def_gen.power(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.pareto(0.5), float) +assert_type(def_gen.pareto(0.5, size=None), float) +assert_type(def_gen.pareto(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.chisquare(0.5), float) +assert_type(def_gen.chisquare(0.5, size=None), float) +assert_type(def_gen.chisquare(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.exponential(0.5), float) +assert_type(def_gen.exponential(0.5, size=None), float) +assert_type(def_gen.exponential(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.geometric(0.5), int) +assert_type(def_gen.geometric(0.5, size=None), int) +assert_type(def_gen.geometric(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.logseries(0.5), int) +assert_type(def_gen.logseries(0.5, size=None), int) +assert_type(def_gen.logseries(0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.rayleigh(0.5), float) +assert_type(def_gen.rayleigh(0.5, size=None), float) +assert_type(def_gen.rayleigh(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_0p5, 
size=1), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.standard_gamma(0.5), float) +assert_type(def_gen.standard_gamma(0.5, size=None), float) +assert_type(def_gen.standard_gamma(0.5, dtype="float32"), float) +assert_type(def_gen.standard_gamma(0.5, size=None, dtype="float32"), float) +assert_type(def_gen.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_0p5, dtype="f4"), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out), npt.NDArray[np.float32]) +assert_type(def_gen.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(0.5, out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, out=D_out), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64), npt.NDArray[np.float64]) + +assert_type(def_gen.vonmises(0.5, 0.5), float) +assert_type(def_gen.vonmises(0.5, 0.5, size=None), float) +assert_type(def_gen.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.wald(0.5, 0.5), float) +assert_type(def_gen.wald(0.5, 0.5, size=None), float) +assert_type(def_gen.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.uniform(0.5, 0.5), float) +assert_type(def_gen.uniform(0.5, 0.5, size=None), float) 
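+# a non-None size or any array-like argument promotes the result from float to NDArray[float64]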
+assert_type(def_gen.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.beta(0.5, 0.5), float) +assert_type(def_gen.beta(0.5, 0.5, size=None), float) +assert_type(def_gen.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.f(0.5, 0.5), float) +assert_type(def_gen.f(0.5, 0.5, size=None), float) +assert_type(def_gen.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.gamma(0.5, 0.5), float) +assert_type(def_gen.gamma(0.5, 0.5, size=None), float) +assert_type(def_gen.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1), 
npt.NDArray[np.float64]) +assert_type(def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.gumbel(0.5, 0.5), float) +assert_type(def_gen.gumbel(0.5, 0.5, size=None), float) +assert_type(def_gen.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.laplace(0.5, 0.5), float) +assert_type(def_gen.laplace(0.5, 0.5, size=None), float) +assert_type(def_gen.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.logistic(0.5, 0.5), float) +assert_type(def_gen.logistic(0.5, 0.5, size=None), float) +assert_type(def_gen.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.lognormal(0.5, 0.5), float) +assert_type(def_gen.lognormal(0.5, 0.5, size=None), float) +assert_type(def_gen.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) 
+assert_type(def_gen.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.noncentral_chisquare(0.5, 0.5), float) +assert_type(def_gen.noncentral_chisquare(0.5, 0.5, size=None), float) +assert_type(def_gen.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.normal(0.5, 0.5), float) +assert_type(def_gen.normal(0.5, 0.5, size=None), float) +assert_type(def_gen.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.triangular(0.1, 0.5, 0.9), float) +assert_type(def_gen.triangular(0.1, 0.5, 0.9, size=None), float) +assert_type(def_gen.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) 
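+# all three shape parameters broadcast together when supplied as arrays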
+assert_type(def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9), float) +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=None), float) +assert_type(def_gen.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(def_gen.binomial(10, 0.5), int) +assert_type(def_gen.binomial(10, 0.5, size=None), int) +assert_type(def_gen.binomial(10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.negative_binomial(10, 0.5), int) +assert_type(def_gen.negative_binomial(10, 0.5, size=None), int) +assert_type(def_gen.negative_binomial(10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.int64]) + +assert_type(def_gen.hypergeometric(20, 20, 10), int) +assert_type(def_gen.hypergeometric(20, 20, 10, size=None), int) +assert_type(def_gen.hypergeometric(20, 20, 10, size=1), 
npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.int64]) +assert_type(def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.int64]) + +I_int64_100: npt.NDArray[np.int64] = np.array([100], dtype=np.int64) + +assert_type(def_gen.integers(0, 100), np.int64) +assert_type(def_gen.integers(100), np.int64) +assert_type(def_gen.integers([100]), npt.NDArray[np.int64]) +assert_type(def_gen.integers(0, [100]), npt.NDArray[np.int64]) + +I_bool_low: npt.NDArray[np.bool] = np.array([0], dtype=np.bool) +I_bool_low_like: list[int] = [0] +I_bool_high_open: npt.NDArray[np.bool] = np.array([1], dtype=np.bool) +I_bool_high_closed: npt.NDArray[np.bool] = np.array([1], dtype=np.bool) + +assert_type(def_gen.integers(2, dtype=bool), bool) +assert_type(def_gen.integers(0, 2, dtype=bool), bool) +assert_type(def_gen.integers(1, dtype=bool, endpoint=True), bool) +assert_type(def_gen.integers(0, 1, dtype=bool, endpoint=True), bool) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) + +assert_type(def_gen.integers(2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) +assert_type(def_gen.integers(0, I_bool_high_closed, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) + +I_u1_low: npt.NDArray[np.uint8] = np.array([0], dtype=np.uint8) +I_u1_low_like: list[int] = [0] +I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) +I_u1_high_closed: 
npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) + +assert_type(def_gen.integers(256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) + +assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) + +assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) +assert_type(def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) + +I_u2_low: npt.NDArray[np.uint16] = np.array([0], dtype=np.uint16) +I_u2_low_like: list[int] = [0] +I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) +I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) + +assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(I_u2_low_like, 65535, 
dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) + +assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) + +assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) +assert_type(def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) + +I_u4_low: npt.NDArray[np.uint32] = np.array([0], dtype=np.uint32) +I_u4_low_like: list[int] = [0] +I_u4_high_open: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) +I_u4_high_closed: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) + +assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(0, 
I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) + +assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) + +assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(0, 4294967296, 
dtype=np.uint), np.uint) +assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) +assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) + +I_u8_low: npt.NDArray[np.uint64] = np.array([0], dtype=np.uint64) +I_u8_low_like: list[int] = [0] +I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) +I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) + +assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) + +assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) + +assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) 
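An aside on the bound constants in these hunks: `high` is exclusive by default, so 2**64 is a legal upper bound for a uint64 draw, while `endpoint=True` makes `high` inclusive and the bound drops to 2**64 - 1. A minimal runnable sketch of the convention (illustrative only, not part of the patch):

from typing import assert_type

import numpy as np

rng = np.random.default_rng()  # stand-in for the patch's `def_gen`

# Half-open by default: high=2**64 is legal because it is never drawn.
assert_type(rng.integers(2**64, dtype=np.uint64), np.uint64)
# Closed with endpoint=True: the largest representable value is the bound.
assert_type(rng.integers(2**64 - 1, dtype=np.uint64, endpoint=True), np.uint64)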
+assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) +assert_type(def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) + +I_i1_low: npt.NDArray[np.int8] = np.array([-128], dtype=np.int8) +I_i1_low_like: list[int] = [-128] +I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) +I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) + +assert_type(def_gen.integers(128, dtype="i1"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) +assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) + +assert_type(def_gen.integers(128, dtype="int8"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) +assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) + +assert_type(def_gen.integers(128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) 
+assert_type(def_gen.integers(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) +assert_type(def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) + +I_i2_low: npt.NDArray[np.int16] = np.array([-32768], dtype=np.int16) +I_i2_low_like: list[int] = [-32768] +I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) +I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) + +assert_type(def_gen.integers(32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) + +assert_type(def_gen.integers(32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) + +assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) +assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) 
+assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) + +I_i4_low: npt.NDArray[np.int32] = np.array([-2147483648], dtype=np.int32) +I_i4_low_like: list[int] = [-2147483648] +I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) +I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) + +assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) + +assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) + +assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) +assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) + +I_i8_low: npt.NDArray[np.int64] = 
np.array([-9223372036854775808], dtype=np.int64) +I_i8_low_like: list[int] = [-9223372036854775808] +I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) +I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) + +assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) + +assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) + +assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) +assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) 
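Taken together, these `integers` hunks pin down a single overload contract: plain-int bounds select the scalar return type for the requested dtype, while any array-like bound broadcasts and promotes the result to an ndarray of that dtype. A compact sketch of both branches (illustrative only, not part of the patch):

from typing import assert_type

import numpy as np
import numpy.typing as npt

rng = np.random.default_rng()  # stand-in for the patch's `def_gen`

assert_type(rng.integers(128, dtype=np.int8), np.int8)  # scalar bounds -> scalar
assert_type(
    rng.integers(np.array([127], dtype=np.int8), dtype=np.int8),
    npt.NDArray[np.int8],  # array-like bound -> ndarray
)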
+assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) + +assert_type(def_gen.bit_generator, np.random.BitGenerator) + +assert_type(def_gen.bytes(2), bytes) + +assert_type(def_gen.choice(5), int) +assert_type(def_gen.choice(5, 3), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, replace=True), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.int64]) +assert_type(def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.int64]) + +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) +assert_type(def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) + +assert_type(def_gen.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) +assert_type(def_gen.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) +assert_type(def_gen.dirichlet(np.array([0.5, 0.5]), size=3), npt.NDArray[np.float64]) + +assert_type(def_gen.multinomial(20, [1 / 6.0] * 6), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(20, np.array([0.5, 0.5])), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(20, [1 / 6.0] * 6, size=2), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2)), npt.NDArray[np.int64]) +assert_type(def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2)), npt.NDArray[np.int64]) + +assert_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7)), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count"), npt.NDArray[np.int64]) +assert_type(def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals"), npt.NDArray[np.int64]) + +assert_type(def_gen.multivariate_normal([0.0], [[1.0]]), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal(np.array([0.0]), [[1.0]]), npt.NDArray[np.float64]) +assert_type(def_gen.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) + +assert_type(def_gen.permutation(10), npt.NDArray[np.int64]) +assert_type(def_gen.permutation([1, 2, 3, 4]), npt.NDArray[Any]) +assert_type(def_gen.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) +assert_type(def_gen.permutation(D_2D, axis=1), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D_like), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D_like, out=D_2D), npt.NDArray[Any]) +assert_type(def_gen.permuted(D_2D, axis=1, out=D_2D), npt.NDArray[Any]) + 
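Before the `shuffle` hunk below, the runtime contract behind these return-type assertions is worth recalling: `shuffle` mutates its argument in place and returns None, `permutation` returns a new array, and `permuted` returns the result even when writing through `out=`. A short sketch (illustrative only, not part of the patch):

import numpy as np

rng = np.random.default_rng(0)
arr = np.arange(6).reshape(2, 3)

assert rng.shuffle(arr) is None           # in place along axis 0; returns None
new = rng.permutation(arr)                # fresh copy; `arr` is left untouched
out = np.empty_like(arr)
ret = rng.permuted(arr, axis=1, out=out)  # each row shuffled independently
assert ret is out                         # the out array is also returned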
+assert_type(def_gen.shuffle(np.arange(10)), None) +assert_type(def_gen.shuffle([1, 2, 3, 4, 5]), None) +assert_type(def_gen.shuffle(D_2D, axis=1), None) + +assert_type(np.random.Generator(pcg64), np.random.Generator) +assert_type(def_gen.__str__(), str) +assert_type(def_gen.__repr__(), str) +assert_type(def_gen.__setstate__(dict(def_gen.bit_generator.state)), None) + +# RandomState +random_st: np.random.RandomState = np.random.RandomState() + +assert_type(random_st.standard_normal(), float) +assert_type(random_st.standard_normal(size=None), float) +assert_type(random_st.standard_normal(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.random(), float) +assert_type(random_st.random(size=None), float) +assert_type(random_st.random(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_cauchy(), float) +assert_type(random_st.standard_cauchy(size=None), float) +assert_type(random_st.standard_cauchy(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_exponential(), float) +assert_type(random_st.standard_exponential(size=None), float) +assert_type(random_st.standard_exponential(size=1), npt.NDArray[np.float64]) + +assert_type(random_st.zipf(1.5), int) +assert_type(random_st.zipf(1.5, size=None), int) +assert_type(random_st.zipf(1.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_1p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_like_1p5), npt.NDArray[np.long]) +assert_type(random_st.zipf(D_arr_like_1p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.weibull(0.5), float) +assert_type(random_st.weibull(0.5, size=None), float) +assert_type(random_st.weibull(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.weibull(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_t(0.5), float) +assert_type(random_st.standard_t(0.5, size=None), float) +assert_type(random_st.standard_t(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_t(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.poisson(0.5), int) +assert_type(random_st.poisson(0.5, size=None), int) +assert_type(random_st.poisson(0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.poisson(D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.power(0.5), float) +assert_type(random_st.power(0.5, size=None), float) +assert_type(random_st.power(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.power(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.pareto(0.5), float) +assert_type(random_st.pareto(0.5, size=None), float) 
+assert_type(random_st.pareto(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.pareto(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.chisquare(0.5), float) +assert_type(random_st.chisquare(0.5, size=None), float) +assert_type(random_st.chisquare(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.chisquare(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.exponential(0.5), float) +assert_type(random_st.exponential(0.5, size=None), float) +assert_type(random_st.exponential(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.exponential(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.geometric(0.5), int) +assert_type(random_st.geometric(0.5, size=None), int) +assert_type(random_st.geometric(0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.geometric(D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.logseries(0.5), int) +assert_type(random_st.logseries(0.5, size=None), int) +assert_type(random_st.logseries(0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.logseries(D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.rayleigh(0.5), float) +assert_type(random_st.rayleigh(0.5, size=None), float) +assert_type(random_st.rayleigh(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.rayleigh(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.standard_gamma(0.5), float) +assert_type(random_st.standard_gamma(0.5, size=None), float) +assert_type(random_st.standard_gamma(0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.standard_gamma(D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.vonmises(0.5, 0.5), float) +assert_type(random_st.vonmises(0.5, 0.5, size=None), float) +assert_type(random_st.vonmises(0.5, 0.5, size=1), npt.NDArray[np.float64]) 
+assert_type(random_st.vonmises(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.wald(0.5, 0.5), float) +assert_type(random_st.wald(0.5, 0.5, size=None), float) +assert_type(random_st.wald(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.wald(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.uniform(0.5, 0.5), float) +assert_type(random_st.uniform(0.5, 0.5, size=None), float) +assert_type(random_st.uniform(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.beta(0.5, 0.5), float) +assert_type(random_st.beta(0.5, 0.5, size=None), float) +assert_type(random_st.beta(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.beta(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5), 
npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.f(0.5, 0.5), float) +assert_type(random_st.f(0.5, 0.5, size=None), float) +assert_type(random_st.f(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.f(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.gamma(0.5, 0.5), float) +assert_type(random_st.gamma(0.5, 0.5, size=None), float) +assert_type(random_st.gamma(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.gumbel(0.5, 0.5), float) +assert_type(random_st.gumbel(0.5, 0.5, size=None), float) +assert_type(random_st.gumbel(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.laplace(0.5, 0.5), float) +assert_type(random_st.laplace(0.5, 0.5, size=None), float) +assert_type(random_st.laplace(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_0p5, 
size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.logistic(0.5, 0.5), float) +assert_type(random_st.logistic(0.5, 0.5, size=None), float) +assert_type(random_st.logistic(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.lognormal(0.5, 0.5), float) +assert_type(random_st.lognormal(0.5, 0.5, size=None), float) +assert_type(random_st.lognormal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.noncentral_chisquare(0.5, 0.5), float) +assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=None), float) +assert_type(random_st.noncentral_chisquare(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) 
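Every two-parameter distribution in this stretch is tested against the same shape rule: two scalar arguments with `size=None` yield a bare float, while any array-like argument or an explicit `size` promotes the result to `npt.NDArray[np.float64]` (the integer-valued legacy methods such as `zipf`, `poisson`, and `geometric` follow the same rule but land on `npt.NDArray[np.long]`). A runnable sketch of the rule (illustrative only, not part of the patch):

import numpy as np

random_st = np.random.RandomState(0)

assert isinstance(random_st.normal(0.5, 0.5), float)              # scalar in, scalar out
assert isinstance(random_st.normal([0.5, 0.5], 0.5), np.ndarray)  # array-like broadcasts
assert random_st.normal(0.5, 0.5, size=1).shape == (1,)           # explicit size forces an array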
+assert_type(random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.normal(0.5, 0.5), float) +assert_type(random_st.normal(0.5, 0.5, size=None), float) +assert_type(random_st.normal(0.5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, 0.5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, 0.5), npt.NDArray[np.float64]) +assert_type(random_st.normal(0.5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_0p5, D_arr_0p5, size=1), npt.NDArray[np.float64]) +assert_type(random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.triangular(0.1, 0.5, 0.9), float) +assert_type(random_st.triangular(0.1, 0.5, 0.9, size=None), float) +assert_type(random_st.triangular(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9), float) +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=None), float) +assert_type(random_st.noncentral_f(0.1, 0.5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1), npt.NDArray[np.float64]) +assert_type(random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1), npt.NDArray[np.float64]) + +assert_type(random_st.binomial(10, 0.5), int) 
+assert_type(random_st.binomial(10, 0.5, size=None), int) +assert_type(random_st.binomial(10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.negative_binomial(10, 0.5), int) +assert_type(random_st.negative_binomial(10, 0.5, size=None), int) +assert_type(random_st.negative_binomial(10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, 0.5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, 0.5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1), npt.NDArray[np.long]) +assert_type(random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1), npt.NDArray[np.long]) + +assert_type(random_st.hypergeometric(20, 20, 10), int) +assert_type(random_st.hypergeometric(20, 20, 10, size=None), int) +assert_type(random_st.hypergeometric(20, 20, 10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, 20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_20, 10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, 20, I_arr_10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1), npt.NDArray[np.long]) +assert_type(random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1), npt.NDArray[np.long]) + +assert_type(random_st.randint(0, 100), int) +assert_type(random_st.randint(100), int) +assert_type(random_st.randint([100]), npt.NDArray[np.long]) +assert_type(random_st.randint(0, [100]), npt.NDArray[np.long]) + +assert_type(random_st.randint(2, dtype=bool), bool) +assert_type(random_st.randint(0, 2, dtype=bool), bool) +assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) 
+assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) + +assert_type(random_st.randint(2, dtype=np.bool), np.bool) +assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) +assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) +assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) + +assert_type(random_st.randint(256, dtype="u1"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) +assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) + +assert_type(random_st.randint(65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) +assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) + +assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) 
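Each integer width in the `randint` hunks is exercised under three dtype spellings (the character code, the full name, and the scalar type) because the stubs must resolve all three to the same overload. At runtime the spellings are literally the same dtype, e.g.:

import numpy as np

# Illustrative only, not part of the patch.
assert np.dtype("u4") == np.dtype("uint32") == np.dtype(np.uint32)
assert np.dtype("i8") == np.dtype("int64") == np.dtype(np.int64)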
+assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) + +assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) +assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) + +assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) +assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) + +assert_type(random_st.randint(128, dtype="i1"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) + +assert_type(random_st.randint(128, dtype="int8"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) +assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) + +assert_type(random_st.randint(128, dtype=np.int8), np.int8) +assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) +assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) +assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) 
+assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) + +assert_type(random_st.randint(32768, dtype="i2"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) + +assert_type(random_st.randint(32768, dtype="int16"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) + +assert_type(random_st.randint(32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) +assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) + +assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) + +assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) +assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) + +assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) + +assert_type(random_st.randint(9223372036854775808, 
dtype="int64"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) + +assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) + +assert_type(random_st._bit_generator, np.random.BitGenerator) + +assert_type(random_st.bytes(2), bytes) + +assert_type(random_st.choice(5), int) +assert_type(random_st.choice(5, 3), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, replace=True), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, p=[1 / 5] * 5), npt.NDArray[np.long]) +assert_type(random_st.choice(5, 3, p=[1 / 5] * 5, replace=False), npt.NDArray[np.long]) + +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"]), Any) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3), npt.NDArray[Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4), npt.NDArray[Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True), npt.NDArray[Any]) +assert_type(random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4])), npt.NDArray[Any]) + +assert_type(random_st.dirichlet([0.5, 0.5]), npt.NDArray[np.float64]) +assert_type(random_st.dirichlet(np.array([0.5, 0.5])), npt.NDArray[np.float64]) +assert_type(random_st.dirichlet(np.array([0.5, 0.5]), size=3), npt.NDArray[np.float64]) + +assert_type(random_st.multinomial(20, [1 / 6.0] * 6), npt.NDArray[np.long]) +assert_type(random_st.multinomial(20, np.array([0.5, 0.5])), npt.NDArray[np.long]) +assert_type(random_st.multinomial(20, [1 / 6.0] * 6, size=2), npt.NDArray[np.long]) + +assert_type(random_st.multivariate_normal([0.0], [[1.0]]), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal(np.array([0.0]), [[1.0]]), npt.NDArray[np.float64]) +assert_type(random_st.multivariate_normal([0.0], np.array([[1.0]])), npt.NDArray[np.float64]) + +assert_type(random_st.permutation(10), npt.NDArray[np.long]) +assert_type(random_st.permutation([1, 2, 3, 4]), npt.NDArray[Any]) +assert_type(random_st.permutation(np.array([1, 2, 3, 4])), npt.NDArray[Any]) +assert_type(random_st.permutation(D_2D), npt.NDArray[Any]) + +assert_type(random_st.shuffle(np.arange(10)), None) +assert_type(random_st.shuffle([1, 2, 3, 4, 5]), None) +assert_type(random_st.shuffle(D_2D), None) + +assert_type(np.random.RandomState(pcg64), np.random.RandomState) +assert_type(np.random.RandomState(0), np.random.RandomState) +assert_type(np.random.RandomState([0, 1, 2]), np.random.RandomState) +assert_type(random_st.__str__(), str) +assert_type(random_st.__repr__(), str) +random_st_state = random_st.__getstate__() +assert_type(random_st_state, dict[str, Any]) 
+assert_type(random_st.__setstate__(random_st_state), None)
+assert_type(random_st.seed(), None)
+assert_type(random_st.seed(1), None)
+assert_type(random_st.seed([0, 1]), None)
+random_st_get_state = random_st.get_state()
+assert_type(random_st_get_state, dict[str, Any] | tuple[str, npt.NDArray[np.uint32], int, int, float])
+random_st_get_state_legacy = random_st.get_state(legacy=True)
+assert_type(random_st_get_state_legacy, dict[str, Any] | tuple[str, npt.NDArray[np.uint32], int, int, float])
+assert_type(random_st.set_state(random_st_get_state), None)
+
+assert_type(random_st.rand(), float)
+assert_type(random_st.rand(1), npt.NDArray[np.float64])
+assert_type(random_st.rand(1, 2), npt.NDArray[np.float64])
+assert_type(random_st.randn(), float)
+assert_type(random_st.randn(1), npt.NDArray[np.float64])
+assert_type(random_st.randn(1, 2), npt.NDArray[np.float64])
+assert_type(random_st.random_sample(), float)
+assert_type(random_st.random_sample(1), npt.NDArray[np.float64])
+assert_type(random_st.random_sample(size=(1, 2)), npt.NDArray[np.float64])
+
+assert_type(random_st.tomaxint(), int)
+assert_type(random_st.tomaxint(1), npt.NDArray[np.int64])
+assert_type(random_st.tomaxint((1,)), npt.NDArray[np.int64])
+
+assert_type(np.random.mtrand.set_bit_generator(pcg64), None)
+assert_type(np.random.mtrand.get_bit_generator(), np.random.BitGenerator)
diff --git a/python/numpy/typing/tests/data/reveal/rec.pyi b/python/numpy/typing/tests/data/reveal/rec.pyi
new file mode 100644
index 000000000..aacf217e4
--- /dev/null
+++ b/python/numpy/typing/tests/data/reveal/rec.pyi
@@ -0,0 +1,171 @@
+import io
+from typing import Any, TypeAlias, assert_type
+
+import numpy as np
+import numpy.typing as npt
+
+_RecArray: TypeAlias = np.recarray[tuple[Any, ...], np.dtype[np.record]]
+
+AR_i8: npt.NDArray[np.int64]
+REC_AR_V: _RecArray
+AR_LIST: list[npt.NDArray[np.int64]]
+
+record: np.record
+file_obj: io.BufferedIOBase
+
+assert_type(np.rec.format_parser(
+    formats=[np.float64, np.int64, np.bool],
+    names=["f8", "i8", "?"],
+    titles=None,
+    aligned=True,
+), np.rec.format_parser)
+assert_type(np.rec.format_parser.dtype, np.dtype[np.void])
+
+assert_type(record.field_a, Any)
+assert_type(record.field_b, Any)
+assert_type(record["field_a"], Any)
+assert_type(record["field_b"], Any)
+assert_type(record.pprint(), str)
+record.field_c = 5
+
+assert_type(REC_AR_V.field(0), Any)
+assert_type(REC_AR_V.field("field_a"), Any)
+assert_type(REC_AR_V.field(0, AR_i8), None)
+assert_type(REC_AR_V.field("field_a", AR_i8), None)
+assert_type(REC_AR_V["field_a"], npt.NDArray[Any])
+assert_type(REC_AR_V.field_a, Any)
+assert_type(REC_AR_V.__array_finalize__(object()), None)
+
+assert_type(
+    np.recarray(
+        shape=(10, 5),
+        formats=[np.float64, np.int64, np.bool],
+        order="K",
+        byteorder="|",
+    ),
+    _RecArray,
+)
+
+assert_type(
+    np.recarray(
+        shape=(10, 5),
+        dtype=[("f8", np.float64), ("i8", np.int64)],
+        strides=(5, 5),
+    ),
+    np.recarray,
+)
+
+assert_type(np.rec.fromarrays(AR_LIST), np.recarray)
+assert_type(
+    np.rec.fromarrays(AR_LIST, dtype=np.int64),
+    np.recarray,
+)
+assert_type(
+    np.rec.fromarrays(
+        AR_LIST,
+        formats=[np.int64, np.float64],
+        names=["i8", "f8"]
+    ),
+    _RecArray,
+)
+
+assert_type(
+    np.rec.fromrecords((1, 1.5)),
+    _RecArray
+)
+
+assert_type(
+    np.rec.fromrecords(
+        [(1, 1.5)],
+        dtype=[("i8", np.int64), ("f8", np.float64)],
+    ),
+    _RecArray,
+)
+
+assert_type(
+    np.rec.fromrecords(
+        REC_AR_V,
+        formats=[np.int64, np.float64],
+        names=["i8", "f8"]
+    ),
+    _RecArray,
+)
+
+assert_type(
+    np.rec.fromstring(
+        b"(1, 1.5)",
+        dtype=[("i8", np.int64), ("f8", np.float64)],
("f8", np.float64)], + ), + _RecArray, +) + +assert_type( + np.rec.fromstring( + REC_AR_V, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.fromfile( + "test_file.txt", + dtype=[("i8", np.int64), ("f8", np.float64)], + ), + np.recarray, +) + +assert_type( + np.rec.fromfile( + file_obj, + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type(np.rec.array(AR_i8), np.recarray[tuple[Any, ...], np.dtype[np.int64]]) + +assert_type( + np.rec.array([(1, 1.5)], dtype=[("i8", np.int64), ("f8", np.float64)]), + np.recarray, +) + +assert_type( + np.rec.array( + [(1, 1.5)], + formats=[np.int64, np.float64], + names=["i8", "f8"] + ), + _RecArray, +) + +assert_type( + np.rec.array( + None, + dtype=np.float64, + shape=(10, 3), + ), + np.recarray, +) + +assert_type( + np.rec.array( + None, + formats=[np.int64, np.float64], + names=["i8", "f8"], + shape=(10, 3), + ), + _RecArray, +) + +assert_type( + np.rec.array(file_obj, dtype=np.float64), + np.recarray, +) + +assert_type( + np.rec.array(file_obj, formats=[np.int64, np.float64], names=["i8", "f8"]), + _RecArray, +) diff --git a/python/numpy/typing/tests/data/reveal/scalars.pyi b/python/numpy/typing/tests/data/reveal/scalars.pyi new file mode 100644 index 000000000..d7b277735 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/scalars.pyi @@ -0,0 +1,191 @@ +from typing import Any, Literal, TypeAlias, assert_type + +import numpy as np + +_1: TypeAlias = Literal[1] + +b: np.bool +u8: np.uint64 +i8: np.int64 +f8: np.float64 +c8: np.complex64 +c16: np.complex128 +m: np.timedelta64 +U: np.str_ +S: np.bytes_ +V: np.void +O: np.object_ # cannot exists at runtime + +array_nd: np.ndarray[Any, Any] +array_0d: np.ndarray[tuple[()], Any] +array_2d_2x2: np.ndarray[tuple[Literal[2], Literal[2]], Any] + +assert_type(c8.real, np.float32) +assert_type(c8.imag, np.float32) + +assert_type(c8.real.real, np.float32) +assert_type(c8.real.imag, np.float32) + +assert_type(c8.itemsize, int) +assert_type(c8.shape, tuple[()]) +assert_type(c8.strides, tuple[()]) + +assert_type(c8.ndim, Literal[0]) +assert_type(c8.size, Literal[1]) + +assert_type(c8.squeeze(), np.complex64) +assert_type(c8.byteswap(), np.complex64) +assert_type(c8.transpose(), np.complex64) + +assert_type(c8.dtype, np.dtype[np.complex64]) + +assert_type(c8.real, np.float32) +assert_type(c16.imag, np.float64) + +assert_type(np.str_('foo'), np.str_) + +assert_type(V[0], Any) +assert_type(V["field1"], Any) +assert_type(V[["field1", "field2"]], np.void) +V[0] = 5 + +# Aliases +assert_type(np.bool_(), np.bool[Literal[False]]) +assert_type(np.byte(), np.byte) +assert_type(np.short(), np.short) +assert_type(np.intc(), np.intc) +assert_type(np.intp(), np.intp) +assert_type(np.int_(), np.int_) +assert_type(np.long(), np.long) +assert_type(np.longlong(), np.longlong) + +assert_type(np.ubyte(), np.ubyte) +assert_type(np.ushort(), np.ushort) +assert_type(np.uintc(), np.uintc) +assert_type(np.uintp(), np.uintp) +assert_type(np.uint(), np.uint) +assert_type(np.ulong(), np.ulong) +assert_type(np.ulonglong(), np.ulonglong) + +assert_type(np.half(), np.half) +assert_type(np.single(), np.single) +assert_type(np.double(), np.double) +assert_type(np.longdouble(), np.longdouble) + +assert_type(np.csingle(), np.csingle) +assert_type(np.cdouble(), np.cdouble) +assert_type(np.clongdouble(), np.clongdouble) + +assert_type(b.item(), bool) +assert_type(i8.item(), int) +assert_type(u8.item(), int) +assert_type(f8.item(), float) +assert_type(c16.item(), 
+assert_type(U.item(), str)
+assert_type(S.item(), bytes)
+
+assert_type(b.tolist(), bool)
+assert_type(i8.tolist(), int)
+assert_type(u8.tolist(), int)
+assert_type(f8.tolist(), float)
+assert_type(c16.tolist(), complex)
+assert_type(U.tolist(), str)
+assert_type(S.tolist(), bytes)
+
+assert_type(b.ravel(), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(i8.ravel(), np.ndarray[tuple[int], np.dtype[np.int64]])
+assert_type(u8.ravel(), np.ndarray[tuple[int], np.dtype[np.uint64]])
+assert_type(f8.ravel(), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(c16.ravel(), np.ndarray[tuple[int], np.dtype[np.complex128]])
+assert_type(U.ravel(), np.ndarray[tuple[int], np.dtype[np.str_]])
+assert_type(S.ravel(), np.ndarray[tuple[int], np.dtype[np.bytes_]])
+
+assert_type(b.flatten(), np.ndarray[tuple[int], np.dtype[np.bool]])
+assert_type(i8.flatten(), np.ndarray[tuple[int], np.dtype[np.int64]])
+assert_type(u8.flatten(), np.ndarray[tuple[int], np.dtype[np.uint64]])
+assert_type(f8.flatten(), np.ndarray[tuple[int], np.dtype[np.float64]])
+assert_type(c16.flatten(), np.ndarray[tuple[int], np.dtype[np.complex128]])
+assert_type(U.flatten(), np.ndarray[tuple[int], np.dtype[np.str_]])
+assert_type(S.flatten(), np.ndarray[tuple[int], np.dtype[np.bytes_]])
+
+assert_type(b.reshape(()), np.bool)
+assert_type(i8.reshape([]), np.int64)
+assert_type(b.reshape(1), np.ndarray[tuple[_1], np.dtype[np.bool]])
+assert_type(i8.reshape(-1), np.ndarray[tuple[_1], np.dtype[np.int64]])
+assert_type(u8.reshape(1, 1), np.ndarray[tuple[_1, _1], np.dtype[np.uint64]])
+assert_type(f8.reshape(1, -1), np.ndarray[tuple[_1, _1], np.dtype[np.float64]])
+assert_type(c16.reshape(1, 1, 1), np.ndarray[tuple[_1, _1, _1], np.dtype[np.complex128]])
+assert_type(U.reshape(1, 1, 1, 1), np.ndarray[tuple[_1, _1, _1, _1], np.dtype[np.str_]])
+assert_type(
+    S.reshape(1, 1, 1, 1, 1),
+    np.ndarray[
+        # len(shape) >= 5
+        tuple[_1, _1, _1, _1, _1, *tuple[_1, ...]],
+        np.dtype[np.bytes_],
+    ],
+)
+
+assert_type(i8.astype(float), Any)
+assert_type(i8.astype(np.float64), np.float64)
+
+assert_type(i8.view(), np.int64)
+assert_type(i8.view(np.float64), np.float64)
+assert_type(i8.view(float), Any)
+assert_type(i8.view(np.float64, np.ndarray), np.float64)
+
+assert_type(i8.getfield(float), Any)
+assert_type(i8.getfield(np.float64), np.float64)
+assert_type(i8.getfield(np.float64, 8), np.float64)
+
+assert_type(f8.as_integer_ratio(), tuple[int, int])
+assert_type(f8.is_integer(), bool)
+assert_type(f8.__trunc__(), int)
+assert_type(f8.__getformat__("float"), str)
+assert_type(f8.hex(), str)
+assert_type(np.float64.fromhex("0x0.0p+0"), np.float64)
+
+assert_type(f8.__getnewargs__(), tuple[float])
+assert_type(c16.__getnewargs__(), tuple[float, float])
+
+assert_type(i8.numerator, np.int64)
+assert_type(i8.denominator, Literal[1])
+assert_type(u8.numerator, np.uint64)
+assert_type(u8.denominator, Literal[1])
+assert_type(m.numerator, np.timedelta64)
+assert_type(m.denominator, Literal[1])
+
+assert_type(round(i8), int)
+assert_type(round(i8, 3), np.int64)
+assert_type(round(u8), int)
+assert_type(round(u8, 3), np.uint64)
+assert_type(round(f8), int)
+assert_type(round(f8, 3), np.float64)
+
+assert_type(f8.__ceil__(), int)
+assert_type(f8.__floor__(), int)
+
+assert_type(i8.is_integer(), Literal[True])
+
+assert_type(O.real, np.object_)
+assert_type(O.imag, np.object_)
+assert_type(int(O), int)
+assert_type(float(O), float)
+assert_type(complex(O), complex)
+
+# These fail because of a mypy __new__ bug:
+#
https://github.com/python/mypy/issues/15182 +# According to the typing spec, the following statements are valid, see +# https://typing.readthedocs.io/en/latest/spec/constructors.html#new-method + +# assert_type(np.object_(), None) +# assert_type(np.object_(None), None) +# assert_type(np.object_(array_nd), np.ndarray[Any, np.dtype[np.object_]]) +# assert_type(np.object_([]), npt.NDArray[np.object_]) +# assert_type(np.object_(()), npt.NDArray[np.object_]) +# assert_type(np.object_(range(4)), npt.NDArray[np.object_]) +# assert_type(np.object_(+42), int) +# assert_type(np.object_(1 / 137), float) +# assert_type(np.object_('Developers! ' * (1 << 6)), str) +# assert_type(np.object_(object()), object) +# assert_type(np.object_({False, True, NotADirectoryError}), set[Any]) +# assert_type(np.object_({'spam': 'food', 'ham': 'food'}), dict[str, str]) diff --git a/python/numpy/typing/tests/data/reveal/shape.pyi b/python/numpy/typing/tests/data/reveal/shape.pyi new file mode 100644 index 000000000..2406a39f9 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/shape.pyi @@ -0,0 +1,13 @@ +from typing import Any, NamedTuple, assert_type + +import numpy as np + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +arr: np.ndarray[XYGrid, Any] + +# Test shape property matches shape typevar +assert_type(arr.shape, XYGrid) diff --git a/python/numpy/typing/tests/data/reveal/shape_base.pyi b/python/numpy/typing/tests/data/reveal/shape_base.pyi new file mode 100644 index 000000000..e409a53bc --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/shape_base.pyi @@ -0,0 +1,52 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +i8: np.int64 +f8: np.float64 + +AR_b: npt.NDArray[np.bool] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] + +AR_LIKE_f8: list[float] + +assert_type(np.take_along_axis(AR_f8, AR_i8, axis=1), npt.NDArray[np.float64]) +assert_type(np.take_along_axis(f8, AR_i8, axis=None), npt.NDArray[np.float64]) + +assert_type(np.put_along_axis(AR_f8, AR_i8, "1.0", axis=1), None) + +assert_type(np.expand_dims(AR_i8, 2), npt.NDArray[np.int64]) +assert_type(np.expand_dims(AR_LIKE_f8, 2), npt.NDArray[Any]) + +assert_type(np.column_stack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.column_stack([AR_LIKE_f8]), npt.NDArray[Any]) + +assert_type(np.dstack([AR_i8]), npt.NDArray[np.int64]) +assert_type(np.dstack([AR_LIKE_f8]), npt.NDArray[Any]) + +assert_type(np.array_split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.array_split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.split(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.split(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.hsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.hsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.vsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.vsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.dsplit(AR_i8, [3, 5, 6, 10]), list[npt.NDArray[np.int64]]) +assert_type(np.dsplit(AR_LIKE_f8, [3, 5, 6, 10]), list[npt.NDArray[Any]]) + +assert_type(np.kron(AR_b, AR_b), npt.NDArray[np.bool]) +assert_type(np.kron(AR_b, AR_i8), npt.NDArray[np.signedinteger]) +assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating]) + +assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) +assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) + 
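+# Editorial aside (illustrative sketch, not part of the upstream suite): the
+# two `np.tile` reveals above differ because a typed NDArray keeps its dtype
+# parameter statically, while a plain list only resolves its dtype at runtime:
+#
+#     np.tile(np.arange(3, dtype=np.int64), 2)  # ndarray[int64]: [0 1 2 0 1 2]
+#     np.tile([1.0, 2.0], [2, 2])               # static type is ndarray[Any]
+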
+assert_type(np.unstack(AR_i8, axis=0), tuple[npt.NDArray[np.int64], ...]) +assert_type(np.unstack(AR_LIKE_f8, axis=0), tuple[npt.NDArray[Any], ...]) diff --git a/python/numpy/typing/tests/data/reveal/stride_tricks.pyi b/python/numpy/typing/tests/data/reveal/stride_tricks.pyi new file mode 100644 index 000000000..8fde9b8ae --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/stride_tricks.pyi @@ -0,0 +1,27 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_LIKE_f: list[float] +interface_dict: dict[str, Any] + +assert_type(np.lib.stride_tricks.as_strided(AR_f8), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.as_strided(AR_LIKE_f), npt.NDArray[Any]) +assert_type(np.lib.stride_tricks.as_strided(AR_f8, strides=(1, 5)), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.as_strided(AR_f8, shape=[9, 20]), npt.NDArray[np.float64]) + +assert_type(np.lib.stride_tricks.sliding_window_view(AR_f8, 5), npt.NDArray[np.float64]) +assert_type(np.lib.stride_tricks.sliding_window_view(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) +assert_type(np.lib.stride_tricks.sliding_window_view(AR_f8, [9], axis=1), npt.NDArray[np.float64]) + +assert_type(np.broadcast_to(AR_f8, 5), npt.NDArray[np.float64]) +assert_type(np.broadcast_to(AR_LIKE_f, (1, 5)), npt.NDArray[Any]) +assert_type(np.broadcast_to(AR_f8, [4, 6], subok=True), npt.NDArray[np.float64]) + +assert_type(np.broadcast_shapes((1, 2), [3, 1], (3, 2)), tuple[Any, ...]) +assert_type(np.broadcast_shapes((6, 7), (5, 6, 1), 7, (5, 1, 7)), tuple[Any, ...]) + +assert_type(np.broadcast_arrays(AR_f8, AR_f8), tuple[npt.NDArray[Any], ...]) +assert_type(np.broadcast_arrays(AR_f8, AR_LIKE_f), tuple[npt.NDArray[Any], ...]) diff --git a/python/numpy/typing/tests/data/reveal/strings.pyi b/python/numpy/typing/tests/data/reveal/strings.pyi new file mode 100644 index 000000000..18bd252d5 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/strings.pyi @@ -0,0 +1,196 @@ +from typing import TypeAlias, assert_type + +import numpy as np +import numpy._typing as np_t +import numpy.typing as npt + +AR_T_alias: TypeAlias = np.ndarray[np_t._AnyShape, np.dtypes.StringDType] +AR_TU_alias: TypeAlias = AR_T_alias | npt.NDArray[np.str_] + +AR_U: npt.NDArray[np.str_] +AR_S: npt.NDArray[np.bytes_] +AR_T: AR_T_alias + +assert_type(np.strings.equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.not_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.not_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.greater_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.greater_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.less_equal(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.less_equal(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.greater(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_S, AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.greater(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.less(AR_U, AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.less(AR_S, AR_S), npt.NDArray[np.bool]) 
+assert_type(np.strings.less(AR_T, AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.add(AR_U, AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.add(AR_S, AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.add(AR_T, AR_T), AR_T_alias) + +assert_type(np.strings.multiply(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.multiply(AR_S, [5, 4, 3]), npt.NDArray[np.bytes_]) +assert_type(np.strings.multiply(AR_T, 5), AR_T_alias) + +assert_type(np.strings.mod(AR_U, "test"), npt.NDArray[np.str_]) +assert_type(np.strings.mod(AR_S, "test"), npt.NDArray[np.bytes_]) +assert_type(np.strings.mod(AR_T, "test"), AR_T_alias) + +assert_type(np.strings.capitalize(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.capitalize(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.capitalize(AR_T), AR_T_alias) + +assert_type(np.strings.center(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.center(AR_S, [2, 3, 4], b"a"), npt.NDArray[np.bytes_]) +assert_type(np.strings.center(AR_T, 5), AR_T_alias) + +assert_type(np.strings.encode(AR_U), npt.NDArray[np.bytes_]) +assert_type(np.strings.encode(AR_T), npt.NDArray[np.bytes_]) +assert_type(np.strings.decode(AR_S), npt.NDArray[np.str_]) + +assert_type(np.strings.expandtabs(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.expandtabs(AR_S, tabsize=4), npt.NDArray[np.bytes_]) +assert_type(np.strings.expandtabs(AR_T), AR_T_alias) + +assert_type(np.strings.ljust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.ljust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.ljust(AR_T, 5), AR_T_alias) +assert_type(np.strings.ljust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) + +assert_type(np.strings.rjust(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.rjust(AR_S, [4, 3, 1], fillchar=[b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.rjust(AR_T, 5), AR_T_alias) +assert_type(np.strings.rjust(AR_T, [4, 2, 1], fillchar=["a", "b", "c"]), AR_T_alias) + +assert_type(np.strings.lstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.lstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.lstrip(AR_T), AR_T_alias) +assert_type(np.strings.lstrip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.rstrip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.rstrip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.rstrip(AR_T), AR_T_alias) +assert_type(np.strings.rstrip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.strip(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.strip(AR_S, b"_"), npt.NDArray[np.bytes_]) +assert_type(np.strings.strip(AR_T), AR_T_alias) +assert_type(np.strings.strip(AR_T, "_"), AR_T_alias) + +assert_type(np.strings.count(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.count(AR_T, ["a", "b", "c"], end=9), npt.NDArray[np.int_]) + +assert_type(np.strings.partition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.strings.partition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.partition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.strings.rpartition(AR_U, "\n"), npt.NDArray[np.str_]) +assert_type(np.strings.rpartition(AR_S, [b"a", b"b", b"c"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.rpartition(AR_T, "\n"), AR_TU_alias) + +assert_type(np.strings.replace(AR_U, "_", "-"), npt.NDArray[np.str_]) 
+assert_type(np.strings.replace(AR_S, [b"_", b""], [b"a", b"b"]), npt.NDArray[np.bytes_]) +assert_type(np.strings.replace(AR_T, "_", "_"), AR_TU_alias) + +assert_type(np.strings.lower(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.lower(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.lower(AR_T), AR_T_alias) + +assert_type(np.strings.upper(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.upper(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.upper(AR_T), AR_T_alias) + +assert_type(np.strings.swapcase(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.swapcase(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.swapcase(AR_T), AR_T_alias) + +assert_type(np.strings.title(AR_U), npt.NDArray[np.str_]) +assert_type(np.strings.title(AR_S), npt.NDArray[np.bytes_]) +assert_type(np.strings.title(AR_T), AR_T_alias) + +assert_type(np.strings.zfill(AR_U, 5), npt.NDArray[np.str_]) +assert_type(np.strings.zfill(AR_S, [2, 3, 4]), npt.NDArray[np.bytes_]) +assert_type(np.strings.zfill(AR_T, 5), AR_T_alias) + +assert_type(np.strings.endswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.strings.endswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.strings.endswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.strings.startswith(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) +assert_type(np.strings.startswith(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.bool]) +assert_type(np.strings.startswith(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.bool]) + +assert_type(np.strings.find(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.find(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.find(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.rfind(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.rfind(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.rfind(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.index(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.index(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.index(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.rindex(AR_U, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) +assert_type(np.strings.rindex(AR_S, [b"a", b"b", b"c"], end=9), npt.NDArray[np.int_]) +assert_type(np.strings.rindex(AR_T, "a", start=[1, 2, 3]), npt.NDArray[np.int_]) + +assert_type(np.strings.isalpha(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isalpha(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isalnum(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isalnum(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isdecimal(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isdecimal(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isdigit(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isdigit(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.islower(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.islower(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isnumeric(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isnumeric(AR_T), npt.NDArray[np.bool]) + 
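+# Note (editorial): `isdecimal` and `isnumeric` above intentionally omit the
+# bytes array AR_S; these two predicates are only defined for str-based
+# arrays. A minimal runtime sketch of the distinction (illustrative only):
+#
+#     np.strings.isnumeric(np.array(["42", "x"]))  # array([ True, False])
+#     np.strings.isnumeric(np.array([b"42"]))      # rejected for bytes input
+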
+assert_type(np.strings.isspace(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isspace(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.istitle(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.istitle(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.isupper(AR_U), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_S), npt.NDArray[np.bool]) +assert_type(np.strings.isupper(AR_T), npt.NDArray[np.bool]) + +assert_type(np.strings.str_len(AR_U), npt.NDArray[np.int_]) +assert_type(np.strings.str_len(AR_S), npt.NDArray[np.int_]) +assert_type(np.strings.str_len(AR_T), npt.NDArray[np.int_]) + +assert_type(np.strings.translate(AR_U, ""), npt.NDArray[np.str_]) +assert_type(np.strings.translate(AR_S, ""), npt.NDArray[np.bytes_]) +assert_type(np.strings.translate(AR_T, ""), AR_T_alias) + +assert_type(np.strings.slice(AR_U, 1, 5, 2), npt.NDArray[np.str_]) +assert_type(np.strings.slice(AR_S, 1, 5, 2), npt.NDArray[np.bytes_]) +assert_type(np.strings.slice(AR_T, 1, 5, 2), AR_T_alias) diff --git a/python/numpy/typing/tests/data/reveal/testing.pyi b/python/numpy/typing/tests/data/reveal/testing.pyi new file mode 100644 index 000000000..d70bc971c --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/testing.pyi @@ -0,0 +1,198 @@ +import contextlib +import re +import sys +import types +import unittest +import warnings +from collections.abc import Callable +from pathlib import Path +from typing import Any, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +AR_f8: npt.NDArray[np.float64] +AR_i8: npt.NDArray[np.int64] + +bool_obj: bool +suppress_obj: np.testing.suppress_warnings +FT = TypeVar("FT", bound=Callable[..., Any]) + +def func() -> int: ... + +def func2( + x: npt.NDArray[np.number], + y: npt.NDArray[np.number], +) -> npt.NDArray[np.bool]: ... 
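+# Editorial sketch: func2 is shaped like the comparison callback consumed by
+# `np.testing.assert_array_compare` (see the assertions further below); at
+# runtime any ufunc-like predicate would fit, e.g.:
+#
+#     np.testing.assert_array_compare(np.less_equal, np.arange(3), np.arange(3))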
+ +assert_type(np.testing.KnownFailureException(), np.testing.KnownFailureException) +assert_type(np.testing.IgnoreException(), np.testing.IgnoreException) + +assert_type( + np.testing.clear_and_catch_warnings(modules=[np.testing]), + np.testing.clear_and_catch_warnings[None], +) +assert_type( + np.testing.clear_and_catch_warnings(True), + np.testing.clear_and_catch_warnings[list[warnings.WarningMessage]], +) +assert_type( + np.testing.clear_and_catch_warnings(False), + np.testing.clear_and_catch_warnings[None], +) +assert_type( + np.testing.clear_and_catch_warnings(bool_obj), + np.testing.clear_and_catch_warnings, +) +assert_type( + np.testing.clear_and_catch_warnings.class_modules, + tuple[types.ModuleType, ...], +) +assert_type( + np.testing.clear_and_catch_warnings.modules, + set[types.ModuleType], +) + +with np.testing.clear_and_catch_warnings(True) as c1: + assert_type(c1, list[warnings.WarningMessage]) +with np.testing.clear_and_catch_warnings() as c2: + assert_type(c2, None) + +assert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings) +assert_type(np.testing.suppress_warnings()(func), Callable[[], int]) +assert_type(suppress_obj.filter(RuntimeWarning), None) +assert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage]) +with suppress_obj as c3: + assert_type(c3, np.testing.suppress_warnings) + +assert_type(np.testing.verbose, int) +assert_type(np.testing.IS_PYPY, bool) +assert_type(np.testing.HAS_REFCOUNT, bool) +assert_type(np.testing.HAS_LAPACK64, bool) + +assert_type(np.testing.assert_(1, msg="test"), None) +assert_type(np.testing.assert_(2, msg=lambda: "test"), None) + +if sys.platform == "win32" or sys.platform == "cygwin": + assert_type(np.testing.memusage(), int) +elif sys.platform == "linux": + assert_type(np.testing.memusage(), int | None) + +assert_type(np.testing.jiffies(), int) + +assert_type(np.testing.build_err_msg([0, 1, 2], "test"), str) +assert_type(np.testing.build_err_msg(range(2), "test", header="header"), str) +assert_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False), str) +assert_type(np.testing.build_err_msg("abc", "test", names=["x", "y"]), str) +assert_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5), str) + +assert_type(np.testing.assert_equal({1}, {1}), None) +assert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) +assert_type(np.testing.assert_equal(1, 1.0, verbose=True), None) + +assert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None) + +assert_type(np.testing.assert_almost_equal(1.0, 1.1), None) +assert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None) +assert_type(np.testing.assert_almost_equal(1, 1.0, verbose=True), None) +assert_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2), None) + +assert_type(np.testing.assert_approx_equal(1.0, 1.1), None) +assert_type(np.testing.assert_approx_equal("1", "2", err_msg="fail"), None) +assert_type(np.testing.assert_approx_equal(1, 1.0, verbose=True), None) +assert_type(np.testing.assert_approx_equal(1, 1.0001, significant=2), None) + +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header"), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, precision=np.int64()), None) 
+assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False), None) +assert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True), None) + +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True), None) + +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True), None) +assert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1), None) + +assert_type(np.testing.assert_array_less(AR_i8, AR_f8), None) +assert_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test"), None) +assert_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True), None) + +assert_type(np.testing.runstring("1 + 1", {}), Any) +assert_type(np.testing.runstring("int64() + 1", {"int64": np.int64}), Any) + +assert_type(np.testing.assert_string_equal("1", "1"), None) + +assert_type(np.testing.rundocs(), None) +assert_type(np.testing.rundocs("test.py"), None) +assert_type(np.testing.rundocs(Path("test.py"), raise_on_error=True), None) + +def func3(a: int) -> bool: ... + +assert_type( + np.testing.assert_raises(RuntimeWarning), + unittest.case._AssertRaisesContext[RuntimeWarning], +) +assert_type(np.testing.assert_raises(RuntimeWarning, func3, 5), None) + +assert_type( + np.testing.assert_raises_regex(RuntimeWarning, r"test"), + unittest.case._AssertRaisesContext[RuntimeWarning], +) +assert_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5), None) +assert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5), None) + +class Test: ... + +def decorate(a: FT) -> FT: + return a + +assert_type(np.testing.decorate_methods(Test, decorate), None) +assert_type(np.testing.decorate_methods(Test, decorate, None), None) +assert_type(np.testing.decorate_methods(Test, decorate, "test"), None) +assert_type(np.testing.decorate_methods(Test, decorate, b"test"), None) +assert_type(np.testing.decorate_methods(Test, decorate, re.compile("test")), None) + +assert_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)"), float) +assert_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5), float) + +assert_type(np.testing.assert_allclose(AR_i8, AR_f8), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err"), None) +assert_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False), None) + +assert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), None) + +assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any]) +assert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any]) + +assert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool) + +def func4(a: int, b: str) -> bool: ... 
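+# Illustrative note: when handed a callable, `assert_no_warnings` invokes it
+# and passes its return value through, which is why the reveals below give
+# `bool` for func3/func4; e.g. (sketch):
+#
+#     ok = np.testing.assert_no_warnings(func4, 1, "test")  # runs func4(1, "test")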
+ +assert_type(np.testing.assert_no_warnings(), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_no_warnings(func3, 5), bool) +assert_type(np.testing.assert_no_warnings(func4, a=1, b="test"), bool) +assert_type(np.testing.assert_no_warnings(func4, 1, "test"), bool) + +assert_type(np.testing.tempdir("test_dir"), contextlib._GeneratorContextManager[str]) +assert_type(np.testing.tempdir(prefix=b"test"), contextlib._GeneratorContextManager[bytes]) +assert_type(np.testing.tempdir("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str]) + +assert_type(np.testing.temppath("test_dir", text=True), contextlib._GeneratorContextManager[str]) +assert_type(np.testing.temppath(prefix=b"test"), contextlib._GeneratorContextManager[bytes]) +assert_type(np.testing.temppath("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str]) + +assert_type(np.testing.assert_no_gc_cycles(), contextlib._GeneratorContextManager[None]) +assert_type(np.testing.assert_no_gc_cycles(func3, 5), None) + +assert_type(np.testing.break_cycles(), None) + +assert_type(np.testing.TestCase(), unittest.case.TestCase) diff --git a/python/numpy/typing/tests/data/reveal/twodim_base.pyi b/python/numpy/typing/tests/data/reveal/twodim_base.pyi new file mode 100644 index 000000000..7e9563a38 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -0,0 +1,145 @@ +from typing import Any, TypeVar, assert_type + +import numpy as np +import numpy.typing as npt + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +def func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ... + +def func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ... + +AR_b: npt.NDArray[np.bool] +AR_u: npt.NDArray[np.uint64] +AR_i: npt.NDArray[np.int64] +AR_f: npt.NDArray[np.float64] +AR_c: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + +AR_LIKE_b: list[bool] +AR_LIKE_c: list[complex] + +assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) +assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) + +assert_type(np.flipud(AR_b), npt.NDArray[np.bool]) +assert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any]) + +assert_type(np.eye(10), npt.NDArray[np.float64]) +assert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any]) + +assert_type(np.diag(AR_b), npt.NDArray[np.bool]) +assert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.diagflat(AR_b), npt.NDArray[np.bool]) +assert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.tri(10), npt.NDArray[np.float64]) +assert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64]) +assert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any]) + +assert_type(np.tril(AR_b), npt.NDArray[np.bool]) +assert_type(np.tril(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.triu(AR_b), npt.NDArray[np.bool]) +assert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any]) + +assert_type(np.vander(AR_b), npt.NDArray[np.signedinteger]) +assert_type(np.vander(AR_u), npt.NDArray[np.signedinteger]) +assert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger]) +assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating]) +assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating]) +assert_type(np.vander(AR_O), npt.NDArray[np.object_]) + +assert_type( + np.histogram2d(AR_LIKE_c, AR_LIKE_c), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128 | np.float64], + npt.NDArray[np.complex128 | np.float64], + ], +) +assert_type( + 
np.histogram2d(AR_i, AR_b), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_f, AR_i), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_i, AR_f), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_f, AR_c, bins=8), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_f, bins=(8, 5)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_i, bins=AR_u), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.bool | np.complex128], + npt.NDArray[np.bool | np.complex128], + ], +) + +assert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) +assert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]) + +assert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.tril_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.triu_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) + +assert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]]) diff --git a/python/numpy/typing/tests/data/reveal/type_check.pyi b/python/numpy/typing/tests/data/reveal/type_check.pyi new file mode 100644 index 000000000..df95da78f --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/type_check.pyi @@ -0,0 +1,67 @@ +from typing import Any, Literal, assert_type + +import numpy as np +import numpy.typing as npt + +f8: np.float64 +f: float + +# NOTE: Avoid importing the platform specific `np.float128` type +AR_i8: npt.NDArray[np.int64] +AR_i4: npt.NDArray[np.int32] +AR_f2: npt.NDArray[np.float16] +AR_f8: npt.NDArray[np.float64] +AR_f16: npt.NDArray[np.longdouble] +AR_c8: npt.NDArray[np.complex64] +AR_c16: npt.NDArray[np.complex128] + +AR_LIKE_f: list[float] + +class ComplexObj: + real: slice + imag: slice + +assert_type(np.mintypecode(["f8"], typeset="qfQF"), str) + +assert_type(np.real(ComplexObj()), slice) +assert_type(np.real(AR_f8), npt.NDArray[np.float64]) +assert_type(np.real(AR_c16), npt.NDArray[np.float64]) +assert_type(np.real(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.imag(ComplexObj()), slice) +assert_type(np.imag(AR_f8), npt.NDArray[np.float64]) +assert_type(np.imag(AR_c16), npt.NDArray[np.float64]) +assert_type(np.imag(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.iscomplex(f8), np.bool) +assert_type(np.iscomplex(AR_f8), npt.NDArray[np.bool]) +assert_type(np.iscomplex(AR_LIKE_f), npt.NDArray[np.bool]) + +assert_type(np.isreal(f8), np.bool) +assert_type(np.isreal(AR_f8), npt.NDArray[np.bool]) +assert_type(np.isreal(AR_LIKE_f), npt.NDArray[np.bool]) + +assert_type(np.iscomplexobj(f8), bool) +assert_type(np.isrealobj(f8), bool) + 
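+# Aside (illustrative): the `*obj` variants above inspect the dtype and
+# return a plain Python bool, while `iscomplex`/`isreal` test values
+# element-wise:
+#
+#     np.iscomplex(np.array([1 + 0j, 1 + 1j]))  # array([False,  True])
+#     np.iscomplexobj(np.array([1 + 0j]))       # True (complex dtype)
+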
+assert_type(np.nan_to_num(f8), np.float64) +assert_type(np.nan_to_num(f, copy=True), Any) +assert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64]) +assert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any]) + +assert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64]) +assert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128]) +assert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64]) +assert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any]) + +assert_type(np.typename("h"), Literal["short"]) +assert_type(np.typename("B"), Literal["unsigned char"]) +assert_type(np.typename("V"), Literal["void"]) +assert_type(np.typename("S1"), Literal["character"]) + +assert_type(np.common_type(AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f2), type[np.float16]) +assert_type(np.common_type(AR_f2, AR_i4), type[np.float64]) +assert_type(np.common_type(AR_f16, AR_i4), type[np.longdouble]) +assert_type(np.common_type(AR_c8, AR_f2), type[np.complex64]) +assert_type(np.common_type(AR_f2, AR_c8, AR_i4), type[np.complexfloating]) diff --git a/python/numpy/typing/tests/data/reveal/ufunc_config.pyi b/python/numpy/typing/tests/data/reveal/ufunc_config.pyi new file mode 100644 index 000000000..748507530 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/ufunc_config.pyi @@ -0,0 +1,30 @@ +"""Typing tests for `_core._ufunc_config`.""" + +from collections.abc import Callable +from typing import Any, assert_type + +from _typeshed import SupportsWrite + +import numpy as np + +def func(a: str, b: int) -> None: ... + +class Write: + def write(self, value: str) -> None: ... + +assert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict) +assert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict) +assert_type(np.geterr(), np._core._ufunc_config._ErrDict) + +assert_type(np.setbufsize(4096), int) +assert_type(np.getbufsize(), int) + +assert_type(np.seterrcall(func), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.seterrcall(Write()), Callable[[str, int], Any] | SupportsWrite[str] | None) +assert_type(np.geterrcall(), Callable[[str, int], Any] | SupportsWrite[str] | None) + +assert_type(np.errstate(call=func, all="call"), np.errstate) +assert_type(np.errstate(call=Write(), divide="log", over="log"), np.errstate) diff --git a/python/numpy/typing/tests/data/reveal/ufunclike.pyi b/python/numpy/typing/tests/data/reveal/ufunclike.pyi new file mode 100644 index 000000000..a0ede60e0 --- /dev/null +++ b/python/numpy/typing/tests/data/reveal/ufunclike.pyi @@ -0,0 +1,31 @@ +from typing import Any, assert_type + +import numpy as np +import numpy.typing as npt + +AR_LIKE_b: list[bool] +AR_LIKE_u: list[np.uint32] +AR_LIKE_i: list[int] +AR_LIKE_f: list[float] +AR_LIKE_O: list[np.object_] + +AR_U: npt.NDArray[np.str_] + +assert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating]) +assert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating]) +assert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating]) +assert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating]) +assert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_]) +assert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_]) + +assert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool]) +assert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool]) 
+assert_type(np.isposinf(AR_LIKE_i), npt.NDArray[np.bool])
+assert_type(np.isposinf(AR_LIKE_f), npt.NDArray[np.bool])
+assert_type(np.isposinf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_])
+
+assert_type(np.isneginf(AR_LIKE_b), npt.NDArray[np.bool])
+assert_type(np.isneginf(AR_LIKE_u), npt.NDArray[np.bool])
+assert_type(np.isneginf(AR_LIKE_i), npt.NDArray[np.bool])
+assert_type(np.isneginf(AR_LIKE_f), npt.NDArray[np.bool])
+assert_type(np.isneginf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_])
diff --git a/python/numpy/typing/tests/data/reveal/ufuncs.pyi b/python/numpy/typing/tests/data/reveal/ufuncs.pyi
new file mode 100644
index 000000000..93a8bfb15
--- /dev/null
+++ b/python/numpy/typing/tests/data/reveal/ufuncs.pyi
@@ -0,0 +1,119 @@
+from typing import Any, Literal, NoReturn, assert_type
+
+import numpy as np
+import numpy.typing as npt
+
+i8: np.int64
+f8: np.float64
+AR_f8: npt.NDArray[np.float64]
+AR_i8: npt.NDArray[np.int64]
+
+assert_type(np.absolute.__doc__, str)
+assert_type(np.absolute.types, list[str])
+
+assert_type(np.absolute.__name__, Literal["absolute"])
+assert_type(np.absolute.__qualname__, Literal["absolute"])
+assert_type(np.absolute.ntypes, Literal[20])
+assert_type(np.absolute.identity, None)
+assert_type(np.absolute.nin, Literal[1])
+assert_type(np.absolute.nout, Literal[1])
+assert_type(np.absolute.nargs, Literal[2])
+assert_type(np.absolute.signature, None)
+assert_type(np.absolute(f8), Any)
+assert_type(np.absolute(AR_f8), npt.NDArray[Any])
+assert_type(np.absolute.at(AR_f8, AR_i8), None)
+
+assert_type(np.add.__name__, Literal["add"])
+assert_type(np.add.__qualname__, Literal["add"])
+assert_type(np.add.ntypes, Literal[22])
+assert_type(np.add.identity, Literal[0])
+assert_type(np.add.nin, Literal[2])
+assert_type(np.add.nout, Literal[1])
+assert_type(np.add.nargs, Literal[3])
+assert_type(np.add.signature, None)
+assert_type(np.add(f8, f8), Any)
+assert_type(np.add(AR_f8, f8), npt.NDArray[Any])
+assert_type(np.add.at(AR_f8, AR_i8, f8), None)
+assert_type(np.add.reduce(AR_f8, axis=0), Any)
+assert_type(np.add.accumulate(AR_f8), npt.NDArray[Any])
+assert_type(np.add.reduceat(AR_f8, AR_i8), npt.NDArray[Any])
+assert_type(np.add.outer(f8, f8), Any)
+assert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any])
+
+assert_type(np.frexp.__name__, Literal["frexp"])
+assert_type(np.frexp.__qualname__, Literal["frexp"])
+assert_type(np.frexp.ntypes, Literal[4])
+assert_type(np.frexp.identity, None)
+assert_type(np.frexp.nin, Literal[1])
+assert_type(np.frexp.nout, Literal[2])
+assert_type(np.frexp.nargs, Literal[3])
+assert_type(np.frexp.signature, None)
+assert_type(np.frexp(f8), tuple[Any, Any])
+assert_type(np.frexp(AR_f8), tuple[npt.NDArray[Any], npt.NDArray[Any]])
+
+assert_type(np.divmod.__name__, Literal["divmod"])
+assert_type(np.divmod.__qualname__, Literal["divmod"])
+assert_type(np.divmod.ntypes, Literal[15])
+assert_type(np.divmod.identity, None)
+assert_type(np.divmod.nin, Literal[2])
+assert_type(np.divmod.nout, Literal[2])
+assert_type(np.divmod.nargs, Literal[4])
+assert_type(np.divmod.signature, None)
+assert_type(np.divmod(f8, f8), tuple[Any, Any])
+assert_type(np.divmod(AR_f8, f8), tuple[npt.NDArray[Any], npt.NDArray[Any]])
+
+assert_type(np.matmul.__name__, Literal["matmul"])
+assert_type(np.matmul.__qualname__, Literal["matmul"])
+assert_type(np.matmul.ntypes, Literal[19])
+assert_type(np.matmul.identity, None)
+assert_type(np.matmul.nin, Literal[2])
+assert_type(np.matmul.nout, Literal[1])
+assert_type(np.matmul.nargs, Literal[3])
+assert_type(np.matmul.signature, Literal["(n?,k),(k,m?)->(n?,m?)"])
+assert_type(np.matmul(AR_f8, AR_f8), Any)
+assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any)
+
+assert_type(np.vecdot.__name__, Literal["vecdot"])
+assert_type(np.vecdot.__qualname__, Literal["vecdot"])
+assert_type(np.vecdot.ntypes, Literal[19])
+assert_type(np.vecdot.identity, None)
+assert_type(np.vecdot.nin, Literal[2])
+assert_type(np.vecdot.nout, Literal[1])
+assert_type(np.vecdot.nargs, Literal[3])
+assert_type(np.vecdot.signature, Literal["(n),(n)->()"])
+assert_type(np.vecdot(AR_f8, AR_f8), Any)
+
+assert_type(np.bitwise_count.__name__, Literal["bitwise_count"])
+assert_type(np.bitwise_count.__qualname__, Literal["bitwise_count"])
+assert_type(np.bitwise_count.ntypes, Literal[11])
+assert_type(np.bitwise_count.identity, None)
+assert_type(np.bitwise_count.nin, Literal[1])
+assert_type(np.bitwise_count.nout, Literal[1])
+assert_type(np.bitwise_count.nargs, Literal[2])
+assert_type(np.bitwise_count.signature, None)
+assert_type(np.bitwise_count(i8), Any)
+assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any])
+
+assert_type(np.absolute.outer(), NoReturn)
+assert_type(np.frexp.outer(), NoReturn)
+assert_type(np.divmod.outer(), NoReturn)
+assert_type(np.matmul.outer(), NoReturn)
+
+assert_type(np.absolute.reduceat(), NoReturn)
+assert_type(np.frexp.reduceat(), NoReturn)
+assert_type(np.divmod.reduceat(), NoReturn)
+assert_type(np.matmul.reduceat(), NoReturn)
+
+assert_type(np.absolute.reduce(), NoReturn)
+assert_type(np.frexp.reduce(), NoReturn)
+assert_type(np.divmod.reduce(), NoReturn)
+assert_type(np.matmul.reduce(), NoReturn)
+
+assert_type(np.absolute.accumulate(), NoReturn)
+assert_type(np.frexp.accumulate(), NoReturn)
+assert_type(np.divmod.accumulate(), NoReturn)
+assert_type(np.matmul.accumulate(), NoReturn)
+
+assert_type(np.frexp.at(), NoReturn)
+assert_type(np.divmod.at(), NoReturn)
+assert_type(np.matmul.at(), NoReturn)
diff --git a/python/numpy/typing/tests/data/reveal/warnings_and_errors.pyi b/python/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
new file mode 100644
index 000000000..f756a8e45
--- /dev/null
+++ b/python/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
@@ -0,0 +1,11 @@
+from typing import assert_type
+
+import numpy.exceptions as ex
+
+assert_type(ex.ModuleDeprecationWarning(), ex.ModuleDeprecationWarning)
+assert_type(ex.VisibleDeprecationWarning(), ex.VisibleDeprecationWarning)
+assert_type(ex.ComplexWarning(), ex.ComplexWarning)
+assert_type(ex.RankWarning(), ex.RankWarning)
+assert_type(ex.TooHardError(), ex.TooHardError)
+assert_type(ex.AxisError("test"), ex.AxisError)
+assert_type(ex.AxisError(5, 1), ex.AxisError)
diff --git a/python/numpy/typing/tests/test_isfile.py b/python/numpy/typing/tests/test_isfile.py
new file mode 100644
index 000000000..f72122f20
--- /dev/null
+++ b/python/numpy/typing/tests/test_isfile.py
@@ -0,0 +1,32 @@
+import os
+import sys
+from pathlib import Path
+
+import numpy as np
+from numpy.testing import assert_
+
+ROOT = Path(np.__file__).parents[0]
+FILES = [
+    ROOT / "py.typed",
+    ROOT / "__init__.pyi",
+    ROOT / "ctypeslib" / "__init__.pyi",
+    ROOT / "_core" / "__init__.pyi",
+    ROOT / "f2py" / "__init__.pyi",
+    ROOT / "fft" / "__init__.pyi",
+    ROOT / "lib" / "__init__.pyi",
+    ROOT / "linalg" / "__init__.pyi",
+    ROOT / "ma" / "__init__.pyi",
+    ROOT / "matrixlib" / "__init__.pyi",
ROOT / "polynomial" / "__init__.pyi", + ROOT / "random" / "__init__.pyi", + ROOT / "testing" / "__init__.pyi", +] +if sys.version_info < (3, 12): + FILES += [ROOT / "distutils" / "__init__.pyi"] + + +class TestIsFile: + def test_isfile(self): + """Test if all ``.pyi`` files are properly installed.""" + for file in FILES: + assert_(os.path.isfile(file)) diff --git a/python/numpy/typing/tests/test_runtime.py b/python/numpy/typing/tests/test_runtime.py new file mode 100644 index 000000000..236952101 --- /dev/null +++ b/python/numpy/typing/tests/test_runtime.py @@ -0,0 +1,102 @@ +"""Test the runtime usage of `numpy.typing`.""" + +from typing import ( + Any, + NamedTuple, + Union, # pyright: ignore[reportDeprecated] + get_args, + get_origin, + get_type_hints, +) + +import pytest + +import numpy as np +import numpy._typing as _npt +import numpy.typing as npt + + +class TypeTup(NamedTuple): + typ: type + args: tuple[type, ...] + origin: type | None + + +NDArrayTup = TypeTup(npt.NDArray, npt.NDArray.__args__, np.ndarray) + +TYPES = { + "ArrayLike": TypeTup(npt.ArrayLike, npt.ArrayLike.__args__, Union), + "DTypeLike": TypeTup(npt.DTypeLike, npt.DTypeLike.__args__, Union), + "NBitBase": TypeTup(npt.NBitBase, (), None), + "NDArray": NDArrayTup, +} + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_args(name: type, tup: TypeTup) -> None: + """Test `typing.get_args`.""" + typ, ref = tup.typ, tup.args + out = get_args(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_origin(name: type, tup: TypeTup) -> None: + """Test `typing.get_origin`.""" + typ, ref = tup.typ, tup.origin + out = get_origin(typ) + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints`.""" + typ = tup.typ + + def func(a: typ) -> None: pass + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +@pytest.mark.parametrize("name,tup", TYPES.items(), ids=TYPES.keys()) +def test_get_type_hints_str(name: type, tup: TypeTup) -> None: + """Test `typing.get_type_hints` with string-representation of types.""" + typ_str, typ = f"npt.{name}", tup.typ + + def func(a: typ_str) -> None: pass + + out = get_type_hints(func) + ref = {"a": typ, "return": type(None)} + assert out == ref + + +def test_keys() -> None: + """Test that ``TYPES.keys()`` and ``numpy.typing.__all__`` are synced.""" + keys = TYPES.keys() + ref = set(npt.__all__) + assert keys == ref + + +PROTOCOLS: dict[str, tuple[type[Any], object]] = { + "_SupportsDType": (_npt._SupportsDType, np.int64(1)), + "_SupportsArray": (_npt._SupportsArray, np.arange(10)), + "_SupportsArrayFunc": (_npt._SupportsArrayFunc, np.arange(10)), + "_NestedSequence": (_npt._NestedSequence, [1]), +} + + +@pytest.mark.parametrize("cls,obj", PROTOCOLS.values(), ids=PROTOCOLS.keys()) +class TestRuntimeProtocol: + def test_isinstance(self, cls: type[Any], obj: object) -> None: + assert isinstance(obj, cls) + assert not isinstance(None, cls) + + def test_issubclass(self, cls: type[Any], obj: object) -> None: + if cls is _npt._SupportsDType: + pytest.xfail( + "Protocols with non-method members don't support issubclass()" + ) + assert issubclass(type(obj), cls) + assert not issubclass(type(None), cls) diff --git a/python/numpy/typing/tests/test_typing.py b/python/numpy/typing/tests/test_typing.py new file mode 100644 index 000000000..ca4cf37fe --- 
+++ b/python/numpy/typing/tests/test_typing.py
@@ -0,0 +1,203 @@
+import importlib.util
+import os
+import re
+import shutil
+import textwrap
+from collections import defaultdict
+from typing import TYPE_CHECKING
+
+import pytest
+
+# Only trigger a full `mypy` run if this environment variable is set
+# Note that these tests tend to take over a minute even on a macOS M1 CPU,
+# and more than that in CI.
+RUN_MYPY = os.environ.get("NPY_RUN_MYPY_IN_TESTSUITE", "") not in ("", "0", "false")
+
+# Skips all functions in this file
+pytestmark = pytest.mark.skipif(
+    not RUN_MYPY,
+    reason="`NPY_RUN_MYPY_IN_TESTSUITE` not set"
+)
+
+
+try:
+    from mypy import api
+except ImportError:
+    NO_MYPY = True
+else:
+    NO_MYPY = False
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+    # We need this as annotation, but it's located in a private namespace.
+    # As a compromise, do *not* import it during runtime
+    from _pytest.mark.structures import ParameterSet
+
+DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+PASS_DIR = os.path.join(DATA_DIR, "pass")
+FAIL_DIR = os.path.join(DATA_DIR, "fail")
+REVEAL_DIR = os.path.join(DATA_DIR, "reveal")
+MISC_DIR = os.path.join(DATA_DIR, "misc")
+MYPY_INI = os.path.join(DATA_DIR, "mypy.ini")
+CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
+
+#: A dictionary with file names as keys and lists of the mypy stdout as values.
+#: To-be populated by `run_mypy`.
+OUTPUT_MYPY: defaultdict[str, list[str]] = defaultdict(list)
+
+
+def _key_func(key: str) -> str:
+    """Split at the first occurrence of the ``:`` character.
+
+    Windows drive-letters (*e.g.* ``C:``) are ignored herein.
+    """
+    drive, tail = os.path.splitdrive(key)
+    return os.path.join(drive, tail.split(":", 1)[0])
+
+
+def _strip_filename(msg: str) -> tuple[int, str]:
+    """Strip the filename and line number from a mypy message."""
+    _, tail = os.path.splitdrive(msg)
+    _, lineno, msg = tail.split(":", 2)
+    return int(lineno), msg.strip()
+
+
+def strip_func(match: re.Match[str]) -> str:
+    """`re.sub` helper function for stripping module names."""
+    return match.groups()[1]
+
+
+@pytest.fixture(scope="module", autouse=True)
+def run_mypy() -> None:
+    """Clear the cache and run mypy before running any of the typing tests.
+
+    The mypy results are cached in `OUTPUT_MYPY` for further use.
+ + The cache refresh can be skipped using + + NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests + """ + if ( + os.path.isdir(CACHE_DIR) + # environment values are strings, so compare against string sentinels + and os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", "1").lower() not in ("0", "", "false") + ): + shutil.rmtree(CACHE_DIR) + + split_pattern = re.compile(r"(\s+)?\^(\~+)?") + for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR): + # Run mypy + stdout, stderr, exit_code = api.run([ + "--config-file", + MYPY_INI, + "--cache-dir", + CACHE_DIR, + directory, + ]) + if stderr: + pytest.fail(f"Unexpected mypy standard error\n\n{stderr}", False) + elif exit_code not in {0, 1}: + pytest.fail(f"Unexpected mypy exit code: {exit_code}\n\n{stdout}", False) + + str_concat = "" + filename: str | None = None + for i in stdout.split("\n"): + if "note:" in i: + continue + if filename is None: + filename = _key_func(i) + + str_concat += f"{i}\n" + if split_pattern.match(i) is not None: + OUTPUT_MYPY[filename].append(str_concat) + str_concat = "" + filename = None + + +def get_test_cases(*directories: str) -> "Iterator[ParameterSet]": + for directory in directories: + for root, _, files in os.walk(directory): + for fname in files: + short_fname, ext = os.path.splitext(fname) + if ext not in (".pyi", ".py"): + continue + + fullpath = os.path.join(root, fname) + yield pytest.param(fullpath, id=short_fname) + + +_FAIL_INDENT = " " * 4 +_FAIL_SEP = "\n" + "_" * 79 + "\n\n" + +_FAIL_MSG_REVEAL = """{}:{} - reveal mismatch: + +{}""" + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(PASS_DIR, FAIL_DIR)) +def test_pass(path: str) -> None: + # Alias `OUTPUT_MYPY` so that it appears in the local namespace + output_mypy = OUTPUT_MYPY + + if path not in output_mypy: + return + + relpath = os.path.relpath(path) + + # collect any reported errors, and clean up the output + messages = [] + for message in output_mypy[path]: + lineno, content = _strip_filename(message) + content = content.removeprefix("error:").lstrip() + messages.append(f"{relpath}:{lineno} - {content}") + + if messages: + pytest.fail("\n".join(messages), pytrace=False) + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR)) +def test_reveal(path: str) -> None: + """Validate that mypy correctly infers the return-types of + the expressions in `path`. 
+ """ + __tracebackhide__ = True + + output_mypy = OUTPUT_MYPY + if path not in output_mypy: + return + + relpath = os.path.relpath(path) + + # collect any reported errors, and clean up the output + failures = [] + for error_line in output_mypy[path]: + lineno, error_msg = _strip_filename(error_line) + error_msg = textwrap.indent(error_msg, _FAIL_INDENT) + reason = _FAIL_MSG_REVEAL.format(relpath, lineno, error_msg) + failures.append(reason) + + if failures: + reasons = _FAIL_SEP.join(failures) + pytest.fail(reasons, pytrace=False) + + +@pytest.mark.slow +@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed") +@pytest.mark.parametrize("path", get_test_cases(PASS_DIR)) +def test_code_runs(path: str) -> None: + """Validate that the code in `path` properly during runtime.""" + path_without_extension, _ = os.path.splitext(path) + dirname, filename = path.split(os.sep)[-2:] + + spec = importlib.util.spec_from_file_location( + f"{dirname}.{filename}", path + ) + assert spec is not None + assert spec.loader is not None + + test_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(test_module) diff --git a/python/numpy/version.py b/python/numpy/version.py new file mode 100644 index 000000000..20c0ccdf3 --- /dev/null +++ b/python/numpy/version.py @@ -0,0 +1,11 @@ + +""" +Module to expose more detailed version info for the installed `numpy` +""" +version = "2.3.5" +__version__ = version +full_version = version + +git_revision = "c3d60fc8393f3ca3306b8ce8b6453d43737e3d90" +release = 'dev' not in version and '+' not in version +short_version = version.split("+")[0] diff --git a/python/numpy/version.pyi b/python/numpy/version.pyi new file mode 100644 index 000000000..113cde3f5 --- /dev/null +++ b/python/numpy/version.pyi @@ -0,0 +1,18 @@ +from typing import Final, LiteralString + +__all__ = ( + '__version__', + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', +) + +version: Final[LiteralString] +__version__: Final[LiteralString] +full_version: Final[LiteralString] + +git_revision: Final[LiteralString] +release: Final[bool] +short_version: Final[LiteralString] diff --git a/python/pycparser-2.23.dist-info/INSTALLER b/python/pycparser-2.23.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/python/pycparser-2.23.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/python/pycparser-2.23.dist-info/LICENSE b/python/pycparser-2.23.dist-info/LICENSE new file mode 100644 index 000000000..bee14a47d --- /dev/null +++ b/python/pycparser-2.23.dist-info/LICENSE @@ -0,0 +1,27 @@ +pycparser -- A C parser in Python + +Copyright (c) 2008-2022, Eli Bendersky +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the copyright holder nor the names of its contributors may + be used to endorse or promote products derived from this software without + specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT +OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/python/pycparser-2.23.dist-info/METADATA b/python/pycparser-2.23.dist-info/METADATA new file mode 100644 index 000000000..318b034f9 --- /dev/null +++ b/python/pycparser-2.23.dist-info/METADATA @@ -0,0 +1,28 @@ +Metadata-Version: 2.1 +Name: pycparser +Version: 2.23 +Summary: C parser in Python +Home-page: https://github.com/eliben/pycparser +Author: Eli Bendersky +Author-email: eliben@gmail.com +Maintainer: Eli Bendersky +License: BSD-3-Clause +Platform: Cross Platform +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: BSD License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Requires-Python: >=3.8 +License-File: LICENSE + + + pycparser is a complete parser of the C language, written in + pure Python using the PLY parsing library. + It parses C code into an AST and can serve as a front-end for + C compilers or analysis tools. 
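+ + A minimal usage sketch (``example.c`` is a hypothetical input file, and + ``use_cpp=True`` assumes a ``cpp`` preprocessor binary is available on the + PATH): + + import pycparser + + ast = pycparser.parse_file('example.c', use_cpp=True) + ast.show() # pretty-print the parsed AST to stdout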
+ diff --git a/python/pycparser-2.23.dist-info/RECORD b/python/pycparser-2.23.dist-info/RECORD new file mode 100644 index 000000000..4e9376b27 --- /dev/null +++ b/python/pycparser-2.23.dist-info/RECORD @@ -0,0 +1,41 @@ +pycparser-2.23.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pycparser-2.23.dist-info/LICENSE,sha256=DIRjmTaep23de1xE_m0WSXQV_PAV9cu1CMJL-YuBxbE,1543 +pycparser-2.23.dist-info/METADATA,sha256=osmhHMxa3n5sPwv5WeUpyyPnm76eohXYGZyKGmWbPFc,993 +pycparser-2.23.dist-info/RECORD,, +pycparser-2.23.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92 +pycparser-2.23.dist-info/top_level.txt,sha256=c-lPcS74L_8KoH7IE6PQF5ofyirRQNV4VhkbSFIPeWM,10 +pycparser/__init__.py,sha256=FQFl5XuxXZiYHrBuN1EElN1COlR8k4aCSdG7h7a7zLw,2918 +pycparser/__pycache__/__init__.cpython-312.pyc,, +pycparser/__pycache__/_ast_gen.cpython-312.pyc,, +pycparser/__pycache__/_build_tables.cpython-312.pyc,, +pycparser/__pycache__/ast_transforms.cpython-312.pyc,, +pycparser/__pycache__/c_ast.cpython-312.pyc,, +pycparser/__pycache__/c_generator.cpython-312.pyc,, +pycparser/__pycache__/c_lexer.cpython-312.pyc,, +pycparser/__pycache__/c_parser.cpython-312.pyc,, +pycparser/__pycache__/lextab.cpython-312.pyc,, +pycparser/__pycache__/plyparser.cpython-312.pyc,, +pycparser/__pycache__/yacctab.cpython-312.pyc,, +pycparser/_ast_gen.py,sha256=0JRVnDW-Jw-3IjVlo8je9rbAcp6Ko7toHAnB5zi7h0Q,10555 +pycparser/_build_tables.py,sha256=4d_UkIxJ4YfHTVn6xBzBA52wDo7qxg1B6aZAJYJas9Q,1087 +pycparser/_c_ast.cfg,sha256=ld5ezE9yzIJFIVAUfw7ezJSlMi4nXKNCzfmqjOyQTNo,4255 +pycparser/ast_transforms.py,sha256=GTMYlUgWmXd5wJVyovXY1qzzAqjxzCpVVg0664dKGBs,5691 +pycparser/c_ast.py,sha256=HWeOrfYdCY0u5XaYhE1i60uVyE3yMWdcxzECUX-DqJw,31445 +pycparser/c_generator.py,sha256=XWK3oGM_eVD5d_JfgJxFO95Y6vUMfRi8pov5FQjHH2s,17790 +pycparser/c_lexer.py,sha256=bq7LALBDUw452KT3J7QzHj2qMoCugGWjITNPWD7yiOE,17728 +pycparser/c_parser.py,sha256=ujQZ7y6Qded9h5SDrhtgSlHw0vSYVa_yiMtW9ZnsezY,75462 +pycparser/lextab.py,sha256=eLh3spnPArpAKCVAEK1JCOgUX0L9wrJdfOTLyw8sLRE,8776 +pycparser/ply/__init__.py,sha256=q4s86QwRsYRa20L9ueSxfh-hPihpftBjDOvYa2_SS2Y,102 +pycparser/ply/__pycache__/__init__.cpython-312.pyc,, +pycparser/ply/__pycache__/cpp.cpython-312.pyc,, +pycparser/ply/__pycache__/ctokens.cpython-312.pyc,, +pycparser/ply/__pycache__/lex.cpython-312.pyc,, +pycparser/ply/__pycache__/yacc.cpython-312.pyc,, +pycparser/ply/__pycache__/ygen.cpython-312.pyc,, +pycparser/ply/cpp.py,sha256=UtC3ylTWp5_1MKA-PLCuwKQR8zSOnlGuGGIdzj8xS98,33282 +pycparser/ply/ctokens.py,sha256=MKksnN40TehPhgVfxCJhjj_BjL943apreABKYz-bl0Y,3177 +pycparser/ply/lex.py,sha256=rCMi0yjlZmjH5SNXj_Yds1VxSDkaG2thS7351YvfN-I,42926 +pycparser/ply/yacc.py,sha256=eatSDkRLgRr6X3-hoDk_SQQv065R0BdL2K7fQ54CgVM,137323 +pycparser/ply/ygen.py,sha256=2JYNeYtrPz1JzLSLO3d4GsS8zJU8jY_I_CR1VI9gWrA,2251 +pycparser/plyparser.py,sha256=8tLOoEytcapvWrr1JfCf7Dog-wulBtS1YrDs8S7JfMo,4875 +pycparser/yacctab.py,sha256=ovX4pQW7sjbRf8c-GxpU6J65pqVGAgbU_G_YuW5NPHM,213007 diff --git a/python/pycparser-2.23.dist-info/WHEEL b/python/pycparser-2.23.dist-info/WHEEL new file mode 100644 index 000000000..98c0d20b7 --- /dev/null +++ b/python/pycparser-2.23.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/python/pycparser-2.23.dist-info/top_level.txt b/python/pycparser-2.23.dist-info/top_level.txt new file mode 100644 index 000000000..dc1c9e101 --- /dev/null +++ b/python/pycparser-2.23.dist-info/top_level.txt @@ -0,0 +1 @@ 
+pycparser diff --git a/python/pycparser/__init__.py b/python/pycparser/__init__.py new file mode 100644 index 000000000..170b18189 --- /dev/null +++ b/python/pycparser/__init__.py @@ -0,0 +1,93 @@ +#----------------------------------------------------------------- +# pycparser: __init__.py +# +# This package file exports some convenience functions for +# interacting with pycparser +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#----------------------------------------------------------------- +__all__ = ['c_lexer', 'c_parser', 'c_ast'] +__version__ = '2.23' + +import io +from subprocess import check_output +from .c_parser import CParser + + +def preprocess_file(filename, cpp_path='cpp', cpp_args=''): + """ Preprocess a file using cpp. + + filename: + Name of the file you want to preprocess. + + cpp_path: + cpp_args: + Refer to the documentation of parse_file for the meaning of these + arguments. + + When successful, returns the preprocessed file's contents. + Errors from cpp will be printed out. + """ + path_list = [cpp_path] + if isinstance(cpp_args, list): + path_list += cpp_args + elif cpp_args != '': + path_list += [cpp_args] + path_list += [filename] + + try: + # Note the use of universal_newlines to treat all newlines + # as \n for Python's purpose + text = check_output(path_list, universal_newlines=True) + except OSError as e: + raise RuntimeError("Unable to invoke 'cpp'. " + + 'Make sure its path was passed correctly\n' + + ('Original error: %s' % e)) + + return text + + +def parse_file(filename, use_cpp=False, cpp_path='cpp', cpp_args='', + parser=None, encoding=None): + """ Parse a C file using pycparser. + + filename: + Name of the file you want to parse. + + use_cpp: + Set to True if you want to execute the C pre-processor + on the file prior to parsing it. + + cpp_path: + If use_cpp is True, this is the path to 'cpp' on your + system. If no path is provided, it attempts to just + execute 'cpp', so it must be in your PATH. + + cpp_args: + If use_cpp is True, set this to the command line arguments strings + to cpp. Be careful with quotes - it's best to pass a raw string + (r'') here. For example: + r'-I../utils/fake_libc_include' + If several arguments are required, pass a list of strings. + + encoding: + Encoding to use for the file to parse + + parser: + Optional parser object to be used instead of the default CParser + + When successful, an AST is returned. ParseError can be + thrown if the file doesn't parse successfully. + + Errors from cpp will be printed out. 
+ """ + if use_cpp: + text = preprocess_file(filename, cpp_path, cpp_args) + else: + with io.open(filename, encoding=encoding) as f: + text = f.read() + + if parser is None: + parser = CParser() + return parser.parse(text, filename) diff --git a/python/pycparser/__pycache__/__init__.cpython-312.pyc b/python/pycparser/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..a42c9739e Binary files /dev/null and b/python/pycparser/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/_ast_gen.cpython-312.pyc b/python/pycparser/__pycache__/_ast_gen.cpython-312.pyc new file mode 100644 index 000000000..09e884b92 Binary files /dev/null and b/python/pycparser/__pycache__/_ast_gen.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/_build_tables.cpython-312.pyc b/python/pycparser/__pycache__/_build_tables.cpython-312.pyc new file mode 100644 index 000000000..08f7dceb4 Binary files /dev/null and b/python/pycparser/__pycache__/_build_tables.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/ast_transforms.cpython-312.pyc b/python/pycparser/__pycache__/ast_transforms.cpython-312.pyc new file mode 100644 index 000000000..d8666f0f0 Binary files /dev/null and b/python/pycparser/__pycache__/ast_transforms.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/c_ast.cpython-312.pyc b/python/pycparser/__pycache__/c_ast.cpython-312.pyc new file mode 100644 index 000000000..517b0f197 Binary files /dev/null and b/python/pycparser/__pycache__/c_ast.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/c_generator.cpython-312.pyc b/python/pycparser/__pycache__/c_generator.cpython-312.pyc new file mode 100644 index 000000000..8d3ff5185 Binary files /dev/null and b/python/pycparser/__pycache__/c_generator.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/c_lexer.cpython-312.pyc b/python/pycparser/__pycache__/c_lexer.cpython-312.pyc new file mode 100644 index 000000000..35deffe5a Binary files /dev/null and b/python/pycparser/__pycache__/c_lexer.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/c_parser.cpython-312.pyc b/python/pycparser/__pycache__/c_parser.cpython-312.pyc new file mode 100644 index 000000000..b4f4790f7 Binary files /dev/null and b/python/pycparser/__pycache__/c_parser.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/lextab.cpython-312.pyc b/python/pycparser/__pycache__/lextab.cpython-312.pyc new file mode 100644 index 000000000..b5949d816 Binary files /dev/null and b/python/pycparser/__pycache__/lextab.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/plyparser.cpython-312.pyc b/python/pycparser/__pycache__/plyparser.cpython-312.pyc new file mode 100644 index 000000000..2a48aab64 Binary files /dev/null and b/python/pycparser/__pycache__/plyparser.cpython-312.pyc differ diff --git a/python/pycparser/__pycache__/yacctab.cpython-312.pyc b/python/pycparser/__pycache__/yacctab.cpython-312.pyc new file mode 100644 index 000000000..f69ecafca Binary files /dev/null and b/python/pycparser/__pycache__/yacctab.cpython-312.pyc differ diff --git a/python/pycparser/_ast_gen.py b/python/pycparser/_ast_gen.py new file mode 100644 index 000000000..0f7d330ba --- /dev/null +++ b/python/pycparser/_ast_gen.py @@ -0,0 +1,336 @@ +#----------------------------------------------------------------- +# _ast_gen.py +# +# Generates the AST Node classes from a specification given in +# a configuration file +# +# The design of this module was inspired by astgen.py from 
the +# Python 2.5 code-base. +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#----------------------------------------------------------------- +from string import Template + + +class ASTCodeGenerator(object): + def __init__(self, cfg_filename='_c_ast.cfg'): + """ Initialize the code generator from a configuration + file. + """ + self.cfg_filename = cfg_filename + self.node_cfg = [NodeCfg(name, contents) + for (name, contents) in self.parse_cfgfile(cfg_filename)] + + def generate(self, file=None): + """ Generates the code into file, an open file buffer. + """ + src = Template(_PROLOGUE_COMMENT).substitute( + cfg_filename=self.cfg_filename) + + src += _PROLOGUE_CODE + for node_cfg in self.node_cfg: + src += node_cfg.generate_source() + '\n\n' + + file.write(src) + + def parse_cfgfile(self, filename): + """ Parse the configuration file and yield pairs of + (name, contents) for each node. + """ + with open(filename, "r") as f: + for line in f: + line = line.strip() + if not line or line.startswith('#'): + continue + colon_i = line.find(':') + lbracket_i = line.find('[') + rbracket_i = line.find(']') + if colon_i < 1 or lbracket_i <= colon_i or rbracket_i <= lbracket_i: + raise RuntimeError("Invalid line in %s:\n%s\n" % (filename, line)) + + name = line[:colon_i] + val = line[lbracket_i + 1:rbracket_i] + vallist = [v.strip() for v in val.split(',')] if val else [] + yield name, vallist + + +class NodeCfg(object): + """ Node configuration. + + name: node name + contents: a list of contents - attributes and child nodes + See comment at the top of the configuration file for details. + """ + + def __init__(self, name, contents): + self.name = name + self.all_entries = [] + self.attr = [] + self.child = [] + self.seq_child = [] + + for entry in contents: + clean_entry = entry.rstrip('*') + self.all_entries.append(clean_entry) + + if entry.endswith('**'): + self.seq_child.append(clean_entry) + elif entry.endswith('*'): + self.child.append(clean_entry) + else: + self.attr.append(entry) + + def generate_source(self): + src = self._gen_init() + src += '\n' + self._gen_children() + src += '\n' + self._gen_iter() + src += '\n' + self._gen_attr_names() + return src + + def _gen_init(self): + src = "class %s(Node):\n" % self.name + + if self.all_entries: + args = ', '.join(self.all_entries) + slots = ', '.join("'{0}'".format(e) for e in self.all_entries) + slots += ", 'coord', '__weakref__'" + arglist = '(self, %s, coord=None)' % args + else: + slots = "'coord', '__weakref__'" + arglist = '(self, coord=None)' + + src += " __slots__ = (%s)\n" % slots + src += " def __init__%s:\n" % arglist + + for name in self.all_entries + ['coord']: + src += " self.%s = %s\n" % (name, name) + + return src + + def _gen_children(self): + src = ' def children(self):\n' + + if self.all_entries: + src += ' nodelist = []\n' + + for child in self.child: + src += ( + ' if self.%(child)s is not None:' + + ' nodelist.append(("%(child)s", self.%(child)s))\n') % ( + dict(child=child)) + + for seq_child in self.seq_child: + src += ( + ' for i, child in enumerate(self.%(child)s or []):\n' + ' nodelist.append(("%(child)s[%%d]" %% i, child))\n') % ( + dict(child=seq_child)) + + src += ' return tuple(nodelist)\n' + else: + src += ' return ()\n' + + return src + + def _gen_iter(self): + src = ' def __iter__(self):\n' + + if self.all_entries: + for child in self.child: + src += ( + ' if self.%(child)s is not None:\n' + + ' yield self.%(child)s\n') % (dict(child=child)) + + for seq_child in self.seq_child: + src += ( + 
' for child in (self.%(child)s or []):\n' + ' yield child\n') % (dict(child=seq_child)) + + if not (self.child or self.seq_child): + # Empty generator + src += ( + ' return\n' + + ' yield\n') + else: + # Empty generator + src += ( + ' return\n' + + ' yield\n') + + return src + + def _gen_attr_names(self): + src = " attr_names = (" + ''.join("%r, " % nm for nm in self.attr) + ')' + return src + + +_PROLOGUE_COMMENT = \ +r'''#----------------------------------------------------------------- +# ** ATTENTION ** +# This code was automatically generated from the file: +# $cfg_filename +# +# Do not modify it directly. Modify the configuration file and +# run the generator again. +# ** ** *** ** ** +# +# pycparser: c_ast.py +# +# AST Node classes. +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#----------------------------------------------------------------- + +''' + +_PROLOGUE_CODE = r''' +import sys + +def _repr(obj): + """ + Get the representation of an object, with dedicated pprint-like format for lists. + """ + if isinstance(obj, list): + return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]' + else: + return repr(obj) + +class Node(object): + __slots__ = () + """ Abstract base class for AST nodes. + """ + def __repr__(self): + """ Generates a python representation of the current node + """ + result = self.__class__.__name__ + '(' + + indent = '' + separator = '' + for name in self.__slots__[:-2]: + result += separator + result += indent + result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__))))) + + separator = ',' + indent = '\n ' + (' ' * len(self.__class__.__name__)) + + result += indent + ')' + + return result + + def children(self): + """ A sequence of all children that are Nodes + """ + pass + + def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None): + """ Pretty print the Node and all its attributes and + children (recursively) to a buffer. + + buf: + Open IO buffer into which the Node is printed. + + offset: + Initial offset (amount of leading spaces) + + attrnames: + True if you want to see the attribute names in + name=value pairs. False to only see the values. + + nodenames: + True if you want to see the actual node names + within their parents. + + showcoord: + Do you want the coordinates of each Node to be + displayed. + """ + lead = ' ' * offset + if nodenames and _my_node_name is not None: + buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ') + else: + buf.write(lead + self.__class__.__name__+ ': ') + + if self.attr_names: + if attrnames: + nvlist = [(n, getattr(self,n)) for n in self.attr_names] + attrstr = ', '.join('%s=%s' % nv for nv in nvlist) + else: + vlist = [getattr(self, n) for n in self.attr_names] + attrstr = ', '.join('%s' % v for v in vlist) + buf.write(attrstr) + + if showcoord: + buf.write(' (at %s)' % self.coord) + buf.write('\n') + + for (child_name, child) in self.children(): + child.show( + buf, + offset=offset + 2, + attrnames=attrnames, + nodenames=nodenames, + showcoord=showcoord, + _my_node_name=child_name) + + +class NodeVisitor(object): + """ A base NodeVisitor class for visiting c_ast nodes. + Subclass it and define your own visit_XXX methods, where + XXX is the class name you want to visit with these + methods. 
+ + For example: + + class ConstantVisitor(NodeVisitor): + def __init__(self): + self.values = [] + + def visit_Constant(self, node): + self.values.append(node.value) + + Creates a list of values of all the constant nodes + encountered below the given node. To use it: + + cv = ConstantVisitor() + cv.visit(node) + + Notes: + + * generic_visit() will be called for AST nodes for which + no visit_XXX method was defined. + * The children of nodes for which a visit_XXX was + defined will not be visited - if you need this, call + generic_visit() on the node. + You can use: + NodeVisitor.generic_visit(self, node) + * Modeled after Python's own AST visiting facilities + (the ast module of Python 3.0) + """ + + _method_cache = None + + def visit(self, node): + """ Visit a node. + """ + + if self._method_cache is None: + self._method_cache = {} + + visitor = self._method_cache.get(node.__class__.__name__, None) + if visitor is None: + method = 'visit_' + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + self._method_cache[node.__class__.__name__] = visitor + + return visitor(node) + + def generic_visit(self, node): + """ Called if no explicit visitor function exists for a + node. Implements preorder visiting of the node. + """ + for c in node: + self.visit(c) + +''' diff --git a/python/pycparser/_build_tables.py b/python/pycparser/_build_tables.py new file mode 100644 index 000000000..4f3710795 --- /dev/null +++ b/python/pycparser/_build_tables.py @@ -0,0 +1,40 @@ +#----------------------------------------------------------------- +# pycparser: _build_tables.py +# +# A dummy for generating the lexing/parsing tables and +# compiling them into .pyc for faster execution in optimized mode. +# Also generates AST code from the configuration file. +# Should be called from the pycparser directory. +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#----------------------------------------------------------------- + +# Insert '.' and '..' as first entries to the search path for modules. +# Restricted environments like embeddable python do not include the +# current working directory on startup. +import importlib +import sys +sys.path[0:0] = ['.', '..'] + +# Generate c_ast.py; close the file handle promptly once it is written +from _ast_gen import ASTCodeGenerator +ast_gen = ASTCodeGenerator('_c_ast.cfg') +with open('c_ast.py', 'w') as f: + ast_gen.generate(f) + +from pycparser import c_parser + +# Generates the tables +# +c_parser.CParser( + lex_optimize=True, + yacc_debug=False, + yacc_optimize=True) + +# Load to compile into .pyc +# +importlib.invalidate_caches() + +import lextab +import yacctab +import c_ast diff --git a/python/pycparser/_c_ast.cfg b/python/pycparser/_c_ast.cfg new file mode 100644 index 000000000..0626533e8 --- /dev/null +++ b/python/pycparser/_c_ast.cfg @@ -0,0 +1,195 @@ +#----------------------------------------------------------------- +# pycparser: _c_ast.cfg +# +# Defines the AST Node classes used in pycparser. +# +# Each entry is a Node sub-class name, listing the attributes +# and child nodes of the class: +# <name>* - a child node +# <name>** - a sequence of child nodes +# <name> - an attribute +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#----------------------------------------------------------------- + +# ArrayDecl is a nested declaration of an array with the given type. +# dim: the dimension (for example, constant 42) +# dim_quals: list of dimension qualifiers, to support C99's allowing 'const' +# and 'static' within the array dimension in function declarations. 
+ArrayDecl: [type*, dim*, dim_quals] + +ArrayRef: [name*, subscript*] + +# op: =, +=, /= etc. +# +Assignment: [op, lvalue*, rvalue*] + +Alignas: [alignment*] + +BinaryOp: [op, left*, right*] + +Break: [] + +Case: [expr*, stmts**] + +Cast: [to_type*, expr*] + +# Compound statement in C99 is a list of block items (declarations or +# statements). +# +Compound: [block_items**] + +# Compound literal (anonymous aggregate) for C99. +# (type-name) {initializer_list} +# type: the typename +# init: InitList for the initializer list +# +CompoundLiteral: [type*, init*] + +# type: int, char, float, string, etc. +# +Constant: [type, value] + +Continue: [] + +# name: the variable being declared +# quals: list of qualifiers (const, volatile) +# funcspec: list function specifiers (i.e. inline in C99) +# storage: list of storage specifiers (extern, register, etc.) +# type: declaration type (probably nested with all the modifiers) +# init: initialization value, or None +# bitsize: bit field size, or None +# +Decl: [name, quals, align, storage, funcspec, type*, init*, bitsize*] + +DeclList: [decls**] + +Default: [stmts**] + +DoWhile: [cond*, stmt*] + +# Represents the ellipsis (...) parameter in a function +# declaration +# +EllipsisParam: [] + +# An empty statement (a semicolon ';' on its own) +# +EmptyStatement: [] + +# Enumeration type specifier +# name: an optional ID +# values: an EnumeratorList +# +Enum: [name, values*] + +# A name/value pair for enumeration values +# +Enumerator: [name, value*] + +# A list of enumerators +# +EnumeratorList: [enumerators**] + +# A list of expressions separated by the comma operator. +# +ExprList: [exprs**] + +# This is the top of the AST, representing a single C file (a +# translation unit in K&R jargon). It contains a list of +# "external-declaration"s, which is either declarations (Decl), +# Typedef or function definitions (FuncDef). +# +FileAST: [ext**] + +# for (init; cond; next) stmt +# +For: [init*, cond*, next*, stmt*] + +# name: Id +# args: ExprList +# +FuncCall: [name*, args*] + +# type (args) +# +FuncDecl: [args*, type*] + +# Function definition: a declarator for the function name and +# a body, which is a compound statement. +# There's an optional list of parameter declarations for old +# K&R-style definitions +# +FuncDef: [decl*, param_decls**, body*] + +Goto: [name] + +ID: [name] + +# Holder for types that are a simple identifier (e.g. the built +# ins void, char etc. and typedef-defined types) +# +IdentifierType: [names] + +If: [cond*, iftrue*, iffalse*] + +# An initialization list used for compound literals. +# +InitList: [exprs**] + +Label: [name, stmt*] + +# A named initializer for C99. +# The name of a NamedInitializer is a sequence of Nodes, because +# names can be hierarchical and contain constant expressions. +# +NamedInitializer: [name**, expr*] + +# a list of comma separated function parameter declarations +# +ParamList: [params**] + +PtrDecl: [quals, type*] + +Return: [expr*] + +StaticAssert: [cond*, message*] + +# name: struct tag name +# decls: declaration of members +# +Struct: [name, decls**] + +# type: . or -> +# name.field or name->field +# +StructRef: [name*, type, field*] + +Switch: [cond*, stmt*] + +# cond ? iftrue : iffalse +# +TernaryOp: [cond*, iftrue*, iffalse*] + +# A base type declaration +# +TypeDecl: [declname, quals, align, type*] + +# A typedef declaration. 
+# Very similar to Decl, but without some attributes +# +Typedef: [name, quals, storage, type*] + +Typename: [name, quals, align, type*] + +UnaryOp: [op, expr*] + +# name: union tag name +# decls: declaration of members +# +Union: [name, decls**] + +While: [cond*, stmt*] + +Pragma: [string] diff --git a/python/pycparser/ast_transforms.py b/python/pycparser/ast_transforms.py new file mode 100644 index 000000000..367dcf54c --- /dev/null +++ b/python/pycparser/ast_transforms.py @@ -0,0 +1,164 @@ +#------------------------------------------------------------------------------ +# pycparser: ast_transforms.py +# +# Some utilities used by the parser to create a friendlier AST. +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#------------------------------------------------------------------------------ + +from . import c_ast + + +def fix_switch_cases(switch_node): + """ The 'case' statements in a 'switch' come out of parsing with one + child node, so subsequent statements are just tucked to the parent + Compound. Additionally, consecutive (fall-through) case statements + come out messy. This is a peculiarity of the C grammar. The following: + + switch (myvar) { + case 10: + k = 10; + p = k + 1; + return 10; + case 20: + case 30: + return 20; + default: + break; + } + + Creates this tree (pseudo-dump): + + Switch + ID: myvar + Compound: + Case 10: + k = 10 + p = k + 1 + return 10 + Case 20: + Case 30: + return 20 + Default: + break + + The goal of this transform is to fix this mess, turning it into the + following: + + Switch + ID: myvar + Compound: + Case 10: + k = 10 + p = k + 1 + return 10 + Case 20: + Case 30: + return 20 + Default: + break + + A fixed AST node is returned. The argument may be modified. + """ + assert isinstance(switch_node, c_ast.Switch) + if not isinstance(switch_node.stmt, c_ast.Compound): + return switch_node + + # The new Compound child for the Switch, which will collect children in the + # correct order + new_compound = c_ast.Compound([], switch_node.stmt.coord) + + # The last Case/Default node + last_case = None + + # Goes over the children of the Compound below the Switch, adding them + # either directly below new_compound or below the last Case as appropriate + # (for `switch(cond) {}`, block_items would have been None) + for child in (switch_node.stmt.block_items or []): + if isinstance(child, (c_ast.Case, c_ast.Default)): + # If it's a Case/Default: + # 1. Add it to the Compound and mark as "last case" + # 2. If its immediate child is also a Case or Default, promote it + # to a sibling. + new_compound.block_items.append(child) + _extract_nested_case(child, new_compound.block_items) + last_case = new_compound.block_items[-1] + else: + # Other statements are added as children to the last case, if it + # exists. + if last_case is None: + new_compound.block_items.append(child) + else: + last_case.stmts.append(child) + + switch_node.stmt = new_compound + return switch_node + + +def _extract_nested_case(case_node, stmts_list): + """ Recursively extract consecutive Case statements that are made nested + by the parser and add them to the stmts_list. + """ + if isinstance(case_node.stmts[0], (c_ast.Case, c_ast.Default)): + stmts_list.append(case_node.stmts.pop()) + _extract_nested_case(stmts_list[-1], stmts_list) + + +def fix_atomic_specifiers(decl): + """ Atomic specifiers like _Atomic(type) are unusually structured, + conferring a qualifier upon the contained type. 
+ + This function fixes a decl with atomic specifiers to have a sane AST + structure, by removing spurious Typename->TypeDecl pairs and attaching + the _Atomic qualifier in the right place. + """ + # There can be multiple levels of _Atomic in a decl; fix them until a + # fixed point is reached. + while True: + decl, found = _fix_atomic_specifiers_once(decl) + if not found: + break + + # Make sure to add an _Atomic qual on the topmost decl if needed. Also + # restore the declname on the innermost TypeDecl (it gets placed in the + # wrong place during construction). + typ = decl + while not isinstance(typ, c_ast.TypeDecl): + try: + typ = typ.type + except AttributeError: + return decl + if '_Atomic' in typ.quals and '_Atomic' not in decl.quals: + decl.quals.append('_Atomic') + if typ.declname is None: + typ.declname = decl.name + + return decl + + +def _fix_atomic_specifiers_once(decl): + """ Performs one 'fix' round of atomic specifiers. + Returns (modified_decl, found) where found is True iff a fix was made. + """ + parent = decl + grandparent = None + node = decl.type + while node is not None: + if isinstance(node, c_ast.Typename) and '_Atomic' in node.quals: + break + try: + grandparent = parent + parent = node + node = node.type + except AttributeError: + # If we've reached a node without a `type` field, it means we won't + # find what we're looking for at this point; give up the search + # and return the original decl unmodified. + return decl, False + + assert isinstance(parent, c_ast.TypeDecl) + grandparent.type = node.type + if '_Atomic' not in node.type.quals: + node.type.quals.append('_Atomic') + return decl, True diff --git a/python/pycparser/c_ast.py b/python/pycparser/c_ast.py new file mode 100644 index 000000000..6575a2ad3 --- /dev/null +++ b/python/pycparser/c_ast.py @@ -0,0 +1,1125 @@ +#----------------------------------------------------------------- +# ** ATTENTION ** +# This code was automatically generated from the file: +# _c_ast.cfg +# +# Do not modify it directly. Modify the configuration file and +# run the generator again. +# ** ** *** ** ** +# +# pycparser: c_ast.py +# +# AST Node classes. +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#----------------------------------------------------------------- + + +import sys + +def _repr(obj): + """ + Get the representation of an object, with dedicated pprint-like format for lists. + """ + if isinstance(obj, list): + return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]' + else: + return repr(obj) + +class Node(object): + __slots__ = () + """ Abstract base class for AST nodes. + """ + def __repr__(self): + """ Generates a python representation of the current node + """ + result = self.__class__.__name__ + '(' + + indent = '' + separator = '' + for name in self.__slots__[:-2]: + result += separator + result += indent + result += name + '=' + (_repr(getattr(self, name)).replace('\n', '\n ' + (' ' * (len(name) + len(self.__class__.__name__))))) + + separator = ',' + indent = '\n ' + (' ' * len(self.__class__.__name__)) + + result += indent + ')' + + return result + + def children(self): + """ A sequence of all children that are Nodes + """ + pass + + def show(self, buf=sys.stdout, offset=0, attrnames=False, nodenames=False, showcoord=False, _my_node_name=None): + """ Pretty print the Node and all its attributes and + children (recursively) to a buffer. + + buf: + Open IO buffer into which the Node is printed. 
+ + offset: + Initial offset (amount of leading spaces) + + attrnames: + True if you want to see the attribute names in + name=value pairs. False to only see the values. + + nodenames: + True if you want to see the actual node names + within their parents. + + showcoord: + Do you want the coordinates of each Node to be + displayed. + """ + lead = ' ' * offset + if nodenames and _my_node_name is not None: + buf.write(lead + self.__class__.__name__+ ' <' + _my_node_name + '>: ') + else: + buf.write(lead + self.__class__.__name__+ ': ') + + if self.attr_names: + if attrnames: + nvlist = [(n, getattr(self,n)) for n in self.attr_names] + attrstr = ', '.join('%s=%s' % nv for nv in nvlist) + else: + vlist = [getattr(self, n) for n in self.attr_names] + attrstr = ', '.join('%s' % v for v in vlist) + buf.write(attrstr) + + if showcoord: + buf.write(' (at %s)' % self.coord) + buf.write('\n') + + for (child_name, child) in self.children(): + child.show( + buf, + offset=offset + 2, + attrnames=attrnames, + nodenames=nodenames, + showcoord=showcoord, + _my_node_name=child_name) + + +class NodeVisitor(object): + """ A base NodeVisitor class for visiting c_ast nodes. + Subclass it and define your own visit_XXX methods, where + XXX is the class name you want to visit with these + methods. + + For example: + + class ConstantVisitor(NodeVisitor): + def __init__(self): + self.values = [] + + def visit_Constant(self, node): + self.values.append(node.value) + + Creates a list of values of all the constant nodes + encountered below the given node. To use it: + + cv = ConstantVisitor() + cv.visit(node) + + Notes: + + * generic_visit() will be called for AST nodes for which + no visit_XXX method was defined. + * The children of nodes for which a visit_XXX was + defined will not be visited - if you need this, call + generic_visit() on the node. + You can use: + NodeVisitor.generic_visit(self, node) + * Modeled after Python's own AST visiting facilities + (the ast module of Python 3.0) + """ + + _method_cache = None + + def visit(self, node): + """ Visit a node. + """ + + if self._method_cache is None: + self._method_cache = {} + + visitor = self._method_cache.get(node.__class__.__name__, None) + if visitor is None: + method = 'visit_' + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + self._method_cache[node.__class__.__name__] = visitor + + return visitor(node) + + def generic_visit(self, node): + """ Called if no explicit visitor function exists for a + node. Implements preorder visiting of the node. 
+ """ + for c in node: + self.visit(c) + +class ArrayDecl(Node): + __slots__ = ('type', 'dim', 'dim_quals', 'coord', '__weakref__') + def __init__(self, type, dim, dim_quals, coord=None): + self.type = type + self.dim = dim + self.dim_quals = dim_quals + self.coord = coord + + def children(self): + nodelist = [] + if self.type is not None: nodelist.append(("type", self.type)) + if self.dim is not None: nodelist.append(("dim", self.dim)) + return tuple(nodelist) + + def __iter__(self): + if self.type is not None: + yield self.type + if self.dim is not None: + yield self.dim + + attr_names = ('dim_quals', ) + +class ArrayRef(Node): + __slots__ = ('name', 'subscript', 'coord', '__weakref__') + def __init__(self, name, subscript, coord=None): + self.name = name + self.subscript = subscript + self.coord = coord + + def children(self): + nodelist = [] + if self.name is not None: nodelist.append(("name", self.name)) + if self.subscript is not None: nodelist.append(("subscript", self.subscript)) + return tuple(nodelist) + + def __iter__(self): + if self.name is not None: + yield self.name + if self.subscript is not None: + yield self.subscript + + attr_names = () + +class Assignment(Node): + __slots__ = ('op', 'lvalue', 'rvalue', 'coord', '__weakref__') + def __init__(self, op, lvalue, rvalue, coord=None): + self.op = op + self.lvalue = lvalue + self.rvalue = rvalue + self.coord = coord + + def children(self): + nodelist = [] + if self.lvalue is not None: nodelist.append(("lvalue", self.lvalue)) + if self.rvalue is not None: nodelist.append(("rvalue", self.rvalue)) + return tuple(nodelist) + + def __iter__(self): + if self.lvalue is not None: + yield self.lvalue + if self.rvalue is not None: + yield self.rvalue + + attr_names = ('op', ) + +class Alignas(Node): + __slots__ = ('alignment', 'coord', '__weakref__') + def __init__(self, alignment, coord=None): + self.alignment = alignment + self.coord = coord + + def children(self): + nodelist = [] + if self.alignment is not None: nodelist.append(("alignment", self.alignment)) + return tuple(nodelist) + + def __iter__(self): + if self.alignment is not None: + yield self.alignment + + attr_names = () + +class BinaryOp(Node): + __slots__ = ('op', 'left', 'right', 'coord', '__weakref__') + def __init__(self, op, left, right, coord=None): + self.op = op + self.left = left + self.right = right + self.coord = coord + + def children(self): + nodelist = [] + if self.left is not None: nodelist.append(("left", self.left)) + if self.right is not None: nodelist.append(("right", self.right)) + return tuple(nodelist) + + def __iter__(self): + if self.left is not None: + yield self.left + if self.right is not None: + yield self.right + + attr_names = ('op', ) + +class Break(Node): + __slots__ = ('coord', '__weakref__') + def __init__(self, coord=None): + self.coord = coord + + def children(self): + return () + + def __iter__(self): + return + yield + + attr_names = () + +class Case(Node): + __slots__ = ('expr', 'stmts', 'coord', '__weakref__') + def __init__(self, expr, stmts, coord=None): + self.expr = expr + self.stmts = stmts + self.coord = coord + + def children(self): + nodelist = [] + if self.expr is not None: nodelist.append(("expr", self.expr)) + for i, child in enumerate(self.stmts or []): + nodelist.append(("stmts[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + if self.expr is not None: + yield self.expr + for child in (self.stmts or []): + yield child + + attr_names = () + +class Cast(Node): + __slots__ = ('to_type', 'expr', 'coord', 
'__weakref__') + def __init__(self, to_type, expr, coord=None): + self.to_type = to_type + self.expr = expr + self.coord = coord + + def children(self): + nodelist = [] + if self.to_type is not None: nodelist.append(("to_type", self.to_type)) + if self.expr is not None: nodelist.append(("expr", self.expr)) + return tuple(nodelist) + + def __iter__(self): + if self.to_type is not None: + yield self.to_type + if self.expr is not None: + yield self.expr + + attr_names = () + +class Compound(Node): + __slots__ = ('block_items', 'coord', '__weakref__') + def __init__(self, block_items, coord=None): + self.block_items = block_items + self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.block_items or []): + nodelist.append(("block_items[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.block_items or []): + yield child + + attr_names = () + +class CompoundLiteral(Node): + __slots__ = ('type', 'init', 'coord', '__weakref__') + def __init__(self, type, init, coord=None): + self.type = type + self.init = init + self.coord = coord + + def children(self): + nodelist = [] + if self.type is not None: nodelist.append(("type", self.type)) + if self.init is not None: nodelist.append(("init", self.init)) + return tuple(nodelist) + + def __iter__(self): + if self.type is not None: + yield self.type + if self.init is not None: + yield self.init + + attr_names = () + +class Constant(Node): + __slots__ = ('type', 'value', 'coord', '__weakref__') + def __init__(self, type, value, coord=None): + self.type = type + self.value = value + self.coord = coord + + def children(self): + nodelist = [] + return tuple(nodelist) + + def __iter__(self): + return + yield + + attr_names = ('type', 'value', ) + +class Continue(Node): + __slots__ = ('coord', '__weakref__') + def __init__(self, coord=None): + self.coord = coord + + def children(self): + return () + + def __iter__(self): + return + yield + + attr_names = () + +class Decl(Node): + __slots__ = ('name', 'quals', 'align', 'storage', 'funcspec', 'type', 'init', 'bitsize', 'coord', '__weakref__') + def __init__(self, name, quals, align, storage, funcspec, type, init, bitsize, coord=None): + self.name = name + self.quals = quals + self.align = align + self.storage = storage + self.funcspec = funcspec + self.type = type + self.init = init + self.bitsize = bitsize + self.coord = coord + + def children(self): + nodelist = [] + if self.type is not None: nodelist.append(("type", self.type)) + if self.init is not None: nodelist.append(("init", self.init)) + if self.bitsize is not None: nodelist.append(("bitsize", self.bitsize)) + return tuple(nodelist) + + def __iter__(self): + if self.type is not None: + yield self.type + if self.init is not None: + yield self.init + if self.bitsize is not None: + yield self.bitsize + + attr_names = ('name', 'quals', 'align', 'storage', 'funcspec', ) + +class DeclList(Node): + __slots__ = ('decls', 'coord', '__weakref__') + def __init__(self, decls, coord=None): + self.decls = decls + self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.decls or []): + nodelist.append(("decls[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.decls or []): + yield child + + attr_names = () + +class Default(Node): + __slots__ = ('stmts', 'coord', '__weakref__') + def __init__(self, stmts, coord=None): + self.stmts = stmts + self.coord = coord + + def children(self): + nodelist = [] + for i, child in 
enumerate(self.stmts or []): + nodelist.append(("stmts[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.stmts or []): + yield child + + attr_names = () + +class DoWhile(Node): + __slots__ = ('cond', 'stmt', 'coord', '__weakref__') + def __init__(self, cond, stmt, coord=None): + self.cond = cond + self.stmt = stmt + self.coord = coord + + def children(self): + nodelist = [] + if self.cond is not None: nodelist.append(("cond", self.cond)) + if self.stmt is not None: nodelist.append(("stmt", self.stmt)) + return tuple(nodelist) + + def __iter__(self): + if self.cond is not None: + yield self.cond + if self.stmt is not None: + yield self.stmt + + attr_names = () + +class EllipsisParam(Node): + __slots__ = ('coord', '__weakref__') + def __init__(self, coord=None): + self.coord = coord + + def children(self): + return () + + def __iter__(self): + return + yield + + attr_names = () + +class EmptyStatement(Node): + __slots__ = ('coord', '__weakref__') + def __init__(self, coord=None): + self.coord = coord + + def children(self): + return () + + def __iter__(self): + return + yield + + attr_names = () + +class Enum(Node): + __slots__ = ('name', 'values', 'coord', '__weakref__') + def __init__(self, name, values, coord=None): + self.name = name + self.values = values + self.coord = coord + + def children(self): + nodelist = [] + if self.values is not None: nodelist.append(("values", self.values)) + return tuple(nodelist) + + def __iter__(self): + if self.values is not None: + yield self.values + + attr_names = ('name', ) + +class Enumerator(Node): + __slots__ = ('name', 'value', 'coord', '__weakref__') + def __init__(self, name, value, coord=None): + self.name = name + self.value = value + self.coord = coord + + def children(self): + nodelist = [] + if self.value is not None: nodelist.append(("value", self.value)) + return tuple(nodelist) + + def __iter__(self): + if self.value is not None: + yield self.value + + attr_names = ('name', ) + +class EnumeratorList(Node): + __slots__ = ('enumerators', 'coord', '__weakref__') + def __init__(self, enumerators, coord=None): + self.enumerators = enumerators + self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.enumerators or []): + nodelist.append(("enumerators[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.enumerators or []): + yield child + + attr_names = () + +class ExprList(Node): + __slots__ = ('exprs', 'coord', '__weakref__') + def __init__(self, exprs, coord=None): + self.exprs = exprs + self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.exprs or []): + nodelist.append(("exprs[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.exprs or []): + yield child + + attr_names = () + +class FileAST(Node): + __slots__ = ('ext', 'coord', '__weakref__') + def __init__(self, ext, coord=None): + self.ext = ext + self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.ext or []): + nodelist.append(("ext[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.ext or []): + yield child + + attr_names = () + +class For(Node): + __slots__ = ('init', 'cond', 'next', 'stmt', 'coord', '__weakref__') + def __init__(self, init, cond, next, stmt, coord=None): + self.init = init + self.cond = cond + self.next = next + self.stmt = stmt + self.coord = coord + + def children(self): + nodelist = [] + if self.init is 
not None: nodelist.append(("init", self.init)) + if self.cond is not None: nodelist.append(("cond", self.cond)) + if self.next is not None: nodelist.append(("next", self.next)) + if self.stmt is not None: nodelist.append(("stmt", self.stmt)) + return tuple(nodelist) + + def __iter__(self): + if self.init is not None: + yield self.init + if self.cond is not None: + yield self.cond + if self.next is not None: + yield self.next + if self.stmt is not None: + yield self.stmt + + attr_names = () + +class FuncCall(Node): + __slots__ = ('name', 'args', 'coord', '__weakref__') + def __init__(self, name, args, coord=None): + self.name = name + self.args = args + self.coord = coord + + def children(self): + nodelist = [] + if self.name is not None: nodelist.append(("name", self.name)) + if self.args is not None: nodelist.append(("args", self.args)) + return tuple(nodelist) + + def __iter__(self): + if self.name is not None: + yield self.name + if self.args is not None: + yield self.args + + attr_names = () + +class FuncDecl(Node): + __slots__ = ('args', 'type', 'coord', '__weakref__') + def __init__(self, args, type, coord=None): + self.args = args + self.type = type + self.coord = coord + + def children(self): + nodelist = [] + if self.args is not None: nodelist.append(("args", self.args)) + if self.type is not None: nodelist.append(("type", self.type)) + return tuple(nodelist) + + def __iter__(self): + if self.args is not None: + yield self.args + if self.type is not None: + yield self.type + + attr_names = () + +class FuncDef(Node): + __slots__ = ('decl', 'param_decls', 'body', 'coord', '__weakref__') + def __init__(self, decl, param_decls, body, coord=None): + self.decl = decl + self.param_decls = param_decls + self.body = body + self.coord = coord + + def children(self): + nodelist = [] + if self.decl is not None: nodelist.append(("decl", self.decl)) + if self.body is not None: nodelist.append(("body", self.body)) + for i, child in enumerate(self.param_decls or []): + nodelist.append(("param_decls[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + if self.decl is not None: + yield self.decl + if self.body is not None: + yield self.body + for child in (self.param_decls or []): + yield child + + attr_names = () + +class Goto(Node): + __slots__ = ('name', 'coord', '__weakref__') + def __init__(self, name, coord=None): + self.name = name + self.coord = coord + + def children(self): + nodelist = [] + return tuple(nodelist) + + def __iter__(self): + return + yield + + attr_names = ('name', ) + +class ID(Node): + __slots__ = ('name', 'coord', '__weakref__') + def __init__(self, name, coord=None): + self.name = name + self.coord = coord + + def children(self): + nodelist = [] + return tuple(nodelist) + + def __iter__(self): + return + yield + + attr_names = ('name', ) + +class IdentifierType(Node): + __slots__ = ('names', 'coord', '__weakref__') + def __init__(self, names, coord=None): + self.names = names + self.coord = coord + + def children(self): + nodelist = [] + return tuple(nodelist) + + def __iter__(self): + return + yield + + attr_names = ('names', ) + +class If(Node): + __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__') + def __init__(self, cond, iftrue, iffalse, coord=None): + self.cond = cond + self.iftrue = iftrue + self.iffalse = iffalse + self.coord = coord + + def children(self): + nodelist = [] + if self.cond is not None: nodelist.append(("cond", self.cond)) + if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue)) + if self.iffalse is not 
None: nodelist.append(("iffalse", self.iffalse)) + return tuple(nodelist) + + def __iter__(self): + if self.cond is not None: + yield self.cond + if self.iftrue is not None: + yield self.iftrue + if self.iffalse is not None: + yield self.iffalse + + attr_names = () + +class InitList(Node): + __slots__ = ('exprs', 'coord', '__weakref__') + def __init__(self, exprs, coord=None): + self.exprs = exprs + self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.exprs or []): + nodelist.append(("exprs[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.exprs or []): + yield child + + attr_names = () + +class Label(Node): + __slots__ = ('name', 'stmt', 'coord', '__weakref__') + def __init__(self, name, stmt, coord=None): + self.name = name + self.stmt = stmt + self.coord = coord + + def children(self): + nodelist = [] + if self.stmt is not None: nodelist.append(("stmt", self.stmt)) + return tuple(nodelist) + + def __iter__(self): + if self.stmt is not None: + yield self.stmt + + attr_names = ('name', ) + +class NamedInitializer(Node): + __slots__ = ('name', 'expr', 'coord', '__weakref__') + def __init__(self, name, expr, coord=None): + self.name = name + self.expr = expr + self.coord = coord + + def children(self): + nodelist = [] + if self.expr is not None: nodelist.append(("expr", self.expr)) + for i, child in enumerate(self.name or []): + nodelist.append(("name[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + if self.expr is not None: + yield self.expr + for child in (self.name or []): + yield child + + attr_names = () + +class ParamList(Node): + __slots__ = ('params', 'coord', '__weakref__') + def __init__(self, params, coord=None): + self.params = params + self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.params or []): + nodelist.append(("params[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.params or []): + yield child + + attr_names = () + +class PtrDecl(Node): + __slots__ = ('quals', 'type', 'coord', '__weakref__') + def __init__(self, quals, type, coord=None): + self.quals = quals + self.type = type + self.coord = coord + + def children(self): + nodelist = [] + if self.type is not None: nodelist.append(("type", self.type)) + return tuple(nodelist) + + def __iter__(self): + if self.type is not None: + yield self.type + + attr_names = ('quals', ) + +class Return(Node): + __slots__ = ('expr', 'coord', '__weakref__') + def __init__(self, expr, coord=None): + self.expr = expr + self.coord = coord + + def children(self): + nodelist = [] + if self.expr is not None: nodelist.append(("expr", self.expr)) + return tuple(nodelist) + + def __iter__(self): + if self.expr is not None: + yield self.expr + + attr_names = () + +class StaticAssert(Node): + __slots__ = ('cond', 'message', 'coord', '__weakref__') + def __init__(self, cond, message, coord=None): + self.cond = cond + self.message = message + self.coord = coord + + def children(self): + nodelist = [] + if self.cond is not None: nodelist.append(("cond", self.cond)) + if self.message is not None: nodelist.append(("message", self.message)) + return tuple(nodelist) + + def __iter__(self): + if self.cond is not None: + yield self.cond + if self.message is not None: + yield self.message + + attr_names = () + +class Struct(Node): + __slots__ = ('name', 'decls', 'coord', '__weakref__') + def __init__(self, name, decls, coord=None): + self.name = name + self.decls = decls + 
self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.decls or []): + nodelist.append(("decls[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.decls or []): + yield child + + attr_names = ('name', ) + +class StructRef(Node): + __slots__ = ('name', 'type', 'field', 'coord', '__weakref__') + def __init__(self, name, type, field, coord=None): + self.name = name + self.type = type + self.field = field + self.coord = coord + + def children(self): + nodelist = [] + if self.name is not None: nodelist.append(("name", self.name)) + if self.field is not None: nodelist.append(("field", self.field)) + return tuple(nodelist) + + def __iter__(self): + if self.name is not None: + yield self.name + if self.field is not None: + yield self.field + + attr_names = ('type', ) + +class Switch(Node): + __slots__ = ('cond', 'stmt', 'coord', '__weakref__') + def __init__(self, cond, stmt, coord=None): + self.cond = cond + self.stmt = stmt + self.coord = coord + + def children(self): + nodelist = [] + if self.cond is not None: nodelist.append(("cond", self.cond)) + if self.stmt is not None: nodelist.append(("stmt", self.stmt)) + return tuple(nodelist) + + def __iter__(self): + if self.cond is not None: + yield self.cond + if self.stmt is not None: + yield self.stmt + + attr_names = () + +class TernaryOp(Node): + __slots__ = ('cond', 'iftrue', 'iffalse', 'coord', '__weakref__') + def __init__(self, cond, iftrue, iffalse, coord=None): + self.cond = cond + self.iftrue = iftrue + self.iffalse = iffalse + self.coord = coord + + def children(self): + nodelist = [] + if self.cond is not None: nodelist.append(("cond", self.cond)) + if self.iftrue is not None: nodelist.append(("iftrue", self.iftrue)) + if self.iffalse is not None: nodelist.append(("iffalse", self.iffalse)) + return tuple(nodelist) + + def __iter__(self): + if self.cond is not None: + yield self.cond + if self.iftrue is not None: + yield self.iftrue + if self.iffalse is not None: + yield self.iffalse + + attr_names = () + +class TypeDecl(Node): + __slots__ = ('declname', 'quals', 'align', 'type', 'coord', '__weakref__') + def __init__(self, declname, quals, align, type, coord=None): + self.declname = declname + self.quals = quals + self.align = align + self.type = type + self.coord = coord + + def children(self): + nodelist = [] + if self.type is not None: nodelist.append(("type", self.type)) + return tuple(nodelist) + + def __iter__(self): + if self.type is not None: + yield self.type + + attr_names = ('declname', 'quals', 'align', ) + +class Typedef(Node): + __slots__ = ('name', 'quals', 'storage', 'type', 'coord', '__weakref__') + def __init__(self, name, quals, storage, type, coord=None): + self.name = name + self.quals = quals + self.storage = storage + self.type = type + self.coord = coord + + def children(self): + nodelist = [] + if self.type is not None: nodelist.append(("type", self.type)) + return tuple(nodelist) + + def __iter__(self): + if self.type is not None: + yield self.type + + attr_names = ('name', 'quals', 'storage', ) + +class Typename(Node): + __slots__ = ('name', 'quals', 'align', 'type', 'coord', '__weakref__') + def __init__(self, name, quals, align, type, coord=None): + self.name = name + self.quals = quals + self.align = align + self.type = type + self.coord = coord + + def children(self): + nodelist = [] + if self.type is not None: nodelist.append(("type", self.type)) + return tuple(nodelist) + + def __iter__(self): + if self.type is not None: + yield 
self.type + + attr_names = ('name', 'quals', 'align', ) + +class UnaryOp(Node): + __slots__ = ('op', 'expr', 'coord', '__weakref__') + def __init__(self, op, expr, coord=None): + self.op = op + self.expr = expr + self.coord = coord + + def children(self): + nodelist = [] + if self.expr is not None: nodelist.append(("expr", self.expr)) + return tuple(nodelist) + + def __iter__(self): + if self.expr is not None: + yield self.expr + + attr_names = ('op', ) + +class Union(Node): + __slots__ = ('name', 'decls', 'coord', '__weakref__') + def __init__(self, name, decls, coord=None): + self.name = name + self.decls = decls + self.coord = coord + + def children(self): + nodelist = [] + for i, child in enumerate(self.decls or []): + nodelist.append(("decls[%d]" % i, child)) + return tuple(nodelist) + + def __iter__(self): + for child in (self.decls or []): + yield child + + attr_names = ('name', ) + +class While(Node): + __slots__ = ('cond', 'stmt', 'coord', '__weakref__') + def __init__(self, cond, stmt, coord=None): + self.cond = cond + self.stmt = stmt + self.coord = coord + + def children(self): + nodelist = [] + if self.cond is not None: nodelist.append(("cond", self.cond)) + if self.stmt is not None: nodelist.append(("stmt", self.stmt)) + return tuple(nodelist) + + def __iter__(self): + if self.cond is not None: + yield self.cond + if self.stmt is not None: + yield self.stmt + + attr_names = () + +class Pragma(Node): + __slots__ = ('string', 'coord', '__weakref__') + def __init__(self, string, coord=None): + self.string = string + self.coord = coord + + def children(self): + nodelist = [] + return tuple(nodelist) + + def __iter__(self): + return + yield + + attr_names = ('string', ) + diff --git a/python/pycparser/c_generator.py b/python/pycparser/c_generator.py new file mode 100644 index 000000000..f8ab0a47a --- /dev/null +++ b/python/pycparser/c_generator.py @@ -0,0 +1,502 @@ +#------------------------------------------------------------------------------ +# pycparser: c_generator.py +# +# C code generator from pycparser AST nodes. +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#------------------------------------------------------------------------------ +from . import c_ast + + +class CGenerator(object): + """ Uses the same visitor pattern as c_ast.NodeVisitor, but modified to + return a value from each visit method, using string accumulation in + generic_visit. + """ + def __init__(self, reduce_parentheses=False): + """ Constructs C-code generator + + reduce_parentheses: + if True, eliminates needless parentheses on binary operators + """ + # Statements start with indentation of self.indent_level spaces, using + # the _make_indent method. 
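A minimal round-trip sketch (illustrative only, not part of the patched file; assumes the `pycparser` package added by this patch is importable):

    from pycparser import c_parser, c_generator

    ast = c_parser.CParser().parse('int x = 1 + 2 * 3;')
    gen = c_generator.CGenerator(reduce_parentheses=True)
    print(gen.visit(ast), end='')  # emits: int x = 1 + 2 * 3;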
+ self.indent_level = 0 + self.reduce_parentheses = reduce_parentheses + + def _make_indent(self): + return ' ' * self.indent_level + + def visit(self, node): + method = 'visit_' + node.__class__.__name__ + return getattr(self, method, self.generic_visit)(node) + + def generic_visit(self, node): + if node is None: + return '' + else: + return ''.join(self.visit(c) for c_name, c in node.children()) + + def visit_Constant(self, n): + return n.value + + def visit_ID(self, n): + return n.name + + def visit_Pragma(self, n): + ret = '#pragma' + if n.string: + ret += ' ' + n.string + return ret + + def visit_ArrayRef(self, n): + arrref = self._parenthesize_unless_simple(n.name) + return arrref + '[' + self.visit(n.subscript) + ']' + + def visit_StructRef(self, n): + sref = self._parenthesize_unless_simple(n.name) + return sref + n.type + self.visit(n.field) + + def visit_FuncCall(self, n): + fref = self._parenthesize_unless_simple(n.name) + return fref + '(' + self.visit(n.args) + ')' + + def visit_UnaryOp(self, n): + if n.op == 'sizeof': + # Always parenthesize the argument of sizeof since it can be + # a name. + return 'sizeof(%s)' % self.visit(n.expr) + else: + operand = self._parenthesize_unless_simple(n.expr) + if n.op == 'p++': + return '%s++' % operand + elif n.op == 'p--': + return '%s--' % operand + else: + return '%s%s' % (n.op, operand) + + # Precedence map of binary operators: + precedence_map = { + # Should be in sync with c_parser.CParser.precedence + # Higher numbers are stronger binding + '||': 0, # weakest binding + '&&': 1, + '|': 2, + '^': 3, + '&': 4, + '==': 5, '!=': 5, + '>': 6, '>=': 6, '<': 6, '<=': 6, + '>>': 7, '<<': 7, + '+': 8, '-': 8, + '*': 9, '/': 9, '%': 9 # strongest binding + } + + def visit_BinaryOp(self, n): + # Note: all binary operators are left-to-right associative + # + # If `n.left.op` has a stronger or equally binding precedence in + # comparison to `n.op`, no parentheses are needed for the left: + # e.g., `(a*b) + c` is equivalent to `a*b + c`, as well as + # `(a+b) - c` is equivalent to `a+b - c` (same precedence). + # If the left operator is weaker binding than the current, then + # parentheses are necessary: + # e.g., `(a+b) * c` is NOT equivalent to `a+b * c`. + lval_str = self._parenthesize_if( + n.left, + lambda d: not (self._is_simple_node(d) or + self.reduce_parentheses and isinstance(d, c_ast.BinaryOp) and + self.precedence_map[d.op] >= self.precedence_map[n.op])) + # If `n.right.op` has a stronger (but not equal) binding precedence, + # parentheses can be omitted on the right: + # e.g., `a + (b*c)` is equivalent to `a + b*c`. + # If the right operator is weaker or equally binding, then parentheses + # are necessary: + # e.g., `a * (b+c)` is NOT equivalent to `a * b+c` and + # `a - (b+c)` is NOT equivalent to `a - b+c` (same precedence).
+ rval_str = self._parenthesize_if( + n.right, + lambda d: not (self._is_simple_node(d) or + self.reduce_parentheses and isinstance(d, c_ast.BinaryOp) and + self.precedence_map[d.op] > self.precedence_map[n.op])) + return '%s %s %s' % (lval_str, n.op, rval_str) + + def visit_Assignment(self, n): + rval_str = self._parenthesize_if( + n.rvalue, + lambda n: isinstance(n, c_ast.Assignment)) + return '%s %s %s' % (self.visit(n.lvalue), n.op, rval_str) + + def visit_IdentifierType(self, n): + return ' '.join(n.names) + + def _visit_expr(self, n): + if isinstance(n, c_ast.InitList): + return '{' + self.visit(n) + '}' + elif isinstance(n, (c_ast.ExprList, c_ast.Compound)): + return '(' + self.visit(n) + ')' + else: + return self.visit(n) + + def visit_Decl(self, n, no_type=False): + # no_type is used when a Decl is part of a DeclList, where the type is + # explicitly only for the first declaration in a list. + # + s = n.name if no_type else self._generate_decl(n) + if n.bitsize: s += ' : ' + self.visit(n.bitsize) + if n.init: + s += ' = ' + self._visit_expr(n.init) + return s + + def visit_DeclList(self, n): + s = self.visit(n.decls[0]) + if len(n.decls) > 1: + s += ', ' + ', '.join(self.visit_Decl(decl, no_type=True) + for decl in n.decls[1:]) + return s + + def visit_Typedef(self, n): + s = '' + if n.storage: s += ' '.join(n.storage) + ' ' + s += self._generate_type(n.type) + return s + + def visit_Cast(self, n): + s = '(' + self._generate_type(n.to_type, emit_declname=False) + ')' + return s + ' ' + self._parenthesize_unless_simple(n.expr) + + def visit_ExprList(self, n): + visited_subexprs = [] + for expr in n.exprs: + visited_subexprs.append(self._visit_expr(expr)) + return ', '.join(visited_subexprs) + + def visit_InitList(self, n): + visited_subexprs = [] + for expr in n.exprs: + visited_subexprs.append(self._visit_expr(expr)) + return ', '.join(visited_subexprs) + + def visit_Enum(self, n): + return self._generate_struct_union_enum(n, name='enum') + + def visit_Alignas(self, n): + return '_Alignas({})'.format(self.visit(n.alignment)) + + def visit_Enumerator(self, n): + if not n.value: + return '{indent}{name},\n'.format( + indent=self._make_indent(), + name=n.name, + ) + else: + return '{indent}{name} = {value},\n'.format( + indent=self._make_indent(), + name=n.name, + value=self.visit(n.value), + ) + + def visit_FuncDef(self, n): + decl = self.visit(n.decl) + self.indent_level = 0 + body = self.visit(n.body) + if n.param_decls: + knrdecls = ';\n'.join(self.visit(p) for p in n.param_decls) + return decl + '\n' + knrdecls + ';\n' + body + '\n' + else: + return decl + '\n' + body + '\n' + + def visit_FileAST(self, n): + s = '' + for ext in n.ext: + if isinstance(ext, c_ast.FuncDef): + s += self.visit(ext) + elif isinstance(ext, c_ast.Pragma): + s += self.visit(ext) + '\n' + else: + s += self.visit(ext) + ';\n' + return s + + def visit_Compound(self, n): + s = self._make_indent() + '{\n' + self.indent_level += 2 + if n.block_items: + s += ''.join(self._generate_stmt(stmt) for stmt in n.block_items) + self.indent_level -= 2 + s += self._make_indent() + '}\n' + return s + + def visit_CompoundLiteral(self, n): + return '(' + self.visit(n.type) + '){' + self.visit(n.init) + '}' + + + def visit_EmptyStatement(self, n): + return ';' + + def visit_ParamList(self, n): + return ', '.join(self.visit(param) for param in n.params) + + def visit_Return(self, n): + s = 'return' + if n.expr: s += ' ' + self.visit(n.expr) + return s + ';' + + def visit_Break(self, n): + return 'break;' + + def 
visit_Continue(self, n): + return 'continue;' + + def visit_TernaryOp(self, n): + s = '(' + self._visit_expr(n.cond) + ') ? ' + s += '(' + self._visit_expr(n.iftrue) + ') : ' + s += '(' + self._visit_expr(n.iffalse) + ')' + return s + + def visit_If(self, n): + s = 'if (' + if n.cond: s += self.visit(n.cond) + s += ')\n' + s += self._generate_stmt(n.iftrue, add_indent=True) + if n.iffalse: + s += self._make_indent() + 'else\n' + s += self._generate_stmt(n.iffalse, add_indent=True) + return s + + def visit_For(self, n): + s = 'for (' + if n.init: s += self.visit(n.init) + s += ';' + if n.cond: s += ' ' + self.visit(n.cond) + s += ';' + if n.next: s += ' ' + self.visit(n.next) + s += ')\n' + s += self._generate_stmt(n.stmt, add_indent=True) + return s + + def visit_While(self, n): + s = 'while (' + if n.cond: s += self.visit(n.cond) + s += ')\n' + s += self._generate_stmt(n.stmt, add_indent=True) + return s + + def visit_DoWhile(self, n): + s = 'do\n' + s += self._generate_stmt(n.stmt, add_indent=True) + s += self._make_indent() + 'while (' + if n.cond: s += self.visit(n.cond) + s += ');' + return s + + def visit_StaticAssert(self, n): + s = '_Static_assert(' + s += self.visit(n.cond) + if n.message: + s += ',' + s += self.visit(n.message) + s += ')' + return s + + def visit_Switch(self, n): + s = 'switch (' + self.visit(n.cond) + ')\n' + s += self._generate_stmt(n.stmt, add_indent=True) + return s + + def visit_Case(self, n): + s = 'case ' + self.visit(n.expr) + ':\n' + for stmt in n.stmts: + s += self._generate_stmt(stmt, add_indent=True) + return s + + def visit_Default(self, n): + s = 'default:\n' + for stmt in n.stmts: + s += self._generate_stmt(stmt, add_indent=True) + return s + + def visit_Label(self, n): + return n.name + ':\n' + self._generate_stmt(n.stmt) + + def visit_Goto(self, n): + return 'goto ' + n.name + ';' + + def visit_EllipsisParam(self, n): + return '...' + + def visit_Struct(self, n): + return self._generate_struct_union_enum(n, 'struct') + + def visit_Typename(self, n): + return self._generate_type(n.type) + + def visit_Union(self, n): + return self._generate_struct_union_enum(n, 'union') + + def visit_NamedInitializer(self, n): + s = '' + for name in n.name: + if isinstance(name, c_ast.ID): + s += '.' + name.name + else: + s += '[' + self.visit(name) + ']' + s += ' = ' + self._visit_expr(n.expr) + return s + + def visit_FuncDecl(self, n): + return self._generate_type(n) + + def visit_ArrayDecl(self, n): + return self._generate_type(n, emit_declname=False) + + def visit_TypeDecl(self, n): + return self._generate_type(n, emit_declname=False) + + def visit_PtrDecl(self, n): + return self._generate_type(n, emit_declname=False) + + def _generate_struct_union_enum(self, n, name): + """ Generates code for structs, unions, and enums. name should be + 'struct', 'union', or 'enum'. 
+ """ + if name in ('struct', 'union'): + members = n.decls + body_function = self._generate_struct_union_body + else: + assert name == 'enum' + members = None if n.values is None else n.values.enumerators + body_function = self._generate_enum_body + s = name + ' ' + (n.name or '') + if members is not None: + # None means no members + # Empty sequence means an empty list of members + s += '\n' + s += self._make_indent() + self.indent_level += 2 + s += '{\n' + s += body_function(members) + self.indent_level -= 2 + s += self._make_indent() + '}' + return s + + def _generate_struct_union_body(self, members): + return ''.join(self._generate_stmt(decl) for decl in members) + + def _generate_enum_body(self, members): + # `[:-2] + '\n'` removes the final `,` from the enumerator list + return ''.join(self.visit(value) for value in members)[:-2] + '\n' + + def _generate_stmt(self, n, add_indent=False): + """ Generation from a statement node. This method exists as a wrapper + for individual visit_* methods to handle different treatment of + some statements in this context. + """ + typ = type(n) + if add_indent: self.indent_level += 2 + indent = self._make_indent() + if add_indent: self.indent_level -= 2 + + if typ in ( + c_ast.Decl, c_ast.Assignment, c_ast.Cast, c_ast.UnaryOp, + c_ast.BinaryOp, c_ast.TernaryOp, c_ast.FuncCall, c_ast.ArrayRef, + c_ast.StructRef, c_ast.Constant, c_ast.ID, c_ast.Typedef, + c_ast.ExprList): + # These can also appear in an expression context so no semicolon + # is added to them automatically + # + return indent + self.visit(n) + ';\n' + elif typ in (c_ast.Compound,): + # No extra indentation required before the opening brace of a + # compound - because it consists of multiple lines it has to + # compute its own indentation. + # + return self.visit(n) + elif typ in (c_ast.If,): + return indent + self.visit(n) + else: + return indent + self.visit(n) + '\n' + + def _generate_decl(self, n): + """ Generation from a Decl node. + """ + s = '' + if n.funcspec: s = ' '.join(n.funcspec) + ' ' + if n.storage: s += ' '.join(n.storage) + ' ' + if n.align: s += self.visit(n.align[0]) + ' ' + s += self._generate_type(n.type) + return s + + def _generate_type(self, n, modifiers=[], emit_declname = True): + """ Recursive generation from a type node. n is the type node. + modifiers collects the PtrDecl, ArrayDecl and FuncDecl modifiers + encountered on the way down to a TypeDecl, to allow proper + generation from it. + """ + typ = type(n) + #~ print(n, modifiers) + + if typ == c_ast.TypeDecl: + s = '' + if n.quals: s += ' '.join(n.quals) + ' ' + s += self.visit(n.type) + + nstr = n.declname if n.declname and emit_declname else '' + # Resolve modifiers. + # Wrap in parens to distinguish pointer to array and pointer to + # function syntax. 
+ # + for i, modifier in enumerate(modifiers): + if isinstance(modifier, c_ast.ArrayDecl): + if (i != 0 and + isinstance(modifiers[i - 1], c_ast.PtrDecl)): + nstr = '(' + nstr + ')' + nstr += '[' + if modifier.dim_quals: + nstr += ' '.join(modifier.dim_quals) + ' ' + nstr += self.visit(modifier.dim) + ']' + elif isinstance(modifier, c_ast.FuncDecl): + if (i != 0 and + isinstance(modifiers[i - 1], c_ast.PtrDecl)): + nstr = '(' + nstr + ')' + nstr += '(' + self.visit(modifier.args) + ')' + elif isinstance(modifier, c_ast.PtrDecl): + if modifier.quals: + nstr = '* %s%s' % (' '.join(modifier.quals), + ' ' + nstr if nstr else '') + else: + nstr = '*' + nstr + if nstr: s += ' ' + nstr + return s + elif typ == c_ast.Decl: + return self._generate_decl(n.type) + elif typ == c_ast.Typename: + return self._generate_type(n.type, emit_declname = emit_declname) + elif typ == c_ast.IdentifierType: + return ' '.join(n.names) + ' ' + elif typ in (c_ast.ArrayDecl, c_ast.PtrDecl, c_ast.FuncDecl): + return self._generate_type(n.type, modifiers + [n], + emit_declname = emit_declname) + else: + return self.visit(n) + + def _parenthesize_if(self, n, condition): + """ Visits 'n' and returns its string representation, parenthesized + if the condition function applied to the node returns True. + """ + s = self._visit_expr(n) + if condition(n): + return '(' + s + ')' + else: + return s + + def _parenthesize_unless_simple(self, n): + """ Common use case for _parenthesize_if + """ + return self._parenthesize_if(n, lambda d: not self._is_simple_node(d)) + + def _is_simple_node(self, n): + """ Returns True for nodes that are "simple" - i.e. nodes that always + have higher precedence than operators. + """ + return isinstance(n, (c_ast.Constant, c_ast.ID, c_ast.ArrayRef, + c_ast.StructRef, c_ast.FuncCall)) diff --git a/python/pycparser/c_lexer.py b/python/pycparser/c_lexer.py new file mode 100644 index 000000000..135826d17 --- /dev/null +++ b/python/pycparser/c_lexer.py @@ -0,0 +1,569 @@ +#------------------------------------------------------------------------------ +# pycparser: c_lexer.py +# +# CLexer class: lexer for the C language +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#------------------------------------------------------------------------------ +import re + +from .ply import lex +from .ply.lex import TOKEN + + +class CLexer(object): + """ A lexer for the C language. After building it, set the + input text with input(), and call token() to get new + tokens. + + The public attribute filename can be set to an initial + filename, but the lexer will update it upon #line + directives. + """ + def __init__(self, error_func, on_lbrace_func, on_rbrace_func, + type_lookup_func): + """ Create a new Lexer. + + error_func: + An error function. Will be called with an error + message, line and column as arguments, in case of + an error during lexing. + + on_lbrace_func, on_rbrace_func: + Called when an LBRACE or RBRACE is encountered + (likely to push/pop type_lookup_func's scope) + + type_lookup_func: + A type lookup function. Given a string, it must + return True IFF this string is a name of a type + that was defined with a typedef earlier. 
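            A hedged wiring sketch (illustrative only; the callbacks
            below are stand-ins):

                clx = CLexer(error_func=lambda msg, line, col: print(msg),
                             on_lbrace_func=lambda: None,
                             on_rbrace_func=lambda: None,
                             type_lookup_func=lambda name: False)
                clx.build()
                clx.input('int x;')
                tok = clx.token()   # first token is the INT keyword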
+ """ + self.error_func = error_func + self.on_lbrace_func = on_lbrace_func + self.on_rbrace_func = on_rbrace_func + self.type_lookup_func = type_lookup_func + self.filename = '' + + # Keeps track of the last token returned from self.token() + self.last_token = None + + # Allow either "# line" or "# " to support GCC's + # cpp output + # + self.line_pattern = re.compile(r'([ \t]*line\W)|([ \t]*\d+)') + self.pragma_pattern = re.compile(r'[ \t]*pragma\W') + + def build(self, **kwargs): + """ Builds the lexer from the specification. Must be + called after the lexer object is created. + + This method exists separately, because the PLY + manual warns against calling lex.lex inside + __init__ + """ + self.lexer = lex.lex(object=self, **kwargs) + + def reset_lineno(self): + """ Resets the internal line number counter of the lexer. + """ + self.lexer.lineno = 1 + + def input(self, text): + self.lexer.input(text) + + def token(self): + self.last_token = self.lexer.token() + return self.last_token + + def find_tok_column(self, token): + """ Find the column of the token in its line. + """ + last_cr = self.lexer.lexdata.rfind('\n', 0, token.lexpos) + return token.lexpos - last_cr + + ######################-- PRIVATE --###################### + + ## + ## Internal auxiliary methods + ## + def _error(self, msg, token): + location = self._make_tok_location(token) + self.error_func(msg, location[0], location[1]) + self.lexer.skip(1) + + def _make_tok_location(self, token): + return (token.lineno, self.find_tok_column(token)) + + ## + ## Reserved keywords + ## + keywords = ( + 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', + 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN', + 'FLOAT', 'FOR', 'GOTO', 'IF', 'INLINE', 'INT', 'LONG', + 'REGISTER', 'OFFSETOF', + 'RESTRICT', 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', + 'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID', + 'VOLATILE', 'WHILE', '__INT128', + ) + + keywords_new = ( + '_BOOL', '_COMPLEX', + '_NORETURN', '_THREAD_LOCAL', '_STATIC_ASSERT', + '_ATOMIC', '_ALIGNOF', '_ALIGNAS', + '_PRAGMA', + ) + + keyword_map = {} + + for keyword in keywords: + keyword_map[keyword.lower()] = keyword + + for keyword in keywords_new: + keyword_map[keyword[:2].upper() + keyword[2:].lower()] = keyword + + ## + ## All the tokens recognized by the lexer + ## + tokens = keywords + keywords_new + ( + # Identifiers + 'ID', + + # Type identifiers (identifiers previously defined as + # types with typedef) + 'TYPEID', + + # constants + 'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN', 'INT_CONST_CHAR', + 'FLOAT_CONST', 'HEX_FLOAT_CONST', + 'CHAR_CONST', + 'WCHAR_CONST', + 'U8CHAR_CONST', + 'U16CHAR_CONST', + 'U32CHAR_CONST', + + # String literals + 'STRING_LITERAL', + 'WSTRING_LITERAL', + 'U8STRING_LITERAL', + 'U16STRING_LITERAL', + 'U32STRING_LITERAL', + + # Operators + 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD', + 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', + 'LOR', 'LAND', 'LNOT', + 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', + + # Assignment + 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', + 'PLUSEQUAL', 'MINUSEQUAL', + 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', + 'OREQUAL', + + # Increment/decrement + 'PLUSPLUS', 'MINUSMINUS', + + # Structure dereference (->) + 'ARROW', + + # Conditional operator (?) + 'CONDOP', + + # Delimiters + 'LPAREN', 'RPAREN', # ( ) + 'LBRACKET', 'RBRACKET', # [ ] + 'LBRACE', 'RBRACE', # { } + 'COMMA', 'PERIOD', # . , + 'SEMI', 'COLON', # ; : + + # Ellipsis (...) 
+ 'ELLIPSIS', + + # pre-processor + 'PPHASH', # '#' + 'PPPRAGMA', # 'pragma' + 'PPPRAGMASTR', + ) + + ## + ## Regexes for use in tokens + ## + ## + + # valid C identifiers (K&R2: A.2.3), plus '$' (supported by some compilers) + identifier = r'[a-zA-Z_$][0-9a-zA-Z_$]*' + + hex_prefix = '0[xX]' + hex_digits = '[0-9a-fA-F]+' + bin_prefix = '0[bB]' + bin_digits = '[01]+' + + # integer constants (K&R2: A.2.5.1) + integer_suffix_opt = r'(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?' + decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')' + octal_constant = '0[0-7]*'+integer_suffix_opt + hex_constant = hex_prefix+hex_digits+integer_suffix_opt + bin_constant = bin_prefix+bin_digits+integer_suffix_opt + + bad_octal_constant = '0[0-7]*[89]' + + # comments are not supported + unsupported_c_style_comment = r'\/\*' + unsupported_cxx_style_comment = r'\/\/' + + # character constants (K&R2: A.2.5.2) + # Note: a-zA-Z and '.-~^_!=&;,' are allowed as escape chars to support #line + # directives with Windows paths as filenames (..\..\dir\file) + # For the same reason, decimal_escape allows all digit sequences. We want to + # parse all correct code, even if it means to sometimes parse incorrect + # code. + # + # The original regexes were taken verbatim from the C syntax definition, + # and were later modified to avoid worst-case exponential running time. + # + # simple_escape = r"""([a-zA-Z._~!=&\^\-\\?'"])""" + # decimal_escape = r"""(\d+)""" + # hex_escape = r"""(x[0-9a-fA-F]+)""" + # bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-7])""" + # + # The following modifications were made to avoid the ambiguity that allowed backtracking: + # (https://github.com/eliben/pycparser/issues/61) + # + # - \x now matches in simple_escape only when it is not followed by a hex digit, to avoid ambiguity with hex_escape. + # - hex_escape allows one or more hex characters, but requires that the next character (if any) is not hex + # - decimal_escape allows one or more decimal characters, but requires that the next character (if any) is not a decimal + # - bad_escape does not allow any decimals (8-9), to avoid conflicting with the permissive decimal_escape. + # + # Without this change, Python's `re` module would recursively try parsing each ambiguous escape sequence in multiple ways. + # e.g. `\123` could be parsed as `\1`+`23`, `\12`+`3`, and `\123`.
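A quick standalone check of the anchored variants (illustrative, not part of the lexer; the regex strings are copied from the definitions below):

    import re

    decimal_escape = r'(\d+)(?!\d)'
    hex_escape = r'(x[0-9a-fA-F]+)(?![0-9a-fA-F])'

    # Each long escape now matches exactly one way instead of
    # backtracking through every split such as `\1`+`23` or `\12`+`3`.
    assert re.fullmatch(decimal_escape, '123')
    assert re.fullmatch(hex_escape, 'x1F')
    assert re.fullmatch(hex_escape, 'x1G') is None  # trailing non-hex rejected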
+ + simple_escape = r"""([a-wyzA-Z._~!=&\^\-\\?'"]|x(?![0-9a-fA-F]))""" + decimal_escape = r"""(\d+)(?!\d)""" + hex_escape = r"""(x[0-9a-fA-F]+)(?![0-9a-fA-F])""" + bad_escape = r"""([\\][^a-zA-Z._~^!=&\^\-\\?'"x0-9])""" + + escape_sequence = r"""(\\("""+simple_escape+'|'+decimal_escape+'|'+hex_escape+'))' + + # This complicated regex with lookahead might be slow for strings, so because all of the valid escapes (including \x) allowed + # 0 or more non-escaped characters after the first character, simple_escape+decimal_escape+hex_escape got simplified to + + escape_sequence_start_in_string = r"""(\\[0-9a-zA-Z._~!=&\^\-\\?'"])""" + + cconst_char = r"""([^'\\\n]|"""+escape_sequence+')' + char_const = "'"+cconst_char+"'" + wchar_const = 'L'+char_const + u8char_const = 'u8'+char_const + u16char_const = 'u'+char_const + u32char_const = 'U'+char_const + multicharacter_constant = "'"+cconst_char+"{2,4}'" + unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)" + bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')""" + + # string literals (K&R2: A.2.6) + string_char = r"""([^"\\\n]|"""+escape_sequence_start_in_string+')' + string_literal = '"'+string_char+'*"' + wstring_literal = 'L'+string_literal + u8string_literal = 'u8'+string_literal + u16string_literal = 'u'+string_literal + u32string_literal = 'U'+string_literal + bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"' + + # floating constants (K&R2: A.2.5.3) + exponent_part = r"""([eE][-+]?[0-9]+)""" + fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)""" + floating_constant = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+'))[FfLl]?)' + binary_exponent_part = r'''([pP][+-]?[0-9]+)''' + hex_fractional_constant = '((('+hex_digits+r""")?\."""+hex_digits+')|('+hex_digits+r"""\.))""" + hex_floating_constant = '('+hex_prefix+'('+hex_digits+'|'+hex_fractional_constant+')'+binary_exponent_part+'[FfLl]?)' + + ## + ## Lexer states: used for preprocessor \n-terminated directives + ## + states = ( + # ppline: preprocessor line directives + # + ('ppline', 'exclusive'), + + # pppragma: pragma + # + ('pppragma', 'exclusive'), + ) + + def t_PPHASH(self, t): + r'[ \t]*\#' + if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos): + t.lexer.begin('ppline') + self.pp_line = self.pp_filename = None + elif self.pragma_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos): + t.lexer.begin('pppragma') + else: + t.type = 'PPHASH' + return t + + ## + ## Rules for the ppline state + ## + @TOKEN(string_literal) + def t_ppline_FILENAME(self, t): + if self.pp_line is None: + self._error('filename before line number in #line', t) + else: + self.pp_filename = t.value.lstrip('"').rstrip('"') + + @TOKEN(decimal_constant) + def t_ppline_LINE_NUMBER(self, t): + if self.pp_line is None: + self.pp_line = t.value + else: + # Ignore: GCC's cpp sometimes inserts a numeric flag + # after the file name + pass + + def t_ppline_NEWLINE(self, t): + r'\n' + if self.pp_line is None: + self._error('line number missing in #line', t) + else: + self.lexer.lineno = int(self.pp_line) + + if self.pp_filename is not None: + self.filename = self.pp_filename + + t.lexer.begin('INITIAL') + + def t_ppline_PPLINE(self, t): + r'line' + pass + + t_ppline_ignore = ' \t' + + def t_ppline_error(self, t): + self._error('invalid #line directive', t) + + ## + ## Rules for the pppragma state + ## + def t_pppragma_NEWLINE(self, t): + r'\n' + t.lexer.lineno += 1 + t.lexer.begin('INITIAL') + + def 
t_pppragma_PPPRAGMA(self, t): + r'pragma' + return t + + t_pppragma_ignore = ' \t' + + def t_pppragma_STR(self, t): + '.+' + t.type = 'PPPRAGMASTR' + return t + + def t_pppragma_error(self, t): + self._error('invalid #pragma directive', t) + + ## + ## Rules for the normal state + ## + t_ignore = ' \t' + + # Newlines + def t_NEWLINE(self, t): + r'\n+' + t.lexer.lineno += t.value.count("\n") + + # Operators + t_PLUS = r'\+' + t_MINUS = r'-' + t_TIMES = r'\*' + t_DIVIDE = r'/' + t_MOD = r'%' + t_OR = r'\|' + t_AND = r'&' + t_NOT = r'~' + t_XOR = r'\^' + t_LSHIFT = r'<<' + t_RSHIFT = r'>>' + t_LOR = r'\|\|' + t_LAND = r'&&' + t_LNOT = r'!' + t_LT = r'<' + t_GT = r'>' + t_LE = r'<=' + t_GE = r'>=' + t_EQ = r'==' + t_NE = r'!=' + + # Assignment operators + t_EQUALS = r'=' + t_TIMESEQUAL = r'\*=' + t_DIVEQUAL = r'/=' + t_MODEQUAL = r'%=' + t_PLUSEQUAL = r'\+=' + t_MINUSEQUAL = r'-=' + t_LSHIFTEQUAL = r'<<=' + t_RSHIFTEQUAL = r'>>=' + t_ANDEQUAL = r'&=' + t_OREQUAL = r'\|=' + t_XOREQUAL = r'\^=' + + # Increment/decrement + t_PLUSPLUS = r'\+\+' + t_MINUSMINUS = r'--' + + # -> + t_ARROW = r'->' + + # ? + t_CONDOP = r'\?' + + # Delimiters + t_LPAREN = r'\(' + t_RPAREN = r'\)' + t_LBRACKET = r'\[' + t_RBRACKET = r'\]' + t_COMMA = r',' + t_PERIOD = r'\.' + t_SEMI = r';' + t_COLON = r':' + t_ELLIPSIS = r'\.\.\.' + + # Scope delimiters + # To see why on_lbrace_func is needed, consider: + # typedef char TT; + # void foo(int TT) { TT = 10; } + # TT x = 5; + # Outside the function, TT is a typedef, but inside (starting and ending + # with the braces) it's a parameter. The trouble begins with yacc's + # lookahead token. If we open a new scope in brace_open, then TT has + # already been read and incorrectly interpreted as TYPEID. So, we need + # to open and close scopes from within the lexer. + # Similar for the TT immediately outside the end of the function. + # + @TOKEN(r'\{') + def t_LBRACE(self, t): + self.on_lbrace_func() + return t + @TOKEN(r'\}') + def t_RBRACE(self, t): + self.on_rbrace_func() + return t + + t_STRING_LITERAL = string_literal + + # The following floating and integer constants are defined as + # functions to impose a strict order (otherwise, decimal + # is placed before the others because its regex is longer, + # and this is bad) + # + @TOKEN(floating_constant) + def t_FLOAT_CONST(self, t): + return t + + @TOKEN(hex_floating_constant) + def t_HEX_FLOAT_CONST(self, t): + return t + + @TOKEN(hex_constant) + def t_INT_CONST_HEX(self, t): + return t + + @TOKEN(bin_constant) + def t_INT_CONST_BIN(self, t): + return t + + @TOKEN(bad_octal_constant) + def t_BAD_CONST_OCT(self, t): + msg = "Invalid octal constant" + self._error(msg, t) + + @TOKEN(unsupported_c_style_comment) + def t_UNSUPPORTED_C_STYLE_COMMENT(self, t): + msg = "Comments are not supported, see https://github.com/eliben/pycparser#3using." + self._error(msg, t) + + @TOKEN(unsupported_cxx_style_comment) + def t_UNSUPPORTED_CXX_STYLE_COMMENT(self, t): + msg = "Comments are not supported, see https://github.com/eliben/pycparser#3using." 
+ self._error(msg, t) + + @TOKEN(octal_constant) + def t_INT_CONST_OCT(self, t): + return t + + @TOKEN(decimal_constant) + def t_INT_CONST_DEC(self, t): + return t + + # Must come before bad_char_const, to prevent it from + # catching valid char constants as invalid + # + @TOKEN(multicharacter_constant) + def t_INT_CONST_CHAR(self, t): + return t + + @TOKEN(char_const) + def t_CHAR_CONST(self, t): + return t + + @TOKEN(wchar_const) + def t_WCHAR_CONST(self, t): + return t + + @TOKEN(u8char_const) + def t_U8CHAR_CONST(self, t): + return t + + @TOKEN(u16char_const) + def t_U16CHAR_CONST(self, t): + return t + + @TOKEN(u32char_const) + def t_U32CHAR_CONST(self, t): + return t + + @TOKEN(unmatched_quote) + def t_UNMATCHED_QUOTE(self, t): + msg = "Unmatched '" + self._error(msg, t) + + @TOKEN(bad_char_const) + def t_BAD_CHAR_CONST(self, t): + msg = "Invalid char constant %s" % t.value + self._error(msg, t) + + @TOKEN(wstring_literal) + def t_WSTRING_LITERAL(self, t): + return t + + @TOKEN(u8string_literal) + def t_U8STRING_LITERAL(self, t): + return t + + @TOKEN(u16string_literal) + def t_U16STRING_LITERAL(self, t): + return t + + @TOKEN(u32string_literal) + def t_U32STRING_LITERAL(self, t): + return t + + # unmatched string literals are caught by the preprocessor + + @TOKEN(bad_string_literal) + def t_BAD_STRING_LITERAL(self, t): + msg = "String contains invalid escape code" + self._error(msg, t) + + @TOKEN(identifier) + def t_ID(self, t): + t.type = self.keyword_map.get(t.value, "ID") + if t.type == 'ID' and self.type_lookup_func(t.value): + t.type = "TYPEID" + return t + + def t_error(self, t): + msg = 'Illegal character %s' % repr(t.value[0]) + self._error(msg, t) diff --git a/python/pycparser/c_parser.py b/python/pycparser/c_parser.py new file mode 100644 index 000000000..bb123ac7e --- /dev/null +++ b/python/pycparser/c_parser.py @@ -0,0 +1,1973 @@ +#------------------------------------------------------------------------------ +# pycparser: c_parser.py +# +# CParser class: Parser and AST builder for the C language +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#------------------------------------------------------------------------------ +from .ply import yacc + +from . import c_ast +from .c_lexer import CLexer +from .plyparser import PLYParser, ParseError, parameterized, template +from .ast_transforms import fix_switch_cases, fix_atomic_specifiers + + +@template +class CParser(PLYParser): + def __init__( + self, + lex_optimize=True, + lexer=CLexer, + lextab='pycparser.lextab', + yacc_optimize=True, + yacctab='pycparser.yacctab', + yacc_debug=False, + taboutputdir=''): + """ Create a new CParser. + + Some arguments for controlling the debug/optimization + level of the parser are provided. The defaults are + tuned for release/performance mode. + The simple rules for using them are: + *) When tweaking CParser/CLexer, set these to False + *) When releasing a stable parser, set to True + + lex_optimize: + Set to False when you're modifying the lexer. + Otherwise, changes in the lexer won't be used, if + some lextab.py file exists. + When releasing with a stable lexer, set to True + to save the re-generation of the lexer table on + each run. + + lexer: + Set this parameter to define the lexer to use if + you're not using the default CLexer. + + lextab: + Points to the lex table that's used for optimized + mode. 
Only if you're modifying the lexer and want + some tests to avoid re-generating the table, make + this point to a local lex table file (that's been + earlier generated with lex_optimize=True) + + yacc_optimize: + Set to False when you're modifying the parser. + Otherwise, changes in the parser won't be used, if + some parsetab.py file exists. + When releasing with a stable parser, set to True + to save the re-generation of the parser table on + each run. + + yacctab: + Points to the yacc table that's used for optimized + mode. Only if you're modifying the parser, make + this point to a local yacc table file + + yacc_debug: + Generate a parser.out file that explains how yacc + built the parsing table from the grammar. + + taboutputdir: + Set this parameter to control the location of generated + lextab and yacctab files. + """ + self.clex = lexer( + error_func=self._lex_error_func, + on_lbrace_func=self._lex_on_lbrace_func, + on_rbrace_func=self._lex_on_rbrace_func, + type_lookup_func=self._lex_type_lookup_func) + + self.clex.build( + optimize=lex_optimize, + lextab=lextab, + outputdir=taboutputdir) + self.tokens = self.clex.tokens + + rules_with_opt = [ + 'abstract_declarator', + 'assignment_expression', + 'declaration_list', + 'declaration_specifiers_no_type', + 'designation', + 'expression', + 'identifier_list', + 'init_declarator_list', + 'id_init_declarator_list', + 'initializer_list', + 'parameter_type_list', + 'block_item_list', + 'type_qualifier_list', + 'struct_declarator_list' + ] + + for rule in rules_with_opt: + self._create_opt_rule(rule) + + self.cparser = yacc.yacc( + module=self, + start='translation_unit_or_empty', + debug=yacc_debug, + optimize=yacc_optimize, + tabmodule=yacctab, + outputdir=taboutputdir) + + # Stack of scopes for keeping track of symbols. _scope_stack[-1] is + # the current (topmost) scope. Each scope is a dictionary that + # specifies whether a name is a type. If _scope_stack[n][name] is + # True, 'name' is currently a type in the scope. If it's False, + # 'name' is used in the scope but not as a type (for instance, if we + # saw: int name; + # If 'name' is not a key in _scope_stack[n] then 'name' was not defined + # in this scope at all. + self._scope_stack = [dict()] + + # Keeps track of the last token given to yacc (the lookahead token) + self._last_yielded_token = None + + def parse(self, text, filename='', debug=False): + """ Parses C code and returns an AST. 
+ + text: + A string containing the C source code + + filename: + Name of the file being parsed (for meaningful + error messages) + + debug: + Debug flag to YACC + """ + self.clex.filename = filename + self.clex.reset_lineno() + self._scope_stack = [dict()] + self._last_yielded_token = None + return self.cparser.parse( + input=text, + lexer=self.clex, + debug=debug) + + ######################-- PRIVATE --###################### + + def _push_scope(self): + self._scope_stack.append(dict()) + + def _pop_scope(self): + assert len(self._scope_stack) > 1 + self._scope_stack.pop() + + def _add_typedef_name(self, name, coord): + """ Add a new typedef name (ie a TYPEID) to the current scope + """ + if not self._scope_stack[-1].get(name, True): + self._parse_error( + "Typedef %r previously declared as non-typedef " + "in this scope" % name, coord) + self._scope_stack[-1][name] = True + + def _add_identifier(self, name, coord): + """ Add a new object, function, or enum member name (ie an ID) to the + current scope + """ + if self._scope_stack[-1].get(name, False): + self._parse_error( + "Non-typedef %r previously declared as typedef " + "in this scope" % name, coord) + self._scope_stack[-1][name] = False + + def _is_type_in_scope(self, name): + """ Is *name* a typedef-name in the current scope? + """ + for scope in reversed(self._scope_stack): + # If name is an identifier in this scope it shadows typedefs in + # higher scopes. + in_scope = scope.get(name) + if in_scope is not None: return in_scope + return False + + def _lex_error_func(self, msg, line, column): + self._parse_error(msg, self._coord(line, column)) + + def _lex_on_lbrace_func(self): + self._push_scope() + + def _lex_on_rbrace_func(self): + self._pop_scope() + + def _lex_type_lookup_func(self, name): + """ Looks up types that were previously defined with + typedef. + Passed to the lexer for recognizing identifiers that + are types. + """ + is_type = self._is_type_in_scope(name) + return is_type + + def _get_yacc_lookahead_token(self): + """ We need access to yacc's lookahead token in certain cases. + This is the last token yacc requested from the lexer, so we + ask the lexer. + """ + return self.clex.last_token + + # To understand what's going on here, read sections A.8.5 and + # A.8.6 of K&R2 very carefully. + # + # A C type consists of a basic type declaration, with a list + # of modifiers. For example: + # + # int *c[5]; + # + # The basic declaration here is 'int c', and the pointer and + # the array are the modifiers. + # + # Basic declarations are represented by TypeDecl (from module c_ast) and the + # modifiers are FuncDecl, PtrDecl and ArrayDecl. + # + # The standard states that whenever a new modifier is parsed, it should be + # added to the end of the list of modifiers. For example: + # + # K&R2 A.8.6.2: Array Declarators + # + # In a declaration T D where D has the form + # D1 [constant-expression-opt] + # and the type of the identifier in the declaration T D1 is + # "type-modifier T", the type of the + # identifier of D is "type-modifier array of T" + # + # This is what this method does. The declarator it receives + # can be a list of declarators ending with TypeDecl. It + # tacks the modifier to the end of this list, just before + # the TypeDecl. + # + # Additionally, the modifier may be a list itself. This is + # useful for pointers, that can come as a chain from the rule + # p_pointer. In this case, the whole modifier list is spliced + # into the new location. 
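A hedged illustration of the chain this method builds (assumes the parser from this patch is importable):

    from pycparser import c_parser

    # For `int *c[5]`, modifiers nest outward from the TypeDecl:
    # ArrayDecl -> PtrDecl -> TypeDecl, i.e. an array of 5 pointers to int.
    decl = c_parser.CParser().parse('int *c[5];').ext[0]
    print(type(decl.type).__name__)            # ArrayDecl
    print(type(decl.type.type).__name__)       # PtrDecl
    print(type(decl.type.type.type).__name__)  # TypeDecl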
+ def _type_modify_decl(self, decl, modifier): + """ Tacks a type modifier on a declarator, and returns + the modified declarator. + + Note: the declarator and modifier may be modified + """ + #~ print '****' + #~ decl.show(offset=3) + #~ modifier.show(offset=3) + #~ print '****' + + modifier_head = modifier + modifier_tail = modifier + + # The modifier may be a nested list. Reach its tail. + while modifier_tail.type: + modifier_tail = modifier_tail.type + + # If the decl is a basic type, just tack the modifier onto it. + if isinstance(decl, c_ast.TypeDecl): + modifier_tail.type = decl + return modifier + else: + # Otherwise, the decl is a list of modifiers. Reach + # its tail and splice the modifier onto the tail, + # pointing to the underlying basic type. + decl_tail = decl + + while not isinstance(decl_tail.type, c_ast.TypeDecl): + decl_tail = decl_tail.type + + modifier_tail.type = decl_tail.type + decl_tail.type = modifier_head + return decl + + # Due to the order in which declarators are constructed, + # they have to be fixed in order to look like a normal AST. + # + # When a declaration arrives from syntax construction, it has + # these problems: + # * The innermost TypeDecl has no type (because the basic + # type is only known at the uppermost declaration level) + # * The declaration has no variable name, since that is saved + # in the innermost TypeDecl + # * The typename of the declaration is a list of type + # specifiers, and not a node. Here, basic identifier types + # should be separated from more complex types like enums + # and structs. + # + # This method fixes these problems. + def _fix_decl_name_type(self, decl, typename): + """ Fixes a declaration. Modifies decl. + """ + # Reach the underlying basic type + # + type = decl + while not isinstance(type, c_ast.TypeDecl): + type = type.type + + decl.name = type.declname + type.quals = decl.quals[:] + + # The typename is a list of types. If any type in this + # list isn't an IdentifierType, it must be the only + # type in the list (it's illegal to declare "int enum ..") + # If all the types are basic, they're collected in the + # IdentifierType holder. + for tn in typename: + if not isinstance(tn, c_ast.IdentifierType): + if len(typename) > 1: + self._parse_error( + "Invalid multiple types specified", tn.coord) + else: + type.type = tn + return decl + + if not typename: + # Functions default to returning int + # + if not isinstance(decl.type, c_ast.FuncDecl): + self._parse_error( + "Missing type in declaration", decl.coord) + type.type = c_ast.IdentifierType( + ['int'], + coord=decl.coord) + else: + # At this point, we know that typename is a list of IdentifierType + # nodes. Concatenate all the names into a single list. + # + type.type = c_ast.IdentifierType( + [name for id in typename for name in id.names], + coord=typename[0].coord) + return decl + + def _add_declaration_specifier(self, declspec, newspec, kind, append=False): + """ Declaration specifiers are represented by a dictionary + with the entries: + * qual: a list of type qualifiers + * storage: a list of storage type qualifiers + * type: a list of type specifiers + * function: a list of function specifiers + * alignment: a list of alignment specifiers + + This method is given a declaration specifier, and a + new specifier of a given kind. + If `append` is True, the new specifier is added to the end of + the specifiers list, otherwise it's added at the beginning. + Returns the declaration specifier, with the new + specifier incorporated. 
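            For example (illustrative), inserting a 'const' qualifier
            into an empty specifier yields:

                {'qual': ['const'], 'storage': [], 'type': [],
                 'function': [], 'alignment': []}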
+ """ + spec = declspec or dict(qual=[], storage=[], type=[], function=[], alignment=[]) + + if append: + spec[kind].append(newspec) + else: + spec[kind].insert(0, newspec) + + return spec + + def _build_declarations(self, spec, decls, typedef_namespace=False): + """ Builds a list of declarations all sharing the given specifiers. + If typedef_namespace is true, each declared name is added + to the "typedef namespace", which also includes objects, + functions, and enum constants. + """ + is_typedef = 'typedef' in spec['storage'] + declarations = [] + + # Bit-fields are allowed to be unnamed. + if decls[0].get('bitsize') is not None: + pass + + # When redeclaring typedef names as identifiers in inner scopes, a + # problem can occur where the identifier gets grouped into + # spec['type'], leaving decl as None. This can only occur for the + # first declarator. + elif decls[0]['decl'] is None: + if len(spec['type']) < 2 or len(spec['type'][-1].names) != 1 or \ + not self._is_type_in_scope(spec['type'][-1].names[0]): + coord = '?' + for t in spec['type']: + if hasattr(t, 'coord'): + coord = t.coord + break + self._parse_error('Invalid declaration', coord) + + # Make this look as if it came from "direct_declarator:ID" + decls[0]['decl'] = c_ast.TypeDecl( + declname=spec['type'][-1].names[0], + type=None, + quals=None, + align=spec['alignment'], + coord=spec['type'][-1].coord) + # Remove the "new" type's name from the end of spec['type'] + del spec['type'][-1] + + # A similar problem can occur where the declaration ends up looking + # like an abstract declarator. Give it a name if this is the case. + elif not isinstance(decls[0]['decl'], ( + c_ast.Enum, c_ast.Struct, c_ast.Union, c_ast.IdentifierType)): + decls_0_tail = decls[0]['decl'] + while not isinstance(decls_0_tail, c_ast.TypeDecl): + decls_0_tail = decls_0_tail.type + if decls_0_tail.declname is None: + decls_0_tail.declname = spec['type'][-1].names[0] + del spec['type'][-1] + + for decl in decls: + assert decl['decl'] is not None + if is_typedef: + declaration = c_ast.Typedef( + name=None, + quals=spec['qual'], + storage=spec['storage'], + type=decl['decl'], + coord=decl['decl'].coord) + else: + declaration = c_ast.Decl( + name=None, + quals=spec['qual'], + align=spec['alignment'], + storage=spec['storage'], + funcspec=spec['function'], + type=decl['decl'], + init=decl.get('init'), + bitsize=decl.get('bitsize'), + coord=decl['decl'].coord) + + if isinstance(declaration.type, ( + c_ast.Enum, c_ast.Struct, c_ast.Union, + c_ast.IdentifierType)): + fixed_decl = declaration + else: + fixed_decl = self._fix_decl_name_type(declaration, spec['type']) + + # Add the type name defined by typedef to a + # symbol table (for usage in the lexer) + if typedef_namespace: + if is_typedef: + self._add_typedef_name(fixed_decl.name, fixed_decl.coord) + else: + self._add_identifier(fixed_decl.name, fixed_decl.coord) + + fixed_decl = fix_atomic_specifiers(fixed_decl) + declarations.append(fixed_decl) + + return declarations + + def _build_function_definition(self, spec, decl, param_decls, body): + """ Builds a function definition. 
+ """ + if 'typedef' in spec['storage']: + self._parse_error("Invalid typedef", decl.coord) + + declaration = self._build_declarations( + spec=spec, + decls=[dict(decl=decl, init=None)], + typedef_namespace=True)[0] + + return c_ast.FuncDef( + decl=declaration, + param_decls=param_decls, + body=body, + coord=decl.coord) + + def _select_struct_union_class(self, token): + """ Given a token (either STRUCT or UNION), selects the + appropriate AST class. + """ + if token == 'struct': + return c_ast.Struct + else: + return c_ast.Union + + ## + ## Precedence and associativity of operators + ## + # If this changes, c_generator.CGenerator.precedence_map needs to change as + # well + precedence = ( + ('left', 'LOR'), + ('left', 'LAND'), + ('left', 'OR'), + ('left', 'XOR'), + ('left', 'AND'), + ('left', 'EQ', 'NE'), + ('left', 'GT', 'GE', 'LT', 'LE'), + ('left', 'RSHIFT', 'LSHIFT'), + ('left', 'PLUS', 'MINUS'), + ('left', 'TIMES', 'DIVIDE', 'MOD') + ) + + ## + ## Grammar productions + ## Implementation of the BNF defined in K&R2 A.13 + ## + + # Wrapper around a translation unit, to allow for empty input. + # Not strictly part of the C99 Grammar, but useful in practice. + def p_translation_unit_or_empty(self, p): + """ translation_unit_or_empty : translation_unit + | empty + """ + if p[1] is None: + p[0] = c_ast.FileAST([]) + else: + p[0] = c_ast.FileAST(p[1]) + + def p_translation_unit_1(self, p): + """ translation_unit : external_declaration + """ + # Note: external_declaration is already a list + p[0] = p[1] + + def p_translation_unit_2(self, p): + """ translation_unit : translation_unit external_declaration + """ + p[1].extend(p[2]) + p[0] = p[1] + + # Declarations always come as lists (because they can be + # several in one line), so we wrap the function definition + # into a list as well, to make the return value of + # external_declaration homogeneous. 
+ def p_external_declaration_1(self, p): + """ external_declaration : function_definition + """ + p[0] = [p[1]] + + def p_external_declaration_2(self, p): + """ external_declaration : declaration + """ + p[0] = p[1] + + def p_external_declaration_3(self, p): + """ external_declaration : pp_directive + | pppragma_directive + """ + p[0] = [p[1]] + + def p_external_declaration_4(self, p): + """ external_declaration : SEMI + """ + p[0] = [] + + def p_external_declaration_5(self, p): + """ external_declaration : static_assert + """ + p[0] = p[1] + + def p_static_assert_declaration(self, p): + """ static_assert : _STATIC_ASSERT LPAREN constant_expression COMMA unified_string_literal RPAREN + | _STATIC_ASSERT LPAREN constant_expression RPAREN + """ + if len(p) == 5: + p[0] = [c_ast.StaticAssert(p[3], None, self._token_coord(p, 1))] + else: + p[0] = [c_ast.StaticAssert(p[3], p[5], self._token_coord(p, 1))] + + def p_pp_directive(self, p): + """ pp_directive : PPHASH + """ + self._parse_error('Directives not supported yet', + self._token_coord(p, 1)) + + # This encompasses two types of C99-compatible pragmas: + # - The #pragma directive: + # # pragma character_sequence + # - The _Pragma unary operator: + # _Pragma ( " string_literal " ) + def p_pppragma_directive(self, p): + """ pppragma_directive : PPPRAGMA + | PPPRAGMA PPPRAGMASTR + | _PRAGMA LPAREN unified_string_literal RPAREN + """ + if len(p) == 5: + p[0] = c_ast.Pragma(p[3], self._token_coord(p, 2)) + elif len(p) == 3: + p[0] = c_ast.Pragma(p[2], self._token_coord(p, 2)) + else: + p[0] = c_ast.Pragma("", self._token_coord(p, 1)) + + def p_pppragma_directive_list(self, p): + """ pppragma_directive_list : pppragma_directive + | pppragma_directive_list pppragma_directive + """ + p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] + + # In function definitions, the declarator can be followed by + # a declaration list, for old "K&R style" function definitions. + def p_function_definition_1(self, p): + """ function_definition : id_declarator declaration_list_opt compound_statement + """ + # no declaration specifiers - 'int' becomes the default type + spec = dict( + qual=[], + alignment=[], + storage=[], + type=[c_ast.IdentifierType(['int'], + coord=self._token_coord(p, 1))], + function=[]) + + p[0] = self._build_function_definition( + spec=spec, + decl=p[1], + param_decls=p[2], + body=p[3]) + + def p_function_definition_2(self, p): + """ function_definition : declaration_specifiers id_declarator declaration_list_opt compound_statement + """ + spec = p[1] + + p[0] = self._build_function_definition( + spec=spec, + decl=p[2], + param_decls=p[3], + body=p[4]) + + # Note, according to C18 A.2.2 6.7.10 static_assert-declaration _Static_assert + # is a declaration, not a statement. We additionally recognise it as a statement + # to fix parsing of _Static_assert inside functions. + # + def p_statement(self, p): + """ statement : labeled_statement + | expression_statement + | compound_statement + | selection_statement + | iteration_statement + | jump_statement + | pppragma_directive + | static_assert + """ + p[0] = p[1] + + # A pragma is generally considered a decorator rather than an actual + # statement. Still, for the purposes of analyzing an abstract syntax tree of + # C code, pragmas should not be ignored and were previously treated as a + # statement. This presents a problem for constructs that take a statement + # such as labeled_statements, selection_statements, and + # iteration_statements, causing a misleading structure in the AST.
For + # example, consider the following C code. + # + # for (int i = 0; i < 3; i++) + # #pragma omp critical + # sum += 1; + # + # This code will compile and execute "sum += 1;" as the body of the for + # loop. Previous implementations of PyCParser would render the AST for this + # block of code as follows: + # + # For: + # DeclList: + # Decl: i, [], [], [] + # TypeDecl: i, [] + # IdentifierType: ['int'] + # Constant: int, 0 + # BinaryOp: < + # ID: i + # Constant: int, 3 + # UnaryOp: p++ + # ID: i + # Pragma: omp critical + # Assignment: += + # ID: sum + # Constant: int, 1 + # + # This AST misleadingly takes the Pragma as the body of the loop and the + # assignment then becomes a sibling of the loop. + # + # To solve edge cases like these, the pragmacomp_or_statement rule groups + # a pragma and its following statement (which would otherwise be orphaned) + # using a compound block, effectively turning the above code into: + # + # for (int i = 0; i < 3; i++) { + # #pragma omp critical + # sum += 1; + # } + def p_pragmacomp_or_statement(self, p): + """ pragmacomp_or_statement : pppragma_directive_list statement + | statement + """ + if len(p) == 3: + p[0] = c_ast.Compound( + block_items=p[1]+[p[2]], + coord=self._token_coord(p, 1)) + else: + p[0] = p[1] + + # In C, declarations can come several in a line: + # int x, *px, romulo = 5; + # + # However, for the AST, we will split them to separate Decl + # nodes. + # + # This rule splits its declarations and always returns a list + # of Decl nodes, even if it's one element long. + # + def p_decl_body(self, p): + """ decl_body : declaration_specifiers init_declarator_list_opt + | declaration_specifiers_no_type id_init_declarator_list_opt + """ + spec = p[1] + + # p[2] (init_declarator_list_opt) is either a list or None + # + if p[2] is None: + # By the standard, you must have at least one declarator unless + # declaring a structure tag, a union tag, or the members of an + # enumeration. + # + ty = spec['type'] + s_u_or_e = (c_ast.Struct, c_ast.Union, c_ast.Enum) + if len(ty) == 1 and isinstance(ty[0], s_u_or_e): + decls = [c_ast.Decl( + name=None, + quals=spec['qual'], + align=spec['alignment'], + storage=spec['storage'], + funcspec=spec['function'], + type=ty[0], + init=None, + bitsize=None, + coord=ty[0].coord)] + + # However, this case can also occur on redeclared identifiers in + # an inner scope. The trouble is that the redeclared type's name + # gets grouped into declaration_specifiers; _build_declarations + # compensates for this. + # + else: + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=None, init=None)], + typedef_namespace=True) + + else: + decls = self._build_declarations( + spec=spec, + decls=p[2], + typedef_namespace=True) + + p[0] = decls + + # The declaration has been split to a decl_body sub-rule and + # SEMI, because having them in a single rule created a problem + # for defining typedefs. + # + # If a typedef line was directly followed by a line using the + # type defined with the typedef, the type would not be + # recognized. This is because to reduce the declaration rule, + # the parser's lookahead asked for the token after SEMI, which + # was the type from the next line, and the lexer had no chance + # to see the updated type symbol table. + # + # Splitting solves this problem, because after seeing SEMI, + # the parser reduces decl_body, which actually adds the new + # type into the table to be seen by the lexer before the next + # line is reached. 
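A sketch of the case this split fixes (illustrative): the name defined by a typedef must already lex as TYPEID on the very next line.

    from pycparser import c_parser

    ast = c_parser.CParser().parse('typedef int T;\nT x;')
    print(type(ast.ext[0]).__name__, type(ast.ext[1]).__name__)  # Typedef Decl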
+ def p_declaration(self, p): + """ declaration : decl_body SEMI + """ + p[0] = p[1] + + # Since each declaration is a list of declarations, this + # rule will combine all the declarations and return a single + # list + # + def p_declaration_list(self, p): + """ declaration_list : declaration + | declaration_list declaration + """ + p[0] = p[1] if len(p) == 2 else p[1] + p[2] + + # To know when declaration-specifiers end and declarators begin, + # we require declaration-specifiers to have at least one + # type-specifier, and disallow typedef-names after we've seen any + # type-specifier. These are both required by the spec. + # + def p_declaration_specifiers_no_type_1(self, p): + """ declaration_specifiers_no_type : type_qualifier declaration_specifiers_no_type_opt + """ + p[0] = self._add_declaration_specifier(p[2], p[1], 'qual') + + def p_declaration_specifiers_no_type_2(self, p): + """ declaration_specifiers_no_type : storage_class_specifier declaration_specifiers_no_type_opt + """ + p[0] = self._add_declaration_specifier(p[2], p[1], 'storage') + + def p_declaration_specifiers_no_type_3(self, p): + """ declaration_specifiers_no_type : function_specifier declaration_specifiers_no_type_opt + """ + p[0] = self._add_declaration_specifier(p[2], p[1], 'function') + + # Without this, `typedef _Atomic(T) U` will parse incorrectly because the + # _Atomic qualifier will match, instead of the specifier. + def p_declaration_specifiers_no_type_4(self, p): + """ declaration_specifiers_no_type : atomic_specifier declaration_specifiers_no_type_opt + """ + p[0] = self._add_declaration_specifier(p[2], p[1], 'type') + + def p_declaration_specifiers_no_type_5(self, p): + """ declaration_specifiers_no_type : alignment_specifier declaration_specifiers_no_type_opt + """ + p[0] = self._add_declaration_specifier(p[2], p[1], 'alignment') + + def p_declaration_specifiers_1(self, p): + """ declaration_specifiers : declaration_specifiers type_qualifier + """ + p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True) + + def p_declaration_specifiers_2(self, p): + """ declaration_specifiers : declaration_specifiers storage_class_specifier + """ + p[0] = self._add_declaration_specifier(p[1], p[2], 'storage', append=True) + + def p_declaration_specifiers_3(self, p): + """ declaration_specifiers : declaration_specifiers function_specifier + """ + p[0] = self._add_declaration_specifier(p[1], p[2], 'function', append=True) + + def p_declaration_specifiers_4(self, p): + """ declaration_specifiers : declaration_specifiers type_specifier_no_typeid + """ + p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True) + + def p_declaration_specifiers_5(self, p): + """ declaration_specifiers : type_specifier + """ + p[0] = self._add_declaration_specifier(None, p[1], 'type') + + def p_declaration_specifiers_6(self, p): + """ declaration_specifiers : declaration_specifiers_no_type type_specifier + """ + p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True) + + def p_declaration_specifiers_7(self, p): + """ declaration_specifiers : declaration_specifiers alignment_specifier + """ + p[0] = self._add_declaration_specifier(p[1], p[2], 'alignment', append=True) + + def p_storage_class_specifier(self, p): + """ storage_class_specifier : AUTO + | REGISTER + | STATIC + | EXTERN + | TYPEDEF + | _THREAD_LOCAL + """ + p[0] = p[1] + + def p_function_specifier(self, p): + """ function_specifier : INLINE + | _NORETURN + """ + p[0] = p[1] + + def p_type_specifier_no_typeid(self, p): + """ 
type_specifier_no_typeid  : VOID
+                                      | _BOOL
+                                      | CHAR
+                                      | SHORT
+                                      | INT
+                                      | LONG
+                                      | FLOAT
+                                      | DOUBLE
+                                      | _COMPLEX
+                                      | SIGNED
+                                      | UNSIGNED
+                                      | __INT128
+        """
+        p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1))
+
+    def p_type_specifier(self, p):
+        """ type_specifier  : typedef_name
+                            | enum_specifier
+                            | struct_or_union_specifier
+                            | type_specifier_no_typeid
+                            | atomic_specifier
+        """
+        p[0] = p[1]
+
+    # See section 6.7.2.4 of the C11 standard.
+    def p_atomic_specifier(self, p):
+        """ atomic_specifier  : _ATOMIC LPAREN type_name RPAREN
+        """
+        typ = p[3]
+        typ.quals.append('_Atomic')
+        p[0] = typ
+
+    def p_type_qualifier(self, p):
+        """ type_qualifier  : CONST
+                            | RESTRICT
+                            | VOLATILE
+                            | _ATOMIC
+        """
+        p[0] = p[1]
+
+    def p_init_declarator_list(self, p):
+        """ init_declarator_list    : init_declarator
+                                    | init_declarator_list COMMA init_declarator
+        """
+        p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
+
+    # Returns a {decl=<declarator> : init=<initializer>} dictionary
+    # If there's no initializer, uses None
+    #
+    def p_init_declarator(self, p):
+        """ init_declarator : declarator
+                            | declarator EQUALS initializer
+        """
+        p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
+
+    def p_id_init_declarator_list(self, p):
+        """ id_init_declarator_list    : id_init_declarator
+                                       | id_init_declarator_list COMMA init_declarator
+        """
+        p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]]
+
+    def p_id_init_declarator(self, p):
+        """ id_init_declarator : id_declarator
+                               | id_declarator EQUALS initializer
+        """
+        p[0] = dict(decl=p[1], init=(p[3] if len(p) > 2 else None))
+
+    # Require at least one type specifier in a specifier-qualifier-list
+    #
+    def p_specifier_qualifier_list_1(self, p):
+        """ specifier_qualifier_list    : specifier_qualifier_list type_specifier_no_typeid
+        """
+        p[0] = self._add_declaration_specifier(p[1], p[2], 'type', append=True)
+
+    def p_specifier_qualifier_list_2(self, p):
+        """ specifier_qualifier_list    : specifier_qualifier_list type_qualifier
+        """
+        p[0] = self._add_declaration_specifier(p[1], p[2], 'qual', append=True)
+
+    def p_specifier_qualifier_list_3(self, p):
+        """ specifier_qualifier_list  : type_specifier
+        """
+        p[0] = self._add_declaration_specifier(None, p[1], 'type')
+
+    def p_specifier_qualifier_list_4(self, p):
+        """ specifier_qualifier_list  : type_qualifier_list type_specifier
+        """
+        p[0] = dict(qual=p[1], alignment=[], storage=[], type=[p[2]], function=[])
+
+    def p_specifier_qualifier_list_5(self, p):
+        """ specifier_qualifier_list  : alignment_specifier
+        """
+        p[0] = dict(qual=[], alignment=[p[1]], storage=[], type=[], function=[])
+
+    def p_specifier_qualifier_list_6(self, p):
+        """ specifier_qualifier_list  : specifier_qualifier_list alignment_specifier
+        """
+        p[0] = self._add_declaration_specifier(p[1], p[2], 'alignment')
+
+    # TYPEID is allowed here (and in other struct/enum related tag names), because
+    # struct/enum tags reside in their own namespace and can be named the same as types
+    #
+    def p_struct_or_union_specifier_1(self, p):
+        """ struct_or_union_specifier   : struct_or_union ID
+                                        | struct_or_union TYPEID
+        """
+        klass = self._select_struct_union_class(p[1])
+        # None means no list of members
+        p[0] = klass(
+            name=p[2],
+            decls=None,
+            coord=self._token_coord(p, 2))
+
+    def p_struct_or_union_specifier_2(self, p):
+        """ struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close
+                                      | struct_or_union brace_open brace_close
+        """
+        klass = self._select_struct_union_class(p[1])
+        if len(p) == 4:
+            # Empty sequence means an empty list
of members + p[0] = klass( + name=None, + decls=[], + coord=self._token_coord(p, 2)) + else: + p[0] = klass( + name=None, + decls=p[3], + coord=self._token_coord(p, 2)) + + + def p_struct_or_union_specifier_3(self, p): + """ struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close + | struct_or_union ID brace_open brace_close + | struct_or_union TYPEID brace_open struct_declaration_list brace_close + | struct_or_union TYPEID brace_open brace_close + """ + klass = self._select_struct_union_class(p[1]) + if len(p) == 5: + # Empty sequence means an empty list of members + p[0] = klass( + name=p[2], + decls=[], + coord=self._token_coord(p, 2)) + else: + p[0] = klass( + name=p[2], + decls=p[4], + coord=self._token_coord(p, 2)) + + def p_struct_or_union(self, p): + """ struct_or_union : STRUCT + | UNION + """ + p[0] = p[1] + + # Combine all declarations into a single list + # + def p_struct_declaration_list(self, p): + """ struct_declaration_list : struct_declaration + | struct_declaration_list struct_declaration + """ + if len(p) == 2: + p[0] = p[1] or [] + else: + p[0] = p[1] + (p[2] or []) + + def p_struct_declaration_1(self, p): + """ struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI + """ + spec = p[1] + assert 'typedef' not in spec['storage'] + + if p[2] is not None: + decls = self._build_declarations( + spec=spec, + decls=p[2]) + + elif len(spec['type']) == 1: + # Anonymous struct/union, gcc extension, C1x feature. + # Although the standard only allows structs/unions here, I see no + # reason to disallow other types since some compilers have typedefs + # here, and pycparser isn't about rejecting all invalid code. + # + node = spec['type'][0] + if isinstance(node, c_ast.Node): + decl_type = node + else: + decl_type = c_ast.IdentifierType(node) + + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=decl_type)]) + + else: + # Structure/union members can have the same names as typedefs. + # The trouble is that the member's name gets grouped into + # specifier_qualifier_list; _build_declarations compensates. 
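+            # For example (editor's sketch, hypothetical names):
+            #
+            #   typedef int Name;
+            #   struct { int Name; } s;
+            #
+            # "Name" lexes as TYPEID, so "int Name" arrives here as two
+            # type specifiers and an empty declarator list;
+            # _build_declarations recovers "Name" as the member's name.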
+ # + decls = self._build_declarations( + spec=spec, + decls=[dict(decl=None, init=None)]) + + p[0] = decls + + def p_struct_declaration_2(self, p): + """ struct_declaration : SEMI + """ + p[0] = None + + def p_struct_declaration_3(self, p): + """ struct_declaration : pppragma_directive + """ + p[0] = [p[1]] + + def p_struct_declarator_list(self, p): + """ struct_declarator_list : struct_declarator + | struct_declarator_list COMMA struct_declarator + """ + p[0] = p[1] + [p[3]] if len(p) == 4 else [p[1]] + + # struct_declarator passes up a dict with the keys: decl (for + # the underlying declarator) and bitsize (for the bitsize) + # + def p_struct_declarator_1(self, p): + """ struct_declarator : declarator + """ + p[0] = {'decl': p[1], 'bitsize': None} + + def p_struct_declarator_2(self, p): + """ struct_declarator : declarator COLON constant_expression + | COLON constant_expression + """ + if len(p) > 3: + p[0] = {'decl': p[1], 'bitsize': p[3]} + else: + p[0] = {'decl': c_ast.TypeDecl(None, None, None, None), 'bitsize': p[2]} + + def p_enum_specifier_1(self, p): + """ enum_specifier : ENUM ID + | ENUM TYPEID + """ + p[0] = c_ast.Enum(p[2], None, self._token_coord(p, 1)) + + def p_enum_specifier_2(self, p): + """ enum_specifier : ENUM brace_open enumerator_list brace_close + """ + p[0] = c_ast.Enum(None, p[3], self._token_coord(p, 1)) + + def p_enum_specifier_3(self, p): + """ enum_specifier : ENUM ID brace_open enumerator_list brace_close + | ENUM TYPEID brace_open enumerator_list brace_close + """ + p[0] = c_ast.Enum(p[2], p[4], self._token_coord(p, 1)) + + def p_enumerator_list(self, p): + """ enumerator_list : enumerator + | enumerator_list COMMA + | enumerator_list COMMA enumerator + """ + if len(p) == 2: + p[0] = c_ast.EnumeratorList([p[1]], p[1].coord) + elif len(p) == 3: + p[0] = p[1] + else: + p[1].enumerators.append(p[3]) + p[0] = p[1] + + def p_alignment_specifier(self, p): + """ alignment_specifier : _ALIGNAS LPAREN type_name RPAREN + | _ALIGNAS LPAREN constant_expression RPAREN + """ + p[0] = c_ast.Alignas(p[3], self._token_coord(p, 1)) + + def p_enumerator(self, p): + """ enumerator : ID + | ID EQUALS constant_expression + """ + if len(p) == 2: + enumerator = c_ast.Enumerator( + p[1], None, + self._token_coord(p, 1)) + else: + enumerator = c_ast.Enumerator( + p[1], p[3], + self._token_coord(p, 1)) + self._add_identifier(enumerator.name, enumerator.coord) + + p[0] = enumerator + + def p_declarator(self, p): + """ declarator : id_declarator + | typeid_declarator + """ + p[0] = p[1] + + @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) + def p_xxx_declarator_1(self, p): + """ xxx_declarator : direct_xxx_declarator + """ + p[0] = p[1] + + @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) + def p_xxx_declarator_2(self, p): + """ xxx_declarator : pointer direct_xxx_declarator + """ + p[0] = self._type_modify_decl(p[2], p[1]) + + @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) + def p_direct_xxx_declarator_1(self, p): + """ direct_xxx_declarator : yyy + """ + p[0] = c_ast.TypeDecl( + declname=p[1], + type=None, + quals=None, + align=None, + coord=self._token_coord(p, 1)) + + @parameterized(('id', 'ID'), ('typeid', 'TYPEID')) + def p_direct_xxx_declarator_2(self, p): + """ direct_xxx_declarator : LPAREN xxx_declarator RPAREN + """ + p[0] = p[2] + + @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) + def p_direct_xxx_declarator_3(self, p): + """ 
direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET + """ + quals = (p[3] if len(p) > 5 else []) or [] + # Accept dimension qualifiers + # Per C99 6.7.5.3 p7 + arr = c_ast.ArrayDecl( + type=None, + dim=p[4] if len(p) > 5 else p[3], + dim_quals=quals, + coord=p[1].coord) + + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) + + @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) + def p_direct_xxx_declarator_4(self, p): + """ direct_xxx_declarator : direct_xxx_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET + | direct_xxx_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET + """ + # Using slice notation for PLY objects doesn't work in Python 3 for the + # version of PLY embedded with pycparser; see PLY Google Code issue 30. + # Work around that here by listing the two elements separately. + listed_quals = [item if isinstance(item, list) else [item] + for item in [p[3],p[4]]] + dim_quals = [qual for sublist in listed_quals for qual in sublist + if qual is not None] + arr = c_ast.ArrayDecl( + type=None, + dim=p[5], + dim_quals=dim_quals, + coord=p[1].coord) + + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) + + # Special for VLAs + # + @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) + def p_direct_xxx_declarator_5(self, p): + """ direct_xxx_declarator : direct_xxx_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET + """ + arr = c_ast.ArrayDecl( + type=None, + dim=c_ast.ID(p[4], self._token_coord(p, 4)), + dim_quals=p[3] if p[3] is not None else [], + coord=p[1].coord) + + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) + + @parameterized(('id', 'ID'), ('typeid', 'TYPEID'), ('typeid_noparen', 'TYPEID')) + def p_direct_xxx_declarator_6(self, p): + """ direct_xxx_declarator : direct_xxx_declarator LPAREN parameter_type_list RPAREN + | direct_xxx_declarator LPAREN identifier_list_opt RPAREN + """ + func = c_ast.FuncDecl( + args=p[3], + type=None, + coord=p[1].coord) + + # To see why _get_yacc_lookahead_token is needed, consider: + # typedef char TT; + # void foo(int TT) { TT = 10; } + # Outside the function, TT is a typedef, but inside (starting and + # ending with the braces) it's a parameter. The trouble begins with + # yacc's lookahead token. We don't know if we're declaring or + # defining a function until we see LBRACE, but if we wait for yacc to + # trigger a rule on that token, then TT will have already been read + # and incorrectly interpreted as TYPEID. We need to add the + # parameters to the scope the moment the lexer sees LBRACE. + # + if self._get_yacc_lookahead_token().type == "LBRACE": + if func.args is not None: + for param in func.args.params: + if isinstance(param, c_ast.EllipsisParam): break + self._add_identifier(param.name, param.coord) + + p[0] = self._type_modify_decl(decl=p[1], modifier=func) + + def p_pointer(self, p): + """ pointer : TIMES type_qualifier_list_opt + | TIMES type_qualifier_list_opt pointer + """ + coord = self._token_coord(p, 1) + # Pointer decls nest from inside out. This is important when different + # levels have different qualifiers. For example: + # + # char * const * p; + # + # Means "pointer to const pointer to char" + # + # While: + # + # char ** const p; + # + # Means "const pointer to pointer to char" + # + # So when we construct PtrDecl nestings, the leftmost pointer goes in + # as the most nested type. 
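+        #
+        # As a hedged sketch (editor's note), "char * const * p;" ends up
+        # nested as:
+        #
+        #   PtrDecl(quals=[])           - p itself, a plain pointer,
+        #     PtrDecl(quals=['const'])  - to a const pointer,
+        #       TypeDecl('p')           - to char:
+        #         IdentifierType(['char'])
+        #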
+ nested_type = c_ast.PtrDecl(quals=p[2] or [], type=None, coord=coord) + if len(p) > 3: + tail_type = p[3] + while tail_type.type is not None: + tail_type = tail_type.type + tail_type.type = nested_type + p[0] = p[3] + else: + p[0] = nested_type + + def p_type_qualifier_list(self, p): + """ type_qualifier_list : type_qualifier + | type_qualifier_list type_qualifier + """ + p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] + + def p_parameter_type_list(self, p): + """ parameter_type_list : parameter_list + | parameter_list COMMA ELLIPSIS + """ + if len(p) > 2: + p[1].params.append(c_ast.EllipsisParam(self._token_coord(p, 3))) + + p[0] = p[1] + + def p_parameter_list(self, p): + """ parameter_list : parameter_declaration + | parameter_list COMMA parameter_declaration + """ + if len(p) == 2: # single parameter + p[0] = c_ast.ParamList([p[1]], p[1].coord) + else: + p[1].params.append(p[3]) + p[0] = p[1] + + # From ISO/IEC 9899:TC2, 6.7.5.3.11: + # "If, in a parameter declaration, an identifier can be treated either + # as a typedef name or as a parameter name, it shall be taken as a + # typedef name." + # + # Inside a parameter declaration, once we've reduced declaration specifiers, + # if we shift in an LPAREN and see a TYPEID, it could be either an abstract + # declarator or a declarator nested inside parens. This rule tells us to + # always treat it as an abstract declarator. Therefore, we only accept + # `id_declarator`s and `typeid_noparen_declarator`s. + def p_parameter_declaration_1(self, p): + """ parameter_declaration : declaration_specifiers id_declarator + | declaration_specifiers typeid_noparen_declarator + """ + spec = p[1] + if not spec['type']: + spec['type'] = [c_ast.IdentifierType(['int'], + coord=self._token_coord(p, 1))] + p[0] = self._build_declarations( + spec=spec, + decls=[dict(decl=p[2])])[0] + + def p_parameter_declaration_2(self, p): + """ parameter_declaration : declaration_specifiers abstract_declarator_opt + """ + spec = p[1] + if not spec['type']: + spec['type'] = [c_ast.IdentifierType(['int'], + coord=self._token_coord(p, 1))] + + # Parameters can have the same names as typedefs. The trouble is that + # the parameter's name gets grouped into declaration_specifiers, making + # it look like an old-style declaration; compensate. 
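+        #
+        # A hedged example (editor's note): after "typedef char TT;", the
+        # parameter in
+        #
+        #   int f(int TT);
+        #
+        # is a parameter named TT of type int, yet "int TT" reduces
+        # entirely into declaration_specifiers; the scope check below
+        # recovers TT as the parameter's name.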
+ # + if len(spec['type']) > 1 and len(spec['type'][-1].names) == 1 and \ + self._is_type_in_scope(spec['type'][-1].names[0]): + decl = self._build_declarations( + spec=spec, + decls=[dict(decl=p[2], init=None)])[0] + + # This truly is an old-style parameter declaration + # + else: + decl = c_ast.Typename( + name='', + quals=spec['qual'], + align=None, + type=p[2] or c_ast.TypeDecl(None, None, None, None), + coord=self._token_coord(p, 2)) + typename = spec['type'] + decl = self._fix_decl_name_type(decl, typename) + + p[0] = decl + + def p_identifier_list(self, p): + """ identifier_list : identifier + | identifier_list COMMA identifier + """ + if len(p) == 2: # single parameter + p[0] = c_ast.ParamList([p[1]], p[1].coord) + else: + p[1].params.append(p[3]) + p[0] = p[1] + + def p_initializer_1(self, p): + """ initializer : assignment_expression + """ + p[0] = p[1] + + def p_initializer_2(self, p): + """ initializer : brace_open initializer_list_opt brace_close + | brace_open initializer_list COMMA brace_close + """ + if p[2] is None: + p[0] = c_ast.InitList([], self._token_coord(p, 1)) + else: + p[0] = p[2] + + def p_initializer_list(self, p): + """ initializer_list : designation_opt initializer + | initializer_list COMMA designation_opt initializer + """ + if len(p) == 3: # single initializer + init = p[2] if p[1] is None else c_ast.NamedInitializer(p[1], p[2]) + p[0] = c_ast.InitList([init], p[2].coord) + else: + init = p[4] if p[3] is None else c_ast.NamedInitializer(p[3], p[4]) + p[1].exprs.append(init) + p[0] = p[1] + + def p_designation(self, p): + """ designation : designator_list EQUALS + """ + p[0] = p[1] + + # Designators are represented as a list of nodes, in the order in which + # they're written in the code. + # + def p_designator_list(self, p): + """ designator_list : designator + | designator_list designator + """ + p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] + + def p_designator(self, p): + """ designator : LBRACKET constant_expression RBRACKET + | PERIOD identifier + """ + p[0] = p[2] + + def p_type_name(self, p): + """ type_name : specifier_qualifier_list abstract_declarator_opt + """ + typename = c_ast.Typename( + name='', + quals=p[1]['qual'][:], + align=None, + type=p[2] or c_ast.TypeDecl(None, None, None, None), + coord=self._token_coord(p, 2)) + + p[0] = self._fix_decl_name_type(typename, p[1]['type']) + + def p_abstract_declarator_1(self, p): + """ abstract_declarator : pointer + """ + dummytype = c_ast.TypeDecl(None, None, None, None) + p[0] = self._type_modify_decl( + decl=dummytype, + modifier=p[1]) + + def p_abstract_declarator_2(self, p): + """ abstract_declarator : pointer direct_abstract_declarator + """ + p[0] = self._type_modify_decl(p[2], p[1]) + + def p_abstract_declarator_3(self, p): + """ abstract_declarator : direct_abstract_declarator + """ + p[0] = p[1] + + # Creating and using direct_abstract_declarator_opt here + # instead of listing both direct_abstract_declarator and the + # lack of it in the beginning of _1 and _2 caused two + # shift/reduce errors. 
+ # + def p_direct_abstract_declarator_1(self, p): + """ direct_abstract_declarator : LPAREN abstract_declarator RPAREN """ + p[0] = p[2] + + def p_direct_abstract_declarator_2(self, p): + """ direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET + """ + arr = c_ast.ArrayDecl( + type=None, + dim=p[3], + dim_quals=[], + coord=p[1].coord) + + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) + + def p_direct_abstract_declarator_3(self, p): + """ direct_abstract_declarator : LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET + """ + quals = (p[2] if len(p) > 4 else []) or [] + p[0] = c_ast.ArrayDecl( + type=c_ast.TypeDecl(None, None, None, None), + dim=p[3] if len(p) > 4 else p[2], + dim_quals=quals, + coord=self._token_coord(p, 1)) + + def p_direct_abstract_declarator_4(self, p): + """ direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET + """ + arr = c_ast.ArrayDecl( + type=None, + dim=c_ast.ID(p[3], self._token_coord(p, 3)), + dim_quals=[], + coord=p[1].coord) + + p[0] = self._type_modify_decl(decl=p[1], modifier=arr) + + def p_direct_abstract_declarator_5(self, p): + """ direct_abstract_declarator : LBRACKET TIMES RBRACKET + """ + p[0] = c_ast.ArrayDecl( + type=c_ast.TypeDecl(None, None, None, None), + dim=c_ast.ID(p[3], self._token_coord(p, 3)), + dim_quals=[], + coord=self._token_coord(p, 1)) + + def p_direct_abstract_declarator_6(self, p): + """ direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN + """ + func = c_ast.FuncDecl( + args=p[3], + type=None, + coord=p[1].coord) + + p[0] = self._type_modify_decl(decl=p[1], modifier=func) + + def p_direct_abstract_declarator_7(self, p): + """ direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN + """ + p[0] = c_ast.FuncDecl( + args=p[2], + type=c_ast.TypeDecl(None, None, None, None), + coord=self._token_coord(p, 1)) + + def p_direct_abstract_declarator_8(self, p): + """ direct_abstract_declarator : LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET + | LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET + """ + listed_quals = [item if isinstance(item, list) else [item] + for item in [p[2],p[3]]] + quals = [qual for sublist in listed_quals for qual in sublist + if qual is not None] + p[0] = c_ast.ArrayDecl( + type=c_ast.TypeDecl(None, None, None, None), + dim=p[4], + dim_quals=quals, + coord=self._token_coord(p, 1)) + + # declaration is a list, statement isn't. 
To make it consistent, block_item + # will always be a list + # + def p_block_item(self, p): + """ block_item : declaration + | statement + """ + p[0] = p[1] if isinstance(p[1], list) else [p[1]] + + # Since we made block_item a list, this just combines lists + # + def p_block_item_list(self, p): + """ block_item_list : block_item + | block_item_list block_item + """ + # Empty block items (plain ';') produce [None], so ignore them + p[0] = p[1] if (len(p) == 2 or p[2] == [None]) else p[1] + p[2] + + def p_compound_statement_1(self, p): + """ compound_statement : brace_open block_item_list_opt brace_close """ + p[0] = c_ast.Compound( + block_items=p[2], + coord=self._token_coord(p, 1)) + + def p_labeled_statement_1(self, p): + """ labeled_statement : ID COLON pragmacomp_or_statement """ + p[0] = c_ast.Label(p[1], p[3], self._token_coord(p, 1)) + + def p_labeled_statement_2(self, p): + """ labeled_statement : CASE constant_expression COLON pragmacomp_or_statement """ + p[0] = c_ast.Case(p[2], [p[4]], self._token_coord(p, 1)) + + def p_labeled_statement_3(self, p): + """ labeled_statement : DEFAULT COLON pragmacomp_or_statement """ + p[0] = c_ast.Default([p[3]], self._token_coord(p, 1)) + + def p_labeled_statement_4(self, p): + """ labeled_statement : ID COLON """ + p[0] = c_ast.Label(p[1], c_ast.EmptyStatement(self._token_coord(p, 1)), self._token_coord(p, 1)) + + def p_labeled_statement_5(self, p): + """ labeled_statement : CASE constant_expression COLON """ + p[0] = c_ast.Case(p[2], [c_ast.EmptyStatement(self._token_coord(p, 2))], self._token_coord(p, 1)) + + def p_labeled_statement_6(self, p): + """ labeled_statement : DEFAULT COLON """ + p[0] = c_ast.Default([c_ast.EmptyStatement(self._token_coord(p, 1))], self._token_coord(p, 1)) + + def p_selection_statement_1(self, p): + """ selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement """ + p[0] = c_ast.If(p[3], p[5], None, self._token_coord(p, 1)) + + def p_selection_statement_2(self, p): + """ selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement """ + p[0] = c_ast.If(p[3], p[5], p[7], self._token_coord(p, 1)) + + def p_selection_statement_3(self, p): + """ selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement """ + p[0] = fix_switch_cases( + c_ast.Switch(p[3], p[5], self._token_coord(p, 1))) + + def p_iteration_statement_1(self, p): + """ iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement """ + p[0] = c_ast.While(p[3], p[5], self._token_coord(p, 1)) + + def p_iteration_statement_2(self, p): + """ iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI """ + p[0] = c_ast.DoWhile(p[5], p[2], self._token_coord(p, 1)) + + def p_iteration_statement_3(self, p): + """ iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """ + p[0] = c_ast.For(p[3], p[5], p[7], p[9], self._token_coord(p, 1)) + + def p_iteration_statement_4(self, p): + """ iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement """ + p[0] = c_ast.For(c_ast.DeclList(p[3], self._token_coord(p, 1)), + p[4], p[6], p[8], self._token_coord(p, 1)) + + def p_jump_statement_1(self, p): + """ jump_statement : GOTO ID SEMI """ + p[0] = c_ast.Goto(p[2], self._token_coord(p, 1)) + + def p_jump_statement_2(self, p): + """ jump_statement : BREAK SEMI """ + p[0] = c_ast.Break(self._token_coord(p, 1)) + + def p_jump_statement_3(self, p): + """ 
jump_statement : CONTINUE SEMI """ + p[0] = c_ast.Continue(self._token_coord(p, 1)) + + def p_jump_statement_4(self, p): + """ jump_statement : RETURN expression SEMI + | RETURN SEMI + """ + p[0] = c_ast.Return(p[2] if len(p) == 4 else None, self._token_coord(p, 1)) + + def p_expression_statement(self, p): + """ expression_statement : expression_opt SEMI """ + if p[1] is None: + p[0] = c_ast.EmptyStatement(self._token_coord(p, 2)) + else: + p[0] = p[1] + + def p_expression(self, p): + """ expression : assignment_expression + | expression COMMA assignment_expression + """ + if len(p) == 2: + p[0] = p[1] + else: + if not isinstance(p[1], c_ast.ExprList): + p[1] = c_ast.ExprList([p[1]], p[1].coord) + + p[1].exprs.append(p[3]) + p[0] = p[1] + + def p_parenthesized_compound_expression(self, p): + """ assignment_expression : LPAREN compound_statement RPAREN """ + p[0] = p[2] + + def p_typedef_name(self, p): + """ typedef_name : TYPEID """ + p[0] = c_ast.IdentifierType([p[1]], coord=self._token_coord(p, 1)) + + def p_assignment_expression(self, p): + """ assignment_expression : conditional_expression + | unary_expression assignment_operator assignment_expression + """ + if len(p) == 2: + p[0] = p[1] + else: + p[0] = c_ast.Assignment(p[2], p[1], p[3], p[1].coord) + + # K&R2 defines these as many separate rules, to encode + # precedence and associativity. Why work hard ? I'll just use + # the built in precedence/associativity specification feature + # of PLY. (see precedence declaration above) + # + def p_assignment_operator(self, p): + """ assignment_operator : EQUALS + | XOREQUAL + | TIMESEQUAL + | DIVEQUAL + | MODEQUAL + | PLUSEQUAL + | MINUSEQUAL + | LSHIFTEQUAL + | RSHIFTEQUAL + | ANDEQUAL + | OREQUAL + """ + p[0] = p[1] + + def p_constant_expression(self, p): + """ constant_expression : conditional_expression """ + p[0] = p[1] + + def p_conditional_expression(self, p): + """ conditional_expression : binary_expression + | binary_expression CONDOP expression COLON conditional_expression + """ + if len(p) == 2: + p[0] = p[1] + else: + p[0] = c_ast.TernaryOp(p[1], p[3], p[5], p[1].coord) + + def p_binary_expression(self, p): + """ binary_expression : cast_expression + | binary_expression TIMES binary_expression + | binary_expression DIVIDE binary_expression + | binary_expression MOD binary_expression + | binary_expression PLUS binary_expression + | binary_expression MINUS binary_expression + | binary_expression RSHIFT binary_expression + | binary_expression LSHIFT binary_expression + | binary_expression LT binary_expression + | binary_expression LE binary_expression + | binary_expression GE binary_expression + | binary_expression GT binary_expression + | binary_expression EQ binary_expression + | binary_expression NE binary_expression + | binary_expression AND binary_expression + | binary_expression OR binary_expression + | binary_expression XOR binary_expression + | binary_expression LAND binary_expression + | binary_expression LOR binary_expression + """ + if len(p) == 2: + p[0] = p[1] + else: + p[0] = c_ast.BinaryOp(p[2], p[1], p[3], p[1].coord) + + def p_cast_expression_1(self, p): + """ cast_expression : unary_expression """ + p[0] = p[1] + + def p_cast_expression_2(self, p): + """ cast_expression : LPAREN type_name RPAREN cast_expression """ + p[0] = c_ast.Cast(p[2], p[4], self._token_coord(p, 1)) + + def p_unary_expression_1(self, p): + """ unary_expression : postfix_expression """ + p[0] = p[1] + + def p_unary_expression_2(self, p): + """ unary_expression : PLUSPLUS unary_expression + | 
MINUSMINUS unary_expression + | unary_operator cast_expression + """ + p[0] = c_ast.UnaryOp(p[1], p[2], p[2].coord) + + def p_unary_expression_3(self, p): + """ unary_expression : SIZEOF unary_expression + | SIZEOF LPAREN type_name RPAREN + | _ALIGNOF LPAREN type_name RPAREN + """ + p[0] = c_ast.UnaryOp( + p[1], + p[2] if len(p) == 3 else p[3], + self._token_coord(p, 1)) + + def p_unary_operator(self, p): + """ unary_operator : AND + | TIMES + | PLUS + | MINUS + | NOT + | LNOT + """ + p[0] = p[1] + + def p_postfix_expression_1(self, p): + """ postfix_expression : primary_expression """ + p[0] = p[1] + + def p_postfix_expression_2(self, p): + """ postfix_expression : postfix_expression LBRACKET expression RBRACKET """ + p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord) + + def p_postfix_expression_3(self, p): + """ postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN + | postfix_expression LPAREN RPAREN + """ + p[0] = c_ast.FuncCall(p[1], p[3] if len(p) == 5 else None, p[1].coord) + + def p_postfix_expression_4(self, p): + """ postfix_expression : postfix_expression PERIOD ID + | postfix_expression PERIOD TYPEID + | postfix_expression ARROW ID + | postfix_expression ARROW TYPEID + """ + field = c_ast.ID(p[3], self._token_coord(p, 3)) + p[0] = c_ast.StructRef(p[1], p[2], field, p[1].coord) + + def p_postfix_expression_5(self, p): + """ postfix_expression : postfix_expression PLUSPLUS + | postfix_expression MINUSMINUS + """ + p[0] = c_ast.UnaryOp('p' + p[2], p[1], p[1].coord) + + def p_postfix_expression_6(self, p): + """ postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close + | LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close + """ + p[0] = c_ast.CompoundLiteral(p[2], p[5]) + + def p_primary_expression_1(self, p): + """ primary_expression : identifier """ + p[0] = p[1] + + def p_primary_expression_2(self, p): + """ primary_expression : constant """ + p[0] = p[1] + + def p_primary_expression_3(self, p): + """ primary_expression : unified_string_literal + | unified_wstring_literal + """ + p[0] = p[1] + + def p_primary_expression_4(self, p): + """ primary_expression : LPAREN expression RPAREN """ + p[0] = p[2] + + def p_primary_expression_5(self, p): + """ primary_expression : OFFSETOF LPAREN type_name COMMA offsetof_member_designator RPAREN + """ + coord = self._token_coord(p, 1) + p[0] = c_ast.FuncCall(c_ast.ID(p[1], coord), + c_ast.ExprList([p[3], p[5]], coord), + coord) + + def p_offsetof_member_designator(self, p): + """ offsetof_member_designator : identifier + | offsetof_member_designator PERIOD identifier + | offsetof_member_designator LBRACKET expression RBRACKET + """ + if len(p) == 2: + p[0] = p[1] + elif len(p) == 4: + p[0] = c_ast.StructRef(p[1], p[2], p[3], p[1].coord) + elif len(p) == 5: + p[0] = c_ast.ArrayRef(p[1], p[3], p[1].coord) + else: + raise NotImplementedError("Unexpected parsing state. 
len(p): %u" % len(p))
+
+    def p_argument_expression_list(self, p):
+        """ argument_expression_list    : assignment_expression
+                                        | argument_expression_list COMMA assignment_expression
+        """
+        if len(p) == 2: # single expr
+            p[0] = c_ast.ExprList([p[1]], p[1].coord)
+        else:
+            p[1].exprs.append(p[3])
+            p[0] = p[1]
+
+    def p_identifier(self, p):
+        """ identifier  : ID """
+        p[0] = c_ast.ID(p[1], self._token_coord(p, 1))
+
+    def p_constant_1(self, p):
+        """ constant    : INT_CONST_DEC
+                        | INT_CONST_OCT
+                        | INT_CONST_HEX
+                        | INT_CONST_BIN
+                        | INT_CONST_CHAR
+        """
+        uCount = 0
+        lCount = 0
+        for x in p[1][-3:]:
+            if x in ('l', 'L'):
+                lCount += 1
+            elif x in ('u', 'U'):
+                uCount += 1
+        if uCount > 1:
+            raise ValueError('Constant cannot have more than one u/U suffix.')
+        elif lCount > 2:
+            raise ValueError('Constant cannot have more than two l/L suffixes.')
+        prefix = 'unsigned ' * uCount + 'long ' * lCount
+        p[0] = c_ast.Constant(
+            prefix + 'int', p[1], self._token_coord(p, 1))
+
+    def p_constant_2(self, p):
+        """ constant    : FLOAT_CONST
+                        | HEX_FLOAT_CONST
+        """
+        if p[1][-1] in ('f', 'F'):
+            t = 'float'
+        elif p[1][-1] in ('l', 'L'):
+            t = 'long double'
+        else:
+            t = 'double'
+
+        p[0] = c_ast.Constant(
+            t, p[1], self._token_coord(p, 1))
+
+    def p_constant_3(self, p):
+        """ constant    : CHAR_CONST
+                        | WCHAR_CONST
+                        | U8CHAR_CONST
+                        | U16CHAR_CONST
+                        | U32CHAR_CONST
+        """
+        p[0] = c_ast.Constant(
+            'char', p[1], self._token_coord(p, 1))
+
+    # The "unified" string and wstring literal rules are for supporting
+    # concatenation of adjacent string literals.
+    # I.e. "hello " "world" is seen by the C compiler as a single string literal
+    # with the value "hello world"
+    #
+    def p_unified_string_literal(self, p):
+        """ unified_string_literal   : STRING_LITERAL
+                                     | unified_string_literal STRING_LITERAL
+        """
+        if len(p) == 2: # single literal
+            p[0] = c_ast.Constant(
+                'string', p[1], self._token_coord(p, 1))
+        else:
+            p[1].value = p[1].value[:-1] + p[2][1:]
+            p[0] = p[1]
+
+    def p_unified_wstring_literal(self, p):
+        """ unified_wstring_literal : WSTRING_LITERAL
+                                    | U8STRING_LITERAL
+                                    | U16STRING_LITERAL
+                                    | U32STRING_LITERAL
+                                    | unified_wstring_literal WSTRING_LITERAL
+                                    | unified_wstring_literal U8STRING_LITERAL
+                                    | unified_wstring_literal U16STRING_LITERAL
+                                    | unified_wstring_literal U32STRING_LITERAL
+        """
+        if len(p) == 2: # single literal
+            p[0] = c_ast.Constant(
+                'string', p[1], self._token_coord(p, 1))
+        else:
+            p[1].value = p[1].value.rstrip()[:-1] + p[2][2:]
+            p[0] = p[1]
+
+    def p_brace_open(self, p):
+        """ brace_open  : LBRACE
+        """
+        p[0] = p[1]
+        p.set_lineno(0, p.lineno(1))
+
+    def p_brace_close(self, p):
+        """ brace_close : RBRACE
+        """
+        p[0] = p[1]
+        p.set_lineno(0, p.lineno(1))
+
+    def p_empty(self, p):
+        'empty : '
+        p[0] = None
+
+    def p_error(self, p):
+        # If error recovery is added here in the future, make sure
+        # _get_yacc_lookahead_token still works!
+        #
+        if p:
+            self._parse_error(
+                'before: %s' % p.value,
+                self._coord(lineno=p.lineno,
+                            column=self.clex.find_tok_column(p)))
+        else:
+            self._parse_error('At end of input', self.clex.filename)
diff --git a/python/pycparser/lextab.py b/python/pycparser/lextab.py
new file mode 100644
index 000000000..bfcaa211c
--- /dev/null
+++ b/python/pycparser/lextab.py
@@ -0,0 +1,10 @@
+# lextab.py. This file automatically created by PLY (version 3.10). Don't edit!
+_tabversion = '3.10' +_lextokens = set(('AND', 'ANDEQUAL', 'ARROW', 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CHAR_CONST', 'COLON', 'COMMA', 'CONDOP', 'CONST', 'CONTINUE', 'DEFAULT', 'DIVEQUAL', 'DIVIDE', 'DO', 'DOUBLE', 'ELLIPSIS', 'ELSE', 'ENUM', 'EQ', 'EQUALS', 'EXTERN', 'FLOAT', 'FLOAT_CONST', 'FOR', 'GE', 'GOTO', 'GT', 'HEX_FLOAT_CONST', 'ID', 'IF', 'INLINE', 'INT', 'INT_CONST_BIN', 'INT_CONST_CHAR', 'INT_CONST_DEC', 'INT_CONST_HEX', 'INT_CONST_OCT', 'LAND', 'LBRACE', 'LBRACKET', 'LE', 'LNOT', 'LONG', 'LOR', 'LPAREN', 'LSHIFT', 'LSHIFTEQUAL', 'LT', 'MINUS', 'MINUSEQUAL', 'MINUSMINUS', 'MOD', 'MODEQUAL', 'NE', 'NOT', 'OFFSETOF', 'OR', 'OREQUAL', 'PERIOD', 'PLUS', 'PLUSEQUAL', 'PLUSPLUS', 'PPHASH', 'PPPRAGMA', 'PPPRAGMASTR', 'RBRACE', 'RBRACKET', 'REGISTER', 'RESTRICT', 'RETURN', 'RPAREN', 'RSHIFT', 'RSHIFTEQUAL', 'SEMI', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRING_LITERAL', 'STRUCT', 'SWITCH', 'TIMES', 'TIMESEQUAL', 'TYPEDEF', 'TYPEID', 'U16CHAR_CONST', 'U16STRING_LITERAL', 'U32CHAR_CONST', 'U32STRING_LITERAL', 'U8CHAR_CONST', 'U8STRING_LITERAL', 'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WCHAR_CONST', 'WHILE', 'WSTRING_LITERAL', 'XOR', 'XOREQUAL', '_ALIGNAS', '_ALIGNOF', '_ATOMIC', '_BOOL', '_COMPLEX', '_NORETURN', '_PRAGMA', '_STATIC_ASSERT', '_THREAD_LOCAL', '__INT128')) +_lexreflags = 64 +_lexliterals = '' +_lexstateinfo = {'INITIAL': 'inclusive', 'ppline': 'exclusive', 'pppragma': 'exclusive'} +_lexstatere = {'INITIAL': [('(?P[ \\t]*\\#)|(?P\\n+)|(?P\\{)|(?P\\})|(?P((((([0-9]*\\.[0-9]+)|([0-9]+\\.))([eE][-+]?[0-9]+)?)|([0-9]+([eE][-+]?[0-9]+)))[FfLl]?))|(?P(0[xX]([0-9a-fA-F]+|((([0-9a-fA-F]+)?\\.[0-9a-fA-F]+)|([0-9a-fA-F]+\\.)))([pP][+-]?[0-9]+)[FfLl]?))|(?P0[xX][0-9a-fA-F]+(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P0[bB][01]+(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P0[0-7]*[89])|(?P\\/\\*)|(?P\\/\\/)|(?P0[0-7]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|(?P(0(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|([1-9][0-9]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?))|(?P\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F])))){2,4}\')|(?P\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?PL\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?Pu8\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?Pu\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?PU\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))\')|(?P(\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))*\\n)|(\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))*$))|(?P(\'([^\'\\\\\\n]|(\\\\(([a-wyzA-Z._~!=&\\^\\-\\\\?\'"]|x(?![0-9a-fA-F]))|(\\d+)(?!\\d)|(x[0-9a-fA-F]+)(?![0-9a-fA-F]))))[^\'\n]+\')|(\'\')|(\'([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?\'"x0-9])[^\'\\n]*\'))|(?PL"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?Pu8"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?Pu"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?PU"([^"\\\\\\n]
|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*([\\\\][^a-zA-Z._~^!=&\\^\\-\\\\?\'"x0-9])([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P[a-zA-Z_$][0-9a-zA-Z_$]*)|(?P"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P\\.\\.\\.)|(?P\\|\\|)|(?P\\+\\+)|(?P<<=)|(?P\\|=)|(?P\\+=)|(?P>>=)|(?P\\*=)|(?P\\^=)|(?P&=)|(?P->)|(?P\\?)|(?P/=)|(?P==)|(?P>=)|(?P&&)|(?P\\[)|(?P<=)|(?P\\()|(?P<<)|(?P-=)|(?P--)|(?P%=)|(?P!=)|(?P\\|)|(?P\\.)|(?P\\+)|(?P\\])|(?P\\))|(?P>>)|(?P\\*)|(?P\\^)|(?P&)|(?P:)|(?P,)|(?P/)|(?P=)|(?P>)|(?P!)|(?P<)|(?P-)|(?P%)|(?P~)|(?P;)', [None, ('t_PPHASH', 'PPHASH'), ('t_NEWLINE', 'NEWLINE'), ('t_LBRACE', 'LBRACE'), ('t_RBRACE', 'RBRACE'), ('t_FLOAT_CONST', 'FLOAT_CONST'), None, None, None, None, None, None, None, None, None, ('t_HEX_FLOAT_CONST', 'HEX_FLOAT_CONST'), None, None, None, None, None, None, None, ('t_INT_CONST_HEX', 'INT_CONST_HEX'), None, None, None, None, None, None, None, ('t_INT_CONST_BIN', 'INT_CONST_BIN'), None, None, None, None, None, None, None, ('t_BAD_CONST_OCT', 'BAD_CONST_OCT'), ('t_UNSUPPORTED_C_STYLE_COMMENT', 'UNSUPPORTED_C_STYLE_COMMENT'), ('t_UNSUPPORTED_CXX_STYLE_COMMENT', 'UNSUPPORTED_CXX_STYLE_COMMENT'), ('t_INT_CONST_OCT', 'INT_CONST_OCT'), None, None, None, None, None, None, None, ('t_INT_CONST_DEC', 'INT_CONST_DEC'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_INT_CONST_CHAR', 'INT_CONST_CHAR'), None, None, None, None, None, None, ('t_CHAR_CONST', 'CHAR_CONST'), None, None, None, None, None, None, ('t_WCHAR_CONST', 'WCHAR_CONST'), None, None, None, None, None, None, ('t_U8CHAR_CONST', 'U8CHAR_CONST'), None, None, None, None, None, None, ('t_U16CHAR_CONST', 'U16CHAR_CONST'), None, None, None, None, None, None, ('t_U32CHAR_CONST', 'U32CHAR_CONST'), None, None, None, None, None, None, ('t_UNMATCHED_QUOTE', 'UNMATCHED_QUOTE'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_BAD_CHAR_CONST', 'BAD_CHAR_CONST'), None, None, None, None, None, None, None, None, None, None, ('t_WSTRING_LITERAL', 'WSTRING_LITERAL'), None, None, ('t_U8STRING_LITERAL', 'U8STRING_LITERAL'), None, None, ('t_U16STRING_LITERAL', 'U16STRING_LITERAL'), None, None, ('t_U32STRING_LITERAL', 'U32STRING_LITERAL'), None, None, ('t_BAD_STRING_LITERAL', 'BAD_STRING_LITERAL'), None, None, None, None, None, ('t_ID', 'ID'), (None, 'STRING_LITERAL'), None, None, (None, 'ELLIPSIS'), (None, 'LOR'), (None, 'PLUSPLUS'), (None, 'LSHIFTEQUAL'), (None, 'OREQUAL'), (None, 'PLUSEQUAL'), (None, 'RSHIFTEQUAL'), (None, 'TIMESEQUAL'), (None, 'XOREQUAL'), (None, 'ANDEQUAL'), (None, 'ARROW'), (None, 'CONDOP'), (None, 'DIVEQUAL'), (None, 'EQ'), (None, 'GE'), (None, 'LAND'), (None, 'LBRACKET'), (None, 'LE'), (None, 'LPAREN'), (None, 'LSHIFT'), (None, 'MINUSEQUAL'), (None, 'MINUSMINUS'), (None, 'MODEQUAL'), (None, 'NE'), (None, 'OR'), (None, 'PERIOD'), (None, 'PLUS'), (None, 'RBRACKET'), (None, 'RPAREN'), (None, 'RSHIFT'), (None, 'TIMES'), (None, 'XOR'), (None, 'AND'), (None, 'COLON'), (None, 'COMMA'), (None, 'DIVIDE'), (None, 'EQUALS'), (None, 'GT'), (None, 'LNOT'), (None, 'LT'), (None, 'MINUS'), (None, 'MOD'), (None, 'NOT'), (None, 'SEMI')])], 'ppline': [('(?P"([^"\\\\\\n]|(\\\\[0-9a-zA-Z._~!=&\\^\\-\\\\?\'"]))*")|(?P(0(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?)|([1-9][0-9]*(([uU]ll)|([uU]LL)|(ll[uU]?)|(LL[uU]?)|([uU][lL])|([lL][uU]?)|[uU])?))|(?P\\n)|(?Pline)', [None, ('t_ppline_FILENAME', 'FILENAME'), None, None, 
('t_ppline_LINE_NUMBER', 'LINE_NUMBER'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_ppline_NEWLINE', 'NEWLINE'), ('t_ppline_PPLINE', 'PPLINE')])], 'pppragma': [('(?P\\n)|(?Ppragma)|(?P.+)', [None, ('t_pppragma_NEWLINE', 'NEWLINE'), ('t_pppragma_PPPRAGMA', 'PPPRAGMA'), ('t_pppragma_STR', 'STR')])]} +_lexstateignore = {'INITIAL': ' \t', 'ppline': ' \t', 'pppragma': ' \t'} +_lexstateerrorf = {'INITIAL': 't_error', 'ppline': 't_ppline_error', 'pppragma': 't_pppragma_error'} +_lexstateeoff = {} diff --git a/python/pycparser/ply/__init__.py b/python/pycparser/ply/__init__.py new file mode 100644 index 000000000..6e53cddcf --- /dev/null +++ b/python/pycparser/ply/__init__.py @@ -0,0 +1,5 @@ +# PLY package +# Author: David Beazley (dave@dabeaz.com) + +__version__ = '3.9' +__all__ = ['lex','yacc'] diff --git a/python/pycparser/ply/__pycache__/__init__.cpython-312.pyc b/python/pycparser/ply/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..92c4f23a1 Binary files /dev/null and b/python/pycparser/ply/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/pycparser/ply/__pycache__/cpp.cpython-312.pyc b/python/pycparser/ply/__pycache__/cpp.cpython-312.pyc new file mode 100644 index 000000000..4b1b64aa6 Binary files /dev/null and b/python/pycparser/ply/__pycache__/cpp.cpython-312.pyc differ diff --git a/python/pycparser/ply/__pycache__/ctokens.cpython-312.pyc b/python/pycparser/ply/__pycache__/ctokens.cpython-312.pyc new file mode 100644 index 000000000..036a19957 Binary files /dev/null and b/python/pycparser/ply/__pycache__/ctokens.cpython-312.pyc differ diff --git a/python/pycparser/ply/__pycache__/lex.cpython-312.pyc b/python/pycparser/ply/__pycache__/lex.cpython-312.pyc new file mode 100644 index 000000000..9195adf06 Binary files /dev/null and b/python/pycparser/ply/__pycache__/lex.cpython-312.pyc differ diff --git a/python/pycparser/ply/__pycache__/yacc.cpython-312.pyc b/python/pycparser/ply/__pycache__/yacc.cpython-312.pyc new file mode 100644 index 000000000..2eca14941 Binary files /dev/null and b/python/pycparser/ply/__pycache__/yacc.cpython-312.pyc differ diff --git a/python/pycparser/ply/__pycache__/ygen.cpython-312.pyc b/python/pycparser/ply/__pycache__/ygen.cpython-312.pyc new file mode 100644 index 000000000..823a9d582 Binary files /dev/null and b/python/pycparser/ply/__pycache__/ygen.cpython-312.pyc differ diff --git a/python/pycparser/ply/cpp.py b/python/pycparser/ply/cpp.py new file mode 100644 index 000000000..86273eac7 --- /dev/null +++ b/python/pycparser/ply/cpp.py @@ -0,0 +1,905 @@ +# ----------------------------------------------------------------------------- +# cpp.py +# +# Author: David Beazley (http://www.dabeaz.com) +# Copyright (C) 2017 +# All rights reserved +# +# This module implements an ANSI-C style lexical preprocessor for PLY. +# ----------------------------------------------------------------------------- +import sys + +# Some Python 3 compatibility shims +if sys.version_info.major < 3: + STRING_TYPES = (str, unicode) +else: + STRING_TYPES = str + xrange = range + +# ----------------------------------------------------------------------------- +# Default preprocessor lexer definitions. These tokens are enough to get +# a basic preprocessor working. 
Other modules may import these if they want.
+# -----------------------------------------------------------------------------
+
+tokens = (
+   'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
+)
+
+literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
+
+# Whitespace
+def t_CPP_WS(t):
+    r'\s+'
+    t.lexer.lineno += t.value.count("\n")
+    return t
+
+t_CPP_POUND = r'\#'
+t_CPP_DPOUND = r'\#\#'
+
+# Identifier
+t_CPP_ID = r'[A-Za-z_][\w_]*'
+
+# Integer literal
+def CPP_INTEGER(t):
+    r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
+    return t
+
+t_CPP_INTEGER = CPP_INTEGER
+
+# Floating literal
+t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+def t_CPP_STRING(t):
+    r'\"([^\\\n]|(\\(.|\n)))*?\"'
+    t.lexer.lineno += t.value.count("\n")
+    return t
+
+# Character constant 'c' or L'c'
+def t_CPP_CHAR(t):
+    r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
+    t.lexer.lineno += t.value.count("\n")
+    return t
+
+# Comment
+def t_CPP_COMMENT1(t):
+    r'(/\*(.|\n)*?\*/)'
+    ncr = t.value.count("\n")
+    t.lexer.lineno += ncr
+    # replace with one space or a number of '\n'
+    t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
+    return t
+
+# Line comment
+def t_CPP_COMMENT2(t):
+    r'(//.*?(\n|$))'
+    # replace with '\n'
+    t.type = 'CPP_WS'; t.value = '\n'
+    return t
+
+def t_error(t):
+    t.type = t.value[0]
+    t.value = t.value[0]
+    t.lexer.skip(1)
+    return t
+
+import re
+import copy
+import time
+import os.path
+
+# -----------------------------------------------------------------------------
+# trigraph()
+#
+# Given an input string, this function replaces all trigraph sequences.
+# The following mapping is used:
+#
+#     ??=    #
+#     ??/    \
+#     ??'    ^
+#     ??(    [
+#     ??)    ]
+#     ??!    |
+#     ??<    {
+#     ??>    }
+#     ??-    ~
+# -----------------------------------------------------------------------------
+
+_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
+_trigraph_rep = {
+    '=':'#',
+    '/':'\\',
+    "'":'^',
+    '(':'[',
+    ')':']',
+    '!':'|',
+    '<':'{',
+    '>':'}',
+    '-':'~'
+}
+
+def trigraph(input):
+    return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
+
+# ------------------------------------------------------------------
+# Macro object
+#
+# This object holds information about preprocessor macros
+#
+#    .name      - Macro name (string)
+#    .value     - Macro value (a list of tokens)
+#    .arglist   - List of argument names
+#    .variadic  - Boolean indicating whether or not variadic macro
+#    .vararg    - Name of the variadic parameter
+#
+# When a macro is created, the macro replacement token sequence is
+# pre-scanned and used to create patch lists that are later used
+# during macro expansion
+# ------------------------------------------------------------------
+
+class Macro(object):
+    def __init__(self,name,value,arglist=None,variadic=False):
+        self.name = name
+        self.value = value
+        self.arglist = arglist
+        self.variadic = variadic
+        if variadic:
+            self.vararg = arglist[-1]
+        self.source = None
+
+# ------------------------------------------------------------------
+# Preprocessor object
+#
+# Object representing a preprocessor.
Contains macro definitions, +# include directories, and other information +# ------------------------------------------------------------------ + +class Preprocessor(object): + def __init__(self,lexer=None): + if lexer is None: + lexer = lex.lexer + self.lexer = lexer + self.macros = { } + self.path = [] + self.temp_path = [] + + # Probe the lexer for selected tokens + self.lexprobe() + + tm = time.localtime() + self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm)) + self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm)) + self.parser = None + + # ----------------------------------------------------------------------------- + # tokenize() + # + # Utility function. Given a string of text, tokenize into a list of tokens + # ----------------------------------------------------------------------------- + + def tokenize(self,text): + tokens = [] + self.lexer.input(text) + while True: + tok = self.lexer.token() + if not tok: break + tokens.append(tok) + return tokens + + # --------------------------------------------------------------------- + # error() + # + # Report a preprocessor error/warning of some kind + # ---------------------------------------------------------------------- + + def error(self,file,line,msg): + print("%s:%d %s" % (file,line,msg)) + + # ---------------------------------------------------------------------- + # lexprobe() + # + # This method probes the preprocessor lexer object to discover + # the token types of symbols that are important to the preprocessor. + # If this works right, the preprocessor will simply "work" + # with any suitable lexer regardless of how tokens have been named. + # ---------------------------------------------------------------------- + + def lexprobe(self): + + # Determine the token type for identifiers + self.lexer.input("identifier") + tok = self.lexer.token() + if not tok or tok.value != "identifier": + print("Couldn't determine identifier type") + else: + self.t_ID = tok.type + + # Determine the token type for integers + self.lexer.input("12345") + tok = self.lexer.token() + if not tok or int(tok.value) != 12345: + print("Couldn't determine integer type") + else: + self.t_INTEGER = tok.type + self.t_INTEGER_TYPE = type(tok.value) + + # Determine the token type for strings enclosed in double quotes + self.lexer.input("\"filename\"") + tok = self.lexer.token() + if not tok or tok.value != "\"filename\"": + print("Couldn't determine string type") + else: + self.t_STRING = tok.type + + # Determine the token type for whitespace--if any + self.lexer.input(" ") + tok = self.lexer.token() + if not tok or tok.value != " ": + self.t_SPACE = None + else: + self.t_SPACE = tok.type + + # Determine the token type for newlines + self.lexer.input("\n") + tok = self.lexer.token() + if not tok or tok.value != "\n": + self.t_NEWLINE = None + print("Couldn't determine token for newlines") + else: + self.t_NEWLINE = tok.type + + self.t_WS = (self.t_SPACE, self.t_NEWLINE) + + # Check for other characters used by the preprocessor + chars = [ '<','>','#','##','\\','(',')',',','.'] + for c in chars: + self.lexer.input(c) + tok = self.lexer.token() + if not tok or tok.value != c: + print("Unable to lex '%s' required for preprocessor" % c) + + # ---------------------------------------------------------------------- + # add_path() + # + # Adds a search path to the preprocessor. 
+    # ----------------------------------------------------------------------
+
+    def add_path(self,path):
+        self.path.append(path)
+
+    # ----------------------------------------------------------------------
+    # group_lines()
+    #
+    # Given an input string, this function splits it into lines. Trailing whitespace
+    # is removed. Any line ending with \ is grouped with the next line. This
+    # function forms the lowest level of the preprocessor---grouping text into
+    # a line-by-line format.
+    # ----------------------------------------------------------------------
+
+    def group_lines(self,input):
+        lex = self.lexer.clone()
+        lines = [x.rstrip() for x in input.splitlines()]
+        for i in xrange(len(lines)):
+            j = i+1
+            while lines[i].endswith('\\') and (j < len(lines)):
+                lines[i] = lines[i][:-1]+lines[j]
+                lines[j] = ""
+                j += 1
+
+        input = "\n".join(lines)
+        lex.input(input)
+        lex.lineno = 1
+
+        current_line = []
+        while True:
+            tok = lex.token()
+            if not tok:
+                break
+            current_line.append(tok)
+            if tok.type in self.t_WS and '\n' in tok.value:
+                yield current_line
+                current_line = []
+
+        if current_line:
+            yield current_line
+
+    # ----------------------------------------------------------------------
+    # tokenstrip()
+    #
+    # Remove leading/trailing whitespace tokens from a token list
+    # ----------------------------------------------------------------------
+
+    def tokenstrip(self,tokens):
+        i = 0
+        while i < len(tokens) and tokens[i].type in self.t_WS:
+            i += 1
+        del tokens[:i]
+        i = len(tokens)-1
+        while i >= 0 and tokens[i].type in self.t_WS:
+            i -= 1
+        del tokens[i+1:]
+        return tokens
+
+
+    # ----------------------------------------------------------------------
+    # collect_args()
+    #
+    # Collects comma separated arguments from a list of tokens. The arguments
+    # must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
+    # where tokencount is the number of tokens consumed, args is a list of arguments,
+    # and positions is a list of integers containing the starting index of each
+    # argument. Each argument is represented by a list of tokens.
+    #
+    # When collecting arguments, leading and trailing whitespace is removed
+    # from each argument.
+    #
+    # This function properly handles nested parentheses and commas---these do not
+    # define new arguments.
+    # ----------------------------------------------------------------------
+
+    def collect_args(self,tokenlist):
+        args = []
+        positions = []
+        current_arg = []
+        nesting = 1
+        tokenlen = len(tokenlist)
+
+        # Search for the opening '('.
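+        # (A hedged illustration, editor's note: for tokens lexed from
+        # "(a, (b, c), d)" this returns three arguments, [a], [(b, c)]
+        # and [d]; the comma inside the nested parentheses does not start
+        # a new argument because "nesting" is above one at that point.)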
+ i = 0 + while (i < tokenlen) and (tokenlist[i].type in self.t_WS): + i += 1 + + if (i < tokenlen) and (tokenlist[i].value == '('): + positions.append(i+1) + else: + self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments") + return 0, [], [] + + i += 1 + + while i < tokenlen: + t = tokenlist[i] + if t.value == '(': + current_arg.append(t) + nesting += 1 + elif t.value == ')': + nesting -= 1 + if nesting == 0: + if current_arg: + args.append(self.tokenstrip(current_arg)) + positions.append(i) + return i+1,args,positions + current_arg.append(t) + elif t.value == ',' and nesting == 1: + args.append(self.tokenstrip(current_arg)) + positions.append(i+1) + current_arg = [] + else: + current_arg.append(t) + i += 1 + + # Missing end argument + self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments") + return 0, [],[] + + # ---------------------------------------------------------------------- + # macro_prescan() + # + # Examine the macro value (token sequence) and identify patch points + # This is used to speed up macro expansion later on---we'll know + # right away where to apply patches to the value to form the expansion + # ---------------------------------------------------------------------- + + def macro_prescan(self,macro): + macro.patch = [] # Standard macro arguments + macro.str_patch = [] # String conversion expansion + macro.var_comma_patch = [] # Variadic macro comma patch + i = 0 + while i < len(macro.value): + if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist: + argnum = macro.arglist.index(macro.value[i].value) + # Conversion of argument to a string + if i > 0 and macro.value[i-1].value == '#': + macro.value[i] = copy.copy(macro.value[i]) + macro.value[i].type = self.t_STRING + del macro.value[i-1] + macro.str_patch.append((argnum,i-1)) + continue + # Concatenation + elif (i > 0 and macro.value[i-1].value == '##'): + macro.patch.append(('c',argnum,i-1)) + del macro.value[i-1] + continue + elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'): + macro.patch.append(('c',argnum,i)) + i += 1 + continue + # Standard expansion + else: + macro.patch.append(('e',argnum,i)) + elif macro.value[i].value == '##': + if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \ + ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \ + (macro.value[i+1].value == macro.vararg): + macro.var_comma_patch.append(i-1) + i += 1 + macro.patch.sort(key=lambda x: x[2],reverse=True) + + # ---------------------------------------------------------------------- + # macro_expand_args() + # + # Given a Macro and list of arguments (each a token list), this method + # returns an expanded version of a macro. The return value is a token sequence + # representing the replacement macro tokens + # ---------------------------------------------------------------------- + + def macro_expand_args(self,macro,args): + # Make a copy of the macro token sequence + rep = [copy.copy(_x) for _x in macro.value] + + # Make string expansion patches. These do not alter the length of the replacement sequence + + str_expansion = {} + for argnum, i in macro.str_patch: + if argnum not in str_expansion: + str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\") + rep[i] = copy.copy(rep[i]) + rep[i].value = str_expansion[argnum] + + # Make the variadic macro comma patch. 
If the variadic macro argument is empty, we get rid + comma_patch = False + if macro.variadic and not args[-1]: + for i in macro.var_comma_patch: + rep[i] = None + comma_patch = True + + # Make all other patches. The order of these matters. It is assumed that the patch list + # has been sorted in reverse order of patch location since replacements will cause the + # size of the replacement sequence to expand from the patch point. + + expanded = { } + for ptype, argnum, i in macro.patch: + # Concatenation. Argument is left unexpanded + if ptype == 'c': + rep[i:i+1] = args[argnum] + # Normal expansion. Argument is macro expanded first + elif ptype == 'e': + if argnum not in expanded: + expanded[argnum] = self.expand_macros(args[argnum]) + rep[i:i+1] = expanded[argnum] + + # Get rid of removed comma if necessary + if comma_patch: + rep = [_i for _i in rep if _i] + + return rep + + + # ---------------------------------------------------------------------- + # expand_macros() + # + # Given a list of tokens, this function performs macro expansion. + # The expanded argument is a dictionary that contains macros already + # expanded. This is used to prevent infinite recursion. + # ---------------------------------------------------------------------- + + def expand_macros(self,tokens,expanded=None): + if expanded is None: + expanded = {} + i = 0 + while i < len(tokens): + t = tokens[i] + if t.type == self.t_ID: + if t.value in self.macros and t.value not in expanded: + # Yes, we found a macro match + expanded[t.value] = True + + m = self.macros[t.value] + if not m.arglist: + # A simple macro + ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded) + for e in ex: + e.lineno = t.lineno + tokens[i:i+1] = ex + i += len(ex) + else: + # A macro with arguments + j = i + 1 + while j < len(tokens) and tokens[j].type in self.t_WS: + j += 1 + if tokens[j].value == '(': + tokcount,args,positions = self.collect_args(tokens[j:]) + if not m.variadic and len(args) != len(m.arglist): + self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist))) + i = j + tokcount + elif m.variadic and len(args) < len(m.arglist)-1: + if len(m.arglist) > 2: + self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1)) + else: + self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1)) + i = j + tokcount + else: + if m.variadic: + if len(args) == len(m.arglist)-1: + args.append([]) + else: + args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1] + del args[len(m.arglist):] + + # Get macro replacement text + rep = self.macro_expand_args(m,args) + rep = self.expand_macros(rep,expanded) + for r in rep: + r.lineno = t.lineno + tokens[i:j+tokcount] = rep + i += len(rep) + del expanded[t.value] + continue + elif t.value == '__LINE__': + t.type = self.t_INTEGER + t.value = self.t_INTEGER_TYPE(t.lineno) + + i += 1 + return tokens + + # ---------------------------------------------------------------------- + # evalexpr() + # + # Evaluate an expression token sequence for the purposes of evaluating + # integral expressions. 
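
Review note: the prescan/patch pipeline above is easiest to check end to end. A hypothetical session, reusing the illustrative lexer sketched earlier in this review:

    p = Preprocessor(lex.lex())
    p.define('SQUARE(x) ((x) * (x))')         # macro_prescan() marks the x patch points
    out = p.expand_macros(p.tokenize('SQUARE(n + 1)'))
    print(''.join(tok.value for tok in out))  # -> ((n + 1) * (n + 1))
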
+ # ---------------------------------------------------------------------- + + def evalexpr(self,tokens): + # tokens = tokenize(line) + # Search for defined macros + i = 0 + while i < len(tokens): + if tokens[i].type == self.t_ID and tokens[i].value == 'defined': + j = i + 1 + needparen = False + result = "0L" + while j < len(tokens): + if tokens[j].type in self.t_WS: + j += 1 + continue + elif tokens[j].type == self.t_ID: + if tokens[j].value in self.macros: + result = "1L" + else: + result = "0L" + if not needparen: break + elif tokens[j].value == '(': + needparen = True + elif tokens[j].value == ')': + break + else: + self.error(self.source,tokens[i].lineno,"Malformed defined()") + j += 1 + tokens[i].type = self.t_INTEGER + tokens[i].value = self.t_INTEGER_TYPE(result) + del tokens[i+1:j+1] + i += 1 + tokens = self.expand_macros(tokens) + for i,t in enumerate(tokens): + if t.type == self.t_ID: + tokens[i] = copy.copy(t) + tokens[i].type = self.t_INTEGER + tokens[i].value = self.t_INTEGER_TYPE("0L") + elif t.type == self.t_INTEGER: + tokens[i] = copy.copy(t) + # Strip off any trailing suffixes + tokens[i].value = str(tokens[i].value) + while tokens[i].value[-1] not in "0123456789abcdefABCDEF": + tokens[i].value = tokens[i].value[:-1] + + expr = "".join([str(x.value) for x in tokens]) + expr = expr.replace("&&"," and ") + expr = expr.replace("||"," or ") + expr = expr.replace("!"," not ") + try: + result = eval(expr) + except Exception: + self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression") + result = 0 + return result + + # ---------------------------------------------------------------------- + # parsegen() + # + # Parse an input string/ + # ---------------------------------------------------------------------- + def parsegen(self,input,source=None): + + # Replace trigraph sequences + t = trigraph(input) + lines = self.group_lines(t) + + if not source: + source = "" + + self.define("__FILE__ \"%s\"" % source) + + self.source = source + chunk = [] + enable = True + iftrigger = False + ifstack = [] + + for x in lines: + for i,tok in enumerate(x): + if tok.type not in self.t_WS: break + if tok.value == '#': + # Preprocessor directive + + # insert necessary whitespace instead of eaten tokens + for tok in x: + if tok.type in self.t_WS and '\n' in tok.value: + chunk.append(tok) + + dirtokens = self.tokenstrip(x[i+1:]) + if dirtokens: + name = dirtokens[0].value + args = self.tokenstrip(dirtokens[1:]) + else: + name = "" + args = [] + + if name == 'define': + if enable: + for tok in self.expand_macros(chunk): + yield tok + chunk = [] + self.define(args) + elif name == 'include': + if enable: + for tok in self.expand_macros(chunk): + yield tok + chunk = [] + oldfile = self.macros['__FILE__'] + for tok in self.include(args): + yield tok + self.macros['__FILE__'] = oldfile + self.source = source + elif name == 'undef': + if enable: + for tok in self.expand_macros(chunk): + yield tok + chunk = [] + self.undef(args) + elif name == 'ifdef': + ifstack.append((enable,iftrigger)) + if enable: + if not args[0].value in self.macros: + enable = False + iftrigger = False + else: + iftrigger = True + elif name == 'ifndef': + ifstack.append((enable,iftrigger)) + if enable: + if args[0].value in self.macros: + enable = False + iftrigger = False + else: + iftrigger = True + elif name == 'if': + ifstack.append((enable,iftrigger)) + if enable: + result = self.evalexpr(args) + if not result: + enable = False + iftrigger = False + else: + iftrigger = True + elif name == 'elif': + if ifstack: + 
if ifstack[-1][0]: # We only pay attention if outer "if" allows this + if enable: # If already true, we flip enable False + enable = False + elif not iftrigger: # If False, but not triggered yet, we'll check expression + result = self.evalexpr(args) + if result: + enable = True + iftrigger = True + else: + self.error(self.source,dirtokens[0].lineno,"Misplaced #elif") + + elif name == 'else': + if ifstack: + if ifstack[-1][0]: + if enable: + enable = False + elif not iftrigger: + enable = True + iftrigger = True + else: + self.error(self.source,dirtokens[0].lineno,"Misplaced #else") + + elif name == 'endif': + if ifstack: + enable,iftrigger = ifstack.pop() + else: + self.error(self.source,dirtokens[0].lineno,"Misplaced #endif") + else: + # Unknown preprocessor directive + pass + + else: + # Normal text + if enable: + chunk.extend(x) + + for tok in self.expand_macros(chunk): + yield tok + chunk = [] + + # ---------------------------------------------------------------------- + # include() + # + # Implementation of file-inclusion + # ---------------------------------------------------------------------- + + def include(self,tokens): + # Try to extract the filename and then process an include file + if not tokens: + return + if tokens: + if tokens[0].value != '<' and tokens[0].type != self.t_STRING: + tokens = self.expand_macros(tokens) + + if tokens[0].value == '<': + # Include <...> + i = 1 + while i < len(tokens): + if tokens[i].value == '>': + break + i += 1 + else: + print("Malformed #include <...>") + return + filename = "".join([x.value for x in tokens[1:i]]) + path = self.path + [""] + self.temp_path + elif tokens[0].type == self.t_STRING: + filename = tokens[0].value[1:-1] + path = self.temp_path + [""] + self.path + else: + print("Malformed #include statement") + return + for p in path: + iname = os.path.join(p,filename) + try: + data = open(iname,"r").read() + dname = os.path.dirname(iname) + if dname: + self.temp_path.insert(0,dname) + for tok in self.parsegen(data,filename): + yield tok + if dname: + del self.temp_path[0] + break + except IOError: + pass + else: + print("Couldn't find '%s'" % filename) + + # ---------------------------------------------------------------------- + # define() + # + # Define a new macro + # ---------------------------------------------------------------------- + + def define(self,tokens): + if isinstance(tokens,STRING_TYPES): + tokens = self.tokenize(tokens) + + linetok = tokens + try: + name = linetok[0] + if len(linetok) > 1: + mtype = linetok[1] + else: + mtype = None + if not mtype: + m = Macro(name.value,[]) + self.macros[name.value] = m + elif mtype.type in self.t_WS: + # A normal macro + m = Macro(name.value,self.tokenstrip(linetok[2:])) + self.macros[name.value] = m + elif mtype.value == '(': + # A macro with arguments + tokcount, args, positions = self.collect_args(linetok[1:]) + variadic = False + for a in args: + if variadic: + print("No more arguments may follow a variadic argument") + break + astr = "".join([str(_i.value) for _i in a]) + if astr == "...": + variadic = True + a[0].type = self.t_ID + a[0].value = '__VA_ARGS__' + variadic = True + del a[1:] + continue + elif astr[-3:] == "..." and a[0].type == self.t_ID: + variadic = True + del a[1:] + # If, for some reason, "." 
is part of the identifier, strip off the name for the purposes + # of macro expansion + if a[0].value[-3:] == '...': + a[0].value = a[0].value[:-3] + continue + if len(a) > 1 or a[0].type != self.t_ID: + print("Invalid macro argument") + break + else: + mvalue = self.tokenstrip(linetok[1+tokcount:]) + i = 0 + while i < len(mvalue): + if i+1 < len(mvalue): + if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##': + del mvalue[i] + continue + elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS: + del mvalue[i+1] + i += 1 + m = Macro(name.value,mvalue,[x[0].value for x in args],variadic) + self.macro_prescan(m) + self.macros[name.value] = m + else: + print("Bad macro definition") + except LookupError: + print("Bad macro definition") + + # ---------------------------------------------------------------------- + # undef() + # + # Undefine a macro + # ---------------------------------------------------------------------- + + def undef(self,tokens): + id = tokens[0].value + try: + del self.macros[id] + except LookupError: + pass + + # ---------------------------------------------------------------------- + # parse() + # + # Parse input text. + # ---------------------------------------------------------------------- + def parse(self,input,source=None,ignore={}): + self.ignore = ignore + self.parser = self.parsegen(input,source) + + # ---------------------------------------------------------------------- + # token() + # + # Method to return individual tokens + # ---------------------------------------------------------------------- + def token(self): + try: + while True: + tok = next(self.parser) + if tok.type not in self.ignore: return tok + except StopIteration: + self.parser = None + return None + +if __name__ == '__main__': + import ply.lex as lex + lexer = lex.lex() + + # Run a preprocessor + import sys + f = open(sys.argv[1]) + input = f.read() + + p = Preprocessor(lexer) + p.parse(input,sys.argv[1]) + while True: + tok = p.token() + if not tok: break + print(p.source, tok) diff --git a/python/pycparser/ply/ctokens.py b/python/pycparser/ply/ctokens.py new file mode 100644 index 000000000..f6f6952d6 --- /dev/null +++ b/python/pycparser/ply/ctokens.py @@ -0,0 +1,133 @@ +# ---------------------------------------------------------------------- +# ctokens.py +# +# Token specifications for symbols in ANSI C and C++. This file is +# meant to be used as a library in other tokenizers. +# ---------------------------------------------------------------------- + +# Reserved words + +tokens = [ + # Literals (identifier, integer constant, float constant, string constant, char const) + 'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER', + + # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=) + 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO', + 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', + 'LOR', 'LAND', 'LNOT', + 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', + + # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=) + 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', + 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', + + # Increment/decrement (++,--) + 'INCREMENT', 'DECREMENT', + + # Structure dereference (->) + 'ARROW', + + # Ternary operator (?) + 'TERNARY', + + # Delimeters ( ) [ ] { } , . ; : + 'LPAREN', 'RPAREN', + 'LBRACKET', 'RBRACKET', + 'LBRACE', 'RBRACE', + 'COMMA', 'PERIOD', 'SEMI', 'COLON', + + # Ellipsis (...) 
+ 'ELLIPSIS', +] + +# Operators +t_PLUS = r'\+' +t_MINUS = r'-' +t_TIMES = r'\*' +t_DIVIDE = r'/' +t_MODULO = r'%' +t_OR = r'\|' +t_AND = r'&' +t_NOT = r'~' +t_XOR = r'\^' +t_LSHIFT = r'<<' +t_RSHIFT = r'>>' +t_LOR = r'\|\|' +t_LAND = r'&&' +t_LNOT = r'!' +t_LT = r'<' +t_GT = r'>' +t_LE = r'<=' +t_GE = r'>=' +t_EQ = r'==' +t_NE = r'!=' + +# Assignment operators + +t_EQUALS = r'=' +t_TIMESEQUAL = r'\*=' +t_DIVEQUAL = r'/=' +t_MODEQUAL = r'%=' +t_PLUSEQUAL = r'\+=' +t_MINUSEQUAL = r'-=' +t_LSHIFTEQUAL = r'<<=' +t_RSHIFTEQUAL = r'>>=' +t_ANDEQUAL = r'&=' +t_OREQUAL = r'\|=' +t_XOREQUAL = r'\^=' + +# Increment/decrement +t_INCREMENT = r'\+\+' +t_DECREMENT = r'--' + +# -> +t_ARROW = r'->' + +# ? +t_TERNARY = r'\?' + +# Delimeters +t_LPAREN = r'\(' +t_RPAREN = r'\)' +t_LBRACKET = r'\[' +t_RBRACKET = r'\]' +t_LBRACE = r'\{' +t_RBRACE = r'\}' +t_COMMA = r',' +t_PERIOD = r'\.' +t_SEMI = r';' +t_COLON = r':' +t_ELLIPSIS = r'\.\.\.' + +# Identifiers +t_ID = r'[A-Za-z_][A-Za-z0-9_]*' + +# Integer literal +t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?' + +# Floating literal +t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' + +# String literal +t_STRING = r'\"([^\\\n]|(\\.))*?\"' + +# Character constant 'c' or L'c' +t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\'' + +# Comment (C-Style) +def t_COMMENT(t): + r'/\*(.|\n)*?\*/' + t.lexer.lineno += t.value.count('\n') + return t + +# Comment (C++-Style) +def t_CPPCOMMENT(t): + r'//.*\n' + t.lexer.lineno += 1 + return t + + + + + + diff --git a/python/pycparser/ply/lex.py b/python/pycparser/ply/lex.py new file mode 100644 index 000000000..dfc513948 --- /dev/null +++ b/python/pycparser/ply/lex.py @@ -0,0 +1,1099 @@ +# ----------------------------------------------------------------------------- +# ply: lex.py +# +# Copyright (C) 2001-2017 +# David M. Beazley (Dabeaz LLC) +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the David Beazley or Dabeaz LLC may be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
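
Review note on ctokens.py above: the file is meant to be star-imported into a host tokenizer module, which then supplies only what is missing (an ignore set, an error rule, reserved-word handling). A minimal consumer sketch; note the comment rules return COMMENT/CPPCOMMENT types that are not in `tokens`, so real use would extend the token list or drop the `return`:

    import ply.lex as lex
    from ply.ctokens import *      # brings in `tokens` and all t_* rules

    t_ignore = ' \t'

    def t_error(t):
        t.lexer.skip(1)

    clexer = lex.lex()
    clexer.input('x += 42; y->z')
    for tok in clexer:
        print(tok.type, tok.value)  # ID x, PLUSEQUAL +=, INTEGER 42, SEMI ;, ...
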
+# ----------------------------------------------------------------------------- + +__version__ = '3.10' +__tabversion__ = '3.10' + +import re +import sys +import types +import copy +import os +import inspect + +# This tuple contains known string types +try: + # Python 2.6 + StringTypes = (types.StringType, types.UnicodeType) +except AttributeError: + # Python 3.0 + StringTypes = (str, bytes) + +# This regular expression is used to match valid token names +_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') + +# Exception thrown when invalid token encountered and no default error +# handler is defined. +class LexError(Exception): + def __init__(self, message, s): + self.args = (message,) + self.text = s + + +# Token class. This class is used to represent the tokens produced. +class LexToken(object): + def __str__(self): + return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) + + def __repr__(self): + return str(self) + + +# This object is a stand-in for a logging object created by the +# logging module. + +class PlyLogger(object): + def __init__(self, f): + self.f = f + + def critical(self, msg, *args, **kwargs): + self.f.write((msg % args) + '\n') + + def warning(self, msg, *args, **kwargs): + self.f.write('WARNING: ' + (msg % args) + '\n') + + def error(self, msg, *args, **kwargs): + self.f.write('ERROR: ' + (msg % args) + '\n') + + info = critical + debug = critical + + +# Null logger is used when no output is generated. Does nothing. +class NullLogger(object): + def __getattribute__(self, name): + return self + + def __call__(self, *args, **kwargs): + return self + + +# ----------------------------------------------------------------------------- +# === Lexing Engine === +# +# The following Lexer class implements the lexer runtime. There are only +# a few public methods and attributes: +# +# input() - Store a new string in the lexer +# token() - Get the next token +# clone() - Clone the lexer +# +# lineno - Current line number +# lexpos - Current position in the input string +# ----------------------------------------------------------------------------- + +class Lexer: + def __init__(self): + self.lexre = None # Master regular expression. 
This is a list of + # tuples (re, findex) where re is a compiled + # regular expression and findex is a list + # mapping regex group numbers to rules + self.lexretext = None # Current regular expression strings + self.lexstatere = {} # Dictionary mapping lexer states to master regexs + self.lexstateretext = {} # Dictionary mapping lexer states to regex strings + self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names + self.lexstate = 'INITIAL' # Current lexer state + self.lexstatestack = [] # Stack of lexer states + self.lexstateinfo = None # State information + self.lexstateignore = {} # Dictionary of ignored characters for each state + self.lexstateerrorf = {} # Dictionary of error functions for each state + self.lexstateeoff = {} # Dictionary of eof functions for each state + self.lexreflags = 0 # Optional re compile flags + self.lexdata = None # Actual input data (as a string) + self.lexpos = 0 # Current position in input text + self.lexlen = 0 # Length of the input text + self.lexerrorf = None # Error rule (if any) + self.lexeoff = None # EOF rule (if any) + self.lextokens = None # List of valid tokens + self.lexignore = '' # Ignored characters + self.lexliterals = '' # Literal characters that can be passed through + self.lexmodule = None # Module + self.lineno = 1 # Current line number + self.lexoptimize = False # Optimized mode + + def clone(self, object=None): + c = copy.copy(self) + + # If the object parameter has been supplied, it means we are attaching the + # lexer to a new object. In this case, we have to rebind all methods in + # the lexstatere and lexstateerrorf tables. + + if object: + newtab = {} + for key, ritem in self.lexstatere.items(): + newre = [] + for cre, findex in ritem: + newfindex = [] + for f in findex: + if not f or not f[0]: + newfindex.append(f) + continue + newfindex.append((getattr(object, f[0].__name__), f[1])) + newre.append((cre, newfindex)) + newtab[key] = newre + c.lexstatere = newtab + c.lexstateerrorf = {} + for key, ef in self.lexstateerrorf.items(): + c.lexstateerrorf[key] = getattr(object, ef.__name__) + c.lexmodule = object + return c + + # ------------------------------------------------------------ + # writetab() - Write lexer information to a table file + # ------------------------------------------------------------ + def writetab(self, lextab, outputdir=''): + if isinstance(lextab, types.ModuleType): + raise IOError("Won't overwrite existing lextab module") + basetabmodule = lextab.split('.')[-1] + filename = os.path.join(outputdir, basetabmodule) + '.py' + with open(filename, 'w') as tf: + tf.write('# %s.py. This file automatically created by PLY (version %s). 
Don\'t edit!\n' % (basetabmodule, __version__)) + tf.write('_tabversion = %s\n' % repr(__tabversion__)) + tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens)))) + tf.write('_lexreflags = %s\n' % repr(self.lexreflags)) + tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) + tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) + + # Rewrite the lexstatere table, replacing function objects with function names + tabre = {} + for statename, lre in self.lexstatere.items(): + titem = [] + for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): + titem.append((retext, _funcs_to_names(func, renames))) + tabre[statename] = titem + + tf.write('_lexstatere = %s\n' % repr(tabre)) + tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) + + taberr = {} + for statename, ef in self.lexstateerrorf.items(): + taberr[statename] = ef.__name__ if ef else None + tf.write('_lexstateerrorf = %s\n' % repr(taberr)) + + tabeof = {} + for statename, ef in self.lexstateeoff.items(): + tabeof[statename] = ef.__name__ if ef else None + tf.write('_lexstateeoff = %s\n' % repr(tabeof)) + + # ------------------------------------------------------------ + # readtab() - Read lexer information from a tab file + # ------------------------------------------------------------ + def readtab(self, tabfile, fdict): + if isinstance(tabfile, types.ModuleType): + lextab = tabfile + else: + exec('import %s' % tabfile) + lextab = sys.modules[tabfile] + + if getattr(lextab, '_tabversion', '0.0') != __tabversion__: + raise ImportError('Inconsistent PLY version') + + self.lextokens = lextab._lextokens + self.lexreflags = lextab._lexreflags + self.lexliterals = lextab._lexliterals + self.lextokens_all = self.lextokens | set(self.lexliterals) + self.lexstateinfo = lextab._lexstateinfo + self.lexstateignore = lextab._lexstateignore + self.lexstatere = {} + self.lexstateretext = {} + for statename, lre in lextab._lexstatere.items(): + titem = [] + txtitem = [] + for pat, func_name in lre: + titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict))) + + self.lexstatere[statename] = titem + self.lexstateretext[statename] = txtitem + + self.lexstateerrorf = {} + for statename, ef in lextab._lexstateerrorf.items(): + self.lexstateerrorf[statename] = fdict[ef] + + self.lexstateeoff = {} + for statename, ef in lextab._lexstateeoff.items(): + self.lexstateeoff[statename] = fdict[ef] + + self.begin('INITIAL') + + # ------------------------------------------------------------ + # input() - Push a new string into the lexer + # ------------------------------------------------------------ + def input(self, s): + # Pull off the first character to see if s looks like a string + c = s[:1] + if not isinstance(c, StringTypes): + raise ValueError('Expected a string') + self.lexdata = s + self.lexpos = 0 + self.lexlen = len(s) + + # ------------------------------------------------------------ + # begin() - Changes the lexing state + # ------------------------------------------------------------ + def begin(self, state): + if state not in self.lexstatere: + raise ValueError('Undefined state') + self.lexre = self.lexstatere[state] + self.lexretext = self.lexstateretext[state] + self.lexignore = self.lexstateignore.get(state, '') + self.lexerrorf = self.lexstateerrorf.get(state, None) + self.lexeoff = self.lexstateeoff.get(state, None) + self.lexstate = state + + # ------------------------------------------------------------ + # push_state() - 
Changes the lexing state and saves old on stack + # ------------------------------------------------------------ + def push_state(self, state): + self.lexstatestack.append(self.lexstate) + self.begin(state) + + # ------------------------------------------------------------ + # pop_state() - Restores the previous state + # ------------------------------------------------------------ + def pop_state(self): + self.begin(self.lexstatestack.pop()) + + # ------------------------------------------------------------ + # current_state() - Returns the current lexing state + # ------------------------------------------------------------ + def current_state(self): + return self.lexstate + + # ------------------------------------------------------------ + # skip() - Skip ahead n characters + # ------------------------------------------------------------ + def skip(self, n): + self.lexpos += n + + # ------------------------------------------------------------ + # opttoken() - Return the next token from the Lexer + # + # Note: This function has been carefully implemented to be as fast + # as possible. Don't make changes unless you really know what + # you are doing + # ------------------------------------------------------------ + def token(self): + # Make local copies of frequently referenced attributes + lexpos = self.lexpos + lexlen = self.lexlen + lexignore = self.lexignore + lexdata = self.lexdata + + while lexpos < lexlen: + # This code provides some short-circuit code for whitespace, tabs, and other ignored characters + if lexdata[lexpos] in lexignore: + lexpos += 1 + continue + + # Look for a regular expression match + for lexre, lexindexfunc in self.lexre: + m = lexre.match(lexdata, lexpos) + if not m: + continue + + # Create a token for return + tok = LexToken() + tok.value = m.group() + tok.lineno = self.lineno + tok.lexpos = lexpos + + i = m.lastindex + func, tok.type = lexindexfunc[i] + + if not func: + # If no token type was set, it's an ignored token + if tok.type: + self.lexpos = m.end() + return tok + else: + lexpos = m.end() + break + + lexpos = m.end() + + # If token is processed by a function, call it + + tok.lexer = self # Set additional attributes useful in token rules + self.lexmatch = m + self.lexpos = lexpos + + newtok = func(tok) + + # Every function must return a token, if nothing, we just move to next token + if not newtok: + lexpos = self.lexpos # This is here in case user has updated lexpos. + lexignore = self.lexignore # This is here in case there was a state change + break + + # Verify type of the token. If not in the token map, raise an error + if not self.lexoptimize: + if newtok.type not in self.lextokens_all: + raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( + func.__code__.co_filename, func.__code__.co_firstlineno, + func.__name__, newtok.type), lexdata[lexpos:]) + + return newtok + else: + # No match, see if in literals + if lexdata[lexpos] in self.lexliterals: + tok = LexToken() + tok.value = lexdata[lexpos] + tok.lineno = self.lineno + tok.type = tok.value + tok.lexpos = lexpos + self.lexpos = lexpos + 1 + return tok + + # No match. Call t_error() if defined. + if self.lexerrorf: + tok = LexToken() + tok.value = self.lexdata[lexpos:] + tok.lineno = self.lineno + tok.type = 'error' + tok.lexer = self + tok.lexpos = lexpos + self.lexpos = lexpos + newtok = self.lexerrorf(tok) + if lexpos == self.lexpos: + # Error method didn't change text position at all. This is an error. + raise LexError("Scanning error. 
Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) + lexpos = self.lexpos + if not newtok: + continue + return newtok + + self.lexpos = lexpos + raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) + + if self.lexeoff: + tok = LexToken() + tok.type = 'eof' + tok.value = '' + tok.lineno = self.lineno + tok.lexpos = lexpos + tok.lexer = self + self.lexpos = lexpos + newtok = self.lexeoff(tok) + return newtok + + self.lexpos = lexpos + 1 + if self.lexdata is None: + raise RuntimeError('No input string given with input()') + return None + + # Iterator interface + def __iter__(self): + return self + + def next(self): + t = self.token() + if t is None: + raise StopIteration + return t + + __next__ = next + +# ----------------------------------------------------------------------------- +# ==== Lex Builder === +# +# The functions and classes below are used to collect lexing information +# and build a Lexer object from it. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# _get_regex(func) +# +# Returns the regular expression assigned to a function either as a doc string +# or as a .regex attribute attached by the @TOKEN decorator. +# ----------------------------------------------------------------------------- +def _get_regex(func): + return getattr(func, 'regex', func.__doc__) + +# ----------------------------------------------------------------------------- +# get_caller_module_dict() +# +# This function returns a dictionary containing all of the symbols defined within +# a caller further down the call stack. This is used to get the environment +# associated with the yacc() call if none was provided. +# ----------------------------------------------------------------------------- +def get_caller_module_dict(levels): + f = sys._getframe(levels) + ldict = f.f_globals.copy() + if f.f_globals != f.f_locals: + ldict.update(f.f_locals) + return ldict + +# ----------------------------------------------------------------------------- +# _funcs_to_names() +# +# Given a list of regular expression functions, this converts it to a list +# suitable for output to a table file +# ----------------------------------------------------------------------------- +def _funcs_to_names(funclist, namelist): + result = [] + for f, name in zip(funclist, namelist): + if f and f[0]: + result.append((name, f[1])) + else: + result.append(f) + return result + +# ----------------------------------------------------------------------------- +# _names_to_funcs() +# +# Given a list of regular expression function names, this converts it back to +# functions. +# ----------------------------------------------------------------------------- +def _names_to_funcs(namelist, fdict): + result = [] + for n in namelist: + if n and n[0]: + result.append((fdict[n[0]], n[1])) + else: + result.append(n) + return result + +# ----------------------------------------------------------------------------- +# _form_master_re() +# +# This function takes a list of all of the regex components and attempts to +# form the master regular expression. Given limitations in the Python re +# module, it may be necessary to break the master regex into separate expressions. 
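
Review note: the master-regex technique described above is just named groups joined with '|'; a standard-library toy shows the mechanism:

    import re
    master = re.compile(r'(?P<NUMBER>\d+)|(?P<ID>[A-Za-z_]\w*)')
    m = master.match('foo123')
    print(m.lastgroup, m.group())   # ID foo123

The engine itself indexes a parallel lexindexfunc table by m.lastindex rather than using lastgroup, saving a name lookup per token; the recursive split in the function below is the fallback for the re module's group-count limitations mentioned above.
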
+# ----------------------------------------------------------------------------- +def _form_master_re(relist, reflags, ldict, toknames): + if not relist: + return [] + regex = '|'.join(relist) + try: + lexre = re.compile(regex, reflags) + + # Build the index to function map for the matching engine + lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) + lexindexnames = lexindexfunc[:] + + for f, i in lexre.groupindex.items(): + handle = ldict.get(f, None) + if type(handle) in (types.FunctionType, types.MethodType): + lexindexfunc[i] = (handle, toknames[f]) + lexindexnames[i] = f + elif handle is not None: + lexindexnames[i] = f + if f.find('ignore_') > 0: + lexindexfunc[i] = (None, None) + else: + lexindexfunc[i] = (None, toknames[f]) + + return [(lexre, lexindexfunc)], [regex], [lexindexnames] + except Exception: + m = int(len(relist)/2) + if m == 0: + m = 1 + llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) + rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) + return (llist+rlist), (lre+rre), (lnames+rnames) + +# ----------------------------------------------------------------------------- +# def _statetoken(s,names) +# +# Given a declaration name s of the form "t_" and a dictionary whose keys are +# state names, this function returns a tuple (states,tokenname) where states +# is a tuple of state names and tokenname is the name of the token. For example, +# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') +# ----------------------------------------------------------------------------- +def _statetoken(s, names): + nonstate = 1 + parts = s.split('_') + for i, part in enumerate(parts[1:], 1): + if part not in names and part != 'ANY': + break + + if i > 1: + states = tuple(parts[1:i]) + else: + states = ('INITIAL',) + + if 'ANY' in states: + states = tuple(names) + + tokenname = '_'.join(parts[i:]) + return (states, tokenname) + + +# ----------------------------------------------------------------------------- +# LexerReflect() +# +# This class represents information needed to build a lexer as extracted from a +# user's input file. 
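
Review note: concretely, _statetoken() resolves rule names as follows (return values traced from the logic above; tuple(names) follows the dict's key order):

    stateinfo = {'INITIAL': 'inclusive', 'foo': 'exclusive', 'bar': 'inclusive'}
    _statetoken('t_NUMBER', stateinfo)           # (('INITIAL',), 'NUMBER')
    _statetoken('t_foo_bar_NUMBER', stateinfo)   # (('foo', 'bar'), 'NUMBER')
    _statetoken('t_ANY_NUMBER', stateinfo)       # (('INITIAL', 'foo', 'bar'), 'NUMBER')
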
+# ----------------------------------------------------------------------------- +class LexerReflect(object): + def __init__(self, ldict, log=None, reflags=0): + self.ldict = ldict + self.error_func = None + self.tokens = [] + self.reflags = reflags + self.stateinfo = {'INITIAL': 'inclusive'} + self.modules = set() + self.error = False + self.log = PlyLogger(sys.stderr) if log is None else log + + # Get all of the basic information + def get_all(self): + self.get_tokens() + self.get_literals() + self.get_states() + self.get_rules() + + # Validate all of the information + def validate_all(self): + self.validate_tokens() + self.validate_literals() + self.validate_rules() + return self.error + + # Get the tokens map + def get_tokens(self): + tokens = self.ldict.get('tokens', None) + if not tokens: + self.log.error('No token list is defined') + self.error = True + return + + if not isinstance(tokens, (list, tuple)): + self.log.error('tokens must be a list or tuple') + self.error = True + return + + if not tokens: + self.log.error('tokens is empty') + self.error = True + return + + self.tokens = tokens + + # Validate the tokens + def validate_tokens(self): + terminals = {} + for n in self.tokens: + if not _is_identifier.match(n): + self.log.error("Bad token name '%s'", n) + self.error = True + if n in terminals: + self.log.warning("Token '%s' multiply defined", n) + terminals[n] = 1 + + # Get the literals specifier + def get_literals(self): + self.literals = self.ldict.get('literals', '') + if not self.literals: + self.literals = '' + + # Validate literals + def validate_literals(self): + try: + for c in self.literals: + if not isinstance(c, StringTypes) or len(c) > 1: + self.log.error('Invalid literal %s. Must be a single character', repr(c)) + self.error = True + + except TypeError: + self.log.error('Invalid literals specification. literals must be a sequence of characters') + self.error = True + + def get_states(self): + self.states = self.ldict.get('states', None) + # Build statemap + if self.states: + if not isinstance(self.states, (tuple, list)): + self.log.error('states must be defined as a tuple or list') + self.error = True + else: + for s in self.states: + if not isinstance(s, tuple) or len(s) != 2: + self.log.error("Invalid state specifier %s. 
Must be a tuple (statename,'exclusive|inclusive')", repr(s)) + self.error = True + continue + name, statetype = s + if not isinstance(name, StringTypes): + self.log.error('State name %s must be a string', repr(name)) + self.error = True + continue + if not (statetype == 'inclusive' or statetype == 'exclusive'): + self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) + self.error = True + continue + if name in self.stateinfo: + self.log.error("State '%s' already defined", name) + self.error = True + continue + self.stateinfo[name] = statetype + + # Get all of the symbols with a t_ prefix and sort them into various + # categories (functions, strings, error functions, and ignore characters) + + def get_rules(self): + tsymbols = [f for f in self.ldict if f[:2] == 't_'] + + # Now build up a list of functions and a list of strings + self.toknames = {} # Mapping of symbols to token names + self.funcsym = {} # Symbols defined as functions + self.strsym = {} # Symbols defined as strings + self.ignore = {} # Ignore strings by state + self.errorf = {} # Error functions by state + self.eoff = {} # EOF functions by state + + for s in self.stateinfo: + self.funcsym[s] = [] + self.strsym[s] = [] + + if len(tsymbols) == 0: + self.log.error('No rules of the form t_rulename are defined') + self.error = True + return + + for f in tsymbols: + t = self.ldict[f] + states, tokname = _statetoken(f, self.stateinfo) + self.toknames[f] = tokname + + if hasattr(t, '__call__'): + if tokname == 'error': + for s in states: + self.errorf[s] = t + elif tokname == 'eof': + for s in states: + self.eoff[s] = t + elif tokname == 'ignore': + line = t.__code__.co_firstlineno + file = t.__code__.co_filename + self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) + self.error = True + else: + for s in states: + self.funcsym[s].append((f, t)) + elif isinstance(t, StringTypes): + if tokname == 'ignore': + for s in states: + self.ignore[s] = t + if '\\' in t: + self.log.warning("%s contains a literal backslash '\\'", f) + + elif tokname == 'error': + self.log.error("Rule '%s' must be defined as a function", f) + self.error = True + else: + for s in states: + self.strsym[s].append((f, t)) + else: + self.log.error('%s not defined as a function or string', f) + self.error = True + + # Sort the functions by line number + for f in self.funcsym.values(): + f.sort(key=lambda x: x[1].__code__.co_firstlineno) + + # Sort the strings by regular expression length + for s in self.strsym.values(): + s.sort(key=lambda x: len(x[1]), reverse=True) + + # Validate all of the t_rules collected + def validate_rules(self): + for state in self.stateinfo: + # Validate all rules defined by functions + + for fname, f in self.funcsym[state]: + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + module = inspect.getmodule(f) + self.modules.add(module) + + tokname = self.toknames[fname] + if isinstance(f, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + nargs = f.__code__.co_argcount + if nargs > reqargs: + self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) + self.error = True + continue + + if nargs < reqargs: + self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) + self.error = True + continue + + if not _get_regex(f): + self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__) + self.error = True + continue + + try: + c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), 
self.reflags) + if c.match(''): + self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) + self.error = True + except re.error as e: + self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e) + if '#' in _get_regex(f): + self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) + self.error = True + + # Validate all rules defined by strings + for name, r in self.strsym[state]: + tokname = self.toknames[name] + if tokname == 'error': + self.log.error("Rule '%s' must be defined as a function", name) + self.error = True + continue + + if tokname not in self.tokens and tokname.find('ignore_') < 0: + self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) + self.error = True + continue + + try: + c = re.compile('(?P<%s>%s)' % (name, r), self.reflags) + if (c.match('')): + self.log.error("Regular expression for rule '%s' matches empty string", name) + self.error = True + except re.error as e: + self.log.error("Invalid regular expression for rule '%s'. %s", name, e) + if '#' in r: + self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) + self.error = True + + if not self.funcsym[state] and not self.strsym[state]: + self.log.error("No rules defined for state '%s'", state) + self.error = True + + # Validate the error function + efunc = self.errorf.get(state, None) + if efunc: + f = efunc + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + module = inspect.getmodule(f) + self.modules.add(module) + + if isinstance(f, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + nargs = f.__code__.co_argcount + if nargs > reqargs: + self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) + self.error = True + + if nargs < reqargs: + self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) + self.error = True + + for module in self.modules: + self.validate_module(module) + + # ----------------------------------------------------------------------------- + # validate_module() + # + # This checks to see if there are duplicated t_rulename() functions or strings + # in the parser input file. This is done using a simple regular expression + # match on each line in the source code of the given module. + # ----------------------------------------------------------------------------- + + def validate_module(self, module): + try: + lines, linen = inspect.getsourcelines(module) + except IOError: + return + + fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') + sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') + + counthash = {} + linen += 1 + for line in lines: + m = fre.match(line) + if not m: + m = sre.match(line) + if m: + name = m.group(1) + prev = counthash.get(name) + if not prev: + counthash[name] = linen + else: + filename = inspect.getsourcefile(module) + self.log.error('%s:%d: Rule %s redefined. 
Previously defined on line %d', filename, linen, name, prev) + self.error = True + linen += 1 + +# ----------------------------------------------------------------------------- +# lex(module) +# +# Build all of the regular expression rules from definitions in the supplied module +# ----------------------------------------------------------------------------- +def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', + reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None): + + if lextab is None: + lextab = 'lextab' + + global lexer + + ldict = None + stateinfo = {'INITIAL': 'inclusive'} + lexobj = Lexer() + lexobj.lexoptimize = optimize + global token, input + + if errorlog is None: + errorlog = PlyLogger(sys.stderr) + + if debug: + if debuglog is None: + debuglog = PlyLogger(sys.stderr) + + # Get the module dictionary used for the lexer + if object: + module = object + + # Get the module dictionary used for the parser + if module: + _items = [(k, getattr(module, k)) for k in dir(module)] + ldict = dict(_items) + # If no __file__ attribute is available, try to obtain it from the __module__ instead + if '__file__' not in ldict: + ldict['__file__'] = sys.modules[ldict['__module__']].__file__ + else: + ldict = get_caller_module_dict(2) + + # Determine if the module is package of a package or not. + # If so, fix the tabmodule setting so that tables load correctly + pkg = ldict.get('__package__') + if pkg and isinstance(lextab, str): + if '.' not in lextab: + lextab = pkg + '.' + lextab + + # Collect parser information from the dictionary + linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) + linfo.get_all() + if not optimize: + if linfo.validate_all(): + raise SyntaxError("Can't build lexer") + + if optimize and lextab: + try: + lexobj.readtab(lextab, ldict) + token = lexobj.token + input = lexobj.input + lexer = lexobj + return lexobj + + except ImportError: + pass + + # Dump some basic debugging information + if debug: + debuglog.info('lex: tokens = %r', linfo.tokens) + debuglog.info('lex: literals = %r', linfo.literals) + debuglog.info('lex: states = %r', linfo.stateinfo) + + # Build a dictionary of valid token names + lexobj.lextokens = set() + for n in linfo.tokens: + lexobj.lextokens.add(n) + + # Get literals specification + if isinstance(linfo.literals, (list, tuple)): + lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) + else: + lexobj.lexliterals = linfo.literals + + lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) + + # Get the stateinfo dictionary + stateinfo = linfo.stateinfo + + regexs = {} + # Build the master regular expressions + for state in stateinfo: + regex_list = [] + + # Add rules defined by functions first + for fname, f in linfo.funcsym[state]: + line = f.__code__.co_firstlineno + file = f.__code__.co_filename + regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) + if debug: + debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) + + # Now add all of the simple rules + for name, r in linfo.strsym[state]: + regex_list.append('(?P<%s>%s)' % (name, r)) + if debug: + debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) + + regexs[state] = regex_list + + # Build the master regular expressions + + if debug: + debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') + + for state in regexs: + lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) + lexobj.lexstatere[state] = lexre + 
lexobj.lexstateretext[state] = re_text + lexobj.lexstaterenames[state] = re_names + if debug: + for i, text in enumerate(re_text): + debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) + + # For inclusive states, we need to add the regular expressions from the INITIAL state + for state, stype in stateinfo.items(): + if state != 'INITIAL' and stype == 'inclusive': + lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) + lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) + lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) + + lexobj.lexstateinfo = stateinfo + lexobj.lexre = lexobj.lexstatere['INITIAL'] + lexobj.lexretext = lexobj.lexstateretext['INITIAL'] + lexobj.lexreflags = reflags + + # Set up ignore variables + lexobj.lexstateignore = linfo.ignore + lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') + + # Set up error functions + lexobj.lexstateerrorf = linfo.errorf + lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) + if not lexobj.lexerrorf: + errorlog.warning('No t_error rule is defined') + + # Set up eof functions + lexobj.lexstateeoff = linfo.eoff + lexobj.lexeoff = linfo.eoff.get('INITIAL', None) + + # Check state information for ignore and error rules + for s, stype in stateinfo.items(): + if stype == 'exclusive': + if s not in linfo.errorf: + errorlog.warning("No error rule is defined for exclusive state '%s'", s) + if s not in linfo.ignore and lexobj.lexignore: + errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) + elif stype == 'inclusive': + if s not in linfo.errorf: + linfo.errorf[s] = linfo.errorf.get('INITIAL', None) + if s not in linfo.ignore: + linfo.ignore[s] = linfo.ignore.get('INITIAL', '') + + # Create global versions of the token() and input() functions + token = lexobj.token + input = lexobj.input + lexer = lexobj + + # If in optimize mode, we write the lextab + if lextab and optimize: + if outputdir is None: + # If no output directory is set, the location of the output files + # is determined according to the following rules: + # - If lextab specifies a package, files go into that package directory + # - Otherwise, files go in the same directory as the specifying module + if isinstance(lextab, types.ModuleType): + srcfile = lextab.__file__ + else: + if '.' not in lextab: + srcfile = ldict['__file__'] + else: + parts = lextab.split('.') + pkgname = '.'.join(parts[:-1]) + exec('import %s' % pkgname) + srcfile = getattr(sys.modules[pkgname], '__file__', '') + outputdir = os.path.dirname(srcfile) + try: + lexobj.writetab(lextab, outputdir) + except IOError as e: + errorlog.warning("Couldn't write lextab module %r. 
%s" % (lextab, e)) + + return lexobj + +# ----------------------------------------------------------------------------- +# runmain() +# +# This runs the lexer as a main program +# ----------------------------------------------------------------------------- + +def runmain(lexer=None, data=None): + if not data: + try: + filename = sys.argv[1] + f = open(filename) + data = f.read() + f.close() + except IndexError: + sys.stdout.write('Reading from standard input (type EOF to end):\n') + data = sys.stdin.read() + + if lexer: + _input = lexer.input + else: + _input = input + _input(data) + if lexer: + _token = lexer.token + else: + _token = token + + while True: + tok = _token() + if not tok: + break + sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) + +# ----------------------------------------------------------------------------- +# @TOKEN(regex) +# +# This decorator function can be used to set the regex expression on a function +# when its docstring might need to be set in an alternative way +# ----------------------------------------------------------------------------- + +def TOKEN(r): + def set_regex(f): + if hasattr(r, '__call__'): + f.regex = _get_regex(r) + else: + f.regex = r + return f + return set_regex + +# Alternative spelling of the TOKEN decorator +Token = TOKEN diff --git a/python/pycparser/ply/yacc.py b/python/pycparser/ply/yacc.py new file mode 100644 index 000000000..20b4f2863 --- /dev/null +++ b/python/pycparser/ply/yacc.py @@ -0,0 +1,3494 @@ +# ----------------------------------------------------------------------------- +# ply: yacc.py +# +# Copyright (C) 2001-2017 +# David M. Beazley (Dabeaz LLC) +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of the David Beazley or Dabeaz LLC may be used to +# endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ----------------------------------------------------------------------------- +# +# This implements an LR parser that is constructed from grammar rules defined +# as Python functions. The grammer is specified by supplying the BNF inside +# Python documentation strings. The inspiration for this technique was borrowed +# from John Aycock's Spark parsing system. 
PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist). However, most of the variables used during table
+# construction are defined in terms of global variables. Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing. LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization---often at the expense of readability and what might
+# be considered good Python "coding style." Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
+
+import re
+import types
+import sys
+import os.path
+import inspect
+import base64
+import warnings
+
+__version__ = '3.10'
+__tabversion__ = '3.10'
+
+#-----------------------------------------------------------------------------
+# === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug = True            # Debugging mode. If set, yacc generates
+                            # a 'parser.out' file in the current directory
+
+debug_file = 'parser.out'   # Default name of the debugging file
+tab_module = 'parsetab'     # Default name of the table module
+default_lr = 'LALR'         # Default LR table generation method
+
+error_count = 3             # Number of symbols that must be shifted to leave recovery mode
+
+yaccdevel = False           # Set to True if developing yacc. This turns off optimized
+                            # implementations of certain functions.
+
+resultlimit = 40            # Size limit of results when running in debug mode.
+
+pickle_protocol = 0         # Protocol to use when writing pickle files
+
+# String type-checking compatibility
+if sys.version_info[0] < 3:
+    string_types = basestring
+else:
+    string_types = str
+
+MAXINT = sys.maxsize
+
+# This object is a stand-in for a logging object created by the
+# logging module. PLY will use this by default to create things
+# such as the parser.out file. If a user wants more detailed
+# information, they can create their own logging object and pass
+# it into PLY.
+
+class PlyLogger(object):
+    def __init__(self, f):
+        self.f = f
+
+    def debug(self, msg, *args, **kwargs):
+        self.f.write((msg % args) + '\n')
+
+    info = debug
+
+    def warning(self, msg, *args, **kwargs):
+        self.f.write('WARNING: ' + (msg % args) + '\n')
+
+    def error(self, msg, *args, **kwargs):
+        self.f.write('ERROR: ' + (msg % args) + '\n')
+
+    critical = debug
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+    def __getattribute__(self, name):
+        return self
+
+    def __call__(self, *args, **kwargs):
+        return self
+
+# Exception raised for yacc-related errors
+class YaccError(Exception):
+    pass
+
+# Format the result message that the parser produces when running in debug mode.
+def format_result(r): + repr_str = repr(r) + if '\n' in repr_str: + repr_str = repr(repr_str) + if len(repr_str) > resultlimit: + repr_str = repr_str[:resultlimit] + ' ...' + result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str) + return result + +# Format stack entries when the parser is running in debug mode +def format_stack_entry(r): + repr_str = repr(r) + if '\n' in repr_str: + repr_str = repr(repr_str) + if len(repr_str) < 16: + return repr_str + else: + return '<%s @ 0x%x>' % (type(r).__name__, id(r)) + +# Panic mode error recovery support. This feature is being reworked--much of the +# code here is to offer a deprecation/backwards compatible transition + +_errok = None +_token = None +_restart = None +_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error(). +Instead, invoke the methods on the associated parser instance: + + def p_error(p): + ... + # Use parser.errok(), parser.token(), parser.restart() + ... + + parser = yacc.yacc() +''' + +def errok(): + warnings.warn(_warnmsg) + return _errok() + +def restart(): + warnings.warn(_warnmsg) + return _restart() + +def token(): + warnings.warn(_warnmsg) + return _token() + +# Utility function to call the p_error() function with some deprecation hacks +def call_errorfunc(errorfunc, token, parser): + global _errok, _token, _restart + _errok = parser.errok + _token = parser.token + _restart = parser.restart + r = errorfunc(token) + try: + del _errok, _token, _restart + except NameError: + pass + return r + +#----------------------------------------------------------------------------- +# === LR Parsing Engine === +# +# The following classes are used for the LR parser itself. These are not +# used during table construction and are independent of the actual LR +# table generation algorithm +#----------------------------------------------------------------------------- + +# This class is used to hold non-terminal grammar symbols during parsing. +# It normally has the following attributes set: +# .type = Grammar symbol type +# .value = Symbol value +# .lineno = Starting line number +# .endlineno = Ending line number (optional, set automatically) +# .lexpos = Starting lex position +# .endlexpos = Ending lex position (optional, set automatically) + +class YaccSymbol: + def __str__(self): + return self.type + + def __repr__(self): + return str(self) + +# This class is a wrapper around the objects actually passed to each +# grammar rule. Index lookup and assignment actually assign the +# .value attribute of the underlying YaccSymbol object. +# The lineno() method returns the line number of a given +# item (or 0 if not defined). The linespan() method returns +# a tuple of (startline,endline) representing the range of lines +# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) +# representing the range of positional information for a symbol. 
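+#
+# Illustrative sketch (a hypothetical rule, not part of the engine itself):
+# inside a grammar rule function, this object arrives as the 'p' argument,
+# so a rule might read:
+#
+#       def p_expr_plus(p):
+#           'expr : expr PLUS term'
+#           p[0] = p[1] + p[3]        # reads/writes the .value attributes
+#           startline = p.lineno(1)   # line number of the left 'expr'
+#           positions = p.lexspan(3)  # (lexpos, endlexpos) of 'term'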
+ +class YaccProduction: + def __init__(self, s, stack=None): + self.slice = s + self.stack = stack + self.lexer = None + self.parser = None + + def __getitem__(self, n): + if isinstance(n, slice): + return [s.value for s in self.slice[n]] + elif n >= 0: + return self.slice[n].value + else: + return self.stack[n].value + + def __setitem__(self, n, v): + self.slice[n].value = v + + def __getslice__(self, i, j): + return [s.value for s in self.slice[i:j]] + + def __len__(self): + return len(self.slice) + + def lineno(self, n): + return getattr(self.slice[n], 'lineno', 0) + + def set_lineno(self, n, lineno): + self.slice[n].lineno = lineno + + def linespan(self, n): + startline = getattr(self.slice[n], 'lineno', 0) + endline = getattr(self.slice[n], 'endlineno', startline) + return startline, endline + + def lexpos(self, n): + return getattr(self.slice[n], 'lexpos', 0) + + def lexspan(self, n): + startpos = getattr(self.slice[n], 'lexpos', 0) + endpos = getattr(self.slice[n], 'endlexpos', startpos) + return startpos, endpos + + def error(self): + raise SyntaxError + +# ----------------------------------------------------------------------------- +# == LRParser == +# +# The LR Parsing engine. +# ----------------------------------------------------------------------------- + +class LRParser: + def __init__(self, lrtab, errorf): + self.productions = lrtab.lr_productions + self.action = lrtab.lr_action + self.goto = lrtab.lr_goto + self.errorfunc = errorf + self.set_defaulted_states() + self.errorok = True + + def errok(self): + self.errorok = True + + def restart(self): + del self.statestack[:] + del self.symstack[:] + sym = YaccSymbol() + sym.type = '$end' + self.symstack.append(sym) + self.statestack.append(0) + + # Defaulted state support. + # This method identifies parser states where there is only one possible reduction action. + # For such states, the parser can make a choose to make a rule reduction without consuming + # the next look-ahead token. This delayed invocation of the tokenizer can be useful in + # certain kinds of advanced parsing situations where the lexer and parser interact with + # each other or change states (i.e., manipulation of scope, lexer states, etc.). + # + # See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions + def set_defaulted_states(self): + self.defaulted_states = {} + for state, actions in self.action.items(): + rules = list(actions.values()) + if len(rules) == 1 and rules[0] < 0: + self.defaulted_states[state] = rules[0] + + def disable_defaulted_states(self): + self.defaulted_states = {} + + def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + if debug or yaccdevel: + if isinstance(debug, int): + debug = PlyLogger(sys.stderr) + return self.parsedebug(input, lexer, debug, tracking, tokenfunc) + elif tracking: + return self.parseopt(input, lexer, debug, tracking, tokenfunc) + else: + return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) + + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parsedebug(). + # + # This is the debugging enabled version of parse(). All changes made to the + # parsing engine should be made here. Optimized versions of this function + # are automatically created by the ply/ygen.py script. This script cuts out + # sections enclosed in markers such as this: + # + # #--! DEBUG + # statements + # #--! DEBUG + # + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
+ + def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parsedebug-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) + defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery + + #--! DEBUG + debug.info('PLY: PARSE DEBUG START') + #--! DEBUG + + # If no lexer was given, we will try to use the lex module + if not lexer: + from . import lex + lexer = lex.lexer + + # Set up the lexer and parser objects on pslice + pslice.lexer = lexer + pslice.parser = self + + # If input was supplied, pass to lexer + if input is not None: + lexer.input(input) + + if tokenfunc is None: + # Tokenize function + get_token = lexer.token + else: + get_token = tokenfunc + + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token + + # Set up the state and symbol stacks + + statestack = [] # Stack of parsing states + self.statestack = statestack + symstack = [] # Stack of grammar symbols + self.symstack = symstack + + pslice.stack = symstack # Put in the production + errtoken = None # Err token + + # The start state is assumed to be (0,$end) + + statestack.append(0) + sym = YaccSymbol() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + # Get the next symbol on the input. If a lookahead symbol + # is already set, we just use that. Otherwise, we'll pull + # the next token off of the lookaheadstack or from the lexer + + #--! DEBUG + debug.debug('') + debug.debug('State : %s', state) + #--! DEBUG + + if state not in defaulted_states: + if not lookahead: + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] + #--! DEBUG + debug.debug('Defaulted state %s: Reduce using %d', state, -t) + #--! DEBUG + + #--! DEBUG + debug.debug('Stack : %s', + ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) + #--! DEBUG + + if t is not None: + if t > 0: + # shift a symbol on the stack + statestack.append(t) + state = t + + #--! DEBUG + debug.debug('Action : Shift and goto state %s', t) + #--! DEBUG + + symstack.append(lookahead) + lookahead = None + + # Decrease error count on successful shift + if errorcount: + errorcount -= 1 + continue + + if t < 0: + # reduce a symbol on the stack, emit a production + p = prod[-t] + pname = p.name + plen = p.len + + # Get production function + sym = YaccSymbol() + sym.type = pname # Production name + sym.value = None + + #--! DEBUG + if plen: + debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, + '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', + goto[statestack[-1-plen]][pname]) + else: + debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], + goto[statestack[-1]][pname]) + + #--! DEBUG + + if plen: + targ = symstack[-plen-1:] + targ[0] = sym + + #--! 
TRACKING + if tracking: + t1 = targ[1] + sym.lineno = t1.lineno + sym.lexpos = t1.lexpos + t1 = targ[-1] + sym.endlineno = getattr(t1, 'endlineno', t1.lineno) + sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) + #--! TRACKING + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + #--! DEBUG + debug.info('Result : %s', format_result(pslice[0])) + #--! DEBUG + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + else: + + #--! TRACKING + if tracking: + sym.lineno = lexer.lineno + sym.lexpos = lexer.lexpos + #--! TRACKING + + targ = [sym] + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + #--! DEBUG + debug.info('Result : %s', format_result(pslice[0])) + #--! DEBUG + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + #--! DEBUG + debug.info('Done : Returning %s', format_result(result)) + debug.info('PLY: PARSE DEBUG END') + #--! DEBUG + return result + + if t is None: + + #--! DEBUG + debug.error('Error : %s', + ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) + #--! DEBUG + + # We have some kind of parsing error here. To handle + # this, we are going to push the current token onto + # the tokenstack and replace it with an 'error' token. + # If there are any synchronization rules, they may + # catch it. + # + # In addition to pushing the error token, we call call + # the user defined p_error() function if this is the + # first syntax error. This function is only called if + # errorcount == 0. + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False + errtoken = lookahead + if errtoken.type == '$end': + errtoken = None # End of file! + if self.errorfunc: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: + # User must have done some kind of panic + # mode recovery on their own. 
The + # returned token is the next lookahead + lookahead = tok + errtoken = None + continue + else: + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('yacc: Parse error in input. EOF\n') + return + + else: + errorcount = error_count + + # case 1: the statestack only has 1 entry on it. If we're in this state, the + # entire parse has been rolled back and we're completely hosed. The token is + # discarded and we just keep going. + + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + # Nuke the pushback stack + del lookaheadstack[:] + continue + + # case 2: the statestack has a couple of entries on it, but we're + # at the end of the file. nuke the top entry and generate an error token + + # Start nuking entries on the stack + if lookahead.type == '$end': + # Whoa. We're really hosed here. Bail out + return + + if lookahead.type != 'error': + sym = symstack[-1] + if sym.type == 'error': + # Hmmm. Error is on top of stack, we'll just nuke input + # symbol and continue + #--! TRACKING + if tracking: + sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) + sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) + #--! TRACKING + lookahead = None + continue + + # Create the error symbol for the first time and make it the new lookahead symbol + t = YaccSymbol() + t.type = 'error' + + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos + t.value = lookahead + lookaheadstack.append(lookahead) + lookahead = t + else: + sym = symstack.pop() + #--! TRACKING + if tracking: + lookahead.lineno = sym.lineno + lookahead.lexpos = sym.lexpos + #--! TRACKING + statestack.pop() + state = statestack[-1] + + continue + + # Call an error function here + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parsedebug-end + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parseopt(). + # + # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY! + # This code is automatically generated by the ply/ygen.py script. Make + # changes to the parsedebug() method instead. + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parseopt-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) + defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery + + + # If no lexer was given, we will try to use the lex module + if not lexer: + from . 
import lex + lexer = lex.lexer + + # Set up the lexer and parser objects on pslice + pslice.lexer = lexer + pslice.parser = self + + # If input was supplied, pass to lexer + if input is not None: + lexer.input(input) + + if tokenfunc is None: + # Tokenize function + get_token = lexer.token + else: + get_token = tokenfunc + + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token + + # Set up the state and symbol stacks + + statestack = [] # Stack of parsing states + self.statestack = statestack + symstack = [] # Stack of grammar symbols + self.symstack = symstack + + pslice.stack = symstack # Put in the production + errtoken = None # Err token + + # The start state is assumed to be (0,$end) + + statestack.append(0) + sym = YaccSymbol() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + # Get the next symbol on the input. If a lookahead symbol + # is already set, we just use that. Otherwise, we'll pull + # the next token off of the lookaheadstack or from the lexer + + + if state not in defaulted_states: + if not lookahead: + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] + + + if t is not None: + if t > 0: + # shift a symbol on the stack + statestack.append(t) + state = t + + + symstack.append(lookahead) + lookahead = None + + # Decrease error count on successful shift + if errorcount: + errorcount -= 1 + continue + + if t < 0: + # reduce a symbol on the stack, emit a production + p = prod[-t] + pname = p.name + plen = p.len + + # Get production function + sym = YaccSymbol() + sym.type = pname # Production name + sym.value = None + + + if plen: + targ = symstack[-plen-1:] + targ[0] = sym + + #--! TRACKING + if tracking: + t1 = targ[1] + sym.lineno = t1.lineno + sym.lexpos = t1.lexpos + t1 = targ[-1] + sym.endlineno = getattr(t1, 'endlineno', t1.lineno) + sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) + #--! TRACKING + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + else: + + #--! TRACKING + if tracking: + sym.lineno = lexer.lineno + sym.lexpos = lexer.lexpos + #--! TRACKING + + targ = [sym] + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. 
+ + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + return result + + if t is None: + + + # We have some kind of parsing error here. To handle + # this, we are going to push the current token onto + # the tokenstack and replace it with an 'error' token. + # If there are any synchronization rules, they may + # catch it. + # + # In addition to pushing the error token, we call call + # the user defined p_error() function if this is the + # first syntax error. This function is only called if + # errorcount == 0. + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False + errtoken = lookahead + if errtoken.type == '$end': + errtoken = None # End of file! + if self.errorfunc: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: + # User must have done some kind of panic + # mode recovery on their own. The + # returned token is the next lookahead + lookahead = tok + errtoken = None + continue + else: + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('yacc: Parse error in input. EOF\n') + return + + else: + errorcount = error_count + + # case 1: the statestack only has 1 entry on it. If we're in this state, the + # entire parse has been rolled back and we're completely hosed. The token is + # discarded and we just keep going. + + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + # Nuke the pushback stack + del lookaheadstack[:] + continue + + # case 2: the statestack has a couple of entries on it, but we're + # at the end of the file. nuke the top entry and generate an error token + + # Start nuking entries on the stack + if lookahead.type == '$end': + # Whoa. We're really hosed here. Bail out + return + + if lookahead.type != 'error': + sym = symstack[-1] + if sym.type == 'error': + # Hmmm. Error is on top of stack, we'll just nuke input + # symbol and continue + #--! TRACKING + if tracking: + sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) + sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) + #--! TRACKING + lookahead = None + continue + + # Create the error symbol for the first time and make it the new lookahead symbol + t = YaccSymbol() + t.type = 'error' + + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos + t.value = lookahead + lookaheadstack.append(lookahead) + lookahead = t + else: + sym = symstack.pop() + #--! 
TRACKING + if tracking: + lookahead.lineno = sym.lineno + lookahead.lexpos = sym.lexpos + #--! TRACKING + statestack.pop() + state = statestack[-1] + + continue + + # Call an error function here + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parseopt-end + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # parseopt_notrack(). + # + # Optimized version of parseopt() with line number tracking removed. + # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated + # by the ply/ygen.py script. Make changes to the parsedebug() method instead. + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): + #--! parseopt-notrack-start + lookahead = None # Current lookahead symbol + lookaheadstack = [] # Stack of lookahead symbols + actions = self.action # Local reference to action table (to avoid lookup on self.) + goto = self.goto # Local reference to goto table (to avoid lookup on self.) + prod = self.productions # Local reference to production list (to avoid lookup on self.) + defaulted_states = self.defaulted_states # Local reference to defaulted states + pslice = YaccProduction(None) # Production object passed to grammar rules + errorcount = 0 # Used during error recovery + + + # If no lexer was given, we will try to use the lex module + if not lexer: + from . import lex + lexer = lex.lexer + + # Set up the lexer and parser objects on pslice + pslice.lexer = lexer + pslice.parser = self + + # If input was supplied, pass to lexer + if input is not None: + lexer.input(input) + + if tokenfunc is None: + # Tokenize function + get_token = lexer.token + else: + get_token = tokenfunc + + # Set the parser() token method (sometimes used in error recovery) + self.token = get_token + + # Set up the state and symbol stacks + + statestack = [] # Stack of parsing states + self.statestack = statestack + symstack = [] # Stack of grammar symbols + self.symstack = symstack + + pslice.stack = symstack # Put in the production + errtoken = None # Err token + + # The start state is assumed to be (0,$end) + + statestack.append(0) + sym = YaccSymbol() + sym.type = '$end' + symstack.append(sym) + state = 0 + while True: + # Get the next symbol on the input. If a lookahead symbol + # is already set, we just use that. Otherwise, we'll pull + # the next token off of the lookaheadstack or from the lexer + + + if state not in defaulted_states: + if not lookahead: + if not lookaheadstack: + lookahead = get_token() # Get the next token + else: + lookahead = lookaheadstack.pop() + if not lookahead: + lookahead = YaccSymbol() + lookahead.type = '$end' + + # Check the action table + ltype = lookahead.type + t = actions[state].get(ltype) + else: + t = defaulted_states[state] + + + if t is not None: + if t > 0: + # shift a symbol on the stack + statestack.append(t) + state = t + + + symstack.append(lookahead) + lookahead = None + + # Decrease error count on successful shift + if errorcount: + errorcount -= 1 + continue + + if t < 0: + # reduce a symbol on the stack, emit a production + p = prod[-t] + pname = p.name + plen = p.len + + # Get production function + sym = YaccSymbol() + sym.type = pname # Production name + sym.value = None + + + if plen: + targ = symstack[-plen-1:] + targ[0] = sym + + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # below as a performance optimization. 
Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + del symstack[-plen:] + self.state = state + p.callable(pslice) + del statestack[-plen:] + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + symstack.extend(targ[1:-1]) # Put the production slice back on the stack + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + else: + + + targ = [sym] + + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + # The code enclosed in this section is duplicated + # above as a performance optimization. Make sure + # changes get made in both locations. + + pslice.slice = targ + + try: + # Call the grammar rule with our special slice object + self.state = state + p.callable(pslice) + symstack.append(sym) + state = goto[statestack[-1]][pname] + statestack.append(state) + except SyntaxError: + # If an error was set. Enter error recovery state + lookaheadstack.append(lookahead) # Save the current lookahead token + statestack.pop() # Pop back one state (before the reduce) + state = statestack[-1] + sym.type = 'error' + sym.value = 'error' + lookahead = sym + errorcount = error_count + self.errorok = False + + continue + # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + + if t == 0: + n = symstack[-1] + result = getattr(n, 'value', None) + return result + + if t is None: + + + # We have some kind of parsing error here. To handle + # this, we are going to push the current token onto + # the tokenstack and replace it with an 'error' token. + # If there are any synchronization rules, they may + # catch it. + # + # In addition to pushing the error token, we call call + # the user defined p_error() function if this is the + # first syntax error. This function is only called if + # errorcount == 0. + if errorcount == 0 or self.errorok: + errorcount = error_count + self.errorok = False + errtoken = lookahead + if errtoken.type == '$end': + errtoken = None # End of file! + if self.errorfunc: + if errtoken and not hasattr(errtoken, 'lexer'): + errtoken.lexer = lexer + self.state = state + tok = call_errorfunc(self.errorfunc, errtoken, self) + if self.errorok: + # User must have done some kind of panic + # mode recovery on their own. The + # returned token is the next lookahead + lookahead = tok + errtoken = None + continue + else: + if errtoken: + if hasattr(errtoken, 'lineno'): + lineno = lookahead.lineno + else: + lineno = 0 + if lineno: + sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) + else: + sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) + else: + sys.stderr.write('yacc: Parse error in input. EOF\n') + return + + else: + errorcount = error_count + + # case 1: the statestack only has 1 entry on it. If we're in this state, the + # entire parse has been rolled back and we're completely hosed. The token is + # discarded and we just keep going. 
+ + if len(statestack) <= 1 and lookahead.type != '$end': + lookahead = None + errtoken = None + state = 0 + # Nuke the pushback stack + del lookaheadstack[:] + continue + + # case 2: the statestack has a couple of entries on it, but we're + # at the end of the file. nuke the top entry and generate an error token + + # Start nuking entries on the stack + if lookahead.type == '$end': + # Whoa. We're really hosed here. Bail out + return + + if lookahead.type != 'error': + sym = symstack[-1] + if sym.type == 'error': + # Hmmm. Error is on top of stack, we'll just nuke input + # symbol and continue + lookahead = None + continue + + # Create the error symbol for the first time and make it the new lookahead symbol + t = YaccSymbol() + t.type = 'error' + + if hasattr(lookahead, 'lineno'): + t.lineno = t.endlineno = lookahead.lineno + if hasattr(lookahead, 'lexpos'): + t.lexpos = t.endlexpos = lookahead.lexpos + t.value = lookahead + lookaheadstack.append(lookahead) + lookahead = t + else: + sym = symstack.pop() + statestack.pop() + state = statestack[-1] + + continue + + # Call an error function here + raise RuntimeError('yacc: internal parser error!!!\n') + + #--! parseopt-notrack-end + +# ----------------------------------------------------------------------------- +# === Grammar Representation === +# +# The following functions, classes, and variables are used to represent and +# manipulate the rules that make up a grammar. +# ----------------------------------------------------------------------------- + +# regex matching identifiers +_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') + +# ----------------------------------------------------------------------------- +# class Production: +# +# This class stores the raw information about a single production or grammar rule. +# A grammar rule refers to a specification such as this: +# +# expr : expr PLUS term +# +# Here are the basic attributes defined on all productions +# +# name - Name of the production. For example 'expr' +# prod - A list of symbols on the right side ['expr','PLUS','term'] +# prec - Production precedence level +# number - Production number. +# func - Function that executes on reduce +# file - File where production function is defined +# lineno - Line number where production function is defined +# +# The following attributes are defined or optional. 
+# +# len - Length of the production (number of symbols on right hand side) +# usyms - Set of unique symbols found in the production +# ----------------------------------------------------------------------------- + +class Production(object): + reduced = 0 + def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0): + self.name = name + self.prod = tuple(prod) + self.number = number + self.func = func + self.callable = None + self.file = file + self.line = line + self.prec = precedence + + # Internal settings used during table construction + + self.len = len(self.prod) # Length of the production + + # Create a list of unique production symbols used in the production + self.usyms = [] + for s in self.prod: + if s not in self.usyms: + self.usyms.append(s) + + # List of all LR items for the production + self.lr_items = [] + self.lr_next = None + + # Create a string representation + if self.prod: + self.str = '%s -> %s' % (self.name, ' '.join(self.prod)) + else: + self.str = '%s -> ' % self.name + + def __str__(self): + return self.str + + def __repr__(self): + return 'Production(' + str(self) + ')' + + def __len__(self): + return len(self.prod) + + def __nonzero__(self): + return 1 + + def __getitem__(self, index): + return self.prod[index] + + # Return the nth lr_item from the production (or None if at the end) + def lr_item(self, n): + if n > len(self.prod): + return None + p = LRItem(self, n) + # Precompute the list of productions immediately following. + try: + p.lr_after = Prodnames[p.prod[n+1]] + except (IndexError, KeyError): + p.lr_after = [] + try: + p.lr_before = p.prod[n-1] + except IndexError: + p.lr_before = None + return p + + # Bind the production function name to a callable + def bind(self, pdict): + if self.func: + self.callable = pdict[self.func] + +# This class serves as a minimal standin for Production objects when +# reading table data from files. It only contains information +# actually used by the LR parsing engine, plus some additional +# debugging information. +class MiniProduction(object): + def __init__(self, str, name, len, func, file, line): + self.name = name + self.len = len + self.func = func + self.callable = None + self.file = file + self.line = line + self.str = str + + def __str__(self): + return self.str + + def __repr__(self): + return 'MiniProduction(%s)' % self.str + + # Bind the production function name to a callable + def bind(self, pdict): + if self.func: + self.callable = pdict[self.func] + + +# ----------------------------------------------------------------------------- +# class LRItem +# +# This class represents a specific stage of parsing a production rule. For +# example: +# +# expr : expr . PLUS term +# +# In the above, the "." represents the current location of the parse. Here +# basic attributes: +# +# name - Name of the production. For example 'expr' +# prod - A list of symbols on the right side ['expr','.', 'PLUS','term'] +# number - Production number. +# +# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term' +# then lr_next refers to 'expr -> expr PLUS . term' +# lr_index - LR item index (location of the ".") in the prod list. 
+# lookaheads - LALR lookahead symbols for this item +# len - Length of the production (number of symbols on right hand side) +# lr_after - List of all productions that immediately follow +# lr_before - Grammar symbol immediately before +# ----------------------------------------------------------------------------- + +class LRItem(object): + def __init__(self, p, n): + self.name = p.name + self.prod = list(p.prod) + self.number = p.number + self.lr_index = n + self.lookaheads = {} + self.prod.insert(n, '.') + self.prod = tuple(self.prod) + self.len = len(self.prod) + self.usyms = p.usyms + + def __str__(self): + if self.prod: + s = '%s -> %s' % (self.name, ' '.join(self.prod)) + else: + s = '%s -> ' % self.name + return s + + def __repr__(self): + return 'LRItem(' + str(self) + ')' + +# ----------------------------------------------------------------------------- +# rightmost_terminal() +# +# Return the rightmost terminal from a list of symbols. Used in add_production() +# ----------------------------------------------------------------------------- +def rightmost_terminal(symbols, terminals): + i = len(symbols) - 1 + while i >= 0: + if symbols[i] in terminals: + return symbols[i] + i -= 1 + return None + +# ----------------------------------------------------------------------------- +# === GRAMMAR CLASS === +# +# The following class represents the contents of the specified grammar along +# with various computed properties such as first sets, follow sets, LR items, etc. +# This data is used for critical parts of the table generation process later. +# ----------------------------------------------------------------------------- + +class GrammarError(YaccError): + pass + +class Grammar(object): + def __init__(self, terminals): + self.Productions = [None] # A list of all of the productions. The first + # entry is always reserved for the purpose of + # building an augmented grammar + + self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all + # productions of that nonterminal. + + self.Prodmap = {} # A dictionary that is only used to detect duplicate + # productions. + + self.Terminals = {} # A dictionary mapping the names of terminal symbols to a + # list of the rules where they are used. + + for term in terminals: + self.Terminals[term] = [] + + self.Terminals['error'] = [] + + self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list + # of rule numbers where they are used. + + self.First = {} # A dictionary of precomputed FIRST(x) symbols + + self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols + + self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the + # form ('right',level) or ('nonassoc', level) or ('left',level) + + self.UsedPrecedence = set() # Precedence rules that were actually used by the grammer. + # This is only used to provide error checking and to generate + # a warning about unused precedence rules. + + self.Start = None # Starting symbol for the grammar + + + def __len__(self): + return len(self.Productions) + + def __getitem__(self, index): + return self.Productions[index] + + # ----------------------------------------------------------------------------- + # set_precedence() + # + # Sets the precedence for a given terminal. assoc is the associativity such as + # 'left','right', or 'nonassoc'. level is a numeric level. 
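+    #
+    # For illustration (hypothetical terminals and a Grammar instance 'g'):
+    # a table where PLUS is left-associative at level 1 and TIMES at level 2
+    # would be registered with calls such as
+    #
+    #       g.set_precedence('PLUS',  'left', 1)
+    #       g.set_precedence('TIMES', 'left', 2)
+    #
+    # Both calls must happen before any add_production() call.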
+ # + # ----------------------------------------------------------------------------- + + def set_precedence(self, term, assoc, level): + assert self.Productions == [None], 'Must call set_precedence() before add_production()' + if term in self.Precedence: + raise GrammarError('Precedence already specified for terminal %r' % term) + if assoc not in ['left', 'right', 'nonassoc']: + raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'") + self.Precedence[term] = (assoc, level) + + # ----------------------------------------------------------------------------- + # add_production() + # + # Given an action function, this function assembles a production rule and + # computes its precedence level. + # + # The production rule is supplied as a list of symbols. For example, + # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and + # symbols ['expr','PLUS','term']. + # + # Precedence is determined by the precedence of the right-most non-terminal + # or the precedence of a terminal specified by %prec. + # + # A variety of error checks are performed to make sure production symbols + # are valid and that %prec is used correctly. + # ----------------------------------------------------------------------------- + + def add_production(self, prodname, syms, func=None, file='', line=0): + + if prodname in self.Terminals: + raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname)) + if prodname == 'error': + raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname)) + if not _is_identifier.match(prodname): + raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname)) + + # Look for literal tokens + for n, s in enumerate(syms): + if s[0] in "'\"": + try: + c = eval(s) + if (len(c) > 1): + raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' % + (file, line, s, prodname)) + if c not in self.Terminals: + self.Terminals[c] = [] + syms[n] = c + continue + except SyntaxError: + pass + if not _is_identifier.match(s) and s != '%prec': + raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname)) + + # Determine the precedence level + if '%prec' in syms: + if syms[-1] == '%prec': + raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line)) + if syms[-2] != '%prec': + raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' % + (file, line)) + precname = syms[-1] + prodprec = self.Precedence.get(precname) + if not prodprec: + raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname)) + else: + self.UsedPrecedence.add(precname) + del syms[-2:] # Drop %prec from the rule + else: + # If no %prec, precedence is determined by the rightmost terminal symbol + precname = rightmost_terminal(syms, self.Terminals) + prodprec = self.Precedence.get(precname, ('right', 0)) + + # See if the rule is already in the rulemap + map = '%s -> %s' % (prodname, syms) + if map in self.Prodmap: + m = self.Prodmap[map] + raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) + + 'Previous definition at %s:%d' % (m.file, m.line)) + + # From this point on, everything is valid. 
Create a new Production instance + pnumber = len(self.Productions) + if prodname not in self.Nonterminals: + self.Nonterminals[prodname] = [] + + # Add the production number to Terminals and Nonterminals + for t in syms: + if t in self.Terminals: + self.Terminals[t].append(pnumber) + else: + if t not in self.Nonterminals: + self.Nonterminals[t] = [] + self.Nonterminals[t].append(pnumber) + + # Create a production and add it to the list of productions + p = Production(pnumber, prodname, syms, prodprec, func, file, line) + self.Productions.append(p) + self.Prodmap[map] = p + + # Add to the global productions list + try: + self.Prodnames[prodname].append(p) + except KeyError: + self.Prodnames[prodname] = [p] + + # ----------------------------------------------------------------------------- + # set_start() + # + # Sets the starting symbol and creates the augmented grammar. Production + # rule 0 is S' -> start where start is the start symbol. + # ----------------------------------------------------------------------------- + + def set_start(self, start=None): + if not start: + start = self.Productions[1].name + if start not in self.Nonterminals: + raise GrammarError('start symbol %s undefined' % start) + self.Productions[0] = Production(0, "S'", [start]) + self.Nonterminals[start].append(0) + self.Start = start + + # ----------------------------------------------------------------------------- + # find_unreachable() + # + # Find all of the nonterminal symbols that can't be reached from the starting + # symbol. Returns a list of nonterminals that can't be reached. + # ----------------------------------------------------------------------------- + + def find_unreachable(self): + + # Mark all symbols that are reachable from a symbol s + def mark_reachable_from(s): + if s in reachable: + return + reachable.add(s) + for p in self.Prodnames.get(s, []): + for r in p.prod: + mark_reachable_from(r) + + reachable = set() + mark_reachable_from(self.Productions[0].prod[0]) + return [s for s in self.Nonterminals if s not in reachable] + + # ----------------------------------------------------------------------------- + # infinite_cycles() + # + # This function looks at the various parsing rules and tries to detect + # infinite recursion cycles (grammar rules where there is no possible way + # to derive a string of only terminals). + # ----------------------------------------------------------------------------- + + def infinite_cycles(self): + terminates = {} + + # Terminals: + for t in self.Terminals: + terminates[t] = True + + terminates['$end'] = True + + # Nonterminals: + + # Initialize to false: + for n in self.Nonterminals: + terminates[n] = False + + # Then propagate termination until no change: + while True: + some_change = False + for (n, pl) in self.Prodnames.items(): + # Nonterminal n terminates iff any of its productions terminates. + for p in pl: + # Production p terminates iff all of its rhs symbols terminate. + for s in p.prod: + if not terminates[s]: + # The symbol s does not terminate, + # so production p does not terminate. + p_terminates = False + break + else: + # didn't break from the loop, + # so every symbol s terminates + # so production p terminates. + p_terminates = True + + if p_terminates: + # symbol n terminates! + if not terminates[n]: + terminates[n] = True + some_change = True + # Don't need to consider any more productions for this n. 
+                        break
+
+            if not some_change:
+                break
+
+        infinite = []
+        for (s, term) in terminates.items():
+            if not term:
+                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
+                    # s is used-but-not-defined, and we've already warned of that,
+                    # so it would be overkill to say that it's also non-terminating.
+                    pass
+                else:
+                    infinite.append(s)
+
+        return infinite
+
+    # -----------------------------------------------------------------------------
+    # undefined_symbols()
+    #
+    # Find all symbols that were used in the grammar, but not defined as tokens or
+    # grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
+    # and prod is the production where the symbol was used.
+    # -----------------------------------------------------------------------------
+    def undefined_symbols(self):
+        result = []
+        for p in self.Productions:
+            if not p:
+                continue
+
+            for s in p.prod:
+                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
+                    result.append((s, p))
+        return result
+
+    # -----------------------------------------------------------------------------
+    # unused_terminals()
+    #
+    # Find all terminals that were defined, but not used by the grammar.  Returns
+    # a list of all unused terminal symbols.
+    # -----------------------------------------------------------------------------
+    def unused_terminals(self):
+        unused_tok = []
+        for s, v in self.Terminals.items():
+            if s != 'error' and not v:
+                unused_tok.append(s)
+
+        return unused_tok
+
+    # ------------------------------------------------------------------------------
+    # unused_rules()
+    #
+    # Find all grammar rules that were defined, but not used (maybe not reachable).
+    # Returns a list of productions.
+    # ------------------------------------------------------------------------------
+    def unused_rules(self):
+        unused_prod = []
+        for s, v in self.Nonterminals.items():
+            if not v:
+                p = self.Prodnames[s][0]
+                unused_prod.append(p)
+        return unused_prod
+
+    # -----------------------------------------------------------------------------
+    # unused_precedence()
+    #
+    # Returns a list of tuples (term, precedence) corresponding to precedence
+    # rules that were never used by the grammar.  term is the name of the terminal
+    # on which precedence was applied and precedence is a string such as 'left' or
+    # 'right' corresponding to the type of precedence.
+    # -----------------------------------------------------------------------------
+    def unused_precedence(self):
+        unused = []
+        for termname in self.Precedence:
+            if not (termname in self.Terminals or termname in self.UsedPrecedence):
+                unused.append((termname, self.Precedence[termname][0]))
+
+        return unused
+
+    # -------------------------------------------------------------------------
+    # _first()
+    #
+    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+    #
+    # During execution of compute_first(), the result may be incomplete.
+    # Afterward (e.g., when called from compute_follow()), it will be complete.
+    # -------------------------------------------------------------------------
+    def _first(self, beta):
+
+        # We are computing First(x1,x2,x3,...,xn)
+        result = []
+        for x in beta:
+            x_produces_empty = False
+
+            # Add all the non-<empty> symbols of First[x] to the result.
+            for f in self.First[x]:
+                if f == '<empty>':
+                    x_produces_empty = True
+                else:
+                    if f not in result:
+                        result.append(f)
+
+            if x_produces_empty:
+                # We have to consider the next x in beta,
+                # i.e. stay in the loop.
+                pass
+            else:
+                # We don't have to consider any further symbols in beta.
+                break
+        else:
+            # There was no 'break' from the loop,
+            # so x_produces_empty was true for all x in beta,
+            # so beta produces empty as well.
+            result.append('<empty>')
+
+        return result
+
+    # -------------------------------------------------------------------------
+    # compute_first()
+    #
+    # Compute the value of FIRST1(X) for all symbols
+    # -------------------------------------------------------------------------
+    def compute_first(self):
+        if self.First:
+            return self.First
+
+        # Terminals:
+        for t in self.Terminals:
+            self.First[t] = [t]
+
+        self.First['$end'] = ['$end']
+
+        # Nonterminals:
+
+        # Initialize to the empty set:
+        for n in self.Nonterminals:
+            self.First[n] = []
+
+        # Then propagate symbols until no change:
+        while True:
+            some_change = False
+            for n in self.Nonterminals:
+                for p in self.Prodnames[n]:
+                    for f in self._first(p.prod):
+                        if f not in self.First[n]:
+                            self.First[n].append(f)
+                            some_change = True
+            if not some_change:
+                break
+
+        return self.First
+
+    # ---------------------------------------------------------------------
+    # compute_follow()
+    #
+    # Computes all of the follow sets for every non-terminal symbol.  The
+    # follow set is the set of all symbols that might follow a given
+    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
+    # ---------------------------------------------------------------------
+    def compute_follow(self, start=None):
+        # If already computed, return the result
+        if self.Follow:
+            return self.Follow
+
+        # If first sets not computed yet, do that first.
+        if not self.First:
+            self.compute_first()
+
+        # Add '$end' to the follow list of the start symbol
+        for k in self.Nonterminals:
+            self.Follow[k] = []
+
+        if not start:
+            start = self.Productions[1].name
+
+        self.Follow[start] = ['$end']
+
+        while True:
+            didadd = False
+            for p in self.Productions[1:]:
+                # Here is the production set
+                for i, B in enumerate(p.prod):
+                    if B in self.Nonterminals:
+                        # Okay. We got a non-terminal in a production
+                        fst = self._first(p.prod[i+1:])
+                        hasempty = False
+                        for f in fst:
+                            if f != '<empty>' and f not in self.Follow[B]:
+                                self.Follow[B].append(f)
+                                didadd = True
+                            if f == '<empty>':
+                                hasempty = True
+                        if hasempty or i == (len(p.prod)-1):
+                            # Add elements of follow(a) to follow(b)
+                            for f in self.Follow[p.name]:
+                                if f not in self.Follow[B]:
+                                    self.Follow[B].append(f)
+                                    didadd = True
+            if not didadd:
+                break
+        return self.Follow
+
+
+    # -----------------------------------------------------------------------------
+    # build_lritems()
+    #
+    # This function walks the list of productions and builds a complete set of the
+    # LR items.  The LR items are stored in two ways:  First, they are uniquely
+    # numbered and placed in the list _lritems.  Second, a linked list of LR items
+    # is built for each production.  For example:
+    #
+    #   E -> E PLUS E
+    #
+    # Creates the list
+    #
+    #    [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E .
] + # ----------------------------------------------------------------------------- + + def build_lritems(self): + for p in self.Productions: + lastlri = p + i = 0 + lr_items = [] + while True: + if i > len(p): + lri = None + else: + lri = LRItem(p, i) + # Precompute the list of productions immediately following + try: + lri.lr_after = self.Prodnames[lri.prod[i+1]] + except (IndexError, KeyError): + lri.lr_after = [] + try: + lri.lr_before = lri.prod[i-1] + except IndexError: + lri.lr_before = None + + lastlri.lr_next = lri + if not lri: + break + lr_items.append(lri) + lastlri = lri + i += 1 + p.lr_items = lr_items + +# ----------------------------------------------------------------------------- +# == Class LRTable == +# +# This basic class represents a basic table of LR parsing information. +# Methods for generating the tables are not defined here. They are defined +# in the derived class LRGeneratedTable. +# ----------------------------------------------------------------------------- + +class VersionError(YaccError): + pass + +class LRTable(object): + def __init__(self): + self.lr_action = None + self.lr_goto = None + self.lr_productions = None + self.lr_method = None + + def read_table(self, module): + if isinstance(module, types.ModuleType): + parsetab = module + else: + exec('import %s' % module) + parsetab = sys.modules[module] + + if parsetab._tabversion != __tabversion__: + raise VersionError('yacc table file version is out of date') + + self.lr_action = parsetab._lr_action + self.lr_goto = parsetab._lr_goto + + self.lr_productions = [] + for p in parsetab._lr_productions: + self.lr_productions.append(MiniProduction(*p)) + + self.lr_method = parsetab._lr_method + return parsetab._lr_signature + + def read_pickle(self, filename): + try: + import cPickle as pickle + except ImportError: + import pickle + + if not os.path.exists(filename): + raise ImportError + + in_f = open(filename, 'rb') + + tabversion = pickle.load(in_f) + if tabversion != __tabversion__: + raise VersionError('yacc table file version is out of date') + self.lr_method = pickle.load(in_f) + signature = pickle.load(in_f) + self.lr_action = pickle.load(in_f) + self.lr_goto = pickle.load(in_f) + productions = pickle.load(in_f) + + self.lr_productions = [] + for p in productions: + self.lr_productions.append(MiniProduction(*p)) + + in_f.close() + return signature + + # Bind all production function names to callable objects in pdict + def bind_callables(self, pdict): + for p in self.lr_productions: + p.bind(pdict) + + +# ----------------------------------------------------------------------------- +# === LR Generator === +# +# The following classes and functions are used to generate LR parsing tables on +# a grammar. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# digraph() +# traverse() +# +# The following two functions are used to compute set valued functions +# of the form: +# +# F(x) = F'(x) U U{F(y) | x R y} +# +# This is used to compute the values of Read() sets as well as FOLLOW sets +# in LALR(1) generation. 
+# +# Inputs: X - An input set +# R - A relation +# FP - Set-valued function +# ------------------------------------------------------------------------------ + +def digraph(X, R, FP): + N = {} + for x in X: + N[x] = 0 + stack = [] + F = {} + for x in X: + if N[x] == 0: + traverse(x, N, stack, F, X, R, FP) + return F + +def traverse(x, N, stack, F, X, R, FP): + stack.append(x) + d = len(stack) + N[x] = d + F[x] = FP(x) # F(X) <- F'(x) + + rel = R(x) # Get y's related to x + for y in rel: + if N[y] == 0: + traverse(y, N, stack, F, X, R, FP) + N[x] = min(N[x], N[y]) + for a in F.get(y, []): + if a not in F[x]: + F[x].append(a) + if N[x] == d: + N[stack[-1]] = MAXINT + F[stack[-1]] = F[x] + element = stack.pop() + while element != x: + N[stack[-1]] = MAXINT + F[stack[-1]] = F[x] + element = stack.pop() + +class LALRError(YaccError): + pass + +# ----------------------------------------------------------------------------- +# == LRGeneratedTable == +# +# This class implements the LR table generation algorithm. There are no +# public methods except for write() +# ----------------------------------------------------------------------------- + +class LRGeneratedTable(LRTable): + def __init__(self, grammar, method='LALR', log=None): + if method not in ['SLR', 'LALR']: + raise LALRError('Unsupported method %s' % method) + + self.grammar = grammar + self.lr_method = method + + # Set up the logger + if not log: + log = NullLogger() + self.log = log + + # Internal attributes + self.lr_action = {} # Action table + self.lr_goto = {} # Goto table + self.lr_productions = grammar.Productions # Copy of grammar Production array + self.lr_goto_cache = {} # Cache of computed gotos + self.lr0_cidhash = {} # Cache of closures + + self._add_count = 0 # Internal counter used to detect cycles + + # Diagonistic information filled in by the table generator + self.sr_conflict = 0 + self.rr_conflict = 0 + self.conflicts = [] # List of conflicts + + self.sr_conflicts = [] + self.rr_conflicts = [] + + # Build the tables + self.grammar.build_lritems() + self.grammar.compute_first() + self.grammar.compute_follow() + self.lr_parse_table() + + # Compute the LR(0) closure operation on I, where I is a set of LR(0) items. + + def lr0_closure(self, I): + self._add_count += 1 + + # Add everything in I to J + J = I[:] + didadd = True + while didadd: + didadd = False + for j in J: + for x in j.lr_after: + if getattr(x, 'lr0_added', 0) == self._add_count: + continue + # Add B --> .G to J + J.append(x.lr_next) + x.lr0_added = self._add_count + didadd = True + + return J + + # Compute the LR(0) goto function goto(I,X) where I is a set + # of LR(0) items and X is a grammar symbol. This function is written + # in a way that guarantees uniqueness of the generated goto sets + # (i.e. the same goto set will never be returned as two different Python + # objects). With uniqueness, we can later do fast set comparisons using + # id(obj) instead of element-wise comparison. 
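+    #
+    # Sketch of the contract (illustrative grammar assumed): if I contains the
+    # item 'E -> E . PLUS T' and x is 'PLUS', then lr0_goto(I, 'PLUS') is the
+    # LR(0) closure of { 'E -> E PLUS . T' }.  Calling it again with the same
+    # I and x returns the very same list object, which is what makes the
+    # id()-based caching in lr0_items() valid.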
+ + def lr0_goto(self, I, x): + # First we look for a previously cached entry + g = self.lr_goto_cache.get((id(I), x)) + if g: + return g + + # Now we generate the goto set in a way that guarantees uniqueness + # of the result + + s = self.lr_goto_cache.get(x) + if not s: + s = {} + self.lr_goto_cache[x] = s + + gs = [] + for p in I: + n = p.lr_next + if n and n.lr_before == x: + s1 = s.get(id(n)) + if not s1: + s1 = {} + s[id(n)] = s1 + gs.append(n) + s = s1 + g = s.get('$end') + if not g: + if gs: + g = self.lr0_closure(gs) + s['$end'] = g + else: + s['$end'] = gs + self.lr_goto_cache[(id(I), x)] = g + return g + + # Compute the LR(0) sets of item function + def lr0_items(self): + C = [self.lr0_closure([self.grammar.Productions[0].lr_next])] + i = 0 + for I in C: + self.lr0_cidhash[id(I)] = i + i += 1 + + # Loop over the items in C and each grammar symbols + i = 0 + while i < len(C): + I = C[i] + i += 1 + + # Collect all of the symbols that could possibly be in the goto(I,X) sets + asyms = {} + for ii in I: + for s in ii.usyms: + asyms[s] = None + + for x in asyms: + g = self.lr0_goto(I, x) + if not g or id(g) in self.lr0_cidhash: + continue + self.lr0_cidhash[id(g)] = len(C) + C.append(g) + + return C + + # ----------------------------------------------------------------------------- + # ==== LALR(1) Parsing ==== + # + # LALR(1) parsing is almost exactly the same as SLR except that instead of + # relying upon Follow() sets when performing reductions, a more selective + # lookahead set that incorporates the state of the LR(0) machine is utilized. + # Thus, we mainly just have to focus on calculating the lookahead sets. + # + # The method used here is due to DeRemer and Pennelo (1982). + # + # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1) + # Lookahead Sets", ACM Transactions on Programming Languages and Systems, + # Vol. 4, No. 4, Oct. 1982, pp. 615-649 + # + # Further details can also be found in: + # + # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing", + # McGraw-Hill Book Company, (1985). + # + # ----------------------------------------------------------------------------- + + # ----------------------------------------------------------------------------- + # compute_nullable_nonterminals() + # + # Creates a dictionary containing all of the non-terminals that might produce + # an empty production. + # ----------------------------------------------------------------------------- + + def compute_nullable_nonterminals(self): + nullable = set() + num_nullable = 0 + while True: + for p in self.grammar.Productions[1:]: + if p.len == 0: + nullable.add(p.name) + continue + for t in p.prod: + if t not in nullable: + break + else: + nullable.add(p.name) + if len(nullable) == num_nullable: + break + num_nullable = len(nullable) + return nullable + + # ----------------------------------------------------------------------------- + # find_nonterminal_trans(C) + # + # Given a set of LR(0) items, this functions finds all of the non-terminal + # transitions. These are transitions in which a dot appears immediately before + # a non-terminal. Returns a list of tuples of the form (state,N) where state + # is the state number and N is the nonterminal symbol. + # + # The input C is the set of LR(0) items. 
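compute_nullable_nonterminals() above is a textbook monotone fixed point; rerunning the same loop standalone on a hypothetical three-rule grammar makes the termination condition easy to see:

```python
# Standalone rerun of the nullable fixed point from
# compute_nullable_nonterminals(); the toy grammar below is hypothetical.
productions = [
    ('A', ()),            # A -> <empty>
    ('B', ('A', 'A')),    # B -> A A
    ('C', ('B', 'x')),    # C -> B x   ('x' is a terminal, so C is not nullable)
]

nullable = set()
while True:
    before = len(nullable)
    for name, rhs in productions:
        if all(sym in nullable for sym in rhs):   # vacuously true for empty rhs
            nullable.add(name)
    if len(nullable) == before:                   # no growth: fixed point reached
        break

print(sorted(nullable))   # ['A', 'B']
```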
+ # ----------------------------------------------------------------------------- + + def find_nonterminal_transitions(self, C): + trans = [] + for stateno, state in enumerate(C): + for p in state: + if p.lr_index < p.len - 1: + t = (stateno, p.prod[p.lr_index+1]) + if t[1] in self.grammar.Nonterminals: + if t not in trans: + trans.append(t) + return trans + + # ----------------------------------------------------------------------------- + # dr_relation() + # + # Computes the DR(p,A) relationships for non-terminal transitions. The input + # is a tuple (state,N) where state is a number and N is a nonterminal symbol. + # + # Returns a list of terminals. + # ----------------------------------------------------------------------------- + + def dr_relation(self, C, trans, nullable): + dr_set = {} + state, N = trans + terms = [] + + g = self.lr0_goto(C[state], N) + for p in g: + if p.lr_index < p.len - 1: + a = p.prod[p.lr_index+1] + if a in self.grammar.Terminals: + if a not in terms: + terms.append(a) + + # This extra bit is to handle the start state + if state == 0 and N == self.grammar.Productions[0].prod[0]: + terms.append('$end') + + return terms + + # ----------------------------------------------------------------------------- + # reads_relation() + # + # Computes the READS() relation (p,A) READS (t,C). + # ----------------------------------------------------------------------------- + + def reads_relation(self, C, trans, empty): + # Look for empty transitions + rel = [] + state, N = trans + + g = self.lr0_goto(C[state], N) + j = self.lr0_cidhash.get(id(g), -1) + for p in g: + if p.lr_index < p.len - 1: + a = p.prod[p.lr_index + 1] + if a in empty: + rel.append((j, a)) + + return rel + + # ----------------------------------------------------------------------------- + # compute_lookback_includes() + # + # Determines the lookback and includes relations + # + # LOOKBACK: + # + # This relation is determined by running the LR(0) state machine forward. + # For example, starting with a production "N : . A B C", we run it forward + # to obtain "N : A B C ." We then build a relationship between this final + # state and the starting state. These relationships are stored in a dictionary + # lookdict. + # + # INCLUDES: + # + # Computes the INCLUDE() relation (p,A) INCLUDES (p',B). + # + # This relation is used to determine non-terminal transitions that occur + # inside of other non-terminal transition states. (p,A) INCLUDES (p', B) + # if the following holds: + # + # B -> LAT, where T -> epsilon and p' -L-> p + # + # L is essentially a prefix (which may be empty), T is a suffix that must be + # able to derive an empty string. State p' must lead to state p with the string L. + # + # ----------------------------------------------------------------------------- + + def compute_lookback_includes(self, C, trans, nullable): + lookdict = {} # Dictionary of lookback relations + includedict = {} # Dictionary of include relations + + # Make a dictionary of non-terminal transitions + dtrans = {} + for t in trans: + dtrans[t] = 1 + + # Loop over all transitions and compute lookbacks and includes + for state, N in trans: + lookb = [] + includes = [] + for p in C[state]: + if p.name != N: + continue + + # Okay, we have a name match. We now follow the production all the way + # through the state machine until we get the . 
on the right hand side + + lr_index = p.lr_index + j = state + while lr_index < p.len - 1: + lr_index = lr_index + 1 + t = p.prod[lr_index] + + # Check to see if this symbol and state are a non-terminal transition + if (j, t) in dtrans: + # Yes. Okay, there is some chance that this is an includes relation + # the only way to know for certain is whether the rest of the + # production derives empty + + li = lr_index + 1 + while li < p.len: + if p.prod[li] in self.grammar.Terminals: + break # No forget it + if p.prod[li] not in nullable: + break + li = li + 1 + else: + # Appears to be a relation between (j,t) and (state,N) + includes.append((j, t)) + + g = self.lr0_goto(C[j], t) # Go to next set + j = self.lr0_cidhash.get(id(g), -1) # Go to next state + + # When we get here, j is the final state, now we have to locate the production + for r in C[j]: + if r.name != p.name: + continue + if r.len != p.len: + continue + i = 0 + # This look is comparing a production ". A B C" with "A B C ." + while i < r.lr_index: + if r.prod[i] != p.prod[i+1]: + break + i = i + 1 + else: + lookb.append((j, r)) + for i in includes: + if i not in includedict: + includedict[i] = [] + includedict[i].append((state, N)) + lookdict[(state, N)] = lookb + + return lookdict, includedict + + # ----------------------------------------------------------------------------- + # compute_read_sets() + # + # Given a set of LR(0) items, this function computes the read sets. + # + # Inputs: C = Set of LR(0) items + # ntrans = Set of nonterminal transitions + # nullable = Set of empty transitions + # + # Returns a set containing the read sets + # ----------------------------------------------------------------------------- + + def compute_read_sets(self, C, ntrans, nullable): + FP = lambda x: self.dr_relation(C, x, nullable) + R = lambda x: self.reads_relation(C, x, nullable) + F = digraph(ntrans, R, FP) + return F + + # ----------------------------------------------------------------------------- + # compute_follow_sets() + # + # Given a set of LR(0) items, a set of non-terminal transitions, a readset, + # and an include set, this function computes the follow sets + # + # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)} + # + # Inputs: + # ntrans = Set of nonterminal transitions + # readsets = Readset (previously computed) + # inclsets = Include sets (previously computed) + # + # Returns a set containing the follow sets + # ----------------------------------------------------------------------------- + + def compute_follow_sets(self, ntrans, readsets, inclsets): + FP = lambda x: readsets[x] + R = lambda x: inclsets.get(x, []) + F = digraph(ntrans, R, FP) + return F + + # ----------------------------------------------------------------------------- + # add_lookaheads() + # + # Attaches the lookahead symbols to grammar rules. 
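compute_read_sets() and compute_follow_sets() above differ only in which relation R and seed function FP they hand to digraph(). A sketch of the Follow computation with made-up transition keys and precomputed sets:

```python
# Follow(p,A) = Read(p,A) U U{ Follow(p',B) | (p,A) INCLUDES (p',B) }
# Hypothetical inputs: two nonterminal transitions and their precomputed sets.
ntrans   = [(0, 'E'), (2, 'T')]
readsets = {(0, 'E'): ['+', '$end'], (2, 'T'): ['*']}
inclsets = {(2, 'T'): [(0, 'E')]}             # (2,'T') INCLUDES (0,'E')

FP = lambda x: readsets[x]                    # seed with Read(p,A)
R  = lambda x: inclsets.get(x, [])            # pull in included Follows

# digraph(ntrans, R, FP) as defined in this patch yields:
#   {(0, 'E'): ['+', '$end'], (2, 'T'): ['*', '+', '$end']}
```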
+ # + # Inputs: lookbacks - Set of lookback relations + # followset - Computed follow set + # + # This function directly attaches the lookaheads to productions contained + # in the lookbacks set + # ----------------------------------------------------------------------------- + + def add_lookaheads(self, lookbacks, followset): + for trans, lb in lookbacks.items(): + # Loop over productions in lookback + for state, p in lb: + if state not in p.lookaheads: + p.lookaheads[state] = [] + f = followset.get(trans, []) + for a in f: + if a not in p.lookaheads[state]: + p.lookaheads[state].append(a) + + # ----------------------------------------------------------------------------- + # add_lalr_lookaheads() + # + # This function does all of the work of adding lookahead information for use + # with LALR parsing + # ----------------------------------------------------------------------------- + + def add_lalr_lookaheads(self, C): + # Determine all of the nullable nonterminals + nullable = self.compute_nullable_nonterminals() + + # Find all non-terminal transitions + trans = self.find_nonterminal_transitions(C) + + # Compute read sets + readsets = self.compute_read_sets(C, trans, nullable) + + # Compute lookback/includes relations + lookd, included = self.compute_lookback_includes(C, trans, nullable) + + # Compute LALR FOLLOW sets + followsets = self.compute_follow_sets(trans, readsets, included) + + # Add all of the lookaheads + self.add_lookaheads(lookd, followsets) + + # ----------------------------------------------------------------------------- + # lr_parse_table() + # + # This function constructs the parse tables for SLR or LALR + # ----------------------------------------------------------------------------- + def lr_parse_table(self): + Productions = self.grammar.Productions + Precedence = self.grammar.Precedence + goto = self.lr_goto # Goto array + action = self.lr_action # Action array + log = self.log # Logger for output + + actionp = {} # Action production array (temporary) + + log.info('Parsing method: %s', self.lr_method) + + # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items + # This determines the number of states + + C = self.lr0_items() + + if self.lr_method == 'LALR': + self.add_lalr_lookaheads(C) + + # Build the parser table, state by state + st = 0 + for I in C: + # Loop over each production in I + actlist = [] # List of actions + st_action = {} + st_actionp = {} + st_goto = {} + log.info('') + log.info('state %d', st) + log.info('') + for p in I: + log.info(' (%d) %s', p.number, p) + log.info('') + + for p in I: + if p.len == p.lr_index + 1: + if p.name == "S'": + # Start symbol. Accept! + st_action['$end'] = 0 + st_actionp['$end'] = p + else: + # We are at the end of a production. Reduce! + if self.lr_method == 'LALR': + laheads = p.lookaheads[st] + else: + laheads = self.grammar.Follow[p.name] + for a in laheads: + actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p))) + r = st_action.get(a) + if r is not None: + # Whoa. Have a shift/reduce or reduce/reduce conflict + if r > 0: + # Need to decide on shift or reduce here + # By default we favor shifting. Need to add + # some precedence rules here. + + # Shift precedence comes from the token + sprec, slevel = Precedence.get(a, ('right', 0)) + + # Reduce precedence comes from rule being reduced (p) + rprec, rlevel = Productions[p.number].prec + + if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): + # We really need to reduce here. 
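The tie-breaking applied here (and mirrored in the shift branch further down) condenses to a single comparison between the shifting token's precedence and the reducing rule's precedence. A hypothetical restatement for reference:

```python
# Condensed restatement of the shift/reduce tie-break above (sketch only).
# Level 0 means "no precedence declared"; undeclared tokens and rules
# default to ('right', 0), so the fallback action is to shift.
def resolve(slevel, rlevel, rassoc):
    if slevel < rlevel or (slevel == rlevel and rassoc == 'left'):
        return 'reduce'
    if slevel == rlevel and rassoc == 'nonassoc':
        return 'error'                        # st_action[a] = None above
    return 'shift'

assert resolve(1, 2, 'left') == 'reduce'      # token binds looser than rule
assert resolve(2, 2, 'left') == 'reduce'      # equal level, left-assoc
assert resolve(2, 2, 'nonassoc') == 'error'   # nonassoc forbids chaining
assert resolve(0, 0, 'right') == 'shift'      # nothing declared: shift wins
```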
+ st_action[a] = -p.number + st_actionp[a] = p + if not slevel and not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as reduce', a) + self.sr_conflicts.append((st, a, 'reduce')) + Productions[p.number].reduced += 1 + elif (slevel == rlevel) and (rprec == 'nonassoc'): + st_action[a] = None + else: + # Hmmm. Guess we'll keep the shift + if not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as shift', a) + self.sr_conflicts.append((st, a, 'shift')) + elif r < 0: + # Reduce/reduce conflict. In this case, we favor the rule + # that was defined first in the grammar file + oldp = Productions[-r] + pp = Productions[p.number] + if oldp.line > pp.line: + st_action[a] = -p.number + st_actionp[a] = p + chosenp, rejectp = pp, oldp + Productions[p.number].reduced += 1 + Productions[oldp.number].reduced -= 1 + else: + chosenp, rejectp = oldp, pp + self.rr_conflicts.append((st, chosenp, rejectp)) + log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)', + a, st_actionp[a].number, st_actionp[a]) + else: + raise LALRError('Unknown conflict in state %d' % st) + else: + st_action[a] = -p.number + st_actionp[a] = p + Productions[p.number].reduced += 1 + else: + i = p.lr_index + a = p.prod[i+1] # Get symbol right after the "." + if a in self.grammar.Terminals: + g = self.lr0_goto(I, a) + j = self.lr0_cidhash.get(id(g), -1) + if j >= 0: + # We are in a shift state + actlist.append((a, p, 'shift and go to state %d' % j)) + r = st_action.get(a) + if r is not None: + # Whoa have a shift/reduce or shift/shift conflict + if r > 0: + if r != j: + raise LALRError('Shift/shift conflict in state %d' % st) + elif r < 0: + # Do a precedence check. + # - if precedence of reduce rule is higher, we reduce. + # - if precedence of reduce is same and left assoc, we reduce. + # - otherwise we shift + + # Shift precedence comes from the token + sprec, slevel = Precedence.get(a, ('right', 0)) + + # Reduce precedence comes from the rule that could have been reduced + rprec, rlevel = Productions[st_actionp[a].number].prec + + if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')): + # We decide to shift here... highest precedence to shift + Productions[st_actionp[a].number].reduced -= 1 + st_action[a] = j + st_actionp[a] = p + if not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as shift', a) + self.sr_conflicts.append((st, a, 'shift')) + elif (slevel == rlevel) and (rprec == 'nonassoc'): + st_action[a] = None + else: + # Hmmm. Guess we'll keep the reduce + if not slevel and not rlevel: + log.info(' ! shift/reduce conflict for %s resolved as reduce', a) + self.sr_conflicts.append((st, a, 'reduce')) + + else: + raise LALRError('Unknown conflict in state %d' % st) + else: + st_action[a] = j + st_actionp[a] = p + + # Print the actions associated with each terminal + _actprint = {} + for a, p, m in actlist: + if a in st_action: + if p is st_actionp[a]: + log.info(' %-15s %s', a, m) + _actprint[(a, m)] = 1 + log.info('') + # Print the actions that were not used. (debugging) + not_used = 0 + for a, p, m in actlist: + if a in st_action: + if p is not st_actionp[a]: + if not (a, m) in _actprint: + log.debug(' ! 
%-15s [ %s ]', a, m) + not_used = 1 + _actprint[(a, m)] = 1 + if not_used: + log.debug('') + + # Construct the goto table for this state + + nkeys = {} + for ii in I: + for s in ii.usyms: + if s in self.grammar.Nonterminals: + nkeys[s] = None + for n in nkeys: + g = self.lr0_goto(I, n) + j = self.lr0_cidhash.get(id(g), -1) + if j >= 0: + st_goto[n] = j + log.info(' %-30s shift and go to state %d', n, j) + + action[st] = st_action + actionp[st] = st_actionp + goto[st] = st_goto + st += 1 + + # ----------------------------------------------------------------------------- + # write() + # + # This function writes the LR parsing tables to a file + # ----------------------------------------------------------------------------- + + def write_table(self, tabmodule, outputdir='', signature=''): + if isinstance(tabmodule, types.ModuleType): + raise IOError("Won't overwrite existing tabmodule") + + basemodulename = tabmodule.split('.')[-1] + filename = os.path.join(outputdir, basemodulename) + '.py' + try: + f = open(filename, 'w') + + f.write(''' +# %s +# This file is automatically generated. Do not edit. +_tabversion = %r + +_lr_method = %r + +_lr_signature = %r + ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature)) + + # Change smaller to 0 to go back to original tables + smaller = 1 + + # Factor out names to try and make smaller + if smaller: + items = {} + + for s, nd in self.lr_action.items(): + for name, v in nd.items(): + i = items.get(name) + if not i: + i = ([], []) + items[name] = i + i[0].append(s) + i[1].append(v) + + f.write('\n_lr_action_items = {') + for k, v in items.items(): + f.write('%r:([' % k) + for i in v[0]: + f.write('%r,' % i) + f.write('],[') + for i in v[1]: + f.write('%r,' % i) + + f.write(']),') + f.write('}\n') + + f.write(''' +_lr_action = {} +for _k, _v in _lr_action_items.items(): + for _x,_y in zip(_v[0],_v[1]): + if not _x in _lr_action: _lr_action[_x] = {} + _lr_action[_x][_k] = _y +del _lr_action_items +''') + + else: + f.write('\n_lr_action = { ') + for k, v in self.lr_action.items(): + f.write('(%r,%r):%r,' % (k[0], k[1], v)) + f.write('}\n') + + if smaller: + # Factor out names to try and make smaller + items = {} + + for s, nd in self.lr_goto.items(): + for name, v in nd.items(): + i = items.get(name) + if not i: + i = ([], []) + items[name] = i + i[0].append(s) + i[1].append(v) + + f.write('\n_lr_goto_items = {') + for k, v in items.items(): + f.write('%r:([' % k) + for i in v[0]: + f.write('%r,' % i) + f.write('],[') + for i in v[1]: + f.write('%r,' % i) + + f.write(']),') + f.write('}\n') + + f.write(''' +_lr_goto = {} +for _k, _v in _lr_goto_items.items(): + for _x, _y in zip(_v[0], _v[1]): + if not _x in _lr_goto: _lr_goto[_x] = {} + _lr_goto[_x][_k] = _y +del _lr_goto_items +''') + else: + f.write('\n_lr_goto = { ') + for k, v in self.lr_goto.items(): + f.write('(%r,%r):%r,' % (k[0], k[1], v)) + f.write('}\n') + + # Write production table + f.write('_lr_productions = [\n') + for p in self.lr_productions: + if p.func: + f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len, + p.func, os.path.basename(p.file), p.line)) + else: + f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len)) + f.write(']\n') + f.close() + + except IOError as e: + raise + + + # ----------------------------------------------------------------------------- + # pickle_table() + # + # This function pickles the LR parsing tables to a supplied file object + # ----------------------------------------------------------------------------- + + def 
pickle_table(self, filename, signature=''): + try: + import cPickle as pickle + except ImportError: + import pickle + with open(filename, 'wb') as outf: + pickle.dump(__tabversion__, outf, pickle_protocol) + pickle.dump(self.lr_method, outf, pickle_protocol) + pickle.dump(signature, outf, pickle_protocol) + pickle.dump(self.lr_action, outf, pickle_protocol) + pickle.dump(self.lr_goto, outf, pickle_protocol) + + outp = [] + for p in self.lr_productions: + if p.func: + outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) + else: + outp.append((str(p), p.name, p.len, None, None, None)) + pickle.dump(outp, outf, pickle_protocol) + +# ----------------------------------------------------------------------------- +# === INTROSPECTION === +# +# The following functions and classes are used to implement the PLY +# introspection features followed by the yacc() function itself. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# get_caller_module_dict() +# +# This function returns a dictionary containing all of the symbols defined within +# a caller further down the call stack. This is used to get the environment +# associated with the yacc() call if none was provided. +# ----------------------------------------------------------------------------- + +def get_caller_module_dict(levels): + f = sys._getframe(levels) + ldict = f.f_globals.copy() + if f.f_globals != f.f_locals: + ldict.update(f.f_locals) + return ldict + +# ----------------------------------------------------------------------------- +# parse_grammar() +# +# This takes a raw grammar rule string and parses it into production data +# ----------------------------------------------------------------------------- +def parse_grammar(doc, file, line): + grammar = [] + # Split the doc string into lines + pstrings = doc.splitlines() + lastp = None + dline = line + for ps in pstrings: + dline += 1 + p = ps.split() + if not p: + continue + try: + if p[0] == '|': + # This is a continuation of a previous rule + if not lastp: + raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline)) + prodname = lastp + syms = p[1:] + else: + prodname = p[0] + lastp = prodname + syms = p[2:] + assign = p[1] + if assign != ':' and assign != '::=': + raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline)) + + grammar.append((file, dline, prodname, syms)) + except SyntaxError: + raise + except Exception: + raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip())) + + return grammar + +# ----------------------------------------------------------------------------- +# ParserReflect() +# +# This class represents information extracted for building a parser including +# start symbol, error function, tokens, precedence list, action functions, +# etc. 
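parse_grammar() above is the piece that turns rule docstrings into (file, line, prodname, syms) tuples, including `|` continuations. A quick illustration; the grammar text, file name, and line number are hypothetical:

```python
# What parse_grammar() extracts from a rule docstring (illustrative values).
doc = '''expression : expression PLUS term
                    | term'''

# parse_grammar(doc, 'calc.py', 10) returns one tuple per alternative:
#   [('calc.py', 11, 'expression', ['expression', 'PLUS', 'term']),
#    ('calc.py', 12, 'expression', ['term'])]
# Note dline is pre-incremented, so the first rule is reported on line 11.
```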
+# ----------------------------------------------------------------------------- +class ParserReflect(object): + def __init__(self, pdict, log=None): + self.pdict = pdict + self.start = None + self.error_func = None + self.tokens = None + self.modules = set() + self.grammar = [] + self.error = False + + if log is None: + self.log = PlyLogger(sys.stderr) + else: + self.log = log + + # Get all of the basic information + def get_all(self): + self.get_start() + self.get_error_func() + self.get_tokens() + self.get_precedence() + self.get_pfunctions() + + # Validate all of the information + def validate_all(self): + self.validate_start() + self.validate_error_func() + self.validate_tokens() + self.validate_precedence() + self.validate_pfunctions() + self.validate_modules() + return self.error + + # Compute a signature over the grammar + def signature(self): + parts = [] + try: + if self.start: + parts.append(self.start) + if self.prec: + parts.append(''.join([''.join(p) for p in self.prec])) + if self.tokens: + parts.append(' '.join(self.tokens)) + for f in self.pfuncs: + if f[3]: + parts.append(f[3]) + except (TypeError, ValueError): + pass + return ''.join(parts) + + # ----------------------------------------------------------------------------- + # validate_modules() + # + # This method checks to see if there are duplicated p_rulename() functions + # in the parser module file. Without this function, it is really easy for + # users to make mistakes by cutting and pasting code fragments (and it's a real + # bugger to try and figure out why the resulting parser doesn't work). Therefore, + # we just do a little regular expression pattern matching of def statements + # to try and detect duplicates. + # ----------------------------------------------------------------------------- + + def validate_modules(self): + # Match def p_funcname( + fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') + + for module in self.modules: + try: + lines, linen = inspect.getsourcelines(module) + except IOError: + continue + + counthash = {} + for linen, line in enumerate(lines): + linen += 1 + m = fre.match(line) + if m: + name = m.group(1) + prev = counthash.get(name) + if not prev: + counthash[name] = linen + else: + filename = inspect.getsourcefile(module) + self.log.warning('%s:%d: Function %s redefined. 
Previously defined on line %d', + filename, linen, name, prev) + + # Get the start symbol + def get_start(self): + self.start = self.pdict.get('start') + + # Validate the start symbol + def validate_start(self): + if self.start is not None: + if not isinstance(self.start, string_types): + self.log.error("'start' must be a string") + + # Look for error handler + def get_error_func(self): + self.error_func = self.pdict.get('p_error') + + # Validate the error function + def validate_error_func(self): + if self.error_func: + if isinstance(self.error_func, types.FunctionType): + ismethod = 0 + elif isinstance(self.error_func, types.MethodType): + ismethod = 1 + else: + self.log.error("'p_error' defined, but is not a function or method") + self.error = True + return + + eline = self.error_func.__code__.co_firstlineno + efile = self.error_func.__code__.co_filename + module = inspect.getmodule(self.error_func) + self.modules.add(module) + + argcount = self.error_func.__code__.co_argcount - ismethod + if argcount != 1: + self.log.error('%s:%d: p_error() requires 1 argument', efile, eline) + self.error = True + + # Get the tokens map + def get_tokens(self): + tokens = self.pdict.get('tokens') + if not tokens: + self.log.error('No token list is defined') + self.error = True + return + + if not isinstance(tokens, (list, tuple)): + self.log.error('tokens must be a list or tuple') + self.error = True + return + + if not tokens: + self.log.error('tokens is empty') + self.error = True + return + + self.tokens = tokens + + # Validate the tokens + def validate_tokens(self): + # Validate the tokens. + if 'error' in self.tokens: + self.log.error("Illegal token name 'error'. Is a reserved word") + self.error = True + return + + terminals = set() + for n in self.tokens: + if n in terminals: + self.log.warning('Token %r multiply defined', n) + terminals.add(n) + + # Get the precedence map (if any) + def get_precedence(self): + self.prec = self.pdict.get('precedence') + + # Validate and parse the precedence map + def validate_precedence(self): + preclist = [] + if self.prec: + if not isinstance(self.prec, (list, tuple)): + self.log.error('precedence must be a list or tuple') + self.error = True + return + for level, p in enumerate(self.prec): + if not isinstance(p, (list, tuple)): + self.log.error('Bad precedence table') + self.error = True + return + + if len(p) < 2: + self.log.error('Malformed precedence entry %s. 
Must be (assoc, term, ..., term)', p) + self.error = True + return + assoc = p[0] + if not isinstance(assoc, string_types): + self.log.error('precedence associativity must be a string') + self.error = True + return + for term in p[1:]: + if not isinstance(term, string_types): + self.log.error('precedence items must be strings') + self.error = True + return + preclist.append((term, assoc, level+1)) + self.preclist = preclist + + # Get all p_functions from the grammar + def get_pfunctions(self): + p_functions = [] + for name, item in self.pdict.items(): + if not name.startswith('p_') or name == 'p_error': + continue + if isinstance(item, (types.FunctionType, types.MethodType)): + line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno) + module = inspect.getmodule(item) + p_functions.append((line, module, name, item.__doc__)) + + # Sort all of the actions by line number; make sure to stringify + # modules to make them sortable, since `line` may not uniquely sort all + # p functions + p_functions.sort(key=lambda p_function: ( + p_function[0], + str(p_function[1]), + p_function[2], + p_function[3])) + self.pfuncs = p_functions + + # Validate all of the p_functions + def validate_pfunctions(self): + grammar = [] + # Check for non-empty symbols + if len(self.pfuncs) == 0: + self.log.error('no rules of the form p_rulename are defined') + self.error = True + return + + for line, module, name, doc in self.pfuncs: + file = inspect.getsourcefile(module) + func = self.pdict[name] + if isinstance(func, types.MethodType): + reqargs = 2 + else: + reqargs = 1 + if func.__code__.co_argcount > reqargs: + self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__) + self.error = True + elif func.__code__.co_argcount < reqargs: + self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__) + self.error = True + elif not func.__doc__: + self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', + file, line, func.__name__) + else: + try: + parsed_g = parse_grammar(doc, file, line) + for g in parsed_g: + grammar.append((name, g)) + except SyntaxError as e: + self.log.error(str(e)) + self.error = True + + # Looks like a valid grammar rule + # Mark the file in which defined. + self.modules.add(module) + + # Secondary validation step that looks for p_ definitions that are not functions + # or functions that look like they might be grammar rules. 
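validate_pfunctions() above enforces the calling convention the rest of PLY assumes: every rule function takes exactly one argument (two when it is a bound method) and carries its grammar in __doc__. A minimal conforming rule; the production itself is a hypothetical example:

```python
# Minimal rule function that passes validate_pfunctions(): name starts with
# p_, exactly one argument, grammar in the docstring (example production).
def p_expression_plus(p):
    'expression : expression PLUS term'
    p[0] = p[1] + p[3]

# Common failure modes caught above: a missing docstring (warning, rule is
# ignored), an extra argument (error), or a plain variable named p_something.
```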
+ + for n, v in self.pdict.items(): + if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)): + continue + if n.startswith('t_'): + continue + if n.startswith('p_') and n != 'p_error': + self.log.warning('%r not defined as a function', n) + if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or + (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)): + if v.__doc__: + try: + doc = v.__doc__.split(' ') + if doc[1] == ':': + self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix', + v.__code__.co_filename, v.__code__.co_firstlineno, n) + except IndexError: + pass + + self.grammar = grammar + +# ----------------------------------------------------------------------------- +# yacc(module) +# +# Build a parser +# ----------------------------------------------------------------------------- + +def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None, + check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file, + outputdir=None, debuglog=None, errorlog=None, picklefile=None): + + if tabmodule is None: + tabmodule = tab_module + + # Reference to the parsing method of the last built parser + global parse + + # If pickling is enabled, table files are not created + if picklefile: + write_tables = 0 + + if errorlog is None: + errorlog = PlyLogger(sys.stderr) + + # Get the module dictionary used for the parser + if module: + _items = [(k, getattr(module, k)) for k in dir(module)] + pdict = dict(_items) + # If no __file__ attribute is available, try to obtain it from the __module__ instead + if '__file__' not in pdict: + pdict['__file__'] = sys.modules[pdict['__module__']].__file__ + else: + pdict = get_caller_module_dict(2) + + if outputdir is None: + # If no output directory is set, the location of the output files + # is determined according to the following rules: + # - If tabmodule specifies a package, files go into that package directory + # - Otherwise, files go in the same directory as the specifying module + if isinstance(tabmodule, types.ModuleType): + srcfile = tabmodule.__file__ + else: + if '.' not in tabmodule: + srcfile = pdict['__file__'] + else: + parts = tabmodule.split('.') + pkgname = '.'.join(parts[:-1]) + exec('import %s' % pkgname) + srcfile = getattr(sys.modules[pkgname], '__file__', '') + outputdir = os.path.dirname(srcfile) + + # Determine if the module is package of a package or not. + # If so, fix the tabmodule setting so that tables load correctly + pkg = pdict.get('__package__') + if pkg and isinstance(tabmodule, str): + if '.' not in tabmodule: + tabmodule = pkg + '.' 
+ tabmodule + + + + # Set start symbol if it's specified directly using an argument + if start is not None: + pdict['start'] = start + + # Collect parser information from the dictionary + pinfo = ParserReflect(pdict, log=errorlog) + pinfo.get_all() + + if pinfo.error: + raise YaccError('Unable to build parser') + + # Check signature against table files (if any) + signature = pinfo.signature() + + # Read the tables + try: + lr = LRTable() + if picklefile: + read_signature = lr.read_pickle(picklefile) + else: + read_signature = lr.read_table(tabmodule) + if optimize or (read_signature == signature): + try: + lr.bind_callables(pinfo.pdict) + parser = LRParser(lr, pinfo.error_func) + parse = parser.parse + return parser + except Exception as e: + errorlog.warning('There was a problem loading the table file: %r', e) + except VersionError as e: + errorlog.warning(str(e)) + except ImportError: + pass + + if debuglog is None: + if debug: + try: + debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w')) + except IOError as e: + errorlog.warning("Couldn't open %r. %s" % (debugfile, e)) + debuglog = NullLogger() + else: + debuglog = NullLogger() + + debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__) + + errors = False + + # Validate the parser information + if pinfo.validate_all(): + raise YaccError('Unable to build parser') + + if not pinfo.error_func: + errorlog.warning('no p_error() function is defined') + + # Create a grammar object + grammar = Grammar(pinfo.tokens) + + # Set precedence level for terminals + for term, assoc, level in pinfo.preclist: + try: + grammar.set_precedence(term, assoc, level) + except GrammarError as e: + errorlog.warning('%s', e) + + # Add productions to the grammar + for funcname, gram in pinfo.grammar: + file, line, prodname, syms = gram + try: + grammar.add_production(prodname, syms, funcname, file, line) + except GrammarError as e: + errorlog.error('%s', e) + errors = True + + # Set the grammar start symbols + try: + if start is None: + grammar.set_start(pinfo.start) + else: + grammar.set_start(start) + except GrammarError as e: + errorlog.error(str(e)) + errors = True + + if errors: + raise YaccError('Unable to build parser') + + # Verify the grammar structure + undefined_symbols = grammar.undefined_symbols() + for sym, prod in undefined_symbols: + errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym) + errors = True + + unused_terminals = grammar.unused_terminals() + if unused_terminals: + debuglog.info('') + debuglog.info('Unused terminals:') + debuglog.info('') + for term in unused_terminals: + errorlog.warning('Token %r defined, but not used', term) + debuglog.info(' %s', term) + + # Print out all productions to the debug log + if debug: + debuglog.info('') + debuglog.info('Grammar') + debuglog.info('') + for n, p in enumerate(grammar.Productions): + debuglog.info('Rule %-5d %s', n, p) + + # Find unused non-terminals + unused_rules = grammar.unused_rules() + for prod in unused_rules: + errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name) + + if len(unused_terminals) == 1: + errorlog.warning('There is 1 unused token') + if len(unused_terminals) > 1: + errorlog.warning('There are %d unused tokens', len(unused_terminals)) + + if len(unused_rules) == 1: + errorlog.warning('There is 1 unused rule') + if len(unused_rules) > 1: + errorlog.warning('There are %d unused rules', len(unused_rules)) + + if debug: + debuglog.info('') + 
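Stepping back from the internals, the table-reuse path above (read cached tables, compare signatures, rebuild on mismatch) is what a typical embedding exercises through the public entry point. A minimal end-to-end sketch; the token set and grammar are hypothetical, and write_tables=False / debug=False merely keep the cache and debug files out of the working directory:

```python
import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER', 'PLUS')
t_PLUS = r'\+'
t_ignore = ' '

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_expr_plus(p):
    'expr : expr PLUS term'
    p[0] = p[1] + p[3]

def p_expr_term(p):
    'expr : term'
    p[0] = p[1]

def p_term(p):
    'term : NUMBER'
    p[0] = p[1]

def p_error(p):
    pass

lexer = lex.lex()
parser = yacc.yacc(write_tables=False, debug=False)
print(parser.parse('1 + 2 + 3'))   # 6
```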
debuglog.info('Terminals, with rules where they appear') + debuglog.info('') + terms = list(grammar.Terminals) + terms.sort() + for term in terms: + debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]])) + + debuglog.info('') + debuglog.info('Nonterminals, with rules where they appear') + debuglog.info('') + nonterms = list(grammar.Nonterminals) + nonterms.sort() + for nonterm in nonterms: + debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]])) + debuglog.info('') + + if check_recursion: + unreachable = grammar.find_unreachable() + for u in unreachable: + errorlog.warning('Symbol %r is unreachable', u) + + infinite = grammar.infinite_cycles() + for inf in infinite: + errorlog.error('Infinite recursion detected for symbol %r', inf) + errors = True + + unused_prec = grammar.unused_precedence() + for term, assoc in unused_prec: + errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term) + errors = True + + if errors: + raise YaccError('Unable to build parser') + + # Run the LRGeneratedTable on the grammar + if debug: + errorlog.debug('Generating %s tables', method) + + lr = LRGeneratedTable(grammar, method, debuglog) + + if debug: + num_sr = len(lr.sr_conflicts) + + # Report shift/reduce and reduce/reduce conflicts + if num_sr == 1: + errorlog.warning('1 shift/reduce conflict') + elif num_sr > 1: + errorlog.warning('%d shift/reduce conflicts', num_sr) + + num_rr = len(lr.rr_conflicts) + if num_rr == 1: + errorlog.warning('1 reduce/reduce conflict') + elif num_rr > 1: + errorlog.warning('%d reduce/reduce conflicts', num_rr) + + # Write out conflicts to the output file + if debug and (lr.sr_conflicts or lr.rr_conflicts): + debuglog.warning('') + debuglog.warning('Conflicts:') + debuglog.warning('') + + for state, tok, resolution in lr.sr_conflicts: + debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution) + + already_reported = set() + for state, rule, rejected in lr.rr_conflicts: + if (state, id(rule), id(rejected)) in already_reported: + continue + debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) + debuglog.warning('rejected rule (%s) in state %d', rejected, state) + errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) + errorlog.warning('rejected rule (%s) in state %d', rejected, state) + already_reported.add((state, id(rule), id(rejected))) + + warned_never = [] + for state, rule, rejected in lr.rr_conflicts: + if not rejected.reduced and (rejected not in warned_never): + debuglog.warning('Rule (%s) is never reduced', rejected) + errorlog.warning('Rule (%s) is never reduced', rejected) + warned_never.append(rejected) + + # Write the table file if requested + if write_tables: + try: + lr.write_table(tabmodule, outputdir, signature) + except IOError as e: + errorlog.warning("Couldn't create %r. %s" % (tabmodule, e)) + + # Write a pickled version of the tables + if picklefile: + try: + lr.pickle_table(picklefile, signature) + except IOError as e: + errorlog.warning("Couldn't create %r. 
%s" % (picklefile, e)) + + # Build the parser + lr.bind_callables(pinfo.pdict) + parser = LRParser(lr, pinfo.error_func) + + parse = parser.parse + return parser diff --git a/python/pycparser/ply/ygen.py b/python/pycparser/ply/ygen.py new file mode 100644 index 000000000..acf5ca1a3 --- /dev/null +++ b/python/pycparser/ply/ygen.py @@ -0,0 +1,74 @@ +# ply: ygen.py +# +# This is a support program that auto-generates different versions of the YACC parsing +# function with different features removed for the purposes of performance. +# +# Users should edit the method LParser.parsedebug() in yacc.py. The source code +# for that method is then used to create the other methods. See the comments in +# yacc.py for further details. + +import os.path +import shutil + +def get_source_range(lines, tag): + srclines = enumerate(lines) + start_tag = '#--! %s-start' % tag + end_tag = '#--! %s-end' % tag + + for start_index, line in srclines: + if line.strip().startswith(start_tag): + break + + for end_index, line in srclines: + if line.strip().endswith(end_tag): + break + + return (start_index + 1, end_index) + +def filter_section(lines, tag): + filtered_lines = [] + include = True + tag_text = '#--! %s' % tag + for line in lines: + if line.strip().startswith(tag_text): + include = not include + elif include: + filtered_lines.append(line) + return filtered_lines + +def main(): + dirname = os.path.dirname(__file__) + shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak')) + with open(os.path.join(dirname, 'yacc.py'), 'r') as f: + lines = f.readlines() + + parse_start, parse_end = get_source_range(lines, 'parsedebug') + parseopt_start, parseopt_end = get_source_range(lines, 'parseopt') + parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack') + + # Get the original source + orig_lines = lines[parse_start:parse_end] + + # Filter the DEBUG sections out + parseopt_lines = filter_section(orig_lines, 'DEBUG') + + # Filter the TRACKING sections out + parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING') + + # Replace the parser source sections with updated versions + lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines + lines[parseopt_start:parseopt_end] = parseopt_lines + + lines = [line.rstrip()+'\n' for line in lines] + with open(os.path.join(dirname, 'yacc.py'), 'w') as f: + f.writelines(lines) + + print('Updated yacc.py') + +if __name__ == '__main__': + main() + + + + + diff --git a/python/pycparser/plyparser.py b/python/pycparser/plyparser.py new file mode 100644 index 000000000..b8f4c4395 --- /dev/null +++ b/python/pycparser/plyparser.py @@ -0,0 +1,133 @@ +#----------------------------------------------------------------- +# plyparser.py +# +# PLYParser class and other utilities for simplifying programming +# parsers with PLY +# +# Eli Bendersky [https://eli.thegreenplace.net/] +# License: BSD +#----------------------------------------------------------------- + +import warnings + +class Coord(object): + """ Coordinates of a syntactic element. 
Consists of: + - File name + - Line number + - (optional) column number, for the Lexer + """ + __slots__ = ('file', 'line', 'column', '__weakref__') + def __init__(self, file, line, column=None): + self.file = file + self.line = line + self.column = column + + def __str__(self): + str = "%s:%s" % (self.file, self.line) + if self.column: str += ":%s" % self.column + return str + + +class ParseError(Exception): pass + + +class PLYParser(object): + def _create_opt_rule(self, rulename): + """ Given a rule name, creates an optional ply.yacc rule + for it. The name of the optional rule is + _opt + """ + optname = rulename + '_opt' + + def optrule(self, p): + p[0] = p[1] + + optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename) + optrule.__name__ = 'p_%s' % optname + setattr(self.__class__, optrule.__name__, optrule) + + def _coord(self, lineno, column=None): + return Coord( + file=self.clex.filename, + line=lineno, + column=column) + + def _token_coord(self, p, token_idx): + """ Returns the coordinates for the YaccProduction object 'p' indexed + with 'token_idx'. The coordinate includes the 'lineno' and + 'column'. Both follow the lex semantic, starting from 1. + """ + last_cr = p.lexer.lexer.lexdata.rfind('\n', 0, p.lexpos(token_idx)) + if last_cr < 0: + last_cr = -1 + column = (p.lexpos(token_idx) - (last_cr)) + return self._coord(p.lineno(token_idx), column) + + def _parse_error(self, msg, coord): + raise ParseError("%s: %s" % (coord, msg)) + + +def parameterized(*params): + """ Decorator to create parameterized rules. + + Parameterized rule methods must be named starting with 'p_' and contain + 'xxx', and their docstrings may contain 'xxx' and 'yyy'. These will be + replaced by the given parameter tuples. For example, ``p_xxx_rule()`` with + docstring 'xxx_rule : yyy' when decorated with + ``@parameterized(('id', 'ID'))`` produces ``p_id_rule()`` with the docstring + 'id_rule : ID'. Using multiple tuples produces multiple rules. + """ + def decorate(rule_func): + rule_func._params = params + return rule_func + return decorate + + +def template(cls): + """ Class decorator to generate rules from parameterized rule templates. + + See `parameterized` for more information on parameterized rules. + """ + issued_nodoc_warning = False + for attr_name in dir(cls): + if attr_name.startswith('p_'): + method = getattr(cls, attr_name) + if hasattr(method, '_params'): + # Remove the template method + delattr(cls, attr_name) + # Create parameterized rules from this method; only run this if + # the method has a docstring. This is to address an issue when + # pycparser's users are installed in -OO mode which strips + # docstrings away. + # See: https://github.com/eliben/pycparser/pull/198/ and + # https://github.com/eliben/pycparser/issues/197 + # for discussion. + if method.__doc__ is not None: + _create_param_rules(cls, method) + elif not issued_nodoc_warning: + warnings.warn( + 'parsing methods must have __doc__ for pycparser to work properly', + RuntimeWarning, + stacklevel=2) + issued_nodoc_warning = True + return cls + + +def _create_param_rules(cls, func): + """ Create ply.yacc rules based on a parameterized rule function + + Generates new methods (one per each pair of parameters) based on the + template rule function `func`, and attaches them to `cls`. The rule + function's parameters must be accessible via its `_params` attribute. 
+ """ + for xxx, yyy in func._params: + # Use the template method's body for each new method + def param_rule(self, p): + func(self, p) + + # Substitute in the params for the grammar rule and function name + param_rule.__doc__ = func.__doc__.replace('xxx', xxx).replace('yyy', yyy) + param_rule.__name__ = func.__name__.replace('xxx', xxx) + + # Attach the new method to the class + setattr(cls, param_rule.__name__, param_rule) diff --git a/python/pycparser/yacctab.py b/python/pycparser/yacctab.py new file mode 100644 index 000000000..832561dc4 --- /dev/null +++ b/python/pycparser/yacctab.py @@ -0,0 +1,374 @@ + +# yacctab.py +# This file is automatically generated. Do not edit. +_tabversion = '3.10' + +_lr_method = 'LALR' + +_lr_signature = 'translation_unit_or_emptyleftLORleftLANDleftORleftXORleftANDleftEQNEleftGTGELTLEleftRSHIFTLSHIFTleftPLUSMINUSleftTIMESDIVIDEMODAUTO BREAK CASE CHAR CONST CONTINUE DEFAULT DO DOUBLE ELSE ENUM EXTERN FLOAT FOR GOTO IF INLINE INT LONG REGISTER OFFSETOF RESTRICT RETURN SHORT SIGNED SIZEOF STATIC STRUCT SWITCH TYPEDEF UNION UNSIGNED VOID VOLATILE WHILE __INT128 _BOOL _COMPLEX _NORETURN _THREAD_LOCAL _STATIC_ASSERT _ATOMIC _ALIGNOF _ALIGNAS _PRAGMA ID TYPEID INT_CONST_DEC INT_CONST_OCT INT_CONST_HEX INT_CONST_BIN INT_CONST_CHAR FLOAT_CONST HEX_FLOAT_CONST CHAR_CONST WCHAR_CONST U8CHAR_CONST U16CHAR_CONST U32CHAR_CONST STRING_LITERAL WSTRING_LITERAL U8STRING_LITERAL U16STRING_LITERAL U32STRING_LITERAL PLUS MINUS TIMES DIVIDE MOD OR AND NOT XOR LSHIFT RSHIFT LOR LAND LNOT LT LE GT GE EQ NE EQUALS TIMESEQUAL DIVEQUAL MODEQUAL PLUSEQUAL MINUSEQUAL LSHIFTEQUAL RSHIFTEQUAL ANDEQUAL XOREQUAL OREQUAL PLUSPLUS MINUSMINUS ARROW CONDOP LPAREN RPAREN LBRACKET RBRACKET LBRACE RBRACE COMMA PERIOD SEMI COLON ELLIPSIS PPHASH PPPRAGMA PPPRAGMASTRabstract_declarator_opt : empty\n| abstract_declaratorassignment_expression_opt : empty\n| assignment_expressionblock_item_list_opt : empty\n| block_item_listdeclaration_list_opt : empty\n| declaration_listdeclaration_specifiers_no_type_opt : empty\n| declaration_specifiers_no_typedesignation_opt : empty\n| designationexpression_opt : empty\n| expressionid_init_declarator_list_opt : empty\n| id_init_declarator_listidentifier_list_opt : empty\n| identifier_listinit_declarator_list_opt : empty\n| init_declarator_listinitializer_list_opt : empty\n| initializer_listparameter_type_list_opt : empty\n| parameter_type_liststruct_declarator_list_opt : empty\n| struct_declarator_listtype_qualifier_list_opt : empty\n| type_qualifier_list direct_id_declarator : ID\n direct_id_declarator : LPAREN id_declarator RPAREN\n direct_id_declarator : direct_id_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET\n direct_id_declarator : direct_id_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET\n | direct_id_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET\n direct_id_declarator : direct_id_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET\n direct_id_declarator : direct_id_declarator LPAREN parameter_type_list RPAREN\n | direct_id_declarator LPAREN identifier_list_opt RPAREN\n direct_typeid_declarator : TYPEID\n direct_typeid_declarator : LPAREN typeid_declarator RPAREN\n direct_typeid_declarator : direct_typeid_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET\n direct_typeid_declarator : direct_typeid_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET\n | direct_typeid_declarator LBRACKET 
type_qualifier_list STATIC assignment_expression RBRACKET\n direct_typeid_declarator : direct_typeid_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET\n direct_typeid_declarator : direct_typeid_declarator LPAREN parameter_type_list RPAREN\n | direct_typeid_declarator LPAREN identifier_list_opt RPAREN\n direct_typeid_noparen_declarator : TYPEID\n direct_typeid_noparen_declarator : direct_typeid_noparen_declarator LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET\n direct_typeid_noparen_declarator : direct_typeid_noparen_declarator LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET\n | direct_typeid_noparen_declarator LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET\n direct_typeid_noparen_declarator : direct_typeid_noparen_declarator LBRACKET type_qualifier_list_opt TIMES RBRACKET\n direct_typeid_noparen_declarator : direct_typeid_noparen_declarator LPAREN parameter_type_list RPAREN\n | direct_typeid_noparen_declarator LPAREN identifier_list_opt RPAREN\n id_declarator : direct_id_declarator\n id_declarator : pointer direct_id_declarator\n typeid_declarator : direct_typeid_declarator\n typeid_declarator : pointer direct_typeid_declarator\n typeid_noparen_declarator : direct_typeid_noparen_declarator\n typeid_noparen_declarator : pointer direct_typeid_noparen_declarator\n translation_unit_or_empty : translation_unit\n | empty\n translation_unit : external_declaration\n translation_unit : translation_unit external_declaration\n external_declaration : function_definition\n external_declaration : declaration\n external_declaration : pp_directive\n | pppragma_directive\n external_declaration : SEMI\n external_declaration : static_assert\n static_assert : _STATIC_ASSERT LPAREN constant_expression COMMA unified_string_literal RPAREN\n | _STATIC_ASSERT LPAREN constant_expression RPAREN\n pp_directive : PPHASH\n pppragma_directive : PPPRAGMA\n | PPPRAGMA PPPRAGMASTR\n | _PRAGMA LPAREN unified_string_literal RPAREN\n pppragma_directive_list : pppragma_directive\n | pppragma_directive_list pppragma_directive\n function_definition : id_declarator declaration_list_opt compound_statement\n function_definition : declaration_specifiers id_declarator declaration_list_opt compound_statement\n statement : labeled_statement\n | expression_statement\n | compound_statement\n | selection_statement\n | iteration_statement\n | jump_statement\n | pppragma_directive\n | static_assert\n pragmacomp_or_statement : pppragma_directive_list statement\n | statement\n decl_body : declaration_specifiers init_declarator_list_opt\n | declaration_specifiers_no_type id_init_declarator_list_opt\n declaration : decl_body SEMI\n declaration_list : declaration\n | declaration_list declaration\n declaration_specifiers_no_type : type_qualifier declaration_specifiers_no_type_opt\n declaration_specifiers_no_type : storage_class_specifier declaration_specifiers_no_type_opt\n declaration_specifiers_no_type : function_specifier declaration_specifiers_no_type_opt\n declaration_specifiers_no_type : atomic_specifier declaration_specifiers_no_type_opt\n declaration_specifiers_no_type : alignment_specifier declaration_specifiers_no_type_opt\n declaration_specifiers : declaration_specifiers type_qualifier\n declaration_specifiers : declaration_specifiers storage_class_specifier\n declaration_specifiers : declaration_specifiers function_specifier\n declaration_specifiers : declaration_specifiers type_specifier_no_typeid\n declaration_specifiers : type_specifier\n 
declaration_specifiers : declaration_specifiers_no_type type_specifier\n declaration_specifiers : declaration_specifiers alignment_specifier\n storage_class_specifier : AUTO\n | REGISTER\n | STATIC\n | EXTERN\n | TYPEDEF\n | _THREAD_LOCAL\n function_specifier : INLINE\n | _NORETURN\n type_specifier_no_typeid : VOID\n | _BOOL\n | CHAR\n | SHORT\n | INT\n | LONG\n | FLOAT\n | DOUBLE\n | _COMPLEX\n | SIGNED\n | UNSIGNED\n | __INT128\n type_specifier : typedef_name\n | enum_specifier\n | struct_or_union_specifier\n | type_specifier_no_typeid\n | atomic_specifier\n atomic_specifier : _ATOMIC LPAREN type_name RPAREN\n type_qualifier : CONST\n | RESTRICT\n | VOLATILE\n | _ATOMIC\n init_declarator_list : init_declarator\n | init_declarator_list COMMA init_declarator\n init_declarator : declarator\n | declarator EQUALS initializer\n id_init_declarator_list : id_init_declarator\n | id_init_declarator_list COMMA init_declarator\n id_init_declarator : id_declarator\n | id_declarator EQUALS initializer\n specifier_qualifier_list : specifier_qualifier_list type_specifier_no_typeid\n specifier_qualifier_list : specifier_qualifier_list type_qualifier\n specifier_qualifier_list : type_specifier\n specifier_qualifier_list : type_qualifier_list type_specifier\n specifier_qualifier_list : alignment_specifier\n specifier_qualifier_list : specifier_qualifier_list alignment_specifier\n struct_or_union_specifier : struct_or_union ID\n | struct_or_union TYPEID\n struct_or_union_specifier : struct_or_union brace_open struct_declaration_list brace_close\n | struct_or_union brace_open brace_close\n struct_or_union_specifier : struct_or_union ID brace_open struct_declaration_list brace_close\n | struct_or_union ID brace_open brace_close\n | struct_or_union TYPEID brace_open struct_declaration_list brace_close\n | struct_or_union TYPEID brace_open brace_close\n struct_or_union : STRUCT\n | UNION\n struct_declaration_list : struct_declaration\n | struct_declaration_list struct_declaration\n struct_declaration : specifier_qualifier_list struct_declarator_list_opt SEMI\n struct_declaration : SEMI\n struct_declaration : pppragma_directive\n struct_declarator_list : struct_declarator\n | struct_declarator_list COMMA struct_declarator\n struct_declarator : declarator\n struct_declarator : declarator COLON constant_expression\n | COLON constant_expression\n enum_specifier : ENUM ID\n | ENUM TYPEID\n enum_specifier : ENUM brace_open enumerator_list brace_close\n enum_specifier : ENUM ID brace_open enumerator_list brace_close\n | ENUM TYPEID brace_open enumerator_list brace_close\n enumerator_list : enumerator\n | enumerator_list COMMA\n | enumerator_list COMMA enumerator\n alignment_specifier : _ALIGNAS LPAREN type_name RPAREN\n | _ALIGNAS LPAREN constant_expression RPAREN\n enumerator : ID\n | ID EQUALS constant_expression\n declarator : id_declarator\n | typeid_declarator\n pointer : TIMES type_qualifier_list_opt\n | TIMES type_qualifier_list_opt pointer\n type_qualifier_list : type_qualifier\n | type_qualifier_list type_qualifier\n parameter_type_list : parameter_list\n | parameter_list COMMA ELLIPSIS\n parameter_list : parameter_declaration\n | parameter_list COMMA parameter_declaration\n parameter_declaration : declaration_specifiers id_declarator\n | declaration_specifiers typeid_noparen_declarator\n parameter_declaration : declaration_specifiers abstract_declarator_opt\n identifier_list : identifier\n | identifier_list COMMA identifier\n initializer : assignment_expression\n initializer : brace_open initializer_list_opt 
brace_close\n | brace_open initializer_list COMMA brace_close\n initializer_list : designation_opt initializer\n | initializer_list COMMA designation_opt initializer\n designation : designator_list EQUALS\n designator_list : designator\n | designator_list designator\n designator : LBRACKET constant_expression RBRACKET\n | PERIOD identifier\n type_name : specifier_qualifier_list abstract_declarator_opt\n abstract_declarator : pointer\n abstract_declarator : pointer direct_abstract_declarator\n abstract_declarator : direct_abstract_declarator\n direct_abstract_declarator : LPAREN abstract_declarator RPAREN direct_abstract_declarator : direct_abstract_declarator LBRACKET assignment_expression_opt RBRACKET\n direct_abstract_declarator : LBRACKET type_qualifier_list_opt assignment_expression_opt RBRACKET\n direct_abstract_declarator : direct_abstract_declarator LBRACKET TIMES RBRACKET\n direct_abstract_declarator : LBRACKET TIMES RBRACKET\n direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN\n direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN\n direct_abstract_declarator : LBRACKET STATIC type_qualifier_list_opt assignment_expression RBRACKET\n | LBRACKET type_qualifier_list STATIC assignment_expression RBRACKET\n block_item : declaration\n | statement\n block_item_list : block_item\n | block_item_list block_item\n compound_statement : brace_open block_item_list_opt brace_close labeled_statement : ID COLON pragmacomp_or_statement labeled_statement : CASE constant_expression COLON pragmacomp_or_statement labeled_statement : DEFAULT COLON pragmacomp_or_statement labeled_statement : ID COLON labeled_statement : CASE constant_expression COLON labeled_statement : DEFAULT COLON selection_statement : IF LPAREN expression RPAREN pragmacomp_or_statement selection_statement : IF LPAREN expression RPAREN statement ELSE pragmacomp_or_statement selection_statement : SWITCH LPAREN expression RPAREN pragmacomp_or_statement iteration_statement : WHILE LPAREN expression RPAREN pragmacomp_or_statement iteration_statement : DO pragmacomp_or_statement WHILE LPAREN expression RPAREN SEMI iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement iteration_statement : FOR LPAREN declaration expression_opt SEMI expression_opt RPAREN pragmacomp_or_statement jump_statement : GOTO ID SEMI jump_statement : BREAK SEMI jump_statement : CONTINUE SEMI jump_statement : RETURN expression SEMI\n | RETURN SEMI\n expression_statement : expression_opt SEMI expression : assignment_expression\n | expression COMMA assignment_expression\n assignment_expression : LPAREN compound_statement RPAREN typedef_name : TYPEID assignment_expression : conditional_expression\n | unary_expression assignment_operator assignment_expression\n assignment_operator : EQUALS\n | XOREQUAL\n | TIMESEQUAL\n | DIVEQUAL\n | MODEQUAL\n | PLUSEQUAL\n | MINUSEQUAL\n | LSHIFTEQUAL\n | RSHIFTEQUAL\n | ANDEQUAL\n | OREQUAL\n constant_expression : conditional_expression conditional_expression : binary_expression\n | binary_expression CONDOP expression COLON conditional_expression\n binary_expression : cast_expression\n | binary_expression TIMES binary_expression\n | binary_expression DIVIDE binary_expression\n | binary_expression MOD binary_expression\n | binary_expression PLUS binary_expression\n | binary_expression MINUS binary_expression\n | binary_expression RSHIFT binary_expression\n | binary_expression LSHIFT binary_expression\n | 
binary_expression LT binary_expression\n | binary_expression LE binary_expression\n | binary_expression GE binary_expression\n | binary_expression GT binary_expression\n | binary_expression EQ binary_expression\n | binary_expression NE binary_expression\n | binary_expression AND binary_expression\n | binary_expression OR binary_expression\n | binary_expression XOR binary_expression\n | binary_expression LAND binary_expression\n | binary_expression LOR binary_expression\n cast_expression : unary_expression cast_expression : LPAREN type_name RPAREN cast_expression unary_expression : postfix_expression unary_expression : PLUSPLUS unary_expression\n | MINUSMINUS unary_expression\n | unary_operator cast_expression\n unary_expression : SIZEOF unary_expression\n | SIZEOF LPAREN type_name RPAREN\n | _ALIGNOF LPAREN type_name RPAREN\n unary_operator : AND\n | TIMES\n | PLUS\n | MINUS\n | NOT\n | LNOT\n postfix_expression : primary_expression postfix_expression : postfix_expression LBRACKET expression RBRACKET postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN\n | postfix_expression LPAREN RPAREN\n postfix_expression : postfix_expression PERIOD ID\n | postfix_expression PERIOD TYPEID\n | postfix_expression ARROW ID\n | postfix_expression ARROW TYPEID\n postfix_expression : postfix_expression PLUSPLUS\n | postfix_expression MINUSMINUS\n postfix_expression : LPAREN type_name RPAREN brace_open initializer_list brace_close\n | LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close\n primary_expression : identifier primary_expression : constant primary_expression : unified_string_literal\n | unified_wstring_literal\n primary_expression : LPAREN expression RPAREN primary_expression : OFFSETOF LPAREN type_name COMMA offsetof_member_designator RPAREN\n offsetof_member_designator : identifier\n | offsetof_member_designator PERIOD identifier\n | offsetof_member_designator LBRACKET expression RBRACKET\n argument_expression_list : assignment_expression\n | argument_expression_list COMMA assignment_expression\n identifier : ID constant : INT_CONST_DEC\n | INT_CONST_OCT\n | INT_CONST_HEX\n | INT_CONST_BIN\n | INT_CONST_CHAR\n constant : FLOAT_CONST\n | HEX_FLOAT_CONST\n constant : CHAR_CONST\n | WCHAR_CONST\n | U8CHAR_CONST\n | U16CHAR_CONST\n | U32CHAR_CONST\n unified_string_literal : STRING_LITERAL\n | unified_string_literal STRING_LITERAL\n unified_wstring_literal : WSTRING_LITERAL\n | U8STRING_LITERAL\n | U16STRING_LITERAL\n | U32STRING_LITERAL\n | unified_wstring_literal WSTRING_LITERAL\n | unified_wstring_literal U8STRING_LITERAL\n | unified_wstring_literal U16STRING_LITERAL\n | unified_wstring_literal U32STRING_LITERAL\n brace_open : LBRACE\n brace_close : RBRACE\n empty : ' + +_lr_action_items = 
{'$end':([0,1,2,3,4,5,6,7,8,9,10,14,15,64,90,91,127,208,251,262,267,355,501,],[-345,0,-58,-59,-60,-62,-63,-64,-65,-66,-67,-70,-71,-61,-90,-72,-76,-344,-77,-73,-69,-223,-68,]),'SEMI':([0,2,4,5,6,7,8,9,10,12,13,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,69,70,71,72,73,74,75,76,77,78,79,81,83,84,85,86,87,88,89,90,91,97,98,99,100,101,102,103,104,105,106,107,108,110,111,112,117,118,119,121,122,123,124,127,128,130,132,139,140,143,144,145,146,151,152,158,159,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,203,204,205,206,207,208,209,210,211,212,214,220,221,222,223,224,225,226,227,228,229,230,231,232,233,236,239,242,245,246,247,248,249,250,251,252,253,254,255,262,263,267,291,292,293,295,296,297,300,301,302,303,311,312,326,327,330,333,334,335,336,337,338,339,340,341,342,343,344,345,346,348,349,353,354,355,356,357,358,360,361,369,370,371,372,373,374,375,376,377,403,404,406,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,429,431,432,433,434,439,440,461,462,465,466,467,470,471,472,473,475,477,481,482,483,484,485,486,487,488,495,496,499,501,503,504,507,508,510,511,526,527,528,529,530,531,533,534,535,539,540,542,558,559,560,561,562,564,567,569,578,579,582,587,588,590,592,593,594,],[9,9,-60,-62,-63,-64,-65,-66,-67,-345,90,-70,-71,-52,-345,-345,-345,-128,-102,-345,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,-345,-345,-129,-134,-181,-98,-99,-100,-101,-104,-88,-134,-19,-20,-135,-137,-182,-54,-37,-90,-72,-53,-93,-9,-10,-345,-94,-95,-103,-89,-129,-15,-16,-139,-141,-97,-96,-169,-170,-343,-149,-150,210,-76,-345,-181,-55,-333,-30,-311,-260,-261,-263,-282,-284,-297,-320,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,210,210,210,-152,-159,-344,-345,-162,-163,-145,-147,-13,-345,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-320,361,-14,-345,374,375,377,-243,-247,-282,-77,-38,-136,-138,-196,-73,-334,-69,-305,-306,-285,-286,-287,-288,-339,-340,-341,-342,-35,-36,-140,-142,-171,210,-154,210,-156,-151,-160,467,-143,-144,-148,-25,-26,-164,-166,-146,-130,-177,-178,-223,-222,-13,-227,-229,-242,-345,-87,-74,-345,485,-238,-239,486,-241,-43,-44,-313,-264,-265,-266,-267,-268,-269,-270,-271,-272,-273,-274,-275,-276,-277,-278,-279,-280,-281,-300,-301,-302,-303,-304,-31,-34,-172,-173,-153,-155,-161,-168,-224,-228,-226,-245,-244,-86,-75,533,-345,-237,-240,-248,-197,-39,-42,-283,-68,-298,-299,-289,-290,-32,-33,-165,-167,-225,-345,-345,-345,-345,565,-198,-40,-41,-262,-230,-87,-74,-232,-233,580,-307,-314,-345,588,-308,-231,-234,-345,-345,-236,-235,]),'PPHASH':([0,2,4,5,6,7,8,9,10,14,15,64,90,91,127,208,251,262,267,355,501,],[14,14,-60,-62,-63,-64,-65,-66,-67,-70,-71,-61,-90,-72,-76,-344,-77,-73,-69,-223,-68,]),'PPPRAGMA':([0,2,4,5,6,7,8,9,10,14,15,64,90,91,121,124,127,128,203,204,205,207,208,210,211,221,222,223,224,225,226,227,228,229,230,231,232,242,251,262,267,333,335,338,355,356,358,360,361,369,370,371,374,375,377,467,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[15,15,-60,-62,-63,-64,-65,-66,-67,-70,-71,-61,-90,-72,-343,15,-76,15,15,15,15,-159,-344,-162,-163,15,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,15,-77,-73,-69,15,15,-160,-223,-222,15,15,-242,15,-87,-74,-238,-239,-241,-161,-224,15,-226,-86,-75,-237,-240,-68,-225,15,15,15,-230,-87,-74,-232,-233,15,-231,-234,15,15,-236,-235,]),'_P
RAGMA':([0,2,4,5,6,7,8,9,10,14,15,64,90,91,121,124,127,128,203,204,205,207,208,210,211,221,222,223,224,225,226,227,228,229,230,231,232,242,251,262,267,333,335,338,355,356,358,360,361,369,370,371,374,375,377,467,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[16,16,-60,-62,-63,-64,-65,-66,-67,-70,-71,-61,-90,-72,-343,16,-76,16,16,16,16,-159,-344,-162,-163,16,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,16,-77,-73,-69,16,16,-160,-223,-222,16,16,-242,16,-87,-74,-238,-239,-241,-161,-224,16,-226,-86,-75,-237,-240,-68,-225,16,16,16,-230,-87,-74,-232,-233,16,-231,-234,16,16,-236,-235,]),'_STATIC_ASSERT':([0,2,4,5,6,7,8,9,10,14,15,64,90,91,121,127,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,251,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[18,18,-60,-62,-63,-64,-65,-66,-67,-70,-71,-61,-90,-72,-343,-76,18,-344,18,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,18,-77,-73,-69,-223,-222,18,18,-242,18,-87,-74,-238,-239,-241,-224,18,-226,-86,-75,-237,-240,-68,-225,18,18,18,-230,-87,-74,-232,-233,18,-231,-234,18,18,-236,-235,]),'ID':([0,2,4,5,6,7,8,9,10,12,14,15,17,20,21,22,23,24,25,26,27,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,62,63,64,69,70,71,72,74,75,76,77,78,80,81,82,90,91,94,95,96,98,99,100,101,102,103,104,106,112,113,114,115,116,117,118,119,120,121,122,123,126,127,128,134,135,136,137,141,147,148,149,150,153,154,155,156,160,161,182,183,184,192,194,195,196,197,198,199,206,208,209,212,214,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,244,247,251,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,294,298,306,309,310,314,318,322,323,330,331,332,334,336,337,340,341,342,347,348,349,353,354,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,398,400,401,402,405,448,449,452,455,457,458,459,461,462,465,466,468,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,509,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,570,571,578,580,587,588,590,592,593,594,],[28,28,-60,-62,-63,-64,-65,-66,-67,28,-70,-71,28,28,-345,-345,-345,-128,-102,28,-345,-107,-345,-125,-126,-127,-129,-246,118,122,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-157,-158,-61,28,28,-129,-134,-98,-99,-100,-101,-104,28,-134,28,-90,-72,159,-345,159,-93,-9,-10,-345,-94,-95,-103,-129,-97,-183,-27,-28,-185,-96,-169,-170,202,-343,-149,-150,159,-76,233,28,159,-345,159,159,-292,-293,-294,-291,159,159,159,159,-295,-296,159,-345,-28,28,28,159,-184,-186,202,202,-152,-344,28,-145,-147,233,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,159,159,233,373,159,-77,-345,159,-345,-28,-73,-69,159,159,159,159,159,159,159,159,159,159,159,159,159,159,159,159,159,159,159,159,159,431,433,159,159,-292,159,159,159,28,28,-345,-171,202,159,-154,-156,-151,-143,-144,-148,159,-146,-130,-177,-178,-223,-222,233,233,-242,159,159,159,159,233,-87,-74,159,-238,-239,-241,159,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,159,-12,159,159,-292,159,159,159,-345,159,28,159,-345,-28,159,-172,-173,-153,-155,28,159,-224,233,-226,159,-86,-75,159,-237,-240,-345,-201,-345,-68,159,159,159,159,-345,-28,159,159,-292,-225,233,233,233,159,159,159,-11,-292,159,159,-230,-87,-74,-232,-233
,159,-345,159,159,233,159,-231,-234,233,233,-236,-235,]),'LPAREN':([0,2,4,5,6,7,8,9,10,12,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,64,69,70,71,72,74,75,76,77,78,80,81,82,88,89,90,91,94,95,97,98,99,100,101,102,103,104,106,109,112,113,114,115,116,117,118,119,121,122,123,126,127,128,132,134,135,136,139,140,141,143,147,148,149,150,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,192,194,195,196,197,206,208,209,212,214,216,221,222,223,224,225,226,227,228,229,230,231,232,233,234,237,238,240,241,242,243,247,251,252,256,257,258,259,262,263,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,291,292,294,298,300,301,302,303,306,309,310,311,312,318,319,322,323,324,325,330,332,334,336,337,340,341,342,347,348,349,351,352,353,354,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,403,404,405,406,429,431,432,433,434,439,440,446,447,448,452,455,457,458,459,461,462,465,466,468,469,471,472,473,476,480,481,482,484,485,486,489,491,495,496,500,501,502,503,504,505,510,511,512,513,514,517,518,520,521,522,524,528,529,530,531,532,533,536,537,539,540,547,548,549,550,551,552,555,556,557,558,559,560,561,562,565,567,568,569,571,572,573,576,577,578,580,582,585,586,587,588,590,592,593,594,],[17,17,-60,-62,-63,-64,-65,-66,-67,82,-70,-71,92,17,94,96,17,-345,-345,-345,-128,-102,17,-345,-29,-107,-345,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,125,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,126,-61,82,17,-129,125,-98,-99,-100,-101,-104,82,-134,82,137,-37,-90,-72,141,-345,96,-93,-9,-10,-345,-94,-95,-103,-129,125,-97,-183,-27,-28,-185,-96,-169,-170,-343,-149,-150,141,-76,238,137,82,238,-345,-333,-30,238,-311,-292,-293,-294,-291,288,294,294,141,298,299,-297,-320,-295,-296,-309,-310,-312,304,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,238,-345,-28,322,82,238,-184,-186,-152,-344,82,-145,-147,351,238,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-320,141,362,238,366,367,238,372,238,-77,-38,-345,238,-345,-28,-73,-334,-69,238,141,141,141,141,141,141,141,141,141,141,141,141,141,141,141,141,141,141,238,238,-305,-306,238,238,-339,-340,-341,-342,-292,238,238,-35,-36,322,449,322,-345,-45,460,-171,141,-154,-156,-151,-143,-144,-148,141,-146,-130,351,351,-177,-178,-223,-222,238,238,-242,238,238,238,238,238,-87,-74,238,-238,-239,-241,238,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,238,-12,141,-292,238,238,-43,-44,141,-313,-300,-301,-302,-303,-304,-31,-34,449,460,-345,322,238,-345,-28,238,-172,-173,-153,-155,82,141,-224,238,-226,141,532,-86,-75,238,-237,-240,-345,-201,-39,-42,-345,-68,141,-298,-299,238,-32,-33,238,-345,-28,-210,-216,-214,238,238,-292,-225,238,238,238,238,238,238,-11,-40,-41,-292,238,238,-50,-51,-212,-211,-213,-215,-230,-87,-74,-232,-233,238,-307,-345,-314,238,-46,-49,-217,-218,238,238,-308,-47,-48,-231,-234,238,238,-236,-235,]),'TIMES':([0,2,4,5,6,7,8,9,10,12,14,15,17,21,22,23,24,25,26,27,29,30,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,69,70,71,72,74,75,76,77,78,81,82,90,91,94,95,98,99,100,101,102,103,104,106,112,113,114,115,116,117,118,119,121,122,123,126,127,128,134,135,136,139,141,143,145,146,147,148,149,150,151,152,153,154,155,156,158,159,160,161,162,163,164,166,167,168,169,170,171
,172,173,174,175,176,177,178,179,180,181,182,183,184,192,194,195,197,206,208,209,212,214,216,221,222,223,224,225,226,227,228,229,230,231,232,233,234,238,242,247,250,251,256,257,258,259,262,263,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,291,292,293,294,295,296,297,298,300,301,302,303,306,309,310,322,323,330,332,334,336,337,340,341,342,347,348,349,351,353,354,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,406,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,429,431,432,433,434,448,455,457,458,459,461,462,465,466,468,469,471,472,473,476,481,482,484,485,486,489,491,499,500,501,502,503,504,505,507,508,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,567,568,569,571,578,580,582,587,588,590,592,593,594,],[30,30,-60,-62,-63,-64,-65,-66,-67,30,-70,-71,30,-345,-345,-345,-128,-102,30,-345,-107,-345,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,30,30,-129,-134,-98,-99,-100,-101,-104,-134,30,-90,-72,147,-345,-93,-9,-10,-345,-94,-95,-103,-129,-97,30,-27,-28,-185,-96,-169,-170,-343,-149,-150,147,-76,147,30,147,-345,-333,147,-311,269,-263,-292,-293,-294,-291,-282,-284,147,147,147,147,-297,-320,-295,-296,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,306,-345,-28,30,30,147,-186,-152,-344,30,-145,-147,30,147,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-320,147,147,147,147,-282,-77,-345,400,-345,-28,-73,-334,-69,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,-305,-306,-285,147,-286,-287,-288,147,-339,-340,-341,-342,-292,147,147,30,456,-171,147,-154,-156,-151,-143,-144,-148,147,-146,-130,30,-177,-178,-223,-222,147,147,-242,147,147,147,147,147,-87,-74,147,-238,-239,-241,147,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,147,-12,147,-292,147,147,147,-313,-264,-265,-266,269,269,269,269,269,269,269,269,269,269,269,269,269,269,269,-300,-301,-302,-303,-304,-345,147,-345,-28,524,-172,-173,-153,-155,30,147,-224,147,-226,147,-86,-75,147,-237,-240,-345,-201,-283,-345,-68,147,-298,-299,147,-289,-290,547,-345,-28,147,147,-292,-225,147,147,147,147,147,147,-11,-292,147,147,-230,-87,-74,-232,-233,147,-307,-345,-314,147,147,147,-308,-231,-234,147,147,-236,-235,]),'TYPEID':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,62,63,64,67,68,69,70,71,72,73,74,75,76,77,78,80,81,82,90,91,96,97,98,99,100,101,102,103,104,106,112,113,114,115,116,117,118,119,121,122,123,124,125,126,127,128,129,134,137,140,141,192,193,194,196,197,203,204,205,206,207,208,209,210,211,212,213,214,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,289,290,294,298,299,304,311,312,313,318,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,468,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[35,35,-60,-62,-63,-64,-65,-66,-67,35,89,-70,-71,-52,-345,-345,-345,-128,-102,35,-345,-29,-107,-345,-125,-126,-127,-129,-246,119,123,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-157,-158,-61,35,-91,89,35,-129,-134,35,-98,-99,-100,-101,-104,89,-134,89,-90,-72,35,-53,-
93,-9,-10,-345,-94,-95,-103,-129,-97,-183,-27,-28,-185,-96,-169,-170,-343,-149,-150,35,35,35,-76,35,-92,89,35,-30,35,324,35,89,-184,-186,35,35,35,-152,-159,-344,89,-162,-163,-145,35,-147,35,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,35,-77,-73,-69,432,434,35,35,35,35,-35,-36,35,324,35,-171,35,-154,35,-156,-151,-160,-143,-144,-148,-146,-130,35,-177,-178,-223,-222,-227,-229,-242,-87,-84,35,-238,-239,-241,-31,-34,35,35,-172,-173,-153,-155,-161,89,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'ENUM':([0,2,4,5,6,7,8,9,10,11,14,15,19,21,22,23,26,27,28,29,34,50,51,52,53,54,55,56,57,58,59,60,64,67,68,70,71,72,73,90,91,96,97,98,99,100,101,102,103,112,116,117,121,124,125,126,127,128,129,137,140,141,193,197,203,204,205,207,208,210,211,213,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,333,335,338,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[36,36,-60,-62,-63,-64,-65,-66,-67,36,-70,-71,-52,-345,-345,-345,36,-345,-29,-107,-345,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,36,-91,36,-345,-134,36,-90,-72,36,-53,-93,-9,-10,-345,-94,-95,-97,-185,-96,-343,36,36,36,-76,36,-92,36,-30,36,36,-186,36,36,36,-159,-344,-162,-163,36,36,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,36,-77,-73,-69,36,36,36,36,-35,-36,36,36,36,36,-160,-130,36,-177,-178,-223,-222,-227,-229,-242,-87,-84,36,-238,-239,-241,-31,-34,36,36,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'VOID':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[38,38,-60,-62,-63,-64,-65,-66,-67,38,38,-70,-71,-52,-345,-345,-345,-128,-102,38,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,38,-91,38,38,-129,-134,38,-98,-99,-100,-101,-104,-134,-90,-72,38,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,38,38,38,-76,38,-92,38,-30,38,38,38,-186,38,38,38,-152,-159,-344,38,-162,-163,-145,38,-147,38,38,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,38,-77,-73,-69,38,38,38,38,-35,-36,38,38,-171,38,-154,38,-156,-151,-160,-143,-144,-148,-146,-130,38,-177,-178,-223,-222,-227,-229,-242,-87,-84,38,-238,-239,-241,-31,-34,38,38,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'_BOOL':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,
229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[39,39,-60,-62,-63,-64,-65,-66,-67,39,39,-70,-71,-52,-345,-345,-345,-128,-102,39,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,39,-91,39,39,-129,-134,39,-98,-99,-100,-101,-104,-134,-90,-72,39,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,39,39,39,-76,39,-92,39,-30,39,39,39,-186,39,39,39,-152,-159,-344,39,-162,-163,-145,39,-147,39,39,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,39,-77,-73,-69,39,39,39,39,-35,-36,39,39,-171,39,-154,39,-156,-151,-160,-143,-144,-148,-146,-130,39,-177,-178,-223,-222,-227,-229,-242,-87,-84,39,-238,-239,-241,-31,-34,39,39,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'CHAR':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[40,40,-60,-62,-63,-64,-65,-66,-67,40,40,-70,-71,-52,-345,-345,-345,-128,-102,40,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,40,-91,40,40,-129,-134,40,-98,-99,-100,-101,-104,-134,-90,-72,40,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,40,40,40,-76,40,-92,40,-30,40,40,40,-186,40,40,40,-152,-159,-344,40,-162,-163,-145,40,-147,40,40,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,40,-77,-73,-69,40,40,40,40,-35,-36,40,40,-171,40,-154,40,-156,-151,-160,-143,-144,-148,-146,-130,40,-177,-178,-223,-222,-227,-229,-242,-87,-84,40,-238,-239,-241,-31,-34,40,40,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'SHORT':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[41,41,-60,-62,-63,-64,-65,-66,-67,41,41,-70,-71,-52,-345,-345,-345,-128,-102,41,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,41,-91,41,41,
-129,-134,41,-98,-99,-100,-101,-104,-134,-90,-72,41,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,41,41,41,-76,41,-92,41,-30,41,41,41,-186,41,41,41,-152,-159,-344,41,-162,-163,-145,41,-147,41,41,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,41,-77,-73,-69,41,41,41,41,-35,-36,41,41,-171,41,-154,41,-156,-151,-160,-143,-144,-148,-146,-130,41,-177,-178,-223,-222,-227,-229,-242,-87,-84,41,-238,-239,-241,-31,-34,41,41,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'INT':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[42,42,-60,-62,-63,-64,-65,-66,-67,42,42,-70,-71,-52,-345,-345,-345,-128,-102,42,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,42,-91,42,42,-129,-134,42,-98,-99,-100,-101,-104,-134,-90,-72,42,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,42,42,42,-76,42,-92,42,-30,42,42,42,-186,42,42,42,-152,-159,-344,42,-162,-163,-145,42,-147,42,42,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,42,-77,-73,-69,42,42,42,42,-35,-36,42,42,-171,42,-154,42,-156,-151,-160,-143,-144,-148,-146,-130,42,-177,-178,-223,-222,-227,-229,-242,-87,-84,42,-238,-239,-241,-31,-34,42,42,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'LONG':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[43,43,-60,-62,-63,-64,-65,-66,-67,43,43,-70,-71,-52,-345,-345,-345,-128,-102,43,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,43,-91,43,43,-129,-134,43,-98,-99,-100,-101,-104,-134,-90,-72,43,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,43,43,43,-76,43,-92,43,-30,43,43,43,-186,43,43,43,-152,-159,-344,43,-162,-163,-145,43,-147,43,43,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,43,-77,-73,-69,43,43,43,43,-35,-36,43,43,-171,43,-154,43,-156,-151,-160,-143,-144,-148,-146,-130,43,-177,-178,-223,-222,-227,-229,-242,-87,-84,43,-238,-239,-241,-31,-34,43,43,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,
-233,-231,-234,-236,-235,]),'FLOAT':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[44,44,-60,-62,-63,-64,-65,-66,-67,44,44,-70,-71,-52,-345,-345,-345,-128,-102,44,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,44,-91,44,44,-129,-134,44,-98,-99,-100,-101,-104,-134,-90,-72,44,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,44,44,44,-76,44,-92,44,-30,44,44,44,-186,44,44,44,-152,-159,-344,44,-162,-163,-145,44,-147,44,44,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,44,-77,-73,-69,44,44,44,44,-35,-36,44,44,-171,44,-154,44,-156,-151,-160,-143,-144,-148,-146,-130,44,-177,-178,-223,-222,-227,-229,-242,-87,-84,44,-238,-239,-241,-31,-34,44,44,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'DOUBLE':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[45,45,-60,-62,-63,-64,-65,-66,-67,45,45,-70,-71,-52,-345,-345,-345,-128,-102,45,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,45,-91,45,45,-129,-134,45,-98,-99,-100,-101,-104,-134,-90,-72,45,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,45,45,45,-76,45,-92,45,-30,45,45,45,-186,45,45,45,-152,-159,-344,45,-162,-163,-145,45,-147,45,45,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,45,-77,-73,-69,45,45,45,45,-35,-36,45,45,-171,45,-154,45,-156,-151,-160,-143,-144,-148,-146,-130,45,-177,-178,-223,-222,-227,-229,-242,-87,-84,45,-238,-239,-241,-31,-34,45,45,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'_COMPLEX':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,34
2,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[46,46,-60,-62,-63,-64,-65,-66,-67,46,46,-70,-71,-52,-345,-345,-345,-128,-102,46,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,46,-91,46,46,-129,-134,46,-98,-99,-100,-101,-104,-134,-90,-72,46,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,46,46,46,-76,46,-92,46,-30,46,46,46,-186,46,46,46,-152,-159,-344,46,-162,-163,-145,46,-147,46,46,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,46,-77,-73,-69,46,46,46,46,-35,-36,46,46,-171,46,-154,46,-156,-151,-160,-143,-144,-148,-146,-130,46,-177,-178,-223,-222,-227,-229,-242,-87,-84,46,-238,-239,-241,-31,-34,46,46,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'SIGNED':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[47,47,-60,-62,-63,-64,-65,-66,-67,47,47,-70,-71,-52,-345,-345,-345,-128,-102,47,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,47,-91,47,47,-129,-134,47,-98,-99,-100,-101,-104,-134,-90,-72,47,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,47,47,47,-76,47,-92,47,-30,47,47,47,-186,47,47,47,-152,-159,-344,47,-162,-163,-145,47,-147,47,47,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,47,-77,-73,-69,47,47,47,47,-35,-36,47,47,-171,47,-154,47,-156,-151,-160,-143,-144,-148,-146,-130,47,-177,-178,-223,-222,-227,-229,-242,-87,-84,47,-238,-239,-241,-31,-34,47,47,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'UNSIGNED':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[48,48,-60,-62,-63,-64,-65,-66,-67,48,48,-70,-71,-52,-345,-345,-345,-128,-102,48,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,48,-91,48,48,-129,-134,48,-98,-99,-100,-101,-104,-134,-90,-72,48,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-18
5,-96,-169,-170,-343,-149,-150,48,48,48,-76,48,-92,48,-30,48,48,48,-186,48,48,48,-152,-159,-344,48,-162,-163,-145,48,-147,48,48,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,48,-77,-73,-69,48,48,48,48,-35,-36,48,48,-171,48,-154,48,-156,-151,-160,-143,-144,-148,-146,-130,48,-177,-178,-223,-222,-227,-229,-242,-87,-84,48,-238,-239,-241,-31,-34,48,48,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'__INT128':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,96,97,98,99,100,101,102,103,104,106,112,116,117,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[49,49,-60,-62,-63,-64,-65,-66,-67,49,49,-70,-71,-52,-345,-345,-345,-128,-102,49,-345,-29,-107,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,49,-91,49,49,-129,-134,49,-98,-99,-100,-101,-104,-134,-90,-72,49,-53,-93,-9,-10,-345,-94,-95,-103,-129,-97,-185,-96,-169,-170,-343,-149,-150,49,49,49,-76,49,-92,49,-30,49,49,49,-186,49,49,49,-152,-159,-344,49,-162,-163,-145,49,-147,49,49,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,49,-77,-73,-69,49,49,49,49,-35,-36,49,49,-171,49,-154,49,-156,-151,-160,-143,-144,-148,-146,-130,49,-177,-178,-223,-222,-227,-229,-242,-87,-84,49,-238,-239,-241,-31,-34,49,49,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'_ATOMIC':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,70,71,72,73,74,75,76,77,78,81,90,91,95,96,97,98,99,100,101,102,103,104,106,112,115,116,117,118,119,121,122,123,124,125,126,127,128,129,136,137,140,141,183,184,192,193,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,258,259,262,267,294,298,299,304,311,312,313,322,323,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,448,449,457,458,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,513,514,528,558,559,560,561,562,587,588,593,594,],[50,50,-60,-62,-63,-64,-65,-66,-67,72,81,-70,-71,-52,72,72,72,-128,-102,109,72,-29,-107,81,-125,-126,-127,72,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,72,-91,81,109,72,-134,72,-98,-99,-100,-101,-104,-134,-90,-72,81,50,-53,-93,-9,-10,72,-94,-95,-103,-129,-97,81,-185,-96,-169,-170,-343,-149,-150,50,50,50,-76,72,-92,81,50,-30,50,81,81,81,109,-186,50,50,50,-152,-159,-344,81,-162,-163,-145,72,-147,81,72,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,50,-77,81,81,-73,-69,50,50,50,50,-35,-36,50,50,81,-171,50,-154,50,-156,-151,-160,-143,-144,-148,-146,-130,50,-177,-178,-223,-222,-227,-229,-242,-87,-84,72,-238,-239,-241,-31,-34,81,50,81,81,50,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,81,81,-225,-230,-87,-84,-232,-233
,-231,-234,-236,-235,]),'CONST':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,30,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,95,96,97,101,104,106,115,116,118,119,121,122,123,124,125,126,127,128,129,136,137,140,141,183,184,192,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,258,259,262,267,294,298,299,304,311,312,313,322,323,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,448,449,457,458,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,513,514,528,558,559,560,561,562,587,588,593,594,],[51,51,-60,-62,-63,-64,-65,-66,-67,51,51,-70,-71,-52,51,51,51,-128,-102,51,-29,-107,51,-125,-126,-127,51,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,51,-91,51,51,-134,51,-98,-99,-100,-101,-104,-134,-90,-72,51,51,-53,51,-103,-129,51,-185,-169,-170,-343,-149,-150,51,51,51,-76,51,-92,51,51,-30,51,51,51,51,-186,51,51,51,-152,-159,-344,51,-162,-163,-145,51,-147,51,51,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,51,-77,51,51,-73,-69,51,51,51,51,-35,-36,51,51,51,-171,51,-154,51,-156,-151,-160,-143,-144,-148,-146,-130,51,-177,-178,-223,-222,-227,-229,-242,-87,-84,51,-238,-239,-241,-31,-34,51,51,51,51,51,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,51,51,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'RESTRICT':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,30,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,95,96,97,101,104,106,115,116,118,119,121,122,123,124,125,126,127,128,129,136,137,140,141,183,184,192,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,258,259,262,267,294,298,299,304,311,312,313,322,323,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,448,449,457,458,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,513,514,528,558,559,560,561,562,587,588,593,594,],[52,52,-60,-62,-63,-64,-65,-66,-67,52,52,-70,-71,-52,52,52,52,-128,-102,52,-29,-107,52,-125,-126,-127,52,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,52,-91,52,52,-134,52,-98,-99,-100,-101,-104,-134,-90,-72,52,52,-53,52,-103,-129,52,-185,-169,-170,-343,-149,-150,52,52,52,-76,52,-92,52,52,-30,52,52,52,52,-186,52,52,52,-152,-159,-344,52,-162,-163,-145,52,-147,52,52,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,52,-77,52,52,-73,-69,52,52,52,52,-35,-36,52,52,52,-171,52,-154,52,-156,-151,-160,-143,-144,-148,-146,-130,52,-177,-178,-223,-222,-227,-229,-242,-87,-84,52,-238,-239,-241,-31,-34,52,52,52,52,52,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,52,52,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'VOLATILE':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,30,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,95,96,97,101,104,106,115,116,118,119,121,122,123,124,125,126,127,128,129,136,137,140,141,183,184,192,197,203,204,205,206,207,208,209,210,211,212,213,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,258,259,262,267,294,298,299,304,311,312,313,322,323,330,333,334,335,336,33
7,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,448,449,457,458,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,513,514,528,558,559,560,561,562,587,588,593,594,],[53,53,-60,-62,-63,-64,-65,-66,-67,53,53,-70,-71,-52,53,53,53,-128,-102,53,-29,-107,53,-125,-126,-127,53,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,53,-91,53,53,-134,53,-98,-99,-100,-101,-104,-134,-90,-72,53,53,-53,53,-103,-129,53,-185,-169,-170,-343,-149,-150,53,53,53,-76,53,-92,53,53,-30,53,53,53,53,-186,53,53,53,-152,-159,-344,53,-162,-163,-145,53,-147,53,53,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,53,-77,53,53,-73,-69,53,53,53,53,-35,-36,53,53,53,-171,53,-154,53,-156,-151,-160,-143,-144,-148,-146,-130,53,-177,-178,-223,-222,-227,-229,-242,-87,-84,53,-238,-239,-241,-31,-34,53,53,53,53,53,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,53,53,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'AUTO':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,96,97,101,104,106,118,119,121,122,123,127,128,129,137,140,192,206,208,221,222,223,224,225,226,227,228,229,230,231,232,251,262,267,311,312,313,322,330,334,336,337,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[54,54,-60,-62,-63,-64,-65,-66,-67,54,54,-70,-71,-52,54,54,54,-128,-102,54,-29,-107,-125,-126,-127,54,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,54,-91,54,54,-134,54,-98,-99,-100,-101,-104,-134,-90,-72,54,-53,54,-103,-129,-169,-170,-343,-149,-150,-76,54,-92,54,-30,54,-152,-344,54,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-77,-73,-69,-35,-36,54,54,-171,-154,-156,-151,-130,54,-177,-178,-223,-222,-227,-229,-242,-87,-84,54,-238,-239,-241,-31,-34,54,54,-172,-173,-153,-155,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'REGISTER':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,96,97,101,104,106,118,119,121,122,123,127,128,129,137,140,192,206,208,221,222,223,224,225,226,227,228,229,230,231,232,251,262,267,311,312,313,322,330,334,336,337,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[55,55,-60,-62,-63,-64,-65,-66,-67,55,55,-70,-71,-52,55,55,55,-128,-102,55,-29,-107,-125,-126,-127,55,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,55,-91,55,55,-134,55,-98,-99,-100,-101,-104,-134,-90,-72,55,-53,55,-103,-129,-169,-170,-343,-149,-150,-76,55,-92,55,-30,55,-152,-344,55,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-77,-73,-69,-35,-36,55,55,-171,-154,-156,-151,-130,55,-177,-178,-223,-222,-227,-229,-242,-87,-84,55,-238,-239,-241,-31,-34,55,55,-172,-173,-153,-155,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'STATIC':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,
59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,95,96,97,101,104,106,116,118,119,121,122,123,127,128,129,136,137,140,184,192,197,206,208,221,222,223,224,225,226,227,228,229,230,231,232,251,259,262,267,311,312,313,322,323,330,334,336,337,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,448,449,458,460,461,462,465,466,471,472,473,481,482,485,486,501,510,511,514,528,558,559,560,561,562,587,588,593,594,],[29,29,-60,-62,-63,-64,-65,-66,-67,29,29,-70,-71,-52,29,29,29,-128,-102,29,-29,-107,-125,-126,-127,29,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,29,-91,29,29,-134,29,-98,-99,-100,-101,-104,-134,-90,-72,183,29,-53,29,-103,-129,-185,-169,-170,-343,-149,-150,-76,29,-92,258,29,-30,310,29,-186,-152,-344,29,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-77,402,-73,-69,-35,-36,29,29,457,-171,-154,-156,-151,-130,29,-177,-178,-223,-222,-227,-229,-242,-87,-84,29,-238,-239,-241,-31,-34,513,29,522,29,-172,-173,-153,-155,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,549,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'EXTERN':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,96,97,101,104,106,118,119,121,122,123,127,128,129,137,140,192,206,208,221,222,223,224,225,226,227,228,229,230,231,232,251,262,267,311,312,313,322,330,334,336,337,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[56,56,-60,-62,-63,-64,-65,-66,-67,56,56,-70,-71,-52,56,56,56,-128,-102,56,-29,-107,-125,-126,-127,56,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,56,-91,56,56,-134,56,-98,-99,-100,-101,-104,-134,-90,-72,56,-53,56,-103,-129,-169,-170,-343,-149,-150,-76,56,-92,56,-30,56,-152,-344,56,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-77,-73,-69,-35,-36,56,56,-171,-154,-156,-151,-130,56,-177,-178,-223,-222,-227,-229,-242,-87,-84,56,-238,-239,-241,-31,-34,56,56,-172,-173,-153,-155,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'TYPEDEF':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,96,97,101,104,106,118,119,121,122,123,127,128,129,137,140,192,206,208,221,222,223,224,225,226,227,228,229,230,231,232,251,262,267,311,312,313,322,330,334,336,337,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[57,57,-60,-62,-63,-64,-65,-66,-67,57,57,-70,-71,-52,57,57,57,-128,-102,57,-29,-107,-125,-126,-127,57,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,57,-91,57,57,-134,57,-98,-99,-100,-101,-104,-134,-90,-72,57,-53,57,-103,-129,-169,-170,-343,-149,-150,-76,57,-92,57,-30,57,-152,-344,57,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-77,-73,-69,-35,-36,57,57,-171,-154,-156,-151,-130,57,-177,-178,-223,-222,-227,-229,-242,-87,-84,57,-238,-239,-241,-31,-34,57,57,-172,-173,-153,-155,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'_THREAD_LOCAL':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,2
2,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,96,97,101,104,106,118,119,121,122,123,127,128,129,137,140,192,206,208,221,222,223,224,225,226,227,228,229,230,231,232,251,262,267,311,312,313,322,330,334,336,337,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[58,58,-60,-62,-63,-64,-65,-66,-67,58,58,-70,-71,-52,58,58,58,-128,-102,58,-29,-107,-125,-126,-127,58,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,58,-91,58,58,-134,58,-98,-99,-100,-101,-104,-134,-90,-72,58,-53,58,-103,-129,-169,-170,-343,-149,-150,-76,58,-92,58,-30,58,-152,-344,58,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-77,-73,-69,-35,-36,58,58,-171,-154,-156,-151,-130,58,-177,-178,-223,-222,-227,-229,-242,-87,-84,58,-238,-239,-241,-31,-34,58,58,-172,-173,-153,-155,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'INLINE':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,96,97,101,104,106,118,119,121,122,123,127,128,129,137,140,192,206,208,221,222,223,224,225,226,227,228,229,230,231,232,251,262,267,311,312,313,322,330,334,336,337,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[59,59,-60,-62,-63,-64,-65,-66,-67,59,59,-70,-71,-52,59,59,59,-128,-102,59,-29,-107,-125,-126,-127,59,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,59,-91,59,59,-134,59,-98,-99,-100,-101,-104,-134,-90,-72,59,-53,59,-103,-129,-169,-170,-343,-149,-150,-76,59,-92,59,-30,59,-152,-344,59,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-77,-73,-69,-35,-36,59,59,-171,-154,-156,-151,-130,59,-177,-178,-223,-222,-227,-229,-242,-87,-84,59,-238,-239,-241,-31,-34,59,59,-172,-173,-153,-155,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'_NORETURN':([0,2,4,5,6,7,8,9,10,11,12,14,15,19,21,22,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,96,97,101,104,106,118,119,121,122,123,127,128,129,137,140,192,206,208,221,222,223,224,225,226,227,228,229,230,231,232,251,262,267,311,312,313,322,330,334,336,337,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[60,60,-60,-62,-63,-64,-65,-66,-67,60,60,-70,-71,-52,60,60,60,-128,-102,60,-29,-107,-125,-126,-127,60,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,60,-91,60,60,-134,60,-98,-99,-100,-101,-104,-134,-90,-72,60,-53,60,-103,-129,-169,-170,-343,-149,-150,-76,60,-92,60,-30,60,-152,-344,60,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-77,-73,-69,-35,-36,60,60,-171,-154,-156,-151,-130,60,-177,-178,-223,-222,-227,-229,-242,-87,-84,60,-238,-239,-241,-31,-34,60,60,-172,-173,-153,-155,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'_ALIGNAS':([0,2,4,5,6,7,8,9,10,11,12
,14,15,19,21,22,23,24,25,27,28,29,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,64,67,68,69,71,72,73,74,75,76,77,78,81,90,91,96,97,101,104,106,118,119,121,122,123,124,125,126,127,128,129,137,140,141,192,203,204,205,206,207,208,209,210,211,212,214,216,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,330,333,334,335,336,337,338,340,341,342,348,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,461,462,465,466,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[61,61,-60,-62,-63,-64,-65,-66,-67,61,61,-70,-71,-52,61,61,61,-128,-102,61,-29,-107,-125,-126,-127,61,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,61,-91,61,61,-134,61,-98,-99,-100,-101,-104,-134,-90,-72,61,-53,61,-103,-129,-169,-170,-343,-149,-150,61,61,61,-76,61,-92,61,-30,61,61,61,61,61,-152,-159,-344,61,-162,-163,-145,-147,61,61,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,61,-77,-73,-69,61,61,61,61,-35,-36,61,61,-171,61,-154,61,-156,-151,-160,-143,-144,-148,-146,-130,61,-177,-178,-223,-222,-227,-229,-242,-87,-84,61,-238,-239,-241,-31,-34,61,61,-172,-173,-153,-155,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'STRUCT':([0,2,4,5,6,7,8,9,10,11,14,15,19,21,22,23,26,27,28,29,34,50,51,52,53,54,55,56,57,58,59,60,64,67,68,70,71,72,73,90,91,96,97,98,99,100,101,102,103,112,116,117,121,124,125,126,127,128,129,137,140,141,193,197,203,204,205,207,208,210,211,213,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,333,335,338,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[62,62,-60,-62,-63,-64,-65,-66,-67,62,-70,-71,-52,-345,-345,-345,62,-345,-29,-107,-345,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,62,-91,62,-345,-134,62,-90,-72,62,-53,-93,-9,-10,-345,-94,-95,-97,-185,-96,-343,62,62,62,-76,62,-92,62,-30,62,62,-186,62,62,62,-159,-344,-162,-163,62,62,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,62,-77,-73,-69,62,62,62,62,-35,-36,62,62,62,62,-160,-130,62,-177,-178,-223,-222,-227,-229,-242,-87,-84,62,-238,-239,-241,-31,-34,62,62,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235,]),'UNION':([0,2,4,5,6,7,8,9,10,11,14,15,19,21,22,23,26,27,28,29,34,50,51,52,53,54,55,56,57,58,59,60,64,67,68,70,71,72,73,90,91,96,97,98,99,100,101,102,103,112,116,117,121,124,125,126,127,128,129,137,140,141,193,197,203,204,205,207,208,210,211,213,221,222,223,224,225,226,227,228,229,230,231,232,238,251,262,267,294,298,299,304,311,312,313,322,333,335,338,349,351,353,354,355,356,358,360,361,370,371,372,374,375,377,439,440,449,460,467,471,472,473,481,482,485,486,501,510,511,528,558,559,560,561,562,587,588,593,594,],[63,63,-60,-62,-63,-64,-65,-66,-67,63,-70,-71,-52,-345,-345,-345,63,-345,-29,-107,-345,-134,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-61,63,-91,63,-345,-134,63,-90,-72,63,-53,-93,-9,-10,-345,-94,-95,-97,-185,-96,-343,63,63,63,-76,63,-92,63,-30,63,63,-186,63,63,63,-159,-344,-162,-163,63,63,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,63,-77,-73,-69,63,63,63,63,-35,-36,63,63,63,63,-160,-130,63,-177,-178,-223,-222,-227,-229,-242,-87,-84,63,-238,-239,-241,-31,-34,63,63,-161,-224,-228,-226,-86,-84,-237,-240,-68,-32,-33,-225,-230,-87,-84,-232,-233,-231,-234,-236,-235
,]),'LBRACE':([11,15,19,28,36,37,62,63,65,66,67,68,73,90,91,97,118,119,121,122,123,128,129,131,135,140,195,208,221,222,223,224,225,226,227,228,229,230,231,232,238,242,256,262,267,311,312,355,356,358,360,361,369,370,371,374,375,377,392,393,394,405,439,440,471,472,473,476,481,482,485,486,489,491,500,501,506,507,510,511,528,529,530,531,536,537,558,559,560,561,562,568,578,587,588,590,592,593,594,],[-345,-71,-52,-29,121,121,-157,-158,121,-7,-8,-91,-345,-90,-72,-53,121,121,-343,121,121,121,-92,121,121,-30,121,-344,121,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,121,121,-345,-73,-69,-35,-36,-223,-222,121,121,-242,121,-87,-74,-238,-239,-241,-11,121,-12,121,-31,-34,-224,121,-226,121,-86,-75,-237,-240,-345,-201,-345,-68,121,121,-32,-33,-225,121,121,121,121,-11,-230,-87,-74,-232,-233,-345,121,-231,-234,121,121,-236,-235,]),'RBRACE':([15,90,91,121,124,128,139,143,144,145,146,151,152,158,159,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,200,201,202,203,204,205,207,208,210,211,219,220,221,222,223,224,225,226,227,228,229,230,231,232,249,250,255,256,262,263,267,291,292,293,295,296,297,300,301,302,303,328,329,331,333,335,338,355,356,358,360,361,370,371,374,375,377,390,391,392,406,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,429,431,432,433,434,463,464,467,471,472,473,475,481,482,485,486,487,488,489,490,499,501,503,504,507,508,528,535,541,542,558,559,560,561,562,566,567,568,569,582,587,588,593,594,],[-71,-90,-72,-343,208,-345,-333,-311,-260,-261,-263,-282,-284,-297,-320,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,208,-174,-179,208,208,208,-159,-344,-162,-163,208,-5,-6,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-247,-282,-196,-345,-73,-334,-69,-305,-306,-285,-286,-287,-288,-339,-340,-341,-342,208,208,-175,208,208,-160,-223,-222,-227,-229,-242,-87,-84,-238,-239,-241,208,-22,-21,-313,-264,-265,-266,-267,-268,-269,-270,-271,-272,-273,-274,-275,-276,-277,-278,-279,-280,-281,-300,-301,-302,-303,-304,-176,-180,-161,-224,-228,-226,-245,-86,-84,-237,-240,-248,-197,208,-199,-283,-68,-298,-299,-289,-290,-225,-198,208,-262,-230,-87,-84,-232,-233,-200,-307,208,-314,-308,-231,-234,-236,-235,]),'CASE':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,234,-344,234,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,234,-73,-69,-223,-222,234,234,-242,234,-87,-74,-238,-239,-241,-224,234,-226,-86,-75,-237,-240,-68,-225,234,234,234,-230,-87,-74,-232,-233,234,-231,-234,234,234,-236,-235,]),'DEFAULT':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,235,-344,235,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,235,-73,-69,-223,-222,235,235,-242,235,-87,-74,-238,-239,-241,-224,235,-226,-86,-75,-237,-240,-68,-225,235,235,235,-230,-87,-74,-232,-233,235,-231,-234,235,235,-236,-235,]),'IF':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,237,-344,237,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,237,-73,-69,-223,-222,237,237,-242,237,-87,-74,-238,-239,-241,-2
24,237,-226,-86,-75,-237,-240,-68,-225,237,237,237,-230,-87,-74,-232,-233,237,-231,-234,237,237,-236,-235,]),'SWITCH':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,240,-344,240,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,240,-73,-69,-223,-222,240,240,-242,240,-87,-74,-238,-239,-241,-224,240,-226,-86,-75,-237,-240,-68,-225,240,240,240,-230,-87,-74,-232,-233,240,-231,-234,240,240,-236,-235,]),'WHILE':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,368,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,241,-344,241,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,241,-73,-69,-223,-222,241,241,-242,480,241,-87,-74,-238,-239,-241,-224,241,-226,-86,-75,-237,-240,-68,-225,241,241,241,-230,-87,-74,-232,-233,241,-231,-234,241,241,-236,-235,]),'DO':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,242,-344,242,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,242,-73,-69,-223,-222,242,242,-242,242,-87,-74,-238,-239,-241,-224,242,-226,-86,-75,-237,-240,-68,-225,242,242,242,-230,-87,-74,-232,-233,242,-231,-234,242,242,-236,-235,]),'FOR':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,243,-344,243,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,243,-73,-69,-223,-222,243,243,-242,243,-87,-74,-238,-239,-241,-224,243,-226,-86,-75,-237,-240,-68,-225,243,243,243,-230,-87,-74,-232,-233,243,-231,-234,243,243,-236,-235,]),'GOTO':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,244,-344,244,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,244,-73,-69,-223,-222,244,244,-242,244,-87,-74,-238,-239,-241,-224,244,-226,-86,-75,-237,-240,-68,-225,244,244,244,-230,-87,-74,-232,-233,244,-231,-234,244,244,-236,-235,]),'BREAK':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,245,-344,245,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,245,-73,-69,-223,-222,245,245,-242,245,-87,-74,-238,-239,-241,-224,245,-226,-86,-75,-237,-240,-68,-225,245,245,245,-230,-87,-74,-232,-233,245,-231,-234,245,245,-236,-235,]),'CONTINUE':([15,90,91,121,128,208,221,222,223,224,225,226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,246,-344,246,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,246,-73,-69,-223,-222,246,246,-242,246,-87,-74,-238,-239,-241,-224,246,-226,-86,-75,-237,-240,-68,-225,246,246,246,-230,-87,-74,-232,-233,246,-231,-234,246,246,-236,-235,]),'RETURN':([15,90,91,121,128,208,221,222,223,224,225,
226,227,228,229,230,231,232,242,262,267,355,356,358,360,361,369,370,371,374,375,377,471,472,473,481,482,485,486,501,528,529,530,531,558,559,560,561,562,578,587,588,590,592,593,594,],[-71,-90,-72,-343,247,-344,247,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,247,-73,-69,-223,-222,247,247,-242,247,-87,-74,-238,-239,-241,-224,247,-226,-86,-75,-237,-240,-68,-225,247,247,247,-230,-87,-74,-232,-233,247,-231,-234,247,247,-236,-235,]),'PLUSPLUS':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,139,141,143,147,148,149,150,152,153,154,155,156,158,159,160,161,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,233,234,238,242,247,256,257,258,259,262,263,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,291,292,294,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,406,429,431,432,433,434,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,503,504,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,567,568,569,571,578,580,582,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,153,-345,-27,-28,-185,-343,153,153,153,-345,-333,153,-311,-292,-293,-294,-291,291,153,153,153,153,-297,-320,-295,-296,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,153,-345,-28,153,-186,-344,153,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-320,153,153,153,153,-345,153,-345,-28,-73,-334,-69,153,153,153,153,153,153,153,153,153,153,153,153,153,153,153,153,153,153,153,153,153,-305,-306,153,153,-339,-340,-341,-342,-292,153,153,-345,153,153,-223,-222,153,153,-242,153,153,153,153,153,-87,-74,153,-238,-239,-241,153,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,153,-12,153,-292,153,153,153,-313,-300,-301,-302,-303,-304,-345,153,-345,-28,153,153,-224,153,-226,153,-86,-75,153,-237,-240,-345,-201,-345,-68,153,-298,-299,153,153,-345,-28,153,153,-292,-225,153,153,153,153,153,153,-11,-292,153,153,-230,-87,-74,-232,-233,153,-307,-345,-314,153,153,153,-308,-231,-234,153,153,-236,-235,]),'MINUSMINUS':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,139,141,143,147,148,149,150,152,153,154,155,156,158,159,160,161,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,233,234,238,242,247,256,257,258,259,262,263,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,291,292,294,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,406,429,431,432,433,434,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,503,504,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,567,568,569,571,578,580,582,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,154,-345,-27,-28,-185,-343,154,154,154,-345,-333,154,-311,-292,-293,-294,-291,292,154,154,154,154,-297,-320,-295,-296,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,154,-345,-28,154,-186,-344,154,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-320,154,154,154,154,-345,154,-345,-28,-73,-334,-69,154,154
,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,154,-305,-306,154,154,-339,-340,-341,-342,-292,154,154,-345,154,154,-223,-222,154,154,-242,154,154,154,154,154,-87,-74,154,-238,-239,-241,154,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,154,-12,154,-292,154,154,154,-313,-300,-301,-302,-303,-304,-345,154,-345,-28,154,154,-224,154,-226,154,-86,-75,154,-237,-240,-345,-201,-345,-68,154,-298,-299,154,154,-345,-28,154,154,-292,-225,154,154,154,154,154,154,-11,-292,154,154,-230,-87,-74,-232,-233,154,-307,-345,-314,154,154,154,-308,-231,-234,154,154,-236,-235,]),'SIZEOF':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,156,-345,-27,-28,-185,-343,156,156,156,-345,156,-292,-293,-294,-291,156,156,156,156,-295,-296,156,-345,-28,156,-186,-344,156,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,156,156,156,156,-345,156,-345,-28,-73,-69,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,156,-292,156,156,-345,156,156,-223,-222,156,156,-242,156,156,156,156,156,-87,-74,156,-238,-239,-241,156,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,156,-12,156,-292,156,156,156,-345,156,-345,-28,156,156,-224,156,-226,156,-86,-75,156,-237,-240,-345,-201,-345,-68,156,156,156,-345,-28,156,156,-292,-225,156,156,156,156,156,156,-11,-292,156,156,-230,-87,-74,-232,-233,156,-345,156,156,156,-231,-234,156,156,-236,-235,]),'_ALIGNOF':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,157,-345,-27,-28,-185,-343,157,157,157,-345,157,-292,-293,-294,-291,157,157,157,157,-295,-296,157,-345,-28,157,-186,-344,157,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,157,157,157,157,-345,157,-345,-28,-73,-69,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,157,-292,157,157,-345,157,157,-223,-222,157,157,-242,157,157,157,157,157,-87,-74,157,-238,-239,-241,157,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,157,-12,157,-292,157,157,157,-345,157,-345,-28,157,157,-224,157,-226,157,-86,-75,157,-237,-240,-345,-201,-345,-68,157,157,157,-345,-28,157,157,-292,-225,157,157,157,157,157,157,-11,-292,157,157,-230,-87,-74,-232,-233,157,-345,157,157,157,-231,-234,157,157,-236,-235,]),'AND':([15,51
,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,139,141,143,145,146,147,148,149,150,151,152,153,154,155,156,158,159,160,161,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,233,234,238,242,247,250,256,257,258,259,262,263,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,291,292,293,294,295,296,297,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,406,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,429,431,432,433,434,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,499,500,501,502,503,504,505,507,508,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,567,568,569,571,578,580,582,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,150,-345,-27,-28,-185,-343,150,150,150,-345,-333,150,-311,282,-263,-292,-293,-294,-291,-282,-284,150,150,150,150,-297,-320,-295,-296,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,150,-345,-28,150,-186,-344,150,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-320,150,150,150,150,-282,-345,150,-345,-28,-73,-334,-69,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,150,-305,-306,-285,150,-286,-287,-288,150,-339,-340,-341,-342,-292,150,150,-345,150,150,-223,-222,150,150,-242,150,150,150,150,150,-87,-74,150,-238,-239,-241,150,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,150,-12,150,-292,150,150,150,-313,-264,-265,-266,-267,-268,-269,-270,-271,-272,-273,-274,-275,-276,-277,282,282,282,282,-300,-301,-302,-303,-304,-345,150,-345,-28,150,150,-224,150,-226,150,-86,-75,150,-237,-240,-345,-201,-283,-345,-68,150,-298,-299,150,-289,-290,150,-345,-28,150,150,-292,-225,150,150,150,150,150,150,-11,-292,150,150,-230,-87,-74,-232,-233,150,-307,-345,-314,150,150,150,-308,-231,-234,150,150,-236,-235,]),'PLUS':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,139,141,143,145,146,147,148,149,150,151,152,153,154,155,156,158,159,160,161,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,233,234,238,242,247,250,256,257,258,259,262,263,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,291,292,293,294,295,296,297,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,406,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,429,431,432,433,434,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,499,500,501,502,503,504,505,507,508,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,567,568,569,571,578,580,582,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,148,-345,-27,-28,-185,-343,148,148,148,-345,-333,148,-311,272,-263,-292,-293,-294,-291,-282,-284,148,148,148,148,-297,-320,-295,-296,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,148,-345,-28,148,-186,-344,148,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-320,148,148,148,148,-282,-345,148,-345,-28,-73,-334,-69,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148
,148,148,148,-305,-306,-285,148,-286,-287,-288,148,-339,-340,-341,-342,-292,148,148,-345,148,148,-223,-222,148,148,-242,148,148,148,148,148,-87,-74,148,-238,-239,-241,148,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,148,-12,148,-292,148,148,148,-313,-264,-265,-266,-267,-268,272,272,272,272,272,272,272,272,272,272,272,272,272,-300,-301,-302,-303,-304,-345,148,-345,-28,148,148,-224,148,-226,148,-86,-75,148,-237,-240,-345,-201,-283,-345,-68,148,-298,-299,148,-289,-290,148,-345,-28,148,148,-292,-225,148,148,148,148,148,148,-11,-292,148,148,-230,-87,-74,-232,-233,148,-307,-345,-314,148,148,148,-308,-231,-234,148,148,-236,-235,]),'MINUS':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,139,141,143,145,146,147,148,149,150,151,152,153,154,155,156,158,159,160,161,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,233,234,238,242,247,250,256,257,258,259,262,263,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,291,292,293,294,295,296,297,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,406,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,429,431,432,433,434,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,499,500,501,502,503,504,505,507,508,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,567,568,569,571,578,580,582,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,149,-345,-27,-28,-185,-343,149,149,149,-345,-333,149,-311,273,-263,-292,-293,-294,-291,-282,-284,149,149,149,149,-297,-320,-295,-296,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,149,-345,-28,149,-186,-344,149,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,-320,149,149,149,149,-282,-345,149,-345,-28,-73,-334,-69,149,149,149,149,149,149,149,149,149,149,149,149,149,149,149,149,149,149,149,149,149,-305,-306,-285,149,-286,-287,-288,149,-339,-340,-341,-342,-292,149,149,-345,149,149,-223,-222,149,149,-242,149,149,149,149,149,-87,-74,149,-238,-239,-241,149,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,149,-12,149,-292,149,149,149,-313,-264,-265,-266,-267,-268,273,273,273,273,273,273,273,273,273,273,273,273,273,-300,-301,-302,-303,-304,-345,149,-345,-28,149,149,-224,149,-226,149,-86,-75,149,-237,-240,-345,-201,-283,-345,-68,149,-298,-299,149,-289,-290,149,-345,-28,149,149,-292,-225,149,149,149,149,149,149,-11,-292,149,149,-230,-87,-74,-232,-233,149,-307,-345,-314,149,149,149,-308,-231,-234,149,149,-236,-235,]),'NOT':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,160,-345,-27,-28,-185,-343,160,160,160,-345,160,-292,-293,-294,-291,160
,160,160,160,-295,-296,160,-345,-28,160,-186,-344,160,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,160,160,160,160,-345,160,-345,-28,-73,-69,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,-292,160,160,-345,160,160,-223,-222,160,160,-242,160,160,160,160,160,-87,-74,160,-238,-239,-241,160,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,160,-12,160,-292,160,160,160,-345,160,-345,-28,160,160,-224,160,-226,160,-86,-75,160,-237,-240,-345,-201,-345,-68,160,160,160,-345,-28,160,160,-292,-225,160,160,160,160,160,160,-11,-292,160,160,-230,-87,-74,-232,-233,160,-345,160,160,160,-231,-234,160,160,-236,-235,]),'LNOT':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,161,-345,-27,-28,-185,-343,161,161,161,-345,161,-292,-293,-294,-291,161,161,161,161,-295,-296,161,-345,-28,161,-186,-344,161,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,161,161,161,161,-345,161,-345,-28,-73,-69,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,161,-292,161,161,-345,161,161,-223,-222,161,161,-242,161,161,161,161,161,-87,-74,161,-238,-239,-241,161,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,161,-12,161,-292,161,161,161,-345,161,-345,-28,161,161,-224,161,-226,161,-86,-75,161,-237,-240,-345,-201,-345,-68,161,161,161,-345,-28,161,161,-292,-225,161,161,161,161,161,161,-11,-292,161,161,-230,-87,-74,-232,-233,161,-345,161,161,161,-231,-234,161,161,-236,-235,]),'OFFSETOF':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,165,-345,-27,-28,-185,-343,165,165,165,-345,165,-292,-293,-294,-291,165,165,165,165,-295,-296,165,-345,-28,165,-186,-344,165,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,165,165,165,165,-345,165,-345,-28,-73,-69,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,165,-292,165,165,-345,165,165,-223,-222,165,165,-242,165,165,165,165,165,-87,-74,165,-238,-239,-241,165,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,165,-12,165,-292,165,165,165,-345,165,-345,-28,165,165,-224,165,-226,165,-86,-75,165,-237,-240,-345,-201,-345,-68,165,165,165,-345,-28,165,165,-292,-225,165,165,165,165,165,165,-11,-292,165,165,-230,-87,-74,-232,-233,16
5,-345,165,165,165,-231,-234,165,165,-236,-235,]),'INT_CONST_DEC':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,166,-345,-27,-28,-185,-343,166,166,166,-345,166,-292,-293,-294,-291,166,166,166,166,-295,-296,166,-345,-28,166,-186,-344,166,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,166,166,166,166,-345,166,-345,-28,-73,-69,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,166,-292,166,166,-345,166,166,-223,-222,166,166,-242,166,166,166,166,166,-87,-74,166,-238,-239,-241,166,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,166,-12,166,-292,166,166,166,-345,166,-345,-28,166,166,-224,166,-226,166,-86,-75,166,-237,-240,-345,-201,-345,-68,166,166,166,-345,-28,166,166,-292,-225,166,166,166,166,166,166,-11,-292,166,166,-230,-87,-74,-232,-233,166,-345,166,166,166,-231,-234,166,166,-236,-235,]),'INT_CONST_OCT':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,167,-345,-27,-28,-185,-343,167,167,167,-345,167,-292,-293,-294,-291,167,167,167,167,-295,-296,167,-345,-28,167,-186,-344,167,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,167,167,167,167,-345,167,-345,-28,-73,-69,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,-292,167,167,-345,167,167,-223,-222,167,167,-242,167,167,167,167,167,-87,-74,167,-238,-239,-241,167,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,167,-12,167,-292,167,167,167,-345,167,-345,-28,167,167,-224,167,-226,167,-86,-75,167,-237,-240,-345,-201,-345,-68,167,167,167,-345,-28,167,167,-292,-225,167,167,167,167,167,167,-11,-292,167,167,-230,-87,-74,-232,-233,167,-345,167,167,167,-231,-234,167,167,-236,-235,]),'INT_CONST_HEX':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,
486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,168,-345,-27,-28,-185,-343,168,168,168,-345,168,-292,-293,-294,-291,168,168,168,168,-295,-296,168,-345,-28,168,-186,-344,168,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,168,168,168,168,-345,168,-345,-28,-73,-69,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,168,-292,168,168,-345,168,168,-223,-222,168,168,-242,168,168,168,168,168,-87,-74,168,-238,-239,-241,168,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,168,-12,168,-292,168,168,168,-345,168,-345,-28,168,168,-224,168,-226,168,-86,-75,168,-237,-240,-345,-201,-345,-68,168,168,168,-345,-28,168,168,-292,-225,168,168,168,168,168,168,-11,-292,168,168,-230,-87,-74,-232,-233,168,-345,168,168,168,-231,-234,168,168,-236,-235,]),'INT_CONST_BIN':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,169,-345,-27,-28,-185,-343,169,169,169,-345,169,-292,-293,-294,-291,169,169,169,169,-295,-296,169,-345,-28,169,-186,-344,169,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,169,169,169,169,-345,169,-345,-28,-73,-69,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,169,-292,169,169,-345,169,169,-223,-222,169,169,-242,169,169,169,169,169,-87,-74,169,-238,-239,-241,169,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,169,-12,169,-292,169,169,169,-345,169,-345,-28,169,169,-224,169,-226,169,-86,-75,169,-237,-240,-345,-201,-345,-68,169,169,169,-345,-28,169,169,-292,-225,169,169,169,169,169,169,-11,-292,169,169,-230,-87,-74,-232,-233,169,-345,169,169,169,-231,-234,169,169,-236,-235,]),'INT_CONST_CHAR':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,170,-345,-27,-28,-185,-343,170,170,170,-345,170,-292,-293,-294,-291,170,170,170,170,-295,-296,170,-345,-28,170,-186,-344,170,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,170,170,170,170,-345,170,-345,-28,-73,-69,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,-292,170,170,-345,170,170,-223,-222,170,170,-242,170,170,170,170,170,-87,-74,170,-238,-239,-241,1
70,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,170,-12,170,-292,170,170,170,-345,170,-345,-28,170,170,-224,170,-226,170,-86,-75,170,-237,-240,-345,-201,-345,-68,170,170,170,-345,-28,170,170,-292,-225,170,170,170,170,170,170,-11,-292,170,170,-230,-87,-74,-232,-233,170,-345,170,170,170,-231,-234,170,170,-236,-235,]),'FLOAT_CONST':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,171,-345,-27,-28,-185,-343,171,171,171,-345,171,-292,-293,-294,-291,171,171,171,171,-295,-296,171,-345,-28,171,-186,-344,171,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,171,171,171,171,-345,171,-345,-28,-73,-69,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,171,-292,171,171,-345,171,171,-223,-222,171,171,-242,171,171,171,171,171,-87,-74,171,-238,-239,-241,171,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,171,-12,171,-292,171,171,171,-345,171,-345,-28,171,171,-224,171,-226,171,-86,-75,171,-237,-240,-345,-201,-345,-68,171,171,171,-345,-28,171,171,-292,-225,171,171,171,171,171,171,-11,-292,171,171,-230,-87,-74,-232,-233,171,-345,171,171,171,-231,-234,171,171,-236,-235,]),'HEX_FLOAT_CONST':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,172,-345,-27,-28,-185,-343,172,172,172,-345,172,-292,-293,-294,-291,172,172,172,172,-295,-296,172,-345,-28,172,-186,-344,172,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,172,172,172,172,-345,172,-345,-28,-73,-69,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,172,-292,172,172,-345,172,172,-223,-222,172,172,-242,172,172,172,172,172,-87,-74,172,-238,-239,-241,172,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,172,-12,172,-292,172,172,172,-345,172,-345,-28,172,172,-224,172,-226,172,-86,-75,172,-237,-240,-345,-201,-345,-68,172,172,172,-345,-28,172,172,-292,-225,172,172,172,172,172,172,-11,-292,172,172,-230,-87,-74,-232,-233,172,-345,172,172,172,-231,-234,172,172,-236,-235,]),'CHAR_CONST':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,27
7,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,173,-345,-27,-28,-185,-343,173,173,173,-345,173,-292,-293,-294,-291,173,173,173,173,-295,-296,173,-345,-28,173,-186,-344,173,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,173,173,173,173,-345,173,-345,-28,-73,-69,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,173,-292,173,173,-345,173,173,-223,-222,173,173,-242,173,173,173,173,173,-87,-74,173,-238,-239,-241,173,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,173,-12,173,-292,173,173,173,-345,173,-345,-28,173,173,-224,173,-226,173,-86,-75,173,-237,-240,-345,-201,-345,-68,173,173,173,-345,-28,173,173,-292,-225,173,173,173,173,173,173,-11,-292,173,173,-230,-87,-74,-232,-233,173,-345,173,173,173,-231,-234,173,173,-236,-235,]),'WCHAR_CONST':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,174,-345,-27,-28,-185,-343,174,174,174,-345,174,-292,-293,-294,-291,174,174,174,174,-295,-296,174,-345,-28,174,-186,-344,174,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,174,174,174,174,-345,174,-345,-28,-73,-69,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,174,-292,174,174,-345,174,174,-223,-222,174,174,-242,174,174,174,174,174,-87,-74,174,-238,-239,-241,174,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,174,-12,174,-292,174,174,174,-345,174,-345,-28,174,174,-224,174,-226,174,-86,-75,174,-237,-240,-345,-201,-345,-68,174,174,174,-345,-28,174,174,-292,-225,174,174,174,174,174,174,-11,-292,174,174,-230,-87,-74,-232,-233,174,-345,174,174,174,-231,-234,174,174,-236,-235,]),'U8CHAR_CONST':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,175,-345,-27,-28,-185,-343,175,175,175,-345,175,-292,-293,-294,-291,175,175,175,175,-295,-296,175,-345,-28,175,-186,-344,175,-221
,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,175,175,175,175,-345,175,-345,-28,-73,-69,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,175,-292,175,175,-345,175,175,-223,-222,175,175,-242,175,175,175,175,175,-87,-74,175,-238,-239,-241,175,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,175,-12,175,-292,175,175,175,-345,175,-345,-28,175,175,-224,175,-226,175,-86,-75,175,-237,-240,-345,-201,-345,-68,175,175,175,-345,-28,175,175,-292,-225,175,175,175,175,175,175,-11,-292,175,175,-230,-87,-74,-232,-233,175,-345,175,175,175,-231,-234,175,175,-236,-235,]),'U16CHAR_CONST':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,176,-345,-27,-28,-185,-343,176,176,176,-345,176,-292,-293,-294,-291,176,176,176,176,-295,-296,176,-345,-28,176,-186,-344,176,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,176,176,176,176,-345,176,-345,-28,-73,-69,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,176,-292,176,176,-345,176,176,-223,-222,176,176,-242,176,176,176,176,176,-87,-74,176,-238,-239,-241,176,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,176,-12,176,-292,176,176,176,-345,176,-345,-28,176,176,-224,176,-226,176,-86,-75,176,-237,-240,-345,-201,-345,-68,176,176,176,-345,-28,176,176,-292,-225,176,176,176,176,176,176,-11,-292,176,176,-230,-87,-74,-232,-233,176,-345,176,176,176,-231,-234,176,176,-236,-235,]),'U32CHAR_CONST':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,177,-345,-27,-28,-185,-343,177,177,177,-345,177,-292,-293,-294,-291,177,177,177,177,-295,-296,177,-345,-28,177,-186,-344,177,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,177,177,177,177,-345,177,-345,-28,-73,-69,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,177,-292,177,177,-345,177,177,-223,-222,177,177,-242,177,177,177,177,177,-87,-74,177,-238,-239,-241,177,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,177,-12,177,-292,177,177,177,-345,177,-345,-28,177,177,-224,177,-226,177,-86,-75,177,-237,-240,-345,-201,-345,-68,177,177,177,-345,-28,177,177,-292,-225,177,177,177,177,177,177,-11,-292,177,177,-230,-87,-74,-232,-233,177,-345,177,177,177,-231,-234,177,177,-236,-2
35,]),'STRING_LITERAL':([15,51,52,53,81,90,91,92,94,95,114,115,116,121,126,128,135,136,138,139,141,143,147,148,149,150,153,154,155,156,160,161,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,263,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,407,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,139,139,-345,-27,-28,-185,-343,139,139,139,-345,263,-333,139,263,-292,-293,-294,-291,139,139,139,139,-295,-296,139,-345,-28,139,-186,-344,139,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,139,139,139,139,-345,139,-345,-28,-73,-334,139,-69,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,139,-292,139,139,-345,139,139,-223,-222,139,139,-242,139,139,139,139,139,-87,-74,139,-238,-239,-241,139,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,139,-12,139,-292,139,139,139,263,-345,139,-345,-28,139,139,-224,139,-226,139,-86,-75,139,-237,-240,-345,-201,-345,-68,139,139,139,-345,-28,139,139,-292,-225,139,139,139,139,139,139,-11,-292,139,139,-230,-87,-74,-232,-233,139,-345,139,139,139,-231,-234,139,139,-236,-235,]),'WSTRING_LITERAL':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,164,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,178,-345,-27,-28,-185,-343,178,178,178,-345,178,-292,-293,-294,-291,178,178,178,178,-295,-296,300,-335,-336,-337,-338,178,-345,-28,178,-186,-344,178,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,178,178,178,178,-345,178,-345,-28,-73,-69,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,178,-339,-340,-341,-342,-292,178,178,-345,178,178,-223,-222,178,178,-242,178,178,178,178,178,-87,-74,178,-238,-239,-241,178,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,178,-12,178,-292,178,178,178,-345,178,-345,-28,178,178,-224,178,-226,178,-86,-75,178,-237,-240,-345,-201,-345,-68,178,178,178,-345,-28,178,178,-292,-225,178,178,178,178,178,178,-11,-292,178,178,-230,-87,-74,-232,-233,178,-345,178,178,178,-231,-234,178,178,-236,-235,]),'U8STRING_LITERAL':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,164,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,3
78,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,179,-345,-27,-28,-185,-343,179,179,179,-345,179,-292,-293,-294,-291,179,179,179,179,-295,-296,301,-335,-336,-337,-338,179,-345,-28,179,-186,-344,179,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,179,179,179,179,-345,179,-345,-28,-73,-69,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,179,-339,-340,-341,-342,-292,179,179,-345,179,179,-223,-222,179,179,-242,179,179,179,179,179,-87,-74,179,-238,-239,-241,179,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,179,-12,179,-292,179,179,179,-345,179,-345,-28,179,179,-224,179,-226,179,-86,-75,179,-237,-240,-345,-201,-345,-68,179,179,179,-345,-28,179,179,-292,-225,179,179,179,179,179,179,-11,-292,179,179,-230,-87,-74,-232,-233,179,-345,179,179,179,-231,-234,179,179,-236,-235,]),'U16STRING_LITERAL':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,164,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,180,-345,-27,-28,-185,-343,180,180,180,-345,180,-292,-293,-294,-291,180,180,180,180,-295,-296,302,-335,-336,-337,-338,180,-345,-28,180,-186,-344,180,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,180,180,180,180,-345,180,-345,-28,-73,-69,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,180,-339,-340,-341,-342,-292,180,180,-345,180,180,-223,-222,180,180,-242,180,180,180,180,180,-87,-74,180,-238,-239,-241,180,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,180,-12,180,-292,180,180,180,-345,180,-345,-28,180,180,-224,180,-226,180,-86,-75,180,-237,-240,-345,-201,-345,-68,180,180,180,-345,-28,180,180,-292,-225,180,180,180,180,180,180,-11,-292,180,180,-230,-87,-74,-232,-233,180,-345,180,180,180,-231,-234,180,180,-236,-235,]),'U32STRING_LITERAL':([15,51,52,53,81,90,91,94,95,114,115,116,121,126,128,135,136,141,147,148,149,150,153,154,155,156,160,161,164,178,179,180,181,182,183,184,195,197,208,221,222,223,224,225,226,227,228,229,230,231,232,234,238,242,247,256,257,258,259,262,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,294,298,300,301,302,303,306,309,310,323,332,347,355,356,358,360,361,362,365,366,367,369,370,371,372,374,375,377,378,379,380,381,382,383,384,385,386,387,388,389,392,393,394,397,400,401,402,405,448,455,457,458,459,469,471,472,473,476,481,482,484,485,486,489,491,500,501,502,505,512,513,514,521,522,524,528,529,530,531,532,533,536,537,547,548,549,558,559,560,561,562,565,568,571,578,580,587,588,590,592,593,594,],[-71,-131,-132,-133,-134,-90,-72,181,-345,-27,-28,-185,-343,181,181,181,-345,181,-292,-293,-294,-291,181,181,181,181,-295,-296,303,-33
5,-336,-337,-338,181,-345,-28,181,-186,-344,181,-221,-219,-220,-78,-79,-80,-81,-82,-83,-84,-85,181,181,181,181,-345,181,-345,-28,-73,-69,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,181,-339,-340,-341,-342,-292,181,181,-345,181,181,-223,-222,181,181,-242,181,181,181,181,181,-87,-74,181,-238,-239,-241,181,-249,-250,-251,-252,-253,-254,-255,-256,-257,-258,-259,-11,181,-12,181,-292,181,181,181,-345,181,-345,-28,181,181,-224,181,-226,181,-86,-75,181,-237,-240,-345,-201,-345,-68,181,181,181,-345,-28,181,181,-292,-225,181,181,181,181,181,181,-11,-292,181,181,-230,-87,-74,-232,-233,181,-345,181,181,181,-231,-234,181,181,-236,-235,]),'ELSE':([15,91,208,225,226,227,228,229,230,232,262,267,355,358,360,361,370,371,374,375,377,471,472,473,481,482,485,486,501,528,558,559,560,561,562,587,588,593,594,],[-71,-72,-344,-78,-79,-80,-81,-82,-83,-85,-73,-69,-223,-227,-229,-242,-87,-84,-238,-239,-241,-224,-228,-226,-86,-84,-237,-240,-68,-225,-230,578,-84,-232,-233,-231,-234,-236,-235,]),'PPPRAGMASTR':([15,],[91,]),'EQUALS':([19,28,73,86,87,88,89,97,111,130,132,139,140,143,151,152,158,159,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,202,208,233,250,252,263,291,292,293,295,296,297,300,301,302,303,311,312,395,396,403,404,406,429,431,432,433,434,439,440,492,494,495,496,499,503,504,507,508,510,511,538,539,540,567,569,582,],[-52,-29,-181,135,-182,-54,-37,-53,195,-181,-55,-333,-30,-311,-282,-284,-297,-320,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,332,-344,-320,379,-38,-334,-305,-306,-285,-286,-287,-288,-339,-340,-341,-342,-35,-36,491,-202,-43,-44,-313,-300,-301,-302,-303,-304,-31,-34,-203,-205,-39,-42,-283,-298,-299,-289,-290,-32,-33,-204,-40,-41,-307,-314,-308,]),'COMMA':([19,24,25,28,29,30,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,51,52,53,54,55,56,57,58,59,60,73,74,75,76,77,78,81,84,85,86,87,88,89,97,104,106,108,110,111,113,114,115,116,118,119,122,123,130,132,139,140,142,143,144,145,146,151,152,158,159,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,187,189,190,191,192,196,197,200,201,202,206,208,212,214,216,233,239,248,249,250,252,253,254,255,263,265,291,292,293,295,296,297,300,301,302,303,311,312,315,316,317,318,319,320,321,324,325,326,327,328,329,330,331,334,336,337,340,341,342,344,345,346,348,349,350,352,353,354,376,391,403,404,406,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,438,439,440,444,445,446,447,461,462,463,464,465,466,470,474,475,477,478,479,487,488,490,495,496,499,503,504,507,508,510,511,517,518,520,526,527,535,539,540,541,542,543,550,551,552,555,556,557,563,566,567,569,572,573,576,577,582,584,585,586,],[-52,-128,-102,-29,-107,-345,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-181,-98,-99,-100,-101,-104,-134,134,-135,-137,-182,-54,-37,-53,-103,-129,194,-139,-141,-183,-27,-28,-185,-169,-170,-149,-150,-181,-55,-333,-30,266,-311,-260,-261,-263,-282,-284,-297,-320,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,313,314,-189,-194,-345,-184,-186,331,-174,-179,-152,-344,-145,-147,-345,-320,365,-243,-247,-282,-38,-136,-138,-196,-334,365,-305,-306,-285,-286,-287,-288,-339,-340,-341,-342,-35,-36,-191,-192,-193,-207,-56,-1,-2,-45,-209,-140,-142,331,331,-171,-175,-154,-156,-151,-143,-144,-148,468,-164,-166,-146,-130,-206,-207,-177,-178,365,489,-43,-44,-313,
365,-264,-265,-266,-267,-268,-269,-270,-271,-272,-273,-274,-275,-276,-277,-278,-279,-280,-281,365,505,-300,-318,-301,-302,-303,-304,509,-31,-34,-190,-195,-57,-208,-172,-173,-176,-180,-153,-155,-168,365,-245,-244,365,365,-248,-197,-199,-39,-42,-283,-298,-299,-289,-290,-32,-33,-210,-216,-214,-165,-167,-198,-40,-41,568,-262,-319,-50,-51,-212,-211,-213,-215,365,-200,-307,-314,-46,-49,-217,-218,-308,365,-47,-48,]),'RPAREN':([19,24,25,28,29,30,31,32,33,34,35,38,39,40,41,42,43,44,45,46,47,48,49,51,52,53,54,55,56,57,58,59,60,74,75,76,77,78,81,88,89,93,96,97,104,106,113,114,115,116,118,119,122,123,132,133,137,138,139,140,142,143,144,145,146,151,152,158,159,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,185,186,187,188,189,190,191,192,196,197,206,208,212,214,215,216,217,218,239,248,249,250,252,260,261,263,264,265,288,291,292,293,295,296,297,300,301,302,303,311,312,315,316,317,318,319,320,321,322,324,325,330,334,336,337,340,341,342,348,349,350,351,352,353,354,355,357,363,364,403,404,406,407,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,428,429,430,431,432,433,434,435,436,437,439,440,443,444,445,446,447,449,450,451,452,453,454,460,461,462,465,466,474,475,477,478,479,487,495,496,499,503,504,507,508,510,511,515,516,517,518,520,525,539,540,542,543,544,545,550,551,552,555,556,557,563,565,567,569,572,573,576,577,580,581,582,583,585,586,589,591,],[-52,-128,-102,-29,-107,-345,-125,-126,-127,-129,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-131,-132,-133,-105,-106,-108,-109,-110,-111,-112,-98,-99,-100,-101,-104,-134,-54,-37,140,-345,-53,-103,-129,-183,-27,-28,-185,-169,-170,-149,-150,-55,252,-345,262,-333,-30,267,-311,-260,-261,-263,-282,-284,-297,-320,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,311,312,-187,-17,-18,-189,-194,-345,-184,-186,-152,-344,-145,-147,349,-345,353,354,-14,-243,-247,-282,-38,403,404,-334,405,406,429,-305,-306,-285,-286,-287,-288,-339,-340,-341,-342,-35,-36,-191,-192,-193,-207,-56,-1,-2,-345,-45,-209,-171,-154,-156,-151,-143,-144,-148,-146,-130,-206,-345,-207,-177,-178,-223,-13,475,476,-43,-44,-313,501,-264,-265,-266,-267,-268,-269,-270,-271,-272,-273,-274,-275,-276,-277,-278,-279,-280,-281,504,-300,-318,-301,-302,-303,-304,506,507,508,-31,-34,-188,-190,-195,-57,-208,-345,517,518,-207,-23,-24,-345,-172,-173,-153,-155,529,-245,-244,530,531,-248,-39,-42,-283,-298,-299,-289,-290,-32,-33,550,551,-210,-216,-214,557,-40,-41,-262,-319,569,-315,-50,-51,-212,-211,-213,-215,579,-345,-307,-314,-46,-49,-217,-218,-345,590,-308,-316,-47,-48,592,-317,]),'COLON':([19,24,28,31,32,33,35,38,39,40,41,42,43,44,45,46,47,48,49,51,52,53,81,87,88,89,97,106,118,119,122,123,130,132,139,140,143,144,145,146,151,152,158,159,162,163,164,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,206,208,209,212,214,233,235,248,249,250,252,263,291,292,293,295,296,297,300,301,302,303,311,312,330,334,336,337,340,341,342,346,348,349,353,354,359,403,404,406,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,429,431,432,433,434,439,440,461,462,465,466,468,475,477,487,495,496,499,503,504,507,508,510,511,539,540,542,567,569,582,],[-52,-128,-29,-125,-126,-127,-246,-113,-114,-115,-116,-117,-118,-119,-120,-121,-122,-123,-124,-131,-132,-133,-134,-182,-54,-37,-53,-129,-169,-170,-149,-150,-181,-55,-333,-30,-311,-260,-261,-263,-282,-284,-297,-320,-309,-310,-312,-321,-322,-323,-324,-325,-326,-327,-328,-329,-330,-331,-332,-335,-336,-337,-338,-152,-344,347,-145,-147,358,360,-243,-2
[… remaining machine-generated _lr_action_items entries elided: one (state-list, action-list) pair per token, covering LBRACKET, RBRACKET, PERIOD, ARROW, CONDOP, DIVIDE, MOD, RSHIFT, LSHIFT, LT, LE, GE, GT, EQ, NE, OR, XOR, LAND, LOR and the compound-assignment tokens XOREQUAL through OREQUAL …] 'ELLIPSIS':([313,],[443,]),}
+
+_lr_action = {}
+for _k, _v in _lr_action_items.items():
+   for _x,_y in zip(_v[0],_v[1]):
+      if not _x in _lr_action: _lr_action[_x] = {}
+      _lr_action[_x][_k] = _y
+del _lr_action_items
+
+_lr_goto_items = {'translation_unit_or_empty':([0,],[1,]),'translation_unit':([0,],[2,]), [… remaining machine-generated _lr_goto_items entries elided: one (state-list, goto-list) pair per nonterminal of the pycparser C grammar, ending with …] 'offsetof_member_designator':([509,],[544,]),}
+
+_lr_goto = {}
+for _k, _v in _lr_goto_items.items():
+   for _x, _y in zip(_v[0], _v[1]):
+      if not _x in _lr_goto: _lr_goto[_x] = {}
+      _lr_goto[_x][_k] = _y
+del _lr_goto_items
+_lr_productions = [
+  ("S' -> translation_unit_or_empty","S'",1,None,None,None),
[… machine-generated production records elided: one (rule, lhs, rhs length, handler, source file, source line) tuple per grammar rule, from the *_opt rules defined in plyparser.py through the declarator, specifier, struct/enum, statement and expression rules defined in c_parser.py, continuing with …]
+  ('conditional_expression -> binary_expression CONDOP expression COLON
conditional_expression','conditional_expression',5,'p_conditional_expression','c_parser.py',1706), + ('binary_expression -> cast_expression','binary_expression',1,'p_binary_expression','c_parser.py',1714), + ('binary_expression -> binary_expression TIMES binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1715), + ('binary_expression -> binary_expression DIVIDE binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1716), + ('binary_expression -> binary_expression MOD binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1717), + ('binary_expression -> binary_expression PLUS binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1718), + ('binary_expression -> binary_expression MINUS binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1719), + ('binary_expression -> binary_expression RSHIFT binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1720), + ('binary_expression -> binary_expression LSHIFT binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1721), + ('binary_expression -> binary_expression LT binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1722), + ('binary_expression -> binary_expression LE binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1723), + ('binary_expression -> binary_expression GE binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1724), + ('binary_expression -> binary_expression GT binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1725), + ('binary_expression -> binary_expression EQ binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1726), + ('binary_expression -> binary_expression NE binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1727), + ('binary_expression -> binary_expression AND binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1728), + ('binary_expression -> binary_expression OR binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1729), + ('binary_expression -> binary_expression XOR binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1730), + ('binary_expression -> binary_expression LAND binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1731), + ('binary_expression -> binary_expression LOR binary_expression','binary_expression',3,'p_binary_expression','c_parser.py',1732), + ('cast_expression -> unary_expression','cast_expression',1,'p_cast_expression_1','c_parser.py',1740), + ('cast_expression -> LPAREN type_name RPAREN cast_expression','cast_expression',4,'p_cast_expression_2','c_parser.py',1744), + ('unary_expression -> postfix_expression','unary_expression',1,'p_unary_expression_1','c_parser.py',1748), + ('unary_expression -> PLUSPLUS unary_expression','unary_expression',2,'p_unary_expression_2','c_parser.py',1752), + ('unary_expression -> MINUSMINUS unary_expression','unary_expression',2,'p_unary_expression_2','c_parser.py',1753), + ('unary_expression -> unary_operator cast_expression','unary_expression',2,'p_unary_expression_2','c_parser.py',1754), + ('unary_expression -> SIZEOF unary_expression','unary_expression',2,'p_unary_expression_3','c_parser.py',1759), + ('unary_expression -> SIZEOF LPAREN type_name RPAREN','unary_expression',4,'p_unary_expression_3','c_parser.py',1760), + ('unary_expression -> _ALIGNOF LPAREN type_name 
RPAREN','unary_expression',4,'p_unary_expression_3','c_parser.py',1761), + ('unary_operator -> AND','unary_operator',1,'p_unary_operator','c_parser.py',1769), + ('unary_operator -> TIMES','unary_operator',1,'p_unary_operator','c_parser.py',1770), + ('unary_operator -> PLUS','unary_operator',1,'p_unary_operator','c_parser.py',1771), + ('unary_operator -> MINUS','unary_operator',1,'p_unary_operator','c_parser.py',1772), + ('unary_operator -> NOT','unary_operator',1,'p_unary_operator','c_parser.py',1773), + ('unary_operator -> LNOT','unary_operator',1,'p_unary_operator','c_parser.py',1774), + ('postfix_expression -> primary_expression','postfix_expression',1,'p_postfix_expression_1','c_parser.py',1779), + ('postfix_expression -> postfix_expression LBRACKET expression RBRACKET','postfix_expression',4,'p_postfix_expression_2','c_parser.py',1783), + ('postfix_expression -> postfix_expression LPAREN argument_expression_list RPAREN','postfix_expression',4,'p_postfix_expression_3','c_parser.py',1787), + ('postfix_expression -> postfix_expression LPAREN RPAREN','postfix_expression',3,'p_postfix_expression_3','c_parser.py',1788), + ('postfix_expression -> postfix_expression PERIOD ID','postfix_expression',3,'p_postfix_expression_4','c_parser.py',1793), + ('postfix_expression -> postfix_expression PERIOD TYPEID','postfix_expression',3,'p_postfix_expression_4','c_parser.py',1794), + ('postfix_expression -> postfix_expression ARROW ID','postfix_expression',3,'p_postfix_expression_4','c_parser.py',1795), + ('postfix_expression -> postfix_expression ARROW TYPEID','postfix_expression',3,'p_postfix_expression_4','c_parser.py',1796), + ('postfix_expression -> postfix_expression PLUSPLUS','postfix_expression',2,'p_postfix_expression_5','c_parser.py',1802), + ('postfix_expression -> postfix_expression MINUSMINUS','postfix_expression',2,'p_postfix_expression_5','c_parser.py',1803), + ('postfix_expression -> LPAREN type_name RPAREN brace_open initializer_list brace_close','postfix_expression',6,'p_postfix_expression_6','c_parser.py',1808), + ('postfix_expression -> LPAREN type_name RPAREN brace_open initializer_list COMMA brace_close','postfix_expression',7,'p_postfix_expression_6','c_parser.py',1809), + ('primary_expression -> identifier','primary_expression',1,'p_primary_expression_1','c_parser.py',1814), + ('primary_expression -> constant','primary_expression',1,'p_primary_expression_2','c_parser.py',1818), + ('primary_expression -> unified_string_literal','primary_expression',1,'p_primary_expression_3','c_parser.py',1822), + ('primary_expression -> unified_wstring_literal','primary_expression',1,'p_primary_expression_3','c_parser.py',1823), + ('primary_expression -> LPAREN expression RPAREN','primary_expression',3,'p_primary_expression_4','c_parser.py',1828), + ('primary_expression -> OFFSETOF LPAREN type_name COMMA offsetof_member_designator RPAREN','primary_expression',6,'p_primary_expression_5','c_parser.py',1832), + ('offsetof_member_designator -> identifier','offsetof_member_designator',1,'p_offsetof_member_designator','c_parser.py',1840), + ('offsetof_member_designator -> offsetof_member_designator PERIOD identifier','offsetof_member_designator',3,'p_offsetof_member_designator','c_parser.py',1841), + ('offsetof_member_designator -> offsetof_member_designator LBRACKET expression RBRACKET','offsetof_member_designator',4,'p_offsetof_member_designator','c_parser.py',1842), + ('argument_expression_list -> assignment_expression','argument_expression_list',1,'p_argument_expression_list','c_parser.py',1854), + 
('argument_expression_list -> argument_expression_list COMMA assignment_expression','argument_expression_list',3,'p_argument_expression_list','c_parser.py',1855), + ('identifier -> ID','identifier',1,'p_identifier','c_parser.py',1864), + ('constant -> INT_CONST_DEC','constant',1,'p_constant_1','c_parser.py',1868), + ('constant -> INT_CONST_OCT','constant',1,'p_constant_1','c_parser.py',1869), + ('constant -> INT_CONST_HEX','constant',1,'p_constant_1','c_parser.py',1870), + ('constant -> INT_CONST_BIN','constant',1,'p_constant_1','c_parser.py',1871), + ('constant -> INT_CONST_CHAR','constant',1,'p_constant_1','c_parser.py',1872), + ('constant -> FLOAT_CONST','constant',1,'p_constant_2','c_parser.py',1891), + ('constant -> HEX_FLOAT_CONST','constant',1,'p_constant_2','c_parser.py',1892), + ('constant -> CHAR_CONST','constant',1,'p_constant_3','c_parser.py',1905), + ('constant -> WCHAR_CONST','constant',1,'p_constant_3','c_parser.py',1906), + ('constant -> U8CHAR_CONST','constant',1,'p_constant_3','c_parser.py',1907), + ('constant -> U16CHAR_CONST','constant',1,'p_constant_3','c_parser.py',1908), + ('constant -> U32CHAR_CONST','constant',1,'p_constant_3','c_parser.py',1909), + ('unified_string_literal -> STRING_LITERAL','unified_string_literal',1,'p_unified_string_literal','c_parser.py',1920), + ('unified_string_literal -> unified_string_literal STRING_LITERAL','unified_string_literal',2,'p_unified_string_literal','c_parser.py',1921), + ('unified_wstring_literal -> WSTRING_LITERAL','unified_wstring_literal',1,'p_unified_wstring_literal','c_parser.py',1931), + ('unified_wstring_literal -> U8STRING_LITERAL','unified_wstring_literal',1,'p_unified_wstring_literal','c_parser.py',1932), + ('unified_wstring_literal -> U16STRING_LITERAL','unified_wstring_literal',1,'p_unified_wstring_literal','c_parser.py',1933), + ('unified_wstring_literal -> U32STRING_LITERAL','unified_wstring_literal',1,'p_unified_wstring_literal','c_parser.py',1934), + ('unified_wstring_literal -> unified_wstring_literal WSTRING_LITERAL','unified_wstring_literal',2,'p_unified_wstring_literal','c_parser.py',1935), + ('unified_wstring_literal -> unified_wstring_literal U8STRING_LITERAL','unified_wstring_literal',2,'p_unified_wstring_literal','c_parser.py',1936), + ('unified_wstring_literal -> unified_wstring_literal U16STRING_LITERAL','unified_wstring_literal',2,'p_unified_wstring_literal','c_parser.py',1937), + ('unified_wstring_literal -> unified_wstring_literal U32STRING_LITERAL','unified_wstring_literal',2,'p_unified_wstring_literal','c_parser.py',1938), + ('brace_open -> LBRACE','brace_open',1,'p_brace_open','c_parser.py',1948), + ('brace_close -> RBRACE','brace_close',1,'p_brace_close','c_parser.py',1954), + ('empty -> ','empty',0,'p_empty','c_parser.py',1960), +] diff --git a/python/timezonefinder-8.1.0.dist-info/INSTALLER b/python/timezonefinder-8.1.0.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/python/timezonefinder-8.1.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/python/timezonefinder-8.1.0.dist-info/METADATA b/python/timezonefinder-8.1.0.dist-info/METADATA new file mode 100644 index 000000000..684f6c1dc --- /dev/null +++ b/python/timezonefinder-8.1.0.dist-info/METADATA @@ -0,0 +1,155 @@ +Metadata-Version: 2.4 +Name: timezonefinder +Version: 8.1.0 +Summary: python package for finding the timezone of any point on earth (coordinates) offline +Author-email: jannikmi +License: The MIT License (MIT) + + Copyright (c) 2016 Jannik Michelfeit + + Permission is hereby granted, 
free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +Project-URL: homepage, https://timezonefinder.michelfe.it/gui +Project-URL: repository, https://github.com/jannikmi/timezonefinder +Project-URL: documentation, https://timezonefinder.readthedocs.io/en/latest/ +Keywords: timezone,coordinates,latitude,longitude,location,offline,polygon +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development :: Localization +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Requires-Python: <4,>=3.9 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: numpy<3,>=1.23; python_version >= "3.9" +Requires-Dist: h3>4 +Requires-Dist: cffi<3,>=1.15.1 +Requires-Dist: flatbuffers>=25.2.10 +Dynamic: license-file + +============== +timezonefinder +============== + + +.. + Note: can't include the badges file from the docs here, as it won't render on PyPI -> sync manually + +.. image:: https://github.com/jannikmi/timezonefinder/actions/workflows/build.yml/badge.svg?branch=master + :target: https://github.com/jannikmi/timezonefinder/actions?query=branch%3Amaster + +.. image:: https://readthedocs.org/projects/timezonefinder/badge/?version=latest + :alt: documentation status + :target: https://timezonefinder.readthedocs.io/en/latest/?badge=latest + +.. image:: https://img.shields.io/pypi/wheel/timezonefinder.svg + :target: https://pypi.python.org/pypi/timezonefinder + +.. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white + :target: https://github.com/pre-commit/pre-commit + :alt: pre-commit + +.. image:: https://pepy.tech/badge/timezonefinder + :alt: total PyPI downloads + :target: https://pepy.tech/project/timezonefinder + +.. image:: https://img.shields.io/pypi/v/timezonefinder.svg + :alt: latest version on PyPI + :target: https://pypi.python.org/pypi/timezonefinder + +.. image:: https://img.shields.io/conda/vn/conda-forge/timezonefinder.svg + :target: https://anaconda.org/conda-forge/timezonefinder + :alt: latest version on conda-forge + +.. 
image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+
+
+
+
+This is a Python package providing offline timezone lookups for WGS84 coordinates.
+In comparison to other alternatives, this package aims at maximum accuracy around timezone borders (no geometry simplifications) while offering fast lookup performance and compatibility with many (Python) runtime environments.
+It combines preprocessed polygon data, H3-based spatial shortcuts, and optional acceleration via Numba or a clang-backed point-in-polygon routine.
+
+
+
+Notice: Looking for maintainers. Reach out if you want to contribute!
+---------------------------------------------------------------------
+
+
+Quick Guide
+-----------
+
+It is recommended to install it together with the optional `Numba `__ package for increased performance:
+
+.. code-block:: console
+
+    pip install timezonefinder[numba]
+
+
+.. code-block:: python
+
+    from timezonefinder import timezone_at
+
+    tz = timezone_at(lng=13.358, lat=52.5061)  # 'Europe/Berlin'
+
+
+    # For thread safety, increased performance and control, re-use an instance:
+    from timezonefinder import TimezoneFinder
+
+    tf = TimezoneFinder(in_memory=True)  # reuse
+
+    query_points = [(13.358, 52.5061), ...]
+    for lng, lat in query_points:
+        tz = tf.timezone_at(lng=lng, lat=lat)  # 'Europe/Berlin'
+
+
+
+**Note:** This library uses the `"same since now dataset" `__ optimized for the primary use case of timezone determination for current/future time calculations.
+For applications requiring historical accuracy or resolving to precise locations, consider parsing the original dataset (cf. `Documentation `__).
+
+
+**Alternative:** Need maximum speed at the cost of accuracy? Check out `tzfpy `__ - a fast and lightweight alternative written in Rust. A built-in approximate option, ``TimezoneFinderL``, is sketched at the end of this file.
+
+
+References
+----------
+
+* `Documentation `__
+* `PyPI `__
+* `conda-forge feedstock `__
+* `download stats `__
+* `online GUI and API `__
+* `GUI repository `__
+* `ruby port `__
+
+
+
+LICENSE
+-------
+
+``timezonefinder`` is licensed under the `MIT license `__.
+
+The data is licensed under the `ODbL license `__, following the base dataset from `evansiroky/timezone-boundary-builder `__.
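+
+
+Speed vs. accuracy: TimezoneFinderL
+-----------------------------------
+
+A minimal sketch of the approximate lookup variant. It assumes only what this package's own code shows: ``TimezoneFinderL`` is exported from ``timezonefinder/__init__.py`` and exposes the same ``timezone_at()`` call signature that ``command_line.py`` relies on; results near timezone borders may differ from the exact ``TimezoneFinder`` lookup.
+
+.. code-block:: python
+
+    from timezonefinder import TimezoneFinderL
+
+    # lighter lookup variant: trades border accuracy for speed
+    tf = TimezoneFinderL()
+    tz = tf.timezone_at(lng=13.358, lat=52.5061)  # likely 'Europe/Berlin'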
diff --git a/python/timezonefinder-8.1.0.dist-info/RECORD b/python/timezonefinder-8.1.0.dist-info/RECORD new file mode 100644 index 000000000..a534477d8 --- /dev/null +++ b/python/timezonefinder-8.1.0.dist-info/RECORD @@ -0,0 +1,102 @@ +../../DATA_LICENSE,sha256=Bk282f3Q_982dCxAcYojWWvopRg4y0Ozl_RdODukJEA,26030 +../../bin/timezonefinder,sha256=LzZPHukd0cy0v9GRWcu1DhqH4ElczcG4rlizrhE5D3c,226 +timezonefinder-8.1.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +timezonefinder-8.1.0.dist-info/METADATA,sha256=8PtruMTy-MiLcwY9tpptMgXIFU1gXOjDxt3xguC8vnY,6793 +timezonefinder-8.1.0.dist-info/RECORD,, +timezonefinder-8.1.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +timezonefinder-8.1.0.dist-info/WHEEL,sha256=0kZLiaxmrUu7G20oRbftl3R2f8P95gwrN6kutRi5JC4,216 +timezonefinder-8.1.0.dist-info/entry_points.txt,sha256=QhAkOUbkoAMt7ETe2kT_kS_kEDCmPRfo42OKPpDnon4,68 +timezonefinder-8.1.0.dist-info/licenses/LICENSE,sha256=ln3_OlAWgGLeQAoFIbTwBbSBslJW3n0h89AAFkjXNeU,1084 +timezonefinder-8.1.0.dist-info/top_level.txt,sha256=krO_bybS5HU2wM_FuRywP9BnGFrc_QDhX-q6Tgm2BGk,15 +timezonefinder/__init__.py,sha256=hs0paFVzV65VK2-ug0ruY3WGq_GXFUz7GdYgvxHPGxs,585 +timezonefinder/__pycache__/__init__.cpython-312.pyc,, +timezonefinder/__pycache__/_numba_replacements.cpython-312.pyc,, +timezonefinder/__pycache__/build.cpython-312.pyc,, +timezonefinder/__pycache__/command_line.cpython-312.pyc,, +timezonefinder/__pycache__/configs.cpython-312.pyc,, +timezonefinder/__pycache__/coord_accessors.cpython-312.pyc,, +timezonefinder/__pycache__/global_functions.cpython-312.pyc,, +timezonefinder/__pycache__/np_binary_helpers.cpython-312.pyc,, +timezonefinder/__pycache__/polygon_array.cpython-312.pyc,, +timezonefinder/__pycache__/timezonefinder.cpython-312.pyc,, +timezonefinder/__pycache__/utils.cpython-312.pyc,, +timezonefinder/__pycache__/utils_clang.cpython-312.pyc,, +timezonefinder/__pycache__/utils_numba.cpython-312.pyc,, +timezonefinder/__pycache__/zone_names.cpython-312.pyc,, +timezonefinder/_numba_replacements.py,sha256=YvgLh4ctc63jzR1MZy_7iytvqMykanqv4Ge1YBQzi34,765 +timezonefinder/build.py,sha256=HdAIQckUKMpN0VcSQ_WU9msbC_PlulHcVDu54isoWfI,1449 +timezonefinder/command_line.py,sha256=KQunLW6_JDy_S0iCCZo9T0FgSyZsUenrWZ6qFH98Qbg,3921 +timezonefinder/configs.py,sha256=iMYcaGIywLJ9CxMiYZHpzoHulEZZQIxPQcAbXiV2jIY,2741 +timezonefinder/coord_accessors.py,sha256=DMnDOpFldcb6VHeq15FVURx9yoYOoyNvRq3qrFu0FW0,5013 +timezonefinder/data/boundaries/coordinates.fbs,sha256=BtJZyoW7XI0PsBIrHXy6U1W7tiE8GDLBkReMA8Gx9N8,30713136 +timezonefinder/data/boundaries/xmax.npy,sha256=wg7UADLQ7TZbBeWL_NRkruJT2v3yclDKcT8iNlHtDds,3552 +timezonefinder/data/boundaries/xmin.npy,sha256=1h4yJuOGaqTdDwZyviKNaJSbCbdJOR5107TBZlMCsCs,3552 +timezonefinder/data/boundaries/ymax.npy,sha256=pH2PRvEbJAv1YMSdPzTBlNuhTG7-kK8EXFKAkM5N0Xg,3552 +timezonefinder/data/boundaries/ymin.npy,sha256=TJrPR4hN8RYtAzVTW_gZuYI-bwqWVeWmws1I-GzUHvQ,3552 +timezonefinder/data/hole_registry.json,sha256=qREqxQvBX3rlHrhxCV7A2g1QIBTtLm0brQi95FOsXCg,2312 +timezonefinder/data/holes/coordinates.fbs,sha256=Qg3bfwFrZD17VVYdy7KkBNBgZUiUsDh2tMVFh2Xoc74,1806792 +timezonefinder/data/holes/xmax.npy,sha256=-Z39zQ4Nvvd-H8-cAgWszXKQPb1oE4pZbmOWQxMUK1M,2444 +timezonefinder/data/holes/xmin.npy,sha256=6gje3Qhx3lLXPTLcxqf9RqnObMGegeKvSOFkmy87UEg,2444 +timezonefinder/data/holes/ymax.npy,sha256=PajdQIue5mojJnXy_VaC_S16fRXbGVKNje_GqtfttpI,2444 +timezonefinder/data/holes/ymin.npy,sha256=g6JLqRYbElOpyC2GQR3X5rNq4yu0p_KA0cR_0IxrfaU,2444 
+timezonefinder/data/hybrid_shortcuts_uint8.fbs,sha256=rI2S9TYWBiPZVVyx1rT6lgbNApDumafg9owxAWcOyLQ,1554008 +timezonefinder/data/timezone_names.txt,sha256=n5w4KieBRPuToV2gi8M-08PcjYDYqlKIVPFD6XJenj4,1279 +timezonefinder/data/zone_ids.npy,sha256=XRRqPDioAPjTfC6dbncPNhPUeiLKMGORfyx5AU7L5ig,984 +timezonefinder/data/zone_positions.npy,sha256=rcVpbGKKbB6R3P6XUfBJB3b9OHA13MjsRb9ITgxfBgI,314 +timezonefinder/flatbuf/__init__.py,sha256=v18eVdl8fOtYQozzn-Kbye-euXb48KV7fWgMHYxZzfg,155 +timezonefinder/flatbuf/__pycache__/__init__.cpython-312.pyc,, +timezonefinder/flatbuf/generated/__init__.py,sha256=9lI7qD9r-gj1_kqbL4G8ZGd-NRxdJjSuOTjyBjPOYrw,138 +timezonefinder/flatbuf/generated/__pycache__/__init__.cpython-312.pyc,, +timezonefinder/flatbuf/generated/polygons/Polygon.py,sha256=x0Ic9QqUGff-z3uLTI9mTLJ1GQdvfLzxUr6dkGiUM7A,2296 +timezonefinder/flatbuf/generated/polygons/PolygonCollection.py,sha256=Y5QwB61-YEcbsr-Zzefmx3qf2lDrWajwP4sPPASoqik,2337 +timezonefinder/flatbuf/generated/polygons/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +timezonefinder/flatbuf/generated/polygons/__pycache__/Polygon.cpython-312.pyc,, +timezonefinder/flatbuf/generated/polygons/__pycache__/PolygonCollection.cpython-312.pyc,, +timezonefinder/flatbuf/generated/polygons/__pycache__/__init__.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint16/HybridShortcutCollection.py,sha256=H826KyeyYhfQcq6uGmxe2Q3N9eQfT68s9SDPoOa6H2o,2514 +timezonefinder/flatbuf/generated/shortcuts_uint16/HybridShortcutEntry.py,sha256=awxaHtwBS_KSrAHyYrveCS2hsV5p9ARv5OPOVt8cBPA,2491 +timezonefinder/flatbuf/generated/shortcuts_uint16/PolygonList.py,sha256=ASIU_uk-j5K73hcPyrzMYX-lzd5mDM8vLIHd_ZEE54k,2384 +timezonefinder/flatbuf/generated/shortcuts_uint16/ShortcutValue.py,sha256=6xPEwaIlUHNDXabRuYQ6IfWqouei8M_jlfvHPQSPlwE,175 +timezonefinder/flatbuf/generated/shortcuts_uint16/UniqueZone.py,sha256=tSgF3cUsujS9JfEC2ggbsDA_z4YsyheKZiIsxyYGGAg,1381 +timezonefinder/flatbuf/generated/shortcuts_uint16/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/HybridShortcutCollection.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/HybridShortcutEntry.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/PolygonList.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/ShortcutValue.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/UniqueZone.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/__init__.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint8/HybridShortcutCollection.py,sha256=fLnoMY4Ha3AICf89H3lYzeSRtrvlIWtMrL14CcenVLE,2512 +timezonefinder/flatbuf/generated/shortcuts_uint8/HybridShortcutEntry.py,sha256=mQc4OAxzGLj6DRLYGmmHlX01kRK3sWYJ2kKlOI6B5sE,2490 +timezonefinder/flatbuf/generated/shortcuts_uint8/PolygonList.py,sha256=rqVIXk_q9ZwjRqN6isIYMqPtop_NErNqsMjH39PhPcQ,2383 +timezonefinder/flatbuf/generated/shortcuts_uint8/ShortcutValue.py,sha256=GC3uYlLdoPJNyo2hsNKAp23WXcJh4jyE0aiUz65W5bM,174 +timezonefinder/flatbuf/generated/shortcuts_uint8/UniqueZone.py,sha256=Wo7AZAY1RZINTjHgIguBCnB5ePElchP47Rn6G6Btgk4,1348 +timezonefinder/flatbuf/generated/shortcuts_uint8/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/HybridShortcutCollection.cpython-312.pyc,, 
+timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/HybridShortcutEntry.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/PolygonList.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/ShortcutValue.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/UniqueZone.cpython-312.pyc,, +timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/__init__.cpython-312.pyc,, +timezonefinder/flatbuf/io/__init__.py,sha256=PLjAdPX06nymnBvUMzm3ElZ1QxZ9I8G3ibkJKlYt0To,749 +timezonefinder/flatbuf/io/__pycache__/__init__.cpython-312.pyc,, +timezonefinder/flatbuf/io/__pycache__/hybrid_shortcuts.cpython-312.pyc,, +timezonefinder/flatbuf/io/__pycache__/polygons.cpython-312.pyc,, +timezonefinder/flatbuf/io/hybrid_shortcuts.py,sha256=zMRufFW6VWcbcbi9OKFPNytzQZwm4NEdBk2ruJvZyCk,14784 +timezonefinder/flatbuf/io/polygons.py,sha256=iCguQMgMzQOa7U0y3LdsvlfJRgi9vC8yrTSWuhwEM78,4431 +timezonefinder/flatbuf/schemas/__init__.py,sha256=0jSJXwyzAwqV8QwBBt7FGSsiDVZHEBMX7ajxOwGK0LI,148 +timezonefinder/flatbuf/schemas/__pycache__/__init__.cpython-312.pyc,, +timezonefinder/flatbuf/schemas/hybrid_shortcuts_uint16.fbs,sha256=KJbPH7prRy_JVGJJGaZvPEHfW-eJTj9LzgwGkajU3r4,1052 +timezonefinder/flatbuf/schemas/hybrid_shortcuts_uint8.fbs,sha256=W7VzrN1Xvp5FmRF4j_LET7aWS7QcXkjtxng6ier3r5Q,1045 +timezonefinder/flatbuf/schemas/polygons.fbs,sha256=fWaL-cp87NxzBxxRXbMzrSwPQMuT5KC8a-9YlNwecUI,229 +timezonefinder/global_functions.py,sha256=l7POnlhnOgqhGbnhG630CKeqpbDHT-mD1wotQ-HZKrk,5012 +timezonefinder/inside_poly_extension/inside_polygon_int.c,sha256=mwCgNjFEB6ghQr77dG0xWy8eYl8JLuYTvfS7hpYGNQ0,2363 +timezonefinder/inside_poly_extension/inside_polygon_int.h,sha256=iaqWb9jbl3V_FkvWKp3BenUq9QCWPjZkXdPVjWbe5J4,132 +timezonefinder/inside_polygon_ext.abi3.so,sha256=xQm0DQGx51CB0E1Xl1O7VXz1LiFV0amVBMv0YoZmWNs,32384 +timezonefinder/np_binary_helpers.py,sha256=Ie3_2LC_RgqJ7LdSGLsERmGTAVjCFT17uDDB6nBFBUY,1424 +timezonefinder/polygon_array.py,sha256=GjywPG7ztrEcJDxFeKr1bjY8yrhHY4mKQZNBysHG2_w,4551 +timezonefinder/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +timezonefinder/timezonefinder.py,sha256=D_5W4fNH-WMIfTfC7PEa2rSWPHZMo5quRhm_4t5nl_s,24483 +timezonefinder/utils.py,sha256=-_KRUu8uvgs5hQMcVauHQXxgVLKTR52YF2QIMo5252U,2472 +timezonefinder/utils_clang.py,sha256=uCt6m37OuRG505ZbHTmKnzfyUHsZEFjlZ2HXfxzNwjE,1404 +timezonefinder/utils_numba.py,sha256=Hvgq1ozcIYSY4c0O8er_CbcjXbYtWjjEOk-yexKo8FQ,6922 +timezonefinder/zone_names.py,sha256=_-OV_tUs3Wy8E-qQMova-kMHLMrF_SUUzZc5cilLz7U,1174 diff --git a/python/timezonefinder-8.1.0.dist-info/REQUESTED b/python/timezonefinder-8.1.0.dist-info/REQUESTED new file mode 100644 index 000000000..e69de29bb diff --git a/python/timezonefinder-8.1.0.dist-info/WHEEL b/python/timezonefinder-8.1.0.dist-info/WHEEL new file mode 100644 index 000000000..9efa0ed62 --- /dev/null +++ b/python/timezonefinder-8.1.0.dist-info/WHEEL @@ -0,0 +1,8 @@ +Wheel-Version: 1.0 +Generator: setuptools (80.9.0) +Root-Is-Purelib: false +Tag: cp39-abi3-manylinux_2_5_x86_64 +Tag: cp39-abi3-manylinux1_x86_64 +Tag: cp39-abi3-manylinux_2_17_x86_64 +Tag: cp39-abi3-manylinux2014_x86_64 + diff --git a/python/timezonefinder-8.1.0.dist-info/entry_points.txt b/python/timezonefinder-8.1.0.dist-info/entry_points.txt new file mode 100644 index 000000000..e770964d3 --- /dev/null +++ b/python/timezonefinder-8.1.0.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[console_scripts] +timezonefinder = timezonefinder.command_line:main diff --git 
a/python/timezonefinder-8.1.0.dist-info/licenses/LICENSE b/python/timezonefinder-8.1.0.dist-info/licenses/LICENSE new file mode 100644 index 000000000..296ba37dd --- /dev/null +++ b/python/timezonefinder-8.1.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Jannik Michelfeit + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/python/timezonefinder-8.1.0.dist-info/top_level.txt b/python/timezonefinder-8.1.0.dist-info/top_level.txt new file mode 100644 index 000000000..4c2251e44 --- /dev/null +++ b/python/timezonefinder-8.1.0.dist-info/top_level.txt @@ -0,0 +1 @@ +timezonefinder diff --git a/python/timezonefinder/__init__.py b/python/timezonefinder/__init__.py new file mode 100644 index 000000000..637c9c6c5 --- /dev/null +++ b/python/timezonefinder/__init__.py @@ -0,0 +1,25 @@ +from timezonefinder.timezonefinder import ( + TimezoneFinder, + TimezoneFinderL, +) + +# Import module-level functions +from timezonefinder.global_functions import ( + timezone_at, + timezone_at_land, + unique_timezone_at, + certain_timezone_at, + get_geometry, +) + +# https://docs.python.org/3/tutorial/modules.html#importing-from-a-package +# determines which objects will be imported with "import *" +__all__ = ( + "TimezoneFinder", + "TimezoneFinderL", + "timezone_at", + "timezone_at_land", + "unique_timezone_at", + "certain_timezone_at", + "get_geometry", +) diff --git a/python/timezonefinder/__pycache__/__init__.cpython-312.pyc b/python/timezonefinder/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..9aacdb825 Binary files /dev/null and b/python/timezonefinder/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/_numba_replacements.cpython-312.pyc b/python/timezonefinder/__pycache__/_numba_replacements.cpython-312.pyc new file mode 100644 index 000000000..599975b3b Binary files /dev/null and b/python/timezonefinder/__pycache__/_numba_replacements.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/build.cpython-312.pyc b/python/timezonefinder/__pycache__/build.cpython-312.pyc new file mode 100644 index 000000000..3e3b7fdf4 Binary files /dev/null and b/python/timezonefinder/__pycache__/build.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/command_line.cpython-312.pyc b/python/timezonefinder/__pycache__/command_line.cpython-312.pyc new file mode 100644 index 000000000..002a78035 Binary files /dev/null and b/python/timezonefinder/__pycache__/command_line.cpython-312.pyc differ diff --git 
a/python/timezonefinder/__pycache__/configs.cpython-312.pyc b/python/timezonefinder/__pycache__/configs.cpython-312.pyc new file mode 100644 index 000000000..54a18243c Binary files /dev/null and b/python/timezonefinder/__pycache__/configs.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/coord_accessors.cpython-312.pyc b/python/timezonefinder/__pycache__/coord_accessors.cpython-312.pyc new file mode 100644 index 000000000..d8119d854 Binary files /dev/null and b/python/timezonefinder/__pycache__/coord_accessors.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/global_functions.cpython-312.pyc b/python/timezonefinder/__pycache__/global_functions.cpython-312.pyc new file mode 100644 index 000000000..aa0837637 Binary files /dev/null and b/python/timezonefinder/__pycache__/global_functions.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/np_binary_helpers.cpython-312.pyc b/python/timezonefinder/__pycache__/np_binary_helpers.cpython-312.pyc new file mode 100644 index 000000000..6b66d6bcc Binary files /dev/null and b/python/timezonefinder/__pycache__/np_binary_helpers.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/polygon_array.cpython-312.pyc b/python/timezonefinder/__pycache__/polygon_array.cpython-312.pyc new file mode 100644 index 000000000..77ee7d238 Binary files /dev/null and b/python/timezonefinder/__pycache__/polygon_array.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/timezonefinder.cpython-312.pyc b/python/timezonefinder/__pycache__/timezonefinder.cpython-312.pyc new file mode 100644 index 000000000..f57edf8a7 Binary files /dev/null and b/python/timezonefinder/__pycache__/timezonefinder.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/utils.cpython-312.pyc b/python/timezonefinder/__pycache__/utils.cpython-312.pyc new file mode 100644 index 000000000..6675f1566 Binary files /dev/null and b/python/timezonefinder/__pycache__/utils.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/utils_clang.cpython-312.pyc b/python/timezonefinder/__pycache__/utils_clang.cpython-312.pyc new file mode 100644 index 000000000..5c1d5fc0c Binary files /dev/null and b/python/timezonefinder/__pycache__/utils_clang.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/utils_numba.cpython-312.pyc b/python/timezonefinder/__pycache__/utils_numba.cpython-312.pyc new file mode 100644 index 000000000..5421f52b9 Binary files /dev/null and b/python/timezonefinder/__pycache__/utils_numba.cpython-312.pyc differ diff --git a/python/timezonefinder/__pycache__/zone_names.cpython-312.pyc b/python/timezonefinder/__pycache__/zone_names.cpython-312.pyc new file mode 100644 index 000000000..571535e5e Binary files /dev/null and b/python/timezonefinder/__pycache__/zone_names.cpython-312.pyc differ diff --git a/python/timezonefinder/_numba_replacements.py b/python/timezonefinder/_numba_replacements.py new file mode 100644 index 000000000..ee42c8c12 --- /dev/null +++ b/python/timezonefinder/_numba_replacements.py @@ -0,0 +1,52 @@ +"""'transparent' numba functionality replacements + +njit decorator +data types + +dtype_2int_tuple = typeof((1, 1)) +@njit(b1(i4, i4, i4[:, :]), cache=True) +@njit(dtype_2int_tuple(f8, f8), cache=True) +""" + + +# decorator +def njit(*args, **kwargs): + def wrapper(f): + return f + + return wrapper + + +class SubscriptAndCallable: + def __init__(self, *args, **kwargs): + pass + + def __class_getitem__(cls, item): + return None + + def 
__call__(self, arg):
+        # for example int64(1) must work
+        return arg
+
+
+# DTYPES
+
+
+class f8(SubscriptAndCallable):
+    pass
+
+
+class i8(SubscriptAndCallable):
+    pass
+
+
+class i4(SubscriptAndCallable):
+    pass
+
+
+class boolean(SubscriptAndCallable):
+    pass
+
+
+class Array(SubscriptAndCallable):
+    pass
diff --git a/python/timezonefinder/build.py b/python/timezonefinder/build.py
new file mode 100644
index 000000000..756ea29b3
--- /dev/null
+++ b/python/timezonefinder/build.py
@@ -0,0 +1,49 @@
+"""optionally builds inside polygon algorithm C extension
+
+Resources:
+https://github.com/FirefoxMetzger/mini-extension
+https://stackoverflow.com/questions/60073711/how-to-build-c-extensions-via-poetry
+https://github.com/libmbd/libmbd/blob/master/build.py
+"""
+
+import pathlib
+import re
+from typing import Optional
+import warnings
+
+import cffi
+
+EXTENSION_NAME = "inside_polygon_ext"
+H_FILE_NAME = "inside_polygon_int.h"
+C_FILE_NAME = "inside_polygon_int.c"
+EXTENSION_PATH = pathlib.Path("timezonefinder") / "inside_poly_extension"
+h_file_path = EXTENSION_PATH / H_FILE_NAME
+c_file_path = EXTENSION_PATH / C_FILE_NAME
+
+ffibuilder: Optional[cffi.FFI] = None
+try:
+    ffibuilder = cffi.FFI()
+except Exception as exc:
+    # Clang extension should be fully optional
+    warnings.warn(
+        f"C lang extension cannot be built, since cffi failed with this error: {exc}"
+    )
+
+if ffibuilder is not None:
+    ffibuilder.set_source(
+        "timezonefinder." + EXTENSION_NAME,
+        source='#include "inside_polygon_int.h"',
+        sources=[str(c_file_path)],
+        include_dirs=[str(EXTENSION_PATH)],
+    )
+    with open(h_file_path) as h_file:
+        # cffi does not like our preprocessor directives, so we remove them
+        lns = h_file.read().splitlines()
+        flt = filter(lambda ln: not re.match(r" *#", ln), lns)
+
+    ffibuilder.cdef("\n".join(flt))
+
+
+if __name__ == "__main__":
+    if ffibuilder:
+        ffibuilder.compile(verbose=True)
diff --git a/python/timezonefinder/command_line.py b/python/timezonefinder/command_line.py
new file mode 100644
index 000000000..e7fe1ba01
--- /dev/null
+++ b/python/timezonefinder/command_line.py
@@ -0,0 +1,122 @@
+import argparse
+import contextlib
+import os
+import sys
+import tempfile
+from typing import Callable, Generator
+
+from timezonefinder import (
+    TimezoneFinderL,
+    timezone_at,
+    certain_timezone_at,
+    timezone_at_land,
+)
+
+
+@contextlib.contextmanager
+def redirect_stdout_to_temp_file() -> Generator[str, None, None]:
+    """
+    Context manager that redirects stdout to a temporary file for the duration of the context.
+    The temporary file is created but not deleted when the context exits.
+    Returns the path to the temporary file.
+    """
+    # Save the original stdout
+    original_stdout = sys.stdout
+
+    # Create a temporary file that will NOT be automatically deleted
+    temp_fd, temp_path = tempfile.mkstemp(text=True)
+    temp_file = os.fdopen(temp_fd, "w")
+
+    try:
+        # Redirect stdout to the temporary file
+        sys.stdout = temp_file
+        yield temp_path
+    finally:
+        # Restore the original stdout and close the file
+        sys.stdout = original_stdout
+        temp_file.close()
+
+
+def get_timezone_function(function_id: int) -> Callable:
+    """
+    Get the appropriate timezone function based on the function ID.
+    Uses global functions when available, otherwise creates instances as needed.
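+
+    For example, get_timezone_function(0) returns the module-level
+    timezone_at, while get_timezone_function(3) returns the bound
+    timezone_at of a freshly created TimezoneFinderL instance
+    (see the function id table in main() below).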
+ """ + # Use global functions for TimezoneFinder methods + if function_id == 0: + return timezone_at + elif function_id == 1: + return certain_timezone_at + elif function_id == 5: + return timezone_at_land + + # For TimezoneFinderL methods, still create an instance + tf_instance = TimezoneFinderL() + functions = { + 3: tf_instance.timezone_at, + 4: tf_instance.timezone_at_land, + } + return functions[function_id] + + +def main() -> None: + parser = argparse.ArgumentParser(description="parse TimezoneFinder parameters") + parser.add_argument("lng", type=float, help="longitude to be queried") + parser.add_argument("lat", type=float, help="latitude to be queried") + parser.add_argument("-v", action="store_true", help="verbosity flag") + parser.add_argument( + "-f", + "--function", + type=int, + choices=[0, 1, 3, 4, 5], + default=0, + help="function to be called:" + "0: TimezoneFinder.timezone_at(), " + "1: TimezoneFinder.certain_timezone_at(), " + "2: removed, " + "3: TimezoneFinderL.timezone_at(), " + "4: TimezoneFinderL.timezone_at_land(), " + "5: TimezoneFinder.timezone_at_land(), ", + ) + parsed_args = parser.parse_args() # takes input from sys.argv + timezone_function = get_timezone_function(parsed_args.function) + + verbose_mode = parsed_args.v + + # Always redirect stdout to a temp file + with redirect_stdout_to_temp_file() as temp_file_path: + print("\n" + "=" * 60) + print("TIMEZONEFINDER LOOKUP DETAILS") + print("-" * 60) + print(f"Coordinates: {parsed_args.lat:.6f}°, {parsed_args.lng:.6f}° (lat, lng)") + print( + f"Function {timezone_function.__name__} (function ID: {parsed_args.function})" + ) + + # Execute the timezone function + tz = timezone_function(lng=parsed_args.lng, lat=parsed_args.lat) + + if tz: + print(f"Result: Found timezone '{tz}'") + else: + print("Result: No timezone found at this location") + print("=" * 60) + + if verbose_mode: + # In verbose mode, print the contents of the temp file + try: + with open(temp_file_path) as f: + captured_output = f.read().strip() + if captured_output: + print(captured_output) + except Exception as e: + print(f"Warning: Could not read captured output: {e}") + else: + # In non-verbose mode, just print the result + print(tz if tz else "") + + # Always clean up the temp file + try: + os.remove(temp_file_path) + except Exception: + pass diff --git a/python/timezonefinder/configs.py b/python/timezonefinder/configs.py new file mode 100644 index 000000000..7a8f31d5a --- /dev/null +++ b/python/timezonefinder/configs.py @@ -0,0 +1,89 @@ +import os +from pathlib import Path +from typing import Any, Dict, List, Tuple, Union + +import numpy as np + +# SHORTCUT SETTINGS +# h3 library +SHORTCUT_H3_RES: int = 3 + +OCEAN_TIMEZONE_PREFIX = r"Etc/GMT" + +# PATHS +PACKAGE_DIR = Path(__file__).parent +DEFAULT_DATA_DIR = PACKAGE_DIR / "data" + + +# i = signed 4byte integer +NR_BYTES_I = 4 +# IMPORTANT: all values between -180 and 180 degree must fit into the domain of i4! +# is the same as testing if 360 fits into the domain of I4 (unsigned!) 
+MAX_ALLOWED_COORD_VAL = 2 ** (8 * NR_BYTES_I - 1)
+
+# from math import floor,log10
+# DECIMAL_PLACES_SHIFT = floor(log10(MAX_ALLOWED_COORD_VAL/180.0)) # == 7
+DECIMAL_PLACES_SHIFT = 7
+INT2COORD_FACTOR = 10 ** (-DECIMAL_PLACES_SHIFT)
+COORD2INT_FACTOR = 10**DECIMAL_PLACES_SHIFT
+MAX_LNG_VAL = 180.0
+MAX_LAT_VAL = 90.0
+MAX_LNG_VAL_INT = int(MAX_LNG_VAL * COORD2INT_FACTOR)
+MAX_LAT_VAL_INT = int(MAX_LAT_VAL * COORD2INT_FACTOR)
+MAX_INT_VAL = MAX_LNG_VAL_INT
+assert MAX_INT_VAL < MAX_ALLOWED_COORD_VAL
+
+# TYPES
+# used in Numba JIT compiled function signatures in utils_numba.py
+# NOTE: Changes in the global settings might not immediately affect
+# the functions due to caching!
+
+# Type alias for flexibility with integer types (pure int or numpy integer scalars)
+IntegerLike = Union[int, np.integer]
+
+# hexagon id to list of polygon ids
+ShortcutMapping = Dict[int, np.ndarray]
+CoordPairs = List[Tuple[float, float]]
+CoordLists = List[List[float]]
+IntLists = List[List[int]]
+
+
+# zone id storage settings ---------------------------------------------------
+
+_ZONE_ID_DTYPE_ALIASES: Dict[str, "np.dtype[Any]"] = {
+    "uint8": np.dtype("<u1"),
+    "uint16": np.dtype("<u2"),
+}
+
+
+def _normalise_zone_id_dtype_key(key: str) -> str:
+    """Normalise user provided dtype keys to canonical form."""
+    return key.lower().strip()
+
+
+def get_zone_id_dtype(name: str) -> "np.dtype[Any]":
+    """Return the configured numpy dtype for storing zone IDs."""
+
+    try:
+        return _ZONE_ID_DTYPE_ALIASES[_normalise_zone_id_dtype_key(name)]
+    except KeyError as exc:  # pragma: no cover - defensive, validated on import
+        valid = ", ".join(sorted(_ZONE_ID_DTYPE_ALIASES))
+        raise ValueError(
+            f"Unsupported zone id dtype '{name}'. Choose one of: {valid}"
+        ) from exc
+
+
+def zone_id_dtype_to_string(dtype: np.dtype) -> str:
+    """Return the little-endian numpy dtype string for serialisation."""
+
+    return dtype.newbyteorder("<").str
+
+
+def available_zone_id_dtype_names() -> Tuple[str, ...]:
+    """Return the supported zone id dtype names."""
+
+    return tuple(sorted(_ZONE_ID_DTYPE_ALIASES))
+
+
+DEFAULT_ZONE_ID_DTYPE_NAME = os.getenv("TIMEZONEFINDER_ZONE_ID_DTYPE", "uint8")
+DEFAULT_ZONE_ID_DTYPE = get_zone_id_dtype(DEFAULT_ZONE_ID_DTYPE_NAME)
diff --git a/python/timezonefinder/coord_accessors.py b/python/timezonefinder/coord_accessors.py
new file mode 100644
index 000000000..a402d5381
--- /dev/null
+++ b/python/timezonefinder/coord_accessors.py
@@ -0,0 +1,173 @@
+"""
+Coordinate accessors for timezonefinder.
+
+This module provides classes for accessing polygon coordinates
+either directly from file or from preloaded memory.
+"""
+
+from abc import ABC, abstractmethod
+import mmap
+from pathlib import Path
+from typing import Dict
+
+import numpy as np
+
+from timezonefinder import utils
+from timezonefinder.flatbuf.generated.polygons.PolygonCollection import (
+    PolygonCollection,
+)
+from timezonefinder.flatbuf.io.polygons import (
+    get_polygon_collection,
+    read_polygon_array_from_binary,
+)
+
+
+class AbstractCoordAccessor(ABC):
+    """Abstract base class defining the interface for coordinate accessors."""
+
+    @abstractmethod
+    def __init__(self, coordinate_file_path: Path):
+        """
+        Initialize the coordinate accessor.
+
+        Args:
+            coordinate_file_path: Path to the coordinate file
+        """
+        pass
+
+    @abstractmethod
+    def __getitem__(self, idx: int) -> np.ndarray:
+        """
+        Get the polygon coordinates for the given index.
+
+        Args:
+            idx: The polygon index
+
+        Returns:
+            A numpy array containing the polygon coordinates
+        """
+        pass
+
+    def __del__(self):
+        """
+        Ensure resources are cleaned up when the object is destroyed.
+        """
+        self.cleanup()
+
+    @abstractmethod
+    def cleanup(self) -> None:
+        """Clean up resources."""
+        pass
+
+
+class FileCoordAccessor(AbstractCoordAccessor):
+    """Accessor that reads polygon coordinates from the file on demand."""
+
+    def __init__(self, coordinate_file_path: Path):
+        """
+        Initialize the file-based coordinate accessor.
+
+        Args:
+            coordinate_file_path: Path to the coordinate file
+        """
+        self.coordinate_file_path = coordinate_file_path
+        # Initialize file resources using proper resource management.
+        try:
+            # Use memory-mapped file for on-demand reading
+            self.coord_file: object = open(self.coordinate_file_path, "rb")
+            # Create memory map
+            self.coord_buf: mmap.mmap = mmap.mmap(
+                self.coord_file.fileno(), 0, access=mmap.ACCESS_READ
+            )
+            self.polygon_collection: PolygonCollection = get_polygon_collection(
+                self.coord_buf
+            )
+        except Exception:
+            # Clean up any partially initialized resources
+            self.cleanup()
+            raise
+
+    def __getitem__(self, idx: int) -> np.ndarray:
+        """
+        Get the polygon coordinates for the given index.
+
+        Args:
+            idx: The polygon index
+
+        Returns:
+            A numpy array containing the polygon coordinates
+        """
+        return read_polygon_array_from_binary(self.polygon_collection, idx)
+
+    def cleanup(self) -> None:
+        """Clean up resources."""
+        # Close the mmap before the underlying file; guard each attribute so
+        # cleanup is also safe when __init__ failed partway through
+        if hasattr(self, "coord_buf"):
+            utils.close_resource(self.coord_buf)
+        if hasattr(self, "coord_file"):
+            utils.close_resource(self.coord_file)
+        if hasattr(self, "polygon_collection"):
+            del self.polygon_collection
+
+
+class MemoryCoordAccessor(AbstractCoordAccessor):
+    """Accessor that preloads all polygon coordinates into memory."""
+
+    def __init__(self, coordinate_file_path: Path):
+        """
+        Initialize the memory-based coordinate accessor.
+
+        Args:
+            coordinate_file_path: Path to the coordinate file
+        """
+        # Read entire file into memory
+        with open(coordinate_file_path, "rb") as f:
+            coord_buf = f.read()
+
+        # Initialize polygon collection
+        polygon_collection = get_polygon_collection(coord_buf)
+
+        # Get number of polygons
+        num_polygons = polygon_collection.PolygonsLength()
+
+        # Preload all polygons
+        self.polygons: Dict[int, np.ndarray] = {}
+        for idx in range(num_polygons):
+            self.polygons[idx] = read_polygon_array_from_binary(polygon_collection, idx)
+
+        # Once polygons are loaded, we don't need to keep polygon_collection or coord_buf references
+        # They'll be garbage collected
+
+    def __getitem__(self, idx: int) -> np.ndarray:
+        """
+        Get the polygon coordinates for the given index.
+
+        Args:
+            idx: The polygon index
+
+        Returns:
+            A numpy array containing the polygon coordinates
+        """
+        return self.polygons[idx]
+
+    def cleanup(self) -> None:
+        """Clean up resources."""
+        # Just clear the dictionary, no file resources to clean up
+        if hasattr(self, "polygons"):
+            self.polygons.clear()
+
+
+def create_coord_accessor(
+    coordinate_file_path: Path, in_memory: bool
+) -> AbstractCoordAccessor:
+    """
+    Factory function to create the appropriate coordinate accessor.
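+
+    In-memory mode preloads every polygon into a dict (faster repeated lookups,
+    higher memory use); file mode reads polygons on demand from a memory-mapped file.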
+ + Args: + coordinate_file_path: Path to the coordinate file + in_memory: Whether to use in-memory mode + + Returns: + An instance of a coordinate accessor + """ + if in_memory: + return MemoryCoordAccessor(coordinate_file_path) + else: + return FileCoordAccessor(coordinate_file_path) diff --git a/python/timezonefinder/data/boundaries/coordinates.fbs b/python/timezonefinder/data/boundaries/coordinates.fbs new file mode 100644 index 000000000..554ce5c1c Binary files /dev/null and b/python/timezonefinder/data/boundaries/coordinates.fbs differ diff --git a/python/timezonefinder/data/boundaries/xmax.npy b/python/timezonefinder/data/boundaries/xmax.npy new file mode 100644 index 000000000..8e674caff Binary files /dev/null and b/python/timezonefinder/data/boundaries/xmax.npy differ diff --git a/python/timezonefinder/data/boundaries/xmin.npy b/python/timezonefinder/data/boundaries/xmin.npy new file mode 100644 index 000000000..b9b2f5218 Binary files /dev/null and b/python/timezonefinder/data/boundaries/xmin.npy differ diff --git a/python/timezonefinder/data/boundaries/ymax.npy b/python/timezonefinder/data/boundaries/ymax.npy new file mode 100644 index 000000000..30f2c598d Binary files /dev/null and b/python/timezonefinder/data/boundaries/ymax.npy differ diff --git a/python/timezonefinder/data/boundaries/ymin.npy b/python/timezonefinder/data/boundaries/ymin.npy new file mode 100644 index 000000000..c605a0946 Binary files /dev/null and b/python/timezonefinder/data/boundaries/ymin.npy differ diff --git a/python/timezonefinder/data/hole_registry.json b/python/timezonefinder/data/hole_registry.json new file mode 100644 index 000000000..cde3fd6c5 --- /dev/null +++ b/python/timezonefinder/data/hole_registry.json @@ -0,0 +1,302 @@ +{ + "10": [ + 1, + 0 + ], + "151": [ + 2, + 21 + ], + "153": [ + 1, + 23 + ], + "155": [ + 2, + 24 + ], + "167": [ + 1, + 26 + ], + "177": [ + 1, + 27 + ], + "19": [ + 3, + 1 + ], + "208": [ + 1, + 28 + ], + "218": [ + 21, + 29 + ], + "231": [ + 1, + 50 + ], + "268": [ + 2, + 51 + ], + "316": [ + 8, + 53 + ], + "389": [ + 3, + 61 + ], + "391": [ + 2, + 64 + ], + "40": [ + 8, + 4 + ], + "41": [ + 6, + 12 + ], + "410": [ + 5, + 66 + ], + "414": [ + 2, + 71 + ], + "481": [ + 1, + 73 + ], + "488": [ + 2, + 74 + ], + "515": [ + 1, + 76 + ], + "518": [ + 1, + 77 + ], + "522": [ + 16, + 78 + ], + "685": [ + 2, + 94 + ], + "721": [ + 15, + 96 + ], + "725": [ + 44, + 111 + ], + "728": [ + 2, + 155 + ], + "729": [ + 37, + 157 + ], + "730": [ + 20, + 194 + ], + "733": [ + 1, + 214 + ], + "736": [ + 28, + 215 + ], + "737": [ + 3, + 243 + ], + "741": [ + 1, + 246 + ], + "742": [ + 17, + 247 + ], + "743": [ + 4, + 264 + ], + "748": [ + 2, + 268 + ], + "750": [ + 1, + 270 + ], + "753": [ + 2, + 271 + ], + "754": [ + 5, + 273 + ], + "755": [ + 3, + 278 + ], + "756": [ + 14, + 281 + ], + "761": [ + 12, + 295 + ], + "762": [ + 3, + 307 + ], + "765": [ + 1, + 310 + ], + "769": [ + 1, + 311 + ], + "771": [ + 7, + 312 + ], + "776": [ + 21, + 319 + ], + "781": [ + 3, + 340 + ], + "790": [ + 1, + 343 + ], + "793": [ + 5, + 344 + ], + "800": [ + 2, + 349 + ], + "804": [ + 1, + 351 + ], + "806": [ + 2, + 352 + ], + "809": [ + 2, + 354 + ], + "810": [ + 4, + 356 + ], + "812": [ + 13, + 360 + ], + "814": [ + 17, + 373 + ], + "817": [ + 2, + 390 + ], + "819": [ + 1, + 392 + ], + "820": [ + 11, + 393 + ], + "824": [ + 2, + 404 + ], + "828": [ + 13, + 406 + ], + "829": [ + 1, + 419 + ], + "830": [ + 4, + 420 + ], + "833": [ + 6, + 424 + ], + "834": [ + 13, + 430 + ], + "838": [ + 6, + 443 + ], + "841": [ 
+ 4, + 449 + ], + "844": [ + 32, + 453 + ], + "848": [ + 34, + 485 + ], + "851": [ + 37, + 519 + ], + "854": [ + 22, + 556 + ], + "855": [ + 1, + 578 + ], + "91": [ + 1, + 18 + ], + "98": [ + 2, + 19 + ] +} diff --git a/python/timezonefinder/data/holes/coordinates.fbs b/python/timezonefinder/data/holes/coordinates.fbs new file mode 100644 index 000000000..9c286587c Binary files /dev/null and b/python/timezonefinder/data/holes/coordinates.fbs differ diff --git a/python/timezonefinder/data/holes/xmax.npy b/python/timezonefinder/data/holes/xmax.npy new file mode 100644 index 000000000..fd0bf1db5 Binary files /dev/null and b/python/timezonefinder/data/holes/xmax.npy differ diff --git a/python/timezonefinder/data/holes/xmin.npy b/python/timezonefinder/data/holes/xmin.npy new file mode 100644 index 000000000..0380c9c70 Binary files /dev/null and b/python/timezonefinder/data/holes/xmin.npy differ diff --git a/python/timezonefinder/data/holes/ymax.npy b/python/timezonefinder/data/holes/ymax.npy new file mode 100644 index 000000000..18c258d1a Binary files /dev/null and b/python/timezonefinder/data/holes/ymax.npy differ diff --git a/python/timezonefinder/data/holes/ymin.npy b/python/timezonefinder/data/holes/ymin.npy new file mode 100644 index 000000000..146d61b72 Binary files /dev/null and b/python/timezonefinder/data/holes/ymin.npy differ diff --git a/python/timezonefinder/data/hybrid_shortcuts_uint8.fbs b/python/timezonefinder/data/hybrid_shortcuts_uint8.fbs new file mode 100644 index 000000000..c3b4eecba Binary files /dev/null and b/python/timezonefinder/data/hybrid_shortcuts_uint8.fbs differ diff --git a/python/timezonefinder/data/timezone_names.txt b/python/timezonefinder/data/timezone_names.txt new file mode 100644 index 000000000..d2f5a2ef4 --- /dev/null +++ b/python/timezonefinder/data/timezone_names.txt @@ -0,0 +1,92 @@ +Etc/UTC +Africa/Abidjan +Europe/Moscow +Africa/Lagos +Africa/Johannesburg +Africa/Cairo +Africa/Casablanca +Europe/Paris +America/Adak +America/Anchorage +America/Caracas +America/Sao_Paulo +America/Lima +America/Mexico_City +America/Denver +America/Chicago +America/Phoenix +America/New_York +America/Halifax +America/Havana +America/Los_Angeles +America/Miquelon +America/Noronha +America/Nuuk +America/Santiago +America/St_Johns +Asia/Manila +Asia/Jakarta +Australia/Brisbane +Australia/Sydney +Asia/Karachi +Pacific/Auckland +Antarctica/Troll +Pacific/Fiji +Asia/Dubai +Asia/Beirut +Asia/Dhaka +Asia/Tokyo +Asia/Kolkata +Europe/Athens +Asia/Gaza +Asia/Jerusalem +Asia/Kabul +Asia/Kathmandu +Asia/Sakhalin +Asia/Tehran +Asia/Yangon +Atlantic/Azores +Europe/Lisbon +Atlantic/Cape_Verde +Australia/Adelaide +Australia/Darwin +Australia/Eucla +Australia/Lord_Howe +Europe/Chisinau +Europe/Dublin +Europe/London +Pacific/Tongatapu +Pacific/Chatham +Pacific/Easter +Pacific/Gambier +Pacific/Honolulu +Pacific/Kiritimati +Pacific/Marquesas +Pacific/Pago_Pago +Pacific/Norfolk +Pacific/Pitcairn +Etc/GMT-12 +Etc/GMT-11 +Etc/GMT-10 +Etc/GMT-9 +Etc/GMT-8 +Etc/GMT-7 +Etc/GMT-6 +Etc/GMT-5 +Etc/GMT-4 +Etc/GMT-3 +Etc/GMT-2 +Etc/GMT-1 +Etc/GMT +Etc/GMT+1 +Etc/GMT+2 +Etc/GMT+3 +Etc/GMT+4 +Etc/GMT+5 +Etc/GMT+6 +Etc/GMT+7 +Etc/GMT+8 +Etc/GMT+9 +Etc/GMT+10 +Etc/GMT+11 +Etc/GMT+12 diff --git a/python/timezonefinder/data/zone_ids.npy b/python/timezonefinder/data/zone_ids.npy new file mode 100644 index 000000000..8841d0dad Binary files /dev/null and b/python/timezonefinder/data/zone_ids.npy differ diff --git a/python/timezonefinder/data/zone_positions.npy b/python/timezonefinder/data/zone_positions.npy new 
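The hole_registry.json file added above appears to map a boundary polygon id to a pair [number of holes, index of the first hole]; these semantics are an assumption inferred from the data layout, not stated in this patch. A minimal reading sketch under that assumption:

import json
from pathlib import Path

# assumed semantics: key = boundary polygon id, value = [hole count, first hole index]
registry = json.loads(Path("python/timezonefinder/data/hole_registry.json").read_text())
n_holes, first_hole = registry["218"]  # per the data above: 21 holes starting at index 29
hole_ids = list(range(first_hole, first_hole + n_holes))
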
file mode 100644 index 000000000..6bb524753 Binary files /dev/null and b/python/timezonefinder/data/zone_positions.npy differ diff --git a/python/timezonefinder/flatbuf/__init__.py b/python/timezonefinder/flatbuf/__init__.py new file mode 100644 index 000000000..f3af734a3 --- /dev/null +++ b/python/timezonefinder/flatbuf/__init__.py @@ -0,0 +1,5 @@ +"""FlatBuffer schemas, bindings, and IO helpers used by timezonefinder.""" + +from . import generated, io, schemas + +__all__ = ["generated", "io", "schemas"] diff --git a/python/timezonefinder/flatbuf/__pycache__/__init__.cpython-312.pyc b/python/timezonefinder/flatbuf/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..f5eda1ab5 Binary files /dev/null and b/python/timezonefinder/flatbuf/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/__init__.py b/python/timezonefinder/flatbuf/generated/__init__.py new file mode 100644 index 000000000..8d058dab6 --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/__init__.py @@ -0,0 +1,7 @@ +"""Auto-generated FlatBuffer bindings grouped by domain.""" + +__all__ = [ + "polygons", + "shortcuts_uint8", + "shortcuts_uint16", +] diff --git a/python/timezonefinder/flatbuf/generated/__pycache__/__init__.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..61a3d10ee Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/polygons/Polygon.py b/python/timezonefinder/flatbuf/generated/polygons/Polygon.py new file mode 100644 index 000000000..b3ef1e821 --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/polygons/Polygon.py @@ -0,0 +1,92 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: polygons + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class Polygon: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = Polygon() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPolygon(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # Polygon + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # Polygon + def Coords(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Int32Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 4), + ) + return 0 + + # Polygon + def CoordsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int32Flags, o) + return 0 + + # Polygon + def CoordsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # Polygon + def CoordsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def PolygonStart(builder): + builder.StartObject(1) + + +def Start(builder): + PolygonStart(builder) + + +def PolygonAddCoords(builder, coords): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(coords), 0 + ) + + +def AddCoords(builder, coords): + PolygonAddCoords(builder, coords) + + +def PolygonStartCoordsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartCoordsVector(builder, numElems): + return PolygonStartCoordsVector(builder, numElems) + + +def PolygonEnd(builder): + return builder.EndObject() + + +def End(builder): + return PolygonEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/polygons/PolygonCollection.py b/python/timezonefinder/flatbuf/generated/polygons/PolygonCollection.py new file mode 100644 index 000000000..fa5164dbc --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/polygons/PolygonCollection.py @@ -0,0 +1,88 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: polygons + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class PolygonCollection: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PolygonCollection() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPolygonCollection(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # PolygonCollection + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # PolygonCollection + def Polygons(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from timezonefinder.flatbuf.generated.polygons.Polygon import Polygon + + obj = Polygon() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # PolygonCollection + def PolygonsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # PolygonCollection + def PolygonsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def PolygonCollectionStart(builder): + builder.StartObject(1) + + +def Start(builder): + PolygonCollectionStart(builder) + + +def PolygonCollectionAddPolygons(builder, polygons): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(polygons), 0 + ) + + +def AddPolygons(builder, polygons): + PolygonCollectionAddPolygons(builder, polygons) + + +def PolygonCollectionStartPolygonsVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartPolygonsVector(builder, numElems): + return PolygonCollectionStartPolygonsVector(builder, numElems) + + +def PolygonCollectionEnd(builder): + return builder.EndObject() + + +def End(builder): + return PolygonCollectionEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/polygons/__init__.py b/python/timezonefinder/flatbuf/generated/polygons/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/timezonefinder/flatbuf/generated/polygons/__pycache__/Polygon.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/polygons/__pycache__/Polygon.cpython-312.pyc new file mode 100644 index 000000000..54b656539 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/polygons/__pycache__/Polygon.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/polygons/__pycache__/PolygonCollection.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/polygons/__pycache__/PolygonCollection.cpython-312.pyc new file mode 100644 index 000000000..83c43dea2 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/polygons/__pycache__/PolygonCollection.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/polygons/__pycache__/__init__.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/polygons/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..acaf914b4 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/polygons/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/HybridShortcutCollection.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/HybridShortcutCollection.py new file mode 100644 index 000000000..10df14e4b --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/HybridShortcutCollection.py @@ -0,0 +1,90 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint16 + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HybridShortcutCollection: + __slots__ = ["_tab"] + + @classmethod + def 
GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HybridShortcutCollection() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHybridShortcutCollection(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # HybridShortcutCollection + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # HybridShortcutCollection + def Entries(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from timezonefinder.flatbuf.generated.shortcuts_uint16.HybridShortcutEntry import ( + HybridShortcutEntry, + ) + + obj = HybridShortcutEntry() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # HybridShortcutCollection + def EntriesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # HybridShortcutCollection + def EntriesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def HybridShortcutCollectionStart(builder): + builder.StartObject(1) + + +def Start(builder): + HybridShortcutCollectionStart(builder) + + +def HybridShortcutCollectionAddEntries(builder, entries): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(entries), 0 + ) + + +def AddEntries(builder, entries): + HybridShortcutCollectionAddEntries(builder, entries) + + +def HybridShortcutCollectionStartEntriesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartEntriesVector(builder, numElems): + return HybridShortcutCollectionStartEntriesVector(builder, numElems) + + +def HybridShortcutCollectionEnd(builder): + return builder.EndObject() + + +def End(builder): + return HybridShortcutCollectionEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/HybridShortcutEntry.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/HybridShortcutEntry.py new file mode 100644 index 000000000..e22c5b5ac --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/HybridShortcutEntry.py @@ -0,0 +1,97 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint16 + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HybridShortcutEntry: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HybridShortcutEntry() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHybridShortcutEntry(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # HybridShortcutEntry + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # HybridShortcutEntry + def HexId(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint64Flags, o + self._tab.Pos + ) + return 0 + + # HybridShortcutEntry + def ValueType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # HybridShortcutEntry + def Value(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + from flatbuffers.table import Table + + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + +def HybridShortcutEntryStart(builder): + builder.StartObject(3) + + +def Start(builder): + HybridShortcutEntryStart(builder) + + +def HybridShortcutEntryAddHexId(builder, hexId): + builder.PrependUint64Slot(0, hexId, 0) + + +def AddHexId(builder, hexId): + HybridShortcutEntryAddHexId(builder, hexId) + + +def HybridShortcutEntryAddValueType(builder, valueType): + builder.PrependUint8Slot(1, valueType, 0) + + +def AddValueType(builder, valueType): + HybridShortcutEntryAddValueType(builder, valueType) + + +def HybridShortcutEntryAddValue(builder, value): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0 + ) + + +def AddValue(builder, value): + HybridShortcutEntryAddValue(builder, value) + + +def HybridShortcutEntryEnd(builder): + return builder.EndObject() + + +def End(builder): + return HybridShortcutEntryEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/PolygonList.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/PolygonList.py new file mode 100644 index 000000000..0c0eefe96 --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/PolygonList.py @@ -0,0 +1,92 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint16 + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class PolygonList: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PolygonList() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPolygonList(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # PolygonList + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # PolygonList + def PolyIds(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint16Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2), + ) + return 0 + + # PolygonList + def PolyIdsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint16Flags, o) + return 0 + + # PolygonList + def PolyIdsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # PolygonList + def PolyIdsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def PolygonListStart(builder): + builder.StartObject(1) + + +def Start(builder): + PolygonListStart(builder) + + +def PolygonListAddPolyIds(builder, polyIds): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(polyIds), 0 + ) + + +def AddPolyIds(builder, polyIds): + PolygonListAddPolyIds(builder, polyIds) + + +def PolygonListStartPolyIdsVector(builder, numElems): + return builder.StartVector(2, numElems, 2) + + +def StartPolyIdsVector(builder, numElems): + return PolygonListStartPolyIdsVector(builder, numElems) + + +def PolygonListEnd(builder): + return builder.EndObject() + + +def End(builder): + return PolygonListEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/ShortcutValue.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/ShortcutValue.py new file mode 100644 index 000000000..73bb67ddb --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/ShortcutValue.py @@ -0,0 +1,9 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint16 + + +class ShortcutValue: + NONE = 0 + UniqueZone = 1 + PolygonList = 2 diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/UniqueZone.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/UniqueZone.py new file mode 100644 index 000000000..08641f9cb --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/UniqueZone.py @@ -0,0 +1,61 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint16 + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UniqueZone: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UniqueZone() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUniqueZone(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # UniqueZone + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # UniqueZone + def ZoneId(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint16Flags, o + self._tab.Pos + ) + return 0 + + +def UniqueZoneStart(builder): + builder.StartObject(1) + + +def Start(builder): + UniqueZoneStart(builder) + + +def UniqueZoneAddZoneId(builder, zoneId): + builder.PrependUint16Slot(0, zoneId, 0) + + +def AddZoneId(builder, zoneId): + UniqueZoneAddZoneId(builder, zoneId) + + +def UniqueZoneEnd(builder): + return builder.EndObject() + + +def End(builder): + return UniqueZoneEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__init__.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/HybridShortcutCollection.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/HybridShortcutCollection.cpython-312.pyc new file mode 100644 index 000000000..3c843b1f0 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/HybridShortcutCollection.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/HybridShortcutEntry.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/HybridShortcutEntry.cpython-312.pyc new file mode 100644 index 000000000..7f9b5cacd Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/HybridShortcutEntry.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/PolygonList.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/PolygonList.cpython-312.pyc new file mode 100644 index 000000000..9a40ed60e Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/PolygonList.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/ShortcutValue.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/ShortcutValue.cpython-312.pyc new file mode 100644 index 000000000..0c4cf730a Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/ShortcutValue.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/UniqueZone.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/UniqueZone.cpython-312.pyc new file mode 100644 index 000000000..90e309067 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/UniqueZone.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/__init__.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..51a01e139 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint16/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/HybridShortcutCollection.py 
b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/HybridShortcutCollection.py new file mode 100644 index 000000000..d64bbf03e --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/HybridShortcutCollection.py @@ -0,0 +1,90 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint8 + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HybridShortcutCollection: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HybridShortcutCollection() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHybridShortcutCollection(cls, buf, offset=0): + """This method is deprecated. Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # HybridShortcutCollection + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # HybridShortcutCollection + def Entries(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + x = self._tab.Vector(o) + x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4 + x = self._tab.Indirect(x) + from timezonefinder.flatbuf.generated.shortcuts_uint8.HybridShortcutEntry import ( + HybridShortcutEntry, + ) + + obj = HybridShortcutEntry() + obj.Init(self._tab.Bytes, x) + return obj + return None + + # HybridShortcutCollection + def EntriesLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # HybridShortcutCollection + def EntriesIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def HybridShortcutCollectionStart(builder): + builder.StartObject(1) + + +def Start(builder): + HybridShortcutCollectionStart(builder) + + +def HybridShortcutCollectionAddEntries(builder, entries): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(entries), 0 + ) + + +def AddEntries(builder, entries): + HybridShortcutCollectionAddEntries(builder, entries) + + +def HybridShortcutCollectionStartEntriesVector(builder, numElems): + return builder.StartVector(4, numElems, 4) + + +def StartEntriesVector(builder, numElems): + return HybridShortcutCollectionStartEntriesVector(builder, numElems) + + +def HybridShortcutCollectionEnd(builder): + return builder.EndObject() + + +def End(builder): + return HybridShortcutCollectionEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/HybridShortcutEntry.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/HybridShortcutEntry.py new file mode 100644 index 000000000..e9b844cb0 --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/HybridShortcutEntry.py @@ -0,0 +1,97 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint8 + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class HybridShortcutEntry: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = HybridShortcutEntry() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsHybridShortcutEntry(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # HybridShortcutEntry + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # HybridShortcutEntry + def HexId(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get( + flatbuffers.number_types.Uint64Flags, o + self._tab.Pos + ) + return 0 + + # HybridShortcutEntry + def ValueType(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + # HybridShortcutEntry + def Value(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8)) + if o != 0: + from flatbuffers.table import Table + + obj = Table(bytearray(), 0) + self._tab.Union(obj, o) + return obj + return None + + +def HybridShortcutEntryStart(builder): + builder.StartObject(3) + + +def Start(builder): + HybridShortcutEntryStart(builder) + + +def HybridShortcutEntryAddHexId(builder, hexId): + builder.PrependUint64Slot(0, hexId, 0) + + +def AddHexId(builder, hexId): + HybridShortcutEntryAddHexId(builder, hexId) + + +def HybridShortcutEntryAddValueType(builder, valueType): + builder.PrependUint8Slot(1, valueType, 0) + + +def AddValueType(builder, valueType): + HybridShortcutEntryAddValueType(builder, valueType) + + +def HybridShortcutEntryAddValue(builder, value): + builder.PrependUOffsetTRelativeSlot( + 2, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0 + ) + + +def AddValue(builder, value): + HybridShortcutEntryAddValue(builder, value) + + +def HybridShortcutEntryEnd(builder): + return builder.EndObject() + + +def End(builder): + return HybridShortcutEntryEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/PolygonList.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/PolygonList.py new file mode 100644 index 000000000..59a90e2a1 --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/PolygonList.py @@ -0,0 +1,92 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint8 + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class PolygonList: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = PolygonList() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsPolygonList(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # PolygonList + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # PolygonList + def PolyIds(self, j): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + a = self._tab.Vector(o) + return self._tab.Get( + flatbuffers.number_types.Uint16Flags, + a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 2), + ) + return 0 + + # PolygonList + def PolyIdsAsNumpy(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Uint16Flags, o) + return 0 + + # PolygonList + def PolyIdsLength(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.VectorLen(o) + return 0 + + # PolygonList + def PolyIdsIsNone(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + return o == 0 + + +def PolygonListStart(builder): + builder.StartObject(1) + + +def Start(builder): + PolygonListStart(builder) + + +def PolygonListAddPolyIds(builder, polyIds): + builder.PrependUOffsetTRelativeSlot( + 0, flatbuffers.number_types.UOffsetTFlags.py_type(polyIds), 0 + ) + + +def AddPolyIds(builder, polyIds): + PolygonListAddPolyIds(builder, polyIds) + + +def PolygonListStartPolyIdsVector(builder, numElems): + return builder.StartVector(2, numElems, 2) + + +def StartPolyIdsVector(builder, numElems): + return PolygonListStartPolyIdsVector(builder, numElems) + + +def PolygonListEnd(builder): + return builder.EndObject() + + +def End(builder): + return PolygonListEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/ShortcutValue.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/ShortcutValue.py new file mode 100644 index 000000000..13259ff50 --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/ShortcutValue.py @@ -0,0 +1,9 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint8 + + +class ShortcutValue: + NONE = 0 + UniqueZone = 1 + PolygonList = 2 diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/UniqueZone.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/UniqueZone.py new file mode 100644 index 000000000..06237ca94 --- /dev/null +++ b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/UniqueZone.py @@ -0,0 +1,59 @@ +# automatically generated by the FlatBuffers compiler, do not modify + +# namespace: shortcuts_uint8 + +import flatbuffers +from flatbuffers.compat import import_numpy + +np = import_numpy() + + +class UniqueZone: + __slots__ = ["_tab"] + + @classmethod + def GetRootAs(cls, buf, offset=0): + n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset) + x = UniqueZone() + x.Init(buf, n + offset) + return x + + @classmethod + def GetRootAsUniqueZone(cls, buf, offset=0): + """This method is deprecated. 
Please switch to GetRootAs.""" + return cls.GetRootAs(buf, offset) + + # UniqueZone + def Init(self, buf, pos): + self._tab = flatbuffers.table.Table(buf, pos) + + # UniqueZone + def ZoneId(self): + o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4)) + if o != 0: + return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos) + return 0 + + +def UniqueZoneStart(builder): + builder.StartObject(1) + + +def Start(builder): + UniqueZoneStart(builder) + + +def UniqueZoneAddZoneId(builder, zoneId): + builder.PrependUint8Slot(0, zoneId, 0) + + +def AddZoneId(builder, zoneId): + UniqueZoneAddZoneId(builder, zoneId) + + +def UniqueZoneEnd(builder): + return builder.EndObject() + + +def End(builder): + return UniqueZoneEnd(builder) diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__init__.py b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/HybridShortcutCollection.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/HybridShortcutCollection.cpython-312.pyc new file mode 100644 index 000000000..f3eb40ed6 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/HybridShortcutCollection.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/HybridShortcutEntry.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/HybridShortcutEntry.cpython-312.pyc new file mode 100644 index 000000000..45a5db110 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/HybridShortcutEntry.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/PolygonList.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/PolygonList.cpython-312.pyc new file mode 100644 index 000000000..63f02f1db Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/PolygonList.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/ShortcutValue.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/ShortcutValue.cpython-312.pyc new file mode 100644 index 000000000..96512903c Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/ShortcutValue.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/UniqueZone.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/UniqueZone.cpython-312.pyc new file mode 100644 index 000000000..3f26b9584 Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/UniqueZone.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/__init__.cpython-312.pyc b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..f02d5b62d Binary files /dev/null and b/python/timezonefinder/flatbuf/generated/shortcuts_uint8/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/io/__init__.py b/python/timezonefinder/flatbuf/io/__init__.py new file mode 100644 index 000000000..b82173ae6 --- /dev/null +++ 
b/python/timezonefinder/flatbuf/io/__init__.py @@ -0,0 +1,27 @@ +"""Utilities for reading and writing FlatBuffer assets.""" + +from .polygons import ( + flatten_polygon_coords, + reshape_to_polygon_coords, + get_coordinate_path, + write_polygon_collection_flatbuffer, + get_polygon_collection, + read_polygon_array_from_binary, +) +from .hybrid_shortcuts import ( + get_hybrid_shortcut_file_path, + write_hybrid_shortcuts_flatbuffers, + read_hybrid_shortcuts_binary, +) + +__all__ = [ + "flatten_polygon_coords", + "reshape_to_polygon_coords", + "get_coordinate_path", + "write_polygon_collection_flatbuffer", + "get_polygon_collection", + "read_polygon_array_from_binary", + "get_hybrid_shortcut_file_path", + "write_hybrid_shortcuts_flatbuffers", + "read_hybrid_shortcuts_binary", +] diff --git a/python/timezonefinder/flatbuf/io/__pycache__/__init__.cpython-312.pyc b/python/timezonefinder/flatbuf/io/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..2895b8e6d Binary files /dev/null and b/python/timezonefinder/flatbuf/io/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/io/__pycache__/hybrid_shortcuts.cpython-312.pyc b/python/timezonefinder/flatbuf/io/__pycache__/hybrid_shortcuts.cpython-312.pyc new file mode 100644 index 000000000..87b814eb2 Binary files /dev/null and b/python/timezonefinder/flatbuf/io/__pycache__/hybrid_shortcuts.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/io/__pycache__/polygons.cpython-312.pyc b/python/timezonefinder/flatbuf/io/__pycache__/polygons.cpython-312.pyc new file mode 100644 index 000000000..5c3c11026 Binary files /dev/null and b/python/timezonefinder/flatbuf/io/__pycache__/polygons.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/io/hybrid_shortcuts.py b/python/timezonefinder/flatbuf/io/hybrid_shortcuts.py new file mode 100644 index 000000000..1ba542936 --- /dev/null +++ b/python/timezonefinder/flatbuf/io/hybrid_shortcuts.py @@ -0,0 +1,369 @@ +"""Utilities for working with optimized hybrid shortcut FlatBuffer data.""" + +from pathlib import Path +from typing import Any, Callable, Dict, List, Union +from dataclasses import dataclass + +import flatbuffers +import numpy as np + +from timezonefinder.configs import DEFAULT_DATA_DIR + +# Static imports for uint8 schema +from timezonefinder.flatbuf.generated.shortcuts_uint8.HybridShortcutCollection import ( + HybridShortcutCollection as HybridShortcutCollectionUint8, + HybridShortcutCollectionAddEntries as HybridShortcutCollectionAddEntriesUint8, + HybridShortcutCollectionEnd as HybridShortcutCollectionEndUint8, + HybridShortcutCollectionStart as HybridShortcutCollectionStartUint8, + HybridShortcutCollectionStartEntriesVector as HybridShortcutCollectionStartEntriesVectorUint8, +) +from timezonefinder.flatbuf.generated.shortcuts_uint8.HybridShortcutEntry import ( + HybridShortcutEntryAddHexId as HybridShortcutEntryAddHexIdUint8, + HybridShortcutEntryAddValue as HybridShortcutEntryAddValueUint8, + HybridShortcutEntryAddValueType as HybridShortcutEntryAddValueTypeUint8, + HybridShortcutEntryEnd as HybridShortcutEntryEndUint8, + HybridShortcutEntryStart as HybridShortcutEntryStartUint8, +) +from timezonefinder.flatbuf.generated.shortcuts_uint8.UniqueZone import ( + UniqueZone as UniqueZoneUint8, + UniqueZoneAddZoneId as UniqueZoneAddZoneIdUint8, + UniqueZoneEnd as UniqueZoneEndUint8, + UniqueZoneStart as UniqueZoneStartUint8, +) +from timezonefinder.flatbuf.generated.shortcuts_uint8.PolygonList import ( + PolygonList as 
PolygonListUint8, + PolygonListAddPolyIds as PolygonListAddPolyIdsUint8, + PolygonListEnd as PolygonListEndUint8, + PolygonListStart as PolygonListStartUint8, + PolygonListStartPolyIdsVector as PolygonListStartPolyIdsVectorUint8, +) +from timezonefinder.flatbuf.generated.shortcuts_uint8.ShortcutValue import ( + ShortcutValue as ShortcutValueUint8, +) + +# Static imports for uint16 schema +from timezonefinder.flatbuf.generated.shortcuts_uint16.HybridShortcutCollection import ( + HybridShortcutCollection as HybridShortcutCollectionUint16, + HybridShortcutCollectionAddEntries as HybridShortcutCollectionAddEntriesUint16, + HybridShortcutCollectionEnd as HybridShortcutCollectionEndUint16, + HybridShortcutCollectionStart as HybridShortcutCollectionStartUint16, + HybridShortcutCollectionStartEntriesVector as HybridShortcutCollectionStartEntriesVectorUint16, +) +from timezonefinder.flatbuf.generated.shortcuts_uint16.HybridShortcutEntry import ( + HybridShortcutEntryAddHexId as HybridShortcutEntryAddHexIdUint16, + HybridShortcutEntryAddValue as HybridShortcutEntryAddValueUint16, + HybridShortcutEntryAddValueType as HybridShortcutEntryAddValueTypeUint16, + HybridShortcutEntryEnd as HybridShortcutEntryEndUint16, + HybridShortcutEntryStart as HybridShortcutEntryStartUint16, +) +from timezonefinder.flatbuf.generated.shortcuts_uint16.UniqueZone import ( + UniqueZone as UniqueZoneUint16, + UniqueZoneAddZoneId as UniqueZoneAddZoneIdUint16, + UniqueZoneEnd as UniqueZoneEndUint16, + UniqueZoneStart as UniqueZoneStartUint16, +) +from timezonefinder.flatbuf.generated.shortcuts_uint16.PolygonList import ( + PolygonList as PolygonListUint16, + PolygonListAddPolyIds as PolygonListAddPolyIdsUint16, + PolygonListEnd as PolygonListEndUint16, + PolygonListStart as PolygonListStartUint16, + PolygonListStartPolyIdsVector as PolygonListStartPolyIdsVectorUint16, +) +from timezonefinder.flatbuf.generated.shortcuts_uint16.ShortcutValue import ( + ShortcutValue as ShortcutValueUint16, +) + + +@dataclass +class SchemaImports: + """Container for schema-specific imports to eliminate magic strings.""" + + # Collection functions + collection_start: Callable[..., Any] + collection_add_entries: Callable[..., Any] + collection_end: Callable[..., Any] + collection_start_entries_vector: Callable[..., Any] + + # Entry functions + entry_start: Callable[..., Any] + entry_add_hex_id: Callable[..., Any] + entry_add_value_type: Callable[..., Any] + entry_add_value: Callable[..., Any] + entry_end: Callable[..., Any] + + # UniqueZone functions + unique_zone_start: Callable[..., Any] + unique_zone_add_zone_id: Callable[..., Any] + unique_zone_end: Callable[..., Any] + + # PolygonList functions + polygon_list_start: Callable[..., Any] + polygon_list_add_poly_ids: Callable[..., Any] + polygon_list_end: Callable[..., Any] + polygon_list_start_poly_ids_vector: Callable[..., Any] + + # ShortcutValue enum + shortcut_value: Any + + # Validation parameters + max_zone_id: int + dtype_name: str + + +@dataclass +class ReadSchemaImports: + """Container for read-specific schema imports.""" + + collection: Any + unique_zone: Any + polygon_list: Any + shortcut_value: Any + + +def get_hybrid_shortcut_file_path( + zone_id_dtype: np.dtype, output_path: Path = DEFAULT_DATA_DIR +) -> Path: + """Return the path to the appropriate hybrid shortcut FlatBuffer binary file.""" + if zone_id_dtype.itemsize == 1: + return output_path / "hybrid_shortcuts_uint8.fbs" + elif zone_id_dtype.itemsize == 2: + return output_path / "hybrid_shortcuts_uint16.fbs" + else: + raise 
ValueError( + f"Unsupported zone_id_dtype: {zone_id_dtype}. Use uint8 or uint16." + ) + + +def _validate_zone_id_dtype(zone_id_dtype: np.dtype) -> np.dtype: + """Validate and normalize zone ID dtype.""" + dtype = np.dtype(zone_id_dtype) + if dtype.kind != "u": + raise ValueError(f"Zone id dtype must be unsigned integer, got {dtype}") + if dtype.itemsize not in (1, 2): + raise ValueError( + f"Zone id dtype must be 1 or 2 bytes, got {dtype.itemsize} bytes" + ) + return dtype.newbyteorder("<") + + +def write_hybrid_shortcuts_flatbuffers( + hybrid_mapping: Dict[int, Union[int, List[int]]], + zone_id_dtype: np.dtype, + output_file: Path, +) -> None: + """ + Write hybrid shortcut mapping to the appropriate optimized FlatBuffer binary file. + + Args: + hybrid_mapping: Dictionary mapping H3 hexagon IDs to either: + - int: unique zone ID (when all polygons share same zone) + - List[int]: list of polygon IDs (when multiple zones) + zone_id_dtype: numpy dtype for zone IDs (uint8 or uint16) + output_file: Path to save the FlatBuffer file + """ + print(f"Writing {len(hybrid_mapping)} optimized hybrid shortcuts to {output_file}") + + dtype = _validate_zone_id_dtype(zone_id_dtype) + _write_hybrid_shortcuts_generic(hybrid_mapping, dtype, output_file) + + +def _write_hybrid_shortcuts_generic( + hybrid_mapping: Dict[int, Union[int, List[int]]], + zone_id_dtype: np.dtype, + output_file: Path, +) -> None: + """Write hybrid shortcuts using the appropriate schema based on dtype.""" + if zone_id_dtype.itemsize == 1: + # uint8 schema imports + schema = SchemaImports( + collection_start=HybridShortcutCollectionStartUint8, + collection_add_entries=HybridShortcutCollectionAddEntriesUint8, + collection_end=HybridShortcutCollectionEndUint8, + collection_start_entries_vector=HybridShortcutCollectionStartEntriesVectorUint8, + entry_start=HybridShortcutEntryStartUint8, + entry_add_hex_id=HybridShortcutEntryAddHexIdUint8, + entry_add_value_type=HybridShortcutEntryAddValueTypeUint8, + entry_add_value=HybridShortcutEntryAddValueUint8, + entry_end=HybridShortcutEntryEndUint8, + unique_zone_start=UniqueZoneStartUint8, + unique_zone_add_zone_id=UniqueZoneAddZoneIdUint8, + unique_zone_end=UniqueZoneEndUint8, + polygon_list_start=PolygonListStartUint8, + polygon_list_add_poly_ids=PolygonListAddPolyIdsUint8, + polygon_list_end=PolygonListEndUint8, + polygon_list_start_poly_ids_vector=PolygonListStartPolyIdsVectorUint8, + shortcut_value=ShortcutValueUint8, + max_zone_id=255, + dtype_name="uint8", + ) + else: + # uint16 schema imports + schema = SchemaImports( + collection_start=HybridShortcutCollectionStartUint16, + collection_add_entries=HybridShortcutCollectionAddEntriesUint16, + collection_end=HybridShortcutCollectionEndUint16, + collection_start_entries_vector=HybridShortcutCollectionStartEntriesVectorUint16, + entry_start=HybridShortcutEntryStartUint16, + entry_add_hex_id=HybridShortcutEntryAddHexIdUint16, + entry_add_value_type=HybridShortcutEntryAddValueTypeUint16, + entry_add_value=HybridShortcutEntryAddValueUint16, + entry_end=HybridShortcutEntryEndUint16, + unique_zone_start=UniqueZoneStartUint16, + unique_zone_add_zone_id=UniqueZoneAddZoneIdUint16, + unique_zone_end=UniqueZoneEndUint16, + polygon_list_start=PolygonListStartUint16, + polygon_list_add_poly_ids=PolygonListAddPolyIdsUint16, + polygon_list_end=PolygonListEndUint16, + polygon_list_start_poly_ids_vector=PolygonListStartPolyIdsVectorUint16, + shortcut_value=ShortcutValueUint16, + max_zone_id=65535, + dtype_name="uint16", + ) + + 
_write_hybrid_shortcuts_with_schema(hybrid_mapping, output_file, schema) + + +def _write_hybrid_shortcuts_with_schema( + hybrid_mapping: Dict[int, Union[int, List[int]]], + output_file: Path, + schema: SchemaImports, +) -> None: + """Write hybrid shortcuts using the provided schema imports.""" + builder = flatbuffers.Builder(0) + entry_offsets = [] + + # Validate zone IDs fit in dtype + for value in hybrid_mapping.values(): + if isinstance(value, int) and value > schema.max_zone_id: + raise ValueError( + f"Zone ID {value} exceeds {schema.dtype_name} maximum ({schema.max_zone_id})" + ) + + for hex_id, value in hybrid_mapping.items(): + if isinstance(value, int): + # Create UniqueZone with direct storage + schema.unique_zone_start(builder) + schema.unique_zone_add_zone_id(builder, value) + unique_zone_offset = schema.unique_zone_end(builder) + + # Create entry with UniqueZone + schema.entry_start(builder) + schema.entry_add_hex_id(builder, hex_id) + schema.entry_add_value_type(builder, schema.shortcut_value.UniqueZone) + schema.entry_add_value(builder, unique_zone_offset) + entry_offset = schema.entry_end(builder) + + else: + # Create PolygonList + poly_ids = list(value) + schema.polygon_list_start_poly_ids_vector(builder, len(poly_ids)) + for i in range(len(poly_ids) - 1, -1, -1): + builder.PrependUint16(poly_ids[i]) + poly_ids_vector = builder.EndVector() + + schema.polygon_list_start(builder) + schema.polygon_list_add_poly_ids(builder, poly_ids_vector) + polygon_list_offset = schema.polygon_list_end(builder) + + # Create entry with PolygonList + schema.entry_start(builder) + schema.entry_add_hex_id(builder, hex_id) + schema.entry_add_value_type(builder, schema.shortcut_value.PolygonList) + schema.entry_add_value(builder, polygon_list_offset) + entry_offset = schema.entry_end(builder) + + entry_offsets.append(entry_offset) + + # Create entries vector + schema.collection_start_entries_vector(builder, len(entry_offsets)) + for offset in reversed(entry_offsets): + builder.PrependUOffsetTRelative(offset) + entries_vector = builder.EndVector() + + # Create HybridShortcutCollection + schema.collection_start(builder) + schema.collection_add_entries(builder, entries_vector) + collection = schema.collection_end(builder) + + builder.Finish(collection) + + # Write to file + with open(output_file, "wb") as f: + f.write(builder.Output()) + + +def read_hybrid_shortcuts_binary( + file_path: Path, +) -> Dict[int, Union[int, np.ndarray]]: + """ + Read hybrid shortcut mapping from an optimized FlatBuffer binary file. + + Auto-detects whether the file uses uint8 or uint16 schema based on filename. + + Args: + file_path: Path to the hybrid shortcut FlatBuffer file + + Returns: + Dictionary mapping H3 hexagon IDs to either: + - int: unique zone ID (when all polygons share same zone) + - np.ndarray: array of polygon IDs (when multiple zones) + """ + # Determine schema type from filename and select appropriate imports + if "uint8" in file_path.name: + schema = ReadSchemaImports( + collection=HybridShortcutCollectionUint8, + unique_zone=UniqueZoneUint8, + polygon_list=PolygonListUint8, + shortcut_value=ShortcutValueUint8, + ) + elif "uint16" in file_path.name: + schema = ReadSchemaImports( + collection=HybridShortcutCollectionUint16, + unique_zone=UniqueZoneUint16, + polygon_list=PolygonListUint16, + shortcut_value=ShortcutValueUint16, + ) + else: + raise ValueError( + f"Cannot determine schema from filename: {file_path.name}. " + "Filename must include 'uint8' or 'uint16'." 
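A minimal round-trip sketch for the writer/reader pair defined in this module (hex ids and zone ids below are illustrative only; module paths and function names as introduced in this patch):

import numpy as np
from pathlib import Path

from timezonefinder.flatbuf.io.hybrid_shortcuts import (
    get_hybrid_shortcut_file_path,
    read_hybrid_shortcuts_binary,
    write_hybrid_shortcuts_flatbuffers,
)

dtype = np.dtype(np.uint8)
out_file = get_hybrid_shortcut_file_path(dtype, Path("/tmp"))  # -> /tmp/hybrid_shortcuts_uint8.fbs
# hex id -> unique zone id, or hex id -> list of polygon ids (made-up values)
mapping = {599686042433355775: 42, 599686030622195711: [1, 2, 3]}
write_hybrid_shortcuts_flatbuffers(mapping, dtype, out_file)
# zone ids come back as int, polygon lists as np.ndarray
restored = read_hybrid_shortcuts_binary(out_file)
assert restored[599686042433355775] == 42
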
+ ) + + return _read_hybrid_shortcuts_with_schema(file_path, schema) + + +def _read_hybrid_shortcuts_with_schema( + file_path: Path, schema: ReadSchemaImports +) -> Dict[int, Union[int, np.ndarray]]: + """Read hybrid shortcuts using the provided schema imports.""" + with open(file_path, "rb") as f: + buf = f.read() + + # mypy: GetRootAs is a class method on FlatBuffers classes + collection = schema.collection.GetRootAs(buf, 0) # type: ignore + + hybrid_mapping: Dict[int, Union[int, np.ndarray]] = {} + for i in range(collection.EntriesLength()): + entry = collection.Entries(i) + hex_id = entry.HexId() + + # Determine value type and extract data + value_type = entry.ValueType() + value = entry.Value() + + if value_type == schema.shortcut_value.UniqueZone: + unique_zone = schema.unique_zone() # type: ignore + unique_zone.Init(value.Bytes, value.Pos) + zone_id = unique_zone.ZoneId() # Direct zone ID, no lookup needed + hybrid_mapping[hex_id] = int(zone_id) + + elif value_type == schema.shortcut_value.PolygonList: + polygon_list = schema.polygon_list() # type: ignore + polygon_list.Init(value.Bytes, value.Pos) + poly_ids = polygon_list.PolyIdsAsNumpy() + hybrid_mapping[hex_id] = poly_ids + + else: + raise ValueError(f"Unknown ShortcutValue type: {value_type}") + + return hybrid_mapping diff --git a/python/timezonefinder/flatbuf/io/polygons.py b/python/timezonefinder/flatbuf/io/polygons.py new file mode 100644 index 000000000..d966fdf05 --- /dev/null +++ b/python/timezonefinder/flatbuf/io/polygons.py @@ -0,0 +1,132 @@ +import flatbuffers +import mmap +import numpy as np +from pathlib import Path +from typing import List, Union + +from timezonefinder.configs import DEFAULT_DATA_DIR +from timezonefinder.flatbuf.generated.polygons.Polygon import ( + PolygonStart, + PolygonEnd, + PolygonAddCoords, + PolygonStartCoordsVector, +) +from timezonefinder.flatbuf.generated.polygons.PolygonCollection import ( + PolygonCollection, + PolygonCollectionStart, + PolygonCollectionEnd, + PolygonCollectionAddPolygons, + PolygonCollectionStartPolygonsVector, +) + + +def flatten_polygon_coords(polygon: np.ndarray) -> np.ndarray: + """Convert polygon coordinates from shape (2, N) to a flattened [x0, y0, x1, y1, ...] array. + + Args: + polygon: Array of polygon coordinates with shape (2, N) + where the first row contains x coordinates and the second row contains y coordinates + + Returns: + Flattened 1D array of coordinates in the format [x0, y0, x1, y1, ...] + """ + return polygon.ravel(order="F") + + +def reshape_to_polygon_coords(coords: np.ndarray) -> np.ndarray: + """Reshape flattened coordinates to the format (2, N). + + Args: + coords: Flattened 1D array of coordinates in the format [x0, y0, x1, y1, ...] + + Returns: + Array of polygon coordinates with shape (2, N) + where the first row contains x coordinates and the second row contains y coordinates + """ + return coords.reshape(2, -1, order="F") + + +def get_coordinate_path(data_dir: Path = DEFAULT_DATA_DIR) -> Path: + """Return the path to the boundaries flatbuffer file.""" + return data_dir / "coordinates.fbs" + + +def write_polygon_collection_flatbuffer( + file_path: Path, polygons: List[np.ndarray] +) -> None: + """Write a collection of polygons to a flatbuffer file using a single coordinate vector. 
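To make the (2, N) to flat layout concrete, a small sketch of the two helpers just defined (example values only):

import numpy as np

from timezonefinder.flatbuf.io.polygons import (
    flatten_polygon_coords,
    reshape_to_polygon_coords,
)

poly = np.array([[0, 10, 20], [1, 11, 21]])  # row 0: x values, row 1: y values
flat = flatten_polygon_coords(poly)  # array([ 0,  1, 10, 11, 20, 21]) -- x/y interleaved
assert np.array_equal(reshape_to_polygon_coords(flat), poly)  # lossless round trip
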
+ + Args: + file_path: Path to save the flatbuffer file + polygons: List of polygon coordinates as numpy arrays with shape (2, N) + where the first row contains x coordinates and the second row contains y coordinates + + Returns: + None + """ + print(f"writing {len(polygons)} polygons to binary file {file_path}") + builder = flatbuffers.Builder(0) + polygon_offsets = [] + + # Create each polygon and store its offset + for polygon in polygons: + # Flatten coordinates to [x0, y0, x1, y1, ...] format + coords = flatten_polygon_coords(polygon) + + # Create coords vector + PolygonStartCoordsVector(builder, len(coords)) + for coord in reversed(coords): + builder.PrependInt32(int(coord)) # Use signed 32-bit integer + coords_offset = builder.EndVector() + + # Create polygon + PolygonStart(builder) + PolygonAddCoords(builder, coords_offset) # Use Coords for combined vector + polygon_offsets.append(PolygonEnd(builder)) + + # Create polygon vector + PolygonCollectionStartPolygonsVector(builder, len(polygon_offsets)) + for offset in reversed(polygon_offsets): + builder.PrependUOffsetTRelative(offset) + polygons_offset = builder.EndVector() + + # Create root table + PolygonCollectionStart(builder) + PolygonCollectionAddPolygons(builder, polygons_offset) + collection_offset = PolygonCollectionEnd(builder) + + # Finish buffer + builder.Finish(collection_offset) + + # Write to file + with open(file_path, "wb") as f: + buf = builder.Output() + f.write(buf) + + +def get_polygon_collection(buf: Union[bytes, mmap.mmap]) -> PolygonCollection: + """Load a PolygonCollection from a binary buffer. + + Args: + buf: A binary stream or memory-mapped file containing the flatbuffer data. + + Returns: PolygonCollection + """ + return PolygonCollection.GetRootAs(buf, 0) + + +def read_polygon_array_from_binary( + poly_collection: PolygonCollection, idx: int +) -> np.ndarray: + """Read a polygon's coordinates from a FlatBuffers collection.""" + # value checks not required as this is a private function + # processed polygon indices are expected to be in range + # nr_polygons = poly_collection.PolygonsLength() + # if idx >= nr_polygons: + # raise IndexError( + # f"Index {idx} out of bounds for collection with {nr_polygons} polygons."
+ # ) + poly = poly_collection.Polygons(idx) + coords = poly.CoordsAsNumpy() # flat 1D array of coordinates + # Reshape to (2, N) format + return reshape_to_polygon_coords(coords) diff --git a/python/timezonefinder/flatbuf/schemas/__init__.py b/python/timezonefinder/flatbuf/schemas/__init__.py new file mode 100644 index 000000000..b90805ec5 --- /dev/null +++ b/python/timezonefinder/flatbuf/schemas/__init__.py @@ -0,0 +1,7 @@ +"""FlatBuffer schema definitions for timezonefinder.""" + +__all__ = [ + "polygons", + "hybrid_shortcuts_uint8", + "hybrid_shortcuts_uint16", +] diff --git a/python/timezonefinder/flatbuf/schemas/__pycache__/__init__.cpython-312.pyc b/python/timezonefinder/flatbuf/schemas/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 000000000..442e40dcb Binary files /dev/null and b/python/timezonefinder/flatbuf/schemas/__pycache__/__init__.cpython-312.pyc differ diff --git a/python/timezonefinder/flatbuf/schemas/hybrid_shortcuts_uint16.fbs b/python/timezonefinder/flatbuf/schemas/hybrid_shortcuts_uint16.fbs new file mode 100644 index 000000000..ff4558f7a --- /dev/null +++ b/python/timezonefinder/flatbuf/schemas/hybrid_shortcuts_uint16.fbs @@ -0,0 +1,37 @@ +namespace timezonefinder.flatbuf.generated.shortcuts_uint16; + +// Table representing a unique zone ID (when all polygons in hex share same zone) +// Optimized for uint16 zone IDs (0-65535) +table UniqueZone { + // Direct zone ID storage - 2 bytes per zone ID + zone_id: ushort; +} + +// Table representing a list of polygon IDs (when hex contains multiple zones) +table PolygonList { + // List of polygon IDs (fixed uint16 as per requirement) + poly_ids: [ushort]; +} + +// Union type for the shortcut value - either unique zone or polygon list +union ShortcutValue { + UniqueZone, + PolygonList +} + +// Individual shortcut entry mapping H3 hex ID to its value +table HybridShortcutEntry { + // H3 hexagon ID (uint64) + hex_id: ulong; + // The value for this hex - either unique zone or polygon list + value: ShortcutValue; +} + +// Root collection containing all shortcut entries +// Optimized for uint16 zone IDs - no metadata overhead +table HybridShortcutCollection { + // All shortcut entries + entries: [HybridShortcutEntry]; +} + +root_type HybridShortcutCollection; diff --git a/python/timezonefinder/flatbuf/schemas/hybrid_shortcuts_uint8.fbs b/python/timezonefinder/flatbuf/schemas/hybrid_shortcuts_uint8.fbs new file mode 100644 index 000000000..6551cfe81 --- /dev/null +++ b/python/timezonefinder/flatbuf/schemas/hybrid_shortcuts_uint8.fbs @@ -0,0 +1,37 @@ +namespace timezonefinder.flatbuf.generated.shortcuts_uint8; + +// Table representing a unique zone ID (when all polygons in hex share same zone) +// Optimized for uint8 zone IDs (0-255) +table UniqueZone { + // Direct zone ID storage - 1 byte per zone ID + zone_id: ubyte; +} + +// Table representing a list of polygon IDs (when hex contains multiple zones) +table PolygonList { + // List of polygon IDs (fixed uint16 as per requirement) + poly_ids: [ushort]; +} + +// Union type for the shortcut value - either unique zone or polygon list +union ShortcutValue { + UniqueZone, + PolygonList +} + +// Individual shortcut entry mapping H3 hex ID to its value +table HybridShortcutEntry { + // H3 hexagon ID (uint64) + hex_id: ulong; + // The value for this hex - either unique zone or polygon list + value: ShortcutValue; +} + +// Root collection containing all shortcut entries +// Optimized for uint8 zone IDs - no metadata overhead +table HybridShortcutCollection { + // All shortcut 
entries
+    entries: [HybridShortcutEntry];
+}
+
+root_type HybridShortcutCollection;
diff --git a/python/timezonefinder/flatbuf/schemas/polygons.fbs b/python/timezonefinder/flatbuf/schemas/polygons.fbs
new file mode 100644
index 000000000..831b0afdd
--- /dev/null
+++ b/python/timezonefinder/flatbuf/schemas/polygons.fbs
@@ -0,0 +1,12 @@
+namespace timezonefinder.flatbuf.generated.polygons;
+
+table Polygon {
+    // combined x and y coordinates stored as a single vector
+    coords:[int];
+}
+
+table PolygonCollection {
+    polygons:[Polygon];
+}
+
+root_type PolygonCollection;
diff --git a/python/timezonefinder/global_functions.py b/python/timezonefinder/global_functions.py
new file mode 100644
index 000000000..95778c3f0
--- /dev/null
+++ b/python/timezonefinder/global_functions.py
@@ -0,0 +1,126 @@
+"""
+This module provides global functions that use a singleton instance of TimezoneFinder.
+
+Note on thread safety: These global functions are not thread-safe. If you need to use
+TimezoneFinder in a multi-threaded environment, create separate TimezoneFinder instances
+for each thread.
+"""
+
+from typing import List, Optional, Union
+
+from timezonefinder.timezonefinder import TimezoneFinder
+from timezonefinder.configs import CoordPairs, CoordLists
+
+# Use a global variable to store the singleton instance
+TF_INSTANCE: TimezoneFinder
+
+
+def _get_tf_instance() -> TimezoneFinder:
+    """Get or create the global TimezoneFinder instance
+
+    Lazy initialization: memory allocation is delayed until actually needed.
+    This is required because the package might be used with a user-defined instance,
+    in which case duplicate initialization overhead must be avoided.
+    """
+    global TF_INSTANCE
+    try:
+        return TF_INSTANCE
+    except NameError:
+        # If TF_INSTANCE is not defined, create it
+        TF_INSTANCE = TimezoneFinder()
+        return TF_INSTANCE
+
+
+def timezone_at(*, lng: float, lat: float) -> Optional[str]:
+    """
+    Looks up in which timezone the given coordinate is included in.
+    Uses the global TimezoneFinder instance.
+
+    Note: This function is not thread-safe. For multi-threaded environments,
+    create separate TimezoneFinder instances.
+
+    :param lng: longitude of the point in degree (-180.0 to 180.0)
+    :param lat: latitude in degree (90.0 to -90.0)
+    :return: the timezone name of a matching polygon or None
+    """
+    return _get_tf_instance().timezone_at(lng=lng, lat=lat)
+
+
+def timezone_at_land(*, lng: float, lat: float) -> Optional[str]:
+    """
+    Computes in which land timezone a point is included in.
+    Uses the global TimezoneFinder instance.
+
+    Note: This function is not thread-safe. For multi-threaded environments,
+    create separate TimezoneFinder instances.
+
+    :param lng: longitude of the point in degree (-180.0 to 180.0)
+    :param lat: latitude in degree (90.0 to -90.0)
+    :return: the timezone name of a matching polygon or
+        ``None`` when an ocean timezone ("Etc/GMT+-XX") has been matched.
+    """
+    return _get_tf_instance().timezone_at_land(lng=lng, lat=lat)
+
+
+def unique_timezone_at(*, lng: float, lat: float) -> Optional[str]:
+    """
+    Returns the name of a unique zone within the corresponding shortcut.
+    Uses the global TimezoneFinder instance.
+
+    Note: This function is not thread-safe. For multi-threaded environments,
+    create separate TimezoneFinder instances.
+ + :param lng: longitude of the point in degree (-180.0 to 180.0) + :param lat: latitude in degree (90.0 to -90.0) + :return: the timezone name of the unique zone or ``None`` if there are no or multiple zones in this shortcut + """ + return _get_tf_instance().unique_timezone_at(lng=lng, lat=lat) + + +def certain_timezone_at(*, lng: float, lat: float) -> Optional[str]: + """ + Checks in which timezone polygon the point is certainly included in. + Uses the global TimezoneFinder instance. + + Note: This function is not thread-safe. For multi-threaded environments, + create separate TimezoneFinder instances. + + .. note:: this is only meaningful when you have compiled your own timezone data + where there are areas without timezone polygon coverage. + Otherwise, some timezone will always be matched and the functionality is equal to using `.timezone_at()` + -> useless to actually test all polygons. + + .. note:: using this function is less performant than `.timezone_at()` + + :param lng: longitude of the point in degree + :param lat: latitude in degree + :return: the timezone name of the polygon the point is included in or `None` + """ + return _get_tf_instance().certain_timezone_at(lng=lng, lat=lat) + + +def get_geometry( + tz_name: Optional[str] = "", + tz_id: Optional[int] = 0, + use_id: bool = False, + coords_as_pairs: bool = False, +) -> List[List[Union[CoordPairs, CoordLists]]]: + """ + Retrieves the geometry of a timezone polygon. + Uses the global TimezoneFinder instance. + + Note: This function is not thread-safe. For multi-threaded environments, + create separate TimezoneFinder instances. + + :param tz_name: one of the names in ``timezone_names.json`` or ``self.timezone_names`` + :param tz_id: the id of the timezone (=index in ``self.timezone_names``) + :param use_id: if ``True`` uses ``tz_id`` instead of ``tz_name`` + :param coords_as_pairs: determines the structure of the polygon representation + :return: a data structure representing the multipolygon of this timezone + output format: ``[ [polygon1, hole1, hole2...], [polygon2, ...], ...]`` + and each polygon and hole is itself formatted like: ``([longitudes], [latitudes])`` + or ``[(lng1,lat1), (lng2,lat2),...]`` if ``coords_as_pairs=True``. 
+ """ + return _get_tf_instance().get_geometry( + tz_name=tz_name, tz_id=tz_id, use_id=use_id, coords_as_pairs=coords_as_pairs + ) diff --git a/python/timezonefinder/inside_poly_extension/inside_polygon_int.c b/python/timezonefinder/inside_poly_extension/inside_polygon_int.c new file mode 100644 index 000000000..67defef25 --- /dev/null +++ b/python/timezonefinder/inside_poly_extension/inside_polygon_int.c @@ -0,0 +1,69 @@ +#include "inside_polygon_int.h" +#include + +bool inside_polygon_int(int x, int y, int nr_coords, int x_coords[], + int y_coords[]) { + // naive implementation, vulnerable to overflow: + // bool inside; + // for (int i = 0, j = nr_coords - 1; i < nr_coords; j = i++) { + // if (((y_coords[i] > y) != (y_coords[j] > y)) && + // (x < (x_coords[j] - x_coords[i]) * (y - y_coords[i]) / + // (y_coords[j] - y_coords[i]) + + // x_coords[i])) { + // inside = !inside; + // } + // } + // return inside; + + bool inside, y_gt_y1, y_gt_y2, x_le_x1, x_le_x2; + long y1, y2, x1, x2, slope1, slope2; // int64 precision + int i, j; + + inside = false; + // the edge from the last to the first point is checked first + j = nr_coords - 1; + y_gt_y1 = y > y_coords[j]; + for (i = 0; i < nr_coords; j = i++) { + y_gt_y2 = y > y_coords[i]; + if (y_gt_y1 ^ y_gt_y2) { // XOR + // [p1-p2] crosses horizontal line in p + // only count crossings "right" of the point ( >= x) + x_le_x1 = x <= x_coords[j]; + x_le_x2 = x <= x_coords[i]; + if (x_le_x1 || x_le_x2) { + if (x_le_x1 && x_le_x2) { + // p1 and p2 are both to the right -> valid crossing + inside = !inside; + } else { + // compare the slope of the line [p1-p2] and [p-p2] + // depending on the position of p2 this determines whether + // the polygon edge is right or left of the point + // to avoid expensive division the divisors (of the slope dy/dx) + // are brought to the other side ( dy/dx > a == dy > a * dx ) + // only one of the points is to the right + // NOTE: int64 precision required to prevent overflow + y1 = y_coords[j]; + y2 = y_coords[i]; + x1 = x_coords[j]; + x2 = x_coords[i]; + slope1 = (y2 - y) * (x2 - x1); + slope2 = (y2 - y1) * (x2 - x); + // NOTE: accept slope equality to also detect if p lies directly + // on an edge + if (y_gt_y1) { + if (slope1 <= slope2) { + inside = !inside; + } + } else { // NOT y_gt_y1 + if (slope1 >= slope2) { + inside = !inside; + } + } + } + } + } + // next point + y_gt_y1 = y_gt_y2; + } + return inside; +} diff --git a/python/timezonefinder/inside_poly_extension/inside_polygon_int.h b/python/timezonefinder/inside_poly_extension/inside_polygon_int.h new file mode 100644 index 000000000..d536fe31a --- /dev/null +++ b/python/timezonefinder/inside_poly_extension/inside_polygon_int.h @@ -0,0 +1,4 @@ +#include + +bool inside_polygon_int(int x, int y, int nr_coords, int x_coords[], + int y_coords[]); diff --git a/python/timezonefinder/inside_polygon_ext.abi3.so b/python/timezonefinder/inside_polygon_ext.abi3.so new file mode 100644 index 000000000..12d7a60fd Binary files /dev/null and b/python/timezonefinder/inside_polygon_ext.abi3.so differ diff --git a/python/timezonefinder/np_binary_helpers.py b/python/timezonefinder/np_binary_helpers.py new file mode 100644 index 000000000..dbd93660d --- /dev/null +++ b/python/timezonefinder/np_binary_helpers.py @@ -0,0 +1,49 @@ +""" +Utility functions for handling .npy numpy binary files related to timezone data. 
+""" + +from pathlib import Path + +import numpy as np + + +def get_zone_ids_path(path: Path) -> Path: + """Return the path to the zone_ids.npy file in the given directory.""" + return path / "zone_ids.npy" + + +def get_zone_positions_path(path: Path) -> Path: + """Return the path to the zone_positions.npy file in the given directory.""" + return path / "zone_positions.npy" + + +def get_xmax_path(path: Path) -> Path: + """Return the path to the xmax.npy file in the given directory.""" + return path / "xmax.npy" + + +def get_xmin_path(path: Path) -> Path: + """Return the path to the xmin.npy file in the given directory.""" + return path / "xmin.npy" + + +def get_ymax_path(path: Path) -> Path: + """Return the path to the ymax.npy file in the given directory.""" + return path / "ymax.npy" + + +def get_ymin_path(path: Path) -> Path: + """Return the path to the ymin.npy file in the given directory.""" + return path / "ymin.npy" + + +def store_per_polygon_vector(file_path: Path, vector: np.ndarray) -> None: + """Store a vector as a .npy file in the specified file path.""" + print(f"Storing vector to {file_path}") + np.save(file_path, vector) + + +def read_per_polygon_vector(file_path: Path) -> np.ndarray: + """Read a vector from a .npy file in the specified file path.""" + vector = np.load(file_path) + return vector diff --git a/python/timezonefinder/polygon_array.py b/python/timezonefinder/polygon_array.py new file mode 100644 index 000000000..3c824bb5d --- /dev/null +++ b/python/timezonefinder/polygon_array.py @@ -0,0 +1,140 @@ +from pathlib import Path +from typing import Iterable, Union + +import numpy as np + +from timezonefinder.configs import IntegerLike + +from timezonefinder import utils +from timezonefinder.coord_accessors import AbstractCoordAccessor, create_coord_accessor +from timezonefinder.flatbuf.io.polygons import ( + get_coordinate_path, +) +from timezonefinder.np_binary_helpers import ( + get_xmax_path, + get_xmin_path, + get_ymax_path, + get_ymin_path, + read_per_polygon_vector, +) + + +class PolygonArray: + xmin: np.ndarray + xmax: np.ndarray + ymin: np.ndarray + ymax: np.ndarray + coordinates: AbstractCoordAccessor + + def __init__( + self, + data_location: Union[str, Path], + in_memory: bool = False, + ): + """ + Initialize the PolygonArray. + :param data_location: The path to the binary data files to use. + :param in_memory: Whether to completely read and keep the coordinate data in memory as numpy. + """ + self.in_memory = in_memory + self.data_location: Path = Path(data_location) + + xmin_path = get_xmin_path(self.data_location) + xmax_path = get_xmax_path(self.data_location) + ymin_path = get_ymin_path(self.data_location) + ymax_path = get_ymax_path(self.data_location) + + # read all per polygon vectors directly into memory (no matter the memory mode) + self.xmin = read_per_polygon_vector(xmin_path) + self.xmax = read_per_polygon_vector(xmax_path) + self.ymin = read_per_polygon_vector(ymin_path) + self.ymax = read_per_polygon_vector(ymax_path) + + coordinate_file_path = get_coordinate_path(self.data_location) + # Initialize the appropriate coordinate accessor based on memory mode + self.coordinates = create_coord_accessor(coordinate_file_path, self.in_memory) + + def __del__(self): + """Clean up resources when the object is destroyed.""" + del self.coordinates + del self.xmin + del self.xmax + del self.ymin + del self.ymax + + def __len__(self) -> int: + """ + Get the number of polygons in the collection. 
+        :return: Number of polygons
+        """
+        return len(self.xmin)
+
+    def outside_bbox(self, poly_id: IntegerLike, x: int, y: int) -> bool:
+        """
+        Check if a point is outside the bounding box of a polygon.
+
+        :param poly_id: Polygon ID
+        :param x: X-coordinate of the point
+        :param y: Y-coordinate of the point
+        :return: True if the point is outside the boundaries, False otherwise
+        """
+        if x > self.xmax[poly_id]:
+            return True
+        if x < self.xmin[poly_id]:
+            return True
+        if y > self.ymax[poly_id]:
+            return True
+        if y < self.ymin[poly_id]:
+            return True
+        return False
+
+    def coords_of(self, idx: IntegerLike) -> np.ndarray:
+        """
+        Get the polygon coordinates for the given index.
+
+        :param idx: The polygon index
+        :return: A numpy array containing the polygon coordinates
+        """
+        return self.coordinates[idx]
+
+    def pip(self, poly_id: IntegerLike, x: int, y: int) -> bool:
+        """
+        Point in polygon (PIP) test.
+
+        :param poly_id: Polygon ID
+        :param x: X-coordinate of the point
+        :param y: Y-coordinate of the point
+        :return: True if the point is inside the polygon, False otherwise
+        """
+        polygon = self.coords_of(poly_id)
+        return utils.inside_polygon(x, y, polygon)
+
+    def pip_with_bbox_check(self, poly_id: IntegerLike, x: int, y: int) -> bool:
+        """
+        Point in polygon (PIP) test with bounding box check.
+
+        :param poly_id: Polygon ID
+        :param x: X-coordinate of the point
+        :param y: Y-coordinate of the point
+        :return: True if the point is inside the polygon, False otherwise
+        """
+        if self.outside_bbox(poly_id, x, y):
+            return False
+        return self.pip(poly_id, x, y)
+
+    def in_any_polygon(self, poly_ids: Iterable[int], x: int, y: int) -> bool:
+        """
+        Check if a point is inside any of the specified polygons.
+
+        :param poly_ids: An iterable of polygon IDs
+        :param x: X-coordinate of the point
+        :param y: Y-coordinate of the point
+        :return: True if the point is inside any polygon, False otherwise
+        """
+        for poly_id in poly_ids:
+            if self.pip_with_bbox_check(poly_id, x, y):
+                return True
+        return False
diff --git a/python/timezonefinder/py.typed b/python/timezonefinder/py.typed
new file mode 100644
index 000000000..e69de29bb
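
The per-polygon bounding boxes above allow rejecting most candidates with four comparisons before any coordinate data is read. A minimal sketch of the intended usage, assuming a data directory containing the xmin/xmax/ymin/ymax .npy files and the FlatBuffers coordinate file (the path is a placeholder):

    from pathlib import Path

    from timezonefinder.polygon_array import PolygonArray
    from timezonefinder.utils import coord2int

    # hypothetical data directory produced by the file converter
    boundaries = PolygonArray(data_location=Path("data/boundaries"), in_memory=False)

    # query coordinates must be converted to the stored 32-bit int scale
    x, y = coord2int(13.41), coord2int(52.52)

    # cheap bbox rejection first; the expensive ray casting runs only for survivors
    hit = any(
        boundaries.pip_with_bbox_check(poly_id, x, y)
        for poly_id in range(len(boundaries))
    )
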
""" + List of attribute names that store opened binary data files. + """ + + def __init__( + self, + bin_file_location: Optional[Union[str, Path]] = None, + in_memory: bool = False, + ): + """ + Initialize the AbstractTimezoneFinder. + :param bin_file_location: The path to the binary data files to use. If None, uses native package data. + :param in_memory: ignored. All binary files will be read into memory (few MB). Only used for polygon coordinate data. + """ + if bin_file_location is None: + bin_file_location = DEFAULT_DATA_DIR + self.data_location: Path = Path(bin_file_location) + + self.timezone_names = read_zone_names(self.data_location) + + # Load hybrid shortcut file - contains both zone IDs (for unique zones) and polygon arrays (for ambiguous zones) + zone_ids_path = get_zone_ids_path(self.data_location) + zone_ids_temp = read_per_polygon_vector(zone_ids_path) + zone_id_dtype = zone_ids_temp.dtype + + path2shortcut = get_hybrid_shortcut_file_path(zone_id_dtype, self.data_location) + self.shortcut_mapping = read_hybrid_shortcuts_binary(path2shortcut) + + zone_ids_path = get_zone_ids_path(self.data_location) + self.zone_ids = read_per_polygon_vector(zone_ids_path) + + def _iter_boundary_ids_of_zone(self, zone_id: int) -> Iterable[int]: + """ + Yield the boundary polygon IDs for a given zone ID. + + :param zone_id: ID of the zone + :yield: boundary polygon IDs + """ + # load only on demand. used when shortcuts contain zone IDs (hybrid optimization) + zone_positions_path = get_zone_positions_path(self.data_location) + zone_positions = np.load(zone_positions_path, mmap_mode="r") + first_boundary_id_zone = zone_positions[zone_id] + # read the id of the first boundary polygon of the consequent zone + # NOTE: this has also been added for the last zone + first_boundary_id_next = zone_positions[zone_id + 1] + yield from range(first_boundary_id_zone, first_boundary_id_next) + + @property + def nr_of_zones(self) -> int: + """ + Get the number of timezones. + + :rtype: int + """ + return len(self.timezone_names) + + @staticmethod + def using_numba() -> bool: + """ + Check if Numba is being used. + + :rtype: bool + :return: True if Numba is being used to JIT compile helper functions + """ + return utils.using_numba + + @staticmethod + def using_clang_pip() -> bool: + """ + :return: True if the compiled C implementation of the point in polygon algorithm is being used + """ + return utils.inside_polygon == utils_clang.pt_in_poly_clang + + def zone_id_of(self, boundary_id: IntegerLike) -> int: + """ + Get the zone ID of a polygon. + + :param boundary_id: The ID of the polygon. + :type boundary_id: int + :rtype: int + """ + try: + return int(self.zone_ids[boundary_id]) + except TypeError: + raise ValueError(f"zone_ids is not set in directory {self.data_location}.") + + def zone_ids_of(self, boundary_ids: np.ndarray) -> np.ndarray: + """ + Get the zone IDs of multiple boundary polygons. + + :param boundary_ids: An array of boundary polygon IDs. + :return: array of corresponding timezone IDs. + """ + return self.zone_ids[boundary_ids] + + def zone_name_from_id(self, zone_id: int) -> str: + """ + Get the zone name from a zone ID. + + :param zone_id: The ID of the zone. + :return: The name of the zone. + :raises ValueError: If the timezone could not be found. + """ + try: + return self.timezone_names[zone_id] + except IndexError: + raise ValueError("timezone could not be found. 
index error.") + + def zone_name_from_boundary_id(self, boundary_id: IntegerLike) -> str: + """ + Get the zone name from a boundary polygon ID. + + :param boundary_id: The ID of the boundary polygon. + :return: The name of the zone. + """ + zone_id = self.zone_id_of(boundary_id) + return self.zone_name_from_id(zone_id) + + def _iter_boundaries_in_shortcut(self, *, lng: float, lat: float) -> Iterable[int]: + """ + Iterate over boundary polygon IDs in the shortcut corresponding to the given coordinates. + + :param lng: The longitude of the point in degrees (-180.0 to 180.0). + :param lat: The latitude of the point in degrees (90.0 to -90.0). + :yield: Boundary polygon IDs. + """ + hex_id = h3.latlng_to_cell(lat, lng, SHORTCUT_H3_RES) + + # Handle shortcuts (hybrid structure) - if it's a zone ID, get all polygons for that zone + shortcut_value = self.shortcut_mapping.get(hex_id) + if shortcut_value is None: + return + elif isinstance(shortcut_value, int): + # Zone ID - get all boundary polygons for this zone + # Most polygons will be quickly ruled out by bbox check + yield from self._iter_boundary_ids_of_zone(shortcut_value) + else: + # Polygon array + yield from shortcut_value + + @abstractmethod + def timezone_at(self, *, lng: float, lat: float) -> Optional[str]: + """looks up in which timezone the given coordinate is included in + + :param lng: longitude of the point in degree (-180.0 to 180.0) + :param lat: latitude in degree (90.0 to -90.0) + :return: the timezone name of a matching polygon or None + """ + ... + + def timezone_at_land(self, *, lng: float, lat: float) -> Optional[str]: + """computes in which land timezone a point is included in + + Especially for large polygons it is expensive to check if a point is really included. + To speed things up there are "shortcuts" being used (stored in a binary file), + which have been precomputed and store which timezone polygons have to be checked. + + :param lng: longitude of the point in degree (-180.0 to 180.0) + :param lat: latitude in degree (90.0 to -90.0) + :return: the timezone name of a matching polygon or + ``None`` when an ocean timezone ("Etc/GMT+-XX") has been matched. + """ + tz_name = self.timezone_at(lng=lng, lat=lat) + if tz_name is not None and utils.is_ocean_timezone(tz_name): + return None + return tz_name + + def unique_timezone_at(self, *, lng: float, lat: float) -> Optional[str]: + """returns the name of a unique zone within the corresponding shortcut + + :param lng: longitude of the point in degree (-180.0 to 180.0) + :param lat: latitude in degree (90.0 to -90.0) + :return: the timezone name of the unique zone or ``None`` if there are no or multiple zones in this shortcut + """ + lng, lat = utils.validate_coordinates(lng, lat) + hex_id = h3.latlng_to_cell(lat, lng, SHORTCUT_H3_RES) + + # Shortcuts behavior (hybrid structure with precomputed uniqueness) + shortcut_value = self.shortcut_mapping.get(hex_id) + if shortcut_value is None: + return None + elif isinstance(shortcut_value, int): + # Zone ID - this is a precomputed unique zone + unique_id = shortcut_value + else: + # Polygon array - by definition not unique (would be stored as int if unique) + return None + + return self.zone_name_from_id(unique_id) + + def cleanup(self) -> None: + """Clean up resources. 
Override in subclasses as needed."""
+        pass
+
+    def __enter__(self):
+        """Enter the runtime context for the TimezoneFinder."""
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Exit the runtime context and clean up resources."""
+        self.cleanup()
+        return False
+
+
+class TimezoneFinderL(AbstractTimezoneFinder):
+    """a 'light' version of the TimezoneFinder class for quickly suggesting a timezone for a point on earth
+
+    Instead of using timezone polygon data like ``TimezoneFinder``,
+    this class only uses the precomputed 'shortcut' mapping to suggest a probable result:
+    the most common zone within the H3 hexagon cell the point falls into
+    """
+
+    def __init__(
+        self, bin_file_location: Optional[str] = None, in_memory: bool = False
+    ):
+        super().__init__(bin_file_location, in_memory)
+
+    def timezone_at(self, *, lng: float, lat: float) -> Optional[str]:
+        """instantly returns the name of the most common zone within the corresponding shortcut
+
+        Note: 'most common' here means the zone whose boundary polygons in this shortcut
+        have the largest total number of coordinates.
+
+        :param lng: longitude of the point in degree (-180.0 to 180.0)
+        :param lat: latitude in degree (90.0 to -90.0)
+        :return: the timezone name of the most common zone or None if there are no timezone polygons in this shortcut
+        """
+        lng, lat = utils.validate_coordinates(lng, lat)
+        # Inline fast-path to minimize helper overhead
+        hex_id = h3.latlng_to_cell(lat, lng, SHORTCUT_H3_RES)
+
+        shortcut_value = self.shortcut_mapping.get(hex_id)
+        if shortcut_value is None:
+            return None
+        elif isinstance(shortcut_value, int):
+            # Zone ID - unique zone case
+            return self.zone_name_from_id(shortcut_value)
+        else:
+            # Polygon array - get the last polygon (most common zone)
+            if len(shortcut_value) == 0:
+                return None
+            poly_of_biggest_zone = shortcut_value[-1]
+            # poly_of_biggest_zone is a numpy scalar from array indexing, but mypy sees it as ndarray
+            # This is safe: array element access returns a numpy integer scalar compatible with IntegerLike
+            most_common_id = self.zone_id_of(poly_of_biggest_zone)  # type: ignore[arg-type]
+            return self.zone_name_from_id(most_common_id)
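
Since ``TimezoneFinderL`` skips all point-in-polygon tests, it trades accuracy for speed. A short comparison sketch (assuming both classes are importable as below; the exact package-level exports may differ):

    from timezonefinder.timezonefinder import TimezoneFinder, TimezoneFinderL

    tf = TimezoneFinder()    # exact: runs PIP tests where the shortcut is ambiguous
    tfl = TimezoneFinderL()  # fast guess: shortcut lookup only, no polygon tests

    print(tf.timezone_at(lng=13.41, lat=52.52))   # e.g. 'Europe/Berlin'
    print(tfl.timezone_at(lng=13.41, lat=52.52))  # most common zone of the same hex
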
+
+
+class TimezoneFinder(AbstractTimezoneFinder):
+    """Class for quickly finding the timezone of a point on earth offline.
+
+    Because of indexing ("shortcuts"), not all timezone polygons have to be tested during a query.
+
+    Opens the required timezone polygon data in binary files to enable fast access.
+    For a detailed documentation of the data management please refer to the code documentation of
+    ``file_converter.py``
+
+    :param bin_file_location: path to the binary data files to use, None if native package data should be used
+    :param in_memory: Whether to completely read and keep the coordinate data in memory as numpy arrays.
+    """
+
+    # __slots__ declared in parents are available in child classes. However, child subclasses will get a __dict__
+    # and __weakref__ unless they also define __slots__ (which should only contain names of any additional slots).
+    __slots__ = [
+        "hole_registry",
+        "_boundaries_file",
+        "_holes_file",
+    ]
+
+    def __init__(
+        self, bin_file_location: Optional[str] = None, in_memory: bool = False
+    ):
+        super().__init__(bin_file_location, in_memory)
+        self.holes_dir = utils.get_holes_dir(self.data_location)
+        self.boundaries_dir = utils.get_boundaries_dir(self.data_location)
+        self.boundaries = PolygonArray(
+            data_location=self.boundaries_dir, in_memory=in_memory
+        )
+        self.holes = PolygonArray(data_location=self.holes_dir, in_memory=in_memory)
+
+        # stores for which polygons holes exist (and how many), plus the id of the first of those holes
+        # since there are very few entries it is feasible to keep them in memory
+        self.hole_registry = self._load_hole_registry()
+
+    def __del__(self) -> None:
+        """Clean up resources when the object is destroyed."""
+        del self.boundaries
+        del self.holes
+        del self.hole_registry
+
+    def _load_hole_registry(self) -> Dict[int, Tuple[int, int]]:
+        """
+        Load and convert the hole registry from JSON file, converting keys to int.
+        """
+        path = utils.get_hole_registry_path(self.data_location)
+        with open(path, encoding="utf-8") as json_file:
+            hole_registry_tmp = json.loads(json_file.read())
+        # convert the json string keys to int
+        return {int(k): v for k, v in hole_registry_tmp.items()}
+
+    @property
+    def nr_of_polygons(self) -> int:
+        return len(self.boundaries)
+
+    @property
+    def nr_of_holes(self) -> int:
+        return len(self.holes)
+
+    def coords_of(self, boundary_id: IntegerLike = 0) -> np.ndarray:
+        """
+        Get the coordinates of a boundary polygon from the FlatBuffers collection.
+
+        :param boundary_id: The index of the polygon.
+        :return: Array of coordinates.
+        """
+        return self.boundaries.coords_of(boundary_id)
+
+    def _iter_hole_ids_of(self, boundary_id: IntegerLike) -> Iterable[int]:
+        """
+        Yield the hole IDs for a given boundary polygon id.
+
+        :param boundary_id: id of the boundary polygon
+        :yield: Hole IDs
+        """
+        try:
+            amount_of_holes, first_hole_id = self.hole_registry[int(boundary_id)]
+        except KeyError:
+            return
+        for i in range(amount_of_holes):
+            yield first_hole_id + i
+
+    def _holes_of_poly(self, boundary_id: IntegerLike) -> Iterable[np.ndarray]:
+        """
+        Get the hole coordinates of a boundary polygon from the FlatBuffers collection.
+
+        :param boundary_id: id of the boundary polygon
+        :yield: Generator of hole coordinates
+        """
+        for hole_id in self._iter_hole_ids_of(boundary_id):
+            yield self.holes.coords_of(hole_id)
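
The hole registry maps a boundary polygon ID to a ``(number_of_holes, first_hole_id)`` tuple, so all holes of one polygon occupy a contiguous ID range. An illustration of the lookup with made-up registry contents:

    from typing import Dict, Iterator, Tuple

    hole_registry: Dict[int, Tuple[int, int]] = {7: (2, 15)}  # polygon 7 owns holes 15 and 16

    def iter_hole_ids(boundary_id: int) -> Iterator[int]:
        try:
            amount, first = hole_registry[boundary_id]
        except KeyError:
            return  # most polygons have no holes at all
        yield from range(first, first + amount)

    assert list(iter_hole_ids(7)) == [15, 16]
    assert list(iter_hole_ids(8)) == []
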
+
+    def get_polygon(
+        self, boundary_id: IntegerLike, coords_as_pairs: bool = False
+    ) -> List[Union[CoordPairs, CoordLists]]:
+        """
+        Get the polygon coordinates of a given boundary polygon including its holes.
+
+        :param boundary_id: ID of the boundary polygon
+        :param coords_as_pairs: If True, returns coordinates as pairs (lng, lat).
+            If False, returns coordinates as separate lists of longitudes and latitudes.
+        :return: List of polygon coordinates
+        """
+        list_of_converted_polygons = []
+        if coords_as_pairs:
+            conversion_method = utils.convert2coord_pairs
+        else:
+            conversion_method = utils.convert2coords
+        list_of_converted_polygons.append(
+            conversion_method(self.coords_of(boundary_id=boundary_id))
+        )
+
+        for hole in self._holes_of_poly(boundary_id):
+            list_of_converted_polygons.append(conversion_method(hole))
+
+        return list_of_converted_polygons
+
+    def get_geometry(
+        self,
+        tz_name: Optional[str] = "",
+        tz_id: Optional[int] = 0,
+        use_id: bool = False,
+        coords_as_pairs: bool = False,
+    ) -> List[List[Union[CoordPairs, CoordLists]]]:
+        """retrieves the geometry of a timezone: multiple boundary polygons with holes
+
+        :param tz_name: one of the names in ``timezone_names.json`` or ``self.timezone_names``
+        :param tz_id: the id of the timezone (=index in ``self.timezone_names``)
+        :param use_id: if ``True`` uses ``tz_id`` instead of ``tz_name``
+        :param coords_as_pairs: determines the structure of the polygon representation
+        :return: a data structure representing the multipolygon of this timezone
+            output format: ``[ [polygon1, hole1, hole2...], [polygon2, ...], ...]``
+            and each polygon and hole is itself formatted like: ``([longitudes], [latitudes])``
+            or ``[(lng1,lat1), (lng2,lat2),...]`` if ``coords_as_pairs=True``.
+        """
+
+        if use_id:
+            if not isinstance(tz_id, int):
+                raise TypeError("the zone id must be given as int.")
+            if tz_id < 0 or tz_id >= self.nr_of_zones:
+                raise ValueError(
+                    f"the given zone id {tz_id} is invalid (value range: 0 - {self.nr_of_zones - 1})."
+                )
+        else:
+            if tz_name is None:
+                raise ValueError("no timezone name given.")
+            try:
+                tz_id = self.timezone_names.index(tz_name)
+            except ValueError:
+                raise ValueError(f"The timezone '{tz_name}' does not exist.")
+        if tz_id is None:
+            raise ValueError("no timezone id given.")
+
+        return [
+            self.get_polygon(boundary_id, coords_as_pairs)
+            for boundary_id in self._iter_boundary_ids_of_zone(tz_id)
+        ]
+
+    def inside_of_polygon(self, boundary_id: IntegerLike, x: int, y: int) -> bool:
+        """
+        Check if a point is inside a boundary polygon.
+
+        :param boundary_id: boundary polygon ID
+        :param x: X-coordinate of the point
+        :param y: Y-coordinate of the point
+        :return: True if the point lies inside the boundary polygon, False if outside or in a hole.
+        """
+        # avoid running the expensive PIP algorithm at any cost
+        # -> check bboxes first
+        if self.boundaries.outside_bbox(boundary_id, x, y):
+            return False
+
+        # NOTE: holes are much smaller (fewer points) -> less expensive to check
+        # -> check holes before the boundary
+        hole_id_iter = self._iter_hole_ids_of(boundary_id)
+        if self.holes.in_any_polygon(hole_id_iter, x, y):
+            # the point is within one of the holes
+            # it is excluded from this boundary polygon
+            return False
+
+        return self.boundaries.pip(boundary_id, x, y)
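
``get_geometry()`` is the public way to export a zone's multipolygon. A usage sketch (the zone name is only an example; any name from ``timezone_names`` works):

    from timezonefinder.timezonefinder import TimezoneFinder

    tf = TimezoneFinder()
    # output format: [[boundary, hole1, hole2, ...], [boundary, ...], ...]
    geometry = tf.get_geometry(tz_name="Europe/Berlin", coords_as_pairs=True)
    for polygon in geometry:
        boundary, holes = polygon[0], polygon[1:]
        print(len(boundary), "boundary points,", len(holes), "holes")
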
+
+    def timezone_at(self, *, lng: float, lat: float) -> Optional[str]:
+        """
+        Find the timezone for a given point using hybrid shortcuts, considering both land and ocean timezones.
+
+        Uses precomputed hybrid shortcuts to reduce the number of polygons checked. Returns the timezone name
+        of the matched polygon, which may be an ocean timezone ("Etc/GMT+-XX") if applicable.
+
+        Since ocean timezones span the whole globe, some timezone will always be matched!
+        `None` can only be returned when using custom timezone data without such ocean timezones.
+
+        :param lng: longitude of the point in degrees (-180.0 to 180.0)
+        :param lat: latitude of the point in degrees (90.0 to -90.0)
+        :return: the timezone name of the matched polygon, or None if no match is found.
+        """
+        # NOTE: performance critical code. avoid helper function call overhead as much as possible
+        lng, lat = utils.validate_coordinates(lng, lat)
+        hex_id = h3.latlng_to_cell(lat, lng, SHORTCUT_H3_RES)
+
+        # Get shortcut value (hybrid optimization)
+        shortcut_value = self.shortcut_mapping.get(hex_id)
+        if shortcut_value is None:
+            # NOTE: hypothetical case, with ocean data every shortcut maps to at least one boundary polygon
+            return None
+
+        if isinstance(shortcut_value, int):
+            # Direct zone ID - optimal case for performance
+            return self.zone_name_from_id(shortcut_value)
+
+        # Polygon array case - need to check polygons
+        possible_boundaries = shortcut_value
+        nr_possible_polygons = len(possible_boundaries)
+        if nr_possible_polygons == 0:
+            return None
+        # NOTE: the length 1 case can never occur here, since it is covered by the unique zone shortcut
+
+        # create a list of all the timezone ids of all possible boundary polygons
+        zone_ids = self.zone_ids_of(possible_boundaries)
+
+        last_zone_change_idx = utils.get_last_change_idx(zone_ids)
+        # NOTE: the case last_zone_change_idx == 0 is covered by the unique zone shortcut
+
+        # ATTENTION: the polygons are stored as 32-bit scaled ints;
+        # convert the query coordinates in the same fashion in order to make the data formats match
+        # x = longitude, y = latitude, both scaled by 10^7 and truncated to int
+        x = utils.coord2int(lng)
+        y = utils.coord2int(lat)
+
+        # check until the point is included in one of the possible boundary polygons
+        for i, boundary_id in enumerate(possible_boundaries):
+            if i >= last_zone_change_idx:
+                # avoid expensive PIP checks when no other zone can be matched anymore
+                break
+
+            if self.inside_of_polygon(boundary_id, x, y):
+                zone_id = zone_ids[i]
+                return self.zone_name_from_id(int(zone_id))
+
+        # since it is the last remaining option,
+        # the polygons of the last possible zone don't actually have to be checked
+        # -> instantly return the last zone
+        zone_id = zone_ids[-1]
+        return self.zone_name_from_id(int(zone_id))
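
The early-exit logic in ``timezone_at()`` relies on the candidate polygons of a shortcut being sorted by zone: once only one zone remains among the untested candidates, it can be returned without further PIP tests. Illustratively (the zone IDs are made up):

    import numpy as np

    from timezonefinder.utils import get_last_change_idx

    # zone ids of the candidate polygons, equal zones stored adjacently
    zone_ids = np.array([3, 3, 7, 9, 9, 9])

    # index from which on all remaining entries belong to the same zone
    assert get_last_change_idx(zone_ids) == 3

    # only candidates 0..2 ever need a point-in-polygon test;
    # if none of them matches, zone 9 is returned without testing its polygons
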
+
+    def certain_timezone_at(self, *, lng: float, lat: float) -> Optional[str]:
+        """checks in which timezone polygon the point is certainly included in using hybrid shortcuts
+
+        .. note:: this is only meaningful when you have compiled your own timezone data
+            where there are areas without timezone polygon coverage.
+            Otherwise, some timezone will always be matched and the functionality is equal to using `.timezone_at()`
+            -> useless to actually test all polygons.
+
+        .. note:: using this function is less performant than `.timezone_at()`
+
+        :param lng: longitude of the point in degree
+        :param lat: latitude of the point in degree
+        :return: the timezone name of the polygon the point is included in or `None`
+        """
+        lng, lat = utils.validate_coordinates(lng, lat)
+        hex_id = h3.latlng_to_cell(lat, lng, SHORTCUT_H3_RES)
+
+        # Get shortcut value (hybrid optimization)
+        shortcut_value = self.shortcut_mapping.get(hex_id)
+        if shortcut_value is None:
+            return None
+
+        # ATTENTION: the polygons are stored as 32-bit scaled ints;
+        # convert the query coordinates in the same fashion in order to make the data formats match
+        # x = longitude, y = latitude, both scaled by 10^7 and truncated to int
+        x = utils.coord2int(lng)
+        y = utils.coord2int(lat)
+
+        # check if the query point is found to be truly included in one of the possible boundary polygons
+        if isinstance(shortcut_value, int):
+            # For zone IDs, iterate directly over boundary polygons for that zone
+            # Most polygons will be quickly ruled out by bbox check
+            boundary_ids = self._iter_boundary_ids_of_zone(shortcut_value)
+        else:
+            # Polygon array case - iterate directly over the array
+            boundary_ids = shortcut_value
+
+        for boundary_id in boundary_ids:
+            if self.inside_of_polygon(boundary_id, x, y):
+                zone_id = self.zone_id_of(boundary_id)
+                return self.zone_name_from_id(zone_id)
+
+        # none of the boundary polygon candidates truly matched
+        return None
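
Together with the module-level singleton from global_functions.py, the lookup can be used without managing an instance at all. A sketch (the ocean example values are illustrative):

    from timezonefinder.global_functions import timezone_at, timezone_at_land

    # a point in the Atlantic: matched by an ocean timezone
    print(timezone_at(lng=-70.0, lat=40.0))       # e.g. 'Etc/GMT+5'
    print(timezone_at_land(lng=-70.0, lat=40.0))  # None: ocean matches are filtered
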
diff --git a/python/timezonefinder/utils.py b/python/timezonefinder/utils.py
new file mode 100644
index 000000000..1eebbe933
--- /dev/null
+++ b/python/timezonefinder/utils.py
@@ -0,0 +1,83 @@
+"""utility functions"""
+
+from pathlib import Path
+import re
+from typing import Any, Callable, Tuple
+
+import numpy as np
+
+from timezonefinder.configs import (
+    DEFAULT_DATA_DIR,
+    OCEAN_TIMEZONE_PREFIX,
+)
+from timezonefinder import utils_numba, utils_clang
+
+
+# make numba functions available via utils
+using_numba = utils_numba.using_numba
+clang_extension_loaded = utils_clang.clang_extension_loaded
+is_valid_lat = utils_numba.is_valid_lat
+is_valid_lng = utils_numba.is_valid_lng
+coord2int = utils_numba.coord2int
+int2coord = utils_numba.int2coord
+convert2coords = utils_numba.convert2coords
+convert2coord_pairs = utils_numba.convert2coord_pairs
+get_last_change_idx = utils_numba.get_last_change_idx
+
+
+inside_polygon: Callable[[int, int, np.ndarray], bool]
+# decide at import time which "point-in-polygon" implementation will be used
+if clang_extension_loaded and not using_numba:
+    # use the C implementation only if Numba is not present
+    inside_polygon = utils_clang.pt_in_poly_clang
+else:
+    # use the (JIT compiled) python function if Numba is present or the C extension cannot be loaded
+    inside_polygon = utils_numba.pt_in_poly_python
+
+
+def validate_lat(lat: float) -> None:
+    if not is_valid_lat(lat):
+        raise ValueError(f"The given latitude {lat} is out of bounds")
+
+
+def validate_lng(lng: float) -> None:
+    if not is_valid_lng(lng):
+        raise ValueError(f"The given longitude {lng} is out of bounds")
+
+
+def validate_coordinates(lng: float, lat: float) -> Tuple[float, float]:
+    lng, lat = float(lng), float(lat)
+    validate_lng(lng)
+    validate_lat(lat)
+    return lng, lat
+
+
+def close_resource(obj: Any) -> None:
+    """Safely close a resource, ignoring specific expected errors."""
+    if obj is None:
+        return
+    try:
+        obj.close()
+    except (AttributeError, OSError, ValueError):
+        pass
+
+
+def is_ocean_timezone(timezone_name: str) -> bool:
+    return re.match(OCEAN_TIMEZONE_PREFIX, timezone_name) is not None
+
+
+def get_boundaries_dir(data_dir: Path = DEFAULT_DATA_DIR) -> Path:
+    """Return the path to the boundaries directory."""
+    return data_dir / "boundaries"
+
+
+def get_holes_dir(data_dir: Path = DEFAULT_DATA_DIR) -> Path:
+    """Return the path to the holes directory."""
+    return data_dir / "holes"
+
+
+def get_hole_registry_path(data_dir: Path = DEFAULT_DATA_DIR) -> Path:
+    """Return the path to the hole registry file."""
+    return data_dir / "hole_registry.json"
diff --git a/python/timezonefinder/utils_clang.py b/python/timezonefinder/utils_clang.py
new file mode 100644
index 000000000..75bb94b35
--- /dev/null
+++ b/python/timezonefinder/utils_clang.py
@@ -0,0 +1,44 @@
+from typing import Final, Optional
+
+import cffi
+
+import numpy as np
+
+ffi: Optional[cffi.FFI] = None
+try:
+    # Note: IDE might complain as this import comes from a cffi C extension
+    from timezonefinder import inside_polygon_ext  # type: ignore
+
+    clang_extension_loaded = True
+    ffi = cffi.FFI()
+
+except ImportError:
+    clang_extension_loaded = False
+    inside_polygon_ext = None
+
+INT_LIST_REP: Final[str] = "int []"
+
+
+def pt_in_poly_clang(x: int, y: int, coords: np.ndarray) -> bool:
+    """wrapper of the point in polygon test algorithm C extension
+
+    ATTENTION: the input numpy arrays must have a C_CONTIGUOUS memory layout
+    https://numpy.org/doc/stable/reference/generated/numpy.ascontiguousarray.html?highlight=ascontiguousarray#numpy.ascontiguousarray
+    """
+    if ffi is None:
+        raise ValueError(
+            "Trying to use the clang implementation of the point in polygon algorithm "
+            "while the C extension is not loaded."
+        )
+    x_coords = coords[0]
+    y_coords = coords[1]
+    nr_coords = len(x_coords)
+
+    y_coords = np.ascontiguousarray(y_coords)
+    x_coords = np.ascontiguousarray(x_coords)
+    x_coords_ffi = ffi.from_buffer(INT_LIST_REP, x_coords)
+    y_coords_ffi = ffi.from_buffer(INT_LIST_REP, y_coords)
+    contained = inside_polygon_ext.lib.inside_polygon_int(
+        x, y, nr_coords, x_coords_ffi, y_coords_ffi
+    )
+    return contained
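
Which point-in-polygon backend ends up behind ``utils.inside_polygon`` is decided once at import time, as shown above. A quick way to check what a given installation selected:

    from timezonefinder import utils, utils_clang, utils_numba

    if utils.inside_polygon is utils_numba.pt_in_poly_python:
        # chosen when Numba is installed or the C extension failed to load
        print("Python implementation (JIT compiled if Numba is available)")
    else:
        # chosen when the cffi extension loaded and Numba is absent
        assert utils.inside_polygon is utils_clang.pt_in_poly_clang
        print("C extension via cffi")
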
diff --git a/python/timezonefinder/utils_numba.py b/python/timezonefinder/utils_numba.py
new file mode 100644
index 000000000..a50c4698d
--- /dev/null
+++ b/python/timezonefinder/utils_numba.py
@@ -0,0 +1,201 @@
+"""performance critical utility functions
+
+JIT compiled for efficiency in case `numba` is installed
+
+TODO Numba Ahead-Of-Time Compilation:
+cc = CC('precompiled_helpers', )
+# Uncomment the following line to print out the compilation steps
+cc.verbose = True
+
+if __name__ == "__main__":
+    cc.compile()
+"""
+
+import numpy as np
+
+from timezonefinder.configs import (
+    COORD2INT_FACTOR,
+    INT2COORD_FACTOR,
+    CoordLists,
+    CoordPairs,
+)
+
+try:
+    from numba import njit, boolean, i4, f8
+    from numba.types import Array
+
+    using_numba = True
+except ImportError:
+    using_numba = False
+    # replace Numba functionality with "transparent" implementations
+    from timezonefinder._numba_replacements import njit, boolean, Array, i4, f8
+
+
+# For Fortran-ordered arrays (F-contiguous), use 'order="F"' in the Numba signature.
+# F order is natural for the used coordinate schema:
+# coords = [x_coords, y_coords]
+# x_coords = coords[0]
+CoordType = Array(i4, 2, "F", True, aligned=True)
+
+
+# @cc.export('inside_polygon', 'b1(i4, i4, i4[:, :])')
+@njit(boolean(i4, i4, CoordType), cache=True)
+def pt_in_poly_python(x: int, y: int, coords: np.ndarray) -> bool:
+    """
+    Implementation of the ray casting point in polygon test algorithm,
+    cf. https://en.wikipedia.org/wiki/Point_in_polygon#Ray_casting_algorithm
+
+    :param x:
+    :param y:
+    :param coords: a polygon represented by a list containing two lists (x and y coordinates):
+        [ [x1,x2,x3...], [y1,y2,y3...]]
+        those lists are actually numpy arrays which are being read directly from a binary file
+    :return: true if the point (x,y) lies within the polygon
+
+    Some overflow considerations for the critical part of comparing the line segment slopes:
+
+        (y2 - y) * (x2 - x1) <= delta_y_max * delta_x_max
+        (y2 - y1) * (x2 - x) <= delta_y_max * delta_x_max
+        delta_y_max * delta_x_max = 180 * 360 < 65 x10^3
+
+    Instead of calculating with floats I decided to use just ints (scaled by 10^7). That gives:
+
+        delta_y_max * delta_x_max = 180x10^7 * 360x10^7
+        delta_y_max * delta_x_max <= 65x10^17
+
+    So these numbers need up to log_2(65 x10^17) ~ 63 bits to be represented! Even though values this big should never
+    occur in practice (timezone polygons do not span the whole lng lat coordinate space),
+    32bit accuracy hence is not safe to use here!
+    Pure Python automatically uses the appropriate int data type preventing overflow
+    (cf. https://www.python.org/dev/peps/pep-0237/),
+    but here the data types are numpy internal static data types. The data is stored as int32
+    -> use int64 when comparing slopes!
+
+    Slower naive implementation:
+
+        j = nr_coords - 1
+        for i in range(nr_coords):
+            if ((y_coords[i] > y) != (y_coords[j] > y)) and (
+                x
+                < (int64(x_coords[j]) - int64(x_coords[i]))
+                * (int64(y) - int64(y_coords[i]))
+                / (int64(y_coords[j]) - int64(y_coords[i]))
+                + int64(x_coords[i])
+            ):
+                inside = not inside
+            j = i
+    """
+    x_coords = coords[0]
+    y_coords = coords[1]
+    nr_coords = len(x_coords)
+    inside = False
+
+    # the edge from the last to the first point is checked first
+    y1 = y_coords[-1]
+    y_gt_y1 = y > y1
+    for i in range(nr_coords):
+        y2 = y_coords[i]
+        y_gt_y2 = y > y2
+        if y_gt_y1 ^ y_gt_y2:  # XOR
+            # [p1-p2] crosses horizontal line in p
+            x1 = x_coords[i - 1]
+            x2 = x_coords[i]
+            # only count crossings "right" of the point ( >= x)
+            x_le_x1 = x <= x1
+            x_le_x2 = x <= x2
+            if x_le_x1 or x_le_x2:
+                if x_le_x1 and x_le_x2:
+                    # p1 and p2 are both to the right -> valid crossing
+                    inside = not inside
+                else:
+                    # compare the slope of the line [p1-p2] and [p-p2]
+                    # depending on the position of p2 this determines whether
+                    # the polygon edge is right or left of the point
+                    # to avoid expensive division the divisors (of the slope dy/dx) are brought to the other side
+                    # ( dy/dx > a == dy > a * dx )
+                    # only one of the points is to the right
+                    # NOTE: int64 precision required to prevent overflow
+                    y_64 = np.int64(y)
+                    y1_64 = np.int64(y1)
+                    y2_64 = np.int64(y2)
+                    x_64 = np.int64(x)
+                    x1_64 = np.int64(x1)
+                    x2_64 = np.int64(x2)
+                    slope1 = (y2_64 - y_64) * (x2_64 - x1_64)
+                    slope2 = (y2_64 - y1_64) * (x2_64 - x_64)
+                    # NOTE: accept slope equality to also detect if p lies directly on an edge
+                    if y_gt_y1:
+                        if slope1 <= slope2:
+                            inside = not inside
+                    elif slope1 >= slope2:  # NOT y_gt_y1
+                        inside = not inside
+
+        # next point
+        y1 = y2
+        y_gt_y1 = y_gt_y2
+
+    return inside
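
The int64 requirement spelled out in the docstring can be demonstrated directly: a plausible slope product already overflows int32, and numpy arrays wrap around silently instead of raising. A sketch:

    import numpy as np

    dy = np.array([60_000], dtype=np.int32)  # plausible deltas at the 10^7 int scale
    dx = np.array([60_000], dtype=np.int32)

    wrapped = (dy * dx).item()                 # int32 product wraps around silently
    exact = (dy.astype(np.int64) * dx).item()  # promoted to int64: 3_600_000_000

    assert exact == 3_600_000_000
    assert wrapped != exact  # the int32 result is a negative, wrapped-around value
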
+
+
+@njit(cache=True)
+def get_last_change_idx(lst: np.ndarray) -> int:
+    """
+    :param lst: list of entries
+    :return: the index of the element for which all following elements are equal
+    """
+    nr_entries = lst.shape[0]
+    if nr_entries <= 1:
+        return 0
+    # at least 2 elements
+    last_elem = lst[-1]
+    for ptr in range(2, nr_entries + 1):
+        # Note: iterating from the back
+        element = lst[-ptr]
+        if element != last_elem:
+            # return the last pointer value
+            # Attention: convert into a positive "absolute" index first
+            return nr_entries - ptr + 1
+    # Note: all entries are the same -> return 0
+    return 0
+
+
+# @cc.export('int2coord', f8(i4))
+@njit(f8(i4), cache=True)
+def int2coord(i4: int) -> float:
+    return float(i4 * INT2COORD_FACTOR)
+
+
+# @cc.export('coord2int', i4(f8))
+@njit(i4(f8), cache=True)
+def coord2int(double: float) -> int:
+    return int(double * COORD2INT_FACTOR)
+
+
+@njit(cache=True)
+def convert2coords(polygon_data: np.ndarray) -> CoordLists:
+    # return a tuple of coordinate lists
+    return [
+        [int2coord(x) for x in polygon_data[0]],
+        [int2coord(y) for y in polygon_data[1]],
+    ]
+
+
+@njit(cache=True)
+def convert2coord_pairs(polygon_data: np.ndarray) -> CoordPairs:
+    # return a list of coordinate tuples (x,y)
+    x_coords = polygon_data[0]
+    y_coords = polygon_data[1]
+    nr_coords = len(x_coords)
+    coordinate_list = [
+        (int2coord(x_coords[i]), int2coord(y_coords[i])) for i in range(nr_coords)
+    ]
+    return coordinate_list
+
+
+@njit(boolean(f8), cache=True)
+def is_valid_lat(lat: float) -> bool:
+    return -90.0 <= lat <= 90.0
+
+
+@njit(boolean(f8), cache=True)
+def is_valid_lng(lng: float) -> bool:
+    return -180.0 <= lng <= 180.0
diff --git a/python/timezonefinder/zone_names.py b/python/timezonefinder/zone_names.py
new file mode 100644
index 000000000..8b0cf7bf1
--- /dev/null
+++ b/python/timezonefinder/zone_names.py
@@ -0,0 +1,41 @@
+from pathlib import Path
+from typing import List
+
+from timezonefinder.configs import DEFAULT_DATA_DIR
+
+
+def get_zone_names_path(output_path: Path = DEFAULT_DATA_DIR) -> Path:
+    """Get the path to the timezone names text file."""
+    return output_path / "timezone_names.txt"
+
+
+def write_zone_names(
+    zone_names: List[str], output_path: Path = DEFAULT_DATA_DIR
+) -> None:
+    """
+    Write timezone names to a text file.
+
+    :param zone_names: List of timezone names.
+    :param output_path: Directory where the output file will be written.
+    """
+    path = get_zone_names_path(output_path)
+    with open(path, "w", encoding="utf-8") as f:
+        f.write("\n".join(zone_names))
+        f.write("\n")  # write a newline at the end of the file
+
+
+def read_zone_names(path: Path) -> List[str]:
+    """
+    Read timezone names from a text file.
+
+    :param path: Directory containing the timezone names text file.
+    :return: List of timezone names.
+    """
+    file_path = get_zone_names_path(path)
+    with open(file_path, encoding="utf-8") as f:
+        return [line.strip() for line in f if line.strip()]
diff --git a/src/components/Metadata.vue b/src/components/Metadata.vue
index 6a8b16bf9..6fa163bc4 100644
--- a/src/components/Metadata.vue
+++ b/src/components/Metadata.vue
@@ -225,9 +225,9 @@ export default defineComponent({
     // If we have an actual epoch, we can shift the date to the correct timezone
     if (!valid() && this.exif.DateTimeEpoch) {
-      const date = DateTime.fromSeconds(this.exif.DateTimeEpoch);
+      const date = DateTime.fromSeconds(this.exif.DateTimeEpoch, { zone: 'UTC' });
       if (date.isValid) {
-        const tzOffset = this.exif.OffsetTimeOriginal || this.exif.OffsetTime; // e.g. -05:00
+        const tzOffset = this.exif.OffsetTimeOriginal || this.exif.OffsetTime || this.exif.TimeZone || this.exif.OffsetTimeDigitized; // e.g. -05:00
        const tzId = this.exif.LocationTZID; // e.g.
America/New_York // Use timezone offset if available @@ -243,12 +243,9 @@ export default defineComponent({ } // If tz info is unavailable / wrong, we will show the local time only - // In this case, use the datetaken instead, which is guaranteed to be local, shifted to UTC + // In this case, use the datetaken instead if (!valid() && this.baseInfo.datetaken) { - const date = DateTime.fromSeconds(this.baseInfo.datetaken); - if (date.isValid) { - dateWithTz = date.setZone('UTC'); - } + dateWithTz = DateTime.fromSeconds(this.baseInfo.datetaken, { zone: 'UTC' }); } // Return only if we found a valid date @@ -256,13 +253,20 @@ export default defineComponent({ }, dateOriginalStr(): string | null { - return utils.getLongDateStr(new Date(this.baseInfo.datetaken * 1000), true); + // Use the dateOriginal property which already handles timezone correctly + if (!this.dateOriginal) { + // Fallback to raw datetaken if dateOriginal couldn't be computed + return utils.getLongDateStr(new Date(this.baseInfo.datetaken * 1000), true); + } + + // Format the dateOriginal (which has timezone applied) as a long date string + return this.dateOriginal.toFormat('EEE, LLL d', { locale: getCanonicalLocale() }); }, dateOriginalTime(): string[] | null { if (!this.dateOriginal) return null; - const fields: (keyof IExif)[] = ['OffsetTimeOriginal', 'OffsetTime', 'LocationTZID']; + const fields: (keyof IExif)[] = ['OffsetTimeOriginal', 'OffsetTime', 'TimeZone', 'OffsetTimeDigitized', 'LocationTZID']; const hasTz = fields.some((key) => this.exif[key]); const format = 't' + (hasTz ? ' ZZ' : ''); diff --git a/src/components/viewer/Viewer.vue b/src/components/viewer/Viewer.vue index 1fec53daf..147f7956e 100644 --- a/src/components/viewer/Viewer.vue +++ b/src/components/viewer/Viewer.vue @@ -59,6 +59,8 @@ import NcActions from '@nextcloud/vue/dist/Components/NcActions.js'; import NcActionButton from '@nextcloud/vue/dist/Components/NcActionButton.js'; import { showError } from '@nextcloud/dialogs'; import axios from '@nextcloud/axios'; +import { getCanonicalLocale } from '@nextcloud/l10n'; +import { DateTime } from 'luxon'; import { API } from '@services/API'; import * as dav from '@services/dav'; @@ -387,9 +389,39 @@ export default defineComponent({ /** Get date taken string */ currentDateTaken(): string | null { - const date = this.currentPhoto?.imageInfo?.datetaken; - if (!date) return null; - return utils.getLongDateStr(new Date(date * 1000), false, true); + + const info = this.currentPhoto?.imageInfo; + if (!info) return null; + + const exif = info.exif; + if (!info.exif) return null; + + // Try timezone-aware formatting if EXIF data is available + if (exif?.DateTimeEpoch) { + const date = DateTime.fromSeconds(exif.DateTimeEpoch, { zone: 'UTC' }); + if (date.isValid) { + const tzOffset = exif.OffsetTimeOriginal || exif.OffsetTime || exif.TimeZone || exif.OffsetTimeDigitized; + const tzId = exif.LocationTZID; + + let dateWithTz: DateTime | null = null; + + // Apply timezone offset + if (tzOffset) { + dateWithTz = date.setZone('UTC' + tzOffset); + } else if (tzId) { + dateWithTz = date.setZone(tzId); + } + + // Format with timezone applied + if (dateWithTz?.isValid) { + return dateWithTz.toFormat('EEE, LLL d, yyyy, t', { locale: getCanonicalLocale() }); + } + } + } + + // Fallback to datetaken without timezone info + if (!info.datetaken) return null; + return utils.getLongDateStr(new Date(info.datetaken * 1000), false, true); }, /** Show edit buttons */ diff --git a/src/typings/data.d.ts b/src/typings/data.d.ts index 
631ac3608..0d4c57025 100644 --- a/src/typings/data.d.ts +++ b/src/typings/data.d.ts @@ -148,6 +148,8 @@ declare module '@typings' { DateTimeOriginal?: string; DateTimeEpoch?: number; OffsetTimeOriginal?: string; + TimeZone?: string; + OffsetTimeDigitized?: string; OffsetTime?: string; LocationTZID?: string; AllDates?: string; // only for setting